Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- minigpt2/lib/python3.10/site-packages/ray/data/__pycache__/__init__.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/ray/data/__pycache__/context.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/ray/data/__pycache__/iterator.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/ray/data/_internal/arrow_ops/__init__.py +0 -0
- minigpt2/lib/python3.10/site-packages/ray/data/_internal/arrow_ops/__pycache__/__init__.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/ray/data/_internal/arrow_ops/__pycache__/transform_polars.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/ray/data/_internal/arrow_ops/__pycache__/transform_pyarrow.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/ray/data/_internal/arrow_ops/transform_polars.py +40 -0
- minigpt2/lib/python3.10/site-packages/ray/data/_internal/arrow_ops/transform_pyarrow.py +497 -0
- minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/__init__.py +0 -0
- minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/plan.py +25 -0
- minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/__init__.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/all_to_all_operator.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/from_operators.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/map_operator.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/write_operator.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/optimizers.py +94 -0
- minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/util.py +112 -0
- minigpt2/lib/python3.10/site-packages/ray/data/datasource/__init__.py +58 -0
- minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/__init__.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/datasink.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/datasource.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/file_based_datasource.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/file_datasink.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/file_meta_provider.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/filename_provider.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/parquet_meta_provider.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/partitioning.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/path_util.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/ray/data/datasource/datasink.py +198 -0
- minigpt2/lib/python3.10/site-packages/ray/data/datasource/datasource.py +243 -0
- minigpt2/lib/python3.10/site-packages/ray/data/datasource/file_based_datasource.py +533 -0
- minigpt2/lib/python3.10/site-packages/ray/data/datasource/file_datasink.py +262 -0
- minigpt2/lib/python3.10/site-packages/ray/data/datasource/file_meta_provider.py +484 -0
- minigpt2/lib/python3.10/site-packages/ray/data/datasource/filename_provider.py +122 -0
- minigpt2/lib/python3.10/site-packages/ray/data/datasource/parquet_meta_provider.py +252 -0
- minigpt2/lib/python3.10/site-packages/ray/data/datasource/partitioning.py +456 -0
- minigpt2/lib/python3.10/site-packages/ray/data/datasource/path_util.py +205 -0
- minigpt2/lib/python3.10/site-packages/ray/data/extensions/__init__.py +45 -0
- minigpt2/lib/python3.10/site-packages/ray/data/extensions/__pycache__/__init__.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/ray/data/extensions/__pycache__/object_extension.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/ray/data/extensions/__pycache__/tensor_extension.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/ray/data/extensions/object_extension.py +10 -0
- minigpt2/lib/python3.10/site-packages/ray/data/extensions/tensor_extension.py +15 -0
- minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__init__.py +50 -0
- minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/__init__.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/chain.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/concatenator.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/discretizer.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/encoder.cpython-310.pyc +0 -0
minigpt2/lib/python3.10/site-packages/ray/data/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (2.73 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/ray/data/__pycache__/context.cpython-310.pyc
ADDED
|
Binary file (15.5 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/ray/data/__pycache__/iterator.cpython-310.pyc
ADDED
|
Binary file (37.2 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/ray/data/_internal/arrow_ops/__init__.py
ADDED
|
File without changes
|
minigpt2/lib/python3.10/site-packages/ray/data/_internal/arrow_ops/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (182 Bytes). View file
|
|
|
minigpt2/lib/python3.10/site-packages/ray/data/_internal/arrow_ops/__pycache__/transform_polars.cpython-310.pyc
ADDED
|
Binary file (1.44 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/ray/data/_internal/arrow_ops/__pycache__/transform_pyarrow.cpython-310.pyc
ADDED
|
Binary file (11.9 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/ray/data/_internal/arrow_ops/transform_polars.py
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import TYPE_CHECKING, List
|
| 2 |
+
|
| 3 |
+
try:
|
| 4 |
+
import pyarrow
|
| 5 |
+
except ImportError:
|
| 6 |
+
pyarrow = None
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
if TYPE_CHECKING:
|
| 10 |
+
from ray.data._internal.planner.exchange.sort_task_spec import SortKey
|
| 11 |
+
|
| 12 |
+
pl = None
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def check_polars_installed():
    """Lazily import polars, binding it to the module-level ``pl`` name.

    Raises:
        ImportError: If polars is not available in the environment.
    """
    global pl
    try:
        import polars as pl
    except ImportError:
        raise ImportError(
            "polars not installed. Install with `pip install polars` or set "
            "`DataContext.use_polars = False` to fall back to pyarrow"
        )
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def sort(table: "pyarrow.Table", sort_key: "SortKey") -> "pyarrow.Table":
    """Sort an Arrow table via polars and convert the result back to Arrow."""
    check_polars_installed()
    sorted_frame = pl.from_arrow(table).sort(
        sort_key.get_columns(), reverse=sort_key.get_descending()
    )
    return sorted_frame.to_arrow()
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def concat_and_sort(
    blocks: List["pyarrow.Table"], sort_key: "SortKey"
) -> "pyarrow.Table":
    """Concatenate Arrow tables with polars, sort the result, and return Arrow."""
    check_polars_installed()
    frames = [pl.from_arrow(block) for block in blocks]
    combined = pl.concat(frames).sort(
        sort_key.get_columns(), reverse=sort_key.get_descending()
    )
    return combined.to_arrow()
|
minigpt2/lib/python3.10/site-packages/ray/data/_internal/arrow_ops/transform_pyarrow.py
ADDED
|
@@ -0,0 +1,497 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import TYPE_CHECKING, List, Union
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
from packaging.version import parse as parse_version
|
| 5 |
+
|
| 6 |
+
from ray._private.utils import _get_pyarrow_version
|
| 7 |
+
from ray.air.util.tensor_extensions.arrow import (
|
| 8 |
+
INT32_OVERFLOW_THRESHOLD,
|
| 9 |
+
MIN_PYARROW_VERSION_CHUNKED_ARRAY_TO_NUMPY_ZERO_COPY_ONLY,
|
| 10 |
+
PYARROW_VERSION,
|
| 11 |
+
)
|
| 12 |
+
|
| 13 |
+
try:
|
| 14 |
+
import pyarrow
|
| 15 |
+
except ImportError:
|
| 16 |
+
pyarrow = None
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
if TYPE_CHECKING:
|
| 20 |
+
from ray.data._internal.planner.exchange.sort_task_spec import SortKey
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def sort(table: "pyarrow.Table", sort_key: "SortKey") -> "pyarrow.Table":
    """Return ``table`` reordered according to ``sort_key``."""
    import pyarrow.compute as pac

    order = pac.sort_indices(table, sort_keys=sort_key.to_arrow_sort_args())
    # Route through take_table() instead of Table.take() so that extension
    # columns are handled correctly.
    return take_table(table, order)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def take_table(
    table: "pyarrow.Table",
    indices: Union[List[int], "pyarrow.Array", "pyarrow.ChunkedArray"],
) -> "pyarrow.Table":
    """Select rows from the table.

    This method is an alternative to pyarrow.Table.take(), which breaks for
    extension arrays. This is exposed as a static method for easier use on
    intermediate tables, not underlying an ArrowBlockAccessor.
    """
    from ray.air.util.transform_pyarrow import (
        _concatenate_extension_column,
        _is_column_extension_type,
    )

    if not any(_is_column_extension_type(c) for c in table.columns):
        # Fast path: no extension columns, so Table.take() works as-is.
        return table.take(indices)

    taken_columns = []
    for column in table.columns:
        if _is_column_extension_type(column) and column.num_chunks > 1:
            # .take() will concatenate internally, which currently breaks for
            # extension arrays, so concatenate the chunks manually first.
            column = _concatenate_extension_column(column)
        taken_columns.append(column.take(indices))
    return pyarrow.Table.from_arrays(taken_columns, schema=table.schema)
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def unify_schemas(
    schemas: List["pyarrow.Schema"],
) -> "pyarrow.Schema":
    """Version of `pyarrow.unify_schemas()` which also handles checks for
    variable-shaped tensors in the given schemas.

    This function scans all input schemas to identify columns that contain
    variable-shaped tensors or objects. For tensor columns, it ensures the
    use of appropriate tensor types (including variable-shaped tensor types).
    For object columns, it uses a specific object type to accommodate any
    objects present. Additionally, it handles columns with null-typed lists
    by determining their actual types from the given schemas.

    Currently, it disallows the concatenation of tensor columns and
    pickled object columns for performance reasons.

    Args:
        schemas: Schemas to unify; they may disagree on per-column types.

    Returns:
        A single unified ``pyarrow.Schema``.

    Raises:
        ValueError: If a schema has duplicate field names, or a column mixes
            object and tensor types across schemas.
    """
    import pyarrow as pa

    from ray.air.util.object_extensions.arrow import ArrowPythonObjectType
    from ray.air.util.tensor_extensions.arrow import (
        ArrowTensorType,
        ArrowVariableShapedTensorType,
    )

    schemas_to_unify = []
    # Maps column name -> type that should override that column in every schema.
    schema_field_overrides = {}

    # Rollup columns with opaque (null-typed) lists, to override types in
    # the following for-loop.
    cols_with_null_list = set()

    all_columns = set()
    for schema in schemas:
        for col_name in schema.names:
            col_type = schema.field(col_name).type
            if pa.types.is_list(col_type) and pa.types.is_null(col_type.value_type):
                cols_with_null_list.add(col_name)
            all_columns.add(col_name)

    from ray.air.util.tensor_extensions.arrow import (
        get_arrow_extension_fixed_shape_tensor_types,
        get_arrow_extension_tensor_types,
    )

    arrow_tensor_types = get_arrow_extension_tensor_types()
    arrow_fixed_shape_tensor_types = get_arrow_extension_fixed_shape_tensor_types()

    # Classify each column: does any schema hold objects / tensor arrays in it?
    columns_with_objects = set()
    columns_with_tensor_array = set()
    for col_name in all_columns:
        for s in schemas:
            indices = s.get_all_field_indices(col_name)
            if len(indices) > 1:
                # This is broken for Pandas blocks and broken with the logic here
                raise ValueError(
                    f"Schema {s} has multiple fields with the same name: {col_name}"
                )
            elif len(indices) == 0:
                # Column absent from this schema; other schemas may still have it.
                continue
            if isinstance(s.field(col_name).type, ArrowPythonObjectType):
                columns_with_objects.add(col_name)
            if isinstance(s.field(col_name).type, arrow_tensor_types):
                columns_with_tensor_array.add(col_name)

    if len(columns_with_objects.intersection(columns_with_tensor_array)) > 0:
        # This is supportable if we use object type, but it will be expensive
        raise ValueError(
            "Found columns with both objects and tensors: "
            f"{columns_with_tensor_array.intersection(columns_with_objects)}"
        )
    for col_name in columns_with_tensor_array:
        tensor_array_types = [
            s.field(col_name).type
            for s in schemas
            if isinstance(s.field(col_name).type, arrow_tensor_types)
        ]

        # If element shapes differ across schemas, promote the column to a
        # variable-shaped tensor type.
        if ArrowTensorType._need_variable_shaped_tensor_array(tensor_array_types):
            if isinstance(tensor_array_types[0], ArrowVariableShapedTensorType):
                new_type = tensor_array_types[0]
            elif isinstance(tensor_array_types[0], arrow_fixed_shape_tensor_types):
                new_type = ArrowVariableShapedTensorType(
                    dtype=tensor_array_types[0].scalar_type,
                    ndim=len(tensor_array_types[0].shape),
                )
            else:
                raise ValueError(
                    "Detected need for variable shaped tensor representation, "
                    f"but schema is not ArrayTensorType: {tensor_array_types[0]}"
                )
            schema_field_overrides[col_name] = new_type

    for col_name in columns_with_objects:
        schema_field_overrides[col_name] = ArrowPythonObjectType()

    if cols_with_null_list:
        # For each opaque list column, iterate through all schemas until we find
        # a valid value_type that can be used to override the column types in
        # the following for-loop.
        for col_name in cols_with_null_list:
            for schema in schemas:
                col_type = schema.field(col_name).type
                if not pa.types.is_list(col_type) or not pa.types.is_null(
                    col_type.value_type
                ):
                    schema_field_overrides[col_name] = col_type
                    break

    if schema_field_overrides:
        # Go through all schemas and update the types of columns from the above loop.
        # NOTE(review): this assumes every override column exists in every schema
        # (schema.field() would raise otherwise) — TODO confirm with callers.
        for schema in schemas:
            for col_name, col_new_type in schema_field_overrides.items():
                var_shaped_col = schema.field(col_name).with_type(col_new_type)
                col_idx = schema.get_field_index(col_name)
                schema = schema.set(col_idx, var_shaped_col)
            schemas_to_unify.append(schema)
    else:
        schemas_to_unify = schemas
    # Let Arrow unify the schema of non-tensor extension type columns.
    return pyarrow.unify_schemas(schemas_to_unify)
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
def _concatenate_chunked_arrays(arrs: "pyarrow.ChunkedArray") -> "pyarrow.ChunkedArray":
    """
    Concatenate provided chunked arrays into a single chunked array.
    """
    from ray.data.extensions import get_arrow_extension_tensor_types

    tensor_types = get_arrow_extension_tensor_types()

    # Flatten every input's chunks into one list, validating type agreement
    # along the way (checked from the second input onward, as in the
    # original implementation).
    flat_chunks = []
    result_type = None
    for chunked in arrs:
        if result_type is None:
            result_type = chunked.type
        else:
            if isinstance(result_type, tensor_types):
                raise ValueError(
                    "_concatenate_chunked_arrays should only be used on non-tensor "
                    f"extension types, but got a chunked array of type {result_type}."
                )
            assert (
                result_type == chunked.type
            ), f"Types mismatch: {result_type} != {chunked.type}"
        flat_chunks.extend(chunked.chunks)
    # Construct a single chunked array over the flat chunk list.
    return pyarrow.chunked_array(flat_chunks, type=result_type)
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
def concat(blocks: List["pyarrow.Table"]) -> "pyarrow.Table":
    """Concatenate provided Arrow Tables into a single Arrow Table. This has special
    handling for extension types that pyarrow.concat_tables does not yet support.

    Args:
        blocks: Arrow tables to concatenate; their schemas may differ and are
            unified via ``unify_schemas`` before concatenation.

    Returns:
        A single concatenated table. NOTE: when ``blocks`` is empty, the empty
        list itself is returned unchanged (presumably callers only check for a
        falsy result in that case — TODO confirm).

    Raises:
        ArrowConversionError: If the block schemas cannot be unified.
    """
    import pyarrow as pa

    from ray.air.util.tensor_extensions.arrow import ArrowConversionError
    from ray.data.extensions import (
        ArrowPythonObjectArray,
        ArrowPythonObjectType,
        ArrowTensorArray,
        get_arrow_extension_tensor_types,
    )

    tensor_types = get_arrow_extension_tensor_types()

    if not blocks:
        # Short-circuit on empty list of blocks.
        return blocks

    if len(blocks) == 1:
        # Single block: nothing to concatenate.
        return blocks[0]

    # Rollup columns with opaque (null-typed) lists, to process in following for-loop.
    cols_with_null_list = set()
    for b in blocks:
        for col_name in b.schema.names:
            col_type = b.schema.field(col_name).type
            if pa.types.is_list(col_type) and pa.types.is_null(col_type.value_type):
                cols_with_null_list.add(col_name)

    # If the result contains pyarrow schemas, unify them
    schemas_to_unify = [b.schema for b in blocks]
    try:
        schema = unify_schemas(schemas_to_unify)
    except Exception as e:
        raise ArrowConversionError(str(blocks)) from e

    if (
        any(isinstance(type_, pa.ExtensionType) for type_ in schema.types)
        or cols_with_null_list
    ):
        # Custom handling for extension array columns.
        cols = []
        for col_name in schema.names:
            # Gather this column's chunked array from every block.
            col_chunked_arrays = []
            for block in blocks:
                col_chunked_arrays.append(block.column(col_name))

            if isinstance(schema.field(col_name).type, tensor_types):
                # For our tensor extension types, manually construct a chunked array
                # containing chunks from all blocks. This is to handle
                # homogeneous-shaped block columns having different shapes across
                # blocks: if tensor element shapes differ across blocks, a
                # variable-shaped tensor array will be returned.
                col = ArrowTensorArray._chunk_tensor_arrays(
                    [chunk for ca in col_chunked_arrays for chunk in ca.chunks]
                )
            elif isinstance(schema.field(col_name).type, ArrowPythonObjectType):
                chunks_to_concat = []
                # Cast everything to objects if concatenated with an object column
                for ca in col_chunked_arrays:
                    for chunk in ca.chunks:
                        if isinstance(ca.type, ArrowPythonObjectType):
                            chunks_to_concat.append(chunk)
                        else:
                            # Non-object chunk: round-trip through Python objects.
                            chunks_to_concat.append(
                                ArrowPythonObjectArray.from_objects(chunk.to_pylist())
                            )
                col = pa.chunked_array(chunks_to_concat)
            else:
                if col_name in cols_with_null_list:
                    # For each opaque list column, iterate through all schemas until
                    # we find a valid value_type that can be used to override the
                    # column types in the following for-loop.
                    scalar_type = None
                    for arr in col_chunked_arrays:
                        if not pa.types.is_list(arr.type) or not pa.types.is_null(
                            arr.type.value_type
                        ):
                            scalar_type = arr.type
                            break

                    if scalar_type is not None:
                        for c_idx in range(len(col_chunked_arrays)):
                            c = col_chunked_arrays[c_idx]
                            if pa.types.is_list(c.type) and pa.types.is_null(
                                c.type.value_type
                            ):
                                if pa.types.is_list(scalar_type):
                                    # If we are dealing with a list input,
                                    # cast the array to the scalar_type found above.
                                    col_chunked_arrays[c_idx] = c.cast(scalar_type)
                                else:
                                    # If we are dealing with a single value, construct
                                    # a new array with null values filled.
                                    col_chunked_arrays[c_idx] = pa.chunked_array(
                                        [pa.nulls(c.length(), type=scalar_type)]
                                    )

                col = _concatenate_chunked_arrays(col_chunked_arrays)
            cols.append(col)

        # Build the concatenated table.
        table = pyarrow.Table.from_arrays(cols, schema=schema)
        # Validate table schema (this is a cheap check by default).
        table.validate()
    else:
        # No extension array columns, so use built-in pyarrow.concat_tables.
        if parse_version(_get_pyarrow_version()) >= parse_version("14.0.0"):
            # `promote` was superseded by `promote_options='default'` in Arrow 14. To
            # prevent `FutureWarning`s, we manually check the Arrow version and use the
            # appropriate parameter.
            table = pyarrow.concat_tables(blocks, promote_options="default")
        else:
            table = pyarrow.concat_tables(blocks, promote=True)
    return table
|
| 325 |
+
|
| 326 |
+
|
| 327 |
+
def concat_and_sort(
    blocks: List["pyarrow.Table"], sort_key: "SortKey"
) -> "pyarrow.Table":
    """Concatenate ``blocks`` into one table, then sort it by ``sort_key``."""
    import pyarrow.compute as pac

    combined = concat(blocks)
    order = pac.sort_indices(combined, sort_keys=sort_key.to_arrow_sort_args())
    # take_table() preserves extension columns where Table.take() would not.
    return take_table(combined, order)
|
| 335 |
+
|
| 336 |
+
|
| 337 |
+
def to_numpy(
    array: Union["pyarrow.Array", "pyarrow.ChunkedArray"],
    *,
    zero_copy_only: bool = True,
) -> np.ndarray:
    """Wrapper for `Array`s and `ChunkedArray`s `to_numpy` API,
    handling API divergence b/w Arrow versions"""

    import pyarrow as pa

    if isinstance(array, pa.ChunkedArray):
        # Older pyarrow does not accept `zero_copy_only` for chunked arrays.
        if PYARROW_VERSION >= MIN_PYARROW_VERSION_CHUNKED_ARRAY_TO_NUMPY_ZERO_COPY_ONLY:
            return array.to_numpy(zero_copy_only=zero_copy_only)
        return array.to_numpy()

    if isinstance(array, pa.Array):
        return array.to_numpy(zero_copy_only=zero_copy_only)

    raise ValueError(
        f"Either of `Array` or `ChunkedArray` was expected, got {type(array)}"
    )
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
def combine_chunks(table: "pyarrow.Table") -> "pyarrow.Table":
    """This is counterpart for Pyarrow's `Table.combine_chunks` that's using
    extended `ChunkedArray` combination protocol.

    For more details check out `combine_chunked_array` py-doc
    """
    combined_columns = [combine_chunked_array(column) for column in table.columns]
    return pyarrow.Table.from_arrays(combined_columns, schema=table.schema)
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
def combine_chunked_array(
    array: "pyarrow.ChunkedArray",
) -> Union["pyarrow.Array", "pyarrow.ChunkedArray"]:
    """This is counterpart for Pyarrow's `ChunkedArray.combine_chunks` that additionally

    1. Handles `ExtensionType`s (like ArrowTensorType, ArrowTensorTypeV2,
    ArrowPythonObjectType, etc)

    2. Making sure `ChunkedArray`s comprising provided `Table` are combined
    safely, ie avoiding overflows of Arrow's internal offsets (using int32 for
    most of its native types, other than "large" kind).

    For more details check py-doc of `_try_combine_chunks_safe` method.
    """

    import pyarrow as pa

    from ray.air.util.transform_pyarrow import (
        _concatenate_extension_column,
        _is_column_extension_type,
    )

    assert isinstance(
        array, pa.ChunkedArray
    ), f"Expected `ChunkedArray`, got {type(array)}"

    # Guard clause 1: Arrow `ExtensionArray`s can't be concatenated via
    # `combine_chunks` and require manual concatenation.
    if _is_column_extension_type(array):
        return _concatenate_extension_column(array)

    # Guard clause 2: `combine_chunks` expects at least one chunk, so build an
    # empty array of the right type explicitly.
    if not array.chunks:
        return pa.array([], type=array.type)

    return _try_combine_chunks_safe(array)
|
| 412 |
+
|
| 413 |
+
|
| 414 |
+
def _try_combine_chunks_safe(
    array: "pyarrow.ChunkedArray", max_chunk_size=INT32_OVERFLOW_THRESHOLD
) -> Union["pyarrow.Array", "pyarrow.ChunkedArray"]:
    """This method provides a safe way of combining `ChunkedArray`s exceeding 2 GiB
    in size, which aren't using "large_*" types (and therefore relying on int32
    offsets).

    When handling provided `ChunkedArray` this method will be either

    - Relying on PyArrow's default `combine_chunks` (therefore returning single
    contiguous `Array`) in cases when
        - Array's total size is < 2 GiB
        - Array's underlying type is of "large" kind (ie using one of the
        `large_*` type family)
    - Safely combining subsets of tasks such that resulting `Array`s to not
    exceed 2 GiB in size (therefore returning another `ChunkedArray` albeit
    with potentially smaller number of chunks that have resulted from clumping
    the original ones)

    Args:
        array: Non-extension-type chunked array to combine.
        max_chunk_size: Byte budget per combined chunk (defaults to the int32
            offset overflow threshold).

    Returns:
        - pa.Array if it's possible to combine provided pa.ChunkedArray into single
        contiguous array
        - pa.ChunkedArray (albeit with chunks re-combined) if it's not possible to
        produce single pa.Array
    """

    import pyarrow as pa

    from ray.air.util.transform_pyarrow import _is_column_extension_type

    assert not _is_column_extension_type(
        array
    ), f"Arrow `ExtensionType`s are not accepted (got {array.type})"

    # Types in the "large" family use int64 offsets and cannot overflow here.
    int64_type_predicates = [
        pa.types.is_large_list,
        pa.types.is_large_string,
        pa.types.is_large_binary,
        pa.types.is_large_unicode,
    ]

    if array.nbytes < max_chunk_size or any(
        p(array.type) for p in int64_type_predicates
    ):
        # It's safe to combine provided `ChunkedArray` in either of 2 cases:
        # - It's cumulative size is < 2 GiB
        # - It's of 'large' kind (ie one using int64 offsets internally)
        return array.combine_chunks()

    # In this case it's actually *NOT* safe to try to directly combine
    # Arrow's `ChunkedArray` and is impossible to produce single, contiguous
    # `Array` since
    # - It's estimated to hold > 2 GiB
    # - Its type is not of the "large" kind (and hence is using int32
    # offsets internally, which would overflow)
    #
    # In this case instead of combining into single contiguous array, we
    # instead just "clump" existing chunks into bigger ones, but no bigger
    # than 2 GiB each.
    #
    # NOTE: This branch actually returns `ChunkedArray` and not an `Array`

    # To stay under 2 GiB limit we are slicing provided list of chunks into
    # slices no larger than 2 GiB (as compared to just directly using `concat_arrays`)
    slices = []

    cur_slice_start = 0
    cur_slice_size_bytes = 0

    for i, chunk in enumerate(array.chunks):
        chunk_size = chunk.nbytes

        # Starting a new slice BEFORE adding this chunk keeps each slice's
        # total at or below max_chunk_size (assuming no single chunk exceeds
        # the budget on its own — a single oversized chunk still gets its own
        # slice unchanged).
        if cur_slice_size_bytes + chunk_size > max_chunk_size:
            slices.append(array.chunks[cur_slice_start:i])

            cur_slice_start = i
            cur_slice_size_bytes = 0

        cur_slice_size_bytes += chunk_size

    # Add remaining chunks as last slice
    slices.append(array.chunks[cur_slice_start:])

    return pa.chunked_array([pa.concat_arrays(s) for s in slices])
|
minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/__init__.py
ADDED
|
File without changes
|
minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/plan.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import TYPE_CHECKING
|
| 2 |
+
|
| 3 |
+
from .operator import Operator
|
| 4 |
+
|
| 5 |
+
if TYPE_CHECKING:
|
| 6 |
+
from ray.data import DataContext
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class Plan:
    """Abstract class for logical/physical execution plans.

    This plan should hold an operator representing the plan DAG and any auxiliary data
    that's useful for plan optimization or execution.
    """

    def __init__(self, context: "DataContext"):
        """Create a plan bound to a data context.

        Args:
            context: The ``DataContext`` that was active when this plan
                was constructed.
        """
        self._context = context

    @property
    def dag(self) -> "Operator":
        """The root operator of this plan's DAG.

        Subclasses must override this; the base class holds no DAG.
        """
        raise NotImplementedError

    @property
    def context(self) -> "DataContext":
        """The ``DataContext`` this plan was created with.

        Fixed: this property was previously annotated as returning
        ``Operator``, but it returns the context stored in ``__init__``.
        """
        return self._context
|
minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (190 Bytes). View file
|
|
|
minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/all_to_all_operator.cpython-310.pyc
ADDED
|
Binary file (5.45 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/from_operators.cpython-310.pyc
ADDED
|
Binary file (4.62 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/map_operator.cpython-310.pyc
ADDED
|
Binary file (9.89 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/write_operator.cpython-310.pyc
ADDED
|
Binary file (1.37 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/optimizers.py
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List, Optional, Type
|
| 2 |
+
|
| 3 |
+
from ray.data._internal.logical.interfaces import (
|
| 4 |
+
LogicalPlan,
|
| 5 |
+
Optimizer,
|
| 6 |
+
PhysicalPlan,
|
| 7 |
+
Rule,
|
| 8 |
+
)
|
| 9 |
+
from ray.data._internal.logical.rules.inherit_batch_format import InheritBatchFormatRule
|
| 10 |
+
from ray.data._internal.logical.rules.inherit_target_max_block_size import (
|
| 11 |
+
InheritTargetMaxBlockSizeRule,
|
| 12 |
+
)
|
| 13 |
+
from ray.data._internal.logical.rules.operator_fusion import OperatorFusionRule
|
| 14 |
+
from ray.data._internal.logical.rules.randomize_blocks import ReorderRandomizeBlocksRule
|
| 15 |
+
from ray.data._internal.logical.rules.set_read_parallelism import SetReadParallelismRule
|
| 16 |
+
from ray.data._internal.logical.rules.zero_copy_map_fusion import (
|
| 17 |
+
EliminateBuildOutputBlocks,
|
| 18 |
+
)
|
| 19 |
+
from ray.data._internal.planner.planner import Planner
|
| 20 |
+
from ray.util.annotations import DeveloperAPI
|
| 21 |
+
|
| 22 |
+
# Default logical optimization rules, applied in list order by
# LogicalOptimizer before the plan is lowered to physical operators.
_LOGICAL_RULES = [
    ReorderRandomizeBlocksRule,
    InheritBatchFormatRule,
]

# Default physical optimization rules, applied in list order by
# PhysicalOptimizer after planning.
_PHYSICAL_RULES = [
    InheritTargetMaxBlockSizeRule,
    SetReadParallelismRule,
    OperatorFusionRule,
    EliminateBuildOutputBlocks,
]
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
@DeveloperAPI
def register_logical_rule(cls: Type[Rule], insert_index: Optional[int] = None):
    """Register a logical optimization rule, ignoring duplicates.

    Args:
        cls: The rule class to register.
        insert_index: Optional position in the rule list; the rule is
            appended when omitted.
    """
    if cls not in _LOGICAL_RULES:
        if insert_index is None:
            _LOGICAL_RULES.append(cls)
        else:
            _LOGICAL_RULES.insert(insert_index, cls)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
@DeveloperAPI
def get_logical_rules() -> List[Type[Rule]]:
    """Return a snapshot copy of the registered logical rule classes."""
    return _LOGICAL_RULES.copy()
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
@DeveloperAPI
def register_physical_rule(cls: Type[Rule], insert_index: Optional[int] = None):
    """Register a physical optimization rule, ignoring duplicates.

    Args:
        cls: The rule class to register.
        insert_index: Optional position in the rule list; the rule is
            appended when omitted.
    """
    if cls not in _PHYSICAL_RULES:
        if insert_index is None:
            _PHYSICAL_RULES.append(cls)
        else:
            _PHYSICAL_RULES.insert(insert_index, cls)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
@DeveloperAPI
def get_physical_rules() -> List[Type[Rule]]:
    """Return a snapshot copy of the registered physical rule classes."""
    return _PHYSICAL_RULES.copy()
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
class LogicalOptimizer(Optimizer):
    """The optimizer for logical operators."""

    @property
    def rules(self) -> List[Rule]:
        # Instantiate one fresh rule object per registered rule class.
        return [rule() for rule in _LOGICAL_RULES]
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
class PhysicalOptimizer(Optimizer):
    """The optimizer for physical operators."""

    @property
    def rules(self) -> List[Rule]:
        # Instantiate one fresh rule object per registered rule class.
        return [rule() for rule in _PHYSICAL_RULES]
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def get_execution_plan(logical_plan: LogicalPlan) -> PhysicalPlan:
    """Get the physical execution plan for the provided logical plan.

    This process has 3 steps:
    (1) logical optimization: optimize logical operators.
    (2) planning: convert logical to physical operators.
    (3) physical optimization: optimize physical operators.
    """
    optimized = LogicalOptimizer().optimize(logical_plan)
    # Keep the caller's plan object in sync with the optimized DAG.
    logical_plan._dag = optimized.dag
    return PhysicalOptimizer().optimize(Planner().plan(optimized))
|
minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/util.py
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import re
|
| 3 |
+
import threading
|
| 4 |
+
from typing import Dict
|
| 5 |
+
|
| 6 |
+
from ray._private.usage.usage_lib import TagKey, record_extra_usage_tag
|
| 7 |
+
from ray.data._internal.logical.interfaces import LogicalOperator
|
| 8 |
+
from ray.data._internal.logical.operators.map_operator import AbstractUDFMap
|
| 9 |
+
from ray.data._internal.logical.operators.read_operator import Read
|
| 10 |
+
from ray.data._internal.logical.operators.write_operator import Write
|
| 11 |
+
|
| 12 |
+
# The dictionary for the operator name and count.
# Process-wide running totals, guarded by the lock below because
# record_operators_usage may be called from multiple threads.
_recorded_operators = dict()
_recorded_operators_lock = threading.Lock()

# The white list of operator names allowed to be recorded.
# Names not in this list are anonymized (to "ReadCustom", "WriteCustom",
# or "Unknown") before being sent to telemetry.
_op_name_white_list = [
    # Read
    "ReadBigQuery",
    "ReadRange",
    "ReadMongo",
    "ReadParquet",
    "ReadParquetBulk",
    "ReadImage",
    "ReadJSON",
    "ReadCSV",
    "ReadText",
    "ReadNumpy",
    "ReadTFRecord",
    "ReadBinary",
    "ReadTorch",
    "ReadAvro",
    "ReadWebDataset",
    "ReadSQL",
    "ReadDatabricksUC",
    "ReadLance",
    "ReadHuggingFace",
    "ReadCustom",
    # From
    "FromArrow",
    "FromItems",
    "FromNumpy",
    "FromPandas",
    # Write
    "WriteBigQuery",
    "WriteParquet",
    "WriteJSON",
    "WriteCSV",
    "WriteTFRecord",
    "WriteNumpy",
    "WriteMongo",
    "WriteWebDataset",
    "WriteSQL",
    "WriteCustom",
    # Map
    "Map",
    "MapBatches",
    "Filter",
    "FlatMap",
    # All-to-all
    "RandomizeBlockOrder",
    "RandomShuffle",
    "Repartition",
    "Sort",
    "Aggregate",
    # N-ary
    "Zip",
    "Union",
]
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def record_operators_usage(op: LogicalOperator):
    """Record logical operator usage with Ray telemetry."""
    counts: Dict[str, int] = {}
    _collect_operators_to_dict(op, counts)
    with _recorded_operators_lock:
        # Merge this plan's counts into the process-wide totals, then
        # serialize the totals while still holding the lock.
        for name, count in counts.items():
            _recorded_operators[name] = _recorded_operators.get(name, 0) + count
        serialized = json.dumps(_recorded_operators)

    record_extra_usage_tag(TagKey.DATA_LOGICAL_OPS, serialized)
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def _collect_operators_to_dict(op: LogicalOperator, ops_dict: Dict[str, int]):
    """Collect the logical operator name and count into `ops_dict`."""
    # Depth-first: tally every upstream operator before this one.
    for upstream in op.input_dependencies:
        _collect_operators_to_dict(upstream, ops_dict)

    op_name = op.name

    # Check read and write operator, and anonymize user-defined data source.
    if isinstance(op, Read):
        op_name = f"Read{op._datasource.get_name()}"
        if op_name not in _op_name_white_list:
            op_name = "ReadCustom"
    elif isinstance(op, Write):
        op_name = f"Write{op._datasink_or_legacy_datasource.get_name()}"
        if op_name not in _op_name_white_list:
            op_name = "WriteCustom"
    elif isinstance(op, AbstractUDFMap):
        # Strip the UDF name suffix, e.g. "Map(<lambda>)" -> "Map".
        op_name = re.sub(r"\(.*\)$", "", op_name)

    # Anonymize any operator name if not in white list.
    if op_name not in _op_name_white_list:
        op_name = "Unknown"

    ops_dict[op_name] = ops_dict.get(op_name, 0) + 1
|
minigpt2/lib/python3.10/site-packages/ray/data/datasource/__init__.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ray.data._internal.datasource.sql_datasource import Connection
|
| 2 |
+
from ray.data.datasource.datasink import Datasink, DummyOutputDatasink
|
| 3 |
+
from ray.data.datasource.datasource import (
|
| 4 |
+
Datasource,
|
| 5 |
+
RandomIntRowDatasource,
|
| 6 |
+
Reader,
|
| 7 |
+
ReadTask,
|
| 8 |
+
)
|
| 9 |
+
from ray.data.datasource.file_based_datasource import (
|
| 10 |
+
FileBasedDatasource,
|
| 11 |
+
_S3FileSystemWrapper,
|
| 12 |
+
)
|
| 13 |
+
from ray.data.datasource.file_datasink import (
|
| 14 |
+
BlockBasedFileDatasink,
|
| 15 |
+
RowBasedFileDatasink,
|
| 16 |
+
)
|
| 17 |
+
from ray.data.datasource.file_meta_provider import (
|
| 18 |
+
BaseFileMetadataProvider,
|
| 19 |
+
DefaultFileMetadataProvider,
|
| 20 |
+
FastFileMetadataProvider,
|
| 21 |
+
FileMetadataProvider,
|
| 22 |
+
)
|
| 23 |
+
from ray.data.datasource.filename_provider import FilenameProvider
|
| 24 |
+
from ray.data.datasource.parquet_meta_provider import ParquetMetadataProvider
|
| 25 |
+
from ray.data.datasource.partitioning import (
|
| 26 |
+
Partitioning,
|
| 27 |
+
PartitionStyle,
|
| 28 |
+
PathPartitionFilter,
|
| 29 |
+
PathPartitionParser,
|
| 30 |
+
)
|
| 31 |
+
|
| 32 |
+
# Note: HuggingFaceDatasource should NOT be imported here, because
|
| 33 |
+
# we want to only import the Hugging Face datasets library when we use
|
| 34 |
+
# ray.data.from_huggingface() or HuggingFaceDatasource() directly.
|
| 35 |
+
__all__ = [
|
| 36 |
+
"BaseFileMetadataProvider",
|
| 37 |
+
"BlockBasedFileDatasink",
|
| 38 |
+
"Connection",
|
| 39 |
+
"Datasink",
|
| 40 |
+
"Datasource",
|
| 41 |
+
"DeltaSharingDatasource",
|
| 42 |
+
"DefaultFileMetadataProvider",
|
| 43 |
+
"DummyOutputDatasink",
|
| 44 |
+
"FastFileMetadataProvider",
|
| 45 |
+
"FileBasedDatasource",
|
| 46 |
+
"FileMetadataProvider",
|
| 47 |
+
"FilenameProvider",
|
| 48 |
+
"ParquetMetadataProvider",
|
| 49 |
+
"PartitionStyle",
|
| 50 |
+
"PathPartitionFilter",
|
| 51 |
+
"PathPartitionParser",
|
| 52 |
+
"Partitioning",
|
| 53 |
+
"RandomIntRowDatasource",
|
| 54 |
+
"ReadTask",
|
| 55 |
+
"Reader",
|
| 56 |
+
"RowBasedFileDatasink",
|
| 57 |
+
"_S3FileSystemWrapper",
|
| 58 |
+
]
|
minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (1.37 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/datasink.cpython-310.pyc
ADDED
|
Binary file (7.47 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/datasource.cpython-310.pyc
ADDED
|
Binary file (9.85 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/file_based_datasource.cpython-310.pyc
ADDED
|
Binary file (14.3 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/file_datasink.cpython-310.pyc
ADDED
|
Binary file (9.49 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/file_meta_provider.cpython-310.pyc
ADDED
|
Binary file (14.5 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/filename_provider.cpython-310.pyc
ADDED
|
Binary file (5.4 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/parquet_meta_provider.cpython-310.pyc
ADDED
|
Binary file (8.52 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/partitioning.cpython-310.pyc
ADDED
|
Binary file (18.5 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/ray/data/datasource/__pycache__/path_util.cpython-310.pyc
ADDED
|
Binary file (5.63 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/ray/data/datasource/datasink.py
ADDED
|
@@ -0,0 +1,198 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
from dataclasses import dataclass, fields
|
| 3 |
+
from typing import Iterable, List, Optional
|
| 4 |
+
|
| 5 |
+
import ray
|
| 6 |
+
from ray.data._internal.execution.interfaces import TaskContext
|
| 7 |
+
from ray.data.block import Block, BlockAccessor
|
| 8 |
+
from ray.util.annotations import DeveloperAPI
|
| 9 |
+
|
| 10 |
+
logger = logging.getLogger(__name__)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
@dataclass
@DeveloperAPI
class WriteResult:
    """Result of a write operation, containing stats/metrics
    on the written data.

    Attributes:
        num_rows: The total number of rows written.
        size_bytes: The total size of the written data in bytes.
    """

    # Fixed: the docstring previously documented attributes named
    # ``total_num_rows``/``total_size_bytes`` that do not exist.
    num_rows: int = 0
    size_bytes: int = 0

    @staticmethod
    def aggregate_write_results(write_results: List["WriteResult"]) -> "WriteResult":
        """Aggregate a list of write results.

        Args:
            write_results: A list of write results.

        Returns:
            A single write result that aggregates the input results.
        """
        return WriteResult(
            num_rows=sum(r.num_rows for r in write_results),
            size_bytes=sum(r.size_bytes for r in write_results),
        )
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
@DeveloperAPI
class Datasink:
    """Interface for defining write-related logic.

    If you want to write data to something that isn't built-in, subclass this class
    and call :meth:`~ray.data.Dataset.write_datasink`.
    """

    def on_write_start(self) -> None:
        """Callback for when a write job starts.

        Use this method to perform setup for write tasks. For example, creating a
        staging bucket in S3.
        """
        pass

    def write(
        self,
        blocks: Iterable[Block],
        ctx: TaskContext,
    ) -> None:
        """Write blocks. This is used by a single write task.

        Args:
            blocks: Generator of data blocks.
            ctx: ``TaskContext`` for the write task.
        """
        raise NotImplementedError

    def on_write_complete(self, write_result_blocks: List[Block]) -> WriteResult:
        """Callback for when a write job completes.

        This can be used to "commit" a write output. This method must
        succeed prior to ``write_datasink()`` returning to the user. If this
        method fails, then ``on_write_failed()`` is called.

        Args:
            write_result_blocks: The blocks resulting from executing
                the Write operator, containing write results and stats.
        Returns:
            A ``WriteResult`` object containing the aggregated stats of all
            the input write results.
        """
        # Each result block carries a single WriteResult in its
        # "write_result" column; pull it out of every block.
        per_task_results = [
            block["write_result"].iloc[0] for block in write_result_blocks
        ]
        aggregated = WriteResult.aggregate_write_results(per_task_results)

        summary = "".join(
            f"\t- {f.name}: {getattr(aggregated, f.name)}\n"
            for f in fields(type(aggregated))
        )
        logger.info(
            f"Write operation succeeded. Aggregated write results:\n{summary}"
        )
        return aggregated

    def on_write_failed(self, error: Exception) -> None:
        """Callback for when a write job fails.

        This is called on a best-effort basis on write failures.

        Args:
            error: The first error encountered.
        """
        pass

    def get_name(self) -> str:
        """Return a human-readable name for this datasink.

        This is used as the names of the write tasks.
        """
        # "_FooDatasink" -> "Foo": drop one leading underscore, then the
        # conventional class-name suffix.
        name = type(self).__name__
        if name.startswith("_"):
            name = name[1:]
        return name.removesuffix("Datasink")

    @property
    def supports_distributed_writes(self) -> bool:
        """If ``False``, only launch write tasks on the driver's node."""
        return True

    @property
    def num_rows_per_write(self) -> Optional[int]:
        """The target number of rows to pass to each :meth:`~ray.data.Datasink.write` call.

        If ``None``, Ray Data passes a system-chosen number of rows.
        """
        return None
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
@DeveloperAPI
class DummyOutputDatasink(Datasink):
    """An example implementation of a writable datasource for testing.
    Examples:
        >>> import ray
        >>> from ray.data.datasource import DummyOutputDatasink
        >>> output = DummyOutputDatasink()
        >>> ray.data.range(10).write_datasink(output)
        >>> assert output.num_ok == 1
    """

    def __init__(self):
        ctx = ray.data.DataContext.get_current()

        # Setup a dummy actor to send the data. In a real datasource, write
        # tasks would send data to an external system instead of a Ray actor.
        @ray.remote(scheduling_strategy=ctx.scheduling_strategy)
        class DataSink:
            def __init__(self):
                # Running total of rows received by this actor.
                self.rows_written = 0
                self.enabled = True

            def write(self, block: Block) -> None:
                block = BlockAccessor.for_block(block)
                self.rows_written += block.num_rows()

            def get_rows_written(self):
                return self.rows_written

        self.data_sink = DataSink.remote()
        # Counters observed by tests: completed vs. failed write jobs.
        self.num_ok = 0
        self.num_failed = 0
        # When False, write() raises ValueError("disabled") — presumably
        # flipped externally by tests to simulate failures; confirm.
        self.enabled = True

    def write(
        self,
        blocks: Iterable[Block],
        ctx: TaskContext,
    ) -> None:
        # Fan each block out to the actor, then block until all writes land.
        tasks = []
        if not self.enabled:
            raise ValueError("disabled")
        for b in blocks:
            tasks.append(self.data_sink.write.remote(b))
        ray.get(tasks)

    def on_write_complete(self, write_result_blocks: List[Block]) -> WriteResult:
        # Count the successful job, then delegate aggregation to the base class.
        self.num_ok += 1
        aggregated_results = super().on_write_complete(write_result_blocks)
        return aggregated_results

    def on_write_failed(self, error: Exception) -> None:
        # Best-effort failure counter (see Datasink.on_write_failed).
        self.num_failed += 1
|
minigpt2/lib/python3.10/site-packages/ray/data/datasource/datasource.py
ADDED
|
@@ -0,0 +1,243 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Callable, Iterable, List, Optional
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
from ray.data._internal.util import _check_pyarrow_version
|
| 6 |
+
from ray.data.block import Block, BlockMetadata
|
| 7 |
+
from ray.util.annotations import Deprecated, DeveloperAPI, PublicAPI
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
@PublicAPI
class Datasource:
    """Interface for defining a custom :class:`~ray.data.Dataset` datasource.

    To read a datasource into a dataset, use :meth:`~ray.data.read_datasource`.
    """  # noqa: E501

    @Deprecated
    def create_reader(self, **read_args) -> "Reader":
        """
        Deprecated: Implement :meth:`~ray.data.Datasource.get_read_tasks` and
        :meth:`~ray.data.Datasource.estimate_inmemory_data_size` instead.
        """
        return _LegacyDatasourceReader(self, **read_args)

    @Deprecated
    def prepare_read(self, parallelism: int, **read_args) -> List["ReadTask"]:
        """
        Deprecated: Implement :meth:`~ray.data.Datasource.get_read_tasks` and
        :meth:`~ray.data.Datasource.estimate_inmemory_data_size` instead.
        """
        raise NotImplementedError

    def get_name(self) -> str:
        """Return a human-readable name for this datasource.
        This will be used as the names of the read tasks.
        """
        # "FooDatasource" -> "Foo"; other names are returned unchanged.
        return type(self).__name__.removesuffix("Datasource")

    def estimate_inmemory_data_size(self) -> Optional[int]:
        """Return an estimate of the in-memory data size, or None if unknown.

        Note that the in-memory data size may be larger than the on-disk data size.
        """
        raise NotImplementedError

    def get_read_tasks(self, parallelism: int) -> List["ReadTask"]:
        """Execute the read and return read tasks.

        Args:
            parallelism: The requested read parallelism. The number of read
                tasks should equal to this value if possible.

        Returns:
            A list of read tasks that can be executed to read blocks from the
            datasource in parallel.
        """
        raise NotImplementedError

    @property
    def should_create_reader(self) -> bool:
        # Fall back to the legacy Reader path unless the subclass overrides
        # both of the modern read APIs.
        overrides_read_tasks = (
            type(self).get_read_tasks is not Datasource.get_read_tasks
        )
        overrides_size_estimate = (
            type(self).estimate_inmemory_data_size
            is not Datasource.estimate_inmemory_data_size
        )
        return not (overrides_read_tasks and overrides_size_estimate)

    @property
    def supports_distributed_reads(self) -> bool:
        """If ``False``, only launch read tasks on the driver's node."""
        return True
| 81 |
+
|
| 82 |
+
|
| 83 |
+
@Deprecated
class Reader:
    """A bound read operation for a :class:`~ray.data.Datasource`.

    This is a stateful class so that reads can be prepared in multiple stages.
    For example, it is useful for :class:`Datasets <ray.data.Dataset>` to know the
    in-memory size of the read prior to executing it.
    """

    def estimate_inmemory_data_size(self) -> Optional[int]:
        """Return an estimate of the in-memory data size, or None if unknown.

        Note that the in-memory data size may be larger than the on-disk data size.
        """
        raise NotImplementedError

    def get_read_tasks(self, parallelism: int) -> List["ReadTask"]:
        """Execute the read and return read tasks.

        Args:
            parallelism: The requested read parallelism. The number of read
                tasks should equal to this value if possible.

        Returns:
            A list of read tasks that can be executed to read blocks from the
            datasource in parallel.
        """
        raise NotImplementedError
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
class _LegacyDatasourceReader(Reader):
    """Adapter exposing a legacy ``Datasource.prepare_read`` as a ``Reader``."""

    def __init__(self, datasource: Datasource, **read_args):
        # Bind the datasource together with the kwargs that will be
        # forwarded to its deprecated prepare_read() entry point.
        self._datasource = datasource
        self._read_args = read_args

    def estimate_inmemory_data_size(self) -> Optional[int]:
        # Legacy datasources expose no size information.
        return None

    def get_read_tasks(self, parallelism: int) -> List["ReadTask"]:
        # Delegate to the deprecated prepare_read() API with the bound kwargs.
        return self._datasource.prepare_read(parallelism, **self._read_args)
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
@DeveloperAPI
class ReadTask(Callable[[], Iterable[Block]]):
    """A function used to read blocks from the :class:`~ray.data.Dataset`.

    Read tasks are generated by :meth:`~ray.data.Datasource.get_read_tasks`,
    and return a list of ``ray.data.Block`` when called. Initial metadata about the read
    operation can be retrieved via the ``metadata`` attribute prior to executing the
    read. Final metadata is returned after the read along with the blocks.

    Ray will execute read tasks in remote functions to parallelize execution.
    Note that the number of blocks returned can vary at runtime. For example,
    if a task is reading a single large file it can return multiple blocks to
    avoid running out of memory during the read.

    The initial metadata should reflect all the blocks returned by the read,
    e.g., if the metadata says ``num_rows=1000``, the read can return a single
    block of 1000 rows, or multiple blocks with 1000 rows altogether.

    The final metadata (returned with the actual block) reflects the exact
    contents of the block itself.
    """

    def __init__(self, read_fn: Callable[[], Iterable[Block]], metadata: BlockMetadata):
        self._metadata = metadata
        self._read_fn = read_fn

    @property
    def metadata(self) -> BlockMetadata:
        """Initial metadata describing the blocks this task will produce."""
        return self._metadata

    @property
    def read_fn(self) -> Callable[[], Iterable[Block]]:
        """The underlying function that performs the read."""
        return self._read_fn

    def __call__(self) -> Iterable[Block]:
        result = self._read_fn()
        if not hasattr(result, "__iter__"):
            # Fixed: the DeprecationWarning was previously constructed but
            # never emitted, so callers got no signal at all.
            import warnings

            warnings.warn(
                "Read function must return Iterable[Block], got {}. "
                "Probably you need to return `[block]` instead of "
                "`block`.".format(result),
                DeprecationWarning,
            )
        yield from result
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
@DeveloperAPI
class RandomIntRowDatasource(Datasource):
    """An example datasource that generates rows with random int64 columns.

    Examples:
        >>> import ray
        >>> from ray.data.datasource import RandomIntRowDatasource
        >>> source = RandomIntRowDatasource(n=10, num_columns=2) # doctest: +SKIP
        >>> ray.data.read_datasource( # doctest: +SKIP
        ...     source).take()
        {'c_0': 1717767200176864416, 'c_1': 999657309586757214}
        {'c_0': 4983608804013926748, 'c_1': 1160140066899844087}
    """

    def __init__(self, n: int, num_columns: int):
        # n: total number of rows to generate.
        self._n = n
        # num_columns: number of int64 columns per row.
        self._num_columns = num_columns

    def estimate_inmemory_data_size(self) -> Optional[int]:
        # Each value is an int64 (8 bytes), so the size is known exactly.
        return self._n * self._num_columns * 8

    def get_read_tasks(
        self,
        parallelism: int,
    ) -> List[ReadTask]:
        """Generate up to `parallelism` read tasks covering all `n` rows."""
        _check_pyarrow_version()
        import pyarrow

        read_tasks: List[ReadTask] = []
        n = self._n
        num_columns = self._num_columns
        # Target rows per task; at least 1 so parallelism > n still progresses.
        block_size = max(1, n // parallelism)

        def make_block(count: int, num_columns: int) -> Block:
            # Builds a pyarrow Table with `num_columns` arrays of `count`
            # uniformly random non-negative int64 values.
            return pyarrow.Table.from_arrays(
                np.random.randint(
                    np.iinfo(np.int64).max, size=(num_columns, count), dtype=np.int64
                ),
                names=[f"c_{i}" for i in range(num_columns)],
            )

        # Derive the schema from a tiny throwaway table with the same columns.
        schema = pyarrow.Table.from_pydict(
            {f"c_{i}": [0] for i in range(num_columns)}
        ).schema

        i = 0
        while i < n:
            count = min(block_size, n - i)
            meta = BlockMetadata(
                num_rows=count,
                size_bytes=8 * count * num_columns,
                schema=schema,
                input_files=None,
                exec_stats=None,
            )
            read_tasks.append(
                ReadTask(
                    # Bind the loop variables as lambda defaults so each task
                    # captures its own `count` rather than the final loop value.
                    lambda count=count, num_columns=num_columns: [
                        make_block(count, num_columns)
                    ],
                    meta,
                )
            )
            i += block_size

        return read_tasks

    def get_name(self) -> str:
        """Return a human-readable name for this datasource.
        This will be used as the names of the read tasks.
        Note: overrides the base `Datasource` method.
        """
        return "RandomInt"
|
minigpt2/lib/python3.10/site-packages/ray/data/datasource/file_based_datasource.py
ADDED
|
@@ -0,0 +1,533 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import io
|
| 2 |
+
import logging
|
| 3 |
+
from typing import (
|
| 4 |
+
TYPE_CHECKING,
|
| 5 |
+
Any,
|
| 6 |
+
Callable,
|
| 7 |
+
Dict,
|
| 8 |
+
Iterable,
|
| 9 |
+
Iterator,
|
| 10 |
+
List,
|
| 11 |
+
Literal,
|
| 12 |
+
Optional,
|
| 13 |
+
Union,
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
import numpy as np
|
| 17 |
+
|
| 18 |
+
import ray
|
| 19 |
+
from ray.data._internal.util import (
|
| 20 |
+
_check_pyarrow_version,
|
| 21 |
+
_is_local_scheme,
|
| 22 |
+
call_with_retry,
|
| 23 |
+
make_async_gen,
|
| 24 |
+
)
|
| 25 |
+
from ray.data.block import Block, BlockAccessor
|
| 26 |
+
from ray.data.context import DataContext
|
| 27 |
+
from ray.data.datasource.datasource import Datasource, ReadTask
|
| 28 |
+
from ray.data.datasource.file_meta_provider import (
|
| 29 |
+
BaseFileMetadataProvider,
|
| 30 |
+
DefaultFileMetadataProvider,
|
| 31 |
+
)
|
| 32 |
+
from ray.data.datasource.partitioning import (
|
| 33 |
+
Partitioning,
|
| 34 |
+
PathPartitionFilter,
|
| 35 |
+
PathPartitionParser,
|
| 36 |
+
)
|
| 37 |
+
from ray.data.datasource.path_util import (
|
| 38 |
+
_has_file_extension,
|
| 39 |
+
_resolve_paths_and_filesystem,
|
| 40 |
+
)
|
| 41 |
+
from ray.util.annotations import DeveloperAPI
|
| 42 |
+
|
| 43 |
+
if TYPE_CHECKING:
|
| 44 |
+
import pandas as pd
|
| 45 |
+
import pyarrow
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
logger = logging.getLogger(__name__)


# We should parallelize file size fetch operations beyond this threshold.
FILE_SIZE_FETCH_PARALLELIZATION_THRESHOLD: int = 16

# 16 file size fetches from S3 takes ~1.5 seconds with Arrow's S3FileSystem.
PATHS_PER_FILE_SIZE_FETCH_TASK: int = 16

# The max retry backoff in seconds for opening file.
OPEN_FILE_RETRY_MAX_BACKOFF_SECONDS: int = 32

# The max number of attempts for opening file.
OPEN_FILE_MAX_ATTEMPTS: int = 10
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
@DeveloperAPI
class FileBasedDatasource(Datasource):
    """File-based datasource for reading files.

    Don't use this class directly. Instead, subclass it and implement `_read_stream()`.
    """

    # If `_WRITE_FILE_PER_ROW` is `True`, this datasource calls `_write_row` and writes
    # each row to a file. Otherwise, this datasource calls `_write_block` and writes
    # each block to a file.
    _WRITE_FILE_PER_ROW = False
    # Default file extensions used to filter input paths; `None` disables filtering.
    _FILE_EXTENSIONS: Optional[Union[str, List[str]]] = None
    # Number of threads for concurrent reading within each read task.
    # If zero or negative, reading will be performed in the main thread.
    _NUM_THREADS_PER_TASK = 0

    def __init__(
        self,
        paths: Union[str, List[str]],
        *,
        filesystem: Optional["pyarrow.fs.FileSystem"] = None,
        schema: Optional[Union[type, "pyarrow.lib.Schema"]] = None,
        open_stream_args: Optional[Dict[str, Any]] = None,
        meta_provider: BaseFileMetadataProvider = DefaultFileMetadataProvider(),
        partition_filter: PathPartitionFilter = None,
        partitioning: Partitioning = None,
        ignore_missing_paths: bool = False,
        shuffle: Union[Literal["files"], None] = None,
        include_paths: bool = False,
        file_extensions: Optional[List[str]] = None,
    ):
        _check_pyarrow_version()

        # Local-scheme paths can only be read on the node that owns them.
        self._supports_distributed_reads = not _is_local_scheme(paths)
        if not self._supports_distributed_reads and ray.util.client.ray.is_connected():
            raise ValueError(
                "Because you're using Ray Client, read tasks scheduled on the Ray "
                "cluster can't access your local files. To fix this issue, store "
                "files in cloud storage or a distributed filesystem like NFS."
            )

        self._schema = schema
        self._open_stream_args = open_stream_args
        self._meta_provider = meta_provider
        self._partition_filter = partition_filter
        self._partitioning = partitioning
        self._ignore_missing_paths = ignore_missing_paths
        self._include_paths = include_paths
        paths, self._filesystem = _resolve_paths_and_filesystem(paths, filesystem)
        # Expand directories/globs into concrete (path, size) pairs, then split
        # the pairs back into two parallel lists.
        paths, file_sizes = map(
            list,
            zip(
                *meta_provider.expand_paths(
                    paths,
                    self._filesystem,
                    partitioning,
                    ignore_missing_paths=ignore_missing_paths,
                )
            ),
        )

        if ignore_missing_paths and len(paths) == 0:
            raise ValueError(
                "None of the provided paths exist. "
                "The 'ignore_missing_paths' field is set to True."
            )

        if self._partition_filter is not None:
            # Use partition filter to skip files which are not needed.
            path_to_size = dict(zip(paths, file_sizes))
            paths = self._partition_filter(paths)
            file_sizes = [path_to_size[p] for p in paths]
            if len(paths) == 0:
                raise ValueError(
                    "No input files found to read. Please double check that "
                    "'partition_filter' field is set properly."
                )

        if file_extensions is not None:
            # Keep only paths whose extension matches; sizes stay aligned.
            path_to_size = dict(zip(paths, file_sizes))
            paths = [p for p in paths if _has_file_extension(p, file_extensions)]
            file_sizes = [path_to_size[p] for p in paths]
            if len(paths) == 0:
                raise ValueError(
                    "No input files found to read with the following file extensions: "
                    f"{file_extensions}. Please double check that "
                    "'file_extensions' field is set properly."
                )

        _validate_shuffle_arg(shuffle)
        self._file_metadata_shuffler = None
        if shuffle == "files":
            self._file_metadata_shuffler = np.random.default_rng()

        # Read tasks serialize `FileBasedDatasource` instances, and the list of paths
        # can be large. To avoid slow serialization speeds, we store a reference to
        # the paths rather than the paths themselves.
        self._paths_ref = ray.put(paths)
        self._file_sizes_ref = ray.put(file_sizes)

    def _paths(self) -> List[str]:
        # Dereference the object-store handle created in `__init__`.
        return ray.get(self._paths_ref)

    def _file_sizes(self) -> List[Optional[int]]:
        # Sizes are per-path byte counts; entries may be None when the
        # metadata provider could not determine a size (see
        # `estimate_inmemory_data_size`, which skips None entries).
        return ray.get(self._file_sizes_ref)

    def estimate_inmemory_data_size(self) -> Optional[int]:
        # Sum of known file sizes; unknown (None) sizes are ignored.
        total_size = 0
        for sz in self._file_sizes():
            if sz is not None:
                total_size += sz
        return total_size

    def get_read_tasks(self, parallelism: int) -> List[ReadTask]:
        """Split the input files into at most `parallelism` read tasks."""
        import numpy as np

        ctx = DataContext.get_current()
        open_stream_args = self._open_stream_args
        partitioning = self._partitioning

        paths = self._paths()
        file_sizes = self._file_sizes()

        if self._file_metadata_shuffler is not None:
            # Shuffle (path, size) pairs together so they stay aligned.
            files_metadata = list(zip(paths, file_sizes))
            shuffled_files_metadata = [
                files_metadata[i]
                for i in self._file_metadata_shuffler.permutation(len(files_metadata))
            ]
            paths, file_sizes = list(map(list, zip(*shuffled_files_metadata)))

        read_stream = self._read_stream
        filesystem = _wrap_s3_serialization_workaround(self._filesystem)

        if open_stream_args is None:
            open_stream_args = {}

        open_input_source = self._open_input_source

        def read_files(
            read_paths: Iterable[str],
        ) -> Iterable[Block]:
            # Runs inside the read task (possibly on a worker thread); yields
            # blocks for each input path in turn.
            nonlocal filesystem, open_stream_args, partitioning

            DataContext._set_current(ctx)
            fs = _unwrap_s3_serialization_workaround(filesystem)
            for read_path in read_paths:
                partitions: Dict[str, str] = {}
                if partitioning is not None:
                    parse = PathPartitionParser(partitioning)
                    partitions = parse(read_path)

                with _open_file_with_retry(
                    read_path,
                    # Bind `read_path` as a default so the retry lambda
                    # captures the current loop value.
                    lambda read_path=read_path: open_input_source(
                        fs, read_path, **open_stream_args
                    ),
                ) as f:
                    for block in read_stream(f, read_path):
                        if partitions:
                            block = _add_partitions(block, partitions)
                        if self._include_paths:
                            block_accessor = BlockAccessor.for_block(block)
                            block = block_accessor.append_column(
                                "path", [read_path] * block_accessor.num_rows()
                            )
                        yield block

        def create_read_task_fn(read_paths, num_threads):
            # Factory so each ReadTask closes over its own path slice.
            def read_task_fn():
                nonlocal num_threads, read_paths

                # TODO: We should refactor the code so that we can get the results in
                # order even when using multiple threads.
                if ctx.execution_options.preserve_order:
                    num_threads = 0

                if num_threads > 0:
                    if len(read_paths) < num_threads:
                        num_threads = len(read_paths)

                    logger.debug(
                        f"Reading {len(read_paths)} files with {num_threads} threads."
                    )

                    yield from make_async_gen(
                        iter(read_paths),
                        read_files,
                        num_workers=num_threads,
                    )
                else:
                    logger.debug(f"Reading {len(read_paths)} files.")
                    yield from read_files(read_paths)

            return read_task_fn

        # fix https://github.com/ray-project/ray/issues/24296
        parallelism = min(parallelism, len(paths))

        read_tasks = []
        split_paths = np.array_split(paths, parallelism)
        split_file_sizes = np.array_split(file_sizes, parallelism)

        for read_paths, file_sizes in zip(split_paths, split_file_sizes):
            if len(read_paths) <= 0:
                continue

            meta = self._meta_provider(
                read_paths,
                self._schema,
                rows_per_file=self._rows_per_file(),
                file_sizes=file_sizes,
            )

            read_task_fn = create_read_task_fn(read_paths, self._NUM_THREADS_PER_TASK)

            read_task = ReadTask(read_task_fn, meta)

            read_tasks.append(read_task)

        return read_tasks

    def _open_input_source(
        self,
        filesystem: "pyarrow.fs.FileSystem",
        path: str,
        **open_args,
    ) -> "pyarrow.NativeFile":
        """Opens a source path for reading and returns the associated Arrow NativeFile.

        The default implementation opens the source path as a sequential input stream,
        using ctx.streaming_read_buffer_size as the buffer size if none is given by the
        caller.

        Implementations that do not support streaming reads (e.g. that require random
        access) should override this method.
        """
        import pyarrow as pa
        from pyarrow.fs import HadoopFileSystem

        ctx = DataContext.get_current()

        compression = open_args.get("compression", None)
        if compression is None:
            try:
                # If no compression manually given, try to detect
                # compression codec from path.
                compression = pa.Codec.detect(path).name
            except (ValueError, TypeError):
                # Arrow's compression inference on the file path
                # doesn't work for Snappy, so we double-check ourselves.
                import pathlib

                suffix = pathlib.Path(path).suffix
                if suffix and suffix[1:] == "snappy":
                    compression = "snappy"
                else:
                    compression = None

        buffer_size = open_args.pop("buffer_size", None)
        if buffer_size is None:
            buffer_size = ctx.streaming_read_buffer_size

        if compression == "snappy":
            # Arrow doesn't support streaming Snappy decompression since the canonical
            # C++ Snappy library doesn't natively support streaming decompression. We
            # works around this by manually decompressing the file with python-snappy.
            open_args["compression"] = None
        else:
            open_args["compression"] = compression

        file = call_with_retry(
            lambda: filesystem.open_input_stream(
                path, buffer_size=buffer_size, **open_args
            ),
            description=f"open file {path}",
            match=ctx.retried_io_errors,
        )

        if compression == "snappy":
            import snappy

            # Decompress the whole file into memory, then wrap the buffer in a
            # file-like object so downstream readers see plain bytes.
            stream = io.BytesIO()
            if isinstance(filesystem, HadoopFileSystem):
                snappy.hadoop_snappy.stream_decompress(src=file, dst=stream)
            else:
                snappy.stream_decompress(src=file, dst=stream)
            stream.seek(0)

            file = pa.PythonFile(stream, mode="r")

        return file

    def _rows_per_file(self):
        """Returns the number of rows per file, or None if unknown."""
        return None

    def _read_stream(self, f: "pyarrow.NativeFile", path: str) -> Iterator[Block]:
        """Streaming read a single file.

        This method should be implemented by subclasses.
        """
        raise NotImplementedError(
            "Subclasses of FileBasedDatasource must implement _read_stream()."
        )

    @property
    def supports_distributed_reads(self) -> bool:
        # False when reading local-scheme paths (see `__init__`).
        return self._supports_distributed_reads
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
def _add_partitions(
    data: Union["pyarrow.Table", "pd.DataFrame"], partitions: Dict[str, Any]
) -> Union["pyarrow.Table", "pd.DataFrame"]:
    """Attach partition key/value columns to a block, dispatching on block type."""
    import pandas as pd
    import pyarrow as pa

    assert isinstance(data, (pa.Table, pd.DataFrame))
    if isinstance(data, pa.Table):
        handler = _add_partitions_to_table
    else:
        handler = _add_partitions_to_dataframe
    return handler(data, partitions)
|
| 386 |
+
|
| 387 |
+
|
| 388 |
+
def _add_partitions_to_table(
    table: "pyarrow.Table", partitions: Dict[str, Any]
) -> "pyarrow.Table":
    """Add each partition key as a constant column on an Arrow table.

    New partition keys are appended as new columns. If a partition key already
    exists as a column, the constant is cast to the column's type and must
    match every non-conflicting in-data value, then replaces the column.
    """
    import pyarrow as pa
    import pyarrow.compute as pc

    existing_names = set(table.column_names)
    for name, value in partitions.items():
        new_column = pa.array([value] * len(table))

        if name not in existing_names:
            table = table.append_column(name, new_column)
            continue

        # TODO: Handle cast error.
        new_column = new_column.cast(table.schema.field(name).type)

        matches = pc.all(pc.equal(new_column, table[name])).as_py()
        if not matches:
            raise ValueError(
                f"Partition column {name} exists in table data, but partition "
                f"value '{value}' is different from in-data values: "
                f"{table[name].unique().to_pylist()}."
            )

        index = table.schema.get_field_index(name)
        table = table.set_column(index, name, new_column)

    return table
|
| 418 |
+
|
| 419 |
+
|
| 420 |
+
def _add_partitions_to_dataframe(
    df: "pd.DataFrame", partitions: Dict[str, Any]
) -> "pd.DataFrame":
    """Add each partition key as a constant column on a pandas DataFrame.

    If a partition key already exists as a column, the constant is cast to the
    column's dtype and every non-null in-data value must equal it; the column
    is then overwritten with the constant.
    """
    import pandas as pd

    for name, value in partitions.items():
        constant_column = pd.Series(data=[value] * len(df), name=name)

        if name in df:
            constant_column = constant_column.astype(df[name].dtype)
            non_null = df[name].notna()
            if not df[name][non_null].equals(constant_column[non_null]):
                raise ValueError(
                    f"Partition column {name} exists in table data, but partition "
                    f"value '{value}' is different from in-data values: "
                    f"{list(df[name].unique())}."
                )

        df[name] = constant_column

    return df
|
| 441 |
+
|
| 442 |
+
|
| 443 |
+
def _wrap_s3_serialization_workaround(filesystem: "pyarrow.fs.FileSystem"):
    """Wrap S3 filesystems so they can be safely deserialized.

    This is needed because pa.fs.S3FileSystem assumes pa.fs is already
    imported before deserialization. See #17085.
    """
    import pyarrow as pa
    import pyarrow.fs

    if not isinstance(filesystem, pa.fs.S3FileSystem):
        return filesystem
    return _S3FileSystemWrapper(filesystem)
|
| 452 |
+
|
| 453 |
+
|
| 454 |
+
def _unwrap_s3_serialization_workaround(
    filesystem: Union["pyarrow.fs.FileSystem", "_S3FileSystemWrapper"]
):
    """Undo `_wrap_s3_serialization_workaround`, returning the raw filesystem."""
    is_wrapped = isinstance(filesystem, _S3FileSystemWrapper)
    return filesystem.unwrap() if is_wrapped else filesystem
|
| 461 |
+
|
| 462 |
+
|
| 463 |
+
class _S3FileSystemWrapper:
    # Pickle-safe wrapper around pyarrow.fs.S3FileSystem; see
    # `_wrap_s3_serialization_workaround` for why this exists.

    def __init__(self, fs: "pyarrow.fs.S3FileSystem"):
        self._fs = fs

    def unwrap(self):
        # Return the wrapped pyarrow filesystem.
        return self._fs

    @classmethod
    def _reconstruct(cls, fs_reconstruct, fs_args):
        # Implicitly trigger S3 subsystem initialization by importing
        # pyarrow.fs.
        import pyarrow.fs  # noqa: F401

        return cls(fs_reconstruct(*fs_args))

    def __reduce__(self):
        # Delegate pickling to the wrapped filesystem's own __reduce__, but
        # route reconstruction through `_reconstruct` so pyarrow.fs gets
        # imported before the filesystem is rebuilt.
        return _S3FileSystemWrapper._reconstruct, self._fs.__reduce__()
|
| 480 |
+
|
| 481 |
+
|
| 482 |
+
def _wrap_arrow_serialization_workaround(kwargs: dict) -> dict:
    """Apply the S3 serialization workaround to a `filesystem` kwarg, if present."""
    fs = kwargs.get("filesystem")
    if fs is not None:
        kwargs["filesystem"] = _wrap_s3_serialization_workaround(fs)

    return kwargs
|
| 487 |
+
|
| 488 |
+
|
| 489 |
+
def _unwrap_arrow_serialization_workaround(kwargs: dict) -> dict:
    """Reverse `_wrap_arrow_serialization_workaround` on a kwargs dict."""
    fs = kwargs.get("filesystem")
    if isinstance(fs, _S3FileSystemWrapper):
        kwargs["filesystem"] = fs.unwrap()
    return kwargs
|
| 493 |
+
|
| 494 |
+
|
| 495 |
+
def _resolve_kwargs(
    kwargs_fn: Callable[[], Dict[str, Any]], **kwargs
) -> Dict[str, Any]:
    """Merge kwargs produced by an optional factory over the given base kwargs.

    Entries returned by `kwargs_fn` (when provided) override same-named
    entries in `kwargs`.
    """
    if kwargs_fn:
        kwargs.update(kwargs_fn())
    return kwargs
|
| 502 |
+
|
| 503 |
+
|
| 504 |
+
def _open_file_with_retry(
    file_path: str,
    open_file: Callable[[], "pyarrow.NativeFile"],
) -> "pyarrow.NativeFile":
    """Open file with an exponential backoff retry strategy.

    This is to avoid transient task failure with remote storage (such as S3),
    when the remote storage throttles the requests.
    """
    if OPEN_FILE_MAX_ATTEMPTS < 1:
        raise ValueError(
            "OPEN_FILE_MAX_ATTEMPTS cannot be negative or 0. Get: "
            f"{OPEN_FILE_MAX_ATTEMPTS}"
        )

    ctx = DataContext.get_current()
    return call_with_retry(
        open_file,
        description=f"open file {file_path}",
        match=ctx.retried_io_errors,
        max_attempts=OPEN_FILE_MAX_ATTEMPTS,
        max_backoff_s=OPEN_FILE_RETRY_MAX_BACKOFF_SECONDS,
    )
|
| 526 |
+
|
| 527 |
+
|
| 528 |
+
def _validate_shuffle_arg(shuffle: Optional[str]) -> None:
    """Raise ValueError unless `shuffle` is one of the supported options."""
    valid_options = [None, "files"]
    if shuffle in valid_options:
        return
    raise ValueError(
        f"Invalid value for 'shuffle': {shuffle}. "
        "Valid values are None, 'files'."
    )
|
minigpt2/lib/python3.10/site-packages/ray/data/datasource/file_datasink.py
ADDED
|
@@ -0,0 +1,262 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import posixpath
|
| 3 |
+
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional
|
| 4 |
+
from urllib.parse import urlparse
|
| 5 |
+
|
| 6 |
+
from ray._private.utils import _add_creatable_buckets_param_if_s3_uri
|
| 7 |
+
from ray.data._internal.delegating_block_builder import DelegatingBlockBuilder
|
| 8 |
+
from ray.data._internal.execution.interfaces import TaskContext
|
| 9 |
+
from ray.data._internal.util import _is_local_scheme, call_with_retry
|
| 10 |
+
from ray.data.block import Block, BlockAccessor
|
| 11 |
+
from ray.data.context import DataContext
|
| 12 |
+
from ray.data.datasource.datasink import Datasink, WriteResult
|
| 13 |
+
from ray.data.datasource.filename_provider import (
|
| 14 |
+
FilenameProvider,
|
| 15 |
+
_DefaultFilenameProvider,
|
| 16 |
+
)
|
| 17 |
+
from ray.data.datasource.path_util import _resolve_paths_and_filesystem
|
| 18 |
+
from ray.util.annotations import DeveloperAPI
|
| 19 |
+
|
| 20 |
+
if TYPE_CHECKING:
|
| 21 |
+
import pyarrow
|
| 22 |
+
|
| 23 |
+
logger = logging.getLogger(__name__)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
WRITE_FILE_MAX_ATTEMPTS = 10
|
| 27 |
+
WRITE_FILE_RETRY_MAX_BACKOFF_SECONDS = 32
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class _FileDatasink(Datasink):
|
| 31 |
+
def __init__(
    self,
    path: str,
    *,
    filesystem: Optional["pyarrow.fs.FileSystem"] = None,
    try_create_dir: bool = True,
    open_stream_args: Optional[Dict[str, Any]] = None,
    filename_provider: Optional[FilenameProvider] = None,
    dataset_uuid: Optional[str] = None,
    file_format: Optional[str] = None,
):
    """Initialize this datasink.

    Args:
        path: The folder to write files to.
        filesystem: The filesystem to write files to. If not provided, the
            filesystem is inferred from the path.
        try_create_dir: Whether to create the directory to write files to.
        open_stream_args: Arguments to pass to ``filesystem.open_output_stream``.
        filename_provider: A :class:`ray.data.datasource.FilenameProvider` that
            generates filenames for each row or block.
        dataset_uuid: The UUID of the dataset being written. If specified, it's
            included in the filename.
        file_format: The file extension. If specified, files are written with this
            extension.
    """
    if open_stream_args is None:
        open_stream_args = {}

    # Fall back to the default filename scheme when no provider is given.
    if filename_provider is None:
        filename_provider = _DefaultFilenameProvider(
            dataset_uuid=dataset_uuid, file_format=file_format
        )

    # Keep the user-supplied path around for error messages/reporting;
    # `self.path` below is the resolved form.
    self.unresolved_path = path
    paths, self.filesystem = _resolve_paths_and_filesystem(path, filesystem)
    assert len(paths) == 1, len(paths)
    self.path = paths[0]

    self.try_create_dir = try_create_dir
    self.open_stream_args = open_stream_args
    self.filename_provider = filename_provider
    self.dataset_uuid = dataset_uuid
    self.file_format = file_format

    # Set to True by `on_write_start` once the output directory is created.
    self.has_created_dir = False
|
| 77 |
+
|
| 78 |
+
def open_output_stream(self, path: str) -> "pyarrow.NativeFile":
    """Open `path` for writing on the resolved filesystem.

    Forwards any user-supplied `open_stream_args` from the constructor.
    """
    return self.filesystem.open_output_stream(path, **self.open_stream_args)
|
| 80 |
+
|
| 81 |
+
def on_write_start(self) -> None:
    """Create a directory to write files to.

    If ``try_create_dir`` is ``False``, this method is a no-op.
    """
    from pyarrow.fs import FileType

    # We should skip creating directories in s3 unless the user specifically
    # overrides this behavior. PyArrow's s3fs implementation for create_dir
    # will attempt to check if the parent directory exists before trying to
    # create the directory (with recursive=True it will try to do this to
    # all of the directories until the root of the bucket). An IAM Policy that
    # restricts access to a subset of prefixes within the bucket might cause
    # the creation of the directory to fail even if the permissions should
    # allow the data can be written to the specified path. For example if a
    # policy only allows users to write blobs prefixed with s3://bucket/foo
    # a call to create_dir for s3://bucket/foo/bar will fail even though it
    # should not.
    parsed_uri = urlparse(self.path)
    is_s3_uri = parsed_uri.scheme == "s3"
    skip_create_dir_for_s3 = (
        is_s3_uri and not DataContext.get_current().s3_try_create_dir
    )

    if self.try_create_dir and not skip_create_dir_for_s3:
        if self.filesystem.get_file_info(self.path).type is FileType.NotFound:
            # Arrow's S3FileSystem doesn't allow creating buckets by default, so we
            # add a query arg enabling bucket creation if an S3 URI is provided.
            tmp = _add_creatable_buckets_param_if_s3_uri(self.path)
            self.filesystem.create_dir(tmp, recursive=True)
            self.has_created_dir = True
|
| 112 |
+
|
| 113 |
+
def write(
|
| 114 |
+
self,
|
| 115 |
+
blocks: Iterable[Block],
|
| 116 |
+
ctx: TaskContext,
|
| 117 |
+
) -> None:
|
| 118 |
+
builder = DelegatingBlockBuilder()
|
| 119 |
+
for block in blocks:
|
| 120 |
+
builder.add_block(block)
|
| 121 |
+
block = builder.build()
|
| 122 |
+
block_accessor = BlockAccessor.for_block(block)
|
| 123 |
+
|
| 124 |
+
if block_accessor.num_rows() == 0:
|
| 125 |
+
logger.warning(f"Skipped writing empty block to {self.path}")
|
| 126 |
+
return
|
| 127 |
+
|
| 128 |
+
self.write_block(block_accessor, 0, ctx)
|
| 129 |
+
|
| 130 |
+
    def write_block(self, block: BlockAccessor, block_index: int, ctx: TaskContext):
        """Write a single block to this datasink's destination.

        Subclasses must override this (see ``RowBasedFileDatasink`` and
        ``BlockBasedFileDatasink`` below for row- and block-granular variants).
        """
        raise NotImplementedError
|
| 132 |
+
|
| 133 |
+
def on_write_complete(self, write_result_blocks: List[Block]) -> WriteResult:
|
| 134 |
+
aggregated_results = super().on_write_complete(write_result_blocks)
|
| 135 |
+
|
| 136 |
+
# If no rows were written, we can delete the directory.
|
| 137 |
+
if self.has_created_dir and aggregated_results.num_rows == 0:
|
| 138 |
+
self.filesystem.delete_dir(self.path)
|
| 139 |
+
return aggregated_results
|
| 140 |
+
|
| 141 |
+
    @property
    def supports_distributed_writes(self) -> bool:
        """Whether write tasks may run on any node in the cluster.

        Returns ``False`` when ``_is_local_scheme`` reports the unresolved
        path as local — presumably because only the driver node can see such
        a path (TODO confirm against `_is_local_scheme`'s definition).
        """
        return not _is_local_scheme(self.unresolved_path)
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
@DeveloperAPI
class RowBasedFileDatasink(_FileDatasink):
    """A datasink that writes one row to each file.

    Subclasses must implement ``write_row_to_file`` and call the superclass constructor.

    Examples:
        .. testcode::

            import io
            from typing import Any, Dict

            import pyarrow
            from PIL import Image

            from ray.data.datasource import RowBasedFileDatasink

            class ImageDatasink(RowBasedFileDatasink):
                def __init__(self, path: str, *, column: str, file_format: str = "png"):
                    super().__init__(path, file_format=file_format)
                    self._file_format = file_format
                    self._column = column

                def write_row_to_file(self, row: Dict[str, Any], file: "pyarrow.NativeFile"):
                    image = Image.fromarray(row[self._column])
                    buffer = io.BytesIO()
                    image.save(buffer, format=self._file_format)
                    file.write(buffer.getvalue())
    """  # noqa: E501

    def write_row_to_file(self, row: Dict[str, Any], file: "pyarrow.NativeFile"):
        """Write a row to a file.

        Args:
            row: The row to write.
            file: The file to write the row to.
        """
        raise NotImplementedError

    def write_block(self, block: BlockAccessor, block_index: int, ctx: TaskContext):
        # Emit one output file per row in the block.
        for row_index, row in enumerate(block.iter_rows(public_row_format=False)):
            filename = self.filename_provider.get_filename_for_row(
                row, ctx.task_idx, block_index, row_index
            )
            write_path = posixpath.join(self.path, filename)

            # NOTE: the closure captures the current `row` and `write_path`;
            # it's invoked (below) before the next loop iteration rebinds
            # them, so closure late binding isn't a problem here.
            def write_row_to_path():
                with self.open_output_stream(write_path) as file:
                    self.write_row_to_file(row, file)

            logger.debug(f"Writing {write_path} file.")
            # Retry transient I/O errors (per DataContext.retried_io_errors)
            # with backoff before giving up on this row.
            call_with_retry(
                write_row_to_path,
                description=f"write '{write_path}'",
                match=DataContext.get_current().retried_io_errors,
                max_attempts=WRITE_FILE_MAX_ATTEMPTS,
                max_backoff_s=WRITE_FILE_RETRY_MAX_BACKOFF_SECONDS,
            )
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
@DeveloperAPI
class BlockBasedFileDatasink(_FileDatasink):
    """A datasink that writes multiple rows to each file.

    Subclasses must implement ``write_block_to_file`` and call the superclass
    constructor.

    Examples:
        .. testcode::

            class CSVDatasink(BlockBasedFileDatasink):
                def __init__(self, path: str):
                    super().__init__(path, file_format="csv")

                def write_block_to_file(self, block: BlockAccessor, file: "pyarrow.NativeFile"):
                    from pyarrow import csv
                    csv.write_csv(block.to_arrow(), file)
    """  # noqa: E501

    def __init__(
        self, path, *, num_rows_per_file: Optional[int] = None, **file_datasink_kwargs
    ):
        """Initialize this datasink.

        Args:
            path: The folder to write files to.
            num_rows_per_file: If set, the target number of rows per output
                file (surfaced via ``num_rows_per_write``).
            **file_datasink_kwargs: Forwarded to the ``_FileDatasink``
                constructor (filesystem, filename_provider, etc.).
        """
        super().__init__(path, **file_datasink_kwargs)

        self._num_rows_per_file = num_rows_per_file

    def write_block_to_file(self, block: BlockAccessor, file: "pyarrow.NativeFile"):
        """Write a block of data to a file.

        Args:
            block: The block to write.
            file: The file to write the block to.
        """
        raise NotImplementedError

    def write_block(self, block: BlockAccessor, block_index: int, ctx: TaskContext):
        # One output file per block; the filename encodes task/block indices.
        filename = self.filename_provider.get_filename_for_block(
            block, ctx.task_idx, block_index
        )
        write_path = posixpath.join(self.path, filename)

        def write_block_to_path():
            with self.open_output_stream(write_path) as file:
                self.write_block_to_file(block, file)

        logger.debug(f"Writing {write_path} file.")
        # Retry transient I/O errors (per DataContext.retried_io_errors)
        # with backoff before giving up on this block.
        call_with_retry(
            write_block_to_path,
            description=f"write '{write_path}'",
            match=DataContext.get_current().retried_io_errors,
            max_attempts=WRITE_FILE_MAX_ATTEMPTS,
            max_backoff_s=WRITE_FILE_RETRY_MAX_BACKOFF_SECONDS,
        )

    @property
    def num_rows_per_write(self) -> Optional[int]:
        """Target number of rows per output file, or None if unconstrained."""
        return self._num_rows_per_file
|
minigpt2/lib/python3.10/site-packages/ray/data/datasource/file_meta_provider.py
ADDED
|
@@ -0,0 +1,484 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import itertools
|
| 2 |
+
import logging
|
| 3 |
+
import os
|
| 4 |
+
import pathlib
|
| 5 |
+
import re
|
| 6 |
+
from typing import (
|
| 7 |
+
TYPE_CHECKING,
|
| 8 |
+
Callable,
|
| 9 |
+
Iterator,
|
| 10 |
+
List,
|
| 11 |
+
Optional,
|
| 12 |
+
Tuple,
|
| 13 |
+
TypeVar,
|
| 14 |
+
Union,
|
| 15 |
+
)
|
| 16 |
+
|
| 17 |
+
import numpy as np
|
| 18 |
+
|
| 19 |
+
import ray
|
| 20 |
+
from ray.data._internal.progress_bar import ProgressBar
|
| 21 |
+
from ray.data._internal.remote_fn import cached_remote_fn
|
| 22 |
+
from ray.data._internal.util import call_with_retry
|
| 23 |
+
from ray.data.block import BlockMetadata
|
| 24 |
+
from ray.data.datasource.partitioning import Partitioning
|
| 25 |
+
from ray.util.annotations import DeveloperAPI
|
| 26 |
+
|
| 27 |
+
if TYPE_CHECKING:
|
| 28 |
+
import pyarrow
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
logger = logging.getLogger(__name__)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
@DeveloperAPI
class FileMetadataProvider:
    """Abstract callable that provides metadata for the files of a single dataset block.

    Current subclasses:
        - :class:`BaseFileMetadataProvider`
        - :class:`ParquetMetadataProvider`
    """

    def _get_block_metadata(
        self,
        paths: List[str],
        schema: Optional[Union[type, "pyarrow.lib.Schema"]],
        **kwargs,
    ) -> BlockMetadata:
        """Resolves and returns block metadata for files in the given paths.

        All file paths provided should belong to a single dataset block.

        Args:
            paths: The file paths for a single dataset block.
            schema: The user-provided or inferred schema for the given paths,
                if any.

        Returns:
            BlockMetadata aggregated across the given paths.
        """
        raise NotImplementedError

    def __call__(
        self,
        paths: List[str],
        schema: Optional[Union[type, "pyarrow.lib.Schema"]],
        **kwargs,
    ) -> BlockMetadata:
        # Calling the provider directly is sugar for `_get_block_metadata`.
        return self._get_block_metadata(paths, schema, **kwargs)
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
@DeveloperAPI
class BaseFileMetadataProvider(FileMetadataProvider):
    """Abstract callable that provides metadata for
    :class:`~ray.data.datasource.file_based_datasource.FileBasedDatasource`
    implementations that reuse the base :meth:`~ray.data.Datasource.prepare_read`
    method.

    Also supports file and file size discovery in input directory paths.

    Current subclasses:
        - :class:`DefaultFileMetadataProvider`
    """

    def _get_block_metadata(
        self,
        paths: List[str],
        schema: Optional[Union[type, "pyarrow.lib.Schema"]],
        *,
        rows_per_file: Optional[int],
        file_sizes: List[Optional[int]],
    ) -> BlockMetadata:
        """Resolves and returns block metadata for files of a single dataset block.

        Args:
            paths: The file paths for a single dataset block. These
                paths will always be a subset of those previously returned from
                :meth:`.expand_paths`.
            schema: The user-provided or inferred schema for the given file
                paths, if any.
            rows_per_file: The fixed number of rows per input file, or None.
            file_sizes: Optional file size per input file previously returned
                from :meth:`.expand_paths`, where `file_sizes[i]` holds the size of
                the file at `paths[i]`.

        Returns:
            BlockMetadata aggregated across the given file paths.
        """
        raise NotImplementedError

    def expand_paths(
        self,
        paths: List[str],
        filesystem: Optional["pyarrow.fs.FileSystem"],
        partitioning: Optional[Partitioning] = None,
        ignore_missing_paths: bool = False,
    ) -> Iterator[Tuple[str, int]]:
        """Expands all paths into concrete file paths by walking directories.

        Also returns a sidecar of file sizes.

        The input paths must be normalized for compatibility with the input
        filesystem prior to invocation.

        Args:
            paths: A list of file and/or directory paths compatible with the
                given filesystem.
            filesystem: The filesystem implementation that should be used for
                expanding all paths and reading their files.
            partitioning: Optional partitioning scheme; implementations may
                use its base directory to optimize expansion (see
                ``_expand_paths``).
            ignore_missing_paths: If True, ignores any file paths in ``paths`` that
                are not found. Defaults to False.

        Returns:
            An iterator of `(file_path, file_size)` pairs. None may be returned for the
            file size if it is either unknown or will be fetched later by
            `_get_block_metadata()`, but the length of
            both lists must be equal.
        """
        raise NotImplementedError
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
@DeveloperAPI
class DefaultFileMetadataProvider(BaseFileMetadataProvider):
    """Default metadata provider for
    :class:`~ray.data.datasource.file_based_datasource.FileBasedDatasource`
    implementations that reuse the base `prepare_read` method.

    Calculates block size in bytes as the sum of its constituent file sizes,
    and assumes a fixed number of rows per file.
    """

    def _get_block_metadata(
        self,
        paths: List[str],
        schema: Optional[Union[type, "pyarrow.lib.Schema"]],
        *,
        rows_per_file: Optional[int],
        file_sizes: List[Optional[int]],
    ) -> BlockMetadata:
        # A fixed per-file row count lets us derive the block row count.
        num_rows = None if rows_per_file is None else len(paths) * rows_per_file
        # The aggregate byte size is only known when every file size is known.
        if None in file_sizes:
            size_bytes = None
        else:
            size_bytes = int(sum(file_sizes))
        # Exec stats are filled in later by the execution layer.
        return BlockMetadata(
            num_rows=num_rows,
            size_bytes=size_bytes,
            schema=schema,
            input_files=paths,
            exec_stats=None,
        )

    def expand_paths(
        self,
        paths: List[str],
        filesystem: "pyarrow.fs.FileSystem",
        partitioning: Optional[Partitioning] = None,
        ignore_missing_paths: bool = False,
    ) -> Iterator[Tuple[str, int]]:
        """Delegate path expansion to the module-level ``_expand_paths`` helper."""
        yield from _expand_paths(paths, filesystem, partitioning, ignore_missing_paths)
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
@DeveloperAPI
class FastFileMetadataProvider(DefaultFileMetadataProvider):
    """Fast Metadata provider for
    :class:`~ray.data.datasource.file_based_datasource.FileBasedDatasource`
    implementations.

    Trades completeness for speed relative to
    :class:`DefaultFileMetadataProvider`: directory expansion and file size
    collection are skipped entirely. The win may be negligible on local
    filesystems but substantial on cloud object stores.

    This should only be used when all input paths exist and are known to be files.
    """

    def expand_paths(
        self,
        paths: List[str],
        filesystem: "pyarrow.fs.FileSystem",
        partitioning: Optional[Partitioning] = None,
        ignore_missing_paths: bool = False,
    ) -> Iterator[Tuple[str, int]]:
        # Missing-path handling is incompatible with skipping expansion:
        # existence is never checked, so the flag would be silently ignored.
        if ignore_missing_paths:
            raise ValueError(
                "`ignore_missing_paths` cannot be set when used with "
                "`FastFileMetadataProvider`. All paths must exist when "
                "using `FastFileMetadataProvider`."
            )

        logger.warning(
            f"Skipping expansion of {len(paths)} path(s). If your paths contain "
            f"directories or if file size collection is required, try rerunning this "
            f"read with `meta_provider=DefaultFileMetadataProvider()`."
        )

        # Sizes are intentionally left unknown; emit each path with None.
        for path in paths:
            yield path, None
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
def _handle_read_os_error(error: OSError, paths: Union[str, List[str]]) -> str:
    """Translate known opaque AWS-related OSErrors into an actionable message.

    Re-raises ``error`` unchanged when it doesn't match one of the known AWS
    "no response body" failure patterns; otherwise raises a new ``OSError``
    that points the user at credential/permission configuration.
    """
    # NOTE: this is not comprehensive yet, and should be extended as more errors arise.
    # NOTE: The latter patterns are raised in Arrow 10+, while the former is raised in
    # Arrow < 10.
    aws_error_pattern = (
        r"^(?:(.*)AWS Error \[code \d+\]: No response body\.(.*))|"
        r"(?:(.*)AWS Error UNKNOWN \(HTTP status 400\) during HeadObject operation: "
        r"No response body\.(.*))|"
        r"(?:(.*)AWS Error ACCESS_DENIED during HeadObject operation: No response "
        r"body\.(.*))$"
    )
    if not re.match(aws_error_pattern, str(error)):
        raise error

    # Specially handle AWS error when reading files, to give a clearer error
    # message to avoid confusing users. The real issue is most likely that the AWS
    # S3 file credentials have not been properly configured yet.
    if isinstance(paths, str):
        # Quote to highlight single file path in error message for better
        # readability. List of file paths will be shown up as ['foo', 'boo'],
        # so only quote single file path here.
        paths = f'"{paths}"'
    raise OSError(
        (
            f"Failing to read AWS S3 file(s): {paths}. "
            "Please check that file exists and has properly configured access. "
            "You can also run AWS CLI command to get more detailed error message "
            "(e.g., aws s3 ls <file-name>). "
            "See https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3/index.html "  # noqa
            "and https://docs.ray.io/en/latest/data/creating-datasets.html#reading-from-remote-storage "  # noqa
            "for more information."
        )
    )
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
def _expand_paths(
    paths: List[str],
    filesystem: "pyarrow.fs.FileSystem",
    partitioning: Optional[Partitioning],
    ignore_missing_paths: bool = False,
) -> Iterator[Tuple[str, int]]:
    """Get the file sizes for all provided file paths.

    Yields:
        ``(file_path, file_size)`` pairs; the size may be None if unknown.
    """
    from pyarrow.fs import LocalFileSystem

    from ray.data.datasource.file_based_datasource import (
        FILE_SIZE_FETCH_PARALLELIZATION_THRESHOLD,
    )
    from ray.data.datasource.path_util import _unwrap_protocol

    # We break down our processing paths into a few key cases:
    # 1. If len(paths) < threshold, fetch the file info for the individual files/paths
    # serially.
    # 2. If all paths are contained under the same parent directory (or base directory,
    # if using partitioning), fetch all file infos at this prefix and filter to the
    # provided paths on the client; this should be a single file info request.
    # 3. If more than threshold requests required, parallelize them via Ray tasks.
    # 1. Small # of paths case.
    if (
        len(paths) < FILE_SIZE_FETCH_PARALLELIZATION_THRESHOLD
        # Local file systems are very fast to hit.
        or isinstance(filesystem, LocalFileSystem)
    ):
        yield from _get_file_infos_serial(paths, filesystem, ignore_missing_paths)
    else:
        # 2. Common path prefix case.
        # Get longest common path of all paths.
        common_path = os.path.commonpath(paths)
        # If parent directory (or base directory, if using partitioning) is common to
        # all paths, fetch all file infos at that prefix and filter the response to the
        # provided paths.
        if (
            partitioning is not None
            and common_path == _unwrap_protocol(partitioning.base_dir)
        ) or all(str(pathlib.Path(path).parent) == common_path for path in paths):
            yield from _get_file_infos_common_path_prefix(
                paths, common_path, filesystem, ignore_missing_paths
            )
        # 3. Parallelization case.
        else:
            # Parallelize requests via Ray tasks.
            yield from _get_file_infos_parallel(paths, filesystem, ignore_missing_paths)
|
| 301 |
+
|
| 302 |
+
|
| 303 |
+
def _get_file_infos_serial(
    paths: List[str],
    filesystem: "pyarrow.fs.FileSystem",
    ignore_missing_paths: bool = False,
) -> Iterator[Tuple[str, int]]:
    """Fetch ``(path, size)`` pairs for each path one at a time on the caller."""
    yield from itertools.chain.from_iterable(
        _get_file_infos(path, filesystem, ignore_missing_paths) for path in paths
    )
|
| 310 |
+
|
| 311 |
+
|
| 312 |
+
def _get_file_infos_common_path_prefix(
    paths: List[str],
    common_path: str,
    filesystem: "pyarrow.fs.FileSystem",
    ignore_missing_paths: bool = False,
) -> Iterator[Tuple[str, int]]:
    """Fetch file sizes for ``paths`` with a single listing of ``common_path``.

    Lists everything under the shared prefix once, then matches the listing
    back to the requested paths. If any requested path didn't appear in the
    listing as a file (e.g. because it's a directory), falls back to fetching
    metadata for all paths in parallel via Ray tasks.

    Args:
        paths: The paths to resolve sizes for. May contain duplicates; each
            occurrence is yielded.
        common_path: A path prefix shared by all of ``paths``.
        filesystem: The filesystem used to list ``common_path``.
        ignore_missing_paths: If True, missing paths are ignored rather than
            raising.

    Returns:
        An iterator of ``(file_path, file_size)`` pairs in the order of
        ``paths``.
    """
    path_to_size = {path: None for path in paths}
    for path, file_size in _get_file_infos(
        common_path, filesystem, ignore_missing_paths
    ):
        if path in path_to_size:
            path_to_size[path] = file_size

    # Check if all `paths` have file size metadata.
    # If any of paths has no file size, fall back to get files metadata in parallel.
    # This can happen when path is a directory, but not a file.
    missing_path = next(
        (path for path in paths if path_to_size[path] is None), None
    )

    if missing_path is not None:
        # Fix over previous revision: the debug message here was garbled
        # ("Finding path ... not have file size metadata").
        logger.debug(
            f"Path {missing_path} does not have file size metadata. "
            "Falling back to fetching file metadata in parallel for all paths."
        )
        # Parallelize requests via Ray tasks.
        yield from _get_file_infos_parallel(paths, filesystem, ignore_missing_paths)
    else:
        # Iterate over `paths` to yield each path in original order.
        # NOTE: do not iterate over `path_to_size` because the dictionary skips
        # duplicated path, while `paths` might contain duplicated path if one wants
        # to read same file multiple times.
        for path in paths:
            yield path, path_to_size[path]
|
| 348 |
+
|
| 349 |
+
|
| 350 |
+
def _get_file_infos_parallel(
    paths: List[str],
    filesystem: "pyarrow.fs.FileSystem",
    ignore_missing_paths: bool = False,
) -> Iterator[Tuple[str, int]]:
    """Fetch ``(path, size)`` pairs for many paths using parallel Ray tasks."""
    from ray.data.datasource.file_based_datasource import (
        PATHS_PER_FILE_SIZE_FETCH_TASK,
        _unwrap_s3_serialization_workaround,
        _wrap_s3_serialization_workaround,
    )

    logger.warning(
        f"Expanding {len(paths)} path(s). This may be a HIGH LATENCY "
        f"operation on some cloud storage services. Moving all the "
        "paths to a common parent directory will lead to faster "
        "metadata fetching."
    )

    # Capture the filesystem in the fetcher func closure, but wrap it in our
    # serialization workaround to make sure that the pickle roundtrip works as expected.
    filesystem = _wrap_s3_serialization_workaround(filesystem)

    def _file_infos_fetcher(paths: List[str]) -> List[Tuple[str, int]]:
        # Runs remotely: unwrap the filesystem first, then expand each path.
        fs = _unwrap_s3_serialization_workaround(filesystem)
        return list(
            itertools.chain.from_iterable(
                _get_file_infos(path, fs, ignore_missing_paths) for path in paths
            )
        )

    yield from _fetch_metadata_parallel(
        paths, _file_infos_fetcher, PATHS_PER_FILE_SIZE_FETCH_TASK
    )
|
| 383 |
+
|
| 384 |
+
|
| 385 |
+
# Generic type variables for `_fetch_metadata_parallel` below: `Uri` is the
# input identifier type and `Meta` is the fetched metadata type.
Uri = TypeVar("Uri")
Meta = TypeVar("Meta")
|
| 387 |
+
|
| 388 |
+
|
| 389 |
+
def _fetch_metadata_parallel(
    uris: List[Uri],
    fetch_func: Callable[[List[Uri]], List[Meta]],
    desired_uris_per_task: int,
    **ray_remote_args,
) -> Iterator[Meta]:
    """Fetch file metadata in parallel using Ray tasks.

    ``uris`` is split into roughly ``desired_uris_per_task``-sized chunks and
    each chunk is handed to a remote invocation of ``fetch_func``.
    """
    remote_fn = cached_remote_fn(fetch_func)
    if ray_remote_args:
        remote_fn = remote_fn.options(**ray_remote_args)

    # Choose a parallelism that results in a # of metadata fetches per task that
    # dominates the Ray task overhead while ensuring good parallelism.
    # Always launch at least 2 parallel fetch tasks.
    parallelism = max(len(uris) // desired_uris_per_task, 2)
    progress = ProgressBar(
        "Metadata Fetch Progress", total=parallelism, unit="task"
    )
    tasks = [
        remote_fn.remote(chunk)
        for chunk in np.array_split(uris, parallelism)
        if len(chunk) > 0
    ]
    chunk_results = progress.fetch_until_complete(tasks)
    yield from itertools.chain.from_iterable(chunk_results)
|
| 413 |
+
|
| 414 |
+
|
| 415 |
+
def _get_file_infos(
    path: str, filesystem: "pyarrow.fs.FileSystem", ignore_missing_path: bool = False
) -> List[Tuple[str, int]]:
    """Get the file info for all files at or under the provided path.

    A directory is expanded recursively via ``_expand_directory``; a plain
    file yields a single ``(path, size)`` entry.

    Raises:
        FileNotFoundError: If ``path`` doesn't exist and
            ``ignore_missing_path`` is False.
    """
    from pyarrow.fs import FileType

    file_infos = []
    try:
        ctx = ray.data.DataContext.get_current()
        # Retry transient I/O errors per the DataContext retry policy.
        file_info = call_with_retry(
            lambda: filesystem.get_file_info(path),
            description="get file info",
            match=ctx.retried_io_errors,
        )
    except OSError as e:
        # `_handle_read_os_error` always raises (either a clearer OSError or
        # the original), so `file_info` is guaranteed to be bound below.
        _handle_read_os_error(e, path)
    if file_info.type == FileType.Directory:
        for file_path, file_size in _expand_directory(path, filesystem):
            file_infos.append((file_path, file_size))
    elif file_info.type == FileType.File:
        file_infos.append((path, file_info.size))
    elif file_info.type == FileType.NotFound and ignore_missing_path:
        pass
    else:
        raise FileNotFoundError(path)

    return file_infos
|
| 442 |
+
|
| 443 |
+
|
| 444 |
+
def _expand_directory(
    path: str,
    filesystem: "pyarrow.fs.FileSystem",
    exclude_prefixes: Optional[List[str]] = None,
    ignore_missing_path: bool = False,
) -> List[Tuple[str, int]]:
    """
    Expand the provided directory path to a list of file paths.

    Args:
        path: The directory path to expand.
        filesystem: The filesystem implementation that should be used for
            reading these files.
        exclude_prefixes: The file relative path prefixes that should be
            excluded from the returned file set. Default excluded prefixes are
            "." and "_".
        ignore_missing_path: If True, a missing directory yields an empty
            result instead of raising.

    Returns:
        A sorted list of (file_path, file_size) tuples.
    """
    if exclude_prefixes is None:
        exclude_prefixes = [".", "_"]

    from pyarrow.fs import FileSelector

    selector = FileSelector(path, recursive=True, allow_not_found=ignore_missing_path)
    files = filesystem.get_file_info(selector)
    base_path = selector.base_dir
    out = []
    for file_ in files:
        # Skip directories, anything outside the base path, and any entry
        # whose relative path starts with an excluded prefix.
        if not file_.is_file:
            continue
        file_path = file_.path
        if not file_path.startswith(base_path):
            continue
        relative = file_path[len(base_path) :]
        if any(relative.startswith(prefix) for prefix in exclude_prefixes):
            continue
        out.append((file_path, file_.size))
    # We sort the paths to guarantee a stable order.
    return sorted(out)
|
minigpt2/lib/python3.10/site-packages/ray/data/datasource/filename_provider.py
ADDED
|
@@ -0,0 +1,122 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Dict, Optional
|
| 2 |
+
|
| 3 |
+
from ray.data.block import Block
|
| 4 |
+
from ray.util.annotations import PublicAPI
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@PublicAPI(stability="alpha")
class FilenameProvider:
    """Generates filenames when you write a :class:`~ray.data.Dataset`.

    Subclass this to control the names of the files produced by a write.

    Depending on the write method, Ray Data emits either one file per row or
    one file per block. For example, :meth:`ray.data.Dataset.write_images`
    writes individual rows, while :func:`ray.data.Dataset.write_parquet`
    writes whole blocks of data. For background on blocks, see
    :ref:`Data internals <datasets_scheduling>`.

    Implement :meth:`~FilenameProvider.get_filename_for_row` when each row is
    written to its own file; otherwise, implement
    :meth:`~FilenameProvider.get_filename_for_block`.

    Example:

        This snippet shows you how to encode labels in written files. For example, if
        `"cat"` is a label, you might write a file named `cat_000000_000000_000000.png`.

        .. testcode::

            import ray
            from ray.data.datasource import FilenameProvider

            class ImageFilenameProvider(FilenameProvider):

                def __init__(self, file_format: str):
                    self.file_format = file_format

                def get_filename_for_row(self, row, task_index, block_index, row_index):
                    return (
                        f"{row['label']}_{task_index:06}_{block_index:06}"
                        f"_{row_index:06}.{self.file_format}"
                    )

            ds = ray.data.read_parquet("s3://anonymous@ray-example-data/images.parquet")
            ds.write_images(
                "/tmp/results",
                column="image",
                filename_provider=ImageFilenameProvider("png")
            )
    """  # noqa: E501

    def get_filename_for_block(
        self, block: Block, task_index: int, block_index: int
    ) -> str:
        """Generate a filename for a block of data.

        .. note::
            Filenames must be unique and deterministic for a given task and block index.

        A block holds multiple rows and maps to a single output file. Different
        tasks might produce different numbers of blocks.

        Args:
            block: The block that will be written to a file.
            task_index: The index of the write task.
            block_index: The index of the block *within* the write task.
        """
        raise NotImplementedError

    def get_filename_for_row(
        self, row: Dict[str, Any], task_index: int, block_index: int, row_index: int
    ) -> str:
        """Generate a filename for a row.

        .. note::
            Filenames must be unique and deterministic for a given task, block, and row
            index.

        Each row maps to a single output file. Different tasks might produce
        different numbers of blocks, and different blocks might hold different
        numbers of rows.

        .. tip::
            If you require a contiguous row index into the global dataset, use
            :meth:`~ray.data.Dataset.iter_rows`. This method is single-threaded and
            isn't recommended for large datasets.

        Args:
            row: The row that will be written to a file.
            task_index: The index of the write task.
            block_index: The index of the block *within* the write task.
            row_index: The index of the row *within* the block.
        """
        raise NotImplementedError
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
class _DefaultFilenameProvider(FilenameProvider):
    """Default filename scheme used when the caller doesn't supply one.

    Produces names of the form
    ``{dataset_uuid}_{task:06}_{block:06}[_{row:06}].{file_format}``, where the
    UUID prefix and the format suffix are each omitted when unset.
    """

    def __init__(
        self, dataset_uuid: Optional[str] = None, file_format: Optional[str] = None
    ):
        self._dataset_uuid = dataset_uuid
        self._file_format = file_format

    def get_filename_for_block(
        self, block: Block, task_index: int, block_index: int
    ) -> str:
        return self._generate_filename(f"{task_index:06}_{block_index:06}")

    def get_filename_for_row(
        self, row: Dict[str, Any], task_index: int, block_index: int, row_index: int
    ) -> str:
        return self._generate_filename(
            f"{task_index:06}_{block_index:06}_{row_index:06}"
        )

    def _generate_filename(self, file_id: str) -> str:
        # Optional dataset-UUID prefix and file-format suffix around the
        # zero-padded index identifier.
        prefix = f"{self._dataset_uuid}_" if self._dataset_uuid is not None else ""
        suffix = f".{self._file_format}" if self._file_format is not None else ""
        return f"{prefix}{file_id}{suffix}"
|
minigpt2/lib/python3.10/site-packages/ray/data/datasource/parquet_meta_provider.py
ADDED
|
@@ -0,0 +1,252 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import TYPE_CHECKING, List, Optional, Union
|
| 2 |
+
|
| 3 |
+
import ray.cloudpickle as cloudpickle
|
| 4 |
+
from ray.data._internal.util import call_with_retry
|
| 5 |
+
from ray.data.block import BlockMetadata
|
| 6 |
+
from ray.data.datasource.file_meta_provider import (
|
| 7 |
+
FileMetadataProvider,
|
| 8 |
+
_fetch_metadata_parallel,
|
| 9 |
+
)
|
| 10 |
+
from ray.util.annotations import DeveloperAPI
|
| 11 |
+
|
| 12 |
+
if TYPE_CHECKING:
|
| 13 |
+
import pyarrow
|
| 14 |
+
|
| 15 |
+
from ray.data._internal.datasource.parquet_datasource import SerializedFragment
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
FRAGMENTS_PER_META_FETCH = 6
|
| 19 |
+
PARALLELIZE_META_FETCH_THRESHOLD = 24
|
| 20 |
+
|
| 21 |
+
# The application-level exceptions to retry for metadata prefetching task.
|
| 22 |
+
# Default to retry on access denied and read timeout errors because AWS S3 would throw
|
| 23 |
+
# these transient errors when load is too high.
|
| 24 |
+
RETRY_EXCEPTIONS_FOR_META_FETCH_TASK = ["AWS Error ACCESS_DENIED", "Timeout"]
|
| 25 |
+
# Maximum number of retries for metadata prefetching task due to transient errors.
|
| 26 |
+
RETRY_MAX_ATTEMPTS_FOR_META_FETCH_TASK = 32
|
| 27 |
+
# Maximum retry back-off interval in seconds for failed metadata prefetching task.
|
| 28 |
+
RETRY_MAX_BACKOFF_S_FOR_META_FETCH_TASK = 64
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class _ParquetFileFragmentMetaData:
    """Lightweight copy of a Parquet fragment's ``pyarrow.parquet.FileMetaData``.

    Carries every scalar attribute of the original metadata object but drops
    the (potentially large) ``schema``. A pickled schema is attached later via
    ``set_schema_pickled()`` so that identical schemas can be shared across
    fragments (see ``_dedupe_metadata``).
    """

    def __init__(self, fragment_metadata: "pyarrow.parquet.FileMetaData"):
        self.created_by = fragment_metadata.created_by
        self.format_version = fragment_metadata.format_version
        self.num_columns = fragment_metadata.num_columns
        self.num_row_groups = fragment_metadata.num_row_groups
        self.num_rows = fragment_metadata.num_rows
        self.serialized_size = fragment_metadata.serialized_size
        # Pickled schema bytes, populated later via `set_schema_pickled()`.
        # Decode with `cloudpickle.loads(self.schema_pickled)`.
        self.schema_pickled = None

        # Sum the row-group sizes now, while we still hold the original
        # metadata object; row groups aren't reachable from this stripped copy.
        self.total_byte_size = sum(
            fragment_metadata.row_group(idx).total_byte_size
            for idx in range(fragment_metadata.num_row_groups)
        )

    def set_schema_pickled(self, schema_pickled: bytes):
        """Attach pickled schema bytes (decode with `cloudpickle.loads`)."""
        self.schema_pickled = schema_pickled
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
@DeveloperAPI
class ParquetMetadataProvider(FileMetadataProvider):
    """Provides block metadata for Arrow Parquet file fragments."""

    def _get_block_metadata(
        self,
        paths: List[str],
        schema: Optional[Union[type, "pyarrow.lib.Schema"]],
        *,
        num_fragments: int,
        prefetched_metadata: Optional[List["_ParquetFileFragmentMetaData"]],
    ) -> BlockMetadata:
        """Resolves and returns block metadata for files of a single dataset block.

        Args:
            paths: The file paths for a single dataset block.
            schema: The user-provided or inferred schema for the given file
                paths, if any.
            num_fragments: The number of Parquet file fragments derived from the
                input file paths.
            prefetched_metadata: Metadata previously returned from
                `prefetch_file_metadata()` for each file fragment, where
                `prefetched_metadata[i]` contains the metadata for `fragments[i]`.

        Returns:
            BlockMetadata aggregated across the given file paths.
        """
        # Only trust the prefetched metadata if there is a complete, non-null
        # entry for every fragment; otherwise the row/byte counts are unknown.
        metadata_is_complete = (
            prefetched_metadata is not None
            and len(prefetched_metadata) == num_fragments
            and all(m is not None for m in prefetched_metadata)
        )
        if metadata_is_complete:
            num_rows = sum(m.num_rows for m in prefetched_metadata)
            size_bytes = sum(m.total_byte_size for m in prefetched_metadata)
        else:
            num_rows = None
            size_bytes = None
        # Exec stats are filled in later by the execution layer.
        return BlockMetadata(
            num_rows=num_rows,
            size_bytes=size_bytes,
            schema=schema,
            input_files=paths,
            exec_stats=None,
        )

    def prefetch_file_metadata(
        self,
        fragments: List["pyarrow.dataset.ParquetFileFragment"],
        **ray_remote_args,
    ) -> Optional[List[_ParquetFileFragmentMetaData]]:
        """Pre-fetches file metadata for all Parquet file fragments in a single batch.

        Subsets of the metadata returned will be provided as input to subsequent calls
        to ``_get_block_metadata`` together with their corresponding Parquet file
        fragments.

        Args:
            fragments: The Parquet file fragments to fetch metadata for.

        Returns:
            Metadata resolved for each input file fragment, or `None`. Metadata
            must be returned in the same order as all input file fragments, such
            that `metadata[i]` always contains the metadata for `fragments[i]`.
        """
        from ray.data._internal.datasource.parquet_datasource import SerializedFragment

        if len(fragments) > PARALLELIZE_META_FETCH_THRESHOLD:
            # Wrap fragments in the serialization workaround so they can be
            # shipped to remote Ray tasks, then fetch metadata in parallel.
            serialized = [SerializedFragment(fragment) for fragment in fragments]

            def _fetch_with_retry_settings(fragment_batch):
                # Ensure that retry settings are propagated to remote tasks.
                return _fetch_metadata_serialization_wrapper(
                    fragment_batch,
                    retry_match=RETRY_EXCEPTIONS_FOR_META_FETCH_TASK,
                    retry_max_attempts=RETRY_MAX_ATTEMPTS_FOR_META_FETCH_TASK,
                    retry_max_interval=RETRY_MAX_BACKOFF_S_FOR_META_FETCH_TASK,
                )

            raw_metadata = list(
                _fetch_metadata_parallel(
                    serialized,
                    _fetch_with_retry_settings,
                    FRAGMENTS_PER_META_FETCH,
                    **ray_remote_args,
                )
            )
        else:
            # Few enough fragments that fetching locally is cheaper than
            # launching Ray tasks.
            raw_metadata = _fetch_metadata(fragments)

        return _dedupe_metadata(raw_metadata)
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
def _fetch_metadata_serialization_wrapper(
    fragments: List["SerializedFragment"],
    retry_match: Optional[List[str]],
    retry_max_attempts: int,
    retry_max_interval: int,
) -> List["pyarrow.parquet.FileMetaData"]:
    """Deserialize fragments and fetch their Parquet metadata with retries.

    This is the body of the remote metadata-prefetch task: it unwraps the
    serialization workaround applied by ``prefetch_file_metadata`` and retries
    the fetch on transient errors (e.g. S3 throttling).

    Args:
        fragments: Serialized Parquet file fragments to fetch metadata for.
        retry_match: Error-message substrings that should trigger a retry, or
            ``None`` to retry on any error.
        retry_max_attempts: Maximum number of fetch attempts.
        retry_max_interval: Maximum retry back-off interval, in seconds.

    Returns:
        The fetched ``FileMetaData`` for each fragment, in input order.

    Raises:
        RuntimeError: If the fetch still fails with an ``OSError`` after
            exhausting all retry attempts.
    """
    from ray.data._internal.datasource.parquet_datasource import (
        _deserialize_fragments_with_retry,
    )

    deserialized_fragments = _deserialize_fragments_with_retry(fragments)
    try:
        metadata = call_with_retry(
            lambda: _fetch_metadata(deserialized_fragments),
            # Fixed typo: was "fetch metdata".
            description="fetch metadata",
            match=retry_match,
            max_attempts=retry_max_attempts,
            max_backoff_s=retry_max_interval,
        )
    except OSError as e:
        raise RuntimeError(
            f"Exceeded maximum number of attempts ({retry_max_attempts}) to retry "
            "metadata fetching task. Metadata fetching tasks can fail due to transient "
            "errors like rate limiting.\n"
            "\n"
            "To increase the maximum number of attempts, configure "
            "`RETRY_MAX_ATTEMPTS_FOR_META_FETCH_TASK`. For example:\n"
            "```\n"
            "ray.data._internal.datasource.parquet_datasource.RETRY_MAX_ATTEMPTS_FOR_META_FETCH_TASK = 64\n"  # noqa: E501
            "```\n"
            "To increase the maximum retry backoff interval, configure "
            "`RETRY_MAX_BACKOFF_S_FOR_META_FETCH_TASK`. For example:\n"
            "```\n"
            "ray.data._internal.datasource.parquet_datasource.RETRY_MAX_BACKOFF_S_FOR_META_FETCH_TASK = 128\n"  # noqa: E501
            "```\n"
            "If the error continues to occur, you can also try decreasing the "
            "concurrency of metadata fetching tasks by setting "
            "`NUM_CPUS_FOR_META_FETCH_TASK` to a larger value. For example:\n"
            "```\n"
            "ray.data._internal.datasource.parquet_datasource.NUM_CPUS_FOR_META_FETCH_TASK = 4.\n"  # noqa: E501
            "```\n"
            "To change which exceptions to retry on, set "
            "`RETRY_EXCEPTIONS_FOR_META_FETCH_TASK` to a list of error messages. For "
            "example:\n"
            "```\n"
            'ray.data._internal.datasource.parquet_datasource.RETRY_EXCEPTIONS_FOR_META_FETCH_TASK = ["AWS Error ACCESS_DENIED", "Timeout"]\n'  # noqa: E501
            "```"
        ) from e
    return metadata
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
def _fetch_metadata(
    fragments: List["pyarrow.dataset.ParquetFileFragment"],
) -> List["pyarrow.parquet.FileMetaData"]:
    """Collect the ``metadata`` attribute from each fragment, in order.

    Stops at the first fragment whose metadata access raises
    ``AttributeError``, returning whatever was gathered up to that point
    (possibly an empty list).
    """
    # Sentinel distinguishes "attribute lookup failed" from a legitimate
    # `None` metadata value; getattr's default also absorbs AttributeError
    # raised inside a `metadata` property, matching the original try/except.
    missing = object()
    collected = []
    for fragment in fragments:
        md = getattr(fragment, "metadata", missing)
        if md is missing:
            break
        collected.append(md)
    return collected
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
def _dedupe_metadata(
    raw_metadatas: List["pyarrow.parquet.FileMetaData"],
) -> List[_ParquetFileFragmentMetaData]:
    """Strip and deduplicate schemas across fragment metadata objects.

    For datasets with a large number of columns, the FileMetaData (in
    particular the schema) can be very large. Memory usage is reduced by
    keeping only one canonical serialized-schema object per unique schema and
    sharing it across all fragments that use it.
    """
    id_by_schema_ser = {}  # serialized schema bytes -> unique schema id
    schema_ser_by_id = {}  # schema id -> canonical serialized schema bytes
    deduped = []
    for raw_md in raw_metadatas:
        stripped = _ParquetFileFragmentMetaData(raw_md)

        schema_ser = cloudpickle.dumps(raw_md.schema.to_arrow_schema())
        if schema_ser in id_by_schema_ser:
            # Seen before: reuse the canonical bytes object so duplicate
            # schemas share a single allocation.
            canonical_ser = schema_ser_by_id[id_by_schema_ser[schema_ser]]
        else:
            # First occurrence: register this serialization as canonical.
            new_id = len(id_by_schema_ser)
            id_by_schema_ser[schema_ser] = new_id
            schema_ser_by_id[new_id] = schema_ser
            canonical_ser = schema_ser
        stripped.set_schema_pickled(canonical_ser)
        deduped.append(stripped)
    return deduped
|
minigpt2/lib/python3.10/site-packages/ray/data/datasource/partitioning.py
ADDED
|
@@ -0,0 +1,456 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import posixpath
|
| 2 |
+
from dataclasses import dataclass
|
| 3 |
+
from enum import Enum
|
| 4 |
+
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Type, Union
|
| 5 |
+
|
| 6 |
+
from ray.util.annotations import DeveloperAPI, PublicAPI
|
| 7 |
+
|
| 8 |
+
if TYPE_CHECKING:
|
| 9 |
+
import pyarrow
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
PartitionDataType = Type[Union[int, float, str, bool]]
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@DeveloperAPI
class PartitionStyle(str, Enum):
    """Supported dataset partition styles.

    Mixes in `str` so values round-trip through plain-text
    serialization/deserialization without extra handling.

    Examples:
        >>> # Serialize to JSON text.
        >>> json.dumps(PartitionStyle.HIVE) # doctest: +SKIP
        '"hive"'

        >>> # Deserialize from JSON text.
        >>> PartitionStyle(json.loads('"hive"')) # doctest: +SKIP
        <PartitionStyle.HIVE: 'hive'>
    """

    HIVE = "hive"
    DIRECTORY = "dir"
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
@DeveloperAPI
@dataclass
class Partitioning:
    """Partition scheme used to describe path-based partitions.

    Path-based partition formats embed all partition keys and values directly in
    their dataset file paths.

    For example, to read a dataset with
    `Hive-style partitions <https://athena.guide/articles/hive-style-partitioning>`_:

    >>> import ray
    >>> from ray.data.datasource.partitioning import Partitioning
    >>> ds = ray.data.read_csv(
    ...     "s3://anonymous@ray-example-data/iris.csv",
    ...     partitioning=Partitioning("hive"),
    ... )

    Instead, if your files are arranged in a directory structure such as:

    .. code::

        root/dog/dog_0.jpeg
        root/dog/dog_1.jpeg
        ...

        root/cat/cat_0.jpeg
        root/cat/cat_1.jpeg
        ...

    Then you can use directory-based partitioning:

    >>> import ray
    >>> from ray.data.datasource.partitioning import Partitioning
    >>> root = "s3://anonymous@air-example-data/cifar-10/images"
    >>> partitioning = Partitioning("dir", field_names=["class"], base_dir=root)
    >>> ds = ray.data.read_images(root, partitioning=partitioning)
    """

    #: The partition style - may be either HIVE or DIRECTORY.
    style: PartitionStyle
    #: "/"-delimited base directory that all partitioned paths should
    #: exist under (exclusive). File paths either outside of, or at the first
    #: level of, this directory will be considered unpartitioned. Specify
    #: `None` or an empty string to search for partitions in all file path
    #: directories.
    base_dir: Optional[str] = None
    #: The partition key field names (i.e. column names for tabular
    #: datasets). When non-empty, the order and length of partition key
    #: field names must match the order and length of partition values.
    #: Required when parsing DIRECTORY partitioned paths or generating
    #: HIVE partitioned paths.
    field_names: Optional[List[str]] = None
    #: A dictionary that maps partition key names to their desired data type. If not
    #: provided, the data type defaults to string.
    field_types: Optional[Dict[str, PartitionDataType]] = None
    #: Filesystem that will be used for partition path file I/O.
    filesystem: Optional["pyarrow.fs.FileSystem"] = None

    def __post_init__(self):
        # Normalize `None` fields to cheap empty defaults so downstream code
        # can treat them as always present.
        if self.base_dir is None:
            self.base_dir = ""
        if self.field_types is None:
            self.field_types = {}

        # Both are computed lazily by `_normalize_base_dir()` on first access.
        self._normalized_base_dir = None
        self._resolved_filesystem = None

    @property
    def normalized_base_dir(self) -> str:
        """Returns the base directory normalized for compatibility with a filesystem."""
        if self._normalized_base_dir is None:
            self._normalize_base_dir()
        return self._normalized_base_dir

    @property
    def resolved_filesystem(self) -> "pyarrow.fs.FileSystem":
        """Returns the filesystem resolved for compatibility with a base directory."""
        if self._resolved_filesystem is None:
            self._normalize_base_dir()
        return self._resolved_filesystem

    def _normalize_base_dir(self):
        """Normalizes the partition base directory for compatibility with the
        given filesystem.

        This should be called once a filesystem has been resolved to ensure that this
        base directory is correctly discovered at the root of all partitioned file
        paths.
        """
        from ray.data.datasource.path_util import _resolve_paths_and_filesystem

        paths, self._resolved_filesystem = _resolve_paths_and_filesystem(
            self.base_dir,
            self.filesystem,
        )
        assert (
            len(paths) == 1
        ), f"Expected 1 normalized base directory, but found {len(paths)}"

        # Ensure a non-empty base directory always ends with a single trailing
        # "/" so prefix matching against file paths works at directory
        # boundaries.
        base = paths[0]
        if base and not base.endswith("/"):
            base += "/"
        self._normalized_base_dir = base
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
@DeveloperAPI
|
| 142 |
+
class PathPartitionParser:
|
| 143 |
+
"""Partition parser for path-based partition formats.
|
| 144 |
+
|
| 145 |
+
Path-based partition formats embed all partition keys and values directly in
|
| 146 |
+
their dataset file paths.
|
| 147 |
+
|
| 148 |
+
Two path partition formats are currently supported - `HIVE` and `DIRECTORY`.
|
| 149 |
+
|
| 150 |
+
For `HIVE` Partitioning, all partition directories under the base directory
|
| 151 |
+
will be discovered based on `{key1}={value1}/{key2}={value2}` naming
|
| 152 |
+
conventions. Key/value pairs do not need to be presented in the same
|
| 153 |
+
order across all paths. Directory names nested under the base directory that
|
| 154 |
+
don't follow this naming condition will be considered unpartitioned. If a
|
| 155 |
+
partition filter is defined, then it will be called with an empty input
|
| 156 |
+
dictionary for each unpartitioned file.
|
| 157 |
+
|
| 158 |
+
For `DIRECTORY` Partitioning, all directories under the base directory will
|
| 159 |
+
be interpreted as partition values of the form `{value1}/{value2}`. An
|
| 160 |
+
accompanying ordered list of partition field names must also be provided,
|
| 161 |
+
where the order and length of all partition values must match the order and
|
| 162 |
+
length of field names. Files stored directly in the base directory will
|
| 163 |
+
be considered unpartitioned. If a partition filter is defined, then it will
|
| 164 |
+
be called with an empty input dictionary for each unpartitioned file. For
|
| 165 |
+
example, if the base directory is `"foo"`, then `"foo.csv"` and `"foo/bar.csv"`
|
| 166 |
+
would be considered unpartitioned files but `"foo/bar/baz.csv"` would be associated
|
| 167 |
+
with partition `"bar"`. If the base directory is undefined, then `"foo.csv"` would
|
| 168 |
+
be unpartitioned, `"foo/bar.csv"` would be associated with partition `"foo"`, and
|
| 169 |
+
"foo/bar/baz.csv" would be associated with partition `("foo", "bar")`.
|
| 170 |
+
"""
|
| 171 |
+
|
| 172 |
+
@staticmethod
|
| 173 |
+
def of(
|
| 174 |
+
style: PartitionStyle = PartitionStyle.HIVE,
|
| 175 |
+
base_dir: Optional[str] = None,
|
| 176 |
+
field_names: Optional[List[str]] = None,
|
| 177 |
+
field_types: Optional[Dict[str, PartitionDataType]] = None,
|
| 178 |
+
filesystem: Optional["pyarrow.fs.FileSystem"] = None,
|
| 179 |
+
) -> "PathPartitionParser":
|
| 180 |
+
"""Creates a path-based partition parser using a flattened argument list.
|
| 181 |
+
|
| 182 |
+
Args:
|
| 183 |
+
style: The partition style - may be either HIVE or DIRECTORY.
|
| 184 |
+
base_dir: "/"-delimited base directory to start searching for partitions
|
| 185 |
+
(exclusive). File paths outside of this directory will be considered
|
| 186 |
+
unpartitioned. Specify `None` or an empty string to search for
|
| 187 |
+
partitions in all file path directories.
|
| 188 |
+
field_names: The partition key names. Required for DIRECTORY partitioning.
|
| 189 |
+
Optional for HIVE partitioning. When non-empty, the order and length of
|
| 190 |
+
partition key field names must match the order and length of partition
|
| 191 |
+
directories discovered. Partition key field names are not required to
|
| 192 |
+
exist in the dataset schema.
|
| 193 |
+
field_types: A dictionary that maps partition key names to their desired
|
| 194 |
+
data type. If not provided, the data type default to string.
|
| 195 |
+
filesystem: Filesystem that will be used for partition path file I/O.
|
| 196 |
+
|
| 197 |
+
Returns:
|
| 198 |
+
The new path-based partition parser.
|
| 199 |
+
"""
|
| 200 |
+
scheme = Partitioning(style, base_dir, field_names, field_types, filesystem)
|
| 201 |
+
return PathPartitionParser(scheme)
|
| 202 |
+
|
| 203 |
+
def __init__(self, partitioning: Partitioning):
|
| 204 |
+
"""Creates a path-based partition parser.
|
| 205 |
+
|
| 206 |
+
Args:
|
| 207 |
+
partitioning: The path-based partition scheme. The parser starts
|
| 208 |
+
searching for partitions from this scheme's base directory. File paths
|
| 209 |
+
outside the base directory will be considered unpartitioned. If the
|
| 210 |
+
base directory is `None` or an empty string then this will search for
|
| 211 |
+
partitions in all file path directories. Field names are required for
|
| 212 |
+
DIRECTORY partitioning, and optional for HIVE partitioning. When
|
| 213 |
+
non-empty, the order and length of partition key field names must match
|
| 214 |
+
the order and length of partition directories discovered.
|
| 215 |
+
"""
|
| 216 |
+
style = partitioning.style
|
| 217 |
+
field_names = partitioning.field_names
|
| 218 |
+
if style == PartitionStyle.DIRECTORY and not field_names:
|
| 219 |
+
raise ValueError(
|
| 220 |
+
"Directory partitioning requires a corresponding list of "
|
| 221 |
+
"partition key field names. Please retry your request with one "
|
| 222 |
+
"or more field names specified."
|
| 223 |
+
)
|
| 224 |
+
parsers = {
|
| 225 |
+
PartitionStyle.HIVE: self._parse_hive_path,
|
| 226 |
+
PartitionStyle.DIRECTORY: self._parse_dir_path,
|
| 227 |
+
}
|
| 228 |
+
self._parser_fn: Callable[[str], Dict[str, str]] = parsers.get(style)
|
| 229 |
+
if self._parser_fn is None:
|
| 230 |
+
raise ValueError(
|
| 231 |
+
f"Unsupported partition style: {style}. "
|
| 232 |
+
f"Supported styles: {parsers.keys()}"
|
| 233 |
+
)
|
| 234 |
+
self._scheme = partitioning
|
| 235 |
+
|
| 236 |
+
def __call__(self, path: str) -> Dict[str, str]:
|
| 237 |
+
"""Parses partition keys and values from a single file path.
|
| 238 |
+
|
| 239 |
+
Args:
|
| 240 |
+
path: Input file path to parse.
|
| 241 |
+
|
| 242 |
+
Returns:
|
| 243 |
+
Dictionary mapping directory partition keys to values from the input file
|
| 244 |
+
path. Returns an empty dictionary for unpartitioned files.
|
| 245 |
+
"""
|
| 246 |
+
dir_path = self._dir_path_trim_base(path)
|
| 247 |
+
if dir_path is None:
|
| 248 |
+
return {}
|
| 249 |
+
partitions: Dict[str, str] = self._parser_fn(dir_path)
|
| 250 |
+
|
| 251 |
+
for field, data_type in self._scheme.field_types.items():
|
| 252 |
+
partitions[field] = _cast_value(partitions[field], data_type)
|
| 253 |
+
|
| 254 |
+
return partitions
|
| 255 |
+
|
| 256 |
+
@property
def scheme(self) -> Partitioning:
    """Returns the path-based partitioning scheme this parser was built with."""
    return self._scheme
|
| 260 |
+
|
| 261 |
+
def _dir_path_trim_base(self, path: str) -> Optional[str]:
|
| 262 |
+
"""Trims the normalized base directory and returns the directory path.
|
| 263 |
+
|
| 264 |
+
Returns None if the path does not start with the normalized base directory.
|
| 265 |
+
Simply returns the directory path if the base directory is undefined.
|
| 266 |
+
"""
|
| 267 |
+
if not path.startswith(self._scheme.normalized_base_dir):
|
| 268 |
+
return None
|
| 269 |
+
path = path[len(self._scheme.normalized_base_dir) :]
|
| 270 |
+
return posixpath.dirname(path)
|
| 271 |
+
|
| 272 |
+
def _parse_hive_path(self, dir_path: str) -> Dict[str, str]:
|
| 273 |
+
"""Hive partition path parser.
|
| 274 |
+
|
| 275 |
+
Returns a dictionary mapping partition keys to values given a hive-style
|
| 276 |
+
partition path of the form "{key1}={value1}/{key2}={value2}/..." or an empty
|
| 277 |
+
dictionary for unpartitioned files.
|
| 278 |
+
"""
|
| 279 |
+
dirs = [d for d in dir_path.split("/") if d and (d.count("=") == 1)]
|
| 280 |
+
kv_pairs = [d.split("=") for d in dirs] if dirs else []
|
| 281 |
+
field_names = self._scheme.field_names
|
| 282 |
+
if field_names and kv_pairs:
|
| 283 |
+
if len(kv_pairs) != len(field_names):
|
| 284 |
+
raise ValueError(
|
| 285 |
+
f"Expected {len(field_names)} partition value(s) but found "
|
| 286 |
+
f"{len(kv_pairs)}: {kv_pairs}."
|
| 287 |
+
)
|
| 288 |
+
for i, field_name in enumerate(field_names):
|
| 289 |
+
if kv_pairs[i][0] != field_name:
|
| 290 |
+
raise ValueError(
|
| 291 |
+
f"Expected partition key {field_name} but found "
|
| 292 |
+
f"{kv_pairs[i][0]}"
|
| 293 |
+
)
|
| 294 |
+
return dict(kv_pairs)
|
| 295 |
+
|
| 296 |
+
def _parse_dir_path(self, dir_path: str) -> Dict[str, str]:
|
| 297 |
+
"""Directory partition path parser.
|
| 298 |
+
|
| 299 |
+
Returns a dictionary mapping directory partition keys to values from a
|
| 300 |
+
partition path of the form "{value1}/{value2}/..." or an empty dictionary for
|
| 301 |
+
unpartitioned files.
|
| 302 |
+
|
| 303 |
+
Requires a corresponding ordered list of partition key field names to map the
|
| 304 |
+
correct key to each value.
|
| 305 |
+
"""
|
| 306 |
+
dirs = [d for d in dir_path.split("/") if d]
|
| 307 |
+
field_names = self._scheme.field_names
|
| 308 |
+
|
| 309 |
+
if dirs and len(dirs) != len(field_names):
|
| 310 |
+
raise ValueError(
|
| 311 |
+
f"Expected {len(field_names)} partition value(s) but found "
|
| 312 |
+
f"{len(dirs)}: {dirs}."
|
| 313 |
+
)
|
| 314 |
+
|
| 315 |
+
if not dirs:
|
| 316 |
+
return {}
|
| 317 |
+
return {
|
| 318 |
+
field: directory
|
| 319 |
+
for field, directory in zip(field_names, dirs)
|
| 320 |
+
if field is not None
|
| 321 |
+
}
|
| 322 |
+
|
| 323 |
+
|
| 324 |
+
@PublicAPI(stability="beta")
class PathPartitionFilter:
    """Partition filter for path-based partition formats.

    Keeps or rejects files according to a user-supplied predicate that
    receives the partition keys and values parsed from each file's path.
    """

    @staticmethod
    def of(
        filter_fn: Callable[[Dict[str, str]], bool],
        style: PartitionStyle = PartitionStyle.HIVE,
        base_dir: Optional[str] = None,
        field_names: Optional[List[str]] = None,
        field_types: Optional[Dict[str, PartitionDataType]] = None,
        filesystem: Optional["pyarrow.fs.FileSystem"] = None,
    ) -> "PathPartitionFilter":
        """Builds a path-based partition filter from a flattened argument list.

        Args:
            filter_fn: Predicate applied to each file's parsed partitions.
                It receives a dictionary mapping partition keys to values
                (empty for unpartitioned files) and returns `True` to read
                the file or `False` to skip it. Keys and values are always
                strings read from the filesystem path. For example, this
                removes all unpartitioned files:

                .. code:: python

                    lambda d: True if d else False

                This raises an assertion error for any unpartitioned file
                found:

                .. code:: python

                    def do_assert(val, msg):
                        assert val, msg

                    lambda d: do_assert(d, "Expected all files to be partitioned!")

                And this only reads files from January, 2022 partitions:

                .. code:: python

                    lambda d: d["month"] == "January" and d["year"] == "2022"

            style: The partition style - may be either HIVE or DIRECTORY.
            base_dir: "/"-delimited directory where the partition search
                starts (exclusive). File paths outside it are considered
                unpartitioned. `None` or "" searches every file path
                directory.
            field_names: Ordered partition key names. Required for DIRECTORY
                partitioning, optional for HIVE. When non-empty, they must
                match the discovered partition directories in order and
                count; they need not exist in the dataset schema.
            field_types: Optional mapping from partition key name to the
                desired data type; values default to strings when omitted.
            filesystem: Filesystem used for partition path file I/O.

        Returns:
            The new path-based partition filter.
        """
        partitioning = Partitioning(style, base_dir, field_names, field_types, filesystem)
        parser = PathPartitionParser(partitioning)
        return PathPartitionFilter(parser, filter_fn)

    def __init__(
        self,
        path_partition_parser: PathPartitionParser,
        filter_fn: Callable[[Dict[str, str]], bool],
    ):
        """Creates a new path-based partition filter around a parser.

        Args:
            path_partition_parser: Parser used to extract partition keys and
                values from each path.
            filter_fn: Predicate over the parsed partition dictionary (empty
                for unpartitioned files); returns `True` to read the file.
                For example, ``lambda d: True if d else False`` removes all
                unpartitioned files, and
                ``lambda d: d["month"] == "January" and d["year"] == "2022"``
                only reads files from January, 2022 partitions.
        """
        self._parser = path_partition_parser
        self._filter_fn = filter_fn

    def __call__(self, paths: List[str]) -> List[str]:
        """Filters `paths` down to those passing this scheme's partition filter.

        Paths outside the parser's base directory parse to empty partition
        dictionaries and are therefore presented to the filter function as
        unpartitioned. When no filter function is set, all input paths are
        returned. The parser handles base-directory normalization for the
        configured filesystem.

        Args:
            paths: Paths to pass through the partition filter function. All
                paths should be normalized for compatibility with the given
                filesystem.
        Returns:
            The paths accepted by the filter function, or all input paths if
            no partition filter is defined.
        """
        if self._filter_fn is None:
            return paths
        accepted = []
        for candidate in paths:
            if self._filter_fn(self._parser(candidate)):
                accepted.append(candidate)
        return accepted

    @property
    def parser(self) -> PathPartitionParser:
        """Returns the path partition parser for this filter."""
        return self._parser
|
| 446 |
+
|
| 447 |
+
|
| 448 |
+
def _cast_value(value: str, data_type: PartitionDataType) -> Any:
    """Converts a raw partition-path string to the requested Python type.

    Supported targets are `int`, `float`, and `bool` (true iff the string is
    "true", case-insensitively); any other type returns the string as-is.
    """
    if data_type is bool:
        return value.lower() == "true"
    if data_type is int:
        return int(value)
    if data_type is float:
        return float(value)
    return value
|
minigpt2/lib/python3.10/site-packages/ray/data/datasource/path_util.py
ADDED
|
@@ -0,0 +1,205 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pathlib
|
| 2 |
+
import sys
|
| 3 |
+
import urllib
|
| 4 |
+
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
|
| 5 |
+
|
| 6 |
+
from ray.data._internal.util import _resolve_custom_scheme
|
| 7 |
+
|
| 8 |
+
if TYPE_CHECKING:
|
| 9 |
+
import pyarrow
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def _has_file_extension(path: str, extensions: Optional[List[str]]) -> bool:
|
| 13 |
+
"""Check if a path has a file extension in the provided list.
|
| 14 |
+
|
| 15 |
+
Examples:
|
| 16 |
+
>>> _has_file_extension("foo.csv", ["csv"])
|
| 17 |
+
True
|
| 18 |
+
>>> _has_file_extension("foo.csv", ["json", "jsonl"])
|
| 19 |
+
False
|
| 20 |
+
>>> _has_file_extension("foo.csv", None)
|
| 21 |
+
True
|
| 22 |
+
|
| 23 |
+
Args:
|
| 24 |
+
path: The path to check.
|
| 25 |
+
extensions: A list of extensions to check against. If `None`, any extension is
|
| 26 |
+
considered valid.
|
| 27 |
+
"""
|
| 28 |
+
assert extensions is None or isinstance(extensions, list), type(extensions)
|
| 29 |
+
|
| 30 |
+
if extensions is None:
|
| 31 |
+
return True
|
| 32 |
+
|
| 33 |
+
# `Path.suffixes` contain leading dots. The user-specified extensions don't.
|
| 34 |
+
extensions = [f".{ext.lower()}" for ext in extensions]
|
| 35 |
+
suffixes = [suffix.lower() for suffix in pathlib.Path(path).suffixes]
|
| 36 |
+
return any(ext in suffixes for ext in extensions)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def _resolve_paths_and_filesystem(
    paths: Union[str, List[str]],
    filesystem: "pyarrow.fs.FileSystem" = None,
) -> Tuple[List[str], "pyarrow.fs.FileSystem"]:
    """
    Resolves and normalizes all provided paths, infers a filesystem from the
    paths and ensures that all paths use the same filesystem.

    Args:
        paths: A single file/directory path or a list of file/directory paths.
            A list of paths can contain both files and directories.
        filesystem: The filesystem implementation that should be used for
            reading these files. If None, a filesystem will be inferred. If not
            None, the provided filesystem will still be validated against all
            filesystems inferred from the provided paths to ensure
            compatibility.

    Returns:
        A tuple of the resolved/normalized paths and the pyarrow filesystem
        they all resolve to.

    Raises:
        ValueError: If `paths` is not a str, pathlib.Path, or list[str], or is
            an empty list.
        TypeError: If `filesystem` is neither a pyarrow nor an fsspec
            filesystem.
        ImportError: If an HTTP(S) path is given and fsspec is not installed.
    """
    # Imported lazily so module import does not require pyarrow.
    import pyarrow as pa
    from pyarrow.fs import (
        FileSystem,
        FSSpecHandler,
        PyFileSystem,
        _resolve_filesystem_and_path,
    )

    if isinstance(paths, str):
        paths = [paths]
    if isinstance(paths, pathlib.Path):
        paths = [str(paths)]
    elif not isinstance(paths, list) or any(not isinstance(p, str) for p in paths):
        raise ValueError(
            "Expected `paths` to be a `str`, `pathlib.Path`, or `list[str]`, but got "
            f"`{paths}`."
        )
    elif len(paths) == 0:
        raise ValueError("Must provide at least one path.")

    # Whether to strip the scheme/protocol prefix from resolved paths below.
    need_unwrap_path_protocol = True
    if filesystem and not isinstance(filesystem, FileSystem):
        # Not a pyarrow filesystem: the only other supported kind is an
        # fsspec filesystem, which gets wrapped into a pyarrow handler.
        err_msg = (
            f"The filesystem passed must either conform to "
            f"pyarrow.fs.FileSystem, or "
            f"fsspec.spec.AbstractFileSystem. The provided "
            f"filesystem was: {filesystem}"
        )
        try:
            import fsspec
            from fsspec.implementations.http import HTTPFileSystem
        except ModuleNotFoundError:
            # If filesystem is not a pyarrow filesystem and fsspec isn't
            # installed, then filesystem is neither a pyarrow filesystem nor
            # an fsspec filesystem, so we raise a TypeError.
            raise TypeError(err_msg) from None
        if not isinstance(filesystem, fsspec.spec.AbstractFileSystem):
            raise TypeError(err_msg) from None
        if isinstance(filesystem, HTTPFileSystem):
            # If filesystem is fsspec HTTPFileSystem, the protocol/scheme of
            # paths should not be unwrapped/removed, because HTTPFileSystem
            # expects full file paths including protocol/scheme. This is
            # different behavior compared to file systems implementation in
            # pyarrow.fs.FileSystem.
            need_unwrap_path_protocol = False

        filesystem = PyFileSystem(FSSpecHandler(filesystem))

    resolved_paths = []
    for path in paths:
        # Translate any custom (e.g. Ray-specific) scheme to a standard one.
        path = _resolve_custom_scheme(path)
        try:
            resolved_filesystem, resolved_path = _resolve_filesystem_and_path(
                path, filesystem
            )
        except pa.lib.ArrowInvalid as e:
            if "Cannot parse URI" in str(e):
                # Retry with percent-encoded path (e.g. spaces or special
                # characters in the URI), then decode the resolved result.
                resolved_filesystem, resolved_path = _resolve_filesystem_and_path(
                    _encode_url(path), filesystem
                )
                resolved_path = _decode_url(resolved_path)
            elif "Unrecognized filesystem type in URI" in str(e):
                scheme = urllib.parse.urlparse(path, allow_fragments=False).scheme
                if scheme in ["http", "https"]:
                    # If scheme of path is HTTP and filesystem is not resolved,
                    # try to use fsspec HTTPFileSystem. This expects fsspec is
                    # installed.
                    try:
                        from fsspec.implementations.http import HTTPFileSystem
                    except ModuleNotFoundError:
                        raise ImportError(
                            "Please install fsspec to read files from HTTP."
                        ) from None

                    resolved_filesystem = PyFileSystem(FSSpecHandler(HTTPFileSystem()))
                    resolved_path = path
                    # HTTP paths keep their full scheme (see above).
                    need_unwrap_path_protocol = False
                else:
                    raise
            else:
                raise
        if filesystem is None:
            # First resolved filesystem becomes the reference for all paths.
            filesystem = resolved_filesystem
        elif need_unwrap_path_protocol:
            resolved_path = _unwrap_protocol(resolved_path)
        resolved_path = filesystem.normalize_path(resolved_path)
        resolved_paths.append(resolved_path)

    return resolved_paths, filesystem
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
def _unwrap_protocol(path):
|
| 147 |
+
"""
|
| 148 |
+
Slice off any protocol prefixes on path.
|
| 149 |
+
"""
|
| 150 |
+
if sys.platform == "win32" and _is_local_windows_path(path):
|
| 151 |
+
# Represent as posix path such that downstream functions properly handle it.
|
| 152 |
+
# This is executed when 'file://' is NOT included in the path.
|
| 153 |
+
return pathlib.Path(path).as_posix()
|
| 154 |
+
|
| 155 |
+
parsed = urllib.parse.urlparse(path, allow_fragments=False) # support '#' in path
|
| 156 |
+
query = "?" + parsed.query if parsed.query else "" # support '?' in path
|
| 157 |
+
netloc = parsed.netloc
|
| 158 |
+
if parsed.scheme == "s3" and "@" in parsed.netloc:
|
| 159 |
+
# If the path contains an @, it is assumed to be an anonymous
|
| 160 |
+
# credentialed path, and we need to strip off the credentials.
|
| 161 |
+
netloc = parsed.netloc.split("@")[-1]
|
| 162 |
+
|
| 163 |
+
parsed_path = parsed.path
|
| 164 |
+
# urlparse prepends the path with a '/'. This does not work on Windows
|
| 165 |
+
# so if this is the case strip the leading slash.
|
| 166 |
+
if (
|
| 167 |
+
sys.platform == "win32"
|
| 168 |
+
and not netloc
|
| 169 |
+
and len(parsed_path) >= 3
|
| 170 |
+
and parsed_path[0] == "/" # The problematic leading slash
|
| 171 |
+
and parsed_path[1].isalpha() # Ensure it is a drive letter.
|
| 172 |
+
and parsed_path[2:4] in (":", ":/")
|
| 173 |
+
):
|
| 174 |
+
parsed_path = parsed_path[1:]
|
| 175 |
+
|
| 176 |
+
return netloc + parsed_path + query
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
def _is_url(path) -> bool:
|
| 180 |
+
return urllib.parse.urlparse(path).scheme != ""
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
def _is_local_windows_path(path: str) -> bool:
|
| 184 |
+
"""Determines if path is a Windows file-system location."""
|
| 185 |
+
if sys.platform != "win32":
|
| 186 |
+
return False
|
| 187 |
+
|
| 188 |
+
if len(path) >= 1 and path[0] == "\\":
|
| 189 |
+
return True
|
| 190 |
+
if (
|
| 191 |
+
len(path) >= 3
|
| 192 |
+
and path[1] == ":"
|
| 193 |
+
and (path[2] == "/" or path[2] == "\\")
|
| 194 |
+
and path[0].isalpha()
|
| 195 |
+
):
|
| 196 |
+
return True
|
| 197 |
+
return False
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
def _encode_url(path):
|
| 201 |
+
return urllib.parse.quote(path, safe="/:")
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
def _decode_url(path):
|
| 205 |
+
return urllib.parse.unquote(path)
|
minigpt2/lib/python3.10/site-packages/ray/data/extensions/__init__.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ray.air.util.tensor_extensions.arrow import (
|
| 2 |
+
ArrowTensorTypeV2,
|
| 3 |
+
get_arrow_extension_tensor_types,
|
| 4 |
+
)
|
| 5 |
+
from ray.data.extensions.object_extension import (
|
| 6 |
+
ArrowPythonObjectArray,
|
| 7 |
+
ArrowPythonObjectScalar,
|
| 8 |
+
ArrowPythonObjectType,
|
| 9 |
+
PythonObjectArray,
|
| 10 |
+
PythonObjectDtype,
|
| 11 |
+
_object_extension_type_allowed,
|
| 12 |
+
)
|
| 13 |
+
from ray.data.extensions.tensor_extension import (
|
| 14 |
+
ArrowConversionError,
|
| 15 |
+
ArrowTensorArray,
|
| 16 |
+
ArrowTensorType,
|
| 17 |
+
ArrowVariableShapedTensorArray,
|
| 18 |
+
ArrowVariableShapedTensorType,
|
| 19 |
+
TensorArray,
|
| 20 |
+
TensorArrayElement,
|
| 21 |
+
TensorDtype,
|
| 22 |
+
column_needs_tensor_extension,
|
| 23 |
+
)
|
| 24 |
+
|
| 25 |
+
__all__ = [
|
| 26 |
+
# Tensor array extension.
|
| 27 |
+
"TensorDtype",
|
| 28 |
+
"TensorArray",
|
| 29 |
+
"TensorArrayElement",
|
| 30 |
+
"ArrowTensorType",
|
| 31 |
+
"ArrowTensorTypeV2",
|
| 32 |
+
"ArrowTensorArray",
|
| 33 |
+
"ArrowVariableShapedTensorType",
|
| 34 |
+
"ArrowVariableShapedTensorArray",
|
| 35 |
+
"column_needs_tensor_extension",
|
| 36 |
+
"ArrowConversionError",
|
| 37 |
+
# Object array extension
|
| 38 |
+
"ArrowPythonObjectArray",
|
| 39 |
+
"ArrowPythonObjectType",
|
| 40 |
+
"ArrowPythonObjectScalar",
|
| 41 |
+
"PythonObjectArray",
|
| 42 |
+
"PythonObjectDtype",
|
| 43 |
+
"_object_extension_type_allowed",
|
| 44 |
+
"get_arrow_extension_tensor_types",
|
| 45 |
+
]
|
minigpt2/lib/python3.10/site-packages/ray/data/extensions/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (974 Bytes). View file
|
|
|
minigpt2/lib/python3.10/site-packages/ray/data/extensions/__pycache__/object_extension.cpython-310.pyc
ADDED
|
Binary file (478 Bytes). View file
|
|
|
minigpt2/lib/python3.10/site-packages/ray/data/extensions/__pycache__/tensor_extension.cpython-310.pyc
ADDED
|
Binary file (670 Bytes). View file
|
|
|
minigpt2/lib/python3.10/site-packages/ray/data/extensions/object_extension.py
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ray.air.util.object_extensions.arrow import ( # noqa: F401
|
| 2 |
+
ArrowPythonObjectArray,
|
| 3 |
+
ArrowPythonObjectScalar,
|
| 4 |
+
ArrowPythonObjectType,
|
| 5 |
+
_object_extension_type_allowed,
|
| 6 |
+
)
|
| 7 |
+
from ray.air.util.object_extensions.pandas import ( # noqa: F401
|
| 8 |
+
PythonObjectArray,
|
| 9 |
+
PythonObjectDtype,
|
| 10 |
+
)
|
minigpt2/lib/python3.10/site-packages/ray/data/extensions/tensor_extension.py
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ray.air.util.tensor_extensions.arrow import ( # noqa: F401
|
| 2 |
+
ArrowConversionError,
|
| 3 |
+
ArrowTensorArray,
|
| 4 |
+
ArrowTensorType,
|
| 5 |
+
ArrowTensorTypeV2,
|
| 6 |
+
ArrowVariableShapedTensorArray,
|
| 7 |
+
ArrowVariableShapedTensorType,
|
| 8 |
+
)
|
| 9 |
+
from ray.air.util.tensor_extensions.pandas import ( # noqa: F401
|
| 10 |
+
TensorArray,
|
| 11 |
+
TensorArrayElement,
|
| 12 |
+
TensorDtype,
|
| 13 |
+
column_needs_tensor_extension,
|
| 14 |
+
)
|
| 15 |
+
from ray.air.util.tensor_extensions.utils import create_ragged_ndarray # noqa: F401
|
minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__init__.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ray.data.preprocessors.chain import Chain
|
| 2 |
+
from ray.data.preprocessors.concatenator import Concatenator
|
| 3 |
+
from ray.data.preprocessors.discretizer import (
|
| 4 |
+
CustomKBinsDiscretizer,
|
| 5 |
+
UniformKBinsDiscretizer,
|
| 6 |
+
)
|
| 7 |
+
from ray.data.preprocessors.encoder import (
|
| 8 |
+
Categorizer,
|
| 9 |
+
LabelEncoder,
|
| 10 |
+
MultiHotEncoder,
|
| 11 |
+
OneHotEncoder,
|
| 12 |
+
OrdinalEncoder,
|
| 13 |
+
)
|
| 14 |
+
from ray.data.preprocessors.hasher import FeatureHasher
|
| 15 |
+
from ray.data.preprocessors.imputer import SimpleImputer
|
| 16 |
+
from ray.data.preprocessors.normalizer import Normalizer
|
| 17 |
+
from ray.data.preprocessors.scaler import (
|
| 18 |
+
MaxAbsScaler,
|
| 19 |
+
MinMaxScaler,
|
| 20 |
+
RobustScaler,
|
| 21 |
+
StandardScaler,
|
| 22 |
+
)
|
| 23 |
+
from ray.data.preprocessors.tokenizer import Tokenizer
|
| 24 |
+
from ray.data.preprocessors.torch import TorchVisionPreprocessor
|
| 25 |
+
from ray.data.preprocessors.transformer import PowerTransformer
|
| 26 |
+
from ray.data.preprocessors.vectorizer import CountVectorizer, HashingVectorizer
|
| 27 |
+
|
| 28 |
+
__all__ = [
|
| 29 |
+
"Categorizer",
|
| 30 |
+
"CountVectorizer",
|
| 31 |
+
"Chain",
|
| 32 |
+
"FeatureHasher",
|
| 33 |
+
"HashingVectorizer",
|
| 34 |
+
"LabelEncoder",
|
| 35 |
+
"MaxAbsScaler",
|
| 36 |
+
"MinMaxScaler",
|
| 37 |
+
"MultiHotEncoder",
|
| 38 |
+
"Normalizer",
|
| 39 |
+
"OneHotEncoder",
|
| 40 |
+
"OrdinalEncoder",
|
| 41 |
+
"PowerTransformer",
|
| 42 |
+
"RobustScaler",
|
| 43 |
+
"SimpleImputer",
|
| 44 |
+
"StandardScaler",
|
| 45 |
+
"Concatenator",
|
| 46 |
+
"Tokenizer",
|
| 47 |
+
"TorchVisionPreprocessor",
|
| 48 |
+
"CustomKBinsDiscretizer",
|
| 49 |
+
"UniformKBinsDiscretizer",
|
| 50 |
+
]
|
minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (1.37 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/chain.cpython-310.pyc
ADDED
|
Binary file (3.64 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/concatenator.cpython-310.pyc
ADDED
|
Binary file (4.77 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/discretizer.cpython-310.pyc
ADDED
|
Binary file (11.7 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/encoder.cpython-310.pyc
ADDED
|
Binary file (25.8 kB). View file
|
|
|