diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/__init__.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6666f96ba4a5d5b9dfe4d83737f80c6dd14f80c3
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/__init__.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/arrow_serialization.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/arrow_serialization.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ff4b540658a96f2b99760939c558bbcb3cb03e21
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/arrow_serialization.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/async_compat.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/async_compat.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5f2a2e960c989fe5ca6ca8395489b05dd72546c7
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/async_compat.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/async_utils.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/async_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..20baa3c897006b8289f6a8dee7fbfd6e8fbdcb5d
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/async_utils.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/auto_init_hook.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/auto_init_hook.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0372a1154c2a21d58188dbd25ab646dc40da2c08
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/auto_init_hook.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/conftest_utils.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/conftest_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d53d15d5d1c60adde1bd34c2ad32e1105cc74e51
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/conftest_utils.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/dict.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/dict.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3f0d69885ea69823edb68a9c1110c87a7fc4374f
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/dict.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/external_storage.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/external_storage.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..390bb842962081d9aac87657c47c2791726e6852
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/external_storage.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/function_manager.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/function_manager.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1c519eeff420c68a0717242c80538ba87be9b343
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/function_manager.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/gcs_aio_client.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/gcs_aio_client.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9a3a8f8621069cc3ebf9b1ca9d64d191aeb8a3de
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/gcs_aio_client.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/gcs_pubsub.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/gcs_pubsub.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..149a24bee5b97b02c49521387f1b645af0cfa932
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/gcs_pubsub.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/inspect_util.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/inspect_util.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..824998b23893baca1c2765083e93d6f5a55fcfb6
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/inspect_util.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/internal_api.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/internal_api.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f1b6a12544d0f87584db68eae086b45868e8028a
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/internal_api.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/memory_monitor.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/memory_monitor.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1b93714b4c0c68d2c7ac74b81be058ca8226f862
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/memory_monitor.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/metrics_agent.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/metrics_agent.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b5a04d2237fd1170312446e325fede8a9f29ce86
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/metrics_agent.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/parameter.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/parameter.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..918f6527e3f442f8cf91e195dd36c3ee5d652518
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/parameter.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/process_watcher.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/process_watcher.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1f043f56b37adada2957d35fcd997b7dab6a21a0
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/process_watcher.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/profiling.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/profiling.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..49a2deedbf116a68983d528d85c50e3ca808ed97
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/profiling.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/prometheus_exporter.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/prometheus_exporter.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1a7f988bc12dfe46381552585a38f19f01a1e3c8
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/prometheus_exporter.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/protobuf_compat.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/protobuf_compat.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8ac5d2fc504fe4783e3a709cfabdb0796c62bb05
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/protobuf_compat.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/pydantic_compat.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/pydantic_compat.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3ae41e6bdc2e8bc1751b6062253c2444015d3c29
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/pydantic_compat.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/ray_client_microbenchmark.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/ray_client_microbenchmark.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9ba13d4a56264d22708bd3b278d71276993240a9
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/ray_client_microbenchmark.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/ray_constants.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/ray_constants.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6fdf535b33f70678573bebb9267b73eda562bf12
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/ray_constants.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/ray_microbenchmark_helpers.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/ray_microbenchmark_helpers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c6eff934674250db43a15de47cdc9e2f2c50f899
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/ray_microbenchmark_helpers.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/ray_option_utils.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/ray_option_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0921c9fbc493d17b287749fb35137479b9ab89ec
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/ray_option_utils.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/ray_perf.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/ray_perf.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e71bca667dc6d2cab9f41fbdf603ee467f9830f3
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/ray_perf.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/ray_process_reaper.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/ray_process_reaper.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4bc31072f4968f96ea5fda6ac056b1461df8a139
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/ray_process_reaper.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/resource_spec.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/resource_spec.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a41ffba1e9054b2d5ea1a9a8be00db62b05151a8
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/resource_spec.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/serialization.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/serialization.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c90a14e5bdd488a3bf87b147619fcc74c3d53ff2
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/serialization.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/services.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/services.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..db86609c501a567fd41b99401408374616f9d6a4
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/services.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/signature.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/signature.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..43b49478af2fe390efffb6ce2ae302997604993b
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/signature.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/state_api_test_utils.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/state_api_test_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fd07328d0340a14c843d1fd1add5f743be7c33b7
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/state_api_test_utils.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/storage.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/storage.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ca42ff5b7e490bcc376d5b3d1582c5c5c499c8f3
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/storage.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/test_utils.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/test_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..018fbf0c7fb22613b6a35f9844363cfcd46b275f
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/__pycache__/test_utils.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/accelerators/__pycache__/__init__.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/accelerators/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..26e0c5ecacebcb63d9d8dceb5ec88530f2c40a18
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/accelerators/__pycache__/__init__.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/accelerators/__pycache__/amd_gpu.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/accelerators/__pycache__/amd_gpu.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..441256cce302e9449d69cb2c6fd7022147a7dc97
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/accelerators/__pycache__/amd_gpu.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/ray_logging/__pycache__/constants.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/ray_logging/__pycache__/constants.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7b17fcf59d4772153d4746e53a7074f7922b4618
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/ray_logging/__pycache__/constants.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/ray_logging/__pycache__/logging_config.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/ray_logging/__pycache__/logging_config.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0939ae4c851222f8b844b43641fc9e720be95f7a
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/ray_logging/__pycache__/logging_config.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/ray_logging/formatters.py b/deepseek/lib/python3.10/site-packages/ray/_private/ray_logging/formatters.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f8792d50698db72e6257dc8f3e50c9c39518452
--- /dev/null
+++ b/deepseek/lib/python3.10/site-packages/ray/_private/ray_logging/formatters.py
@@ -0,0 +1,84 @@
+import logging
+import json
+from ray._private.ray_logging.constants import (
+    LogKey,
+    LOGRECORD_STANDARD_ATTRS,
+    LOGGER_FLATTEN_KEYS,
+)
+from ray._private.ray_constants import LOGGER_FORMAT
+from typing import Any, Dict
+
+
+def _append_flatten_attributes(formatted_attrs: Dict[str, Any], key: str, value: Any):
+    """Flatten the dictionary values for special keys and append the values in place.
+
+    If the key is in `LOGGER_FLATTEN_KEYS`, the value will be flattened and appended
+    to the `formatted_attrs` dictionary. Otherwise, the key-value pair will be appended
+    directly.
+    """
+    if key in LOGGER_FLATTEN_KEYS:
+        if not isinstance(value, dict):
+            raise ValueError(
+                f"Expected a dictionary passing into {key}, but got {type(value)}"
+            )
+        for k, v in value.items():
+            if k in formatted_attrs:
+                raise KeyError(f"Found duplicated key in the log record: {k}")
+            formatted_attrs[k] = v
+    else:
+        formatted_attrs[key] = value
+
+
+def generate_record_format_attrs(
+    formatter: logging.Formatter,
+    record: logging.LogRecord,
+    exclude_standard_attrs,
+) -> dict:
+    record_format_attrs = {}
+
+    # If `exclude_standard_attrs` is False, include the standard attributes.
+    # Otherwise, include only Ray and user-provided context.
+    if not exclude_standard_attrs:
+        record_format_attrs.update(
+            {
+                LogKey.ASCTIME.value: formatter.formatTime(record),
+                LogKey.LEVELNAME.value: record.levelname,
+                LogKey.MESSAGE.value: record.getMessage(),
+                LogKey.FILENAME.value: record.filename,
+                LogKey.LINENO.value: record.lineno,
+            }
+        )
+        if record.exc_info:
+            if not record.exc_text:
+                record.exc_text = formatter.formatException(record.exc_info)
+            record_format_attrs[LogKey.EXC_TEXT.value] = record.exc_text
+
+    for key, value in record.__dict__.items():
+        # Both Ray and user-provided context are stored in `record_format`.
+        if key not in LOGRECORD_STANDARD_ATTRS:
+            _append_flatten_attributes(record_format_attrs, key, value)
+    return record_format_attrs
+
+
+class JSONFormatter(logging.Formatter):
+    def format(self, record):
+        record_format_attrs = generate_record_format_attrs(
+            self, record, exclude_standard_attrs=False
+        )
+        return json.dumps(record_format_attrs)
+
+
+class TextFormatter(logging.Formatter):
+    def __init__(self) -> None:
+        self._inner_formatter = logging.Formatter(LOGGER_FORMAT)
+
+    def format(self, record: logging.LogRecord) -> str:
+        s = self._inner_formatter.format(record)
+        record_format_attrs = generate_record_format_attrs(
+            self, record, exclude_standard_attrs=True
+        )
+
+        additional_attrs = " ".join(
+            [f"{key}={value}" for key, value in record_format_attrs.items()]
+        )
+        return f"{s} {additional_attrs}"
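A minimal usage sketch of the two formatters added above, assuming a Ray installation that ships this module. The `job_id` key passed via `extra=` is invented for illustration; which keys get flattened rather than appended verbatim is governed by `LOGGER_FLATTEN_KEYS` in `ray._private.ray_logging.constants`.

import logging
from ray._private.ray_logging.formatters import JSONFormatter, TextFormatter

logger = logging.getLogger("demo")
handler = logging.StreamHandler()
handler.setFormatter(JSONFormatter())  # one JSON object per record
logger.addHandler(handler)
logger.setLevel(logging.INFO)

# Standard attributes (asctime, levelname, message, ...) plus any
# non-standard attributes passed via `extra=` end up in the JSON payload.
logger.info("job started", extra={"job_id": "j-123"})

# TextFormatter keeps the usual one-line format and appends the same
# non-standard attributes as trailing `key=value` pairs.
handler.setFormatter(TextFormatter())
logger.info("job finished", extra={"job_id": "j-123"})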
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/runtime_env/__pycache__/__init__.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/runtime_env/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..754c99230f30aaafb33b757f4637e5cc6266ca92
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/runtime_env/__pycache__/__init__.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/runtime_env/__pycache__/mpi.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/runtime_env/__pycache__/mpi.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bccab69f1658f00b09e43450c5ff8cff40b8733f
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/runtime_env/__pycache__/mpi.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/runtime_env/__pycache__/setup_hook.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/runtime_env/__pycache__/setup_hook.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..609a6d85acf0b844ab184d2ca37fce39812119c6
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/runtime_env/__pycache__/setup_hook.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/runtime_env/__pycache__/uri_cache.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/runtime_env/__pycache__/uri_cache.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..896de5393454bb8c0b1c373455e0e7e8555e0589
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/runtime_env/__pycache__/uri_cache.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/runtime_env/__pycache__/validation.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/runtime_env/__pycache__/validation.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0b7fcfa92d2fc1780a242a420b8934d3c75ced45
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/runtime_env/__pycache__/validation.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/runtime_env/__pycache__/working_dir.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/runtime_env/__pycache__/working_dir.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b3770498695bcac4522ed80308a9938c352bceeb
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/runtime_env/__pycache__/working_dir.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/runtime_env/agent/thirdparty_files/multidict-6.1.0.dist-info/LICENSE b/deepseek/lib/python3.10/site-packages/ray/_private/runtime_env/agent/thirdparty_files/multidict-6.1.0.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..8727172ae058e56805bd8ed0f988b6788711dcfd
--- /dev/null
+++ b/deepseek/lib/python3.10/site-packages/ray/_private/runtime_env/agent/thirdparty_files/multidict-6.1.0.dist-info/LICENSE
@@ -0,0 +1,13 @@
+   Copyright 2016 Andrew Svetlov and aio-libs contributors
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/dacite/__init__.py b/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/dacite/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..318cd9b60a2b4cd78072ac24c6fc669178fe8535
--- /dev/null
+++ b/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/dacite/__init__.py
@@ -0,0 +1,3 @@
+from .config import Config
+from .core import from_dict
+from .exceptions import *
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/dacite/__pycache__/__init__.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/dacite/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e37cd180d0d3c3e5ff3c2042be3778637e705ffb
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/dacite/__pycache__/__init__.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/dacite/__pycache__/core.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/dacite/__pycache__/core.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b02087268a5755d1b9e8144d48242f78e3de7cc7
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/dacite/__pycache__/core.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/dacite/__pycache__/types.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/dacite/__pycache__/types.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..aaa39e6e785950a3b4520e99b340dd866a69c031
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/dacite/__pycache__/types.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/dacite/config.py b/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/dacite/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..1766a68e2235cc9cde28116ba7e8310f048159f2
--- /dev/null
+++ b/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/dacite/config.py
@@ -0,0 +1,12 @@
+from dataclasses import dataclass, field
+from typing import Dict, Any, Callable, Optional, Type, List
+
+
+@dataclass
+class Config:
+    type_hooks: Dict[Type, Callable[[Any], Any]] = field(default_factory=dict)
+    cast: List[Type] = field(default_factory=list)
+    forward_references: Optional[Dict[str, Any]] = None
+    check_types: bool = True
+    strict: bool = False
+    strict_unions_match: bool = False
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/dacite/core.py b/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/dacite/core.py
new file mode 100644
index 0000000000000000000000000000000000000000..eccfe5e9640232039b8089c6f9756f2beed582ef
--- /dev/null
+++ b/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/dacite/core.py
@@ -0,0 +1,140 @@
+import copy
+from dataclasses import is_dataclass
+from itertools import zip_longest
+from typing import TypeVar, Type, Optional, get_type_hints, Mapping, Any
+
+from .config import Config
+from .data import Data
+from .dataclasses import get_default_value_for_field, create_instance, DefaultValueNotFoundError, get_fields
+from .exceptions import (
+    ForwardReferenceError,
+    WrongTypeError,
+    DaciteError,
+    UnionMatchError,
+    MissingValueError,
+    DaciteFieldError,
+    UnexpectedDataError,
+    StrictUnionMatchError,
+)
+from .types import (
+    is_instance,
+    is_generic_collection,
+    is_union,
+    extract_generic,
+    is_optional,
+    transform_value,
+    extract_origin_collection,
+    is_init_var,
+    extract_init_var,
+)
+
+T = TypeVar("T")
+
+
+def from_dict(data_class: Type[T], data: Data, config: Optional[Config] = None) -> T:
+    """Create a data class instance from a dictionary.
+
+    :param data_class: a data class type
+    :param data: a dictionary of input data
+    :param config: a configuration of the creation process
+    :return: an instance of a data class
+    """
+    init_values: Data = {}
+    post_init_values: Data = {}
+    config = config or Config()
+    try:
+        data_class_hints = get_type_hints(data_class, globalns=config.forward_references)
+    except NameError as error:
+        raise ForwardReferenceError(str(error))
+    data_class_fields = get_fields(data_class)
+    if config.strict:
+        extra_fields = set(data.keys()) - {f.name for f in data_class_fields}
+        if extra_fields:
+            raise UnexpectedDataError(keys=extra_fields)
+    for field in data_class_fields:
+        field = copy.copy(field)
+        field.type = data_class_hints[field.name]
+        try:
+            try:
+                field_data = data[field.name]
+                transformed_value = transform_value(
+                    type_hooks=config.type_hooks, cast=config.cast, target_type=field.type, value=field_data
+                )
+                value = _build_value(type_=field.type, data=transformed_value, config=config)
+            except DaciteFieldError as error:
+                error.update_path(field.name)
+                raise
+            if config.check_types and not is_instance(value, field.type):
+                raise WrongTypeError(field_path=field.name, field_type=field.type, value=value)
+        except KeyError:
+            try:
+                value = get_default_value_for_field(field)
+            except DefaultValueNotFoundError:
+                if not field.init:
+                    continue
+                raise MissingValueError(field.name)
+        if field.init:
+            init_values[field.name] = value
+        else:
+            post_init_values[field.name] = value
+
+    return create_instance(data_class=data_class, init_values=init_values, post_init_values=post_init_values)
+
+
+def _build_value(type_: Type, data: Any, config: Config) -> Any:
+    if is_init_var(type_):
+        type_ = extract_init_var(type_)
+    if is_union(type_):
+        return _build_value_for_union(union=type_, data=data, config=config)
+    elif is_generic_collection(type_) and is_instance(data, extract_origin_collection(type_)):
+        return _build_value_for_collection(collection=type_, data=data, config=config)
+    elif is_dataclass(type_) and is_instance(data, Data):
+        return from_dict(data_class=type_, data=data, config=config)
+    return data
+
+
+def _build_value_for_union(union: Type, data: Any, config: Config) -> Any:
+    types = extract_generic(union)
+    if is_optional(union) and len(types) == 2:
+        return _build_value(type_=types[0], data=data, config=config)
+    union_matches = {}
+    for inner_type in types:
+        try:
+            # noinspection PyBroadException
+            try:
+                data = transform_value(
+                    type_hooks=config.type_hooks, cast=config.cast, target_type=inner_type, value=data
+                )
+            except Exception:  # pylint: disable=broad-except
+                continue
+            value = _build_value(type_=inner_type, data=data, config=config)
+            if is_instance(value, inner_type):
+                if config.strict_unions_match:
+                    union_matches[inner_type] = value
+                else:
+                    return value
+        except DaciteError:
+            pass
+    if config.strict_unions_match:
+        if len(union_matches) > 1:
+            raise StrictUnionMatchError(union_matches)
+        return union_matches.popitem()[1]
+    if not config.check_types:
+        return data
+    raise UnionMatchError(field_type=union, value=data)
+
+
+def _build_value_for_collection(collection: Type, data: Any, config: Config) -> Any:
+    data_type = data.__class__
+    if is_instance(data, Mapping):
+        item_type = extract_generic(collection, defaults=(Any, Any))[1]
+        return data_type((key, _build_value(type_=item_type, data=value, config=config)) for key, value in data.items())
+    elif is_instance(data, tuple):
+        types = extract_generic(collection)
+        if len(types) == 2 and types[1] == Ellipsis:
+            return data_type(_build_value(type_=types[0], data=item, config=config) for item in data)
+        return data_type(
+            _build_value(type_=type_, data=item, config=config) for item, type_ in zip_longest(data, types)
+        )
+    item_type = extract_generic(collection, defaults=(Any,))[0]
+    return data_type(_build_value(type_=item_type, data=item, config=config) for item in data)
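To make the control flow in `from_dict` concrete, here is a small sketch of the vendored dacite in action. The `User`/`Address` dataclasses are invented for illustration; the import path assumes the vendored copy added in this diff.

from dataclasses import dataclass
from typing import Optional

from ray._private.thirdparty.dacite import Config, from_dict

@dataclass
class Address:
    city: str
    zip_code: Optional[str] = None  # Optional fields default to None if absent

@dataclass
class User:
    name: str
    age: int
    address: Address

# Nested dicts are converted recursively via _build_value -> from_dict;
# check_types (on by default) raises WrongTypeError on a mismatch, and
# strict=True raises UnexpectedDataError on unknown keys.
user = from_dict(
    data_class=User,
    data={"name": "Ada", "age": 36, "address": {"city": "London"}},
    config=Config(strict=True),
)
assert user.address.city == "London"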
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/dacite/dataclasses.py b/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/dacite/dataclasses.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5db7ac013444f4d5d72439dd396235278302ba7
--- /dev/null
+++ b/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/dacite/dataclasses.py
@@ -0,0 +1,33 @@
+from dataclasses import Field, MISSING, _FIELDS, _FIELD, _FIELD_INITVAR  # type: ignore
+from typing import Type, Any, TypeVar, List
+
+from .data import Data
+from .types import is_optional
+
+T = TypeVar("T", bound=Any)
+
+
+class DefaultValueNotFoundError(Exception):
+    pass
+
+
+def get_default_value_for_field(field: Field) -> Any:
+    if field.default != MISSING:
+        return field.default
+    elif field.default_factory != MISSING:  # type: ignore
+        return field.default_factory()  # type: ignore
+    elif is_optional(field.type):
+        return None
+    raise DefaultValueNotFoundError()
+
+
+def create_instance(data_class: Type[T], init_values: Data, post_init_values: Data) -> T:
+    instance = data_class(**init_values)
+    for key, value in post_init_values.items():
+        setattr(instance, key, value)
+    return instance
+
+
+def get_fields(data_class: Type[T]) -> List[Field]:
+    fields = getattr(data_class, _FIELDS)
+    return [f for f in fields.values() if f._field_type is _FIELD or f._field_type is _FIELD_INITVAR]
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/dacite/types.py b/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/dacite/types.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d4dfea4f2a5b124fa00ef5237fc96567648b598
--- /dev/null
+++ b/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/dacite/types.py
@@ -0,0 +1,172 @@
+from dataclasses import InitVar
+from typing import Type, Any, Optional, Union, Collection, TypeVar, Dict, Callable, Mapping, List, Tuple
+
+T = TypeVar("T", bound=Any)
+
+
+def transform_value(
+    type_hooks: Dict[Type, Callable[[Any], Any]], cast: List[Type], target_type: Type, value: Any
+) -> Any:
+    if target_type in type_hooks:
+        value = type_hooks[target_type](value)
+    else:
+        for cast_type in cast:
+            if is_subclass(target_type, cast_type):
+                if is_generic_collection(target_type):
+                    value = extract_origin_collection(target_type)(value)
+                else:
+                    value = target_type(value)
+                break
+    if is_optional(target_type):
+        if value is None:
+            return None
+        target_type = extract_optional(target_type)
+        return transform_value(type_hooks, cast, target_type, value)
+    if is_generic_collection(target_type) and isinstance(value, extract_origin_collection(target_type)):
+        collection_cls = value.__class__
+        if issubclass(collection_cls, dict):
+            key_cls, item_cls = extract_generic(target_type, defaults=(Any, Any))
+            return collection_cls(
+                {
+                    transform_value(type_hooks, cast, key_cls, key): transform_value(type_hooks, cast, item_cls, item)
+                    for key, item in value.items()
+                }
+            )
+        item_cls = extract_generic(target_type, defaults=(Any,))[0]
+        return collection_cls(transform_value(type_hooks, cast, item_cls, item) for item in value)
+    return value
+
+
+def extract_origin_collection(collection: Type) -> Type:
+    try:
+        return collection.__extra__
+    except AttributeError:
+        return collection.__origin__
+
+
+def is_optional(type_: Type) -> bool:
+    return is_union(type_) and type(None) in extract_generic(type_)
+
+
+def extract_optional(optional: Type[Optional[T]]) -> T:
+    for type_ in extract_generic(optional):
+        if type_ is not type(None):
+            return type_
+    raise ValueError("can not find not-none value")
+
+
+def is_generic(type_: Type) -> bool:
+    return hasattr(type_, "__origin__")
+
+
+def is_union(type_: Type) -> bool:
+    return is_generic(type_) and type_.__origin__ == Union
+
+
+def is_literal(type_: Type) -> bool:
+    try:
+        from typing import Literal  # type: ignore
+
+        return is_generic(type_) and type_.__origin__ == Literal
+    except ImportError:
+        return False
+
+
+def is_new_type(type_: Type) -> bool:
+    return hasattr(type_, "__supertype__")
+
+
+def extract_new_type(type_: Type) -> Type:
+    return type_.__supertype__
+
+
+def is_init_var(type_: Type) -> bool:
+    return isinstance(type_, InitVar) or type_ is InitVar
+
+
+def extract_init_var(type_: Type) -> Union[Type, Any]:
+    try:
+        return type_.type
+    except AttributeError:
+        return Any
+
+
+def is_instance(value: Any, type_: Type) -> bool:
+    if type_ == Any:
+        return True
+    elif is_union(type_):
+        return any(is_instance(value, t) for t in extract_generic(type_))
+    elif is_generic_collection(type_):
+        origin = extract_origin_collection(type_)
+        if not isinstance(value, origin):
+            return False
+        if not extract_generic(type_):
+            return True
+        if isinstance(value, tuple):
+            tuple_types = extract_generic(type_)
+            if len(tuple_types) == 1 and tuple_types[0] == ():
+                return len(value) == 0
+            elif len(tuple_types) == 2 and tuple_types[1] is ...:
+                return all(is_instance(item, tuple_types[0]) for item in value)
+            else:
+                if len(tuple_types) != len(value):
+                    return False
+                return all(is_instance(item, item_type) for item, item_type in zip(value, tuple_types))
+        if isinstance(value, Mapping):
+            key_type, val_type = extract_generic(type_, defaults=(Any, Any))
+            for key, val in value.items():
+                if not is_instance(key, key_type) or not is_instance(val, val_type):
+                    return False
+            return True
+        return all(is_instance(item, extract_generic(type_, defaults=(Any,))[0]) for item in value)
+    elif is_new_type(type_):
+        return is_instance(value, extract_new_type(type_))
+    elif is_literal(type_):
+        return value in extract_generic(type_)
+    elif is_init_var(type_):
+        return is_instance(value, extract_init_var(type_))
+    elif is_type_generic(type_):
+        return is_subclass(value, extract_generic(type_)[0])
+    else:
+        try:
+            # As described in PEP 484 - section: "The numeric tower"
+            if isinstance(value, (int, float)) and type_ in [float, complex]:
+                return True
+            return isinstance(value, type_)
+        except TypeError:
+            return False
+
+
+def is_generic_collection(type_: Type) -> bool:
+    if not is_generic(type_):
+        return False
+    origin = extract_origin_collection(type_)
+    try:
+        return bool(origin and issubclass(origin, Collection))
+    except (TypeError, AttributeError):
+        return False
+
+
+def extract_generic(type_: Type, defaults: Tuple = ()) -> tuple:
+    try:
+        if hasattr(type_, "_special") and type_._special:
+            return defaults
+        return type_.__args__ or defaults  # type: ignore
+    except AttributeError:
+        return defaults
+
+
+def is_subclass(sub_type: Type, base_type: Type) -> bool:
+    if is_generic_collection(sub_type):
+        sub_type = extract_origin_collection(sub_type)
+    try:
+        return issubclass(sub_type, base_type)
+    except TypeError:
+        return False
+
+
+def is_type_generic(type_: Type) -> bool:
+    try:
+        return type_.__origin__ in (type, Type)
+    except AttributeError:
+        return False
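The `type_hooks` and `cast` knobs on `Config` are applied by `transform_value` above before any type checking. A short sketch of both, again against the vendored import path; the `Event` dataclass and field values are invented for illustration.

from dataclasses import dataclass
from datetime import datetime
from enum import Enum

from ray._private.thirdparty.dacite import Config, from_dict

class Color(Enum):
    RED = "red"
    BLUE = "blue"

@dataclass
class Event:
    created_at: datetime
    color: Color

event = from_dict(
    Event,
    {"created_at": "2024-01-01T00:00:00", "color": "red"},
    config=Config(
        # type_hooks: run this callable on any value targeted at `datetime`.
        type_hooks={datetime: datetime.fromisoformat},
        # cast: for any target type that subclasses Enum, call the target
        # type on the raw value, e.g. Color("red") -> Color.RED.
        cast=[Enum],
    ),
)
assert event.color is Color.RED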
+ """ + all_files = files if isinstance(files, Collection) else list(files) + return_files = {} + for pattern in patterns: + if pattern.include is not None: + result_files = pattern.match(all_files) + if pattern.include: + # Add files and record pattern. + for result_file in result_files: + if result_file in return_files: + if all_matches: + return_files[result_file].patterns.append(pattern) + else: + return_files[result_file].patterns[0] = pattern + else: + return_files[result_file] = MatchDetail([pattern]) + + else: + # Remove files. + for file in result_files: + del return_files[file] + + return return_files + + +def _is_iterable(value): + """ + Check whether the value is an iterable (excludes strings). + + *value* is the value to check, + + Returns whether *value* is a iterable (:class:`bool`). + """ + return isinstance(value, Iterable) and not isinstance(value, (unicode, bytes)) + + +def iter_tree_entries(root, on_error=None, follow_links=None): + """ + Walks the specified directory for all files and directories. + + *root* (:class:`str`) is the root directory to search. + + *on_error* (:class:`~collections.abc.Callable` or :data:`None`) + optionally is the error handler for file-system exceptions. It will be + called with the exception (:exc:`OSError`). Reraise the exception to + abort the walk. Default is :data:`None` to ignore file-system + exceptions. + + *follow_links* (:class:`bool` or :data:`None`) optionally is whether + to walk symbolic links that resolve to directories. Default is + :data:`None` for :data:`True`. + + Raises :exc:`RecursionError` if recursion is detected. + + Returns an :class:`~collections.abc.Iterable` yielding each file or + directory entry (:class:`.TreeEntry`) relative to *root*. + """ + if on_error is not None and not callable(on_error): + raise TypeError("on_error:{!r} is not callable.".format(on_error)) + + if follow_links is None: + follow_links = True + + for entry in _iter_tree_entries_next(os.path.abspath(root), '', {}, on_error, follow_links): + yield entry + + +def iter_tree_files(root, on_error=None, follow_links=None): + """ + Walks the specified directory for all files. + + *root* (:class:`str`) is the root directory to search for files. + + *on_error* (:class:`~collections.abc.Callable` or :data:`None`) + optionally is the error handler for file-system exceptions. It will be + called with the exception (:exc:`OSError`). Reraise the exception to + abort the walk. Default is :data:`None` to ignore file-system + exceptions. + + *follow_links* (:class:`bool` or :data:`None`) optionally is whether + to walk symbolic links that resolve to directories. Default is + :data:`None` for :data:`True`. + + Raises :exc:`RecursionError` if recursion is detected. + + Returns an :class:`~collections.abc.Iterable` yielding the path to + each file (:class:`str`) relative to *root*. + """ + if on_error is not None and not callable(on_error): + raise TypeError("on_error:{!r} is not callable.".format(on_error)) + + if follow_links is None: + follow_links = True + + for entry in _iter_tree_entries_next(os.path.abspath(root), '', {}, on_error, follow_links): + if not entry.is_dir(follow_links): + yield entry.path + + +# Alias `iter_tree_files()` as `iter_tree()`. +iter_tree = iter_tree_files + + +def _iter_tree_entries_next(root_full, dir_rel, memo, on_error, follow_links): + """ + Scan the directory for all descendant files. + + *root_full* (:class:`str`) the absolute path to the root directory. 
+ + *dir_rel* (:class:`str`) the path to the directory to scan relative to + *root_full*. + + *memo* (:class:`dict`) keeps track of ancestor directories + encountered. Maps each ancestor real path (:class:`str`) to relative + path (:class:`str`). + + *on_error* (:class:`~collections.abc.Callable` or :data:`None`) + optionally is the error handler for file-system exceptions. + + *follow_links* (:class:`bool`) is whether to walk symbolic links that + resolve to directories. + + Yields each entry (:class:`.TreeEntry`). + """ + dir_full = os.path.join(root_full, dir_rel) + dir_real = os.path.realpath(dir_full) + + # Remember each encountered ancestor directory and its canonical + # (real) path. If a canonical path is encountered more than once, + # recursion has occurred. + if dir_real not in memo: + memo[dir_real] = dir_rel + else: + raise RecursionError(real_path=dir_real, first_path=memo[dir_real], second_path=dir_rel) + + for node_name in os.listdir(dir_full): + node_rel = os.path.join(dir_rel, node_name) + node_full = os.path.join(root_full, node_rel) + + # Inspect child node. + try: + node_lstat = os.lstat(node_full) + except OSError as e: + if on_error is not None: + on_error(e) + continue + + if stat.S_ISLNK(node_lstat.st_mode): + # Child node is a link, inspect the target node. + is_link = True + try: + node_stat = os.stat(node_full) + except OSError as e: + if on_error is not None: + on_error(e) + continue + else: + is_link = False + node_stat = node_lstat + + if stat.S_ISDIR(node_stat.st_mode) and (follow_links or not is_link): + # Child node is a directory, recurse into it and yield its + # descendant files. + yield TreeEntry(node_name, node_rel, node_lstat, node_stat) + + for entry in _iter_tree_entries_next(root_full, node_rel, memo, on_error, follow_links): + yield entry + + elif stat.S_ISREG(node_stat.st_mode) or is_link: + # Child node is either a file or an unfollowed link, yield it. + yield TreeEntry(node_name, node_rel, node_lstat, node_stat) + + # NOTE: Make sure to remove the canonical (real) path of the directory + # from the ancestors memo once we are done with it. This allows the + # same directory to appear multiple times. If this is not done, the + # second occurrence of the directory will be incorrectly interpreted + # as a recursion. See . + del memo[dir_real] + + +def lookup_pattern(name): + """ + Lookups a registered pattern factory by name. + + *name* (:class:`str`) is the name of the pattern factory. + + Returns the registered pattern factory (:class:`~collections.abc.Callable`). + If no pattern factory is registered, raises :exc:`KeyError`. + """ + return _registered_patterns[name] + + +def match_file(patterns, file): + """ + Matches the file to the patterns. + + *patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`) + contains the patterns to use. + + *file* (:class:`str`) is the normalized file path to be matched + against *patterns*. + + Returns :data:`True` if *file* matched; otherwise, :data:`False`. + """ + matched = False + for pattern in patterns: + if pattern.include is not None: + if file in pattern.match((file,)): + matched = pattern.include + return matched + + +def match_files(patterns, files): + """ + Matches the files to the patterns. + + *patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`) + contains the patterns to use. + + *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains + the normalized file paths to be matched against *patterns*. 
+ + Returns the matched files (:class:`set` of :class:`str`). + """ + all_files = files if isinstance(files, Collection) else list(files) + return_files = set() + for pattern in patterns: + if pattern.include is not None: + result_files = pattern.match(all_files) + if pattern.include: + return_files.update(result_files) + else: + return_files.difference_update(result_files) + return return_files + + +def _normalize_entries(entries, separators=None): + """ + Normalizes the entry paths to use the POSIX path separator. + + *entries* (:class:`~collections.abc.Iterable` of :class:`.TreeEntry`) + contains the entries to be normalized. + + *separators* (:class:`~collections.abc.Collection` of :class:`str`; or + :data:`None`) optionally contains the path separators to normalize. + See :func:`normalize_file` for more information. + + Returns a :class:`dict` mapping the each normalized file path (:class:`str`) + to the entry (:class:`.TreeEntry`) + """ + norm_files = {} + for entry in entries: + norm_files[normalize_file(entry.path, separators=separators)] = entry + return norm_files + + +def normalize_file(file, separators=None): + """ + Normalizes the file path to use the POSIX path separator (i.e., ``'/'``). + + *file* (:class:`str` or :class:`pathlib.PurePath`) is the file path. + + *separators* (:class:`~collections.abc.Collection` of :class:`str`; or + :data:`None`) optionally contains the path separators to normalize. + This does not need to include the POSIX path separator (``'/'``), but + including it will not affect the results. Default is :data:`None` for + :data:`NORMALIZE_PATH_SEPS`. To prevent normalization, pass an empty + container (e.g., an empty tuple ``()``). + + Returns the normalized file path (:class:`str`). + """ + # Normalize path separators. + if separators is None: + separators = NORMALIZE_PATH_SEPS + + # Convert path object to string. + norm_file = str(file) + + for sep in separators: + norm_file = norm_file.replace(sep, posixpath.sep) + + # Remove current directory prefix. + if norm_file.startswith('./'): + norm_file = norm_file[2:] + + return norm_file + + +def normalize_files(files, separators=None): + """ + Normalizes the file paths to use the POSIX path separator. + + *files* (:class:`~collections.abc.Iterable` of :class:`str` or + :class:`pathlib.PurePath`) contains the file paths to be normalized. + + *separators* (:class:`~collections.abc.Collection` of :class:`str`; or + :data:`None`) optionally contains the path separators to normalize. + See :func:`normalize_file` for more information. + + Returns a :class:`dict` mapping the each normalized file path (:class:`str`) + to the original file path (:class:`str`) + """ + norm_files = {} + for path in files: + norm_files[normalize_file(path, separators=separators)] = path + return norm_files + + +def register_pattern(name, pattern_factory, override=None): + """ + Registers the specified pattern factory. + + *name* (:class:`str`) is the name to register the pattern factory + under. + + *pattern_factory* (:class:`~collections.abc.Callable`) is used to + compile patterns. It must accept an uncompiled pattern (:class:`str`) + and return the compiled pattern (:class:`.Pattern`). + + *override* (:class:`bool` or :data:`None`) optionally is whether to + allow overriding an already registered pattern under the same name + (:data:`True`), instead of raising an :exc:`AlreadyRegisteredError` + (:data:`False`). Default is :data:`None` for :data:`False`. 
+ """ + if not isinstance(name, string_types): + raise TypeError("name:{!r} is not a string.".format(name)) + if not callable(pattern_factory): + raise TypeError("pattern_factory:{!r} is not callable.".format(pattern_factory)) + if name in _registered_patterns and not override: + raise AlreadyRegisteredError(name, _registered_patterns[name]) + _registered_patterns[name] = pattern_factory + + +class AlreadyRegisteredError(Exception): + """ + The :exc:`AlreadyRegisteredError` exception is raised when a pattern + factory is registered under a name already in use. + """ + + def __init__(self, name, pattern_factory): + """ + Initializes the :exc:`AlreadyRegisteredError` instance. + + *name* (:class:`str`) is the name of the registered pattern. + + *pattern_factory* (:class:`~collections.abc.Callable`) is the + registered pattern factory. + """ + super(AlreadyRegisteredError, self).__init__(name, pattern_factory) + + @property + def message(self): + """ + *message* (:class:`str`) is the error message. + """ + return "{name!r} is already registered for pattern factory:{pattern_factory!r}.".format( + name=self.name, + pattern_factory=self.pattern_factory, + ) + + @property + def name(self): + """ + *name* (:class:`str`) is the name of the registered pattern. + """ + return self.args[0] + + @property + def pattern_factory(self): + """ + *pattern_factory* (:class:`~collections.abc.Callable`) is the + registered pattern factory. + """ + return self.args[1] + + +class RecursionError(Exception): + """ + The :exc:`RecursionError` exception is raised when recursion is + detected. + """ + + def __init__(self, real_path, first_path, second_path): + """ + Initializes the :exc:`RecursionError` instance. + + *real_path* (:class:`str`) is the real path that recursion was + encountered on. + + *first_path* (:class:`str`) is the first path encountered for + *real_path*. + + *second_path* (:class:`str`) is the second path encountered for + *real_path*. + """ + super(RecursionError, self).__init__(real_path, first_path, second_path) + + @property + def first_path(self): + """ + *first_path* (:class:`str`) is the first path encountered for + :attr:`self.real_path `. + """ + return self.args[1] + + @property + def message(self): + """ + *message* (:class:`str`) is the error message. + """ + return "Real path {real!r} was encountered at {first!r} and then {second!r}.".format( + real=self.real_path, + first=self.first_path, + second=self.second_path, + ) + + @property + def real_path(self): + """ + *real_path* (:class:`str`) is the real path that recursion was + encountered on. + """ + return self.args[0] + + @property + def second_path(self): + """ + *second_path* (:class:`str`) is the second path encountered for + :attr:`self.real_path `. + """ + return self.args[2] + + +class MatchDetail(object): + """ + The :class:`.MatchDetail` class contains information about + """ + + #: Make the class dict-less. + __slots__ = ('patterns',) + + def __init__(self, patterns): + """ + Initialize the :class:`.MatchDetail` instance. + + *patterns* (:class:`~collections.abc.Sequence` of :class:`~pathspec.pattern.Pattern`) + contains the patterns that matched the file in the order they were + encountered. + """ + + self.patterns = patterns + """ + *patterns* (:class:`~collections.abc.Sequence` of :class:`~pathspec.pattern.Pattern`) + contains the patterns that matched the file in the order they were + encountered. 
+ """ + + +class TreeEntry(object): + """ + The :class:`.TreeEntry` class contains information about a file-system + entry. + """ + + #: Make the class dict-less. + __slots__ = ('_lstat', 'name', 'path', '_stat') + + def __init__(self, name, path, lstat, stat): + """ + Initialize the :class:`.TreeEntry` instance. + + *name* (:class:`str`) is the base name of the entry. + + *path* (:class:`str`) is the relative path of the entry. + + *lstat* (:class:`~os.stat_result`) is the stat result of the direct + entry. + + *stat* (:class:`~os.stat_result`) is the stat result of the entry, + potentially linked. + """ + + self._lstat = lstat + """ + *_lstat* (:class:`~os.stat_result`) is the stat result of the direct + entry. + """ + + self.name = name + """ + *name* (:class:`str`) is the base name of the entry. + """ + + self.path = path + """ + *path* (:class:`str`) is the path of the entry. + """ + + self._stat = stat + """ + *_stat* (:class:`~os.stat_result`) is the stat result of the linked + entry. + """ + + def is_dir(self, follow_links=None): + """ + Get whether the entry is a directory. + + *follow_links* (:class:`bool` or :data:`None`) is whether to follow + symbolic links. If this is :data:`True`, a symlink to a directory + will result in :data:`True`. Default is :data:`None` for :data:`True`. + + Returns whether the entry is a directory (:class:`bool`). + """ + if follow_links is None: + follow_links = True + + node_stat = self._stat if follow_links else self._lstat + return stat.S_ISDIR(node_stat.st_mode) + + def is_file(self, follow_links=None): + """ + Get whether the entry is a regular file. + + *follow_links* (:class:`bool` or :data:`None`) is whether to follow + symbolic links. If this is :data:`True`, a symlink to a regular file + will result in :data:`True`. Default is :data:`None` for :data:`True`. + + Returns whether the entry is a regular file (:class:`bool`). + """ + if follow_links is None: + follow_links = True + + node_stat = self._stat if follow_links else self._lstat + return stat.S_ISREG(node_stat.st_mode) + + def is_symlink(self): + """ + Returns whether the entry is a symbolic link (:class:`bool`). + """ + return stat.S_ISLNK(self._lstat.st_mode) + + def stat(self, follow_links=None): + """ + Get the cached stat result for the entry. + + *follow_links* (:class:`bool` or :data:`None`) is whether to follow + symbolic links. If this is :data:`True`, the stat result of the + linked file will be returned. Default is :data:`None` for :data:`True`. + + Returns that stat result (:class:`~os.stat_result`). 
+    """
+    if follow_links is None:
+        follow_links = True
+
+    return self._stat if follow_links else self._lstat
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/pyamdsmi/__pycache__/__init__.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/pyamdsmi/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..53b8cf1e35e3b9f7319fc5dd0c7f58bdf0a20b7e
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/pyamdsmi/__pycache__/__init__.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/pynvml/__init__.py b/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/pynvml/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2cc389aba708d35b22f8d1ddca46e7a0f01f093a
--- /dev/null
+++ b/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/pynvml/__init__.py
@@ -0,0 +1,4 @@
+from ray._private.thirdparty.pynvml.pynvml import *
+# nvidia-ml-py version
+# Note: we pick this version to use the V2 API which is supported by older drivers
+__version__ = "11.495.46"
\ No newline at end of file
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/tabulate/__init__.py b/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/tabulate/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/tabulate/__pycache__/__init__.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/tabulate/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2cef9251d34e79de9feeeb387b3f4daf8e1e540b
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/tabulate/__pycache__/__init__.cpython-310.pyc differ
diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/tabulate/tabulate.py b/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/tabulate/tabulate.py
new file mode 100644
index 0000000000000000000000000000000000000000..83b1090ffaf9aabee2f874360c52415ebfeaf1fd
--- /dev/null
+++ b/deepseek/lib/python3.10/site-packages/ray/_private/thirdparty/tabulate/tabulate.py
@@ -0,0 +1,2720 @@
+# -*- coding: utf-8 -*-
+
+# Version 0.9.0, commit bf58e37e6b35e3cc9a0bd740f752abfd32b6e6f8
+
+"""Pretty-print tabular data."""
+
+from collections import namedtuple
+from collections.abc import Iterable, Sized
+from html import escape as htmlescape
+from itertools import chain, zip_longest as izip_longest
+from functools import reduce, partial
+import io
+import re
+import math
+import textwrap
+import dataclasses
+
+try:
+    import wcwidth  # optional wide-character (CJK) support
+except ImportError:
+    wcwidth = None
+
+
+def _is_file(f):
+    return isinstance(f, io.IOBase)
+
+
+__all__ = ["tabulate", "tabulate_formats", "simple_separated_format"]
+try:
+    from .version import version as __version__  # noqa: F401
+except ImportError:
+    pass  # running __init__.py as a script, AppVeyor pytests
+
+
+# minimum extra space in headers
+MIN_PADDING = 2
+
+# Whether or not to preserve leading/trailing whitespace in data.
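+# (If set to True, cell values are not stripped of surrounding whitespace
+# before alignment; see _align_column_choose_padfn below.)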
+PRESERVE_WHITESPACE = False + +_DEFAULT_FLOATFMT = "g" +_DEFAULT_INTFMT = "" +_DEFAULT_MISSINGVAL = "" +# default align will be overwritten by "left", "center" or "decimal" +# depending on the formatter +_DEFAULT_ALIGN = "default" + + +# if True, enable wide-character (CJK) support +WIDE_CHARS_MODE = wcwidth is not None + +# Constant that can be used as part of passed rows to generate a separating line +# It is purposely an unprintable character, very unlikely to be used in a table +SEPARATING_LINE = "\001" + +Line = namedtuple("Line", ["begin", "hline", "sep", "end"]) + + +DataRow = namedtuple("DataRow", ["begin", "sep", "end"]) + + +# A table structure is supposed to be: +# +# --- lineabove --------- +# headerrow +# --- linebelowheader --- +# datarow +# --- linebetweenrows --- +# ... (more datarows) ... +# --- linebetweenrows --- +# last datarow +# --- linebelow --------- +# +# TableFormat's line* elements can be +# +# - either None, if the element is not used, +# - or a Line tuple, +# - or a function: [col_widths], [col_alignments] -> string. +# +# TableFormat's *row elements can be +# +# - either None, if the element is not used, +# - or a DataRow tuple, +# - or a function: [cell_values], [col_widths], [col_alignments] -> string. +# +# padding (an integer) is the amount of white space around data values. +# +# with_header_hide: +# +# - either None, to display all table elements unconditionally, +# - or a list of elements not to be displayed if the table has column headers. +# +TableFormat = namedtuple( + "TableFormat", + [ + "lineabove", + "linebelowheader", + "linebetweenrows", + "linebelow", + "headerrow", + "datarow", + "padding", + "with_header_hide", + ], +) + + +def _is_separating_line(row): + row_type = type(row) + is_sl = (row_type == list or row_type == str) and ( + (len(row) >= 1 and row[0] == SEPARATING_LINE) + or (len(row) >= 2 and row[1] == SEPARATING_LINE) + ) + return is_sl + + +def _pipe_segment_with_colons(align, colwidth): + """Return a segment of a horizontal line with optional colons which + indicate column's alignment (as in `pipe` output format).""" + w = colwidth + if align in ["right", "decimal"]: + return ("-" * (w - 1)) + ":" + elif align == "center": + return ":" + ("-" * (w - 2)) + ":" + elif align == "left": + return ":" + ("-" * (w - 1)) + else: + return "-" * w + + +def _pipe_line_with_colons(colwidths, colaligns): + """Return a horizontal line with optional colons to indicate column's + alignment (as in `pipe` output format).""" + if not colaligns: # e.g. 
printing an empty data frame (github issue #15)
+        colaligns = [""] * len(colwidths)
+    segments = [_pipe_segment_with_colons(a, w) for a, w in zip(colaligns, colwidths)]
+    return "|" + "|".join(segments) + "|"
+
+
+def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns):
+    alignment = {
+        "left": "",
+        "right": 'align="right"| ',
+        "center": 'align="center"| ',
+        "decimal": 'align="right"| ',
+    }
+    # hard-coded padding _around_ align attribute and value together
+    # rather than padding parameter which affects only the value
+    values_with_attrs = [
+        " " + alignment.get(a, "") + c + " " for c, a in zip(cell_values, colaligns)
+    ]
+    colsep = separator * 2
+    return (separator + colsep.join(values_with_attrs)).rstrip()
+
+
+def _textile_row_with_attrs(cell_values, colwidths, colaligns):
+    cell_values[0] += " "
+    alignment = {"left": "<.", "right": ">.", "center": "=.", "decimal": ">."}
+    values = (alignment.get(a, "") + v for a, v in zip(colaligns, cell_values))
+    return "|" + "|".join(values) + "|"
+
+
+def _html_begin_table_without_header(colwidths_ignore, colaligns_ignore):
+    # this table header will be suppressed if there is a header row
+    return "<table>\n"
+
+
+def _html_row_with_attrs(celltag, unsafe, cell_values, colwidths, colaligns):
+    alignment = {
+        "left": "",
+        "right": ' style="text-align: right;"',
+        "center": ' style="text-align: center;"',
+        "decimal": ' style="text-align: right;"',
+    }
+    if unsafe:
+        values_with_attrs = [
+            "<{0}{1}>{2}</{0}>".format(celltag, alignment.get(a, ""), c)
+            for c, a in zip(cell_values, colaligns)
+        ]
+    else:
+        values_with_attrs = [
+            "<{0}{1}>{2}</{0}>".format(celltag, alignment.get(a, ""), htmlescape(c))
+            for c, a in zip(cell_values, colaligns)
+        ]
+    rowhtml = "<tr>{}</tr>".format("".join(values_with_attrs).rstrip())
+    if celltag == "th":  # it's a header row, create a new table header
+        rowhtml = f"<table>\n<thead>\n{rowhtml}\n</thead>\n<tbody>"
+    return rowhtml
+
+
+def _moin_row_with_attrs(celltag, cell_values, colwidths, colaligns, header=""):
+    alignment = {
+        "left": "",
+        "right": '<style="text-align: right;">',
+        "center": '<style="text-align: center;">',
+        "decimal": '<style="text-align: right;">',
+    }
+    values_with_attrs = [
+        "{}{} {} ".format(celltag, alignment.get(a, ""), header + c + header)
+        for c, a in zip(cell_values, colaligns)
+    ]
+    return "".join(values_with_attrs) + "||"
+
+
+def _latex_line_begin_tabular(colwidths, colaligns, booktabs=False, longtable=False):
+    alignment = {"left": "l", "right": "r", "center": "c", "decimal": "r"}
+    tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns])
+    return "\n".join(
+        [
+            ("\\begin{tabular}{" if not longtable else "\\begin{longtable}{")
+            + tabular_columns_fmt
+            + "}",
+            "\\toprule" if booktabs else "\\hline",
+        ]
+    )
+
+
+def _asciidoc_row(is_header, *args):
+    """handle header and data rows for asciidoc format"""
+
+    def make_header_line(is_header, colwidths, colaligns):
+        # generate the column specifiers
+
+        alignment = {"left": "<", "right": ">", "center": "^", "decimal": ">"}
+        # use the column widths generated by tabulate for the asciidoc column width specifiers
+        asciidoc_alignments = zip(
+            colwidths, [alignment[colalign] for colalign in colaligns]
+        )
+        asciidoc_column_specifiers = [
+            "{:d}{}".format(width, align) for width, align in asciidoc_alignments
+        ]
+        header_list = ['cols="' + (",".join(asciidoc_column_specifiers)) + '"']
+
+        # generate the list of options (currently only "header")
+        options_list = []
+
+        if is_header:
+            options_list.append("header")
+
+        if options_list:
+            header_list += ['options="' + ",".join(options_list) + '"']
+
+        # generate the list of entries in the table header field
+
+        return "[{}]\n|====".format(",".join(header_list))
+
+    if len(args) == 2:
+        # two arguments are passed if called in the context of aboveline
+        # print the table header with column widths and optional header tag
+        return make_header_line(False, *args)
+
+    elif len(args) == 3:
+        # three arguments are passed if called in the context of dataline or headerline
+        # print the table line and make the aboveline if it is a header
+
+        cell_values, colwidths, colaligns = args
+        data_line = "|" + "|".join(cell_values)
+
+        if is_header:
+            return make_header_line(True, colwidths, colaligns) + "\n" + data_line
+        else:
+            return data_line
+
+    else:
+        raise ValueError(
+            " _asciidoc_row() requires two (colwidths, colaligns) "
+            + "or three (cell_values, colwidths, colaligns) arguments) "
+        )
+
+
+LATEX_ESCAPE_RULES = {
+    r"&": r"\&",
+    r"%": r"\%",
+    r"$": r"\$",
+    r"#": r"\#",
+    r"_": r"\_",
+    r"^": r"\^{}",
+    r"{": r"\{",
+    r"}": r"\}",
+    r"~": r"\textasciitilde{}",
+    "\\": r"\textbackslash{}",
+    r"<": r"\ensuremath{<}",
+    r">": r"\ensuremath{>}",
+}
+
+
+def _latex_row(cell_values, colwidths, colaligns, escrules=LATEX_ESCAPE_RULES):
+    def escape_char(c):
+        return escrules.get(c, c)
+
+    escaped_values = ["".join(map(escape_char, cell)) for cell in cell_values]
+    rowfmt = DataRow("", "&", "\\\\")
+    return _build_simple_row(escaped_values, rowfmt)
+
+
+def _rst_escape_first_column(rows, headers):
+    def escape_empty(val):
+        if isinstance(val, (str, bytes)) and not val.strip():
+            return ".."
+ else: + return val + + new_headers = list(headers) + new_rows = [] + if headers: + new_headers[0] = escape_empty(headers[0]) + for row in rows: + new_row = list(row) + if new_row: + new_row[0] = escape_empty(row[0]) + new_rows.append(new_row) + return new_rows, new_headers + + +_table_formats = { + "simple": TableFormat( + lineabove=Line("", "-", " ", ""), + linebelowheader=Line("", "-", " ", ""), + linebetweenrows=None, + linebelow=Line("", "-", " ", ""), + headerrow=DataRow("", " ", ""), + datarow=DataRow("", " ", ""), + padding=0, + with_header_hide=["lineabove", "linebelow"], + ), + "plain": TableFormat( + lineabove=None, + linebelowheader=None, + linebetweenrows=None, + linebelow=None, + headerrow=DataRow("", " ", ""), + datarow=DataRow("", " ", ""), + padding=0, + with_header_hide=None, + ), + "grid": TableFormat( + lineabove=Line("+", "-", "+", "+"), + linebelowheader=Line("+", "=", "+", "+"), + linebetweenrows=Line("+", "-", "+", "+"), + linebelow=Line("+", "-", "+", "+"), + headerrow=DataRow("|", "|", "|"), + datarow=DataRow("|", "|", "|"), + padding=1, + with_header_hide=None, + ), + "simple_grid": TableFormat( + lineabove=Line("┌", "─", "┬", "┐"), + linebelowheader=Line("├", "─", "┼", "┤"), + linebetweenrows=Line("├", "─", "┼", "┤"), + linebelow=Line("└", "─", "┴", "┘"), + headerrow=DataRow("│", "│", "│"), + datarow=DataRow("│", "│", "│"), + padding=1, + with_header_hide=None, + ), + "rounded_grid": TableFormat( + lineabove=Line("╭", "─", "┬", "╮"), + linebelowheader=Line("├", "─", "┼", "┤"), + linebetweenrows=Line("├", "─", "┼", "┤"), + linebelow=Line("╰", "─", "┴", "╯"), + headerrow=DataRow("│", "│", "│"), + datarow=DataRow("│", "│", "│"), + padding=1, + with_header_hide=None, + ), + "heavy_grid": TableFormat( + lineabove=Line("┏", "━", "┳", "┓"), + linebelowheader=Line("┣", "━", "╋", "┫"), + linebetweenrows=Line("┣", "━", "╋", "┫"), + linebelow=Line("┗", "━", "┻", "┛"), + headerrow=DataRow("┃", "┃", "┃"), + datarow=DataRow("┃", "┃", "┃"), + padding=1, + with_header_hide=None, + ), + "mixed_grid": TableFormat( + lineabove=Line("┍", "━", "┯", "┑"), + linebelowheader=Line("┝", "━", "┿", "┥"), + linebetweenrows=Line("├", "─", "┼", "┤"), + linebelow=Line("┕", "━", "┷", "┙"), + headerrow=DataRow("│", "│", "│"), + datarow=DataRow("│", "│", "│"), + padding=1, + with_header_hide=None, + ), + "double_grid": TableFormat( + lineabove=Line("╔", "═", "╦", "╗"), + linebelowheader=Line("╠", "═", "╬", "╣"), + linebetweenrows=Line("╠", "═", "╬", "╣"), + linebelow=Line("╚", "═", "╩", "╝"), + headerrow=DataRow("║", "║", "║"), + datarow=DataRow("║", "║", "║"), + padding=1, + with_header_hide=None, + ), + "fancy_grid": TableFormat( + lineabove=Line("╒", "═", "╤", "╕"), + linebelowheader=Line("╞", "═", "╪", "╡"), + linebetweenrows=Line("├", "─", "┼", "┤"), + linebelow=Line("╘", "═", "╧", "╛"), + headerrow=DataRow("│", "│", "│"), + datarow=DataRow("│", "│", "│"), + padding=1, + with_header_hide=None, + ), + "outline": TableFormat( + lineabove=Line("+", "-", "+", "+"), + linebelowheader=Line("+", "=", "+", "+"), + linebetweenrows=None, + linebelow=Line("+", "-", "+", "+"), + headerrow=DataRow("|", "|", "|"), + datarow=DataRow("|", "|", "|"), + padding=1, + with_header_hide=None, + ), + "simple_outline": TableFormat( + lineabove=Line("┌", "─", "┬", "┐"), + linebelowheader=Line("├", "─", "┼", "┤"), + linebetweenrows=None, + linebelow=Line("└", "─", "┴", "┘"), + headerrow=DataRow("│", "│", "│"), + datarow=DataRow("│", "│", "│"), + padding=1, + with_header_hide=None, + ), + "rounded_outline": 
TableFormat( + lineabove=Line("╭", "─", "┬", "╮"), + linebelowheader=Line("├", "─", "┼", "┤"), + linebetweenrows=None, + linebelow=Line("╰", "─", "┴", "╯"), + headerrow=DataRow("│", "│", "│"), + datarow=DataRow("│", "│", "│"), + padding=1, + with_header_hide=None, + ), + "heavy_outline": TableFormat( + lineabove=Line("┏", "━", "┳", "┓"), + linebelowheader=Line("┣", "━", "╋", "┫"), + linebetweenrows=None, + linebelow=Line("┗", "━", "┻", "┛"), + headerrow=DataRow("┃", "┃", "┃"), + datarow=DataRow("┃", "┃", "┃"), + padding=1, + with_header_hide=None, + ), + "mixed_outline": TableFormat( + lineabove=Line("┍", "━", "┯", "┑"), + linebelowheader=Line("┝", "━", "┿", "┥"), + linebetweenrows=None, + linebelow=Line("┕", "━", "┷", "┙"), + headerrow=DataRow("│", "│", "│"), + datarow=DataRow("│", "│", "│"), + padding=1, + with_header_hide=None, + ), + "double_outline": TableFormat( + lineabove=Line("╔", "═", "╦", "╗"), + linebelowheader=Line("╠", "═", "╬", "╣"), + linebetweenrows=None, + linebelow=Line("╚", "═", "╩", "╝"), + headerrow=DataRow("║", "║", "║"), + datarow=DataRow("║", "║", "║"), + padding=1, + with_header_hide=None, + ), + "fancy_outline": TableFormat( + lineabove=Line("╒", "═", "╤", "╕"), + linebelowheader=Line("╞", "═", "╪", "╡"), + linebetweenrows=None, + linebelow=Line("╘", "═", "╧", "╛"), + headerrow=DataRow("│", "│", "│"), + datarow=DataRow("│", "│", "│"), + padding=1, + with_header_hide=None, + ), + "github": TableFormat( + lineabove=Line("|", "-", "|", "|"), + linebelowheader=Line("|", "-", "|", "|"), + linebetweenrows=None, + linebelow=None, + headerrow=DataRow("|", "|", "|"), + datarow=DataRow("|", "|", "|"), + padding=1, + with_header_hide=["lineabove"], + ), + "pipe": TableFormat( + lineabove=_pipe_line_with_colons, + linebelowheader=_pipe_line_with_colons, + linebetweenrows=None, + linebelow=None, + headerrow=DataRow("|", "|", "|"), + datarow=DataRow("|", "|", "|"), + padding=1, + with_header_hide=["lineabove"], + ), + "orgtbl": TableFormat( + lineabove=None, + linebelowheader=Line("|", "-", "+", "|"), + linebetweenrows=None, + linebelow=None, + headerrow=DataRow("|", "|", "|"), + datarow=DataRow("|", "|", "|"), + padding=1, + with_header_hide=None, + ), + "jira": TableFormat( + lineabove=None, + linebelowheader=None, + linebetweenrows=None, + linebelow=None, + headerrow=DataRow("||", "||", "||"), + datarow=DataRow("|", "|", "|"), + padding=1, + with_header_hide=None, + ), + "presto": TableFormat( + lineabove=None, + linebelowheader=Line("", "-", "+", ""), + linebetweenrows=None, + linebelow=None, + headerrow=DataRow("", "|", ""), + datarow=DataRow("", "|", ""), + padding=1, + with_header_hide=None, + ), + "pretty": TableFormat( + lineabove=Line("+", "-", "+", "+"), + linebelowheader=Line("+", "-", "+", "+"), + linebetweenrows=None, + linebelow=Line("+", "-", "+", "+"), + headerrow=DataRow("|", "|", "|"), + datarow=DataRow("|", "|", "|"), + padding=1, + with_header_hide=None, + ), + "psql": TableFormat( + lineabove=Line("+", "-", "+", "+"), + linebelowheader=Line("|", "-", "+", "|"), + linebetweenrows=None, + linebelow=Line("+", "-", "+", "+"), + headerrow=DataRow("|", "|", "|"), + datarow=DataRow("|", "|", "|"), + padding=1, + with_header_hide=None, + ), + "rst": TableFormat( + lineabove=Line("", "=", " ", ""), + linebelowheader=Line("", "=", " ", ""), + linebetweenrows=None, + linebelow=Line("", "=", " ", ""), + headerrow=DataRow("", " ", ""), + datarow=DataRow("", " ", ""), + padding=0, + with_header_hide=None, + ), + "mediawiki": TableFormat( + lineabove=Line( + '{| 
class="wikitable" style="text-align: left;"', + "", + "", + "\n|+ \n|-", + ), + linebelowheader=Line("|-", "", "", ""), + linebetweenrows=Line("|-", "", "", ""), + linebelow=Line("|}", "", "", ""), + headerrow=partial(_mediawiki_row_with_attrs, "!"), + datarow=partial(_mediawiki_row_with_attrs, "|"), + padding=0, + with_header_hide=None, + ), + "moinmoin": TableFormat( + lineabove=None, + linebelowheader=None, + linebetweenrows=None, + linebelow=None, + headerrow=partial(_moin_row_with_attrs, "||", header="'''"), + datarow=partial(_moin_row_with_attrs, "||"), + padding=1, + with_header_hide=None, + ), + "youtrack": TableFormat( + lineabove=None, + linebelowheader=None, + linebetweenrows=None, + linebelow=None, + headerrow=DataRow("|| ", " || ", " || "), + datarow=DataRow("| ", " | ", " |"), + padding=1, + with_header_hide=None, + ), + "html": TableFormat( + lineabove=_html_begin_table_without_header, + linebelowheader="", + linebetweenrows=None, + linebelow=Line("\n
", "", "", ""), + headerrow=partial(_html_row_with_attrs, "th", False), + datarow=partial(_html_row_with_attrs, "td", False), + padding=0, + with_header_hide=["lineabove"], + ), + "unsafehtml": TableFormat( + lineabove=_html_begin_table_without_header, + linebelowheader="", + linebetweenrows=None, + linebelow=Line("\n", "", "", ""), + headerrow=partial(_html_row_with_attrs, "th", True), + datarow=partial(_html_row_with_attrs, "td", True), + padding=0, + with_header_hide=["lineabove"], + ), + "latex": TableFormat( + lineabove=_latex_line_begin_tabular, + linebelowheader=Line("\\hline", "", "", ""), + linebetweenrows=None, + linebelow=Line("\\hline\n\\end{tabular}", "", "", ""), + headerrow=_latex_row, + datarow=_latex_row, + padding=1, + with_header_hide=None, + ), + "latex_raw": TableFormat( + lineabove=_latex_line_begin_tabular, + linebelowheader=Line("\\hline", "", "", ""), + linebetweenrows=None, + linebelow=Line("\\hline\n\\end{tabular}", "", "", ""), + headerrow=partial(_latex_row, escrules={}), + datarow=partial(_latex_row, escrules={}), + padding=1, + with_header_hide=None, + ), + "latex_booktabs": TableFormat( + lineabove=partial(_latex_line_begin_tabular, booktabs=True), + linebelowheader=Line("\\midrule", "", "", ""), + linebetweenrows=None, + linebelow=Line("\\bottomrule\n\\end{tabular}", "", "", ""), + headerrow=_latex_row, + datarow=_latex_row, + padding=1, + with_header_hide=None, + ), + "latex_longtable": TableFormat( + lineabove=partial(_latex_line_begin_tabular, longtable=True), + linebelowheader=Line("\\hline\n\\endhead", "", "", ""), + linebetweenrows=None, + linebelow=Line("\\hline\n\\end{longtable}", "", "", ""), + headerrow=_latex_row, + datarow=_latex_row, + padding=1, + with_header_hide=None, + ), + "tsv": TableFormat( + lineabove=None, + linebelowheader=None, + linebetweenrows=None, + linebelow=None, + headerrow=DataRow("", "\t", ""), + datarow=DataRow("", "\t", ""), + padding=0, + with_header_hide=None, + ), + "textile": TableFormat( + lineabove=None, + linebelowheader=None, + linebetweenrows=None, + linebelow=None, + headerrow=DataRow("|_. ", "|_.", "|"), + datarow=_textile_row_with_attrs, + padding=1, + with_header_hide=None, + ), + "asciidoc": TableFormat( + lineabove=partial(_asciidoc_row, False), + linebelowheader=None, + linebetweenrows=None, + linebelow=Line("|====", "", "", ""), + headerrow=partial(_asciidoc_row, True), + datarow=partial(_asciidoc_row, False), + padding=1, + with_header_hide=["lineabove"], + ), +} + + +tabulate_formats = list(sorted(_table_formats.keys())) + +# The table formats for which multiline cells will be folded into subsequent +# table rows. The key is the original format specified at the API. The value is +# the format that will be used to represent the original format. +multiline_formats = { + "plain": "plain", + "simple": "simple", + "grid": "grid", + "simple_grid": "simple_grid", + "rounded_grid": "rounded_grid", + "heavy_grid": "heavy_grid", + "mixed_grid": "mixed_grid", + "double_grid": "double_grid", + "fancy_grid": "fancy_grid", + "pipe": "pipe", + "orgtbl": "orgtbl", + "jira": "jira", + "presto": "presto", + "pretty": "pretty", + "psql": "psql", + "rst": "rst", +} + +# TODO: Add multiline support for the remaining table formats: +# - mediawiki: Replace \n with
+# - moinmoin: TBD
+# - youtrack: TBD
+# - html: Replace \n with <br>
+# - latex*: Use "makecell" package: In header, replace X\nY with
+#   \thead{X\\Y} and in data row, replace X\nY with \makecell{X\\Y}
+# - tsv: TBD
+# - textile: Replace \n with <br/>
(must be well-formed XML) + +_multiline_codes = re.compile(r"\r|\n|\r\n") +_multiline_codes_bytes = re.compile(b"\r|\n|\r\n") + +# Handle ANSI escape sequences for both control sequence introducer (CSI) and +# operating system command (OSC). Both of these begin with 0x1b (or octal 033), +# which will be shown below as ESC. +# +# CSI ANSI escape codes have the following format, defined in section 5.4 of ECMA-48: +# +# CSI: ESC followed by the '[' character (0x5b) +# Parameter Bytes: 0..n bytes in the range 0x30-0x3f +# Intermediate Bytes: 0..n bytes in the range 0x20-0x2f +# Final Byte: a single byte in the range 0x40-0x7e +# +# Also include the terminal hyperlink sequences as described here: +# https://gist.github.com/egmontkob/eb114294efbcd5adb1944c9f3cb5feda +# +# OSC 8 ; params ; uri ST display_text OSC 8 ;; ST +# +# Example: \x1b]8;;https://example.com\x5ctext to show\x1b]8;;\x5c +# +# Where: +# OSC: ESC followed by the ']' character (0x5d) +# params: 0..n optional key value pairs separated by ':' (e.g. foo=bar:baz=qux:abc=123) +# URI: the actual URI with protocol scheme (e.g. https://, file://, ftp://) +# ST: ESC followed by the '\' character (0x5c) +_esc = r"\x1b" +_csi = rf"{_esc}\[" +_osc = rf"{_esc}\]" +_st = rf"{_esc}\\" + +_ansi_escape_pat = rf""" + ( + # terminal colors, etc + {_csi} # CSI + [\x30-\x3f]* # parameter bytes + [\x20-\x2f]* # intermediate bytes + [\x40-\x7e] # final byte + | + # terminal hyperlinks + {_osc}8; # OSC opening + (\w+=\w+:?)* # key=value params list (submatch 2) + ; # delimiter + ([^{_esc}]+) # URI - anything but ESC (submatch 3) + {_st} # ST + ([^{_esc}]+) # link text - anything but ESC (submatch 4) + {_osc}8;;{_st} # "closing" OSC sequence + ) +""" +_ansi_codes = re.compile(_ansi_escape_pat, re.VERBOSE) +_ansi_codes_bytes = re.compile(_ansi_escape_pat.encode("utf8"), re.VERBOSE) +_ansi_color_reset_code = "\033[0m" + +_float_with_thousands_separators = re.compile( + r"^(([+-]?[0-9]{1,3})(?:,([0-9]{3}))*)?(?(1)\.[0-9]*|\.[0-9]+)?$" +) + + +def simple_separated_format(separator): + """Construct a simple TableFormat with columns separated by a separator. 
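+
+    For instance, a comma separator gives minimal CSV-style rows (the values
+    here are illustrative):
+
+    >>> csv = simple_separated_format(",") ; \
+        tabulate([["a", 1]], tablefmt=csv) == 'a,1'
+    True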
+ + >>> tsv = simple_separated_format("\\t") ; \ + tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \\t 1\\nspam\\t23' + True + + """ + return TableFormat( + None, + None, + None, + None, + headerrow=DataRow("", separator, ""), + datarow=DataRow("", separator, ""), + padding=0, + with_header_hide=None, + ) + + +def _isnumber_with_thousands_separator(string): + """ + >>> _isnumber_with_thousands_separator(".") + False + >>> _isnumber_with_thousands_separator("1") + True + >>> _isnumber_with_thousands_separator("1.") + True + >>> _isnumber_with_thousands_separator(".1") + True + >>> _isnumber_with_thousands_separator("1000") + False + >>> _isnumber_with_thousands_separator("1,000") + True + >>> _isnumber_with_thousands_separator("1,0000") + False + >>> _isnumber_with_thousands_separator("1,000.1234") + True + >>> _isnumber_with_thousands_separator(b"1,000.1234") + True + >>> _isnumber_with_thousands_separator("+1,000.1234") + True + >>> _isnumber_with_thousands_separator("-1,000.1234") + True + """ + try: + string = string.decode() + except (UnicodeDecodeError, AttributeError): + pass + + return bool(re.match(_float_with_thousands_separators, string)) + + +def _isconvertible(conv, string): + try: + conv(string) + return True + except (ValueError, TypeError): + return False + + +def _isnumber(string): + """ + >>> _isnumber("123.45") + True + >>> _isnumber("123") + True + >>> _isnumber("spam") + False + >>> _isnumber("123e45678") + False + >>> _isnumber("inf") + True + """ + if not _isconvertible(float, string): + return False + elif isinstance(string, (str, bytes)) and ( + math.isinf(float(string)) or math.isnan(float(string)) + ): + return string.lower() in ["inf", "-inf", "nan"] + return True + + +def _isint(string, inttype=int): + """ + >>> _isint("123") + True + >>> _isint("123.45") + False + """ + return ( + type(string) is inttype + or isinstance(string, (bytes, str)) + and _isconvertible(inttype, string) + ) + + +def _isbool(string): + """ + >>> _isbool(True) + True + >>> _isbool("False") + True + >>> _isbool(1) + False + """ + return type(string) is bool or ( + isinstance(string, (bytes, str)) and string in ("True", "False") + ) + + +def _type(string, has_invisible=True, numparse=True): + """The least generic type (type(None), int, float, str, unicode). + + >>> _type(None) is type(None) + True + >>> _type("foo") is type("") + True + >>> _type("1") is type(1) + True + >>> _type('\x1b[31m42\x1b[0m') is type(42) + True + >>> _type('\x1b[31m42\x1b[0m') is type(42) + True + + """ + + if has_invisible and isinstance(string, (str, bytes)): + string = _strip_ansi(string) + + if string is None: + return type(None) + elif hasattr(string, "isoformat"): # datetime.datetime, date, and time + return str + elif _isbool(string): + return bool + elif _isint(string) and numparse: + return int + elif _isnumber(string) and numparse: + return float + elif isinstance(string, bytes): + return bytes + else: + return str + + +def _afterpoint(string): + """Symbols after a decimal point, -1 if the string lacks the decimal point. 
+ + >>> _afterpoint("123.45") + 2 + >>> _afterpoint("1001") + -1 + >>> _afterpoint("eggs") + -1 + >>> _afterpoint("123e45") + 2 + >>> _afterpoint("123,456.78") + 2 + + """ + if _isnumber(string) or _isnumber_with_thousands_separator(string): + if _isint(string): + return -1 + else: + pos = string.rfind(".") + pos = string.lower().rfind("e") if pos < 0 else pos + if pos >= 0: + return len(string) - pos - 1 + else: + return -1 # no point + else: + return -1 # not a number + + +def _padleft(width, s): + """Flush right. + + >>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430' + True + + """ + fmt = "{0:>%ds}" % width + return fmt.format(s) + + +def _padright(width, s): + """Flush left. + + >>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430 ' + True + + """ + fmt = "{0:<%ds}" % width + return fmt.format(s) + + +def _padboth(width, s): + """Center string. + + >>> _padboth(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430 ' + True + + """ + fmt = "{0:^%ds}" % width + return fmt.format(s) + + +def _padnone(ignore_width, s): + return s + + +def _strip_ansi(s): + r"""Remove ANSI escape sequences, both CSI (color codes, etc) and OSC hyperlinks. + + CSI sequences are simply removed from the output, while OSC hyperlinks are replaced + with the link text. Note: it may be desirable to show the URI instead but this is not + supported. + + >>> repr(_strip_ansi('\x1B]8;;https://example.com\x1B\\This is a link\x1B]8;;\x1B\\')) + "'This is a link'" + + >>> repr(_strip_ansi('\x1b[31mred\x1b[0m text')) + "'red text'" + + """ + if isinstance(s, str): + return _ansi_codes.sub(r"\4", s) + else: # a bytestring + return _ansi_codes_bytes.sub(r"\4", s) + + +def _visible_width(s): + """Visible width of a printed string. ANSI color codes are removed. 
+ + >>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world") + (5, 5) + + """ + # optional wide-character support + if wcwidth is not None and WIDE_CHARS_MODE: + len_fn = wcwidth.wcswidth + else: + len_fn = len + if isinstance(s, (str, bytes)): + return len_fn(_strip_ansi(s)) + else: + return len_fn(str(s)) + + +def _is_multiline(s): + if isinstance(s, str): + return bool(re.search(_multiline_codes, s)) + else: # a bytestring + return bool(re.search(_multiline_codes_bytes, s)) + + +def _multiline_width(multiline_s, line_width_fn=len): + """Visible width of a potentially multiline content.""" + return max(map(line_width_fn, re.split("[\r\n]", multiline_s))) + + +def _choose_width_fn(has_invisible, enable_widechars, is_multiline): + """Return a function to calculate visible cell width.""" + if has_invisible: + line_width_fn = _visible_width + elif enable_widechars: # optional wide-character support if available + line_width_fn = wcwidth.wcswidth + else: + line_width_fn = len + if is_multiline: + width_fn = lambda s: _multiline_width(s, line_width_fn) # noqa + else: + width_fn = line_width_fn + return width_fn + + +def _align_column_choose_padfn(strings, alignment, has_invisible): + if alignment == "right": + if not PRESERVE_WHITESPACE: + strings = [s.strip() for s in strings] + padfn = _padleft + elif alignment == "center": + if not PRESERVE_WHITESPACE: + strings = [s.strip() for s in strings] + padfn = _padboth + elif alignment == "decimal": + if has_invisible: + decimals = [_afterpoint(_strip_ansi(s)) for s in strings] + else: + decimals = [_afterpoint(s) for s in strings] + maxdecimals = max(decimals) + strings = [s + (maxdecimals - decs) * " " for s, decs in zip(strings, decimals)] + padfn = _padleft + elif not alignment: + padfn = _padnone + else: + if not PRESERVE_WHITESPACE: + strings = [s.strip() for s in strings] + padfn = _padright + return strings, padfn + + +def _align_column_choose_width_fn(has_invisible, enable_widechars, is_multiline): + if has_invisible: + line_width_fn = _visible_width + elif enable_widechars: # optional wide-character support if available + line_width_fn = wcwidth.wcswidth + else: + line_width_fn = len + if is_multiline: + width_fn = lambda s: _align_column_multiline_width(s, line_width_fn) # noqa + else: + width_fn = line_width_fn + return width_fn + + +def _align_column_multiline_width(multiline_s, line_width_fn=len): + """Visible width of a potentially multiline content.""" + return list(map(line_width_fn, re.split("[\r\n]", multiline_s))) + + +def _flat_list(nested_list): + ret = [] + for item in nested_list: + if isinstance(item, list): + for subitem in item: + ret.append(subitem) + else: + ret.append(item) + return ret + + +def _align_column( + strings, + alignment, + minwidth=0, + has_invisible=True, + enable_widechars=False, + is_multiline=False, +): + """[string] -> [padded_string]""" + strings, padfn = _align_column_choose_padfn(strings, alignment, has_invisible) + width_fn = _align_column_choose_width_fn( + has_invisible, enable_widechars, is_multiline + ) + + s_widths = list(map(width_fn, strings)) + maxwidth = max(max(_flat_list(s_widths)), minwidth) + # TODO: refactor column alignment in single-line and multiline modes + if is_multiline: + if not enable_widechars and not has_invisible: + padded_strings = [ + "\n".join([padfn(maxwidth, s) for s in ms.splitlines()]) + for ms in strings + ] + else: + # enable wide-character width corrections + s_lens = [[len(s) for s in re.split("[\r\n]", ms)] for ms in strings] + visible_widths = [ + 
[maxwidth - (w - l) for w, l in zip(mw, ml)] + for mw, ml in zip(s_widths, s_lens) + ] + # wcswidth and _visible_width don't count invisible characters; + # padfn doesn't need to apply another correction + padded_strings = [ + "\n".join([padfn(w, s) for s, w in zip((ms.splitlines() or ms), mw)]) + for ms, mw in zip(strings, visible_widths) + ] + else: # single-line cell values + if not enable_widechars and not has_invisible: + padded_strings = [padfn(maxwidth, s) for s in strings] + else: + # enable wide-character width corrections + s_lens = list(map(len, strings)) + visible_widths = [maxwidth - (w - l) for w, l in zip(s_widths, s_lens)] + # wcswidth and _visible_width don't count invisible characters; + # padfn doesn't need to apply another correction + padded_strings = [padfn(w, s) for s, w in zip(strings, visible_widths)] + return padded_strings + + +def _more_generic(type1, type2): + types = { + type(None): 0, # noqa + bool: 1, + int: 2, + float: 3, + bytes: 4, + str: 5, + } + invtypes = { + 5: str, + 4: bytes, + 3: float, + 2: int, + 1: bool, + 0: type(None), + } + moregeneric = max(types.get(type1, 5), types.get(type2, 5)) + return invtypes[moregeneric] + + +def _column_type(strings, has_invisible=True, numparse=True): + """The least generic type all column values are convertible to. + + >>> _column_type([True, False]) is bool + True + >>> _column_type(["1", "2"]) is int + True + >>> _column_type(["1", "2.3"]) is float + True + >>> _column_type(["1", "2.3", "four"]) is str + True + >>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is str + True + >>> _column_type([None, "brux"]) is str + True + >>> _column_type([1, 2, None]) is int + True + >>> import datetime as dt + >>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is str + True + + """ + types = [_type(s, has_invisible, numparse) for s in strings] + return reduce(_more_generic, types, bool) + + +def _format(val, valtype, floatfmt, intfmt, missingval="", has_invisible=True): + """Format a value according to its type. + + Unicode is supported: + + >>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \ + tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \ + good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u0430\\u0437 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \ + tabulate(tbl, headers=hrow) == good_result + True + + """ # noqa + if val is None: + return missingval + + if valtype is str: + return f"{val}" + elif valtype is int: + return format(val, intfmt) + elif valtype is bytes: + try: + return str(val, "ascii") + except (TypeError, UnicodeDecodeError): + return str(val) + elif valtype is float: + is_a_colored_number = has_invisible and isinstance(val, (str, bytes)) + if is_a_colored_number: + raw_val = _strip_ansi(val) + formatted_val = format(float(raw_val), floatfmt) + return val.replace(raw_val, formatted_val) + else: + return format(float(val), floatfmt) + else: + return f"{val}" + + +def _align_header( + header, alignment, width, visible_width, is_multiline=False, width_fn=None +): + "Pad string header to width chars given known visible_width of the header." 
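+    # e.g. (illustrative) _align_header("hdr", "center", 7, 3) returns "  hdr  ";
+    # `width` is first widened by len(header) - visible_width, the number of
+    # invisible (ANSI) characters, so colored headers still pad correctly.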
+ if is_multiline: + header_lines = re.split(_multiline_codes, header) + padded_lines = [ + _align_header(h, alignment, width, width_fn(h)) for h in header_lines + ] + return "\n".join(padded_lines) + # else: not multiline + ninvisible = len(header) - visible_width + width += ninvisible + if alignment == "left": + return _padright(width, header) + elif alignment == "center": + return _padboth(width, header) + elif not alignment: + return f"{header}" + else: + return _padleft(width, header) + + +def _remove_separating_lines(rows): + if type(rows) == list: + separating_lines = [] + sans_rows = [] + for index, row in enumerate(rows): + if _is_separating_line(row): + separating_lines.append(index) + else: + sans_rows.append(row) + return sans_rows, separating_lines + else: + return rows, None + + +def _reinsert_separating_lines(rows, separating_lines): + if separating_lines: + for index in separating_lines: + rows.insert(index, SEPARATING_LINE) + + +def _prepend_row_index(rows, index): + """Add a left-most index column.""" + if index is None or index is False: + return rows + if isinstance(index, Sized) and len(index) != len(rows): + raise ValueError( + "index must be as long as the number of data rows: " + + "len(index)={} len(rows)={}".format(len(index), len(rows)) + ) + sans_rows, separating_lines = _remove_separating_lines(rows) + new_rows = [] + index_iter = iter(index) + for row in sans_rows: + index_v = next(index_iter) + new_rows.append([index_v] + list(row)) + rows = new_rows + _reinsert_separating_lines(rows, separating_lines) + return rows + + +def _bool(val): + "A wrapper around standard bool() which doesn't throw on NumPy arrays" + try: + return bool(val) + except ValueError: # val is likely to be a numpy array with many elements + return False + + +def _normalize_tabular_data(tabular_data, headers, showindex="default"): + """Transform a supported data type to a list of lists, and a list of headers. + + Supported tabular data types: + + * list-of-lists or another iterable of iterables + + * list of named tuples (usually used with headers="keys") + + * list of dicts (usually used with headers="keys") + + * list of OrderedDicts (usually used with headers="keys") + + * list of dataclasses (Python 3.7+ only, usually used with headers="keys") + + * 2D NumPy arrays + + * NumPy record arrays (usually used with headers="keys") + + * dict of iterables (usually used with headers="keys") + + * pandas.DataFrame (usually used with headers="keys") + + The first row can be used as headers if headers="firstrow", + column indices can be used as headers if headers="keys". + + If showindex="default", show row indices of the pandas.DataFrame. + If showindex="always", show row indices for all types of data. + If showindex="never", don't show row indices for all types of data. + If showindex is an iterable, show its values as row indices. + + """ + + try: + bool(headers) + is_headers2bool_broken = False # noqa + except ValueError: # numpy.ndarray, pandas.core.index.Index, ... + is_headers2bool_broken = True # noqa + headers = list(headers) + + index = None + if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"): + # dict-like and pandas.DataFrame? 
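+        # (a plain dict takes the first branch below because dict.values is a
+        # bound method and thus callable, while DataFrame.values is an ndarray
+        # attribute and is not)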
+ if hasattr(tabular_data.values, "__call__"): + # likely a conventional dict + keys = tabular_data.keys() + rows = list( + izip_longest(*tabular_data.values()) + ) # columns have to be transposed + elif hasattr(tabular_data, "index"): + # values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0) + keys = list(tabular_data) + if ( + showindex in ["default", "always", True] + and tabular_data.index.name is not None + ): + if isinstance(tabular_data.index.name, list): + keys[:0] = tabular_data.index.name + else: + keys[:0] = [tabular_data.index.name] + vals = tabular_data.values # values matrix doesn't need to be transposed + # for DataFrames add an index per default + index = list(tabular_data.index) + rows = [list(row) for row in vals] + else: + raise ValueError("tabular data doesn't appear to be a dict or a DataFrame") + + if headers == "keys": + headers = list(map(str, keys)) # headers should be strings + + else: # it's a usual iterable of iterables, or a NumPy array, or an iterable of dataclasses + rows = list(tabular_data) + + if headers == "keys" and not rows: + # an empty table (issue #81) + headers = [] + elif ( + headers == "keys" + and hasattr(tabular_data, "dtype") + and getattr(tabular_data.dtype, "names") + ): + # numpy record array + headers = tabular_data.dtype.names + elif ( + headers == "keys" + and len(rows) > 0 + and isinstance(rows[0], tuple) + and hasattr(rows[0], "_fields") + ): + # namedtuple + headers = list(map(str, rows[0]._fields)) + elif len(rows) > 0 and hasattr(rows[0], "keys") and hasattr(rows[0], "values"): + # dict-like object + uniq_keys = set() # implements hashed lookup + keys = [] # storage for set + if headers == "firstrow": + firstdict = rows[0] if len(rows) > 0 else {} + keys.extend(firstdict.keys()) + uniq_keys.update(keys) + rows = rows[1:] + for row in rows: + for k in row.keys(): + # Save unique items in input order + if k not in uniq_keys: + keys.append(k) + uniq_keys.add(k) + if headers == "keys": + headers = keys + elif isinstance(headers, dict): + # a dict of headers for a list of dicts + headers = [headers.get(k, k) for k in keys] + headers = list(map(str, headers)) + elif headers == "firstrow": + if len(rows) > 0: + headers = [firstdict.get(k, k) for k in keys] + headers = list(map(str, headers)) + else: + headers = [] + elif headers: + raise ValueError( + "headers for a list of dicts is not a dict or a keyword" + ) + rows = [[row.get(k) for k in keys] for row in rows] + + elif ( + headers == "keys" + and hasattr(tabular_data, "description") + and hasattr(tabular_data, "fetchone") + and hasattr(tabular_data, "rowcount") + ): + # Python Database API cursor object (PEP 0249) + # print tabulate(cursor, headers='keys') + headers = [column[0] for column in tabular_data.description] + + elif ( + dataclasses is not None + and len(rows) > 0 + and dataclasses.is_dataclass(rows[0]) + ): + # Python 3.7+'s dataclass + field_names = [field.name for field in dataclasses.fields(rows[0])] + if headers == "keys": + headers = field_names + rows = [[getattr(row, f) for f in field_names] for row in rows] + + elif headers == "keys" and len(rows) > 0: + # keys are column indices + headers = list(map(str, range(len(rows[0])))) + + # take headers from the first row if necessary + if headers == "firstrow" and len(rows) > 0: + if index is not None: + headers = [index[0]] + list(rows[0]) + index = index[1:] + else: + headers = rows[0] + headers = list(map(str, headers)) # headers should be strings + rows = rows[1:] + elif headers == 
"firstrow": + headers = [] + + headers = list(map(str, headers)) + # rows = list(map(list, rows)) + rows = list(map(lambda r: r if _is_separating_line(r) else list(r), rows)) + + # add or remove an index column + showindex_is_a_str = type(showindex) in [str, bytes] + if showindex == "default" and index is not None: + rows = _prepend_row_index(rows, index) + elif isinstance(showindex, Sized) and not showindex_is_a_str: + rows = _prepend_row_index(rows, list(showindex)) + elif isinstance(showindex, Iterable) and not showindex_is_a_str: + rows = _prepend_row_index(rows, showindex) + elif showindex == "always" or (_bool(showindex) and not showindex_is_a_str): + if index is None: + index = list(range(len(rows))) + rows = _prepend_row_index(rows, index) + elif showindex == "never" or (not _bool(showindex) and not showindex_is_a_str): + pass + + # pad with empty headers for initial columns if necessary + if headers and len(rows) > 0: + nhs = len(headers) + ncols = len(rows[0]) + if nhs < ncols: + headers = [""] * (ncols - nhs) + headers + + return rows, headers + + +def _wrap_text_to_colwidths(list_of_lists, colwidths, numparses=True): + numparses = _expand_iterable(numparses, len(list_of_lists[0]), True) + + result = [] + + for row in list_of_lists: + new_row = [] + for cell, width, numparse in zip(row, colwidths, numparses): + if _isnumber(cell) and numparse: + new_row.append(cell) + continue + + if width is not None: + wrapper = _CustomTextWrap(width=width) + # Cast based on our internal type handling + # Any future custom formatting of types (such as datetimes) + # may need to be more explicit than just `str` of the object + casted_cell = ( + str(cell) if _isnumber(cell) else _type(cell, numparse)(cell) + ) + wrapped = wrapper.wrap(casted_cell) + new_row.append("\n".join(wrapped)) + else: + new_row.append(cell) + result.append(new_row) + + return result + + +def _to_str(s, encoding="utf8", errors="ignore"): + """ + A type safe wrapper for converting a bytestring to str. This is essentially just + a wrapper around .decode() intended for use with things like map(), but with some + specific behavior: + + 1. if the given parameter is not a bytestring, it is returned unmodified + 2. decode() is called for the given parameter and assumes utf8 encoding, but the + default error behavior is changed from 'strict' to 'ignore' + + >>> repr(_to_str(b'foo')) + "'foo'" + + >>> repr(_to_str('foo')) + "'foo'" + + >>> repr(_to_str(42)) + "'42'" + + """ + if isinstance(s, bytes): + return s.decode(encoding=encoding, errors=errors) + return str(s) + + +def tabulate( + tabular_data, + headers=(), + tablefmt="simple", + floatfmt=_DEFAULT_FLOATFMT, + intfmt=_DEFAULT_INTFMT, + numalign=_DEFAULT_ALIGN, + stralign=_DEFAULT_ALIGN, + missingval=_DEFAULT_MISSINGVAL, + showindex="default", + disable_numparse=False, + colalign=None, + maxcolwidths=None, + rowalign=None, + maxheadercolwidths=None, +): + """Format a fixed width table for pretty printing. + + >>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]])) + --- --------- + 1 2.34 + -56 8.999 + 2 10001 + --- --------- + + The first required argument (`tabular_data`) can be a + list-of-lists (or another iterable of iterables), a list of named + tuples, a dictionary of iterables, an iterable of dictionaries, + an iterable of dataclasses (Python 3.7+), a two-dimensional NumPy array, + NumPy record array, or a Pandas' dataframe. 
+ + + Table headers + ------------- + + To print nice column headers, supply the second argument (`headers`): + + - `headers` can be an explicit list of column headers + - if `headers="firstrow"`, then the first row of data is used + - if `headers="keys"`, then dictionary keys or column indices are used + + Otherwise a headerless table is produced. + + If the number of headers is less than the number of columns, they + are supposed to be names of the last columns. This is consistent + with the plain-text format of R and Pandas' dataframes. + + >>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]], + ... headers="firstrow")) + sex age + ----- ----- ----- + Alice F 24 + Bob M 19 + + By default, pandas.DataFrame data have an additional column called + row index. To add a similar column to all other types of data, + use `showindex="always"` or `showindex=True`. To suppress row indices + for all types of data, pass `showindex="never" or `showindex=False`. + To add a custom row index column, pass `showindex=some_iterable`. + + >>> print(tabulate([["F",24],["M",19]], showindex="always")) + - - -- + 0 F 24 + 1 M 19 + - - -- + + + Column alignment + ---------------- + + `tabulate` tries to detect column types automatically, and aligns + the values properly. By default it aligns decimal points of the + numbers (or flushes integer numbers to the right), and flushes + everything else to the left. Possible column alignments + (`numalign`, `stralign`) are: "right", "center", "left", "decimal" + (only for `numalign`), and None (to disable alignment). + + + Table formats + ------------- + + `intfmt` is a format specification used for columns which + contain numeric data without a decimal point. This can also be + a list or tuple of format strings, one per column. + + `floatfmt` is a format specification used for columns which + contain numeric data with a decimal point. This can also be + a list or tuple of format strings, one per column. + + `None` values are replaced with a `missingval` string (like + `floatfmt`, this can also be a list of values for different + columns): + + >>> print(tabulate([["spam", 1, None], + ... ["eggs", 42, 3.14], + ... ["other", None, 2.7]], missingval="?")) + ----- -- ---- + spam 1 ? + eggs 42 3.14 + other ? 2.7 + ----- -- ---- + + Various plain-text table formats (`tablefmt`) are supported: + 'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki', + 'latex', 'latex_raw', 'latex_booktabs', 'latex_longtable' and tsv. + Variable `tabulate_formats`contains the list of currently supported formats. + + "plain" format doesn't use any pseudographics to draw tables, + it separates columns with a double space: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "plain")) + strings numbers + spam 41.9999 + eggs 451 + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain")) + spam 41.9999 + eggs 451 + + "simple" format is like Pandoc simple_tables: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "simple")) + strings numbers + --------- --------- + spam 41.9999 + eggs 451 + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple")) + ---- -------- + spam 41.9999 + eggs 451 + ---- -------- + + "grid" is similar to tables produced by Emacs table.el package or + Pandoc grid_tables: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... 
["strings", "numbers"], "grid")) + +-----------+-----------+ + | strings | numbers | + +===========+===========+ + | spam | 41.9999 | + +-----------+-----------+ + | eggs | 451 | + +-----------+-----------+ + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid")) + +------+----------+ + | spam | 41.9999 | + +------+----------+ + | eggs | 451 | + +------+----------+ + + "simple_grid" draws a grid using single-line box-drawing + characters: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "simple_grid")) + ┌───────────┬───────────┐ + │ strings │ numbers │ + ├───────────┼───────────┤ + │ spam │ 41.9999 │ + ├───────────┼───────────┤ + │ eggs │ 451 │ + └───────────┴───────────┘ + + "rounded_grid" draws a grid using single-line box-drawing + characters with rounded corners: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "rounded_grid")) + ╭───────────┬───────────╮ + │ strings │ numbers │ + ├───────────┼───────────┤ + │ spam │ 41.9999 │ + ├───────────┼───────────┤ + │ eggs │ 451 │ + ╰───────────┴───────────╯ + + "heavy_grid" draws a grid using bold (thick) single-line box-drawing + characters: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "heavy_grid")) + ┏━━━━━━━━━━━┳━━━━━━━━━━━┓ + ┃ strings ┃ numbers ┃ + ┣━━━━━━━━━━━╋━━━━━━━━━━━┫ + ┃ spam ┃ 41.9999 ┃ + ┣━━━━━━━━━━━╋━━━━━━━━━━━┫ + ┃ eggs ┃ 451 ┃ + ┗━━━━━━━━━━━┻━━━━━━━━━━━┛ + + "mixed_grid" draws a grid using a mix of light (thin) and heavy (thick) lines + box-drawing characters: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "mixed_grid")) + ┍━━━━━━━━━━━┯━━━━━━━━━━━┑ + │ strings │ numbers │ + ┝━━━━━━━━━━━┿━━━━━━━━━━━┥ + │ spam │ 41.9999 │ + ├───────────┼───────────┤ + │ eggs │ 451 │ + ┕━━━━━━━━━━━┷━━━━━━━━━━━┙ + + "double_grid" draws a grid using double-line box-drawing + characters: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "double_grid")) + ╔═══════════╦═══════════╗ + ║ strings ║ numbers ║ + ╠═══════════╬═══════════╣ + ║ spam ║ 41.9999 ║ + ╠═══════════╬═══════════╣ + ║ eggs ║ 451 ║ + ╚═══════════╩═══════════╝ + + "fancy_grid" draws a grid using a mix of single and + double-line box-drawing characters: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "fancy_grid")) + ╒═══════════╤═══════════╕ + │ strings │ numbers │ + ╞═══════════╪═══════════╡ + │ spam │ 41.9999 │ + ├───────────┼───────────┤ + │ eggs │ 451 │ + ╘═══════════╧═══════════╛ + + "outline" is the same as the "grid" format but doesn't draw lines between rows: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "outline")) + +-----------+-----------+ + | strings | numbers | + +===========+===========+ + | spam | 41.9999 | + | eggs | 451 | + +-----------+-----------+ + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="outline")) + +------+----------+ + | spam | 41.9999 | + | eggs | 451 | + +------+----------+ + + "simple_outline" is the same as the "simple_grid" format but doesn't draw lines between rows: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... 
["strings", "numbers"], "simple_outline")) + ┌───────────┬───────────┐ + │ strings │ numbers │ + ├───────────┼───────────┤ + │ spam │ 41.9999 │ + │ eggs │ 451 │ + └───────────┴───────────┘ + + "rounded_outline" is the same as the "rounded_grid" format but doesn't draw lines between rows: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "rounded_outline")) + ╭───────────┬───────────╮ + │ strings │ numbers │ + ├───────────┼───────────┤ + │ spam │ 41.9999 │ + │ eggs │ 451 │ + ╰───────────┴───────────╯ + + "heavy_outline" is the same as the "heavy_grid" format but doesn't draw lines between rows: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "heavy_outline")) + ┏━━━━━━━━━━━┳━━━━━━━━━━━┓ + ┃ strings ┃ numbers ┃ + ┣━━━━━━━━━━━╋━━━━━━━━━━━┫ + ┃ spam ┃ 41.9999 ┃ + ┃ eggs ┃ 451 ┃ + ┗━━━━━━━━━━━┻━━━━━━━━━━━┛ + + "mixed_outline" is the same as the "mixed_grid" format but doesn't draw lines between rows: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "mixed_outline")) + ┍━━━━━━━━━━━┯━━━━━━━━━━━┑ + │ strings │ numbers │ + ┝━━━━━━━━━━━┿━━━━━━━━━━━┥ + │ spam │ 41.9999 │ + │ eggs │ 451 │ + ┕━━━━━━━━━━━┷━━━━━━━━━━━┙ + + "double_outline" is the same as the "double_grid" format but doesn't draw lines between rows: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "double_outline")) + ╔═══════════╦═══════════╗ + ║ strings ║ numbers ║ + ╠═══════════╬═══════════╣ + ║ spam ║ 41.9999 ║ + ║ eggs ║ 451 ║ + ╚═══════════╩═══════════╝ + + "fancy_outline" is the same as the "fancy_grid" format but doesn't draw lines between rows: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "fancy_outline")) + ╒═══════════╤═══════════╕ + │ strings │ numbers │ + ╞═══════════╪═══════════╡ + │ spam │ 41.9999 │ + │ eggs │ 451 │ + ╘═══════════╧═══════════╛ + + "pipe" is like tables in PHP Markdown Extra extension or Pandoc + pipe_tables: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "pipe")) + | strings | numbers | + |:----------|----------:| + | spam | 41.9999 | + | eggs | 451 | + + "presto" is like tables produce by the Presto CLI: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "presto")) + strings | numbers + -----------+----------- + spam | 41.9999 + eggs | 451 + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe")) + |:-----|---------:| + | spam | 41.9999 | + | eggs | 451 | + + "orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They + are slightly different from "pipe" format by not using colons to + define column alignment, and using a "+" sign to indicate line + intersections: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "orgtbl")) + | strings | numbers | + |-----------+-----------| + | spam | 41.9999 | + | eggs | 451 | + + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl")) + | spam | 41.9999 | + | eggs | 451 | + + "rst" is like a simple table format from reStructuredText; please + note that reStructuredText accepts also "grid" tables: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... 
["strings", "numbers"], "rst")) + ========= ========= + strings numbers + ========= ========= + spam 41.9999 + eggs 451 + ========= ========= + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst")) + ==== ======== + spam 41.9999 + eggs 451 + ==== ======== + + "mediawiki" produces a table markup used in Wikipedia and on other + MediaWiki-based sites: + + >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]], + ... headers="firstrow", tablefmt="mediawiki")) + {| class="wikitable" style="text-align: left;" + |+ + |- + ! strings !! align="right"| numbers + |- + | spam || align="right"| 41.9999 + |- + | eggs || align="right"| 451 + |} + + "html" produces HTML markup as an html.escape'd str + with a ._repr_html_ method so that Jupyter Lab and Notebook display the HTML + and a .str property so that the raw HTML remains accessible + the unsafehtml table format can be used if an unescaped HTML format is required: + + >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]], + ... headers="firstrow", tablefmt="html")) + + + + + + + + +
strings numbers
spam 41.9999
eggs 451
+ + "latex" produces a tabular environment of LaTeX document markup: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex")) + \\begin{tabular}{lr} + \\hline + spam & 41.9999 \\\\ + eggs & 451 \\\\ + \\hline + \\end{tabular} + + "latex_raw" is similar to "latex", but doesn't escape special characters, + such as backslash and underscore, so LaTeX commands may embedded into + cells' values: + + >>> print(tabulate([["spam$_9$", 41.9999], ["\\\\emph{eggs}", "451.0"]], tablefmt="latex_raw")) + \\begin{tabular}{lr} + \\hline + spam$_9$ & 41.9999 \\\\ + \\emph{eggs} & 451 \\\\ + \\hline + \\end{tabular} + + "latex_booktabs" produces a tabular environment of LaTeX document markup + using the booktabs.sty package: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs")) + \\begin{tabular}{lr} + \\toprule + spam & 41.9999 \\\\ + eggs & 451 \\\\ + \\bottomrule + \\end{tabular} + + "latex_longtable" produces a tabular environment that can stretch along + multiple pages, using the longtable package for LaTeX. + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_longtable")) + \\begin{longtable}{lr} + \\hline + spam & 41.9999 \\\\ + eggs & 451 \\\\ + \\hline + \\end{longtable} + + + Number parsing + -------------- + By default, anything which can be parsed as a number is a number. + This ensures numbers represented as strings are aligned properly. + This can lead to weird results for particular strings such as + specific git SHAs e.g. "42992e1" will be parsed into the number + 429920 and aligned as such. + + To completely disable number parsing (and alignment), use + `disable_numparse=True`. For more fine grained control, a list column + indices is used to disable number parsing only on those columns + e.g. `disable_numparse=[0, 2]` would disable number parsing only on the + first and third columns. + + Column Widths and Auto Line Wrapping + ------------------------------------ + Tabulate will, by default, set the width of each column to the length of the + longest element in that column. However, in situations where fields are expected + to reasonably be too long to look good as a single line, tabulate can help automate + word wrapping long fields for you. 
+ + Column Widths and Auto Line Wrapping + ------------------------------------ + Tabulate will, by default, set the width of each column to the length of the + longest element in that column. However, in situations where fields can + reasonably be expected to be too long to look good as a single line, tabulate + can help automate word wrapping long fields for you. Use the parameter `maxcolwidths` to provide a + list of maximal column widths + + >>> print(tabulate( \ + [('1', 'John Smith', \ + 'This is a rather long description that might look better if it is wrapped a bit')], \ + headers=("Issue Id", "Author", "Description"), \ + maxcolwidths=[None, None, 30], \ + tablefmt="grid" \ + )) + +------------+------------+-------------------------------+ + | Issue Id | Author | Description | + +============+============+===============================+ + | 1 | John Smith | This is a rather long | + | | | description that might look | + | | | better if it is wrapped a bit | + +------------+------------+-------------------------------+ + + Header column widths can be specified in a similar way using `maxheadercolwidths` + + """ + + if tabular_data is None: + tabular_data = [] + + list_of_lists, headers = _normalize_tabular_data( + tabular_data, headers, showindex=showindex + ) + list_of_lists, separating_lines = _remove_separating_lines(list_of_lists) + + if maxcolwidths is not None: + num_cols = len(list_of_lists[0]) + if isinstance(maxcolwidths, int): # Expand scalar for all columns + maxcolwidths = _expand_iterable(maxcolwidths, num_cols, maxcolwidths) + else: # Ignore col width for any 'trailing' columns + maxcolwidths = _expand_iterable(maxcolwidths, num_cols, None) + + numparses = _expand_numparse(disable_numparse, num_cols) + list_of_lists = _wrap_text_to_colwidths( + list_of_lists, maxcolwidths, numparses=numparses + ) + + if maxheadercolwidths is not None: + num_cols = len(list_of_lists[0]) + if isinstance(maxheadercolwidths, int): # Expand scalar for all columns + maxheadercolwidths = _expand_iterable( + maxheadercolwidths, num_cols, maxheadercolwidths + ) + else: # Ignore col width for any 'trailing' columns + maxheadercolwidths = _expand_iterable(maxheadercolwidths, num_cols, None) + + numparses = _expand_numparse(disable_numparse, num_cols) + headers = _wrap_text_to_colwidths( + [headers], maxheadercolwidths, numparses=numparses + )[0] + + # empty values in the first column of RST tables should be escaped (issue #82) + # "" should be escaped as "\\ " or ".." + if tablefmt == "rst": + list_of_lists, headers = _rst_escape_first_column(list_of_lists, headers) + + # PrettyTable formatting does not use any extra padding. + # Numbers are not parsed and are treated the same as strings for alignment. + # Check if pretty is the format being used and override the defaults so it + # does not impact other formats. + min_padding = MIN_PADDING + if tablefmt == "pretty": + min_padding = 0 + disable_numparse = True + numalign = "center" if numalign == _DEFAULT_ALIGN else numalign + stralign = "center" if stralign == _DEFAULT_ALIGN else stralign + else: + numalign = "decimal" if numalign == _DEFAULT_ALIGN else numalign + stralign = "left" if stralign == _DEFAULT_ALIGN else stralign + + # optimization: look for ANSI control codes once, + # enable smart width functions only if a control code is found + # + # convert the headers and rows into a single, tab-delimited string ensuring + # that any bytestrings are decoded safely (i.e.
errors ignored) + plain_text = "\t".join( + chain( + # headers + map(_to_str, headers), + # rows: chain the rows together into a single iterable after mapping + # the bytestring conversion to each cell value + chain.from_iterable(map(_to_str, row) for row in list_of_lists), + ) + ) + + has_invisible = _ansi_codes.search(plain_text) is not None + + enable_widechars = wcwidth is not None and WIDE_CHARS_MODE + if ( + not isinstance(tablefmt, TableFormat) + and tablefmt in multiline_formats + and _is_multiline(plain_text) + ): + tablefmt = multiline_formats.get(tablefmt, tablefmt) + is_multiline = True + else: + is_multiline = False + width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline) + + # format rows and columns, convert numeric values to strings + cols = list(izip_longest(*list_of_lists)) + numparses = _expand_numparse(disable_numparse, len(cols)) + coltypes = [_column_type(col, numparse=np) for col, np in zip(cols, numparses)] + if isinstance(floatfmt, str): # old version + float_formats = len(cols) * [ + floatfmt + ] # just duplicate the string to use in each column + else: # if floatfmt is list, tuple etc we have one per column + float_formats = list(floatfmt) + if len(float_formats) < len(cols): + float_formats.extend((len(cols) - len(float_formats)) * [_DEFAULT_FLOATFMT]) + if isinstance(intfmt, str): # old version + int_formats = len(cols) * [ + intfmt + ] # just duplicate the string to use in each column + else: # if intfmt is list, tuple etc we have one per column + int_formats = list(intfmt) + if len(int_formats) < len(cols): + int_formats.extend((len(cols) - len(int_formats)) * [_DEFAULT_INTFMT]) + if isinstance(missingval, str): + missing_vals = len(cols) * [missingval] + else: + missing_vals = list(missingval) + if len(missing_vals) < len(cols): + missing_vals.extend((len(cols) - len(missing_vals)) * [_DEFAULT_MISSINGVAL]) + cols = [ + [_format(v, ct, fl_fmt, int_fmt, miss_v, has_invisible) for v in c] + for c, ct, fl_fmt, int_fmt, miss_v in zip( + cols, coltypes, float_formats, int_formats, missing_vals + ) + ] + + # align columns + aligns = [numalign if ct in [int, float] else stralign for ct in coltypes] + if colalign is not None: + assert isinstance(colalign, Iterable) + for idx, align in enumerate(colalign): + aligns[idx] = align + minwidths = ( + [width_fn(h) + min_padding for h in headers] if headers else [0] * len(cols) + ) + cols = [ + _align_column(c, a, minw, has_invisible, enable_widechars, is_multiline) + for c, a, minw in zip(cols, aligns, minwidths) + ] + + if headers: + # align headers and add headers + t_cols = cols or [[""]] * len(headers) + t_aligns = aligns or [stralign] * len(headers) + minwidths = [ + max(minw, max(width_fn(cl) for cl in c)) + for minw, c in zip(minwidths, t_cols) + ] + headers = [ + _align_header(h, a, minw, width_fn(h), is_multiline, width_fn) + for h, a, minw in zip(headers, t_aligns, minwidths) + ] + rows = list(zip(*cols)) + else: + minwidths = [max(width_fn(cl) for cl in c) for c in cols] + rows = list(zip(*cols)) + + if not isinstance(tablefmt, TableFormat): + tablefmt = _table_formats.get(tablefmt, _table_formats["simple"]) + + ra_default = rowalign if isinstance(rowalign, str) else None + rowaligns = _expand_iterable(rowalign, len(rows), ra_default) + _reinsert_separating_lines(rows, separating_lines) + + return _format_table( + tablefmt, headers, rows, minwidths, aligns, is_multiline, rowaligns=rowaligns + ) + + +def _expand_numparse(disable_numparse, column_count): + """ + Return a list of bools of length `column_count` indicating whether + number parsing should be used on each column. + If `disable_numparse` is a list of indices, the entries at those indices + are False and everything else is True. + If `disable_numparse` is a bool, every entry of the returned list is + `not disable_numparse`. + """ + if isinstance(disable_numparse, Iterable): + numparses = [True] * column_count + for index in disable_numparse: + numparses[index] = False + return numparses + else: + return [not disable_numparse] * column_count
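+ + +# A couple of illustrative values for the helper above (editor's sketch, not +# part of the library): _expand_numparse(True, 3) returns [False, False, False], +# and _expand_numparse([0, 2], 4) returns [False, True, False, True].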
+ + +def _expand_iterable(original, num_desired, default): + """ + Expands the `original` argument to return a list of + length `num_desired`. If `original` is shorter than `num_desired`, it will + be padded with the value in `default`. + If `original` is not a list to begin with (i.e. a scalar value), a list of + length `num_desired` completely populated with `default` will be returned. + """ + if isinstance(original, Iterable) and not isinstance(original, str): + return original + [default] * (num_desired - len(original)) + else: + return [default] * num_desired + + +def _pad_row(cells, padding): + if cells: + pad = " " * padding + padded_cells = [pad + cell + pad for cell in cells] + return padded_cells + else: + return cells + + +def _build_simple_row(padded_cells, rowfmt): + "Format row according to DataRow format without padding." + begin, sep, end = rowfmt + return (begin + sep.join(padded_cells) + end).rstrip() + + +def _build_row(padded_cells, colwidths, colaligns, rowfmt): + "Return a string which represents a row of data cells." + if not rowfmt: + return None + if hasattr(rowfmt, "__call__"): + return rowfmt(padded_cells, colwidths, colaligns) + else: + return _build_simple_row(padded_cells, rowfmt) + + +def _append_basic_row(lines, padded_cells, colwidths, colaligns, rowfmt, rowalign=None): + # NOTE: rowalign is ignored and exists for api compatibility with _append_multiline_row + lines.append(_build_row(padded_cells, colwidths, colaligns, rowfmt)) + return lines + + +def _align_cell_veritically(text_lines, num_lines, column_width, row_alignment): + delta_lines = num_lines - len(text_lines) + blank = [" " * column_width] + if row_alignment == "bottom": + return blank * delta_lines + text_lines + elif row_alignment == "center": + top_delta = delta_lines // 2 + bottom_delta = delta_lines - top_delta + return top_delta * blank + text_lines + bottom_delta * blank + else: + return text_lines + blank * delta_lines + + +def _append_multiline_row( + lines, padded_multiline_cells, padded_widths, colaligns, rowfmt, pad, rowalign=None +): + colwidths = [w - 2 * pad for w in padded_widths] + cells_lines = [c.splitlines() for c in padded_multiline_cells] + nlines = max(map(len, cells_lines)) # number of lines in the row + # vertically pad cells where some lines are missing + # cells_lines = [ + # (cl + [" " * w] * (nlines - len(cl))) for cl, w in zip(cells_lines, colwidths) + # ] + + cells_lines = [ + _align_cell_veritically(cl, nlines, w, rowalign) + for cl, w in zip(cells_lines, colwidths) + ] + lines_cells = [[cl[i] for cl in cells_lines] for i in range(nlines)] + for ln in lines_cells: + padded_ln = _pad_row(ln, pad) + _append_basic_row(lines, padded_ln, colwidths, colaligns, rowfmt) + return lines
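+ + +# Editor's illustration (comment only): with the helper above, +# _align_cell_veritically(["x"], 3, 1, "center") pads a one-line cell out to +# three lines of column width 1, producing [" ", "x", " "].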
+ + +def _build_line(colwidths, colaligns, linefmt): + "Return a string which represents a horizontal line." + if not linefmt: + return None + if hasattr(linefmt, "__call__"): + return linefmt(colwidths, colaligns) + else: + begin, fill, sep, end = linefmt + cells = [fill * w for w in colwidths] + return _build_simple_row(cells, (begin, sep, end)) + + +def _append_line(lines, colwidths, colaligns, linefmt): + lines.append(_build_line(colwidths, colaligns, linefmt)) + return lines + + +class JupyterHTMLStr(str): + """Wrap the string with a _repr_html_ method so that Jupyter + displays the HTML table""" + + def _repr_html_(self): + return self + + @property + def str(self): + """add a .str property so that the raw string is still accessible""" + return self + + +def _format_table(fmt, headers, rows, colwidths, colaligns, is_multiline, rowaligns): + """Produce a plain-text representation of the table.""" + lines = [] + hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else [] + pad = fmt.padding + headerrow = fmt.headerrow + + padded_widths = [(w + 2 * pad) for w in colwidths] + if is_multiline: + pad_row = lambda row, _: row # noqa do it later, in _append_multiline_row + append_row = partial(_append_multiline_row, pad=pad) + else: + pad_row = _pad_row + append_row = _append_basic_row + + padded_headers = pad_row(headers, pad) + padded_rows = [pad_row(row, pad) for row in rows] + + if fmt.lineabove and "lineabove" not in hidden: + _append_line(lines, padded_widths, colaligns, fmt.lineabove) + + if padded_headers: + append_row(lines, padded_headers, padded_widths, colaligns, headerrow) + if fmt.linebelowheader and "linebelowheader" not in hidden: + _append_line(lines, padded_widths, colaligns, fmt.linebelowheader) + + if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden: + # initial rows with a line below + for row, ralign in zip(padded_rows[:-1], rowaligns): + append_row( + lines, row, padded_widths, colaligns, fmt.datarow, rowalign=ralign + ) + _append_line(lines, padded_widths, colaligns, fmt.linebetweenrows) + # the last row without a line below + append_row( + lines, + padded_rows[-1], + padded_widths, + colaligns, + fmt.datarow, + rowalign=rowaligns[-1], + ) + else: + separating_line = ( + fmt.linebetweenrows + or fmt.linebelowheader + or fmt.linebelow + or fmt.lineabove + or Line("", "", "", "") + ) + for row in padded_rows: + # test to see if either the 1st column or the 2nd column (account for showindex) has + # the SEPARATING_LINE flag + if _is_separating_line(row): + _append_line(lines, padded_widths, colaligns, separating_line) + else: + append_row(lines, row, padded_widths, colaligns, fmt.datarow) + + if fmt.linebelow and "linebelow" not in hidden: + _append_line(lines, padded_widths, colaligns, fmt.linebelow) + + if headers or rows: + output = "\n".join(lines) + if fmt.lineabove == _html_begin_table_without_header: + return JupyterHTMLStr(output) + else: + return output + else: # a completely empty table + return ""
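+ + +# Editor's note (comment only): because of the JupyterHTMLStr return above, +# tabulate(rows, tablefmt="html") renders as a table in Jupyter via _repr_html_, +# while the raw markup stays reachable through str(result) or result.str.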
+ """ + + def __init__(self, *args, **kwargs): + self._active_codes = [] + self.max_lines = None # For python2 compatibility + textwrap.TextWrapper.__init__(self, *args, **kwargs) + + @staticmethod + def _len(item): + """Custom len that gets console column width for wide + and non-wide characters as well as ignores color codes""" + stripped = _strip_ansi(item) + if wcwidth: + return wcwidth.wcswidth(stripped) + else: + return len(stripped) + + def _update_lines(self, lines, new_line): + """Adds a new line to the list of lines the text is being wrapped into + This function will also track any ANSI color codes in this string as well + as add any colors from previous lines order to preserve the same formatting + as a single unwrapped string. + """ + code_matches = [x for x in _ansi_codes.finditer(new_line)] + color_codes = [ + code.string[code.span()[0] : code.span()[1]] for code in code_matches + ] + + # Add color codes from earlier in the unwrapped line, and then track any new ones we add. + new_line = "".join(self._active_codes) + new_line + + for code in color_codes: + if code != _ansi_color_reset_code: + self._active_codes.append(code) + else: # A single reset code resets everything + self._active_codes = [] + + # Always ensure each line is color terminted if any colors are + # still active, otherwise colors will bleed into other cells on the console + if len(self._active_codes) > 0: + new_line = new_line + _ansi_color_reset_code + + lines.append(new_line) + + def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width): + """_handle_long_word(chunks : [string], + cur_line : [string], + cur_len : int, width : int) + Handle a chunk of text (most likely a word, not whitespace) that + is too long to fit in any line. + """ + # Figure out when indent is larger than the specified width, and make + # sure at least one character is stripped off on every pass + if width < 1: + space_left = 1 + else: + space_left = width - cur_len + + # If we're allowed to break long words, then do so: put as much + # of the next chunk onto the current line as will fit. + if self.break_long_words: + # Tabulate Custom: Build the string up piece-by-piece in order to + # take each charcter's width into account + chunk = reversed_chunks[-1] + i = 1 + while self._len(chunk[:i]) <= space_left: + i = i + 1 + cur_line.append(chunk[: i - 1]) + reversed_chunks[-1] = chunk[i - 1 :] + + # Otherwise, we have to preserve the long word intact. Only add + # it to the current line if there's nothing already there -- + # that minimizes how much we violate the width constraint. + elif not cur_line: + cur_line.append(reversed_chunks.pop()) + + # If we're not allowed to break long words, and there's already + # text on the current line, do nothing. Next time through the + # main loop of _wrap_chunks(), we'll wind up here again, but + # cur_len will be zero, so the next line will be entirely + # devoted to the long word that we can't handle right now. + + def _wrap_chunks(self, chunks): + """_wrap_chunks(chunks : [string]) -> [string] + Wrap a sequence of text chunks and return a list of lines of + length 'self.width' or less. (If 'break_long_words' is false, + some lines may be longer than this.) Chunks correspond roughly + to words and the whitespace between them: each chunk is + indivisible (modulo 'break_long_words'), but a line break can + come between any two chunks. Chunks should not have internal + whitespace; ie. a chunk is either all whitespace or a "word". 
+ def _wrap_chunks(self, chunks): + """_wrap_chunks(chunks : [string]) -> [string] + Wrap a sequence of text chunks and return a list of lines of + length 'self.width' or less. (If 'break_long_words' is false, + some lines may be longer than this.) Chunks correspond roughly + to words and the whitespace between them: each chunk is + indivisible (modulo 'break_long_words'), but a line break can + come between any two chunks. Chunks should not have internal + whitespace; i.e. a chunk is either all whitespace or a "word". + Whitespace chunks will be removed from the beginning and end of + lines, but apart from that whitespace is preserved. + """ + lines = [] + if self.width <= 0: + raise ValueError("invalid width %r (must be > 0)" % self.width) + if self.max_lines is not None: + if self.max_lines > 1: + indent = self.subsequent_indent + else: + indent = self.initial_indent + if self._len(indent) + self._len(self.placeholder.lstrip()) > self.width: + raise ValueError("placeholder too large for max width") + + # Arrange in reverse order so items can be efficiently popped + # from a stack of chunks. + chunks.reverse() + + while chunks: + + # Start the list of chunks that will make up the current line. + # cur_len is just the length of all the chunks in cur_line. + cur_line = [] + cur_len = 0 + + # Figure out which static string will prefix this line. + if lines: + indent = self.subsequent_indent + else: + indent = self.initial_indent + + # Maximum width for this line. + width = self.width - self._len(indent) + + # First chunk on line is whitespace -- drop it, unless this + # is the very beginning of the text (i.e. no lines started yet). + if self.drop_whitespace and chunks[-1].strip() == "" and lines: + del chunks[-1] + + while chunks: + chunk_len = self._len(chunks[-1]) + + # Can at least squeeze this chunk onto the current line. + if cur_len + chunk_len <= width: + cur_line.append(chunks.pop()) + cur_len += chunk_len + + # Nope, this line is full. + else: + break + + # The current line is full, and the next chunk is too big to + # fit on *any* line (not just this one). + if chunks and self._len(chunks[-1]) > width: + self._handle_long_word(chunks, cur_line, cur_len, width) + cur_len = sum(map(self._len, cur_line)) + + # If the last chunk on this line is all whitespace, drop it. + if self.drop_whitespace and cur_line and cur_line[-1].strip() == "": + cur_len -= self._len(cur_line[-1]) + del cur_line[-1] + + if cur_line: + if ( + self.max_lines is None + or len(lines) + 1 < self.max_lines + or ( + not chunks + or self.drop_whitespace + and len(chunks) == 1 + and not chunks[0].strip() + ) + and cur_len <= width + ): + # Convert current line back to a string and store it in + # list of all lines (return value). + self._update_lines(lines, indent + "".join(cur_line)) + else: + while cur_line: + if ( + cur_line[-1].strip() + and cur_len + self._len(self.placeholder) <= width + ): + cur_line.append(self.placeholder) + self._update_lines(lines, indent + "".join(cur_line)) + break + cur_len -= self._len(cur_line[-1]) + del cur_line[-1] + else: + if lines: + prev_line = lines[-1].rstrip() + if ( + self._len(prev_line) + self._len(self.placeholder) + <= self.width + ): + lines[-1] = prev_line + self.placeholder + break + self._update_lines(lines, indent + self.placeholder.lstrip()) + break + + return lines + + +def _main(): + """\ + Usage: tabulate [options] [FILE ...] + + Pretty-print tabular data. + See also https://github.com/astanin/python-tabulate + + FILE a filename of the file with tabular data; + if "-" or missing, read data from stdin. + + Options: + + -h, --help show this message + -1, --header use the first row of data as a table header + -o FILE, --output FILE print table to FILE (default: stdout) + -s REGEXP, --sep REGEXP use a custom column separator (default: whitespace) + -F FPFMT, --float FPFMT floating point number format (default: g) + -I INTFMT, --int INTFMT integer number format (default: "") + -A ALIGNS, --align ALIGNS space-separated list of column alignments + -f FMT, --format FMT set output table format; supported formats: + plain, simple, grid, fancy_grid, pipe, orgtbl, + rst, mediawiki, html, latex, latex_raw, + latex_booktabs, latex_longtable, tsv + (default: simple) + """
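+ # Editor's illustration of an invocation (assumes the distribution's + # "tabulate" console script is installed; the sample data is arbitrary): + # printf "spam 41.9999\neggs 451.0\n" | tabulate -f grid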
+ import getopt + import sys + import textwrap + + usage = textwrap.dedent(_main.__doc__) + try: + opts, args = getopt.getopt( + sys.argv[1:], + "h1o:s:F:I:A:f:", + ["help", "header", "output", "sep=", "float=", "int=", "align=", "format="], + ) + except getopt.GetoptError as e: + print(e) + print(usage) + sys.exit(2) + headers = [] + floatfmt = _DEFAULT_FLOATFMT + intfmt = _DEFAULT_INTFMT + colalign = None + tablefmt = "simple" + sep = r"\s+" + outfile = "-" + for opt, value in opts: + if opt in ["-1", "--header"]: + headers = "firstrow" + elif opt in ["-o", "--output"]: + outfile = value + elif opt in ["-F", "--float"]: + floatfmt = value + elif opt in ["-I", "--int"]: + intfmt = value + elif opt in ["-A", "--align"]: + colalign = value.split() + elif opt in ["-f", "--format"]: + if value not in tabulate_formats: + print("%s is not a supported table format" % value) + print(usage) + sys.exit(3) + tablefmt = value + elif opt in ["-s", "--sep"]: + sep = value + elif opt in ["-h", "--help"]: + print(usage) + sys.exit(0) + files = [sys.stdin] if not args else args + with (sys.stdout if outfile == "-" else open(outfile, "w")) as out: + for f in files: + if f == "-": + f = sys.stdin + if _is_file(f): + _pprint_file( + f, + headers=headers, + tablefmt=tablefmt, + sep=sep, + floatfmt=floatfmt, + intfmt=intfmt, + file=out, + colalign=colalign, + ) + else: + with open(f) as fobj: + _pprint_file( + fobj, + headers=headers, + tablefmt=tablefmt, + sep=sep, + floatfmt=floatfmt, + intfmt=intfmt, + file=out, + colalign=colalign, + ) + + +def _pprint_file(fobject, headers, tablefmt, sep, floatfmt, intfmt, file, colalign): + rows = fobject.readlines() + table = [re.split(sep, r.rstrip()) for r in rows if r.strip()] + print( + tabulate( + table, + headers, + tablefmt, + floatfmt=floatfmt, + intfmt=intfmt, + colalign=colalign, + ), + file=file, + ) + + +if __name__ == "__main__": + _main() diff --git a/deepseek/lib/python3.10/site-packages/ray/_private/usage/__init__.py b/deepseek/lib/python3.10/site-packages/ray/_private/usage/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/deepseek/lib/python3.10/site-packages/ray/data/_internal/memory_tracing.py b/deepseek/lib/python3.10/site-packages/ray/data/_internal/memory_tracing.py new file mode 100644 index 0000000000000000000000000000000000000000..f44c648452adb6d7ad2d157fa72f6af8fb6296c6 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/data/_internal/memory_tracing.py @@ -0,0 +1,147 @@ +"""Utility for debugging object store memory eager deletion in Datasets. + +NOTE: the performance overhead of tracing object allocation is fairly substantial. +This is meant for use in unit tests when debugging. Please do not enable it in +production without performance optimization. + +Enable with RAY_DATA_TRACE_ALLOCATIONS=1. + +Basic usage is to call `trace_allocation` each time a new object is created, and call +`trace_deallocation` when an object should be disposed of. When the workload is +complete, call `leak_report` to view possibly leaked objects. + +Note that so-called "leaked" objects will be reclaimed eventually by reference counting +in Ray. This is just to debug the eager deletion protocol, which is more efficient. +"""
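+ +# Editor's usage sketch (comments only; assumes ray.init() has been called and +# RAY_DATA_TRACE_ALLOCATIONS=1 is set so that DataContext.trace_allocations is on): +# +# ref = ray.put(b"some block") +# trace_allocation(ref, "my_op.output") +# trace_deallocation(ref, "my_op.cleanup", free=True) +# print(leak_report())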
+ +from io import StringIO +from typing import Dict, List + +import ray +from ray.data.context import DataContext + + +def trace_allocation(ref: ray.ObjectRef, loc: str) -> None: + """Record that an object has been created. + + Args: + ref: The object created. + loc: A human-readable string identifying the call site. + """ + ctx = DataContext.get_current() + if ctx.trace_allocations: + tracer = _get_mem_actor() + # TODO: it would be nice to determine loc automatically based on the stack. + ray.get(tracer.trace_alloc.remote([ref], loc)) + + +def trace_deallocation(ref: ray.ObjectRef, loc: str, free: bool = True) -> None: + """Record that an object has been deleted (and delete if free=True). + + Args: + ref: The object we no longer need. + loc: A human-readable string identifying the call site. + free: Whether to eagerly destroy the object instead of waiting for Ray + reference counting to kick in. + """ + if free: + ray._private.internal_api.free(ref, local_only=False) + ctx = DataContext.get_current() + if ctx.trace_allocations: + tracer = _get_mem_actor() + ray.get(tracer.trace_dealloc.remote([ref], loc, free)) + + +def leak_report() -> str: + tracer = _get_mem_actor() + return ray.get(tracer.leak_report.remote()) + + +@ray.remote(num_cpus=0) +class _MemActor: + def __init__(self): + self.allocated: Dict[ray.ObjectRef, dict] = {} + self.deallocated: Dict[ray.ObjectRef, dict] = {} + self.skip_dealloc: Dict[ray.ObjectRef, str] = {} + self.peak_mem = 0 + self.cur_mem = 0 + + def trace_alloc(self, ref: List[ray.ObjectRef], loc: str): + ref = ref[0] # Avoid Ray materializing the ref. + if ref not in self.allocated: + meta = ray.experimental.get_object_locations([ref]) + size_bytes = meta.get("object_size", 0) + if not size_bytes: + size_bytes = -1 + from ray import cloudpickle as pickle + + try: + obj = ray.get(ref, timeout=5.0) + size_bytes = len(pickle.dumps(obj)) + except Exception: + print("[mem_tracing] ERROR getting size") + size_bytes = -1 + print(f"[mem_tracing] Allocated {size_bytes} bytes at {loc}: {ref}") + entry = { + "size_bytes": size_bytes, + "loc": loc, + } + self.allocated[ref] = entry + self.cur_mem += size_bytes + self.peak_mem = max(self.cur_mem, self.peak_mem) + + def trace_dealloc(self, ref: List[ray.ObjectRef], loc: str, freed: bool): + ref = ref[0] # Avoid Ray materializing the ref. + size_bytes = self.allocated.get(ref, {}).get("size_bytes", 0) + if freed: + print(f"[mem_tracing] Freed {size_bytes} bytes at {loc}: {ref}") + if ref in self.allocated: + self.cur_mem -= size_bytes + self.deallocated[ref] = self.allocated.pop(ref) + self.deallocated[ref]["dealloc_loc"] = loc + if ref in self.deallocated: + # This object reference was already deallocated; nothing to record.
+ pass + else: + print(f"[mem_tracing] WARNING: allocation of {ref} was not traced!") + else: + print(f"[mem_tracing] Skipped freeing {size_bytes} bytes at {loc}: {ref}") + self.skip_dealloc[ref] = loc + + def leak_report(self) -> str: + output = StringIO() + output.write("[mem_tracing] ===== Leaked objects =====\n") + for ref in self.allocated: + size_bytes = self.allocated[ref].get("size_bytes") + loc = self.allocated[ref].get("loc") + if ref in self.skip_dealloc: + dealloc_loc = self.skip_dealloc[ref] + output.write( + f"[mem_tracing] Leaked object, created at {loc}, size " + f"{size_bytes}, skipped dealloc at {dealloc_loc}: {ref}\n" + ) + else: + output.write( + f"[mem_tracing] Leaked object, created at {loc}, " + f"size {size_bytes}: {ref}\n" + ) + output.write("[mem_tracing] ===== End leaked objects =====\n") + output.write("[mem_tracing] ===== Freed objects =====\n") + for ref in self.deallocated: + size_bytes = self.deallocated[ref].get("size_bytes") + loc = self.deallocated[ref].get("loc") + dealloc_loc = self.deallocated[ref].get("dealloc_loc") + output.write( + f"[mem_tracing] Freed object from {loc} at {dealloc_loc}, " + f"size {size_bytes}: {ref}\n" + ) + output.write("[mem_tracing] ===== End freed objects =====\n") + output.write(f"[mem_tracing] Peak size bytes {self.peak_mem}\n") + output.write(f"[mem_tracing] Current size bytes {self.cur_mem}\n") + return output.getvalue() + + +def _get_mem_actor(): + return _MemActor.options( + name="mem_tracing_actor", get_if_exists=True, lifetime="detached" + ).remote() diff --git a/deepseek/lib/python3.10/site-packages/ray/data/_internal/pandas_block.py b/deepseek/lib/python3.10/site-packages/ray/data/_internal/pandas_block.py new file mode 100644 index 0000000000000000000000000000000000000000..119469b46c1b91719ffb3dcfd981abaffbc2fbc6 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/data/_internal/pandas_block.py @@ -0,0 +1,627 @@ +import collections +import heapq +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Iterator, + List, + Optional, + Sequence, + Tuple, + TypeVar, + Union, +) + +import numpy as np + +from ray.air.constants import TENSOR_COLUMN_NAME +from ray.air.util.tensor_extensions.utils import _is_ndarray_tensor +from ray.data._internal.numpy_support import convert_to_numpy, validate_numpy_batch +from ray.data._internal.row import TableRow +from ray.data._internal.table_block import TableBlockAccessor, TableBlockBuilder +from ray.data._internal.util import find_partitions +from ray.data.block import ( + Block, + BlockAccessor, + BlockExecStats, + BlockMetadata, + BlockType, + KeyType, + U, +) +from ray.data.context import DataContext + +if TYPE_CHECKING: + import pandas + import pyarrow + + from ray.data._internal.planner.exchange.sort_task_spec import SortKey + from ray.data.aggregate import AggregateFn + +T = TypeVar("T") + +_pandas = None + + +def lazy_import_pandas(): + global _pandas + if _pandas is None: + import pandas + + _pandas = pandas + return _pandas + + +class PandasRow(TableRow): + """ + Row of a tabular Dataset backed by a Pandas DataFrame block. 
+ """ + + def __getitem__(self, key: Union[str, List[str]]) -> Any: + from ray.data.extensions import TensorArrayElement + + pd = lazy_import_pandas() + + def get_item(keys: List[str]) -> Any: + col = self._row[keys] + if len(col) == 0: + return None + + items = col.iloc[0] + if isinstance(items.iloc[0], TensorArrayElement): + # Getting an item in a Pandas tensor column may return + # a TensorArrayElement, which we have to convert to an ndarray. + return pd.Series(item.to_numpy() for item in items) + + try: + # Try to interpret this as a numpy-type value. + # See https://stackoverflow.com/questions/9452775/converting-numpy-dtypes-to-native-python-types. # noqa: E501 + return pd.Series(item.as_py() for item in items) + + except (AttributeError, ValueError): + # Fallback to the original form. + return items + + is_single_item = isinstance(key, str) + keys = [key] if is_single_item else key + + items = get_item(keys) + + if items is None: + return None + elif is_single_item: + return items.iloc[0] + else: + return items + + def __iter__(self) -> Iterator: + for k in self._row.columns: + yield k + + def __len__(self): + return self._row.shape[1] + + +class PandasBlockBuilder(TableBlockBuilder): + def __init__(self): + pandas = lazy_import_pandas() + super().__init__(pandas.DataFrame) + + @staticmethod + def _table_from_pydict(columns: Dict[str, List[Any]]) -> "pandas.DataFrame": + pandas = lazy_import_pandas() + + pd_columns: Dict[str, Any] = {} + + for col_name, col_vals in columns.items(): + np_col_vals = convert_to_numpy(col_vals) + + if col_name == TENSOR_COLUMN_NAME or _is_ndarray_tensor(np_col_vals): + from ray.data.extensions.tensor_extension import TensorArray + + pd_columns[col_name] = TensorArray(np_col_vals) + else: + pd_columns[col_name] = np_col_vals + + return pandas.DataFrame(pd_columns) + + @staticmethod + def _concat_tables(tables: List["pandas.DataFrame"]) -> "pandas.DataFrame": + pandas = lazy_import_pandas() + from ray.air.util.data_batch_conversion import ( + _cast_ndarray_columns_to_tensor_extension, + ) + + if len(tables) > 1: + df = pandas.concat(tables, ignore_index=True) + df.reset_index(drop=True, inplace=True) + else: + df = tables[0] + ctx = DataContext.get_current() + if ctx.enable_tensor_extension_casting: + df = _cast_ndarray_columns_to_tensor_extension(df) + return df + + @staticmethod + def _concat_would_copy() -> bool: + return True + + @staticmethod + def _empty_table() -> "pandas.DataFrame": + pandas = lazy_import_pandas() + return pandas.DataFrame() + + def block_type(self) -> BlockType: + return BlockType.PANDAS + + +# This is to be compatible with pyarrow.lib.schema +# TODO (kfstorm): We need a format-independent way to represent schema. +PandasBlockSchema = collections.namedtuple("PandasBlockSchema", ["names", "types"]) + + +class PandasBlockAccessor(TableBlockAccessor): + ROW_TYPE = PandasRow + + def __init__(self, table: "pandas.DataFrame"): + super().__init__(table) + + def column_names(self) -> List[str]: + return self._table.columns.tolist() + + def append_column(self, name: str, data: Any) -> Block: + assert name not in self._table.columns + + if any(isinstance(item, np.ndarray) for item in data): + raise NotImplementedError( + f"`{self.__class__.__name__}.append_column()` doesn't support " + "array-like data." 
+ ) + + table = self._table.copy() + table[name] = data + return table + + @staticmethod + def _build_tensor_row(row: PandasRow) -> np.ndarray: + from ray.data.extensions import TensorArrayElement + + tensor = row[TENSOR_COLUMN_NAME].iloc[0] + if isinstance(tensor, TensorArrayElement): + # Getting an item in a Pandas tensor column may return a TensorArrayElement, + # which we have to convert to an ndarray. + tensor = tensor.to_numpy() + return tensor + + def slice(self, start: int, end: int, copy: bool = False) -> "pandas.DataFrame": + view = self._table[start:end] + view.reset_index(drop=True, inplace=True) + if copy: + view = view.copy(deep=True) + return view + + def take(self, indices: List[int]) -> "pandas.DataFrame": + table = self._table.take(indices) + table.reset_index(drop=True, inplace=True) + return table + + def select(self, columns: List[str]) -> "pandas.DataFrame": + if not all(isinstance(col, str) for col in columns): + raise ValueError( + "Columns must be a list of column name strings when aggregating on " + f"Pandas blocks, but got: {columns}." + ) + return self._table[columns] + + def random_shuffle(self, random_seed: Optional[int]) -> "pandas.DataFrame": + table = self._table.sample(frac=1, random_state=random_seed) + table.reset_index(drop=True, inplace=True) + return table + + def schema(self) -> PandasBlockSchema: + dtypes = self._table.dtypes + schema = PandasBlockSchema( + names=dtypes.index.tolist(), types=dtypes.values.tolist() + ) + # Column names with non-str types of a pandas DataFrame is not + # supported by Ray Dataset. + if any(not isinstance(name, str) for name in schema.names): + raise ValueError( + "A Pandas DataFrame with column names of non-str types" + " is not supported by Ray Dataset. Column names of this" + f" DataFrame: {schema.names!r}." + ) + return schema + + def to_pandas(self) -> "pandas.DataFrame": + from ray.air.util.data_batch_conversion import _cast_tensor_columns_to_ndarrays + + ctx = DataContext.get_current() + table = self._table + if ctx.enable_tensor_extension_casting: + table = _cast_tensor_columns_to_ndarrays(table) + return table + + def to_numpy( + self, columns: Optional[Union[str, List[str]]] = None + ) -> Union[np.ndarray, Dict[str, np.ndarray]]: + if columns is None: + columns = self._table.columns.tolist() + should_be_single_ndarray = False + elif isinstance(columns, list): + should_be_single_ndarray = False + else: + columns = [columns] + should_be_single_ndarray = True + + column_names_set = set(self._table.columns) + for column in columns: + if column not in column_names_set: + raise ValueError( + f"Cannot find column {column}, available columns: " + f"{self._table.columns.tolist()}" + ) + + arrays = [] + for column in columns: + arrays.append(self._table[column].to_numpy()) + + if should_be_single_ndarray: + arrays = arrays[0] + else: + arrays = dict(zip(columns, arrays)) + return arrays + + def to_arrow(self) -> "pyarrow.Table": + import pyarrow + + # Set `preserve_index=False` so that Arrow doesn't add a '__index_level_0__' + # column to the resulting table. 
+ return pyarrow.Table.from_pandas(self._table, preserve_index=False) + + @staticmethod + def numpy_to_block( + batch: Union[Dict[str, np.ndarray], Dict[str, list]], + ) -> "pandas.DataFrame": + validate_numpy_batch(batch) + + block = PandasBlockBuilder._table_from_pydict(batch) + return block + + def num_rows(self) -> int: + return self._table.shape[0] + + def size_bytes(self) -> int: + return int(self._table.memory_usage(index=True, deep=True).sum()) + + def _zip(self, acc: BlockAccessor) -> "pandas.DataFrame": + r = self.to_pandas().copy(deep=False) + s = acc.to_pandas() + for col_name in s.columns: + col = s[col_name] + column_names = list(r.columns) + # Ensure the column names are unique after zip. + if col_name in column_names: + i = 1 + new_name = col_name + while new_name in column_names: + new_name = "{}_{}".format(col_name, i) + i += 1 + col_name = new_name + r[col_name] = col + return r + + @staticmethod + def builder() -> PandasBlockBuilder: + return PandasBlockBuilder() + + @staticmethod + def _empty_table() -> "pandas.DataFrame": + return PandasBlockBuilder._empty_table() + + def _sample(self, n_samples: int, sort_key: "SortKey") -> "pandas.DataFrame": + return self._table[sort_key.get_columns()].sample(n_samples, ignore_index=True) + + def _apply_agg( + self, agg_fn: Callable[["pandas.Series", bool], U], on: str + ) -> Optional[U]: + """Helper providing null handling around applying an aggregation to a column.""" + pd = lazy_import_pandas() + if on is not None and not isinstance(on, str): + raise ValueError( + "on must be a string or None when aggregating on Pandas blocks, but " + f"got: {type(on)}." + ) + + if self.num_rows() == 0: + return None + + col = self._table[on] + try: + val = agg_fn(col) + except TypeError as e: + # Converting an all-null column in an Arrow Table to a Pandas DataFrame + # column will result in an all-None column of object type, which will raise + # a type error when attempting to do most binary operations. We explicitly + # check for this type failure here so we can properly propagate a null. + if np.issubdtype(col.dtype, np.object_) and col.isnull().all(): + return None + raise e from None + if pd.isnull(val): + return None + return val + + def count(self, on: str) -> Optional[U]: + return self._apply_agg(lambda col: col.count(), on) + + def sum(self, on: str, ignore_nulls: bool) -> Optional[U]: + pd = lazy_import_pandas() + if on is not None and not isinstance(on, str): + raise ValueError( + "on must be a string or None when aggregating on Pandas blocks, but " + f"got: {type(on)}." + ) + + if self.num_rows() == 0: + return None + + col = self._table[on] + if col.isnull().all(): + # Short-circuit on an all-null column, returning None. This is required for + # sum() since it will otherwise return 0 when summing on an all-null column, + # which is not what we want. 
+ return None + val = col.sum(skipna=ignore_nulls) + if pd.isnull(val): + return None + return val + + def min(self, on: str, ignore_nulls: bool) -> Optional[U]: + return self._apply_agg(lambda col: col.min(skipna=ignore_nulls), on) + + def max(self, on: str, ignore_nulls: bool) -> Optional[U]: + return self._apply_agg(lambda col: col.max(skipna=ignore_nulls), on) + + def mean(self, on: str, ignore_nulls: bool) -> Optional[U]: + return self._apply_agg(lambda col: col.mean(skipna=ignore_nulls), on) + + def sum_of_squared_diffs_from_mean( + self, + on: str, + ignore_nulls: bool, + mean: Optional[U] = None, + ) -> Optional[U]: + if mean is None: + mean = self.mean(on, ignore_nulls) + return self._apply_agg( + lambda col: ((col - mean) ** 2).sum(skipna=ignore_nulls), + on, + ) + + def sort_and_partition( + self, boundaries: List[T], sort_key: "SortKey" + ) -> List[Block]: + if self._table.shape[0] == 0: + # If the DataFrame is empty, we may not have a schema and sorting + # would raise an error, so return empty partitions directly. + return [self._empty_table() for _ in range(len(boundaries) + 1)] + + columns, ascending = sort_key.to_pandas_sort_args() + table = self._table.sort_values(by=columns, ascending=ascending) + if len(boundaries) == 0: + return [table] + + return find_partitions(table, boundaries, sort_key) + + def combine( + self, sort_key: "SortKey", aggs: Tuple["AggregateFn"] + ) -> "pandas.DataFrame": + """Combine rows with the same key into an accumulator. + + This assumes the block is already sorted by key in ascending order. + + Args: + sort_key: A SortKey object which holds column names/keys. + If this is ``None``, place all rows in a single group. + + aggs: The aggregations to do. + + Returns: + A sorted block of [k, v_1, ..., v_n] columns where k is the groupby + key and v_i is the partially combined accumulator for the ith given + aggregation. + If key is None then the k column is omitted. + """ + keys: List[str] = sort_key.get_columns() + pd = lazy_import_pandas() + + def iter_groups() -> Iterator[Tuple[Sequence[KeyType], Block]]: + """Creates an iterator over zero-copy group views.""" + if not keys: + # Global aggregation consists of a single "group", so we short-circuit. + yield tuple(), self.to_block() + return + + start = end = 0 + iter = self.iter_rows(public_row_format=False) + next_row = None + while True: + try: + if next_row is None: + next_row = next(iter) + next_keys = next_row[keys] + while np.all(next_row[keys] == next_keys): + end += 1 + try: + next_row = next(iter) + except StopIteration: + next_row = None + break + if isinstance(next_keys, pd.Series): + next_keys = next_keys.values + yield next_keys, self.slice(start, end, copy=False) + start = end + except StopIteration: + break + + builder = PandasBlockBuilder() + for group_keys, group_view in iter_groups(): + # Aggregate. + init_vals = group_keys + if len(group_keys) == 1: + init_vals = group_keys[0] + accumulators = [agg.init(init_vals) for agg in aggs] + for i in range(len(aggs)): + accumulators[i] = aggs[i].accumulate_block(accumulators[i], group_view) + + # Build the row. + row = {} + if keys: + for k, gk in zip(keys, group_keys): + row[k] = gk + + count = collections.defaultdict(int) + for agg, accumulator in zip(aggs, accumulators): + name = agg.name + # Check for conflicts with existing aggregation name.
+ if count[name] > 0: + name = self._munge_conflict(name, count[name]) + count[name] += 1 + row[name] = accumulator + + builder.add(row) + + return builder.build() + + @staticmethod + def merge_sorted_blocks( + blocks: List[Block], sort_key: "SortKey" + ) -> Tuple["pandas.DataFrame", BlockMetadata]: + pd = lazy_import_pandas() + stats = BlockExecStats.builder() + blocks = [b for b in blocks if b.shape[0] > 0] + if len(blocks) == 0: + ret = PandasBlockAccessor._empty_table() + else: + # Handle blocks of different types. + blocks = TableBlockAccessor.normalize_block_types(blocks, "pandas") + ret = pd.concat(blocks, ignore_index=True) + columns, ascending = sort_key.to_pandas_sort_args() + ret = ret.sort_values(by=columns, ascending=ascending) + return ret, PandasBlockAccessor(ret).get_metadata(exec_stats=stats.build()) + + @staticmethod + def aggregate_combined_blocks( + blocks: List["pandas.DataFrame"], + sort_key: "SortKey", + aggs: Tuple["AggregateFn"], + finalize: bool, + ) -> Tuple["pandas.DataFrame", BlockMetadata]: + """Aggregate sorted, partially combined blocks with the same key range. + + This assumes blocks are already sorted by key in ascending order, + so we can do merge sort to get all the rows with the same key. + + Args: + blocks: A list of partially combined and sorted blocks. + sort_key: The column name of key or None for global aggregation. + aggs: The aggregations to do. + finalize: Whether to finalize the aggregation. This is used as an + optimization for cases where we repeatedly combine partially + aggregated groups. + + Returns: + A block of [k, v_1, ..., v_n] columns and its metadata where k is + the groupby key and v_i is the corresponding aggregation result for + the ith given aggregation. + If key is None then the k column is omitted. + """ + + stats = BlockExecStats.builder() + keys = sort_key.get_columns() + + def key_fn(r): + if keys: + return tuple(r[keys]) + else: + return (0,) + + # Handle blocks of different types. + blocks = TableBlockAccessor.normalize_block_types(blocks, "pandas") + + iter = heapq.merge( + *[ + PandasBlockAccessor(block).iter_rows(public_row_format=False) + for block in blocks + ], + key=key_fn, + ) + next_row = None + builder = PandasBlockBuilder() + while True: + try: + if next_row is None: + next_row = next(iter) + next_keys = key_fn(next_row) + next_key_columns = keys + + def gen(): + nonlocal iter + nonlocal next_row + while key_fn(next_row) == next_keys: + yield next_row + try: + next_row = next(iter) + except StopIteration: + next_row = None + break + + # Merge. + first = True + accumulators = [None] * len(aggs) + resolved_agg_names = [None] * len(aggs) + for r in gen(): + if first: + count = collections.defaultdict(int) + for i in range(len(aggs)): + name = aggs[i].name + # Check for conflicts with existing aggregation + # name. + if count[name] > 0: + name = PandasBlockAccessor._munge_conflict( + name, count[name] + ) + count[name] += 1 + resolved_agg_names[i] = name + accumulators[i] = r[name] + first = False + else: + for i in range(len(aggs)): + accumulators[i] = aggs[i].merge( + accumulators[i], r[resolved_agg_names[i]] + ) + # Build the row. 
+ row = {} + if keys: + for col_name, next_key in zip(next_key_columns, next_keys): + row[col_name] = next_key + + for agg, agg_name, accumulator in zip( + aggs, resolved_agg_names, accumulators + ): + if finalize: + row[agg_name] = agg.finalize(accumulator) + else: + row[agg_name] = accumulator + + builder.add(row) + except StopIteration: + break + + ret = builder.build() + return ret, PandasBlockAccessor(ret).get_metadata(exec_stats=stats.build()) + + def block_type(self) -> BlockType: + return BlockType.PANDAS diff --git a/deepseek/lib/python3.10/site-packages/ray/data/_internal/plan.py b/deepseek/lib/python3.10/site-packages/ray/data/_internal/plan.py new file mode 100644 index 0000000000000000000000000000000000000000..40f24ea4326851317fd4df01f5b482110e505116 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/data/_internal/plan.py @@ -0,0 +1,602 @@ +import copy +import itertools +import logging +from typing import TYPE_CHECKING, Iterator, List, Optional, Tuple, Type, Union + +import pyarrow + +import ray +from ray._private.internal_api import get_memory_info_reply, get_state_from_address +from ray.data._internal.execution.interfaces import RefBundle +from ray.data._internal.logical.interfaces.logical_operator import LogicalOperator +from ray.data._internal.logical.interfaces.logical_plan import LogicalPlan +from ray.data._internal.logical.operators.from_operators import AbstractFrom +from ray.data._internal.logical.operators.input_data_operator import InputData +from ray.data._internal.logical.operators.read_operator import Read +from ray.data._internal.stats import DatasetStats +from ray.data._internal.util import create_dataset_tag, unify_block_metadata_schema +from ray.data.block import BlockMetadata +from ray.data.context import DataContext +from ray.data.exceptions import omit_traceback_stdout +from ray.util.debug import log_once + +if TYPE_CHECKING: + + from ray.data._internal.execution.interfaces import Executor + from ray.data.dataset import Dataset + + +# Scheduling strategy can be inherited from prev operator if not specified. +INHERITABLE_REMOTE_ARGS = ["scheduling_strategy"] + + +logger = logging.getLogger(__name__) + + +class ExecutionPlan: + """A lazy execution plan for a Dataset. + + This lazy execution plan builds up a chain of ``List[RefBundle]`` --> + ``List[RefBundle]`` operators. Prior to execution, we apply a set of logical + plan optimizations, such as operator fusion, in order to reduce Ray task + overhead and data copies. + + Internally, the execution plan holds a snapshot of a computed list of + blocks and their associated metadata under ``self._snapshot_bundle``, + where this snapshot is the cached output of executing the operator chain.""" + + def __init__( + self, + stats: DatasetStats, + *, + data_context: Optional[DataContext] = None, + ): + """Create a plan with no transformation operators. + + Args: + stats: Stats for the base blocks. + data_context: :class:`~ray.data.context.DataContext` + object to use for execution. + """ + self._in_stats = stats + # A computed snapshot of some prefix of operators and their corresponding + # output blocks and stats. + self._snapshot_operator: Optional[LogicalOperator] = None + self._snapshot_stats = None + self._snapshot_bundle = None + # Snapshot of only metadata corresponding to the final operator's + # output bundles, used as the source of truth for the Dataset's schema + # and count. 
This is calculated and cached when the plan is executed as an + # iterator (`execute_to_iterator()`), and avoids caching + # all of the output blocks in memory like in `self.snapshot_bundle`. + # TODO(scottjlee): To keep the caching logic consistent, update `execute()` + # to also store the metadata in `_snapshot_metadata` instead of + # `_snapshot_bundle`. For example, we could store the blocks in + # `self._snapshot_blocks` and the metadata in `self._snapshot_metadata`. + self._snapshot_metadata: Optional[BlockMetadata] = None + + # Cached schema. + self._schema = None + # Set when a Dataset is constructed with this plan + self._dataset_uuid = None + + self._dataset_name = None + + self._has_started_execution = False + + if data_context is None: + # Snapshot the current context, so that the config of Datasets is always + # determined by the config at the time it was created. + self._context = copy.deepcopy(DataContext.get_current()) + else: + self._context = data_context + + def __repr__(self) -> str: + return ( + f"ExecutionPlan(" + f"dataset_uuid={self._dataset_uuid}, " + f"snapshot_operator={self._snapshot_operator}" + f")" + ) + + def get_plan_as_string(self, dataset_cls: Type["Dataset"]) -> str: + """Create a cosmetic string representation of this execution plan. + + Returns: + The string representation of this execution plan. + """ + # NOTE: this is used for Dataset.__repr__ to give a user-facing string + # representation. Ideally ExecutionPlan.__repr__ should be replaced with this + # method as well. + + from ray.data.dataset import MaterializedDataset + + # Do not force execution for schema, as this method is expected to be very + # cheap. + plan_str = "" + plan_max_depth = 0 + if not self.has_computed_output(): + + def generate_logical_plan_string( + op: LogicalOperator, + curr_str: str = "", + depth: int = 0, + ): + """Traverse (DFS) the LogicalPlan DAG and + return a string representation of the operators.""" + if isinstance(op, (Read, InputData, AbstractFrom)): + return curr_str, depth + + curr_max_depth = depth + op_name = op.name + if depth == 0: + curr_str += f"{op_name}\n" + else: + trailing_space = " " * ((depth - 1) * 3) + curr_str += f"{trailing_space}+- {op_name}\n" + + for input in op.input_dependencies: + curr_str, input_max_depth = generate_logical_plan_string( + input, curr_str, depth + 1 + ) + curr_max_depth = max(curr_max_depth, input_max_depth) + return curr_str, curr_max_depth + + # generate_logical_plan_string(self._logical_plan.dag) + plan_str, plan_max_depth = generate_logical_plan_string( + self._logical_plan.dag + ) + + if self._snapshot_bundle is not None: + # This plan has executed some but not all operators. + schema = unify_block_metadata_schema(self._snapshot_bundle.metadata) + count = self._snapshot_bundle.num_rows() + elif self._snapshot_metadata is not None: + schema = self._snapshot_metadata.schema + count = self._snapshot_metadata.num_rows + else: + # This plan hasn't executed any operators. + sources = self._logical_plan.sources() + # TODO(@bveeramani): Handle schemas for n-ary operators like `Union`. + if len(sources) > 1: + # Multiple sources, cannot determine schema. + schema = None + count = None + else: + assert len(sources) == 1 + plan = ExecutionPlan(DatasetStats(metadata={}, parent=None)) + plan.link_logical_plan(LogicalPlan(sources[0], plan._context)) + schema = plan.schema() + count = plan.meta_count() + else: + # Get schema of output blocks. 
+ schema = self.schema(fetch_if_missing=False) + count = self._snapshot_bundle.num_rows() + + if schema is None: + schema_str = "Unknown schema" + elif isinstance(schema, type): + schema_str = str(schema) + else: + schema_str = [] + for n, t in zip(schema.names, schema.types): + if hasattr(t, "__name__"): + t = t.__name__ + schema_str.append(f"{n}: {t}") + schema_str = ", ".join(schema_str) + schema_str = "{" + schema_str + "}" + + if count is None: + count = "?" + + num_blocks = None + if dataset_cls == MaterializedDataset: + num_blocks = self.initial_num_blocks() + assert num_blocks is not None + + name_str = ( + "name={}, ".format(self._dataset_name) + if self._dataset_name is not None + else "" + ) + num_blocks_str = f"num_blocks={num_blocks}, " if num_blocks else "" + + dataset_str = "{}({}{}num_rows={}, schema={})".format( + dataset_cls.__name__, + name_str, + num_blocks_str, + count, + schema_str, + ) + + # If the resulting string representation fits in one line, use it directly. + SCHEMA_LINE_CHAR_LIMIT = 80 + MIN_FIELD_LENGTH = 10 + INDENT_STR = " " * 3 + trailing_space = INDENT_STR * plan_max_depth + + if len(dataset_str) > SCHEMA_LINE_CHAR_LIMIT: + # If the resulting string representation exceeds the line char limit, + # first try breaking up each `Dataset` parameter into its own line + # and check if each line fits within the line limit. We check the + # `schema` param's length, since this is likely the longest string. + schema_str_on_new_line = f"{trailing_space}{INDENT_STR}schema={schema_str}" + if len(schema_str_on_new_line) > SCHEMA_LINE_CHAR_LIMIT: + # If the schema cannot fit on a single line, break up each field + # into its own line. + schema_str = [] + for n, t in zip(schema.names, schema.types): + if hasattr(t, "__name__"): + t = t.__name__ + col_str = f"{trailing_space}{INDENT_STR * 2}{n}: {t}" + # If the field line exceeds the char limit, abbreviate + # the field name to fit while maintaining the full type + if len(col_str) > SCHEMA_LINE_CHAR_LIMIT: + shortened_suffix = f"...: {str(t)}" + # Show at least 10 characters of the field name, even if + # we have already hit the line limit with the type. + chars_left_for_col_name = max( + SCHEMA_LINE_CHAR_LIMIT - len(shortened_suffix), + MIN_FIELD_LENGTH, + ) + col_str = ( + f"{col_str[:chars_left_for_col_name]}{shortened_suffix}" + ) + schema_str.append(col_str) + schema_str = ",\n".join(schema_str) + schema_str = ( + "{\n" + schema_str + f"\n{trailing_space}{INDENT_STR}" + "}" + ) + name_str = ( + f"\n{trailing_space}{INDENT_STR}name={self._dataset_name}," + if self._dataset_name is not None + else "" + ) + num_blocks_str = ( + f"\n{trailing_space}{INDENT_STR}num_blocks={num_blocks}," + if num_blocks + else "" + ) + dataset_str = ( + f"{dataset_cls.__name__}(" + f"{name_str}" + f"{num_blocks_str}" + f"\n{trailing_space}{INDENT_STR}num_rows={count}," + f"\n{trailing_space}{INDENT_STR}schema={schema_str}" + f"\n{trailing_space})" + ) + + if plan_max_depth == 0: + plan_str += dataset_str + else: + plan_str += f"{INDENT_STR * (plan_max_depth - 1)}+- {dataset_str}" + return plan_str + + def link_logical_plan(self, logical_plan: "LogicalPlan"): + """Link the logical plan into this execution plan. + + This is used for triggering execution for optimizer code path in this legacy + execution plan. + """ + self._logical_plan = logical_plan + self._logical_plan._context = self._context + + def copy(self) -> "ExecutionPlan": + """Create a shallow copy of this execution plan. 
+ + This copy can be executed without mutating the original, but clearing the copy + will also clear the original. + + Returns: + A shallow copy of this execution plan. + """ + plan_copy = ExecutionPlan( + self._in_stats, + data_context=self._context, + ) + if self._snapshot_bundle is not None: + # Copy over the existing snapshot. + plan_copy._snapshot_bundle = self._snapshot_bundle + plan_copy._snapshot_operator = self._snapshot_operator + plan_copy._snapshot_stats = self._snapshot_stats + plan_copy._dataset_name = self._dataset_name + return plan_copy + + def deep_copy(self) -> "ExecutionPlan": + """Create a deep copy of this execution plan. + + This copy can be executed AND cleared without mutating the original. + + Returns: + A deep copy of this execution plan. + """ + plan_copy = ExecutionPlan(copy.copy(self._in_stats)) + if self._snapshot_bundle: + # Copy over the existing snapshot. + plan_copy._snapshot_bundle = copy.copy(self._snapshot_bundle) + plan_copy._snapshot_operator = copy.copy(self._snapshot_operator) + plan_copy._snapshot_stats = copy.copy(self._snapshot_stats) + plan_copy._dataset_name = self._dataset_name + return plan_copy + + def initial_num_blocks(self) -> Optional[int]: + """Get the estimated number of blocks from the logical plan + after applying execution plan optimizations, but prior to + fully executing the dataset.""" + return self._logical_plan.dag.estimated_num_outputs() + + def schema( + self, fetch_if_missing: bool = False + ) -> Union[type, "pyarrow.lib.Schema"]: + """Get the schema after applying all execution plan optimizations, + but prior to fully executing the dataset + (unless `fetch_if_missing` is set to True). + + Args: + fetch_if_missing: Whether to execute the plan to fetch the schema. + + Returns: + The schema of the output dataset. + """ + if self._schema is not None: + return self._schema + + schema = None + if self.has_computed_output(): + schema = unify_block_metadata_schema(self._snapshot_bundle.metadata) + elif self._logical_plan.dag.aggregate_output_metadata().schema is not None: + schema = self._logical_plan.dag.aggregate_output_metadata().schema + elif fetch_if_missing: + iter_ref_bundles, _, _ = self.execute_to_iterator() + for ref_bundle in iter_ref_bundles: + for metadata in ref_bundle.metadata: + if metadata.schema is not None and ( + metadata.num_rows is None or metadata.num_rows > 0 + ): + schema = metadata.schema + break + elif self.is_read_only(): + # For consistency with the previous implementation, we fetch the schema if + # the plan is read-only even if `fetch_if_missing` is False. + iter_ref_bundles, _, _ = self.execute_to_iterator() + try: + ref_bundle = next(iter(iter_ref_bundles)) + for metadata in ref_bundle.metadata: + if metadata.schema is not None: + schema = metadata.schema + break + except StopIteration: # Empty dataset. + schema = None + + self._schema = schema + return self._schema + + def cache_schema(self, schema: Union[type, "pyarrow.lib.Schema"]): + self._schema = schema + + def input_files(self) -> Optional[List[str]]: + """Get the input files of the dataset, if available.""" + return self._logical_plan.dag.aggregate_output_metadata().input_files + + def meta_count(self) -> Optional[int]: + """Get the number of rows after applying all plan optimizations, if possible. + + This method will never trigger any computation. + + Returns: + The number of records of the result Dataset, or None. 
+ """ + if self.has_computed_output(): + num_rows = sum(m.num_rows for m in self._snapshot_bundle.metadata) + elif self._logical_plan.dag.aggregate_output_metadata().num_rows is not None: + num_rows = self._logical_plan.dag.aggregate_output_metadata().num_rows + else: + num_rows = None + return num_rows + + @omit_traceback_stdout + def execute_to_iterator( + self, + ) -> Tuple[Iterator[RefBundle], DatasetStats, Optional["Executor"]]: + """Execute this plan, returning an iterator. + + This will use streaming execution to generate outputs. + + Returns: + Tuple of iterator over output RefBundles, DatasetStats, and the executor. + """ + self._has_started_execution = True + + # Always used the saved context for execution. + ctx = self._context + + if self.has_computed_output(): + bundle = self.execute() + return iter([bundle]), self._snapshot_stats, None + + from ray.data._internal.execution.legacy_compat import ( + execute_to_legacy_bundle_iterator, + ) + from ray.data._internal.execution.streaming_executor import StreamingExecutor + + metrics_tag = create_dataset_tag(self._dataset_name, self._dataset_uuid) + executor = StreamingExecutor(copy.deepcopy(ctx.execution_options), metrics_tag) + bundle_iter = execute_to_legacy_bundle_iterator(executor, self) + # Since the generator doesn't run any code until we try to fetch the first + # value, force execution of one bundle before we call get_stats(). + gen = iter(bundle_iter) + try: + bundle_iter = itertools.chain([next(gen)], gen) + except StopIteration: + pass + self._snapshot_stats = executor.get_stats() + return bundle_iter, self._snapshot_stats, executor + + @omit_traceback_stdout + def execute( + self, + preserve_order: bool = False, + ) -> RefBundle: + """Execute this plan. + + Args: + preserve_order: Whether to preserve order in execution. + + Returns: + The blocks of the output dataset. + """ + self._has_started_execution = True + + # Always used the saved context for execution. + context = self._context + + if not ray.available_resources().get("CPU"): + if log_once("cpu_warning"): + logger.warning( + "Warning: The Ray cluster currently does not have " + "any available CPUs. The Dataset job will hang unless more CPUs " + "are freed up. A common reason is that cluster resources are " + "used by Actors or Tune trials; see the following link " + "for more details: " + "https://docs.ray.io/en/latest/data/data-internals.html#ray-data-and-tune" # noqa: E501 + ) + if not self.has_computed_output(): + from ray.data._internal.execution.legacy_compat import ( + _get_initial_stats_from_plan, + execute_to_legacy_block_list, + ) + + if self._logical_plan.dag.output_data() is not None: + # If the data is already materialized (e.g., `from_pandas`), we can + # skip execution and directly return the output data. This avoids + # recording unnecessary metrics for an empty plan execution. + stats = _get_initial_stats_from_plan(self) + + # TODO(@bveeramani): Make `ExecutionPlan.execute()` return + # `List[RefBundle]` instead of `RefBundle`. Among other reasons, it'd + # allow us to remove the unwrapping logic below. 
+ output_bundles = self._logical_plan.dag.output_data() + owns_blocks = all(bundle.owns_blocks for bundle in output_bundles) + bundle = RefBundle( + [ + (block, metadata) + for bundle in output_bundles + for block, metadata in bundle.blocks + ], + owns_blocks=owns_blocks, + ) + else: + from ray.data._internal.execution.streaming_executor import ( + StreamingExecutor, + ) + + metrics_tag = create_dataset_tag(self._dataset_name, self._dataset_uuid) + executor = StreamingExecutor( + copy.deepcopy(context.execution_options), + metrics_tag, + ) + blocks = execute_to_legacy_block_list( + executor, + self, + dataset_uuid=self._dataset_uuid, + preserve_order=preserve_order, + ) + bundle = RefBundle( + tuple(blocks.iter_blocks_with_metadata()), + owns_blocks=blocks._owned_by_consumer, + ) + stats = executor.get_stats() + stats_summary_string = stats.to_summary().to_string( + include_parent=False + ) + if context.enable_auto_log_stats: + logger.info(stats_summary_string) + + # Retrieve memory-related stats from ray. + try: + reply = get_memory_info_reply( + get_state_from_address(ray.get_runtime_context().gcs_address) + ) + if reply.store_stats.spill_time_total_s > 0: + stats.global_bytes_spilled = int( + reply.store_stats.spilled_bytes_total + ) + if reply.store_stats.restore_time_total_s > 0: + stats.global_bytes_restored = int( + reply.store_stats.restored_bytes_total + ) + except Exception as e: + logger.debug( + "Skipping recording memory spilled and restored statistics due to " + f"exception: {e}" + ) + + stats.dataset_bytes_spilled = 0 + + def collect_stats(cur_stats): + stats.dataset_bytes_spilled += cur_stats.extra_metrics.get( + "obj_store_mem_spilled", 0 + ) + for parent in cur_stats.parents: + collect_stats(parent) + + collect_stats(stats) + + # Set the snapshot to the output of the final operator. + self._snapshot_bundle = bundle + self._snapshot_operator = self._logical_plan.dag + self._snapshot_stats = stats + self._snapshot_stats.dataset_uuid = self._dataset_uuid + + return self._snapshot_bundle + + @property + def has_started_execution(self) -> bool: + """Return ``True`` if this plan has been partially or fully executed.""" + return self._has_started_execution + + def clear_snapshot(self) -> None: + """Clear the snapshot kept in the plan to the beginning state.""" + self._snapshot_bundle = None + self._snapshot_operator = None + self._snapshot_stats = None + + def stats(self) -> DatasetStats: + """Return stats for this plan. + + If the plan isn't executed, an empty stats object will be returned. + """ + if not self._snapshot_stats: + return DatasetStats(metadata={}, parent=None) + return self._snapshot_stats + + def has_lazy_input(self) -> bool: + """Return whether this plan has lazy input blocks.""" + return all(isinstance(op, Read) for op in self._logical_plan.sources()) + + def is_read_only(self, root_op: Optional[LogicalOperator] = None) -> bool: + """Return whether the LogicalPlan corresponding to `root_op` + contains only a Read op. By default, the last operator of + the LogicalPlan is used.""" + if root_op is None: + root_op = self._logical_plan.dag + return isinstance(root_op, Read) and len(root_op.input_dependencies) == 0 + + def has_computed_output(self) -> bool: + """Whether this plan has a computed snapshot for the final operator, i.e. for + the output of this plan. 
+ """ + return ( + self._snapshot_bundle is not None + and self._snapshot_operator == self._logical_plan.dag + ) + + def require_preserve_order(self) -> bool: + """Whether this plan requires to preserve order.""" + from ray.data._internal.logical.operators.all_to_all_operator import Sort + from ray.data._internal.logical.operators.n_ary_operator import Zip + + for op in self._logical_plan.dag.post_order_iter(): + if isinstance(op, (Zip, Sort)): + return True + return False diff --git a/deepseek/lib/python3.10/site-packages/ray/data/_internal/size_estimator.py b/deepseek/lib/python3.10/site-packages/ray/data/_internal/size_estimator.py new file mode 100644 index 0000000000000000000000000000000000000000..75714cc50b8d61e2b103820a730a4051810c4277 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/data/_internal/size_estimator.py @@ -0,0 +1,92 @@ +from typing import Any, List + +import ray +from ray import cloudpickle + +_ray_initialized = False + + +class SizeEstimator: + """Efficiently estimates the Ray serialized size of a stream of items. + + For efficiency, this only samples a fraction of the added items for real + Ray-serialization. + """ + + def __init__(self): + self._running_mean = RunningMean() + self._count = 0 + + def add(self, item: Any) -> None: + self._count += 1 + if self._count <= 10: + self._running_mean.add(self._real_size(item), weight=1) + elif self._count <= 100: + if self._count % 10 == 0: + self._running_mean.add(self._real_size(item), weight=10) + elif self._count % 100 == 0: + self._running_mean.add(self._real_size(item), weight=100) + + def add_block(self, block: List[Any]) -> None: + if self._count < 10: + for i in range(min(10 - self._count, len(block))): + self._running_mean.add(self._real_size(block[i]), weight=1) + if self._count < 100: + for i in range( + 10 - (self._count % 10), min(100 - self._count, len(block)), 10 + ): + self._running_mean.add(self._real_size(block[i]), weight=10) + if (len(block) + (self._count % 100)) // 100 > 1: + for i in range(100 - (self._count % 100), len(block), 100): + self._running_mean.add(self._real_size(block[i]), weight=100) + self._count += len(block) + + def size_bytes(self) -> int: + return int(self._running_mean.mean * self._count) + + def _real_size(self, item: Any) -> int: + is_client = ray.util.client.ray.is_connected() + # In client mode, fallback to using Ray cloudpickle instead of the + # real serializer. + if is_client: + return len(cloudpickle.dumps(item)) + + # We're using an internal Ray API, and have to ensure it's + # initialized # by calling a public API. + global _ray_initialized + if not _ray_initialized: + _ray_initialized = True + ray.put(None) + return ( + ray._private.worker.global_worker.get_serialization_context() + .serialize(item) + .total_bytes + ) + + +# Adapted from the RLlib MeanStdFilter. 
+class RunningMean: + def __init__(self): + self._weight = 0 + self._mean = 0 + + def add(self, x: int, weight: int = 1) -> None: + if weight == 0: + return + n1 = self._weight + n2 = weight + n = n1 + n2 + M = (n1 * self._mean + n2 * x) / n + self._weight = n + self._mean = M + + @property + def n(self) -> int: + return self._weight + + @property + def mean(self) -> float: + return self._mean + + def __repr__(self): + return "(n={}, mean={})".format(self.n, self.mean) diff --git a/deepseek/lib/python3.10/site-packages/ray/data/_internal/stats.py b/deepseek/lib/python3.10/site-packages/ray/data/_internal/stats.py new file mode 100644 index 0000000000000000000000000000000000000000..fc6903cd92e2c59d251afa73832e970ad5151472 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/data/_internal/stats.py @@ -0,0 +1,1495 @@ +import collections +import logging +import threading +import time +from contextlib import contextmanager +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Set, Tuple, Union +from uuid import uuid4 + +import numpy as np + +import ray +from ray.actor import ActorHandle +from ray.data._internal.block_list import BlockList +from ray.data._internal.execution.interfaces.op_runtime_metrics import ( + MetricsGroup, + OpRuntimeMetrics, +) +from ray.data._internal.util import capfirst +from ray.data.block import BlockMetadata +from ray.data.context import DataContext +from ray.util.annotations import DeveloperAPI +from ray.util.metrics import Gauge +from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy + +logger = logging.getLogger(__name__) + +STATS_ACTOR_NAME = "datasets_stats_actor" +STATS_ACTOR_NAMESPACE = "_dataset_stats_actor" + + +StatsDict = Dict[str, List[BlockMetadata]] + + +def fmt(seconds: float) -> str: + if seconds > 1: + return str(round(seconds, 2)) + "s" + elif seconds > 0.001: + return str(round(seconds * 1000, 2)) + "ms" + else: + return str(round(seconds * 1000 * 1000, 2)) + "us" + + +def leveled_indent(lvl: int = 0, spaces_per_indent: int = 3) -> str: + """Returns a string of spaces which contains `level` indents, + each indent containing `spaces_per_indent` spaces. For example: + >>> leveled_indent(2, 3) + ' ' + """ + return (" " * spaces_per_indent) * lvl + + +class Timer: + """Helper class for tracking accumulated time (in seconds).""" + + def __init__(self): + self._value: float = 0 + self._min: float = float("inf") + self._max: float = 0 + self._total_count: float = 0 + + @contextmanager + def timer(self) -> None: + time_start = time.perf_counter() + try: + yield + finally: + self.add(time.perf_counter() - time_start) + + def add(self, value: float) -> None: + self._value += value + if value < self._min: + self._min = value + if value > self._max: + self._max = value + self._total_count += 1 + + def get(self) -> float: + return self._value + + def min(self) -> float: + return self._min + + def max(self) -> float: + return self._max + + def avg(self) -> float: + return self._value / self._total_count if self._total_count else float("inf") + + +class _DatasetStatsBuilder: + """Helper class for building dataset stats. + + When this class is created, we record the start time. 
When build() is + called with the final blocks of the new dataset, the time delta is + saved as part of the stats.""" + + def __init__( + self, + operator_name: str, + parent: "DatasetStats", + override_start_time: Optional[float], + ): + self.operator_name = operator_name + self.parent = parent + self.start_time = override_start_time or time.perf_counter() + + def build_multioperator(self, metadata: StatsDict) -> "DatasetStats": + op_metadata = {} + for i, (k, v) in enumerate(metadata.items()): + capped_k = capfirst(k) + if len(metadata) > 1: + if i == 0: + op_metadata[self.operator_name + capped_k] = v + else: + op_metadata[self.operator_name.split("->")[-1] + capped_k] = v + else: + op_metadata[self.operator_name] = v + stats = DatasetStats( + metadata=op_metadata, + parent=self.parent, + base_name=self.operator_name, + ) + stats.time_total_s = time.perf_counter() - self.start_time + return stats + + def build(self, final_blocks: BlockList) -> "DatasetStats": + stats = DatasetStats( + metadata={self.operator_name: final_blocks.get_metadata()}, + parent=self.parent, + ) + stats.time_total_s = time.perf_counter() - self.start_time + return stats + + +@ray.remote(num_cpus=0) +class _StatsActor: + """Actor holding stats for blocks created by LazyBlockList. + + This actor is shared across all datasets created in the same cluster. + In order to cap memory usage, we set a max number of stats to keep + in the actor. When this limit is exceeded, the stats will be garbage + collected in FIFO order. + + TODO(ekl) we should consider refactoring LazyBlockList so stats can be + extracted without using an out-of-band actor.""" + + def __init__(self, max_stats=1000): + # Mapping from uuid -> (task_id -> list of blocks statistics). + self.metadata = collections.defaultdict(dict) + self.last_time = {} + self.start_time = {} + self.max_stats = max_stats + self.fifo_queue = [] + + # Assign dataset uuids with a global counter. + self.next_dataset_id = 0 + # Dataset metadata to be queried directly by DashboardHead api. + self.datasets: Dict[str, Any] = {} + + # Ray Data dashboard metrics + # Everything is a gauge because we need to reset all of + # a dataset's metrics to 0 after each one finishes execution. + op_tags_keys = ("dataset", "operator") + + # TODO(scottjlee): move these overview metrics as fields into a + # separate dataclass, similar to OpRuntimeMetrics. + self.spilled_bytes = Gauge( + "data_spilled_bytes", + description="""Bytes spilled by dataset operators.
+ DataContext.enable_get_object_locations_for_metrics + must be set to True to report this metric""", + tag_keys=op_tags_keys, + ) + self.allocated_bytes = Gauge( + "data_allocated_bytes", + description="Bytes allocated by dataset operators", + tag_keys=op_tags_keys, + ) + self.freed_bytes = Gauge( + "data_freed_bytes", + description="Bytes freed by dataset operators", + tag_keys=op_tags_keys, + ) + self.current_bytes = Gauge( + "data_current_bytes", + description="Bytes currently in memory store used by dataset operators", + tag_keys=op_tags_keys, + ) + self.cpu_usage_cores = Gauge( + "data_cpu_usage_cores", + description="CPUs allocated to dataset operators", + tag_keys=op_tags_keys, + ) + self.gpu_usage_cores = Gauge( + "data_gpu_usage_cores", + description="GPUs allocated to dataset operators", + tag_keys=op_tags_keys, + ) + self.output_bytes = Gauge( + "data_output_bytes", + description="Bytes outputted by dataset operators", + tag_keys=op_tags_keys, + ) + self.output_rows = Gauge( + "data_output_rows", + description="Rows outputted by dataset operators", + tag_keys=op_tags_keys, + ) + + # === Metrics from OpRuntimeMetrics === + # Inputs-related metrics + self.execution_metrics_inputs = ( + self._create_prometheus_metrics_for_execution_metrics( + metrics_group=MetricsGroup.INPUTS, + tag_keys=op_tags_keys, + ) + ) + + # Outputs-related metrics + self.execution_metrics_outputs = ( + self._create_prometheus_metrics_for_execution_metrics( + metrics_group=MetricsGroup.OUTPUTS, + tag_keys=op_tags_keys, + ) + ) + + # Task-related metrics + self.execution_metrics_tasks = ( + self._create_prometheus_metrics_for_execution_metrics( + metrics_group=MetricsGroup.TASKS, + tag_keys=op_tags_keys, + ) + ) + + # Object store memory-related metrics + self.execution_metrics_obj_store_memory = ( + self._create_prometheus_metrics_for_execution_metrics( + metrics_group=MetricsGroup.OBJECT_STORE_MEMORY, + tag_keys=op_tags_keys, + ) + ) + + # Miscellaneous metrics + self.execution_metrics_misc = ( + self._create_prometheus_metrics_for_execution_metrics( + metrics_group=MetricsGroup.MISC, + tag_keys=op_tags_keys, + ) + ) + + iter_tag_keys = ("dataset",) + self.iter_total_blocked_s = Gauge( + "data_iter_total_blocked_seconds", + description="Seconds user thread is blocked by iter_batches()", + tag_keys=iter_tag_keys, + ) + self.iter_user_s = Gauge( + "data_iter_user_seconds", + description="Seconds spent in user code", + tag_keys=iter_tag_keys, + ) + self.iter_initialize_s = Gauge( + "data_iter_initialize_seconds", + description="Seconds spent in iterator initialization code", + tag_keys=iter_tag_keys, + ) + + def _create_prometheus_metrics_for_execution_metrics( + self, metrics_group: MetricsGroup, tag_keys: Tuple[str, ...] + ) -> Dict[str, Gauge]: + metrics = {} + for metric in OpRuntimeMetrics.get_metrics(): + if not metric.metrics_group == metrics_group: + continue + metric_name = f"data_{metric.name}" + metric_description = metric.description + metrics[metric.name] = Gauge( + metric_name, + description=metric_description, + tag_keys=tag_keys, + ) + return metrics + + def record_start(self, stats_uuid): + self.start_time[stats_uuid] = time.perf_counter() + self.fifo_queue.append(stats_uuid) + # Purge the oldest stats if the limit is exceeded. 
+ if len(self.fifo_queue) > self.max_stats: + uuid = self.fifo_queue.pop(0) + if uuid in self.start_time: + del self.start_time[uuid] + if uuid in self.last_time: + del self.last_time[uuid] + if uuid in self.metadata: + del self.metadata[uuid] + + def record_task( + self, stats_uuid: str, task_idx: int, blocks_metadata: List[BlockMetadata] + ): + # Null out the schema to keep the stats size small. + # TODO(chengsu): ideally schema should be null out on caller side. + for metadata in blocks_metadata: + metadata.schema = None + if stats_uuid in self.start_time: + self.metadata[stats_uuid][task_idx] = blocks_metadata + self.last_time[stats_uuid] = time.perf_counter() + + def get(self, stats_uuid): + if stats_uuid not in self.metadata: + return {}, 0.0 + return ( + self.metadata[stats_uuid], + self.last_time[stats_uuid] - self.start_time[stats_uuid], + ) + + def _get_stats_dict_size(self): + return len(self.start_time), len(self.last_time), len(self.metadata) + + def get_dataset_id(self): + dataset_id = str(self.next_dataset_id) + self.next_dataset_id += 1 + return dataset_id + + def update_metrics(self, execution_metrics, iteration_metrics): + for metrics in execution_metrics: + self.update_execution_metrics(*metrics) + for metrics in iteration_metrics: + self.update_iteration_metrics(*metrics) + + def update_execution_metrics( + self, + dataset_tag: str, + op_metrics: List[Dict[str, Union[int, float]]], + operator_tags: List[str], + state: Dict[str, Any], + ): + for stats, operator_tag in zip(op_metrics, operator_tags): + tags = self._create_tags(dataset_tag, operator_tag) + + self.spilled_bytes.set(stats.get("obj_store_mem_spilled", 0), tags) + self.freed_bytes.set(stats.get("obj_store_mem_freed", 0), tags) + self.current_bytes.set(stats.get("obj_store_mem_used", 0), tags) + self.output_bytes.set(stats.get("bytes_task_outputs_generated", 0), tags) + self.output_rows.set(stats.get("rows_task_outputs_generated", 0), tags) + self.cpu_usage_cores.set(stats.get("cpu_usage", 0), tags) + self.gpu_usage_cores.set(stats.get("gpu_usage", 0), tags) + + for field_name, prom_metric in self.execution_metrics_inputs.items(): + prom_metric.set(stats.get(field_name, 0), tags) + + for field_name, prom_metric in self.execution_metrics_outputs.items(): + prom_metric.set(stats.get(field_name, 0), tags) + + for field_name, prom_metric in self.execution_metrics_tasks.items(): + prom_metric.set(stats.get(field_name, 0), tags) + + for ( + field_name, + prom_metric, + ) in self.execution_metrics_obj_store_memory.items(): + prom_metric.set(stats.get(field_name, 0), tags) + + for field_name, prom_metric in self.execution_metrics_misc.items(): + prom_metric.set(stats.get(field_name, 0), tags) + + # This update is called from a dataset's executor, + # so all tags should contain the same dataset + self.update_dataset(dataset_tag, state) + + def update_iteration_metrics( + self, + stats: "DatasetStats", + dataset_tag, + ): + tags = self._create_tags(dataset_tag) + self.iter_total_blocked_s.set(stats.iter_total_blocked_s.get(), tags) + self.iter_user_s.set(stats.iter_user_s.get(), tags) + self.iter_initialize_s.set(stats.iter_initialize_s.get(), tags) + + def register_dataset(self, job_id: str, dataset_tag: str, operator_tags: List[str]): + self.datasets[dataset_tag] = { + "job_id": job_id, + "state": "RUNNING", + "progress": 0, + "total": 0, + "start_time": time.time(), + "end_time": None, + "operators": { + operator: { + "state": "RUNNING", + "progress": 0, + "total": 0, + } + for operator in operator_tags + }, + } + + 
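For reference, a `register_dataset()` call above seeds `self.datasets` with one nested dict per dataset; the sketch below shows the shape of such an entry (the tag, job id, operator names, and timestamp are hypothetical, not real output):

# Hypothetical entry in self.datasets right after register_dataset();
# update_dataset() later merges new state into this dict.
example_entry = {
    "job_id": "01000000",
    "state": "RUNNING",
    "progress": 0,
    "total": 0,
    "start_time": 1700000000.0,
    "end_time": None,
    "operators": {
        "ReadParquet_0": {"state": "RUNNING", "progress": 0, "total": 0},
        "Map_1": {"state": "RUNNING", "progress": 0, "total": 0},
    },
}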
def update_dataset(self, dataset_tag, state): + self.datasets[dataset_tag].update(state) + + def get_datasets(self, job_id: Optional[str] = None): + if not job_id: + return self.datasets + return {k: v for k, v in self.datasets.items() if v["job_id"] == job_id} + + def _create_tags(self, dataset_tag: str, operator_tag: Optional[str] = None): + tags = {"dataset": dataset_tag} + if operator_tag is not None: + tags["operator"] = operator_tag + return tags + + +# Creating/getting an actor from multiple threads is not safe. +# https://github.com/ray-project/ray/issues/41324 +_stats_actor_lock: threading.RLock = threading.RLock() + + +def _get_or_create_stats_actor(): + ctx = DataContext.get_current() + scheduling_strategy = ctx.scheduling_strategy + if not ray.util.client.ray.is_connected(): + # Pin the stats actor to the local node + # so it fate-shares with the driver. + scheduling_strategy = NodeAffinitySchedulingStrategy( + ray.get_runtime_context().get_node_id(), + soft=False, + ) + with _stats_actor_lock: + return _StatsActor.options( + name=STATS_ACTOR_NAME, + namespace=STATS_ACTOR_NAMESPACE, + get_if_exists=True, + lifetime="detached", + scheduling_strategy=scheduling_strategy, + ).remote() + + +class _StatsManager: + """A Class containing util functions that manage remote calls to _StatsActor. + + This class collects stats from execution and iteration codepaths and keeps + track of the latest snapshot. + + An instance of this class runs a single background thread that periodically + forwards the latest execution/iteration stats to the _StatsActor. + + This thread will terminate itself after being inactive (meaning that there are + no active executors or iterators) for STATS_ACTOR_UPDATE_THREAD_INACTIVITY_LIMIT + iterations. After terminating, a new thread will start if more calls are made + to this class. + """ + + # Interval for making remote calls to the _StatsActor. + STATS_ACTOR_UPDATE_INTERVAL_SECONDS = 5 + + # After this many iterations of inactivity, + # _StatsManager._update_thread will close itself. + UPDATE_THREAD_INACTIVITY_LIMIT = 5 + + def __init__(self): + # Lazily get stats actor handle to avoid circular import. 
+ self._stats_actor_handle: Optional[ActorHandle] = None + self._stats_actor_cluster_id = None + + # Last execution stats snapshots for all executing datasets + self._last_execution_stats = {} + # Last iteration stats snapshots for all running iterators + self._last_iteration_stats: Dict[ + str, Tuple[Dict[str, str], "DatasetStats"] + ] = {} + # Lock for updating stats snapshots + self._stats_lock: threading.Lock = threading.Lock() + + # Background thread to make remote calls to _StatsActor + self._update_thread: Optional[threading.Thread] = None + self._update_thread_lock: threading.Lock = threading.Lock() + + def _stats_actor(self, create_if_not_exists=True) -> Optional[ActorHandle]: + if ray._private.worker._global_node is None: + raise RuntimeError("Global node is not initialized.") + current_cluster_id = ray._private.worker._global_node.cluster_id + if ( + self._stats_actor_handle is None + or self._stats_actor_cluster_id != current_cluster_id + ): + if create_if_not_exists: + self._stats_actor_handle = _get_or_create_stats_actor() + else: + try: + self._stats_actor_handle = ray.get_actor( + name=STATS_ACTOR_NAME, namespace=STATS_ACTOR_NAMESPACE + ) + except ValueError: + return None + self._stats_actor_cluster_id = current_cluster_id + return self._stats_actor_handle + + def _start_thread_if_not_running(self): + # Start background update thread if not running. + with self._update_thread_lock: + if self._update_thread is None or not self._update_thread.is_alive(): + + def _run_update_loop(): + iter_stats_inactivity = 0 + while True: + if self._last_iteration_stats or self._last_execution_stats: + try: + # Do not create _StatsActor if it doesn't exist because + # this thread can be running even after the cluster is + # shutdown. Creating an actor will automatically start + # a new cluster. + stats_actor = self._stats_actor( + create_if_not_exists=False + ) + if stats_actor is None: + continue + stats_actor.update_metrics.remote( + execution_metrics=list( + self._last_execution_stats.values() + ), + iteration_metrics=list( + self._last_iteration_stats.values() + ), + ) + iter_stats_inactivity = 0 + except Exception: + logger.debug( + "Error occurred during remote call to _StatsActor.", + exc_info=True, + ) + return + else: + iter_stats_inactivity += 1 + if ( + iter_stats_inactivity + >= _StatsManager.UPDATE_THREAD_INACTIVITY_LIMIT + ): + logger.debug( + "Terminating StatsManager thread due to inactivity." + ) + return + time.sleep(StatsManager.STATS_ACTOR_UPDATE_INTERVAL_SECONDS) + + self._update_thread = threading.Thread( + target=_run_update_loop, daemon=True + ) + self._update_thread.start() + + # Execution methods + + def update_execution_metrics( + self, + dataset_tag: str, + op_metrics: List[OpRuntimeMetrics], + operator_tags: List[str], + state: Dict[str, Any], + force_update: bool = False, + ): + op_metrics_dicts = [metric.as_dict() for metric in op_metrics] + args = (dataset_tag, op_metrics_dicts, operator_tags, state) + if force_update: + self._stats_actor().update_execution_metrics.remote(*args) + else: + with self._stats_lock: + self._last_execution_stats[dataset_tag] = args + self._start_thread_if_not_running() + + def clear_last_execution_stats(self, dataset_tag: str): + # After dataset completes execution, remove cached execution stats. + # Marks the dataset as finished on job page's Ray Data Overview. 
+ with self._stats_lock: + if dataset_tag in self._last_execution_stats: + del self._last_execution_stats[dataset_tag] + + # Iteration methods + + def update_iteration_metrics(self, stats: "DatasetStats", dataset_tag: str): + with self._stats_lock: + self._last_iteration_stats[dataset_tag] = (stats, dataset_tag) + self._start_thread_if_not_running() + + def clear_iteration_metrics(self, dataset_tag: str): + # Delete the last iteration stats so that update thread will have + # a chance to terminate. + # Note we don't reset the actual metric values through the StatsActor + # since the value is essentially a counter value. See + # https://github.com/ray-project/ray/pull/48618 for more context. + with self._stats_lock: + if dataset_tag in self._last_iteration_stats: + del self._last_iteration_stats[dataset_tag] + + # Other methods + + def register_dataset_to_stats_actor(self, dataset_tag, operator_tags): + self._stats_actor().register_dataset.remote( + ray.get_runtime_context().get_job_id(), + dataset_tag, + operator_tags, + ) + + def get_dataset_id_from_stats_actor(self) -> str: + try: + return ray.get(self._stats_actor().get_dataset_id.remote()) + except Exception: + # Getting dataset id from _StatsActor may fail, in this case + # fall back to uuid4 + return uuid4().hex + + +StatsManager = _StatsManager() + + +class DatasetStats: + """Holds the execution times for a given Dataset. + + This object contains a reference to the parent Dataset's stats as well, + but not the Dataset object itself, to allow its blocks to be dropped from + memory.""" + + def __init__( + self, + *, + metadata: StatsDict, + parent: Union[Optional["DatasetStats"], List["DatasetStats"]], + needs_stats_actor: bool = False, + stats_uuid: str = None, + base_name: str = None, + ): + """Create dataset stats. + + Args: + metadata: Dict of operators used to create this Dataset from the + previous one. Typically one entry, e.g., {"map": [...]}. + parent: Reference to parent Dataset's stats, or a list of parents + if there are multiple. + needs_stats_actor: Whether this Dataset's stats needs a stats actor for + stats collection. This is currently only used for Datasets using a + lazy datasource (i.e. a LazyBlockList). + stats_uuid: The uuid for the stats, used to fetch the right stats + from the stats actor. + base_name: The name of the base operation for a multi-operator operation. + """ + + self.metadata: StatsDict = metadata + if parent is not None and not isinstance(parent, list): + parent = [parent] + self.parents: List["DatasetStats"] = parent or [] + self.number: int = ( + 0 if not self.parents else max(p.number for p in self.parents) + 1 + ) + self.base_name = base_name + # TODO(ekl) deprecate and remove the notion of dataset UUID once we move + # fully to streaming execution. + self.dataset_uuid: str = "unknown_uuid" + self.time_total_s: float = 0 + self.needs_stats_actor = needs_stats_actor + self.stats_uuid = stats_uuid + + # Streaming executor stats + self.streaming_exec_schedule_s: Timer = Timer() + + # Iteration stats, filled out if the user iterates over the dataset. 
+ self.iter_wait_s: Timer = Timer() + self.iter_get_s: Timer = Timer() + self.iter_next_batch_s: Timer = Timer() + self.iter_format_batch_s: Timer = Timer() + self.iter_collate_batch_s: Timer = Timer() + self.iter_finalize_batch_s: Timer = Timer() + self.iter_total_blocked_s: Timer = Timer() + self.iter_user_s: Timer = Timer() + self.iter_initialize_s: Timer = Timer() + self.iter_total_s: Timer = Timer() + self.extra_metrics = {} + + # Block fetch stats during iteration. + # These are stats about locations of blocks when the iterator is trying to + # consume them. The iteration performance will be affected depending on + # whether the block is in the local object store of the node where the + # iterator is running. + # This serves as an indicator of block prefetching effectiveness. + self.iter_blocks_local: int = 0 + self.iter_blocks_remote: int = 0 + self.iter_unknown_location: int = 0 + + # Memory usage stats + self.global_bytes_spilled: int = 0 + self.global_bytes_restored: int = 0 + self.dataset_bytes_spilled: int = 0 + + # Streaming split coordinator stats (dataset level) + self.streaming_split_coordinator_s: Timer = Timer() + + @property + def stats_actor(self): + return _get_or_create_stats_actor() + + def child_builder( + self, name: str, override_start_time: Optional[float] = None + ) -> _DatasetStatsBuilder: + """Start recording stats for an op of the given name (e.g., map).""" + return _DatasetStatsBuilder(name, self, override_start_time) + + def to_summary(self) -> "DatasetStatsSummary": + """Generate a `DatasetStatsSummary` object from the given `DatasetStats` + object, which can be used to generate a summary string.""" + if self.needs_stats_actor: + ac = self.stats_actor + # TODO(chengsu): this is a super hack, clean it up. + stats_map, self.time_total_s = ray.get(ac.get.remote(self.stats_uuid)) + # Only populate stats when stats from all read tasks are ready at + # stats actor. + if len(stats_map.items()) == len(self.metadata["Read"]): + self.metadata["Read"] = [] + for _, blocks_metadata in sorted(stats_map.items()): + self.metadata["Read"] += blocks_metadata + + operators_stats = [] + is_sub_operator = len(self.metadata) > 1 + for name, meta in self.metadata.items(): + operators_stats.append( + OperatorStatsSummary.from_block_metadata( + name, + meta, + is_sub_operator=is_sub_operator, + ) + ) + + iter_stats = IterStatsSummary( + self.iter_wait_s, + self.iter_get_s, + self.iter_next_batch_s, + self.iter_format_batch_s, + self.iter_collate_batch_s, + self.iter_finalize_batch_s, + self.iter_total_blocked_s, + self.iter_user_s, + self.iter_initialize_s, + self.iter_total_s, + self.streaming_split_coordinator_s, + self.iter_blocks_local, + self.iter_blocks_remote, + self.iter_unknown_location, + ) + stats_summary_parents = [] + if self.parents is not None: + stats_summary_parents = [p.to_summary() for p in self.parents] + streaming_exec_schedule_s = ( + self.streaming_exec_schedule_s.get() + if self.streaming_exec_schedule_s + else 0 + ) + return DatasetStatsSummary( + operators_stats, + iter_stats, + stats_summary_parents, + self.number, + self.dataset_uuid, + self.time_total_s, + self.base_name, + self.extra_metrics, + self.global_bytes_spilled, + self.global_bytes_restored, + self.dataset_bytes_spilled, + streaming_exec_schedule_s, + ) + + def runtime_metrics(self) -> str: + """Generate a string representing the runtime metrics of a Dataset. This is + a high level summary of the time spent in Ray Data code broken down by operator. 
+ It also includes the time spent in the scheduler. Times are shown as the total + time for each operator and percentages of time are shown as a fraction of the + total time for the whole dataset.""" + return self.to_summary().runtime_metrics() + + +@DeveloperAPI +@dataclass +class DatasetStatsSummary: + operators_stats: List["OperatorStatsSummary"] + iter_stats: "IterStatsSummary" + parents: List["DatasetStatsSummary"] + number: int + dataset_uuid: str + time_total_s: float + base_name: str + extra_metrics: Dict[str, Any] + global_bytes_spilled: int + global_bytes_restored: int + dataset_bytes_spilled: int + streaming_exec_schedule_s: float + + def to_string( + self, + already_printed: Optional[Set[str]] = None, + include_parent: bool = True, + add_global_stats=True, + ) -> str: + """Return a human-readable summary of this Dataset's stats. + + Args: + already_printed: Set of operator IDs that have already had its stats printed + out. + include_parent: If true, also include parent stats summary; otherwise, only + log stats of the latest operator. + add_global_stats: If true, includes global stats to this summary. + Returns: + String with summary statistics for executing the Dataset. + """ + if already_printed is None: + already_printed = set() + + out = "" + if self.parents and include_parent: + for p in self.parents: + parent_sum = p.to_string(already_printed, add_global_stats=False) + if parent_sum: + out += parent_sum + out += "\n" + operators_stats_summary = None + if len(self.operators_stats) == 1: + operators_stats_summary = self.operators_stats[0] + operator_name = operators_stats_summary.operator_name + operator_uuid = self.dataset_uuid + operator_name + out += "Operator {} {}: ".format(self.number, operator_name) + if operator_uuid in already_printed: + out += "[execution cached]\n" + else: + already_printed.add(operator_uuid) + out += str(operators_stats_summary) + elif len(self.operators_stats) > 1: + rounded_total = round(self.time_total_s, 2) + if rounded_total <= 0: + # Handle -0.0 case. 
+ rounded_total = 0 + out += "Operator {} {}: executed in {}s\n".format( + self.number, self.base_name, rounded_total + ) + for n, operators_stats_summary in enumerate(self.operators_stats): + operator_name = operators_stats_summary.operator_name + operator_uuid = self.dataset_uuid + operator_name + out += "\n" + out += "\tSuboperator {} {}: ".format(n, operator_name) + if operator_uuid in already_printed: + out += "\t[execution cached]\n" + else: + already_printed.add(operator_uuid) + out += str(operators_stats_summary) + verbose_stats_logs = DataContext.get_current().verbose_stats_logs + if verbose_stats_logs and self.extra_metrics: + indent = ( + "\t" + if operators_stats_summary and operators_stats_summary.is_sub_operator + else "" + ) + out += indent + out += "* Extra metrics: " + str(self.extra_metrics) + "\n" + out += str(self.iter_stats) + + if len(self.operators_stats) > 0 and add_global_stats: + mb_spilled = round(self.global_bytes_spilled / 1e6) + mb_restored = round(self.global_bytes_restored / 1e6) + if mb_spilled or mb_restored: + out += "\nCluster memory:\n" + out += "* Spilled to disk: {}MB\n".format(mb_spilled) + out += "* Restored from disk: {}MB\n".format(mb_restored) + + dataset_mb_spilled = round(self.dataset_bytes_spilled / 1e6) + if dataset_mb_spilled: + out += "\nDataset memory:\n" + out += "* Spilled to disk: {}MB\n".format(dataset_mb_spilled) + + # For throughput, we compute both an observed Ray Data dataset throughput + # and an estimated single node dataset throughput. + + # The observed dataset throughput is computed by dividing the total number + # of rows produced by the total wall time of the dataset (i.e. from start to + # finish how long did the dataset take to be processed). With the recursive + # nature of the DatasetStatsSummary, we use get_total_wall_time to determine + # the total wall time (this finds the difference between the earliest start + # and latest end for any block in any operator). + + # The estimated single node dataset throughput is computed by dividing the + # total number of rows produced by the sum of the wall times across all blocks + # of all operators. This assumes that on a single node the work done would + # be equivalent, with no concurrency. + output_num_rows = self.operators_stats[-1].output_num_rows + total_num_out_rows = output_num_rows["sum"] if output_num_rows else 0 + wall_time = self.get_total_wall_time() + total_time_all_blocks = self.get_total_time_all_blocks() + if total_num_out_rows and wall_time and total_time_all_blocks: + out += "\n" + out += "Dataset throughput:\n" + out += ( + "\t* Ray Data throughput:" + f" {total_num_out_rows / wall_time} " + "rows/s\n" + ) + out += ( + "\t* Estimated single node throughput:" + f" {total_num_out_rows / total_time_all_blocks} " + "rows/s\n" + ) + if verbose_stats_logs and add_global_stats: + out += "\n" + self.runtime_metrics() + + return out
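A worked example of the two throughput figures computed above (all numbers hypothetical): with 1,000,000 output rows, a 100 s span from the earliest block start to the latest block end, and 400 s of wall time summed across every block of every operator, the report would show:

total_num_out_rows = 1_000_000
wall_time = 100.0              # get_total_wall_time(), seconds
total_time_all_blocks = 400.0  # get_total_time_all_blocks(), seconds

ray_data_throughput = total_num_out_rows / wall_time               # 10,000 rows/s
single_node_estimate = total_num_out_rows / total_time_all_blocks  # 2,500 rows/s

+ + @staticmethod + def _collect_dataset_stats_summaries( + curr: "DatasetStatsSummary", + ) -> List["DatasetStatsSummary"]: + summs = [] + # TODO: Do operators ever have multiple parents? Do we need to deduplicate?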
+ for p in curr.parents: + if p and p.parents: + summs.extend(DatasetStatsSummary._collect_dataset_stats_summaries(p)) + return summs + [curr] + + @staticmethod + def _find_start_and_end(summ: "DatasetStatsSummary") -> Tuple[float, float]: + earliest_start = min(ops.earliest_start_time for ops in summ.operators_stats) + latest_end = max(ops.latest_end_time for ops in summ.operators_stats) + return earliest_start, latest_end + + def runtime_metrics(self) -> str: + total_wall_time = self.get_total_wall_time() + + def fmt_line(name: str, time: float) -> str: + return f"* {name}: {fmt(time)} ({time / total_wall_time * 100:.3f}%)\n" + + summaries = DatasetStatsSummary._collect_dataset_stats_summaries(self) + out = "Runtime Metrics:\n" + for summ in summaries: + if len(summ.operators_stats) > 0: + earliest_start, latest_end = DatasetStatsSummary._find_start_and_end( + summ + ) + op_total_time = latest_end - earliest_start + out += fmt_line(summ.base_name, op_total_time) + out += fmt_line("Scheduling", self.streaming_exec_schedule_s) + out += fmt_line("Total", total_wall_time) + return out + + def __repr__(self, level=0) -> str: + indent = leveled_indent(level) + operators_stats = "\n".join( + [ss.__repr__(level + 2) for ss in self.operators_stats] + ) + parent_stats = "\n".join([ps.__repr__(level + 2) for ps in self.parents]) + extra_metrics = "\n".join( + f"{leveled_indent(level + 2)}{k}: {v}," + for k, v in self.extra_metrics.items() + ) + + # Handle formatting case for empty outputs. + operators_stats = ( + f"\n{operators_stats},\n{indent} " if operators_stats else "" + ) + parent_stats = f"\n{parent_stats},\n{indent} " if parent_stats else "" + extra_metrics = f"\n{extra_metrics}\n{indent} " if extra_metrics else "" + return ( + f"{indent}DatasetStatsSummary(\n" + f"{indent} dataset_uuid={self.dataset_uuid},\n" + f"{indent} base_name={self.base_name},\n" + f"{indent} number={self.number},\n" + f"{indent} extra_metrics={{{extra_metrics}}},\n" + f"{indent} operators_stats=[{operators_stats}],\n" + f"{indent} iter_stats={self.iter_stats.__repr__(level+1)},\n" + f"{indent} global_bytes_spilled={self.global_bytes_spilled / 1e6}MB,\n" + f"{indent} global_bytes_restored={self.global_bytes_restored / 1e6}MB,\n" + f"{indent} dataset_bytes_spilled={self.dataset_bytes_spilled / 1e6}MB,\n" + f"{indent} parents=[{parent_stats}],\n" + f"{indent})" + ) + + def get_total_wall_time(self) -> float: + """Calculate the total wall time for the dataset, this is done by finding + the earliest start time and latest end time for any block in any operator. + The wall time is the difference of these two times. 
+ """ + start_ends = [ + DatasetStatsSummary._find_start_and_end(summ) + for summ in DatasetStatsSummary._collect_dataset_stats_summaries(self) + if len(summ.operators_stats) > 0 + ] + if len(start_ends) == 0: + return 0 + else: + earliest_start = min(start_end[0] for start_end in start_ends) + latest_end = max(start_end[1] for start_end in start_ends) + return latest_end - earliest_start + + def get_total_time_all_blocks(self) -> float: + """Calculate the sum of the wall times across all blocks of all operators.""" + summaries = DatasetStatsSummary._collect_dataset_stats_summaries(self) + return sum( + ( + sum( + ops.wall_time.get("sum", 0) if ops.wall_time else 0 + for ops in summ.operators_stats + ) + ) + for summ in summaries + ) + + def get_total_cpu_time(self) -> float: + parent_sum = sum(p.get_total_cpu_time() for p in self.parents) + return parent_sum + sum( + ss.cpu_time.get("sum", 0) for ss in self.operators_stats + ) + + def get_max_heap_memory(self) -> float: + parent_memory = [p.get_max_heap_memory() for p in self.parents] + parent_max = max(parent_memory) if parent_memory else 0 + if not self.operators_stats: + return parent_max + + return max( + parent_max, + *[ss.memory.get("max", 0) for ss in self.operators_stats], + ) + + +@dataclass +class OperatorStatsSummary: + operator_name: str + # Whether the operator associated with this OperatorStatsSummary object + # is a suboperator + is_sub_operator: bool + # This is the total walltime of the entire operator, typically obtained from + # `DatasetStats.time_total_s`. An important distinction is that this is the + # overall runtime of the operator, pulled from the stats actor, whereas the + # computed walltimes in `self.wall_time` are calculated on a operator level. + time_total_s: float + earliest_start_time: float + latest_end_time: float + # String summarizing high-level statistics from executing the operator + block_execution_summary_str: str + # The fields below are dicts with stats aggregated across blocks + # processed in this operator. For example: + # {"min": ..., "max": ..., "mean": ..., "sum": ...} + wall_time: Optional[Dict[str, float]] = None + cpu_time: Optional[Dict[str, float]] = None + udf_time: Optional[Dict[str, float]] = None + # memory: no "sum" stat + memory: Optional[Dict[str, float]] = None + output_num_rows: Optional[Dict[str, float]] = None + output_size_bytes: Optional[Dict[str, float]] = None + # node_count: "count" stat instead of "sum" + node_count: Optional[Dict[str, float]] = None + task_rows: Optional[Dict[str, float]] = None + + @classmethod + def from_block_metadata( + cls, + operator_name: str, + block_metas: List[BlockMetadata], + is_sub_operator: bool, + ) -> "OperatorStatsSummary": + """Calculate the stats for a operator from a given list of blocks, + and generates a `OperatorStatsSummary` object with the results. + + Args: + block_metas: List of `BlockMetadata` to calculate stats of + operator_name: Name of operator associated with `blocks` + is_sub_operator: Whether this set of blocks belongs to a sub operator. + Returns: + A `OperatorStatsSummary` object initialized with the calculated statistics + """ + exec_stats = [m.exec_stats for m in block_metas if m.exec_stats is not None] + rounded_total = 0 + time_total_s = 0 + earliest_start_time, latest_end_time = 0, 0 + + if exec_stats: + # Calculate the total execution time of operator as + # the difference between the latest end time and + # the earliest start time of all blocks in the operator. 
+ earliest_start_time = min(s.start_time_s for s in exec_stats) + latest_end_time = max(s.end_time_s for s in exec_stats) + time_total_s = latest_end_time - earliest_start_time + + if is_sub_operator: + exec_summary_str = "{} blocks produced\n".format(len(exec_stats)) + else: + if exec_stats: + rounded_total = round(time_total_s, 2) + if rounded_total <= 0: + # Handle -0.0 case. + rounded_total = 0 + exec_summary_str = "{} blocks produced in {}s".format( + len(exec_stats), rounded_total + ) + else: + exec_summary_str = "" + exec_summary_str += "\n" + + task_rows = collections.defaultdict(int) + for meta in block_metas: + if meta.num_rows is not None and meta.exec_stats is not None: + task_rows[meta.exec_stats.task_idx] += meta.num_rows + task_rows_stats = None + if len(task_rows) > 0: + task_rows_stats = { + "min": min(task_rows.values()), + "max": max(task_rows.values()), + "mean": int(np.mean(list(task_rows.values()))), + "count": len(task_rows), + } + exec_summary_str = "{} tasks executed, {}".format( + len(task_rows), exec_summary_str + ) + + wall_time_stats, cpu_stats, memory_stats, udf_stats = None, None, None, None + if exec_stats: + wall_time_stats = { + "min": min([e.wall_time_s for e in exec_stats]), + "max": max([e.wall_time_s for e in exec_stats]), + "mean": np.mean([e.wall_time_s for e in exec_stats]), + "sum": sum([e.wall_time_s for e in exec_stats]), + } + cpu_stats = { + "min": min([e.cpu_time_s for e in exec_stats]), + "max": max([e.cpu_time_s for e in exec_stats]), + "mean": np.mean([e.cpu_time_s for e in exec_stats]), + "sum": sum([e.cpu_time_s for e in exec_stats]), + } + + memory_stats_mb = [ + round(e.max_rss_bytes / (1024 * 1024), 2) for e in exec_stats + ] + memory_stats = { + "min": min(memory_stats_mb), + "max": max(memory_stats_mb), + "mean": int(np.mean(memory_stats_mb)), + } + + udf_stats = { + "min": min([e.udf_time_s for e in exec_stats]), + "max": max([e.udf_time_s for e in exec_stats]), + "mean": np.mean([e.udf_time_s for e in exec_stats]), + "sum": sum([e.udf_time_s for e in exec_stats]), + } + + output_num_rows_stats = None + output_num_rows = [m.num_rows for m in block_metas if m.num_rows is not None] + if output_num_rows: + output_num_rows_stats = { + "min": min(output_num_rows), + "max": max(output_num_rows), + "mean": int(np.mean(output_num_rows)), + "sum": sum(output_num_rows), + } + + output_size_bytes_stats = None + output_size_bytes = [ + m.size_bytes for m in block_metas if m.size_bytes is not None + ] + if output_size_bytes: + output_size_bytes_stats = { + "min": min(output_size_bytes), + "max": max(output_size_bytes), + "mean": int(np.mean(output_size_bytes)), + "sum": sum(output_size_bytes), + } + + node_counts_stats = None + if exec_stats: + node_tasks = collections.defaultdict(set) + for s in exec_stats: + node_tasks[s.node_id].add(s.task_idx) + + node_counts = {node: len(tasks) for node, tasks in node_tasks.items()} + node_counts_stats = { + "min": min(node_counts.values()), + "max": max(node_counts.values()), + "mean": int(np.mean(list(node_counts.values()))), + "count": len(node_counts), + } + + return OperatorStatsSummary( + operator_name=operator_name, + is_sub_operator=is_sub_operator, + time_total_s=time_total_s, + earliest_start_time=earliest_start_time, + latest_end_time=latest_end_time, + block_execution_summary_str=exec_summary_str, + wall_time=wall_time_stats, + cpu_time=cpu_stats, + udf_time=udf_stats, + memory=memory_stats, + output_num_rows=output_num_rows_stats, + output_size_bytes=output_size_bytes_stats, + 
node_count=node_counts_stats, + task_rows=task_rows_stats, + ) + + def __str__(self) -> str: + """For a given (pre-calculated) `OperatorStatsSummary` object (e.g. generated from + `OperatorStatsSummary.from_block_metadata()`), returns a human-friendly string + that summarizes operator execution statistics. + + Returns: + String with summary statistics for executing the given operator. + """ + indent = "\t" if self.is_sub_operator else "" + out = self.block_execution_summary_str + + wall_time_stats = self.wall_time + if wall_time_stats: + out += indent + out += "* Remote wall time: {} min, {} max, {} mean, {} total\n".format( + fmt(wall_time_stats["min"]), + fmt(wall_time_stats["max"]), + fmt(wall_time_stats["mean"]), + fmt(wall_time_stats["sum"]), + ) + + cpu_stats = self.cpu_time + if cpu_stats: + out += indent + out += "* Remote cpu time: {} min, {} max, {} mean, {} total\n".format( + fmt(cpu_stats["min"]), + fmt(cpu_stats["max"]), + fmt(cpu_stats["mean"]), + fmt(cpu_stats["sum"]), + ) + + udf_stats = self.udf_time + if udf_stats: + out += indent + out += "* UDF time: {} min, {} max, {} mean, {} total\n".format( + fmt(udf_stats["min"]), + fmt(udf_stats["max"]), + fmt(udf_stats["mean"]), + fmt(udf_stats["sum"]), + ) + + memory_stats = self.memory + if memory_stats: + out += indent + out += "* Peak heap memory usage (MiB): {} min, {} max, {} mean\n".format( + memory_stats["min"], + memory_stats["max"], + memory_stats["mean"], + ) + + output_num_rows_stats = self.output_num_rows + if output_num_rows_stats: + out += indent + out += ( + "* Output num rows per block: {} min, {} max, {} mean, {} total\n" + ).format( + output_num_rows_stats["min"], + output_num_rows_stats["max"], + output_num_rows_stats["mean"], + output_num_rows_stats["sum"], + ) + + output_size_bytes_stats = self.output_size_bytes + if output_size_bytes_stats: + out += indent + out += ( + "* Output size bytes per block: {} min, {} max, {} mean, {} total\n" + ).format( + output_size_bytes_stats["min"], + output_size_bytes_stats["max"], + output_size_bytes_stats["mean"], + output_size_bytes_stats["sum"], + ) + + task_rows = self.task_rows + if task_rows: + out += indent + out += ( + "* Output rows per task: {} min, {} max, {} mean, {} tasks used\n" + ).format( + task_rows["min"], + task_rows["max"], + task_rows["mean"], + task_rows["count"], + ) + + node_count_stats = self.node_count + if node_count_stats: + out += indent + out += "* Tasks per node: {} min, {} max, {} mean; {} nodes used\n".format( + node_count_stats["min"], + node_count_stats["max"], + node_count_stats["mean"], + node_count_stats["count"], + ) + if output_num_rows_stats and self.time_total_s and wall_time_stats: + # For throughput, we compute both an observed Ray Data operator throughput + # and an estimated single node operator throughput. + + # The observed Ray Data operator throughput is computed by dividing the + # total number of rows produced by the wall time of the operator, + # time_total_s. + + # The estimated single node operator throughput is computed by dividing the + # total number of rows produced by the sum of the wall times across all + # blocks of the operator. This assumes that on a single node the work done + # would be equivalent, with no concurrency.
+ total_num_out_rows = output_num_rows_stats["sum"] + out += indent + out += "* Operator throughput:\n" + out += ( + indent + "\t* Ray Data throughput:" + f" {total_num_out_rows / self.time_total_s} " + "rows/s\n" + ) + out += ( + indent + "\t* Estimated single node throughput:" + f" {total_num_out_rows / wall_time_stats['sum']} " + "rows/s\n" + ) + return out + + def __repr__(self, level=0) -> str: + """For a given (pre-calculated) `OperatorStatsSummary` object (e.g. generated from + `OperatorStatsSummary.from_block_metadata()`), returns a human-friendly string + that summarizes operator execution statistics. + + Returns: + String with summary statistics for executing the given operator. + """ + indent = leveled_indent(level) + indent += leveled_indent(1) if self.is_sub_operator else "" + + wall_time_stats = {k: fmt(v) for k, v in (self.wall_time or {}).items()} + cpu_stats = {k: fmt(v) for k, v in (self.cpu_time or {}).items()} + memory_stats = {k: fmt(v) for k, v in (self.memory or {}).items()} + output_num_rows_stats = { + k: fmt(v) for k, v in (self.output_num_rows or {}).items() + } + output_size_bytes_stats = { + k: fmt(v) for k, v in (self.output_size_bytes or {}).items() + } + node_count_stats = {k: fmt(v) for k, v in (self.node_count or {}).items()} + out = ( + f"{indent}OperatorStatsSummary(\n" + f"{indent}   operator_name='{self.operator_name}',\n" + f"{indent}   is_suboperator={self.is_sub_operator},\n" + f"{indent}   time_total_s={fmt(self.time_total_s)},\n" + # block_execution_summary_str already ends with \n + f"{indent}   block_execution_summary_str={self.block_execution_summary_str}" + f"{indent}   wall_time={wall_time_stats or None},\n" + f"{indent}   cpu_time={cpu_stats or None},\n" + f"{indent}   memory={memory_stats or None},\n" + f"{indent}   output_num_rows={output_num_rows_stats or None},\n" + f"{indent}   output_size_bytes={output_size_bytes_stats or None},\n" + f"{indent}   node_count={node_count_stats or None},\n" + f"{indent})" + ) + return out
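The min/max/mean/sum dictionaries rendered above all come from the same per-block aggregation pattern in `from_block_metadata()`; a condensed, self-contained sketch with hypothetical wall times:

import numpy as np

wall_times = [0.8, 1.2, 1.0]  # hypothetical per-block wall times, seconds
wall_time_stats = {
    "min": min(wall_times),
    "max": max(wall_times),
    "mean": float(np.mean(wall_times)),
    "sum": sum(wall_times),
}
# wall_time_stats == {"min": 0.8, "max": 1.2, "mean": 1.0, "sum": 3.0}

+ + +@dataclass +class IterStatsSummary: + # Time spent in actor based prefetching, in seconds.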
+ wait_time: Timer + # Time spent in `ray.get()`, in seconds + get_time: Timer + # Time spent in batch building, in seconds + next_time: Timer + # Time spent in `_format_batch_()`, in seconds + format_time: Timer + # Time spent in collate fn, in seconds + collate_time: Timer + # Time spent in finalize_fn, in seconds + finalize_batch_time: Timer + # Total time user thread is blocked by iter_batches + block_time: Timer + # Time spent in user code, in seconds + user_time: Timer + initialize_time: Timer + # Total time taken by Dataset iterator, in seconds + total_time: Timer + # Time spent in streaming split coordinator + streaming_split_coord_time: Timer + # Num of blocks that are in local object store + iter_blocks_local: int + # Num of blocks that are in remote node and have to fetch locally + iter_blocks_remote: int + # Num of blocks with unknown locations + iter_unknown_location: int + + def __str__(self) -> str: + return self.to_string() + + def to_string(self) -> str: + out = "" + if ( + self.block_time.get() + or self.total_time.get() + or self.get_time.get() + or self.next_time.get() + or self.format_time.get() + or self.collate_time.get() + or self.finalize_batch_time.get() + ): + out += "\nDataset iterator time breakdown:\n" + if self.total_time.get(): + out += "* Total time overall: {}\n".format(fmt(self.total_time.get())) + if self.initialize_time.get(): + out += ( + " * Total time in Ray Data iterator initialization code: " + "{}\n".format(fmt(self.initialize_time.get())) + ) + if self.block_time.get(): + out += ( + " * Total time user thread is blocked by Ray Data iter_batches: " + "{}\n".format(fmt(self.block_time.get())) + ) + if self.user_time.get(): + out += " * Total execution time for user thread: {}\n".format( + fmt(self.user_time.get()) + ) + out += ( + "* Batch iteration time breakdown (summed across prefetch threads):\n" + ) + if self.get_time.get(): + out += " * In ray.get(): {} min, {} max, {} avg, {} total\n".format( + fmt(self.get_time.min()), + fmt(self.get_time.max()), + fmt(self.get_time.avg()), + fmt(self.get_time.get()), + ) + if self.next_time.get(): + batch_creation_str = ( + " * In batch creation: {} min, {} max, " "{} avg, {} total\n" + ) + out += batch_creation_str.format( + fmt(self.next_time.min()), + fmt(self.next_time.max()), + fmt(self.next_time.avg()), + fmt(self.next_time.get()), + ) + if self.format_time.get(): + format_str = ( + " * In batch formatting: {} min, {} max, " "{} avg, {} total\n" + ) + out += format_str.format( + fmt(self.format_time.min()), + fmt(self.format_time.max()), + fmt(self.format_time.avg()), + fmt(self.format_time.get()), + ) + if self.collate_time.get(): + out += " * In collate_fn: {} min, {} max, {} avg, {} total\n".format( + fmt(self.collate_time.min()), + fmt(self.collate_time.max()), + fmt(self.collate_time.avg()), + fmt(self.collate_time.get()), + ) + if self.finalize_batch_time.get(): + format_str = ( + " * In host->device transfer: {} min, {} max, {} avg, {} total\n" + ) + out += format_str.format( + fmt(self.finalize_batch_time.min()), + fmt(self.finalize_batch_time.max()), + fmt(self.finalize_batch_time.avg()), + fmt(self.finalize_batch_time.get()), + ) + if DataContext.get_current().enable_get_object_locations_for_metrics: + out += "Block locations:\n" + out += " * Num blocks local: {}\n".format(self.iter_blocks_local) + out += " * Num blocks remote: {}\n".format(self.iter_blocks_remote) + out += " * Num blocks unknown location: {}\n".format( + self.iter_unknown_location + ) + if 
self.streaming_split_coord_time.get() != 0: + out += "Streaming split coordinator overhead time: " + out += f"{fmt(self.streaming_split_coord_time.get())}\n" + + return out + + def __repr__(self, level=0) -> str: + indent = leveled_indent(level) + return ( + f"IterStatsSummary(\n" + f"{indent} wait_time={fmt(self.wait_time.get()) or None},\n" + f"{indent} get_time={fmt(self.get_time.get()) or None},\n" + f"{indent} iter_blocks_local={self.iter_blocks_local or None},\n" + f"{indent} iter_blocks_remote={self.iter_blocks_remote or None},\n" + f"{indent} iter_unknown_location={self.iter_unknown_location or None},\n" + f"{indent} next_time={fmt(self.next_time.get()) or None},\n" + f"{indent} format_time={fmt(self.format_time.get()) or None},\n" + f"{indent} user_time={fmt(self.user_time.get()) or None},\n" + f"{indent} total_time={fmt(self.total_time.get()) or None},\n" + f"{indent})" + ) diff --git a/deepseek/lib/python3.10/site-packages/ray/data/_internal/util.py b/deepseek/lib/python3.10/site-packages/ray/data/_internal/util.py new file mode 100644 index 0000000000000000000000000000000000000000..1d0b70cf6a6c97b1901d7897219b9e86b14a1ebe --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/data/_internal/util.py @@ -0,0 +1,1091 @@ +import importlib +import logging +import os +import pathlib +import random +import sys +import threading +import time +import urllib.parse +from collections import deque +from types import ModuleType +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Iterable, + Iterator, + List, + Optional, + Tuple, + TypeVar, + Union, +) + +import numpy as np + +import ray +from ray._private.utils import _get_pyarrow_version +from ray.data.context import DEFAULT_READ_OP_MIN_NUM_BLOCKS, WARN_PREFIX, DataContext + +if TYPE_CHECKING: + import pandas + import pyarrow + + from ray.data._internal.compute import ComputeStrategy + from ray.data._internal.planner.exchange.sort_task_spec import SortKey + from ray.data.block import Block, BlockMetadata, UserDefinedFunction + from ray.data.datasource import Datasource, Reader + from ray.util.placement_group import PlacementGroup + +logger = logging.getLogger(__name__) + + +KiB = 1024 # bytes +MiB = 1024 * KiB +GiB = 1024 * MiB + + +# NOTE: Make sure that these lower and upper bounds stay in sync with version +# constraints given in python/setup.py. +# Inclusive minimum pyarrow version. +MIN_PYARROW_VERSION = "6.0.1" +RAY_DISABLE_PYARROW_VERSION_CHECK = "RAY_DISABLE_PYARROW_VERSION_CHECK" +_VERSION_VALIDATED = False +_LOCAL_SCHEME = "local" +_EXAMPLE_SCHEME = "example" + + +LazyModule = Union[None, bool, ModuleType] +_pyarrow_dataset: LazyModule = None + + +class _NullSentinel: + """Sentinel value that sorts greater than any other value.""" + + def __eq__(self, other): + return isinstance(other, _NullSentinel) + + def __lt__(self, other): + return False + + def __le__(self, other): + return isinstance(other, _NullSentinel) + + def __gt__(self, other): + return True + + def __ge__(self, other): + return True + + +NULL_SENTINEL = _NullSentinel() + + +def _lazy_import_pyarrow_dataset() -> LazyModule: + global _pyarrow_dataset + if _pyarrow_dataset is None: + try: + from pyarrow import dataset as _pyarrow_dataset + except ModuleNotFoundError: + # If module is not found, set _pyarrow to False so we won't + # keep trying to import it on every _lazy_import_pyarrow() call. 
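+            # `False` is deliberately distinct from the initial `None`: `None`
+            # means "import not attempted yet", while `False` means "attempted
+            # and failed". Both are falsy, so callers must check the return
+            # value before treating it as a module.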
+ _pyarrow_dataset = False + return _pyarrow_dataset + + +def _check_pyarrow_version(): + """Check that pyarrow's version is within the supported bounds.""" + global _VERSION_VALIDATED + + if not _VERSION_VALIDATED: + if os.environ.get(RAY_DISABLE_PYARROW_VERSION_CHECK, "0") == "1": + _VERSION_VALIDATED = True + return + + version = _get_pyarrow_version() + if version is not None: + from packaging.version import parse as parse_version + + if parse_version(version) < parse_version(MIN_PYARROW_VERSION): + raise ImportError( + f"Dataset requires pyarrow >= {MIN_PYARROW_VERSION}, but " + f"{version} is installed. Reinstall with " + f'`pip install -U "pyarrow"`. ' + "If you want to disable this pyarrow version check, set the " + f"environment variable {RAY_DISABLE_PYARROW_VERSION_CHECK}=1." + ) + else: + logger.warning( + "You are using the 'pyarrow' module, but the exact version is unknown " + "(possibly carried as an internal component by another module). Please " + f"make sure you are using pyarrow >= {MIN_PYARROW_VERSION} to ensure " + "compatibility with Ray Dataset. " + "If you want to disable this pyarrow version check, set the " + f"environment variable {RAY_DISABLE_PYARROW_VERSION_CHECK}=1." + ) + _VERSION_VALIDATED = True + + +def _autodetect_parallelism( + parallelism: int, + target_max_block_size: int, + ctx: DataContext, + datasource_or_legacy_reader: Optional[Union["Datasource", "Reader"]] = None, + mem_size: Optional[int] = None, + placement_group: Optional["PlacementGroup"] = None, + avail_cpus: Optional[int] = None, +) -> Tuple[int, str, Optional[int]]: + """Returns parallelism to use and the min safe parallelism to avoid OOMs. + + This detects parallelism using the following heuristics, applied in order: + + 1) We start with the default value of 200. This can be overridden by + setting the `read_op_min_num_blocks` attribute of + :class:`~ray.data.context.DataContext`. + 2) Min block size. If the parallelism would make blocks smaller than this + threshold, the parallelism is reduced to avoid the overhead of tiny blocks. + 3) Max block size. If the parallelism would make blocks larger than this + threshold, the parallelism is increased to avoid OOMs during processing. + 4) Available CPUs. If the parallelism cannot make use of all the available + CPUs in the cluster, the parallelism is increased until it can. + + Args: + parallelism: The user-requested parallelism, or -1 for auto-detection. + target_max_block_size: The target max block size to + produce. We pass this separately from the + DatasetContext because it may be set per-op instead of + per-Dataset. + ctx: The current Dataset context to use for configs. + datasource_or_legacy_reader: The datasource or legacy reader, to be used for + data size estimation. + mem_size: If passed, then used to compute the parallelism according to + target_max_block_size. + placement_group: The placement group that this Dataset + will execute inside, if any. + avail_cpus: Override avail cpus detection (for testing only). + + Returns: + Tuple of detected parallelism (only if -1 was specified), the reason + for the detected parallelism (only if -1 was specified), and the estimated + inmemory size of the dataset. 
+ """ + min_safe_parallelism = 1 + max_reasonable_parallelism = sys.maxsize + if mem_size is None and datasource_or_legacy_reader: + mem_size = datasource_or_legacy_reader.estimate_inmemory_data_size() + if mem_size is not None and not np.isnan(mem_size): + min_safe_parallelism = max(1, int(mem_size / target_max_block_size)) + max_reasonable_parallelism = max(1, int(mem_size / ctx.target_min_block_size)) + + reason = "" + if parallelism < 0: + if parallelism != -1: + raise ValueError("`parallelism` must either be -1 or a positive integer.") + + if ( + ctx.min_parallelism is not None + and ctx.min_parallelism != DEFAULT_READ_OP_MIN_NUM_BLOCKS + and ctx.read_op_min_num_blocks == DEFAULT_READ_OP_MIN_NUM_BLOCKS + ): + logger.warning( + "``DataContext.min_parallelism`` is deprecated in Ray 2.10. " + "Please specify ``DataContext.read_op_min_num_blocks`` instead." + ) + ctx.read_op_min_num_blocks = ctx.min_parallelism + + # Start with 2x the number of cores as a baseline, with a min floor. + if placement_group is None: + placement_group = ray.util.get_current_placement_group() + avail_cpus = avail_cpus or _estimate_avail_cpus(placement_group) + parallelism = max( + min(ctx.read_op_min_num_blocks, max_reasonable_parallelism), + min_safe_parallelism, + avail_cpus * 2, + ) + + if parallelism == ctx.read_op_min_num_blocks: + reason = ( + "DataContext.get_current().read_op_min_num_blocks=" + f"{ctx.read_op_min_num_blocks}" + ) + elif parallelism == max_reasonable_parallelism: + reason = ( + "output blocks of size at least " + "DataContext.get_current().target_min_block_size=" + f"{ctx.target_min_block_size / (1024 * 1024)}MiB" + ) + elif parallelism == min_safe_parallelism: + reason = ( + "output blocks of size at most " + "DataContext.get_current().target_max_block_size=" + f"{ctx.target_max_block_size / (1024 * 1024)}MiB" + ) + else: + reason = ( + "parallelism at least twice the available number " + f"of CPUs ({avail_cpus})" + ) + + logger.debug( + f"Autodetected parallelism={parallelism} based on " + f"estimated_available_cpus={avail_cpus} and " + f"estimated_data_size={mem_size}." + ) + + return parallelism, reason, mem_size + + +def _estimate_avail_cpus(cur_pg: Optional["PlacementGroup"]) -> int: + """Estimates the available CPU parallelism for this Dataset in the cluster. + + If we aren't in a placement group, this is trivially the number of CPUs in the + cluster. Otherwise, we try to calculate how large the placement group is relative + to the size of the cluster. + + Args: + cur_pg: The current placement group, if any. + """ + cluster_cpus = int(ray.cluster_resources().get("CPU", 1)) + cluster_gpus = int(ray.cluster_resources().get("GPU", 0)) + + # If we're in a placement group, we shouldn't assume the entire cluster's + # resources are available for us to use. Estimate an upper bound on what's + # reasonable to assume is available for datasets to use. + if cur_pg: + pg_cpus = 0 + for bundle in cur_pg.bundle_specs: + # Calculate the proportion of the cluster this placement group "takes up". + # Then scale our cluster_cpus proportionally to avoid over-parallelizing + # if there are many parallel Tune trials using the cluster. + cpu_fraction = bundle.get("CPU", 0) / max(1, cluster_cpus) + gpu_fraction = bundle.get("GPU", 0) / max(1, cluster_gpus) + max_fraction = max(cpu_fraction, gpu_fraction) + # Over-parallelize by up to a factor of 2, but no more than that. It's + # preferrable to over-estimate than under-estimate. 
+            pg_cpus += 2 * int(max_fraction * cluster_cpus)
+
+        return min(cluster_cpus, pg_cpus)
+
+    return cluster_cpus
+
+
+def _estimate_available_parallelism() -> int:
+    """Estimates the available CPU parallelism for this Dataset in the cluster.
+    If we are currently in a placement group, take that into account."""
+    cur_pg = ray.util.get_current_placement_group()
+    return _estimate_avail_cpus(cur_pg)
+
+
+def _warn_on_high_parallelism(requested_parallelism, num_read_tasks):
+    available_cpu_slots = ray.available_resources().get("CPU", 1)
+    if (
+        requested_parallelism
+        and num_read_tasks > available_cpu_slots * 4
+        and num_read_tasks >= 5000
+    ):
+        logger.warning(
+            f"{WARN_PREFIX} The requested parallelism of {requested_parallelism} "
+            "is more than 4x the number of available CPU slots in the cluster of "
+            f"{available_cpu_slots}. This can "
+            "lead to slowdowns during the data reading phase due to excessive "
+            "task creation. Reduce the parallelism to match with the available "
+            "CPU slots in the cluster, or set parallelism to -1 for Ray Data "
+            "to automatically determine the parallelism. "
+            "You can ignore this message if the cluster is expected to autoscale."
+        )
+
+
+def _check_import(obj, *, module: str, package: str) -> None:
+    """Check if a required dependency is installed.
+
+    If `module` can't be imported, this function raises an `ImportError` instructing
+    the user to install `package` from PyPI.
+
+    Args:
+        obj: The object that has a dependency.
+        module: The name of the module to import.
+        package: The name of the package on PyPI.
+    """
+    try:
+        importlib.import_module(module)
+    except ImportError:
+        raise ImportError(
+            f"`{obj.__class__.__name__}` depends on '{package}', but '{package}' "
+            f"couldn't be imported. You can install '{package}' by running `pip "
+            f"install {package}`."
+        )
+
+
+def _resolve_custom_scheme(path: str) -> str:
+    """Returns the resolved path if the given path follows a Ray-specific custom
+    scheme. Otherwise, returns the path unchanged.
+
+    The supported custom schemes are: "local", "example".
+    """
+    parsed_uri = urllib.parse.urlparse(path)
+    if parsed_uri.scheme == _LOCAL_SCHEME:
+        path = parsed_uri.netloc + parsed_uri.path
+    elif parsed_uri.scheme == _EXAMPLE_SCHEME:
+        example_data_path = pathlib.Path(__file__).parent.parent / "examples" / "data"
+        path = example_data_path / (parsed_uri.netloc + parsed_uri.path)
+        path = str(path.resolve())
+    return path
+
+
+def _is_local_scheme(paths: Union[str, List[str]]) -> bool:
+    """Returns True if the given paths use the local scheme.
+
+    Note: all paths must use the same scheme; passing a mix of local-scheme
+    and non-local-scheme paths raises an error.
+    """
+    if isinstance(paths, str):
+        paths = [paths]
+    if isinstance(paths, pathlib.Path):
+        paths = [str(paths)]
+    elif not isinstance(paths, list) or any(not isinstance(p, str) for p in paths):
+        raise ValueError("paths must be a path string or a list of path strings.")
+    elif len(paths) == 0:
+        raise ValueError("Must provide at least one path.")
+    num = sum(urllib.parse.urlparse(path).scheme == _LOCAL_SCHEME for path in paths)
+    if num > 0 and num < len(paths):
+        raise ValueError(
+            "The paths must all be local-scheme or not local-scheme, "
+            f"but found mixed {paths}"
+        )
+    return num == len(paths)
+
+
+def _truncated_repr(obj: Any) -> str:
+    """Utility to return a truncated object representation for error messages."""
+    msg = str(obj)
+    if len(msg) > 200:
+        msg = msg[:200] + "..."
+ return msg + + +def _insert_doc_at_pattern( + obj, + *, + message: str, + pattern: str, + insert_after: bool = True, + directive: Optional[str] = None, + skip_matches: int = 0, +) -> str: + if "\n" in message: + raise ValueError( + "message shouldn't contain any newlines, since this function will insert " + f"its own linebreaks when text wrapping: {message}" + ) + + doc = obj.__doc__.strip() + if not doc: + doc = "" + + if pattern == "" and insert_after: + # Empty pattern + insert_after means that we want to append the message to the + # end of the docstring. + head = doc + tail = "" + else: + tail = doc + i = tail.find(pattern) + skip_matches_left = skip_matches + while i != -1: + if insert_after: + # Set offset to the first character after the pattern. + offset = i + len(pattern) + else: + # Set offset to the first character in the matched line. + offset = tail[:i].rfind("\n") + 1 + head = tail[:offset] + tail = tail[offset:] + skip_matches_left -= 1 + if skip_matches_left <= 0: + break + elif not insert_after: + # Move past the found pattern, since we're skipping it. + tail = tail[i - offset + len(pattern) :] + i = tail.find(pattern) + else: + raise ValueError( + f"Pattern {pattern} not found after {skip_matches} skips in docstring " + f"{doc}" + ) + # Get indentation of the to-be-inserted text. + after_lines = list(filter(bool, tail.splitlines())) + if len(after_lines) > 0: + lines = after_lines + else: + lines = list(filter(bool, reversed(head.splitlines()))) + # Should always have at least one non-empty line in the docstring. + assert len(lines) > 0 + indent = " " * (len(lines[0]) - len(lines[0].lstrip())) + # Handle directive. + message = message.strip("\n") + if directive is not None: + base = f"{indent}.. {directive}::\n" + message = message.replace("\n", "\n" + indent + " " * 4) + message = base + indent + " " * 4 + message + else: + message = indent + message.replace("\n", "\n" + indent) + # Add two blank lines before/after message, if necessary. + if insert_after ^ (pattern == "\n\n"): + # Only two blank lines before message if: + # 1. Inserting message after pattern and pattern is not two blank lines. + # 2. Inserting message before pattern and pattern is two blank lines. + message = "\n\n" + message + if (not insert_after) ^ (pattern == "\n\n"): + # Only two blank lines after message if: + # 1. Inserting message before pattern and pattern is not two blank lines. + # 2. Inserting message after pattern and pattern is two blank lines. + message = message + "\n\n" + + # Insert message before/after pattern. + parts = [head, message, tail] + # Build new docstring. + obj.__doc__ = "".join(parts) + + +def _consumption_api( + if_more_than_read: bool = False, + datasource_metadata: Optional[str] = None, + extra_condition: Optional[str] = None, + delegate: Optional[str] = None, + pattern="Examples:", + insert_after=False, +): + """Annotate the function with an indication that it's a consumption API, and that it + will trigger Dataset execution. + """ + base = ( + " will trigger execution of the lazy transformations performed on " + "this dataset." 
+    )
+    if delegate:
+        message = delegate + base
+    elif not if_more_than_read:
+        message = "This operation" + base
+    else:
+        condition = "If this dataset consists of more than a read, "
+        if datasource_metadata is not None:
+            condition += (
+                f"or if the {datasource_metadata} can't be determined from the "
+                "metadata provided by the datasource, "
+            )
+        if extra_condition is not None:
+            condition += extra_condition + ", "
+        message = condition + "then this operation" + base
+
+    def wrap(obj):
+        _insert_doc_at_pattern(
+            obj,
+            message=message,
+            pattern=pattern,
+            insert_after=insert_after,
+            directive="note",
+        )
+        return obj
+
+    return wrap
+
+
+def ConsumptionAPI(*args, **kwargs):
+    """Annotate the function with an indication that it's a consumption API, and that
+    it will trigger Dataset execution.
+    """
+    if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
+        return _consumption_api()(args[0])
+    return _consumption_api(*args, **kwargs)
+
+
+def _all_to_all_api(*args, **kwargs):
+    """Annotate the function with an indication that it's an all-to-all API, and that
+    it is an operation that requires all inputs to be materialized in-memory to
+    execute.
+    """
+
+    def wrap(obj):
+        _insert_doc_at_pattern(
+            obj,
+            message=(
+                "This operation requires all inputs to be "
+                "materialized in object store for it to execute."
+            ),
+            pattern="Examples:",
+            insert_after=False,
+            directive="note",
+        )
+        return obj
+
+    return wrap
+
+
+def AllToAllAPI(*args, **kwargs):
+    """Annotate the function with an indication that it's an all-to-all API, and that
+    it is an operation that requires all inputs to be materialized in-memory to
+    execute.
+    """
+    # This should only be used as a decorator for dataset methods.
+    assert len(args) == 1 and len(kwargs) == 0 and callable(args[0])
+    return _all_to_all_api()(args[0])
+
+
+def _split_list(arr: List[Any], num_splits: int) -> List[List[Any]]:
+    """Split the list into `num_splits` lists.
+
+    The splits will be even if `num_splits` divides the length of the list;
+    otherwise the remainder (suppose it's R) is allocated to the first R splits
+    (one extra element each).
+    This is the same as numpy.array_split(). The reason we keep a separate
+    implementation is to allow heterogeneity in the elements of the list.
+    """
+    assert num_splits > 0
+    q, r = divmod(len(arr), num_splits)
+    splits = [
+        arr[i * q + min(i, r) : (i + 1) * q + min(i + 1, r)] for i in range(num_splits)
+    ]
+    return splits
+
+
+def get_compute_strategy(
+    fn: "UserDefinedFunction",
+    fn_constructor_args: Optional[Iterable[Any]] = None,
+    compute: Optional[Union[str, "ComputeStrategy"]] = None,
+    concurrency: Optional[Union[int, Tuple[int, int]]] = None,
+) -> "ComputeStrategy":
+    """Get `ComputeStrategy` based on the function or class, and concurrency
+    information.
+
+    Args:
+        fn: The function or generator to apply to a record batch, or a class type
+            that can be instantiated to create such a callable.
+        fn_constructor_args: Positional arguments to pass to ``fn``'s constructor.
+        compute: Either "tasks" (default) to use Ray Tasks or an
+            :class:`~ray.data.ActorPoolStrategy` to use an autoscaling actor pool.
+        concurrency: The number of Ray workers to use concurrently.
+
+    Returns:
+        The `ComputeStrategy` for execution.
+    """
+    # Lazily import these objects to avoid circular imports.
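+    # Sketch of the mapping implemented below:
+    #   concurrency=n,      callable class -> ActorPoolStrategy(size=n)
+    #   concurrency=(m, n), callable class -> ActorPoolStrategy(min_size=m, max_size=n)
+    #   concurrency=n,      plain function -> TaskPoolStrategy(size=n)
+    #   concurrency=None,   plain function -> TaskPoolStrategy()
+    #   concurrency=None,   callable class -> ValueError (concurrency is required)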
+ from ray.data._internal.compute import ActorPoolStrategy, TaskPoolStrategy + from ray.data.block import CallableClass + + if isinstance(fn, CallableClass): + is_callable_class = True + else: + # TODO(chengsu): disallow object that is not a function. For example, + # An object instance of class often indicates a bug in user code. + is_callable_class = False + if fn_constructor_args is not None: + raise ValueError( + "``fn_constructor_args`` can only be specified if providing a " + f"callable class instance for ``fn``, but got: {fn}." + ) + + if compute is not None: + # Legacy code path to support `compute` argument. + logger.warning( + "The argument ``compute`` is deprecated in Ray 2.9. Please specify " + "argument ``concurrency`` instead. For more information, see " + "https://docs.ray.io/en/master/data/transforming-data.html#" + "stateful-transforms." + ) + if is_callable_class and ( + compute == "tasks" or isinstance(compute, TaskPoolStrategy) + ): + raise ValueError( + "``compute`` must specify an actor compute strategy when using a " + f"callable class, but got: {compute}. For example, use " + "``compute=ray.data.ActorPoolStrategy(size=n)``." + ) + elif not is_callable_class and ( + compute == "actors" or isinstance(compute, ActorPoolStrategy) + ): + raise ValueError( + f"``compute`` is specified as the actor compute strategy: {compute}, " + f"but ``fn`` is not a callable class: {fn}. Pass a callable class or " + "use the default ``compute`` strategy." + ) + return compute + elif concurrency is not None: + if isinstance(concurrency, tuple): + if ( + len(concurrency) == 2 + and isinstance(concurrency[0], int) + and isinstance(concurrency[1], int) + ): + if is_callable_class: + return ActorPoolStrategy( + min_size=concurrency[0], max_size=concurrency[1] + ) + else: + raise ValueError( + "``concurrency`` is set as a tuple of integers, but ``fn`` " + f"is not a callable class: {fn}. Use ``concurrency=n`` to " + "control maximum number of workers to use." + ) + else: + raise ValueError( + "``concurrency`` is expected to be set as a tuple of " + f"integers, but got: {concurrency}." + ) + elif isinstance(concurrency, int): + if is_callable_class: + return ActorPoolStrategy(size=concurrency) + else: + return TaskPoolStrategy(size=concurrency) + else: + raise ValueError( + "``concurrency`` is expected to be set as an integer or a " + f"tuple of integers, but got: {concurrency}." + ) + else: + if is_callable_class: + raise ValueError( + "``concurrency`` must be specified when using a callable class. " + "For example, use ``concurrency=n`` for a pool of ``n`` workers." + ) + else: + return TaskPoolStrategy() + + +def capfirst(s: str): + """Capitalize the first letter of a string + + Args: + s: String to capitalize + + Returns: + Capitalized string + """ + return s[0].upper() + s[1:] + + +def capitalize(s: str): + """Capitalize a string, removing '_' and keeping camelcase. + + Args: + s: String to capitalize + + Returns: + Capitalized string with no underscores. 
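+
+    Example (illustrative): ``capitalize("map_batches")`` returns
+    ``"MapBatches"``.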
+ """ + return "".join(capfirst(x) for x in s.split("_")) + + +def pandas_df_to_arrow_block(df: "pandas.DataFrame") -> "Block": + from ray.data.block import BlockAccessor, BlockExecStats + + block = BlockAccessor.for_block(df).to_arrow() + stats = BlockExecStats.builder() + return ( + block, + BlockAccessor.for_block(block).get_metadata(exec_stats=stats.build()), + ) + + +def ndarray_to_block(ndarray: np.ndarray, ctx: DataContext) -> "Block": + from ray.data.block import BlockAccessor, BlockExecStats + + DataContext._set_current(ctx) + + stats = BlockExecStats.builder() + block = BlockAccessor.batch_to_block({"data": ndarray}) + metadata = BlockAccessor.for_block(block).get_metadata(exec_stats=stats.build()) + return block, metadata + + +def get_table_block_metadata( + table: Union["pyarrow.Table", "pandas.DataFrame"] +) -> "BlockMetadata": + from ray.data.block import BlockAccessor, BlockExecStats + + stats = BlockExecStats.builder() + return BlockAccessor.for_block(table).get_metadata(exec_stats=stats.build()) + + +def unify_block_metadata_schema( + metadata: List["BlockMetadata"], +) -> Optional[Union[type, "pyarrow.lib.Schema"]]: + """For the input list of BlockMetadata, return a unified schema of the + corresponding blocks. If the metadata have no valid schema, returns None. + """ + # Some blocks could be empty, in which case we cannot get their schema. + # TODO(ekl) validate schema is the same across different blocks. + from ray.data._internal.arrow_ops.transform_pyarrow import unify_schemas + + # First check if there are blocks with computed schemas, then unify + # valid schemas from all such blocks. + schemas_to_unify = [] + for m in metadata: + if m.schema is not None and (m.num_rows is None or m.num_rows > 0): + schemas_to_unify.append(m.schema) + if schemas_to_unify: + # Check valid pyarrow installation before attempting schema unification + try: + import pyarrow as pa + except ImportError: + pa = None + # If the result contains PyArrow schemas, unify them + if pa is not None and all(isinstance(s, pa.Schema) for s in schemas_to_unify): + return unify_schemas(schemas_to_unify) + # Otherwise, if the resulting schemas are simple types (e.g. int), + # return the first schema. 
+        return schemas_to_unify[0]
+    return None
+
+
+def find_partition_index(
+    table: Union["pyarrow.Table", "pandas.DataFrame"],
+    desired: List[Any],
+    sort_key: "SortKey",
+) -> int:
+    columns = sort_key.get_columns()
+    descending = sort_key.get_descending()
+
+    left, right = 0, len(table)
+    for i in range(len(desired)):
+        if left == right:
+            return right
+        col_name = columns[i]
+        col_vals = table[col_name].to_numpy()[left:right]
+        desired_val = desired[i]
+
+        # Handle null values - replace them with sentinel values
+        if desired_val is None:
+            desired_val = NULL_SENTINEL
+
+        # Replace None/NaN values in col_vals with the sentinel
+        null_mask = col_vals == None  # noqa: E711
+        if null_mask.any():
+            col_vals = col_vals.copy()  # Make a copy to avoid modifying the original
+            col_vals[null_mask] = NULL_SENTINEL
+
+        prevleft = left
+        if descending is True:
+            left = prevleft + (
+                len(col_vals)
+                - np.searchsorted(
+                    col_vals,
+                    desired_val,
+                    side="right",
+                    sorter=np.arange(len(col_vals) - 1, -1, -1),
+                )
+            )
+            right = prevleft + (
+                len(col_vals)
+                - np.searchsorted(
+                    col_vals,
+                    desired_val,
+                    side="left",
+                    sorter=np.arange(len(col_vals) - 1, -1, -1),
+                )
+            )
+        else:
+            left = prevleft + np.searchsorted(col_vals, desired_val, side="left")
+            right = prevleft + np.searchsorted(col_vals, desired_val, side="right")
+    return right if descending is True else left
+
+
+def find_partitions(table, boundaries, sort_key):
+    partitions = []
+
+    # For each boundary value, count the number of items that are less
+    # than it. Since the block is sorted, these counts partition the items
+    # such that boundaries[i] <= x < boundaries[i + 1] for each x in
+    # partition[i]. If `descending` is true, `boundaries` would also be
+    # in descending order and we only need to count the number of items
+    # *greater than* the boundary value instead.
+    bounds = [
+        find_partition_index(table, boundary, sort_key) for boundary in boundaries
+    ]
+
+    last_idx = 0
+    for idx in bounds:
+        partitions.append(table[last_idx:idx])
+        last_idx = idx
+    partitions.append(table[last_idx:])
+    return partitions
+
+
+def get_attribute_from_class_name(class_name: str) -> Any:
+    """Get a Python attribute from the provided class name.
+
+    The caller needs to make sure the provided class name includes the
+    full module name, and can be imported successfully.
+    """
+    from importlib import import_module
+
+    paths = class_name.split(".")
+    if len(paths) < 2:
+        raise ValueError(f"Cannot create object from {class_name}.")
+
+    module_name = ".".join(paths[:-1])
+    attribute_name = paths[-1]
+    return getattr(import_module(module_name), attribute_name)
+
+
+class Queue:
+    """A thread-safe queue implementation for multiple producers and consumers.
+
+    Provides `release()` so producer threads can exit cooperatively and release
+    resources.
+    """
+
+    def __init__(self, queue_size: int):
+        # The queue shared across multiple producer threads.
+        self._queue = deque()
+        # The boolean variable to indicate whether producer threads should exit.
+        self._threads_exit = False
+        # The semaphore for producer threads to put items into the queue.
+        self._producer_semaphore = threading.Semaphore(queue_size)
+        # The semaphore for consumer threads to get items from the queue.
+        self._consumer_semaphore = threading.Semaphore(0)
+        # The mutex lock guarding access to `self._queue` and `self._threads_exit`.
+        self._mutex = threading.Lock()
+
+    def put(self, item: Any) -> bool:
+        """Put an item into the queue.
+
+        Block if necessary until a free slot is available in the queue.
+ This method is called by producer threads. + + Returns: + True if the caller thread should exit immediately. + """ + self._producer_semaphore.acquire() + with self._mutex: + if self._threads_exit: + return True + else: + self._queue.append(item) + self._consumer_semaphore.release() + return False + + def get(self) -> Any: + """Remove and return an item from the queue. + + Block if necessary until an item is available in queue. + This method is called by consumer threads. + """ + self._consumer_semaphore.acquire() + with self._mutex: + next_item = self._queue.popleft() + self._producer_semaphore.release() + return next_item + + def release(self, num_threads: int): + """Release `num_threads` of producers so they would exit cooperatively.""" + with self._mutex: + self._threads_exit = True + for _ in range(num_threads): + # NOTE: After Python 3.9+, Semaphore.release(n) can be used to + # release all threads at once. + self._producer_semaphore.release() + + def qsize(self): + """Return the size of the queue.""" + with self._mutex: + return len(self._queue) + + +T = TypeVar("T") +U = TypeVar("U") + + +def make_async_gen( + base_iterator: Iterator[T], + fn: Callable[[Iterator[T]], Iterator[U]], + num_workers: int = 1, +) -> Iterator[U]: + """Returns a new iterator with elements fetched from the base_iterator + in an async fashion using a threadpool. + + Each thread in the threadpool will fetch data from the base_iterator in a + thread-safe fashion, and apply the provided `fn` computation concurrently. + + Args: + base_iterator: The iterator to asynchronously fetch from. + fn: The function to run on the input iterator. + num_workers: The number of threads to use in the threadpool. Defaults to 1. + + Returns: + An iterator with the same elements as outputted from `fn`. + """ + + if num_workers < 1: + raise ValueError("Size of threadpool must be at least 1.") + + # Use a lock to fetch from the base_iterator in a thread-safe fashion. + def convert_to_threadsafe_iterator(base_iterator: Iterator[T]) -> Iterator[T]: + class ThreadSafeIterator: + def __init__(self, it): + self.lock = threading.Lock() + self.it = it + + def __next__(self): + with self.lock: + return next(self.it) + + def __iter__(self): + return self + + return ThreadSafeIterator(base_iterator) + + thread_safe_generator = convert_to_threadsafe_iterator(base_iterator) + + class Sentinel: + def __init__(self, thread_index: int): + self.thread_index = thread_index + + output_queue = Queue(1) + + # Because pulling from the base iterator cannot happen concurrently, + # we must execute the expensive computation in a separate step which + # can be parallelized via a threadpool. + def execute_computation(thread_index: int): + try: + for item in fn(thread_safe_generator): + if output_queue.put(item): + # Return early when it's instructed to do so. + return + output_queue.put(Sentinel(thread_index)) + except Exception as e: + output_queue.put(e) + + # Use separate threads to produce output batches. + threads = [ + threading.Thread(target=execute_computation, args=(i,), daemon=True) + for i in range(num_workers) + ] + + for thread in threads: + thread.start() + + # Use main thread to consume output batches. + num_threads_finished = 0 + try: + while True: + next_item = output_queue.get() + if isinstance(next_item, Exception): + raise next_item + if isinstance(next_item, Sentinel): + num_threads_finished += 1 + else: + yield next_item + if num_threads_finished >= num_workers: + break + finally: + # Cooperatively exit all producer threads. 
+        # This avoids the daemon threads hanging around holding batches in
+        # memory, which can easily cause GRAM OOM. This can happen when the
+        # caller breaks out in the middle of iteration.
+        num_threads_alive = num_workers - num_threads_finished
+        if num_threads_alive > 0:
+            output_queue.release(num_threads_alive)
+
+
+def call_with_retry(
+    f: Callable[[], Any],
+    description: str,
+    *,
+    match: Optional[List[str]] = None,
+    max_attempts: int = 10,
+    max_backoff_s: int = 32,
+) -> Any:
+    """Retry a function with exponential backoff.
+
+    Args:
+        f: The function to retry.
+        match: A list of strings to match in the exception message. If ``None``, any
+            error is retried.
+        description: An imperative description of the function being retried. For
+            example, "open the file".
+        max_attempts: The maximum number of attempts to retry.
+        max_backoff_s: The maximum number of seconds to backoff.
+    """
+    assert max_attempts >= 1, f"`max_attempts` must be positive. Got {max_attempts}."
+
+    for i in range(max_attempts):
+        try:
+            return f()
+        except Exception as e:
+            is_retryable = match is None or any(
+                [pattern in str(e) for pattern in match]
+            )
+            if is_retryable and i + 1 < max_attempts:
+                # Retry with binary exponential backoff with random jitter.
+                backoff = min((2 ** (i + 1)), max_backoff_s) * random.random()
+                logger.debug(
+                    f"Retry attempt {i + 1} to {description} after {backoff} seconds."
+                )
+                time.sleep(backoff)
+            else:
+                raise e from None
+
+
+def iterate_with_retry(
+    iterable_factory: Callable[[], Iterable],
+    description: str,
+    *,
+    match: Optional[List[str]] = None,
+    max_attempts: int = 10,
+    max_backoff_s: int = 32,
+) -> Any:
+    """Iterate through an iterable with retries.
+
+    If the iterable raises an exception, this function recreates and re-iterates
+    through the iterable, while skipping the items that have already been yielded.
+
+    Args:
+        iterable_factory: A no-argument function that creates the iterable.
+        match: A list of strings to match in the exception message. If ``None``, any
+            error is retried.
+        description: An imperative description of the function being retried. For
+            example, "open the file".
+        max_attempts: The maximum number of attempts to retry.
+        max_backoff_s: The maximum number of seconds to backoff.
+    """
+    assert max_attempts >= 1, f"`max_attempts` must be positive. Got {max_attempts}."
+
+    num_items_yielded = 0
+    for i in range(max_attempts):
+        try:
+            iterable = iterable_factory()
+            # NOTE: Use a distinct loop variable here; reusing `i` would shadow
+            # the attempt counter used for the retry bookkeeping below.
+            for item_idx, item in enumerate(iterable):
+                if item_idx < num_items_yielded:
+                    # Skip items that have already been yielded.
+                    continue
+
+                num_items_yielded += 1
+                yield item
+            return
+        except Exception as e:
+            is_retryable = match is None or any(
+                [pattern in str(e) for pattern in match]
+            )
+            if is_retryable and i + 1 < max_attempts:
+                # Retry with binary exponential backoff with random jitter.
+                backoff = min((2 ** (i + 1)), max_backoff_s) * random.random()
+                logger.debug(
+                    f"Retry attempt {i + 1} to {description} after {backoff} seconds."
+                )
+                time.sleep(backoff)
+            else:
+                raise e from None
+
+
+def create_dataset_tag(dataset_name: Optional[str], *args):
+    tag = dataset_name or "dataset"
+    for arg in args:
+        tag += f"_{arg}"
+    return tag
+
+
+def convert_bytes_to_human_readable_str(num_bytes: int) -> str:
+    if num_bytes >= 1e9:
+        num_bytes_str = f"{round(num_bytes / 1e9)}GB"
+    elif num_bytes >= 1e6:
+        num_bytes_str = f"{round(num_bytes / 1e6)}MB"
+    else:
+        num_bytes_str = f"{round(num_bytes / 1e3)}KB"
+    return num_bytes_str
diff --git a/deepseek/lib/python3.10/site-packages/ray/data/block.py b/deepseek/lib/python3.10/site-packages/ray/data/block.py
new file mode 100644
index 0000000000000000000000000000000000000000..fcab3feb67eb5abd917d0dfbd4e612598c85dd02
--- /dev/null
+++ b/deepseek/lib/python3.10/site-packages/ray/data/block.py
@@ -0,0 +1,477 @@
+import collections
+import logging
+import os
+import time
+from dataclasses import dataclass
+from enum import Enum
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    Dict,
+    Iterator,
+    List,
+    Literal,
+    Optional,
+    Protocol,
+    Tuple,
+    TypeVar,
+    Union,
+)
+
+import numpy as np
+
+import ray
+from ray import DynamicObjectRefGenerator
+from ray.air.util.tensor_extensions.arrow import ArrowConversionError
+from ray.data._internal.util import _check_pyarrow_version, _truncated_repr
+from ray.types import ObjectRef
+from ray.util import log_once
+from ray.util.annotations import DeveloperAPI
+
+import psutil
+
+try:
+    import resource
+except ImportError:
+    resource = None
+
+if TYPE_CHECKING:
+    import pandas
+    import pyarrow
+
+    from ray.data._internal.block_builder import BlockBuilder
+    from ray.data._internal.planner.exchange.sort_task_spec import SortKey
+    from ray.data.aggregate import AggregateFn
+
+
+T = TypeVar("T", contravariant=True)
+U = TypeVar("U", covariant=True)
+
+KeyType = TypeVar("KeyType")
+AggType = TypeVar("AggType")
+
+
+# Represents a batch of records to be stored in the Ray object store.
+#
+# Block data can be accessed in a uniform way via ``BlockAccessors`` like
+# ``ArrowBlockAccessor``.
+Block = Union["pyarrow.Table", "pandas.DataFrame"]
+
+
+logger = logging.getLogger(__name__)
+
+
+@DeveloperAPI
+class BlockType(Enum):
+    ARROW = "arrow"
+    PANDAS = "pandas"
+
+
+# User-facing data batch type. This is the data type for data that is supplied to and
+# returned from batch UDFs.
+DataBatch = Union["pyarrow.Table", "pandas.DataFrame", Dict[str, np.ndarray]]
+
+# User-facing data column type. This is the data type for data that is supplied to and
+# returned from column UDFs.
+DataBatchColumn = Union[
+    "pyarrow.ChunkedArray", "pyarrow.Array", "pandas.Series", np.ndarray
+]
+
+
+# A class type that implements __call__.
+CallableClass = type
+
+
+class _CallableClassProtocol(Protocol[T, U]):
+    def __call__(self, __arg: T) -> Union[U, Iterator[U]]:
+        ...
+
+
+# A user-defined function passed to map, map_batches, etc.
+UserDefinedFunction = Union[
+    Callable[[T], U],
+    Callable[[T], Iterator[U]],
+    "_CallableClassProtocol",
+]
+
+# A list of block references pending computation by a single task. For example,
+# this may be the output of a task reading a file.
+BlockPartition = List[Tuple[ObjectRef[Block], "BlockMetadata"]]
+
+# The metadata that describes the output of a BlockPartition. This has the
+# same type as the metadata that describes each block in the partition.
+BlockPartitionMetadata = List["BlockMetadata"]
+
+# TODO(ekl/chengsu): replace this with just
+# `DynamicObjectRefGenerator` once block splitting
+# is on by default.
When block splitting is off, the type is a plain block. +MaybeBlockPartition = Union[Block, DynamicObjectRefGenerator] + +VALID_BATCH_FORMATS = ["pandas", "pyarrow", "numpy", None] +DEFAULT_BATCH_FORMAT = "numpy" + + +def _apply_batch_format(given_batch_format: Optional[str]) -> str: + if given_batch_format == "default": + given_batch_format = DEFAULT_BATCH_FORMAT + if given_batch_format not in VALID_BATCH_FORMATS: + raise ValueError( + f"The given batch format {given_batch_format} isn't allowed (must be one of" + f" {VALID_BATCH_FORMATS})." + ) + return given_batch_format + + +def _apply_batch_size( + given_batch_size: Optional[Union[int, Literal["default"]]] +) -> Optional[int]: + if given_batch_size == "default": + return ray.data.context.DEFAULT_BATCH_SIZE + else: + return given_batch_size + + +@DeveloperAPI +class BlockExecStats: + """Execution stats for this block. + + Attributes: + wall_time_s: The wall-clock time it took to compute this block. + cpu_time_s: The CPU time it took to compute this block. + node_id: A unique id for the node that computed this block. + """ + + def __init__(self): + self.start_time_s: Optional[float] = None + self.end_time_s: Optional[float] = None + self.wall_time_s: Optional[float] = None + self.udf_time_s: Optional[float] = 0 + self.cpu_time_s: Optional[float] = None + self.node_id = ray.runtime_context.get_runtime_context().get_node_id() + # Max memory usage. May be an overestimate since we do not + # differentiate from previous tasks on the same worker. + self.max_rss_bytes: int = 0 + self.task_idx: Optional[int] = None + + @staticmethod + def builder() -> "_BlockExecStatsBuilder": + return _BlockExecStatsBuilder() + + def __repr__(self): + return repr( + { + "wall_time_s": self.wall_time_s, + "cpu_time_s": self.cpu_time_s, + "udf_time_s": self.udf_time_s, + "node_id": self.node_id, + } + ) + + +class _BlockExecStatsBuilder: + """Helper class for building block stats. + + When this class is created, we record the start time. When build() is + called, the time delta is saved as part of the stats. + """ + + def __init__(self): + self.start_time = time.perf_counter() + self.start_cpu = time.process_time() + + def build(self) -> "BlockExecStats": + self.end_time = time.perf_counter() + self.end_cpu = time.process_time() + + stats = BlockExecStats() + stats.start_time_s = self.start_time + stats.end_time_s = self.end_time + stats.wall_time_s = self.end_time - self.start_time + stats.cpu_time_s = self.end_cpu - self.start_cpu + if resource is None: + # NOTE(swang): resource package is not supported on Windows. This + # is only the memory usage at the end of the task, not the peak + # memory. + process = psutil.Process(os.getpid()) + stats.max_rss_bytes = int(process.memory_info().rss) + else: + stats.max_rss_bytes = int( + resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * 1e3 + ) + return stats + + +@DeveloperAPI +@dataclass +class BlockMetadata: + """Metadata about the block.""" + + #: The number of rows contained in this block, or None. + num_rows: Optional[int] + #: The approximate size in bytes of this block, or None. + size_bytes: Optional[int] + #: The pyarrow schema or types of the block elements, or None. + schema: Optional[Union[type, "pyarrow.lib.Schema"]] + #: The list of file paths used to generate this block, or + #: the empty list if indeterminate. + input_files: Optional[List[str]] + #: Execution stats for this block. 
+    exec_stats: Optional[BlockExecStats]
+
+    def __post_init__(self):
+        if self.input_files is None:
+            self.input_files = []
+        if self.size_bytes is not None:
+            # Require size_bytes to be int, ray.util.metrics objects
+            # will not take other types like numpy.int64
+            assert isinstance(self.size_bytes, int)
+
+
+@DeveloperAPI
+class BlockAccessor:
+    """Provides accessor methods for a specific block.
+
+    Ideally, we wouldn't need separate accessor classes for blocks. However,
+    this is needed if we want to support storing ``pyarrow.Table`` directly
+    as a top-level Ray object, without a wrapping class (issue #17186).
+    """
+
+    def num_rows(self) -> int:
+        """Return the number of rows contained in this block."""
+        raise NotImplementedError
+
+    def iter_rows(self, public_row_format: bool) -> Iterator[T]:
+        """Iterate over the rows of this block.
+
+        Args:
+            public_row_format: Whether to cast rows into the public Dict row
+                format (this incurs extra copy conversions).
+        """
+        raise NotImplementedError
+
+    def slice(self, start: int, end: int, copy: bool) -> Block:
+        """Return a slice of this block.
+
+        Args:
+            start: The starting index of the slice.
+            end: The ending index of the slice.
+            copy: Whether to perform a data copy for the slice.
+
+        Returns:
+            The sliced block result.
+        """
+        raise NotImplementedError
+
+    def take(self, indices: List[int]) -> Block:
+        """Return a new block containing the provided row indices.
+
+        Args:
+            indices: The row indices to return.
+
+        Returns:
+            A new block containing the provided row indices.
+        """
+        raise NotImplementedError
+
+    def select(self, columns: List[Optional[str]]) -> Block:
+        """Return a new block containing the provided columns."""
+        raise NotImplementedError
+
+    def random_shuffle(self, random_seed: Optional[int]) -> Block:
+        """Randomly shuffle this block."""
+        raise NotImplementedError
+
+    def to_pandas(self) -> "pandas.DataFrame":
+        """Convert this block into a Pandas dataframe."""
+        raise NotImplementedError
+
+    def to_numpy(
+        self, columns: Optional[Union[str, List[str]]] = None
+    ) -> Union[np.ndarray, Dict[str, np.ndarray]]:
+        """Convert this block (or columns of the block) into a NumPy ndarray.
+
+        Args:
+            columns: Names of columns to convert, or None to convert all columns.
+        """
+        raise NotImplementedError
+
+    def to_arrow(self) -> "pyarrow.Table":
+        """Convert this block into an Arrow table."""
+        raise NotImplementedError
+
+    def to_block(self) -> Block:
+        """Return the base block that this accessor wraps."""
+        raise NotImplementedError
+
+    def to_default(self) -> Block:
+        """Return the default data format for this accessor."""
+        return self.to_block()
+
+    def to_batch_format(self, batch_format: Optional[str]) -> DataBatch:
+        """Convert this block into the provided batch format.
+
+        Args:
+            batch_format: The batch format to convert this block to.
+
+        Returns:
+            This block formatted as the provided batch format.
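+
+        Example (illustrative): for an Arrow-backed block,
+        ``to_batch_format("pandas")`` returns the data as a
+        ``pandas.DataFrame``, while ``to_batch_format(None)`` returns the
+        underlying block unchanged.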
+ """ + if batch_format is None: + return self.to_block() + elif batch_format == "default" or batch_format == "native": + return self.to_default() + elif batch_format == "pandas": + return self.to_pandas() + elif batch_format == "pyarrow": + return self.to_arrow() + elif batch_format == "numpy": + return self.to_numpy() + else: + raise ValueError( + f"The batch format must be one of {VALID_BATCH_FORMATS}, got: " + f"{batch_format}" + ) + + def size_bytes(self) -> int: + """Return the approximate size in bytes of this block.""" + raise NotImplementedError + + def schema(self) -> Union[type, "pyarrow.lib.Schema"]: + """Return the Python type or pyarrow schema of this block.""" + raise NotImplementedError + + def get_metadata( + self, + input_files: Optional[List[str]] = None, + exec_stats: Optional[BlockExecStats] = None, + ) -> BlockMetadata: + """Create a metadata object from this block.""" + return BlockMetadata( + num_rows=self.num_rows(), + size_bytes=self.size_bytes(), + schema=self.schema(), + input_files=input_files, + exec_stats=exec_stats, + ) + + def zip(self, other: "Block") -> "Block": + """Zip this block with another block of the same type and size.""" + raise NotImplementedError + + @staticmethod + def builder() -> "BlockBuilder": + """Create a builder for this block type.""" + raise NotImplementedError + + @classmethod + def batch_to_block( + cls, + batch: DataBatch, + block_type: Optional[BlockType] = None, + ) -> Block: + """Create a block from user-facing data formats.""" + + if isinstance(batch, np.ndarray): + raise ValueError( + f"Error validating {_truncated_repr(batch)}: " + "Standalone numpy arrays are not " + "allowed in Ray 2.5. Return a dict of field -> array, " + "e.g., `{'data': array}` instead of `array`." + ) + + elif isinstance(batch, collections.abc.Mapping): + if block_type is None or block_type == BlockType.ARROW: + try: + return cls.batch_to_arrow_block(batch) + except ArrowConversionError as e: + if log_once("_fallback_to_pandas_block_warning"): + logger.warning( + f"Failed to convert batch to Arrow due to: {e}; " + f"falling back to Pandas block" + ) + + if block_type is None: + return cls.batch_to_pandas_block(batch) + else: + raise e + else: + assert block_type == BlockType.PANDAS + return cls.batch_to_pandas_block(batch) + return batch + + @classmethod + def batch_to_arrow_block(cls, batch: Dict[str, Any]) -> Block: + """Create an Arrow block from user-facing data formats.""" + from ray.data._internal.arrow_block import ArrowBlockBuilder + + return ArrowBlockBuilder._table_from_pydict(batch) + + @classmethod + def batch_to_pandas_block(cls, batch: Dict[str, Any]) -> Block: + """Create a Pandas block from user-facing data formats.""" + from ray.data._internal.pandas_block import PandasBlockAccessor + + return PandasBlockAccessor.numpy_to_block(batch) + + @staticmethod + def for_block(block: Block) -> "BlockAccessor[T]": + """Create a block accessor for the given block.""" + _check_pyarrow_version() + import pandas + import pyarrow + + if isinstance(block, pyarrow.Table): + from ray.data._internal.arrow_block import ArrowBlockAccessor + + return ArrowBlockAccessor(block) + elif isinstance(block, pandas.DataFrame): + from ray.data._internal.pandas_block import PandasBlockAccessor + + return PandasBlockAccessor(block) + elif isinstance(block, bytes): + from ray.data._internal.arrow_block import ArrowBlockAccessor + + return ArrowBlockAccessor.from_bytes(block) + elif isinstance(block, list): + raise ValueError( + f"Error validating 
{_truncated_repr(block)}: " + "Standalone Python objects are not " + "allowed in Ray 2.5. To use Python objects in a dataset, " + "wrap them in a dict of numpy arrays, e.g., " + "return `{'item': batch}` instead of just `batch`." + ) + else: + raise TypeError("Not a block type: {} ({})".format(block, type(block))) + + def sample(self, n_samples: int, sort_key: "SortKey") -> "Block": + """Return a random sample of items from this block.""" + raise NotImplementedError + + def sort_and_partition( + self, boundaries: List[T], sort_key: "SortKey" + ) -> List["Block"]: + """Return a list of sorted partitions of this block.""" + raise NotImplementedError + + def combine(self, key: "SortKey", aggs: Tuple["AggregateFn"]) -> Block: + """Combine rows with the same key into an accumulator.""" + raise NotImplementedError + + @staticmethod + def merge_sorted_blocks( + blocks: List["Block"], sort_key: "SortKey" + ) -> Tuple[Block, BlockMetadata]: + """Return a sorted block by merging a list of sorted blocks.""" + raise NotImplementedError + + @staticmethod + def aggregate_combined_blocks( + blocks: List[Block], sort_key: "SortKey", aggs: Tuple["AggregateFn"] + ) -> Tuple[Block, BlockMetadata]: + """Aggregate partially combined and sorted blocks.""" + raise NotImplementedError + + def block_type(self) -> BlockType: + """Return the block type of this block.""" + raise NotImplementedError diff --git a/deepseek/lib/python3.10/site-packages/ray/data/grouped_data.py b/deepseek/lib/python3.10/site-packages/ray/data/grouped_data.py new file mode 100644 index 0000000000000000000000000000000000000000..8f7b7dde118ddbd54705b89c2589b9dd32f48dab --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/data/grouped_data.py @@ -0,0 +1,517 @@ +from typing import Any, Dict, Iterable, List, Optional, Tuple, Union + +from ray.data._internal.aggregate import Count, Max, Mean, Min, Std, Sum +from ray.data._internal.compute import ComputeStrategy +from ray.data._internal.logical.interfaces import LogicalPlan +from ray.data._internal.logical.operators.all_to_all_operator import Aggregate +from ray.data.aggregate import AggregateFn +from ray.data.block import BlockAccessor, CallableClass, UserDefinedFunction +from ray.data.dataset import DataBatch, Dataset +from ray.util.annotations import PublicAPI + +CDS_API_GROUP = "Computations or Descriptive Stats" +FA_API_GROUP = "Function Application" + + +class _MultiColumnSortedKey: + """Represents a tuple of group keys with a ``__lt__`` method + + This is a simple implementation to support multi-column groupby. + While a 1D array of tuples suffices to maintain the lexicographical + sorted order, a comparison method is also needed in ``np.searchsorted`` + (for computing the group key boundaries). + """ + + __slots__ = ("data",) + + def __init__(self, *args): + self.data = tuple(args) + + def __lt__(self, obj: "_MultiColumnSortedKey") -> bool: + return self.data < obj.data + + def __repr__(self) -> str: + """Print as T(1, 2)""" + return "T" + self.data.__repr__() + + +class GroupedData: + """Represents a grouped dataset created by calling ``Dataset.groupby()``. + + The actual groupby is deferred until an aggregation is applied. + """ + + def __init__( + self, + dataset: Dataset, + key: Union[str, List[str]], + ): + """Construct a dataset grouped by key (internal API). + + The constructor is not part of the GroupedData API. + Use the ``Dataset.groupby()`` method to construct one. 
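+
+        Example (illustrative): ``ray.data.range(100).groupby("id")``
+        constructs a ``GroupedData`` keyed on the ``id`` column.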
+ """ + self._dataset = dataset + self._key = key + + def __repr__(self) -> str: + return ( + f"{self.__class__.__name__}(dataset={self._dataset}, " f"key={self._key!r})" + ) + + @PublicAPI(api_group=FA_API_GROUP) + def aggregate(self, *aggs: AggregateFn) -> Dataset: + """Implements an accumulator-based aggregation. + + Args: + aggs: Aggregations to do. + + Returns: + The output is an dataset of ``n + 1`` columns where the first column + is the groupby key and the second through ``n + 1`` columns are the + results of the aggregations. + If groupby key is ``None`` then the key part of return is omitted. + """ + + plan = self._dataset._plan.copy() + op = Aggregate( + self._dataset._logical_plan.dag, + key=self._key, + aggs=aggs, + ) + logical_plan = LogicalPlan(op, self._dataset.context) + return Dataset( + plan, + logical_plan, + ) + + def _aggregate_on( + self, + agg_cls: type, + on: Union[str, List[str]], + ignore_nulls: bool, + *args, + **kwargs, + ): + """Helper for aggregating on a particular subset of the dataset. + + This validates the `on` argument, and converts a list of column names + to a multi-aggregation. A null `on` results in a + multi-aggregation on all columns for an Arrow Dataset, and a single + aggregation on the entire row for a simple Dataset. + """ + aggs = self._dataset._build_multicolumn_aggs( + agg_cls, on, ignore_nulls, *args, skip_cols=self._key, **kwargs + ) + return self.aggregate(*aggs) + + @PublicAPI(api_group=FA_API_GROUP) + def map_groups( + self, + fn: UserDefinedFunction[DataBatch, DataBatch], + *, + compute: Union[str, ComputeStrategy] = None, + batch_format: Optional[str] = "default", + fn_args: Optional[Iterable[Any]] = None, + fn_kwargs: Optional[Dict[str, Any]] = None, + fn_constructor_args: Optional[Iterable[Any]] = None, + fn_constructor_kwargs: Optional[Dict[str, Any]] = None, + num_cpus: Optional[float] = None, + num_gpus: Optional[float] = None, + concurrency: Optional[Union[int, Tuple[int, int]]] = None, + **ray_remote_args, + ) -> "Dataset": + """Apply the given function to each group of records of this dataset. + + While map_groups() is very flexible, note that it comes with downsides: + * It may be slower than using more specific methods such as min(), max(). + * It requires that each group fits in memory on a single node. + + In general, prefer to use aggregate() instead of map_groups(). + + .. warning:: + Specifying both ``num_cpus`` and ``num_gpus`` for map tasks is experimental, + and may result in scheduling or stability issues. Please + `report any issues `_ + to the Ray team. + + Examples: + >>> # Return a single record per group (list of multiple records in, + >>> # list of a single record out). + >>> import ray + >>> import pandas as pd + >>> import numpy as np + >>> # Get first value per group. + >>> ds = ray.data.from_items([ # doctest: +SKIP + ... {"group": 1, "value": 1}, + ... {"group": 1, "value": 2}, + ... {"group": 2, "value": 3}, + ... {"group": 2, "value": 4}]) + >>> ds.groupby("group").map_groups( # doctest: +SKIP + ... lambda g: {"result": np.array([g["value"][0]])}) + + >>> # Return multiple records per group (dataframe in, dataframe out). + >>> df = pd.DataFrame( + ... {"A": ["a", "a", "b"], "B": [1, 1, 3], "C": [4, 6, 5]} + ... ) + >>> ds = ray.data.from_pandas(df) # doctest: +SKIP + >>> grouped = ds.groupby("A") # doctest: +SKIP + >>> grouped.map_groups( # doctest: +SKIP + ... lambda g: g.apply( + ... lambda c: c / g[c.name].sum() if c.name in ["B", "C"] else c + ... ) + ... 
) # doctest: +SKIP + + Args: + fn: The function to apply to each group of records, or a class type + that can be instantiated to create such a callable. It takes as + input a batch of all records from a single group, and returns a + batch of zero or more records, similar to map_batches(). + compute: The compute strategy, either "tasks" (default) to use Ray + tasks, ``ray.data.ActorPoolStrategy(size=n)`` to use a fixed-size actor + pool, or ``ray.data.ActorPoolStrategy(min_size=m, max_size=n)`` for an + autoscaling actor pool. + batch_format: Specify ``"default"`` to use the default block format + (NumPy), ``"pandas"`` to select ``pandas.DataFrame``, "pyarrow" to + select ``pyarrow.Table``, or ``"numpy"`` to select + ``Dict[str, numpy.ndarray]``, or None to return the underlying block + exactly as is with no additional formatting. + fn_args: Arguments to `fn`. + fn_kwargs: Keyword arguments to `fn`. + fn_constructor_args: Positional arguments to pass to ``fn``'s constructor. + You can only provide this if ``fn`` is a callable class. These arguments + are top-level arguments in the underlying Ray actor construction task. + fn_constructor_kwargs: Keyword arguments to pass to ``fn``'s constructor. + This can only be provided if ``fn`` is a callable class. These arguments + are top-level arguments in the underlying Ray actor construction task. + num_cpus: The number of CPUs to reserve for each parallel map worker. + num_gpus: The number of GPUs to reserve for each parallel map worker. For + example, specify `num_gpus=1` to request 1 GPU for each parallel map + worker. + ray_remote_args: Additional resource requirements to request from + ray (e.g., num_gpus=1 to request GPUs for the map tasks). + + Returns: + The return type is determined by the return type of ``fn``, and the return + value is combined from results of all groups. + """ + # Globally sort records by key. + # Note that sort() will ensure that records with the same key are + # partitioned into the same block. + if self._key is not None: + sorted_ds = self._dataset.sort(self._key) + else: + sorted_ds = self._dataset.repartition(1) + + def get_key_boundaries(block_accessor: BlockAccessor) -> List[int]: + """Compute block boundaries based on the key(s)""" + + import numpy as np + + # Get the keys of the batch in numpy array format + keys = block_accessor.to_numpy(self._key) + + if isinstance(keys, dict): + # For multiple keys, we generate a separate tuple column + convert_to_multi_column_sorted_key = np.vectorize(_MultiColumnSortedKey) + keys: np.ndarray = convert_to_multi_column_sorted_key(*keys.values()) + + boundaries = [] + start = 0 + while start < keys.size: + end = start + np.searchsorted(keys[start:], keys[start], side="right") + boundaries.append(end) + start = end + return boundaries + + # The batch is the entire block, because we have batch_size=None for + # map_batches() below. + def apply_udf_to_groups(udf, batch, *args, **kwargs): + block = BlockAccessor.batch_to_block(batch) + block_accessor = BlockAccessor.for_block(block) + if self._key: + boundaries = get_key_boundaries(block_accessor) + else: + boundaries = [block_accessor.num_rows()] + start = 0 + for end in boundaries: + group_block = block_accessor.slice(start, end) + group_block_accessor = BlockAccessor.for_block(group_block) + # Convert block of each group to batch format here, because the + # block format here can be different from batch format + # (e.g. block is Arrow format, and batch is NumPy format).
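+                # [Editor's illustrative aside, not part of the original code:
+                # with sorted keys [1, 1, 2, 2, 3], get_key_boundaries() above
+                # returns [2, 4, 5]; each entry is one past a group's last row,
+                # so the slice/conversion below hands the UDF exactly one group.]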
+ group_batch = group_block_accessor.to_batch_format(batch_format) + applied = udf(group_batch, *args, **kwargs) + yield applied + start = end + + if isinstance(fn, CallableClass): + + class wrapped_fn: + def __init__(self, *args, **kwargs): + self.fn = fn(*args, **kwargs) + + def __call__(self, batch, *args, **kwargs): + yield from apply_udf_to_groups(self.fn, batch, *args, **kwargs) + + else: + + def wrapped_fn(batch, *args, **kwargs): + yield from apply_udf_to_groups(fn, batch, *args, **kwargs) + + # Change the name of the wrapped function so that users see the name of their + # function rather than `wrapped_fn` in the progress bar. + wrapped_fn.__name__ = fn.__name__ + + # Note we set batch_size=None here, so it will use the entire block as a batch, + # which ensures that each group will be contained within a batch in its entirety. + return sorted_ds._map_batches_without_batch_size_validation( + wrapped_fn, + batch_size=None, + compute=compute, + batch_format=batch_format, + zero_copy_batch=False, + fn_args=fn_args, + fn_kwargs=fn_kwargs, + fn_constructor_args=fn_constructor_args, + fn_constructor_kwargs=fn_constructor_kwargs, + num_cpus=num_cpus, + num_gpus=num_gpus, + concurrency=concurrency, + ray_remote_args_fn=None, + **ray_remote_args, + ) + + @PublicAPI(api_group=CDS_API_GROUP) + def count(self) -> Dataset: + """Compute count aggregation. + + Examples: + >>> import ray + >>> ray.data.from_items([ # doctest: +SKIP + ... {"A": x % 3, "B": x} for x in range(100)]).groupby( # doctest: +SKIP + ... "A").count() # doctest: +SKIP + + Returns: + A dataset of ``[k, v]`` columns where ``k`` is the groupby key and + ``v`` is the number of rows with that key. + If groupby key is ``None`` then the key part of return is omitted. + """ + return self.aggregate(Count()) + + @PublicAPI(api_group=CDS_API_GROUP) + def sum( + self, on: Union[str, List[str]] = None, ignore_nulls: bool = True + ) -> Dataset: + r"""Compute grouped sum aggregation. + + Examples: + >>> import ray + >>> ray.data.from_items([ # doctest: +SKIP + ... (i % 3, i, i**2) # doctest: +SKIP + ... for i in range(100)]) \ # doctest: +SKIP + ... .groupby(lambda x: x[0] % 3) \ # doctest: +SKIP + ... .sum(lambda x: x[2]) # doctest: +SKIP + >>> ray.data.range(100).groupby("id").sum() # doctest: +SKIP + >>> ray.data.from_items([ # doctest: +SKIP + ... {"A": i % 3, "B": i, "C": i**2} # doctest: +SKIP + ... for i in range(100)]) \ # doctest: +SKIP + ... .groupby("A") \ # doctest: +SKIP + ... .sum(["B", "C"]) # doctest: +SKIP + + Args: + on: a column name or a list of column names to aggregate. + ignore_nulls: Whether to ignore null values. If ``True``, null + values will be ignored when computing the sum; if ``False``, + the output will be null if a null value is encountered. + We consider np.nan, None, and pd.NaT to be null values. + Default is ``True``. + + Returns: + The sum result. + + For different values of ``on``, the return varies: + + - ``on=None``: a dataset containing a groupby key column, + ``"k"``, and a column-wise sum column for each original column + in the dataset. + - ``on=["col_1", ..., "col_n"]``: a dataset of ``n + 1`` + columns where the first column is the groupby key and the second + through ``n + 1`` columns are the results of the aggregations. + + If groupby key is ``None`` then the key part of return is omitted.
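+
+            Illustrative note (editor's addition; assumes the ``agg(col)``
+            output-naming convention used by Ray's built-in aggregations):
+            ``.groupby("A").sum(["B", "C"])`` yields the columns
+            ``A``, ``sum(B)``, and ``sum(C)``.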
+ """ + return self._aggregate_on(Sum, on, ignore_nulls) + + @PublicAPI(api_group=CDS_API_GROUP) + def min( + self, on: Union[str, List[str]] = None, ignore_nulls: bool = True + ) -> Dataset: + """Compute grouped min aggregation. + + Examples: + >>> import ray + >>> ray.data.le(100).groupby("value").min() # doctest: +SKIP + >>> ray.data.from_items([ # doctest: +SKIP + ... {"A": i % 3, "B": i, "C": i**2} # doctest: +SKIP + ... for i in range(100)]) \ # doctest: +SKIP + ... .groupby("A") \ # doctest: +SKIP + ... .min(["B", "C"]) # doctest: +SKIP + + Args: + on: a column name or a list of column names to aggregate. + ignore_nulls: Whether to ignore null values. If ``True``, null + values will be ignored when computing the min; if ``False``, + if a null value is encountered, the output will be null. + We consider np.nan, None, and pd.NaT to be null values. + Default is ``True``. + + Returns: + The min result. + + For different values of ``on``, the return varies: + + - ``on=None``: a dataset containing a groupby key column, + ``"k"``, and a column-wise min column for each original column in + the dataset. + - ``on=["col_1", ..., "col_n"]``: a dataset of ``n + 1`` + columns where the first column is the groupby key and the second + through ``n + 1`` columns are the results of the aggregations. + + If groupby key is ``None`` then the key part of return is omitted. + """ + return self._aggregate_on(Min, on, ignore_nulls) + + @PublicAPI(api_group=CDS_API_GROUP) + def max( + self, on: Union[str, List[str]] = None, ignore_nulls: bool = True + ) -> Dataset: + """Compute grouped max aggregation. + + Examples: + >>> import ray + >>> ray.data.le(100).groupby("value").max() # doctest: +SKIP + >>> ray.data.from_items([ # doctest: +SKIP + ... {"A": i % 3, "B": i, "C": i**2} # doctest: +SKIP + ... for i in range(100)]) \ # doctest: +SKIP + ... .groupby("A") \ # doctest: +SKIP + ... .max(["B", "C"]) # doctest: +SKIP + + Args: + on: a column name or a list of column names to aggregate. + ignore_nulls: Whether to ignore null values. If ``True``, null + values will be ignored when computing the max; if ``False``, + if a null value is encountered, the output will be null. + We consider np.nan, None, and pd.NaT to be null values. + Default is ``True``. + + Returns: + The max result. + + For different values of ``on``, the return varies: + + - ``on=None``: a dataset containing a groupby key column, + ``"k"``, and a column-wise max column for each original column in + the dataset. + - ``on=["col_1", ..., "col_n"]``: a dataset of ``n + 1`` + columns where the first column is the groupby key and the second + through ``n + 1`` columns are the results of the aggregations. + + If groupby key is ``None`` then the key part of return is omitted. + """ + return self._aggregate_on(Max, on, ignore_nulls) + + @PublicAPI(api_group=CDS_API_GROUP) + def mean( + self, on: Union[str, List[str]] = None, ignore_nulls: bool = True + ) -> Dataset: + """Compute grouped mean aggregation. + + Examples: + >>> import ray + >>> ray.data.le(100).groupby("value").mean() # doctest: +SKIP + >>> ray.data.from_items([ # doctest: +SKIP + ... {"A": i % 3, "B": i, "C": i**2} # doctest: +SKIP + ... for i in range(100)]) \ # doctest: +SKIP + ... .groupby("A") \ # doctest: +SKIP + ... .mean(["B", "C"]) # doctest: +SKIP + + Args: + on: a column name or a list of column names to aggregate. + ignore_nulls: Whether to ignore null values. 
If ``True``, null + values will be ignored when computing the mean; if ``False``, + the output will be null if a null value is encountered. + We consider np.nan, None, and pd.NaT to be null values. + Default is ``True``. + + Returns: + The mean result. + + For different values of ``on``, the return varies: + + - ``on=None``: a dataset containing a groupby key column, + ``"k"``, and a column-wise mean column for each original column + in the dataset. + - ``on=["col_1", ..., "col_n"]``: a dataset of ``n + 1`` + columns where the first column is the groupby key and the second + through ``n + 1`` columns are the results of the aggregations. + + If groupby key is ``None`` then the key part of return is omitted. + """ + return self._aggregate_on(Mean, on, ignore_nulls) + + @PublicAPI(api_group=CDS_API_GROUP) + def std( + self, + on: Union[str, List[str]] = None, + ddof: int = 1, + ignore_nulls: bool = True, + ) -> Dataset: + """Compute grouped standard deviation aggregation. + + Examples: + >>> import ray + >>> ray.data.range(100).groupby("id").std(ddof=0) # doctest: +SKIP + >>> ray.data.from_items([ # doctest: +SKIP + ... {"A": i % 3, "B": i, "C": i**2} # doctest: +SKIP + ... for i in range(100)]) \ # doctest: +SKIP + ... .groupby("A") \ # doctest: +SKIP + ... .std(["B", "C"]) # doctest: +SKIP + + NOTE: This uses Welford's online method for an accumulator-style + computation of the standard deviation. This method was chosen due to + its numerical stability and because it can be computed in a single pass. + This may give different (but more accurate) results than NumPy, Pandas, + and sklearn, which use a less numerically stable two-pass algorithm. + See + https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm + + Args: + on: a column name or a list of column names to aggregate. + ddof: Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements. + ignore_nulls: Whether to ignore null values. If ``True``, null + values will be ignored when computing the std; if ``False``, + the output will be null if a null value is encountered. + We consider np.nan, None, and pd.NaT to be null values. + Default is ``True``. + + Returns: + The standard deviation result. + + For different values of ``on``, the return varies: + + - ``on=None``: a dataset containing a groupby key column, + ``"k"``, and a column-wise std column for each original column in + the dataset. + - ``on=["col_1", ..., "col_n"]``: a dataset of ``n + 1`` + columns where the first column is the groupby key and the second + through ``n + 1`` columns are the results of the aggregations. + + If groupby key is ``None`` then the key part of return is omitted. + """ + return self._aggregate_on(Std, on, ignore_nulls, ddof=ddof) + + +# Backwards compatibility alias.
+GroupedDataset = GroupedData diff --git a/deepseek/lib/python3.10/site-packages/ray/data/iterator.py b/deepseek/lib/python3.10/site-packages/ray/data/iterator.py new file mode 100644 index 0000000000000000000000000000000000000000..2f19111af80f79d7ae44fc1c7334ca3938b268b5 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/data/iterator.py @@ -0,0 +1,930 @@ +import abc +import time +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Iterable, + Iterator, + List, + Optional, + Tuple, + TypeVar, + Union, +) + +import numpy as np + +from ray.data._internal.block_batching.iter_batches import iter_batches +from ray.data._internal.execution.interfaces import RefBundle +from ray.data._internal.logical.operators.input_data_operator import InputData +from ray.data._internal.logical.optimizers import LogicalPlan +from ray.data._internal.plan import ExecutionPlan +from ray.data._internal.stats import DatasetStats, StatsManager +from ray.data.block import BlockAccessor, DataBatch, _apply_batch_format +from ray.util.annotations import PublicAPI + +if TYPE_CHECKING: + import tensorflow as tf + import torch + + from ray.data.dataset import ( + CollatedData, + MaterializedDataset, + Schema, + TensorFlowTensorBatchType, + TorchBatchType, + ) + + +T = TypeVar("T") + + +class _IterableFromIterator(Iterable[T]): + def __init__(self, iterator_gen: Callable[[], Iterator[T]]): + """Constructs an Iterable from an iterator generator. + + Args: + iterator_gen: A function that returns an iterator each time it + is called. For example, this can be a generator function. + """ + self.iterator_gen = iterator_gen + + def __iter__(self): + return self.iterator_gen() + + +@PublicAPI(stability="beta") +class DataIterator(abc.ABC): + """An iterator for reading records from a :class:`~Dataset`. + + For Datasets, each iteration call represents a complete read of all items in the + Dataset. + + If using Ray Train, each trainer actor should get its own iterator by calling + :meth:`ray.train.get_dataset_shard("train") + <ray.train.get_dataset_shard>`. + + Examples: + >>> import ray + >>> ds = ray.data.range(5) + >>> ds + Dataset(num_rows=5, schema={id: int64}) + >>> ds.iterator() + DataIterator(Dataset(num_rows=5, schema={id: int64})) + """ + + @abc.abstractmethod + def _to_ref_bundle_iterator( + self, + ) -> Tuple[Iterator[RefBundle], Optional[DatasetStats], bool]: + """Returns the iterator to use for `iter_batches`. + + Returns: + A tuple. The first item of the tuple is an iterator over RefBundles. + The second item of the tuple is a DatasetStats object used for recording + stats during iteration. + The third item is a boolean indicating if the blocks can be safely cleared + after use. + """ + raise NotImplementedError + + @PublicAPI(stability="beta") + def iter_batches( + self, + *, + prefetch_batches: int = 1, + batch_size: int = 256, + batch_format: Optional[str] = "default", + drop_last: bool = False, + local_shuffle_buffer_size: Optional[int] = None, + local_shuffle_seed: Optional[int] = None, + _collate_fn: Optional[Callable[[DataBatch], "CollatedData"]] = None, + _finalize_fn: Optional[Callable[[Any], Any]] = None, + ) -> Iterable[DataBatch]: + """Return a batched iterable over the dataset. + + Examples: + >>> import ray + >>> for batch in ray.data.range( + ... 1000000 + ... ).iterator().iter_batches(): # doctest: +SKIP + ... print(batch) # doctest: +SKIP + + Time complexity: O(1) + + Args: + prefetch_batches: The number of batches to fetch ahead of the current batch.
If set to greater than 0, a separate threadpool will be used + to fetch the objects to the local node, format the batches, and apply + the collate_fn. Defaults to 1. + batch_size: The number of rows in each batch, or None to use entire blocks + as batches (blocks may contain different number of rows). + The final batch may include fewer than ``batch_size`` rows if + ``drop_last`` is ``False``. Defaults to 256. + batch_format: Specify ``"default"`` to use the default block format + (NumPy), ``"pandas"`` to select ``pandas.DataFrame``, "pyarrow" to + select ``pyarrow.Table``, or ``"numpy"`` to select + ``Dict[str, numpy.ndarray]``, or None to return the underlying block + exactly as is with no additional formatting. + drop_last: Whether to drop the last batch if it's incomplete. + local_shuffle_buffer_size: If non-None, the data will be randomly shuffled + using a local in-memory shuffle buffer, and this value will serve as the + minimum number of rows that must be in the local in-memory shuffle + buffer in order to yield a batch. When there are no more rows to add to + the buffer, the remaining rows in the buffer will be drained. + local_shuffle_seed: The seed to use for the local random shuffle. + + Returns: + An iterable over record batches. + """ + batch_format = _apply_batch_format(batch_format) + + def _create_iterator() -> Iterator[DataBatch]: + time_start = time.perf_counter() + # Iterate through the dataset from the start each time + # _iterator_gen is called. + # This allows multiple iterations of the dataset without + # needing to explicitly call `iter_batches()` multiple times. + ( + ref_bundles_iterator, + stats, + blocks_owned_by_consumer, + ) = self._to_ref_bundle_iterator() + + iterator = iter( + iter_batches( + ref_bundles_iterator, + stats=stats, + clear_block_after_read=blocks_owned_by_consumer, + batch_size=batch_size, + batch_format=batch_format, + drop_last=drop_last, + collate_fn=_collate_fn, + finalize_fn=_finalize_fn, + shuffle_buffer_min_size=local_shuffle_buffer_size, + shuffle_seed=local_shuffle_seed, + prefetch_batches=prefetch_batches, + ) + ) + + dataset_tag = self._get_dataset_tag() + + if stats: + stats.iter_initialize_s.add(time.perf_counter() - time_start) + + for batch in iterator: + yield batch + StatsManager.update_iteration_metrics(stats, dataset_tag) + StatsManager.clear_iteration_metrics(dataset_tag) + + if stats: + stats.iter_total_s.add(time.perf_counter() - time_start) + + return _IterableFromIterator(_create_iterator) + + def _get_dataset_tag(self) -> str: + return "unknown_dataset" + + def iter_rows(self) -> Iterable[Dict[str, Any]]: + """Return a local row iterable over the dataset. + + If the dataset is a tabular dataset (Arrow/Pandas blocks), dicts + are yielded for each row by the iterator. If the dataset is not tabular, + the raw row is yielded. + + Examples: + >>> import ray + >>> dataset = ray.data.range(10) + >>> next(iter(dataset.iterator().iter_rows())) + {'id': 0} + + Time complexity: O(1) + + Returns: + An iterable over rows of the dataset. 
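+
+            A further illustrative sketch (editor's addition):
+
+            >>> [row["id"] for row in dataset.iterator().iter_rows()][:3]  # doctest: +SKIP
+            [0, 1, 2]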
+ """ + batch_iterable = self.iter_batches( + batch_size=None, batch_format=None, prefetch_batches=1 + ) + + def _wrapped_iterator(): + for batch in batch_iterable: + batch = BlockAccessor.for_block(BlockAccessor.batch_to_block(batch)) + for row in batch.iter_rows(public_row_format=True): + yield row + + return _IterableFromIterator(_wrapped_iterator) + + @abc.abstractmethod + @PublicAPI(stability="beta") + def stats(self) -> str: + """Returns a string containing execution timing information.""" + raise NotImplementedError + + @abc.abstractmethod + def schema(self) -> "Schema": + """Return the schema of the dataset iterated over.""" + raise NotImplementedError + + @PublicAPI(stability="beta") + def iter_torch_batches( + self, + *, + prefetch_batches: int = 1, + batch_size: Optional[int] = 256, + dtypes: Optional[Union["torch.dtype", Dict[str, "torch.dtype"]]] = None, + device: str = "auto", + collate_fn: Optional[Callable[[Dict[str, np.ndarray]], "CollatedData"]] = None, + drop_last: bool = False, + local_shuffle_buffer_size: Optional[int] = None, + local_shuffle_seed: Optional[int] = None, + ) -> Iterable["TorchBatchType"]: + """Return a batched iterable of Torch Tensors over the dataset. + + This iterable yields a dictionary of column-tensors. If you are looking for + more flexibility in the tensor conversion (e.g. casting dtypes) or the batch + format, try using :meth:`~ray.data.DataIterator.iter_batches` directly. + + Examples: + >>> import ray + >>> for batch in ray.data.range( + ... 12, + ... ).iterator().iter_torch_batches(batch_size=4): + ... print(batch) + {'id': tensor([0, 1, 2, 3])} + {'id': tensor([4, 5, 6, 7])} + {'id': tensor([ 8, 9, 10, 11])} + + Use the ``collate_fn`` to customize how the tensor batch is created. + + >>> from typing import Any, Dict + >>> import torch + >>> import numpy as np + >>> import ray + >>> def collate_fn(batch: Dict[str, np.ndarray]) -> Any: + ... return torch.stack( + ... [torch.as_tensor(array) for array in batch.values()], + ... axis=1 + ... ) + >>> iterator = ray.data.from_items([ + ... {"col_1": 1, "col_2": 2}, + ... {"col_1": 3, "col_2": 4}]).iterator() + >>> for batch in iterator.iter_torch_batches(collate_fn=collate_fn): + ... print(batch) + tensor([[1, 2], + [3, 4]]) + + Time complexity: O(1) + + Args: + prefetch_batches: The number of batches to fetch ahead of the current batch + to fetch. If set to greater than 0, a separate threadpool will be used + to fetch the objects to the local node, format the batches, and apply + the collate_fn. Defaults to 1. + batch_size: The number of rows in each batch, or None to use entire blocks + as batches (blocks may contain different number of rows). + The final batch may include fewer than ``batch_size`` rows if + ``drop_last`` is ``False``. Defaults to 256. + dtypes: The Torch dtype(s) for the created tensor(s); if None, the dtype + will be inferred from the tensor data. You can't use this parameter + with ``collate_fn``. + device: The device on which the tensor should be placed. Defaults to + "auto" which moves the tensors to the appropriate device when the + Dataset is passed to Ray Train and ``collate_fn`` is not provided. + Otherwise, defaults to CPU. You can't use this parameter with + ``collate_fn``. + collate_fn: A function to convert a Numpy batch to a PyTorch tensor batch. + When this parameter is specified, the user should manually handle the + host to device data transfer outside of ``collate_fn``. + This is useful for further processing the data after it has been + batched. 
Potential use cases include collating along a dimension other + than the first, padding sequences of various lengths, or generally + handling batches of different length tensors. If not provided, the + default collate function is used which simply converts the batch of + numpy arrays to a batch of PyTorch tensors. This API is still + experimental and is subject to change. You can't use this parameter in + conjunction with ``dtypes`` or ``device``. + drop_last: Whether to drop the last batch if it's incomplete. + local_shuffle_buffer_size: If non-None, the data will be randomly shuffled + using a local in-memory shuffle buffer, and this value will serve as the + minimum number of rows that must be in the local in-memory shuffle + buffer in order to yield a batch. When there are no more rows to add to + the buffer, the remaining rows in the buffer will be drained. This + buffer size must be greater than or equal to ``batch_size``, and + therefore ``batch_size`` must also be specified when using local + shuffling. + local_shuffle_seed: The seed to use for the local random shuffle. + + Returns: + An iterable over Torch Tensor batches. + """ + + from ray.air._internal.torch_utils import ( + convert_ndarray_batch_to_torch_tensor_batch, + ) + from ray.train.torch import get_device + + if collate_fn is not None and (dtypes is not None or device != "auto"): + raise ValueError( + "collate_fn cannot be used with dtypes and device. " + "You should manually move the output Torch tensors to the " + "desired dtype and device outside of collate_fn." + ) + + if device == "auto": + # Use the appropriate device for Ray Train, or fall back to CPU if + # Ray Train is not being used. + device = get_device() + + if collate_fn is None: + # The default collate_fn handles formatting and Tensor creation. + # Here, we set device=None to defer host to device data transfer + # to the subsequent finalize_fn. + def collate_fn(batch: Union[np.ndarray, Dict[str, np.ndarray]]): + return convert_ndarray_batch_to_torch_tensor_batch( + batch, + dtypes=dtypes, + device=None, + ) + + # The default finalize_fn handles the host to device data transfer. + # This is executed in a 1-thread pool separately from collate_fn + # to allow independent parallelism of these steps. + def finalize_fn(batch: Union["torch.Tensor", Dict[str, "torch.Tensor"]]): + if device is not None: + if isinstance(batch, dict): + for k, t in batch.items(): + batch[k] = t.to(device=device) + else: + batch = batch.to(device=device) + return batch + + else: + finalize_fn = None + + return self.iter_batches( + prefetch_batches=prefetch_batches, + batch_size=batch_size, + drop_last=drop_last, + local_shuffle_buffer_size=local_shuffle_buffer_size, + local_shuffle_seed=local_shuffle_seed, + _collate_fn=collate_fn, + _finalize_fn=finalize_fn, + ) + + def iter_tf_batches( + self, + *, + prefetch_batches: int = 1, + batch_size: Optional[int] = 256, + dtypes: Optional[Union["tf.dtypes.DType", Dict[str, "tf.dtypes.DType"]]] = None, + drop_last: bool = False, + local_shuffle_buffer_size: Optional[int] = None, + local_shuffle_seed: Optional[int] = None, + ) -> Iterable["TensorFlowTensorBatchType"]: + """Return a batched iterable of TensorFlow Tensors over the dataset. + + This iterable will yield single-tensor batches if the underlying dataset + consists of a single column; otherwise, it will yield a dictionary of + column-tensors. + + .. tip:: + If you don't need the additional flexibility provided by this method, + consider using :meth:`~ray.data.Dataset.to_tf` instead.
It's easier + to use. + + Examples: + >>> import ray + >>> for batch in ray.data.range( # doctest: +SKIP + ... 12, + ... ).iter_tf_batches(batch_size=4): + ... print(batch.shape) # doctest: +SKIP + (4, 1) + (4, 1) + (4, 1) + + Time complexity: O(1) + + Args: + prefetch_batches: The number of batches to fetch ahead of the current batch. + If set to greater than 0, a separate threadpool will be used + to fetch the objects to the local node, format the batches, and apply + the collate_fn. Defaults to 1. + batch_size: The number of rows in each batch, or None to use entire blocks + as batches (blocks may contain different number of rows). + The final batch may include fewer than ``batch_size`` rows if + ``drop_last`` is ``False``. Defaults to 256. + dtypes: The TensorFlow dtype(s) for the created tensor(s); if None, the + dtype will be inferred from the tensor data. + drop_last: Whether to drop the last batch if it's incomplete. + local_shuffle_buffer_size: If non-None, the data will be randomly shuffled + using a local in-memory shuffle buffer, and this value will serve as the + minimum number of rows that must be in the local in-memory shuffle + buffer in order to yield a batch. When there are no more rows to add to + the buffer, the remaining rows in the buffer will be drained. This + buffer size must be greater than or equal to ``batch_size``, and + therefore ``batch_size`` must also be specified when using local + shuffling. + local_shuffle_seed: The seed to use for the local random shuffle. + + Returns: + An iterable over TensorFlow Tensor batches. + """ + from ray.air._internal.tensorflow_utils import ( + convert_ndarray_batch_to_tf_tensor_batch, + ) + + batch_iterable = self.iter_batches( + prefetch_batches=prefetch_batches, + batch_size=batch_size, + drop_last=drop_last, + local_shuffle_buffer_size=local_shuffle_buffer_size, + local_shuffle_seed=local_shuffle_seed, + ) + mapped_iterable = map( + lambda batch: convert_ndarray_batch_to_tf_tensor_batch( + batch, dtypes=dtypes + ), + batch_iterable, + ) + + return mapped_iterable + + def to_torch( + self, + *, + label_column: Optional[str] = None, + feature_columns: Optional[ + Union[List[str], List[List[str]], Dict[str, List[str]]] + ] = None, + label_column_dtype: Optional["torch.dtype"] = None, + feature_column_dtypes: Optional[ + Union["torch.dtype", List["torch.dtype"], Dict[str, "torch.dtype"]] + ] = None, + batch_size: int = 1, + prefetch_batches: int = 1, + drop_last: bool = False, + local_shuffle_buffer_size: Optional[int] = None, + local_shuffle_seed: Optional[int] = None, + unsqueeze_label_tensor: bool = True, + unsqueeze_feature_tensors: bool = True, + ) -> "torch.utils.data.IterableDataset": + """Return a Torch IterableDataset over this dataset. + + This is only supported for datasets convertible to Arrow records. + + It is recommended to use the returned ``IterableDataset`` directly + instead of passing it into a torch ``DataLoader``. + + Each element in IterableDataset will be a tuple consisting of 2 + elements. The first item contains the feature tensor(s), and the + second item is the label tensor. Those can take on different + forms, depending on the specified arguments.
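+
+        A minimal usage sketch (editor's addition; ``ds`` is a hypothetical
+        dataset with feature columns ``"x1"``, ``"x2"`` and label ``"y"``):
+
+            >>> torch_ds = ds.iterator().to_torch(  # doctest: +SKIP
+            ...     label_column="y", feature_columns=["x1", "x2"], batch_size=2)
+            >>> features, labels = next(iter(torch_ds))  # doctest: +SKIP
+            >>> features.shape, labels.shape  # doctest: +SKIP
+            (torch.Size([2, 2]), torch.Size([2, 1]))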
+ + For the features tensor (N is the ``batch_size`` and n, m, k + are the number of features per tensor): + + * If ``feature_columns`` is a ``List[str]``, the features will be + a tensor of shape (N, n), with columns corresponding to + ``feature_columns`` + + * If ``feature_columns`` is a ``List[List[str]]``, the features will be + a list of tensors of shape [(N, m),...,(N, k)], with columns of each + tensor corresponding to the elements of ``feature_columns`` + + * If ``feature_columns`` is a ``Dict[str, List[str]]``, the features + will be a dict of key-tensor pairs of shape + {key1: (N, m),..., keyN: (N, k)}, with columns of each + tensor corresponding to the value of ``feature_columns`` under the + key. + + If ``unsqueeze_label_tensor=True`` (default), the label tensor will be + of shape (N, 1). Otherwise, it will be of shape (N,). + If ``label_column`` is specified as ``None``, then no column from the + ``Dataset`` will be treated as the label, and the output label tensor + will be ``None``. + + Note that you probably want to call ``.split()`` on this dataset if + there are to be multiple Torch workers consuming the data. + + Time complexity: O(1) + + Args: + label_column: The name of the column used as the + label (second element of the output tuple). Can be None for + prediction, in which case the second element of returned + tuple will also be None. + feature_columns: The names of the columns + to use as the features. Can be a list of lists or + a dict of string-list pairs for multi-tensor output. + If None, then use all columns except the label column as + the features. + label_column_dtype: The torch dtype to + use for the label column. If None, then automatically infer + the dtype. + feature_column_dtypes: The dtypes to use for the feature + tensors. This should match the format of ``feature_columns``, + or be a single dtype, in which case it will be applied to + all tensors. If None, then automatically infer the dtype. + batch_size: How many samples per batch to yield at a time. + Defaults to 1. + prefetch_batches: The number of batches to fetch ahead of the current batch. + If set to greater than 0, a separate threadpool will be used + to fetch the objects to the local node, format the batches, and apply + the collate_fn. Defaults to 1. + drop_last: Set to True to drop the last incomplete batch, + if the dataset size is not divisible by the batch size. If + False and the size of dataset is not divisible by the batch + size, then the last batch will be smaller. Defaults to False. + local_shuffle_buffer_size: If non-None, the data will be randomly shuffled + using a local in-memory shuffle buffer, and this value will serve as the + minimum number of rows that must be in the local in-memory shuffle + buffer in order to yield a batch. When there are no more rows to add to + the buffer, the remaining rows in the buffer will be drained. This + buffer size must be greater than or equal to ``batch_size``, and + therefore ``batch_size`` must also be specified when using local + shuffling. + local_shuffle_seed: The seed to use for the local random shuffle. + unsqueeze_label_tensor: If set to True, the label tensor + will be unsqueezed (reshaped to (N, 1)). Otherwise, it will + be left as is, that is (N, ). In general, regression loss + functions expect an unsqueezed tensor, while classification + loss functions expect a squeezed one. Defaults to True.
+ unsqueeze_feature_tensors: If set to True, the features tensors + will be unsqueezed (reshaped to (N, 1)) before being concatenated into + the final features tensor. Otherwise, they will be left as is, that is + (N, ). Defaults to True. + + Returns: + A torch IterableDataset. + """ + import torch + + from ray.air._internal.torch_utils import convert_pandas_to_torch_tensor + from ray.data._internal.torch_iterable_dataset import TorchIterableDataset + + # If an empty collection is passed in, treat it the same as None + if not feature_columns: + feature_columns = None + + if feature_column_dtypes and not isinstance(feature_column_dtypes, torch.dtype): + if isinstance(feature_columns, dict): + if not isinstance(feature_column_dtypes, dict): + raise TypeError( + "If `feature_columns` is a dict, " + "`feature_column_dtypes` must be None, `torch.dtype`," + f" or dict, got {type(feature_column_dtypes)}." + ) + if set(feature_columns) != set(feature_column_dtypes): + raise ValueError( + "`feature_columns` and `feature_column_dtypes` " + "must have the same keys." + ) + if any(not subcolumns for subcolumns in feature_columns.values()): + raise ValueError("column list may not be empty") + elif isinstance(feature_columns[0], (list, tuple)): + if not isinstance(feature_column_dtypes, (list, tuple)): + raise TypeError( + "If `feature_columns` is a list of lists, " + "`feature_column_dtypes` must be None, `torch.dtype`," + f" or a sequence, got {type(feature_column_dtypes)}." + ) + if len(feature_columns) != len(feature_column_dtypes): + raise ValueError( + "`feature_columns` and `feature_column_dtypes` " + "must have the same length." + ) + if any(not subcolumns for subcolumns in feature_columns): + raise ValueError("column list may not be empty") + + def make_generator(): + for batch in self.iter_batches( + batch_size=batch_size, + batch_format="pandas", + prefetch_batches=prefetch_batches, + drop_last=drop_last, + local_shuffle_buffer_size=local_shuffle_buffer_size, + local_shuffle_seed=local_shuffle_seed, + ): + if label_column: + label_tensor = convert_pandas_to_torch_tensor( + batch, + [label_column], + label_column_dtype, + unsqueeze=unsqueeze_label_tensor, + ) + batch.pop(label_column) + else: + label_tensor = None + + if isinstance(feature_columns, dict): + features_tensor = { + key: convert_pandas_to_torch_tensor( + batch, + feature_columns[key], + ( + feature_column_dtypes[key] + if isinstance(feature_column_dtypes, dict) + else feature_column_dtypes + ), + unsqueeze=unsqueeze_feature_tensors, + ) + for key in feature_columns + } + else: + features_tensor = convert_pandas_to_torch_tensor( + batch, + columns=feature_columns, + column_dtypes=feature_column_dtypes, + unsqueeze=unsqueeze_feature_tensors, + ) + + yield (features_tensor, label_tensor) + + return TorchIterableDataset(make_generator) + + @PublicAPI(stability="beta") + def to_tf( + self, + feature_columns: Union[str, List[str]], + label_columns: Union[str, List[str]], + *, + additional_columns: Union[Optional[str], Optional[List[str]]] = None, + prefetch_batches: int = 1, + batch_size: int = 1, + drop_last: bool = False, + local_shuffle_buffer_size: Optional[int] = None, + local_shuffle_seed: Optional[int] = None, + feature_type_spec: Union["tf.TypeSpec", Dict[str, "tf.TypeSpec"]] = None, + label_type_spec: Union["tf.TypeSpec", Dict[str, "tf.TypeSpec"]] = None, + additional_type_spec: Union[ + Optional["tf.TypeSpec"], Optional[Dict[str, "tf.TypeSpec"]] + ] = None, + ) -> "tf.data.Dataset": + """Return a TF Dataset over this dataset. 
+ + .. warning:: + If your dataset contains ragged tensors, this method errors. To prevent + errors, :ref:`resize your tensors `. + + Examples: + >>> import ray + >>> ds = ray.data.read_csv( + ... "s3://anonymous@air-example-data/iris.csv" + ... ) + >>> it = ds.iterator(); it + DataIterator(Dataset( + num_rows=?, + schema={ + sepal length (cm): double, + sepal width (cm): double, + petal length (cm): double, + petal width (cm): double, + target: int64 + } + )) + + If your model accepts a single tensor as input, specify a single feature column. + + >>> it.to_tf(feature_columns="sepal length (cm)", label_columns="target") + <_OptionsDataset element_spec=(TensorSpec(shape=(None,), dtype=tf.float64, name='sepal length (cm)'), TensorSpec(shape=(None,), dtype=tf.int64, name='target'))> + + If your model accepts a dictionary as input, specify a list of feature columns. + + >>> it.to_tf(["sepal length (cm)", "sepal width (cm)"], "target") + <_OptionsDataset element_spec=({'sepal length (cm)': TensorSpec(shape=(None,), dtype=tf.float64, name='sepal length (cm)'), 'sepal width (cm)': TensorSpec(shape=(None,), dtype=tf.float64, name='sepal width (cm)')}, TensorSpec(shape=(None,), dtype=tf.int64, name='target'))> + + If your dataset contains multiple features but your model accepts a single + tensor as input, combine features with + :class:`~ray.data.preprocessors.Concatenator`. + + >>> from ray.data.preprocessors import Concatenator + >>> columns_to_concat = ["sepal length (cm)", "sepal width (cm)", "petal length (cm)", "petal width (cm)"] + >>> preprocessor = Concatenator(columns=columns_to_concat, output_column_name="features") + >>> it = preprocessor.transform(ds).iterator() + >>> it + DataIterator(Concatenator + +- Dataset( + num_rows=?, + schema={ + sepal length (cm): double, + sepal width (cm): double, + petal length (cm): double, + petal width (cm): double, + target: int64 + } + )) + >>> it.to_tf("features", "target") + <_OptionsDataset element_spec=(TensorSpec(shape=(None, 4), dtype=tf.float64, name='features'), TensorSpec(shape=(None,), dtype=tf.int64, name='target'))> + + If your model accepts different types, shapes, or names of tensors as input, specify the type spec. + If type specs are not specified, they are automatically inferred from the schema of the iterator. + + >>> import tensorflow as tf + >>> it.to_tf( + ... feature_columns="features", + ... label_columns="target", + ... feature_type_spec=tf.TensorSpec(shape=(None, 4), dtype=tf.float32, name="features"), + ... label_type_spec=tf.TensorSpec(shape=(None,), dtype=tf.float32, name="label") + ... ) + <_OptionsDataset element_spec=(TensorSpec(shape=(None, 4), dtype=tf.float32, name='features'), TensorSpec(shape=(None,), dtype=tf.float32, name='label'))> + + If your model accepts additional metadata aside from features and label, specify a single additional column or a list of additional columns. + A common use case is to include sample weights in the data samples and train a ``tf.keras.Model`` with ``tf.keras.Model.fit``. 
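+
+            A minimal training sketch (editor's addition; assumes the
+            ``"sample weights"`` column added in the example below, and a
+            hypothetical compiled ``model``):
+
+            >>> tf_ds = it.to_tf(  # doctest: +SKIP
+            ...     feature_columns="sepal length (cm)",
+            ...     label_columns="target",
+            ...     additional_columns="sample weights")
+            >>> model.fit(tf_ds, epochs=1)  # doctest: +SKIP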
+ + >>> import pandas as pd + >>> ds = ds.add_column("sample weights", lambda df: pd.Series([1] * len(df))) + >>> it = ds.iterator() + >>> it.to_tf(feature_columns="sepal length (cm)", label_columns="target", additional_columns="sample weights") + <_OptionsDataset element_spec=(TensorSpec(shape=(None,), dtype=tf.float64, name='sepal length (cm)'), TensorSpec(shape=(None,), dtype=tf.int64, name='target'), TensorSpec(shape=(None,), dtype=tf.int64, name='sample weights'))> + + If your model accepts different types, shapes, or names for the additional metadata, specify the type spec of the additional column. + + >>> it.to_tf( + ... feature_columns="sepal length (cm)", + ... label_columns="target", + ... additional_columns="sample weights", + ... additional_type_spec=tf.TensorSpec(shape=(None,), dtype=tf.float32, name="weight") + ... ) + <_OptionsDataset element_spec=(TensorSpec(shape=(None,), dtype=tf.float64, name='sepal length (cm)'), TensorSpec(shape=(None,), dtype=tf.int64, name='target'), TensorSpec(shape=(None,), dtype=tf.float32, name='weight'))> + + Args: + feature_columns: Columns that correspond to model inputs. If this is a + string, the input data is a tensor. If this is a list, the input data + is a ``dict`` that maps column names to their tensor representation. + label_columns: Columns that correspond to model targets. If this is a + string, the target data is a tensor. If this is a list, the target data + is a ``dict`` that maps column names to their tensor representation. + additional_columns: Columns that correspond to sample weights or other metadata. + If this is a string, the weight data is a tensor. If this is a list, the + weight data is a ``dict`` that maps column names to their tensor representation. + prefetch_batches: The number of batches to fetch ahead of the current batch + to fetch. If set to greater than 0, a separate threadpool will be used + to fetch the objects to the local node, format the batches, and apply + the collate_fn. Defaults to 1. + batch_size: Record batch size. Defaults to 1. + drop_last: Set to True to drop the last incomplete batch, + if the dataset size is not divisible by the batch size. If + False and the size of dataset is not divisible by the batch + size, then the last batch will be smaller. Defaults to False. + local_shuffle_buffer_size: If non-None, the data will be randomly shuffled + using a local in-memory shuffle buffer, and this value will serve as the + minimum number of rows that must be in the local in-memory shuffle + buffer in order to yield a batch. When there are no more rows to add to + the buffer, the remaining rows in the buffer will be drained. This + buffer size must be greater than or equal to ``batch_size``, and + therefore ``batch_size`` must also be specified when using local + shuffling. + local_shuffle_seed: The seed to use for the local random shuffle. + feature_type_spec: The `tf.TypeSpec` of `feature_columns`. If there is + only one column, specify a `tf.TypeSpec`. If there are multiple columns, + specify a ``dict`` that maps column names to their `tf.TypeSpec`. + Default is `None` to automatically infer the type of each column. + label_type_spec: The `tf.TypeSpec` of `label_columns`. If there is + only one column, specify a `tf.TypeSpec`. If there are multiple columns, + specify a ``dict`` that maps column names to their `tf.TypeSpec`. + Default is `None` to automatically infer the type of each column. + additional_type_spec: The `tf.TypeSpec` of `additional_columns`. 
If there + is only one column, specify a `tf.TypeSpec`. If there are multiple + columns, specify a ``dict`` that maps column names to their `tf.TypeSpec`. + Default is `None` to automatically infer the type of each column. + + Returns: + A ``tf.data.Dataset`` that yields inputs and targets. + """ # noqa: E501 + + from ray.air._internal.tensorflow_utils import ( + convert_ndarray_to_tf_tensor, + get_type_spec, + ) + + try: + import tensorflow as tf + except ImportError: + raise ValueError("tensorflow must be installed!") + + def validate_column(column: str) -> None: + if column not in valid_columns: + raise ValueError( + f"You specified '{column}' in `feature_columns`, " + f"`label_columns`, or `additional_columns`, but there's no " + f"column named '{column}' in the dataset. " + f"Valid column names are: {valid_columns}." + ) + + def validate_columns(columns: Union[str, List]) -> None: + if isinstance(columns, list): + for column in columns: + validate_column(column) + else: + validate_column(columns) + + def convert_batch_to_tensors( + batch: Dict[str, np.ndarray], + *, + columns: Union[str, List[str]], + type_spec: Union[tf.TypeSpec, Dict[str, tf.TypeSpec]], + ) -> Union[tf.Tensor, Dict[str, tf.Tensor]]: + if isinstance(columns, str): + return convert_ndarray_to_tf_tensor(batch[columns], type_spec=type_spec) + return { + column: convert_ndarray_to_tf_tensor( + batch[column], type_spec=type_spec[column] + ) + for column in columns + } + + def generator(): + for batch in self.iter_batches( + prefetch_batches=prefetch_batches, + batch_size=batch_size, + drop_last=drop_last, + local_shuffle_buffer_size=local_shuffle_buffer_size, + local_shuffle_seed=local_shuffle_seed, + ): + assert isinstance(batch, dict) + features = convert_batch_to_tensors( + batch, columns=feature_columns, type_spec=feature_type_spec + ) + labels = convert_batch_to_tensors( + batch, columns=label_columns, type_spec=label_type_spec + ) + + if additional_columns is None: + yield features, labels + else: + additional_metadata = convert_batch_to_tensors( + batch, + columns=additional_columns, + type_spec=additional_type_spec, + ) + yield features, labels, additional_metadata + + if feature_type_spec is None or label_type_spec is None: + schema = self.schema() + valid_columns = set(schema.names) + validate_columns(feature_columns) + validate_columns(label_columns) + feature_type_spec = get_type_spec(schema, columns=feature_columns) + label_type_spec = get_type_spec(schema, columns=label_columns) + + if additional_columns is not None and additional_type_spec is None: + schema = self.schema() + valid_columns = set(schema.names) + validate_columns(additional_columns) + additional_type_spec = get_type_spec(schema, columns=additional_columns) + + if additional_columns is not None: + dataset = tf.data.Dataset.from_generator( + generator, + output_signature=( + feature_type_spec, + label_type_spec, + additional_type_spec, + ), + ) + else: + dataset = tf.data.Dataset.from_generator( + generator, output_signature=(feature_type_spec, label_type_spec) + ) + + options = tf.data.Options() + options.experimental_distribute.auto_shard_policy = ( + tf.data.experimental.AutoShardPolicy.OFF + ) + return dataset.with_options(options) + + @PublicAPI(stability="beta") + def materialize(self) -> "MaterializedDataset": + """Execute and materialize this data iterator into object store memory. + + .. 
note:: + This method triggers the execution and materializes all blocks + of the iterator, returning its contents as a + :class:`~ray.data.dataset.MaterializedDataset` for further processing. + """ + + from ray.data.dataset import MaterializedDataset + + ref_bundles_iter, stats, _ = self._to_ref_bundle_iterator() + + ref_bundles = list(ref_bundles_iter) + execution_plan = ExecutionPlan(stats) + logical_plan = LogicalPlan( + InputData(input_data=ref_bundles), + execution_plan._context, + ) + return MaterializedDataset( + execution_plan, + logical_plan, + ) + + def __del__(self): + # Clear metrics on deletion in case the iterator was not fully consumed. + StatsManager.clear_iteration_metrics(self._get_dataset_tag()) + + +# Backwards compatibility alias. +DatasetIterator = DataIterator diff --git a/deepseek/lib/python3.10/site-packages/ray/data/random_access_dataset.py b/deepseek/lib/python3.10/site-packages/ray/data/random_access_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..a24c6796f7ca6b3dcbb63b97a722882e7b0d4687 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/data/random_access_dataset.py @@ -0,0 +1,293 @@ +import bisect +import logging +import random +import time +from collections import defaultdict +from typing import TYPE_CHECKING, Any, List, Optional + +import numpy as np + +import ray +from ray.data._internal.execution.interfaces.ref_bundle import ( + _ref_bundles_iterator_to_block_refs_list, +) +from ray.data._internal.remote_fn import cached_remote_fn +from ray.data.block import BlockAccessor +from ray.data.context import DataContext +from ray.types import ObjectRef +from ray.util.annotations import PublicAPI + +try: + import pyarrow as pa +except ImportError: + pa = None + +if TYPE_CHECKING: + from ray.data import Dataset + +logger = logging.getLogger(__name__) + + +@PublicAPI(stability="alpha") +class RandomAccessDataset: + """A class that provides distributed, random access to a Dataset. + + See: ``Dataset.to_random_access_dataset()``. + """ + + def __init__( + self, + ds: "Dataset", + key: str, + num_workers: int, + ): + """Construct a RandomAccessDataset (internal API). + + The constructor is a private API. Use ``ds.to_random_access_dataset()`` + to construct a RandomAccessDataset. 
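+
+        Illustrative sketch (editor's addition; uses only the documented
+        ``to_random_access_dataset()`` and ``get_async()`` APIs):
+
+            >>> import ray  # doctest: +SKIP
+            >>> ds = ray.data.range(100)  # doctest: +SKIP
+            >>> rad = ds.to_random_access_dataset("id", num_workers=2)  # doctest: +SKIP
+            >>> ray.get(rad.get_async(5))  # doctest: +SKIP
+            {'id': 5}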
+ """ + schema = ds.schema(fetch_if_missing=True) + if schema is None or isinstance(schema, type): + raise ValueError("RandomAccessDataset only supports Arrow-format blocks.") + + start = time.perf_counter() + logger.info("[setup] Indexing dataset by sort key.") + sorted_ds = ds.sort(key) + get_bounds = cached_remote_fn(_get_bounds) + bundles = sorted_ds.iter_internal_ref_bundles() + blocks = _ref_bundles_iterator_to_block_refs_list(bundles) + + logger.info("[setup] Computing block range bounds.") + bounds = ray.get([get_bounds.remote(b, key) for b in blocks]) + self._non_empty_blocks = [] + self._lower_bound = None + self._upper_bounds = [] + for i, b in enumerate(bounds): + if b: + self._non_empty_blocks.append(blocks[i]) + if self._lower_bound is None: + self._lower_bound = b[0] + self._upper_bounds.append(b[1]) + + logger.info("[setup] Creating {} random access workers.".format(num_workers)) + ctx = DataContext.get_current() + scheduling_strategy = ctx.scheduling_strategy + self._workers = [ + _RandomAccessWorker.options(scheduling_strategy=scheduling_strategy).remote( + key + ) + for _ in range(num_workers) + ] + ( + self._block_to_workers_map, + self._worker_to_blocks_map, + ) = self._compute_block_to_worker_assignments() + + logger.info( + "[setup] Worker to blocks assignment: {}".format(self._worker_to_blocks_map) + ) + ray.get( + [ + w.assign_blocks.remote( + { + i: self._non_empty_blocks[i] + for i in self._worker_to_blocks_map[w] + } + ) + for w in self._workers + ] + ) + + logger.info("[setup] Finished assigning blocks to workers.") + self._build_time = time.perf_counter() - start + + def _compute_block_to_worker_assignments(self): + # Return values. + block_to_workers: dict[int, List["ray.ActorHandle"]] = defaultdict(list) + worker_to_blocks: dict["ray.ActorHandle", List[int]] = defaultdict(list) + + # Aux data structures. + loc_to_workers: dict[str, List["ray.ActorHandle"]] = defaultdict(list) + locs = ray.get([w.ping.remote() for w in self._workers]) + for i, loc in enumerate(locs): + loc_to_workers[loc].append(self._workers[i]) + block_locs = ray.experimental.get_object_locations(self._non_empty_blocks) + + # First, try to assign all blocks to all workers at its location. + for block_idx, block in enumerate(self._non_empty_blocks): + block_info = block_locs[block] + locs = block_info.get("node_ids", []) + for loc in locs: + for worker in loc_to_workers[loc]: + block_to_workers[block_idx].append(worker) + worker_to_blocks[worker].append(block_idx) + + # Randomly assign any leftover blocks to at least one worker. + # TODO: the load balancing here could be improved. + for block_idx, block in enumerate(self._non_empty_blocks): + if len(block_to_workers[block_idx]) == 0: + worker = random.choice(self._workers) + block_to_workers[block_idx].append(worker) + worker_to_blocks[worker].append(block_idx) + + return block_to_workers, worker_to_blocks + + def get_async(self, key: Any) -> ObjectRef[Any]: + """Asynchronously finds the record for a single key. + + Args: + key: The key of the record to find. + + Returns: + ObjectRef containing the record (in pydict form), or None if not found. + """ + block_index = self._find_le(key) + if block_index is None: + return ray.put(None) + return self._worker_for(block_index).get.remote(block_index, key) + + def multiget(self, keys: List[Any]) -> List[Optional[Any]]: + """Synchronously find the records for a list of keys. + + Args: + keys: List of keys to find the records for. 
+ + Returns: + List of found records (in pydict form), or None for missing records. + """ + batches = defaultdict(list) + for k in keys: + batches[self._find_le(k)].append(k) + futures = {} + for index, keybatch in batches.items(): + if index is None: + continue + fut = self._worker_for(index).multiget.remote( + [index] * len(keybatch), keybatch + ) + futures[index] = fut + results = {} + for i, fut in futures.items(): + keybatch = batches[i] + values = ray.get(fut) + for k, v in zip(keybatch, values): + results[k] = v + return [results.get(k) for k in keys] + + def stats(self) -> str: + """Returns a string containing access timing information.""" + stats = ray.get([w.stats.remote() for w in self._workers]) + total_time = sum(s["total_time"] for s in stats) + accesses = [s["num_accesses"] for s in stats] + blocks = [s["num_blocks"] for s in stats] + msg = "RandomAccessDataset:\n" + msg += "- Build time: {}s\n".format(round(self._build_time, 2)) + msg += "- Num workers: {}\n".format(len(stats)) + msg += "- Blocks per worker: {} min, {} max, {} mean\n".format( + min(blocks), max(blocks), int(sum(blocks) / len(blocks)) + ) + msg += "- Accesses per worker: {} min, {} max, {} mean\n".format( + min(accesses), max(accesses), int(sum(accesses) / len(accesses)) + ) + msg += "- Mean access time: {}us\n".format( + int(total_time / (1 + sum(accesses)) * 1e6) + ) + return msg + + def _worker_for(self, block_index: int): + return random.choice(self._block_to_workers_map[block_index]) + + def _find_le(self, x: Any) -> int: + i = bisect.bisect_left(self._upper_bounds, x) + if i >= len(self._upper_bounds) or x < self._lower_bound: + return None + return i + + +@ray.remote(num_cpus=0) +class _RandomAccessWorker: + def __init__(self, key_field): + self.blocks = None + self.key_field = key_field + self.num_accesses = 0 + self.total_time = 0 + + def assign_blocks(self, block_ref_dict): + self.blocks = {k: ray.get(ref) for k, ref in block_ref_dict.items()} + + def get(self, block_index, key): + start = time.perf_counter() + result = self._get(block_index, key) + self.total_time += time.perf_counter() - start + self.num_accesses += 1 + return result + + def multiget(self, block_indices, keys): + start = time.perf_counter() + block = self.blocks[block_indices[0]] + if len(set(block_indices)) == 1 and isinstance( + self.blocks[block_indices[0]], pa.Table + ): + # Fast path: use np.searchsorted for vectorized search on a single block. + # This is ~3x faster than the naive case. 
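+        # [Editor's note, not original code: the fast path is valid because each
+        # block was sorted by the key column at construction time, so a single
+        # vectorized np.searchsorted() call can locate every requested key at
+        # once instead of one bisect per key.]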
+ block = self.blocks[block_indices[0]] + col = block[self.key_field] + indices = np.searchsorted(col, keys) + acc = BlockAccessor.for_block(block) + result = [acc._get_row(i) for i in indices] + # assert result == [self._get(i, k) for i, k in zip(block_indices, keys)] + else: + result = [self._get(i, k) for i, k in zip(block_indices, keys)] + self.total_time += time.perf_counter() - start + self.num_accesses += 1 + return result + + def ping(self): + return ray.get_runtime_context().get_node_id() + + def stats(self) -> dict: + return { + "num_blocks": len(self.blocks), + "num_accesses": self.num_accesses, + "total_time": self.total_time, + } + + def _get(self, block_index, key): + if block_index is None: + return None + block = self.blocks[block_index] + column = block[self.key_field] + if isinstance(block, pa.Table): + column = _ArrowListWrapper(column) + i = _binary_search_find(column, key) + if i is None: + return None + acc = BlockAccessor.for_block(block) + return acc._get_row(i) + + +def _binary_search_find(column, x): + i = bisect.bisect_left(column, x) + if i != len(column) and column[i] == x: + return i + return None + + +class _ArrowListWrapper: + def __init__(self, arrow_col): + self.arrow_col = arrow_col + + def __getitem__(self, i): + return self.arrow_col[i].as_py() + + def __len__(self): + return len(self.arrow_col) + + +def _get_bounds(block, key): + if len(block) == 0: + return None + b = (block[key][0], block[key][len(block) - 1]) + if isinstance(block, pa.Table): + b = (b[0].as_py(), b[1].as_py()) + return b diff --git a/evalkit_tf433/lib/python3.10/site-packages/networkx/readwrite/adjlist.py b/evalkit_tf433/lib/python3.10/site-packages/networkx/readwrite/adjlist.py new file mode 100644 index 0000000000000000000000000000000000000000..768af5ad73c148dc6300e66ecc4c440af6e231e8 --- /dev/null +++ b/evalkit_tf433/lib/python3.10/site-packages/networkx/readwrite/adjlist.py @@ -0,0 +1,310 @@ +""" +************** +Adjacency List +************** +Read and write NetworkX graphs as adjacency lists. + +Adjacency list format is useful for graphs without data associated +with nodes or edges and for nodes that can be meaningfully represented +as strings. + +Format +------ +The adjacency list format consists of lines with node labels. The +first label in a line is the source node. Further labels in the line +are considered target nodes and are added to the graph along with an edge +between the source node and target node. + +The graph with edges a-b, a-c, d-e can be represented as the following +adjacency list (anything following the # in a line is a comment):: + + a b c # source target target + d e +""" + +__all__ = ["generate_adjlist", "write_adjlist", "parse_adjlist", "read_adjlist"] + +import networkx as nx +from networkx.utils import open_file + + +def generate_adjlist(G, delimiter=" "): + """Generate a single line of the graph G in adjacency list format. + + Parameters + ---------- + G : NetworkX graph + + delimiter : string, optional + Separator for node labels + + Returns + ------- + lines : string + Lines of data in adjlist format. + + Examples + -------- + >>> G = nx.lollipop_graph(4, 3) + >>> for line in nx.generate_adjlist(G): + ... print(line) + 0 1 2 3 + 1 2 3 + 2 3 + 3 4 + 4 5 + 5 6 + 6 + + See Also + -------- + write_adjlist, read_adjlist + + Notes + ----- + The default `delimiter=" "` will result in unexpected results if node names contain + whitespace characters. To avoid this problem, specify an alternate delimiter when spaces are + valid in node names. 
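+
+    For example (editor's illustration), a comma delimiter is safe for
+    string node names containing spaces:
+
+    >>> for line in nx.generate_adjlist(nx.path_graph(3), delimiter=","):
+    ...     print(line)
+    0,1
+    1,2
+    2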
+
+    NB: This option is not available for data that isn't user-generated.
+
+    """
+    directed = G.is_directed()
+    seen = set()
+    for s, nbrs in G.adjacency():
+        line = str(s) + delimiter
+        for t, data in nbrs.items():
+            if not directed and t in seen:
+                continue
+            if G.is_multigraph():
+                for d in data.values():
+                    line += str(t) + delimiter
+            else:
+                line += str(t) + delimiter
+        if not directed:
+            seen.add(s)
+        yield line[: -len(delimiter)]
+
+
+@open_file(1, mode="wb")
+def write_adjlist(G, path, comments="#", delimiter=" ", encoding="utf-8"):
+    """Write graph G in single-line adjacency-list format to path.
+
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    path : string or file
+       Filename or file handle for data output.
+       Filenames ending in .gz or .bz2 will be compressed.
+
+    comments : string, optional
+       Marker for comment lines
+
+    delimiter : string, optional
+       Separator for node labels
+
+    encoding : string, optional
+       Text encoding.
+
+    Examples
+    --------
+    >>> G = nx.path_graph(4)
+    >>> nx.write_adjlist(G, "test.adjlist")
+
+    The path can be a filehandle or a string with the name of the file. If a
+    filehandle is provided, it has to be opened in 'wb' mode.
+
+    >>> fh = open("test.adjlist", "wb")
+    >>> nx.write_adjlist(G, fh)
+
+    Notes
+    -----
+    The default `delimiter=" "` can produce unexpected results if node names contain
+    whitespace characters. To avoid this problem, specify an alternate delimiter when spaces are
+    valid in node names.
+    NB: This option is not available for data that isn't user-generated.
+
+    This format does not store graph, node, or edge data.
+
+    See Also
+    --------
+    read_adjlist, generate_adjlist
+    """
+    import sys
+    import time
+
+    pargs = comments + " ".join(sys.argv) + "\n"
+    header = (
+        pargs
+        + comments
+        + f" GMT {time.asctime(time.gmtime())}\n"
+        + comments
+        + f" {G.name}\n"
+    )
+    path.write(header.encode(encoding))
+
+    for line in generate_adjlist(G, delimiter):
+        line += "\n"
+        path.write(line.encode(encoding))
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def parse_adjlist(
+    lines, comments="#", delimiter=None, create_using=None, nodetype=None
+):
+    """Parse lines of a graph adjacency list representation.
+
+    Parameters
+    ----------
+    lines : list or iterator of strings
+        Input data in adjlist format
+
+    create_using : NetworkX graph constructor, optional (default=nx.Graph)
+       Graph type to create. If graph instance, then cleared before populated.
+
+    nodetype : Python type, optional
+       Convert nodes to this type.
+
+    comments : string, optional
+       Marker for comment lines
+
+    delimiter : string, optional
+       Separator for node labels. The default is whitespace.
+
+    Returns
+    -------
+    G: NetworkX graph
+        The graph corresponding to the lines in adjacency list format.
+ + Examples + -------- + >>> lines = ["1 2 5", "2 3 4", "3 5", "4", "5"] + >>> G = nx.parse_adjlist(lines, nodetype=int) + >>> nodes = [1, 2, 3, 4, 5] + >>> all(node in G for node in nodes) + True + >>> edges = [(1, 2), (1, 5), (2, 3), (2, 4), (3, 5)] + >>> all((u, v) in G.edges() or (v, u) in G.edges() for (u, v) in edges) + True + + See Also + -------- + read_adjlist + + """ + G = nx.empty_graph(0, create_using) + for line in lines: + p = line.find(comments) + if p >= 0: + line = line[:p] + if not len(line): + continue + vlist = line.rstrip("\n").split(delimiter) + u = vlist.pop(0) + # convert types + if nodetype is not None: + try: + u = nodetype(u) + except BaseException as err: + raise TypeError( + f"Failed to convert node ({u}) to type {nodetype}" + ) from err + G.add_node(u) + if nodetype is not None: + try: + vlist = list(map(nodetype, vlist)) + except BaseException as err: + raise TypeError( + f"Failed to convert nodes ({','.join(vlist)}) to type {nodetype}" + ) from err + G.add_edges_from([(u, v) for v in vlist]) + return G + + +@open_file(0, mode="rb") +@nx._dispatchable(graphs=None, returns_graph=True) +def read_adjlist( + path, + comments="#", + delimiter=None, + create_using=None, + nodetype=None, + encoding="utf-8", +): + """Read graph in adjacency list format from path. + + Parameters + ---------- + path : string or file + Filename or file handle to read. + Filenames ending in .gz or .bz2 will be uncompressed. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + nodetype : Python type, optional + Convert nodes to this type. + + comments : string, optional + Marker for comment lines + + delimiter : string, optional + Separator for node labels. The default is whitespace. + + Returns + ------- + G: NetworkX graph + The graph corresponding to the lines in adjacency list format. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> nx.write_adjlist(G, "test.adjlist") + >>> G = nx.read_adjlist("test.adjlist") + + The path can be a filehandle or a string with the name of the file. If a + filehandle is provided, it has to be opened in 'rb' mode. + + >>> fh = open("test.adjlist", "rb") + >>> G = nx.read_adjlist(fh) + + Filenames ending in .gz or .bz2 will be compressed. + + >>> nx.write_adjlist(G, "test.adjlist.gz") + >>> G = nx.read_adjlist("test.adjlist.gz") + + The optional nodetype is a function to convert node strings to nodetype. + + For example + + >>> G = nx.read_adjlist("test.adjlist", nodetype=int) + + will attempt to convert all nodes to integer type. + + Since nodes must be hashable, the function nodetype must return hashable + types (e.g. int, float, str, frozenset - or tuples of those, etc.) + + The optional create_using parameter indicates the type of NetworkX graph + created. The default is `nx.Graph`, an undirected graph. + To read the data as a directed graph use + + >>> G = nx.read_adjlist("test.adjlist", create_using=nx.DiGraph) + + Notes + ----- + This format does not store graph or node data. 
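+
+    Anything following the comment character on a line is ignored. Parsing is
+    delegated to `parse_adjlist`:
+
+    >>> lines = ["# a comment", "1 2", "2 3"]
+    >>> G = nx.parse_adjlist(lines, nodetype=int)
+    >>> sorted(G.edges())
+    [(1, 2), (2, 3)]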
+
+    See Also
+    --------
+    write_adjlist
+    """
+    lines = (line.decode(encoding) for line in path)
+    return parse_adjlist(
+        lines,
+        comments=comments,
+        delimiter=delimiter,
+        create_using=create_using,
+        nodetype=nodetype,
+    )
diff --git a/evalkit_tf433/lib/python3.10/site-packages/networkx/readwrite/graphml.py b/evalkit_tf433/lib/python3.10/site-packages/networkx/readwrite/graphml.py
new file mode 100644
index 0000000000000000000000000000000000000000..7d0a1da0dcf488cda2c0e018dfb48f2ff947ac80
--- /dev/null
+++ b/evalkit_tf433/lib/python3.10/site-packages/networkx/readwrite/graphml.py
@@ -0,0 +1,1053 @@
+"""
+*******
+GraphML
+*******
+Read and write graphs in GraphML format.
+
+.. warning::
+
+    This parser uses the standard xml library present in Python, which is
+    insecure - see :external+python:mod:`xml` for additional information.
+    Only parse GraphML files you trust.
+
+This implementation does not support mixed graphs (directed and undirected
+edges together), hyperedges, nested graphs, or ports.
+
+"GraphML is a comprehensive and easy-to-use file format for graphs. It
+consists of a language core to describe the structural properties of a
+graph and a flexible extension mechanism to add application-specific
+data. Its main features include support of
+
+  * directed, undirected, and mixed graphs,
+  * hypergraphs,
+  * hierarchical graphs,
+  * graphical representations,
+  * references to external data,
+  * application-specific attribute data, and
+  * light-weight parsers.
+
+Unlike many other file formats for graphs, GraphML does not use a
+custom syntax. Instead, it is based on XML and hence ideally suited as
+a common denominator for all kinds of services generating, archiving,
+or processing graphs."
+
+http://graphml.graphdrawing.org/
+
+Format
+------
+GraphML is an XML format. See
+http://graphml.graphdrawing.org/specification.html for the specification and
+http://graphml.graphdrawing.org/primer/graphml-primer.html
+for examples.
+"""
+
+import warnings
+from collections import defaultdict
+
+import networkx as nx
+from networkx.utils import open_file
+
+__all__ = [
+    "write_graphml",
+    "read_graphml",
+    "generate_graphml",
+    "write_graphml_xml",
+    "write_graphml_lxml",
+    "parse_graphml",
+    "GraphMLWriter",
+    "GraphMLReader",
+]
+
+
+@open_file(1, mode="wb")
+def write_graphml_xml(
+    G,
+    path,
+    encoding="utf-8",
+    prettyprint=True,
+    infer_numeric_types=False,
+    named_key_ids=False,
+    edge_id_from_attribute=None,
+):
+    """Write G in GraphML XML format to path
+
+    Parameters
+    ----------
+    G : graph
+       A networkx graph
+    path : file or string
+       File or filename to write.
+       Filenames ending in .gz or .bz2 will be compressed.
+    encoding : string (optional)
+       Encoding for text data.
+    prettyprint : bool (optional)
+       If True use line breaks and indenting in output XML.
+    infer_numeric_types : boolean
+       Determine if numeric types should be generalized.
+       For example, if edges have both int and float 'weight' attributes,
+       we infer in GraphML that both are floats.
+    named_key_ids : bool (optional)
+       If True use attr.name as value for key elements' id attribute.
+    edge_id_from_attribute : dict key (optional)
+       If provided, the graphml edge id is set by looking up the corresponding
+       edge data attribute keyed by this parameter. If `None` or the key does not exist in edge data,
+       the edge id is set by the edge key if `G` is a MultiGraph, else the edge id is left unset.
+
+    Examples
+    --------
+    >>> G = nx.path_graph(4)
+    >>> nx.write_graphml(G, "test.graphml")
+
+    Notes
+    -----
+    This implementation does not support mixed graphs (directed
+    and undirected edges together), hyperedges, nested graphs, or ports.
+    """
+    writer = GraphMLWriter(
+        encoding=encoding,
+        prettyprint=prettyprint,
+        infer_numeric_types=infer_numeric_types,
+        named_key_ids=named_key_ids,
+        edge_id_from_attribute=edge_id_from_attribute,
+    )
+    writer.add_graph_element(G)
+    writer.dump(path)
+
+
+@open_file(1, mode="wb")
+def write_graphml_lxml(
+    G,
+    path,
+    encoding="utf-8",
+    prettyprint=True,
+    infer_numeric_types=False,
+    named_key_ids=False,
+    edge_id_from_attribute=None,
+):
+    """Write G in GraphML XML format to path
+
+    This function uses the LXML framework and should be faster than
+    the version using the xml library.
+
+    Parameters
+    ----------
+    G : graph
+       A networkx graph
+    path : file or string
+       File or filename to write.
+       Filenames ending in .gz or .bz2 will be compressed.
+    encoding : string (optional)
+       Encoding for text data.
+    prettyprint : bool (optional)
+       If True use line breaks and indenting in output XML.
+    infer_numeric_types : boolean
+       Determine if numeric types should be generalized.
+       For example, if edges have both int and float 'weight' attributes,
+       we infer in GraphML that both are floats.
+    named_key_ids : bool (optional)
+       If True use attr.name as value for key elements' id attribute.
+    edge_id_from_attribute : dict key (optional)
+       If provided, the graphml edge id is set by looking up the corresponding
+       edge data attribute keyed by this parameter. If `None` or the key does not exist in edge data,
+       the edge id is set by the edge key if `G` is a MultiGraph, else the edge id is left unset.
+
+    Examples
+    --------
+    >>> G = nx.path_graph(4)
+    >>> nx.write_graphml_lxml(G, "fourpath.graphml")
+
+    Notes
+    -----
+    This implementation does not support mixed graphs (directed
+    and undirected edges together), hyperedges, nested graphs, or ports.
+    """
+    try:
+        import lxml.etree as lxmletree
+    except ImportError:
+        return write_graphml_xml(
+            G,
+            path,
+            encoding,
+            prettyprint,
+            infer_numeric_types,
+            named_key_ids,
+            edge_id_from_attribute,
+        )
+
+    writer = GraphMLWriterLxml(
+        path,
+        graph=G,
+        encoding=encoding,
+        prettyprint=prettyprint,
+        infer_numeric_types=infer_numeric_types,
+        named_key_ids=named_key_ids,
+        edge_id_from_attribute=edge_id_from_attribute,
+    )
+    writer.dump()
+
+
+def generate_graphml(
+    G,
+    encoding="utf-8",
+    prettyprint=True,
+    named_key_ids=False,
+    edge_id_from_attribute=None,
+):
+    """Generate GraphML lines for G
+
+    Parameters
+    ----------
+    G : graph
+       A networkx graph
+    encoding : string (optional)
+       Encoding for text data.
+    prettyprint : bool (optional)
+       If True use line breaks and indenting in output XML.
+    named_key_ids : bool (optional)
+       If True use attr.name as value for key elements' id attribute.
+    edge_id_from_attribute : dict key (optional)
+       If provided, the graphml edge id is set by looking up the corresponding
+       edge data attribute keyed by this parameter. If `None` or the key does not exist in edge data,
+       the edge id is set by the edge key if `G` is a MultiGraph, else the edge id is left unset.
+
+    Examples
+    --------
+    >>> G = nx.path_graph(4)
+    >>> linefeed = chr(10)  # linefeed = \n
+    >>> s = linefeed.join(nx.generate_graphml(G))
+    >>> for line in nx.generate_graphml(G):  # doctest: +SKIP
+    ...     print(line)
+
+    Notes
+    -----
+    This implementation does not support mixed graphs (directed and undirected
+    edges together), hyperedges, nested graphs, or ports.
+    """
+    writer = GraphMLWriter(
+        encoding=encoding,
+        prettyprint=prettyprint,
+        named_key_ids=named_key_ids,
+        edge_id_from_attribute=edge_id_from_attribute,
+    )
+    writer.add_graph_element(G)
+    yield from str(writer).splitlines()
+
+
+@open_file(0, mode="rb")
+@nx._dispatchable(graphs=None, returns_graph=True)
+def read_graphml(path, node_type=str, edge_key_type=int, force_multigraph=False):
+    """Read graph in GraphML format from path.
+
+    Parameters
+    ----------
+    path : file or string
+       File or filename to read.
+       Filenames ending in .gz or .bz2 will be decompressed.
+
+    node_type: Python type (default: str)
+       Convert node ids to this type
+
+    edge_key_type: Python type (default: int)
+       Convert graphml edge ids to this type. Multigraphs use id as edge key.
+       Non-multigraphs add to edge attribute dict with name "id".
+
+    force_multigraph : bool (default: False)
+       If True, return a multigraph with edge keys. If False (the default)
+       return a multigraph when multiedges are in the graph.
+
+    Returns
+    -------
+    graph: NetworkX graph
+        If parallel edges are present or `force_multigraph=True` then
+        a MultiGraph or MultiDiGraph is returned. Otherwise a Graph/DiGraph.
+        The returned graph is directed if the file indicates it should be.
+
+    Notes
+    -----
+    Default node and edge attributes are not propagated to each node and edge.
+    They can be obtained from `G.graph` and applied to node and edge attributes
+    if desired using something like this:
+
+    >>> default_color = G.graph["node_default"]["color"]  # doctest: +SKIP
+    >>> for node, data in G.nodes(data=True):  # doctest: +SKIP
+    ...     if "color" not in data:
+    ...         data["color"] = default_color
+    >>> default_color = G.graph["edge_default"]["color"]  # doctest: +SKIP
+    >>> for u, v, data in G.edges(data=True):  # doctest: +SKIP
+    ...     if "color" not in data:
+    ...         data["color"] = default_color
+
+    This implementation does not support mixed graphs (directed and undirected
+    edges together), hypergraphs, nested graphs, or ports.
+
+    For multigraphs the GraphML edge "id" will be used as the edge
+    key. If not specified then the "key" attribute will be used. If
+    there is no "key" attribute a default NetworkX multigraph edge key
+    will be provided.
+
+    Files with the yEd "yfiles" extension can be read. The type of the node's
+    shape is preserved in the `shape_type` node attribute.
+
+    yEd compressed files ("file.graphmlz" extension) can be read by renaming
+    the file to "file.graphml.gz".
+
+    """
+    reader = GraphMLReader(node_type, edge_key_type, force_multigraph)
+    # need to check for multiple graphs
+    glist = list(reader(path=path))
+    if len(glist) == 0:
+        # If no graph comes back, try looking for an incomplete header
+        header = b'<graphml xmlns="http://graphml.graphdrawing.org/xmlns">'
+        path.seek(0)
+        old_bytes = path.read()
+        new_bytes = old_bytes.replace(b"<graphml>", header)
+        glist = list(reader(string=new_bytes))
+        if len(glist) == 0:
+            raise nx.NetworkXError("file not successfully read as graphml")
+    return glist[0]
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def parse_graphml(
+    graphml_string, node_type=str, edge_key_type=int, force_multigraph=False
+):
+    """Read graph in GraphML format from string.
+
+    Parameters
+    ----------
+    graphml_string : string
+       String containing graphml information
+       (e.g., contents of a graphml file).
+
+    node_type: Python type (default: str)
+       Convert node ids to this type
+
+    edge_key_type: Python type (default: int)
+       Convert graphml edge ids to this type. Multigraphs use id as edge key.
+       Non-multigraphs add to edge attribute dict with name "id".
+
+    force_multigraph : bool (default: False)
+       If True, return a multigraph with edge keys. If False (the default)
+       return a multigraph when multiedges are in the graph.
+
+
+    Returns
+    -------
+    graph: NetworkX graph
+        If no parallel edges are found a Graph or DiGraph is returned.
+        Otherwise a MultiGraph or MultiDiGraph is returned.
+
+    Examples
+    --------
+    >>> G = nx.path_graph(4)
+    >>> linefeed = chr(10)  # linefeed = \n
+    >>> s = linefeed.join(nx.generate_graphml(G))
+    >>> H = nx.parse_graphml(s)
+
+    Notes
+    -----
+    Default node and edge attributes are not propagated to each node and edge.
+    They can be obtained from `G.graph` and applied to node and edge attributes
+    if desired using something like this:
+
+    >>> default_color = G.graph["node_default"]["color"]  # doctest: +SKIP
+    >>> for node, data in G.nodes(data=True):  # doctest: +SKIP
+    ...     if "color" not in data:
+    ...         data["color"] = default_color
+    >>> default_color = G.graph["edge_default"]["color"]  # doctest: +SKIP
+    >>> for u, v, data in G.edges(data=True):  # doctest: +SKIP
+    ...     if "color" not in data:
+    ...         data["color"] = default_color
+
+    This implementation does not support mixed graphs (directed and undirected
+    edges together), hypergraphs, nested graphs, or ports.
+
+    For multigraphs the GraphML edge "id" will be used as the edge
+    key. If not specified then the "key" attribute will be used. If
+    there is no "key" attribute a default NetworkX multigraph edge key
+    will be provided.
+
+    """
+    reader = GraphMLReader(node_type, edge_key_type, force_multigraph)
+    # need to check for multiple graphs
+    glist = list(reader(string=graphml_string))
+    if len(glist) == 0:
+        # If no graph comes back, try looking for an incomplete header
+        header = '<graphml xmlns="http://graphml.graphdrawing.org/xmlns">'
+        new_string = graphml_string.replace("<graphml>", header)
+        glist = list(reader(string=new_string))
+        if len(glist) == 0:
+            raise nx.NetworkXError("file not successfully read as graphml")
+    return glist[0]
+
+
+class GraphML:
+    NS_GRAPHML = "http://graphml.graphdrawing.org/xmlns"
+    NS_XSI = "http://www.w3.org/2001/XMLSchema-instance"
+    # xmlns:y="http://www.yworks.com/xml/graphml"
+    NS_Y = "http://www.yworks.com/xml/graphml"
+    SCHEMALOCATION = " ".join(
+        [
+            "http://graphml.graphdrawing.org/xmlns",
+            "http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd",
+        ]
+    )
+
+    def construct_types(self):
+        types = [
+            (int, "integer"),  # for Gephi GraphML bug
+            (str, "yfiles"),
+            (str, "string"),
+            (int, "int"),
+            (int, "long"),
+            (float, "float"),
+            (float, "double"),
+            (bool, "boolean"),
+        ]
+
+        # These additions to types allow writing numpy types
+        try:
+            import numpy as np
+        except ImportError:
+            pass
+        else:
+            # prepend so that python types are created upon read (last entry wins)
+            types = [
+                (np.float64, "float"),
+                (np.float32, "float"),
+                (np.float16, "float"),
+                (np.int8, "int"),
+                (np.int16, "int"),
+                (np.int32, "int"),
+                (np.int64, "int"),
+                (np.uint8, "int"),
+                (np.uint16, "int"),
+                (np.uint32, "int"),
+                (np.uint64, "int"),
+                (np.int_, "int"),
+                (np.intc, "int"),
+                (np.intp, "int"),
+            ] + types
+
+        self.xml_type = dict(types)
+        self.python_type = dict(reversed(a) for a in types)
+
+    # This page says that data types in GraphML follow Java(TM).
+    # http://graphml.graphdrawing.org/primer/graphml-primer.html#AttributesDefinition
+    # true and false are the only boolean literals:
+    # http://en.wikibooks.org/wiki/Java_Programming/Literals#Boolean_Literals
+    convert_bool = {
+        # We use data.lower() in actual use.
+        "true": True,
+        "false": False,
+        # Include integer strings for convenience.
+        "0": False,
+        0: False,
+        "1": True,
+        1: True,
+    }
+
+    def get_xml_type(self, key):
+        """Wrapper around the xml_type dict that raises a more informative
+        exception message when a user attempts to use data of a type not
+        supported by GraphML."""
+        try:
+            return self.xml_type[key]
+        except KeyError as err:
+            raise TypeError(
+                f"GraphML does not support type {key} as data values."
+            ) from err
+
+
+class GraphMLWriter(GraphML):
+    def __init__(
+        self,
+        graph=None,
+        encoding="utf-8",
+        prettyprint=True,
+        infer_numeric_types=False,
+        named_key_ids=False,
+        edge_id_from_attribute=None,
+    ):
+        self.construct_types()
+        from xml.etree.ElementTree import Element
+
+        self.myElement = Element
+
+        self.infer_numeric_types = infer_numeric_types
+        self.prettyprint = prettyprint
+        self.named_key_ids = named_key_ids
+        self.edge_id_from_attribute = edge_id_from_attribute
+        self.encoding = encoding
+        self.xml = self.myElement(
+            "graphml",
+            {
+                "xmlns": self.NS_GRAPHML,
+                "xmlns:xsi": self.NS_XSI,
+                "xsi:schemaLocation": self.SCHEMALOCATION,
+            },
+        )
+        self.keys = {}
+        self.attributes = defaultdict(list)
+        self.attribute_types = defaultdict(set)
+
+        if graph is not None:
+            self.add_graph_element(graph)
+
+    def __str__(self):
+        from xml.etree.ElementTree import tostring
+
+        if self.prettyprint:
+            self.indent(self.xml)
+        s = tostring(self.xml).decode(self.encoding)
+        return s
+
+    def attr_type(self, name, scope, value):
+        """Infer the attribute type of data named name. Currently this only
+        supports inference of numeric types.
+
+        If self.infer_numeric_types is false, the Python type of `value` is
+        used. Otherwise, pick the most general of the types found across all
+        values with this name and scope. This means edges with data named
+        'weight' are treated separately from nodes with data named 'weight'.
+        """
+        if self.infer_numeric_types:
+            types = self.attribute_types[(name, scope)]
+
+            if len(types) > 1:
+                types = {self.get_xml_type(t) for t in types}
+                if "string" in types:
+                    return str
+                elif "float" in types or "double" in types:
+                    return float
+                else:
+                    return int
+            else:
+                return list(types)[0]
+        else:
+            return type(value)
+
+    def get_key(self, name, attr_type, scope, default):
+        keys_key = (name, attr_type, scope)
+        try:
+            return self.keys[keys_key]
+        except KeyError:
+            if self.named_key_ids:
+                new_id = name
+            else:
+                new_id = f"d{len(self.keys)}"
+
+            self.keys[keys_key] = new_id
+            key_kwargs = {
+                "id": new_id,
+                "for": scope,
+                "attr.name": name,
+                "attr.type": attr_type,
+            }
+            key_element = self.myElement("key", **key_kwargs)
+            # add subelement for data default value if present
+            if default is not None:
+                default_element = self.myElement("default")
+                default_element.text = str(default)
+                key_element.append(default_element)
+            self.xml.insert(0, key_element)
+        return new_id
+
+    def add_data(self, name, element_type, value, scope="all", default=None):
+        """
+        Make a data element for an edge or a node. Keep a log of the
+        type in the keys table.
+        """
+        if element_type not in self.xml_type:
+            raise nx.NetworkXError(
+                f"GraphML writer does not support {element_type} as data values."
+ ) + keyid = self.get_key(name, self.get_xml_type(element_type), scope, default) + data_element = self.myElement("data", key=keyid) + data_element.text = str(value) + return data_element + + def add_attributes(self, scope, xml_obj, data, default): + """Appends attribute data to edges or nodes, and stores type information + to be added later. See add_graph_element. + """ + for k, v in data.items(): + self.attribute_types[(str(k), scope)].add(type(v)) + self.attributes[xml_obj].append([k, v, scope, default.get(k)]) + + def add_nodes(self, G, graph_element): + default = G.graph.get("node_default", {}) + for node, data in G.nodes(data=True): + node_element = self.myElement("node", id=str(node)) + self.add_attributes("node", node_element, data, default) + graph_element.append(node_element) + + def add_edges(self, G, graph_element): + if G.is_multigraph(): + for u, v, key, data in G.edges(data=True, keys=True): + edge_element = self.myElement( + "edge", + source=str(u), + target=str(v), + id=str(data.get(self.edge_id_from_attribute)) + if self.edge_id_from_attribute + and self.edge_id_from_attribute in data + else str(key), + ) + default = G.graph.get("edge_default", {}) + self.add_attributes("edge", edge_element, data, default) + graph_element.append(edge_element) + else: + for u, v, data in G.edges(data=True): + if self.edge_id_from_attribute and self.edge_id_from_attribute in data: + # select attribute to be edge id + edge_element = self.myElement( + "edge", + source=str(u), + target=str(v), + id=str(data.get(self.edge_id_from_attribute)), + ) + else: + # default: no edge id + edge_element = self.myElement("edge", source=str(u), target=str(v)) + default = G.graph.get("edge_default", {}) + self.add_attributes("edge", edge_element, data, default) + graph_element.append(edge_element) + + def add_graph_element(self, G): + """ + Serialize graph G in GraphML to the stream. + """ + if G.is_directed(): + default_edge_type = "directed" + else: + default_edge_type = "undirected" + + graphid = G.graph.pop("id", None) + if graphid is None: + graph_element = self.myElement("graph", edgedefault=default_edge_type) + else: + graph_element = self.myElement( + "graph", edgedefault=default_edge_type, id=graphid + ) + default = {} + data = { + k: v + for (k, v) in G.graph.items() + if k not in ["node_default", "edge_default"] + } + self.add_attributes("graph", graph_element, data, default) + self.add_nodes(G, graph_element) + self.add_edges(G, graph_element) + + # self.attributes contains a mapping from XML Objects to a list of + # data that needs to be added to them. + # We postpone processing in order to do type inference/generalization. 
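+        # (e.g. with infer_numeric_types=True, a 'weight' attribute stored as
+        # int on some edges and float on others is written under a single
+        # float-typed GraphML key)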
+        # See self.attr_type
+        for xml_obj, data in self.attributes.items():
+            for k, v, scope, default in data:
+                xml_obj.append(
+                    self.add_data(
+                        str(k), self.attr_type(k, scope, v), str(v), scope, default
+                    )
+                )
+        self.xml.append(graph_element)
+
+    def add_graphs(self, graph_list):
+        """Add many graphs to this GraphML document."""
+        for G in graph_list:
+            self.add_graph_element(G)
+
+    def dump(self, stream):
+        from xml.etree.ElementTree import ElementTree
+
+        if self.prettyprint:
+            self.indent(self.xml)
+        document = ElementTree(self.xml)
+        document.write(stream, encoding=self.encoding, xml_declaration=True)
+
+    def indent(self, elem, level=0):
+        # in-place prettyprint formatter
+        i = "\n" + level * "  "
+        if len(elem):
+            if not elem.text or not elem.text.strip():
+                elem.text = i + "  "
+            if not elem.tail or not elem.tail.strip():
+                elem.tail = i
+            for elem in elem:
+                self.indent(elem, level + 1)
+            if not elem.tail or not elem.tail.strip():
+                elem.tail = i
+        else:
+            if level and (not elem.tail or not elem.tail.strip()):
+                elem.tail = i
+
+
+class IncrementalElement:
+    """Wrapper for _IncrementalWriter providing an Element like interface.
+
+    This wrapper is not intended to be a complete implementation but rather to
+    deal with those calls used in GraphMLWriter.
+    """
+
+    def __init__(self, xml, prettyprint):
+        self.xml = xml
+        self.prettyprint = prettyprint
+
+    def append(self, element):
+        self.xml.write(element, pretty_print=self.prettyprint)
+
+
+class GraphMLWriterLxml(GraphMLWriter):
+    def __init__(
+        self,
+        path,
+        graph=None,
+        encoding="utf-8",
+        prettyprint=True,
+        infer_numeric_types=False,
+        named_key_ids=False,
+        edge_id_from_attribute=None,
+    ):
+        self.construct_types()
+        import lxml.etree as lxmletree
+
+        self.myElement = lxmletree.Element
+
+        self._encoding = encoding
+        self._prettyprint = prettyprint
+        self.named_key_ids = named_key_ids
+        self.edge_id_from_attribute = edge_id_from_attribute
+        self.infer_numeric_types = infer_numeric_types
+
+        self._xml_base = lxmletree.xmlfile(path, encoding=encoding)
+        self._xml = self._xml_base.__enter__()
+        self._xml.write_declaration()
+
+        # We need to have an XML variable that supports insertion. This call is
+        # used for adding the keys to the document.
+        # We will store those keys in a plain list, and then after the graph
+        # element is closed we will add them to the main graphml element.
+        self.xml = []
+        self._keys = self.xml
+        self._graphml = self._xml.element(
+            "graphml",
+            {
+                "xmlns": self.NS_GRAPHML,
+                "xmlns:xsi": self.NS_XSI,
+                "xsi:schemaLocation": self.SCHEMALOCATION,
+            },
+        )
+        self._graphml.__enter__()
+        self.keys = {}
+        self.attribute_types = defaultdict(set)
+
+        if graph is not None:
+            self.add_graph_element(graph)
+
+    def add_graph_element(self, G):
+        """
+        Serialize graph G in GraphML to the stream.
+        """
+        if G.is_directed():
+            default_edge_type = "directed"
+        else:
+            default_edge_type = "undirected"
+
+        graphid = G.graph.pop("id", None)
+        if graphid is None:
+            graph_element = self._xml.element("graph", edgedefault=default_edge_type)
+        else:
+            graph_element = self._xml.element(
+                "graph", edgedefault=default_edge_type, id=graphid
+            )
+
+        # gather attribute types for the whole graph
+        # to find the most general numeric format needed.
+        # Then pass through attributes to create key_id for each.
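+        # (Two passes per scope below: the first records every Python type seen
+        # for each attribute name, the second resolves each name to one GraphML
+        # key declaration via get_key.)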
+        graphdata = {
+            k: v
+            for k, v in G.graph.items()
+            if k not in ("node_default", "edge_default")
+        }
+        node_default = G.graph.get("node_default", {})
+        edge_default = G.graph.get("edge_default", {})
+        # Graph attributes
+        for k, v in graphdata.items():
+            self.attribute_types[(str(k), "graph")].add(type(v))
+        for k, v in graphdata.items():
+            element_type = self.get_xml_type(self.attr_type(k, "graph", v))
+            self.get_key(str(k), element_type, "graph", None)
+        # Nodes and data
+        for node, d in G.nodes(data=True):
+            for k, v in d.items():
+                self.attribute_types[(str(k), "node")].add(type(v))
+        for node, d in G.nodes(data=True):
+            for k, v in d.items():
+                T = self.get_xml_type(self.attr_type(k, "node", v))
+                self.get_key(str(k), T, "node", node_default.get(k))
+        # Edges and data
+        if G.is_multigraph():
+            for u, v, ekey, d in G.edges(keys=True, data=True):
+                for k, v in d.items():
+                    self.attribute_types[(str(k), "edge")].add(type(v))
+            for u, v, ekey, d in G.edges(keys=True, data=True):
+                for k, v in d.items():
+                    T = self.get_xml_type(self.attr_type(k, "edge", v))
+                    self.get_key(str(k), T, "edge", edge_default.get(k))
+        else:
+            for u, v, d in G.edges(data=True):
+                for k, v in d.items():
+                    self.attribute_types[(str(k), "edge")].add(type(v))
+            for u, v, d in G.edges(data=True):
+                for k, v in d.items():
+                    T = self.get_xml_type(self.attr_type(k, "edge", v))
+                    self.get_key(str(k), T, "edge", edge_default.get(k))
+
+        # Now add attribute keys to the xml file
+        for key in self.xml:
+            self._xml.write(key, pretty_print=self._prettyprint)
+
+        # The incremental_writer writes each node/edge as it is created
+        incremental_writer = IncrementalElement(self._xml, self._prettyprint)
+        with graph_element:
+            self.add_attributes("graph", incremental_writer, graphdata, {})
+            self.add_nodes(G, incremental_writer)  # adds attributes too
+            self.add_edges(G, incremental_writer)  # adds attributes too
+
+    def add_attributes(self, scope, xml_obj, data, default):
+        """Appends attribute data."""
+        for k, v in data.items():
+            data_element = self.add_data(
+                str(k), self.attr_type(str(k), scope, v), str(v), scope, default.get(k)
+            )
+            xml_obj.append(data_element)
+
+    def __str__(self):
+        return object.__str__(self)
+
+    def dump(self, stream=None):
+        self._graphml.__exit__(None, None, None)
+        self._xml_base.__exit__(None, None, None)
+
+
+# default is lxml if present.
+write_graphml = write_graphml_lxml
+
+
+class GraphMLReader(GraphML):
+    """Read a GraphML document.
Produces NetworkX graph objects.""" + + def __init__(self, node_type=str, edge_key_type=int, force_multigraph=False): + self.construct_types() + self.node_type = node_type + self.edge_key_type = edge_key_type + self.multigraph = force_multigraph # If False, test for multiedges + self.edge_ids = {} # dict mapping (u,v) tuples to edge id attributes + + def __call__(self, path=None, string=None): + from xml.etree.ElementTree import ElementTree, fromstring + + if path is not None: + self.xml = ElementTree(file=path) + elif string is not None: + self.xml = fromstring(string) + else: + raise ValueError("Must specify either 'path' or 'string' as kwarg") + (keys, defaults) = self.find_graphml_keys(self.xml) + for g in self.xml.findall(f"{{{self.NS_GRAPHML}}}graph"): + yield self.make_graph(g, keys, defaults) + + def make_graph(self, graph_xml, graphml_keys, defaults, G=None): + # set default graph type + edgedefault = graph_xml.get("edgedefault", None) + if G is None: + if edgedefault == "directed": + G = nx.MultiDiGraph() + else: + G = nx.MultiGraph() + # set defaults for graph attributes + G.graph["node_default"] = {} + G.graph["edge_default"] = {} + for key_id, value in defaults.items(): + key_for = graphml_keys[key_id]["for"] + name = graphml_keys[key_id]["name"] + python_type = graphml_keys[key_id]["type"] + if key_for == "node": + G.graph["node_default"].update({name: python_type(value)}) + if key_for == "edge": + G.graph["edge_default"].update({name: python_type(value)}) + # hyperedges are not supported + hyperedge = graph_xml.find(f"{{{self.NS_GRAPHML}}}hyperedge") + if hyperedge is not None: + raise nx.NetworkXError("GraphML reader doesn't support hyperedges") + # add nodes + for node_xml in graph_xml.findall(f"{{{self.NS_GRAPHML}}}node"): + self.add_node(G, node_xml, graphml_keys, defaults) + # add edges + for edge_xml in graph_xml.findall(f"{{{self.NS_GRAPHML}}}edge"): + self.add_edge(G, edge_xml, graphml_keys) + # add graph data + data = self.decode_data_elements(graphml_keys, graph_xml) + G.graph.update(data) + + # switch to Graph or DiGraph if no parallel edges were found + if self.multigraph: + return G + + G = nx.DiGraph(G) if G.is_directed() else nx.Graph(G) + # add explicit edge "id" from file as attribute in NX graph. + nx.set_edge_attributes(G, values=self.edge_ids, name="id") + return G + + def add_node(self, G, node_xml, graphml_keys, defaults): + """Add a node to the graph.""" + # warn on finding unsupported ports tag + ports = node_xml.find(f"{{{self.NS_GRAPHML}}}port") + if ports is not None: + warnings.warn("GraphML port tag not supported.") + # find the node by id and cast it to the appropriate type + node_id = self.node_type(node_xml.get("id")) + # get data/attributes for node + data = self.decode_data_elements(graphml_keys, node_xml) + G.add_node(node_id, **data) + # get child nodes + if node_xml.attrib.get("yfiles.foldertype") == "group": + graph_xml = node_xml.find(f"{{{self.NS_GRAPHML}}}graph") + self.make_graph(graph_xml, graphml_keys, defaults, G) + + def add_edge(self, G, edge_element, graphml_keys): + """Add an edge to the graph.""" + # warn on finding unsupported ports tag + ports = edge_element.find(f"{{{self.NS_GRAPHML}}}port") + if ports is not None: + warnings.warn("GraphML port tag not supported.") + + # raise error if we find mixed directed and undirected edges + directed = edge_element.get("directed") + if G.is_directed() and directed == "false": + msg = "directed=false edge found in directed graph." 
+ raise nx.NetworkXError(msg) + if (not G.is_directed()) and directed == "true": + msg = "directed=true edge found in undirected graph." + raise nx.NetworkXError(msg) + + source = self.node_type(edge_element.get("source")) + target = self.node_type(edge_element.get("target")) + data = self.decode_data_elements(graphml_keys, edge_element) + # GraphML stores edge ids as an attribute + # NetworkX uses them as keys in multigraphs too if no key + # attribute is specified + edge_id = edge_element.get("id") + if edge_id: + # self.edge_ids is used by `make_graph` method for non-multigraphs + self.edge_ids[source, target] = edge_id + try: + edge_id = self.edge_key_type(edge_id) + except ValueError: # Could not convert. + pass + else: + edge_id = data.get("key") + + if G.has_edge(source, target): + # mark this as a multigraph + self.multigraph = True + + # Use add_edges_from to avoid error with add_edge when `'key' in data` + # Note there is only one edge here... + G.add_edges_from([(source, target, edge_id, data)]) + + def decode_data_elements(self, graphml_keys, obj_xml): + """Use the key information to decode the data XML if present.""" + data = {} + for data_element in obj_xml.findall(f"{{{self.NS_GRAPHML}}}data"): + key = data_element.get("key") + try: + data_name = graphml_keys[key]["name"] + data_type = graphml_keys[key]["type"] + except KeyError as err: + raise nx.NetworkXError(f"Bad GraphML data: no key {key}") from err + text = data_element.text + # assume anything with subelements is a yfiles extension + if text is not None and len(list(data_element)) == 0: + if data_type == bool: + # Ignore cases. + # http://docs.oracle.com/javase/6/docs/api/java/lang/ + # Boolean.html#parseBoolean%28java.lang.String%29 + data[data_name] = self.convert_bool[text.lower()] + else: + data[data_name] = data_type(text) + elif len(list(data_element)) > 0: + # Assume yfiles as subelements, try to extract node_label + node_label = None + # set GenericNode's configuration as shape type + gn = data_element.find(f"{{{self.NS_Y}}}GenericNode") + if gn is not None: + data["shape_type"] = gn.get("configuration") + for node_type in ["GenericNode", "ShapeNode", "SVGNode", "ImageNode"]: + pref = f"{{{self.NS_Y}}}{node_type}/{{{self.NS_Y}}}" + geometry = data_element.find(f"{pref}Geometry") + if geometry is not None: + data["x"] = geometry.get("x") + data["y"] = geometry.get("y") + if node_label is None: + node_label = data_element.find(f"{pref}NodeLabel") + shape = data_element.find(f"{pref}Shape") + if shape is not None: + data["shape_type"] = shape.get("type") + if node_label is not None: + data["label"] = node_label.text + + # check all the different types of edges available in yEd. 
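+                # (only the first edge element that carries an EdgeLabel is
+                # used; the loop below stops at the first match)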
+ for edge_type in [ + "PolyLineEdge", + "SplineEdge", + "QuadCurveEdge", + "BezierEdge", + "ArcEdge", + ]: + pref = f"{{{self.NS_Y}}}{edge_type}/{{{self.NS_Y}}}" + edge_label = data_element.find(f"{pref}EdgeLabel") + if edge_label is not None: + break + if edge_label is not None: + data["label"] = edge_label.text + elif text is None: + data[data_name] = "" + return data + + def find_graphml_keys(self, graph_element): + """Extracts all the keys and key defaults from the xml.""" + graphml_keys = {} + graphml_key_defaults = {} + for k in graph_element.findall(f"{{{self.NS_GRAPHML}}}key"): + attr_id = k.get("id") + attr_type = k.get("attr.type") + attr_name = k.get("attr.name") + yfiles_type = k.get("yfiles.type") + if yfiles_type is not None: + attr_name = yfiles_type + attr_type = "yfiles" + if attr_type is None: + attr_type = "string" + warnings.warn(f"No key type for id {attr_id}. Using string") + if attr_name is None: + raise nx.NetworkXError(f"Unknown key for id {attr_id}.") + graphml_keys[attr_id] = { + "name": attr_name, + "type": self.python_type[attr_type], + "for": k.get("for"), + } + # check for "default" sub-element of key element + default = k.find(f"{{{self.NS_GRAPHML}}}default") + if default is not None: + # Handle default values identically to data element values + python_type = graphml_keys[attr_id]["type"] + if python_type == bool: + graphml_key_defaults[attr_id] = self.convert_bool[ + default.text.lower() + ] + else: + graphml_key_defaults[attr_id] = python_type(default.text) + return graphml_keys, graphml_key_defaults
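
A quick round-trip sketch of the GraphML reader/writer pair added above (illustrative only: "test.graphml" is a scratch filename, and write_graphml is bound to write_graphml_lxml, which falls back to the xml.etree writer at call time when lxml is not importable):

    import networkx as nx

    # Build a small graph with typed node/edge attributes.
    G = nx.Graph()
    G.add_edge("a", "b", weight=1.5)
    G.nodes["a"]["color"] = "red"

    # The writer emits <key> declarations recording attr.name/attr.type,
    # which let the reader restore attribute types on the way back in.
    nx.write_graphml(G, "test.graphml")

    H = nx.read_graphml("test.graphml")
    assert H.edges["a", "b"]["weight"] == 1.5  # restored as float
    assert H.nodes["a"]["color"] == "red"      # restored as str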