sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
ray-project/ray:doc/source/cluster/doc_code/yarn/dashboard.py | import skein
import sys
from urllib.parse import urlparse
if __name__ == "__main__":
    # Require exactly one CLI argument: the dashboard address.
    if len(sys.argv) < 2:
        print("Usage: python dashboard.py <dashboard-address>")
        sys.exit(1)

    address = sys.argv[1]

    # Reject addresses that are not well-formed URLs (scheme + netloc required).
    parsed = urlparse(address)
    if not (parsed.scheme and parsed.netloc):
        print("Error: Invalid dashboard address. Please provide a valid URL.")
        sys.exit(1)

    print("Registering dashboard " + address + " on skein.")

    # Register the Ray dashboard as a page in the Skein application UI.
    skein_app = skein.ApplicationClient.from_current()
    skein_app.ui.add_page("ray-dashboard", address, "Ray Dashboard")
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/cluster/doc_code/yarn/dashboard.py",
"license": "Apache License 2.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/data/_internal/planner/plan_expression/expression_visitors.py | from dataclasses import replace
from typing import Dict, List, TypeVar
from ray.data.expressions import (
AliasExpr,
BinaryExpr,
ColumnExpr,
DownloadExpr,
Expr,
LiteralExpr,
MonotonicallyIncreasingIdExpr,
Operation,
StarExpr,
UDFExpr,
UnaryExpr,
_CallableClassUDF,
_ExprVisitor,
)
T = TypeVar("T")

# Mapping of operations to their string symbols for inline representation
# (e.g. rendering a BinaryExpr as ``col('a') + col('b')``).
_INLINE_OP_SYMBOLS = {
    Operation.ADD: "+",
    Operation.SUB: "-",
    Operation.MUL: "*",
    Operation.DIV: "/",
    Operation.MOD: "%",
    Operation.FLOORDIV: "//",
    Operation.GT: ">",
    Operation.LT: "<",
    Operation.GE: ">=",
    Operation.LE: "<=",
    Operation.EQ: "==",
    Operation.NE: "!=",
    Operation.AND: "&",
    Operation.OR: "|",
    Operation.IN: "in",
    Operation.NOT_IN: "not in",
}
class _ExprVisitorBase(_ExprVisitor[None]):
    """Base visitor that walks expression trees recursively.

    Extends ``_ExprVisitor`` with default implementations for composite
    nodes that simply descend into every child expression, and no-op
    implementations for leaf nodes.
    """

    def visit_binary(self, expr: "BinaryExpr") -> None:
        """Descend into both sides of the binary expression."""
        super().visit(expr.left)
        super().visit(expr.right)

    def visit_unary(self, expr: "UnaryExpr") -> None:
        """Descend into the single operand."""
        super().visit(expr.operand)

    def visit_alias(self, expr: "AliasExpr") -> None:
        """Descend into the aliased inner expression."""
        super().visit(expr.expr)

    def visit_udf(self, expr: "UDFExpr") -> None:
        """Descend into every positional and keyword argument."""
        for positional in expr.args:
            super().visit(positional)
        for keyword_value in expr.kwargs.values():
            super().visit(keyword_value)

    def visit_literal(self, expr: LiteralExpr) -> None:
        """Leaf node: nothing to traverse."""
        pass

    def visit_star(self, expr: StarExpr) -> None:
        """Leaf node: nothing to traverse."""
        pass

    def visit_download(self, expr: "Expr") -> None:
        """Leaf node: nothing to traverse."""
        pass

    def visit_monotonically_increasing_id(
        self, expr: "MonotonicallyIncreasingIdExpr"
    ) -> None:
        """Leaf node: nothing to traverse."""
        pass
class _ColumnReferenceCollector(_ExprVisitorBase):
    """Visitor that accumulates column names referenced by an expression tree.

    Traverses the tree and records the name of every ``ColumnExpr`` it
    encounters, preserving first-seen order.
    """

    def __init__(self):
        # A dict is used instead of a set so that insertion order (and
        # therefore the order in which columns first appear) is preserved.
        self._col_refs: Dict[str, None] = {}

    def get_column_refs(self) -> List[str]:
        """Return the collected column names in order of first appearance."""
        return list(self._col_refs.keys())

    def visit_column(self, expr: ColumnExpr) -> None:
        """Record the column's name (collection happens as a side effect)."""
        self._col_refs[expr.name] = None

    def visit_alias(self, expr: AliasExpr) -> None:
        """Collect references from the expression behind the alias."""
        self.visit(expr.expr)
class _CallableClassUDFCollector(_ExprVisitorBase):
    """Visitor that gathers all ``_CallableClassUDF`` instances in a tree.

    Only UDFs backed by callable classes are collected; UDFs wrapping plain
    functions are ignored.
    """

    def __init__(self):
        self._expr_udfs: List[_CallableClassUDF] = []

    def get_callable_class_udfs(self) -> List[_CallableClassUDF]:
        """Return every collected callable-class UDF wrapper."""
        return self._expr_udfs

    def visit_column(self, expr: ColumnExpr) -> None:
        """Leaf node: columns carry no UDFs."""
        pass

    def visit_udf(self, expr: UDFExpr) -> None:
        """Collect the UDF if it wraps a callable class, then recurse.

        Args:
            expr: The UDF expression under inspection.
        """
        # An _CallableClassUDF fn marks a UDF backed by a callable class.
        if isinstance(expr.fn, _CallableClassUDF):
            self._expr_udfs.append(expr.fn)
        # Keep walking the argument subtrees.
        super().visit_udf(expr)
class _ColumnSubstitutionVisitor(_ExprVisitor[Expr]):
    """Visitor rebinding column references in ``Expression``s.

    Walks an expression tree and replaces column references according to a
    user-provided substitution map, rebuilding composite nodes along the way.
    """

    def __init__(self, column_ref_substitutions: Dict[str, Expr]):
        """Initialize with a column substitution map.

        Args:
            column_ref_substitutions: Mapping from column names to
                replacement expressions.
        """
        self._col_ref_substitutions = column_ref_substitutions

    def visit_column(self, expr: ColumnExpr) -> Expr:
        """Return the mapped replacement, or the column itself if unmapped."""
        replacement = self._col_ref_substitutions.get(expr.name)
        if replacement is None:
            return expr
        return replacement

    def visit_literal(self, expr: LiteralExpr) -> Expr:
        """Literals contain no column references; return unchanged."""
        return expr

    def visit_binary(self, expr: BinaryExpr) -> Expr:
        """Rebuild the binary node with both operands rewritten."""
        rewritten_left = self.visit(expr.left)
        rewritten_right = self.visit(expr.right)
        return BinaryExpr(expr.op, rewritten_left, rewritten_right)

    def visit_unary(self, expr: UnaryExpr) -> Expr:
        """Rebuild the unary node with its operand rewritten."""
        return UnaryExpr(expr.op, self.visit(expr.operand))

    def visit_udf(self, expr: UDFExpr) -> Expr:
        """Rebuild the UDF node with all arguments rewritten."""
        rewritten_args = [self.visit(a) for a in expr.args]
        rewritten_kwargs = {k: self.visit(v) for k, v in expr.kwargs.items()}
        return UDFExpr(
            fn=expr.fn,
            data_type=expr.data_type,
            args=rewritten_args,
            kwargs=rewritten_kwargs,
        )

    def visit_alias(self, expr: AliasExpr) -> Expr:
        """Rebuild the alias node around its rewritten inner expression."""
        # Unalias the rewritten child so aliases never nest.
        rewritten_inner = self.visit(expr.expr)._unalias()
        # Carry over every other attribute of the alias, swapping only the
        # inner expression.
        return replace(
            expr,
            expr=rewritten_inner,
            # The alias stays a renaming one (i.e. replacing the source
            # column) only while it still points at a column reference.
            #
            # TODO replace w/ standalone rename expr
            _is_rename=expr._is_rename and _is_col_expr(rewritten_inner),
        )

    def visit_download(self, expr: "Expr") -> Expr:
        """Download expressions carry no column references; return unchanged."""
        return expr

    def visit_star(self, expr: StarExpr) -> Expr:
        """Star expressions carry no column references; return unchanged."""
        return expr

    def visit_monotonically_increasing_id(
        self, expr: MonotonicallyIncreasingIdExpr
    ) -> Expr:
        """No column references; return unchanged."""
        return expr
def _is_col_expr(expr: Expr) -> bool:
    """Return True if ``expr`` is a column reference, possibly aliased."""
    if isinstance(expr, ColumnExpr):
        return True
    return isinstance(expr, AliasExpr) and isinstance(expr.expr, ColumnExpr)
class _TreeReprVisitor(_ExprVisitor[str]):
    """Visitor that generates a readable tree representation of expressions.

    Nodes are rendered in pre-order traversal, using Unicode box-drawing
    characters for the connecting branches.
    """

    def __init__(self, prefix: str = "", is_last: bool = True):
        """
        Initialize the tree representation visitor.

        Args:
            prefix: The prefix string for indentation (accumulated from
                parent nodes)
            is_last: Whether this node is the last child of its parent
        """
        self.prefix = prefix
        self.is_last = is_last
        self._max_length = 50  # Maximum length of the node label

    def _make_tree_lines(
        self,
        node_label: str,
        children: List[tuple[str, "Expr"]] = None,
        expr: "Expr" = None,
    ) -> str:
        """
        Format a node and its children with tree box-drawing characters.

        Args:
            node_label: The label for this node (e.g., "ADD")
            children: List of (label, child_expr) tuples to render as children
            expr: The expression node (used to extract datatype)

        Returns:
            Multi-line string representation of the tree
        """
        lines = [node_label]
        if children:
            for i, (label, child_expr) in enumerate(children):
                is_last_child = i == len(children) - 1
                # Build prefix for the child based on whether current node is
                # last. BUGFIX: these box-drawing string literals had been
                # corrupted by a text-encoding round trip ("β " / "βββ ");
                # restored to the standard "│", "├──", "└──" glyphs.
                child_prefix = self.prefix + ("    " if self.is_last else "│   ")
                # Choose connector: └── for last child, ├── for others
                connector = "└── " if is_last_child else "├── "
                # Recursively visit the child with updated prefix
                child_visitor = _TreeReprVisitor(child_prefix, is_last_child)
                child_lines = child_visitor.visit(child_expr).split("\n")
                # Add the first line with label and connector
                if label:
                    lines.append(f"{child_prefix}{connector}{label}: {child_lines[0]}")
                else:
                    lines.append(f"{child_prefix}{connector}{child_lines[0]}")
                # Add remaining lines from child with proper indentation
                for line in child_lines[1:]:
                    lines.append(line)
        return "\n".join(lines)

    def visit_column(self, expr: "ColumnExpr") -> str:
        """Render a column reference node."""
        return self._make_tree_lines(f"COL({expr.name!r})", expr=expr)

    def visit_literal(self, expr: "LiteralExpr") -> str:
        """Render a literal node, truncating long values for readability."""
        value_repr = repr(expr.value)
        if len(value_repr) > self._max_length:
            value_repr = value_repr[: self._max_length - 3] + "..."
        return self._make_tree_lines(f"LIT({value_repr})", expr=expr)

    def visit_binary(self, expr: "BinaryExpr") -> str:
        """Render a binary node with 'left' and 'right' children."""
        return self._make_tree_lines(
            f"{expr.op.name}",
            children=[
                ("left", expr.left),
                ("right", expr.right),
            ],
            expr=expr,
        )

    def visit_unary(self, expr: "UnaryExpr") -> str:
        """Render a unary node with its single operand child."""
        return self._make_tree_lines(
            f"{expr.op.name}",
            children=[("operand", expr.operand)],
            expr=expr,
        )

    def visit_alias(self, expr: "AliasExpr") -> str:
        """Render an alias node, marking renaming aliases."""
        rename_marker = " [rename]" if expr._is_rename else ""
        return self._make_tree_lines(
            f"ALIAS({expr.name!r}){rename_marker}",
            children=[("", expr.expr)],
            expr=expr,
        )

    def visit_udf(self, expr: "UDFExpr") -> str:
        """Render a UDF node with each argument as a labeled child."""
        # Get function name for better readability
        fn_name = getattr(expr.fn, "__name__", str(expr.fn))
        children = []
        # Add positional arguments
        for i, arg in enumerate(expr.args):
            children.append((f"arg[{i}]", arg))
        # Add keyword arguments
        for key, value in expr.kwargs.items():
            children.append((f"kwarg[{key!r}]", value))
        return self._make_tree_lines(
            f"UDF({fn_name})",
            children=children if children else None,
            expr=expr,
        )

    def visit_download(self, expr: "DownloadExpr") -> str:
        """Render a download node."""
        return self._make_tree_lines(f"DOWNLOAD({expr.uri_column_name!r})", expr=expr)

    def visit_star(self, expr: "StarExpr") -> str:
        """Render a star (all-columns) node."""
        return self._make_tree_lines("COL(*)", expr=expr)

    def visit_monotonically_increasing_id(
        self, expr: "MonotonicallyIncreasingIdExpr"
    ) -> str:
        """Render a monotonically_increasing_id node."""
        return self._make_tree_lines("MONOTONICALLY_INCREASING_ID()", expr=expr)
class _InlineExprReprVisitor(_ExprVisitor[str]):
    """Visitor producing compact single-line representations of expressions.

    Useful for operator names, log messages, and other places where a short,
    human-readable rendering of an expression is needed.
    """

    def __init__(self, max_literal_length: int = 20):
        """Initialize the inline representation visitor.

        Args:
            max_literal_length: Maximum length for literal value
                representations
        """
        self._max_literal_length = max_literal_length

    def visit_column(self, expr: "ColumnExpr") -> str:
        """Render a column reference."""
        return f"col({expr.name!r})"

    def visit_literal(self, expr: "LiteralExpr") -> str:
        """Render a literal, truncating overly long values."""
        rendered = repr(expr.value)
        if len(rendered) > self._max_literal_length:
            rendered = rendered[: self._max_literal_length - 3] + "..."
        return rendered

    def visit_binary(self, expr: "BinaryExpr") -> str:
        """Render a binary expression, parenthesizing nested binaries."""

        def _render_side(side: "Expr") -> str:
            text = self.visit(side)
            # Parenthesize child binary expressions to avoid ambiguity.
            return f"({text})" if isinstance(side, BinaryExpr) else text

        symbol = _INLINE_OP_SYMBOLS.get(expr.op, expr.op.name.lower())
        return f"{_render_side(expr.left)} {symbol} {_render_side(expr.right)}"

    def visit_unary(self, expr: "UnaryExpr") -> str:
        """Render a unary expression using its symbol or method form."""
        operand_text = self.visit(expr.operand)
        # Parenthesize binary operands to avoid ambiguity.
        if isinstance(expr.operand, BinaryExpr):
            operand_text = f"({operand_text})"
        # Map operations to symbols/functions.
        if expr.op == Operation.NOT:
            return f"~{operand_text}"
        if expr.op == Operation.IS_NULL:
            return f"{operand_text}.is_null()"
        if expr.op == Operation.IS_NOT_NULL:
            return f"{operand_text}.is_not_null()"
        return f"{expr.op.name.lower()}({operand_text})"

    def visit_alias(self, expr: "AliasExpr") -> str:
        """Render an alias as ``<inner>.alias(<name>)``."""
        return f"{self.visit(expr.expr)}.alias({expr.name!r})"

    def visit_udf(self, expr: "UDFExpr") -> str:
        """Render a UDF call with its rendered arguments."""
        # For callable objects (instances with __call__), fall back to the
        # class name when no __name__ attribute exists.
        fn_name = getattr(expr.fn, "__name__", expr.fn.__class__.__name__)
        rendered = [self.visit(a) for a in expr.args]
        rendered += [f"{k}={self.visit(v)}" for k, v in expr.kwargs.items()]
        return f"{fn_name}({', '.join(rendered)})"

    def visit_download(self, expr: "DownloadExpr") -> str:
        """Render a download expression."""
        return f"download({expr.uri_column_name!r})"

    def visit_star(self, expr: "StarExpr") -> str:
        """Render a star (all-columns) expression."""
        return "col(*)"

    def visit_monotonically_increasing_id(
        self, expr: "MonotonicallyIncreasingIdExpr"
    ) -> str:
        """Render a monotonically_increasing_id expression."""
        return "monotonically_increasing_id()"
def get_column_references(expr: Expr) -> List[str]:
    """Extract all column references from an expression.

    Convenience wrapper that runs a ``_ColumnReferenceCollector`` over the
    expression tree and returns the referenced column names.

    Args:
        expr: The expression to extract column references from.

    Returns:
        List of column names referenced in the expression, in order of
        appearance.

    Example:
        >>> from ray.data.expressions import col
        >>> expr = (col("a") > 5) & (col("b") == "test")
        >>> get_column_references(expr)
        ['a', 'b']
    """
    visitor = _ColumnReferenceCollector()
    visitor.visit(expr)
    return visitor.get_column_refs()
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/planner/plan_expression/expression_visitors.py",
"license": "Apache License 2.0",
"lines": 433,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/tests/test_ray_get.py | import sys
import threading
import time
import numpy as np
import pytest
import ray
def test_multithreaded_ray_get(ray_start_cluster):
    # This test tries to get a large object from the head node to the worker
    # node while making many concurrent ray.get requests for a local object
    # in plasma.
    # TODO(57923): Make this not rely on timing if possible.
    cluster = ray_start_cluster
    cluster.add_node(
        # This will make the object transfer slower and allow the test to
        # interleave Get requests.
        _system_config={
            "object_manager_max_bytes_in_flight": 1024**2,
        }
    )
    ray.init(address=cluster.address)
    cluster.add_node(resources={"worker": 1})

    # max_concurrency >= 3 is required: one thread for small gets, one for
    # large gets, one for setting the threading.Events.
    @ray.remote(resources={"worker": 1}, max_concurrency=3)
    class Actor:
        def __init__(self):
            # ray.put will ensure that the object is in plasma
            # even if it's small.
            self._local_small_ref = ray.put("1")
            # Used to check the thread running the small `ray.gets` has made
            # at least one API call successfully.
            self._small_gets_started = threading.Event()
            # Used to tell the thread running small `ray.gets` to exit.
            self._stop_small_gets = threading.Event()

        def small_gets_started(self):
            self._small_gets_started.wait()

        def stop_small_gets(self):
            self._stop_small_gets.set()

        def do_small_gets(self):
            while not self._stop_small_gets.is_set():
                ray.get(self._local_small_ref)
                time.sleep(0.01)
                # Set inside the loop so callers waiting on
                # small_gets_started() unblock after the first get.
                self._small_gets_started.set()

        def do_large_get(self, refs_to_get):
            remote_large_ref = refs_to_get[0]
            ray.get(remote_large_ref)

    actor = Actor.remote()

    # Start a task on one thread that will repeatedly call `ray.get` on small
    # plasma objects.
    small_gets_ref = actor.do_small_gets.remote()
    ray.get(actor.small_gets_started.remote())

    # Start a second task on another thread that will call `ray.get` on a
    # large object. The transfer will be slow due to the system config set
    # above.
    large_ref = ray.put(np.ones(1024**3, dtype=np.int8))
    ray.get(actor.do_large_get.remote([large_ref]))

    # Check that all `ray.get` calls succeeded.
    ray.get(actor.stop_small_gets.remote())
    ray.get(small_gets_ref)
if __name__ == "__main__":
    # Allow running this test module directly.
    sys.exit(pytest.main(["-sv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/test_ray_get.py",
"license": "Apache License 2.0",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:rllib/utils/metrics/ray_metrics.py | import time
from ray.util.metrics import Histogram
# Shared bucket-shaping parameters for the histograms defined below.
_num_buckets = 31
# Exponent applied by _create_buckets to the normalized step; larger values
# pack buckets more densely toward the low end of the range.
_coeff = 4
# Boundaries (in seconds) for short-running events.
_short_event_min = 0.0001  # 0.1 ms
_short_event_max = 1.5
# Boundaries (in seconds) for long-running events.
_long_event_min = 0.1
_long_event_max = 600.0
def _create_buckets(coeff, event_min, event_max, num):
"""Generates a list of `num` buckets between `event_min` and `event_max`.
`coeff` - specifies how much denser at the low end
"""
if num == 1:
return [event_min]
step = 1 / (num - 1)
return [
(0 + step * i) ** coeff * (event_max - event_min) + event_min
for i in range(num)
]
# Pre-computed bucket boundaries for short-running events (0.1 ms to 1.5 s).
DEFAULT_HISTOGRAM_BOUNDARIES_SHORT_EVENTS = _create_buckets(
    coeff=_coeff,
    event_min=_short_event_min,
    event_max=_short_event_max,
    num=_num_buckets,
)

# Pre-computed bucket boundaries for long-running events (0.1 s to 600 s).
DEFAULT_HISTOGRAM_BOUNDARIES_LONG_EVENTS = _create_buckets(
    coeff=_coeff,
    event_min=_long_event_min,
    event_max=_long_event_max,
    num=_num_buckets,
)
class TimerAndPrometheusLogger:
    """Context manager for timing code execution.

    Elapsed time is automatically logged to the provided Prometheus Histogram.

    Example:
        with TimerAndPrometheusLogger(Histogram):
            learner.update()
    """

    def __init__(self, histogram: Histogram):
        self._histogram = histogram

    def __enter__(self):
        # perf_counter gives a monotonic, high-resolution clock suitable for
        # measuring intervals.
        self.start = time.perf_counter()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Record the elapsed time; runs even if the body raised.
        self.elapsed = time.perf_counter() - self.start
        self._histogram.observe(self.elapsed)
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/utils/metrics/ray_metrics.py",
"license": "Apache License 2.0",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/llm/_internal/common/callbacks/base.py | import asyncio
import inspect
import logging
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Type, Union
if TYPE_CHECKING:
from ray.llm._internal.common.utils.download_utils import NodeModelDownloadable
from ray.llm._internal.serve.core.configs.llm_config import LLMConfig
logger = logging.getLogger(__name__)
@dataclass
class CallbackCtx:
    """
    Context object passed to all callback hooks.

    Callbacks can read and modify fields as needed.
    """

    # Model download configuration for worker nodes: how models should be
    # downloaded and cached on worker nodes in distributed deployments.
    worker_node_download_model: Optional["NodeModelDownloadable"] = None

    # Ray placement group for resource allocation and scheduling.
    placement_group: Optional[Any] = None

    # Runtime environment configuration for the Ray workers (dependencies,
    # environment variables, and other runtime settings).
    runtime_env: Optional[Dict[str, Any]] = None

    # Flexible per-instance dictionary for callback-specific state; lets
    # callbacks store and share custom information during initialization.
    custom_data: Dict[str, Any] = field(default_factory=dict)

    # Whether to run model downloads during initialization; set to False to
    # skip downloading models.
    run_init_node: bool = True
class CallbackBase:
    """Base class for custom initialization implementations.

    This class defines the interface for custom initialization logic
    for LLMEngine to be called in node_initialization.
    """

    def __init__(
        self,
        llm_config: "LLMConfig",
        raise_error_on_callback: bool = True,
        ctx_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        """Initialize the callback.

        Args:
            llm_config: The LLM configuration this callback is attached to.
            raise_error_on_callback: If True, errors raised by callback
                methods are re-raised (wrapped); otherwise they are logged.
            ctx_kwargs: Keyword arguments used to construct the internal
                ``CallbackCtx``.
            **kwargs: Extra keyword arguments stored for use by subclasses.
        """
        self.raise_error_on_callback = raise_error_on_callback
        self.kwargs = kwargs
        self.llm_config = llm_config
        # Create and store CallbackCtx internally using ctx_kwargs
        ctx_kwargs = ctx_kwargs or {}
        self.ctx = CallbackCtx(**ctx_kwargs)

    async def on_before_node_init(self) -> None:
        """Called before node initialization begins."""
        pass

    async def on_after_node_init(self) -> None:
        """Called after node initialization completes."""
        pass

    def on_before_download_model_files_distributed(self) -> None:
        """Called before model files are downloaded on each node."""
        pass

    def _get_method(self, method_name: str) -> Tuple[Callable, bool]:
        """Resolve a callback method by name.

        Args:
            method_name: Name of the method to look up on this instance.

        Returns:
            A tuple of (bound method, whether it is a coroutine function).

        Raises:
            AttributeError: If no such method exists on this callback.
        """
        if not hasattr(self, method_name):
            raise AttributeError(
                f"Callback {type(self).__name__} does not have method '{method_name}'"
            )
        method = getattr(self, method_name)
        return method, inspect.iscoroutinefunction(method)

    def _handle_callback_error(self, method_name: str, e: Exception) -> None:
        """Re-raise or log a callback failure, per configuration."""
        if self.raise_error_on_callback:
            raise Exception(
                f"Error running callback method '{method_name}' on {type(self).__name__}: {str(e)}"
            ) from e
        else:
            logger.error(
                f"Error running callback method '{method_name}' on {type(self).__name__}: {str(e)}"
            )

    async def run_callback(self, method_name: str) -> None:
        """Run a callback method either synchronously or asynchronously.

        Args:
            method_name: The name of the method to call on the callback

        Returns:
            None
        """
        method, is_async = self._get_method(method_name)
        try:
            if is_async:
                await method()
            else:
                method()
        except Exception as e:
            self._handle_callback_error(method_name, e)

    def run_callback_sync(self, method_name: str) -> None:
        """Run a callback method synchronously

        Args:
            method_name: The name of the method to call on the callback

        Returns:
            None
        """
        method, is_async = self._get_method(method_name)
        try:
            if is_async:
                # BUGFIX: the previous implementation called
                # `loop.run_until_complete(method())` when a loop was already
                # running in this thread, which always raises RuntimeError and
                # abandons the freshly created coroutine; the fallback
                # `asyncio.run(method())` then failed the same way with a
                # second abandoned coroutine. Probe for a running loop
                # *before* creating the coroutine instead.
                try:
                    asyncio.get_running_loop()
                except RuntimeError:
                    # No loop running in this thread: safe to drive the
                    # coroutine to completion ourselves.
                    asyncio.run(method())
                else:
                    # Surfaced through _handle_callback_error like any other
                    # callback failure (same error path as before the fix).
                    raise RuntimeError(
                        f"Cannot run async callback method '{method_name}' "
                        "synchronously from within a running event loop; "
                        "use `run_callback` instead."
                    )
            else:
                method()
        except Exception as e:
            self._handle_callback_error(method_name, e)
@dataclass
class CallbackConfig:
    """Configuration for the callback to be used in LLMConfig"""

    # Class to use for the callback; can be a custom user-defined class, or
    # a string referencing it (presumably an import path — TODO confirm).
    callback_class: Union[str, Type[CallbackBase]] = CallbackBase

    # Keyword arguments to pass to the Callback class at construction.
    callback_kwargs: Dict[str, Any] = field(default_factory=dict)

    # Whether to raise an error if a callback method fails.
    raise_error_on_callback: bool = True
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/_internal/common/callbacks/base.py",
"license": "Apache License 2.0",
"lines": 119,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/llm/_internal/common/callbacks/cloud_downloader.py | import logging
import time
from typing import Any, List, Tuple
from pydantic import BaseModel, field_validator
from .base import CallbackBase
logger = logging.getLogger(__name__)
class CloudDownloaderConfig(BaseModel):
    """Model for validating CloudDownloader configuration."""

    # Each entry is a (cloud_uri, local_path) pair.
    paths: List[Tuple[str, str]]

    @field_validator("paths")
    @classmethod
    def validate_paths(cls, v: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
        """Reject any cloud URI that does not use a supported scheme."""
        # Supported cloud storage URI schemes
        valid_schemes = ("s3://", "gs://", "abfss://", "azure://")
        for i, (cloud_uri, _) in enumerate(v):
            # str.startswith accepts a tuple, so a single call covers all
            # supported schemes.
            if not cloud_uri.startswith(valid_schemes):
                raise ValueError(
                    f"paths[{i}][0] (cloud_uri) must start with one of {valid_schemes}, "
                    f"got '{cloud_uri}'"
                )
        return v
class CloudDownloader(CallbackBase):
    """Callback that downloads files from cloud storage before model files are downloaded.

    This callback expects self.kwargs to contain a 'paths' field which should be
    a list of tuples, where each tuple contains (cloud_uri, local_path) strings.

    Supported cloud storage URIs: s3://, gs://, abfss://, azure://

    Example:
        ```
        from ray.llm._internal.common.callbacks.cloud_downloader import CloudDownloader
        from ray.llm._internal.serve.core.configs.llm_config import LLMConfig

        config = LLMConfig(
            ...
            callback_config={
                "callback_class": CloudDownloader,
                "callback_kwargs": {
                    "paths": [
                        ("s3://bucket/path/to/file.txt", "/local/path/to/file.txt"),
                        ("gs://bucket/path/to/file.txt", "/local/path/to/file.txt"),
                    ]
                }
            }
            ...
        )
        ```
    """

    def __init__(self, **kwargs: Any) -> None:
        """Initialize the CloudDownloader callback.

        Args:
            **kwargs: Keyword arguments passed to the callback as a dictionary.
                Must contain a 'paths' field with a list of
                (cloud_uri, local_path) tuples.

        Raises:
            ValueError: If the 'paths' field is missing or malformed.
        """
        super().__init__(**kwargs)
        # Fail fast on malformed configuration via Pydantic validation.
        if "paths" not in self.kwargs:
            raise ValueError("CloudDownloader requires 'paths' field in kwargs")
        CloudDownloaderConfig.model_validate(self.kwargs)

    def on_before_download_model_files_distributed(self) -> None:
        """Download files from cloud storage to local paths before model files are downloaded."""
        from ray.llm._internal.common.utils.cloud_utils import CloudFileSystem

        download_start = time.monotonic()
        for cloud_uri, local_path in self.kwargs["paths"]:
            CloudFileSystem.download_files(path=local_path, bucket_uri=cloud_uri)
        elapsed = time.monotonic() - download_start
        logger.info(f"CloudDownloader: Files downloaded in {elapsed} seconds")
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/_internal/common/callbacks/cloud_downloader.py",
"license": "Apache License 2.0",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:python/ray/llm/tests/common/utils/test_callback_base.py | import asyncio
import pytest
from ray.llm._internal.common.callbacks.base import (
CallbackBase,
)
from ray.llm._internal.common.utils.download_utils import NodeModelDownloadable
from ray.llm._internal.serve.core.configs.llm_config import (
LLMConfig,
ModelLoadingConfig,
)
class TestingCallback(CallbackBase):
    """Callback used by the tests below to record hook invocations."""

    def __init__(self, llm_config, raise_error_on_callback: bool = True, **kwargs):
        super().__init__(llm_config, raise_error_on_callback, **kwargs)
        # Track which hooks have fired.
        self.before_init_called = False
        self.after_init_called = False
        self.before_init_ctx = None
        self.after_init_ctx = None
        # The kwargs configured in the fixture must be forwarded verbatim.
        assert kwargs["kwargs_test_key"] == "kwargs_test_value"

    async def on_before_node_init(self) -> None:
        # The default download mode must be in effect before this hook runs.
        assert (
            self.ctx.worker_node_download_model
            == NodeModelDownloadable.MODEL_AND_TOKENIZER
        )
        # Mutate the ctx so on_after_node_init can verify the change sticks.
        self.ctx.worker_node_download_model = NodeModelDownloadable.NONE
        self.ctx.custom_data["ctx_test_key"] = "ctx_test_value"
        self.before_init_called = True
        self.ctx.run_init_node = False

    async def on_after_node_init(self) -> None:
        # Changes made in on_before_node_init must be visible here.
        assert self.ctx.worker_node_download_model == NodeModelDownloadable.NONE
        self.after_init_called = True
        assert self.ctx.custom_data["ctx_test_key"] == "ctx_test_value"
class TestCallbackBase:
    @pytest.fixture
    def llm_config(self):
        """Build an LLMConfig wired to TestingCallback."""
        return LLMConfig(
            model_loading_config=ModelLoadingConfig(model_id="test-model"),
            llm_engine="vLLM",
            callback_config={
                "callback_class": TestingCallback,
                "callback_kwargs": {"kwargs_test_key": "kwargs_test_value"},
            },
        )

    def test_callback_methods_called(self, llm_config):
        """Test that callback methods are called during initialization."""

        # Run initialization
        async def run_initialization():
            callback = llm_config.get_or_create_callback()
            await callback.run_callback("on_before_node_init")
            if callback.ctx.run_init_node:
                raise Exception("run_init_node is True")
            await callback.run_callback("on_after_node_init")

        asyncio.run(run_initialization())

        # Verify callback was created and methods were called
        callback = llm_config.get_or_create_callback()
        assert callback is not None
        assert isinstance(callback, TestingCallback)
        assert callback.before_init_called is True
        assert callback.after_init_called is True

    def test_callback_singleton_behavior(self, llm_config):
        """Test that callback instance is cached (singleton pattern)."""
        first = llm_config.get_or_create_callback()
        second = llm_config.get_or_create_callback()
        # Repeated lookups must yield the same cached instance.
        assert first is second

    def test_callback_must_inherit_from_callback_class(self):
        """Test that callback_class must be a subclass of Callback, not just implement the same methods."""

        class FakeCallback:
            """A class that implements the same methods as Callback but doesn't inherit from it."""

            def __init__(self, **kwargs):
                pass

            async def on_before_node_init(self):
                pass

            async def on_after_node_init(self):
                pass

        # Should raise an error when trying to create callback
        with pytest.raises(Exception, match="is-subclass"):
            LLMConfig(
                model_loading_config=ModelLoadingConfig(model_id="test-model"),
                llm_engine="vLLM",
                callback_config={
                    "callback_class": FakeCallback,
                    "callback_kwargs": {},
                },
            )
if __name__ == "__main__":
    # Allow running this test module directly.
    pytest.main(["-v", __file__])
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/tests/common/utils/test_callback_base.py",
"license": "Apache License 2.0",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:release/train_tests/huggingface_accelerate/test_huggingface_accelerate.py | import tempfile
import torch
import evaluate
from datasets import load_dataset
from transformers import (
AutoTokenizer,
AutoModelForSequenceClassification,
AdamW,
get_linear_schedule_with_warmup,
)
from accelerate import Accelerator
import ray
import ray.train
from ray.train import Checkpoint, ScalingConfig
from ray.train.torch import TorchTrainer
def train_func():
    """Per-worker training loop executed by Ray Train.

    Fine-tunes BERT on a tiny Yelp-review subset with Hugging Face
    Accelerate, then reports metrics (and, from the main process, a
    checkpoint) back to Ray Train after every epoch.
    """
    # Instantiate the accelerator
    accelerator = Accelerator()

    # Datasets
    dataset = load_dataset("yelp_review_full")
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")

    def tokenize_function(examples):
        # Tokenize the review text; copy the label into "labels", the key
        # the sequence-classification model expects for loss computation.
        outputs = tokenizer(examples["text"], padding="max_length", truncation=True)
        outputs["labels"] = examples["label"]
        return outputs

    # Only 100 rows per split: this is a smoke-test-sized workload.
    small_train_dataset = (
        dataset["train"].select(range(100)).map(tokenize_function, batched=True)
    )
    small_eval_dataset = (
        dataset["test"].select(range(100)).map(tokenize_function, batched=True)
    )

    # Remove unwanted columns and convert datasets to PyTorch format
    columns_to_remove = [
        "text",
        "label",
    ]  # Remove original columns, keep tokenized ones
    small_train_dataset = small_train_dataset.remove_columns(columns_to_remove)
    small_eval_dataset = small_eval_dataset.remove_columns(columns_to_remove)
    small_train_dataset.set_format("torch")
    small_eval_dataset.set_format("torch")

    # Create data loaders
    train_dataloader = torch.utils.data.DataLoader(
        small_train_dataset, batch_size=16, shuffle=True
    )
    eval_dataloader = torch.utils.data.DataLoader(
        small_eval_dataset, batch_size=16, shuffle=False
    )

    # Model
    model = AutoModelForSequenceClassification.from_pretrained(
        "bert-base-cased", num_labels=5
    )

    # Optimizer and scheduler
    optimizer = AdamW(model.parameters(), lr=2e-5)
    num_training_steps = len(train_dataloader) * 3  # 3 epochs
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=0,
        num_training_steps=num_training_steps,
    )

    # Prepare everything for distributed training
    (
        model,
        optimizer,
        train_dataloader,
        eval_dataloader,
        lr_scheduler,
    ) = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Evaluation metric
    metric = evaluate.load("accuracy")

    # Start training
    num_epochs = 3
    for epoch in range(num_epochs):
        # Training
        model.train()
        total_loss = 0
        for batch in train_dataloader:
            outputs = model(**batch)
            loss = outputs.loss
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            total_loss += loss.item()

        # Evaluation
        model.eval()
        for batch in eval_dataloader:
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # Gather predictions/labels across all workers so the metric is
            # computed over the full eval set.
            predictions, references = accelerator.gather_for_metrics(
                (predictions, batch["labels"])
            )
            metric.add_batch(predictions=predictions, references=references)
        eval_results = metric.compute()
        accelerator.print(f"Epoch {epoch + 1}: {eval_results}")

        # Report metrics and checkpoint to Ray Train
        metrics = {
            "epoch": epoch + 1,
            "train_loss": total_loss / len(train_dataloader),
            "eval_accuracy": eval_results["accuracy"],
        }

        # Create checkpoint. Only the main process saves one; other ranks
        # report metrics with no checkpoint attached.
        with tempfile.TemporaryDirectory() as tmpdir:
            if accelerator.is_main_process:
                unwrapped_model = accelerator.unwrap_model(model)
                unwrapped_model.save_pretrained(tmpdir)
                tokenizer.save_pretrained(tmpdir)
                checkpoint = Checkpoint.from_directory(tmpdir)
            else:
                checkpoint = None
            ray.train.report(metrics=metrics, checkpoint=checkpoint)
def test_huggingface_accelerate():
    """End-to-end release test: run `train_func` on 4 GPU workers, then
    reload the resulting checkpoint to verify it is usable."""
    # Define a Ray TorchTrainer to launch `train_func` on all workers
    trainer = TorchTrainer(
        train_func,
        scaling_config=ScalingConfig(num_workers=4, use_gpu=True),
        # If running in a multi-node cluster, this is where you
        # should configure the run's persistent storage that is accessible
        # across all worker nodes.
        run_config=ray.train.RunConfig(
            storage_path="/mnt/cluster_storage/huggingface_accelerate_run"
        ),
    )
    result: ray.train.Result = trainer.fit()

    # Verify training completed successfully
    assert result.metrics is not None
    assert "eval_accuracy" in result.metrics
    assert result.checkpoint is not None

    # Load the trained model from checkpoint
    # (smoke check that the saved artifacts are loadable; values unused)
    with result.checkpoint.as_directory() as checkpoint_dir:
        model = AutoModelForSequenceClassification.from_pretrained(  # noqa: F841
            checkpoint_dir
        )
        tokenizer = AutoTokenizer.from_pretrained(checkpoint_dir)  # noqa: F841
# Allow running the release test directly as a script.
if __name__ == "__main__":
    test_huggingface_accelerate()
| {
"repo_id": "ray-project/ray",
"file_path": "release/train_tests/huggingface_accelerate/test_huggingface_accelerate.py",
"license": "Apache License 2.0",
"lines": 138,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:release/train_tests/huggingface_transformers/test_huggingface_transformers.py | import os
import numpy as np
import evaluate
from datasets import load_dataset
from transformers import (
Trainer,
TrainingArguments,
AutoTokenizer,
AutoModelForSequenceClassification,
)
import ray.train.huggingface.transformers
from ray.train import ScalingConfig
from ray.train.torch import TorchTrainer
# [1] Encapsulate data preprocessing, training, and evaluation
# logic in a training function
# ============================================================
def train_func():
    """Per-worker loop: fine-tune BERT on a small Yelp subset with the HF Trainer.

    The numbered `[N]` comments are doc-include anchors; keep them intact.
    """
    # Datasets
    dataset = load_dataset("yelp_review_full")
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")

    def tokenize_function(examples):
        return tokenizer(examples["text"], padding="max_length", truncation=True)

    # 100-row subsets keep this release test fast.
    small_train_dataset = (
        dataset["train"].select(range(100)).map(tokenize_function, batched=True)
    )
    small_eval_dataset = (
        dataset["test"].select(range(100)).map(tokenize_function, batched=True)
    )

    # Model
    model = AutoModelForSequenceClassification.from_pretrained(
        "bert-base-cased", num_labels=5
    )

    # Evaluation Metrics
    metric = evaluate.load("accuracy")

    def compute_metrics(eval_pred):
        logits, labels = eval_pred
        predictions = np.argmax(logits, axis=-1)
        return metric.compute(predictions=predictions, references=labels)

    # Hugging Face Trainer
    training_args = TrainingArguments(
        output_dir="test_trainer",
        evaluation_strategy="epoch",
        save_strategy="epoch",
        report_to="none",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=small_train_dataset,
        eval_dataset=small_eval_dataset,
        compute_metrics=compute_metrics,
    )

    # [2] Report Metrics and Checkpoints to Ray Train
    # ===============================================
    callback = ray.train.huggingface.transformers.RayTrainReportCallback()
    trainer.add_callback(callback)

    # [3] Prepare Transformers Trainer
    # ================================
    trainer = ray.train.huggingface.transformers.prepare_trainer(trainer)

    # Start Training
    trainer.train()
def test_huggingface_transformers():
    """End-to-end release test: 4-GPU-worker fine-tune, then reload the checkpoint."""
    # [4] Define a Ray TorchTrainer to launch `train_func` on all workers
    # ===================================================================
    ray_trainer = TorchTrainer(
        train_func,
        scaling_config=ScalingConfig(num_workers=4, use_gpu=True),
        # [4a] For multi-node clusters, configure persistent storage that is
        # accessible across all worker nodes
        run_config=ray.train.RunConfig(
            storage_path="/mnt/cluster_storage/huggingface_run"
        ),
    )
    result: ray.train.Result = ray_trainer.fit()

    # [5] Load the trained model
    # (smoke check that the checkpoint contents are loadable; values unused)
    with result.checkpoint.as_directory() as checkpoint_dir:
        checkpoint_path = os.path.join(  # noqa: F841
            checkpoint_dir,
            ray.train.huggingface.transformers.RayTrainReportCallback.CHECKPOINT_NAME,
        )
        model = AutoModelForSequenceClassification.from_pretrained(  # noqa: F841
            checkpoint_path
        )
# Allow running the release test directly as a script.
if __name__ == "__main__":
    test_huggingface_transformers()
| {
"repo_id": "ray-project/ray",
"file_path": "release/train_tests/huggingface_transformers/test_huggingface_transformers.py",
"license": "Apache License 2.0",
"lines": 85,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:release/train_tests/pytorch_lightning/test_lightning.py | import os
import tempfile
import torch
from torch.utils.data import DataLoader
from torchvision.models import resnet18
from torchvision.datasets import FashionMNIST
from torchvision.transforms import ToTensor, Normalize, Compose
import lightning.pytorch as pl
import ray.train.lightning
from ray.train.torch import TorchTrainer
# Model, Loss, Optimizer
class ImageClassifier(pl.LightningModule):
    """ResNet-18 Lightning module for 10-class, single-channel FashionMNIST."""

    def __init__(self):
        super().__init__()
        self.model = resnet18(num_classes=10)
        # Replace the stem convolution so the network accepts 1-channel
        # (grayscale) input instead of the default 3-channel RGB.
        self.model.conv1 = torch.nn.Conv2d(
            1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False
        )
        self.criterion = torch.nn.CrossEntropyLoss()

    def forward(self, x):
        # Delegate straight to the wrapped ResNet.
        return self.model(x)

    def training_step(self, batch, batch_idx):
        # Standard supervised step: forward, cross-entropy loss, log per step.
        x, y = batch
        outputs = self.forward(x)
        loss = self.criterion(outputs, y)
        self.log("loss", loss, on_step=True, prog_bar=True)
        return loss

    def configure_optimizers(self):
        return torch.optim.Adam(self.model.parameters(), lr=0.001)
def train_func():
    """Per-worker loop: train the FashionMNIST classifier with Lightning + Ray DDP."""
    # Data
    transform = Compose([ToTensor(), Normalize((0.28604,), (0.32025,))])
    data_dir = os.path.join(tempfile.gettempdir(), "data")
    train_data = FashionMNIST(
        root=data_dir, train=True, download=True, transform=transform
    )
    train_dataloader = DataLoader(train_data, batch_size=128, shuffle=True)

    # Training
    model = ImageClassifier()
    # [1] Configure PyTorch Lightning Trainer.
    trainer = pl.Trainer(
        max_epochs=10,
        devices="auto",
        accelerator="auto",
        strategy=ray.train.lightning.RayDDPStrategy(),
        plugins=[ray.train.lightning.RayLightningEnvironment()],
        callbacks=[ray.train.lightning.RayTrainReportCallback()],
        # [1a] Optionally, disable the default checkpointing behavior
        # in favor of the `RayTrainReportCallback` above.
        enable_checkpointing=False,
    )
    trainer = ray.train.lightning.prepare_trainer(trainer)
    trainer.fit(model, train_dataloaders=train_dataloader)
def test_lightning_train_run():
    """End-to-end release test: 4-GPU distributed Lightning run + checkpoint reload."""
    # [2] Configure scaling and resource requirements.
    scaling_config = ray.train.ScalingConfig(num_workers=4, use_gpu=True)

    # [3] Launch distributed training job.
    trainer = TorchTrainer(
        train_func,
        scaling_config=scaling_config,
        # [3a] If running in a multi-node cluster, this is where you
        # should configure the run's persistent storage that is accessible
        # across all worker nodes.
        run_config=ray.train.RunConfig(
            storage_path="/mnt/cluster_storage/lightning_run"
        ),
    )
    result: ray.train.Result = trainer.fit()

    # [4] Load the trained model (smoke check that the checkpoint is usable).
    with result.checkpoint.as_directory() as checkpoint_dir:
        model = ImageClassifier.load_from_checkpoint(  # noqa: F841
            os.path.join(
                checkpoint_dir,
                ray.train.lightning.RayTrainReportCallback.CHECKPOINT_NAME,
            ),
        )
# Allow running the release test directly as a script.
if __name__ == "__main__":
    test_lightning_train_run()
| {
"repo_id": "ray-project/ray",
"file_path": "release/train_tests/pytorch_lightning/test_lightning.py",
"license": "Apache License 2.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:doc/source/serve/doc_code/application_level_autoscaling.py | # __serve_example_begin__
import time
from ray import serve
@serve.deployment
class Preprocessor:
    """Toy preprocessing stage used by the autoscaling example."""

    def __call__(self, input_data: str) -> str:
        # Burn a fixed 50 ms to stand in for real preprocessing work.
        time.sleep(0.05)
        tagged = f"preprocessed_{input_data}"
        return tagged
@serve.deployment
class Model:
    """Toy inference stage; deliberately slower than preprocessing."""

    def __call__(self, preprocessed_data: str) -> str:
        # Burn a fixed 100 ms to stand in for model inference.
        time.sleep(0.1)
        prediction = f"result_{preprocessed_data}"
        return prediction
@serve.deployment
class Driver:
    """Fan-in deployment that chains Preprocessor -> Model."""

    def __init__(self, preprocessor, model):
        self._preprocessor = preprocessor
        self._model = model

    async def __call__(self, input_data: str) -> str:
        # Run the two stages sequentially; each `.remote` call is awaited
        # before its output feeds the next stage.
        preprocessed = await self._preprocessor.remote(input_data)
        return await self._model.remote(preprocessed)
# Compose the graph: Driver receives handles to Preprocessor and Model.
app = Driver.bind(Preprocessor.bind(), Model.bind())
# __serve_example_end__
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/doc_code/application_level_autoscaling.py",
"license": "Apache License 2.0",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/serve/tests/test_config_files/get_multi_deployment_signal_app.py | import os
import ray
from ray import serve
from ray.serve.handle import DeploymentHandle
@serve.deployment
class A:
    """Deployment that blocks on the `signal_A` actor before replying with its PID."""

    def __init__(self, b: DeploymentHandle):
        # Handle to the downstream deployment; unused in __call__, but its
        # presence makes B part of this app's deployment graph.
        self.b = b
        self.signal = ray.get_actor("signal_A", namespace="default_test_namespace")

    async def __call__(self):
        # Block until the test releases the signal, then report our PID.
        await self.signal.wait.remote()
        return os.getpid()
@serve.deployment
class B:
    """Deployment that blocks on the `signal_B` actor before replying with its PID."""

    def __init__(self):
        self.signal = ray.get_actor("signal_B", namespace="default_test_namespace")

    async def __call__(self):
        # Block until the test releases the signal, then report our PID.
        await self.signal.wait.remote()
        return os.getpid()
app = A.bind(B.bind())
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/test_config_files/get_multi_deployment_signal_app.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:doc/source/train/doc_code/asynchronous_validation.py | # __validation_fn_simple_start__
import os
import torch
import ray.train
import ray.data
# Define Ray Data validation dataset outside validation function because it is not json serializable
validation_dataset = ...  # placeholder: e.g. ray.data.read_parquet(...)
def validation_fn(checkpoint: ray.train.Checkpoint) -> dict:
    """Score a checkpoint against `validation_dataset`.

    Returns a dict with a single "score" entry (top-1 accuracy).
    """
    # Load the checkpoint
    model = ...
    with checkpoint.as_directory() as checkpoint_dir:
        model_state_dict = torch.load(os.path.join(checkpoint_dir, "model.pt"))
        model.load_state_dict(model_state_dict)
    model.eval()

    # Perform validation on the data
    total_accuracy = 0
    with torch.no_grad():
        for batch in validation_dataset.iter_torch_batches(batch_size=128):
            images, labels = batch["image"], batch["label"]
            outputs = model(images)
            total_accuracy += (outputs.argmax(1) == labels).sum().item()
    # NOTE(review): if validation_dataset is a ray.data.Dataset, len() is not
    # supported — .count() would be needed instead; confirm intended type.
    return {"score": total_accuracy / len(validation_dataset)}
# __validation_fn_simple_end__
# __validation_fn_torch_trainer_start__
import torchmetrics
from torch.nn import CrossEntropyLoss
import ray.train.torch
def eval_only_train_fn(config_dict: dict) -> None:
    """TorchTrainer loop that only evaluates the checkpoint in `config_dict` (no training)."""
    # Load the checkpoint
    model = ...
    with config_dict["checkpoint"].as_directory() as checkpoint_dir:
        model_state_dict = torch.load(os.path.join(checkpoint_dir, "model.pt"))
        model.load_state_dict(model_state_dict)
    model.cuda().eval()

    # Set up metrics and data loaders
    criterion = CrossEntropyLoss()
    mean_valid_loss = torchmetrics.MeanMetric().cuda()
    test_data_shard = ray.train.get_dataset_shard("validation")
    test_dataloader = test_data_shard.iter_torch_batches(batch_size=128)

    # Compute and report metric
    with torch.no_grad():
        for batch in test_dataloader:
            images, labels = batch["image"], batch["label"]
            outputs = model(images)
            loss = criterion(outputs, labels)
            mean_valid_loss(loss)
    # NO_UPLOAD: report() needs a checkpoint object to attach metrics to, but
    # the placeholder path is never actually uploaded.
    ray.train.report(
        metrics={"score": mean_valid_loss.compute().item()},
        checkpoint=ray.train.Checkpoint(
            ray.train.get_context()
            .get_storage()
            .build_checkpoint_path_from_name("placeholder")
        ),
        checkpoint_upload_mode=ray.train.CheckpointUploadMode.NO_UPLOAD,
    )
def validation_fn(checkpoint: ray.train.Checkpoint, train_run_name: str, epoch: int) -> dict:
    """Validate a checkpoint in a dedicated TorchTrainer run and return its metrics."""
    trainer = ray.train.torch.TorchTrainer(
        eval_only_train_fn,
        train_loop_config={"checkpoint": checkpoint},
        # Use weaker GPUs for validation than for training (A10G here).
        scaling_config=ray.train.ScalingConfig(
            num_workers=2, use_gpu=True, accelerator_type="A10G"
        ),
        # Name validation run to easily associate it with training run
        run_config=ray.train.RunConfig(
            name=f"{train_run_name}_validation_epoch_{epoch}"
        ),
        datasets={"validation": validation_dataset},
    )
    result = trainer.fit()
    return result.metrics
# __validation_fn_torch_trainer_end__
# __validation_fn_map_batches_start__
class Predictor:
    """map_batches callable that scores batches against a checkpointed model on GPU."""

    def __init__(self, checkpoint: ray.train.Checkpoint):
        self.model = ...
        with checkpoint.as_directory() as checkpoint_dir:
            model_state_dict = torch.load(os.path.join(checkpoint_dir, "model.pt"))
            self.model.load_state_dict(model_state_dict)
        self.model.cuda().eval()

    def __call__(self, batch: dict) -> dict:
        image = torch.as_tensor(batch["image"], dtype=torch.float32, device="cuda")
        label = torch.as_tensor(batch["label"], dtype=torch.float32, device="cuda")
        pred = self.model(image)
        # Per-sample correctness flags; averaged by the caller via Dataset.mean.
        return {"res": (pred.argmax(1) == label).cpu().numpy()}
def validation_fn(checkpoint: ray.train.Checkpoint) -> dict:
    """Validate a checkpoint via ray.data batch inference on two GPU workers."""
    # Set name to avoid confusion; default name is "Dataset"
    validation_dataset.set_name("validation")
    eval_res = validation_dataset.map_batches(
        Predictor,
        batch_size=128,
        num_gpus=1,
        fn_constructor_kwargs={"checkpoint": checkpoint},
        concurrency=2,
    )
    # Mean of the boolean "res" column, i.e. overall accuracy.
    mean = eval_res.mean(["res"])
    return {
        "score": mean,
    }
# __validation_fn_map_batches_end__
# __validation_fn_report_start__
import tempfile
from ray.train import ValidationConfig, ValidationTaskConfig
def train_func(config: dict) -> None:
    """Training-loop skeleton showing async checkpoint upload + validation tasks."""
    ...
    epochs = ...
    model = ...
    rank = ray.train.get_context().get_world_rank()
    for epoch in epochs:
        ...  # training step
        if rank == 0:
            training_metrics = {"loss": ..., "epoch": epoch}
            # NOTE(review): the mkdtemp directory is not cleaned up here;
            # presumably reclaimed after the async upload — confirm.
            local_checkpoint_dir = tempfile.mkdtemp()
            torch.save(
                model.module.state_dict(),
                os.path.join(local_checkpoint_dir, "model.pt"),
            )
            ray.train.report(
                training_metrics,
                checkpoint=ray.train.Checkpoint.from_directory(local_checkpoint_dir),
                checkpoint_upload_mode=ray.train.CheckpointUploadMode.ASYNC,
                # fn_kwargs are forwarded to the configured validation function.
                validation=ValidationTaskConfig(fn_kwargs={
                    "train_run_name": ray.train.get_context().get_experiment_name(),
                    "epoch": epoch,
                }),
            )
        else:
            # Every rank must call report() to stay in sync with rank 0.
            ray.train.report({}, None)
def run_trainer() -> ray.train.Result:
    """Launch the training run with asynchronous checkpoint validation configured."""
    train_dataset = ray.data.read_parquet(...)
    trainer = ray.train.torch.TorchTrainer(
        train_func,
        validation_config=ValidationConfig(fn=validation_fn),
        # Pass training dataset in datasets arg to split it across training workers
        datasets={"train": train_dataset},
        scaling_config=ray.train.ScalingConfig(
            num_workers=2,
            use_gpu=True,
            # Use powerful GPUs for training
            accelerator_type="A100",
        ),
    )
    return trainer.fit()
# __validation_fn_report_end__
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/train/doc_code/asynchronous_validation.py",
"license": "Apache License 2.0",
"lines": 144,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/data/_internal/execution/operators/sub_progress.py | import typing
from abc import ABC, abstractmethod
from typing import List, Optional
if typing.TYPE_CHECKING:
from ray.data._internal.progress.base_progress import BaseProgressBar
class SubProgressBarMixin(ABC):
    """Abstract class for operators that support sub-progress bars."""

    @abstractmethod
    def get_sub_progress_bar_names(self) -> Optional[List[str]]:
        """Return the list of sub-progress bar names.

        This is used to create the sub-progress bars in the progress manager.
        Note that sub-progress bars will be created in the order returned by
        this method.
        """
        ...

    @abstractmethod
    def set_sub_progress_bar(self, name: str, pg: "BaseProgressBar"):
        """Set the sub-progress bar registered under ``name``.

        Args:
            name: name of sub-progress bar.
            pg: a progress bar. Can be sub-progress bars for rich, tqdm, etc.
        """
        ...
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/execution/operators/sub_progress.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:release/train_tests/async_checkpointing_validation_benchmark/test_async_checkpointing_validation_benchmark.py | from enum import Enum
import logging
import os
import tempfile
import time
import torch
import torchmetrics
from torch.nn import CrossEntropyLoss
from torch.optim import Adam
from torchvision import transforms
from torchvision.models import VisionTransformer
from torchvision.transforms import ToTensor, Normalize
import ray
import ray.train
import ray.train.torch
from ray.train import CheckpointUploadMode, ValidationConfig, ValidationTaskConfig
from ray._private.test_utils import safe_write_to_results_json
logger = logging.getLogger(__name__)
class ValidationType(Enum):
    """Strategy used to validate checkpoints during the benchmark."""

    INLINE = "inline"  # validate inside the training loop (blocking)
    TORCH_TRAINER = "torch_trainer"  # separate TorchTrainer run per checkpoint
    MAP_BATCHES = "map_batches"  # ray.data batch-inference validation
# Tolerances used by `main` when comparing the sync and async runs.
MAXIMUM_ALLOWED_ACCURACY_DIFF = 0.2
MAXIMUM_ALLOWED_E2E_TIME_MULTIPLIER = 1.1

# ==== Start dataset and model creation ======
# Artifact storage root; falls back to a local-ish default outside Anyscale.
STORAGE_PATH_PREFIX = os.environ.get("ANYSCALE_ARTIFACT_STORAGE", "artifact_storage")
STORAGE_PATH = f"{STORAGE_PATH_PREFIX}/ray_summit_24_train_demo"
# Normalization pipeline built once at import time. The previous version
# rebuilt the Compose object for every dataset row inside transform_cifar.
_CIFAR_TRANSFORM = transforms.Compose(
    [ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
)


def transform_cifar(row: dict):
    """Convert a CIFAR row's image to a normalized CHW float tensor.

    Mutates and returns the row dict, as expected by ``Dataset.map``.
    """
    row["image"] = _CIFAR_TRANSFORM(row["image"])
    return row
# CIFAR-10 test split from shared artifact storage; transformed lazily.
validation_dataset = ray.data.read_parquet(f"{STORAGE_PATH}/cifar10-parquet/test").map(
    transform_cifar
)
def create_model():
    """Build the VisionTransformer classifier used for both training and validation."""
    return VisionTransformer(
        image_size=32,  # CIFAR-10 image size is 32x32
        patch_size=4,  # Patch size is 4x4
        num_layers=24,  # Number of transformer layers
        num_heads=8,  # Number of attention heads
        hidden_dim=384,  # Hidden size (can be adjusted)
        mlp_dim=768,  # MLP dimension (can be adjusted)
        num_classes=10,  # CIFAR-10 has 10 classes
    )
# ==== End dataset and model creation ======
# ==== Start map_batches approach ======
class Predictor:
    """map_batches callable: scores batches against a checkpointed model on GPU."""

    def __init__(self, checkpoint):
        self.model = create_model()
        with checkpoint.as_directory() as checkpoint_dir:
            model_state_dict = torch.load(os.path.join(checkpoint_dir, "model.pt"))
            self.model.load_state_dict(model_state_dict)
        self.model.cuda().eval()

    def __call__(self, batch):
        image = torch.as_tensor(batch["image"], dtype=torch.float32, device="cuda")
        label = torch.as_tensor(batch["label"], dtype=torch.int8, device="cuda")
        pred = self.model(image)
        # Per-sample correctness flags; averaged later via Dataset.mean.
        return {"res": (pred.argmax(1) == label).cpu().numpy()}
def validate_with_map_batches(checkpoint):
    """Validate a checkpoint with ray.data map_batches; return score + wall time."""
    start_time = time.time()
    eval_res = validation_dataset.map_batches(
        Predictor,
        batch_size=128,
        num_gpus=1,
        fn_constructor_kwargs={"checkpoint": checkpoint},
        concurrency=2,
    )
    # Mean of the boolean "res" column, i.e. overall accuracy.
    mean = eval_res.mean(["res"])
    return {
        "score": mean,
        "validation_time": time.time() - start_time,
    }
# ==== End map_batches approach ======
# ==== Start TorchTrainer approach ======
def eval_only_train_func(config_dict):
    """TorchTrainer loop that only evaluates the given checkpoint (no training)."""
    # Load the checkpoint
    model = create_model()
    with config_dict["checkpoint"].as_directory() as checkpoint_dir:
        model_state_dict = torch.load(os.path.join(checkpoint_dir, "model.pt"))
        model.load_state_dict(model_state_dict)
    model.cuda().eval()

    # Get the data
    test_data_shard = ray.train.get_dataset_shard("test")
    test_dataloader = test_data_shard.iter_torch_batches(batch_size=128)

    # Report metrics with dummy checkpoint: report() needs a checkpoint object
    # to attach metrics to, but NO_UPLOAD means nothing is actually uploaded.
    mean_acc = torchmetrics.Accuracy(task="multiclass", num_classes=10, top_k=1).cuda()
    with torch.no_grad():
        for batch in test_dataloader:
            images, labels = batch["image"], batch["label"]
            outputs = model(images)
            mean_acc(outputs.argmax(1), labels)
    ray.train.report(
        metrics={"score": mean_acc.compute().item()},
        checkpoint=ray.train.Checkpoint(
            ray.train.get_context()
            .get_storage()
            .build_checkpoint_path_from_name("placeholder")
        ),
        checkpoint_upload_mode=CheckpointUploadMode.NO_UPLOAD,
    )
def validate_with_torch_trainer(checkpoint, parent_run_name, epoch, batch_idx):
    """Validate a checkpoint in a fresh 2-GPU TorchTrainer run; return score + wall time."""
    start_time = time.time()
    trainer = ray.train.torch.TorchTrainer(
        eval_only_train_func,
        train_loop_config={"checkpoint": checkpoint},
        scaling_config=ray.train.ScalingConfig(num_workers=2, use_gpu=True),
        datasets={"test": validation_dataset},
        run_config=ray.train.RunConfig(
            # Tie the validation run's name back to its parent training run.
            name=f"{parent_run_name}-validation_epoch={epoch}_batch_idx={batch_idx}"
        ),
    )
    result = trainer.fit()
    return {
        "score": result.metrics["score"],
        "validation_time": time.time() - start_time,
    }
# ==== End TorchTrainer approach ======
def validate_and_report(
    model,
    epoch,
    batch_idx,
    blocked_times,
    config,
    loss,
):
    """Optionally run inline validation, then report metrics/checkpoint from rank 0.

    Appends the time spent blocked inside `ray.train.report` to
    `blocked_times` (rank 0 only).
    """
    validate_within_trainer = config["validate_within_trainer"]
    num_epochs = config["num_epochs"]
    checkpoint_upload_mode = config["checkpoint_upload_mode"]
    validation_type = config["validation_type"]
    if validate_within_trainer:
        test_dataloader = ray.train.get_dataset_shard("test").iter_torch_batches(
            batch_size=128
        )

    # Validate model within training loop
    val_elapsed_time = None
    if validate_within_trainer:
        val_start_time = time.time()
        mean_acc = torchmetrics.Accuracy(
            task="multiclass", num_classes=10, top_k=1
        ).cuda()
        model.eval()
        with torch.no_grad():
            for batch in test_dataloader:
                X, y = batch["image"], batch["label"]
                outputs = model(X)
                mean_acc(outputs.argmax(1), y)
        val_elapsed_time = time.time() - val_start_time

    # Report metrics + checkpoint + validate
    metrics = {"loss": loss.item(), "epoch": epoch}
    # Only attach the accuracy score on the final epoch so `main` can read a
    # single final score per run.
    if validate_within_trainer and epoch == num_epochs - 1:
        metrics["score"] = mean_acc.compute().item()
    if ray.train.get_context().get_world_rank() == 0:
        if val_elapsed_time:
            metrics["validation_time"] = val_elapsed_time
        iteration_checkpoint_dir = tempfile.mkdtemp()
        torch.save(
            model.module.state_dict(),
            os.path.join(iteration_checkpoint_dir, "model.pt"),
        )
        start_time = time.time()
        # Map the benchmark's validation strategy onto report()'s
        # `validation` argument.
        if validation_type == ValidationType.TORCH_TRAINER:
            validation = ValidationTaskConfig(
                fn_kwargs={
                    "parent_run_name": ray.train.get_context().get_experiment_name(),
                    "epoch": epoch,
                    "batch_idx": batch_idx,
                }
            )
        elif validation_type == ValidationType.MAP_BATCHES:
            validation = True
        else:
            validation = False
        ray.train.report(
            metrics,
            checkpoint=ray.train.Checkpoint.from_directory(iteration_checkpoint_dir),
            checkpoint_upload_mode=checkpoint_upload_mode,
            validation=validation,
        )
        blocked_times.append(time.time() - start_time)
    else:
        # Non-zero ranks must still call report() to stay in sync with rank 0.
        ray.train.report({}, None)
def train_func(config):
    """Per-worker training loop: ViT on CIFAR-10 with mid- and end-of-epoch reports."""
    batch_size = 256
    num_epochs = config["num_epochs"]
    # Batch index roughly half-way through an epoch, for the mid-epoch report.
    midpoint_batch = int(config["rows_per_worker"] / batch_size / 2)

    # Prepare model, dataloader, and possibly metrics
    model = create_model()
    model = ray.train.torch.prepare_model(model)
    criterion = CrossEntropyLoss()
    optimizer = Adam(model.parameters(), lr=0.001)
    train_data_shard = ray.train.get_dataset_shard("train")
    train_dataloader = train_data_shard.iter_torch_batches(batch_size=batch_size)

    # Train / eval / report loop
    blocked_times = []
    for epoch in range(num_epochs):
        # Train model, then validate/report at midpoint and end of epoch
        model.train()
        i = 0
        for i, batch in enumerate(train_dataloader):
            images, labels = batch["image"], batch["label"]
            outputs = model(images)
            loss = criterion(outputs, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if i == midpoint_batch:
                validate_and_report(model, epoch, i, blocked_times, config, loss)
        validate_and_report(model, epoch, i, blocked_times, config, loss)

    # Report train_func metrics with dummy checkpoint since that is the only way to
    # return metrics
    if ray.train.get_context().get_world_rank() == 0:
        with tempfile.TemporaryDirectory() as temp_dir:
            ray.train.report(
                metrics={
                    "report_blocked_times": blocked_times,
                    "train_func_return_time": time.time(),
                },
                checkpoint=ray.train.Checkpoint.from_directory(temp_dir),
            )
    else:
        ray.train.report({}, None)
def run_training_with_validation(
    checkpoint_upload_mode: CheckpointUploadMode,
    validation_type: ValidationType,
    validate_within_trainer: bool,
    num_epochs: int,
    train_dataset: ray.data.Dataset,
    training_rows: int,
):
    """Run one benchmark configuration end to end and collect its metrics.

    Args:
        checkpoint_upload_mode: SYNC or ASYNC checkpoint upload.
        validation_type: How validation runs (inline / TorchTrainer / map_batches).
        validate_within_trainer: Whether workers validate inside the train loop.
        num_epochs: Number of training epochs.
        train_dataset: Training dataset, split across the two workers.
        training_rows: Total training rows (used to locate the mid-epoch report).

    Returns:
        Dict with e2e_time, final_validation_waiting_time,
        total_report_blocked_time, total_validation_time, and final_score.

    Raises:
        ValueError: If `validation_type` is not a recognized ValidationType.
    """
    # Launch distributed training job.
    start_time = time.time()
    scaling_config = ray.train.ScalingConfig(num_workers=2, use_gpu=True)
    if validation_type == ValidationType.INLINE:
        validation_fn = None
    elif validation_type == ValidationType.TORCH_TRAINER:
        validation_fn = validate_with_torch_trainer
    elif validation_type == ValidationType.MAP_BATCHES:
        validation_fn = validate_with_map_batches
    else:
        # Fail fast instead of hitting an UnboundLocalError below if a new
        # ValidationType is added without updating this dispatch.
        raise ValueError(f"Unsupported validation type: {validation_type}")
    datasets = {"train": train_dataset}
    train_loop_config = {
        "validate_within_trainer": validate_within_trainer,
        "num_epochs": num_epochs,
        "checkpoint_upload_mode": checkpoint_upload_mode,
        "rows_per_worker": training_rows / 2,
        "validation_type": validation_type,
    }
    if validate_within_trainer:
        datasets["test"] = validation_dataset
    trainer = ray.train.torch.TorchTrainer(
        train_func,
        validation_config=ValidationConfig(fn=validation_fn) if validation_fn else None,
        train_loop_config=train_loop_config,
        scaling_config=scaling_config,
        datasets=datasets,
        run_config=ray.train.RunConfig(
            storage_path="/mnt/cluster_storage",
        ),
    )
    result = trainer.fit()
    end_time = time.time()

    # Return metrics
    # TODO: consider measuring how long it takes to kick off validation,
    # how long checkpoint upload takes, distribution of times
    # The last report carries the train_func bookkeeping metrics.
    train_func_metrics = result.best_checkpoints[-1][1]
    metrics = {}
    metrics["e2e_time"] = end_time - start_time
    metrics["final_validation_waiting_time"] = (
        end_time - train_func_metrics["train_func_return_time"]
    )
    metrics["total_report_blocked_time"] = sum(
        train_func_metrics["report_blocked_times"]
    )
    metrics["total_validation_time"] = sum(
        t[1]["validation_time"] for t in result.best_checkpoints[:-1]
    )
    metrics["final_score"] = result.best_checkpoints[-2][1]["score"]
    return metrics
def main():
    """Run the three benchmark configurations and assert their relative performance.

    Compares sync checkpointing + inline validation against async
    checkpointing with TorchTrainer- and map_batches-based validation,
    writes the metrics to the release-test results JSON, then asserts
    accuracy parity and the expected timing relationships.
    """
    train_dataset = ray.data.read_parquet(f"{STORAGE_PATH}/cifar10-parquet/train").map(
        transform_cifar
    )
    training_rows = train_dataset.count()
    consolidated_metrics = {}
    num_epochs = 10
    consolidated_metrics["sync_cp_inline_val_metrics"] = run_training_with_validation(
        CheckpointUploadMode.SYNC,
        ValidationType.INLINE,
        True,
        num_epochs,
        train_dataset,
        training_rows,
    )
    consolidated_metrics[
        "async_cp_torch_trainer_val_metrics"
    ] = run_training_with_validation(
        CheckpointUploadMode.ASYNC,
        ValidationType.TORCH_TRAINER,
        False,
        num_epochs,
        train_dataset,
        training_rows,
    )
    consolidated_metrics[
        "async_cp_map_batches_val_metrics"
    ] = run_training_with_validation(
        CheckpointUploadMode.ASYNC,
        ValidationType.MAP_BATCHES,
        False,
        num_epochs,
        train_dataset,
        training_rows,
    )
    logger.info(consolidated_metrics)
    safe_write_to_results_json(consolidated_metrics)

    # Assert final scores aren't too far off, which would imply an inaccurate comparison
    # Example value: 0.55
    sync_final_score = consolidated_metrics["sync_cp_inline_val_metrics"]["final_score"]
    async_torchtrainer_final_score = consolidated_metrics[
        "async_cp_torch_trainer_val_metrics"
    ]["final_score"]
    async_map_batches_final_score = consolidated_metrics[
        "async_cp_map_batches_val_metrics"
    ]["final_score"]
    assert (
        abs(sync_final_score - async_torchtrainer_final_score)
        < MAXIMUM_ALLOWED_ACCURACY_DIFF
        and abs(sync_final_score - async_map_batches_final_score)
        < MAXIMUM_ALLOWED_ACCURACY_DIFF
    )

    # Assert async checkpointing/validation e2e time is faster; add multiplier to account for training time variance
    # Example values: 1385s vs 1317s vs 1304s
    sync_e2e_time = consolidated_metrics["sync_cp_inline_val_metrics"]["e2e_time"]
    async_torchtrainer_e2e_time = consolidated_metrics[
        "async_cp_torch_trainer_val_metrics"
    ]["e2e_time"]
    async_map_batches_e2e_time = consolidated_metrics[
        "async_cp_map_batches_val_metrics"
    ]["e2e_time"]
    assert (
        async_torchtrainer_e2e_time
        < sync_e2e_time * MAXIMUM_ALLOWED_E2E_TIME_MULTIPLIER
        and async_map_batches_e2e_time
        < sync_e2e_time * MAXIMUM_ALLOWED_E2E_TIME_MULTIPLIER
    )

    # map_batches is faster than TorchTrainer. Note that inline is the fastest but is blocking
    # Example values: 92s vs 387s vs 264s (gap between sync and async smaller if more data)
    sync_validation_time = consolidated_metrics["sync_cp_inline_val_metrics"][
        "total_validation_time"
    ]

    # Assert report blocking time is (way) less with async checkpointing
    # Example values: 3.66s vs 0.033s
    sync_report_blocked_time = consolidated_metrics["sync_cp_inline_val_metrics"][
        "total_report_blocked_time"
    ]
    async_torchtrainer_report_blocked_time = consolidated_metrics[
        "async_cp_torch_trainer_val_metrics"
    ]["total_report_blocked_time"]
    async_map_batches_report_blocked_time = consolidated_metrics[
        "async_cp_map_batches_val_metrics"
    ]["total_report_blocked_time"]
    assert (
        async_torchtrainer_report_blocked_time < sync_report_blocked_time
        and async_map_batches_report_blocked_time < sync_report_blocked_time
    )

    # Assert sync blocking time (report + validation + final validation) is less than async blocking time (report + final validation)
    # Example values of final validation blocking time: 40s vs 26s
    sync_final_validation_blocking_time = consolidated_metrics[
        "sync_cp_inline_val_metrics"
    ]["final_validation_waiting_time"]
    async_torchtrainer_final_validation_blocking_time = consolidated_metrics[
        "async_cp_torch_trainer_val_metrics"
    ]["final_validation_waiting_time"]
    async_map_batches_final_validation_blocking_time = consolidated_metrics[
        "async_cp_map_batches_val_metrics"
    ]["final_validation_waiting_time"]
    sync_blocking_time = (
        sync_report_blocked_time
        + sync_validation_time
        + sync_final_validation_blocking_time
    )
    async_torchtrainer_blocking_time = (
        async_torchtrainer_report_blocked_time
        + async_torchtrainer_final_validation_blocking_time
    )
    async_map_batches_blocking_time = (
        async_map_batches_report_blocked_time
        + async_map_batches_final_validation_blocking_time
    )
    assert (
        sync_blocking_time > async_torchtrainer_blocking_time
        and sync_blocking_time > async_map_batches_blocking_time
    )

    # TODO: consider correctness checks like validating that local checkpoints get deleted
    # TODO: track validation startup metrics: schedule validation task, autoscale nodes,
    # start TorchTrainer/map_batches, load checkpoint.
# Entry point for the release-test runner.
if __name__ == "__main__":
    main()
| {
"repo_id": "ray-project/ray",
"file_path": "release/train_tests/async_checkpointing_validation_benchmark/test_async_checkpointing_validation_benchmark.py",
"license": "Apache License 2.0",
"lines": 399,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:doc/source/ray-overview/examples/llamafactory-llm-fine-tune/ci/nb2py.py | #!/usr/bin/env python3
import argparse
import nbformat
def convert_notebook(
    input_path: str, output_path: str, ignore_cmds: bool = False
) -> None:
    """
    Read a Jupyter notebook and write a Python script, converting all %%bash
    cells and IPython "!" commands into subprocess.run calls that raise on error.
    Cells that load or autoreload extensions are ignored.

    Args:
        input_path: Path to the input .ipynb notebook.
        output_path: Path of the .py script to write.
        ignore_cmds: When True, drop %%bash cells and "!" commands entirely
            instead of converting them to subprocess.run calls.
    """
    nb = nbformat.read(input_path, as_version=4)
    with open(output_path, "w") as out:
        for cell in nb.cells:
            # Only process code cells
            if cell.cell_type != "code":
                continue
            lines = cell.source.splitlines()
            # Skip cells that load or autoreload extensions
            if any(
                l.strip().startswith("%load_ext autoreload")
                or l.strip().startswith("%autoreload all")
                for l in lines
            ):
                continue
            # Detect a %%bash cell
            if lines and lines[0].strip().startswith("%%bash"):
                if ignore_cmds:
                    continue
                # Re-emit the whole cell body (minus the %%bash line) as one
                # checked subprocess call; the raw triple-quoted string keeps
                # the script verbatim in the generated file.
                bash_script = "\n".join(lines[1:]).rstrip()
                out.write("import subprocess\n")
                out.write(
                    f"subprocess.run(r'''{bash_script}''',\n"
                    " shell=True,\n"
                    " check=True,\n"
                    " executable='/bin/bash')\n\n"
                )
            else:
                # Detect any IPython '!' shell commands in code lines
                has_bang = any(line.lstrip().startswith("!") for line in lines)
                if has_bang:
                    if ignore_cmds:
                        continue
                    out.write("import subprocess\n")
                    for line in lines:
                        stripped = line.lstrip()
                        if stripped.startswith("!"):
                            # Convert a single "!cmd" line to subprocess.run.
                            cmd = stripped[1:].lstrip()
                            out.write(
                                f"subprocess.run(r'''{cmd}''',\n"
                                " shell=True,\n"
                                " check=True,\n"
                                " executable='/bin/bash')\n"
                            )
                        else:
                            out.write(line.rstrip() + "\n")
                    out.write("\n")
                else:
                    # Regular Python cell:
                    code = cell.source.rstrip()
                    if "USE_RAY=1 llamafactory-cli train" in code:
                        continue  # Skip this training cell due to expiring experiments monitoring library tokens
                    # else, dump as-is
                    out.write(cell.source.rstrip() + "\n\n")
def main() -> None:
    """CLI entry point: parse arguments and run the notebook conversion."""
    arg_parser = argparse.ArgumentParser(
        description="Convert a Jupyter notebook to a Python script, preserving bash cells and '!' commands as subprocess calls unless ignored with --ignore-cmds."
    )
    arg_parser.add_argument("input_nb", help="Path to the input .ipynb file")
    arg_parser.add_argument("output_py", help="Path for the output .py script")
    arg_parser.add_argument(
        "--ignore-cmds", action="store_true", help="Ignore bash cells and '!' commands"
    )
    parsed = arg_parser.parse_args()
    convert_notebook(parsed.input_nb, parsed.output_py, ignore_cmds=parsed.ignore_cmds)


if __name__ == "__main__":
    main()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/llamafactory-llm-fine-tune/ci/nb2py.py",
"license": "Apache License 2.0",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/serve/doc_code/replica_rank.py | # __replica_rank_start__
from ray import serve
@serve.deployment(num_replicas=4)
class ModelShard:
    """Toy deployment that reports its own replica rank and the world size."""

    def __call__(self):
        # Look up the per-replica context on each request.
        replica_ctx = serve.get_replica_context()
        payload = {
            "rank": replica_ctx.rank.rank,  # integer rank of this replica
            "world_size": replica_ctx.world_size,
        }
        return payload


app = ModelShard.bind()
# __replica_rank_end__
# __reconfigure_rank_start__
from typing import Any
from ray import serve
from ray.serve.schema import ReplicaRank
@serve.deployment(num_replicas=4, user_config={"name": "model_v1"})
class RankAwareModel:
    """Deployment that tracks its rank/world size across reconfigurations."""

    def __init__(self):
        replica_ctx = serve.get_replica_context()
        self.rank = replica_ctx.rank.rank  # integer rank value
        self.world_size = replica_ctx.world_size
        self.model_name = None
        print(f"Replica rank: {self.rank}/{self.world_size}")

    async def reconfigure(self, user_config: Any, rank: ReplicaRank):
        """Called when user_config or rank changes."""
        # Unpack the integer rank from the ReplicaRank object.
        self.rank = rank.rank
        self.world_size = serve.get_replica_context().world_size
        self.model_name = user_config.get("name")
        print(f"Reconfigured: rank={self.rank}, model={self.model_name}")

    def __call__(self):
        return {"rank": self.rank, "model_name": self.model_name}


app2 = RankAwareModel.bind()
# __reconfigure_rank_end__
if __name__ == "__main__":
    # Smoke-test both example apps locally; the trailing comments record a
    # representative run's output.
    # __replica_rank_start_run_main__
    h = serve.run(app)
    # Test that we can get rank information from replicas
    seen_ranks = set()
    for _ in range(20):
        res = h.remote().result()
        print(f"Output from __call__: {res}")
        assert res["rank"] in [0, 1, 2, 3]
        assert res["world_size"] == 4
        seen_ranks.add(res["rank"])
    # Verify we hit all replicas
    print(f"Saw ranks: {sorted(seen_ranks)}")
    # Output from __call__: {'rank': 2, 'world_size': 4}
    # Output from __call__: {'rank': 1, 'world_size': 4}
    # Output from __call__: {'rank': 3, 'world_size': 4}
    # Output from __call__: {'rank': 0, 'world_size': 4}
    # Output from __call__: {'rank': 0, 'world_size': 4}
    # Output from __call__: {'rank': 0, 'world_size': 4}
    # Output from __call__: {'rank': 0, 'world_size': 4}
    # Output from __call__: {'rank': 3, 'world_size': 4}
    # Output from __call__: {'rank': 1, 'world_size': 4}
    # Output from __call__: {'rank': 1, 'world_size': 4}
    # Output from __call__: {'rank': 0, 'world_size': 4}
    # Output from __call__: {'rank': 1, 'world_size': 4}
    # Output from __call__: {'rank': 3, 'world_size': 4}
    # Output from __call__: {'rank': 2, 'world_size': 4}
    # Output from __call__: {'rank': 0, 'world_size': 4}
    # Output from __call__: {'rank': 0, 'world_size': 4}
    # Output from __call__: {'rank': 2, 'world_size': 4}
    # Output from __call__: {'rank': 1, 'world_size': 4}
    # Output from __call__: {'rank': 3, 'world_size': 4}
    # Output from __call__: {'rank': 0, 'world_size': 4}
    # Saw ranks: [0, 1, 2, 3]
    # __replica_rank_end_run_main__
    # __reconfigure_rank_start_run_main__
    h = serve.run(app2)
    for _ in range(20):
        res = h.remote().result()
        assert res["rank"] in [0, 1, 2, 3]
        assert res["model_name"] == "model_v1"
        seen_ranks.add(res["rank"])
    # (ServeReplica:default:RankAwareModel pid=1231505) Replica rank: 0/4
    # (ServeReplica:default:RankAwareModel pid=1231505) Reconfigured: rank=0, model=model_v1
    # (ServeReplica:default:RankAwareModel pid=1231504) Replica rank: 1/4
    # (ServeReplica:default:RankAwareModel pid=1231504) Reconfigured: rank=1, model=model_v1
    # (ServeReplica:default:RankAwareModel pid=1231502) Replica rank: 3/4
    # (ServeReplica:default:RankAwareModel pid=1231502) Reconfigured: rank=3, model=model_v1
    # (ServeReplica:default:RankAwareModel pid=1231503) Replica rank: 2/4
    # (ServeReplica:default:RankAwareModel pid=1231503) Reconfigured: rank=2, model=model_v1
    # __reconfigure_rank_end_run_main__
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/doc_code/replica_rank.py",
"license": "Apache License 2.0",
"lines": 85,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:release/ray_release/cloud_util.py | import json
import os
import random
import shutil
import string
import tempfile
import time
from typing import Optional, Tuple
from urllib.parse import urlparse
import boto3
from azure.identity import CertificateCredential
from azure.storage.blob import BlobServiceClient
from ray_release.logger import logger
_AZURE_ACCOUNT_SECRET_ID = "azure-service-principal-oss-release"
_AZURE_CERTIFICATE_SECRET_ID = "azure-service-principal-certificate"
_AZURE_CREDENTIAL = [None]
def get_azure_credential() -> CertificateCredential:
    """Return a cached Azure CertificateCredential built from AWS-held secrets."""
    if _AZURE_CREDENTIAL[0] is not None:
        return _AZURE_CREDENTIAL[0]
    sm_client = boto3.client("secretsmanager", region_name="us-west-2")
    account_secret = sm_client.get_secret_value(SecretId=_AZURE_ACCOUNT_SECRET_ID)
    account_info = json.loads(account_secret["SecretString"])
    cert_secret = sm_client.get_secret_value(SecretId=_AZURE_CERTIFICATE_SECRET_ID)
    pem_contents = cert_secret["SecretString"]
    # NOTE(review): assumes CertificateCredential reads the PEM file during
    # construction, since the temporary directory is removed on exit — confirm.
    with tempfile.TemporaryDirectory() as tmp_dir:
        pem_path = os.path.join(tmp_dir, "azure_cert.pem")
        with open(pem_path, "w") as f:
            f.write(pem_contents)
        _AZURE_CREDENTIAL[0] = CertificateCredential(
            tenant_id=account_info["tenant_id"],
            client_id=account_info["client_id"],
            certificate_path=pem_path,
        )
    return _AZURE_CREDENTIAL[0]
def generate_tmp_cloud_storage_path(length: int = 10) -> str:
    """Return a random lowercase ASCII string for use as a temp storage path.

    Args:
        length: Number of characters to generate (default 10, matching the
            original behavior).

    Returns:
        A string of ``length`` random lowercase ASCII letters.
    """
    # random.choices samples with replacement in a single call — clearer and
    # faster than joining per-character random.choice results.
    return "".join(random.choices(string.ascii_lowercase, k=length))
def _upload_file_to_azure(
    local_file_path: str,
    azure_file_path: str,
    blob_service_client: Optional[BlobServiceClient] = None,
) -> None:
    """Upload a single local file to Azure Blob Storage.

    Args:
        local_file_path: Path to local file to upload.
        azure_file_path: Destination path (ABFSS URI) in Azure blob storage.
        blob_service_client: Optional pre-built client; one is constructed
            from the cached Azure credential when omitted.
    """
    account, container, blob_path = _parse_abfss_uri(azure_file_path)
    if blob_service_client is None:
        blob_service_client = BlobServiceClient(
            f"https://{account}.blob.core.windows.net", get_azure_credential()
        )
    blob_client = blob_service_client.get_blob_client(
        container=container, blob=blob_path
    )
    try:
        with open(local_file_path, "rb") as f:
            blob_client.upload_blob(data=f, overwrite=True)
    except Exception as e:
        logger.exception(f"Failed to upload file to Azure Blob Storage: {e}")
        raise
def archive_directory(directory_path: str) -> str:
    """Zip *directory_path* into the CWD and return the archive's absolute path."""
    stamp = str(int(time.time()))
    absolute_zip = os.path.abspath(f"ray_release_{stamp}.zip")
    # make_archive appends ".zip" itself, so hand it the extension-less base.
    base, _ = os.path.splitext(absolute_zip)
    shutil.make_archive(base, "zip", directory_path)
    return absolute_zip
def upload_working_dir_to_azure(working_dir: str, azure_directory_uri: str) -> str:
    """Archive *working_dir* and upload the zip under *azure_directory_uri*.

    Args:
        working_dir: Path to directory to upload.
        azure_directory_uri: Destination directory in Azure blob storage.

    Returns:
        Azure blob storage path where the archived directory was uploaded.
    """
    local_archive = archive_directory(working_dir)
    destination = f"{azure_directory_uri}/{os.path.basename(local_archive)}"
    _upload_file_to_azure(local_file_path=local_archive, azure_file_path=destination)
    return destination
def _parse_abfss_uri(uri: str) -> Tuple[str, str, str]:
"""Parse ABFSS URI to extract account, container, and path.
ABFSS URI format: abfss://container@account.dfs.core.windows.net/path
Returns: (account_name, container_name, path)
"""
parsed = urlparse(uri)
if "@" not in parsed.netloc:
raise ValueError(
f"Invalid ABFSS URI format: {uri}. "
"Expected format: abfss://container@account.dfs.core.windows.net/path"
)
# Split netloc into container@account.dfs.core.windows.net
container, account_part = parsed.netloc.split("@", 1)
# Extract account name from account.dfs.core.windows.net
account = account_part.split(".")[0]
# Path starts with / which we keep for the blob path
path = parsed.path.lstrip("/")
return account, container, path
def convert_abfss_uri_to_https(uri: str) -> str:
    """Rewrite an ABFSS URI as its equivalent HTTPS endpoint URL.

    abfss://container@account.dfs.core.windows.net/path
    becomes https://account.dfs.core.windows.net/container/path
    """
    account, container, blob_path = _parse_abfss_uri(uri)
    return f"https://{account}.dfs.core.windows.net/{container}/{blob_path}"
| {
"repo_id": "ray-project/ray",
"file_path": "release/ray_release/cloud_util.py",
"license": "Apache License 2.0",
"lines": 109,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/serve/_private/task_consumer.py | from abc import ABC
class TaskConsumerWrapper(ABC):
    """No-op base wrapper; subclasses supply the real consumer behavior."""

    def __init__(self, *args, **kwargs):
        """Accept and ignore any constructor arguments."""

    def initialize_callable(self, consumer_concurrency: int):
        """Hook invoked before consumption starts; default does nothing."""

    def __del__(self):
        """Teardown hook; default does nothing."""
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/_private/task_consumer.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/train/examples/pytorch/deepspeed_finetune/train.py | import argparse
import logging
import os
import tempfile
import uuid
from typing import Any, Dict
os.environ["RAY_TRAIN_V2_ENABLED"] = "1"
import deepspeed
import torch
from datasets import DownloadConfig, load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer
import ray
import ray.train
import ray.train.torch
from ray.train import Checkpoint, RunConfig, ScalingConfig
from ray.train.torch import TorchTrainer
logger = logging.getLogger(__name__)
def log_rank0(message: str) -> None:
    """Log *message* only from the rank-0 worker to avoid duplicated output."""
    world_rank = ray.train.get_context().get_world_rank()
    if world_rank == 0:
        logger.info(message)
def get_tokenizer(model_name: str, trust_remote_code: bool = True) -> Any:
    """
    Load and configure the tokenizer for the given model.

    Args:
        model_name: Name of the model to load tokenizer for
        trust_remote_code: Whether to trust remote code

    Returns:
        Configured tokenizer, guaranteed to have a pad token set.
    """
    tokenizer = AutoTokenizer.from_pretrained(
        model_name, trust_remote_code=trust_remote_code
    )
    if tokenizer.pad_token is not None:
        return tokenizer
    # Prefer EOS as the padding token; fall back to UNK for models without EOS.
    if tokenizer.eos_token is not None:
        tokenizer.pad_token = tokenizer.eos_token
    else:
        tokenizer.pad_token = tokenizer.unk_token
    return tokenizer
def setup_dataloader(
    model_name: str, dataset_name: str, seq_length: int, batch_size: int
) -> DataLoader:
    """Build a Ray-prepared DataLoader over a 1% training slice of the dataset."""
    tokenizer = get_tokenizer(model_name)
    raw_split = load_dataset(
        dataset_name,
        split="train[:1%]",
        download_config=DownloadConfig(disable_tqdm=True),
    )

    def _tokenize(batch):
        # Pad/truncate every example to a fixed seq_length.
        return tokenizer(
            batch["text"],
            padding="max_length",
            max_length=seq_length,
            truncation=True,
        )

    tokenized = raw_split.map(_tokenize, batched=True, num_proc=1, keep_in_memory=True)
    tokenized.set_format(type="torch", columns=["input_ids", "attention_mask"])
    loader = DataLoader(tokenized, batch_size=batch_size, shuffle=True)
    # Wrap with Ray Train so the loader is sharded/moved per worker.
    return ray.train.torch.prepare_data_loader(loader)
def setup_model_and_optimizer(
    model_name: str, learning_rate: float, ds_config: Dict[str, Any]
) -> deepspeed.runtime.engine.DeepSpeedEngine:
    """Load the model, attach AdamW, and wrap both in a DeepSpeed engine."""
    model = AutoModelForCausalLM.from_pretrained(model_name)
    param_count = sum(p.numel() for p in model.parameters())
    log_rank0(f"Model loaded: {model_name} (#parameters: {param_count})")
    optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
    # deepspeed.initialize returns (engine, optimizer, dataloader, scheduler);
    # only the engine is needed by callers.
    engine, optimizer, _, _ = deepspeed.initialize(
        model=model,
        optimizer=optimizer,
        config=ds_config,
    )
    return engine
def report_metrics_and_save_checkpoint(
    ds_engine: deepspeed.runtime.engine.DeepSpeedEngine, metrics: Dict[str, Any]
) -> None:
    """Save a DeepSpeed checkpoint (plus epoch marker) and report it to Ray Train."""
    ctx = ray.train.get_context()
    epoch_value = metrics["epoch"]
    with tempfile.TemporaryDirectory() as tmp_dir:
        checkpoint_dir = os.path.join(tmp_dir, "checkpoint")
        os.makedirs(checkpoint_dir, exist_ok=True)
        ds_engine.save_checkpoint(checkpoint_dir)
        # Record the epoch so a resumed run knows where to continue from.
        with open(os.path.join(checkpoint_dir, "epoch.txt"), "w", encoding="utf-8") as f:
            f.write(str(epoch_value))
        ray.train.report(metrics, checkpoint=Checkpoint.from_directory(tmp_dir))
        if ctx.get_world_rank() == 0:
            experiment_name = ctx.get_experiment_name()
            log_rank0(
                f"Checkpoint saved successfully for experiment {experiment_name} at {checkpoint_dir}. Metrics: {metrics}"
            )
def load_checkpoint(
    ds_engine: deepspeed.runtime.engine.DeepSpeedEngine, ckpt: ray.train.Checkpoint
) -> int:
    """Restore engine state from *ckpt* and return the epoch to resume from.

    Returns:
        0 when no epoch marker is found; otherwise the last saved epoch + 1.

    Raises:
        RuntimeError: If the checkpoint cannot be loaded.
    """
    next_epoch = 0
    try:
        with ckpt.as_directory() as checkpoint_dir:
            log_rank0(f"Loading checkpoint from {checkpoint_dir}")
            # Checkpoints are written under a "checkpoint" subdirectory; fall
            # back to the root for layouts without it.
            epoch_dir = os.path.join(checkpoint_dir, "checkpoint")
            if not os.path.isdir(epoch_dir):
                epoch_dir = checkpoint_dir
            ds_engine.load_checkpoint(epoch_dir)
            epoch_file = os.path.join(epoch_dir, "epoch.txt")
            if os.path.isfile(epoch_file):
                with open(epoch_file, "r", encoding="utf-8") as f:
                    last_epoch = int(f.read().strip())
                next_epoch = last_epoch + 1
        # Keep all workers in lockstep before training resumes.
        if torch.distributed.is_available() and torch.distributed.is_initialized():
            torch.distributed.barrier()
        log_rank0("Successfully loaded distributed checkpoint")
    except Exception as e:
        logger.error(f"Failed to load checkpoint: {e}")
        raise RuntimeError(f"Checkpoint loading failed: {e}") from e
    return next_epoch
def train_loop(config: Dict[str, Any]) -> None:
    """Per-worker training loop executed by Ray Train's TorchTrainer."""
    ds_engine = setup_model_and_optimizer(
        config["model_name"], config["learning_rate"], config["ds_config"]
    )
    # Load checkpoint if exists
    ckpt = ray.train.get_checkpoint()
    start_epoch = 0
    if ckpt:
        start_epoch = load_checkpoint(ds_engine, ckpt)
        if start_epoch > 0:
            log_rank0(f"Resuming training from epoch {start_epoch}")
    train_loader = setup_dataloader(
        config["model_name"],
        config["dataset_name"],
        config["seq_length"],
        config["batch_size"],
    )
    steps_per_epoch = len(train_loader)
    device = ray.train.torch.get_device()
    # Set model to training mode
    ds_engine.train()
    for epoch in range(start_epoch, config["epochs"]):
        # Reshuffle data each epoch when a distributed sampler is attached.
        if ray.train.get_context().get_world_size() > 1 and hasattr(
            train_loader, "sampler"
        ):
            sampler = getattr(train_loader, "sampler", None)
            if sampler and hasattr(sampler, "set_epoch"):
                sampler.set_epoch(epoch)
        running_loss = 0.0
        num_batches = 0
        for step, batch in enumerate(train_loader):
            input_ids = batch["input_ids"].to(device)
            attention_mask = batch["attention_mask"].to(device)
            # Causal LM objective: the inputs double as the labels.
            outputs = ds_engine(
                input_ids=input_ids,
                attention_mask=attention_mask,
                labels=input_ids,
                use_cache=False,
            )
            loss = outputs.loss
            log_rank0(
                f"Epoch: {epoch} Step: {step + 1}/{steps_per_epoch} Loss: {loss.item()}"
            )
            ds_engine.backward(loss)
            ds_engine.step()
            running_loss += loss.item()
            num_batches += 1
            # Optionally cut the epoch short for smoke tests.
            if config["debug_steps"] > 0 and step + 1 >= config["debug_steps"]:
                log_rank0(f"Debug steps finished. Stopping epoch {epoch}.")
                break
        report_metrics_and_save_checkpoint(
            ds_engine,
            {"loss": running_loss / num_batches, "epoch": epoch},
        )
def main():
    """Build configs from CLI args and launch DeepSpeed training via Ray Train."""
    args = get_args()
    print(args)
    # DeepSpeed engine configuration: bf16, ZeRO stage, gradient clipping.
    ds_config = {
        "train_micro_batch_size_per_gpu": args.batch_size,
        "bf16": {"enabled": True},
        "grad_accum_dtype": "bf16",
        "zero_optimization": {
            "stage": args.zero_stage,
            "overlap_comm": True,
            "contiguous_gradients": True,
        },
        "gradient_clipping": 1.0,
    }
    train_loop_config = {
        "epochs": args.num_epochs,
        "learning_rate": args.learning_rate,
        "batch_size": args.batch_size,
        "ds_config": ds_config,
        "model_name": args.model_name,
        "seq_length": args.seq_length,
        "dataset_name": args.dataset_name,
        "debug_steps": args.debug_steps,
    }
    # Reuse the experiment name when resuming; otherwise generate a fresh one.
    if args.resume_experiment is None:
        name = f"deepspeed_sample_{uuid.uuid4().hex[:8]}"
    else:
        name = args.resume_experiment
    print(f"Experiment name: {name}")
    trainer = TorchTrainer(
        train_loop_per_worker=train_loop,
        scaling_config=ScalingConfig(
            num_workers=args.num_workers, use_gpu=not args.cpu_only
        ),
        train_loop_config=train_loop_config,
        run_config=RunConfig(storage_path=args.storage_path, name=name),
    )
    result = trainer.fit()
    print(f"Training finished. Result: {result}")
def get_args():
    """Parse command-line options controlling model, data, and training setup."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--model_name", type=str, default="gpt2")
    arg_parser.add_argument("--dataset_name", type=str, default="ag_news")
    arg_parser.add_argument("--batch_size", type=int, default=1)
    arg_parser.add_argument("--num_epochs", type=int, default=1)
    arg_parser.add_argument("--seq_length", type=int, default=512)
    arg_parser.add_argument("--learning_rate", type=float, default=1e-6)
    arg_parser.add_argument("--zero_stage", type=int, default=3)
    arg_parser.add_argument("--num_workers", type=int, default=2)
    arg_parser.add_argument("--cpu_only", action="store_true", help="Disable GPU usage")
    arg_parser.add_argument("--storage_path", type=str, default="/mnt/cluster_storage")
    arg_parser.add_argument(
        "--resume_experiment",
        type=str,
        default=None,
        help="Path to the experiment to resume from",
    )
    arg_parser.add_argument("--debug_steps", type=int, default=0)
    return arg_parser.parse_args()


if __name__ == "__main__":
    main()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/train/examples/pytorch/deepspeed_finetune/train.py",
"license": "Apache License 2.0",
"lines": 240,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/_internal/datasource/mcap_datasource.py | """MCAP (Message Capture) datasource for Ray Data.
MCAP is a standardized format for storing timestamped messages from robotics and
autonomous systems, commonly used for sensor data, control commands, and other
time-series data.
"""
import json
import logging
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Set, Union
from ray.data._internal.delegating_block_builder import DelegatingBlockBuilder
from ray.data._internal.util import _check_import
from ray.data.block import Block
from ray.data.datasource.file_based_datasource import FileBasedDatasource
from ray.util.annotations import DeveloperAPI
if TYPE_CHECKING:
import pyarrow
from mcap.reader import Channel, Message, Schema
logger = logging.getLogger(__name__)
@dataclass
class TimeRange:
    """Time range for filtering MCAP messages.

    Attributes:
        start_time: Start time in nanoseconds (inclusive).
        end_time: End time in nanoseconds (exclusive).
    """

    start_time: int
    end_time: int

    def __post_init__(self):
        """Reject inverted/empty ranges and negative timestamps."""
        # Ordering check first so inverted ranges get the clearer message.
        if not self.start_time < self.end_time:
            raise ValueError(
                f"start_time ({self.start_time}) must be less than "
                f"end_time ({self.end_time})"
            )
        if min(self.start_time, self.end_time) < 0:
            raise ValueError(
                f"time values must be non-negative, got start_time={self.start_time}, "
                f"end_time={self.end_time}"
            )
@DeveloperAPI
class MCAPDatasource(FileBasedDatasource):
    """MCAP (Message Capture) datasource for Ray Data.

    This datasource provides reading of MCAP files with predicate pushdown
    optimization for filtering by topics, time ranges, and message types.

    MCAP is a standardized format for storing timestamped messages from robotics and
    autonomous systems, commonly used for sensor data, control commands, and other
    time-series data.

    Examples:
        Basic usage:

        >>> import ray  # doctest: +SKIP
        >>> ds = ray.data.read_mcap("/path/to/data.mcap")  # doctest: +SKIP

        With topic filtering and time range:

        >>> from ray.data.datasource import TimeRange  # doctest: +SKIP
        >>> ds = ray.data.read_mcap(  # doctest: +SKIP
        ...     "/path/to/data.mcap",
        ...     topics={"/camera/image_raw", "/lidar/points"},
        ...     time_range=TimeRange(start_time=1000000000, end_time=2000000000)
        ... )  # doctest: +SKIP

        With multiple files and metadata:

        >>> ds = ray.data.read_mcap(  # doctest: +SKIP
        ...     ["file1.mcap", "file2.mcap"],
        ...     topics={"/camera/image_raw", "/lidar/points"},
        ...     message_types={"sensor_msgs/Image", "sensor_msgs/PointCloud2"},
        ...     include_metadata=True
        ... )  # doctest: +SKIP
    """

    # File extensions handled by this datasource (used for path filtering).
    _FILE_EXTENSIONS = ["mcap"]

    def __init__(
        self,
        paths: Union[str, List[str]],
        topics: Optional[Union[List[str], Set[str]]] = None,
        time_range: Optional[TimeRange] = None,
        message_types: Optional[Union[List[str], Set[str]]] = None,
        include_metadata: bool = True,
        **file_based_datasource_kwargs,
    ):
        """Initialize MCAP datasource.

        Args:
            paths: Path or list of paths to MCAP files.
            topics: Optional list/set of topic names to include. If specified,
                only messages from these topics will be read.
            time_range: Optional TimeRange for filtering messages by timestamp.
                TimeRange contains start_time and end_time in nanoseconds, where
                both values must be non-negative and start_time < end_time.
            message_types: Optional list/set of message type names (schema names)
                to include. Only messages with matching schema names will be read.
            include_metadata: Whether to include MCAP metadata fields in the output.
                Defaults to True. When True, includes schema, channel, and message
                metadata.
            **file_based_datasource_kwargs: Additional arguments for FileBasedDatasource.
        """
        super().__init__(paths, **file_based_datasource_kwargs)
        # Fail fast with a helpful message if the optional dependency is absent.
        _check_import(self, module="mcap", package="mcap")
        # Convert to sets for faster lookup
        self._topics = set(topics) if topics else None
        self._message_types = set(message_types) if message_types else None
        self._time_range = time_range
        self._include_metadata = include_metadata

    def _read_stream(self, f: "pyarrow.NativeFile", path: str) -> Iterator[Block]:
        """Read MCAP file and yield blocks of message data.

        This method implements efficient MCAP reading with predicate pushdown.
        It uses MCAP's built-in filtering capabilities for optimal performance
        and applies additional filters when needed.

        Args:
            f: File-like object to read from. Must be seekable for MCAP reading.
            path: Path to the MCAP file being processed.

        Yields:
            Block: Blocks of MCAP message data as pyarrow Tables.

        Raises:
            ValueError: If the MCAP file cannot be read or has invalid format.
        """
        from mcap.reader import make_reader

        reader = make_reader(f)
        # Note: MCAP summaries are optional and iter_messages works without them
        # We don't need to validate the summary since it's not required
        # Use MCAP's built-in filtering for topics and time range
        messages = reader.iter_messages(
            topics=list(self._topics) if self._topics else None,
            start_time=self._time_range.start_time if self._time_range else None,
            end_time=self._time_range.end_time if self._time_range else None,
            log_time_order=True,
            reverse=False,
        )
        builder = DelegatingBlockBuilder()
        for schema, channel, message in messages:
            # Apply filters that couldn't be pushed down to MCAP level
            if not self._should_include_message(schema, channel, message):
                continue
            # Convert message to dictionary format
            message_data = self._message_to_dict(schema, channel, message, path)
            builder.add(message_data)
        # Yield the block if we have any messages
        # NOTE: the whole file becomes a single block; memory scales with the
        # number of messages that survive filtering.
        if builder.num_rows() > 0:
            yield builder.build()

    def _should_include_message(
        self, schema: "Schema", channel: "Channel", message: "Message"
    ) -> bool:
        """Check if a message should be included based on filters.

        This method applies Python-level filtering that cannot be pushed down
        to the MCAP library level. Topic filters are already handled by the
        MCAP reader, so only message_types filtering is needed here.

        Args:
            schema: MCAP schema object containing message type information.
            channel: MCAP channel object containing topic and metadata.
            message: MCAP message object containing the actual data.

        Returns:
            True if the message should be included, False otherwise.
        """
        # Message type filter (cannot be pushed down to MCAP reader)
        if self._message_types and schema and schema.name not in self._message_types:
            return False
        return True

    def _message_to_dict(
        self, schema: "Schema", channel: "Channel", message: "Message", path: str
    ) -> Dict[str, Any]:
        """Convert MCAP message to dictionary format.

        This method converts MCAP message objects into a standardized dictionary
        format suitable for Ray Data processing.

        Args:
            schema: MCAP schema object containing message type and encoding info.
            channel: MCAP channel object containing topic and channel metadata.
            message: MCAP message object containing the actual message data.
            path: Path to the source file (for include_paths functionality).

        Returns:
            Dictionary containing message data in Ray Data format.
        """
        # Decode message data based on encoding
        decoded_data = message.data
        if channel.message_encoding == "json" and isinstance(message.data, bytes):
            try:
                decoded_data = json.loads(message.data.decode("utf-8"))
            except (json.JSONDecodeError, UnicodeDecodeError):
                # Keep raw bytes if decoding fails
                decoded_data = message.data
        # Core message data
        message_data = {
            "data": decoded_data,
            "topic": channel.topic,
            "log_time": message.log_time,
            "publish_time": message.publish_time,
            "sequence": message.sequence,
        }
        # Add metadata if requested
        if self._include_metadata:
            message_data.update(
                {
                    "channel_id": message.channel_id,
                    "message_encoding": channel.message_encoding,
                    "schema_name": schema.name if schema else None,
                    "schema_encoding": schema.encoding if schema else None,
                    "schema_data": schema.data if schema else None,
                }
            )
        # Add file path if include_paths is enabled (from FileBasedDatasource)
        # NOTE(review): presumably FileBasedDatasource sets an `include_paths`
        # attribute; the getattr default means the column is omitted otherwise.
        if getattr(self, "include_paths", False):
            message_data["path"] = path
        return message_data

    def get_name(self) -> str:
        """Return a human-readable name for this datasource."""
        return "MCAP"

    @property
    def supports_distributed_reads(self) -> bool:
        """Whether this datasource supports distributed reads.

        MCAP files can be read in parallel across multiple files.
        """
        return True
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/datasource/mcap_datasource.py",
"license": "Apache License 2.0",
"lines": 206,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:doc/source/serve/doc_code/autoscaling_policy.py | # __begin_scheduled_batch_processing_policy__
from datetime import datetime
from typing import Any, Dict
from ray.serve.config import AutoscalingContext
def scheduled_batch_processing_policy(
    ctx: AutoscalingContext,
) -> tuple[int, Dict[str, Any]]:
    """Pick a replica count purely from the current wall-clock hour."""
    hour = datetime.now().hour
    # Business hours (9 AM - 5 PM): moderate baseline capacity.
    if 9 <= hour < 17:
        return 2, {"reason": "Business hours"}
    # Evening batch window (6 PM - 8 PM): peak capacity.
    if 18 <= hour < 20:
        return 4, {"reason": "Evening batch processing"}
    # Anything else is off-peak.
    return 1, {"reason": "Off-peak hours"}
# __end_scheduled_batch_processing_policy__
# __begin_custom_metrics_autoscaling_policy__
from typing import Any, Dict
from ray.serve.config import AutoscalingContext
def custom_metrics_autoscaling_policy(
    ctx: AutoscalingContext,
) -> tuple[int, Dict[str, Any]]:
    """Scale on the most recent CPU/memory readings reported by replicas."""
    cpu_series = ctx.aggregated_metrics.get("cpu_usage", {})
    mem_series = ctx.aggregated_metrics.get("memory_usage", {})
    # Take the latest reported sample; 0 when the metric is absent.
    latest_cpu = list(cpu_series.values())[-1] if cpu_series else 0
    latest_mem = list(mem_series.values())[-1] if mem_series else 0
    if latest_cpu > 80 or latest_mem > 85:
        # Overloaded: add one replica, bounded by the configured maximum.
        return min(ctx.capacity_adjusted_max_replicas, ctx.current_num_replicas + 1), {}
    if latest_cpu < 30 and latest_mem < 40:
        # Underutilized: remove one replica, bounded by the configured minimum.
        return max(ctx.capacity_adjusted_min_replicas, ctx.current_num_replicas - 1), {}
    return ctx.current_num_replicas, {}
# __end_custom_metrics_autoscaling_policy__
# __begin_application_level_autoscaling_policy__
from typing import Dict, Tuple
from ray.serve.config import AutoscalingContext
from ray.serve._private.common import DeploymentID
from ray.serve.config import AutoscalingContext
def coordinated_scaling_policy(
    contexts: Dict[DeploymentID, AutoscalingContext]
) -> Tuple[Dict[DeploymentID, int], Dict]:
    """Scale a Preprocessor/Model pair together based on queue depth."""
    # Locate the two coordinated deployments by name.
    preprocessing_id = [d for d in contexts if d.name == "Preprocessor"][0]
    model_id = [d for d in contexts if d.name == "Model"][0]
    pre_ctx = contexts[preprocessing_id]
    model_ctx = contexts[model_id]
    # One preprocessing replica per ~10 queued requests, clamped to bounds.
    raw_pre = pre_ctx.total_num_requests // 10
    pre_replicas = max(
        pre_ctx.capacity_adjusted_min_replicas,
        min(pre_ctx.capacity_adjusted_max_replicas, raw_pre),
    )
    # The model stage is assumed ~2x slower, so give it twice the replicas,
    # clamped to its own bounds.
    model_replicas = max(
        model_ctx.capacity_adjusted_min_replicas,
        min(model_ctx.capacity_adjusted_max_replicas, pre_replicas * 2),
    )
    return {preprocessing_id: pre_replicas, model_id: model_replicas}, {}
# __end_application_level_autoscaling_policy__
# __begin_stateful_application_level_policy__
from typing import Dict, Tuple, Any
from ray.serve.config import AutoscalingContext
from ray.serve._private.common import DeploymentID
def stateful_application_level_policy(
contexts: Dict[DeploymentID, AutoscalingContext]
) -> Tuple[Dict[DeploymentID, int], Dict[DeploymentID, Dict[str, Any]]]:
"""Example policy demonstrating per-deployment state persistence."""
decisions = {}
policy_state = {}
for deployment_id, ctx in contexts.items():
# Read previous state for this deployment (persisted from last iteration)
prev_state = ctx.policy_state or {}
scale_count = prev_state.get("scale_count", 0)
last_replicas = prev_state.get("last_replicas", ctx.current_num_replicas)
# Simple scaling logic: scale based on queue depth
desired_replicas = max(
ctx.capacity_adjusted_min_replicas,
min(
ctx.capacity_adjusted_max_replicas,
ctx.total_num_requests // 10,
),
)
decisions[deployment_id] = desired_replicas
# Store per-deployment state that persists across iterations
policy_state[deployment_id] = {
"scale_count": scale_count + 1,
"last_replicas": desired_replicas,
}
return decisions, policy_state
# __end_stateful_application_level_policy__
# __begin_apply_autoscaling_config_example__
from typing import Any, Dict
from ray.serve.config import AutoscalingContext
def queue_length_based_autoscaling_policy(
    ctx: AutoscalingContext,
) -> tuple[int, Dict[str, Any]]:
    """Return a raw replica target derived from the current queue length.

    Ray Serve applies the deployment's autoscaling_config (scaling factors,
    up/downscale delays, and min/max replica bounds) on top of the value
    returned here, so this function only picks a coarse tier.
    """
    queue_length = ctx.total_num_requests
    # Map queue depth to a coarse tier; Serve clamps and smooths the result.
    if queue_length <= 10:
        return 0, {}
    if queue_length <= 50:
        return 5, {}
    return 10, {}
# __end_apply_autoscaling_config_example__
# __begin_apply_autoscaling_config_usage__
from ray import serve
from ray.serve.config import AutoscalingConfig, AutoscalingPolicy
# Deployment wired to queue_length_based_autoscaling_policy. The raw value
# the policy returns is still clamped to min/max_replicas and smoothed by
# the up/downscale delays configured below.
@serve.deployment(
    autoscaling_config=AutoscalingConfig(
        min_replicas=1,
        max_replicas=10,
        # Interval (seconds) for autoscaling metrics reporting.
        metrics_interval_s=0.1,
        upscale_delay_s=1.0,
        downscale_delay_s=1.0,
        policy=AutoscalingPolicy(
            policy_function=queue_length_based_autoscaling_policy
        )
    ),
    max_ongoing_requests=5,
)
class MyDeployment:
    """Minimal deployment used to exercise the custom autoscaling policy."""
    def __call__(self) -> str:
        return "Hello, world!"
# Build the (not yet deployed) application graph; run with `serve.run(app)`.
app = MyDeployment.bind()
# __end_apply_autoscaling_config_usage__ | {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/doc_code/autoscaling_policy.py",
"license": "Apache License 2.0",
"lines": 141,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/serve/doc_code/custom_metrics_autoscaling.py | # __serve_example_begin__
import time
from typing import Dict
import psutil
from ray import serve
@serve.deployment(
    autoscaling_config={
        "min_replicas": 1,
        "max_replicas": 5,
        "metrics_interval_s": 0.1,
        "policy": {
            "policy_function": "autoscaling_policy:custom_metrics_autoscaling_policy"
        },
    },
    max_ongoing_requests=5,
)
class CustomMetricsDeployment:
    """Deployment that reports per-replica CPU/memory stats for autoscaling."""

    def __init__(self):
        # Handle on this replica's own OS process; resource statistics are
        # read from it in record_autoscaling_stats.
        self.process = psutil.Process()

    def __call__(self) -> str:
        # Simulate some work so load builds up under traffic.
        time.sleep(0.5)
        return "Hello, world!"

    def record_autoscaling_stats(self) -> Dict[str, float]:
        """Return the custom metrics consumed by the autoscaling policy."""
        proc = self.process
        # CPU usage of this process, sampled over a short window (percent).
        cpu_pct = proc.cpu_percent(interval=0.1)
        # Unique set size of this process relative to total system memory.
        mem_fraction = proc.memory_full_info().uss / psutil.virtual_memory().total
        return {
            "cpu_usage": cpu_pct,
            "memory_usage": mem_fraction * 100,
        }


# Create the app graph for `serve.run`.
app = CustomMetricsDeployment.bind()
# __serve_example_end__
if __name__ == "__main__":
    # Smoke test: deploy the app locally and issue a handful of requests.
    import requests  # noqa

    serve.run(app)
    request_count = 0
    while request_count < 10:
        response = requests.get("http://localhost:8000/")
        assert response.text == "Hello, world!"
        request_count += 1
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/doc_code/custom_metrics_autoscaling.py",
"license": "Apache License 2.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/serve/doc_code/scheduled_batch_processing.py | # __serve_example_begin__
import asyncio
from ray import serve
from ray.serve.config import AutoscalingConfig, AutoscalingPolicy
@serve.deployment(
    autoscaling_config=AutoscalingConfig(
        min_replicas=1,
        max_replicas=12,
        # Delegate replica-count decisions to a custom policy, resolved from
        # its "module:function" import path at deploy time.
        policy=AutoscalingPolicy(
            policy_function="autoscaling_policy:scheduled_batch_processing_policy"
        ),
    ),
)
class BatchProcessingDeployment:
    """Deployment scaled by the custom scheduled_batch_processing_policy."""
    async def __call__(self) -> str:
        # Simulate batch processing work
        await asyncio.sleep(0.5)
        return "Hello, world!"
# Application graph; deploy with `serve.run(app)`.
app = BatchProcessingDeployment.bind()
# __serve_example_end__
if __name__ == "__main__":
    # Smoke test: deploy locally and verify one request round-trips.
    import requests  # noqa

    serve.run(app)
    response = requests.get("http://localhost:8000/")
    assert response.text == "Hello, world!"
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/doc_code/scheduled_batch_processing.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/data/examples/data/video_processing/envs.py | """Lazy environment variable accessors for the video processing example."""
from __future__ import annotations
import os
from typing import Any, Callable, Dict, Iterable
def _maybe_int(value: str | None, default: int) -> int:
if value is None:
return default
try:
return int(value)
except (TypeError, ValueError):
return default
def _int_env_getter(name: str, default: int) -> Callable[[], int]:
    """Build a zero-argument callable that reads env var ``name`` as an int.

    The environment is consulted on every call, so later changes to the
    variable are picked up; missing or malformed values yield ``default``.
    """

    def _read() -> int:
        return _maybe_int(os.getenv(name), default)

    return _read
# Registry of lazily-evaluated module attributes: name -> zero-arg getter.
_ENVIRONMENT_VARIABLES: Dict[str, Callable[[], Any]] = {
    var_name: _int_env_getter(var_name, var_default)
    for var_name, var_default in (
        ("RAY_VIDEO_EXAMPLE_MAX_TARGETS", 10_000),
        ("RAY_VIDEO_EXAMPLE_MAX_DECODE_FRAMES", 100_000),
    )
}
def __getattr__(name: str) -> Any:
    """Resolve registered module attributes lazily (PEP 562 module hook)."""
    if name not in _ENVIRONMENT_VARIABLES:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    return _ENVIRONMENT_VARIABLES[name]()
def __dir__() -> Iterable[str]:
    """Expose the lazily-resolved attribute names to ``dir()``."""
    return sorted(_ENVIRONMENT_VARIABLES)


# The public API mirrors the lazily-resolved attribute registry.
__all__ = list(__dir__())
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/examples/data/video_processing/envs.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/data/examples/data/video_processing/http_utils.py | """HTTP helper utilities for the Ray Data video processing example."""
from __future__ import annotations
from io import BytesIO
from pathlib import Path
from typing import Any, Mapping, MutableMapping, Optional
from urllib.parse import urlparse
try: # pragma: no cover - optional dependency
import aiohttp # type: ignore
except Exception: # pragma: no cover
aiohttp = None # type: ignore
try: # pragma: no cover - optional dependency
import requests # type: ignore
except Exception: # pragma: no cover
requests = None # type: ignore
class HTTPConnection:
    """Small helper around ``requests``/``aiohttp`` for reuseable HTTP clients."""
    def __init__(self, *, reuse_client: bool = True) -> None:
        # When True (default) one session per flavor (sync/async) is cached
        # and shared across calls; when False a fresh client replaces the
        # previous one on every accessor call.
        self.reuse_client = reuse_client
        self._sync_client: Optional[Any] = None
        self._async_client: Optional[Any] = None
    def get_sync_client(self):
        """Return a ``requests.Session``, creating/replacing it as configured."""
        if requests is None:
            raise ImportError(
                "requests is required for HTTPConnection. Install with `pip install requests`."
            )
        if self._sync_client is None or not self.reuse_client:
            # Best-effort close of the previous throwaway session; failures
            # during close are deliberately swallowed.
            if self._sync_client is not None and not self.reuse_client:
                try:
                    self._sync_client.close()
                except Exception:
                    pass
            self._sync_client = requests.Session()
        return self._sync_client
    async def get_async_client(self):
        """Return an ``aiohttp.ClientSession``, creating/replacing as configured."""
        if aiohttp is None:
            raise ImportError(
                "aiohttp is required for HTTPConnection. Install with `pip install aiohttp`."
            )
        if self._async_client is None or not self.reuse_client:
            # Close a still-open previous session before replacing it.
            if (
                self._async_client is not None
                and not self._async_client.closed
                and not self.reuse_client
            ):
                try:
                    await self._async_client.close()
                except Exception:
                    pass
            self._async_client = aiohttp.ClientSession()
        return self._async_client
    def _validate_http_url(self, url: str) -> None:
        # Reject non-HTTP(S) schemes up front (file://, ftp://, etc.).
        parsed_url = urlparse(url)
        if parsed_url.scheme not in ("http", "https"):
            raise ValueError("Invalid HTTP URL: scheme must be 'http' or 'https'.")
    def _headers(self, **extras: str) -> MutableMapping[str, str]:
        # Hook for injecting default headers; currently just copies extras.
        return dict(extras)
    def get_response(
        self,
        url: str,
        *,
        stream: bool = False,
        timeout: Optional[float] = None,
        extra_headers: Optional[Mapping[str, str]] = None,
    ):
        """Issue a GET and return the live ``requests.Response``."""
        self._validate_http_url(url)
        client = self.get_sync_client()
        extra_headers = extra_headers or {}
        return client.get(
            url,
            headers=self._headers(**extra_headers),
            stream=stream,
            timeout=timeout,
        )
    async def get_async_response(
        self,
        url: str,
        *,
        timeout: Optional[float] = None,
        extra_headers: Optional[Mapping[str, str]] = None,
    ):
        """Start an async GET; callers use the result as an ``async with`` context."""
        self._validate_http_url(url)
        client = await self.get_async_client()
        extra_headers = extra_headers or {}
        return client.get(
            url,
            headers=self._headers(**extra_headers),
            timeout=timeout,
        )
    def get_bytes(self, url: str, *, timeout: Optional[float] = None) -> bytes:
        """Fetch ``url`` fully into memory and return the response body."""
        with self.get_response(url, stream=False, timeout=timeout) as r:
            r.raise_for_status()
            return r.content
    async def async_get_bytes(
        self,
        url: str,
        *,
        timeout: Optional[float] = None,
    ) -> bytes:
        """Async variant of :meth:`get_bytes`."""
        async with await self.get_async_response(url, timeout=timeout) as r:
            r.raise_for_status()
            return await r.read()
    def download_file(
        self,
        url: str,
        save_path: Path,
        *,
        timeout: Optional[float] = None,
        chunk_size: int = 512 * 1024,
    ) -> Path:
        """Stream ``url`` to ``save_path`` in ``chunk_size`` pieces."""
        with self.get_response(url, stream=True, timeout=timeout) as r:
            r.raise_for_status()
            with save_path.open("wb") as f:
                for chunk in r.iter_content(chunk_size):
                    # Skip keep-alive chunks, which arrive as empty bytes.
                    if chunk:
                        f.write(chunk)
        return save_path
    async def async_download_file(
        self,
        url: str,
        save_path: Path,
        *,
        timeout: Optional[float] = None,
        chunk_size: int = 512 * 1024,
    ) -> Path:
        """Async variant of :meth:`download_file`."""
        async with await self.get_async_response(url, timeout=timeout) as r:
            r.raise_for_status()
            with save_path.open("wb") as f:
                async for chunk in r.content.iter_chunked(chunk_size):
                    if chunk:
                        f.write(chunk)
        return save_path
    def download_bytes_chunked(
        self,
        url: str,
        *,
        timeout: Optional[float] = None,
        chunk_size: int = 512 * 1024,
    ) -> bytes:
        """Stream a response into memory to avoid large one-shot downloads."""
        with self.get_response(url, stream=True, timeout=timeout) as r:
            r.raise_for_status()
            bio = BytesIO()
            for chunk in r.iter_content(chunk_size):
                if chunk:
                    bio.write(chunk)
            return bio.getvalue()
    def close(self):
        """Close the cached sync session, if any (best effort)."""
        if self._sync_client is not None:
            try:
                self._sync_client.close()
            except Exception:
                pass
            self._sync_client = None
    async def aclose(self):
        """Close the cached aiohttp session, if any (best effort)."""
        if self._async_client is not None and not self._async_client.closed:
            try:
                await self._async_client.close()
            except Exception:
                pass
            self._async_client = None
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/examples/data/video_processing/http_utils.py",
"license": "Apache License 2.0",
"lines": 160,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/examples/data/video_processing/main.py | from __future__ import annotations
import asyncio
import base64
import threading
from io import BytesIO
from queue import Queue
from typing import Any, Dict, List
import pyarrow as pa
import ray
import ray.data
from ray.data.examples.data.video_processing.video_processor import VideoProcessor
from ray.data.llm import build_processor, vLLMEngineProcessorConfig
# Publicly hosted sample video consumed by the example pipeline.
EXAMPLE_VIDEO_PATH = (
    "https://videos.pexels.com/video-files/30527638/13076846_2160_3240_30fps.mp4"
)
# Local path where the vision-language model weights are expected.
EXAMPLE_MODEL_PATH = "/vllm-workspace/tmp/vlm"
# Prompt applied when an input row does not provide one.
DEFAULT_PROMPT = "Summarize the content of this video"
class DecodeFramesUDF:
    """Ray Data batch UDF that decodes sampled video frames to base64 JPEGs."""
    def __init__(self, sampling=None, preprocess=None):
        # VideoProcessor does the download/decode; defaults sample 4 frames
        # and resize them to 384x384 RGB PIL images.
        self.processor = VideoProcessor(
            sampling=sampling or {"num_frames": 4},
            output_format="pil",
            preprocess=preprocess or {"resize": {"size": [384, 384]}, "convert": "RGB"},
        )
    def _run_async(self, coro):
        # Run `coro` to completion whether or not an event loop is already
        # running in this thread: use asyncio.run() when there is none,
        # otherwise delegate to a throwaway thread with its own loop.
        try:
            loop = asyncio.get_running_loop()
        except RuntimeError:
            loop = None
        if loop is None:
            return asyncio.run(coro)
        q: Queue = Queue(maxsize=1)
        def _runner():
            # Executes in the helper thread; ships (ok, value-or-error)
            # back through the queue.
            try:
                new_loop = asyncio.new_event_loop()
                asyncio.set_event_loop(new_loop)
                res = new_loop.run_until_complete(coro)
                q.put((True, res))
            except Exception as e:
                q.put((False, e))
            finally:
                try:
                    new_loop.close()
                except Exception:
                    pass
        t = threading.Thread(target=_runner, daemon=True)
        t.start()
        # Block until the helper thread finishes, then re-raise any error.
        ok, val = q.get()
        if ok:
            return val
        raise val
    def __call__(self, batch: Any):
        # Accept either a pyarrow Table or a plain iterable of row dicts.
        records = batch.to_pylist() if isinstance(batch, pa.Table) else list(batch)
        if not records:
            return pa.Table.from_pylist([])
        sources = [str(r["video_url"]) for r in records]
        prompts = [r.get("prompt", DEFAULT_PROMPT) for r in records]
        # Decode every video in the batch concurrently.
        results = self._run_async(self.processor.process(sources))
        out: List[Dict[str, Any]] = []
        for row, prompt_text, res in zip(records, prompts, results):
            frames = res.get("frames", [])
            frames_b64: List[str] = []
            # Serialize each PIL frame as a base64 JPEG string so it survives
            # the Arrow round trip to the inference stage.
            for f in frames:
                buf = BytesIO()
                f.save(buf, format="JPEG", quality=90)
                frames_b64.append(base64.b64encode(buf.getvalue()).decode("ascii"))
            out.append(
                {
                    "video_url": str(row.get("video_url")),
                    "prompt": str(prompt_text),
                    "frames_b64": frames_b64,
                }
            )
        return pa.Table.from_pylist(out)
def _preprocess(row: Dict[str, Any], max_images: int = 10) -> Dict[str, Any]:
    """Turn a decoded-frames row into a vLLM chat request payload.

    At most ``max_images`` frames are embedded as base64 data URIs ahead of
    the user prompt. Raises ``RuntimeError`` when the row carries no frames.
    """
    encoded_frames: List[str] = row.get("frames_b64") or []
    if not encoded_frames:
        raise RuntimeError(f"No frames decoded for video: {row.get('video_url')}")

    # Images first, then the text prompt, mirroring the usual VLM layout.
    user_content: List[Dict[str, Any]] = []
    for b64 in encoded_frames[:max_images]:
        user_content.append(
            {"type": "image", "image": f"data:image/jpeg;base64,{b64}"}
        )
    user_content.append(
        {"type": "text", "text": row.get("prompt", DEFAULT_PROMPT)}
    )

    return {
        "messages": [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": user_content},
        ],
        "sampling_params": {"temperature": 0.1, "top_p": 0.001, "max_tokens": 512},
        "video_url": row.get("video_url"),
    }
def run_dataset_pipeline(model_path: str) -> None:
    """Run the two-stage decode -> VLM inference pipeline on sample videos.

    Args:
        model_path: Local path (or model source id) the vLLM engine loads.
    """
    if not ray.is_initialized():
        ray.init(include_dashboard=False)
    # Three rows over the same public sample video with different prompts.
    ds = ray.data.from_items(
        [
            {
                "video_url": EXAMPLE_VIDEO_PATH,
                "prompt": "Summarize the content of this video",
            },
            {"video_url": EXAMPLE_VIDEO_PATH, "prompt": "List notable objects."},
            {"video_url": EXAMPLE_VIDEO_PATH, "prompt": "Describe the scene."},
        ]
    )
    config = vLLMEngineProcessorConfig(
        model_source=model_path,
        batch_size=1,
        concurrency=1,
        has_image=True,
        engine_kwargs={
            "enable_chunked_prefill": True,
            "enforce_eager": True,
            # Cap on images per prompt; must cover _preprocess's max_images.
            "limit_mm_per_prompt": {"image": 10},
        },
        apply_chat_template=True,
    )
    # Stage 1: decode/sample frames per row (batch of 1 keeps memory low).
    decode_udf = DecodeFramesUDF()
    ds_decoded = ds.map_batches(decode_udf, batch_format="pyarrow", batch_size=1)
    # Stage 2: chat-style VLM inference over the decoded frames.
    inference_stage = build_processor(
        config,
        preprocess=_preprocess,
        postprocess=lambda row: {
            "video_url": row.get("video_url"),
            "generated_text": row.get("generated_text", ""),
        },
    )
    ds_inferred = inference_stage(ds_decoded)
    for row in ds_inferred.take_all():
        print("\n=== Dataset result ===")
        print(f"video: {row['video_url']}")
        print(f"generated_text: {row.get('generated_text', '')}")
def main() -> None:
    """Script entry point: run the pipeline with the example model path."""
    run_dataset_pipeline(EXAMPLE_MODEL_PATH)
if __name__ == "__main__":
    main()
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/examples/data/video_processing/main.py",
"license": "Apache License 2.0",
"lines": 141,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/examples/data/video_processing/video_processor.py | """Video processing utilities for Ray Data examples.
`VideoProcessor` downloads, decodes, and samples frames from video sources. It is
intended to be composed via Ray Data primitives such as ``map_batches`` and is
kept lightweight so it can serve as a reference implementation for custom
pipelines.
"""
from __future__ import annotations
import asyncio
import base64
import hashlib
import importlib
import io
import os
import sys
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
from urllib.parse import urlparse
from pydantic import BaseModel
from ray.data.examples.data.video_processing import envs as video_envs
from ray.data.examples.data.video_processing.http_utils import HTTPConnection
try: # pragma: no cover - availability depends on environment
import av as _av_mod # type: ignore
except Exception: # pragma: no cover
_av_mod = None # type: ignore
try: # pragma: no cover
from PIL import Image as _PIL_Image # type: ignore
except Exception: # pragma: no cover
_PIL_Image = None # type: ignore
FrameType = Any # PIL.Image.Image or numpy.ndarray
def _is_http(url: str) -> bool:
try:
scheme = urlparse(url).scheme
return scheme in ("http", "https")
except Exception:
return False
def _is_data_uri(url: str) -> bool:
return isinstance(url, str) and url.startswith("data:")
def _sha256_16(s: str) -> str:
return hashlib.sha256(s.encode("utf-8")).hexdigest()[:16]
class Sampling(BaseModel):
    """Lightweight sampling configuration for ``VideoProcessor``."""
    # Target sampling rate in frames per second. Ignored when ``num_frames``
    # is set (VideoProcessor checks num_frames first).
    fps: Optional[float] = None
    # Exact number of frames to take from the start of the stream.
    num_frames: Optional[int] = None
    class Config:
        # Reject unknown keys so typos in sampling dicts fail fast.
        extra = "forbid"
class VideoProcessor:
    """Decode and sample frames from video sources.
    - Uses PyAV for decoding.
    - Network fetch/caching via HTTPConnection.
    - CPU-heavy work done in a thread to avoid blocking the event loop.
    Args:
        sampling: {"fps": k} or {"num_frames": n}. Default fps=3.0.
        cache_dir: Optional directory for disk cache.
        cache_mode: One of "auto", "disk", or "memory".
        output_format: "pil" or "numpy".
        channels_first: When numpy, output (C, H, W) if True else (H, W, C).
        timeout_s: HTTP timeout for downloads.
        max_concurrency: Semaphore limit for parallel processing.
        retries: Number of retry attempts on retriable errors (default 2).
        retry_backoff_base: Base seconds for exponential backoff.
        bypass_if_frames_present: Reserved for future use.
        pack_for_model: Reserved for future use.
        keep_downloaded: If using disk cache, keep cached file after processing.
        preprocess: PIL preprocessing dict {resize, crop, convert}.
        max_sampled_frames: Optional cap for number of sampled frames.
    """
    def __init__(
        self,
        *,
        sampling: Optional[Dict[str, Any]] = None,
        cache_dir: Optional[str] = None,
        cache_mode: str = "auto",
        output_format: str = "pil",
        channels_first: bool = False,
        timeout_s: float = 30.0,
        max_concurrency: int = 8,
        retries: int = 2,
        retry_backoff_base: float = 0.5,
        bypass_if_frames_present: bool = False,
        pack_for_model: bool = False,
        keep_downloaded: bool = False,
        preprocess: Optional[Dict[str, Any]] = None,
        max_sampled_frames: Optional[int] = None,
    ) -> None:
        # Validate the sampling dict via the pydantic model (unknown keys
        # are rejected); fall back to 3 fps when neither knob is set.
        sampling_cfg = Sampling(**(sampling or {}))
        if sampling_cfg.fps is None and sampling_cfg.num_frames is None:
            sampling_cfg.fps = 3.0
        self._sampling = sampling_cfg
        self._cache_dir = Path(cache_dir) if cache_dir else None
        self._cache_mode = cache_mode
        self._output_format = output_format
        self._channels_first = channels_first
        self._timeout_s = timeout_s
        self._retries = int(retries)
        self._retry_backoff_base = float(retry_backoff_base)
        self._bypass_if_frames_present = bypass_if_frames_present
        self._pack_for_model = pack_for_model
        self._keep_downloaded = keep_downloaded
        self._preprocess = preprocess or {}
        self._max_sampled_frames = (
            int(max_sampled_frames) if max_sampled_frames is not None else None
        )
        # Shared HTTP helper plus a semaphore bounding concurrent decodes.
        self._http = HTTPConnection()
        self._sem = asyncio.Semaphore(max_concurrency)
    async def process(self, sources: List[str]) -> List[Dict[str, Any]]:
        """Process all ``sources`` concurrently; one result dict per source."""
        if not sources:
            return []
        tasks = [self._process_one_safe(src) for src in sources]
        return await asyncio.gather(*tasks)
    async def _process_one_safe(self, source: str) -> Dict[str, Any]:
        """Process one source with retry/backoff; never raises.

        On a non-retriable error or retry exhaustion, a result dict with
        ``meta["failed"] = True`` and the error details is returned instead.
        """
        async with self._sem:
            attempt = 0
            backoff = self._retry_backoff_base
            while attempt <= self._retries:
                try:
                    # Decode in a worker thread so the event loop stays free.
                    return await asyncio.to_thread(self._process_one_sync, source)
                except Exception as e:
                    if not self._should_retry(e) or attempt == self._retries:
                        return {
                            "frames": [],
                            "meta": {
                                "failed": True,
                                "error_type": type(e).__name__,
                                "error": str(e),
                                "attempts": attempt + 1,
                                "retried": attempt > 0,
                                "source": str(source),
                                "video_num_frames": 0,
                                "frame_timestamps": [],
                            },
                        }
                    # Exponential backoff between attempts.
                    await asyncio.sleep(max(backoff, 0))
                    backoff *= 2
                    attempt += 1
    def _should_retry(self, e: Exception) -> bool:
        # Missing dependencies and malformed inputs will not fix themselves.
        non_retriable = (ImportError, ValueError)
        return not isinstance(e, non_retriable)
    def _process_one_sync(self, source: str) -> Dict[str, Any]:
        """Blocking decode of a single source; returns {"frames", "meta"}."""
        if _av_mod is None:
            raise ImportError(
                "PyAV is required for VideoProcessor. Install with `pip install av`."
            )
        if _PIL_Image is None:
            raise ImportError(
                "Pillow is required for VideoProcessor. Install with `pip install pillow`."
            )
        # Resolve to a local path, URL, or in-memory buffer per cache_mode.
        resolved, is_memory, cleanup_path = self._resolve_source_for_decode(source)
        container = None
        try:
            if is_memory:
                # PyAV sometimes needs an explicit format hint for buffers.
                try:
                    container = _av_mod.open(resolved)
                except Exception:
                    fmt_guess = self._guess_format_from_source(source) or "mp4"
                    container = _av_mod.open(resolved, format=fmt_guess)
            else:
                container = _av_mod.open(resolved)
            try:
                vstream = next(
                    s for s in container.streams if getattr(s, "type", None) == "video"
                )
            except StopIteration:
                raise ValueError("No video stream found in source")
            frames: List[FrameType] = []
            timestamps: List[float] = []
            allow_zero_samples = False
            s = self._sampling
            if s.num_frames is not None:
                # Fixed-count mode: take the first n decodable frames.
                n = max(int(s.num_frames), 1)
                if (
                    self._max_sampled_frames is not None
                    and self._max_sampled_frames >= 0
                ):
                    n = min(n, self._max_sampled_frames)
                decoded = 0
                for frame in container.decode(video=vstream.index):
                    decoded += 1
                    if getattr(frame, "pts", None) is None:
                        # No pts on the frame: synthesize a timestamp from
                        # the stream's average rate, else the sample index.
                        fps_guess = None
                        try:
                            fps_guess = (
                                float(getattr(vstream, "average_rate", 0)) or None
                            )
                        except Exception:
                            fps_guess = None
                        current_ts = (
                            len(timestamps) / fps_guess
                            if fps_guess
                            else float(len(timestamps))
                        )
                    else:
                        current_ts = float(frame.pts * vstream.time_base)
                    frames.append(self._format_frame(frame))
                    timestamps.append(current_ts)
                    if len(frames) >= n:
                        break
                    # Hard stop so corrupt files cannot decode forever.
                    if decoded >= video_envs.RAY_VIDEO_EXAMPLE_MAX_DECODE_FRAMES:
                        break
            else:
                # fps mode: precompute target timestamps, then keep the first
                # frame at or after each target.
                targets = self._build_targets(container, vstream)
                if (
                    self._max_sampled_frames is not None
                    and self._max_sampled_frames >= 0
                ):
                    targets = targets[: self._max_sampled_frames]
                if not targets:
                    # Nothing to sample (e.g. capped to zero) is not an error.
                    allow_zero_samples = True
                else:
                    target_idx = 0
                    next_target = targets[target_idx]
                    decoded = 0
                    for frame in container.decode(video=vstream.index):
                        decoded += 1
                        if getattr(frame, "pts", None) is None:
                            current_ts = len(timestamps) / ((s.fps or 30.0))
                        else:
                            current_ts = float(frame.pts * vstream.time_base)
                        # Small epsilon tolerates float jitter in pts math.
                        if current_ts + 1e-6 >= next_target:
                            frames.append(self._format_frame(frame))
                            timestamps.append(current_ts)
                            target_idx += 1
                            if target_idx >= len(targets):
                                break
                            next_target = targets[target_idx]
                        if decoded >= video_envs.RAY_VIDEO_EXAMPLE_MAX_DECODE_FRAMES:
                            break
        finally:
            # Close the container and delete any temp download. When an
            # exception is already propagating (exc_type is not None), the
            # cleanup errors are recorded but never override it.
            exc_type, _, _ = sys.exc_info()
            close_error: Optional[Exception] = None
            try:
                if container is not None:
                    container.close()
            except Exception as e:
                close_error = RuntimeError(
                    f"Failed to close PyAV container for source {self._source_repr(source, resolved, is_memory)}: {e}"
                )
                if exc_type is None:
                    raise close_error from e
            cleanup_error: Optional[Exception] = None
            if cleanup_path is not None and not self._keep_downloaded:
                try:
                    os.remove(cleanup_path)
                except Exception as e:
                    cleanup_error = RuntimeError(
                        f"Failed to remove cached file at {cleanup_path}: {e}"
                    )
                    if exc_type is None:
                        raise cleanup_error from e
            if close_error is None:
                close_error = cleanup_error
            # Defensive final raise; the earlier raises normally cover the
            # exc_type-is-None cases already.
            if exc_type is None and close_error is not None:
                raise close_error
        if not frames and not allow_zero_samples:
            raise ValueError("No frames sampled")
        # Best-effort frame-size extraction for the metadata payload.
        w = h = None
        if frames:
            if self._output_format == "pil":
                try:
                    w, h = frames[0].width, frames[0].height
                except Exception:
                    w = h = None
            else:
                arr0 = frames[0]
                try:
                    shape = getattr(arr0, "shape", None)
                    if shape is None:
                        raise ValueError("invalid numpy frame")
                    if self._channels_first:
                        _, h, w = shape
                    else:
                        h, w, _ = shape
                except Exception:
                    w = h = None
        result = {
            "frames": frames,
            "meta": {
                "video_size": [w, h] if (w and h) else None,
                "video_num_frames": len(frames),
                "frame_timestamps": timestamps,
                "source": self._source_repr(source, resolved, is_memory),
                "failed": False,
            },
        }
        return result
    def _guess_format_from_source(self, source: str) -> Optional[str]:
        """Best-effort container format hint from a MIME type or extension."""
        try:
            if _is_data_uri(source):
                header = source.split(",", 1)[0]
                if "video/" in header:
                    mime = header.split("video/")[1].split(";")[0].strip()
                    return {
                        "mp4": "mp4",
                        "webm": "webm",
                        "ogg": "ogg",
                        "quicktime": "mov",
                        "x-matroska": "matroska",
                    }.get(mime, None)
            # Fall back to the URL/path file extension.
            parsed = urlparse(source)
            ext = os.path.splitext(parsed.path or source)[1].lower().lstrip(".")
            return {
                "mp4": "mp4",
                "m4v": "mp4",
                "mov": "mov",
                "webm": "webm",
                "mkv": "matroska",
                "ogg": "ogg",
            }.get(ext, None)
        except Exception:
            return None
    def _source_repr(self, original: str, resolved: Any, is_memory: bool) -> str:
        """Human-readable label used in metadata and error messages."""
        try:
            if is_memory:
                return f"memory://{len(resolved.getbuffer())}b"
            return str(resolved)
        except Exception:
            return str(original)
    def _build_targets(self, container: Any, vstream: Any) -> List[float]:
        """Compute the timestamp grid (seconds) for fps-based sampling."""
        # Prefer the container duration; fall back to the stream duration.
        duration_s: Optional[float] = None
        try:
            if getattr(container, "duration", None) is not None and _av_mod is not None:
                duration_s = float(container.duration * _av_mod.time_base)
        except Exception:
            duration_s = None
        if duration_s is None:
            try:
                if getattr(vstream, "duration", None) is not None:
                    duration_s = float(vstream.duration * float(vstream.time_base))
            except Exception:
                duration_s = None
        s = self._sampling
        targets: List[float] = []
        if s.fps is not None:
            if duration_s is None:
                # Unknown duration: sample about 2 seconds worth of targets.
                limit = max(int(s.fps * 2), 1)
                limit = min(limit, video_envs.RAY_VIDEO_EXAMPLE_MAX_TARGETS)
                targets = [i / s.fps for i in range(limit)]
            else:
                n = int(max(duration_s, 0.0) * s.fps) + 1
                n = max(1, min(n, video_envs.RAY_VIDEO_EXAMPLE_MAX_TARGETS))
                targets = [i / s.fps for i in range(n)]
        return targets
    def _apply_preprocess_pil(self, img: Any) -> Any:
        """Apply the optional resize/crop/convert steps to a PIL image."""
        if not self._preprocess:
            return img
        r = self._preprocess.get("resize")
        if r and isinstance(r, dict) and "size" in r:
            # Resolve the resampling filter across Pillow versions (the enum
            # moved to PIL.Image.Resampling in newer releases).
            resample_name = r.get("resample", "BILINEAR")
            method = None
            try:
                method = (
                    getattr(_PIL_Image, resample_name, None) if _PIL_Image else None
                )
                if method is None and _PIL_Image is not None:
                    Resampling = getattr(_PIL_Image, "Resampling", None)
                    if Resampling is not None:
                        method = getattr(Resampling, resample_name, None)
            except Exception:
                method = None
            if method is None:
                # 2 == BILINEAR in Pillow's legacy integer constants.
                method = 2
            img = img.resize(tuple(r["size"]), method)
        c = self._preprocess.get("crop")
        if c and isinstance(c, dict) and "box" in c:
            img = img.crop(tuple(c["box"]))
        conv = self._preprocess.get("convert")
        if isinstance(conv, str):
            img = img.convert(conv)
        return img
    def _format_frame(self, frame: Any) -> FrameType:
        """Convert a decoded PyAV frame to the configured output format."""
        if self._output_format == "pil":
            img = frame.to_image()
            img = self._apply_preprocess_pil(img)
            return img
        else:
            # numpy output; import lazily since it is only needed here.
            try:
                np = importlib.import_module("numpy")
            except Exception as e:
                raise ImportError(
                    "NumPy is required for numpy output_format. Install with `pip install numpy`."
                ) from e
            if self._preprocess:
                # Preprocessing runs on PIL, then converts back to ndarray.
                img = frame.to_image()
                img = self._apply_preprocess_pil(img)
                arr = np.array(img)
                if getattr(arr, "ndim", 0) < 2 or arr.size == 0:
                    raise ValueError(
                        "Failed to convert preprocessed PIL image to a valid numpy array"
                    )
            else:
                arr = frame.to_ndarray(format="rgb24")
                if not hasattr(arr, "shape"):
                    raise ValueError("invalid numpy frame")
            # Promote grayscale (H, W) to (H, W, 1) for a consistent rank.
            if getattr(arr, "ndim", 0) == 2:
                arr = np.expand_dims(arr, -1)
            if self._channels_first:
                return arr.transpose(2, 0, 1)
            return arr
    def _resolve_source_for_decode(
        self, source: str
    ) -> Tuple[Union[str, io.BytesIO], bool, Optional[str]]:
        """Return (resolved, is_memory, cleanup_path).
        cache_mode:
            - "auto": download to disk if uniform sampling (num_frames) likely needs seek; otherwise stream.
            - "disk": always download to disk when http/https.
            - "memory": fetch into BytesIO when http/https or data URI.
        """
        if _is_data_uri(source):
            try:
                header, b64 = source.split(",", 1)
                raw = base64.b64decode(b64)
                return io.BytesIO(raw), True, None
            except Exception as e:
                raise ValueError(f"Invalid data URI: {e}") from e
        parsed = urlparse(source)
        # Local file paths (with or without a file:// scheme) pass through.
        if parsed.scheme in ("file", "") and os.path.exists(parsed.path or source):
            return (parsed.path or source), False, None
        if _is_http(source):
            use_disk = self._cache_dir is not None and (
                self._cache_mode == "disk"
                or (
                    self._cache_mode == "auto" and self._sampling.num_frames is not None
                )
            )
            use_memory = self._cache_mode == "memory"
            if use_memory:
                data = self._http.download_bytes_chunked(
                    source, timeout=self._timeout_s
                )
                return io.BytesIO(data), True, None
            if use_disk:
                self._cache_dir.mkdir(parents=True, exist_ok=True)
                # Content-addressed name; download to a temp file and rename
                # atomically so readers never observe a partial download.
                fname = f"video-{_sha256_16(source)}.bin"
                tmp = self._cache_dir / f".{fname}.tmp"
                final = self._cache_dir / fname
                self._http.download_file(source, tmp, timeout=self._timeout_s)
                os.replace(tmp, final)
                return (
                    str(final),
                    False,
                    (None if self._keep_downloaded else str(final)),
                )
            return source, False, None
        return source, False, None
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/examples/data/video_processing/video_processor.py",
"license": "Apache License 2.0",
"lines": 443,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/tests/test_map_batches.py | import asyncio
import itertools
import math
import os
import time
from typing import Iterator
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import ray
from ray.data._internal.arrow_ops.transform_pyarrow import (
MIN_PYARROW_VERSION_TYPE_PROMOTION,
)
from ray.data._internal.utils.arrow_utils import get_pyarrow_version
from ray.data.context import DataContext
from ray.data.dataset import Dataset
from ray.data.exceptions import UserCodeException
from ray.data.tests.conftest import * # noqa
from ray.data.tests.test_util import ConcurrencyCounter # noqa
from ray.data.tests.util import column_udf, extract_values
from ray.tests.conftest import * # noqa
# Helper function to process timestamp data in nanoseconds
def process_timestamp_data(row):
    """Bump ``row["timestamp"]`` forward by one nanosecond, in place."""
    ts = row["timestamp"]
    # numpy datetime64 values are promoted to pandas Timestamps so the
    # nanosecond arithmetic below is well-defined.
    if isinstance(ts, np.datetime64):
        ts = pd.Timestamp(ts)
    ts = ts + pd.Timedelta(1, "ns")
    # Normalize/validate the result as datetime64[ns]; raise on bad values.
    row["timestamp"] = pd.to_datetime(ts, errors="raise")
    return row
def process_timestamp_data_batch_arrow(batch: pa.Table) -> pa.Table:
    """Add 1ns to the ``timestamp`` column of an Arrow table (via pandas)."""
    # Round-trip through pandas because the tweak is easiest to express there.
    frame = batch.to_pandas()
    frame["timestamp"] = (
        frame["timestamp"].apply(
            lambda value: pd.Timestamp(value)
            if isinstance(value, np.datetime64)
            else value
        )
        + pd.Timedelta(1, "ns")
    )
    return pa.table(frame)
def process_timestamp_data_batch_pandas(batch: pd.DataFrame) -> pd.DataFrame:
    """Shift every value in the ``timestamp`` column forward by 1ns."""
    batch["timestamp"] += pd.Timedelta(1, "ns")
    return batch
def test_map_batches_basic(
    ray_start_regular_shared,
    tmp_path,
    restore_data_context,
    target_max_block_size_infinite_or_default,
):
    """Smoke-test map_batches: input validation, pandas/pyarrow formats,
    batch sizing, and UDFs whose return type differs from batch_format."""
    ctx = DataContext.get_current()
    # Deterministic block order so take() results are directly comparable.
    ctx.execution_options.preserve_order = True

    # Test input validation
    ds = ray.data.range(5)
    with pytest.raises(ValueError):
        ds.map_batches(
            column_udf("id", lambda x: x + 1), batch_format="pyarrow", batch_size=-1
        ).take()

    # Set up.
    df = pd.DataFrame({"one": [1, 2, 3], "two": [2, 3, 4]})
    table = pa.Table.from_pandas(df)
    pq.write_table(table, os.path.join(tmp_path, "test1.parquet"))

    # Test pandas
    ds = ray.data.read_parquet(str(tmp_path))
    ds2 = ds.map_batches(lambda df: df + 1, batch_size=1, batch_format="pandas")
    ds_list = ds2.take()
    values = [s["one"] for s in ds_list]
    assert values == [2, 3, 4]
    values = [s["two"] for s in ds_list]
    assert values == [3, 4, 5]

    # Test Pyarrow
    ds = ray.data.read_parquet(str(tmp_path))
    ds2 = ds.map_batches(lambda pa: pa, batch_size=1, batch_format="pyarrow")
    ds_list = ds2.take()
    values = [s["one"] for s in ds_list]
    assert values == [1, 2, 3]
    values = [s["two"] for s in ds_list]
    assert values == [2, 3, 4]

    # Test batch
    size = 300
    ds = ray.data.range(size)
    ds2 = ds.map_batches(lambda df: df + 1, batch_size=17, batch_format="pandas")
    ds_list = ds2.take_all()
    for i in range(size):
        # The pandas column is "value", and it originally has rows from 0~299.
        # After the map batch, it should have 1~300.
        row = ds_list[i]
        assert row["id"] == i + 1
    assert ds.count() == 300

    # Test the lambda returns different types than the batch_format
    # pandas => list block
    ds = ray.data.read_parquet(str(tmp_path))
    ds2 = ds.map_batches(lambda df: {"id": np.array([1])}, batch_size=1)
    ds_list = extract_values("id", ds2.take())
    assert ds_list == [1, 1, 1]
    assert ds.count() == 3

    # pyarrow => list block
    ds = ray.data.read_parquet(str(tmp_path))
    ds2 = ds.map_batches(
        lambda df: {"id": np.array([1])}, batch_size=1, batch_format="pyarrow"
    )
    ds_list = extract_values("id", ds2.take())
    assert ds_list == [1, 1, 1]
    assert ds.count() == 3

    # Test the wrong return value raises an exception.
    ds = ray.data.read_parquet(str(tmp_path))
    with pytest.raises(ValueError):
        ds_list = ds.map_batches(
            lambda df: 1, batch_size=2, batch_format="pyarrow"
        ).take()
def test_map_batches_extra_args(
    shutdown_only, tmp_path, target_max_block_size_infinite_or_default
):
    """Exercise the fn_args/fn_kwargs and fn_constructor_args/kwargs plumbing
    of map_batches for plain functions, callable classes, and chains of both."""
    ray.shutdown()
    ray.init(num_cpus=3)

    def put(x):
        # We only support automatic deref in the legacy backend.
        return x

    # Test input validation
    ds = ray.data.range(5)

    class Foo:
        def __call__(self, df):
            return df

    with pytest.raises(ValueError):
        # fn_constructor_args and fn_constructor_kwargs only supported for actor
        # compute strategy.
        ds.map_batches(
            lambda x: x,
            fn_constructor_args=(1,),
            fn_constructor_kwargs={"a": 1},
        )

    with pytest.raises(ValueError):
        # fn_constructor_args and fn_constructor_kwargs only supported for callable
        # class UDFs.
        ds.map_batches(
            lambda x: x,
            fn_constructor_args=(1,),
            fn_constructor_kwargs={"a": 1},
        )

    # Set up.
    df = pd.DataFrame({"one": [1, 2, 3], "two": [2, 3, 4]})
    table = pa.Table.from_pandas(df)
    pq.write_table(table, os.path.join(tmp_path, "test1.parquet"))

    # Test extra UDF args.
    # Test positional.
    def udf(batch, a):
        assert a == 1
        return batch + a

    ds = ray.data.read_parquet(str(tmp_path))
    ds2 = ds.map_batches(
        udf,
        batch_size=1,
        batch_format="pandas",
        fn_args=(put(1),),
    )
    ds_list = ds2.take()
    values = sorted([s["one"] for s in ds_list])
    assert values == [2, 3, 4]
    values = sorted([s["two"] for s in ds_list])
    assert values == [3, 4, 5]

    # Test kwargs.
    def udf(batch, b=None):
        assert b == 2
        return b * batch

    ds = ray.data.read_parquet(str(tmp_path))
    ds2 = ds.map_batches(
        udf,
        batch_size=1,
        batch_format="pandas",
        fn_kwargs={"b": put(2)},
    )
    ds_list = ds2.take()
    values = sorted([s["one"] for s in ds_list])
    assert values == [2, 4, 6]
    values = sorted([s["two"] for s in ds_list])
    assert values == [4, 6, 8]

    # Test both.
    def udf(batch, a, b=None):
        assert a == 1
        assert b == 2
        return b * batch + a

    ds = ray.data.read_parquet(str(tmp_path))
    ds2 = ds.map_batches(
        udf,
        batch_size=1,
        batch_format="pandas",
        fn_args=(put(1),),
        fn_kwargs={"b": put(2)},
    )
    ds_list = ds2.take()
    values = sorted([s["one"] for s in ds_list])
    assert values == [3, 5, 7]
    values = sorted([s["two"] for s in ds_list])
    assert values == [5, 7, 9]

    # Test constructor UDF args.
    # Test positional.
    class CallableFn:
        def __init__(self, a):
            assert a == 1
            self.a = a

        def __call__(self, x):
            return x + self.a

    ds = ray.data.read_parquet(str(tmp_path))
    ds2 = ds.map_batches(
        CallableFn,
        concurrency=1,
        batch_size=1,
        batch_format="pandas",
        fn_constructor_args=(put(1),),
    )
    ds_list = ds2.take()
    values = sorted([s["one"] for s in ds_list])
    assert values == [2, 3, 4]
    values = sorted([s["two"] for s in ds_list])
    assert values == [3, 4, 5]

    # Test kwarg.
    class CallableFn:
        def __init__(self, b=None):
            assert b == 2
            self.b = b

        def __call__(self, x):
            return self.b * x

    ds = ray.data.read_parquet(str(tmp_path))
    ds2 = ds.map_batches(
        CallableFn,
        concurrency=1,
        batch_size=1,
        batch_format="pandas",
        fn_constructor_kwargs={"b": put(2)},
    )
    ds_list = ds2.take()
    values = sorted([s["one"] for s in ds_list])
    assert values == [2, 4, 6]
    values = sorted([s["two"] for s in ds_list])
    assert values == [4, 6, 8]

    # Test both.
    class CallableFn:
        def __init__(self, a, b=None):
            assert a == 1
            assert b == 2
            self.a = a
            self.b = b

        def __call__(self, x):
            return self.b * x + self.a

    ds = ray.data.read_parquet(str(tmp_path))
    ds2 = ds.map_batches(
        CallableFn,
        concurrency=1,
        batch_size=1,
        batch_format="pandas",
        fn_constructor_args=(put(1),),
        fn_constructor_kwargs={"b": put(2)},
    )
    ds_list = ds2.take()
    values = sorted([s["one"] for s in ds_list])
    assert values == [3, 5, 7]
    values = sorted([s["two"] for s in ds_list])
    assert values == [5, 7, 9]

    # Test callable chain.
    ds = ray.data.read_parquet(str(tmp_path))
    fn_constructor_args = (put(1),)
    fn_constructor_kwargs = {"b": put(2)}
    ds2 = ds.map_batches(
        CallableFn,
        concurrency=1,
        batch_size=1,
        batch_format="pandas",
        fn_constructor_args=fn_constructor_args,
        fn_constructor_kwargs=fn_constructor_kwargs,
    ).map_batches(
        CallableFn,
        concurrency=1,
        batch_size=1,
        batch_format="pandas",
        fn_constructor_args=fn_constructor_args,
        fn_constructor_kwargs=fn_constructor_kwargs,
    )
    ds_list = ds2.take()
    values = sorted([s["one"] for s in ds_list])
    assert values == [7, 11, 15]
    values = sorted([s["two"] for s in ds_list])
    assert values == [11, 15, 19]

    # Test function + callable chain.
    ds = ray.data.read_parquet(str(tmp_path))
    fn_constructor_args = (put(1),)
    fn_constructor_kwargs = {"b": put(2)}
    ds2 = ds.map_batches(
        lambda df, a, b=None: b * df + a,
        batch_size=1,
        batch_format="pandas",
        fn_args=(put(1),),
        fn_kwargs={"b": put(2)},
    ).map_batches(
        CallableFn,
        concurrency=1,
        batch_size=1,
        batch_format="pandas",
        fn_constructor_args=fn_constructor_args,
        fn_constructor_kwargs=fn_constructor_kwargs,
    )
    ds_list = ds2.take()
    values = sorted([s["one"] for s in ds_list])
    assert values == [7, 11, 15]
    values = sorted([s["two"] for s in ds_list])
    assert values == [11, 15, 19]
@pytest.mark.parametrize("method", [Dataset.map, Dataset.map_batches, Dataset.flat_map])
def test_map_with_memory_resources(
    method, shutdown_only, target_max_block_size_infinite_or_default
):
    """Test that we can use memory resource to limit the concurrency."""
    num_blocks = 50
    memory_per_task = 100 * 1024**2
    max_concurrency = 5
    # Cluster memory only fits `max_concurrency` tasks at once.
    ray.init(num_cpus=num_blocks, _memory=memory_per_task * max_concurrency)

    concurrency_counter = ConcurrencyCounter.remote()

    def map_fn(row_or_batch):
        # Track how many copies of this UDF run simultaneously.
        ray.get(concurrency_counter.inc.remote())
        time.sleep(0.5)
        ray.get(concurrency_counter.decr.remote())
        if method is Dataset.flat_map:
            # flat_map UDFs must return a list of rows.
            return [row_or_batch]
        else:
            return row_or_batch

    ds = ray.data.range(num_blocks, override_num_blocks=num_blocks)
    if method is Dataset.map:
        ds = ds.map(
            map_fn,
            num_cpus=1,
            memory=memory_per_task,
        )
    elif method is Dataset.map_batches:
        ds = ds.map_batches(
            map_fn,
            batch_size=None,
            num_cpus=1,
            memory=memory_per_task,
        )
    elif method is Dataset.flat_map:
        ds = ds.flat_map(
            map_fn,
            num_cpus=1,
            memory=memory_per_task,
        )
    assert len(ds.take(num_blocks)) == num_blocks

    actual_max_concurrency = ray.get(concurrency_counter.get_max_concurrency.remote())
    assert actual_max_concurrency <= max_concurrency
def test_map_batches_generator(
    ray_start_regular_shared, tmp_path, target_max_block_size_infinite_or_default
):
    """map_batches should accept generator UDFs and reject invalid yields."""
    # Set up.
    df = pd.DataFrame({"one": [1, 2, 3], "two": [2, 3, 4]})
    table = pa.Table.from_pandas(df)
    pq.write_table(table, os.path.join(tmp_path, "test1.parquet"))

    def pandas_generator(batch: pd.DataFrame) -> Iterator[pd.DataFrame]:
        # Yield one single-row frame per input row, incremented by 1.
        for i in range(len(batch)):
            yield batch.iloc[[i]] + 1

    ds = ray.data.read_parquet(str(tmp_path))
    ds2 = ds.map_batches(pandas_generator, batch_size=1, batch_format="pandas")
    ds_list = ds2.take()
    values = sorted([s["one"] for s in ds_list])
    assert values == [2, 3, 4]
    values = sorted([s["two"] for s in ds_list])
    assert values == [3, 4, 5]

    def fail_generator(batch):
        # Yields plain ints, which are not a valid batch type.
        for i in range(len(batch)):
            yield i

    # Test the wrong return value raises an exception.
    ds = ray.data.read_parquet(str(tmp_path))
    with pytest.raises(ValueError):
        ds_list = ds.map_batches(
            fail_generator, batch_size=2, batch_format="pyarrow"
        ).take()
def test_map_batches_actors_preserves_order(
    shutdown_only, target_max_block_size_infinite_or_default
):
    """Actor-based map_batches must emit blocks in input order."""

    class UDFClass:
        # Identity UDF; only block ordering is under test.
        def __call__(self, x):
            return x

    ray.shutdown()
    ray.init(num_cpus=2)
    # Test that actor compute model preserves block order.
    ds = ray.data.range(10, override_num_blocks=5)
    assert extract_values("id", ds.map_batches(UDFClass, concurrency=1).take()) == list(
        range(10)
    )
@pytest.mark.parametrize(
    "num_rows,num_blocks,batch_size",
    [
        (10, 5, 2),
        (10, 1, 10),
        (12, 3, 2),
    ],
)
def test_map_batches_batch_mutation(
    ray_start_regular_shared,
    num_rows,
    num_blocks,
    batch_size,
    restore_data_context,
    target_max_block_size_infinite_or_default,
):
    """In-place mutation of batches must work when zero_copy_batch=False."""
    ctx = DataContext.get_current()
    ctx.execution_options.preserve_order = True

    # Test that batch mutation works without encountering a read-only error (e.g. if the
    # batch is a zero-copy view on data in the object store).
    def mutate(df):
        df["id"] += 1
        return df

    ds = ray.data.range(num_rows, override_num_blocks=num_blocks).repartition(
        num_blocks
    )
    # Convert to Pandas blocks.
    ds = ds.map_batches(lambda df: df, batch_format="pandas", batch_size=None)

    # Apply UDF that mutates the batches.
    ds = ds.map_batches(mutate, batch_size=batch_size, zero_copy_batch=False)
    assert [row["id"] for row in ds.iter_rows()] == list(range(1, num_rows + 1))
@pytest.mark.parametrize(
    "num_rows,num_blocks,batch_size",
    [
        (10, 5, 2),
        (10, 1, 10),
        (12, 3, 2),
    ],
)
def test_map_batches_batch_zero_copy(
    ray_start_regular_shared,
    num_rows,
    num_blocks,
    batch_size,
    target_max_block_size_infinite_or_default,
):
    """Mutating a zero-copy batch must raise a descriptive error."""

    # Test that batches are zero-copy read-only views when zero_copy_batch=True.
    def mutate(df):
        # Check that batch is read-only.
        assert not df.values.flags.writeable
        df["id"] += 1
        return df

    ds = ray.data.range(num_rows, override_num_blocks=num_blocks).repartition(
        num_blocks
    )
    # Convert to Pandas blocks.
    ds = ds.map_batches(lambda df: df, batch_format="pandas", batch_size=None)
    ds = ds.materialize()

    # Apply UDF that mutates the batches, which should fail since the batch is
    # read-only.
    with pytest.raises(UserCodeException):
        with pytest.raises(
            ValueError, match="tried to mutate a zero-copy read-only batch"
        ):
            ds = ds.map_batches(
                mutate,
                batch_format="pandas",
                batch_size=batch_size,
                zero_copy_batch=True,
            )
            ds.materialize()
# All (block_size, batch_size) pairs with batch sizes 1..7 and block sizes
# ranging up to twice the batch size.
BLOCK_BUNDLING_TEST_CASES = list(
    itertools.chain.from_iterable(
        ((block_size, batch_size) for block_size in range(1, 2 * batch_size + 1))
        for batch_size in range(1, 8)
    )
)
@pytest.mark.parametrize("block_size,batch_size", BLOCK_BUNDLING_TEST_CASES)
def test_map_batches_block_bundling_auto(
    ray_start_regular_shared,
    block_size,
    batch_size,
    target_max_block_size_infinite_or_default,
):
    """Blocks should be bundled automatically to satisfy batch_size."""
    # Ensure that we test at least 2 batches worth of blocks.
    num_blocks = max(10, 2 * batch_size // block_size)
    ds = ray.data.range(num_blocks * block_size, override_num_blocks=num_blocks)
    # Confirm that we have the expected number of initial blocks.
    assert ds._plan.initial_num_blocks() == num_blocks

    # Blocks should be bundled up to the batch size.
    ds1 = ds.map_batches(lambda x: x, batch_size=batch_size).materialize()
    num_expected_blocks = math.ceil(
        # If batch_size > block_size, then multiple blocks will be clumped
        # together to make sure there are at least batch_size rows
        num_blocks
        / max(math.ceil(batch_size / block_size), 1)
    )

    assert ds1._plan.initial_num_blocks() == num_expected_blocks

    # Blocks should not be bundled up when batch_size is not specified.
    ds2 = ds.map_batches(lambda x: x).materialize()
    assert ds2._plan.initial_num_blocks() == num_blocks
@pytest.mark.parametrize(
    "block_sizes,batch_size,expected_num_blocks",
    [
        ([1, 2], 3, 1),
        ([2, 2, 1], 3, 2),
        ([1, 2, 3, 4], 4, 2),
        ([3, 1, 1, 3], 4, 2),
        ([2, 4, 1, 8], 4, 2),
        ([1, 1, 1, 1], 4, 1),
        ([1, 0, 3, 2], 4, 2),
        ([4, 4, 4, 4], 4, 4),
    ],
)
def test_map_batches_block_bundling_skewed_manual(
    ray_start_regular_shared,
    block_sizes,
    batch_size,
    expected_num_blocks,
    target_max_block_size_infinite_or_default,
):
    """Hand-picked skewed block-size layouts must bundle to known counts."""
    num_blocks = len(block_sizes)
    # One pandas block per entry, with `block_size` rows each.
    ds = ray.data.from_blocks(
        [pd.DataFrame({"a": [1] * block_size}) for block_size in block_sizes]
    )
    # Confirm that we have the expected number of initial blocks.
    assert ds._plan.initial_num_blocks() == num_blocks
    ds = ds.map_batches(lambda x: x, batch_size=batch_size).materialize()

    # Blocks should be bundled up to the batch size.
    assert ds._plan.initial_num_blocks() == expected_num_blocks
def _block_bundling_skewed_cases():
    """Yield (block_sizes, batch_size) pairs: every tuple of 1..batch_size
    block sizes, each size drawn from 1..2*batch_size, for batch sizes 1..3."""
    for batch_size in range(1, 4):
        size_choices = range(1, 2 * batch_size + 1)
        for num_blocks in range(1, batch_size + 1):
            for block_sizes in itertools.product(size_choices, repeat=num_blocks):
                yield (block_sizes, batch_size)


BLOCK_BUNDLING_SKEWED_TEST_CASES = list(_block_bundling_skewed_cases())
@pytest.mark.parametrize("block_sizes,batch_size", BLOCK_BUNDLING_SKEWED_TEST_CASES)
def test_map_batches_block_bundling_skewed_auto(
    ray_start_regular_shared,
    block_sizes,
    batch_size,
    target_max_block_size_infinite_or_default,
):
    """Exhaustively compare bundling against a reference greedy computation."""
    num_blocks = len(block_sizes)
    ds = ray.data.from_blocks(
        [pd.DataFrame({"a": [1] * block_size}) for block_size in block_sizes]
    )
    # Confirm that we have the expected number of initial blocks.
    assert ds._plan.initial_num_blocks() == num_blocks
    ds = ds.map_batches(lambda x: x, batch_size=batch_size).materialize()
    # Reference: greedily accumulate input blocks until the running row count
    # reaches batch_size, then start a new output block.
    curr = 0
    num_out_blocks = 0
    for block_size in block_sizes:
        if curr >= batch_size:
            num_out_blocks += 1
            curr = 0
        curr += block_size
    if curr > 0:
        num_out_blocks += 1

    # Blocks should be bundled up to the batch size.
    assert ds._plan.initial_num_blocks() == num_out_blocks
def test_map_batches_preserve_empty_blocks(
    ray_start_regular_shared, target_max_block_size_infinite_or_default
):
    """Empty output blocks should survive subsequent map_batches stages."""
    ds = ray.data.range(10, override_num_blocks=10)
    # First map drops every row, leaving 10 empty blocks.
    ds = ds.map_batches(lambda x: [])
    ds = ds.map_batches(lambda x: x)
    assert ds._plan.initial_num_blocks() == 10, ds
def test_map_batches_combine_empty_blocks(
    ray_start_regular_shared, target_max_block_size_infinite_or_default
):
    """Empty input blocks must not change map_batches results."""
    xs = [x % 3 for x in list(range(100))]

    # ds1 has 1 block which contains 100 rows.
    ds1 = ray.data.from_items(xs).repartition(1).sort("item").map_batches(lambda x: x)
    assert ds1._block_num_rows() == [100]

    # ds2 has 30 blocks, but only 3 of them are non-empty
    ds2 = (
        ray.data.from_items(xs)
        .repartition(30)
        .sort("item")
        .map_batches(lambda x: x, batch_size=1)
    )
    assert len(ds2._block_num_rows()) == 3
    count = sum(1 for x in ds2._block_num_rows() if x > 0)
    assert count == 3

    # The number of partitions should not affect the map_batches() result.
    assert ds1.take_all() == ds2.take_all()
# NOTE: All tests above share a Ray cluster, while the tests below do not. These
# tests should only be carefully reordered to retain this invariant!
@pytest.mark.parametrize(
    "df, expected_df",
    [
        pytest.param(
            pd.DataFrame(
                {
                    "id": [1, 2, 3],
                    "timestamp": pd.to_datetime(
                        [
                            "2024-01-01 00:00:00.123456789",
                            "2024-01-02 00:00:00.987654321",
                            "2024-01-03 00:00:00.111222333",
                        ]
                    ),
                    "value": [10.123456789, 20.987654321, 30.111222333],
                }
            ),
            pd.DataFrame(
                {
                    "id": [1, 2, 3],
                    "timestamp": pd.to_datetime(
                        [
                            "2024-01-01 00:00:00.123456790",
                            "2024-01-02 00:00:00.987654322",
                            "2024-01-03 00:00:00.111222334",
                        ]
                    ),
                    "value": [10.123456789, 20.987654321, 30.111222333],
                }
            ),
            id="nanoseconds_increment",
        )
    ],
)
def test_map_batches_timestamp_nanosecs(
    df, expected_df, ray_start_regular_shared, target_max_block_size_infinite_or_default
):
    """Verify handling timestamp with nanosecs in map_batches"""
    ray_data = ray.data.from_pandas(df)

    # Using pyarrow format
    result_arrow = ray_data.map_batches(
        process_timestamp_data_batch_arrow, batch_format="pyarrow"
    )
    processed_df_arrow = result_arrow.to_pandas()
    # Normalize dtype to datetime64[ns] before comparing frames.
    processed_df_arrow["timestamp"] = processed_df_arrow["timestamp"].astype(
        "datetime64[ns]"
    )
    pd.testing.assert_frame_equal(processed_df_arrow, expected_df)

    # Using pandas format
    result_pandas = ray_data.map_batches(
        process_timestamp_data_batch_pandas, batch_format="pandas"
    )
    processed_df_pandas = result_pandas.to_pandas()
    processed_df_pandas["timestamp"] = processed_df_pandas["timestamp"].astype(
        "datetime64[ns]"
    )
    pd.testing.assert_frame_equal(processed_df_pandas, expected_df)
def test_map_batches_async_exception_propagation(shutdown_only):
    """Exceptions raised inside async generator UDFs must propagate to the caller."""
    ray.shutdown()
    ray.init(num_cpus=2)

    class MyUDF:
        def __init__(self):
            pass

        async def __call__(self, batch):
            # This will trigger an assertion error.
            assert False
            yield batch

    ds = ray.data.range(20)
    ds = ds.map_batches(MyUDF, concurrency=2)

    with pytest.raises(ray.exceptions.RayTaskError) as exc_info:
        ds.materialize()

    # The original assertion (type and source line) must appear in the error.
    assert "AssertionError" in str(exc_info.value)
    assert "assert False" in str(exc_info.value)
def test_map_batches_async_generator_fast_yield(
    shutdown_only, target_max_block_size_infinite_or_default
):
    # Tests the case where the async generator yields immediately,
    # with a high number of tasks in flight, which results in
    # the internal queue being almost instantaneously filled.
    # This test ensures that the internal queue is completely drained in this scenario.
    ray.shutdown()
    ray.init(num_cpus=4)

    async def task_yield(row):
        return row

    class AsyncActor:
        def __init__(self):
            pass

        async def __call__(self, batch):
            # Fan out one asyncio task per row and yield results as they finish.
            rows = [{"id": np.array([i])} for i in batch["id"]]
            tasks = [asyncio.create_task(task_yield(row)) for row in rows]
            for task in tasks:
                yield await task

    n = 8
    ds = ray.data.range(n, override_num_blocks=n)
    ds = ds.map_batches(
        AsyncActor,
        batch_size=n,
        compute=ray.data.ActorPoolStrategy(size=1, max_tasks_in_flight_per_actor=n),
        concurrency=1,
        max_concurrency=n,
    )

    output = ds.take_all()
    expected_output = [{"id": i} for i in range(n)]
    # Because all tasks are submitted almost simultaneously,
    # the output order may be different compared to the original input.
    assert len(output) == len(expected_output), (len(output), len(expected_output))
@pytest.mark.skipif(
    get_pyarrow_version() < MIN_PYARROW_VERSION_TYPE_PROMOTION,
    reason="Requires PyArrow >= 14.0.0 for type promotion in nested struct fields",
)
def test_map_batches_struct_field_type_divergence(shutdown_only):
    """Test map_batches with struct fields that have diverging primitive types."""

    def generator_fn(batch):
        for i, row_id in enumerate(batch["id"]):
            if i % 2 == 0:
                # Yield struct with fields (a: int64, b: string)
                yield {"data": [{"a": 1, "b": "hello"}]}
            else:
                # Yield struct with fields (a: float64, c: int32)
                # Field 'a' has different type, field 'b' missing, field 'c' new
                yield {"data": [{"a": 1.5, "c": 100}]}

    ds = ray.data.range(4, override_num_blocks=1)
    ds = ds.map_batches(generator_fn, batch_size=4)
    result = ds.materialize()

    rows = result.take_all()
    assert len(rows) == 4
    # Sort to make the order deterministic.
    rows.sort(key=lambda r: (r["data"]["a"], str(r["data"]["b"])))
    # Rows with a=1.0 (originally int) should have int cast to float, with c=None
    assert rows[0]["data"] == {"a": 1.0, "b": "hello", "c": None}
    assert rows[1]["data"] == {"a": 1.0, "b": "hello", "c": None}
    # Rows with a=1.5 should have float a, with b=None
    assert rows[2]["data"] == {"a": 1.5, "b": None, "c": 100}
    assert rows[3]["data"] == {"a": 1.5, "b": None, "c": 100}
if __name__ == "__main__":
    import sys

    # Allow running this test module directly by delegating to pytest.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_map_batches.py",
"license": "Apache License 2.0",
"lines": 715,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/test_with_column.py | import pandas as pd
import pyarrow as pa
import pyarrow.compute as pc
import pytest
from pkg_resources import parse_version
import ray
from ray.data._internal.util import rows_same
from ray.data._internal.utils.arrow_utils import get_pyarrow_version
from ray.data.datatype import DataType
from ray.data.exceptions import UserCodeException
from ray.data.expressions import col, lit, udf
from ray.data.tests.conftest import * # noqa
from ray.exceptions import RayTaskError
from ray.tests.conftest import * # noqa
@pytest.mark.skipif(
    get_pyarrow_version() < parse_version("20.0.0"),
    reason="with_column requires PyArrow >= 20.0.0",
)
@pytest.mark.parametrize(
    "column_name, expr, expected_value",
    [
        # Arithmetic operations
        ("result", col("id") + 1, 1),  # 0 + 1 = 1
        ("result", col("id") + 5, 5),  # 0 + 5 = 5
        ("result", col("id") - 1, -1),  # 0 - 1 = -1
        ("result", col("id") * 2, 0),  # 0 * 2 = 0
        ("result", col("id") * 3, 0),  # 0 * 3 = 0
        ("result", col("id") / 2, 0.0),  # 0 / 2 = 0.0
        # More complex arithmetic
        ("result", (col("id") + 1) * 2, 2),  # (0 + 1) * 2 = 2
        ("result", (col("id") * 2) + 3, 3),  # 0 * 2 + 3 = 3
        # Comparison operations
        ("result", col("id") > 0, False),  # 0 > 0 = False
        ("result", col("id") >= 0, True),  # 0 >= 0 = True
        ("result", col("id") < 1, True),  # 0 < 1 = True
        ("result", col("id") <= 0, True),  # 0 <= 0 = True
        ("result", col("id") == 0, True),  # 0 == 0 = True
        # Operations with literals
        ("result", col("id") + lit(10), 10),  # 0 + 10 = 10
        ("result", col("id") * lit(5), 0),  # 0 * 5 = 0
        ("result", lit(2) + col("id"), 2),  # 2 + 0 = 2
        ("result", lit(10) / (col("id") + 1), 10.0),  # 10 / (0 + 1) = 10.0
    ],
)
def test_with_column(
    ray_start_regular_shared,
    column_name,
    expr,
    expected_value,
    target_max_block_size_infinite_or_default,
):
    """Verify that `with_column` works with various operations."""
    ds = ray.data.range(5).with_column(column_name, expr)
    result = ds.take(1)[0]
    # Row 0 of range(5) has id == 0; check the derived column against it.
    assert result["id"] == 0
    assert result[column_name] == expected_value
@pytest.mark.skipif(
    get_pyarrow_version() < parse_version("20.0.0"),
    reason="with_column requires PyArrow >= 20.0.0",
)
def test_with_column_nonexistent_column(
    ray_start_regular_shared, target_max_block_size_infinite_or_default
):
    """Verify that referencing a non-existent column with col() raises an exception."""
    # Create a dataset with known column "id"
    ds = ray.data.range(5)

    # Try to reference a non-existent column - this should raise an exception
    with pytest.raises(UserCodeException):
        ds.with_column("result", col("nonexistent_column") + 1).materialize()
@pytest.mark.skipif(
    get_pyarrow_version() < parse_version("20.0.0"),
    reason="with_column requires PyArrow >= 20.0.0",
)
def test_with_column_multiple_expressions(
    ray_start_regular_shared, target_max_block_size_infinite_or_default
):
    """Verify that `with_column` correctly handles multiple expressions at once."""
    ds = ray.data.range(5)
    ds = ds.with_column("plus_one", col("id") + 1)
    ds = ds.with_column("times_two", col("id") * 2)
    ds = ds.with_column("ten_minus_id", 10 - col("id"))

    # Row 0 has id == 0, so the derived values are fixed.
    first_row = ds.take(1)[0]
    assert first_row["id"] == 0
    assert first_row["plus_one"] == 1
    assert first_row["times_two"] == 0
    assert first_row["ten_minus_id"] == 10

    # Ensure all new columns exist in the schema.
    assert set(ds.schema().names) == {"id", "plus_one", "times_two", "ten_minus_id"}
@pytest.mark.skipif(
    get_pyarrow_version() < parse_version("20.0.0"),
    reason="with_column requires PyArrow >= 20.0.0",
)
@pytest.mark.parametrize(
    "udf_function, column_name, expected_result",
    [
        # Single column UDF - add one to each value
        pytest.param(
            lambda: udf(DataType.int64())(lambda x: pc.add(x, 1)),
            "add_one",
            1,  # 0 + 1 = 1
            id="single_column_add_one",
        ),
        # Single column UDF - multiply by 2
        pytest.param(
            lambda: udf(DataType.int64())(lambda x: pc.multiply(x, 2)),
            "times_two",
            0,  # 0 * 2 = 0
            id="single_column_multiply",
        ),
        # Single column UDF - square the value
        pytest.param(
            lambda: udf(DataType.int64())(lambda x: pc.multiply(x, x)),
            "squared",
            0,  # 0 * 0 = 0
            id="single_column_square",
        ),
        # Single column UDF with string return type
        pytest.param(
            lambda: udf(DataType.string())(lambda x: pc.cast(x, pa.string())),
            "id_str",
            "0",  # Convert 0 to "0"
            id="single_column_to_string",
        ),
        # Single column UDF with float return type
        pytest.param(
            lambda: udf(DataType.float64())(lambda x: pc.divide(x, 2.0)),
            "half",
            0.0,  # 0 / 2.0 = 0.0
            id="single_column_divide_float",
        ),
    ],
)
def test_with_column_udf_single_column(
    ray_start_regular_shared,
    udf_function,
    column_name,
    expected_result,
    target_max_block_size_infinite_or_default,
):
    """Test UDFExpr functionality with single column operations in with_column."""
    ds = ray.data.range(5)
    # Each param is a zero-arg factory; build the UDF fresh per test run.
    udf_fn = udf_function()

    # Apply the UDF to the "id" column
    ds_with_udf = ds.with_column(column_name, udf_fn(col("id")))

    result = ds_with_udf.take(1)[0]
    assert result["id"] == 0
    assert result[column_name] == expected_result
@pytest.mark.skipif(
    get_pyarrow_version() < parse_version("20.0.0"),
    reason="with_column requires PyArrow >= 20.0.0",
)
@pytest.mark.parametrize(
    "test_scenario",
    [
        # Multi-column UDF - add two columns
        pytest.param(
            {
                "data": [{"a": 1, "b": 2}, {"a": 3, "b": 4}],
                "udf": lambda: udf(DataType.int64())(lambda x, y: pc.add(x, y)),
                "column_name": "sum_ab",
                "expected_first": 3,  # 1 + 2 = 3
                "expected_second": 7,  # 3 + 4 = 7
            },
            id="multi_column_add",
        ),
        # Multi-column UDF - multiply two columns
        pytest.param(
            {
                "data": [{"x": 2, "y": 3}, {"x": 4, "y": 5}],
                "udf": lambda: udf(DataType.int64())(lambda x, y: pc.multiply(x, y)),
                "column_name": "product_xy",
                "expected_first": 6,  # 2 * 3 = 6
                "expected_second": 20,  # 4 * 5 = 20
            },
            id="multi_column_multiply",
        ),
        # Multi-column UDF - string concatenation
        pytest.param(
            {
                "data": [
                    {"first": "John", "last": "Doe"},
                    {"first": "Jane", "last": "Smith"},
                ],
                "udf": lambda: udf(DataType.string())(
                    lambda first, last: pc.binary_join_element_wise(first, last, " ")
                ),
                "column_name": "full_name",
                "expected_first": "John Doe",
                "expected_second": "Jane Smith",
            },
            id="multi_column_string_concat",
        ),
    ],
)
def test_with_column_udf_multi_column(
    ray_start_regular_shared,
    test_scenario,
    target_max_block_size_infinite_or_default,
):
    """Test UDFExpr functionality with multi-column operations in with_column."""
    data = test_scenario["data"]
    # "udf" holds a zero-arg factory; call it to build the UDF instance.
    udf_fn = test_scenario["udf"]()
    column_name = test_scenario["column_name"]
    expected_first = test_scenario["expected_first"]
    expected_second = test_scenario["expected_second"]

    ds = ray.data.from_items(data)

    # Apply UDF to multiple columns based on the scenario
    if "a" in data[0] and "b" in data[0]:
        ds_with_udf = ds.with_column(column_name, udf_fn(col("a"), col("b")))
    elif "x" in data[0] and "y" in data[0]:
        ds_with_udf = ds.with_column(column_name, udf_fn(col("x"), col("y")))
    else:  # first/last name scenario
        ds_with_udf = ds.with_column(column_name, udf_fn(col("first"), col("last")))

    results = ds_with_udf.take(2)
    assert results[0][column_name] == expected_first
    assert results[1][column_name] == expected_second
@pytest.mark.skipif(
    get_pyarrow_version() < parse_version("20.0.0"),
    reason="with_column requires PyArrow >= 20.0.0",
)
@pytest.mark.parametrize(
    "expression_scenario",
    [
        # UDF in arithmetic expression
        pytest.param(
            {
                "expression_factory": lambda add_one_udf: add_one_udf(col("id")) * 2,
                "expected": 2,  # (0 + 1) * 2 = 2
                "column_name": "udf_times_two",
            },
            id="udf_in_arithmetic",
        ),
        # UDF with literal addition
        pytest.param(
            {
                "expression_factory": lambda add_one_udf: add_one_udf(col("id"))
                + lit(10),
                "expected": 11,  # (0 + 1) + 10 = 11
                "column_name": "udf_plus_literal",
            },
            id="udf_plus_literal",
        ),
        # UDF in comparison
        pytest.param(
            {
                "expression_factory": lambda add_one_udf: add_one_udf(col("id")) > 0,
                "expected": True,  # (0 + 1) > 0 = True
                "column_name": "udf_comparison",
            },
            id="udf_in_comparison",
        ),
        # Nested UDF operations (UDF + regular expression)
        pytest.param(
            {
                "expression_factory": lambda add_one_udf: add_one_udf(col("id") + 5),
                "expected": 6,  # add_one(0 + 5) = add_one(5) = 6
                "column_name": "nested_udf",
            },
            id="nested_udf_expression",
        ),
    ],
)
def test_with_column_udf_in_complex_expressions(
    ray_start_regular_shared,
    expression_scenario,
    target_max_block_size_infinite_or_default,
):
    """Test UDFExpr functionality in complex expressions with with_column."""
    ds = ray.data.range(5)

    # Create a simple add_one UDF for use in expressions
    @udf(DataType.int64())
    def add_one(x: pa.Array) -> pa.Array:
        return pc.add(x, 1)

    expression = expression_scenario["expression_factory"](add_one)
    expected = expression_scenario["expected"]
    column_name = expression_scenario["column_name"]

    ds_with_expr = ds.with_column(column_name, expression)
    result = ds_with_expr.take(1)[0]
    assert result["id"] == 0
    assert result[column_name] == expected
@pytest.mark.skipif(
    get_pyarrow_version() < parse_version("20.0.0"),
    reason="with_column requires PyArrow >= 20.0.0",
)
def test_with_column_udf_multiple_udfs(
    ray_start_regular_shared, target_max_block_size_infinite_or_default
):
    """Test applying multiple UDFs in sequence with with_column."""
    ds = ray.data.range(5)

    # Define multiple UDFs
    @udf(DataType.int64())
    def add_one(x: pa.Array) -> pa.Array:
        return pc.add(x, 1)

    @udf(DataType.int64())
    def multiply_by_two(x: pa.Array) -> pa.Array:
        return pc.multiply(x, 2)

    @udf(DataType.float64())
    def divide_by_three(x: pa.Array) -> pa.Array:
        return pc.divide(x, 3.0)

    # Apply UDFs in sequence; each reads the column added by the previous one.
    ds = ds.with_column("plus_one", add_one(col("id")))
    ds = ds.with_column("times_two", multiply_by_two(col("plus_one")))
    ds = ds.with_column("div_three", divide_by_three(col("times_two")))

    # Convert to pandas and compare with expected result
    result_df = ds.to_pandas()
    expected_df = pd.DataFrame(
        {
            "id": [0, 1, 2, 3, 4],
            "plus_one": [1, 2, 3, 4, 5],  # id + 1
            "times_two": [2, 4, 6, 8, 10],  # (id + 1) * 2
            "div_three": [
                2.0 / 3.0,
                4.0 / 3.0,
                2.0,
                8.0 / 3.0,
                10.0 / 3.0,
            ],  # ((id + 1) * 2) / 3
        }
    )
    pd.testing.assert_frame_equal(result_df, expected_df)
@pytest.mark.skipif(
    get_pyarrow_version() < parse_version("20.0.0"),
    reason="with_column requires PyArrow >= 20.0.0",
)
def test_with_column_mixed_udf_and_regular_expressions(
    ray_start_regular_shared, target_max_block_size_infinite_or_default
):
    """Test mixing UDF expressions and regular expressions in with_column operations."""
    ds = ray.data.range(5)

    # Define a UDF for testing
    @udf(DataType.int64())
    def multiply_by_three(x: pa.Array) -> pa.Array:
        return pc.multiply(x, 3)

    # Mix regular expressions and UDF expressions
    ds = ds.with_column("plus_ten", col("id") + 10)  # Regular expression
    ds = ds.with_column("times_three", multiply_by_three(col("id")))  # UDF expression
    ds = ds.with_column("minus_five", col("id") - 5)  # Regular expression
    ds = ds.with_column(
        "udf_plus_regular", multiply_by_three(col("id")) + col("plus_ten")
    )  # Mixed: UDF + regular
    ds = ds.with_column(
        "comparison", col("times_three") > col("plus_ten")
    )  # Regular expression using UDF result

    # Convert to pandas and compare with expected result
    result_df = ds.to_pandas()
    expected_df = pd.DataFrame(
        {
            "id": [0, 1, 2, 3, 4],
            "plus_ten": [10, 11, 12, 13, 14],  # id + 10
            "times_three": [0, 3, 6, 9, 12],  # id * 3
            "minus_five": [-5, -4, -3, -2, -1],  # id - 5
            "udf_plus_regular": [10, 14, 18, 22, 26],  # (id * 3) + (id + 10)
            "comparison": [False, False, False, False, False],  # times_three > plus_ten
        }
    )
    pd.testing.assert_frame_equal(result_df, expected_df)
@pytest.mark.skipif(
    get_pyarrow_version() < parse_version("20.0.0"),
    reason="with_column requires PyArrow >= 20.0.0",
)
def test_with_column_udf_invalid_return_type_validation(
    ray_start_regular_shared, target_max_block_size_infinite_or_default
):
    """Test that UDFs returning invalid types raise TypeError with clear message."""
    ds = ray.data.range(3)

    # Test UDF returning invalid type (dict) - expecting string but returning dict
    @udf(DataType.string())
    def invalid_dict_return(x: pa.Array) -> dict:
        return {"invalid": "return_type"}

    # Test UDF returning invalid type (str) - expecting string but returning plain str
    @udf(DataType.string())
    def invalid_str_return(x: pa.Array) -> str:
        return "invalid_string"

    # Test UDF returning invalid type (int) - expecting int64 but returning plain int
    @udf(DataType.int64())
    def invalid_int_return(x: pa.Array) -> int:
        return 42

    # Test each invalid return type
    test_cases = [
        (invalid_dict_return, "dict"),
        (invalid_str_return, "str"),
        (invalid_int_return, "int"),
    ]

    for invalid_udf, expected_type_name in test_cases:
        with pytest.raises((RayTaskError, UserCodeException)) as exc_info:
            ds.with_column("invalid_col", invalid_udf(col("id"))).take(1)

        # The actual TypeError gets wrapped, so we need to check the exception chain
        error_message = str(exc_info.value)
        assert f"returned invalid type {expected_type_name}" in error_message
        assert "Expected type" in error_message
        assert "pandas.Series" in error_message and "numpy.ndarray" in error_message
@pytest.mark.skipif(
    get_pyarrow_version() < parse_version("20.0.0"),
    reason="with_column requires PyArrow >= 20.0.0",
)
@pytest.mark.parametrize(
    "scenario",
    [
        # Each scenario supplies either inline row data or a prebuilt Arrow
        # table, a deferred expression factory (so `col`/`lit` run at test
        # time), the output column name, and the expected column values.
        pytest.param(
            {
                "data": [
                    {"name": "Alice"},
                    {"name": "Bob"},
                    {"name": "Charlie"},
                ],
                "expr_factory": lambda: col("name") + "_X",
                "column_name": "name_with_suffix",
                "expected": ["Alice_X", "Bob_X", "Charlie_X"],
            },
            id="string_col_plus_python_literal_rhs",
        ),
        pytest.param(
            {
                "data": [
                    {"name": "Alice"},
                    {"name": "Bob"},
                    {"name": "Charlie"},
                ],
                "expr_factory": lambda: "_X" + col("name"),
                "column_name": "name_with_prefix",
                "expected": ["_XAlice", "_XBob", "_XCharlie"],
            },
            id="python_literal_lhs_plus_string_col",
        ),
        pytest.param(
            {
                "data": [
                    {"first": "John", "last": "Doe"},
                    {"first": "Jane", "last": "Smith"},
                ],
                "expr_factory": lambda: col("first") + col("last"),
                "column_name": "full_name",
                "expected": ["JohnDoe", "JaneSmith"],
            },
            id="string_col_plus_string_col",
        ),
        pytest.param(
            {
                # Dictionary-encoded string column exercises the decode path.
                "arrow_table": pa.table(
                    {"name": pa.array(["Alice", "Bob"]).dictionary_encode()}
                ),
                "expr_factory": lambda: col("name") + "_X",
                "column_name": "name_with_suffix",
                "expected": ["Alice_X", "Bob_X"],
            },
            id="dict_encoded_string_col_plus_literal_rhs",
        ),
        pytest.param(
            {
                "data": [
                    {"name": "Alice"},
                    {"name": "Bob"},
                ],
                "expr_factory": lambda: col("name") + lit("_X"),
                "column_name": "name_with_suffix",
                "expected": ["Alice_X", "Bob_X"],
            },
            id="string_col_plus_lit_literal_rhs",
        ),
    ],
)
def test_with_column_string_concat_combinations(
    ray_start_regular_shared,
    scenario,
):
    """String concatenation via `+` works for col/literal/lit operand mixes."""
    # Build the dataset from whichever input form the scenario provides.
    if "arrow_table" in scenario:
        ds = ray.data.from_arrow(scenario["arrow_table"])
    else:
        ds = ray.data.from_items(scenario["data"])
    expr = scenario["expr_factory"]()
    column_name = scenario["column_name"]
    ds2 = ds.with_column(column_name, expr)
    out = ds2.to_pandas()
    assert out[column_name].tolist() == scenario["expected"]
@pytest.mark.skipif(
    get_pyarrow_version() < parse_version("20.0.0"),
    reason="with_column requires PyArrow >= 20.0.0",
)
def test_with_column_string_concat_type_mismatch_raises(
    ray_start_regular_shared,
):
    """Adding a string literal to an integer column surfaces a user-facing error."""
    dataset = ray.data.range(3)
    mismatched_expr = col("id") + "_X"
    with pytest.raises((RayTaskError, UserCodeException)):
        dataset.with_column("bad", mismatched_expr).materialize()
@pytest.mark.parametrize(
    "expr_factory, expected_columns, alias_name, expected_values",
    [
        (
            lambda: col("id").alias("new_id"),
            ["id", "new_id"],
            "new_id",
            [0, 1, 2, 3, 4],  # Copy of id column
        ),
        (
            lambda: (col("id") + 1).alias("id_plus_one"),
            ["id", "id_plus_one"],
            "id_plus_one",
            [1, 2, 3, 4, 5],  # id + 1
        ),
        (
            lambda: (col("id") * 2 + 5).alias("transformed"),
            ["id", "transformed"],
            "transformed",
            [5, 7, 9, 11, 13],  # id * 2 + 5
        ),
        (
            lambda: lit(42).alias("constant"),
            ["id", "constant"],
            "constant",
            [42, 42, 42, 42, 42],  # lit(42)
        ),
        (
            lambda: (col("id") >= 0).alias("is_non_negative"),
            ["id", "is_non_negative"],
            "is_non_negative",
            [True, True, True, True, True],  # id >= 0
        ),
        (
            lambda: (col("id") + 1).alias("id"),
            ["id"],  # Only one column since we're overwriting id
            "id",
            [1, 2, 3, 4, 5],  # id + 1 replaces original id
        ),
    ],
    ids=[
        "col_alias",
        "arithmetic_alias",
        "complex_alias",
        "literal_alias",
        "comparison_alias",
        "overwrite_existing_column",
    ],
)
def test_with_column_alias_expressions(
    ray_start_regular_shared,
    expr_factory,
    expected_columns,
    alias_name,
    expected_values,
):
    """Test that alias expressions work correctly with with_column."""
    expr = expr_factory()
    # Verify the alias name matches what we expect
    assert expr.name == alias_name
    # Apply the aliased expression
    ds = ray.data.range(5).with_column(alias_name, expr)
    # Convert to pandas for comprehensive comparison
    result_df = ds.to_pandas()
    # Create expected DataFrame
    expected_df = pd.DataFrame({"id": [0, 1, 2, 3, 4], alias_name: expected_values})
    # Ensure column order matches expected_columns
    expected_df = expected_df[expected_columns]
    # Assert the entire DataFrame is equal
    pd.testing.assert_frame_equal(result_df, expected_df)
    # Verify the alias expression evaluates the same as the non-aliased version
    # NOTE(review): `non_aliased_expr` is bound to the very same aliased `expr`
    # object, so this re-run compares the expression with itself rather than
    # with a genuinely non-aliased version — confirm intent (or strip the
    # alias here) so the comparison actually exercises something.
    non_aliased_expr = expr
    ds_non_aliased = ray.data.range(5).with_column(alias_name, non_aliased_expr)
    non_aliased_df = ds_non_aliased.to_pandas()
    pd.testing.assert_frame_equal(result_df, non_aliased_df)
@pytest.mark.skipif(
    get_pyarrow_version() < parse_version("20.0.0"),
    reason="with_column requires PyArrow >= 20.0.0",
)
def test_with_column_callable_class_udf_actor_semantics(ray_start_regular_shared):
    """Test that callable class UDFs maintain state across batches using actor semantics."""
    import pyarrow.compute as pc

    # Stateful UDF: remembers how many times it has been invoked so far.
    @udf(return_dtype=DataType.int32())
    class InvocationCounter:
        def __init__(self, offset=0):
            self.offset = offset
            self.call_count = 0

        def __call__(self, x):
            # Increment call count each time the UDF is invoked
            self.call_count += 1
            # Add the offset plus the call count to show state is maintained
            return pc.add(pc.add(x, self.offset), self.call_count)

    # Several blocks -> several invocations against the same actor instance.
    dataset = ray.data.range(20, override_num_blocks=4)
    tracker = InvocationCounter(offset=100)
    rows = dataset.with_column("result", tracker(col("id"))).take_all()

    # Exact values depend on which batch each row came from, but every row
    # must reflect the offset (100) plus at least one recorded invocation.
    for row in rows:
        assert row["result"] >= row["id"] + 100 + 1
@pytest.mark.skipif(
    get_pyarrow_version() < parse_version("20.0.0"),
    reason="with_column requires PyArrow >= 20.0.0",
)
def test_with_column_callable_class_udf_with_constructor_args(
    ray_start_regular_shared,
):
    """Test that callable class UDFs correctly use constructor arguments."""
    import pyarrow.compute as pc

    @udf(return_dtype=DataType.int32())
    class AddOffset:
        def __init__(self, offset):
            self.offset = offset

        def __call__(self, x):
            return pc.add(x, self.offset)

    dataset = ray.data.range(10)

    # Two instances of the same class, each with its own constructor argument.
    for offset, column in ((5, "plus_five"), (10, "plus_ten")):
        actual = dataset.with_column(column, AddOffset(offset)(col("id"))).to_pandas()
        expected = pd.DataFrame(
            {"id": list(range(10)), column: list(range(offset, offset + 10))}
        )
        assert rows_same(actual, expected)
@pytest.mark.skipif(
    get_pyarrow_version() < parse_version("20.0.0"),
    reason="with_column requires PyArrow >= 20.0.0",
)
def test_with_column_multiple_callable_class_udfs(ray_start_regular_shared):
    """Test that multiple callable class UDFs can be used in the same projection."""
    import pyarrow.compute as pc

    @udf(return_dtype=DataType.int32())
    class Multiplier:
        def __init__(self, factor):
            self.factor = factor

        def __call__(self, x):
            return pc.multiply(x, self.factor)

    @udf(return_dtype=DataType.int32())
    class Adder:
        def __init__(self, addend):
            self.addend = addend

        def __call__(self, x):
            return pc.add(x, self.addend)

    dataset = ray.data.range(5)

    # Chain two independent callable-class UDFs in one pipeline.
    actual = (
        dataset.with_column("doubled", Multiplier(2)(col("id")))
        .with_column("plus_ten", Adder(10)(col("id")))
        .to_pandas()
    )
    expected = pd.DataFrame(
        {
            "id": [0, 1, 2, 3, 4],
            "doubled": [0, 2, 4, 6, 8],
            "plus_ten": [10, 11, 12, 13, 14],
        }
    )
    assert rows_same(actual, expected)
@pytest.mark.skipif(
    get_pyarrow_version() < parse_version("20.0.0"),
    reason="with_column requires PyArrow >= 20.0.0",
)
def test_with_column_same_callable_class_different_constructor_args(
    ray_start_regular_shared,
):
    """Test that the same callable class with different constructor args works correctly.

    This test ensures that when the same callable class is instantiated with different
    constructor arguments, each instance maintains its own state. This is important for
    future-proofing in case Actor->Actor fusion becomes enabled.
    """
    import pyarrow.compute as pc

    @udf(return_dtype=DataType.int32())
    class Multiplier:
        def __init__(self, factor):
            self.factor = factor

        def __call__(self, x):
            return pc.multiply(x, self.factor)

    # Create dataset
    ds = ray.data.range(5)
    # Use the SAME class with DIFFERENT constructor arguments
    times_two = Multiplier(2)
    times_three = Multiplier(3)
    result = ds.with_column("times_two", times_two(col("id"))).with_column(
        "times_three", times_three(col("id"))
    )
    result_df = result.to_pandas()
    expected_df = pd.DataFrame(
        {
            "id": [0, 1, 2, 3, 4],
            "times_two": [0, 2, 4, 6, 8],  # id * 2
            "times_three": [0, 3, 6, 9, 12],  # id * 3
        }
    )
    # rows_same is imported at module level (see sibling tests); the previous
    # debug print of the query plan and the redundant local import are removed.
    assert rows_same(result_df, expected_df)
@pytest.mark.skipif(
    get_pyarrow_version() < parse_version("20.0.0"),
    reason="with_column requires PyArrow >= 20.0.0",
)
def test_with_column_callable_class_udf_with_compute_strategy(
    ray_start_regular_shared,
):
    """Test that compute strategy can be specified for callable class UDFs."""
    import pyarrow.compute as pc

    @udf(return_dtype=DataType.int32())
    class AddOffset:
        def __init__(self, offset):
            self.offset = offset

        def __call__(self, x):
            return pc.add(x, self.offset)

    dataset = ray.data.range(10)

    # Pin the UDF to a fixed-size actor pool.
    actual = dataset.with_column(
        "result",
        AddOffset(5)(col("id")),
        compute=ray.data.ActorPoolStrategy(size=2),
    ).to_pandas()
    expected = pd.DataFrame({"id": list(range(10)), "result": list(range(5, 15))})
    assert rows_same(actual, expected)
@pytest.mark.skipif(
    get_pyarrow_version() < parse_version("20.0.0"),
    reason="with_column requires PyArrow >= 20.0.0",
)
def test_with_column_async_callable_class_udf(ray_start_regular_shared):
    """Test that async callable class UDFs work correctly with actor semantics."""
    import asyncio

    import pyarrow.compute as pc

    @udf(return_dtype=DataType.int32())
    class AsyncAddOffset:
        def __init__(self, offset):
            self.offset = offset
            self.call_count = 0

        async def __call__(self, x):
            # Simulate async work
            await asyncio.sleep(0.001)
            self.call_count += 1
            # Add offset to show the UDF was called
            return pc.add(x, self.offset)

    dataset = ray.data.range(10, override_num_blocks=2)
    adder = AsyncAddOffset(5)
    actual = dataset.with_column("result", adder(col("id"))).to_pandas()
    expected = pd.DataFrame({"id": list(range(10)), "result": list(range(5, 15))})
    assert rows_same(actual, expected)
@pytest.mark.skipif(
    get_pyarrow_version() < parse_version("20.0.0"),
    reason="with_column requires PyArrow >= 20.0.0",
)
def test_with_column_async_callable_class_udf_with_state(ray_start_regular_shared):
    """Test that async callable class UDFs maintain state across batches."""
    import asyncio

    import pyarrow.compute as pc

    @udf(return_dtype=DataType.int32())
    class AsyncCounter:
        def __init__(self):
            self.total_processed = 0

        async def __call__(self, x):
            # Simulate async work
            await asyncio.sleep(0.001)
            # Track how many items we've processed
            batch_size = len(x)
            self.total_processed += batch_size
            # Return the running count
            return pc.add(x, self.total_processed - batch_size)

    # Create dataset with multiple blocks
    ds = ray.data.range(20, override_num_blocks=4)
    # Use async callable class UDF with state
    counter = AsyncCounter()
    result = ds.with_column("running_total", counter(col("id")))
    # Just verify we got results without errors; the exact values depend on
    # execution order, so only lower-bound each row.
    rows = result.take_all()
    assert len(rows) == 20
    # All values should be at least the original id. (The previous loop used
    # an unused enumerate index and shadowed the `result` dataset variable.)
    for row in rows:
        assert row["running_total"] >= row["id"]
@pytest.mark.skipif(
    get_pyarrow_version() < parse_version("20.0.0"),
    reason="with_column requires PyArrow >= 20.0.0",
)
def test_with_column_multiple_async_callable_class_udfs(ray_start_regular_shared):
    """Test that multiple async callable class UDFs can work together."""
    import asyncio

    import pyarrow.compute as pc

    @udf(return_dtype=DataType.int32())
    class AsyncMultiplier:
        def __init__(self, factor):
            self.factor = factor

        async def __call__(self, x):
            await asyncio.sleep(0.001)
            return pc.multiply(x, self.factor)

    @udf(return_dtype=DataType.int32())
    class AsyncAdder:
        def __init__(self, addend):
            self.addend = addend

        async def __call__(self, x):
            await asyncio.sleep(0.001)
            return pc.add(x, self.addend)

    dataset = ray.data.range(5)

    # Apply both async UDFs within the same pipeline.
    actual = (
        dataset.with_column("doubled", AsyncMultiplier(2)(col("id")))
        .with_column("plus_ten", AsyncAdder(10)(col("id")))
        .to_pandas()
    )
    expected = pd.DataFrame(
        {
            "id": [0, 1, 2, 3, 4],
            "doubled": [0, 2, 4, 6, 8],
            "plus_ten": [10, 11, 12, 13, 14],
        }
    )
    assert rows_same(actual, expected)
@pytest.mark.skipif(
    get_pyarrow_version() < parse_version("20.0.0"),
    reason="with_column requires PyArrow >= 20.0.0",
)
def test_with_column_async_generator_udf_multiple_yields(ray_start_regular_shared):
    """Test that async generator UDFs correctly handle multiple yields.

    When an async generator UDF yields multiple values, the last (most recent)
    value is returned. This matches map_batches behavior of collecting all yields,
    while adapting to expression context where a single value per row is required.
    """
    import pyarrow.compute as pc

    @udf(return_dtype=DataType.int32())
    class AsyncGeneratorMultiYield:
        """UDF that yields multiple values - last yield is returned."""

        def __init__(self, offset):
            self.offset = offset

        async def __call__(self, x):
            # Yield multiple values for the same input
            # Fix: Last yield is returned (most recent/final result)
            yield pc.add(x, self.offset)  # First yield: x + offset
            yield pc.multiply(x, self.offset + 10)  # Second yield: x * (offset + 10)
            yield pc.add(x, self.offset * 2)  # Third yield: x + (offset * 2) - RETURNED

    dataset = ray.data.range(5, override_num_blocks=1)
    generator_udf = AsyncGeneratorMultiYield(5)
    actual = dataset.with_column("result", generator_udf(col("id"))).to_pandas()

    # Only the final yield survives: id + (5 * 2) = id + 10.
    expected = pd.DataFrame(
        {
            "id": [0, 1, 2, 3, 4],
            "result": [10, 11, 12, 13, 14],
        }
    )
    assert rows_same(actual, expected)
if __name__ == "__main__":
    import sys

    # Allow running this test module directly under pytest.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_with_column.py",
"license": "Apache License 2.0",
"lines": 861,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/train/v2/tests/test_config.py | import pyarrow.fs
import pytest
from ray.train import RunConfig, ScalingConfig
def test_scaling_config_validation():
    """ScalingConfig rejects inconsistent resource / label configurations."""
    config = ScalingConfig(
        num_workers=2, use_gpu=True, resources_per_worker={"CPU": 1}
    )
    assert config.total_resources == {"CPU": 2, "GPU": 2}

    # GPU resources without use_gpu=True are rejected.
    with pytest.raises(ValueError, match="`use_gpu` is False but `GPU` was found in"):
        ScalingConfig(num_workers=2, use_gpu=False, resources_per_worker={"GPU": 1})

    # GPU and TPU are mutually exclusive.
    with pytest.raises(ValueError, match="Cannot specify both"):
        ScalingConfig(num_workers=2, use_gpu=True, use_tpu=True)

    # A label_selector list must line up with max_workers.
    selector_length_mismatch = (
        "If `label_selector` is a list, it must be the same length as "
        "`max_workers`"
    )
    with pytest.raises(ValueError, match=selector_length_mismatch):
        ScalingConfig(num_workers=2, label_selector=[{"subcluster": "my_subcluster"}])
    with pytest.raises(ValueError, match=selector_length_mismatch):
        ScalingConfig(
            num_workers=(2, 3),
            label_selector=[{"subcluster": "a"}, {"subcluster": "b"}],
        )
def test_scaling_config_accelerator_type():
    """accelerator_type shows up as a fractional custom resource per worker."""
    sc = ScalingConfig(num_workers=2, use_gpu=True, accelerator_type="A100")
    assert sc.accelerator_type == "A100"
    # Per-worker view includes the accelerator tag alongside the GPU.
    assert sc._resources_per_worker_not_none == {
        "GPU": 1,
        "accelerator_type:A100": 0.001,
    }
    # Totals scale with num_workers.
    assert sc.total_resources == {"GPU": 2, "accelerator_type:A100": 0.002}
    assert sc.additional_resources_per_worker == {"accelerator_type:A100": 0.001}
def test_scaling_config_tpu_min_workers_multiple():
    """Elastic TPU configs with an invalid min_workers multiple are rejected."""
    tpu_kwargs = dict(
        num_workers=(1, 2),
        use_tpu=True,
        topology="2x2x2",
        accelerator_type="TPU-V4",
        resources_per_worker={"TPU": 4},
    )
    with pytest.raises(ValueError, match="min_workers"):
        ScalingConfig(**tpu_kwargs)
def test_storage_filesystem_repr():
    """Test for https://github.com/ray-project/ray/pull/40851"""
    # repr() must not raise when a filesystem object is attached.
    run_config = RunConfig(storage_filesystem=pyarrow.fs.S3FileSystem())
    repr(run_config)
def test_scaling_config_default_workers():
    """Test that num_workers defaults to 1 for non-TPU workloads."""
    # Both CPU-only and GPU configs default to a single worker.
    for kwargs, expected_total in (
        ({}, {"CPU": 1}),
        ({"use_gpu": True}, {"GPU": 1}),
    ):
        cfg = ScalingConfig(**kwargs)
        assert cfg.num_workers == 1
        assert cfg.total_resources == expected_total
if __name__ == "__main__":
    import sys

    # Run this module's tests directly: verbose, stop at first failure.
    sys.exit(pytest.main(["-v", "-x", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/tests/test_config.py",
"license": "Apache License 2.0",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/train/lint/check_circular_imports.py | import argparse
import ast
import sys
from pathlib import Path
from typing import Dict, List, Optional, Set, Tuple
# Dotted names of every package under ray.train (base + patch directories).
# Populated by find_train_packages() before any analysis runs.
TRAIN_PACKAGES = set()
def find_train_packages(base_train_dir: Path, patch_train_dir: Path) -> None:
    """
    Find and initialize the global TRAIN_PACKAGES with all train package names from base_train_dir
    and patch_train_dir combined.
    """
    global TRAIN_PACKAGES
    TRAIN_PACKAGES = set()
    # Collect all packages under both base and patch train dirs
    package_files = list(base_train_dir.rglob("__init__.py")) + list(
        patch_train_dir.rglob("__init__.py")
    )
    base_dir = get_base_dir()
    for init_file in package_files:
        relative_path = init_file.relative_to(base_dir)
        # Join path components explicitly so this also works on Windows,
        # where str(Path) uses backslashes instead of "/".
        dotted_module = ".".join(relative_path.parent.parts)
        TRAIN_PACKAGES.add(dotted_module)
def is_train_package(module_str: str) -> bool:
    """Return True if ``module_str`` is a known Ray Train package name.

    Relies on the module-level TRAIN_PACKAGES set, which must have been
    populated by find_train_packages() beforehand.
    """
    return module_str in TRAIN_PACKAGES
def get_base_dir() -> Path:
    """Return the filesystem path to the ray python directory."""
    # This file lives at <python>/ray/train/lint/check_circular_imports.py,
    # so the python dir is three parents above the resolved file path.
    return Path(__file__).resolve().parents[3]
def get_base_train_dir() -> Path:
    """Return the filesystem path to the ray train directory."""
    base = get_base_dir()
    return base / "ray" / "train"
def does_overlap(main_module: str, module: str) -> bool:
    """Checks if the init file of module is on the import path of main_module"""
    if main_module == module:
        return True
    # A dotted-prefix match means importing main_module executes module's __init__.
    return main_module.startswith(module + ".")
class Import:
    """
    Represents an import statement.

    For example, 'from X import A, B' has module 'X' and names ['A', 'B'].
    Also supports 'import X'.
    """

    def __init__(
        self, module: str, names: List[str] = None, is_package: bool = False
    ) -> None:
        # Dotted module path being imported.
        self.module = module
        # Whether the module is a known ray.train package (vs a plain module).
        self.is_package = is_package
        # Imported names for `from X import ...`; empty for `import X`.
        # None is normalized to a fresh empty list.
        self.names = names if names else []
class ImportCollector(ast.NodeVisitor):
    """
    An AST node visitor that collects all module-level imports from a Python source file.

    It traverses the AST and records module-level import statements (`import ...` and `from ... import ...`) that are not
    inside function or class definitions, and that are not guarded by `if TYPE_CHECKING` or `if typing.TYPE_CHECKING`
    blocks.
    """

    def __init__(self, module_name: str, is_package: bool) -> None:
        # Dotted name of the scanned module; anchors relative-import resolution.
        self._module_name = module_name
        # True when scanning a package __init__.py (shifts the relative anchor).
        self._is_package = is_package
        # Collected Import records. NOTE: Import defines no __eq__/__hash__,
        # so this set deduplicates by object identity only.
        self.imports: Set[Import] = set()
        # Set once `from typing import TYPE_CHECKING` is seen, so that a later
        # bare `if TYPE_CHECKING:` guard can be recognized.
        self.type_checking_imported = False

    # --- private helpers ---
    def _is_type_checking_test(self, expr: ast.AST) -> bool:
        """Return True for `if TYPE_CHECKING` or `if typing.TYPE_CHECKING`."""
        # Bare name form only counts if TYPE_CHECKING was imported earlier
        # in the file (visit order is source order for module statements).
        if (
            self.type_checking_imported
            and isinstance(expr, ast.Name)
            and expr.id == "TYPE_CHECKING"
        ):
            return True
        elif (
            isinstance(expr, ast.Attribute)
            and isinstance(expr.value, ast.Name)
            and expr.value.id == "typing"
            and expr.attr == "TYPE_CHECKING"
        ):
            return True
        return False

    def _get_package_parts(self) -> List[str]:
        # For a package, the module itself is the anchor; for a plain module,
        # relative imports are anchored at its parent package.
        parts = self._module_name.split(".")
        return parts if self._is_package else parts[:-1]

    def _to_absolute_module(
        self, level: int, module_str: Optional[str]
    ) -> Optional[str]:
        """Construct the absolute module string from a relative import."""
        # Absolute import
        if level == 0:
            return module_str
        package_parts = self._get_package_parts()
        # If the relative import is out of bounds
        if level - 1 > len(package_parts):
            return None
        # Base parts based on the level: each extra dot strips one package.
        base_module_parts = (
            package_parts if level == 1 else package_parts[: -(level - 1)]
        )
        # Construct absolute module string
        abs_module_parts = (
            base_module_parts + module_str.split(".")
            if module_str
            else base_module_parts
        )
        return ".".join(abs_module_parts)

    # --- parsing functions ---
    def visit_If(self, node: ast.If) -> None:
        # If the test is not TYPE_CHECKING, visit statement body
        if not self._is_type_checking_test(node.test):
            for stmt in node.body:
                self.visit(stmt)
        # Also visit conditional branches
        for stmt in node.orelse:
            self.visit(stmt)

    def visit_Import(self, node: ast.Import) -> None:
        # `import X` (possibly aliased); record the original module name.
        for alias in node.names:
            if alias.name:
                self.imports.add(
                    Import(module=alias.name, is_package=is_train_package(alias.name))
                )

    def visit_ImportFrom(self, node: ast.ImportFrom) -> None:
        # `from X import a, b` — resolve relative imports to absolute first.
        import_str = self._to_absolute_module(node.level or 0, node.module)
        if not import_str:
            return
        names = [alias.name for alias in node.names]
        self.imports.add(
            Import(
                module=import_str, is_package=is_train_package(import_str), names=names
            )
        )
        # Remember that TYPE_CHECKING is now in scope for later `if` guards.
        if "TYPE_CHECKING" in names and import_str == "typing":
            self.type_checking_imported = True

    def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
        # Skip function contents
        return

    def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> None:
        # Skip function contents
        return

    def visit_ClassDef(self, node: ast.ClassDef) -> None:
        # Skip class contents
        return
def collect_imports(
    module_name: str, is_package: bool, source_text: str
) -> Set[Import]:
    """Parse ``source_text`` and return its module-level imports.

    Returns an empty set (with a warning) when the source cannot be parsed.
    """
    try:
        tree = ast.parse(source_text)
    except SyntaxError:
        print(f"Warning: Failed to parse {module_name} for circular imports")
        return set()
    visitor = ImportCollector(module_name, is_package)
    visitor.visit(tree)
    return visitor.imports
def to_module_name_and_is_package(py_file: Path) -> Tuple[str, bool]:
    """
    Convert a Python file path to its corresponding module name and determine if it is a package.

    Args:
        py_file: The path to the Python file.

    Returns:
        Tuple[str, bool]: A tuple containing the module name as a string and a boolean indicating
            whether the module is a package (True if it is an __init__.py file).
    """
    relative = py_file.relative_to(get_base_dir()).with_suffix("")
    parts = relative.parts
    # An __init__.py names its parent package, not itself.
    is_package = parts[-1] == "__init__"
    if is_package:
        parts = parts[:-1]
    return ".".join(parts), is_package
def get_file_module_imports(
    files: List[Path], module_match_string: Optional[str] = None
) -> Dict[str, List[Import]]:
    """
    Collect and return the module-level imports for a list of Python files.

    Args:
        files: A list of Path objects representing Python files to analyze.
        module_match_string: An optional string to filter imports. Only imports
            containing this string will be included in the result.

    Returns:
        A dictionary mapping module names to a list of their import statements.
        The module names are derived from the file paths, and the import statements
        are filtered based on the optional module_match_string.
    """
    module_imports: Dict[str, List[Import]] = {}
    for py_file in files:
        try:
            module_name, is_package = to_module_name_and_is_package(py_file)
            source = py_file.read_text(encoding="utf-8", errors="ignore")
            collected = collect_imports(module_name, is_package, source)
            # Keep only imports matching the filter (or everything if no filter).
            module_imports[module_name] = [
                imp
                for imp in collected
                if module_match_string is None or module_match_string in imp.module
            ]
        except Exception:
            # Best-effort: skip files that cannot be read or resolved.
            continue
    return module_imports
def convert_to_file_paths(imports: List[Import]) -> List[Path]:
    """
    Convert a list of import strings to a list of file paths.

    Args:
        imports: A list of Import objects

    Returns:
        A list of file paths.
    """
    base_dir = get_base_dir()
    paths: List[Path] = []
    for imp in imports:
        # Packages resolve to their __init__.py; plain modules to <name>.py.
        suffix = "/__init__.py" if imp.is_package else ".py"
        paths.append(base_dir / (imp.module.replace(".", "/") + suffix))
    return paths
def expand_to_include_reexports(import_map: Dict[str, List[Import]]) -> None:
    """
    Expands the set of imports for a given import map to include the modules resulting from reexports.

    So if in the base train module, there is "from x import a, b" and x is a package, then this function
    will explore the __init__.py of x and include the modules a and b were reexported from in the import map.

    Mutates ``import_map`` in place; returns nothing.
    """
    for module, base_imports in import_map.items():
        # Get only the package imports
        packages = [imp for imp in base_imports if imp.is_package]
        package_files = convert_to_file_paths(packages)
        # Imports found inside each package's __init__.py, keyed by package name.
        reexports = get_file_module_imports(package_files)
        agg_reexports = []
        # Filter patch init file imports to those that only contain the right names
        for base_import in base_imports:
            if base_import.module in reexports:
                import_list = reexports[base_import.module]
                # Keep only init-file imports that re-export at least one of
                # the names the base import actually asked for.
                target_reexports = [
                    imp
                    for imp in import_list
                    if set(imp.names) & set(base_import.names)
                ]
                agg_reexports.extend(target_reexports)
        # Expand modules to include reexported modules (in-place extend is
        # safe here: we only mutate the list values, not the dict keys).
        import_map[module].extend(agg_reexports)
def check_violations(
    base_train_patching_imports: Dict[str, List[Import]], patch_dir: Path
) -> List[str]:
    """
    Check for circular import violations between base and patch train modules.

    Args:
        base_train_patching_imports: A dictionary mapping base train module names to their imports.
        patch_dir: The directory path containing patch train modules.

    Returns:
        A list of strings describing any circular import violations found.
    """
    violations: List[str] = []
    # Get the imports from the patch train init files
    patch_train_init_files = list(patch_dir.rglob("__init__.py"))
    patch_train_init_imports = get_file_module_imports(
        patch_train_init_files, module_match_string="ray.train"
    )
    # Expand the imports to include reexports
    expand_to_include_reexports(base_train_patching_imports)
    # Process each patch train init module for violations.
    # (Distinct names `base_imports` / `patch_module_imports` replace the
    # previous reuse of `imports` for both loop variables, which shadowed
    # the outer binding.)
    for base_train_init_module, base_imports in base_train_patching_imports.items():
        # Get the imports from the patch train files
        patch_train_files = convert_to_file_paths(base_imports)
        patch_train_file_imports = get_file_module_imports(
            patch_train_files, module_match_string="ray.train"
        )
        for patch_module, patch_module_imports in patch_train_file_imports.items():
            # Skip if the base train init module is in the import path of the patch module
            if does_overlap(patch_module, base_train_init_module):
                continue
            # Skip if the patch train module init file imports the base train init module
            patch_init_module = (
                ".".join(patch_module.split(".")[:-1])
                if not is_train_package(patch_module)
                else patch_module
            )
            patch_init_imports = patch_train_init_imports.get(patch_init_module, [])
            if any(
                does_overlap(imp.module, base_train_init_module)
                for imp in patch_init_imports
            ):
                continue
            for patch_import in patch_module_imports:
                # If any of those v1 imports go through the init file, then it is a violation
                if does_overlap(patch_import.module, base_train_init_module):
                    violations.append(
                        f"circular-import-train: Circular import between {base_train_init_module} (importing {patch_module}) and {patch_module} (importing {patch_import.module}). Resolve by importing {base_train_init_module} in the __init__.py of {patch_init_module}."
                    )
    return violations
def main():
    """Entry point: scan base-train init files and report circular imports.

    Exits with status 1 when any violation is found.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--patch_dir",
        default="ray/train/v2",
        help="Path to the directory containing patching contents",
    )
    args = parser.parse_args()
    # Get train directory paths
    base_dir = get_base_dir()
    base_train_dir = get_base_train_dir()
    patch_train_dir = base_dir / Path(args.patch_dir)
    # Find and save all train packages in global TRAIN_PACKAGES for reference
    find_train_packages(base_train_dir, patch_train_dir)
    # Collect all base train init files (excluding anything under the patch dir)
    base_train_init_files = [
        f
        for f in base_train_dir.rglob("__init__.py")
        if not f.is_relative_to(patch_train_dir)
    ]
    # Get the patching imports in the base train init files.
    # Join path parts explicitly so the dotted prefix is correct on Windows,
    # where str(Path) uses backslashes instead of "/".
    dotted_module_prefix = ".".join(patch_train_dir.relative_to(base_dir).parts)
    patching_imports = get_file_module_imports(
        base_train_init_files, module_match_string=dotted_module_prefix
    )
    # Collect all violations based off the patching imports
    violations = check_violations(patching_imports, patch_train_dir)
    if violations:
        print("\n".join(violations))
        sys.exit(1)


if __name__ == "__main__":
    main()
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/lint/check_circular_imports.py",
"license": "Apache License 2.0",
"lines": 316,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/train/v2/tests/test_circular_import_linter.py | import textwrap
from pathlib import Path
import pytest
from ray.train.lint import check_circular_imports as cci
def test_import_collector_excludes_non_module_level_and_type_checking():
    """Only module-level, non-TYPE_CHECKING imports are collected."""
    source = textwrap.dedent(
        """
        import os
        from typing import TYPE_CHECKING
        from .submod import thing
        if TYPE_CHECKING:
            import foo
        def f():
            import json
        class C:
            import pkg
        """
    )
    collected = cci.collect_imports(
        module_name="pkg.module", is_package=False, source_text=source
    )
    modules = {imp.module for imp in collected}
    # Module-level imports are kept; the relative import is made absolute.
    assert "os" in modules
    assert "pkg.submod" in modules
    # TYPE_CHECKING-guarded, function-local, and class-body imports are excluded.
    assert "foo" not in modules
    assert "json" not in modules
    assert "pkg" not in modules
def test_to_module_name_and_is_package(tmp_path: Path, monkeypatch: pytest.MonkeyPatch):
    """__init__.py maps to its package name; plain modules keep their stem."""
    # Fake python tree under tmp: tmp/python/ray/train/lint/{pkg}/...
    base_dir = tmp_path / "python"
    lint_dir = base_dir / "ray" / "train" / "lint"
    pkg_init = lint_dir / "foo" / "__init__.py"
    pkg_init.parent.mkdir(parents=True, exist_ok=True)
    pkg_init.write_text("# pkg init")
    module_file = lint_dir / "bar.py"
    module_file.parent.mkdir(parents=True, exist_ok=True)
    module_file.write_text("# module file")

    monkeypatch.setattr(cci, "get_base_dir", lambda: base_dir)

    assert cci.to_module_name_and_is_package(pkg_init) == ("ray.train.lint.foo", True)
    assert cci.to_module_name_and_is_package(module_file) == (
        "ray.train.lint.bar",
        False,
    )
def test_get_file_module_imports_filters_by_prefix(
    tmp_path: Path, monkeypatch: pytest.MonkeyPatch
):
    """Only imports containing the match prefix are retained, keyed by module name."""
    base_dir = tmp_path / "python"
    demo_dir = base_dir / "ray" / "train" / "demo"
    demo_dir.mkdir(parents=True, exist_ok=True)

    # a.py mixes a stdlib import, a matching import, and an unrelated one.
    file_a = demo_dir / "a.py"
    file_a.write_text(
        "\n".join(
            [
                "import os",
                "from ray.train.v2.torch import torch_trainer",
                "from some.other import mod",
            ]
        )
    )
    # b.py has a single matching import.
    file_b = demo_dir / "b.py"
    file_b.write_text("from ray.train import something")

    monkeypatch.setattr(cci, "get_base_dir", lambda: base_dir)
    cci.find_train_packages(base_dir, demo_dir)

    result = cci.get_file_module_imports(
        [file_a, file_b], module_match_string="ray.train"
    )

    # Keys are dotted module names derived from file paths.
    assert set(result) == {"ray.train.demo.a", "ray.train.demo.b"}
    # Exactly one matching import survives per file; the rest are filtered out.
    assert [imp.module for imp in result["ray.train.demo.a"]] == ["ray.train.v2.torch"]
    assert [imp.module for imp in result["ray.train.demo.b"]] == ["ray.train"]
def test_check_standard_violations_reports_and_suppresses(
    tmp_path: Path, monkeypatch: pytest.MonkeyPatch
):
    """A v2 module importing back into the v1 package is a violation, unless the
    v2 package __init__ itself imports the v1 package (which suppresses it)."""
    base_dir = tmp_path / "python"
    train_dir = base_dir / "ray" / "train"
    patch_dir = train_dir / "v2"
    v2_dir = train_dir / "v2" / "tensorflow"
    v1_pkg_dir = train_dir / "tensorflow"
    v2_dir.mkdir(parents=True, exist_ok=True)
    v1_pkg_dir.mkdir(parents=True, exist_ok=True)
    # Base v1 package init: imports a v2 module
    (v1_pkg_dir / "__init__.py").write_text(
        "from ray.train.v2.tensorflow.tensorflow_trainer import tensorflow_trainer\n"
    )
    # v2 module that (incorrectly) imports back into v1 package
    (v2_dir / "tensorflow_trainer.py").write_text(
        "from ray.train.tensorflow import something\n"
    )
    # Extra v2 module that should not be checked (not reachable from v1 init).
    (v2_dir / "foo.py").write_text("from ray.train.tensorflow import something\n")
    # v2 package init WITHOUT importing v1 package (should trigger violation)
    (v2_dir / "__init__.py").write_text("# empty init\n")
    monkeypatch.setattr(cci, "get_base_dir", lambda: base_dir)
    cci.find_train_packages(base_dir, patch_dir)
    # Build mapping: base v1 init module -> imports of v2 it references
    base_v1_init = train_dir / "tensorflow" / "__init__.py"
    imports_map = cci.get_file_module_imports([base_v1_init])
    violations = cci.check_violations(imports_map, patch_dir=train_dir / "v2")
    # Exactly one circular-import violation is reported.
    assert len(violations) == 1
    # Now fix by having v2 package init import the v1 package init (suppresses violation)
    (v2_dir / "__init__.py").write_text("import ray.train.tensorflow\n")
    violations = cci.check_violations(imports_map, patch_dir=train_dir / "v2")
    assert violations == []
def test_check_no_violation_on_overlapping_import_path(
    tmp_path: Path, monkeypatch: pytest.MonkeyPatch
):
    """A v2 module importing `ray.train` itself (an ancestor of the patch dir)
    must not be flagged, even though the paths overlap textually."""
    base_dir = tmp_path / "python"
    train_dir = base_dir / "ray" / "train"
    patch_dir = train_dir / "v2"
    v2_dir = train_dir / "v2" / "tensorflow"
    v2_dir.mkdir(parents=True, exist_ok=True)
    # Circular dependency between ray.train and v2 module
    (v2_dir / "tensorflow_trainer.py").write_text("from ray.train import something\n")
    (train_dir / "__init__.py").write_text(
        "from ray.train.v2.tensorflow.tensorflow_trainer import TensorflowTrainer\n"
    )
    monkeypatch.setattr(cci, "get_base_dir", lambda: base_dir)
    cci.find_train_packages(base_dir, patch_dir)
    # Build mapping: base v1 init module -> imports of v2 it references
    base_v1_init = train_dir / "__init__.py"
    imports_map = cci.get_file_module_imports([base_v1_init])
    violations = cci.check_violations(imports_map, patch_dir=patch_dir)
    # Overlapping prefix alone is not a violation.
    assert len(violations) == 0
# NOTE(review): the test name says "exclude" but it exercises
# cci.expand_to_include_reexports — consider renaming for clarity.
def test_expand_to_exclude_reexports(tmp_path: Path, monkeypatch: pytest.MonkeyPatch):
    """expand_to_include_reexports follows a v2 __init__ re-export so the
    underlying module is also tracked in the imports map."""
    base_dir = tmp_path / "python"
    train_dir = base_dir / "ray" / "train"
    patch_dir = train_dir / "v2"
    v2_dir = train_dir / "v2" / "tensorflow"
    v2_dir.mkdir(parents=True, exist_ok=True)
    # Import from v2 init file
    (train_dir / "__init__.py").write_text(
        "from ray.train.v2.tensorflow import TensorflowTrainer\n"
    )
    # Reexport tensorflow_trainer from v2 init file
    (v2_dir / "__init__.py").write_text(
        "from .tensorflow_trainer import TensorflowTrainer \n"
    )
    # Circular dependency with ray.train
    (v2_dir / "tensorflow_trainer.py").write_text("from ray.train import something\n")
    monkeypatch.setattr(cci, "get_base_dir", lambda: base_dir)
    cci.find_train_packages(base_dir, patch_dir)
    # Build mapping: base v1 init module -> imports of v2 it references
    base_v1_init = train_dir / "__init__.py"
    imports_map = cci.get_file_module_imports([base_v1_init])
    # Before expansion: only the v2 package import is recorded.
    assert imports_map.keys()
    assert "ray.train" in imports_map.keys()
    assert isinstance(imports_map["ray.train"], list)
    assert imports_map["ray.train"]
    assert isinstance(imports_map["ray.train"][0], cci.Import)
    assert imports_map["ray.train"][0].module == "ray.train.v2.tensorflow"
    cci.expand_to_include_reexports(imports_map)
    assert len(imports_map["ray.train"]) == 2
    # After expansion, the re-exported tensorflow trainer module IS included.
    trainer_module = "ray.train.v2.tensorflow.tensorflow_trainer"
    assert any(imp.module == trainer_module for imp in imports_map["ray.train"])
if __name__ == "__main__":
    # Allow running this test module directly, propagating pytest's exit code.
    import sys

    sys.exit(pytest.main(["-v", "-x", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/tests/test_circular_import_linter.py",
"license": "Apache License 2.0",
"lines": 163,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/train/v2/tests/test_circular_imports.py | """
This file tests previously known circular imports to prevent regressions, isolating each import in a Ray task.
"""
import sys
import pytest
import ray
@pytest.fixture(scope="session", autouse=True)
def ray_session():
    """Initialize Ray at the start of the test session and shutdown at the end."""
    # Guard against double-init when another fixture/test already started Ray.
    if not ray.is_initialized():
        # Workers must see RAY_TRAIN_V2_ENABLED so v2 import paths are active.
        ray.init(runtime_env={"env_vars": {"RAY_TRAIN_V2_ENABLED": "1"}})
    yield
    if ray.is_initialized():
        ray.shutdown()
def run_import_task(task_func):
    """Run a zero-argument Ray import task and fail with a readable message.

    Args:
        task_func: A ``@ray.remote``-decorated function that performs an import.

    Raises:
        AssertionError: If scheduling or executing the task fails. The original
            exception is chained with ``from`` so the full traceback of the
            failing import is preserved (the previous version discarded it,
            making failures hard to debug).
    """
    try:
        future = task_func.remote()
        ray.get(future)
    except Exception as e:
        raise AssertionError(f"Import failed: {e}") from e
def test_train_import():
    """Each core ray.train v2 API module must import cleanly in a fresh worker."""

    # Ray tasks for train imports: each runs in its own worker process so one
    # failing import cannot poison interpreter state for the others.
    @ray.remote
    def import_user_callback():
        from ray.train.v2.api.callback import UserCallback  # noqa: F401

    @ray.remote
    def import_train_configs():
        from ray.train.v2.api.config import (  # noqa: F401
            FailureConfig,
            RunConfig,
            ScalingConfig,
        )

    @ray.remote
    def import_checkpoint_upload_mode():
        from ray.train.v2.api.report_config import CheckpointUploadMode  # noqa: F401

    @ray.remote
    def import_reported_checkpoint():
        from ray.train.v2.api.reported_checkpoint import (
            ReportedCheckpoint,  # noqa: F401
        )

    @ray.remote
    def import_result():
        from ray.train.v2.api.result import Result  # noqa: F401

    @ray.remote
    def import_train_fn_utils():
        from ray.train.v2.api.train_fn_utils import (  # noqa: F401
            get_all_reported_checkpoints,
            get_checkpoint,
            get_context,
            get_dataset_shard,
            report,
        )

    run_import_task(import_user_callback)
    run_import_task(import_train_configs)
    run_import_task(import_checkpoint_upload_mode)
    run_import_task(import_reported_checkpoint)
    run_import_task(import_result)
    run_import_task(import_train_fn_utils)
def test_tensorflow_import():
    """TensorflowTrainer must be importable without a circular-import error."""

    @ray.remote
    def _import_tf_trainer():
        from ray.train.v2.tensorflow.tensorflow_trainer import (  # noqa: F401
            TensorflowTrainer,
        )

    run_import_task(_import_tf_trainer)
def test_collective_import():
    """The collective communication helpers must import cleanly in a worker."""

    @ray.remote
    def _import_collective_helpers():
        from ray.train.collective.collectives import (  # noqa: F401
            barrier,
            broadcast_from_rank_zero,
        )

    run_import_task(_import_collective_helpers)
def test_lightgbm_import():
    """LightGBMTrainer must be importable without a circular-import error."""

    @ray.remote
    def _import_lgbm_trainer():
        from ray.train.v2.lightgbm.lightgbm_trainer import LightGBMTrainer  # noqa: F401

    run_import_task(_import_lgbm_trainer)
def test_torch_import():
    """Torch trainer and its train-loop utilities must import cleanly."""

    # Ray tasks for torch imports
    @ray.remote
    def import_torch_trainer():
        from ray.train.v2.torch.torch_trainer import TorchTrainer  # noqa: F401

    @ray.remote
    def import_torch_train_loop_utils():
        from ray.train.v2.torch.train_loop_utils import (  # noqa: F401
            accelerate,
            backward,
            enable_reproducibility,
            get_device,
            get_devices,
            prepare_data_loader,
            prepare_model,
            prepare_optimizer,
        )

    run_import_task(import_torch_trainer)
    run_import_task(import_torch_train_loop_utils)
def test_xgboost_import():
    """XGBoost config and trainer must import cleanly in fresh workers."""

    # Ray tasks for xgboost imports
    @ray.remote
    def import_xgboost_config():
        from ray.train.v2.xgboost.config import XGBoostConfig  # noqa: F401

    @ray.remote
    def import_xgboost_trainer():
        from ray.train.v2.xgboost.xgboost_trainer import XGBoostTrainer  # noqa: F401

    run_import_task(import_xgboost_config)
    run_import_task(import_xgboost_trainer)
if __name__ == "__main__":
    # Allow running this test module directly, propagating pytest's exit code.
    sys.exit(pytest.main(["-v", "-x", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/tests/test_circular_imports.py",
"license": "Apache License 2.0",
"lines": 115,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/dashboard/modules/reporter/reporter_models.py | from typing import Dict, List, Optional, Tuple
from ray._common.pydantic_compat import PYDANTIC_INSTALLED, BaseModel
# These models are only defined when pydantic is importable; otherwise the
# module exposes StatsPayload = None so callers can feature-detect.
if PYDANTIC_INSTALLED:
    # TODO(aguo): Use these pydantic models in the dashboard API as well.
    class ProcessGPUInfo(BaseModel):
        """
        Information about GPU usage for a single process.
        NOTE: Backwards compatibility for this model must be maintained.
        If broken, the downstream dashboard API and UI code will break.
        If you must make a backwards-incompatible change, you must make sure
        to update the relevant code in the dashboard API and UI as well.
        """

        pid: int
        gpuMemoryUsage: int  # in MB
        gpuUtilization: Optional[int] = None  # percentage

    class GpuUtilizationInfo(BaseModel):
        """
        GPU utilization information for a single GPU device.
        NOTE: Backwards compatibility for this model must be maintained.
        If broken, the downstream dashboard API and UI code will break.
        If you must make a backwards-incompatible change, you must make sure
        to update the relevant code in the dashboard API and UI as well.
        """

        index: int
        name: str
        uuid: str
        utilizationGpu: Optional[int] = None  # percentage
        memoryUsed: int  # in MB
        memoryTotal: int  # in MB
        processesPids: Optional[
            List[ProcessGPUInfo]
        ] = None  # converted to list in _compose_stats_payload
        powerMw: Optional[int] = None  # current power draw in milliwatts
        temperatureC: Optional[int] = None  # temperature in Celsius

    class TpuUtilizationInfo(BaseModel):
        """
        TPU utilization information for a single TPU device.
        NOTE: Backwards compatibility for this model must be maintained.
        If broken, the downstream dashboard API and UI code will break.
        If you must make a backwards-incompatible change, you must make sure
        to update the relevant code in the dashboard API and UI as well.
        """

        index: int
        name: str
        tpuType: str
        tpuTopology: str
        tensorcoreUtilization: int  # percentage
        hbmUtilization: int  # percentage
        dutyCycle: int  # percentage
        memoryUsed: int  # in bytes
        memoryTotal: int  # in bytes

    class CpuTimes(BaseModel):
        """
        CPU times information based on psutil.scputimes.
        NOTE: Backwards compatibility for this model must be maintained.
        If broken, the downstream dashboard API and UI code will break.
        If you must make a backwards-incompatible change, you must make sure
        to update the relevant code in the dashboard API and UI as well.
        """

        user: float
        system: float
        childrenUser: float
        childrenSystem: float

    class MemoryInfo(BaseModel):
        """
        Memory information based on psutil.svmem.
        NOTE: Backwards compatibility for this model must be maintained.
        If broken, the downstream dashboard API and UI code will break.
        If you must make a backwards-incompatible change, you must make sure
        to update the relevant code in the dashboard API and UI as well.
        """

        rss: float
        vms: float
        # pfaults/pageins are platform-specific (macOS) and may be absent.
        pfaults: Optional[float] = None
        pageins: Optional[float] = None

    class MemoryFullInfo(MemoryInfo):
        """
        Memory full information based on psutil.smem.
        NOTE: Backwards compatibility for this model must be maintained.
        If broken, the downstream dashboard API and UI code will break.
        If you must make a backwards-incompatible change, you must make sure
        to update the relevant code in the dashboard API and UI as well.
        """

        uss: float

    class ProcessInfo(BaseModel):
        """
        Process information from psutil.
        NOTE: Backwards compatibility for this model must be maintained.
        If broken, the downstream dashboard API and UI code will break.
        If you must make a backwards-incompatible change, you must make sure
        to update the relevant code in the dashboard API and UI as well.
        """

        pid: int
        createTime: float
        cpuPercent: float
        cpuTimes: Optional[CpuTimes]  # psutil._pslinux.scputimes object
        cmdline: List[str]
        memoryInfo: Optional[MemoryInfo]  # psutil._pslinux.svmem object
        memoryFullInfo: Optional[MemoryFullInfo]  # psutil._pslinux.smem object
        numFds: Optional[int] = None  # Not available on Windows
        gpuMemoryUsage: Optional[int] = None  # in MB, added by _get_workers
        gpuUtilization: Optional[int] = None  # percentage, added by _get_workers

    # Note: The actual data structure uses tuples for some fields, not structured objects
    # These are type aliases to document the tuple structure
    MemoryUsage = Tuple[
        int, int, float, int
    ]  # (total, available, percent, used) in bytes
    LoadAverage = Tuple[
        Tuple[float, float, float], Optional[Tuple[float, float, float]]
    ]  # (load, perCpuLoad)
    NetworkStats = Tuple[int, int]  # (sent, received) in bytes
    DiskIOStats = Tuple[
        int, int, int, int
    ]  # (readBytes, writeBytes, readCount, writeCount)
    DiskIOSpeed = Tuple[
        float, float, float, float
    ]  # (readSpeed, writeSpeed, readIops, writeIops)
    NetworkSpeed = Tuple[float, float]  # (sendSpeed, receiveSpeed) in bytes/sec

    class DiskUsage(BaseModel):
        """
        Disk usage information based on psutil.diskusage.
        NOTE: Backwards compatibility for this model must be maintained.
        If broken, the downstream dashboard API and UI code will break.
        If you must make a backwards-incompatible change, you must make sure
        to update the relevant code in the dashboard API and UI as well.
        """

        total: int
        used: int
        free: int
        percent: float

    class StatsPayload(BaseModel):
        """
        Main stats payload returned by _compose_stats_payload.
        NOTE: Backwards compatibility for this model must be maintained.
        If broken, the downstream dashboard API and UI code will break.
        If you must make a backwards-incompatible change, you must make sure
        to update the relevant code in the dashboard API and UI as well.
        """

        now: float  # POSIX timestamp
        hostname: str
        ip: str
        cpu: float  # CPU usage percentage
        cpus: Tuple[int, int]  # (logicalCpuCount, physicalCpuCount)
        mem: MemoryUsage  # (total, available, percent, used) in bytes
        shm: Optional[int] = None  # shared memory in bytes, None if not available
        workers: List[ProcessInfo]
        raylet: Optional[ProcessInfo] = None
        agent: Optional[ProcessInfo] = None
        bootTime: float  # POSIX timestamp
        loadAvg: LoadAverage  # (load, perCpuLoad) where load is (1min, 5min, 15min)
        disk: Dict[str, DiskUsage]  # mount point -> psutil disk usage object
        diskIo: DiskIOStats  # (readBytes, writeBytes, readCount, writeCount)
        diskIoSpeed: DiskIOSpeed  # (readSpeed, writeSpeed, readIops, writeIops)
        gpus: List[GpuUtilizationInfo]
        tpus: List[TpuUtilizationInfo]
        network: NetworkStats  # (sent, received) in bytes
        networkSpeed: NetworkSpeed  # (sendSpeed, receiveSpeed) in bytes/sec
        cmdline: List[str]  # deprecated field from raylet
        gcs: Optional[ProcessInfo] = None  # only present on head node
else:
    # Sentinel so importers can check `StatsPayload is None` when pydantic
    # is unavailable.
    StatsPayload = None
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/dashboard/modules/reporter/reporter_models.py",
"license": "Apache License 2.0",
"lines": 161,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/llm/tests/serve/cpu/configs/test_multi_node_placement_groups.py | from typing import Any, Dict
import pytest
from ray.llm._internal.serve.core.server.llm_server import LLMServer
from ray.llm._internal.serve.engines.vllm.vllm_models import VLLMEngineConfig
from ray.llm._internal.serve.serving_patterns.data_parallel.dp_server import DPServer
from ray.serve.llm import LLMConfig, ModelLoadingConfig
def get_llm_config_with_placement_group(
    tensor_parallel_size: int = 1,
    pipeline_parallel_size: int = 1,
    placement_group_config: Dict[str, Any] = None,
) -> LLMConfig:
    """Build an LLMConfig for the given TP/PP sizes and placement group config."""
    # Single fixed replica so deployment-option math is deterministic.
    deployment = dict(
        autoscaling_config=dict(min_replicas=1, max_replicas=1),
    )
    engine = dict(
        tensor_parallel_size=tensor_parallel_size,
        pipeline_parallel_size=pipeline_parallel_size,
        distributed_executor_backend="ray",
    )
    loading = ModelLoadingConfig(
        model_id="test_model",
        model_source="facebook/opt-1.3b",
    )
    return LLMConfig(
        model_loading_config=loading,
        deployment_config=deployment,
        engine_kwargs=engine,
        placement_group_config=placement_group_config,
        runtime_env=None,
    )
@pytest.mark.parametrize(
    "tp_size,pp_size,placement_strategy",
    [
        (2, 4, "PACK"),  # Multi-node PP+TP with PACK
        (4, 2, "PACK"),  # Multi-node PP+TP with PACK
        (8, 1, "SPREAD"),  # Multi-node TP with SPREAD
        (1, 8, "SPREAD"),  # Multi-node PP with SPREAD
    ],
)
def test_llm_serve_custom_placement_group(tp_size, pp_size, placement_strategy):
    """Test Ray Serve LLM with custom placement group configurations."""
    total_gpus = tp_size * pp_size
    # Create custom placement group configuration: one 1-GPU bundle per worker.
    placement_group_config = {
        "bundles": [{"GPU": 1, "CPU": 1}] * total_gpus,
        "strategy": placement_strategy,
    }
    llm_config = get_llm_config_with_placement_group(
        tensor_parallel_size=tp_size,
        pipeline_parallel_size=pp_size,
        placement_group_config=placement_group_config,
    )
    # Verify the configuration is properly set
    assert llm_config.placement_group_config == placement_group_config
    assert llm_config.engine_kwargs["tensor_parallel_size"] == tp_size
    assert llm_config.engine_kwargs["pipeline_parallel_size"] == pp_size
    # Test that serve options are generated correctly: user strategy is
    # honored and one bundle per GPU worker is emitted.
    serve_options = LLMServer.get_deployment_options(llm_config)
    assert "placement_group_bundles" in serve_options
    assert "placement_group_strategy" in serve_options
    assert serve_options["placement_group_strategy"] == placement_strategy
    assert len(serve_options["placement_group_bundles"]) == total_gpus
@pytest.mark.parametrize(
    "tp_size,pp_size",
    [
        (2, 1),  # TP-only should use PACK by default
        (1, 2),  # PP-only should use PACK by default
        (2, 2),  # TP+PP should use PACK by default
    ],
)
def test_llm_serve_default_placement_strategy(tp_size, pp_size):
    """Test that Ray Serve LLM uses PACK strategy by default for all configurations."""
    llm_config = get_llm_config_with_placement_group(
        tensor_parallel_size=tp_size,
        pipeline_parallel_size=pp_size,
        placement_group_config=None,  # Use defaults
    )
    serve_options = LLMServer.get_deployment_options(llm_config)
    # All configurations should default to PACK strategy, with one bundle
    # per GPU worker (TP x PP).
    assert serve_options["placement_group_strategy"] == "PACK"
    assert len(serve_options["placement_group_bundles"]) == tp_size * pp_size
def test_llm_serve_placement_group_validation():
    """Test validation of placement group configurations."""
    # Test missing bundles: a config with only a strategy must be rejected.
    with pytest.raises(
        ValueError, match="placement_group_config must contain 'bundles'"
    ):
        llm_config = get_llm_config_with_placement_group(
            placement_group_config={"strategy": "PACK"}
        )
        LLMServer.get_deployment_options(llm_config)
    # Test missing strategy (should default to PACK, not fail)
    llm_config = get_llm_config_with_placement_group(
        placement_group_config={"bundles": [{"GPU": 1}]}
    )
    serve_options = LLMServer.get_deployment_options(llm_config)
    assert serve_options["placement_group_strategy"] == "PACK"
def test_llm_serve_multi_gpu_per_bundle_passes_through():
    """Test multiple GPUs per bundle pass through Serve validation.

    Serve allows GPU>1 per bundle in placement_group_config. vLLM will enforce
    its own GPU<=1 restriction during engine creation (not tested here).
    This confirms Serve doesn't block it, allowing vLLM to manage its constraints.
    """
    llm_config = get_llm_config_with_placement_group(
        tensor_parallel_size=1,
        pipeline_parallel_size=1,
        placement_group_config={
            "bundles": [{"GPU": 2, "CPU": 4}],
            "strategy": "PACK",
        },
    )
    # Serve should accept and pass through GPU=2 to placement group.
    # First bundle gets CPU: 4 (from config) + 1 (replica actor) = 5
    serve_options = LLMServer.get_deployment_options(llm_config)
    assert serve_options["placement_group_bundles"][0]["GPU"] == 2
    assert serve_options["placement_group_bundles"][0]["CPU"] == 5
    # vLLM will reject this during actual engine creation with a validation error
    # (not tested here since this is a config-only CPU test)
@pytest.mark.parametrize(
    "tp_size,pp_size,expected_bundles",
    [
        (1, 1, 1),
        (2, 1, 2),
        (1, 2, 2),
        (2, 2, 4),
        (4, 2, 8),
        (2, 4, 8),
    ],
)
def test_llm_serve_bundle_count(tp_size, pp_size, expected_bundles):
    """Exactly one placement-group bundle is created per GPU worker (TP x PP)."""
    config = get_llm_config_with_placement_group(
        tensor_parallel_size=tp_size,
        pipeline_parallel_size=pp_size,
    )
    options = LLMServer.get_deployment_options(config)
    bundles = options["placement_group_bundles"]
    assert len(bundles) == expected_bundles
def test_llm_serve_accelerator_and_resource_merging():
    """Test accelerator type injection and replica actor resource merging."""
    placement_group_config = {
        "bundles": [{"GPU": 1, "CPU": 1}] * 2,
        "strategy": "PACK",
    }
    llm_config = LLMConfig(
        model_loading_config=ModelLoadingConfig(
            model_id="test_model",
            model_source="facebook/opt-1.3b",
        ),
        deployment_config=dict(
            autoscaling_config=dict(min_replicas=1, max_replicas=1),
            # Replica actor resources that must be merged into the first bundle.
            ray_actor_options=dict(
                num_cpus=2,
                num_gpus=1,
                memory=1000000000,  # 1GB
            ),
        ),
        engine_kwargs=dict(
            tensor_parallel_size=2,
            pipeline_parallel_size=1,
            distributed_executor_backend="ray",
        ),
        accelerator_type="L4",
        placement_group_config=placement_group_config,
    )
    serve_options = LLMServer.get_deployment_options(llm_config)
    # First bundle: merged replica actor resources
    # CPU: 1 (from bundle) + 2 (from replica actor) = 3
    # GPU: Already 1 in both
    first_bundle = serve_options["placement_group_bundles"][0]
    assert first_bundle["CPU"] == 3
    assert first_bundle["GPU"] == 2  # 1 from bundle + 1 from replica actor
    assert "memory" in first_bundle
    assert "accelerator_type:L4" in first_bundle
    # Tail bundles: original config + accelerator type label resource.
    for bundle in serve_options["placement_group_bundles"][1:]:
        assert bundle["CPU"] == 1
        assert bundle["GPU"] == 1
        assert "accelerator_type:L4" in bundle
        # Tiny fractional amount just to constrain scheduling to L4 nodes.
        assert bundle["accelerator_type:L4"] == 0.001
def test_llm_serve_data_parallel_placement_override():
    """Test that data parallel deployments override placement group strategy to STRICT_PACK."""
    placement_group_config = {
        "bundles": [{"GPU": 1, "CPU": 1}] * 2,
        "strategy": "SPREAD",  # This should be overridden
    }
    llm_config = LLMConfig(
        model_loading_config=ModelLoadingConfig(
            model_id="test_model",
            model_source="facebook/opt-1.3b",
        ),
        # For DP correctness, do not set autoscaling_config; DP size fixes replicas
        deployment_config=dict(),
        engine_kwargs=dict(
            tensor_parallel_size=2,
            pipeline_parallel_size=1,
            data_parallel_size=2,  # Enable data parallelism
            distributed_executor_backend="ray",
        ),
        placement_group_config=placement_group_config,
    )
    serve_options = DPServer.get_deployment_options(llm_config)
    # Data parallel should override to STRICT_PACK regardless of user-specified strategy
    assert serve_options["placement_group_strategy"] == "STRICT_PACK"
    # Note: num_replicas is set by build_dp_deployment, not by get_deployment_options
def test_fractional_gpu_env_inferred_from_pg():
    """A fractional placement group should inject VLLM_RAY_PER_WORKER_GPUS."""
    config = LLMConfig(
        model_loading_config=ModelLoadingConfig(
            model_id="test_model",
            model_source="facebook/opt-125m",
        ),
        placement_group_config={"bundles": [{"GPU": 0.49}]},
        runtime_env=dict(env_vars={"EXTRA_VAR": "1"}),
    )
    engine_config = VLLMEngineConfig.from_llm_config(config)
    env_vars = engine_config.get_runtime_env_with_local_env_vars()["env_vars"]
    # Pre-existing env vars survive, and the fractional GPU amount from the
    # bundle is propagated to the vLLM worker env.
    assert env_vars["EXTRA_VAR"] == "1"
    assert env_vars["VLLM_RAY_PER_WORKER_GPUS"] == "0.49"
def test_fractional_gpu_env_var_override_preserved():
    """User-provided env var should be preserved when set."""
    config = LLMConfig(
        model_loading_config=ModelLoadingConfig(
            model_id="test_model",
            model_source="facebook/opt-125m",
        ),
        placement_group_config={"bundles": [{"GPU": 0.4}]},
        runtime_env=dict(
            env_vars={
                "VLLM_RAY_PER_WORKER_GPUS": "0.6",
            }
        ),
    )
    runtime_env = VLLMEngineConfig.from_llm_config(
        config
    ).get_runtime_env_with_local_env_vars()
    # The explicit user value wins over the value inferred from the bundle.
    assert runtime_env["env_vars"]["VLLM_RAY_PER_WORKER_GPUS"] == "0.6"
if __name__ == "__main__":
    # Allow running this test module directly via `python <file>`.
    pytest.main(["-v", __file__])
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/tests/serve/cpu/configs/test_multi_node_placement_groups.py",
"license": "Apache License 2.0",
"lines": 239,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:release/llm_tests/serve/test_llm_serve_multi_node_integration.py | import pytest
import ray
from ray import serve
from ray._common.test_utils import wait_for_condition
from ray.serve._private.constants import SERVE_DEFAULT_APP_NAME
from ray.serve.llm import (
build_dp_deployment,
build_pd_openai_app,
build_openai_app,
LLMConfig,
LLMServingArgs,
ModelLoadingConfig,
)
from ray.serve.schema import ApplicationStatus
@pytest.fixture(autouse=True)
def cleanup_ray_resources():
    """Automatically cleanup Ray resources between tests to prevent conflicts."""
    yield
    # Teardown order matters: stop Serve before shutting down the Ray cluster.
    serve.shutdown()
    ray.shutdown()
def is_default_app_running():
    """Return True once the default Serve application reports RUNNING."""
    try:
        app_status = serve.status().applications[SERVE_DEFAULT_APP_NAME].status
    except (KeyError, AttributeError):
        # App not deployed yet (or status shape incomplete) — treat as not running.
        return False
    return app_status == ApplicationStatus.RUNNING
@pytest.mark.parametrize(
    "tp_size,pp_size",
    [
        (2, 4),  # TPxPP=8 > 4 GPUs/node, FORCES cross-node placement
        (4, 2),  # TPxPP=8 > 4 GPUs/node, FORCES cross-node placement
    ],
)
def test_llm_serve_multi_node(tp_size, pp_size):
    """Test multi-node Ray Serve LLM deployment with custom placement groups.

    Cluster: 2 nodes x 4 GPUs = 8 total GPUs
    TPxPP=8 exceeds per-node capacity, forcing cross-node deployment.
    """
    total_gpus = tp_size * pp_size
    # One 1-GPU bundle per worker; PACK lets Ray split them across nodes.
    placement_group_config = {
        "bundles": [{"GPU": 1, "CPU": 1}] * total_gpus,
        "strategy": "PACK",
    }
    llm_config = LLMConfig(
        model_loading_config=ModelLoadingConfig(
            model_id="opt-1.3b",
            model_source="facebook/opt-1.3b",
        ),
        deployment_config=dict(
            autoscaling_config=dict(
                min_replicas=1,
                max_replicas=1,
            ),
        ),
        engine_kwargs=dict(
            tensor_parallel_size=tp_size,
            pipeline_parallel_size=pp_size,
            distributed_executor_backend="ray",
            # Small limits + eager mode keep this deployment cheap/fast.
            max_model_len=512,
            max_num_batched_tokens=256,
            enforce_eager=True,
        ),
        placement_group_config=placement_group_config,
        runtime_env={"env_vars": {"VLLM_DISABLE_COMPILE_CACHE": "1"}},
    )
    app = build_openai_app(llm_serving_args=LLMServingArgs(llm_configs=[llm_config]))
    serve.run(app, blocking=False)
    # The app must reach RUNNING within the timeout for the test to pass.
    wait_for_condition(is_default_app_running, timeout=300)
def test_llm_serve_data_parallelism():
    """Test Data Parallelism deployment with STRICT_PACK override.

    Validates that DP deployments work correctly with placement group configs.
    """
    placement_group_config = {
        "bundles": [{"GPU": 1, "CPU": 1}],
        "strategy": "SPREAD",  # Will be overridden to STRICT_PACK
    }
    llm_config = LLMConfig(
        model_loading_config=ModelLoadingConfig(
            model_id="microsoft/Phi-tiny-MoE-instruct",
            model_source="microsoft/Phi-tiny-MoE-instruct",
        ),
        deployment_config=dict(),  # DP sets num_replicas, not autoscaling
        engine_kwargs=dict(
            tensor_parallel_size=1,
            pipeline_parallel_size=1,
            data_parallel_size=4,  # 4 DP replicas, need to fill 2x4GPU workers
            distributed_executor_backend="ray",
            max_model_len=1024,
            max_num_seqs=32,
            enforce_eager=True,
        ),
        experimental_configs=dict(
            # Limit replicas per node so DP spans both nodes.
            dp_size_per_node=2,
        ),
        placement_group_config=placement_group_config,
        runtime_env={"env_vars": {"VLLM_DISABLE_COMPILE_CACHE": "1"}},
    )
    app = build_dp_deployment(llm_config)
    serve.run(app, blocking=False)
    wait_for_condition(is_default_app_running, timeout=300)
def test_llm_serve_prefill_decode_with_data_parallelism():
    """Test Prefill-Decode disaggregation with Data Parallelism and Expert Parallelism.

    Cluster: 2 nodes x 4 GPUs = 8 GPUs total
    - Prefill: DP=4 (scheduled on node with "prefill" custom resource)
    - Decode: DP=4 (scheduled on node with "decode" custom resource)

    Note: This test requires RAY_SERVE_USE_COMPACT_SCHEDULING_STRATEGY=1 to be set
    (configured in release_tests.yaml). Without this flag, Serve uses the default
    SPREAD scheduling strategy, which will prevent DP replicas from being colocated.
    """
    model_loading_config = ModelLoadingConfig(
        model_id="deepseek",
        model_source="deepseek-ai/DeepSeek-V2-Lite",
    )
    # Shared engine settings; dummy weights keep startup fast for a topology test.
    base_engine_kwargs = {
        "tensor_parallel_size": 1,
        "enable_expert_parallel": True,
        "load_format": "dummy",
        "max_model_len": 512,
        "max_num_batched_tokens": 256,
        "enforce_eager": True,
    }
    prefill_config = LLMConfig(
        model_loading_config=model_loading_config,
        engine_kwargs={
            **base_engine_kwargs,
            "data_parallel_size": 4,
            # NIXL transfers KV cache between prefill and decode replicas.
            "kv_transfer_config": {
                "kv_connector": "NixlConnector",
                "kv_role": "kv_both",
            },
        },
        experimental_configs={
            "dp_size_per_node": 4,
            "NIXL_SIDE_CHANNEL_PORT_BASE": 40000,  # Prefill port range
        },
        runtime_env={"env_vars": {"VLLM_DISABLE_COMPILE_CACHE": "1"}},
    )
    decode_config = LLMConfig(
        model_loading_config=model_loading_config,
        engine_kwargs={
            **base_engine_kwargs,
            "data_parallel_size": 4,
            "kv_transfer_config": {
                "kv_connector": "NixlConnector",
                "kv_role": "kv_both",
            },
        },
        experimental_configs={
            "dp_size_per_node": 4,
            # Distinct port range so prefill/decode side channels never collide.
            "NIXL_SIDE_CHANNEL_PORT_BASE": 41000,  # Decode port range (different)
        },
        runtime_env={"env_vars": {"VLLM_DISABLE_COMPILE_CACHE": "1"}},
    )
    # build_pd_openai_app auto-detects DP and uses build_dp_deployment
    app = build_pd_openai_app(
        {
            "prefill_config": prefill_config,
            "decode_config": decode_config,
        }
    )
    serve.run(app, blocking=False)
    wait_for_condition(is_default_app_running, timeout=300)
if __name__ == "__main__":
    # Allow running this integration test module directly.
    pytest.main(["-v", __file__])
| {
"repo_id": "ray-project/ray",
"file_path": "release/llm_tests/serve/test_llm_serve_multi_node_integration.py",
"license": "Apache License 2.0",
"lines": 165,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/test_dataset_aggregrations.py | import math
import random
import sys
import time
import numpy as np
import pandas as pd
import pytest
import ray
from ray.data.tests.conftest import * # noqa
from ray.data.tests.conftest import (
CoreExecutionMetrics,
assert_core_execution_metrics_equals,
)
from ray.tests.conftest import * # noqa
def test_count(ray_start_regular):
    """count() on a range dataset must use metadata, not execute read tasks."""
    ds = ray.data.range(100, override_num_blocks=10)
    # We do not kick off the read task by default.
    assert not ds._plan.has_started_execution
    assert ds.count() == 100
    # Getting number of rows should not trigger execution of any read tasks
    # for ray.data.range(), as the number of rows is known beforehand.
    assert not ds._plan.has_started_execution
    # No Ray tasks at all should have been launched.
    assert_core_execution_metrics_equals(CoreExecutionMetrics(task_count={}))
def test_count_edge_case(ray_start_regular):
    """Regression test: https://github.com/ray-project/ray/issues/44509."""
    ds = ray.data.range(10)
    # Counting first caches row metadata; the filtered count below must not
    # incorrectly reuse that cached value.
    ds.count()
    evens = ds.filter(fn=lambda row: row["id"] % 2 == 0)
    assert evens.count() == 5
def test_count_after_caching_after_execution(ray_start_regular):
    """Row count is unknown before execution but cached in the plan after it."""
    SCALE_FACTOR = 5
    FILE_ROW_COUNT = 150  # rows in example://iris.csv
    DS_ROW_COUNT = FILE_ROW_COUNT * SCALE_FACTOR
    paths = ["example://iris.csv"] * SCALE_FACTOR
    ds = ray.data.read_csv(paths)
    # Row count should be unknown before execution.
    assert "num_rows=?" in str(ds)
    # After iterating over bundles and completing execution, row count should be known.
    list(ds.iter_internal_ref_bundles())
    assert ds.count() == DS_ROW_COUNT
    # The plan cache should now hold the materialized row count.
    assert ds._plan._cache._num_rows == DS_ROW_COUNT
@pytest.mark.parametrize("num_parts", [1, 30])
@pytest.mark.parametrize("ds_format", ["arrow", "pandas"])
def test_global_tabular_min(ray_start_regular_shared_2_cpus, ds_format, num_parts):
    """Global min() over arrow/pandas blocks, including null handling."""
    seed = int(time.time())
    print(f"Seeding RNG for test_global_arrow_min with: {seed}")
    random.seed(seed)
    values = list(range(100))
    random.shuffle(values)

    def maybe_to_pandas(ds):
        # Convert to pandas blocks when that format is under test.
        if ds_format != "pandas":
            return ds
        return ds.map_batches(lambda x: x, batch_size=None, batch_format="pandas")

    # Built-in global min aggregation.
    ds = maybe_to_pandas(
        ray.data.from_items([{"A": v} for v in values]).repartition(num_parts)
    )
    assert ds.min("A") == 0

    # Empty dataset. Parallelism is set explicitly so there are no empty
    # input blocks.
    ds = maybe_to_pandas(ray.data.range(10, override_num_blocks=10))
    assert ds.filter(lambda r: r["id"] > 10).min("id") is None

    # Min over a column containing a null.
    nan_ds = maybe_to_pandas(
        ray.data.from_items([{"A": v} for v in values] + [{"A": None}]).repartition(
            num_parts
        )
    )
    assert nan_ds.min("A") == 0
    # With ignore_nulls=False the null poisons the aggregate.
    assert pd.isnull(nan_ds.min("A", ignore_nulls=False))

    # All-null column yields null regardless of ignore_nulls.
    nan_ds = maybe_to_pandas(
        ray.data.from_items([{"A": None}] * len(values)).repartition(num_parts)
    )
    assert pd.isnull(nan_ds.min("A"))
    assert pd.isnull(nan_ds.min("A", ignore_nulls=False))
@pytest.mark.parametrize("num_parts", [1, 30])
@pytest.mark.parametrize("ds_format", ["arrow", "pandas"])
def test_global_tabular_max(ray_start_regular_shared_2_cpus, ds_format, num_parts):
    """Global max() over arrow/pandas blocks, including null handling."""
    seed = int(time.time())
    print(f"Seeding RNG for test_global_arrow_max with: {seed}")
    random.seed(seed)
    values = list(range(100))
    random.shuffle(values)

    def maybe_to_pandas(ds):
        # Convert to pandas blocks when that format is under test.
        if ds_format != "pandas":
            return ds
        return ds.map_batches(lambda x: x, batch_size=None, batch_format="pandas")

    # Built-in global max aggregation.
    ds = maybe_to_pandas(
        ray.data.from_items([{"A": v} for v in values]).repartition(num_parts)
    )
    assert ds.max("A") == 99

    # Empty dataset. Parallelism is set explicitly so there are no empty
    # input blocks.
    ds = maybe_to_pandas(ray.data.range(10, override_num_blocks=10))
    assert ds.filter(lambda r: r["id"] > 10).max("id") is None

    # Max over a column containing a null.
    nan_ds = maybe_to_pandas(
        ray.data.from_items([{"A": v} for v in values] + [{"A": None}]).repartition(
            num_parts
        )
    )
    assert nan_ds.max("A") == 99
    # With ignore_nulls=False the null poisons the aggregate.
    assert pd.isnull(nan_ds.max("A", ignore_nulls=False))

    # All-null column yields null regardless of ignore_nulls.
    nan_ds = maybe_to_pandas(
        ray.data.from_items([{"A": None}] * len(values)).repartition(num_parts)
    )
    assert pd.isnull(nan_ds.max("A"))
    assert pd.isnull(nan_ds.max("A", ignore_nulls=False))
@pytest.mark.parametrize("num_parts", [1, 30])
@pytest.mark.parametrize("ds_format", ["arrow", "pandas"])
def test_global_tabular_mean(ray_start_regular_shared_2_cpus, ds_format, num_parts):
    """Global mean() over arrow/pandas blocks, including null handling."""
    seed = int(time.time())
    print(f"Seeding RNG for test_global_arrow_mean with: {seed}")
    random.seed(seed)
    values = list(range(100))
    random.shuffle(values)

    def maybe_to_pandas(ds):
        # Convert to pandas blocks when that format is under test.
        if ds_format != "pandas":
            return ds
        return ds.map_batches(lambda x: x, batch_size=None, batch_format="pandas")

    # Built-in global mean aggregation.
    ds = maybe_to_pandas(
        ray.data.from_items([{"A": v} for v in values]).repartition(num_parts)
    )
    assert ds.mean("A") == 49.5

    # Empty dataset. Parallelism is set explicitly so there are no empty
    # input blocks.
    ds = maybe_to_pandas(ray.data.range(10, override_num_blocks=10))
    assert ds.filter(lambda r: r["id"] > 10).mean("id") is None

    # Mean over a column containing a null.
    nan_ds = maybe_to_pandas(
        ray.data.from_items([{"A": v} for v in values] + [{"A": None}]).repartition(
            num_parts
        )
    )
    assert nan_ds.mean("A") == 49.5
    # With ignore_nulls=False the null poisons the aggregate.
    assert pd.isnull(nan_ds.mean("A", ignore_nulls=False))

    # All-null column yields null regardless of ignore_nulls.
    nan_ds = maybe_to_pandas(
        ray.data.from_items([{"A": None}] * len(values)).repartition(num_parts)
    )
    assert pd.isnull(nan_ds.mean("A"))
    assert pd.isnull(nan_ds.mean("A", ignore_nulls=False))
@pytest.mark.parametrize("num_parts", [1, 30])
@pytest.mark.parametrize("ds_format", ["arrow", "pandas"])
def test_global_tabular_std(ray_start_regular_shared_2_cpus, ds_format, num_parts):
    """Global std() checked against pandas, including null handling."""
    # NOTE: Do not change the seed
    seed = 1740035705
    random.seed(seed)
    values = list(range(100))
    random.shuffle(values)

    def as_arrow(ds):
        return ds.map_batches(lambda x: x, batch_size=None, batch_format="pyarrow")

    def as_pandas(ds):
        return ds.map_batches(lambda x: x, batch_size=None, batch_format="pandas")

    # Compare against pandas' std for both ddof settings.
    df = pd.DataFrame({"A": values})
    ds = ray.data.from_pandas(df).repartition(num_parts)
    if ds_format == "arrow":
        ds = as_arrow(ds)
    assert math.isclose(ds.std("A"), df["A"].std())
    assert math.isclose(ds.std("A", ddof=0), df["A"].std(ddof=0))

    # Empty dataset.
    ds = ray.data.from_pandas(pd.DataFrame({"A": []}))
    if ds_format == "arrow":
        ds = as_arrow(ds)
    assert pd.isnull(ds.std("A"))

    # Edge case: a single element has undefined sample std.
    ds = ray.data.from_pandas(pd.DataFrame({"A": [3]}))
    if ds_format == "arrow":
        ds = as_arrow(ds)
    assert np.isnan(ds.std("A"))

    # Std over a column containing a null.
    nan_df = pd.DataFrame({"A": values + [None]})
    nan_ds = ray.data.from_pandas(nan_df).repartition(num_parts)
    if ds_format == "arrow":
        nan_ds = as_arrow(nan_ds)
    assert math.isclose(nan_ds.std("A"), nan_df["A"].std())
    # With ignore_nulls=False the null poisons the aggregate.
    assert pd.isnull(nan_ds.std("A", ignore_nulls=False))

    # All-null column yields null regardless of ignore_nulls.
    nan_ds = ray.data.from_items([{"A": None}] * len(values)).repartition(num_parts)
    if ds_format == "pandas":
        nan_ds = as_pandas(nan_ds)
    assert pd.isnull(nan_ds.std("A"))
    assert pd.isnull(nan_ds.std("A", ignore_nulls=False))
if __name__ == "__main__":
    # Allow running this test module directly via `python <file>`.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_dataset_aggregrations.py",
"license": "Apache License 2.0",
"lines": 197,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/test_dataset_creation.py | import sys
import pandas as pd
import pyarrow as pa
import pytest
import ray
from ray.data._internal.execution.interfaces.ref_bundle import (
_ref_bundles_iterator_to_block_refs_list,
)
from ray.data.tests.conftest import * # noqa
from ray.data.tests.util import extract_values
from ray.tests.conftest import * # noqa
@pytest.mark.parametrize(
    "input_blocks",
    [
        [pd.DataFrame({"column": ["spam"]}), pd.DataFrame({"column": ["ham", "eggs"]})],
        [
            pa.Table.from_pydict({"column": ["spam"]}),
            pa.Table.from_pydict({"column": ["ham", "eggs"]}),
        ],
    ],
)
def test_from_blocks(input_blocks, ray_start_regular_shared):
    """from_blocks() must preserve the given blocks one-to-one."""
    ds = ray.data.from_blocks(input_blocks)
    bundles = ds.iter_internal_ref_bundles()
    output_blocks = ray.get(_ref_bundles_iterator_to_block_refs_list(bundles))
    assert len(input_blocks) == len(output_blocks)
    for expected, actual in zip(input_blocks, output_blocks):
        assert expected.equals(actual)
def test_from_items(ray_start_regular_shared):
    """from_items() round-trips values and stores them as Arrow blocks."""
    ds = ray.data.from_items(["hello", "world"])
    assert extract_values("item", ds.take()) == ["hello", "world"]
    first_batch = next(iter(ds.iter_batches(batch_format=None)))
    assert isinstance(first_batch, pa.Table)
@pytest.mark.parametrize("parallelism", list(range(1, 21)))
def test_from_items_parallelism(ray_start_regular_shared, parallelism):
    """Requested parallelism should yield exactly that many blocks."""
    n = 20
    records = [{"a": i} for i in range(n)]
    ds = ray.data.from_items(records, override_num_blocks=parallelism)
    assert ds.take_all() == records
    assert ds._plan.initial_num_blocks() == parallelism
def test_from_items_parallelism_truncated(ray_start_regular_shared):
    """Parallelism above the item count is capped at the item count."""
    n = 10
    requested_parallelism = 20
    records = [{"a": i} for i in range(n)]
    ds = ray.data.from_items(records, override_num_blocks=requested_parallelism)
    assert ds.take_all() == records
    # The block count is truncated to one block per item.
    assert ds._plan.initial_num_blocks() == n
if __name__ == "__main__":
    # Allow running this test module directly via `python <file>`.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_dataset_creation.py",
"license": "Apache License 2.0",
"lines": 55,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/test_dataset_iter.py | import math
import sys
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
import ray
from ray.data.block import BlockAccessor
from ray.data.context import DataContext
from ray.data.tests.conftest import * # noqa
from ray.data.tests.util import extract_values
from ray.tests.conftest import * # noqa
def test_iter_rows(ray_start_regular_shared):
    """iter_rows() yields plain dicts for simple, Arrow, and pandas data."""
    # Simple rows.
    n = 10
    ds = ray.data.range(n)
    for row, expected_id in zip(ds.iter_rows(), range(n)):
        assert row == {"id": expected_id}

    # Tabular rows backed by Arrow blocks.
    tables = [
        pa.Table.from_pydict({"one": [1, 2, 3], "two": [2, 3, 4]}),
        pa.Table.from_pydict({"one": [4, 5, 6], "two": [5, 6, 7]}),
        pa.Table.from_pydict({"one": [7, 8, 9], "two": [8, 9, 10]}),
        pa.Table.from_pydict({"one": [10, 11, 12], "two": [11, 12, 13]}),
    ]
    combined = pa.concat_tables(tables)
    ds = ray.data.from_arrow(tables)

    def rows_of(table):
        # Row-major view of an Arrow table as a list of dicts.
        columns = table.to_pydict()
        names = table.schema.names
        return [
            {name: columns[name][i] for name in names}
            for i in range(table.num_rows)
        ]

    # Default ArrowRows.
    for row, expected in zip(ds.iter_rows(), rows_of(combined)):
        assert isinstance(row, dict)
        assert row == expected

    # PandasRows after conversion.
    pandas_ds = ds.map_batches(lambda x: x, batch_format="pandas")
    df = combined.to_pandas()
    for row, (_, df_row) in zip(pandas_ds.iter_rows(), df.iterrows()):
        assert isinstance(row, dict)
        assert row == df_row.to_dict()
def test_iter_batches_basic(ray_start_regular_shared):
    """iter_batches() across formats, batch sizes, drop_last, and prefetch.

    Uses four 3-row pandas blocks (12 rows total) and checks that batches
    come back in order, in the requested format, and with the requested
    sizing semantics.
    """
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": [2, 3, 4]})
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": [5, 6, 7]})
    df3 = pd.DataFrame({"one": [7, 8, 9], "two": [8, 9, 10]})
    df4 = pd.DataFrame({"one": [10, 11, 12], "two": [11, 12, 13]})
    dfs = [df1, df2, df3, df4]
    ds = ray.data.from_blocks(dfs)

    # Default: batch_size=None yields one batch per block.
    for batch, df in zip(ds.iter_batches(batch_size=None, batch_format="pandas"), dfs):
        assert isinstance(batch, pd.DataFrame)
        assert batch.equals(df)

    # pyarrow.Table format.
    for batch, df in zip(ds.iter_batches(batch_size=None, batch_format="pyarrow"), dfs):
        assert isinstance(batch, pa.Table)
        assert batch.equals(pa.Table.from_pandas(df))

    # NumPy format: a dict of column-name -> ndarray.
    for batch, df in zip(ds.iter_batches(batch_size=None, batch_format="numpy"), dfs):
        assert isinstance(batch, dict)
        assert list(batch.keys()) == ["one", "two"]
        assert all(isinstance(col, np.ndarray) for col in batch.values())
        pd.testing.assert_frame_equal(pd.DataFrame(batch), df)

    # Test NumPy format on Arrow blocks.
    ds2 = ds.map_batches(lambda b: b, batch_size=None, batch_format="pyarrow")
    for batch, df in zip(ds2.iter_batches(batch_size=None, batch_format="numpy"), dfs):
        assert isinstance(batch, dict)
        assert list(batch.keys()) == ["one", "two"]
        assert all(isinstance(col, np.ndarray) for col in batch.values())
        pd.testing.assert_frame_equal(pd.DataFrame(batch), df)

    # Default format -> numpy.
    for batch, df in zip(ds.iter_batches(batch_size=None, batch_format="default"), dfs):
        assert isinstance(batch, dict)
        assert list(batch.keys()) == ["one", "two"]
        assert all(isinstance(col, np.ndarray) for col in batch.values())
        pd.testing.assert_frame_equal(pd.DataFrame(batch), df)

    # Batch size (2 divides the 12 rows evenly, so all batches are full).
    batch_size = 2
    batches = list(ds.iter_batches(batch_size=batch_size, batch_format="pandas"))
    assert all(len(batch) == batch_size for batch in batches)
    assert len(batches) == math.ceil(
        (len(df1) + len(df2) + len(df3) + len(df4)) / batch_size
    )
    assert pd.concat(batches, ignore_index=True).equals(
        pd.concat(dfs, ignore_index=True)
    )

    # Batch size larger than block (4 > 3, still divides 12 evenly).
    batch_size = 4
    batches = list(ds.iter_batches(batch_size=batch_size, batch_format="pandas"))
    assert all(len(batch) == batch_size for batch in batches)
    assert len(batches) == math.ceil(
        (len(df1) + len(df2) + len(df3) + len(df4)) / batch_size
    )
    assert pd.concat(batches, ignore_index=True).equals(
        pd.concat(dfs, ignore_index=True)
    )

    # Batch size larger than dataset: a single batch with everything.
    batch_size = 15
    batches = list(ds.iter_batches(batch_size=batch_size, batch_format="pandas"))
    assert all(len(batch) == ds.count() for batch in batches)
    assert len(batches) == 1
    assert pd.concat(batches, ignore_index=True).equals(
        pd.concat(dfs, ignore_index=True)
    )

    # Batch size drop partial: the trailing 2-row remainder is dropped.
    batch_size = 5
    batches = list(
        ds.iter_batches(batch_size=batch_size, drop_last=True, batch_format="pandas")
    )
    assert all(len(batch) == batch_size for batch in batches)
    assert len(batches) == (len(df1) + len(df2) + len(df3) + len(df4)) // batch_size
    assert pd.concat(batches, ignore_index=True).equals(
        pd.concat(dfs, ignore_index=True)[:10]
    )

    # Batch size don't drop partial: the last batch holds the remainder.
    batch_size = 5
    batches = list(
        ds.iter_batches(batch_size=batch_size, drop_last=False, batch_format="pandas")
    )
    assert all(len(batch) == batch_size for batch in batches[:-1])
    assert len(batches[-1]) == (len(df1) + len(df2) + len(df3) + len(df4)) % batch_size
    assert len(batches) == math.ceil(
        (len(df1) + len(df2) + len(df3) + len(df4)) / batch_size
    )
    assert pd.concat(batches, ignore_index=True).equals(
        pd.concat(dfs, ignore_index=True)
    )

    # Prefetch must not change contents or ordering.
    batches = list(
        ds.iter_batches(prefetch_batches=1, batch_size=None, batch_format="pandas")
    )
    assert len(batches) == len(dfs)
    for batch, df in zip(batches, dfs):
        assert isinstance(batch, pd.DataFrame)
        assert batch.equals(df)
    batch_size = 2
    batches = list(
        ds.iter_batches(
            prefetch_batches=2, batch_size=batch_size, batch_format="pandas"
        )
    )
    assert all(len(batch) == batch_size for batch in batches)
    assert len(batches) == math.ceil(
        (len(df1) + len(df2) + len(df3) + len(df4)) / batch_size
    )
    assert pd.concat(batches, ignore_index=True).equals(
        pd.concat(dfs, ignore_index=True)
    )

    # Prefetch more than number of blocks.
    batches = list(
        ds.iter_batches(
            prefetch_batches=len(dfs), batch_size=None, batch_format="pandas"
        )
    )
    assert len(batches) == len(dfs)
    for batch, df in zip(batches, dfs):
        assert isinstance(batch, pd.DataFrame)
        assert batch.equals(df)

    # Prefetch with ray.wait (actor-based prefetcher disabled); restore the
    # config afterwards so other tests are unaffected.
    context = DataContext.get_current()
    old_config = context.actor_prefetcher_enabled
    try:
        context.actor_prefetcher_enabled = False
        batches = list(
            ds.iter_batches(prefetch_batches=1, batch_size=None, batch_format="pandas")
        )
        assert len(batches) == len(dfs)
        for batch, df in zip(batches, dfs):
            assert isinstance(batch, pd.DataFrame)
            assert batch.equals(df)
    finally:
        context.actor_prefetcher_enabled = old_config
def test_iter_batches_empty_block(ray_start_regular_shared):
    """Empty blocks produced by repartition are skipped while batching."""
    ds = ray.data.range(1).repartition(10)
    expected = "[{'id': array([0])}]"
    assert str(list(ds.iter_batches(batch_size=None))) == expected
    shuffled = list(ds.iter_batches(batch_size=1, local_shuffle_buffer_size=1))
    assert str(shuffled) == expected
@pytest.mark.parametrize("ds_format", ["arrow", "pandas"])
def test_iter_batches_local_shuffle(shutdown_only, ds_format):
    """Local shuffling permutes rows without losing or duplicating any.

    Covers buffers smaller/larger than the dataset, single-block and
    single-row-block layouts, batch sizes larger than a block or the whole
    dataset, seeded (deterministic) shuffles, drop_last, and empty datasets.
    The six unseeded configurations previously appeared as near-identical
    copy-pasted sections; they are now driven by one helper.
    """
    # Input validation: a concrete batch size is required for local shuffle.
    with pytest.raises(ValueError):
        list(
            ray.data.range(100).iter_batches(
                batch_size=None, local_shuffle_buffer_size=10
            )
        )

    def make_ds(n, parallelism=200):
        # Build a range dataset in the block format under test. (Renamed
        # from `range` to stop shadowing the builtin.)
        ds = ray.data.range(n, override_num_blocks=parallelism)
        if ds_format == "pandas":
            ds = ds.map_batches(
                lambda df: df, batch_size=None, batch_format="pandas"
            )
        return ds

    def to_row_dicts(batch):
        if isinstance(batch, pd.DataFrame):
            return batch.to_dict(orient="records")
        return [{"id": v} for v in batch["id"]]

    def unbatch(batches):
        return [row for batch in batches for row in to_row_dicts(batch)]

    def sort(rows):
        return sorted(rows, key=lambda r: r["id"])

    base = make_ds(100).take_all()

    def shuffled(parallelism, **iter_kwargs):
        return unbatch(
            make_ds(100, parallelism=parallelism).iter_batches(**iter_kwargs)
        )

    def check_is_random_permutation(r1, r2):
        # Two independent shuffles should differ from each other and from
        # the unshuffled order, but contain exactly the same rows.
        assert r1 != r2, (r1, r2)
        assert r1 != base
        assert r2 != base
        assert sort(r1) == sort(base)
        assert sort(r2) == sort(base)

    # (parallelism, iter_batches kwargs) covering: basic shuffle, single
    # block, single-row blocks, buffer larger than the dataset, batch
    # larger than a block, and batch larger than the dataset.
    cases = [
        (10, dict(batch_size=3, local_shuffle_buffer_size=25)),
        (1, dict(batch_size=3, local_shuffle_buffer_size=25)),
        (100, dict(batch_size=3, local_shuffle_buffer_size=25)),
        (10, dict(batch_size=3, local_shuffle_buffer_size=200)),
        (20, dict(batch_size=12, local_shuffle_buffer_size=25)),
        (10, dict(batch_size=200, local_shuffle_buffer_size=400)),
    ]
    for parallelism, kwargs in cases:
        check_is_random_permutation(
            shuffled(parallelism, **kwargs), shuffled(parallelism, **kwargs)
        )

    # A fixed seed makes the shuffle deterministic across runs.
    seeded_kwargs = dict(
        batch_size=3, local_shuffle_buffer_size=25, local_shuffle_seed=0
    )
    r1 = shuffled(10, **seeded_kwargs)
    r2 = shuffled(10, **seeded_kwargs)
    assert r1 == r2, (r1, r2)
    assert r1 != base
    assert sort(r1) == sort(base)

    # drop_last=True discards the trailing partial batch.
    drop_kwargs = dict(batch_size=7, local_shuffle_buffer_size=21, drop_last=True)
    r1 = shuffled(10, **drop_kwargs)
    r2 = shuffled(10, **drop_kwargs)
    assert r1 != r2, (r1, r2)
    assert r1 != base
    assert r2 != base
    # Only full batches of 7 remain.
    assert len(r1) % 7 == 0
    assert len(r2) % 7 == 0
    # Rows must be a subset of the base rows (compare as hashable tuples).
    hashable_base = {tuple(r.items()) for r in base}
    assert {tuple(r.items()) for r in r1} <= hashable_base
    assert {tuple(r.items()) for r in r2} <= hashable_base

    # Empty dataset: no batches, no error.
    ds = ray.data.from_items([])
    rows = unbatch(ds.iter_batches(batch_size=2, local_shuffle_buffer_size=10))
    assert len(rows) == 0
    assert rows == ds.take()
@pytest.mark.parametrize(
    "block_sizes,batch_size,drop_last",
    [
        # Single block, batch smaller than block, keep partial
        ([10], 3, False),
        # Single block, batch smaller than block, drop partial
        ([10], 3, True),
        # Single block, exact division
        ([10], 5, False),
        # Multiple equal-sized blocks, batch doesn't divide evenly, keep partial
        ([5, 5, 5], 7, False),
        # Multiple equal-sized blocks, batch doesn't divide evenly, drop partial
        ([5, 5, 5], 7, True),
        # Multiple unequal-sized blocks, keep partial
        ([1, 5, 10], 4, False),
        # Multiple unequal-sized blocks, drop partial
        ([1, 5, 10], 4, True),
        # Edge case: batch_size = 1
        ([5, 3, 7], 1, False),
        # Edge case: batch larger than total rows
        ([2, 3, 4], 100, False),
        # Exact division across multiple blocks
        ([6, 12, 18], 6, False),
    ],
)
def test_iter_batches_grid(
    ray_start_regular_shared,
    block_sizes,
    batch_size,
    drop_last,
):
    """Slicing/combining/partial-drop logic over block/batch configurations."""
    # Build one DataFrame per block with globally increasing values.
    frames = []
    offset = 0
    for size in block_sizes:
        frames.append(pd.DataFrame({"value": list(range(offset, offset + size))}))
        offset += size
    num_rows = offset
    ds = ray.data.from_blocks(frames)

    batches = list(
        ds.iter_batches(
            batch_size=batch_size,
            drop_last=drop_last,
            batch_format="pandas",
        )
    )
    if num_rows % batch_size == 0 or not drop_last:
        # ceil(num_rows / batch_size) batches covering the whole dataset.
        assert len(batches) == math.ceil(num_rows / batch_size)
        assert pd.concat(batches, ignore_index=True).equals(ds.to_pandas())
    else:
        # The trailing partial batch is dropped, leaving floor(n / b) full
        # batches whose concatenation is a prefix of the dataset.
        assert len(batches) == num_rows // batch_size
        assert pd.concat(batches, ignore_index=True).equals(
            ds.to_pandas()[: batch_size * (num_rows // batch_size)]
        )
    if num_rows % batch_size == 0 or drop_last:
        assert all(len(batch) == batch_size for batch in batches)
    else:
        # All but the last batch are full; the last holds the remainder.
        assert all(len(batch) == batch_size for batch in batches[:-1])
        assert len(batches[-1]) == num_rows % batch_size
@pytest.mark.skipif(
    sys.version_info >= (3, 12), reason="No tensorflow for Python 3.12+"
)
def test_iter_tf_batches_emits_deprecation_warning(ray_start_regular_shared):
    """iter_tf_batches() is deprecated and must warn on use."""
    ds = ray.data.range(1)
    with pytest.warns(DeprecationWarning):
        ds.iter_tf_batches()
@pytest.mark.skipif(
    sys.version_info >= (3, 12), reason="No tensorflow for Python 3.12+"
)
def test_iter_tf_batches(ray_start_regular_shared):
    """iter_tf_batches() yields every row on each of multiple epochs."""
    frames = [
        pd.DataFrame(
            {"one": [1, 2, 3], "two": [1.0, 2.0, 3.0], "label": [1.0, 2.0, 3.0]}
        ),
        pd.DataFrame(
            {"one": [4, 5, 6], "two": [4.0, 5.0, 6.0], "label": [4.0, 5.0, 6.0]}
        ),
        pd.DataFrame({"one": [7, 8], "two": [7.0, 8.0], "label": [7.0, 8.0]}),
    ]
    combined = pd.concat(frames)
    ds = ray.data.from_pandas(frames)
    num_epochs = 2
    for _ in range(num_epochs):
        collected = [
            np.stack((batch["one"], batch["two"], batch["label"]), axis=1)
            for batch in ds.iter_tf_batches(batch_size=3)
        ]
        stacked = np.concatenate(collected)
        # Row order is not asserted; compare sorted values instead.
        np.testing.assert_array_equal(np.sort(combined.values), np.sort(stacked))
@pytest.mark.skipif(
    sys.version_info >= (3, 12), reason="No tensorflow for Python 3.12+"
)
def test_iter_tf_batches_tensor_ds(ray_start_regular_shared):
    """iter_tf_batches() round-trips tensor (ndarray) datasets per epoch."""
    arr1 = np.arange(12).reshape((3, 2, 2))
    arr2 = np.arange(12, 24).reshape((3, 2, 2))
    expected = np.concatenate((arr1, arr2))
    ds = ray.data.from_numpy([arr1, arr2])
    num_epochs = 2
    for _ in range(num_epochs):
        chunks = [batch["data"] for batch in ds.iter_tf_batches(batch_size=2)]
        np.testing.assert_array_equal(expected, np.concatenate(chunks))
def test_get_internal_block_refs(ray_start_regular_shared):
    """get_internal_block_refs() exposes one ref per block with all rows."""
    block_refs = ray.data.range(10, override_num_blocks=10).get_internal_block_refs()
    assert len(block_refs) == 10
    values = []
    for block in ray.get(block_refs):
        values.extend(
            extract_values("id", BlockAccessor.for_block(block).iter_rows(True))
        )
    assert sorted(values) == list(range(10)), values
def test_iter_internal_ref_bundles(ray_start_regular_shared):
    """iter_internal_ref_bundles() yields one bundle per block, all rows."""
    n = 10
    ds = ray.data.range(n, override_num_blocks=n)
    values = []
    bundle_count = 0
    for bundle in ds.iter_internal_ref_bundles():
        for block_ref, _ in bundle.blocks:
            block = ray.get(block_ref)
            values.extend(
                extract_values("id", BlockAccessor.for_block(block).iter_rows(True))
            )
        bundle_count += 1
    assert bundle_count == n
    assert sorted(values) == list(range(n)), values
if __name__ == "__main__":
    # Allow running this test module directly via `python <file>`.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_dataset_iter.py",
"license": "Apache License 2.0",
"lines": 524,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/test_dataset_limits.py | import sys
import time
import pandas as pd
import pyarrow as pa
import pytest
import ray
from ray.data.block import BlockMetadata
from ray.data.context import DataContext
from ray.data.datasource.datasource import Datasource, ReadTask
from ray.data.tests.conftest import * # noqa
from ray.data.tests.conftest import (
CoreExecutionMetrics,
assert_core_execution_metrics_equals,
get_initial_core_execution_metrics_snapshot,
)
from ray.data.tests.util import extract_values
from ray.tests.conftest import * # noqa
def test_limit_execution(ray_start_regular):
    """limit() on a lazy dataset must not launch far more tasks than needed.

    Checks core-task counts via execution-metric snapshots: lazy limits
    should stop early, and limits on a materialized dataset should launch
    no new read tasks at all.
    """
    last_snapshot = get_initial_core_execution_metrics_snapshot()
    override_num_blocks = 20
    ds = ray.data.range(100, override_num_blocks=override_num_blocks)

    # Add some delay to the output to prevent all tasks from finishing
    # immediately.
    def delay(row):
        time.sleep(0.1)
        return row

    ds = ds.map(delay)
    # Building the pipeline alone should launch no tasks.
    last_snapshot = assert_core_execution_metrics_equals(
        CoreExecutionMetrics(task_count={}),
        last_snapshot=last_snapshot,
    )

    # During lazy execution, we should not execute too many more tasks than is
    # needed to produce the requested number of rows.
    for i in [1, 11]:
        assert extract_values("id", ds.limit(i).take(200)) == list(range(i))
        last_snapshot = assert_core_execution_metrics_equals(
            CoreExecutionMetrics(
                task_count={
                    "ReadRange->Map(delay)": lambda count: count
                    < override_num_blocks / 2,
                    "slice_fn": lambda count: count <= 1,
                }
            ),
            last_snapshot=last_snapshot,
        )

    # .materialize().limit() should only trigger execution once.
    ds = ray.data.range(100, override_num_blocks=20).materialize()
    last_snapshot = assert_core_execution_metrics_equals(
        CoreExecutionMetrics(
            task_count={
                "ReadRange": 20,
            }
        ),
        last_snapshot=last_snapshot,
    )
    # Subsequent limits on materialized data need at most a slice task.
    for i in [1, 10]:
        assert extract_values("id", ds.limit(i).take(200)) == list(range(i))
        assert_core_execution_metrics_equals(
            CoreExecutionMetrics(task_count={"slice_fn": lambda count: count <= 1}),
            last_snapshot=last_snapshot,
        )
@pytest.mark.parametrize("lazy", [False, True])
def test_limit(ray_start_regular_shared, lazy):
    """limit(i) returns exactly the first i rows, lazy or materialized."""
    ds = ray.data.range(100, override_num_blocks=20)
    if not lazy:
        ds = ds.materialize()
    for i in range(100):
        limited = ds.limit(i).take(200)
        assert extract_values("id", limited) == list(range(i))
# NOTE: We test outside the power-of-2 range in order to ensure that we're not
# reading redundant files due to exponential ramp-up.
@pytest.mark.parametrize("limit", [10, 20, 30, 60])
def test_limit_no_redundant_read(
    ray_start_regular_shared,
    limit,
):
    """Dataset truncation eliminates redundant reads.

    Uses a datasource backed by a counting actor so the number of read
    tasks actually launched can be observed.
    """

    # Counter actor shared by all read tasks of the datasource below.
    @ray.remote
    class Counter:
        def __init__(self):
            self.count = 0

        def increment(self):
            self.count += 1

        def get(self):
            return self.count

        def reset(self):
            self.count = 0

    class CountingRangeDatasource(Datasource):
        """Range datasource that counts how many read tasks executed."""

        def __init__(self):
            self.counter = Counter.remote()

        def prepare_read(self, parallelism, n):
            def range_(i):
                # Bump the counter once per executed read task.
                ray.get(self.counter.increment.remote())
                return [
                    pd.DataFrame({"id": range(parallelism * i, parallelism * i + n)})
                ]

            return [
                ReadTask(
                    # Bind i as a default to avoid late-binding closure bugs.
                    lambda i=i: range_(i),
                    BlockMetadata(
                        num_rows=n,
                        size_bytes=sum(
                            sys.getsizeof(i)
                            for i in range(parallelism * i, parallelism * i + n)
                        ),
                        input_files=None,
                        exec_stats=None,
                    ),
                    schema=pa.lib.Schema.from_pandas(pd.DataFrame({"id": []})),
                )
                for i in range(parallelism)
            ]

    source = CountingRangeDatasource()
    total_rows = 1000
    override_num_blocks = 100
    ds = ray.data.read_datasource(
        source,
        override_num_blocks=override_num_blocks,
        n=total_rows // override_num_blocks,
    )
    # Apply multiple limit ops.
    # Once the smallest limit is reached, the entire dataset should stop execution.
    ds = ds.limit(total_rows)
    ds = ds.limit(limit)
    ds = ds.limit(total_rows)
    # Check content.
    assert len(ds.take(limit)) == limit
    # Check number of read tasks launched.
    # min_read_tasks is the minimum number of read tasks needed for the limit.
    # We may launch more tasks than this number, in order to maximize throughput.
    # But the actual number of read tasks should be less than the parallelism.
    count = ray.get(source.counter.get.remote())
    min_read_tasks = limit // (total_rows // override_num_blocks)
    assert min_read_tasks <= count < override_num_blocks
def test_limit_no_num_row_info(ray_start_regular_shared):
    """Datasources without row-count metadata can still be truncated.

    Without num_rows, truncation falls back to launching all read tasks.
    """

    class DumbOnesDatasource(Datasource):
        def prepare_read(self, parallelism, n):
            metadata = BlockMetadata(
                num_rows=None,  # deliberately unknown
                size_bytes=sys.getsizeof(1) * n,
                input_files=None,
                exec_stats=None,
            )
            schema = pa.lib.Schema.from_pandas(pd.DataFrame({"id": []}))
            return parallelism * [
                ReadTask(
                    lambda: [pd.DataFrame({"id": [1] * n})],
                    metadata,
                    schema=schema,
                )
            ]

    ds = ray.data.read_datasource(DumbOnesDatasource(), override_num_blocks=10, n=10)
    for i in range(1, 100):
        assert extract_values("id", ds.limit(i).take(100)) == [1] * i
def test_per_task_row_limit_basic(ray_start_regular_shared, restore_data_context):
    """Test basic per-block limiting functionality."""
    # NOTE: It's critical to preserve ordering for assertions in this test to work
    DataContext.get_current().execution_options.preserve_order = True
    # Plain range datasource with a limit smaller than the dataset.
    limited = ray.data.range(1000, override_num_blocks=10).limit(50)
    rows = limited.take_all()
    assert len(rows) == 50
    assert [row["id"] for row in rows] == list(range(50))
def test_per_task_row_limit_with_custom_readtask(ray_start_regular_shared):
    """Test per-block limiting directly with ReadTask implementation."""

    def read_200_rows():
        # Simulates a ReadTask that reads 200 rows.
        return [pd.DataFrame({"id": range(200)})]

    limited_task = ReadTask(
        read_fn=read_200_rows,
        metadata=BlockMetadata(
            num_rows=200, size_bytes=1600, input_files=None, exec_stats=None
        ),
        schema=pa.lib.Schema.from_pandas(pd.DataFrame({"id": []})),
        per_task_row_limit=50,
    )

    blocks = list(limited_task())
    # Only the first 50 rows survive the per-task limit.
    assert len(blocks) == 1
    assert len(blocks[0]) == 50
    assert blocks[0]["id"].tolist() == list(range(50))
def test_per_task_row_limit_multiple_blocks_per_task(ray_start_regular_shared):
    """Test per-block limiting when ReadTasks return multiple blocks."""

    def read_three_blocks():
        # Simulates a ReadTask that returns 3 blocks of 30 rows each.
        return [
            pd.DataFrame({"id": range(start, start + 30)})
            for start in (0, 30, 60)
        ]

    # Per-task limit of 70 cuts off partway through the third block.
    task = ReadTask(
        read_fn=read_three_blocks,
        metadata=BlockMetadata(
            num_rows=90, size_bytes=720, input_files=None, exec_stats=None
        ),
        schema=pa.lib.Schema.from_pandas(pd.DataFrame({"id": []})),
        per_task_row_limit=70,
    )

    blocks = list(task())
    # Two full blocks (60 rows) plus 10 rows of the third.
    assert sum(len(block) for block in blocks) == 70
    ids = [i for block in blocks for i in block["id"].tolist()]
    assert ids == list(range(70))
def test_per_task_row_limit_larger_than_data(
    ray_start_regular_shared, restore_data_context
):
    """Test per-block limiting when limit is larger than available data."""
    # NOTE: It's critical to preserve ordering for assertions in this test to work
    DataContext.get_current().execution_options.preserve_order = True

    n = 50
    # A limit above the dataset size must return every row, in order.
    rows = ray.data.range(n, override_num_blocks=5).limit(100).take_all()
    assert len(rows) == n
    assert [r["id"] for r in rows] == list(range(n))
def test_per_task_row_limit_exact_block_boundary(
    ray_start_regular_shared, restore_data_context
):
    """Test per-block limiting when limit exactly matches block boundaries."""
    # NOTE: It's critical to preserve ordering for assertions in this test to work
    DataContext.get_current().execution_options.preserve_order = True

    block_rows, block_count = 20, 5
    cutoff = block_rows * 2  # Exactly 2 blocks
    ds = ray.data.range(block_rows * block_count, override_num_blocks=block_count)
    rows = ds.limit(cutoff).take_all()
    assert len(rows) == cutoff
    assert [r["id"] for r in rows] == list(range(cutoff))
@pytest.mark.parametrize("limit", [1, 5, 10, 25, 50, 99])
def test_per_task_row_limit_various_sizes(
    ray_start_regular_shared, limit, restore_data_context
):
    """Test per-block limiting with various limit sizes."""
    # NOTE: It's critical to preserve ordering for assertions in this test to work
    DataContext.get_current().execution_options.preserve_order = True

    n_rows, n_blocks = 100, 10
    rows = ray.data.range(n_rows, override_num_blocks=n_blocks).limit(limit).take_all()
    # The limit may not exceed the dataset size.
    want = min(limit, n_rows)
    assert len(rows) == want
    assert [r["id"] for r in rows] == list(range(want))
def test_per_task_row_limit_with_transformations(
    ray_start_regular_shared, restore_data_context
):
    """Test that per-block limiting works correctly with transformations."""
    # NOTE: It's critical to preserve ordering for assertions in this test to work
    DataContext.get_current().execution_options.preserve_order = True

    expected = [i * 2 for i in range(20)]

    # Map applied after the limit.
    after = (
        ray.data.range(100, override_num_blocks=10)
        .limit(20)
        .map(lambda x: {"doubled": x["id"] * 2})
        .take_all()
    )
    assert len(after) == 20
    assert [r["doubled"] for r in after] == expected

    # Map applied before the limit.
    before = (
        ray.data.range(100, override_num_blocks=10)
        .map(lambda x: {"doubled": x["id"] * 2})
        .limit(20)
        .take_all()
    )
    assert len(before) == 20
    assert [r["doubled"] for r in before] == expected
def test_per_task_row_limit_with_filter(ray_start_regular_shared, restore_data_context):
    """Test per-block limiting with filter operations."""
    # NOTE: It's critical to preserve ordering for assertions in this test to work
    DataContext.get_current().execution_options.preserve_order = True

    # Filter before limit - per-block limiting should still work at read level
    rows = (
        ray.data.range(200, override_num_blocks=10)
        .filter(lambda x: x["id"] % 2 == 0)
        .limit(15)
        .take_all()
    )
    assert len(rows) == 15
    # The first 15 even numbers survive the filter + limit pipeline.
    assert [r["id"] for r in rows] == [i * 2 for i in range(15)]
def test_per_task_row_limit_readtask_properties(ray_start_regular_shared):
    """Test ReadTask per_block_limit property."""

    def tiny_read():
        return [pd.DataFrame({"id": [1, 2, 3]})]

    # Without a limit, the property defaults to None.
    no_limit_task = ReadTask(
        read_fn=tiny_read,
        metadata=BlockMetadata(
            num_rows=3, size_bytes=24, input_files=None, exec_stats=None
        ),
    )
    assert no_limit_task.per_task_row_limit is None

    # With a limit, the property reflects the configured value.
    capped_task = ReadTask(
        read_fn=tiny_read,
        metadata=BlockMetadata(
            num_rows=3, size_bytes=24, input_files=None, exec_stats=None
        ),
        per_task_row_limit=10,
    )
    assert capped_task.per_task_row_limit == 10
def test_per_task_row_limit_edge_cases(ray_start_regular_shared, restore_data_context):
    """Test per-block limiting edge cases."""
    # NOTE: It's critical to preserve ordering for assertions in this test to work
    DataContext.get_current().execution_options.preserve_order = True

    # Single-row dataset with limit 1.
    rows = ray.data.range(1, override_num_blocks=1).limit(1).take_all()
    assert len(rows) == 1
    assert rows[0]["id"] == 0

    # Limit of 1 on a large, many-block dataset.
    rows = ray.data.range(10000, override_num_blocks=100).limit(1).take_all()
    assert len(rows) == 1
    assert rows[0]["id"] == 0

    # A limit far larger than the dataset returns everything.
    rows = ray.data.range(100, override_num_blocks=10).limit(999999).take_all()
    assert len(rows) == 100
    assert [r["id"] for r in rows] == list(range(100))
if __name__ == "__main__":
    # Allow running this test module directly: delegates to pytest.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_dataset_limits.py",
"license": "Apache License 2.0",
"lines": 322,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/test_dataset_validation.py | import logging
import sys
from unittest.mock import patch
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
import ray
from ray.data import Schema
from ray.data._internal.util import _check_pyarrow_version
from ray.data.tests.conftest import * # noqa
from ray.tests.conftest import * # noqa
def test_column_name_type_check(ray_start_regular_shared):
    # String column names are accepted and preserved in the schema.
    frame = pd.DataFrame({"1": np.random.rand(10), "a": np.random.rand(10)})
    ds = ray.data.from_pandas(frame)
    assert ds.schema() == Schema(pa.schema([("1", pa.float64()), ("a", pa.float64())]))
    assert ds.count() == 10

    # Non-string column names (here: the int 1) are rejected.
    bad_frame = pd.DataFrame({1: np.random.rand(10), "a": np.random.rand(10)})
    with pytest.raises(ValueError):
        ray.data.from_pandas(bad_frame)
@pytest.mark.skipif(
    sys.version_info >= (3, 12), reason="TODO(scottjlee): Not working yet for py312"
)
def test_unsupported_pyarrow_versions_check(shutdown_only):
    """Unsupported pyarrow versions must raise on the first Dataset pyarrow use."""
    ray.shutdown()
    # Test that unsupported pyarrow versions cause an error to be raised upon the
    # initial pyarrow use.
    # The runtime_env installs pyarrow 8.0.0 for workers, so the remote task
    # below runs against the unsupported version.
    ray.init(runtime_env={"pip": ["pyarrow==8.0.0"]})

    @ray.remote
    def should_error():
        # Invokes Ray Data's version gate inside the pyarrow==8.0.0 environment.
        _check_pyarrow_version()

    with pytest.raises(
        Exception,
        match=r".*Dataset requires pyarrow >= 9.0.0, but 8.0.0 is installed.*",
    ):
        ray.get(should_error.remote())
class LoggerWarningCalled(Exception):
    """Custom exception used in test_warning_execute_with_no_cpu() and
    test_nowarning_execute_with_cpu(). Raised when the `logger.warning` method
    is called, so that we can kick out of `plan.execute()` by catching this Exception
    and check logging was done properly."""

    # Marker exception only; carries no additional state.
    pass
def test_warning_execute_with_no_cpu(ray_start_cluster):
    """Tests that executing a Dataset on a cluster with no CPU resources
    fails with a ValueError about the execution resource limits.

    NOTE(review): despite the historical name, the asserted behavior is an
    error, not a logged warning — the original assertions checked for a
    ValueError mentioning ``ExecutionResources(cpu=0.0``.
    """
    # Create one node with no CPUs to trigger the resource-limit error.
    ray.shutdown()
    ray.init(ray_start_cluster.address)
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=0)

    ds = ray.data.range(10)
    ds = ds.map_batches(lambda x: x)
    # The original try/except only checked the exception *if* one was raised,
    # so the test passed vacuously when execution succeeded. pytest.raises
    # makes the expectation explicit and fails if no error occurs.
    with pytest.raises(ValueError, match=r"exceeds the execution limits"):
        ds.take()
def test_nowarning_execute_with_cpu(ray_start_cluster):
    """Tests ExecutionPlan.execute() to ensure no warning is logged
    when there are available CPU resources."""
    # Create one node with CPUs to avoid triggering the Dataset warning
    ray.shutdown()
    ray.init(ray_start_cluster.address)

    plan_logger = logging.getLogger("ray.data._internal.plan")
    # Patch `warning` so any call would raise LoggerWarningCalled; also assert
    # afterwards that it was never invoked.
    with patch.object(
        plan_logger,
        "warning",
        side_effect=LoggerWarningCalled,
    ) as warn_mock:
        pipeline = ray.data.range(10).map_batches(lambda x: x)
        pipeline.take()
        warn_mock.assert_not_called()
if __name__ == "__main__":
    # Allow running this test module directly: delegates to pytest.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_dataset_validation.py",
"license": "Apache License 2.0",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/test_execution_optimizer_advanced.py | import itertools
import sys
from unittest.mock import MagicMock
import pandas as pd
import pyarrow as pa
import pytest
import ray
from ray.data._internal.compute import TaskPoolStrategy
from ray.data._internal.datasource.parquet_datasink import ParquetDatasink
from ray.data._internal.execution.interfaces.op_runtime_metrics import OpRuntimeMetrics
from ray.data._internal.execution.operators.base_physical_operator import (
AllToAllOperator,
)
from ray.data._internal.execution.operators.input_data_buffer import InputDataBuffer
from ray.data._internal.execution.operators.map_operator import MapOperator
from ray.data._internal.execution.operators.task_pool_map_operator import (
TaskPoolMapOperator,
)
from ray.data._internal.execution.operators.zip_operator import ZipOperator
from ray.data._internal.logical.interfaces import LogicalPlan
from ray.data._internal.logical.interfaces.physical_plan import PhysicalPlan
from ray.data._internal.logical.operators import (
RandomShuffle,
Repartition,
Sort,
)
from ray.data._internal.logical.operators.map_operator import MapBatches
from ray.data._internal.logical.operators.n_ary_operator import Zip
from ray.data._internal.logical.operators.write_operator import Write
from ray.data._internal.logical.rules import (
ConfigureMapTaskMemoryUsingOutputSize,
)
from ray.data._internal.planner import create_planner
from ray.data._internal.planner.exchange.sort_task_spec import SortKey
from ray.data._internal.stats import DatasetStats
from ray.data.context import DataContext
from ray.data.tests.conftest import * # noqa
from ray.data.tests.test_util import _check_usage_record, get_parquet_read_logical_op
from ray.data.tests.util import column_udf, extract_values, named_values
from ray.tests.conftest import * # noqa
def test_random_shuffle_operator(ray_start_regular_shared_2_cpus):
    """RandomShuffle plans to an AllToAllOperator fed by a single map."""
    ctx = DataContext.get_current()
    planner = create_planner()
    source = get_parquet_read_logical_op()
    shuffle_op = RandomShuffle(
        source,
        seed=0,
    )

    dag = planner.plan(LogicalPlan(shuffle_op, ctx)).dag

    assert shuffle_op.name == "RandomShuffle"
    assert isinstance(dag, AllToAllOperator)
    assert len(dag.input_dependencies) == 1
    assert isinstance(dag.input_dependencies[0], MapOperator)
    # Check that the linked logical operator is the same the input op.
    assert dag._logical_operators == [shuffle_op]
def test_random_shuffle_e2e(ray_start_regular_shared_2_cpus, configure_shuffle_method):
    """Different seeds produce different permutations of the same elements."""
    ds = ray.data.range(12, override_num_blocks=4)
    shuffled_a = extract_values("id", ds.random_shuffle(seed=0).take_all())
    shuffled_b = extract_values("id", ds.random_shuffle(seed=1024).take_all())
    assert shuffled_a != shuffled_b, (shuffled_a, shuffled_b)
    # Both shuffles are permutations of 0..11.
    assert sorted(shuffled_a) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], shuffled_a
    assert sorted(shuffled_b) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], shuffled_b
    _check_usage_record(["ReadRange", "RandomShuffle"])
@pytest.mark.parametrize(
    "shuffle",
    [True, False],
)
def test_repartition_operator(ray_start_regular_shared_2_cpus, shuffle):
    """Repartition plans to an AllToAllOperator fed by a single map."""
    ctx = DataContext.get_current()
    planner = create_planner()
    source = get_parquet_read_logical_op()
    repartition = Repartition(source, num_outputs=5, shuffle=shuffle)

    dag = planner.plan(LogicalPlan(repartition, ctx)).dag

    assert repartition.name == "Repartition"
    assert isinstance(dag, AllToAllOperator)
    assert len(dag.input_dependencies) == 1
    assert isinstance(dag.input_dependencies[0], MapOperator)
    # Check that the linked logical operator is the same the input op.
    assert dag._logical_operators == [repartition]
@pytest.mark.parametrize(
    "shuffle",
    [True, False],
)
def test_repartition_e2e(
    ray_start_regular_shared_2_cpus, configure_shuffle_method, shuffle
):
    """End-to-end repartition: block counts, row sums, and stats/usage records."""

    def _check_repartition_usage_and_stats(ds):
        # The stats layout differs by mode: shuffle repartition is fused with
        # the read ("ReadRange->Repartition"), while split-based repartition
        # appears as separate RepartitionSplit/RepartitionReduce stages.
        _check_usage_record(["ReadRange", "Repartition"])
        ds_stats: DatasetStats = ds._plan.stats()
        if shuffle:
            assert ds_stats.base_name == "ReadRange->Repartition"
            assert "ReadRange->RepartitionMap" in ds_stats.metadata
        else:
            assert ds_stats.base_name == "Repartition"
            assert "RepartitionSplit" in ds_stats.metadata
            assert "RepartitionReduce" in ds_stats.metadata

    ds = ray.data.range(10000, override_num_blocks=10).repartition(20, shuffle=shuffle)
    assert ds._plan.initial_num_blocks() == 20, ds._plan.initial_num_blocks()
    assert ds.sum() == sum(range(10000))
    assert ds._block_num_rows() == [500] * 20, ds._block_num_rows()
    _check_repartition_usage_and_stats(ds)

    # Test num_output_blocks > num_rows to trigger empty block handling.
    ds = ray.data.range(20, override_num_blocks=10).repartition(40, shuffle=shuffle)
    assert ds._plan.initial_num_blocks() == 40, ds._plan.initial_num_blocks()
    assert ds.sum() == sum(range(20))
    if shuffle:
        assert ds._block_num_rows() == [10] * 2 + [0] * (40 - 2), ds._block_num_rows()
    else:
        assert ds._block_num_rows() == [1] * 20 + [0] * 20, ds._block_num_rows()
    _check_repartition_usage_and_stats(ds)

    # Test case where number of rows does not divide equally into num_output_blocks.
    ds = ray.data.range(22).repartition(4, shuffle=shuffle)
    assert ds._plan.initial_num_blocks() == 4, ds._plan.initial_num_blocks()
    assert ds.sum() == sum(range(22))
    if shuffle:
        assert ds._block_num_rows() == [9, 9, 4, 0], ds._block_num_rows()
    else:
        assert ds._block_num_rows() == [5, 6, 5, 6], ds._block_num_rows()
    _check_repartition_usage_and_stats(ds)

    # Test case where we do not split on repartitioning.
    ds = ray.data.range(10, override_num_blocks=1).repartition(1, shuffle=shuffle)
    assert ds._plan.initial_num_blocks() == 1, ds._plan.initial_num_blocks()
    assert ds.sum() == sum(range(10))
    assert ds._block_num_rows() == [10], ds._block_num_rows()
    _check_repartition_usage_and_stats(ds)
def test_write_operator(ray_start_regular_shared_2_cpus, tmp_path):
    """Write plans to a TaskPoolMapOperator honoring the requested concurrency."""
    ctx = DataContext.get_current()
    concurrency = 2
    planner = create_planner()
    sink = ParquetDatasink(tmp_path)
    source = get_parquet_read_logical_op()
    write = Write(
        source,
        sink,
        compute=TaskPoolStrategy(concurrency),
    )

    dag = planner.plan(LogicalPlan(write, ctx)).dag

    assert write.name == "Write"
    assert isinstance(dag, TaskPoolMapOperator)
    assert dag._max_concurrency == concurrency
    assert len(dag.input_dependencies) == 1
    assert isinstance(dag.input_dependencies[0], MapOperator)
    # Check that the linked logical operator is the same the input op.
    assert dag._logical_operators == [write]
def test_sort_operator(
    ray_start_regular_shared_2_cpus,
):
    """Sort plans to an AllToAllOperator fed by a single map."""
    ctx = DataContext.get_current()
    planner = create_planner()
    source = get_parquet_read_logical_op()
    sort = Sort(
        source,
        sort_key=SortKey("col1"),
    )

    dag = planner.plan(LogicalPlan(sort, ctx)).dag

    assert sort.name == "Sort"
    assert isinstance(dag, AllToAllOperator)
    assert len(dag.input_dependencies) == 1
    assert isinstance(dag.input_dependencies[0], MapOperator)
def test_sort_e2e(ray_start_regular_shared_2_cpus, configure_shuffle_method, tmp_path):
    """Sort after a random shuffle restores order, ascending and descending."""
    ds = ray.data.range(100, override_num_blocks=4)
    ds = ds.random_shuffle()
    ds = ds.sort("id")
    assert extract_values("id", ds.take_all()) == list(range(100))
    _check_usage_record(["ReadRange", "RandomShuffle", "Sort"])

    # Round-trip through parquet, then sort the same column in both directions.
    df = pd.DataFrame({"one": list(range(100)), "two": ["a"] * 100})
    ds = ray.data.from_pandas([df])
    ds.write_parquet(tmp_path)
    ds = ray.data.read_parquet(tmp_path)
    ds = ds.random_shuffle()
    ds1 = ds.sort("one")
    ds2 = ds.sort("one", descending=True)
    r1 = ds1.select_columns(["one"]).take_all()
    r2 = ds2.select_columns(["one"]).take_all()
    assert [d["one"] for d in r1] == list(range(100))
    assert [d["one"] for d in r2] == list(reversed(range(100)))
def test_sort_validate_keys(ray_start_regular_shared_2_cpus):
    """Sorting by a nonexistent column raises; valid keys sort correctly."""
    ds = ray.data.range(10)
    assert extract_values("id", ds.sort("id").take_all()) == list(range(10))

    invalid_col_name = "invalid_column"
    with pytest.raises(ValueError, match="there's no such column in the dataset"):
        ds.sort(invalid_col_name).take_all()

    ds_named = ray.data.from_items(
        [
            {"col1": 1, "col2": 2},
            {"col1": 3, "col2": 4},
            {"col1": 5, "col2": 6},
            {"col1": 7, "col2": 8},
        ]
    )

    # Descending sort on col1 should reorder both columns consistently.
    ds_sorted_col1 = ds_named.sort("col1", descending=True)
    r1 = ds_sorted_col1.select_columns(["col1"]).take_all()
    r2 = ds_sorted_col1.select_columns(["col2"]).take_all()
    assert [d["col1"] for d in r1] == [7, 5, 3, 1]
    assert [d["col2"] for d in r2] == [8, 6, 4, 2]

    with pytest.raises(ValueError, match="there's no such column in the dataset"):
        ds_named.sort(invalid_col_name).take_all()
def test_inherit_batch_format_rule():
    """InheritBatchFormatRule propagates batch_format from upstream MapBatches."""
    from ray.data._internal.logical.rules import (
        InheritBatchFormatRule,
    )

    ctx = DataContext.get_current()
    read = get_parquet_read_logical_op()
    mapped = MapBatches(read, fn=lambda g: g, batch_format="pandas")
    sorted_op = Sort(mapped, SortKey("number", descending=True))

    original_plan = LogicalPlan(dag=sorted_op, context=ctx)
    optimized_plan = InheritBatchFormatRule().apply(original_plan)
    # The Sort op picks up "pandas" from the upstream MapBatches.
    assert optimized_plan.dag.batch_format == "pandas"
def test_batch_format_on_sort(ray_start_regular_shared_2_cpus):
    """Checks that the Sort op can inherit batch_format from upstream ops correctly."""
    ds = ray.data.from_items(
        [
            {"col1": 1, "col2": 2},
            {"col1": 1, "col2": 4},
            {"col1": 5, "col2": 6},
            {"col1": 7, "col2": 8},
        ]
    )
    # Expected rows after sorting by col2 descending.
    df_expected = pd.DataFrame(
        {
            "col1": [7, 5, 1, 1],
            "col2": [8, 6, 4, 2],
        }
    )
    # map_groups runs with batch_format="pandas"; the downstream sort should
    # inherit that format (see test name) and still produce the right frame.
    df_actual = (
        ds.groupby("col1")
        .map_groups(lambda g: g, batch_format="pandas")
        .sort("col2", descending=True)
        .to_pandas()
    )
    pd.testing.assert_frame_equal(df_actual, df_expected)
def test_batch_format_on_aggregate(ray_start_regular_shared_2_cpus):
    """Checks that the Aggregate op can inherit batch_format
    from upstream ops correctly."""
    from ray.data.aggregate import AggregateFn

    ds = ray.data.from_items(
        [
            {"col1": 1, "col2": 2},
            {"col1": 1, "col2": 4},
            {"col1": 5, "col2": 6},
            {"col1": 7, "col2": 8},
        ]
    )
    # Custom aggregation: product of all col2 values (2 * 4 * 6 * 8 == 384).
    aggregation = AggregateFn(
        init=lambda column: 1,
        accumulate_row=lambda a, row: a * row["col2"],
        merge=lambda a1, a2: a1 * a2,
        name="prod",
    )
    assert (
        ds.groupby("col1")
        .map_groups(lambda g: g, batch_format="pandas")
        .aggregate(aggregation)
    ) == {"prod": 384}
def test_aggregate_e2e(ray_start_regular_shared_2_cpus, configure_shuffle_method):
    """Groupby-count over unique ids yields a count of 1 per id."""
    counts = ray.data.range(100, override_num_blocks=4).groupby("id").count()
    assert counts.count() == 100
    for expected_id, row in enumerate(counts.sort("id").iter_rows()):
        assert row == {"id": expected_id, "count()": 1}
    _check_usage_record(["ReadRange", "Aggregate"])
def test_aggregate_validate_keys(ray_start_regular_shared_2_cpus):
    """Grouping by a nonexistent column raises; valid keys aggregate correctly."""
    ds = ray.data.range(10)
    invalid_col_name = "invalid_column"
    with pytest.raises(ValueError):
        ds.groupby(invalid_col_name).count()

    ds_named = ray.data.from_items(
        [
            {"col1": 1, "col2": "a"},
            {"col1": 1, "col2": "b"},
            {"col1": 2, "col2": "c"},
            {"col1": 3, "col2": "c"},
        ]
    )

    # Counts per distinct col1 value.
    ds_groupby_col1 = ds_named.groupby("col1").count()
    assert ds_groupby_col1.sort("col1").take_all() == [
        {"col1": 1, "count()": 2},
        {"col1": 2, "count()": 1},
        {"col1": 3, "count()": 1},
    ]
    # Counts per distinct col2 value.
    ds_groupby_col2 = ds_named.groupby("col2").count()
    assert ds_groupby_col2.sort("col2").take_all() == [
        {"col2": "a", "count()": 1},
        {"col2": "b", "count()": 1},
        {"col2": "c", "count()": 2},
    ]

    with pytest.raises(
        ValueError,
        match="there's no such column in the dataset",
    ):
        ds_named.groupby(invalid_col_name).count()
def test_zip_operator(ray_start_regular_shared_2_cpus):
    """Zip plans to a ZipOperator with exactly two upstream map operators."""
    ctx = DataContext.get_current()
    planner = create_planner()
    left = get_parquet_read_logical_op()
    right = get_parquet_read_logical_op()
    zip_op = Zip(left, right)

    dag = planner.plan(LogicalPlan(zip_op, ctx)).dag

    assert zip_op.name == "Zip"
    assert isinstance(dag, ZipOperator)
    assert len(dag.input_dependencies) == 2
    assert all(isinstance(dep, MapOperator) for dep in dag.input_dependencies)
    # Check that the linked logical operator is the same the input op.
    assert dag._logical_operators == [zip_op]
@pytest.mark.parametrize(
    "num_blocks1,num_blocks2,num_blocks3",
    list(itertools.combinations_with_replacement(range(1, 4), 3)),
)
def test_zip_e2e(
    ray_start_regular_shared_2_cpus, num_blocks1, num_blocks2, num_blocks3
):
    """Zip of three datasets aligns rows regardless of each input's block count."""
    n = 4
    ds1 = ray.data.range(n, override_num_blocks=num_blocks1)
    # ds2/ds3 shift ids by +1/+2 so the zipped columns are distinguishable.
    ds2 = ray.data.range(n, override_num_blocks=num_blocks2).map(
        column_udf("id", lambda x: x + 1)
    )
    ds3 = ray.data.range(n, override_num_blocks=num_blocks3).map(
        column_udf("id", lambda x: x + 2)
    )
    ds = ds1.zip(ds2, ds3)
    # Duplicate column names are suffixed: id, id_1, id_2.
    assert ds.take() == named_values(
        ["id", "id_1", "id_2"], zip(range(n), range(1, n + 1), range(2, n + 2))
    )
    _check_usage_record(["ReadRange", "Zip"])
def test_execute_to_legacy_block_list(
    ray_start_regular_shared_2_cpus,
):
    """Stats are populated lazily, only once iteration actually runs."""
    ds = ray.data.range(10)
    # Stats not initialized until `ds.iter_rows()` is called
    assert ds._plan._cache.get_stats() is None

    for expected, row in enumerate(ds.iter_rows()):
        assert row["id"] == expected

    stats = ds._plan._cache.get_stats()
    assert stats is not None
    assert "ReadRange" in stats.metadata
    assert stats.time_total_s > 0
def test_streaming_executor(
    ray_start_regular_shared_2_cpus,
):
    """Streams a multi-stage pipeline through iter_batches and checks coverage."""
    ds = ray.data.range(100, override_num_blocks=4)
    ds = ds.map_batches(lambda x: x)
    ds = ds.filter(lambda x: x["id"] > 0)
    ds = ds.random_shuffle()
    ds = ds.map_batches(lambda x: x)
    result = []
    for batch in ds.iter_batches(batch_size=3):
        batch = batch["id"]
        # 99 rows survive the filter (ids 1..99), so every batch of 3 is full.
        assert len(batch) == 3, batch
        result.extend(batch)
    assert sorted(result) == list(range(1, 100)), result
    _check_usage_record(["ReadRange", "MapBatches", "Filter", "RandomShuffle"])
def test_schema_partial_execution(
    ray_start_regular_shared_2_cpus,
):
    """ds.schema() should not force full execution of the plan."""
    fields = [
        ("sepal.length", pa.float64()),
        ("sepal.width", pa.float64()),
        ("petal.length", pa.float64()),
        ("petal.width", pa.float64()),
        ("variety", pa.string()),
    ]
    ds = ray.data.read_parquet(
        "example://iris.parquet",
        schema=pa.schema(fields),
        override_num_blocks=2,
    ).map_batches(lambda x: x)

    iris_schema = ds.schema()
    assert iris_schema == ray.data.dataset.Schema(pa.schema(fields))
    # Verify that ds.schema() executes only the first block, and not the
    # entire Dataset.
    assert not ds._plan.has_computed_output()
    assert ds._plan._logical_plan.dag.dag_str == (
        "Read[ReadParquet] -> MapBatches[MapBatches(<lambda>)]"
    )
@pytest.mark.parametrize(
    "average_bytes_per_output, ray_remote_args, ray_remote_args_fn, data_context, expected_memory",
    [
        # The user hasn't set memory, so the rule should configure it.
        (1, None, None, DataContext(), 1),
        # The user has set memory, so the rule shouldn't change it.
        (1, {"memory": 2}, None, DataContext(), 2),
        (1, None, lambda: {"memory": 2}, DataContext(), 2),
        # An estimate isn't available, so the rule shouldn't configure memory.
        (None, None, None, DataContext(), None),
    ],
)
def test_configure_map_task_memory_rule(
    average_bytes_per_output,
    ray_remote_args,
    ray_remote_args_fn,
    data_context,
    expected_memory,
):
    """The rule fills in `memory` only when the user hasn't set it and an
    output-size estimate is available."""
    input_op = InputDataBuffer(MagicMock(), [])
    map_op = MapOperator.create(
        MagicMock(),
        input_op=input_op,
        data_context=data_context,
        ray_remote_args=ray_remote_args,
        ray_remote_args_fn=ray_remote_args_fn,
    )
    # Stub the runtime metrics so the rule sees a fixed output-size estimate.
    map_op._metrics = MagicMock(
        spec=OpRuntimeMetrics, average_bytes_per_output=average_bytes_per_output
    )
    plan = PhysicalPlan(map_op, op_map=MagicMock(), context=data_context)

    rule = ConfigureMapTaskMemoryUsingOutputSize()
    new_plan = rule.apply(plan)

    remote_args = new_plan.dag._get_dynamic_ray_remote_args()
    assert remote_args.get("memory") == expected_memory
if __name__ == "__main__":
    # Allow running this test module directly: delegates to pytest.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_execution_optimizer_advanced.py",
"license": "Apache License 2.0",
"lines": 419,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/test_execution_optimizer_basic.py | import sys
from typing import TYPE_CHECKING, List, Optional
import numpy as np
import pandas as pd
import pytest
import ray
if TYPE_CHECKING:
from ray.data.context import DataContext
from ray.data._internal.execution.operators.input_data_buffer import InputDataBuffer
from ray.data._internal.execution.operators.map_operator import MapOperator
from ray.data._internal.execution.operators.task_pool_map_operator import (
TaskPoolMapOperator,
)
from ray.data._internal.logical.interfaces import LogicalPlan
from ray.data._internal.logical.operators import (
Filter,
FlatMap,
FromArrow,
FromItems,
FromNumpy,
FromPandas,
MapBatches,
MapRows,
Project,
)
from ray.data._internal.logical.optimizers import PhysicalOptimizer
from ray.data._internal.planner import create_planner
from ray.data.block import BlockMetadata
from ray.data.context import DataContext
from ray.data.datasource import Datasource
from ray.data.datasource.datasource import ReadTask
from ray.data.expressions import col
from ray.data.tests.conftest import * # noqa
from ray.data.tests.test_util import _check_usage_record, get_parquet_read_logical_op
from ray.data.tests.util import column_udf, extract_values, named_values
from ray.tests.conftest import * # noqa
def test_read_operator(ray_start_regular_shared_2_cpus):
    """A parquet read plans to a MapOperator fed by an InputDataBuffer."""
    ctx = DataContext.get_current()
    planner = create_planner()
    read = get_parquet_read_logical_op()

    dag = planner.plan(LogicalPlan(read, ctx)).dag

    assert read.name == "ReadParquet"
    assert isinstance(dag, MapOperator)
    assert len(dag.input_dependencies) == 1
    assert isinstance(dag.input_dependencies[0], InputDataBuffer)
    # Check that the linked logical operator is the same the input op.
    assert dag._logical_operators == [read]
    assert dag.input_dependencies[0]._logical_operators == [read]
def test_read_operator_emits_warning_for_large_read_tasks():
    """A read task that captures a large object should trigger a UserWarning."""

    class StubDatasource(Datasource):
        def estimate_inmemory_data_size(self) -> Optional[int]:
            return None

        def get_read_tasks(
            self,
            parallelism: int,
            per_task_row_limit: Optional[int] = None,
            data_context: Optional["DataContext"] = None,
        ) -> List[ReadTask]:
            # read_fn's closure captures this array, inflating the serialized
            # size of the read task far beyond its tiny one-row output.
            large_object = np.zeros((128, 1024, 1024), dtype=np.uint8)  # 128 MiB

            def read_fn():
                _ = large_object
                yield pd.DataFrame({"column": [0]})

            return [
                ReadTask(
                    read_fn,
                    BlockMetadata(1, None, None, None),
                    per_task_row_limit=per_task_row_limit,
                )
            ]

    with pytest.warns(UserWarning):
        ray.data.read_datasource(StubDatasource()).materialize()
def test_split_blocks_operator(ray_start_regular_shared_2_cpus):
    """A read with parallelism=10 plans to a fused SplitBlocks stage, and that
    stage is not fused with downstream ops."""
    ctx = DataContext.get_current()
    planner = create_planner()
    op = get_parquet_read_logical_op(parallelism=10)
    logical_plan = LogicalPlan(op, ctx)
    physical_plan = planner.plan(logical_plan)
    physical_plan = PhysicalOptimizer().optimize(physical_plan)
    physical_op = physical_plan.dag

    assert physical_op.name == "ReadParquet->SplitBlocks(10)"
    assert isinstance(physical_op, MapOperator)
    assert len(physical_op.input_dependencies) == 1
    assert isinstance(physical_op.input_dependencies[0], InputDataBuffer)
    assert physical_op._additional_split_factor == 10

    # Test that split blocks prevents fusion.
    op = MapBatches(
        op,
        lambda x: x,
    )
    logical_plan = LogicalPlan(op, ctx)
    physical_plan = planner.plan(logical_plan)
    physical_plan = PhysicalOptimizer().optimize(physical_plan)
    physical_op = physical_plan.dag

    # MapBatches remains its own operator; the split read stays upstream.
    assert physical_op.name == "MapBatches(<lambda>)"
    assert len(physical_op.input_dependencies) == 1
    up_physical_op = physical_op.input_dependencies[0]
    assert isinstance(up_physical_op, MapOperator)
    assert up_physical_op.name == "ReadParquet->SplitBlocks(10)"
def test_from_operators(ray_start_regular_shared_2_cpus):
    """Every from_* logical operator plans to a source InputDataBuffer."""
    ctx = DataContext.get_current()
    for op_cls in (FromArrow, FromItems, FromNumpy, FromPandas):
        planner = create_planner()
        logical_op = op_cls([], [])
        dag = planner.plan(LogicalPlan(logical_op, ctx)).dag

        assert logical_op.name == op_cls.__name__
        # Source buffers have no upstream dependencies.
        assert isinstance(dag, InputDataBuffer)
        assert len(dag.input_dependencies) == 0
        # Check that the linked logical operator is the same the input op.
        assert dag._logical_operators == [logical_op]
def test_from_items_e2e(ray_start_regular_shared_2_cpus):
    """from_items round-trips the items and records stats/usage."""
    payload = ["Hello", "World"]
    ds = ray.data.from_items(payload)
    assert ds.take_all() == named_values("item", payload), ds

    # Check that metadata fetch is included in stats.
    assert "FromItems" in ds.stats()
    assert ds._plan._logical_plan.dag.name == "FromItems"
    _check_usage_record(["FromItems"])
def test_map_operator_udf_name(ray_start_regular_shared_2_cpus):
    # Test the name of the Map operator with different types of UDF.
    def normal_function(x):
        return x

    lambda_function = lambda x: x  # noqa: E731

    class CallableClass:
        def __call__(self, x):
            return x

    class NormalClass:
        def method(self, x):
            return x

    udf_list = [
        # A normal function.
        normal_function,
        # A lambda function
        lambda_function,
        # A callable class.
        CallableClass,
        # An instance of a callable class.
        CallableClass(),
        # A normal class method.
        NormalClass().method,
    ]
    # Expected operator names, parallel to udf_list above.
    expected_names = [
        "normal_function",
        "<lambda>",
        "CallableClass",
        "CallableClass",
        "NormalClass.method",
    ]
    for udf, expected_name in zip(udf_list, expected_names):
        op = MapRows(
            get_parquet_read_logical_op(),
            udf,
        )
        assert op.name == f"Map({expected_name})"
def test_map_batches_operator(ray_start_regular_shared_2_cpus):
    """MapBatches plans to a MapOperator downstream of the read."""
    ctx = DataContext.get_current()
    planner = create_planner()
    read = get_parquet_read_logical_op()
    map_batches = MapBatches(
        read,
        lambda x: x,
    )

    dag = planner.plan(LogicalPlan(map_batches, ctx)).dag

    assert map_batches.name == "MapBatches(<lambda>)"
    assert isinstance(dag, MapOperator)
    assert len(dag.input_dependencies) == 1
    assert isinstance(dag.input_dependencies[0], MapOperator)
    # Check that the linked logical operator is the same the input op.
    assert dag._logical_operators == [map_batches]
def test_map_batches_e2e(ray_start_regular_shared_2_cpus):
    """An identity map_batches preserves all rows."""
    out = ray.data.range(5).map_batches(column_udf("id", lambda x: x))
    assert sorted(extract_values("id", out.take_all())) == list(range(5)), out
    _check_usage_record(["ReadRange", "MapBatches"])
def test_map_rows_operator(ray_start_regular_shared_2_cpus):
    """MapRows plans to a MapOperator downstream of the read."""
    ctx = DataContext.get_current()
    planner = create_planner()
    read = get_parquet_read_logical_op()
    map_rows = MapRows(
        read,
        lambda x: x,
    )

    dag = planner.plan(LogicalPlan(map_rows, ctx)).dag

    assert map_rows.name == "Map(<lambda>)"
    assert isinstance(dag, MapOperator)
    assert len(dag.input_dependencies) == 1
    assert isinstance(dag.input_dependencies[0], MapOperator)
def test_map_rows_e2e(ray_start_regular_shared_2_cpus):
    """A row-wise +1 map shifts every id."""
    ds = ray.data.range(5).map(column_udf("id", lambda x: x + 1))
    want = [1, 2, 3, 4, 5]
    got = sorted(extract_values("id", ds.take_all()))
    assert got == want, f"Expected {want}, but got {got}"
    _check_usage_record(["ReadRange", "Map"])
def test_filter_operator(ray_start_regular_shared_2_cpus):
    """Filter plans to a MapOperator downstream of the read."""
    ctx = DataContext.get_current()
    planner = create_planner()
    read = get_parquet_read_logical_op()
    filter_op = Filter(
        read,
        fn=lambda x: x,
    )

    dag = planner.plan(LogicalPlan(filter_op, ctx)).dag

    assert filter_op.name == "Filter(<lambda>)"
    assert isinstance(dag, MapOperator)
    assert len(dag.input_dependencies) == 1
    assert isinstance(dag.input_dependencies[0], MapOperator)
def test_filter_e2e(ray_start_regular_shared_2_cpus):
    """A parity filter keeps only the even ids."""
    evens = ray.data.range(5).filter(fn=lambda x: x["id"] % 2 == 0)
    assert sorted(extract_values("id", evens.take_all())) == [0, 2, 4], evens
    _check_usage_record(["ReadRange", "Filter"])
def test_project_operator_select(ray_start_regular_shared_2_cpus):
    """
    Checks that the physical plan is properly generated for the Project operator from
    select columns.
    """
    path = "example://iris.parquet"
    ds = ray.data.read_parquet(path)
    ds = ds.map_batches(lambda d: d)
    cols = ["sepal.length", "petal.width"]
    ds = ds.select_columns(cols)

    # select_columns lowers to a Project op whose exprs are plain column refs.
    logical_plan = ds._plan._logical_plan
    op = logical_plan.dag
    assert isinstance(op, Project), op.name
    assert op.exprs == [col("sepal.length"), col("petal.width")]

    physical_plan = create_planner().plan(logical_plan)
    physical_plan = PhysicalOptimizer().optimize(physical_plan)
    physical_op = physical_plan.dag
    assert isinstance(physical_op, TaskPoolMapOperator)
    assert isinstance(physical_op.input_dependency, TaskPoolMapOperator)
def test_project_operator_rename(ray_start_regular_shared_2_cpus):
    """
    Checks that the physical plan is properly generated for the Project operator from
    rename columns.
    """
    from ray.data.expressions import star
    path = "example://iris.parquet"
    ds = ray.data.read_parquet(path)
    ds = ds.map_batches(lambda d: d)
    cols_rename = {"sepal.length": "sepal_length", "petal.width": "pedal_width"}
    ds = ds.rename_columns(cols_rename)
    logical_plan = ds._plan._logical_plan
    op = logical_plan.dag
    assert isinstance(op, Project), op.name
    # Rename lowers to star() (keep all other columns) plus one aliased
    # column expression per renamed column.
    assert op.exprs == [
        star(),
        col("sepal.length").alias("sepal_length"),
        col("petal.width").alias("pedal_width"),
    ]
    physical_plan = create_planner().plan(logical_plan)
    physical_plan = PhysicalOptimizer().optimize(physical_plan)
    physical_op = physical_plan.dag
    assert isinstance(physical_op, TaskPoolMapOperator)
    assert isinstance(physical_op.input_dependency, TaskPoolMapOperator)
def test_flat_map(ray_start_regular_shared_2_cpus):
    """Checks that a logical FlatMap op is planned as a MapOperator with the
    planned upstream read as its only input dependency."""
    ctx = DataContext.get_current()
    planner = create_planner()
    read_op = get_parquet_read_logical_op()
    op = FlatMap(
        read_op,
        lambda x: x,
    )
    plan = LogicalPlan(op, ctx)
    physical_op = planner.plan(plan).dag
    # The operator name embeds the callable's name (a lambda here).
    assert op.name == "FlatMap(<lambda>)"
    assert isinstance(physical_op, MapOperator)
    assert len(physical_op.input_dependencies) == 1
    assert isinstance(physical_op.input_dependencies[0], MapOperator)
def test_flat_map_e2e(ray_start_regular_shared_2_cpus):
    """End-to-end check that flat_map emits two output rows per input row."""
    duplicated = ray.data.range(2).flat_map(
        fn=lambda x: [{"id": x["id"]}, {"id": x["id"]}]
    )
    assert extract_values("id", duplicated.take_all()) == [0, 0, 1, 1], duplicated
    _check_usage_record(["ReadRange", "FlatMap"])
def test_column_ops_e2e(ray_start_regular_shared_2_cpus):
    """End-to-end add_column / select_columns / drop_columns; each records a
    MapBatches usage entry, so all three presumably execute as MapBatches."""
    ds = ray.data.range(2)
    ds = ds.add_column(fn=lambda df: df.iloc[:, 0], col="new_col")
    assert ds.take_all() == [{"id": 0, "new_col": 0}, {"id": 1, "new_col": 1}], ds
    _check_usage_record(["ReadRange", "MapBatches"])
    select_ds = ds.select_columns(cols=["new_col"])
    assert select_ds.take_all() == [{"new_col": 0}, {"new_col": 1}]
    _check_usage_record(["ReadRange", "MapBatches"])
    ds = ds.drop_columns(cols=["new_col"])
    assert ds.take_all() == [{"id": 0}, {"id": 1}], ds
    _check_usage_record(["ReadRange", "MapBatches"])
def test_random_sample_e2e(ray_start_regular_shared_2_cpus):
    """End-to-end random_sample over plain and tensor datasets.

    Only checks that the sampled row count is close to the requested
    fraction, since sampling is probabilistic.
    """
    import math

    def ensure_sample_size_close(dataset, sample_percent=0.5):
        # Bug fix: sample the `dataset` argument instead of closing over the
        # outer `ds`, which left the parameter unused.
        sampled = dataset.random_sample(sample_percent)
        assert math.isclose(
            sampled.count(),
            int(dataset.count() * sample_percent),
            rel_tol=2,
            abs_tol=2,
        )

    ds = ray.data.range(10, override_num_blocks=2)
    ensure_sample_size_close(ds)
    ds = ray.data.range(10, override_num_blocks=2)
    ensure_sample_size_close(ds)
    ds = ray.data.range_tensor(5, override_num_blocks=2, shape=(2, 2))
    ensure_sample_size_close(ds)
    _check_usage_record(["ReadRange", "MapBatches"])
# Allow running this test module directly; propagate pytest's exit status.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_execution_optimizer_basic.py",
"license": "Apache License 2.0",
"lines": 311,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/test_execution_optimizer_integrations.py | import sys
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from packaging.version import parse as parse_version
import ray
from ray.data._internal.util import rows_same
from ray.data._internal.utils.arrow_utils import get_pyarrow_version
from ray.data.tests.conftest import * # noqa
from ray.data.tests.test_util import _check_usage_record
from ray.data.tests.util import extract_values
from ray.tests.conftest import * # noqa
def _should_skip_huggingface_test():
    """Return True when the installed HuggingFace ``datasets`` cannot run.

    ``datasets`` >= 3.0.0 requires pyarrow >= 12.0.0; this detects that
    incompatible pairing. Any failure to determine either version means
    "do not skip".
    """
    pyarrow_version = get_pyarrow_version()
    if pyarrow_version is None:
        return False
    try:
        datasets_version = __import__("datasets").__version__
        if datasets_version is None:
            return False
        old_pyarrow = pyarrow_version < parse_version("12.0.0")
        new_datasets = parse_version(datasets_version) >= parse_version("3.0.0")
        return old_pyarrow and new_datasets
    except (ImportError, AttributeError):
        # `datasets` missing, or missing a __version__ attribute.
        return False
def test_from_modin_e2e(ray_start_regular_shared_2_cpus):
    """End-to-end from_modin: round-trips the data and records FromPandas."""
    import modin.pandas as mopd
    df = pd.DataFrame(
        {"one": list(range(100)), "two": list(range(100))},
    )
    modf = mopd.DataFrame(df)
    ds = ray.data.from_modin(modf)
    # `ds.take_all()` triggers execution with new backend, which is
    # needed for checking operator usage below.
    assert len(ds.take_all()) == len(df)
    # `ds.to_pandas()` does not use the new backend.
    dfds = ds.to_pandas()
    assert df.equals(dfds)
    # Check that metadata fetch is included in stats. This is `FromPandas`
    # instead of `FromModin` because `from_modin` reduces to `from_pandas_refs`.
    assert "FromPandas" in ds.stats()
    assert ds._plan._logical_plan.dag.name == "FromPandas"
    _check_usage_record(["FromPandas"])
@pytest.mark.parametrize("enable_pandas_block", [False, True])
def test_from_pandas_refs_e2e(ray_start_regular_shared_2_cpus, enable_pandas_block):
    """End-to-end from_pandas_refs with both pandas-block modes: list of
    refs, a single ref, and chaining a map_batches on top."""
    ctx = ray.data.context.DataContext.get_current()
    old_enable_pandas_block = ctx.enable_pandas_block
    ctx.enable_pandas_block = enable_pandas_block
    # The flag lives on a process-global context, so restore it in `finally`
    # to avoid leaking state into other tests.
    try:
        df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
        df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
        expected_df = pd.concat([df1, df2])
        ds = ray.data.from_pandas_refs([ray.put(df1), ray.put(df2)])
        assert rows_same(ds.to_pandas(), expected_df)
        # Check that metadata fetch is included in stats.
        assert "FromPandas" in ds.stats()
        assert ds._plan._logical_plan.dag.name == "FromPandas"
        # Test chaining multiple operations
        ds2 = ds.map_batches(lambda x: x)
        assert rows_same(ds2.to_pandas(), expected_df)
        assert "MapBatches" in ds2.stats()
        assert "FromPandas" in ds2.stats()
        assert ds2._plan._logical_plan.dag.name == "MapBatches(<lambda>)"
        # test from single pandas dataframe
        ds = ray.data.from_pandas_refs(ray.put(df1))
        assert rows_same(ds.to_pandas(), df1)
        # Check that metadata fetch is included in stats.
        assert "FromPandas" in ds.stats()
        assert ds._plan._logical_plan.dag.name == "FromPandas"
        _check_usage_record(["FromPandas"])
    finally:
        ctx.enable_pandas_block = old_enable_pandas_block
def test_from_numpy_refs_e2e(ray_start_regular_shared_2_cpus):
    """End-to-end from_numpy_refs: list of refs, a single ref, and chaining
    a map_batches on top; each should record the FromNumpy operator."""
    arr1 = np.expand_dims(np.arange(0, 4), axis=1)
    arr2 = np.expand_dims(np.arange(4, 8), axis=1)
    ds = ray.data.from_numpy_refs([ray.put(arr1), ray.put(arr2)])
    values = np.stack(extract_values("data", ds.take(8)))
    np.testing.assert_array_equal(values, np.concatenate((arr1, arr2)))
    # Check that conversion task is included in stats.
    assert "FromNumpy" in ds.stats()
    assert ds._plan._logical_plan.dag.name == "FromNumpy"
    _check_usage_record(["FromNumpy"])
    # Test chaining multiple operations
    ds2 = ds.map_batches(lambda x: x)
    values = np.stack(extract_values("data", ds2.take(8)))
    np.testing.assert_array_equal(values, np.concatenate((arr1, arr2)))
    assert "MapBatches" in ds2.stats()
    assert "FromNumpy" in ds2.stats()
    assert ds2._plan._logical_plan.dag.name == "MapBatches(<lambda>)"
    _check_usage_record(["FromNumpy", "MapBatches"])
    # Test from single NumPy ndarray.
    ds = ray.data.from_numpy_refs(ray.put(arr1))
    values = np.stack(extract_values("data", ds.take(4)))
    np.testing.assert_array_equal(values, arr1)
    # Check that conversion task is included in stats.
    assert "FromNumpy" in ds.stats()
    assert ds._plan._logical_plan.dag.name == "FromNumpy"
    _check_usage_record(["FromNumpy"])
def test_from_arrow_refs_e2e(ray_start_regular_shared_2_cpus):
    """End-to-end from_arrow_refs with a list of table refs and a single
    table ref; each should record the FromArrow operator."""
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    ds = ray.data.from_arrow_refs(
        [ray.put(pa.Table.from_pandas(df1)), ray.put(pa.Table.from_pandas(df2))]
    )
    values = [(r["one"], r["two"]) for r in ds.take(6)]
    rows = [(r.one, r.two) for _, r in pd.concat([df1, df2]).iterrows()]
    assert values == rows
    # Check that metadata fetch is included in stats.
    assert "FromArrow" in ds.stats()
    assert ds._plan._logical_plan.dag.name == "FromArrow"
    _check_usage_record(["FromArrow"])
    # test from single pyarrow table ref
    ds = ray.data.from_arrow_refs(ray.put(pa.Table.from_pandas(df1)))
    values = [(r["one"], r["two"]) for r in ds.take(3)]
    rows = [(r.one, r.two) for _, r in df1.iterrows()]
    assert values == rows
    # Check that conversion task is included in stats.
    assert "FromArrow" in ds.stats()
    assert ds._plan._logical_plan.dag.name == "FromArrow"
    _check_usage_record(["FromArrow"])
@pytest.mark.skipif(
    # Bug fix: the condition must be *called*. Passing the function object
    # itself is always truthy, which unconditionally skipped this test.
    _should_skip_huggingface_test(),
    reason="Skip due to HuggingFace datasets >= 3.0.0 requiring pyarrow >= 12.0.0",
)
def test_from_huggingface_e2e(ray_start_regular_shared_2_cpus):
    """End-to-end from_huggingface: hosted public splits (ReadParquet or
    FromArrow path) plus a locally-transformed split (FromArrow fallback)."""
    import datasets
    from ray.data.tests.datasource.test_huggingface import hfds_assert_equals
    data = datasets.load_dataset("tweet_eval", "emotion")
    assert isinstance(data, datasets.DatasetDict)
    ray_datasets = {
        "train": ray.data.from_huggingface(data["train"]),
        "validation": ray.data.from_huggingface(data["validation"]),
        "test": ray.data.from_huggingface(data["test"]),
    }
    for ds_key, ds in ray_datasets.items():
        assert isinstance(ds, ray.data.Dataset)
        # `ds.take_all()` triggers execution with new backend, which is
        # needed for checking operator usage below.
        assert len(ds.take_all()) > 0
        # Check that metadata fetch is included in stats;
        # the underlying implementation uses the `ReadParquet` operator
        # as this is an un-transformed public dataset.
        assert "ReadParquet" in ds.stats() or "FromArrow" in ds.stats()
        assert (
            ds._plan._logical_plan.dag.name == "ReadParquet"
            or ds._plan._logical_plan.dag.name == "FromArrow"
        )
        # use sort by 'text' to match order of rows
        hfds_assert_equals(data[ds_key], ds)
    try:
        _check_usage_record(["ReadParquet"])
    except AssertionError:
        _check_usage_record(["FromArrow"])
    # test transformed public dataset for fallback behavior
    base_hf_dataset = data["train"]
    hf_dataset_split = base_hf_dataset.train_test_split(test_size=0.2)
    ray_dataset_split_train = ray.data.from_huggingface(hf_dataset_split["train"])
    assert isinstance(ray_dataset_split_train, ray.data.Dataset)
    # `ds.take_all()` triggers execution with new backend, which is
    # needed for checking operator usage below.
    assert len(ray_dataset_split_train.take_all()) > 0
    # Check that metadata fetch is included in stats;
    # the underlying implementation uses the `FromArrow` operator.
    assert "FromArrow" in ray_dataset_split_train.stats()
    assert ray_dataset_split_train._plan._logical_plan.dag.name == "FromArrow"
    assert ray_dataset_split_train.count() == hf_dataset_split["train"].num_rows
    _check_usage_record(["FromArrow"])
def test_from_torch_e2e(ray_start_regular_shared_2_cpus, tmp_path):
    """End-to-end from_torch over a torchvision dataset.

    NOTE(review): downloads FashionMNIST into ``tmp_path`` — needs network.
    """
    import torchvision
    torch_dataset = torchvision.datasets.FashionMNIST(tmp_path, download=True)
    ray_dataset = ray.data.from_torch(torch_dataset)
    expected_data = list(torch_dataset)
    actual_data = list(ray_dataset.take_all())
    assert extract_values("item", actual_data) == expected_data
    # Check that metadata fetch is included in stats.
    assert "ReadTorch" in ray_dataset.stats()
    # Underlying implementation uses `FromItems` operator
    assert ray_dataset._plan._logical_plan.dag.name == "ReadTorch"
    _check_usage_record(["ReadTorch"])
# Allow running this test module directly; propagate pytest's exit status.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_execution_optimizer_integrations.py",
"license": "Apache License 2.0",
"lines": 183,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/test_execution_optimizer_limit_pushdown.py | import sys
from typing import Any, Dict, List
import pandas as pd
import pytest
import ray
from ray.data import Dataset
from ray.data._internal.logical.interfaces import Plan
from ray.data._internal.util import rows_same
from ray.data.block import BlockMetadata
from ray.data.datasource import Datasource
from ray.data.datasource.datasource import ReadTask
from ray.data.tests.conftest import * # noqa
from ray.tests.conftest import * # noqa
def _check_valid_plan_and_result(
    ds: Dataset,
    expected_plan: Plan,
    expected_result: List[Dict[str, Any]],
    expected_physical_plan_ops=None,
    check_ordering=True,
):
    """Execute ``ds`` and verify both its rows and its optimized logical plan.

    Args:
        ds: Dataset to execute and inspect.
        expected_plan: Expected ``dag_str`` of the optimized logical plan.
        expected_result: Expected rows from ``ds.take_all()``.
        expected_physical_plan_ops: Optional operator names that must appear
            in the dataset's stats (i.e. in the executed physical plan).
        check_ordering: When False, compare rows ignoring their order.
    """
    actual_result = ds.take_all()
    if check_ordering:
        assert actual_result == expected_result
    else:
        assert rows_same(pd.DataFrame(actual_result), pd.DataFrame(expected_result))
    # take_all() above triggered optimization, so the logical plan now
    # reflects any rewrites (e.g. limit pushdown).
    assert ds._plan._logical_plan.dag.dag_str == expected_plan
    expected_physical_plan_ops = expected_physical_plan_ops or []
    for op in expected_physical_plan_ops:
        assert op in ds.stats(), f"Operator {op} not found: {ds.stats()}"
def test_limit_pushdown_basic_limit_fusion(ray_start_regular_shared_2_cpus):
    """Two stacked limits fuse into a single Limit with the smaller bound."""
    # A single block keeps row ordering deterministic.
    base = ray.data.range(100, override_num_blocks=1)
    fused = base.limit(5).limit(100)
    expected_rows = [{"id": i} for i in range(5)]
    _check_valid_plan_and_result(
        fused,
        "Read[ReadRange] -> Limit[limit=5]",
        expected_rows,
        check_ordering=False,
    )
def test_limit_pushdown_limit_fusion_reversed(ray_start_regular_shared_2_cpus):
    """Limit fusion picks the smaller bound regardless of application order."""
    # A single block keeps row ordering deterministic.
    base = ray.data.range(100, override_num_blocks=1)
    fused = base.limit(100).limit(5)
    expected_rows = [{"id": i} for i in range(5)]
    _check_valid_plan_and_result(
        fused,
        "Read[ReadRange] -> Limit[limit=5]",
        expected_rows,
        check_ordering=False,
    )
def test_limit_pushdown_multiple_limit_fusion(ray_start_regular_shared_2_cpus):
    """Any number of stacked limits fuses down to the minimum bound."""
    # A single block keeps row ordering deterministic.
    dataset = ray.data.range(100, override_num_blocks=1)
    for bound in (50, 80, 5, 20):
        dataset = dataset.limit(bound)
    expected_rows = [{"id": i} for i in range(5)]
    _check_valid_plan_and_result(
        dataset,
        "Read[ReadRange] -> Limit[limit=5]",
        expected_rows,
        check_ordering=False,
    )
def test_limit_pushdown_through_maprows(ray_start_regular_shared_2_cpus):
    """Test that Limit pushes through MapRows operations."""
    # f1 must be a named function: its name appears in the plan string below.
    def f1(x):
        return x
    ds = ray.data.range(100, override_num_blocks=100).map(f1).limit(1)
    _check_valid_plan_and_result(
        ds,
        "Read[ReadRange] -> Limit[limit=1] -> MapRows[Map(f1)]",
        [{"id": 0}],
        check_ordering=False,
    )
def test_limit_pushdown_through_mapbatches(ray_start_regular_shared_2_cpus):
    """Test that Limit pushes through MapBatches operations."""
    def f2(x):
        return x
    # Pushdown past MapBatches is only legal because
    # udf_modifying_row_count=False declares the UDF row-count-preserving.
    ds = (
        ray.data.range(100, override_num_blocks=100)
        .map_batches(f2, udf_modifying_row_count=False)
        .limit(1)
    )
    _check_valid_plan_and_result(
        ds,
        "Read[ReadRange] -> Limit[limit=1] -> MapBatches[MapBatches(f2)]",
        [{"id": 0}],
        check_ordering=False,
    )
def test_limit_pushdown_stops_at_filter(ray_start_regular_shared_2_cpus):
    """A limit must stay after a Filter: filters drop rows, so pushing the
    limit upstream could starve the output (conservative behavior)."""
    source = ray.data.range(100, override_num_blocks=100)
    limited = source.filter(lambda x: x["id"] < 50).limit(1)
    _check_valid_plan_and_result(
        limited,
        "Read[ReadRange] -> Filter[Filter(<lambda>)] -> Limit[limit=1]",
        [{"id": 0}],
        check_ordering=False,
    )
def test_limit_pushdown_through_project(ray_start_regular_shared_2_cpus):
    """A limit after a column projection is pushed in front of the Project."""
    projected = ray.data.range(100, override_num_blocks=100).select_columns(["id"])
    limited = projected.limit(5)
    expected_rows = [{"id": i} for i in range(5)]
    _check_valid_plan_and_result(
        limited,
        "Read[ReadRange] -> Limit[limit=5] -> Project[Project]",
        expected_rows,
        check_ordering=False,
    )
def test_limit_pushdown_stops_at_sort(ray_start_regular_shared_2_cpus):
    """A limit must not move past an all-to-all operator such as Sort."""
    limited = ray.data.range(100).sort("id").limit(5)
    expected_rows = [{"id": i} for i in range(5)]
    # Sorted output is deterministic, so ordering is checked here.
    _check_valid_plan_and_result(
        limited,
        "Read[ReadRange] -> Sort[Sort] -> Limit[limit=5]",
        expected_rows,
    )
def test_limit_pushdown_complex_interweaved_operations(ray_start_regular_shared_2_cpus):
    """Test Limit pushdown with complex interweaved operations."""
    # Named functions: f1/f2 appear in the expected plan string below.
    def f1(x):
        return x
    def f2(x):
        return x
    # Each limit pushes back through its preceding map but stops at the Sort.
    ds = ray.data.range(100).sort("id").map(f1).limit(20).sort("id").map(f2).limit(5)
    _check_valid_plan_and_result(
        ds,
        "Read[ReadRange] -> Sort[Sort] -> Limit[limit=20] -> MapRows[Map(f1)] -> "
        "Sort[Sort] -> Limit[limit=5] -> MapRows[Map(f2)]",
        [{"id": i} for i in range(5)],
    )
def test_limit_pushdown_between_two_map_operators(ray_start_regular_shared_2_cpus):
    """Test Limit pushdown between two Map operators."""
    # Named functions: f1/f2 appear in the expected plan string below.
    def f1(x):
        return x
    def f2(x):
        return x
    # The limit between the maps is pushed past f1 to the read; f2 stays put.
    ds = ray.data.range(100, override_num_blocks=100).map(f1).limit(1).map(f2)
    _check_valid_plan_and_result(
        ds,
        "Read[ReadRange] -> Limit[limit=1] -> MapRows[Map(f1)] -> MapRows[Map(f2)]",
        [{"id": 0}],
        check_ordering=False,
    )
def test_limit_pushdown_correctness(ray_start_regular_shared_2_cpus):
    """Test that limit pushdown produces correct results in various scenarios."""
    # Test 1: Simple project + limit
    ds = ray.data.range(100).select_columns(["id"]).limit(10)
    result = ds.take_all()
    expected = [{"id": i} for i in range(10)]
    assert result == expected
    # Test 2: Multiple operations + limit (with MapRows pushdown)
    ds = (
        ray.data.range(100)
        .map(lambda x: {"id": x["id"], "squared": x["id"] ** 2})
        .select_columns(["id"])
        .limit(5)
    )
    result = ds.take_all()
    expected = [{"id": i} for i in range(5)]
    assert result == expected
    # Test 3: MapRows operations should get limit pushed (safe)
    ds = ray.data.range(100).map(lambda x: {"id": x["id"] * 2}).limit(5)
    result = ds.take_all()
    expected = [{"id": i * 2} for i in range(5)]
    assert result == expected
    # Test 4: MapBatches operations should not get limit pushed
    ds = ray.data.range(100).map_batches(lambda batch: {"id": batch["id"] * 2}).limit(5)
    result = ds.take_all()
    expected = [{"id": i * 2} for i in range(5)]
    assert result == expected
    # Test 5: Filter operations should not get limit pushed (conservative)
    ds = ray.data.range(100).filter(lambda x: x["id"] % 2 == 0).limit(3)
    result = ds.take_all()
    expected = [{"id": i} for i in [0, 2, 4]]
    assert result == expected
    # Test 6: Complex chain with both safe operations (should all get limit pushed)
    ds = (
        ray.data.range(100)
        .select_columns(["id"])  # Project - could be safe if it was the immediate input
        .map(lambda x: {"id": x["id"] + 1})  # MapRows - NOT safe, stops pushdown
        .limit(3)
    )
    result = ds.take_all()
    expected = [{"id": i + 1} for i in range(3)]
    assert result == expected
    # The plan should show all operations after the limit
    # (i.e. the limit was pushed all the way to the read).
    plan_str = ds._plan._logical_plan.dag.dag_str
    assert (
        "Read[ReadRange] -> Limit[limit=3] -> Project[Project] -> MapRows[Map(<lambda>)]"
        == plan_str
    )
def test_limit_pushdown_scan_efficiency(ray_start_regular_shared_2_cpus):
    """Test that limit pushdown scans fewer rows from the data source."""
    # Actor that serializes increments so read tasks can report row counts.
    @ray.remote
    class Counter:
        def __init__(self):
            self.value = 0
        def increment(self, amount=1):
            self.value += amount
            return self.value
        def get(self):
            return self.value
        def reset(self):
            self.value = 0
    # Create a custom datasource that tracks how many rows it produces
    class CountingDatasource(Datasource):
        def __init__(self):
            self.counter = Counter.remote()
        def prepare_read(self, parallelism, n_per_block=10):
            def read_fn(block_idx):
                # Each block produces n_per_block rows
                ray.get(self.counter.increment.remote(n_per_block))
                return [
                    pd.DataFrame(
                        {
                            "id": range(
                                block_idx * n_per_block, (block_idx + 1) * n_per_block
                            )
                        }
                    )
                ]
            return [
                # `lambda i=i` binds the loop variable as a default argument,
                # avoiding Python's late-binding closure pitfall.
                ReadTask(
                    lambda i=i: read_fn(i),
                    BlockMetadata(
                        num_rows=n_per_block,
                        size_bytes=n_per_block * 8,  # rough estimate
                        input_files=None,
                        exec_stats=None,
                    ),
                )
                for i in range(parallelism)
            ]
        def get_rows_produced(self):
            return ray.get(self.counter.get.remote())
    # Test 1: Project + Limit should scan fewer rows due to pushdown
    source = CountingDatasource()
    ds = ray.data.read_datasource(source, override_num_blocks=20, n_per_block=10)
    ds = ds.select_columns(["id"]).limit(5)
    result = ds.take_all()
    # Should get correct results
    assert len(result) == 5
    assert result == [{"id": i} for i in range(5)]
    # Should have scanned significantly fewer than all 200 rows (20 blocks * 10 rows)
    # Due to pushdown, we should scan much less
    rows_produced_1 = source.get_rows_produced()
    assert rows_produced_1 < 200  # Should be much less than total
    # Test 2: MapRows + Limit should also scan fewer rows due to pushdown
    source2 = CountingDatasource()
    ds2 = ray.data.read_datasource(source2, override_num_blocks=20, n_per_block=10)
    ds2 = ds2.map(lambda x: x).limit(5)
    result2 = ds2.take_all()
    # Should get correct results
    assert len(result2) == 5
    assert result2 == [{"id": i} for i in range(5)]
    # Should also scan fewer than total due to pushdown
    rows_produced_2 = source2.get_rows_produced()
    assert rows_produced_2 < 200
    # Both should be efficient with pushdown
    # NOTE(review): the <100 thresholds assume the scheduler launches at most
    # a handful of read tasks before the limit is satisfied — loose on purpose.
    assert rows_produced_1 < 100  # Should be much less than total
    assert rows_produced_2 < 100  # Should be much less than total
    # Test 3: Filter + Limit should scan fewer due to early termination, but not pushdown
    source3 = CountingDatasource()
    ds3 = ray.data.read_datasource(source3, override_num_blocks=20, n_per_block=10)
    ds3 = ds3.filter(lambda x: x["id"] % 2 == 0).limit(3)
    result3 = ds3.take_all()
    # Should get correct results
    assert len(result3) == 3
    assert result3 == [{"id": i} for i in [0, 2, 4]]
    # Should still scan fewer than total due to early termination
    rows_produced_3 = source3.get_rows_produced()
    assert rows_produced_3 < 200
def test_limit_pushdown_union(ray_start_regular_shared_2_cpus):
    """A limit after a union is duplicated into each branch and kept globally."""
    left = ray.data.range(100, override_num_blocks=10)
    right = ray.data.range(200, override_num_blocks=10)
    limited = left.union(right).limit(5)
    expected_plan = "Read[ReadRange] -> Limit[limit=5], Read[ReadRange] -> Limit[limit=5] -> Union[Union] -> Limit[limit=5]"
    expected_rows = [{"id": i} for i in range(5)]
    _check_valid_plan_and_result(
        limited, expected_plan, expected_rows, check_ordering=False
    )
def test_limit_pushdown_union_with_maprows(ray_start_regular_shared_2_cpus):
    """Limit after Union + MapRows: limit should be pushed before the MapRows
    and inside each Union branch."""
    ds1 = ray.data.range(100, override_num_blocks=10)
    ds2 = ray.data.range(200, override_num_blocks=10)
    ds = ds1.union(ds2).map(lambda x: x).limit(5)
    # Expect a branch-local limit on each read plus the global limit that
    # moved in front of the MapRows.
    expected_plan = (
        "Read[ReadRange] -> Limit[limit=5], "
        "Read[ReadRange] -> Limit[limit=5] -> Union[Union] -> "
        "Limit[limit=5] -> MapRows[Map(<lambda>)]"
    )
    _check_valid_plan_and_result(
        ds, expected_plan, [{"id": i} for i in range(5)], check_ordering=False
    )
def test_limit_pushdown_union_with_sort(ray_start_regular_shared_2_cpus):
    """Limit after Union + Sort: limit must NOT push through the Sort."""
    ds1 = ray.data.range(100, override_num_blocks=4)
    ds2 = ray.data.range(50, override_num_blocks=4).map(
        lambda x: {"id": x["id"] + 1000}
    )
    ds = ds1.union(ds2).sort("id").limit(5)
    # No branch-local limits: the Sort blocks pushdown entirely.
    expected_plan = (
        "Read[ReadRange], "
        "Read[ReadRange] -> MapRows[Map(<lambda>)] -> "
        "Union[Union] -> Sort[Sort] -> Limit[limit=5]"
    )
    # Sorted output is deterministic, so ordering is checked (default).
    _check_valid_plan_and_result(ds, expected_plan, [{"id": i} for i in range(5)])
def test_limit_pushdown_multiple_unions(ray_start_regular_shared_2_cpus):
    """Outer limit over nested unions should create a branch-local limit
    for every leaf plus the global one."""
    ds = (
        ray.data.range(100)
        .union(ray.data.range(100, override_num_blocks=5))
        .union(ray.data.range(50))
        .limit(5)
    )
    # Three leaves -> three branch-local limits, plus a limit after each Union.
    expected_plan = (
        "Read[ReadRange] -> Limit[limit=5], "
        "Read[ReadRange] -> Limit[limit=5] -> Union[Union] -> Limit[limit=5], "
        "Read[ReadRange] -> Limit[limit=5] -> Union[Union] -> Limit[limit=5]"
    )
    _check_valid_plan_and_result(
        ds, expected_plan, [{"id": i} for i in range(5)], check_ordering=False
    )
def test_limit_pushdown_union_with_groupby(ray_start_regular_shared_2_cpus):
    """Limit after Union + Aggregate: limit should stay after Aggregate."""
    ds1 = ray.data.range(100)
    ds2 = ray.data.range(100).map(lambda x: {"id": x["id"] + 1000})
    # All ids are distinct across the two branches, so every group has size 1.
    ds = ds1.union(ds2).groupby("id").count().limit(5)
    # Result should contain 5 distinct ids with count == 1.
    res = ds.take_all()
    # Plan suffix check (no branch limits past Aggregate).
    assert ds._plan._logical_plan.dag.dag_str.endswith(
        "Union[Union] -> Aggregate[Aggregate] -> Limit[limit=5]"
    )
    assert len(res) == 5 and all(r["count()"] == 1 for r in res)
def test_limit_pushdown_complex_chain(ray_start_regular_shared_2_cpus):
    """
    Complex end-to-end case:
    1. Two branches each with a branch-local Limit pushed to Read.
       - left : Project
       - right : MapRows
    2. Union of the two branches.
    3. Global Aggregate (groupby/count).
    4. Sort (descending id) -- pushdowns stop here.
    5. Final Limit.
    Verifies both plan rewrite and result correctness.
    """
    # -- left branch -------------------------------------------------
    left = ray.data.range(50).select_columns(["id"]).limit(10)
    # -- right branch ------------------------------------------------
    right = ray.data.range(50).map(lambda x: {"id": x["id"] + 1000}).limit(10)
    # -- union -> aggregate -> sort -> limit -------------------------
    ds = left.union(right).groupby("id").count().sort("id", descending=True).limit(3)
    # Expected logical-plan string.
    expected_plan = (
        "Read[ReadRange] -> Limit[limit=10] -> Project[Project], "
        "Read[ReadRange] -> Limit[limit=10] -> MapRows[Map(<lambda>)] "
        "-> Union[Union] -> Aggregate[Aggregate] -> Sort[Sort] -> Limit[limit=3]"
    )
    # Top-3 ids are the three largest (1009, 1008, 1007) with count()==1.
    expected_result = [
        {"id": 1009, "count()": 1},
        {"id": 1008, "count()": 1},
        {"id": 1007, "count()": 1},
    ]
    _check_valid_plan_and_result(ds, expected_plan, expected_result)
def test_limit_pushdown_union_maps_projects(ray_start_regular_shared_2_cpus):
    r"""
    Read -> MapBatches -> MapRows -> Project
         \                          /
          --------- Union ---------- -> Limit

    The limit should be pushed in front of each branch
    (past MapRows, Project) while the original
    global Limit is preserved after the Union.
    """
    # Left branch.
    left = (
        ray.data.range(30)
        .map_batches(lambda b: b, udf_modifying_row_count=False)
        .map(lambda r: {"id": r["id"]})
        .select_columns(["id"])
    )
    # Right branch with shifted ids.
    right = (
        ray.data.range(30)
        .map_batches(lambda b: b, udf_modifying_row_count=False)
        .map(lambda r: {"id": r["id"] + 100})
        .select_columns(["id"])
    )
    ds = left.union(right).limit(3)
    expected_plan = (
        "Read[ReadRange] -> "
        "Limit[limit=3] -> MapBatches[MapBatches(<lambda>)] -> MapRows[Map(<lambda>)] -> "
        "Project[Project], "
        "Read[ReadRange] -> "
        "Limit[limit=3] -> MapBatches[MapBatches(<lambda>)] -> MapRows[Map(<lambda>)] -> "
        "Project[Project] -> Union[Union] -> Limit[limit=3]"
    )
    expected_result = [{"id": i} for i in range(3)]  # First 3 rows from left branch.
    _check_valid_plan_and_result(
        ds, expected_plan, expected_result, check_ordering=False
    )
def test_limit_pushdown_map_per_block_limit_applied(ray_start_regular_shared_2_cpus):
    """Test that per-block limits are actually applied during map execution."""
    # Create a global counter using Ray
    @ray.remote
    class Counter:
        def __init__(self):
            self.value = 0
        def increment(self):
            self.value += 1
            return self.value
        def get(self):
            return self.value
    counter = Counter.remote()
    def track_processing(row):
        # Record that this row was processed
        ray.get(counter.increment.remote())
        return row
    # Create dataset with limit pushed through map
    ds = ray.data.range(1000, override_num_blocks=10).map(track_processing).limit(50)
    # Execute and get results
    result = ds.take_all()
    # Verify correct results
    expected = [{"id": i} for i in range(50)]
    assert result == expected
    # Check how many rows were actually processed
    processed_count = ray.get(counter.get.remote())
    # With per-block limits, we should process fewer rows than the total dataset
    # but at least the number we need for the final result
    assert (
        processed_count >= 50
    ), f"Expected at least 50 rows processed, got {processed_count}"
    assert (
        processed_count < 1000
    ), f"Expected fewer than 1000 rows processed, got {processed_count}"
    print(f"Processed {processed_count} rows to get {len(result)} results")
def test_limit_pushdown_preserves_map_behavior(ray_start_regular_shared_2_cpus):
    """Pushing a limit around a row-wise map must not change the result."""

    def add_one(row):
        row["id"] += 1
        return row

    # The same pipeline with the limit after vs. before the map must agree.
    pushed = ray.data.range(100).map(add_one).limit(10).take_all()
    reference = ray.data.range(100).limit(10).map(add_one).take_all()
    assert pushed == reference
    # And both must show the transformation applied to the first 10 rows.
    assert pushed == [{"id": i + 1} for i in range(10)]
@pytest.mark.parametrize(
    "udf_modifying_row_count,expected_plan",
    [
        # Row-count-preserving UDF: the limit may move before MapBatches.
        (
            False,
            "Read[ReadRange] -> Limit[limit=10] -> MapBatches[MapBatches(<lambda>)]",
        ),
        # UDF may change the row count: the limit must stay after MapBatches.
        (
            True,
            "Read[ReadRange] -> MapBatches[MapBatches(<lambda>)] -> Limit[limit=10]",
        ),
    ],
)
def test_limit_pushdown_udf_modifying_row_count_with_map_batches(
    ray_start_regular_shared_2_cpus,
    udf_modifying_row_count,
    expected_plan,
):
    """Test that limit pushdown preserves the row count with map batches."""
    ds = (
        ray.data.range(100)
        .map_batches(lambda x: x, udf_modifying_row_count=udf_modifying_row_count)
        .limit(10)
    )
    _check_valid_plan_and_result(
        ds,
        expected_plan,
        [{"id": i} for i in range(10)],
    )
def test_does_not_pushdown_limit_past_map_batches_by_default(
    ray_start_regular_shared_2_cpus,
):
    """By default the optimizer must not push a limit past map_batches."""

    def duplicate_id(batch):
        yield {"data": list(batch["id"]) * 2}

    # If the optimizer incorrectly pushed the limit past the map operator,
    # the doubled batch would make this count 2 instead of 1.
    doubled = ray.data.range(1).map_batches(duplicate_id)
    num_rows = doubled.limit(1).count()
    assert num_rows == 1, num_rows
def test_does_not_pushdown_limit_past_map_groups_by_default(
    ray_start_regular_shared_2_cpus,
):
    """By default the optimizer must not push a limit past map_groups."""

    def duplicate_id(batch):
        yield {"data": list(batch["id"]) * 2}

    # If the optimizer incorrectly pushed the limit past map_groups, the
    # doubled group would make this count 2 instead of 1.
    grouped = ray.data.range(1).groupby("id").map_groups(duplicate_id)
    num_rows = grouped.limit(1).count()
    assert num_rows == 1, num_rows
# Allow running this test module directly; propagate pytest's exit status.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_execution_optimizer_limit_pushdown.py",
"license": "Apache License 2.0",
"lines": 508,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/llm/_internal/common/dict_utils.py | from typing import Any, Dict, Optional
def maybe_apply_llm_deployment_config_defaults(
    defaults: Dict[str, Any],
    user_deployment_config: Optional[Dict[str, Any]],
) -> Dict[str, Any]:
    """Merge deployment defaults with a user config, resolving conflicts.

    Ray Serve does not allow setting both ``num_replicas`` and
    ``autoscaling_config`` on the same deployment, so when the user
    explicitly pins ``num_replicas``, ``autoscaling_config`` is dropped
    from a copy of the defaults before the deep merge. User-provided
    values win on conflicts.

    Args:
        defaults: The default deployment options dictionary.
        user_deployment_config: The user-provided deployment configuration,
            or ``None``.

    Returns:
        The merged deployment options with conflicts resolved.
    """
    user_config = user_deployment_config or {}
    if "num_replicas" in user_config:
        # Copy so the caller's defaults dict is never mutated.
        defaults = dict(defaults)
        defaults.pop("autoscaling_config", None)
    return deep_merge_dicts(defaults, user_config)
def deep_merge_dicts(base: Dict[str, Any], override: Dict[str, Any]) -> Dict[str, Any]:
    """Recursively merge ``override`` into ``base``, returning a new dict.

    Neither input is modified. For each key: when both sides hold dicts the
    merge recurses; otherwise the value from ``override`` wins (new keys are
    simply added).

    Args:
        base: The base dictionary.
        override: The dictionary whose values take precedence.

    Returns:
        A new merged dictionary.

    Example:
        >>> deep_merge_dicts({"a": 1, "b": {"c": 2, "d": 3}}, {"b": {"c": 10}, "e": 5})
        {'a': 1, 'b': {'c': 10, 'd': 3}, 'e': 5}
    """
    merged = dict(base)
    for key, new_value in override.items():
        current = merged.get(key)
        if isinstance(current, dict) and isinstance(new_value, dict):
            # Both sides are dicts: merge one level deeper.
            merged[key] = deep_merge_dicts(current, new_value)
        else:
            # Override wins (also covers keys absent from base).
            merged[key] = new_value
    return merged
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/_internal/common/dict_utils.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:python/ray/llm/tests/serve/cpu/deployments/data_parallel/test_dp_server.py | import sys
from copy import deepcopy
import pytest
from ray.llm._internal.serve.core.configs.llm_config import LLMConfig
from ray.llm._internal.serve.serving_patterns.data_parallel.dp_server import DPServer
class TestGetDeploymentOptions:
    """Tests for ``DPServer.get_deployment_options`` replica/DP-size validation."""
    @pytest.mark.parametrize(
        "data_parallel_size,num_replica,allowed",
        [
            # DP size unset: any replica count is allowed.
            (None, 1, True),
            (None, 2, True),
            (None, 3, True),
            # DP size == 1 behaves like unset.
            (1, 1, True),
            (1, 2, True),
            (1, 3, True),
            # DP size > 1 combined with multiple replicas is rejected.
            (2, 2, False),
            (2, 3, False),
            (4, 2, False),
            # DP size > 1 with no explicit replica count is allowed.
            (2, None, True),
            (None, None, True),
        ],
    )
    def test_multi_replica_dp_validation(
        self, data_parallel_size, num_replica, allowed
    ):
        """Test that multi-replica and DP size are mutually exclusive.
        Ray.llm's implementation does not yet support multi-replica
        deployment along with DP.
        """
        # Only include the keys the test case actually sets so defaults apply.
        engine_kwargs = (
            {}
            if data_parallel_size is None
            else {"data_parallel_size": data_parallel_size}
        )
        deployment_config = {} if num_replica is None else {"num_replicas": num_replica}
        def get_serve_options_with_num_replica():
            # Deep-copy the inputs so LLMConfig cannot mutate the shared
            # parametrized dictionaries across invocations.
            llm_config = LLMConfig(
                model_loading_config=dict(model_id="test_model"),
                engine_kwargs=deepcopy(engine_kwargs),
                deployment_config=deepcopy(deployment_config),
            )
            deployment_options = DPServer.get_deployment_options(llm_config)
            return deployment_options
        if allowed:
            serve_options = get_serve_options_with_num_replica()
            # Both knobs default to 1; the expected effective replica count is
            # their product (asserted against what DPServer produced).
            actual_num_replicas = serve_options.get("num_replicas", 1)
            expected_num_replicas = (data_parallel_size or 1) * (num_replica or 1)
            assert actual_num_replicas == expected_num_replicas
        else:
            with pytest.raises(
                ValueError,
                match="use engine_kwargs.data_parallel_size",
            ):
                get_serve_options_with_num_replica()
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/tests/serve/cpu/deployments/data_parallel/test_dp_server.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/llm/tests/serve/cpu/deployments/llm/test_builder_llm_server.py | import sys
import pytest
from ray import serve
from ray.llm._internal.serve.constants import DEFAULT_MAX_TARGET_ONGOING_REQUESTS
from ray.llm._internal.serve.core.configs.llm_config import (
LLMConfig,
ModelLoadingConfig,
)
from ray.llm._internal.serve.core.server.builder import (
build_llm_deployment,
)
class TestBuildVllmDeployment:
    """Tests for ``build_llm_deployment``: naming and autoscaling defaults."""
    def test_build_llm_deployment(
        self,
        llm_config_with_mock_engine,
        shutdown_ray_and_serve,
        disable_placement_bundles,
    ):
        """Test `build_llm_deployment` can build a vLLM deployment."""
        app = build_llm_deployment(llm_config_with_mock_engine)
        assert isinstance(app, serve.Application)
        handle = serve.run(app)
        # Without a prefix, the deployment name starts with "LLMServer".
        assert handle.deployment_name.startswith("LLMServer")
    def test_build_llm_deployment_with_name_prefix(
        self,
        llm_config_with_mock_engine,
        shutdown_ray_and_serve,
        disable_placement_bundles,
    ):
        """Test `build_llm_deployment` can build a vLLM deployment with name prefix."""
        _name_prefix_for_test = "test_name_prefix"
        app = build_llm_deployment(
            llm_config_with_mock_engine, name_prefix=_name_prefix_for_test
        )
        assert isinstance(app, serve.Application)
        handle = serve.run(app)
        assert handle.deployment_name.startswith(_name_prefix_for_test)
    def test_build_llm_deployment_name_prefix_along_with_deployment_config(
        self,
        llm_config_with_mock_engine,
        shutdown_ray_and_serve,
        disable_placement_bundles,
    ):
        """Test `build_llm_deployment` can build a vLLM deployment with name prefix and deployment config."""
        # Copy the fixture config so the mutation below cannot leak into other tests.
        config_with_name: LLMConfig = llm_config_with_mock_engine.model_copy(deep=True)
        _deployment_name = "deployment_name_from_config"
        _name_prefix_for_test = "test_name_prefix"
        config_with_name.deployment_config["name"] = _deployment_name
        app = build_llm_deployment(config_with_name, name_prefix=_name_prefix_for_test)
        assert isinstance(app, serve.Application)
        handle = serve.run(app)
        # The prefix is prepended verbatim to the user-configured name.
        assert handle.deployment_name == _name_prefix_for_test + _deployment_name
    def test_default_autoscaling_config_included_without_num_replicas(
        self, disable_placement_bundles
    ):
        """Test that default autoscaling_config with target_ongoing_requests is included
        when num_replicas is not specified.
        """
        llm_config = LLMConfig(
            model_loading_config=ModelLoadingConfig(model_id="test-model"),
        )
        app = build_llm_deployment(llm_config)
        # Inspect the bound deployment's config directly instead of running it.
        deployment = app._bound_deployment
        autoscaling_config = deployment._deployment_config.autoscaling_config
        assert autoscaling_config is not None
        assert (
            autoscaling_config.target_ongoing_requests
            == DEFAULT_MAX_TARGET_ONGOING_REQUESTS
        )
    def test_autoscaling_config_removed_from_defaults_when_num_replicas_specified(
        self, disable_placement_bundles
    ):
        """Test that autoscaling_config from defaults is removed when user specifies
        num_replicas, since Ray Serve does not allow both.
        """
        llm_config = LLMConfig(
            model_loading_config=ModelLoadingConfig(model_id="test-model"),
            deployment_config={
                "num_replicas": 2,
            },
        )
        app = build_llm_deployment(llm_config)
        deployment = app._bound_deployment
        assert deployment._deployment_config.num_replicas == 2
        # autoscaling_config should be None since num_replicas is set
        assert deployment._deployment_config.autoscaling_config is None
    def test_user_target_ongoing_requests_respected(self, disable_placement_bundles):
        """Test that user-specified target_ongoing_requests is respected and not
        overridden by defaults.
        """
        user_target = 50
        llm_config = LLMConfig(
            model_loading_config=ModelLoadingConfig(model_id="test-model"),
            deployment_config={
                "autoscaling_config": {
                    "target_ongoing_requests": user_target,
                },
            },
        )
        app = build_llm_deployment(llm_config)
        deployment = app._bound_deployment
        autoscaling_config = deployment._deployment_config.autoscaling_config
        assert autoscaling_config is not None
        assert autoscaling_config.target_ongoing_requests == user_target
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/tests/serve/cpu/deployments/llm/test_builder_llm_server.py",
"license": "Apache License 2.0",
"lines": 106,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:ci/ray_ci/configs.py | from typing import TypedDict
class PythonVersionInfo(TypedDict):
    """Per-Python-version build metadata."""
    # CPython wheel/ABI tag for this interpreter version, e.g. "cp39-cp39".
    bin_path: str
# Ray build flavors produced by CI.
BUILD_TYPES = [
    "optimized",
    "debug",
]
# CPU architectures CI builds for.
ARCHITECTURE = [
    "x86_64",
    "aarch64",
]
# Supported Python versions mapped to their CPython ABI tags.
PYTHON_VERSIONS = {
    "3.9": PythonVersionInfo(bin_path="cp39-cp39"),
    "3.10": PythonVersionInfo(bin_path="cp310-cp310"),
    "3.11": PythonVersionInfo(bin_path="cp311-cp311"),
    "3.12": PythonVersionInfo(bin_path="cp312-cp312"),
    "3.13": PythonVersionInfo(bin_path="cp313-cp313"),
    "3.14": PythonVersionInfo(bin_path="cp314-cp314"),
}
DEFAULT_PYTHON_VERSION = "3.9"
# NOTE(review): presumably the Python version used when tagging images —
# confirm against callers.
DEFAULT_PYTHON_TAG_VERSION = "3.10"
DEFAULT_BUILD_TYPE = "optimized"
DEFAULT_ARCHITECTURE = "x86_64"
| {
"repo_id": "ray-project/ray",
"file_path": "ci/ray_ci/configs.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/train/_internal/base_worker_group.py | """Abstract base class for WorkerGroup implementations.
This module defines the common base class that both V1 and V2 WorkerGroup
implementations should inherit from to ensure backend compatibility.
"""
import abc
from typing import Callable, List, TypeVar
from ray.types import ObjectRef
from ray.util.annotations import DeveloperAPI
T = TypeVar("T")
@DeveloperAPI
class BaseWorkerGroup(abc.ABC):
    """Abstract base class for WorkerGroup implementations.
    This base class defines the minimal set of methods that backend classes
    expect from WorkerGroup implementations. Both V1 and V2 WorkerGroup
    classes should inherit from this base class to ensure compatibility with
    all backend configurations.
    The interface focuses on the core operations that backends need:
    - Executing functions on workers
    - Getting worker count and resource allocation
    """
    # Blocking broadcast: runs `func` on every worker and waits for results.
    @abc.abstractmethod
    def execute(self, func: Callable[..., T], *args, **kwargs) -> List[T]:
        """Execute a function on all workers synchronously.
        Args:
            func: The function to execute on each worker.
            *args: Positional arguments to pass to the function.
            **kwargs: Keyword arguments to pass to the function.
        Returns:
            A list of results from each worker, in worker rank order.
        """
        pass
    # Non-blocking broadcast: returns ObjectRefs instead of resolved results.
    @abc.abstractmethod
    def execute_async(self, func: Callable[..., T], *args, **kwargs) -> List[ObjectRef]:
        """Execute a function on all workers asynchronously.
        Args:
            func: The function to execute on each worker.
            *args: Positional arguments to pass to the function.
            **kwargs: Keyword arguments to pass to the function.
        Returns:
            A list of ObjectRef results from each worker, in worker rank order.
        """
        pass
    @abc.abstractmethod
    def execute_single(
        self, worker_index: int, func: Callable[..., T], *args, **kwargs
    ) -> T:
        """Execute a function on a single worker synchronously.
        Args:
            worker_index: The index of the worker to execute on.
            func: The function to execute.
            *args: Positional arguments to pass to the function.
            **kwargs: Keyword arguments to pass to the function.
        Returns:
            The result from the specified worker.
        """
        pass
    @abc.abstractmethod
    def execute_single_async(
        self, worker_index: int, func: Callable[..., T], *args, **kwargs
    ) -> ObjectRef:
        """Execute a function on a single worker asynchronously.
        Args:
            worker_index: The index of the worker to execute on.
            func: The function to execute.
            *args: Positional arguments to pass to the function.
            **kwargs: Keyword arguments to pass to the function.
        Returns:
            An ObjectRef to the result from the specified worker.
        """
        pass
    @abc.abstractmethod
    def __len__(self) -> int:
        """Return the number of workers in the group."""
        pass
    @abc.abstractmethod
    def get_resources_per_worker(self) -> dict:
        """Get the resources allocated per worker.
        Returns:
            A dictionary mapping resource names to quantities per worker.
            Common keys include "CPU", "GPU", "memory".
        """
        pass
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/_internal/base_worker_group.py",
"license": "Apache License 2.0",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:python/ray/train/tests/test_base_worker_group.py | """Tests for BaseWorkerGroup implementation and usage."""
import pytest
from ray.train._internal.base_worker_group import BaseWorkerGroup
from ray.train._internal.worker_group import WorkerGroup as V1WorkerGroup
from ray.train.v2._internal.execution.worker_group.worker_group import (
WorkerGroup as V2WorkerGroup,
)
def test_interface_abstract_methods():
    """Test that BaseWorkerGroup enforces its abstract methods."""
    # Should not be able to instantiate interface directly
    # (abc raises TypeError at instantiation time for abstract classes).
    with pytest.raises(TypeError):
        BaseWorkerGroup()
    # Should not be able to create incomplete implementation
    class IncompleteWorkerGroup(BaseWorkerGroup):
        def execute(self, func, *args, **kwargs):
            pass
        # Missing other abstract methods
    with pytest.raises(TypeError):
        IncompleteWorkerGroup()
def test_real_implementations_inherit_interface():
    """Smoke test that real WorkerGroup implementations inherit from interface."""
    # Test inheritance
    assert issubclass(V1WorkerGroup, BaseWorkerGroup)
    assert issubclass(V2WorkerGroup, BaseWorkerGroup)
    # Test that all abstract methods are implemented
    # If any abstract methods are missing, __abstractmethods__ will be non-empty
    assert (
        len(V1WorkerGroup.__abstractmethods__) == 0
    ), f"V1 WorkerGroup missing abstract methods: {V1WorkerGroup.__abstractmethods__}"
    assert (
        len(V2WorkerGroup.__abstractmethods__) == 0
    ), f"V2 WorkerGroup missing abstract methods: {V2WorkerGroup.__abstractmethods__}"
if __name__ == "__main__":
    pytest.main([__file__])
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/tests/test_base_worker_group.py",
"license": "Apache License 2.0",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:release/train_tests/local_mode/torch_local_mode_launcher.py | """Ray Train release test: local mode launched by torchrun.
Setup:
- 2 x g4dn.12xlarge (4 GPU)
Test owner: xinyuangui2
The test launches a ray cluster with 2 nodes, and launches a torchrun job on each node.
"""
import os
import ray
import subprocess
import logging
from ray.air.util.node import _force_on_node
from pathlib import Path
logger = logging.getLogger(__name__)
@ray.remote
def _write(stream: bytes, path: str):
    """Write raw bytes to ``path`` on this node, creating parent dirs as needed."""
    destination = Path(path)
    destination.parent.mkdir(parents=True, exist_ok=True)
    # Equivalent to open(path, "wb").write(stream), with implicit close.
    destination.write_bytes(stream)
@ray.remote
def _torch_run_launch(
    master_address: str,
    node_rank: int,
    absolute_path: str,
    n_nodes: int,
    n_processes_per_node: int,
    master_port: int,
):
    """Run torchrun on this node, rendezvousing with the other nodes via c10d.

    Blocks until the torchrun process exits; raises CalledProcessError on a
    non-zero exit status.
    """
    launch_cmd = [
        "torchrun",
        f"--nnodes={n_nodes}",
        f"--nproc-per-node={n_processes_per_node}",
        f"--node_rank={node_rank}",
        "--rdzv_backend=c10d",
        f"--rdzv_endpoint={master_address}:{master_port}",
        "--rdzv_id=local_mode_job",
        absolute_path,
    ]
    # Inherit this worker's environment, with Train V2 explicitly enabled.
    child_env = dict(os.environ)
    child_env["RAY_TRAIN_V2_ENABLED"] = "1"
    subprocess.check_call(launch_cmd, env=child_env)
def torch_run_launch_on_nodes():
    """Upload the training script to both worker nodes and launch torchrun on each.

    Expects exactly two alive non-head nodes in the Ray cluster; the first one
    found is used as the c10d rendezvous master.
    """
    head_ip = ray.util.get_node_ip_address()
    node_id_ips = []
    # Collect (node_id, ip) for every alive node except the head node.
    for node in ray.nodes():
        if not node["Alive"]:
            continue
        node_ip = node["NodeManagerAddress"]
        if node_ip == head_ip:
            continue
        node_id = node["NodeID"]
        node_id_ips.append((node_id, node_ip))
    assert len(node_id_ips) == 2, f"Expected 2 nodes, got {len(node_id_ips)}"
    master_address = node_id_ips[0][1]
    futures = []
    absolute_path = os.path.abspath("torch_local_mode_test.py")
    with open(absolute_path, "rb") as f:
        stream = f.read()
    logger.info(f"Uploading file to all nodes: {absolute_path}")
    # Copy the script bytes to the same absolute path on each worker node.
    for i in range(len(node_id_ips)):
        futures.append(
            _force_on_node(node_id_ips[i][0], _write).remote(stream, absolute_path)
        )
    ray.get(futures)
    logger.info("Uploaded file to all nodes, starting torch run launch")
    futures = []
    # Launch torchrun on every node; node i gets rank i, with 4 processes per
    # node and rendezvous on port 29500.
    for i in range(len(node_id_ips)):
        futures.append(
            _force_on_node(node_id_ips[i][0], _torch_run_launch).remote(
                master_address, i, absolute_path, len(node_id_ips), 4, 29500
            )
        )
    ray.get(futures)
if __name__ == "__main__":
    # "auto" connects to the already-running cluster (started by the harness).
    # https://docs.ray.io/en/latest/ray-core/scheduling/accelerators.html#using-accelerators-in-tasks-and-actors
    # we don't want actors to override CUDA_VISIBLE_DEVICES
    ray.init(
        "auto",
        runtime_env={"env_vars": {"RAY_EXPERIMENTAL_NOSET_CUDA_VISIBLE_DEVICES": "1"}},
    )
    torch_run_launch_on_nodes()
| {
"repo_id": "ray-project/ray",
"file_path": "release/train_tests/local_mode/torch_local_mode_launcher.py",
"license": "Apache License 2.0",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:release/train_tests/local_mode/torch_local_mode_test.py | import os
import tempfile
import logging
import torch
from torch.nn import CrossEntropyLoss
from torch.optim import Adam
from torch.utils.data import DataLoader
from torchvision.models import resnet18
from torchvision.datasets import FashionMNIST
from torchvision.transforms import ToTensor, Normalize, Compose
from filelock import FileLock
import torch.distributed as dist
import ray
from ray.train import (
Checkpoint,
CheckpointConfig,
RunConfig,
ScalingConfig,
get_context,
)
from ray.train.torch import TorchTrainer
logger = logging.getLogger(__name__)
# Shared on-disk location for the FashionMNIST download on each node.
DATA_ROOT = "/tmp/test_data"
def train_func(config):
    """Per-worker training loop: train ResNet-18 on FashionMNIST.

    Args:
        config: Dict with "num_epochs", "lr", and "batch_size".
    """
    # Model, Loss, Optimizer
    model = resnet18(num_classes=10)
    # Replace the stock 3-channel stem with a 1-channel conv for this dataset.
    model.conv1 = torch.nn.Conv2d(
        1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False
    )
    lock = FileLock(os.path.join(DATA_ROOT, "fashionmnist.lock"))
    # [1] Prepare model.
    model = ray.train.torch.prepare_model(model)
    # model.to("cuda") # This is done by `prepare_model`
    criterion = CrossEntropyLoss()
    optimizer = Adam(model.parameters(), lr=config["lr"])
    # Data
    transform = Compose([ToTensor(), Normalize((0.28604,), (0.32025,))])
    local_rank = get_context().get_local_rank()
    # Only local rank 0 downloads (under a file lock); all ranks then meet at
    # the barrier before loading from disk.
    if local_rank == 0:
        logger.info(f"Downloading FashionMNIST data to {DATA_ROOT}")
        with lock:
            _ = FashionMNIST(
                root=DATA_ROOT, train=True, download=True, transform=transform
            )
    dist.barrier()
    logger.info(f"Loading FashionMNIST data from {DATA_ROOT}")
    train_data = FashionMNIST(
        root=DATA_ROOT, train=True, download=False, transform=transform
    )
    train_loader = DataLoader(train_data, batch_size=config["batch_size"], shuffle=True)
    # [2] Prepare dataloader.
    train_loader = ray.train.torch.prepare_data_loader(train_loader)
    # Training
    epoch_losses = []
    for epoch in range(config["num_epochs"]):
        # Reshuffle shards per epoch when running distributed.
        if ray.train.get_context().get_world_size() > 1:
            train_loader.sampler.set_epoch(epoch)
        epoch_loss = 0.0
        num_batches = 0
        for images, labels in train_loader:
            # This is done by `prepare_data_loader`!
            # images, labels = images.to("cuda"), labels.to("cuda")
            outputs = model(images)
            loss = criterion(outputs, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            num_batches += 1
        # Calculate average loss for the epoch
        avg_epoch_loss = epoch_loss / num_batches if num_batches > 0 else float("inf")
        epoch_losses.append(avg_epoch_loss)
        # [3] Report metrics and checkpoint.
        metrics = {
            "loss": avg_epoch_loss,
            "epoch": epoch,
            "epoch_losses": epoch_losses.copy(),  # Track all losses for validation
        }
        with tempfile.TemporaryDirectory() as temp_checkpoint_dir:
            torch.save(
                model.state_dict(), os.path.join(temp_checkpoint_dir, "model.pt")
            )
            ray.train.report(
                metrics,
                checkpoint=Checkpoint.from_directory(temp_checkpoint_dir),
            )
        if ray.train.get_context().get_world_rank() == 0:
            logger.info(f"metrics: {metrics}")
def fit_func():
    """Run TorchTrainer and validate that the reported loss converged.

    Returns:
        The ray.train Result from trainer.fit().
    """
    # Define configurations.
    train_loop_config = {"num_epochs": 20, "lr": 0.01, "batch_size": 32}
    # num_workers=0: training runs in-process (the "local mode" under test).
    scaling_config = ScalingConfig(num_workers=0, use_gpu=True)
    run_config = RunConfig(checkpoint_config=CheckpointConfig(num_to_keep=1))
    # Initialize the Trainer.
    trainer = TorchTrainer(
        train_loop_per_worker=train_func,
        train_loop_config=train_loop_config,
        scaling_config=scaling_config,
        run_config=run_config,
    )
    # Train the model.
    result = trainer.fit()
    # Inspect the results and validate loss makes sense
    final_loss = result.metrics["loss"]
    epoch_losses = result.metrics.get("epoch_losses", [])
    logger.info(f"final_loss: {final_loss}")
    logger.info(f"all epoch losses: {epoch_losses}")
    # Validation 1: Check loss is finite and not NaN
    assert not torch.isnan(torch.tensor(final_loss)), f"Final loss is NaN: {final_loss}"
    assert torch.isfinite(
        torch.tensor(final_loss)
    ), f"Final loss is not finite: {final_loss}"
    # Validation 2: Check loss convergence - final loss should be lower than initial loss
    if len(epoch_losses) >= 2:
        initial_loss = epoch_losses[0]
        assert (
            final_loss < initial_loss
        ), f"Loss didn't decrease: initial={initial_loss}, final={final_loss}"
        logger.info(
            f"Loss successfully decreased from {initial_loss:.4f} to {final_loss:.4f}"
        )
        # Additional check: loss should show general decreasing trend
        # Allow for some fluctuation but overall trend should be downward
        # (compare mean loss of the first half of epochs vs. the second half).
        mid_point = len(epoch_losses) // 2
        early_avg = sum(epoch_losses[:mid_point]) / mid_point
        late_avg = sum(epoch_losses[mid_point:]) / (len(epoch_losses) - mid_point)
        assert (
            late_avg < early_avg
        ), f"Loss trend not decreasing: early_avg={early_avg:.4f}, late_avg={late_avg:.4f}"
        logger.info(
            f"Loss trend validation passed: early_avg={early_avg:.4f}, late_avg={late_avg:.4f}"
        )
    logger.info("All loss validation checks passed!")
    return result
if __name__ == "__main__":
    fit_func()
| {
"repo_id": "ray-project/ray",
"file_path": "release/train_tests/local_mode/torch_local_mode_test.py",
"license": "Apache License 2.0",
"lines": 136,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:release/nightly_tests/multimodal_inference_benchmarks/audio_transcription/daft_main.py | # This file is adapted from https://github.com/Eventual-Inc/Daft/tree/9da265d8f1e5d5814ae871bed3cee1b0757285f5/benchmarking/ai/audio_transcription
from __future__ import annotations
import io
import time
import uuid
import ray
import numpy as np
import torch
import torchaudio
import torchaudio.transforms as T
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor
import daft
TRANSCRIPTION_MODEL = "openai/whisper-tiny"
NUM_GPUS = 8
# Target rate the audio is resampled to before feature extraction.
NEW_SAMPLING_RATE = 16000
INPUT_PATH = "s3://anonymous@ray-example-data/common_voice_17/parquet/"
# Unique output prefix so repeated benchmark runs do not collide.
OUTPUT_PATH = f"s3://ray-data-write-benchmark/{uuid.uuid4().hex}"
daft.context.set_runner_ray()
@ray.remote
def warmup():
    # No-op task used purely to force worker startup/scheduling.
    pass
# NOTE: On a fresh Ray cluster, it can take a minute or longer to schedule the first
# task. To ensure benchmarks compare data processing speed and not cluster startup
# overhead, this code launches a several tasks as warmup.
ray.get([warmup.remote() for _ in range(64)])
def resample(audio_bytes):
    """Decode FLAC bytes and resample to NEW_SAMPLING_RATE as a numpy array."""
    signal, source_rate = torchaudio.load(io.BytesIO(audio_bytes), format="flac")
    resampler = T.Resample(source_rate, NEW_SAMPLING_RATE)
    return np.array(resampler(signal).squeeze())
# Shared Whisper processor: feature extraction for audio + token decoding.
processor = AutoProcessor.from_pretrained(TRANSCRIPTION_MODEL)
@daft.udf(return_dtype=daft.DataType.tensor(daft.DataType.float32()))
def whisper_preprocess(resampled):
    """Daft UDF: extract Whisper input features from resampled waveforms."""
    # Convert the Daft series to a plain Python list of arrays for the processor.
    extracted_features = processor(
        resampled.to_arrow().to_numpy(zero_copy_only=False).tolist(),
        sampling_rate=NEW_SAMPLING_RATE,
        device="cpu",
    ).input_features
    return extracted_features
@daft.udf(
    return_dtype=daft.DataType.list(daft.DataType.int32()),
    batch_size=64,
    concurrency=NUM_GPUS,
    num_gpus=1,
)
class Transcriber:
    """Stateful Daft UDF running Whisper generation, one GPU per instance."""
    def __init__(self) -> None:
        # Fall back to CPU so the script still runs without CUDA.
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.dtype = torch.float16
        self.model = AutoModelForSpeechSeq2Seq.from_pretrained(
            TRANSCRIPTION_MODEL,
            torch_dtype=self.dtype,
            low_cpu_mem_usage=True,
            use_safetensors=True,
        )
        self.model.to(self.device)
    def __call__(self, extracted_features):
        # Stack per-row spectrograms into a single batch tensor on the device.
        spectrograms = np.array(extracted_features)
        spectrograms = torch.tensor(spectrograms).to(self.device, dtype=self.dtype)
        with torch.no_grad():
            token_ids = self.model.generate(spectrograms)
        return token_ids.cpu().numpy()
@daft.udf(return_dtype=daft.DataType.string())
def decoder(token_ids):
    """Daft UDF: decode generated token ids back into transcription strings."""
    transcription = processor.batch_decode(token_ids, skip_special_tokens=True)
    return transcription
start_time = time.time()
df = daft.read_parquet(INPUT_PATH)
# Decode + resample the raw FLAC bytes into float waveforms.
df = df.with_column(
    "resampled",
    df["audio"]["bytes"].apply(
        resample, return_dtype=daft.DataType.list(daft.DataType.float32())
    ),
)
df = df.with_column("extracted_features", whisper_preprocess(df["resampled"]))
df = df.with_column("token_ids", Transcriber(df["extracted_features"]))
df = df.with_column("transcription", decoder(df["token_ids"]))
df = df.with_column("transcription_length", df["transcription"].str.length())
# Drop intermediate columns before writing results.
df = df.exclude("token_ids", "extracted_features", "resampled")
df.write_parquet(OUTPUT_PATH)
print("Runtime:", time.time() - start_time)
| {
"repo_id": "ray-project/ray",
"file_path": "release/nightly_tests/multimodal_inference_benchmarks/audio_transcription/daft_main.py",
"license": "Apache License 2.0",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:release/nightly_tests/multimodal_inference_benchmarks/audio_transcription/ray_data_main.py | from __future__ import annotations
import io
import time
import uuid
import numpy as np
import ray
import torch
import torchaudio
import torchaudio.transforms as T
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor
TRANSCRIPTION_MODEL = "openai/whisper-tiny"
NUM_GPUS = 8
# Target rate the audio is resampled to before feature extraction.
SAMPLING_RATE = 16000
INPUT_PATH = "s3://anonymous@ray-example-data/common_voice_17/parquet/"
# Unique output prefix so repeated benchmark runs do not collide.
OUTPUT_PATH = f"s3://ray-data-write-benchmark/{uuid.uuid4().hex}"
BATCH_SIZE = 64
ray.init()
@ray.remote
def warmup():
    # No-op task used purely to force worker startup/scheduling.
    pass
# NOTE: On a fresh Ray cluster, it can take a minute or longer to schedule the first
# task. To ensure benchmarks compare data processing speed and not cluster startup
# overhead, this code launches a several tasks as warmup.
ray.get([warmup.remote() for _ in range(64)])
def resample(item):
    """Row mapper: decode FLAC bytes and attach the resampled waveform as "arr"."""
    # NOTE: Remove the `audio` column since we don't need it anymore. This is done by
    # the system automatically on Ray Data 2.51+ with the `with_column` API.
    audio = item.pop("audio")
    audio_bytes = audio["bytes"]
    waveform, sampling_rate = torchaudio.load(io.BytesIO(audio_bytes), format="flac")
    waveform = T.Resample(sampling_rate, SAMPLING_RATE)(waveform).squeeze()
    item["arr"] = np.array(waveform)
    return item
# Shared Whisper processor: feature extraction for audio + token decoding.
processor = AutoProcessor.from_pretrained(TRANSCRIPTION_MODEL)
def whisper_preprocess(batch):
    """Batch mapper: extract Whisper input features from raw waveforms."""
    array = batch.pop("arr")
    extracted_features = processor(
        array.tolist(),
        sampling_rate=SAMPLING_RATE,
        return_tensors="np",
        device="cpu",
    ).input_features
    batch["input_features"] = list(extracted_features)
    return batch
class Transcriber:
    """Stateful batch mapper running Whisper generation, one GPU per instance."""
    def __init__(self):
        # Fall back to CPU so the script still runs without CUDA.
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.dtype = torch.float16
        self.model_id = TRANSCRIPTION_MODEL
        self.model = AutoModelForSpeechSeq2Seq.from_pretrained(
            self.model_id,
            torch_dtype=self.dtype,
            low_cpu_mem_usage=True,
            use_safetensors=True,
        )
        self.model.to(self.device)
    def __call__(self, batch):
        input_features = batch.pop("input_features")
        # Stack per-row spectrograms into a single batch tensor on the device.
        spectrograms = np.array(input_features)
        spectrograms = torch.tensor(spectrograms).to(self.device, dtype=self.dtype)
        with torch.no_grad():
            token_ids = self.model.generate(spectrograms)
        batch["token_ids"] = token_ids.cpu().numpy()
        return batch
def decoder(batch):
    """Batch mapper: decode token ids to text and record transcription lengths."""
    # NOTE: Remove the `token_ids` column since we don't need it anymore. This is done by
    # the system automatically on Ray Data 2.51+ with the `with_column` API.
    token_ids = batch.pop("token_ids")
    transcription = processor.batch_decode(token_ids, skip_special_tokens=True)
    batch["transcription"] = transcription
    batch["transcription_length"] = np.array([len(t) for t in transcription])
    return batch
start_time = time.time()
ds = ray.data.read_parquet(INPUT_PATH)
# Match downstream batch size so each block holds one inference batch.
ds = ds.repartition(target_num_rows_per_block=BATCH_SIZE)
ds = ds.map(resample)
ds = ds.map_batches(whisper_preprocess, batch_size=BATCH_SIZE)
ds = ds.map_batches(
    Transcriber,
    batch_size=BATCH_SIZE,
    concurrency=NUM_GPUS,
    num_gpus=1,
)
ds = ds.map_batches(decoder)
ds.write_parquet(OUTPUT_PATH)
print("Runtime:", time.time() - start_time)
| {
"repo_id": "ray-project/ray",
"file_path": "release/nightly_tests/multimodal_inference_benchmarks/audio_transcription/ray_data_main.py",
"license": "Apache License 2.0",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:release/nightly_tests/multimodal_inference_benchmarks/document_embedding/daft_main.py | # This file is adapted from https://github.com/Eventual-Inc/Daft/tree/9da265d8f1e5d5814ae871bed3cee1b0757285f5/benchmarking/ai/document_embedding
from __future__ import annotations
import time
import uuid
import pymupdf
import torch
from langchain.text_splitter import RecursiveCharacterTextSplitter
import daft
from daft import col
import ray
EMBED_MODEL_ID = "sentence-transformers/all-MiniLM-L6-v2"
# Length of each embedding vector emitted by the embedder UDF.
EMBEDDING_DIM = 384
NUM_GPU_NODES = 8
INPUT_PATH = "s3://ray-example-data/digitalcorpora/metadata/**/"
# Unique output prefix so repeated benchmark runs do not collide.
OUTPUT_PATH = f"s3://ray-data-write-benchmark/{uuid.uuid4().hex}"
# PDFs with more pages than this are skipped entirely.
MAX_PDF_PAGES = 100
CHUNK_SIZE = 2048
CHUNK_OVERLAP = 200
EMBEDDING_BATCH_SIZE = 10
daft.context.set_runner_ray()
@ray.remote
def warmup():
    # No-op task used purely to force worker startup/scheduling.
    pass
# NOTE: On a fresh Ray cluster, it can take a minute or longer to schedule the first
# task. To ensure benchmarks compare data processing speed and not cluster startup
# overhead, this code launches a several tasks as warmup.
ray.get([warmup.remote() for _ in range(64)])
def extract_text_from_parsed_pdf(pdf_bytes):
    """Extract per-page text from raw PDF bytes.

    Returns a list of ``{"text", "page_number"}`` dicts, or ``None`` when the
    PDF exceeds MAX_PDF_PAGES or fails to parse (best-effort benchmark input).
    """
    try:
        doc = pymupdf.Document(stream=pdf_bytes, filetype="pdf")
        if len(doc) > MAX_PDF_PAGES:
            print(f"Skipping PDF because it has {len(doc)} pages")
            return None
        page_texts = [
            {"text": page.get_text(), "page_number": page.number} for page in doc
        ]
        return page_texts
    except Exception as e:
        print(f"Error extracting text from PDF {e}")
        return None
def chunk(text):
    """Split a page's text into overlapping chunks.

    Args:
        text: Full text of one PDF page.

    Returns:
        A list of ``{"text": ..., "chunk_id": ...}`` dicts, one per chunk,
        with ``chunk_id`` numbering the chunks from 0 in page order.
    """
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=CHUNK_SIZE, chunk_overlap=CHUNK_OVERLAP
    )
    # Fix: the original loop variable shadowed the `text` parameter; use a
    # distinct name so the parameter stays readable for the whole function.
    return [
        {"text": piece, "chunk_id": chunk_index}
        for chunk_index, piece in enumerate(splitter.split_text(text))
    ]
@daft.udf(
    return_dtype=daft.DataType.fixed_size_list(daft.DataType.float32(), EMBEDDING_DIM),
    concurrency=NUM_GPU_NODES,
    num_gpus=1.0,
    batch_size=EMBEDDING_BATCH_SIZE,
)
class Embedder:
    """Stateful Daft UDF embedding text chunks with SentenceTransformers on GPU."""
    def __init__(self):
        from sentence_transformers import SentenceTransformer
        # Fall back to CPU so the script still runs without CUDA.
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.model = SentenceTransformer(EMBED_MODEL_ID, device=device)
        self.model.compile()
    def __call__(self, text_col):
        # Short-circuit empty batches before touching the model.
        if len(text_col) == 0:
            return []
        embeddings = self.model.encode(
            text_col.to_pylist(),
            convert_to_tensor=True,
            # torch_dtype=torch.bfloat16,
        )
        return embeddings.cpu().numpy()
start_time = time.time()
df = daft.read_parquet(INPUT_PATH)
# Keep only PDF files from the corpus metadata.
df = df.where(daft.col("file_name").str.endswith(".pdf"))
df = df.with_column("pdf_bytes", df["uploaded_pdf_path"].url.download())
pages_struct_type = daft.DataType.struct(
    fields={"text": daft.DataType.string(), "page_number": daft.DataType.int32()}
)
# Parse each PDF into per-page structs, then flatten to one row per page.
df = df.with_column(
    "pages",
    df["pdf_bytes"].apply(
        extract_text_from_parsed_pdf,
        return_dtype=daft.DataType.list(pages_struct_type),
    ),
)
df = df.explode("pages")
df = df.with_columns(
    {"page_text": col("pages")["text"], "page_number": col("pages")["page_number"]}
)
# Drop rows for PDFs that were skipped or failed to parse (pages is null).
df = df.where(daft.col("page_text").not_null())
chunks_struct_type = daft.DataType.struct(
    fields={"text": daft.DataType.string(), "chunk_id": daft.DataType.int32()}
)
# Split each page into overlapping chunks, then flatten to one row per chunk.
df = df.with_column(
    "chunks",
    df["page_text"].apply(chunk, return_dtype=daft.DataType.list(chunks_struct_type)),
)
df = df.explode("chunks")
df = df.with_columns(
    {"chunk": col("chunks")["text"], "chunk_id": col("chunks")["chunk_id"]}
)
df = df.where(daft.col("chunk").not_null())
df = df.with_column("embedding", Embedder(df["chunk"]))
# Keep only the final output columns before writing.
df = df.select("uploaded_pdf_path", "page_number", "chunk_id", "chunk", "embedding")
df.write_parquet(OUTPUT_PATH)
print("Runtime:", time.time() - start_time)
| {
"repo_id": "ray-project/ray",
"file_path": "release/nightly_tests/multimodal_inference_benchmarks/document_embedding/daft_main.py",
"license": "Apache License 2.0",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:release/nightly_tests/multimodal_inference_benchmarks/document_embedding/ray_data_main.py | from __future__ import annotations
import pymupdf
import ray
import ray.data
from ray.data.expressions import download
import torch
from langchain.text_splitter import RecursiveCharacterTextSplitter
from sentence_transformers import SentenceTransformer
import uuid
import time
EMBED_MODEL_ID = "sentence-transformers/all-MiniLM-L6-v2"
# Length of each embedding vector produced by the model.
EMBEDDING_DIM = 384
NUM_GPU_NODES = 8
INPUT_PATH = "s3://anonymous@ray-example-data/digitalcorpora/metadata/"
# Unique output prefix so repeated benchmark runs do not collide.
OUTPUT_PATH = f"s3://ray-data-write-benchmark/{uuid.uuid4().hex}"
# PDFs with more pages than this are skipped entirely.
MAX_PDF_PAGES = 100
CHUNK_SIZE = 2048
CHUNK_OVERLAP = 200
EMBEDDING_BATCH_SIZE = 10
ray.init()
@ray.remote
def warmup():
    # No-op task used purely to force worker startup/scheduling.
    pass
# NOTE: On a fresh Ray cluster, it can take a minute or longer to schedule the first
# task. To ensure benchmarks compare data processing speed and not cluster startup
# overhead, this code launches a several tasks as warmup.
ray.get([warmup.remote() for _ in range(64)])
def extract_text_from_pdf(row):
    """Row flat-map: yield one row per page of the PDF in ``row["bytes"]``.

    Yields nothing (drops the document) if the PDF is too long or unparsable.
    """
    try:
        # NOTE: Remove the `bytes` column since we don't need it anymore. This is done by
        # the system automatically on Ray Data 2.51+ with the `with_column` API.
        bs = row.pop("bytes")
        doc = pymupdf.Document(stream=bs, filetype="pdf")
        if len(doc) > MAX_PDF_PAGES:
            path = row["uploaded_pdf_path"]
            print(f"Skipping PDF {path} because it has {len(doc)} pages")
            return
        for page in doc:
            # NOTE(review): the same dict object is mutated and re-yielded per
            # page — relies on the consumer handling each row before the next
            # mutation; confirm downstream copies the row.
            row["page_text"] = page.get_text()
            row["page_number"] = page.number
            yield row
    except Exception as e:
        path = row["uploaded_pdf_path"]
        print(f"Error extracting text from PDF {path}: {e}")
        return
def chunker(row):
    """Split ``row["page_text"]`` into overlapping chunks, one output row each.

    Output rows keep the input columns (minus ``page_text``) and add
    ``chunk`` (the chunk text) and ``chunk_id`` (0-based index within the
    page).
    """
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=CHUNK_SIZE, chunk_overlap=CHUNK_OVERLAP
    )
    page_text = row.pop("page_text")
    chunk_iter = splitter.split_text(page_text)
    for chunk_index, text in enumerate(chunk_iter):
        # BUGFIX: yield a fresh dict per chunk. The previous code mutated and
        # re-yielded the same `row` object, so all yielded rows aliased one
        # dict; a consumer that buffers before copying would see only the
        # final chunk repeated.
        yield {**row, "chunk": text, "chunk_id": chunk_index}
class Embedder:
    """Ray Data actor: embeds batches of text chunks with SentenceTransformer."""

    def __init__(self):
        # One model instance per actor, placed on GPU when available.
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.model = SentenceTransformer(EMBED_MODEL_ID, device=device)
        self.model.compile()

    def __call__(self, batch):
        # `batch["chunk"]` is a sequence of strings; `encode` returns one
        # embedding vector per input string.
        embedding = self.model.encode(
            batch["chunk"],
        )
        batch["embedding"] = embedding
        return batch


start_time = time.time()
# Pipeline: list PDF metadata -> download PDFs -> extract page text ->
# chunk pages -> embed chunks on GPU actors -> write Parquet to S3.
(
    ray.data.read_parquet(INPUT_PATH)
    .filter(lambda row: row["file_name"].endswith(".pdf"))
    .with_column("bytes", download("uploaded_pdf_path"))
    .flat_map(extract_text_from_pdf)
    .flat_map(chunker)
    .map_batches(
        Embedder,
        concurrency=NUM_GPU_NODES,
        num_gpus=1.0,
        batch_size=EMBEDDING_BATCH_SIZE,
    )
    .select_columns(
        ["uploaded_pdf_path", "page_number", "chunk_id", "chunk", "embedding"]
    )
    .write_parquet(OUTPUT_PATH)
)
print("Runtime:", time.time() - start_time)
| {
"repo_id": "ray-project/ray",
"file_path": "release/nightly_tests/multimodal_inference_benchmarks/document_embedding/ray_data_main.py",
"license": "Apache License 2.0",
"lines": 85,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:release/nightly_tests/multimodal_inference_benchmarks/image_classification/daft_main.py | # This file is adapted from https://github.com/Eventual-Inc/Daft/tree/9da265d8f1e5d5814ae871bed3cee1b0757285f5/benchmarking/ai/image_classification
from __future__ import annotations
import time
import uuid
import daft
from daft import col
import numpy as np
import ray
import torch
from torchvision import transforms
from torchvision.models import ResNet18_Weights, resnet18
# Benchmark configuration.
NUM_GPU_NODES = 8
INPUT_PATH = "s3://anonymous@ray-example-data/imagenet/metadata_file"
OUTPUT_PATH = f"s3://ray-data-write-benchmark/{uuid.uuid4().hex}"  # unique per run
BATCH_SIZE = 100
IMAGE_DIM = (3, 224, 224)  # (channels, height, width) tensor shape fed to the model
daft.context.set_runner_ray()
@ray.remote
def warmup():
    # Intentionally empty: scheduling this task only forces Ray worker startup.
    pass
# NOTE: On a fresh Ray cluster, it can take a minute or longer to schedule the first
# task. To ensure benchmarks compare data processing speed and not cluster startup
# overhead, this code launches a several tasks as warmup.
ray.get([warmup.remote() for _ in range(64)])
# Pretrained ImageNet weights and the matching preprocessing transform.
weights = ResNet18_Weights.DEFAULT
transform = transforms.Compose([transforms.ToTensor(), weights.transforms()])
@daft.udf(
    return_dtype=daft.DataType.string(),
    concurrency=NUM_GPU_NODES,
    num_gpus=1.0,
    batch_size=BATCH_SIZE,
)
class ResNetModel:
    """Daft UDF: classify batches of preprocessed images with ResNet-18."""

    def __init__(self):
        self.weights = weights
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = resnet18(weights=weights).to(self.device)
        self.model.eval()

    def __call__(self, images):
        # Daft may invoke the UDF with an empty partition.
        if len(images) == 0:
            return []
        torch_batch = torch.from_numpy(np.array(images.to_pylist())).to(self.device)
        with torch.inference_mode():
            prediction = self.model(torch_batch)
        predicted_classes = prediction.argmax(dim=1).detach().cpu()
        # Map predicted class indices to human-readable ImageNet category names.
        predicted_labels = [
            self.weights.meta["categories"][i] for i in predicted_classes
        ]
        return predicted_labels
start_time = time.time()
df = daft.read_parquet(INPUT_PATH)
# NOTE: Limit to the 803,580 images Daft uses in their benchmark.
df = df.limit(803_580)
# NOTE: We need to manually repartition the DataFrame to achieve good performance. This
# code isn't in Daft's benchmark, possibly because their Parquet metadata is
# pre-partitioned. Note we're using `repartition(NUM_GPUS)` instead of
# `into_partitions(NUM_CPUS * 2)` as suggested in Daft's documentation. In our
# experiments, the recommended approach led to OOMs, crashes, and slower performance.
df = df.repartition(NUM_GPU_NODES)
# Download each image URL and decode it; undecodable files become null.
df = df.with_column(
    "decoded_image",
    df["image_url"]
    .url.download()
    .image.decode(on_error="null", mode=daft.ImageMode.RGB),
)
# NOTE: At least one image encounters this error: https://github.com/etemesi254/zune-image/issues/244.
# So, we need to return "null" for errored files and filter them out.
df = df.where(df["decoded_image"].not_null())
# Apply the torchvision preprocessing on CPU ahead of GPU inference.
df = df.with_column(
    "norm_image",
    df["decoded_image"].apply(
        func=lambda image: transform(image),
        return_dtype=daft.DataType.tensor(
            dtype=daft.DataType.float32(), shape=IMAGE_DIM
        ),
    ),
)
df = df.with_column("label", ResNetModel(col("norm_image")))
df = df.select("image_url", "label")
df.write_parquet(OUTPUT_PATH)
print("Runtime:", time.time() - start_time)
| {
"repo_id": "ray-project/ray",
"file_path": "release/nightly_tests/multimodal_inference_benchmarks/image_classification/daft_main.py",
"license": "Apache License 2.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:release/nightly_tests/multimodal_inference_benchmarks/image_classification/ray_data_main.py | from __future__ import annotations
import io
import time
import torch
from packaging import version
from PIL import Image
from torchvision import transforms
from torchvision.models import ResNet18_Weights, resnet18
from ray.data.expressions import download
import numpy as np
import uuid
import ray
# Benchmark configuration.
NUM_GPU_NODES = 8
INPUT_PATH = "s3://anonymous@ray-example-data/imagenet/metadata_file.parquet"
OUTPUT_PATH = f"s3://ray-data-write-benchmark/{uuid.uuid4().hex}"  # unique per run
BATCH_SIZE = 100
# Pretrained ImageNet weights and the matching preprocessing transform.
weights = ResNet18_Weights.DEFAULT
transform = transforms.Compose([transforms.ToTensor(), weights.transforms()])
ray.init()
@ray.remote
def warmup():
    # Intentionally empty: scheduling this task only forces Ray worker startup.
    pass
# NOTE: On a fresh Ray cluster, it can take a minute or longer to schedule the first
# task. To ensure benchmarks compare data processing speed and not cluster startup
# overhead, this code launches a several tasks as warmup.
ray.get([warmup.remote() for _ in range(64)])
def deserialize_image(row):
    """Decode the row's downloaded image bytes into an RGB ndarray column."""
    image = Image.open(io.BytesIO(row["bytes"])).convert("RGB")
    # NOTE: Remove the `bytes` column since we don't need it anymore. This is done by
    # the system automatically on Ray Data 2.51+ with the `with_column` API.
    del row["bytes"]
    row["image"] = np.array(image)
    return row
def transform_image(row):
    """Apply the torchvision preprocessing, producing the `norm_image` column."""
    row["norm_image"] = transform(row["image"]).numpy()
    # NOTE: Remove the `image` column since we don't need it anymore. This is done by
    # the system automatically on Ray Data 2.51+ with the `with_column` API.
    del row["image"]
    return row
class ResNetActor:
    """Ray Data actor: classify batches of preprocessed images with ResNet-18."""

    def __init__(self):
        self.weights = weights
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = resnet18(weights=self.weights).to(self.device)
        self.model.eval()

    def __call__(self, batch):
        torch_batch = torch.from_numpy(batch["norm_image"]).to(self.device)
        # NOTE: Remove the `norm_image` column since we don't need it anymore. This is
        # done by the system automatically on Ray Data 2.51+ with the `with_column`
        # API.
        del batch["norm_image"]
        with torch.inference_mode():
            prediction = self.model(torch_batch)
        predicted_classes = prediction.argmax(dim=1).detach().cpu()
        # Map predicted class indices to human-readable ImageNet category names.
        predicted_labels = [
            self.weights.meta["categories"][i] for i in predicted_classes
        ]
        batch["label"] = predicted_labels
        return batch
start_time = time.time()
# You can use `download` on Ray 2.50+.
if version.parse(ray.__version__) > version.parse("2.49.2"):
    ds = (
        ray.data.read_parquet(INPUT_PATH)
        # NOTE: Limit to the 803,580 images Daft uses in their benchmark.
        .limit(803_580)
        .with_column("bytes", download("image_url"))
        .map(fn=deserialize_image)
        .map(fn=transform_image)
        .map_batches(
            fn=ResNetActor,
            batch_size=BATCH_SIZE,
            num_gpus=1.0,
            concurrency=NUM_GPU_NODES,
        )
        .select_columns(["image_url", "label"])
    )
    ds.write_parquet(OUTPUT_PATH)
else:
    # Older Ray: materialize the paths first, then read images directly.
    # NOTE: Limit to the 803,580 images Daft uses in their benchmark.
    paths = ray.data.read_parquet(INPUT_PATH).limit(803_580).take_all()
    paths = [row["image_url"] for row in paths]
    ds = (
        ray.data.read_images(
            paths, include_paths=True, ignore_missing_paths=True, mode="RGB"
        )
        .map(fn=transform_image)
        .map_batches(
            fn=ResNetActor,
            batch_size=BATCH_SIZE,
            num_gpus=1.0,
            concurrency=NUM_GPU_NODES,
        )
        .select_columns(["path", "label"])
    )
    ds.write_parquet(OUTPUT_PATH)
print("Runtime:", time.time() - start_time)
| {
"repo_id": "ray-project/ray",
"file_path": "release/nightly_tests/multimodal_inference_benchmarks/image_classification/ray_data_main.py",
"license": "Apache License 2.0",
"lines": 97,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:release/nightly_tests/multimodal_inference_benchmarks/large_image_embedding/daft_main.py | import time
import uuid
import numpy as np
from pybase64 import b64decode
import ray
import torch
from transformers import ViTImageProcessor, ViTForImageClassification
from daft import DataType, udf
import daft
# Benchmark configuration.
BATCH_SIZE = 1024
INPUT_PREFIX = "s3://anonymous@ray-example-data/image-datasets/10TiB-b64encoded-images-in-parquet-v3/"
OUTPUT_PREFIX = f"s3://ray-data-write-benchmark/{uuid.uuid4().hex}"  # unique per run
# ViT preprocessing configured explicitly (rescale_factor is 1/255) so runs
# don't depend on downloading a preprocessor config from the hub.
PROCESSOR = ViTImageProcessor(
    do_convert_rgb=None,
    do_normalize=True,
    do_rescale=True,
    do_resize=True,
    image_mean=[0.5, 0.5, 0.5],
    image_std=[0.5, 0.5, 0.5],
    resample=2,
    rescale_factor=0.00392156862745098,
    size={"height": 224, "width": 224},
)
daft.context.set_runner_ray()
@ray.remote
def warmup():
    # Intentionally empty: scheduling this task only forces Ray worker startup.
    pass
# NOTE: On a fresh Ray cluster, it can take a minute or longer to schedule the first
# task. To ensure benchmarks compare data processing speed and not cluster startup
# overhead, this code launches a several tasks as warmup.
ray.get([warmup.remote() for _ in range(64)])
def decode(data: bytes) -> bytes:
    """Strictly base64-decode one value (validate=True rejects non-alphabet bytes)."""
    return b64decode(data, None, True)
def preprocess(image):
    """Run the ViT image processor on one image, returning its pixel tensor."""
    outputs = PROCESSOR(images=image)["pixel_values"]
    # The processor was given exactly one image, so expect exactly one output.
    assert len(outputs) == 1, type(outputs)
    return outputs[0]
@udf(
    return_dtype=DataType.tensor(DataType.float32()),
    batch_size=BATCH_SIZE,
    num_gpus=1,
    concurrency=40,
)
class Infer:
    """Daft UDF: compute ViT classification logits for a batch of images."""

    def __init__(self):
        self._device = "cuda" if torch.cuda.is_available() else "cpu"
        self._model = ViTForImageClassification.from_pretrained(
            "google/vit-base-patch16-224"
        ).to(self._device)

    def __call__(self, image_column) -> np.ndarray:
        image_ndarray = np.array(image_column.to_pylist())
        with torch.inference_mode():
            next_tensor = torch.from_numpy(image_ndarray).to(
                dtype=torch.float32, device=self._device, non_blocking=True
            )
            output = self._model(next_tensor).logits
        return output.cpu().detach().numpy()
start_time = time.time()
# Pipeline: read base64 Parquet -> decode bytes -> decode image -> record
# dimensions -> ViT preprocessing -> GPU inference -> write embeddings.
df = daft.read_parquet(INPUT_PREFIX)
df = df.with_column("image", df["image"].apply(decode, return_dtype=DataType.binary()))
df = df.with_column("image", df["image"].image.decode(mode=daft.ImageMode.RGB))
# BUGFIX: use the `.image.height()` expression accessor, matching the
# `.image.width()` call on the next line; the previous `image_height()` call
# does not follow Daft's `.image` expression namespace.
df = df.with_column("height", df["image"].image.height())
df = df.with_column("width", df["image"].image.width())
df = df.with_column(
    "image",
    df["image"].apply(preprocess, return_dtype=DataType.tensor(DataType.float32())),
)
df = df.with_column("embeddings", Infer(df["image"]))
df = df.select("embeddings")
df.write_parquet(OUTPUT_PREFIX)
print("Runtime", time.time() - start_time)
| {
"repo_id": "ray-project/ray",
"file_path": "release/nightly_tests/multimodal_inference_benchmarks/large_image_embedding/daft_main.py",
"license": "Apache License 2.0",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:release/nightly_tests/multimodal_inference_benchmarks/large_image_embedding/ray_data_main.py | from io import BytesIO
import time
from typing import Dict, Any
import uuid
import numpy as np
from PIL import Image
from pybase64 import b64decode
import torch
from transformers import ViTImageProcessor, ViTForImageClassification
import ray
# Benchmark configuration.
INPUT_PREFIX = "s3://anonymous@ray-example-data/image-datasets/10TiB-b64encoded-images-in-parquet-v3/"
OUTPUT_PREFIX = f"s3://ray-data-write-benchmark/{uuid.uuid4().hex}"  # unique per run
BATCH_SIZE = 1024
# ViT preprocessing configured explicitly (rescale_factor is 1/255) so runs
# don't depend on downloading a preprocessor config from the hub.
PROCESSOR = ViTImageProcessor(
    do_convert_rgb=None,
    do_normalize=True,
    do_rescale=True,
    do_resize=True,
    image_mean=[0.5, 0.5, 0.5],
    image_std=[0.5, 0.5, 0.5],
    resample=2,
    rescale_factor=0.00392156862745098,
    size={"height": 224, "width": 224},
)
ray.init()
@ray.remote
def warmup():
    # Intentionally empty: scheduling this task only forces Ray worker startup.
    pass
# NOTE: On a fresh Ray cluster, it can take a minute or longer to schedule the first
# task. To ensure benchmarks compare data processing speed and not cluster startup
# overhead, this code launches a several tasks as warmup.
ray.get([warmup.remote() for _ in range(64)])
def decode(row: Dict[str, Any]) -> Dict[str, Any]:
    """Base64-decode one row's image bytes and load them as an RGB ndarray.

    Returns a new row holding the source URL, the original pixel dimensions,
    and the decoded image array; all other input columns are dropped.
    """
    raw_bytes = b64decode(row["image"], None, True)
    pil_image = Image.open(BytesIO(raw_bytes)).convert("RGB")
    out = {"original_url": row["url"]}
    out["original_width"], out["original_height"] = pil_image.size
    out["image"] = np.asarray(pil_image)
    return out
def preprocess(row: Dict[str, Any]) -> Dict[str, Any]:
    """Run the ViT image processor on the row's image, replacing it in place."""
    outputs = PROCESSOR(images=row["image"])["pixel_values"]
    # The processor was given exactly one image, so expect exactly one output.
    assert len(outputs) == 1, len(outputs)
    row["image"] = outputs[0]
    return row
class Infer:
    """Ray Data actor: compute ViT classification logits for a batch of images."""

    def __init__(self):
        self._device = "cuda" if torch.cuda.is_available() else "cpu"
        self._model = ViTForImageClassification.from_pretrained(
            "google/vit-base-patch16-224"
        ).to(self._device)

    def __call__(self, batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
        with torch.inference_mode():
            next_tensor = torch.from_numpy(batch["image"]).to(
                dtype=torch.float32, device=self._device, non_blocking=True
            )
            output = self._model(next_tensor).logits
        # Drop the preprocessed image tensor; keep metadata plus the logits.
        return {
            "original_url": batch["original_url"],
            "original_width": batch["original_width"],
            "original_height": batch["original_height"],
            "output": output.cpu().numpy(),
        }
start_time = time.time()
# Pipeline: read base64 Parquet -> decode -> preprocess -> GPU inference -> Parquet.
# NOTE(review): `write_parquet` returns None, so this `ds = (...)` binding is unused.
ds = (
    ray.data.read_parquet(INPUT_PREFIX)
    .map(decode)
    .map(preprocess)
    .map_batches(
        Infer,
        batch_size=BATCH_SIZE,
        num_gpus=1,
        concurrency=40,
    )
    .write_parquet(OUTPUT_PREFIX)
)
print("Runtime", time.time() - start_time)
| {
"repo_id": "ray-project/ray",
"file_path": "release/nightly_tests/multimodal_inference_benchmarks/large_image_embedding/ray_data_main.py",
"license": "Apache License 2.0",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:release/nightly_tests/multimodal_inference_benchmarks/video_object_detection/daft_main.py | # This file is adapted from https://github.com/Eventual-Inc/Daft/tree/9da265d8f1e5d5814ae871bed3cee1b0757285f5/benchmarking/ai/video_object_detection
from __future__ import annotations
import torch
import torchvision
from PIL import Image
from ultralytics import YOLO
import uuid
import daft
from daft.expressions import col
# Benchmark configuration.
NUM_GPU_NODES = 8
YOLO_MODEL = "yolo11n.pt"
INPUT_PATH = "s3://anonymous@ray-example-data/videos/Hollywood2-actions-videos/Hollywood2/AVIClips/"
OUTPUT_PATH = f"s3://ray-data-write-benchmark/{uuid.uuid4().hex}"  # unique per run
IMAGE_HEIGHT = 640
IMAGE_WIDTH = 640
@daft.udf(
    return_dtype=daft.DataType.list(
        daft.DataType.struct(
            {
                "label": daft.DataType.string(),
                "confidence": daft.DataType.float32(),
                "bbox": daft.DataType.list(daft.DataType.int32()),
            }
        )
    ),
    concurrency=NUM_GPU_NODES,
    num_gpus=1.0,
)
class ExtractImageFeatures:
    """Daft UDF: run YOLO object detection on a batch of video frames.

    Returns, per frame, a list of {label, confidence, bbox} structs, one per
    detected object.
    """

    def __init__(self):
        self.model = YOLO(YOLO_MODEL)
        if torch.cuda.is_available():
            self.model.to("cuda")

    def to_features(self, res):
        """Convert one ultralytics result into a list of feature dicts."""
        # BUGFIX: the previous code zipped `res.names` with the per-box
        # tensors. `res.names` is the model's id->name mapping over *all*
        # classes, so iterating it yields class ids in enumeration order,
        # not the labels of the detected boxes. Look up each detection's
        # predicted class id (`res.boxes.cls`) in `res.names` instead.
        return [
            {
                "label": res.names[int(cls_id)],
                "confidence": confidence.item(),
                "bbox": bbox.tolist(),
            }
            for cls_id, confidence, bbox in zip(
                res.boxes.cls, res.boxes.conf, res.boxes.xyxy
            )
        ]

    def __call__(self, images):
        # Daft may invoke the UDF with an empty partition.
        if len(images) == 0:
            return []
        batch = [
            torchvision.transforms.functional.to_tensor(Image.fromarray(image))
            for image in images
        ]
        stack = torch.stack(batch, dim=0)
        return daft.Series.from_pylist(
            [self.to_features(res) for res in self.model(stack)]
        )
daft.context.set_runner_ray()
# Decode videos into fixed-size frames.
df = daft.read_video_frames(
    INPUT_PATH,
    image_height=IMAGE_HEIGHT,
    image_width=IMAGE_WIDTH,
)
# Detect objects per frame, then expand to one row per detection.
df = df.with_column("features", ExtractImageFeatures(col("data")))
df = df.explode("features")
# Crop each detection's bounding box out of the frame and PNG-encode it.
df = df.with_column(
    "object",
    daft.col("data").image.crop(daft.col("features")["bbox"]).image.encode("png"),
)
df = df.exclude("data")
df.write_parquet(OUTPUT_PATH)
| {
"repo_id": "ray-project/ray",
"file_path": "release/nightly_tests/multimodal_inference_benchmarks/video_object_detection/daft_main.py",
"license": "Apache License 2.0",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:release/nightly_tests/multimodal_inference_benchmarks/video_object_detection/ray_data_main.py | from PIL import Image
import ray
from ultralytics import YOLO
import torch
import torchvision
import numpy as np
import io
import uuid
# Benchmark configuration.
NUM_GPU_NODES = 8
YOLO_MODEL = "yolo11n.pt"
INPUT_PATH = "s3://anonymous@ray-example-data/videos/Hollywood2-actions-videos/Hollywood2/AVIClips/"
OUTPUT_PATH = f"s3://ray-data-write-benchmark/{uuid.uuid4().hex}"  # unique per run
IMAGE_HEIGHT = 640
IMAGE_WIDTH = 640
# This was a change made: Alter batch size accordingly
# batch_size = 32 for 1x large
# batch_size = 100 for 2x, 4x, and 8x large
BATCH_SIZE = 32
ray.init()
class ExtractImageFeatures:
    """Ray Data batch UDF: run YOLO object detection on a batch of frames.

    Adds a ``features`` column holding, per frame, a list of
    {label, confidence, bbox} dicts, one per detected object.
    """

    def __init__(self):
        self.model = YOLO(YOLO_MODEL)
        if torch.cuda.is_available():
            self.model.to("cuda")

    def to_features(self, res):
        """Convert one ultralytics result into a list of feature dicts."""
        # BUGFIX: the previous code zipped `res.names` with the per-box
        # tensors. `res.names` is the model's id->name mapping over *all*
        # classes, so iterating it yields class ids in enumeration order,
        # not the labels of the detected boxes. Look up each detection's
        # predicted class id (`res.boxes.cls`) in `res.names` instead.
        return [
            {
                "label": res.names[int(cls_id)],
                "confidence": confidence.item(),
                "bbox": bbox.tolist(),  # TODO: Use numpy
            }
            for cls_id, confidence, bbox in zip(
                res.boxes.cls, res.boxes.conf, res.boxes.xyxy
            )
        ]

    def __call__(self, batch):
        frames = batch["frame"]
        # Guard against empty batches.
        if len(frames) == 0:
            batch["features"] = []
            return batch
        tensor_batch = [
            torchvision.transforms.functional.to_tensor(Image.fromarray(frame))
            for frame in frames
        ]
        stack = torch.stack(tensor_batch, dim=0)
        results = self.model(stack)
        features = [self.to_features(res) for res in results]
        batch["features"] = features
        return batch
def resize_frame(row):
    """Resize the row's frame to the model input size, in place."""
    frame = row["frame"]
    pil_image = Image.fromarray(frame)
    # BUGFIX: PIL's `Image.resize` takes a (width, height) tuple; the previous
    # code passed (IMAGE_HEIGHT, IMAGE_WIDTH). Both constants are currently
    # 640 so behavior is unchanged today, but the swapped order would
    # silently transpose the output if the two values ever diverge.
    resized_pil = pil_image.resize((IMAGE_WIDTH, IMAGE_HEIGHT))
    resized_frame = np.array(resized_pil)
    row["frame"] = resized_frame
    return row
def explode_features(row):
    """Expand a row holding a list of features into one row per feature.

    BUGFIX: yields a fresh dict per feature. The previous code mutated and
    re-yielded the same `row` object, so every yielded row aliased a single
    dict; a consumer that buffers the generator's output before copying
    would see only the final feature repeated.
    """
    for feature in row["features"]:
        yield {**row, "features": feature}
def crop_image(row):
    """PNG-encode the crop of the frame under the detection's bounding box."""
    frame = row["frame"]
    # bbox is [x1, y1, x2, y2]; PIL crop expects integer corner coordinates.
    bbox = row["features"]["bbox"]
    x1, y1, x2, y2 = map(int, bbox)
    pil_image = Image.fromarray(frame)
    cropped_pil = pil_image.crop((x1, y1, x2, y2))
    buf = io.BytesIO()
    # This was a change made: Use compress_level=2
    cropped_pil.save(buf, format="PNG", compress_level=2)
    cropped_pil_png = buf.getvalue()
    row["object"] = cropped_pil_png
    return row
# Pipeline: video frames -> resize -> GPU detection -> one row per detection ->
# crop + PNG-encode -> drop raw frames -> write Parquet.
ds = ray.data.read_videos(INPUT_PATH)
ds = ds.map(resize_frame)
ds = ds.map_batches(
    ExtractImageFeatures, batch_size=BATCH_SIZE, num_gpus=1.0, concurrency=NUM_GPU_NODES
)
ds = ds.flat_map(explode_features)
ds = ds.map(crop_image)
ds = ds.drop_columns(["frame"])
ds.write_parquet(OUTPUT_PATH)
| {
"repo_id": "ray-project/ray",
"file_path": "release/nightly_tests/multimodal_inference_benchmarks/video_object_detection/ray_data_main.py",
"license": "Apache License 2.0",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/tests/test_azure_ssh_config.py | """Tests for Azure autoscaler Path object serialization and SSH key handling.
This test verifies that the Azure autoscaler properly:
1. Converts Path objects to strings before storing them in configuration
2. Always removes ssh_public_key from auth config (both user-specified and auto-generated)
to prevent bootstrap config from containing paths that don't exist on head node
3. Always injects public key content into ARM template parameters for VM creation
4. Ensures configuration can be properly serialized to JSON
The ssh_public_key path is removed because bootstrap config gets copied to worker nodes
and must only contain paths that exist on the head node. The public key content is
still used via ARM template parameter injection during VM creation.
The original issue was introduced in PR #54596 which added automatic SSH key
generation but stored Path objects directly in the configuration, causing
serialization errors.
"""
import json
import sys
import pytest
from ray.autoscaler._private._azure.config import _configure_key_pair
@pytest.mark.parametrize(
    "test_case,auth_config,expected_public_key_content",
    [
        (
            "user_specified_keys",
            {
                "ssh_user": "ubuntu",
                "ssh_private_key": "private_key_path",  # Will be replaced with actual path
                "ssh_public_key": "public_key_path",  # Will be replaced with actual path
            },
            "ssh-rsa TEST_KEY user@example.com",
        ),
        (
            "auto_generated_keys",
            {"ssh_user": "ubuntu"},
            None,  # Will be auto-generated, so we just check it exists
        ),
    ],
)
def test_azure_key_pair_string_conversion(
    tmp_path, test_case, auth_config, expected_public_key_content
):
    """Test that Azure key pair configuration converts Path objects to strings.

    Tests both user-specified and auto-generated SSH key scenarios.
    """
    # Create the key files under pytest's temporary path (needed for user-specified case)
    private_key_path = tmp_path / "id_rsa"
    public_key_path = tmp_path / "id_rsa.pub"
    private_key_path.write_text("")
    public_key_path.write_text("ssh-rsa TEST_KEY user@example.com")
    # Replace placeholder paths with actual paths for user-specified keys.
    # Note: these are pathlib.Path objects on purpose -- the regression being
    # tested is that _configure_key_pair must stringify them.
    if (
        "ssh_private_key" in auth_config
        and auth_config["ssh_private_key"] == "private_key_path"
    ):
        auth_config["ssh_private_key"] = private_key_path
    if (
        "ssh_public_key" in auth_config
        and auth_config["ssh_public_key"] == "public_key_path"
    ):
        auth_config["ssh_public_key"] = public_key_path
    # Create test configuration
    config = {
        "auth": auth_config,
        "provider": {"location": "westus2", "resource_group": "test-group"},
        "available_node_types": {"ray.head.default": {"node_config": {}}},
    }
    # Process the config
    result_config = _configure_key_pair(config)
    # Verify private key path exists and was converted to string
    assert "ssh_private_key" in result_config["auth"]
    assert isinstance(result_config["auth"]["ssh_private_key"], str)
    # Verify ssh_public_key is always removed (both user-specified and auto-generated)
    # because bootstrap config must only contain paths that exist on head node
    assert "ssh_public_key" not in result_config["auth"]
    # Verify public key content was injected into ARM parameters
    head_node_config = result_config["available_node_types"]["ray.head.default"][
        "node_config"
    ]
    assert "azure_arm_parameters" in head_node_config
    assert "publicKey" in head_node_config["azure_arm_parameters"]
    actual_public_key = head_node_config["azure_arm_parameters"]["publicKey"]
    if expected_public_key_content is not None:
        # User-specified case: verify exact content
        assert actual_public_key == expected_public_key_content
    else:
        # Auto-generated case: just verify it exists and looks like an SSH key
        assert actual_public_key.strip()
        assert actual_public_key.startswith("ssh-rsa")
    # Verify config can be serialized to JSON without errors
    json_str = json.dumps(result_config)
    # If we get here, serialization succeeded
    # Now try to deserialize to make sure it's valid JSON
    deserialized = json.loads(json_str)
    assert isinstance(deserialized, dict)
if __name__ == "__main__":
    sys.exit(pytest.main(["-sv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/test_azure_ssh_config.py",
"license": "Apache License 2.0",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/llm/_internal/batch/benchmark/benchmark_processor.py | #!/usr/bin/env python
"""
Benchmark Ray Data LLM offline batch inference throughput.
Sample usage:
python ray.llm._internal.batch.benchmark.benchmark_processor --mode vllm_engine --batch-size 64 --concurrency 1 --num-prompts 10000 --model facebook/opt-1.3b
--tensor-parallel-size 2 --pipeline-parallel-size 2 --distributed-executor-backend ray
"""
import argparse
import sys
from dataclasses import dataclass
from enum import Enum
from time import perf_counter, sleep
import ray
from .dataset import ShareGPTDataset
from ray import data, serve
from ray.data.llm import (
ChatTemplateStageConfig,
DetokenizeStageConfig,
ServeDeploymentProcessorConfig,
TokenizerStageConfig,
build_processor,
vLLMEngineProcessorConfig,
)
from ray.serve.llm import (
LLMConfig,
ModelLoadingConfig,
build_llm_deployment,
)
from ray.serve.llm.openai_api_models import CompletionRequest
class Mode(Enum):
    """Processor to benchmark."""

    # Single-turn inference via a dedicated vLLM engine processor.
    VLLM_ENGINE = "vllm_engine"
    # Two chained processors sharing one vLLM engine config (multi-turn).
    SHARED_VLLM_ENGINE = "shared_vllm_engine"
    # Single-turn inference routed through a Ray Serve LLM deployment.
    SERVE_DEPLOYMENT = "serve_deployment"
    # Two chained processors against one shared Serve deployment (multi-turn).
    SHARED_SERVE_DEPLOYMENT = "shared_serve_deployment"
    # Classification (pooling) task via the vLLM engine processor.
    CLASSIFY = "classify"
# Default sampling parameters -- ensure a fair comparison by omitting sampling-induced variance
VLLM_SAMPLING_PARAMS = {
    "top_p": 1.0,
    "temperature": 1.0,
    "max_tokens": 100,
    "ignore_eos": True,
}
# Default vLLM engine kwargs
VLLM_ENGINE_KWARGS = {
    "max_num_batched_tokens": 4096,
}
# Default pooling parameters for classification
CLASSIFY_POOLING_PARAMS = {
    "truncate_prompt_tokens": -1,
}
def build_vllm_engine_kwargs(**kwargs) -> dict:
    """Merge caller overrides into the default vLLM engine kwargs.

    Keyword arguments whose value is ``None`` are treated as "not set" and
    dropped; everything else overrides or extends ``VLLM_ENGINE_KWARGS``.
    The module-level defaults are never mutated.
    """
    overrides = {name: value for name, value in kwargs.items() if value is not None}
    return {**VLLM_ENGINE_KWARGS, **overrides}
def _build_vllm_engine_config(
    model: str,
    batch_size: int,
    concurrency: int,
    pipeline_parallel_size: int = None,
    tensor_parallel_size: int = None,
    distributed_executor_backend: str = None,
    task_type: str = None,
    max_model_len: int = None,
) -> vLLMEngineProcessorConfig:
    """Helper to create vLLMEngineProcessorConfig.

    ``None`` arguments mean "use the engine default" and are omitted from
    the engine kwargs; ``task_type`` is applied only when explicitly given.
    """
    # build_vllm_engine_kwargs drops None-valued entries, so every optional
    # engine setting (including max_model_len) can be passed straight through.
    engine_kwargs = build_vllm_engine_kwargs(
        pipeline_parallel_size=pipeline_parallel_size,
        tensor_parallel_size=tensor_parallel_size,
        distributed_executor_backend=distributed_executor_backend,
        max_model_len=max_model_len,
    )
    config = vLLMEngineProcessorConfig(
        model_source=model,
        batch_size=batch_size,
        concurrency=concurrency,
        apply_chat_template=False,
        tokenize=False,
        detokenize=False,
        engine_kwargs=engine_kwargs,
    )
    if task_type is not None:
        config.task_type = task_type
    return config
def _build_serve_deployment_config(
    batch_size: int,
    concurrency: int,
    deployment_name: str = None,
    app_name: str = None,
) -> ServeDeploymentProcessorConfig:
    """Helper to create ServeDeploymentProcessorConfig.

    The dtype mapping registers the request classes that preprocessors may
    reference by name through a request row's ``dtype`` field.
    """
    supported_dtypes = {"CompletionRequest": CompletionRequest}
    return ServeDeploymentProcessorConfig(
        deployment_name=deployment_name,
        app_name=app_name,
        dtype_mapping=supported_dtypes,
        batch_size=batch_size,
        concurrency=concurrency,
    )
@dataclass(slots=True)
class BenchmarkResult:
    """Outcome of one benchmark run, with derived throughput and pretty-printing."""

    mode: Mode
    batch_size: int
    concurrency: int
    samples: int
    elapsed_s: float

    @property
    def throughput(self) -> float:
        # Guard against a zero elapsed time (degenerate run).
        if not self.elapsed_s:
            return 0.0
        return self.samples / self.elapsed_s

    def show(self) -> None:
        """Print a human-readable summary of this result to stdout."""
        divider = "=" * 60
        print("\n" + divider)
        print(f"BENCHMARK - {self.mode}")
        print(divider)
        print(f"Samples : {self.samples}")
        print(f"Batch size : {self.batch_size}")
        print(f"Concurrency : {self.concurrency}")
        print(f"Time (s) : {self.elapsed_s:.2f}")
        print(f"Throughput : {self.throughput:.2f} req/s")
        print(divider)
def build_single_vllm_engine_processor(
    batch_size: int,
    concurrency: int,
    model: str,
    sampling_params: dict = VLLM_SAMPLING_PARAMS,
    pipeline_parallel_size: int = None,
    tensor_parallel_size: int = None,
    distributed_executor_backend: str = None,
):
    """Build vLLM engine processor for single-turn benchmark."""
    # NOTE(review): `sampling_params` defaults to a shared module-level dict;
    # callers must not mutate it.
    config = _build_vllm_engine_config(
        model,
        batch_size,
        concurrency,
        pipeline_parallel_size,
        tensor_parallel_size,
        distributed_executor_backend,
    )
    return build_processor(
        config,
        preprocess=lambda row: dict(
            prompt=row["prompt"],
            sampling_params=sampling_params,
        ),
        postprocess=lambda row: row,
    )
def build_shared_vllm_engine_processor(
    batch_size: int,
    concurrency: int,
    model: str,
    sampling_params: dict = VLLM_SAMPLING_PARAMS,
    pipeline_parallel_size: int = None,
    tensor_parallel_size: int = None,
    distributed_executor_backend: str = None,
):
    """Build vLLM engine processor for multi-turn benchmark."""
    config = _build_vllm_engine_config(
        model,
        batch_size,
        concurrency,
        pipeline_parallel_size,
        tensor_parallel_size,
        distributed_executor_backend,
    )
    # First turn: feed each generated text back in as the next prompt.
    processor1 = build_processor(
        config,
        preprocess=lambda row: dict(
            prompt=row["prompt"],
            sampling_params=sampling_params,
        ),
        # Fall back to the original prompt when the generation is empty.
        postprocess=lambda row: {
            "prompt": row["generated_text"]
            if str(row.get("generated_text", "")).strip()
            else row["prompt"]
        },
    )
    # Second turn: plain generation on the first turn's output.
    processor2 = build_processor(
        config,
        preprocess=lambda row: dict(
            prompt=row["prompt"],
            sampling_params=sampling_params,
        ),
        postprocess=lambda row: row,
    )
    def multi_turn_processor(dataset):
        # Chain the two processors; both share the same engine config.
        return processor2(processor1(dataset))
    return multi_turn_processor
def build_classify_processor(
    batch_size: int,
    concurrency: int,
    model: str,
    pooling_params: dict = CLASSIFY_POOLING_PARAMS,
    max_model_len: int = 512,
    distributed_executor_backend: str = None,
):
    """Build vLLM engine processor for classification benchmark."""
    engine_kwargs = VLLM_ENGINE_KWARGS.copy()
    if distributed_executor_backend is not None:
        engine_kwargs["distributed_executor_backend"] = distributed_executor_backend
    # Classification only needs tokenization; chat templating and
    # detokenization are disabled.
    config = vLLMEngineProcessorConfig(
        model_source=model,
        task_type="classify",
        batch_size=batch_size,
        concurrency=concurrency,
        chat_template_stage=ChatTemplateStageConfig(enabled=False),
        tokenize_stage=TokenizerStageConfig(enabled=True),
        detokenize_stage=DetokenizeStageConfig(enabled=False),
        engine_kwargs=engine_kwargs,
    )
    return build_processor(
        config,
        preprocess=lambda row: dict(
            prompt=row["prompt"],
            pooling_params=pooling_params,
        ),
        # Surface the first pooled value as the class probability, or None
        # when no embedding was produced.
        postprocess=lambda row: {
            "probs": float(row["embeddings"][0])
            if row.get("embeddings") is not None and len(row["embeddings"]) > 0
            else None,
        },
    )
def setup_serve_deployment(model: str, concurrency: int) -> tuple[str, str]:
    """Set up Ray Serve deployment for hosting the LLM model.

    Deploys `model` behind a Serve LLM app and blocks (polling every 5s, up
    to 120s) until the app reports RUNNING.

    Returns:
        (deployment_name, app_name) of the deployed application.

    Raises:
        TimeoutError: if the app is not RUNNING within the polling window.
    """
    deployment_name = "benchmark_deployment"
    app_name = "benchmark_app"
    llm_config = LLMConfig(
        model_loading_config=ModelLoadingConfig(
            model_id=model,
            model_source=model,
        ),
        deployment_config=dict(
            name=deployment_name,
            # To fairly compare with vLLM engine processor, fix the number of replicas to the concurrency level
            autoscaling_config=dict(
                min_replicas=concurrency,
                max_replicas=concurrency,
            ),
        ),
        engine_kwargs=dict(
            enable_prefix_caching=True,
            enable_chunked_prefill=True,
            max_num_batched_tokens=4096,
        ),
    )
    override_serve_options = dict(name=deployment_name)
    llm_app = build_llm_deployment(
        llm_config, override_serve_options=override_serve_options
    )
    serve.run(llm_app, name=app_name)
    print("Waiting for Serve deployment to be ready...")
    max_wait_time = 120  # seconds
    wait_time = 0
    # Poll Serve status until the app is RUNNING or the deadline passes.
    while not _is_app_ready(app_name) and wait_time < max_wait_time:
        sleep(5)
        wait_time += 5
    if wait_time >= max_wait_time:
        raise TimeoutError("Deployment failed to become ready within timeout")
    print("Deployment is ready!")
    return deployment_name, app_name
def _is_app_ready(app_name: str) -> bool:
    """Return True iff the Serve application reports RUNNING status.

    Any error while querying Serve is logged and treated as "not ready".
    """
    try:
        serve_status = serve.status()
        if app_name in serve_status.applications:
            app_status = serve_status.applications[app_name]
            if app_status.status == "RUNNING":
                print(f"Application '{app_name}' is RUNNING.")
                return True
            else:
                print(f"Application '{app_name}' status: {app_status.status}")
                return False
        else:
            print(f"Application '{app_name}' not found in Serve status.")
            return False
    except Exception as e:
        print(f"Error checking app status: {e}")
        return False
def build_single_serve_deployment_processor(
batch_size: int,
concurrency: int,
model: str,
sampling_params: dict = VLLM_SAMPLING_PARAMS,
deployment_name: str = None,
app_name: str = None,
**kwargs,
):
"""Build Serve deployment processor for single-turn benchmark."""
config = _build_serve_deployment_config(
batch_size,
concurrency,
deployment_name,
app_name,
)
return build_processor(
config,
preprocess=lambda row: dict(
method="completions",
dtype="CompletionRequest",
request_kwargs=dict(
model=model,
prompt=row["prompt"],
**sampling_params,
),
),
postprocess=lambda row: row,
)
def build_shared_serve_deployment_processor(
    batch_size: int,
    concurrency: int,
    model: str,
    sampling_params: dict = VLLM_SAMPLING_PARAMS,
    deployment_name: str = None,
    app_name: str = None,
    **kwargs,
):
    """Build Serve deployment processor for multi-turn benchmark."""
    config = _build_serve_deployment_config(
        batch_size,
        concurrency,
        deployment_name,
        app_name,
    )

    def _completion_request(row):
        # Non-streaming completion request for one turn of the conversation.
        return dict(
            method="completions",
            dtype="CompletionRequest",
            request_kwargs=dict(
                model=model,
                prompt=row["prompt"],
                stream=False,
            ),
        )

    def _next_prompt(row):
        # Feed turn one's generation back in as turn two's prompt; fall back
        # to the original prompt when the generated text is empty/whitespace.
        choices = row.get("choices")
        if choices and str(choices[0].get("text", "")).strip():
            return {"prompt": choices[0]["text"]}
        return {"prompt": row["prompt"]}

    turn_one = build_processor(
        config,
        preprocess=_completion_request,
        postprocess=_next_prompt,
    )
    turn_two = build_processor(
        config,
        preprocess=_completion_request,
        postprocess=lambda row: row,
    )

    def multi_turn_processor(dataset):
        # Chain the two single-turn processors into one two-turn pipeline.
        return turn_two(turn_one(dataset))

    return multi_turn_processor
# -----------------------------------------------------------------------------
# Benchmark execution
# -----------------------------------------------------------------------------
def run_processor(
    mode: Mode,
    dataset: data.Dataset,
    builder,
    **kwargs,
) -> BenchmarkResult:
    """Build a processor via ``builder`` and time one full materialize pass."""
    processor = builder(**kwargs)
    num_rows = dataset.count()
    started = perf_counter()
    # materialize() forces execution of the whole pipeline; that is what we time.
    processor(dataset).materialize()
    return BenchmarkResult(
        mode=mode,
        batch_size=kwargs.get("batch_size"),
        concurrency=kwargs.get("concurrency"),
        samples=num_rows,
        elapsed_s=perf_counter() - started,
    )
def benchmark(
    mode: Mode,
    dataset: data.Dataset,
    *,
    batch_size: int,
    concurrency: int,
    model: str,
    sampling_params: dict = VLLM_SAMPLING_PARAMS,
    pipeline_parallel_size: int = None,
    tensor_parallel_size: int = None,
    distributed_executor_backend: str = None,
) -> BenchmarkResult:
    """Dispatch to the processor builder for ``mode`` and time one run.

    Args:
        mode: Which processor flavor to benchmark.
        dataset: Input Ray dataset of prompt rows.
        batch_size: Ray Data batch size passed to the processor builder.
        concurrency: Ray Data concurrency level (also sizes Serve replicas
            for the Serve-backed modes).
        model: Model identifier handed to the builder.
        sampling_params: Generation sampling parameters (ignored by CLASSIFY).
        pipeline_parallel_size: vLLM pipeline parallelism (engine modes only).
        tensor_parallel_size: vLLM tensor parallelism (engine modes only).
        distributed_executor_backend: vLLM executor backend (non-Serve modes).

    Returns:
        BenchmarkResult describing the timed run.

    Raises:
        ValueError: If ``mode`` has no registered builder.
    """
    # Registry mapping each benchmark mode to its processor-builder function.
    mode_to_builder = {
        Mode.VLLM_ENGINE: build_single_vllm_engine_processor,
        Mode.SHARED_VLLM_ENGINE: build_shared_vllm_engine_processor,
        Mode.SERVE_DEPLOYMENT: build_single_serve_deployment_processor,
        Mode.SHARED_SERVE_DEPLOYMENT: build_shared_serve_deployment_processor,
        Mode.CLASSIFY: build_classify_processor,
    }
    if mode not in mode_to_builder:
        raise ValueError(f"Unknown benchmark mode: {mode}")
    builder = mode_to_builder[mode]
    if mode in [Mode.SERVE_DEPLOYMENT, Mode.SHARED_SERVE_DEPLOYMENT]:
        # Serve modes need a live deployment before the run; tear it down in
        # finally so a failed run still cleans up the Serve app.
        deployment_name, app_name = setup_serve_deployment(model, concurrency)
        try:
            return run_processor(
                mode,
                dataset,
                builder,
                batch_size=batch_size,
                concurrency=concurrency,
                model=model,
                sampling_params=sampling_params,
                deployment_name=deployment_name,
                app_name=app_name,
            )
        finally:
            serve.delete(app_name)
    elif mode == Mode.CLASSIFY:
        # Classification uses pooling params instead of sampling params.
        return run_processor(
            mode,
            dataset,
            builder,
            batch_size=batch_size,
            concurrency=concurrency,
            model=model,
            pooling_params=CLASSIFY_POOLING_PARAMS,
            distributed_executor_backend=distributed_executor_backend,
        )
    else:
        # Engine modes: pass the vLLM parallelism knobs straight through.
        return run_processor(
            mode,
            dataset,
            builder,
            batch_size=batch_size,
            concurrency=concurrency,
            model=model,
            sampling_params=sampling_params,
            pipeline_parallel_size=pipeline_parallel_size,
            tensor_parallel_size=tensor_parallel_size,
            distributed_executor_backend=distributed_executor_backend,
        )
# -----------------------------------------------------------------------------
# CLI
# -----------------------------------------------------------------------------
def parse_args(argv: list[str]) -> argparse.Namespace:
    """Parse benchmark CLI arguments.

    Args:
        argv: Argument list excluding the program name (i.e. ``sys.argv[1:]``).

    Returns:
        Parsed namespace of benchmark options.
    """
    parser = argparse.ArgumentParser(description="vLLM throughput benchmark")
    parser.add_argument(
        "--mode",
        choices=[mode.value for mode in Mode],
        default=Mode.VLLM_ENGINE.value,
        help="Ray Data LLM processor to run benchmarks for",
    )
    # Dataset configuration
    parser.add_argument(
        "--dataset-path",
        type=str,
        default="/home/ubuntu/datasets/Code-feedback-sharegpt-renamed",
        help="Path to dataset on disk",
    )
    parser.add_argument(
        "--num-prompts", type=int, default=1000, help="Number of prompts to process"
    )
    parser.add_argument(
        "--hf-dataset-id",
        type=str,
        default="Crystalcareai/Code-feedback-sharegpt-renamed",
        help="Hugging Face dataset ID to download",
    )
    parser.add_argument(
        "--hf-split",
        type=str,
        default="train",
        help="Hugging Face dataset split to load",
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=0,
        help="Random seed for dataset sampling",
    )
    parser.add_argument(
        "--truncate-prompt",
        type=int,
        default=512,
        help="Maximum prompt length",
    )
    # Engine configuration
    parser.add_argument(
        "--model",
        type=str,
        required=True,
        help="LLM model to use",
    )
    parser.add_argument(
        "--pipeline-parallel-size",
        type=int,
        default=1,
        help="Pipeline parallel size for vLLM engine",
    )
    parser.add_argument(
        "--tensor-parallel-size",
        type=int,
        default=1,
        help="Tensor parallel size for vLLM engine",
    )
    parser.add_argument(
        "--distributed-executor-backend",
        type=str,
        default=None,
        choices=["ray", "mp", "uni"],
        help="Distributed executor backend for vLLM engine",
    )
    parser.add_argument(
        "--max-tokens",
        type=int,
        default=None,
        # Fix: the old help text claimed "(default: 100)", but the default is
        # None; main() only overrides sampling_params when a value is given,
        # so the effective default comes from VLLM_SAMPLING_PARAMS.
        help="Maximum number of tokens to generate per request "
        "(default: use the value from VLLM_SAMPLING_PARAMS)",
    )
    # Ray Data worker configuration
    parser.add_argument(
        "--batch-size",
        type=int,
        required=True,
        help="Ray Data batch size for processing",
    )
    parser.add_argument(
        "--concurrency", type=int, required=True, help="Ray Data concurrency level"
    )
    return parser.parse_args(argv)
def main() -> None:
    """CLI entry point: load prompts, run the selected benchmark, show results."""
    args = parse_args(sys.argv[1:])
    ray.init()
    try:
        # Sample prompts from ShareGPT and turn them into a Ray dataset.
        share_gpt = ShareGPTDataset(
            dataset_path=args.dataset_path,
            seed=args.seed,
            hf_dataset_id=args.hf_dataset_id,
            hf_split=args.hf_split,
            truncate_prompt=args.truncate_prompt,
        )
        ds = data.from_items(share_gpt.sample(args.num_prompts))
        # Start from the module defaults and override max_tokens only when
        # the flag was provided.
        sampling_params = dict(VLLM_SAMPLING_PARAMS)
        if args.max_tokens is not None:
            sampling_params["max_tokens"] = args.max_tokens
        benchmark(
            Mode(args.mode),
            ds,
            batch_size=args.batch_size,
            concurrency=args.concurrency,
            model=args.model,
            sampling_params=sampling_params,
            pipeline_parallel_size=args.pipeline_parallel_size,
            tensor_parallel_size=args.tensor_parallel_size,
            distributed_executor_backend=args.distributed_executor_backend,
        ).show()
    finally:
        # Always release the Ray runtime, even when the benchmark fails.
        ray.shutdown()


if __name__ == "__main__":
    main()
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/_internal/batch/benchmark/benchmark_processor.py",
"license": "Apache License 2.0",
"lines": 562,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/llm/_internal/batch/benchmark/dataset.py | """
This module defines a dataset framework for sampling benchmark requests.
"""
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional
from datasets import load_dataset, load_from_disk
class BenchmarkDataset(ABC):
    """Abstract base class for benchmark datasets.

    Concrete datasets must implement :meth:`load_data` and :meth:`sample`.
    """

    # Default RNG seed used when callers do not supply one.
    DEFAULT_RANDOM_SEED = 0

    def __init__(
        self,
        dataset_path: Optional[str] = None,
        random_seed: int = DEFAULT_RANDOM_SEED,
    ) -> None:
        """Store the dataset location and random seed.

        Args:
            dataset_path: The path to the dataset on disk.
            random_seed: The seed for the random number generator.
        """
        self._dataset_path = dataset_path
        self._random_seed = random_seed

    @abstractmethod
    def load_data(self) -> None:
        """Load data from the dataset source into memory.

        Raises:
            NotImplementedError: If the method is not implemented in subclasses.
        """
        raise NotImplementedError("load_data must be implemented in subclasses.")

    @abstractmethod
    def sample(self, num_requests: int) -> List[Dict]:
        """Sample ``num_requests`` prompts from the loaded dataset.

        Returns:
            A list of sampled request dictionaries.

        Raises:
            NotImplementedError: If the method is not implemented in subclasses.
        """
        raise NotImplementedError("sample must be implemented in subclasses.")
class ShareGPTDataset(BenchmarkDataset):
    """Implements the ShareGPT dataset. The first human message of each conversation is used to build a prompt."""

    def __init__(
        self,
        dataset_path: str,
        seed: int,
        hf_dataset_id: str = "Crystalcareai/Code-feedback-sharegpt-renamed",
        hf_split: str = "train",
        truncate_prompt: Optional[int] = None,
    ) -> None:
        """
        Initializes the ShareGPTDataset.
        Args:
            dataset_path: The path to the dataset on disk.
            seed: The seed for the random number generator.
            hf_dataset_id: The Hugging Face dataset ID to download if the dataset is not found on disk.
            hf_split: The Hugging Face split to load from the dataset.
            truncate_prompt: Maximum prompt length so that the prompt fits in the model's context window.
        """
        super().__init__(dataset_path, seed)
        self._seed = seed
        self._hf_dataset_id = hf_dataset_id
        self._hf_split = hf_split
        self._truncate_prompt = truncate_prompt
        # Lazily-populated cache of raw dataset rows; filled on first use.
        self._data: list[Dict] | None = None

    def load_data(self) -> None:
        """Load data from the dataset path into memory."""
        # Idempotent: only loads once, subsequent calls are no-ops.
        if self._data is None:
            self._data = self._load_dataset_data()

    def sample(self, num_requests: int) -> List[Dict]:
        """Sample prompts from the loaded dataset."""
        if self._data is None:
            self.load_data()
        # Extract all valid prompts from the dataset
        all_prompts = []
        for item in self._data:
            prompt_data = self._extract_prompt(item)
            if prompt_data is not None:
                all_prompts.append(prompt_data)
        if not all_prompts:
            raise ValueError("ShareGPT dataset yielded no usable prompts")
        # Replicate samples if num_requests exceeds available samples
        if num_requests <= len(all_prompts):
            return all_prompts[:num_requests]
        # Whole-list copies plus a partial slice cover the remainder exactly.
        full_copies = num_requests // len(all_prompts)
        remainder = num_requests % len(all_prompts)
        prompts = all_prompts * full_copies + all_prompts[:remainder]
        return prompts

    def _load_dataset(self):
        """Load dataset from disk or Hugging Face."""
        path = Path(self._dataset_path)
        print(f"Attempting to load dataset from {path}")
        print(f"Dataset exists on disk: {path.exists()}")
        try:
            if path.exists():
                dataset = load_from_disk(str(path))
            else:
                # Fall back to downloading from the Hub and cache the result
                # on disk so the next run can load locally.
                print(
                    f"Dataset not found on disk, downloading from Hugging Face: {self._hf_dataset_id}"
                )
                path.parent.mkdir(parents=True, exist_ok=True)
                dataset = load_dataset(self._hf_dataset_id, split=self._hf_split)
                dataset.save_to_disk(str(path))
            return dataset
        except Exception as e:
            raise RuntimeError(f"Error loading ShareGPT dataset: {e}")

    def _load_dataset_data(self) -> List[Dict]:
        """Load and process dataset data into a list of dictionaries."""
        # Shuffle with the configured seed for reproducible sampling order.
        # NOTE: this materializes the whole split in memory.
        ds = self._load_dataset().shuffle(seed=self._seed)
        data = []
        for i, row in enumerate(ds):
            data.append(row)
        print(f"Loaded {len(data)} samples from dataset")
        return data

    def _extract_prompt(self, item: Dict) -> Dict | None:
        """
        Extracts the first human message of a conversation or None.
        The ShareGPT schema uses {"role": "human", "value": ...} for user
        turns.
        """
        # Rows may carry the conversation under either key.
        messages = item.get("messages") or item.get("conversations") or []
        prompt = next(
            (
                str(msg.get("value", "")).strip()
                for msg in messages
                if msg.get("role") in {"human", "user"}
            ),
            None,
        )
        # Only return a valid prompt if it's not empty
        if prompt and prompt.strip():
            # Truncation keeps the prompt within the model's context window.
            if self._truncate_prompt:
                prompt = prompt[: self._truncate_prompt]
            return {"prompt": prompt}
        return None
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/_internal/batch/benchmark/dataset.py",
"license": "Apache License 2.0",
"lines": 139,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:release/llm_tests/batch/test_batch_multi_node_vllm.py | import pytest
import ray
from ray.data.llm import build_processor, vLLMEngineProcessorConfig
@pytest.fixture(autouse=True)
def cleanup_ray_resources():
    """Automatically cleanup Ray resources between tests to prevent conflicts."""
    # Run the test first, then tear down the Ray runtime so state from one
    # parametrized case cannot leak into the next.
    yield
    ray.shutdown()
@pytest.mark.parametrize(
    "tp_size,pp_size",
    [
        # Both cases exercise tp * pp = 8 total parallelism, split differently.
        (2, 4),
        (4, 2),
    ],
)
def test_vllm_multi_node(tp_size, pp_size):
    """End-to-end check that a Ray-backed vLLM processor runs with the given
    tensor/pipeline parallel split and produces one response per input row."""
    config = vLLMEngineProcessorConfig(
        model_source="facebook/opt-1.3b",
        engine_kwargs=dict(
            enable_prefix_caching=True,
            enable_chunked_prefill=True,
            max_num_batched_tokens=4096,
            pipeline_parallel_size=pp_size,
            tensor_parallel_size=tp_size,
            # "ray" backend is what allows the engine to span multiple nodes.
            distributed_executor_backend="ray",
        ),
        tokenize=False,
        detokenize=False,
        concurrency=1,
        batch_size=64,
        apply_chat_template=False,
    )
    processor = build_processor(
        config,
        preprocess=lambda row: dict(
            prompt=f"You are a calculator. {row['id']} ** 3 = ?",
            sampling_params=dict(
                temperature=0.3,
                max_tokens=20,
                detokenize=True,
            ),
        ),
        postprocess=lambda row: dict(
            resp=row["generated_text"],
        ),
    )
    ds = ray.data.range(60)
    ds = processor(ds)
    ds = ds.materialize()
    outs = ds.take_all()
    # Every input row must yield exactly one output row with a response.
    assert len(outs) == 60
    assert all("resp" in out for out in outs)
| {
"repo_id": "ray-project/ray",
"file_path": "release/llm_tests/batch/test_batch_multi_node_vllm.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/dashboard/modules/aggregator/multi_consumer_event_buffer.py | import asyncio
import time
from collections import deque
from dataclasses import dataclass
from typing import Dict, List, Optional
from ray._private.telemetry.open_telemetry_metric_recorder import (
OpenTelemetryMetricRecorder,
)
from ray.core.generated import (
events_base_event_pb2,
)
from ray.core.generated.events_base_event_pb2 import RayEvent
from ray.dashboard.modules.aggregator.constants import (
AGGREGATOR_AGENT_METRIC_PREFIX,
CONSUMER_TAG_KEY,
)
@dataclass
class _ConsumerState:
    """Per-consumer bookkeeping for MultiConsumerEventBuffer."""

    # Index of the next event to be consumed by this consumer
    cursor_index: int
class MultiConsumerEventBuffer:
    """A buffer which allows adding one event at a time and consuming events in batches.
    Supports multiple consumers, each with their own cursor index. Tracks the number of events evicted for each consumer.
    Buffer is not thread-safe but is asyncio-friendly. All operations must be called from within the same event loop.
    Arguments:
        max_size: Maximum number of events to store in the buffer.
        max_batch_size: Maximum number of events to return in a batch when calling wait_for_batch.
        common_metric_tags: Tags to add to all metrics.
    """

    def __init__(
        self,
        max_size: int,
        max_batch_size: int,
        common_metric_tags: Optional[Dict[str, str]] = None,
    ):
        # maxlen is a safety net only: add_event evicts manually before
        # appending so the dropped event can be observed and accounted.
        self._buffer = deque(maxlen=max_size)
        self._max_size = max_size
        self._lock = asyncio.Lock()
        # Condition shares self._lock, so waiting/notifying and buffer access
        # happen under the same lock.
        self._has_new_events_to_consume = asyncio.Condition(self._lock)
        self._consumers: Dict[str, _ConsumerState] = {}
        self._max_batch_size = max_batch_size
        self._common_metrics_tags = common_metric_tags or {}
        self._metric_recorder = OpenTelemetryMetricRecorder()
        self.evicted_events_metric_name = (
            f"{AGGREGATOR_AGENT_METRIC_PREFIX}_queue_dropped_events"
        )
        self._metric_recorder.register_counter_metric(
            self.evicted_events_metric_name,
            "Total number of events dropped because the publish/buffer queue was full.",
        )

    async def add_event(self, event: events_base_event_pb2.RayEvent) -> None:
        """Add an event to the buffer.
        If the buffer is full, the oldest event is dropped.
        """
        async with self._lock:
            dropped_event = None
            if len(self._buffer) >= self._max_size:
                # Pop manually (rather than relying on deque's maxlen) so we
                # know exactly which event was evicted.
                dropped_event = self._buffer.popleft()
            self._buffer.append(event)
            if dropped_event is not None:
                for consumer_name, consumer_state in self._consumers.items():
                    # Update consumer cursor index and evicted events metric if an event was dropped
                    if consumer_state.cursor_index == 0:
                        # The dropped event was the next event this consumer would have consumed, publish eviction metric
                        self._metric_recorder.set_metric_value(
                            self.evicted_events_metric_name,
                            {
                                **self._common_metrics_tags,
                                CONSUMER_TAG_KEY: consumer_name,
                                "event_type": RayEvent.EventType.Name(
                                    dropped_event.event_type
                                ),
                            },
                            1,
                        )
                    else:
                        # The dropped event was already consumed by the consumer, so we need to adjust the cursor
                        consumer_state.cursor_index -= 1
            # Signal the consumers that there are new events to consume
            self._has_new_events_to_consume.notify_all()

    def _evict_old_events(self) -> None:
        """Clean the buffer by removing events from the buffer who have index lower than
        all the cursor indexes of all consumers and updating the cursor index of all
        consumers.

        Must be called while holding self._lock (wait_for_batch calls it from
        inside the condition's context manager).
        """
        if not self._consumers:
            return
        # Events below the slowest consumer's cursor can never be read again.
        min_cursor_index = min(
            consumer_state.cursor_index for consumer_state in self._consumers.values()
        )
        for _ in range(min_cursor_index):
            self._buffer.popleft()
        # update the cursor index of all consumers
        for consumer_state in self._consumers.values():
            consumer_state.cursor_index -= min_cursor_index

    async def wait_for_batch(
        self, consumer_name: str, timeout_seconds: float = 1.0
    ) -> List[events_base_event_pb2.RayEvent]:
        """Wait for batch respecting the configured max batch size and timeout_seconds.
        Returns a batch of up to the configured max batch size. Waits for up to
        timeout_seconds after receiving the first event that will be in
        the next batch. After the timeout, returns as many items as are ready.
        Always returns a batch with at least one item - will block
        indefinitely until an item comes in.
        Arguments:
            consumer_name: name of the consumer consuming the batch
            timeout_seconds: maximum time to wait for a batch
        Returns:
            A list of up to max_batch_size events ready for consumption.
            The list always contains at least one event.
        """
        max_batch = self._max_batch_size
        batch = []
        async with self._has_new_events_to_consume:
            consumer_state = self._consumers.get(consumer_name)
            if consumer_state is None:
                raise KeyError(f"unknown consumer '{consumer_name}'")
            # Phase 1: read the first event, wait indefinitely until there is at least one event to consume
            while consumer_state.cursor_index >= len(self._buffer):
                await self._has_new_events_to_consume.wait()
            # Add the first event to the batch
            event = self._buffer[consumer_state.cursor_index]
            consumer_state.cursor_index += 1
            batch.append(event)
            # Phase 2: add items to the batch up to timeout or until full
            deadline = time.monotonic() + max(0.0, float(timeout_seconds))
            while len(batch) < max_batch:
                remaining = deadline - time.monotonic()
                if remaining <= 0:
                    break
                # Drain whatever is available
                while len(batch) < max_batch and consumer_state.cursor_index < len(
                    self._buffer
                ):
                    batch.append(self._buffer[consumer_state.cursor_index])
                    consumer_state.cursor_index += 1
                if len(batch) >= max_batch:
                    break
                # There is still room in the batch, but no new events to consume; wait until notified or timeout
                # NOTE(review): wrapping Condition.wait() in asyncio.wait_for
                # relies on wait() re-acquiring the lock when cancelled;
                # CPython does this, but confirm for supported Python versions.
                try:
                    await asyncio.wait_for(
                        self._has_new_events_to_consume.wait(), remaining
                    )
                except asyncio.TimeoutError:
                    # Timeout, return the current batch
                    break
            # Drop events that every registered consumer has already read.
            self._evict_old_events()
            return batch

    async def register_consumer(self, consumer_name: str) -> None:
        """Register a new consumer with a name.
        Arguments:
            consumer_name: A unique name for the consumer.

        Raises:
            ValueError: If a consumer with this name is already registered.
        """
        async with self._lock:
            if self._consumers.get(consumer_name) is not None:
                raise ValueError(f"consumer '{consumer_name}' already registered")
            # New consumers start at the head of the current buffer contents.
            self._consumers[consumer_name] = _ConsumerState(cursor_index=0)

    async def size(self) -> int:
        """Get total number of events in the buffer. Does not take consumer cursors into account."""
        # No lock needed: len() is a single read and all access is on one loop.
        return len(self._buffer)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/dashboard/modules/aggregator/multi_consumer_event_buffer.py",
"license": "Apache License 2.0",
"lines": 160,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/dashboard/modules/aggregator/publisher/async_publisher_client.py | import json
import logging
from abc import ABC, abstractmethod
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass
from typing import List, Optional
import aiohttp
import ray.dashboard.utils as dashboard_utils
from ray._common.utils import get_or_create_event_loop
from ray._private.protobuf_compat import message_to_json
from ray._raylet import GcsClient
from ray.core.generated import (
events_base_event_pb2,
events_event_aggregator_service_pb2,
)
from ray.dashboard.modules.aggregator.publisher.configs import (
GCS_EXPOSABLE_EVENT_TYPES,
HTTP_EXPOSABLE_EVENT_TYPES,
PUBLISHER_TIMEOUT_SECONDS,
)
logger = logging.getLogger(__name__)
@dataclass
class PublishStats:
    """Data class that represents stats of publishing a batch of events."""

    # Whether the publish attempt succeeded (filtering everything out still
    # counts as success)
    is_publish_successful: bool
    # Number of events actually delivered to the destination
    num_events_published: int
    # Number of events dropped by the client's event-type filter
    num_events_filtered_out: int
@dataclass
class PublishBatch:
    """Data class that represents a batch of events to publish."""

    # The list of events to publish
    events: list[events_base_event_pb2.RayEvent]
    # Metadata about dropped task events; consumed by the GCS publisher path,
    # ignored by the HTTP publisher
    task_events_metadata: Optional[
        events_event_aggregator_service_pb2.TaskEventsMetadata
    ] = None
class PublisherClientInterface(ABC):
    """Abstract interface for publishing Ray event batches to external destinations.

    Concrete implementations own the destination-specific publishing logic,
    event-type filtering, and payload format conversion.
    """

    def __init__(self):
        # Allow-list of event-type names; an empty list exposes nothing unless
        # _allow_all_event_types is set by the subclass.
        self._exposable_event_types_list: List[str] = []
        self._allow_all_event_types: bool = False

    def count_num_events_in_batch(self, batch: PublishBatch) -> int:
        """Return how many events the given batch carries."""
        return len(batch.events)

    def _can_expose_event(self, event) -> bool:
        """Return True when ``event`` passes this client's type filter."""
        if self._allow_all_event_types:
            return True
        allowed = self._exposable_event_types_list
        if not allowed:
            return False
        type_name = events_base_event_pb2.RayEvent.EventType.Name(event.event_type)
        return type_name in allowed

    @abstractmethod
    async def publish(self, batch: PublishBatch) -> PublishStats:
        """Publish a batch of events to the destination."""

    @abstractmethod
    async def close(self) -> None:
        """Clean up any resources used by this client. Should be called when the publisherClient is no longer required"""
class AsyncHttpPublisherClient(PublisherClientInterface):
    """Client for publishing ray event batches to an external HTTP service."""

    def __init__(
        self,
        endpoint: str,
        executor: ThreadPoolExecutor,
        timeout: float = PUBLISHER_TIMEOUT_SECONDS,
        preserve_proto_field_name: bool = False,
    ) -> None:
        """
        Args:
            endpoint: URL to POST event batches to.
            executor: Thread pool used to offload proto->JSON conversion.
            timeout: Total aiohttp request timeout in seconds.
            preserve_proto_field_name: Keep snake_case proto field names in
                the JSON payload instead of lowerCamelCase.
        """
        super().__init__()
        self._endpoint = endpoint
        self._executor = executor
        self._timeout = aiohttp.ClientTimeout(total=timeout)
        # Created lazily on first publish, or injected via set_session().
        self._session = None
        self._preserve_proto_field_name = preserve_proto_field_name
        # "ALL" (case-insensitive) disables filtering entirely; otherwise the
        # env value is parsed as a comma-separated allow-list.
        if HTTP_EXPOSABLE_EVENT_TYPES.strip().upper() == "ALL":
            self._allow_all_event_types = True
            self._exposable_event_types_list = []
        else:
            self._exposable_event_types_list = [
                event_type.strip()
                for event_type in HTTP_EXPOSABLE_EVENT_TYPES.split(",")
                if event_type.strip()
            ]

    async def publish(self, batch: PublishBatch) -> PublishStats:
        """Filter the batch, convert it to JSON, and POST it to the endpoint.

        Returns success with zero published events when the batch is empty or
        entirely filtered out; returns failure (with zero counts) when the
        HTTP request raises.
        """
        events_batch: list[events_base_event_pb2.RayEvent] = batch.events
        if not events_batch:
            # Nothing to publish -> success but nothing published
            return PublishStats(
                is_publish_successful=True,
                num_events_published=0,
                num_events_filtered_out=0,
            )
        filtered = [e for e in events_batch if self._can_expose_event(e)]
        num_filtered_out = len(events_batch) - len(filtered)
        if not filtered:
            # All filtered out -> success but nothing published
            return PublishStats(
                is_publish_successful=True,
                num_events_published=0,
                num_events_filtered_out=num_filtered_out,
            )
        # Convert protobuf objects to python dictionaries for HTTP POST. Run in executor to avoid blocking the event loop.
        filtered_json = await get_or_create_event_loop().run_in_executor(
            self._executor,
            lambda: [
                json.loads(
                    message_to_json(
                        e,
                        always_print_fields_with_no_presence=True,
                        preserving_proto_field_name=self._preserve_proto_field_name,
                    )
                )
                for e in filtered
            ],
        )
        try:
            # Create session on first use (lazy initialization)
            if not self._session:
                self._session = aiohttp.ClientSession(timeout=self._timeout)
            return self._send_http_request and await self._send_http_request(filtered_json, num_filtered_out) if False else await self._send_http_request(filtered_json, num_filtered_out)
        except Exception as e:
            logger.error("Failed to send events to external service. Error: %r", e)
            # NOTE(review): the filtered count is reported as 0 on failure —
            # presumably so a retry re-counts the batch; confirm with callers.
            return PublishStats(
                is_publish_successful=False,
                num_events_published=0,
                num_events_filtered_out=0,
            )

    async def _send_http_request(self, json_data, num_filtered_out) -> PublishStats:
        """POST ``json_data`` and translate the response into PublishStats.

        Raises on network errors or non-2xx responses (via raise_for_status).
        """
        async with self._session.post(
            self._endpoint,
            json=json_data,
        ) as resp:
            resp.raise_for_status()
            return PublishStats(
                is_publish_successful=True,
                num_events_published=len(json_data),
                num_events_filtered_out=num_filtered_out,
            )

    async def close(self) -> None:
        """Closes the http session if one was created. Should be called when the publisherClient is no longer required"""
        if self._session:
            await self._session.close()
            self._session = None

    def set_session(self, session) -> None:
        """Inject an HTTP client session.
        If a session is set explicitly, it will be used and managed by close().
        """
        self._session = session
class AsyncGCSTaskEventsPublisherClient(PublisherClientInterface):
    """Client for publishing ray event batches to GCS."""

    def __init__(
        self,
        gcs_client: GcsClient,
        executor: ThreadPoolExecutor,
        timeout_s: float = PUBLISHER_TIMEOUT_SECONDS,
    ) -> None:
        """
        Args:
            gcs_client: GCS client used for the async AddEvents RPC.
            executor: Thread pool used to offload proto serialization.
            timeout_s: RPC timeout in seconds.
        """
        super().__init__()
        self._gcs_client = gcs_client
        self._executor = executor
        self._timeout_s = timeout_s
        # Fixed allow-list (task event types only); unlike the HTTP client,
        # this is not configurable via environment variable.
        self._exposable_event_types_list = GCS_EXPOSABLE_EVENT_TYPES

    async def publish(
        self,
        batch: PublishBatch,
    ) -> PublishStats:
        """Filter the batch and send it (plus any dropped-task metadata) to GCS.

        A batch with no publishable events is still sent when it carries
        dropped-task-attempt metadata.
        """
        events = batch.events
        task_events_metadata = batch.task_events_metadata
        has_dropped_task_attempts = (
            task_events_metadata and task_events_metadata.dropped_task_attempts
        )
        if not events and not has_dropped_task_attempts:
            # Nothing to publish -> success but nothing published
            return PublishStats(
                is_publish_successful=True,
                num_events_published=0,
                num_events_filtered_out=0,
            )
        # Filter events based on exposable event types
        filtered_events = [e for e in events if self._can_expose_event(e)]
        num_filtered_out = len(events) - len(filtered_events)
        if not filtered_events and not has_dropped_task_attempts:
            # all events filtered out and no task events metadata -> success but nothing published
            return PublishStats(
                is_publish_successful=True,
                num_events_published=0,
                num_events_filtered_out=num_filtered_out,
            )
        try:
            events_data = self._create_ray_events_data(
                filtered_events, task_events_metadata
            )
            request = events_event_aggregator_service_pb2.AddEventsRequest(
                events_data=events_data
            )
            # Serialize off the event loop; proto serialization can be costly
            # for large batches.
            serialized_request = await get_or_create_event_loop().run_in_executor(
                self._executor,
                lambda: request.SerializeToString(),
            )
            status_code = await self._gcs_client.async_add_events(
                serialized_request, self._timeout_s, self._executor
            )
            if status_code != dashboard_utils.HTTPStatusCode.OK:
                logger.error(f"GCS AddEvents failed: {status_code}")
                return PublishStats(
                    is_publish_successful=False,
                    num_events_published=0,
                    num_events_filtered_out=0,
                )
            return PublishStats(
                is_publish_successful=True,
                num_events_published=len(filtered_events),
                num_events_filtered_out=num_filtered_out,
            )
        except Exception as e:
            logger.error(f"Failed to send events to GCS: {e}")
            return PublishStats(
                is_publish_successful=False,
                num_events_published=0,
                num_events_filtered_out=0,
            )

    def _create_ray_events_data(
        self,
        event_batch: List[events_base_event_pb2.RayEvent],
        task_events_metadata: Optional[
            events_event_aggregator_service_pb2.TaskEventsMetadata
        ] = None,
    ) -> events_event_aggregator_service_pb2.RayEventsData:
        """
        Helper method to create RayEventsData from event batch and metadata.
        """
        events_data = events_event_aggregator_service_pb2.RayEventsData()
        events_data.events.extend(event_batch)
        if task_events_metadata:
            events_data.task_events_metadata.CopyFrom(task_events_metadata)
        return events_data

    async def close(self) -> None:
        """No-op: this client creates no sessions or resources of its own."""
        pass
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/dashboard/modules/aggregator/publisher/async_publisher_client.py",
"license": "Apache License 2.0",
"lines": 248,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/dashboard/modules/aggregator/publisher/configs.py | # Environment variables for the aggregator agent publisher component.
import os
from ray._private import ray_constants
env_var_prefix = "RAY_DASHBOARD_AGGREGATOR_AGENT_PUBLISHER"
# Timeout for the publisher to publish events to the destination
PUBLISHER_TIMEOUT_SECONDS = ray_constants.env_integer(
f"{env_var_prefix}_TIMEOUT_SECONDS", 3
)
# Maximum number of retries for publishing events to the destination, if less than 0, will retry indefinitely
PUBLISHER_MAX_RETRIES = ray_constants.env_integer(f"{env_var_prefix}_MAX_RETRIES", -1)
# Initial backoff time for publishing events to the destination
PUBLISHER_INITIAL_BACKOFF_SECONDS = ray_constants.env_float(
f"{env_var_prefix}_INITIAL_BACKOFF_SECONDS", 0.01
)
# Maximum backoff time for publishing events to the destination
PUBLISHER_MAX_BACKOFF_SECONDS = ray_constants.env_float(
f"{env_var_prefix}_MAX_BACKOFF_SECONDS", 5.0
)
# Jitter ratio for publishing events to the destination
PUBLISHER_JITTER_RATIO = ray_constants.env_float(f"{env_var_prefix}_JITTER_RATIO", 0.1)
# Maximum sleep time between sending batches of events to the destination, should be greater than 0.0 to avoid busy looping
PUBLISHER_MAX_BUFFER_SEND_INTERVAL_SECONDS = ray_constants.env_float(
f"{env_var_prefix}_MAX_BUFFER_SEND_INTERVAL_SECONDS", 0.1
)
# HTTP Publisher specific configurations
# Comma-separated list of event types that are allowed to be exposed to external HTTP services
# Valid values: TASK_DEFINITION_EVENT, TASK_LIFECYCLE_EVENT, ACTOR_TASK_DEFINITION_EVENT, etc.
# Set to "ALL" to allow all event types.
# The list of all supported event types can be found in src/ray/protobuf/public/events_base_event.proto (EventType enum)
# By default TASK_PROFILE_EVENT is not exposed to external services
DEFAULT_HTTP_EXPOSABLE_EVENT_TYPES = (
"TASK_DEFINITION_EVENT,TASK_LIFECYCLE_EVENT,ACTOR_TASK_DEFINITION_EVENT,"
"DRIVER_JOB_DEFINITION_EVENT,DRIVER_JOB_LIFECYCLE_EVENT,"
"ACTOR_DEFINITION_EVENT,ACTOR_LIFECYCLE_EVENT,"
"NODE_DEFINITION_EVENT,NODE_LIFECYCLE_EVENT,"
)
HTTP_EXPOSABLE_EVENT_TYPES = os.environ.get(
"RAY_DASHBOARD_AGGREGATOR_AGENT_EXPOSABLE_EVENT_TYPES",
DEFAULT_HTTP_EXPOSABLE_EVENT_TYPES,
)
# GCS Publisher specific configurations
# List of event types that are allowed to be exposed to GCS, not overriden by environment variable
# as GCS only supports Task event types
GCS_EXPOSABLE_EVENT_TYPES = [
"TASK_DEFINITION_EVENT",
"TASK_LIFECYCLE_EVENT",
"TASK_PROFILE_EVENT",
"ACTOR_TASK_DEFINITION_EVENT",
]
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/dashboard/modules/aggregator/publisher/configs.py",
"license": "Apache License 2.0",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/dashboard/modules/aggregator/publisher/metrics.py | from ray._private.telemetry.open_telemetry_metric_recorder import (
OpenTelemetryMetricRecorder,
)
from ray.dashboard.modules.aggregator.constants import (
AGGREGATOR_AGENT_METRIC_PREFIX,
)
# OpenTelemetry metrics setup (registered once at import time)
metric_recorder = OpenTelemetryMetricRecorder()
# Counter metrics
published_counter_name = f"{AGGREGATOR_AGENT_METRIC_PREFIX}_published_events"
metric_recorder.register_counter_metric(
published_counter_name,
"Total number of events successfully published to the destination.",
)
filtered_counter_name = f"{AGGREGATOR_AGENT_METRIC_PREFIX}_filtered_events"
metric_recorder.register_counter_metric(
filtered_counter_name,
"Total number of events filtered out before publishing to the destination.",
)
failed_counter_name = f"{AGGREGATOR_AGENT_METRIC_PREFIX}_publish_failures"
metric_recorder.register_counter_metric(
failed_counter_name,
"Total number of events that failed to publish after retries.",
)
# Histogram metric
publish_latency_hist_name = f"{AGGREGATOR_AGENT_METRIC_PREFIX}_publish_latency_seconds"
metric_recorder.register_histogram_metric(
publish_latency_hist_name,
"Duration of publish calls in seconds.",
[0.001, 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2, 5],
)
# Gauge metrics
consecutive_failures_gauge_name = (
f"{AGGREGATOR_AGENT_METRIC_PREFIX}_consecutive_failures_since_last_success"
)
metric_recorder.register_gauge_metric(
consecutive_failures_gauge_name,
"Number of consecutive failed publish attempts since the last success.",
)
time_since_last_success_gauge_name = (
f"{AGGREGATOR_AGENT_METRIC_PREFIX}_time_since_last_success_seconds"
)
metric_recorder.register_gauge_metric(
time_since_last_success_gauge_name,
"Seconds since the last successful publish to the destination.",
)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/dashboard/modules/aggregator/publisher/metrics.py",
"license": "Apache License 2.0",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/dashboard/modules/aggregator/publisher/ray_event_publisher.py | import asyncio
import logging
import random
from abc import ABC, abstractmethod
from typing import Dict, Optional
from ray.dashboard.modules.aggregator.constants import (
CONSUMER_TAG_KEY,
)
from ray.dashboard.modules.aggregator.multi_consumer_event_buffer import (
MultiConsumerEventBuffer,
)
from ray.dashboard.modules.aggregator.publisher.async_publisher_client import (
PublishBatch,
PublisherClientInterface,
)
from ray.dashboard.modules.aggregator.publisher.configs import (
PUBLISHER_INITIAL_BACKOFF_SECONDS,
PUBLISHER_JITTER_RATIO,
PUBLISHER_MAX_BACKOFF_SECONDS,
PUBLISHER_MAX_BUFFER_SEND_INTERVAL_SECONDS,
PUBLISHER_MAX_RETRIES,
)
from ray.dashboard.modules.aggregator.publisher.metrics import (
consecutive_failures_gauge_name,
failed_counter_name,
filtered_counter_name,
metric_recorder,
publish_latency_hist_name,
published_counter_name,
time_since_last_success_gauge_name,
)
from ray.dashboard.modules.aggregator.task_events_metadata_buffer import (
TaskEventsMetadataBuffer,
)
logger = logging.getLogger(__name__)
class RayEventPublisherInterface(ABC):
    """Abstract interface for publishing Ray event batches to external destinations."""

    @abstractmethod
    async def run_forever(self) -> None:
        """Run the publisher forever until cancellation or process death."""
        ...

    @abstractmethod
    async def wait_until_running(self, timeout: Optional[float] = None) -> bool:
        """Wait until the publisher has started."""
        ...
class RayEventPublisher(RayEventPublisherInterface):
    """RayEvents publisher that publishes batches of events to a destination by running a worker loop.

    The worker loop continuously pulls batches from the event buffer and publishes them to the destination.
    """

    # Cap the exponent so 2**attempt never becomes a needlessly large
    # intermediate value in the backoff computation.
    _MAX_BACKOFF_EXPONENT = 30

    def __init__(
        self,
        name: str,
        publish_client: PublisherClientInterface,
        event_buffer: MultiConsumerEventBuffer,
        common_metric_tags: Optional[Dict[str, str]] = None,
        task_metadata_buffer: Optional[TaskEventsMetadataBuffer] = None,
        max_retries: int = PUBLISHER_MAX_RETRIES,
        initial_backoff: float = PUBLISHER_INITIAL_BACKOFF_SECONDS,
        max_backoff: float = PUBLISHER_MAX_BACKOFF_SECONDS,
        jitter_ratio: float = PUBLISHER_JITTER_RATIO,
    ) -> None:
        """Initialize a RayEventsPublisher.

        Args:
            name: Name identifier for this publisher instance
            publish_client: Client for publishing events to the destination
            event_buffer: Buffer for reading batches of events
            common_metric_tags: Common labels for all prometheus metrics
            task_metadata_buffer: Buffer for reading a batch of dropped task metadata
            max_retries: Maximum number of retries for failed publishes; a
                negative value retries indefinitely
            initial_backoff: Initial backoff time between retries in seconds
            max_backoff: Maximum backoff time between retries in seconds
            jitter_ratio: Random jitter ratio to add to backoff times
        """
        self._name = name
        # Every metric emitted by this publisher is tagged with its consumer name.
        self._common_metric_tags = dict(common_metric_tags or {})
        self._common_metric_tags[CONSUMER_TAG_KEY] = name
        self._max_retries = int(max_retries)
        self._initial_backoff = float(initial_backoff)
        self._max_backoff = float(max_backoff)
        self._jitter_ratio = float(jitter_ratio)
        self._publish_client = publish_client
        self._event_buffer = event_buffer
        self._task_metadata_buffer = task_metadata_buffer
        # Event set once the publisher has registered as a consumer and is ready to publish events
        self._started_event: asyncio.Event = asyncio.Event()

    async def run_forever(self) -> None:
        """Run the publisher forever until cancellation or process death.

        Registers as a consumer, starts the worker loop, and handles cleanup on cancellation.
        """
        await self._event_buffer.register_consumer(self._name)
        # Signal that the publisher is ready to publish events
        self._started_event.set()
        try:
            logger.info(f"Starting publisher {self._name}")
            while True:
                events_batch = await self._event_buffer.wait_for_batch(
                    self._name,
                    PUBLISHER_MAX_BUFFER_SEND_INTERVAL_SECONDS,
                )
                publish_batch = PublishBatch(events=events_batch)
                # Attach any buffered dropped-task metadata to the outgoing batch.
                if self._task_metadata_buffer is not None:
                    task_metadata_batch = self._task_metadata_buffer.get()
                    publish_batch.task_events_metadata = task_metadata_batch
                await self._async_publish_with_retries(publish_batch)
        except asyncio.CancelledError:
            logger.info(f"Publisher {self._name} cancelled, shutting down gracefully")
            raise
        except Exception as e:
            logger.error(f"Publisher {self._name} encountered error: {e}")
            raise
        finally:
            # Mark as not running and release the client regardless of how the
            # loop exits.
            self._started_event.clear()
            await self._publish_client.close()

    async def wait_until_running(self, timeout: Optional[float] = None) -> bool:
        """Wait until the publisher has started.

        Args:
            timeout: Maximum time to wait in seconds. If None, waits indefinitely.

        Returns:
            True if the publisher started before the timeout, False otherwise.
            If timeout is None, waits indefinitely.
        """
        if timeout is None:
            await self._started_event.wait()
            return True
        try:
            await asyncio.wait_for(self._started_event.wait(), timeout)
            return True
        except asyncio.TimeoutError:
            return False

    async def _async_publish_with_retries(self, batch) -> None:
        """Attempts to publish a batch with retries.

        Will retry failed publishes up to max_retries times with increasing delays.
        """
        num_events_in_batch = self._publish_client.count_num_events_in_batch(batch)
        failed_attempts_since_last_success = 0
        while True:
            start = asyncio.get_running_loop().time()
            result = await self._publish_client.publish(batch)
            duration = asyncio.get_running_loop().time() - start
            if result.is_publish_successful:
                await self._record_success(
                    num_published=int(result.num_events_published),
                    num_filtered=int(result.num_events_filtered_out),
                    duration=float(duration),
                )
                # The failure counter is local to this call, so no reset is
                # needed before returning.
                return
            # Failed attempt
            # case 1: if max retries are exhausted mark as failed and break out, retry indefinitely if max_retries is less than 0
            if (
                self._max_retries >= 0
                and failed_attempts_since_last_success >= self._max_retries
            ):
                await self._record_final_failure(
                    num_failed_events=int(num_events_in_batch),
                    duration=float(duration),
                )
                return
            # case 2: max retries not exhausted, increment failed attempts counter and add latency to failure list, retry publishing batch with backoff
            failed_attempts_since_last_success += 1
            await self._record_retry_failure(
                duration=float(duration),
                failed_attempts=int(failed_attempts_since_last_success),
            )
            await self._async_sleep_with_backoff(failed_attempts_since_last_success)

    async def _async_sleep_with_backoff(self, attempt: int) -> None:
        """Sleep with exponential backoff and optional jitter.

        Args:
            attempt: The current attempt number (0-based)
        """
        capped_attempt = min(attempt, self._MAX_BACKOFF_EXPONENT)
        delay = min(
            self._max_backoff,
            self._initial_backoff * (2**capped_attempt),
        )
        if self._jitter_ratio > 0:
            # Spread the delay uniformly within +/- jitter_ratio of itself.
            jitter = delay * self._jitter_ratio
            delay = max(0.0, random.uniform(delay - jitter, delay + jitter))
        await asyncio.sleep(delay)

    async def _record_success(
        self, num_published: int, num_filtered: int, duration: float
    ) -> None:
        """Update in-memory stats and Prometheus metrics for a successful publish."""
        if num_published > 0:
            metric_recorder.set_metric_value(
                published_counter_name,
                self._common_metric_tags,
                int(num_published),
            )
        if num_filtered > 0:
            metric_recorder.set_metric_value(
                filtered_counter_name, self._common_metric_tags, int(num_filtered)
            )
        # Success clears the failure-streak and staleness gauges.
        metric_recorder.set_metric_value(
            consecutive_failures_gauge_name, self._common_metric_tags, 0
        )
        metric_recorder.set_metric_value(
            time_since_last_success_gauge_name, self._common_metric_tags, 0
        )
        metric_recorder.set_metric_value(
            publish_latency_hist_name,
            {**self._common_metric_tags, "Outcome": "success"},
            float(duration),
        )

    async def _record_retry_failure(
        self, duration: float, failed_attempts: int
    ) -> None:
        """Update Prometheus metrics for a retryable failure attempt."""
        metric_recorder.set_metric_value(
            consecutive_failures_gauge_name,
            self._common_metric_tags,
            int(failed_attempts),
        )
        metric_recorder.set_metric_value(
            publish_latency_hist_name,
            {**self._common_metric_tags, "Outcome": "failure"},
            float(duration),
        )

    async def _record_final_failure(
        self, num_failed_events: int, duration: float
    ) -> None:
        """Update in-memory stats and Prometheus metrics for a final (non-retryable) failure."""
        if num_failed_events > 0:
            metric_recorder.set_metric_value(
                failed_counter_name,
                self._common_metric_tags,
                int(num_failed_events),
            )
        metric_recorder.set_metric_value(
            consecutive_failures_gauge_name, self._common_metric_tags, 0
        )
        metric_recorder.set_metric_value(
            publish_latency_hist_name,
            {**self._common_metric_tags, "Outcome": "failure"},
            float(duration),
        )
class NoopPublisher(RayEventPublisherInterface):
    """Publisher stand-in used when a destination is disabled.

    Satisfies the minimal interface AggregatorAgent relies on but performs no
    work: it simply parks forever until cancelled.
    """

    async def run_forever(self) -> None:
        """Block on a never-set event; only cancellation ends the wait."""
        try:
            # Nobody ever sets this Event, so wait() returns only via
            # task cancellation.
            await asyncio.Event().wait()
        except asyncio.CancelledError:
            logger.info("NoopPublisher cancelled")
            raise

    async def wait_until_running(self, timeout: Optional[float] = None) -> bool:
        """The no-op publisher is trivially always running."""
        return True
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/dashboard/modules/aggregator/publisher/ray_event_publisher.py",
"license": "Apache License 2.0",
"lines": 251,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/dashboard/modules/aggregator/tests/test_multi_consumer_event_buffer.py | import asyncio
import random
import sys
import pytest
from google.protobuf.timestamp_pb2 import Timestamp
from ray.core.generated.events_base_event_pb2 import RayEvent
from ray.dashboard.modules.aggregator.multi_consumer_event_buffer import (
MultiConsumerEventBuffer,
)
def _create_test_event(
    event_id: bytes = b"test",
    event_type_enum=RayEvent.EventType.TASK_DEFINITION_EVENT,
    message: str = "test message",
):
    """Build a RayEvent populated with test defaults and the current time."""
    now = Timestamp()
    now.GetCurrentTime()
    evt = RayEvent(
        event_id=event_id,
        source_type=RayEvent.SourceType.CORE_WORKER,
        event_type=event_type_enum,
        severity=RayEvent.Severity.INFO,
        message=message,
        session_name="test_session",
    )
    # Timestamp is a message field, so it must be copied in rather than assigned.
    evt.timestamp.CopyFrom(now)
    return evt
class TestMultiConsumerEventBuffer:
    """Tests for MultiConsumerEventBuffer: per-consumer cursors, eviction, blocking."""

    @pytest.mark.asyncio
    async def test_add_and_consume_event_basic(self):
        """Test basic event addition."""
        buffer = MultiConsumerEventBuffer(max_size=10, max_batch_size=5)
        consumer_name = "test_consumer"
        await buffer.register_consumer(consumer_name)
        assert await buffer.size() == 0
        event = _create_test_event(b"event1")
        await buffer.add_event(event)
        assert await buffer.size() == 1
        batch = await buffer.wait_for_batch(consumer_name, timeout_seconds=0)
        assert len(batch) == 1
        assert batch[0] == event

    @pytest.mark.asyncio
    async def test_add_event_buffer_overflow(self):
        """Test buffer overflow behavior and eviction logic."""
        buffer = MultiConsumerEventBuffer(max_size=3, max_batch_size=2)
        consumer_name = "test_consumer"
        await buffer.register_consumer(consumer_name)
        # Add events to fill buffer
        events = []
        event_types = [
            RayEvent.EventType.TASK_DEFINITION_EVENT,
            RayEvent.EventType.TASK_LIFECYCLE_EVENT,
            RayEvent.EventType.ACTOR_TASK_DEFINITION_EVENT,
        ]
        for i in range(3):
            event = _create_test_event(f"event{i}".encode(), event_types[i])
            events.append(event)
            await buffer.add_event(event)
        assert await buffer.size() == 3
        # Add one more event to trigger eviction
        overflow_event = _create_test_event(
            b"overflow", RayEvent.EventType.TASK_PROFILE_EVENT
        )
        await buffer.add_event(overflow_event)
        assert await buffer.size() == 3  # Still max size

    @pytest.mark.asyncio
    async def test_wait_for_batch_multiple_events(self):
        """Test waiting for batch when multiple events are immediately available and when not all events are available."""
        buffer = MultiConsumerEventBuffer(max_size=10, max_batch_size=3)
        consumer_name = "test_consumer"
        await buffer.register_consumer(consumer_name)
        # Add multiple events
        events = []
        for i in range(5):
            event = _create_test_event(f"event{i}".encode())
            events.append(event)
            await buffer.add_event(event)
        # Should get max_batch_size events immediately
        batch = await buffer.wait_for_batch(consumer_name, timeout_seconds=0.1)
        assert len(batch) == 3  # max_batch_size
        assert batch == events[:3]
        # should now get the leftover events (< max_batch_size)
        batch = await buffer.wait_for_batch(consumer_name, timeout_seconds=0.1)
        assert len(batch) == 2
        assert batch == events[3:]

    @pytest.mark.asyncio
    async def test_wait_for_batch_unknown_consumer(self):
        """Test error handling for unknown consumer."""
        buffer = MultiConsumerEventBuffer(max_size=10, max_batch_size=5)
        with pytest.raises(KeyError, match="unknown consumer"):
            await buffer.wait_for_batch("nonexistent_consumer", timeout_seconds=0)

    @pytest.mark.asyncio
    async def test_register_consumer_duplicate(self):
        """Test error handling for duplicate consumer registration."""
        buffer = MultiConsumerEventBuffer(max_size=10, max_batch_size=5)
        consumer_name = "test_consumer"
        await buffer.register_consumer(consumer_name)
        with pytest.raises(
            ValueError, match="consumer 'test_consumer' already registered"
        ):
            await buffer.register_consumer(consumer_name)

    @pytest.mark.asyncio
    async def test_multiple_consumers_independent_cursors(self):
        """Test that multiple consumers have independent cursors."""
        buffer = MultiConsumerEventBuffer(max_size=10, max_batch_size=2)
        consumer_name_1 = "test_consumer_1"
        consumer_name_2 = "test_consumer_2"
        await buffer.register_consumer(consumer_name_1)
        await buffer.register_consumer(consumer_name_2)
        # Add events
        events = []
        for i in range(10):
            event = _create_test_event(f"event{i}".encode())
            events.append(event)
            await buffer.add_event(event)
        # Consumer 1 reads first batch
        batch1 = await buffer.wait_for_batch(consumer_name_1, timeout_seconds=0.1)
        assert batch1 == events[:2]
        # Consumer 2 reads from beginning
        batch2 = await buffer.wait_for_batch(consumer_name_2, timeout_seconds=0.1)
        assert batch2 == events[:2]
        # consumer 1 reads another batch
        batch3 = await buffer.wait_for_batch(consumer_name_1, timeout_seconds=0.1)
        assert batch3 == events[2:4]
        # more events are added leading to events not consumed by consumer 2 getting evicted
        # 4 events get evicted, consumer 1 has processed all 4 evicted events previously
        # but consumer 2 has only processed 2 out of the 4 evicted events
        for i in range(4):
            event = _create_test_event(f"event{i + 10}".encode())
            events.append(event)
            await buffer.add_event(event)
        # Just ensure buffer remains at max size
        assert await buffer.size() == 10
        # consumer 1 will read the next 2 events, not affected by the evictions
        # consumer 1's cursor is adjusted internally to account for the evicted events
        batch4 = await buffer.wait_for_batch(consumer_name_1, timeout_seconds=0.1)
        assert batch4 == events[4:6]
        # consumer 2 will read 2 events, skipping the evicted events
        batch5 = await buffer.wait_for_batch(consumer_name_2, timeout_seconds=0.1)
        assert batch5 == events[4:6]  # events[2:4] are lost

    @pytest.mark.asyncio
    async def test_wait_for_batch_blocks_until_event_available(self):
        """Test that wait_for_batch blocks until at least one event is available."""
        buffer = MultiConsumerEventBuffer(max_size=10, max_batch_size=5)
        consumer_name = "test_consumer"
        await buffer.register_consumer(consumer_name)

        # Start waiting for batch (should block)
        async def wait_for_batch():
            return await buffer.wait_for_batch(consumer_name, timeout_seconds=2.0)

        wait_task = asyncio.create_task(wait_for_batch())
        # Yield briefly so the task starts waiting; a short sleep is enough to
        # observe that it blocks (and keeps the test fast).
        await asyncio.sleep(0.1)
        assert not wait_task.done()
        # Add an event
        event = _create_test_event(b"event1")
        await buffer.add_event(event)
        # Now the task should complete
        batch = await wait_task
        assert len(batch) == 1
        assert batch[0] == event

    @pytest.mark.asyncio
    async def test_concurrent_producer_consumer_random_sleeps_with_overall_timeout(
        self,
    ):
        """Producer with random sleeps and consumer reading until all events are received.

        Uses an overall asyncio timeout to ensure the test fails if it hangs
        before consuming all events.
        """
        total_events = 40
        max_batch_size = 2
        buffer = MultiConsumerEventBuffer(max_size=100, max_batch_size=max_batch_size)
        consumer_name = "test_consumer"
        await buffer.register_consumer(consumer_name)
        produced_events = []
        consumed_events = []
        random.seed(0)

        async def producer():
            for i in range(total_events):
                event = _create_test_event(f"e{i}".encode())
                produced_events.append(event)
                await buffer.add_event(event)
                await asyncio.sleep(random.uniform(0.0, 0.02))

        async def consumer():
            while len(consumed_events) < total_events:
                batch = await buffer.wait_for_batch(consumer_name, timeout_seconds=0.1)
                consumed_events.extend(batch)

        # The test should fail if this times out before all events are consumed
        await asyncio.wait_for(asyncio.gather(producer(), consumer()), timeout=5.0)
        assert len(consumed_events) == total_events
        assert consumed_events == produced_events

    @pytest.mark.asyncio
    async def test_events_are_evicted_once_consumed_by_all_consumers(self):
        """Test events are evicted from the buffer once they are consumed by all consumers"""
        buffer = MultiConsumerEventBuffer(max_size=10, max_batch_size=2)
        consumer_name_1 = "test_consumer_1"
        consumer_name_2 = "test_consumer_2"
        await buffer.register_consumer(consumer_name_1)
        await buffer.register_consumer(consumer_name_2)
        # Add events
        events = []
        for i in range(10):
            event = _create_test_event(f"event{i}".encode())
            events.append(event)
            await buffer.add_event(event)
        assert await buffer.size() == 10
        # Consumer 1 reads first batch
        batch1 = await buffer.wait_for_batch(consumer_name_1, timeout_seconds=0.1)
        assert batch1 == events[:2]
        # buffer size does not change as consumer 2 is yet to consume these events
        assert await buffer.size() == 10
        # Consumer 2 reads from beginning
        batch2 = await buffer.wait_for_batch(consumer_name_2, timeout_seconds=0.1)
        assert batch2 == events[:2]
        # size reduces by 2 as both consumers have consumed 2 events
        assert await buffer.size() == 8
# Allow running this test module directly (outside of pytest discovery),
# propagating pytest's exit code to the shell.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/dashboard/modules/aggregator/tests/test_multi_consumer_event_buffer.py",
"license": "Apache License 2.0",
"lines": 217,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/dashboard/modules/aggregator/tests/test_ray_event_publisher.py | import asyncio
import sys
import uuid
import pytest
from google.protobuf.timestamp_pb2 import Timestamp
from ray._common.test_utils import async_wait_for_condition
from ray.core.generated import events_base_event_pb2
from ray.dashboard.modules.aggregator.multi_consumer_event_buffer import (
MultiConsumerEventBuffer,
)
from ray.dashboard.modules.aggregator.publisher.async_publisher_client import (
PublisherClientInterface,
PublishStats,
)
from ray.dashboard.modules.aggregator.publisher.ray_event_publisher import (
NoopPublisher,
RayEventPublisher,
)
class MockPublisherClient(PublisherClientInterface):
    """Recording fake of PublisherClientInterface.

    Each batch handed to ``publish`` is remembered in ``publish_calls`` and the
    configured side effect decides the returned PublishStats.
    """

    def __init__(
        self,
        batch_size: int = 1,
        side_effect=lambda batch: PublishStats(True, 1, 0),
    ):
        self.batch_size = batch_size
        self.publish_calls = []
        self._respond = side_effect

    async def publish(self, batch) -> PublishStats:
        """Record the batch, then return whatever the side effect produces."""
        self.publish_calls.append(batch)
        outcome = self._respond(batch)
        return outcome

    def count_num_events_in_batch(self, batch) -> int:
        """Report the fixed batch size configured at construction time."""
        return self.batch_size

    async def close(self) -> None:
        """Nothing to release for the fake client."""
        pass
@pytest.fixture
def base_kwargs():
    """Common kwargs for publisher initialization.

    Contains only keys that tests actually read when constructing a
    ``RayEventPublisher`` (the previously included ``enable_publisher_stats``
    key is not a constructor parameter and was never used).
    """
    return {
        "name": "test",
        "max_retries": 2,
        "initial_backoff": 0,
        "max_backoff": 0,
        "jitter_ratio": 0,
    }
class TestRayEventPublisher:
    """End-to-end tests for the RayEventPublisher retry loop."""

    @staticmethod
    def _sample_event():
        """Build a minimal task-definition RayEvent to feed the buffer."""
        return events_base_event_pb2.RayEvent(
            event_id=b"1",
            source_type=events_base_event_pb2.RayEvent.SourceType.CORE_WORKER,
            event_type=events_base_event_pb2.RayEvent.EventType.TASK_DEFINITION_EVENT,
            timestamp=Timestamp(seconds=123, nanos=0),
            severity=events_base_event_pb2.RayEvent.Severity.INFO,
            message="hello",
        )

    @pytest.mark.asyncio
    async def test_publish_with_retries_failure_then_success(self, base_kwargs):
        """Test publish that fails then succeeds."""
        attempts = []

        # First publish attempt fails; the retry succeeds.
        def fail_once(batch):
            attempts.append(batch)
            if len(attempts) == 1:
                return PublishStats(False, 0, 0)
            return PublishStats(True, 1, 0)

        mock_client = MockPublisherClient(side_effect=fail_once)
        buffer = MultiConsumerEventBuffer(max_size=10, max_batch_size=10)
        publisher = RayEventPublisher(
            name=base_kwargs["name"] + str(uuid.uuid4()),
            publish_client=mock_client,
            event_buffer=buffer,
            max_retries=base_kwargs["max_retries"],
            initial_backoff=base_kwargs["initial_backoff"],
            max_backoff=base_kwargs["max_backoff"],
            jitter_ratio=base_kwargs["jitter_ratio"],
        )
        worker = asyncio.create_task(publisher.run_forever())
        try:
            # The publisher must register as a consumer before we enqueue.
            assert await publisher.wait_until_running(2.0)
            await buffer.add_event(self._sample_event())
            # Expect exactly two publish attempts: the failure plus the retry.
            await async_wait_for_condition(
                lambda: len(mock_client.publish_calls) == 2
            )
        finally:
            worker.cancel()
            with pytest.raises(asyncio.CancelledError):
                await worker

    @pytest.mark.asyncio
    async def test_publish_with_retries_max_retries_exceeded(self, base_kwargs):
        """Test publish that fails all retries and records failed events."""
        mock_client = MockPublisherClient(
            side_effect=lambda batch: PublishStats(False, 0, 0)
        )
        buffer = MultiConsumerEventBuffer(max_size=10, max_batch_size=10)
        publisher = RayEventPublisher(
            name=base_kwargs["name"] + str(uuid.uuid4()),
            publish_client=mock_client,
            event_buffer=buffer,
            max_retries=2,  # finite retries so the loop eventually gives up
            initial_backoff=0,
            max_backoff=0,
            jitter_ratio=0,
        )
        worker = asyncio.create_task(publisher.run_forever())
        try:
            assert await publisher.wait_until_running(2.0)
            await buffer.add_event(self._sample_event())
            # Initial attempt plus two retries.
            await async_wait_for_condition(
                lambda: len(mock_client.publish_calls) == 3
            )
            assert len(mock_client.publish_calls) == 3
        finally:
            worker.cancel()
            with pytest.raises(asyncio.CancelledError):
                await worker
class TestNoopPublisher:
    """Test no-op publisher implementation."""

    @pytest.mark.asyncio
    async def test_all_methods_noop(self):
        """Cancelling run_forever must propagate CancelledError."""
        publisher = NoopPublisher()
        # run_forever parks indefinitely; cancel it right away.
        runner = asyncio.create_task(publisher.run_forever())
        runner.cancel()
        with pytest.raises(asyncio.CancelledError):
            await runner
# Allow running this test module directly (outside of pytest discovery),
# propagating pytest's exit code to the shell.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/dashboard/modules/aggregator/tests/test_ray_event_publisher.py",
"license": "Apache License 2.0",
"lines": 138,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/test_filter.py | import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from pkg_resources import parse_version
import ray
from ray.data.expressions import col
from ray.data.tests.conftest import get_pyarrow_version
from ray.tests.conftest import * # noqa
def test_filter_mutex(ray_start_regular_shared, tmp_path):
    """Test filter op."""
    # Write a small iris-like table to a parquet file under tmp_path.
    frame = pd.DataFrame(
        {
            "sepal.length": [4.8, 5.1, 5.7, 6.3, 7.0],
            "sepal.width": [3.0, 3.3, 3.5, 3.2, 2.8],
            "petal.length": [1.4, 1.7, 4.2, 5.4, 6.1],
            "petal.width": [0.2, 0.4, 1.5, 2.1, 2.4],
        }
    )
    parquet_path = tmp_path / "sample_data.parquet"
    pq.write_table(pa.Table.from_pandas(frame), parquet_path)

    ds = ray.data.read_parquet(str(parquet_path))

    # Supplying both a UDF and an expression must be rejected.
    with pytest.raises(
        ValueError,
    ):
        ds.filter(fn=lambda r: r["sepal.length"] > 5.0, expr="sepal.length > 5.0")

    # A bare string is not a valid UDF for the fn argument.
    with pytest.raises(ValueError, match="must be a UserDefinedFunction"):
        ds.filter(fn="sepal.length > 5.0")
def test_filter_with_expressions(ray_start_regular_shared, tmp_path):
    """Test filtering with expressions."""
    # Write a small iris-like table to a parquet file under tmp_path.
    frame = pd.DataFrame(
        {
            "sepal.length": [4.8, 5.1, 5.7, 6.3, 7.0],
            "sepal.width": [3.0, 3.3, 3.5, 3.2, 2.8],
            "petal.length": [1.4, 1.7, 4.2, 5.4, 6.1],
            "petal.width": [0.2, 0.4, 1.5, 2.1, 2.4],
        }
    )
    parquet_path = tmp_path / "sample_data.parquet"
    pq.write_table(pa.Table.from_pandas(frame), parquet_path)

    ds = ray.data.read_parquet(str(parquet_path))

    # Apply the same predicate two ways: as a UDF and as a string expression.
    udf_result = ds.filter(lambda r: r["sepal.length"] > 5.0).to_pandas()
    expr_result = ds.filter(expr="sepal.length > 5.0").to_pandas()

    # Both strategies must select the same rows.
    assert set(udf_result["sepal.length"]) == set(expr_result["sepal.length"])
    assert len(udf_result) == len(expr_result)

    # And every surviving row must satisfy the predicate.
    assert all(
        expr_result["sepal.length"] > 5.0
    ), "Filtered data contains rows with 'sepal.length' <= 5.0"
    assert all(
        udf_result["sepal.length"] > 5.0
    ), "UDF-filtered data contains rows with 'sepal.length' <= 5.0"
def test_filter_with_invalid_expression(ray_start_regular_shared, tmp_path):
    """Test filtering with invalid expressions."""
    # Write a small iris-like table to a parquet file under tmp_path.
    frame = pd.DataFrame(
        {
            "sepal.length": [4.8, 5.1, 5.7, 6.3, 7.0],
            "sepal.width": [3.0, 3.3, 3.5, 3.2, 2.8],
            "petal.length": [1.4, 1.7, 4.2, 5.4, 6.1],
            "petal.width": [0.2, 0.4, 1.5, 2.1, 2.4],
        }
    )
    parquet_path = tmp_path / "sample_data.parquet"
    pq.write_table(pa.Table.from_pandas(frame), parquet_path)

    ds = ray.data.read_parquet(str(parquet_path))

    # Malformed expression text fails eagerly at filter() time.
    with pytest.raises(ValueError, match="Invalid syntax in the expression"):
        ds.filter(expr="fake_news super fake")

    # A syntactically valid expression over a missing column only fails lazily.
    missing_column_ds = ds.filter(expr="sepal_length_123 > 1")
    # With predicate pushdown, the error is raised during file reading
    # and wrapped in RayTaskError
    with pytest.raises(
        (ray.exceptions.RayTaskError, RuntimeError), match="sepal_length_123"
    ):
        missing_column_ds.to_pandas()
@pytest.mark.skipif(
    get_pyarrow_version() < parse_version("20.0.0"),
    reason="predicate expressions require PyArrow >= 20.0.0",
)
# Run every predicate case against both Arrow-backed and pandas-backed blocks.
@pytest.mark.parametrize(
    "data_source",
    [
        pytest.param("from_items", id="arrow_blocks"),
        pytest.param("from_pandas", id="pandas_blocks"),
    ],
)
# Each case: (expression, input rows, indices of rows expected to survive, id).
@pytest.mark.parametrize(
    "predicate_expr, test_data, expected_indices, test_description",
    [
        # Simple comparison filters
        pytest.param(
            col("age") >= 21,
            [
                {"age": 20, "name": "Alice"},
                {"age": 21, "name": "Bob"},
                {"age": 25, "name": "Charlie"},
                {"age": 30, "name": "David"},
            ],
            [1, 2, 3],  # Indices of rows that should remain
            "age_greater_equal_filter",
        ),
        pytest.param(
            col("score") > 50,
            [
                {"score": 30, "status": "fail"},
                {"score": 50, "status": "borderline"},
                {"score": 70, "status": "pass"},
                {"score": 90, "status": "excellent"},
            ],
            [2, 3],
            "score_greater_than_filter",
        ),
        pytest.param(
            col("category") == "premium",
            [
                {"category": "basic", "price": 10},
                {"category": "premium", "price": 50},
                {"category": "standard", "price": 25},
                {"category": "premium", "price": 75},
            ],
            [1, 3],
            "equality_string_filter",
        ),
        # Complex logical filters
        pytest.param(
            (col("age") >= 18) & (col("active")),
            [
                {"age": 17, "active": True},
                {"age": 18, "active": False},
                {"age": 25, "active": True},
                {"age": 30, "active": True},
            ],
            [2, 3],
            "logical_and_filter",
        ),
        pytest.param(
            (col("status") == "approved") | (col("priority") == "high"),
            [
                {"status": "pending", "priority": "low"},
                {"status": "approved", "priority": "low"},
                {"status": "pending", "priority": "high"},
                {"status": "rejected", "priority": "high"},
            ],
            [1, 2, 3],
            "logical_or_filter",
        ),
        # Null handling filters
        pytest.param(
            col("value").is_not_null(),
            [
                {"value": None, "id": 1},
                {"value": 0, "id": 2},
                {"value": None, "id": 3},
                {"value": 42, "id": 4},
            ],
            [1, 3],
            "not_null_filter",
        ),
        pytest.param(
            col("name").is_null(),
            [
                {"name": "Alice", "id": 1},
                {"name": None, "id": 2},
                {"name": "Bob", "id": 3},
                {"name": None, "id": 4},
            ],
            [1, 3],
            "is_null_filter",
        ),
        # Complex multi-condition filters
        pytest.param(
            col("value").is_not_null() & (col("value") > 0),
            [
                {"value": None, "type": "missing"},
                {"value": -5, "type": "negative"},
                {"value": 0, "type": "zero"},
                {"value": 10, "type": "positive"},
            ],
            [3],
            "null_aware_positive_filter",
        ),
        # String operations
        pytest.param(
            col("name").is_not_null() & (col("name") != "excluded"),
            [
                {"name": "included", "id": 1},
                {"name": "excluded", "id": 2},
                {"name": None, "id": 3},
                {"name": "allowed", "id": 4},
            ],
            [0, 3],
            "string_exclusion_filter",
        ),
        # Additional comparison operations
        pytest.param(
            col("age") > 25,
            [
                {"age": 20, "name": "Alice"},
                {"age": 25, "name": "Bob"},
                {"age": 30, "name": "Charlie"},
                {"age": 35, "name": "David"},
            ],
            [2, 3],
            "greater_than_filter",
        ),
        pytest.param(
            col("age") < 25,
            [
                {"age": 20, "name": "Alice"},
                {"age": 25, "name": "Bob"},
                {"age": 30, "name": "Charlie"},
            ],
            [0],
            "less_than_filter",
        ),
        pytest.param(
            col("age") <= 25,
            [
                {"age": 20, "name": "Alice"},
                {"age": 25, "name": "Bob"},
                {"age": 30, "name": "Charlie"},
            ],
            [0, 1],
            "less_than_equal_filter",
        ),
        # Membership operations
        pytest.param(
            col("category").is_in(["A", "B"]),
            [
                {"category": "A", "value": 1},
                {"category": "B", "value": 2},
                {"category": "C", "value": 3},
                {"category": "D", "value": 4},
                {"category": "A", "value": 5},
            ],
            [0, 1, 4],
            "is_in_filter",
        ),
        pytest.param(
            col("category").not_in(["A", "B"]),
            [
                {"category": "A", "value": 1},
                {"category": "B", "value": 2},
                {"category": "C", "value": 3},
                {"category": "D", "value": 4},
            ],
            [2, 3],  # These are indices not the actual values
            "not_in_filter",
        ),
        # Negation operations
        pytest.param(
            ~(col("category") == "reject"),
            [
                {"category": "accept", "id": 1},
                {"category": "reject", "id": 2},
                {"category": "pending", "id": 3},
                {"category": "reject", "id": 4},
            ],
            [0, 2],
            "negation_filter",
        ),
        # Nested complex expressions
        pytest.param(
            (col("score") >= 50) & (col("grade") != "F") & col("active"),
            [
                {"score": 45, "grade": "F", "active": True},
                {"score": 55, "grade": "D", "active": True},
                {"score": 75, "grade": "B", "active": False},
                {"score": 85, "grade": "A", "active": True},
            ],
            [1, 3],
            "complex_nested_filter",
        ),
    ],
)
def test_filter_with_predicate_expressions(
    ray_start_regular_shared,
    data_source,
    predicate_expr,
    test_data,
    expected_indices,
    test_description,
):
    """Test filter() with Ray Data predicate expressions on both Arrow and pandas blocks.

    Builds a dataset from ``test_data``, applies ``predicate_expr``, and checks
    that exactly the rows at ``expected_indices`` survive.
    """
    # Create dataset based on data_source parameter
    if data_source == "from_items":
        ds = ray.data.from_items(test_data)
    else:  # from_pandas
        ds = ray.data.from_pandas([pd.DataFrame(test_data)])
    # Apply filter with predicate expression
    filtered_ds = ds.filter(expr=predicate_expr)
    # Convert to list and verify results
    result_data = filtered_ds.to_pandas().to_dict("records")
    expected_data = [test_data[i] for i in expected_indices]
    # Use pandas testing for consistent comparison
    result_df = pd.DataFrame(result_data)
    expected_df = pd.DataFrame(expected_data)
    pd.testing.assert_frame_equal(
        result_df.reset_index(drop=True),
        expected_df.reset_index(drop=True),
        check_dtype=False,
    )
@pytest.mark.skipif(
    get_pyarrow_version() < parse_version("20.0.0"),
    reason="predicate expressions require PyArrow >= 20.0.0",
)
def test_filter_predicate_expr_vs_function_consistency(ray_start_regular_shared):
    """Test that predicate expressions produce the same results as equivalent functions."""
    rows = [
        {"age": 20, "score": 85, "active": True},
        {"age": 25, "score": 45, "active": False},
        {"age": 30, "score": 95, "active": True},
        {"age": 18, "score": 60, "active": True},
    ]
    ds = ray.data.from_items(rows)

    # Simple comparison: expression filter vs. an equivalent row lambda.
    via_expr = ds.filter(expr=col("age") >= 21).to_pandas()
    via_fn = ds.filter(fn=lambda row: row["age"] >= 21).to_pandas()
    pd.testing.assert_frame_equal(via_expr, via_fn, check_dtype=False)

    # Complex logical expression combining comparisons and a boolean column.
    predicate = (col("age") >= 21) & (col("score") > 80) & col("active")
    via_expr = ds.filter(expr=predicate).to_pandas()
    via_fn = ds.filter(
        fn=lambda row: row["age"] >= 21 and row["score"] > 80 and row["active"]
    ).to_pandas()
    pd.testing.assert_frame_equal(via_expr, via_fn, check_dtype=False)
@pytest.mark.skipif(
    get_pyarrow_version() < parse_version("20.0.0"),
    reason="predicate expressions require PyArrow >= 20.0.0",
)
def test_filter_predicate_with_different_block_formats(ray_start_regular_shared):
    """Test that predicate expressions work with different block formats (pandas/arrow)."""
    rows = [
        {"category": "A", "value": 10},
        {"category": "B", "value": 20},
        {"category": "A", "value": 30},
        {"category": "C", "value": 40},
    ]
    expected = pd.DataFrame(
        [
            {"category": "A", "value": 10},
            {"category": "A", "value": 30},
        ]
    )

    # Arrow-backed blocks (from_items) and pandas-backed blocks (from_pandas)
    # must both produce the same filtered rows.
    for ds in (
        ray.data.from_items(rows),
        ray.data.from_pandas([pd.DataFrame(rows)]),
    ):
        actual = ds.filter(expr=col("category") == "A").to_pandas()
        # Reset indices so row positions, not index labels, are compared.
        pd.testing.assert_frame_equal(
            actual.reset_index(drop=True),
            expected.reset_index(drop=True),
            check_dtype=False,
        )
@pytest.mark.skipif(
    get_pyarrow_version() < parse_version("20.0.0"),
    reason="predicate expressions require PyArrow >= 20.0.0",
)
def test_filter_expression_display_names(ray_start_regular_shared):
    """Test that filter operations display meaningful expression names in plans."""
    import pyarrow.compute as pc

    from ray.data.datatype import DataType
    from ray.data.expressions import udf

    # Boolean-returning UDF; its name (`_str_len`) should surface verbatim
    # in the Filter operator's plan string.
    @udf(return_dtype=DataType.from_arrow(pa.bool_()))
    def _str_len(array):
        return pc.greater(pc.binary_length(array), 0)

    plan_str = str(ray.data.from_items(["a", ""]).filter(expr=_str_len(col("item"))))
    # Exact-match on the rendered plan: UDF name, column reference, and schema.
    assert plan_str == (
        "Filter(_str_len(col('item')))\n"
        "+- Dataset(num_rows=2, schema={item: string})"
    )
if __name__ == "__main__":
    import sys

    # Allow running this test module directly, forwarding pytest's exit code.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_filter.py",
"license": "Apache License 2.0",
"lines": 404,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/tests/test_logging_java.py | import os
import subprocess
import sys
import tempfile
import pytest
import ray
from ray._common.test_utils import wait_for_condition
from ray.cross_language import java_actor_class
# Source code of MyClass.java
_MY_CLASS_JAVA = """
public class MyClass {
public int printToLog(String line) {
System.err.println(line);
return 0;
}
}
"""
@pytest.mark.skipif(
    sys.platform == "win32" or sys.platform == "darwin",
    reason="Does not work on Windows and OSX.",
)
def test_log_java_worker_logs(shutdown_only, capsys):
    """Compile a Java actor class, print from it, and verify Ray forwards the worker log."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("using tmp_dir", tmp_dir)
        # Compile MyClass.java and package it into a jar so Ray's
        # code_search_path can load the class for the Java actor.
        with open(os.path.join(tmp_dir, "MyClass.java"), "w") as f:
            f.write(_MY_CLASS_JAVA)
        subprocess.check_call(["javac", "MyClass.java"], cwd=tmp_dir)
        subprocess.check_call(["jar", "-cf", "myJar.jar", "MyClass.class"], cwd=tmp_dir)

        ray.init(
            job_config=ray.job_config.JobConfig(code_search_path=[tmp_dir]),
        )
        handle = java_actor_class("MyClass").remote()
        ray.get(handle.printToLog.remote("here's my random line!"))

        def check():
            # Worker logs are forwarded asynchronously; poll the captured
            # stdout+stderr until the actor's printed line shows up.
            out, err = capsys.readouterr()
            out += err
            with capsys.disabled():
                print(out)
            return "here's my random line!" in out

        wait_for_condition(check)

        ray.shutdown()
if __name__ == "__main__":
    # Make subprocess happy in bazel: force a UTF-8 locale for the
    # javac/jar child processes spawned by the test.
    os.environ["LC_ALL"] = "en_US.UTF-8"
    os.environ["LANG"] = "en_US.UTF-8"
    sys.exit(pytest.main(["-sv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/test_logging_java.py",
"license": "Apache License 2.0",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/train/v2/_internal/execution/checkpoint/validation_manager.py | import logging
import time
from collections import OrderedDict, deque
from typing import TYPE_CHECKING, Any, Dict, List, Union
import ray
from ray.train._checkpoint import Checkpoint
from ray.train.v2._internal.execution.callback import (
ControllerCallback,
ReportCallback,
WorkerGroupCallback,
)
from ray.train.v2._internal.execution.checkpoint.checkpoint_manager import (
CheckpointManager,
)
from ray.train.v2._internal.execution.training_report import (
_TrainingReport,
)
from ray.train.v2.api.validation_config import ValidationConfig, ValidationTaskConfig
if TYPE_CHECKING:
from ray.train.v2._internal.execution.controller import TrainControllerState
from ray.train.v2._internal.execution.worker_group.worker import Worker
logger = logging.getLogger(__name__)

# Seconds to sleep between polls while draining validations at controller shutdown.
VALIDATION_TASK_POLL_INTERVAL_S = 1
# Maximum number of validation tasks allowed to run concurrently.
MAX_IN_FLIGHT_VALIDATIONS = 1
@ray.remote
def run_validation_fn(
    validation_config: ValidationConfig,
    validation_task_config: Union[bool, ValidationTaskConfig],
    checkpoint: Checkpoint,
) -> Dict:
    """Run the user-defined validation function.

    Merges fn_kwargs from validation_config.task_config (defaults) with
    fn_kwargs from validation_task_config (per-report overrides).
    """
    # Start from the configured defaults; per-report overrides win on key clashes.
    default_kwargs = validation_config.task_config.fn_kwargs
    if validation_task_config is True:
        merged_kwargs = default_kwargs
    else:
        merged_kwargs = {**default_kwargs, **validation_task_config.fn_kwargs}

    metrics_dict = validation_config.fn(checkpoint, **merged_kwargs)

    # The contract requires a metrics dict; reject anything else eagerly.
    if not isinstance(metrics_dict, dict):
        raise ValueError(
            "The validation function must return a dictionary of metrics. "
            f"Got {type(metrics_dict)} instead."
        )
    return metrics_dict
class ValidationManager(ControllerCallback, ReportCallback, WorkerGroupCallback):
    """Queues, launches, and reaps async validation tasks for reported checkpoints.

    At most ``MAX_IN_FLIGHT_VALIDATIONS`` ``run_validation_fn`` tasks run at a
    time; finished results are fed back into the checkpoint manager.
    """

    def __init__(
        self,
        checkpoint_manager: CheckpointManager,
        validation_config: ValidationConfig,
    ):
        self._checkpoint_manager = checkpoint_manager
        self._validation_config = validation_config
        # _TrainingReports that we will validate
        self._training_report_queue = deque()
        # Map from in flight validation task to checkpoint
        self._pending_validations = OrderedDict()
        # Map from validation task to checkpoint
        # Finished validations that have yet to be processed
        self._finished_validations = OrderedDict()
        # Re-enqueue validations that never finished (e.g. on controller restart).
        self._requeue_incomplete_validations()

    def _requeue_incomplete_validations(self):
        """Add _TrainingReports for incomplete validations to the queue."""
        for checkpoint, (
            training_result,
            validation,
        ) in self._checkpoint_manager.get_pending_training_results().items():
            # Only entries that actually requested validation are requeued.
            if validation:
                self._training_report_queue.append(
                    _TrainingReport(
                        metrics=training_result.metrics,
                        checkpoint=checkpoint,
                        validation=validation,
                    )
                )

    def after_report(
        self,
        training_report: _TrainingReport,
        metrics: List[Dict[str, Any]],
    ):
        # Queue the report for validation only if it requested one.
        if training_report.validation:
            self._training_report_queue.append(training_report)

    def _poll_validations(self) -> int:
        """Poll/process validations, update checkpoint manager, return num pending validations."""
        # Move pending validations to finished validations
        validation_tasks = list(self._pending_validations.keys())
        # timeout=0: non-blocking check of which tasks have completed.
        done, _ = ray.wait(
            validation_tasks, timeout=0, num_returns=len(validation_tasks)
        )
        done_checkpoints = []
        for task in done:
            done_checkpoints.append(self._pending_validations[task])
            self._finished_validations[task] = self._pending_validations[task]
            self._pending_validations.pop(task)
        if done_checkpoints:
            logger.info(
                f"Finished async validation task(s) for checkpoint(s): {done_checkpoints}.\n"
                f"Running validations for checkpoint(s): {list(self._pending_validations.values())}.\n"
                f"Staged validations for checkpoint(s): {[tr.checkpoint for tr in self._training_report_queue]}."
            )
        # Process next finished validation
        # TODO: consider configuration to process multiple at a time
        if self._finished_validations:
            # Oldest finished validation first (OrderedDict preserves insertion).
            task, checkpoint = next(iter(self._finished_validations.items()))
            self._finished_validations.pop(task)
            checkpoint_to_metrics = self._process_finished_validation(task, checkpoint)
            self._checkpoint_manager.update_checkpoints_with_metrics(
                checkpoint_to_metrics
            )
        return len(self._pending_validations)

    def _kick_off_validations(self) -> int:
        """Kick off validations and return the number of pending validations."""
        # TODO: figure out where to place run_validation_fn task:
        # TODO: provide option to run this on gpu?
        # Launch only up to the in-flight cap, bounded by queued reports.
        num_validations_to_start = max(
            MAX_IN_FLIGHT_VALIDATIONS - len(self._pending_validations), 0
        )
        num_validations_to_start = min(
            num_validations_to_start, len(self._training_report_queue)
        )
        for _ in range(num_validations_to_start):
            training_report = self._training_report_queue.popleft()
            # TODO: handle timeouts - ray.remote() does not have them
            run_validation_fn_with_options = run_validation_fn.options(
                **self._validation_config.ray_remote_kwargs,
            )
            validate_task = run_validation_fn_with_options.remote(
                self._validation_config,
                training_report.validation,
                training_report.checkpoint,
            )
            self._pending_validations[validate_task] = training_report.checkpoint
            logger.info(
                f"Launched async validation task for checkpoint {training_report.checkpoint}"
            )
        return len(self._pending_validations)

    def _process_finished_validation(
        self, task: ray.ObjectRef, checkpoint: Checkpoint
    ) -> Dict[Checkpoint, Dict[str, Any]]:
        """Process finished validation, update checkpoint manager, return metrics."""
        checkpoint_to_metrics = {}
        try:
            checkpoint_to_metrics[checkpoint] = ray.get(task)
        except (ray.exceptions.RayTaskError, ray.exceptions.TaskCancelledError):
            # Failed/cancelled validations still record the checkpoint,
            # just with empty metrics.
            checkpoint_to_metrics[checkpoint] = {}
            logger.exception(f"Validation failed for checkpoint {checkpoint}")
            # TODO: track failed validations - see ed45912bb6ed435de06ac1cd58e9918e6825b4fe
        return checkpoint_to_metrics

    def before_controller_shutdown(self):
        """Drain all queued/pending validations, then flush remaining metrics."""
        # Keep launching/polling until nothing is pending and nothing is queued.
        while self._poll_validations() != 0 or self._kick_off_validations() != 0:
            time.sleep(VALIDATION_TASK_POLL_INTERVAL_S)
        checkpoint_to_metrics = {}
        tasks = list(self._finished_validations.keys())
        for task in tasks:
            checkpoint = self._finished_validations[task]
            self._finished_validations.pop(task)
            checkpoint_to_metrics.update(
                self._process_finished_validation(task, checkpoint)
            )
        self._checkpoint_manager.update_checkpoints_with_metrics(checkpoint_to_metrics)

    def after_controller_state_update(
        self,
        previous_state: "TrainControllerState",
        current_state: "TrainControllerState",
    ):
        # TODO: figure out if there's a better place to poll validations
        # TODO: consider cleaning up validation tasks in before_controller_abort
        self._poll_validations()
        self._kick_off_validations()

    def before_init_train_context(
        self, workers: List["Worker"]
    ) -> Dict[str, List[bool]]:
        # Every worker receives the same flag indicating that a validation
        # function is configured.
        return {
            "has_validation_fn": [True] * len(workers),
        }
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/_internal/execution/checkpoint/validation_manager.py",
"license": "Apache License 2.0",
"lines": 182,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/train/v2/_internal/execution/training_report.py | from typing import TYPE_CHECKING, Any, Dict, Optional, Union
if TYPE_CHECKING:
from ray.train import Checkpoint
from ray.train.v2.api.validation_config import ValidationTaskConfig
class _TrainingReport:
"""Checkpoint and metrics reported by user, as well as optional validation configuration."""
def __init__(
self,
checkpoint: Optional["Checkpoint"],
metrics: Dict[str, Any],
validation: Union[bool, "ValidationTaskConfig"],
):
self.checkpoint = checkpoint
self.metrics = metrics
self.validation = validation
def __repr__(self) -> str:
return f"TrainingReport(checkpoint={self.checkpoint}, metrics={self.metrics}, validation={self.validation})"
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/_internal/execution/training_report.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/train/v2/tests/test_validation_manager.py | import time
import unittest.mock
from unittest.mock import create_autospec
import pytest
import ray
from ray.train._checkpoint import Checkpoint
from ray.train._internal.session import _TrainingResult
from ray.train.v2._internal.execution.checkpoint import validation_manager
from ray.train.v2._internal.execution.checkpoint.checkpoint_manager import (
CheckpointManager,
)
from ray.train.v2._internal.execution.storage import StorageContext
from ray.train.v2._internal.execution.training_report import (
_TrainingReport,
)
from ray.train.v2._internal.execution.worker_group.worker import Worker
from ray.train.v2.api.validation_config import ValidationConfig, ValidationTaskConfig
from ray.train.v2.tests.util import create_dummy_training_reports
@pytest.fixture(autouse=True, scope="module")
def ray_start_4_cpus():
    # Start one shared 4-CPU Ray cluster for all tests in this module,
    # and shut it down after the last test finishes.
    ray.init(num_cpus=4)
    yield
    ray.shutdown()
@unittest.mock.patch.object(ray, "wait", autospec=True)
def test_before_controller_shutdown(mock_wait, monkeypatch):
    """Shutdown must drain in-flight validations and flush all their metrics."""
    # Poll immediately instead of sleeping between shutdown polls.
    monkeypatch.setattr(validation_manager, "VALIDATION_TASK_POLL_INTERVAL_S", 0)

    # Create ValidationManager with mocked objects
    checkpoint_manager = create_autospec(CheckpointManager, instance=True)
    checkpoint1 = create_autospec(Checkpoint, instance=True)
    checkpoint2 = create_autospec(Checkpoint, instance=True)
    checkpoint3 = create_autospec(Checkpoint, instance=True)
    task1 = create_autospec(ray.ObjectRef, instance=True)
    task2 = create_autospec(ray.ObjectRef, instance=True)
    task3 = create_autospec(ray.ObjectRef, instance=True)
    vm = validation_manager.ValidationManager(
        checkpoint_manager=checkpoint_manager,
        validation_config=ValidationConfig(fn=lambda x: None),
    )
    vm._pending_validations = {
        task1: checkpoint1,
        task2: checkpoint2,
        task3: checkpoint3,
    }
    # First ray.wait: nothing done yet; second: all three tasks complete.
    mock_wait.side_effect = [([], [task1, task2, task3]), ([task1, task2, task3], [])]
    monkeypatch.setattr(ray, "get", lambda x: {"score": 1})

    # Call before_controller_shutdown
    vm.before_controller_shutdown()

    assert mock_wait.call_count == 2
    # One finished validation is processed inline during polling; the
    # remaining two are flushed together at shutdown.
    assert checkpoint_manager.update_checkpoints_with_metrics.mock_calls == [
        unittest.mock.call({checkpoint1: {"score": 1}}),
        unittest.mock.call({checkpoint2: {"score": 1}, checkpoint3: {"score": 1}}),
    ]
def test_before_init_train_context():
    """Each worker should be told that a validation function is configured."""
    manager = validation_manager.ValidationManager(
        checkpoint_manager=create_autospec(CheckpointManager, instance=True),
        validation_config=ValidationConfig(fn=lambda x: None),
    )
    mock_workers = [create_autospec(Worker, instance=True) for _ in range(4)]
    expected = {"has_validation_fn": [True, True, True, True]}
    assert manager.before_init_train_context(mock_workers) == expected
def test_checkpoint_validation_management_reordering(tmp_path):
    """Per-report fn_kwargs override the task_config defaults; queue order is FIFO."""
    checkpoint_manager = create_autospec(CheckpointManager, instance=True)

    def validation_fn(checkpoint, score):
        return {"score": score}

    vm = validation_manager.ValidationManager(
        checkpoint_manager=checkpoint_manager,
        validation_config=ValidationConfig(
            fn=validation_fn,
            task_config=ValidationTaskConfig(fn_kwargs={"score": 100}),
        ),
    )
    (
        low_initial_high_final_training_result,
        high_initial_low_final_training_result,
    ) = create_dummy_training_reports(
        num_results=2,
        storage_context=StorageContext(
            storage_path=tmp_path,
            experiment_dir_name="checkpoint_validation_management_reordering_experiment",
        ),
    )

    # Enqueue validation tasks
    vm.after_report(
        training_report=_TrainingReport(
            metrics=low_initial_high_final_training_result.metrics,
            checkpoint=low_initial_high_final_training_result.checkpoint,
            validation=ValidationTaskConfig(fn_kwargs={"score": 200}),
        ),
        metrics={},
    )
    vm.after_report(
        training_report=_TrainingReport(
            metrics=high_initial_low_final_training_result.metrics,
            checkpoint=high_initial_low_final_training_result.checkpoint,
            validation=True,
        ),
        metrics={},
    )

    # Assert ValidationManager state after each poll
    assert vm._poll_validations() == 0
    assert vm._kick_off_validations() == 1
    ray.wait(
        list(vm._pending_validations.keys()),
        num_returns=1,
    )
    assert vm._poll_validations() == 0
    assert vm._kick_off_validations() == 1
    # First report used its per-report override (200), not the default (100).
    checkpoint_manager.update_checkpoints_with_metrics.assert_called_once_with(
        {low_initial_high_final_training_result.checkpoint: {"score": 200}}
    )
    ray.wait(
        list(vm._pending_validations.keys()),
        num_returns=1,
    )
    assert vm._poll_validations() == 0
    assert vm._kick_off_validations() == 0
    # Second report passed validation=True, so the default kwargs applied.
    checkpoint_manager.update_checkpoints_with_metrics.assert_called_with(
        {high_initial_low_final_training_result.checkpoint: {"score": 100}}
    )
def test_checkpoint_validation_management_failure(tmp_path):
    """A validation fn returning a non-dict fails; empty metrics are recorded."""
    checkpoint_manager = create_autospec(CheckpointManager, instance=True)

    def failing_validation_fn(checkpoint):
        # Not a dict -> run_validation_fn raises ValueError inside the task.
        return "invalid_return_type"

    vm = validation_manager.ValidationManager(
        checkpoint_manager=checkpoint_manager,
        validation_config=ValidationConfig(fn=failing_validation_fn),
    )
    failing_training_result = create_dummy_training_reports(
        num_results=1,
        storage_context=StorageContext(
            storage_path=tmp_path,
            experiment_dir_name="checkpoint_validation_management_failure_experiment",
        ),
    )[0]
    vm.after_report(
        training_report=_TrainingReport(
            metrics=failing_training_result.metrics,
            checkpoint=failing_training_result.checkpoint,
            validation=ValidationTaskConfig(),
        ),
        metrics={},
    )
    assert vm._poll_validations() == 0
    assert vm._kick_off_validations() == 1
    ray.wait(
        list(vm._pending_validations.keys()),
        num_returns=1,
    )
    assert vm._poll_validations() == 0
    assert vm._kick_off_validations() == 0
    # Failed validations still update the checkpoint, with empty metrics.
    checkpoint_manager.update_checkpoints_with_metrics.assert_called_once_with(
        {failing_training_result.checkpoint: {}}
    )
def test_checkpoint_validation_management_success_after_retry(tmp_path):
    """ray_remote_kwargs retries let a once-failing validation eventually succeed."""

    @ray.remote
    class Counter:
        # Counts validation attempts across task retries.
        def __init__(self):
            self.value = 0

        def increment(self):
            self.value += 1
            return self.value

    counter = Counter.remote()

    def one_time_failing_validation_fn(checkpoint):
        print("one_time_failing_validation_fn called")
        # Fail the first attempt only; the retried attempt succeeds.
        if ray.get(counter.increment.remote()) < 2:
            raise ValueError("Fail on first attempt")
        return {"score": 100}

    checkpoint_manager = create_autospec(CheckpointManager, instance=True)
    vm = validation_manager.ValidationManager(
        checkpoint_manager=checkpoint_manager,
        validation_config=ValidationConfig(
            fn=one_time_failing_validation_fn,
            ray_remote_kwargs={"max_retries": 1, "retry_exceptions": [ValueError]},
        ),
    )
    training_result = create_dummy_training_reports(
        num_results=1,
        storage_context=StorageContext(
            storage_path=tmp_path,
            experiment_dir_name="checkpoint_validation_management_success_after_retry_experiment",
        ),
    )[0]
    vm.after_report(
        training_report=_TrainingReport(
            metrics=training_result.metrics,
            checkpoint=training_result.checkpoint,
            validation=True,
        ),
        metrics={},
    )
    assert vm._poll_validations() == 0
    assert vm._kick_off_validations() == 1
    ray.wait(
        list(vm._pending_validations.keys()),
        num_returns=1,
        timeout=100,
    )
    assert vm._poll_validations() == 0
    assert vm._kick_off_validations() == 0
    # After the retry, the successful metrics are recorded.
    checkpoint_manager.update_checkpoints_with_metrics.assert_called_once_with(
        {training_result.checkpoint: {"score": 100}}
    )
def test_checkpoint_validation_management_slow_validation_fn(tmp_path):
    """A cancelled (hung) validation task is processed as a failure with empty metrics."""
    checkpoint_manager = create_autospec(CheckpointManager, instance=True)

    def infinite_waiting_validation_fn(checkpoint):
        # Simulate a validation that never completes.
        while True:
            time.sleep(1)

    vm = validation_manager.ValidationManager(
        checkpoint_manager=checkpoint_manager,
        validation_config=ValidationConfig(fn=infinite_waiting_validation_fn),
    )
    timing_out_training_result = create_dummy_training_reports(
        num_results=1,
        storage_context=StorageContext(
            storage_path=tmp_path,
            experiment_dir_name="checkpoint_validation_management_slow_validation_fn_experiment",
        ),
    )[0]
    vm.after_report(
        training_report=_TrainingReport(
            metrics=timing_out_training_result.metrics,
            checkpoint=timing_out_training_result.checkpoint,
            validation=True,
        ),
        metrics={},
    )
    assert vm._poll_validations() == 0
    assert vm._kick_off_validations() == 1
    # Finish the task by cancelling it
    timing_out_task = next(iter(vm._pending_validations))
    ray.cancel(timing_out_task)
    with pytest.raises(ray.exceptions.TaskCancelledError):
        ray.get(timing_out_task)
    # Verify that poll processes finished task
    assert vm._poll_validations() == 0
    assert vm._kick_off_validations() == 0
    checkpoint_manager.update_checkpoints_with_metrics.assert_called_once_with(
        {
            timing_out_training_result.checkpoint: {},
        }
    )
def test_checkpoint_validation_management_resume(tmp_path):
    """On construction, pending (unfinished) validations are re-enqueued and run."""
    training_reports = create_dummy_training_reports(
        num_results=3,
        storage_context=StorageContext(
            storage_path=tmp_path,
            experiment_dir_name="checkpoint_validation_management_resume_experiment",
        ),
    )
    checkpoint_manager = create_autospec(CheckpointManager, instance=True)
    # Checkpoint 0 requests default validation, checkpoint 1 requests none,
    # checkpoint 2 carries per-report fn_kwargs overriding the default.
    checkpoint_manager.get_pending_training_results.return_value = {
        training_reports[0].checkpoint: (
            _TrainingResult(
                checkpoint=training_reports[0].checkpoint,
                metrics=training_reports[0].metrics,
            ),
            True,
        ),
        training_reports[1].checkpoint: (
            _TrainingResult(
                checkpoint=training_reports[1].checkpoint,
                metrics=training_reports[1].metrics,
            ),
            False,
        ),
        training_reports[2].checkpoint: (
            _TrainingResult(
                checkpoint=training_reports[2].checkpoint,
                metrics=training_reports[2].metrics,
            ),
            ValidationTaskConfig(fn_kwargs={"score": 2}),
        ),
    }

    def validation_fn(checkpoint, score):
        return {"score": score}

    vm = validation_manager.ValidationManager(
        checkpoint_manager=checkpoint_manager,
        validation_config=ValidationConfig(
            fn=validation_fn,
            task_config=ValidationTaskConfig(fn_kwargs={"score": 1}),
        ),
    )
    assert vm._poll_validations() == 0
    assert vm._kick_off_validations() == 1
    ray.wait(
        list(vm._pending_validations.keys()),
        num_returns=1,
    )
    assert vm._poll_validations() == 0
    assert vm._kick_off_validations() == 1
    # Checkpoint 0 validated with the default kwargs (score=1).
    checkpoint_manager.update_checkpoints_with_metrics.assert_called_once_with(
        {training_reports[0].checkpoint: {"score": 1}}
    )
    ray.wait(
        list(vm._pending_validations.keys()),
        num_returns=1,
    )
    assert vm._poll_validations() == 0
    assert vm._kick_off_validations() == 0
    # Checkpoint 1 was skipped (validation=False); checkpoint 2 used its override.
    checkpoint_manager.update_checkpoints_with_metrics.assert_called_with(
        {training_reports[2].checkpoint: {"score": 2}}
    )
if __name__ == "__main__":
    import sys

    # Run verbosely and stop at the first failure, forwarding pytest's exit code.
    sys.exit(pytest.main(["-v", "-x", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/tests/test_validation_manager.py",
"license": "Apache License 2.0",
"lines": 307,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:release/train_tests/multinode_persistence/test_v2_persistence.py | import logging
import os
import pickle
import re
import tempfile
import time
import uuid
from contextlib import contextmanager
from pathlib import Path
from typing import List, Optional, Tuple
import pyarrow.fs
import pytest
import ray
import ray.train
import ray.train.collective
from ray._common.test_utils import simulate_s3_bucket
from ray.air._internal.uri_utils import URI
from ray.train import (
Checkpoint,
CheckpointConfig,
FailureConfig,
RunConfig,
ScalingConfig,
)
from ray.train.v2._internal.constants import HEALTH_CHECK_INTERVAL_S_ENV_VAR
from ray.train.v2._internal.execution.storage import _download_from_fs_path
from ray.train.v2.api.data_parallel_trainer import DataParallelTrainer
class TestConstants:
    """Shared sizing constants for the persistence tests."""

    # One checkpoint and one artifact are produced per iteration.
    NUM_ITERATIONS = 6  # == num_checkpoints == num_artifacts
    NUM_TRIALS = 2
    NUM_WORKERS = 3

    # Metric key used for checkpoint scoring.
    SCORE_KEY = "score"
@contextmanager
def mock_s3_bucket_uri():
    """Start a local mock S3 server and yield a URI to a freshly created bucket."""
    port = 5002
    region = "us-west-2"
    with simulate_s3_bucket(port=port, region=region) as s3_uri:
        import boto3

        # Point boto3 at the local mock server rather than real AWS.
        s3 = boto3.client(
            "s3", region_name=region, endpoint_url=f"http://localhost:{port}"
        )
        # Bucket name will be autogenerated/unique per test
        bucket_name = URI(s3_uri).name
        s3.create_bucket(
            Bucket=bucket_name,
            CreateBucketConfiguration={"LocationConstraint": region},
        )
        # Disable server HTTP request logging
        logging.getLogger("werkzeug").setLevel(logging.WARNING)
        yield URI(s3_uri)
        # Restore request logging once the test is done with the bucket.
        logging.getLogger("werkzeug").setLevel(logging.INFO)
@contextmanager
def dummy_context_manager(*args, **kwargs):
    """No-op stand-in for a storage context manager; ignores all arguments."""
    placeholder = "dummy value"
    yield placeholder
@pytest.fixture(autouse=True, scope="module")
def ray_start_4_cpus():
    # Start one shared 4-CPU Ray cluster for all tests in this module,
    # and shut it down after the last test finishes.
    ray.init(num_cpus=4)
    yield
    ray.shutdown()
def _create_mock_custom_fs(custom_fs_root_dir: Path) -> pyarrow.fs.FileSystem:
    """Create a custom pyarrow filesystem rooted at ``custom_fs_root_dir``.

    Wraps a local filesystem through fsspec's DirFileSystem, so every path
    used with the returned filesystem is resolved relative to the root dir
    (i.e., the root acts as a path prefix).
    """
    from fsspec.implementations.dirfs import DirFileSystem
    from fsspec.implementations.local import LocalFileSystem

    custom_fs_root_dir.mkdir(parents=True, exist_ok=True)
    storage_filesystem = pyarrow.fs.PyFileSystem(
        pyarrow.fs.FSSpecHandler(
            DirFileSystem(path=str(custom_fs_root_dir), fs=LocalFileSystem())
        )
    )
    return storage_filesystem
@contextmanager
def _resolve_storage_type(
    storage_path_type: str, tmp_path: Path
) -> Tuple[str, Optional[pyarrow.fs.FileSystem]]:
    """Yield ``(storage_path, storage_filesystem)`` for the requested storage type.

    ``storage_path_type`` is one of "nfs", "cloud", or "custom_fs"; only the
    "custom_fs" case yields a non-None filesystem.
    """
    storage_path, storage_filesystem = None, None
    # Only the "cloud" case needs the mock S3 server; otherwise use a no-op.
    context_manager = (
        mock_s3_bucket_uri if storage_path_type == "cloud" else dummy_context_manager
    )
    with context_manager() as cloud_storage_path:
        if storage_path_type == "nfs":
            storage_path = str(tmp_path / "fake_nfs")
        elif storage_path_type == "cloud":
            storage_path = str(cloud_storage_path)
        elif storage_path_type == "custom_fs":
            storage_path = "mock_bucket"
            storage_filesystem = _create_mock_custom_fs(tmp_path / "custom_fs")

        yield storage_path, storage_filesystem
def _get_local_inspect_dir(
    root_local_path: Path,
    storage_path: str,
    storage_filesystem: Optional[pyarrow.fs.FileSystem],
    storage_local_path: Path = None,
) -> Tuple[Path, str]:
    """Downloads the storage path -> local dir for inspecting contents.

    Args:
        root_local_path: Local directory to download into (an "inspect"
            subdirectory is created under it).
        storage_path: Remote storage path/URI; falsy means local storage.
        storage_filesystem: Optional explicit filesystem for ``storage_path``;
            when None, the filesystem is inferred from the URI.
        storage_local_path: Local storage dir, used when ``storage_path`` is falsy.

    Returns:
        Tuple: (local_inspect_dir, storage_fs_path), where storage_fs_path
        is the path to the storage path on the filesystem (e.g., prefix stripped).
        This is used to check the correctness of paths returned from `Result`'s,
        since URIs are hard to do comparisons with.
    """
    local_inspect_dir = root_local_path / "inspect"
    if storage_path:
        if storage_filesystem:
            fs, storage_fs_path = storage_filesystem, storage_path
        else:
            fs, storage_fs_path = pyarrow.fs.FileSystem.from_uri(storage_path)
        _download_from_fs_path(
            fs=fs, fs_path=storage_fs_path, local_path=str(local_inspect_dir)
        )
    else:
        # Local storage: nothing to download; inspect the storage dir directly.
        fs, storage_fs_path = pyarrow.fs.LocalFileSystem(), str(storage_local_path)
        local_inspect_dir = storage_local_path
    return local_inspect_dir, storage_fs_path
def _get_checkpoint_epoch(checkpoint_dir_name: str) -> int:
"""Gets the checkpoint index from the checkpoint directory name."""
pattern = r"checkpoint_epoch=(\d+)"
match = re.search(pattern, checkpoint_dir_name)
assert match
return int(match.group(1))
def _create_checkpoint_shard_filename(rank_str: str) -> str:
return f"checkpoint_shard-rank={rank_str}.pkl"
def _get_checkpoint_shard_rank(checkpoint_shard_filename: str) -> int:
"""Get the checkpoint shard rank from the filename."""
pattern = _create_checkpoint_shard_filename(r"(\d+)")
match = re.search(pattern, checkpoint_shard_filename)
assert match
return int(match.group(1))
def train_fn(config):
    """Training loop used by the persistence tests.

    Restores iteration state from the latest checkpoint (if any), then for each
    iteration reports metrics plus a checkpoint (a shared checkpoint.pkl and a
    per-rank shard), optionally skipping the checkpoint on configured ranks and
    raising on configured failure iterations to exercise fault tolerance.
    """
    # Check that the working dir for each worker is the shared trial dir.
    # assert Path.cwd() == Path(train_session.storage.trial_working_directory).resolve()

    start = 0
    checkpoint = ray.train.get_checkpoint()
    if checkpoint:
        custom_restore_fn = config.get("custom_restore_fn")
        if custom_restore_fn:
            state = custom_restore_fn(checkpoint)
        else:
            with checkpoint.as_directory() as checkpoint_dir:
                with open(os.path.join(checkpoint_dir, "checkpoint.pkl"), "rb") as f:
                    state = pickle.load(f)
        print("Loaded back state from checkpoint:", state)
        # Resume from the iteration after the checkpointed one.
        start = state["iter"] + 1
        got = len(ray.train.get_all_reported_checkpoints())
        # num_to_keep caps how many earlier checkpoints can still be visible.
        expected = min(start, config.get("num_to_keep", float("inf")))
        assert got == expected, f"Expected {expected} checkpoints, got {got}"

    for i in range(start, config.get("num_iterations", 5)):
        time.sleep(config.get("time_per_iter", 0.25))
        metrics = {"iter": i, TestConstants.SCORE_KEY: i}

        rank = ray.train.get_context().get_world_rank()
        if rank in config.get("no_checkpoint_ranks", []):
            # This rank reports metrics only -- no checkpoint shard.
            ray.train.report(
                metrics, checkpoint=None, checkpoint_dir_name=f"checkpoint_epoch={i}"
            )
        else:
            with tempfile.TemporaryDirectory() as temp_dir:
                # Shared checkpoint file written identically by every rank.
                with open(os.path.join(temp_dir, "checkpoint.pkl"), "wb") as f:
                    pickle.dump({"iter": i}, f)

                # Per-rank checkpoint shard.
                checkpoint_file_name = _create_checkpoint_shard_filename(str(rank))
                with open(os.path.join(temp_dir, checkpoint_file_name), "wb") as f:
                    pickle.dump({"iter": i}, f)

                with config.get("custom_save_fn", dummy_context_manager)(temp_dir):
                    ray.train.report(
                        metrics,
                        checkpoint=Checkpoint.from_directory(temp_dir),
                        checkpoint_dir_name=f"checkpoint_epoch={i}",
                    )
                # `train.report` should not have deleted this!
                assert os.path.exists(temp_dir)

        # TODO: This barrier before raising is a workaround to deflake the test.
        # In this test setup, rank 0 is the fast-reporting worker
        # that does not upload a checkpoint.
        # If rank 0 raises an error immediately after getting past `report`,
        # the next iteration of the control loop will handle the failure
        # and the checkpoints from all other ranks will not be processed.
        # This results in an earlier checkpoint getting used during restoration,
        # which will cause the test assertions to fail.
        # This should be fixed by forcing a queue flush on all workers before
        # executing the failure decisions.
        ray.train.collective.barrier()

        if i in config.get("fail_iters", []):
            got = len(ray.train.get_all_reported_checkpoints())
            expected = min(i + 1, config.get("num_to_keep", float("inf")))
            assert got == expected, f"Expected {expected} checkpoints, got {got}"
            raise RuntimeError(f"Failing on iter={i}!!")
def _assert_storage_contents(
    local_inspect_dir: Path,
    exp_name: str,
    checkpoint_config: CheckpointConfig,
    no_checkpoint_ranks: Optional[List[int]] = None,
    constants: type = TestConstants,
):
    """Assert the downloaded experiment directory has the expected layout.

    Args:
        local_inspect_dir: Local directory the storage path was downloaded to.
        exp_name: Expected name of the single experiment directory.
        checkpoint_config: Provides ``num_to_keep`` to derive how many
            checkpoints should remain (all of them if unset).
        no_checkpoint_ranks: Worker ranks that reported without a checkpoint;
            their shard files must be absent.
        constants: Namespace providing NUM_ITERATIONS / NUM_WORKERS.
    """
    no_checkpoint_ranks = no_checkpoint_ranks or []

    # Inspect the contents of the storage path: exactly one experiment dir.
    storage_path_ls = list(local_inspect_dir.glob("*"))
    assert len(storage_path_ls) == 1  # Only expect 1 experiment dir
    exp_dir = storage_path_ls[0]
    assert exp_dir.name == exp_name

    # Check checkpoint contents.
    # If set, expect num_to_keep. Otherwise, expect to see all of them.
    expected_num_checkpoints = checkpoint_config.num_to_keep or constants.NUM_ITERATIONS

    assert len(list(exp_dir.glob("checkpoint_epoch=*"))) == expected_num_checkpoints
    checkpoint_epochs = sorted(
        [
            _get_checkpoint_epoch(checkpoint_dir.name)
            for checkpoint_dir in exp_dir.glob("checkpoint_epoch=*")
        ]
    )
    # Ex: If num_to_keep=2 out of 6 total checkpoints,
    # expect checkpoint_epoch=4 and checkpoint_epoch=5 (the latest ones).
    assert checkpoint_epochs == list(
        range(
            constants.NUM_ITERATIONS - expected_num_checkpoints,
            constants.NUM_ITERATIONS,
        )
    )

    for checkpoint_dir in exp_dir.glob("checkpoint_epoch=*"):
        # 1 shared checkpoint.pkl file, written by the trainable / all workers.
        assert len(list(checkpoint_dir.glob("checkpoint.pkl"))) == 1

        # FIX: this assertion was previously guarded by `if test_trainer:`,
        # which referenced the module-level test *function* object -- always
        # truthy, so the guard was dead code; assert unconditionally instead.
        # 1 checkpoint shard per worker,
        # unless the worker did not report a checkpoint (no_checkpoint_ranks).
        assert {
            _get_checkpoint_shard_rank(checkpoint_shard.name)
            for checkpoint_shard in checkpoint_dir.glob("checkpoint_shard-*.pkl")
        } == {
            i for i in range(constants.NUM_WORKERS) if i not in no_checkpoint_ranks
        }
@pytest.mark.parametrize("storage_path_type", ["nfs", "cloud", "custom_fs"])
@pytest.mark.parametrize(
"checkpoint_config",
[
CheckpointConfig(),
CheckpointConfig(
num_to_keep=1,
checkpoint_score_attribute=TestConstants.SCORE_KEY,
checkpoint_score_order="max",
),
],
)
def test_trainer(
monkeypatch, tmp_path, storage_path_type, checkpoint_config: CheckpointConfig
):
"""End-to-end test that runs Train with many `storage_path_type` options:
- storage_path="nfs" --> save locally to a fake NFS path
- storage_path="cloud" --> save to a mock S3 bucket
- storage_path="custom_fs" --> save to a custom pyarrow filesystem
- The custom fs is a local filesystem that appends a path prefix to every path.
This is the expected output at the storage path:
{RunConfig.storage_path}/{RunConfig.name}
βββ checkpoint_epoch={epoch} <- Checkpoint directories with custom name
βββ checkpoint.pkl <- Shared checkpoint file
βββ checkpoint_shard-rank=0.pkl <- Worker checkpoint shards
βββ checkpoint_shard-rank=1.pkl
βββ ...
"""
health_check_interval_s = 0.1
monkeypatch.setenv(HEALTH_CHECK_INTERVAL_S_ENV_VAR, str(health_check_interval_s))
# Make report time slightly longer than health check interval.
# This is arbitrary but is meant to mimic a somewhat realistic scenario.
time_between_reports = health_check_interval_s * 2
exp_name = f"trainer_persistence_test-{uuid.uuid4().hex}"
no_checkpoint_ranks = [0]
if checkpoint_config.num_to_keep:
num_to_keep = checkpoint_config.num_to_keep
else:
num_to_keep = float("inf")
with _resolve_storage_type(storage_path_type, tmp_path) as (
storage_path,
storage_filesystem,
):
run_config = RunConfig(
storage_path=storage_path,
storage_filesystem=storage_filesystem,
name=exp_name,
checkpoint_config=checkpoint_config,
failure_config=FailureConfig(max_failures=2),
)
trainer = DataParallelTrainer(
train_fn,
train_loop_config={
"num_iterations": TestConstants.NUM_ITERATIONS,
"fail_iters": [2, 4],
# Test that global rank 0 is not required to checkpoint.
"no_checkpoint_ranks": no_checkpoint_ranks,
"time_per_iter": time_between_reports,
"num_to_keep": num_to_keep,
},
scaling_config=ScalingConfig(num_workers=TestConstants.NUM_WORKERS),
run_config=run_config,
)
print("\nStarting initial run.\n")
result = trainer.fit()
print("\nStarting manually restored run.\n")
restored_trainer = DataParallelTrainer(
train_fn,
train_loop_config={
"num_iterations": TestConstants.NUM_ITERATIONS,
"fail_iters": [2, 4],
# Test that global rank 0 is not required to checkpoint.
"no_checkpoint_ranks": no_checkpoint_ranks,
"time_per_iter": time_between_reports,
"num_to_keep": num_to_keep,
},
scaling_config=ScalingConfig(num_workers=TestConstants.NUM_WORKERS),
run_config=run_config,
)
result = restored_trainer.fit()
local_inspect_dir, storage_fs_path = _get_local_inspect_dir(
root_local_path=tmp_path,
storage_path=run_config.storage_path,
storage_filesystem=storage_filesystem,
)
# First, inspect that the result object returns the correct paths.
print(result)
run_path = result.path
assert run_path.startswith(storage_fs_path)
for checkpoint, _ in result.best_checkpoints:
assert checkpoint.path.startswith(run_path)
_assert_storage_contents(
local_inspect_dir,
exp_name,
checkpoint_config,
no_checkpoint_ranks=no_checkpoint_ranks,
)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-x", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "release/train_tests/multinode_persistence/test_v2_persistence.py",
"license": "Apache License 2.0",
"lines": 326,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/tune/examples/custom_checkpointing_with_callback.py | # Example demonstrating how to use SHOULD_CHECKPOINT in a tuner callback
# for smart checkpointing logic. This shows how to trigger checkpointing from
# callbacks based on training progress rather than fixed intervals.
import argparse
import json
import os
import time
from ray import tune
from ray.tune import Callback
from ray.tune.result import SHOULD_CHECKPOINT
# Hint: SHOULD_CHECKPOINT is an alias of the string "should_checkpoint"
# Some dummy function
def evaluation_fn(step, width, height):
time.sleep(0.1)
return (0.1 + width * step / 100) ** (-1) + height * 0.1
class SmartCheckpointCallback(Callback):
    """Custom callback that triggers checkpointing by updating the result dict.

    This callback demonstrates checkpointing logic beyond
    simple periodic checkpointing. It checkpoints based on performance improvements
    or when the loss becomes unstable.

    Args:
        checkpoint_on_improvement: Checkpoint when loss improves significantly
        checkpoint_on_instability: Checkpoint when loss becomes unstable
    """

    def __init__(
        self,
        *,
        checkpoint_on_improvement: bool = True,
        checkpoint_on_instability: bool = True,
    ):
        self.checkpoint_on_improvement = checkpoint_on_improvement
        self.checkpoint_on_instability = checkpoint_on_instability
        # Per-trial tracking state, keyed by trial_id.
        self.best_loss_per_trial = {}
        self.recent_losses_per_trial = {}

    def on_trial_result(self, iteration, trials, trial, result, **info):
        """Called after receiving a result from the trainable.

        This hook implements intelligent checkpointing logic:
        1. Checkpoint when we see significant improvement
        2. Checkpoint when loss becomes unstable (variance increases)
        3. Always checkpoint at specific milestones (every 10 steps)
        """
        trial_id = trial.trial_id
        current_loss = result.get("mean_loss", float("inf"))
        current_step = result.get("iterations", 0)

        # Initialize tracking for this trial
        if trial_id not in self.best_loss_per_trial:
            self.best_loss_per_trial[trial_id] = float("inf")
            self.recent_losses_per_trial[trial_id] = []

        should_checkpoint = False
        # NOTE: Later checks overwrite `reason`, so the message printed below
        # reflects the *last* trigger that fired, not necessarily the first.
        reason = ""

        # 1. Checkpoint every 10 steps as a baseline
        if current_step > 0 and current_step % 10 == 0:
            should_checkpoint = True
            reason = f"milestone at step {current_step}"

        # 2. Checkpoint on significant improvement
        # (The initial best is inf, so the very first result always counts.)
        if self.checkpoint_on_improvement:
            if (
                current_loss < self.best_loss_per_trial[trial_id] * 0.9
            ):  # 10% improvement
                should_checkpoint = True
                reason = f"significant improvement: {current_loss:.4f} < {self.best_loss_per_trial[trial_id]:.4f}"
                self.best_loss_per_trial[trial_id] = current_loss

        # 3. Checkpoint on instability (high variance in recent losses)
        if self.checkpoint_on_instability and current_step > 5:
            recent_losses = self.recent_losses_per_trial[trial_id]
            recent_losses.append(current_loss)
            if len(recent_losses) > 5:
                recent_losses.pop(0)  # Keep only last 5 losses
            if len(recent_losses) == 5:
                # Population variance over the 5-loss sliding window.
                variance = (
                    sum((x - sum(recent_losses) / 5) ** 2 for x in recent_losses) / 5
                )
                if variance > 0.1:  # High variance threshold
                    should_checkpoint = True
                    reason = f"instability detected: variance={variance:.4f}"
        else:
            # Track recent losses
            recent_losses = self.recent_losses_per_trial[trial_id]
            recent_losses.append(current_loss)
            if len(recent_losses) > 5:
                recent_losses.pop(0)

        if should_checkpoint:
            print(
                f"Callback requesting checkpoint for trial {trial_id} at step {current_step}: {reason}"
            )
            # Setting this key in the result asks Tune to checkpoint the trial.
            result[SHOULD_CHECKPOINT] = True
class OptimizationTrainable(tune.Trainable):
    """A simple trainable that demonstrates automatic checkpointing with callbacks.

    Tune invokes ``save_checkpoint`` automatically whenever a reported result
    contains the SHOULD_CHECKPOINT key (set by the callback in this example).
    """

    def setup(self, config):
        """Read the hyperparameters and reset the step counter."""
        self.current_step = 0
        self.width = config["width"]
        self.height = config["height"]

    def step(self):
        """Run a single training iteration and report its metrics."""
        score = evaluation_fn(self.current_step, self.width, self.height)
        self.current_step += 1
        return {
            "iterations": self.current_step,
            "mean_loss": score,
            "step": self.current_step,  # For tracking
        }

    def save_checkpoint(self, checkpoint_dir):
        """Persist the trainable state as JSON inside ``checkpoint_dir``."""
        state = {"step": self.current_step, "width": self.width, "height": self.height}
        with open(os.path.join(checkpoint_dir, "checkpoint.json"), "w") as f:
            json.dump(state, f)
        print(f"Checkpoint saved at step {self.current_step}")

    def load_checkpoint(self, checkpoint):
        """Restore the trainable state from a previously saved checkpoint."""
        with open(os.path.join(checkpoint, "checkpoint.json"), "r") as f:
            state = json.load(f)
        self.current_step = state["step"]
        self.width = state["width"]
        self.height = state["height"]
        print(f"Checkpoint loaded from step {self.current_step}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing"
)
args, _ = parser.parse_known_args()
print(
"=" * 60,
"Ray Tune Example: Smart Checkpointing with custom SHOULD_CHECKPOINT key",
"=" * 60,
"",
"This example demonstrates how to set the SHOULD_CHECKPOINT key in a callback",
"to implement intelligent checkpointing based on training progress.",
"",
"Key features:",
"- Callback-driven checkpointing by setting result[SHOULD_CHECKPOINT] = True",
"- Checkpoints triggered by performance improvements",
"- Milestone-based checkpointing every 10 steps",
"- Instability detection (high variance in recent losses)",
"- Automatic checkpoint save/load via class trainable",
sep="\n",
)
# Create the smart checkpoint callback
checkpoint_callback = SmartCheckpointCallback(
checkpoint_on_improvement=True, checkpoint_on_instability=True
)
tuner = tune.Tuner(
OptimizationTrainable,
run_config=tune.RunConfig(
name="smart_checkpoint_test",
stop={"training_iteration": 1 if args.smoke_test else 20},
callbacks=[checkpoint_callback], # Add our custom callback
# Disable automatic periodic checkpointing to show callback control
checkpoint_config=tune.CheckpointConfig(
checkpoint_frequency=0, # Disable periodic checkpointing
checkpoint_at_end=True, # Still checkpoint at the end
),
),
tune_config=tune.TuneConfig(
metric="mean_loss",
mode="min",
num_samples=3,
),
param_space={
"width": tune.randint(10, 100),
"height": tune.loguniform(10, 100),
},
)
print(
"Starting hyperparameter tuning with smart checkpointing...",
"Watch for checkpoint messages triggered by the callback!",
sep="\n",
)
results = tuner.fit()
best_result = results.get_best_result()
print(
"\n" + "=" * 60,
"RESULTS",
"=" * 60,
f"Best hyperparameters: {best_result.config}",
f"Best checkpoint: {best_result.checkpoint}",
"",
"The checkpoints were triggered by the SmartCheckpointCallback",
sep="\n",
)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tune/examples/custom_checkpointing_with_callback.py",
"license": "Apache License 2.0",
"lines": 187,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/datasource/util.py | from typing import Any, Dict, Iterable, Tuple
import ray
from ray.data.block import Block
def _iter_sliced_blocks(
    blocks: Iterable[Block], per_task_row_limit: int
) -> Iterable[Block]:
    """Yield blocks until ``per_task_row_limit`` rows have been produced.

    Full blocks are passed through unchanged; the final block is sliced
    (with a copy) when only part of it fits under the limit.
    """
    from ray.data.block import BlockAccessor

    emitted_rows = 0
    for block in blocks:
        if emitted_rows >= per_task_row_limit:
            break
        accessor = BlockAccessor.for_block(block)
        num_rows = accessor.num_rows()
        remaining = per_task_row_limit - emitted_rows
        if num_rows <= remaining:
            # Whole block fits under the limit.
            yield block
            emitted_rows += num_rows
        else:
            # Slice the block to meet the limit exactly, then stop.
            yield accessor.slice(0, remaining, copy=True)
            break
def _validate_head_node_resources_for_local_scheduling(
ray_remote_args: Dict[str, Any],
*,
op_description: str,
default_num_cpus: int = 1,
default_num_gpus: int = 0,
default_memory: int = 0,
) -> None:
"""Ensure the head node has enough resources before pinning work there.
Local paths (``local://``) and other driver-local I/O schedule tasks on the
head node via ``NodeAffinitySchedulingStrategy``. If the head node was
intentionally started with zero logical resources (a common practice to
avoid OOMs), those tasks become unschedulable. Detect this upfront and
raise a clear error with remediation steps.
"""
# Ray defaults to reserving 1 CPU per task when num_cpus isn't provided.
num_cpus = ray_remote_args.get("num_cpus", default_num_cpus)
num_gpus = ray_remote_args.get("num_gpus", default_num_gpus)
memory = ray_remote_args.get("memory", default_memory)
# Resource keys follow the Resources map of ray.nodes() (e.g., CPU, GPU, memory).
required_resources: Dict[str, float] = {}
required_resources["CPU"] = float(num_cpus)
required_resources["GPU"] = float(num_gpus)
required_resources["memory"] = float(memory)
# Include any additional custom resources requested.
custom_resources = ray_remote_args.get("resources", {})
for name, amount in custom_resources.items():
if amount is None:
continue
try:
amount = float(amount)
except (TypeError, ValueError) as err:
raise ValueError(f"Invalid resource amount for '{name}': {amount}") from err
required_resources[name] = amount
head_node = next(
(
node
for node in ray.nodes()
if node.get("Alive")
and "node:__internal_head__" in node.get("Resources", {})
),
None,
)
if not head_node:
# The head node metadata is unavailable (e.g., during shutdown). Fall back
# to the default behavior and let Ray surface its own error.
return
# Build a map of required vs available resources on the head node.
head_resources: Dict[str, float] = head_node.get("Resources", {})
# Map: resource name -> (required, available).
insufficient: Dict[str, Tuple[float, float]] = {}
for name, req in required_resources.items():
avail = head_resources.get(name, 0.0)
if avail < req:
insufficient[name] = (req, avail)
# If nothing is below the required amount, we are good to proceed.
if not insufficient:
return
details = "; ".join(
f"{name} required {req:g} but head has {avail:g}"
for name, (req, avail) in insufficient.items()
)
raise ValueError(
f"{op_description} must run on the head node (e.g., for local:// paths), "
f"but the head node doesn't have enough resources: {details}. "
"Add resources to the head node, switch to a shared filesystem instead "
"of local://, or set the resource requests on this operation to 0 "
"(for example, num_cpus=0) so it can run without head resources."
)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/datasource/util.py",
"license": "Apache License 2.0",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/tests/test_autoscaler_azure.py | """Tests for Azure autoscaler availability zone functionality."""
import copy
import unittest
from unittest.mock import Mock, patch
from ray.autoscaler._private._azure.node_provider import AzureNodeProvider
class TestAzureAvailabilityZones(unittest.TestCase):
    """Test cases for Azure autoscaler availability zone support.

    Convention exercised throughout: ``[]`` means "auto-select zones",
    while ``None`` means "zones explicitly disabled".
    """

    def setUp(self):
        """Set up test fixtures."""
        self.provider_config = {
            "resource_group": "test-rg",
            "location": "westus2",
            "subscription_id": "test-sub-id",
        }
        self.cluster_name = "test-cluster"
        # Create a mock provider that doesn't initialize Azure clients
        # (the real __init__ is replaced by a no-op lambda).
        with patch.object(
            AzureNodeProvider,
            "__init__",
            lambda self, provider_config, cluster_name: None,
        ):
            self.provider = AzureNodeProvider(self.provider_config, self.cluster_name)
            self.provider.provider_config = self.provider_config
            self.provider.cluster_name = self.cluster_name

    def test_parse_availability_zones_none_input(self):
        """Test _parse_availability_zones with None input returns empty list."""
        result = self.provider._parse_availability_zones(None)
        self.assertEqual(result, [])

    def test_parse_availability_zones_empty_string(self):
        """Test _parse_availability_zones with empty string returns empty list."""
        result = self.provider._parse_availability_zones("")
        self.assertEqual(result, [])

    def test_parse_availability_zones_auto(self):
        """Test _parse_availability_zones with 'auto' returns empty list."""
        result = self.provider._parse_availability_zones("auto")
        self.assertEqual(result, [])

    def test_parse_availability_zones_whitespace_only(self):
        """Test _parse_availability_zones with whitespace-only string returns empty list."""
        result = self.provider._parse_availability_zones(" ")
        self.assertEqual(result, [])

    def test_parse_availability_zones_single_zone(self):
        """Test _parse_availability_zones with single zone string."""
        result = self.provider._parse_availability_zones("1")
        self.assertEqual(result, ["1"])

    def test_parse_availability_zones_multiple_zones(self):
        """Test _parse_availability_zones with comma-separated zones."""
        result = self.provider._parse_availability_zones("1,2,3")
        self.assertEqual(result, ["1", "2", "3"])

    def test_parse_availability_zones_zones_with_spaces(self):
        """Test _parse_availability_zones with spaces around zones."""
        result = self.provider._parse_availability_zones("1, 2, 3")
        self.assertEqual(result, ["1", "2", "3"])

    def test_parse_availability_zones_zones_with_extra_spaces(self):
        """Test _parse_availability_zones with extra spaces and tabs."""
        result = self.provider._parse_availability_zones("  1 ,  2 , 3  ")
        self.assertEqual(result, ["1", "2", "3"])

    def test_parse_availability_zones_none_disable_case_insensitive(self):
        """Test _parse_availability_zones with 'none' variations disables zones."""
        test_cases = ["none", "None", "NONE"]
        for case in test_cases:
            with self.subTest(case=case):
                result = self.provider._parse_availability_zones(case)
                self.assertIsNone(result)

    def test_parse_availability_zones_null_disable_case_insensitive(self):
        """Test _parse_availability_zones with 'null' variations disables zones."""
        test_cases = ["null", "Null", "NULL"]
        for case in test_cases:
            with self.subTest(case=case):
                result = self.provider._parse_availability_zones(case)
                self.assertIsNone(result)

    def test_parse_availability_zones_invalid_type(self):
        """Test _parse_availability_zones with invalid input type raises ValueError."""
        with self.assertRaises(ValueError) as context:
            self.provider._parse_availability_zones(123)
        self.assertIn("availability_zone must be a string", str(context.exception))
        self.assertIn("got int: 123", str(context.exception))

    def test_parse_availability_zones_list_input_invalid(self):
        """Test _parse_availability_zones with list input raises ValueError."""
        with self.assertRaises(ValueError) as context:
            self.provider._parse_availability_zones(["1", "2", "3"])
        self.assertIn("availability_zone must be a string", str(context.exception))

    def test_parse_availability_zones_dict_input_invalid(self):
        """Test _parse_availability_zones with dict input raises ValueError."""
        with self.assertRaises(ValueError) as context:
            self.provider._parse_availability_zones({"zones": ["1", "2"]})
        self.assertIn("availability_zone must be a string", str(context.exception))

    def test_parse_availability_zones_numeric_zones(self):
        """Test _parse_availability_zones with numeric zone strings."""
        result = self.provider._parse_availability_zones("1,2,3")
        self.assertEqual(result, ["1", "2", "3"])

    def test_parse_availability_zones_alpha_zones(self):
        """Test _parse_availability_zones with alphabetic zone strings."""
        result = self.provider._parse_availability_zones("east,west,central")
        self.assertEqual(result, ["east", "west", "central"])

    def test_parse_availability_zones_mixed_zones(self):
        """Test _parse_availability_zones with mixed numeric and alpha zones."""
        result = self.provider._parse_availability_zones("1,zone-b,3")
        self.assertEqual(result, ["1", "zone-b", "3"])
class TestAzureAvailabilityZonePrecedence(unittest.TestCase):
    """Test cases for Azure availability zone precedence logic.

    Precedence under test: a node-level ``availability_zone`` (in
    ``azure_arm_parameters``) overrides the provider-level setting, which in
    turn overrides the default (auto-selection, represented as ``[]``).
    """

    def setUp(self):
        """Set up test fixtures."""
        self.base_provider_config = {
            "resource_group": "test-rg",
            "location": "westus2",
            "subscription_id": "test-sub-id",
        }
        self.cluster_name = "test-cluster"

    def _create_mock_provider(self, provider_config=None):
        """Create a mock Azure provider for testing."""
        # Start from a deep copy so tests can't mutate the shared base config.
        config = copy.deepcopy(self.base_provider_config)
        if provider_config:
            config.update(provider_config)
        with patch.object(
            AzureNodeProvider,
            "__init__",
            lambda self, provider_config, cluster_name: None,
        ):
            provider = AzureNodeProvider(config, self.cluster_name)
            provider.provider_config = config
            provider.cluster_name = self.cluster_name
            # Mock the validation method to avoid Azure API calls
            provider._validate_zones_for_node_pool = Mock(
                side_effect=lambda zones, location, vm_size: zones
            )
        return provider

    def _extract_zone_logic(self, provider, node_config):
        """Extract zone determination logic similar to _create_node method."""
        node_availability_zone = node_config.get("azure_arm_parameters", {}).get(
            "availability_zone"
        )
        provider_availability_zone = provider.provider_config.get("availability_zone")
        # Node-level setting wins; fall back to provider-level, then default.
        if node_availability_zone is not None:
            return (
                provider._parse_availability_zones(node_availability_zone),
                "node config availability_zone",
            )
        elif provider_availability_zone is not None:
            return (
                provider._parse_availability_zones(provider_availability_zone),
                "provider availability_zone",
            )
        else:
            return ([], "default")

    def test_node_availability_zone_overrides_provider(self):
        """Test that node-level availability_zone overrides provider-level."""
        provider = self._create_mock_provider({"availability_zone": "1,2"})
        node_config = {
            "azure_arm_parameters": {
                "vmSize": "Standard_D2s_v3",
                "availability_zone": "3",
            }
        }
        zones, source = self._extract_zone_logic(provider, node_config)
        self.assertEqual(zones, ["3"])
        self.assertEqual(source, "node config availability_zone")

    def test_provider_availability_zone_used_when_no_node_override(self):
        """Test that provider-level availability_zone is used when no node override."""
        provider = self._create_mock_provider({"availability_zone": "1,2"})
        node_config = {"azure_arm_parameters": {"vmSize": "Standard_D2s_v3"}}
        zones, source = self._extract_zone_logic(provider, node_config)
        self.assertEqual(zones, ["1", "2"])
        self.assertEqual(source, "provider availability_zone")

    def test_none_disables_zones_at_node_level(self):
        """Test that 'none' at node level disables zones even with provider zones."""
        provider = self._create_mock_provider({"availability_zone": "1,2"})
        node_config = {
            "azure_arm_parameters": {
                "vmSize": "Standard_D2s_v3",
                "availability_zone": "none",
            }
        }
        zones, source = self._extract_zone_logic(provider, node_config)
        self.assertIsNone(zones)
        self.assertEqual(source, "node config availability_zone")

    def test_no_zones_when_neither_provider_nor_node_specify(self):
        """Test default behavior when neither provider nor node specify zones."""
        provider = self._create_mock_provider()
        node_config = {"azure_arm_parameters": {"vmSize": "Standard_D2s_v3"}}
        zones, source = self._extract_zone_logic(provider, node_config)
        self.assertEqual(zones, [])
        self.assertEqual(source, "default")

    def test_node_empty_string_overrides_provider_zones(self):
        """Test that node empty string overrides provider zones (auto-selection)."""
        provider = self._create_mock_provider({"availability_zone": "1,2"})
        node_config = {
            "azure_arm_parameters": {
                "vmSize": "Standard_D2s_v3",
                "availability_zone": "",
            }
        }
        zones, source = self._extract_zone_logic(provider, node_config)
        self.assertEqual(zones, [])
        self.assertEqual(source, "node config availability_zone")

    def test_node_auto_overrides_provider_zones(self):
        """Test that node 'auto' overrides provider zones (auto-selection)."""
        provider = self._create_mock_provider({"availability_zone": "1,2"})
        node_config = {
            "azure_arm_parameters": {
                "vmSize": "Standard_D2s_v3",
                "availability_zone": "auto",
            }
        }
        zones, source = self._extract_zone_logic(provider, node_config)
        self.assertEqual(zones, [])
        self.assertEqual(source, "node config availability_zone")

    def test_provider_none_disables_zones(self):
        """Test that provider-level 'none' disables zones."""
        provider = self._create_mock_provider({"availability_zone": "none"})
        node_config = {"azure_arm_parameters": {"vmSize": "Standard_D2s_v3"}}
        zones, source = self._extract_zone_logic(provider, node_config)
        self.assertIsNone(zones)
        self.assertEqual(source, "provider availability_zone")

    def test_provider_empty_string_allows_auto_selection(self):
        """Test that provider-level empty string allows auto-selection."""
        provider = self._create_mock_provider({"availability_zone": ""})
        node_config = {"azure_arm_parameters": {"vmSize": "Standard_D2s_v3"}}
        zones, source = self._extract_zone_logic(provider, node_config)
        self.assertEqual(zones, [])
        self.assertEqual(source, "provider availability_zone")

    def test_provider_auto_allows_auto_selection(self):
        """Test that provider-level 'auto' allows auto-selection."""
        provider = self._create_mock_provider({"availability_zone": "auto"})
        node_config = {"azure_arm_parameters": {"vmSize": "Standard_D2s_v3"}}
        zones, source = self._extract_zone_logic(provider, node_config)
        self.assertEqual(zones, [])
        self.assertEqual(source, "provider availability_zone")

    def test_node_null_overrides_provider_zones(self):
        """Test that node-level 'null' overrides provider zones."""
        provider = self._create_mock_provider({"availability_zone": "1,2,3"})
        node_config = {
            "azure_arm_parameters": {
                "vmSize": "Standard_D2s_v3",
                "availability_zone": "null",
            }
        }
        zones, source = self._extract_zone_logic(provider, node_config)
        self.assertIsNone(zones)
        self.assertEqual(source, "node config availability_zone")

    def test_provider_null_disables_zones(self):
        """Test that provider-level 'null' disables zones."""
        provider = self._create_mock_provider({"availability_zone": "NULL"})
        node_config = {"azure_arm_parameters": {"vmSize": "Standard_D2s_v3"}}
        zones, source = self._extract_zone_logic(provider, node_config)
        self.assertIsNone(zones)
        self.assertEqual(source, "provider availability_zone")

    def test_complex_override_scenario(self):
        """Test complex scenario with multiple node types and different overrides."""
        provider = self._create_mock_provider({"availability_zone": "1,2,3"})
        # Test different node configurations
        test_cases = [
            # Node with specific zone override
            {
                "config": {
                    "azure_arm_parameters": {
                        "vmSize": "Standard_D2s_v3",
                        "availability_zone": "2",
                    }
                },
                "expected_zones": ["2"],
                "expected_source": "node config availability_zone",
            },
            # Node with disabled zones
            {
                "config": {
                    "azure_arm_parameters": {
                        "vmSize": "Standard_D2s_v3",
                        "availability_zone": "none",
                    }
                },
                "expected_zones": None,
                "expected_source": "node config availability_zone",
            },
            # Node with auto-selection
            {
                "config": {
                    "azure_arm_parameters": {
                        "vmSize": "Standard_D2s_v3",
                        "availability_zone": "",
                    }
                },
                "expected_zones": [],
                "expected_source": "node config availability_zone",
            },
            # Node using provider default
            {
                "config": {"azure_arm_parameters": {"vmSize": "Standard_D2s_v3"}},
                "expected_zones": ["1", "2", "3"],
                "expected_source": "provider availability_zone",
            },
        ]
        for i, test_case in enumerate(test_cases):
            with self.subTest(case=i):
                zones, source = self._extract_zone_logic(provider, test_case["config"])
                self.assertEqual(zones, test_case["expected_zones"])
                self.assertEqual(source, test_case["expected_source"])

    def test_mixed_case_precedence(self):
        """Test precedence with mixed case 'none' values."""
        provider = self._create_mock_provider({"availability_zone": "None"})
        node_config = {
            "azure_arm_parameters": {
                "vmSize": "Standard_D2s_v3",
                "availability_zone": "NONE",
            }
        }
        zones, source = self._extract_zone_logic(provider, node_config)
        # Both should be None (disabled), but node should take precedence
        self.assertIsNone(zones)
        self.assertEqual(source, "node config availability_zone")

    def test_whitespace_handling_in_precedence(self):
        """Test that whitespace is properly handled in precedence logic."""
        provider = self._create_mock_provider({"availability_zone": " 1, 2, 3 "})
        node_config = {
            "azure_arm_parameters": {
                "vmSize": "Standard_D2s_v3",
                "availability_zone": " 2 ",
            }
        }
        zones, source = self._extract_zone_logic(provider, node_config)
        self.assertEqual(zones, ["2"])
        self.assertEqual(source, "node config availability_zone")
if __name__ == "__main__":
unittest.main()
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/test_autoscaler_azure.py",
"license": "Apache License 2.0",
"lines": 323,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:release/hello_world_tests/hello_world_emoji.py | import ray
import emoji
@ray.remote
def hello_world_emoji():
    """Remote task: convert the ``:globe_showing_Americas:`` alias into its
    emoji character via ``emoji.emojize``."""
    return emoji.emojize(":globe_showing_Americas:")
def main():
    """Launch the remote task, block on its result, and print it."""
    print(ray.get(hello_world_emoji.remote()))
if __name__ == "__main__":
main()
| {
"repo_id": "ray-project/ray",
"file_path": "release/hello_world_tests/hello_world_emoji.py",
"license": "Apache License 2.0",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/serve/_private/exceptions.py | class DeploymentIsBeingDeletedError(Exception):
"""Raised when an operation is attempted on a deployment that is being deleted."""
pass
class ExternalScalerDisabledError(Exception):
    """The external scaling API was used while ``external_scaler_enabled``
    is False."""
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/_private/exceptions.py",
"license": "Apache License 2.0",
"lines": 6,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:release/ray_release/tests/test_custom_image_build_and_test_init.py | import json
import os
import sys
from unittest.mock import patch
import pytest
import yaml
from click.testing import CliRunner
from ray_release.scripts.custom_image_build_and_test_init import main
# Bazel sets BUILD_WORKSPACE_DIRECTORY when running under `bazel run`;
# the empty-string fallback makes joined paths relative to the CWD otherwise.
_bazel_workspace_dir = os.environ.get("BUILD_WORKSPACE_DIRECTORY", "")
@patch.dict("os.environ", {"BUILDKITE": "1"})
@patch.dict("os.environ", {"RAYCI_BUILD_ID": "a1b2c3d4"})
@patch("ray_release.test.Test.update_from_s3", return_value=None)
@patch("ray_release.test.Test.is_jailed_with_open_issue", return_value=False)
def test_custom_image_build_and_test_init(
mock_update_from_s3, mock_is_jailed_with_open_issue
):
runner = CliRunner()
custom_build_jobs_output_file = "custom_build_jobs.yaml"
test_jobs_output_file = "test_jobs.json"
result = runner.invoke(
main,
[
"--test-collection-file",
"release/ray_release/tests/sample_tests.yaml",
"--global-config",
"oss_config.yaml",
"--frequency",
"nightly",
"--run-jailed-tests",
"--run-unstable-tests",
"--test-filters",
"prefix:hello_world",
"--custom-build-jobs-output-file",
custom_build_jobs_output_file,
"--test-jobs-output-file",
test_jobs_output_file,
],
catch_exceptions=False,
)
with open(
os.path.join(_bazel_workspace_dir, custom_build_jobs_output_file), "r"
) as f:
custom_build_jobs = yaml.safe_load(f)
assert len(custom_build_jobs["steps"]) == 1 # 1 custom build job
with open(os.path.join(_bazel_workspace_dir, test_jobs_output_file), "r") as f:
test_jobs = json.load(f)
assert len(test_jobs) == 1 # 1 group
assert len(test_jobs[0]["steps"]) == 2 # 2 tests
assert test_jobs[0]["steps"][0]["label"].startswith("hello_world.aws")
assert test_jobs[0]["steps"][1]["label"].startswith("hello_world_custom.aws")
assert result.exit_code == 0
@patch.dict("os.environ", {"BUILDKITE": "1"})
@patch.dict("os.environ", {"RAYCI_BUILD_ID": "a1b2c3d4"})
@patch("ray_release.test.Test.update_from_s3", return_value=None)
@patch("ray_release.test.Test.is_jailed_with_open_issue", return_value=False)
def test_custom_image_build_and_test_init_with_block_step(
mock_update_from_s3, mock_is_jailed_with_open_issue
):
num_tests_expected = 5
runner = CliRunner()
custom_build_jobs_output_file = "custom_build_jobs.yaml"
test_jobs_output_file = "test_jobs.json"
result = runner.invoke(
main,
[
"--test-collection-file",
"release/ray_release/tests/sample_5_tests.yaml",
"--global-config",
"oss_config.yaml",
"--frequency",
"nightly",
"--run-jailed-tests",
"--run-unstable-tests",
"--test-filters",
"prefix:hello_world",
"--custom-build-jobs-output-file",
custom_build_jobs_output_file,
"--test-jobs-output-file",
test_jobs_output_file,
],
catch_exceptions=False,
)
with open(
os.path.join(_bazel_workspace_dir, custom_build_jobs_output_file), "r"
) as f:
custom_build_jobs = yaml.safe_load(f)
assert len(custom_build_jobs["steps"]) == 1 # 1 custom build job
with open(os.path.join(_bazel_workspace_dir, test_jobs_output_file), "r") as f:
test_jobs = json.load(f)
print(test_jobs)
assert len(test_jobs) == 2 # 2 groups: block and hello_world
assert len(test_jobs[0]["steps"]) == 1 # 1 block step
assert test_jobs[0]["steps"][0]["block"] == "Run release tests"
assert test_jobs[0]["steps"][0]["key"] == "block_run_release_tests"
assert (
test_jobs[0]["steps"][0]["prompt"]
== f"You are triggering {num_tests_expected} tests. Do you want to proceed?"
)
assert len(test_jobs[1]["steps"]) == num_tests_expected # 5 tests
assert test_jobs[1]["steps"][0]["label"].startswith("hello_world.aws")
assert test_jobs[1]["steps"][1]["label"].startswith("hello_world_custom.aws")
assert result.exit_code == 0
@patch.dict("os.environ", {"AUTOMATIC": "1"})
@patch.dict("os.environ", {"BUILDKITE": "1"})
@patch.dict("os.environ", {"RAYCI_BUILD_ID": "a1b2c3d4"})
@patch("ray_release.test.Test.update_from_s3", return_value=None)
@patch("ray_release.test.Test.is_jailed_with_open_issue", return_value=False)
def test_custom_image_build_and_test_init_without_block_step_automatic(
    mock_update_from_s3, mock_is_jailed_with_open_issue
):
    """With AUTOMATIC=1 set, no manual block step is emitted: the generated
    pipeline contains a single group holding all five test steps."""
    expected_test_count = 5
    build_jobs_file = "custom_build_jobs.yaml"
    jobs_file = "test_jobs.json"
    cli_args = [
        "--test-collection-file",
        "release/ray_release/tests/sample_5_tests.yaml",
        "--global-config",
        "oss_config.yaml",
        "--frequency",
        "nightly",
        "--run-jailed-tests",
        "--run-unstable-tests",
        "--test-filters",
        "prefix:hello_world",
        "--custom-build-jobs-output-file",
        build_jobs_file,
        "--test-jobs-output-file",
        jobs_file,
    ]
    outcome = CliRunner().invoke(main, cli_args, catch_exceptions=False)

    # Output files are written relative to the bazel workspace root.
    with open(os.path.join(_bazel_workspace_dir, build_jobs_file), "r") as f:
        build_jobs = yaml.safe_load(f)
    assert len(build_jobs["steps"]) == 1  # exactly one custom build job

    with open(os.path.join(_bazel_workspace_dir, jobs_file), "r") as f:
        job_groups = json.load(f)
    print(job_groups)
    assert len(job_groups) == 1  # single group, no block step
    steps = job_groups[0]["steps"]
    assert len(steps) == expected_test_count
    assert steps[0]["label"].startswith("hello_world.aws")
    assert steps[1]["label"].startswith("hello_world_custom.aws")
    assert outcome.exit_code == 0
if __name__ == "__main__":
    # Allow running this test module directly; propagate pytest's exit code.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "release/ray_release/tests/test_custom_image_build_and_test_init.py",
"license": "Apache License 2.0",
"lines": 149,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:doc/source/data/doc_code/working-with-llms/basic_llm_example.py | """
This file serves as a documentation example and CI test for basic LLM batch inference.
"""
# __basic_llm_example_start__
import os
import shutil
import ray
from ray.data.llm import vLLMEngineProcessorConfig, build_processor
# __basic_config_example_start__
# Basic vLLM configuration
config = vLLMEngineProcessorConfig(
model_source="unsloth/Llama-3.1-8B-Instruct",
engine_kwargs={
"enable_chunked_prefill": True,
"max_num_batched_tokens": 4096, # Reduce if CUDA OOM occurs
"max_model_len": 4096, # Constrain to fit test GPU memory
},
concurrency=1,
batch_size=64,
)
# __basic_config_example_end__
processor = build_processor(
config,
preprocess=lambda row: dict(
messages=[
{"role": "system", "content": "You are a bot that responds with haikus."},
{"role": "user", "content": row["item"]},
],
sampling_params=dict(
temperature=0.3,
max_tokens=250,
),
),
postprocess=lambda row: dict(
answer=row["generated_text"],
**row, # This will return all the original columns in the dataset.
),
)
ds = ray.data.from_items(["Start of the haiku is: Complete this for me..."])
if __name__ == "__main__":
try:
import torch
if torch.cuda.is_available():
ds = processor(ds)
ds.show(limit=1)
else:
print("Skipping basic LLM run (no GPU available)")
except Exception as e:
print(f"Skipping basic LLM run due to environment error: {e}")
# NOTE: each snippet below rebinds `config` — they are independent doc
# examples, not a pipeline; only the last assignment survives at import time.
# __hf_token_config_example_start__
# Configuration with Hugging Face token
config_with_token = vLLMEngineProcessorConfig(
    model_source="unsloth/Llama-3.1-8B-Instruct",
    runtime_env={"env_vars": {"HF_TOKEN": "your_huggingface_token"}},
    concurrency=1,
    batch_size=64,
)
# __hf_token_config_example_end__

# __parallel_config_example_start__
# Model parallelism configuration for larger models
# tensor_parallel_size=2: Split model across 2 GPUs for tensor parallelism
# pipeline_parallel_size=2: Use 2 pipeline stages (total 4 GPUs needed)
# Total GPUs required = tensor_parallel_size * pipeline_parallel_size = 4
config = vLLMEngineProcessorConfig(
    model_source="unsloth/Llama-3.1-8B-Instruct",
    engine_kwargs={
        "max_model_len": 16384,
        "tensor_parallel_size": 2,
        "pipeline_parallel_size": 2,
        "enable_chunked_prefill": True,
        "max_num_batched_tokens": 2048,
    },
    concurrency=1,
    batch_size=32,
    accelerator_type="L4",
)
# __parallel_config_example_end__

# __runai_config_example_start__
# RunAI streamer configuration for optimized model loading
# Note: Install vLLM with runai dependencies: pip install -U "vllm[runai]>=0.10.1"
config = vLLMEngineProcessorConfig(
    model_source="unsloth/Llama-3.1-8B-Instruct",
    engine_kwargs={
        "load_format": "runai_streamer",
        "max_model_len": 16384,
    },
    concurrency=1,
    batch_size=64,
)
# __runai_config_example_end__

# __lora_config_example_start__
# Multi-LoRA configuration
config = vLLMEngineProcessorConfig(
    model_source="unsloth/Llama-3.1-8B-Instruct",
    engine_kwargs={
        "enable_lora": True,
        "max_lora_rank": 32,
        "max_loras": 1,
        "max_model_len": 16384,
    },
    concurrency=1,
    batch_size=32,
)
# __lora_config_example_end__

# __s3_config_example_start__
# S3 hosted model configuration
s3_config = vLLMEngineProcessorConfig(
    model_source="s3://your-bucket/your-model-path/",
    engine_kwargs={
        "load_format": "runai_streamer",
        "max_model_len": 16384,
    },
    concurrency=1,
    batch_size=64,
)
# __s3_config_example_end__

# Scratch directories for the checkpointing demo below.
base_dir = "/tmp/llm_checkpoint_demo"
input_path = os.path.join(base_dir, "input")
output_path = os.path.join(base_dir, "output")
checkpoint_path = os.path.join(base_dir, "checkpoint")

# Reset directories
for path in (input_path, output_path, checkpoint_path):
    shutil.rmtree(path, ignore_errors=True)
    os.makedirs(path)

# __row_level_fault_tolerance_config_example_start__
# Row-level fault tolerance configuration
config = vLLMEngineProcessorConfig(
    model_source="unsloth/Llama-3.1-8B-Instruct",
    concurrency=1,
    batch_size=64,
    should_continue_on_error=True,
)
# __row_level_fault_tolerance_config_example_end__
# __checkpoint_config_setup_example_start__
from ray.data.checkpoint import CheckpointConfig

ctx = ray.data.DataContext.get_current()
ctx.checkpoint_config = CheckpointConfig(
    id_column="id",
    checkpoint_path=checkpoint_path,
    delete_checkpoint_on_success=False,
)
# __checkpoint_config_setup_example_end__

# __checkpoint_usage_example_start__
processor_config = vLLMEngineProcessorConfig(
    model_source="unsloth/Llama-3.1-8B-Instruct",
    concurrency=1,
    batch_size=16,
)
processor = build_processor(
    processor_config,
    preprocess=lambda row: dict(
        id=row["id"],  # Preserve the ID column for checkpointing
        prompt=row["prompt"],
        sampling_params=dict(
            temperature=0.3,
            max_tokens=10,
        ),
    ),
    postprocess=lambda row: {
        "id": row["id"],  # Preserve the ID column for checkpointing
        "answer": row.get("generated_text"),
    },
)

# NOTE(review): `input_path` was just recreated empty above, so this
# read_parquet will find no files if the module is executed top-to-bottom —
# confirm docs CI either populates the directory or never runs this snippet.
ds = ray.data.read_parquet(input_path)
ds = processor(ds)
ds.write_parquet(output_path)
# __checkpoint_usage_example_end__
# __gpu_memory_config_example_start__
# GPU memory management configuration
# If you encounter CUDA out of memory errors, try these optimizations:
config_memory_optimized = vLLMEngineProcessorConfig(
    model_source="unsloth/Llama-3.1-8B-Instruct",
    engine_kwargs={
        "max_model_len": 8192,
        "max_num_batched_tokens": 2048,
        "enable_chunked_prefill": True,
        "gpu_memory_utilization": 0.85,
        "block_size": 16,
    },
    concurrency=1,
    batch_size=16,
)

# For very large models or limited GPU memory:
config_minimal_memory = vLLMEngineProcessorConfig(
    model_source="unsloth/Llama-3.1-8B-Instruct",
    engine_kwargs={
        "max_model_len": 4096,
        "max_num_batched_tokens": 1024,
        "enable_chunked_prefill": True,
        "gpu_memory_utilization": 0.75,
    },
    concurrency=1,
    batch_size=8,
)
# __gpu_memory_config_example_end__

# __embedding_config_example_start__
# Embedding model configuration
embedding_config = vLLMEngineProcessorConfig(
    model_source="sentence-transformers/all-MiniLM-L6-v2",
    task_type="embed",
    engine_kwargs=dict(
        enable_prefix_caching=False,
        enable_chunked_prefill=False,
        max_model_len=256,
        enforce_eager=True,
    ),
    batch_size=32,
    concurrency=1,
    chat_template_stage=False,  # Skip chat templating for embeddings
    detokenize_stage=False,  # Skip detokenization for embeddings
)

# Example usage for embeddings
def create_embedding_processor():
    """Build a processor mapping ``text`` rows to ``embedding`` vectors.

    Uses the module-level ``embedding_config`` defined above; the engine
    surfaces the computed vectors under the ``embeddings`` key.
    """
    return build_processor(
        embedding_config,
        preprocess=lambda row: dict(prompt=row["text"]),
        postprocess=lambda row: {
            "text": row["prompt"],
            "embedding": row["embeddings"],
        },
    )
# __embedding_config_example_end__

# __classification_config_example_start__
# Sequence classification model configuration
# Use task_type="classify" for classification models (e.g., sentiment, quality scoring)
# Use task_type="score" for cross-encoder scoring models
classification_config = vLLMEngineProcessorConfig(
    model_source="nvidia/nemocurator-fineweb-nemotron-4-edu-classifier",
    task_type="classify",
    engine_kwargs=dict(
        max_model_len=512,
        enforce_eager=True,
    ),
    batch_size=8,
    concurrency=1,
    chat_template_stage=False,  # no chat templating needed for classification
    detokenize_stage=False,  # classifier output is numeric, nothing to detokenize
)

# Example usage for classification
def create_classification_processor():
    """Build a processor mapping ``text`` rows to a scalar ``score``.

    Uses the module-level ``classification_config`` defined above. Returns
    ``None`` for rows where the engine produced no logits.
    """
    return build_processor(
        classification_config,
        preprocess=lambda row: dict(prompt=row["text"]),
        postprocess=lambda row: {
            "text": row["prompt"],
            # Classification models return logits in the 'embeddings' field
            "score": float(row["embeddings"][0])
            if row.get("embeddings") is not None and len(row["embeddings"]) > 0
            else None,
        },
    )
# __classification_config_example_end__

# NOTE(review): this snippet starts a live Serve deployment and runs a batch
# job at module import time — confirm docs CI intends to execute it.
# __shared_vllm_engine_config_example_start__
import ray
from ray import serve
from ray.data.llm import ServeDeploymentProcessorConfig, build_processor
from ray.serve.llm import (
    LLMConfig,
    ModelLoadingConfig,
    build_llm_deployment,
)
from ray.serve.llm.openai_api_models import CompletionRequest

llm_config = LLMConfig(
    model_loading_config=ModelLoadingConfig(
        model_id="facebook/opt-1.3b",
        model_source="facebook/opt-1.3b",
    ),
    deployment_config=dict(
        name="demo_deployment_config",
        autoscaling_config=dict(
            min_replicas=1,
            max_replicas=1,
        ),
    ),
    engine_kwargs=dict(
        enable_prefix_caching=True,
        enable_chunked_prefill=True,
        max_num_batched_tokens=4096,
    ),
)

APP_NAME = "demo_app"
DEPLOYMENT_NAME = "demo_deployment"

override_serve_options = dict(name=DEPLOYMENT_NAME)
llm_app = build_llm_deployment(
    llm_config, override_serve_options=override_serve_options
)
app = serve.run(llm_app, name=APP_NAME)

# Both processors below target the same running deployment, sharing one
# vLLM engine across pipeline stages.
config = ServeDeploymentProcessorConfig(
    deployment_name=DEPLOYMENT_NAME,
    app_name=APP_NAME,
    dtype_mapping={
        "CompletionRequest": CompletionRequest,
    },
    concurrency=1,
    batch_size=64,
)

processor1 = build_processor(
    config,
    preprocess=lambda row: dict(
        method="completions",
        dtype="CompletionRequest",
        request_kwargs=dict(
            model="facebook/opt-1.3b",
            prompt=f"This is a prompt for {row['id']}",
            stream=False,
        ),
    ),
    postprocess=lambda row: dict(
        prompt=row["choices"][0]["text"],
    ),
)

processor2 = build_processor(
    config,
    preprocess=lambda row: dict(
        method="completions",
        dtype="CompletionRequest",
        request_kwargs=dict(
            model="facebook/opt-1.3b",
            prompt=row["prompt"],
            stream=False,
        ),
    ),
    postprocess=lambda row: row,
)

ds = ray.data.range(10)
ds = processor2(processor1(ds))
print(ds.take_all())
# __shared_vllm_engine_config_example_end__
# __cross_node_parallelism_config_example_start__
# 4x4 pipeline/tensor parallelism over the Ray distributed executor backend.
config = vLLMEngineProcessorConfig(
    model_source="unsloth/Llama-3.1-8B-Instruct",
    engine_kwargs={
        "enable_chunked_prefill": True,
        "max_num_batched_tokens": 4096,
        "max_model_len": 16384,
        "pipeline_parallel_size": 4,
        "tensor_parallel_size": 4,
        "distributed_executor_backend": "ray",
    },
    batch_size=32,
    concurrency=1,
)
# __cross_node_parallelism_config_example_end__

# __custom_placement_group_strategy_config_example_start__
config = vLLMEngineProcessorConfig(
    model_source="unsloth/Llama-3.1-8B-Instruct",
    engine_kwargs={
        "enable_chunked_prefill": True,
        "max_num_batched_tokens": 4096,
        "max_model_len": 16384,
        "pipeline_parallel_size": 2,
        "tensor_parallel_size": 2,
        "distributed_executor_backend": "ray",
    },
    batch_size=32,
    concurrency=1,
    # One 1-GPU bundle per worker, packed onto as few nodes as possible.
    placement_group_config={
        "bundles": [{"GPU": 1}] * 4,
        "strategy": "STRICT_PACK",
    },
)
# __custom_placement_group_strategy_config_example_end__

# __concurrent_config_example_start__
config = vLLMEngineProcessorConfig(
    model_source="unsloth/Llama-3.1-8B-Instruct",
    engine_kwargs={
        "enable_chunked_prefill": True,
        "max_num_batched_tokens": 4096,
        "max_model_len": 16384,
    },
    concurrency=10,
    batch_size=64,
)
# __concurrent_config_example_end__

# __concurrent_config_fixed_pool_example_start__
config = vLLMEngineProcessorConfig(
    model_source="unsloth/Llama-3.1-8B-Instruct",
    engine_kwargs={
        "enable_chunked_prefill": True,
        "max_num_batched_tokens": 4096,
        "max_model_len": 16384,
    },
    # Tuple form presumably pins min and max replicas to the same value
    # (fixed-size pool, per the snippet name) — confirm against the API docs.
    concurrency=(10, 10),
    batch_size=64,
)
# __concurrent_config_fixed_pool_example_end__

# __concurrent_batches_tuning_example_start__
# Tuning concurrent batch processing
# Configure both parameters together for optimal throughput
config = vLLMEngineProcessorConfig(
    model_source="unsloth/Llama-3.1-8B-Instruct",
    engine_kwargs={
        "enable_chunked_prefill": True,
        "max_num_batched_tokens": 4096,
    },
    batch_size=64,
    # Dataset-level concurrency (number of actor replicas)
    concurrency=1,
    # Number of batches that can run concurrently per actor (default: 8)
    max_concurrent_batches=8,
    # Number of tasks Ray Data queues per actor (default: 16)
    # Increase to keep actor task queue saturated
    experimental={"max_tasks_in_flight_per_actor": 16},
)
# __concurrent_batches_tuning_example_end__
# __basic_llm_example_end__
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/data/doc_code/working-with-llms/basic_llm_example.py",
"license": "Apache License 2.0",
"lines": 406,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/data/doc_code/working-with-llms/embedding_example.py | """
Documentation example and test for embedding model batch inference.
"""
import subprocess
import sys
subprocess.check_call([sys.executable, "-m", "pip", "install", "--upgrade", "ray[llm]"])
subprocess.check_call([sys.executable, "-m", "pip", "install", "numpy==1.26.4"])
def run_embedding_example():
    """Run the embedding batch-inference doc snippet end to end.

    The ``__main__`` guard below only calls this when CUDA is available.
    The ``__embedding_example_*`` markers delimit the snippet shown in docs;
    keep them intact.
    """
    # __embedding_example_start__
    import ray
    from ray.data.llm import vLLMEngineProcessorConfig, build_processor

    embedding_config = vLLMEngineProcessorConfig(
        model_source="sentence-transformers/all-MiniLM-L6-v2",
        task_type="embed",
        engine_kwargs=dict(
            enable_prefix_caching=False,
            enable_chunked_prefill=False,
            max_model_len=256,
            enforce_eager=True,
        ),
        batch_size=32,
        concurrency=1,
        chat_template_stage=False,  # Skip chat templating for embeddings
        detokenize_stage=False,  # Skip detokenization for embeddings
    )

    embedding_processor = build_processor(
        embedding_config,
        preprocess=lambda row: dict(prompt=row["text"]),
        postprocess=lambda row: {
            "text": row["prompt"],
            "embedding": row["embeddings"],
        },
    )

    texts = [
        "Hello world",
        "This is a test sentence",
        "Embedding models convert text to vectors",
    ]
    ds = ray.data.from_items([{"text": text} for text in texts])

    embedded_ds = embedding_processor(ds)
    embedded_ds.show(limit=1)
    # __embedding_example_end__
if __name__ == "__main__":
    # Best-effort execution: doc examples must not fail CI when no GPU or
    # driver stack is available, so every environment error degrades to a skip.
    try:
        import torch

        if torch.cuda.is_available():
            run_embedding_example()
        else:
            print("Skipping embedding example (no GPU available)")
    except Exception as e:
        print(f"Skipping embedding example: {e}")
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/data/doc_code/working-with-llms/embedding_example.py",
"license": "Apache License 2.0",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/data/doc_code/working-with-llms/openai_api_example.py | """
This file serves as a documentation example and CI test for OpenAI API batch inference.
"""
import os
from ray.data.llm import HttpRequestProcessorConfig, build_processor
def run_openai_example():
    """Run a small batch-inference job against the OpenAI chat completions API.

    Reads the ``OPENAI_API_KEY`` environment variable (raises ``KeyError`` when
    unset; the ``__main__`` guard below checks for it first). The
    ``__openai_example_*`` markers delimit the snippet shown in docs.
    """
    # __openai_example_start__
    import ray

    OPENAI_KEY = os.environ["OPENAI_API_KEY"]
    ds = ray.data.from_items(["Hand me a haiku."])

    config = HttpRequestProcessorConfig(
        url="https://api.openai.com/v1/chat/completions",
        headers={"Authorization": f"Bearer {OPENAI_KEY}"},
        qps=1,  # throttle to one request per second
    )

    processor = build_processor(
        config,
        preprocess=lambda row: dict(
            payload=dict(
                model="gpt-4o-mini",
                messages=[
                    {
                        "role": "system",
                        "content": "You are a bot that responds with haikus.",
                    },
                    {"role": "user", "content": row["item"]},
                ],
                temperature=0.0,
                max_tokens=150,
            ),
        ),
        postprocess=lambda row: dict(
            response=row["http_response"]["choices"][0]["message"]["content"]
        ),
    )

    ds = processor(ds)
    print(ds.take_all())
    # __openai_example_end__
def run_openai_demo():
    """Print a short, offline walkthrough of the OpenAI API processor setup."""
    demo_lines = (
        "OpenAI API Configuration Demo",
        "=" * 30,
        "\nExample configuration:",
        "config = HttpRequestProcessorConfig(",
        "    url='https://api.openai.com/v1/chat/completions',",
        "    headers={'Authorization': f'Bearer {OPENAI_KEY}'},",
        "    qps=1,",
        ")",
        "\nThe processor handles:",
        "- Preprocessing: Convert text to OpenAI API format",
        "- HTTP requests: Send batched requests to OpenAI",
        "- Postprocessing: Extract response content",
    )
    for line in demo_lines:
        print(line)
def preprocess_for_openai(row):
    """Build an OpenAI chat-completions request payload from a dataset row.

    Args:
        row: Dataset row; the user message is taken from ``row["item"]``.

    Returns:
        Dict with a single ``payload`` key holding the request body.
    """
    chat_messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": row["item"]},
    ]
    return {
        "payload": {
            "model": "gpt-4o-mini",
            "messages": chat_messages,
            "temperature": 0.0,
            "max_tokens": 150,
        }
    }
def postprocess_openai_response(row):
    """Extract the assistant message text from an OpenAI chat response row."""
    first_choice = row["http_response"]["choices"][0]
    return {"response": first_choice["message"]["content"]}
if __name__ == "__main__":
    # Run live call if API key is set; otherwise show demo with mock output
    if "OPENAI_API_KEY" in os.environ:
        run_openai_example()
    else:
        # Mock response without API key — keeps the doc example runnable
        # in CI without credentials.
        print(
            [
                {
                    "response": (
                        "Autumn leaves whisper\nSoft code flows in quiet lines\nBugs fall one by one"
                    )
                }
            ]
        )
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/data/doc_code/working-with-llms/openai_api_example.py",
"license": "Apache License 2.0",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/_common/retry.py | import functools
import logging
import random
import time
from collections.abc import Sequence
from typing import Callable, Optional, TypeVar
try:
from typing import ParamSpec
except ImportError:
from typing_extensions import ParamSpec
logger = logging.getLogger(__name__)
R = TypeVar("R")
P = ParamSpec("P")
def call_with_retry(
    f: "Callable[P, R]",
    description: str,
    match: "Optional[Sequence[str]]" = None,
    max_attempts: int = 10,
    max_backoff_s: int = 32,
    *args: "P.args",
    **kwargs: "P.kwargs",
) -> "R":
    """Invoke ``f`` with exponential backoff on failure.

    Args:
        f: The callable to invoke.
        description: Imperative description of the operation, e.g.
            "open the file" (used in log messages).
        match: Substrings identifying retryable error messages. ``None``
            means every error is retryable.
        max_attempts: Total number of attempts before giving up.
        max_backoff_s: Cap on the backoff delay, in seconds.
        *args: Positional arguments forwarded to ``f``.
        **kwargs: Keyword arguments forwarded to ``f``.

    Returns:
        Whatever ``f`` returns on the first successful attempt.
    """
    # TODO: consider inverse match and matching exception type
    assert max_attempts >= 1, f"`max_attempts` must be positive. Got {max_attempts}."

    log = logging.getLogger(__name__)
    attempt = 0
    while True:
        try:
            return f(*args, **kwargs)
        except Exception as err:
            message = str(err)
            retryable = match is None or any(token in message for token in match)
            budget_spent = attempt + 1 >= max_attempts
            if not retryable or budget_spent:
                if retryable:
                    log.debug(
                        f"Failed to {description} after {max_attempts} attempts. Raising."
                    )
                else:
                    log.debug(
                        f"Did not find a match for {message}. Raising after {attempt+1} attempts."
                    )
                raise err from None
            # Binary exponential backoff (capped) with 20% random jitter.
            backoff = min(2**attempt, max_backoff_s) * random.uniform(0.8, 1.2)
            log.debug(
                f"Retrying {attempt+1} attempts to {description} after {backoff} seconds."
            )
            time.sleep(backoff)
            attempt += 1
def retry(
    description: str,
    match: "Optional[Sequence[str]]" = None,
    max_attempts: int = 10,
    max_backoff_s: int = 32,
) -> "Callable[[Callable[P, R]], Callable[P, R]]":
    """Decorator form of :func:`call_with_retry`.

    Args:
        description: Imperative description of the operation, e.g.
            "open the file" (used in log messages).
        match: Substrings identifying retryable error messages. ``None``
            means every error is retryable.
        max_attempts: Total number of attempts before giving up.
        max_backoff_s: Cap on the backoff delay, in seconds.

    Returns:
        A decorator that wraps its target in retry-with-backoff semantics.
    """

    def decorator(func: "Callable[P, R]") -> "Callable[P, R]":
        @functools.wraps(func)
        def wrapper(*args: "P.args", **kwargs: "P.kwargs") -> "R":
            # Delegate the actual retry loop to call_with_retry.
            return call_with_retry(
                func, description, match, max_attempts, max_backoff_s, *args, **kwargs
            )

        return wrapper

    return decorator
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/_common/retry.py",
"license": "Apache License 2.0",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/_common/tests/test_retry.py | import sys
import pytest
from ray._common.retry import (
call_with_retry,
retry,
)
def test_call_with_retry_immediate_success_with_args():
    """A first-try success returns immediately with its forwarded args."""

    def passthrough(first, second):
        return [first, second]

    outcome = call_with_retry(passthrough, "func", [], 1, 0, "a", "b")
    assert outcome == ["a", "b"]
def test_retry_immediate_success_with_object_args():
    """The decorator works on instance methods and forwards positional args."""

    class Holder:
        @retry("func", [], 1, 0)
        def func(self, a, b):
            return [a, b]

    assert Holder().func("a", "b") == ["a", "b"]
@pytest.mark.parametrize("use_decorator", [True, False])
def test_retry_last_attempt_successful_with_appropriate_wait_time(
    monkeypatch, use_decorator
):
    """Backoff sleeps 1, 2, then 3 (capped) seconds before the 4th attempt
    succeeds — jitter is pinned to 1 so the total is deterministic."""
    sleeps = []
    monkeypatch.setattr("time.sleep", sleeps.append)
    monkeypatch.setattr("random.uniform", lambda a, b: 1)

    pattern = "have not reached 4th attempt"
    attempts = {"count": 0}

    def func():
        attempts["count"] += 1
        if attempts["count"] == 4:
            return "success"
        raise ValueError(pattern)

    retry_args = ["func", [pattern], 4, 3]
    if use_decorator:
        outcome = retry(*retry_args)(func)()
    else:
        outcome = call_with_retry(func, *retry_args)

    assert outcome == "success"
    assert sum(sleeps) == 6  # 1 + 2 + min(2**2, 3)
@pytest.mark.parametrize("use_decorator", [True, False])
def test_retry_unretryable_error(use_decorator):
    """An error whose message matches no pattern is raised without retrying."""
    attempts = {"count": 0}

    def func():
        attempts["count"] += 1
        raise ValueError("unretryable error")

    retry_args = ["func", ["only retryable error"], 10, 0]
    with pytest.raises(ValueError, match="unretryable error"):
        if use_decorator:
            retry(*retry_args)(func)()
        else:
            call_with_retry(func, *retry_args)

    assert attempts["count"] == 1
@pytest.mark.parametrize("use_decorator", [True, False])
def test_retry_fail_all_attempts_retry_all_errors(use_decorator):
    """With match=None every error retries until the attempt budget is spent."""
    attempts = {"count": 0}

    def func():
        attempts["count"] += 1
        raise ValueError(str(attempts["count"]))

    retry_args = ["func", None, 3, 0]
    with pytest.raises(ValueError):
        if use_decorator:
            retry(*retry_args)(func)()
        else:
            call_with_retry(func, *retry_args)

    assert attempts["count"] == 3
if __name__ == "__main__":
    # Allow running this test module directly; propagate pytest's exit code.
    sys.exit(pytest.main(["-sv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/_common/tests/test_retry.py",
"license": "Apache License 2.0",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/dashboard/modules/aggregator/tests/test_ray_actor_events.py | import base64
import json
import sys
import pytest
import ray
from ray._common.test_utils import wait_for_condition
from ray._private.test_utils import wait_for_dashboard_agent_available
from ray.dashboard.tests.conftest import * # noqa
# Port the local HTTP capture server listens on; the aggregator agent's
# export address below is pointed at this port.
_ACTOR_EVENT_PORT = 12346


@pytest.fixture(scope="session")
def httpserver_listen_address():
    # Presumably consumed by pytest-httpserver to bind its session-scoped
    # `httpserver` fixture — verify against the plugin docs.
    return ("127.0.0.1", _ACTOR_EVENT_PORT)
def test_ray_actor_events(ray_start_cluster, httpserver):
    """End-to-end check that actor definition and lifecycle events are
    exported to the configured HTTP endpoint with the expected payloads."""
    cluster = ray_start_cluster
    # Point the aggregator agent at the local capture server and restrict
    # exported event types to the two kinds this test asserts on.
    cluster.add_node(
        env_vars={
            "RAY_DASHBOARD_AGGREGATOR_AGENT_EVENTS_EXPORT_ADDR": f"http://127.0.0.1:{_ACTOR_EVENT_PORT}",
            "RAY_DASHBOARD_AGGREGATOR_AGENT_EXPOSABLE_EVENT_TYPES": "ACTOR_DEFINITION_EVENT,ACTOR_LIFECYCLE_EVENT",
        },
        _system_config={
            "enable_ray_event": True,
        },
    )
    cluster.wait_for_nodes()
    head_node_id = cluster.head_node.node_id
    all_nodes_ids = [node.node_id for node in cluster.list_all_nodes()]

    class A:
        def ping(self):
            return "pong"

    ray.init(address=cluster.address)
    wait_for_dashboard_agent_available(cluster)

    # Create an actor to trigger definition + lifecycle events
    a = ray.remote(A).options(name="actor-test").remote()
    ray.get(a.ping.remote())

    # Check that an actor definition and a lifecycle event are published.
    httpserver.expect_request("/", method="POST").respond_with_data("", status=200)
    wait_for_condition(lambda: len(httpserver.log) >= 1)
    req, _ = httpserver.log[0]
    req_json = json.loads(req.data)
    # We expect batched events containing definition then lifecycle
    assert len(req_json) >= 2
    # Verify event types and IDs exist
    # IDs arrive base64-encoded; decode before comparing hex digests.
    assert (
        base64.b64decode(req_json[0]["actorDefinitionEvent"]["actorId"]).hex()
        == a._actor_id.hex()
    )
    assert base64.b64decode(req_json[0]["nodeId"]).hex() == head_node_id
    # Verify ActorId and state for ActorLifecycleEvents
    has_alive_state = False
    for actorLifeCycleEvent in req_json[1:]:
        assert base64.b64decode(actorLifeCycleEvent["nodeId"]).hex() == head_node_id
        assert (
            base64.b64decode(
                actorLifeCycleEvent["actorLifecycleEvent"]["actorId"]
            ).hex()
            == a._actor_id.hex()
        )
        for stateTransition in actorLifeCycleEvent["actorLifecycleEvent"][
            "stateTransitions"
        ]:
            assert stateTransition["state"] in [
                "DEPENDENCIES_UNREADY",
                "PENDING_CREATION",
                "ALIVE",
                "RESTARTING",
                "DEAD",
            ]
            if stateTransition["state"] == "ALIVE":
                has_alive_state = True
                # ALIVE transitions carry the hosting node and worker IDs.
                assert (
                    base64.b64decode(stateTransition["nodeId"]).hex() in all_nodes_ids
                )
                assert base64.b64decode(stateTransition["workerId"]).hex() != ""
    assert has_alive_state

    # Kill the actor and verify we get a DEAD state with death cause
    ray.kill(a)

    # Wait for the death event to be published
    httpserver.expect_request("/", method="POST").respond_with_data("", status=200)
    wait_for_condition(lambda: len(httpserver.log) >= 2)
    has_dead_state = False
    # Scan every captured request: the DEAD transition may land in any batch.
    for death_req, _ in httpserver.log:
        death_req_json = json.loads(death_req.data)
        for actorLifeCycleEvent in death_req_json:
            if "actorLifecycleEvent" in actorLifeCycleEvent:
                assert (
                    base64.b64decode(
                        actorLifeCycleEvent["actorLifecycleEvent"]["actorId"]
                    ).hex()
                    == a._actor_id.hex()
                )
                for stateTransition in actorLifeCycleEvent["actorLifecycleEvent"][
                    "stateTransitions"
                ]:
                    if stateTransition["state"] == "DEAD":
                        has_dead_state = True
                        assert (
                            stateTransition["deathCause"]["actorDiedErrorContext"][
                                "reason"
                            ]
                            == "RAY_KILL"
                        )
    assert has_dead_state
if __name__ == "__main__":
    # Allow running this test module directly; propagate pytest's exit code.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/dashboard/modules/aggregator/tests/test_ray_actor_events.py",
"license": "Apache License 2.0",
"lines": 104,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:gen_redis_pkg.py | from bazel.gen_extract import gen_extract
if __name__ == "__main__":
    # Presumably extracts the prebuilt redis archive into the source tree,
    # clearing any previously extracted copy first so stale files don't
    # linger — see bazel/gen_extract for the exact semantics.
    gen_extract(
        [
            "ray_redis.zip",
        ],
        clear_dir_first=[
            "ray/core/src/ray/thirdparty/redis/src",
        ],
    )
| {
"repo_id": "ray-project/ray",
"file_path": "gen_redis_pkg.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/data/stats.py | import logging
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
import pandas as pd
import pyarrow as pa
from ray.data._internal.tensor_extensions.arrow import convert_to_pyarrow_array
from ray.data.aggregate import (
AggregateFnV2,
ApproximateQuantile,
ApproximateTopK,
Count,
Max,
Mean,
Min,
MissingValuePercentage,
Std,
ZeroPercentage,
)
from ray.util.annotations import PublicAPI
if TYPE_CHECKING:
from ray.data.dataset import Schema
from ray.data.datatype import DataType, TypeCategory
logger = logging.getLogger(__name__)
@PublicAPI(stability="alpha")
@dataclass
class DatasetSummary:
"""Wrapper for dataset summary statistics.
Provides methods to access computed statistics.
Attributes:
dataset_schema: PyArrow schema of the original dataset
"""
STATISTIC_COLUMN = "statistic"
# PyArrow requires tables whereby each column's value conforms to the column's dtype as defined by the schema.
# However, aggregation results might produce statistics with types different from
# the original column (e.g., 'count' is int64 even for string columns).
# To handle this, we split statistics into two tables:
# 1. _stats_matching_column_dtype: Statistics that share the same type as the
# original column (e.g., min/max for numerical columns). These preserve
# the original column's dtype.
# 2. _stats_mismatching_column_dtype: Statistics with different types (e.g., count,
# missing_pct). These use inferred types (e.g., float64 for count).
_stats_matching_column_dtype: pa.Table
_stats_mismatching_column_dtype: pa.Table
dataset_schema: pa.Schema
columns: list[str]
def _safe_convert_table(self, table: pa.Table):
"""Safely convert a PyArrow table to pandas, handling problematic extension types.
Args:
table: PyArrow table to convert
Returns:
pandas DataFrame with converted data
"""
from ray.data.block import BlockAccessor
try:
return BlockAccessor.for_block(table).to_pandas()
except (TypeError, ValueError, pa.ArrowInvalid) as e:
logger.warning(
f"Direct conversion to pandas failed ({e}), "
"attempting column-by-column conversion"
)
result_data = {}
for col_name in table.schema.names:
col = table.column(col_name)
try:
result_data[col_name] = col.to_pandas()
except (TypeError, ValueError, pa.ArrowInvalid):
# Cast problematic columns to null type
null_col = pa.nulls(len(col), type=pa.null())
result_data[col_name] = null_col.to_pandas()
return pd.DataFrame(result_data)
def _set_statistic_index(self, df: pd.DataFrame) -> pd.DataFrame:
"""Set the statistic column as index if it exists, else return empty DataFrame.
Args:
df: DataFrame to set index on
Returns:
DataFrame with statistic column as index, or empty DataFrame if column missing
"""
if self.STATISTIC_COLUMN in df.columns:
return df.set_index(self.STATISTIC_COLUMN)
return pd.DataFrame()
def to_pandas(self):
"""Convert summary to a single pandas DataFrame.
Combines statistics from both schema-matching and schema-changing tables.
Note: Some PyArrow extension types (like TensorExtensionType) may fail to convert
to pandas when all values in a column are None. In such cases, this method
attempts to convert column-by-column, casting problematic columns to null type.
Returns:
DataFrame with all statistics, where rows are unique statistics from both tables
"""
df_matching = self._set_statistic_index(
self._safe_convert_table(self._stats_matching_column_dtype)
)
df_changing = self._set_statistic_index(
self._safe_convert_table(self._stats_mismatching_column_dtype)
)
# Handle case where both are empty
if df_matching.empty and df_changing.empty:
return pd.DataFrame(columns=[self.STATISTIC_COLUMN])
# Combine tables: prefer schema_matching values, fill with schema_changing
result = df_matching.combine_first(df_changing)
return (
result.reset_index()
.sort_values(self.STATISTIC_COLUMN)
.reset_index(drop=True)
)
def _extract_column_from_table(
self, table: pa.Table, column: str
) -> Optional[dict]:
"""Extract a column from a PyArrow table if it exists.
Args:
table: PyArrow table to extract from
column: Column name to extract
Returns:
DataFrame with 'statistic' and 'value' columns, or None if column doesn't exist
"""
if column not in table.schema.names:
return None
df = self._safe_convert_table(table)[[self.STATISTIC_COLUMN, column]]
return df.rename(columns={column: "value"})
def get_column_stats(self, column: str):
    """Return every statistic recorded for ``column``.

    Looks the column up in both the schema-matching and schema-changing
    tables and merges the results, keeping the first non-null value when
    the same statistic appears in both.

    Args:
        column: Column name to get statistics for.

    Returns:
        DataFrame with one row per statistic for the column.

    Raises:
        ValueError: If the column is absent from both summary tables.
    """
    frames = []
    for table in (
        self._stats_matching_column_dtype,
        self._stats_mismatching_column_dtype,
    ):
        extracted = self._extract_column_from_table(table, column)
        if extracted is not None:
            frames.append(extracted)
    if not frames:
        raise ValueError(f"Column '{column}' not found in summary tables")

    # Stack both sources, then collapse duplicate statistics by keeping the
    # earliest non-null value.
    stacked = pd.concat(frames, ignore_index=True)

    def _first_valid(values):
        # Prefer the earliest non-null entry; fall back to None.
        remaining = values.dropna()
        return remaining.iloc[0] if len(remaining) > 0 else None

    return (
        stacked.groupby(self.STATISTIC_COLUMN, sort=False)["value"]
        .apply(_first_valid)
        .reset_index()
        .sort_values(self.STATISTIC_COLUMN)
        .reset_index(drop=True)
    )
@dataclass
class _DtypeAggregators:
    """Container for columns and their aggregators.

    Attributes:
        column_to_dtype: Mapping from column name to dtype string representation
        aggregators: List of all aggregators to apply
    """

    # Human-readable dtype string (e.g. "DataType(arrow:int64)") per column.
    column_to_dtype: Dict[str, str]
    # Flattened list of aggregators across all summarized columns.
    aggregators: List[AggregateFnV2]
def _numerical_aggregators(column: str) -> List[AggregateFnV2]:
    """Build the default metric set for a numerical column.

    The returned aggregators compute: count, mean, min, max, std,
    an approximate median, the percentage of missing values, and the
    percentage of zeros.

    Args:
        column: The name of the numerical column to compute metrics for.

    Returns:
        A list of AggregateFnV2 instances that can be used with
        Dataset.aggregate()
    """
    metrics: List[AggregateFnV2] = [
        # Count every row, nulls included, so the total reflects dataset size.
        Count(on=column, ignore_nulls=False),
        Mean(on=column, ignore_nulls=True),
        Min(on=column, ignore_nulls=True),
        Max(on=column, ignore_nulls=True),
        # ddof=0 -> population standard deviation.
        Std(on=column, ignore_nulls=True, ddof=0),
        # A single 0.5 quantile approximates the median.
        ApproximateQuantile(on=column, quantiles=[0.5]),
        MissingValuePercentage(on=column),
        ZeroPercentage(on=column, ignore_nulls=True),
    ]
    return metrics
def _temporal_aggregators(column: str) -> List[AggregateFnV2]:
    """Build the default metric set for a temporal column.

    The returned aggregators compute: count, min, max, and the percentage
    of missing values.

    Args:
        column: The name of the temporal column to compute metrics for.

    Returns:
        A list of AggregateFnV2 instances that can be used with
        Dataset.aggregate()
    """
    metrics: List[AggregateFnV2] = [
        # Count every row, nulls included.
        Count(on=column, ignore_nulls=False),
        Min(on=column, ignore_nulls=True),
        Max(on=column, ignore_nulls=True),
        MissingValuePercentage(on=column),
    ]
    return metrics
def _basic_aggregators(column: str) -> List[AggregateFnV2]:
    """Build the minimal metric set applicable to any column type.

    The returned aggregators compute: count, the percentage of missing
    values, and the top 10 most frequent values (approximate).

    Args:
        column: The name of the column to compute metrics for.

    Returns:
        A list of AggregateFnV2 instances that can be used with
        Dataset.aggregate()
    """
    metrics: List[AggregateFnV2] = [
        Count(on=column, ignore_nulls=False),
        MissingValuePercentage(on=column),
        # Default of 10 most frequent values.
        ApproximateTopK(on=column, k=10),
    ]
    return metrics
def _default_dtype_aggregators() -> Dict[
    Union["DataType", "TypeCategory"], Callable[[str], List[AggregateFnV2]]
]:
    """Get default mapping from Ray Data DataType to aggregator factory functions.

    This function returns factory functions that create aggregators for specific columns.

    Returns:
        Dict mapping DataType or TypeCategory to factory functions that take a column name
        and return a list of aggregators for that column.

    Examples:
        >>> from ray.data.datatype import DataType
        >>> from ray.data.stats import _default_dtype_aggregators
        >>> mapping = _default_dtype_aggregators()
        >>> factory = mapping.get(DataType.int32())
        >>> aggs = factory("my_column")  # Creates aggregators for "my_column"
    """
    from ray.data.datatype import DataType, TypeCategory

    # NOTE: insertion order matters — _get_aggregators_for_dtype iterates the
    # mapping in order, so keep numerics first, then string/binary, then the
    # temporal pattern.
    numerical_types = (
        DataType.int8(),
        DataType.int16(),
        DataType.int32(),
        DataType.int64(),
        DataType.uint8(),
        DataType.uint16(),
        DataType.uint32(),
        DataType.uint64(),
        DataType.float32(),
        DataType.float64(),
        # Booleans are summarized like numbers (mean == fraction of True).
        DataType.bool(),
    )
    mapping: Dict[
        Union["DataType", "TypeCategory"], Callable[[str], List[AggregateFnV2]]
    ] = {dtype: _numerical_aggregators for dtype in numerical_types}
    # String and binary columns only get the basic metrics.
    mapping[DataType.string()] = _basic_aggregators
    mapping[DataType.binary()] = _basic_aggregators
    # TEMPORAL pattern-matches all temporal types (timestamp, date, time, duration).
    mapping[TypeCategory.TEMPORAL] = _temporal_aggregators
    # Complex types (lists, structs, maps) intentionally have no entry here;
    # they rely on the fallback logic in _get_aggregators_for_dtype since
    # they can't be easily enumerated.
    return mapping
def _get_fallback_aggregators(column: str, dtype: "DataType") -> List[AggregateFnV2]:
    """Choose aggregators via heuristic type detection.

    Used when no explicit mapping entry matches ``dtype``.

    Args:
        column: Column name.
        dtype: Ray Data DataType for the column.

    Returns:
        List of aggregators suitable for the column type.
    """
    try:
        # Null-typed columns support nothing beyond counting.
        if dtype.is_arrow_type() and pa.types.is_null(dtype._physical_dtype):
            return [Count(on=column, ignore_nulls=False)]
        if dtype.is_numerical_type():
            return _numerical_aggregators(column)
        if dtype.is_temporal_type():
            return _temporal_aggregators(column)
        # Everything else: strings, binary, lists, nested types, etc.
        return _basic_aggregators(column)
    except Exception as e:
        # Best-effort: never let type probing break summary generation.
        logger.warning(
            f"Could not determine aggregators for column '{column}' with dtype {dtype}: {e}. "
            f"Using basic aggregators."
        )
        return _basic_aggregators(column)
def _get_aggregators_for_dtype(
    column: str,
    dtype: "DataType",
    dtype_agg_mapping: Dict[
        Union["DataType", "TypeCategory"], Callable[[str], List[AggregateFnV2]]
    ],
) -> List[AggregateFnV2]:
    """Resolve the aggregators for one column from its DataType.

    The mapping is scanned in insertion order: exact DataType entries match
    by equality, while TypeCategory/str entries match by ``dtype.is_of``.
    If nothing matches, heuristic fallback selection is used.

    Args:
        column: Column name.
        dtype: Ray Data DataType for the column.
        dtype_agg_mapping: Mapping from DataType to factory functions.

    Returns:
        List of aggregators with the column name properly set.
    """
    from ray.data.datatype import DataType, TypeCategory

    for mapping_key, factory in dtype_agg_mapping.items():
        exact_match = isinstance(mapping_key, DataType) and dtype == mapping_key
        pattern_match = isinstance(mapping_key, (TypeCategory, str)) and dtype.is_of(
            mapping_key
        )
        if exact_match or pattern_match:
            return factory(column)

    # No explicit mapping matched; fall back to heuristics.
    return _get_fallback_aggregators(column, dtype)
def _dtype_aggregators_for_dataset(
    schema: Optional["Schema"],
    columns: Optional[List[str]] = None,
    dtype_agg_mapping: Optional[
        Dict[Union["DataType", "TypeCategory"], Callable[[str], List[AggregateFnV2]]]
    ] = None,
) -> _DtypeAggregators:
    """Generate aggregators for columns in a dataset based on their DataTypes.

    Args:
        schema: A Ray Schema instance
        columns: List of columns to include. If None, all columns will be included.
        dtype_agg_mapping: Optional user-provided mapping from DataType to aggregator factories.
            Each value should be a callable that takes a column name and returns aggregators.
            This will be merged with the default mapping (user mapping takes precedence).

    Returns:
        _DtypeAggregators containing column-to-dtype mapping and aggregators

    Raises:
        ValueError: If schema is None or if specified columns don't exist in schema
    """
    from ray.data.datatype import DataType

    if not schema:
        raise ValueError("Dataset must have a schema to determine column types")

    selected = schema.names if columns is None else columns

    # Reject unknown columns up front.
    missing_cols = set(selected) - set(schema.names)
    if missing_cols:
        raise ValueError(f"Columns {missing_cols} not found in dataset schema")

    # Merge: user-provided entries come first so they are matched before the
    # default patterns; defaults only fill the gaps.
    defaults = _default_dtype_aggregators()
    if not dtype_agg_mapping:
        final_mapping = defaults
    else:
        final_mapping = dtype_agg_mapping.copy()
        for key, factory in defaults.items():
            final_mapping.setdefault(key, factory)

    column_to_dtype: Dict[str, str] = {}
    all_aggs: List[AggregateFnV2] = []
    name_to_type = dict(zip(schema.names, schema.types))
    for name in selected:
        pa_type = name_to_type[name]
        if pa_type is None or pa_type is object:
            # Untyped / python-object fields can't be summarized.
            logger.warning(f"Skipping field '{name}': type is None or unsupported")
            continue
        ray_dtype = DataType.from_arrow(pa_type)
        column_to_dtype[name] = str(ray_dtype)
        all_aggs.extend(_get_aggregators_for_dtype(name, ray_dtype, final_mapping))

    return _DtypeAggregators(
        column_to_dtype=column_to_dtype,
        aggregators=all_aggs,
    )
def _format_stats(
    agg: AggregateFnV2, value: Any, agg_type: pa.DataType
) -> Dict[str, Tuple[Any, pa.DataType]]:
    """Format aggregation result into stat entries.

    Takes the raw aggregation result and formats it into one or more stat
    entries. For scalar results, returns a single entry. For list results,
    expands into multiple indexed entries (e.g. ``approx_quantile[0]``).

    Args:
        agg: The aggregator instance
        value: The aggregation result value
        agg_type: PyArrow type of the aggregation result

    Returns:
        Dictionary mapping stat names to (value, type) tuples
    """
    from ray.data.datatype import DataType

    agg_name = agg.get_agg_name()
    # Hoisted: previously DataType.from_arrow(agg_type).is_list_type() was
    # evaluated twice (once for the branch test, once for scalar_type).
    is_ray_list_type = DataType.from_arrow(agg_type).is_list_type()
    # A null result with a list type means e.g. ignore_nulls=True with all
    # nulls for a list-type aggregator.
    is_list_type = pa.types.is_list(agg_type) or is_ray_list_type
    if isinstance(value, list) or (value is None and is_list_type):
        if value is not None:
            # Expand a list result into separate indexed stats, typed by
            # the list's element type when Arrow reports one.
            scalar_type = agg_type.value_type if is_ray_list_type else agg_type
            labels = [str(idx) for idx in range(len(value))]
            return {
                f"{agg_name}[{label}]": (list_val, scalar_type)
                for label, list_val in zip(labels, value)
            }
        # Can't expand None without knowing the size; fall through and
        # report it as a single scalar entry.
    return {agg_name: (value, agg_type)}
def _parse_summary_stats(
    agg_result: Dict[str, Any],
    original_schema: pa.Schema,
    agg_schema: pa.Schema,
    aggregators: List[AggregateFnV2],
) -> tuple:
    """Parse aggregation results into schema-matching and schema-changing stats.

    Fix: the ``agg_result`` annotation previously used the builtin ``any``
    (a function) instead of ``typing.Any``.

    Args:
        agg_result: Dictionary of aggregation results with keys like "count(col)"
        original_schema: Original dataset schema
        agg_schema: Schema of aggregation results
        aggregators: List of aggregators used to generate the results

    Returns:
        Tuple of (schema_matching_stats, schema_changing_stats, column_names)
    """
    schema_matching = {}
    schema_changing = {}
    columns = set()
    # Build a lookup map from "stat_name(col_name)" to aggregator
    agg_lookup = {agg.name: agg for agg in aggregators}
    for key, value in agg_result.items():
        # Only keys shaped like "stat(col)" correspond to per-column stats.
        if "(" not in key or not key.endswith(")"):
            continue
        # Get aggregator and extract info
        agg = agg_lookup.get(key)
        if not agg:
            continue
        col_name = agg.get_target_column()
        if not col_name:
            # Skip aggregations without a target column (e.g., Count())
            continue
        # Format the aggregation results
        agg_type = agg_schema.field(key).type
        original_type = original_schema.field(col_name).type
        formatted_stats = _format_stats(agg, value, agg_type)
        for stat_name, (stat_value, stat_type) in formatted_stats.items():
            # Stats whose dtype matches the source column go into the
            # schema-matching table; everything else is schema-changing.
            stats_dict = (
                schema_matching if stat_type == original_type else schema_changing
            )
            stats_dict.setdefault(stat_name, {})[col_name] = (stat_value, stat_type)
            columns.add(col_name)
    return schema_matching, schema_changing, columns
def _create_pyarrow_array(
    col_data: List, col_type: Optional[pa.DataType] = None, col_name: str = ""
) -> pa.Array:
    """Create a PyArrow array, falling back to type inference on failure.

    Uses convert_to_pyarrow_array from arrow_block.py for type inference and
    error handling when no specific type is provided or the requested type
    does not fit the data.

    Args:
        col_data: List of column values.
        col_type: Optional PyArrow type to use.
        col_name: Column name for error messages (optional).

    Returns:
        PyArrow array.
    """
    # convert_to_pyarrow_array handles tensors, extension types, and the
    # ArrowPythonObjectArray fallback.
    if col_type is None:
        return convert_to_pyarrow_array(col_data, col_name or "column")
    try:
        return pa.array(col_data, type=col_type)
    except (pa.ArrowTypeError, pa.ArrowInvalid):
        # Requested type did not fit the data — fall back to inference.
        return convert_to_pyarrow_array(col_data, col_name or "column")
def _build_summary_table(
    stats_dict: Dict[str, Dict[str, tuple]],
    all_columns: set,
    original_schema: pa.Schema,
    preserve_types: bool,
) -> pa.Table:
    """Build a PyArrow table from parsed statistics.

    Args:
        stats_dict: Nested dict of {stat_name: {col_name: (value, type)}}
        all_columns: Set of all column names across both tables
        original_schema: Original dataset schema
        preserve_types: If True, use original schema types for columns

    Returns:
        PyArrow table with statistics
    """
    if not stats_dict:
        return pa.table({})

    stat_names = sorted(stats_dict.keys())
    table_data = {DatasetSummary.STATISTIC_COLUMN: stat_names}
    for col_name in sorted(all_columns):
        # Gather this column's value for each statistic (None when absent)
        # and remember the first aggregation type seen.
        col_data = []
        first_type = None
        for stat_name in stat_names:
            entry = stats_dict[stat_name].get(col_name)
            if entry is None:
                col_data.append(None)
                continue
            value, agg_type = entry
            col_data.append(value)
            if first_type is None:
                first_type = agg_type
        # Column type preference: original schema (when preserving), then
        # the first aggregation type, otherwise inference downstream.
        if preserve_types and col_name in original_schema.names:
            col_type = original_schema.field(col_name).type
        else:
            col_type = first_type
        table_data[col_name] = _create_pyarrow_array(col_data, col_type, col_name)
    return pa.table(table_data)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/stats.py",
"license": "Apache License 2.0",
"lines": 505,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/tests/test_dataset_stats.py | import pandas as pd
import pyarrow as pa
import pytest
from packaging.version import parse as parse_version
import ray
from ray.data._internal.util import rows_same
from ray.data._internal.utils.arrow_utils import get_pyarrow_version
from ray.data.aggregate import (
ApproximateQuantile,
ApproximateTopK,
Count,
Max,
Mean,
Min,
MissingValuePercentage,
Std,
ZeroPercentage,
)
from ray.data.datatype import DataType
from ray.data.stats import (
DatasetSummary,
_basic_aggregators,
_default_dtype_aggregators,
_dtype_aggregators_for_dataset,
_numerical_aggregators,
_temporal_aggregators,
)
class TestDtypeAggregatorsForDataset:
    """Test suite for _dtype_aggregators_for_dataset function."""

    @pytest.mark.parametrize(
        "data,expected_dtypes,expected_agg_count",
        [
            # Numerical columns only
            (
                [{"int_col": 1, "float_col": 1.5}],
                {
                    "int_col": "DataType(arrow:int64)",
                    "float_col": "DataType(arrow:double)",
                },
                16,  # 2 columns * 8 aggregators each
            ),
            # Mixed numerical and string
            (
                [{"num": 1, "str": "a"}],
                {"num": "DataType(arrow:int64)", "str": "DataType(arrow:string)"},
                11,  # 1 numerical * 8 + 1 string * 3
            ),
            # Boolean treated as numerical
            (
                [{"bool_col": True, "int_col": 1}],
                {
                    "bool_col": "DataType(arrow:bool)",
                    "int_col": "DataType(arrow:int64)",
                },
                16,  # 2 columns * 8 aggregators each
            ),
        ],
    )
    def test_column_type_detection(self, data, expected_dtypes, expected_agg_count):
        """Test that column types are correctly detected and mapped."""
        ds = ray.data.from_items(data)
        result = _dtype_aggregators_for_dataset(ds.schema())
        assert (result.column_to_dtype, len(result.aggregators)) == (
            expected_dtypes,
            expected_agg_count,
        )

    def test_column_filtering(self):
        """Test that only specified columns are included."""
        data = [{"col1": 1, "col2": "a", "col3": 1.5}]
        ds = ray.data.from_items(data)
        result = _dtype_aggregators_for_dataset(ds.schema(), columns=["col1", "col3"])
        # col1 and col3 are both numerical -> 2 columns * 8 aggregators.
        assert (set(result.column_to_dtype.keys()), len(result.aggregators)) == (
            {"col1", "col3"},
            16,
        )

    def test_empty_columns_list(self):
        """Test behavior with empty columns list."""
        data = [{"col1": 1, "col2": "a"}]
        ds = ray.data.from_items(data)
        result = _dtype_aggregators_for_dataset(ds.schema(), columns=[])
        assert (len(result.column_to_dtype), len(result.aggregators)) == (0, 0)

    def test_invalid_columns_raises_error(self):
        """Test error handling when columns parameter contains non-existent columns."""
        data = [{"col1": 1}]
        ds = ray.data.from_items(data)
        with pytest.raises(ValueError, match="not found in dataset schema"):
            _dtype_aggregators_for_dataset(ds.schema(), columns=["nonexistent"])

    def test_none_schema_raises_error(self):
        """Test that None schema raises appropriate error."""
        with pytest.raises(ValueError, match="must have a schema"):
            _dtype_aggregators_for_dataset(None)

    def test_custom_dtype_mapping(self):
        """Test that custom dtype mappings override defaults."""
        data = [{"int_col": 1}]
        ds = ray.data.from_items(data)
        # Override int64 to only use Count and Mean
        custom_mapping = {DataType.int64(): lambda col: [Count(on=col), Mean(on=col)]}
        result = _dtype_aggregators_for_dataset(
            ds.schema(), dtype_agg_mapping=custom_mapping
        )
        assert [type(agg) for agg in result.aggregators] == [Count, Mean]

    @pytest.mark.skipif(
        get_pyarrow_version() < parse_version("14.0.0"),
        reason="Requires pyarrow >= 14.0.0",
    )
    def test_custom_dtype_mapping_pattern_precedence(self):
        """Test that specific custom mappings take precedence over default patterns."""
        import datetime

        # Use from_arrow to ensure we get exactly timestamp[us]
        t = pa.table(
            {"ts": pa.array([datetime.datetime(2024, 1, 1)], type=pa.timestamp("us"))}
        )
        ds = ray.data.from_arrow(t)
        # Override specific timestamp type to only use Count
        # Default for temporal is [Count, Min, Max, MissingValuePercentage]
        ts_dtype = DataType.from_arrow(pa.timestamp("us"))
        custom_mapping = {ts_dtype: lambda col: [Count(on=col)]}
        result = _dtype_aggregators_for_dataset(
            ds.schema(), dtype_agg_mapping=custom_mapping
        )
        # Should only have 1 aggregator if our specific override was used.
        # If the default DataType.temporal() pattern matched first, we'd get 4 aggregators.
        assert len(result.aggregators) == 1
        assert isinstance(result.aggregators[0], Count)

    @pytest.mark.skipif(
        get_pyarrow_version() < parse_version("14.0.0"),
        reason="Requires pyarrow >= 14.0.0",
    )
    @pytest.mark.parametrize(
        "pa_type",
        [
            pa.timestamp("us"),  # Temporal: count, min, max, missing%
            pa.date32(),  # Temporal
            pa.time64("us"),  # Temporal
        ],
    )
    def test_temporal_types(self, pa_type):
        """Test that temporal types get appropriate aggregators."""
        table = pa.table({"temporal_col": pa.array([1, 2, 3], type=pa_type)})
        ds = ray.data.from_arrow(table)
        result = _dtype_aggregators_for_dataset(ds.schema())
        assert "temporal_col" in result.column_to_dtype
        # Temporal columns get exactly the four temporal default aggregators.
        assert [type(agg) for agg in result.aggregators] == [
            Count,
            Min,
            Max,
            MissingValuePercentage,
        ]
class TestIndividualAggregatorFunctions:
    """Test suite for individual aggregator generator functions."""

    def test_numerical_aggregators(self):
        """Test _numerical_aggregators function."""
        aggs = _numerical_aggregators("test_col")
        assert len(aggs) == 8
        # Every aggregator must target the requested column.
        assert all(agg.get_target_column() == "test_col" for agg in aggs)
        assert [type(agg) for agg in aggs] == [
            Count,
            Mean,
            Min,
            Max,
            Std,
            ApproximateQuantile,
            MissingValuePercentage,
            ZeroPercentage,
        ]

    def test_temporal_aggregators(self):
        """Test _temporal_aggregators function."""
        aggs = _temporal_aggregators("test_col")
        assert len(aggs) == 4
        assert all(agg.get_target_column() == "test_col" for agg in aggs)
        assert [type(agg) for agg in aggs] == [Count, Min, Max, MissingValuePercentage]

    def test_basic_aggregators(self):
        """Test _basic_aggregators function."""
        aggs = _basic_aggregators("test_col")
        assert len(aggs) == 3
        assert all(agg.get_target_column() == "test_col" for agg in aggs)
        assert [type(agg) for agg in aggs] == [
            Count,
            MissingValuePercentage,
            ApproximateTopK,
        ]
class TestDefaultDtypeAggregators:
    """Test suite for _default_dtype_aggregators function."""

    @pytest.mark.skipif(
        get_pyarrow_version() < parse_version("14.0.0"),
        reason="Requires pyarrow >= 14.0.0",
    )
    @pytest.mark.parametrize(
        "dtype_factory,expected_agg_types,uses_pattern_matching",
        [
            (
                DataType.int32,
                [
                    Count,
                    Mean,
                    Min,
                    Max,
                    Std,
                    ApproximateQuantile,
                    MissingValuePercentage,
                    ZeroPercentage,
                ],
                False,
            ),  # Numerical
            (
                DataType.float64,
                [
                    Count,
                    Mean,
                    Min,
                    Max,
                    Std,
                    ApproximateQuantile,
                    MissingValuePercentage,
                    ZeroPercentage,
                ],
                False,
            ),  # Numerical
            (
                DataType.bool,
                [
                    Count,
                    Mean,
                    Min,
                    Max,
                    Std,
                    ApproximateQuantile,
                    MissingValuePercentage,
                    ZeroPercentage,
                ],
                False,
            ),  # Numerical
            (
                DataType.string,
                [Count, MissingValuePercentage, ApproximateTopK],
                False,
            ),  # Basic
            (
                DataType.binary,
                [Count, MissingValuePercentage, ApproximateTopK],
                False,
            ),  # Basic
            (
                lambda: DataType.temporal("timestamp", unit="us"),
                [Count, Min, Max, MissingValuePercentage],
                True,
            ),  # Temporal (pattern matched)
            (
                lambda: DataType.temporal("date32"),
                [Count, Min, Max, MissingValuePercentage],
                True,
            ),  # Temporal (pattern matched)
            (
                lambda: DataType.temporal("time64", unit="us"),
                [Count, Min, Max, MissingValuePercentage],
                True,
            ),  # Temporal (pattern matched)
        ],
    )
    def test_default_mappings(
        self, dtype_factory, expected_agg_types, uses_pattern_matching
    ):
        """Test that default mappings return correct aggregators."""
        from ray.data.datatype import TypeCategory

        mapping = _default_dtype_aggregators()
        dtype = dtype_factory()
        if uses_pattern_matching:
            # For pattern-matched types (like temporal), find the matching factory
            factory = None
            for mapping_key, mapping_factory in mapping.items():
                if isinstance(mapping_key, (TypeCategory, str)) and dtype.is_of(
                    mapping_key
                ):
                    factory = mapping_factory
                    break
            assert (
                factory is not None
            ), f"Type {dtype} should match a pattern in the mapping"
        else:
            # For exact matches, directly access the mapping
            assert dtype in mapping
            factory = mapping[dtype]
        # Call the factory with a test column to get aggregators
        aggs = factory("test_col")
        assert [type(agg) for agg in aggs] == expected_agg_types
class TestDatasetSummary:
    """Test suite for Dataset.summary() method."""

    def test_basic_summary(self):
        """Test basic summary computation."""
        ds = ray.data.from_items(
            [
                {"age": 25, "name": "Alice"},
                {"age": 30, "name": "Bob"},
            ]
        )
        summary = ds.summary()
        actual = summary.to_pandas()
        # Verify columns are present
        assert "age" in actual.columns
        assert "name" in actual.columns
        # Check key statistics using rows_same
        actual_subset = actual[
            actual["statistic"].isin(["count", "mean", "min", "max"])
        ].copy()
        actual_subset["age"] = actual_subset["age"].astype(float)
        actual_subset["name"] = actual_subset["name"].astype(float)
        # String column "name" only has a count; the numeric stats are null.
        expected = pd.DataFrame(
            {
                "statistic": ["count", "mean", "min", "max"],
                "age": [2.0, 27.5, 25.0, 30.0],
                "name": [2.0, None, None, None],
            }
        )
        assert rows_same(actual_subset, expected)

    def test_summary_with_column_filter(self):
        """Test summary with specific columns."""
        ds = ray.data.from_items(
            [
                {"col1": 1, "col2": "a", "col3": 3.5},
            ]
        )
        summary = ds.summary(columns=["col1"])
        actual = summary.to_pandas()
        # Check count and mean with rows_same
        actual_subset = actual[actual["statistic"].isin(["count", "mean"])][
            ["statistic", "col1"]
        ].copy()
        actual_subset["col1"] = actual_subset["col1"].astype(float)
        expected = pd.DataFrame(
            {
                "statistic": ["count", "mean"],
                "col1": [1.0, 1.0],
            }
        )
        assert rows_same(actual_subset, expected)

    def test_summary_custom_mapping(self):
        """Test summary with custom dtype aggregation mapping."""
        ds = ray.data.from_items([{"value": 10, "other": 20}])
        # Only Count and Mean for int64 columns
        custom_mapping = {DataType.int64(): lambda col: [Count(on=col), Mean(on=col)]}
        summary = ds.summary(override_dtype_agg_mapping=custom_mapping)
        actual = summary.to_pandas()
        # Convert to float for comparison
        actual["value"] = actual["value"].astype(float)
        actual["other"] = actual["other"].astype(float)
        # Columns are sorted alphabetically, so order is: statistic, other, value
        expected = pd.DataFrame(
            {
                "statistic": ["count", "mean"],
                "other": [1.0, 20.0],
                "value": [1.0, 10.0],
            }
        )
        assert rows_same(actual, expected)

    def test_get_column_stats(self):
        """Test get_column_stats method."""
        ds = ray.data.from_items(
            [
                {"x": 1, "y": 2},
                {"x": 3, "y": 4},
            ]
        )
        summary = ds.summary()
        actual = summary.get_column_stats("x")
        # Verify key statistics with rows_same (checking subset due to mixed types)
        expected_stats = ["count", "mean", "min", "max"]
        actual_subset = actual[actual["statistic"].isin(expected_stats)].copy()
        actual_subset["value"] = actual_subset["value"].astype(float)
        expected = pd.DataFrame(
            {
                "statistic": ["count", "mean", "min", "max"],
                "value": [2.0, 2.0, 1.0, 3.0],
            }
        )
        assert rows_same(actual_subset, expected)

    @pytest.mark.parametrize(
        "data,column,expected_df",
        [
            (
                [{"x": 1}, {"x": 2}, {"x": 3}],
                "x",
                pd.DataFrame(
                    {
                        "statistic": ["count", "mean", "min", "max"],
                        "x": [3.0, 2.0, 1.0, 3.0],
                    }
                ),
            ),
            (
                [{"y": 10}, {"y": 20}],
                "y",
                pd.DataFrame(
                    {
                        "statistic": ["count", "mean", "min", "max"],
                        "y": [2.0, 15.0, 10.0, 20.0],
                    }
                ),
            ),
            (
                [{"z": 0}, {"z": 0}, {"z": 1}],
                "z",
                pd.DataFrame(
                    {
                        "statistic": ["count", "mean", "min", "max"],
                        "z": [3.0, 1.0 / 3, 0.0, 1.0],
                    }
                ),
            ),
        ],
    )
    def test_summary_statistics_values(self, data, column, expected_df):
        """Test that computed statistics have correct values."""
        ds = ray.data.from_items(data)
        summary = ds.summary(columns=[column])
        actual = summary.to_pandas()
        # Filter to key statistics and convert to float
        actual_subset = actual[
            actual["statistic"].isin(["count", "mean", "min", "max"])
        ][["statistic", column]].copy()
        actual_subset[column] = actual_subset[column].astype(float)
        assert rows_same(actual_subset, expected_df)

    @pytest.mark.parametrize(
        "data,columns,expected_df",
        [
            # Single numerical column with two values
            (
                [{"x": 10}, {"x": 20}],
                ["x"],
                pd.DataFrame(
                    {
                        DatasetSummary.STATISTIC_COLUMN: [
                            "approx_quantile[0]",
                            "count",
                            "max",
                            "mean",
                            "min",
                            "missing_pct",
                            "std",
                            "zero_pct",
                        ],
                        "x": [20.0, 2.0, 20.0, 15.0, 10.0, 0.0, 5.0, 0.0],
                    }
                ),
            ),
            # Single numerical column with all same values
            (
                [{"y": 5}, {"y": 5}, {"y": 5}],
                ["y"],
                pd.DataFrame(
                    {
                        DatasetSummary.STATISTIC_COLUMN: [
                            "approx_quantile[0]",
                            "count",
                            "max",
                            "mean",
                            "min",
                            "missing_pct",
                            "std",
                            "zero_pct",
                        ],
                        "y": [5.0, 3.0, 5.0, 5.0, 5.0, 0.0, 0.0, 0.0],
                    }
                ),
            ),
            # Multiple numerical columns
            (
                [{"a": 1, "b": 10}, {"a": 3, "b": 30}],
                ["a", "b"],
                pd.DataFrame(
                    {
                        DatasetSummary.STATISTIC_COLUMN: [
                            "approx_quantile[0]",
                            "count",
                            "max",
                            "mean",
                            "min",
                            "missing_pct",
                            "std",
                            "zero_pct",
                        ],
                        "a": [3.0, 2.0, 3.0, 2.0, 1.0, 0.0, 1.0, 0.0],
                        "b": [30.0, 2.0, 30.0, 20.0, 10.0, 0.0, 10.0, 0.0],
                    }
                ),
            ),
            # Column with zeros and missing values
            (
                [{"z": 0}, {"z": 10}, {"z": None}],
                ["z"],
                pd.DataFrame(
                    {
                        DatasetSummary.STATISTIC_COLUMN: [
                            "approx_quantile[0]",
                            "count",
                            "max",
                            "mean",
                            "min",
                            "missing_pct",
                            "std",
                            "zero_pct",
                        ],
                        "z": [10.0, 3.0, 10.0, 5.0, 0.0, 100.0 / 3, 5.0, 50.0],
                    }
                ),
            ),
            # Column with all zeros
            (
                [{"w": 0}, {"w": 0}],
                ["w"],
                pd.DataFrame(
                    {
                        DatasetSummary.STATISTIC_COLUMN: [
                            "approx_quantile[0]",
                            "count",
                            "max",
                            "mean",
                            "min",
                            "missing_pct",
                            "std",
                            "zero_pct",
                        ],
                        "w": [0.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 100.0],
                    }
                ),
            ),
        ],
    )
    def test_summary_full_dataframe(self, data, columns, expected_df):
        """Test summary with full DataFrame comparison."""
        ds = ray.data.from_items(data)
        summary = ds.summary(columns=columns)
        actual = summary.to_pandas()
        # Convert to float for comparison
        for col in expected_df.columns:
            if col != DatasetSummary.STATISTIC_COLUMN:
                expected_df[col] = expected_df[col].astype(float)
                actual[col] = actual[col].astype(float)
        assert rows_same(actual, expected_df)

    def test_summary_multiple_quantiles(self):
        """Test summary with multiple quantiles."""
        ds = ray.data.from_items(
            [
                {"x": 1},
                {"x": 2},
                {"x": 3},
                {"x": 4},
                {"x": 5},
            ]
        )
        # Create custom mapping with multiple quantiles
        custom_mapping = {
            DataType.int64(): lambda col: [
                Count(on=col, ignore_nulls=False),
                Min(on=col, ignore_nulls=True),
                Max(on=col, ignore_nulls=True),
                ApproximateQuantile(on=col, quantiles=[0.25, 0.5, 0.75]),
            ]
        }
        summary = ds.summary(columns=["x"], override_dtype_agg_mapping=custom_mapping)
        actual = summary.to_pandas()
        # Should have separate rows for each quantile with index-based labels [0], [1], [2]
        expected = pd.DataFrame(
            {
                DatasetSummary.STATISTIC_COLUMN: [
                    "approx_quantile[0]",
                    "approx_quantile[1]",
                    "approx_quantile[2]",
                    "count",
                    "max",
                    "min",
                ],
                "x": [2.0, 3.0, 4.0, 5.0, 5.0, 1.0],
            }
        )
        # Convert to float for comparison
        for col in expected.columns:
            if col != DatasetSummary.STATISTIC_COLUMN:
                expected[col] = expected[col].astype(float)
                actual[col] = actual[col].astype(float)
        assert rows_same(actual, expected)

    def test_summary_custom_quantiles_and_topk(self):
        """Test summary with custom ApproximateQuantile and ApproximateTopK values."""
        # Create data with numerical and string columns
        ds = ray.data.from_items(
            [
                {"value": 10, "category": "apple"},
                {"value": 20, "category": "banana"},
                {"value": 30, "category": "apple"},
                {"value": 40, "category": "cherry"},
                {"value": 50, "category": "banana"},
                {"value": 60, "category": "apple"},
                {"value": 70, "category": "date"},
            ]
        )
        # Custom mapping with different quantile values and top-k value
        custom_mapping = {
            DataType.int64(): lambda col: [
                Count(on=col, ignore_nulls=False),
                ApproximateQuantile(on=col, quantiles=[0.1, 0.5, 0.9]),
            ],
            DataType.string(): lambda col: [
                Count(on=col, ignore_nulls=False),
                ApproximateTopK(on=col, k=3),  # Top 3 instead of default 10
            ],
        }
        summary = ds.summary(override_dtype_agg_mapping=custom_mapping)
        actual = summary.to_pandas()
        expected_stats = [
            "approx_quantile[0]",
            "approx_quantile[1]",
            "approx_quantile[2]",
            "approx_topk[0]",
            "approx_topk[1]",
            "approx_topk[2]",
            "count",
        ]
        # Verify all expected statistics are present
        actual_stats = set(actual[DatasetSummary.STATISTIC_COLUMN].tolist())
        assert all(stat in actual_stats for stat in expected_stats)

        # Helper to get statistic value for a column
        def get_stat(stat_name, col_name):
            return actual[actual[DatasetSummary.STATISTIC_COLUMN] == stat_name][
                col_name
            ].iloc[0]

        # Verify all expected values
        expected_values = {
            ("approx_quantile[0]", "value"): 10.0,
            ("approx_quantile[1]", "value"): 40.0,
            ("approx_quantile[2]", "value"): 70.0,
            ("approx_topk[0]", "category"): {"category": "apple", "count": 3},
            ("approx_topk[1]", "category"): {"category": "banana", "count": 2},
            ("approx_topk[2]", "category"): {"category": "date", "count": 1},
            ("count", "value"): 7.0,
            ("count", "category"): 7,
        }
        for (stat, col), expected in expected_values.items():
            actual_value = get_stat(stat, col)
            assert (
                actual_value == expected
            ), f"{stat}[{col}] = {actual_value} != {expected}"
# Allow running this test module directly (outside of a pytest invocation).
if __name__ == "__main__":
    import sys

    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_dataset_stats.py",
"license": "Apache License 2.0",
"lines": 643,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/test_hash_shuffle.py | from dataclasses import dataclass
from typing import Any, Dict, Optional
from unittest.mock import MagicMock, patch
import pytest
from ray.data import DataContext, ExecutionResources
from ray.data._internal.execution.interfaces import PhysicalOperator
from ray.data._internal.execution.operators.hash_aggregate import HashAggregateOperator
from ray.data._internal.execution.operators.hash_shuffle import HashShuffleOperator
from ray.data._internal.execution.operators.join import JoinOperator
from ray.data._internal.logical.interfaces import LogicalOperator
from ray.data._internal.logical.operators import JoinType
from ray.data._internal.util import GiB, MiB
from ray.data.aggregate import Count, Sum
from ray.data.block import BlockMetadata
@dataclass
class JoinTestCase:
    """Inputs and expected outputs for one hash-shuffle join sizing test."""

    # Expected outputs
    expected_ray_remote_args: Dict[str, Any]
    expected_num_partitions: int
    expected_num_aggregators: int

    # Input dataset configurations
    left_size_bytes: Optional[int]
    right_size_bytes: Optional[int]
    left_num_blocks: Optional[int]
    right_num_blocks: Optional[int]

    # Join configuration
    target_num_partitions: Optional[int]

    # Cluster resources (for testing different resource scenarios)
    total_cpu: float = 4.0
    total_memory: int = 32 * GiB
@pytest.mark.parametrize(
    "tc",
    [
        # Case 1: Auto-derived partitions with limited CPUs
        JoinTestCase(
            left_size_bytes=1 * GiB,
            right_size_bytes=2 * GiB,
            left_num_blocks=10,
            right_num_blocks=5,
            target_num_partitions=None,  # Auto-derive
            total_cpu=4.0,
            expected_num_partitions=10,  # max(10, 5)
            expected_num_aggregators=4,  # min(10 partitions, 4 CPUs) = 4
            expected_ray_remote_args={
                "max_concurrency": 3,  # ceil(10 partitions / 4 aggregators)
                "num_cpus": 0.25,  # 4 CPUs * 25% / 4 aggregators
                "memory": 1771674012,
                "scheduling_strategy": "SPREAD",
                "allow_out_of_order_execution": True,
            },
        ),
        # Case 2: Single partition (much higher memory overhead)
        JoinTestCase(
            left_size_bytes=1 * GiB,
            right_size_bytes=1 * GiB,
            left_num_blocks=10,
            right_num_blocks=10,
            target_num_partitions=1,
            total_cpu=4.0,
            expected_num_partitions=1,
            expected_num_aggregators=1,  # min(1 partition, 4 CPUs) = 1
            expected_ray_remote_args={
                "max_concurrency": 1,
                "num_cpus": 1.0,  # 4 CPUs * 25% / 1 aggregator
                "memory": 8589934592,
                "scheduling_strategy": "SPREAD",
                "allow_out_of_order_execution": True,
            },
        ),
        # Case 3: Limited CPU resources affecting num_cpus calculation
        JoinTestCase(
            left_size_bytes=2 * GiB,
            right_size_bytes=2 * GiB,
            left_num_blocks=20,
            right_num_blocks=20,
            target_num_partitions=40,
            total_cpu=2.0,  # Only 2 CPUs available
            expected_num_partitions=40,
            expected_num_aggregators=2,  # min(40 partitions, 2 CPUs) = 2
            expected_ray_remote_args={
                "max_concurrency": 8,  # min(ceil(40/2), 8) = 8
                "num_cpus": 0.25,  # 2 CPUs * 25% / 2 aggregators
                "memory": 2469606197,
                "scheduling_strategy": "SPREAD",
                "allow_out_of_order_execution": True,
            },
        ),
        # Case 4: Testing with many CPUs and partitions
        JoinTestCase(
            left_size_bytes=10 * GiB,
            right_size_bytes=10 * GiB,
            left_num_blocks=100,
            right_num_blocks=100,
            target_num_partitions=100,
            total_cpu=32.0,
            expected_num_partitions=100,
            expected_num_aggregators=32,  # min(100 partitions, 32 CPUs)
            expected_ray_remote_args={
                "max_concurrency": 4,  # ceil(100 / 32)
                "num_cpus": 0.25,  # 32 CPUs * 25% / 32 aggregators
                "memory": 1315333735,
                "scheduling_strategy": "SPREAD",
                "allow_out_of_order_execution": True,
            },
        ),
        # Case 5: Testing max aggregators cap (128 default)
        JoinTestCase(
            left_size_bytes=50 * GiB,
            right_size_bytes=50 * GiB,
            left_num_blocks=200,
            right_num_blocks=200,
            target_num_partitions=200,
            total_cpu=256.0,  # Many CPUs
            expected_num_partitions=200,
            expected_num_aggregators=128,  # min(200, min(256, 128 (default max))
            expected_ray_remote_args={
                "max_concurrency": 2,  # ceil(200 / 128)
                "num_cpus": 0.5,  # 256 CPUs * 25% / 128 aggregators
                "memory": 2449473536,
                "scheduling_strategy": "SPREAD",
                "allow_out_of_order_execution": True,
            },
        ),
        # Case 6: Testing num_cpus derived from memory allocation
        JoinTestCase(
            left_size_bytes=50 * GiB,
            right_size_bytes=50 * GiB,
            left_num_blocks=200,
            right_num_blocks=200,
            target_num_partitions=None,
            total_cpu=1024,  # Many CPUs
            expected_num_partitions=200,
            expected_num_aggregators=128,  # min(200, min(1000, 128 (default max))
            expected_ray_remote_args={
                "max_concurrency": 2,  # ceil(200 / 128)
                "num_cpus": 0.57,  # ~2.5Gb / 4Gb = ~0.57
                "memory": 2449473536,
                "scheduling_strategy": "SPREAD",
                "allow_out_of_order_execution": True,
            },
        ),
        # Case 7: No dataset size estimates available (fallback to default memory request)
        # Memory calculation (fallback):
        # max_mem_per_agg = 32 GiB / 32 = 1 GiB
        # modest_mem = 1 GiB / 2 = 512 MiB
        # memory = min(512 MiB, DEFAULT_1GiB) = 512 MiB = 536870912
        # CPU calculation:
        # cap = min(4.0, 32.0 * 0.25 / 32) = 0.25
        # target = min(0.25, 536870912 / 4 GiB) = 0.12
        JoinTestCase(
            left_size_bytes=None,
            right_size_bytes=None,
            left_num_blocks=None,
            right_num_blocks=None,
            target_num_partitions=None,
            total_cpu=32,
            expected_num_partitions=200,  # default parallelism
            expected_num_aggregators=32,  # min(200, min(1000, 128 (default max))
            expected_ray_remote_args={
                "max_concurrency": 7,  # ceil(200 / 32)
                "num_cpus": 0.12,
                "memory": 536870912,
                "scheduling_strategy": "SPREAD",
                "allow_out_of_order_execution": True,
            },
        ),
    ],
)
def test_join_aggregator_remote_args(
    ray_start_regular,
    tc,
):
    """Test that join operator correctly estimates memory, CPU, and other resources
    for Aggregator actors based on dataset size estimates as well as cluster resources.
    """
    # Fake the left input: a logical op reporting the configured size/block
    # count, attached to a physical op with no downstream consumers.
    left_logical_op_mock = MagicMock(LogicalOperator)
    left_logical_op_mock.infer_metadata.return_value = BlockMetadata(
        num_rows=None,
        size_bytes=tc.left_size_bytes,
        exec_stats=None,
        input_files=None,
    )
    left_logical_op_mock.estimated_num_outputs.return_value = tc.left_num_blocks
    left_op_mock = MagicMock(PhysicalOperator)
    left_op_mock._output_dependencies = []
    left_op_mock._logical_operators = [left_logical_op_mock]
    # Fake the right input the same way.
    right_logical_op_mock = MagicMock(LogicalOperator)
    right_logical_op_mock.infer_metadata.return_value = BlockMetadata(
        num_rows=None,
        size_bytes=tc.right_size_bytes,
        exec_stats=None,
        input_files=None,
    )
    right_logical_op_mock.estimated_num_outputs.return_value = tc.right_num_blocks
    right_op_mock = MagicMock(PhysicalOperator)
    right_op_mock._output_dependencies = []
    right_op_mock._logical_operators = [right_logical_op_mock]
    # Patch the total cluster resources
    with patch(
        "ray.data._internal.execution.operators.hash_shuffle.ray.cluster_resources",
        return_value={"CPU": tc.total_cpu, "memory": tc.total_memory},
    ):
        # Create the join operator
        op = JoinOperator(
            left_input_op=left_op_mock,
            right_input_op=right_op_mock,
            data_context=DataContext.get_current(),
            left_key_columns=("id",),
            right_key_columns=("id",),
            join_type=JoinType.INNER,
            num_partitions=tc.target_num_partitions,
        )
        # Validate the estimations
        assert op._num_partitions == tc.expected_num_partitions
        assert op._aggregator_pool.num_aggregators == tc.expected_num_aggregators
        assert (
            op._aggregator_pool._aggregator_ray_remote_args
            == tc.expected_ray_remote_args
        )
@dataclass
class HashOperatorTestCase:
    """One parametrized scenario for hash shuffle/aggregate resource estimation.

    Bundles the simulated input-dataset metadata, the operator configuration,
    the simulated cluster resources, and the values the operator is expected
    to derive from them.
    """

    # Expected outputs
    expected_ray_remote_args: Dict[str, Any]
    expected_num_partitions: int
    expected_num_aggregators: int
    # Input dataset configuration
    input_size_bytes: Optional[int]
    input_num_blocks: Optional[int]
    # Operator configuration
    target_num_partitions: Optional[int]
    # Cluster resources (for testing different resource scenarios)
    total_cpu: float = 4.0
    total_memory: int = 32 * GiB
@pytest.mark.parametrize(
    "tc",
    [
        # Case 1: Auto-derived partitions with limited CPUs
        HashOperatorTestCase(
            input_size_bytes=2 * GiB,
            input_num_blocks=16,
            target_num_partitions=None,
            total_cpu=4.0,
            expected_num_partitions=16,
            expected_num_aggregators=4,
            expected_ray_remote_args={
                "max_concurrency": 4,
                "num_cpus": 0.16,
                "memory": 671088640,
                "scheduling_strategy": "SPREAD",
                "allow_out_of_order_execution": True,
            },
        ),
        # Case 2: Single partition produced
        HashOperatorTestCase(
            input_size_bytes=512 * MiB,
            input_num_blocks=8,
            target_num_partitions=1,
            total_cpu=8.0,
            expected_num_partitions=1,
            expected_num_aggregators=1,
            expected_ray_remote_args={
                "max_concurrency": 1,
                "num_cpus": 0.25,
                "memory": 1073741824,
                "scheduling_strategy": "SPREAD",
                "allow_out_of_order_execution": True,
            },
        ),
        # Case 3: Many CPUs
        HashOperatorTestCase(
            input_size_bytes=16 * GiB,
            input_num_blocks=128,
            target_num_partitions=32,
            total_cpu=256.0,
            expected_num_partitions=32,
            expected_num_aggregators=32,
            expected_ray_remote_args={
                "max_concurrency": 1,
                "num_cpus": 0.25,
                "memory": 1073741824,
                "scheduling_strategy": "SPREAD",
                "allow_out_of_order_execution": True,
            },
        ),
        # Case 4: Testing num_cpus derived from memory allocation
        HashOperatorTestCase(
            input_size_bytes=50 * GiB,
            input_num_blocks=200,
            target_num_partitions=None,
            total_cpu=1024,  # Many CPUs
            expected_num_partitions=200,
            expected_num_aggregators=128,  # min(200, min(1000, 128 (default max))
            expected_ray_remote_args={
                "max_concurrency": 2,  # ceil(200 / 128)
                "num_cpus": 0.16,  # ~0.6Gb / 4Gb = ~0.16
                "memory": 687865856,
                "scheduling_strategy": "SPREAD",
                "allow_out_of_order_execution": True,
            },
        ),
        # Case 6: No dataset size estimate inferred (fallback to default memory request)
        # Memory calculation (fallback):
        # max_mem_per_agg = 32 GiB / 32 = 1 GiB
        # modest_mem = 1 GiB / 2 = 512 MiB
        # memory = min(512 MiB, DEFAULT_1GiB) = 512 MiB = 536870912
        # CPU calculation:
        # cap = min(4.0, 32.0 * 0.25 / 32) = 0.25
        # target = min(0.25, 536870912 / 4 GiB) = 0.12
        HashOperatorTestCase(
            input_size_bytes=None,
            input_num_blocks=None,
            target_num_partitions=None,
            total_cpu=32.0,
            expected_num_partitions=200,
            expected_num_aggregators=32,
            expected_ray_remote_args={
                "max_concurrency": 7,
                "num_cpus": 0.12,
                "memory": 536870912,
                "scheduling_strategy": "SPREAD",
                "allow_out_of_order_execution": True,
            },
        ),
    ],
)
def test_hash_aggregate_operator_remote_args(
    ray_start_regular,
    tc,
):
    """Test that HashAggregateOperator correctly estimates memory, CPU, and other resources
    for aggregator actors based on dataset size estimates as well as cluster resources.
    """
    # Fake the upstream input reporting the configured size and block count.
    logical_op_mock = MagicMock(LogicalOperator)
    logical_op_mock.infer_metadata.return_value = BlockMetadata(
        num_rows=None,
        size_bytes=tc.input_size_bytes,
        exec_stats=None,
        input_files=None,
    )
    logical_op_mock.estimated_num_outputs.return_value = tc.input_num_blocks
    op_mock = MagicMock(PhysicalOperator)
    op_mock._output_dependencies = []
    op_mock._logical_operators = [logical_op_mock]
    # Create some test aggregation functions
    agg_fns = [Sum("value"), Count()]
    # Patch the total cluster resources
    with patch(
        "ray.data._internal.execution.operators.hash_shuffle.ray.cluster_resources",
        return_value={"CPU": tc.total_cpu, "memory": tc.total_memory},
    ):
        # Create the hash aggregate operator
        op = HashAggregateOperator(
            input_op=op_mock,
            data_context=DataContext.get_current(),
            aggregation_fns=agg_fns,
            key_columns=("id",),
            num_partitions=tc.target_num_partitions,
        )
        # Validate the estimations
        assert op._num_partitions == tc.expected_num_partitions
        assert op._aggregator_pool.num_aggregators == tc.expected_num_aggregators
        assert (
            op._aggregator_pool._aggregator_ray_remote_args
            == tc.expected_ray_remote_args
        )
@pytest.mark.parametrize(
    "tc",
    [
        # Case 1: Auto-derived partitions with limited CPUs
        # Memory calculation:
        # max_partitions_per_agg = ceil(16 / 4) = 4
        # partition_size = ceil(2 GiB / 16) = 128 MiB
        # shuffle + output = 2 * (128 MiB * 4) = 1024 MiB
        # with 1.3x skew factor: ceil(1024 MiB * 1.3) = 1395864372
        # CPU calculation:
        # cap = min(4.0, 4.0 * 0.25 / 4) = 0.25
        # target = min(0.25, 1395864372 / 4 GiB) = 0.25
        HashOperatorTestCase(
            input_size_bytes=2 * GiB,
            input_num_blocks=16,
            target_num_partitions=None,
            total_cpu=4.0,
            expected_num_partitions=16,
            expected_num_aggregators=4,
            expected_ray_remote_args={
                "max_concurrency": 4,
                "num_cpus": 0.25,
                "memory": 1395864372,
                "scheduling_strategy": "SPREAD",
                "allow_out_of_order_execution": True,
            },
        ),
        # Case 2: Single partition produced
        # Memory calculation:
        # max_partitions_per_agg = ceil(1 / 1) = 1
        # partition_size = ceil(512 MiB / 1) = 512 MiB
        # shuffle + output = 2 * (512 MiB * 1) = 1024 MiB
        # with 1.3x skew factor: ceil(1024 MiB * 1.3) = 1395864372
        # CPU calculation:
        # cap = min(4.0, 8.0 * 0.25 / 1) = 2.0
        # target = min(2.0, 1395864372 / 4 GiB) = 0.33
        HashOperatorTestCase(
            input_size_bytes=512 * MiB,
            input_num_blocks=8,
            target_num_partitions=1,
            total_cpu=8.0,
            expected_num_partitions=1,
            expected_num_aggregators=1,
            expected_ray_remote_args={
                "max_concurrency": 1,
                "num_cpus": 0.33,
                "memory": 1395864372,
                "scheduling_strategy": "SPREAD",
                "allow_out_of_order_execution": True,
            },
        ),
        # Case 3: Many CPUs
        # Memory calculation:
        # max_partitions_per_agg = ceil(32 / 32) = 1
        # partition_size = ceil(16 GiB / 32) = 512 MiB
        # shuffle + output = 2 * (512 MiB * 1) = 1024 MiB
        # with 1.3x skew factor: ceil(1024 MiB * 1.3) = 1395864372
        # CPU calculation:
        # cap = min(4.0, 256.0 * 0.25 / 32) = 2.0
        # target = min(2.0, 1395864372 / 4 GiB) = 0.33
        HashOperatorTestCase(
            input_size_bytes=16 * GiB,
            input_num_blocks=128,
            target_num_partitions=32,
            total_cpu=256.0,
            expected_num_partitions=32,
            expected_num_aggregators=32,
            expected_ray_remote_args={
                "max_concurrency": 1,
                "num_cpus": 0.33,
                "memory": 1395864372,
                "scheduling_strategy": "SPREAD",
                "allow_out_of_order_execution": True,
            },
        ),
        # Case 4: Testing num_cpus derived from memory allocation
        # Memory calculation:
        # max_partitions_per_agg = ceil(200 / 128) = 2
        # partition_size = ceil(50 GiB / 200) = 256 MiB
        # shuffle + output = 2 * (256 MiB * 2) = 1024 MiB
        # with 1.3x skew factor: ceil(1024 MiB * 1.3) = 1395864372
        # CPU calculation:
        # cap = min(4.0, 1024 * 0.25 / 128) = 2.0
        # target = min(2.0, 1395864372 / 4 GiB) = 0.33
        HashOperatorTestCase(
            input_size_bytes=50 * GiB,
            input_num_blocks=200,
            target_num_partitions=None,
            total_cpu=1024,  # Many CPUs
            expected_num_partitions=200,
            expected_num_aggregators=128,  # min(200, min(1000, 128 (default max))
            expected_ray_remote_args={
                "max_concurrency": 2,  # ceil(200 / 128)
                "num_cpus": 0.33,
                "memory": 1395864372,
                "scheduling_strategy": "SPREAD",
                "allow_out_of_order_execution": True,
            },
        ),
        # Case 5: No dataset size estimate inferred (fallback to default memory request)
        # Memory calculation (fallback):
        # max_mem_per_agg = 32 GiB / 32 = 1 GiB
        # modest_mem = 1 GiB / 2 = 512 MiB
        # memory = min(512 MiB, DEFAULT_1GiB) = 512 MiB = 536870912
        # CPU calculation:
        # cap = min(4.0, 32.0 * 0.25 / 32) = 0.25
        # target = min(0.25, 536870912 / 4 GiB) = 0.12
        HashOperatorTestCase(
            input_size_bytes=None,
            input_num_blocks=None,
            target_num_partitions=None,
            total_cpu=32.0,
            expected_num_partitions=200,
            expected_num_aggregators=32,
            expected_ray_remote_args={
                "max_concurrency": 7,
                "num_cpus": 0.12,
                "memory": 536870912,
                "scheduling_strategy": "SPREAD",
                "allow_out_of_order_execution": True,
            },
        ),
    ],
)
def test_hash_shuffle_operator_remote_args(
    ray_start_regular,
    tc,
):
    """Test that HashShuffleOperator correctly estimates memory, CPU, and other resources
    for aggregator actors based on dataset size estimates as well as cluster resources.
    """
    # Fake the upstream input reporting the configured size and block count.
    logical_op_mock = MagicMock(LogicalOperator)
    logical_op_mock.infer_metadata.return_value = BlockMetadata(
        num_rows=None,
        size_bytes=tc.input_size_bytes,
        exec_stats=None,
        input_files=None,
    )
    logical_op_mock.estimated_num_outputs.return_value = tc.input_num_blocks
    op_mock = MagicMock(PhysicalOperator)
    op_mock._output_dependencies = []
    op_mock._logical_operators = [logical_op_mock]
    # Patch the total cluster resources
    with patch(
        "ray.data._internal.execution.operators.hash_shuffle.ray.cluster_resources",
        return_value={"CPU": tc.total_cpu, "memory": tc.total_memory},
    ):
        with patch(
            "ray.data._internal.execution.operators.hash_shuffle._get_total_cluster_resources"
        ) as mock_resources:
            mock_resources.return_value = ExecutionResources(
                cpu=tc.total_cpu, memory=tc.total_memory
            )
            # Create the hash shuffle operator
            op = HashShuffleOperator(
                input_op=op_mock,
                data_context=DataContext.get_current(),
                key_columns=("id",),
                num_partitions=tc.target_num_partitions,
            )
            # Validate the estimations
            assert op._num_partitions == tc.expected_num_partitions
            assert op._aggregator_pool.num_aggregators == tc.expected_num_aggregators
            assert (
                op._aggregator_pool._aggregator_ray_remote_args
                == tc.expected_ray_remote_args
            )
def test_aggregator_ray_remote_args_partial_override(ray_start_regular):
    """Partial ``aggregator_ray_remote_args_override`` keeps system defaults.

    A user supplying only some remote-arg values (here ``num_cpus``) must not
    wipe out the values the operator derives itself: scheduling strategy,
    out-of-order execution, concurrency, and memory must all survive.
    """
    # Simulated upstream operator: 16 blocks totalling 2 GiB.
    upstream_logical = MagicMock(LogicalOperator)
    upstream_logical.infer_metadata.return_value = BlockMetadata(
        num_rows=None,
        size_bytes=2 * GiB,
        exec_stats=None,
        input_files=None,
    )
    upstream_logical.estimated_num_outputs.return_value = 16
    upstream_physical = MagicMock(PhysicalOperator)
    upstream_physical._output_dependencies = []
    upstream_physical._logical_operators = [upstream_logical]
    # Pin the cluster resources the operator consults while deriving defaults.
    with patch(
        "ray.data._internal.execution.operators.hash_shuffle.ray.cluster_resources",
        return_value={"CPU": 4.0, "memory": 32 * GiB},
    ):
        op = HashAggregateOperator(
            input_op=upstream_physical,
            data_context=DataContext.get_current(),
            aggregation_fns=[Count()],
            key_columns=("id",),
            # Only num_cpus is overridden by the user.
            aggregator_ray_remote_args_override={"num_cpus": 0.5},
        )
        remote_args = op._aggregator_pool._aggregator_ray_remote_args
        # The user-provided value wins.
        assert remote_args["num_cpus"] == 0.5
        # System defaults for the remaining keys are preserved.
        assert remote_args["scheduling_strategy"] == "SPREAD"
        assert remote_args["allow_out_of_order_execution"] is True
        assert "max_concurrency" in remote_args
        assert "memory" in remote_args
# Allow running this test module directly: delegate to pytest's CLI and
# propagate its exit code to the shell.
if __name__ == "__main__":
    import sys

    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_hash_shuffle.py",
"license": "Apache License 2.0",
"lines": 584,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/train/v2/tests/test_torch_gpu.py | import time
from typing import List
import pytest
import torch
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader, DistributedSampler
import ray
from ray.train import RunConfig, ScalingConfig
from ray.train.examples.pytorch.torch_linear_example import LinearDataset
from ray.train.torch import TorchTrainer
from ray.train.v2._internal.execution.callback import WorkerGroupCallback
from ray.train.v2._internal.execution.worker_group import Worker
from ray.train.v2.api.exceptions import WorkerGroupError
def test_torch_trainer_cuda_initialization(ray_start_4_cpus_2_gpus):
    """Test that Torch CUDA initialization works with TorchTrainer.

    This test verifies that PyTorch can properly initialize CUDA on multiple
    workers before the training context is set up, ensuring that GPU resources
    are available and accessible across all training workers.

    See https://github.com/ray-project/ray/pull/56509 for more details.
    """

    def train_func():
        """Empty training function for this initialization test.

        Since we're only testing CUDA initialization, the actual training
        logic is not needed for this test case.
        """
        pass

    def init_torch():
        """Trigger (lazy) initialization of CUDA."""
        torch.cuda.is_available()

    class InitTorchCallback(WorkerGroupCallback):
        """Callback to initialize PyTorch CUDA before training begins.

        Implements before_init_train_context because this is where torch is typically imported,
        ensuring that the CUDA environment is properly initialized.
        """

        def before_init_train_context(self, workers: List[Worker]):
            """Execute CUDA initialization on all workers."""
            # Fan the CUDA probe out to every worker and block until all finish.
            futures = []
            for worker in workers:
                futures.append(worker.execute_async(init_torch))
            ray.get(futures)
            # The callback contract expects a dict of context updates; none needed.
            return {}

    callback = InitTorchCallback()
    trainer = TorchTrainer(
        train_func,
        scaling_config=ScalingConfig(num_workers=2, use_gpu=True),
        run_config=RunConfig(callbacks=[callback]),
    )
    trainer.fit()
@pytest.mark.parametrize("num_gpus_per_worker", [0.5, 1, 2])
def test_torch_get_devices(ray_start_2x2_gpu_cluster, num_gpus_per_worker):
    """Check ``get_device()``/``get_devices()`` per world rank for fractional,
    single-, and multi-GPU workers on a 2-node, 2-GPU-per-node cluster.
    """
    # cluster setups: 2 nodes, 2 gpus per node
    # `CUDA_VISIBLE_DEVICES` is set to "0,1" on node 1 and node 2
    if num_gpus_per_worker == 0.5:
        # worker gpu topology:
        # 4 workers on node 1, 4 workers on node 2
        # `ray.get_gpu_ids()` returns [0], [0], [1], [1] on node 1
        # and [0], [0], [1], [1] on node 2
        expected_devices_per_rank = [[0], [0], [1], [1], [0], [0], [1], [1]]
    elif num_gpus_per_worker == 1:
        # worker gpu topology:
        # 2 workers on node 1, 2 workers on node 2
        # `ray.get_gpu_ids()` returns [0], [1] on node 1 and [0], [1] on node 2
        expected_devices_per_rank = [[0], [1], [0], [1]]
    elif num_gpus_per_worker == 2:
        # worker gpu topology:
        # 1 workers on node 1, 1 workers on node 2
        # `ray.get_gpu_ids()` returns {0, 1} on node 1 and {0, 1} on node 2
        # and `device_id` returns the one index from each set.
        # So total count of devices should be 2.
        expected_devices_per_rank = [[0, 1], [0, 1]]
    else:
        raise RuntimeError(
            "New parameter for this test has been added without checking that the "
            "correct devices have been returned."
        )

    def train_fn():
        # The torch-visible current device must match Ray Train's assigned device.
        assert torch.cuda.current_device() == ray.train.torch.get_device().index
        devices = sorted([device.index for device in ray.train.torch.get_devices()])
        rank = ray.train.get_context().get_world_rank()
        assert devices == expected_devices_per_rank[rank]

    trainer = TorchTrainer(
        train_fn,
        scaling_config=ray.train.ScalingConfig(
            num_workers=int(4 / num_gpus_per_worker),
            use_gpu=True,
            resources_per_worker={"GPU": num_gpus_per_worker},
        ),
    )
    trainer.fit()
def test_torch_prepare_model(ray_start_4_cpus_2_gpus):
    """Tests if ``prepare_model`` correctly wraps in DDP."""

    def train_fn():
        linear = torch.nn.Linear(1, 1)
        wrapped = ray.train.torch.prepare_model(linear)
        # prepare_model must hand back a DDP-wrapped module ...
        assert isinstance(wrapped, DistributedDataParallel)
        # ... whose parameters live on the worker's GPU.
        assert next(wrapped.parameters()).is_cuda

    scaling = ScalingConfig(num_workers=2, use_gpu=True)
    TorchTrainer(train_fn, scaling_config=scaling).fit()
class LinearDatasetDict(LinearDataset):
    """Modifies the LinearDataset to return a Dict instead of a Tuple."""

    def __getitem__(self, index):
        # Same (x, y) pair as the base class, keyed by name instead of position.
        return {"x": self.x[index, None], "y": self.y[index, None]}
class NonTensorDataset(LinearDataset):
    """Modifies the LinearDataset to also return non-tensor objects."""

    def __getitem__(self, index):
        # "y" is a plain int: exercises non-tensor elements in a batch.
        return {"x": self.x[index, None], "y": 2}
@pytest.mark.parametrize(
    "dataset", (LinearDataset, LinearDatasetDict, NonTensorDataset)
)
def test_torch_prepare_dataloader(ray_start_4_cpus_2_gpus, dataset):
    """Tests ``prepare_data_loader`` with tuple-, dict-, and mixed-style datasets.

    ``dataset`` is the parametrized dataset *class* (instantiated below to build
    the DataLoader). Verifies that a DistributedSampler is installed and that
    every tensor batch element is moved to the worker's GPU.
    """
    data_loader = DataLoader(dataset(a=1, b=2, size=10))

    def train_fn():
        wrapped_data_loader = ray.train.torch.prepare_data_loader(data_loader)
        # Check that DistributedSampler has been added to the data loader.
        assert isinstance(wrapped_data_loader.sampler, DistributedSampler)
        # Make sure you can properly iterate through the DataLoader.
        # BUG FIX: ``dataset`` is a class, so branch on identity. The previous
        # ``isinstance(dataset, LinearDataset)`` checks were always False (a
        # class object is not an instance of itself), which silently skipped
        # every per-batch device assertion below.
        # Case where the dataset returns a tuple or list from __getitem__.
        if dataset is LinearDataset:
            for batch in wrapped_data_loader:
                x = batch[0]
                y = batch[1]
                # Make sure the data is on the correct device.
                assert x.is_cuda and y.is_cuda
        # Case where the dataset returns a dict from __getitem__.
        elif dataset is LinearDatasetDict:
            for batch in wrapped_data_loader:
                for x, y in zip(batch["x"], batch["y"]):
                    # Make sure the data is on the correct device.
                    assert x.is_cuda and y.is_cuda
        # Case where the dataset also returns non-tensor objects.
        elif dataset is NonTensorDataset:
            for batch in wrapped_data_loader:
                for x, y in zip(batch["x"], batch["y"]):
                    # Tensors move to GPU; non-tensor values pass through.
                    assert x.is_cuda and y == 2

    trainer = TorchTrainer(
        train_fn, scaling_config=ScalingConfig(num_workers=2, use_gpu=True)
    )
    trainer.fit()
def test_torch_fail_on_nccl_timeout(ray_start_4_cpus_2_gpus):
    """Tests that TorchTrainer raises exception on NCCL timeouts."""

    def train_fn():
        model = torch.nn.Linear(1, 1)
        model = ray.train.torch.prepare_model(model)
        # Rank 0 worker will never reach the collective operation.
        # NCCL should timeout.
        if ray.train.get_context().get_world_rank() == 0:
            while True:
                time.sleep(100)
        torch.distributed.barrier()

    trainer = TorchTrainer(
        train_fn,
        scaling_config=ScalingConfig(num_workers=2, use_gpu=True),
        # Short collective timeout so the stuck barrier surfaces quickly.
        torch_config=ray.train.torch.TorchConfig(timeout_s=2),
    )
    # Training should fail and not hang.
    with pytest.raises(WorkerGroupError):
        trainer.fit()
# Allow running this test module directly: run pytest (verbose, fail fast)
# and exit with its status code.
if __name__ == "__main__":
    import sys

    sys.exit(pytest.main(["-v", "-x", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/tests/test_torch_gpu.py",
"license": "Apache License 2.0",
"lines": 165,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/serve/tests/test_custom_autoscaling_metrics.py | import asyncio
import sys
from typing import Dict
import pytest
import ray
from ray import serve
from ray._common.test_utils import SignalActor, wait_for_condition
from ray.serve._private.common import DeploymentID
from ray.serve._private.test_utils import check_num_replicas_eq
from ray.serve.config import AutoscalingContext, AutoscalingPolicy
def get_autoscaling_metrics_from_controller(
    client, deployment_id: DeploymentID
) -> Dict[str, float]:
    """Get autoscaling metrics from the controller for testing."""
    # Ask the controller actor for this deployment's recorded metrics and
    # block until the result is available.
    return ray.get(
        client._controller._get_metrics_for_deployment_for_testing.remote(
            deployment_id
        )
    )
def custom_autoscaling_policy(ctx: AutoscalingContext):
    """Scale to 3 replicas iff both views of the custom "counter" metric sum to 10.

    The aggregated view sums the per-replica aggregated values; the raw view
    sums the most recent raw sample from each replica's time series. Otherwise
    request a single replica. Returns ``(num_replicas, extra_state)``.
    """
    aggregated_total = sum(ctx.aggregated_metrics.get("counter", {}).values())
    latest_raw_total = sum(
        series[-1].value for series in ctx.raw_metrics.get("counter", {}).values()
    )
    return (3, {}) if latest_raw_total == aggregated_total == 10 else (1, {})
# Example from doc/source/serve/doc_code/autoscaling_policy.py
def max_cpu_usage_autoscaling_policy(ctx: AutoscalingContext):
    """Step the replica count by one based on the latest "cpu_usage" value.

    Uses the last entry of the aggregated "cpu_usage" metric (0 when absent):
    above 80 scales up, below 30 scales down, both clamped to the
    capacity-adjusted bounds. Returns ``(num_replicas, extra_state)``.
    """
    usage_by_replica = ctx.aggregated_metrics.get("cpu_usage", {})
    if usage_by_replica:
        latest_usage = list(usage_by_replica.values())[-1]
    else:
        latest_usage = 0
    replicas = ctx.current_num_replicas
    if latest_usage > 80:
        # Scale up by one, bounded above by the adjusted maximum.
        return min(ctx.capacity_adjusted_max_replicas, replicas + 1), {}
    if latest_usage < 30:
        # Scale down by one, bounded below by the adjusted minimum.
        return max(ctx.capacity_adjusted_min_replicas, replicas - 1), {}
    return replicas, {}
class TestCustomServeMetrics:
    """Tests for deployment-defined autoscaling metrics.

    Covers metrics reported through ``record_autoscaling_stats`` (collection,
    timeout handling, value validation) and custom autoscaling policies that
    consume those metrics.
    """

    def test_custom_serve_metrics(self, serve_instance):
        """The controller records the latest value of a custom counter metric."""

        @serve.deployment(
            autoscaling_config={
                "min_replicas": 1,
                "max_replicas": 5,
                "upscale_delay_s": 0.5,
                "downscale_delay_s": 0.5,
                "metrics_interval_s": 0.1,
                "look_back_period_s": 1,
            }
        )
        class DummyMetricIncrementer:
            def __init__(self):
                self.counter = 0

            async def __call__(self) -> str:
                self.counter += 1
                return "Hello, world"

            def record_autoscaling_stats(self) -> Dict[str, int]:
                # Increments each time the deployment has been called
                return {"counter": self.counter}

        app_name = "test_custom_metrics_app"
        handle = serve.run(
            DummyMetricIncrementer.bind(), name=app_name, route_prefix="/"
        )
        dep_id = DeploymentID(name="DummyMetricIncrementer", app_name=app_name)
        # Call deployment 3 times
        [handle.remote() for _ in range(3)]

        def check_counter_value():
            metrics = get_autoscaling_metrics_from_controller(serve_instance, dep_id)
            return "counter" in metrics and metrics["counter"][-1][0].value == 3

        # The final counter value recorded by the controller should be 3
        wait_for_condition(
            check_counter_value,
            timeout=15,
        )

    def test_custom_serve_timeout(self, serve_instance):
        """A hanging ``record_autoscaling_stats`` is cancelled and records nothing."""

        @serve.deployment(
            autoscaling_config={
                "min_replicas": 1,
                "max_replicas": 5,
                "upscale_delay_s": 2,
                "downscale_delay_s": 10,
                "metrics_interval_s": 1,
                "look_back_period_s": 2,
            }
        )
        class DummyMetricTimeout:
            def __init__(self):
                self.counter = 0

            async def __call__(self) -> str:
                self.counter += 1
                return "Hello, world"

            async def record_autoscaling_stats(self) -> Dict[str, int]:
                # Block here until it is forced to cancel due to timeout beyond RAY_SERVE_RECORD_AUTOSCALING_STATS_TIMEOUT_S
                await asyncio.sleep(1000)

        app_name = "test_custom_metrics_app"
        handle = serve.run(DummyMetricTimeout.bind(), name=app_name, route_prefix="/")
        dep_id = DeploymentID(name="DummyMetricTimeout", app_name=app_name)
        # Call deployment 3 times
        [handle.remote() for _ in range(3)]
        # There should be no counter metric because asyncio timeout would have stopped the method execution
        metrics = get_autoscaling_metrics_from_controller(serve_instance, dep_id)
        assert metrics.get("counter", None) is None

    def test_custom_serve_invalid_metric_type(self, serve_instance):
        """Metrics with non-numeric values fail validation and are dropped."""

        @serve.deployment(
            autoscaling_config={
                "min_replicas": 1,
                "max_replicas": 5,
                "upscale_delay_s": 2,
                "downscale_delay_s": 10,
                "metrics_interval_s": 1,
                "look_back_period_s": 2,
            }
        )
        class DummyInvalidMetric:
            def __init__(self):
                self.counter = 0

            async def __call__(self) -> str:
                self.counter += 1
                return "Hello, world"

            def record_autoscaling_stats(self) -> Dict[str, str]:
                # Return an invalid metric dict whose values are neither int nor float
                return {"counter": "not_an_int"}

        app_name = "test_custom_metrics_app"
        handle = serve.run(DummyInvalidMetric.bind(), name=app_name, route_prefix="/")
        dep_id = DeploymentID(name="DummyInvalidMetric", app_name=app_name)
        # Call deployment 3 times
        [handle.remote() for _ in range(3)]
        # There should be no counter metric because it failed validation, must be int or float
        metrics = get_autoscaling_metrics_from_controller(serve_instance, dep_id)
        assert metrics.get("counter", None) is None

    def test_policy_using_custom_metrics(self, serve_instance):
        """A custom policy sees both aggregated and raw custom metrics and scales."""
        signal = SignalActor.remote()

        @serve.deployment(
            autoscaling_config={
                "min_replicas": 1,
                "max_replicas": 5,
                "upscale_delay_s": 2,
                "downscale_delay_s": 1,
                "metrics_interval_s": 0.1,
                "look_back_period_s": 1,
                "target_ongoing_requests": 10,
                "policy": AutoscalingPolicy(policy_function=custom_autoscaling_policy),
            },
            max_ongoing_requests=100,
        )
        class CustomMetricsDeployment:
            def __init__(self):
                self.counter = 0

            async def __call__(self) -> str:
                self.counter += 1
                await signal.wait.remote()
                return "Hello, world"

            def record_autoscaling_stats(self) -> Dict[str, int]:
                return {"counter": self.counter}

        handle = serve.run(CustomMetricsDeployment.bind())
        # Issue 10 requests that block on the signal, holding counter == 10.
        [handle.remote() for _ in range(10)]
        wait_for_condition(lambda: ray.get(signal.cur_num_waiters.remote()) == 10)
        # custom_autoscaling_policy requests 3 replicas when both metric views == 10.
        wait_for_condition(
            check_num_replicas_eq, name="CustomMetricsDeployment", target=3
        )
        signal.send.remote()

    def test_max_cpu_usage_autoscaling_policy(self, serve_instance):
        """Test autoscaling policy based on max CPU usage from documentation example."""
        signal = SignalActor.remote()

        @serve.deployment(
            autoscaling_config={
                "min_replicas": 1,
                "max_replicas": 5,
                "upscale_delay_s": 0.5,
                "downscale_delay_s": 0.5,
                "metrics_interval_s": 0.1,
                "look_back_period_s": 1,
                "target_ongoing_requests": 10,
                "policy": AutoscalingPolicy(
                    policy_function=max_cpu_usage_autoscaling_policy
                ),
            },
            max_ongoing_requests=100,
        )
        class MaxCpuUsageDeployment:
            def __init__(self):
                self.cpu_usage = 0

            async def __call__(self) -> str:
                self.cpu_usage += 1
                await signal.wait.remote()
                return "Hello, world"

            def record_autoscaling_stats(self) -> Dict[str, int]:
                return {"cpu_usage": self.cpu_usage}

        handle = serve.run(MaxCpuUsageDeployment.bind())
        # Test scale up when CPU usage > 80
        # Set CPU usage to 90 to trigger scale up
        dep_id = DeploymentID(name="MaxCpuUsageDeployment")
        # Send requests to increase CPU usage
        [handle.remote() for _ in range(90)]
        wait_for_condition(lambda: ray.get(signal.cur_num_waiters.remote()) == 90)

        # Wait for metrics to be recorded and policy to trigger scale up
        def check_scale_up():
            metrics = get_autoscaling_metrics_from_controller(serve_instance, dep_id)
            return "cpu_usage" in metrics and metrics["cpu_usage"][-1][0].value >= 90

        wait_for_condition(check_scale_up, timeout=10)
        # Should scale up to 2 replicas due to high CPU usage
        wait_for_condition(
            check_num_replicas_eq, name="MaxCpuUsageDeployment", target=2, timeout=15
        )
        # Release signal and test scale down when CPU usage < 30
        signal.send.remote()
        wait_for_condition(lambda: ray.get(signal.cur_num_waiters.remote()) == 0)
        signal = SignalActor.remote()

        # Reset CPU usage to low value by creating new deployment instance
        # This simulates low CPU usage scenario
        @serve.deployment(
            autoscaling_config={
                "min_replicas": 1,
                "max_replicas": 5,
                "upscale_delay_s": 0.5,
                "downscale_delay_s": 0.5,
                "metrics_interval_s": 0.1,
                "look_back_period_s": 1,
                "target_ongoing_requests": 10,
                "policy": AutoscalingPolicy(
                    policy_function=max_cpu_usage_autoscaling_policy
                ),
            },
            max_ongoing_requests=100,
        )
        class LowCpuUsageDeployment:
            def __init__(self):
                self.cpu_usage = 0

            async def __call__(self) -> str:
                self.cpu_usage += 1
                await signal.wait.remote()
                return "Hello, world"

            def record_autoscaling_stats(self) -> Dict[str, int]:
                # Return low CPU usage to trigger scale down
                return {"cpu_usage": 20}

        handle = serve.run(LowCpuUsageDeployment.bind())
        # Send a few requests to establish low CPU usage
        [handle.remote() for _ in range(5)]
        wait_for_condition(lambda: ray.get(signal.cur_num_waiters.remote()) == 5)
        # Wait for metrics to be recorded
        dep_id_low = DeploymentID(name="LowCpuUsageDeployment")

        def check_low_cpu():
            metrics = get_autoscaling_metrics_from_controller(
                serve_instance, dep_id_low
            )
            return "cpu_usage" in metrics and metrics["cpu_usage"][-1][0].value <= 30

        wait_for_condition(check_low_cpu, timeout=10)
        # Should downscale to 1 replica due to low CPU usage
        wait_for_condition(
            check_num_replicas_eq, name="LowCpuUsageDeployment", target=1, timeout=15
        )
        signal.send.remote()
if __name__ == "__main__":
    # Run this test module directly under pytest: -v for verbose test names,
    # -s to disable output capture; propagate pytest's exit code to the shell.
    sys.exit(pytest.main(["-v", "-s", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/test_custom_autoscaling_metrics.py",
"license": "Apache License 2.0",
"lines": 256,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/_internal/logical/rules/projection_pushdown.py
from typing import Dict, List, Optional, Set, Tuple
from ray.data._internal.logical.interfaces import (
LogicalOperator,
LogicalOperatorSupportsProjectionPushdown,
LogicalPlan,
Rule,
)
from ray.data._internal.logical.operators import Project
from ray.data._internal.planner.plan_expression.expression_visitors import (
_ColumnReferenceCollector,
_ColumnSubstitutionVisitor,
_is_col_expr,
)
from ray.data.expressions import (
AliasExpr,
ColumnExpr,
Expr,
StarExpr,
)
__all__ = [
"ProjectionPushdown",
]
def _collect_referenced_columns(exprs: List[Expr]) -> Optional[List[str]]:
    """Return the names of all columns referenced by the given expressions.

    Walks each expression tree and gathers the names of every ``ColumnExpr``
    node, e.g. the expression ``col1 + col2`` yields ``["col1", "col2"]``.

    Returns:
        The collected column names, or ``None`` when any expression is
        ``star()`` (meaning "all columns" — no specific subset can be
        determined without a schema).
    """
    # star() means every input column is required; signal that with None.
    # TODO (goutam): Instead of using None to refer to All columns, resolve the AST against the schema.
    # https://github.com/ray-project/ray/issues/57720
    for expr in exprs:
        if isinstance(expr, StarExpr):
            return None

    collector = _ColumnReferenceCollector()
    for expr in exprs or []:
        collector.visit(expr)
    return collector.get_column_refs()
def _analyze_upstream_project(
    upstream_project: Project,
) -> Tuple[Set[str], Dict[str, Expr], Set[str]]:
    """
    Analyze what the upstream project produces and identify removed columns.

    Args:
        upstream_project: The upstream ``Project`` operator being fused into.

    Returns:
        Tuple of:
        - Names of all non-star output columns of the upstream project.
        - Mapping of output column name -> the expression defining it.
        - Names of upstream *input* columns dropped because they were renamed
          (i.e. not propagated into the upstream's output).

    Example: Upstream exprs [col("x").alias("y")] -> removed set contains
    "x" if "x" is not otherwise in the output.
    """
    # NOTE: annotation uses typing.Dict for consistency with the rest of this
    # module (the original mixed `Tuple`/`Set` with builtin `dict[...]`).
    output_column_names = {
        expr.name for expr in upstream_project.exprs if not isinstance(expr, StarExpr)
    }

    # Compose column definitions in the form of a mapping of
    #  - Target column name
    #  - Target expression
    output_column_defs = {
        expr.name: expr for expr in _filter_out_star(upstream_project.exprs)
    }

    # Identify upstream input columns removed by renaming (ie not propagated into
    # its output)
    upstream_column_renaming_map = _extract_input_columns_renaming_mapping(
        upstream_project.exprs
    )

    return (
        output_column_names,
        output_column_defs,
        set(upstream_column_renaming_map.keys()),
    )
def _validate_fusion(
    downstream_project: Project,
    upstream_has_all: bool,
    upstream_output_columns: Set[str],
    removed_by_renames: Set[str],
) -> Tuple[bool, Set[str]]:
    """
    Validate whether fusion is possible without rewriting expressions.

    Args:
        downstream_project: The downstream Project operator.
        upstream_has_all: True if the upstream Project carries a star()
            (i.e. passes all input columns through).
        upstream_output_columns: Column names the upstream Project produces.
        removed_by_renames: Input column names the upstream Project drops
            via renaming.

    Returns:
        Tuple of (is_valid, missing_columns):
        - is_valid: True when every downstream expression can be fused.
        - missing_columns: Names referenced downstream but unavailable.

    Example: downstream refs "x" but upstream renamed "x" to "y" and dropped
    "x" -> (False, {"x"}).
    """
    missing_columns: Set[str] = set()

    for expr in downstream_project.exprs:
        if isinstance(expr, StarExpr):
            continue

        refs = set(_collect_referenced_columns([expr]) or [])
        # References that the upstream's own outputs do not satisfy.
        external_refs = refs - upstream_output_columns

        # Without a star(), anything outside the upstream's explicit output
        # set is unreachable (e.g. upstream selects ["a","b"], downstream
        # refs "c").
        if external_refs and not upstream_has_all:
            missing_columns |= external_refs

        # Even with a star(), a column dropped by a rename is gone (e.g.
        # upstream renames "x" -> "y", downstream still refs "x").
        dropped = external_refs & removed_by_renames
        if dropped:
            missing_columns |= dropped

    return (not missing_columns, missing_columns)
def _try_fuse(upstream_project: Project, downstream_project: Project) -> Project:
    """
    Attempt to merge two consecutive Project operations into one.

    Returns ``downstream_project`` unchanged when fusion is blocked by
    incompatible remote args or compute strategies; raises ``KeyError`` when
    the downstream references columns the upstream does not make available.

    Example: Upstream: [star(), col("x").alias("y")], Downstream: [star(), (col("y") + 1).alias("z")] -> Fused: [star(), (col("x") + 1).alias("z")]
    """
    # Check resource compatibility before attempting fusion
    # This ensures with_column respects resource boundaries like map_batches does
    from ray.data._internal.logical.rules.operator_fusion import (
        FuseOperators,
        are_remote_args_compatible,
    )

    # Check if remote args (num_cpus, num_gpus, etc.) are compatible
    if not are_remote_args_compatible(
        upstream_project.ray_remote_args or {},
        downstream_project.ray_remote_args or {},
    ):
        # Resources don't match - cannot fuse
        return downstream_project

    # Check if compute strategies are compatible
    fused_compute = FuseOperators._fuse_compute_strategy(
        upstream_project.compute, downstream_project.compute
    )
    if fused_compute is None:
        # Compute strategies incompatible - cannot fuse
        return downstream_project

    upstream_has_star: bool = upstream_project.has_star_expr()

    # TODO add validations that
    #  - exprs only depend on input attrs (ie no dep on output of other exprs)

    # Analyze upstream: what columns it outputs, how they are defined, and
    # which input columns it drops via renames.
    (
        upstream_output_cols,
        upstream_column_defs,
        upstream_input_cols_removed,
    ) = _analyze_upstream_project(upstream_project)

    # Validate fusion possibility
    is_valid, missing_columns = _validate_fusion(
        downstream_project,
        upstream_has_star,
        upstream_output_cols,
        upstream_input_cols_removed,
    )

    if not is_valid:
        # Raise KeyError to match expected error type in tests
        raise KeyError(
            f"Column(s) {sorted(missing_columns)} not found. "
            f"Available columns: {sorted(upstream_output_cols) if not upstream_has_star else 'all columns (has star)'}"
        )

    # Following invariants are upheld for each ``Project`` logical op:
    #
    #   1. ``Project``s list of expressions are bound to op's input columns **only**
    #   (ie there could be no inter-dependency b/w expressions themselves)
    #
    #   2. Each of the expressions on the ``Project``s list constitutes an output
    #   column definition, where column's name is derived from ``expr.name`` and
    #   column itself is derived by executing that expression against the op's
    #   input block.
    #
    # Therefore to abide by and satisfy aforementioned invariants, when fusing
    # 2 ``Project`` operators, following scenarios are considered:
    #
    #   1. Composition: downstream including (and potentially renaming) upstream
    #   output columns (this is the case when downstream holds ``StarExpr``).
    #
    #   2. Projection: downstream projecting upstream output columns (by for ex,
    #   only selecting & transforming some of the upstream output columns).
    #
    # Upstream output column refs inside downstream expressions need to be bound
    # to upstream output column definitions to satisfy invariant #1 (common for both
    # composition/projection cases)
    v = _ColumnSubstitutionVisitor(upstream_column_defs)

    rebound_downstream_exprs = [
        v.visit(e) for e in _filter_out_star(downstream_project.exprs)
    ]

    if not downstream_project.has_star_expr():
        # Projection case: this is when downstream is a *selection* (ie, not including
        # the upstream columns with ``StarExpr``)
        #
        # Example:
        #   Upstream: Project([col("a").alias("b")])
        #   Downstream: Project([col("b").alias("c")])
        #
        #   Result: Project([col("a").alias("c")])
        new_exprs = rebound_downstream_exprs
    else:
        # Composition case: downstream has ``StarExpr`` (entailing that downstream
        # output will be including all of the upstream output columns)
        #
        # Example 1:
        #   Upstream: [star(), col("a").alias("b")],
        #   Downstream: [star(), col("b").alias("c")]
        #
        #   Result: [star(), col("a").alias("b"), col("a").alias("c")]
        #
        # Example 2:
        #   Input (columns): ["a", "b"]
        #
        #   Upstream: [star({"b": "z"}), col("a").alias("x")],
        #   Downstream: [star({"x": "y"}), col("z")]
        #
        #   Result: [star(), col("a").alias("y"), col("b").alias("z")]

        # Extract downstream's input column rename map (downstream inputs are
        # upstream's outputs)
        downstream_input_column_rename_map = _extract_input_columns_renaming_mapping(
            downstream_project.exprs
        )

        # Collect upstream output column expression "projected" to become
        # downstream expressions
        projected_upstream_output_col_exprs = []

        # When fusing 2 projections
        for e in upstream_project.exprs:
            # NOTE: We have to filter out upstream output columns that are
            #       being *renamed* by downstream expression
            if e.name not in downstream_input_column_rename_map:
                projected_upstream_output_col_exprs.append(e)

        new_exprs = projected_upstream_output_col_exprs + rebound_downstream_exprs

    # The fused op replaces both: it reads directly from the upstream's input
    # and keeps the downstream's remote args (assumed compatible, checked above).
    return Project(
        upstream_project.input_dependency,
        exprs=new_exprs,
        compute=fused_compute,
        ray_remote_args=downstream_project.ray_remote_args,
    )
def _filter_out_star(exprs: List[Expr]) -> List[Expr]:
    """Return ``exprs`` with every ``StarExpr`` entry removed."""
    non_star_exprs: List[Expr] = []
    for expr in exprs:
        if not isinstance(expr, StarExpr):
            non_star_exprs.append(expr)
    return non_star_exprs
class ProjectionPushdown(Rule):
    """
    Optimization rule that pushes projections (column selections) down the query plan.

    This rule performs two optimizations:

    1. Fuses consecutive Project operations to eliminate redundant projections
    2. Pushes projections into data sources (e.g., Read operations) to enable
       column pruning at the storage layer
    """

    def apply(self, plan: LogicalPlan) -> LogicalPlan:
        """Apply projection pushdown optimization to the entire plan.

        Runs two passes over the DAG: first fuse adjacent Project ops, then
        push the surviving projections into supporting data sources.
        Returns the original plan object when nothing changed.
        """
        dag = plan.dag

        new_dag = dag._apply_transform(self._try_fuse_projects)
        new_dag = new_dag._apply_transform(self._push_projection_into_read_op)

        # NOTE(review): assumes `_apply_transform` returns the identical DAG
        # object when no node was rewritten, so identity comparison detects
        # "no change" — TODO confirm against LogicalOperator._apply_transform.
        return LogicalPlan(new_dag, plan.context) if dag is not new_dag else plan

    @classmethod
    def _try_fuse_projects(cls, op: LogicalOperator) -> LogicalOperator:
        """
        Optimize a single Project operator.

        Steps:
            1. Iteratively fuse with upstream Project operations
            2. Push the resulting projection into the data source if possible

        Non-Project ops, and Projects whose input is not a Project, pass
        through unchanged. (Chains of 3+ Projects collapse across repeated
        applications of the transform.)
        """
        if not isinstance(op, Project):
            return op

        # Step 1: Iteratively fuse with upstream Project operations
        current_project: Project = op

        if not isinstance(current_project.input_dependency, Project):
            return op

        upstream_project: Project = current_project.input_dependency  # type: ignore[assignment]
        fused = _try_fuse(upstream_project, current_project)
        return fused

    @classmethod
    def _push_projection_into_read_op(cls, op: LogicalOperator) -> LogicalOperator:
        # Push a Project's column selection (and, for simple projections,
        # its renames) into an input operator that supports projection
        # pushdown (e.g. a Read); otherwise return the op unchanged.
        if not isinstance(op, Project):
            return op

        current_project: Project = op

        # Step 2: Push projection into the data source if supported
        input_op = current_project.input_dependency
        if (
            isinstance(input_op, LogicalOperatorSupportsProjectionPushdown)
            and input_op.supports_projection_pushdown()
        ):
            if current_project.has_star_expr():
                # If project has a star, then projection is not feasible
                required_columns = None
            else:
                # Otherwise, collect required columns to push projection down
                # into the reader
                required_columns = _collect_referenced_columns(current_project.exprs)

            # Check if it's a simple projection that could be pushed into
            # read as a whole
            is_projection = all(
                _is_col_expr(expr) for expr in _filter_out_star(current_project.exprs)
            )

            if is_projection:
                # NOTE: We only can rename output columns when it's a simple
                #       projection and Project operator is discarded (otherwise
                #       it might be holding expression referencing attributes
                #       by original their names prior to renaming)
                #
                # TODO fix by instead rewriting exprs
                output_column_rename_map = _extract_input_columns_renaming_mapping(
                    current_project.exprs
                )

                # Determine columns to project
                if required_columns is None:
                    # All columns case - need to determine available columns
                    if not output_column_rename_map:
                        # No renames and all columns - pass through as None
                        projection_map = None
                    else:
                        # Has renames - get the list of columns to apply renames to
                        current_projection = input_op.get_projection_map()
                        if current_projection is not None:
                            # Use output column names from existing projection (for chained renames)
                            columns = list(current_projection.values())
                        else:
                            # No existing projection - get all columns from schema
                            schema = input_op.infer_schema()
                            if schema is not None:
                                columns = schema.names
                            else:
                                # Cannot determine available columns - this shouldn't happen in practice
                                # for properly implemented datasources. Rather than guessing, raise an error.
                                raise RuntimeError(
                                    f"Cannot apply rename operation: schema unavailable for input operator "
                                    f"{input_op}. This may indicate a legacy datasource that doesn't properly "
                                    f"expose schema information."
                                )

                        # Build projection_map: apply renames to all columns
                        projection_map = {
                            col: output_column_rename_map.get(col, col)
                            for col in columns
                        }
                else:
                    # Specific columns selected - build projection_map with renames applied
                    projection_map = {
                        col: output_column_rename_map.get(col, col)
                        for col in required_columns
                    }

                # Apply projection to the read op
                # (the Project op itself is dropped from the plan here)
                return input_op.apply_projection(projection_map)
            else:
                # Complex expressions - apply projection without full rename
                projection_map = (
                    None
                    if required_columns is None
                    else {col: col for col in required_columns}
                )
                projected_input_op = input_op.apply_projection(projection_map)

                # Has transformations: Keep Project on top of optimized Read
                return Project(
                    projected_input_op,
                    exprs=current_project.exprs,
                    compute=current_project.compute,
                    ray_remote_args=current_project.ray_remote_args,
                )

        return current_project
def _extract_input_columns_renaming_mapping(
    projection_exprs: List[Expr],
) -> Dict[str, str]:
    """Fetch the renaming mapping of all input column names being renamed (replaced).

    Args:
        projection_exprs: A ``Project`` operator's expression list.

    Returns:
        Mapping of source column name -> new column name, with one entry per
        pure-rename expression (``alias(col("source"), "target")``). Star
        expressions and non-rename expressions are ignored.
    """
    # Dict comprehension instead of dict([list-comprehension]): avoids an
    # intermediate list of pairs (flake8-comprehensions C404).
    return {
        source: target
        for source, target in (
            _get_renaming_mapping(expr)
            for expr in _filter_out_star(projection_exprs)
            if _is_renaming_expr(expr)
        )
    }
def _get_renaming_mapping(expr: Expr) -> Tuple[str, str]:
    """Return ``(source_name, target_name)`` for a pure-rename expression."""
    # Callers must pre-filter with _is_renaming_expr; this is an invariant check.
    assert _is_renaming_expr(expr)

    rename: AliasExpr = expr
    return rename.expr.name, rename.name
def _is_renaming_expr(expr: Expr) -> bool:
    """Check whether ``expr`` is a pure column rename.

    A rename is an ``AliasExpr`` flagged with ``_is_rename``; its wrapped
    expression must be a bare ``ColumnExpr`` (asserted as an invariant).
    """
    if not (isinstance(expr, AliasExpr) and expr._is_rename):
        return False

    # Invariant: a rename always wraps a plain column reference.
    assert isinstance(
        expr.expr, ColumnExpr
    ), f"Renaming expression expected to be of the shape alias(col('source'), 'target') (got {expr})"
    return True
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/logical/rules/projection_pushdown.py",
"license": "Apache License 2.0",
"lines": 368,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.