sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
ray-project/ray:rllib/utils/metrics/stats/item_series.py | from collections import deque
from itertools import chain
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ray.rllib.utils.metrics.stats.base import StatsBase
from ray.rllib.utils.metrics.stats.utils import batch_values_to_cpu
from ray.util.annotations import DeveloperAPI
@DeveloperAPI
class ItemSeriesStats(StatsBase):
    """A Stats object that tracks a series of items.

    Use this if you want to track a series of items that should not be reduced.
    An example would be to log actions and translate them into a chart to visualize
    the distribution of actions outside of RLlib.

    This class will check if logged items are GPU tensors.
    If they are, they will be converted to CPU memory.

    Note that at the root level, the internal item list can grow to
    `window * len(incoming_stats)`.
    """

    # Identifier used by the stats (de)serialization machinery to pick this class.
    stats_cls_identifier = "item_series"

    def __init__(self, window: Optional[int] = None, *args, **kwargs):
        """Initializes an ItemSeriesStats instance.

        Args:
            window: Maximum number of items kept at leaf level. If None,
                the item list grows without bound.
        """
        super().__init__(*args, **kwargs)
        self._window = window
        # Backing store: a bounded deque at windowed leafs, a plain list
        # otherwise (see `_set_items()`).
        self.items: Union[List[Any], deque[Any]] = []
        self._set_items([])

    def _set_items(self, new_items):
        # Replace the backing store with `new_items`, choosing the container type.
        # For stats with window, use a deque with maxlen=window.
        # This way, we never store more values than absolutely necessary.
        if self._window and self.is_leaf:
            # Window always counts at leafs only (or non-root stats)
            self.items = deque(new_items, maxlen=self._window)
        # For infinite windows, use `new_values` as-is (a list).
        else:
            self.items = new_items

    def get_state(self) -> Dict[str, Any]:
        """Returns a serializable state dict (items and window plus base state)."""
        state = super().get_state()
        # NOTE(review): items are stored as-is here (no CPU transfer, possibly a
        # deque) — presumably checkpointing happens only after a reduce; confirm.
        state["items"] = self.items
        state["window"] = self._window
        return state

    def set_state(self, state: Dict[str, Any]) -> None:
        """Restores this object from a state dict produced by `get_state()`."""
        super().set_state(state)
        self.items = state["items"]
        self._window = state["window"]

    def push(self, item: Any) -> None:
        """Pushes a item into this Stats object.

        This method does not handle GPU tensors.

        Args:
            item: The item to push. Can be of any type but data should be in CPU memory.
        """
        self.items.append(item)
        # Defensive trim. When `items` is a windowed leaf deque, `maxlen`
        # already caps the length, so this branch never fires there.
        # NOTE(review): if `items` were a plain list (non-leaf with a window),
        # `popleft` would raise AttributeError — push appears intended for leaf
        # stats only; confirm.
        if self._window and len(self.items) > self._window:
            self.items.popleft()

    def reduce(self, compile: bool = True) -> Union[Any, "ItemSeriesStats"]:
        """Reduces the internal values list according to the constructor settings.

        Args:
            compile: Argument is ignored for ItemSeriesStats.

        Returns:
            The reduced value (can be of any type, depending on the input values and
            reduction method).
        """
        items = self.items
        # Hand the collected items off and start over with an empty store.
        self._set_items([])
        # Move any GPU tensors among the items to CPU memory.
        items = batch_values_to_cpu(items)
        if compile:
            return items
        # compile=False: return a fresh stats object carrying the items instead.
        return_stats = self.clone()
        return_stats._set_items(items)
        return return_stats

    def __len__(self) -> int:
        """Returns the length of the internal items list."""
        return len(self.items)

    def peek(
        self, compile: bool = True, latest_merged_only: bool = False
    ) -> Union[List[Any], Any]:
        """Returns the internal items list.

        This does not alter the internal items list.

        Args:
            compile: If True and items list is empty, returns np.nan. Otherwise
                returns the items list.
            latest_merged_only: If True, only considers the latest merged values.
                This parameter only works on aggregation stats (root or intermediate
                nodes). When enabled, peek() will only use the items from the most
                recent merge operation.

        Returns:
            The internal items list, or np.nan if compile=True and items list is empty.
        """
        # Check latest_merged_only validity
        if latest_merged_only and self.is_leaf:
            raise ValueError(
                "latest_merged_only can only be used on aggregation stats objects "
                "(is_leaf=False)"
            )
        # If latest_merged_only is True, use only the latest merged items
        if latest_merged_only:
            if self.latest_merged is None:
                # No merged items yet, return np.nan if compile=True, else empty list
                if compile:
                    return np.nan
                return []
            # Use only the latest merged items
            items = self.latest_merged
        else:
            # Normal peek behavior
            items = self.items
        items = batch_values_to_cpu(items)
        # If compile=True and items list is empty, return np.nan (consistent with other stats)
        if compile and len(items) == 0:
            return np.nan
        return items

    def merge(self, incoming_stats: List["ItemSeriesStats"]) -> None:
        """Merges ItemSeriesStats objects.

        Args:
            incoming_stats: The list of ItemSeriesStats objects to merge.

        Returns:
            None. The merge operation modifies self in place.
        """
        assert (
            not self.is_leaf
        ), "ItemSeriesStats should only be merged at aggregation stages (root or intermediate)"
        # Flatten all incoming item lists and append them to our own.
        new_items = [s.items for s in incoming_stats]
        new_items = list(chain.from_iterable(new_items))
        all_items = list(self.items) + new_items
        self.items = all_items
        # Track merged values for latest_merged_only peek functionality
        if not self.is_leaf:
            # Store the items that were merged in this operation (from incoming_stats only)
            self.latest_merged = new_items

    @staticmethod
    def _get_init_args(stats_object=None, state=None) -> Dict[str, Any]:
        """Returns the initialization arguments for this Stats object."""
        super_args = StatsBase._get_init_args(stats_object=stats_object, state=state)
        if state is not None:
            return {
                **super_args,
                "window": state["window"],
            }
        elif stats_object is not None:
            return {
                **super_args,
                "window": stats_object._window,
            }
        # NOTE(review): unlike sibling stats classes, this falls back to the base
        # args instead of raising when neither argument is given — confirm intent.
        return super_args

    def __repr__(self) -> str:
        return f"ItemSeriesStats(window={self._window}; len={len(self)})"
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/utils/metrics/stats/item_series.py",
"license": "Apache License 2.0",
"lines": 140,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:rllib/utils/metrics/stats/lifetime_sum.py | import time
from typing import Any, Dict, List, Union
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.metrics.stats.base import StatsBase
from ray.rllib.utils.metrics.stats.utils import safe_isnan, single_value_to_cpu
from ray.util.annotations import DeveloperAPI
torch, _ = try_import_torch()
_, tf, _ = try_import_tf()
@DeveloperAPI
class LifetimeSumStats(StatsBase):
    """A Stats object that tracks the sum of a series of singular values (not vectors)."""

    # Identifier used by the stats (de)serialization machinery to pick this class.
    stats_cls_identifier = "lifetime_sum"

    def __init__(
        self,
        with_throughput: bool = False,
        *args,
        **kwargs,
    ):
        """Initializes a LifetimeSumStats instance.

        Args:
            with_throughput: If True, track the throughput since the last restore
                from a checkpoint.
        """
        super().__init__(*args, **kwargs)
        # Running total of all pushed values; may be a (GPU) torch tensor if
        # tensors were pushed.
        self._lifetime_sum = 0.0
        self.track_throughputs = with_throughput
        # We need to initialize this to 0.0.
        # When setting state or reducing, these values are expected to be updated
        # when we calculate a throughput.
        self._value_at_last_reduce = 0.0
        self._value_at_last_restore = 0.0
        # We initialize this to the current time which may result in a low first
        # throughput value. It seems reasonable that starting from a checkpoint or
        # starting an experiment results in a low first throughput value.
        self._last_reduce_time = time.perf_counter()
        self._last_restore_time = time.perf_counter()

    @property
    def has_throughputs(self) -> bool:
        # True if throughput tracking was enabled in the constructor.
        return self.track_throughputs

    def initialize_throughput_reference_time(self, time: float) -> None:
        """Sets both throughput reference times to the given timestamp.

        NOTE: the `time` parameter shadows the `time` module inside this method.
        NOTE(review): the assertion message says "root stats" but the check only
        requires non-leaf (intermediates pass too) — confirm intent.
        """
        assert (
            not self.is_leaf
        ), "initialize_throughput_reference_time can only be called on root stats"
        self._last_reduce_time = time
        self._last_restore_time = time

    @staticmethod
    def _get_init_args(stats_object=None, state=None) -> Dict[str, Any]:
        """Returns the initialization arguments for this Stats object."""
        super_args = StatsBase._get_init_args(stats_object=stats_object, state=state)
        if state is not None:
            return {
                **super_args,
                "with_throughput": state["track_throughputs"],
            }
        elif stats_object is not None:
            return {
                **super_args,
                "with_throughput": stats_object.track_throughputs,
            }
        else:
            raise ValueError("Either stats_object or state must be provided")

    @property
    def throughputs(self) -> Dict[str, float]:
        """Returns the throughput since the last reduce.

        For root stats, also returns throughput since last restore.
        """
        assert (
            self.has_throughputs
        ), "Throughput tracking is not enabled on this Stats object"
        result = {
            "throughput_since_last_reduce": self.throughput_since_last_reduce,
        }
        # Only root stats track throughput since last restore
        if self.is_root:
            result["throughput_since_last_restore"] = self.throughput_since_last_restore
        return result

    def __len__(self) -> int:
        # A lifetime sum always holds exactly one (aggregated) value.
        return 1

    def peek(
        self, compile: bool = True, latest_merged_only: bool = False
    ) -> Union[Any, List[Any]]:
        """Returns the current lifetime sum value.

        If value is a GPU tensor, it's converted to CPU.

        Args:
            compile: If True, the result is compiled into a single value if possible.
            latest_merged_only: If True, only considers the latest merged values.
                This parameter only works on aggregation stats (root or intermediate
                nodes). When enabled, peek() will only return the sum that was added
                in the most recent merge operation.
        """
        # Check latest_merged_only validity
        if latest_merged_only and self.is_leaf:
            raise ValueError(
                "latest_merged_only can only be used on aggregation stats objects (is_leaf=False)."
            )
        # If latest_merged_only is True, use only the latest merged sum
        if latest_merged_only:
            if self.latest_merged is None:
                # No merged values yet, return 0
                value = 0.0
            else:
                # Use only the latest merged sum
                value = self.latest_merged
        else:
            # Normal peek behavior
            value = self._lifetime_sum
        # Convert GPU tensor to CPU
        if torch and isinstance(value, torch.Tensor):
            value = single_value_to_cpu(value)
        # compile=False callers expect a list of values (here: always length 1).
        return value if compile else [value]

    def get_state(self) -> Dict[str, Any]:
        """Returns a serializable state dict (CPU-side sum plus base state)."""
        state = super().get_state()
        state["lifetime_sum"] = single_value_to_cpu(self._lifetime_sum)
        state["track_throughputs"] = self.track_throughputs
        return state

    def set_state(self, state: Dict[str, Any]) -> None:
        """Restores from a state dict; throughput baselines restart at the restored sum."""
        super().set_state(state)
        self._lifetime_sum = state["lifetime_sum"]
        self.track_throughputs = state["track_throughputs"]
        # We always start over with the throughput calculation after a restore.
        # NOTE(review): the reference *times* are not reset here — presumably
        # `initialize_throughput_reference_time()` is called after restore; confirm.
        self._value_at_last_restore = self._lifetime_sum
        self._value_at_last_reduce = self._lifetime_sum

    def push(self, value: Any) -> None:
        """Pushes a value into this Stats object.

        Args:
            value: The value to be pushed. Can be of any type.
                PyTorch GPU tensors are kept on GPU until reduce() or peek().
                TensorFlow tensors are moved to CPU immediately.
        """
        # Convert TensorFlow tensors to CPU immediately
        if tf and tf.is_tensor(value):
            value = value.numpy()
        # Silently drop NaNs so they cannot poison the lifetime sum.
        if safe_isnan(value):
            return
        if torch and isinstance(value, torch.Tensor):
            # Detach so the metric does not keep the autograd graph alive.
            value = value.detach()
        self._lifetime_sum += value

    @property
    def throughput_since_last_reduce(self) -> float:
        """Returns the throughput since the last reduce call."""
        if self.track_throughputs:
            lifetime_sum = self._lifetime_sum
            # Convert GPU tensor to CPU
            if torch and isinstance(lifetime_sum, torch.Tensor):
                lifetime_sum = single_value_to_cpu(lifetime_sum)
            # Delta of the sum divided by elapsed wall-clock time.
            return (lifetime_sum - self._value_at_last_reduce) / (
                time.perf_counter() - self._last_reduce_time
            )
        else:
            raise ValueError(
                "Tracking of throughput since last reduce is not enabled on this Stats object"
            )

    @property
    def throughput_since_last_restore(self) -> float:
        """Returns the total throughput since the last restore.

        Only available for root stats, as restoring from checkpoints only happens at the root.
        """
        if not self.is_root:
            raise ValueError(
                "throughput_since_last_restore is only available for root stats"
            )
        if self.track_throughputs:
            lifetime_sum = self._lifetime_sum
            # Convert GPU tensor to CPU
            if torch and isinstance(lifetime_sum, torch.Tensor):
                lifetime_sum = single_value_to_cpu(lifetime_sum)
            return (lifetime_sum - self._value_at_last_restore) / (
                time.perf_counter() - self._last_restore_time
            )
        else:
            raise ValueError(
                "Tracking of throughput since last restore is not enabled on this Stats object"
            )

    def reduce(self, compile: bool = True) -> Union[Any, "LifetimeSumStats"]:
        """Reduces the internal value.

        If value is a GPU tensor, it's converted to CPU.
        """
        value = self._lifetime_sum
        # Convert GPU tensor to CPU
        if torch and isinstance(value, torch.Tensor):
            value = single_value_to_cpu(value)
        # Reset for all non-root stats (both leaf and intermediate aggregators).
        # Only root stats should never reset because they aggregate everything.
        # Non-root stats reset so they only send deltas up the aggregation tree.
        if not self.is_root:
            # Reset to 0 with same type (tensor or scalar)
            if torch and isinstance(self._lifetime_sum, torch.Tensor):
                self._lifetime_sum = torch.tensor(0.0, device=self._lifetime_sum.device)
            else:
                self._lifetime_sum = 0.0
            self._value_at_last_reduce = 0.0
        else:
            self._value_at_last_reduce = value
        # Update the last reduce time for throughput tracking
        if self.track_throughputs:
            self._last_reduce_time = time.perf_counter()
        if compile:
            return value
        # compile=False: return a fresh stats object carrying the reduced sum.
        return_stats = self.clone()
        return_stats._lifetime_sum = value
        return return_stats

    def merge(self, incoming_stats: List["LifetimeSumStats"]) -> None:
        """Merges LifetimeSumStats objects.

        Args:
            incoming_stats: The list of LifetimeSumStats objects to merge.

        Returns:
            None. The merge operation modifies self in place.
        """
        assert (
            not self.is_leaf
        ), "LifetimeSumStats should only be merged at aggregation stages (root or intermediate)"
        incoming_sum = sum([stat._lifetime_sum for stat in incoming_stats])
        # Directly update _lifetime_sum instead of calling push (which is disabled for non-leaf stats)
        if torch and isinstance(incoming_sum, torch.Tensor):
            incoming_sum = incoming_sum.detach()
        if tf and tf.is_tensor(incoming_sum):
            incoming_sum = incoming_sum.numpy()
        self._lifetime_sum += incoming_sum
        # Track merged values for latest_merged_only peek functionality
        if not self.is_leaf:
            # Store the sum that was added in this merge operation
            self.latest_merged = incoming_sum

    def __repr__(self) -> str:
        return f"LifetimeSumStats({self.peek()}; track_throughputs={self.track_throughputs})"
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/utils/metrics/stats/lifetime_sum.py",
"license": "Apache License 2.0",
"lines": 220,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:rllib/utils/metrics/stats/max.py | import numpy as np
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.metrics.stats.series import SeriesStats
from ray.util.annotations import DeveloperAPI
torch, _ = try_import_torch()
@DeveloperAPI
class MaxStats(SeriesStats):
    """A Stats object that tracks the max of a series of singular values (not vectors)."""

    stats_cls_identifier = "max"

    def _np_reduce_fn(self, values):
        # NaN-aware maximum for CPU/numpy values.
        return np.nanmax(values)

    def _torch_reduce_fn(self, values):
        """Reduce function for torch tensors (stays on GPU)."""
        # torch has no nanmax, so strip out the NaN entries by hand.
        non_nan = values[~torch.isnan(values)]
        if non_nan.numel() == 0:
            # Everything was NaN -> the max is NaN (on the same device).
            return torch.tensor(float("nan"), device=values.device)
        # Cast to float32 so integer (Long) tensors do not raise.
        return non_nan.float().max()

    def __repr__(self) -> str:
        return "MaxStats({}; window={}; len={})".format(
            self.peek(), self._window, len(self)
        )
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/utils/metrics/stats/max.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:rllib/utils/metrics/stats/mean.py | from typing import Any, Union
import numpy as np
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.metrics.stats.series import SeriesStats
from ray.util.annotations import DeveloperAPI
torch, _ = try_import_torch()
_, tf, _ = try_import_tf()
@DeveloperAPI
class MeanStats(SeriesStats):
    """A Stats object that tracks the mean of a series of singular values (not vectors).

    Note the following limitation: When merging multiple MeanStats objects, the
    resulting mean is not the true mean of all values. Instead, it is the mean of
    the means of the incoming MeanStats objects. This is because we calculate the
    mean in parallel components and potentially merge them multiple times in one
    reduce cycle. The resulting mean of means may differ significantly from the
    true mean, especially if some incoming means are the result of few outliers.

    Example to illustrate this limitation:
    First incoming mean: [1, 2, 3, 4, 5] -> 3
    Second incoming mean: [15] -> 15
    Mean of both merged means: [3, 15] -> 9
    True mean of all values: [1, 2, 3, 4, 5, 15] -> 5
    """

    stats_cls_identifier = "mean"

    def _np_reduce_fn(self, values: np.ndarray) -> float:
        # NaN-aware mean for CPU/numpy values.
        return np.nanmean(values)

    def _torch_reduce_fn(self, values: "torch.Tensor"):
        """Reduce function for torch tensors (stays on GPU)."""
        return torch.nanmean(values.float())

    def push(self, value: Any) -> None:
        """Pushes a value into this Stats object.

        Args:
            value: The value to be pushed. Can be of any type.
                PyTorch GPU tensors are kept on GPU until reduce() or peek().
                TensorFlow tensors are moved to CPU immediately.
        """
        # Convert TensorFlow tensors to CPU immediately, keep PyTorch tensors as-is
        if tf and tf.is_tensor(value):
            value = value.numpy()
        # Detach torch tensors so the metrics buffer does not keep the autograd
        # graph alive (consistent with SeriesStats.push and LifetimeSumStats.push).
        if torch and isinstance(value, torch.Tensor):
            value = value.detach()
        self.values.append(value)

    def reduce(self, compile: bool = True) -> Union[Any, "MeanStats"]:
        """Reduces the internal values list to the (windowed) mean.

        Args:
            compile: If True, return the single reduced value (np.nan if no
                values were logged). If False, return a MeanStats clone carrying
                the reduced values.

        Returns:
            The reduced mean (or np.nan if empty), or a MeanStats object.
        """
        reduced_values = self.window_reduce()  # Values are on CPU already after this
        self._set_values([])
        if compile:
            # Guard against an empty series (consistent with SeriesStats.reduce),
            # which would otherwise raise an IndexError below.
            if len(reduced_values) == 0:
                return np.nan
            return reduced_values[0]
        return_stats = self.clone()
        return_stats.values = reduced_values
        return return_stats

    def __repr__(self) -> str:
        return f"MeanStats({self.peek()}; window={self._window}; len={len(self)})"
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/utils/metrics/stats/mean.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:rllib/utils/metrics/stats/min.py | import numpy as np
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.metrics.stats.series import SeriesStats
from ray.util.annotations import DeveloperAPI
torch, _ = try_import_torch()
@DeveloperAPI
class MinStats(SeriesStats):
    """A Stats object that tracks the min of a series of singular values (not vectors)."""

    stats_cls_identifier = "min"

    def _np_reduce_fn(self, values: np.ndarray) -> float:
        # NaN-aware minimum for CPU/numpy values.
        return np.nanmin(values)

    def _torch_reduce_fn(self, values: "torch.Tensor"):
        """Reduce function for torch tensors (stays on GPU)."""
        # torch has no nanmin, so strip out the NaN entries by hand.
        non_nan = values[~torch.isnan(values)]
        if non_nan.numel() == 0:
            # Everything was NaN -> the min is NaN (on the same device).
            return torch.tensor(float("nan"), device=values.device)
        # Cast to float32 so integer (Long) tensors do not raise.
        return non_nan.float().min()

    def __repr__(self) -> str:
        return "MinStats({}; window={}; len={})".format(
            self.peek(), self._window, len(self)
        )
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/utils/metrics/stats/min.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:rllib/utils/metrics/stats/percentiles.py | from collections import deque
from itertools import chain
from typing import Any, Dict, List, Optional, Union
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.metrics.stats.base import StatsBase
from ray.rllib.utils.metrics.stats.utils import batch_values_to_cpu, safe_isnan
from ray.util.annotations import DeveloperAPI
torch, _ = try_import_torch()
_, tf, _ = try_import_tf()
@DeveloperAPI
class PercentilesStats(StatsBase):
    """A Stats object that tracks percentiles of a series of singular values (not vectors)."""

    # Identifier used by the stats (de)serialization machinery to pick this class.
    stats_cls_identifier = "percentiles"

    def __init__(
        self,
        percentiles: Union[List[int], bool] = None,
        window: Optional[Union[int, float]] = None,
        *args,
        **kwargs,
    ):
        """Initializes a PercentilesStats instance.

        Percentiles are computed over the last `window` values across all parallel
        components.

        Example: If we have 10 parallel components, and each component tracks 1,000
        values, we will track the last 10,000 values across all components.
        Be careful to not track too many values because computing percentiles is
        O(n*log(n)) where n is the window size.
        See https://github.com/ray-project/ray/pull/52963 for more details.

        Args:
            percentiles: The percentiles to track.
                If None, track the default percentiles [0, 50, 75, 90, 95, 99, 100].
                If a list, track the given percentiles.
            window: The number of most recent values to keep (per leaf).
        """
        super().__init__(*args, **kwargs)
        self._window = window
        # Backing store: a bounded deque at windowed leafs, a plain list
        # otherwise (see `_set_values()`).
        self.values: Union[List[Any], deque[Any]] = []
        self._set_values([])
        if percentiles is None:
            # We compute a bunch of default percentiles because computing one is just as expensive as computing all of them.
            percentiles = [0, 50, 75, 90, 95, 99, 100]
        elif isinstance(percentiles, list):
            percentiles = percentiles
        else:
            raise ValueError("`percentiles` must be a list or None")
        self._percentiles = percentiles

    def get_state(self) -> Dict[str, Any]:
        """Returns a serializable state dict (values, window, percentiles + base state)."""
        state = super().get_state()
        # NOTE(review): values are stored as-is (no CPU transfer), unlike
        # SeriesStats.get_state which calls batch_values_to_cpu — confirm that
        # checkpointing only happens after a reduce.
        state["values"] = self.values
        state["window"] = self._window
        state["percentiles"] = self._percentiles
        return state

    def set_state(self, state: Dict[str, Any]) -> None:
        """Restores this object from a state dict produced by `get_state()`."""
        super().set_state(state)
        self._set_values(state["values"])
        self._window = state["window"]
        self._percentiles = state["percentiles"]

    def _set_values(self, new_values):
        # Replace the backing store with `new_values`, choosing the container type.
        # For stats with window, use a deque with maxlen=window.
        # This way, we never store more values than absolutely necessary.
        if self._window and self.is_leaf:
            # Window always counts at leafs only (or non-root stats)
            self.values = deque(new_values, maxlen=self._window)
        # For infinite windows, use `new_values` as-is (a list).
        else:
            self.values = new_values

    def __len__(self) -> int:
        """Returns the length of the internal values list."""
        return len(self.values)

    def __float__(self):
        # A percentiles result is a dict, not a scalar — conversion is an error.
        raise ValueError(
            "Cannot convert to float because percentiles are not reduced to a single value."
        )

    # Comparisons and arithmetic are deliberately unsupported: a percentiles
    # object has no single scalar value to compare or combine.
    def __eq__(self, other):
        self._comp_error("__eq__")

    def __le__(self, other):
        self._comp_error("__le__")

    def __ge__(self, other):
        self._comp_error("__ge__")

    def __lt__(self, other):
        self._comp_error("__lt__")

    def __gt__(self, other):
        self._comp_error("__gt__")

    def __add__(self, other):
        self._comp_error("__add__")

    def __sub__(self, other):
        self._comp_error("__sub__")

    def __mul__(self, other):
        self._comp_error("__mul__")

    def _comp_error(self, comp):
        # Shared raiser for all disabled comparison/arithmetic dunders above.
        raise NotImplementedError()

    def __format__(self, fmt):
        raise ValueError(
            "Cannot format percentiles object because percentiles are not reduced to a single value."
        )

    def push(self, value: Any) -> None:
        """Pushes a value into this Stats object.

        Args:
            value: The value to be pushed. Can be of any type.
                PyTorch GPU tensors are kept on GPU until reduce() or peek().
                TensorFlow tensors are moved to CPU immediately.

        Raises:
            ValueError: If `value` is NaN (NaN would corrupt the sort order).
        """
        # Convert TensorFlow tensors to CPU immediately, keep PyTorch tensors as-is
        if tf and tf.is_tensor(value):
            value = value.numpy()
        if safe_isnan(value):
            raise ValueError("NaN values are not allowed in PercentilesStats")
        if torch and isinstance(value, torch.Tensor):
            # Detach so the metric does not keep the autograd graph alive.
            value = value.detach()
        self.values.append(value)

    def merge(self, incoming_stats: List["PercentilesStats"]) -> None:
        """Merges PercentilesStats objects.

        This method assumes that the incoming stats have the same percentiles and
        window size. It will append the incoming values to the existing values.

        Args:
            incoming_stats: The list of PercentilesStats objects to merge.

        Returns:
            None. The merge operation modifies self in place.
        """
        assert (
            not self.is_leaf
        ), "PercentilesStats should only be merged at aggregation stages (root or intermediate)"
        assert all(
            s._percentiles == self._percentiles for s in incoming_stats
        ), "All incoming PercentilesStats objects must have the same percentiles"
        assert all(
            s._window == self._window for s in incoming_stats
        ), "All incoming PercentilesStats objects must have the same window size"
        # Flatten all incoming value lists and append them to our own.
        new_values = [s.values for s in incoming_stats]
        new_values = list(chain.from_iterable(new_values))
        all_values = list(self.values) + new_values
        self.values = all_values
        # Track merged values for latest_merged_only peek functionality
        if not self.is_leaf:
            # Store the values that were merged in this operation (from incoming_stats only)
            self.latest_merged = new_values

    def peek(
        self, compile: bool = True, latest_merged_only: bool = False
    ) -> Union[Any, List[Any]]:
        """Returns the result of reducing the internal values list.

        Note that this method does NOT alter the internal values list in this
        process. Thus, users can call this method to get an accurate look at the
        reduced value(s) given the current internal values list.

        Args:
            compile: If True, the result is compiled into the percentiles list.
            latest_merged_only: If True, only considers the latest merged values.
                This parameter only works on aggregation stats (root or intermediate
                nodes). When enabled, peek() will only use the values from the most
                recent merge operation.

        Returns:
            The result of reducing the internal values list on CPU.
        """
        # Check latest_merged_only validity
        if latest_merged_only and self.is_leaf:
            raise ValueError(
                "latest_merged_only can only be used on aggregation stats objects (is_leaf=False)."
            )
        # If latest_merged_only is True, use only the latest merged values
        if latest_merged_only:
            if self.latest_merged is None:
                # No merged values yet, return dict with None values
                if compile:
                    return {p: None for p in self._percentiles}
                else:
                    return []
            # Use only the latest merged values
            latest_merged = self.latest_merged
            values = batch_values_to_cpu(latest_merged)
        else:
            # Normal peek behavior
            values = batch_values_to_cpu(self.values)
        # compute_percentiles expects the values pre-sorted ascending.
        values.sort()
        if compile:
            return compute_percentiles(values, self._percentiles)
        return values

    def reduce(self, compile: bool = True) -> Union[Any, "PercentilesStats"]:
        """Reduces the internal values list.

        Args:
            compile: If True, the result is compiled into a single value if possible.

        Returns:
            The reduced value on CPU.
        """
        values = batch_values_to_cpu(self.values)
        # compute_percentiles expects the values pre-sorted ascending.
        values.sort()
        # Hand the collected values off and start over with an empty store.
        self._set_values([])
        if compile:
            return compute_percentiles(values, self._percentiles)
        # compile=False: return a fresh stats object carrying the sorted values.
        return_stats = self.clone()
        return_stats.values = values
        return return_stats

    def __repr__(self) -> str:
        return (
            f"PercentilesStats({self.peek()}; window={self._window}; len={len(self)})"
        )

    @staticmethod
    def _get_init_args(stats_object=None, state=None) -> Dict[str, Any]:
        """Returns the initialization arguments for this Stats object."""
        super_args = StatsBase._get_init_args(stats_object=stats_object, state=state)
        if state is not None:
            return {
                **super_args,
                "percentiles": state["percentiles"],
                "window": state["window"],
            }
        elif stats_object is not None:
            return {
                **super_args,
                "percentiles": stats_object._percentiles,
                "window": stats_object._window,
            }
        else:
            raise ValueError("Either stats_object or state must be provided")
@DeveloperAPI
def compute_percentiles(sorted_list, percentiles):
    """Compute percentiles from an already sorted list.

    Note that this will not raise an error if the list is not sorted to avoid overhead.

    Args:
        sorted_list: A list of numbers sorted in ascending order
        percentiles: A list of percentile values (0-100)

    Returns:
        A dictionary mapping percentile values to their corresponding data values
    """
    count = len(sorted_list)
    # An empty series has no percentiles: map every requested one to None.
    if count == 0:
        return {p: None for p in percentiles}
    results = {}
    for pct in percentiles:
        # Fractional position of this percentile within the sorted data.
        pos = (pct / 100) * (count - 1)
        if pos.is_integer():
            # Exact hit on a sample: return it directly (type-preserving).
            results[pct] = sorted_list[int(pos)]
            continue
        # Otherwise, linearly interpolate between the two neighboring samples.
        lo = int(pos)
        hi = lo + 1
        frac = pos - lo
        results[pct] = sorted_list[lo] * (1 - frac) + sorted_list[hi] * frac
    return results
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/utils/metrics/stats/percentiles.py",
"license": "Apache License 2.0",
"lines": 234,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:rllib/utils/metrics/stats/series.py | from abc import ABCMeta
from collections import deque
from itertools import chain
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ray.rllib.utils.annotations import (
OverrideToImplementCustomLogic_CallToSuperRecommended,
)
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.metrics.stats.base import StatsBase
from ray.rllib.utils.metrics.stats.utils import batch_values_to_cpu, single_value_to_cpu
from ray.util.annotations import DeveloperAPI
torch, _ = try_import_torch()
_, tf, _ = try_import_tf()
@DeveloperAPI
class SeriesStats(StatsBase, metaclass=ABCMeta):
"""A base class for Stats that represent a series of singular values (not vectors)."""
# Set by subclasses
_np_reduce_fn = None
# Set by subclasses
_torch_reduce_fn = None
def __init__(
self,
window: Optional[Union[int, float]] = None,
*args,
**kwargs,
):
"""Initializes a SeriesStats instance.
Args:
window: The window size to reduce over.
"""
super().__init__(*args, **kwargs)
self._window = window
self.values: Union[List[Any], deque[Any]] = []
self._set_values([])
def get_state(self) -> Dict[str, Any]:
state = super().get_state()
state = {
**state,
"values": batch_values_to_cpu(self.values),
"window": self._window,
}
return state
def set_state(self, state: Dict[str, Any]) -> None:
super().set_state(state)
self._set_values(state["values"])
self._window = state["window"]
@OverrideToImplementCustomLogic_CallToSuperRecommended
@staticmethod
def _get_init_args(stats_object=None, state=None) -> Dict[str, Any]:
super_args = StatsBase._get_init_args(stats_object=stats_object, state=state)
if state is not None:
return {
**super_args,
"window": state["window"],
}
elif stats_object is not None:
return {
**super_args,
"window": stats_object._window,
}
else:
raise ValueError("Either stats_object or state must be provided")
def reduce(self, compile: bool = True) -> Union[Any, "SeriesStats"]:
"""Reduces the internal values list according to the constructor settings."""
if self._window is None:
if len(self.values) <= 1 or not compile:
reduced_values = batch_values_to_cpu(self.values)
else:
reduced_values = self.window_reduce()
else:
reduced_values = self.window_reduce()
self._set_values([])
if compile:
if len(reduced_values) == 0:
return np.nan
else:
return reduced_values[0]
return_stats = self.clone()
return_stats.values = reduced_values
return return_stats
def __len__(self) -> int:
"""Returns the length of the internal values list."""
return len(self.values)
def _set_values(self, new_values):
# For stats with window, use a deque with maxlen=window.
# This way, we never store more values than absolutely necessary.
if self._window and self.is_leaf:
# Window always counts at leafs only (or non-root stats)
self.values = deque(new_values, maxlen=self._window)
# For infinite windows, use `new_values` as-is (a list).
else:
self.values = new_values
def push(self, value: Any) -> None:
"""Pushes a value into this Stats object.
Args:
value: The value to be pushed. Can be of any type.
PyTorch GPU tensors are kept on GPU until reduce() or peek().
TensorFlow tensors are moved to CPU immediately.
"""
# Convert TensorFlow tensors to CPU immediately, keep PyTorch tensors as-is
if tf and tf.is_tensor(value):
value = value.numpy()
if torch and isinstance(value, torch.Tensor):
value = value.detach()
if self._window is None:
if not self.values:
self._set_values([value])
else:
self._set_values(self.running_reduce(self.values[0], value))
else:
# For windowed operations, append to values and trim if needed
self.values.append(value)
def merge(self, incoming_stats: List["SeriesStats"]) -> None:
"""Merges SeriesStats objects.
Args:
incoming_stats: The list of SeriesStats objects to merge.
Returns:
None. The merge operation modifies self in place.
"""
assert (
not self.is_leaf
), "SeriesStats should only be merged at aggregation stages (root or intermediate)"
if len(incoming_stats) == 0:
return
all_items = [s.values for s in incoming_stats]
all_items = list(chain.from_iterable(all_items))
# Implicitly may convert internal to list.
# That's ok because we don't want to evict items from the deque if we merge in this object's values.
all_items = list(self.values) + list(all_items)
self.values = all_items
# Track merged values for latest_merged_only peek functionality
if not self.is_leaf:
# Store the values that were merged in this operation (from incoming_stats only)
merged_values = list(
chain.from_iterable([s.values for s in incoming_stats])
)
self.latest_merged = merged_values
def peek(
    self, compile: bool = True, latest_merged_only: bool = False
) -> Union[Any, List[Any]]:
    """Returns the result of reducing the internal values list.

    This is side-effect free: the internal values list is NOT modified.

    Args:
        compile: If True, the result is compiled into a single value if
            possible.
        latest_merged_only: If True, only considers the latest merged values.
            Only valid on aggregation stats (root or intermediate nodes,
            is_leaf=False). When enabled, peek() only uses the values from
            the most recent merge operation.

    Returns:
        The result of reducing the internal values list.
    """
    if latest_merged_only:
        if self.is_leaf:
            raise ValueError(
                "latest_merged_only can only be used on aggregation stats objects "
                "(is_leaf=False)"
            )
        if self.latest_merged is None:
            # Nothing has been merged yet.
            return np.nan if compile else []
        reduced = (
            [np.nan]
            if len(self.latest_merged) == 0
            else self.window_reduce(self.latest_merged)
        )
    elif len(self.values) == 1:
        # A single entry needs no reduction. We can't check window=None here
        # because merged SeriesStats may hold multiple values.
        reduced = self.values
    else:
        reduced = self.window_reduce()

    if not compile:
        return reduced
    return reduced[0] if len(reduced) > 0 else np.nan
def running_reduce(self, value_1, value_2) -> List[Any]:
    """Reduces two values through a reduce function.

    If the values are PyTorch tensors, the reduction runs on their device:
    GPU inputs stay on GPU, CPU inputs stay on CPU.

    Args:
        value_1: The first value to reduce.
        value_2: The second value to reduce.

    Returns:
        A single-element list holding the reduced value (may be a GPU tensor).
    """
    use_torch_path = (
        torch is not None
        and isinstance(value_1, torch.Tensor)
        and hasattr(self, "_torch_reduce_fn")
    )
    if use_torch_path:
        stacked_pair = torch.stack([value_1, value_2])
        return [self._torch_reduce_fn(stacked_pair)]
    # Fall back to the numpy-based reduction.
    return [self._np_reduce_fn([value_1, value_2])]
def window_reduce(self, values=None) -> List[Any]:
    """Reduces the internal values list according to the constructor settings.

    If the values are PyTorch GPU tensors, the reduction happens on GPU and
    the result is moved to CPU. Otherwise a CPU value is returned.

    Args:
        values: The list of values to reduce. If None, ``self.values`` is
            used instead.

    Returns:
        A single-element list holding the reduced value on CPU; ``[np.nan]``
        if there is nothing (or only NaNs) to reduce.
    """
    if values is None:
        values = self.values
    # Special case: nothing to reduce -> NaN.
    if len(values) == 0:
        return [np.nan]
    # Torch path: reduce on the tensors' device, then move the result to CPU.
    if (
        torch is not None
        and isinstance(values[0], torch.Tensor)
        and hasattr(self, "_torch_reduce_fn")
    ):
        stacked = torch.stack(list(values))
        if torch.all(torch.isnan(stacked)):
            # All-NaN input: short-circuit instead of reducing.
            return [np.nan]
        return [single_value_to_cpu(self._torch_reduce_fn(stacked))]
    # Numpy path on CPU values.
    if np.all(np.isnan(values)):
        return [np.nan]
    return [self._np_reduce_fn(values)]
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/utils/metrics/stats/series.py",
"license": "Apache License 2.0",
"lines": 233,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:rllib/utils/metrics/stats/sum.py | import time
from typing import Any, Dict, Union
import numpy as np
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.metrics.stats.series import SeriesStats
from ray.util.annotations import DeveloperAPI
torch, _ = try_import_torch()
@DeveloperAPI
class SumStats(SeriesStats):
    """A Stats object that tracks the sum of a series of singular values (not vectors)."""

    stats_cls_identifier = "sum"

    def _np_reduce_fn(self, values: np.ndarray) -> float:
        # NaN entries are ignored rather than poisoning the sum.
        return np.nansum(values)

    def _torch_reduce_fn(self, values: "torch.Tensor"):
        """Reduce function for torch tensors (stays on GPU)."""
        # torch.nansum not available, use workaround: drop NaNs, then sum.
        clean_values = values[~torch.isnan(values)]
        if len(clean_values) == 0:
            # All-NaN (or empty) input sums to 0 on the same device.
            return torch.tensor(0.0, device=values.device)
        return torch.sum(clean_values.float())

    def __init__(self, with_throughput: bool = False, **kwargs):
        """Initializes a SumStats instance.

        Args:
            with_throughput: If True, track a throughput estimate based on the
                time between consecutive calls to reduce().
        """
        super().__init__(**kwargs)
        self.track_throughput = with_throughput
        # We initialize this to the current time which may result in a low first throughput value
        # It seems reasonable that starting from a checkpoint or starting an experiment results in a low first throughput value
        self._last_throughput_measure_time = time.perf_counter()

    def initialize_throughput_reference_time(self, time: float) -> None:
        """Overrides the reference time used for the next throughput calculation.

        NOTE(review): the parameter `time` shadows the module-level `time`
        import inside this method's scope.
        """
        assert (
            self.is_root
        ), "initialize_throughput_reference_time can only be called on root stats"
        self._last_throughput_measure_time = time

    @property
    def has_throughputs(self) -> bool:
        # True iff throughput tracking was requested at construction time.
        return self.track_throughput

    @property
    def throughputs(self) -> float:
        """Returns the throughput since the last reduce."""
        assert (
            self.track_throughput
        ), "Throughput tracking is not enabled on this Stats object"
        # Sum accumulated since the last reduce, divided by elapsed wall time.
        return self.peek(compile=True) / (
            time.perf_counter() - self._last_throughput_measure_time
        )

    def reduce(self, compile: bool = True) -> Union[Any, "SumStats"]:
        """Reduces the tracked values to their sum.

        Args:
            compile: If True, return the reduced value itself. Otherwise,
                return a cloned SumStats whose values hold just the reduced
                value.
        """
        reduce_value = super().reduce(compile=True)
        # Update the last throughput measure time for correct throughput calculations
        if self.track_throughput:
            self._last_throughput_measure_time = time.perf_counter()
        if compile:
            return reduce_value
        return_stats = self.clone()
        return_stats.values = [reduce_value]
        return return_stats

    @staticmethod
    def _get_init_args(stats_object=None, state=None) -> Dict[str, Any]:
        """Returns the initialization arguments for this Stats object."""
        super_args = SeriesStats._get_init_args(stats_object=stats_object, state=state)
        # `state` takes precedence over `stats_object` if both are given.
        if state is not None:
            return {
                **super_args,
                "with_throughput": state["track_throughput"],
            }
        elif stats_object is not None:
            return {
                **super_args,
                "with_throughput": stats_object.track_throughput,
            }
        else:
            raise ValueError("Either stats_object or state must be provided")

    def get_state(self) -> Dict[str, Any]:
        """Returns the state of the stats object."""
        state = super().get_state()
        state["track_throughput"] = self.track_throughput
        return state

    def set_state(self, state: Dict[str, Any]) -> None:
        """Restores state previously produced by `get_state()`."""
        super().set_state(state)
        self.track_throughput = state["track_throughput"]

    def __repr__(self) -> str:
        return f"SumStats({self.peek()}; window={self._window}; len={len(self)})"
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/utils/metrics/stats/sum.py",
"license": "Apache License 2.0",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:rllib/utils/metrics/stats/utils.py | from collections import deque
from typing import Any, List, Union
import numpy as np
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.util.annotations import DeveloperAPI
torch, _ = try_import_torch()
_, tf, _ = try_import_tf()
@DeveloperAPI
def safe_isnan(value):
    """Check if a value is NaN.

    Dispatches to the matching framework: torch tensors, tf tensors, then
    plain Python/numpy values.

    Args:
        value: The value to check.

    Returns:
        True if the value is NaN, False otherwise.
    """
    if torch is not None and torch.is_tensor(value):
        return torch.isnan(value)
    if tf is not None and tf.is_tensor(value):
        return tf.math.is_nan(value)
    # Plain Python / numpy value.
    return np.isnan(value)
@DeveloperAPI
def single_value_to_cpu(value):
    """Convert a single value to CPU if it's a tensor.

    TensorFlow tensors are always converted to numpy/python values.
    PyTorch tensors are converted to python scalars.
    """
    if torch is not None and isinstance(value, torch.Tensor):
        # Detach from the graph, copy to host memory, unwrap to a scalar.
        return value.detach().cpu().item()
    if tf is not None and tf.is_tensor(value):
        return value.numpy()
    # Already a CPU value.
    return value
@DeveloperAPI
def batch_values_to_cpu(values: Union[List[Any], deque]) -> List[Any]:
    """Convert a list or deque of GPU tensors to CPU scalars in a single operation.

    PyTorch tensors are stacked first so that only a single device-to-host
    transfer is issued for the whole batch. All entries are assumed to be of
    the same kind: either torch tensors on one device, or plain CPU values.

    Args:
        values: A list or deque of values that may be GPU tensors.

    Returns:
        A list of CPU scalar values.
    """
    if not values:
        return []
    representative = values[0]
    # The first element is taken as representative for the whole batch.
    if torch is not None and isinstance(representative, torch.Tensor):
        # One stack + one .cpu() call for the entire batch.
        return torch.stack(list(values)).detach().cpu().tolist()
    # Already CPU values.
    return list(values)
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/utils/metrics/stats/utils.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:rllib/utils/metrics/tests/test_legacy_stats.py | import re
import time
import numpy as np
import pytest
from ray.rllib.utils.metrics.legacy_stats import Stats, merge_stats
from ray.rllib.utils.test_utils import check
# Default values used throughout the tests
DEFAULT_EMA_COEFF = 0.01  # EMA smoothing coefficient for "mean" stats
DEFAULT_THROUGHPUT_EMA_COEFF = 0.05  # EMA coefficient for throughput estimates
DEFAULT_CLEAR_ON_REDUCE = False  # keep values across reduce() calls by default
DEFAULT_THROUGHPUT = False  # throughput tracking disabled by default
@pytest.fixture
def basic_stats():
    """A default mean/EMA Stats object shared by tests."""
    config = dict(
        init_values=None,
        reduce="mean",
        ema_coeff=DEFAULT_EMA_COEFF,
        window=None,
        clear_on_reduce=DEFAULT_CLEAR_ON_REDUCE,
        throughput=DEFAULT_THROUGHPUT,
        throughput_ema_coeff=DEFAULT_THROUGHPUT_EMA_COEFF,
    )
    return Stats(**config)
@pytest.mark.parametrize(
    "init_values,expected_len,expected_peek",
    [(1.0, 1, 1.0), (None, 0, np.nan), ([1, 2, 3], 3, 2)],
)
def test_init_with_values(init_values, expected_len, expected_peek):
    """Test initialization with different initial values."""
    # Window of 3, plain mean (no EMA: ema_coeff=None).
    stats = Stats(
        init_values=init_values,
        reduce="mean",
        ema_coeff=None,
        window=3,
        clear_on_reduce=DEFAULT_CLEAR_ON_REDUCE,
        throughput=DEFAULT_THROUGHPUT,
        throughput_ema_coeff=DEFAULT_THROUGHPUT_EMA_COEFF,
    )
    check(len(stats), expected_len)
    if expected_len > 0:
        check(stats.peek(), expected_peek)
        # NOTE(review): compile=True is compared against a *list* here —
        # looks like it may have been intended as compile=False; confirm
        # against `check`/`peek` semantics.
        check(stats.peek(compile=True), [expected_peek])
    else:
        # Empty stats peek to NaN.
        check(np.isnan(stats.peek()), True)
def test_invalid_init_params():
    """Test initialization with invalid parameters.

    Each invalid combination must raise ValueError at construction time.
    """
    # Invalid reduce method
    with pytest.raises(ValueError):
        Stats(
            init_values=None,
            reduce="invalid",
            window=None,
            ema_coeff=DEFAULT_EMA_COEFF,
            clear_on_reduce=DEFAULT_CLEAR_ON_REDUCE,
            throughput=DEFAULT_THROUGHPUT,
            throughput_ema_coeff=DEFAULT_THROUGHPUT_EMA_COEFF,
        )
    # Cannot have both window and ema_coeff
    with pytest.raises(ValueError):
        Stats(
            init_values=None,
            window=3,
            ema_coeff=0.1,
            reduce="mean",
            clear_on_reduce=DEFAULT_CLEAR_ON_REDUCE,
            throughput=DEFAULT_THROUGHPUT,
            throughput_ema_coeff=DEFAULT_THROUGHPUT_EMA_COEFF,
        )
    # Cannot have ema_coeff with non-mean reduction
    with pytest.raises(ValueError):
        Stats(
            init_values=None,
            reduce="sum",
            ema_coeff=0.1,
            window=None,
            clear_on_reduce=DEFAULT_CLEAR_ON_REDUCE,
            throughput=DEFAULT_THROUGHPUT,
            throughput_ema_coeff=DEFAULT_THROUGHPUT_EMA_COEFF,
        )
def test_push_with_ema():
    """Test pushing values with EMA reduction."""
    config = dict(
        init_values=None,
        reduce="mean",
        ema_coeff=DEFAULT_EMA_COEFF,
        window=None,
        clear_on_reduce=DEFAULT_CLEAR_ON_REDUCE,
        throughput=DEFAULT_THROUGHPUT,
        throughput_ema_coeff=DEFAULT_THROUGHPUT_EMA_COEFF,
    )
    stats = Stats(**config)
    for value in (1.0, 2.0):
        stats.push(value)
    # EMA formula: new_val = (1.0 - ema_coeff) * old_val + ema_coeff * val
    expected = (1.0 - DEFAULT_EMA_COEFF) * 1.0 + DEFAULT_EMA_COEFF * 2.0
    check(abs(stats.peek() - expected) < 1e-6, True)
def test_window():
    """Sliding-window mean: only the last `window_size` pushed values count."""
    window_size = 3
    stats = Stats(
        init_values=None,
        window=window_size,
        reduce="mean",
        ema_coeff=None,
        clear_on_reduce=DEFAULT_CLEAR_ON_REDUCE,
        throughput=DEFAULT_THROUGHPUT,
        throughput_ema_coeff=DEFAULT_THROUGHPUT_EMA_COEFF,
    )
    # Push values and check window behavior
    for i in range(1, 5):  # Push values 1, 2, 3, 4
        stats.push(i)
        # Check that the window size is respected
        expected_window_size = min(i, window_size)
        check(len(stats.values), expected_window_size)
        # Check that the window contains the most recent values
        if i <= window_size:
            expected_values = list(range(1, i + 1))
        else:
            expected_values = list(range(i - window_size + 1, i + 1))
        check(list(stats.peek(compile=False)), expected_values)
    # After pushing 4 values with window size 3, we should have [2, 3, 4]
    # and the mean should be (2 + 3 + 4) / 3 = 3
    check(stats.peek(), 3)
    # Test reduce behavior
    reduced_value = stats.reduce()
    check(reduced_value, 3)
@pytest.mark.parametrize(
    "reduce_method,values,expected",
    [
        ("sum", [1, 2, 3], 6),
        ("min", [10, 20, 5, 100], 5),
        ("max", [1, 3, 2, 4], 4),
    ],
)
def test_reduce_methods(reduce_method, values, expected):
    """Test different reduce methods."""
    # Infinite window, no EMA: every pushed value participates in the reduce.
    stats = Stats(
        init_values=None,
        reduce=reduce_method,
        window=None,
        ema_coeff=None,
        clear_on_reduce=DEFAULT_CLEAR_ON_REDUCE,
        throughput=DEFAULT_THROUGHPUT,
        throughput_ema_coeff=DEFAULT_THROUGHPUT_EMA_COEFF,
    )
    for val in values:
        stats.push(val)
    check(stats.peek(), expected)
def test_basic_merge_on_time_axis():
    """Test merging stats on time axis."""
    common = dict(
        init_values=None,
        reduce="sum",
        window=None,
        ema_coeff=None,
        clear_on_reduce=DEFAULT_CLEAR_ON_REDUCE,
        throughput=DEFAULT_THROUGHPUT,
        throughput_ema_coeff=DEFAULT_THROUGHPUT_EMA_COEFF,
    )
    first = Stats(**common)
    for value in (1, 2):
        first.push(value)
    second = Stats(**common)
    for value in (3, 4):
        second.push(value)
    first.merge_on_time_axis(second)
    check(first.peek(), 10)  # sum of [1, 2, 3, 4]
def test_basic_merge_in_parallel():
    """Test merging stats in parallel."""
    window_size = 3
    stats1 = Stats(
        init_values=None,
        reduce="mean",
        window=window_size,
        ema_coeff=None,
        clear_on_reduce=DEFAULT_CLEAR_ON_REDUCE,
        throughput=DEFAULT_THROUGHPUT,
        throughput_ema_coeff=DEFAULT_THROUGHPUT_EMA_COEFF,
    )
    for i in range(1, 4):  # [1, 2, 3]
        stats1.push(i)
    stats2 = Stats(
        init_values=None,
        reduce="mean",
        window=window_size,
        ema_coeff=None,
        clear_on_reduce=DEFAULT_CLEAR_ON_REDUCE,
        throughput=DEFAULT_THROUGHPUT,
        throughput_ema_coeff=DEFAULT_THROUGHPUT_EMA_COEFF,
    )
    for i in range(4, 7):  # [4, 5, 6]
        stats2.push(i)
    result = Stats(
        init_values=None,
        reduce="mean",
        window=window_size,
        ema_coeff=None,
        clear_on_reduce=DEFAULT_CLEAR_ON_REDUCE,
        throughput=DEFAULT_THROUGHPUT,
        throughput_ema_coeff=DEFAULT_THROUGHPUT_EMA_COEFF,
    )
    # NOTE(review): the expected mean ~4.167 results from how
    # merge_in_parallel interleaves/weights the two windows — confirm
    # against merge_in_parallel's documentation.
    result.merge_in_parallel(stats1, stats2)
    check(abs(result.peek() - 4.167) < 1e-3, True)
@pytest.mark.parametrize(
    "op,expected",
    [
        (lambda s: float(s), 2.0),
        (lambda s: int(s), 2),
        (lambda s: s + 1, 3.0),
        (lambda s: s - 1, 1.0),
        (lambda s: s * 2, 4.0),
        (lambda s: s == 2.0, True),
        (lambda s: s <= 3.0, True),
        (lambda s: s >= 1.0, True),
        (lambda s: s < 3.0, True),
        (lambda s: s > 1.0, True),
    ],
)
def test_numeric_operations(op, expected):
    """Test numeric operations on Stats objects."""
    stats = Stats(
        init_values=None,
        reduce="mean",
        ema_coeff=DEFAULT_EMA_COEFF,
        window=None,
        clear_on_reduce=DEFAULT_CLEAR_ON_REDUCE,
        throughput=DEFAULT_THROUGHPUT,
        throughput_ema_coeff=DEFAULT_THROUGHPUT_EMA_COEFF,
    )
    # With a single pushed value, the tracked value is exactly 2.0, so each
    # operator in the table above acts on 2.0.
    stats.push(2.0)
    check(op(stats), expected)
def test_state_serialization():
    """Test saving and loading Stats state."""
    stats = Stats(
        init_values=None,
        reduce="sum",
        reduce_per_index_on_aggregate=True,
        window=3,
        ema_coeff=None,
        clear_on_reduce=DEFAULT_CLEAR_ON_REDUCE,
        throughput=DEFAULT_THROUGHPUT,
        throughput_ema_coeff=DEFAULT_THROUGHPUT_EMA_COEFF,
    )
    for i in range(1, 4):
        stats.push(i)
    # Round-trip through get_state() / from_state().
    state = stats.get_state()
    loaded_stats = Stats.from_state(state)
    # Restored object must match in config, reduced value, and length.
    check(loaded_stats._reduce_method, stats._reduce_method)
    check(loaded_stats._window, stats._window)
    check(loaded_stats.peek(), stats.peek())
    check(len(loaded_stats), len(stats))
def test_similar_to():
    """Test creating similar Stats objects."""
    original = Stats(
        init_values=None,
        reduce="sum",
        window=3,
        ema_coeff=None,
        clear_on_reduce=DEFAULT_CLEAR_ON_REDUCE,
        throughput=DEFAULT_THROUGHPUT,
        throughput_ema_coeff=DEFAULT_THROUGHPUT_EMA_COEFF,
    )
    original.push(1)
    original.push(2)
    original.reduce()
    # Similar stats without initial values
    similar = Stats.similar_to(original)
    check(similar._reduce_method, original._reduce_method)
    check(similar._window, original._window)
    check(len(similar), 0)  # Should start empty
    # Similar stats with initial values
    similar_with_value = Stats.similar_to(original, init_values=[3, 4])
    check(len(similar_with_value), 2)
    check(similar_with_value.peek(), 7)
    # Test that adding to the similar stats does not affect the original stats
    # (original still peeks to 1 + 2 = 3 after the reduce above).
    similar.push(10)
    check(original.peek(), 3)
def test_reduce_history():
    """Lifetime sum: reduce() keeps accumulating across calls."""
    config = dict(
        init_values=None,
        reduce="sum",
        window=None,
        ema_coeff=None,
        clear_on_reduce=DEFAULT_CLEAR_ON_REDUCE,
        throughput=DEFAULT_THROUGHPUT,
        throughput_ema_coeff=DEFAULT_THROUGHPUT_EMA_COEFF,
    )
    stats = Stats(**config)
    # First batch: 1 + 2.
    for value in (1, 2):
        stats.push(value)
    check(stats.reduce(), 3)
    # Second batch accumulates on top of the first: 3 + 3 + 4.
    for value in (3, 4):
        stats.push(value)
    check(stats.reduce(), 10)
def test_reduce_history_with_clear():
    """With clear_on_reduce=True, every reduce() empties the value store."""
    config = dict(
        init_values=None,
        reduce="sum",
        window=None,
        ema_coeff=None,
        clear_on_reduce=True,
        throughput=DEFAULT_THROUGHPUT,
        throughput_ema_coeff=DEFAULT_THROUGHPUT_EMA_COEFF,
    )
    stats = Stats(**config)
    for value in (1, 2):
        stats.push(value)
    check(stats.reduce(), 3)
    check(len(stats), 0)  # Values should be cleared
    for value in (3, 4):
        stats.push(value)
    # A fresh sum, not 3 + 7: the prior values were cleared.
    check(stats.reduce(), 7)
    check(len(stats), 0)
def test_basic_throughput():
    """Test basic throughput tracking."""
    stats = Stats(
        init_values=None,
        reduce="sum",
        window=None,
        ema_coeff=None,
        clear_on_reduce=DEFAULT_CLEAR_ON_REDUCE,
        throughput=True,
        throughput_ema_coeff=None,
    )
    # First push - no throughput measurable yet (NaN).
    stats.push(1)
    check(stats.peek(), 1)
    check(stats.throughput, np.nan)
    # Wait and push again to measure throughput
    time.sleep(0.1)
    stats.push(1)
    check(stats.peek(), 2)
    # ~1 unit per 0.1s -> ~10/s; rtol guards against timing jitter.
    check(stats.throughput, 10, rtol=0.1)
    # Wait and push again to measure throughput
    time.sleep(0.1)
    stats.push(2)
    check(stats.peek(), 4)
    # NOTE(review): the comment below claims a default EMA coeff of 0.01,
    # but throughput_ema_coeff=None was passed above — confirm which default
    # actually applies inside Stats.
    check(
        stats.throughput, 10.1, rtol=0.1
    )  # default EMA coefficient for throughput is 0.01
@pytest.mark.parametrize(
"reduce_method,"
"reduce_per_index,"
"clear_on_reduce,"
"window,"
"expected_first_round_values,"
"expected_first_round_peek,"
"expected_second_round_values,"
"expected_second_round_peek,"
"expected_third_round_values,"
"expected_third_round_peek",
[
# In the following, we carry out some calculations by hand to verify that the math yields expected results.
# To keep things readable, we round the results to 2 decimal places. Since we don't aggregate many times,
# the rounding errors are negligible.
(
"mean", # reduce_method
True, # reduce_per_index
True, # clear_on_reduce
None, # window
# With window=None and ema_coeff=0.01, the values list
# contains a single value. For mean with reduce_per_index=True,
# the first merged values are [55, 110, 165]
# EMA calculation:
# 1. Start with 55
# 2. Update with 110: 0.99*55 + 0.01*110 = 55.55
# 3. Update with 165: 0.99*55.55 + 0.01*165 = 56.65
[56.65], # expected_first_round_values - final EMA value
56.65, # expected_first_round_peek - same as the EMA value
# Second round, merged values are [220, 275, 330]
# Starting fresh after clear_on_reduce:
# 1. Start with 220
# 2. Update with 275: 0.99*220 + 0.01*275 = 220.55
# 3. Update with 330: 0.99*220.55 + 0.01*330 = 221.65
[221.65], # expected_second_round_values - final EMA value
221.65, # expected_second_round_peek - same as the EMA value
# Third round, merged values contain [385]
[700], # expected_third_round_values - final EMA value
700, # expected_third_round_peek - final EMA value
),
(
"mean", # reduce_method
True, # reduce_per_index
True, # clear_on_reduce
4, # window
# Three values that we reduce per index from the two incoming stats.
# [(10 + 100) / 2, (20 + 200) / 2, (30 + 300) / 2] = [55, 110, 165]
[55, 110, 165], # expected_first_round_values
(55 + 110 + 165) / 3, # expected_first_round_peek
# Since we clear on reduce, the second round starts fresh.
# The values are the three values that we reduce per index from the two incoming stats.
# [(40 + 400) / 2, (50 + 500) / 2, (60 + 600) / 2] = [220, 275, 330]
[220, 275, 330], # expected_second_round_values
(220 + 275 + 330) / 3, # expected_second_round_peek
# Since we clear on reduce, the third round starts fresh.
# We only add the new value from the second Stats object.
[
700
], # expected_third_round_values - clear_on_reduce makes this just the new merged value
700, # expected_third_round_peek
),
(
"mean", # reduce_method
True, # reduce_per_index
False, # clear_on_reduce
None, # window
# With window=None and ema_coeff=0.01, the values list
# contains a single value. For mean with reduce_per_index=True,
# For the first Stats object, the values are [10, 20, 30]
# EMA calculation:
# 1. Start with 10
# 2. Update with 20: 0.99*10 + 0.01*20 = 10.1
# 3. Update with 30: 0.99*10.1 + 0.01*30 = 10.299
# For the second Stats object, the values are [100, 200, 300]
# EMA calculation:
# 1. Start with 100
# 2. Update with 200: 0.99*100 + 0.01*200 = 101
# 3. Update with 300: 0.99*101 + 0.01*300 = 102.99
# Finally, the we reduce over the single index:
# 0.5*10.299 + 0.5*102.99 = 56.64
[56.64], # expected_first_round_values - final EMA value
56.64, # expected_first_round_peek - same as the EMA value
# Second round, for the first object, the values are [40, 50, 60]
# Starting from 10.299 (because we don't clear on reduce)
# 1. Update with 40: 0.99*10.299 + 0.01*40 = 10.6
# 2. Update with 50: 0.99*10.6 + 0.01*50 = 10.994
# 3. Update with 60: 0.99*10.994 + 0.01*60 = 11.48
# For the second object, the values are [400, 500, 600]
# 1. Start from 102.99 (because we don't clear on reduce)
# 2. Update with 400: 0.99*102.99 + 0.01*400 = 105.96
# 3. Update with 500: 0.99*105.96 + 0.01*500 = 109.9
# 4. Update with 600: 0.99*109.9 + 0.01*600 = 114.8
# Finally, the we reduce over the single index:
# 0.5*11.48 + 0.5*114.8 = 63.14
[63.14], # expected_second_round_values - final EMA value
63.14, # expected_second_round_peek - same as the EMA value
# Third round, for the first object, there are no new values
# For the second object, the values are [700]
# 1. Start from 114.8 (because we don't clear on reduce)
# 2. Update with 700: 0.99*114.8 + 0.01*700 = 120.65
# Finally, the we reduce over the single index:
# 0.5*11.48 + 0.5*120.65 = 66.07
[66.07], # expected_third_round_values - final EMA value
66.07, # expected_third_round_peek - final EMA value
),
(
"mean", # reduce_method
True, # reduce_per_index
False, # clear_on_reduce
4, # window
# The first round values are the three values that we reduce per index from the two incoming stats.
# [(10 + 100) / 2, (20 + 200) / 2, (30 + 300) / 2] = [55, 110, 165]
[55, 110, 165], # expected_first_round_values
(55 + 110 + 165) / 3, # expected_first_round_peek
# Since we don't clear on reduce, the second round includes the latest value from the first round.
# [(30 + 300) / 2, (40 + 400) / 2, (50 + 500) / 2, (60 + 600) / 2] = [165, 220, 275, 330]
[
165,
220,
275,
330,
], # expected_second_round_values - includes values from previous round
(165 + 220 + 275 + 330)
/ 4, # expected_second_round_peek - average of all 4 values
# Since we don't clear on reduce, the third round includes the latest value from the second round.
# [(30 + 400) / 2, (40 + 500) / 2, (50 + 600) / 2, (60 + 700) / 2] = [215, 270, 325, 380]
[
215,
270,
325,
380,
], # expected_third_round_values - matches actual values in test
(215 + 270 + 325 + 380)
/ 4, # expected_third_round_peek - average of all 4 values
),
(
"sum", # reduce_method
True, # reduce_per_index
True, # clear_on_reduce
None, # window
[660], # expected_first_round_values
110 + 220 + 330, # expected_first_round_peek
[1650], # expected_second_round_values
440 + 550 + 660, # expected_second_round_peek
[700], # expected_third_round_values
700, # expected_third_round_peek
),
(
"sum", # reduce_method
True, # reduce_per_index
True, # clear_on_reduce
4, # window
[110, 220, 330], # expected_first_round_values
110 + 220 + 330, # expected_first_round_peek
[440, 550, 660], # expected_second_round_values
440 + 550 + 660, # expected_second_round_peek
[700], # expected_third_round_values
700, # expected_third_round_peek
),
(
"sum", # reduce_method
True, # reduce_per_index
False, # clear_on_reduce
None, # window
[660], # expected_first_round_values
110 + 220 + 330, # expected_first_round_peek
# The leading zero in this list is an artifact of how we merge lifetime sums.
# We merge them by substracting the previously reduced values from their history from the sum.
[0.0, 660 + 1650], # expected_second_round_values
660 + 440 + 550 + 660, # expected_second_round_peek
[0.0, 660 + 1650 + 700], # expected_third_round_values
660 + 1650 + 700, # expected_third_round_peek
),
(
"sum", # reduce_method
True, # reduce_per_index
False, # clear_on_reduce
4, # window
[110, 220, 330], # expected_first_round_values
110 + 220 + 330, # expected_first_round_peek
[330, 440, 550, 660], # expected_second_round_values
330 + 440 + 550 + 660, # expected_second_round_peek
[430, 540, 650, 760], # expected_third_round_values
430 + 540 + 650 + 760, # expected_third_round_peek
),
(
"min", # reduce_method
True, # reduce_per_index
True, # clear_on_reduce
None, # window
[10], # expected_first_round_values
10, # expected_first_round_peek
[40], # expected_second_round_values
40, # expected_second_round_peek
[700], # expected_third_round_values
700, # expected_third_round_peek
),
(
"min", # reduce_method
True, # reduce_per_index
True, # clear_on_reduce
4, # window
[10, 20, 30], # expected_first_round_values
10, # expected_first_round_peek
[40, 50, 60], # expected_second_round_values
40, # expected_second_round_peek
[700], # expected_third_round_values
700, # expected_third_round_peek
),
(
"min", # reduce_method
True, # reduce_per_index
False, # clear_on_reduce
None, # window
[10], # expected_first_round_values
10, # expected_first_round_peek
[10, 10], # expected_second_round_values
10, # expected_second_round_peek
[10, 10], # expected_third_round_values
10, # expected_third_round_peek
),
(
"min", # reduce_method
True, # reduce_per_index
False, # clear_on_reduce
4, # window
# Minima of [(10, 100), (20, 200), (30, 300)] = [10, 20, 30]
[10, 20, 30], # expected_first_round_values
10, # expected_first_round_peek
# Minima of [(30, 300), (40, 400), (50, 500), (60, 600)] = [30, 40, 50, 60]
[30, 40, 50, 60], # expected_second_round_values
30, # expected_second_round_peek
# Minimum of [(30, 400), (40, 500), (50, 600), (60, 700)] = [30, 40, 50, 60]
[30, 40, 50, 60], # expected_third_round_values
30, # expected_third_round_peek
),
(
"max", # reduce_method
True, # reduce_per_index
True, # clear_on_reduce
None, # window
[300], # expected_first_round_values
300, # expected_first_round_peek
[600], # expected_second_round_values
600, # expected_second_round_peek
[700], # expected_third_round_values
700, # expected_third_round_peek
),
(
"max", # reduce_method
True, # reduce_per_index
True, # clear_on_reduce
4, # window
[100, 200, 300], # expected_first_round_values
300, # expected_first_round_peek
[400, 500, 600], # expected_second_round_values
600, # expected_second_round_peek
[700], # expected_third_round_values
700, # expected_third_round_peek
),
(
"max", # reduce_method
True, # reduce_per_index
False, # clear_on_reduce
None, # window
[300], # expected_first_round_values
300, # expected_first_round_peek
[300, 600], # expected_second_round_values
600, # expected_second_round_peek
[600, 700], # expected_third_round_values
700, # expected_third_round_peek
),
(
"max", # reduce_method
True, # reduce_per_index
False, # clear_on_reduce
4, # window
[100, 200, 300], # expected_first_round_values
300, # expected_first_round_peek
[300, 400, 500, 600], # expected_second_round_values
600, # expected_second_round_peek
[400, 500, 600, 700], # expected_third_round_values
700, # expected_third_round_peek
),
(
"mean", # reduce_method
False, # reduce_per_index
True, # clear_on_reduce
None, # window
# With window=None and ema_coeff=0.01, the values list
# contains a single value. For mean with reduce_per_index=True,
# For the first Stats object, the values are [10, 20, 30]
# EMA calculation:
# 1. Start with 10
# 2. Update with 20: 0.99*10 + 0.01*20 = 10.1
# 3. Update with 30: 0.99*10.1 + 0.01*30 = 10.299
# For the second Stats object, the values are [100, 200, 300]
# EMA calculation:
# 1. Start with 100
# 2. Update with 200: 0.99*100 + 0.01*200 = 101
# 3. Update with 300: 0.99*101 + 0.01*300 = 102.99
# Finally, the we reduce over the single index:
# 0.5*10.299 + 0.5*102.99 = 56.64
[56.64, 56.64], # expected_first_round_values - final EMA value
56.64, # expected_first_round_peek - same as the EMA value
# Second round, for the first object, the values are [40, 50, 60]
# Start with 40 (because we clear on reduce)
# 1. Update with 40: 0.99*40 + 0.01*40 = 40.0
# 2. Update with 50: 0.99*40.0 + 0.01*50 = 40.1
# 3. Update with 60: 0.99*40.1 + 0.01*60 = 40.3
# For the second object, the values are [400, 500, 600]
# Start with 400 (because we clear on reduce)
# 1. Update with 400: 0.99*400 + 0.01*400 = 400.0
# 2. Update with 500: 0.99*400.0 + 0.01*500 = 401.0
# 3. Update with 600: 0.99*401.0 + 0.01*600 = 403.0
# Finally, the we reduce over the two indices:
# 0.5*40.3 + 0.5*403.0 = 221.65
[221.65, 221.65], # expected_second_round_values - final EMA value
221.65, # expected_second_round_peek - same as the EMA value
# Third round, for the first object, there are no new values
# For the second object, the values are [700]
[700], # expected_third_round_values - final EMA value
700, # expected_third_round_peek - final EMA value
),
(
"mean", # reduce_method
False, # reduce_per_index
True, # clear_on_reduce
4, # window
[110, 110, 165, 165], # expected_first_round_values
(110 + 110 + 165 + 165) / 4, # expected_first_round_peek
[275, 275, 330, 330], # expected_second_round_values
(275 + 275 + 330 + 330) / 4, # expected_second_round_peek
[700], # expected_third_round_values
700, # expected_third_round_peek
),
(
"mean", # reduce_method
False, # reduce_per_index
False, # clear_on_reduce
None, # window
# With window=None and ema_coeff=0.01, the values list
# contains a single value. For mean with reduce_per_index=True,
# For the first Stats object, the values are [10, 20, 30]
# EMA calculation:
# 1. Start with 10
# 2. Update with 20: 0.99*10 + 0.01*20 = 10.1
# 3. Update with 30: 0.99*10.1 + 0.01*30 = 10.299
# For the second Stats object, the values are [100, 200, 300]
# EMA calculation:
# 1. Start with 100
# 2. Update with 200: 0.99*100 + 0.01*200 = 101
# 3. Update with 300: 0.99*101 + 0.01*300 = 102.99
# Finally, the we reduce over the single index:
# 0.5*10.299 + 0.5*102.99 = 56.64
[56.64, 56.64], # expected_first_round_values
56.64, # expected_first_round_peek
# Second round, for the first object, the values are [40, 50, 60]
# Starting from 10.299 (because we don't clear on reduce)
# 1. Update with 40: 0.99*10.299 + 0.01*40 = 10.6
# 2. Update with 50: 0.99*10.6 + 0.01*50 = 10.994
# 3. Update with 60: 0.99*10.994 + 0.01*60 = 11.48
# For the second object, the values are [400, 500, 600]
# 1. Start from 102.99 (because we don't clear on reduce)
# 2. Update with 400: 0.99*102.99 + 0.01*400 = 105.96
# 3. Update with 500: 0.99*105.96 + 0.01*500 = 109.9
# 4. Update with 600: 0.99*109.9 + 0.01*600 = 114.8
# Finally, the we reduce over the single index:
# 0.5*11.48 + 0.5*114.8 = 63.14
[63.14, 63.14], # expected_second_round_values
63.14, # expected_second_round_peek
# Third round, for the first object, there are no new values
# For the second object, the values are [700]
# 1. Start from 114.8 (because we don't clear on reduce)
# 2. Update with 700: 0.99*114.8 + 0.01*700 = 120.65
# Finally, the we reduce over the single index:
# 0.5*11.48 + 0.5*120.65 = 66.07
[66.07, 66.07], # expected_third_round_values
66.07, # expected_third_round_peek
),
(
"mean", # reduce_method
False, # reduce_per_index
False, # clear_on_reduce
4, # window
[110, 110, 165, 165], # expected_first_round_values
(110 + 110 + 165 + 165) / 4, # expected_first_round_peek
[275, 275, 330, 330], # expected_second_round_values
(275 + 275 + 330 + 330) / 4, # expected_second_round_peek
[325, 325, 380, 380], # expected_third_round_values
(325 + 325 + 380 + 380) / 4, # expected_third_round_peek
),
(
"sum", # reduce_method
False, # reduce_per_index
True, # clear_on_reduce
None, # window
[660 / 2, 660 / 2], # expected_first_round_values
# 10 + 20 + 30 + 100 + 200 + 300
660, # expected_first_round_peek
[1650 / 2, 1650 / 2], # expected_second_round_values
# 40 + 50 + 60 + 400 + 500 + 600
1650, # expected_second_round_peek
[700], # expected_third_round_values
700, # expected_third_round_peek
),
(
"sum", # reduce_method
False, # reduce_per_index
True, # clear_on_reduce
4, # window
[110, 110, 165, 165], # expected_first_round_values
110 + 110 + 165 + 165, # expected_first_round_peek
[275, 275, 330, 330], # expected_second_round_values
275 + 275 + 330 + 330, # expected_second_round_peek
[700], # expected_third_round_values
700, # expected_third_round_peek
),
(
"sum", # reduce_method
False, # reduce_per_index
False, # clear_on_reduce
None, # window
[330.0, 330.0], # expected_first_round_values
660.0, # expected_first_round_peek
[0, 1155.0, 1155.0], # expected_second_round_values
2310.0, # expected_second_round_peek
[0, 1505.0, 1505.0], # expected_third_round_values
3010.0, # expected_third_round_peek
),
(
"sum", # reduce_method
False, # reduce_per_index
False, # clear_on_reduce
4, # window
[110, 110, 165, 165], # expected_first_round_values
110 + 110 + 165 + 165, # expected_first_round_peek
[275, 275, 330, 330], # expected_second_round_values
275 + 275 + 330 + 330, # expected_second_round_peek
[325, 325, 380, 380], # expected_third_round_values
325 + 325 + 380 + 380, # expected_third_round_peek
),
(
"min", # reduce_method
False, # reduce_per_index
True, # clear_on_reduce
None, # window
[10, 10], # expected_first_round_values
10, # expected_first_round_peek
[40, 40], # expected_second_round_values
40, # expected_second_round_peek
[700], # expected_third_round_values
700, # expected_third_round_peek
),
(
"min", # reduce_method
False, # reduce_per_index
True, # clear_on_reduce
4, # window
[20, 20, 30, 30], # expected_first_round_values
20, # expected_first_round_peek
[50, 50, 60, 60], # expected_second_round_values
50, # expected_second_round_peek
[700], # expected_third_round_values
700, # expected_third_round_peek
),
(
"min", # reduce_method
False, # reduce_per_index
False, # clear_on_reduce
None, # window
[10, 10], # expected_first_round_values
10, # expected_first_round_peek
[10, 10, 10], # expected_second_round_values
10, # expected_second_round_peek
[10, 10, 10], # expected_third_round_values
10, # expected_third_round_peek
),
(
"min", # reduce_method
False, # reduce_per_index
False, # clear_on_reduce
4, # window
[20, 20, 30, 30], # expected_first_round_values
20, # expected_first_round_peek
[50, 50, 60, 60], # expected_second_round_values
50, # expected_second_round_peek
[50, 50, 60, 60], # expected_third_round_values
50, # expected_third_round_peek
),
(
"max", # reduce_method
False, # reduce_per_index
True, # clear_on_reduce
None, # window
[300, 300], # expected_first_round_values
300, # expected_first_round_peek
[600, 600], # expected_second_round_values
600, # expected_second_round_peek
[700], # expected_third_round_values
700, # expected_third_round_peek
),
(
"max", # reduce_method
False, # reduce_per_index
True, # clear_on_reduce
4, # window
[200, 200, 300, 300], # expected_first_round_values
300, # expected_first_round_peek
[500, 500, 600, 600], # expected_second_round_values
600, # expected_second_round_peek
[700], # expected_third_round_values
700, # expected_third_round_peek
),
(
"max", # reduce_method
False, # reduce_per_index
False, # clear_on_reduce
None, # window
[300, 300], # expected_first_round_values
300, # expected_first_round_peek
[300, 600, 600], # expected_second_round_values
600, # expected_second_round_peek
[600, 700, 700], # expected_third_round_values
700, # expected_third_round_peek
),
(
"max", # reduce_method
False, # reduce_per_index
False, # clear_on_reduce
4, # window
[200, 200, 300, 300], # expected_first_round_values
300, # expected_first_round_peek
[500, 500, 600, 600], # expected_second_round_values
600, # expected_second_round_peek
[600, 600, 700, 700], # expected_third_round_values
700, # expected_third_round_peek
),
],
)
def test_aggregation_multiple_rounds(
    reduce_method,
    reduce_per_index,
    clear_on_reduce,
    window,
    expected_first_round_values,
    expected_first_round_peek,
    expected_second_round_values,
    expected_second_round_peek,
    expected_third_round_values,
    expected_third_round_peek,
):
    """Test `reduce_per_index_on_aggregate` with different reduction methods,
    `clear_on_reduce`, and `window` settings.

    Runs three rounds of the same pattern: push values into two leaf Stats
    objects, reduce each with compile=False (mirroring how the MetricsLogger
    uses Stats), merge both into a root Stats via `merge_stats`, then check
    the merged `values` and the peeked (reduced) result against the
    parametrized expectations. The third round pushes into only one of the
    two leaves to cover asymmetric updates.
    """
    # First round: Create and fill two stats objects
    incoming_stats1 = Stats(
        reduce=reduce_method,
        window=window,
        clear_on_reduce=clear_on_reduce,
        reduce_per_index_on_aggregate=reduce_per_index,
    )
    incoming_stats1.push(10)
    incoming_stats1.push(20)
    incoming_stats1.push(30)
    incoming_stats2 = Stats(
        reduce=reduce_method,
        window=window,
        clear_on_reduce=clear_on_reduce,
        reduce_per_index_on_aggregate=reduce_per_index,
    )
    incoming_stats2.push(100)
    incoming_stats2.push(200)
    incoming_stats2.push(300)
    # First merge
    # Use compile=False to simulate how we use stats in the MetricsLogger
    incoming_stats1_reduced = incoming_stats1.reduce(compile=False)
    incoming_stats2_reduced = incoming_stats2.reduce(compile=False)
    result_stats = merge_stats(
        base_stats=None,
        incoming_stats=[incoming_stats1_reduced, incoming_stats2_reduced],
    )
    # Verify first merge results
    check(
        result_stats.values, expected_first_round_values, atol=1e-2
    )  # Tolerance for EMA calculation
    check(result_stats.peek(), expected_first_round_peek, atol=1e-2)
    # Compile once per round, mirroring a metrics-reporting step.
    result_stats.reduce(compile=True)
    # Second round: Add more values to original stats
    incoming_stats1.push(40)
    incoming_stats1.push(50)
    incoming_stats1.push(60)
    incoming_stats2.push(400)
    incoming_stats2.push(500)
    incoming_stats2.push(600)
    # Second merge (this time aggregating on top of the previous result)
    incoming_stats1_reduced = incoming_stats1.reduce(compile=False)
    incoming_stats2_reduced = incoming_stats2.reduce(compile=False)
    result_stats = merge_stats(
        base_stats=result_stats,
        incoming_stats=[incoming_stats1_reduced, incoming_stats2_reduced],
    )
    # Verify second merge results
    check(result_stats.values, expected_second_round_values, atol=1e-2)
    check(result_stats.peek(), expected_second_round_peek, atol=1e-2)
    result_stats.reduce(compile=True)
    # Third round: Add only one value to one stats object (asymmetric case)
    incoming_stats2.push(700)
    # Third merge
    incoming_stats1_reduced = incoming_stats1.reduce(compile=False)
    incoming_stats2_reduced = incoming_stats2.reduce(compile=False)
    result_stats = merge_stats(
        base_stats=result_stats,
        incoming_stats=[incoming_stats1_reduced, incoming_stats2_reduced],
    )
    # Verify third merge results
    check(result_stats.values, expected_third_round_values, atol=1e-2)
    check(result_stats.peek(), expected_third_round_peek, atol=1e-2)
    result_stats.reduce(compile=True)
def test_merge_in_parallel_empty_and_nan_values():
    """Check `merge_in_parallel()` when participants are empty or NaN-only.

    Covers four cases: everything empty/NaN, only the root holding values,
    an empty root adopting a peer's values, and a NaN-only root adopting a
    peer's values.
    """
    # Case 1: root and all peers are empty / NaN -> root must stay empty.
    root = Stats(init_values=[])
    peer_empty = Stats(init_values=[])
    peer_nan = Stats(init_values=[np.nan])
    root.merge_in_parallel(root, peer_empty, peer_nan)
    check(root.values, [])

    # Case 2: root has values, peers are empty/NaN -> values unchanged,
    # since all other stats are filtered out.
    peer_empty = Stats(init_values=[])
    peer_nan = Stats(init_values=[np.nan])
    root = Stats(init_values=[1.0, 2.0])
    values_before = root.values.copy()
    root.merge_in_parallel(peer_empty, peer_nan)
    check(root.values, values_before)

    # Case 3: empty root merges a peer with values -> adopts them.
    root = Stats(init_values=[])
    peer = Stats(init_values=[3.0, 4.0])
    root.merge_in_parallel(peer)
    check(root.values, peer.values)

    # Case 4: NaN-only root merges a peer with values -> adopts them.
    root = Stats(init_values=[np.nan])
    peer = Stats(init_values=[5.0, 6.0])
    root.merge_in_parallel(peer)
    check(root.values, peer.values)
def test_percentiles():
    """Test that percentiles work correctly.

    We don't test percentiles as part of aggregation tests because it is not
    compatible with `reduce_per_index_on_parallel_merge` only used for
    reduce=None.
    """
    # Test basic functionality with single stats
    # Use values 0-9 to make percentile calculations easy to verify
    stats = Stats(reduce=None, percentiles=True, window=10)
    for i in range(10):
        stats.push(i)
    # Values should be sorted when peeking
    check(stats.peek(compile=False), list(range(10)))
    # Test with window constraint - push one more value
    stats.push(10)
    # Window is 10, so the oldest value (0) should be dropped
    check(stats.peek(compile=False), list(range(1, 11)))
    # Test reduce
    check(stats.reduce(compile=False).values, list(range(1, 11)))
    # Check with explicit percentiles
    del stats
    stats = Stats(reduce=None, percentiles=[0, 50], window=10)
    # Push in descending order; peek must still return a sorted list.
    for i in range(10)[::-1]:
        stats.push(i)
    check(stats.peek(compile=False), list(range(10)))
    check(stats.peek(compile=True), {0: 0, 50: 4.5})
    # Test merge_in_parallel with easy-to-calculate values
    stats1 = Stats(reduce=None, percentiles=True, window=20)
    # Push values 0, 2, 4, 6, 8 (even numbers 0-8)
    for i in range(0, 10, 2):
        stats1.push(i)
    check(stats1.reduce(compile=False).values, [0, 2, 4, 6, 8])
    stats2 = Stats(reduce=None, percentiles=True, window=20)
    # Push values 1, 3, 5, 7, 9 (odd numbers 1-9)
    for i in range(1, 10, 2):
        stats2.push(i)
    check(stats2.reduce(compile=False).values, [1, 3, 5, 7, 9])
    merged_stats = Stats(reduce=None, percentiles=True, window=20)
    merged_stats.merge_in_parallel(stats1, stats2)
    # Should merge and sort values from both stats
    # Merged values should be sorted: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    expected_merged = list(range(10))
    check(merged_stats.values, expected_merged)
    check(merged_stats.peek(compile=False), expected_merged)
    # Test compiled percentiles with numpy as reference
    expected_percentiles = np.percentile(expected_merged, [0, 50, 75, 90, 95, 99, 100])
    compiled_percentiles = merged_stats.peek(compile=True)
    # Check that our percentiles match numpy's calculations
    check(compiled_percentiles[0], expected_percentiles[0])  # 0th percentile
    check(compiled_percentiles[50], expected_percentiles[1])  # 50th percentile
    check(compiled_percentiles[75], expected_percentiles[2])  # 75th percentile
    check(compiled_percentiles[90], expected_percentiles[3])  # 90th percentile
    check(compiled_percentiles[95], expected_percentiles[4])  # 95th percentile
    check(compiled_percentiles[99], expected_percentiles[5])  # 99th percentile
    check(compiled_percentiles[100], expected_percentiles[6])  # 100th percentile
    # Test validation - window required
    with pytest.raises(ValueError, match="A window must be specified"):
        Stats(reduce=None, percentiles=True, window=None)
    # Test validation - percentiles must be a list
    with pytest.raises(ValueError, match="must be a list or bool"):
        Stats(reduce=None, percentiles=0.5, window=5)
    # Test validation - percentiles must contain numbers
    with pytest.raises(ValueError, match="must contain only ints or floats"):
        Stats(reduce=None, window=5, percentiles=["invalid"])
    # Test validation - percentiles must be between 0 and 100
    with pytest.raises(ValueError, match="must contain only values between 0 and 100"):
        Stats(reduce=None, window=5, percentiles=[-1, 50, 101])
    # Test validation - percentiles must be None for other reduce methods
    with pytest.raises(
        ValueError, match="`reduce` must be `None` when `percentiles` is not `False`"
    ):
        Stats(reduce="mean", window=5, percentiles=[50])
    # Test validation - percentiles are incompatible with per-index aggregation.
    with pytest.raises(
        ValueError,
        match=re.escape(
            "`reduce_per_index_on_aggregate` (True) must be `False` "
            "when `percentiles` is not `False`!"
        ),
    ):
        Stats(
            reduce=None,
            reduce_per_index_on_aggregate=True,
            percentiles=True,
            window=5,
        )
if __name__ == "__main__":
    import sys

    # Allow running this test file directly: `python <file>` invokes pytest on it.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/utils/metrics/tests/test_legacy_stats.py",
"license": "Apache License 2.0",
"lines": 1080,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:rllib/examples/envs/classes/multi_agent/footsies/test/footsies_suppress_unity_logs.py | import os
import platform
import tempfile
import time
import unittest
from contextlib import contextmanager
from pathlib import Path
from ray.rllib.env import EnvContext
from ray.rllib.examples.envs.classes.multi_agent.footsies.footsies_env import (
env_creator,
)
# Detect platform and choose appropriate binary
if platform.system() == "Darwin":
    binary_to_download = "mac_headless"
elif platform.system() == "Linux":
    binary_to_download = "linux_server"
else:
    # Anything else (e.g. Windows) is unsupported: fail fast at import time.
    raise RuntimeError(f"Unsupported platform: {platform.system()}")

# Base env config shared by all tests in this file; per-test overrides and
# fresh port pairs are merged in by `_create_env`.
FOOTSIES_ENV_BASE_CONFIG = {
    "max_t": 1000,
    "frame_skip": 4,
    "observation_delay": 16,
    "host": "localhost",
    "binary_download_dir": "/tmp/ray/binaries/footsies",
    "binary_extract_dir": "/tmp/ray/binaries/footsies",
    "binary_to_download": binary_to_download,
}

# Module-level counter handing out unique (train, eval) port pairs so
# successively created envs never collide on ports.
_port_counter = 45001
def _create_env(config_overrides):
    """Build a Footsies env from the base config plus per-test overrides.

    Each call reserves a fresh (train, eval) port pair from the module-level
    counter so successively created envs never collide on ports. Overrides
    are applied last, so a test may still pin ports explicitly.
    """
    global _port_counter
    train_port = _port_counter
    eval_port = _port_counter + 1
    _port_counter += 2
    merged_config = {
        **FOOTSIES_ENV_BASE_CONFIG,
        "train_start_port": train_port,
        "eval_start_port": eval_port,
        **config_overrides,
    }
    return env_creator(EnvContext(merged_config, worker_index=0))
@contextmanager
def capture_stdout_stderr():
    """Redirect the process-level stdout/stderr file descriptors into a file.

    Yields the path of a temporary log file that receives everything written
    to fds 1 and 2 while the context is active. Working at the fd level
    (rather than ``sys.stdout``/``sys.stderr``) captures output from native
    code and subprocesses and bypasses pytest's stream wrapping. The original
    descriptors are restored on exit; the caller is responsible for deleting
    the temporary file.
    """
    with tempfile.NamedTemporaryFile(mode="w+", delete=False) as tmp:
        log_path = tmp.name
    stdout_fd, stderr_fd = 1, 2
    # Duplicate the real descriptors so they can be restored afterwards.
    saved_stdout = os.dup(stdout_fd)
    saved_stderr = os.dup(stderr_fd)
    log_file = None
    try:
        # Line buffering so output lands in the file promptly.
        log_file = open(log_path, "w", buffering=1)
        os.dup2(log_file.fileno(), stdout_fd)
        os.dup2(log_file.fileno(), stderr_fd)
        yield log_path
    finally:
        if log_file is not None:
            log_file.flush()
        # Put the original descriptors back before closing anything.
        os.dup2(saved_stdout, stdout_fd)
        os.dup2(saved_stderr, stderr_fd)
        if log_file is not None:
            log_file.close()
        os.close(saved_stdout)
        os.close(saved_stderr)
class TestFootsies(unittest.TestCase):
    """Check that Unity's own console output is suppressed by default and
    passed through when `log_unity_output` is set in the env config."""

    def _run_env_and_capture(self, config_overrides):
        """Run a Footsies env with the given overrides and return captured output.

        Starts the env while fds 1/2 are redirected into a temp log file,
        gives Unity time to write, closes the env, and returns the captured
        text. The temp log file is always deleted, even if the caller's
        subsequent assertions fail (the original tests leaked it on failure).
        """
        with capture_stdout_stderr() as log_path:
            env = _create_env(config_overrides)
            time.sleep(2)  # Give Unity time to write output
            env.close()
            # Give a bit more time for any buffered output to be written
            time.sleep(0.5)
        try:
            # Read the captured output (after fds have been restored).
            with open(log_path, "r") as f:
                return f.read()
        finally:
            # Clean up the temporary log file unconditionally.
            if Path(log_path).exists():
                os.unlink(log_path)

    def test_default_supress_output_mode(self):
        """Without `log_unity_output`, Unity's output must not appear."""
        captured_output = self._run_env_and_capture({})
        assert (
            "`log_unity_output` not set in environment config, not logging output by default"
            in captured_output
        )
        assert "[UnityMemory]" not in captured_output

    def test_enable_output_mode(self):
        """With `log_unity_output: True`, Unity's output must pass through."""
        captured_output = self._run_env_and_capture({"log_unity_output": True})
        assert "[UnityMemory]" in captured_output
if __name__ == "__main__":
    import sys

    import pytest

    # Allow running this test file directly: `python <file>` invokes pytest on it.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/examples/envs/classes/multi_agent/footsies/test/footsies_suppress_unity_logs.py",
"license": "Apache License 2.0",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/tests/test_cpu_tensor_zero_copy_serialization.py | import collections
import sys
from dataclasses import make_dataclass
import pytest
import torch
import ray
from ray._common.test_utils import assert_tensors_equivalent
def test_cpu_tensor_serialization(ray_start_cluster_with_zero_copy_tensors):
    """Round-trip a wide range of CPU tensors through Ray's object store.

    Every fixture object is sent through both `ray.put`/`ray.get` and a
    remote task, then compared for equivalence with the original.

    NOTE(review): `ray_start_cluster_with_zero_copy_tensors` is a conftest
    fixture; presumably it starts a cluster with zero-copy tensor
    serialization enabled — confirm against the test's conftest.
    """
    # Single tensors covering scalars, dtypes, empty/zero-sized shapes,
    # higher ranks, and non-contiguous (strided) layouts.
    PRIMITIVE_OBJECTS = [
        # Scalars and basic types
        torch.tensor(42),
        torch.tensor(3.14159),
        torch.tensor(True),
        torch.tensor(1 + 2j, dtype=torch.complex64),
        torch.tensor(1 + 2j, dtype=torch.complex128),
        # Lower dimensions
        torch.tensor([1, 2, 3]),
        torch.tensor([1.0, 2.0, 3.0]),
        torch.tensor([True, False]),
        torch.randn(3, 4),
        torch.randint(0, 10, (2, 3)),
        torch.zeros(5, dtype=torch.int64),
        torch.ones(2, 2, dtype=torch.float32),
        # Empty and edge cases
        torch.tensor([]),
        torch.tensor((), dtype=torch.float32),
        torch.zeros(0, 5),
        torch.zeros(3, 0),
        torch.zeros(2, 0, 4),
        torch.empty(0, 10, 0),
        # Higher dimensions
        torch.randn(1, 1, 1, 1),
        torch.randn(2, 3, 4, 5, 6),
        # Strided / non-contiguous
        torch.arange(12).reshape(3, 4).t(),
        torch.arange(10)[::2],
        torch.randn(4, 4)[:, :2],
        # Scalar vs 0-dim
        torch.tensor(99),
        torch.tensor([99]).squeeze(),
    ]
    # Containers holding tensors; exercises the recursive serialization path.
    REPRESENTATIVE_COMPLEX_OBJECTS = [
        # List of tensors (explicitly requested)
        [torch.tensor([1, 2]), torch.tensor(3.5), torch.randn(2, 2)],
        # Dict with tensor values
        {
            "weights": torch.randn(10),
            "bias": torch.zeros(10),
            "flag": torch.tensor(True),
        },
        # NamedTuple with multiple tensors
        collections.namedtuple("Layer", ["w", "b", "meta"])(
            w=torch.randn(5, 5), b=torch.zeros(5), meta="test"
        ),
        # Dataclass
        make_dataclass("ModelParams", [("kernel", torch.Tensor), ("stride", int)])(
            kernel=torch.randn(3, 3), stride=1
        ),
        # One deep nesting example (covers recursive logic without explosion)
        {"model": {"layers": [{"w": torch.tensor([1.0])}], "output": torch.tensor(42)}},
        # Mixed types in container (tensor + primitive)
        [torch.tensor(100), "metadata", 42, {"aux": torch.tensor([1, 2])}],
    ]
    TEST_OBJECTS = PRIMITIVE_OBJECTS + REPRESENTATIVE_COMPLEX_OBJECTS

    @ray.remote
    def echo(x):
        # Identity task: forces a full serialize/deserialize round trip.
        return x

    for obj in TEST_OBJECTS:
        restored1 = ray.get(ray.put(obj))
        restored2 = ray.get(echo.remote(obj))
        assert_tensors_equivalent(obj, restored1)
        assert_tensors_equivalent(obj, restored2)
if __name__ == "__main__":
    # Allow running this test file directly: `python <file>` invokes pytest on it.
    sys.exit(pytest.main(["-sv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/test_cpu_tensor_zero_copy_serialization.py",
"license": "Apache License 2.0",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/tests/test_gpu_tensor_zero_copy_serialization.py | import collections
import os
import sys
import pytest
import torch
import ray
from ray._common.test_utils import assert_tensors_equivalent
USE_GPU = os.environ.get("RAY_PYTEST_USE_GPU") == "1"
@pytest.mark.skipif(not USE_GPU, reason="Skipping GPU Test")
def test_gpu_tensor_serialization(ray_start_cluster_with_zero_copy_tensors):
    """Round-trip a range of CUDA tensors through Ray's object store.

    GPU counterpart of the CPU serialization test: each fixture object is
    sent through `ray.put`/`ray.get` and through a remote GPU task, then
    compared for equivalence. Skipped unless RAY_PYTEST_USE_GPU=1 and CUDA
    is actually available.
    """
    if not torch.cuda.is_available():
        pytest.skip("CUDA not available")
    # === GPU tensor core cases ===
    PRIMITIVE_GPU_OBJECTS = [
        torch.tensor(42, device="cuda"),
        torch.tensor(3.14159, device="cuda"),
        torch.tensor(True, device="cuda"),
        torch.tensor([1, 2, 3], device="cuda"),
        torch.randn(3, 4, device="cuda"),
        torch.randint(0, 10, (2, 3), device="cuda"),
        torch.zeros(5, dtype=torch.int64, device="cuda"),
        torch.ones(2, 2, dtype=torch.float32, device="cuda"),
        torch.randn(1, 1, 1, 1, device="cuda"),
        torch.randn(2, 3, 4, 5, 6, device="cuda"),
        torch.arange(8, device="cuda").reshape(2, 2, 2),
        torch.tensor(99, device="cuda").expand(1, 3, 1),
        torch.zeros(2, 0, 4, device="cuda"),
        # Empty and strided on GPU
        torch.tensor([], device="cuda"),
        torch.zeros(0, 5, device="cuda"),
        torch.arange(12, device="cuda").reshape(3, 4).t(),
        torch.randn(4, 4, device="cuda")[:, :2],
    ]
    # Containers holding GPU tensors; exercises the recursive path.
    REPRESENTATIVE_COMPLEX_GPU_OBJECTS = [
        # List of GPU tensors
        [torch.tensor([1, 2], device="cuda"), torch.randn(3, device="cuda")],
        # Dict with GPU tensors
        {
            "features": torch.randn(100, device="cuda"),
            "labels": torch.randint(0, 10, (100,), device="cuda"),
        },
        # NamedTuple on GPU
        collections.namedtuple("GPULayer", ["weight", "bias"])(
            weight=torch.randn(20, 10, device="cuda"),
            bias=torch.zeros(10, device="cuda"),
        ),
        # Deep nesting with GPU tensor
        {"encoder": {"blocks": [{"attn": torch.tensor(1.0, device="cuda")}]}},
    ]
    TEST_GPU_OBJECTS = PRIMITIVE_GPU_OBJECTS + REPRESENTATIVE_COMPLEX_GPU_OBJECTS

    @ray.remote(num_gpus=1)
    def echo(x):
        # Identity task on a GPU worker: forces a full round trip.
        return x

    for obj in TEST_GPU_OBJECTS:
        restored1 = ray.get(ray.put(obj))
        restored2 = ray.get(echo.remote(obj))
        assert_tensors_equivalent(obj, restored1)
        assert_tensors_equivalent(obj, restored2)
if __name__ == "__main__":
    # Allow running this test file directly: `python <file>` invokes pytest on it.
    sys.exit(pytest.main(["-sv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/test_gpu_tensor_zero_copy_serialization.py",
"license": "Apache License 2.0",
"lines": 60,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/_private/accelerators/metax_gpu.py | import logging
import os
from typing import List, Optional, Tuple
from ray._private.accelerators.accelerator import AcceleratorManager
logger = logging.getLogger(__name__)
CUDA_VISIBLE_DEVICES_ENV_VAR = "CUDA_VISIBLE_DEVICES"
NOSET_CUDA_VISIBLE_DEVICES_ENV_VAR = "RAY_EXPERIMENTAL_NOSET_CUDA_VISIBLE_DEVICES"
class MetaxGPUAcceleratorManager(AcceleratorManager):
    """Metax GPU accelerators.

    Metax GPUs reuse the CUDA_VISIBLE_DEVICES convention, so they surface in
    Ray as the generic "GPU" resource. Device discovery goes through the
    optional `pymxsml` library; all queries degrade gracefully (0 devices /
    unknown type) when it is missing or fails to initialize.
    """

    @staticmethod
    def get_resource_name() -> str:
        """Ray resource name under which these accelerators are scheduled."""
        return "GPU"

    @staticmethod
    def get_visible_accelerator_ids_env_var() -> str:
        """Name of the environment variable controlling device visibility."""
        return CUDA_VISIBLE_DEVICES_ENV_VAR

    @staticmethod
    def get_current_process_visible_accelerator_ids() -> Optional[List[str]]:
        """Return the device ids visible to the current process.

        Returns:
            None if CUDA_VISIBLE_DEVICES is unset, [] if it is set to the
            empty string, otherwise the comma-separated ids as strings.
        """
        cuda_visible_devices = os.environ.get(
            MetaxGPUAcceleratorManager.get_visible_accelerator_ids_env_var(), None
        )
        if cuda_visible_devices is None:
            return None
        if cuda_visible_devices == "":
            return []
        return list(cuda_visible_devices.split(","))

    @staticmethod
    def get_current_node_num_accelerators() -> int:
        """Return the number of Metax GPUs on this node (0 if undetectable)."""
        try:
            import pymxsml.mxsml_extension as pymxsml

            try:
                pymxsml.mxSmlExInit()
            except pymxsml.MXSMLEXError:
                return 0
            try:
                return pymxsml.mxSmlExDeviceGetCount()
            finally:
                # Always release the SML handle, even if the count query
                # raises (the previous version leaked it in that case).
                pymxsml.mxSmlExShutdown()
        except Exception as e:
            logger.debug("Could not import pymxsml: %s", e)
            return 0

    @staticmethod
    def get_current_node_accelerator_type() -> Optional[str]:
        """Return the device name of the first Metax GPU, or None."""
        try:
            import pymxsml.mxsml_extension as pymxsml

            try:
                pymxsml.mxSmlExInit()
            except pymxsml.MXSMLEXError:
                return None
            try:
                device_name = None
                if pymxsml.mxSmlExDeviceGetCount() > 0:
                    handle = pymxsml.mxSmlExDeviceGetHandleByIndex(0)
                    device_name = pymxsml.mxSmlExDeviceGetName(handle)
                    if isinstance(device_name, bytes):
                        device_name = device_name.decode("utf-8")
                return device_name
            finally:
                # Always release the SML handle, even if a query raises
                # (the previous version leaked it in that case).
                pymxsml.mxSmlExShutdown()
        except Exception:
            logger.warning("Failed to detect GPU type.", exc_info=True)
            return None

    @staticmethod
    def validate_resource_request_quantity(
        quantity: float,
    ) -> Tuple[bool, Optional[str]]:
        """Any fractional/whole GPU quantity is accepted for Metax GPUs."""
        return (True, None)

    @staticmethod
    def set_current_process_visible_accelerator_ids(
        visible_cuda_devices: List[str],
    ) -> None:
        """Export the given device ids via CUDA_VISIBLE_DEVICES.

        No-op when RAY_EXPERIMENTAL_NOSET_CUDA_VISIBLE_DEVICES is set, so
        users can opt out of Ray managing device visibility.
        """
        if os.environ.get(NOSET_CUDA_VISIBLE_DEVICES_ENV_VAR):
            return
        os.environ[
            MetaxGPUAcceleratorManager.get_visible_accelerator_ids_env_var()
        ] = ",".join(visible_cuda_devices)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/_private/accelerators/metax_gpu.py",
"license": "Apache License 2.0",
"lines": 73,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/tests/accelerators/test_metax_gpu.py | import os
import sys
from unittest.mock import patch
import pytest
import ray
from ray._private.accelerators import (
MetaxGPUAcceleratorManager,
get_accelerator_manager_for_resource,
)
@patch(
    "ray._private.accelerators.MetaxGPUAcceleratorManager.get_current_node_num_accelerators",
    return_value=4,
)
def test_visible_metax_gpu_ids(mock_get_num_accelerators, monkeypatch, shutdown_only):
    """With 4 GPUs detected but only 3 visible via CUDA_VISIBLE_DEVICES,
    `ray.init()` must advertise exactly 3 GPUs."""
    monkeypatch.setenv("CUDA_VISIBLE_DEVICES", "0,1,2")
    # Drop the memoized resource-name -> manager mapping so that ray.init()
    # re-resolves the accelerator manager under the patched state.
    del get_accelerator_manager_for_resource._resource_name_to_accelerator_manager
    ray.init()
    assert mock_get_num_accelerators.called
    assert ray.available_resources()["GPU"] == 3
def test_get_current_process_visible_accelerator_ids(monkeypatch):
    """CUDA_VISIBLE_DEVICES parsing: unset -> None, "" -> [], else comma-split."""
    mgr = MetaxGPUAcceleratorManager

    # Single device.
    monkeypatch.setenv("CUDA_VISIBLE_DEVICES", "0")
    assert mgr.get_current_process_visible_accelerator_ids() == ["0"]

    # Multiple (non-contiguous) devices.
    monkeypatch.setenv("CUDA_VISIBLE_DEVICES", "0,4,7")
    assert mgr.get_current_process_visible_accelerator_ids() == ["0", "4", "7"]

    # Explicitly empty -> no devices.
    monkeypatch.setenv("CUDA_VISIBLE_DEVICES", "")
    assert mgr.get_current_process_visible_accelerator_ids() == []

    # Unset -> no restriction at all.
    monkeypatch.delenv("CUDA_VISIBLE_DEVICES")
    assert mgr.get_current_process_visible_accelerator_ids() is None
def test_set_current_process_visible_accelerator_ids():
    """`set_current_process_visible_accelerator_ids` must write the ids,
    comma-joined, into CUDA_VISIBLE_DEVICES.

    The env var is restored in a `finally` block so a failing assertion
    cannot leak CUDA_VISIBLE_DEVICES into other tests running in the same
    process (the previous version only cleaned up on success).
    """
    original = os.environ.get("CUDA_VISIBLE_DEVICES")
    try:
        MetaxGPUAcceleratorManager.set_current_process_visible_accelerator_ids(["0"])
        assert os.environ["CUDA_VISIBLE_DEVICES"] == "0"
        MetaxGPUAcceleratorManager.set_current_process_visible_accelerator_ids(
            ["0", "1"]
        )
        assert os.environ["CUDA_VISIBLE_DEVICES"] == "0,1"
        MetaxGPUAcceleratorManager.set_current_process_visible_accelerator_ids(
            ["0", "1", "7"]
        )
        assert os.environ["CUDA_VISIBLE_DEVICES"] == "0,1,7"
    finally:
        # Restore the pre-test state unconditionally.
        if original is None:
            os.environ.pop("CUDA_VISIBLE_DEVICES", None)
        else:
            os.environ["CUDA_VISIBLE_DEVICES"] = original
if __name__ == "__main__":
    # Allow running this test file directly; PARALLEL_CI switches to
    # parallel, process-isolated execution via pytest-xdist.
    if os.environ.get("PARALLEL_CI"):
        sys.exit(pytest.main(["-n", "auto", "--boxed", "-vs", __file__]))
    else:
        sys.exit(pytest.main(["-sv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/accelerators/test_metax_gpu.py",
"license": "Apache License 2.0",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/experimental/gpu_object_manager/nixl_tensor_transport.py | import threading
import time
import traceback
from collections import OrderedDict
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Dict, List, Optional
import ray
from ray._private.ray_constants import NIXL_REMOTE_AGENT_CACHE_MAXSIZE
from ray.experimental.gpu_object_manager.tensor_transport_manager import (
CommunicatorMetadata,
TensorTransportManager,
TensorTransportMetadata,
)
if TYPE_CHECKING:
import torch
@dataclass
class NixlCommunicatorMetadata(CommunicatorMetadata):
    """Metadata for the NIXL communicator.

    Intentionally empty: NIXL transfers are one-sided, so no extra
    communicator state is needed beyond the base class.
    """
@dataclass
class NixlTransportMetadata(TensorTransportMetadata):
    """Metadata for tensors stored in the GPU object store for NIXL transport.

    Args:
        nixl_serialized_descs: Serialized tensor descriptors for NIXL transport.
        nixl_agent_meta: The additional metadata of the remote NIXL agent.
        nixl_agent_name: The name of the NIXL agent.
        nixl_agent_meta_version: The version of the NIXL agent metadata.
    """

    # All fields default to None/0 for the empty-object case (no tensors).
    nixl_serialized_descs: Optional[bytes] = None
    nixl_agent_meta: Optional[bytes] = None
    nixl_agent_name: Optional[str] = None
    nixl_agent_meta_version: Optional[int] = 0
    # Restore identity-based equality and hashing: the dataclass decorator
    # would otherwise synthesize field-wise __eq__ and disable __hash__.
    __eq__ = object.__eq__
    __hash__ = object.__hash__
@dataclass
class TensorDesc:
    """A registered NIXL memory descriptor plus its reference count."""

    reg_desc: Any  # nixlRegDList
    metadata_count: int  # tracks the number of NIXL metadata containing the tensor
class NixlTensorTransport(TensorTransportManager):
    def __init__(self):
        """Set up empty caches and locks; the NIXL agent itself is created lazily."""
        # This is lazily initialized because it requires NIXL to actually be
        # installed and we want to allow an owner that is just coordinating to not
        # need to have NIXL installed.
        self._nixl_agent = None
        # Object ids whose in-flight transfer was aborted (consumed by receivers).
        self._aborted_transfer_obj_ids = set()
        self._aborted_transfer_obj_ids_lock = threading.Lock()
        # Mapping from tensor storage data pointer to the NIXL descriptor and
        # reference count. Unlike _managed_meta_nixl, we only deregister tensors
        # when ALL metadata containing the tensor is freed.
        self._tensor_desc_cache: Dict[int, TensorDesc] = {}
        # Mapping from object ID to the NIXL managed meta.
        # The lifetime of _managed_meta_nixl is tied to the object ref and freed
        # when the ref goes out of scope.
        self._managed_meta_nixl: Dict[str, Any] = {}
        # Lock protecting _tensor_desc_cache and _managed_meta_nixl since they can
        # be accessed from the main task execution thread or the _ray_system thread.
        self._cache_lock = threading.Lock()
        # LRU cache of remote agent names. When full, the least recently used
        # remote agent is evicted and remove_remote_agent is called.
        self._remote_agents: OrderedDict = OrderedDict()
        # Increment the version whenever memory is deregistered.
        self._nixl_agent_meta_version = 0
    def tensor_transport_backend(self) -> str:
        """Name of the transport backend implemented by this manager."""
        return "NIXL"
    @staticmethod
    def is_one_sided() -> bool:
        """NIXL transfers are one-sided: only the receiver drives the copy."""
        return True
    @staticmethod
    def can_abort_transport() -> bool:
        """In-flight NIXL transfers can be aborted (see _aborted_transfer_obj_ids)."""
        return True
    def get_nixl_agent(self):
        """
        Creates a NIXL agent with UCX backend if not already created.

        The agent is named after the current actor id (or a random
        "RAY-DRIVER-<uuid>" name when called from a driver) and cached on
        the instance, so repeated calls return the same agent.
        """
        if self._nixl_agent is not None:
            return self._nixl_agent
        from nixl._api import nixl_agent, nixl_agent_config

        agent_config = nixl_agent_config(backends=["UCX"])
        ctx = ray.get_runtime_context()
        actor_id = ctx.get_actor_id()
        if actor_id is None:
            # If the actor id is None, it means the current process is a driver.
            import uuid

            actor_id = f"RAY-DRIVER-{uuid.uuid4()}"
        self._nixl_agent = nixl_agent(actor_id, agent_config)
        return self._nixl_agent
    def actor_has_tensor_transport(self, actor: "ray.actor.ActorHandle") -> bool:
        """Return True if `actor` can create a NIXL agent (i.e. NIXL works there).

        Runs a small probe task on the actor's `_ray_system` concurrency group
        and blocks on the result.
        """
        # TODO(dayshah): This is called on a .remote RDT call, so it's quite expensive.
        def __ray_actor_has_tensor_transport__(
            self: "ray.actor.ActorHandle",
        ) -> bool:
            # Check if nixl is installed
            try:
                from ray.experimental.gpu_object_manager.util import (
                    get_tensor_transport_manager,
                )

                get_tensor_transport_manager("NIXL").get_nixl_agent()
                return True
            except Exception:
                return False

        return ray.get(
            actor.__ray_call__.options(concurrency_group="_ray_system").remote(
                __ray_actor_has_tensor_transport__
            )
        )
    def extract_tensor_transport_metadata(
        self,
        obj_id: str,
        gpu_object: List["torch.Tensor"],
    ) -> NixlTransportMetadata:
        """Validate `gpu_object`, register its tensors with NIXL, and return
        the transport metadata a receiver needs to pull them.

        Raises:
            ValueError: If tensors mix device types or any tensor is
                non-contiguous.
        """
        import torch

        with self._cache_lock:
            device = None
            tensor_meta = []
            if gpu_object:
                # We assume all tensors in one GPU object have the same device type,
                # but we don't assume they're all on the same device.
                devices = set()
                device = gpu_object[0].device
                for t in gpu_object:
                    if t.device.type != device.type:
                        raise ValueError(
                            "All tensors in an RDT object must have the same device type."
                        )
                    if not t.is_contiguous():
                        raise ValueError(
                            "All tensors in an RDT object must be contiguous."
                        )
                    tensor_meta.append((t.shape, t.dtype))
                    devices.add(t.device)
                if device.type == "cuda":
                    # We have to synchronize before memory registration to assure the
                    # object has been created because nixl doesn't guarantee it will.
                    for dev in devices:
                        torch.cuda.synchronize(dev)
                nixl_agent = self.get_nixl_agent()
                # Register (or ref-count) each tensor's memory with NIXL.
                self._add_tensor_descs(gpu_object)
                xfer_descs = nixl_agent.get_xfer_descs(gpu_object)
                serialized_descs = nixl_agent.get_serialized_descs(xfer_descs)
                agent_meta = nixl_agent.get_agent_metadata()
                agent_name = nixl_agent.name
                agent_meta_version = self._nixl_agent_meta_version
            else:
                # Empty object: nothing to register, receiver gets no descriptors.
                serialized_descs, agent_meta = None, None
                agent_name, agent_meta_version = None, None
            ret = NixlTransportMetadata(
                tensor_meta=tensor_meta,
                tensor_device=device.type if device else None,
                nixl_serialized_descs=serialized_descs,
                nixl_agent_meta=agent_meta,
                nixl_agent_name=agent_name,
                nixl_agent_meta_version=agent_meta_version,
            )
            # Tie the managed metadata's lifetime to the object ref.
            self._put_meta(obj_id, ret)
            return ret
    def get_communicator_metadata(
        self,
        src_actor: "ray.actor.ActorHandle",
        dst_actor: "ray.actor.ActorHandle",
        backend: Optional[str] = None,
    ) -> NixlCommunicatorMetadata:
        """Return an (empty) communicator metadata; one-sided NIXL transfers
        need no per-pair communicator state, so all arguments are ignored."""
        return NixlCommunicatorMetadata()
    def recv_multiple_tensors(
        self,
        obj_id: str,
        tensor_transport_metadata: TensorTransportMetadata,
        communicator_metadata: CommunicatorMetadata,
        target_buffers: Optional[List["torch.Tensor"]] = None,
    ) -> List["torch.Tensor"]:
        """Pull an object's tensors from the sender via a one-sided NIXL READ.

        Allocates receive buffers (unless ``target_buffers`` is given),
        registers them with the local NIXL agent, starts a READ transfer
        against the sender's descriptors, and polls until the transfer
        completes, is aborted, or errors.

        Args:
            obj_id: ID of the object being received; checked against the
                aborted-transfer set before and during the transfer.
            tensor_transport_metadata: Sender-side NIXL metadata (serialized
                descriptors, agent metadata/name/version, tensor shapes and
                dtypes). Must be a NixlTransportMetadata.
            communicator_metadata: Must be a NixlCommunicatorMetadata.
            target_buffers: Optional pre-allocated tensors to receive into.

        Returns:
            The received tensors (``target_buffers`` if provided, otherwise
            freshly allocated ones); an empty list for an empty object.

        Raises:
            RuntimeError: If the transfer was aborted via ``abort_transport``.
            RayDirectTransportError: If the NIXL transfer fails for any other
                reason (the original traceback is embedded in the message).
        """
        from ray.experimental.gpu_object_manager.util import (
            create_empty_tensors_from_metadata,
        )
        tensors = target_buffers or create_empty_tensors_from_metadata(
            tensor_transport_metadata
        )
        assert isinstance(tensor_transport_metadata, NixlTransportMetadata)
        assert isinstance(communicator_metadata, NixlCommunicatorMetadata)
        nixl_serialized_descs = tensor_transport_metadata.nixl_serialized_descs
        remote_nixl_agent_meta = tensor_transport_metadata.nixl_agent_meta
        # Fail fast if this transfer was aborted before it started.
        with self._aborted_transfer_obj_ids_lock:
            if obj_id in self._aborted_transfer_obj_ids:
                self._aborted_transfer_obj_ids.remove(obj_id)
                raise RuntimeError(f"NIXL transfer aborted for object id: {obj_id}")
        # Empty object: nothing to transfer.
        if not tensors:
            return []
        local_descs = None
        remote_name = None
        xfer_handle = None
        try:
            nixl_agent = self.get_nixl_agent()
            remote_descs = nixl_agent.deserialize_descs(nixl_serialized_descs)
            local_descs = nixl_agent.register_memory(tensors)
            remote_name = tensor_transport_metadata.nixl_agent_name
            remote_agent_meta_version = (
                tensor_transport_metadata.nixl_agent_meta_version
            )
            # Nixl agent reuse is enabled.
            if NIXL_REMOTE_AGENT_CACHE_MAXSIZE > 0:
                if remote_name in self._remote_agents:
                    # If the remote agent metadata version is different from the cached one,
                    # it means there was memory deregistered. We need to remove the remote agent
                    # before adding it, because `nixlRemoteSection` currently does not support
                    # updating descriptor list in such a case (there is potential memory overlap).
                    if remote_agent_meta_version != self._remote_agents[remote_name]:
                        nixl_agent.remove_remote_agent(remote_name)
                    self._remote_agents.move_to_end(remote_name)
                elif len(self._remote_agents) >= NIXL_REMOTE_AGENT_CACHE_MAXSIZE:
                    # Cache full: evict the least-recently-used remote agent.
                    evicted_agent_name, _ = self._remote_agents.popitem(last=False)
                    nixl_agent.remove_remote_agent(evicted_agent_name)
                self._remote_agents[remote_name] = remote_agent_meta_version
            nixl_agent.add_remote_agent(remote_nixl_agent_meta)
            xfer_handle = nixl_agent.initialize_xfer(
                # "UUID" here is just a placeholder, can be any bytes, but without it,
                # nixl will fail to transfer multiple times.
                "READ",
                local_descs.trim(),
                remote_descs,
                remote_name,
                "UUID",
            )
            state = nixl_agent.transfer(xfer_handle)
            if state == "ERR":
                raise RuntimeError("NIXL transfer got to Error state.")
            # Since current nixl does not provide a better way, we need to check the state of
            # the transfer continuously.
            while True:
                state = nixl_agent.check_xfer_state(xfer_handle)
                if state == "ERR":
                    raise RuntimeError("NIXL transfer got to Error state.")
                if state == "PROC":
                    # Still in progress: honor any abort request, then back off.
                    with self._aborted_transfer_obj_ids_lock:
                        if obj_id in self._aborted_transfer_obj_ids:
                            self._aborted_transfer_obj_ids.remove(obj_id)
                            raise RuntimeError(
                                f"NIXL transfer aborted for object id: {obj_id}"
                            )
                    time.sleep(0.001)  # Avoid busy waiting
                elif state == "DONE":
                    break
        except Exception:
            from ray.exceptions import RayDirectTransportError
            raise RayDirectTransportError(
                f"The NIXL recv failed for object id: {obj_id}. The source actor may have died during the transfer. "
                f"The exception thrown from the nixl recv was:\n {traceback.format_exc()}"
            ) from None
        finally:
            # We could raise errors or NIXL could raise errors like NIXL_ERR_REMOTE_DISCONNECT,
            # so doing best effort cleanup.
            with self._aborted_transfer_obj_ids_lock:
                self._aborted_transfer_obj_ids.discard(obj_id)
            if xfer_handle:
                nixl_agent.release_xfer_handle(xfer_handle)
            if NIXL_REMOTE_AGENT_CACHE_MAXSIZE == 0 and remote_name:
                nixl_agent.remove_remote_agent(remote_name)
            if local_descs:
                with self._cache_lock:
                    nixl_agent.deregister_memory(local_descs)
                    self._nixl_agent_meta_version += 1
        return tensors
    def send_multiple_tensors(
        self,
        tensors: List["torch.Tensor"],
        tensor_transport_metadata: TensorTransportMetadata,
        communicator_metadata: CommunicatorMetadata,
    ):
        """Unsupported for NIXL.

        NIXL is one-sided: the receiver pulls data via
        ``recv_multiple_tensors``, so there is no sender-initiated push.

        Raises:
            NotImplementedError: Always.
        """
        raise NotImplementedError(
            "NIXL transport does not support send_multiple_tensors, since it is a one-sided transport."
        )
    def garbage_collect(
        self,
        obj_id: str,
        tensor_transport_meta: TensorTransportMetadata,
        tensors: List["torch.Tensor"],
    ):
        """Drop the managed metadata for ``obj_id`` and unreference its tensors.

        Decrements the per-storage reference counts taken by
        ``_add_tensor_descs``; when a count reaches zero, the storage is
        deregistered from NIXL and the agent metadata version is bumped.

        Args:
            obj_id: Object whose metadata should be released.
            tensor_transport_meta: Must be a NixlTransportMetadata.
            tensors: The tensors that were registered for this object.
        """
        with self._cache_lock:
            assert isinstance(tensor_transport_meta, NixlTransportMetadata)
            # Ignore objects we never managed (or have already collected).
            if obj_id not in self._managed_meta_nixl:
                return
            self._managed_meta_nixl.pop(obj_id, None)
            for tensor in tensors:
                key = tensor.untyped_storage().data_ptr()
                if key in self._tensor_desc_cache:
                    tensor_desc = self._tensor_desc_cache[key]
                    tensor_desc.metadata_count -= 1
                    if tensor_desc.metadata_count == 0:
                        # Last reference: undo the NIXL memory registration.
                        self._tensor_desc_cache.pop(key)
                        self.get_nixl_agent().deregister_memory(tensor_desc.reg_desc)
                        self._nixl_agent_meta_version += 1
def abort_transport(
self,
obj_id: str,
communicator_metadata: CommunicatorMetadata,
):
with self._aborted_transfer_obj_ids_lock:
self._aborted_transfer_obj_ids.add(obj_id)
def _get_num_managed_meta_nixl(self) -> int:
with self._cache_lock:
return len(self._managed_meta_nixl)
def _get_meta(self, object_id: str) -> Optional[NixlTransportMetadata]:
"""
Get the NIXL transport metadata for the given object ID if it exists
"""
if object_id in self._managed_meta_nixl:
return self._managed_meta_nixl[object_id]
return None
def _put_meta(self, object_id: str, meta: NixlTransportMetadata):
"""
Store the NIXL transport metadata for the given object ID
"""
self._managed_meta_nixl[object_id] = meta
def _add_tensor_descs(self, tensors: List["torch.Tensor"]):
"""
If this is the first time the tensor is being registered, we register the
full underlying pytorch storage object with NIXL. Otherwise, we increment the reference count.
"""
for tensor in tensors:
key = tensor.untyped_storage().data_ptr()
if key in self._tensor_desc_cache:
self._tensor_desc_cache[key].metadata_count += 1
else:
mem_type = "cuda" if tensor.is_cuda else "cpu"
# the GPU ID of the device the tensor is on.
# NOTE: we clip this to 0 since the GPU ID is not used for CPU tensors, and get_device returns -1 for CPU tensors.
# This triggers an error in nixl since it expects an unsigned.
gpu_id = max(tensor.get_device(), 0)
# Registering the full underlying pytorch storage object by constructing a memory region
# with the data pointer, size, GPU ID, and meta info. Doing the equivalent of what nixl does for pytorch tensors
# internally: https://github.com/ai-dynamo/nixl/blob/dd23ef01bd366aef89fa552f2b042f89a0b45fcb/src/api/python/_api.py#L1034
reg_desc = self.get_nixl_agent().register_memory(
[
(
tensor.untyped_storage().data_ptr(),
tensor.untyped_storage().nbytes(),
gpu_id,
"",
)
],
mem_type=mem_type,
)
self._tensor_desc_cache[key] = TensorDesc(reg_desc, 1)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/experimental/gpu_object_manager/nixl_tensor_transport.py",
"license": "Apache License 2.0",
"lines": 335,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/experimental/gpu_object_manager/util.py | import threading
from typing import TYPE_CHECKING, Dict, List, NamedTuple, Optional
import ray
from ray._raylet import ObjectRef
from ray.experimental.gpu_object_manager.collective_tensor_transport import (
GLOOTensorTransport,
NCCLTensorTransport,
)
from ray.experimental.gpu_object_manager.cuda_ipc_transport import CudaIpcTransport
from ray.experimental.gpu_object_manager.nixl_tensor_transport import (
NixlTensorTransport,
)
from ray.experimental.gpu_object_manager.tensor_transport_manager import (
TensorTransportManager,
TensorTransportMetadata,
)
from ray.util.annotations import PublicAPI
if TYPE_CHECKING:
import torch
class TransportManagerInfo(NamedTuple):
    """Registration record for one tensor transport protocol."""

    # Class that implements TensorTransportManager
    transport_manager_class: type[TensorTransportManager]
    # List of supported device types for the transport
    devices: List[str]
    # Data type for this transport (e.g. torch.Tensor or jax.Array).
    # Required at registration time (see register_tensor_transport).
    data_type: type
# Registry of all known transports, keyed by upper-cased transport name.
transport_manager_info: Dict[str, TransportManagerInfo] = {}
# Singleton instances of transport managers, created lazily by
# get_tensor_transport_manager.
transport_managers: Dict[str, TensorTransportManager] = {}
# To protect the singleton instances of transport managers
transport_managers_lock = threading.Lock()
# Flipped to True when the first custom transport is registered.
has_custom_transports = False
@PublicAPI(stability="alpha")
def register_tensor_transport(
    transport_name: str,
    devices: List[str],
    transport_manager_class: type[TensorTransportManager],
    data_type: type,
):
    """
    Register a new tensor transport for use in Ray. Note that this needs to be called
    before you create the actors that will use the transport. The actors also
    need to be created in the same process from which you call this function.

    Args:
        transport_name: The name of the transport protocol. Matched
            case-insensitively (normalized to upper case).
        devices: List of PyTorch device types supported by this transport (e.g., ["cuda", "cpu"]).
        transport_manager_class: A class that implements TensorTransportManager.
        data_type: The data type for this transport (e.g. torch.Tensor or jax.Array).

    Raises:
        ValueError: If the transport name is already registered, or if
            transport_manager_class is not a subclass of TensorTransportManager.
    """
    global transport_manager_info
    global has_custom_transports
    transport_name = transport_name.upper()
    if transport_name in transport_manager_info:
        raise ValueError(f"Transport {transport_name} already registered.")
    if not issubclass(transport_manager_class, TensorTransportManager):
        raise ValueError(
            f"transport_manager_class {transport_manager_class.__name__} must be a subclass of TensorTransportManager."
        )
    transport_manager_info[transport_name] = TransportManagerInfo(
        transport_manager_class, devices, data_type
    )
    # Anything outside the built-in set is a custom transport, which must be
    # replayed on new actors (see register_custom_tensor_transports_on_actor).
    if transport_name not in DEFAULT_TRANSPORTS:
        has_custom_transports = True
# Built-in transports, registered lazily by
# _ensure_default_transports_registered().
DEFAULT_TRANSPORTS = ["NIXL", "GLOO", "NCCL", "CUDA_IPC"]
# Guards one-time registration of the default transports.
_default_transports_registered = False
def _ensure_default_transports_registered():
    """Register the built-in transports exactly once (no-op without torch).

    Thread-safe: runs under ``transport_managers_lock`` and uses the
    ``_default_transports_registered`` flag so repeat calls return
    immediately.
    """
    global _default_transports_registered
    with transport_managers_lock:
        if _default_transports_registered:
            return
        # Set the flag up front so a failed torch import is not retried on
        # every subsequent call.
        _default_transports_registered = True
        try:
            import torch
            register_tensor_transport(
                "NIXL", ["cuda", "cpu"], NixlTensorTransport, torch.Tensor
            )
            register_tensor_transport(
                "GLOO", ["cpu"], GLOOTensorTransport, torch.Tensor
            )
            register_tensor_transport(
                "NCCL", ["cuda"], NCCLTensorTransport, torch.Tensor
            )
            register_tensor_transport(
                "CUDA_IPC", ["cuda"], CudaIpcTransport, torch.Tensor
            )
        except ImportError:
            # torch is unavailable: no default transports can be registered.
            pass
def get_transport_data_type(tensor_transport: str) -> type:
    """Return the registered data type (e.g. torch.Tensor) for a transport.

    Raises:
        ValueError: If the transport name is not registered.
    """
    _ensure_default_transports_registered()
    info = transport_manager_info.get(tensor_transport)
    if info is None:
        raise ValueError(f"Unsupported tensor transport protocol: {tensor_transport}")
    return info.data_type
def get_tensor_transport_manager(
    transport_name: str,
) -> "TensorTransportManager":
    """Get the tensor transport manager for the given tensor transport protocol.

    Lazily constructs and caches one singleton manager per transport name,
    under ``transport_managers_lock``.

    Args:
        transport_name: The tensor transport protocol to use for the GPU object.

    Returns:
        TensorTransportManager: The tensor transport manager for the given tensor transport protocol.

    Raises:
        ValueError: If the transport name is not registered.
    """
    global transport_manager_info
    global transport_managers
    global transport_managers_lock
    _ensure_default_transports_registered()
    with transport_managers_lock:
        if transport_name in transport_managers:
            return transport_managers[transport_name]
        if transport_name not in transport_manager_info:
            raise ValueError(f"Unsupported tensor transport protocol: {transport_name}")
        # First use of this transport: instantiate and cache the singleton.
        transport_managers[transport_name] = transport_manager_info[
            transport_name
        ].transport_manager_class()
        return transport_managers[transport_name]
def register_custom_tensor_transports_on_actor(
    actor: "ray.actor.ActorHandle",
) -> Optional[ObjectRef]:
    """
    If there's no custom transports to register, returns None.
    Otherwise returns an object ref for a task on the actor that will register the custom transports.

    Args:
        actor: Actor on which to replay this process's transport
            registrations.

    Returns:
        None when only default transports are registered; otherwise the
        ObjectRef of the remote registration task.
    """
    global transport_manager_info
    global has_custom_transports
    _ensure_default_transports_registered()
    if not has_custom_transports:
        return None

    def register_transport_on_actor(
        self, owner_transport_manager_info: Dict[str, TransportManagerInfo]
    ):
        # Runs inside the target actor: re-import module state there rather
        # than closing over this process's globals.
        from ray.experimental.gpu_object_manager.util import (
            _ensure_default_transports_registered,
            register_tensor_transport,
            transport_manager_info,
        )
        _ensure_default_transports_registered()
        for transport_name, transport_info in owner_transport_manager_info.items():
            # Skip anything the actor already knows (e.g. the defaults).
            if transport_name not in transport_manager_info:
                register_tensor_transport(
                    transport_name,
                    transport_info.devices,
                    transport_info.transport_manager_class,
                    transport_info.data_type,
                )

    return actor.__ray_call__.options(concurrency_group="_ray_system").remote(
        register_transport_on_actor, transport_manager_info
    )
def device_match_transport(device: str, tensor_transport: str) -> bool:
    """Check if the device matches the transport.

    Raises:
        ValueError: If the transport name is not registered.
    """
    _ensure_default_transports_registered()
    info = transport_manager_info.get(tensor_transport)
    if info is None:
        raise ValueError(f"Unsupported tensor transport protocol: {tensor_transport}")
    return device in info.devices
def normalize_and_validate_tensor_transport(tensor_transport: str) -> str:
    """Upper-case the transport name and ensure it is registered.

    Raises:
        ValueError: If the (normalized) name is not a registered transport.
    """
    _ensure_default_transports_registered()
    normalized = tensor_transport.upper()
    if normalized not in transport_manager_info:
        raise ValueError(f"Invalid tensor transport: {normalized}")
    return normalized
def validate_one_sided(tensor_transport: str, ray_usage_func: str):
    """Raise unless ``tensor_transport`` is a registered one-sided transport.

    Args:
        tensor_transport: Transport name to validate (as registered, i.e.
            upper-cased).
        ray_usage_func: Name of the Ray API being used, for the error message.

    Raises:
        ValueError: If the transport is unknown, or is two-sided.
    """
    _ensure_default_transports_registered()
    # Check membership explicitly so an unknown transport surfaces as a
    # ValueError (consistent with the other lookups in this module) rather
    # than an opaque KeyError.
    if tensor_transport not in transport_manager_info:
        raise ValueError(f"Unsupported tensor transport protocol: {tensor_transport}")
    if not transport_manager_info[
        tensor_transport
    ].transport_manager_class.is_one_sided():
        raise ValueError(
            f"Trying to use two-sided tensor transport: {tensor_transport} for {ray_usage_func}. "
            "This is only supported for one-sided transports such as NIXL or the OBJECT_STORE."
        )
def create_empty_tensors_from_metadata(
    tensor_transport_meta: TensorTransportMetadata,
) -> List["torch.Tensor"]:
    """Allocate uninitialized tensors matching the metadata's shapes/dtypes.

    Each entry of ``tensor_transport_meta.tensor_meta`` is a (shape, dtype)
    pair; one ``torch.empty`` tensor is created per entry on the metadata's
    recorded device.
    """
    import torch
    target_device = tensor_transport_meta.tensor_device
    return [
        torch.empty(shape, dtype=dtype, device=target_device)
        for shape, dtype in tensor_transport_meta.tensor_meta
    ]
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/experimental/gpu_object_manager/util.py",
"license": "Apache License 2.0",
"lines": 182,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/serve/doc_code/asyncio_best_practices.py | # flake8: noqa
"""
Code examples for the asyncio best practices guide.
All examples are structured to be runnable and demonstrate key concepts.
"""
# __imports_begin__
from ray import serve
import asyncio
# __imports_end__
# __echo_async_begin__
@serve.deployment
class Echo:
    async def __call__(self, request):
        # Awaiting yields the event loop, so other requests can run meanwhile.
        await asyncio.sleep(0.1)
        return "ok"
# __echo_async_end__


# __blocking_echo_begin__
@serve.deployment
class BlockingEcho:
    def __call__(self, request):
        # Blocking.
        import time

        time.sleep(1)
        return "ok"
# __blocking_echo_end__
# __fastapi_deployment_begin__
from fastapi import FastAPI

app = FastAPI()


@serve.deployment
@serve.ingress(app)
class FastAPIDeployment:
    """Contrast a sync (threadpool) and an async (event loop) endpoint."""

    @app.get("/sync")
    def sync_endpoint(self):
        # FastAPI runs this in a threadpool.
        import time

        time.sleep(1)
        return "ok"

    @app.get("/async")
    async def async_endpoint(self):
        # Runs directly on FastAPI's asyncio loop.
        await asyncio.sleep(1)
        return "ok"
# __fastapi_deployment_end__
# __blocking_http_begin__
@serve.deployment
class BlockingHTTP:
    async def __call__(self, request):
        # ❌ This blocks the event loop until the HTTP call finishes.
        import requests

        resp = requests.get("https://example.com/")
        return resp.text
# __blocking_http_end__


# __async_http_begin__
@serve.deployment
class AsyncHTTP:
    async def __call__(self, request):
        # ✅ Non-blocking: the loop stays free while awaiting the response.
        import httpx

        async with httpx.AsyncClient() as client:
            resp = await client.get("https://example.com/")
        return resp.text
# __async_http_end__


# __threaded_http_begin__
@serve.deployment
class ThreadedHTTP:
    async def __call__(self, request):
        import requests

        def fetch():
            return requests.get("https://example.com/").text

        # ✅ Offload blocking I/O to a worker thread.
        return await asyncio.to_thread(fetch)
# __threaded_http_end__
# __threadpool_override_begin__
from concurrent.futures import ThreadPoolExecutor


@serve.deployment
class CustomThreadPool:
    def __init__(self):
        # NOTE(review): assumes __init__ runs on an active asyncio event
        # loop — get_running_loop() raises RuntimeError otherwise; confirm
        # for your Serve version.
        loop = asyncio.get_running_loop()
        loop.set_default_executor(ThreadPoolExecutor(max_workers=16))

    async def __call__(self, request):
        return await asyncio.to_thread(lambda: "ok")
# __threadpool_override_end__
# __numpy_deployment_begin__
@serve.deployment
class NumpyDeployment:
    def _heavy_numpy(self, array):
        import numpy as np

        # Many NumPy ops release the GIL while executing C/Fortran code.
        # [0] selects U from the (U, S, Vh) result of the SVD.
        return np.linalg.svd(array)[0]

    async def __call__(self, request):
        import numpy as np

        # Create a sample array from request data
        array = np.random.rand(100, 100)
        # ✅ Multiple threads can run _heavy_numpy in parallel if
        # the underlying implementation releases the GIL.
        return await asyncio.to_thread(self._heavy_numpy, array)
# __numpy_deployment_end__
# __max_ongoing_requests_begin__
@serve.deployment(max_ongoing_requests=32)
class MyService:
    async def __call__(self, request):
        # Async sleep keeps the event loop free while "waiting".
        await asyncio.sleep(1)
        return "ok"
# __max_ongoing_requests_end__


# __async_io_bound_begin__
@serve.deployment(max_ongoing_requests=100)
class AsyncIOBound:
    async def __call__(self, request):
        # Mostly waiting on an external system.
        await asyncio.sleep(0.1)
        return "ok"
# __async_io_bound_end__


# __blocking_cpu_begin__
@serve.deployment(max_ongoing_requests=100)
class BlockingCPU:
    def __call__(self, request):
        # ❌ Blocks the user event loop.
        import time

        time.sleep(1)
        return "ok"
# __blocking_cpu_end__


# __cpu_with_threadpool_begin__
@serve.deployment(max_ongoing_requests=100)
class CPUWithThreadpool:
    def __call__(self, request):
        # With RAY_SERVE_RUN_SYNC_IN_THREADPOOL=1, each call runs in a thread.
        import time

        time.sleep(1)
        return "ok"
# __cpu_with_threadpool_end__
# __batched_model_begin__
@serve.deployment(max_ongoing_requests=64)
class BatchedModel:
    @serve.batch(max_batch_size=32)
    async def __call__(self, requests):
        # requests is a list of request objects.
        inputs = [r for r in requests]
        outputs = await self._run_model(inputs)
        return outputs

    async def _run_model(self, inputs):
        # Placeholder model function
        return [f"result_{i}" for i in inputs]
# __batched_model_end__


# __batched_model_offload_begin__
@serve.deployment(max_ongoing_requests=64)
class BatchedModelOffload:
    @serve.batch(max_batch_size=32)
    async def __call__(self, requests):
        # requests is a list of request objects.
        inputs = [r for r in requests]
        outputs = await self._run_model(inputs)
        return outputs

    async def _run_model(self, inputs):
        def run_sync():
            # Heavy CPU or GIL-releasing native code here.
            # Placeholder model function
            return [f"result_{i}" for i in inputs]

        # Run the whole batch on the loop's default executor thread.
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(None, run_sync)
# __batched_model_offload_end__
# __blocking_stream_begin__
@serve.deployment
class BlockingStream:
    def __call__(self, request):
        # ❌ Blocks the event loop between yields.
        import time

        for i in range(10):
            time.sleep(1)
            yield f"{i}\n"
# __blocking_stream_end__


# __async_stream_begin__
@serve.deployment
class AsyncStream:
    async def __call__(self, request):
        # ✅ Yields items without blocking the loop.
        async def generator():
            for i in range(10):
                await asyncio.sleep(1)
                yield f"{i}\n"

        # Return the async generator; items are produced lazily.
        return generator()
# __async_stream_end__
# __offload_io_begin__
@serve.deployment
class OffloadIO:
    async def __call__(self, request):
        import requests

        def fetch():
            return requests.get("https://example.com/").text

        # Offload to a thread, free the event loop.
        body = await asyncio.to_thread(fetch)
        return body
# __offload_io_end__


# __offload_cpu_begin__
@serve.deployment
class OffloadCPU:
    def _compute(self, x):
        # CPU-intensive work.
        total = 0
        for i in range(10_000_000):
            total += (i * x) % 7
        return total

    async def __call__(self, request):
        x = 123
        # Executor `None` -> the loop's default ThreadPoolExecutor.
        loop = asyncio.get_running_loop()
        result = await loop.run_in_executor(None, self._compute, x)
        return str(result)
# __offload_cpu_end__
# __ray_parallel_begin__
import ray


@ray.remote
def heavy_task(x):
    # Heavy compute runs in its own worker process.
    return x * x


@serve.deployment
class RayParallel:
    async def __call__(self, request):
        values = [1, 2, 3, 4]
        refs = [heavy_task.remote(v) for v in values]
        # ObjectRefs are awaitable; pass them to gather directly instead of
        # copying the list with a redundant comprehension.
        results = await asyncio.gather(*refs)
        return {"results": results}
# __ray_parallel_end__
if __name__ == "__main__":
    # Smoke test: deploy each example above in turn and sanity-check its
    # output; network-dependent examples are tolerated to fail.
    import ray

    # Initialize Ray if not already running
    if not ray.is_initialized():
        ray.init()

    print("Testing Echo deployment...")
    # Test Echo
    echo_handle = serve.run(Echo.bind())
    result = echo_handle.remote(None).result()
    print(f"Echo result: {result}")
    assert result == "ok"

    print("\nTesting BlockingEcho deployment...")
    # Test BlockingEcho
    blocking_handle = serve.run(BlockingEcho.bind())
    result = blocking_handle.remote(None).result()
    print(f"BlockingEcho result: {result}")
    assert result == "ok"

    print("\nTesting MyService deployment...")
    # Test MyService
    service_handle = serve.run(MyService.bind())
    result = service_handle.remote(None).result()
    print(f"MyService result: {result}")
    assert result == "ok"

    print("\nTesting AsyncIOBound deployment...")
    # Test AsyncIOBound
    io_bound_handle = serve.run(AsyncIOBound.bind())
    result = io_bound_handle.remote(None).result()
    print(f"AsyncIOBound result: {result}")
    assert result == "ok"

    print("\nTesting AsyncStream deployment...")
    # Test AsyncStream (just create it, don't fully consume)
    stream_handle = serve.run(AsyncStream.bind())
    print("AsyncStream deployment created successfully")

    print("\nTesting OffloadCPU deployment...")
    # Test OffloadCPU
    cpu_handle = serve.run(OffloadCPU.bind())
    result = cpu_handle.remote(None).result()
    print(f"OffloadCPU result: {result}")

    print("\nTesting NumpyDeployment...")
    # Test NumpyDeployment
    numpy_handle = serve.run(NumpyDeployment.bind())
    result = numpy_handle.remote(None).result()
    print(f"NumpyDeployment result shape: {result.shape}")
    assert result.shape == (100, 100)

    print("\nTesting BlockingCPU deployment...")
    # Test BlockingCPU
    blocking_cpu_handle = serve.run(BlockingCPU.bind())
    result = blocking_cpu_handle.remote(None).result()
    print(f"BlockingCPU result: {result}")
    assert result == "ok"

    print("\nTesting CPUWithThreadpool deployment...")
    # Test CPUWithThreadpool
    cpu_threadpool_handle = serve.run(CPUWithThreadpool.bind())
    result = cpu_threadpool_handle.remote(None).result()
    print(f"CPUWithThreadpool result: {result}")
    assert result == "ok"

    print("\nTesting CustomThreadPool deployment...")
    custom_threadpool_handle = serve.run(CustomThreadPool.bind())
    result = custom_threadpool_handle.remote(None).result()
    print(f"CustomThreadPool result: {result}")
    assert result == "ok"

    print("\nTesting BlockingStream deployment...")
    # Test BlockingStream - just verify it can be created and called
    blocking_stream_handle = serve.run(BlockingStream.bind())
    # For generator responses, we need to handle them differently
    # Just verify deployment works
    print("BlockingStream deployment created successfully")

    print("\nTesting RayParallel deployment...")
    # Test RayParallel
    ray_parallel_handle = serve.run(RayParallel.bind())
    result = ray_parallel_handle.remote(None).result()
    print(f"RayParallel result: {result}")
    assert result == {"results": [1, 4, 9, 16]}

    print("\nTesting BatchedModel deployment...")
    # Test BatchedModel
    batched_model_handle = serve.run(BatchedModel.bind())
    result = batched_model_handle.remote(1).result()
    print(f"BatchedModel result: {result}")
    assert result == "result_1"

    print("\nTesting BatchedModelOffload deployment...")
    # Test BatchedModelOffload
    batched_model_offload_handle = serve.run(BatchedModelOffload.bind())
    result = batched_model_offload_handle.remote(1).result()
    print(f"BatchedModelOffload result: {result}")
    assert result == "result_1"

    # Test HTTP-related deployments with try-except
    # (These reach example.com, so network failures are tolerated.)
    print("\n--- Testing HTTP-related deployments (may fail due to network) ---")

    print("\nTesting BlockingHTTP deployment...")
    try:
        blocking_http_handle = serve.run(BlockingHTTP.bind())
        result = blocking_http_handle.remote(None).result()
        print(f"BlockingHTTP result (first 50 chars): {result[:50]}...")
        print("✅ BlockingHTTP test passed")
    except Exception as e:
        print(f"⚠️ BlockingHTTP test failed (expected): {type(e).__name__}: {e}")

    print("\nTesting AsyncHTTP deployment...")
    try:
        async_http_handle = serve.run(AsyncHTTP.bind())
        result = async_http_handle.remote(None).result()
        print(f"AsyncHTTP result (first 50 chars): {result[:50]}...")
        print("✅ AsyncHTTP test passed")
    except Exception as e:
        print(f"⚠️ AsyncHTTP test failed (expected): {type(e).__name__}: {e}")

    print("\nTesting ThreadedHTTP deployment...")
    try:
        threaded_http_handle = serve.run(ThreadedHTTP.bind())
        result = threaded_http_handle.remote(None).result()
        print(f"ThreadedHTTP result (first 50 chars): {result[:50]}...")
        print("✅ ThreadedHTTP test passed")
    except Exception as e:
        print(f"⚠️ ThreadedHTTP test failed (expected): {type(e).__name__}: {e}")

    print("\nTesting OffloadIO deployment...")
    try:
        offload_io_handle = serve.run(OffloadIO.bind())
        result = offload_io_handle.remote(None).result()
        print(f"OffloadIO result (first 50 chars): {result[:50]}...")
        print("✅ OffloadIO test passed")
    except Exception as e:
        print(f"⚠️ OffloadIO test failed (expected): {type(e).__name__}: {e}")

    print("\nTesting FastAPIDeployment...")
    fastapi_handle = serve.run(FastAPIDeployment.bind())
    # Give it a moment to start
    import time
    import requests

    time.sleep(2)
    # Test the sync endpoint
    response = requests.get("http://127.0.0.1:8000/sync", timeout=5)
    print(f"FastAPIDeployment /sync result: {response.json()}")
    # Test the async endpoint
    response = requests.get("http://127.0.0.1:8000/async", timeout=5)
    print(f"FastAPIDeployment /async result: {response.json()}")
    print("✅ FastAPIDeployment test passed")

    print("\n✅ All core tests passed!")
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/doc_code/asyncio_best_practices.py",
"license": "Apache License 2.0",
"lines": 353,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/tests/test_preserve_hash_shuffle_blocks.py | import pytest
import ray
from ray.data.context import DataContext
from ray.tests.conftest import * # noqa
class TestPreserveHashShuffleBlocks:
    """Test that hash shuffle repartition preserves block structure."""

    @pytest.fixture(autouse=True)
    def setup(self):
        """Set a tiny target_max_block_size, restoring it after each test.

        The previous version never restored the override, leaking
        ``target_max_block_size = 1`` into any test that ran later in the
        same process.
        """
        ctx = DataContext.get_current()
        original_target_max_block_size = ctx.target_max_block_size
        # Very small to force splitting if enabled
        ctx.target_max_block_size = 1
        yield
        ctx.target_max_block_size = original_target_max_block_size

    def test_repartition_preserves_blocks(
        self, ray_start_regular_shared_2_cpus, shutdown_only
    ):
        """Test that repartition with keys preserves block count."""
        # Create a dataset with multiple blocks
        ds = ray.data.range(10, override_num_blocks=10)
        # Repartition using hash shuffle with keys
        result = ds.repartition(2, keys=["id"]).materialize()
        # Should have exactly 2 blocks (one per partition)
        assert result.num_blocks() == 2

    def test_map_groups_works(self, ray_start_regular_shared_2_cpus, shutdown_only):
        """Test that map_groups works correctly."""
        ds = ray.data.from_items(
            [
                {"group": 1, "value": 1},
                {"group": 1, "value": 2},
                {"group": 2, "value": 3},
                {"group": 2, "value": 4},
            ]
        )
        # map_groups should work correctly
        result = (
            ds.groupby("group")
            .map_groups(lambda g: {"count": [len(g["value"])]})
            .take_all()
        )
        assert len(result) == 2
        counts = sorted([r["count"] for r in result])
        assert counts == [2, 2]

    def test_join_does_not_preserve_blocks(
        self, ray_start_regular_shared_2_cpus, shutdown_only
    ):
        """Test that join does not preserve block structure (default behavior)."""
        # Create a dataset with one large block
        ds = ray.data.range(10, override_num_blocks=2)
        # Join operation uses hash shuffle but doesn't set disallow_block_splitting
        result = ds.join(
            ds, on=("id",), join_type="inner", num_partitions=2
        ).materialize()
        # Should have more than 2 blocks due to block splitting
        assert result.num_blocks() >= 3
if __name__ == "__main__":
    import sys

    # Allow running this module's tests directly with `python <file>`.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_preserve_hash_shuffle_blocks.py",
"license": "Apache License 2.0",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/train/v2/_internal/callbacks/placement_group_callback.py | import logging
from typing import TYPE_CHECKING, Optional
import ray
from ray.exceptions import RayActorError
from ray.train.v2._internal.execution.callback import (
ControllerCallback,
WorkerGroupCallback,
)
from ray.train.v2._internal.execution.controller.placement_group_cleaner import (
PlacementGroupCleaner,
)
if TYPE_CHECKING:
from ray.train.v2._internal.execution.context import TrainRunContext
from ray.train.v2._internal.execution.worker_group import WorkerGroup
logger = logging.getLogger(__name__)
class PlacementGroupCleanerCallback(ControllerCallback, WorkerGroupCallback):
    """Callback that manages a PlacementGroupCleaner for the training controller.

    This callback ensures that placement groups are cleaned up even if the controller
    dies ungracefully.
    """

    def __init__(self, check_interval_s: float = 1.0):
        """Initialize the callback.

        Args:
            check_interval_s: How often (in seconds) the cleaner should check
                if the controller is still alive.
        """
        self._check_interval_s = check_interval_s
        # Handle to the detached cleaner actor; None until launched, and reset
        # to None if the launch fails or after shutdown.
        self._cleaner: Optional[PlacementGroupCleaner] = None
        self._controller_actor_id: Optional[str] = None

    def after_controller_start(self, train_run_context: "TrainRunContext"):
        """Launch the detached PlacementGroupCleaner actor.

        This is called when the controller starts, before the control loop begins.
        """
        core_context = ray.runtime_context.get_runtime_context()
        self._controller_actor_id = core_context.get_actor_id()
        try:
            # Launch the cleaner as a detached actor so it survives controller death
            cleaner_actor_cls = ray.remote(num_cpus=0)(PlacementGroupCleaner)
            self._cleaner = cleaner_actor_cls.options(
                lifetime="detached",
                get_if_exists=False,
            ).remote(check_interval_s=self._check_interval_s)
            logger.debug(
                f"PlacementGroupCleaner launched for run_id={train_run_context.run_id}"
            )
        except Exception as e:
            # Best-effort: training proceeds without the safety net.
            logger.warning(
                f"Failed to launch PlacementGroupCleaner: {e}. "
                "Placement groups may not be cleaned up if controller exits ungracefully."
            )
            self._cleaner = None

    def after_worker_group_start(self, worker_group: "WorkerGroup"):
        """Register the worker group's placement group with the cleaner.

        This is called after a worker group is successfully started.
        """
        if not self._cleaner or not self._controller_actor_id:
            logger.warning(
                "PlacementGroupCleaner not available. "
                "Placement groups may not be cleaned up if controller exits ungracefully."
            )
            return
        worker_group_state = worker_group.get_worker_group_state()
        placement_group = worker_group_state.placement_group_handle.placement_group
        try:
            # Block on registration so monitoring never starts without a
            # registered placement group.
            ray.get(
                self._cleaner.register_controller_and_placement_group.remote(
                    self._controller_actor_id, placement_group
                )
            )
        except Exception as e:
            logger.warning(
                f"Failed to register placement group with cleaner: {e}. "
                "Placement group may not be cleaned up if controller dies ungracefully."
            )
            return
        # Fire-and-forget: the monitor loop runs on the detached cleaner actor.
        self._cleaner.start_monitoring.remote()
        logger.debug(
            f"Registered placement group {placement_group.id} with PlacementGroupCleaner."
        )

    def before_controller_shutdown(self):
        """Stop the cleaner before a graceful controller shutdown."""
        self._stop_cleaner()

    def _stop_cleaner(self):
        """Best-effort graceful stop of the cleaner actor; never raises."""
        if not self._cleaner:
            return
        try:
            # Stop the cleaner gracefully (it won't clean up the PG)
            # Allow at least two poll intervals for the stop to be observed.
            stop_timeout_s = max(2.0, self._check_interval_s * 2)
            ray.get(self._cleaner.stop.remote(), timeout=stop_timeout_s)
        except RayActorError:
            logger.debug(
                "PlacementGroupCleaner exited before stop completed; ignoring."
            )
        except Exception:
            logger.exception("Failed to stop PlacementGroupCleaner gracefully.")
        finally:
            self._cleaner = None
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/_internal/callbacks/placement_group_callback.py",
"license": "Apache License 2.0",
"lines": 96,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/train/v2/_internal/execution/controller/placement_group_cleaner.py | import logging
import queue
import threading
from typing import Optional
import ray
from ray.train.v2._internal.constants import GET_ACTOR_TIMEOUT_S
from ray.train.v2._internal.state.util import is_actor_alive
from ray.util.placement_group import PlacementGroup, remove_placement_group
logger = logging.getLogger(__name__)
class PlacementGroupCleaner:
    """Detached helper that ensures PG cleanup if Ray Train Controller exits ungracefully.

    This actor should be created with lifetime='detached' to avoid being
    fate-shared with the Train controller. A background thread polls the
    controller's liveness and removes the registered placement group once
    the controller is detected dead.
    """

    def __init__(self, check_interval_s: float = 1.0):
        """
        Args:
            check_interval_s: Poll interval in seconds, used both as the queue
                read timeout and the controller liveness-check cadence.
        """
        self._check_interval_s = check_interval_s
        # Hands placement-group updates from actor method calls to the
        # monitor thread; queue.Queue is thread-safe, so no extra locking.
        self._pg_queue: queue.Queue = queue.Queue()
        self._stop_event = threading.Event()
        self._controller_actor_id: Optional[str] = None
        self._monitor_thread: Optional[threading.Thread] = None
        self._get_actor_timeout_s = GET_ACTOR_TIMEOUT_S
        # Guards ray.actor.exit_actor() from being invoked more than once.
        self._exiting: bool = False

    def register_controller_and_placement_group(
        self, controller_actor_id: str, placement_group: PlacementGroup
    ):
        """Record the controller to watch and the placement group to clean up.

        May be called repeatedly; the most recently queued placement group
        becomes the one the monitor thread tracks.
        """
        self._controller_actor_id = controller_actor_id
        logger.debug(
            "PlacementGroupCleaner registered controller %s with placement group %s",
            controller_actor_id,
            placement_group.id,
        )
        # Send placement group update to the monitor thread via queue
        self._pg_queue.put(placement_group)

    def start_monitoring(self):
        """Start monitoring the controller and placement group.

        Returns:
            True in all cases, including when a monitor thread is already
            running (duplicate starts are a no-op).
        """
        if self._monitor_thread is not None and self._monitor_thread.is_alive():
            # Thread already running, just return True
            logger.debug("Monitor thread already running")
            return True
        # daemon=True so a stuck monitor thread never blocks process exit.
        self._monitor_thread = threading.Thread(
            target=self._monitor_loop,
            name="PlacementGroupCleanerMonitor",
            daemon=True,
        )
        self._monitor_thread.start()
        logger.debug("PlacementGroupCleaner started monitoring in background thread")
        return True

    def _monitor_loop(self):
        """Monitor controller; remove PG when controller is gone.

        This runs continuously until controller dies or stop() is called.
        Uses a queue to receive placement group updates.
        """
        curr_placement_group: Optional[PlacementGroup] = None
        while not self._stop_event.is_set():
            # Check for new placement group updates from queue; the queue
            # timeout doubles as the liveness-poll interval.
            try:
                pg = self._pg_queue.get(timeout=self._check_interval_s)
                curr_placement_group = pg
                logger.debug(f"Updated current placement group to {pg.id}")
            except queue.Empty:
                pass  # continue to monitor current placement group
            # Skip monitoring if no placement group registered
            if not curr_placement_group:
                continue
            # Check if controller is still alive
            try:
                alive = is_actor_alive(
                    actor_id=self._controller_actor_id,
                    timeout=self._get_actor_timeout_s,
                )
            except ray.util.state.exception.RayStateApiException:
                # Transient state-API failures must not trigger cleanup.
                logger.warning(
                    "Failed to query Ray Train Controller actor state. "
                    "State API may be temporarily unavailable. Continuing to monitor."
                )
                continue
            # Cleanup if controller is dead
            if not alive:
                self._cleanup_placement_group(curr_placement_group)
                break
        # Exit the actor after cleanup since controller is dead. This line is
        # also reached when stop() sets the stop event; _exit() is idempotent
        # via the _exiting flag, so a concurrent stop() cannot double-exit.
        self._exit()
        self._monitor_thread = None

    def _cleanup_placement_group(self, placement_group: PlacementGroup):
        """Clean up the current placement group if it hasn't been removed."""
        if self._is_placement_group_removed(placement_group):
            logger.debug(
                "Controller actor died but placement group already removed; "
                "skipping cleanup."
            )
            return
        logger.warning(
            f"Detected that the Ray Train controller actor ({self._controller_actor_id}) is dead. "
            f"Cleaning up placement group = [{placement_group.id}] created by this run."
        )
        try:
            remove_placement_group(placement_group)
        except Exception as e:
            # Best-effort: log and bail rather than crash the monitor thread.
            logger.warning(f"Failed to clean up placement group: {e}")
            return
        logger.debug(
            f"Placement group = [{placement_group.id}] cleaned up successfully"
        )

    def _stop_monitor_thread(self):
        """Stop the monitor thread and wait for it to exit.

        Returns:
            bool: True if the thread was stopped, False if there was no active
                thread or it failed to exit within the join timeout.
        """
        if self._monitor_thread is None or not self._monitor_thread.is_alive():
            return False
        # Signal stop and wait for thread to exit
        self._stop_event.set()
        # Allow at least 2s (or two poll intervals) for the loop to notice.
        join_timeout = max(2.0, self._check_interval_s * 2)
        self._monitor_thread.join(timeout=join_timeout)
        if self._monitor_thread.is_alive():
            logger.warning(
                "Monitor thread did not exit within %.2f seconds", join_timeout
            )
            return False
        self._monitor_thread = None
        return True

    def stop(self):
        """Request the cleaner to stop monitoring and exit (no PG cleanup)."""
        self._stop_monitor_thread()
        self._exit()

    def _is_placement_group_removed(self, placement_group: PlacementGroup) -> bool:
        """Check if a placement group has been removed."""
        try:
            table = ray.util.placement_group_table(placement_group)
        except Exception as e:
            # Err on the side of attempting cleanup if state can't be read.
            logger.warning(
                f"Failed to query placement group table: {e}. "
                "Assuming placement group is not removed."
            )
            return False
        # NOTE(review): an absent "state" entry is treated as "removed" —
        # presumably the table no longer tracks the PG; confirm against the
        # placement group table semantics.
        if "state" not in table:
            return True
        return table["state"] == "REMOVED"

    def _exit(self):
        """Exit the actor. Idempotent via the _exiting flag."""
        if self._exiting:
            return
        self._exiting = True
        try:
            ray.actor.exit_actor()
        except Exception as e:
            # If exit fails for any reason, just log it.
            logger.warning(f"Failed to exit actor: {e}")
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/_internal/execution/controller/placement_group_cleaner.py",
"license": "Apache License 2.0",
"lines": 148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/train/v2/tests/test_placement_group_cleaner.py | import time
import pytest
import ray
from ray.exceptions import RayActorError
from ray.tests.client_test_utils import create_remote_signal_actor
from ray.train.v2._internal.execution.controller.placement_group_cleaner import (
PlacementGroupCleaner,
)
from ray.util.placement_group import placement_group, remove_placement_group
@pytest.fixture(autouse=True)
def ray_start():
    """Start a fresh 4-CPU Ray instance for every test in this module."""
    ray.init(num_cpus=4)
    yield
    ray.shutdown()
@pytest.fixture
def monitoring_started_signal(monkeypatch):
    """Signal actor set whenever PlacementGroupCleaner begins monitoring."""
    SignalActor = create_remote_signal_actor(ray)
    signal = SignalActor.options(num_cpus=0).remote()

    unpatched = PlacementGroupCleaner.start_monitoring

    def _start_and_signal(self):
        # Delegate to the real implementation, then fire the signal so tests
        # can deterministically wait for monitoring to begin.
        result = unpatched(self)
        if result:
            signal.send.remote()
        return result

    monkeypatch.setattr(
        PlacementGroupCleaner,
        "start_monitoring",
        _start_and_signal,
    )

    try:
        yield signal
    finally:
        try:
            ray.kill(signal)
        except Exception:
            pass
@ray.remote(num_cpus=0)
class MockController:
    """Mock controller actor for testing."""

    def __init__(self):
        # Flag flipped by shutdown(); mirrors a live/dead controller.
        self._alive = True

    def get_actor_id(self):
        # The actor id is what the cleaner uses to monitor liveness.
        return ray.get_runtime_context().get_actor_id()

    def shutdown(self):
        """Simulate controller death."""
        self._alive = False
        ray.actor.exit_actor()
def test_placement_group_cleaner_basic_lifecycle(monitoring_started_signal):
    """Test that the PlacementGroupCleaner can be launched and stopped.

    Verifies a graceful stop does NOT remove the registered placement group.
    """
    # Launch cleaner as detached so it is not fate-shared with the caller.
    cleaner = (
        ray.remote(num_cpus=0)(PlacementGroupCleaner)
        .options(
            name="test_pg_cleaner",
            namespace="test",
            lifetime="detached",
            get_if_exists=False,
        )
        .remote(check_interval_s=0.1)
    )
    # Create a mock controller
    controller = MockController.remote()
    controller_id = ray.get(controller.get_actor_id.remote())
    # Create a placement group
    pg = placement_group([{"CPU": 1}], strategy="SPREAD")
    ray.get(pg.ready())
    # Register controller and placement group atomically
    ray.get(cleaner.register_controller_and_placement_group.remote(controller_id, pg))
    # Start monitoring asynchronously (same behavior as production code)
    cleaner.start_monitoring.remote()
    # Wait deterministically for monitoring to start to avoid flakiness.
    ray.get(monitoring_started_signal.wait.remote())
    # Controller is still alive, so PG should still exist
    assert pg.id is not None
    # Stop the cleaner gracefully; tolerate the actor exiting itself.
    try:
        ray.get(cleaner.stop.remote(), timeout=2.0)
    except RayActorError:
        pass
    # PG should still exist after graceful stop
    try:
        # If PG exists, this should work
        remove_placement_group(pg)
    except Exception as e:
        pytest.fail(f"Placement group should still exist after graceful stop: {e}")
    finally:
        # Clean up the mock controller; it may exit itself via exit_actor().
        try:
            ray.get(controller.shutdown.remote())
        except RayActorError:
            pass
def test_pg_cleaner_cleans_up_on_controller_death(monitoring_started_signal):
    """Test that the PG cleaner removes the PG when the controller dies.

    The original version slept a fixed 1.0s and then silently tolerated a
    missing cleanup (``remove_placement_group`` wrapped in try/except pass),
    so a cleaner that never cleaned up still passed. This version polls the
    placement group state with a bounded deadline and asserts that the PG
    actually reaches REMOVED.
    """
    # Launch cleaner as detached
    cleaner = (
        ray.remote(num_cpus=0)(PlacementGroupCleaner)
        .options(
            name="test_pg_cleaner_cleanup",
            namespace="test",
            lifetime="detached",
            get_if_exists=False,
        )
        .remote(check_interval_s=0.1)
    )
    # Create a mock controller
    controller = MockController.remote()
    controller_id = ray.get(controller.get_actor_id.remote())
    # Create a placement group
    pg = placement_group([{"CPU": 1}], strategy="SPREAD")
    ray.get(pg.ready())
    # Register controller and placement group atomically
    ray.get(cleaner.register_controller_and_placement_group.remote(controller_id, pg))
    cleaner.start_monitoring.remote()
    # Wait deterministically for monitoring to start to avoid flakiness.
    ray.get(monitoring_started_signal.wait.remote())
    # Kill the controller
    ray.kill(controller)
    # Poll until the cleaner detects the death and removes the PG.
    deadline = time.time() + 10.0
    removed = False
    while time.time() < deadline:
        if ray.util.placement_group_table(pg).get("state") == "REMOVED":
            removed = True
            break
        time.sleep(0.1)
    assert removed, "Placement group was not cleaned up after controller death"
    # The cleaner should exit after performing cleanup since it's detached.
    with pytest.raises(RayActorError):
        ray.get(cleaner.start_monitoring.remote(), timeout=2.0)
def test_pg_cleaner_handles_duplicate_start():
    """Test that cleaner handles duplicate start_monitoring calls.

    The original version only invoked ``start_monitoring`` once, so the
    duplicate-start code path (early return when the monitor thread is
    already running) was never exercised. We now call it twice and assert
    both calls report success.
    """
    cleaner = (
        ray.remote(num_cpus=0)(PlacementGroupCleaner)
        .options(
            name="test_pg_cleaner_duplicate",
            namespace="test",
            lifetime="detached",
            get_if_exists=False,
        )
        .remote(check_interval_s=0.1)
    )
    controller = MockController.remote()
    controller_id = ray.get(controller.get_actor_id.remote())
    pg = placement_group([{"CPU": 1}], strategy="SPREAD")
    ray.get(pg.ready())
    ray.get(cleaner.register_controller_and_placement_group.remote(controller_id, pg))
    # Start monitoring twice; the second call must be a no-op that still
    # reports success rather than spawning a second monitor thread.
    assert ray.get(cleaner.start_monitoring.remote())
    assert ray.get(cleaner.start_monitoring.remote())
    # Stop
    try:
        ray.get(cleaner.stop.remote(), timeout=2.0)
    except RayActorError:
        pass
    # Detached cleaner should be gone after stop.
    with pytest.raises(RayActorError):
        ray.get(cleaner.start_monitoring.remote(), timeout=2.0)
# Allow running this test module directly; delegates to pytest with
# verbose, fail-fast flags.
if __name__ == "__main__":
    import sys
    sys.exit(pytest.main(["-v", "-x", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/tests/test_placement_group_cleaner.py",
"license": "Apache License 2.0",
"lines": 160,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/_private/tensor_serialization_utils.py | import warnings
from typing import TYPE_CHECKING, Any, Tuple
if TYPE_CHECKING:
import numpy as np
import torch
class ZeroCopyTensorsWarning(UserWarning):
    """Warning category for unsafe or failed zero-copy tensor
    serialization/deserialization."""

    pass
# Emit each distinct zero-copy warning only once per process to avoid spamming
# logs on every serialized tensor.
warnings.filterwarnings("once", category=ZeroCopyTensorsWarning)
def _zero_copy_tensors_deserializer(
np_array: "np.ndarray", dtype_str: str, shape: Tuple[int, ...], device_str: str
) -> "torch.Tensor":
"""
Reconstructs a torch.Tensor from a zero-copy NumPy byte array.
Args:
np_array: 1D uint8 NumPy array of the original tensor's raw bytes.
dtype_str: Full string representation of the original tensor's dtype (e.g., 'torch.float32').
shape: The original shape of the tensor before serialization.
device_str: String representation of the original device (e.g., 'cpu', 'cuda:0').
Returns:
Reconstructed torch.Tensor on the specified device if successful;
otherwise, returns the input np_array unchanged and issues a warning.
Raises:
ImportError/DeserializationError: If deserialization fails for any reason (e.g., missing PyTorch
dtype mismatch, shape inconsistency, device error, etc.).
"""
try:
import torch
except ImportError as e:
raise ImportError(
"Zero-copy tensor deserialization failed: PyTorch is not installed."
) from e
try:
# Step 1: Convert uint8 numpy array back to torch tensor
uint8_tensor = torch.from_numpy(np_array)
# Step 2: Restore original dtype
dtype_name = dtype_str.split(".")[-1]
if not hasattr(torch, dtype_name):
raise ValueError(f"Invalid or unsupported dtype string: {dtype_str}")
original_dtype = getattr(torch, dtype_name)
# Compute number of bytes per element
dtype_size = torch.tensor([], dtype=original_dtype).element_size()
if np_array.size % dtype_size != 0:
raise ValueError(
f"Byte array size ({np_array.size}) is not divisible by "
f"dtype size ({dtype_size}) for dtype {dtype_str}"
)
# Step 3: Reshape and reinterpret bytes as target dtype
restored_tensor = uint8_tensor.view(original_dtype).reshape(shape)
# Step 4: Move to target device
return restored_tensor.to(device=device_str)
except Exception as e:
from ray._private.serialization import DeserializationError
raise DeserializationError(
f"Failed to deserialize zero-copy tensor from byte array. "
f"Input dtype={dtype_str}, shape={shape}, device={device_str}. "
f"Underlying error: {type(e).__name__}: {e}"
) from e
def zero_copy_tensors_reducer(tensor: "torch.Tensor") -> Tuple[Any, Tuple[Any, ...]]:
    """Pickle serializer for zero-copy serialization of read-only torch.Tensor.

    Serializes the tensor as a NumPy uint8 view of its raw bytes, which lets
    pickle5 transmit the buffer out-of-band without copying. True zero-copy
    requires the input to already be on CPU, detached from the autograd graph,
    and contiguous; otherwise `.detach()`, `.cpu()` and/or `.contiguous()`
    will each make a copy and a warning is issued.

    Args:
        tensor: The torch.Tensor to serialize. Any device/layout is accepted,
            but zero-copy only happens for CPU, detached, contiguous tensors.

    Returns:
        A (deserializer_callable, args_tuple) pair suitable for pickle.
    """
    warnings.warn(
        "Zero-copy tensor serialization is enabled, but it only works safely for read-only tensors "
        "(detached, no gradients, contiguous). Modifiable or non-contiguous tensors may cause data corruption.",
        ZeroCopyTensorsWarning,
        stacklevel=3,
    )
    import torch

    # Detaching and moving to CPU is a no-op for a detached CPU tensor; for a
    # tensor on an accelerator the .cpu() call copies data to host memory.
    host_tensor = tensor.detach().cpu()
    if not host_tensor.is_contiguous():
        warnings.warn(
            "The input tensor is non-contiguous. A copy will be made to ensure contiguity. "
            "For zero-copy serialization, please ensure the tensor is contiguous before passing it "
            "(e.g., by calling `.contiguous()`).",
            ZeroCopyTensorsWarning,
            stacklevel=3,
        )
        host_tensor = host_tensor.contiguous()

    # Flatten first so scalars (0-d tensors) can be viewed as raw bytes, then
    # reinterpret as uint8 and expose as a NumPy array (shares memory).
    raw_bytes = host_tensor.reshape(-1).view(torch.uint8).numpy()

    return _zero_copy_tensors_deserializer, (
        raw_bytes,
        str(tensor.dtype),
        tuple(tensor.shape),
        str(tensor.device),
    )
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/_private/tensor_serialization_utils.py",
"license": "Apache License 2.0",
"lines": 114,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/train/v2/tests/test_data_resource_cleanup.py | import sys
import time
from unittest.mock import MagicMock, create_autospec
import pytest
import ray
from ray.data._internal.cluster_autoscaler.default_autoscaling_coordinator import (
get_or_create_autoscaling_coordinator,
)
from ray.data._internal.iterator.stream_split_iterator import (
SplitCoordinator,
)
from ray.train.v2._internal.callbacks.datasets import DatasetsCallback
from ray.train.v2._internal.execution.worker_group import (
WorkerGroup,
WorkerGroupContext,
)
from ray.train.v2.tests.util import DummyObjectRefWrapper, create_dummy_run_context
pytestmark = pytest.mark.usefixtures("mock_runtime_context")
def test_datasets_callback_multiple_datasets(ray_start_4_cpus):
    """Test that the DatasetsCallback properly collects the coordinator actors for multiple datasets"""
    # Start worker group
    worker_group_context = WorkerGroupContext(
        run_attempt_id="test",
        train_fn_ref=DummyObjectRefWrapper(lambda: None),
        num_workers=4,
        resources_per_worker={"CPU": 1},
    )
    wg = WorkerGroup.create(
        train_run_context=create_dummy_run_context(),
        worker_group_context=worker_group_context,
    )
    # Create train run context: two datasets configured to be split across
    # workers, one left unsharded.
    NUM_ROWS = 100
    datasets = {
        "sharded_1": ray.data.range(NUM_ROWS),
        "sharded_2": ray.data.range(NUM_ROWS),
        "unsharded": ray.data.range(NUM_ROWS),
    }
    dataset_config = ray.train.DataConfig(datasets_to_split=["sharded_1", "sharded_2"])
    train_run_context = create_dummy_run_context(
        datasets=datasets, dataset_config=dataset_config
    )
    callback = DatasetsCallback(train_run_context)
    callback.before_init_train_context(wg.get_workers())
    # Two coordinator actors, one for each sharded dataset; the unsharded
    # dataset should not get a split coordinator.
    coordinator_actors = callback._coordinator_actors
    assert len(coordinator_actors) == 2
def test_after_worker_group_abort():
    """after_worker_group_abort must ask each tracked SplitCoordinator to
    shut down its data executor."""
    callback = DatasetsCallback(create_dummy_run_context())

    # Stub SplitCoordinator.shutdown_executor.remote so the call is observable.
    coordinator = create_autospec(SplitCoordinator)
    shutdown_remote = MagicMock()
    coordinator.shutdown_executor.remote = shutdown_remote
    callback._coordinator_actors = [coordinator]

    wg_context = WorkerGroupContext(
        run_attempt_id="test",
        train_fn_ref=DummyObjectRefWrapper(lambda: None),
        num_workers=4,
        resources_per_worker={"CPU": 1},
    )
    callback.after_worker_group_abort(wg_context)

    # Exactly one shutdown request should have been issued.
    shutdown_remote.assert_called_once()
def test_after_worker_group_shutdown():
    """after_worker_group_shutdown must ask each tracked SplitCoordinator to
    shut down its data executor."""
    callback = DatasetsCallback(create_dummy_run_context())

    # Stub SplitCoordinator.shutdown_executor.remote so the call is observable.
    coordinator = create_autospec(SplitCoordinator)
    shutdown_remote = MagicMock()
    coordinator.shutdown_executor.remote = shutdown_remote
    callback._coordinator_actors = [coordinator]

    wg_context = WorkerGroupContext(
        run_attempt_id="test",
        train_fn_ref=DummyObjectRefWrapper(lambda: None),
        num_workers=4,
        resources_per_worker={"CPU": 1},
    )
    callback.after_worker_group_shutdown(wg_context)

    # Exactly one shutdown request should have been issued.
    shutdown_remote.assert_called_once()
def test_split_coordinator_shutdown_executor(ray_start_4_cpus):
    """Tests that the SplitCoordinator properly requests resources for the data executor and cleans up after it is shutdown"""
    def get_ongoing_requests(coordinator, timeout=3.0):
        """Retrieve ongoing requests from the AutoscalingCoordinator.

        Polls every 50ms until a non-empty snapshot appears or `timeout`
        elapses; returns the last (possibly empty) snapshot.
        """
        deadline = time.time() + timeout
        requests = {}
        while time.time() < deadline:
            requests = ray.get(
                coordinator.__ray_call__.remote(lambda c: dict(c._ongoing_reqs))
            )
            if requests:
                break
            time.sleep(0.05)
        return requests
    # Start coordinator and executor
    NUM_SPLITS = 1
    dataset = ray.data.range(100)
    coord = SplitCoordinator.options(name="test_split_coordinator").remote(
        dataset, NUM_SPLITS, None
    )
    ray.get(coord.start_epoch.remote(0))
    # Explicitly trigger autoscaling so a resource request is registered
    # without waiting on the executor's own scaling cadence.
    ray.get(
        coord.__ray_call__.remote(
            lambda coord: coord._executor._cluster_autoscaler.try_trigger_scaling()
        )
    )
    # Collect requests from the AutoscalingCoordinator
    coordinator = get_or_create_autoscaling_coordinator()
    requests = get_ongoing_requests(coordinator)
    # One request made (V2 registers with the coordinator)
    assert len(requests) == 1
    requester_id = list(requests.keys())[0]
    assert requester_id.startswith("data-")
    # Shutdown data executor
    ray.get(coord.shutdown_executor.remote())
    # Verify that the request is cancelled (removed from ongoing requests)
    requests = ray.get(coordinator.__ray_call__.remote(lambda c: dict(c._ongoing_reqs)))
    assert len(requests) == 0, "Resource request was not cancelled"
# Allow running this test module directly via pytest.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", "-x", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/tests/test_data_resource_cleanup.py",
"license": "Apache License 2.0",
"lines": 120,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/autoscaler/v2/instance_manager/subscribers/cloud_resource_monitor.py | import logging
import time
from typing import Dict, List
from ray.autoscaler.v2.instance_manager.instance_manager import (
InstanceUpdatedSubscriber,
)
from ray.autoscaler.v2.schema import NodeType
from ray.core.generated.instance_manager_pb2 import Instance, InstanceUpdateEvent
logger = logging.getLogger(__name__)
class CloudResourceMonitor(InstanceUpdatedSubscriber):
    """CloudResourceMonitor records the availability of all node types.

    In the Spot scenario, the resources in the cluster change dynamically.
    When scaling up, it is necessary to know which node types are most
    likely to have resources, in order to decide which type of node to request.

    During scaling up, if a resource of a node type is requested but fails to
    allocate, that type is considered unavailable at that timestamp. This class
    records the last timestamp at which a node type was unavailable, allowing
    the autoscaler to de-prioritize such node types when making future scaling
    decisions.
    """

    def __init__(
        self,
    ) -> None:
        # Maps node type -> unix timestamp of its most recent failed allocation.
        self._last_unavailable_timestamp: Dict[NodeType, float] = {}

    def allocation_timeout(self, failed_event: InstanceUpdateEvent) -> None:
        """Record that `failed_event.instance_type` just failed to allocate."""
        unavailable_timestamp = time.time()
        self._last_unavailable_timestamp[
            failed_event.instance_type
        ] = unavailable_timestamp
        # Log message typos fixed ("feature schedules" -> "future schedules").
        logger.info(
            f"Cloud Resource Type {failed_event.instance_type} is "
            f"unavailable at timestamp={unavailable_timestamp}. "
            f"We will lower its priority in future schedules."
        )

    def allocation_succeeded(self, succeeded_event: InstanceUpdateEvent) -> None:
        """Clear the unavailability record for a type that allocated successfully."""
        if succeeded_event.instance_type in self._last_unavailable_timestamp:
            self._last_unavailable_timestamp.pop(succeeded_event.instance_type)
            # Log message grammar/typos fixed ("We will higher its priority in
            # feature schedules" -> "We will raise its priority in future
            # schedules").
            logger.info(
                f"Cloud Resource Type {succeeded_event.instance_type} is "
                f"available at timestamp={time.time()}. We will raise its priority in "
                f"future schedules."
            )

    def notify(self, events: List[InstanceUpdateEvent]) -> None:
        """Dispatch instance update events to the success/failure handlers."""
        for event in events:
            if event.new_instance_status == Instance.ALLOCATION_TIMEOUT:
                self.allocation_timeout(event)
            elif (
                event.new_instance_status == Instance.RAY_RUNNING
                and event.instance_type
            ):
                self.allocation_succeeded(event)

    def get_resource_availabilities(self) -> Dict[NodeType, float]:
        """Calculate the availability scores of node types.

        Higher values indicate a higher likelihood of resource allocation.
        Only node types with a recorded failure appear in the result; the
        most recently failed type scores 0.0 and earlier failures score
        progressively higher (as 1 - ts / max_ts).
        """
        resource_availability_scores: Dict[NodeType, float] = {}
        if self._last_unavailable_timestamp:
            max_ts = max(self._last_unavailable_timestamp.values())
            # Iterate items() directly instead of key-then-lookup.
            for node_type, ts in self._last_unavailable_timestamp.items():
                resource_availability_scores[node_type] = 1 - ts / max_ts
        return resource_availability_scores
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/autoscaler/v2/instance_manager/subscribers/cloud_resource_monitor.py",
"license": "Apache License 2.0",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/train/v2/tests/test_data_config.py | from ray.data._internal.execution.interfaces.execution_options import (
ExecutionOptions,
)
from ray.train import DataConfig
def test_per_dataset_execution_options_single(ray_start_4_cpus):
    """A single ExecutionOptions object must apply to every dataset name."""
    opts = ExecutionOptions()
    opts.preserve_order = True
    opts.verbose_progress = True
    config = DataConfig(execution_options=opts)

    # Any dataset name should resolve to the same configured options.
    for ds_name in ("train", "test", "val"):
        resolved = config._get_execution_options(ds_name)
        assert resolved.preserve_order is True
        assert resolved.verbose_progress is True
def test_per_dataset_execution_options_dict(ray_start_4_cpus):
    """Test that a dict of ExecutionOptions maps to specific datasets, and datasets
    not in the dict get default ingest options. Also tests resource limits."""
    # Create different execution options for different datasets
    train_options = ExecutionOptions()
    train_options.preserve_order = True
    train_options.verbose_progress = True
    train_options.resource_limits = train_options.resource_limits.copy(cpu=4, gpu=2)
    test_options = ExecutionOptions()
    test_options.preserve_order = False
    test_options.verbose_progress = False
    test_options.resource_limits = test_options.resource_limits.copy(cpu=2, gpu=1)
    execution_options_dict = {
        "train": train_options,
        "test": test_options,
    }
    data_config = DataConfig(execution_options=execution_options_dict)
    # Verify that each dataset in the dict gets its specific options
    retrieved_train_options = data_config._get_execution_options("train")
    retrieved_test_options = data_config._get_execution_options("test")
    assert retrieved_train_options.preserve_order is True
    assert retrieved_train_options.verbose_progress is True
    assert retrieved_test_options.preserve_order is False
    assert retrieved_test_options.verbose_progress is False
    # Verify resource limits
    assert retrieved_train_options.resource_limits.cpu == 4
    assert retrieved_train_options.resource_limits.gpu == 2
    assert retrieved_test_options.resource_limits.cpu == 2
    assert retrieved_test_options.resource_limits.gpu == 1
    # Verify that a dataset not in the dict ("val") falls back to the
    # module-level default ingest options, including resource limits.
    default_options = DataConfig.default_ingest_options()
    retrieved_val_options = data_config._get_execution_options("val")
    assert retrieved_val_options.preserve_order == default_options.preserve_order
    assert retrieved_val_options.verbose_progress == default_options.verbose_progress
    assert (
        retrieved_val_options.resource_limits.cpu == default_options.resource_limits.cpu
    )
    assert (
        retrieved_val_options.resource_limits.gpu == default_options.resource_limits.gpu
    )
def test_per_dataset_execution_options_default(ray_start_4_cpus):
    """Passing None or an empty dict for execution_options must make every
    dataset fall back to the default ingest options."""
    defaults = DataConfig.default_ingest_options()

    # Both "no options" spellings should behave identically.
    for execution_options in (None, {}):
        config = DataConfig(execution_options=execution_options)
        for ds_name in ("train", "test"):
            resolved = config._get_execution_options(ds_name)
            assert resolved.preserve_order == defaults.preserve_order
# Allow running this test module directly via pytest.
if __name__ == "__main__":
    import sys
    import pytest
    sys.exit(pytest.main(["-v", "-x", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/tests/test_data_config.py",
"license": "Apache License 2.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/namespace_expressions/dt_namespace.py | from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING, Callable, Literal
import pyarrow
import pyarrow.compute as pc
from ray.data.datatype import DataType
from ray.data.expressions import pyarrow_udf
if TYPE_CHECKING:
from ray.data.expressions import Expr, UDFExpr
# Temporal granularities accepted by the dt namespace's ceil/floor/round
# helpers; these are the `unit` values passed through to pyarrow.compute's
# *_temporal kernels.
TemporalUnit = Literal[
    "year",
    "quarter",
    "month",
    "week",
    "day",
    "hour",
    "minute",
    "second",
    "millisecond",
    "microsecond",
    "nanosecond",
]
@dataclass
class _DatetimeNamespace:
    """Datetime namespace for operations on datetime-typed expression columns.

    Wraps an expression and exposes component extraction (year/month/...),
    strftime formatting, and temporal rounding, each implemented as a
    pyarrow-compute UDF expression.
    """

    # The wrapped expression all namespace operations apply to.
    _expr: "Expr"

    def _unary_temporal_int(
        self, func: Callable[[pyarrow.Array], pyarrow.Array]
    ) -> "UDFExpr":
        """Helper for year/month/… that return int32."""
        # A fresh UDF is created per call and bound to `func` via closure.
        @pyarrow_udf(return_dtype=DataType.int32())
        def _udf(arr: pyarrow.Array) -> pyarrow.Array:
            return func(arr)
        return _udf(self._expr)
    # extractors
    def year(self) -> "UDFExpr":
        """Extract year component."""
        return self._unary_temporal_int(pc.year)
    def month(self) -> "UDFExpr":
        """Extract month component."""
        return self._unary_temporal_int(pc.month)
    def day(self) -> "UDFExpr":
        """Extract day component."""
        return self._unary_temporal_int(pc.day)
    def hour(self) -> "UDFExpr":
        """Extract hour component."""
        return self._unary_temporal_int(pc.hour)
    def minute(self) -> "UDFExpr":
        """Extract minute component."""
        return self._unary_temporal_int(pc.minute)
    def second(self) -> "UDFExpr":
        """Extract second component."""
        return self._unary_temporal_int(pc.second)
    # formatting
    def strftime(self, fmt: str) -> "UDFExpr":
        """Format timestamps with a strftime pattern."""
        @pyarrow_udf(return_dtype=DataType.string())
        def _format(arr: pyarrow.Array) -> pyarrow.Array:
            return pc.strftime(arr, format=fmt)
        return _format(self._expr)
    # rounding — each preserves the input expression's data type.
    def ceil(self, unit: TemporalUnit) -> "UDFExpr":
        """Ceil timestamps to the next multiple of the given unit."""
        return_dtype = self._expr.data_type
        @pyarrow_udf(return_dtype=return_dtype)
        def _ceil(arr: pyarrow.Array) -> pyarrow.Array:
            return pc.ceil_temporal(arr, multiple=1, unit=unit)
        return _ceil(self._expr)
    def floor(self, unit: TemporalUnit) -> "UDFExpr":
        """Floor timestamps to the previous multiple of the given unit."""
        return_dtype = self._expr.data_type
        @pyarrow_udf(return_dtype=return_dtype)
        def _floor(arr: pyarrow.Array) -> pyarrow.Array:
            return pc.floor_temporal(arr, multiple=1, unit=unit)
        return _floor(self._expr)
    def round(self, unit: TemporalUnit) -> "UDFExpr":
        """Round timestamps to the nearest multiple of the given unit."""
        return_dtype = self._expr.data_type
        @pyarrow_udf(return_dtype=return_dtype)
        def _round(arr: pyarrow.Array) -> pyarrow.Array:
            return pc.round_temporal(arr, multiple=1, unit=unit)
        return _round(self._expr)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/namespace_expressions/dt_namespace.py",
"license": "Apache License 2.0",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:release/nightly_tests/chaos_test/actor_workload.py | import ray
from ray._common.test_utils import wait_for_condition
from ray.data._internal.progress.progress_bar import ProgressBar
from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy
def run_actor_workload(total_num_cpus, smoke):
    """Run actor-based workload.

    The test checks if actor restart -1 and task_retries -1 works
    as expected. It basically requires many actors to report the
    seqno to the centralized DB actor while there are failures.
    If at least once is guaranteed upon failures, this test
    shouldn't fail.

    Args:
        total_num_cpus: Number of ReportActor/DBActor pairs to create.
        smoke: When truthy, runs half the number of tasks for a quicker run.
    """
    @ray.remote(num_cpus=0, max_task_retries=-1)
    class DBActor:
        # Centralized sink; max_task_retries=-1 gives unlimited retries of
        # add() calls, providing at-least-once delivery under failures.
        def __init__(self):
            # NOTE(review): named "letter_dict" but is actually a set of
            # seqno strings.
            self.letter_dict = set()
        def add(self, letter):
            self.letter_dict.add(letter)
        def get(self):
            return self.letter_dict
    @ray.remote(num_cpus=1, max_restarts=-1, max_task_retries=-1)
    class ReportActor:
        # Infinitely restartable worker that forwards seqnos to its DBActor.
        def __init__(self, db_actor):
            self.db_actor = db_actor
        def add(self, letter):
            ray.get(self.db_actor.add.remote(letter))
    NUM_CPUS = int(total_num_cpus)
    multiplier = 2
    # For smoke mode, run fewer tasks
    if smoke:
        multiplier = 1
    TOTAL_TASKS = int(300 * multiplier)
    head_node_id = ray.get_runtime_context().get_node_id()
    # Pin DB actors to the head node (soft=False) so chaos-killed worker
    # nodes cannot take the sink actors down with them.
    db_actors = [
        DBActor.options(
            scheduling_strategy=NodeAffinitySchedulingStrategy(
                node_id=head_node_id, soft=False
            )
        ).remote()
        for _ in range(NUM_CPUS)
    ]
    pb = ProgressBar("Chaos test", TOTAL_TASKS * NUM_CPUS, "task")
    actors = []
    for db_actor in db_actors:
        actors.append(ReportActor.remote(db_actor))
    results = []
    # Every submitted task reports a globally unique sequence number.
    highest_reported_num = 0
    for a in actors:
        for _ in range(TOTAL_TASKS):
            results.append(a.add.remote(str(highest_reported_num)))
            highest_reported_num += 1
    pb.fetch_until_complete(results)
    pb.close()
    for actor in actors:
        ray.kill(actor)
    # Consistency check
    # Wait for killed actors' CPU resources to be fully returned first.
    wait_for_condition(
        lambda: (
            ray.cluster_resources().get("CPU", 0)
            == ray.available_resources().get("CPU", 0)
        ),
        timeout=60,
    )
    letter_set = set()
    for db_actor in db_actors:
        letter_set.update(ray.get(db_actor.get.remote()))
    # Make sure the DB actor didn't lose any report.
    # If this assert fails, that means at least once actor task semantic
    # wasn't guaranteed.
    for i in range(highest_reported_num):
        assert str(i) in letter_set, i
| {
"repo_id": "ray-project/ray",
"file_path": "release/nightly_tests/chaos_test/actor_workload.py",
"license": "Apache License 2.0",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:release/nightly_tests/chaos_test/object_ref_borrowing_workload.py | import numpy as np
import ray
from ray._common.test_utils import wait_for_condition
def run_object_ref_borrowing_workload(total_num_cpus, smoke):
    """Run object ref borrowing workload.

    This test checks that borrowed refs remain valid even with node
    failures or transient network failures.
    """

    @ray.remote(num_cpus=1, max_retries=-1)
    def create_object(size_mb):
        return np.zeros(size_mb * 1024 * 1024, dtype=np.uint8)

    @ray.remote(num_cpus=1, max_retries=-1)
    def borrow_object(borrowed_refs):
        # Dereferencing a ref passed inside a container makes this task a
        # "borrower" of the object.
        return len(ray.get(borrowed_refs[0]))

    # For smoke mode, run fewer iterations
    num_iterations = 10 if smoke else 2000
    object_size_mb = 10

    print(f"Starting borrowing test with {num_iterations * 2} total tasks")
    print(f"Object size: {object_size_mb}MB per object")
    print(f"Expected total data: {num_iterations * 2 * object_size_mb / 1024:.2f} GB")

    total_completed = 0
    total_bytes = 0

    # Phase 1: sequential borrow round-trips.
    for _ in range(num_iterations):
        created = create_object.remote(object_size_mb)
        total_bytes += ray.get(borrow_object.remote([created]))
        total_completed += 1

    # Phase 2: submit every borrow up front, then gather all results.
    pending = [
        borrow_object.remote([create_object.remote(object_size_mb)])
        for _ in range(num_iterations)
    ]
    sizes = ray.get(pending)
    total_completed += len(sizes)
    total_bytes += sum(sizes)

    print("All tasks completed:")
    print(f" Total tasks completed: {total_completed} (expected {num_iterations * 2})")
    print(f" Total data processed: {total_bytes / (1024**3):.2f} GB")

    expected_tasks = num_iterations * 2
    assert (
        total_completed == expected_tasks
    ), f"Expected {expected_tasks} completions, got {total_completed}"

    expected_bytes = expected_tasks * object_size_mb * 1024 * 1024
    assert (
        total_bytes == expected_bytes
    ), f"Expected {expected_bytes} bytes, got {total_bytes}"

    # Consistency check
    wait_for_condition(
        lambda: (
            ray.cluster_resources().get("CPU", 0)
            == ray.available_resources().get("CPU", 0)
        ),
        timeout=60,
    )
| {
"repo_id": "ray-project/ray",
"file_path": "release/nightly_tests/chaos_test/object_ref_borrowing_workload.py",
"license": "Apache License 2.0",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:release/nightly_tests/chaos_test/streaming_generator_workload.py | import numpy as np
import ray
from ray._common.test_utils import wait_for_condition
from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy
def run_streaming_generator_workload(total_num_cpus, smoke):
    """Run streaming generator workload.

    Spreads streaming generators across the nodes to ensure
    chaos events affect the generators. Tests that streaming generators
    work correctly with retries when there are node failures or transient
    network failures.
    """

    @ray.remote(num_cpus=1, max_retries=-1)
    def streaming_generator(num_items, item_size_mb):
        for _ in range(num_items):
            yield np.zeros(item_size_mb * 1024 * 1024, dtype=np.uint8)

    @ray.remote(num_cpus=1, max_retries=-1)
    def consume_streaming_generator(num_items, item_size_mb):
        gen = streaming_generator.remote(num_items, item_size_mb)
        consumed = 0
        consumed_bytes = 0
        for item_ref in gen:
            item = ray.get(item_ref)
            consumed += 1
            consumed_bytes += item.nbytes
        return (consumed, consumed_bytes)

    # Get alive nodes to distribute generators across the cluster
    alive_nodes = [node for node in ray.nodes() if node.get("Alive", False)]
    num_generators = 2 * len(alive_nodes)
    # For smoke mode, run fewer items
    items_per_generator = 10 if smoke else 500
    item_size_mb = 10

    print(
        f"Starting {num_generators} concurrent streaming generators "
        f"({items_per_generator} items of {item_size_mb}MB each)"
    )
    print(
        f"Expected total data: "
        f"{num_generators * items_per_generator * item_size_mb / 1024:.2f} GB"
    )

    # Distribute generators across nodes to maximize chaos impact
    tasks = []
    for idx in range(num_generators):
        target_node = alive_nodes[idx % len(alive_nodes)]
        tasks.append(
            consume_streaming_generator.options(
                scheduling_strategy=NodeAffinitySchedulingStrategy(
                    node_id=target_node["NodeID"], soft=True
                )
            ).remote(items_per_generator, item_size_mb)
        )

    results = ray.get(tasks)
    total_items = sum(count for count, _ in results)
    total_bytes = sum(nbytes for _, nbytes in results)

    print("All generators completed:")
    print(
        f" Total items: {total_items} (expected {num_generators * items_per_generator})"
    )
    print(f" Total data: {total_bytes / (1024**3):.2f} GB")

    assert (
        total_items == num_generators * items_per_generator
    ), f"Expected {num_generators * items_per_generator} items, got {total_items}"

    # Consistency check
    wait_for_condition(
        lambda: (
            ray.cluster_resources().get("CPU", 0)
            == ray.available_resources().get("CPU", 0)
        ),
        timeout=60,
    )
| {
"repo_id": "ray-project/ray",
"file_path": "release/nightly_tests/chaos_test/streaming_generator_workload.py",
"license": "Apache License 2.0",
"lines": 73,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:release/nightly_tests/chaos_test/task_workload.py | import random
import string
import time
import numpy as np
import ray
from ray._common.test_utils import wait_for_condition
from ray.data._internal.progress.progress_bar import ProgressBar
def run_task_workload(total_num_cpus, smoke):
    """Run task-based workload that doesn't require object reconstruction."""

    @ray.remote(num_cpus=1, max_retries=-1)
    def task():
        def generate_data(size_in_kb=10):
            return np.zeros(1024 * size_in_kb, dtype=np.uint8)

        # Deliberate CPU burn: build a long string character by character.
        a = ""
        for _ in range(100000):
            a = a + random.choice(string.ascii_letters)
        return generate_data(size_in_kb=50)

    @ray.remote(num_cpus=1, max_retries=-1)
    def invoke_nested_task():
        time.sleep(0.8)
        return ray.get(task.remote())

    # For smoke mode, run fewer tasks
    num_tasks = int(total_num_cpus * 2 * (1 if smoke else 75))

    progress = ProgressBar("Chaos test", num_tasks, "task")
    pending = [invoke_nested_task.remote() for _ in range(num_tasks)]
    progress.block_until_complete(pending)
    progress.close()

    # Consistency check.
    wait_for_condition(
        lambda: (
            ray.cluster_resources().get("CPU", 0)
            == ray.available_resources().get("CPU", 0)
        ),
        timeout=60,
    )
| {
"repo_id": "ray-project/ray",
"file_path": "release/nightly_tests/chaos_test/task_workload.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:release/nightly_tests/chaos_test/test_chaos.py | import argparse
import json
import logging
import os
import time
import ray
from ray._private.test_utils import monitor_memory_usage
from task_workload import run_task_workload
from actor_workload import run_actor_workload
from streaming_generator_workload import run_streaming_generator_workload
from object_ref_borrowing_workload import run_object_ref_borrowing_workload
def parse_script_args():
    """Parse the chaos-test CLI flags.

    Returns:
        Tuple of (parsed known args, list of unrecognized argv tokens).
    """
    parser = argparse.ArgumentParser()
    for flag, kwargs in (
        ("--node-kill-interval", {"type": int, "default": 60}),
        ("--workload", {"type": str}),
        ("--smoke", {"action": "store_true"}),
        ("--disable-resource-killer", {"action": "store_true"}),
    ):
        parser.add_argument(flag, **kwargs)
    return parser.parse_known_args()
def main():
    """Test task/actor/streaming generator/object ref borrowing chaos test.

    It tests the following scenarios:
    1. Raylet failures: Done by an actor calling Raylet's Shutdown RPC.
    2. EC2 instance termination: Done by an actor terminating
       EC2 instances via AWS SDK.
    3. Network failures: Done by injecting network failures via iptables or env variables.

    Writes a result JSON (runtime + peak memory perf metrics) to the path
    named by the TEST_OUTPUT_JSON environment variable.
    """
    args, _ = parse_script_args()
    logging.info("Received arguments: {}".format(args))
    # Attach to the already-running cluster started by the release-test harness.
    ray.init(address="auto")
    total_num_cpus = ray.cluster_resources()["CPU"]
    # Background monitor that records peak memory for the report below.
    monitor_actor = monitor_memory_usage()

    # Select the workload based on the argument
    workload = None
    if args.workload == "tasks":
        workload = run_task_workload
    elif args.workload == "actors":
        workload = run_actor_workload
    elif args.workload == "streaming":
        workload = run_streaming_generator_workload
    elif args.workload == "borrowing":
        workload = run_object_ref_borrowing_workload
    else:
        # Unknown --workload value is a test-configuration error.
        assert False

    node_killer = None
    if args.disable_resource_killer:
        print("ResourceKiller disabled")
    else:
        # The ResourceKiller actor is created elsewhere by the harness; it
        # injects failures while the workload runs.
        node_killer = ray.get_actor(
            "ResourceKiller", namespace="release_test_namespace"
        )
        node_killer.run.remote()
        print("ResourceKiller started")

    start = time.time()
    workload(total_num_cpus, args.smoke)
    runtime_s = time.time() - start
    runtime_s = round(runtime_s, 2)
    print(f"Total runtime: {runtime_s}")

    if node_killer is not None:
        node_killer.stop_run.remote()
        print(f"Total node failures: {ray.get(node_killer.get_total_killed.remote())}")

    used_gb, usage = ray.get(monitor_actor.get_peak_memory_info.remote())
    used_gb = round(used_gb, 2)
    print("Memory usage with failures.")
    print(f"Peak memory usage: {used_gb}GB")
    print(f"Peak memory usage per processes:\n {usage}")
    ray.get(monitor_actor.stop_run.remote())

    # Emit results in the schema the release-test tooling expects.
    results = {
        "time": runtime_s,
        "_peak_memory": used_gb,
        "_peak_process_memory": usage,
    }
    results["perf_metrics"] = [
        {
            "perf_metric_name": f"chaos_{args.workload}_runtime_s",
            "perf_metric_value": runtime_s,
            "perf_metric_type": "LATENCY",
        },
        {
            "perf_metric_name": f"chaos_{args.workload}_peak_memory_gb",
            "perf_metric_value": used_gb,
            "perf_metric_type": "MEMORY",
        },
    ]
    with open(os.environ["TEST_OUTPUT_JSON"], "w") as f:
        json.dump(results, f)
main()
| {
"repo_id": "ray-project/ray",
"file_path": "release/nightly_tests/chaos_test/test_chaos.py",
"license": "Apache License 2.0",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/train/v2/tests/test_jax_gpu.py | import sys
import pytest
from ray.train import RunConfig, ScalingConfig
from ray.train.v2._internal.constants import (
HEALTH_CHECK_INTERVAL_S_ENV_VAR,
is_v2_enabled,
)
from ray.train.v2.jax import JaxTrainer
assert is_v2_enabled()
@pytest.fixture(autouse=True)
def reduce_health_check_interval(monkeypatch):
    """Shorten the worker health-check interval to 0.2s for every test here."""
    monkeypatch.setenv(HEALTH_CHECK_INTERVAL_S_ENV_VAR, "0.2")
    yield
@pytest.mark.skipif(sys.platform == "darwin", reason="JAX GPU not supported on macOS")
@pytest.mark.skipif(
    sys.version_info >= (3, 12),
    reason="Current jax version is not supported in python 3.12+",
)
def test_jax_distributed_gpu_training(ray_start_4_cpus_2_gpus, tmp_path):
    """Test multi-GPU JAX distributed training.

    This test verifies that JAX distributed initialization works correctly
    across multiple GPU workers and that they can coordinate.
    """

    def train_func():
        # Imports live inside the train function since it executes on the
        # remote training workers, not in the driver process.
        import jax

        from ray import train

        # Get JAX distributed info
        devices = jax.devices()
        world_rank = train.get_context().get_world_rank()
        world_size = train.get_context().get_world_size()

        # Verify distributed setup
        assert world_size == 2, f"Expected world size 2, got {world_size}"
        assert world_rank in [0, 1], f"Invalid rank {world_rank}"
        assert len(devices) == 2, f"Expected 2 devices, got {len(devices)}"

        train.report(
            {
                "world_rank": world_rank,
                "world_size": world_size,
                "num_devices": len(devices),
            }
        )

    trainer = JaxTrainer(
        train_func,
        scaling_config=ScalingConfig(num_workers=2, use_gpu=True),
        run_config=RunConfig(storage_path=str(tmp_path)),
    )
    result = trainer.fit()
    # A successful run surfaces no error on the result object.
    assert result.error is None
if __name__ == "__main__":
    # ``sys`` is already imported at module scope; the local re-import was
    # redundant and has been removed.
    sys.exit(pytest.main(["-v", "-x", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/tests/test_jax_gpu.py",
"license": "Apache License 2.0",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:doc/source/serve/doc_code/external_scaler_predictive.py | # __serve_example_begin__
import time
from ray import serve
from typing import Any
@serve.deployment(num_replicas=3)
class TextProcessor:
    """A simple text processing deployment that can be scaled externally."""

    def __init__(self):
        # Per-replica count of requests handled so far.
        self.request_count = 0

    def __call__(self, text: Any) -> dict:
        # Simulate text processing work
        time.sleep(0.1)
        self.request_count += 1
        return {
            "request_count": self.request_count,
        }


# Build the Serve application graph; `serve.run(app)` deploys it.
app = TextProcessor.bind()
# __serve_example_end__
def main():
    """Deploy the example app locally and issue one smoke-test request."""
    import requests

    serve.run(app)
    # Test the deployment
    response = requests.post("http://localhost:8000/", json="hello world")
    print(f"Response: {response.json()}")
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/doc_code/external_scaler_predictive.py",
"license": "Apache License 2.0",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/serve/doc_code/external_scaler_predictive_client.py | # __client_script_begin__
import logging
import time
from datetime import datetime
import requests
# Deployment coordinates and scaler tuning knobs.
APPLICATION_NAME = "my-app"
DEPLOYMENT_NAME = "TextProcessor"
# Address of the Ray dashboard, which hosts the Serve REST API.
SERVE_ENDPOINT = "http://localhost:8265"
SCALING_INTERVAL = 300  # Check every 5 minutes

logger = logging.getLogger(__name__)
def get_current_replicas(app_name: str, deployment_name: str) -> int:
    """Get current replica count. Returns -1 on error."""
    try:
        response = requests.get(
            f"{SERVE_ENDPOINT}/api/serve/applications/", timeout=10
        )
        if response.status_code != 200:
            logger.error(f"Failed to get applications: {response.status_code}")
            return -1
        applications = response.json().get("applications", {})
        if app_name not in applications:
            logger.error(f"Application {app_name} not found")
            return -1
        deployments = applications[app_name].get("deployments", {})
        if deployment_name not in deployments:
            logger.error(f"Deployment {deployment_name} not found")
            return -1
        return deployments[deployment_name]["target_num_replicas"]
    except requests.exceptions.RequestException as e:
        logger.error(f"Request failed: {e}")
        return -1
def scale_deployment(app_name: str, deployment_name: str):
    """Scale deployment based on time of day."""
    hour = datetime.now().hour
    current = get_current_replicas(app_name, deployment_name)
    # Check if we successfully retrieved the current replica count
    if current == -1:
        logger.error("Failed to get current replicas, skipping scaling decision")
        return

    desired = 10 if 9 <= hour < 17 else 3  # Peak hours: 9am-5pm
    if desired == current:
        logger.info(f"Already at target ({current} replicas)")
        return

    change = desired - current
    action = "Adding" if change > 0 else "Removing"
    logger.info(f"{action} {abs(change)} replicas ({current} -> {desired})")

    try:
        response = requests.post(
            f"{SERVE_ENDPOINT}/api/v1/applications/{app_name}/deployments/{deployment_name}/scale",
            headers={"Content-Type": "application/json"},
            json={"target_num_replicas": desired},
            timeout=10,
        )
    except requests.exceptions.RequestException as e:
        logger.error(f"Request failed: {e}")
        return

    if response.status_code == 200:
        logger.info("Successfully scaled deployment")
    else:
        logger.error(f"Scale failed: {response.status_code} - {response.text}")
def main():
    """Run the predictive scaler loop: one scaling decision per interval."""
    target = f"{APPLICATION_NAME}/{DEPLOYMENT_NAME}"
    logger.info(f"Starting predictive scaling for {target}")
    while True:
        scale_deployment(APPLICATION_NAME, DEPLOYMENT_NAME)
        time.sleep(SCALING_INTERVAL)
# __client_script_end__
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/doc_code/external_scaler_predictive_client.py",
"license": "Apache License 2.0",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/serve/tests/test_deployment_topology.py | import asyncio
import sys
import pytest
import requests
from fastapi import FastAPI
import ray
from ray import serve
from ray._common.test_utils import SignalActor, wait_for_condition
from ray.serve.api import get_deployment_handle
from ray.serve.context import _get_global_client
from ray.serve.handle import DeploymentHandle
from ray.serve.schema import ApplicationStatus
class TestDeploymentTopology:
def test_simple_chain_topology(self, serve_instance):
    """Test topology with a simple chain: ingress -> downstream.

    Deploys two deployments where the ingress holds a handle to the
    downstream, then checks that the reported deployment topology records
    exactly that one edge.
    """

    # Define a downstream deployment
    @serve.deployment
    class Downstream:
        def __call__(self):
            return "downstream response"

    # Define an ingress deployment that calls downstream
    @serve.deployment
    class Ingress:
        def __init__(self, downstream: DeploymentHandle):
            self.downstream = downstream

        async def __call__(self):
            result = await self.downstream.remote()
            return f"ingress -> {result}"

    # Deploy the application
    downstream = Downstream.bind()
    ingress = Ingress.bind(downstream)
    serve.run(ingress, name="test_app", route_prefix="/test")

    # Make a request to ensure deployments are fully initialized
    response = requests.get("http://localhost:8000/test")
    assert response.status_code == 200
    assert "downstream response" in response.text

    # Define expected topology
    expected_topology = {
        "app_name": "test_app",
        "ingress_deployment": "Ingress",
        "nodes": {
            "Ingress": {
                "name": "Ingress",
                "app_name": "test_app",
                "is_ingress": True,
                "outbound_deployments": [
                    {"name": "Downstream", "app_name": "test_app"}
                ],
            },
            "Downstream": {
                "name": "Downstream",
                "app_name": "test_app",
                "is_ingress": False,
                "outbound_deployments": [],
            },
        },
    }

    # Final verification - get the actual topology
    client = _get_global_client()
    status = client.get_serve_details()
    actual_topology = status["applications"]["test_app"]["deployment_topology"]

    # Direct comparison with expected topology
    assert actual_topology == expected_topology
def test_multi_level_chain_topology(self, serve_instance):
    """Test topology with deep dependencies: Ingress -> A -> B -> C.

    Verifies that each intermediate deployment records exactly one
    outbound edge to the next deployment in the chain.
    """

    @serve.deployment
    class ServiceC:
        def __call__(self):
            return "C"

    @serve.deployment
    class ServiceB:
        def __init__(self, service_c: DeploymentHandle):
            self.service_c = service_c

        async def __call__(self):
            result = await self.service_c.remote()
            return f"B->{result}"

    @serve.deployment
    class ServiceA:
        def __init__(self, service_b: DeploymentHandle):
            self.service_b = service_b

        async def __call__(self):
            result = await self.service_b.remote()
            return f"A->{result}"

    @serve.deployment
    class Ingress:
        def __init__(self, service_a: DeploymentHandle):
            self.service_a = service_a

        async def __call__(self):
            result = await self.service_a.remote()
            return f"Ingress->{result}"

    # Deploy the application
    service_c = ServiceC.bind()
    service_b = ServiceB.bind(service_c)
    service_a = ServiceA.bind(service_b)
    ingress = Ingress.bind(service_a)
    serve.run(ingress, name="test_app", route_prefix="/test")

    # Make a request (exercises the full chain down to ServiceC)
    response = requests.get("http://localhost:8000/test")
    assert response.status_code == 200
    assert "C" in response.text

    # Expected topology
    expected_topology = {
        "app_name": "test_app",
        "ingress_deployment": "Ingress",
        "nodes": {
            "Ingress": {
                "name": "Ingress",
                "app_name": "test_app",
                "is_ingress": True,
                "outbound_deployments": [
                    {"name": "ServiceA", "app_name": "test_app"}
                ],
            },
            "ServiceA": {
                "name": "ServiceA",
                "app_name": "test_app",
                "is_ingress": False,
                "outbound_deployments": [
                    {"name": "ServiceB", "app_name": "test_app"}
                ],
            },
            "ServiceB": {
                "name": "ServiceB",
                "app_name": "test_app",
                "is_ingress": False,
                "outbound_deployments": [
                    {"name": "ServiceC", "app_name": "test_app"}
                ],
            },
            "ServiceC": {
                "name": "ServiceC",
                "app_name": "test_app",
                "is_ingress": False,
                "outbound_deployments": [],
            },
        },
    }

    # Final verification
    client = _get_global_client()
    status = client.get_serve_details()
    actual_topology = status["applications"]["test_app"]["deployment_topology"]
    assert actual_topology == expected_topology
def test_fan_out_topology(self, serve_instance):
    """Test topology with fan-out: Ingress -> [Service1, Service2, Service3].

    The ingress fans out to three independent leaf deployments; its node
    should list all three as outbound edges (order-insensitive, so both
    sides are sorted before comparison).
    """

    @serve.deployment
    class Service1:
        def __call__(self):
            return "service1"

    @serve.deployment
    class Service2:
        def __call__(self):
            return "service2"

    @serve.deployment
    class Service3:
        def __call__(self):
            return "service3"

    @serve.deployment
    class Ingress:
        def __init__(
            self,
            service1: DeploymentHandle,
            service2: DeploymentHandle,
            service3: DeploymentHandle,
        ):
            self.service1 = service1
            self.service2 = service2
            self.service3 = service3

        async def __call__(self):
            results = await asyncio.gather(
                self.service1.remote(),
                self.service2.remote(),
                self.service3.remote(),
            )
            return f"Results: {','.join(results)}"

    # Deploy the application
    service1 = Service1.bind()
    service2 = Service2.bind()
    service3 = Service3.bind()
    ingress = Ingress.bind(service1, service2, service3)
    serve.run(ingress, name="test_app", route_prefix="/test")

    # Make a request
    response = requests.get("http://localhost:8000/test")
    assert response.status_code == 200

    # Expected topology - outbound_deployments should contain all three services
    expected_topology = {
        "app_name": "test_app",
        "ingress_deployment": "Ingress",
        "nodes": {
            "Ingress": {
                "name": "Ingress",
                "app_name": "test_app",
                "is_ingress": True,
                "outbound_deployments": [
                    {"name": "Service1", "app_name": "test_app"},
                    {"name": "Service2", "app_name": "test_app"},
                    {"name": "Service3", "app_name": "test_app"},
                ],
            },
            "Service1": {
                "name": "Service1",
                "app_name": "test_app",
                "is_ingress": False,
                "outbound_deployments": [],
            },
            "Service2": {
                "name": "Service2",
                "app_name": "test_app",
                "is_ingress": False,
                "outbound_deployments": [],
            },
            "Service3": {
                "name": "Service3",
                "app_name": "test_app",
                "is_ingress": False,
                "outbound_deployments": [],
            },
        },
    }

    # Final verification
    client = _get_global_client()
    status = client.get_serve_details()
    actual_topology = status["applications"]["test_app"]["deployment_topology"]

    # Sort for comparison (edge ordering is not guaranteed by the API)
    actual_topology["nodes"]["Ingress"]["outbound_deployments"] = sorted(
        actual_topology["nodes"]["Ingress"]["outbound_deployments"],
        key=lambda x: x["name"],
    )
    expected_topology["nodes"]["Ingress"]["outbound_deployments"] = sorted(
        expected_topology["nodes"]["Ingress"]["outbound_deployments"],
        key=lambda x: x["name"],
    )
    assert actual_topology == expected_topology
def test_diamond_topology(self, serve_instance):
    """Test diamond pattern: Ingress -> [ServiceA, ServiceB] -> Database.

    Both middle services share one downstream Database deployment; the
    topology should show the shared node once with two inbound paths.
    """

    @serve.deployment
    class Database:
        def __call__(self):
            return "db_data"

    @serve.deployment
    class ServiceA:
        def __init__(self, database: DeploymentHandle):
            self.database = database

        async def __call__(self):
            data = await self.database.remote()
            return f"A:{data}"

    @serve.deployment
    class ServiceB:
        def __init__(self, database: DeploymentHandle):
            self.database = database

        async def __call__(self):
            data = await self.database.remote()
            return f"B:{data}"

    @serve.deployment
    class Ingress:
        def __init__(
            self, service_a: DeploymentHandle, service_b: DeploymentHandle
        ):
            self.service_a = service_a
            self.service_b = service_b

        async def __call__(self):
            results = await asyncio.gather(
                self.service_a.remote(),
                self.service_b.remote(),
            )
            return f"Results: {','.join(results)}"

    # Deploy the application (both branches bind the same Database node)
    database = Database.bind()
    service_a = ServiceA.bind(database)
    service_b = ServiceB.bind(database)
    ingress = Ingress.bind(service_a, service_b)
    serve.run(ingress, name="test_app", route_prefix="/test")

    # Make a request
    response = requests.get("http://localhost:8000/test")
    assert response.status_code == 200

    # Expected topology
    expected_topology = {
        "app_name": "test_app",
        "ingress_deployment": "Ingress",
        "nodes": {
            "Ingress": {
                "name": "Ingress",
                "app_name": "test_app",
                "is_ingress": True,
                "outbound_deployments": [
                    {"name": "ServiceA", "app_name": "test_app"},
                    {"name": "ServiceB", "app_name": "test_app"},
                ],
            },
            "ServiceA": {
                "name": "ServiceA",
                "app_name": "test_app",
                "is_ingress": False,
                "outbound_deployments": [
                    {"name": "Database", "app_name": "test_app"}
                ],
            },
            "ServiceB": {
                "name": "ServiceB",
                "app_name": "test_app",
                "is_ingress": False,
                "outbound_deployments": [
                    {"name": "Database", "app_name": "test_app"}
                ],
            },
            "Database": {
                "name": "Database",
                "app_name": "test_app",
                "is_ingress": False,
                "outbound_deployments": [],
            },
        },
    }

    # Final verification
    client = _get_global_client()
    status = client.get_serve_details()
    actual_topology = status["applications"]["test_app"]["deployment_topology"]

    # Sort for comparison (edge ordering is not guaranteed by the API)
    actual_topology["nodes"]["Ingress"]["outbound_deployments"] = sorted(
        actual_topology["nodes"]["Ingress"]["outbound_deployments"],
        key=lambda x: x["name"],
    )
    expected_topology["nodes"]["Ingress"]["outbound_deployments"] = sorted(
        expected_topology["nodes"]["Ingress"]["outbound_deployments"],
        key=lambda x: x["name"],
    )
    assert actual_topology == expected_topology
def test_cross_app_topology(self, serve_instance):
    """Test cross-application dependencies.

    App1's deployment obtains a handle to App2's deployment via
    ``get_deployment_handle``; the topology for app1 should record an
    outbound edge whose ``app_name`` is "app2".
    """
    # Deploy App2 first (the dependency)
    @serve.deployment
    class App2Service:
        def __call__(self):
            return "app2_response"

    app2_service = App2Service.bind()
    serve.run(app2_service, name="app2", route_prefix="/app2")

    # Deploy App1 that depends on App2
    @serve.deployment
    class App1Service:
        def __init__(self):
            # Cross-app handle lookup (not a bound constructor argument).
            self.app2_handle = get_deployment_handle("App2Service", "app2")

        async def __call__(self):
            result = await self.app2_handle.remote()
            return f"app1->{result}"

    app1_service = App1Service.bind()
    serve.run(app1_service, name="app1", route_prefix="/app1")

    # Make requests to both apps to ensure they're initialized
    response1 = requests.get("http://localhost:8000/app1")
    assert response1.status_code == 200
    response2 = requests.get("http://localhost:8000/app2")
    assert response2.status_code == 200

    # Expected topology for app1
    expected_topology_app1 = {
        "app_name": "app1",
        "ingress_deployment": "App1Service",
        "nodes": {
            "App1Service": {
                "name": "App1Service",
                "app_name": "app1",
                "is_ingress": True,
                "outbound_deployments": [
                    {"name": "App2Service", "app_name": "app2"}
                ],
            },
        },
    }

    # Expected topology for app2
    expected_topology_app2 = {
        "app_name": "app2",
        "ingress_deployment": "App2Service",
        "nodes": {
            "App2Service": {
                "name": "App2Service",
                "app_name": "app2",
                "is_ingress": True,
                "outbound_deployments": [],
            },
        },
    }

    # Wait for topologies to be built
    def check_topology():
        client = _get_global_client()
        status = client.get_serve_details()
        topology_app1 = (
            status.get("applications", {})
            .get("app1", {})
            .get("deployment_topology")
        )
        topology_app2 = (
            status.get("applications", {})
            .get("app2", {})
            .get("deployment_topology")
        )
        return (
            topology_app1 == expected_topology_app1
            and topology_app2 == expected_topology_app2
        )

    # NOTE(abrar): using wait_for_condition because outbound deployments are added asynchronously
    # when get_deployment_handle is used.
    wait_for_condition(check_topology)

    # Final verification
    client = _get_global_client()
    status = client.get_serve_details()
    actual_topology_app1 = status["applications"]["app1"]["deployment_topology"]
    actual_topology_app2 = status["applications"]["app2"]["deployment_topology"]
    assert actual_topology_app1 == expected_topology_app1
    assert actual_topology_app2 == expected_topology_app2
def test_single_deployment_no_dependencies(self, serve_instance):
    """Test single deployment with no outbound dependencies."""

    @serve.deployment
    class Standalone:
        def __call__(self):
            return "standalone"

    serve.run(Standalone.bind(), name="test_app", route_prefix="/test")

    # The app must be serving before we inspect its topology.
    response = requests.get("http://localhost:8000/test")
    assert response.status_code == 200
    assert response.text == "standalone"

    # A lone ingress node with no outbound edges.
    expected = {
        "app_name": "test_app",
        "ingress_deployment": "Standalone",
        "nodes": {
            "Standalone": {
                "name": "Standalone",
                "app_name": "test_app",
                "is_ingress": True,
                "outbound_deployments": [],
            },
        },
    }

    details = _get_global_client().get_serve_details()
    assert details["applications"]["test_app"]["deployment_topology"] == expected
def test_topology_update_on_redeploy(self, serve_instance):
    """Test that topology updates when application is redeployed with different structure.

    Deploys Ingress -> ServiceA, verifies the topology, then redeploys the
    same app name as IngressV2 -> ServiceB and verifies the old nodes are
    fully replaced.
    """
    # Initial deployment: Ingress -> ServiceA
    @serve.deployment
    class ServiceA:
        def __call__(self):
            return "A"

    @serve.deployment
    class Ingress:
        def __init__(self, service_a: DeploymentHandle):
            self.service_a = service_a

        async def __call__(self):
            result = await self.service_a.remote()
            return f"Ingress->{result}"

    service_a = ServiceA.bind()
    ingress = Ingress.bind(service_a)
    serve.run(ingress, name="test_app", route_prefix="/test")

    # Make a request
    response = requests.get("http://localhost:8000/test")
    assert response.status_code == 200

    # Expected initial topology
    expected_topology_v1 = {
        "app_name": "test_app",
        "ingress_deployment": "Ingress",
        "nodes": {
            "Ingress": {
                "name": "Ingress",
                "app_name": "test_app",
                "is_ingress": True,
                "outbound_deployments": [
                    {"name": "ServiceA", "app_name": "test_app"}
                ],
            },
            "ServiceA": {
                "name": "ServiceA",
                "app_name": "test_app",
                "is_ingress": False,
                "outbound_deployments": [],
            },
        },
    }

    client = _get_global_client()
    status = client.get_serve_details()
    actual_topology = status["applications"]["test_app"]["deployment_topology"]
    assert expected_topology_v1 == actual_topology

    # Redeploy with different structure: Ingress -> ServiceB
    @serve.deployment
    class ServiceB:
        def __call__(self):
            return "B"

    @serve.deployment
    class IngressV2:
        def __init__(self, service_b: DeploymentHandle):
            self.service_b = service_b

        async def __call__(self):
            result = await self.service_b.remote()
            return f"IngressV2->{result}"

    service_b = ServiceB.bind()
    ingress_v2 = IngressV2.bind(service_b)
    serve.run(ingress_v2, name="test_app", route_prefix="/test")

    # Make a request to new deployment
    response = requests.get("http://localhost:8000/test")
    assert response.status_code == 200
    assert "B" in response.text

    # Expected updated topology (no trace of Ingress/ServiceA should remain)
    expected_topology_v2 = {
        "app_name": "test_app",
        "ingress_deployment": "IngressV2",
        "nodes": {
            "IngressV2": {
                "name": "IngressV2",
                "app_name": "test_app",
                "is_ingress": True,
                "outbound_deployments": [
                    {"name": "ServiceB", "app_name": "test_app"}
                ],
            },
            "ServiceB": {
                "name": "ServiceB",
                "app_name": "test_app",
                "is_ingress": False,
                "outbound_deployments": [],
            },
        },
    }

    # Final verification
    client = _get_global_client()
    status = client.get_serve_details()
    actual_topology = status["applications"]["test_app"]["deployment_topology"]
    assert actual_topology == expected_topology_v2
def test_fastapi_factory_with_get_deployment_handle(self, serve_instance):
    """Test topology with FastAPI factory pattern using get_deployment_handle.

    This test demonstrates the pattern where get_deployment_handle is used
    inside a FastAPI request handler (lazy lookup), which is common in
    factory patterns where dependencies aren't injected at construction time.
    """
    # Backend service that will be called via get_deployment_handle
    @serve.deployment(name="BackendService")
    class BackendService:
        def __call__(self):
            return "backend_data"

    # FastAPI ingress using factory pattern
    def create_fastapi_app():
        app = FastAPI()

        @app.get("/")
        async def root():
            # Use get_deployment_handle inside the request handler (lazy lookup)
            # This is the factory pattern - handle is obtained at request time
            backend_handle = get_deployment_handle("BackendService", "test_app")
            result = await backend_handle.remote()
            return {"data": result}

        @app.get("/health")
        async def health():
            return {"status": "ok"}

        return app

    # The constructor receives a handle too, but the request path above uses
    # the lazily looked-up handle rather than this injected one.
    @serve.deployment(name="FastAPIIngress")
    @serve.ingress(create_fastapi_app())
    class FastAPIIngress:
        def __init__(self, backend_service: DeploymentHandle):
            self.backend_service = backend_service

    # Deploy both deployments
    ingress_deployment = FastAPIIngress.bind(BackendService.bind())
    serve.run(ingress_deployment, name="test_app", route_prefix="/test")
    # Make requests to ensure deployments are initialized
    response = requests.get("http://localhost:8000/test/health")
    assert response.status_code == 200
    # Make a request that triggers get_deployment_handle
    response = requests.get("http://localhost:8000/test/")
    assert response.status_code == 200
    assert "backend_data" in response.text
    # Expected topology: the lazily obtained handle still produces an
    # Ingress -> BackendService edge in the reported graph.
    expected_topology = {
        "app_name": "test_app",
        "ingress_deployment": "FastAPIIngress",
        "nodes": {
            "FastAPIIngress": {
                "name": "FastAPIIngress",
                "app_name": "test_app",
                "is_ingress": True,
                "outbound_deployments": [
                    {"name": "BackendService", "app_name": "test_app"}
                ],
            },
            "BackendService": {
                "name": "BackendService",
                "app_name": "test_app",
                "is_ingress": False,
                "outbound_deployments": [],
            },
        },
    }

    # Wait for topology to be built
    # NOTE(abrar): using wait_for_condition because outbound deployments are added asynchronously
    # when get_deployment_handle is used.
    def check_topology():
        client = _get_global_client()
        status = client.get_serve_details()
        topology = (
            status.get("applications", {})
            .get("test_app", {})
            .get("deployment_topology")
        )
        return topology == expected_topology

    wait_for_condition(check_topology)
    # Final verification
    client = _get_global_client()
    status = client.get_serve_details()
    actual_topology = status["applications"]["test_app"]["deployment_topology"]
    assert actual_topology == expected_topology
def test_topology_with_updating_deployment(self, serve_instance):
    """Test topology when a deployment is updating (not fully rolled out)."""
    # Create signal actor for synchronization; the Ingress constructor blocks
    # on it, so the application stays in DEPLOYING for the duration of the test.
    signal = SignalActor.remote()

    # Initial deployment
    @serve.deployment(name="ServiceA", version="v1")
    class ServiceA:
        def __call__(self):
            return "v1"

    @serve.deployment(name="Ingress")
    class Ingress:
        def __init__(self, service_a: DeploymentHandle):
            # Block until signal is sent to keep deployment in UPDATING state
            ray.get(signal.wait.remote())
            self.service_a = service_a

        async def __call__(self):
            result = await self.service_a.remote()
            return f"Ingress->{result}"

    # Deploy initial version
    service_a_v1 = ServiceA.bind()
    ingress = Ingress.bind(service_a_v1)
    # Non-blocking run: the Ingress replica parks inside __init__ on the signal.
    serve._run(ingress, name="test_app", route_prefix="/test", _blocking=False)
    wait_for_condition(lambda: ray.get(signal.cur_num_waiters.remote()) == 1)
    # Verify initial topology.
    # NOTE(review): outbound_deployments is empty for Ingress here — presumably
    # outbound edges are only recorded once replicas finish initializing;
    # confirm against the controller's topology-building logic.
    initial_topology = {
        "app_name": "test_app",
        "ingress_deployment": "Ingress",
        "nodes": {
            "Ingress": {
                "name": "Ingress",
                "app_name": "test_app",
                "is_ingress": True,
                "outbound_deployments": [],
            },
            "ServiceA": {
                "name": "ServiceA",
                "app_name": "test_app",
                "is_ingress": False,
                "outbound_deployments": [],
            },
        },
    }

    def check_initial_topology():
        # Wait until the app is registered and still mid-rollout.
        client = _get_global_client()
        status = client.get_serve_details()
        if "test_app" not in status["applications"]:
            return False
        app_status = status["applications"]["test_app"]["status"]
        return app_status == ApplicationStatus.DEPLOYING.value

    wait_for_condition(check_initial_topology)
    client = _get_global_client()
    status = client.get_serve_details()
    actual_topology = status["applications"]["test_app"]["deployment_topology"]
    assert actual_topology == initial_topology
    # Clean up: send signal to unblock replicas
    ray.get(signal.send.remote())
def test_topology_with_failed_deployment(self, serve_instance):
    """Test topology when a deployment fails to start."""
    # A deployment that will fail to start
    @serve.deployment
    class ServiceB:
        def __call__(self):
            return "b"

    @serve.deployment(name="FailingService")
    class FailingService:
        def __init__(self, service_b: DeploymentHandle):
            self.service_b = service_b
            # Fail during construction so replicas never become healthy.
            raise RuntimeError("Intentional failure for testing")

        def __call__(self):
            return "should never reach here"

    @serve.deployment(name="Ingress")
    class Ingress:
        def __init__(self, failing_service: DeploymentHandle):
            self.failing_service = failing_service

        async def __call__(self):
            result = await self.failing_service.remote()
            return f"Ingress->{result}"

    # Deploy with failing service
    service_b = ServiceB.bind()
    failing_service = FailingService.bind(service_b)
    ingress = Ingress.bind(failing_service)
    # Deploy (this won't fail immediately, but replicas will fail to start)
    serve._run(ingress, name="test_app", route_prefix="/test", _blocking=False)

    # Wait for the deployment to be processed and replicas to fail
    def deployment_has_failures():
        client = _get_global_client()
        status = client.get_serve_details()
        if "test_app" not in status["applications"]:
            return False
        app_status = status["applications"]["test_app"]["status"]
        return app_status == ApplicationStatus.DEPLOY_FAILED.value

    wait_for_condition(deployment_has_failures, timeout=10)
    # Get the serve details
    client = _get_global_client()
    status = client.get_serve_details()
    # Verify the application exists (even with failed deployment)
    assert "test_app" in status["applications"]
    # Verify deployment_topology still exists
    # The topology should be built based on the deployment structure,
    # regardless of whether the replicas are healthy
    topology = status["applications"]["test_app"]["deployment_topology"]
    # NOTE(review): FailingService has no edge to ServiceB even though it was
    # bound with a ServiceB handle — looks like outbound edges require the
    # replica to initialize; confirm against the topology builder.
    expected_topology = {
        "app_name": "test_app",
        "ingress_deployment": "Ingress",
        "nodes": {
            "Ingress": {
                "name": "Ingress",
                "app_name": "test_app",
                "is_ingress": True,
                "outbound_deployments": [
                    {"name": "FailingService", "app_name": "test_app"}
                ],
            },
            "FailingService": {
                "name": "FailingService",
                "app_name": "test_app",
                "is_ingress": False,
                "outbound_deployments": [],
            },
            "ServiceB": {
                "name": "ServiceB",
                "app_name": "test_app",
                "is_ingress": False,
                "outbound_deployments": [],
            },
        },
    }
    assert topology == expected_topology
if __name__ == "__main__":
    # Run this file's tests directly with verbose, unbuffered pytest output.
    raise SystemExit(pytest.main(["-v", "-s", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/test_deployment_topology.py",
"license": "Apache License 2.0",
"lines": 735,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/tests/test_exceptions.py | """Tests for Ray exceptions."""
import sys
from enum import Enum
from unittest.mock import MagicMock, patch
import pytest
from ray.exceptions import AuthenticationError, RayError
class FakeAuthMode(Enum):
    """Stand-in for ``ray._raylet.AuthenticationMode`` used by these tests.

    Mirrors the real enum's member names and integer values so it can be
    injected via ``patch.dict(sys.modules, ...)``.
    """

    DISABLED = 0  # authentication turned off
    TOKEN = 1  # token-based authentication enabled
class TestAuthenticationError:
    """Tests for AuthenticationError exception."""

    # Docs link that AuthenticationError is expected to append to every message.
    auth_doc_url = "https://docs.ray.io/en/latest/ray-security/token-auth.html"

    def test_basic_creation(self):
        """Test basic AuthenticationError creation and message format."""
        error = AuthenticationError("Token is missing")
        error_str = str(error)
        # Original message preserved
        assert "Token is missing" in error_str
        # Doc URL included
        assert self.auth_doc_url in error_str

    def test_is_ray_error_subclass(self):
        """Test that AuthenticationError is a RayError subclass."""
        error = AuthenticationError("Test")
        assert isinstance(error, RayError)

    @pytest.mark.parametrize(
        "auth_mode,expected_note",
        [
            (FakeAuthMode.DISABLED, "RAY_AUTH_MODE is currently 'disabled'"),
            (FakeAuthMode.TOKEN, None),
        ],
        ids=["disabled", "token"],
    )
    def test_auth_mode_note_in_message(self, auth_mode, expected_note):
        """Test that error message includes auth mode note when not in token mode."""
        # Swap ray._raylet for a mock so the error's message formatting sees a
        # controlled authentication mode instead of the real runtime's.
        with patch.dict(
            "sys.modules",
            {
                "ray._raylet": MagicMock(
                    AuthenticationMode=FakeAuthMode,
                    get_authentication_mode=lambda: auth_mode,
                )
            },
        ):
            error = AuthenticationError("Token is missing")
            error_str = str(error)
            assert "Token is missing" in error_str
            if expected_note:
                assert expected_note in error_str
            else:
                # In TOKEN mode no advisory note should be appended.
                assert "RAY_AUTH_MODE is currently" not in error_str
if __name__ == "__main__":
    # Run this file's tests directly under pytest in verbose mode.
    raise SystemExit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/test_exceptions.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/llm/_internal/common/utils/cloud_filesystem/azure_filesystem.py | """Azure-specific filesystem implementation.
This module provides an Azure-specific implementation that delegates to PyArrowFileSystem.
This maintains backward compatibility while allowing for future optimizations using
native Azure tools (azcopy, azure-storage-blob SDK).
"""
from typing import List, Optional, Union
from ray.llm._internal.common.utils.cloud_filesystem.base import BaseCloudFileSystem
from ray.llm._internal.common.utils.cloud_filesystem.pyarrow_filesystem import (
PyArrowFileSystem,
)
class AzureFileSystem(BaseCloudFileSystem):
    """Azure Blob Storage / ADLS Gen2 flavor of the cloud filesystem API.

    **Note**: every operation is currently forwarded verbatim to
    :class:`PyArrowFileSystem` for stability; a native implementation built on
    the azure-storage-blob SDK and azcopy is planned for a future PR.
    """

    @staticmethod
    def get_file(
        object_uri: str, decode_as_utf_8: bool = True
    ) -> Optional[Union[str, bytes]]:
        """Fetch a single object (abfss:// or azure://) into memory.

        Args:
            object_uri: Fully qualified URI of the object to read.
            decode_as_utf_8: When True, return ``str`` decoded as UTF-8;
                otherwise return the raw ``bytes``.

        Returns:
            The object's contents, or ``None`` when it does not exist.
        """
        return PyArrowFileSystem.get_file(object_uri, decode_as_utf_8)

    @staticmethod
    def list_subfolders(folder_uri: str) -> List[str]:
        """Enumerate the immediate child folders of a cloud directory.

        Args:
            folder_uri: URI of the directory (abfss:// or azure://).

        Returns:
            Names of the direct subfolders, without trailing slashes.
        """
        return PyArrowFileSystem.list_subfolders(folder_uri)

    @staticmethod
    def download_files(
        path: str,
        bucket_uri: str,
        substrings_to_include: Optional[List[str]] = None,
        suffixes_to_exclude: Optional[List[str]] = None,
    ) -> None:
        """Mirror a cloud directory into a local one, with optional filtering.

        Args:
            path: Local destination directory.
            bucket_uri: URI of the cloud directory to copy from.
            substrings_to_include: Keep only files whose relative path
                contains at least one of these substrings.
            suffixes_to_exclude: Drop files ending in any of these suffixes
                (e.g. ``.safetensors``).
        """
        PyArrowFileSystem.download_files(
            path, bucket_uri, substrings_to_include, suffixes_to_exclude
        )

    @staticmethod
    def upload_files(
        local_path: str,
        bucket_uri: str,
    ) -> None:
        """Copy a local directory tree up to cloud storage.

        Args:
            local_path: Local source path of the files to upload.
            bucket_uri: Destination URI; must start with ``abfss://`` or
                ``azure://``.
        """
        PyArrowFileSystem.upload_files(local_path, bucket_uri)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/_internal/common/utils/cloud_filesystem/azure_filesystem.py",
"license": "Apache License 2.0",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:python/ray/llm/_internal/common/utils/cloud_filesystem/base.py | """Abstract base class for cloud filesystem implementations.
This module defines the interface that all cloud storage provider implementations
must follow, ensuring consistency across different providers while allowing
provider-specific optimizations.
"""
from abc import ABC, abstractmethod
from typing import List, Optional, Union
class BaseCloudFileSystem(ABC):
    """Common interface for provider-specific cloud filesystem backends.

    Concrete subclasses (``S3FileSystem``, ``GCSFileSystem``,
    ``AzureFileSystem``, ...) implement these static methods with whatever
    tooling is most efficient for their storage platform, while callers stay
    provider-agnostic.
    """

    @staticmethod
    @abstractmethod
    def get_file(
        object_uri: str, decode_as_utf_8: bool = True
    ) -> Optional[Union[str, bytes]]:
        """Read a single object from cloud storage into memory.

        Args:
            object_uri: URI of the object (s3://, gs://, abfss://, or azure://).
            decode_as_utf_8: When True, decode the payload as UTF-8 text.

        Returns:
            The object's contents as ``str`` or ``bytes``, or ``None`` when
            the object does not exist.
        """
        ...

    @staticmethod
    @abstractmethod
    def list_subfolders(folder_uri: str) -> List[str]:
        """Enumerate the immediate child folders of a cloud directory.

        Args:
            folder_uri: URI of the directory (s3://, gs://, abfss://, or azure://).

        Returns:
            Names of the direct subfolders, without trailing slashes.
        """
        ...

    @staticmethod
    @abstractmethod
    def download_files(
        path: str,
        bucket_uri: str,
        substrings_to_include: Optional[List[str]] = None,
        suffixes_to_exclude: Optional[List[str]] = None,
    ) -> None:
        """Mirror a cloud directory into a local one, with optional filtering.

        Args:
            path: Local destination directory.
            bucket_uri: URI of the cloud directory to copy from.
            substrings_to_include: Keep only files whose relative path
                contains at least one of these substrings.
            suffixes_to_exclude: Drop files ending in any of these suffixes
                (e.g. ``.safetensors``).
        """
        ...

    @staticmethod
    @abstractmethod
    def upload_files(
        local_path: str,
        bucket_uri: str,
    ) -> None:
        """Copy a local directory tree up to cloud storage.

        Args:
            local_path: Local source path of the files to upload.
            bucket_uri: Destination URI; must start with ``s3://``, ``gs://``,
                ``abfss://``, or ``azure://``.
        """
        ...
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/_internal/common/utils/cloud_filesystem/base.py",
"license": "Apache License 2.0",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:python/ray/llm/_internal/common/utils/cloud_filesystem/gcs_filesystem.py | """GCS-specific filesystem implementation.
This module provides a GCS-specific implementation.
This maintains backward compatibility while allowing for future optimizations using
native GCS tools (gsutil, google-cloud-storage SDK).
"""
from typing import List, Optional, Union
from ray.llm._internal.common.utils.cloud_filesystem.base import BaseCloudFileSystem
from ray.llm._internal.common.utils.cloud_filesystem.pyarrow_filesystem import (
PyArrowFileSystem,
)
class GCSFileSystem(BaseCloudFileSystem):
    """Google Cloud Storage flavor of the cloud filesystem API.

    **Note**: every operation is currently forwarded verbatim to
    :class:`PyArrowFileSystem` for stability; a native implementation built on
    the google-cloud-storage SDK and gsutil is planned for a future PR.
    """

    @staticmethod
    def get_file(
        object_uri: str, decode_as_utf_8: bool = True
    ) -> Optional[Union[str, bytes]]:
        """Fetch a single gs:// object into memory.

        Args:
            object_uri: Fully qualified gs:// URI of the object to read.
            decode_as_utf_8: When True, return ``str`` decoded as UTF-8;
                otherwise return the raw ``bytes``.

        Returns:
            The object's contents, or ``None`` when it does not exist.
        """
        return PyArrowFileSystem.get_file(object_uri, decode_as_utf_8)

    @staticmethod
    def list_subfolders(folder_uri: str) -> List[str]:
        """Enumerate the immediate child folders of a gs:// directory.

        Args:
            folder_uri: gs:// URI of the directory.

        Returns:
            Names of the direct subfolders, without trailing slashes.
        """
        return PyArrowFileSystem.list_subfolders(folder_uri)

    @staticmethod
    def download_files(
        path: str,
        bucket_uri: str,
        substrings_to_include: Optional[List[str]] = None,
        suffixes_to_exclude: Optional[List[str]] = None,
    ) -> None:
        """Mirror a gs:// directory into a local one, with optional filtering.

        Args:
            path: Local destination directory.
            bucket_uri: gs:// URI of the directory to copy from.
            substrings_to_include: Keep only files whose relative path
                contains at least one of these substrings.
            suffixes_to_exclude: Drop files ending in any of these suffixes
                (e.g. ``.safetensors``).
        """
        PyArrowFileSystem.download_files(
            path, bucket_uri, substrings_to_include, suffixes_to_exclude
        )

    @staticmethod
    def upload_files(
        local_path: str,
        bucket_uri: str,
    ) -> None:
        """Copy a local directory tree up to GCS.

        Args:
            local_path: Local source path of the files to upload.
            bucket_uri: Destination URI; must start with ``gs://``.
        """
        PyArrowFileSystem.upload_files(local_path, bucket_uri)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/_internal/common/utils/cloud_filesystem/gcs_filesystem.py",
"license": "Apache License 2.0",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:python/ray/llm/_internal/common/utils/cloud_filesystem/pyarrow_filesystem.py | """PyArrow-based filesystem implementation for cloud storage.
This module provides a PyArrow-based implementation of the cloud filesystem
interface, supporting S3, GCS, and Azure storage providers.
"""
import os
from concurrent.futures import ThreadPoolExecutor
from typing import List, Optional, Tuple, Union
from urllib.parse import urlparse
import pyarrow.fs as pa_fs
from ray.llm._internal.common.observability.logging import get_logger
from ray.llm._internal.common.utils.cloud_filesystem.base import BaseCloudFileSystem
logger = get_logger(__name__)
class PyArrowFileSystem(BaseCloudFileSystem):
    """PyArrow-based implementation of cloud filesystem operations.

    This class provides a unified interface for cloud storage operations using
    PyArrow's filesystem abstraction. It supports S3, GCS, and Azure storage
    providers.
    """

    @staticmethod
    def get_fs_and_path(object_uri: str) -> Tuple[pa_fs.FileSystem, str]:
        """Get the appropriate filesystem and path from a URI.

        Args:
            object_uri: URI of the file (s3://, gs://, abfss://, or azure://).
                If the URI contains 'anonymous@', anonymous access is used,
                e.g. s3://anonymous@bucket/path. A leading "pyarrow-" prefix
                is stripped so callers can force this backend explicitly.

        Returns:
            Tuple of (filesystem, path)

        Raises:
            ValueError: If the URI scheme is not supported.
        """
        # Allow callers to force the PyArrow backend with a "pyarrow-" prefix.
        if object_uri.startswith("pyarrow-"):
            object_uri = object_uri[len("pyarrow-") :]
        anonymous = False
        # Check for the anonymous access pattern (only for S3/GCS),
        # e.g. s3://anonymous@bucket/path. Azure URIs legitimately contain
        # "@" (container@account), so they are excluded from this handling.
        if "@" in object_uri and not (
            object_uri.startswith("abfss://") or object_uri.startswith("azure://")
        ):
            parts = object_uri.split("@", 1)
            # Check if the first part ends with "anonymous"
            if parts[0].endswith("anonymous"):
                anonymous = True
                # Remove the anonymous@ part, keeping the scheme
                scheme = parts[0].split("://")[0]
                object_uri = f"{scheme}://{parts[1]}"
        if object_uri.startswith("s3://"):
            # Optional overrides for S3-compatible endpoints (e.g. MinIO).
            endpoint = os.getenv("AWS_ENDPOINT_URL_S3", None)
            virtual_hosted_style = os.getenv("AWS_S3_ADDRESSING_STYLE", None)
            fs = pa_fs.S3FileSystem(
                anonymous=anonymous,
                endpoint_override=endpoint,
                force_virtual_addressing=(virtual_hosted_style == "virtual"),
            )
            path = object_uri[5:]  # Remove "s3://"
        elif object_uri.startswith("gs://"):
            fs = pa_fs.GcsFileSystem(anonymous=anonymous)
            path = object_uri[5:]  # Remove "gs://"
        elif object_uri.startswith("abfss://"):
            fs, path = PyArrowFileSystem._create_abfss_filesystem(object_uri)
        elif object_uri.startswith("azure://"):
            fs, path = PyArrowFileSystem._create_azure_filesystem(object_uri)
        else:
            raise ValueError(f"Unsupported URI scheme: {object_uri}")
        return fs, path

    @staticmethod
    def _create_azure_filesystem(object_uri: str) -> Tuple[pa_fs.FileSystem, str]:
        """Create an Azure filesystem for Azure Blob Storage or ABFSS.

        Args:
            object_uri: Azure URI (azure://container@account.blob.core.windows.net/path or
                abfss://container@account.dfs.core.windows.net/path)

        Returns:
            Tuple of (PyArrow FileSystem, path without scheme prefix)

        Raises:
            ImportError: If required dependencies are not installed.
            ValueError: If the Azure URI format is invalid.
        """
        try:
            import adlfs
            from azure.identity import DefaultAzureCredential
        except ImportError:
            raise ImportError(
                "You must `pip install adlfs azure-identity` "
                "to use Azure/ABFSS URIs. "
                "Note that these must be preinstalled on all nodes in the Ray cluster."
            )
        # Parse and validate the Azure URI
        parsed = urlparse(object_uri)
        scheme = parsed.scheme.lower()
        # Validate URI format: scheme://container@account.domain/path
        if not parsed.netloc or "@" not in parsed.netloc:
            raise ValueError(
                f"Invalid {scheme.upper()} URI format - missing container@account: {object_uri}"
            )
        container_part, hostname_part = parsed.netloc.split("@", 1)
        # Validate container name (must be non-empty)
        if not container_part:
            raise ValueError(
                f"Invalid {scheme.upper()} URI format - empty container name: {object_uri}"
            )
        # Validate hostname format based on scheme
        if scheme == "abfss":
            valid_hostname = hostname_part.endswith(".dfs.core.windows.net")
            expected_domains = ".dfs.core.windows.net"
        elif scheme == "azure":
            valid_hostname = hostname_part.endswith(
                ".blob.core.windows.net"
            ) or hostname_part.endswith(".dfs.core.windows.net")
            expected_domains = ".blob.core.windows.net or .dfs.core.windows.net"
        else:
            # BUGFIX: previously any other scheme fell through with
            # `expected_domains` unbound, so the validation failure below
            # surfaced as a NameError instead of the documented ValueError.
            raise ValueError(
                f"Invalid URI scheme '{scheme}' - expected abfss or azure: {object_uri}"
            )
        if not hostname_part or not valid_hostname:
            raise ValueError(
                f"Invalid {scheme.upper()} URI format - invalid hostname (must end with {expected_domains}): {object_uri}"
            )
        # Extract and validate account name
        azure_storage_account_name = hostname_part.split(".")[0]
        if not azure_storage_account_name:
            raise ValueError(
                f"Invalid {scheme.upper()} URI format - empty account name: {object_uri}"
            )
        # Create the adlfs filesystem (fsspec-compatible).
        adlfs_fs = adlfs.AzureBlobFileSystem(
            account_name=azure_storage_account_name,
            credential=DefaultAzureCredential(),
        )
        # Wrap with PyArrow's PyFileSystem for compatibility
        fs = pa_fs.PyFileSystem(pa_fs.FSSpecHandler(adlfs_fs))
        # Return the path without the scheme prefix
        path = f"{container_part}{parsed.path}"
        return fs, path

    @staticmethod
    def _create_abfss_filesystem(object_uri: str) -> Tuple[pa_fs.FileSystem, str]:
        """Create an ABFSS filesystem for Azure Data Lake Storage Gen2.

        This is a wrapper around _create_azure_filesystem for backward compatibility.

        Args:
            object_uri: ABFSS URI (abfss://container@account.dfs.core.windows.net/path)

        Returns:
            Tuple of (PyArrow FileSystem, path without abfss:// prefix)
        """
        return PyArrowFileSystem._create_azure_filesystem(object_uri)

    @staticmethod
    def _filter_files(
        fs: pa_fs.FileSystem,
        source_path: str,
        destination_path: str,
        substrings_to_include: Optional[List[str]] = None,
        suffixes_to_exclude: Optional[List[str]] = None,
    ) -> List[Tuple[str, str]]:
        """Filter files from cloud storage based on inclusion and exclusion criteria.

        Args:
            fs: PyArrow filesystem instance
            source_path: Source path in cloud storage
            destination_path: Local destination path
            substrings_to_include: Only include files containing these substrings
            suffixes_to_exclude: Exclude files ending with these suffixes

        Returns:
            List of tuples containing (source_file_path, destination_file_path)
        """
        file_selector = pa_fs.FileSelector(source_path, recursive=True)
        file_infos = fs.get_file_info(file_selector)
        path_pairs = []
        for file_info in file_infos:
            # Skip directories; only actual files are copied.
            if file_info.type != pa_fs.FileType.File:
                continue
            # Path relative to the source root, used for filtering and to
            # mirror the directory layout locally.
            rel_path = file_info.path[len(source_path) :].lstrip("/")
            # Apply filters
            if substrings_to_include:
                if not any(
                    substring in rel_path for substring in substrings_to_include
                ):
                    continue
            if suffixes_to_exclude:
                if any(rel_path.endswith(suffix) for suffix in suffixes_to_exclude):
                    continue
            path_pairs.append(
                (file_info.path, os.path.join(destination_path, rel_path))
            )
        return path_pairs

    @staticmethod
    def get_file(
        object_uri: str, decode_as_utf_8: bool = True
    ) -> Optional[Union[str, bytes]]:
        """Download a file from cloud storage into memory.

        Args:
            object_uri: URI of the file (s3://, gs://, abfss://, or azure://)
            decode_as_utf_8: If True, decode the file as UTF-8

        Returns:
            File contents as string or bytes, or None if the file does not
            exist or cannot be read (errors are logged, not raised).
        """
        try:
            fs, path = PyArrowFileSystem.get_fs_and_path(object_uri)
            # Check if file exists
            if not fs.get_file_info(path).type == pa_fs.FileType.File:
                logger.info(f"URI {object_uri} does not exist.")
                return None
            # Read file
            with fs.open_input_file(path) as f:
                body = f.read()
                if decode_as_utf_8:
                    body = body.decode("utf-8")
            return body
        except Exception as e:
            logger.warning(f"Error reading {object_uri}: {e}")
            return None

    @staticmethod
    def list_subfolders(folder_uri: str) -> List[str]:
        """List the immediate subfolders in a cloud directory.

        Args:
            folder_uri: URI of the directory (s3://, gs://, abfss://, or azure://)

        Returns:
            List of subfolder names (without trailing slashes); empty on error.
        """
        # Ensure that the folder_uri has a trailing slash.
        folder_uri = f"{folder_uri.rstrip('/')}/"
        try:
            fs, path = PyArrowFileSystem.get_fs_and_path(folder_uri)
            # List directory contents (non-recursive: immediate children only).
            file_infos = fs.get_file_info(pa_fs.FileSelector(path, recursive=False))
            # Filter for directories and extract subfolder names
            subfolders = []
            for file_info in file_infos:
                if file_info.type == pa_fs.FileType.Directory:
                    # Extract just the subfolder name without the full path
                    subfolder = os.path.basename(file_info.path.rstrip("/"))
                    subfolders.append(subfolder)
            return subfolders
        except Exception as e:
            logger.error(f"Error listing subfolders in {folder_uri}: {e}")
            return []

    @staticmethod
    def download_files(
        path: str,
        bucket_uri: str,
        substrings_to_include: Optional[List[str]] = None,
        suffixes_to_exclude: Optional[List[str]] = None,
        max_concurrency: int = 10,
        chunk_size: int = 64 * 1024 * 1024,
    ) -> None:
        """Download files from cloud storage to a local directory.

        Args:
            path: Local directory where files will be downloaded
            bucket_uri: URI of cloud directory
            substrings_to_include: Only include files containing these substrings
            suffixes_to_exclude: Exclude certain files from download
            max_concurrency: Maximum number of concurrent files to download (default: 10)
            chunk_size: Size of transfer chunks (default: 64MB)

        Raises:
            Exception: Re-raised from any failed listing or file transfer.
        """
        try:
            fs, source_path = PyArrowFileSystem.get_fs_and_path(bucket_uri)
            # Ensure destination exists
            os.makedirs(path, exist_ok=True)
            # Fast path: with no filters, let PyArrow copy the whole tree.
            if not substrings_to_include and not suffixes_to_exclude:
                pa_fs.copy_files(
                    source=source_path,
                    destination=path,
                    source_filesystem=fs,
                    destination_filesystem=pa_fs.LocalFileSystem(),
                    use_threads=True,
                    chunk_size=chunk_size,
                )
                return
            # List and filter files
            files_to_download = PyArrowFileSystem._filter_files(
                fs, source_path, path, substrings_to_include, suffixes_to_exclude
            )
            if not files_to_download:
                logger.info("Filters do not match any of the files, skipping download")
                return

            def download_single_file(file_paths):
                # Copy one (source, destination) pair; returns the local path.
                source_file_path, dest_file_path = file_paths
                # Create destination directory if needed
                dest_dir = os.path.dirname(dest_file_path)
                if dest_dir:
                    os.makedirs(dest_dir, exist_ok=True)
                # Use PyArrow's copy_files for individual files.
                pa_fs.copy_files(
                    source=source_file_path,
                    destination=dest_file_path,
                    source_filesystem=fs,
                    destination_filesystem=pa_fs.LocalFileSystem(),
                    use_threads=True,
                    chunk_size=chunk_size,
                )
                return dest_file_path

            # Fan filtered files out over a bounded thread pool.
            max_workers = min(max_concurrency, len(files_to_download))
            with ThreadPoolExecutor(max_workers=max_workers) as executor:
                futures = [
                    executor.submit(download_single_file, file_paths)
                    for file_paths in files_to_download
                ]
                for future in futures:
                    try:
                        future.result()
                    except Exception as e:
                        logger.error(f"Failed to download file: {e}")
                        raise
        except Exception as e:
            logger.exception(f"Error downloading files from {bucket_uri}: {e}")
            raise

    @staticmethod
    def upload_files(
        local_path: str,
        bucket_uri: str,
    ) -> None:
        """Upload files to cloud storage.

        Args:
            local_path: The local path of the files to upload.
            bucket_uri: The bucket uri to upload the files to, must start with
                `s3://`, `gs://`, `abfss://`, or `azure://`.

        Raises:
            Exception: Re-raised from any failed transfer (after logging).
        """
        try:
            fs, dest_path = PyArrowFileSystem.get_fs_and_path(bucket_uri)
            pa_fs.copy_files(
                source=local_path,
                destination=dest_path,
                source_filesystem=pa_fs.LocalFileSystem(),
                destination_filesystem=fs,
            )
        except Exception as e:
            logger.exception(f"Error uploading files to {bucket_uri}: {e}")
            raise
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/_internal/common/utils/cloud_filesystem/pyarrow_filesystem.py",
"license": "Apache License 2.0",
"lines": 319,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/llm/_internal/common/utils/cloud_filesystem/s3_filesystem.py | """S3-specific filesystem implementation using boto3.
This module provides an S3-specific implementation that uses boto3 (AWS SDK for Python)
for reliable and efficient S3 operations.
"""
import os
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path
from typing import List, Optional, Union
import boto3
from botocore import UNSIGNED
from botocore.client import BaseClient
from botocore.config import Config
from ray.llm._internal.common.observability.logging import get_logger
from ray.llm._internal.common.utils.cloud_filesystem.base import BaseCloudFileSystem
logger = get_logger(__name__)
class S3FileSystem(BaseCloudFileSystem):
"""S3-specific implementation of cloud filesystem operations using boto3.
This implementation uses boto3 (AWS SDK for Python) for reliable and efficient
operations with S3 storage.
"""
@staticmethod
def _parse_s3_uri(uri: str) -> tuple[str, str, bool]:
"""Parse S3 URI into bucket and key.
Args:
uri: S3 URI (e.g., s3://bucket/path/to/object or s3://anonymous@bucket/path/to/object)
Returns:
Tuple of (bucket_name, key, is_anonymous)
Raises:
ValueError: If URI is not a valid S3 URI
"""
# Check if anonymous@ prefix exists
is_anonymous = False
if uri.startswith("s3://anonymous@"):
is_anonymous = True
uri = uri.replace("s3://anonymous@", "s3://", 1)
if not uri.startswith("s3://"):
raise ValueError(f"Invalid S3 URI: {uri}")
# Remove s3:// prefix and split into bucket and key
path = uri[5:] # Remove "s3://"
parts = path.split("/", 1)
bucket = parts[0]
key = parts[1] if len(parts) > 1 else ""
return bucket, key, is_anonymous
@staticmethod
def _get_s3_client(max_pool_connections: int = 50, anonymous: bool = False):
    """Build a pooled boto3 S3 client.

    Args:
        max_pool_connections: Upper bound on the connection pool size;
            should be >= the number of concurrent workers using the client.
        anonymous: When True, issue unsigned (anonymous) requests.

    Returns:
        A boto3 S3 client configured with pooling, retries, and keepalive.
    """
    # Adaptive retries handle transient failures; keepalive helps
    # long-running transfers reuse connections.
    client_config = Config(
        max_pool_connections=max_pool_connections,
        retries={"max_attempts": 3, "mode": "adaptive"},
        tcp_keepalive=True,
        # UNSIGNED skips request signing entirely for public buckets.
        signature_version=UNSIGNED if anonymous else None,
    )
    return boto3.client("s3", config=client_config)
@staticmethod
def get_file(
    object_uri: str, decode_as_utf_8: bool = True
) -> Optional[Union[str, bytes]]:
    """Download a file from cloud storage into memory.

    Args:
        object_uri: URI of the file (s3://, optionally s3://anonymous@...)
        decode_as_utf_8: If True, decode the file as UTF-8

    Returns:
        File contents as string or bytes, or None if the object could not
        be read (missing object, bad URI, credentials/network errors are
        logged rather than raised).
    """
    try:
        bucket, key, is_anonymous = S3FileSystem._parse_s3_uri(object_uri)
        s3_client = S3FileSystem._get_s3_client(anonymous=is_anonymous)
        # Download the object body directly into memory.
        response = s3_client.get_object(Bucket=bucket, Key=key)
        body = response["Body"].read()
        if decode_as_utf_8:
            return body.decode("utf-8")
        return body
    except Exception as e:
        logger.error(f"Error reading {object_uri}: {e}")
        # FIX: return None explicitly on the error path instead of falling
        # off the end of the function — consistent with the sibling methods
        # and with the documented contract.
        return None
@staticmethod
def list_subfolders(folder_uri: str) -> List[str]:
    """Enumerate the immediate child folders of an S3 "directory".

    Args:
        folder_uri: s3:// URI of the directory.

    Returns:
        Names of the direct subfolders (no trailing slashes); empty list on
        any error (errors are logged, not raised).
    """
    try:
        bucket, prefix, is_anonymous = S3FileSystem._parse_s3_uri(folder_uri)
        # A trailing slash makes the prefix behave like a directory.
        if prefix and not prefix.endswith("/"):
            prefix += "/"
        client = S3FileSystem._get_s3_client(anonymous=is_anonymous)
        # With a "/" delimiter, S3 groups keys under CommonPrefixes, which
        # act as the immediate subdirectories.
        response = client.list_objects_v2(
            Bucket=bucket, Prefix=prefix, Delimiter="/"
        )
        names = [
            common_prefix["Prefix"][len(prefix) :].rstrip("/")
            for common_prefix in response.get("CommonPrefixes", [])
        ]
        # Drop any empty names left over after stripping.
        return [name for name in names if name]
    except Exception as e:
        logger.error(f"Error listing subfolders in {folder_uri}: {e}")
        return []
@staticmethod
def _calculate_optimal_workers(
num_files: int, total_size: int, default_max: int = 100, default_min: int = 10
) -> int:
"""Calculate optimal number of workers based on file characteristics.
Args:
num_files: Number of files to download
total_size: Total size of all files in bytes
default_max: Maximum workers to cap at
default_min: Minimum workers to use
Returns:
Optimal number of workers between default_min and default_max
"""
if num_files == 0:
return default_min
avg_file_size = total_size / num_files if total_size > 0 else 0
# Strategy: More workers for smaller files, fewer for larger files
if avg_file_size < 1024 * 1024: # < 1MB (small files)
# Use more workers for many small files
workers = min(num_files, default_max)
elif avg_file_size < 10 * 1024 * 1024: # 1-10MB (medium files)
# Use moderate workers
workers = min(num_files // 2, default_max // 2)
else: # > 10MB (large files)
# Use fewer workers since each download is bandwidth-intensive
workers = min(20, num_files)
# Ensure workers is between min and max
return max(default_min, min(workers, default_max))
@staticmethod
def _download_single_file(
s3_client: BaseClient, bucket: str, key: str, local_file_path: str
) -> tuple[str, bool]:
"""Download a single file from S3.
Args:
s3_client: Shared boto3 S3 client
bucket: S3 bucket name
key: S3 object key
local_file_path: Local path where file will be saved
Returns:
Tuple of (key, success)
"""
try:
# Create parent directories if needed
os.makedirs(os.path.dirname(local_file_path), exist_ok=True)
s3_client.download_file(bucket, key, local_file_path)
logger.debug(f"Downloaded {key} to {local_file_path}")
return key, True
except Exception as e:
logger.error(f"Failed to download {key}: {e}")
return key, False
    @staticmethod
    def download_files(
        path: str,
        bucket_uri: str,
        substrings_to_include: Optional[List[str]] = None,
        suffixes_to_exclude: Optional[List[str]] = None,
        max_workers: Optional[int] = None,
    ) -> None:
        """Download files from cloud storage to a local directory concurrently.

        Lists every object under ``bucket_uri``, applies the include/exclude
        filters, then downloads the matches in parallel via a thread pool
        that shares a single pooled S3 client.

        Args:
            path: Local directory where files will be downloaded
            bucket_uri: URI of cloud directory
            substrings_to_include: Only include files containing these substrings
            suffixes_to_exclude: Exclude certain files from download (e.g .safetensors)
            max_workers: Maximum number of concurrent downloads. If None, automatically
                calculated based on file count and sizes (min: 10, max: 100)

        Raises:
            Exception: Any listing/setup failure is logged and re-raised.
                Per-file download failures are only logged, not raised.
        """
        try:
            bucket, prefix, is_anonymous = S3FileSystem._parse_s3_uri(bucket_uri)
            # Ensure the destination directory exists
            os.makedirs(path, exist_ok=True)
            # Ensure prefix has trailing slash for directory listing
            if prefix and not prefix.endswith("/"):
                prefix = f"{prefix}/"
            # Create initial client for listing (will recreate with proper pool size later)
            s3_client = S3FileSystem._get_s3_client(anonymous=is_anonymous)
            # List all objects in the bucket with the given prefix;
            # the paginator transparently handles >1000-key listings.
            paginator = s3_client.get_paginator("list_objects_v2")
            pages = paginator.paginate(Bucket=bucket, Prefix=prefix)
            # Collect all files to download and track total size
            files_to_download = []
            total_size = 0
            for page in pages:
                for obj in page.get("Contents", []):
                    key = obj["Key"]
                    size = obj.get("Size", 0)
                    # Skip if it's a directory marker
                    if key.endswith("/"):
                        continue
                    # Get the relative path (remove the prefix)
                    relative_path = key[len(prefix) :]
                    # Apply include filters
                    if substrings_to_include:
                        if not any(
                            substr in relative_path for substr in substrings_to_include
                        ):
                            continue
                    # Apply exclude filters; lstrip("*") turns glob-ish
                    # patterns like "*.safetensors" into plain suffixes.
                    if suffixes_to_exclude:
                        if any(
                            relative_path.endswith(suffix.lstrip("*"))
                            for suffix in suffixes_to_exclude
                        ):
                            continue
                    # Construct local file path
                    local_file_path = os.path.join(path, relative_path)
                    files_to_download.append((bucket, key, local_file_path))
                    total_size += size
            # Download files concurrently
            if not files_to_download:
                logger.info(f"No files matching filters to download from {bucket_uri}")
                return
            # Dynamically calculate workers if not provided
            if max_workers is None:
                max_workers = S3FileSystem._calculate_optimal_workers(
                    num_files=len(files_to_download),
                    total_size=total_size,
                    default_max=100,
                    default_min=10,
                )
            # Create shared client with proper connection pool size for downloads
            # (+10 headroom over the worker count to avoid pool exhaustion).
            s3_client = S3FileSystem._get_s3_client(
                max_pool_connections=max_workers + 10, anonymous=is_anonymous
            )
            failed_downloads = []
            with ThreadPoolExecutor(max_workers=max_workers) as executor:
                # Submit all download tasks with shared client
                future_to_key = {
                    executor.submit(
                        S3FileSystem._download_single_file,
                        s3_client,  # Pass shared client to each worker
                        bucket,
                        key,
                        local_path,
                    ): key
                    for bucket, key, local_path in files_to_download
                }
                # Process completed downloads
                for future in as_completed(future_to_key):
                    key, success = future.result()
                    if not success:
                        failed_downloads.append(key)
            # Report any failures
            if failed_downloads:
                logger.error(
                    f"Failed to download {len(failed_downloads)} files: {failed_downloads[:5]}..."
                )
        except Exception as e:
            logger.exception(f"Error downloading files from {bucket_uri}: {e}")
            raise
@staticmethod
def upload_files(
local_path: str,
bucket_uri: str,
) -> None:
"""Upload files to cloud storage.
Args:
local_path: The local path of the files to upload.
bucket_uri: The bucket uri to upload the files to, must start with `s3://`.
"""
try:
bucket, prefix, is_anonymous = S3FileSystem._parse_s3_uri(bucket_uri)
# Ensure prefix has trailing slash for directory upload
if prefix and not prefix.endswith("/"):
prefix = f"{prefix}/"
s3_client = S3FileSystem._get_s3_client(anonymous=is_anonymous)
local_path_obj = Path(local_path)
# Walk through the local directory and upload each file
if local_path_obj.is_file():
# Upload a single file
file_name = local_path_obj.name
s3_key = f"{prefix}{file_name}" if prefix else file_name
s3_client.upload_file(str(local_path_obj), bucket, s3_key)
logger.debug(f"Uploaded {local_path_obj} to s3://{bucket}/{s3_key}")
elif local_path_obj.is_dir():
# Upload directory recursively
for file_path in local_path_obj.rglob("*"):
if file_path.is_file():
# Calculate relative path from local_path
relative_path = file_path.relative_to(local_path_obj)
# Construct S3 key
s3_key = f"{prefix}{relative_path.as_posix()}"
# Upload file
s3_client.upload_file(str(file_path), bucket, s3_key)
logger.debug(f"Uploaded {file_path} to s3://{bucket}/{s3_key}")
else:
raise ValueError(
f"Path {local_path} does not exist or is not a file/directory"
)
except Exception as e:
logger.exception(f"Error uploading files to {bucket_uri}: {e}")
raise
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/_internal/common/utils/cloud_filesystem/s3_filesystem.py",
"license": "Apache License 2.0",
"lines": 311,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/llm/tests/common/cloud/test_cloud_filesystem.py | """Tests for CloudFileSystem class."""
import os
import sys
import tempfile
from unittest.mock import patch
import pytest
from ray.llm._internal.common.utils.cloud_utils import CloudFileSystem
class TestCloudFileSystem:
    """Tests for the CloudFileSystem class.

    The GCS backend is patched out, so these tests verify only the
    orchestration logic (hash handling and delegation to the backend).
    """
    @patch("ray.llm._internal.common.utils.cloud_utils.GCSFileSystem")
    def test_download_model(self, mock_gcs_filesystem):
        """Test downloading a model from cloud storage."""
        # Mock GCSFileSystem.get_file to return hash content
        mock_gcs_filesystem.get_file.return_value = "abcdef1234567890"
        # Create temp directory for testing
        with tempfile.TemporaryDirectory() as tempdir:
            # Test downloading model
            with patch.object(CloudFileSystem, "download_files") as mock_download:
                CloudFileSystem.download_model(tempdir, "gs://bucket/model", False)
                # Check that hash file was processed: the remote hash must be
                # written to the local HuggingFace-style refs/main file.
                assert os.path.exists(os.path.join(tempdir, "refs", "main"))
                with open(os.path.join(tempdir, "refs", "main"), "r") as f:
                    assert f.read() == "abcdef1234567890"
                # Verify get_file was called for hash file
                mock_gcs_filesystem.get_file.assert_called_once_with(
                    "gs://bucket/model/hash", decode_as_utf_8=True
                )
                # Check that download_files was called correctly
                # (call_args[1] is the kwargs dict of the single call).
                mock_download.assert_called_once()
                call_args = mock_download.call_args[1]
                assert call_args["path"] == os.path.join(
                    tempdir, "snapshots", "abcdef1234567890"
                )
                assert call_args["bucket_uri"] == "gs://bucket/model"
                assert call_args["substrings_to_include"] == []
                assert call_args["suffixes_to_exclude"] is None
    @patch("ray.llm._internal.common.utils.cloud_utils.GCSFileSystem")
    def test_upload_model(self, mock_gcs_filesystem):
        """Test uploading a model to cloud storage."""
        # Create temp directory for testing
        with tempfile.TemporaryDirectory() as tempdir:
            # NOTE(review): `hash` shadows the builtin; harmless in this
            # scope but worth renaming (e.g. `model_hash`).
            hash = "abcdef1234567890"
            # Create refs/main file
            os.makedirs(os.path.join(tempdir, "refs"), exist_ok=True)
            model_rev_path = os.path.join(tempdir, "refs", "main")
            with open(model_rev_path, "w") as f:
                f.write(hash)
            # Create snapshots/<hash> folder
            model_asset_path = os.path.join(tempdir, "snapshots", hash)
            os.makedirs(model_asset_path)
            # Test uploading model
            CloudFileSystem.upload_model(tempdir, "gs://bucket/model")
            # Check that upload_files was called twice - once for model assets and once for hash file
            assert mock_gcs_filesystem.upload_files.call_count == 2
            # Verify the calls were made with correct arguments.
            # Sets are used because the two calls may come in either order.
            calls = mock_gcs_filesystem.upload_files.call_args_list
            call_paths = {
                call[0][0] for call in calls
            }  # Extract local_path from each call
            call_uris = {
                call[0][1] for call in calls
            }  # Extract bucket_uri from each call
            assert model_asset_path in call_paths
            assert model_rev_path in call_paths
            assert "gs://bucket/model" in call_uris
            assert "gs://bucket/model/hash" in call_uris
# Allow running this test module directly; forward pytest's exit code.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/tests/common/cloud/test_cloud_filesystem.py",
"license": "Apache License 2.0",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/llm/tests/common/cloud/test_mirror_config.py | """Tests for mirror config classes."""
import sys
import pytest
from ray.llm._internal.common.utils.cloud_utils import (
CloudMirrorConfig,
LoraMirrorConfig,
)
class TestCloudMirrorConfig:
    """Unit tests for CloudMirrorConfig URI validation and storage typing."""

    def test_valid_s3_uri(self):
        """An s3:// URI is accepted and typed as "s3"."""
        uri = "s3://my-bucket/path"
        config = CloudMirrorConfig(bucket_uri=uri)
        assert config.bucket_uri == uri
        assert config.storage_type == "s3"

    def test_valid_gcs_uri(self):
        """A gs:// URI is accepted and typed as "gcs"."""
        uri = "gs://my-bucket/path"
        config = CloudMirrorConfig(bucket_uri=uri)
        assert config.bucket_uri == uri
        assert config.storage_type == "gcs"

    def test_valid_abfss_uri(self):
        """An abfss:// URI is accepted and typed as "abfss"."""
        uri = "abfss://container@account.dfs.core.windows.net/path"
        config = CloudMirrorConfig(bucket_uri=uri)
        assert config.bucket_uri == uri
        assert config.storage_type == "abfss"

    def test_valid_azure_uri(self):
        """An azure:// URI is accepted and typed as "azure"."""
        uri = "azure://container@account.blob.core.windows.net/path"
        config = CloudMirrorConfig(bucket_uri=uri)
        assert config.bucket_uri == uri
        assert config.storage_type == "azure"

    def test_none_uri(self):
        """A missing URI leaves both fields as None."""
        config = CloudMirrorConfig(bucket_uri=None)
        assert config.bucket_uri is None
        assert config.storage_type is None

    def test_invalid_uri(self):
        """An unsupported scheme is rejected with a descriptive error."""
        with pytest.raises(
            ValueError, match='Got invalid value "file:///tmp" for bucket_uri'
        ):
            CloudMirrorConfig(bucket_uri="file:///tmp")

    def test_extra_files(self):
        """extra_files entries are parsed into structured objects."""
        extra = [
            {"bucket_uri": "s3://bucket/file1", "destination_path": "/dest1"},
            {"bucket_uri": "s3://bucket/file2", "destination_path": "/dest2"},
        ]
        config = CloudMirrorConfig(bucket_uri="s3://bucket/path", extra_files=extra)
        assert len(config.extra_files) == 2
        first = config.extra_files[0]
        assert first.bucket_uri == "s3://bucket/file1"
        assert first.destination_path == "/dest1"
class TestLoraMirrorConfig:
    """Unit tests for LoraMirrorConfig URI parsing and optional fields."""

    def test_valid_s3_config(self):
        """An s3:// LoRA config exposes bucket name and path."""
        uri = "s3://my-bucket/lora-models"
        config = LoraMirrorConfig(
            lora_model_id="test-model", bucket_uri=uri, max_total_tokens=1000
        )
        assert config.lora_model_id == "test-model"
        assert config.bucket_uri == uri
        assert config.bucket_name == "my-bucket"
        assert config.bucket_path == "lora-models"

    def test_valid_abfss_config(self):
        """An abfss:// LoRA config maps container and path correctly."""
        uri = "abfss://container@account.dfs.core.windows.net/lora/models"
        config = LoraMirrorConfig(
            lora_model_id="test-model", bucket_uri=uri, max_total_tokens=1000
        )
        assert config.lora_model_id == "test-model"
        assert config.bucket_uri == uri
        assert config.bucket_name == "container"
        assert config.bucket_path == "lora/models"

    def test_valid_azure_config(self):
        """An azure:// LoRA config maps container and path correctly."""
        uri = "azure://container@account.blob.core.windows.net/lora/models"
        config = LoraMirrorConfig(
            lora_model_id="test-model", bucket_uri=uri, max_total_tokens=1000
        )
        assert config.lora_model_id == "test-model"
        assert config.bucket_uri == uri
        assert config.bucket_name == "container"
        assert config.bucket_path == "lora/models"

    def test_bucket_path_parsing(self):
        """Multi-segment paths split into bucket name plus remainder."""
        cases = [
            ("s3://bucket/path/to/model", "bucket", "path/to/model"),
            (
                "abfss://container@account.dfs.core.windows.net/deep/nested/path",
                "container",
                "deep/nested/path",
            ),
        ]
        for uri, expected_name, expected_path in cases:
            config = LoraMirrorConfig(
                lora_model_id="test", bucket_uri=uri, max_total_tokens=1000
            )
            assert config.bucket_name == expected_name
            assert config.bucket_path == expected_path

    def test_invalid_uri(self):
        """An unsupported scheme is rejected with a descriptive error."""
        with pytest.raises(
            ValueError, match='Got invalid value "file:///tmp" for bucket_uri'
        ):
            LoraMirrorConfig(
                lora_model_id="test-model",
                bucket_uri="file:///tmp",
                max_total_tokens=1000,
            )

    def test_optional_fields(self):
        """Optional token limit and sync args are stored verbatim."""
        config = LoraMirrorConfig(
            lora_model_id="test-model",
            bucket_uri="s3://bucket/path",
            max_total_tokens=1000,
            sync_args=["--exclude", "*.tmp"],
        )
        assert config.max_total_tokens == 1000
        assert config.sync_args == ["--exclude", "*.tmp"]
# Allow running this test module directly; forward pytest's exit code.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/tests/common/cloud/test_mirror_config.py",
"license": "Apache License 2.0",
"lines": 141,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/llm/tests/common/cloud/test_pyarrow_filesystem.py | """Tests for PyArrowFileSystem class."""
import os
import sys
import tempfile
from unittest.mock import ANY, MagicMock, patch
import pyarrow.fs as pa_fs
import pytest
from ray.llm._internal.common.utils.cloud_filesystem.pyarrow_filesystem import (
PyArrowFileSystem,
)
class TestPyArrowFileSystem:
    """Tests for the PyArrowFileSystem class.

    The underlying pyarrow filesystems are mocked, so these tests exercise
    only URI handling, filtering and the copy_files call contracts.
    """
    @patch("pyarrow.fs.S3FileSystem")
    def test_get_file(self, mock_s3fs):
        """Test getting a file from cloud storage."""
        # Setup mock filesystem
        mock_fs = MagicMock()
        mock_s3fs.return_value = mock_fs
        # Mock file content and info (open_input_file is a context manager,
        # hence the __enter__ chaining).
        mock_file = MagicMock()
        mock_file.read.return_value = b"test file content"
        mock_fs.open_input_file.return_value.__enter__.return_value = mock_file
        mock_fs.get_file_info.return_value.type = pa_fs.FileType.File
        # Test getting file as string (default)
        content = PyArrowFileSystem.get_file("s3://bucket/test.txt")
        assert content == "test file content"
        # Test getting file as bytes
        content_bytes = PyArrowFileSystem.get_file(
            "s3://bucket/test.txt", decode_as_utf_8=False
        )
        assert content_bytes == b"test file content"
        # Test non-existent file
        mock_fs.get_file_info.return_value.type = pa_fs.FileType.NotFound
        assert PyArrowFileSystem.get_file("s3://bucket/nonexistent.txt") is None
    @patch("pyarrow.fs.GcsFileSystem")
    def test_list_subfolders(self, mock_gcsfs):
        """Test listing subfolders in cloud storage."""
        # Setup mock filesystem
        mock_fs = MagicMock()
        mock_gcsfs.return_value = mock_fs
        # Create mock file infos for directory listing
        dir1 = MagicMock()
        dir1.type = pa_fs.FileType.Directory
        dir1.path = "bucket/parent/dir1"
        dir2 = MagicMock()
        dir2.type = pa_fs.FileType.Directory
        dir2.path = "bucket/parent/dir2"
        file1 = MagicMock()
        file1.type = pa_fs.FileType.File
        file1.path = "bucket/parent/file.txt"
        mock_fs.get_file_info.return_value = [dir1, dir2, file1]
        # Test listing subfolders: only directories should be returned.
        folders = PyArrowFileSystem.list_subfolders("gs://bucket/parent")
        assert sorted(folders) == ["dir1", "dir2"]
    @patch.object(PyArrowFileSystem, "get_fs_and_path")
    def test_list_subfolders_exception_handling(self, mock_get_fs_and_path):
        """Test that list_subfolders returns empty list when get_fs_and_path raises exception."""
        # Make get_fs_and_path raise an exception
        mock_get_fs_and_path.side_effect = ValueError("Example exception")
        # Test that list_subfolders handles the exception gracefully
        folders = PyArrowFileSystem.list_subfolders("gs://bucket/parent")
        assert folders == []
        # Verify get_fs_and_path was called (with a trailing slash appended).
        mock_get_fs_and_path.assert_called_once_with("gs://bucket/parent/")
    @patch("pyarrow.fs.copy_files")
    @patch("pyarrow.fs.S3FileSystem")
    def test_download_files_no_filters(self, mock_s3fs, mock_copy_files):
        """Test downloading files from cloud storage without filters."""
        # Setup mock filesystem
        mock_fs = MagicMock()
        mock_s3fs.return_value = mock_fs
        # Create temp directory for testing
        with tempfile.TemporaryDirectory() as tempdir:
            # Test downloading files without filters
            PyArrowFileSystem.download_files(tempdir, "s3://bucket/dir")
            # Verify copy_files was called with correct arguments: with no
            # filters, the whole prefix is copied in one bulk call.
            mock_copy_files.assert_called_once_with(
                source="bucket/dir",
                destination=tempdir,
                source_filesystem=mock_fs,
                destination_filesystem=ANY,
                use_threads=True,
                chunk_size=64 * 1024 * 1024,
            )
    @patch("pyarrow.fs.copy_files")
    @patch("pyarrow.fs.S3FileSystem")
    def test_download_files_with_filters(self, mock_s3fs, mock_copy_files):
        """Test downloading files from cloud storage with filters."""
        # Setup mock filesystem
        mock_fs = MagicMock()
        mock_s3fs.return_value = mock_fs
        # Create mock file infos for listing
        file_info1 = MagicMock()
        file_info1.type = pa_fs.FileType.File
        file_info1.path = "bucket/dir/file1.txt"
        file_info2 = MagicMock()
        file_info2.type = pa_fs.FileType.File
        file_info2.path = "bucket/dir/subdir/file2.json"
        file_info3 = MagicMock()
        file_info3.type = pa_fs.FileType.File
        file_info3.path = "bucket/dir/file3.tmp"
        dir_info = MagicMock()
        dir_info.type = pa_fs.FileType.Directory
        dir_info.path = "bucket/dir/subdir"
        mock_fs.get_file_info.return_value = [
            file_info1,
            file_info2,
            file_info3,
            dir_info,
        ]
        # Create temp directory for testing
        with tempfile.TemporaryDirectory() as tempdir:
            # Test downloading files with filters
            PyArrowFileSystem.download_files(
                tempdir,
                "s3://bucket/dir",
                substrings_to_include=["file1", "file2"],
                suffixes_to_exclude=[".tmp"],
            )
            # Verify copy_files was called for each filtered file
            # (filtered downloads go per-file, not as one bulk copy).
            assert mock_copy_files.call_count == 2
            # Get all calls to copy_files
            calls = mock_copy_files.call_args_list
            # Verify the calls (order may vary due to threading)
            expected_sources = {"bucket/dir/file1.txt", "bucket/dir/subdir/file2.json"}
            expected_dests = {
                os.path.join(tempdir, "file1.txt"),
                os.path.join(tempdir, "subdir", "file2.json"),
            }
            actual_sources = {call.kwargs["source"] for call in calls}
            actual_dests = {call.kwargs["destination"] for call in calls}
            assert actual_sources == expected_sources
            assert actual_dests == expected_dests
            # Verify all calls have correct filesystem and options
            for call in calls:
                assert call.kwargs["source_filesystem"] == mock_fs
                assert call.kwargs["destination_filesystem"] is not None
                assert call.kwargs["use_threads"] is True
                assert call.kwargs["chunk_size"] == 64 * 1024 * 1024
    @patch("pyarrow.fs.copy_files")
    @patch("pyarrow.fs.S3FileSystem")
    def test_upload_files(self, mock_s3fs, mock_copy_files):
        """Test uploading files to cloud storage."""
        # Setup mock filesystem
        mock_fs = MagicMock()
        mock_s3fs.return_value = mock_fs
        # Create temp directory for testing
        with tempfile.TemporaryDirectory() as tempdir:
            # Test uploading files
            PyArrowFileSystem.upload_files(tempdir, "s3://bucket/dir")
            # Check that the files are copied
            mock_copy_files.assert_called_once_with(
                source=tempdir,
                destination="bucket/dir",
                source_filesystem=ANY,
                destination_filesystem=ANY,
            )
class TestFilterFiles:
    """Tests for PyArrowFileSystem._filter_files."""

    @staticmethod
    def _file_info(path):
        """Build a mock FileInfo describing a regular file at *path*."""
        info = MagicMock()
        info.type = pa_fs.FileType.File
        info.path = path
        return info

    @staticmethod
    def _dir_info(path):
        """Build a mock FileInfo describing a directory at *path*."""
        info = MagicMock()
        info.type = pa_fs.FileType.Directory
        info.path = path
        return info

    def test_filter_files_no_filters(self):
        """Without filters, every file passes and directories are dropped."""
        mock_fs = MagicMock()
        mock_fs.get_file_info.return_value = [
            self._file_info("bucket/model/file1.txt"),
            self._file_info("bucket/model/subdir/file2.json"),
            self._dir_info("bucket/model/subdir"),
        ]
        result = PyArrowFileSystem._filter_files(
            fs=mock_fs, source_path="bucket/model", destination_path="/local/dest"
        )
        expected = [
            ("bucket/model/file1.txt", "/local/dest/file1.txt"),
            ("bucket/model/subdir/file2.json", "/local/dest/subdir/file2.json"),
        ]
        assert sorted(result) == sorted(expected)
        # The listing must be one recursive selection rooted at the source.
        mock_fs.get_file_info.assert_called_once()
        selector = mock_fs.get_file_info.call_args[0][0]
        assert selector.base_dir == "bucket/model"
        assert selector.recursive is True

    def test_filter_files_with_inclusion_substrings(self):
        """Only files whose path contains an include substring survive."""
        mock_fs = MagicMock()
        mock_fs.get_file_info.return_value = [
            self._file_info("bucket/model/config.json"),
            self._file_info("bucket/model/weights.bin"),
            self._file_info("bucket/model/tokenizer.json"),
        ]
        result = PyArrowFileSystem._filter_files(
            fs=mock_fs,
            source_path="bucket/model",
            destination_path="/local/dest",
            substrings_to_include=["config", "tokenizer"],
        )
        expected = [
            ("bucket/model/config.json", "/local/dest/config.json"),
            ("bucket/model/tokenizer.json", "/local/dest/tokenizer.json"),
        ]
        assert sorted(result) == sorted(expected)

    def test_filter_files_with_exclusion_suffixes(self):
        """Files ending in an excluded suffix are filtered out."""
        mock_fs = MagicMock()
        mock_fs.get_file_info.return_value = [
            self._file_info("bucket/model/model.bin"),
            self._file_info("bucket/model/config.json"),
            self._file_info("bucket/model/temp.tmp"),
            self._file_info("bucket/model/log.txt"),
        ]
        result = PyArrowFileSystem._filter_files(
            fs=mock_fs,
            source_path="bucket/model",
            destination_path="/local/dest",
            suffixes_to_exclude=[".tmp", ".txt"],
        )
        expected = [
            ("bucket/model/model.bin", "/local/dest/model.bin"),
            ("bucket/model/config.json", "/local/dest/config.json"),
        ]
        assert sorted(result) == sorted(expected)

    def test_filter_files_with_both_filters(self):
        """Inclusion and exclusion combine: include first, then exclude."""
        mock_fs = MagicMock()
        mock_fs.get_file_info.return_value = [
            self._file_info("bucket/model/config.json"),
            self._file_info("bucket/model/config.tmp"),
            self._file_info("bucket/model/weights.bin"),
            self._file_info("bucket/model/tokenizer.json"),
        ]
        result = PyArrowFileSystem._filter_files(
            fs=mock_fs,
            source_path="bucket/model",
            destination_path="/local/dest",
            substrings_to_include=["config", "tokenizer"],
            suffixes_to_exclude=[".tmp"],
        )
        expected = [
            ("bucket/model/config.json", "/local/dest/config.json"),
            ("bucket/model/tokenizer.json", "/local/dest/tokenizer.json"),
        ]
        assert sorted(result) == sorted(expected)
class TestPyArrowFileSystemAzureSupport:
"""Tests for Azure/ABFSS support in PyArrowFileSystem."""
    @patch("adlfs.AzureBlobFileSystem")
    @patch("azure.identity.DefaultAzureCredential")
    @patch("pyarrow.fs.PyFileSystem")
    @patch("pyarrow.fs.FSSpecHandler")
    def test_get_fs_and_path_abfss(
        self, mock_handler, mock_pyfs, mock_cred, mock_adlfs
    ):
        """Test getting ABFSS filesystem and path."""
        # NOTE: patch decorators apply bottom-up, so parameters arrive as
        # FSSpecHandler, PyFileSystem, DefaultAzureCredential, AzureBlobFileSystem.
        mock_adlfs_instance = MagicMock()
        mock_adlfs.return_value = mock_adlfs_instance
        mock_pyfs_instance = MagicMock()
        mock_pyfs.return_value = mock_pyfs_instance
        fs, path = PyArrowFileSystem.get_fs_and_path(
            "abfss://container@account.dfs.core.windows.net/path/to/file"
        )
        # The container becomes the path root; the account is part of the fs.
        assert fs == mock_pyfs_instance
        assert path == "container/path/to/file"
        # Verify the adlfs filesystem was created with correct parameters
        mock_adlfs.assert_called_once_with(
            account_name="account", credential=mock_cred.return_value
        )
        mock_handler.assert_called_once_with(mock_adlfs_instance)
        mock_pyfs.assert_called_once_with(mock_handler.return_value)
    @patch("adlfs.AzureBlobFileSystem")
    @patch("azure.identity.DefaultAzureCredential")
    @patch("pyarrow.fs.PyFileSystem")
    @patch("pyarrow.fs.FSSpecHandler")
    def test_get_fs_and_path_azure(
        self, mock_handler, mock_pyfs, mock_cred, mock_adlfs
    ):
        """Test getting Azure filesystem and path."""
        # Same wiring as the abfss test, but via the azure:// scheme.
        mock_adlfs_instance = MagicMock()
        mock_adlfs.return_value = mock_adlfs_instance
        mock_pyfs_instance = MagicMock()
        mock_pyfs.return_value = mock_pyfs_instance
        fs, path = PyArrowFileSystem.get_fs_and_path(
            "azure://container@account.blob.core.windows.net/path/to/file"
        )
        # The container becomes the path root; the account is part of the fs.
        assert fs == mock_pyfs_instance
        assert path == "container/path/to/file"
        # Verify the adlfs filesystem was created with correct parameters
        mock_adlfs.assert_called_once_with(
            account_name="account", credential=mock_cred.return_value
        )
def test_abfss_uri_validation(self):
"""Test ABFSS URI validation."""
# Test valid URIs
valid_uris = [
"abfss://container@account.dfs.core.windows.net/path",
"abfss://my-container@myaccount.dfs.core.windows.net/deep/nested/path",
]
for uri in valid_uris:
with patch("adlfs.AzureBlobFileSystem"), patch(
"azure.identity.DefaultAzureCredential"
), patch("pyarrow.fs.PyFileSystem"), patch("pyarrow.fs.FSSpecHandler"):
# Should not raise an exception
PyArrowFileSystem._create_abfss_filesystem(uri)
# Test invalid URIs
invalid_uris = [
"abfss://container", # Missing @account
"abfss://@account.dfs.core.windows.net/path", # Empty container
"abfss://container@account.wrong.domain/path", # Wrong domain
"abfss://container@.dfs.core.windows.net/path", # Empty account
"abfss://container@account.dfs.core.windows.net", # No path (but this is actually valid)
]
for uri in invalid_uris[:-1]: # Skip the last one as it's actually valid
with pytest.raises(ValueError):
PyArrowFileSystem._create_abfss_filesystem(uri)
def test_azure_uri_validation(self):
"""Test Azure URI validation."""
# Test valid URIs
valid_uris = [
"azure://container@account.blob.core.windows.net/path",
"azure://container@account.dfs.core.windows.net/path",
"azure://my-container@myaccount.blob.core.windows.net/deep/nested/path",
]
for uri in valid_uris:
with patch("adlfs.AzureBlobFileSystem"), patch(
"azure.identity.DefaultAzureCredential"
), patch("pyarrow.fs.PyFileSystem"), patch("pyarrow.fs.FSSpecHandler"):
# Should not raise an exception
PyArrowFileSystem._create_azure_filesystem(uri)
# Test invalid URIs
invalid_uris = [
"azure://container", # Missing @account
"azure://@account.blob.core.windows.net/path", # Empty container
"azure://container@account.wrong.domain/path", # Wrong domain
"azure://container@.blob.core.windows.net/path", # Empty account
]
for uri in invalid_uris:
with pytest.raises(ValueError):
PyArrowFileSystem._create_azure_filesystem(uri)
def test_abfss_import_error(self):
"""Test ImportError when adlfs is not available."""
with patch(
"builtins.__import__", side_effect=ImportError("No module named 'adlfs'")
):
with pytest.raises(
ImportError, match="You must `pip install adlfs azure-identity`"
):
PyArrowFileSystem._create_abfss_filesystem(
"abfss://container@account.dfs.core.windows.net/path"
)
def test_azure_import_error(self):
"""Test ImportError when adlfs is not available for Azure."""
with patch(
"builtins.__import__", side_effect=ImportError("No module named 'adlfs'")
):
with pytest.raises(
ImportError, match="You must `pip install adlfs azure-identity`"
):
PyArrowFileSystem._create_azure_filesystem(
"azure://container@account.blob.core.windows.net/path"
)
    @patch("adlfs.AzureBlobFileSystem")
    @patch("azure.identity.DefaultAzureCredential")
    @patch("pyarrow.fs.PyFileSystem")
    @patch("pyarrow.fs.FSSpecHandler")
    def test_abfss_anonymous_access_ignored(
        self, mock_handler, mock_pyfs, mock_cred, mock_adlfs
    ):
        """Test that anonymous access pattern is ignored for ABFSS URIs.

        The "@" in an ABFSS URI separates container from account and must not
        be mistaken for the s3-style "anonymous@" marker.
        """
        mock_adlfs_instance = MagicMock()
        mock_adlfs.return_value = mock_adlfs_instance
        mock_pyfs_instance = MagicMock()
        mock_pyfs.return_value = mock_pyfs_instance
        # ABFSS URI with @ symbol should not trigger anonymous access logic
        fs, path = PyArrowFileSystem.get_fs_and_path(
            "abfss://container@account.dfs.core.windows.net/path"
        )
        assert fs == mock_pyfs_instance
        assert path == "container/path"
        # Verify that DefaultAzureCredential was used, not anonymous access
        mock_cred.assert_called_once()
        mock_adlfs.assert_called_once_with(
            account_name="account", credential=mock_cred.return_value
        )
    @patch("adlfs.AzureBlobFileSystem")
    @patch("azure.identity.DefaultAzureCredential")
    @patch("pyarrow.fs.PyFileSystem")
    @patch("pyarrow.fs.FSSpecHandler")
    def test_get_file_abfss(self, mock_handler, mock_pyfs, mock_cred, mock_adlfs):
        """Test getting a file from ABFSS storage."""
        # Setup mock filesystem and file
        mock_adlfs_instance = MagicMock()
        mock_adlfs.return_value = mock_adlfs_instance
        mock_fs = MagicMock()
        mock_pyfs.return_value = mock_fs
        # Mock file content and info (open_input_file is a context manager).
        mock_file = MagicMock()
        mock_file.read.return_value = b"test abfss content"
        mock_fs.open_input_file.return_value.__enter__.return_value = mock_file
        mock_fs.get_file_info.return_value.type = pa_fs.FileType.File
        # Test getting file as string (default)
        content = PyArrowFileSystem.get_file(
            "abfss://container@account.dfs.core.windows.net/test.txt"
        )
        assert content == "test abfss content"
        # Verify the correct path was used: the account is consumed by the
        # filesystem, leaving a container-relative path.
        mock_fs.get_file_info.assert_called_with("container/test.txt")
        mock_fs.open_input_file.assert_called_with("container/test.txt")
@patch("adlfs.AzureBlobFileSystem")
@patch("azure.identity.DefaultAzureCredential")
@patch("pyarrow.fs.PyFileSystem")
@patch("pyarrow.fs.FSSpecHandler")
def test_list_subfolders_abfss(
self, mock_handler, mock_pyfs, mock_cred, mock_adlfs
):
"""Test listing subfolders in ABFSS storage."""
# Setup mock filesystem
mock_adlfs_instance = MagicMock()
mock_adlfs.return_value = mock_adlfs_instance
mock_fs = MagicMock()
mock_pyfs.return_value = mock_fs
# Create mock file infos for directory listing
dir1 = MagicMock()
dir1.type = pa_fs.FileType.Directory
dir1.path = "container/parent/subdir1"
dir2 = MagicMock()
dir2.type = pa_fs.FileType.Directory
dir2.path = "container/parent/subdir2"
file1 = MagicMock()
file1.type = pa_fs.FileType.File
file1.path = "container/parent/file.txt"
mock_fs.get_file_info.return_value = [dir1, dir2, file1]
# Test listing subfolders
folders = PyArrowFileSystem.list_subfolders(
"abfss://container@account.dfs.core.windows.net/parent"
)
assert sorted(folders) == ["subdir1", "subdir2"]
# Verify the correct path was used
mock_fs.get_file_info.assert_called_once()
call_args = mock_fs.get_file_info.call_args[0][0]
assert call_args.base_dir == "container/parent/"
assert call_args.recursive is False
# Allow running this test module directly; propagate pytest's exit code.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/tests/common/cloud/test_pyarrow_filesystem.py",
"license": "Apache License 2.0",
"lines": 489,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/llm/tests/common/cloud/test_s3_filesystem.py | """Tests for S3FileSystem class."""
import json
import os
import sys
import tempfile
from unittest.mock import MagicMock, patch
import pytest
from botocore.exceptions import ClientError
from ray.llm._internal.common.utils.cloud_filesystem.s3_filesystem import (
S3FileSystem,
)
class TestS3FileSystem:
    """Tests for the S3FileSystem class.

    Every test patches ``boto3.client``, so no AWS credentials or network
    access are required.
    """

    @patch("boto3.client")
    @pytest.mark.parametrize(
        "decode_as_utf_8,file_content,expected_content",
        [
            (True, b"test file content", "test file content"),
            (False, b"test file content", b"test file content"),
        ],
    )
    def test_get_file(
        self,
        mock_boto_client,
        decode_as_utf_8,
        file_content,
        expected_content,
    ):
        """Test getting a file from S3 as string or bytes."""
        # Setup mock S3 client
        # (mock_boto_client comes from @patch; the rest from parametrize)
        mock_s3_client = MagicMock()
        mock_boto_client.return_value = mock_s3_client
        # Mock get_object response
        mock_body = MagicMock()
        mock_body.read.return_value = file_content
        mock_s3_client.get_object.return_value = {
            "Body": mock_body,
            "ContentLength": len(file_content),
        }
        # Test getting file
        content = S3FileSystem.get_file(
            "s3://bucket/test.txt", decode_as_utf_8=decode_as_utf_8
        )
        assert content == expected_content
        mock_s3_client.get_object.assert_called_once_with(
            Bucket="bucket", Key="test.txt"
        )

    @patch("boto3.client")
    def test_get_file_not_found(self, mock_boto_client):
        """Test getting a non-existent file from S3."""
        # Setup mock S3 client
        mock_s3_client = MagicMock()
        mock_boto_client.return_value = mock_s3_client
        # Simulate NoSuchKey error
        mock_s3_client.get_object.side_effect = ClientError(
            {
                "Error": {
                    "Code": "NoSuchKey",
                    "Message": "The specified key does not exist.",
                }
            },
            "GetObject",
        )
        # A missing key is reported as None rather than raising.
        assert S3FileSystem.get_file("s3://bucket/nonexistent.txt") is None

    @patch("boto3.client")
    def test_get_file_anonymous(self, mock_boto_client):
        """Test getting a file from S3 with anonymous access."""
        # Setup mock S3 client
        mock_s3_client = MagicMock()
        mock_boto_client.return_value = mock_s3_client
        # Mock get_object response
        mock_body = MagicMock()
        mock_body.read.return_value = b"anonymous content"
        mock_s3_client.get_object.return_value = {
            "Body": mock_body,
        }
        # Test getting file with anonymous URI
        content = S3FileSystem.get_file("s3://anonymous@bucket/test.txt")
        assert content == "anonymous content"
        # Verify anonymous config was used (UNSIGNED signature)
        # NOTE(review): this only checks boto3.client was called, not that
        # an unsigned config was actually passed — consider tightening.
        assert mock_boto_client.called

    @patch("boto3.client")
    @pytest.mark.parametrize(
        "uri,expected_prefix",
        [
            ("s3://bucket/parent", "parent/"),
            ("s3://bucket/parent/", "parent/"),
        ],
    )
    def test_list_subfolders(self, mock_boto_client, uri, expected_prefix):
        """Test listing subfolders in S3 (trailing slash optional)."""
        # Setup mock S3 client
        mock_s3_client = MagicMock()
        mock_boto_client.return_value = mock_s3_client
        # Mock list_objects_v2 response
        mock_s3_client.list_objects_v2.return_value = {
            "CommonPrefixes": [
                {"Prefix": f"{expected_prefix}folder1/"},
                {"Prefix": f"{expected_prefix}folder2/"},
            ]
        }
        folders = S3FileSystem.list_subfolders(uri)
        assert sorted(folders) == ["folder1", "folder2"]
        mock_s3_client.list_objects_v2.assert_called_once_with(
            Bucket="bucket", Prefix=expected_prefix, Delimiter="/"
        )

    @patch("boto3.client")
    def test_list_subfolders_exception(self, mock_boto_client):
        """Test listing subfolders when operation fails."""
        # Setup mock S3 client
        mock_s3_client = MagicMock()
        mock_boto_client.return_value = mock_s3_client
        mock_s3_client.list_objects_v2.side_effect = Exception("Network error")
        # Failures degrade to an empty listing instead of raising.
        assert S3FileSystem.list_subfolders("s3://bucket/parent") == []

    def test_list_subfolders_invalid_uri(self):
        """Test listing subfolders with invalid URI."""
        # list_subfolders catches all exceptions and returns empty list
        result = S3FileSystem.list_subfolders("gs://bucket/parent")
        assert result == []

    @patch("boto3.client")
    @pytest.mark.parametrize(
        "uri,expected_prefix",
        [
            ("s3://bucket/dir", "dir/"),
            ("s3://bucket/dir/", "dir/"),
        ],
    )
    def test_download_files(self, mock_boto_client, uri, expected_prefix):
        """Test downloading files from S3."""
        # Setup mock S3 client
        mock_s3_client = MagicMock()
        mock_boto_client.return_value = mock_s3_client
        # Mock paginator
        mock_paginator = MagicMock()
        mock_s3_client.get_paginator.return_value = mock_paginator
        mock_paginator.paginate.return_value = [
            {
                "Contents": [
                    {"Key": f"{expected_prefix}file1.txt", "Size": 100},
                    {"Key": f"{expected_prefix}file2.txt", "Size": 200},
                ]
            }
        ]
        # Mock download_file to do nothing
        mock_s3_client.download_file = MagicMock()
        with tempfile.TemporaryDirectory() as tempdir:
            S3FileSystem.download_files(tempdir, uri, max_workers=2)
        # Verify paginator was called correctly
        mock_s3_client.get_paginator.assert_called_with("list_objects_v2")
        mock_paginator.paginate.assert_called_once_with(
            Bucket="bucket", Prefix=expected_prefix
        )
        # Verify files were downloaded
        assert mock_s3_client.download_file.call_count == 2

    @patch("boto3.client")
    def test_download_files_with_filters(self, mock_boto_client):
        """Test downloading files with inclusion and exclusion filters."""
        # Setup mock S3 client
        mock_s3_client = MagicMock()
        mock_boto_client.return_value = mock_s3_client
        # Mock paginator with various files
        mock_paginator = MagicMock()
        mock_s3_client.get_paginator.return_value = mock_paginator
        mock_paginator.paginate.return_value = [
            {
                "Contents": [
                    {"Key": "dir/config.json", "Size": 100},
                    {"Key": "dir/tokenizer.json", "Size": 200},
                    {"Key": "dir/data.tmp", "Size": 300},
                    {"Key": "dir/readme.txt", "Size": 400},
                    {"Key": "dir/other.bin", "Size": 500},
                ]
            }
        ]
        # Mock download_file to do nothing
        mock_s3_client.download_file = MagicMock()
        with tempfile.TemporaryDirectory() as tempdir:
            S3FileSystem.download_files(
                tempdir,
                "s3://bucket/dir",
                substrings_to_include=["config", "tokenizer"],
                suffixes_to_exclude=[".tmp", ".txt"],
                max_workers=2,
            )
        # Should only download config.json and tokenizer.json
        # (included by substring, not excluded by suffix)
        assert mock_s3_client.download_file.call_count == 2
        # Get the keys that were downloaded (key is the 2nd positional arg)
        downloaded_keys = [
            call[0][1] for call in mock_s3_client.download_file.call_args_list
        ]
        assert "dir/config.json" in downloaded_keys
        assert "dir/tokenizer.json" in downloaded_keys

    @patch("boto3.client")
    def test_download_files_no_matching_files(self, mock_boto_client):
        """Test downloading when no files match filters."""
        # Setup mock S3 client
        mock_s3_client = MagicMock()
        mock_boto_client.return_value = mock_s3_client
        # Mock paginator with files that won't match
        mock_paginator = MagicMock()
        mock_s3_client.get_paginator.return_value = mock_paginator
        mock_paginator.paginate.return_value = [
            {
                "Contents": [
                    {"Key": "dir/file1.txt", "Size": 100},
                ]
            }
        ]
        with tempfile.TemporaryDirectory() as tempdir:
            # This should not raise, just return without downloading
            S3FileSystem.download_files(
                tempdir,
                "s3://bucket/dir",
                substrings_to_include=["nonexistent"],
                max_workers=2,
            )
        # Verify no files were downloaded
        mock_s3_client.download_file.assert_not_called()

    @patch("boto3.client")
    @patch("ray.llm._internal.common.utils.cloud_filesystem.s3_filesystem.logger")
    def test_download_files_concurrent_failure(self, mock_logger, mock_boto_client):
        """Test downloading files when some downloads fail."""
        # Setup mock S3 client
        # (mock args in reverse decorator order: logger patch is innermost)
        mock_s3_client = MagicMock()
        mock_boto_client.return_value = mock_s3_client
        # Mock paginator
        mock_paginator = MagicMock()
        mock_s3_client.get_paginator.return_value = mock_paginator
        mock_paginator.paginate.return_value = [
            {
                "Contents": [
                    {"Key": "dir/file1.txt", "Size": 100},
                    {"Key": "dir/file2.txt", "Size": 200},
                ]
            }
        ]
        # Make download_file fail
        mock_s3_client.download_file.side_effect = Exception("Download failed")
        with tempfile.TemporaryDirectory() as tempdir:
            # Should complete without raising, but log errors
            S3FileSystem.download_files(tempdir, "s3://bucket/dir", max_workers=2)
        # Verify error was logged for failed downloads
        mock_logger.error.assert_called()
        error_call = mock_logger.error.call_args_list[0][0][0]
        assert "Failed to download" in error_call

    def test_download_files_invalid_uri(self):
        """Test downloading files with invalid URI."""
        with tempfile.TemporaryDirectory() as tempdir:
            with pytest.raises(ValueError, match="Invalid S3 URI"):
                S3FileSystem.download_files(tempdir, "gs://bucket/dir")

    @patch("boto3.client")
    @pytest.mark.parametrize(
        "uri,expected_prefix",
        [
            ("s3://bucket/dir", "dir/"),
            ("s3://bucket/dir/", "dir/"),
        ],
    )
    def test_upload_files_directory(self, mock_boto_client, uri, expected_prefix):
        """Test uploading a directory to S3."""
        # Setup mock S3 client
        mock_s3_client = MagicMock()
        mock_boto_client.return_value = mock_s3_client
        mock_s3_client.upload_file = MagicMock()
        with tempfile.TemporaryDirectory() as tempdir:
            # Create some test files, including one in a nested subdir to
            # verify relative paths are preserved in the S3 keys.
            test_file1 = os.path.join(tempdir, "file1.txt")
            test_file2 = os.path.join(tempdir, "subdir", "file2.txt")
            os.makedirs(os.path.dirname(test_file2), exist_ok=True)
            with open(test_file1, "w") as f:
                f.write("test1")
            with open(test_file2, "w") as f:
                f.write("test2")
            S3FileSystem.upload_files(tempdir, uri)
        # Verify files were uploaded
        assert mock_s3_client.upload_file.call_count == 2
        # Check the S3 keys that were used (key is the 3rd positional arg)
        uploaded_keys = [
            call[0][2] for call in mock_s3_client.upload_file.call_args_list
        ]
        assert f"{expected_prefix}file1.txt" in uploaded_keys
        assert f"{expected_prefix}subdir/file2.txt" in uploaded_keys

    @patch("boto3.client")
    def test_upload_files_single_file(self, mock_boto_client):
        """Test uploading a single file to S3."""
        # Setup mock S3 client
        mock_s3_client = MagicMock()
        mock_boto_client.return_value = mock_s3_client
        mock_s3_client.upload_file = MagicMock()
        with tempfile.TemporaryDirectory() as tempdir:
            # Create a test file
            test_file = os.path.join(tempdir, "single.txt")
            with open(test_file, "w") as f:
                f.write("test content")
            S3FileSystem.upload_files(test_file, "s3://bucket/dir/")
        # Verify single file was uploaded under the destination prefix
        mock_s3_client.upload_file.assert_called_once()
        call_args = mock_s3_client.upload_file.call_args[0]
        assert call_args[2] == "dir/single.txt"

    @patch("boto3.client")
    def test_upload_files_exception(self, mock_boto_client):
        """Test uploading files when operation fails."""
        # Setup mock S3 client
        mock_s3_client = MagicMock()
        mock_boto_client.return_value = mock_s3_client
        mock_s3_client.upload_file.side_effect = Exception("Network error")
        with tempfile.TemporaryDirectory() as tempdir:
            # Create a test file
            test_file = os.path.join(tempdir, "test.txt")
            with open(test_file, "w") as f:
                f.write("test")
            # Upload failures propagate (unlike download failures).
            with pytest.raises(Exception, match="Network error"):
                S3FileSystem.upload_files(tempdir, "s3://bucket/dir")

    def test_upload_files_invalid_uri(self):
        """Test uploading files with invalid URI."""
        with tempfile.TemporaryDirectory() as tempdir:
            with pytest.raises(ValueError, match="Invalid S3 URI"):
                S3FileSystem.upload_files(tempdir, "gs://bucket/dir")

    def test_upload_files_nonexistent_path(self):
        """Test uploading from a path that doesn't exist."""
        with pytest.raises(ValueError, match="does not exist"):
            S3FileSystem.upload_files("/nonexistent/path", "s3://bucket/dir")

    def test_parse_s3_uri(self):
        """Test parsing S3 URIs into (bucket, key, is_anonymous)."""
        # Standard URI
        bucket, key, is_anon = S3FileSystem._parse_s3_uri(
            "s3://bucket/path/to/file.txt"
        )
        assert bucket == "bucket"
        assert key == "path/to/file.txt"
        assert is_anon is False
        # Anonymous URI
        bucket, key, is_anon = S3FileSystem._parse_s3_uri(
            "s3://anonymous@bucket/file.txt"
        )
        assert bucket == "bucket"
        assert key == "file.txt"
        assert is_anon is True
        # Bucket only
        bucket, key, is_anon = S3FileSystem._parse_s3_uri("s3://bucket")
        assert bucket == "bucket"
        assert key == ""
        assert is_anon is False

    def test_calculate_optimal_workers(self):
        """Test worker calculation based on file characteristics."""
        # Many small files (< 1MB)
        workers = S3FileSystem._calculate_optimal_workers(
            num_files=50, total_size=50 * 500 * 1024  # 50 files * 500KB each
        )
        assert workers == 50  # Should use many workers for small files
        # Medium files (1-10MB)
        workers = S3FileSystem._calculate_optimal_workers(
            num_files=50, total_size=50 * 5 * 1024 * 1024  # 50 files * 5MB each
        )
        assert workers == 25  # Should use moderate workers
        # Large files (> 10MB)
        workers = S3FileSystem._calculate_optimal_workers(
            num_files=50, total_size=50 * 50 * 1024 * 1024  # 50 files * 50MB each
        )
        assert workers == 20  # Should cap at 20 for large files
        # Zero files
        workers = S3FileSystem._calculate_optimal_workers(num_files=0, total_size=0)
        assert workers == 10  # Should return default_min
class TestS3FileSystemIntegration:
    """Integration tests for S3FileSystem (requires actual S3 access).

    These tests hit the public ``air-example-data`` bucket anonymously over
    the network, so they need outbound internet access to run.
    """

    def test_list_subfolders_real_s3(self):
        """Test listing subfolders from real S3 bucket."""
        # Test listing subfolders in the parent directory which has actual subfolders
        folders = S3FileSystem.list_subfolders(
            "s3://anonymous@air-example-data/rayllm-ossci/"
        )
        # Verify we get expected subfolders
        assert isinstance(folders, list)
        assert "meta-Llama-3.2-1B-Instruct" in folders
        assert len(folders) > 0

    def test_get_file_real_s3(self):
        """Test getting a file from real S3 bucket."""
        # Test getting a small config file
        content = S3FileSystem.get_file(
            "s3://anonymous@air-example-data/rayllm-ossci/meta-Llama-3.2-1B-Instruct/config.json"
        )
        assert content is not None
        assert isinstance(content, str)
        # Verify it's valid JSON
        config = json.loads(content)
        assert "model_type" in config or "vocab_size" in config

    def test_download_files_with_exclusion(self):
        """Test downloading files with exclusion filter (exclude safetensors files)."""
        with tempfile.TemporaryDirectory() as tempdir:
            # Download files excluding safetensors (skips the large weights)
            S3FileSystem.download_files(
                tempdir,
                "s3://anonymous@air-example-data/rayllm-ossci/meta-Llama-3.2-1B-Instruct/",
                suffixes_to_exclude=[".safetensors"],
            )
            # Get list of downloaded files (paths relative to tempdir)
            downloaded_files = set()
            for root, dirs, files in os.walk(tempdir):
                for file in files:
                    rel_path = os.path.relpath(os.path.join(root, file), tempdir)
                    downloaded_files.add(rel_path)
            # Verify safetensors file is excluded
            assert (
                "model.safetensors" not in downloaded_files
            ), "safetensors file should be excluded"
            # Verify other files are downloaded
            assert "config.json" in downloaded_files
            assert "tokenizer.json" in downloaded_files
            assert len(downloaded_files) > 0

    def test_download_files_with_inclusion(self):
        """Test downloading files with inclusion filter (include only .json files)."""
        with tempfile.TemporaryDirectory() as tempdir:
            # Download only .json files
            S3FileSystem.download_files(
                tempdir,
                "s3://anonymous@air-example-data/rayllm-ossci/meta-Llama-3.2-1B-Instruct/",
                substrings_to_include=[".json"],
            )
            # Get list of downloaded files (paths relative to tempdir)
            downloaded_files = set()
            for root, dirs, files in os.walk(tempdir):
                for file in files:
                    rel_path = os.path.relpath(os.path.join(root, file), tempdir)
                    downloaded_files.add(rel_path)
            # Verify only .json files are downloaded
            expected_json_files = {
                "config.json",
                "generation_config.json",
                "special_tokens_map.json",
                "tokenizer.json",
                "tokenizer_config.json",
            }
            assert (
                downloaded_files == expected_json_files
            ), f"Expected {expected_json_files}, got {downloaded_files}"
            # Verify non-json files are excluded
            assert "model.safetensors" not in downloaded_files
            assert "README.md" not in downloaded_files
            assert "LICENSE.txt" not in downloaded_files
# Allow running this test module directly; propagate pytest's exit code.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/tests/common/cloud/test_s3_filesystem.py",
"license": "Apache License 2.0",
"lines": 442,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/llm/tests/common/cloud/test_utils.py | """Utility tests for cloud functionality."""
import asyncio
import sys
import pytest
from ray.llm._internal.common.utils.cloud_utils import (
CloudObjectCache,
is_remote_path,
remote_object_cache,
)
class MockSyncFetcher:
    """Synchronous fetch stub that records every call it receives.

    Returns ``-1`` for the key ``"missing"`` (the cache's missing-object
    sentinel in these tests) and ``"value-<key>"`` for anything else.
    """

    def __init__(self):
        self.call_count = 0  # total number of invocations
        self.calls = []  # keys in the order they were requested

    def __call__(self, key: str):
        """Record the call and return a deterministic value for ``key``."""
        self.call_count += 1
        self.calls.append(key)
        return -1 if key == "missing" else f"value-{key}"
class MockAsyncFetcher:
    """Asynchronous fetch stub that records every call it receives.

    Returns ``-1`` for the key ``"missing"`` (the cache's missing-object
    sentinel in these tests) and ``"value-<key>"`` for anything else.
    """

    def __init__(self):
        self.call_count = 0  # total number of invocations
        self.calls = []  # keys in the order they were requested

    async def __call__(self, key: str):
        """Record the call and return a deterministic value for ``key``."""
        self.call_count += 1
        self.calls.append(key)
        return -1 if key == "missing" else f"value-{key}"
class TestCloudObjectCache:
    """Tests for the CloudObjectCache class.

    Uses MockSyncFetcher / MockAsyncFetcher to count fetches, proving cache
    hits, LRU eviction, and time-based expiration of missing vs. existing
    entries.
    """

    def test_sync_cache_basic(self):
        """Test basic synchronous cache functionality."""
        fetcher = MockSyncFetcher()
        cache = CloudObjectCache(max_size=2, fetch_fn=fetcher)

        # Test fetching a value (should be a miss)
        assert cache.get("key1") == "value-key1"
        assert fetcher.call_count == 1
        assert fetcher.calls == ["key1"]

        # Test cache hit (should not call fetcher)
        assert cache.get("key1") == "value-key1"
        assert fetcher.call_count == 1  # Count should not increase
        assert fetcher.calls == ["key1"]  # Calls should not change

        # Test cache size limit
        assert cache.get("key2") == "value-key2"  # Miss, should call fetcher
        assert fetcher.call_count == 2
        assert fetcher.calls == ["key1", "key2"]
        assert (
            cache.get("key3") == "value-key3"
        )  # Miss, should call fetcher and evict key1
        assert fetcher.call_count == 3
        assert fetcher.calls == ["key1", "key2", "key3"]
        assert len(cache) == 2

        # Verify key1 was evicted by checking if it's fetched again
        assert cache.get("key1") == "value-key1"  # Miss, should call fetcher
        assert fetcher.call_count == 4
        assert fetcher.calls == ["key1", "key2", "key3", "key1"]

        # Verify final cache state
        assert len(cache) == 2
        assert "key3" in cache._cache  # key3 should still be in cache
        assert "key1" in cache._cache  # key1 should be back in cache
        assert "key2" not in cache._cache  # key2 should have been evicted

    @pytest.mark.asyncio
    async def test_async_cache_missing_object_expiration(self):
        """Test cache expiration for missing objects in async mode."""
        fetcher = MockAsyncFetcher()
        cache = CloudObjectCache(
            max_size=2,
            fetch_fn=fetcher,
            missing_expire_seconds=1,  # 1 second to expire missing object
            exists_expire_seconds=3,  # 3 seconds to expire existing object
            missing_object_value=-1,
        )

        # Test missing object expiration.
        # BUGFIX: compare the int sentinel with `==`, not `is` — identity of
        # small integers is a CPython implementation detail and `is` with a
        # literal raises a SyntaxWarning on Python 3.8+.
        assert await cache.aget("missing") == -1  # First fetch
        assert fetcher.call_count == 1
        assert fetcher.calls == ["missing"]

        # Should still be cached
        assert await cache.aget("missing") == -1  # Cache hit
        assert fetcher.call_count == 1  # No new fetch
        assert fetcher.calls == ["missing"]

        await asyncio.sleep(1.5)  # Wait for missing object to expire
        assert await cache.aget("missing") == -1  # Should fetch again after expiration
        assert fetcher.call_count == 2  # New fetch
        assert fetcher.calls == ["missing", "missing"]

    @pytest.mark.asyncio
    async def test_async_cache_existing_object_expiration(self):
        """Test expiration of existing objects in async mode."""
        fetcher = MockAsyncFetcher()
        cache = CloudObjectCache(
            max_size=2,
            fetch_fn=fetcher,
            missing_expire_seconds=1,  # 1 second to expire missing object
            exists_expire_seconds=3,  # 3 seconds to expire existing object
            missing_object_value=-1,
        )

        # Test existing object expiration
        assert await cache.aget("key1") == "value-key1"  # First fetch
        assert fetcher.call_count == 1
        assert fetcher.calls == ["key1"]

        # Should still be cached (not expired)
        assert await cache.aget("key1") == "value-key1"  # Cache hit
        assert fetcher.call_count == 1  # No new fetch
        assert fetcher.calls == ["key1"]

        await asyncio.sleep(1.5)  # Not expired yet (exists_expire_seconds=3)
        assert await cache.aget("key1") == "value-key1"  # Should still hit cache
        assert fetcher.call_count == 1  # No new fetch
        assert fetcher.calls == ["key1"]  # No change in calls

        # Now expired: total elapsed ~3.5s > exists_expire_seconds=3.
        await asyncio.sleep(2)
        assert await cache.aget("key1") == "value-key1"  # Should fetch again
        assert fetcher.call_count == 2  # New fetch
        assert fetcher.calls == ["key1", "key1"]

        # Verify final cache state
        assert len(cache) == 1
class TestRemoteObjectCacheDecorator:
    """Tests for the remote_object_cache decorator.

    Each test wraps a local coroutine with ``@remote_object_cache`` and uses
    a nonlocal call counter to observe hits, evictions, and expiration.
    """

    @pytest.mark.asyncio
    async def test_basic_functionality(self):
        """Test basic remote_object_cache decorator functionality."""
        call_count = 0
        # Unique sentinel object; `is` comparisons against it are safe.
        MISSING = object()

        @remote_object_cache(
            max_size=2,
            missing_expire_seconds=1,
            exists_expire_seconds=3,
            missing_object_value=MISSING,
        )
        async def fetch(key: str):
            nonlocal call_count
            call_count += 1
            if key == "missing":
                return MISSING
            return f"value-{key}"

        # Test cache hit
        assert await fetch("key1") == "value-key1"
        assert call_count == 1
        assert await fetch("key1") == "value-key1"  # Should hit cache
        assert call_count == 1  # Count should not increase
        # Test cache size limit
        assert await fetch("key2") == "value-key2"
        assert call_count == 2
        assert await fetch("key3") == "value-key3"  # Should evict key1
        assert call_count == 3
        # Verify key1 was evicted
        assert await fetch("key1") == "value-key1"
        assert call_count == 4

    @pytest.mark.asyncio
    async def test_expiration(self):
        """Test cache expiration for both missing and existing objects."""
        call_count = 0
        MISSING = object()

        @remote_object_cache(
            max_size=2,
            missing_expire_seconds=1,  # 1 second to expire missing object
            exists_expire_seconds=3,  # 3 seconds to expire existing object
            missing_object_value=MISSING,
        )
        async def fetch(key: str):
            nonlocal call_count
            call_count += 1
            if key == "missing":
                return MISSING
            return f"value-{key}"

        # Test missing object expiration
        assert await fetch("missing") is MISSING
        assert call_count == 1
        assert await fetch("missing") is MISSING  # Should hit cache
        assert call_count == 1
        await asyncio.sleep(1.5)  # Wait for missing object to expire
        assert await fetch("missing") is MISSING  # Should fetch again
        assert call_count == 2
        # Test existing object expiration
        assert await fetch("key1") == "value-key1"
        assert call_count == 3
        assert await fetch("key1") == "value-key1"  # Should hit cache
        assert call_count == 3
        await asyncio.sleep(1.5)  # Not expired yet
        assert await fetch("key1") == "value-key1"  # Should still hit cache
        assert call_count == 3
        await asyncio.sleep(2)  # Now expired (total > 3 seconds)
        assert await fetch("key1") == "value-key1"  # Should fetch again
        assert call_count == 4

    @pytest.mark.asyncio
    async def test_error_handling(self):
        """Test error handling in remote_object_cache decorator."""
        call_count = 0

        @remote_object_cache(max_size=2)
        async def fetch(key: str):
            nonlocal call_count
            call_count += 1
            if key == "error":
                raise ValueError("Test error")
            return f"value-{key}"

        # Test successful case
        assert await fetch("key1") == "value-key1"
        assert call_count == 1
        # Test error case
        with pytest.raises(ValueError, match="Test error"):
            await fetch("error")
        assert call_count == 2
        # Verify error wasn't cached (the fetcher runs again)
        with pytest.raises(ValueError, match="Test error"):
            await fetch("error")
        assert call_count == 3

    @pytest.mark.asyncio
    async def test_concurrent_access(self):
        """Test concurrent access to cached function."""
        call_count = 0
        DELAY = 0.1

        @remote_object_cache(max_size=2)
        async def slow_fetch(key: str):
            nonlocal call_count
            call_count += 1
            await asyncio.sleep(DELAY)  # Simulate slow operation
            return f"value-{key}"

        # Launch multiple concurrent calls for the same key
        tasks = [slow_fetch("key1") for _ in range(5)]
        results = await asyncio.gather(*tasks)
        # All results should be the same
        assert all(r == "value-key1" for r in results)
        # Should only call once despite multiple concurrent requests
        assert call_count == 1
class TestIsRemotePath:
    """Tests for the is_remote_path utility function."""

    def test_s3_paths(self):
        """S3 URIs (including the anonymous@ form) are detected as remote."""
        for uri in ("s3://bucket/path", "s3://bucket", "s3://anonymous@bucket/path"):
            assert is_remote_path(uri) is True

    def test_gcs_paths(self):
        """GCS URIs (including the anonymous@ form) are detected as remote."""
        for uri in ("gs://bucket/path", "gs://bucket", "gs://anonymous@bucket/path"):
            assert is_remote_path(uri) is True

    def test_abfss_paths(self):
        """ABFSS URIs are detected as remote, with or without a path."""
        for uri in (
            "abfss://container@account.dfs.core.windows.net/path",
            "abfss://container@account.dfs.core.windows.net",
        ):
            assert is_remote_path(uri) is True

    def test_azure_paths(self):
        """azure:// URIs for both blob and dfs endpoints are remote."""
        for uri in (
            "azure://container@account.blob.core.windows.net/path",
            "azure://container@account.dfs.core.windows.net/path",
        ):
            assert is_remote_path(uri) is True

    def test_local_paths(self):
        """Local, file:// and http:// paths are not considered remote."""
        for path in (
            "/local/path",
            "./relative/path",
            "file:///local/path",
            "http://example.com",
        ):
            assert is_remote_path(path) is False
# Allow running this test module directly; propagate pytest's exit code.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/tests/common/cloud/test_utils.py",
"license": "Apache License 2.0",
"lines": 259,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/llm/_internal/batch/processor/utils.py | """Shared utility functions for processor builders."""
from typing import Any, Dict, Optional, Tuple, Union
from ray.data import ActorPoolStrategy
from ray.llm._internal.batch.stages.configs import _StageConfigBase
def get_value_or_fallback(value: Any, fallback: Any) -> Any:
    """Return ``value`` unless it is None, in which case return ``fallback``.

    Falsy-but-set values (0, "", [], False) are kept; only None triggers
    the fallback.
    """
    if value is None:
        return fallback
    return value
def extract_resource_kwargs(
    runtime_env: Optional[Dict[str, Any]],
    num_cpus: Optional[float],
    memory: Optional[float],
) -> Dict[str, Any]:
    """Extract non-None resource kwargs for map_batches.

    Args:
        runtime_env: Optional runtime environment dict.
        num_cpus: Optional CPU reservation per map worker.
        memory: Optional heap memory reservation (bytes) per map worker.

    Returns:
        A dict containing only the arguments that were actually set, so
        unset values never override downstream defaults.
    """
    candidates = {
        "runtime_env": runtime_env,
        "num_cpus": num_cpus,
        "memory": memory,
    }
    return {name: value for name, value in candidates.items() if value is not None}
def normalize_cpu_stage_concurrency(
    concurrency: Optional[Union[int, Tuple[int, int]]]
) -> Dict[str, int]:
    """Normalize concurrency for CPU stages (int -> (1, int) for autoscaling).

    Args:
        concurrency: None, a max pool size, or an explicit (min, max) pair.

    Returns:
        Keyword arguments for the actor pool: ``{"size": 1}`` when unset,
        otherwise ``{"min_size": ..., "max_size": ...}``.
    """
    # No setting: default to a minimal pool of one actor.
    if concurrency is None:
        return {"size": 1}
    # A bare int is treated as the upper bound of an autoscaling pool.
    if isinstance(concurrency, int):
        min_size, max_size = 1, concurrency
    else:
        min_size, max_size = concurrency
    return {"min_size": min_size, "max_size": max_size}
def build_cpu_stage_map_kwargs(
    stage_cfg: _StageConfigBase,
) -> Dict[str, Any]:
    """Build map_batches_kwargs for CPU stages.

    Combines the stage's concurrency (normalized into an ActorPoolStrategy),
    its batch size, and any explicitly-set resource kwargs.
    """
    pool_kwargs = normalize_cpu_stage_concurrency(stage_cfg.concurrency)
    map_kwargs: Dict[str, Any] = {
        "zero_copy_batch": True,
        "compute": ActorPoolStrategy(**pool_kwargs),
        "batch_size": stage_cfg.batch_size,
    }
    # Only forward resources the stage config actually set.
    map_kwargs.update(
        extract_resource_kwargs(
            stage_cfg.runtime_env,
            stage_cfg.num_cpus,
            stage_cfg.memory,
        )
    )
    return map_kwargs
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/_internal/batch/processor/utils.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/llm/_internal/batch/stages/configs.py | from typing import Any, Dict, Literal, Optional, Tuple, Type, TypeVar, Union
from pydantic import Field
from ray.llm._internal.common.base_pydantic import BaseModelExtended
T = TypeVar("T", bound="_StageConfigBase")
class _StageConfigBase(BaseModelExtended):
    """Base config shared by all batch-processor stage configs.

    Every field except ``enabled`` is an optional per-stage override; fields
    left as None are later filled from processor-level defaults (see
    ``resolve_stage_config``).
    """

    enabled: bool = Field(default=True, description="Whether this stage is enabled.")
    # Optional overrides; processor-level defaults still apply
    batch_size: Optional[int] = Field(default=None, description="Rows per batch.")
    concurrency: Optional[Union[int, Tuple[int, int]]] = Field(
        default=None, description="Actor pool size or range for this stage."
    )
    runtime_env: Optional[Dict[str, Any]] = Field(
        default=None, description="Optional runtime env for this stage."
    )
    num_cpus: Optional[float] = Field(
        default=None,
        description="Number of CPUs to reserve for each map worker in this stage.",
    )
    memory: Optional[float] = Field(
        default=None,
        description="Heap memory in bytes to reserve for each map worker in this stage.",
    )
class ChatTemplateStageConfig(_StageConfigBase):
    """Config for the chat-template stage (applies a chat template to messages)."""

    model_source: Optional[str] = Field(
        default=None, description="Model source/identifier for this stage."
    )
    # Custom template text and extra kwargs forwarded to template rendering;
    # None means use the model's defaults.
    chat_template: Optional[str] = Field(default=None)
    chat_template_kwargs: Optional[Dict[str, Any]] = Field(default=None)
class TokenizerStageConfig(_StageConfigBase):
    """Config for the tokenization stage."""

    model_source: Optional[str] = Field(
        default=None, description="Model source/identifier for this stage."
    )
class DetokenizeStageConfig(_StageConfigBase):
    """Config for the detokenization stage."""

    model_source: Optional[str] = Field(
        default=None, description="Model source/identifier for this stage."
    )
class PrepareImageStageConfig(_StageConfigBase):
    """Config for the image-preparation stage (no stage-specific fields)."""

    pass
class PrepareMultimodalStageConfig(_StageConfigBase):
    """Config for the multimodal-preparation stage."""

    model_config_kwargs: Optional[Dict[str, Any]] = Field(
        default=None,
        description="Optional kwargs to pass to the model config. See available model config "
        "kwargs at https://docs.vllm.ai/en/latest/api/vllm/config/#vllm.config.ModelConfig",
    )
    chat_template_content_format: Optional[Literal["string", "openai"]] = Field(
        default="string",
        description="The content format to use for the chat template. "
        "This is used to format the chat template content according to a specific model.",
    )
    apply_sys_msg_formatting: Optional[bool] = Field(
        default=False,
        description="Whether to apply formatting system messages.",
    )
class HttpRequestStageConfig(_StageConfigBase):
    """Config for the HTTP-request stage (no stage-specific fields)."""

    pass
def resolve_stage_config(
    stage_cfg_value: Union[bool, Dict[str, Any], _StageConfigBase],
    stage_config_cls: Type[T],
    processor_defaults: Optional[Dict[str, Any]] = None,
) -> T:
    """Resolve a stage config value (bool | dict | StageConfig) into a typed StageConfig.

    Args:
        stage_cfg_value: The stage config value (bool, dict, or typed StageConfig).
        stage_config_cls: The StageConfig class to instantiate.
        processor_defaults: Optional dict of processor-level defaults to merge in.
            Expected keys: 'batch_size', 'concurrency', 'runtime_env', 'model_source'.

    Returns:
        Resolved StageConfig instance with defaults merged.

    Raises:
        TypeError: If ``stage_cfg_value`` is not a bool, dict, or
            ``stage_config_cls`` instance.
    """
    defaults = processor_defaults or {}

    if isinstance(stage_cfg_value, stage_config_cls):
        # Round-trip through a dump so the caller's instance is never mutated.
        resolved = stage_config_cls.model_validate(stage_cfg_value.model_dump())
    elif isinstance(stage_cfg_value, bool):
        # A bare bool just toggles the stage; every other field stays default.
        resolved = stage_config_cls(enabled=stage_cfg_value)
    elif isinstance(stage_cfg_value, dict):
        resolved = stage_config_cls(**stage_cfg_value)
    else:
        raise TypeError(
            f"Unsupported type for stage config: {type(stage_cfg_value).__name__}. "
            f"Expected bool, dict, or {stage_config_cls.__name__} instance. "
            f"Got: {stage_cfg_value}"
        )

    # Backfill processor-level defaults for fields the stage left unset (None).
    for name in ("batch_size", "concurrency", "runtime_env", "model_source"):
        if name not in defaults:
            continue
        # Some fields (e.g. model_source) exist only on certain stage classes.
        if hasattr(resolved, name) and getattr(resolved, name, None) is None:
            setattr(resolved, name, defaults[name])
    return resolved
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/_internal/batch/stages/configs.py",
"license": "Apache License 2.0",
"lines": 97,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/llm/tests/batch/cpu/processor/test_backward_compat.py | import sys
import warnings
from unittest.mock import patch
import pytest
from ray.llm._internal.batch.processor.vllm_engine_proc import vLLMEngineProcessorConfig
from ray.llm._internal.batch.stages.configs import (
ChatTemplateStageConfig,
DetokenizeStageConfig,
PrepareImageStageConfig,
TokenizerStageConfig,
)
def test_legacy_booleans_coerced_to_stage_configs():
    """Legacy flags → stage configs (dict form)."""
    config = vLLMEngineProcessorConfig(
        model_source="test-model",
        apply_chat_template=True,
        tokenize=False,
        detokenize=True,
        has_image=True,
    )
    # Each legacy boolean should surface as a dict-form stage config whose
    # "enabled" flag mirrors the original flag value.
    expected_enabled = {
        "chat_template_stage": True,
        "tokenize_stage": False,
        "detokenize_stage": True,
        "prepare_image_stage": True,
    }
    for attr_name, enabled in expected_enabled.items():
        stage_cfg = getattr(config, attr_name)
        assert isinstance(stage_cfg, dict)
        assert stage_cfg["enabled"] is enabled
def test_explicit_stage_configs_preserved():
    """Explicit stage configs not overwritten by legacy flags."""
    chat_stage = ChatTemplateStageConfig(enabled=False, batch_size=64)
    config = vLLMEngineProcessorConfig(
        model_source="test-model",
        chat_template_stage=chat_stage,
        apply_chat_template=True,  # Legacy flag should be ignored
    )
    # The exact instance must survive untouched: same object, same field values.
    assert config.chat_template_stage is chat_stage
    assert config.chat_template_stage.batch_size == 64
    assert config.chat_template_stage.enabled is False
def test_chat_template_fields_merged():
    """apply_chat_template + chat_template → merged into stage config."""
    config = vLLMEngineProcessorConfig(
        model_source="test-model",
        apply_chat_template=True,
        chat_template="custom_template",
    )
    stage_cfg = config.chat_template_stage
    # The top-level chat_template must land inside the coerced stage config.
    assert isinstance(stage_cfg, dict)
    assert stage_cfg["chat_template"] == "custom_template"
    assert stage_cfg["enabled"] is True
def test_no_warnings_when_using_new_api():
    """No warnings when only new API used."""
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        vLLMEngineProcessorConfig(
            model_source="test-model",
            chat_template_stage=ChatTemplateStageConfig(enabled=True),
            tokenize_stage=TokenizerStageConfig(enabled=True),
            detokenize_stage=DetokenizeStageConfig(enabled=True),
            prepare_image_stage=PrepareImageStageConfig(enabled=False),
        )
    # Only UserWarnings count; other warning categories are unrelated noise.
    user_warnings = [w for w in caught if issubclass(w.category, UserWarning)]
    assert len(user_warnings) == 0
def test_legacy_dict_stage_config():
    """Dict form stage configs work correctly."""
    config = vLLMEngineProcessorConfig(
        model_source="test-model",
        chat_template_stage={"enabled": False, "batch_size": 128},
        tokenize_stage={"enabled": True, "concurrency": 4},
    )
    chat_cfg = config.chat_template_stage
    tok_cfg = config.tokenize_stage
    # Dict inputs stay dicts, carrying both the flag and stage overrides.
    assert isinstance(chat_cfg, dict)
    assert chat_cfg["enabled"] is False
    assert chat_cfg["batch_size"] == 128
    assert isinstance(tok_cfg, dict)
    assert tok_cfg["enabled"] is True
    assert tok_cfg["concurrency"] == 4
@pytest.mark.parametrize(
    "prepare_image_stage",
    [
        True,
        False,
        {"batch_size": 128},
        PrepareImageStageConfig(enabled=True),
        PrepareImageStageConfig(enabled=False),
    ],
)
def test_prepare_image_stage_deprecation(prepare_image_stage):
    """prepare_image_stage deprecation warning should be emitted when enabled."""
    # Derive whether this parametrized value counts as "enabled". In this
    # parametrization the dict form carries no "enabled" key and is treated
    # as enabled.
    if isinstance(prepare_image_stage, bool):
        is_enabled = prepare_image_stage
    elif isinstance(prepare_image_stage, dict):
        is_enabled = True
    else:
        is_enabled = prepare_image_stage.enabled
    with patch("ray.llm._internal.batch.processor.base.logger.warning") as mock_warning:
        vLLMEngineProcessorConfig(
            model_source="test-model",
            prepare_image_stage=prepare_image_stage,
        )
    if is_enabled:
        # The warning must name both the deprecated field and its replacement.
        mock_warning.assert_called_once()
        call_args = mock_warning.call_args[0][0]
        assert "prepare_image_stage" in call_args
        assert "prepare_multimodal_stage" in call_args
    else:
        mock_warning.assert_not_called()
def test_prepare_image_stage_deprecation_not_set():
    """prepare_image_stage deprecation warning should not be emitted when not set."""
    with patch("ray.llm._internal.batch.processor.base.logger.warning") as mock_warning:
        # No prepare_image_stage argument at all → no deprecation path taken.
        vLLMEngineProcessorConfig(model_source="test-model")
    mock_warning.assert_not_called()
# Allow running this test module directly (outside of a pytest invocation).
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/tests/batch/cpu/processor/test_backward_compat.py",
"license": "Apache License 2.0",
"lines": 119,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/llm/tests/batch/cpu/stages/test_configs.py | import sys
import pytest
from ray.llm._internal.batch.stages.configs import (
ChatTemplateStageConfig,
PrepareImageStageConfig,
TokenizerStageConfig,
resolve_stage_config,
)
def test_resolve_dict_to_config():
    """Dict → parsed StageConfig with all fields."""
    raw_config = {"enabled": True, "batch_size": 128, "concurrency": 4}
    stage_cfg = resolve_stage_config(raw_config, PrepareImageStageConfig)
    # The dict is parsed into the requested typed config, field-for-field.
    assert isinstance(stage_cfg, PrepareImageStageConfig)
    assert stage_cfg.enabled is True
    assert stage_cfg.concurrency == 4
    assert stage_cfg.batch_size == 128
def test_resolve_typed_config_copied():
    """Typed config creates copy (doesn't mutate input)."""
    original = ChatTemplateStageConfig(
        enabled=True, batch_size=64, model_source="model1"
    )
    # Resolve the same instance twice with different processor defaults:
    # explicit values must win every time.
    for defaults in (
        {"batch_size": 128, "model_source": "model2"},
        {"batch_size": 256, "model_source": "model3"},
    ):
        resolved = resolve_stage_config(original, ChatTemplateStageConfig, defaults)
        assert resolved.batch_size == 64
        assert resolved.model_source == "model1"
    # The input instance is never mutated by resolution.
    assert original.batch_size == 64
    assert original.model_source == "model1"
def test_resolve_merges_processor_defaults():
    """Defaults merged when None."""
    defaults = {
        "batch_size": 128,
        "concurrency": 4,
        "model_source": "test-model",
    }
    stage_cfg = resolve_stage_config(
        {"enabled": True}, TokenizerStageConfig, processor_defaults=defaults
    )
    # Every unset field should pick up its processor-level default.
    for field_name, expected in defaults.items():
        assert getattr(stage_cfg, field_name) == expected
def test_resolve_preserves_explicit_overrides():
    """Explicit values not overridden by defaults."""
    stage_cfg = resolve_stage_config(
        {"enabled": True, "batch_size": 64, "concurrency": 2},
        PrepareImageStageConfig,
        processor_defaults={"batch_size": 128, "concurrency": 4},
    )
    # Stage-level settings always beat processor-level defaults.
    assert (stage_cfg.batch_size, stage_cfg.concurrency) == (64, 2)
def test_resolve_model_source_fallback():
    """model_source field uses processor default when None."""
    resolved = resolve_stage_config(
        {"enabled": True},
        TokenizerStageConfig,
        processor_defaults={"model_source": "default-model"},
    )
    # Unset model_source falls back to the processor-level value.
    assert resolved.model_source == "default-model"
def test_resolve_bool_true():
    """Bool True → enabled StageConfig."""
    resolved = resolve_stage_config(True, PrepareImageStageConfig)
    assert isinstance(resolved, PrepareImageStageConfig)
    # A bare True maps to an enabled stage with default settings.
    assert resolved.enabled is True
def test_resolve_bool_false():
    """Bool False → disabled StageConfig."""
    resolved = resolve_stage_config(False, PrepareImageStageConfig)
    assert isinstance(resolved, PrepareImageStageConfig)
    # A bare False maps to a disabled stage with default settings.
    assert resolved.enabled is False
def test_resolve_runtime_env_replacement():
    """Stage runtime_env replaces processor (not merged)."""
    stage_env = {"env_vars": {"STAGE_VAR": "stage_value"}}
    processor_env = {"env_vars": {"PROC_VAR": "proc_value"}}
    stage_cfg = resolve_stage_config(
        {"enabled": True, "runtime_env": stage_env},
        PrepareImageStageConfig,
        processor_defaults={"runtime_env": processor_env},
    )
    # No deep merge: the stage-level runtime_env wins wholesale.
    assert stage_cfg.runtime_env == {"env_vars": {"STAGE_VAR": "stage_value"}}
def test_resolve_same_config_reusable():
    """Same StageConfig instance can be resolved with different processor defaults without mutation."""
    shared = ChatTemplateStageConfig(enabled=True, batch_size=None, model_source=None)
    first = resolve_stage_config(
        shared, ChatTemplateStageConfig, {"batch_size": 128, "model_source": "model1"}
    )
    second = resolve_stage_config(
        shared, ChatTemplateStageConfig, {"batch_size": 256, "model_source": "model2"}
    )
    # Each resolution independently picks up its own processor defaults.
    assert (first.batch_size, first.model_source) == (128, "model1")
    assert (second.batch_size, second.model_source) == (256, "model2")
    # The shared input instance stays unset (still None) after both calls.
    assert shared.batch_size is None
    assert shared.model_source is None
def test_resolve_unsupported_type():
    """Unsupported types raise TypeError."""
    # Neither None nor an int is a valid stage config value.
    for bad_value in (None, 123):
        with pytest.raises(TypeError, match="Unsupported type for stage config"):
            resolve_stage_config(bad_value, PrepareImageStageConfig)
def test_resolve_stage_without_model_source():
    """Stages without model_source field don't get it from defaults."""
    resolved = resolve_stage_config(
        {"enabled": True},
        PrepareImageStageConfig,
        processor_defaults={"model_source": "test-model"},
    )
    # PrepareImageStageConfig declares no model_source field, so the processor
    # default must not be injected onto the instance.
    assert not hasattr(resolved, "model_source")
# Allow running this test module directly (outside of a pytest invocation).
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/tests/batch/cpu/stages/test_configs.py",
"license": "Apache License 2.0",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/_internal/datasource/kafka_datasource.py | """Kafka datasource for bounded data reads.
This module provides a Kafka datasource implementation for Ray Data that supports
bounded reads with offset-based range queries.
Message keys and values are returned as raw bytes to support any serialization format
(JSON, Avro, Protobuf, etc.). Users can decode them using map operations.
Requires:
- kafka-python: https://kafka-python.readthedocs.io/
"""
import logging
import time
from dataclasses import dataclass, fields
from datetime import datetime, timezone
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
List,
Literal,
Optional,
Tuple,
Union,
)
import pyarrow as pa
if TYPE_CHECKING:
from kafka import KafkaConsumer, TopicPartition
from ray.data._internal.output_buffer import BlockOutputBuffer, OutputBlockSizeOption
from ray.data._internal.util import _check_import
from ray.data.block import Block, BlockMetadata
from ray.data.context import DataContext
from ray.data.datasource import Datasource, ReadTask
logger = logging.getLogger(__name__)
@dataclass
class KafkaAuthConfig:
    """Authentication configuration for Kafka connections.

    Uses standard kafka-python parameter names. See kafka-python documentation
    for full details: https://kafka-python.readthedocs.io/

    security_protocol: Protocol used to communicate with brokers.
        Valid values are: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL.
        Default: PLAINTEXT.
    sasl_mechanism: Authentication mechanism when security_protocol
        is configured for SASL_PLAINTEXT or SASL_SSL. Valid values are:
        PLAIN, GSSAPI, OAUTHBEARER, SCRAM-SHA-256, SCRAM-SHA-512.
    sasl_plain_username: username for sasl PLAIN and SCRAM authentication.
        Required if sasl_mechanism is PLAIN or one of the SCRAM mechanisms.
    sasl_plain_password: password for sasl PLAIN and SCRAM authentication.
        Required if sasl_mechanism is PLAIN or one of the SCRAM mechanisms.
    sasl_kerberos_name: Constructed gssapi.Name for use with
        sasl mechanism handshake. If provided, sasl_kerberos_service_name and
        sasl_kerberos_domain name are ignored. Default: None.
    sasl_kerberos_service_name: Service name to include in GSSAPI
        sasl mechanism handshake. Default: 'kafka'
    sasl_kerberos_domain_name: kerberos domain name to use in GSSAPI
        sasl mechanism handshake. Default: one of bootstrap servers
    sasl_oauth_token_provider: OAuthBearer
        token provider instance. Default: None
    ssl_context: Pre-configured SSLContext for wrapping
        socket connections. If provided, all other ssl_* configurations
        will be ignored. Default: None.
    ssl_check_hostname: Flag to configure whether ssl handshake
        should verify that the certificate matches the broker's hostname.
        Default: True.
    ssl_cafile: Optional filename of ca file to use in certificate
        verification. Default: None.
    ssl_certfile: Optional filename of file in pem format containing
        the client certificate, as well as any ca certificates needed to
        establish the certificate's authenticity. Default: None.
    ssl_keyfile: Optional filename containing the client private key.
        Default: None.
    ssl_password: Optional password to be used when loading the
        certificate chain. Default: None.
    ssl_crlfile: Optional filename containing the CRL to check for
        certificate expiration. By default, no CRL check is done. When
        providing a file, only the leaf certificate will be checked against
        this CRL. The CRL can only be checked with Python 3.4+ or 2.7.9+.
        Default: None.
    ssl_ciphers: optionally set the available ciphers for ssl
        connections. It should be a string in the OpenSSL cipher list
        format. If no cipher can be selected (because compile-time options
        or other configuration forbids use of all the specified ciphers),
        an ssl.SSLError will be raised. See ssl.SSLContext.set_ciphers
    """

    # All fields default to None; only fields explicitly set are forwarded to
    # the KafkaConsumer (see _add_authentication_to_config), so kafka-python's
    # own defaults apply for anything left unset.

    # Security protocol
    security_protocol: Optional[str] = None
    # SASL configuration
    sasl_mechanism: Optional[str] = None
    sasl_plain_username: Optional[str] = None
    sasl_plain_password: Optional[str] = None
    sasl_kerberos_name: Optional[str] = None
    sasl_kerberos_service_name: Optional[str] = None
    sasl_kerberos_domain_name: Optional[str] = None
    sasl_oauth_token_provider: Optional[Any] = None
    # SSL configuration
    ssl_context: Optional[Any] = None
    ssl_check_hostname: Optional[bool] = None
    ssl_cafile: Optional[str] = None
    ssl_certfile: Optional[str] = None
    ssl_keyfile: Optional[str] = None
    ssl_password: Optional[str] = None
    ssl_ciphers: Optional[str] = None
    ssl_crlfile: Optional[str] = None
def _add_authentication_to_config(
    config: Dict[str, Any], kafka_auth_config: Optional[KafkaAuthConfig]
) -> None:
    """Merge authentication settings into a consumer config in-place.

    Only fields that are not ``None`` on ``kafka_auth_config`` are copied,
    so kafka-python's own defaults apply for everything left unset.

    Args:
        config: Consumer config dict to modify.
        kafka_auth_config: Authentication configuration, or ``None`` to leave
            ``config`` untouched.
    """
    if kafka_auth_config is None:
        return
    for auth_field in fields(kafka_auth_config):
        field_value = getattr(kafka_auth_config, auth_field.name)
        if field_value is not None:
            config[auth_field.name] = field_value
def _build_consumer_config_for_discovery(
    bootstrap_servers: List[str], kafka_auth_config: Optional[KafkaAuthConfig]
) -> Dict[str, Any]:
    """Assemble the minimal consumer config used only for partition discovery.

    Args:
        bootstrap_servers: List of Kafka broker addresses.
        kafka_auth_config: Authentication configuration.

    Returns:
        Consumer configuration dict for discovery.
    """
    # Bounded read: offsets are never committed, and a short consumer timeout
    # keeps discovery from blocking.
    discovery_config: Dict[str, Any] = {
        "bootstrap_servers": bootstrap_servers,
        "enable_auto_commit": False,
        "consumer_timeout_ms": 1000,
    }
    _add_authentication_to_config(discovery_config, kafka_auth_config)
    return discovery_config
def _build_consumer_config_for_read(
    bootstrap_servers: List[str],
    kafka_auth_config: Optional[KafkaAuthConfig],
) -> Dict[str, Any]:
    """Assemble the full consumer config used by read tasks.

    Args:
        bootstrap_servers: List of Kafka broker addresses.
        kafka_auth_config: Authentication configuration.

    Returns:
        Consumer configuration dict for reading.
    """
    # Identity (de)serializers keep keys/values as raw bytes; callers decode
    # downstream with map operations.
    read_config: Dict[str, Any] = {
        "bootstrap_servers": bootstrap_servers,
        "enable_auto_commit": False,
        "value_deserializer": lambda raw: raw,
        "key_deserializer": lambda raw: raw,
    }
    _add_authentication_to_config(read_config, kafka_auth_config)
    return read_config
def _datetime_to_ms(dt: datetime) -> int:
"""Convert a datetime to milliseconds since epoch (UTC).
If the datetime has no timezone info (i.e., ``tzinfo is None``),
it is assumed to be UTC. Timezone-aware datetimes are converted to
UTC automatically via ``datetime.timestamp()``.
Args:
dt: A datetime object, with or without timezone info.
Returns:
Milliseconds since Unix epoch.
"""
if dt.tzinfo is None:
dt = dt.replace(tzinfo=timezone.utc)
return int(dt.timestamp() * 1000)
def _resolve_datetime_offset(
    consumer: "KafkaConsumer",
    topic_partition: "TopicPartition",
    dt: datetime,
    fallback_offset: int,
) -> int:
    """Resolve a datetime to a concrete Kafka offset.

    Uses ``consumer.offsets_for_times()`` to find the earliest offset whose
    timestamp is >= the given datetime. If no such offset exists, returns
    ``fallback_offset``.

    Args:
        consumer: Kafka consumer instance.
        topic_partition: TopicPartition to resolve the offset for.
        dt: The datetime to resolve.
        fallback_offset: Offset to return if no messages match the timestamp.

    Returns:
        The resolved integer offset.
    """
    timestamp_ms: int = _datetime_to_ms(dt)
    # offsets_for_times maps each requested TopicPartition to an
    # OffsetAndTimestamp named tuple (or None when no message at/after the
    # requested timestamp exists in the partition).
    offsets: Dict = consumer.offsets_for_times({topic_partition: timestamp_ms})
    # Annotated as Any (not Tuple) because the value is kafka-python's
    # OffsetAndTimestamp, accessed below via its .offset attribute.
    offset_and_ts: Optional[Any] = offsets.get(topic_partition)
    if offset_and_ts is None:
        return fallback_offset
    return offset_and_ts.offset
def _resolve_offsets(
    consumer: "KafkaConsumer",
    topic_partition: "TopicPartition",
    start_offset: Union[int, datetime, Literal["earliest"]],
    end_offset: Union[int, datetime, Literal["latest"]],
) -> Tuple[int, int]:
    """Resolve start and end offsets to actual integer offsets.

    Handles int offsets, "earliest"/"latest" strings, and datetime objects.
    For datetime objects, uses ``consumer.offsets_for_times()`` to find the
    earliest offset whose timestamp is >= the given datetime.

    Args:
        consumer: Kafka consumer instance.
        topic_partition: TopicPartition to resolve offsets for.
        start_offset: Start offset (int, datetime, or "earliest").
        end_offset: End offset (int, datetime, or "latest").

    Returns:
        Tuple of (resolved_start_offset, resolved_end_offset).

    Raises:
        ValueError: If the resolved start offset exceeds the resolved end
            offset.
    """
    earliest_offset = consumer.beginning_offsets([topic_partition])[topic_partition]
    latest_offset = consumer.end_offsets([topic_partition])[topic_partition]
    # Keep original values for error messages
    original_start = start_offset
    original_end = end_offset
    # NOTE(review): None is also accepted here even though it is not in the
    # type hints; it behaves like "earliest"/"latest" respectively.
    if start_offset == "earliest" or start_offset is None:
        start_offset = earliest_offset
    elif isinstance(start_offset, datetime):
        # Falls back to the partition's latest offset when no message exists
        # at/after the requested time, which yields an empty range.
        start_offset = _resolve_datetime_offset(
            consumer, topic_partition, start_offset, latest_offset
        )
    if end_offset == "latest" or end_offset is None:
        end_offset = latest_offset
    elif isinstance(end_offset, datetime):
        end_offset = _resolve_datetime_offset(
            consumer, topic_partition, end_offset, latest_offset
        )
    if start_offset > end_offset:
        # Report both the user-supplied value and what it resolved to, so
        # datetime/string inputs produce an actionable error message.
        start_str = (
            f"{original_start}"
            if original_start == start_offset
            else f"{original_start} (resolved to {start_offset})"
        )
        end_str = (
            f"{original_end}"
            if original_end == end_offset
            else f"{original_end} (resolved to {end_offset})"
        )
        raise ValueError(
            f"start_offset ({start_str}) > end_offset ({end_str}) "
            f"for partition {topic_partition.partition} in topic {topic_partition.topic}"
        )
    return start_offset, end_offset
class KafkaDatasource(Datasource):
    """Kafka datasource for reading from Kafka topics with bounded reads."""

    # Batch size for incremental block yielding: records accumulate in a
    # Python list and are converted to an Arrow table once this many are
    # buffered.
    BATCH_SIZE_FOR_YIELD = 1000

    def __init__(
        self,
        topics: Union[str, List[str]],
        bootstrap_servers: Union[str, List[str]],
        start_offset: Union[int, datetime, Literal["earliest"]] = "earliest",
        end_offset: Union[int, datetime, Literal["latest"]] = "latest",
        kafka_auth_config: Optional[KafkaAuthConfig] = None,
        timeout_ms: int = 10000,
    ):
        """Initialize Kafka datasource.

        Args:
            topics: Kafka topic name(s) to read from.
            bootstrap_servers: Kafka broker addresses (string or list of strings).
            start_offset: Starting position. Can be:
                - int: Offset number
                - datetime: Read from the first message at or after this time.
                  datetimes with no timezone info are treated as UTC.
                - str: "earliest"
            end_offset: Ending position (exclusive). Can be:
                - int: Offset number
                - datetime: Read up to (but not including) the first message
                  at or after this time. datetimes with no timezone info are treated as UTC.
                - str: "latest"
            kafka_auth_config: Authentication configuration. See KafkaAuthConfig for details.
            timeout_ms: Timeout in milliseconds for every read task to poll until reaching end_offset (default 10000ms).
                If the read task does not reach end_offset within the timeout, it will stop polling and return the messages
                it has read so far.

        Raises:
            ValueError: If required configuration is missing.
            ImportError: If kafka-python is not installed.
        """
        _check_import(self, module="kafka", package="kafka-python")
        if not topics:
            raise ValueError("topics cannot be empty")
        if not bootstrap_servers:
            raise ValueError("bootstrap_servers cannot be empty")
        if timeout_ms <= 0:
            raise ValueError("timeout_ms must be positive")
        # Offset ordering is only validated eagerly when both ends are the
        # same comparable type; mixed forms are resolved per-partition at
        # read time by _resolve_offsets.
        if isinstance(start_offset, int) and isinstance(end_offset, int):
            if start_offset > end_offset:
                raise ValueError("start_offset must be less than end_offset")
        if isinstance(start_offset, datetime) and isinstance(end_offset, datetime):
            if _datetime_to_ms(start_offset) > _datetime_to_ms(end_offset):
                raise ValueError("start_offset must be less than end_offset")
        if isinstance(start_offset, str) and start_offset == "latest":
            raise ValueError("start_offset cannot be 'latest'")
        if isinstance(end_offset, str) and end_offset == "earliest":
            raise ValueError("end_offset cannot be 'earliest'")
        # Validate bootstrap_servers format
        if isinstance(bootstrap_servers, str):
            if not bootstrap_servers or ":" not in bootstrap_servers:
                raise ValueError(
                    f"Invalid bootstrap_servers format: {bootstrap_servers}. "
                    "Expected 'host:port' or list of 'host:port' strings."
                )
        elif isinstance(bootstrap_servers, list):
            if not bootstrap_servers:
                raise ValueError("bootstrap_servers cannot be empty list")
            for server in bootstrap_servers:
                if not isinstance(server, str) or ":" not in server:
                    raise ValueError(
                        f"Invalid bootstrap_servers format: {server}. "
                        "Expected 'host:port' string."
                    )
        # Normalize topics and servers to lists for uniform handling.
        self._topics = topics if isinstance(topics, list) else [topics]
        self._bootstrap_servers = (
            bootstrap_servers
            if isinstance(bootstrap_servers, list)
            else [bootstrap_servers]
        )
        self._start_offset = start_offset
        self._end_offset = end_offset
        self._kafka_auth_config = kafka_auth_config
        self._timeout_ms = timeout_ms
        self._target_max_block_size = DataContext.get_current().target_max_block_size

    def estimate_inmemory_data_size(self) -> Optional[int]:
        """Return an estimate of the in-memory data size, or None if unknown."""
        return None

    def get_read_tasks(
        self,
        parallelism: int,
        per_task_row_limit: Optional[int] = None,
        data_context: Optional["DataContext"] = None,
    ) -> List[ReadTask]:
        """Create read tasks for Kafka partitions.

        Creates one read task per partition.
        Each task reads from a single partition of a single topic.

        Args:
            parallelism: This argument is deprecated.
            per_task_row_limit: Maximum number of rows per read task.
            data_context: The data context to use to get read tasks. This is not used by this datasource.

        Returns:
            List of ReadTask objects, one per partition.
        """
        # Discover all partitions for all topics
        # We need to create a consumer on the driver to discover partitions
        from kafka import KafkaConsumer

        # Build minimal consumer config for partition discovery
        consumer_config = _build_consumer_config_for_discovery(
            self._bootstrap_servers, self._kafka_auth_config
        )

        # Discover partitions for all topics
        topic_partitions = []  # List of (topic, partition) tuples
        discovery_consumer = None
        try:
            discovery_consumer = KafkaConsumer(**consumer_config)
            for topic in self._topics:
                partitions = discovery_consumer.partitions_for_topic(topic)
                if not partitions:
                    raise ValueError(
                        f"Topic {topic} has no partitions or doesn't exist"
                    )
                for partition in partitions:
                    topic_partitions.append((topic, partition))
        finally:
            if discovery_consumer:
                discovery_consumer.close()

        # Store config for use in read functions (avoid serialization issues)
        bootstrap_servers = self._bootstrap_servers
        start_offset = self._start_offset
        end_offset = self._end_offset
        timeout_ms = self._timeout_ms
        kafka_auth_config = self._kafka_auth_config
        target_max_block_size = self._target_max_block_size

        tasks = []
        schema = pa.schema(
            [
                ("offset", pa.int64()),
                ("key", pa.binary()),
                ("value", pa.binary()),
                ("topic", pa.string()),
                ("partition", pa.int32()),
                ("timestamp", pa.int64()),  # Kafka timestamp in milliseconds
                ("timestamp_type", pa.int32()),  # 0=CreateTime, 1=LogAppendTime
                ("headers", pa.map_(pa.string(), pa.binary())),  # Message headers
            ]
        )
        for topic_name, partition_id in topic_partitions:

            def create_kafka_read_fn(
                topic_name: str = topic_name,
                partition_id: int = partition_id,
                bootstrap_servers: List[str] = bootstrap_servers,
                start_offset: Optional[
                    Union[int, datetime, Literal["earliest"]]
                ] = start_offset,
                end_offset: Optional[
                    Union[int, datetime, Literal["latest"]]
                ] = end_offset,
                kafka_auth_config: Optional[KafkaAuthConfig] = kafka_auth_config,
                timeout_ms: int = timeout_ms,
                target_max_block_size: int = target_max_block_size,
            ):
                """Create a Kafka read function with captured variables.

                This factory function captures configuration variables as default arguments
                to avoid serialization issues when the read function is executed remotely
                by Ray. Using default arguments ensures all needed config is available
                in the remote task without requiring 'self' to be serialized.
                """

                def kafka_read_fn() -> Iterable[Block]:
                    """Read function for a single Kafka partition using kafka-python.

                    This function runs remotely in a Ray task. It creates a KafkaConsumer,
                    reads messages from a single assigned partition, and yields PyArrow tables
                    incrementally for efficient streaming processing.
                    """
                    from kafka import KafkaConsumer, TopicPartition

                    # Build consumer configuration
                    consumer_config = _build_consumer_config_for_read(
                        bootstrap_servers, kafka_auth_config
                    )
                    # Create the Kafka consumer
                    consumer = KafkaConsumer(**consumer_config)
                    try:
                        # Assign only the specific partition for this task
                        topic_partition = TopicPartition(topic_name, partition_id)
                        consumer.assign([topic_partition])
                        start_off, end_off = _resolve_offsets(
                            consumer, topic_partition, start_offset, end_offset
                        )
                        # Seek to the requested starting position
                        consumer.seek(topic_partition, start_off)
                        records = []
                        output_buffer = BlockOutputBuffer(
                            OutputBlockSizeOption.of(
                                target_max_block_size=target_max_block_size
                            )
                        )
                        # Main polling loop - read maximum 500 messages per loop (default max_poll_records for KafkaConsumer poll is 500)
                        partition_done = False
                        start_time = time.time()
                        timeout_seconds = timeout_ms / 1000.0
                        while not partition_done:
                            # Check if overall timeout has been reached
                            elapsed_time = time.time() - start_time
                            if elapsed_time >= timeout_seconds:
                                logger.warning(
                                    f"Kafka read task timed out after {timeout_ms}ms while reading partition {partition_id} of topic {topic_name}; "
                                    f"end_offset {end_off} was not reached. Returning {len(records)} messages collected in this read task so far."
                                )
                                break
                            # Check if we've reached the end_offset before polling
                            # This avoids waiting for timeout when no more messages are available
                            current_position = consumer.position(topic_partition)
                            if current_position >= end_off:
                                break
                            # Calculate remaining timeout for this poll
                            remaining_timeout_ms = int(
                                (timeout_seconds - elapsed_time) * 1000
                            )
                            # Poll for a batch of messages from Kafka
                            msg_batch = consumer.poll(
                                timeout_ms=min(remaining_timeout_ms, 10000),
                            )
                            if not msg_batch:
                                continue
                            messages = msg_batch.get(topic_partition, [])
                            for msg in messages:
                                # Check if we've reached the end offset (for bounded reads)
                                # Use >= for exclusive end_offset (don't include end_offset message)
                                if end_off is not None and msg.offset >= end_off:
                                    partition_done = True
                                    break
                                # Extract all message metadata into a flat record
                                headers_dict = dict(msg.headers) if msg.headers else {}
                                records.append(
                                    {
                                        "offset": msg.offset,
                                        "key": msg.key,
                                        "value": msg.value,
                                        "topic": msg.topic,
                                        "partition": msg.partition,
                                        "timestamp": msg.timestamp,
                                        "timestamp_type": msg.timestamp_type,
                                        "headers": headers_dict,
                                    }
                                )
                                # Yield incrementally when we hit batch size
                                if len(records) >= KafkaDatasource.BATCH_SIZE_FOR_YIELD:
                                    table = pa.Table.from_pylist(records)
                                    output_buffer.add_block(table)
                                    while output_buffer.has_next():
                                        yield output_buffer.next()
                                    records = []  # Clear for next batch
                        # Yield any remaining records. Finalize unconditionally:
                        # the buffer may still hold rows below the target block
                        # size from earlier add_block() calls even when
                        # `records` is empty (e.g. the read ended exactly on a
                        # BATCH_SIZE_FOR_YIELD boundary); skipping finalize()
                        # in that case would silently drop those rows.
                        if records:
                            output_buffer.add_block(pa.Table.from_pylist(records))
                        output_buffer.finalize()
                        while output_buffer.has_next():
                            yield output_buffer.next()
                    finally:
                        # Always close the consumer to release connections
                        consumer.close()

                return kafka_read_fn

            # Create metadata for this task
            metadata = BlockMetadata(
                num_rows=None,
                size_bytes=None,
                input_files=[f"kafka://{topic_name}/{partition_id}"],
                exec_stats=None,
            )
            kafka_read_fn = create_kafka_read_fn(topic_name, partition_id)
            # Create read task
            task = ReadTask(
                read_fn=kafka_read_fn,
                metadata=metadata,
                schema=schema,
                per_task_row_limit=per_task_row_limit,
            )
            tasks.append(task)
        return tasks
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/datasource/kafka_datasource.py",
"license": "Apache License 2.0",
"lines": 513,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:release/nightly_tests/simulate_cross_az_network_failure.py | import argparse
import subprocess
import sys
import threading
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
import ray
import random
import ray.util
# The goal of this script is to simulate cross-AZ transient network failures periodically on a Ray job.
# We do this by modifying the iptables to drop all inbound and outbound traffic for a given duration
# except for intra-node and SSH traffic. After the duration, the iptables rules are restored.
# The failure script is run in a background thread while the main command is run in the foreground.
# NOTE: The script itself does not spin up a Ray cluster, it operates on the assumption that an existing
# Ray cluster is running and we are able to SSH into the nodes (like on Anyscale).
# Tunables for the failure-injection run.
PARALLEL = 500  # concurrent SSH sessions
SSH_USER = "ubuntu"  # Anyscale default
AFFECT_WORKER_RATIO = 0.50  # failure affects 50% of worker nodes
# Extra ssh(1) options applied to every remote command:
#   BatchMode=yes                    -> never prompt for a password/passphrase
#   StrictHostKeyChecking=accept-new -> auto-accept unseen host keys, reject changed ones
#   ConnectTimeout=10                -> give up on unreachable hosts after 10s
EXTRA_SSH = [
    "-o",
    "BatchMode=yes",
    "-o",
    "StrictHostKeyChecking=accept-new",
    "-o",
    "ConnectTimeout=10",
]
def iptables_cmd(self_ip: str, seconds: int) -> str:
    """Build the remote shell command that blacks out a node for `seconds`.

    The returned script appends iptables rules that DROP all inbound and
    outbound traffic while keeping SSH (port 22), loopback, and intra-node
    (`self_ip` -> `self_ip`) traffic alive, sleeps for `seconds`, then deletes
    the same rules (in reverse order of insertion). It is wrapped in
    `nohup setsid ... &` so it keeps running after the SSH session drops.

    Args:
        self_ip: The node's own IP; traffic to/from itself stays allowed.
        seconds: How long the network blackout lasts.

    Returns:
        A shell command string suitable for `ssh <host> <cmd>`.
    """
    return f"""\
nohup setsid bash -lc '
sudo iptables -w -A INPUT -p tcp --dport 22 -j ACCEPT
sudo iptables -w -A OUTPUT -p tcp --sport 22 -j ACCEPT
sudo iptables -w -A INPUT -s 127.0.0.0/8 -d 127.0.0.0/8 -j ACCEPT
sudo iptables -w -A OUTPUT -s 127.0.0.0/8 -d 127.0.0.0/8 -j ACCEPT
sudo iptables -w -A INPUT -s {self_ip} -d {self_ip} -j ACCEPT
sudo iptables -w -A OUTPUT -s {self_ip} -d {self_ip} -j ACCEPT
sudo iptables -w -A INPUT -j DROP
sudo iptables -w -A OUTPUT -j DROP
sleep {seconds}
sudo iptables -w -D OUTPUT -j DROP
sudo iptables -w -D INPUT -j DROP
sudo iptables -w -D OUTPUT -s {self_ip} -d {self_ip} -j ACCEPT
sudo iptables -w -D INPUT -s {self_ip} -d {self_ip} -j ACCEPT
sudo iptables -w -D OUTPUT -s 127.0.0.0/8 -d 127.0.0.0/8 -j ACCEPT
sudo iptables -w -D INPUT -s 127.0.0.0/8 -d 127.0.0.0/8 -j ACCEPT
sudo iptables -w -D OUTPUT -p tcp --sport 22 -j ACCEPT
sudo iptables -w -D INPUT -p tcp --dport 22 -j ACCEPT
' &>/dev/null &
"""
def ssh_run(ip: str, cmd: str) -> tuple[bool, str]:
    """Execute `cmd` on host `ip` over SSH and report (success, output).

    On success the stripped stdout is returned; on failure the stripped
    stderr is preferred, falling back to stdout when stderr is empty.
    """
    argv = ["ssh", *EXTRA_SSH, f"{SSH_USER}@{ip}", cmd]
    proc = subprocess.run(argv, capture_output=True, text=True)
    if proc.returncode == 0:
        return True, proc.stdout.strip()
    return False, proc.stderr.strip() or proc.stdout.strip()
def simulate_cross_az_network_failure(seconds: int):
    """Black out ~50% of worker nodes plus the head node for `seconds`.

    Discovers alive nodes via `ray.nodes()`, samples AFFECT_WORKER_RATIO of
    the workers, and fires the iptables blackout script on each affected node
    over SSH (fan-out via a thread pool). Prints a per-node success/failure
    summary. Returns early (with an error message) if no workers are found.
    """
    if not ray.is_initialized():
        ray.init(address="auto")
    nodes = ray.nodes()
    all_ips = [n["NodeManagerAddress"] for n in nodes if n.get("Alive", False)]
    # Always inject failures on the head node
    # NOTE(review): if the local IP is not among the reported nodes,
    # `head_ip` stays None and is later appended to `affected` — presumably
    # that never happens when run on the head node itself; verify.
    head_ip = next(
        (
            n["NodeManagerAddress"]
            for n in nodes
            if n.get("NodeManagerAddress") == ray.util.get_node_ip_address()
        ),
        None,
    )
    print(f"Discovered {len(all_ips)} alive nodes")
    print(f"Head node: {head_ip}")
    worker_ips = [ip for ip in all_ips if ip != head_ip]
    print(f"Eligible worker nodes: {len(worker_ips)}")
    if not worker_ips:
        print("ERROR: No worker nodes found")
        return
    # At least one worker is always affected, even for tiny clusters.
    k = max(1, int(len(worker_ips) * AFFECT_WORKER_RATIO))
    affected = random.sample(worker_ips, k)
    # NOTE: When running this script on Anyscale with longer failure durations the blacked out head node could
    # cause your workspace to lag and die. To avoid this, comment out the below line.
    affected.append(head_ip)
    print(
        f"Affecting {len(affected)} nodes (~{AFFECT_WORKER_RATIO*100:.0f}% of workers + head node):"
    )
    print(", ".join(affected[:10]) + (" ..." if len(affected) > 10 else ""))
    # Pre-build each node's blackout command (embeds that node's own IP).
    cmds = {ip: iptables_cmd(ip, seconds) for ip in affected}
    print(f"\nTriggering {seconds}s of transient network failure...")
    successes, failures = [], {}
    with ThreadPoolExecutor(max_workers=PARALLEL) as ex:
        futs = {ex.submit(ssh_run, ip, cmds[ip]): ip for ip in affected}
        for fut in as_completed(futs):
            ip = futs[fut]
            try:
                ok, msg = fut.result()
                if ok:
                    successes.append(ip)
                else:
                    failures[ip] = msg
            except Exception as e:
                # SSH invocation itself blew up (not just a non-zero exit).
                failures[ip] = str(e)
    print("\n=== Summary ===")
    print(f"Succeeded: {len(successes)} nodes")
    print(f"Failed : {len(failures)} nodes")
    if failures:
        for ip, msg in list(failures.items()):
            print(f" {ip}: {msg}")
def network_failure_loop(interval, network_failure_duration):
    """Trigger network-failure simulations at a fixed cadence, forever.

    Sleeps `interval` seconds between events; each event blacks out part of
    the cluster for `network_failure_duration` seconds. A failing simulation
    is logged and does not stop the loop.

    Args:
        interval: Interval in seconds between network failure events.
        network_failure_duration: Duration in seconds of each network failure.
    """
    stamp = time.strftime("%H:%M:%S")
    print(
        f"[NETWORK FAILURE {stamp}] Starting network failure thread with interval: {interval} seconds"
    )
    while True:
        # Wait out the quiet period first, then inject a failure.
        time.sleep(interval)
        stamp = time.strftime("%H:%M:%S")
        print(
            f"[NETWORK FAILURE {stamp}] Triggering network failure simulation..."
        )
        try:
            simulate_cross_az_network_failure(network_failure_duration)
        except Exception as err:
            stamp = time.strftime("%H:%M:%S")
            print(
                f"[NETWORK FAILURE {stamp}] ERROR: Network failure simulation failed: {err}"
            )
def parse_args():
    """Parse command-line options for the failure-injection wrapper.

    Returns:
        argparse.Namespace with `network_failure_interval` (int),
        `network_failure_duration` (int), and `command` (list of str — every
        token after `--command`, captured via REMAINDER).
    """
    p = argparse.ArgumentParser(
        description="Run benchmark with network failure injection at regular intervals",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
    # Run map_benchmark with network failures injected every 300 seconds, each lasting 5 seconds
    python simulate_cross_az_network_failure.py --network-failure-interval 300 --network-failure-duration 5 --command python map_benchmark.py --api map_batches --sf 1000
""",
    )
    # Failure cadence and duration are both mandatory.
    p.add_argument(
        "--network-failure-interval",
        type=int,
        required=True,
        help="Interval in seconds between network failure events",
    )
    p.add_argument(
        "--network-failure-duration",
        type=int,
        required=True,
        help="Duration in seconds of each network failure",
    )
    # REMAINDER swallows everything after --command, including dashed flags.
    p.add_argument(
        "--command",
        nargs=argparse.REMAINDER,
        required=True,
        help="The main command to run (e.g., 'python map_benchmark.py --api map_batches ...')",
    )
    return p.parse_args()
def main():
    """Run the wrapped command while a daemon thread injects network failures.

    Exits with the wrapped command's return code (130 on Ctrl-C, 1 on an
    unexpected error).
    """
    args = parse_args()
    # argparse guarantees --command was given, but it may still be empty.
    if not args.command:
        print("ERROR: --command requires at least one argument")
        print(
            "Usage: python simulate_cross_az_network_failure.py --network-failure-interval <seconds> --network-failure-duration <seconds> --command <command>"
        )
        sys.exit(1)
    bar = "=" * 80
    print(bar)
    print("Running with Network Failure Injection")
    print(bar)
    print(f"Network failure interval: {args.network_failure_interval} seconds")
    print(f"Network failure duration: {args.network_failure_duration} seconds")
    print(f"Command: {' '.join(args.command)}")
    print(bar)
    print()
    # Daemon thread: dies automatically when the main process exits.
    injector = threading.Thread(
        target=network_failure_loop,
        args=(args.network_failure_interval, args.network_failure_duration),
        daemon=True,
    )
    injector.start()
    try:
        # Run the main command in the foreground.
        print(
            f"[MAIN {time.strftime('%H:%M:%S')}] Starting command: {' '.join(args.command)}"
        )
        proc = subprocess.run(args.command)
        print(
            f"\n[MAIN {time.strftime('%H:%M:%S')}] Command completed with exit code: {proc.returncode}"
        )
        exit_code = proc.returncode
    except KeyboardInterrupt:
        print("\n[MAIN] Interrupted by user")
        exit_code = 130
    except Exception as err:
        print(f"[MAIN] ERROR: {err}")
        exit_code = 1
    print("\n" + bar)
    print(f"Execution completed with exit code: {exit_code}")
    print(bar)
    sys.exit(exit_code)
# Script entry point.
if __name__ == "__main__":
    main()
| {
"repo_id": "ray-project/ray",
"file_path": "release/nightly_tests/simulate_cross_az_network_failure.py",
"license": "Apache License 2.0",
"lines": 206,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:rllib/connectors/common/flatten_observations.py | from typing import Any, Collection, Dict, List, Optional
import gymnasium as gym
import numpy as np
import tree # pip install dm_tree
from gymnasium.spaces import Box
from ray.rllib.connectors.connector_v2 import ConnectorV2
from ray.rllib.core.columns import Columns
from ray.rllib.core.rl_module.rl_module import RLModule
from ray.rllib.utils.annotations import override
from ray.rllib.utils.numpy import flatten_inputs_to_1d_tensor
from ray.rllib.utils.spaces.space_utils import get_base_struct_from_space
from ray.rllib.utils.typing import AgentID, EpisodeType
from ray.util.annotations import PublicAPI
@PublicAPI(stability="alpha")
class FlattenObservations(ConnectorV2):
"""A connector piece that flattens all observation components into a 1D array.
- Can be used either in env-to-module or learner pipelines.
- When used in env-to-module pipelines:
- Works directly on the incoming episodes list and changes the last observation
in-place (write the flattened observation back into the episode).
- This connector does NOT alter the incoming batch (`data`) when called.
- When used in learner pipelines:
Works directly on the incoming episodes list and changes all observations
before stacking them into the batch.
.. testcode::
import gymnasium as gym
import numpy as np
from ray.rllib.connectors.env_to_module import FlattenObservations
from ray.rllib.env.single_agent_episode import SingleAgentEpisode
from ray.rllib.utils.test_utils import check
# Some arbitrarily nested, complex observation space.
obs_space = gym.spaces.Dict({
"a": gym.spaces.Box(-10.0, 10.0, (), np.float32),
"b": gym.spaces.Tuple([
gym.spaces.Discrete(2),
gym.spaces.Box(-1.0, 1.0, (2, 1), np.float32),
]),
"c": gym.spaces.MultiDiscrete([2, 3]),
})
act_space = gym.spaces.Discrete(2)
# Two example episodes, both with initial (reset) observations coming from the
# above defined observation space.
episode_1 = SingleAgentEpisode(
observations=[
{
"a": np.array(-10.0, np.float32),
"b": (1, np.array([[-1.0], [-1.0]], np.float32)),
"c": np.array([0, 2]),
},
],
)
episode_2 = SingleAgentEpisode(
observations=[
{
"a": np.array(10.0, np.float32),
"b": (0, np.array([[1.0], [1.0]], np.float32)),
"c": np.array([1, 1]),
},
],
)
# Construct our connector piece.
connector = FlattenObservations(obs_space, act_space)
# Call our connector piece with the example data.
output_batch = connector(
rl_module=None, # This connector works without an RLModule.
batch={}, # This connector does not alter the input batch.
episodes=[episode_1, episode_2],
explore=True,
shared_data={},
)
# The connector does not alter the data and acts as pure pass-through.
check(output_batch, {})
# The connector has flattened each item in the episodes to a 1D tensor.
check(
episode_1.get_observations(0),
# box() disc(2). box(2, 1). multidisc(2, 3)........
np.array([-10.0, 0.0, 1.0, -1.0, -1.0, 1.0, 0.0, 0.0, 0.0, 1.0]),
)
check(
episode_2.get_observations(0),
# box() disc(2). box(2, 1). multidisc(2, 3)........
np.array([10.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0]),
)
# Two example episodes, both with initial (reset) observations coming from the
# above defined observation space.
episode_1 = SingleAgentEpisode(
observations=[
{
"a": np.array(-10.0, np.float32),
"b": (1, np.array([[-1.0], [-1.0]], np.float32)),
"c": np.array([0, 2]),
},
],
)
episode_2 = SingleAgentEpisode(
observations=[
{
"a": np.array(10.0, np.float32),
"b": (0, np.array([[1.0], [1.0]], np.float32)),
"c": np.array([1, 1]),
},
],
)
# Construct our connector piece and remove in it the "a" (Box()) key-value
# pair from the dictionary observations.
connector = FlattenObservations(obs_space, act_space, keys_to_remove=["a"])
# Call our connector piece with the example data.
output_batch = connector(
rl_module=None, # This connector works without an RLModule.
batch={}, # This connector does not alter the input batch.
episodes=[episode_1, episode_2],
explore=True,
shared_data={},
)
# The connector has flattened each item in the episodes to a 1D tensor
# and removed the "a" (Box()) key-value pair.
check(
episode_1.get_observations(0),
# disc(2). box(2, 1). multidisc(2, 3)........
np.array([0.0, 1.0, -1.0, -1.0, 1.0, 0.0, 0.0, 0.0, 1.0]),
)
check(
episode_2.get_observations(0),
# disc(2). box(2, 1). multidisc(2, 3)........
np.array([1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0]),
)
# Use the connector in a learner pipeline. Note, we need here two
# observations in the episode because the agent has to have stepped
# at least once.
episode_1 = SingleAgentEpisode(
observations=[
{
"a": np.array(-10.0, np.float32),
"b": (1, np.array([[-1.0], [-1.0]], np.float32)),
"c": np.array([0, 2]),
},
{
"a": np.array(-10.0, np.float32),
"b": (1, np.array([[-1.0], [-1.0]], np.float32)),
"c": np.array([0, 2]),
},
],
actions=[1],
rewards=[0],
# Set the length of the lookback buffer to 0 to read the data as
# from an actual step.
len_lookback_buffer=0,
)
episode_2 = SingleAgentEpisode(
observations=[
{
"a": np.array(10.0, np.float32),
"b": (0, np.array([[1.0], [1.0]], np.float32)),
"c": np.array([1, 1]),
},
{
"a": np.array(10.0, np.float32),
"b": (0, np.array([[1.0], [1.0]], np.float32)),
"c": np.array([1, 1]),
},
],
actions=[1],
rewards=[0],
# Set the length of the lookback buffer to 0 to read the data as
# from an actual step.
len_lookback_buffer=0,
)
# Construct our connector piece for a learner pipeline and remove the
# "a" (Box()) key-value pair.
connector = FlattenObservations(
obs_space,
act_space,
as_learner_connector=True,
keys_to_remove=["a"]
)
# Call our connector piece with the example data.
output_batch = connector(
rl_module=None, # This connector works without an RLModule.
batch={}, # This connector does not alter the input batch.
episodes=[episode_1, episode_2],
explore=True,
shared_data={},
)
check(list(output_batch.keys()), ["obs"])
check(list(output_batch["obs"].keys()), [(episode_1.id_,), (episode_2.id_,)])
check(
output_batch["obs"][(episode_1.id_,)][0][0],
np.array([0.0, 1.0, -1.0, -1.0, 1.0, 0.0, 0.0, 0.0, 1.0]),
)
check(
output_batch["obs"][(episode_2.id_,)][0][0],
np.array([1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0]),
)
# Multi-agent example: Use the connector with a multi-agent observation space.
# The observation space must be a Dict with agent IDs as top-level keys.
from ray.rllib.env.multi_agent_episode import MultiAgentEpisode
# Define a per-agent observation space.
per_agent_obs_space = gym.spaces.Dict({
"a": gym.spaces.Box(-10.0, 10.0, (), np.float32),
"b": gym.spaces.Tuple([
gym.spaces.Discrete(2),
gym.spaces.Box(-1.0, 1.0, (2, 1), np.float32),
]),
"c": gym.spaces.MultiDiscrete([2, 3]),
})
# Create a multi-agent observation space with agent IDs as keys.
multi_agent_obs_space = gym.spaces.Dict({
"agent_1": per_agent_obs_space,
"agent_2": per_agent_obs_space,
})
# Create a multi-agent episode with observations for both agents.
# Agent IDs are inferred from the keys in the observations dict.
ma_episode = MultiAgentEpisode(
observations=[
{
"agent_1": {
"a": np.array(-10.0, np.float32),
"b": (1, np.array([[-1.0], [-1.0]], np.float32)),
"c": np.array([0, 2]),
},
"agent_2": {
"a": np.array(10.0, np.float32),
"b": (0, np.array([[1.0], [1.0]], np.float32)),
"c": np.array([1, 1]),
},
},
],
)
# Construct the connector for multi-agent, flattening only agent_1's observations.
# Note: If agent_ids is None (the default), all agents' observations are flattened.
connector = FlattenObservations(
multi_agent_obs_space,
act_space,
multi_agent=True,
agent_ids=["agent_1"],
)
# Call the connector.
output_batch = connector(
rl_module=None,
batch={},
episodes=[ma_episode],
explore=True,
shared_data={},
)
# agent_1's observation is flattened.
check(
ma_episode.agent_episodes["agent_1"].get_observations(0),
# box() disc(2). box(2, 1). multidisc(2, 3)........
np.array([-10.0, 0.0, 1.0, -1.0, -1.0, 1.0, 0.0, 0.0, 0.0, 1.0]),
)
# agent_2's observation is unchanged (not in agent_ids).
check(
ma_episode.agent_episodes["agent_2"].get_observations(0),
{
"a": np.array(10.0, np.float32),
"b": (0, np.array([[1.0], [1.0]], np.float32)),
"c": np.array([1, 1]),
},
)
"""
    @override(ConnectorV2)
    def recompute_output_observation_space(
        self,
        input_observation_space: gym.Space,
        input_action_space: gym.Space,
    ) -> gym.Space:
        """Compute the flattened output observation space.

        Single-agent: returns a 1D float32 Box whose length equals the size
        of one flattened sample from the (possibly key-reduced) input space.
        Multi-agent: returns a Dict space mapping each agent ID to either its
        flattened Box (agents being flattened) or its original space (agents
        excluded via `agent_ids`). Also caches the (key-reduced) base space
        struct in `self._input_obs_base_struct` for use by `__call__`.
        """
        # NOTE(review): reads `self.input_observation_space` while the
        # parameter `input_observation_space` is also in scope — presumably
        # both refer to the same space by the time this runs; confirm against
        # ConnectorV2's call order.
        self._input_obs_base_struct = get_base_struct_from_space(
            self.input_observation_space
        )
        if self._multi_agent:
            spaces = {}
            assert isinstance(
                input_observation_space, gym.spaces.Dict
            ), f"To flatten a Multi-Agent observation, it is expected that observation space is a dictionary, its actual type is {type(input_observation_space)}"
            for agent_id, space in input_observation_space.items():
                # Remove keys, if necessary.
                # TODO (simon): Maybe allow to remove different keys for different agents.
                if self._keys_to_remove:
                    assert isinstance(
                        space, gym.spaces.Dict
                    ), f"To remove keys from an observation space requires that it be a dictionary, its actual type is {type(space)}"
                    self._input_obs_base_struct[agent_id] = {
                        k: v
                        for k, v in self._input_obs_base_struct[agent_id].items()
                        if k not in self._keys_to_remove
                    }
                if self._agent_ids and agent_id not in self._agent_ids:
                    # For nested spaces, we need to use the original Spaces (rather than the reduced version)
                    spaces[agent_id] = self.input_observation_space[agent_id]
                else:
                    # Flatten one sample to learn the flattened length.
                    sample = flatten_inputs_to_1d_tensor(
                        tree.map_structure(
                            lambda s: s.sample(),
                            self._input_obs_base_struct[agent_id],
                        ),
                        self._input_obs_base_struct[agent_id],
                        batch_axis=False,
                    )
                    spaces[agent_id] = Box(
                        float("-inf"), float("inf"), (len(sample),), np.float32
                    )
            return gym.spaces.Dict(spaces)
        else:
            # Remove keys, if necessary.
            if self._keys_to_remove:
                assert isinstance(
                    input_observation_space, gym.spaces.Dict
                ), f"To remove keys from an observation space requires that it be a dictionary, its actual type is {type(input_observation_space)}"
                self._input_obs_base_struct = {
                    k: v
                    for k, v in self._input_obs_base_struct.items()
                    if k not in self._keys_to_remove
                }
            # Flatten one sample to learn the flattened length.
            sample = flatten_inputs_to_1d_tensor(
                tree.map_structure(
                    lambda s: s.sample(),
                    self._input_obs_base_struct,
                ),
                self._input_obs_base_struct,
                batch_axis=False,
            )
            return Box(float("-inf"), float("inf"), (len(sample),), np.float32)
def __init__(
self,
input_observation_space: Optional[gym.Space] = None,
input_action_space: Optional[gym.Space] = None,
*,
multi_agent: bool = False,
agent_ids: Optional[Collection[AgentID]] = None,
as_learner_connector: bool = False,
keys_to_remove: Optional[List[str]] = None,
**kwargs,
):
"""Initializes a FlattenObservations instance.
Args:
input_observation_space: The input observation space. For multi-agent
setups, this must be a Dict space with agent IDs as top-level keys
mapping to each agent's individual observation space.
input_action_space: The input action space.
multi_agent: Whether this connector operates on multi-agent observations,
in which case, the top-level of the Dict space (where agent IDs are
mapped to individual agents' observation spaces) is left as-is.
agent_ids: If multi_agent is True, this argument defines a collection of
AgentIDs for which to flatten. AgentIDs not in this collection will
have their observations passed through unchanged.
If None (the default), flatten observations for all AgentIDs.
as_learner_connector: Whether this connector is part of a Learner connector
pipeline, as opposed to an env-to-module pipeline.
Note, this is usually only used for offline rl where the data comes
from an offline dataset instead of a simulator. With a simulator the
data is simply rewritten.
keys_to_remove: Optional keys to remove from the observations.
"""
self._input_obs_base_struct = None
self._multi_agent = multi_agent
self._agent_ids = agent_ids
self._as_learner_connector = as_learner_connector
assert keys_to_remove is None or (
keys_to_remove
and isinstance(input_observation_space, gym.spaces.Dict)
or (
multi_agent
and any(
isinstance(agent_space, gym.spaces.Dict)
for agent_space in self.input_observation_space
)
)
), "When using `keys_to_remove` the observation space must be of type `gym.spaces.Dict`."
self._keys_to_remove = keys_to_remove or []
super().__init__(input_observation_space, input_action_space, **kwargs)
    @override(ConnectorV2)
    def __call__(
        self,
        *,
        rl_module: RLModule,
        batch: Dict[str, Any],
        episodes: List[EpisodeType],
        explore: Optional[bool] = None,
        shared_data: Optional[dict] = None,
        **kwargs,
    ) -> Any:
        """Flatten observations, either into `batch` (learner) or in-place (env).

        Learner mode: flattens every observation of each single-agent episode
        and adds the resulting 2D array (one row per timestep) to `batch`
        under Columns.OBS; episodes are left untouched.
        Env-to-module mode: overwrites each episode's LAST observation with
        its flattened form and updates the episode's observation space.
        """
        if self._as_learner_connector:
            for sa_episode in self.single_agent_episode_iterator(
                episodes, agents_that_stepped_only=True
            ):
                def _map_fn(obs, _sa_episode=sa_episode):
                    # Remove keys, if necessary.
                    obs = [self._remove_keys_from_dict(o, sa_episode) for o in obs]
                    batch_size = len(sa_episode)
                    # NOTE(review): unlike the env-to-module branch below,
                    # this passes the full `self._input_obs_base_struct`
                    # without indexing by `sa_episode.agent_id` — confirm this
                    # is correct for multi-agent learner pipelines.
                    flattened_obs = flatten_inputs_to_1d_tensor(
                        inputs=obs,
                        # In the multi-agent case, we need to use the specific agent's
                        # space struct, not the multi-agent observation space dict.
                        spaces_struct=self._input_obs_base_struct,
                        # Our items are individual observations (no batch axis present).
                        batch_axis=False,
                    )
                    # One row per timestep of the episode.
                    return flattened_obs.reshape(batch_size, -1).copy()

                self.add_n_batch_items(
                    batch=batch,
                    column=Columns.OBS,
                    items_to_add=_map_fn(
                        sa_episode.get_observations(indices=slice(0, len(sa_episode))),
                        sa_episode,
                    ),
                    num_items=len(sa_episode),
                    single_agent_episode=sa_episode,
                )
        else:
            for sa_episode in self.single_agent_episode_iterator(
                episodes, agents_that_stepped_only=True
            ):
                last_obs = sa_episode.get_observations(-1)
                # Remove keys, if necessary.
                last_obs = self._remove_keys_from_dict(last_obs, sa_episode)
                if self._multi_agent:
                    if (
                        self._agent_ids is not None
                        and sa_episode.agent_id not in self._agent_ids
                    ):
                        # Agent excluded from flattening: pass through as-is.
                        flattened_obs = last_obs
                    else:
                        flattened_obs = flatten_inputs_to_1d_tensor(
                            inputs=last_obs,
                            # In the multi-agent case, we need to use the specific agent's
                            # space struct, not the multi-agent observation space dict.
                            spaces_struct=self._input_obs_base_struct[
                                sa_episode.agent_id
                            ],
                            # Our items are individual observations (no batch axis present).
                            batch_axis=False,
                        )
                else:
                    flattened_obs = flatten_inputs_to_1d_tensor(
                        inputs=last_obs,
                        spaces_struct=self._input_obs_base_struct,
                        # Our items are individual observations (no batch axis present).
                        batch_axis=False,
                    )
                # Write new observation directly back into the episode.
                sa_episode.set_observations(at_indices=-1, new_data=flattened_obs)
                # We set the Episode's observation space to ours so that we can safely
                # set the last obs to the new value (without causing a space mismatch
                # error).
                sa_episode.observation_space = self.observation_space
        return batch
def _remove_keys_from_dict(self, obs, sa_episode):
"""Removes keys from dictionary spaces.
Args:
obs: Observation sample from space.
sa_episode: Single-agent episode. Needs `agent_id` set in multi-agent
setups.
Returns:
Observation sample `obs` with keys in `self._keys_to_remove` removed.
"""
# Only remove keys for agents that have a dictionary space.
is_dict_space = False
if self._multi_agent:
is_dict_space = isinstance(
self.input_observation_space[sa_episode.agent_id], gym.spaces.Dict
)
else:
is_dict_space = isinstance(self.input_observation_space, gym.spaces.Dict)
# Remove keys, if necessary.
if is_dict_space and self._keys_to_remove:
obs = {k: v for k, v in obs.items() if k not in self._keys_to_remove}
return obs
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/connectors/common/flatten_observations.py",
"license": "Apache License 2.0",
"lines": 467,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:python/ray/data/namespace_expressions/list_namespace.py | """List namespace for expression operations on list-typed columns."""
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING, Literal, Union
import numpy as np
import pyarrow
import pyarrow.compute as pc
from ray.data._internal.arrow_utils import _combine_as_list_array, _counts_to_offsets
from ray.data.datatype import DataType
from ray.data.expressions import pyarrow_udf
if TYPE_CHECKING:
from ray.data.expressions import Expr, UDFExpr
def _ensure_array(arr: pyarrow.Array) -> pyarrow.Array:
    """Return `arr` as a contiguous Array, combining ChunkedArray chunks."""
    if not isinstance(arr, pyarrow.ChunkedArray):
        return arr
    return arr.combine_chunks()
def _is_list_like(pa_type: pyarrow.DataType) -> bool:
    """Return True for list-like Arrow types (list, large_list, fixed_size_list).

    Also recognizes list_view / large_list_view on pyarrow builds that expose
    them (probed via getattr for backward compatibility with older pyarrow).
    """
    if (
        pyarrow.types.is_list(pa_type)
        or pyarrow.types.is_large_list(pa_type)
        or pyarrow.types.is_fixed_size_list(pa_type)
    ):
        return True
    for view_check_name in ("is_list_view", "is_large_list_view"):
        view_check = getattr(pyarrow.types, view_check_name, None)
        if view_check is not None and view_check(pa_type):
            return True
    return False
def _infer_flattened_dtype(expr: "Expr") -> DataType:
    """Infer the return DataType after flattening one level of list nesting.

    Falls back to ``DataType(object)`` whenever the expression's type is not
    an Arrow list-of-lists.
    """
    if not expr.data_type.is_arrow_type():
        return DataType(object)
    outer = expr.data_type.to_arrow_dtype()
    if not _is_list_like(outer):
        return DataType(object)
    inner = outer.value_type
    if not _is_list_like(inner):
        return DataType(object)
    # Preserve large_list vs. list at the top level; the element type of the
    # flattened result is the inner list's value type.
    make_list = (
        pyarrow.large_list if pyarrow.types.is_large_list(outer) else pyarrow.list_
    )
    return DataType.from_arrow(make_list(inner.value_type))
def _validate_nested_list(arr_type: pyarrow.DataType) -> None:
    """Raise TypeError unless `arr_type` is a list whose elements are lists."""
    error_message = (
        "list.flatten() requires a list column whose elements are also lists."
    )
    # Check the outer level first so `.value_type` is only touched on lists.
    if not _is_list_like(arr_type):
        raise TypeError(error_message)
    if not _is_list_like(arr_type.value_type):
        raise TypeError(error_message)
@dataclass
class _ListNamespace:
"""Namespace for list operations on expression columns.
This namespace provides methods for operating on list-typed columns using
PyArrow compute functions.
Example:
>>> from ray.data.expressions import col
>>> # Get length of list column
>>> expr = col("items").list.len()
>>> # Get first item using method
>>> expr = col("items").list.get(0)
>>> # Get first item using indexing
>>> expr = col("items").list[0]
>>> # Slice list
>>> expr = col("items").list[1:3]
"""
_expr: Expr
    def len(self) -> "UDFExpr":
        """Get the length of each list.

        Returns:
            UDFExpr producing an int32 array with each list's element count
            (via `pc.list_value_length`).
        """
        @pyarrow_udf(return_dtype=DataType.int32())
        def _list_len(arr: pyarrow.Array) -> pyarrow.Array:
            return pc.list_value_length(arr)
        return _list_len(self._expr)
def __getitem__(self, key: Union[int, slice]) -> "UDFExpr":
"""Get element or slice using bracket notation.
Args:
key: An integer for element access or slice for list slicing.
Returns:
UDFExpr that extracts the element or slice.
Example:
>>> col("items").list[0] # Get first item # doctest: +SKIP
>>> col("items").list[1:3] # Get slice [1, 3) # doctest: +SKIP
>>> col("items").list[-1] # Get last item # doctest: +SKIP
"""
if isinstance(key, int):
return self.get(key)
elif isinstance(key, slice):
return self.slice(key.start, key.stop, key.step)
else:
raise TypeError(
f"List indices must be integers or slices, not {type(key).__name__}"
)
def get(self, index: int) -> "UDFExpr":
"""Get element at the specified index from each list.
Args:
index: The index of the element to retrieve. Negative indices are supported.
Returns:
UDFExpr that extracts the element at the given index.
"""
# Infer return type from the list's value type
return_dtype = DataType(object) # fallback
if self._expr.data_type.is_arrow_type():
arrow_type = self._expr.data_type.to_arrow_dtype()
if pyarrow.types.is_list(arrow_type) or pyarrow.types.is_large_list(
arrow_type
):
return_dtype = DataType.from_arrow(arrow_type.value_type)
elif pyarrow.types.is_fixed_size_list(arrow_type):
return_dtype = DataType.from_arrow(arrow_type.value_type)
@pyarrow_udf(return_dtype=return_dtype)
def _list_get(arr: pyarrow.Array) -> pyarrow.Array:
return pc.list_element(arr, index)
return _list_get(self._expr)
def slice(
self, start: int | None = None, stop: int | None = None, step: int | None = None
) -> "UDFExpr":
"""Slice each list.
Args:
start: Start index (inclusive). Defaults to 0.
stop: Stop index (exclusive). Defaults to list length.
step: Step size. Defaults to 1.
Returns:
UDFExpr that extracts a slice from each list.
"""
# Return type is the same as the input list type
return_dtype = self._expr.data_type
@pyarrow_udf(return_dtype=return_dtype)
def _list_slice(arr: pyarrow.Array) -> pyarrow.Array:
return pc.list_slice(
arr,
start=0 if start is None else start,
stop=stop,
step=1 if step is None else step,
)
return _list_slice(self._expr)
    def sort(
        self,
        order: Literal["ascending", "descending"] = "ascending",
        null_placement: Literal["at_start", "at_end"] = "at_end",
    ) -> "UDFExpr":
        """Sort the elements within each (nested) list.

        Args:
            order: Sorting order, must be ``\"ascending\"`` or ``\"descending\"``.
            null_placement: Placement for null values, ``\"at_start\"`` or ``\"at_end\"``.

        Returns:
            UDFExpr providing the sorted lists.

        Raises:
            ValueError: If `order` or `null_placement` is not a valid option.

        Example:
            >>> from ray.data.expressions import col
            >>> # [[3,1],[2,None]] -> [[1,3],[2,None]]
            >>> expr = col("items").list.sort() # doctest: +SKIP
        """
        # Validate kwargs eagerly (at expression-build time) rather than
        # inside the UDF, so bad arguments fail fast.
        if order not in {"ascending", "descending"}:
            raise ValueError(
                "order must be either 'ascending' or 'descending', got " f"{order!r}"
            )
        if null_placement not in {"at_start", "at_end"}:
            raise ValueError(
                "null_placement must be 'at_start' or 'at_end', got "
                f"{null_placement!r}"
            )
        # Sorting never changes the list type, so reuse the input dtype.
        return_dtype = self._expr.data_type

        @pyarrow_udf(return_dtype=return_dtype)
        def _list_sort(arr: pyarrow.Array) -> pyarrow.Array:
            # Approach:
            # 1) Normalize fixed_size_list -> list for list_* kernels (preserve nulls).
            # 2) Flatten to (row_index, value) pairs, sort by row then value.
            # 3) Rebuild list array using per-row lengths and restore original type.
            arr = _ensure_array(arr)
            arr_type = arr.type
            arr_dtype = DataType.from_arrow(arr_type)
            if not arr_dtype.is_list_type():
                raise TypeError("list.sort() requires a list column.")
            original_type = arr_type
            # Remember which top-level rows are null so they can be restored
            # after the rebuild below.
            null_mask = arr.is_null() if arr.null_count else None
            sort_arr = arr
            if pyarrow.types.is_fixed_size_list(arr_type):
                # Example: FixedSizeList<2>[ [3,1], None, [2,4] ]
                # Fill null row -> [[3,1],[None,None],[2,4]], cast to list<child> for sort,
                # then cast back to fixed_size to preserve schema. list_* kernels operate
                # on list/large_list, so we cast fixed_size_list<T> to list<T> here.
                child_type = arr_type.value_type
                list_size = arr_type.list_size
                if null_mask is not None:
                    # Fill null rows with fixed-size null lists so each row keeps
                    # the same list_size when we sort and rebuild offsets.
                    filler_values = pyarrow.nulls(len(arr) * list_size, type=child_type)
                    filler = pyarrow.FixedSizeListArray.from_arrays(
                        filler_values, list_size
                    )
                    sort_arr = pc.if_else(null_mask, filler, arr)
                list_type = pyarrow.list_(child_type)
                sort_arr = sort_arr.cast(list_type)
                arr_type = sort_arr.type
            # Flatten to (row_index, value) pairs, sort within each row by value.
            values = pc.list_flatten(sort_arr)
            if len(values):
                row_indices = pc.list_parent_indices(sort_arr)
                struct = pyarrow.StructArray.from_arrays(
                    [row_indices, values],
                    ["row", "value"],
                )
                # Two-key sort: rows keep their order, values sort within rows.
                sorted_indices = pc.sort_indices(
                    struct,
                    sort_keys=[("row", "ascending"), ("value", order)],
                    null_placement=null_placement,
                )
                values = pc.take(values, sorted_indices)
            # Reconstruct list array with original row boundaries and nulls.
            lengths = pc.list_value_length(sort_arr)
            lengths = pc.fill_null(lengths, 0)
            is_large = pyarrow.types.is_large_list(arr_type)
            offsets = _counts_to_offsets(lengths)
            sorted_arr = _combine_as_list_array(
                offsets=offsets,
                values=values,
                is_large=is_large,
                null_mask=null_mask,
            )
            if pyarrow.types.is_fixed_size_list(original_type):
                sorted_arr = sorted_arr.cast(original_type)
            return sorted_arr

        return _list_sort(self._expr)
def flatten(self) -> "UDFExpr":
    """Flatten one level of nesting for each list value.

    Each ``list<list<T>>`` row becomes a single ``list<T>`` row containing
    the concatenation of its inner lists. Inner ``None`` lists contribute no
    elements; top-level ``None`` rows are preserved as ``None`` via the null
    mask applied during reconstruction.

    Returns:
        UDFExpr producing a list/large_list array with one nesting level
        removed (large-ness follows the input type).
    """
    # Return dtype is inferred from the expression's (nested) list type.
    return_dtype = _infer_flattened_dtype(self._expr)

    @pyarrow_udf(return_dtype=return_dtype)
    def _list_flatten(arr: pyarrow.Array) -> pyarrow.Array:
        # Approach:
        # 1) Flatten list<list<T>> to a flat values array and parent indices.
        # 2) Count values per original row.
        # 3) Rebuild list array using offsets while preserving top-level nulls.
        arr = _ensure_array(arr)
        # Raises for non-nested-list input (helper defined elsewhere in file).
        _validate_nested_list(arr.type)
        inner_lists: pyarrow.Array = pc.list_flatten(arr)
        all_scalars: pyarrow.Array = pc.list_flatten(inner_lists)
        n_rows: int = len(arr)
        if len(all_scalars) == 0:
            # All rows are empty/None after flatten, so build zero counts to
            # preserve row count and produce empty lists for each row.
            counts = pyarrow.array(np.repeat(0, n_rows), type=pyarrow.int64())
            offsets = _counts_to_offsets(counts)
        else:
            # Example: arr = [[[1,2],[3]], [[4], None], None]
            # inner_lists = [[1,2],[3],[4],None], all_scalars = [1,2,3,4]
            # parent(arr)=[0,0,1,1], parent(inner)=[0,0,1,2] -> row_indices=[0,0,0,1]
            # counts=[3,1,0] -> offsets=[0,3,4,4]
            row_indices: pyarrow.Array = pc.take(
                pc.list_parent_indices(arr),
                pc.list_parent_indices(inner_lists),
            )
            vc: pyarrow.StructArray = pc.value_counts(row_indices)
            rows_with_scalars: pyarrow.Array = pc.struct_field(vc, "values")
            scalar_counts: pyarrow.Array = pc.struct_field(vc, "counts")
            # Compute per-row counts of flattened scalars. value_counts gives counts
            # only for rows that appear, so we map those counts back onto the full
            # row range [0, n_rows) and fill missing rows with 0.
            row_sequence: pyarrow.Array = pyarrow.array(
                np.arange(n_rows, dtype=np.int64), type=pyarrow.int64()
            )
            positions: pyarrow.Array = pc.index_in(
                row_sequence, value_set=rows_with_scalars
            )
            # fill_null(positions, 0) only makes `take` safe for absent rows;
            # the surrounding if_else maps those rows back to count 0.
            counts: pyarrow.Array = pc.if_else(
                pc.is_null(positions),
                0,
                pc.take(scalar_counts, pc.fill_null(positions, 0)),
            )
            offsets = _counts_to_offsets(counts)
        is_large: bool = pyarrow.types.is_large_list(arr.type)
        # Only compute the mask when nulls exist; None means "no nulls".
        null_mask: pyarrow.Array | None = arr.is_null() if arr.null_count else None
        # Rebuild a list/large_list array while preserving top-level nulls.
        return _combine_as_list_array(
            offsets=offsets,
            values=all_scalars,
            is_large=is_large,
            null_mask=null_mask,
        )

    return _list_flatten(self._expr)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/namespace_expressions/list_namespace.py",
"license": "Apache License 2.0",
"lines": 284,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/namespace_expressions/string_namespace.py | """String namespace for expression operations on string-typed columns."""
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Callable, Literal
import pyarrow
import pyarrow.compute as pc
from ray.data.datatype import DataType
from ray.data.expressions import _create_pyarrow_compute_udf, pyarrow_udf
if TYPE_CHECKING:
from ray.data.expressions import Expr, UDFExpr
def _create_str_udf(
    pc_func: Callable[..., pyarrow.Array], return_dtype: DataType
) -> Callable[..., "UDFExpr"]:
    """Wrap a PyArrow compute function as a string-namespace UDF factory.

    Covers every compute-call shape used by the string namespace:

    - unary calls with no extra arguments: ``upper()``, ``lower()``, ``reverse()``
    - pattern-based calls: ``starts_with()``, ``contains()``
    - multi-argument calls: ``replace()``, ``replace_slice()``

    Args:
        pc_func: PyArrow compute function invoked as
            ``pc_func(array, *positional, **kwargs)``.
        return_dtype: Data type of the resulting column.

    Returns:
        A factory producing ``UDFExpr`` instances when called with an
        expression (plus any extra positional/keyword arguments).
    """
    udf_factory = _create_pyarrow_compute_udf(pc_func, return_dtype=return_dtype)
    return udf_factory
@dataclass
class _StringNamespace:
    """Namespace for string operations on expression columns.

    This namespace provides methods for operating on string-typed columns using
    PyArrow compute functions. Each method is a thin wrapper that pairs one
    ``pyarrow.compute`` kernel with the declared return dtype.

    Example:
        >>> from ray.data.expressions import col
        >>> # Convert to uppercase
        >>> expr = col("name").str.upper()
        >>> # Get string length
        >>> expr = col("name").str.len()
        >>> # Check if string starts with a prefix
        >>> expr = col("name").str.starts_with("A")
    """

    # The expression this namespace operates on (the string column).
    _expr: Expr

    # Length methods
    def len(self) -> "UDFExpr":
        """Get the length of each string in characters."""
        return _create_str_udf(pc.utf8_length, DataType.int32())(self._expr)

    def byte_len(self) -> "UDFExpr":
        """Get the length of each string in bytes."""
        return _create_str_udf(pc.binary_length, DataType.int32())(self._expr)

    # Case methods
    def upper(self) -> "UDFExpr":
        """Convert strings to uppercase."""
        return _create_str_udf(pc.utf8_upper, DataType.string())(self._expr)

    def lower(self) -> "UDFExpr":
        """Convert strings to lowercase."""
        return _create_str_udf(pc.utf8_lower, DataType.string())(self._expr)

    def capitalize(self) -> "UDFExpr":
        """Capitalize the first character of each string."""
        return _create_str_udf(pc.utf8_capitalize, DataType.string())(self._expr)

    def title(self) -> "UDFExpr":
        """Convert strings to title case."""
        return _create_str_udf(pc.utf8_title, DataType.string())(self._expr)

    def swapcase(self) -> "UDFExpr":
        """Swap the case of each character."""
        return _create_str_udf(pc.utf8_swapcase, DataType.string())(self._expr)

    # Predicate methods
    def is_alpha(self) -> "UDFExpr":
        """Check if strings contain only alphabetic characters."""
        return _create_str_udf(pc.utf8_is_alpha, DataType.bool())(self._expr)

    def is_alnum(self) -> "UDFExpr":
        """Check if strings contain only alphanumeric characters."""
        return _create_str_udf(pc.utf8_is_alnum, DataType.bool())(self._expr)

    def is_digit(self) -> "UDFExpr":
        """Check if strings contain only digits."""
        return _create_str_udf(pc.utf8_is_digit, DataType.bool())(self._expr)

    def is_decimal(self) -> "UDFExpr":
        """Check if strings contain only decimal characters."""
        return _create_str_udf(pc.utf8_is_decimal, DataType.bool())(self._expr)

    def is_numeric(self) -> "UDFExpr":
        """Check if strings contain only numeric characters."""
        return _create_str_udf(pc.utf8_is_numeric, DataType.bool())(self._expr)

    def is_space(self) -> "UDFExpr":
        """Check if strings contain only whitespace."""
        return _create_str_udf(pc.utf8_is_space, DataType.bool())(self._expr)

    def is_lower(self) -> "UDFExpr":
        """Check if strings are lowercase."""
        return _create_str_udf(pc.utf8_is_lower, DataType.bool())(self._expr)

    def is_upper(self) -> "UDFExpr":
        """Check if strings are uppercase."""
        return _create_str_udf(pc.utf8_is_upper, DataType.bool())(self._expr)

    def is_title(self) -> "UDFExpr":
        """Check if strings are title-cased."""
        return _create_str_udf(pc.utf8_is_title, DataType.bool())(self._expr)

    def is_printable(self) -> "UDFExpr":
        """Check if strings contain only printable characters."""
        return _create_str_udf(pc.utf8_is_printable, DataType.bool())(self._expr)

    def is_ascii(self) -> "UDFExpr":
        """Check if strings contain only ASCII characters."""
        return _create_str_udf(pc.string_is_ascii, DataType.bool())(self._expr)

    # Searching methods
    def starts_with(self, pattern: str, *args: Any, **kwargs: Any) -> "UDFExpr":
        """Check if strings start with a pattern."""
        return _create_str_udf(pc.starts_with, DataType.bool())(
            self._expr, pattern, *args, **kwargs
        )

    def ends_with(self, pattern: str, *args: Any, **kwargs: Any) -> "UDFExpr":
        """Check if strings end with a pattern."""
        return _create_str_udf(pc.ends_with, DataType.bool())(
            self._expr, pattern, *args, **kwargs
        )

    def contains(self, pattern: str, *args: Any, **kwargs: Any) -> "UDFExpr":
        """Check if strings contain a substring (literal match, not regex)."""
        return _create_str_udf(pc.match_substring, DataType.bool())(
            self._expr, pattern, *args, **kwargs
        )

    def match(self, pattern: str, *args: Any, **kwargs: Any) -> "UDFExpr":
        """Match strings against a SQL LIKE pattern (``%`` and ``_`` wildcards)."""
        return _create_str_udf(pc.match_like, DataType.bool())(
            self._expr, pattern, *args, **kwargs
        )

    def find(self, pattern: str, *args: Any, **kwargs: Any) -> "UDFExpr":
        """Find the first occurrence of a substring."""
        return _create_str_udf(pc.find_substring, DataType.int32())(
            self._expr, pattern, *args, **kwargs
        )

    def count(self, pattern: str, *args: Any, **kwargs: Any) -> "UDFExpr":
        """Count occurrences of a substring."""
        return _create_str_udf(pc.count_substring, DataType.int32())(
            self._expr, pattern, *args, **kwargs
        )

    def find_regex(self, pattern: str, *args: Any, **kwargs: Any) -> "UDFExpr":
        """Find the first occurrence matching a regex pattern."""
        return _create_str_udf(pc.find_substring_regex, DataType.int32())(
            self._expr, pattern, *args, **kwargs
        )

    def count_regex(self, pattern: str, *args: Any, **kwargs: Any) -> "UDFExpr":
        """Count occurrences matching a regex pattern."""
        return _create_str_udf(pc.count_substring_regex, DataType.int32())(
            self._expr, pattern, *args, **kwargs
        )

    def match_regex(self, pattern: str, *args: Any, **kwargs: Any) -> "UDFExpr":
        """Check if strings match a regex pattern."""
        return _create_str_udf(pc.match_substring_regex, DataType.bool())(
            self._expr, pattern, *args, **kwargs
        )

    # Transformation methods
    def reverse(self) -> "UDFExpr":
        """Reverse each string."""
        return _create_str_udf(pc.utf8_reverse, DataType.string())(self._expr)

    def slice(self, *args: Any, **kwargs: Any) -> "UDFExpr":
        """Slice strings by codeunit indices."""
        return _create_str_udf(pc.utf8_slice_codeunits, DataType.string())(
            self._expr, *args, **kwargs
        )

    def replace(
        self, pattern: str, replacement: str, *args: Any, **kwargs: Any
    ) -> "UDFExpr":
        """Replace occurrences of a substring."""
        return _create_str_udf(pc.replace_substring, DataType.string())(
            self._expr, pattern, replacement, *args, **kwargs
        )

    def replace_regex(
        self, pattern: str, replacement: str, *args: Any, **kwargs: Any
    ) -> "UDFExpr":
        """Replace occurrences matching a regex pattern."""
        return _create_str_udf(pc.replace_substring_regex, DataType.string())(
            self._expr, pattern, replacement, *args, **kwargs
        )

    def replace_slice(
        self, start: int, stop: int, replacement: str, *args: Any, **kwargs: Any
    ) -> "UDFExpr":
        """Replace a slice with a string."""
        return _create_str_udf(pc.binary_replace_slice, DataType.string())(
            self._expr, start, stop, replacement, *args, **kwargs
        )

    def split(self, pattern: str, *args: Any, **kwargs: Any) -> "UDFExpr":
        """Split strings by a pattern.

        Declared dtype is ``DataType(object)`` since each row yields a list.
        """
        return _create_str_udf(pc.split_pattern, DataType(object))(
            self._expr, pattern, *args, **kwargs
        )

    def split_regex(self, pattern: str, *args: Any, **kwargs: Any) -> "UDFExpr":
        """Split strings by a regex pattern."""
        return _create_str_udf(pc.split_pattern_regex, DataType(object))(
            self._expr, pattern, *args, **kwargs
        )

    def split_whitespace(self, *args: Any, **kwargs: Any) -> "UDFExpr":
        """Split strings on whitespace."""
        return _create_str_udf(pc.utf8_split_whitespace, DataType(object))(
            self._expr, *args, **kwargs
        )

    def extract(self, pattern: str, *args: Any, **kwargs: Any) -> "UDFExpr":
        """Extract a substring matching a regex pattern.

        NOTE(review): ``pc.extract_regex`` produces a struct of named capture
        groups, while the declared dtype here is string — confirm this pairing
        against callers' expectations.
        """
        return _create_str_udf(pc.extract_regex, DataType.string())(
            self._expr, pattern, *args, **kwargs
        )

    def repeat(self, n: int, *args: Any, **kwargs: Any) -> "UDFExpr":
        """Repeat each string n times."""
        return _create_str_udf(pc.binary_repeat, DataType.string())(
            self._expr, n, *args, **kwargs
        )

    def center(
        self, width: int, padding: str = " ", *args: Any, **kwargs: Any
    ) -> "UDFExpr":
        """Center strings in a field of given width."""
        return _create_str_udf(pc.utf8_center, DataType.string())(
            self._expr, width, padding, *args, **kwargs
        )

    def lpad(
        self, width: int, padding: str = " ", *args: Any, **kwargs: Any
    ) -> "UDFExpr":
        """Right-align strings by padding with a given character while respecting ``width``.

        If the string is longer than the specified width, it remains intact (no truncation occurs).
        """
        return _create_str_udf(pc.utf8_lpad, DataType.string())(
            self._expr, width, padding, *args, **kwargs
        )

    def rpad(
        self, width: int, padding: str = " ", *args: Any, **kwargs: Any
    ) -> "UDFExpr":
        """Left-align strings by padding with a given character while respecting ``width``.

        If the string is longer than the specified width, it remains intact (no truncation occurs).
        """
        return _create_str_udf(pc.utf8_rpad, DataType.string())(
            self._expr, width, padding, *args, **kwargs
        )

    # Custom methods that need special logic beyond simple PyArrow function calls
    def strip(self, characters: str | None = None) -> "UDFExpr":
        """Remove leading and trailing whitespace or specified characters.

        Args:
            characters: Characters to remove. If None, removes whitespace.

        Returns:
            UDFExpr that strips characters from both ends.
        """

        @pyarrow_udf(return_dtype=DataType.string())
        def _str_strip(arr: pyarrow.Array) -> pyarrow.Array:
            # Dispatch on `characters` at call time: dedicated whitespace
            # kernel vs. character-set trim.
            if characters is None:
                return pc.utf8_trim_whitespace(arr)
            else:
                return pc.utf8_trim(arr, characters=characters)

        return _str_strip(self._expr)

    def lstrip(self, characters: str | None = None) -> "UDFExpr":
        """Remove leading whitespace or specified characters.

        Args:
            characters: Characters to remove. If None, removes whitespace.

        Returns:
            UDFExpr that strips characters from the left.
        """

        @pyarrow_udf(return_dtype=DataType.string())
        def _str_lstrip(arr: pyarrow.Array) -> pyarrow.Array:
            if characters is None:
                return pc.utf8_ltrim_whitespace(arr)
            else:
                return pc.utf8_ltrim(arr, characters=characters)

        return _str_lstrip(self._expr)

    def rstrip(self, characters: str | None = None) -> "UDFExpr":
        """Remove trailing whitespace or specified characters.

        Args:
            characters: Characters to remove. If None, removes whitespace.

        Returns:
            UDFExpr that strips characters from the right.
        """

        @pyarrow_udf(return_dtype=DataType.string())
        def _str_rstrip(arr: pyarrow.Array) -> pyarrow.Array:
            if characters is None:
                return pc.utf8_rtrim_whitespace(arr)
            else:
                return pc.utf8_rtrim(arr, characters=characters)

        return _str_rstrip(self._expr)

    # Padding
    def pad(
        self,
        width: int,
        fillchar: str = " ",
        side: Literal["left", "right", "both"] = "right",
    ) -> "UDFExpr":
        """Pad strings to a specified width.

        Args:
            width: Target width.
            fillchar: Character to use for padding.
            side: "left", "right", or "both" for padding side.

        Returns:
            UDFExpr that pads strings.

        Raises:
            ValueError: If ``side`` is not one of the accepted literals
                (raised when the UDF executes, not at expression build time).
        """

        @pyarrow_udf(return_dtype=DataType.string())
        def _str_pad(arr: pyarrow.Array) -> pyarrow.Array:
            # Map the pandas-style `side` argument onto the three distinct
            # PyArrow padding kernels.
            if side == "right":
                return pc.utf8_rpad(arr, width=width, padding=fillchar)
            elif side == "left":
                return pc.utf8_lpad(arr, width=width, padding=fillchar)
            elif side == "both":
                return pc.utf8_center(arr, width=width, padding=fillchar)
            else:
                raise ValueError("side must be 'left', 'right', or 'both'")

        return _str_pad(self._expr)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/namespace_expressions/string_namespace.py",
"license": "Apache License 2.0",
"lines": 292,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/namespace_expressions/struct_namespace.py | """Struct namespace for expression operations on struct-typed columns."""
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING
import pyarrow
import pyarrow.compute as pc
from ray.data.datatype import DataType
from ray.data.expressions import pyarrow_udf
if TYPE_CHECKING:
from ray.data.expressions import Expr, UDFExpr
@dataclass
class _StructNamespace:
    """Namespace for struct operations on expression columns.

    Wraps a struct-typed expression and exposes field access via PyArrow
    compute functions.

    Example:
        >>> from ray.data.expressions import col
        >>> # Access a field using method
        >>> expr = col("user_record").struct.field("age")
        >>> # Access a field using bracket notation
        >>> expr = col("user_record").struct["age"]
        >>> # Access nested field
        >>> expr = col("user_record").struct["address"].struct["city"]
    """

    # The struct-typed expression this namespace operates on.
    _expr: Expr

    def __getitem__(self, field_name: str) -> "UDFExpr":
        """Extract a field using bracket notation.

        Sugar for :meth:`field`.

        Args:
            field_name: The name of the field to extract.

        Returns:
            UDFExpr that extracts the specified field from each struct.

        Example:
            >>> col("user").struct["age"]  # Get age field # doctest: +SKIP
            >>> col("user").struct["address"].struct["city"]  # Get nested city field # doctest: +SKIP
        """
        return self.field(field_name)

    def field(self, field_name: str) -> "UDFExpr":
        """Extract a field from a struct.

        Args:
            field_name: The name of the field to extract.

        Returns:
            UDFExpr that extracts the specified field from each struct.
        """
        # Start from an opaque dtype and refine it when the expression's
        # Arrow schema exposes the field's concrete type.
        inferred_dtype = DataType(object)
        expr_dtype = self._expr.data_type
        if expr_dtype.is_arrow_type():
            arrow_type = expr_dtype.to_arrow_dtype()
            if pyarrow.types.is_struct(arrow_type):
                try:
                    inferred_dtype = DataType.from_arrow(
                        arrow_type.field(field_name).type
                    )
                except KeyError:
                    # Field not present in the schema: keep the opaque fallback.
                    pass

        @pyarrow_udf(return_dtype=inferred_dtype)
        def _struct_field(arr: pyarrow.Array) -> pyarrow.Array:
            return pc.struct_field(arr, field_name)

        return _struct_field(self._expr)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/namespace_expressions/struct_namespace.py",
"license": "Apache License 2.0",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:python/ray/data/preprocessors/serialization_handlers.py | """
Serialization handlers for preprocessor save/load functionality.
This module implements a factory pattern to abstract different serialization formats,
making it easier to add new formats and maintain existing ones.
"""
import abc
import base64
import pickle
from enum import Enum
from typing import Any, Dict, Optional, Union
from ray.cloudpickle import cloudpickle
from ray.util.annotations import DeveloperAPI
@DeveloperAPI
class HandlerFormatName(Enum):
    """Enum for consistent format naming in the factory.

    Values are the canonical lowercase format names used as registry keys in
    ``SerializationHandlerFactory``.
    """

    CLOUDPICKLE = "cloudpickle"
    PICKLE = "pickle"
@DeveloperAPI
class SerializationHandler(abc.ABC):
    """Abstract base class for handling preprocessor serialization formats.

    Concrete subclasses implement one wire format each and self-identify via
    a magic prefix (which may be empty for legacy formats).
    """

    @abc.abstractmethod
    def serialize(
        self, data: Union["Preprocessor", Dict[str, Any]]  # noqa: F821
    ) -> Union[str, bytes]:
        """Serialize preprocessor data to the specific format.

        Args:
            data: Dictionary containing preprocessor metadata and stats

        Returns:
            Serialized data in format-specific representation
        """
        pass

    @abc.abstractmethod
    def deserialize(self, serialized: Union[str, bytes]) -> Any:
        """Deserialize data from the specific format.

        Args:
            serialized: Serialized data in format-specific representation

        Returns:
            For structured formats (CloudPickle/JSON/MessagePack): Dictionary containing preprocessor metadata and stats
            For pickle format: The actual deserialized object
        """
        pass

    @abc.abstractmethod
    def get_magic_bytes(self) -> Union[str, bytes]:
        """Get the magic bytes/prefix for this format."""
        pass

    def strip_magic_bytes(self, serialized: Union[str, bytes]) -> Union[str, bytes]:
        """Remove magic bytes from serialized data.

        Returns the input unchanged when it does not carry this handler's
        prefix.
        """
        magic = self.get_magic_bytes()
        # NOTE(review): startswith assumes `serialized` and `magic` are the
        # same type (both str or both bytes); mixed types would raise
        # TypeError — each concrete handler keeps them consistent.
        if isinstance(serialized, (str, bytes)) and serialized.startswith(magic):
            return serialized[len(magic) :]
        return serialized
@DeveloperAPI
class CloudPickleSerializationHandler(SerializationHandler):
    """Handler for the CloudPickle serialization format.

    Payloads are raw cloudpickle bytes carrying a short binary magic prefix
    so the format can be auto-detected later.
    """

    # Binary prefix identifying CloudPickle payloads.
    MAGIC_CLOUDPICKLE = b"CPKL:"

    def serialize(
        self, data: Union["Preprocessor", Dict[str, Any]]  # noqa: F821
    ) -> bytes:
        """Serialize ``data`` to CloudPickle bytes with the magic prefix."""
        payload = cloudpickle.dumps(data)
        return self.MAGIC_CLOUDPICKLE + payload

    def deserialize(self, serialized: bytes) -> Dict[str, Any]:
        """Deserialize CloudPickle bytes produced by :meth:`serialize`.

        Raises:
            ValueError: If the input is not ``bytes`` or lacks the magic prefix.
        """
        if not isinstance(serialized, bytes):
            raise ValueError(
                f"Expected bytes for CloudPickle deserialization, got {type(serialized)}"
            )
        if not serialized.startswith(self.MAGIC_CLOUDPICKLE):
            raise ValueError(f"Invalid CloudPickle magic bytes: {serialized[:10]}")
        return cloudpickle.loads(self.strip_magic_bytes(serialized))

    def get_magic_bytes(self) -> bytes:
        """Return the binary prefix identifying this format."""
        return self.MAGIC_CLOUDPICKLE
@DeveloperAPI
class PickleSerializationHandler(SerializationHandler):
    """Handler for the legacy Pickle serialization format.

    Payloads are pickled, base64-encoded ASCII strings with no magic prefix;
    string inputs are therefore assumed to be this format during detection.
    """

    def serialize(
        self, data: Union["Preprocessor", Dict[str, Any]]  # noqa: F821
    ) -> str:
        """Pickle ``data`` and return it as a base64-encoded ASCII string.

        Kept for backward compatibility with older serialized artifacts.
        """
        raw = pickle.dumps(data)
        return base64.b64encode(raw).decode("ascii")

    def deserialize(
        self, serialized: str
    ) -> Any:  # Returns the actual object, not metadata
        """Decode and unpickle a legacy payload, returning the object itself."""
        return pickle.loads(base64.b64decode(serialized))

    def get_magic_bytes(self) -> str:
        """The legacy pickle format carries no magic prefix."""
        return ""
class SerializationHandlerFactory:
    """Factory class for creating appropriate serialization handlers."""

    # Registry of supported formats; extendable via register_handler().
    _handlers = {
        HandlerFormatName.CLOUDPICKLE: CloudPickleSerializationHandler,
        HandlerFormatName.PICKLE: PickleSerializationHandler,
    }

    @classmethod
    def register_handler(cls, format_name: HandlerFormatName, handler_class: type):
        """Register a new serialization handler (overwrites existing entries)."""
        cls._handlers[format_name] = handler_class

    @classmethod
    def get_handler(
        cls,
        format_identifier: Optional[HandlerFormatName] = None,
        data: Optional[Union[str, bytes]] = None,
        **kwargs,
    ) -> SerializationHandler:
        """Get the appropriate serialization handler for a format or serialized data.

        Args:
            format_identifier: The format to use for serialization. If None, will detect from data.
            data: Serialized data to detect format from (used when format_identifier is None).
            **kwargs: Additional keyword arguments (currently unused).

        Returns:
            SerializationHandler instance for the format

        Raises:
            ValueError: If format is not supported or cannot be detected
        """
        # No explicit format given: infer it from the serialized payload.
        if not format_identifier:
            format_identifier = cls.detect_format(data)
        if format_identifier not in cls._handlers:
            raise ValueError(
                f"Unsupported serialization format: {format_identifier.value}. "
                f"Supported formats: {list(cls._handlers.keys())}"
            )
        handler_class = cls._handlers[format_identifier]
        return handler_class()

    @classmethod
    def detect_format(cls, serialized: Union[str, bytes]) -> HandlerFormatName:
        """Detect the serialization format from the magic bytes.

        Args:
            serialized: Serialized data

        Returns:
            Format name enum

        Raises:
            ValueError: If format cannot be detected
        """
        # Check for CloudPickle first (binary format with explicit magic).
        if isinstance(serialized, bytes) and serialized.startswith(
            CloudPickleSerializationHandler.MAGIC_CLOUDPICKLE
        ):
            return HandlerFormatName.CLOUDPICKLE
        # Legacy pickle payloads are base64-encoded ASCII strings (no magic).
        if isinstance(serialized, str):
            return HandlerFormatName.PICKLE
        # BUGFIX: slicing is only valid for str/bytes. For anything else
        # (e.g. None, when get_handler() is called with no arguments) the
        # old `serialized[:20]` raised TypeError from inside the f-string;
        # fall back to the raw value so we raise the documented ValueError.
        preview = serialized[:20] if isinstance(serialized, bytes) else serialized
        raise ValueError(f"Cannot detect serialization format from: {preview}...")
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/preprocessors/serialization_handlers.py",
"license": "Apache License 2.0",
"lines": 149,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/preprocessors/version_support.py | class UnknownPreprocessorError(ValueError):
"""Raised when attempting to deserialize an unknown/unregistered preprocessor type."""
def __init__(self, preprocessor_type: str):
self.preprocessor_type = preprocessor_type
super().__init__(f"Unknown preprocessor type: {preprocessor_type}")
# Global registry mapping stable serialization identifiers to preprocessor
# classes; populated by the @SerializablePreprocessor decorator below.
_PREPROCESSOR_REGISTRY = {}
def SerializablePreprocessor(version: int, identifier: str):
    """Class decorator that registers a preprocessor for serialization.

    The decorated class is stored in the module-level registry under
    ``identifier`` so it can later be resolved during deserialization. The
    class MUST inherit from SerializablePreprocessor. Using an explicit,
    stable identifier (rather than the class name) lets classes be renamed
    without invalidating previously serialized data.

    Args:
        version: Version number of this preprocessor's serialization format.
        identifier: Stable identifier recorded in serialized payloads.

    Returns:
        A decorator function that registers the class and returns it unchanged.

    Raises:
        TypeError: If the decorated class does not inherit from
            SerializablePreprocessor.

    Note:
        Re-registering an existing identifier with a different class logs an
        info message and overwrites the previous registration.

    Examples:
        @SerializablePreprocessor(version=1, identifier="my_preprocessor_v1")
        class MyPreprocessor(SerializablePreprocessor):
            pass
    """

    def decorator(cls):
        import logging

        from ray.data.preprocessor import SerializablePreprocessorBase

        # Guard: only subclasses of the serializable base may be registered.
        if not issubclass(cls, SerializablePreprocessorBase):
            raise TypeError(
                f"Class {cls.__module__}.{cls.__qualname__} must inherit from "
                f"SerializablePreprocessor to use @SerializablePreprocessor decorator."
            )

        cls.set_version(version)
        cls.set_preprocessor_class_id(identifier)

        # Surface (but allow) identifier collisions with a different class.
        previous = _PREPROCESSOR_REGISTRY.get(identifier)
        if previous is not None and previous != cls:
            logging.info(
                f"Preprocessor id collision: '{identifier}' was already registered "
                f"by {previous.__module__}.{previous.__qualname__}. "
                f"Overwriting with {cls.__module__}.{cls.__qualname__}."
            )
        _PREPROCESSOR_REGISTRY[identifier] = cls
        return cls

    return decorator
def _lookup_class(serialization_id: str):
    """Resolve a registered preprocessor class from its serialization ID.

    Args:
        serialization_id: The serialization ID of the preprocessor (either
            explicit or class name).

    Returns:
        The registered preprocessor class.

    Raises:
        UnknownPreprocessorError: If the serialization ID is not registered.
    """
    registered = _PREPROCESSOR_REGISTRY.get(serialization_id)
    if registered is None:
        raise UnknownPreprocessorError(serialization_id)
    return registered
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/preprocessors/version_support.py",
"license": "Apache License 2.0",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:python/ray/data/_internal/streaming_repartition.py | from collections import deque
from typing import Deque, List, Tuple
from ray.data._internal.execution.interfaces import RefBundle
from ray.data._internal.execution.operators.map_operator import BaseRefBundler
"""Streaming repartition builds fixed-size outputs from a stream of inputs.
We construct batches here to produce exactly sized outputs from arbitrary [start, end) slices across input blocks.
The task builder submits a map task only after the total number of rows accumulated across pending blocks reaches
target num rows (except during the final flush, which may emit a smaller tail block). This allows us to create
target-sized batches without materializing entire large blocks on the driver.
Detailed Implementation:
1. When a new bundle arrives, buffer it in the pending list.
2. Whenever the total number of rows in the pending bundles reaches the target row count, try to build a ready bundle.
3. Determine the slice needed from the final bundle so the ready bundle holds an exact multiple of the target rows,
and add the remaining bundle to the pending bundles for the next iteration.
4. Submit that ready bundle to a remote map task; the task slices each block according to the slice metadata stored
in the RefBundle (the bundle now contains n × target rows for n ≥ 1).
5. We configured the `OutputBlockSizeOption.target_num_rows_per_block` to the target number of rows per block in
plan_streaming_repartition_op so the output buffer further splits the n × target rows into n blocks of exactly
the target size.
6. Once upstream input is exhausted, flush any leftover pending bundles and repeat steps 1‑5 for the tail.
7. The resulting blocks have lengths `[target, …, target, (total_rows % target)]`; ordering isn’t guaranteed, but the
remainder block should appear near the end.
"""
class StreamingRepartitionRefBundler(BaseRefBundler):
    """Incrementally builds task inputs to produce multiples of target-sized outputs.

    See the module docstring for the overall algorithm: incoming bundles are
    buffered until they hold at least ``target_num_rows_per_block`` rows, then
    merged (with the last bundle sliced so the total is an exact multiple of
    the target) into a "ready" bundle for dispatch.
    """

    def __init__(self, target_num_rows_per_block: int):
        """Initialize the bundler.

        Args:
            target_num_rows_per_block: Desired number of rows per output
                block; must be positive.
        """
        assert (
            target_num_rows_per_block > 0
        ), "target_num_rows_per_block must be positive for streaming repartition."
        self._target_num_rows = target_num_rows_per_block
        # Buffered input bundles not yet large enough to emit.
        self._pending_bundles: Deque[RefBundle] = deque()
        # Merged bundles (each an exact multiple of the target, except
        # possibly the final flushed tail) awaiting dispatch.
        self._ready_bundles: Deque[RefBundle] = deque()
        # Original input bundles consumed since the last get_next_bundle(),
        # reported back to the caller for bookkeeping.
        self._consumed_input_bundles: List[RefBundle] = []
        # Running row count across self._pending_bundles.
        self._total_pending_rows = 0

    def _try_build_ready_bundle(self, flush_remaining: bool = False):
        """Move pending bundles into a ready bundle when enough rows exist.

        When the pending rows reach the target, the last pending bundle is
        sliced so the merged ready bundle holds an exact multiple of the
        target; the remainder becomes the new pending buffer. With
        ``flush_remaining=True``, any leftover rows are emitted as a final
        (possibly smaller) bundle.
        """
        if self._total_pending_rows >= self._target_num_rows:
            # Rows to keep from the last bundle so the merged total is an
            # exact multiple of the target.
            rows_needed_from_last_bundle = (
                self._pending_bundles[-1].num_rows()
                - self._total_pending_rows % self._target_num_rows
            )
            assert rows_needed_from_last_bundle >= 0  # This will never be negative
            pending_bundles = list(self._pending_bundles)
            remaining_bundle = None
            # Slice only when a strict prefix of the last bundle is needed;
            # 0 means the remainder equals the whole last bundle (keep it
            # intact), and == num_rows means the total is already a multiple.
            if (
                rows_needed_from_last_bundle > 0
                and rows_needed_from_last_bundle < pending_bundles[-1].num_rows()
            ):
                last_bundle = pending_bundles.pop()
                sliced_bundle, remaining_bundle = last_bundle.slice(
                    rows_needed_from_last_bundle
                )
                pending_bundles.append(sliced_bundle)
            self._ready_bundles.append(RefBundle.merge_ref_bundles(pending_bundles))
            self._pending_bundles.clear()
            self._total_pending_rows = 0
            # The sliced-off remainder seeds the next accumulation round.
            if remaining_bundle and remaining_bundle.num_rows() > 0:
                self._pending_bundles.append(remaining_bundle)
                self._total_pending_rows += remaining_bundle.num_rows()
        if flush_remaining and len(self._pending_bundles) > 0:
            # Final flush: emit whatever is left as a smaller tail bundle.
            self._ready_bundles.append(
                RefBundle.merge_ref_bundles(self._pending_bundles)
            )
            self._pending_bundles.clear()
            self._total_pending_rows = 0

    def add_bundle(self, ref_bundle: RefBundle):
        """Buffer an incoming bundle and build a ready bundle if possible."""
        self._total_pending_rows += ref_bundle.num_rows()
        self._pending_bundles.append(ref_bundle)
        self._try_build_ready_bundle()
        self._consumed_input_bundles.append(ref_bundle)

    def has_bundle(self) -> bool:
        """Return True when a ready bundle is available for dispatch."""
        return len(self._ready_bundles) > 0

    def get_next_bundle(
        self,
    ) -> Tuple[List[RefBundle], RefBundle]:
        """Pop the next ready bundle along with the inputs consumed so far.

        Returns:
            A tuple of (input bundles consumed since the previous call, the
            next ready bundle).
        """
        consumed_input_bundles = self._consumed_input_bundles
        self._consumed_input_bundles = []
        return consumed_input_bundles, self._ready_bundles.popleft()

    def done_adding_bundles(self):
        """Flush leftover pending rows once upstream input is exhausted."""
        if len(self._pending_bundles) > 0:
            self._try_build_ready_bundle(flush_remaining=True)

    def num_blocks(self):
        """Total number of blocks currently held (pending + ready)."""
        return sum(len(bundle) for bundle in self._pending_bundles) + sum(
            len(bundle) for bundle in self._ready_bundles
        )

    def size_bytes(self) -> int:
        """Total size in bytes of all held bundles (pending + ready)."""
        return sum(bundle.size_bytes() for bundle in self._pending_bundles) + sum(
            bundle.size_bytes() for bundle in self._ready_bundles
        )
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/streaming_repartition.py",
"license": "Apache License 2.0",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/tests/unit/test_bundler.py | from typing import Any, List
import pandas as pd
import pytest
import ray
from ray.data._internal.execution.interfaces.ref_bundle import RefBundle
from ray.data._internal.streaming_repartition import StreamingRepartitionRefBundler
from ray.data.block import BlockAccessor
def _make_ref_bundles_for_unit_test(raw_bundles: List[List[List[Any]]]) -> tuple:
    """Build RefBundles (plus a ref->block map) from raw row-id lists.

    Each innermost list becomes one pandas block, each middle list one
    RefBundle. Synthetic 28-byte ObjectRefs are minted from a running counter
    so the returned map can resolve refs back to their DataFrames.
    """
    bundles = []
    ref_to_block = {}
    next_ref_id = 0
    for bundle_rows in raw_bundles:
        block_entries = []
        bundle_schema = None
        for block_rows in bundle_rows:
            frame = pd.DataFrame({"id": block_rows})
            # Fabricate a deterministic, zero-padded ObjectRef id.
            ref = ray.ObjectRef(str(next_ref_id).encode().ljust(28, b"0"))
            next_ref_id += 1
            ref_to_block[ref] = frame
            accessor = BlockAccessor.for_block(frame)
            block_entries.append((ref, accessor.get_metadata()))
            bundle_schema = accessor.schema()
        bundles.append(
            RefBundle(blocks=block_entries, owns_blocks=True, schema=bundle_schema)
        )
    return bundles, ref_to_block
@pytest.mark.parametrize(
    "target,in_bundles,expected_row_counts",
    [
        (
            # Target of 2 rows per bundle
            2,
            [[[1]], [[2]], [[3]], [[4]]],
            [2, 2],  # Expected output: 2 bundles of 2 rows each
        ),
        (
            # Target of 3 rows with uneven inputs
            3,
            [[[1, 2]], [[3, 4, 5]], [[6]]],
            [3, 3],  # Expected: [1,2,3] and [4,5,6]
        ),
        (
            # Target of 4 rows with leftover
            4,
            [[[1, 2]], [[3, 4]], [[5, 6, 7]]],
            [4, 3],  # Expected: [1,2,3,4] and [5,6,7]
        ),
        (
            # Larger target with various input sizes
            5,
            [[[1, 2, 3]], [[4, 5, 6, 7]], [[8, 9]], [[10, 11, 12]]],
            [5, 5, 2],  # Expected: [1-5], [6-10], [11-12]
        ),
        (
            # Test with empty blocks
            3,
            [[[1]], [[]], [[2, 3]], [[]], [[4, 5]]],
            [3, 2],  # Expected: [1,2,3] and [4,5]
        ),
        (
            # Test with last block smaller than target num rows per block
            100,
            [[[1]], [[2]], [[3]], [[4]], [[5]]],
            [5],
        ),
    ],
)
def test_streaming_repartition_ref_bundler(target, in_bundles, expected_row_counts):
    """Test StreamingRepartitionRefBundler with various input patterns (unit test).

    Args:
        target: Target number of rows per output bundle.
        in_bundles: Nested lists describing input bundles -> blocks -> row values.
        expected_row_counts: Expected row count of each output bundle, in order.
    """
    bundler = StreamingRepartitionRefBundler(target)
    bundles, block_data_map = _make_ref_bundles_for_unit_test(in_bundles)
    out_bundles = []
    # Feed input bundles one at a time, draining ready output as it appears.
    for bundle in bundles:
        bundler.add_bundle(bundle)
        while bundler.has_bundle():
            _, out_bundle = bundler.get_next_bundle()
            out_bundles.append(out_bundle)
    # Signal end of input so any leftover rows are flushed as a final bundle.
    bundler.done_adding_bundles()
    while bundler.has_bundle():
        _, out_bundle = bundler.get_next_bundle()
        out_bundles.append(out_bundle)
    # Verify number of output bundles
    assert len(out_bundles) == len(
        expected_row_counts
    ), f"Expected {len(expected_row_counts)} bundles, got {len(out_bundles)}"
    # Verify row counts for each bundle
    for i, (out_bundle, expected_count) in enumerate(
        zip(out_bundles, expected_row_counts)
    ):
        assert (
            out_bundle.num_rows() == expected_count
        ), f"Bundle {i}: expected {expected_count} rows, got {out_bundle.num_rows()}"
    # Verify all bundles have been ingested (nothing left inside the bundler)
    assert bundler.num_blocks() == 0
    # Verify all output bundles except the last are exact multiples of target
    for i, out_bundle in enumerate(out_bundles[:-1]):
        assert (
            out_bundle.num_rows() % target == 0
        ), f"Bundle {i} has {out_bundle.num_rows()} rows, not a multiple of {target}"
    # Verify data integrity - all input data is preserved in order (bundler slicing is in order)
    total_input_rows = sum(sum(len(block) for block in bundle) for bundle in in_bundles)
    total_output_rows = sum(bundle.num_rows() for bundle in out_bundles)
    assert total_output_rows == total_input_rows
    # Verify block content - extract all values from output bundles
    output_values = []
    for bundle in out_bundles:
        for (block_ref, _), block_slice in zip(bundle.blocks, bundle.slices):
            # Look up the actual block data from our map (no ray.get needed)
            data = block_data_map[block_ref]["id"]
            if block_slice is not None:
                # We apply the slice manually here because this test only
                # exercises the bundler; the actual block slicing happens in
                # the map operator for streaming repartition.
                data = data[block_slice.start_offset : block_slice.end_offset]
            output_values.extend(data)
    # Expected values are all input values flattened in order
    expected_values = [
        value for bundle in in_bundles for block in bundle for value in block
    ]
    assert (
        output_values == expected_values
    ), f"Output values {output_values} don't match expected {expected_values}"
if __name__ == "__main__":
    # Allow running this test module directly: `python test_bundler.py`.
    import sys
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/unit/test_bundler.py",
"license": "Apache License 2.0",
"lines": 122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/_private/grpc_utils.py | import os
from concurrent import futures
from typing import Any, Optional, Sequence, Tuple
import grpc
from grpc import aio as aiogrpc
import ray
from ray._private.authentication import authentication_utils
from ray._private.tls_utils import load_certs_from_env
def init_grpc_channel(
    address: str,
    options: Optional[Sequence[Tuple[str, Any]]] = None,
    asynchronous: bool = False,
    credentials: Optional[grpc.ChannelCredentials] = None,
):
    """Create a gRPC channel, wiring in auth interceptors and TLS as configured.

    Handles:
    - TLS via a custom ``credentials`` argument or the RAY_USE_TLS env var
    - Token-auth client interceptors when token auth is enabled
    - Keepalive defaults sourced from Ray's config
    - Both synchronous and asynchronous (aio) channels

    Args:
        address: The gRPC server address (host:port).
        options: Optional gRPC channel options as (key, value) tuples.
        asynchronous: If True, create an aio channel; otherwise a sync one.
        credentials: Optional custom TLS credentials; takes precedence over
            the RAY_USE_TLS environment variable.

    Returns:
        grpc.Channel or grpc.aio.Channel: Configured gRPC channel.
    """
    grpc_module = aiogrpc if asynchronous else grpc

    # Fill in keepalive defaults without clobbering caller-provided values.
    merged_options = dict(options or [])
    merged_options.setdefault(
        "grpc.keepalive_time_ms", ray._config.grpc_client_keepalive_time_ms()
    )
    merged_options.setdefault(
        "grpc.keepalive_timeout_ms", ray._config.grpc_client_keepalive_timeout_ms()
    )
    channel_options = list(merged_options.items())

    # Collect client-side auth interceptors when token auth is enabled.
    interceptors = []
    if authentication_utils.is_token_auth_enabled():
        from ray._private.authentication.grpc_authentication_client_interceptor import (
            SyncAuthenticationMetadataClientInterceptor,
            get_async_auth_interceptors,
        )

        if asynchronous:
            interceptors.extend(get_async_auth_interceptors())
        else:
            interceptors.append(SyncAuthenticationMetadataClientInterceptor())

    # Resolve transport security: explicit credentials win, then RAY_USE_TLS.
    if credentials is not None:
        make_channel = grpc_module.secure_channel
        creator_args = (address, credentials)
    elif os.environ.get("RAY_USE_TLS", "0").lower() in ("1", "true"):
        server_cert_chain, private_key, ca_cert = load_certs_from_env()
        env_credentials = grpc.ssl_channel_credentials(
            certificate_chain=server_cert_chain,
            private_key=private_key,
            root_certificates=ca_cert,
        )
        make_channel = grpc_module.secure_channel
        creator_args = (address, env_credentials)
    else:
        make_channel = grpc_module.insecure_channel
        creator_args = (address,)

    # aio channels accept interceptors at construction time; sync channels
    # are wrapped via grpc.intercept_channel afterwards.
    if asynchronous:
        return make_channel(
            *creator_args, options=channel_options, interceptors=interceptors
        )
    channel = make_channel(*creator_args, options=channel_options)
    if interceptors:
        channel = grpc.intercept_channel(channel, *interceptors)
    return channel
def create_grpc_server_with_interceptors(
    max_workers: Optional[int] = None,
    thread_name_prefix: str = "grpc_server",
    options: Optional[Sequence[Tuple[str, Any]]] = None,
    asynchronous: bool = False,
):
    """Create a gRPC server, attaching auth interceptors when token auth is on.

    Handles:
    - Server-side authentication interceptors when token auth is enabled
    - Both synchronous and asynchronous (aio) servers
    - Thread pool configuration for sync servers

    Args:
        max_workers: Max thread pool workers (required for sync, ignored for async).
        thread_name_prefix: Thread name prefix for the sync thread pool.
        options: Optional gRPC server options as (key, value) tuples.
        asynchronous: If True, create an aio server; otherwise a sync one.

    Returns:
        grpc.Server or grpc.aio.Server: Configured gRPC server.
    """
    # Collect server-side auth interceptors; imports are deferred so the
    # interceptor module is only loaded when token auth is actually enabled.
    interceptors = []
    if authentication_utils.is_token_auth_enabled():
        if asynchronous:
            from ray._private.authentication.grpc_authentication_server_interceptor import (
                AsyncAuthenticationServerInterceptor,
            )

            interceptors.append(AsyncAuthenticationServerInterceptor())
        else:
            from ray._private.authentication.grpc_authentication_server_interceptor import (
                SyncAuthenticationServerInterceptor,
            )

            interceptors.append(SyncAuthenticationServerInterceptor())

    if asynchronous:
        # aio servers manage their own executor.
        return aiogrpc.server(
            interceptors=interceptors or None,
            options=options,
        )

    if max_workers is None:
        raise ValueError("max_workers is required for synchronous gRPC servers")
    pool = futures.ThreadPoolExecutor(
        max_workers=max_workers,
        thread_name_prefix=thread_name_prefix,
    )
    return grpc.server(
        pool,
        interceptors=interceptors or None,
        options=options,
    )
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/_private/grpc_utils.py",
"license": "Apache License 2.0",
"lines": 131,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/tests/authentication/test_async_grpc_interceptors.py | import grpc
import pytest
from grpc import aio as aiogrpc
from ray._private.authentication.authentication_token_generator import (
generate_new_authentication_token,
)
from ray._private.authentication_test_utils import (
authentication_env_guard,
reset_auth_token_state,
set_auth_mode,
set_env_auth_token,
)
from ray._private.grpc_utils import init_grpc_channel
from ray.core.generated import reporter_pb2, reporter_pb2_grpc
@pytest.mark.asyncio
async def test_async_server_and_client_with_valid_token(create_async_test_server):
    """Test async server + client with matching token succeeds.

    Uses init_grpc_channel so the client auth interceptor attaches the token.
    The channel is explicitly closed during cleanup; unclosed aio channels
    leak resources and raise errors when the event loop shuts down.
    """
    token = generate_new_authentication_token()
    with authentication_env_guard():
        set_auth_mode("token")
        set_env_auth_token(token)
        reset_auth_token_state()
        # Create server with auth enabled
        server, port = await create_async_test_server(with_auth=True)
        channel = None
        try:
            # Client with auth interceptor via init_grpc_channel
            channel = init_grpc_channel(
                f"localhost:{port}",
                options=None,
                asynchronous=True,
            )
            stub = reporter_pb2_grpc.ReporterServiceStub(channel)
            request = reporter_pb2.HealthCheckRequest()
            response = await stub.HealthCheck(request, timeout=5)
            assert response is not None
        finally:
            # Close the client channel before stopping the server.
            if channel is not None:
                await channel.close()
            await server.stop(grace=1)
@pytest.mark.asyncio
async def test_async_server_and_client_with_invalid_token(create_async_test_server):
    """Test async server + client with mismatched token fails UNAUTHENTICATED.

    The channel is explicitly closed during cleanup to avoid leaking aio
    channel resources.
    """
    server_token = generate_new_authentication_token()
    wrong_token = generate_new_authentication_token()
    with authentication_env_guard():
        # Set up server with server_token
        set_auth_mode("token")
        set_env_auth_token(server_token)
        reset_auth_token_state()
        server, port = await create_async_test_server(with_auth=True)
        channel = None
        try:
            # Create client channel and manually add wrong token to metadata
            channel = aiogrpc.insecure_channel(f"localhost:{port}")
            stub = reporter_pb2_grpc.ReporterServiceStub(channel)
            # Add invalid token to metadata (not using client interceptor)
            metadata = (("authorization", f"Bearer {wrong_token}"),)
            request = reporter_pb2.HealthCheckRequest()
            # Should fail with UNAUTHENTICATED
            with pytest.raises(grpc.RpcError) as exc_info:
                await stub.HealthCheck(request, metadata=metadata, timeout=5)
            assert exc_info.value.code() == grpc.StatusCode.UNAUTHENTICATED
        finally:
            # Close the client channel before stopping the server.
            if channel is not None:
                await channel.close()
            await server.stop(grace=1)
@pytest.mark.asyncio
async def test_async_server_with_auth_client_without_token(create_async_test_server):
    """Test async server with auth, client without token fails.

    The channel is explicitly closed during cleanup to avoid leaking aio
    channel resources.
    """
    token = generate_new_authentication_token()
    with authentication_env_guard():
        # Set up server with auth enabled
        set_auth_mode("token")
        set_env_auth_token(token)
        reset_auth_token_state()
        server, port = await create_async_test_server(with_auth=True)
        channel = None
        try:
            # Create channel without auth metadata
            channel = aiogrpc.insecure_channel(f"localhost:{port}")
            stub = reporter_pb2_grpc.ReporterServiceStub(channel)
            request = reporter_pb2.HealthCheckRequest()
            # Should fail with UNAUTHENTICATED (no metadata provided)
            with pytest.raises(grpc.RpcError) as exc_info:
                await stub.HealthCheck(request, timeout=5)
            assert exc_info.value.code() == grpc.StatusCode.UNAUTHENTICATED
        finally:
            # Close the client channel before stopping the server.
            if channel is not None:
                await channel.close()
            await server.stop(grace=1)
@pytest.mark.asyncio
async def test_async_server_without_auth(create_async_test_server):
    """Test async server without auth allows unauthenticated requests.

    The channel is explicitly closed during cleanup to avoid leaking aio
    channel resources.
    """
    with authentication_env_guard():
        # Disable auth mode
        set_auth_mode("disabled")
        reset_auth_token_state()
        # Create server without auth
        server, port = await create_async_test_server(with_auth=False)
        channel = None
        try:
            # Client without auth
            channel = aiogrpc.insecure_channel(f"localhost:{port}")
            stub = reporter_pb2_grpc.ReporterServiceStub(channel)
            request = reporter_pb2.HealthCheckRequest()
            # Should succeed without auth
            response = await stub.HealthCheck(request, timeout=5)
            assert response is not None
        finally:
            # Close the client channel before stopping the server.
            if channel is not None:
                await channel.close()
            await server.stop(grace=1)
@pytest.mark.asyncio
async def test_async_server_with_auth_disabled_allows_all(create_async_test_server):
    """Test async server allows requests when auth mode is disabled.

    The channel is explicitly closed during cleanup to avoid leaking aio
    channel resources.
    """
    with authentication_env_guard():
        # Disable auth mode globally
        set_auth_mode("disabled")
        reset_auth_token_state()
        # Even though we call create_async_test_server with with_auth=True,
        # the server won't enforce auth because auth mode is disabled
        server, port = await create_async_test_server(with_auth=True)
        channel = None
        try:
            # Client without token
            channel = aiogrpc.insecure_channel(f"localhost:{port}")
            stub = reporter_pb2_grpc.ReporterServiceStub(channel)
            request = reporter_pb2.HealthCheckRequest()
            # Should succeed because auth is disabled
            response = await stub.HealthCheck(request, timeout=5)
            assert response is not None
        finally:
            # Close the client channel before stopping the server.
            if channel is not None:
                await channel.close()
            await server.stop(grace=1)
@pytest.mark.asyncio
async def test_async_streaming_response_with_valid_token(create_async_test_server):
    """Test async server streaming response (unary_stream) works with valid token.

    The channel is explicitly closed during cleanup to avoid leaking aio
    channel resources.
    """
    token = generate_new_authentication_token()
    with authentication_env_guard():
        set_auth_mode("token")
        set_env_auth_token(token)
        reset_auth_token_state()
        # Create server with auth enabled
        server, port = await create_async_test_server(with_auth=True)
        channel = None
        try:
            # Client with auth interceptor via init_grpc_channel
            channel = init_grpc_channel(
                f"localhost:{port}",
                options=None,
                asynchronous=True,
            )
            stub = reporter_pb2_grpc.LogServiceStub(channel)
            request = reporter_pb2.StreamLogRequest(log_file_name="test.log")
            # Stream the response - this tests the unary_stream RPC path
            chunks = []
            async for response in stub.StreamLog(request, timeout=5):
                chunks.append(response.data)
            # Verify we got all 3 chunks from the test service
            assert len(chunks) == 3
            assert chunks == [b"chunk0", b"chunk1", b"chunk2"]
        finally:
            # Close the client channel before stopping the server.
            if channel is not None:
                await channel.close()
            await server.stop(grace=1)
@pytest.mark.asyncio
async def test_async_streaming_response_without_token_fails(create_async_test_server):
    """Test async server streaming response fails without token.

    The channel is explicitly closed during cleanup to avoid leaking aio
    channel resources.
    """
    token = generate_new_authentication_token()
    with authentication_env_guard():
        set_auth_mode("token")
        set_env_auth_token(token)
        reset_auth_token_state()
        server, port = await create_async_test_server(with_auth=True)
        channel = None
        try:
            # Client without auth token
            channel = aiogrpc.insecure_channel(f"localhost:{port}")
            stub = reporter_pb2_grpc.LogServiceStub(channel)
            request = reporter_pb2.StreamLogRequest(log_file_name="test.log")
            # Should fail with UNAUTHENTICATED when trying to iterate
            with pytest.raises(grpc.RpcError) as exc_info:
                async for _ in stub.StreamLog(request, timeout=5):
                    pass
            assert exc_info.value.code() == grpc.StatusCode.UNAUTHENTICATED
        finally:
            # Close the client channel before stopping the server.
            if channel is not None:
                await channel.close()
            await server.stop(grace=1)
if __name__ == "__main__":
    # Allow running this test module directly (outside of Bazel/CI).
    import sys
    sys.exit(pytest.main(["-vv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/authentication/test_async_grpc_interceptors.py",
"license": "Apache License 2.0",
"lines": 173,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/tests/authentication/test_sync_grpc_interceptors.py | import grpc
import pytest
from ray._private.authentication.authentication_token_generator import (
generate_new_authentication_token,
)
from ray._private.authentication_test_utils import (
authentication_env_guard,
reset_auth_token_state,
set_auth_mode,
set_env_auth_token,
)
from ray._private.grpc_utils import init_grpc_channel
from ray.core.generated import reporter_pb2, reporter_pb2_grpc
def test_sync_server_and_client_with_valid_token(create_sync_test_server):
    """Test sync server + client with matching token succeeds.

    The channel is explicitly closed during cleanup so the test does not leak
    the client-side connection.
    """
    token = generate_new_authentication_token()
    with authentication_env_guard():
        set_auth_mode("token")
        set_env_auth_token(token)
        reset_auth_token_state()
        # Create server with auth enabled
        server, port = create_sync_test_server(with_auth=True)
        channel = None
        try:
            # Client with auth interceptor via init_grpc_channel
            channel = init_grpc_channel(
                f"localhost:{port}",
                options=None,
                asynchronous=False,
            )
            stub = reporter_pb2_grpc.ReporterServiceStub(channel)
            request = reporter_pb2.HealthCheckRequest()
            response = stub.HealthCheck(request, timeout=5)
            assert response is not None
        finally:
            # Close the client channel before stopping the server.
            if channel is not None:
                channel.close()
            server.stop(grace=1)
def test_sync_server_and_client_with_invalid_token(create_sync_test_server):
    """Test sync server + client with mismatched token fails UNAUTHENTICATED.

    The channel is explicitly closed during cleanup to avoid leaking the
    client-side connection.
    """
    server_token = generate_new_authentication_token()
    wrong_token = generate_new_authentication_token()
    with authentication_env_guard():
        # Set up server with server_token
        set_auth_mode("token")
        set_env_auth_token(server_token)
        reset_auth_token_state()
        server, port = create_sync_test_server(with_auth=True)
        channel = None
        try:
            # Create client channel and manually add wrong token to metadata
            channel = grpc.insecure_channel(f"localhost:{port}")
            stub = reporter_pb2_grpc.ReporterServiceStub(channel)
            # Add invalid token to metadata (not using client interceptor)
            metadata = (("authorization", f"Bearer {wrong_token}"),)
            request = reporter_pb2.HealthCheckRequest()
            # Should fail with UNAUTHENTICATED
            with pytest.raises(grpc.RpcError) as exc_info:
                stub.HealthCheck(request, metadata=metadata, timeout=5)
            assert exc_info.value.code() == grpc.StatusCode.UNAUTHENTICATED
        finally:
            # Close the client channel before stopping the server.
            if channel is not None:
                channel.close()
            server.stop(grace=1)
def test_sync_server_with_auth_client_without_token(create_sync_test_server):
    """Test server with auth, client without token fails.

    The channel is explicitly closed during cleanup to avoid leaking the
    client-side connection.
    """
    token = generate_new_authentication_token()
    with authentication_env_guard():
        # Set up server with auth enabled
        set_auth_mode("token")
        set_env_auth_token(token)
        reset_auth_token_state()
        server, port = create_sync_test_server(with_auth=True)
        channel = None
        try:
            # Create channel without auth metadata
            channel = grpc.insecure_channel(f"localhost:{port}")
            stub = reporter_pb2_grpc.ReporterServiceStub(channel)
            request = reporter_pb2.HealthCheckRequest()
            # Should fail with UNAUTHENTICATED (no metadata provided)
            with pytest.raises(grpc.RpcError) as exc_info:
                stub.HealthCheck(request, timeout=5)
            assert exc_info.value.code() == grpc.StatusCode.UNAUTHENTICATED
        finally:
            # Close the client channel before stopping the server.
            if channel is not None:
                channel.close()
            server.stop(grace=1)
def test_sync_server_without_auth(create_sync_test_server):
    """Test server without auth allows unauthenticated requests.

    The channel is explicitly closed during cleanup to avoid leaking the
    client-side connection.
    """
    with authentication_env_guard():
        # Disable auth mode
        set_auth_mode("disabled")
        reset_auth_token_state()
        # Create server without auth
        server, port = create_sync_test_server(with_auth=False)
        channel = None
        try:
            # Client without auth
            channel = grpc.insecure_channel(f"localhost:{port}")
            stub = reporter_pb2_grpc.ReporterServiceStub(channel)
            request = reporter_pb2.HealthCheckRequest()
            # Should succeed without auth
            response = stub.HealthCheck(request, timeout=5)
            assert response is not None
        finally:
            # Close the client channel before stopping the server.
            if channel is not None:
                channel.close()
            server.stop(grace=1)
def test_sync_server_with_auth_disabled_allows_all(create_sync_test_server):
    """Test server allows requests when auth mode is disabled.

    The channel is explicitly closed during cleanup to avoid leaking the
    client-side connection.
    """
    with authentication_env_guard():
        # Disable auth mode globally
        set_auth_mode("disabled")
        reset_auth_token_state()
        # Even though we call create_sync_test_server with with_auth=True,
        # the server won't enforce auth because auth mode is disabled
        server, port = create_sync_test_server(with_auth=True)
        channel = None
        try:
            # Client without token
            channel = grpc.insecure_channel(f"localhost:{port}")
            stub = reporter_pb2_grpc.ReporterServiceStub(channel)
            request = reporter_pb2.HealthCheckRequest()
            # Should succeed because auth is disabled
            response = stub.HealthCheck(request, timeout=5)
            assert response is not None
        finally:
            # Close the client channel before stopping the server.
            if channel is not None:
                channel.close()
            server.stop(grace=1)
def test_sync_streaming_response_with_valid_token(create_sync_test_server):
    """Test sync server streaming response (unary_stream) works with valid token.

    The channel is explicitly closed during cleanup to avoid leaking the
    client-side connection.
    """
    token = generate_new_authentication_token()
    with authentication_env_guard():
        set_auth_mode("token")
        set_env_auth_token(token)
        reset_auth_token_state()
        # Create server with auth enabled
        server, port = create_sync_test_server(with_auth=True)
        channel = None
        try:
            # Client with auth interceptor via init_grpc_channel
            channel = init_grpc_channel(
                f"localhost:{port}",
                options=None,
                asynchronous=False,
            )
            stub = reporter_pb2_grpc.LogServiceStub(channel)
            request = reporter_pb2.StreamLogRequest(log_file_name="test.log")
            # Stream the response - this tests the unary_stream RPC path
            chunks = []
            for response in stub.StreamLog(request, timeout=5):
                chunks.append(response.data)
            # Verify we got all 3 chunks from the test service
            assert len(chunks) == 3
            assert chunks == [b"chunk0", b"chunk1", b"chunk2"]
        finally:
            # Close the client channel before stopping the server.
            if channel is not None:
                channel.close()
            server.stop(grace=1)
def test_sync_streaming_response_without_token_fails(create_sync_test_server):
    """Test sync server streaming response fails without token.

    The channel is explicitly closed during cleanup to avoid leaking the
    client-side connection.
    """
    token = generate_new_authentication_token()
    with authentication_env_guard():
        set_auth_mode("token")
        set_env_auth_token(token)
        reset_auth_token_state()
        server, port = create_sync_test_server(with_auth=True)
        channel = None
        try:
            # Client without auth token
            channel = grpc.insecure_channel(f"localhost:{port}")
            stub = reporter_pb2_grpc.LogServiceStub(channel)
            request = reporter_pb2.StreamLogRequest(log_file_name="test.log")
            # Should fail with UNAUTHENTICATED when trying to iterate
            with pytest.raises(grpc.RpcError) as exc_info:
                for _ in stub.StreamLog(request, timeout=5):
                    pass
            assert exc_info.value.code() == grpc.StatusCode.UNAUTHENTICATED
        finally:
            # Close the client channel before stopping the server.
            if channel is not None:
                channel.close()
            server.stop(grace=1)
if __name__ == "__main__":
    # Allow running this test module directly (outside of Bazel/CI).
    import sys
    sys.exit(pytest.main(["-vv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/authentication/test_sync_grpc_interceptors.py",
"license": "Apache License 2.0",
"lines": 165,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/_internal/execution/ranker.py | """Ranker component for operator selection in streaming executor."""
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Generic, List, Protocol, Tuple, TypeVar
from ray.data._internal.execution.interfaces import PhysicalOperator
if TYPE_CHECKING:
from ray.data._internal.execution.resource_manager import ResourceManager
from ray.data._internal.execution.streaming_executor_state import Topology
# Structural type describing values that support rich comparison, as required
# for ordering operator ranks.
class Comparable(Protocol):
    """Protocol for types whose instances can be ordered against each other."""

    def __lt__(self, other: "Comparable") -> bool: ...

    def __le__(self, other: "Comparable") -> bool: ...

    def __gt__(self, other: "Comparable") -> bool: ...

    def __ge__(self, other: "Comparable") -> bool: ...

    def __eq__(self, other: "Comparable") -> bool: ...


# Type variable for rank values produced by a Ranker; bounded by Comparable so
# ranks from the same ranker can always be ordered.
RankingValue = TypeVar("RankingValue", bound=Comparable)
class Ranker(ABC, Generic[RankingValue]):
    """Abstract base class for operator ranking strategies.

    Implementations produce a comparable rank per operator; the scheduler
    picks the operator with the *smallest* rank to run next.
    """

    @abstractmethod
    def rank_operator(
        self,
        op: PhysicalOperator,
        topology: "Topology",
        resource_manager: "ResourceManager",
    ) -> RankingValue:
        """Rank operator for execution priority.

        The operator to run next is selected as the one with the *smallest*
        rank; tuple ranks compare lexicographically.

        Args:
            op: Operator to rank
            topology: Current execution topology
            resource_manager: Resource manager for usage information

        Returns:
            Rank (tuple) for operator
        """
        pass

    def rank_operators(
        self,
        ops: List[PhysicalOperator],
        topology: "Topology",
        resource_manager: "ResourceManager",
    ) -> List[RankingValue]:
        """Rank each operator in `ops`, preserving input order.

        `ops` must be non-empty (enforced via assert).
        """
        assert len(ops) > 0
        return [self.rank_operator(op, topology, resource_manager) for op in ops]
class DefaultRanker(Ranker[Tuple[int, int]]):
    """Default ranking strategy: throttling status first, then memory usage."""

    def rank_operator(
        self,
        op: PhysicalOperator,
        topology: "Topology",
        resource_manager: "ResourceManager",
    ) -> Tuple[int, int]:
        """Compute the rank for `op`; a *lower* rank means higher priority.

        The rank tuple is, in lexicographic order:
        1. 0 if the operator has throttling disabled, else 1.
        2. The operator's current object store memory usage.

        Args:
            op: Operator to rank
            topology: Current execution topology
            resource_manager: Resource manager for usage information

        Returns:
            Rank (tuple) for operator
        """
        throttle_rank = int(not op.throttling_disabled())
        memory_rank = resource_manager.get_op_usage(op).object_store_memory
        return (throttle_rank, memory_rank)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/execution/ranker.py",
"license": "Apache License 2.0",
"lines": 73,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/tests/test_ranker.py | """Comprehensive tests for the generic ranker type system."""
from unittest.mock import MagicMock
import pytest
from ray.data._internal.execution.interfaces import PhysicalOperator
from ray.data._internal.execution.ranker import DefaultRanker, Ranker
from ray.data._internal.execution.resource_manager import ResourceManager
from ray.data._internal.execution.streaming_executor_state import Topology
def test_default_ranker():
    """DefaultRanker orders by throttling status, then object store memory."""
    ranker = DefaultRanker()

    # One throttleable operator and one with throttling disabled.
    throttled_op = MagicMock()
    throttled_op.throttling_disabled.return_value = False
    unthrottled_op = MagicMock()
    unthrottled_op.throttling_disabled.return_value = True

    topology = {}
    resource_manager = MagicMock()
    usage = MagicMock()
    usage.object_store_memory = 1024
    resource_manager.get_op_usage.return_value = usage

    # Throttleable op ranks (1, memory); throttling-disabled op ranks (0, memory).
    assert ranker.rank_operator(throttled_op, topology, resource_manager) == (1, 1024)
    assert ranker.rank_operator(unthrottled_op, topology, resource_manager) == (0, 1024)

    # Batch ranking preserves input order.
    ranks = ranker.rank_operators(
        [throttled_op, unthrottled_op], topology, resource_manager
    )
    assert ranks == [(1, 1024), (0, 1024)]
class IntRanker(Ranker[int]):
    """Ranker that ranks operators by object store memory usage alone."""

    def rank_operator(
        self,
        op: PhysicalOperator,
        topology: "Topology",
        resource_manager: ResourceManager,
    ) -> int:
        """Return the operator's object store memory usage as its rank."""
        usage = resource_manager.get_op_usage(op)
        return usage.object_store_memory
def test_generic_types():
    """A Ranker specialized to int rankings works through the generic API."""
    int_ranker = IntRanker()

    first_op, second_op = MagicMock(), MagicMock()
    topology = {}
    resource_manager = MagicMock()
    usage = MagicMock()
    usage.object_store_memory = 1024
    resource_manager.get_op_usage.return_value = usage

    # Single-operator ranking returns the mocked memory usage.
    assert int_ranker.rank_operator(first_op, topology, resource_manager) == 1024
    assert int_ranker.rank_operator(second_op, topology, resource_manager) == 1024

    # Batch ranking returns one integer rank per operator, in order.
    ranks = int_ranker.rank_operators(
        [first_op, second_op], topology, resource_manager
    )
    assert ranks == [1024, 1024]
if __name__ == "__main__":
    # Allow running this test module directly: `python test_ranker.py`.
    import sys
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_ranker.py",
"license": "Apache License 2.0",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:doc/source/serve/doc_code/cross_node_parallelism_example.py | # flake8: noqa
"""
Cross-node parallelism examples for Ray Serve LLM.
TP / PP / custom placement group strategies
for multi-node LLM deployments.
"""
# __cross_node_tp_example_start__
import vllm
from ray import serve
from ray.serve.llm import LLMConfig, build_openai_app
# Configure a model with tensor parallelism across 2 GPUs
# Tensor parallelism splits model weights across GPUs
llm_config = LLMConfig(
model_loading_config=dict(
model_id="llama-3.1-8b",
model_source="meta-llama/Llama-3.1-8B-Instruct",
),
deployment_config=dict(
autoscaling_config=dict(
min_replicas=1,
max_replicas=2,
)
),
accelerator_type="L4",
engine_kwargs=dict(
tensor_parallel_size=2,
max_model_len=8192,
),
)
# Deploy the application
app = build_openai_app({"llm_configs": [llm_config]})
serve.run(app, blocking=True)
# __cross_node_tp_example_end__
# __cross_node_pp_example_start__
from ray import serve
from ray.serve.llm import LLMConfig, build_openai_app
# Configure a model with pipeline parallelism across 2 GPUs
# Pipeline parallelism splits model layers across GPUs
llm_config = LLMConfig(
model_loading_config=dict(
model_id="llama-3.1-8b",
model_source="meta-llama/Llama-3.1-8B-Instruct",
),
deployment_config=dict(
autoscaling_config=dict(
min_replicas=1,
max_replicas=1,
)
),
accelerator_type="L4",
engine_kwargs=dict(
pipeline_parallel_size=2,
max_model_len=8192,
),
)
# Deploy the application
app = build_openai_app({"llm_configs": [llm_config]})
serve.run(app, blocking=True)
# __cross_node_pp_example_end__
# __cross_node_tp_pp_example_start__
from ray import serve
from ray.serve.llm import LLMConfig, build_openai_app
# Configure a model with both tensor and pipeline parallelism
# This example uses 4 GPUs total (2 TP * 2 PP)
llm_config = LLMConfig(
model_loading_config=dict(
model_id="llama-3.1-8b",
model_source="meta-llama/Llama-3.1-8B-Instruct",
),
deployment_config=dict(
autoscaling_config=dict(
min_replicas=1,
max_replicas=1,
)
),
accelerator_type="L4",
engine_kwargs=dict(
tensor_parallel_size=2,
pipeline_parallel_size=2,
max_model_len=8192,
enable_chunked_prefill=True,
max_num_batched_tokens=4096,
),
)
# Deploy the application
app = build_openai_app({"llm_configs": [llm_config]})
serve.run(app, blocking=True)
# __cross_node_tp_pp_example_end__
# __custom_placement_group_pack_example_start__
from ray import serve
from ray.serve.llm import LLMConfig, build_openai_app
# Configure a model with custom placement group using PACK strategy
# PACK tries to place workers on as few nodes as possible for locality
llm_config = LLMConfig(
model_loading_config=dict(
model_id="llama-3.1-8b",
model_source="meta-llama/Llama-3.1-8B-Instruct",
),
deployment_config=dict(
autoscaling_config=dict(
min_replicas=1,
max_replicas=1,
)
),
accelerator_type="L4",
engine_kwargs=dict(
tensor_parallel_size=2,
max_model_len=8192,
),
placement_group_config=dict(
bundles=[{"GPU": 1}] * 2,
strategy="PACK",
),
)
# Deploy the application
app = build_openai_app({"llm_configs": [llm_config]})
serve.run(app, blocking=True)
# __custom_placement_group_pack_example_end__
# __custom_placement_group_spread_example_start__
from ray import serve
from ray.serve.llm import LLMConfig, build_openai_app
# Configure a model with custom placement group using SPREAD strategy
# SPREAD distributes workers across nodes for fault tolerance
llm_config = LLMConfig(
model_loading_config=dict(
model_id="llama-3.1-8b",
model_source="meta-llama/Llama-3.1-8B-Instruct",
),
deployment_config=dict(
autoscaling_config=dict(
min_replicas=1,
max_replicas=1,
)
),
accelerator_type="L4",
engine_kwargs=dict(
tensor_parallel_size=4,
max_model_len=8192,
),
placement_group_config=dict(
bundles=[{"GPU": 1}] * 4,
strategy="SPREAD",
),
)
# Deploy the application
app = build_openai_app({"llm_configs": [llm_config]})
serve.run(app, blocking=True)
# __custom_placement_group_spread_example_end__
# __custom_placement_group_strict_pack_example_start__
from ray import serve
from ray.serve.llm import LLMConfig, build_openai_app
# Configure a model with custom placement group using STRICT_PACK strategy
# STRICT_PACK ensures all workers are placed on the same node
llm_config = LLMConfig(
model_loading_config=dict(
model_id="llama-3.1-8b",
model_source="meta-llama/Llama-3.1-8B-Instruct",
),
deployment_config=dict(
autoscaling_config=dict(
min_replicas=1,
max_replicas=2,
)
),
accelerator_type="A100",
engine_kwargs=dict(
tensor_parallel_size=2,
max_model_len=8192,
),
placement_group_config=dict(
bundles=[{"GPU": 1}] * 2,
strategy="STRICT_PACK",
),
)
# Deploy the application
app = build_openai_app({"llm_configs": [llm_config]})
serve.run(app, blocking=True)
# __custom_placement_group_strict_pack_example_end__
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/doc_code/cross_node_parallelism_example.py",
"license": "Apache License 2.0",
"lines": 178,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/_private/authentication/grpc_authentication_server_interceptor.py | """gRPC server interceptor for token-based authentication."""
import logging
from typing import Awaitable, Callable
import grpc
from grpc import aio as aiogrpc
from ray._private.authentication.authentication_constants import (
AUTHORIZATION_HEADER_NAME,
)
from ray._private.authentication.authentication_utils import (
is_token_auth_enabled,
validate_request_token,
)
logger = logging.getLogger(__name__)
def _authenticate_request(metadata: tuple) -> bool:
    """Decide whether an incoming request is authenticated.

    Only token authentication is supported. When token auth is disabled,
    every request is accepted.

    Args:
        metadata: gRPC metadata as a tuple of (key, value) pairs.

    Returns:
        True if authentication succeeds or is not required, False otherwise.
    """
    # With auth disabled, everything passes.
    if not is_token_auth_enabled():
        return True
    # Locate the authorization header; keys are compared case-insensitively.
    auth_header = next(
        (
            value
            for key, value in metadata
            if key.lower() == AUTHORIZATION_HEADER_NAME
        ),
        None,
    )
    if not auth_header:
        logger.warning("Authentication required but no authorization header provided")
        return False
    # validate_request_token returns bool (True if valid, False otherwise).
    return validate_request_token(auth_header)
class AsyncAuthenticationServerInterceptor(aiogrpc.ServerInterceptor):
    """Async gRPC server interceptor that validates authentication tokens.
    This interceptor checks the "authorization" metadata header for a valid
    Bearer token when token authentication is enabled via RAY_AUTH_MODE=token.
    If the token is missing or invalid, the request is rejected with UNAUTHENTICATED status.
    """
    async def intercept_service(
        self,
        continuation: Callable[
            [grpc.HandlerCallDetails], Awaitable[grpc.RpcMethodHandler]
        ],
        handler_call_details: grpc.HandlerCallDetails,
    ) -> grpc.RpcMethodHandler:
        """Intercept service calls to validate authentication.
        This method is called once per RPC to get the handler. We wrap the handler
        to validate authentication before executing the actual RPC method.
        """
        # Get the actual handler
        handler = await continuation(handler_call_details)
        if handler is None:
            # No handler registered for this method; nothing to wrap.
            return None
        async def _abort_if_unauthenticated(context):
            """Abort the RPC if authentication fails."""
            if not _authenticate_request(context.invocation_metadata()):
                # context.abort raises to terminate the RPC, so the wrapped
                # behavior is never reached on authentication failure.
                await context.abort(
                    grpc.StatusCode.UNAUTHENTICATED,
                    "Invalid or missing authentication token",
                )
        # Wrap the RPC behavior with authentication check
        def wrap_unary_response(behavior):
            """Wrap a unary response RPC method to validate authentication first."""
            if behavior is None:
                # Preserve None so the handler still advertises only the
                # method kinds it actually implements.
                return None
            async def wrapped(request_or_iterator, context):
                await _abort_if_unauthenticated(context)
                return await behavior(request_or_iterator, context)
            return wrapped
        def wrap_stream_response(behavior):
            """Wrap a streaming response RPC method to validate authentication first."""
            if behavior is None:
                return None
            async def wrapped(request_or_iterator, context):
                await _abort_if_unauthenticated(context)
                # Re-yield the underlying stream once the request is authenticated.
                async for response in behavior(request_or_iterator, context):
                    yield response
            return wrapped
        # Create a wrapper class that implements RpcMethodHandler interface
        class AuthenticatedHandler:
            """Wrapper handler that validates authentication."""
            def __init__(
                self, original_handler, unary_wrapper_func, stream_wrapper_func
            ):
                self._original = original_handler
                self._wrap_unary = unary_wrapper_func
                self._wrap_stream = stream_wrapper_func
            # Pass-through properties required by the grpc.RpcMethodHandler
            # interface; serialization behavior is unchanged.
            @property
            def request_streaming(self):
                return self._original.request_streaming
            @property
            def response_streaming(self):
                return self._original.response_streaming
            @property
            def request_deserializer(self):
                return self._original.request_deserializer
            @property
            def response_serializer(self):
                return self._original.response_serializer
            # Behavior properties: the wrapper choice depends on whether the
            # RESPONSE is unary or streaming (stream_unary has a streaming
            # request but a unary response, hence the unary wrapper).
            @property
            def unary_unary(self):
                return self._wrap_unary(self._original.unary_unary)
            @property
            def unary_stream(self):
                return self._wrap_stream(self._original.unary_stream)
            @property
            def stream_unary(self):
                return self._wrap_unary(self._original.stream_unary)
            @property
            def stream_stream(self):
                return self._wrap_stream(self._original.stream_stream)
        return AuthenticatedHandler(handler, wrap_unary_response, wrap_stream_response)
class SyncAuthenticationServerInterceptor(grpc.ServerInterceptor):
    """Synchronous gRPC server interceptor that validates authentication tokens.
    This interceptor checks the "authorization" metadata header for a valid
    Bearer token when token authentication is enabled via RAY_AUTH_MODE=token.
    If the token is missing or invalid, the request is rejected with UNAUTHENTICATED status.
    """
    def intercept_service(
        self,
        continuation: Callable[[grpc.HandlerCallDetails], grpc.RpcMethodHandler],
        handler_call_details: grpc.HandlerCallDetails,
    ) -> grpc.RpcMethodHandler:
        """Intercept service calls to validate authentication.
        This method is called once per RPC to get the handler. We wrap the handler
        to validate authentication before executing the actual RPC method.
        """
        # Get the actual handler
        handler = continuation(handler_call_details)
        if handler is None:
            # No handler registered for this method; nothing to wrap.
            return None
        # Wrap the RPC behavior with authentication check
        def wrap_rpc_behavior(behavior):
            """Wrap an RPC method to validate authentication first."""
            if behavior is None:
                # Preserve None so the handler still advertises only the
                # method kinds it actually implements.
                return None
            def wrapped(request_or_iterator, context):
                if not _authenticate_request(context.invocation_metadata()):
                    # context.abort raises, terminating the RPC before the
                    # underlying behavior runs.
                    context.abort(
                        grpc.StatusCode.UNAUTHENTICATED,
                        "Invalid or missing authentication token",
                    )
                # For streaming responses, behavior returns an iterator which
                # is passed through unchanged.
                return behavior(request_or_iterator, context)
            return wrapped
        # Create a wrapper class that implements RpcMethodHandler interface
        class AuthenticatedHandler:
            """Wrapper handler that validates authentication."""
            def __init__(self, original_handler, wrapper_func):
                self._original = original_handler
                self._wrap = wrapper_func
            # Pass-through properties required by the grpc.RpcMethodHandler
            # interface; serialization behavior is unchanged.
            @property
            def request_streaming(self):
                return self._original.request_streaming
            @property
            def response_streaming(self):
                return self._original.response_streaming
            @property
            def request_deserializer(self):
                return self._original.request_deserializer
            @property
            def response_serializer(self):
                return self._original.response_serializer
            # All four behavior kinds share the same sync wrapper.
            @property
            def unary_unary(self):
                return self._wrap(self._original.unary_unary)
            @property
            def unary_stream(self):
                return self._wrap(self._original.unary_stream)
            @property
            def stream_unary(self):
                return self._wrap(self._original.stream_unary)
            @property
            def stream_stream(self):
                return self._wrap(self._original.stream_stream)
        return AuthenticatedHandler(handler, wrap_rpc_behavior)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/_private/authentication/grpc_authentication_server_interceptor.py",
"license": "Apache License 2.0",
"lines": 177,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/_private/authentication/grpc_authentication_client_interceptor.py | """gRPC client interceptor for token-based authentication."""
import logging
from collections import namedtuple
from typing import Tuple
import grpc
from grpc import aio as aiogrpc
from ray._raylet import AuthenticationTokenLoader
logger = logging.getLogger(__name__)
# Named tuple to hold client call details
_ClientCallDetails = namedtuple(
"_ClientCallDetails",
("method", "timeout", "metadata", "credentials", "wait_for_ready", "compression"),
)
def _get_authentication_metadata_tuple() -> Tuple[Tuple[str, str], ...]:
    """Build gRPC metadata carrying the authentication token, if one exists.

    Currently only token authentication is supported.

    Returns:
        tuple: Empty tuple when no token is configured, otherwise
        ((AUTHORIZATION_HEADER_NAME, "Bearer <token>"),).
    """
    loader = AuthenticationTokenLoader.instance()
    if not loader.has_token():
        # Nothing to attach when no token is available.
        return ()
    # The loader produces an HTTP-style header dict; gRPC metadata is simply
    # a tuple of (key, value) pairs, so the dict items convert directly.
    return tuple(loader.get_token_for_http_header().items())
class SyncAuthenticationMetadataClientInterceptor(
    grpc.UnaryUnaryClientInterceptor,
    grpc.UnaryStreamClientInterceptor,
    grpc.StreamUnaryClientInterceptor,
    grpc.StreamStreamClientInterceptor,
):
    """Synchronous gRPC client interceptor that adds authentication metadata."""
    def _intercept_call_details(self, client_call_details):
        """Helper method to add authentication metadata to client call details."""
        # Preserve any metadata the caller already attached (may be None).
        metadata = list(client_call_details.metadata or [])
        metadata.extend(_get_authentication_metadata_tuple())
        return _ClientCallDetails(
            method=client_call_details.method,
            timeout=client_call_details.timeout,
            metadata=metadata,
            credentials=client_call_details.credentials,
            # Not every ClientCallDetails implementation defines these two
            # fields, so fall back to None via getattr.
            wait_for_ready=getattr(client_call_details, "wait_for_ready", None),
            compression=getattr(client_call_details, "compression", None),
        )
    def intercept_unary_unary(self, continuation, client_call_details, request):
        # Attach auth metadata, then continue the RPC unchanged.
        new_details = self._intercept_call_details(client_call_details)
        return continuation(new_details, request)
    def intercept_unary_stream(self, continuation, client_call_details, request):
        new_details = self._intercept_call_details(client_call_details)
        return continuation(new_details, request)
    def intercept_stream_unary(
        self, continuation, client_call_details, request_iterator
    ):
        new_details = self._intercept_call_details(client_call_details)
        return continuation(new_details, request_iterator)
    def intercept_stream_stream(
        self, continuation, client_call_details, request_iterator
    ):
        new_details = self._intercept_call_details(client_call_details)
        return continuation(new_details, request_iterator)
def _intercept_call_details_async(client_call_details):
    """Return a copy of the call details with auth metadata appended (async version)."""
    details = client_call_details
    # Start from whatever metadata the caller supplied (may be None) and
    # append the authentication entries.
    merged_metadata = list(details.metadata or [])
    merged_metadata += _get_authentication_metadata_tuple()
    return _ClientCallDetails(
        method=details.method,
        timeout=details.timeout,
        metadata=merged_metadata,
        credentials=details.credentials,
        # These two fields are optional on some ClientCallDetails
        # implementations, so fall back to None via getattr.
        wait_for_ready=getattr(details, "wait_for_ready", None),
        compression=getattr(details, "compression", None),
    )
# NOTE: gRPC aio's Channel.__init__ uses if-elif chains to categorize interceptors,
# so a single class inheriting from multiple interceptor types will only be registered
# for the first matching type. We must use separate classes for each RPC type.
# See: https://github.com/grpc/grpc/blob/master/src/python/grpcio/grpc/aio/_channel.py
class _AsyncUnaryUnaryAuthInterceptor(aiogrpc.UnaryUnaryClientInterceptor):
    """Async unary-unary interceptor that adds authentication metadata."""
    async def intercept_unary_unary(self, continuation, client_call_details, request):
        # Attach the auth token to the outgoing metadata, then continue the RPC.
        new_details = _intercept_call_details_async(client_call_details)
        return await continuation(new_details, request)
class _AsyncUnaryStreamAuthInterceptor(aiogrpc.UnaryStreamClientInterceptor):
    """Async unary-stream interceptor that adds authentication metadata."""
    async def intercept_unary_stream(self, continuation, client_call_details, request):
        # Attach the auth token to the outgoing metadata, then continue the RPC.
        new_details = _intercept_call_details_async(client_call_details)
        return await continuation(new_details, request)
class _AsyncStreamUnaryAuthInterceptor(aiogrpc.StreamUnaryClientInterceptor):
    """Async stream-unary interceptor that adds authentication metadata."""
    async def intercept_stream_unary(
        self, continuation, client_call_details, request_iterator
    ):
        # Attach the auth token to the outgoing metadata, then continue the RPC.
        new_details = _intercept_call_details_async(client_call_details)
        return await continuation(new_details, request_iterator)
class _AsyncStreamStreamAuthInterceptor(aiogrpc.StreamStreamClientInterceptor):
    """Async stream-stream interceptor that adds authentication metadata."""
    async def intercept_stream_stream(
        self, continuation, client_call_details, request_iterator
    ):
        # Attach the auth token to the outgoing metadata, then continue the RPC.
        new_details = _intercept_call_details_async(client_call_details)
        return await continuation(new_details, request_iterator)
def get_async_auth_interceptors():
    """Get a list of async authentication interceptors for all RPC types.

    gRPC aio channels only register a multi-inheritance interceptor for the
    first matching RPC type, so a distinct instance is created per type.

    Returns:
        List of interceptor instances for unary-unary, unary-stream,
        stream-unary, and stream-stream RPCs.
    """
    interceptor_types = (
        _AsyncUnaryUnaryAuthInterceptor,
        _AsyncUnaryStreamAuthInterceptor,
        _AsyncStreamUnaryAuthInterceptor,
        _AsyncStreamStreamAuthInterceptor,
    )
    return [interceptor_cls() for interceptor_cls in interceptor_types]
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/_private/authentication/grpc_authentication_client_interceptor.py",
"license": "Apache License 2.0",
"lines": 115,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/llm/doc_code/serve/multi_gpu/dp_basic_example.py | """
This file serves as a documentation example and CI test for basic data parallel attention deployment.
Structure:
1. Monkeypatch setup: Ensures serve.run is non-blocking and removes accelerator requirements for CI testing.
2. Docs example (between __dp_basic_example_start/end__): Embedded in Sphinx docs via literalinclude.
3. Test validation (deployment status polling + cleanup)
"""
import time
from ray import serve
from ray.serve.schema import ApplicationStatus
from ray.serve._private.constants import SERVE_DEFAULT_APP_NAME
from ray.serve import llm
_original_serve_run = serve.run
_original_build_dp_openai_app = llm.build_dp_openai_app
def _non_blocking_serve_run(app, **kwargs):
    """Delegate to the real serve.run with blocking forced off (keeps CI from hanging)."""
    return _original_serve_run(app, **{**kwargs, "blocking": False})
def _testing_build_dp_openai_app(builder_config, **kwargs):
    """Strip accelerator requirements so the docs example runs on CI hardware."""
    # The config may be absent, and (when present) may or may not carry an
    # accelerator_type attribute; getattr covers both cases.
    llm_config = builder_config.get("llm_config")
    if getattr(llm_config, "accelerator_type", None) is not None:
        llm_config.accelerator_type = None
    return _original_build_dp_openai_app(builder_config, **kwargs)
serve.run = _non_blocking_serve_run
llm.build_dp_openai_app = _testing_build_dp_openai_app
# __dp_basic_example_start__
from ray import serve
from ray.serve.llm import LLMConfig, build_dp_openai_app
# Configure the model with data parallel settings
config = LLMConfig(
model_loading_config={
"model_id": "microsoft/Phi-tiny-MoE-instruct"
},
engine_kwargs={
"data_parallel_size": 2, # Number of DP replicas
"tensor_parallel_size": 1, # TP size per replica
# Reduced for CI compatibility
"max_model_len": 1024,
"max_num_seqs": 32,
},
experimental_configs={
# This is a temporary required config. We will remove this in future versions.
"dp_size_per_node": 2, # DP replicas per node
},
)
app = build_dp_openai_app({
"llm_config": config
})
serve.run(app, blocking=True)
# __dp_basic_example_end__
# Test validation: poll the default Serve application until it reports
# RUNNING, failing fast on terminal failure states.
status = ApplicationStatus.NOT_STARTED
timeout_seconds = 300
start_time = time.time()
while (
    status != ApplicationStatus.RUNNING and time.time() - start_time < timeout_seconds
):
    status = serve.status().applications[SERVE_DEFAULT_APP_NAME].status
    if status in [ApplicationStatus.DEPLOY_FAILED, ApplicationStatus.UNHEALTHY]:
        raise AssertionError(f"Deployment failed with status: {status}")
    time.sleep(1)
if status != ApplicationStatus.RUNNING:
    # Reaching here means the polling loop timed out before RUNNING.
    raise AssertionError(
        f"Deployment failed to reach RUNNING status within {timeout_seconds}s. Current status: {status}"
    )
serve.shutdown()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/llm/doc_code/serve/multi_gpu/dp_basic_example.py",
"license": "Apache License 2.0",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/llm/doc_code/serve/multi_gpu/dp_pd_example.py | """
This file serves as a documentation example and CI test for data parallel + prefill-decode disaggregation.
Structure:
1. Monkeypatch setup: Ensures serve.run is non-blocking and removes accelerator requirements for CI testing.
2. Docs example (between __dp_pd_example_start/end__): Embedded in Sphinx docs via literalinclude.
3. Test validation (deployment status polling + cleanup)
"""
import time
from ray import serve
from ray.serve.schema import ApplicationStatus
from ray.serve._private.constants import SERVE_DEFAULT_APP_NAME
from ray.serve import llm
from ray.serve.llm.deployment import PDProxyServer
from ray.serve.llm.ingress import OpenAiIngress, make_fastapi_ingress
# Check if NIXL is available (required for NixlConnector)
try:
import nixl # noqa: F401
NIXL_AVAILABLE = True
except ImportError:
NIXL_AVAILABLE = False
if not NIXL_AVAILABLE:
raise ImportError(
"NIXL is required for this example but is not installed. "
"Install it with: pip install nixl or uv pip install nixl"
)
_original_serve_run = serve.run
_original_build_dp_deployment = llm.build_dp_deployment
def _non_blocking_serve_run(app, **kwargs):
    """Delegate to the real serve.run with blocking forced off (keeps CI from hanging)."""
    return _original_serve_run(app, **{**kwargs, "blocking": False})
def _testing_build_dp_deployment(llm_config, **kwargs):
    """Removes accelerator requirements for testing."""
    # CI machines may not provide the accelerator type the docs example
    # requests, so clear it before delegating to the real builder.
    if llm_config.accelerator_type is not None:
        llm_config.accelerator_type = None
    return _original_build_dp_deployment(llm_config, **kwargs)
serve.run = _non_blocking_serve_run
llm.build_dp_deployment = _testing_build_dp_deployment
# __dp_pd_example_start__
from ray import serve
from ray.serve.llm import LLMConfig, build_dp_deployment
from ray.serve.llm.deployment import PDProxyServer
from ray.serve.llm.ingress import OpenAiIngress, make_fastapi_ingress
# Configure prefill with data parallel attention
prefill_config = LLMConfig(
model_loading_config={
"model_id": "microsoft/Phi-tiny-MoE-instruct"
},
engine_kwargs={
"data_parallel_size": 2, # 2 DP replicas for prefill
"tensor_parallel_size": 1,
"kv_transfer_config": {
"kv_connector": "NixlConnector",
"kv_role": "kv_both",
},
# Reduced for CI compatibility
"max_model_len": 1024,
"max_num_seqs": 32,
},
experimental_configs={
"dp_size_per_node": 2,
},
)
# Configure decode with data parallel attention
decode_config = LLMConfig(
model_loading_config={
"model_id": "microsoft/Phi-tiny-MoE-instruct"
},
engine_kwargs={
"data_parallel_size": 2, # 2 DP replicas for decode (adjusted for 4 GPU limit)
"tensor_parallel_size": 1,
"kv_transfer_config": {
"kv_connector": "NixlConnector",
"kv_role": "kv_both",
},
# Reduced for CI compatibility
"max_model_len": 1024,
"max_num_seqs": 32,
},
experimental_configs={
"dp_size_per_node": 2,
},
)
# Build prefill and decode deployments with DP
prefill_deployment = build_dp_deployment(prefill_config, name_prefix="Prefill:")
decode_deployment = build_dp_deployment(decode_config, name_prefix="Decode:")
# Create PDProxyServer to coordinate between prefill and decode
proxy_options = PDProxyServer.get_deployment_options(prefill_config, decode_config)
proxy_deployment = serve.deployment(PDProxyServer).options(**proxy_options).bind(
prefill_server=prefill_deployment,
decode_server=decode_deployment,
)
# Create OpenAI-compatible ingress
ingress_options = OpenAiIngress.get_deployment_options([prefill_config, decode_config])
ingress_cls = make_fastapi_ingress(OpenAiIngress)
ingress_deployment = serve.deployment(ingress_cls).options(**ingress_options).bind(
llm_deployments=[proxy_deployment]
)
# Deploy the application
serve.run(ingress_deployment, blocking=True)
# __dp_pd_example_end__
# Test validation: poll the default Serve application until it reports
# RUNNING, failing fast on terminal failure states.
status = ApplicationStatus.NOT_STARTED
timeout_seconds = 300  # Longer timeout for DP+PD setup
start_time = time.time()
while (
    status != ApplicationStatus.RUNNING and time.time() - start_time < timeout_seconds
):
    status = serve.status().applications[SERVE_DEFAULT_APP_NAME].status
    if status in [ApplicationStatus.DEPLOY_FAILED, ApplicationStatus.UNHEALTHY]:
        raise AssertionError(f"Deployment failed with status: {status}")
    time.sleep(1)
if status != ApplicationStatus.RUNNING:
    # Reaching here means the polling loop timed out before RUNNING.
    raise AssertionError(
        f"Deployment failed to reach RUNNING status within {timeout_seconds}s. Current status: {status}"
    )
serve.shutdown()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/llm/doc_code/serve/multi_gpu/dp_pd_example.py",
"license": "Apache License 2.0",
"lines": 116,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/llm/_internal/serve/serving_patterns/data_parallel/builder.py | import pprint
from typing import Any, Optional, Union
from pydantic import Field, field_validator
from ray import serve
from ray.llm._internal.common.base_pydantic import BaseModelExtended
from ray.llm._internal.common.dict_utils import deep_merge_dicts
from ray.llm._internal.serve.core.configs.llm_config import LLMConfig
from ray.llm._internal.serve.core.ingress.builder import IngressClsConfig
from ray.llm._internal.serve.core.ingress.ingress import (
make_fastapi_ingress,
)
from ray.llm._internal.serve.core.server.builder import build_llm_deployment
from ray.llm._internal.serve.observability.logging import get_logger
from ray.llm._internal.serve.serving_patterns.data_parallel.dp_rank_assigner import (
_DPRankAssigner,
)
from ray.llm._internal.serve.serving_patterns.data_parallel.dp_server import (
DPServer,
)
from ray.serve.deployment import Application
logger = get_logger(__name__)
def build_dp_deployment(
    llm_config: LLMConfig,
    *,
    name_prefix: Optional[str] = None,
    override_serve_options: Optional[dict] = None,
) -> Application:
    """Build a data parallel attention LLM deployment.

    Args:
        llm_config: The LLM configuration.
        name_prefix: The prefix to add to the deployment name.
        override_serve_options: The optional serve options to override the
            default options.

    Returns:
        The Ray Serve Application for the data parallel attention LLM deployment.

    Raises:
        ValueError: If ``dp_size_per_node`` is missing from
            ``experimental_configs``.
    """
    # TODO(rui): figure out a better way to pass in dp_size_per_node.
    # NOTE: we cannot use engine_kwargs.data_parallel_size_local to specify
    # the number of ranks per node because that has special semantics in vLLM.
    # When we make serve's rank assignment node-affinity aware, this hack to
    # distribute the ranks across nodes will no longer be needed.
    dp_size_per_node = llm_config.experimental_configs.get("dp_size_per_node")
    if dp_size_per_node is None:
        raise ValueError(
            "dp_size_per_node must be set in experimental_configs for DP deployment."
        )
    # A single rank assigner actor coordinates rank assignment across all
    # DP replicas of this deployment.
    rank_assigner = _DPRankAssigner.bind(
        dp_size=llm_config.engine_kwargs.get("data_parallel_size", 1),
        dp_size_per_node=dp_size_per_node,
    )
    return build_llm_deployment(
        llm_config,
        name_prefix=name_prefix,
        bind_kwargs={"dp_rank_assigner": rank_assigner},
        override_serve_options=override_serve_options,
        deployment_cls=DPServer,
    )
class DPOpenAiServingArgs(BaseModelExtended):
    """Schema for DP OpenAI serving args.

    Consumed by ``build_dp_openai_app``; the field validators normalize
    loosely-typed inputs (paths, dicts) into their typed config objects.
    """
    llm_config: Union[str, dict, LLMConfig] = Field(
        description="The LLM configuration",
    )
    ingress_cls_config: Union[dict, IngressClsConfig] = Field(
        default_factory=IngressClsConfig,
        description="The configuration for the ingress class.",
    )
    ingress_deployment_config: Optional[dict] = Field(
        default_factory=dict,
        description="The Ray @server.deployment options for the ingress server.",
    )
    @field_validator("llm_config")
    @classmethod
    def _validate_llm_config(cls, value: Any) -> LLMConfig:
        # Accept a file path, a raw dict, or an already-built LLMConfig and
        # normalize all three to an LLMConfig instance.
        if isinstance(value, str):
            return LLMConfig.from_file(value)
        elif isinstance(value, dict):
            return LLMConfig.model_validate(value)
        elif isinstance(value, LLMConfig):
            return value
        else:
            raise TypeError(f"Invalid LLMConfig type: {type(value)}")
    @field_validator("ingress_cls_config")
    @classmethod
    def _validate_ingress_cls_config(cls, value: Any) -> IngressClsConfig:
        # Dicts are parsed into IngressClsConfig; instances pass through as-is.
        if isinstance(value, dict):
            return IngressClsConfig.model_validate(value)
        return value
def build_dp_openai_app(builder_config: dict) -> Application:
    """Build an OpenAI compatible app with the DP attention deployment
    setup from the given builder configuration.
    Args:
        builder_config: The configuration for the builder. It has to conform
            to the DPOpenAiServingArgs pydantic model.
    Returns:
        The configured Ray Serve Application.
    """
    # Validate/normalize the raw dict into the typed args model (this also
    # parses llm_config from a path or dict via the field validators).
    builder_config = DPOpenAiServingArgs.model_validate(builder_config)
    llm_config = builder_config.llm_config
    dp_deployment = build_dp_deployment(llm_config)
    ingress_cls_config = builder_config.ingress_cls_config
    # Start from the ingress class's default deployment options...
    ingress_options = ingress_cls_config.ingress_cls.get_deployment_options(
        [llm_config]
    )
    # ...and let any user-supplied ingress deployment options take precedence.
    if builder_config.ingress_deployment_config:
        ingress_options = deep_merge_dicts(
            ingress_options, builder_config.ingress_deployment_config
        )
    ingress_cls = make_fastapi_ingress(ingress_cls_config.ingress_cls)
    logger.info("============== Ingress Options ==============")
    logger.info(pprint.pformat(ingress_options))
    return serve.deployment(ingress_cls, **ingress_options).bind(
        llm_deployments=[dp_deployment],
        **ingress_cls_config.ingress_extra_kwargs,
    )
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/_internal/serve/serving_patterns/data_parallel/builder.py",
"license": "Apache License 2.0",
"lines": 114,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/data/doc_code/working-with-llms/minimal_quickstart.py | """
Quickstart: vLLM + Ray Data batch inference.
1. Installation
2. Dataset creation
3. Processor configuration
4. Running inference
5. Getting results
"""
# __minimal_vllm_quickstart_start__
import ray
from ray.data.llm import vLLMEngineProcessorConfig, build_processor
# Initialize Ray
ray.init()
# Build a small in-memory dataset of prompts
ds = ray.data.from_items([
    {"prompt": "What is machine learning?"},
    {"prompt": "Explain neural networks in one sentence."},
])
# Minimal vLLM configuration
config = vLLMEngineProcessorConfig(
    model_source="unsloth/Llama-3.1-8B-Instruct",
    concurrency=1,  # 1 vLLM engine replica
    batch_size=32,  # 32 samples per batch
    engine_kwargs={
        "max_model_len": 4096,  # Fit into test GPU memory
    }
)
# Build processor
# preprocess: converts input row to format expected by vLLM (OpenAI chat format)
# postprocess: extracts generated text from vLLM output
processor = build_processor(
    config,
    preprocess=lambda row: {
        "messages": [{"role": "user", "content": row["prompt"]}],
        "sampling_params": {"temperature": 0.7, "max_tokens": 100},
    },
    postprocess=lambda row: {
        "prompt": row["prompt"],
        "response": row["generated_text"],
    },
)
# Run batch inference over the dataset
ds = processor(ds)
# Iterate through the results
for result in ds.iter_rows():
    print(f"Q: {result['prompt']}")
    print(f"A: {result['response']}\n")
# Alternative ways to get results:
# results = ds.take(10) # Get first 10 results
# ds.show(limit=5) # Print first 5 results
# ds.write_parquet("output.parquet") # Save to file
# __minimal_vllm_quickstart_end__
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/data/doc_code/working-with-llms/minimal_quickstart.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/serve/tests/test_list_outbound_deployments.py | import sys
from typing import List
import pytest
import ray
from ray import serve
from ray.serve._private.common import DeploymentID
from ray.serve._private.constants import SERVE_NAMESPACE
from ray.serve.handle import DeploymentHandle
@serve.deployment
class DownstreamA:
    """Trivial downstream deployment that doubles its input (callable via __call__)."""
    def __call__(self, x: int) -> int:
        return x * 2
@serve.deployment
class DownstreamB:
    """Trivial downstream deployment exposing a named method (not __call__)."""
    def process(self, x: int) -> int:
        return x + 10
@serve.deployment
class UpstreamWithStoredHandles:
    """Upstream deployment that stores DeploymentHandles as plain attributes."""
    def __init__(self, handle_a: DeploymentHandle, handle_b: DeploymentHandle):
        self.handle_a = handle_a
        self.handle_b = handle_b
    async def __call__(self, x: int) -> int:
        # Fan out to both downstreams and combine: (x * 2) + (x + 10).
        result_a = await self.handle_a.remote(x)
        result_b = await self.handle_b.process.remote(x)
        return result_a + result_b
@serve.deployment
class UpstreamWithNestedHandles:
    """Upstream deployment that stores handles inside nested containers."""
    def __init__(self, handles_dict: dict, handles_list: list):
        self.handles = handles_dict  # {"a": handle_a, "b": handle_b}
        self.handle_list = handles_list  # [handle_a, handle_b]
    async def __call__(self, x: int) -> int:
        # Only the dict copies are called; the list copies exist to test
        # duplicate detection.
        result_a = await self.handles["a"].remote(x)
        result_b = await self.handles["b"].process.remote(x)
        return result_a + result_b
@serve.deployment
class DynamicDeployment:
    """Deployment that resolves handles at request time instead of storing them."""
    async def __call__(self, x: int, app_name1: str, app_name2: str) -> int:
        # Handles are looked up dynamically by app name on every call.
        handle_a = serve.get_deployment_handle("DownstreamA", app_name=app_name1)
        handle_b = serve.get_deployment_handle("DownstreamB", app_name=app_name2)
        result_a = await handle_a.remote(x)
        result_b = await handle_b.process.remote(x)
        return result_a + result_b
def get_replica_actor_handle(deployment_name: str, app_name: str):
    """Find the named replica actor for a deployment within an application.

    Raises:
        RuntimeError: If no replica actor matching the deployment is found.
    """
    # Replica actor names follow: SERVE_REPLICA::{app_name}#{deployment_name}#{suffix}
    prefix = f"SERVE_REPLICA::{app_name}#{deployment_name}#"
    actors = ray.util.list_named_actors(all_namespaces=True)
    replica_actor_name = next(
        (actor["name"] for actor in actors if actor["name"].startswith(prefix)),
        None,
    )
    if replica_actor_name is None:
        # Include every serve-related actor name to help diagnose the failure.
        all_actors = [a["name"] for a in actors if "SERVE" in a["name"]]
        raise RuntimeError(
            f"Could not find replica actor for {deployment_name} in app {app_name}. "
            f"Available serve actors: {all_actors}"
        )
    return ray.get_actor(replica_actor_name, namespace=SERVE_NAMESPACE)
@pytest.mark.asyncio
class TestListOutboundDeployments:
    """Test suite for list_outbound_deployments() method."""
    async def test_stored_handles_in_init(self, serve_instance):
        """Test listing handles that are passed to __init__ and stored as attributes."""
        app_name = "test_stored_handles"
        # Build and deploy the app
        handle_a = DownstreamA.bind()
        handle_b = DownstreamB.bind()
        app = UpstreamWithStoredHandles.bind(handle_a, handle_b)
        serve.run(app, name=app_name)
        # Get the replica actor for the upstream deployment
        replica_actor = get_replica_actor_handle("UpstreamWithStoredHandles", app_name)
        # Call list_outbound_deployments
        outbound_deployments: List[DeploymentID] = ray.get(
            replica_actor.list_outbound_deployments.remote()
        )
        # Verify results: both stored handles should be reported.
        deployment_names = {dep_id.name for dep_id in outbound_deployments}
        assert "DownstreamA" in deployment_names
        assert "DownstreamB" in deployment_names
        assert len(outbound_deployments) == 2
        # Verify app names match
        for dep_id in outbound_deployments:
            assert dep_id.app_name == app_name
    async def test_nested_handles_in_dict_and_list(self, serve_instance):
        """Test listing handles stored in nested data structures (dict, list)."""
        app_name = "test_nested_handles"
        # Build and deploy the app
        handle_a = DownstreamA.bind()
        handle_b = DownstreamB.bind()
        # The same handles are stored twice (dict and list) on purpose, to
        # exercise deduplication.
        handles_dict = {"a": handle_a, "b": handle_b}
        handles_list = [handle_a, handle_b]
        app = UpstreamWithNestedHandles.bind(handles_dict, handles_list)
        serve.run(app, name=app_name)
        # Get the replica actor
        replica_actor = get_replica_actor_handle("UpstreamWithNestedHandles", app_name)
        # Call list_outbound_deployments
        outbound_deployments: List[DeploymentID] = ray.get(
            replica_actor.list_outbound_deployments.remote()
        )
        # Verify results (should find handles despite being in nested structures)
        deployment_names = {dep_id.name for dep_id in outbound_deployments}
        assert "DownstreamA" in deployment_names
        assert "DownstreamB" in deployment_names
        # Verify no duplicates (handle_a and handle_b appear in both dict and list)
        assert len(outbound_deployments) == 2
    async def test_no_handles(self, serve_instance):
        """Test deployment with no outbound handles."""
        app_name = "test_no_handles"
        # Deploy a simple deployment with no handles
        app = DownstreamA.bind()
        serve.run(app, name=app_name)
        # Get the replica actor
        replica_actor = get_replica_actor_handle("DownstreamA", app_name)
        # Call list_outbound_deployments
        outbound_deployments: List[DeploymentID] = ray.get(
            replica_actor.list_outbound_deployments.remote()
        )
        # Should be empty
        assert len(outbound_deployments) == 0
    async def test_dynamic_handles(self, serve_instance):
        """Test that handles created at request time (serve.get_deployment_handle) are tracked."""
        app1 = DownstreamA.bind()
        app2 = DownstreamB.bind()
        app3 = DynamicDeployment.bind()
        serve.run(app1, name="app1", route_prefix="/app1")
        serve.run(app2, name="app2", route_prefix="/app2")
        handle = serve.run(app3, name="app3", route_prefix="/app3")
        # Make requests to trigger dynamic handle creation
        # x=1: DownstreamA returns 1*2=2, DownstreamB returns 1+10=11, total=2+11=13
        results = [await handle.remote(1, "app1", "app2") for _ in range(10)]
        for result in results:
            assert result == 13
        # Get the replica actor
        replica_actor = get_replica_actor_handle("DynamicDeployment", "app3")
        # Call list_outbound_deployments
        outbound_deployments: List[DeploymentID] = ray.get(
            replica_actor.list_outbound_deployments.remote()
        )
        # Verify results - should include dynamically created handles
        deployment_names = {dep_id.name for dep_id in outbound_deployments}
        assert "DownstreamA" in deployment_names
        assert "DownstreamB" in deployment_names
        assert len(outbound_deployments) == 2
        # Verify the app names are correct
        app_names = {dep_id.app_name for dep_id in outbound_deployments}
        assert "app1" in app_names
        assert "app2" in app_names
if __name__ == "__main__":
    # Allow running this test module directly via `python <file>`.
    sys.exit(pytest.main(["-v", "-s", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/test_list_outbound_deployments.py",
"license": "Apache License 2.0",
"lines": 148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/test_issue_detection.py | import io
import logging
import re
import time
from unittest.mock import MagicMock, patch
import pytest
import ray
from ray.data._internal.execution.interfaces.physical_operator import (
OpTask,
PhysicalOperator,
RefBundle,
TaskExecDriverStats,
)
from ray.data._internal.execution.operators.input_data_buffer import (
InputDataBuffer,
)
from ray.data._internal.execution.operators.task_pool_map_operator import (
MapOperator,
)
from ray.data._internal.issue_detection.detectors.hanging_detector import (
DEFAULT_OP_TASK_STATS_MIN_COUNT,
DEFAULT_OP_TASK_STATS_STD_FACTOR,
HangingExecutionIssueDetector,
HangingExecutionIssueDetectorConfig,
)
from ray.data._internal.issue_detection.detectors.high_memory_detector import (
HighMemoryIssueDetector,
)
from ray.data.block import BlockMetadata, TaskExecWorkerStats
from ray.data.context import DataContext
from ray.tests.conftest import * # noqa
class FakeOpTask(OpTask):
    """Minimal OpTask stand-in for driving the issue detectors in tests."""

    def __init__(self, task_index: int):
        super().__init__(task_index)

    def get_waitable(self):
        """Return a dummy waitable (an ObjectRef wrapping None)."""
        dummy_ref = ray.put(None)
        return dummy_ref
class FakeOperator(PhysicalOperator):
    """No-op PhysicalOperator stub: accepts inputs, never produces output."""

    def __init__(self, name: str, data_context: DataContext):
        super().__init__(name=name, input_dependencies=[], data_context=data_context)

    def _add_input_inner(self, refs: RefBundle, input_index: int) -> None:
        # Inputs are intentionally discarded.
        pass

    def has_next(self) -> bool:
        return False

    def _get_next_inner(self) -> RefBundle:
        # Pulling output from this stub is always a test bug.
        assert False

    def get_stats(self):
        return {}

    def get_active_tasks(self):
        # Derive active tasks from the metrics' running-task registry so
        # that has_execution_finished() behaves consistently.
        return [FakeOpTask(i) for i in self.metrics._running_tasks]
class TestHangingExecutionIssueDetector:
    """Tests for HangingExecutionIssueDetector configuration and detection logic."""

    def test_hanging_detector_configuration(self, restore_data_context):
        """Test hanging detector configuration and initialization."""
        # Default configuration should come from DataContext.
        ctx = DataContext.get_current()
        default_config = ctx.issue_detectors_config.hanging_detector_config
        assert default_config.op_task_stats_min_count == DEFAULT_OP_TASK_STATS_MIN_COUNT
        assert (
            default_config.op_task_stats_std_factor == DEFAULT_OP_TASK_STATS_STD_FACTOR
        )
        # A custom configuration must propagate to the detector's fields.
        min_count = 5
        std_factor = 3.0
        custom_config = HangingExecutionIssueDetectorConfig(
            op_task_stats_min_count=min_count,
            op_task_stats_std_factor=std_factor,
        )
        ctx.issue_detectors_config.hanging_detector_config = custom_config
        detector = HangingExecutionIssueDetector(
            dataset_id="id", operators=[], config=custom_config
        )
        assert detector._op_task_stats_min_count == min_count
        assert detector._op_task_stats_std_factor_threshold == std_factor

    @patch(
        "ray.data._internal.execution.interfaces.op_runtime_metrics.TaskDurationStats"
    )
    def test_basic_hanging_detection(
        self, mock_stats_cls, ray_start_regular_shared, restore_data_context
    ):
        """A task running far beyond the mocked mean+std threshold logs a warning."""
        # Capture everything the issue-detection logger emits.
        # NOTE(review): the handler is never removed, so it leaks into later
        # tests in this process — consider addHandler/removeHandler pairing.
        log_capture = io.StringIO()
        handler = logging.StreamHandler(log_capture)
        logger = logging.getLogger("ray.data._internal.issue_detection")
        logger.addHandler(handler)
        # Mock the per-operator task duration stats so the adaptive threshold
        # is mean + std_factor * stddev over these fixed values.
        mocked_mean = 2.0  # Increase from 0.5 to 2.0 seconds
        mocked_stddev = 0.2  # Increase from 0.05 to 0.2 seconds
        mock_stats = mock_stats_cls.return_value
        mock_stats.count.return_value = 20  # Enough samples
        mock_stats.mean.return_value = mocked_mean
        mock_stats.stddev.return_value = mocked_stddev
        # Use a zero detection interval so the detector runs on every tick.
        ctx = DataContext.get_current()
        detector_cfg = ctx.issue_detectors_config.hanging_detector_config
        detector_cfg.detection_time_interval_s = 0.00

        # A fast task must NOT trigger the hanging warning.
        def f1(x):
            return x

        _ = ray.data.range(1).map(f1).materialize()
        log_output = log_capture.getvalue()
        warn_msg = r"A task of operator .+ \(pid=.+, node_id=.+, attempt=.+\) has been running for [\d\.]+s"
        assert re.search(warn_msg, log_output) is None, log_output

        # A task sleeping past the threshold MUST trigger the hanging warning.
        def f2(x):
            time.sleep(5.0)  # Exceeds mean + std_factor * stddev by a wide margin.
            return x

        _ = ray.data.range(1).map(f2).materialize()
        log_output = log_capture.getvalue()
        assert re.search(warn_msg, log_output) is not None, log_output

    @patch("time.perf_counter")
    def test_hanging_deitector_detects_issues(
        self, mock_perf_counter, ray_start_regular_shared
    ):
        """Test that the hanging detector correctly identifies tasks that exceed the adaptive threshold.

        NOTE(review): "deitector" in the test name is a typo, kept for
        test-selection stability.
        """
        # Configure hanging detector so a single finished task suffices to
        # establish the baseline (min_count=1) with a tight threshold.
        config = HangingExecutionIssueDetectorConfig(
            op_task_stats_min_count=1,
            op_task_stats_std_factor=1,
            detection_time_interval_s=0,
        )
        op = FakeOperator("TestOperator", DataContext.get_current())
        detector = HangingExecutionIssueDetector(
            dataset_id="test_dataset", operators=[op], config=config
        )
        # Create a simple RefBundle for testing.
        block_ref = ray.put([{"id": 0}])
        metadata = BlockMetadata(
            num_rows=1, size_bytes=1, exec_stats=None, input_files=None
        )
        input_bundle = RefBundle(
            blocks=((block_ref, metadata),), owns_blocks=True, schema=None
        )
        # Freeze time at t=0 for all submissions/finishes.
        mock_perf_counter.return_value = 0.0
        # Submit three tasks. Two finish immediately; the third never finishes.
        op.metrics.on_task_submitted(0, input_bundle)
        op.metrics.on_task_submitted(1, input_bundle)
        op.metrics.on_task_submitted(2, input_bundle)
        op.metrics.on_task_finished(
            0,
            exception=None,
            task_exec_stats=TaskExecWorkerStats(task_wall_time_s=0.0),
            task_exec_driver_stats=TaskExecDriverStats(task_output_backpressure_s=0),
        )
        op.metrics.on_task_finished(
            1,
            exception=None,
            task_exec_stats=TaskExecWorkerStats(task_wall_time_s=0.0),
            task_exec_driver_stats=TaskExecDriverStats(task_output_backpressure_s=0),
        )
        # At t=0 nothing has run long enough to be hanging.
        issues = detector.detect()
        assert len(issues) == 0
        # Advance time far past the adaptive threshold.
        mock_perf_counter.return_value = 10.0
        # Now the still-running task 2 should be flagged.
        issues = detector.detect()
        assert len(issues) > 0, "Expected hanging issue to be detected"
        assert issues[0].issue_type.value == "hanging"
        assert "has been running for" in issues[0].message
        assert "longer than the average task duration" in issues[0].message
@pytest.mark.parametrize(
    "configured_memory, actual_memory, should_return_issue",
    [
        # User has appropriately configured memory, so no issue.
        (4 * 1024**3, 4 * 1024**3, False),
        # User hasn't configured memory correctly and memory use is high, so issue.
        (None, 4 * 1024**3, True),
        (1, 4 * 1024**3, True),
        # User hasn't configured memory correctly but memory use is low, so no issue.
        (None, 4 * 1024**3 - 1, False),
    ],
)
def test_high_memory_detection(
    configured_memory, actual_memory, should_return_issue, restore_data_context
):
    """HighMemoryIssueDetector flags operators whose observed per-task memory
    exceeds the user-configured `memory` resource (or a default threshold when
    none is configured)."""
    ctx = DataContext.get_current()
    input_data_buffer = InputDataBuffer(ctx, input_data=[])
    # Build a map operator with the configured memory resource under test.
    map_operator = MapOperator.create(
        map_transformer=MagicMock(),
        input_op=input_data_buffer,
        data_context=ctx,
        ray_remote_args={"memory": configured_memory},
    )
    # Fake the observed memory usage that the detector reads from metrics.
    map_operator._metrics = MagicMock(average_max_uss_per_task=actual_memory)
    topology = {input_data_buffer: MagicMock(), map_operator: MagicMock()}
    operators = list(topology.keys())
    detector = HighMemoryIssueDetector(
        dataset_id="id",
        operators=operators,
        config=ctx.issue_detectors_config.high_memory_detector_config,
    )
    issues = detector.detect()
    assert should_return_issue == bool(issues)
if __name__ == "__main__":
    import sys

    # Allow running this test module directly via `python <file>`.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_issue_detection.py",
"license": "Apache License 2.0",
"lines": 199,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/_private/authentication/http_token_authentication.py | import logging
from types import ModuleType
from typing import Dict, List, Optional
from ray._private.authentication import (
authentication_constants,
authentication_utils as auth_utils,
)
logger = logging.getLogger(__name__)
def get_token_auth_middleware(
    aiohttp_module: ModuleType,
    whitelisted_exact_paths: Optional[List[str]] = None,
    whitelisted_path_prefixes: Optional[List[str]] = None,
):
    """Internal helper to create token auth middleware with provided modules.

    Args:
        aiohttp_module: The aiohttp module to use.
        whitelisted_exact_paths: List of exact paths that don't require
            authentication.
        whitelisted_path_prefixes: List of path prefixes that don't require
            authentication.

    Returns:
        An aiohttp middleware function.
    """
    # Precompute whitelist lookup structures once at middleware-creation time
    # instead of rebuilding the prefix tuple on every request.
    exact_paths = frozenset(whitelisted_exact_paths or ())
    path_prefixes = tuple(whitelisted_path_prefixes or ())

    @aiohttp_module.web.middleware
    async def token_auth_middleware(request, handler):
        """Middleware to validate bearer tokens when token authentication is enabled.

        In minimal Ray installations (without ray._raylet), this middleware is a no-op
        and passes all requests through without authentication.
        """
        # No-op if token auth is not enabled or raylet is not available.
        if not auth_utils.is_token_auth_enabled():
            return await handler(request)
        # Skip authentication for whitelisted paths.
        if request.path in exact_paths or (
            path_prefixes and request.path.startswith(path_prefixes)
        ):
            return await handler(request)
        # Try to get authentication token from multiple sources (in priority order):
        # 1. Standard "Authorization" header (for API clients, SDKs)
        # 2. Fallback "X-Ray-Authorization" header (for proxies and KubeRay)
        # 3. Cookie (for web dashboard sessions)
        auth_header = request.headers.get(
            authentication_constants.AUTHORIZATION_HEADER_NAME, ""
        )
        if not auth_header:
            auth_header = request.headers.get(
                authentication_constants.RAY_AUTHORIZATION_HEADER_NAME, ""
            )
        if not auth_header:
            token = request.cookies.get(
                authentication_constants.AUTHENTICATION_TOKEN_COOKIE_NAME
            )
            if token:
                # Format as Bearer token for validation.
                auth_header = (
                    authentication_constants.AUTHORIZATION_BEARER_PREFIX + token
                )
        if not auth_header:
            return aiohttp_module.web.Response(
                status=401, text="Unauthorized: Missing authentication token"
            )
        if not auth_utils.validate_request_token(auth_header):
            return aiohttp_module.web.Response(
                status=403, text="Forbidden: Invalid authentication token"
            )
        return await handler(request)

    return token_auth_middleware
def get_auth_headers_if_auth_enabled(user_headers: Dict[str, str]) -> Dict[str, str]:
    """Return auth headers to add to a request, or {} when none are needed.

    Does nothing when token auth is disabled or when the caller already
    supplied an Authorization header (matched case-insensitively).
    """
    if not auth_utils.is_token_auth_enabled():
        return {}

    from ray._raylet import AuthenticationTokenLoader

    # Respect a caller-supplied Authorization header (case-insensitive).
    lowered_keys = {key.lower() for key in user_headers}
    if authentication_constants.AUTHORIZATION_HEADER_NAME in lowered_keys:
        return {}

    auth_headers = AuthenticationTokenLoader.instance().get_token_for_http_header()
    if not auth_headers:
        # Token auth enabled but no token found.
        logger.warning(
            "Token authentication is enabled but no token was found. "
            "Requests to authenticated clusters will fail."
        )
    return auth_headers
def format_authentication_http_error(status: int, body: str) -> Optional[str]:
    """Return a user-friendly authentication error message, if applicable."""
    # Only 401/403 are authentication-related; anything else is not ours.
    if status not in (401, 403):
        return None
    if status == 401:
        return "Authentication required: {body}\n\n{details}".format(
            body=body,
            details=authentication_constants.TOKEN_AUTH_ENABLED_BUT_NO_TOKEN_FOUND_ERROR_MESSAGE,
        )
    return "Authentication failed: {body}\n\n{details}".format(
        body=body,
        details=authentication_constants.TOKEN_INVALID_ERROR_MESSAGE,
    )
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/_private/authentication/http_token_authentication.py",
"license": "Apache License 2.0",
"lines": 100,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/tests/test_runtime_env_agent_auth.py | import socket
import sys
import urllib.error
import urllib.parse
import urllib.request
import pytest
import ray
from ray._common.test_utils import wait_for_condition
from ray._private.authentication.http_token_authentication import (
format_authentication_http_error,
get_auth_headers_if_auth_enabled,
)
from ray._private.authentication_test_utils import (
reset_auth_token_state,
set_auth_mode,
set_env_auth_token,
)
from ray.core.generated import runtime_env_agent_pb2
def _agent_url(agent_address: str, path: str) -> str:
return urllib.parse.urljoin(agent_address, path)
def _make_get_or_create_request() -> runtime_env_agent_pb2.GetOrCreateRuntimeEnvRequest:
    """Build a minimal GetOrCreateRuntimeEnv request for exercising the agent."""
    req = runtime_env_agent_pb2.GetOrCreateRuntimeEnvRequest()
    req.source_process = "pytest"
    req.job_id = b"ray_client_test"
    req.serialized_runtime_env = "{}"
    req.runtime_env_config.setup_timeout_seconds = 1
    return req
def _wait_for_runtime_env_agent(agent_address: str) -> None:
    """Block until a TCP connection to the runtime env agent succeeds."""
    parsed = urllib.parse.urlparse(agent_address)
    host, port = parsed.hostname, parsed.port

    def _can_connect() -> bool:
        try:
            socket.create_connection((host, port), timeout=1).close()
            return True
        except OSError:
            return False

    wait_for_condition(_can_connect, timeout=10)
def test_runtime_env_agent_requires_auth_missing_token(setup_cluster_with_token_auth):
    """A request without any auth token is rejected with HTTP 401."""
    agent_address = ray._private.worker.global_worker.node.runtime_env_agent_address
    _wait_for_runtime_env_agent(agent_address)
    request = _make_get_or_create_request()
    # Send the proto request with NO Authorization header.
    with pytest.raises(urllib.error.HTTPError) as exc_info:
        urllib.request.urlopen(  # noqa: S310 - test controlled
            urllib.request.Request(
                _agent_url(agent_address, "/get_or_create_runtime_env"),
                data=request.SerializeToString(),
                headers={"Content-Type": "application/octet-stream"},
                method="POST",
            ),
            timeout=5,
        )
    assert exc_info.value.code == 401
    body = exc_info.value.read().decode("utf-8", "ignore")
    assert "Missing authentication token" in body
    # The helper should translate a 401 into an "Authentication required" message.
    formatted = format_authentication_http_error(401, body)
    assert formatted.startswith("Authentication required")
def test_runtime_env_agent_rejects_invalid_token(setup_cluster_with_token_auth):
    """A request with a wrong bearer token is rejected with HTTP 403."""
    agent_address = ray._private.worker.global_worker.node.runtime_env_agent_address
    _wait_for_runtime_env_agent(agent_address)
    request = _make_get_or_create_request()
    # Send a syntactically valid but incorrect bearer token.
    with pytest.raises(urllib.error.HTTPError) as exc_info:
        urllib.request.urlopen(  # noqa: S310 - test controlled
            urllib.request.Request(
                _agent_url(agent_address, "/get_or_create_runtime_env"),
                data=request.SerializeToString(),
                headers={
                    "Content-Type": "application/octet-stream",
                    "Authorization": "Bearer wrong_token",
                },
                method="POST",
            ),
            timeout=5,
        )
    assert exc_info.value.code == 403
    body = exc_info.value.read().decode("utf-8", "ignore")
    assert "Invalid authentication token" in body
    # The helper should translate a 403 into an "Authentication failed" message.
    formatted = format_authentication_http_error(403, body)
    assert formatted.startswith("Authentication failed")
def test_runtime_env_agent_accepts_valid_token(setup_cluster_with_token_auth):
    """A request carrying the cluster's real token succeeds end-to-end."""
    agent_address = ray._private.worker.global_worker.node.runtime_env_agent_address
    _wait_for_runtime_env_agent(agent_address)
    # Use the token that the fixture configured for the cluster.
    token = setup_cluster_with_token_auth["token"]
    request = _make_get_or_create_request()
    with urllib.request.urlopen(  # noqa: S310 - test controlled
        urllib.request.Request(
            _agent_url(agent_address, "/get_or_create_runtime_env"),
            data=request.SerializeToString(),
            headers={
                "Content-Type": "application/octet-stream",
                "Authorization": f"Bearer {token}",
            },
            method="POST",
        ),
        timeout=5,
    ) as response:
        # The agent should process the request and return an OK status.
        reply = runtime_env_agent_pb2.GetOrCreateRuntimeEnvReply()
        reply.ParseFromString(response.read())
    assert reply.status == runtime_env_agent_pb2.AgentRpcStatus.AGENT_RPC_STATUS_OK
def test_inject_token_if_enabled_adds_header(cleanup_auth_token_env):
    """When token auth is on and no header exists, the helper injects one."""
    set_auth_mode("token")
    set_env_auth_token("apptoken1234567890")
    reset_auth_token_state()
    headers = {}
    headers_to_add = get_auth_headers_if_auth_enabled(headers)
    assert headers_to_add != {}
    auth_header = headers_to_add["authorization"]
    # The loader may return bytes; normalize before comparing.
    if isinstance(auth_header, bytes):
        auth_header = auth_header.decode("utf-8")
    assert auth_header == "Bearer apptoken1234567890"
def test_inject_token_if_enabled_respects_existing_header(cleanup_auth_token_env):
    """A caller-supplied Authorization header must not be overridden."""
    set_auth_mode("token")
    set_env_auth_token("apptoken1234567890")
    reset_auth_token_state()
    headers = {"authorization": "Bearer custom"}
    headers_to_add = get_auth_headers_if_auth_enabled(headers)
    assert headers_to_add == {}
def test_format_authentication_http_error_non_auth_status():
    """Statuses other than 401/403 yield no authentication-specific message."""
    result = format_authentication_http_error(404, "not found")
    assert result is None
if __name__ == "__main__":
    # Allow running this test module directly via `python <file>`.
    sys.exit(pytest.main(["-vv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/test_runtime_env_agent_auth.py",
"license": "Apache License 2.0",
"lines": 119,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:doc/source/ray-core/examples/rdt/grpo_contextual_bandits.py | """
Reinforcement learning example using GPU-to-GPU Ray Direct Transport (RDT) and GRPO algorithm.
Based on: https://github.com/meta-pytorch/monarch/blob/0de4e6b4ad7da37e5dbb00a0e6fb61ef8105eac5/examples/presentation/demo.py
"""
import argparse
import time
from typing import Any
import ray
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.distributions import Categorical
# -- TrajectorySlice --
# TrajectorySlice holds one state's sampled actions and associated metadata:
# - state: The 2D input vector fed to the generator model.
# - actions: The generator model's predictions for this state.
# - policy_version: Version of the generator model when these actions were generated.
# - rewards: The per-action rewards computed by the scorer for this state.
# - old_logps: The log-probabilities of the sampled actions under the policy that generated them.
TrajectorySlice = dict[str, torch.Tensor | int]

# -- Training --
BATCH_SIZE = 32
# Keep learning rate low so that the model does not jump outside
# the trust policy region.
LEARNING_RATE = 1e-6
WEIGHT_DECAY = 1e-10
# Cap the gradient norm to prevent large updates in a single step.
GRAD_CLIP_NORM = 1.0

# -- GRPO algorithm --
# Number of actions to sample for each state.
GROUP_SIZE = 10
# How far the new policy is allowed to stray from the older policies
# AKA the "trust region".
GRPO_CLIP_EPS = 0.1
# Discard old experiences, so that the new model can gradually explore
# further from the initial random policy.
MAX_BUFFER_SIZE = BATCH_SIZE * GROUP_SIZE * 5

# -- Environment --
STATE_DIM = 2  # The contextual bandit operates in 2D.
ACTION_DIM = 8  # Eight compass directions: [W, NW, N, NE, E, SE, S, SW].
# Unit direction vectors for the eight compass actions (W, NW, N, NE, E, SE, S, SW).
DIAGONAL_MAGNITUDE = 2**0.5 / 2.0
ACTION_DIRECTIONS = torch.tensor(
    [
        [-1.0, 0.0],  # W
        [-DIAGONAL_MAGNITUDE, DIAGONAL_MAGNITUDE],  # NW
        [0.0, 1.0],  # N
        [DIAGONAL_MAGNITUDE, DIAGONAL_MAGNITUDE],  # NE
        [1.0, 0.0],  # E
        [DIAGONAL_MAGNITUDE, -DIAGONAL_MAGNITUDE],  # SE
        [0.0, -1.0],  # S
        [-DIAGONAL_MAGNITUDE, -DIAGONAL_MAGNITUDE],  # SW
    ],
    dtype=torch.float32,
)
# -- Model --
# To demonstrate speed-ups from RDT, we use an oversized model.
# Residual connections prevent vanishing gradients for deep models.
class ResidualBlock(torch.nn.Module):
    """Pre-norm residual block: x + Linear(ReLU(LayerNorm(x)))."""

    def __init__(self, hidden_dim: int) -> None:
        super().__init__()
        self.norm = torch.nn.LayerNorm(hidden_dim)
        self.activation = torch.nn.ReLU()
        self.linear = torch.nn.Linear(hidden_dim, hidden_dim, bias=True)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Transform a normalized/activated copy, then add the skip connection.
        return x + self.linear(self.activation(self.norm(x)))
class ResidualMLP(torch.nn.Module):  # Sized to ~50 MB of parameters.
    """Model used for Generator and Learner.

    Maps a 2D state vector to one logit per compass action.
    """

    def __init__(self, hidden_dim: int = 512, depth: int = 50):
        super().__init__()
        self.input = torch.nn.Linear(STATE_DIM, hidden_dim, bias=True)
        blocks = [ResidualBlock(hidden_dim) for _ in range(depth - 1)]
        self.backbone = torch.nn.ModuleList(blocks)
        self.head = torch.nn.Linear(hidden_dim, ACTION_DIM, bias=True)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        hidden = self.input(x)
        for layer in self.backbone:
            hidden = layer(hidden)
        return self.head(hidden)
# -- Utilities --
def sample_unit_vector(batch_size: int, dim: int = STATE_DIM) -> torch.Tensor:
    """Sample unit vectors of shape [batch_size, dim] by normalizing Gaussian draws."""
    assert batch_size > 1, "Batch size must be greater than 1"
    draws = torch.randn(batch_size, dim)
    # The small epsilon guards against a (vanishingly unlikely) zero draw.
    return draws / (draws.norm(dim=-1, keepdim=True) + 1e-8)
# -- Actors --
@ray.remote
class ReplayBuffer:
    """Storage for scored trajectory slices.

    This class stores the past experiences (AKA trajectories, or slices) of the model.
    This allows the learner to sample and learn from the same experiences multiple times
    by comparing the latest model with previous models.

    The sampler weights the trajectories by the policy version, such that trajectories
    produced by more recent versions of the model are more likely to be sampled.
    """

    def __init__(self) -> None:
        # Each entry stores a TrajectorySlice with CPU tensors.
        self.storage: list[TrajectorySlice] = []

    def put(self, trajectory: TrajectorySlice) -> None:
        """Add a new slice to the buffer.

        The buffer discards the oldest slices if it gets too large, both to
        prevent unbounded memory growth and so that the latest model can
        gradually explore further from the initial random policy.
        """
        # NOTE: renamed from `slice` to avoid shadowing the builtin.
        self.storage.append(trajectory)
        if len(self.storage) > MAX_BUFFER_SIZE:
            self.storage = self.storage[-MAX_BUFFER_SIZE:]

    def sample_from(self, n: int) -> list[TrajectorySlice]:
        """Sample n scored trajectory slices (with replacement).

        Each slice is a 'group' of actions sampled from the same state.
        Returns an empty list if fewer than n slices are stored.
        """
        if self.size() < n:
            return []
        # The probability of sampling a slice is proportional to its policy version,
        # biasing sampling toward experiences from more recent policies.
        total = sum(t["policy_version"] for t in self.storage)
        probs = [t["policy_version"] / total for t in self.storage]
        # The early return above guarantees size() >= n here, so no clamping needed.
        chosen = np.random.choice(self.size(), size=n, p=probs, replace=True)
        return [self.storage[i] for i in chosen]

    def size(self) -> int:
        """Number of slices currently stored."""
        return len(self.storage)
@ray.remote
class Scorer:
    """Evaluates actions and assigns rewards to trajectory slices.

    Implements an analytic contextual bandit reward: for a 2D unit context
    vector `s` and a discrete action `a` in {W, NW, N, NE, E, SE, S, SW},
    reward is cosine_similarity(s, direction[a]) == dot(s, unit_direction[a]).
    """

    def __init__(self, replay_buffer) -> None:
        self.replay_buffer = replay_buffer
        self.action_dirs = ACTION_DIRECTIONS  # [ACTION_DIM, STATE_DIM]

    @ray.method(tensor_transport="nixl")  # CPU-CPU RDT
    def score_slices(self, batched_slices: dict) -> None:
        """Score a batch of trajectory slices."""
        states = batched_slices["state"]
        actions = batched_slices["actions"]
        old_logps = batched_slices["old_logps"]
        version = batched_slices["policy_version"]
        # Split the batched groups apart so that each state's group can later
        # be sampled from the replay buffer independently.
        for idx in range(states.shape[0]):
            state = states[idx]
            # Reward on the CPU: rewards = dot(state, unit_dir) per action.
            chosen_dirs = self.action_dirs[actions[idx]]  # [GROUP_SIZE, STATE_DIM]
            scored = TrajectorySlice(
                policy_version=version,
                state=state,
                actions=actions[idx],
                old_logps=old_logps[idx],
                rewards=torch.mv(chosen_dirs, state),
            )
            self.replay_buffer.put.remote(scored)
@ray.remote(num_gpus=1)
class Learner:
    """Updates policy based on collected experiences using GRPO algorithm."""

    def __init__(self, replay_buffer) -> None:
        self.model = ResidualMLP().to("cuda")
        # Use smaller betas to favor recent momentum history.
        self.optim = optim.AdamW(
            self.model.parameters(),
            lr=LEARNING_RATE,
            weight_decay=WEIGHT_DECAY,
            betas=(0.9, 0.9),
        )
        self.replay_buffer = replay_buffer

    def _compute_advantages(self, rewards: torch.Tensor) -> torch.Tensor:
        """Compute advantages from rewards.

        In GRPO, advantages represent how much better a reward is compared to
        the mean reward for the group of actions. Normalizing the advantages
        stabilizes training by maintaining a consistent scale of updates.
        """
        # Unflatten rewards into [batch_size, GROUP_SIZE] in order to
        # compute per-state mean baselines.
        batch_size = rewards.shape[0] // GROUP_SIZE
        rewards_reshaped = rewards.view(batch_size, GROUP_SIZE)
        # Compute the mean reward for each state's group of actions.
        baselines = rewards_reshaped.mean(dim=1, keepdim=True)  # [batch_size, 1]
        # Subtract the mean reward from each action's reward to get advantages.
        advantages = rewards_reshaped - baselines  # [batch_size, GROUP_SIZE]
        # Flatten the advantages back to the original shape.
        advantages = advantages.reshape(-1)  # [batch_size * GROUP_SIZE]
        # Normalize the advantages for training stability.
        advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
        return advantages

    def _apply_policy_update(
        self,
        states: torch.Tensor,
        actions: torch.Tensor,
        old_logps: torch.Tensor,
        advantages: torch.Tensor,
    ) -> dict[str, float]:
        """Apply the clipped-surrogate GRPO update; returns loss/clip metrics."""
        # Compute the new policy's action log-probabilities.
        dist_new = Categorical(logits=self.model(states))
        new_logps = dist_new.log_prob(actions)
        # Compare the new log-probabilities to the old log-probabilities to get
        # the probability ratios — a proxy for how far the new policy has moved.
        ratio = (new_logps - old_logps).exp()
        unclipped = ratio * advantages
        # The 1 ± ε ratio defines the trust region. If the new policy's probability
        # for an action is more than 1 ± ε times the old policy's, clip the ratio
        # to prevent too-large updates.
        clipped = torch.clamp(ratio, 1 - GRPO_CLIP_EPS, 1 + GRPO_CLIP_EPS) * advantages
        loss = -torch.min(unclipped, clipped).mean()
        # Fraction of actions which did not contribute to the gradient update.
        clip_fraction = (
            ((ratio < 1 - GRPO_CLIP_EPS) | (ratio > 1 + GRPO_CLIP_EPS)).float().mean()
        )
        # Update the policy network.
        self.optim.zero_grad()
        loss.backward()
        # Clip the gradients to prevent exploding gradients and stabilize training.
        nn.utils.clip_grad_norm_(self.model.parameters(), GRAD_CLIP_NORM)
        self.optim.step()
        return {
            "loss": loss.detach().item(),
            "clip_fraction": clip_fraction.detach().item(),
        }

    def step(self) -> dict[str, Any]:
        """Perform one training step and return metrics.

        Each step samples a batch of trajectory slices from the replay buffer,
        computes the advantages, and updates the policy using the GRPO algorithm.
        Blocks (polling) until the buffer holds at least BATCH_SIZE slices.
        """
        slices: list[TrajectorySlice] = ray.get(
            self.replay_buffer.sample_from.remote(BATCH_SIZE)
        )
        while len(slices) < BATCH_SIZE:
            print(
                f"Not enough slices in the buffer to sample {BATCH_SIZE} slices. Waiting for more slices..."
            )
            time.sleep(0.05)
            slices = ray.get(self.replay_buffer.sample_from.remote(BATCH_SIZE))
        # Prepare the tensors for the policy update.
        actions = torch.cat([s["actions"] for s in slices]).to("cuda")
        old_logps = torch.cat([s["old_logps"] for s in slices]).to("cuda")
        rewards = torch.cat([s["rewards"] for s in slices]).to("cuda")
        mean_rewards = torch.mean(rewards).item()
        # Each state is repeated GROUP_SIZE times to align with its flattened actions.
        states = torch.stack([s["state"] for s in slices])
        states = states.repeat_interleave(GROUP_SIZE, 0).to("cuda")
        # Compute advantages and update the policy network using GRPO.
        advantages = self._compute_advantages(rewards)
        results = self._apply_policy_update(states, actions, old_logps, advantages)
        results["rewards"] = mean_rewards
        return results

    @ray.method(tensor_transport="nixl")
    def get_weights(self) -> dict[str, torch.Tensor]:
        """Get the current model weights.

        The tensor_transport="nixl" option enables NIXL via RDT to transfer model weight
        tensors. Without it, the weights will be transferred using the Ray object store.
        """
        return self.model.state_dict()
@ray.remote(num_gpus=1)
class Generator:
    """Holds the current policy network and generates unscored trajectory slices."""

    def __init__(self, scorer) -> None:
        # eval() because this actor only samples; it never trains.
        self.model = ResidualMLP().to("cuda").eval()
        self.scorer = scorer
        self.policy_version = 1

    @ray.method(tensor_transport="nixl")  # CPU-CPU RDT
    def generate(self, states: torch.Tensor):
        """Generate actions using the current policy and send them and their metadata
        to the Scorer.

        Note: GRPO requires *sampling* from the current policy (not just the most
        probable "greedy" action).
        """
        with torch.no_grad():
            states = states.to("cuda")
            logits = self.model(states)  # [batch_size, ACTION_DIM]
            dist = Categorical(logits=logits)
            # Sample GROUP_SIZE actions for each state.
            actions = dist.sample((GROUP_SIZE,))  # [GROUP_SIZE, batch_size]
            logps = dist.log_prob(actions)  # [GROUP_SIZE, batch_size]
            # Transpose actions and logprobs for compatibility with the states tensor.
            actions = actions.transpose(0, 1).contiguous()  # [batch_size, GROUP_SIZE]
            logps = logps.transpose(0, 1).contiguous()  # [batch_size, GROUP_SIZE]
            # Create trajectory slices (moved to CPU) and enqueue for scoring.
            slice_batch = {
                "policy_version": self.policy_version,
                "state": states.detach().cpu(),
                "actions": actions.detach().cpu(),
                "old_logps": logps.detach().cpu(),
            }
            self.scorer.score_slices.remote(slice_batch)

    def update_weights(self, cuda_weights):
        """Update the generator's weights from the learner's weights.

        Note: the actor is single-threaded, so weight loads do not overlap with
        generation.
        """
        # Sanity-check that RDT delivered the tensors directly onto the GPU.
        first_tensor = next(iter(cuda_weights.values()))
        assert (
            first_tensor.device.type == "cuda"
        ), "Expected CUDA tensors after GPU-to-GPU direct transfer"
        self.model.load_state_dict(cuda_weights)
        self.model.eval()
        # Bump the version so the replay buffer can bias toward fresh data.
        self.policy_version += 1
# -- Control loop --
def train(total_steps: int) -> None:
    """Run one end-to-end training session."""
    # One instance of each actor in the pipeline.
    replay_buf = ReplayBuffer.remote()
    learner = Learner.remote(replay_buf)
    scorer = Scorer.remote(replay_buf)
    generator = Generator.remote(scorer)

    # Asynchronously seed the generator with the learner's initial weights.
    weights_updated_ref = generator.update_weights.remote(learner.get_weights.remote())
    # Pre-fill the ReplayBuffer before starting GRPO. The generator is a
    # single-threaded actor, so this generate call cannot execute until the
    # update_weights call above has completed.
    generator.generate.remote(sample_unit_vector(batch_size=BATCH_SIZE))

    step_results = []
    losses, rewards, clip_fractions = [], [], []
    for i in range(total_steps):
        generator.generate.remote(sample_unit_vector(batch_size=BATCH_SIZE))
        # Block until the generator has absorbed the previous weights.
        # Otherwise the next learner update could race an in-flight transfer
        # and the generator might observe partially updated weights.
        ray.wait([weights_updated_ref])
        # Retrieve and log accumulated metrics every 20 steps.
        if len(step_results) >= 20:
            for step_result in ray.get(step_results):
                losses.append(step_result["loss"])
                rewards.append(step_result["rewards"])
                clip_fractions.append(step_result["clip_fraction"])
            print(
                f"Step {i}/{total_steps} | Loss: {sum(losses[-20:]) / 20} | Rewards: {sum(rewards[-20:]) / 20:.3f} | Fraction clipped: {sum(clip_fractions[-20:]) / 20:.3f}"
            )
            step_results.clear()
        step_results.append(learner.step.remote())
        # Kick off the next asynchronous weight sync to the generator.
        weights_updated_ref = generator.update_weights.remote(
            learner.get_weights.remote()
        )
if __name__ == "__main__":
    # Single CLI knob: how many GRPO steps to run.
    parser = argparse.ArgumentParser()
    parser.add_argument("--steps", type=int, default=450)
    cli_args = parser.parse_args()

    ray.init(ignore_reinit_error=True)
    train(total_steps=cli_args.steps)
    print("Done!")
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-core/examples/rdt/grpo_contextual_bandits.py",
"license": "Apache License 2.0",
"lines": 349,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/llm/_internal/serve/utils/registry.py | """Generic registry for LLM serving components using Ray's internal KV store.
This module provides a reusable registry mechanism that enables components to be
registered in the driver process and accessed across all Ray processes in the cluster,
including Ray Serve child processes.
Similar to RLlib/Tune's registry but with a fixed global prefix for cross-job access.
"""
import importlib
from typing import Any, Callable
import ray._private.worker as worker
import ray.cloudpickle as pickle
from ray.experimental.internal_kv import (
_internal_kv_del,
_internal_kv_exists,
_internal_kv_get,
_internal_kv_initialized,
_internal_kv_put,
)
from ray.llm._internal.serve.observability.logging import get_logger
logger = get_logger(__name__)
# Fixed prefix for cross-job accessibility (Serve deployments run in different jobs)
_SERVE_REGISTRY_PREFIX = "serve_global"
def _make_key(category: str, name: str) -> bytes:
"""Generate a binary key for the KV store.
Args:
category: The component category (e.g., "kv_connector_backend")
name: The component name
Returns:
The key to use for storing the value
"""
return (
b"LLMServeRegistry:"
+ _SERVE_REGISTRY_PREFIX.encode("ascii")
+ b":"
+ category.encode("ascii")
+ b"/"
+ name.encode("ascii")
)
def _create_loader(value: Any) -> Callable[[], Any]:
"""Create a loader callable for a value.
Handles both direct objects/classes and string paths for lazy loading.
Args:
value: Either:
- A class, object, or callable (returns lambda: value)
- A string in format "module_path:class_name" (creates import loader)
Returns:
A callable that returns the value when called
Raises:
ValueError: If value is a string but doesn't have the correct format
"""
if isinstance(value, str):
if ":" not in value:
raise ValueError(
f"Invalid format for string value: '{value}'. "
f"Expected format: 'module_path:class_name' or a class/object."
)
module_path, class_name = value.rsplit(":", 1)
# Create a loader callable that imports on demand
def loader():
module = importlib.import_module(module_path)
return getattr(module, class_name)
return loader
else:
# For direct objects/classes, create a simple loader
return lambda: value
class ComponentRegistry:
    """Generic registry for LLM serving components using Ray's internal KV store.

    This registry enables components to be registered in the driver process and
    accessed across all Ray processes in the cluster, including Ray Serve child processes.
    Similar to RLlib/Tune's registry but with a fixed global prefix for cross-job access.

    **Usage Pattern:**

    This registry is designed for a "register once, read many" pattern:

    - Components are typically registered in the driver process before deployment
    - Ray Serve replicas read from the KV store during initialization
    - Once a component is resolved and cached in a process, subsequent `get()` calls
      return the cached value without checking the KV store for updates

    Example:
        # Create a registry for a component category
        registry = ComponentRegistry("my_component")

        # Register a component
        registry.register("my_component", MyComponentClass)

        # Get a registered component
        component = registry.get("my_component")

        # Check if registered
        if registry.contains("my_component"):
            ...
    """

    def __init__(self, category: str):
        """Initialize a registry for a specific component category.

        Args:
            category: The category name (e.g., "kv_connector_backend")
        """
        self.category = category
        # name -> zero-arg callable producing the component (possibly a lazy import).
        self._loader_cache: dict[str, Callable[[], Any]] = {}
        # name -> fully resolved component (loader already invoked once).
        self._resolved_cache: dict[str, Any] = {}
        # name -> pickled loader awaiting a KV-store write (Ray not yet
        # initialized, or an earlier put failed). Drained by flush_pending().
        self._pending: dict[str, bytes] = {}

    def register(self, name: str, value: Any) -> None:
        """Register a component.

        Args:
            name: The name to register under
            value: The component to register. Can be:
                - A class, object, or callable (serialized directly)
                - A string in format "module_path:class_name" (lazy-loaded via import)

        Raises:
            ValueError: If the component is already registered. Use unregister()
                first if you need to change the registration.

        Examples:
            # Register a class directly
            registry.register("MyClass", MyClass)

            # Register via module path (lazy loading)
            registry.register("MyClass", "my.module:MyClass")
        """
        # Prevent double registration to avoid cache inconsistencies.
        if self.contains(name):
            raise ValueError(
                f"{self.category} '{name}' is already registered. "
                f"Use unregister() first if you need to change the registration."
            )

        # Create a loader callable (handles both direct values and string paths).
        loader = _create_loader(value)
        # Serialize the loader so other processes can unpickle and call it.
        serialized = pickle.dumps(loader)

        # Always cache locally so this process can resolve without the KV store.
        self._loader_cache[name] = loader

        # Store in KV store if Ray is initialized, otherwise queue for later.
        if _internal_kv_initialized():
            try:
                key = _make_key(self.category, name)
                _internal_kv_put(key, serialized, overwrite=True)
                logger.debug(f"Registered {self.category} '{name}' in KV store")
            except Exception as e:
                logger.warning(
                    f"Failed to register {self.category} '{name}' in KV store: {e}",
                    exc_info=True,
                )
                # Keep it queued so flush_pending() can retry the write later.
                self._pending[name] = serialized
        else:
            self._pending[name] = serialized

    def get(self, name: str) -> Any:
        """Get a registered component.

        Args:
            name: The name of the component

        Returns:
            The registered component. If registered with a string path,
            returns the imported class/object. If registered directly,
            returns the original value.

        Raises:
            ValueError: If the component is not registered
        """
        # Check resolved cache first (fast path; never re-reads the KV store).
        if name in self._resolved_cache:
            return self._resolved_cache[name]

        loader = self._loader_cache.get(name)

        # If not in local loader cache, try fetching from KV store.
        if loader is None and _internal_kv_initialized():
            try:
                key = _make_key(self.category, name)
                serialized = _internal_kv_get(key)
                if serialized is not None:
                    loader = pickle.loads(serialized)
                    # Cache the loader for future gets.
                    self._loader_cache[name] = loader
                    logger.debug(f"Loaded {self.category} '{name}' from KV store")
            except Exception as e:
                logger.warning(
                    f"Failed to load {self.category} '{name}' from KV store: {e}",
                    exc_info=True,
                )

        if loader is not None:
            value = loader()
            self._resolved_cache[name] = value
            return value

        # Not found anywhere.
        raise ValueError(
            f"{self.category} '{name}' not found. "
            f"Registered: {list(self._loader_cache.keys())}"
        )

    def contains(self, name: str) -> bool:
        """Check if a component is registered.

        Args:
            name: The name to check

        Returns:
            True if registered, False otherwise
        """
        if name in self._loader_cache:
            return True
        if _internal_kv_initialized():
            try:
                key = _make_key(self.category, name)
                return _internal_kv_exists(key)
            except Exception as e:
                logger.warning(
                    f"Failed to check if {self.category} '{name}' exists in KV store: {e}",
                    exc_info=True,
                )
                # Best-effort: treat a KV failure as "not registered".
                return False
        return False

    def unregister(self, name: str) -> None:
        """Unregister a component.

        Removes the component from local cache, pending registrations, and KV store.

        Args:
            name: The name of the component to unregister
        """
        # Remove from local caches.
        if name in self._loader_cache:
            del self._loader_cache[name]
        if name in self._resolved_cache:
            del self._resolved_cache[name]

        # Remove from pending if present.
        if name in self._pending:
            del self._pending[name]

        # Remove from KV store if Ray is initialized.
        if _internal_kv_initialized():
            try:
                key = _make_key(self.category, name)
                _internal_kv_del(key)
                logger.debug(f"Unregistered {self.category} '{name}' from KV store")
            except Exception as e:
                logger.warning(
                    f"Failed to unregister {self.category} '{name}' from KV store: {e}",
                    exc_info=True,
                )

    def flush_pending(self) -> None:
        """Flush pending registrations to KV store.

        This is called automatically when Ray initializes via _post_init_hooks.

        Entries that fail to flush are kept in ``self._pending`` so a later
        flush attempt can retry them. (Previously the whole pending dict was
        cleared unconditionally, silently dropping failed registrations.)
        """
        if not _internal_kv_initialized() or not self._pending:
            return
        flushed = []
        for name, serialized in self._pending.items():
            try:
                key = _make_key(self.category, name)
                _internal_kv_put(key, serialized, overwrite=True)
                logger.debug(
                    f"Flushed pending registration for {self.category} '{name}'"
                )
                flushed.append(name)
            except Exception as e:
                logger.warning(
                    f"Failed to flush {self.category} '{name}': {e}", exc_info=True
                )
        # Only drop the entries that actually made it into the KV store.
        for name in flushed:
            del self._pending[name]
# Global registry instances for different component categories
_registries: dict[str, ComponentRegistry] = {}


def get_registry(category: str) -> ComponentRegistry:
    """Get or create a registry for a component category.

    Each category maps to exactly one process-wide ComponentRegistry,
    created lazily on first request.

    Args:
        category: The component category name

    Returns:
        The ComponentRegistry instance for this category
    """
    registry = _registries.get(category)
    if registry is None:
        registry = _registries[category] = ComponentRegistry(category)
    return registry
def _flush_all_registries():
    """Flush every category registry's pending registrations to the KV store.

    This is registered as a Ray post-init hook to ensure registrations
    made before Ray initialization are available across processes.
    """
    for category_registry in _registries.values():
        category_registry.flush_pending()


# Install the hook exactly once, even if this module is executed repeatedly.
if _flush_all_registries not in worker._post_init_hooks:
    worker._post_init_hooks.append(_flush_all_registries)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/_internal/serve/utils/registry.py",
"license": "Apache License 2.0",
"lines": 258,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/llm/tests/serve/cpu/deployments/llm/vllm/kv_transfer_backends/test_registry.py | """Unit tests for ComponentRegistry."""
import sys
import pytest
from ray.llm._internal.serve.utils.registry import ComponentRegistry, get_registry
class TestComponentRegistry:
    """Test suite for ComponentRegistry."""

    def test_register_and_get_direct_class(self):
        """A class object registered directly is returned unchanged."""
        reg = ComponentRegistry("test_category")
        dummy_cls = type("TestClass", (), {})
        reg.register("test_component", dummy_cls)
        assert reg.contains("test_component")
        assert reg.get("test_component") == dummy_cls

    def test_register_and_get_module_path(self):
        """A 'module:attr' string is lazily imported and resolved on get()."""
        reg = ComponentRegistry("test_category")
        reg.register(
            "test_component",
            "ray.llm._internal.serve.utils.registry:ComponentRegistry",
        )
        assert reg.contains("test_component")
        assert reg.get("test_component") == ComponentRegistry

    def test_get_nonexistent_component_raises(self):
        """get() on an unknown name raises ValueError."""
        reg = ComponentRegistry("test_category")
        with pytest.raises(ValueError, match="not found"):
            reg.get("nonexistent")

    def test_invalid_string_format_raises(self):
        """Registering a path string without a colon is rejected."""
        reg = ComponentRegistry("test_category")
        with pytest.raises(ValueError, match="Invalid format"):
            reg.register("test_comp", "invalid_format_no_colon")

    def test_double_registration_raises(self):
        """Registering the same name twice fails and keeps the first value."""
        reg = ComponentRegistry("test_category")
        first_cls = type("TestClass1", (), {})
        second_cls = type("TestClass2", (), {})
        reg.register("test_component", first_cls)
        with pytest.raises(ValueError, match="already registered"):
            reg.register("test_component", second_cls)
        # The original registration must survive the failed re-registration.
        assert reg.get("test_component") == first_cls

    def test_reregister_after_unregister(self):
        """unregister() frees the name for a subsequent register()."""
        reg = ComponentRegistry("test_category")
        first_cls = type("TestClass1", (), {})
        second_cls = type("TestClass2", (), {})
        reg.register("test_component", first_cls)
        reg.unregister("test_component")
        reg.register("test_component", second_cls)
        assert reg.get("test_component") == second_cls

    def test_get_registry_singleton(self):
        """get_registry() hands back one shared instance per category."""
        first = get_registry("test_category")
        second = get_registry("test_category")
        assert first is second
        assert first.category == "test_category"

    def test_get_registry_different_categories(self):
        """Distinct categories map to distinct registry instances."""
        first = get_registry("category1")
        second = get_registry("category2")
        assert first is not second
        assert first.category == "category1"
        assert second.category == "category2"

    def test_unregister(self):
        """unregister() removes the component from lookup and get()."""
        reg = ComponentRegistry("test_category")
        dummy_cls = type("TestClass", (), {})
        # Register and verify it exists.
        reg.register("test_component", dummy_cls)
        assert reg.contains("test_component")
        # Unregister and verify it's gone.
        reg.unregister("test_component")
        assert not reg.contains("test_component")
        # get() must now fail.
        with pytest.raises(ValueError, match="not found"):
            reg.get("test_component")
if __name__ == "__main__":
    # Allow running this test module directly; exit with pytest's status code.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/tests/serve/cpu/deployments/llm/vllm/kv_transfer_backends/test_registry.py",
"license": "Apache License 2.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/_private/authentication/authentication_constants.py | # Authentication error messages
# Raised/surfaced when the cluster requires token auth but the client sent no token.
TOKEN_AUTH_ENABLED_BUT_NO_TOKEN_FOUND_ERROR_MESSAGE = (
    "Token authentication is enabled but no authentication token was found"
)
# Raised/surfaced when a token was supplied but rejected by the server.
TOKEN_INVALID_ERROR_MESSAGE = "Token authentication is enabled but the authentication token is invalid or incorrect."  # noqa: E501

# HTTP header and cookie constants
# Lowercase header name (HTTP header names are case-insensitive on the wire).
AUTHORIZATION_HEADER_NAME = "authorization"
# Bearer-scheme prefix for Authorization values; the trailing space is intentional.
AUTHORIZATION_BEARER_PREFIX = "Bearer "
# Ray-specific authorization header. NOTE(review): presumably an alternative
# carrier for the token alongside the standard header — confirm against callers.
RAY_AUTHORIZATION_HEADER_NAME = "x-ray-authorization"
# Cookie used to carry the authentication token.
AUTHENTICATION_TOKEN_COOKIE_NAME = "ray-authentication-token"
AUTHENTICATION_TOKEN_COOKIE_MAX_AGE = 30 * 24 * 60 * 60  # 30 days
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/_private/authentication/authentication_constants.py",
"license": "Apache License 2.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/tests/test_submission_client_auth.py | import pytest
from ray._private.authentication.authentication_constants import (
TOKEN_AUTH_ENABLED_BUT_NO_TOKEN_FOUND_ERROR_MESSAGE,
TOKEN_INVALID_ERROR_MESSAGE,
)
from ray._private.authentication_test_utils import (
clear_auth_token_sources,
reset_auth_token_state,
set_auth_mode,
set_env_auth_token,
)
from ray.dashboard.modules.dashboard_sdk import SubmissionClient
from ray.dashboard.modules.job.sdk import JobSubmissionClient
from ray.exceptions import AuthenticationError
from ray.util.state import StateApiClient
def test_submission_client_adds_token_automatically(setup_cluster_with_token_auth):
    """Test that SubmissionClient automatically adds token to headers."""
    # The fixture has already exported the cluster token into the environment.
    sdk_client = SubmissionClient(
        address=setup_cluster_with_token_auth["dashboard_url"]
    )
    # The client should inject a lowercase "authorization" bearer header.
    headers = sdk_client._headers
    assert "authorization" in headers
    assert headers["authorization"].startswith("Bearer ")
def test_submission_client_without_token_shows_helpful_error(
    setup_cluster_with_token_auth,
):
    """Test that requests without token show helpful error message."""
    # Strip every client-side token source so no credential is attached.
    clear_auth_token_sources(remove_default=True)
    set_auth_mode("disabled")
    reset_auth_token_state()

    sdk_client = SubmissionClient(
        address=setup_cluster_with_token_auth["dashboard_url"]
    )
    # Any request should now fail with AuthenticationError.
    with pytest.raises(AuthenticationError) as exc_info:
        sdk_client.get_version()
    # The failure should carry the "no token found" guidance.
    assert TOKEN_AUTH_ENABLED_BUT_NO_TOKEN_FOUND_ERROR_MESSAGE in str(exc_info.value)
def test_submission_client_with_invalid_token_shows_helpful_error(
    setup_cluster_with_token_auth,
):
    """Test that requests with wrong token show helpful error message."""
    # Install a token that does not match the cluster's token.
    wrong_token = "wrong_token_00000000000000000000000000000000"
    set_env_auth_token(wrong_token)
    set_auth_mode("token")
    reset_auth_token_state()

    sdk_client = SubmissionClient(
        address=setup_cluster_with_token_auth["dashboard_url"]
    )
    # Any request should now fail with AuthenticationError.
    with pytest.raises(AuthenticationError) as exc_info:
        sdk_client.get_version()
    # The failure should carry the "invalid token" guidance.
    assert TOKEN_INVALID_ERROR_MESSAGE in str(exc_info.value)
def test_submission_client_with_valid_token_succeeds(setup_cluster_with_token_auth):
    """Test that requests with valid token succeed."""
    sdk_client = SubmissionClient(
        address=setup_cluster_with_token_auth["dashboard_url"]
    )
    # A properly authenticated request should return a version string.
    assert sdk_client.get_version() is not None
def test_job_submission_client_inherits_auth(setup_cluster_with_token_auth):
    """Test that JobSubmissionClient inherits auth from SubmissionClient."""
    job_client = JobSubmissionClient(
        address=setup_cluster_with_token_auth["dashboard_url"]
    )
    # The lowercase bearer header should be injected by the base class.
    assert "authorization" in job_client._headers
    assert job_client._headers["authorization"].startswith("Bearer ")
    # And authenticated requests should go through.
    assert job_client.get_version() is not None
def test_state_api_client_inherits_auth(setup_cluster_with_token_auth):
    """Test that StateApiClient inherits auth from SubmissionClient."""
    state_client = StateApiClient(
        address=setup_cluster_with_token_auth["dashboard_url"]
    )
    # The lowercase bearer header should be injected by the base class.
    assert "authorization" in state_client._headers
    assert state_client._headers["authorization"].startswith("Bearer ")
def test_user_provided_header_not_overridden(setup_cluster_with_token_auth):
    """Test that user-provided Authorization header is not overridden."""
    custom_auth = "Bearer custom_token"
    sdk_client = SubmissionClient(
        address=setup_cluster_with_token_auth["dashboard_url"],
        headers={"Authorization": custom_auth},
    )
    # The caller's explicit value must win over auto-injection.
    assert sdk_client._headers["Authorization"] == custom_auth
def test_user_provided_header_case_insensitive(setup_cluster_with_token_auth):
    """Test that user-provided Authorization header is preserved regardless of case."""
    custom_auth = "Bearer custom_token"
    dashboard_url = setup_cluster_with_token_auth["dashboard_url"]

    # Lowercase "authorization": kept verbatim, no duplicate added.
    lower_client = SubmissionClient(
        address=dashboard_url,
        headers={"authorization": custom_auth},
    )
    assert lower_client._headers["authorization"] == custom_auth
    assert "Authorization" not in lower_client._headers

    # Mixed-case "AuThOrIzAtIoN": also kept verbatim, no duplicate added.
    mixed_client = SubmissionClient(
        address=dashboard_url,
        headers={"AuThOrIzAtIoN": custom_auth},
    )
    assert mixed_client._headers["AuThOrIzAtIoN"] == custom_auth
    assert "Authorization" not in mixed_client._headers
    assert "authorization" not in mixed_client._headers
def test_error_messages_contain_instructions(setup_cluster_with_token_auth):
    """Test that all auth error messages contain setup instructions."""
    dashboard_url = setup_cluster_with_token_auth["dashboard_url"]

    # 401 case: no token available at all.
    clear_auth_token_sources(remove_default=True)
    set_auth_mode("disabled")
    reset_auth_token_state()
    missing_token_client = SubmissionClient(address=dashboard_url)
    with pytest.raises(AuthenticationError) as exc_info:
        missing_token_client.get_version()
    assert TOKEN_AUTH_ENABLED_BUT_NO_TOKEN_FOUND_ERROR_MESSAGE in str(exc_info.value)

    # 403 case: a token is present but does not match the cluster's.
    set_env_auth_token("wrong_token_00000000000000000000000000000000")
    set_auth_mode("token")
    reset_auth_token_state()
    bad_token_client = SubmissionClient(address=dashboard_url)
    with pytest.raises(AuthenticationError) as exc_info:
        bad_token_client.get_version()
    assert TOKEN_INVALID_ERROR_MESSAGE in str(exc_info.value)
def test_no_token_added_when_auth_disabled(setup_cluster_with_token_auth):
    """Test that no authorization header is injected when auth is disabled."""
    sdk_client = SubmissionClient(
        address=setup_cluster_without_token_auth["dashboard_url"]
    )
    # Neither casing of the header should have been added.
    assert "authorization" not in sdk_client._headers
    assert "Authorization" not in sdk_client._headers
if __name__ == "__main__":
    import sys

    # Run this module's tests directly, propagating pytest's exit status.
    sys.exit(pytest.main(["-vv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/test_submission_client_auth.py",
"license": "Apache License 2.0",
"lines": 132,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/_internal/logical/rules/predicate_pushdown.py | import copy
from typing import List
from ray.data._internal.logical.interfaces import (
LogicalOperator,
LogicalOperatorSupportsPredicatePassThrough,
LogicalOperatorSupportsPredicatePushdown,
LogicalPlan,
PredicatePassThroughBehavior,
Rule,
)
from ray.data._internal.logical.operators import Filter, Project
from ray.data._internal.planner.plan_expression.expression_visitors import (
_ColumnSubstitutionVisitor,
)
from ray.data.expressions import Expr, col
__all__ = [
"PredicatePushdown",
]
class PredicatePushdown(Rule):
"""Pushes down predicates across the graph.
This rule performs the following optimizations:
1. Combines chained Filter operators with compatible expressions
2. Pushes filter expressions through eligible operators using trait-based rules
3. Pushes filters into data sources that support predicate pushdown
Eligibility is determined by the LogicalOperatorSupportsPredicatePassThrough trait, which operators
implement to declare their pushdown behavior:
- PASSTHROUGH: Filter passes through unchanged (Sort, Repartition, Shuffle, Limit)
- PASSTHROUGH_WITH_SUBSTITUTION: Filter passes through with column rebinding (Project)
- PUSH_INTO_BRANCHES: Filter is pushed into each branch (Union)
- CONDITIONAL: Filter may be pushed based on analysis (Join - analyzes which side
the predicate references and pushes to that side if safe for the join type)
"""
def apply(self, plan: LogicalPlan) -> LogicalPlan:
"""Apply predicate pushdown optimization to the logical plan."""
dag = plan.dag
new_dag = dag._apply_transform(self._try_fuse_filters)
new_dag = new_dag._apply_transform(self._try_push_down_predicate)
return LogicalPlan(new_dag, plan.context) if dag is not new_dag else plan
@classmethod
def _is_valid_filter_operator(cls, op: LogicalOperator) -> bool:
return isinstance(op, Filter) and op.is_expression_based()
@classmethod
def _try_fuse_filters(cls, op: LogicalOperator) -> LogicalOperator:
"""Fuse consecutive Filter operators with compatible expressions."""
if not cls._is_valid_filter_operator(op):
return op
input_op = op.input_dependencies[0]
if not cls._is_valid_filter_operator(input_op):
return op
# Combine predicates
combined_predicate = op.predicate_expr & input_op.predicate_expr
# Create new filter on the input of the lower filter
return Filter(
input_op.input_dependencies[0],
predicate_expr=combined_predicate,
)
@classmethod
def _can_push_filter_through_projection(
cls, filter_op: "Filter", projection_op: Project
) -> bool:
"""Check if a filter can be pushed through a projection operator.
Returns False (blocks pushdown) if filter references:
- Columns removed by select: select(['a']).filter(col('b'))
- Computed columns: with_column('d', 4).filter(col('d'))
- Old column names after rename: rename({'b': 'B'}).filter(col('b'))
Returns True (allows pushdown) for:
- Columns present in output: select(['a', 'b']).filter(col('a'))
- New column names after rename: rename({'b': 'B'}).filter(col('B'))
- Rename chains with name reuse: rename({'a': 'b', 'b': 'c'}).filter(col('b'))
(where 'b' is valid output created by a->b)
"""
from ray.data._internal.logical.rules.projection_pushdown import (
_is_renaming_expr,
)
from ray.data._internal.planner.plan_expression.expression_visitors import (
_ColumnReferenceCollector,
)
from ray.data.expressions import AliasExpr
collector = _ColumnReferenceCollector()
collector.visit(filter_op.predicate_expr)
predicate_columns = set(collector.get_column_refs() or [])
output_columns = set()
new_names = set()
original_columns_being_renamed = set()
for expr in projection_op.exprs:
if expr.name is not None:
# Collect output column names
output_columns.add(expr.name)
# Process AliasExpr (computed columns or renames)
if isinstance(expr, AliasExpr):
new_names.add(expr.name)
# Check computed column: with_column('d', 4) creates AliasExpr(lit(4), 'd')
if expr.name in predicate_columns and not _is_renaming_expr(expr):
return False # Computed column
# Track old names being renamed for later check
if _is_renaming_expr(expr):
original_columns_being_renamed.add(expr.expr.name)
# Check if filter references columns removed by explicit select
# Valid if: projection includes all columns (star) OR predicate columns exist in output
has_required_columns = (
projection_op.has_star_expr() or predicate_columns.issubset(output_columns)
)
if not has_required_columns:
return False
# Find old names that are:
# 1. Being renamed away (in original_columns_being_renamed), AND
# 2. Referenced in predicate (in predicate_columns), AND
# 3. NOT recreated as new names (not in new_names)
#
# Examples:
# rename({'b': 'B'}).filter(col('b'))
# → {'b'} & {'b'} - {'B'} = {'b'} → BLOCKS (old name 'b' no longer exists)
#
# rename({'a': 'b', 'b': 'c'}).filter(col('b'))
# → {'a','b'} & {'b'} - {'b','c'} = {} → ALLOWS (new 'b' created by a->b)
#
# rename({'b': 'B'}).filter(col('B'))
# → {'b'} & {'B'} - {'B'} = {} → ALLOWS (using new name 'B')
invalid_old_names = (
original_columns_being_renamed & predicate_columns
) - new_names
if invalid_old_names:
return False # Old name after rename
return True
@classmethod
def _substitute_predicate_columns(
cls, predicate_expr: Expr, column_rename_map: dict[str, str]
) -> Expr:
"""Rebind column references in a predicate expression.
When pushing a predicate through a projection with column renames,
we need to rewrite column references from new names to old names.
Args:
predicate_expr: The predicate with new column names
column_rename_map: Mapping from old_name -> new_name
Returns:
The predicate rewritten to use old column names
"""
# Invert the mapping: new_name -> old_name (as col expression)
# This is because the predicate uses new names and we need to map
# them back to old names
column_mapping = {
new_col: col(old_col) for old_col, new_col in column_rename_map.items()
}
visitor = _ColumnSubstitutionVisitor(column_mapping)
return visitor.visit(predicate_expr)
@classmethod
def _try_push_down_predicate(cls, op: LogicalOperator) -> LogicalOperator:
"""Push Filter down through the operator tree."""
if not cls._is_valid_filter_operator(op):
return op
filter_op: Filter = op
input_op = filter_op.input_dependencies[0]
predicate_expr = filter_op.predicate_expr
# Case 1: Check if operator supports predicate pushdown (e.g., Read)
if (
isinstance(input_op, LogicalOperatorSupportsPredicatePushdown)
and input_op.supports_predicate_pushdown()
):
# Check if the operator has column renames that need rebinding
# This happens when projection pushdown has been applied
rename_map = input_op.get_column_renames()
if rename_map:
# Substitute the predicate to use original column names
# This is needed to ensure that the predicate expression can be pushed into the input operator.
predicate_expr = cls._substitute_predicate_columns(
predicate_expr, rename_map
)
# Push the predicate down
result_op = input_op.apply_predicate(predicate_expr)
# If the operator is unchanged (e.g., predicate references partition columns
# that can't be pushed down), keep the Filter operator
if result_op is input_op:
return filter_op
# Otherwise, return the result without the filter (predicate was pushed down)
return result_op
# Case 2: Check if operator allows predicates to pass through
if isinstance(input_op, LogicalOperatorSupportsPredicatePassThrough):
behavior = input_op.predicate_passthrough_behavior()
if behavior in (
PredicatePassThroughBehavior.PASSTHROUGH,
PredicatePassThroughBehavior.PASSTHROUGH_WITH_SUBSTITUTION,
):
# Both cases push through a single input with optional column rebinding
assert len(input_op.input_dependencies) == 1, (
f"{behavior.value} operators must have exactly 1 input, "
f"got {len(input_op.input_dependencies)}"
)
# Apply column substitution if needed
if (
behavior
== PredicatePassThroughBehavior.PASSTHROUGH_WITH_SUBSTITUTION
):
# Check if we can safely push the filter through this projection
if isinstance(
input_op, Project
) and not cls._can_push_filter_through_projection(
filter_op, input_op
):
return filter_op
rename_map = input_op.get_column_substitutions()
if rename_map:
predicate_expr = cls._substitute_predicate_columns(
predicate_expr, rename_map
)
# Push filter through and recursively try to push further
new_filter = Filter(
input_op.input_dependencies[0],
predicate_expr=predicate_expr,
)
pushed_filter = cls._try_push_down_predicate(new_filter)
# Return input_op with the pushed filter as its input
return cls._clone_op_with_new_inputs(input_op, [pushed_filter])
elif behavior == PredicatePassThroughBehavior.PUSH_INTO_BRANCHES:
# Push into each branch (e.g., Union)
# Apply filter to each branch and recursively push down
new_inputs = []
for branch_op in input_op.input_dependencies:
branch_filter = Filter(branch_op, predicate_expr=predicate_expr)
pushed_branch = cls._try_push_down_predicate(branch_filter)
new_inputs.append(pushed_branch)
# Return operator with filtered branches
return cls._clone_op_with_new_inputs(input_op, new_inputs)
elif behavior == PredicatePassThroughBehavior.CONDITIONAL:
# Handle conditional pushdown (e.g., Join)
return cls._push_filter_through_conditionally(filter_op, input_op)
return filter_op
@classmethod
def _push_filter_through_conditionally(
    cls, filter_op: Filter, conditional_op: LogicalOperator
) -> LogicalOperator:
    """Conditionally push a filter below a multi-input operator (e.g. Join).

    A predicate that references columns from only one input branch can be
    pushed down into that branch when the operator reports it is safe.
    Returns the original ``filter_op`` unchanged when pushdown is not
    possible.
    """
    # Operators opt into conditional pushdown by exposing this hook.
    if not hasattr(conditional_op, "which_side_to_push_predicate"):
        return filter_op

    side = conditional_op.which_side_to_push_predicate(filter_op.predicate_expr)
    if side is None:
        # Predicate spans multiple inputs (or is otherwise unsafe) -- keep it here.
        return filter_op

    # The enum's value doubles as the index of the target input branch.
    branch_index = side.value
    updated_inputs = list(conditional_op.input_dependencies)
    rewrapped = Filter(
        updated_inputs[branch_index],
        predicate_expr=filter_op.predicate_expr,
    )
    # Recurse so the filter can keep sinking inside the chosen branch.
    updated_inputs[branch_index] = cls._try_push_down_predicate(rewrapped)
    return cls._clone_op_with_new_inputs(conditional_op, updated_inputs)
@classmethod
def _clone_op_with_new_inputs(
    cls, op: LogicalOperator, new_inputs: List[LogicalOperator]
) -> LogicalOperator:
    """Return a shallow copy of ``op`` rebound to ``new_inputs``.

    Args:
        op: The operator to clone.
        new_inputs: Replacement input operators (may be a single-element list).

    Returns:
        A shallow copy of ``op`` whose ``input_dependencies`` is ``new_inputs``.
    """
    clone = copy.copy(op)
    clone.input_dependencies = new_inputs
    return clone
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/logical/rules/predicate_pushdown.py",
"license": "Apache License 2.0",
"lines": 268,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/tests/test_predicate_pushdown.py | import re
from typing import Any, List
import pandas as pd
import pyarrow.compute as pc
import pytest
import ray
from ray.data import Dataset
from ray.data._internal.logical.operators import (
Filter,
Limit,
Project,
Repartition,
Sort,
)
from ray.data._internal.logical.optimizers import LogicalOptimizer
from ray.data._internal.util import rows_same
from ray.data.expressions import col
from ray.data.tests.conftest import * # noqa
from ray.data.tests.test_execution_optimizer_limit_pushdown import (
_check_valid_plan_and_result,
)
from ray.data.tests.test_util import (
get_operator_types,
get_operators_of_type,
plan_has_operator,
plan_operator_comes_before,
)
from ray.tests.conftest import * # noqa
# Pattern to match read operators in logical plans.
# Matches Read[Read<Format>] where format is Parquet, CSV, Range, etc.,
# or the two-operator file-reading form (ListFiles -> ReadFiles). Used by
# _check_plan_with_flexible_read so plan checks work for both read shapes.
READ_OPERATOR_PATTERN = (
    r"^(Read\[Read\w+\]|ListFiles\[ListFiles\] -> ReadFiles\[ReadFiles\])"
)
def _check_plan_with_flexible_read(
    ds: Dataset, expected_plan_suffix: str, expected_result: List[Any]
):
    """Validate an optimized logical plan while being lenient about the read op.

    The read portion of the plan is matched against READ_OPERATOR_PATTERN so
    the same check works for single-op reads and two-op file reads.

    Args:
        ds: The dataset whose plan is checked.
        expected_plan_suffix: Expected plan text after the read operator(s).
            An empty string means the plan should be the read alone.
        expected_result: The rows the dataset is expected to produce.
    """
    optimized = LogicalOptimizer().optimize(ds._plan._logical_plan)
    actual_plan = optimized.dag.dag_str

    read_match = re.match(READ_OPERATOR_PATTERN, actual_plan)
    assert read_match, f"Expected plan to start with read operator, got: {actual_plan}"

    if not expected_plan_suffix:
        # No suffix expected: the plan must consist of the read operator only.
        assert actual_plan == read_match.group(
            1
        ), f"Expected plan to be just the read operator, got: {actual_plan}"
    else:
        # The suffix must appear directly after the read operator(s).
        full_pattern = f"{READ_OPERATOR_PATTERN} -> {re.escape(expected_plan_suffix)}"
        assert re.match(full_pattern, actual_plan), (
            f"Expected plan to match pattern with suffix '{expected_plan_suffix}', "
            f"got: {actual_plan}"
        )

    # Finally, the data itself must match.
    assert ds.take_all() == expected_result
@pytest.fixture
def parquet_ds(ray_start_regular_shared):
    """Iris Parquet dataset (150 rows) shared by the pushdown tests."""
    dataset = ray.data.read_parquet("example://iris.parquet")
    assert dataset.count() == 150
    return dataset
@pytest.fixture
def csv_ds(ray_start_regular_shared):
    """Iris CSV dataset (150 rows) shared by the pushdown tests."""
    dataset = ray.data.read_csv("example://iris.csv")
    assert dataset.count() == 150
    return dataset
def test_filter_with_udfs(parquet_ds):
    """UDF-based filters cannot be pushed down and must stay in the plan."""
    ds = parquet_ds.filter(lambda r: r["sepal.length"] > 5.0)
    rows = ds.take_all()
    assert ds.count() == 118
    assert all(row["sepal.length"] > 5.0 for row in rows)
    _check_plan_with_flexible_read(
        ds,
        "Filter[Filter(<lambda>)]",  # UDF filter doesn't push down
        rows,
    )
def test_filter_with_expressions(parquet_ds):
    """Expression-based filters are pushed down into the read operator."""
    # Reference rows computed with an equivalent UDF filter (never pushed down).
    reference_rows = parquet_ds.filter(lambda r: r["sepal.length"] > 5.0).take_all()
    ds = parquet_ds.filter(expr="sepal.length > 5.0")
    _check_plan_with_flexible_read(
        ds,
        "",  # Pushed down to read, no additional operators
        reference_rows,
    )
def test_filter_pushdown_source_and_op(ray_start_regular_shared):
    """Filters supplied at the source and via .filter() should both push down."""
    # Datasource-level filter expressed with PyArrow compute.
    source_predicate = pc.greater(pc.field("sepal.length"), pc.scalar(5.0))
    ds = ray.data.read_parquet(
        "example://iris.parquet", filter=source_predicate
    ).filter(expr="sepal.width > 3.0")
    rows = ds.take_all()
    assert all(r["sepal.length"] > 5.0 and r["sepal.width"] > 3.0 for r in rows)
    _check_plan_with_flexible_read(
        ds,
        "",  # Both filters pushed down to read
        rows,
    )
def test_chained_filter_with_expressions(parquet_ds):
    """Chained expression filters are fused and pushed down together."""
    # Build the chain iteratively; the 5.0 bound dominates the others.
    ds = parquet_ds
    for bound in (1.0, 2.0, 3.0, 3.0, 5.0):
        ds = ds.filter(expr=col("sepal.length") > bound)
    reference_rows = parquet_ds.filter(lambda r: r["sepal.length"] > 5.0).take_all()
    _check_plan_with_flexible_read(
        ds,
        "",  # All filters combined and pushed down to read
        reference_rows,
    )
@pytest.mark.parametrize(
    "filter_fn,expected_suffix",
    [
        (
            lambda ds: ds.filter(lambda r: r["sepal.length"] > 5.0),
            "Filter[Filter(<lambda>)]",  # UDF filter doesn't push down
        ),
        (
            lambda ds: ds.filter(expr=col("sepal.length") > 5.0),
            "Filter[Filter(col('sepal.length') > 5.0)]",  # CSV doesn't support predicate pushdown
        ),
    ],
)
def test_filter_pushdown_csv(csv_ds, filter_fn, expected_suffix):
    """CSV sources don't support pushdown, so the filter stays in the plan."""
    ds = filter_fn(csv_ds)
    rows = ds.take_all()
    assert ds.count() == 118
    assert all(row["sepal.length"] > 5.0 for row in rows)
    _check_plan_with_flexible_read(ds, expected_suffix, rows)
def test_filter_mixed(csv_ds):
    """Mixed UDF/expression filters fuse around the map boundary (CSV: no pushdown)."""
    ds = (
        csv_ds.filter(lambda r: r["sepal.length"] < 5.0)
        .filter(expr="sepal.length > 3.0")
        .filter(expr="sepal.length > 4.0")
        .map(lambda x: x)
        .filter(expr="sepal.length > 2.0")
        .filter(expr="sepal.length > 1.0")
    )
    rows = ds.take_all()
    assert ds.count() == 22
    assert all(row["sepal.length"] < 5.0 for row in rows)
    assert all(row["sepal.length"] > 4.0 for row in rows)
    # After optimization: expression filters before map get fused, expression
    # filters after map get fused. CSV doesn't support predicate pushdown, so
    # filters stay after Read.
    _check_plan_with_flexible_read(
        ds,
        "Filter[Filter(<lambda>)] -> Filter[Filter((col('sepal.length') > 4.0) & (col('sepal.length') > 3.0))] -> "
        "MapRows[Map(<lambda>)] -> Filter[Filter((col('sepal.length') > 1.0) & (col('sepal.length') > 2.0))]",
        rows,
    )
def test_filter_mixed_expression_first_parquet(ray_start_regular_shared):
    """Expression filters push into the Parquet read; the UDF filter remains."""
    ds = (
        ray.data.read_parquet("example://iris.parquet")
        .filter(expr="sepal.length > 3.0")
        .filter(expr="sepal.length > 4.0")
        .filter(lambda r: r["sepal.length"] < 5.0)
    )
    rows = ds.take_all()
    assert ds.count() == 22
    assert all(row["sepal.length"] < 5.0 for row in rows)
    assert all(row["sepal.length"] > 4.0 for row in rows)
    _check_plan_with_flexible_read(
        ds,
        "Filter[Filter(<lambda>)]",  # Expressions pushed down, UDF remains
        rows,
    )
def test_filter_mixed_expression_first_csv(ray_start_regular_shared):
    """With CSV the expression filters fuse but cannot be absorbed into the read."""
    ds = (
        ray.data.read_csv("example://iris.csv")
        .filter(expr="sepal.length > 3.0")
        .filter(expr="sepal.length > 4.0")
        .filter(lambda r: r["sepal.length"] < 5.0)
    )
    rows = ds.take_all()
    assert ds.count() == 22
    assert all(row["sepal.length"] < 5.0 for row in rows)
    assert all(row["sepal.length"] > 4.0 for row in rows)
    # CSV doesn't support predicate pushdown, so expression filters get fused
    # but not pushed down.
    _check_plan_with_flexible_read(
        ds,
        "Filter[Filter((col('sepal.length') > 4.0) & (col('sepal.length') > 3.0))] -> Filter[Filter(<lambda>)]",
        rows,
    )
def test_filter_mixed_expression_not_readfiles(ray_start_regular_shared):
    """Over a non-file read, expression filters fuse but stay after the Read."""
    ds = (
        ray.data.range(100)
        .filter(expr="id > 1.0")
        .filter(expr="id > 2.0")
        .filter(lambda r: r["id"] < 5.0)
    )
    rows = ds.take_all()
    assert ds.count() == 2
    assert all(row["id"] < 5.0 for row in rows)
    assert all(row["id"] > 2.0 for row in rows)
    _check_valid_plan_and_result(
        ds,
        "Read[ReadRange] -> Filter[Filter((col('id') > 2.0) & (col('id') > 1.0))] -> "
        "Filter[Filter(<lambda>)]",
        rows,
    )
# Each case: (pipeline builder, map from final column names back to the
# original names, the filter expression expected after rebinding, test id).
@pytest.mark.parametrize(
    "operations,output_rename_map,expected_filter_expr,test_id",
    [
        (
            # rename("sepal.length" -> a).filter(a)
            lambda ds: ds.rename_columns({"sepal.length": "a"}).filter(
                expr=col("a") > 2.0
            ),
            {"a": "sepal.length"},
            col("sepal.length") > 2.0,
            "rename_filter",
        ),
        (
            # rename("sepal.length" -> a).filter(a).rename(a -> b)
            lambda ds: ds.rename_columns({"sepal.length": "a"})
            .filter(expr=col("a") > 2.0)
            .rename_columns({"a": "b"}),
            {"b": "sepal.length"},
            col("sepal.length") > 2.0,
            "rename_filter_rename",
        ),
        (
            # rename("sepal.length" -> a).filter(a).rename(a -> b).filter(b)
            lambda ds: ds.rename_columns({"sepal.length": "a"})
            .filter(expr=col("a") > 2.0)
            .rename_columns({"a": "b"})
            .filter(expr=col("b") < 5.0),
            {"b": "sepal.length"},
            (col("sepal.length") > 2.0) & (col("sepal.length") < 5.0),
            "rename_filter_rename_filter",
        ),
        (
            # rename("sepal.length" -> a).filter(a).rename(a -> b).filter(b).rename("sepal.width" -> a)
            # Here column a is referred multiple times in rename
            lambda ds: ds.rename_columns({"sepal.length": "a"})
            .filter(expr=col("a") > 2.0)
            .rename_columns({"a": "b"})
            .filter(expr=col("b") < 5.0)
            .rename_columns({"sepal.width": "a"}),
            {"b": "sepal.length", "a": "sepal.width"},
            (col("sepal.length") > 2.0) & (col("sepal.length") < 5.0),
            "rename_filter_rename_filter_rename",
        ),
    ],
    # Only the string test_id element is used as the pytest id; the other
    # (non-string) params contribute an empty string.
    ids=lambda x: x if isinstance(x, str) else "",
)
def test_pushdown_with_rename_and_filter(
    ray_start_regular_shared,
    operations,
    output_rename_map,
    expected_filter_expr,
    test_id,
):
    """Test predicate pushdown with various combinations of rename and filter operations."""
    path = "example://iris.parquet"
    ds = operations(ray.data.read_parquet(path))
    result = ds.take_all()
    # Check that plan is just the read (filters and renames pushed down/fused)
    _check_plan_with_flexible_read(ds, "", result)
    # Reference dataset: the equivalent rebound filter applied directly to
    # the source, with no renames.
    ds1 = ray.data.read_parquet(path).filter(expr=expected_filter_expr)
    # Convert to pandas to ensure both datasets are fully executed.
    # output_rename_map maps the pipeline's final column names back to the
    # original source names so the frames are comparable.
    df = ds.to_pandas().rename(columns=output_rename_map)
    df1 = ds1.to_pandas()
    assert len(df) == len(df1), f"Expected {len(df)} rows, got {len(df1)} rows"
    pd.testing.assert_frame_equal(df, df1)
def _get_optimized_plan(ds: Dataset) -> str:
    """Run the logical optimizer on ``ds`` and return the plan's string form."""
    optimized = LogicalOptimizer().optimize(ds._plan._logical_plan)
    return optimized.dag.dag_str
def _check_plan_matches_pattern(ds: Dataset, expected_pattern: str):
    """Assert that ``ds``'s optimized plan matches the given regex pattern."""
    actual_plan = _get_optimized_plan(ds)
    matched = re.match(expected_pattern, actual_plan)
    assert matched, (
        f"Plan mismatch:\n"
        f"Expected pattern: {expected_pattern}\n"
        f"Actual plan: {actual_plan}"
    )
class TestPredicatePushdownIntoRead:
    """Tests for pushing predicates into Read operators.

    When a data source supports predicate pushdown (like Parquet),
    the filter should be absorbed into the Read operator itself.
    """

    @pytest.fixture
    def parquet_ds(self, ray_start_regular_shared):
        return ray.data.read_parquet("example://iris.parquet")

    def test_complex_pipeline_all_filters_push_to_read(self, parquet_ds):
        """Complex pipeline: filters should push through all operators into Read.

        Pipeline: Read -> Filter -> Rename -> Filter -> Sort -> Repartition
        -> Filter -> Limit -> Filter

        All filters should fuse, push through all operators, rebind through
        rename, and be absorbed into the Read operator.
        """
        ds = (
            parquet_ds.filter(expr=col("sepal.length") > 4.0)
            .rename_columns({"sepal.length": "len", "sepal.width": "width"})
            .filter(expr=col("len") < 7.0)
            .sort("len")
            .repartition(3)
            .filter(expr=col("width") > 2.5)
            .limit(100)
            .filter(expr=col("len") > 4.5)
        )

        # Reference pipeline: one combined filter (in original column names)
        # applied up front, then the same structural operators.
        combined = (
            (col("sepal.length") > 4.0)
            & (col("sepal.length") < 7.0)
            & (col("sepal.width") > 2.5)
            & (col("sepal.length") > 4.5)
        )
        reference = (
            parquet_ds.filter(expr=combined)
            .rename_columns({"sepal.length": "len", "sepal.width": "width"})
            .sort("len")
            .repartition(3)
            .limit(100)
        )
        assert rows_same(ds.to_pandas(), reference.to_pandas())

        # Every filter should have been absorbed by the Read operator.
        optimized = LogicalOptimizer().optimize(ds._plan._logical_plan)
        assert not plan_has_operator(
            optimized, Filter
        ), "No Filter operators should remain after pushdown into Read"
class TestPassthroughBehavior:
    """Tests for PASSTHROUGH behavior operators.

    Operators: Sort, Repartition, RandomShuffle, Limit
    Predicates pass through unchanged - operators don't affect filtering.
    """

    @pytest.fixture
    def base_ds(self, ray_start_regular_shared):
        return ray.data.range(100)

    @pytest.mark.parametrize(
        "transform,expected_op_type",
        [
            (lambda ds: ds.sort("id"), "Sort"),
            (lambda ds: ds.repartition(10), "Repartition"),
            (lambda ds: ds.random_shuffle(), "RandomShuffle"),
            (lambda ds: ds.limit(50), "Limit"),
        ],
        ids=["sort", "repartition", "random_shuffle", "limit"],
    )
    def test_filter_pushes_through_operator(self, base_ds, transform, expected_op_type):
        """Filter should push through passthrough operators."""
        ds = transform(base_ds).filter(expr=col("id") < 10)

        # Result must equal filtering the raw dataset directly.
        reference = base_ds.filter(expr=col("id") < 10)
        assert rows_same(ds.to_pandas(), reference.to_pandas())

        optimized = LogicalOptimizer().optimize(ds._plan._logical_plan)
        assert plan_has_operator(
            optimized, Filter
        ), "Filter should exist after pushdown"
        # The passthrough operator itself must survive optimization.
        assert (
            expected_op_type in get_operator_types(optimized)
        ), f"{expected_op_type} should remain in plan"

    def test_filter_pushes_through_multiple_ops(self, base_ds):
        """Filter should push through multiple passthrough operators."""
        ds = base_ds.sort("id").repartition(5).limit(50).filter(expr=col("id") < 10)

        reference = base_ds.filter(expr=col("id") < 10)
        assert rows_same(ds.to_pandas(), reference.to_pandas())

        # Filter pushed down; every structural operator kept.
        optimized = LogicalOptimizer().optimize(ds._plan._logical_plan)
        for op_cls, message in (
            (Filter, "Filter should exist"),
            (Sort, "Sort should remain"),
            (Repartition, "Repartition should remain"),
            (Limit, "Limit should remain"),
        ):
            assert plan_has_operator(optimized, op_cls), message

    def test_multiple_filters_fuse_and_push_through(self, base_ds):
        """Multiple filters should fuse and push through passthrough operators."""
        ds = base_ds.filter(expr=col("id") > 5).sort("id").filter(expr=col("id") < 20)

        reference = base_ds.filter(expr=(col("id") > 5) & (col("id") < 20))
        assert rows_same(ds.to_pandas(), reference.to_pandas())

        optimized = LogicalOptimizer().optimize(ds._plan._logical_plan)
        remaining_filters = get_operators_of_type(optimized, Filter)
        assert len(remaining_filters) == 1, "Multiple filters should be fused into one"
        assert plan_has_operator(optimized, Sort), "Sort should remain"
        assert plan_operator_comes_before(
            optimized, Filter, Sort
        ), "Fused filter should come before Sort"
class TestPassthroughWithSubstitutionBehavior:
    """Tests for PASSTHROUGH_WITH_SUBSTITUTION behavior operators.

    Operator: Project (used by rename_columns, select, with_column)
    Predicates push through but column names must be rebound.
    """

    @pytest.fixture
    def parquet_ds(self, ray_start_regular_shared):
        return ray.data.read_parquet("example://iris.parquet")

    def test_simple_rename_with_filter(self, parquet_ds):
        """Filter after rename should rebind columns and push down."""
        ds = parquet_ds.rename_columns({"sepal.length": "len"}).filter(
            expr=col("len") > 5.0
        )

        # Same rows as filtering on the original column name first.
        reference = parquet_ds.filter(expr=col("sepal.length") > 5.0).rename_columns(
            {"sepal.length": "len"}
        )
        assert rows_same(ds.to_pandas(), reference.to_pandas())

        optimized = LogicalOptimizer().optimize(ds._plan._logical_plan)
        assert not plan_has_operator(
            optimized, Filter
        ), "Filter should be pushed into Read, no Filter operators should remain"

    def test_chained_renames_with_filter(self, parquet_ds):
        """Multiple renames should track through filter pushdown."""
        ds = (
            parquet_ds.rename_columns({"sepal.length": "a"})
            .rename_columns({"a": "b"})
            .filter(expr=col("b") > 5.0)
        )

        reference = (
            parquet_ds.filter(expr=col("sepal.length") > 5.0)
            .rename_columns({"sepal.length": "a"})
            .rename_columns({"a": "b"})
        )
        assert rows_same(ds.to_pandas(), reference.to_pandas())

        optimized = LogicalOptimizer().optimize(ds._plan._logical_plan)
        assert not plan_has_operator(
            optimized, Filter
        ), "Filter should be pushed into Read after rebinding through renames"

    def test_multiple_filters_with_renames(self, parquet_ds):
        """Multiple filters with renames should all rebind and push."""
        ds = (
            parquet_ds.rename_columns({"sepal.length": "a"})
            .filter(expr=col("a") > 2.0)
            .rename_columns({"a": "b"})
            .filter(expr=col("b") < 5.0)
        )

        # Both filters, rebound to the original column name and combined.
        combined = (col("sepal.length") > 2.0) & (col("sepal.length") < 5.0)
        reference = (
            parquet_ds.filter(expr=combined)
            .rename_columns({"sepal.length": "a"})
            .rename_columns({"a": "b"})
        )
        assert rows_same(ds.to_pandas(), reference.to_pandas())

        optimized = LogicalOptimizer().optimize(ds._plan._logical_plan)
        assert not plan_has_operator(
            optimized, Filter
        ), "All filters should be fused, rebound, and pushed into Read"
class TestProjectionWithFilterEdgeCases:
    """Tests for edge cases with select_columns and with_column followed by filters.

    These tests verify that filters correctly handle:
    - Columns that are kept by select (should push through)
    - Columns that are removed by select (should NOT push through)
    - Computed columns from with_column (should NOT push through)
    """

    @pytest.fixture
    def base_ds(self, ray_start_regular_shared):
        # Small three-row, three-column in-memory dataset shared by the tests.
        return ray.data.from_items(
            [
                {"a": 1, "b": 2, "c": 3},
                {"a": 2, "b": 5, "c": 8},
                {"a": 3, "b": 6, "c": 9},
            ]
        )

    def test_select_then_filter_on_selected_column(self, base_ds):
        """Filter on selected column should push through select."""
        ds = base_ds.select_columns(["a", "b"]).filter(expr=col("a") > 1)
        # Verify correctness
        result_df = ds.to_pandas()
        expected_df = pd.DataFrame(
            [
                {"a": 2, "b": 5},
                {"a": 3, "b": 6},
            ]
        )
        # Sort columns before comparison
        result_df = result_df[sorted(result_df.columns)]
        expected_df = expected_df[sorted(expected_df.columns)]
        assert rows_same(result_df, expected_df)
        # Verify plan: filter pushed through select
        optimized_plan = LogicalOptimizer().optimize(ds._plan._logical_plan)
        assert plan_operator_comes_before(
            optimized_plan, Filter, Project
        ), "Filter should be pushed before Project"

    def test_select_then_filter_on_removed_column(self, base_ds):
        """Filter on removed column should fail, not push through."""
        ds = base_ds.select_columns(["a"])
        # Column "b" no longer exists after the projection; the failure may
        # surface locally (KeyError) or wrapped by a remote task (RayTaskError).
        with pytest.raises((KeyError, ray.exceptions.RayTaskError)):
            ds.filter(expr=col("b") == 2).take_all()

    def test_with_column_then_filter_on_computed_column(self, base_ds):
        """Filter on computed column should not push through."""
        from ray.data.expressions import lit

        ds = base_ds.with_column("d", lit(4)).filter(expr=col("d") == 4)
        # Verify correctness - all rows should pass (d is always 4)
        result_df = ds.to_pandas()
        expected_df = pd.DataFrame(
            [
                {"a": 1, "b": 2, "c": 3, "d": 4},
                {"a": 2, "b": 5, "c": 8, "d": 4},
                {"a": 3, "b": 6, "c": 9, "d": 4},
            ]
        )
        # Sort columns before comparison
        result_df = result_df[sorted(result_df.columns)]
        expected_df = expected_df[sorted(expected_df.columns)]
        assert rows_same(result_df, expected_df)
        # Verify plan: filter should NOT push through (stays after with_column)
        optimized_plan = LogicalOptimizer().optimize(ds._plan._logical_plan)
        assert plan_has_operator(
            optimized_plan, Filter
        ), "Filter should remain (not pushed through)"

    def test_rename_then_filter_on_old_column_name(self, base_ds):
        """Filter using old column name after rename should fail."""
        ds = base_ds.rename_columns({"b": "B"})
        with pytest.raises((KeyError, ray.exceptions.RayTaskError)):
            ds.filter(expr=col("b") == 2).take_all()

    # Each case: (dataset factory, rename map, column to filter on, filter
    # threshold, expected rows or None when verified by an alternative
    # computation instead).
    @pytest.mark.parametrize(
        "ds_factory,rename_map,filter_col,filter_value,expected_rows",
        [
            # In-memory dataset: rename a->b, b->b_old
            (
                lambda: ray.data.from_items(
                    [
                        {"a": 1, "b": 2, "c": 3},
                        {"a": 2, "b": 5, "c": 8},
                        {"a": 3, "b": 6, "c": 9},
                    ]
                ),
                {"a": "b", "b": "b_old"},
                "b",
                1,
                [{"b": 2, "b_old": 5, "c": 8}, {"b": 3, "b_old": 6, "c": 9}],
            ),
            # Parquet dataset: rename sepal.length->sepal.width, sepal.width->old_width
            (
                lambda: ray.data.read_parquet("example://iris.parquet"),
                {"sepal.length": "sepal.width", "sepal.width": "old_width"},
                "sepal.width",
                5.0,
                None,  # Will verify via alternative computation
            ),
        ],
        ids=["in_memory", "parquet"],
    )
    def test_rename_chain_with_name_reuse(
        self,
        ray_start_regular_shared,
        ds_factory,
        rename_map,
        filter_col,
        filter_value,
        expected_rows,
    ):
        """Test rename chains where an output name matches another rename's input name.

        This tests the fix for a bug where rename(a->b, b->c) followed by filter(b>5)
        would incorrectly block pushdown, even though 'b' is a valid output column
        (created by a->b).

        Example: rename({'a': 'b', 'b': 'temp'}) creates 'b' from 'a' and 'temp' from 'b'.
        A filter on 'b' should be able to push through.
        """
        ds = ds_factory()
        # Apply rename and filter
        ds_renamed_filtered = ds.rename_columns(rename_map).filter(
            expr=col(filter_col) > filter_value
        )
        # Verify correctness
        if expected_rows is not None:
            # For in-memory, compare against expected rows
            result_df = ds_renamed_filtered.to_pandas()
            expected_df = pd.DataFrame(expected_rows)
            result_df = result_df[sorted(result_df.columns)]
            expected_df = expected_df[sorted(expected_df.columns)]
            assert rows_same(result_df, expected_df)
        else:
            # For parquet, compare against alternative computation
            # Filter on original column, then rename
            original_col = next(k for k, v in rename_map.items() if v == filter_col)
            expected = ds.filter(expr=col(original_col) > filter_value).rename_columns(
                rename_map
            )
            assert rows_same(ds_renamed_filtered.to_pandas(), expected.to_pandas())
        # Verify plan optimization
        optimized_plan = LogicalOptimizer().optimize(
            ds_renamed_filtered._plan._logical_plan
        )
        # Determine if the data source supports predicate pushdown by checking
        # if the filter was completely eliminated (pushed into the read operator)
        has_filter = plan_has_operator(optimized_plan, Filter)
        has_project = plan_has_operator(optimized_plan, Project)
        # For file-based reads that support predicate pushdown (e.g., parquet),
        # the filter should be completely pushed into the read operator.
        # We detect this by checking if the filter is gone after optimization.
        if not has_filter and not has_project:
            # Filter was pushed into Read - this is the optimal case
            pass  # Test passes
        elif has_filter and has_project:
            # For in-memory datasets, filter should at least push through projection
            assert plan_operator_comes_before(
                optimized_plan, Filter, Project
            ), "Filter should be pushed before Project after rebinding through rename chain"
        else:
            # Unexpected state - either filter or project but not both
            raise AssertionError(
                f"Unexpected optimization state: has_filter={has_filter}, has_project={has_project}"
            )
class TestPushIntoBranchesBehavior:
    """Tests for PUSH_INTO_BRANCHES behavior operators.

    Operator: Union
    Predicates are duplicated and pushed into each branch.
    """

    def test_simple_union_with_filter(self, ray_start_regular_shared):
        """Filter after union should push into both branches."""
        left = ray.data.range(100, parallelism=2)
        right = ray.data.range(100, parallelism=2)
        ds = left.union(right).filter(expr=col("id") >= 50)

        # Equivalent to filtering each branch first; duplicates are expected.
        base = ray.data.range(100)
        reference = base.filter(expr=col("id") >= 50).union(
            base.filter(expr=col("id") >= 50)
        )
        assert rows_same(ds.to_pandas(), reference.to_pandas())

    def test_multiple_unions_with_filter(self, ray_start_regular_shared):
        """Filter should push into all branches of multiple unions."""
        first = ray.data.read_parquet("example://iris.parquet")
        second = ray.data.read_parquet("example://iris.parquet")
        third = ray.data.read_parquet("example://iris.parquet")
        ds = first.union(second).union(third).filter(expr=col("sepal.length") > 5.0)

        # Three filtered copies of the same data.
        filtered_once = ray.data.read_parquet("example://iris.parquet").filter(
            expr=col("sepal.length") > 5.0
        )
        reference = filtered_once.union(filtered_once).union(filtered_once)
        assert rows_same(ds.to_pandas(), reference.to_pandas())

    def test_branch_filters_plus_union_filter(self, ray_start_regular_shared):
        """Individual branch filters plus union filter should all push."""
        source = ray.data.read_parquet("example://iris.parquet")
        left = source.filter(expr=col("sepal.width") > 2.0)
        right = source.filter(expr=col("sepal.width") > 2.0)
        ds = left.union(right).filter(expr=col("sepal.length") < 5.0)

        # Both predicates applied to each branch.
        both = source.filter(
            expr=(col("sepal.width") > 2.0) & (col("sepal.length") < 5.0)
        )
        reference = both.union(both)
        assert rows_same(ds.to_pandas(), reference.to_pandas())
# Allow running this test module directly (outside of a pytest invocation).
if __name__ == "__main__":
    import sys

    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_predicate_pushdown.py",
"license": "Apache License 2.0",
"lines": 666,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:release/llm_tests/batch/test_batch_single_node_vllm.py | #!/usr/bin/env python
"""
Single-node vLLM baseline benchmark for Ray Data LLM batch inference.
Measures throughput and supports env-driven thresholds and
JSON artifact output.
"""
import json
import os
import sys
import pytest
import ray
from ray.llm._internal.batch.benchmark.dataset import ShareGPTDataset
from ray.llm._internal.batch.benchmark.benchmark_processor import (
Mode,
VLLM_SAMPLING_PARAMS,
benchmark,
)
# Benchmark constants
NUM_REQUESTS = 1000  # number of prompts sampled from the ShareGPT dataset
MODEL_ID = "facebook/opt-1.3b"  # model served by the vLLM engine
BATCH_SIZE = 64  # batch size handed to the benchmark processor
CONCURRENCY = 1  # single worker: this is the single-node baseline
@pytest.fixture(autouse=True)
def disable_vllm_compile_cache(monkeypatch):
    """Turn off vLLM's compile cache so a corrupted cache cannot break runs."""
    monkeypatch.setenv("VLLM_DISABLE_COMPILE_CACHE", "1")
@pytest.fixture(autouse=True)
def cleanup_ray_resources():
    """Shut Ray down after each test so resources don't leak across tests."""
    yield
    ray.shutdown()
def _get_float_env(name: str, default: float | None = None) -> float | None:
value = os.getenv(name)
if value is None or value == "":
return default
try:
return float(value)
except ValueError:
raise AssertionError(f"Invalid float for {name}: {value}")
def test_single_node_baseline_benchmark():
    """
    Single-node baseline benchmark: facebook/opt-1.3b, TP=1, PP=1, 1000 prompts.

    Logs BENCHMARK_* metrics and optionally asserts perf thresholds from env:
    - RAY_DATA_LLM_BENCHMARK_MIN_THROUGHPUT (req/s)
    - RAY_DATA_LLM_BENCHMARK_MAX_LATENCY_S (seconds)

    Writes JSON artifact to RAY_LLM_BENCHMARK_ARTIFACT_PATH if set.
    """
    # Dataset setup
    dataset_path = os.getenv(
        "RAY_LLM_BENCHMARK_DATASET_PATH", "/tmp/ray_llm_benchmark_dataset"
    )
    # Fixed seed so the sampled prompt set is reproducible across runs.
    dataset = ShareGPTDataset(
        dataset_path=dataset_path,
        seed=0,
        hf_dataset_id="Crystalcareai/Code-feedback-sharegpt-renamed",
        hf_split="train",
        truncate_prompt=2048,
    )
    print(f"Loading {NUM_REQUESTS} prompts from ShareGPT dataset...")
    prompts = dataset.sample(num_requests=NUM_REQUESTS)
    print(f"Loaded {len(prompts)} prompts")
    ds = ray.data.from_items(prompts)
    # Benchmark config (single node, TP=1, PP=1)
    print(
        f"\nBenchmark: {MODEL_ID}, batch={BATCH_SIZE}, concurrency={CONCURRENCY}, TP=1, PP=1"
    )
    # Use benchmark processor to run a single-node vLLM benchmark
    result = benchmark(
        Mode.VLLM_ENGINE,
        ds,
        batch_size=BATCH_SIZE,
        concurrency=CONCURRENCY,
        model=MODEL_ID,
        sampling_params=VLLM_SAMPLING_PARAMS,
        pipeline_parallel_size=1,
        tensor_parallel_size=1,
    )
    result.show()
    # Assertions and metrics
    # Sanity checks: every prompt produced a sample and the run made progress.
    assert result.samples == len(prompts)
    assert result.throughput > 0
    # The BENCHMARK_* lines below are parsed downstream -- keep their format stable.
    print("\n" + "=" * 60)
    print("BENCHMARK METRICS")
    print("=" * 60)
    print(f"BENCHMARK_THROUGHPUT: {result.throughput:.4f} req/s")
    print(f"BENCHMARK_LATENCY: {result.elapsed_s:.4f} s")
    print(f"BENCHMARK_SAMPLES: {result.samples}")
    print("=" * 60)
    # Optional thresholds to fail on regressions
    min_throughput = _get_float_env("RAY_DATA_LLM_BENCHMARK_MIN_THROUGHPUT", 5)
    max_latency_s = _get_float_env("RAY_DATA_LLM_BENCHMARK_MAX_LATENCY_S", 150)
    if min_throughput is not None:
        assert (
            result.throughput >= min_throughput
        ), f"Throughput regression: {result.throughput:.4f} < {min_throughput:.4f} req/s"
    if max_latency_s is not None:
        assert (
            result.elapsed_s <= max_latency_s
        ), f"Latency regression: {result.elapsed_s:.4f} > {max_latency_s:.4f} s"
    # Optional JSON artifact emission for downstream ingestion
    artifact_path = os.getenv("RAY_LLM_BENCHMARK_ARTIFACT_PATH")
    if artifact_path:
        metrics = {
            "model": MODEL_ID,
            "batch_size": BATCH_SIZE,
            "concurrency": CONCURRENCY,
            "samples": int(result.samples),
            "throughput_req_per_s": float(result.throughput),
            "elapsed_s": float(result.elapsed_s),
        }
        try:
            os.makedirs(os.path.dirname(artifact_path), exist_ok=True)
            with open(artifact_path, "w", encoding="utf-8") as f:
                json.dump(metrics, f, indent=2, sort_keys=True)
            print(f"Wrote benchmark artifact to: {artifact_path}")
        except Exception as e:  # noqa: BLE001
            # Best-effort: a failed artifact write must not fail the benchmark.
            print(
                f"Warning: failed to write benchmark artifact to {artifact_path}: {e}"
            )
# Allow running the benchmark directly; "-s" keeps the metric prints visible.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", "-s", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "release/llm_tests/batch/test_batch_single_node_vllm.py",
"license": "Apache License 2.0",
"lines": 121,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/dashboard/tests/test_dashboard_auth.py | """Tests for dashboard token authentication."""
import sys
import pytest
import requests
def test_dashboard_request_requires_auth_with_valid_token(
    setup_cluster_with_token_auth,
):
    """Requests carrying the cluster token succeed when auth is enabled."""
    info = setup_cluster_with_token_auth
    auth_headers = {"Authorization": f"Bearer {info['token']}"}
    url = f"{info['dashboard_url']}/api/component_activities"
    resp = requests.get(url, headers=auth_headers)
    assert resp.status_code == 200
def test_dashboard_request_requires_auth_missing_token(setup_cluster_with_token_auth):
    """Requests lacking any token are rejected with 401 when auth is enabled."""
    info = setup_cluster_with_token_auth
    url = f"{info['dashboard_url']}/api/component_activities"
    # No Authorization header at all -> Unauthorized.
    resp = requests.get(url, json={"test": "data"})
    assert resp.status_code == 401
def test_dashboard_request_requires_auth_invalid_token(setup_cluster_with_token_auth):
    """Requests with a wrong token are rejected with 403 when auth is enabled."""
    info = setup_cluster_with_token_auth
    bogus = {"Authorization": "Bearer wrong_token_00000000000000000000000000000000"}
    resp = requests.get(
        f"{info['dashboard_url']}/api/component_activities",
        json={"test": "data"},
        headers=bogus,
    )
    # A present-but-wrong token is Forbidden, not Unauthorized.
    assert resp.status_code == 403
def test_dashboard_request_with_ray_auth_header(setup_cluster_with_token_auth):
    """A valid token in the X-Ray-Authorization header is also accepted."""
    info = setup_cluster_with_token_auth
    alt_headers = {"X-Ray-Authorization": f"Bearer {info['token']}"}
    resp = requests.get(
        f"{info['dashboard_url']}/api/component_activities",
        headers=alt_headers,
    )
    assert resp.status_code == 200
def test_authorization_header_takes_precedence(setup_cluster_with_token_auth):
    """The standard Authorization header always wins over X-Ray-Authorization."""
    info = setup_cluster_with_token_auth
    url = f"{info['dashboard_url']}/api/component_activities"
    good = f"Bearer {info['token']}"
    bad = "Bearer invalid_token_000000000000000000000000"

    # Valid Authorization + invalid X-Ray-Authorization -> accepted,
    # because the standard header is consulted first.
    resp = requests.get(
        url,
        headers={"Authorization": good, "X-Ray-Authorization": bad},
    )
    assert resp.status_code == 200

    # Invalid Authorization + valid X-Ray-Authorization -> rejected,
    # again because the standard header takes precedence.
    resp = requests.get(
        url,
        headers={"Authorization": bad, "X-Ray-Authorization": good},
    )
    assert resp.status_code == 403
def test_dashboard_auth_disabled(setup_cluster_without_token_auth):
    """When AUTH_MODE is disabled, unauthenticated requests go through."""
    info = setup_cluster_without_token_auth
    # No credentials at all; the request must still succeed.
    resp = requests.get(
        f"{info['dashboard_url']}/api/component_activities",
        json={"test": "data"},
    )
    assert resp.status_code == 200
def test_authentication_mode_endpoint_with_token_auth(setup_cluster_with_token_auth):
    """authentication_mode reports 'token' when token auth is enabled."""
    info = setup_cluster_with_token_auth
    # This discovery endpoint is intentionally reachable without credentials.
    resp = requests.get(f"{info['dashboard_url']}/api/authentication_mode")
    assert resp.status_code == 200
    assert resp.json() == {"authentication_mode": "token"}
def test_authentication_mode_endpoint_without_auth(setup_cluster_without_token_auth):
    """authentication_mode reports 'disabled' when token auth is off."""
    info = setup_cluster_without_token_auth
    resp = requests.get(f"{info['dashboard_url']}/api/authentication_mode")
    assert resp.status_code == 200
    assert resp.json() == {"authentication_mode": "disabled"}
def test_authentication_mode_endpoint_is_public(setup_cluster_with_token_auth):
    """authentication_mode works without any Authorization header."""
    info = setup_cluster_with_token_auth
    # Explicitly send no auth headers; the endpoint must stay public even
    # with token auth enabled.
    resp = requests.get(
        f"{info['dashboard_url']}/api/authentication_mode",
        headers={},  # Explicitly no auth
    )
    assert resp.status_code == 200
    assert resp.json() == {"authentication_mode": "token"}
# Allow running this test module directly; propagate pytest's exit code.
if __name__ == "__main__":
    sys.exit(pytest.main(["-vv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/dashboard/tests/test_dashboard_auth.py",
"license": "Apache License 2.0",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/_private/authentication/authentication_token_generator.py | import secrets
def generate_new_authentication_token() -> str:
    """Create a fresh hex-encoded authentication token for the cluster.

    The token carries 256 bits (32 bytes) of cryptographically secure
    randomness, which is considered sufficient to resist brute-force attacks.
    """
    return secrets.token_bytes(32).hex()
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/_private/authentication/authentication_token_generator.py",
"license": "Apache License 2.0",
"lines": 6,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:python/ray/tests/test_token_auth_integration.py | """Integration tests for token-based authentication in Ray."""
import os
import subprocess
import sys
from pathlib import Path
from typing import Optional
import pytest
import ray
import ray.dashboard.consts as dashboard_consts
from ray._common.network_utils import build_address
from ray._common.test_utils import (
PrometheusTimeseries,
fetch_prometheus_timeseries,
wait_for_condition,
)
from ray._private.test_utils import client_test_enabled
try:
from ray._raylet import AuthenticationTokenLoader
_RAYLET_AVAILABLE = True
except ImportError:
_RAYLET_AVAILABLE = False
AuthenticationTokenLoader = None
from ray._private.authentication_test_utils import (
authentication_env_guard,
clear_auth_token_sources,
reset_auth_token_state,
set_auth_mode,
set_auth_token_path,
set_env_auth_token,
)
pytestmark = pytest.mark.skipif(
not _RAYLET_AVAILABLE,
reason="Authentication tests require ray._raylet (not available in minimal installs)",
)
def _run_ray_start_and_verify_status(
    args: list, env: dict, expect_success: bool = True, timeout: int = 30
) -> subprocess.CompletedProcess:
    """Helper to run a ``ray start`` command and assert on its exit status.

    Args:
        args: Extra CLI arguments appended after ``ray start``.
        env: Environment for the subprocess (merged with the cluster flag below).
        expect_success: If True, assert a zero exit code; otherwise assert a
            non-zero exit code AND that the error output mentions "token".
        timeout: Seconds to wait for the subprocess before raising.

    Returns:
        The completed subprocess result with stdout/stderr captured as text.
    """
    result = subprocess.run(
        ["ray", "start"] + args,
        # Allow starting multi-node clusters on Windows/OSX in CI.
        env={"RAY_ENABLE_WINDOWS_OR_OSX_CLUSTER": "1", **env},
        capture_output=True,
        text=True,
        timeout=timeout,
    )
    if expect_success:
        assert result.returncode == 0, (
            f"ray start should have succeeded. "
            f"stdout: {result.stdout}, stderr: {result.stderr}"
        )
    else:
        assert result.returncode != 0, (
            f"ray start should have failed but succeeded. "
            f"stdout: {result.stdout}, stderr: {result.stderr}"
        )
        # Check that error message mentions token
        error_output = result.stdout + result.stderr
        assert (
            "authentication token" in error_output.lower()
            or "token" in error_output.lower()
        ), f"Error message should mention token. Got: {error_output}"
    return result
def _cleanup_ray_start(env: Optional[dict] = None):
    """Best-effort teardown for clusters started via ``ray start``.

    Disconnects the driver, force-stops the cluster, and then waits (without
    failing the test) until ``ray status`` reports that no cluster is running.

    Args:
        env: Optional environment to run ``ray stop`` with.
    """
    # Ensure any ray.init() connection is closed first
    if ray.is_initialized():
        ray.shutdown()
    # Stop with a longer timeout
    subprocess.run(
        ["ray", "stop", "--force"],
        env=env,
        capture_output=True,
        timeout=60,  # Increased timeout for flaky cleanup
        check=False,  # Don't raise on non-zero exit
    )
    # Wait for ray processes to actually stop
    def ray_stopped():
        result = subprocess.run(
            ["ray", "status"],
            capture_output=True,
            check=False,
        )
        # ray status returns non-zero when no cluster is running
        return result.returncode != 0
    try:
        wait_for_condition(ray_stopped, timeout=10, retry_interval_ms=500)
    except Exception:
        # Best effort - don't fail the test if we can't verify it stopped
        pass
@pytest.fixture(autouse=True)
def clean_token_sources(cleanup_auth_token_env):
    """Ensure authentication-related state is clean around each test.

    Setup clears every token source (including the default token file) and
    resets cached loader state; teardown disconnects the driver, force-stops
    any leftover cluster, and resets the loader state again.
    """
    clear_auth_token_sources(remove_default=True)
    reset_auth_token_state()
    yield
    # Teardown: make sure no driver connection or background cluster leaks
    # into the next test.
    if ray.is_initialized():
        ray.shutdown()
    subprocess.run(
        ["ray", "stop", "--force"],
        capture_output=True,
        timeout=60,
        check=False,
    )
    reset_auth_token_state()
@pytest.mark.skipif(
    client_test_enabled(),
    reason="This test is for starting a new local cluster, not compatible with client mode",
)
def test_local_cluster_generates_token():
    """Test ray.init() generates token for local cluster when auth_mode=token is set."""
    # Ensure no token exists
    default_token_path = Path.home() / ".ray" / "auth_token"
    assert (
        not default_token_path.exists()
    ), f"Token file already exists at {default_token_path}"
    # Enable token auth via environment variable
    set_auth_mode("token")
    reset_auth_token_state()
    # Initialize Ray with token auth
    ray.init()
    try:
        # Verify token file was created
        assert default_token_path.exists(), (
            f"Token file was not created at {default_token_path}. "
            f"HOME={os.environ.get('HOME')}, "
            f"Files in {default_token_path.parent}: {list(default_token_path.parent.iterdir()) if default_token_path.parent.exists() else 'directory does not exist'}"
        )
        token = default_token_path.read_text().strip()
        # Generated tokens are 32 random bytes hex-encoded: 64 lowercase hex chars.
        assert len(token) == 64
        assert all(c in "0123456789abcdef" for c in token)
        # Verify cluster is working
        assert ray.is_initialized()
    finally:
        ray.shutdown()
def test_connect_without_token_raises_error(setup_cluster_with_token_auth):
    """Connecting to a token-protected cluster without a token must fail."""
    head_cluster = setup_cluster_with_token_auth["cluster"]

    # Disconnect the current driver session and drop token state before retrying.
    ray.shutdown()
    set_auth_mode("disabled")
    clear_auth_token_sources(remove_default=True)
    reset_auth_token_state()

    # Sanity check: the loader must not find a token anywhere.
    assert not AuthenticationTokenLoader.instance().has_token()

    # The tokenless connection attempt should be rejected.
    with pytest.raises(ConnectionError):
        ray.init(address=head_cluster.address)
@pytest.mark.parametrize(
    "token,expected_status",
    [
        (None, 401),  # No token -> Unauthorized
        ("wrong_token", 403),  # Wrong token -> Forbidden
    ],
    ids=["no_token", "wrong_token"],
)
def test_state_api_auth_failure(token, expected_status, setup_cluster_with_token_auth):
    """State API calls must fail with a missing or incorrect token."""
    import requests

    dashboard_url = setup_cluster_with_token_auth["dashboard_url"]

    # Only attach an Authorization header when a token was supplied.
    headers = {"Authorization": f"Bearer {token}"} if token is not None else {}

    response = requests.get(f"{dashboard_url}/api/v0/actors", headers=headers)
    assert response.status_code == expected_status, (
        f"State API should return {expected_status}, got {response.status_code}: "
        f"{response.text}"
    )
@pytest.mark.parametrize("tokens_match", [True, False])
def test_cluster_token_authentication(tokens_match, setup_cluster_with_token_auth):
    """Test cluster authentication with matching and non-matching tokens."""
    cluster_info = setup_cluster_with_token_auth
    cluster = cluster_info["cluster"]
    cluster_token = cluster_info["token"]
    # Reconfigure the driver token state to simulate fresh connections.
    ray.shutdown()
    set_auth_mode("token")
    if tokens_match:
        client_token = cluster_token  # Same token - should succeed
    else:
        client_token = "b" * 64  # Different token - should fail
    set_env_auth_token(client_token)
    reset_auth_token_state()
    if tokens_match:
        # Should succeed - test gRPC calls work
        ray.init(address=cluster.address)
        obj_ref = ray.put("test_data")
        result = ray.get(obj_ref)
        assert result == "test_data"
        @ray.remote
        def test_func():
            return "success"
        result = ray.get(test_func.remote())
        assert result == "success"
        ray.shutdown()
    else:
        # Should fail - connection or gRPC calls should fail
        # NOTE(review): depending on timing, the failure may surface at
        # ray.init() or at the first data-plane call (ray.put); either way
        # the pytest.raises block catches it.
        with pytest.raises((ConnectionError, RuntimeError)):
            ray.init(address=cluster.address)
            try:
                ray.put("test")
            finally:
                ray.shutdown()
@pytest.mark.skipif(
    client_test_enabled(),
    reason="Uses subprocess ray start, not compatible with client mode",
)
@pytest.mark.parametrize("is_head", [True, False])
def test_ray_start_without_token_raises_error(is_head, request):
    """Test that ray start fails when auth_mode=token but no token exists."""
    # Set up environment with token auth enabled but no token
    env = os.environ.copy()
    env["RAY_AUTH_MODE"] = "token"
    env.pop("RAY_AUTH_TOKEN", None)
    env.pop("RAY_AUTH_TOKEN_PATH", None)
    # Ensure no default token file exists (already cleaned by fixture)
    default_token_path = Path.home() / ".ray" / "auth_token"
    assert not default_token_path.exists()
    # When specifying an address, we need a head node to connect to
    # (fixture is requested lazily so the head-node case doesn't start one).
    cluster_info = None
    if not is_head:
        cluster_info = request.getfixturevalue("setup_cluster_with_token_auth")
        cluster = cluster_info["cluster"]
        ray.shutdown()
    # Prepare arguments
    if is_head:
        args = ["--head", "--port=0"]
    else:
        args = [f"--address={cluster.address}"]
    # Try to start node - should fail (helper also asserts the error output
    # mentions the missing token).
    _run_ray_start_and_verify_status(args, env, expect_success=False)
@pytest.mark.skipif(
    client_test_enabled(),
    reason="Uses subprocess ray start, not compatible with client mode",
)
def test_ray_start_head_with_token_succeeds():
    """Test that ray start --head succeeds when token auth is enabled with a valid token."""
    # Set up environment with token auth and a valid token
    test_token = "a" * 64
    env = os.environ.copy()
    env["RAY_AUTH_TOKEN"] = test_token
    env["RAY_AUTH_MODE"] = "token"
    try:
        # Start head node - should succeed
        _run_ray_start_and_verify_status(
            ["--head", "--port=0"], env, expect_success=True
        )
        # Verify we can connect to the cluster with ray.init()
        set_env_auth_token(test_token)
        set_auth_mode("token")
        reset_auth_token_state()
        # Wait for cluster to be ready
        # NOTE(review): cluster_ready swallows all exceptions so the wait can
        # retry; a persistent failure surfaces as a wait_for_condition timeout.
        def cluster_ready():
            try:
                ray.init(address="auto")
                return True
            except Exception:
                return False
        wait_for_condition(cluster_ready, timeout=10)
        assert ray.is_initialized()
        # Test basic operations work
        @ray.remote
        def test_func():
            return "success"
        result = ray.get(test_func.remote())
        assert result == "success"
    finally:
        # Cleanup handles ray.shutdown() internally
        _cleanup_ray_start(env)
@pytest.mark.skipif(
    client_test_enabled(),
    reason="Uses subprocess ray start, not compatible with client mode",
)
@pytest.mark.parametrize("token_match", ["correct", "incorrect"])
def test_ray_start_address_with_token(token_match, setup_cluster_with_token_auth):
    """Test ray start --address=... with correct or incorrect token."""
    cluster_info = setup_cluster_with_token_auth
    cluster = cluster_info["cluster"]
    cluster_token = cluster_info["token"]
    # Reset the driver connection to reuse the fixture-backed cluster.
    ray.shutdown()
    set_auth_mode("token")
    # Set up environment for worker
    env = os.environ.copy()
    env["RAY_AUTH_MODE"] = "token"
    if token_match == "correct":
        env["RAY_AUTH_TOKEN"] = cluster_token
        expect_success = True
    else:
        env["RAY_AUTH_TOKEN"] = "b" * 64
        expect_success = False
    # Start worker node (helper asserts on the expected exit status)
    _run_ray_start_and_verify_status(
        [f"--address={cluster.address}", "--num-cpus=1"],
        env,
        expect_success=expect_success,
    )
    if token_match == "correct":
        try:
            # Connect and verify the cluster has 2 nodes (head + worker)
            set_env_auth_token(cluster_token)
            reset_auth_token_state()
            ray.init(address=cluster.address)
            def worker_joined():
                return len(ray.nodes()) >= 2
            wait_for_condition(worker_joined, timeout=10)
            nodes = ray.nodes()
            assert (
                len(nodes) >= 2
            ), f"Expected at least 2 nodes, got {len(nodes)}: {nodes}"
        finally:
            if ray.is_initialized():
                ray.shutdown()
            _cleanup_ray_start(env)
def test_e2e_operations_with_token_auth(setup_cluster_with_token_auth):
    """Test that e2e operations work with token authentication enabled.

    This verifies that with token auth enabled:
    1. Tasks execute successfully
    2. Actors can be created and called
    3. State API works (list_nodes, list_actors, list_tasks)
    4. Job submission works
    """
    cluster_info = setup_cluster_with_token_auth
    # Test 1: Submit a simple task
    @ray.remote
    def simple_task(x):
        return x + 1
    result = ray.get(simple_task.remote(41))
    assert result == 42, f"Task should return 42, got {result}"
    # Test 2: Create and use an actor
    @ray.remote
    class SimpleActor:
        def __init__(self):
            self.value = 0
        def increment(self):
            self.value += 1
            return self.value
    actor = SimpleActor.remote()
    result = ray.get(actor.increment.remote())
    assert result == 1, f"Actor method should return 1, got {result}"
    # Test 3: State API operations (uses HTTP with auth headers)
    from ray.util.state import list_actors, list_nodes, list_tasks
    # List nodes - should include at least the head node
    wait_for_condition(lambda: len(list_nodes()) >= 1)
    # List actors - should include our SimpleActor
    # NOTE(review): assumes the first listed actor is ours, which holds in
    # this single-actor test.
    def check_actors():
        actors = list_actors()
        if len(actors) < 1:
            return False
        return "SimpleActor" in actors[0].class_name
    wait_for_condition(check_actors)
    # List tasks - should include completed tasks
    wait_for_condition(lambda: len(list_tasks()) >= 1)
    # Test 4: Submit a job and wait for completion
    from ray.job_submission import JobSubmissionClient
    # Create job submission client (uses HTTP with auth headers)
    client = JobSubmissionClient(address=cluster_info["dashboard_url"])
    # Submit a simple job
    job_id = client.submit_job(
        entrypoint="echo 'Hello from job'",
    )
    # Wait for job to complete
    def job_finished():
        status = client.get_job_status(job_id)
        return status in ["SUCCEEDED", "FAILED", "STOPPED"]
    wait_for_condition(job_finished, timeout=30)
    final_status = client.get_job_status(job_id)
    assert (
        final_status == "SUCCEEDED"
    ), f"Job should succeed, got status: {final_status}"
def test_logs_api_with_token_auth(setup_cluster_with_token_auth):
    """Test that log APIs work with token authentication enabled."""
    from ray.util.state import get_log, list_logs

    # Logs are queried against a specific node; use the first (head) node.
    head_node_id = ray.nodes()[0]["NodeID"]

    # list_logs() must succeed and hand back a mapping of log categories.
    listed = list_logs(node_id=head_node_id)
    assert isinstance(listed, dict), f"list_logs should return a dict, got {type(listed)}"

    # get_log() streams chunks; receiving one string chunk of raylet.out is
    # enough to prove the authenticated path works.
    got_chunk = False
    for chunk in get_log(filename="raylet.out", node_id=head_node_id, tail=10):
        assert isinstance(chunk, str), f"get_log chunk should be str, got {type(chunk)}"
        got_chunk = True
        break
    assert got_chunk, "Should have received at least one log chunk"
@pytest.mark.skipif(
    client_test_enabled(),
    reason="Uses subprocess ray CLI, not compatible with client mode",
)
@pytest.mark.parametrize("use_generate", [True, False])
def test_get_auth_token_cli(use_generate):
    """Test ray get-auth-token CLI command.

    Covers both modes: generating a fresh token with --generate when no
    token source exists, and echoing back the token from RAY_AUTH_TOKEN.
    """
    test_token = "a" * 64
    with authentication_env_guard():
        if use_generate:
            # Test --generate flag (no token set)
            clear_auth_token_sources(remove_default=True)
            args = ["ray", "get-auth-token", "--generate"]
        else:
            # Test with existing token from env var
            set_env_auth_token(test_token)
            reset_auth_token_state()
            args = ["ray", "get-auth-token"]
        env = os.environ.copy()
        result = subprocess.run(
            args,
            env=env,
            capture_output=True,
            text=True,
            timeout=10,
        )
        assert result.returncode == 0, (
            f"ray get-auth-token should succeed. "
            f"stdout: {result.stdout}, stderr: {result.stderr}"
        )
        # Verify token is printed to stdout
        token = result.stdout.strip()
        assert len(token) == 64, token
        assert all(c in "0123456789abcdef" for c in token), "Token should be hex"
        if not use_generate:
            # When using env var, should get exact token back
            assert token == test_token
        # Verify logs went to stderr (if --generate was used)
        if use_generate:
            assert (
                "generating new authentication token..." in result.stderr.lower()
            ), "Should log generation to stderr"
@pytest.mark.skipif(
    client_test_enabled(),
    reason="Uses subprocess ray CLI, not compatible with client mode",
)
def test_get_auth_token_cli_no_token_no_generate():
    """Without a token and without --generate, the CLI must error out."""
    with authentication_env_guard():
        reset_auth_token_state()
        clear_auth_token_sources(remove_default=True)

        proc = subprocess.run(
            ["ray", "get-auth-token"],
            env=os.environ.copy(),
            capture_output=True,
            text=True,
            timeout=10,
        )

        assert proc.returncode != 0, "Should fail when no token and no --generate"
        stderr_lower = proc.stderr.lower()
        assert "error" in stderr_lower, "Should print error to stderr"
        assert "no" in stderr_lower and "token" in stderr_lower
@pytest.mark.skipif(
    client_test_enabled(),
    reason="Uses subprocess ray CLI, not compatible with client mode",
)
def test_get_auth_token_cli_piping():
    """Test that ray get-auth-token output can be piped."""
    test_token = "b" * 64
    with authentication_env_guard():
        set_env_auth_token(test_token)
        reset_auth_token_state()
        env = os.environ.copy()
        # Test piping: use token in shell pipeline
        # NOTE(review): relies on a POSIX shell and `wc`; confirm this test
        # is only run on POSIX platforms.
        result = subprocess.run(
            "ray get-auth-token | wc -c",
            shell=True,
            env=env,
            capture_output=True,
            text=True,
            timeout=10,
        )
        assert result.returncode == 0
        # Exactly 64 bytes means the token was emitted without a trailing
        # newline, which is what makes it pipe-friendly.
        char_count = int(result.stdout.strip())
        assert char_count == 64, f"Expected 64 chars (no newline), got {char_count}"
@pytest.mark.skipif(
    client_test_enabled(),
    reason="Tests AuthenticationTokenLoader directly, no benefit testing this in client mode",
)
def test_missing_token_file_raises_authentication_error():
    """A RAY_AUTH_TOKEN_PATH pointing at a missing file must raise AuthenticationError."""
    missing_path = "/nonexistent/path/to/token"
    with authentication_env_guard():
        # Start from a clean slate, then point the loader at a missing file.
        clear_auth_token_sources(remove_default=True)
        set_auth_mode("token")
        set_auth_token_path(None, missing_path)
        reset_auth_token_state()

        loader = AuthenticationTokenLoader.instance()
        with pytest.raises(ray.exceptions.AuthenticationError) as exc_info:
            loader.has_token()

        # The error should name both the bad path and the env var to fix.
        message = str(exc_info.value)
        assert str(Path(missing_path)) in message
        assert "RAY_AUTH_TOKEN_PATH" in message
@pytest.mark.skipif(
    client_test_enabled(),
    reason="Tests AuthenticationTokenLoader directly, no benefit testing this in client mode",
)
def test_empty_token_file_raises_authentication_error(tmp_path):
    """A RAY_AUTH_TOKEN_PATH pointing at an empty file must raise AuthenticationError."""
    empty_file = tmp_path / "empty_token_file.txt"
    with authentication_env_guard():
        # Clean slate, then register an empty on-disk token file.
        clear_auth_token_sources(remove_default=True)
        set_auth_mode("token")
        set_auth_token_path("", empty_file)
        reset_auth_token_state()

        loader = AuthenticationTokenLoader.instance()
        with pytest.raises(ray.exceptions.AuthenticationError) as exc_info:
            loader.has_token()

        message = str(exc_info.value)
        assert "cannot be opened or is empty" in message
        assert str(empty_file) in message
@pytest.mark.skipif(
    client_test_enabled(),
    reason="Tests AuthenticationTokenLoader directly, no benefit testing this in client mode",
)
def test_no_token_with_auth_enabled_returns_false():
    """has_token(ignore_auth_mode=True) returns False when no token exists.

    This allows the caller (ensure_token_if_auth_enabled) to decide whether
    to generate a new token or raise an error.
    """
    with authentication_env_guard():
        set_auth_mode("token")
        clear_auth_token_sources(remove_default=True)
        reset_auth_token_state()

        loader = AuthenticationTokenLoader.instance()
        # Must report the absence of a token rather than raising.
        assert loader.has_token(ignore_auth_mode=True) is False
@pytest.mark.skipif(
    client_test_enabled(),
    reason="no benefit testing this in client mode",
)
def test_opentelemetry_metrics_with_token_auth(setup_cluster_with_token_auth):
    """Test that OpenTelemetry metrics are exported with token authentication.

    This test verifies that the C++ OpenTelemetryMetricRecorder correctly includes
    the authentication token in its gRPC metadata when exporting metrics to the
    metrics agent. If the auth headers are missing or incorrect, the metrics agent
    would reject the requests and metrics wouldn't be collected.
    """
    cluster_info = setup_cluster_with_token_auth
    cluster = cluster_info["cluster"]
    # Get the metrics export address from the head node
    head_node = cluster.head_node
    prom_addresses = [
        build_address(head_node.node_ip_address, head_node.metrics_export_port)
    ]
    timeseries = PrometheusTimeseries()
    def verify_metrics_collected():
        """Verify that metrics are being exported successfully."""
        fetch_prometheus_timeseries(prom_addresses, timeseries)
        metric_names = list(timeseries.metric_descriptors.keys())
        # Check for core Ray metrics that are always exported
        # These metrics are exported via the C++ OpenTelemetry recorder
        expected_metrics = [
            "ray_node_cpu_utilization",
            "ray_node_mem_used",
            "ray_node_disk_usage",
        ]
        # At least some metrics should be present
        # (a single substring match against any expected metric suffices).
        return len(metric_names) > 0 and any(
            any(expected in name for name in metric_names)
            for expected in expected_metrics
        )
    # Wait for metrics to be collected
    # If auth wasn't working, the metrics agent would reject the exports
    # and we wouldn't see any metrics
    wait_for_condition(verify_metrics_collected, retry_interval_ms=1000)
def _get_dashboard_agent_address(cluster_info):
    """Get the dashboard agent HTTP address from a running cluster."""
    import json

    # The agent publishes its address under a per-node key in the internal KV.
    head_node_id = ray.nodes()[0]["NodeID"]
    kv_key = f"{dashboard_consts.DASHBOARD_AGENT_ADDR_NODE_ID_PREFIX}{head_node_id}"
    raw_addr = ray.experimental.internal_kv._internal_kv_get(
        kv_key, namespace=ray._private.ray_constants.KV_NAMESPACE_DASHBOARD
    )
    if not raw_addr:
        return None
    ip, http_port, _grpc_port = json.loads(raw_addr)
    return f"http://{ip}:{http_port}"
def _wait_and_get_dashboard_agent_address(cluster_info, timeout=30):
    """Waits for the dashboard agent address to become available and returns it.

    Args:
        cluster_info: Cluster fixture info forwarded to the address lookup.
        timeout: Maximum seconds to wait for the agent address to appear.

    Returns:
        The agent's HTTP address, e.g. ``"http://<ip>:<port>"``.
    """
    # Cache the address the poller found so we don't query the internal KV a
    # second time after the wait succeeds (the original re-query could race
    # with the agent address disappearing between the two lookups).
    found = {}

    def agent_address_is_available():
        found["addr"] = _get_dashboard_agent_address(cluster_info)
        return found["addr"] is not None

    wait_for_condition(agent_address_is_available, timeout=timeout)
    return found["addr"]
@pytest.mark.parametrize(
    "token_type,expected_status",
    [
        ("none", 401),  # No token -> Unauthorized
        ("valid", "not_auth_error"),  # Valid token -> passes auth (may get 404)
        ("invalid", 403),  # Invalid token -> Forbidden
    ],
    ids=["no_token", "valid_token", "invalid_token"],
)
def test_dashboard_agent_auth(
    token_type, expected_status, setup_cluster_with_token_auth
):
    """Test dashboard agent authentication with various token scenarios."""
    import requests
    cluster_info = setup_cluster_with_token_auth
    agent_address = _wait_and_get_dashboard_agent_address(cluster_info)
    # The job-agent logs endpoint serves as a representative authenticated
    # route; the job itself does not need to exist.
    # Build headers based on token type
    headers = {}
    if token_type == "valid":
        headers["Authorization"] = f"Bearer {cluster_info['token']}"
    elif token_type == "invalid":
        headers["Authorization"] = "Bearer invalid_token_12345678901234567890"
    # token_type == "none" -> no Authorization header
    response = requests.get(
        f"{agent_address}/api/job_agent/jobs/nonexistent/logs",
        headers=headers,
        timeout=5,
    )
    if expected_status == "not_auth_error":
        # Valid token should pass auth (may get 404 for nonexistent job)
        assert response.status_code not in (401, 403), (
            f"Valid token should be accepted, got {response.status_code}: "
            f"{response.text}"
        )
    else:
        assert (
            response.status_code == expected_status
        ), f"Expected {expected_status}, got {response.status_code}: {response.text}"
@pytest.mark.parametrize(
    "endpoint",
    ["/api/healthz", "/api/local_raylet_healthz"],
    ids=["healthz", "local_raylet_healthz"],
)
def test_dashboard_agent_health_check_public(endpoint, setup_cluster_with_token_auth):
    """Agent health check endpoints must remain public without auth."""
    import requests

    agent_address = _wait_and_get_dashboard_agent_address(
        setup_cluster_with_token_auth
    )

    # No Authorization header on purpose: health checks must stay open so
    # orchestrators can probe the agent.
    resp = requests.get(f"{agent_address}{endpoint}", timeout=5)
    assert resp.status_code == 200, (
        f"Health check {endpoint} should return 200 without auth, "
        f"got {resp.status_code}: {resp.text}"
    )
# Allow running this test module directly; propagate pytest's exit code.
if __name__ == "__main__":
    sys.exit(pytest.main(["-vv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/test_token_auth_integration.py",
"license": "Apache License 2.0",
"lines": 640,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/serve/tests/unit/test_extract_route_patterns.py | """Unit tests for extract_route_patterns function."""
import pytest
from fastapi import FastAPI
from starlette.applications import Starlette
from starlette.routing import Mount, Route
from ray.serve._private.thirdparty.get_asgi_route_name import (
extract_route_patterns,
)
def has_path(patterns, path):
    """Helper to check if a path exists in patterns list."""
    for pattern in patterns:
        if pattern.path == path:
            return True
    return False
def get_methods_for_path(patterns, path):
    """Helper to get methods for a specific path."""
    # First matching pattern wins; None signals the path is absent.
    matches = (pattern.methods for pattern in patterns if pattern.path == path)
    return next(matches, None)
def test_extract_route_patterns_fastapi_simple():
    """Test extracting route patterns from a simple FastAPI app."""
    app = FastAPI()

    @app.get("/")
    def root():
        return {"message": "root"}

    @app.get("/users/{user_id}")
    def get_user(user_id: str):
        return {"user_id": user_id}

    @app.post("/items/{item_id}")
    def create_item(item_id: str):
        return {"item_id": item_id}

    patterns = extract_route_patterns(app)

    # User-defined routes plus the OpenAPI routes FastAPI registers itself.
    for expected in (
        "/",
        "/users/{user_id}",
        "/items/{item_id}",
        "/openapi.json",
        "/docs",
    ):
        assert has_path(patterns, expected)
def test_extract_route_patterns_nested_paths():
    """Test extracting nested parameterized routes."""
    app = FastAPI()

    @app.get("/api/v1/users/{user_id}/posts/{post_id}")
    def get_post(user_id: str, post_id: str):
        return {"user_id": user_id, "post_id": post_id}

    @app.get("/api/v1/users/{user_id}/settings")
    def get_settings(user_id: str):
        return {"user_id": user_id}

    patterns = extract_route_patterns(app)

    # Both deeply nested parameterized paths must be reported verbatim.
    for expected in (
        "/api/v1/users/{user_id}/posts/{post_id}",
        "/api/v1/users/{user_id}/settings",
    ):
        assert has_path(patterns, expected)
def test_extract_route_patterns_with_mounts():
    """Test extracting route patterns from apps with mounted sub-apps."""
    # Sub-app whose routes should appear under the mount prefix.
    sub_app = Starlette(
        routes=[
            Route("/health", lambda request: None),
            Route("/status", lambda request: None),
        ]
    )
    # Main app mounting the sub-app at /admin.
    app = Starlette(
        routes=[
            Route("/", lambda request: None),
            Mount("/admin", app=sub_app),
        ]
    )

    patterns = extract_route_patterns(app)

    for expected in ("/", "/admin/health", "/admin/status"):
        assert has_path(patterns, expected)
def test_extract_route_patterns_nested_mounts():
    """Test extracting patterns from deeply nested mounts."""
    # Innermost app
    inner_app = Starlette(
        routes=[
            Route("/details", lambda request: None),
        ]
    )
    # Middle app mounts the innermost one.
    middle_app = Starlette(
        routes=[
            Route("/list", lambda request: None),
            Mount("/item", app=inner_app),
        ]
    )
    # Main app mounts the middle one.
    app = Starlette(
        routes=[
            Route("/", lambda request: None),
            Mount("/api/v1", app=middle_app),
        ]
    )

    patterns = extract_route_patterns(app)

    # Mount prefixes must accumulate down the nesting chain.
    for expected in ("/", "/api/v1/list", "/api/v1/item/details"):
        assert has_path(patterns, expected)
def test_extract_route_patterns_with_root_path():
    """Test extracting patterns from apps with root_path set."""
    app = FastAPI(root_path="/v1")

    @app.get("/")
    def root():
        return {}

    @app.get("/users")
    def get_users():
        return []

    @app.get("/items/{item_id}")
    def get_item(item_id: str):
        return {"item_id": item_id}

    patterns = extract_route_patterns(app)

    # Every route must be reported with the root_path prefix prepended.
    for expected in ("/v1/", "/v1/users", "/v1/items/{item_id}"):
        assert has_path(patterns, expected)
def test_extract_route_patterns_empty_app():
    """A FastAPI app with no user routes still exposes its framework routes."""
    bare_app = FastAPI()  # deliberately no user-defined routes

    extracted = extract_route_patterns(bare_app)

    # FastAPI registers these endpoints by default.
    assert has_path(extracted, "/openapi.json")
    assert has_path(extracted, "/docs")
    # Presence of "/" varies across FastAPI versions, so it is not asserted.
def test_extract_route_patterns_starlette():
    """Pure Starlette apps are supported and carry no OpenAPI endpoints."""

    async def index(request):
        return None

    async def show_user(request):
        return None

    app = Starlette(
        routes=[
            Route("/", index),
            Route("/users/{user_id}", show_user),
        ]
    )

    extracted = extract_route_patterns(app)

    assert has_path(extracted, "/")
    assert has_path(extracted, "/users/{user_id}")
    # OpenAPI routes are FastAPI-specific.
    assert not has_path(extracted, "/openapi.json")
def test_extract_route_patterns_multiple_methods_same_path():
    """A path registered under several HTTP methods appears exactly once."""
    app = FastAPI()

    @app.get("/items/{item_id}")
    def get_item(item_id: str):
        return {"item_id": item_id}

    @app.put("/items/{item_id}")
    def update_item(item_id: str):
        return {"item_id": item_id}

    @app.delete("/items/{item_id}")
    def delete_item(item_id: str):
        return {"item_id": item_id}

    extracted = extract_route_patterns(app)

    # The shared path is deduplicated into a single pattern entry.
    matching = [p for p in extracted if p.path == "/items/{item_id}"]
    assert len(matching) == 1

    # All three registered verbs are grouped on that single entry.
    grouped = get_methods_for_path(extracted, "/items/{item_id}")
    assert grouped is not None
    for verb in ("GET", "PUT", "DELETE"):
        assert verb in grouped
def test_extract_route_patterns_invalid_app():
    """Objects without a ``routes`` attribute yield an empty list, not an error."""

    class NotAnApp:
        """Deliberately lacks a ``routes`` attribute."""

    # Extraction is best-effort: no exception, just an empty result.
    assert extract_route_patterns(NotAnApp()) == []
def test_extract_route_patterns_mount_without_routes():
    """A raw ASGI callable mounted directly is reported as a single path."""
    from starlette.responses import PlainTextResponse

    async def raw_asgi(scope, receive, send):
        reply = PlainTextResponse("Custom mount")
        await reply(scope, receive, send)

    app = Starlette(
        routes=[
            Route("/", lambda request: None),
            Mount("/custom", app=raw_asgi),
        ]
    )

    extracted = extract_route_patterns(app)

    assert has_path(extracted, "/")
    assert has_path(extracted, "/custom")
    # A bare ASGI mount carries no HTTP-method restrictions.
    assert get_methods_for_path(extracted, "/custom") is None
def test_extract_route_patterns_sorted_output():
    """Extracted patterns come back ordered by path."""
    app = FastAPI()

    @app.get("/zebra")
    def zebra():
        return {}

    @app.get("/apple")
    def apple():
        return {}

    @app.get("/banana")
    def banana():
        return {}

    extracted = extract_route_patterns(app)

    # Keep only the user-defined routes, preserving extraction order.
    wanted = {"/zebra", "/apple", "/banana"}
    user_paths = [p.path for p in extracted if p.path in wanted]

    # Registration order was zebra/apple/banana; output must be alphabetical.
    assert user_paths == ["/apple", "/banana", "/zebra"]
def test_extract_route_patterns_special_characters():
    """Path converters such as ``:path`` and ``:int`` must be handled."""
    app = FastAPI()

    @app.get("/users/{user_id:path}")
    def get_user_path(user_id: str):
        return {"user_id": user_id}

    @app.get("/items/{item_id:int}")
    def get_item_int(item_id: int):
        return {"item_id": item_id}

    paths = [p.path for p in extract_route_patterns(app)]

    # FastAPI normalizes converter syntax, so only the parameter names are checked.
    assert any("user_id" in path for path in paths)
    assert any("item_id" in path for path in paths)
def test_extract_route_patterns_websocket_routes():
    """WebSocket endpoints are extracted alongside HTTP ones."""
    app = FastAPI()

    @app.get("/http")
    def http_route():
        return {}

    @app.websocket("/ws")
    async def websocket_route(websocket):
        await websocket.accept()
        await websocket.close()

    extracted = extract_route_patterns(app)

    assert has_path(extracted, "/http")
    assert has_path(extracted, "/ws")
    # WebSocket routes carry no HTTP-method list.
    assert get_methods_for_path(extracted, "/ws") is None
if __name__ == "__main__":
    # Bug fix: pytest.main's return value was discarded, so running this file
    # directly always exited 0 even when tests failed. Propagate the exit
    # status (as the sibling TPU test module does with sys.exit) without
    # requiring an extra import.
    raise SystemExit(pytest.main([__file__, "-v"]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/unit/test_extract_route_patterns.py",
"license": "Apache License 2.0",
"lines": 237,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/tests/test_tpu.py | import sys
from unittest.mock import MagicMock, patch
import pytest
import ray
from ray._private.accelerators import TPUAcceleratorManager, tpu
from ray.util.tpu import SlicePlacementGroup
def test_get_current_pod_name_smoke():
    """The pod name reported by the accelerator manager is passed through."""
    target = "ray._private.accelerators.tpu.TPUAcceleratorManager.get_current_node_tpu_name"
    with patch(target, return_value="my-tpu"):
        assert ray.util.tpu.get_current_pod_name() == "my-tpu"
def test_empty_get_current_pod_name_returns_none():
    """An empty TPU name from the accelerator manager maps to None."""
    target = "ray._private.accelerators.tpu.TPUAcceleratorManager.get_current_node_tpu_name"
    with patch(target, return_value=""):
        assert ray.util.tpu.get_current_pod_name() is None
@pytest.mark.parametrize(
    "test_case",
    [
        # (number_chips_per_host, parsed accl_type, expected_worker_count)
        (4, "v2-4", 1),
        (4, "v3-32", 4),
        (4, "v4-8", 1),
        (4, "v4-16", 2),
        (8, "v5litepod-4", 1),
        (8, "v5litepod-8", 1),
        (8, "v5litepod-16", 2),
        (8, "v5litepod-32", 4),
        (4, "v5p-4", 1),
        (4, "v5p-8", 1),
        (4, "v5p-16", 2),
        (4, "v6e-4", 1),
        (8, "v6e-8", 1),
        (8, "v6e-16", 2),
        (4, "v7x-8", 1),
        (4, "v7x-16", 2),
    ],
)
@patch("glob.glob")
def test_worker_count(mock_glob, test_case):
    """Worker count is derived from the pod type and chips visible on the host."""
    chip_count, pod_type, expected = test_case
    # Fake one /dev/accel* device entry per chip on this host.
    mock_glob.return_value = [f"/dev/accel{i}" for i in range(chip_count)]
    # Chip counting is cached; clear it so the mocked glob is consulted.
    TPUAcceleratorManager.get_current_node_num_accelerators.cache_clear()
    patch_target = (
        "ray._private.accelerators.tpu.TPUAcceleratorManager."
        "get_current_node_tpu_pod_type"
    )
    with patch(patch_target, return_value=pod_type):
        assert ray.util.tpu.get_current_pod_worker_count() == expected
@patch("glob.glob")
def test_num_tpu_chips(mock_glob):
    """The chip count equals the number of /dev/accel* devices found."""
    mock_glob.return_value = [f"/dev/accel{i}" for i in range(4)]
    # Clear the cached device scan so the mocked glob takes effect.
    TPUAcceleratorManager.get_current_node_num_accelerators.cache_clear()
    assert ray.util.tpu.get_num_tpu_chips_on_node() == 4
@pytest.mark.parametrize(
    "test_case",
    [
        # (accelerator_type, accelerator_topology, expected_result)
        ("v2-16", "4x4", True),
        ("v2-256", "16x16", True),
        ("v2-4", "2x2", False),
        ("v3-16", "4x4", True),
        ("v3-1024", "32x32", True),
        ("v3-4", "4x16", False),
        ("v4-4", "2x2x1", True),
        ("v4-32", "2x4x4", True),
        ("v4-2048", "8x8x16", True),
        ("v4-4", "16x16x16", False),
        ("v5p-128", "4x4x4", True),
        ("v5p-4096", "16x16x16", True),
        ("v5p-12288", "16x16x24", True),
        ("v5p-4", "24x24x24", False),
        ("v5litepod-16", "2x8", True),
        ("v5litepod-256", "16x16", True),
        ("v5litepod-4", "2x2", True),
        ("v6e-16", "4x4", True),
        ("v6e-64", "8x8", True),
        ("v6e-4", "4x16", False),
        ("tpu7x-16", "2x2x2", True),
        ("tpu7x-64", "2x4x4", True),
        ("v7x-8", "4x4", False),
    ],
)
@patch("glob.glob")
def test_is_valid_tpu_accelerator_topology(_mock_glob, test_case):
    """Topology validation accepts/rejects each (pod type, topology) pair."""
    pod_type, topology, expected = test_case
    result = TPUAcceleratorManager.is_valid_tpu_accelerator_topology(
        pod_type, topology
    )
    assert result == expected
def test_get_current_node_labels_env_only(monkeypatch):
    """TPU node labels are populated from the GKE-style environment variables."""
    fake_env = {
        "TPU_NAME": "tpu-worker-group-2",
        "TPU_WORKER_ID": "0",
        "TPU_ACCELERATOR_TYPE": "v6e-16",
        "TPU_TOPOLOGY": "4x4",
    }
    for key, value in fake_env.items():
        monkeypatch.setenv(key, value)

    labels = TPUAcceleratorManager.get_current_node_accelerator_labels()

    assert labels["ray.io/tpu-slice-name"] == "tpu-worker-group-2"
    assert labels["ray.io/tpu-worker-id"] == "0"
    assert labels["ray.io/tpu-topology"] == "4x4"
    assert labels["ray.io/tpu-pod-type"] == "v6e-16"
def test_get_current_node_tpu_topology_from_metadata():
    """The topology string is parsed out of the raw TPU metadata blob."""
    metadata = "TPU_ACCELERATOR:v6e.\nTOPOLOGY: '2x2x4'\nTPU_HOST_BOUNDS:0,1,1,2"
    with patch(
        "ray._private.accelerators.tpu._get_tpu_metadata", return_value=metadata
    ):
        assert TPUAcceleratorManager.get_current_node_tpu_topology() == "2x2x4"
@pytest.mark.parametrize(
    "topology, accelerator_type, expected_pod_type, should_raise",
    [
        ("2x4", "TPU-V6E", "v6e-8", False),
        ("2x2x2", "TPU-V4", "v4-16", False),
        ("4x8", "TPU-V3", "v3-64", False),
        ("2x2x1", "TPU-V5P", "v5p-8", False),
        ("4x4", "TPU-V5P", "v5p-32", False),
        ("8x16", "TPU-V6E", "v6e-128", False),
        ("", "TPU-V3", None, False),
        ("4x", "TPU-V3", None, True),
        ("2x2x2", "TPU-V7X", "v7x-16", False),
    ],
)
def test_infer_tpu_pod_type_from_topology(
    topology, accelerator_type, expected_pod_type, should_raise
):
    """Pod type is inferred from topology; malformed topologies raise ValueError."""
    if should_raise:
        with pytest.raises(ValueError):
            tpu.infer_tpu_pod_type_from_topology(topology, accelerator_type)
        return
    inferred = tpu.infer_tpu_pod_type_from_topology(topology, accelerator_type)
    assert inferred == expected_pod_type
@pytest.fixture
def ray_start_cpu():
    """Start a single-CPU local Ray instance and shut it down afterwards."""
    info = ray.init(num_cpus=1)
    yield info
    ray.shutdown()
@pytest.fixture
def ray_tpu_cluster(ray_start_cluster):
    """
    Simulates a Ray cluster with two multi-host TPU v4-16 slices.

    Each slice contributes two nodes: a head node (worker 0, carrying the
    "TPU-<pod_type>-head" resource used for slice reservation) and one
    ordinary worker node.
    """
    pod_type = "v4-16"
    topology = "2x2x2"
    cluster = ray_start_cluster

    for slice_name in ("test-slice-0", "test-slice-1"):
        common_env = {
            "TPU_NAME": slice_name,
            "TPU_ACCELERATOR_TYPE": pod_type,
            "TPU_TOPOLOGY": topology,
        }
        for worker_id in ("0", "1"):
            node_labels = {
                "ray.io/tpu-slice-name": slice_name,
                "ray.io/tpu-worker-id": worker_id,
                "ray.io/tpu-pod-type": pod_type,
                "ray.io/tpu-topology": topology,
            }
            node_resources = {"TPU": 4}
            if worker_id == "0":
                # Only the head node of a slice advertises the head resource.
                node_resources[f"TPU-{pod_type}-head"] = 1
            cluster.add_node(
                num_cpus=2,
                resources=node_resources,
                env_vars={**common_env, "TPU_WORKER_ID": worker_id},
                labels=node_labels,
            )

    ray.init(address=cluster.address)
    yield cluster
    ray.shutdown()
def test_fetch_tpu_slice_name_from_pg(ray_tpu_cluster):
    """Tests that the slice name can be fetched from a PG."""
    head_pg = ray.util.placement_group(bundles=[{"TPU-v4-16-head": 1}])
    ray.get(head_pg.ready())

    # Either slice's head node may satisfy the bundle.
    resolved = tpu.fetch_tpu_slice_name_from_pg(head_pg)
    assert resolved in {"test-slice-0", "test-slice-1"}

    ray.util.remove_placement_group(head_pg)
def test_reserve_tpu_slice(ray_tpu_cluster):
    """Tests that a TPU slice can be successfully reserved."""
    name_0, head_pg_0 = tpu.reserve_tpu_slice(
        topology="2x2x2", accelerator_type="TPU-V4"
    )
    name_1, head_pg_1 = tpu.reserve_tpu_slice(
        topology="2x2x2", accelerator_type="TPU-V4"
    )

    # Both reservations must have produced a valid head placement group.
    assert head_pg_0 is not None, "Expected placement group for slice 0, got None"
    assert head_pg_1 is not None, "Expected placement group for slice 1, got None"
    assert (
        name_0 != name_1
    ), f"Expected to reserve two different slices, but got the same name: {name_0}"

    expected_names = {"test-slice-0", "test-slice-1"}
    actual_names = {name_0, name_1}
    assert actual_names == expected_names, (
        f"Got unexpected slice names. Expected {expected_names}, "
        f"but got {actual_names}"
    )
def test_slice_placement_group(ray_tpu_cluster):
    """Test that single TPU slice can be successfully reserved."""
    handle = ray.util.tpu.slice_placement_group(
        topology="2x2x2",
        accelerator_version="v4",
    )

    # v4 2x2x2: 8 chips total, 4 chips per host -> 2 hosts.
    assert handle.chips_per_host == 4
    assert handle.num_hosts == 2

    pg = handle.placement_group
    assert pg.bundle_count == 2
    # One bundle per host, each holding a full host's worth of chips.
    assert pg.bundle_specs == [
        {"TPU": 4, "CPU": 1.0},
        {"TPU": 4, "CPU": 1.0},
    ]
def test_multi_slice_placement_group(ray_tpu_cluster):
    """Test that multiple whole TPU slices can be successfully reserved"""
    handle = ray.util.tpu.slice_placement_group(
        topology="2x2x2",
        accelerator_version="v4",
        num_slices=2,
    )

    pg = handle.placement_group
    # Two hosts per slice, two slices -> four identical bundles.
    assert pg.bundle_count == 4
    assert handle.num_hosts == 4
    assert pg.bundle_specs == [
        {"TPU": 4, "CPU": 1.0},  # slice 1, host 1
        {"TPU": 4, "CPU": 1.0},  # slice 1, host 2
        {"TPU": 4, "CPU": 1.0},  # slice 2, host 1
        {"TPU": 4, "CPU": 1.0},  # slice 2, host 2
    ]
@patch("ray.util.tpu.placement_group")
@patch("ray.util.tpu.remove_placement_group")
@patch("ray.util.tpu.reserve_tpu_slice")
def test_slice_placement_group_partial_failure_cleanup(
    mock_reserve, mock_remove_pg, mock_create_pg
):
    """
    Verifies that if a multi-slice request fails halfway through,
    the TPU head placement groups are cleaned up to prevent leaks.
    """
    head_pg_stub = MagicMock(name="head_pg_1")
    # First reservation succeeds; the second returns None (failure).
    mock_reserve.side_effect = [("slice_1", head_pg_stub), None]

    with pytest.raises(RuntimeError, match="Failed to reserve TPU slice"):
        SlicePlacementGroup(topology="2x2x2", accelerator_version="v4", num_slices=2)

    # Both reservations were attempted, the successful head PG was released,
    # and no worker placement group was ever created.
    assert mock_reserve.call_count == 2
    mock_remove_pg.assert_called_once_with(head_pg_stub)
    mock_create_pg.assert_not_called()
@pytest.mark.parametrize(
    "accelerator_type, expected_version",
    [
        # type with "TPU-" prefix
        ("TPU-V4", "v4"),
        ("TPU-v4", "v4"),
        ("TPU-V6E", "v6e"),
        ("TPU-v5p", "v5p"),
        ("TPU-V7X", "v7x"),
        # Only the TPU version - no parsing necessary.
        ("v4", "v4"),
        ("v3", "v3"),
        ("v6e", "v6e"),
        ("v5litepod", "v5litepod"),
        ("v7x", "v7x"),
    ],
)
def test_get_tpu_version_valid(accelerator_type, expected_version):
    """Both 'TPU-<VER>' strings and bare version strings normalize correctly."""
    actual = ray.util.tpu.get_tpu_version_from_type(accelerator_type)
    assert actual == expected_version
@pytest.mark.parametrize(
    "invalid_type",
    [
        "A100",  # GPU type
        "random-invalid-type",  # Random string
        "TPU-invalid",  # TPU prefix
        "",  # Empty string
    ],
)
def test_get_tpu_version_invalid(invalid_type):
    """Strings that do not name a known TPU version are rejected."""
    with pytest.raises(ValueError, match="Invalid accelerator_type"):
        ray.util.tpu.get_tpu_version_from_type(invalid_type)
@pytest.mark.parametrize(
    "topology, accelerator_type, num_workers, resources_per_worker, expected_slices",
    [
        # "2x2x1" has 4 chips, for 4 workers with TPU: 1 each we expect num_slices=1.
        ("2x2x1", "TPU-V4", 4, {"TPU": 1}, 1),
        # "2x2x1" has 4 chips, for 8 workers with TPU: 1 each we expect num_slices=2.
        ("2x2x1", "v4", 8, {"TPU": 1}, 2),
        # "2x2x2" has 8 chips and 2 hosts, defaulting to 1 TPU worker per host
        # and requesting 4 workers, we expect num_slices=2.
        ("2x2x2", "TPU-V4", 4, None, 2),
        # "2x2x4" has 16 chips and 4 hosts, defaulting to 1 TPU worker per host
        # and requesting 4 workers, we expect num_slices=1.
        ("2x2x4", "TPU-V4", 4, None, 1),
        # 0 workers requested -> fallback to 1 slice.
        ("2x2x1", "v4", 0, None, 1),
        # Invalid topology -> fallback to 1 slice.
        ("", "v4", 4, {"TPU": 1}, 1),
        ("2x2x1", "", 4, {"TPU": 1}, 1),
    ],
)
def test_get_tpu_num_slices_for_workers(
    topology, accelerator_type, num_workers, resources_per_worker, expected_slices
):
    """Slice count scales with worker demand and falls back to 1 on bad input."""
    actual = ray.util.tpu.get_tpu_num_slices_for_workers(
        topology=topology,
        accelerator_type=accelerator_type,
        num_workers=num_workers,
        resources_per_worker=resources_per_worker,
    )
    assert actual == expected_slices
if __name__ == "__main__":
    # Allow running this test module directly; sys.exit propagates pytest's
    # exit status so CI can detect failures.
    sys.exit(pytest.main(["-sv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/test_tpu.py",
"license": "Apache License 2.0",
"lines": 355,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/util/tpu.py | import logging
import math
from typing import Dict, List, Optional, Tuple
import ray
from ray._private.accelerators import TPUAcceleratorManager
from ray._private.accelerators.tpu import (
VALID_TPU_TYPES,
get_chips_per_host,
get_num_chips_from_topology,
reserve_tpu_slice,
)
from ray._private.client_mode_hook import client_mode_wrap
from ray.util.annotations import PublicAPI
from ray.util.placement_group import (
PlacementGroup,
placement_group,
remove_placement_group,
)
logger = logging.getLogger(__name__)
@PublicAPI(stability="alpha")
def get_tpu_version_from_type(accelerator_type: str) -> str:
    """Extracts the version from the accelerator type.

    Args:
        accelerator_type: The full accelerator type string (e.g. "TPU-V6E").

    Returns:
        The version string (e.g. "v6e").

    Raises:
        ValueError: If the accelerator type is invalid.
    """
    lowered = accelerator_type.lower()
    # Strip the "tpu-"/"tpu" marker to recover the bare generation string.
    if lowered.startswith("tpu-"):
        candidate = lowered.replace("tpu-", "")
    elif lowered.startswith("tpu"):
        candidate = lowered.replace("tpu", "v")
    else:
        candidate = lowered
    if candidate in VALID_TPU_TYPES:
        return candidate
    raise ValueError(
        f"Invalid accelerator_type: {accelerator_type}. "
        f"Must be one of {list(VALID_TPU_TYPES)} or start with 'TPU-' followed by a valid type."
    )
@PublicAPI(stability="alpha")
def get_current_pod_name() -> Optional[str]:
    """
    Return the name of the TPU pod that the worker is a part of.

    Returns:
        The name of the TPU pod. Returns None if not part of a TPU pod.
    """
    name = TPUAcceleratorManager.get_current_node_tpu_name()
    # The accelerator manager reports "" when the node has no TPU pod.
    return None if name == "" else name
@PublicAPI(stability="alpha")
def get_current_pod_worker_count() -> Optional[int]:
    """
    Count the number of workers associated with the TPU pod that the worker belongs to.

    Returns:
        The total number of workers in the TPU pod. Returns None if the worker is not
        part of a TPU pod.
    """
    # Delegates entirely to the accelerator manager's pod-level bookkeeping.
    worker_count = TPUAcceleratorManager.get_num_workers_in_current_tpu_pod()
    return worker_count
@PublicAPI(stability="alpha")
def get_num_tpu_chips_on_node() -> int:
    """
    Return the number of TPU chips on the node.

    Returns:
        The total number of chips on the TPU node. Returns 0 if none are found.
    """
    # Delegates to the accelerator manager's (cached) device scan.
    chip_count = TPUAcceleratorManager.get_current_node_num_accelerators()
    return chip_count
@PublicAPI(stability="alpha")
def get_tpu_num_slices_for_workers(
    topology: str,
    accelerator_type: str,
    num_workers: int,
    resources_per_worker: Optional[Dict[str, float]] = None,
) -> int:
    """
    Calculates the number of slices needed to accommodate the specified number of workers.

    Args:
        topology: The TPU topology string.
        accelerator_type: The accelerator type string.
        num_workers: The desired number of workers.
        resources_per_worker: Optional dict of resources per worker.

    Returns:
        The number of slices required. Returns 1 if inputs are invalid or incomplete.
    """
    fallback = 1
    if not topology or not accelerator_type:
        return fallback
    try:
        # How many workers fit into a single slice given these per-worker
        # resources and this topology?
        workers_per_slice, _ = get_tpu_worker_resources(
            topology=topology,
            accelerator_type=accelerator_type,
            resources_per_unit=resources_per_worker,
            num_slices=1,
        )
        if workers_per_slice == 0:
            return fallback
        return max(1, math.ceil(num_workers / workers_per_slice))
    except Exception:
        # Best-effort API: any sizing failure falls back to a single slice.
        return fallback
@PublicAPI(stability="alpha")
def get_tpu_worker_resources(
    topology: str,
    accelerator_type: str,
    resources_per_unit: Optional[Dict[str, float]] = None,
    num_slices: int = 1,
) -> Tuple[int, Dict[str, float]]:
    """
    Calculates the number of workers and the resources required for each worker
    to run based on a TPU topology.

    Args:
        topology: The TPU topology string.
        accelerator_type: The accelerator string.
        resources_per_unit: Optional manual override for resources per unit. If
            unspecified, the number of TPU chips in a host is assumed.
        num_slices: The number of TPU slices.

    Returns:
        A tuple containing:
            - num_workers: Total workers required.
            - unit_resources: The resource dictionary for a single worker.
    """
    version = get_tpu_version_from_type(accelerator_type)
    host_chips = get_chips_per_host(topology, version)
    slice_chips = get_num_chips_from_topology(topology)
    all_chips = slice_chips * num_slices

    # Copy the caller's dict so the input is never mutated, then fill in
    # defaults: 1 CPU, and a full host's worth of chips if TPU is unspecified.
    unit_resources = dict(resources_per_unit) if resources_per_unit else {}
    unit_resources.setdefault("CPU", 1)
    unit_resources.setdefault("TPU", host_chips)

    chips_per_unit = unit_resources["TPU"]
    # Validate TPU resource values.
    if chips_per_unit <= 0:
        raise ValueError("TPU resources must be positive.")
    if all_chips % chips_per_unit != 0:
        raise ValueError(
            f"Total chips ({all_chips}) not divisible by "
            f"TPUs requested per unit ({chips_per_unit})."
        )
    if slice_chips % chips_per_unit != 0:
        raise ValueError(
            f"The requested resources per bundle ({chips_per_unit} TPU chips) do not "
            f"divide evenly into the chips available per slice ({slice_chips}). "
            "This configuration results in an uneven distribution of workers across slices, "
            "which is not supported."
        )

    return int(all_chips // chips_per_unit), unit_resources
@PublicAPI(stability="alpha")
def get_tpu_coordinator_env_vars(
    coordinator_address: str,
    num_slices: int,
    slice_id: int,
    coordinator_port: str = "8081",
) -> Dict[str, str]:
    """
    Returns the environment variables required for JAX multi-slice coordination.

    Args:
        coordinator_address: The IP address or hostname of the coordinator.
        num_slices: The total number of slices in the cluster.
        slice_id: The index of the current slice.
        coordinator_port: The port the coordinator is listening on.

    Returns:
        A dictionary mapping environment variable names to their values.
    """
    env_vars: Dict[str, str] = {}
    env_vars["MEGASCALE_COORDINATOR_ADDRESS"] = coordinator_address
    env_vars["MEGASCALE_PORT"] = coordinator_port
    # Environment-variable values must be strings.
    env_vars["MEGASCALE_NUM_SLICES"] = str(num_slices)
    env_vars["MEGASCALE_SLICE_ID"] = str(slice_id)
    return env_vars
@PublicAPI(stability="alpha")
class SlicePlacementGroup:
    """
    A handle to a placement group reservation for a TPU slice.
    The following definitions are added for clarity:
    - Accelerator type: A string describing the accelerator type and version (e.g. TPU-V2, TPU-V6E).
    - Accelerator version: The accelerator generation only (e.g. v6e, v5p, v5litepod).
    - Pod type: The TPU accelerator version and the number of chips in a topology. (e.g. v6e-128, v5p-8).
    - Accelerator topology: The physical topology representing the structure (e.g. 2x2x2, 16x16).
    Args:
        topology: The TPU topology string (e.g. "2x2x2").
        accelerator_version: The TPU accelerator generation (e.g. "v6e", "v5p", "v4").
        resources_per_bundle: Optionally specify the resources to include in every worker bundle.
        strategy: PlacementGroup parameter. The strategy to create the placement group. Currently default to "SPREAD"
            - "PACK": Packs Bundles into as few nodes as possible.
            - "SPREAD": Places Bundles across distinct nodes as even as possible.
            - "STRICT_PACK": Packs Bundles into one node. The group is
                not allowed to span multiple nodes.
            - "STRICT_SPREAD": Packs Bundles across distinct nodes.
        lifetime: PlacementGroup parameter. Either `None`, which defaults to the placement group
            will fate share with its creator and will be deleted once its
            creator is dead, or "detached", which means the placement group
            will live as a global object independent of the creator.
        num_slices: Number of TPU slices in the SlicePlacementGroup. Defaults to 1 when unspecified.
    Examples:
        .. testcode:: python
            :skipif: True
            import ray
            from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy
            from ray.util.tpu import SlicePlacementGroup
            slice_handle = SlicePlacementGroup(topology="4x4", accelerator_version="v6e")
            slice_pg = slice_handle.placement_group
            ray.get(slice_pg.ready(), timeout=10)
            @ray.remote(num_cpus=0, resources={'TPU': 4})
            def spmd_task(world, rank):
                print(f"Current TPU is rank {rank} of {world}")
            tasks = [
                spmd_task.options(
                    scheduling_strategy=PlacementGroupSchedulingStrategy(
                        placement_group=slice_pg,
                    )
                ).remote(world=4, rank=i)
                for i in range(slice_handle.num_hosts)
            ]
    """
    def __init__(
        self,
        topology: str,
        accelerator_version: str,
        resources_per_bundle: Optional[Dict[str, float]] = None,
        # below are args related to PG
        strategy: str = "SPREAD",
        name: str = "",
        lifetime: Optional[str] = None,
        # default
        num_slices: int = 1,
    ):
        # Normalize user input so validation and lookups are case- and
        # whitespace-insensitive.
        self._topology = topology.strip().lower()
        self._accelerator_version = accelerator_version.strip().lower()
        self._resources_per_bundle = resources_per_bundle or {}
        self._num_slices = num_slices
        # Calculate number of bundles and bundle resources for specified TPU topology.
        self._num_bundles, self._bundle_resources = get_tpu_worker_resources(
            topology=self._topology,
            accelerator_type=self._accelerator_version,
            resources_per_unit=resources_per_bundle,
            num_slices=self._num_slices,
        )
        self._chips_per_host = get_chips_per_host(
            self._topology, self._accelerator_version
        )
        total_chips = get_num_chips_from_topology(self._topology)
        # max(1, ...) guards single-host topologies where total == per-host.
        hosts_per_slice = max(1, total_chips // self._chips_per_host)
        self._num_hosts = hosts_per_slice * self._num_slices
        # Head PGs pin each reserved slice; kept so shutdown() can release them.
        self._head_pgs: List[PlacementGroup] = []
        self._bundle_label_selector: List[Dict[str, str]] = []
        self._validate_tpu_config()
        # Initialized to None so shutdown() is safe if reservation fails early.
        self._placement_group = None
        # Reserve a TPU slice of the provided accelerator version and topology.
        self._placement_group = self._reserve_slice(
            strategy,
            name,
            lifetime,
        )
    def _accelerator_version_check(self, accelerator_version: str):
        # Raises ValueError when the version is not a known TPU generation.
        if accelerator_version not in VALID_TPU_TYPES:
            raise ValueError(
                f"Invalid accelerator version: {accelerator_version}. Must be one of: {VALID_TPU_TYPES}"
            )
    def _validate_tpu_config(self):
        # Should validate topology and generation values and return a
        # ValueError if invalid.
        self._accelerator_version_check(self.accelerator_version)
        if not TPUAcceleratorManager.is_valid_tpu_accelerator_topology(
            tpu_accelerator_version=self.accelerator_version,
            tpu_topology=self._topology,
        ):
            raise ValueError(
                f"Invalid accelerator topology: '{self._topology}' for "
                f"accelerator version: '{self.accelerator_version}'"
            )
    def _reserve_slice(
        self,
        strategy: str = "SPREAD",
        name: str = "",
        lifetime: Optional[str] = None,
    ) -> PlacementGroup:
        """Performs the two-step scheduling to reserve a TPU slice.

        Step 1: reserve each slice via its head node (reserve_tpu_slice),
        which yields the slice's unique name and a head placement group.
        Step 2: create one worker placement group whose bundles are pinned
        to the reserved slices via per-bundle label selectors.
        """
        self._bundle_label_selector = []
        bundles = []
        bundles_per_slice = self._num_bundles // self._num_slices
        # Construct accelerator format for reserve_tpu_slice. e.g. From "v6e" to "TPU-V6E", "v5p" to "TPU-V5P".
        accelerator_type = "TPU-" + self.accelerator_version.upper()
        try:
            for _ in range(self.num_slices):
                reservation = reserve_tpu_slice(self._topology, accelerator_type)
                if not reservation:
                    raise RuntimeError(
                        f"Failed to reserve TPU slice. Requested {self.num_slices} "
                        f"slice(s) of topology '{self._topology}' with accelerator type "
                        f"'{accelerator_type}'. Ensure that sufficient TPU resources are "
                        "available in the cluster."
                    )
                # Store the head placement group for clean-up when un-reserving the slice.
                slice_name, head_pg = reservation
                self._head_pgs.append(head_pg)
                # Reserving a slice is done through constructing num_hosts bundles, each with a label selector for
                # the unique name of an available TPU slice.
                selector = {ray._raylet.RAY_NODE_TPU_SLICE_NAME_KEY: slice_name}
                self._bundle_label_selector.extend([selector] * bundles_per_slice)
                bundles += [
                    self._bundle_resources.copy() for _ in range(bundles_per_slice)
                ]
            pg = placement_group(
                bundles=bundles,
                strategy=strategy,
                name=name,
                lifetime=lifetime,
                bundle_label_selector=self._bundle_label_selector,
            )
            return pg
        except Exception:
            # Any failure (including a partial multi-slice reservation)
            # releases every head PG acquired so far to avoid leaking
            # reserved TPU capacity.
            self.shutdown()
            raise
    @property
    def placement_group(self) -> PlacementGroup:
        """The underlying PlacementGroup object."""
        return self._placement_group
    @property
    def chips_per_host(self) -> int:
        """The number of chips per host for this TPU slice."""
        # This is the same value as resources per worker for TPU.
        return self._chips_per_host
    @property
    def num_hosts(self) -> int:
        """The total number of hosts in the SlicePlacementGroup."""
        return self._num_hosts
    @property
    def num_bundles(self) -> int:
        """The total number of bundles in the SlicePlacementGroup."""
        return self._num_bundles
    @property
    def topology(self) -> str:
        """The physical topology of the TPU slice."""
        return self._topology
    @property
    def accelerator_version(self) -> str:
        """The TPU accelerator type of the slice."""
        return self._accelerator_version
    @property
    def num_slices(self) -> int:
        """The number of TPU slices this SlicePlacementGroup spans."""
        return self._num_slices
    @property
    def head_placement_groups(self) -> List[PlacementGroup]:
        """The internal head PGs used to reserve the slices."""
        return self._head_pgs
    @property
    def bundle_label_selector(self) -> List[Dict[str, str]]:
        """The bundle label selector list for the worker PG."""
        return self._bundle_label_selector
    @property
    def bundle_resources(self) -> Dict[str, float]:
        """The resources that are assigned to each bundle."""
        return self._bundle_resources
    def shutdown(self):
        """Removes the worker placement group and all internal head PGs."""
        # Remove the worker PG first, then release each slice's head PG.
        if self._placement_group:
            remove_placement_group(self._placement_group)
            self._placement_group = None
        for head_pg in self._head_pgs:
            remove_placement_group(head_pg)
        self._head_pgs = []
@PublicAPI(stability="alpha")
@client_mode_wrap
def slice_placement_group(
    topology: str,
    accelerator_version: str,
    resources_per_bundle: Optional[Dict[str, float]] = None,
    num_slices: int = 1,
    **kwargs,
) -> SlicePlacementGroup:
    """Asynchronously creates a PlacementGroup for a TPU slice.

    A slice placement group reserves num_slices TPU slice(s) and creates a placement
    group for scheduling tasks or actors.

    Args:
        topology: The desired TPU pod topology (e.g. "4x4", "2x8").
        accelerator_version: The TPU accelerator generation, (e.g. "v4", "v5p", "v6e").
        resources_per_bundle: Specify the number of resources to reserve per bundle.
            When unspecified, SlicePlacementGroup defaults to reserving 1 bundle per TPU host in
            a topology, with the bundle resources set to the number of TPU in a host.
            Ex: Specifying {"TPU": 1} for a 4x4 topology would result in 16 bundles, each with 1 TPU.
            If resources_per_bundle=None for the same topology, there would be 4 bundles with 4 TPU each.
        num_slices: The number of tpu slices within the placement group
        **kwargs: Additional arguments for the placement group, such as 'name', 'lifetime', or 'strategy'.

    Returns:
        The handle for the created SlicePlacementGroup.
    """
    # Thin convenience wrapper: all reservation logic lives in the class.
    handle = SlicePlacementGroup(
        topology=topology,
        accelerator_version=accelerator_version,
        resources_per_bundle=resources_per_bundle,
        num_slices=num_slices,
        **kwargs,
    )
    return handle
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/util/tpu.py",
"license": "Apache License 2.0",
"lines": 402,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/tests/test_ray_event_export_task_events.py | import base64
import json
import logging
import textwrap
from typing import Optional
import grpc
import pytest
import ray
import ray.dashboard.consts as dashboard_consts
from ray._common.network_utils import find_free_port
from ray._common.test_utils import wait_for_condition
from ray._private import ray_constants
from ray._private.test_utils import run_string_as_driver_nonblocking
from ray._raylet import GcsClient
logger = logging.getLogger(__name__)

# Stub HTTP endpoint that the aggregator agent is configured to export events
# to (see RAY_DASHBOARD_AGGREGATOR_AGENT_EVENTS_EXPORT_ADDR below). The
# `httpserver` fixture listens on this address and records every POSTed batch.
_EVENT_AGGREGATOR_AGENT_TARGET_PORT = find_free_port()
_EVENT_AGGREGATOR_AGENT_TARGET_IP = "127.0.0.1"
_EVENT_AGGREGATOR_AGENT_TARGET_ADDR = (
    f"http://{_EVENT_AGGREGATOR_AGENT_TARGET_IP}:{_EVENT_AGGREGATOR_AGENT_TARGET_PORT}"
)
@pytest.fixture(scope="module")
def httpserver_listen_address():
    """Listen address for the `httpserver` fixture: the same host/port the
    aggregator agent is told to export events to."""
    address = (_EVENT_AGGREGATOR_AGENT_TARGET_IP, _EVENT_AGGREGATOR_AGENT_TARGET_PORT)
    return address
# Shared parametrization for every test in this file: run each test twice,
# once with proto field names preserved (snake_case JSON keys) and once with
# the default camelCase keys, against a cluster whose aggregator agent exports
# events to the stub HTTP server above.
_cluster_with_aggregator_target = pytest.mark.parametrize(
    ("preserve_proto_field_name", "ray_start_cluster_head_with_env_vars"),
    [
        pytest.param(
            preserve_proto_field_name,
            {
                "env_vars": {
                    "RAY_task_events_report_interval_ms": 100,
                    "RAY_enable_core_worker_ray_event_to_aggregator": "1",
                    "RAY_DASHBOARD_AGGREGATOR_AGENT_EVENTS_EXPORT_ADDR": _EVENT_AGGREGATOR_AGENT_TARGET_ADDR,
                    "RAY_DASHBOARD_AGGREGATOR_AGENT_PRESERVE_PROTO_FIELD_NAME": (
                        "1" if preserve_proto_field_name is True else "0"
                    ),
                },
            },
        )
        for preserve_proto_field_name in [True, False]
    ],
    # The env-var dict is consumed by the cluster fixture, not the test body.
    indirect=["ray_start_cluster_head_with_env_vars"],
)
def wait_until_grpc_channel_ready(
    gcs_address: str, node_ids: list[str], timeout: int = 5
):
    """Wait for every node's dashboard-agent gRPC server to accept connections.

    Looks up each agent's address in the GCS internal KV, then opens a channel
    to its gRPC port and waits for it to become ready.

    Args:
        gcs_address: Address of the GCS to query for agent addresses.
        node_ids: Hex node ids whose agents must be reachable.
        timeout: Per-channel readiness timeout in seconds.

    Returns:
        True if all agents became ready, False if any channel timed out.
    """
    gcs_client = GcsClient(address=gcs_address)

    def get_dashboard_agent_address(node_id: str):
        return gcs_client.internal_kv_get(
            f"{ray.dashboard.consts.DASHBOARD_AGENT_ADDR_NODE_ID_PREFIX}{node_id}".encode(),
            namespace=ray_constants.KV_NAMESPACE_DASHBOARD,
            timeout=dashboard_consts.GCS_RPC_TIMEOUT_SECONDS,
        )

    # The agent registers its address asynchronously; wait until every node
    # has an entry before reading the ports.
    wait_for_condition(
        lambda: all(
            get_dashboard_agent_address(node_id) is not None for node_id in node_ids
        )
    )
    # The KV value is a JSON list; index 2 is the agent's gRPC port.
    grpc_ports = [
        json.loads(get_dashboard_agent_address(node_id))[2] for node_id in node_ids
    ]
    # Wait for each dashboard agent gRPC port to be ready. Channels are closed
    # explicitly (the original leaked them) so we don't accumulate sockets.
    for grpc_port in grpc_ports:
        channel = grpc.insecure_channel(f"127.0.0.1:{grpc_port}")
        try:
            grpc.channel_ready_future(channel).result(timeout=timeout)
        except grpc.FutureTimeoutError:
            return False
        finally:
            channel.close()
    return True
def get_job_id_and_driver_script_task_id_from_events(
    events: list, preserve_proto_field_name: bool
) -> tuple[Optional[str], Optional[str]]:
    """Find the driver-script job id and driver task id in exported events.

    The test process itself is also a driver, so we filter out the driver task
    belonging to the current (test) job and return the one submitted by the
    driver script under test.

    Args:
        events: Decoded JSON event dicts received by the stub server.
        preserve_proto_field_name: Whether keys are snake_case (True) or
            camelCase (False).

    Returns:
        (driver_script_job_id, driver_task_id); either may be None if no
        matching driver task definition event was seen.
    """
    # Job ids in events are base64-encoded binary; encode ours the same way.
    test_job_id = base64.b64encode(
        ray.JobID.from_hex(ray.get_runtime_context().get_job_id()).binary()
    ).decode()
    # Select the key spelling once instead of duplicating the scan per style.
    if preserve_proto_field_name:
        event_type_key, def_key = "event_type", "task_definition_event"
        task_type_key, job_id_key, task_id_key = "task_type", "job_id", "task_id"
    else:
        event_type_key, def_key = "eventType", "taskDefinitionEvent"
        task_type_key, job_id_key, task_id_key = "taskType", "jobId", "taskId"
    driver_script_job_id = None
    driver_task_id = None
    for event in events:
        if event[event_type_key] != "TASK_DEFINITION_EVENT":
            continue
        definition = event[def_key]
        if (
            definition[task_type_key] == "DRIVER_TASK"
            and definition[job_id_key] != test_job_id
        ):
            driver_task_id = definition[task_id_key]
            driver_script_job_id = definition[job_id_key]
            assert driver_task_id is not None
            assert driver_script_job_id is not None
    return driver_script_job_id, driver_task_id
def check_task_event_base_fields(
    event: dict, preserve_proto_field_name: bool, head_node_id: str
):
    """Assert the base fields common to every CORE_WORKER task event.

    Args:
        event: A decoded JSON event dict.
        preserve_proto_field_name: Whether keys are snake_case (True) or
            camelCase (False).
        head_node_id: Expected node id (hex) that emitted the event; the
            event carries it base64-encoded.
    """
    assert event["timestamp"] is not None
    assert event["severity"] == "INFO"
    # Select the key spelling once; the checks are identical for both styles.
    if preserve_proto_field_name:
        event_id_key, source_type_key = "event_id", "source_type"
        session_name_key, node_id_key = "session_name", "node_id"
    else:
        event_id_key, source_type_key = "eventId", "sourceType"
        session_name_key, node_id_key = "sessionName", "nodeId"
    assert event[event_id_key] is not None
    assert event[source_type_key] == "CORE_WORKER"
    assert event[session_name_key] is not None
    assert node_id_key in event
    assert base64.b64decode(event[node_id_key]).hex() == head_node_id
def check_task_lifecycle_event_states_and_error_info(
    events: list,
    expected_task_id_states_dict: dict,
    expected_task_id_error_info_dict: dict,
    preserve_proto_field_name: bool,
):
    """Assert observed lifecycle states and error info match expectations.

    Aggregates state transitions and error info per (task_id, task_attempt)
    from all TASK_LIFECYCLE_EVENTs, then checks:
      * each expected attempt saw exactly the expected set of states;
      * each expected attempt's error info matches on error type and contains
        the expected error message substring.

    Args:
        events: Decoded JSON event dicts received by the stub server.
        expected_task_id_states_dict: {(task_id, attempt): set-of-state-names}.
        expected_task_id_error_info_dict: {(task_id, attempt): error-info dict},
            keyed with the same field-name style as the events.
        preserve_proto_field_name: Whether keys are snake_case (True) or
            camelCase (False).
    """
    # Select the key spelling once; the aggregation and checks are identical.
    if preserve_proto_field_name:
        event_type_key, lifecycle_key = "event_type", "task_lifecycle_event"
        task_id_key, task_attempt_key = "task_id", "task_attempt"
        transitions_key, error_info_key = "state_transitions", "ray_error_info"
        error_type_key, error_message_key = "error_type", "error_message"
    else:
        event_type_key, lifecycle_key = "eventType", "taskLifecycleEvent"
        task_id_key, task_attempt_key = "taskId", "taskAttempt"
        transitions_key, error_info_key = "stateTransitions", "rayErrorInfo"
        error_type_key, error_message_key = "errorType", "errorMessage"

    task_id_states_dict = {}
    task_id_error_info_dict = {}
    for event in events:
        if event[event_type_key] != "TASK_LIFECYCLE_EVENT":
            continue
        lifecycle = event[lifecycle_key]
        attempt = (lifecycle[task_id_key], lifecycle[task_attempt_key])
        states = task_id_states_dict.setdefault(attempt, set())
        for state in lifecycle[transitions_key]:
            states.add(state["state"])
        if error_info_key in lifecycle:
            task_id_error_info_dict[attempt] = lifecycle[error_info_key]

    for (
        expected_task_id_attempt,
        expected_states,
    ) in expected_task_id_states_dict.items():
        assert expected_task_id_attempt in task_id_states_dict
        assert task_id_states_dict[expected_task_id_attempt] == expected_states
    for (
        expected_task_id_attempt,
        expected_error_info,
    ) in expected_task_id_error_info_dict.items():
        assert expected_task_id_attempt in task_id_error_info_dict
        actual_error_info = task_id_error_info_dict[expected_task_id_attempt]
        assert actual_error_info[error_type_key] == expected_error_info[error_type_key]
        # Message check is substring containment: the runtime appends details.
        assert (
            expected_error_info[error_message_key]
            in actual_error_info[error_message_key]
        )
def get_and_validate_events(httpserver, validation_func):
    """Flatten every event batch POSTed to the stub server and validate it.

    Designed for polling with wait_for_condition: any exception raised by
    *validation_func* (typically an AssertionError while events are still
    trickling in) is reported as False rather than propagated.
    """
    all_events = [
        event
        for request, _ in httpserver.log
        for event in json.loads(request.data)
    ]
    try:
        validation_func(all_events)
    except Exception:
        # Not all expected events have arrived yet; caller will retry.
        return False
    return True
def run_driver_script_and_wait_for_events(script, httpserver, cluster, validation_func):
    """Run *script* as a driver and block until *validation_func* accepts the
    events exported to the stub server.

    *validation_func* is called as ``validation_func(events, head_node_id)``
    and should raise (e.g. via assert) while the expected events are missing.
    """
    httpserver.expect_request("/", method="POST").respond_with_data("", status=200)
    all_node_ids = [node.node_id for node in cluster.list_all_nodes()]
    # Here we wait for the dashboard agent grpc server to be ready before running the
    # driver script. Ideally, the startup sequence should guarantee that. Created an
    # issue to track this: https://github.com/ray-project/ray/issues/58007
    assert wait_until_grpc_channel_ready(cluster.gcs_address, all_node_ids)
    run_string_as_driver_nonblocking(script)
    wait_for_condition(
        lambda: get_and_validate_events(
            httpserver,
            lambda events: validation_func(events, cluster.head_node.node_id),
        )
    )
class TestNormalTaskEvents:
    """End-to-end checks that normal (non-actor) task executions export the
    expected definition and lifecycle events, in both snake_case and camelCase
    JSON field-name modes."""

    @_cluster_with_aggregator_target
    def test_normal_task_succeed(
        self,
        ray_start_cluster_head_with_env_vars,
        httpserver,
        preserve_proto_field_name,
    ):
        """A successful task emits definition events for the driver and the
        task, and lifecycle events ending in FINISHED."""
        script = textwrap.dedent(
            """
            import ray
            ray.init()
            @ray.remote
            def normal_task():
                pass
            ray.get(normal_task.remote())
            """
        )

        def validate_events(events: list, head_node_id):
            (
                driver_script_job_id,
                driver_task_id,
            ) = get_job_id_and_driver_script_task_id_from_events(
                events, preserve_proto_field_name
            )
            expected_driver_task_states = {"RUNNING", "FINISHED"}
            expected_normal_task_states = {
                "PENDING_ARGS_AVAIL",
                "PENDING_NODE_ASSIGNMENT",
                "SUBMITTED_TO_WORKER",
                "RUNNING",
                "FINISHED",
            }
            # Check definition events
            driver_task_definition_received = False
            normal_task_definition_received = False
            for event in events:
                if preserve_proto_field_name:
                    if event["event_type"] == "TASK_DEFINITION_EVENT":
                        check_task_event_base_fields(
                            event, preserve_proto_field_name, head_node_id
                        )
                        if event["task_definition_event"]["task_type"] == "DRIVER_TASK":
                            # Skip driver tasks from other jobs (e.g. the test
                            # process itself).
                            if (
                                event["task_definition_event"]["task_id"]
                                != driver_task_id
                            ):
                                continue
                            driver_task_definition_received = True
                            assert event["task_definition_event"]["task_attempt"] == 0
                            assert event["task_definition_event"]["language"] == "PYTHON"
                        else:
                            normal_task_definition_received = True
                            normal_task_id = event["task_definition_event"]["task_id"]
                            assert normal_task_id is not None
                            assert (
                                event["task_definition_event"]["task_func"][
                                    "python_function_descriptor"
                                ]["module_name"]
                                == "__main__"
                            )
                            assert (
                                event["task_definition_event"]["task_func"][
                                    "python_function_descriptor"
                                ]["class_name"]
                                == ""
                            )
                            assert (
                                event["task_definition_event"]["task_func"][
                                    "python_function_descriptor"
                                ]["function_name"]
                                == "normal_task"
                            )
                            assert (
                                event["task_definition_event"]["task_func"][
                                    "python_function_descriptor"
                                ]["function_hash"]
                                is not None
                            )
                            assert (
                                event["task_definition_event"]["task_name"]
                                == "normal_task"
                            )
                            assert event["task_definition_event"][
                                "required_resources"
                            ] == {"CPU": 1.0}
                            assert (
                                event["task_definition_event"]["job_id"]
                                == driver_script_job_id
                            )
                            assert (
                                event["task_definition_event"]["parent_task_id"]
                                == driver_task_id
                            )
                            assert event["task_definition_event"]["task_attempt"] == 0
                            assert event["task_definition_event"]["language"] == "PYTHON"
                    else:
                        assert event["event_type"] == "TASK_LIFECYCLE_EVENT"
                else:
                    if event["eventType"] == "TASK_DEFINITION_EVENT":
                        check_task_event_base_fields(
                            event, preserve_proto_field_name, head_node_id
                        )
                        if event["taskDefinitionEvent"]["taskType"] == "DRIVER_TASK":
                            if event["taskDefinitionEvent"]["taskId"] != driver_task_id:
                                continue
                            driver_task_definition_received = True
                            assert event["taskDefinitionEvent"]["taskAttempt"] == 0
                            assert event["taskDefinitionEvent"]["language"] == "PYTHON"
                        else:
                            normal_task_definition_received = True
                            normal_task_id = event["taskDefinitionEvent"]["taskId"]
                            assert normal_task_id is not None
                            assert (
                                event["taskDefinitionEvent"]["taskFunc"][
                                    "pythonFunctionDescriptor"
                                ]["moduleName"]
                                == "__main__"
                            )
                            assert (
                                event["taskDefinitionEvent"]["taskFunc"][
                                    "pythonFunctionDescriptor"
                                ]["className"]
                                == ""
                            )
                            assert (
                                event["taskDefinitionEvent"]["taskFunc"][
                                    "pythonFunctionDescriptor"
                                ]["functionName"]
                                == "normal_task"
                            )
                            assert (
                                event["taskDefinitionEvent"]["taskFunc"][
                                    "pythonFunctionDescriptor"
                                ]["functionHash"]
                                is not None
                            )
                            assert (
                                event["taskDefinitionEvent"]["taskName"]
                                == "normal_task"
                            )
                            assert event["taskDefinitionEvent"][
                                "requiredResources"
                            ] == {"CPU": 1.0}
                            assert (
                                event["taskDefinitionEvent"]["jobId"]
                                == driver_script_job_id
                            )
                            assert (
                                event["taskDefinitionEvent"]["parentTaskId"]
                                == driver_task_id
                            )
                            assert event["taskDefinitionEvent"]["taskAttempt"] == 0
                            assert event["taskDefinitionEvent"]["language"] == "PYTHON"
                    else:
                        assert event["eventType"] == "TASK_LIFECYCLE_EVENT"
            assert driver_task_definition_received
            assert normal_task_definition_received
            # Check lifecycle events
            expected_task_id_states_dict = {
                (driver_task_id, 0): expected_driver_task_states,
                (normal_task_id, 0): expected_normal_task_states,
            }
            expected_task_id_error_info_dict = {}
            check_task_lifecycle_event_states_and_error_info(
                events,
                expected_task_id_states_dict,
                expected_task_id_error_info_dict,
                preserve_proto_field_name,
            )

        run_driver_script_and_wait_for_events(
            script, httpserver, ray_start_cluster_head_with_env_vars, validate_events
        )

    @_cluster_with_aggregator_target
    def test_normal_task_execution_failure_with_retry(
        self,
        ray_start_cluster_head_with_env_vars,
        httpserver,
        preserve_proto_field_name,
    ):
        """A task that raises is retried once; both attempts emit definition
        events and FAILED lifecycle states with TASK_EXECUTION_EXCEPTION."""
        script = textwrap.dedent(
            """
            import ray
            ray.init()
            @ray.remote(max_retries=1, retry_exceptions=[Exception])
            def normal_task():
                raise Exception("test error")
            try:
                ray.get(normal_task.remote())
            except Exception as e:
                pass
            """
        )

        def validate_events(events: list, head_node_id):
            (
                driver_script_job_id,
                driver_task_id,
            ) = get_job_id_and_driver_script_task_id_from_events(
                events, preserve_proto_field_name
            )
            # Check definition events
            driver_task_definition_received = False
            normal_task_definition_received = False
            normal_task_definition_retry_received = False
            for event in events:
                if preserve_proto_field_name:
                    if event["event_type"] == "TASK_DEFINITION_EVENT":
                        check_task_event_base_fields(
                            event, preserve_proto_field_name, head_node_id
                        )
                        if event["task_definition_event"]["task_type"] == "DRIVER_TASK":
                            if (
                                event["task_definition_event"]["task_id"]
                                != driver_task_id
                            ):
                                continue
                            driver_task_definition_received = True
                            assert event["task_definition_event"]["task_attempt"] == 0
                            assert event["task_definition_event"]["language"] == "PYTHON"
                        else:
                            normal_task_id = event["task_definition_event"]["task_id"]
                            assert normal_task_id is not None
                            assert (
                                event["task_definition_event"]["task_func"][
                                    "python_function_descriptor"
                                ]["module_name"]
                                == "__main__"
                            )
                            assert (
                                event["task_definition_event"]["task_func"][
                                    "python_function_descriptor"
                                ]["class_name"]
                                == ""
                            )
                            assert (
                                event["task_definition_event"]["task_func"][
                                    "python_function_descriptor"
                                ]["function_name"]
                                == "normal_task"
                            )
                            assert (
                                event["task_definition_event"]["task_func"][
                                    "python_function_descriptor"
                                ]["function_hash"]
                                is not None
                            )
                            assert (
                                event["task_definition_event"]["task_name"]
                                == "normal_task"
                            )
                            assert event["task_definition_event"][
                                "required_resources"
                            ] == {"CPU": 1.0}
                            assert (
                                event["task_definition_event"]["job_id"]
                                == driver_script_job_id
                            )
                            assert (
                                event["task_definition_event"]["parent_task_id"]
                                == driver_task_id
                            )
                            # A retried task re-emits its definition with an
                            # incremented attempt number.
                            if event["task_definition_event"]["task_attempt"] == 0:
                                normal_task_definition_received = True
                            else:
                                assert (
                                    event["task_definition_event"]["task_attempt"] == 1
                                )
                                normal_task_definition_retry_received = True
                            assert event["task_definition_event"]["language"] == "PYTHON"
                    else:
                        assert event["event_type"] == "TASK_LIFECYCLE_EVENT"
                else:
                    if event["eventType"] == "TASK_DEFINITION_EVENT":
                        check_task_event_base_fields(
                            event, preserve_proto_field_name, head_node_id
                        )
                        if event["taskDefinitionEvent"]["taskType"] == "DRIVER_TASK":
                            if event["taskDefinitionEvent"]["taskId"] != driver_task_id:
                                continue
                            driver_task_definition_received = True
                            assert event["taskDefinitionEvent"]["taskAttempt"] == 0
                            assert event["taskDefinitionEvent"]["language"] == "PYTHON"
                        else:
                            normal_task_id = event["taskDefinitionEvent"]["taskId"]
                            assert normal_task_id is not None
                            assert (
                                event["taskDefinitionEvent"]["taskFunc"][
                                    "pythonFunctionDescriptor"
                                ]["moduleName"]
                                == "__main__"
                            )
                            assert (
                                event["taskDefinitionEvent"]["taskFunc"][
                                    "pythonFunctionDescriptor"
                                ]["className"]
                                == ""
                            )
                            assert (
                                event["taskDefinitionEvent"]["taskFunc"][
                                    "pythonFunctionDescriptor"
                                ]["functionName"]
                                == "normal_task"
                            )
                            assert (
                                event["taskDefinitionEvent"]["taskFunc"][
                                    "pythonFunctionDescriptor"
                                ]["functionHash"]
                                is not None
                            )
                            assert (
                                event["taskDefinitionEvent"]["taskName"]
                                == "normal_task"
                            )
                            assert event["taskDefinitionEvent"][
                                "requiredResources"
                            ] == {"CPU": 1.0}
                            assert (
                                event["taskDefinitionEvent"]["jobId"]
                                == driver_script_job_id
                            )
                            assert (
                                event["taskDefinitionEvent"]["parentTaskId"]
                                == driver_task_id
                            )
                            if event["taskDefinitionEvent"]["taskAttempt"] == 0:
                                normal_task_definition_received = True
                            else:
                                assert event["taskDefinitionEvent"]["taskAttempt"] == 1
                                normal_task_definition_retry_received = True
                            assert event["taskDefinitionEvent"]["language"] == "PYTHON"
                    else:
                        assert event["eventType"] == "TASK_LIFECYCLE_EVENT"
            assert driver_task_definition_received
            assert normal_task_definition_received
            assert normal_task_definition_retry_received
            # Check execution events
            expected_driver_task_states = {"RUNNING", "FINISHED"}
            expected_normal_task_states = {
                "PENDING_ARGS_AVAIL",
                "PENDING_NODE_ASSIGNMENT",
                "SUBMITTED_TO_WORKER",
                "RUNNING",
                "FAILED",
            }
            expected_task_id_states_dict = {
                (driver_task_id, 0): expected_driver_task_states,
                (normal_task_id, 0): expected_normal_task_states,
                (normal_task_id, 1): expected_normal_task_states,
            }
            if preserve_proto_field_name:
                expected_task_id_error_info_dict = {
                    (normal_task_id, 0): {
                        "error_type": "TASK_EXECUTION_EXCEPTION",
                        "error_message": "test error",
                    },
                    (normal_task_id, 1): {
                        "error_type": "TASK_EXECUTION_EXCEPTION",
                        "error_message": "test error",
                    },
                }
            else:
                expected_task_id_error_info_dict = {
                    (normal_task_id, 0): {
                        "errorType": "TASK_EXECUTION_EXCEPTION",
                        "errorMessage": "test error",
                    },
                    (normal_task_id, 1): {
                        "errorType": "TASK_EXECUTION_EXCEPTION",
                        "errorMessage": "test error",
                    },
                }
            check_task_lifecycle_event_states_and_error_info(
                events,
                expected_task_id_states_dict,
                expected_task_id_error_info_dict,
                preserve_proto_field_name,
            )

        run_driver_script_and_wait_for_events(
            script, httpserver, ray_start_cluster_head_with_env_vars, validate_events
        )

    @pytest.mark.skipif(
        True,
        reason="Disabled till https://github.com/ray-project/ray/issues/58016 is fixed",
    )
    @_cluster_with_aggregator_target
    def test_task_failed_due_to_node_failure(
        self,
        ray_start_cluster_head_with_env_vars,
        httpserver,
        preserve_proto_field_name,
    ):
        """Killing the node running a non-retryable task yields a FAILED
        lifecycle state with a NODE_DIED error."""
        cluster = ray_start_cluster_head_with_env_vars
        node = cluster.add_node(num_cpus=2)
        script = textwrap.dedent(
            """
            import ray
            ray.init()
            @ray.remote(num_cpus=2, max_retries=0)
            def sleep():
                import time
                time.sleep(999)
            x = sleep.options(name="node-killed").remote()
            try:
                ray.get(x)
            except Exception as e:
                pass
            """
        )

        # Run the driver script and wait for the sleep task to be executing
        def validate_task_running(events: list, head_node_id):
            # Obtain the task id of the sleep task
            normal_task_id = None
            for event in events:
                if preserve_proto_field_name:
                    if (
                        event["event_type"] == "TASK_DEFINITION_EVENT"
                        and event["task_definition_event"]["task_type"] == "NORMAL_TASK"
                    ):
                        normal_task_id = event["task_definition_event"]["task_id"]
                        break
                else:
                    if (
                        event["eventType"] == "TASK_DEFINITION_EVENT"
                        and event["taskDefinitionEvent"]["taskType"] == "NORMAL_TASK"
                    ):
                        normal_task_id = event["taskDefinitionEvent"]["taskId"]
                        break
            assert normal_task_id is not None
            # Check whether the task lifecycle event has running state
            for event in events:
                if preserve_proto_field_name:
                    if (
                        event["event_type"] == "TASK_LIFECYCLE_EVENT"
                        and event["task_lifecycle_event"]["task_id"] == normal_task_id
                    ):
                        for state_transition in event["task_lifecycle_event"][
                            "state_transitions"
                        ]:
                            if state_transition["state"] == "RUNNING":
                                return
                else:
                    if (
                        event["eventType"] == "TASK_LIFECYCLE_EVENT"
                        and event["taskLifecycleEvent"]["taskId"] == normal_task_id
                    ):
                        for state_transition in event["taskLifecycleEvent"][
                            "stateTransitions"
                        ]:
                            if state_transition["state"] == "RUNNING":
                                return
            assert False

        run_driver_script_and_wait_for_events(
            script,
            httpserver,
            ray_start_cluster_head_with_env_vars,
            validate_task_running,
        )
        # Kill the node
        cluster.remove_node(node)

        # Wait and verify the task events
        def validate_task_killed(events: list, head_node_id):
            (
                driver_script_job_id,
                driver_task_id,
            ) = get_job_id_and_driver_script_task_id_from_events(
                events, preserve_proto_field_name
            )
            # Check the task definition events
            driver_task_definition_received = False
            normal_task_definition_received = False
            for event in events:
                if preserve_proto_field_name:
                    if event["event_type"] == "TASK_DEFINITION_EVENT":
                        check_task_event_base_fields(
                            event, preserve_proto_field_name, head_node_id
                        )
                        if event["task_definition_event"]["task_type"] == "DRIVER_TASK":
                            if (
                                event["task_definition_event"]["task_id"]
                                != driver_task_id
                            ):
                                continue
                            driver_task_definition_received = True
                            assert event["task_definition_event"]["task_attempt"] == 0
                            assert event["task_definition_event"]["language"] == "PYTHON"
                        else:
                            normal_task_definition_received = True
                            normal_task_id = event["task_definition_event"]["task_id"]
                            assert normal_task_id is not None
                            assert (
                                event["task_definition_event"]["task_func"][
                                    "python_function_descriptor"
                                ]["module_name"]
                                == "__main__"
                            )
                            assert (
                                event["task_definition_event"]["task_func"][
                                    "python_function_descriptor"
                                ]["class_name"]
                                == ""
                            )
                            assert (
                                event["task_definition_event"]["task_func"][
                                    "python_function_descriptor"
                                ]["function_name"]
                                == "sleep"
                            )
                            assert (
                                event["task_definition_event"]["task_func"][
                                    "python_function_descriptor"
                                ]["function_hash"]
                                is not None
                            )
                            # Task was submitted with .options(name="node-killed").
                            assert (
                                event["task_definition_event"]["task_name"]
                                == "node-killed"
                            )
                            assert event["task_definition_event"][
                                "required_resources"
                            ] == {"CPU": 2.0}
                            assert (
                                event["task_definition_event"]["job_id"]
                                == driver_script_job_id
                            )
                            assert (
                                event["task_definition_event"]["parent_task_id"]
                                == driver_task_id
                            )
                            assert event["task_definition_event"]["task_attempt"] == 0
                            assert event["task_definition_event"]["language"] == "PYTHON"
                    else:
                        assert event["event_type"] == "TASK_LIFECYCLE_EVENT"
                else:
                    if event["eventType"] == "TASK_DEFINITION_EVENT":
                        check_task_event_base_fields(
                            event, preserve_proto_field_name, head_node_id
                        )
                        if event["taskDefinitionEvent"]["taskType"] == "DRIVER_TASK":
                            if event["taskDefinitionEvent"]["taskId"] != driver_task_id:
                                continue
                            driver_task_definition_received = True
                            assert event["taskDefinitionEvent"]["taskAttempt"] == 0
                            assert event["taskDefinitionEvent"]["language"] == "PYTHON"
                        else:
                            normal_task_definition_received = True
                            normal_task_id = event["taskDefinitionEvent"]["taskId"]
                            assert normal_task_id is not None
                            assert (
                                event["taskDefinitionEvent"]["taskFunc"][
                                    "pythonFunctionDescriptor"
                                ]["moduleName"]
                                == "__main__"
                            )
                            assert (
                                event["taskDefinitionEvent"]["taskFunc"][
                                    "pythonFunctionDescriptor"
                                ]["className"]
                                == ""
                            )
                            assert (
                                event["taskDefinitionEvent"]["taskFunc"][
                                    "pythonFunctionDescriptor"
                                ]["functionName"]
                                == "sleep"
                            )
                            assert (
                                event["taskDefinitionEvent"]["taskFunc"][
                                    "pythonFunctionDescriptor"
                                ]["functionHash"]
                                is not None
                            )
                            assert (
                                event["taskDefinitionEvent"]["taskName"]
                                == "node-killed"
                            )
                            assert event["taskDefinitionEvent"][
                                "requiredResources"
                            ] == {"CPU": 2.0}
                            assert (
                                event["taskDefinitionEvent"]["jobId"]
                                == driver_script_job_id
                            )
                            assert (
                                event["taskDefinitionEvent"]["parentTaskId"]
                                == driver_task_id
                            )
                            assert event["taskDefinitionEvent"]["taskAttempt"] == 0
                            assert event["taskDefinitionEvent"]["language"] == "PYTHON"
                    else:
                        assert event["eventType"] == "TASK_LIFECYCLE_EVENT"
            assert driver_task_definition_received
            assert normal_task_definition_received
            # Check the task lifecycle events
            expected_driver_task_states = {"RUNNING", "FINISHED"}
            expected_normal_task_states = {
                "PENDING_ARGS_AVAIL",
                "PENDING_NODE_ASSIGNMENT",
                "SUBMITTED_TO_WORKER",
                "RUNNING",
                "FAILED",
            }
            expected_task_id_states_dict = {
                (driver_task_id, 0): expected_driver_task_states,
                (normal_task_id, 0): expected_normal_task_states,
            }
            if preserve_proto_field_name:
                expected_task_id_error_info_dict = {
                    (normal_task_id, 0): {
                        "error_type": "NODE_DIED",
                        "error_message": "Task failed because the node it was running on is dead or unavailable",
                    }
                }
            else:
                expected_task_id_error_info_dict = {
                    (normal_task_id, 0): {
                        "errorType": "NODE_DIED",
                        "errorMessage": "Task failed because the node it was running on is dead or unavailable",
                    }
                }
            check_task_lifecycle_event_states_and_error_info(
                events,
                expected_task_id_states_dict,
                expected_task_id_error_info_dict,
                preserve_proto_field_name,
            )

        # BUGFIX: validate_task_killed takes (events, head_node_id), but
        # get_and_validate_events invokes the callback with the event list
        # only. Passing it directly raised TypeError on every poll, so the
        # wait could never succeed. Bind head_node_id here, mirroring
        # run_driver_script_and_wait_for_events.
        wait_for_condition(
            lambda: get_and_validate_events(
                httpserver,
                lambda events: validate_task_killed(
                    events, cluster.head_node.node_id
                ),
            ),
        )
class TestActorTaskEvents:
@_cluster_with_aggregator_target
def test_actor_creation_succeed(
self,
ray_start_cluster_head_with_env_vars,
httpserver,
preserve_proto_field_name,
):
script = textwrap.dedent(
"""
import ray
ray.init()
@ray.remote(num_cpus=1)
class Actor:
def __init__(self):
pass
def task(self, arg):
pass
actor = Actor.remote()
obj = ray.put("test")
ray.get(actor.task.remote(obj))
"""
)
def validate_events(events: json, head_node_id):
(
driver_script_job_id,
driver_task_id,
) = get_job_id_and_driver_script_task_id_from_events(
events, preserve_proto_field_name
)
driver_task_definition_received = False
actor_creation_task_definition_received = False
actor_task_definition_received = False
for event in events:
if preserve_proto_field_name:
if event["event_type"] == "TASK_DEFINITION_EVENT":
check_task_event_base_fields(
event, preserve_proto_field_name, head_node_id
)
if event["task_definition_event"]["task_type"] == "DRIVER_TASK":
driver_task_definition_received = True
assert event["task_definition_event"]["task_attempt"] == 0
assert (
event["task_definition_event"]["language"] == "PYTHON"
)
else:
assert (
event["task_definition_event"]["task_type"]
== "ACTOR_CREATION_TASK"
)
actor_creation_task_definition_received = True
actor_creation_task_id = event["task_definition_event"][
"task_id"
]
assert actor_creation_task_id is not None
assert (
event["task_definition_event"]["task_func"][
"python_function_descriptor"
]["module_name"]
== "__main__"
)
assert (
event["task_definition_event"]["task_func"][
"python_function_descriptor"
]["class_name"]
== "Actor"
)
assert (
event["task_definition_event"]["task_func"][
"python_function_descriptor"
]["function_name"]
== "__init__"
)
assert (
event["task_definition_event"]["task_func"][
"python_function_descriptor"
]["function_hash"]
is not None
)
assert (
event["task_definition_event"]["task_name"]
== "Actor.__init__"
)
assert event["task_definition_event"][
"required_resources"
] == {"CPU": 1.0}
assert (
event["task_definition_event"]["parent_task_id"]
== driver_task_id
)
assert (
event["task_definition_event"]["job_id"]
== driver_script_job_id
)
assert event["task_definition_event"]["task_attempt"] == 0
assert (
event["task_definition_event"]["language"] == "PYTHON"
)
elif event["event_type"] == "ACTOR_TASK_DEFINITION_EVENT":
actor_task_definition_received = True
actor_task_id = event["actor_task_definition_event"]["task_id"]
assert actor_task_id is not None
assert (
event["actor_task_definition_event"]["actor_func"][
"python_function_descriptor"
]["module_name"]
== "__main__"
)
assert (
event["actor_task_definition_event"]["actor_func"][
"python_function_descriptor"
]["class_name"]
== "Actor"
)
assert (
event["actor_task_definition_event"]["actor_func"][
"python_function_descriptor"
]["function_name"]
== "task"
)
assert (
event["actor_task_definition_event"]["actor_func"][
"python_function_descriptor"
]["function_hash"]
is not None
)
assert (
event["actor_task_definition_event"]["actor_task_name"]
== "Actor.task"
)
assert (
event["actor_task_definition_event"]["required_resources"]
== {}
)
assert (
event["actor_task_definition_event"]["job_id"]
== driver_script_job_id
)
assert (
event["actor_task_definition_event"]["parent_task_id"]
== driver_task_id
)
assert event["actor_task_definition_event"]["task_attempt"] == 0
assert (
event["actor_task_definition_event"]["language"] == "PYTHON"
)
else:
assert event["event_type"] == "TASK_LIFECYCLE_EVENT"
else:
if event["eventType"] == "TASK_DEFINITION_EVENT":
check_task_event_base_fields(
event, preserve_proto_field_name, head_node_id
)
if event["taskDefinitionEvent"]["taskType"] == "DRIVER_TASK":
driver_task_definition_received = True
assert event["taskDefinitionEvent"]["taskAttempt"] == 0
assert event["taskDefinitionEvent"]["language"] == "PYTHON"
else:
assert (
event["taskDefinitionEvent"]["taskType"]
== "ACTOR_CREATION_TASK"
)
actor_creation_task_definition_received = True
actor_creation_task_id = event["taskDefinitionEvent"][
"taskId"
]
assert actor_creation_task_id is not None
assert (
event["taskDefinitionEvent"]["taskFunc"][
"pythonFunctionDescriptor"
]["moduleName"]
== "__main__"
)
assert (
event["taskDefinitionEvent"]["taskFunc"][
"pythonFunctionDescriptor"
]["className"]
== "Actor"
)
assert (
event["taskDefinitionEvent"]["taskFunc"][
"pythonFunctionDescriptor"
]["functionName"]
== "__init__"
)
assert (
event["taskDefinitionEvent"]["taskFunc"][
"pythonFunctionDescriptor"
]["functionHash"]
is not None
)
assert (
event["taskDefinitionEvent"]["taskName"]
== "Actor.__init__"
)
assert event["taskDefinitionEvent"][
"requiredResources"
] == {"CPU": 1.0}
assert (
event["taskDefinitionEvent"]["parentTaskId"]
== driver_task_id
)
assert (
event["taskDefinitionEvent"]["jobId"]
== driver_script_job_id
)
assert event["taskDefinitionEvent"]["taskAttempt"] == 0
assert event["taskDefinitionEvent"]["language"] == "PYTHON"
elif event["eventType"] == "ACTOR_TASK_DEFINITION_EVENT":
actor_task_definition_received = True
actor_task_id = event["actorTaskDefinitionEvent"]["taskId"]
assert actor_task_id is not None
assert (
event["actorTaskDefinitionEvent"]["actorFunc"][
"pythonFunctionDescriptor"
]["moduleName"]
== "__main__"
)
assert (
event["actorTaskDefinitionEvent"]["actorFunc"][
"pythonFunctionDescriptor"
]["className"]
== "Actor"
)
assert (
event["actorTaskDefinitionEvent"]["actorFunc"][
"pythonFunctionDescriptor"
]["functionName"]
== "task"
)
assert (
event["actorTaskDefinitionEvent"]["actorFunc"][
"pythonFunctionDescriptor"
]["functionHash"]
is not None
)
assert (
event["actorTaskDefinitionEvent"]["actorTaskName"]
== "Actor.task"
)
assert (
event["actorTaskDefinitionEvent"]["requiredResources"] == {}
)
assert (
event["actorTaskDefinitionEvent"]["jobId"]
== driver_script_job_id
)
assert (
event["actorTaskDefinitionEvent"]["parentTaskId"]
== driver_task_id
)
assert event["actorTaskDefinitionEvent"]["taskAttempt"] == 0
assert event["actorTaskDefinitionEvent"]["language"] == "PYTHON"
else:
assert event["eventType"] == "TASK_LIFECYCLE_EVENT"
assert driver_task_definition_received
assert actor_creation_task_definition_received
assert actor_task_definition_received
expected_driver_task_states = {"RUNNING", "FINISHED"}
expected_actor_creation_task_states = {
"PENDING_ARGS_AVAIL",
"PENDING_NODE_ASSIGNMENT",
"RUNNING",
"FINISHED",
}
expected_actor_task_states = {
"PENDING_ARGS_AVAIL",
"PENDING_NODE_ASSIGNMENT",
"SUBMITTED_TO_WORKER",
"PENDING_ACTOR_TASK_ARGS_FETCH",
"PENDING_ACTOR_TASK_ORDERING_OR_CONCURRENCY",
"RUNNING",
"FINISHED",
}
expected_task_id_states_dict = {
(driver_task_id, 0): expected_driver_task_states,
(actor_creation_task_id, 0): expected_actor_creation_task_states,
(actor_task_id, 0): expected_actor_task_states,
}
expected_task_id_error_info_dict = {}
check_task_lifecycle_event_states_and_error_info(
events,
expected_task_id_states_dict,
expected_task_id_error_info_dict,
preserve_proto_field_name,
)
run_driver_script_and_wait_for_events(
script, httpserver, ray_start_cluster_head_with_env_vars, validate_events
)
@_cluster_with_aggregator_target
def test_actor_creation_failed(
self,
ray_start_cluster_head_with_env_vars,
httpserver,
preserve_proto_field_name,
):
script = textwrap.dedent(
"""
import ray
import ray.util.state
from ray._common.test_utils import wait_for_condition
import time
@ray.remote(num_cpus=1)
class Actor:
def __init__(self):
time.sleep(1)
raise Exception("actor creation error")
def task(self):
pass
actor = Actor.remote()
wait_for_condition(lambda: ray.util.state.list_actors(filters=[("class_name", "=", "Actor")])[0]["state"] == "DEAD")
ray.get(actor.task.options().remote())
"""
)
def validate_events(events: json, head_node_id):
(
driver_script_job_id,
driver_task_id,
) = get_job_id_and_driver_script_task_id_from_events(
events, preserve_proto_field_name
)
driver_task_definition_received = False
actor_creation_task_definition_received = False
actor_task_definition_received = False
for event in events:
if preserve_proto_field_name:
if event["event_type"] == "TASK_DEFINITION_EVENT":
check_task_event_base_fields(
event, preserve_proto_field_name, head_node_id
)
if event["task_definition_event"]["task_type"] == "DRIVER_TASK":
driver_task_definition_received = True
assert event["task_definition_event"]["task_attempt"] == 0
assert (
event["task_definition_event"]["language"] == "PYTHON"
)
else:
assert (
event["task_definition_event"]["task_type"]
== "ACTOR_CREATION_TASK"
)
actor_creation_task_definition_received = True
actor_creation_task_id = event["task_definition_event"][
"task_id"
]
assert actor_creation_task_id is not None
assert (
event["task_definition_event"]["task_func"][
"python_function_descriptor"
]["module_name"]
== "__main__"
)
assert (
event["task_definition_event"]["task_func"][
"python_function_descriptor"
]["class_name"]
== "Actor"
)
assert (
event["task_definition_event"]["task_func"][
"python_function_descriptor"
]["function_name"]
== "__init__"
)
assert (
event["task_definition_event"]["task_func"][
"python_function_descriptor"
]["function_hash"]
is not None
)
assert (
event["task_definition_event"]["task_name"]
== "Actor.__init__"
)
assert event["task_definition_event"][
"required_resources"
] == {"CPU": 1.0}
assert (
event["task_definition_event"]["parent_task_id"]
== driver_task_id
)
assert (
event["task_definition_event"]["job_id"]
== driver_script_job_id
)
assert event["task_definition_event"]["task_attempt"] == 0
assert (
event["task_definition_event"]["language"] == "PYTHON"
)
elif event["event_type"] == "ACTOR_TASK_DEFINITION_EVENT":
actor_task_definition_received = True
actor_task_id = event["actor_task_definition_event"]["task_id"]
assert actor_task_id is not None
assert (
event["actor_task_definition_event"]["actor_func"][
"python_function_descriptor"
]["module_name"]
== "__main__"
)
assert (
event["actor_task_definition_event"]["actor_func"][
"python_function_descriptor"
]["class_name"]
== "Actor"
)
assert (
event["actor_task_definition_event"]["actor_func"][
"python_function_descriptor"
]["function_name"]
== "task"
)
assert (
event["actor_task_definition_event"]["actor_func"][
"python_function_descriptor"
]["function_hash"]
is not None
)
assert (
event["actor_task_definition_event"]["actor_task_name"]
== "Actor.task"
)
assert (
event["actor_task_definition_event"]["required_resources"]
== {}
)
assert (
event["actor_task_definition_event"]["job_id"]
== driver_script_job_id
)
assert (
event["actor_task_definition_event"]["parent_task_id"]
== driver_task_id
)
assert event["actor_task_definition_event"]["task_attempt"] == 0
assert (
event["actor_task_definition_event"]["language"] == "PYTHON"
)
else:
assert event["event_type"] == "TASK_LIFECYCLE_EVENT"
else:
if event["eventType"] == "TASK_DEFINITION_EVENT":
check_task_event_base_fields(
event, preserve_proto_field_name, head_node_id
)
if event["taskDefinitionEvent"]["taskType"] == "DRIVER_TASK":
driver_task_definition_received = True
assert event["taskDefinitionEvent"]["taskAttempt"] == 0
assert event["taskDefinitionEvent"]["language"] == "PYTHON"
else:
assert (
event["taskDefinitionEvent"]["taskType"]
== "ACTOR_CREATION_TASK"
)
actor_creation_task_definition_received = True
actor_creation_task_id = event["taskDefinitionEvent"][
"taskId"
]
assert actor_creation_task_id is not None
assert (
event["taskDefinitionEvent"]["taskFunc"][
"pythonFunctionDescriptor"
]["moduleName"]
== "__main__"
)
assert (
event["taskDefinitionEvent"]["taskFunc"][
"pythonFunctionDescriptor"
]["className"]
== "Actor"
)
assert (
event["taskDefinitionEvent"]["taskFunc"][
"pythonFunctionDescriptor"
]["functionName"]
== "__init__"
)
assert (
event["taskDefinitionEvent"]["taskFunc"][
"pythonFunctionDescriptor"
]["functionHash"]
is not None
)
assert (
event["taskDefinitionEvent"]["taskName"]
== "Actor.__init__"
)
assert event["taskDefinitionEvent"][
"requiredResources"
] == {"CPU": 1.0}
assert (
event["taskDefinitionEvent"]["parentTaskId"]
== driver_task_id
)
assert (
event["taskDefinitionEvent"]["jobId"]
== driver_script_job_id
)
assert event["taskDefinitionEvent"]["taskAttempt"] == 0
assert event["taskDefinitionEvent"]["language"] == "PYTHON"
elif event["eventType"] == "ACTOR_TASK_DEFINITION_EVENT":
actor_task_definition_received = True
actor_task_id = event["actorTaskDefinitionEvent"]["taskId"]
assert actor_task_id is not None
assert (
event["actorTaskDefinitionEvent"]["actorFunc"][
"pythonFunctionDescriptor"
]["moduleName"]
== "__main__"
)
assert (
event["actorTaskDefinitionEvent"]["actorFunc"][
"pythonFunctionDescriptor"
]["className"]
== "Actor"
)
assert (
event["actorTaskDefinitionEvent"]["actorFunc"][
"pythonFunctionDescriptor"
]["functionName"]
== "task"
)
assert (
event["actorTaskDefinitionEvent"]["actorFunc"][
"pythonFunctionDescriptor"
]["functionHash"]
is not None
)
assert (
event["actorTaskDefinitionEvent"]["actorTaskName"]
== "Actor.task"
)
assert (
event["actorTaskDefinitionEvent"]["requiredResources"] == {}
)
assert (
event["actorTaskDefinitionEvent"]["jobId"]
== driver_script_job_id
)
assert (
event["actorTaskDefinitionEvent"]["parentTaskId"]
== driver_task_id
)
assert event["actorTaskDefinitionEvent"]["taskAttempt"] == 0
assert event["actorTaskDefinitionEvent"]["language"] == "PYTHON"
else:
assert event["eventType"] == "TASK_LIFECYCLE_EVENT"
assert driver_task_definition_received
assert actor_creation_task_definition_received
assert actor_task_definition_received
expected_driver_task_states = {"RUNNING", "FINISHED"}
expected_actor_creation_task_states = {
"PENDING_ARGS_AVAIL",
"PENDING_NODE_ASSIGNMENT",
"RUNNING",
"FAILED",
}
expected_actor_task_states = {
"PENDING_ARGS_AVAIL",
"PENDING_NODE_ASSIGNMENT",
"FAILED",
}
expected_task_id_states_dict = {
(driver_task_id, 0): expected_driver_task_states,
(actor_creation_task_id, 0): expected_actor_creation_task_states,
(actor_task_id, 0): expected_actor_task_states,
}
if preserve_proto_field_name:
expected_task_id_error_info_dict = {
(actor_creation_task_id, 0): {
"error_type": "TASK_EXECUTION_EXCEPTION",
"error_message": "CreationTaskError: Exception raised from an actor init method.",
},
(actor_task_id, 0): {
"error_type": "ACTOR_DIED",
"error_message": "ray.exceptions.ActorDiedError: The actor died because of an error raised in its creation task",
},
}
else:
expected_task_id_error_info_dict = {
(actor_creation_task_id, 0): {
"errorType": "TASK_EXECUTION_EXCEPTION",
"errorMessage": "CreationTaskError: Exception raised from an actor init method.",
},
(actor_task_id, 0): {
"errorType": "ACTOR_DIED",
"errorMessage": "ray.exceptions.ActorDiedError: The actor died because of an error raised in its creation task",
},
}
check_task_lifecycle_event_states_and_error_info(
events,
expected_task_id_states_dict,
expected_task_id_error_info_dict,
preserve_proto_field_name,
)
run_driver_script_and_wait_for_events(
script, httpserver, ray_start_cluster_head_with_env_vars, validate_events
)
@_cluster_with_aggregator_target
def test_actor_creation_canceled(
self,
ray_start_cluster_head_with_env_vars,
httpserver,
preserve_proto_field_name,
):
script = textwrap.dedent(
"""
import ray
ray.init()
@ray.remote(num_cpus=2)
class Actor:
def __init__(self):
pass
def task(self):
pass
actor = Actor.remote()
ray.kill(actor)
"""
)
def validate_events(events: json, head_node_id):
(
driver_script_job_id,
driver_task_id,
) = get_job_id_and_driver_script_task_id_from_events(
events, preserve_proto_field_name
)
driver_task_definition_received = False
actor_creation_task_definition_received = False
for event in events:
if preserve_proto_field_name:
if event["event_type"] == "TASK_DEFINITION_EVENT":
check_task_event_base_fields(
event, preserve_proto_field_name, head_node_id
)
if event["task_definition_event"]["task_type"] == "DRIVER_TASK":
driver_task_definition_received = True
assert event["task_definition_event"]["task_attempt"] == 0
assert (
event["task_definition_event"]["language"] == "PYTHON"
)
else:
assert (
event["task_definition_event"]["task_type"]
== "ACTOR_CREATION_TASK"
)
actor_creation_task_definition_received = True
actor_creation_task_id = event["task_definition_event"][
"task_id"
]
assert actor_creation_task_id is not None
assert (
event["task_definition_event"]["task_func"][
"python_function_descriptor"
]["module_name"]
== "__main__"
)
assert (
event["task_definition_event"]["task_func"][
"python_function_descriptor"
]["class_name"]
== "Actor"
)
assert (
event["task_definition_event"]["task_func"][
"python_function_descriptor"
]["function_name"]
== "__init__"
)
assert (
event["task_definition_event"]["task_func"][
"python_function_descriptor"
]["function_hash"]
is not None
)
assert (
event["task_definition_event"]["task_name"]
== "Actor.__init__"
)
assert event["task_definition_event"][
"required_resources"
] == {"CPU": 2.0}
assert (
event["task_definition_event"]["parent_task_id"]
== driver_task_id
)
assert (
event["task_definition_event"]["job_id"]
== driver_script_job_id
)
assert event["task_definition_event"]["task_attempt"] == 0
assert (
event["task_definition_event"]["language"] == "PYTHON"
)
else:
assert event["event_type"] == "TASK_LIFECYCLE_EVENT"
else:
if event["eventType"] == "TASK_DEFINITION_EVENT":
check_task_event_base_fields(
event, preserve_proto_field_name, head_node_id
)
if event["taskDefinitionEvent"]["taskType"] == "DRIVER_TASK":
driver_task_definition_received = True
assert event["taskDefinitionEvent"]["taskAttempt"] == 0
assert event["taskDefinitionEvent"]["language"] == "PYTHON"
else:
assert (
event["taskDefinitionEvent"]["taskType"]
== "ACTOR_CREATION_TASK"
)
actor_creation_task_definition_received = True
actor_creation_task_id = event["taskDefinitionEvent"][
"taskId"
]
assert actor_creation_task_id is not None
assert (
event["taskDefinitionEvent"]["taskFunc"][
"pythonFunctionDescriptor"
]["moduleName"]
== "__main__"
)
assert (
event["taskDefinitionEvent"]["taskFunc"][
"pythonFunctionDescriptor"
]["className"]
== "Actor"
)
assert (
event["taskDefinitionEvent"]["taskFunc"][
"pythonFunctionDescriptor"
]["functionName"]
== "__init__"
)
assert (
event["taskDefinitionEvent"]["taskFunc"][
"pythonFunctionDescriptor"
]["functionHash"]
is not None
)
assert (
event["taskDefinitionEvent"]["taskName"]
== "Actor.__init__"
)
assert event["taskDefinitionEvent"][
"requiredResources"
] == {"CPU": 2.0}
assert (
event["taskDefinitionEvent"]["parentTaskId"]
== driver_task_id
)
assert (
event["taskDefinitionEvent"]["jobId"]
== driver_script_job_id
)
assert event["taskDefinitionEvent"]["taskAttempt"] == 0
assert event["taskDefinitionEvent"]["language"] == "PYTHON"
else:
assert event["eventType"] == "TASK_LIFECYCLE_EVENT"
assert driver_task_definition_received
assert actor_creation_task_definition_received
expected_driver_task_states = {"RUNNING", "FINISHED"}
expected_actor_creation_task_states = {
"PENDING_ARGS_AVAIL",
"PENDING_NODE_ASSIGNMENT",
"FAILED",
}
expected_task_id_states_dict = {
(driver_task_id, 0): expected_driver_task_states,
(actor_creation_task_id, 0): expected_actor_creation_task_states,
}
if preserve_proto_field_name:
expected_task_id_error_info_dict = {
(actor_creation_task_id, 0): {
"error_type": "WORKER_DIED",
"error_message": "",
}
}
else:
expected_task_id_error_info_dict = {
(actor_creation_task_id, 0): {
"errorType": "WORKER_DIED",
"errorMessage": "",
}
}
check_task_lifecycle_event_states_and_error_info(
events,
expected_task_id_states_dict,
expected_task_id_error_info_dict,
preserve_proto_field_name,
)
run_driver_script_and_wait_for_events(
script, httpserver, ray_start_cluster_head_with_env_vars, validate_events
)
@_cluster_with_aggregator_target
def test_actor_restart(
self,
ray_start_cluster_head_with_env_vars,
httpserver,
preserve_proto_field_name,
):
script = textwrap.dedent(
"""
import ray
import time
ray.init()
@ray.remote(num_cpus=2, max_restarts=-1, max_task_retries=-1)
class Actor:
def __init__(self):
pass
def actor_task(self):
pass
actor = Actor.remote()
time.sleep(999) # Keep the actor alive
"""
)
actor_creation_task_id = None
def validate_actor_creation(events: json, head_node_id):
nonlocal actor_creation_task_id
(
driver_script_job_id,
driver_task_id,
) = get_job_id_and_driver_script_task_id_from_events(
events, preserve_proto_field_name
)
driver_task_definition_received = False
actor_creation_task_definition_received = False
for event in events:
if preserve_proto_field_name:
if event["event_type"] == "TASK_DEFINITION_EVENT":
check_task_event_base_fields(
event, preserve_proto_field_name, head_node_id
)
if event["task_definition_event"]["task_type"] == "DRIVER_TASK":
driver_task_definition_received = True
assert event["task_definition_event"]["task_attempt"] == 0
assert (
event["task_definition_event"]["language"] == "PYTHON"
)
else:
assert (
event["task_definition_event"]["task_type"]
== "ACTOR_CREATION_TASK"
)
actor_creation_task_definition_received = True
actor_creation_task_id = event["task_definition_event"][
"task_id"
]
assert actor_creation_task_id is not None
assert (
event["task_definition_event"]["task_func"][
"python_function_descriptor"
]["module_name"]
== "__main__"
)
assert (
event["task_definition_event"]["task_func"][
"python_function_descriptor"
]["class_name"]
== "Actor"
)
assert (
event["task_definition_event"]["task_func"][
"python_function_descriptor"
]["function_name"]
== "__init__"
)
assert (
event["task_definition_event"]["task_func"][
"python_function_descriptor"
]["function_hash"]
is not None
)
assert (
event["task_definition_event"]["task_name"]
== "Actor.__init__"
)
assert event["task_definition_event"][
"required_resources"
] == {"CPU": 2.0}
assert (
event["task_definition_event"]["parent_task_id"]
== driver_task_id
)
assert (
event["task_definition_event"]["job_id"]
== driver_script_job_id
)
assert event["task_definition_event"]["task_attempt"] == 0
assert (
event["task_definition_event"]["language"] == "PYTHON"
)
else:
assert event["event_type"] == "TASK_LIFECYCLE_EVENT"
else:
if event["eventType"] == "TASK_DEFINITION_EVENT":
check_task_event_base_fields(
event, preserve_proto_field_name, head_node_id
)
if event["taskDefinitionEvent"]["taskType"] == "DRIVER_TASK":
driver_task_definition_received = True
assert event["taskDefinitionEvent"]["taskAttempt"] == 0
assert event["taskDefinitionEvent"]["language"] == "PYTHON"
else:
assert (
event["taskDefinitionEvent"]["taskType"]
== "ACTOR_CREATION_TASK"
)
actor_creation_task_definition_received = True
actor_creation_task_id = event["taskDefinitionEvent"][
"taskId"
]
assert actor_creation_task_id is not None
assert (
event["taskDefinitionEvent"]["taskFunc"][
"pythonFunctionDescriptor"
]["moduleName"]
== "__main__"
)
assert (
event["taskDefinitionEvent"]["taskFunc"][
"pythonFunctionDescriptor"
]["className"]
== "Actor"
)
assert (
event["taskDefinitionEvent"]["taskFunc"][
"pythonFunctionDescriptor"
]["functionName"]
== "__init__"
)
assert (
event["taskDefinitionEvent"]["taskFunc"][
"pythonFunctionDescriptor"
]["functionHash"]
is not None
)
assert (
event["taskDefinitionEvent"]["taskName"]
== "Actor.__init__"
)
assert event["taskDefinitionEvent"][
"requiredResources"
] == {"CPU": 2.0}
assert (
event["taskDefinitionEvent"]["jobId"]
== driver_script_job_id
)
assert (
event["taskDefinitionEvent"]["parentTaskId"]
== driver_task_id
)
assert event["taskDefinitionEvent"]["taskAttempt"] == 0
assert event["taskDefinitionEvent"]["language"] == "PYTHON"
else:
assert event["eventType"] == "TASK_LIFECYCLE_EVENT"
assert driver_task_definition_received
assert actor_creation_task_definition_received
expected_driver_task_states = {"RUNNING"}
expected_actor_creation_task_states = {
"PENDING_ARGS_AVAIL",
"PENDING_NODE_ASSIGNMENT",
"RUNNING",
"FINISHED",
}
expected_task_id_states_dict = {
(driver_task_id, 0): expected_driver_task_states,
(actor_creation_task_id, 0): expected_actor_creation_task_states,
}
expected_task_id_error_info_dict = {}
check_task_lifecycle_event_states_and_error_info(
events,
expected_task_id_states_dict,
expected_task_id_error_info_dict,
preserve_proto_field_name,
)
def validate_actor_restart(events: json):
nonlocal actor_creation_task_id
# Check the actor creation task running state with attempt number 1
expected_actor_retry_task_states = {
"RUNNING",
}
expected_task_id_states_dict = {
(actor_creation_task_id, 1): expected_actor_retry_task_states,
}
check_task_lifecycle_event_states_and_error_info(
events,
expected_task_id_states_dict,
{},
preserve_proto_field_name,
)
# Add a node to the cluster and wait for it to be registered
cluster = ray_start_cluster_head_with_env_vars
node = cluster.add_node(num_cpus=2)
# Run the driver script for the actor to be created and actor task to be executed
run_driver_script_and_wait_for_events(
script,
httpserver,
ray_start_cluster_head_with_env_vars,
validate_actor_creation,
)
# Add a second node to the cluster for the actor to be restarted on
cluster.add_node(num_cpus=2)
# Kill the first node
cluster.remove_node(node)
# Wait for the actor to be restarted on the second node
wait_for_condition(
lambda: get_and_validate_events(httpserver, validate_actor_restart),
)
if __name__ == "__main__":
pytest.main(["-vv", __file__])
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/test_ray_event_export_task_events.py",
"license": "Apache License 2.0",
"lines": 1853,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:doc/source/llm/doc_code/serve/transcription/transcription_example.py | """
This file serves as a documentation example and CI test.
Structure:
1. Monkeypatch setup: Ensures serve.run is non-blocking and removes accelerator requirements for CI testing.
2. Docs example (between __transcription_example_start/end__): Embedded in Sphinx docs via literalinclude.
3. Test validation (deployment status polling + cleanup)
"""
import time
import openai
import requests
from ray import serve
from ray.serve.schema import ApplicationStatus
from ray.serve._private.constants import SERVE_DEFAULT_APP_NAME
from ray.serve import llm
_original_serve_run = serve.run
_original_build_openai_app = llm.build_openai_app
def _non_blocking_serve_run(app, **kwargs):
"""Forces blocking=False for testing"""
kwargs["blocking"] = False
return _original_serve_run(app, **kwargs)
def _testing_build_openai_app(llm_serving_args):
"""Removes accelerator requirements for testing"""
for config in llm_serving_args["llm_configs"]:
config.accelerator_type = None
return _original_build_openai_app(llm_serving_args)
serve.run = _non_blocking_serve_run
llm.build_openai_app = _testing_build_openai_app
# __transcription_example_start__
from ray import serve
from ray.serve.llm import LLMConfig, build_openai_app
llm_config = LLMConfig(
model_loading_config={
"model_id": "whisper-small",
"model_source": "openai/whisper-small",
},
deployment_config={
"autoscaling_config": {
"min_replicas": 1,
"max_replicas": 4,
}
},
accelerator_type="A10G",
log_engine_metrics=True,
)
app = build_openai_app({"llm_configs": [llm_config]})
serve.run(app, blocking=True)
# __transcription_example_end__
status = ApplicationStatus.NOT_STARTED
timeout_seconds = 300
start_time = time.time()
while (
status != ApplicationStatus.RUNNING and time.time() - start_time < timeout_seconds
):
status = serve.status().applications[SERVE_DEFAULT_APP_NAME].status
if status in [ApplicationStatus.DEPLOY_FAILED, ApplicationStatus.UNHEALTHY]:
raise AssertionError(f"Deployment failed with status: {status}")
time.sleep(1)
if status != ApplicationStatus.RUNNING:
raise AssertionError(
f"Deployment failed to reach RUNNING status within {timeout_seconds}s. Current status: {status}"
)
response = requests.get("https://voiceage.com/wbsamples/in_stereo/Sports.wav")
with open("audio.wav", "wb") as f:
f.write(response.content)
client = openai.OpenAI(base_url="http://localhost:8000/v1", api_key="fake-key")
with open("audio.wav", "rb") as f:
try:
response = client.audio.transcriptions.create(
model="whisper-small",
file=f,
temperature=0.0,
language="en",
)
except Exception as e:
raise AssertionError(
f"Error while querying models: {e}. Check the logs for more details."
)
serve.shutdown()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/llm/doc_code/serve/transcription/transcription_example.py",
"license": "Apache License 2.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/serve/tests/test_config_files/use_custom_autoscaling_policy.py | from ray import serve
from ray.serve.config import AutoscalingContext
def custom_autoscaling_policy(ctx: AutoscalingContext):
print("custom_autoscaling_policy")
return 2, {}
@serve.deployment
class CustomAutoscalingPolicy:
def __call__(self):
return "hello_from_custom_autoscaling_policy"
app = CustomAutoscalingPolicy.bind()
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/test_config_files/use_custom_autoscaling_policy.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/serve/tests/test_config_files/use_custom_request_router.py | import random
from typing import (
List,
Optional,
)
from ray import serve
from ray.serve.context import _get_internal_replica_context
from ray.serve.request_router import (
PendingRequest,
ReplicaID,
ReplicaResult,
RequestRouter,
RunningReplica,
)
class UniformRequestRouter(RequestRouter):
async def choose_replicas(
self,
candidate_replicas: List[RunningReplica],
pending_request: Optional[PendingRequest] = None,
) -> List[List[RunningReplica]]:
print("UniformRequestRouter routing request")
index = random.randint(0, len(candidate_replicas) - 1)
return [[candidate_replicas[index]]]
def on_request_routed(
self,
pending_request: PendingRequest,
replica_id: ReplicaID,
result: ReplicaResult,
):
print("on_request_routed callback is called!!")
@serve.deployment
class UniformRequestRouterApp:
def __init__(self):
context = _get_internal_replica_context()
self.replica_id: ReplicaID = context.replica_id
async def __call__(self):
return "hello_from_custom_request_router"
app = UniformRequestRouterApp.bind()
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/test_config_files/use_custom_request_router.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/llm/_internal/serve/engines/vllm/kv_transfer/factory.py | """Factory for lazy-loading KV connector backends.
This module provides a factory pattern for registering and instantiating
KV connector backends without eagerly importing all implementations.
This avoids circular import issues and improves startup performance.
"""
from typing import TYPE_CHECKING, Type, Union
from ray.llm._internal.serve.engines.vllm.kv_transfer.base import (
BaseConnectorBackend,
)
from ray.llm._internal.serve.observability.logging import get_logger
from ray.llm._internal.serve.utils.registry import get_registry
if TYPE_CHECKING:
from ray.llm._internal.serve.core.configs.llm_config import LLMConfig
logger = get_logger(__name__)
# Get the registry instance for KV connector backends
_kv_backend_registry = get_registry("kv_connector_backend")
class KVConnectorBackendFactory:
"""Factory for creating KV connector backend instances with lazy loading."""
@classmethod
def register_backend(
cls,
name: str,
backend_class_or_path: Union[Type["BaseConnectorBackend"], str],
) -> None:
"""Register a connector backend.
This enables the backend to be accessed on every Ray process in the cluster.
Args:
name: The name of the connector (e.g., "LMCacheConnectorV1")
backend_class_or_path: Either:
- The backend class object directly (preferred), or
- A string in the format "module_path:class_name" for lazy loading
Examples:
# Register with class directly (recommended):
KVConnectorBackendFactory.register_backend("MyConnector", MyConnectorClass)
# Register with module path string (for lazy loading):
KVConnectorBackendFactory.register_backend("MyConnector", "my.module:MyClass")
"""
_kv_backend_registry.register(name, backend_class_or_path)
@classmethod
def get_backend_class(cls, name: str) -> Type["BaseConnectorBackend"]:
"""Get the connector backend class by name.
For registered connectors, returns the registered backend class.
For unregistered connectors, returns BaseConnectorBackend which has
a no-op setup() method, allowing connectors that don't require
Ray Serve orchestration to work without registration.
Args:
name: The name of the connector backend
Returns:
The connector backend class
Raises:
ImportError: If a registered backend fails to load
"""
try:
return _kv_backend_registry.get(name)
except ValueError:
logger.warning(
f"Unsupported connector backend: {name}. "
f"Using default: {BaseConnectorBackend.__name__}."
)
return BaseConnectorBackend
except Exception as e:
raise ImportError(
f"Failed to load connector backend '{name}': {type(e).__name__}: {e}"
) from e
@classmethod
def create_backend(
cls, name: str, llm_config: "LLMConfig"
) -> "BaseConnectorBackend":
"""Create a connector backend instance.
Args:
name: The name of the connector backend
llm_config: The LLM configuration
Returns:
An instance of the connector backend
"""
return cls.get_backend_class(name)(llm_config)
@classmethod
def is_registered(cls, name: str) -> bool:
"""Check if a connector backend is registered."""
return _kv_backend_registry.contains(name)
@classmethod
def unregister_backend(cls, name: str) -> None:
"""Unregister a connector backend.
Removes the backend from the registry across all Ray processes.
Args:
name: The name of the connector backend to unregister
"""
_kv_backend_registry.unregister(name)
BUILTIN_BACKENDS = {
"LMCacheConnectorV1": "ray.llm._internal.serve.engines.vllm.kv_transfer.lmcache:LMCacheConnectorV1Backend",
"NixlConnector": "ray.llm._internal.serve.engines.vllm.kv_transfer.nixl:NixlConnectorBackend",
"MultiConnector": "ray.llm._internal.serve.engines.vllm.kv_transfer.multi_connector:MultiConnectorBackend",
}
def _initialize_registry() -> None:
"""Initialize the registry with built-in backends.
This function is called when the module is imported to ensure
built-in backends are registered.
"""
for name, backend_path in BUILTIN_BACKENDS.items():
if not KVConnectorBackendFactory.is_registered(name):
KVConnectorBackendFactory.register_backend(name, backend_path)
# Initialize registry when module is imported
_initialize_registry()
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/_internal/serve/engines/vllm/kv_transfer/factory.py",
"license": "Apache License 2.0",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:python/ray/llm/_internal/serve/engines/vllm/kv_transfer/multi_connector.py | import copy
from typing import TYPE_CHECKING
from ray.llm._internal.serve.engines.vllm.kv_transfer.base import (
BaseConnectorBackend,
)
from ray.llm._internal.serve.engines.vllm.kv_transfer.factory import (
KVConnectorBackendFactory,
)
if TYPE_CHECKING:
from ray.llm._internal.serve.core.configs.llm_config import LLMConfig
class MultiConnectorBackend(BaseConnectorBackend):
def __init__(self, llm_config: "LLMConfig"):
super().__init__(llm_config)
def setup(self) -> None:
"""Setup all connectors listed in the kv_transfer_config."""
kv_transfer_config = self.kv_transfer_config
connectors = kv_transfer_config.get("kv_connector_extra_config", {}).get(
"connectors", []
)
for connector in connectors:
connector_backend_str = connector.get("kv_connector")
if connector_backend_str is None:
raise ValueError("kv_connector is not set in the connector")
if connector_backend_str == "MultiConnector":
raise ValueError(
"Nesting MultiConnector within MultiConnector is not supported."
)
# Merge parent config with connector-specific config
sub_llm_config = copy.deepcopy(self.llm_config)
sub_llm_config.engine_kwargs["kv_transfer_config"] = {
**{
k: v
for k, v in kv_transfer_config.items()
if k != "kv_connector_extra_config"
},
**connector,
}
# Use factory to get backend class lazily
connector_backend = KVConnectorBackendFactory.create_backend(
connector_backend_str, sub_llm_config
)
connector_backend.setup()
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/_internal/serve/engines/vllm/kv_transfer/multi_connector.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/llm/tests/serve/cpu/deployments/llm/vllm/kv_transfer_backends/test_factory.py | import sys
from contextlib import contextmanager
from typing import Any
import pytest
from ray import serve
from ray.llm._internal.serve.engines.vllm.kv_transfer.base import (
BaseConnectorBackend,
)
from ray.llm._internal.serve.engines.vllm.kv_transfer.factory import (
KVConnectorBackendFactory,
)
from ray.serve.llm import LLMConfig
@contextmanager
def registered_backend(name: str, backend_class_or_path: Any):
KVConnectorBackendFactory.register_backend(name, backend_class_or_path)
try:
yield
finally:
if KVConnectorBackendFactory.is_registered(name):
KVConnectorBackendFactory.unregister_backend(name)
@pytest.fixture
def test_deployment_handle():
"""Fixture that creates a Serve deployment for testing cross-process registry access."""
# This ensures proper serialization when sent to child processes
class TestCrossProcessConnector(BaseConnectorBackend):
def setup(self):
pass
# Register the backend in the driver process and ensure cleanup
with registered_backend("TestCrossProcessConnector", TestCrossProcessConnector):
# Create a Serve deployment that will run in a different process than the
# driver process
@serve.deployment
class TestDeployment:
def __init__(self):
# This runs in a child process - should be able to access the registered backend
self.connector_class = KVConnectorBackendFactory.get_backend_class(
"TestCrossProcessConnector"
)
def __call__(self):
"""Return the connector class to verify it's correct."""
return self.connector_class
# Deploy and yield the handle and connector class
app = TestDeployment.bind()
handle = serve.run(app)
try:
yield handle, TestCrossProcessConnector
finally:
try:
serve.shutdown()
except RuntimeError:
# Handle case where event loop is already closed
pass
class TestKVConnectorBackendFactory:
    """Tests covering registration, lookup, and instantiation in KVConnectorBackendFactory."""

    def test_get_backend_class_success(self):
        """Looking up a pre-registered backend yields a class exposing setup()."""
        resolved = KVConnectorBackendFactory.get_backend_class("LMCacheConnectorV1")
        assert resolved is not None
        assert hasattr(resolved, "setup")

    def test_get_backend_class_not_registered_returns_base(self):
        """An unknown connector name falls back to BaseConnectorBackend."""
        resolved = KVConnectorBackendFactory.get_backend_class("UnregisteredConnector")
        assert resolved == BaseConnectorBackend
        assert issubclass(resolved, BaseConnectorBackend)

    def test_create_backend_success(self):
        """create_backend returns a backend instance wired to the given config."""
        config = LLMConfig(
            model_loading_config=dict(model_id="test-model"),
            engine_kwargs=dict(
                kv_transfer_config=dict(
                    kv_connector="LMCacheConnectorV1",
                    kv_role="kv_both",
                )
            ),
        )
        instance = KVConnectorBackendFactory.create_backend(
            "LMCacheConnectorV1", config
        )
        assert isinstance(instance, BaseConnectorBackend)
        assert instance.llm_config == config

    @pytest.mark.parametrize(
        "connector_name",
        ["LMCacheConnectorV1", "NixlConnector", "MultiConnector"],
    )
    def test_all_registered_backends_can_be_loaded(self, connector_name):
        """Every pre-registered backend resolves to a BaseConnectorBackend subclass."""
        resolved = KVConnectorBackendFactory.get_backend_class(connector_name)
        assert resolved is not None
        assert issubclass(resolved, BaseConnectorBackend)

    def test_get_backend_class_import_error_handling(self):
        """A backend registered under a bogus module path raises a clear ImportError."""
        with registered_backend("BadBackend", "non.existent.module:NonExistentClass"):
            with pytest.raises(
                ImportError, match="Failed to load connector backend 'BadBackend'"
            ):
                KVConnectorBackendFactory.get_backend_class("BadBackend")

    def test_register_backend_with_class_directly(self):
        """A backend class object can be registered and retrieved unchanged."""

        class CustomBackend(BaseConnectorBackend):
            def setup(self):
                pass

        with registered_backend("CustomBackend", CustomBackend):
            assert KVConnectorBackendFactory.is_registered("CustomBackend")
            retrieved = KVConnectorBackendFactory.get_backend_class("CustomBackend")
            assert retrieved == CustomBackend

    def test_register_backend_with_module_path(self):
        """A 'module:Class' path string registers a backend that resolves lazily."""
        with registered_backend(
            "LMCacheViaPath",
            "ray.llm._internal.serve.engines.vllm.kv_transfer.lmcache:LMCacheConnectorV1Backend",
        ):
            assert KVConnectorBackendFactory.is_registered("LMCacheViaPath")
            resolved = KVConnectorBackendFactory.get_backend_class("LMCacheViaPath")
            assert resolved is not None
            assert issubclass(resolved, BaseConnectorBackend)

    def test_unregistered_connector_with_llm_config_setup(self):
        """setup_engine_backend() tolerates connectors with no registered backend."""
        config = LLMConfig(
            model_loading_config=dict(model_id="test-model"),
            engine_kwargs=dict(
                kv_transfer_config=dict(
                    kv_connector="SharedStorageConnector",
                    kv_role="kv_both",
                )
            ),
        )
        # Unregistered connectors fall back to the base backend, so this
        # must complete without raising.
        config.setup_engine_backend()

    @pytest.mark.asyncio
    async def test_cross_process_registry_access(self, test_deployment_handle):
        """Driver-side registrations are visible inside Ray Serve child processes."""
        handle, connector_cls = test_deployment_handle
        # The fixture registered this backend in the driver process.
        assert KVConnectorBackendFactory.is_registered("TestCrossProcessConnector")
        remote_cls = await handle.remote()
        # The deployment (a different process) must resolve the exact same class.
        assert remote_cls == connector_cls
        assert issubclass(remote_cls, BaseConnectorBackend)
# Allow invoking this test module directly (outside of a plain `pytest` run).
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/tests/serve/cpu/deployments/llm/vllm/kv_transfer_backends/test_factory.py",
"license": "Apache License 2.0",
"lines": 148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/llm/tests/serve/cpu/deployments/llm/vllm/kv_transfer_backends/test_multi_connector.py | import sys
from unittest.mock import MagicMock, patch
import pytest
from ray.llm._internal.serve.engines.vllm.kv_transfer.base import (
BaseConnectorBackend,
)
from ray.llm._internal.serve.engines.vllm.kv_transfer.factory import (
KVConnectorBackendFactory,
)
from ray.llm._internal.serve.engines.vllm.kv_transfer.multi_connector import (
MultiConnectorBackend,
)
from ray.serve.llm import LLMConfig
class TestMultiConnectorBackend:
    """Test suite for MultiConnectorBackend.

    MultiConnectorBackend fans out to one sub-backend per entry in
    `kv_connector_extra_config.connectors`; these tests pin down the fan-out,
    its validation errors, and the config each sub-connector receives.
    """

    @pytest.fixture
    def basic_llm_config(self):
        """Fixture for basic LLM config with MultiConnector."""
        return LLMConfig(
            model_loading_config=dict(model_id="test-model"),
            engine_kwargs=dict(
                kv_transfer_config=dict(
                    kv_connector="MultiConnector",
                    # Two sub-connectors; setup() is expected to fan out to both.
                    kv_connector_extra_config=dict(
                        connectors=[
                            {"kv_connector": "LMCacheConnectorV1"},
                            {"kv_connector": "NixlConnector"},
                        ]
                    ),
                )
            ),
        )

    @pytest.fixture
    def multi_backend(self, basic_llm_config):
        """Fixture for MultiConnectorBackend."""
        return MultiConnectorBackend(basic_llm_config)

    def test_multi_connector_initialization(self, multi_backend):
        """Test that MultiConnectorBackend can be initialized."""
        assert isinstance(multi_backend, MultiConnectorBackend)
        assert isinstance(multi_backend, BaseConnectorBackend)

    def test_setup_calls_all_connectors(self, multi_backend):
        """Test that setup calls setup on all configured connectors."""
        mock_backend1 = MagicMock(spec=BaseConnectorBackend)
        mock_backend2 = MagicMock(spec=BaseConnectorBackend)
        # side_effect order matters: the first create_backend call yields
        # mock_backend1 (LMCache entry), the second mock_backend2 (Nixl entry).
        with patch.object(
            KVConnectorBackendFactory,
            "create_backend",
            side_effect=[mock_backend1, mock_backend2],
        ) as mock_create:
            multi_backend.setup()
            # One factory call per configured sub-connector.
            assert mock_create.call_count == 2
            mock_backend1.setup.assert_called_once()
            mock_backend2.setup.assert_called_once()

    def test_setup_raises_error_when_connector_missing_kv_connector(self):
        """Test that setup raises ValueError when a connector is missing kv_connector."""
        llm_config = LLMConfig(
            model_loading_config=dict(model_id="test-model"),
            engine_kwargs=dict(
                kv_transfer_config=dict(
                    kv_connector="MultiConnector",
                    kv_connector_extra_config=dict(
                        connectors=[
                            # Invalid entry: no "kv_connector" key.
                            {"some_other_key": "value"},
                        ]
                    ),
                )
            ),
        )
        backend = MultiConnectorBackend(llm_config)
        with pytest.raises(ValueError, match="kv_connector is not set"):
            backend.setup()

    def test_setup_with_nested_multi_connector_raises_error(self):
        """Test that nesting MultiConnector raises a ValueError."""
        llm_config = LLMConfig(
            model_loading_config=dict(model_id="test-model"),
            engine_kwargs=dict(
                kv_transfer_config=dict(
                    kv_connector="MultiConnector",
                    kv_connector_extra_config=dict(
                        connectors=[
                            # A MultiConnector inside a MultiConnector is rejected.
                            {"kv_connector": "MultiConnector"},
                        ]
                    ),
                )
            ),
        )
        backend = MultiConnectorBackend(llm_config)
        with pytest.raises(ValueError, match="Nesting MultiConnector"):
            backend.setup()

    def test_setup_passes_isolated_config_to_sub_connectors(self):
        """Test that sub-connectors inherit parent config and receive their specific settings."""
        llm_config = LLMConfig(
            model_loading_config=dict(model_id="test-model"),
            engine_kwargs=dict(
                kv_transfer_config=dict(
                    kv_connector="MultiConnector",
                    # Parent-level fields that every sub-connector should inherit.
                    engine_id="test-engine-123",
                    kv_role="kv_both",
                    kv_connector_extra_config=dict(
                        connectors=[
                            {
                                "kv_connector": "LMCacheConnectorV1",
                                "custom_param": "value1",
                            },
                            {"kv_connector": "NixlConnector", "custom_param": "value2"},
                        ]
                    ),
                )
            ),
        )
        captured_configs = []

        # Stand-in for the factory: records the (name, kv_transfer_config)
        # pair each sub-connector is created with, in call order.
        def capture_config(name, config):
            captured_configs.append((name, config.engine_kwargs["kv_transfer_config"]))
            return MagicMock(spec=BaseConnectorBackend)

        with patch.object(
            KVConnectorBackendFactory, "create_backend", side_effect=capture_config
        ):
            MultiConnectorBackend(llm_config).setup()
        assert len(captured_configs) == 2
        # Verify each connector gets: inherited parent fields + its own specific config
        expected_configs = [
            (
                "LMCacheConnectorV1",
                {"kv_connector": "LMCacheConnectorV1", "custom_param": "value1"},
            ),
            (
                "NixlConnector",
                {"kv_connector": "NixlConnector", "custom_param": "value2"},
            ),
        ]
        for (actual_name, actual_config), (expected_name, expected_specific) in zip(
            captured_configs, expected_configs
        ):
            assert actual_name == expected_name
            # Check inherited parent fields
            assert actual_config["engine_id"] == "test-engine-123"
            assert actual_config["kv_role"] == "kv_both"
            # Check connector-specific fields
            for key, value in expected_specific.items():
                assert actual_config[key] == value
            # Verify kv_connector_extra_config is not passed to sub-connectors
            assert "kv_connector_extra_config" not in actual_config
# Allow invoking this test module directly (outside of a plain `pytest` run).
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/tests/serve/cpu/deployments/llm/vllm/kv_transfer_backends/test_multi_connector.py",
"license": "Apache License 2.0",
"lines": 144,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/_internal/collections.py | from typing import Dict, TypeVar
K = TypeVar("K")


def collapse_transitive_map(d: Dict[K, K]) -> Dict[K, K]:
    """Collapse transitive mappings in a dictionary. Given a mapping like
    {a: b, b: c, c: d}, returns {a: d}, removing intermediate b -> c, c -> d.

    Only keeps mappings where the key is NOT a value in another mapping
    (i.e., chain starting points). Note that entries forming a pure cycle
    (e.g. {a: b, b: a}) have no starting point and are therefore silently
    dropped; ValueError is only raised for a cycle that is reachable from
    a chain starting point.

    Args:
        d: Dictionary representing a mapping

    Returns:
        Dictionary with all transitive mappings collapsed, keeping only KV-pairs,
        such that K and V are starting and terminal point of a chain

    Raises:
        ValueError: If a cycle is detected while following a chain.

    Examples:
        >>> collapse_transitive_map({"a": "b", "b": "c", "c": "d"})
        {'a': 'd'}
        >>> collapse_transitive_map({"a": "b", "x": "y"})
        {'a': 'b', 'x': 'x'.replace('x', 'y')}  # doctest placeholder; see below
    """
    if not d:
        return {}

    collapsed = {}
    values_set = set(d.values())

    for start in d:
        # Skip mappings that are in the value-set, meaning that they are
        # part of the mapping chain (for ex, {a -> b, b -> c})
        if start in values_set:
            continue

        cur = start
        visited = {cur}
        # Follow the chain until we reach a key that's not in the mapping.
        while cur in d:
            # `nxt` (not `next`) to avoid shadowing the builtin `next`.
            nxt = d[cur]
            if nxt in visited:
                raise ValueError(f"Detected a cycle in the mapping {d}")
            visited.add(nxt)
            cur = nxt

        collapsed[start] = cur

    return collapsed
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/collections.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.