sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
ranaroussi/yfinance:tests/test_calendars.py | from datetime import datetime, timedelta, timezone
import unittest
import pandas as pd
from tests.context import yfinance as yf, session_gbl
class TestCalendars(unittest.TestCase):
    """Smoke tests for yf.Calendars against the live Yahoo Finance endpoint."""

    def setUp(self):
        self.calendars = yf.Calendars(session=session_gbl)

    def test_get_earnings_calendar(self):
        # Fetch one row, then read the cached frame back through the property.
        earnings_df = self.calendars.get_earnings_calendar(limit=1)
        cached_symbols = self.calendars.earnings_calendar.index.tolist()
        self.assertIsInstance(earnings_df, pd.DataFrame)
        self.assertEqual(len(earnings_df), 1)
        self.assertIsInstance(cached_symbols, list)
        self.assertEqual(len(cached_symbols), len(earnings_df))
        self.assertEqual(cached_symbols, earnings_df.index.tolist())
        # Row lookup by symbol should round-trip through the cached frame.
        leading_symbol = earnings_df.index.tolist()[0]
        cached_row_name = self.calendars.earnings_calendar.loc[leading_symbol].name
        self.assertEqual(leading_symbol, cached_row_name)

    def test_get_earnings_calendar_init_params(self):
        # Default window starts today, so events should not be in the past.
        default_df = self.calendars.get_earnings_calendar(limit=5)
        self.assertGreaterEqual(
            default_df['Event Start Date'].iloc[0],
            pd.to_datetime(datetime.now(tz=timezone.utc)),
        )
        # A custom `start` one week back should bound the events accordingly.
        week_ago = datetime.now(tz=timezone.utc) - timedelta(days=7)
        windowed_df = yf.Calendars(start=week_ago).get_earnings_calendar(limit=5)
        self.assertGreaterEqual(windowed_df['Event Start Date'].iloc[0], pd.to_datetime(week_ago))

    def test_get_ipo_info_calendar(self):
        ipo_df = self.calendars.get_ipo_info_calendar(limit=5)
        self.assertIsInstance(ipo_df, pd.DataFrame)
        self.assertEqual(len(ipo_df), 5)

    def test_get_economic_events_calendar(self):
        events_df = self.calendars.get_economic_events_calendar(limit=5)
        self.assertIsInstance(events_df, pd.DataFrame)
        self.assertEqual(len(events_df), 5)

    def test_get_splits_calendar(self):
        splits_df = self.calendars.get_splits_calendar(limit=5)
        self.assertIsInstance(splits_df, pd.DataFrame)
        self.assertEqual(len(splits_df), 5)
if __name__ == "__main__":
unittest.main() | {
"repo_id": "ranaroussi/yfinance",
"file_path": "tests/test_calendars.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ranaroussi/yfinance:yfinance/calendars.py | from __future__ import annotations # Just in case
import json
from typing import Any, Optional, List, Union, Dict
import warnings
import numpy as np
from requests import Session, Response, exceptions
import pandas as pd
from datetime import datetime, date, timedelta
from .const import _QUERY1_URL_
from .utils import log_indent_decorator, get_yf_logger, _parse_user_dt
from .screener import screen
from .data import YfData
from .exceptions import YFException
class CalendarQuery:
    """
    Lightweight query node for calendar requests, mirroring
    yf.screener.query.QueryBase.

    Simple operand accepted by YF is of the form:
    `{ "operator": operator, "operands": [field, ...values] }`
    Nested operand accepted by YF:
    `{ "operator": operator, "operands": [ ...CalendarQuery ] }`

    ### Simple example:
    ```python
    op = CalendarQuery('eq', ['ticker', 'AAPL'])
    print(op.to_dict())
    ```
    """

    def __init__(self, operator: str, operand: Union[List[Any], List["CalendarQuery"]]):
        """
        :param operator: Operator string, e.g., 'eq', 'gte', 'and', 'or'.
        :param operand: List of operands: can be values (str, int), or other Operands instances (nested).
        """
        # YF expects upper-cased operator names.
        self.operator = operator.upper()
        self.operands = operand

    def append(self, operand: Any) -> None:
        """
        Append an operand to the operands list.
        :param operand: value or nested CalendarQuery instance to append.
        """
        self.operands.append(operand)

    @property
    def is_empty(self) -> bool:
        """
        :return: True if this node currently has no operands.
        """
        return not self.operands

    def to_dict(self) -> dict:
        """
        Serialise this node (recursively) into the dict shape YF accepts.
        """
        serialised = []
        for item in self.operands:
            serialised.append(item.to_dict() if isinstance(item, CalendarQuery) else item)
        return {"operator": self.operator, "operands": serialised}
# Visualization endpoint used by all calendar queries.
_CALENDAR_URL_ = f"{_QUERY1_URL_}/v1/finance/visualization"
# Date string format used in query operands and default date boundaries.
DATE_STR_FORMAT = "%Y-%m-%d"

# Static per-calendar configuration, keyed by YF entityIdType:
#   sortField:     field YF sorts the results by
#   includeFields: raw fields requested from the endpoint
#   nan_cols:      numeric columns where 0.0 is treated as missing (replaced with NaN)
#   datetime_cols: columns converted with pd.to_datetime during cleanup
#   df_index:      column promoted to the DataFrame index
#   renames:       column renames applied to the cleaned DataFrame
PREDEFINED_CALENDARS = {
    "sp_earnings": {
        "sortField": "intradaymarketcap",
        "includeFields": [
            "ticker",
            "companyshortname",
            "intradaymarketcap",
            "eventname",
            "startdatetime",
            "startdatetimetype",
            "epsestimate",
            "epsactual",
            "epssurprisepct",
        ],
        "nan_cols": ["Surprise (%)", "EPS Estimate", "Reported EPS"],
        "datetime_cols": ["Event Start Date"],
        "df_index": "Symbol",
        "renames": {
            "Surprise (%)": "Surprise(%)",
            "Company Name": "Company",
            "Market Cap (Intraday)": "Marketcap",
        },
    },
    "ipo_info": {
        "sortField": "startdatetime",
        "includeFields": [
            "ticker",
            "companyshortname",
            "exchange_short_name",
            "filingdate",
            "startdatetime",
            "amendeddate",
            "pricefrom",
            "priceto",
            "offerprice",
            "currencyname",
            "shares",
            "dealtype",
        ],
        "nan_cols": ["Price From", "Price To", "Price", "Shares"],
        "datetime_cols": ["Filing Date", "Date", "Amended Date"],
        "df_index": "Symbol",
        "renames": {
            "Exchange Short Name": "Exchange",
        },
    },
    "economic_event": {
        "sortField": "startdatetime",
        "includeFields": [
            "econ_release",
            "country_code",
            "startdatetime",
            "period",
            "after_release_actual",
            "consensus_estimate",
            "prior_release_actual",
            "originally_reported_actual",
        ],
        "nan_cols": ["Actual", "Market Expectation", "Prior to This", "Revised from"],
        "datetime_cols": ["Event Time"],
        "df_index": "Event",
        "renames": {
            "Country Code": "Region",
            "Market Expectation": "Expected",
            "Prior to This": "Last",
            "Revised from": "Revised",
        },
    },
    "splits": {
        "sortField": "startdatetime",
        "includeFields": [
            "ticker",
            "companyshortname",
            "startdatetime",
            "optionable",
            "old_share_worth",
            "share_worth",
        ],
        "nan_cols": [],
        "datetime_cols": ["Payable On"],
        "df_index": "Symbol",
        "renames": {
            "Optionable?": "Optionable",
        },
    },
}
class Calendars:
    """
    Get economic calendars, for example, Earnings, IPO, Economic Events, Splits
    ### Simple example default params:
    ```python
    import yfinance as yf
    calendars = yf.Calendars()
    earnings_calendar = calendars.get_earnings_calendar(limit=50)
    print(earnings_calendar)
    ```"""

    def __init__(
        self,
        start: Optional[Union[str, datetime, date]] = None,
        end: Optional[Union[str, datetime, date]] = None,
        session: Optional[Session] = None,
    ):
        """
        :param str | datetime | date start: start date (default today) \
            eg. start="2025-11-08"
        :param str | datetime | date end: end date (default `start + 7 days`) \
            eg. end="2025-11-08"
        :param session: requests.Session object, optional
        """
        self._logger = get_yf_logger()
        self.session = session or Session()
        self._data: YfData = YfData(session=session)
        _start = self._parse_date_param(start)
        _end = self._parse_date_param(end)
        # Default boundary: [today, today + 7 days].
        self._start = _start or datetime.now().strftime(DATE_STR_FORMAT)
        self._end = _end or (datetime.strptime(self._start, DATE_STR_FORMAT) + timedelta(days=7)).strftime(DATE_STR_FORMAT)
        if not start and end:
            self._logger.debug(f"Incomplete boundary: did not provide `start`, using today {self._start=} to {self._end=}")
        elif start and not end:
            self._logger.debug(f"Incomplete boundary: did not provide `end`, using {self._start=} to {self._end=}: +7 days from self._start")
        # Lazily populated "most active tickers" query (see _get_most_active_operands).
        self._most_active_qy: CalendarQuery = CalendarQuery("or", [])
        # Last request body per calendar type, used to detect repeat requests (cache hits).
        self._cache_request_body = {}
        # Last fetched DataFrame per calendar type.
        self.calendars: Dict[str, pd.DataFrame] = {}

    def _parse_date_param(self, _date: Optional[Union[str, datetime, date, int]]) -> str:
        """Normalise a user-supplied date into a 'YYYY-MM-DD' string ('' if not given)."""
        if not _date:
            return ""
        else:
            return _parse_user_dt(_date).strftime(DATE_STR_FORMAT)

    def _resolve_date_bounds(self, start, end):
        """
        Parse optional `start`/`end` overrides, falling back to the instance
        defaults set in `__init__` for any missing bound.
        Warns when only one of the two bounds was supplied, since the other
        silently falls back to the instance default.
        :return: `(start_str, end_str)` tuple of 'YYYY-MM-DD' strings
        """
        _start = self._parse_date_param(start)
        _end = self._parse_date_param(end)
        if (start and not end) or (end and not start):
            warnings.warn(
                "When providing custom `start` and `end` parameters, you may want to specify both, to avoid unexpected behaviour.",
                UserWarning,
                stacklevel=3,  # point at the caller of the public getter, not this helper
            )
        return _start or self._start, _end or self._end

    def _get_data(
        self, calendar_type: str, query: CalendarQuery, limit=12, offset=0, force=False
    ) -> pd.DataFrame:
        """
        Fetch one calendar from YF, with request-body based caching.
        :raises YFException: on unknown calendar type or a YF-reported error.
        """
        if calendar_type not in PREDEFINED_CALENDARS:
            raise YFException(f"Unknown calendar type: {calendar_type}")
        params = {"lang": "en-US", "region": "US"}
        body = {
            "sortType": "DESC",
            "entityIdType": calendar_type,
            "sortField": PREDEFINED_CALENDARS[calendar_type]["sortField"],
            "includeFields": PREDEFINED_CALENDARS[calendar_type]["includeFields"],
            "size": min(limit, 100),  # YF caps at 100, don't go higher
            "offset": offset,
            "query": query.to_dict(),
        }
        if self._cache_request_body.get(calendar_type, None) and not force:
            cache_body = self._cache_request_body[calendar_type]
            if cache_body == body and calendar_type in self.calendars:
                # Uses cache if force=False and new request has same body as previous
                self._logger.debug(f"Getting {calendar_type=} from local cache")
                return self.calendars[calendar_type]
        self._cache_request_body[calendar_type] = body
        self._logger.debug(f"Fetching {calendar_type=} with {limit=}")
        response: Response = self._data.post(_CALENDAR_URL_, params=params, body=body)
        try:
            json_data = response.json()
        except json.JSONDecodeError:
            self._logger.error(f"{calendar_type}: Failed to retrieve calendar.")
            # Without a parsable payload there is nothing to tabulate; return an
            # empty frame instead of crashing in _create_df with a KeyError.
            self.calendars[calendar_type] = pd.DataFrame()
            return self.calendars[calendar_type]
        # Error returned by YF inside the payload
        if json_data.get("finance", {}).get("error", {}):
            raise YFException(json_data.get("finance", {}).get("error", {}))
        self.calendars[calendar_type] = self._create_df(json_data)
        return self._cleanup_df(calendar_type)

    def _create_df(self, json_data: dict) -> pd.DataFrame:
        """Build a raw DataFrame from YF's visualization response payload."""
        columns = []
        for col in json_data["finance"]["result"][0]["documents"][0]["columns"]:
            columns.append(col["label"])
            if col["label"] == "Event Start Date" and col["type"] == "STRING":
                # Rename duplicate columns Event Start Date
                columns[-1] = "Timing"
        rows = json_data["finance"]["result"][0]["documents"][0]["rows"]
        return pd.DataFrame(rows, columns=columns)

    def _cleanup_df(self, calendar_type: str) -> pd.DataFrame:
        """Apply the calendar's PREDEFINED_CALENDARS config: types, index, renames."""
        predef_cal: dict = PREDEFINED_CALENDARS[calendar_type]
        df: pd.DataFrame = self.calendars[calendar_type]
        if df.empty:
            return df
        # Convert types; YF reports missing numerics as 0.0, so map those to NaN
        nan_cols: list = predef_cal["nan_cols"]
        if nan_cols:
            df[nan_cols] = df[nan_cols].astype("float64").replace(0.0, np.nan)
        # Format the dataframe
        df.set_index(predef_cal["df_index"], inplace=True)
        for rename_from, rename_to in predef_cal["renames"].items():
            df.rename(columns={rename_from: rename_to}, inplace=True)
        for datetime_col in predef_cal["datetime_cols"]:
            df[datetime_col] = pd.to_datetime(df[datetime_col])
        return df

    @log_indent_decorator
    def _get_most_active_operands(
        self, _market_cap: Optional[float], force=False
    ) -> CalendarQuery:
        """
        Retrieve tickers from YF, converts them into operands accepted by YF.
        Saves the operands in self._most_active_qy.
        Will not re-query if already populated.
        Used for earnings calendar optional filter.
        :param _market_cap: optional market-cap floor applied while building operands
        :param force: if True, will re-query even if operands already exist
        :return: list of operands for active traded stocks
        """
        if not self._most_active_qy.is_empty and not force:
            return self._most_active_qy
        self._logger.debug("Fetching 200 most_active for earnings calendar")
        try:
            json_raw: dict = screen(query="MOST_ACTIVES", count=200)
        except exceptions.HTTPError:
            self._logger.error("Failed to retrieve most active stocks.")
            return self._most_active_qy
        raw = json_raw.get("quotes", [{}])
        self._most_active_qy = CalendarQuery("or", [])
        for stock in raw:
            if not isinstance(stock, dict):
                continue
            ticker = stock.get("symbol", "")
            t_market_cap = stock.get("marketCap", 0)
            # We filter market_cap here because we want to keep self._most_active_qy consistent
            if ticker and (_market_cap is None or t_market_cap >= _market_cap):
                self._most_active_qy.append(CalendarQuery("eq", ["ticker", ticker]))
        return self._most_active_qy

    def _get_startdatetime_operators(self, start=None, end=None) -> CalendarQuery:
        """
        Get startdatetime operands for start/end dates.
        If no dates passed, defaults to internal date set on initialization.
        """
        _start, _end = self._resolve_date_bounds(start, end)
        return CalendarQuery(
            "and",
            [
                CalendarQuery("gte", ["startdatetime", _start]),
                CalendarQuery("lte", ["startdatetime", _end]),
            ],
        )

    ### Manual getter functions:
    @log_indent_decorator
    def get_earnings_calendar(
        self,
        market_cap: Optional[float] = None,
        filter_most_active: bool = True,
        start=None,
        end=None,
        limit=12,
        offset=0,
        force=False,
    ) -> pd.DataFrame:
        """
        Retrieve earnings calendar from YF as a DataFrame.
        Will re-query every time it is called, overwriting previous data.
        :param market_cap: market cap cutoff in USD, default None
        :param filter_most_active: will filter for actively traded stocks (default True)
        :param str | datetime | date start: overwrite start date (default set by __init__) \
            eg. start="2025-11-08"
        :param str | datetime | date end: overwrite end date (default set by __init__) \
            eg. end="2025-11-08"
        :param limit: maximum number of results to return (YF caps at 100)
        :param offset: offsets the results for pagination. YF default 0
        :param force: if True, will re-query even if cache already exists
        :return: DataFrame with earnings calendar
        """
        _start, _end = self._resolve_date_bounds(start, end)
        query = CalendarQuery(
            "and",
            [
                CalendarQuery("eq", ["region", "us"]),
                CalendarQuery(
                    "or",
                    [
                        CalendarQuery("eq", ["eventtype", "EAD"]),
                        CalendarQuery("eq", ["eventtype", "ERA"]),
                    ],
                ),
                CalendarQuery("gte", ["startdatetime", _start]),
                CalendarQuery("lte", ["startdatetime", _end]),
            ],
        )
        if market_cap is not None:
            if market_cap < 10_000_000:
                warnings.warn(
                    f"market_cap {market_cap} is very low, did you mean to set it higher?",
                    UserWarning,
                    stacklevel=2,
                )
            query.append(CalendarQuery("gte", ["intradaymarketcap", market_cap]))
        if filter_most_active and not offset:
            # YF does not like filter most active while offsetting
            query.append(self._get_most_active_operands(market_cap))
        return self._get_data(
            calendar_type="sp_earnings",
            query=query,
            limit=limit,
            offset=offset,
            force=force,
        )

    @log_indent_decorator
    def get_ipo_info_calendar(
        self, start=None, end=None, limit=12, offset=0, force=False
    ) -> pd.DataFrame:
        """
        Retrieve IPOs calendar from YF as a Dataframe.
        :param str | datetime | date start: overwrite start date (default set by __init__) \
            eg. start="2025-11-08"
        :param str | datetime | date end: overwrite end date (default set by __init__) \
            eg. end="2025-11-08"
        :param limit: maximum number of results to return (YF caps at 100)
        :param offset: offsets the results for pagination. YF default 0
        :param force: if True, will re-query even if cache already exists
        :return: DataFrame with IPOs calendar
        """
        _start, _end = self._resolve_date_bounds(start, end)
        # An IPO appears if any of its three dates falls inside the window.
        query = CalendarQuery(
            "or",
            [
                CalendarQuery("gtelt", ["startdatetime", _start, _end]),
                CalendarQuery("gtelt", ["filingdate", _start, _end]),
                CalendarQuery("gtelt", ["amendeddate", _start, _end]),
            ],
        )
        return self._get_data(
            calendar_type="ipo_info",
            query=query,
            limit=limit,
            offset=offset,
            force=force,
        )

    @log_indent_decorator
    def get_economic_events_calendar(
        self, start=None, end=None, limit=12, offset=0, force=False
    ) -> pd.DataFrame:
        """
        Retrieve Economic Events calendar from YF as a DataFrame.
        :param str | datetime | date start: overwrite start date (default set by __init__) \
            eg. start="2025-11-08"
        :param str | datetime | date end: overwrite end date (default set by __init__) \
            eg. end="2025-11-08"
        :param limit: maximum number of results to return (YF caps at 100)
        :param offset: offsets the results for pagination. YF default 0
        :param force: if True, will re-query even if cache already exists
        :return: DataFrame with Economic Events calendar
        """
        return self._get_data(
            calendar_type="economic_event",
            query=self._get_startdatetime_operators(start, end),
            limit=limit,
            offset=offset,
            force=force,
        )

    @log_indent_decorator
    def get_splits_calendar(
        self, start=None, end=None, limit=12, offset=0, force=False
    ) -> pd.DataFrame:
        """
        Retrieve Splits calendar from YF as a DataFrame.
        :param str | datetime | date start: overwrite start date (default set by __init__) \
            eg. start="2025-11-08"
        :param str | datetime | date end: overwrite end date (default set by __init__) \
            eg. end="2025-11-08"
        :param limit: maximum number of results to return (YF caps at 100)
        :param offset: offsets the results for pagination. YF default 0
        :param force: if True, will re-query even if cache already exists
        :return: DataFrame with Splits calendar
        """
        return self._get_data(
            calendar_type="splits",
            query=self._get_startdatetime_operators(start, end),
            limit=limit,
            offset=offset,
            force=force,
        )

    ### Easy / Default getter functions:
    @property
    def earnings_calendar(self) -> pd.DataFrame:
        """Earnings calendar with default settings."""
        if "sp_earnings" in self.calendars:
            return self.calendars["sp_earnings"]
        return self.get_earnings_calendar()

    @property
    def ipo_info_calendar(self) -> pd.DataFrame:
        """IPOs calendar with default settings."""
        if "ipo_info" in self.calendars:
            return self.calendars["ipo_info"]
        return self.get_ipo_info_calendar()

    @property
    def economic_events_calendar(self) -> pd.DataFrame:
        """Economic events calendar with default settings."""
        if "economic_event" in self.calendars:
            return self.calendars["economic_event"]
        return self.get_economic_events_calendar()

    @property
    def splits_calendar(self) -> pd.DataFrame:
        """Splits calendar with default settings."""
        if "splits" in self.calendars:
            return self.calendars["splits"]
        return self.get_splits_calendar()
| {
"repo_id": "ranaroussi/yfinance",
"file_path": "yfinance/calendars.py",
"license": "Apache License 2.0",
"lines": 475,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ranaroussi/yfinance:yfinance/config.py | import json
class NestedConfig:
    """Attribute-style view over a plain dict of option values."""

    def __init__(self, name, data):
        # Write through __dict__ directly: our __setattr__ would otherwise
        # store these bookkeeping fields inside `data` itself.
        self.__dict__['name'] = name
        self.__dict__['data'] = data

    def __getattr__(self, attr):
        # Only invoked for attributes not found normally; unknown keys -> None.
        return self.data.get(attr)

    def __setattr__(self, attr, val):
        # Every attribute assignment is persisted in the backing dict.
        self.data[attr] = val

    def __len__(self):
        return len(self.__dict__['data'])

    def __repr__(self):
        return json.dumps(self.data, indent=4)
class ConfigMgr:
    """Lazily-initialised option store; sections materialise on first access."""

    def __init__(self):
        self._initialised = False

    def _load_option(self):
        # Flip the flag first: __getattr__ below calls back into this method.
        self._initialised = True  # prevent infinite loop
        self.options = {}
        # Initialise defaults
        network = self.__getattr__('network')
        network.proxy = None
        network.retries = 0
        debug = self.__getattr__('debug')
        debug.hide_exceptions = True
        debug.logging = False

    def __getattr__(self, key):
        # Invoked only for attributes not found normally; creates the section
        # dict on demand and hands back an attribute-style wrapper over it.
        if not self._initialised:
            self._load_option()
        if key not in self.options:
            self.options[key] = {}
        return NestedConfig(key, self.options[key])

    def __contains__(self, key):
        if not self._initialised:
            self._load_option()
        return key in self.options

    def __repr__(self):
        if not self._initialised:
            self._load_option()
        return json.dumps(dict(self.options), indent=4)


YfConfig = ConfigMgr()
| {
"repo_id": "ranaroussi/yfinance",
"file_path": "yfinance/config.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/serve/tests/test_controller_benchmark.py | import sys
import pytest
from ray.serve._private.benchmarks.common import run_controller_benchmark
@pytest.mark.asyncio
async def test_run_controller_benchmark(ray_start_stop):
    """Test that run_controller_benchmark runs and returns valid samples."""
    benchmark_config = {
        "checkpoints": [1, 2],
        "marination_period_s": 15,
        "sample_interval_s": 5,
    }
    samples = await run_controller_benchmark(config=benchmark_config)
    assert len(samples) > 0, "Expected at least one sample"
    # Every sample must carry the full set of reported metrics.
    required_keys = (
        "target_replicas",
        "autoscale_duration_s",
        "loop_duration_mean_s",
        "loops_per_second",
        "event_loop_delay_s",
        "num_asyncio_tasks",
        "deployment_state_update_mean_s",
        "application_state_update_mean_s",
        "proxy_state_update_mean_s",
        "node_update_min_s",
        "handle_metrics_delay_mean_ms",
        "replica_metrics_delay_mean_ms",
        "process_memory_mb",
    )
    for sample in samples:
        for key in required_keys:
            assert key in sample, f"Sample missing expected key: {key}"
    # Both configured checkpoints should have produced at least one sample.
    observed_replicas = set(sample["target_replicas"] for sample in samples)
    assert 1 in observed_replicas
    assert 2 in observed_replicas
    # Sanity-check that numeric fields are non-negative.
    for sample in samples:
        assert sample["loops_per_second"] >= 0
        assert sample["process_memory_mb"] >= 0
        assert sample["autoscale_duration_s"] >= 0
if __name__ == "__main__":
sys.exit(pytest.main(["-v", "-s", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/test_controller_benchmark.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/serve/gang.py | from dataclasses import dataclass
from typing import List
from ray.util.annotations import DeveloperAPI
@DeveloperAPI
@dataclass
class GangContext:
    """Context information for a replica that is part of a gang.

    Plain data holder; ranks are 0-indexed and `member_replica_ids` is
    ordered by rank.
    """

    gang_id: str
    """Unique identifier for this gang."""

    rank: int
    """This replica's rank within the gang (0-indexed)."""

    world_size: int
    """Total number of replicas in this gang."""

    member_replica_ids: List[str]
    """List of replica IDs in this gang, ordered by rank."""
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/gang.py",
"license": "Apache License 2.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:ci/ray_ci/supported_images.py | from functools import lru_cache
from pathlib import Path
from typing import List
import yaml
_RAYCI_VERSION_FILE = ".rayciversion"
def _find_ray_root() -> Path:
"""Walk up from this file and cwd looking for .rayciversion."""
start = Path(__file__).resolve()
for parent in start.parents:
if (parent / _RAYCI_VERSION_FILE).exists():
return parent
if (Path.cwd() / _RAYCI_VERSION_FILE).exists():
return Path.cwd()
raise FileNotFoundError("Could not find Ray root (missing .rayciversion).")
@lru_cache(maxsize=1)
def load_supported_images():
    """Parse ray-images.yaml at the repo root; parsed result is cached."""
    config_path = _find_ray_root() / "ray-images.yaml"
    with config_path.open() as fh:
        return yaml.safe_load(fh)
def get_image_config(image_type: str) -> dict:
    """Return the full config mapping for `image_type` (KeyError if unknown)."""
    return load_supported_images()[image_type]


def get_python_versions(image_type: str) -> List[str]:
    """Supported Python versions for `image_type`."""
    return get_image_config(image_type)["python"]


def get_platforms(image_type: str) -> List[str]:
    """Supported platforms for `image_type`."""
    return get_image_config(image_type)["platforms"]


def get_architectures(image_type: str) -> List[str]:
    """Supported architectures for `image_type`."""
    return get_image_config(image_type)["architectures"]


def get_default(image_type: str, key: str) -> str:
    """Default value for `key` from the image type's `defaults` mapping."""
    return get_image_config(image_type)["defaults"][key]
| {
"repo_id": "ray-project/ray",
"file_path": "ci/ray_ci/supported_images.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:ci/ray_ci/test_supported_images.py | """
Validates that ray-images.yaml is well-formed and internally consistent.
"""
import pytest
from ci.ray_ci.supported_images import get_image_config, load_supported_images
# Every image type declared in ray-images.yaml gets validated below.
IMAGE_TYPES = list(load_supported_images().keys())
# Top-level keys each image-type entry must define.
REQUIRED_KEYS = ["defaults", "python", "platforms", "architectures"]
# Keys each entry's `defaults` mapping must define.
REQUIRED_DEFAULTS = ["python", "gpu_platform", "architecture"]
class TestRayImagesSchema:
    """Schema checks over every image type declared in ray-images.yaml."""

    def test_has_image_types(self):
        """The YAML must declare at least one image type."""
        assert len(IMAGE_TYPES) > 0, "ray-images.yaml has no image types defined"

    @pytest.mark.parametrize("image_type", IMAGE_TYPES)
    def test_required_keys(self, image_type):
        """Each image type defines all required top-level keys."""
        cfg = get_image_config(image_type)
        for key in REQUIRED_KEYS:
            assert key in cfg, f"{image_type}: missing required key '{key}'"

    @pytest.mark.parametrize("image_type", IMAGE_TYPES)
    def test_required_defaults(self, image_type):
        """Each image type's `defaults` mapping defines all required keys."""
        defaults = get_image_config(image_type)["defaults"]
        for key in REQUIRED_DEFAULTS:
            assert key in defaults, f"{image_type}: missing required default '{key}'"

    @pytest.mark.parametrize("image_type", IMAGE_TYPES)
    def test_defaults_in_supported(self, image_type):
        """Every default value must appear in the corresponding supported list."""
        cfg = get_image_config(image_type)
        defaults = cfg["defaults"]
        assert defaults["python"] in cfg["python"], (
            f"{image_type}: default python '{defaults['python']}' "
            f"not in supported {cfg['python']}"
        )
        assert defaults["gpu_platform"] in cfg["platforms"], (
            f"{image_type}: default gpu_platform '{defaults['gpu_platform']}' "
            f"not in supported {cfg['platforms']}"
        )
        assert defaults["architecture"] in cfg["architectures"], (
            f"{image_type}: default architecture '{defaults['architecture']}' "
            f"not in supported {cfg['architectures']}"
        )

    @pytest.mark.parametrize("image_type", IMAGE_TYPES)
    def test_no_empty_lists(self, image_type):
        """No supported-values list may be empty."""
        cfg = get_image_config(image_type)
        for key in ["python", "platforms", "architectures"]:
            assert len(cfg[key]) > 0, f"{image_type}: '{key}' list is empty"

    @pytest.mark.parametrize("image_type", IMAGE_TYPES)
    def test_python_versions_are_strings(self, image_type):
        """Python versions must be quoted in the YAML (e.g. "3.10", not 3.10)."""
        for v in get_image_config(image_type)["python"]:
            assert isinstance(v, str), (
                f"{image_type}: python version {v!r} is {type(v).__name__}, "
                f"not str (missing quotes in YAML?)"
            )

    @pytest.mark.parametrize("image_type", IMAGE_TYPES)
    def test_platforms_are_strings(self, image_type):
        """Platform entries must be strings."""
        for v in get_image_config(image_type)["platforms"]:
            assert isinstance(
                v, str
            ), f"{image_type}: platform {v!r} is {type(v).__name__}, not str"

    @pytest.mark.parametrize("image_type", IMAGE_TYPES)
    def test_architectures_are_strings(self, image_type):
        """Architecture entries must be strings."""
        for v in get_image_config(image_type)["architectures"]:
            assert isinstance(
                v, str
            ), f"{image_type}: architecture {v!r} is {type(v).__name__}, not str"
| {
"repo_id": "ray-project/ray",
"file_path": "ci/ray_ci/test_supported_images.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/_internal/cluster_autoscaler/concurrency_solver.py | from typing import Dict, TypeVar
from ray.data._internal.execution.interfaces import ExecutionResources
# The math functions defined in this module use a generic type rather than
# `PhysicalOperator` so it's easier to test. We already pass in all of the necessary
# inputs, so the actual type doesn't matter.
T = TypeVar("T")

_SCHEDULABLE_RESOURCE_NAMES = ("cpu", "gpu", "memory")


def allocate_resources(
    throughput: float,
    *,
    rates: Dict[T, float],
    resource_requirements: Dict[T, ExecutionResources],
) -> Dict[T, ExecutionResources]:
    """Allocate resources for a pipeline to sustain the given throughput.

    Key insight: in a pipeline, all operators must sustain the same throughput T.
    Operator i with per-task rate r_i needs T/r_i tasks to sustain T, so each
    operator's allocation is its per-task requirement scaled by that count.

    Args:
        throughput: The throughput for the pipeline in the same units as the rates.
        rates: The rate at which a task or actor produces outputs for each operator.
        resource_requirements: The logical resources required to schedule a task or
            actor for each operator.

    Returns:
        A dictionary mapping operators to the allocated resources.
    """
    assert throughput >= 0, "Throughput must be non-negative"
    assert all(rate > 0 for rate in rates.values()), "Rates must be positive"
    if not rates:
        return {}
    if throughput == 0:
        return {op: ExecutionResources.zero() for op in rates}
    # NOTE: task counts (throughput / rate) are fractional. You can't schedule a
    # fractional task or actor, so the allocations might be slightly infeasible.
    return {
        op: resource_requirements[op].scale(throughput / rate)
        for op, rate in rates.items()
    }
def compute_optimal_throughput(
    *,
    rates: Dict[T, float],
    resource_requirements: Dict[T, ExecutionResources],
    resource_limits: ExecutionResources,
    concurrency_limits: Dict[T, int],
) -> float:
    """Compute the optimal throughput for a pipeline.

    Two constraints bound the optimal throughput, and the tightest one wins:
    1. Resource limits — total resource usage across all operators must fit the
       budget.
    2. Concurrency limits — each operator's task count cannot exceed its limit.

    Args:
        rates: The rate at which a task or actor produces outputs for each operator.
        resource_requirements: The logical resources required to schedule a task or
            actor for each operator.
        resource_limits: The resource limits for the cluster.
        concurrency_limits: The maximum number of tasks or actors that can be scheduled
            concurrently for each operator.

    Returns:
        The optimal throughput for the pipeline in the same units as the rates.
    """
    assert rates, "Rates must be non-empty"
    resource_bound = _max_throughput_from_resources(
        rates, resource_requirements, resource_limits
    )
    concurrency_bound = _max_throughput_from_concurrency(rates, concurrency_limits)
    return min(resource_bound, concurrency_bound)
def _max_throughput_from_resources(
    rates: Dict[T, float],
    resource_requirements: Dict[T, ExecutionResources],
    resource_limits: ExecutionResources,
) -> float:
    """Max throughput the resource budget allows, minimised over resource types.

    One unit of throughput on operator ``op`` costs ``req[op] / rates[op]`` of
    each resource; summing over operators gives the pipeline-wide cost per unit
    of throughput, and the budget divided by that cost bounds the throughput.
    """
    assert rates, "Rates must be non-empty"
    assert all(rate > 0 for rate in rates.values()), "Rates must be positive"
    assert (
        rates.keys() == resource_requirements.keys()
    ), "Rates and resource requirements must have the same keys"
    best = float("inf")
    for resource_name in _SCHEDULABLE_RESOURCE_NAMES:
        budget = getattr(resource_limits, resource_name)
        unit_cost = sum(
            getattr(resource_requirements[op], resource_name) / rates[op]
            for op in rates
        )
        # A zero cost means no operator consumes this resource: no constraint.
        if unit_cost > 0:
            best = min(best, budget / unit_cost)
    assert best >= 0, "Max throughput must be non-negative"
    return best
def _max_throughput_from_concurrency(
    rates: Dict[T, float],
    concurrency_limits: Dict[T, int],
) -> float:
    """Each operator's throughput is capped at rate * concurrency_limit."""
    assert rates, "Rates must be non-empty"
    assert (
        rates.keys() == concurrency_limits.keys()
    ), "Rates and concurrency limits must have the same keys"
    per_op_caps = (rate * concurrency_limits[op] for op, rate in rates.items())
    return min(per_op_caps)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/cluster_autoscaler/concurrency_solver.py",
"license": "Apache License 2.0",
"lines": 97,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/tests/unit/test_concurrency_solver.py | import pytest
from ray.data._internal.cluster_autoscaler.concurrency_solver import (
allocate_resources,
compute_optimal_throughput,
)
from ray.data._internal.execution.interfaces import ExecutionResources
class TestComputeOptimalThroughput:
    """Unit tests for compute_optimal_throughput with hand-computed bounds."""

    def test_one_op_cpu_bound(self):
        # 2 CPUs / (1 CPU per task) at rate 1.0 -> throughput 2.
        result = compute_optimal_throughput(
            rates={"A": 1.0},
            resource_requirements={"A": ExecutionResources(cpu=1)},
            resource_limits=ExecutionResources.for_limits(cpu=2),
            concurrency_limits={"A": float("inf")},
        )
        assert result == pytest.approx(2.0)

    def test_one_op_gpu_bound(self):
        # Same arithmetic as the CPU case, budget expressed in GPUs.
        result = compute_optimal_throughput(
            rates={"A": 1.0},
            resource_requirements={"A": ExecutionResources(gpu=1)},
            resource_limits=ExecutionResources.for_limits(gpu=2),
            concurrency_limits={"A": float("inf")},
        )
        assert result == pytest.approx(2.0)

    def test_one_op_memory_bound(self):
        # Same arithmetic again, budget expressed in memory bytes.
        result = compute_optimal_throughput(
            rates={"A": 1.0},
            resource_requirements={"A": ExecutionResources(memory=1e6)},
            resource_limits=ExecutionResources.for_limits(memory=2e6),
            concurrency_limits={"A": float("inf")},
        )
        assert result == pytest.approx(2.0)

    def test_one_op_concurrency_bound(self):
        # Unbounded resources; 2 concurrent tasks at rate 1.0 -> throughput 2.
        result = compute_optimal_throughput(
            rates={"A": 1.0},
            resource_requirements={"A": ExecutionResources(cpu=1)},
            resource_limits=ExecutionResources.for_limits(),
            concurrency_limits={"A": 2},
        )
        assert result == pytest.approx(2.0)

    def test_two_ops_equal_rates(self):
        # Cost per unit throughput = 1/1 + 1/1 = 2 CPUs; budget 2 -> throughput 1.
        result = compute_optimal_throughput(
            rates={"A": 1.0, "B": 1.0},
            resource_requirements={
                "A": ExecutionResources(cpu=1),
                "B": ExecutionResources(cpu=1),
            },
            resource_limits=ExecutionResources.for_limits(cpu=2),
            concurrency_limits={"A": float("inf"), "B": float("inf")},
        )
        assert result == pytest.approx(1.0)

    def test_two_ops_different_rates(self):
        # Cost per unit throughput = 1/1 + 1/2 = 1.5 CPUs; budget 3 -> throughput 2.
        result = compute_optimal_throughput(
            rates={"A": 1.0, "B": 2.0},
            resource_requirements={
                "A": ExecutionResources(cpu=1),
                "B": ExecutionResources(cpu=1),
            },
            resource_limits=ExecutionResources.for_limits(cpu=3),
            concurrency_limits={"A": float("inf"), "B": float("inf")},
        )
        assert result == pytest.approx(2.0)

    def test_two_ops_different_resource_requirements(self):
        # Cost per unit throughput = 1 + 2 = 3 CPUs; budget 3 -> throughput 1.
        result = compute_optimal_throughput(
            rates={"A": 1.0, "B": 1.0},
            resource_requirements={
                "A": ExecutionResources(cpu=1),
                "B": ExecutionResources(cpu=2),
            },
            resource_limits=ExecutionResources.for_limits(cpu=3),
            concurrency_limits={"A": float("inf"), "B": float("inf")},
        )
        assert result == pytest.approx(1.0)

    def test_zero_resource_requirement(self):
        # An operator that consumes nothing imposes no bound -> infinite throughput.
        result = compute_optimal_throughput(
            rates={"A": 1.0},
            resource_requirements={"A": ExecutionResources.zero()},
            resource_limits=ExecutionResources.for_limits(cpu=1),
            concurrency_limits={"A": float("inf")},
        )
        assert result == float("inf")
class TestAllocateResources:
    """Unit tests for ``allocate_resources``.

    Verifies that a target throughput is translated into per-operator
    resource allocations proportional to each operator's rate and needs.
    """

    def test_empty_rates(self):
        assert allocate_resources(0.0, rates={}, resource_requirements={}) == {}

    def test_zero_throughput(self):
        allocation = allocate_resources(
            0.0,
            rates={"A": 1.0},
            resource_requirements={"A": ExecutionResources(cpu=1)},
        )
        assert allocation == {"A": ExecutionResources.zero()}

    def test_one_op(self):
        allocation = allocate_resources(
            1.0,
            rates={"A": 1.0},
            resource_requirements={"A": ExecutionResources(cpu=1)},
        )
        assert allocation["A"] == ExecutionResources(cpu=1)

    def test_two_ops_different_rates(self):
        # B runs twice as fast as A, so it needs half the CPU slots.
        allocation = allocate_resources(
            2.0,
            rates={"A": 1.0, "B": 2.0},
            resource_requirements={
                "A": ExecutionResources(cpu=1),
                "B": ExecutionResources(cpu=1),
            },
        )
        assert allocation["A"] == ExecutionResources(cpu=2)
        assert allocation["B"] == ExecutionResources(cpu=1)

    def test_two_ops_different_resource_requirements(self):
        allocation = allocate_resources(
            1.0,
            rates={"A": 1.0, "B": 1.0},
            resource_requirements={
                "A": ExecutionResources(cpu=1),
                "B": ExecutionResources(cpu=2),
            },
        )
        assert allocation["A"] == ExecutionResources(cpu=1)
        assert allocation["B"] == ExecutionResources(cpu=2)
# Allow running this test module directly (e.g. `python test_concurrency_solver.py`).
if __name__ == "__main__":
    import sys
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/unit/test_concurrency_solver.py",
"license": "Apache License 2.0",
"lines": 131,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:doc/source/data/doc_code/working-with-llms/custom_tokenizer_example.py | """
Documentation example and test for custom tokenizer batch inference.
Demonstrates how to use vLLM's tokenizer infrastructure for models whose
tokenizers are not natively supported by HuggingFace (e.g. Mistral Tekken,
DeepSeek-V3.2, Grok-2 tiktoken).
This example uses a standard model to demonstrate the pattern. For models
that truly require vLLM's custom tokenizer (e.g. deepseek-ai/DeepSeek-V3-0324),
replace the model ID and adjust tokenizer_mode accordingly.
"""
# __custom_chat_template_start__
from typing import Any, Dict, List
from vllm.tokenizers import get_tokenizer
class VLLMChatTemplate:
    """Render chat message lists into prompt strings via vLLM's tokenizer."""

    def __init__(self, model_id: str, tokenizer_mode: str = "auto"):
        self.tokenizer = get_tokenizer(
            model_id,
            tokenizer_mode=tokenizer_mode,
            trust_remote_code=True,
        )

    async def __call__(self, batch: Dict[str, Any]) -> Dict[str, Any]:
        rendered: List[str] = []
        conversations: List[List[Dict[str, Any]]] = []
        for conversation in batch["messages"]:
            # Batches may carry numpy arrays of dicts; normalize to lists.
            if hasattr(conversation, "tolist"):
                conversation = conversation.tolist()
            conversations.append(conversation)
            # Add a generation prompt only when the last turn is from the
            # user; otherwise continue the final (assistant) message.
            ends_with_user = conversation[-1]["role"] == "user"
            rendered.append(
                self.tokenizer.apply_chat_template(
                    conversation,
                    tokenize=False,
                    add_generation_prompt=ends_with_user,
                    continue_final_message=not ends_with_user,
                )
            )
        return {
            "prompt": rendered,
            "messages": conversations,
            "sampling_params": batch["sampling_params"],
        }
# __custom_chat_template_end__
# __custom_tokenize_start__
class VLLMTokenize:
    """Encode prompt strings into token-ID lists with vLLM's tokenizer."""

    def __init__(self, model_id: str, tokenizer_mode: str = "auto"):
        self.tokenizer = get_tokenizer(
            model_id,
            tokenizer_mode=tokenizer_mode,
            trust_remote_code=True,
        )

    async def __call__(self, batch: Dict[str, Any]) -> Dict[str, Any]:
        encode = self.tokenizer.encode
        token_ids: List[List[int]] = [encode(text) for text in batch["prompt"]]
        return {
            "tokenized_prompt": token_ids,
            "messages": batch["messages"],
            "sampling_params": batch["sampling_params"],
        }
# __custom_tokenize_end__
# __custom_detokenize_start__
class VLLMDetokenize:
    """Decode generated token IDs back into text with vLLM's tokenizer."""

    def __init__(self, model_id: str, tokenizer_mode: str = "auto"):
        self.tokenizer = get_tokenizer(
            model_id,
            tokenizer_mode=tokenizer_mode,
            trust_remote_code=True,
        )

    async def __call__(self, batch: Dict[str, Any]) -> Dict[str, Any]:
        texts: List[str] = []
        for token_ids in batch["generated_tokens"]:
            # Token IDs may arrive as numpy arrays; decode wants plain lists.
            if hasattr(token_ids, "tolist"):
                token_ids = token_ids.tolist()
            texts.append(self.tokenizer.decode(token_ids, skip_special_tokens=True))
        return {
            **batch,
            "generated_text_custom": texts,
        }
# __custom_detokenize_end__
def run_custom_tokenizer_example():
    """End-to-end demo: custom chat-template/tokenize/detokenize stages.

    Builds a two-row dataset, disables the processor's built-in stages, and
    wires the three custom ``map_batches`` stages around the vLLM processor.
    The ``# __..._start__`` / ``# __..._end__`` comments are documentation
    include markers — do not remove them.
    """
    # Imported lazily so the module can be parsed without ray installed.
    import ray
    from ray.data.llm import vLLMEngineProcessorConfig, build_processor
    # Input dataset with sampling_params per row.
    ds = ray.data.from_items(
        [
            {
                "messages": [
                    {"role": "user", "content": "What is the capital of France?"}
                ],
                "sampling_params": {"max_tokens": 256, "temperature": 0.7},
            },
            {
                "messages": [
                    {"role": "user", "content": "Write a haiku about computing."}
                ],
                "sampling_params": {"max_tokens": 256, "temperature": 0.7},
            },
        ]
    )
    # __custom_tokenizer_pipeline_start__
    MODEL_ID = "unsloth/Llama-3.1-8B-Instruct"
    config = vLLMEngineProcessorConfig(
        model_source=MODEL_ID,
        engine_kwargs=dict(
            max_model_len=4096,
            trust_remote_code=True,
            tokenizer_mode="auto",
        ),
        batch_size=4,
        concurrency=1,
        # Disable built-in stages -- we handle them via map_batches.
        chat_template_stage=False,
        tokenize_stage=False,
        detokenize_stage=False,
    )
    processor = build_processor(
        config,
        postprocess=lambda row: {
            "generated_text": row.get("generated_text", ""),
            "generated_tokens": row.get("generated_tokens", []),
            "num_input_tokens": row.get("num_input_tokens", 0),
            "num_generated_tokens": row.get("num_generated_tokens", 0),
        },
    )
    # Custom stage 1: messages -> prompt strings.
    ds = ds.map_batches(
        VLLMChatTemplate,
        fn_constructor_kwargs={"model_id": MODEL_ID},
        concurrency=1,
        batch_size=4,
    )
    # Custom stage 2: prompt strings -> token IDs.
    ds = ds.map_batches(
        VLLMTokenize,
        fn_constructor_kwargs={"model_id": MODEL_ID},
        concurrency=1,
        batch_size=4,
    )
    ds = processor(ds)
    # Custom stage 3: generated token IDs -> text.
    ds = ds.map_batches(
        VLLMDetokenize,
        fn_constructor_kwargs={"model_id": MODEL_ID},
        concurrency=1,
        batch_size=4,
    )
    # __custom_tokenizer_pipeline_end__
    ds.show(limit=2)
# Documentation example entry point: run only with a GPU present and skip
# gracefully (broad except is intentional here) when torch/ray/vllm or
# hardware is unavailable, so doc builds and CI don't fail.
if __name__ == "__main__":
    try:
        import torch
        if torch.cuda.is_available():
            run_custom_tokenizer_example()
        else:
            print("Skipping custom tokenizer example (no GPU available)")
    except Exception as e:
        print(f"Skipping custom tokenizer example: {e}")
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/data/doc_code/working-with-llms/custom_tokenizer_example.py",
"license": "Apache License 2.0",
"lines": 155,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/data/tests/unit/test_transform_pyarrow.py | import re
import types
from typing import Iterable
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from ray.data._internal.arrow_ops.transform_pyarrow import (
MIN_PYARROW_VERSION_TYPE_PROMOTION,
_align_struct_fields,
concat,
hash_partition,
shuffle,
try_combine_chunked_columns,
unify_schemas,
)
from ray.data._internal.tensor_extensions.arrow import (
ArrowTensorTypeV2,
_extension_array_concat_supported,
)
from ray.data._internal.utils.arrow_utils import get_pyarrow_version
from ray.data.block import BlockAccessor
from ray.data.context import DataContext
from ray.data.extensions import (
ArrowConversionError,
ArrowPythonObjectArray,
ArrowPythonObjectType,
ArrowTensorArray,
ArrowTensorType,
ArrowVariableShapedTensorArray,
ArrowVariableShapedTensorType,
)
def test_try_defragment_table():
    """A 10-chunk column is combined into one chunk without changing data."""
    pieces = [pa.array(part) for part in np.array_split(np.arange(1000), 10)]
    fragmented = pa.Table.from_pydict({"id": pa.chunked_array(pieces)})
    assert len(fragmented["id"].chunks) == 10
    defragmented = try_combine_chunked_columns(fragmented)
    assert len(defragmented["id"].chunks) == 1
    assert defragmented == fragmented
def test_hash_partitioning():
    """hash_partition handles empty tables, single partitions, and mixed-type keys."""
    # Test hash-partitioning of the empty table
    empty_table = pa.Table.from_pydict({"idx": []})
    assert {} == hash_partition(empty_table, hash_cols=["idx"], num_partitions=5)
    # Test hash-partitioning of table into 1 partition (returns table itself)
    t = pa.Table.from_pydict({"idx": list(range(10))})
    assert {0: t} == hash_partition(t, hash_cols=["idx"], num_partitions=1)
    # Test hash-partitioning of proper table
    idx = list(range(100))
    t = pa.Table.from_pydict(
        {
            "idx": pa.array(idx),
            "ints": pa.array(idx),
            "floats": pa.array([float(i) for i in idx]),
            "strings": pa.array([str(i) for i in idx]),
            "structs": pa.array(
                [
                    {
                        "value": i,
                    }
                    for i in idx
                ]
            ),
        }
    )
    single_partition_dict = hash_partition(t, hash_cols=["idx"], num_partitions=1)
    # There's just 1 partition
    assert len(single_partition_dict) == 1
    assert t == single_partition_dict.get(0)
    # Reassembling all partitions (sorted by idx) must reproduce the input.
    def _concat_and_sort_partitions(parts: Iterable[pa.Table]) -> pa.Table:
        return pa.concat_tables(parts).sort_by("idx")
    _5_partition_dict = hash_partition(t, hash_cols=["strings"], num_partitions=5)
    assert len(_5_partition_dict) == 5
    assert t == _concat_and_sort_partitions(_5_partition_dict.values())
    # There could be no more partitions than elements
    _structs_partition_dict = hash_partition(
        t, hash_cols=["structs"], num_partitions=101
    )
    assert len(_structs_partition_dict) <= 101
    assert t == _concat_and_sort_partitions(_structs_partition_dict.values())
def test_shuffle():
    """Shuffling with a fixed seed produces a deterministic permutation."""
    source = pa.Table.from_pydict({"index": pa.array(list(range(10)))})
    permuted = shuffle(source, seed=0xDEED)
    expected_order = [4, 3, 6, 8, 7, 1, 5, 2, 9, 0]
    assert permuted == pa.Table.from_pydict({"index": pa.array(expected_order)})
def test_arrow_concat_empty(simple_concat_data):
    """Concatenating an empty list of blocks yields an empty table."""
    out = concat(simple_concat_data["empty"])
    assert out == pa.table([])
def test_arrow_concat_single_block(simple_concat_data):
    """Concatenating a single block returns that block unchanged."""
    block = simple_concat_data["single_block"]
    merged = concat([block])
    assert len(merged) == 2
    assert merged == block
def test_arrow_concat_basic(basic_concat_blocks, basic_concat_expected):
    """Two plain tables concatenate zero-copy with expected schema and content."""
    # Test two basic tables.
    ts = basic_concat_blocks
    out = concat(ts)
    # Check length.
    assert len(out) == basic_concat_expected["length"]
    # Check schema.
    assert out.column_names == basic_concat_expected["column_names"]
    assert out.schema.types == basic_concat_expected["schema_types"]
    # Confirm that concatenation is zero-copy (i.e. it didn't trigger chunk
    # consolidation).
    assert out["a"].num_chunks == basic_concat_expected["chunks"]
    assert out["b"].num_chunks == basic_concat_expected["chunks"]
    # Check content.
    assert out["a"].to_pylist() == basic_concat_expected["content"]["a"]
    assert out["b"].to_pylist() == basic_concat_expected["content"]["b"]
    # Check equivalence.
    expected = pa.concat_tables(ts)
    assert out == expected
def test_arrow_concat_null_promotion(null_promotion_blocks, null_promotion_expected):
    """Null-typed columns are promoted to the other block's concrete type."""
    # Test null column --> well-typed column promotion.
    ts = null_promotion_blocks
    out = concat(ts)
    # Check length.
    assert len(out) == null_promotion_expected["length"]
    # Check schema.
    assert out.column_names == null_promotion_expected["column_names"]
    assert out.schema.types == null_promotion_expected["schema_types"]
    # Confirm that concatenation is zero-copy (i.e. it didn't trigger chunk
    # consolidation).
    assert out["a"].num_chunks == null_promotion_expected["chunks"]
    assert out["b"].num_chunks == null_promotion_expected["chunks"]
    # Check content.
    assert out["a"].to_pylist() == null_promotion_expected["content"]["a"]
    assert out["b"].to_pylist() == null_promotion_expected["content"]["b"]
    # Check equivalence.
    expected = pa.concat_tables(ts, promote=True)
    assert out == expected
def test_arrow_concat_tensor_extension_uniform(
    uniform_tensor_blocks, uniform_tensor_expected
):
    """Fixed-shape tensor columns concatenate zero-copy, chunk per input block."""
    # Test tensor column concatenation.
    t1, t2 = uniform_tensor_blocks
    ts = [t1, t2]
    out = concat(ts)
    # Check length.
    assert len(out) == uniform_tensor_expected["length"]
    # Check schema.
    assert out.column_names == ["a"]
    assert out.schema == uniform_tensor_expected["schema"]
    # Confirm that concatenation is zero-copy (i.e. it didn't trigger chunk
    # consolidation).
    assert out["a"].num_chunks == uniform_tensor_expected["chunks"]
    # Check content.
    content = uniform_tensor_expected["content"]
    np.testing.assert_array_equal(out["a"].chunk(0).to_numpy(), content[0])
    np.testing.assert_array_equal(out["a"].chunk(1).to_numpy(), content[1])
    # Check equivalence.
    expected = pa.concat_tables(ts, promote=True)
    assert out == expected
def test_arrow_concat_tensor_extension_variable_shaped(
    variable_shaped_tensor_blocks, variable_shaped_tensor_expected
):
    """Variable-shaped tensor columns concatenate with per-element comparison."""
    # Test variable_shaped tensor column concatenation.
    t1, t2 = variable_shaped_tensor_blocks
    ts = [t1, t2]
    out = concat(ts)
    # Check length.
    assert len(out) == variable_shaped_tensor_expected["length"]
    # Check schema.
    assert out.column_names == ["a"]
    assert out.schema == variable_shaped_tensor_expected["schema"]
    # Confirm that concatenation is zero-copy (i.e. it didn't trigger chunk
    # consolidation).
    assert out["a"].num_chunks == variable_shaped_tensor_expected["chunks"]
    # Check content.
    content = variable_shaped_tensor_expected["content"]
    for o, e in zip(out["a"].chunk(0).to_numpy(), content[0]):
        np.testing.assert_array_equal(o, e)
    for o, e in zip(out["a"].chunk(1).to_numpy(), content[1]):
        np.testing.assert_array_equal(o, e)
    # NOTE: We don't check equivalence with pyarrow.concat_tables since it currently
    # fails for this case.
def test_arrow_concat_tensor_extension_uniform_and_variable_shaped(
    mixed_tensor_blocks, mixed_tensor_expected
):
    """A fixed-shape tensor column concatenated with a variable-shaped one succeeds."""
    # Test concatenating a homogeneous-shaped tensor column with a variable-shaped
    # tensor column.
    t1, t2 = mixed_tensor_blocks
    ts = [t1, t2]
    out = concat(ts)
    # Check length.
    assert len(out) == mixed_tensor_expected["length"]
    # Check schema.
    assert out.column_names == ["a"]
    assert out.schema == mixed_tensor_expected["schema"]
    # Confirm that concatenation is zero-copy (i.e. it didn't trigger chunk
    # consolidation).
    assert out["a"].num_chunks == mixed_tensor_expected["chunks"]
    # Check content.
    content = mixed_tensor_expected["content"]
    for o, e in zip(out["a"].chunk(0).to_numpy(), content[0]):
        np.testing.assert_array_equal(o, e)
    for o, e in zip(out["a"].chunk(1).to_numpy(), content[1]):
        np.testing.assert_array_equal(o, e)
    # NOTE: We don't check equivalence with pyarrow.concat_tables since it currently
    # fails for this case.
def test_arrow_concat_tensor_extension_uniform_but_different(
    different_shape_tensor_blocks, different_shape_tensor_expected
):
    """Two fixed-shape tensor columns with differing shapes still concatenate."""
    # Test concatenating two homogeneous-shaped tensor columns with differing shapes
    # between them.
    t1, t2 = different_shape_tensor_blocks
    ts = [t1, t2]
    out = concat(ts)
    # Check length.
    assert len(out) == different_shape_tensor_expected["length"]
    # Check schema.
    assert out.column_names == ["a"]
    assert out.schema == different_shape_tensor_expected["schema"]
    # Confirm that concatenation is zero-copy (i.e. it didn't trigger chunk
    # consolidation).
    assert out["a"].num_chunks == different_shape_tensor_expected["chunks"]
    # Check content.
    content = different_shape_tensor_expected["content"]
    for o, e in zip(out["a"].chunk(0).to_numpy(), content[0]):
        np.testing.assert_array_equal(o, e)
    for o, e in zip(out["a"].chunk(1).to_numpy(), content[1]):
        np.testing.assert_array_equal(o, e)
    # NOTE: We don't check equivalence with pyarrow.concat_tables since it currently
    # fails for this case.
def test_arrow_concat_with_objects(object_concat_blocks, object_concat_expected):
    """Blocks holding Python objects concatenate into an object-typed column."""
    expected = object_concat_expected
    merged = concat(object_concat_blocks)
    assert isinstance(merged, pa.Table)
    assert len(merged) == expected["length"]
    assert isinstance(merged.schema.field("a").type, expected["a_type"])
    assert expected["b_type"](merged.schema.field("b").type)
    for column in ("a", "b"):
        assert merged.column(column).to_pylist() == expected["content"][column]
def test_struct_with_different_field_names(
    struct_different_field_names_blocks, struct_different_field_names_expected
):
    """Struct columns with disjoint field names unify; absent fields become None."""
    # Ensures that when concatenating tables with struct columns having different
    # field names, missing fields in each struct are filled with None in the
    # resulting table.
    # Concatenate tables with different field names in struct
    t3 = concat(struct_different_field_names_blocks)
    assert isinstance(t3, pa.Table)
    assert len(t3) == struct_different_field_names_expected["length"]
    # Check the entire schema
    assert t3.schema == struct_different_field_names_expected["schema"]
    # Check that missing fields are filled with None
    assert (
        t3.column("a").to_pylist()
        == struct_different_field_names_expected["content"]["a"]
    )
    assert (
        t3.column("d").to_pylist()
        == struct_different_field_names_expected["content"]["d"]
    )
def test_nested_structs(nested_structs_blocks, nested_structs_expected):
    """Deeply nested struct columns keep their nesting structure after concat."""
    # Checks that deeply nested structs (3 levels of nesting) are handled properly
    # during concatenation and the resulting table preserves the correct nesting
    # structure.
    # Concatenate tables with nested structs and missing fields
    t3 = concat(nested_structs_blocks)
    assert isinstance(t3, pa.Table)
    assert len(t3) == nested_structs_expected["length"]
    # Validate the schema of the resulting table
    assert t3.schema == nested_structs_expected["schema"]
    # Validate the data in the concatenated table
    assert t3.column("a").to_pylist() == nested_structs_expected["content"]["a"]
    assert t3.column("d").to_pylist() == nested_structs_expected["content"]["d"]
def test_struct_with_null_values(
    struct_null_values_blocks, struct_null_values_expected
):
    """Null values inside struct columns survive concatenation intact."""
    # Ensures that when concatenating tables with struct columns containing null
    # values, the null values are properly handled, and the result reflects the
    # expected structure.
    # Concatenate tables with struct columns containing null values
    t3 = concat(struct_null_values_blocks)
    assert isinstance(t3, pa.Table)
    assert len(t3) == struct_null_values_expected["length"]
    # Validate the schema of the resulting table
    assert (
        t3.schema == struct_null_values_expected["schema"]
    ), f"Expected schema: {struct_null_values_expected['schema']}, but got {t3.schema}"
    # Verify the PyArrow table content
    assert t3.column("a").to_pylist() == struct_null_values_expected["content"]["a"]
    result = t3.column("d").to_pylist()
    expected = struct_null_values_expected["content"]["d"]
    assert result == expected, f"Expected {expected}, but got {result}"
def test_struct_with_mismatched_lengths(
    struct_mismatched_lengths_blocks, struct_mismatched_lengths_expected
):
    """Struct columns of different lengths pad missing values with None."""
    # Verifies that when concatenating tables with struct columns of different lengths,
    # the missing values are properly padded with None in the resulting table.
    # Concatenate tables with struct columns of different lengths
    t3 = concat(struct_mismatched_lengths_blocks)
    assert isinstance(t3, pa.Table)
    assert (
        len(t3) == struct_mismatched_lengths_expected["length"]
    )  # Check that the resulting table has the correct number of rows
    # Validate the schema of the resulting table
    assert (
        t3.schema == struct_mismatched_lengths_expected["schema"]
    ), f"Expected schema: {struct_mismatched_lengths_expected['schema']}, but got {t3.schema}"
    # Verify the content of the resulting table
    assert (
        t3.column("a").to_pylist() == struct_mismatched_lengths_expected["content"]["a"]
    )
    result = t3.column("d").to_pylist()
    expected = struct_mismatched_lengths_expected["content"]["d"]
    assert result == expected, f"Expected {expected}, but got {result}"
def test_struct_with_empty_arrays(
    struct_empty_arrays_blocks, struct_empty_arrays_expected
):
    """Structs backed by empty arrays produce correctly-typed null structs."""
    # Checks the behavior when concatenating tables with structs containing empty
    # arrays, verifying that null structs are correctly handled.
    # Concatenate tables with struct columns containing null values
    t3 = concat(struct_empty_arrays_blocks)
    # Verify that the concatenated result is a valid PyArrow Table
    assert isinstance(t3, pa.Table)
    assert (
        len(t3) == struct_empty_arrays_expected["length"]
    )  # Check that the concatenated table has 3 rows
    # Validate the schema of the resulting concatenated table
    assert (
        t3.schema == struct_empty_arrays_expected["schema"]
    ), f"Expected schema: {struct_empty_arrays_expected['schema']}, but got {t3.schema}"
    # Verify the content of the concatenated table
    assert t3.column("a").to_pylist() == struct_empty_arrays_expected["content"]["a"]
    result = t3.column("d").to_pylist()
    expected = struct_empty_arrays_expected["content"]["d"]
    assert result == expected, f"Expected {expected}, but got {result}"
def test_struct_with_arrow_variable_shaped_tensor_type(
    struct_variable_shaped_tensor_blocks, struct_variable_shaped_tensor_expected
):
    """Structs containing variable-shaped tensor fields concatenate correctly."""
    # Test concatenating tables with struct columns containing ArrowVariableShapedTensorType
    # fields, ensuring proper handling of variable-shaped tensors within structs.
    # Concatenate tables with struct columns containing variable-shaped tensors
    t3 = concat(struct_variable_shaped_tensor_blocks)
    assert isinstance(t3, pa.Table)
    assert len(t3) == struct_variable_shaped_tensor_expected["length"]
    # Validate the schema of the resulting table
    assert (
        t3.schema == struct_variable_shaped_tensor_expected["schema"]
    ), f"Expected schema: {struct_variable_shaped_tensor_expected['schema']}, but got {t3.schema}"
    # Verify the content of the resulting table
    assert (
        t3.column("id").to_pylist()
        == struct_variable_shaped_tensor_expected["content"]["id"]
    )
    # Check that the struct column contains the expected data
    result_structs = t3.column("struct_with_tensor").to_pylist()
    assert len(result_structs) == 4
    # Verify each struct contains the correct metadata and tensor data
    expected_metadata = ["row1", "row2", "row3", "row4"]
    for i, (struct, expected_meta) in enumerate(zip(result_structs, expected_metadata)):
        assert struct["metadata"] == expected_meta
        assert isinstance(struct["tensor"], np.ndarray)
        # Verify tensor shapes match expectations
        if i == 0:
            assert struct["tensor"].shape == (2, 2)
            np.testing.assert_array_equal(
                struct["tensor"], np.ones((2, 2), dtype=np.float32)
            )
        elif i == 1:
            assert struct["tensor"].shape == (3, 3)
            np.testing.assert_array_equal(
                struct["tensor"], np.zeros((3, 3), dtype=np.float32)
            )
        elif i == 2:
            assert struct["tensor"].shape == (1, 4)
            np.testing.assert_array_equal(
                struct["tensor"], np.ones((1, 4), dtype=np.float32)
            )
        elif i == 3:
            assert struct["tensor"].shape == (2, 1)
            np.testing.assert_array_equal(
                struct["tensor"], np.zeros((2, 1), dtype=np.float32)
            )
@pytest.mark.skipif(
    get_pyarrow_version() < MIN_PYARROW_VERSION_TYPE_PROMOTION,
    reason="Requires PyArrow >= 14.0.0 for type promotion in nested struct fields",
)
def test_struct_with_diverging_primitive_types():
    """Test concatenating tables with struct fields that have diverging primitive types.

    This tests the scenario where struct fields have the same name but different
    primitive types (e.g., int64 vs float64), which requires type promotion.
    """
    # NOTE: `pa` is already imported at module scope; the redundant
    # function-local `import pyarrow as pa` was removed.
    # Table 1: struct with (a: int64, b: string)
    t1 = pa.table(
        {
            "data": pa.array(
                [{"a": 1, "b": "hello"}, {"a": 2, "b": "world"}],
                type=pa.struct([pa.field("a", pa.int64()), pa.field("b", pa.string())]),
            )
        }
    )
    # Table 2: struct with (a: float64, c: int32)
    # Field 'a' has different type, field 'b' missing, field 'c' new
    t2 = pa.table(
        {
            "data": pa.array(
                [{"a": 1.5, "c": 100}, {"a": 2.5, "c": 200}],
                type=pa.struct(
                    [pa.field("a", pa.float64()), pa.field("c", pa.int32())]
                ),
            )
        }
    )
    # Concatenate with type promotion
    result = concat([t1, t2], promote_types=True)
    # Verify schema: field 'a' should be promoted to float64
    expected_struct_type = pa.struct(
        [
            pa.field("a", pa.float64()),
            pa.field("b", pa.string()),
            pa.field("c", pa.int32()),
        ]
    )
    assert result.schema == pa.schema([pa.field("data", expected_struct_type)])
    # Verify data: int64 values should be cast to float64, missing fields filled with None
    expected_data = [
        {"a": 1.0, "b": "hello", "c": None},
        {"a": 2.0, "b": "world", "c": None},
        {"a": 1.5, "b": None, "c": 100},
        {"a": 2.5, "b": None, "c": 200},
    ]
    assert result.column("data").to_pylist() == expected_data
def test_arrow_concat_object_with_tensor_fails(object_with_tensor_fails_blocks):
    """Mixing object-typed and tensor-typed data in one column is rejected."""
    with pytest.raises(ArrowConversionError) as exc_info:
        concat(object_with_tensor_fails_blocks)
    cause = exc_info.value.__cause__
    assert "objects and tensors" in str(cause)
def test_unify_schemas(unify_schemas_basic_schemas, unify_schemas_multicol_schemas):
    """unify_schemas merges tensor columns across shapes, ndims, and multi-column schemas."""
    # Unifying a schema with the same schema as itself
    schemas = unify_schemas_basic_schemas
    assert (
        unify_schemas([schemas["tensor_arr_1"], schemas["tensor_arr_1"]])
        == schemas["tensor_arr_1"]
    )
    # Single columns with different shapes
    contains_diff_shaped = [schemas["tensor_arr_1"], schemas["tensor_arr_2"]]
    assert unify_schemas(contains_diff_shaped) == pa.schema(
        [
            ("tensor_arr", ArrowVariableShapedTensorType(pa.int32(), 2)),
        ]
    )
    # Single columns with same shapes
    contains_diff_types = [schemas["tensor_arr_1"], schemas["tensor_arr_3"]]
    assert unify_schemas(contains_diff_types) == pa.schema(
        [
            ("tensor_arr", ArrowTensorType((3, 5), pa.int32())),
        ]
    )
    # Single columns with a variable shaped tensor, same ndim
    contains_var_shaped = [schemas["tensor_arr_1"], schemas["var_tensor_arr"]]
    assert unify_schemas(contains_var_shaped) == pa.schema(
        [
            ("tensor_arr", ArrowVariableShapedTensorType(pa.int32(), 2)),
        ]
    )
    # Single columns with a variable shaped tensor, different ndim
    contains_1d2d = [schemas["tensor_arr_1"], schemas["var_tensor_arr_1d"]]
    assert unify_schemas(contains_1d2d) == pa.schema(
        [
            ("tensor_arr", ArrowVariableShapedTensorType(pa.int32(), 2)),
        ]
    )
    contains_2d3d = [schemas["tensor_arr_1"], schemas["var_tensor_arr_3d"]]
    assert unify_schemas(contains_2d3d) == pa.schema(
        [
            ("tensor_arr", ArrowVariableShapedTensorType(pa.int32(), 3)),
        ]
    )
    # Multi-column schemas
    multicol = unify_schemas_multicol_schemas
    assert unify_schemas(
        [multicol["multicol_schema_1"], multicol["multicol_schema_2"]]
    ) == pa.schema(
        [
            ("col_int", pa.int32()),
            ("col_fixed_tensor", ArrowTensorType((4, 2), pa.int32())),
            ("col_var_tensor", ArrowVariableShapedTensorType(pa.int16(), 5)),
        ]
    )
    assert unify_schemas(
        [multicol["multicol_schema_1"], multicol["multicol_schema_3"]]
    ) == pa.schema(
        [
            ("col_int", pa.int32()),
            ("col_fixed_tensor", ArrowVariableShapedTensorType(pa.int32(), 3)),
            ("col_var_tensor", ArrowVariableShapedTensorType(pa.int16(), 5)),
        ]
    )
    # Unifying >2 schemas together
    assert unify_schemas(
        [
            multicol["multicol_schema_1"],
            multicol["multicol_schema_2"],
            multicol["multicol_schema_3"],
        ]
    ) == pa.schema(
        [
            ("col_int", pa.int32()),
            ("col_fixed_tensor", ArrowVariableShapedTensorType(pa.int32(), 3)),
            ("col_var_tensor", ArrowVariableShapedTensorType(pa.int16(), 5)),
        ]
    )
def test_unify_schemas_object_types(unify_schemas_object_types_schemas):
    """Object-typed columns absorb primitive-typed ones during unification."""
    fixtures = unify_schemas_object_types_schemas
    obj_schema = fixtures["object_schema"]
    int_schema = fixtures["int_schema"]
    float_schema = fixtures["float_schema"]
    # Two schemas: the object column wins, producing ArrowPythonObjectType.
    assert unify_schemas([obj_schema, int_schema]) == fixtures["expected"]
    # Three schemas with mixed primitive types behave the same way.
    assert unify_schemas([obj_schema, int_schema, float_schema]) == fixtures["expected"]
def test_unify_schemas_incompatible_tensor_dtypes(
    unify_schemas_incompatible_tensor_schemas,
):
    """Test error handling for incompatible tensor dtypes.

    Unifying tensor columns whose scalar dtypes diverge (int32 vs float)
    must raise rather than silently promote.
    """
    # NOTE: `pa` is already imported at module scope; the redundant
    # function-local `import pyarrow as pa` was removed.
    with pytest.raises(
        pa.lib.ArrowTypeError,
        match=re.escape(
            "Can't unify tensor types with divergent scalar types: [ArrowTensorType(shape=(2, 2), dtype=int32), ArrowTensorType(shape=(2, 2), dtype=float)]"
        ),
    ):
        unify_schemas(unify_schemas_incompatible_tensor_schemas)
def test_unify_schemas_objects_and_tensors(unify_schemas_objects_and_tensors_schemas):
    """A column that is object-typed in one schema and tensor-typed in another fails."""
    schemas = unify_schemas_objects_and_tensors_schemas
    expected_msg = "Found columns with both objects and tensors"
    with pytest.raises(ValueError, match=expected_msg):
        unify_schemas(schemas)
def test_unify_schemas_missing_tensor_fields(
    unify_schemas_missing_tensor_fields_schemas,
):
    """A struct tensor field absent from one schema forces a variable-shaped type."""
    fixtures = unify_schemas_missing_tensor_fields_schemas
    unified = unify_schemas([fixtures["with_tensor"], fixtures["without_tensor"]])
    # The fixed-shape tensor relaxes to variable-shaped to accommodate the
    # schema where the field is missing.
    assert unified == fixtures["expected"]
def test_unify_schemas_nested_struct_tensors(
    unify_schemas_nested_struct_tensors_schemas,
):
    """Tensor fields nested inside structs also relax to variable-shaped types."""
    fixtures = unify_schemas_nested_struct_tensors_schemas
    unified = unify_schemas([fixtures["with_tensor"], fixtures["without_tensor"]])
    # The nested tensor field becomes variable-shaped in the unified schema.
    assert unified == fixtures["expected"]
def test_unify_schemas_edge_cases(unify_schemas_edge_cases_data):
    """Edge cases: empty input, single schema, disjoint columns, all-null schemas."""
    data = unify_schemas_edge_cases_data
    # Empty schema list
    with pytest.raises(Exception):  # Should handle gracefully
        unify_schemas(data["empty_schemas"])
    # Single schema
    assert unify_schemas([data["single_schema"]]) == data["single_schema"]
    # Schemas with no common columns
    result = unify_schemas(
        [data["no_common_columns"]["schema1"], data["no_common_columns"]["schema2"]]
    )
    assert result == data["no_common_columns"]["expected"]
    # All null schemas
    result = unify_schemas(
        [data["all_null_schemas"]["schema1"], data["all_null_schemas"]["schema2"]]
    )
    assert result == data["all_null_schemas"]["schema1"]
def test_unify_schemas_mixed_tensor_types(unify_schemas_mixed_tensor_data):
    """Fixed- and variable-shaped tensor columns unify to a variable-shaped type."""
    fixtures = unify_schemas_mixed_tensor_data
    expected = fixtures["expected_variable"]
    # Fixed + variable-shaped -> variable-shaped.
    unified = unify_schemas([fixtures["fixed_shape"], fixtures["variable_shaped"]])
    assert unified == expected
    # Two fixed shapes that disagree (same dtype) -> also variable-shaped.
    unified = unify_schemas([fixtures["fixed_shape"], fixtures["different_shape"]])
    assert unified == expected
@pytest.mark.skipif(
    get_pyarrow_version() < MIN_PYARROW_VERSION_TYPE_PROMOTION,
    reason="Requires Arrow version of at least 14.0.0",
)
def test_unify_schemas_type_promotion(unify_schemas_type_promotion_data):
    """promote_types=False rejects int64/float64 merges; promote_types=True widens them."""
    data = unify_schemas_type_promotion_data
    # No type promotion
    assert (
        unify_schemas(
            [data["non_null"], data["nullable"]],
            promote_types=False,
        )
        == data["nullable"]
    )
    # No type promotion
    with pytest.raises(pa.lib.ArrowTypeError) as exc_info:
        unify_schemas(
            [data["int64"], data["float64"]],
            promote_types=False,
        )
    assert "Unable to merge: Field A has incompatible types: int64 vs double" == str(
        exc_info.value
    )
    # Type promoted
    assert (
        unify_schemas(
            [data["int64"], data["float64"]],
            promote_types=True,
        )
        == data["float64"]
    )
def test_arrow_block_select(block_select_data):
    """BlockAccessor.select projects columns and rejects non-string selectors."""
    data = block_select_data
    block_accessor = BlockAccessor.for_block(data["table"])
    block = block_accessor.select(data["single_column"]["columns"])
    assert block.schema == data["single_column"]["expected_schema"]
    assert block.to_pandas().equals(data["df"][data["single_column"]["columns"]])
    block = block_accessor.select(data["multiple_columns"]["columns"])
    assert block.schema == data["multiple_columns"]["expected_schema"]
    assert block.to_pandas().equals(data["df"][data["multiple_columns"]["columns"]])
    # A callable is not a valid column selector for Arrow blocks.
    with pytest.raises(ValueError):
        block = block_accessor.select([lambda x: x % 3, "two"])
def test_arrow_block_slice_copy(block_slice_data):
    # Test that ArrowBlock slicing properly copies the underlying Arrow
    # table.
    def check_for_copy(table1, table2, a, b, is_copy):
        """Verify table2 equals table1[a:b] and shares (copy=False) or does
        not share (copy=True) the underlying Arrow buffers."""
        expected_slice = table1.slice(a, b - a)
        assert table2.equals(expected_slice)
        assert table2.schema == table1.schema
        assert table1.num_columns == table2.num_columns
        for col1, col2 in zip(table1.columns, table2.columns):
            assert col1.num_chunks == col2.num_chunks
            for chunk1, chunk2 in zip(col1.chunks, col2.chunks):
                bufs1 = chunk1.buffers()
                bufs2 = chunk2.buffers()
                # A copied slice is re-materialized, so its chunks start at
                # offset 0; a zero-copy view keeps the slice offset `a`.
                expected_offset = 0 if is_copy else a
                assert chunk2.offset == expected_offset
                assert len(chunk2) == b - a
                if is_copy:
                    # buffers()[1] is the values buffer (index 0 is the
                    # validity bitmap); a true copy must not alias it.
                    assert bufs2[1].address != bufs1[1].address
                else:
                    assert bufs2[1].address == bufs1[1].address
    data = block_slice_data["normal"]
    table = data["table"]
    a, b = data["slice_params"]["a"], data["slice_params"]["b"]
    block_accessor = BlockAccessor.for_block(table)
    # Test with copy.
    table2 = block_accessor.slice(a, b, True)
    check_for_copy(table, table2, a, b, is_copy=True)
    # Test without copy.
    table2 = block_accessor.slice(a, b, False)
    check_for_copy(table, table2, a, b, is_copy=False)
def test_arrow_block_slice_copy_empty(block_slice_data):
    """Slicing an empty table works identically with and without copy."""
    data = block_slice_data["empty"]
    table = data["table"]
    start, stop = data["slice_params"]["a"], data["slice_params"]["b"]
    reference = table.slice(start, stop - start)
    accessor = BlockAccessor.for_block(table)
    # Both copy modes must return an equal, empty table with the same schema.
    for copy in (True, False):
        sliced = accessor.slice(start, stop, copy)
        assert sliced.equals(reference)
        assert sliced.schema == table.schema
        assert sliced.num_rows == 0
def test_mixed_tensor_types_same_dtype(
    mixed_tensor_types_same_dtype_blocks, mixed_tensor_types_same_dtype_expected
):
    """Concatenating same-dtype tensors of differing shapes widens to variable-shaped."""
    block_a, block_b = mixed_tensor_types_same_dtype_blocks
    expected = mixed_tensor_types_same_dtype_expected
    combined = concat([block_a, block_b])
    assert isinstance(combined, pa.Table)
    assert len(combined) == expected["length"]
    # The tensor column must have been promoted to variable-shaped.
    assert combined.schema == expected["schema"]
    tensor_type = combined.schema.field("tensor").type
    assert isinstance(tensor_type, ArrowVariableShapedTensorType)
    # Verify every tensor value element-wise.
    actual_tensors = combined.column("tensor").to_pylist()
    assert len(actual_tensors) == expected["length"]
    for actual, wanted in zip(actual_tensors, expected["tensor_values"]):
        assert isinstance(actual, np.ndarray)
        assert actual.shape == wanted.shape
        assert actual.dtype == wanted.dtype
        np.testing.assert_array_equal(actual, wanted)
def test_mixed_tensor_types_fixed_shape_different(
    mixed_tensor_types_fixed_shape_blocks, mixed_tensor_types_fixed_shape_expected
):
    """Concatenating tensors with different fixed shapes widens to variable-shaped."""
    block_a, block_b = mixed_tensor_types_fixed_shape_blocks
    expected = mixed_tensor_types_fixed_shape_expected
    combined = concat([block_a, block_b])
    assert isinstance(combined, pa.Table)
    assert len(combined) == expected["length"]
    # The tensor column must have been promoted to variable-shaped.
    assert combined.schema == expected["schema"]
    tensor_type = combined.schema.field("tensor").type
    assert isinstance(tensor_type, ArrowVariableShapedTensorType)
    # Verify every tensor value element-wise.
    actual_tensors = combined.column("tensor").to_pylist()
    assert len(actual_tensors) == expected["length"]
    for actual, wanted in zip(actual_tensors, expected["tensor_values"]):
        assert isinstance(actual, np.ndarray)
        assert actual.shape == wanted.shape
        assert actual.dtype == wanted.dtype
        np.testing.assert_array_equal(actual, wanted)
def test_mixed_tensor_types_variable_shaped(
    mixed_tensor_types_variable_shaped_blocks,
    mixed_tensor_types_variable_shaped_expected,
):
    """Concatenating variable-shaped tensor blocks preserves each tensor."""
    block_a, block_b = mixed_tensor_types_variable_shaped_blocks
    expected = mixed_tensor_types_variable_shaped_expected
    combined = concat([block_a, block_b])
    assert isinstance(combined, pa.Table)
    assert len(combined) == expected["length"]
    # The result keeps a variable-shaped tensor column.
    assert combined.schema == expected["schema"]
    tensor_type = combined.schema.field("tensor").type
    assert isinstance(tensor_type, ArrowVariableShapedTensorType)
    # Verify every tensor value element-wise.
    actual_tensors = combined.column("tensor").to_pylist()
    assert len(actual_tensors) == expected["length"]
    for actual, wanted in zip(actual_tensors, expected["tensor_values"]):
        assert isinstance(actual, np.ndarray)
        assert actual.shape == wanted.shape
        assert actual.dtype == wanted.dtype
        np.testing.assert_array_equal(actual, wanted)
@pytest.mark.skipif(
    not _extension_array_concat_supported(),
    reason="ExtensionArrays support concatenation only in Pyarrow >= 12.0",
)
def test_mixed_tensor_types_in_struct(
    struct_with_mixed_tensor_types_blocks, struct_with_mixed_tensor_types_expected
):
    """Structs holding mixed tensor types can still be concatenated."""
    block_a, block_b = struct_with_mixed_tensor_types_blocks
    expected = struct_with_mixed_tensor_types_expected
    combined = concat([block_a, block_b])
    assert isinstance(combined, pa.Table)
    assert len(combined) == expected["length"]
    assert combined.schema == expected["schema"]
    for column in ("id", "struct"):
        assert column in combined.column_names
    rows = combined.column("struct").to_pylist()
    assert len(rows) == expected["length"]
    # Each struct row must carry the expected scalar field values.
    for row, wanted in zip(rows, expected["struct_values"]):
        for key, value in wanted.items():
            assert row[key] == value
@pytest.mark.skipif(
    not _extension_array_concat_supported(),
    reason="ExtensionArrays support concatenation only in Pyarrow >= 12.0",
)
def test_nested_struct_with_mixed_tensor_types(
    nested_struct_with_mixed_tensor_types_blocks,
    nested_struct_with_mixed_tensor_types_expected,
):
    """Mixed tensor types nested inside structs survive concatenation."""
    block_a, block_b = nested_struct_with_mixed_tensor_types_blocks
    expected = nested_struct_with_mixed_tensor_types_expected
    combined = concat([block_a, block_b])
    assert isinstance(combined, pa.Table)
    assert len(combined) == expected["length"]
    assert combined.schema == expected["schema"]
    for column in ("id", "complex_struct"):
        assert column in combined.column_names
    rows = combined.column("complex_struct").to_pylist()
    assert len(rows) == expected["length"]
    # Outer fields live on the row itself; inner fields under row["nested"].
    first_row = rows[0]
    for field in expected["expected_fields"]:
        if field in ("nested", "outer_tensor", "outer_value"):
            assert field in first_row
        elif field in ("inner_tensor", "inner_value"):
            assert field in first_row["nested"]
@pytest.mark.skipif(
    not _extension_array_concat_supported(),
    reason="ExtensionArrays support concatenation only in Pyarrow >= 12.0",
)
def test_multiple_tensor_fields_in_struct(
    multiple_tensor_fields_struct_blocks, multiple_tensor_fields_struct_expected
):
    """Structs with several tensor fields of different types concatenate."""
    block_a, block_b = multiple_tensor_fields_struct_blocks
    expected = multiple_tensor_fields_struct_expected
    combined = concat([block_a, block_b])
    assert isinstance(combined, pa.Table)
    assert len(combined) == expected["length"]
    assert combined.schema == expected["schema"]
    for column in ("id", "multi_tensor_struct"):
        assert column in combined.column_names
    rows = combined.column("multi_tensor_struct").to_pylist()
    assert len(rows) == expected["length"]
    # Every expected tensor field must be present on every row.
    assert all(
        field in row for row in rows for field in expected["expected_fields"]
    )
def test_struct_with_incompatible_tensor_dtypes_fails():
    """Concatenating structs whose tensors have divergent scalar dtypes raises."""
    # float32 fixed-shape tensors on one side...
    float_tensors = np.ones((2, 2), dtype=np.float32)
    # ...int64 variable-shaped tensors on the other.
    int_tensors = np.array(
        [np.ones((3, 3), dtype=np.int64), np.zeros((1, 4), dtype=np.int64)],
        dtype=object,
    )
    left, right = _create_struct_tensor_blocks(
        float_tensors, int_tensors, "fixed", "variable"
    )
    expected_message = (
        "Can't unify tensor types with divergent scalar types: "
        "[ArrowTensorTypeV2(shape=(2,), dtype=float), "
        "ArrowVariableShapedTensorType(ndim=2, dtype=int64)]"
    )
    with pytest.raises(ArrowConversionError, match=re.escape(expected_message)):
        concat([left, right])
@pytest.mark.skipif(
    not _extension_array_concat_supported(),
    reason="ExtensionArrays support concatenation only in Pyarrow >= 12.0",
)
def test_struct_with_additional_fields(
    struct_with_additional_fields_blocks, struct_with_additional_fields_expected
):
    """Blocks whose structs carry extra fields still concatenate correctly."""
    block_a, block_b = struct_with_additional_fields_blocks
    expected = struct_with_additional_fields_expected
    combined = concat([block_a, block_b])
    assert isinstance(combined, pa.Table)
    assert len(combined) == expected["length"]
    assert combined.schema == expected["schema"]
    for column in ("id", "struct"):
        assert column in combined.column_names
    rows = combined.column("struct").to_pylist()
    assert len(rows) == expected["length"]
    # Check per-row field presence, then the values of the extra field.
    for i, row in enumerate(rows):
        for field, should_be_present in expected["field_presence"].items():
            assert (field in row) == should_be_present
        if "extra" in row:
            assert row["extra"] == expected["extra_values"][i]
@pytest.mark.skipif(
    not _extension_array_concat_supported(),
    reason="ExtensionArrays support concatenation only in Pyarrow >= 12.0",
)
def test_struct_with_null_tensor_values(
    struct_with_null_tensor_values_blocks, struct_with_null_tensor_values_expected
):
    """Struct fields missing from one block get null-filled after concat."""
    block_a, block_b = struct_with_null_tensor_values_blocks
    expected = struct_with_null_tensor_values_expected
    combined = concat([block_a, block_b])
    assert isinstance(combined, pa.Table)
    assert len(combined) == expected["length"]
    # The unified schema must carry both struct fields.
    assert combined.schema == expected["schema"]
    assert combined.column("id").to_pylist() == expected["ids"]
    # Index struct scalars directly rather than using to_pylist(), which
    # trips an Arrow bug on null tensor extension values.
    struct_column = combined.column("struct")
    row_expectations = zip(expected["values"], expected["tensor_validity"])
    for i, (value, tensor_is_valid) in enumerate(row_expectations):
        assert struct_column[i]["value"].as_py() == value
        tensor_scalar = struct_column[i]["tensor"]
        if tensor_is_valid:
            assert tensor_scalar is not None
        else:
            # Null tensors are detected via the scalar's validity flag.
            assert tensor_scalar.is_valid is False
# Test fixtures for _align_struct_fields tests
@pytest.fixture
def simple_struct_blocks():
    """Two blocks whose struct columns have partially overlapping fields."""
    # Left block carries fields {a, b}; right block carries {a, c}.
    left_rows = [{"a": 1, "b": "x"}, {"a": 2, "b": "y"}]
    right_rows = [{"a": 3, "c": True}, {"a": 4, "c": False}]
    return _create_basic_struct_blocks(
        left_rows, right_rows, id_data1=None, id_data2=None
    )
@pytest.fixture
def simple_struct_schema():
    """Schema covering the union of fields from ``simple_struct_blocks``."""
    fields = [("a", pa.int64()), ("b", pa.string()), ("c", pa.bool_())]
    return _create_struct_schema(fields, include_id=False)
@pytest.fixture
def nested_struct_blocks():
    """Nested-struct blocks whose inner fields only partially overlap."""
    # Left inner structs carry {x, y}; right inner structs carry {x, z}.
    left_rows = [{"inner": {"x": 1, "y": "a"}}, {"inner": {"x": 2, "y": "b"}}]
    right_rows = [{"inner": {"x": 3, "z": 1.5}}, {"inner": {"x": 4, "z": 2.5}}]
    return _create_basic_struct_blocks(
        left_rows, right_rows, column_name="outer", id_data1=None, id_data2=None
    )
@pytest.fixture
def nested_struct_schema():
    """Schema for ``nested_struct_blocks`` covering all inner fields."""
    leaf_fields = [("x", pa.int64()), ("y", pa.string()), ("z", pa.float64())]
    wrapped_fields = [("inner", pa.struct(leaf_fields))]
    return _create_struct_schema(
        wrapped_fields,
        include_id=False,
        other_fields=[("outer", pa.struct(wrapped_fields))],
    )
@pytest.fixture
def missing_column_blocks():
    """One block with a struct column, one lacking it entirely."""
    with_struct = pa.table(
        {
            "struct": pa.array([{"a": 1, "b": "x"}, {"a": 2, "b": "y"}]),
            "other": pa.array([10, 20]),
        }
    )
    # The second block has no "struct" column at all.
    without_struct = pa.table({"other": pa.array([30, 40])})
    return with_struct, without_struct
@pytest.fixture
def missing_column_schema():
    """Schema containing the struct column that one block is missing."""
    struct_type = pa.struct([("a", pa.int64()), ("b", pa.string())])
    return pa.schema([("struct", struct_type), ("other", pa.int64())])
@pytest.fixture
def multiple_struct_blocks():
    """Two blocks, each with two struct columns whose field sets diverge."""
    # Left block: struct1 has {a, b}, struct2 has {p, q}.
    left = pa.table(
        {
            "struct1": pa.array([{"a": 1, "b": "x"}, {"a": 2, "b": "y"}]),
            "struct2": pa.array([{"p": 10, "q": True}, {"p": 20, "q": False}]),
        }
    )
    # Right block: struct1 swaps 'b' for 'c'; struct2 swaps 'q' for 'r'.
    right = pa.table(
        {
            "struct1": pa.array([{"a": 3, "c": 1.5}, {"a": 4, "c": 2.5}]),
            "struct2": pa.array([{"p": 30, "r": "alpha"}, {"p": 40, "r": "beta"}]),
        }
    )
    return left, right
@pytest.fixture
def multiple_struct_schema():
    """Unified schema covering every field of both struct columns."""
    first = pa.struct(
        [("a", pa.int64()), ("b", pa.string()), ("c", pa.float64())]
    )
    second = pa.struct(
        [("p", pa.int64()), ("q", pa.bool_()), ("r", pa.string())]
    )
    return pa.schema([("struct1", first), ("struct2", second)])
@pytest.fixture
def mixed_column_blocks():
    """Blocks mixing a struct column with plain integer/string columns."""
    left = pa.table(
        {
            "struct": pa.array([{"a": 1, "b": "x"}, {"a": 2, "b": "y"}]),
            "int_col": pa.array([10, 20]),
            "string_col": pa.array(["foo", "bar"]),
        }
    )
    # Right block's struct drops 'b' and adds 'c'.
    right = pa.table(
        {
            "struct": pa.array([{"a": 3, "c": True}, {"a": 4, "c": False}]),
            "int_col": pa.array([30, 40]),
            "string_col": pa.array(["baz", "qux"]),
        }
    )
    return left, right
@pytest.fixture
def mixed_column_schema():
    """Schema mixing a struct column with primitive columns."""
    struct_type = pa.struct(
        [("a", pa.int64()), ("b", pa.string()), ("c", pa.bool_())]
    )
    return pa.schema(
        [
            ("struct", struct_type),
            ("int_col", pa.int64()),
            ("string_col", pa.string()),
        ]
    )
@pytest.fixture
def empty_block_blocks():
    """One zero-row struct block and one populated block."""
    struct_type = pa.struct([("a", pa.int64()), ("b", pa.string())])
    empty = pa.table({"struct": pa.array([], type=struct_type)})
    # Populated block's struct drops 'b' and adds 'c'.
    populated = pa.table(
        {"struct": pa.array([{"a": 1, "c": True}, {"a": 2, "c": False}])}
    )
    return empty, populated
@pytest.fixture
def empty_block_schema():
    """Target schema used when aligning the empty-block pair."""
    fields = [("a", pa.int64()), ("b", pa.string()), ("c", pa.bool_())]
    return _create_struct_schema(fields, include_id=False)
@pytest.fixture
def already_aligned_blocks():
    """Two blocks whose struct columns already share identical fields."""
    return _create_basic_struct_blocks(
        [{"a": 1, "b": "x"}, {"a": 2, "b": "y"}],
        [{"a": 3, "b": "z"}, {"a": 4, "b": "w"}],
        id_data1=None,
        id_data2=None,
    )
@pytest.fixture
def already_aligned_schema():
    """Schema matching ``already_aligned_blocks`` exactly."""
    return _create_struct_schema(
        [("a", pa.int64()), ("b", pa.string())], include_id=False
    )
@pytest.fixture
def no_struct_blocks():
    """Blocks containing only primitive (non-struct) columns."""
    left = pa.table(
        {"int_col": pa.array([1, 2]), "string_col": pa.array(["a", "b"])}
    )
    right = pa.table(
        {"int_col": pa.array([3, 4]), "string_col": pa.array(["c", "d"])}
    )
    return left, right
@pytest.fixture
def no_struct_schema():
    """Schema with only primitive columns, no structs."""
    return pa.schema([("int_col", pa.int64()), ("string_col", pa.string())])
@pytest.fixture
def deep_nesting_blocks():
    """Blocks with three levels of struct nesting and divergent leaf fields."""
    # Leaves of the first block carry {a, b}; the second carries {a, c}.
    left_rows = [
        {"level2": {"level3": {"a": 1, "b": "x"}}},
        {"level2": {"level3": {"a": 2, "b": "y"}}},
    ]
    right_rows = [
        {"level2": {"level3": {"a": 3, "c": True}}},
        {"level2": {"level3": {"a": 4, "c": False}}},
    ]
    return _create_basic_struct_blocks(
        left_rows, right_rows, column_name="level1", id_data1=None, id_data2=None
    )
@pytest.fixture
def deep_nesting_schema():
    """Three-level nested schema covering all leaf fields."""
    leaves = pa.struct([("a", pa.int64()), ("b", pa.string()), ("c", pa.bool_())])
    middle = pa.struct([("level3", leaves)])
    return pa.schema([("level1", pa.struct([("level2", middle)]))])
def test_align_struct_fields_simple(simple_struct_blocks, simple_struct_schema):
    """Missing struct fields are added and null-filled on both sides."""
    blocks = _align_struct_fields(list(simple_struct_blocks), simple_struct_schema)
    assert len(blocks) == 2
    # Block 0 gains a null 'c'; block 1 gains a null 'b'.
    expected_rows = [
        [{"a": 1, "b": "x", "c": None}, {"a": 2, "b": "y", "c": None}],
        [{"a": 3, "b": None, "c": True}, {"a": 4, "b": None, "c": False}],
    ]
    for block, rows in zip(blocks, expected_rows):
        assert block.schema == simple_struct_schema
        assert block["struct"].to_pylist() == rows
def test_align_struct_fields_nested(nested_struct_blocks, nested_struct_schema):
    """Field alignment recurses into nested structs."""
    blocks = _align_struct_fields(list(nested_struct_blocks), nested_struct_schema)
    assert len(blocks) == 2
    # Block 0's inner structs gain a null 'z'; block 1's gain a null 'y'.
    expected_rows = [
        [
            {"inner": {"x": 1, "y": "a", "z": None}},
            {"inner": {"x": 2, "y": "b", "z": None}},
        ],
        [
            {"inner": {"x": 3, "y": None, "z": 1.5}},
            {"inner": {"x": 4, "y": None, "z": 2.5}},
        ],
    ]
    for block, rows in zip(blocks, expected_rows):
        assert block.schema == nested_struct_schema
        assert block["outer"].to_pylist() == rows
def test_align_struct_fields_missing_column(
    missing_column_blocks, missing_column_schema
):
    """A block lacking a struct column gets it back filled with nulls."""
    left, right = missing_column_blocks
    aligned = _align_struct_fields([left, right], missing_column_schema)
    assert len(aligned) == 2
    # The block that already had the column is unchanged.
    assert aligned[0].schema == missing_column_schema
    assert aligned[0]["struct"].to_pylist() == [
        {"a": 1, "b": "x"},
        {"a": 2, "b": "y"},
    ]
    assert aligned[0]["other"].to_pylist() == [10, 20]
    # The block that was missing the column gets an all-null struct column.
    assert aligned[1].schema == missing_column_schema
    assert aligned[1]["struct"].to_pylist() == [None, None]
    assert aligned[1]["other"].to_pylist() == [30, 40]
def test_align_struct_fields_multiple_structs(
    multiple_struct_blocks, multiple_struct_schema
):
    """Every struct column is aligned independently."""
    blocks = _align_struct_fields(
        list(multiple_struct_blocks), multiple_struct_schema
    )
    assert len(blocks) == 2
    expected_columns = [
        {
            "struct1": [
                {"a": 1, "b": "x", "c": None},
                {"a": 2, "b": "y", "c": None},
            ],
            "struct2": [
                {"p": 10, "q": True, "r": None},
                {"p": 20, "q": False, "r": None},
            ],
        },
        {
            "struct1": [
                {"a": 3, "b": None, "c": 1.5},
                {"a": 4, "b": None, "c": 2.5},
            ],
            "struct2": [
                {"p": 30, "q": None, "r": "alpha"},
                {"p": 40, "q": None, "r": "beta"},
            ],
        },
    ]
    for block, columns in zip(blocks, expected_columns):
        assert block.schema == multiple_struct_schema
        for name, rows in columns.items():
            assert block[name].to_pylist() == rows
def test_align_struct_fields_non_struct_columns(
    mixed_column_blocks, mixed_column_schema
):
    """Test that non-struct columns are left unchanged by alignment."""
    t1, t2 = mixed_column_blocks
    aligned_blocks = _align_struct_fields([t1, t2], mixed_column_schema)
    assert len(aligned_blocks) == 2
    # BUG FIX: the original wrote
    #     assert block["string_col"].to_pylist() == [...] if i == 0 else [...]
    # which parses as `assert ((... == [...]) if i == 0 else [...])`, so for
    # i == 1 it asserted a truthy non-empty list and never compared anything.
    expected_strings = [["foo", "bar"], ["baz", "qux"]]
    for i, block in enumerate(aligned_blocks):
        assert block["int_col"].to_pylist() == [10 + i * 20, 20 + i * 20]
        assert block["string_col"].to_pylist() == expected_strings[i]
def test_align_struct_fields_empty_blocks(empty_block_blocks, empty_block_schema):
    """Empty blocks are aligned to the target schema without error."""
    blocks = _align_struct_fields(list(empty_block_blocks), empty_block_schema)
    assert len(blocks) == 2
    empty, populated = blocks
    # The zero-row block keeps zero rows but takes on the unified schema.
    assert empty.schema == empty_block_schema
    assert len(empty) == 0
    # The populated block gains a null-filled 'b' field.
    assert populated.schema == empty_block_schema
    assert populated["struct"].to_pylist() == [
        {"a": 1, "b": None, "c": True},
        {"a": 2, "b": None, "c": False},
    ]
def test_align_struct_fields_already_aligned(
    already_aligned_blocks, already_aligned_schema
):
    """Blocks already matching the schema pass through untouched."""
    left, right = already_aligned_blocks
    result = _align_struct_fields([left, right], already_aligned_schema)
    assert result == [left, right]
def test_align_struct_fields_no_struct_columns(no_struct_blocks, no_struct_schema):
    """Schemas without struct columns leave blocks untouched."""
    left, right = no_struct_blocks
    result = _align_struct_fields([left, right], no_struct_schema)
    assert result == [left, right]
def test_align_struct_fields_deep_nesting(deep_nesting_blocks, deep_nesting_schema):
    """Alignment reaches leaf fields three struct levels down."""
    blocks = _align_struct_fields(list(deep_nesting_blocks), deep_nesting_schema)
    assert len(blocks) == 2
    # Block 0's leaves gain a null 'c'; block 1's leaves gain a null 'b'.
    expected_rows = [
        [
            {"level2": {"level3": {"a": 1, "b": "x", "c": None}}},
            {"level2": {"level3": {"a": 2, "b": "y", "c": None}}},
        ],
        [
            {"level2": {"level3": {"a": 3, "b": None, "c": True}}},
            {"level2": {"level3": {"a": 4, "b": None, "c": False}}},
        ],
    ]
    for block, rows in zip(blocks, expected_rows):
        assert block.schema == deep_nesting_schema
        assert block["level1"].to_pylist() == rows
# Test fixtures for tensor-related tests
@pytest.fixture
def uniform_tensor_blocks():
    """Two single-column blocks of fixed-shape (2x2) int64 tensors."""
    first = np.arange(12).reshape((3, 2, 2))
    second = np.arange(12, 24).reshape((3, 2, 2))
    return (
        pa.table({"a": ArrowTensorArray.from_numpy(first)}),
        pa.table({"a": ArrowTensorArray.from_numpy(second)}),
    )
@pytest.fixture
def uniform_tensor_expected():
    """Expected schema/length/content for uniform tensor concatenation."""
    # The fixed-shape tensor extension type depends on the active context.
    if DataContext.get_current().use_arrow_tensor_v2:
        tensor_type = ArrowTensorTypeV2
    else:
        tensor_type = ArrowTensorType
    first = np.arange(12).reshape((3, 2, 2))
    second = np.arange(12, 24).reshape((3, 2, 2))
    return {
        "schema": pa.schema([("a", tensor_type((2, 2), pa.int64()))]),
        "length": 6,
        "chunks": 2,
        "content": [first, second],
    }
@pytest.fixture
def variable_shaped_tensor_blocks():
    """Two identical blocks of variable-shaped tensors (2x2 and 3x3 elements)."""
    def make_block():
        ragged = np.array(
            [np.arange(4).reshape((2, 2)), np.arange(4, 13).reshape((3, 3))],
            dtype=object,
        )
        return pa.table({"a": ArrowTensorArray.from_numpy(ragged)})

    return make_block(), make_block()
@pytest.fixture
def variable_shaped_tensor_expected():
    """Expected schema/length/content for variable-shaped tensor concatenation."""
    def ragged():
        return np.array(
            [np.arange(4).reshape((2, 2)), np.arange(4, 13).reshape((3, 3))],
            dtype=object,
        )

    return {
        "schema": pa.schema([("a", ArrowVariableShapedTensorType(pa.int64(), 2))]),
        "length": 4,
        "chunks": 2,
        "content": [ragged(), ragged()],
    }
@pytest.fixture
def mixed_tensor_blocks():
    """One fixed-shape tensor block plus one variable-shaped tensor block."""
    fixed = np.arange(12).reshape((3, 2, 2))
    ragged = np.array(
        [np.arange(4).reshape((2, 2)), np.arange(4, 13).reshape((3, 3))],
        dtype=object,
    )
    return (
        pa.table({"a": ArrowTensorArray.from_numpy(fixed)}),
        pa.table({"a": ArrowTensorArray.from_numpy(ragged)}),
    )
@pytest.fixture
def mixed_tensor_expected():
    """Expected result of concatenating fixed- and variable-shaped tensors."""
    ragged = np.array(
        [np.arange(4).reshape((2, 2)), np.arange(4, 13).reshape((3, 3))],
        dtype=object,
    )
    return {
        "schema": pa.schema([("a", ArrowVariableShapedTensorType(pa.int64(), 2))]),
        "length": 5,
        "chunks": 2,
        "content": [np.arange(12).reshape((3, 2, 2)), ragged],
    }
@pytest.fixture
def different_shape_tensor_blocks():
    """Fixed-shape tensor blocks with 2x2 vs 3x3 elements."""
    small = np.arange(12).reshape((3, 2, 2))
    large = np.arange(12, 39).reshape((3, 3, 3))
    return (
        pa.table({"a": ArrowTensorArray.from_numpy(small)}),
        pa.table({"a": ArrowTensorArray.from_numpy(large)}),
    )
@pytest.fixture
def different_shape_tensor_expected():
    """Expected result of concatenating differently-shaped fixed tensors."""
    return {
        "schema": pa.schema([("a", ArrowVariableShapedTensorType(pa.int64(), 2))]),
        "length": 6,
        "chunks": 2,
        "content": [
            np.arange(12).reshape((3, 2, 2)),
            np.arange(12, 39).reshape((3, 3, 3)),
        ],
    }
@pytest.fixture
def mixed_tensor_types_same_dtype_blocks():
    """Fixed-shape and variable-shaped float32 tensor blocks."""
    fixed = np.ones((2, 2), dtype=np.float32)
    ragged = np.array(
        [np.ones((3, 3), dtype=np.float32), np.zeros((1, 4), dtype=np.float32)],
        dtype=object,
    )
    return _create_tensor_blocks(fixed, ragged, "fixed", "variable")
@pytest.fixture
def mixed_tensor_types_same_dtype_expected():
    """Expected result of concatenating same-dtype mixed tensor blocks."""
    schema = _create_tensor_schema(struct_name="tensor")
    # The fixed-shape (2,) tensors become variable-shaped with a leading
    # singleton axis -> (1, 2); the ragged tensors pass through unchanged.
    tensors = [
        np.ones((1, 2), dtype=np.float32),
        np.ones((1, 2), dtype=np.float32),
        np.ones((3, 3), dtype=np.float32),
        np.zeros((1, 4), dtype=np.float32),
    ]
    return _create_expected_result(schema, 4, tensor_values=tensors)
@pytest.fixture
def mixed_tensor_types_fixed_shape_blocks():
    """Fixed-shape tensor blocks with mismatched shapes (2x2 vs 3x3)."""
    small = np.ones((2, 2), dtype=np.float32)
    large = np.zeros((3, 3), dtype=np.float32)
    return _create_tensor_blocks(
        small, large, "fixed", "fixed", id_data2=[3, 4, 5]
    )
@pytest.fixture
def mixed_tensor_types_fixed_shape_expected():
    """Expected result of concatenating mismatched fixed-shape blocks."""
    schema = _create_tensor_schema(struct_name="tensor", ndim=1)
    # All rows become variable-shaped: two (2,) rows then three (3,) rows.
    tensors = (
        [np.ones((2,), dtype=np.float32)] * 2
        + [np.zeros((3,), dtype=np.float32)] * 3
    )
    return _create_expected_result(schema, 5, tensor_values=tensors)
@pytest.fixture
def mixed_tensor_types_variable_shaped_blocks():
    """Two variable-shaped tensor blocks with disjoint element shapes."""
    first = np.array(
        [np.ones((2, 2), dtype=np.float32), np.zeros((3, 3), dtype=np.float32)],
        dtype=object,
    )
    second = np.array(
        [np.ones((1, 4), dtype=np.float32), np.zeros((2, 1), dtype=np.float32)],
        dtype=object,
    )
    return _create_tensor_blocks(first, second, "variable", "variable")
@pytest.fixture
def mixed_tensor_types_variable_shaped_expected():
    """Expected tensors preserve each input's shape after concatenation."""
    schema = _create_tensor_schema(struct_name="tensor")
    specs = [
        ((2, 2), np.ones),
        ((3, 3), np.zeros),
        ((1, 4), np.ones),
        ((2, 1), np.zeros),
    ]
    tensors = [fill(shape, dtype=np.float32) for shape, fill in specs]
    return _create_expected_result(schema, 4, tensor_values=tensors)
@pytest.fixture
def struct_with_mixed_tensor_types_blocks():
    """Struct blocks pairing a fixed-shape and a variable-shaped tensor field."""
    fixed = np.ones((2, 2), dtype=np.float32)
    ragged = np.array(
        [np.ones((3, 3), dtype=np.float32), np.zeros((1, 4), dtype=np.float32)],
        dtype=object,
    )
    return _create_struct_tensor_blocks(fixed, ragged, "fixed", "variable")
@pytest.fixture
def struct_with_mixed_tensor_types_expected():
    """Expected struct rows after concatenating the mixed-tensor struct blocks."""
    schema = _create_tensor_schema(struct_name="struct")
    # The "value" field simply counts 1..4 across both source blocks.
    struct_values = [{"value": v} for v in range(1, 5)]
    return _create_expected_result(schema, 4, struct_values=struct_values)
@pytest.fixture
def nested_struct_with_mixed_tensor_types_blocks():
    """Nested struct blocks: block 1 uses fixed-shape tensors, block 2 ragged ones."""

    def _block(ids, kind, inner_tensor, inner_values, outer_tensor, outer_values):
        # Inner struct holds a tensor plus an int; the outer struct nests it
        # alongside a second tensor and another int.
        inner = pa.StructArray.from_arrays(
            [
                _create_tensor_array(inner_tensor, kind),
                pa.array(inner_values, type=pa.int64()),
            ],
            names=["inner_tensor", "inner_value"],
        )
        outer = pa.StructArray.from_arrays(
            [
                inner,
                _create_tensor_array(outer_tensor, kind),
                pa.array(outer_values, type=pa.int64()),
            ],
            names=["nested", "outer_tensor", "outer_value"],
        )
        return pa.table({"id": ids, "complex_struct": outer})

    block1 = _block(
        [1, 2],
        "fixed",
        np.ones((2, 2), dtype=np.float32),
        [10, 20],
        np.zeros((2, 1), dtype=np.float32),
        [1, 2],
    )
    block2 = _block(
        [3, 4],
        "variable",
        np.array(
            [np.ones((3, 3), dtype=np.float32), np.zeros((1, 4), dtype=np.float32)],
            dtype=object,
        ),
        [30, 40],
        np.array(
            [np.ones((2, 2), dtype=np.float32), np.zeros((1, 3), dtype=np.float32)],
            dtype=object,
        ),
        [3, 4],
    )
    return block1, block2
@pytest.fixture
def nested_struct_with_mixed_tensor_types_expected():
    """Expected result: every nested tensor unifies to variable-shaped float32, ndim=2."""

    def _ragged():
        return ArrowVariableShapedTensorType(pa.float32(), 2)

    complex_struct_type = pa.struct(
        [
            (
                "nested",
                pa.struct(
                    [("inner_tensor", _ragged()), ("inner_value", pa.int64())]
                ),
            ),
            ("outer_tensor", _ragged()),
            ("outer_value", pa.int64()),
        ]
    )
    schema = pa.schema([("id", pa.int64()), ("complex_struct", complex_struct_type)])
    # Field names the test expects to find in the unified result.
    field_names = [
        "nested",
        "outer_tensor",
        "outer_value",
        "inner_tensor",
        "inner_value",
    ]
    return _create_expected_result(schema, 4, expected_fields=field_names)
@pytest.fixture
def multiple_tensor_fields_struct_blocks():
    """Struct blocks whose struct carries two tensor fields plus an int field."""

    def _block(ids, tensor1_data, tensor2_data, kind, values):
        struct = pa.StructArray.from_arrays(
            [
                _create_tensor_array(tensor1_data, kind),
                _create_tensor_array(tensor2_data, kind),
                pa.array(values, type=pa.int64()),
            ],
            names=["tensor1", "tensor2", "value"],
        )
        return pa.table({"id": ids, "multi_tensor_struct": struct})

    # Block 1: both tensor fields are fixed-shape.
    block1 = _block(
        [1, 2],
        np.ones((2, 2), dtype=np.float32),
        np.zeros((2, 3), dtype=np.int32),
        "fixed",
        [1, 2],
    )
    # Block 2: both tensor fields are variable-shaped.
    block2 = _block(
        [3, 4],
        np.array(
            [np.ones((3, 3), dtype=np.float32), np.zeros((1, 4), dtype=np.float32)],
            dtype=object,
        ),
        np.array(
            [np.ones((2, 2), dtype=np.int32), np.zeros((3, 1), dtype=np.int32)],
            dtype=object,
        ),
        "variable",
        [3, 4],
    )
    return block1, block2
@pytest.fixture
def multiple_tensor_fields_struct_expected():
    """Expected schema after unifying both tensor fields to variable-shaped."""
    struct_type = pa.struct(
        [
            ("tensor1", ArrowVariableShapedTensorType(pa.float32(), 2)),
            ("tensor2", ArrowVariableShapedTensorType(pa.int32(), 2)),
            ("value", pa.int64()),
        ]
    )
    schema = pa.schema([("id", pa.int64()), ("multi_tensor_struct", struct_type)])
    return _create_expected_result(
        schema, 4, expected_fields=["tensor1", "tensor2", "value"]
    )
@pytest.fixture
def struct_with_additional_fields_blocks():
    """Struct blocks where only the second block carries an extra string field."""
    fixed_tensor = np.ones((2, 2), dtype=np.float32)
    ragged_tensor = np.array(
        [np.ones((3, 3), dtype=np.float32), np.zeros((1, 4), dtype=np.float32)],
        dtype=object,
    )
    return _create_struct_tensor_blocks(
        fixed_tensor, ragged_tensor, "fixed", "variable", extra_data2=["a", "b"]
    )
@pytest.fixture
def struct_with_additional_fields_expected():
    """Rows from the block without the "extra" field get None for it."""
    schema = _create_tensor_schema(struct_name="struct", include_extra=True)
    return _create_expected_result(
        schema,
        4,
        field_presence={"tensor": True, "value": True, "extra": True},
        extra_values=[None, None, "a", "b"],
    )
@pytest.fixture
def struct_with_null_tensor_values_blocks():
    """Block 1's struct has tensor+value fields; block 2's struct lacks the tensor."""
    full_struct = pa.StructArray.from_arrays(
        [
            ArrowTensorArray.from_numpy(np.ones((2, 2), dtype=np.float32)),
            pa.array([1, 2], type=pa.int64()),
        ],
        names=["tensor", "value"],
    )
    partial_struct = pa.StructArray.from_arrays(
        [pa.array([3], type=pa.int64())], names=["value"]
    )
    block1 = pa.table({"id": [1, 2], "struct": full_struct})
    block2 = pa.table({"id": [3], "struct": partial_struct})
    return block1, block2
@pytest.fixture
def struct_with_null_tensor_values_expected():
    """Missing tensor field should surface as a null tensor in the third row."""
    struct_type = pa.struct(
        [
            ("tensor", ArrowTensorTypeV2((2,), pa.float32())),
            ("value", pa.int64()),
        ]
    )
    return {
        "schema": pa.schema([("id", pa.int64()), ("struct", struct_type)]),
        "length": 3,
        "ids": [1, 2, 3],
        "values": [1, 2, 3],
        # Only the row coming from the tensor-less block is invalid.
        "tensor_validity": [True, True, False],
    }
@pytest.fixture
def basic_concat_blocks():
    """Two plain two-column tables for the simplest concat case."""
    return [
        pa.table({"a": [1, 2], "b": [5, 6]}),
        pa.table({"a": [3, 4], "b": [7, 8]}),
    ]
@pytest.fixture
def basic_concat_expected():
    """Expected outcome of concatenating the two basic blocks."""
    expected = {
        "length": 4,
        "column_names": ["a", "b"],
        "schema_types": [pa.int64(), pa.int64()],
        "chunks": 2,
    }
    expected["content"] = {"a": [1, 2, 3, 4], "b": [5, 6, 7, 8]}
    return expected
@pytest.fixture
def null_promotion_blocks():
    """Blocks where each table has an all-null column the other populates."""
    return [
        pa.table({"a": [None, None], "b": [5, 6]}),
        pa.table({"a": [3, 4], "b": [None, None]}),
    ]
@pytest.fixture
def null_promotion_expected():
    """Nulls stay in place while both columns promote to int64."""
    content = {"a": [None, None, 3, 4], "b": [5, 6, None, None]}
    return {
        "length": 4,
        "column_names": ["a", "b"],
        "schema_types": [pa.int64(), pa.int64()],
        "chunks": 2,
        "content": content,
    }
@pytest.fixture
def struct_different_field_names_blocks():
    """Struct blocks whose field sets differ: {x, y} vs {x, z}."""
    type_xy = pa.struct([("x", pa.int32()), ("y", pa.string())])
    type_xz = pa.struct([("x", pa.int32()), ("z", pa.string())])
    return _create_struct_blocks_with_columns(
        [{"x": 1, "y": "a"}, {"x": 2, "y": "b"}],
        [{"x": 3, "z": "c"}],
        type_xy,
        type_xz,
        {"a": [1, 2]},
        {"a": [3]},
    )
@pytest.fixture
def struct_different_field_names_expected():
    """Union of struct fields {x, y, z}; fields absent in a row become None."""
    schema = _create_simple_struct_schema(
        ["x", "y", "z"],
        [pa.int32(), pa.string(), pa.string()],
        [("a", pa.int64())],
    )
    content = {
        "a": [1, 2, 3],
        "d": [
            {"x": 1, "y": "a", "z": None},
            {"x": 2, "y": "b", "z": None},
            {"x": 3, "y": None, "z": "c"},
        ],
    }
    return _create_struct_expected_result(schema, 3, content)
@pytest.fixture
def nested_structs_blocks():
    """Blocks whose doubly nested structs carry complementary inner fields.

    Block 1's inner structs only define fields "p"/"m"; block 2's only define
    "q"/"n", so unification has to merge the nested field sets.
    """

    def _block(a_value, y_field, y_value, z_field, z_value, w_value):
        nested_type = pa.struct(
            [
                (
                    "x",
                    pa.struct(
                        [
                            ("y", pa.struct([(y_field, pa.int32())])),
                            ("z", pa.struct([(z_field, pa.int32())])),
                        ]
                    ),
                ),
                ("w", pa.int32()),
            ]
        )
        row = {
            "x": {"y": {y_field: y_value}, "z": {z_field: z_value}},
            "w": w_value,
        }
        return pa.table({"a": [a_value], "d": pa.array([row], type=nested_type)})

    return [
        _block(1, "p", 1, "m", 3, 5),
        _block(2, "q", 7, "n", 9, 10),
    ]
@pytest.fixture
def nested_structs_expected():
    """Unified nested structs: every inner field present, missing ones None."""
    inner_y = pa.struct([("p", pa.int32()), ("q", pa.int32())])
    inner_z = pa.struct([("m", pa.int32()), ("n", pa.int32())])
    d_type = pa.struct(
        [
            ("x", pa.struct([("y", inner_y), ("z", inner_z)])),
            ("w", pa.int32()),
        ]
    )
    return {
        "length": 2,
        "schema": pa.schema([("a", pa.int64()), ("d", d_type)]),
        "content": {
            "a": [1, 2],
            "d": [
                # Row 1 originally lacked "q"/"n"; row 2 lacked "p"/"m".
                {
                    "x": {"y": {"p": 1, "q": None}, "z": {"m": 3, "n": None}},
                    "w": 5,
                },
                {
                    "x": {"y": {"p": None, "q": 7}, "z": {"m": None, "n": 9}},
                    "w": 10,
                },
            ],
        },
    }
@pytest.fixture
def struct_null_values_blocks():
    """Blocks where individual struct rows (or all of them) are null."""
    return _create_simple_struct_blocks(
        [{"x": 1, "y": "a"}, None],  # second row is a null struct
        [None],  # the only row is a null struct
        ["x", "y"],
        [pa.int32(), pa.string()],
        {"a": [1, 2]},
        {"a": [3]},
    )
@pytest.fixture
def struct_null_values_expected():
    """Null struct rows stay None rather than becoming a struct of null fields."""
    schema = _create_simple_struct_schema(
        ["x", "y"], [pa.int32(), pa.string()], [("a", pa.int64())]
    )
    content = {
        "a": [1, 2, 3],
        "d": [{"x": 1, "y": "a"}, None, None],
    }
    return _create_struct_expected_result(schema, 3, content)
@pytest.fixture
def struct_mismatched_lengths_blocks():
    """Struct blocks of unequal row counts (two rows vs one row)."""
    return _create_simple_struct_blocks(
        [{"x": 1, "y": "a"}, {"x": 2, "y": "b"}],
        [{"x": 3, "y": "c"}],
        ["x", "y"],
        [pa.int32(), pa.string()],
        {"a": [1, 2]},
        {"a": [3]},
    )
@pytest.fixture
def struct_mismatched_lengths_expected():
    """All three struct rows survive the concat, in block order."""
    schema = _create_simple_struct_schema(
        ["x", "y"], [pa.int32(), pa.string()], [("a", pa.int64())]
    )
    rows = [{"x": i, "y": c} for i, c in zip((1, 2, 3), "abc")]
    return _create_struct_expected_result(schema, 3, {"a": [1, 2, 3], "d": rows})
@pytest.fixture
def struct_empty_arrays_blocks():
    """Second block's only struct row is null, built from null fields plus a mask."""
    struct_type = pa.struct([("x", pa.int32()), ("y", pa.string())])
    populated = pa.table(
        {
            "a": [1, 2],
            "d": pa.array(
                [{"x": 1, "y": "a"}, {"x": 2, "y": "b"}], type=struct_type
            ),
        }
    )
    # One-row struct whose field arrays are null and whose validity mask marks
    # the row itself as null.
    null_struct = pa.StructArray.from_arrays(
        [pa.array([None], type=pa.int32()), pa.array([None], type=pa.string())],
        ["x", "y"],
        mask=pa.array([True]),
    )
    return [populated, pa.table({"a": [3], "d": null_struct})]
@pytest.fixture
def struct_empty_arrays_expected():
    """The masked struct row should come through as None after concat."""
    schema = _create_simple_struct_schema(
        ["x", "y"], [pa.int32(), pa.string()], [("a", pa.int64())]
    )
    content = {
        "a": [1, 2, 3],
        "d": [{"x": 1, "y": "a"}, {"x": 2, "y": "b"}, None],
    }
    return _create_struct_expected_result(schema, 3, content)
@pytest.fixture
def unify_schemas_basic_schemas():
    """Single-column tensor schemas used by the basic unify-schemas tests."""

    def _fixed(shape):
        return pa.schema([("tensor_arr", ArrowTensorType(shape, pa.int32()))])

    def _variable(ndim):
        return pa.schema(
            [("tensor_arr", ArrowVariableShapedTensorType(pa.int32(), ndim))]
        )

    return {
        "tensor_arr_1": _fixed((3, 5)),
        "tensor_arr_2": _fixed((2, 1)),
        "tensor_arr_3": _fixed((3, 5)),
        "var_tensor_arr": _variable(2),
        "var_tensor_arr_1d": _variable(1),
        "var_tensor_arr_3d": _variable(3),
    }
@pytest.fixture
def unify_schemas_multicol_schemas():
    """Multi-column schemas mixing fixed and variable-shaped tensor columns."""

    def _schema(fixed_tensor_type, var_tensor_type):
        return pa.schema(
            [
                ("col_int", pa.int32()),
                ("col_fixed_tensor", fixed_tensor_type),
                ("col_var_tensor", var_tensor_type),
            ]
        )

    return {
        "multicol_schema_1": _schema(
            ArrowTensorType((4, 2), pa.int32()),
            ArrowVariableShapedTensorType(pa.int16(), 5),
        ),
        "multicol_schema_2": _schema(
            ArrowTensorType((4, 2), pa.int32()),
            ArrowTensorType((9, 4, 1, 0, 5), pa.int16()),
        ),
        "multicol_schema_3": _schema(
            ArrowVariableShapedTensorType(pa.int32(), 3),
            ArrowVariableShapedTensorType(pa.int16(), 5),
        ),
    }
@pytest.fixture
def object_concat_blocks():
    """One plain-int block plus one whose "a" column holds Python objects."""
    payload = types.SimpleNamespace(a=1, b="test")
    plain = pa.table({"a": [3, 4], "b": [7, 8]})
    objects = pa.table(
        {"a": ArrowPythonObjectArray.from_objects([payload, payload]), "b": [0, 1]}
    )
    return [plain, objects]
@pytest.fixture
def object_concat_expected():
    """Concat should promote column "a" to the Python-object extension type."""
    payload = types.SimpleNamespace(a=1, b="test")
    return {
        "length": 4,
        "a_type": ArrowPythonObjectType,
        "b_type": pa.types.is_integer,
        "content": {"a": [3, 4, payload, payload], "b": [7, 8, 0, 1]},
    }
@pytest.fixture
def struct_variable_shaped_tensor_blocks():
    """Two blocks whose struct column pairs string metadata with a ragged tensor."""

    def _block(ids, labels, ragged_tensors):
        tensor_arr = ArrowVariableShapedTensorArray.from_numpy(
            np.array(ragged_tensors, dtype=object)
        )
        struct_arr = pa.StructArray.from_arrays(
            [pa.array(labels), tensor_arr], names=["metadata", "tensor"]
        )
        return pa.table({"id": ids, "struct_with_tensor": struct_arr})

    return [
        _block(
            [1, 2],
            ["row1", "row2"],
            [
                np.ones((2, 2), dtype=np.float32),
                np.zeros((3, 3), dtype=np.float32),
            ],
        ),
        _block(
            [3, 4],
            ["row3", "row4"],
            [
                np.ones((1, 4), dtype=np.float32),
                np.zeros((2, 1), dtype=np.float32),
            ],
        ),
    ]
@pytest.fixture
def struct_variable_shaped_tensor_expected():
    """Expected schema keeps the variable-shaped tensor inside the struct."""
    struct_type = pa.struct(
        [
            ("metadata", pa.string()),
            ("tensor", ArrowVariableShapedTensorType(pa.float32(), 2)),
        ]
    )
    return {
        "length": 4,
        "schema": pa.schema(
            [("id", pa.int64()), ("struct_with_tensor", struct_type)]
        ),
        "content": {"id": [1, 2, 3, 4]},
    }
@pytest.fixture
def unify_schemas_object_types_schemas():
    """Schemas mixing the object extension type with primitive column types."""
    from ray.data._internal.object_extensions.arrow import ArrowPythonObjectType

    return {
        "object_schema": pa.schema([("obj_col", ArrowPythonObjectType())]),
        "int_schema": pa.schema([("obj_col", pa.int32())]),
        "float_schema": pa.schema([("obj_col", pa.float64())]),
        # Unifying an object column with primitives yields the object type.
        "expected": pa.schema([("obj_col", ArrowPythonObjectType())]),
    }
@pytest.fixture
def unify_schemas_incompatible_tensor_schemas():
    """Same-shape tensor schemas whose dtypes (int32 vs float32) conflict."""
    return [
        pa.schema([("tensor", ArrowTensorType((2, 2), pa.int32()))]),
        pa.schema([("tensor", ArrowTensorType((2, 2), pa.float32()))]),
    ]
@pytest.fixture
def unify_schemas_objects_and_tensors_schemas():
    """An object-typed column against a tensor-typed column of the same name."""
    from ray.data._internal.object_extensions.arrow import ArrowPythonObjectType

    return [
        pa.schema([("col", ArrowPythonObjectType())]),
        pa.schema([("col", ArrowTensorType((2, 2), pa.int32()))]),
    ]
@pytest.fixture
def unify_schemas_missing_tensor_fields_schemas():
    """One schema's struct has a tensor field; the other omits it entirely."""

    def _schema(fields):
        return pa.schema([("struct", pa.struct(fields))])

    tensor_field = ("tensor", ArrowTensorType((2, 2), pa.int32()))
    value_field = ("value", pa.int64())
    return {
        "with_tensor": _schema([tensor_field, value_field]),
        "without_tensor": _schema([value_field]),
        # Unification should reintroduce the missing tensor field.
        "expected": _schema([tensor_field, value_field]),
    }
@pytest.fixture
def unify_schemas_nested_struct_tensors_schemas():
    """Nested structs where the inner struct may or may not carry a tensor field."""

    def _schema(inner_fields):
        return pa.schema(
            [
                (
                    "outer",
                    pa.struct(
                        [("inner", pa.struct(inner_fields)), ("id", pa.int64())]
                    ),
                )
            ]
        )

    tensor_field = ("tensor", ArrowTensorType((3, 3), pa.float32()))
    data_field = ("data", pa.string())
    return {
        "with_tensor": _schema([tensor_field, data_field]),
        "without_tensor": _schema([data_field]),
        # Unification should restore the tensor field inside the inner struct.
        "expected": _schema([tensor_field, data_field]),
    }
@pytest.fixture
def object_with_tensor_fails_blocks():
    """Object-typed column vs tensor-typed column: concatenation must fail."""
    payload = types.SimpleNamespace(a=1, b="test")
    object_block = pa.table(
        {"a": ArrowPythonObjectArray.from_objects([payload, payload])}
    )
    tensor_block = pa.table(
        {"a": ArrowTensorArray.from_numpy(np.array([[1, 2], [3, 4]]))}
    )
    return [object_block, tensor_block]
@pytest.fixture
def simple_concat_data():
    """Degenerate concat inputs: no blocks at all, and a single block."""
    return {
        "empty": [],
        "single_block": pa.table({"a": [1, 2]}),
    }
# Helper function for creating tensor arrays
def _create_tensor_array(data, tensor_type="fixed"):
    """Build an Arrow tensor array of the requested kind ("fixed" or "variable")."""
    factories = {
        "fixed": ArrowTensorArray.from_numpy,
        "variable": ArrowVariableShapedTensorArray.from_numpy,
    }
    try:
        factory = factories[tensor_type]
    except KeyError:
        raise ValueError(f"Unknown tensor type: {tensor_type}")
    return factory(data)
# Helper function for creating expected results
def _create_expected_result(schema, length, **kwargs):
"""Helper function to create expected result dictionaries."""
result = {"schema": schema, "length": length}
result.update(kwargs)
return result
# Helper function for creating tensor blocks
def _create_tensor_blocks(
    tensor_data1,
    tensor_data2,
    tensor_type1="fixed",
    tensor_type2="variable",
    id_data1=None,
    id_data2=None,
    column_name="tensor",
):
    """Build two tables, each with an "id" column and one tensor column.

    Args:
        tensor_data1/tensor_data2: Numpy payloads for each block's tensor column.
        tensor_type1/tensor_type2: "fixed" or "variable" encoding per block.
        id_data1/id_data2: "id" column data; default to [1, 2] and [3, 4].
        column_name: Name of the tensor column in both tables.
    """
    ids1 = [1, 2] if id_data1 is None else id_data1
    ids2 = [3, 4] if id_data2 is None else id_data2
    block1 = pa.table(
        {"id": ids1, column_name: _create_tensor_array(tensor_data1, tensor_type1)}
    )
    block2 = pa.table(
        {"id": ids2, column_name: _create_tensor_array(tensor_data2, tensor_type2)}
    )
    return block1, block2
# Helper function for creating struct blocks with tensors
def _create_struct_tensor_blocks(
    tensor_data1,
    tensor_data2,
    tensor_type1="fixed",
    tensor_type2="variable",
    value_data1=None,
    value_data2=None,
    extra_data2=None,
    struct_name="struct",
    id_data1=None,
    id_data2=None,
):
    """Build two tables whose struct column wraps a tensor field and an int field.

    Args:
        tensor_data1/tensor_data2: Numpy payloads for each block's tensor field.
        tensor_type1/tensor_type2: "fixed" or "variable" encoding per block.
        value_data1/value_data2: Int64 "value" data; default to [1, 2] and [3, 4].
        extra_data2: Optional string data; when given, block 2's struct gains an
            additional "extra" field (block 1 never has it).
        struct_name: Name of the struct column in both tables.
        id_data1/id_data2: "id" column data; default to [1, 2] and [3, 4].

    Returns:
        Tuple of two ``pa.Table`` blocks.
    """
    if value_data1 is None:
        value_data1 = [1, 2]
    if value_data2 is None:
        value_data2 = [3, 4]
    if id_data1 is None:
        id_data1 = [1, 2]
    if id_data2 is None:
        id_data2 = [3, 4]
    tensor_array1 = _create_tensor_array(tensor_data1, tensor_type1)
    tensor_array2 = _create_tensor_array(tensor_data2, tensor_type2)
    value_array1 = pa.array(value_data1, type=pa.int64())
    value_array2 = pa.array(value_data2, type=pa.int64())
    # Block 1 always has the same (tensor, value) layout; only block 2 varies,
    # so build block 1's struct once instead of duplicating it per branch.
    struct_array1 = pa.StructArray.from_arrays(
        [tensor_array1, value_array1], names=["tensor", "value"]
    )
    arrays2 = [tensor_array2, value_array2]
    names2 = ["tensor", "value"]
    if extra_data2 is not None:
        # The optional "extra" field lets tests exercise mismatched struct fields.
        arrays2.append(pa.array(extra_data2, type=pa.string()))
        names2.append("extra")
    struct_array2 = pa.StructArray.from_arrays(arrays2, names=names2)
    t1 = pa.table({"id": id_data1, struct_name: struct_array1})
    t2 = pa.table({"id": id_data2, struct_name: struct_array2})
    return t1, t2
# Helper function for creating expected tensor schemas
def _create_tensor_schema(
    tensor_type=ArrowVariableShapedTensorType,
    dtype=pa.float32(),
    ndim=2,
    include_id=True,
    struct_name="struct",
    include_extra=False,
):
    """Build an expected schema containing a tensor column, optionally in a struct.

    When ``struct_name`` is "struct", the tensor is nested inside a struct with a
    "value" field (and optionally an "extra" field); otherwise the tensor is a
    top-level column named "tensor".
    """
    tensor_field = ("tensor", tensor_type(dtype, ndim))
    fields = [("id", pa.int64())] if include_id else []
    if struct_name == "struct":
        struct_fields = [tensor_field, ("value", pa.int64())]
        if include_extra:
            struct_fields.append(("extra", pa.string()))
        fields.append((struct_name, pa.struct(struct_fields)))
    else:
        fields.append(tensor_field)
    return pa.schema(fields)
# Helper function for creating basic struct blocks
def _create_basic_struct_blocks(
    struct_data1,
    struct_data2,
    column_name="struct",
    id_data1=None,
    id_data2=None,
    other_columns=None,
):
    """Build two tables with a struct column plus optional id/other columns.

    ``other_columns`` maps "t1"/"t2" to per-table extra column dicts.
    """

    def _table(struct_data, id_data, table_key):
        data = {column_name: pa.array(struct_data)}
        # Only add an id column when data for it was provided.
        if id_data is not None:
            data["id"] = id_data
        if other_columns:
            data.update(other_columns.get(table_key, {}))
        return pa.table(data)

    return _table(struct_data1, id_data1, "t1"), _table(struct_data2, id_data2, "t2")
# Helper function for creating struct schemas
def _create_struct_schema(struct_fields, include_id=True, other_fields=None):
    """Build a schema with an optional "id" column, a "struct" column, and extras."""
    fields = [("id", pa.int64())] if include_id else []
    fields.append(("struct", pa.struct(struct_fields)))
    fields.extend(other_fields or [])
    return pa.schema(fields)
# Helper function for creating struct blocks with additional columns
def _create_struct_blocks_with_columns(
    struct_data1,
    struct_data2,
    struct_type1,
    struct_type2,
    additional_columns1=None,
    additional_columns2=None,
    struct_column="d",
):
    """Build two tables, each with leading extra columns and a typed struct column."""

    def _table(extra_columns, struct_data, struct_type):
        # Extra columns go first so the resulting column order matches the
        # expected-result fixtures.
        data = dict(extra_columns) if extra_columns else {}
        data[struct_column] = pa.array(struct_data, type=struct_type)
        return pa.table(data)

    return (
        _table(additional_columns1, struct_data1, struct_type1),
        _table(additional_columns2, struct_data2, struct_type2),
    )
# Helper function for creating expected results for struct tests
def _create_struct_expected_result(schema, length, content):
"""Helper function to create expected results for struct tests."""
return {
"length": length,
"schema": schema,
"content": content,
}
# Helper function for creating struct blocks with simple field patterns
def _create_simple_struct_blocks(
    struct_data1,
    struct_data2,
    field_names,
    field_types,
    additional_columns1=None,
    additional_columns2=None,
    struct_column="d",
):
    """Build two struct blocks sharing one struct type built from name/type lists."""
    shared_type = pa.struct(list(zip(field_names, field_types)))
    return _create_struct_blocks_with_columns(
        struct_data1,
        struct_data2,
        shared_type,
        shared_type,
        additional_columns1,
        additional_columns2,
        struct_column,
    )
# Helper function for creating simple struct schemas
def _create_simple_struct_schema(field_names, field_types, additional_fields=None):
    """Build a schema of optional leading fields plus a "d" struct column."""
    fields = list(additional_fields) if additional_fields else []
    fields.append(("d", pa.struct(list(zip(field_names, field_types)))))
    return pa.schema(fields)
@pytest.fixture
def unify_schemas_edge_cases_data():
    """Edge cases for unify_schemas: empty input, single schema, disjoint and null columns."""
    return {
        "empty_schemas": [],
        "single_schema": pa.schema([("col", pa.int32())]),
        "no_common_columns": {
            "schema1": pa.schema([("col1", pa.int32())]),
            "schema2": pa.schema([("col2", pa.string())]),
            "expected": pa.schema([("col1", pa.int32()), ("col2", pa.string())]),
        },
        "all_null_schemas": {
            "schema1": pa.schema([("col", pa.null())]),
            "schema2": pa.schema([("col", pa.null())]),
        },
    }
@pytest.fixture
def unify_schemas_mixed_tensor_data():
    """Fixed vs variable-shaped tensor schemas and the expected unified form."""
    return {
        "fixed_shape": pa.schema([("tensor", ArrowTensorType((2, 2), pa.int32()))]),
        "variable_shaped": pa.schema(
            [("tensor", ArrowVariableShapedTensorType(pa.int32(), 2))]
        ),
        "different_shape": pa.schema(
            [("tensor", ArrowTensorType((3, 3), pa.int32()))]
        ),
        # Mixing fixed shapes should unify to the variable-shaped form.
        "expected_variable": pa.schema(
            [("tensor", ArrowVariableShapedTensorType(pa.int32(), 2))]
        ),
    }
@pytest.fixture
def unify_schemas_type_promotion_data():
    """Schemas exercising nullability and numeric promotion of column "A"."""
    variants = {
        "non_null": pa.field("A", pa.int32()),
        "nullable": pa.field("A", pa.int32(), nullable=True),
        "int64": pa.field("A", pa.int64()),
        "float64": pa.field("A", pa.float64()),
    }
    return {name: pa.schema([field]) for name, field in variants.items()}
@pytest.fixture
def block_select_data():
    """A small table plus expected schemas for single/multi column selection."""
    frame = pd.DataFrame(
        {"one": [10, 11, 12], "two": [11, 12, 13], "three": [14, 15, 16]}
    )
    return {
        "table": pa.Table.from_pandas(frame),
        "df": frame,
        "single_column": {
            "columns": ["two"],
            "expected_schema": pa.schema([("two", pa.int64())]),
        },
        "multiple_columns": {
            "columns": ["two", "one"],
            "expected_schema": pa.schema([("two", pa.int64()), ("one", pa.int64())]),
        },
    }
@pytest.fixture
def block_slice_data():
    """Tables for slice tests: a 20-row table and an empty one."""
    row_count = 20
    frame = pd.DataFrame(
        {
            "one": list(range(row_count)),
            "two": ["a"] * row_count,
            # First value is NaN so slicing covers a null float as well.
            "three": [np.nan] + [1.5] * (row_count - 1),
        }
    )
    empty_frame = pd.DataFrame({"one": []})
    return {
        "normal": {
            "table": pa.Table.from_pandas(frame),
            "df": frame,
            "slice_params": {"a": 5, "b": 10},
        },
        "empty": {
            "table": pa.Table.from_pandas(empty_frame),
            "slice_params": {"a": 0, "b": 0},
        },
    }
if __name__ == "__main__":
    # Allow running this test module directly; exit with pytest's status code.
    import sys
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/unit/test_transform_pyarrow.py",
"license": "Apache License 2.0",
"lines": 2534,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/serve/_private/tracing_utils.py | import inspect
import os
import threading
from contextvars import ContextVar, Token
from functools import wraps
from typing import Any, Callable, Dict, List, Optional
from ray._common.utils import import_attr
from ray.serve._private.constants import (
DEFAULT_TRACING_EXPORTER_IMPORT_PATH,
RAY_SERVE_TRACING_EXPORTER_IMPORT_PATH,
RAY_SERVE_TRACING_SAMPLING_RATIO,
)
try:
from opentelemetry import trace
from opentelemetry.context import attach, detach, get_current
from opentelemetry.sdk.trace import SpanProcessor, TracerProvider
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor
from opentelemetry.sdk.trace.sampling import ParentBasedTraceIdRatio
from opentelemetry.semconv.trace import SpanAttributes
from opentelemetry.trace import SpanKind
from opentelemetry.trace.propagation import set_span_in_context
from opentelemetry.trace.propagation.tracecontext import (
TraceContextTextMapPropagator,
)
from opentelemetry.trace.status import Status, StatusCode
except ImportError:
SpanProcessor = None
ConsoleSpanExporter = None
SimpleSpanProcessor = None
trace = None
SpanKind = None
TracerProvider = None
TraceIdRatioBased = None
Status = None
StatusCode = None
set_span_in_context = None
TraceContextTextMapPropagator = None
get_current = None
attach = None
detach = None
SpanAttributes = None
ParentBasedTraceIdRatio = None
TRACE_STACK: ContextVar[List[Any]] = ContextVar(
"trace_stack"
) # Create tracer once at module level
_tracer = None
_tracer_lock = threading.Lock()
def get_tracer():
global _tracer
if _tracer is None:
with _tracer_lock:
if _tracer is None:
_tracer = trace.get_tracer(__name__)
return _tracer
# Default tracing exporter needs to map to DEFAULT_TRACING_EXPORTER_IMPORT_PATH
# defined in "python/ray/serve/_private/constants.py"
def default_tracing_exporter(tracing_file_name):
from ray.serve._private.logging_utils import get_serve_logs_dir
serve_logs_dir = get_serve_logs_dir()
spans_dir = os.path.join(serve_logs_dir, "spans")
os.makedirs(spans_dir, exist_ok=True)
spans_file = os.path.join(spans_dir, tracing_file_name)
return [SimpleSpanProcessor(ConsoleSpanExporter(out=open(spans_file, "a")))]
class TraceContextManager:
    """Context manager that starts a span named ``trace_name`` and makes it
    the current span for the duration of the ``with`` block.

    If tracing is disabled this is a no-op. If the created span is not
    sampled, the span is still started and ended, but it is not set as the
    current context and not pushed onto the trace stack.
    """

    def __init__(
        self, trace_name, span_kind=None, trace_context: Optional[Dict[str, str]] = None
    ):
        self.span = None
        self.trace_name = trace_name
        self.span_kind = span_kind
        self.trace_context = trace_context
        self.is_tracing_enabled = is_tracing_enabled()
        # Tracks whether __enter__ pushed this span onto TRACE_STACK so that
        # __exit__ only pops what was actually pushed.
        self._pushed = False

    def __enter__(self):
        if self.is_tracing_enabled:
            self.span_kind = self.span_kind or SpanKind.SERVER
            tracer = get_tracer()
            ctx = self.trace_context if self.trace_context else get_trace_context()
            self.span = tracer.start_span(
                self.trace_name,
                kind=self.span_kind,
                context=ctx,
            )
            # Unsampled spans are not recorded: skip context/stack bookkeeping.
            if not self.span.get_span_context().trace_flags.sampled:
                return self
            new_ctx = set_span_in_context(self.span)
            set_trace_context(new_ctx)
            _append_trace_stack(self.span)
            self._pushed = True
            set_span_name(self.trace_name)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if self.is_tracing_enabled and self.span is not None:
            # if exc_type is not None, we have made a explicit decision
            # to not set the span status to error. This is because
            # errors are spans internal to Ray Serve and should not
            # be reported as errors in the trace. They cause noise
            # in the trace and are not meaningful to the user.
            self.span.end()
            # BUGFIX: only pop the stack if __enter__ pushed this span.
            # Previously an unsampled span (which returned early without
            # pushing) would pop an *outer* span's stack entry here.
            if self._pushed:
                _pop_trace_stack()
        return False
class BatchTraceContextManager:
    """Attach/detach a tracing context around a block to scope the span of a batch."""

    def __init__(self, trace_context: Optional[object]):
        self._trace_context = trace_context
        # Only active when tracing is on and a context was actually supplied.
        self._enabled = is_tracing_enabled() and trace_context is not None
        self._token: Optional[Token] = None

    def __enter__(self):
        if not self._enabled:
            return self
        self._token = set_trace_context(self._trace_context)
        return self

    def __exit__(self, exc_type, exc, tb):
        token = self._token
        if self._enabled and token is not None:
            detach_trace_context(token)
        return False
def tracing_decorator_factory(
    trace_name: str, span_kind: Optional[Any] = None
) -> Callable:
    """Create a decorator that wraps a callable in a distributed-tracing span.

    Args:
        trace_name: The name of the trace.
        span_kind: The kind of span to create
            (e.g., SERVER, CLIENT). Defaults to trace.SpanKind.SERVER.

    Returns:
        Callable: A decorator that instruments sync/async functions and
        generators with distributed tracing.

    Example Usage:
        ```python
        @tracing_decorator_factory(
            "my_trace",
            span_kind=trace.SpanKind.CLIENT,
        )
        def my_function(obj):
            # Function implementation
        ```
    """

    def tracing_decorator(func):
        # When tracing is off, leave the function completely untouched.
        if not is_tracing_enabled():
            return func

        @wraps(func)
        def sync_wrapper(*args, **kwargs):
            with TraceContextManager(trace_name, span_kind):
                return func(*args, **kwargs)

        @wraps(func)
        def sync_gen_wrapper(*args, **kwargs):
            with TraceContextManager(trace_name, span_kind):
                for item in func(*args, **kwargs):
                    yield item

        @wraps(func)
        async def async_wrapper(*args, **kwargs):
            with TraceContextManager(trace_name, span_kind):
                return await func(*args, **kwargs)

        @wraps(func)
        async def async_gen_wrapper(*args, **kwargs):
            with TraceContextManager(trace_name, span_kind):
                async for item in func(*args, **kwargs):
                    yield item

        # Pick the wrapper matching the callable's flavor so the span covers
        # the full iteration for generators, not just the first call.
        if _is_async_function(func):
            return async_gen_wrapper if _is_generator_function(func) else async_wrapper
        return sync_gen_wrapper if _is_generator_function(func) else sync_wrapper

    return tracing_decorator
def setup_tracing(
    component_name: str,
    component_id: str,
    component_type: Optional["ServeComponentType"] = None,  # noqa: F821
    tracing_exporter_import_path: Optional[
        str
    ] = RAY_SERVE_TRACING_EXPORTER_IMPORT_PATH,
    tracing_sampling_ratio: Optional[float] = RAY_SERVE_TRACING_SAMPLING_RATIO,
) -> bool:
    """
    Set up tracing for a specific Serve component.

    Args:
        component_name: The name of the component.
        component_id: The unique identifier of the component.
        component_type: The type of the component.
        tracing_exporter_import_path: Path to tracing exporter function.
        tracing_sampling_ratio: Sampling ratio for traces (0.0 to 1.0).

    Returns:
        bool: True if tracing setup is successful, False otherwise.

    Raises:
        ImportError: if the opentelemetry packages are not installed.
    """
    # An empty exporter path means tracing is disabled for this component.
    if tracing_exporter_import_path == "":
        return False

    # Tracing requires the optional opentelemetry dependencies.
    if not trace:
        raise ImportError(
            "You must `pip install opentelemetry-api` and "
            "`pip install opentelemetry-sdk` "
            "to enable tracing on Ray Serve."
        )

    from ray.serve._private.utils import get_component_file_name

    tracing_file_name = get_component_file_name(
        component_name=component_name,
        component_id=component_id,
        component_type=component_type,
        suffix="_tracing.json",
    )
    span_processors = _load_span_processors(
        tracing_exporter_import_path, tracing_file_name
    )

    # Initialize tracing. set_tracer_provider is only allowed once~ per
    # execution context and will log a warning if attempted multiple times.
    # ParentBasedTraceIdRatio respects the parent span's sampling decision and
    # otherwise samples probabilistically at tracing_sampling_ratio.
    sampler = ParentBasedTraceIdRatio(tracing_sampling_ratio)
    trace.set_tracer_provider(TracerProvider(sampler=sampler))
    for processor in span_processors:
        trace.get_tracer_provider().add_span_processor(processor)
    return True
def create_propagated_context() -> Dict[str, str]:
    """Create context that can be used across services and processes.

    Serializes the current trace context into a plain dict so it can cross
    actor/task boundaries.

    Returns:
        A dictionary containing the propagated trace context if available,
        otherwise None.
    """
    current = get_trace_context()
    if not current or not TraceContextTextMapPropagator:
        return None
    carrier = {}
    TraceContextTextMapPropagator().inject(carrier, current)
    return carrier
def extract_propagated_context(
    propagated_context: Optional[Dict[str, str]] = None
) -> Optional[Dict[str, str]]:
    """Extract the trace context from a Trace Context Propagator carrier dict."""
    if not is_tracing_enabled():
        return None
    if not propagated_context or not TraceContextTextMapPropagator:
        return None
    return TraceContextTextMapPropagator().extract(carrier=propagated_context)
def set_trace_context(trace_context: Dict[str, str]) -> Optional[Token]:
    """Attach *trace_context* as the current context; returns a detach token."""
    # No-op when opentelemetry is not installed.
    if attach is None:
        return None
    return attach(trace_context)
def detach_trace_context(token: Token):
    """Detach the trace context previously attached for *token*."""
    # No-op when opentelemetry is not installed.
    if detach is None:
        return
    detach(token)
def get_trace_context() -> Optional[Dict[str, str]]:
    """Retrieve the current trace context (None when empty or unavailable)."""
    if get_current is None:
        return None
    # An empty context is normalized to None.
    return get_current() or None
def set_span_name(name: str):
    """Set the name for the current (innermost) span in context.

    No-op when there is no active span on the trace stack.
    """
    # NOTE: the previous `if TRACE_STACK:` guard was a no-op — a ContextVar
    # object is always truthy; only the stack *contents* matter.
    trace_stack = TRACE_STACK.get([])
    if trace_stack:
        trace_stack[-1].update_name(name)
        # this is added specifically for Datadog tracing.
        # See https://docs.datadoghq.com/tracing/guide/configuring-primary-operation/#opentracing
        set_span_attributes({"resource.name": name})
def set_rpc_span_attributes(
    system: str = "grpc",
    method: Optional[str] = None,
    status_code: Optional[str] = None,
    service: Optional[str] = None,
):
    """
    Use this function to set attributes for RPC spans.

    Only include attributes that are in the OpenTelemetry
    RPC span attributes spec https://opentelemetry.io/docs/specs/semconv/attributes-registry/rpc/.
    """
    if not is_tracing_enabled():
        return
    set_span_attributes(
        {
            SpanAttributes.RPC_SYSTEM: system,
            SpanAttributes.RPC_METHOD: method,
            SpanAttributes.RPC_GRPC_STATUS_CODE: status_code,
            SpanAttributes.RPC_SERVICE: service,
        }
    )
def set_http_span_attributes(
    method: Optional[str] = None,
    status_code: Optional[str] = None,
    route: Optional[str] = None,
):
    """
    Use this function to set attributes for HTTP spans.

    Only include attributes that are in the OpenTelemetry
    HTTP span attributes spec https://opentelemetry.io/docs/specs/semconv/attributes-registry/http/.
    """
    if not is_tracing_enabled():
        return
    set_span_attributes(
        {
            SpanAttributes.HTTP_METHOD: method,
            SpanAttributes.HTTP_STATUS_CODE: status_code,
            SpanAttributes.HTTP_ROUTE: route,
        }
    )
def set_span_attributes(attributes: Dict[str, Any]):
    """Set attributes on the current (innermost) span in context.

    Attribute values that are None are filtered out, otherwise they show up
    as warning logs on the console. No-op when the trace stack is empty.
    """
    # NOTE: the previous `if TRACE_STACK:` guard was a no-op — a ContextVar
    # object is always truthy; only the stack *contents* matter.
    trace_stack = TRACE_STACK.get([])
    if trace_stack:
        filtered = {k: v for k, v in attributes.items() if v is not None}
        trace_stack[-1].set_attributes(filtered)
def set_trace_status(is_error: bool, description: str = ""):
    """Set the status for the current span in context (no-op if no span)."""
    trace_stack = TRACE_STACK.get([])
    if not trace_stack:
        return
    if is_error:
        status = Status(status_code=StatusCode.ERROR, description=description)
    else:
        # OK statuses carry no description.
        status = Status(status_code=StatusCode.OK, description=None)
    trace_stack[-1].set_status(status)
def set_span_exception(exc: Exception, escaped: bool = False):
    """Record *exc* on the current span in context (no-op if no span)."""
    trace_stack = TRACE_STACK.get([])
    if not trace_stack:
        return
    trace_stack[-1].record_exception(exc, escaped=escaped)
def is_tracing_enabled() -> bool:
    """Tracing is on when an exporter path is configured and otel imported."""
    if trace is None:
        return False
    return RAY_SERVE_TRACING_EXPORTER_IMPORT_PATH != ""
def is_span_recording() -> bool:
    """Return True when there is at least one active span on the trace stack."""
    # NOTE: the previous `if TRACE_STACK:` guard was a no-op — a ContextVar
    # object is always truthy; only the stack *contents* matter.
    return bool(TRACE_STACK.get([]))
def _append_trace_stack(span):
    """Push *span* onto the context-local trace stack."""
    stack = TRACE_STACK.get([])
    stack.append(span)
    TRACE_STACK.set(stack)
def _pop_trace_stack():
    """Pop the innermost span from the context-local trace stack (if any)."""
    stack = TRACE_STACK.get([])
    if stack:
        stack.pop()
    TRACE_STACK.set(stack)
def _validate_tracing_exporter(func: Callable) -> None:
"""Validate that the custom tracing exporter
is a function that takes no arguments.
"""
if inspect.isfunction(func) is False:
raise TypeError("Tracing exporter must be a function.")
signature = inspect.signature(func)
if len(signature.parameters) != 0:
raise TypeError("Tracing exporter cannot take any arguments.")
def _validate_tracing_exporter_processors(span_processors: List[Any]):
"""Validate that the output of a custom tracing exporter
returns type List[SpanProcessor].
"""
if not isinstance(span_processors, list):
raise TypeError(
"Output of tracing exporter needs to be of type "
f"List[SpanProcessor], but received type {type(span_processors)}."
)
for span_processor in span_processors:
if not isinstance(span_processor, SpanProcessor):
raise TypeError(
"Output of tracing exporter needs to be of "
"type List[SpanProcessor], "
f"but received type {type(span_processor)}."
)
def _load_span_processors(
    tracing_exporter_import_path: str,
    tracing_file_name: str,
):
    """Load span processors from a (possibly custom) tracing exporter function."""
    exporter = import_attr(tracing_exporter_import_path)
    # The built-in exporter takes the output file name and needs no validation.
    if tracing_exporter_import_path == DEFAULT_TRACING_EXPORTER_IMPORT_PATH:
        return exporter(tracing_file_name)
    # Custom exporters must be zero-arg functions returning List[SpanProcessor].
    _validate_tracing_exporter(exporter)
    span_processors = exporter()
    _validate_tracing_exporter_processors(span_processors)
    return span_processors
def _is_generator_function(func):
return inspect.isgeneratorfunction(func) or inspect.isasyncgenfunction(func)
def _is_async_function(func):
return inspect.iscoroutinefunction(func) or inspect.isasyncgenfunction(func)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/_private/tracing_utils.py",
"license": "Apache License 2.0",
"lines": 397,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/serve/tests/test_tracing_utils.py | import json
import os
import re
import shutil
import uuid
from pathlib import Path
from threading import Thread
from typing import Set
from unittest.mock import patch
import grpc
import httpx
import pytest
import starlette
from starlette.requests import Request
from starlette.responses import StreamingResponse
import ray
from ray import serve
from ray._common.test_utils import SignalActor
from ray.serve._private.common import ServeComponentType
from ray.serve._private.constants import (
RAY_SERVE_ENABLE_DIRECT_INGRESS,
RAY_SERVE_ENABLE_HA_PROXY,
)
from ray.serve._private.logging_utils import get_serve_logs_dir
from ray.serve._private.test_utils import get_application_url
from ray.serve._private.tracing_utils import (
DEFAULT_TRACING_EXPORTER_IMPORT_PATH,
TRACE_STACK,
_append_trace_stack,
_load_span_processors,
_validate_tracing_exporter,
_validate_tracing_exporter_processors,
set_trace_status,
setup_tracing,
)
from ray.serve.config import HTTPOptions, gRPCOptions
from ray.serve.generated import serve_pb2, serve_pb2_grpc
from ray.serve.tests.conftest import * # noqa
from ray.serve.utils import get_trace_context
from ray.tests.conftest import * # noqa
# opentelemetry is an optional dependency of Ray Serve, but these tests
# cannot run without it — fail fast with a clear error instead of obscure
# NameErrors later.
try:
    from opentelemetry import trace
    from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor
    from opentelemetry.sdk.trace.sampling import ParentBasedTraceIdRatio
    from opentelemetry.trace.propagation.tracecontext import (
        TraceContextTextMapPropagator,
    )
    from opentelemetry.trace.status import StatusCode
except ImportError:
    raise ModuleNotFoundError(
        "`opentelemetry` or `opentelemetry.sdk.trace.export` not found"
    )
# File produced by custom_tracing_exporter below; removed by the
# use_custom_tracing_exporter fixture after each test.
CUSTOM_EXPORTER_OUTPUT_FILENAME = "spans.txt"
@pytest.fixture
def use_custom_tracing_exporter():
    """Yield the import path of the custom test exporter; clean up its output."""
    yield "ray.serve.tests.test_tracing_utils:custom_tracing_exporter"
    # Clean up output file produced by custom exporter
    if os.path.exists(CUSTOM_EXPORTER_OUTPUT_FILENAME):
        os.remove(CUSTOM_EXPORTER_OUTPUT_FILENAME)
@pytest.fixture
def serve_and_ray_shutdown():
    """Ensure a clean Serve/Ray state both before and after each test."""
    serve.shutdown()
    ray.shutdown()
    yield
    serve.shutdown()
class FakeSpan:
    """Minimal span stand-in that records the last status it was given."""

    def __init__(self):
        # No status has been recorded yet.
        self.status = None

    def set_status(self, status):
        """Remember *status* as the most recently set span status."""
        self.status = status
def test_disable_tracing_exporter():
    """Test that setting `tracing_exporter_import_path`
    to an empty string disables tracing.
    """
    setup_succeeded = setup_tracing(
        component_type=ServeComponentType.REPLICA,
        component_name="component_name",
        component_id="component_id",
        tracing_exporter_import_path="",
        tracing_sampling_ratio=1.0,
    )
    assert setup_succeeded is False
def test_validate_tracing_exporter_with_string():
    """Test exception message for invalid exporter type."""
    expected_exception = "Tracing exporter must be a function."
    # None of these are functions, so each must be rejected.
    for bad_exporter in (1, "string", []):
        with pytest.raises(TypeError, match=expected_exception):
            _validate_tracing_exporter(bad_exporter)
def test_validate_tracing_exporter_with_args():
    """Test exception message for _validate_tracing_exporter.
    if exporter contains an argument.
    """

    def exporter_with_arg(arg):
        return arg

    with pytest.raises(
        TypeError, match="Tracing exporter cannot take any arguments."
    ):
        _validate_tracing_exporter(exporter_with_arg)
def test_validate_tracing_exporter_processors_list():
    """Test exception message for _validate_tracing_exporter.
    if exporter returns invalid return type.
    """
    for bad_output in (1, "string"):
        expected_exception = re.escape(
            "Output of tracing exporter needs to be of type "
            "List[SpanProcessor], but received type "
            f"{type(bad_output)}."
        )
        with pytest.raises(TypeError, match=expected_exception):
            _validate_tracing_exporter_processors(bad_output)
def test_validate_tracing_exporter_processors_full_output():
    """Test exception message for _validate_tracing_exporter.
    if exporter returns invalid return type.
    """
    for bad_list in ([1, 2], ["1", "2"]):
        # The error names the type of the first offending element.
        expected_exception = re.escape(
            "Output of tracing exporter needs to be of "
            "type List[SpanProcessor], "
            f"but received type {type(bad_list[0])}."
        )
        with pytest.raises(TypeError, match=expected_exception):
            _validate_tracing_exporter_processors(bad_list)
def test_missing_dependencies():
    """setup_tracing raises ImportError when opentelemetry is unavailable."""
    expected_exception = (
        "You must `pip install opentelemetry-api` and "
        "`pip install opentelemetry-sdk` "
        "to enable tracing on Ray Serve."
    )
    # Simulate a missing opentelemetry installation by nulling out the module.
    with patch("ray.serve._private.tracing_utils.trace", new=None):
        with pytest.raises(ImportError, match=expected_exception):
            setup_tracing(
                component_type=ServeComponentType.REPLICA,
                component_name="component_name",
                component_id="component_id",
                tracing_exporter_import_path=DEFAULT_TRACING_EXPORTER_IMPORT_PATH,
                tracing_sampling_ratio=1.0,
            )
def test_default_tracing_exporter(ray_start_cluster):
    """The default tracing exporter returns List[SimpleSpanProcessor]."""
    # A running Ray cluster is needed so the default exporter can resolve the
    # Serve logs directory.
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=1)
    cluster.wait_for_nodes()
    ray.init(address=cluster.address)

    processors = _load_span_processors(
        DEFAULT_TRACING_EXPORTER_IMPORT_PATH, "mock_file.json"
    )
    assert isinstance(processors, list)
    for processor in processors:
        assert isinstance(processor, SimpleSpanProcessor)
def test_custom_tracing_exporter(use_custom_tracing_exporter):
    """Test setup_tracing with a custom tracing exporter."""
    exporter_path = use_custom_tracing_exporter

    processors = _load_span_processors(exporter_path, "mock_file.json")
    assert isinstance(processors, list)
    for processor in processors:
        assert isinstance(processor, SimpleSpanProcessor)

    setup_succeeded = setup_tracing(
        "component_name",
        "component_id",
        ServeComponentType.REPLICA,
        exporter_path,
        tracing_sampling_ratio=1.0,
    )
    # Validate that tracing is setup successfully
    # and the tracing exporter created the output file
    assert setup_succeeded
    assert os.path.exists(CUSTOM_EXPORTER_OUTPUT_FILENAME)
def test_tracing_sampler(use_custom_tracing_exporter):
    """Test that tracing sampler is properly configured
    through tracing_sampling_ratio argument.
    """
    exporter_path = use_custom_tracing_exporter
    sampling_ratio = 1
    setup_succeeded = setup_tracing(
        "component_name",
        "component_id",
        ServeComponentType.REPLICA,
        exporter_path,
        sampling_ratio,
    )
    # Validate that tracing is setup successfully
    # and the tracing exporter created the output file
    assert setup_succeeded
    assert os.path.exists(CUSTOM_EXPORTER_OUTPUT_FILENAME)

    tracer_state = trace.get_tracer(__name__).__dict__
    assert "sampler" in tracer_state
    sampler = tracer_state["sampler"]
    # ParentBasedTraceIdRatio delegates to one sampler per parent-span kind;
    # the configured ratio lives inside those delegates.
    for delegate_attr in (
        "_local_parent_not_sampled",
        "_local_parent_sampled",
        "_remote_parent_sampled",
        "_remote_parent_not_sampled",
    ):
        assert delegate_attr in sampler.__dict__
    assert isinstance(sampler, ParentBasedTraceIdRatio)
@pytest.mark.parametrize(
    (
        "serve_application",
        "expected_proxy_spans_path",
        "expected_replica_spans_path",
        "expected_upstream_spans_path",
    ),
    [
        (
            "basic",
            "fixtures/basic_proxy_spans.json",
            "fixtures/basic_replica_spans.json",
            "fixtures/basic_upstream_spans.json",
        ),
        (
            "streaming",
            "fixtures/streaming_proxy_spans.json",
            "fixtures/streaming_replica_spans.json",
            "fixtures/streaming_upstream_spans.json",
        ),
        (
            "grpc",
            "fixtures/grpc_proxy_spans.json",
            "fixtures/grpc_replica_spans.json",
            "fixtures/grpc_upstream_spans.json",
        ),
    ],
)
def test_tracing_e2e(
    serve_and_ray_shutdown,
    serve_application,
    expected_proxy_spans_path,
    expected_replica_spans_path,
    expected_upstream_spans_path,
):
    """Test tracing e2e.

    For each protocol, runs an upstream client span + proxy + replica,
    then checks the exported span files form a single connected trace and
    match the JSON fixtures (after stripping ephemeral fields).
    """
    signal_actor = SignalActor.remote()

    # --- Application definitions, one per protocol under test. ---
    @serve.deployment
    class BasicModel:
        def __call__(self, req: starlette.requests.Request):
            replica_context = serve.get_replica_context()
            tracer = trace.get_tracer(__name__)
            with tracer.start_as_current_span(
                "application_span", context=get_trace_context()
            ) as span:
                span.set_attribute("deployment", replica_context.deployment)
                span.set_attribute("replica_id", replica_context.replica_id.unique_id)
                return "hello"

    def hi_gen_sync():
        for i in range(10):
            yield f"hello_{i}"
            # to avoid coalescing chunks
            ray.get(signal_actor.wait.remote())

    @serve.deployment
    class StreamingModel:
        def __call__(self, request: Request) -> StreamingResponse:
            gen = hi_gen_sync()
            replica_context = serve.get_replica_context()
            tracer = trace.get_tracer(__name__)
            with tracer.start_as_current_span(
                "application_span", context=get_trace_context()
            ) as span:
                span.set_attribute("deployment", replica_context.deployment)
                span.set_attribute("replica_id", replica_context.replica_id.unique_id)
                return StreamingResponse(gen, media_type="text/plain")

    @serve.deployment
    class GrpcDeployment:
        def __call__(self, user_message):
            replica_context = serve.get_replica_context()
            tracer = trace.get_tracer(__name__)
            with tracer.start_as_current_span(
                "application_span", context=get_trace_context()
            ) as span:
                span.set_attribute("deployment", replica_context.deployment)
                span.set_attribute("replica_id", replica_context.replica_id.unique_id)
                greeting = f"Hello {user_message.name} from {user_message.foo}"
                num_x2 = user_message.num * 2
                user_response = serve_pb2.UserDefinedResponse(
                    greeting=greeting,
                    num_x2=num_x2,
                )
                return user_response

    # --- Drive each application with an instrumented upstream client whose
    # trace context is propagated via headers/metadata. ---
    if serve_application == "basic":
        serve.start(
            http_options=HTTPOptions(
                host="0.0.0.0",
            )
        )
        serve.run(BasicModel.bind())
        setup_tracing(
            component_name="upstream_app",
            component_id="345",
            tracing_sampling_ratio=1.0,
        )
        tracer = trace.get_tracer("test_tracing")
        with tracer.start_as_current_span("upstream_app"):
            ctx = get_trace_context()
            headers = {}
            TraceContextTextMapPropagator().inject(headers, ctx)
            url = get_application_url("HTTP")
            r = httpx.post(f"{url}/", headers=headers)
            assert r.text == "hello"
    elif serve_application == "streaming":
        serve.start(
            http_options=HTTPOptions(
                host="0.0.0.0",
            )
        )
        serve.run(StreamingModel.bind())
        setup_tracing(
            component_name="upstream_app",
            component_id="345",
            tracing_sampling_ratio=1.0,
        )
        tracer = trace.get_tracer("test_tracing")
        with tracer.start_as_current_span("upstream_app"):
            ctx = get_trace_context()
            headers = {}
            TraceContextTextMapPropagator().inject(headers, ctx)
            url = get_application_url("HTTP")
            with httpx.stream("GET", f"{url}/", headers=headers) as r:
                r.raise_for_status()
                for i, chunk in enumerate(r.iter_text()):
                    assert chunk == f"hello_{i}"
                    ray.get(signal_actor.send.remote())
    elif serve_application == "grpc":
        # TODO: Remove this once HAProxy supports gRPC
        if RAY_SERVE_ENABLE_HA_PROXY:
            return
        grpc_port = 9000
        grpc_servicer_functions = [
            "ray.serve.generated.serve_pb2_grpc"
            ".add_UserDefinedServiceServicer_to_server",
            "ray.serve.generated.serve_pb2_grpc.add_FruitServiceServicer_to_server",
        ]
        serve.start(
            grpc_options=gRPCOptions(
                port=grpc_port,
                grpc_servicer_functions=grpc_servicer_functions,
            ),
        )
        g = GrpcDeployment.options(name="grpc-deployment").bind()
        serve.run(g)
        setup_tracing(
            component_name="upstream_app",
            component_id="345",
            tracing_sampling_ratio=1.0,
        )
        tracer = trace.get_tracer("test_tracing")
        with tracer.start_as_current_span("upstream_app"):
            ctx = get_trace_context()
            headers = {}
            TraceContextTextMapPropagator().inject(headers, ctx)
            metadata = tuple(headers.items())
            channel = grpc.insecure_channel(get_application_url("gRPC"))
            stub = serve_pb2_grpc.UserDefinedServiceStub(channel)
            request = serve_pb2.UserDefinedMessage(name="foo", num=30, foo="bar")
            response, call = stub.__call__.with_call(request=request, metadata=metadata)
            assert call.code() == grpc.StatusCode.OK, call.code()
            assert response.greeting == "Hello foo from bar", response.greeting
    # Shutting down Serve flushes the span files before we read them.
    serve.shutdown()
    serve_logs_dir = get_serve_logs_dir()
    spans_dir = os.path.join(serve_logs_dir, "spans")
    files = os.listdir(spans_dir)
    if RAY_SERVE_ENABLE_HA_PROXY:
        # We don't currently trace HAProxy.
        assert len(files) == 2
    else:
        assert len(files) == 3
    replica_filename = None
    # When HAProxy is enabled there is no proxy span file; seed the variable
    # with a truthy value so the combined assert below still passes.
    proxy_filename = None or RAY_SERVE_ENABLE_HA_PROXY
    upstream_filename = None
    for file in files:
        if "replica" in file:
            replica_filename = file
        elif "proxy" in file:
            proxy_filename = file
        elif "upstream" in file:
            upstream_filename = file
        else:
            assert False, f"Did not expect tracing file with name {file}"
    assert replica_filename and proxy_filename and upstream_filename
    upstream_spans = load_spans(os.path.join(spans_dir, upstream_filename))
    if not RAY_SERVE_ENABLE_DIRECT_INGRESS:
        proxy_spans = load_spans(os.path.join(spans_dir, proxy_filename))
    else:
        proxy_spans = []
    replica_spans = load_spans(os.path.join(spans_dir, replica_filename))
    # Every span in the combined trace must be linked via parent_id.
    entire_trace = replica_spans + proxy_spans + upstream_spans
    validate_span_associations_in_trace(entire_trace)
    expected_upstream_spans = load_json_fixture(expected_upstream_spans_path)
    if not RAY_SERVE_ENABLE_DIRECT_INGRESS:
        expected_proxy_spans = load_json_fixture(expected_proxy_spans_path)
    else:
        expected_proxy_spans = []
    expected_replica_spans = load_json_fixture(expected_replica_spans_path)
    # Strip run-specific fields (ids, timestamps) before fixture comparison.
    sanitize_spans(upstream_spans)
    sanitize_spans(proxy_spans)
    sanitize_spans(replica_spans)
    assert upstream_spans == expected_upstream_spans
    assert proxy_spans == expected_proxy_spans
    assert replica_spans == expected_replica_spans
    shutil.rmtree(spans_dir)
@pytest.mark.parametrize(
    "protocol,expected_status_code,expected_span_status",
    [
        ("http", 500, StatusCode.ERROR),
        ("streaming", 500, StatusCode.ERROR),
        ("grpc", grpc.StatusCode.INTERNAL.name, StatusCode.ERROR),
    ],
)
def test_tracing_e2e_with_errors(
    serve_and_ray_shutdown, protocol, expected_status_code, expected_span_status
):
    """Test tracing with error responses across HTTP, streaming, and gRPC protocols."""

    # --- Applications that deliberately raise inside the traced span. ---
    @serve.deployment
    class HttpErrorModel:
        def __call__(self, request: starlette.requests.Request):
            replica_context = serve.get_replica_context()
            tracer = trace.get_tracer(__name__)
            with tracer.start_as_current_span(
                "application_span", context=get_trace_context()
            ) as span:
                span.set_attribute("deployment", replica_context.deployment)
                span.set_attribute("replica_id", replica_context.replica_id.unique_id)
                raise RuntimeError("Internal server error")

    @serve.deployment
    class StreamingErrorModel:
        def __call__(self, request: Request) -> StreamingResponse:
            replica_context = serve.get_replica_context()
            tracer = trace.get_tracer(__name__)
            with tracer.start_as_current_span(
                "application_span", context=get_trace_context()
            ) as span:
                span.set_attribute("deployment", replica_context.deployment)
                span.set_attribute("replica_id", replica_context.replica_id.unique_id)

                def error_generator():
                    raise RuntimeError("Streaming error occurred")

                return StreamingResponse(error_generator(), media_type="text/plain")

    @serve.deployment
    class GrpcErrorModel:
        def __call__(self, user_message):
            replica_context = serve.get_replica_context()
            tracer = trace.get_tracer(__name__)
            with tracer.start_as_current_span(
                "application_span", context=get_trace_context()
            ) as span:
                span.set_attribute("deployment", replica_context.deployment)
                span.set_attribute("replica_id", replica_context.replica_id.unique_id)
                # Raise error
                raise RuntimeError("gRPC error occurred")

    # Setup based on protocol
    if protocol == "http":
        serve.start(
            http_options=HTTPOptions(
                host="0.0.0.0",
            )
        )
        serve.run(HttpErrorModel.bind())
        setup_tracing(
            component_name="upstream_app",
            component_id="345",
            tracing_sampling_ratio=1.0,
        )
        tracer = trace.get_tracer("test_tracing")
        with tracer.start_as_current_span("upstream_app"):
            ctx = get_trace_context()
            headers = {}
            TraceContextTextMapPropagator().inject(headers, ctx)
            # Make HTTP request
            url = get_application_url("HTTP")
            response = httpx.post(f"{url}/", headers=headers)
            assert response.status_code == expected_status_code
    elif protocol == "streaming":
        serve.start(
            http_options=HTTPOptions(
                host="0.0.0.0",
            )
        )
        serve.run(StreamingErrorModel.bind())
        setup_tracing(
            component_name="upstream_app",
            component_id="345",
            tracing_sampling_ratio=1.0,
        )
        tracer = trace.get_tracer("test_tracing")
        with tracer.start_as_current_span("upstream_app"):
            ctx = get_trace_context()
            headers = {}
            TraceContextTextMapPropagator().inject(headers, ctx)
            url = get_application_url("HTTP")
            with httpx.stream("GET", f"{url}/", headers=headers) as r:
                assert r.status_code == expected_status_code
    elif protocol == "grpc":
        # TODO: Remove this once HAProxy supports gRPC
        if RAY_SERVE_ENABLE_HA_PROXY:
            return
        grpc_port = 9000
        grpc_servicer_functions = [
            "ray.serve.generated.serve_pb2_grpc.add_UserDefinedServiceServicer_to_server",
        ]
        serve.start(
            grpc_options=gRPCOptions(
                port=grpc_port,
                grpc_servicer_functions=grpc_servicer_functions,
            ),
        )
        serve.run(GrpcErrorModel.options(name="grpc-error-model").bind())
        setup_tracing(
            component_name="upstream_app",
            component_id="345",
            tracing_sampling_ratio=1.0,
        )
        tracer = trace.get_tracer("test_tracing")
        with tracer.start_as_current_span("upstream_app"):
            ctx = get_trace_context()
            headers = {}
            TraceContextTextMapPropagator().inject(headers, ctx)
            channel = grpc.insecure_channel("localhost:9000")
            stub = serve_pb2_grpc.UserDefinedServiceStub(channel)
            request = serve_pb2.UserDefinedMessage(name="test", num=10, foo="bar")
            with pytest.raises(grpc.RpcError) as exception_info:
                _ = stub.__call__(request=request)
            rpc_error = exception_info.value
            print(rpc_error)
            assert rpc_error.code().name == expected_status_code
    else:
        assert False, "Invalid protocol"
    # Shutting down Serve flushes the span files before we read them.
    serve.shutdown()
    # Verify the trace data
    serve_logs_dir = get_serve_logs_dir()
    spans_dir = os.path.join(serve_logs_dir, "spans")
    files = os.listdir(spans_dir)
    if RAY_SERVE_ENABLE_HA_PROXY:
        # We don't currently trace HAProxy.
        assert len(files) == 2
    else:
        assert len(files) == 3  # proxy, replica, and upstream spans
    replica_filename = None
    # No proxy span file exists with HAProxy; seed with a truthy value so the
    # combined assert below still passes.
    proxy_filename = None or RAY_SERVE_ENABLE_HA_PROXY
    upstream_filename = None
    for file in files:
        if "replica" in file:
            replica_filename = file
        elif "proxy" in file:
            proxy_filename = file
        elif "upstream" in file:
            upstream_filename = file
        else:
            assert False, f"Did not expect tracing file with name {file}"
    assert replica_filename and proxy_filename and upstream_filename
    # Load and check spans
    if not RAY_SERVE_ENABLE_DIRECT_INGRESS:
        proxy_spans = load_spans(os.path.join(spans_dir, proxy_filename))
    else:
        proxy_spans = []
    replica_spans = load_spans(os.path.join(spans_dir, replica_filename))
    # Verify error status in spans
    for span in replica_spans:
        if "application_span" in span["name"]:
            assert span["status"]["status_code"] == expected_span_status.name
            # Check for error attributes based on protocol and error type
            if protocol == "http":
                assert "Internal server error" in str(span.get("events", []))
            elif protocol == "streaming":
                assert "Streaming error occurred" in str(span.get("events", []))
            elif protocol == "grpc":
                assert "gRPC error occurred" in str(span.get("events", []))
            else:
                assert False, "Invalid protocol"
    # Verify status code in proxy spans
    for span in proxy_spans:
        if protocol == "http" or protocol == "streaming":
            if "proxy_http_request" in span["name"]:
                assert (
                    span["attributes"].get("http.status_code") == expected_status_code
                )
                assert span["status"]["status_code"] == "ERROR"
            elif "route_to_replica" in span["name"]:
                pass
            else:
                raise Exception("Invalid proxy span")
        elif protocol == "grpc":
            if "proxy_grpc_request" in span["name"]:
                assert (
                    span["attributes"].get("rpc.grpc.status_code")
                    == expected_status_code
                )
                assert span["status"]["status_code"] == "ERROR"
            elif "route_to_replica" in span["name"]:
                pass
            else:
                assert False, "Invalid proxy span"
        else:
            assert False, "Invalid protocol"
    # Clean up
    shutil.rmtree(spans_dir)
def custom_tracing_exporter():
    """Custom tracing exporter used for testing.

    Writes spans to CUSTOM_EXPORTER_OUTPUT_FILENAME; the file handle is
    intentionally kept open for the exporter's lifetime so spans can be
    appended as they are produced.
    """
    return [
        SimpleSpanProcessor(
            ConsoleSpanExporter(out=open(CUSTOM_EXPORTER_OUTPUT_FILENAME, "a"))
        )
    ]
def load_json_fixture(file_path):
    """Load json from a fixture located relative to this test module."""
    fixture_path = Path(__file__).parent.joinpath(file_path)
    with fixture_path.open() as f:
        return json.load(f)
def load_spans(file_path):
    """Load and parse spans from a `.span` file.

    This requires special handling because ConsoleSpanExporter
    does not write proper JSON since the data is streamed: the file is a
    sequence of JSON objects, so we split on the "}\\n{" boundary and
    re-add the braces the split removed.

    Returns:
        Spans sorted by start_time, newest first.
    """
    with open(file_path, "r") as file:
        file_contents = file.read()
    raw_spans = file_contents.split("}\n{")
    last = len(raw_spans) - 1
    spans = []
    for i, raw_span in enumerate(raw_spans):
        # A single-object file needs no brace repair (hoisted out of the old
        # per-iteration `len(raw_spans) > 1` check; the dead `elif i != 0`
        # branch is now a plain `else`).
        if last > 0:
            if i == 0:
                raw_span = raw_span + "}"
            elif i == last:
                raw_span = "{" + raw_span
            else:
                raw_span = "{" + raw_span + "}"
        spans.append(json.loads(raw_span))
    spans.sort(reverse=True, key=lambda x: x["start_time"])
    return spans
def sanitize_spans(spans):
    """Strip ephemeral data in place so spans can be compared across runs.

    Removes run-specific top-level keys entirely and blanks any attribute
    whose name contains ``_id``. Mutates ``spans``; returns None.
    """
    ephemeral_keys = ("context", "parent_id", "start_time", "end_time", "resource")
    for span in spans:
        for key in ephemeral_keys:
            del span[key]
        attributes = span["attributes"]
        for attr_name in attributes:
            if "_id" in attr_name:
                attributes[attr_name] = ""
def validate_span_associations_in_trace(spans):
    """Validate that all spans in a trace form one parent-linked chain.

    Starting from the unique ``application_span``, repeatedly follow
    ``parent_id`` links; every span in ``spans`` must be visited exactly
    once. Raises AssertionError when multiple application spans exist or
    when any span is left unvisited (broken chain).
    """
    if len(spans) <= 1:
        return

    def _strip_hex_prefix(hex_id: str) -> str:
        # Bug fix: the previous `hex_id.lstrip("0x")` stripped *any* leading
        # '0'/'x' characters, so e.g. "0x0ab" and "0xab" both collapsed to
        # "ab" and collided as keys in `span_nodes`, corrupting the walk.
        # Remove only the literal "0x" prefix instead.
        return hex_id[2:] if hex_id.startswith("0x") else hex_id

    class Span:
        def __init__(self, _span_id: str, _parent_id: str, _name: str):
            self.span_id = _span_id
            self.parent_id = _parent_id
            self.name = _name

        def __repr__(self):
            return f"Span(span_id={self.span_id}, parent_id={self.parent_id}, name={self.name})"

    span_nodes = {}
    starting_span = None
    for span in spans:
        span_id = _strip_hex_prefix(span["context"]["span_id"])
        parent_id = (
            _strip_hex_prefix(span["parent_id"]) if span["parent_id"] else None
        )
        name = span["name"]
        new_span = Span(span_id, parent_id, name)
        span_nodes[span_id] = new_span
        # The starting span is always the application span.
        if name == "application_span":
            assert starting_span is None, "Multiple starting spans found in trace"
            starting_span = new_span

    # Walk the parent chain, consuming exactly one node per hop.
    current_span = starting_span
    span_nodes.pop(current_span.span_id)
    while current_span:
        current_span = span_nodes.pop(current_span.parent_id, None)
    # All spans should have been visited.
    assert not span_nodes
def test_set_trace_status_empty_stack():
    """set_trace_status is a no-op when the trace stack is empty.

    Nothing is recorded and no exception is raised.
    """
    set_trace_status(is_error=True, description="error")
    assert TRACE_STACK.get([]) == []
def test_set_trace_status_error():
    """set_trace_status with is_error=True marks the active span as ERROR.

    The span on top of the trace stack must end up with an ERROR status
    code carrying the provided description.
    """
    message = "error"
    TRACE_STACK.set([FakeSpan()])
    set_trace_status(is_error=True, description=message)
    stack = TRACE_STACK.get([])
    assert len(stack) == 1
    span_status = stack[0].status
    assert span_status.status_code == StatusCode.ERROR
    assert span_status.description == message
def test_set_trace_status_ok(caplog):
    """set_trace_status with is_error=False marks the active span as OK.

    For OK statuses the description must be left unset, and no warning
    about setting a description on a non-ERROR status may be logged.
    """
    TRACE_STACK.set([FakeSpan()])
    set_trace_status(is_error=False, description="ok")
    stack = TRACE_STACK.get([])
    assert len(stack) == 1
    span_status = stack[0].status
    assert span_status.status_code == StatusCode.OK
    # Note: when the status is OK, the description is not set.
    assert span_status.description is None
    # Ensure we don't attempt to set the description when the status is OK.
    assert (
        "description should only be set when status_code is set to StatusCode.ERROR"
        not in caplog.text
    )
def test_append_trace_stack_multithread():
    """test calling _append_trace_stack in multiple threads.

    When multiple threads call _append_trace_stack, TRACE_STACK should only be
    updated to contain the task name of the thread that called
    _append_trace_stack.
    """
    number_of_tasks = 10
    passing = set()

    def try_append_trace_stack(_task_name: str, _passing: Set[str]):
        """
        Helper to call `_append_trace_stack()` in a thread. It checks TRACE_STACK in
        the current thread starts out empty and the correct task name is added to it
        after `_append_trace_stack()` is called. If both checks out, the task name will
        be added to the _passing.
        """
        # Each thread must start out with an empty stack rather than seeing
        # another thread's value — presumably TRACE_STACK is a ContextVar;
        # confirm against its definition.
        trace_stack_before = TRACE_STACK.get([])
        assert trace_stack_before == []
        _append_trace_stack(_task_name)
        trace_stack_after = TRACE_STACK.get()
        assert trace_stack_after == [_task_name]
        # Bug fix: record success through the `_passing` parameter instead of
        # reaching into the enclosing `passing` closure, matching the helper's
        # documented contract (the caller passes the same set object).
        _passing.add(_task_name)

    tasks = []
    for i in range(number_of_tasks):
        t = Thread(target=try_append_trace_stack, args=(f"task{i}", passing))
        t.start()
        tasks.append(t)
    for task in tasks:
        task.join()
    # A failed assert inside a worker thread does not propagate to the main
    # thread, so success is counted via the shared `passing` set.
    assert len(passing) == number_of_tasks
def test_batched_span_attached_to_first_request_trace():
    """Ensure span created inside a batched method attaches to the first request's trace.
    With 6 requests and max_batch_size=3, Serve should create 2 batches. Only the first request
    in each batch should contribute the parent trace to the 'batched_span', yielding exactly
    2 spans whose trace_ids match the traces of those first requests.
    """
    @serve.deployment
    class BatchedDeployment:
        def __init__(self):
            # We keep a counter so we can assert two unique batches producing two unique spans
            self._batch_idx = 0
        @serve.batch(
            max_batch_size=3, batch_wait_timeout_s=1.0, max_concurrent_batches=2
        )
        async def handle_batch(self, reqs):
            tracer = trace.get_tracer(__name__)
            # whichever request is at index 0 is the one whose context is attached
            first_req_id = None
            try:
                first_req_id = reqs[0].headers.get("req-id")
            except Exception:
                pass
            batch_idx = self._batch_idx
            self._batch_idx += 1
            # Attach the span to whatever trace context the batch inherited;
            # the test asserts this is the first request's trace.
            with tracer.start_as_current_span(
                "batched_span", context=get_trace_context()
            ) as span:
                if first_req_id is not None:
                    span.set_attribute("first-req-id", first_req_id)
                span.set_attribute("batch-idx", batch_idx)
                return ["ok" for _ in reqs]
        async def __call__(self, request: Request):
            return await self.handle_batch(request)
    # Deploy the batched app on a plain HTTP endpoint.
    serve.start(http_options=HTTPOptions(host="0.0.0.0"))
    serve.run(BatchedDeployment.bind())
    # Configure tracing for this process, which acts as the upstream client.
    # NOTE(review): the span-file lookup below matches "upstream" in the file
    # name — presumably derived from this component id; confirm.
    setup_tracing(
        component_name="upstream_app",
        component_id="batching_test_upstream_multi",
        tracing_sampling_ratio=1.0,
    )
    tracer = trace.get_tracer("test_tracing_batching_multi")
    def do_request(span_name: str):
        # Issue one traced HTTP request, tagging both the local span and the
        # outgoing headers with a fresh req-id so traces can be correlated.
        req_id = str(uuid.uuid4())
        with tracer.start_as_current_span(span_name) as span:
            # tag the upstream span so we can map req-id -> trace_id later
            span.set_attribute("req-id", req_id)
            # build headers from current OTEL context
            ctx = get_trace_context()
            headers = {"req-id": req_id}
            TraceContextTextMapPropagator().inject(headers, ctx)
            url = get_application_url("HTTP")
            r = httpx.post(f"{url}/", headers=headers)
            r.raise_for_status()
    # Launch 6 requests -> expect 2 batches of 3 requests each
    threads = [Thread(target=do_request, args=(f"upstream_{i}",)) for i in range(6)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    # NOTE(review): assumes shutdown flushes span files before we read them
    # below — confirm.
    serve.shutdown()
    # Load span files
    serve_logs_dir = get_serve_logs_dir()
    spans_dir = os.path.join(serve_logs_dir, "spans")
    files = os.listdir(spans_dir)
    assert files, "No span files found. Tracing may not be configured."
    replica_filename = None
    upstream_filename = None
    # If several files match, the last one seen wins — assumes exactly one
    # replica file and one upstream file exist for this run.
    for file in files:
        if "replica" in file:
            replica_filename = file
        elif "upstream" in file:
            upstream_filename = file
    assert replica_filename and upstream_filename
    upstream_spans = load_spans(os.path.join(spans_dir, upstream_filename))
    replica_spans = load_spans(os.path.join(spans_dir, replica_filename))
    # Map each request's req-id to the trace it originated.
    id_to_trace = {}
    for s in upstream_spans:
        rid = s.get("attributes", {}).get("req-id")
        if rid:
            id_to_trace[rid] = s["context"]["trace_id"]
    assert (
        len(id_to_trace) == 6
    ), f"Expected 6 upstream request spans, saw {len(id_to_trace)}"
    # Exactly two batch spans, one per each batch, are expected
    batched_spans = [s for s in replica_spans if s["name"] == "batched_span"]
    assert (
        len(batched_spans) == 2
    ), f"Expected 2 batched spans, saw {len(batched_spans)}"
    # For each batched span, its trace_id should equal the trace of the first request in the batch
    batched_trace_ids = set()
    batch_indices = set()
    for bs in batched_spans:
        first_req_id = bs.get("attributes", {}).get("first-req-id")
        assert (
            first_req_id in id_to_trace
        ), f"first-req-id {first_req_id} not found among upstream req-ids"
        assert bs["context"]["trace_id"] == id_to_trace[first_req_id]
        batched_trace_ids.add(bs["context"]["trace_id"])
        batch_indices.add(bs.get("attributes", {}).get("batch-idx"))
    # The two batched spans must correspond to two unique traces
    assert (
        len(batched_trace_ids) == 2
    ), "Batched spans did not map to two unique upstream traces"
    # And they must be from distinct batches
    assert (
        len(batch_indices) == 2
    ), f"Expected two distinct batch indices, got {batch_indices}"
    shutil.rmtree(spans_dir)
if __name__ == "__main__":
    # Run this module's tests directly; propagate pytest's exit code.
    raise SystemExit(pytest.main(["-v", "-s", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/test_tracing_utils.py",
"license": "Apache License 2.0",
"lines": 855,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/preprocessors/test_backwards_compatibility.py | """
Backwards compatibility tests for Preprocessor private field renaming.
These tests verify that preprocessors pickled with old public field names
(e.g., 'columns') can be deserialized correctly after fields were renamed
to private (e.g., '_columns').
The __setstate__ method in each preprocessor handles migration automatically.
"""
import numpy as np
import pandas as pd
import pytest
import ray
from ray.data.preprocessors import (
Categorizer,
Chain,
Concatenator,
CountVectorizer,
CustomKBinsDiscretizer,
FeatureHasher,
HashingVectorizer,
LabelEncoder,
MaxAbsScaler,
MinMaxScaler,
MultiHotEncoder,
Normalizer,
OneHotEncoder,
OrdinalEncoder,
PowerTransformer,
RobustScaler,
SimpleImputer,
StandardScaler,
Tokenizer,
TorchVisionPreprocessor,
UniformKBinsDiscretizer,
)
# =============================================================================
# Field Migration Tests
# =============================================================================
@pytest.mark.parametrize(
"preprocessor_class,old_state,expected_attrs",
[
(
Concatenator,
{
"columns": ["A", "B"],
"output_column_name": "concat",
"dtype": np.float32,
"raise_if_missing": True,
"flatten": True,
},
{
"columns": ["A", "B"],
"output_column_name": "concat",
"dtype": np.float32,
"raise_if_missing": True,
"flatten": True,
},
),
(
Normalizer,
{
"columns": ["A", "B"],
"norm": "l1",
"output_columns": ["A_norm", "B_norm"],
},
{
"columns": ["A", "B"],
"norm": "l1",
"output_columns": ["A_norm", "B_norm"],
},
),
(
Tokenizer,
{
"columns": ["text"],
"tokenization_fn": lambda s: s.split(),
"output_columns": ["text_tokens"],
},
{
"columns": ["text"],
"tokenization_fn": "callable", # Special marker
"output_columns": ["text_tokens"],
},
),
(
PowerTransformer,
{
"columns": ["A", "B"],
"power": 3,
"method": "box-cox",
"output_columns": ["A_pow", "B_pow"],
},
{
"columns": ["A", "B"],
"power": 3,
"method": "box-cox",
"output_columns": ["A_pow", "B_pow"],
},
),
(
HashingVectorizer,
{
"columns": ["text"],
"num_features": 200,
"tokenization_fn": lambda s: s.split(),
"output_columns": ["text_vec"],
},
{
"columns": ["text"],
"num_features": 200,
"tokenization_fn": "callable",
"output_columns": ["text_vec"],
},
),
(
CountVectorizer,
{
"columns": ["text"],
"tokenization_fn": lambda s: s.split(),
"max_features": 100,
"output_columns": ["text_count"],
},
{
"columns": ["text"],
"tokenization_fn": "callable",
"max_features": 100,
"output_columns": ["text_count"],
},
),
(
FeatureHasher,
{"columns": ["A", "B"], "num_features": 20, "output_column": "features"},
{"columns": ["A", "B"], "num_features": 20, "output_column": "features"},
),
(
OrdinalEncoder,
{
"columns": ["color"],
"output_columns": ["color_encoded"],
"encode_lists": False,
},
{
"columns": ["color"],
"output_columns": ["color_encoded"],
"encode_lists": False,
},
),
(
OneHotEncoder,
{
"columns": ["color"],
"output_columns": ["color_encoded"],
"max_categories": {"color": 5},
},
{
"columns": ["color"],
"output_columns": ["color_encoded"],
"max_categories": {"color": 5},
},
),
(
MultiHotEncoder,
{
"columns": ["tags"],
"output_columns": ["tags_encoded"],
"max_categories": {},
},
{"columns": ["tags"], "output_columns": ["tags_encoded"]},
),
(
LabelEncoder,
{"label_column": "label", "output_column": "label_id"},
{"label_column": "label", "output_column": "label_id"},
),
(
Categorizer,
{"columns": ["sex"], "output_columns": ["sex_cat"], "dtypes": {}},
{"columns": ["sex"], "output_columns": ["sex_cat"]},
),
(
StandardScaler,
{"columns": ["A", "B"], "output_columns": ["A_scaled", "B_scaled"]},
{"columns": ["A", "B"], "output_columns": ["A_scaled", "B_scaled"]},
),
(
MinMaxScaler,
{"columns": ["A"], "output_columns": ["A_scaled"]},
{"columns": ["A"], "output_columns": ["A_scaled"]},
),
(
MaxAbsScaler,
{"columns": ["A"], "output_columns": ["A_scaled"]},
{"columns": ["A"], "output_columns": ["A_scaled"]},
),
(
RobustScaler,
{
"columns": ["A"],
"output_columns": ["A_scaled"],
"quantile_range": (0.1, 0.9),
"quantile_precision": 1000,
},
{
"columns": ["A"],
"output_columns": ["A_scaled"],
"quantile_range": (0.1, 0.9),
"quantile_precision": 1000,
},
),
(
SimpleImputer,
{
"columns": ["A", "B"],
"output_columns": ["A_imputed", "B_imputed"],
"strategy": "median",
"fill_value": 99.0,
},
{
"columns": ["A", "B"],
"output_columns": ["A_imputed", "B_imputed"],
"strategy": "median",
"fill_value": 99.0,
},
),
(
CustomKBinsDiscretizer,
{
"columns": ["A", "B"],
"bins": [0, 1, 2, 3],
"right": False,
"include_lowest": True,
"duplicates": "drop",
"dtypes": None,
"output_columns": ["A_binned", "B_binned"],
},
{
"columns": ["A", "B"],
"bins": [0, 1, 2, 3],
"right": False,
"include_lowest": True,
"duplicates": "drop",
"dtypes": None,
"output_columns": ["A_binned", "B_binned"],
},
),
(
UniformKBinsDiscretizer,
{
"columns": ["A", "B"],
"bins": 4,
"right": False,
"include_lowest": True,
"duplicates": "drop",
"dtypes": None,
"output_columns": ["A_binned", "B_binned"],
},
{
"columns": ["A", "B"],
"bins": 4,
"right": False,
"include_lowest": True,
"duplicates": "drop",
"dtypes": None,
"output_columns": ["A_binned", "B_binned"],
},
),
],
ids=lambda x: x.__name__ if hasattr(x, "__name__") else str(x)[:20],
)
def test_field_migration_from_old_public_names(
    preprocessor_class, old_state, expected_attrs
):
    """Verify old public field names are migrated to new private fields."""
    # Bypass __init__ and feed the old-format state straight to __setstate__.
    migrated = preprocessor_class.__new__(preprocessor_class)
    migrated.__setstate__(old_state)
    for attr_name, expected_value in expected_attrs.items():
        actual_value = getattr(migrated, attr_name)
        if expected_value == "callable":
            # "callable" is a marker: only check that a function survived.
            assert callable(actual_value), f"{attr_name} should be callable"
        else:
            assert actual_value == expected_value, f"Mismatch in {attr_name}"
@pytest.mark.parametrize(
"preprocessor_class,minimal_state,expected_defaults",
[
# Callable default: tokenization_fn must be stored as the function itself,
# not called. This would have failed with the old callable() check.
(
Tokenizer,
{"columns": ["text"]},
{"tokenization_fn": "callable", "output_columns": ["text"]},
),
(
HashingVectorizer,
{"columns": ["text"], "num_features": 100},
{"tokenization_fn": "callable", "output_columns": ["text"]},
),
(
CountVectorizer,
{"columns": ["text"]},
{"tokenization_fn": "callable", "output_columns": ["text"]},
),
# _Computed default: output_columns derives from _columns.
(
StandardScaler,
{"columns": ["A", "B"]},
{"output_columns": ["A", "B"]},
),
# _Computed default deriving from a different source field.
(
LabelEncoder,
{"label_column": "label"},
{"output_column": "label"},
),
# Plain value default alongside a _Computed default.
(
Normalizer,
{"columns": ["A", "B"]},
{"norm": "l2", "output_columns": ["A", "B"]},
),
],
ids=[
"Tokenizer",
"HashingVectorizer",
"CountVectorizer",
"StandardScaler",
"LabelEncoder",
"Normalizer",
],
)
def test_missing_optional_fields_use_defaults(
    preprocessor_class, minimal_state, expected_defaults
):
    """
    Verify that absent optional fields are filled with their correct defaults.
    This exercises the default-fallback branch of migrate_private_fields. The minimal_state
    deliberately omits optional fields to force the default path.
    """
    restored = preprocessor_class.__new__(preprocessor_class)
    restored.__setstate__(minimal_state)
    for attr_name, expected_value in expected_defaults.items():
        actual_value = getattr(restored, attr_name)
        if expected_value == "callable":
            # The default must be the function object itself, never its result.
            assert callable(
                actual_value
            ), f"{attr_name} should be a stored callable, not the result of calling it"
        else:
            assert (
                actual_value == expected_value
            ), f"Mismatch in {attr_name}: {actual_value!r} != {expected_value!r}"
def test_torchvision_preprocessor_field_migration():
    """Old public TorchVisionPreprocessor field names migrate to private ones."""
    try:
        from torchvision import transforms
    except ImportError:
        pytest.skip("torchvision not installed")
    identity = transforms.Lambda(lambda x: x)
    legacy_state = {
        "columns": ["image"],
        "output_columns": ["image_out"],
        "torchvision_transform": identity,
        "batched": True,
    }
    migrated = TorchVisionPreprocessor.__new__(TorchVisionPreprocessor)
    migrated.__setstate__(legacy_state)
    assert migrated.columns == ["image"]
    assert migrated.output_columns == ["image_out"]
    assert migrated.torchvision_transform == identity
    assert migrated.batched is True
def test_chain_field_migration():
    """A Chain pickled with the old public 'preprocessors' key still restores."""
    first = StandardScaler(columns=["A"])
    second = StandardScaler(columns=["B"])
    restored = Chain.__new__(Chain)
    restored.__setstate__({"preprocessors": (first, second)})
    assert len(restored.preprocessors) == 2
    assert restored.preprocessors[0] == first
    assert restored.preprocessors[1] == second
# =============================================================================
# Functional Test Helpers
# =============================================================================
def _simulate_old_format_deserialization(preprocessor, field_mapping):
"""Simulate deserialization from old format by renaming private->public fields."""
state = preprocessor.__dict__.copy()
for public_name, private_name in field_mapping.items():
if private_name in state:
state[public_name] = state.pop(private_name)
new_preprocessor = preprocessor.__class__.__new__(preprocessor.__class__)
new_preprocessor.__setstate__(state)
return new_preprocessor
def _test_functional_backwards_compat(preprocessor, test_ds, field_mapping):
    """Generic functional test: verify deserialized preprocessor produces same output."""
    baseline = preprocessor.transform(test_ds).to_pandas()
    revived = _simulate_old_format_deserialization(preprocessor, field_mapping)
    revived_output = revived.transform(test_ds).to_pandas()
    pd.testing.assert_frame_equal(revived_output, baseline)
# =============================================================================
# Functional Tests - Simple Preprocessors (No Fitting Required)
# =============================================================================
@pytest.mark.parametrize(
"setup_func,field_mapping",
[
(
lambda: (
Concatenator(columns=["A", "B"], output_column_name="C"),
pd.DataFrame({"A": [1, 2], "B": [3, 4]}),
{
"columns": "_columns",
"output_column_name": "_output_column_name",
"dtype": "_dtype",
"raise_if_missing": "_raise_if_missing",
"flatten": "_flatten",
},
),
None,
),
(
lambda: (
Normalizer(columns=["A", "B"], norm="l2"),
pd.DataFrame({"A": [1.0, 2.0], "B": [3.0, 4.0]}),
{
"columns": "_columns",
"norm": "_norm",
"output_columns": "_output_columns",
},
),
None,
),
(
lambda: (
Tokenizer(columns=["text"]),
pd.DataFrame({"text": ["hello world", "foo bar"]}),
{
"columns": "_columns",
"tokenization_fn": "_tokenization_fn",
"output_columns": "_output_columns",
},
),
None,
),
(
lambda: (
PowerTransformer(columns=["A", "B"], power=2),
pd.DataFrame({"A": [1.0, 2.0, 3.0], "B": [4.0, 5.0, 6.0]}),
{
"columns": "_columns",
"power": "_power",
"method": "_method",
"output_columns": "_output_columns",
},
),
None,
),
(
lambda: (
HashingVectorizer(columns=["text"], num_features=10),
pd.DataFrame({"text": ["hello world", "foo bar"]}),
{
"columns": "_columns",
"num_features": "_num_features",
"tokenization_fn": "_tokenization_fn",
"output_columns": "_output_columns",
},
),
None,
),
(
lambda: (
FeatureHasher(
columns=["token_a", "token_b"],
num_features=5,
output_column="hashed",
),
pd.DataFrame({"token_a": [1, 2], "token_b": [3, 4]}),
{
"columns": "_columns",
"num_features": "_num_features",
"output_column": "_output_column",
},
),
None,
),
(
lambda: (
CustomKBinsDiscretizer(
columns=["A", "B"],
bins=[0, 1, 2, 3, 4],
output_columns=["A_binned", "B_binned"],
),
pd.DataFrame({"A": [0.5, 1.5, 2.5, 3.5], "B": [0.2, 1.2, 2.2, 3.2]}),
{
"columns": "_columns",
"bins": "_bins",
"right": "_right",
"include_lowest": "_include_lowest",
"duplicates": "_duplicates",
"dtypes": "_dtypes",
"output_columns": "_output_columns",
},
),
None,
),
],
ids=[
"Concatenator",
"Normalizer",
"Tokenizer",
"PowerTransformer",
"HashingVectorizer",
"FeatureHasher",
"CustomKBinsDiscretizer",
],
)
def test_simple_functional_backwards_compat(setup_func, field_mapping):
    """Verify preprocessors that don't need fitting work after deserialization."""
    # The parametrized field_mapping is None; the real mapping comes from setup_func.
    preprocessor, frame, field_mapping = setup_func()
    dataset = ray.data.from_pandas(frame)
    _test_functional_backwards_compat(preprocessor, dataset, field_mapping)
# =============================================================================
# Functional Tests - Stateful Preprocessors (Require Fitting)
# =============================================================================
@pytest.mark.parametrize(
"setup_func",
[
lambda: (
OrdinalEncoder(columns=["color"]),
pd.DataFrame({"color": ["red", "green", "blue", "red", "green"]}),
{
"columns": "_columns",
"output_columns": "_output_columns",
"encode_lists": "_encode_lists",
},
),
lambda: (
OneHotEncoder(columns=["color"]),
pd.DataFrame({"color": ["red", "green", "blue", "red", "green", "blue"]}),
{
"columns": "_columns",
"output_columns": "_output_columns",
"max_categories": "_max_categories",
},
),
lambda: (
LabelEncoder(label_column="label"),
pd.DataFrame(
{
"feature": [1.0, 2.0, 3.0, 4.0],
"label": ["cat", "dog", "cat", "bird"],
}
),
{"label_column": "_label_column", "output_column": "_output_column"},
),
lambda: (
StandardScaler(columns=["A", "B"]),
pd.DataFrame(
{"A": [1.0, 2.0, 3.0, 4.0, 5.0], "B": [10.0, 20.0, 30.0, 40.0, 50.0]}
),
{"columns": "_columns", "output_columns": "_output_columns"},
),
lambda: (
MinMaxScaler(columns=["A", "B"]),
pd.DataFrame(
{"A": [1.0, 2.0, 3.0, 4.0, 5.0], "B": [10.0, 20.0, 30.0, 40.0, 50.0]}
),
{"columns": "_columns", "output_columns": "_output_columns"},
),
lambda: (
RobustScaler(columns=["A"]),
pd.DataFrame({"A": [1.0, 2.0, 3.0, 4.0, 5.0, 100.0]}),
{
"columns": "_columns",
"output_columns": "_output_columns",
"quantile_range": "_quantile_range",
"quantile_precision": "_quantile_precision",
},
),
lambda: (
SimpleImputer(columns=["A", "B"], strategy="mean"),
pd.DataFrame(
{"A": [1.0, 2.0, None, 4.0, 5.0], "B": [10.0, None, 30.0, 40.0, 50.0]}
),
{
"columns": "_columns",
"output_columns": "_output_columns",
"strategy": "_strategy",
"fill_value": "_fill_value",
},
),
lambda: (
CountVectorizer(columns=["text"]),
pd.DataFrame({"text": ["hello world", "foo bar", "hello foo"]}),
{
"columns": "_columns",
"tokenization_fn": "_tokenization_fn",
"max_features": "_max_features",
"output_columns": "_output_columns",
},
),
lambda: (
UniformKBinsDiscretizer(
columns=["A", "B"], bins=3, output_columns=["A_binned", "B_binned"]
),
pd.DataFrame(
{"A": [1.0, 2.0, 3.0, 4.0, 5.0], "B": [10.0, 20.0, 30.0, 40.0, 50.0]}
),
{
"columns": "_columns",
"bins": "_bins",
"right": "_right",
"include_lowest": "_include_lowest",
"duplicates": "_duplicates",
"dtypes": "_dtypes",
"output_columns": "_output_columns",
},
),
lambda: (
MultiHotEncoder(columns=["genre"]),
pd.DataFrame(
{
"genre": [
["comedy", "action"],
["drama", "action"],
["comedy", "drama"],
]
}
),
{
"columns": "_columns",
"output_columns": "_output_columns",
"max_categories": "_max_categories",
},
),
lambda: (
MaxAbsScaler(columns=["A", "B"]),
pd.DataFrame({"A": [-6.0, 3.0, -3.0], "B": [2.0, -4.0, 1.0]}),
{"columns": "_columns", "output_columns": "_output_columns"},
),
lambda: (
Categorizer(columns=["color"]),
pd.DataFrame({"color": ["red", "green", "blue", "red", "green"]}),
{
"columns": "_columns",
"output_columns": "_output_columns",
"dtypes": "_dtypes",
},
),
],
ids=[
"OrdinalEncoder",
"OneHotEncoder",
"LabelEncoder",
"StandardScaler",
"MinMaxScaler",
"RobustScaler",
"SimpleImputer",
"CountVectorizer",
"UniformKBinsDiscretizer",
"MultiHotEncoder",
"MaxAbsScaler",
"Categorizer",
],
)
def test_stateful_functional_backwards_compat(setup_func):
    """Verify fitted preprocessors work after deserialization."""
    preprocessor, frame, field_mapping = setup_func()
    dataset = ray.data.from_pandas(frame)
    fitted = preprocessor.fit(dataset)
    _test_functional_backwards_compat(fitted, dataset, field_mapping)
def test_chain_functional_backwards_compat():
    """A Chain restored from the old field layout transforms identically."""
    ds = ray.data.from_pandas(pd.DataFrame({"A": [1.0, 2.0, 3.0]}))
    fitted_chain = Chain(
        StandardScaler(columns=["A"]), Normalizer(columns=["A"])
    ).fit(ds)
    expected = fitted_chain.transform(ds).to_pandas()
    # Rebuild the state dict as the old pickle format would have written it.
    legacy_state = fitted_chain.__dict__.copy()
    legacy_state["preprocessors"] = legacy_state.pop("_preprocessors")
    restored = Chain.__new__(Chain)
    restored.__setstate__(legacy_state)
    pd.testing.assert_frame_equal(restored.transform(ds).to_pandas(), expected)
def test_torchvision_functional_backwards_compat():
    """A TorchVisionPreprocessor restored from the old field layout still runs."""
    try:
        import torch
        from torchvision import transforms
    except ImportError:
        pytest.skip("torchvision not installed")
    to_float = transforms.Lambda(lambda x: torch.as_tensor(x, dtype=torch.float32))
    images = [
        np.array([[1, 2], [3, 4]], dtype=np.uint8),
        np.array([[5, 6], [7, 8]], dtype=np.uint8),
    ]
    ds = ray.data.from_pandas(pd.DataFrame({"image": images}))
    preprocessor = TorchVisionPreprocessor(
        columns=["image"], transform=to_float, batched=False
    )
    expected = preprocessor.transform(ds).to_pandas()
    # Rename private fields back to their pre-rename public spellings.
    legacy_state = preprocessor.__dict__.copy()
    for public_name, private_name in (
        ("columns", "_columns"),
        ("output_columns", "_output_columns"),
        ("torchvision_transform", "_torchvision_transform"),
        ("batched", "_batched"),
    ):
        legacy_state[public_name] = legacy_state.pop(private_name)
    restored = TorchVisionPreprocessor.__new__(TorchVisionPreprocessor)
    restored.__setstate__(legacy_state)
    result = restored.transform(ds).to_pandas()
    assert len(result) == len(expected)
    assert "image" in result.columns
if __name__ == "__main__":
    # Run this module's tests directly; propagate pytest's exit code.
    raise SystemExit(pytest.main(["-sv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/preprocessors/test_backwards_compatibility.py",
"license": "Apache License 2.0",
"lines": 712,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:doc/source/ray-overview/examples/multi_agent_a2a/ci/nb2sh.py | #!/usr/bin/env python3
"""Convert README.ipynb code cells into a README.sh shell script.
Extracts shell commands from code cells (lines prefixed with ``!`` in
Jupyter) and plain shell-looking cells, skipping pure Python illustration
cells that contain imports, decorators, or async/await syntax.
Also extracts fenced ``bash`` / ``shell`` / ``sh`` code blocks from
markdown cells so that commands documented in prose are not lost.
Post-processing rewrites applied to the raw extracted lines:
* ``serve run …`` gets ``--non-blocking``, a readiness poll, and a cleanup trap.
* ``%env VAR=val`` Jupyter magic is converted to ``export VAR=val``.
* Hardcoded ``%env BASE_URL`` / ``%env ANYSCALE_API_TOKEN`` lines are replaced
with dynamic extraction via ``anyscale service status``.
* ``anyscale service wait`` is injected after ``anyscale service deploy``.
Usage:
python nb2sh.py <notebook.ipynb> <output.sh>
"""
import re
import sys
import nbformat
# Patterns that indicate a code cell is pure Python (not shell):
# module-level imports, decorators, (async) def / class headers, or an
# `await` anywhere in the cell.
_PYTHON_RE = re.compile(
    r"(?:^\s*(?:from\s|import\s|@\w|(?:async\s+)?def\s|class\s))|(?:\bawait\s)",
    re.MULTILINE,
)
# Matches fenced code blocks tagged as bash / shell / sh.
# Group 1 captures the block body; DOTALL lets it span multiple lines.
_BASH_FENCE_RE = re.compile(
    r"```(?:bash|shell|sh)\s*\n(.*?)```", re.DOTALL
)
# Angle-bracket placeholders such as ``<your-key>`` mark non-runnable examples.
_PLACEHOLDER_RE = re.compile(r"<[^>]+>")
# Matches a ``serve run <config>`` command; group 1 is everything after "serve run".
_SERVE_RUN_RE = re.compile(r"^serve run\s+(.+)$")
_SERVE_RUN_BLOCK = """\
# Start Serve (returns immediately with --non-blocking)
serve run {config} --non-blocking --working-dir .
# Wait for all applications to be RUNNING
echo "Waiting for all Serve applications to be ready..."
for i in $(seq 1 600); do
STATUS=$(serve status 2>/dev/null || true)
if echo "$STATUS" | grep -q 'status: RUNNING' && \\
! echo "$STATUS" | grep -qE 'status: (DEPLOYING|NOT_STARTED)'; then
echo "All applications RUNNING after ${{i}}s"
break
fi
if echo "$STATUS" | grep -qE 'status: DEPLOY_FAILED'; then
echo "ERROR: an application failed to deploy"
serve status
exit 1
fi
sleep 1
done
# Cleanup on exit
trap 'serve shutdown -y 2>/dev/null || true' EXIT"""
_DEPLOY_RE = re.compile(r"^anyscale service deploy\s+(.+)$")
# Extract the file path from flags like ``-f path.yaml`` or ``--config-file path.yaml``.
_CONFIG_PATH_RE = re.compile(r"(?:-f|--config-file)\s+(\S+)")
def _extract_config_path(flags: str) -> str:
"""Return the config file path from deploy-style CLI flags."""
m = _CONFIG_PATH_RE.search(flags)
return m.group(1) if m else flags.strip()
# Matches hardcoded %env / export lines for BASE_URL or ANYSCALE_API_TOKEN.
_HARDCODED_ENV_RE = re.compile(
r"^(?:%env|export)\s+(?:BASE_URL|ANYSCALE_API_TOKEN)="
)
_DYNAMIC_ENV_BLOCK = """\
# Extract BASE_URL and ANYSCALE_API_TOKEN from the deployed service
SERVICE_STATUS=$(anyscale service status {flags} --json)
export BASE_URL=$(echo "$SERVICE_STATUS" | python3 -c "import sys,json; print(json.load(sys.stdin)['query_url'])")
export ANYSCALE_API_TOKEN=$(echo "$SERVICE_STATUS" | python3 -c "import sys,json; print(json.load(sys.stdin)['query_auth_token'])")"""
def _is_shell_cell(source: str) -> bool:
    """Return True if the cell contains shell commands.

    A cell counts as shell when any line uses the Jupyter ``!`` prefix, or
    when it has non-comment content and none of it looks like Python.
    """
    cell_lines = source.splitlines()
    if any(line.startswith("!") for line in cell_lines):
        return True
    # No bang lines — check whether the cell looks like Python code.
    stripped = (line.strip() for line in cell_lines)
    has_code = any(line and not line.startswith("#") for line in stripped)
    if not has_code:
        # Nothing but comments/blank lines: not runnable shell.
        return False
    return _PYTHON_RE.search(source) is None
def _extract_bash_fences(source: str) -> list[str]:
    """Return shell lines from fenced ```bash blocks in markdown.

    Blocks that contain angle-bracket placeholders (e.g. ``<your-key>``)
    are skipped because they are documentation, not runnable commands.
    """
    collected: list[str] = []
    for fence in _BASH_FENCE_RE.finditer(source):
        body = fence.group(1).strip()
        if body and not _PLACEHOLDER_RE.search(body):
            collected.extend(body.splitlines())
    return collected
def _postprocess(lines: list[str]) -> list[str]:
    """Rewrite raw extracted lines for CI-friendly execution.

    Applies the rewrites described in the module docstring, in order of the
    branches below; the first matching rule wins for each line. Returns a
    new list; ``lines`` is not mutated.
    """
    out: list[str] = []
    deploy_flags: str | None = None  # set after we see anyscale service deploy
    env_block_emitted = False
    for line in lines:
        # Drop hardcoded BASE_URL / ANYSCALE_API_TOKEN assignments.
        # They are replaced by a dynamic block emitted once.
        if _HARDCODED_ENV_RE.match(line):
            # The replacement needs deploy_flags, so a hardcoded env line seen
            # before any `anyscale service deploy` is dropped with no substitute.
            if deploy_flags and not env_block_emitted:
                out.append(_DYNAMIC_ENV_BLOCK.format(flags=deploy_flags))
                env_block_emitted = True
            continue
        # %env VAR=val → export VAR=val
        if line.startswith("%env "):
            out.append("export " + line[len("%env "):])
            continue
        # serve run <config> → --non-blocking + poll + trap
        m = _SERVE_RUN_RE.match(line)
        if m:
            out.append(_SERVE_RUN_BLOCK.format(config=m.group(1)))
            continue
        # anyscale service deploy <flags> → deploy + wait
        m = _DEPLOY_RE.match(line)
        if m:
            deploy_flags = m.group(1)
            out.append(line)
            out.append(f"anyscale service wait {deploy_flags} --timeout-s 1200")
            continue
        out.append(line)
    # Terminate the Anyscale service at the end of the script.
    # Use --name instead of -f because `terminate` rejects extra fields
    # (applications, logging_config, …) in the full service config YAML.
    if deploy_flags:
        out.append("")
        out.append("# Tear down the Anyscale service")
        out.append(
            f"SERVICE_NAME=$(python3 -c \"import yaml; print(yaml.safe_load(open('{_extract_config_path(deploy_flags)}'))['name'])\")"
        )
        out.append("anyscale service terminate --name \"$SERVICE_NAME\"")
    return out
def nb2sh(notebook_path: str, output_path: str) -> None:
    """Convert a Jupyter notebook into an executable bash script.

    Shell content comes from two sources:
      * markdown cells: fenced ```bash blocks without placeholders;
      * code cells classified as shell by ``_is_shell_cell``, with any
        leading ``!`` stripped from each line.

    All lines pass through ``_postprocess`` before being written to
    *output_path* under a ``set -euo pipefail`` header.
    """
    nb = nbformat.read(notebook_path, as_version=4)
    lines = [
        "#!/usr/bin/env bash",
        "# Auto-generated from README.ipynb — do not edit manually.",
        "set -euo pipefail",
        "",
    ]
    for cell in nb.cells:
        source = cell.source.strip()
        if not source:
            continue
        if cell.cell_type == "markdown":
            bash_lines = _extract_bash_fences(source)
            if bash_lines:
                lines.extend(bash_lines)
                lines.append("")
            continue
        if cell.cell_type != "code":
            continue
        if not _is_shell_cell(source):
            continue
        for line in source.splitlines():
            # Jupyter shell escapes start with "!": drop it for plain bash.
            lines.append(line[1:] if line.startswith("!") else line)
        lines.append("")
    lines = _postprocess(lines)
    with open(output_path, "w") as f:
        f.write("\n".join(lines))
    print(f"Wrote {output_path}")
if __name__ == "__main__":
    # CLI entry point: nb2sh.py <notebook.ipynb> <output.sh>
    if len(sys.argv) != 3:
        print(f"Usage: {sys.argv[0]} <notebook.ipynb> <output.sh>", file=sys.stderr)
        sys.exit(1)
    nb2sh(sys.argv[1], sys.argv[2])
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/multi_agent_a2a/ci/nb2sh.py",
"license": "Apache License 2.0",
"lines": 167,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/ray-overview/examples/multi_agent_a2a/content/agent_runtime/a2a_deployment.py | """
A2A (Agent-to-Agent) deployment utilities for Ray Serve.
This module provides a factory function to wrap any LangChain agent with A2A
protocol endpoints:
- GET /.well-known/agent-card.json (discovery)
- POST /v1/message:send (blocking call)
- POST /v1/message:stream (SSE stream)
- GET /v1/tasks/{id} (poll / fetch history)
"""
from __future__ import annotations
import asyncio
import functools
import inspect
from contextlib import asynccontextmanager
from typing import Any, Callable, Coroutine, Optional, Union
from fastapi import FastAPI, HTTPException
from fastapi.routing import APIRoute, request_response
from ray import serve
from a2a.server.agent_execution import AgentExecutor, RequestContext
from a2a.server.apps.rest.fastapi_app import A2ARESTFastAPIApplication
from a2a.server.events import EventQueue
from a2a.server.request_handlers.default_request_handler import DefaultRequestHandler
from a2a.server.tasks.inmemory_task_store import InMemoryTaskStore
from a2a.server.tasks.task_updater import TaskUpdater
from a2a.types import AgentCard, Part, TextPart
# Type for agent builder functions (sync or async)
AgentBuilder = Union[Callable[[], Any], Callable[[], Coroutine[Any, Any, Any]]]
def _ray_serve_patch_partial_endpoints(app: FastAPI) -> None:
    """
    Ray Serve's FastAPI ingress code assumes `route.endpoint` has `__qualname__`.
    Some third-party FastAPI builders (including A2A SDK) register endpoints as
    `functools.partial`, which are callable but do not define `__qualname__`.
    Ray Serve then crashes during deployment with:
        AttributeError: 'functools.partial' object has no attribute '__qualname__'
    This function replaces those `route.endpoint` objects with small wrapper
    callables that preserve behavior but have a real `__qualname__`.
    """
    # FastAPI keeps routes on both `app.routes` and `app.router.routes`.
    for route in list(getattr(app, "routes", ())):
        endpoint = getattr(route, "endpoint", None)
        if not isinstance(endpoint, functools.partial):
            continue
        original = endpoint
        original_func = original.func
        # Bind `original` as a default argument (`__orig`) so each wrapper
        # closes over its own partial rather than the loop variable's final value.
        @functools.wraps(original_func)
        async def _endpoint_wrapper(*args: Any, __orig: functools.partial = original, **kwargs: Any) -> Any:
            result = __orig(*args, **kwargs)
            # Support both sync and async underlying endpoints.
            if inspect.isawaitable(result):
                return await result
            return result
        # Patch what Ray Serve inspects.
        try:
            route.endpoint = _endpoint_wrapper  # type: ignore[attr-defined]
        except Exception:
            # Best-effort: if we can't patch, Ray will still error.
            continue
        # Keep FastAPI internals consistent for runtime and OpenAPI.
        dependant = getattr(route, "dependant", None)
        if dependant is not None and getattr(dependant, "call", None) is original:
            try:
                dependant.call = _endpoint_wrapper  # type: ignore[attr-defined]
            except Exception:
                pass
        if isinstance(route, APIRoute):
            # Rebuild Starlette handler so the wrapper is used.
            route.app = request_response(route.get_route_handler())
def _to_pascal_case(name: str) -> str:
"""Convert 'weather-agent' / 'research_agent' / 'travel agent' -> 'WeatherAgent'."""
parts: list[str] = []
for chunk in (name or "").replace("_", "-").replace(" ", "-").split("-"):
chunk = chunk.strip()
if not chunk:
continue
parts.append(chunk[:1].upper() + chunk[1:])
return "".join(parts) or "Agent"
async def run_langchain_agent_once(
    agent: Any,
    *,
    user_request: str,
    thread_id: str,
    max_updates: int = 200,
) -> str:
    """
    Run a LangChain agent using stream_mode="updates" and return the last assistant text.
    This matches how the repo's /chat endpoints stream, providing consistency
    across SSE and A2A interfaces.

    Args:
        agent: A LangChain agent exposing ``astream`` (and optionally
            ``aget_state`` / ``get_state`` for the checkpoint fallback).
        user_request: The user's message text.
        thread_id: Conversation thread id used as the checkpoint key.
        max_updates: Safety cap on how many stream updates are consumed.

    Returns:
        The last assistant text observed in the stream; otherwise text pulled
        from the committed checkpoint state; otherwise a placeholder string.
    """
    config = {"configurable": {"thread_id": thread_id}}
    inputs = {"messages": [{"role": "user", "content": user_request}]}
    def _extract_text(obj: Any) -> str:
        """Best-effort extraction of assistant text from various LangChain shapes."""
        try:
            if isinstance(obj, str):
                return obj.strip()
            if isinstance(obj, dict):
                # Common keys across versions/wrappers
                for key in ("content", "output", "result"):
                    if isinstance(obj.get(key), str):
                        return obj[key].strip()
                # Sometimes the final state is nested
                for key in ("messages", "agent", "state", "final"):
                    val = obj.get(key)
                    if isinstance(val, dict):
                        txt = _extract_text(val)
                        if txt:
                            return txt
                    if isinstance(val, list):
                        # Scan newest-to-oldest so the latest message wins.
                        for item in reversed(val):
                            txt = _extract_text(item)
                            if txt:
                                return txt
                return ""
            if isinstance(obj, list):
                for item in reversed(obj):
                    txt = _extract_text(item)
                    if txt:
                        return txt
            # Message-like objects expose text via a `.content` attribute.
            content = getattr(obj, "content", None)
            if isinstance(content, str):
                return content.strip()
        except Exception:
            return ""
        return ""
    last_text = ""
    n = 0
    async for update in agent.astream(inputs, config=config, stream_mode="updates"):
        n += 1
        if n > max_updates:
            break
        if isinstance(update, dict):
            txt = _extract_text(update)
            if txt:
                # Keep overwriting so the final assistant text wins.
                last_text = txt
    if last_text:
        return last_text
    # Fallback: read the committed checkpoint state instead of re-invoking,
    # which would duplicate the user message and re-run tool calls.
    try:
        if hasattr(agent, "aget_state"):
            state = await agent.aget_state(config)
        elif hasattr(agent, "get_state"):
            state = await asyncio.to_thread(agent.get_state, config)
        else:
            state = None
        if state and hasattr(state, "values"):
            txt = _extract_text(state.values)
            if txt:
                return txt
    except Exception:
        pass
    return "(no final text captured from stream updates)"
def create_a2a_app(
    build_agent_fn: AgentBuilder,
    card: AgentCard,
) -> FastAPI:
    """
    Create a FastAPI app that wraps a LangChain agent with A2A protocol endpoints.
    Args:
        build_agent_fn: Function that returns a LangChain agent (sync or async)
        card: Agent discovery card with name, description, version, skills
    Returns:
        Configured FastAPI application with A2A endpoints
    """
    # Lazy-initialized agent state shared by the closures below.
    agent_ready = False
    agent_obj: Optional[Any] = None
    agent_init_error: Optional[str] = None
    agent_init_lock = asyncio.Lock()
    async def _init_agent() -> Any:
        """Build (or rebuild) the agent, recording any failure message."""
        nonlocal agent_ready, agent_obj, agent_init_error
        agent_init_error = None
        agent_ready = False
        try:
            maybe = build_agent_fn()
            # Builder may be sync or async.
            agent_obj = await maybe if asyncio.iscoroutine(maybe) else maybe
            agent_ready = True
            return agent_obj
        except Exception as e:
            agent_obj = None
            agent_ready = False
            agent_init_error = f"{type(e).__name__}: {e}"
            raise
    async def _ensure_agent() -> Any:
        """Return the agent, initializing on first use (double-checked locking)."""
        nonlocal agent_ready, agent_obj, agent_init_error
        if agent_ready and agent_obj is not None:
            return agent_obj
        async with agent_init_lock:
            if agent_ready and agent_obj is not None:
                return agent_obj
            try:
                return await _init_agent()
            except Exception:
                # Map init failures to 503 so callers can retry later.
                detail = agent_init_error or "Agent initialization failed."
                raise HTTPException(status_code=503, detail=detail)
    class LangChainAgentExecutor(AgentExecutor):
        """A2A AgentExecutor adapter for a LangChain agent."""
        async def execute(self, context: RequestContext, event_queue: EventQueue) -> None:
            # Run one A2A task end-to-end: submit -> work -> complete/failed.
            a = await _ensure_agent()
            user_text = context.get_user_input() or ""
            task_id = context.task_id or (context.message.task_id if context.message else None)
            context_id = context.context_id or (context.message.context_id if context.message else None)
            # Fall back to stable placeholders so TaskUpdater always has ids.
            task_id = (task_id or "").strip() or "unknown-task"
            context_id = (context_id or "").strip() or task_id
            updater = TaskUpdater(event_queue=event_queue, task_id=task_id, context_id=context_id)
            await updater.submit()
            await updater.start_work()
            try:
                text = await run_langchain_agent_once(
                    a,
                    user_request=user_text,
                    thread_id=context_id,
                )
                msg = updater.new_agent_message(parts=[Part(root=TextPart(text=text))])
                await updater.complete(msg)
            except Exception as e:
                # Surface the exception in the task status message for debuggability.
                msg = updater.new_agent_message(
                    parts=[Part(root=TextPart(text=f"{type(e).__name__}: {e}"))]
                )
                await updater.failed(msg)
                raise
        async def cancel(self, context: RequestContext, event_queue: EventQueue) -> None:
            # Mark the task as canceled with a short status message.
            task_id = context.task_id or (context.message.task_id if context.message else None)
            context_id = context.context_id or (context.message.context_id if context.message else None)
            task_id = (task_id or "").strip() or "unknown-task"
            context_id = (context_id or "").strip() or task_id
            updater = TaskUpdater(event_queue=event_queue, task_id=task_id, context_id=context_id)
            msg = updater.new_agent_message(parts=[Part(root=TextPart(text="Canceled"))])
            await updater.cancel(msg)
    @asynccontextmanager
    async def lifespan(app: FastAPI):
        """Eagerly initialize the agent on startup; tear it down on shutdown."""
        nonlocal agent_ready, agent_obj
        # Best-effort eager init
        try:
            await _init_agent()
        except Exception:
            pass  # Keep serving; execution will return 503
        try:
            yield
        finally:
            agent_ready = False
            try:
                if hasattr(agent_obj, "aclose"):
                    await agent_obj.aclose()
            except Exception:
                pass
            agent_obj = None
    # Build official A2A REST app (HTTP+JSON), and then attach repo-specific endpoints.
    task_store = InMemoryTaskStore()
    http_handler = DefaultRequestHandler(agent_executor=LangChainAgentExecutor(), task_store=task_store)
    a2a_app = A2ARESTFastAPIApplication(agent_card=card, http_handler=http_handler).build(
        title=card.name,
        description=card.description,
        lifespan=lifespan,
    )
    @a2a_app.get("/health")
    async def health():
        """Report readiness; attempts lazy init when the agent is not ready."""
        if agent_ready:
            return {"status": "healthy", "ready": True}
        try:
            await _ensure_agent()
            return {"status": "healthy", "ready": True}
        except HTTPException:
            payload = {"status": "starting", "ready": False}
            if agent_init_error:
                payload["status"] = "error"
                payload["error"] = agent_init_error
            return payload
    return a2a_app
def create_a2a_deployment(
    build_agent_fn: AgentBuilder,
    card: AgentCard,
    *,
    name: str | None = None,
    num_cpus: float = 1,
) -> Any:
    """
    Create a Ray Serve deployment with A2A protocol endpoints.
    Args:
        build_agent_fn: Function that returns a LangChain agent
        card: Agent discovery card
        name: Optional explicit deployment name; defaults to
            "A2A<PascalCasedCardName>Deployment"
        num_cpus: Number of CPUs for the Ray actor
    Returns:
        Bound Ray Serve application ready for deployment
    """
    fastapi_app = create_a2a_app(build_agent_fn, card)
    # Work around Ray Serve's assumption that every route endpoint defines
    # __qualname__ (the A2A SDK registers functools.partial endpoints).
    _ray_serve_patch_partial_endpoints(fastapi_app)
    deployment_name = name or f"A2A{_to_pascal_case(card.name)}Deployment"
    # A fresh class per card gives each agent a distinctly named deployment.
    DeploymentCls = type(deployment_name, (), {})
    Deployment = serve.ingress(fastapi_app)(DeploymentCls)
    Deployment = serve.deployment(
        name=deployment_name,
        ray_actor_options={"num_cpus": num_cpus},
    )(Deployment)
    return Deployment.bind()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/multi_agent_a2a/content/agent_runtime/a2a_deployment.py",
"license": "Apache License 2.0",
"lines": 294,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/ray-overview/examples/multi_agent_a2a/content/agent_runtime/agent_builder.py | """
Shared agent-building helpers.
This repo had multiple agents with very similar boilerplate:
- load config
- build LLM
- (optionally) discover MCP tools
- create a LangChain agent with MemorySaver
This module centralizes that logic so individual agents only specify:
- system prompt
- tools source (MCP endpoints vs explicitly provided tools)
"""
from __future__ import annotations
from typing import Any, Dict, Iterable, List, Optional
from urllib.parse import urljoin
from langchain.agents import create_agent
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
from agent_runtime.config import LLMConfig, MCPEndpoint, load_llm_config
# =============================================================================
# LLM Builder
# =============================================================================
def build_llm(config: LLMConfig, *, streaming: bool = False) -> ChatOpenAI:
    """Construct a ``ChatOpenAI`` client from *config*.

    Args:
        config: LLM settings (model, base URL, API key, temperature, version).
        streaming: Whether to enable token streaming on the client.

    Returns:
        A configured ``ChatOpenAI`` instance.

    Raises:
        ValueError: If the base URL or API key is missing from *config*.
    """
    if not config.openai_base_url:
        raise ValueError(
            "OPENAI_COMPAT_BASE_URL is required (base URL for your OpenAI-compatible LLM service)."
        )
    if not config.openai_api_key:
        raise ValueError("OPENAI_API_KEY is required.")

    # Optional Anyscale version header; None when not configured.
    headers = (
        {"X-ANYSCALE-VERSION": config.anyscale_version}
        if config.anyscale_version
        else None
    )

    # urljoin drops the final path segment of a base URL without a trailing
    # slash ("…/llm" + "v1" -> "…/v1"), so force one before joining:
    # "…/llm/" + "v1" -> "…/llm/v1".
    api_base = urljoin(config.openai_base_url.rstrip("/") + "/", "v1")

    return ChatOpenAI(
        model=config.model,
        base_url=api_base,
        api_key=config.openai_api_key,
        temperature=config.temperature,
        streaming=streaming,
        default_headers=headers,
    )
# =============================================================================
# MCP Tools Discovery
# =============================================================================
async def load_mcp_tools(
    endpoints: List[MCPEndpoint],
    *,
    anyscale_version: str = "",
) -> List[Any]:
    """Discover LangChain tools exposed by the given MCP servers.

    Endpoints without a base URL are skipped, and any discovery failure is
    logged and swallowed so the caller can still start without MCP tools.

    Args:
        endpoints: MCP endpoint configs; each ``endpoint.name`` becomes the
            server key passed to ``MultiServerMCPClient``.
        anyscale_version: Optional value for the ``X-ANYSCALE-VERSION`` header.

    Returns:
        All tools discovered across the configured servers (possibly empty).

    Example:
        >>> endpoint = MCPEndpoint(name="weather", base_url="http://...")
        >>> tools = await load_mcp_tools([endpoint])
    """
    if not endpoints:
        print("[MCP] No MCP endpoints configured; continuing without MCP tools.")
        return []
    try:
        from langchain_mcp_adapters.client import MultiServerMCPClient

        # Assemble one server entry per usable endpoint.
        server_map: Dict[str, Dict[str, Any]] = {}
        for ep in endpoints:
            if not ep.base_url:
                print(f"[MCP] Skipping {ep.name}: no base URL configured")
                continue
            hdrs: Dict[str, str] = {}
            if ep.token:
                hdrs["Authorization"] = f"Bearer {ep.token}"
            if anyscale_version:
                hdrs["X-ANYSCALE-VERSION"] = anyscale_version
            # urljoin drops the last path segment without a trailing slash,
            # so force one before appending "mcp".
            server_map[ep.name] = {
                "url": urljoin(ep.base_url.rstrip("/") + "/", "mcp"),
                "transport": "streamable_http",
                "headers": hdrs,
            }
        if not server_map:
            print("[MCP] No valid MCP servers configured.")
            return []
        client = MultiServerMCPClient(server_map)
        tools = await client.get_tools()
        print(f"\n[MCP] Discovered {len(tools)} tool(s) from {len(server_map)} MCP server(s).")
        for t in tools:
            name = getattr(t, "name", type(t).__name__)
            desc = getattr(t, "description", "") or ""
            print(f" - {name}: {desc[:120]}")
        return tools
    except Exception as exc:
        print(f"[MCP] Skipping MCP tools (error): {exc}")
        return []
# =============================================================================
# Agent Builders
# =============================================================================
async def build_tool_agent(
    *,
    system_prompt: str,
    tools: Iterable[Any],
    model: Optional[str] = None,
    temperature: Optional[float] = None,
    llm_config: Optional[LLMConfig] = None,
) -> Any:
    """Create a LangChain agent from an explicit tool list.

    Args:
        system_prompt: System prompt for the agent.
        tools: Tools to make available to the agent.
        model: Model-name override, used only when *llm_config* is absent.
        temperature: Temperature override, used only when *llm_config* is absent.
        llm_config: Full LLM configuration; takes precedence over
            *model*/*temperature*.

    Returns:
        A LangChain agent with an in-memory checkpointer.
    """
    effective_cfg = llm_config or load_llm_config(model=model, temperature=temperature)
    chat_model = build_llm(effective_cfg)
    checkpointer = MemorySaver()
    return create_agent(
        chat_model,
        list(tools),
        system_prompt=system_prompt,
        checkpointer=checkpointer,
    )
async def build_mcp_agent(
    *,
    system_prompt: str,
    mcp_endpoints: List[MCPEndpoint],
    model: Optional[str] = None,
    temperature: Optional[float] = None,
    llm_config: Optional[LLMConfig] = None,
) -> Any:
    """Create a LangChain agent whose tools are discovered from MCP servers.

    Args:
        system_prompt: System prompt for the agent.
        mcp_endpoints: MCP endpoint configs (each ``name`` is the server key).
        model: Model-name override, used only when *llm_config* is absent.
        temperature: Temperature override, used only when *llm_config* is absent.
        llm_config: Full LLM configuration; takes precedence over
            *model*/*temperature*.

    Returns:
        A LangChain agent with an in-memory checkpointer and MCP tools.
    """
    effective_cfg = llm_config or load_llm_config(model=model, temperature=temperature)
    chat_model = build_llm(effective_cfg)
    discovered = await load_mcp_tools(
        mcp_endpoints, anyscale_version=effective_cfg.anyscale_version
    )
    checkpointer = MemorySaver()
    return create_agent(
        chat_model,
        list(discovered),
        system_prompt=system_prompt,
        checkpointer=checkpointer,
    )
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/multi_agent_a2a/content/agent_runtime/agent_builder.py",
"license": "Apache License 2.0",
"lines": 170,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/ray-overview/examples/multi_agent_a2a/content/agent_runtime/config.py | """
Centralized configuration loading for all agents.
LLM configuration is separate from MCP endpoint configuration.
"""
from __future__ import annotations
import os
from dataclasses import dataclass
from typing import Optional
@dataclass
class LLMConfig:
    """Settings for the OpenAI-compatible LLM backend."""

    model: str = "Qwen/Qwen3-4B-Instruct-2507-FP8"  # model id served by the backend
    temperature: float = 0.01  # sampling temperature
    openai_base_url: str = ""  # base URL of the LLM service (normalized below)
    openai_api_key: str = ""  # API key for the service
    anyscale_version: str = ""  # optional X-ANYSCALE-VERSION header value

    def __post_init__(self):
        # Strip any trailing slash so later urljoin calls behave predictably.
        normalized = self.openai_base_url.rstrip("/")
        self.openai_base_url = normalized
@dataclass
class MCPEndpoint:
    """Connection settings for a single MCP server endpoint."""

    base_url: str  # root URL of the MCP server (normalized below)
    token: str = ""  # optional bearer token for the Authorization header
    name: str = "mcp"  # server key used by MultiServerMCPClient

    def __post_init__(self):
        # Strip any trailing slash so later urljoin calls behave predictably.
        trimmed = self.base_url.rstrip("/")
        self.base_url = trimmed
# Default values (non-sensitive only).
# Kept as strings because they stand in for environment-variable values that
# load_llm_config() reads via os.getenv and converts as needed.
_DEFAULTS = {
    "model": "Qwen/Qwen3-4B-Instruct-2507-FP8",
    "temperature": "0.01",
}
def load_llm_config(
    *,
    model: Optional[str] = None,
    temperature: Optional[float] = None,
) -> LLMConfig:
    """Assemble an ``LLMConfig`` from environment variables with defaults.

    Explicit arguments take precedence over the environment.

    Environment variables:
        - MODEL: LLM model name
        - TEMPERATURE: LLM temperature (0.0-1.0)
        - OPENAI_COMPAT_BASE_URL: Base URL for OpenAI-compatible LLM service
        - OPENAI_API_KEY: API key for the LLM service
        - X_ANYSCALE_VERSION or ANYSCALE_VERSION: Optional version header

    Args:
        model: Override model name (takes precedence over env var).
        temperature: Override temperature (takes precedence over env var).

    Returns:
        LLMConfig instance with loaded values.
    """
    chosen_model = model or os.getenv("MODEL", _DEFAULTS["model"])
    # Parse the env var only when no override is given, so a malformed
    # TEMPERATURE value cannot break callers that pass temperature explicitly.
    if temperature is None:
        chosen_temperature = float(os.getenv("TEMPERATURE", _DEFAULTS["temperature"]))
    else:
        chosen_temperature = temperature
    version_header = os.getenv(
        "X_ANYSCALE_VERSION",
        os.getenv("ANYSCALE_VERSION", ""),
    ).strip()
    return LLMConfig(
        model=chosen_model,
        temperature=chosen_temperature,
        openai_base_url=os.getenv("OPENAI_COMPAT_BASE_URL", "").strip(),
        openai_api_key=os.getenv("OPENAI_API_KEY", "").strip(),
        anyscale_version=version_header,
    )
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/multi_agent_a2a/content/agent_runtime/config.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/ray-overview/examples/multi_agent_a2a/content/agent_runtime/serve_deployment.py | """
Ray Serve deployment factory for LangChain agents.
This module provides a unified way to create Ray Serve deployments for any
LangChain agent, eliminating duplicate FastAPI app and deployment code.
"""
from __future__ import annotations
import asyncio
import json
from contextlib import asynccontextmanager
from typing import Any, AsyncGenerator, Callable, Coroutine, Optional, Union
from uuid import uuid4
from fastapi import FastAPI, Request
from fastapi.encoders import jsonable_encoder
from ray import serve
from starlette.responses import StreamingResponse
# Type for agent builder functions (sync or async)
AgentBuilder = Union[Callable[[], Any], Callable[[], Coroutine[Any, Any, Any]]]
def create_chat_app(
    build_agent_fn: AgentBuilder,
    *,
    title: str = "LangChain Agent",
    description: str = "LangChain agent with streaming SSE support",
) -> FastAPI:
    """
    Create a FastAPI app with a /chat endpoint that streams LangChain updates as SSE.
    This is the core FastAPI app used by all agent deployments. It provides:
    - Async lifespan management for agent initialization/cleanup
    - POST /chat endpoint with SSE streaming
    - Thread ID support for conversation continuity
    Args:
        build_agent_fn: Function that returns a LangChain agent (can be sync or async)
        title: FastAPI app title
        description: FastAPI app description
    Returns:
        Configured FastAPI application
    Example:
        >>> from weather_agent_with_mcp import build_agent
        >>> app = create_chat_app(build_agent, title="Weather Agent")
    """
    @asynccontextmanager
    async def lifespan(app: FastAPI):
        """Initialize agent on startup, cleanup on shutdown."""
        # Support both sync and async build functions
        maybe_coro = build_agent_fn()
        agent = await maybe_coro if asyncio.iscoroutine(maybe_coro) else maybe_coro
        app.state.agent = agent
        try:
            yield
        finally:
            # Close the agent gracefully if it exposes an async close hook.
            if hasattr(agent, "aclose"):
                await agent.aclose()
    fastapi_app = FastAPI(title=title, description=description, lifespan=lifespan)
    @fastapi_app.post("/chat")
    async def chat(request: Request):
        """
        POST /chat
        Body: {"user_request": "<text>", "thread_id": "<optional>", "checkpoint_ns": "<optional>"}
        Streams LangChain 'update' dicts as Server-Sent Events (one JSON object per event).
        SSE frames look like:
            data: {"some": "update"}
        Errors are emitted as:
            event: error
            data: {"error": "ErrorType", "detail": "..."}
        """
        body = await request.json()
        user_request: str = body.get("user_request") or ""
        # Threading and checkpoint identifiers
        thread_id = (
            body.get("thread_id")
            or request.headers.get("X-Thread-Id")
            or str(uuid4())  # New thread per request if none provided
        )
        checkpoint_ns = body.get("checkpoint_ns")  # Optional namespacing
        # Build config for LangChain
        config = {"configurable": {"thread_id": thread_id}}
        if checkpoint_ns:
            config["configurable"]["checkpoint_ns"] = checkpoint_ns
        async def event_stream() -> AsyncGenerator[str, None]:
            agent = request.app.state.agent
            inputs = {"messages": [{"role": "user", "content": user_request}]}
            try:
                # Stream updates from the agent
                async for update in agent.astream(
                    inputs, config=config, stream_mode="updates"
                ):
                    # Convert LangChain objects to JSON-serializable values.
                    safe_update = jsonable_encoder(update)
                    chunk = json.dumps(safe_update, ensure_ascii=False)
                    # Proper SSE framing: "data: <json>\n\n"
                    yield f"data: {chunk}\n\n"
            except asyncio.CancelledError:
                # Client disconnected; exit quietly without sending an error frame
                return
            except Exception as e:
                # Surface one terminal error event and end
                err = {"error": type(e).__name__, "detail": str(e)}
                err_chunk = json.dumps(err, ensure_ascii=False)
                # SSE with a named event for clients that listen for "error"
                yield f"event: error\ndata: {err_chunk}\n\n"
        # Expose thread id so the client can reuse it on the next call
        # Also add headers commonly used for SSE behind proxies
        headers = {
            "X-Thread-Id": thread_id,
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "X-Accel-Buffering": "no",  # Disable buffering in nginx, if present
        }
        return StreamingResponse(
            event_stream(),
            media_type="text/event-stream",
            headers=headers,
        )
    return fastapi_app
def create_agent_deployment(
    build_agent_fn: AgentBuilder,
    *,
    name: str = "LangChainAgent",
    title: str = "LangChain Agent",
    description: str = "LangChain agent with streaming SSE support",
    num_cpus: float = 1,
) -> Any:
    """Build a Ray Serve deployment wrapping a LangChain agent's chat app.

    Creates the FastAPI app (via ``create_chat_app``) and a Serve ingress
    deployment around it in a single step.

    Args:
        build_agent_fn: Function that returns a LangChain agent (sync or async).
        name: Name for the Ray Serve deployment class.
        title: FastAPI app title.
        description: FastAPI app description.
        num_cpus: Number of CPUs reserved for the Ray actor.

    Returns:
        Bound Ray Serve application ready for deployment.

    Example:
        >>> from weather_agent_with_mcp import build_agent
        >>> app = create_agent_deployment(build_agent, name="WeatherAgent")
        >>> # Deploy: serve run module:app
    """
    chat_app = create_chat_app(
        build_agent_fn,
        title=title,
        description=description,
    )
    # NOTE: mutating __name__ after @serve.deployment does NOT reliably rename
    # the Serve deployment, so the name is passed explicitly below.
    ingress_cls = type(
        name,
        (),
        {"__doc__": "Ray Serve deployment that exposes the FastAPI app as ingress."},
    )
    ingress_cls = serve.ingress(chat_app)(ingress_cls)
    deployment = serve.deployment(
        name=name,
        ray_actor_options={"num_cpus": num_cpus},
    )(ingress_cls)
    return deployment.bind()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/multi_agent_a2a/content/agent_runtime/serve_deployment.py",
"license": "Apache License 2.0",
"lines": 156,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/ray-overview/examples/multi_agent_a2a/content/agents/research_agent_with_web_search_mcp.py | """
Research agent that uses a Web Search MCP server (brave_search + fetch_url)
to perform online research and gather sources.
"""
from __future__ import annotations
import os
from agent_runtime.agent_builder import build_mcp_agent
from agent_runtime.config import MCPEndpoint
# ========== MCP CONFIG ==========
# Web Search MCP server endpoint configuration
WEB_SEARCH_MCP_BASE_URL = os.getenv("WEB_SEARCH_MCP_BASE_URL", "").strip().rstrip("/")
WEB_SEARCH_MCP_TOKEN = os.getenv("WEB_SEARCH_MCP_TOKEN", "").strip()
def _web_search_mcp_endpoint() -> MCPEndpoint:
    """Assemble the Web Search MCP endpoint from the module-level env settings."""
    return MCPEndpoint(
        base_url=WEB_SEARCH_MCP_BASE_URL,
        token=WEB_SEARCH_MCP_TOKEN,
        name="web_search",
    )
# ========== SYSTEM PROMPT ==========
PROMPT = (
"You are a careful research assistant.\n"
"\n"
"You can use MCP tools for online research:\n"
"- brave_search(query, num_results)\n"
"- fetch_url(url, max_length, start_index, raw, ignore_robots_txt)\n"
"\n"
"Rules:\n"
"- Break the task into sub-questions.\n"
"- Use brave_search first to find relevant sources.\n"
"- Use fetch_url to read primary sources and confirm details.\n"
"- Don't fabricate. If you can't verify something, say so.\n"
"- In the final answer, include sources as a short bullet list of URLs.\n"
"- Only output the final answer (no hidden thoughts).\n"
)
# ========== BUILD AGENT ==========
async def build_agent():
    """Construct the research agent wired to the Web Search MCP server.

    Returns:
        A LangChain agent whose tools are discovered from the web-search MCP
        endpoint.
    """
    endpoint = _web_search_mcp_endpoint()
    return await build_mcp_agent(system_prompt=PROMPT, mcp_endpoints=[endpoint])
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/multi_agent_a2a/content/agents/research_agent_with_web_search_mcp.py",
"license": "Apache License 2.0",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/ray-overview/examples/multi_agent_a2a/content/agents/travel_agent_with_a2a.py | """
Travel planning agent that orchestrates by calling two downstream agents over A2A:
- Research agent (A2A) -> attractions/logistics/sources
- Weather agent (A2A) -> forecast/packing suggestions
This agent is itself a LangChain agent so it can be served via the existing
Ray Serve patterns (/chat SSE and/or A2A wrapper).
"""
from __future__ import annotations
import os
from langchain_core.tools import tool
from agent_runtime.agent_builder import build_tool_agent
from protocols.a2a_client import a2a_execute_text
# ========== A2A CONFIG ==========
# Downstream A2A agent base URLs (no trailing slash). These should point at:
# http://host:8000/a2a-research
# http://host:8000/a2a-weather
RESEARCH_A2A_BASE_URL = os.getenv(
"RESEARCH_A2A_BASE_URL", "http://127.0.0.1:8000/a2a-research"
).rstrip("/")
WEATHER_A2A_BASE_URL = os.getenv(
"WEATHER_A2A_BASE_URL", "http://127.0.0.1:8000/a2a-weather"
).rstrip("/")
# Timeout in seconds for downstream agent calls
# Note: Travel agent calls both research and weather agents which can be slow
# (research does web searches, weather calls external APIs). 360s provides headroom.
A2A_TIMEOUT_S = float(os.getenv("A2A_TIMEOUT_S", "360"))
# ========== A2A TOOLS ==========
@tool
async def a2a_research(query: str) -> str:
    """Call the Research agent over A2A to gather up-to-date info and sources."""
    # LangChain's @tool uses the docstring above as the tool description shown
    # to the LLM, so it is kept verbatim; only delegate to the shared client.
    reply = await a2a_execute_text(RESEARCH_A2A_BASE_URL, query, timeout_s=A2A_TIMEOUT_S)
    return reply
@tool
async def a2a_weather(query: str) -> str:
    """Call the Weather agent over A2A to get weather/forecast guidance."""
    # LangChain's @tool uses the docstring above as the tool description shown
    # to the LLM, so it is kept verbatim; only delegate to the shared client.
    reply = await a2a_execute_text(WEATHER_A2A_BASE_URL, query, timeout_s=A2A_TIMEOUT_S)
    return reply
# ========== SYSTEM PROMPT ==========
PROMPT = (
"You are a travel planning assistant.\n"
"\n"
"You have TWO tools you must use for every travel-plan request:\n"
"- a2a_research(query): research attractions/logistics/costs/safety and return sources\n"
"- a2a_weather(query): get weather/forecast context for the destination and dates\n"
"\n"
"Rules:\n"
"- Always call BOTH tools at least once before producing the final travel plan.\n"
"- If the user is missing key constraints (dates, budget, origin airport/city, travelers, pace, interests), "
"ask up to 5 concise clarification questions first.\n"
"- Otherwise, produce a structured travel plan with:\n"
" 1) Assumptions & trip summary\n"
" 2) Day-by-day itinerary (morning/afternoon/evening)\n"
" 3) Weather-aware packing + timing suggestions\n"
" 4) Budget outline (high/medium/low)\n"
" 5) Bookings checklist + local transit notes\n"
" 6) Sources (from research tool output)\n"
"- Be practical and specific; avoid vague filler.\n"
)
# ========== BUILD AGENT ==========
async def build_agent():
    """Construct the travel planning agent with its two A2A delegation tools.

    Returns:
        A LangChain agent configured with the A2A research and weather tools.
    """
    # Slightly higher temperature than the default for more natural planning prose.
    return await build_tool_agent(
        system_prompt=PROMPT,
        tools=[a2a_research, a2a_weather],
        temperature=0.2,
    )
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/multi_agent_a2a/content/agents/travel_agent_with_a2a.py",
"license": "Apache License 2.0",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/ray-overview/examples/multi_agent_a2a/content/agents/weather_agent_with_mcp.py | """
Weather agent that uses an MCP server to answer weather questions.
This agent connects to a Weather MCP server and uses the discovered tools
to provide weather information.
"""
from __future__ import annotations
import os
from agent_runtime.agent_builder import build_mcp_agent
from agent_runtime.config import MCPEndpoint
# ========== MCP CONFIG ==========
# Weather MCP server endpoint configuration
WEATHER_MCP_BASE_URL = os.getenv("WEATHER_MCP_BASE_URL", "").strip().rstrip("/")
WEATHER_MCP_TOKEN = os.getenv("WEATHER_MCP_TOKEN", "").strip()
def _weather_mcp_endpoint() -> MCPEndpoint:
    """Describe the Weather MCP server endpoint from module-level env config."""
    endpoint_config = {
        "name": "weather",
        "base_url": WEATHER_MCP_BASE_URL,
        "token": WEATHER_MCP_TOKEN,
    }
    return MCPEndpoint(**endpoint_config)
# ========== SYSTEM PROMPT ==========
PROMPT = (
"You are a weather assistant that provides accurate weather information "
"using available tools.\n"
"\n"
"Follow this process:\n"
"- Break tasks into sub-questions (e.g., finding coordinates first).\n"
"- Use the weather tools to get current conditions and forecasts.\n"
"- Provide a concise, actionable answer for the user.\n"
"\n"
"Only output final answers or tool calls (no hidden thoughts)."
)
# ========== BUILD AGENT ==========
async def build_agent():
    """Construct the weather agent whose tools come from the Weather MCP server.

    Returns:
        A LangChain agent whose tools are discovered from the MCP endpoint.
    """
    endpoints = [_weather_mcp_endpoint()]
    return await build_mcp_agent(system_prompt=PROMPT, mcp_endpoints=endpoints)
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/multi_agent_a2a/content/agents/weather_agent_with_mcp.py",
"license": "Apache License 2.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/ray-overview/examples/multi_agent_a2a/content/llm/llm_deploy_qwen.py | from ray.serve.llm import LLMConfig, build_openai_app
# vLLM-backed, OpenAI-compatible LLM service configuration for Ray Serve.
llm_config = LLMConfig(
    model_loading_config=dict(
        # The name your clients will use in the OpenAI-compatible API.
        model_id="Qwen/Qwen3-4B-Instruct-2507-FP8",
        # Hugging Face repo to pull from.
        model_source="Qwen/Qwen3-4B-Instruct-2507-FP8",
    ),
    # L4 (Ada) is FP8-friendly. Prefer H100 for best FP8 throughput.
    accelerator_type="L4",
    deployment_config=dict(
        autoscaling_config=dict(
            # Fixed at a single replica; raise for more throughput.
            num_replicas=1,
        )
    ),
    # vLLM engine flags.
    engine_kwargs=dict(
        # Qwen3 supports 262,144 context natively, but that requires more GPU memory.
        max_model_len=65536,
        # Qwen models use custom chat templates; needed for some Hugging Face repos.
        trust_remote_code=True,
        # Fraction of each GPU's memory vLLM may reserve for weights + KV cache.
        gpu_memory_utilization=0.9,
        # Emit structured tool calls; "hermes" matches Qwen's tool-call format.
        enable_auto_tool_choice=True,
        tool_call_parser="hermes",
    ),
)
# Ray Serve application entrypoint, e.g. `serve run llm_deploy_qwen:app`.
app = build_openai_app({"llm_configs": [llm_config]})
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/multi_agent_a2a/content/llm/llm_deploy_qwen.py",
"license": "Apache License 2.0",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/ray-overview/examples/multi_agent_a2a/content/mcps/weather_mcp_server.py | from typing import Any
import httpx
from fastapi import FastAPI
from mcp.server.fastmcp import FastMCP
from ray import serve
from contextlib import asynccontextmanager
# Constants.
NWS_API_BASE = "https://api.weather.gov"
USER_AGENT = "weather-app/1.0"
# Helper functions.
async def make_nws_request(url: str) -> dict[str, Any] | None:
    """GET a National Weather Service API URL; return parsed JSON, or None on any failure."""
    request_headers = {"User-Agent": USER_AGENT, "Accept": "application/geo+json"}
    async with httpx.AsyncClient(timeout=30.0) as client:
        try:
            response = await client.get(url, headers=request_headers)
            response.raise_for_status()
            return response.json()
        except Exception:
            # Best-effort: callers treat None as "data unavailable".
            return None
def format_alert(feature: dict) -> str:
    """Render one NWS alert feature as a human-readable block of text."""
    props = feature["properties"]
    rendered_lines = [
        f"Event: {props.get('event', 'Unknown')}",
        f"Area: {props.get('areaDesc', 'Unknown')}",
        f"Severity: {props.get('severity', 'Unknown')}",
        f"Description: {props.get('description', 'No description available')}",
        f"Instructions: {props.get('instruction', 'No specific instructions provided')}",
    ]
    return "\n".join(rendered_lines)
# Instantiate FastMCP and register tools via decorators.
mcp = FastMCP("weather", stateless_http=True)
@mcp.tool()
async def get_alerts(state: str) -> str:
    """Fetch active alerts for a given state code (for example, 'CA')."""
    data = await make_nws_request(f"{NWS_API_BASE}/alerts/active/area/{state}")
    if not data or "features" not in data:
        return "Unable to fetch alerts or no alerts found."
    active = data["features"]
    if not active:
        return "No active alerts for this state."
    return "\n---\n".join(format_alert(feature) for feature in active)
@mcp.tool()
async def get_forecast(latitude: float, longitude: float) -> str:
    """Fetch a 5-period weather forecast for given latitude and longitude."""
    # Step 1: resolve the coordinate to its NWS gridpoint (holds the forecast URL).
    points_data = await make_nws_request(f"{NWS_API_BASE}/points/{latitude},{longitude}")
    if not points_data or "properties" not in points_data:
        return "Unable to fetch forecast data for this location."
    forecast_url = points_data["properties"].get("forecast")
    if not forecast_url:
        return "No forecast URL found for this location."
    # Step 2: fetch the forecast document itself.
    forecast_data = await make_nws_request(forecast_url)
    if not forecast_data or "properties" not in forecast_data:
        return "Unable to fetch detailed forecast."
    periods = forecast_data["properties"].get("periods", [])
    if not periods:
        return "No forecast periods available."
    rendered = [
        f"{period['name']}:\nTemperature: {period['temperature']}°{period['temperatureUnit']}\n"
        f"Wind: {period['windSpeed']} {period['windDirection']}\n"
        f"Forecast: {period['detailedForecast']}"
        for period in periods[:5]
    ]
    return "\n---\n".join(rendered)
# FastAPI app and Ray Serve setup.
@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan hook: mount the MCP ASGI app and run its session manager."""
    # Mount the MCP app.
    app.mount("/", mcp.streamable_http_app())
    # Enter the session_manager's context.
    # The streamable-HTTP transport requires the session manager to stay
    # running for the whole lifetime of the server.
    async with mcp.session_manager.run():
        yield
fastapi_app = FastAPI(lifespan=lifespan)
@serve.deployment(
    autoscaling_config={
        "min_replicas": 1,
        "max_replicas": 20,
        "target_ongoing_requests": 5,
    },
    # MCP replicas are I/O-bound, so each one only reserves a fraction of a CPU.
    ray_actor_options={"num_cpus": 0.2},
)
@serve.ingress(fastapi_app)
class WeatherMCP:
    """Ray Serve ingress shell; all request handling lives in the mounted FastAPI/MCP app."""
    def __init__(self):
        # No per-replica state: the MCP app is mounted via the FastAPI lifespan hook.
        pass
# Ray Serve entry point.
app = WeatherMCP.bind()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/multi_agent_a2a/content/mcps/weather_mcp_server.py",
"license": "Apache License 2.0",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/ray-overview/examples/multi_agent_a2a/content/mcps/web_search_mcp_server.py | """
Web search + URL fetch MCP server deployed with Ray Serve.
Tools exposed:
- brave_search: Uses Brave Search API
- fetch_url: Fetches a URL and returns simplified markdown (or raw) content, with optional robots.txt checks
Environment variables:
- BRAVE_API_KEY: Brave Search API subscription token
- WEB_SEARCH_USER_AGENT: User-Agent string used for outbound HTTP requests
- WEB_SEARCH_PROXY_URL: Optional proxy URL (passed to httpx AsyncClient)
- WEB_SEARCH_IGNORE_ROBOTS_TXT: "true"/"false" (default: false)
"""
from __future__ import annotations
import json
import os
from contextlib import asynccontextmanager
from typing import Any
from urllib.parse import quote_plus, urlparse, urlunparse
import httpx
from fastapi import FastAPI
from mcp.server.fastmcp import FastMCP
from protego import Protego
from ray import serve
# ----------------------------------------------------------------------
# Config
# ----------------------------------------------------------------------
BRAVE_API_KEY = os.getenv("BRAVE_API_KEY", "")
DEFAULT_USER_AGENT = "ModelContextProtocol/1.0 (WebSearch; +https://github.com/modelcontextprotocol/servers)"
USER_AGENT = os.getenv("WEB_SEARCH_USER_AGENT", DEFAULT_USER_AGENT)
PROXY_URL = os.getenv("WEB_SEARCH_PROXY_URL") or None
IGNORE_ROBOTS_TXT = (os.getenv("WEB_SEARCH_IGNORE_ROBOTS_TXT", "false").strip().lower() in {"1", "true", "yes"})
BRAVE_SEARCH_URL = "https://api.search.brave.com/res/v1/web/search"
# ----------------------------------------------------------------------
# Helpers
# ----------------------------------------------------------------------
def _get_robots_txt_url(url: str) -> str:
parsed = urlparse(url)
return urlunparse((parsed.scheme, parsed.netloc, "/robots.txt", "", "", ""))
async def _check_may_fetch_url(url: str, user_agent: str) -> None:
    """Raise ValueError if robots.txt forbids fetching this URL for the given user agent.

    Policy encoded below:
        - robots.txt unreachable (network error)        -> block (conservative).
        - robots.txt 4xx other than 401/403 (missing)   -> allow.
        - robots.txt 401/403 or 5xx                     -> block.
        - robots.txt parses and disallows the URL       -> block.
    """
    robots_url = _get_robots_txt_url(url)
    async with httpx.AsyncClient(proxy=PROXY_URL, timeout=30.0, follow_redirects=True) as client:
        try:
            resp = await client.get(robots_url, headers={"User-Agent": user_agent})
        except Exception as e:
            # If robots.txt can't be fetched due to connection issues, be conservative and block.
            raise ValueError(f"Failed to fetch robots.txt ({robots_url}): {e!r}")
    # If robots.txt is missing (404) or other 4xx besides auth, allow.
    if 400 <= resp.status_code < 500 and resp.status_code not in (401, 403):
        return
    if resp.status_code in (401, 403):
        raise ValueError(
            f"Robots check blocked: fetching robots.txt ({robots_url}) returned HTTP {resp.status_code}."
        )
    if resp.status_code >= 500:
        raise ValueError(f"Robots check blocked: fetching robots.txt ({robots_url}) returned HTTP {resp.status_code}.")
    robots_txt = resp.text
    # Strip comment lines before parsing; Protego handles the rest of the grammar.
    processed = "\n".join(line for line in robots_txt.splitlines() if not line.strip().startswith("#"))
    parser = Protego.parse(processed)
    if not parser.can_fetch(url, user_agent):
        raise ValueError(
            "Robots check blocked: site robots.txt disallows fetching this URL for the current user agent. "
            f"robots.txt={robots_url} url={url}"
        )
def _extract_markdown_from_html(html: str) -> str:
    """
    Convert HTML to simplified markdown using Readability.
    Falls back to a short error string if parsing fails.
    """
    try:
        # Imported lazily so the server can start even if these optional
        # dependencies are missing; any failure degrades to an error string.
        import markdownify
        import readabilipy.simple_json
        ret = readabilipy.simple_json.simple_json_from_html_string(html, use_readability=True)
        if not ret.get("content"):
            return "<error>Page failed to be simplified from HTML</error>"
        return markdownify.markdownify(ret["content"], heading_style=markdownify.ATX)
    except Exception:
        # Covers ImportError as well as parser failures.
        return "<error>Failed to simplify HTML to markdown</error>"
def _truncate(content: str, *, max_length: int, start_index: int) -> tuple[str, str]:
"""
Returns (chunk, suffix_note).
"""
original_length = len(content)
if start_index >= original_length:
return "<error>No more content available.</error>", ""
chunk = content[start_index : start_index + max_length]
if not chunk:
return "<error>No more content available.</error>", ""
remaining = original_length - (start_index + len(chunk))
if len(chunk) == max_length and remaining > 0:
next_start = start_index + len(chunk)
return (
chunk,
f"\n\n<error>Content truncated. Call fetch_url with start_index={next_start} to get more.</error>",
)
return chunk, ""
async def _http_get_text(url: str, *, user_agent: str, timeout_s: float = 30.0, headers: dict | None = None) -> tuple[str, str]:
    """
    Fetch *url* and return (body_text, content_type_header).
    """
    merged_headers = {"User-Agent": user_agent, **(headers or {})}
    async with httpx.AsyncClient(proxy=PROXY_URL, timeout=timeout_s, follow_redirects=True) as client:
        response = await client.get(url, headers=merged_headers)
        response.raise_for_status()
        content_type = response.headers.get("content-type") or ""
        return response.text, content_type
# ----------------------------------------------------------------------
# MCP server
# ----------------------------------------------------------------------
mcp = FastMCP("web_search", stateless_http=True)
@mcp.tool()
async def brave_search(query: str, num_results: int = 10) -> str:
    """
    Search the web through the Brave Search API.
    Requires env var:
        - BRAVE_API_KEY
    Args:
        query: Search query string.
        num_results: Total number of results to return (1-20).
    Returns:
        JSON string: [{"title": "...", "link": "...", "snippet": "..."}, ...]
    """
    # Validate inputs before spending an API call.
    if not query.strip():
        return "Query must be non-empty."
    num_results = int(num_results)
    if num_results < 1:
        return "num_results must be >= 1."
    num_results = min(num_results, 20)  # Brave API max per request
    request_url = f"{BRAVE_SEARCH_URL}?q={quote_plus(query)}&count={num_results}"
    api_headers = {
        "Accept": "application/json",
        "Accept-Encoding": "gzip",
        "X-Subscription-Token": BRAVE_API_KEY,
    }
    try:
        body, _ = await _http_get_text(request_url, user_agent=USER_AGENT, headers=api_headers)
        payload = json.loads(body)
    except httpx.HTTPStatusError as e:
        return f"Brave search request failed: HTTP {e.response.status_code}"
    except Exception:
        return "Brave search request failed. Check server logs for details."
    # Reduce Brave's verbose schema to the three fields callers care about.
    raw_hits = (payload.get("web") or {}).get("results") or []
    simplified = [
        {
            "title": hit.get("title", ""),
            "link": hit.get("url", ""),
            "snippet": hit.get("description", ""),
        }
        for hit in raw_hits
    ]
    return json.dumps(simplified, ensure_ascii=False)
@mcp.tool()
async def fetch_url(
    url: str,
    max_length: int = 5000,
    start_index: int = 0,
    raw: bool = False,
    ignore_robots_txt: bool = False,
) -> str:
    """
    Fetch a URL and return its content (optionally simplified to markdown).
    Args:
        url: URL to fetch.
        max_length: Maximum number of characters to return (1..1_000_000).
        start_index: Start returning content from this character offset (>=0).
        raw: If true, return raw page content (HTML/text) without simplification.
        ignore_robots_txt: If true, skip robots.txt checks for this request.
    """
    # --- Validate arguments --------------------------------------------
    url = (url or "").strip()
    if not url:
        return "url is required."
    if max_length < 1 or max_length > 1_000_000:
        return "max_length must be between 1 and 1_000_000."
    if start_index < 0:
        return "start_index must be >= 0."
    # --- robots.txt gate (per-request flag or global env override) -----
    if not (IGNORE_ROBOTS_TXT or ignore_robots_txt):
        try:
            await _check_may_fetch_url(url, USER_AGENT)
        except Exception as e:
            return f"Fetch blocked by robots.txt: {e}"
    # --- Fetch ---------------------------------------------------------
    try:
        page_raw, content_type = await _http_get_text(url, user_agent=USER_AGENT)
    except httpx.HTTPStatusError as e:
        return f"Failed to fetch {url} - status code {e.response.status_code}"
    except Exception as e:
        return f"Failed to fetch {url}: {e!r}"
    # Sniff the body as well as the header: some servers send HTML with a
    # missing or wrong content-type.
    is_html = ("<html" in page_raw[:200].lower()) or ("text/html" in content_type.lower()) or (not content_type)
    if is_html and not raw:
        content = _extract_markdown_from_html(page_raw)
        prefix = ""
    else:
        content = page_raw
        # BUG FIX: the original condition (`is_html and raw is False`) could
        # never be true on this branch (that combination takes the branch
        # above), so the explanatory prefix was dead code. Emit the note
        # whenever we fall back to raw content even though the caller did
        # not ask for raw output (i.e. non-simplifiable content).
        prefix = (
            f"Content type {content_type} cannot be simplified to markdown, but here is the raw content:\n"
            if not raw
            else ""
        )
    chunk, suffix = _truncate(content, max_length=max_length, start_index=start_index)
    return f"{prefix}Contents of {url}:\n{chunk}{suffix}"
# ----------------------------------------------------------------------
# FastAPI + Ray Serve
# ----------------------------------------------------------------------
@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan hook: mount the MCP ASGI app and keep its session manager running."""
    app.mount("/", mcp.streamable_http_app())
    # The streamable-HTTP transport needs its session manager alive for the
    # whole server lifetime.
    async with mcp.session_manager.run():
        yield
fastapi_app = FastAPI(lifespan=lifespan)
@serve.deployment(
    autoscaling_config={
        "min_replicas": 1,
        "max_replicas": 20,
        "target_ongoing_requests": 5,
    },
    # Replicas are I/O-bound, so each one only reserves a fraction of a CPU.
    ray_actor_options={"num_cpus": 0.2},
)
@serve.ingress(fastapi_app)
class WebSearchMCP:
    """Ray Serve ingress shell; all request handling lives in the mounted FastAPI/MCP app."""
    def __init__(self):
        # No per-replica state: the MCP app is mounted via the FastAPI lifespan hook.
        pass
app = WebSearchMCP.bind()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/multi_agent_a2a/content/mcps/web_search_mcp_server.py",
"license": "Apache License 2.0",
"lines": 225,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/ray-overview/examples/multi_agent_a2a/content/protocols/a2a_card.py | """
Helpers for creating A2A Agent Cards using the official `a2a-sdk`.
This repo previously used a local "minimal protocol" implementation. We now
use the canonical models from `a2a.types`.
"""
from __future__ import annotations
import os
from typing import Iterable, Sequence
from a2a.types import AgentCapabilities, AgentCard, AgentSkill, TransportProtocol
def build_agent_card(
    *,
    name: str,
    description: str,
    version: str = "0.1.0",
    skills: Sequence[str] | None = None,
    url: str | None = None,
    documentation_url: str | None = None,
    icon_url: str | None = None,
) -> AgentCard:
    """
    Build an `a2a.types.AgentCard` for an agent served over HTTP+JSON (REST).

    The card advertises one "primary" skill whose tags come from *skills*
    (cleaned, de-duplicated, order preserved). The base URL falls back to
    the A2A_AGENT_URL env var, then to a deterministic localhost placeholder
    so the card always validates.
    """
    base_url = (url or os.getenv("A2A_AGENT_URL", "")).strip().rstrip("/")
    if not base_url:
        # In Ray Serve deployments, prefer setting A2A_AGENT_URL per application.
        base_url = "http://127.0.0.1:8000"
    # Keep only non-empty string skills; strip and de-dupe preserving order.
    cleaned = (s.strip() for s in (skills or []) if isinstance(s, str) and s.strip())
    tags = list(dict.fromkeys(cleaned))
    primary_skill = AgentSkill(
        id=f"{name}-primary",
        name=name,
        description=description,
        tags=tags or [name],
    )
    return AgentCard(
        name=name,
        description=description,
        version=version,
        url=base_url,
        preferred_transport=TransportProtocol.http_json.value,
        documentation_url=documentation_url,
        icon_url=icon_url,
        capabilities=AgentCapabilities(
            streaming=True,
            push_notifications=False,
            state_transition_history=False,
        ),
        default_input_modes=["text/plain"],
        default_output_modes=["text/plain"],
        skills=[primary_skill],
    )
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/multi_agent_a2a/content/protocols/a2a_card.py",
"license": "Apache License 2.0",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/ray-overview/examples/multi_agent_a2a/content/protocols/a2a_client.py | """
Client helpers using the official `a2a-sdk` REST transport.
This is used by agents that orchestrate other agents (agent-to-agent calls).
"""
from __future__ import annotations
from typing import Any, Iterable, Optional
from uuid import uuid4
import httpx
from a2a.client.helpers import create_text_message_object
from a2a.client.transports.rest import RestTransport
from a2a.types import Message, MessageSendConfiguration, MessageSendParams, Role, Task, TextPart
def _extract_text_parts(parts: Iterable[Any]) -> str:
    """Concatenate the non-empty TextPart contents of *parts*, one per line."""
    collected: list[str] = []
    for part in parts or []:
        # `Part` is a pydantic RootModel; `.root` holds TextPart/FilePart/DataPart.
        inner = getattr(part, "root", part)
        if not isinstance(inner, TextPart):
            continue
        text = getattr(inner, "text", "")
        if isinstance(text, str) and text.strip():
            collected.append(text.strip())
    return "\n".join(collected).strip()
def _extract_text_from_task(task: Task) -> str:
    """Best available agent text from a Task: status message first, then newest agent history entry."""
    status = getattr(task, "status", None)
    status_message = getattr(status, "message", None) if status is not None else None
    if isinstance(status_message, Message):
        text = _extract_text_parts(getattr(status_message, "parts", []))
        if text:
            return text
    # Fall back to the most recent agent message in the history.
    for entry in reversed(getattr(task, "history", None) or []):
        if isinstance(entry, Message) and getattr(entry, "role", None) == Role.agent:
            text = _extract_text_parts(getattr(entry, "parts", []))
            if text:
                return text
    return ""
def _extract_text_from_result(result: Task | Message) -> str:
    """Extract reply text from either a Message or a Task; '' for anything else."""
    if isinstance(result, Message):
        return _extract_text_parts(getattr(result, "parts", []))
    return _extract_text_from_task(result) if isinstance(result, Task) else ""
async def a2a_execute_text(
    base_url: str,
    input_text: str,
    *,
    timeout_s: float = 240.0,
    headers: Optional[dict[str, str]] = None,
) -> str:
    """
    Send a single text message to an A2A agent over the official REST API.
    Args:
        base_url: Base URL of the downstream agent (no trailing slash), e.g.
            "http://127.0.0.1:8000/a2a-research"
        input_text: User request to send
        timeout_s: Overall read timeout
        headers: Optional extra HTTP headers (auth, etc.)
    Returns:
        Text extracted from the downstream agent's reply.
    Raises:
        ValueError: if base_url or input_text is missing/blank.
        RuntimeError: if the downstream agent returns no extractable text.
    """
    base_url = (base_url or "").rstrip("/")
    if not base_url:
        raise ValueError("Missing downstream A2A base URL.")
    if not isinstance(input_text, str) or not input_text.strip():
        raise ValueError("Missing input text.")
    # Generous read timeout: downstream agents may run long tool chains.
    timeout = httpx.Timeout(connect=10.0, read=timeout_s, write=30.0, pool=30.0)
    async with httpx.AsyncClient(timeout=timeout, headers=headers) as client:
        transport = RestTransport(client, url=base_url)
        message = create_text_message_object(role=Role.user, content=input_text)
        # Ensure deterministic IDs for easier tracing/debugging.
        message.message_id = str(uuid4())
        # blocking=True: wait for the downstream task to complete rather than polling.
        params = MessageSendParams(
            message=message,
            configuration=MessageSendConfiguration(blocking=True),
            metadata=None,
        )
        result = await transport.send_message(params)
        text = _extract_text_from_result(result)
        if not text:
            raise RuntimeError("Downstream A2A agent returned an empty result.")
        return text
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/multi_agent_a2a/content/protocols/a2a_client.py",
"license": "Apache License 2.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/ray-overview/examples/multi_agent_a2a/content/ray_serve_all_deployments.py | """
Unified Ray Serve deployments for all agents and services.
This file consolidates:
- Standard agent deployments (streaming /chat endpoints)
- A2A-wrapped agents (discovery + execute endpoints)
- Multi-app deployment entrypoint
Services:
- /llm -> OpenAI-compatible LLM API
- /mcp-web-search -> MCP web search server
- /mcp-weather -> MCP weather server
- /weather-agent -> Weather agent (uses weather MCP)
- /research-agent -> Research agent (uses web-search MCP)
- /travel-agent -> Travel agent (calls research + weather agents via A2A)
- /a2a-weather -> A2A-wrapped weather agent
- /a2a-research -> A2A-wrapped research agent
- /a2a-travel -> A2A-wrapped travel agent
Usage:
# Deploy individual agents:
serve run ray_serve_all_deployments:weather_app
serve run ray_serve_all_deployments:research_app
serve run ray_serve_all_deployments:travel_app
# Deploy A2A-wrapped agents:
serve run ray_serve_all_deployments:a2a_weather_app
serve run ray_serve_all_deployments:a2a_research_app
serve run ray_serve_all_deployments:a2a_travel_app
# Deploy all services:
serve run serve_multi_config.yaml
"""
from __future__ import annotations
from protocols.a2a_card import build_agent_card
from agent_runtime.a2a_deployment import create_a2a_deployment
from agent_runtime.serve_deployment import create_agent_deployment
# ============================================================
# LLM and MCP Server Imports (for multi-app deployment)
# ============================================================
from mcps.web_search_mcp_server import app as web_search_mcp_app
from mcps.weather_mcp_server import app as weather_mcp_app
# ============================================================
# Agent Cards (A2A Discovery Metadata)
# ============================================================
WEATHER_CARD = build_agent_card(
name="weather-agent",
description="Weather agent that uses a Weather MCP server to answer weather questions.",
version="0.1.0",
skills=["weather", "forecast", "current_conditions"],
)
RESEARCH_CARD = build_agent_card(
name="research-agent",
description="Research agent that uses a Web Search MCP server to gather sources and summarize.",
version="0.1.0",
skills=["web_search", "research", "summarization", "fact_checking"],
)
TRAVEL_CARD = build_agent_card(
name="travel-agent",
description=(
"Travel planning agent that calls the Research and Weather agents over A2A "
"to produce a structured, weather-aware itinerary."
),
version="0.1.0",
skills=["travel_planning", "itinerary", "weather_aware", "logistics"],
)
# ============================================================
# Agent Builder Functions (Lazy Imports)
# ============================================================
def _build_weather_agent():
    """Lazily import and build the weather agent (import deferred to keep module load light)."""
    from agents.weather_agent_with_mcp import build_agent as _build
    return _build()
def _build_research_agent():
    """Lazily import and build the research agent (import deferred to keep module load light)."""
    from agents.research_agent_with_web_search_mcp import build_agent as _build
    return _build()
def _build_travel_agent():
    """Lazily import and build the travel agent (import deferred to keep module load light)."""
    from agents.travel_agent_with_a2a import build_agent as _build
    return _build()
# ============================================================
# Standard Agent Deployments (streaming /chat endpoints)
# ============================================================
weather_agent_app = create_agent_deployment(
_build_weather_agent,
name="WeatherAgentDeployment",
title="Weather Agent",
description="Weather agent that uses a Weather MCP server to answer weather questions.",
)
research_agent_app = create_agent_deployment(
_build_research_agent,
name="ResearchAgentDeployment",
title="Research Agent",
description="Research agent that uses a Web Search MCP server to gather sources and summarize.",
)
travel_agent_app = create_agent_deployment(
_build_travel_agent,
name="TravelAgentDeployment",
title="Travel Agent",
description="Travel planning agent that orchestrates Research and Weather agents via A2A.",
)
# ============================================================
# A2A-Wrapped Agent Deployments (discovery + execute endpoints)
# ============================================================
a2a_weather_app = create_a2a_deployment(_build_weather_agent, WEATHER_CARD)
a2a_research_app = create_a2a_deployment(_build_research_agent, RESEARCH_CARD)
a2a_travel_app = create_a2a_deployment(_build_travel_agent, TRAVEL_CARD)
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/multi_agent_a2a/content/ray_serve_all_deployments.py",
"license": "Apache License 2.0",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/ray-overview/examples/multi_agent_a2a/content/tests/helpers.py | """
Helper functions for tests.
"""
from __future__ import annotations
import json
import os
from typing import Any
from urllib.parse import urljoin
import httpx
from a2a.client.helpers import create_text_message_object
from a2a.client.middleware import ClientCallContext
from a2a.client.transports.rest import RestTransport
from a2a.types import MessageSendConfiguration, MessageSendParams, Role
from mcp import ClientSession
from mcp.client.streamable_http import streamablehttp_client
# Whether to print request/response details
SHOW_IO = os.getenv("TEST_SHOW_IO", "1").strip().lower() in {"1", "true", "yes"}
# Max characters to print for IO
MAX_IO_CHARS = int(os.getenv("TEST_MAX_IO_CHARS", "2000"))
# Anyscale API token for authenticated requests
ANYSCALE_API_TOKEN = os.getenv("ANYSCALE_API_TOKEN", "").strip()
def get_auth_headers() -> dict[str, str]:
    """Get authorization headers if ANYSCALE_API_TOKEN is set."""
    return {"Authorization": f"Bearer {ANYSCALE_API_TOKEN}"} if ANYSCALE_API_TOKEN else {}
def create_http_client() -> httpx.AsyncClient:
    """Create an httpx AsyncClient pre-loaded with auth headers (if any)."""
    return httpx.AsyncClient(headers=get_auth_headers())
def make_url(base: str, path: str) -> str:
    """Join *base* and *path*, tolerating stray slashes on either side."""
    normalized_base = base.rstrip("/") + "/"
    return urljoin(normalized_base, path.lstrip("/"))
def truncate(text: str, max_chars: int = MAX_IO_CHARS) -> str:
    """Truncate *text* to roughly *max_chars*, noting the original length."""
    if len(text) <= max_chars:
        return text
    head = text[: max_chars - 20]
    return f"{head}... ({len(text)} chars total)"
def format_json(obj: Any, max_chars: int = MAX_IO_CHARS) -> str:
    """Format *obj* as pretty-printed JSON (str() fallback), truncated."""
    if obj is None:
        return ""
    try:
        rendered = json.dumps(obj, indent=2, ensure_ascii=False)
    except Exception:
        # Non-serializable objects fall back to their repr/str form.
        rendered = str(obj)
    return truncate(rendered, max_chars)
def print_io(url: str | None = None, request: Any = None, response: Any = None) -> None:
    """Print request/response details if SHOW_IO is enabled.

    Args:
        url: Request URL, if any.
        request: Request payload (rendered as truncated JSON).
        response: Response payload (rendered as truncated JSON).
    """
    # Fix: `url` defaults to None, so annotate it as `str | None` rather
    # than the misleading bare `str`.
    if not SHOW_IO:
        return
    print("  IO:")
    if url:
        print(f"    URL: {url}")
    if request is not None:
        print(f"    Request: {format_json(request)}")
    if response is not None:
        print(f"    Response: {format_json(response)}")
def print_result(passed: bool, name: str, detail: str = "", duration_ms: float = 0) -> bool:
    """Print one test outcome line (plus optional detail) and echo *passed* back."""
    label, mark = ("PASS", "✓") if passed else ("FAIL", "✗")
    suffix = f" ({duration_ms:.0f}ms)" if duration_ms > 0 else ""
    print(f"  [{label}] {mark} {name}{suffix}")
    if detail:
        print(f"    {detail[:150]}")
    return passed
async def read_sse_stream(
    client: httpx.AsyncClient,
    url: str,
    payload: dict,
    timeout_s: float,
) -> tuple[bool, str, list[str]]:
    """
    Read SSE stream from agent /chat endpoints.

    Stops early after 3 data frames (smoke test), or as soon as the data
    payload following an `event: error` frame arrives.

    Returns:
        (success, last_data_or_error, list_of_data_frames)
    """
    data_count = 0
    last_data = ""
    error_event = False
    error_detail = ""
    frames: list[str] = []
    timeout = httpx.Timeout(connect=10.0, read=timeout_s, write=30.0, pool=30.0)
    async with client.stream("POST", url, json=payload, timeout=timeout) as response:
        response.raise_for_status()
        async for line in response.aiter_lines():
            if not line:
                continue
            # Check for error event
            # (the error's payload arrives on the *next* `data:` line).
            if line.startswith("event:"):
                event_type = line.split(":", 1)[1].strip()
                if event_type == "error":
                    error_event = True
                continue
            # Parse data frames
            if line.startswith("data:"):
                data = line.split(":", 1)[1].strip()
                if data:
                    data_count += 1
                    last_data = data
                    # Keep at most the first 3 frames for diagnostics.
                    if len(frames) < 3:
                        frames.append(data)
                    if error_event:
                        error_detail = data
                        break
            # Stop after a few frames (smoke test)
            if data_count >= 3:
                break
    if error_event:
        return False, error_detail or "SSE error event received", frames
    if data_count == 0:
        return False, "No SSE data frames received", frames
    return True, last_data, frames
async def mcp_list_tools(mcp_url: str, timeout_s: float) -> list[str]:
    """
    Connect to MCP server and list available tools.

    Args:
        mcp_url: Streamable-HTTP MCP endpoint URL.
        timeout_s: Connect/request timeout; SSE read timeout is twice this.

    Returns:
        List of tool names
    """
    headers = get_auth_headers() or None
    async with streamablehttp_client(
        mcp_url.rstrip("/"),
        headers=headers,
        timeout=timeout_s,
        sse_read_timeout=timeout_s * 2,
    ) as (read_stream, write_stream, _):
        async with ClientSession(read_stream, write_stream) as session:
            await session.initialize()
            tools_result = await session.list_tools()
            # Keep only well-formed, non-empty string names.
            names = []
            for tool in getattr(tools_result, "tools", []) or []:
                name = getattr(tool, "name", None)
                if isinstance(name, str) and name:
                    names.append(name)
            return names
def _model_dump_any(obj: Any) -> Any:
"""Best-effort conversion of pydantic/SDK objects to plain Python structures."""
if obj is None:
return None
dump = getattr(obj, "model_dump", None)
if callable(dump):
try:
return dump()
except Exception:
pass
to_dict = getattr(obj, "dict", None)
if callable(to_dict):
try:
return to_dict()
except Exception:
pass
return obj
def _extract_text_from_mcp_result(result_dump: Any) -> str:
"""
Extract human-readable text from a MCP CallToolResult-like dump.
Works with common MCP SDK shapes: {"content": [{"type":"text","text":"..."}], ...}
"""
if not isinstance(result_dump, dict):
return ""
content = result_dump.get("content")
if not isinstance(content, list):
return ""
parts: list[str] = []
for item in content:
if isinstance(item, dict):
text = item.get("text")
if isinstance(text, str) and text.strip():
parts.append(text.strip())
return "\n".join(parts).strip()
async def mcp_call_tool(mcp_url: str, tool_name: str, arguments: dict, timeout_s: float) -> tuple[dict, str]:
    """
    Connect to MCP server and call a tool.

    Args:
        mcp_url: Streamable-HTTP MCP endpoint URL.
        tool_name: Name of the tool to invoke.
        arguments: Tool arguments, passed through verbatim.
        timeout_s: Connect/request timeout; SSE read timeout is twice this.

    Returns:
        (result_dump, extracted_text)
    """
    headers = get_auth_headers() or None
    async with streamablehttp_client(
        mcp_url.rstrip("/"),
        headers=headers,
        timeout=timeout_s,
        sse_read_timeout=timeout_s * 2,
    ) as (read_stream, write_stream, _):
        async with ClientSession(read_stream, write_stream) as session:
            await session.initialize()
            result = await session.call_tool(tool_name, arguments)
            result_dump = _model_dump_any(result)
            # Normalize non-dict dumps so callers can always treat the result as a mapping.
            if not isinstance(result_dump, dict):
                result_dump = {"result": result_dump}
            return result_dump, _extract_text_from_mcp_result(result_dump)
async def a2a_execute(
    client: httpx.AsyncClient,
    base_url: str,
    input_text: str,
    timeout_s: float,
) -> tuple[dict, dict]:
    """
    Execute an A2A call via the official REST API (`a2a-sdk`).

    Args:
        client: Shared httpx client used by the REST transport.
        base_url: Agent base URL (trailing slash is stripped).
        input_text: User message text to send.
        timeout_s: Read-timeout budget for the blocking call.

    Returns:
        (request_payload, response_data)

    Raises:
        ValueError: If `base_url` is empty after normalization.
    """
    base_url = (base_url or "").rstrip("/")
    if not base_url:
        raise ValueError("Missing A2A base URL")
    # Only the read timeout carries the caller's budget; connect/write/pool are fixed.
    timeout = httpx.Timeout(connect=10.0, read=timeout_s, write=30.0, pool=30.0)
    # NOTE(review): state["http_kwargs"] appears to forward per-call httpx
    # kwargs through the SDK — confirm against a2a-sdk docs.
    ctx = ClientCallContext(state={"http_kwargs": {"timeout": timeout}})
    message = create_text_message_object(role=Role.user, content=input_text)
    params = MessageSendParams(
        message=message,
        # blocking=True: wait for the final result instead of polling a task.
        configuration=MessageSendConfiguration(blocking=True),
        metadata=None,
    )
    transport = RestTransport(client, url=base_url)
    result = await transport.send_message(params, context=ctx)
    request_payload = _model_dump_any(params)
    response_data = _model_dump_any(result)
    # Normalize non-dict dumps so callers can rely on dict shapes.
    if not isinstance(request_payload, dict):
        request_payload = {"request": request_payload}
    if not isinstance(response_data, dict):
        response_data = {"response": response_data}
    return request_payload, response_data
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/multi_agent_a2a/content/tests/helpers.py",
"license": "Apache License 2.0",
"lines": 221,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:doc/source/ray-overview/examples/multi_agent_a2a/content/tests/run_all.py | """
Run all tests for the multi-agent stack.
Usage:
python -m tests.run_all
# Or directly:
python tests/run_all.py
Environment variables:
BASE_URL - Base URL for Ray Serve (default: http://127.0.0.1:8000)
LLM_MODEL_ID - Model ID for LLM tests (default: Qwen/Qwen3-4B-Instruct-2507-FP8)
TEST_TIMEOUT_SECONDS - Base timeout in seconds (default: 60)
TEST_SHOW_IO - Show request/response details (default: 1)
TEST_MAX_IO_CHARS - Max chars to print (default: 2000)
"""
from __future__ import annotations
import asyncio
import os
import sys
import time
from pathlib import Path
import httpx
# Allow running this file directly via: `python tests/run_all.py`
# (ensures repo root is on sys.path so `tests.*` imports work)
_REPO_ROOT = Path(__file__).resolve().parents[1]
if str(_REPO_ROOT) not in sys.path:
sys.path.insert(0, str(_REPO_ROOT))
from tests import test_a2a, test_agents_sse, test_llm, test_mcp
from tests.helpers import create_http_client
# Base URL for the Ray Serve HTTP proxy
BASE_URL = os.getenv("BASE_URL", "http://127.0.0.1:8000").rstrip("/")
# Whether to print request/response details
SHOW_IO = os.getenv("TEST_SHOW_IO", "1").strip().lower() in {"1", "true", "yes"}
# Max characters to print for IO
MAX_IO_CHARS = int(os.getenv("TEST_MAX_IO_CHARS", "2000"))
def print_banner():
    """Print the header shown at the start of a test run."""
    separator = "=" * 60
    print()
    print(separator)
    print("MULTI-AGENT STACK SMOKE TEST")
    print(separator)
    print(f" Base URL: {BASE_URL}")
    print(f" Show IO: {SHOW_IO} (max_chars={MAX_IO_CHARS})")
    print()
def print_summary(results: list[bool], total_time: float) -> int:
    """Print the final pass/fail summary and return a process exit code.

    Args:
        results: One boolean per executed test (True = passed).
        total_time: Wall-clock duration of the whole run, in seconds.

    Returns:
        0 when every test passed (or no tests ran), 1 otherwise.
    """
    # sum() over booleans counts the True entries.
    passed = sum(1 for r in results if r)
    total = len(results)
    failed = total - passed
    print()
    print("=" * 60)
    print("TEST SUMMARY")
    print("=" * 60)
    print()
    # The two branches differ only in the headline and the Failed line,
    # so the shared tail is printed once below.
    if failed > 0:
        print(" FAILED")  # was an f-string with no placeholders
        print()
        print(f" Passed: {passed}")
        print(f" Failed: {failed}")
    else:
        print(" ALL TESTS PASSED")  # was an f-string with no placeholders
        print()
        print(f" Passed: {passed}")
    print(f" Total: {total}")
    print(f" Time: {total_time:.2f}s")
    print()
    return 1 if failed > 0 else 0
async def run_tests() -> int:
    """Run all tests and return exit code."""
    all_results: list[bool] = []
    start_time = time.perf_counter()
    print_banner()
    # One shared HTTP client for every suite; suites run sequentially and
    # each returns a list of per-test booleans.
    async with create_http_client() as client:
        # LLM tests
        all_results.extend(await test_llm.run_all(client))
        # MCP tests (manage their own MCP connections, no shared client)
        all_results.extend(await test_mcp.run_all())
        # SSE agent tests
        all_results.extend(await test_agents_sse.run_all(client))
        # A2A agent tests
        all_results.extend(await test_a2a.run_all(client))
    total_time = time.perf_counter() - start_time
    return print_summary(all_results, total_time)
def main() -> int:
    """Main entry point.

    Returns:
        Exit code from the test run; 130 on Ctrl-C, 2 on unexpected errors.
    """
    try:
        return asyncio.run(run_tests())
    except KeyboardInterrupt:
        # 130 is the conventional exit code for SIGINT (128 + 2).
        print("\nInterrupted.")
        return 130
    except Exception as e:
        print(f"Error: {type(e).__name__}: {e}", file=sys.stderr)
        return 2
if __name__ == "__main__":
    sys.exit(main())
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/multi_agent_a2a/content/tests/run_all.py",
"license": "Apache License 2.0",
"lines": 97,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:doc/source/ray-overview/examples/multi_agent_a2a/content/tests/test_a2a.py | """
Tests for A2A (Agent-to-Agent) protocol endpoints.
"""
from __future__ import annotations
import asyncio
import json
import os
import sys
import time
from pathlib import Path
import httpx
# Allow running this file directly via: `python tests/test_a2a.py`
# (ensures repo root is on sys.path so `tests.*` imports work)
_REPO_ROOT = Path(__file__).resolve().parents[1]
if str(_REPO_ROOT) not in sys.path:
sys.path.insert(0, str(_REPO_ROOT))
from tests.helpers import a2a_execute, create_http_client, make_url, print_io, print_result
# Base URL for the Ray Serve HTTP proxy
BASE_URL = os.getenv("BASE_URL", "http://127.0.0.1:8000").rstrip("/")
# Timeout settings (seconds)
TIMEOUT = float(os.getenv("TEST_TIMEOUT_SECONDS", "60"))
async def _test_a2a_health(client: httpx.AsyncClient, base_url: str, name: str) -> bool:
    """Test A2A agent health endpoint.

    Passes only when GET /health returns 2xx JSON with ``"ready": true``.
    """
    url = make_url(base_url, "/health")
    start = time.perf_counter()
    try:
        response = await client.get(url, timeout=httpx.Timeout(TIMEOUT))
        response.raise_for_status()
        data = response.json()
        duration_ms = (time.perf_counter() - start) * 1000
        # `is True` intentionally rejects truthy-but-wrong values like 1 or "yes".
        is_ready = isinstance(data, dict) and data.get("ready") is True
        print_io(url=url, response=data)
        return print_result(is_ready, f"{name} /health", "ready=true" if is_ready else "ready=false", duration_ms)
    except Exception as e:
        # Connection errors, non-2xx status, and bad JSON all count as failures.
        duration_ms = (time.perf_counter() - start) * 1000
        return print_result(False, f"{name} /health", f"{type(e).__name__}: {e}", duration_ms)
async def _test_a2a_agent_card(client: httpx.AsyncClient, base_url: str, name: str) -> bool:
    """Test A2A agent discovery endpoint.

    Passes when the well-known agent card is a JSON object with a non-empty
    ``name`` field.
    """
    url = make_url(base_url, "/.well-known/agent-card.json")
    start = time.perf_counter()
    try:
        response = await client.get(url, timeout=httpx.Timeout(TIMEOUT))
        response.raise_for_status()
        data = response.json()
        duration_ms = (time.perf_counter() - start) * 1000
        is_valid = isinstance(data, dict) and bool(data.get("name"))
        agent_name = data.get("name", "N/A") if isinstance(data, dict) else "N/A"
        print_io(url=url, response=data)
        return print_result(is_valid, f"{name} agent-card", f"Agent: {agent_name}", duration_ms)
    except Exception as e:
        duration_ms = (time.perf_counter() - start) * 1000
        return print_result(False, f"{name} agent-card", f"{type(e).__name__}: {e}", duration_ms)
async def _test_a2a_execute(
    client: httpx.AsyncClient,
    base_url: str,
    name: str,
    prompt: str,
    timeout_multiplier: float = 2.0,
) -> bool:
    """Test A2A agent execute endpoint.

    Sends *prompt* via the A2A REST API and validates the response by kind:
    a ``task`` must have reached state ``completed``; a ``message`` must
    contain at least one part. Any other kind fails.
    """
    start = time.perf_counter()
    try:
        request, data = await a2a_execute(client, base_url, prompt, TIMEOUT * timeout_multiplier)
        duration_ms = (time.perf_counter() - start) * 1000
        kind = data.get("kind")
        if kind == "task":
            # `or {}` guards against status being None in the dump.
            status = (data.get("status") or {}).get("state")
            is_valid = status == "completed"
            detail = f"task.state={status}" if is_valid else json.dumps(data)[:150]
        elif kind == "message":
            # Minimal sanity check: must contain at least one text part.
            parts = data.get("parts") if isinstance(data, dict) else None
            is_valid = isinstance(parts, list) and len(parts) > 0
            detail = "message" if is_valid else json.dumps(data)[:150]
        else:
            # Unknown kind: fail and surface a truncated dump for debugging.
            is_valid = False
            detail = json.dumps(data)[:150]
        print_io(url=make_url(base_url, "/v1/message:send"), request=request, response=data)
        return print_result(is_valid, f"{name} /v1/message:send", detail, duration_ms)
    except Exception as e:
        duration_ms = (time.perf_counter() - start) * 1000
        return print_result(False, f"{name} /v1/message:send", f"{type(e).__name__}: {e}", duration_ms)
async def test_weather_agent(client: httpx.AsyncClient) -> list[bool]:
    """Run the health, discovery, and execute checks for the A2A Weather Agent."""
    print("\n --- A2A Weather Agent ---")
    base_url = make_url(BASE_URL, "/a2a-weather")
    # Awaits inside the list literal run left-to-right, i.e. sequentially.
    return [
        await _test_a2a_health(client, base_url, "weather"),
        await _test_a2a_agent_card(client, base_url, "weather"),
        await _test_a2a_execute(
            client, base_url, "weather",
            "What's the weather like in Palo Alto today?",
            timeout_multiplier=2.0,
        ),
    ]
async def test_research_agent(client: httpx.AsyncClient) -> list[bool]:
    """Run the health, discovery, and execute checks for the A2A Research Agent."""
    print("\n --- A2A Research Agent ---")
    base_url = make_url(BASE_URL, "/a2a-research")
    # Awaits inside the list literal run left-to-right, i.e. sequentially.
    return [
        await _test_a2a_health(client, base_url, "research"),
        await _test_a2a_agent_card(client, base_url, "research"),
        await _test_a2a_execute(
            client, base_url, "research",
            "What is Anyscale Jobs? Keep it concise.",
            timeout_multiplier=3.0,
        ),
    ]
async def test_travel_agent(client: httpx.AsyncClient) -> list[bool]:
    """Run the health, discovery, and execute checks for the A2A Travel Agent."""
    print("\n --- A2A Travel Agent ---")
    base_url = make_url(BASE_URL, "/a2a-travel")
    # Awaits inside the list literal run left-to-right, i.e. sequentially.
    return [
        await _test_a2a_health(client, base_url, "travel"),
        await _test_a2a_agent_card(client, base_url, "travel"),
        await _test_a2a_execute(
            client, base_url, "travel",
            (
                "Create a 2-day Seattle itinerary + packing list for 2 adults in the next week. "
                "Provide: timed itinerary, backup rainy plan, 3 restaurant picks/day, and a packing list tailored to June."
            ),
            timeout_multiplier=6.0,  # Travel agent calls downstream A2A agents
        ),
    ]
async def run_all(client: httpx.AsyncClient) -> list[bool]:
    """Run all A2A tests."""
    header = "=" * 60
    print("\n" + header)
    print("A2A AGENT TESTS")
    print(header)
    # Run each agent suite in order and flatten the per-suite results.
    outcomes: list[bool] = []
    for suite in (test_weather_agent, test_research_agent, test_travel_agent):
        outcomes.extend(await suite(client))
    return outcomes
def _print_summary(results: list[bool]) -> int:
passed = sum(1 for r in results if r)
failed = sum(1 for r in results if not r)
total = len(results)
print("\n" + "=" * 60)
print("A2A TEST SUMMARY")
print("=" * 60)
print(f" Passed: {passed}")
print(f" Failed: {failed}")
print(f" Total: {total}")
return 1 if failed else 0
async def _run_as_script() -> int:
    """Run the A2A suite standalone and return a shell-style exit code."""
    async with create_http_client() as client:
        results = await run_all(client)
        return _print_summary(results)
if __name__ == "__main__":
    # Propagate the exit code: previously the return value of asyncio.run()
    # was discarded, so the process always exited 0 even on failures.
    sys.exit(asyncio.run(_run_as_script()))
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/multi_agent_a2a/content/tests/test_a2a.py",
"license": "Apache License 2.0",
"lines": 151,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:doc/source/ray-overview/examples/multi_agent_a2a/content/tests/test_agents_sse.py | """
Tests for SSE (Server-Sent Events) agent chat endpoints.
"""
from __future__ import annotations
import asyncio
import os
import sys
import time
from pathlib import Path
import httpx
# Allow running this file directly via: `python tests/test_agents_sse.py`
# (ensures repo root is on sys.path so `tests.*` imports work)
_REPO_ROOT = Path(__file__).resolve().parents[1]
if str(_REPO_ROOT) not in sys.path:
sys.path.insert(0, str(_REPO_ROOT))
from tests.helpers import create_http_client, make_url, print_io, print_result, read_sse_stream
# Base URL for the Ray Serve HTTP proxy
BASE_URL = os.getenv("BASE_URL", "http://127.0.0.1:8000").rstrip("/")
# Timeout settings (seconds)
TIMEOUT = float(os.getenv("TEST_TIMEOUT_SECONDS", "60"))
async def test_weather_agent(client: httpx.AsyncClient) -> bool:
    """Test /weather-agent/chat - Weather agent SSE streaming.

    Passes when the SSE stream completes successfully; the number of frames
    received is reported but not asserted.
    """
    print("\n Test: Weather Agent SSE")
    url = make_url(BASE_URL, "/weather-agent/chat")
    payload = {"user_request": "What is the current weather forecast for New York City?"}
    start = time.perf_counter()
    try:
        # Streaming responses get twice the base timeout.
        success, detail, frames = await read_sse_stream(client, url, payload, TIMEOUT * 2)
        duration_ms = (time.perf_counter() - start) * 1000
        result_detail = f"Received {len(frames)} SSE frames" if success else detail[:200]
        print_io(url=url, request=payload, response={"frames": frames})
        return print_result(success, "/weather-agent/chat", result_detail, duration_ms)
    except Exception as e:
        duration_ms = (time.perf_counter() - start) * 1000
        return print_result(False, "/weather-agent/chat", f"{type(e).__name__}: {e}", duration_ms)
async def test_research_agent(client: httpx.AsyncClient) -> bool:
    """Test /research-agent/chat - Research agent SSE streaming.

    Passes when the SSE stream completes successfully; the number of frames
    received is reported but not asserted.
    """
    print("\n Test: Research Agent SSE")
    url = make_url(BASE_URL, "/research-agent/chat")
    payload = {"user_request": "What are the top 3 benefits of solar energy? Keep it brief."}
    start = time.perf_counter()
    try:
        # Streaming responses get twice the base timeout.
        success, detail, frames = await read_sse_stream(client, url, payload, TIMEOUT * 2)
        duration_ms = (time.perf_counter() - start) * 1000
        result_detail = f"Received {len(frames)} SSE frames" if success else detail[:200]
        print_io(url=url, request=payload, response={"frames": frames})
        return print_result(success, "/research-agent/chat", result_detail, duration_ms)
    except Exception as e:
        duration_ms = (time.perf_counter() - start) * 1000
        return print_result(False, "/research-agent/chat", f"{type(e).__name__}: {e}", duration_ms)
async def test_travel_agent(client: httpx.AsyncClient) -> bool:
    """Test /travel-agent/chat - Travel agent SSE streaming (calls A2A agents).

    The prompt pins defaults and forbids follow-up questions so the agent
    produces a deterministic, single-turn answer.
    """
    print("\n Test: Travel Agent SSE")
    url = make_url(BASE_URL, "/travel-agent/chat")
    payload = {
        "user_request": (
            "Create a 1-day San Francisco itinerary for 2 adults. "
            "Defaults: month = January; budget = mid-range; interests = Golden Gate, Fisherman's Wharf, food. "
            "Include weather-aware suggestions. Do not ask questions; use defaults."
        )
    }
    start = time.perf_counter()
    try:
        # Streaming responses get twice the base timeout.
        success, detail, frames = await read_sse_stream(client, url, payload, TIMEOUT * 2)
        duration_ms = (time.perf_counter() - start) * 1000
        result_detail = f"Received {len(frames)} SSE frames" if success else detail[:200]
        print_io(url=url, request=payload, response={"frames": frames})
        return print_result(success, "/travel-agent/chat", result_detail, duration_ms)
    except Exception as e:
        duration_ms = (time.perf_counter() - start) * 1000
        return print_result(False, "/travel-agent/chat", f"{type(e).__name__}: {e}", duration_ms)
async def run_all(client: httpx.AsyncClient) -> list[bool]:
    """Run all SSE agent tests."""
    header = "=" * 60
    print("\n" + header)
    print("SSE AGENT TESTS")
    print(header)
    # Awaits inside the list literal run left-to-right, i.e. sequentially.
    return [
        await test_weather_agent(client),
        await test_research_agent(client),
        await test_travel_agent(client),
    ]
def _print_summary(results: list[bool]) -> int:
passed = sum(1 for r in results if r)
failed = sum(1 for r in results if not r)
total = len(results)
print("\n" + "=" * 60)
print("SSE TEST SUMMARY")
print("=" * 60)
print(f" Passed: {passed}")
print(f" Failed: {failed}")
print(f" Total: {total}")
return 1 if failed else 0
async def _run_as_script() -> int:
    """Run the SSE suite standalone and return a shell-style exit code."""
    async with create_http_client() as client:
        results = await run_all(client)
        return _print_summary(results)
if __name__ == "__main__":
    # Propagate the exit code: previously the return value of asyncio.run()
    # was discarded, so the process always exited 0 even on failures.
    sys.exit(asyncio.run(_run_as_script()))
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/multi_agent_a2a/content/tests/test_agents_sse.py",
"license": "Apache License 2.0",
"lines": 98,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:doc/source/ray-overview/examples/multi_agent_a2a/content/tests/test_llm.py | """
Tests for the LLM service (OpenAI-compatible API).
"""
from __future__ import annotations
import os
import sys
import time
from pathlib import Path
import asyncio
import httpx
# Allow running this file directly via: `python tests/test_llm.py`
# (ensures repo root is on sys.path so `tests.*` imports work)
_REPO_ROOT = Path(__file__).resolve().parents[1]
if str(_REPO_ROOT) not in sys.path:
sys.path.insert(0, str(_REPO_ROOT))
from tests.helpers import create_http_client, make_url, print_io, print_result
# Base URL for the Ray Serve HTTP proxy
BASE_URL = os.getenv("BASE_URL", "http://127.0.0.1:8000").rstrip("/")
# Default model ID for LLM tests
MODEL_ID = os.getenv("LLM_MODEL_ID", "Qwen/Qwen3-4B-Instruct-2507-FP8").strip()
# Timeout settings (seconds)
TIMEOUT = float(os.getenv("TEST_TIMEOUT_SECONDS", "60"))
async def test_list_models(client: httpx.AsyncClient) -> bool:
    """Test GET /llm/v1/models - list available models.

    Passes when the endpoint returns the OpenAI-style shape
    ``{"data": [...]}``; the model count is reported but not asserted.
    """
    print("\n Test: List available models")
    url = make_url(BASE_URL, "/llm/v1/models")
    start = time.perf_counter()
    try:
        response = await client.get(url, timeout=httpx.Timeout(TIMEOUT))
        response.raise_for_status()
        data = response.json()
        duration_ms = (time.perf_counter() - start) * 1000
        is_valid = isinstance(data, dict) and isinstance(data.get("data"), list)
        model_count = len(data.get("data", [])) if is_valid else 0
        print_io(url=url, response=data)
        return print_result(is_valid, "/llm/v1/models", f"Found {model_count} model(s)", duration_ms)
    except Exception as e:
        duration_ms = (time.perf_counter() - start) * 1000
        return print_result(False, "/llm/v1/models", f"{type(e).__name__}: {e}", duration_ms)
async def test_chat_completion(client: httpx.AsyncClient) -> bool:
    """Test POST /llm/v1/chat/completions - chat inference.

    Passes when the response contains a non-empty ``choices`` list; the
    completion text itself is not validated.
    """
    print("\n Test: Chat completion")
    url = make_url(BASE_URL, "/llm/v1/chat/completions")
    payload = {
        "model": MODEL_ID,
        "messages": [{"role": "user", "content": "Tell me a joke about AI"}],
        "max_tokens": 1000,
        # temperature 0 keeps the output as deterministic as the backend allows.
        "temperature": 0.0,
    }
    start = time.perf_counter()
    try:
        # Inference can be slow (e.g. cold start), so the read timeout is tripled.
        timeout = httpx.Timeout(connect=10.0, read=TIMEOUT * 3, write=30.0, pool=30.0)
        response = await client.post(url, json=payload, timeout=timeout)
        response.raise_for_status()
        data = response.json()
        duration_ms = (time.perf_counter() - start) * 1000
        is_valid = (
            isinstance(data, dict)
            and isinstance(data.get("choices"), list)
            and len(data["choices"]) > 0
        )
        print_io(url=url, request=payload, response=data)
        return print_result(is_valid, "/llm/v1/chat/completions", f"Model: {MODEL_ID}", duration_ms)
    except Exception as e:
        duration_ms = (time.perf_counter() - start) * 1000
        return print_result(False, "/llm/v1/chat/completions", f"{type(e).__name__}: {e}", duration_ms)
async def run_all(client: httpx.AsyncClient) -> list[bool]:
    """Run all LLM tests."""
    header = "=" * 60
    print("\n" + header)
    print("LLM SERVICE TESTS")
    print(header)
    # Awaits inside the list literal run left-to-right, i.e. sequentially.
    return [
        await test_list_models(client),
        await test_chat_completion(client),
    ]
def _print_summary(results: list[bool]) -> int:
passed = sum(1 for r in results if r)
failed = sum(1 for r in results if not r)
total = len(results)
print("\n" + "=" * 60)
print("LLM TEST SUMMARY")
print("=" * 60)
print(f" Passed: {passed}")
print(f" Failed: {failed}")
print(f" Total: {total}")
return 1 if failed else 0
async def _run_as_script() -> int:
    """Run the LLM suite standalone and return a shell-style exit code."""
    async with create_http_client() as client:
        results = await run_all(client)
        return _print_summary(results)
if __name__ == "__main__":
    # Propagate the exit code: previously the return value of asyncio.run()
    # was discarded, so the process always exited 0 even on failures.
    sys.exit(asyncio.run(_run_as_script()))
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/multi_agent_a2a/content/tests/test_llm.py",
"license": "Apache License 2.0",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:doc/source/ray-overview/examples/multi_agent_a2a/content/tests/test_mcp.py | """
Tests for MCP (Model Context Protocol) servers.
"""
from __future__ import annotations
import asyncio
import json
import os
import sys
import time
from pathlib import Path
# Allow running this file directly via: `python tests/test_mcp.py`
# (ensures repo root is on sys.path so `tests.*` imports work)
_REPO_ROOT = Path(__file__).resolve().parents[1]
if str(_REPO_ROOT) not in sys.path:
sys.path.insert(0, str(_REPO_ROOT))
from tests.helpers import make_url, mcp_call_tool, mcp_list_tools, print_io, print_result
# Base URL for the Ray Serve HTTP proxy
BASE_URL = os.getenv("BASE_URL", "http://127.0.0.1:8000").rstrip("/")
# Timeout settings (seconds)
TIMEOUT = float(os.getenv("TEST_TIMEOUT_SECONDS", "60"))
async def test_web_search_mcp() -> bool:
    """Test MCP web search server - list tools and execute a real tool call.

    Performs two tool calls: a Brave web search (optional — requires an API
    key server-side) and a URL fetch. Only the fetch result gates pass/fail.
    """
    print("\n Test: Web Search MCP")
    mcp_url = make_url(BASE_URL, "/mcp-web-search/mcp")
    start = time.perf_counter()
    try:
        tools = await mcp_list_tools(mcp_url, TIMEOUT)
        # 1) Web search tool call
        # NOTE(review): when the tool is missing, an empty tool name is still
        # sent to the server — confirm the server rejects it gracefully.
        search_tool = "brave_search" if "brave_search" in tools else ""
        search_args = {"query": "Ray Serve HTTP proxy", "num_results": 3}
        search_dump, search_text = await mcp_call_tool(mcp_url, search_tool, search_args, TIMEOUT)
        search_ok = False
        search_not_configured = False
        try:
            # A successful search returns a JSON list of results as text.
            parsed = json.loads(search_text) if isinstance(search_text, str) else None
            search_ok = isinstance(parsed, list) and len(parsed) > 0
        except Exception:
            # Check if search is simply not configured (not an error)
            search_not_configured = "not configured" in (search_text or "").lower()
        # 2) Web fetch tool call (use a stable, robots-friendly URL)
        fetch_tool = "fetch_url" if "fetch_url" in tools else ""
        fetch_args = {
            "url": "https://example.com",
            "max_length": 2000,
            "start_index": 0,
            "raw": False,
            "ignore_robots_txt": False,
        }
        fetch_dump, fetch_text = await mcp_call_tool(mcp_url, fetch_tool, fetch_args, TIMEOUT)
        # The fetch tool prefixes output with "Contents of <url>".
        fetch_ok = isinstance(fetch_text, str) and ("Contents of https://example.com" in fetch_text)
        duration_ms = (time.perf_counter() - start) * 1000
        # Pass if fetch works; search is optional (requires BRAVE_API_KEY)
        is_valid = fetch_ok
        search_status = "ok" if search_ok else ("not configured" if search_not_configured else "fail")
        detail = f"fetch={fetch_tool}:{'ok' if fetch_ok else 'fail'} search={search_tool}:{search_status}"
        print_io(
            url=mcp_url,
            request={"search": {"tool": search_tool, "arguments": search_args}, "fetch": {"tool": fetch_tool, "arguments": fetch_args}},
            response={"search": search_dump, "fetch": fetch_dump},
        )
        return print_result(is_valid, "MCP web-search tool calls", detail, duration_ms)
    except Exception as e:
        duration_ms = (time.perf_counter() - start) * 1000
        return print_result(False, "MCP web-search tool calls", f"{type(e).__name__}: {e}", duration_ms)
async def test_weather_mcp() -> bool:
    """Test MCP weather server - list tools and execute a real tool call.

    Passes when the tool call returns any non-empty text or dump; the
    forecast content itself is not validated.
    """
    print("\n Test: Weather MCP")
    mcp_url = make_url(BASE_URL, "/mcp-weather/mcp")
    start = time.perf_counter()
    try:
        tools = await mcp_list_tools(mcp_url, TIMEOUT)
        # Prefer forecast (lat/lon) as a deterministic "real" tool call.
        # Falls back to the first listed tool, or "" if none are listed.
        tool_to_call = "get_forecast" if "get_forecast" in tools else (tools[0] if tools else "")
        # NYC coordinates (stable and commonly supported by weather tools)
        arguments = {"latitude": 40.7128, "longitude": -74.0060}
        result_dump, extracted_text = await mcp_call_tool(mcp_url, tool_to_call, arguments, TIMEOUT)
        duration_ms = (time.perf_counter() - start) * 1000
        is_valid = bool(extracted_text) or bool(result_dump)
        detail = f"Called {tool_to_call} (args={list(arguments.keys())})"
        print_io(url=mcp_url, request={"tool": tool_to_call, "arguments": arguments}, response=result_dump)
        return print_result(is_valid, "MCP weather tool call", detail, duration_ms)
    except Exception as e:
        duration_ms = (time.perf_counter() - start) * 1000
        return print_result(False, "MCP weather tool call", f"{type(e).__name__}: {e}", duration_ms)
async def run_all() -> list[bool]:
    """Run all MCP tests."""
    header = "=" * 60
    print("\n" + header)
    print("MCP SERVER TESTS")
    print(header)
    # Awaits inside the list literal run left-to-right, i.e. sequentially.
    return [
        await test_web_search_mcp(),
        await test_weather_mcp(),
    ]
def _print_summary(results: list[bool]) -> int:
passed = sum(1 for r in results if r)
failed = sum(1 for r in results if not r)
total = len(results)
print("\n" + "=" * 60)
print("MCP TEST SUMMARY")
print("=" * 60)
print(f" Passed: {passed}")
print(f" Failed: {failed}")
print(f" Total: {total}")
return 1 if failed else 0
async def _run_as_script() -> int:
    """Run the MCP suite standalone and return a shell-style exit code."""
    results = await run_all()
    return _print_summary(results)
if __name__ == "__main__":
    # Propagate the exit code: previously the return value of asyncio.run()
    # was discarded, so the process always exited 0 even on failures.
    sys.exit(asyncio.run(_run_as_script()))
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/multi_agent_a2a/content/tests/test_mcp.py",
"license": "Apache License 2.0",
"lines": 109,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:release/llm_tests/serve/test_llm_serve_sglang.py | import sys
import pytest
from openai import OpenAI
from ray import serve
from ray._common.test_utils import wait_for_condition
from ray.llm.examples.sglang.modules.sglang_engine import SGLangServer
from ray.serve._private.constants import SERVE_DEFAULT_APP_NAME
from ray.serve.llm import LLMConfig, build_openai_app
from ray.serve.schema import ApplicationStatus
MODEL_ID = "Qwen/Qwen2.5-0.5B-Instruct"
RAY_MODEL_ID = "qwen-0.5b-sglang"
def _app_is_running():
    """Return True once the default Serve application reports RUNNING."""
    try:
        return (
            serve.status().applications[SERVE_DEFAULT_APP_NAME].status
            == ApplicationStatus.RUNNING
        )
    except (KeyError, AttributeError):
        # The app may not be registered yet while deployment is still starting.
        return False
@pytest.fixture(scope="module")
def sglang_client():
    """Start an SGLang server once for all tests in this module.

    Yields an OpenAI client pointed at the local Serve endpoint; the Serve
    app is shut down when the module's tests finish.
    """
    llm_config = LLMConfig(
        model_loading_config={
            "model_id": RAY_MODEL_ID,
            "model_source": MODEL_ID,
        },
        deployment_config={
            # Pin to exactly one replica so the test is deterministic.
            "autoscaling_config": {
                "min_replicas": 1,
                "max_replicas": 1,
            }
        },
        server_cls=SGLangServer,
        engine_kwargs={
            "model_path": MODEL_ID,
            "tp_size": 1,
            "mem_fraction_static": 0.8,
        },
    )
    app = build_openai_app({"llm_configs": [llm_config]})
    serve.run(app, blocking=False)
    # Model download + engine startup can take minutes; poll up to 5 minutes.
    wait_for_condition(_app_is_running, timeout=300)
    # The API key is required by the client but ignored by the local server.
    client = OpenAI(base_url="http://localhost:8000/v1", api_key="fake-key")
    yield client
    serve.shutdown()
def test_sglang_serve_e2e(sglang_client):
    """Verify chat and completions endpoints work end-to-end."""
    chat_resp = sglang_client.chat.completions.create(
        model=RAY_MODEL_ID,
        messages=[{"role": "user", "content": "What is the capital of France?"}],
        max_tokens=64,
        # temperature 0 keeps output as deterministic as the backend allows.
        temperature=0.0,
    )
    # Only assert that some non-blank text came back, not its content.
    assert chat_resp.choices[0].message.content.strip()
    comp_resp = sglang_client.completions.create(
        model=RAY_MODEL_ID,
        prompt="The capital of France is",
        max_tokens=64,
        temperature=0.0,
    )
    assert comp_resp.choices[0].text.strip()
@pytest.fixture(scope="module")
def sglang_embedding_client():
    """Start an SGLang server with is_embedding enabled for embedding tests.

    Same setup as `sglang_client` except the engine runs in embedding mode.
    """
    llm_config = LLMConfig(
        model_loading_config={
            "model_id": RAY_MODEL_ID,
            "model_source": MODEL_ID,
        },
        deployment_config={
            # Pin to exactly one replica so the test is deterministic.
            "autoscaling_config": {
                "min_replicas": 1,
                "max_replicas": 1,
            }
        },
        server_cls=SGLangServer,
        engine_kwargs={
            "model_path": MODEL_ID,
            "tp_size": 1,
            "mem_fraction_static": 0.8,
            # Switches the engine to serve /v1/embeddings instead of generation.
            "is_embedding": True,
        },
    )
    app = build_openai_app({"llm_configs": [llm_config]})
    serve.run(app, blocking=False)
    wait_for_condition(_app_is_running, timeout=300)
    # The API key is required by the client but ignored by the local server.
    client = OpenAI(base_url="http://localhost:8000/v1", api_key="fake-key")
    yield client
    serve.shutdown()
def test_sglang_embeddings(sglang_embedding_client):
    """Verify embeddings endpoint works with single and batch inputs."""
    # Single input
    emb_resp = sglang_embedding_client.embeddings.create(
        model=RAY_MODEL_ID,
        input="Hello world",
    )
    assert emb_resp.data
    assert len(emb_resp.data) == 1
    # Assert a non-empty vector; the embedding dimension is model-dependent.
    assert emb_resp.data[0].embedding
    assert len(emb_resp.data[0].embedding) > 0
    assert emb_resp.usage.prompt_tokens > 0
    # Batch input
    emb_batch_resp = sglang_embedding_client.embeddings.create(
        model=RAY_MODEL_ID,
        input=["Hello world", "How are you"],
    )
    # One embedding per input, in order.
    assert len(emb_batch_resp.data) == 2
    assert emb_batch_resp.data[0].embedding
    assert emb_batch_resp.data[1].embedding
if __name__ == "__main__":
    sys.exit(pytest.main(["-xvs", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "release/llm_tests/serve/test_llm_serve_sglang.py",
"license": "Apache License 2.0",
"lines": 112,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:rllib/core/models/tests/test_encoder_layernorm.py | """Tests for LayerNorm in encoder models with different model_config variations.
Verifies that fcnet_use_layernorm and output_layer_use_layernorm are correctly
applied to encoder networks, including LayerNorm after the last (output) layer.
"""
import dataclasses
import gymnasium as gym
import numpy as np
import pytest
from ray.rllib.algorithms.ppo.torch.default_ppo_torch_rl_module import (
DefaultPPOTorchRLModule,
)
from ray.rllib.core.models.base import ENCODER_OUT
from ray.rllib.core.models.catalog import Catalog
from ray.rllib.core.models.configs import MLPEncoderConfig
from ray.rllib.core.rl_module.default_model_config import DefaultModelConfig
from ray.rllib.core.rl_module.rl_module import RLModuleSpec
from ray.rllib.utils.framework import try_import_torch
torch, nn = try_import_torch()
def count_layernorm_modules(module):
    """Count LayerNorm layers in a module and all of its submodules."""
    total = 0
    # module.modules() walks the module itself plus every nested submodule.
    for submodule in module.modules():
        if isinstance(submodule, nn.LayerNorm):
            total += 1
    return total
def get_layernorm_positions(sequential_net):
    """Get indices and normalized shapes of LayerNorm layers in a Sequential net."""
    # Only direct children are inspected (no recursion into nested modules).
    return [
        (index, tuple(layer.normalized_shape))
        for index, layer in enumerate(sequential_net)
        if isinstance(layer, nn.LayerNorm)
    ]
def get_full_model_config(overrides):
    """Merge overrides with DefaultModelConfig for Catalog compatibility."""
    # Right-hand side of `|` wins, so overrides take precedence over defaults.
    defaults = dataclasses.asdict(DefaultModelConfig())
    return defaults | overrides
# ---------------------------------------------------------------------------
# MLPEncoderConfig parametrized tests
# ---------------------------------------------------------------------------
# Parametrization cases for test_mlp_encoder_config_layernorm. Each dict holds:
#   hidden_layer_use_layernorm / output_layer_use_layernorm — flags under test
#   hidden_layer_dims / output_layer_dim — encoder layer sizes
#   expected_count — total LayerNorm modules expected in the built encoder
#   expected_positions — normalized_shape of each LayerNorm, in layer order
MLP_ENCODER_CONFIGS = [
    pytest.param(
        {
            "hidden_layer_use_layernorm": True,
            "output_layer_use_layernorm": False,
            "hidden_layer_dims": [16, 16],
            "output_layer_dim": 16,
            "expected_count": 2,
            "expected_positions": [(16,), (16,)],
        },
        id="hidden_only_layernorm",
    ),
    pytest.param(
        {
            "hidden_layer_use_layernorm": False,
            "output_layer_use_layernorm": True,
            "hidden_layer_dims": [16],
            "output_layer_dim": 16,
            "expected_count": 1,
            "expected_positions": [(16,)],
        },
        id="output_only_layernorm",
    ),
    pytest.param(
        {
            "hidden_layer_use_layernorm": True,
            "output_layer_use_layernorm": True,
            "hidden_layer_dims": [16],
            "output_layer_dim": 16,
            "expected_count": 2,
            "expected_positions": [(16,), (16,)],
        },
        id="both_hidden_and_output_layernorm",
    ),
    pytest.param(
        {
            "hidden_layer_use_layernorm": False,
            "output_layer_use_layernorm": False,
            "hidden_layer_dims": [16, 16],
            "output_layer_dim": 16,
            "expected_count": 0,
            "expected_positions": [],
        },
        id="no_layernorm",
    ),
]
@pytest.mark.parametrize("config", MLP_ENCODER_CONFIGS)
def test_mlp_encoder_config_layernorm(config):
    """MLPEncoderConfig LayerNorm combinations via parametrize."""
    encoder_config = MLPEncoderConfig(
        input_dims=[10],
        hidden_layer_dims=config["hidden_layer_dims"],
        hidden_layer_activation="relu",
        hidden_layer_use_layernorm=config["hidden_layer_use_layernorm"],
        output_layer_use_layernorm=config["output_layer_use_layernorm"],
        output_layer_dim=config["output_layer_dim"],
        output_layer_activation="relu",
    )
    encoder = encoder_config.build(framework="torch")
    # Count across the whole network (recursive), then check exact positions
    # and shapes within the flat MLP stack.
    layernorm_count = count_layernorm_modules(encoder.net)
    assert layernorm_count == config["expected_count"]
    positions = get_layernorm_positions(encoder.net.mlp)
    assert len(positions) == len(config["expected_positions"])
    for (_, shape), expected_shape in zip(positions, config["expected_positions"]):
        assert shape == expected_shape
def test_mlp_encoder_forward_pass():
    """Verify encoder produces correct output shape with LayerNorm enabled."""
    cfg = MLPEncoderConfig(
        input_dims=[10],
        hidden_layer_dims=[32, 32],
        hidden_layer_activation="relu",
        hidden_layer_use_layernorm=True,
        output_layer_use_layernorm=True,
        output_layer_dim=64,
        output_layer_activation="relu",
    )
    net = cfg.build(framework="torch")
    inputs = torch.randn(8, 10)
    result = net({ENCODER_OUT: None, "obs": inputs})
    # Output must be (batch, output_layer_dim).
    assert result[ENCODER_OUT].shape == (8, 64)
# ---------------------------------------------------------------------------
# Catalog parametrized tests
# ---------------------------------------------------------------------------
# expected_count: LayerNorm modules in the encoder built from a two-hidden-layer
# fcnet ([16, 16]) — one per hidden layer when the flag is on, else zero.
CATALOG_CONFIGS = [
    pytest.param(
        {"fcnet_use_layernorm": True, "expected_count": 2},
        id="fcnet_use_layernorm_true",
    ),
    pytest.param(
        {"fcnet_use_layernorm": False, "expected_count": 0},
        id="fcnet_use_layernorm_false",
    ),
]
@pytest.mark.parametrize("config", CATALOG_CONFIGS)
def test_catalog_encoder_layernorm(config):
    """Catalog passes fcnet_use_layernorm to encoder config."""
    use_ln = config["fcnet_use_layernorm"]
    full_config = get_full_model_config(
        {
            "fcnet_hiddens": [16, 16],
            "fcnet_activation": "relu",
            "fcnet_use_layernorm": use_ln,
        }
    )
    obs_space = gym.spaces.Box(0, 1, (10,), dtype=np.float32)
    act_space = gym.spaces.Box(0, 1, (10,), dtype=np.float32)
    catalog = Catalog(
        observation_space=obs_space,
        action_space=act_space,
        model_config_dict=full_config,
    )
    enc_cfg = catalog._get_encoder_config(
        observation_space=catalog.observation_space,
        model_config_dict=catalog._model_config_dict,
        action_space=catalog.action_space,
    )
    # Both hidden- and output-layer flags must mirror the model-config flag.
    assert enc_cfg.hidden_layer_use_layernorm == use_ln
    assert enc_cfg.output_layer_use_layernorm == use_ln
    built = enc_cfg.build(framework="torch")
    assert count_layernorm_modules(built.net) == config["expected_count"]
# ---------------------------------------------------------------------------
# RLModule model_config parametrized tests
# ---------------------------------------------------------------------------
# Each param is (model_config, expected LayerNorm count in module.encoder).
# The expected counts follow len(fcnet_hiddens) per encoder network, with two
# encoder networks (actor + critic) when vf_share_layers=False and one when
# shared; head_fcnet layers contribute no LayerNorms in these cases.
RL_MODULE_CONFIGS = [
    pytest.param(
        {
            "fcnet_hiddens": [16, 16],
            "head_fcnet_hiddens": [16],
            "vf_share_layers": False,
            "fcnet_use_layernorm": True,
            "fcnet_activation": "relu",
        },
        4,
        id="fcnet_use_layernorm_true",
    ),
    pytest.param(
        {
            "fcnet_hiddens": [16, 16],
            "head_fcnet_hiddens": [16],
            "vf_share_layers": False,
            "fcnet_use_layernorm": False,
            "fcnet_activation": "relu",
        },
        0,
        id="fcnet_use_layernorm_false",
    ),
    pytest.param(
        {
            "fcnet_hiddens": [16],
            "head_fcnet_hiddens": [16],
            "vf_share_layers": False,
            "fcnet_use_layernorm": True,
            "fcnet_activation": "relu",
        },
        2,
        id="single_hidden_layer_with_layernorm",
    ),
    pytest.param(
        {
            "fcnet_hiddens": [16, 16],
            "head_fcnet_hiddens": [16],
            "vf_share_layers": True,
            "fcnet_use_layernorm": True,
            "fcnet_activation": "relu",
        },
        2,
        id="shared_encoder_layernorm",
    ),
    pytest.param(
        {
            "fcnet_hiddens": [16, 16, 16],
            "head_fcnet_hiddens": [16],
            "vf_share_layers": False,
            "fcnet_use_layernorm": True,
            "fcnet_activation": "relu",
        },
        6,
        id="three_hidden_layers_with_layernorm",
    ),
]
@pytest.mark.parametrize("model_config,expected_count", RL_MODULE_CONFIGS)
def test_rl_module_encoder_layernorm(model_config, expected_count):
    """RLModuleSpec with different model_config variations for LayerNorm."""
    obs_space = gym.spaces.Box(0, 1, (10,), dtype=np.float32)
    act_space = gym.spaces.Box(0, 1, (10,), dtype=np.float32)
    spec = RLModuleSpec(
        module_class=DefaultPPOTorchRLModule,
        observation_space=obs_space,
        action_space=act_space,
        model_config=model_config,
    )
    module = spec.build()
    # LayerNorm count in the built encoder must match the expectation.
    assert count_layernorm_modules(module.encoder) == expected_count
def test_rl_module_forward_pass_with_layernorm():
    """Verify forward pass works with fcnet_use_layernorm=True."""
    cfg = {
        "fcnet_hiddens": [32, 32],
        "head_fcnet_hiddens": [16],
        "vf_share_layers": False,
        "fcnet_use_layernorm": True,
        "fcnet_activation": "relu",
    }
    module = RLModuleSpec(
        module_class=DefaultPPOTorchRLModule,
        observation_space=gym.spaces.Box(0, 1, (10,), dtype=np.float32),
        action_space=gym.spaces.Box(0, 1, (10,), dtype=np.float32),
        model_config=cfg,
    ).build()
    outputs = module.forward_inference({"obs": torch.randn(4, 10)})
    # Inference must produce action-distribution inputs for every batch row.
    assert "action_dist_inputs" in outputs
    assert outputs["action_dist_inputs"].shape[0] == 4
# Allow running this test module directly; delegates to pytest.
if __name__ == "__main__":
    import sys
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/core/models/tests/test_encoder_layernorm.py",
"license": "Apache License 2.0",
"lines": 245,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:release/train_tests/benchmark/elastic_training/elastic_e2e.py | import json
import os
import pprint
import time
import ray
import ray.train
from ray._private.test_utils import safe_write_to_results_json
from ray.train.torch import TorchTrainer
from ray.train.v2._internal.util import date_str
from config import cli_to_config
from image_classification.factory import ImageClassificationFactory
from elastic_training.resource_schedule import (
MockResourceAvailabilityUpdater,
ResourceAvailabilityEvent,
generate_schedule,
)
from train_benchmark import (
METRICS_OUTPUT_PATH,
get_datasets_and_data_config,
train_fn_per_worker,
)
def main():
    """Run the elastic-training benchmark under a mock GPU-availability schedule.

    Spins up a zero-CPU actor that withholds/releases GPUs on a generated
    schedule, runs a TorchTrainer against that moving target, then augments
    the recorded metrics with end-to-end time/throughput and writes them to
    the results JSON.
    """
    config = cli_to_config()
    print("\nBenchmark config:\n" + pprint.pformat(config.__dict__, indent=2))
    factory = ImageClassificationFactory(config)
    # Resolve num_workers based on min_workers and max_workers.
    # A (min, max) tuple enables elastic scaling between the two bounds.
    if config.min_workers and config.max_workers:
        num_workers = (config.min_workers, config.max_workers)
    else:
        num_workers = config.num_workers
    # Actor that simulates fluctuating GPU availability.
    updater_actor = ray.remote(num_cpus=0)(MockResourceAvailabilityUpdater).remote(
        resource_key="GPU"
    )
    # Block until the actor's __init__ has completed.
    ray.get(updater_actor.__ray_ready__.remote())
    interval_s = 60 * 5
    schedule = generate_schedule(
        resource_availability_options=[4, 8, 16, 32],
        duration_s=60 * 60,
        interval_s=interval_s,
        seed=777777,
    )
    # Make sure the run can finish at the end of the schedule.
    schedule.append(
        ResourceAvailabilityEvent(
            time_s=schedule[-1].time_s + interval_s, resource_units=32
        )
    )
    execute_schedule_fut = updater_actor.execute_schedule.remote(schedule)
    datasets, data_config = get_datasets_and_data_config(factory)
    start_time = time.perf_counter()
    trainer = TorchTrainer(
        train_loop_per_worker=train_fn_per_worker,
        train_loop_config={"factory": factory},
        scaling_config=ray.train.ScalingConfig(num_workers=num_workers, use_gpu=True),
        run_config=ray.train.RunConfig(
            storage_path=f"{os.environ['ANYSCALE_ARTIFACT_STORAGE']}/train_benchmark/",
            name=f"{config.task}-{date_str(include_ms=True)}",
            # Each availability drop may kill a worker; tolerate one failure
            # per schedule entry.
            failure_config=ray.train.FailureConfig(max_failures=len(schedule)),
        ),
        datasets=datasets,
        dataset_config=data_config,
    )
    trainer.fit()
    end_time = time.perf_counter()
    e2e_time = end_time - start_time
    # Per-iteration metrics were written by the train loop.
    with open(METRICS_OUTPUT_PATH, "r") as f:
        metrics = json.load(f)
    # Includes recovery time across resource updates.
    total_rows_processed = metrics["train/rows_processed-total"]
    metrics["e2e_throughput"] = total_rows_processed / e2e_time
    metrics["e2e_time"] = e2e_time
    safe_write_to_results_json(metrics)
    final_metrics_str = (
        f"\nTotal training time: {e2e_time} seconds\n"
        + "Final metrics:\n"
        + "-" * 80
        + "\n"
        + pprint.pformat(metrics)
        + "\n"
        + "-" * 80
    )
    print(final_metrics_str)
    # Wait for the schedule driver to finish, then release withheld GPUs.
    ray.get(execute_schedule_fut)
    ray.get(updater_actor.shutdown.remote())
if __name__ == "__main__":
    # Workers need to access the working directory module.
    # (Ships the benchmark's parent directory to every Ray worker.)
    ray.init(runtime_env={"working_dir": os.path.dirname(os.path.dirname(__file__))})
    main()
| {
"repo_id": "ray-project/ray",
"file_path": "release/train_tests/benchmark/elastic_training/elastic_e2e.py",
"license": "Apache License 2.0",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:release/train_tests/benchmark/elastic_training/resource_schedule.py | from dataclasses import dataclass
from enum import Enum
import logging
import random
import time
from typing import List
import uuid
import psutil
import ray
from ray.data._internal.cluster_autoscaler import (
ResourceRequestPriority,
get_or_create_autoscaling_coordinator,
)
from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy
from ray.util.state import list_actors
logger = logging.getLogger(__name__)
@ray.remote(num_cpus=0)
def kill_process(pid):
    """Forcibly kill the process with the given PID on the node this task runs on."""
    psutil.Process(pid).kill()
class MockResourceRequestPriority(Enum):
    """Priority level that outranks every real autoscaler resource request."""

    # One above the highest built-in priority so mock requests always win.
    OVERRIDE = ResourceRequestPriority.HIGH.value + 1
@dataclass
class ResourceAvailabilityEvent:
    """A point in a schedule: at ``time_s``, make ``resource_units`` available."""

    # Seconds from schedule start at which this event fires.
    time_s: int
    # Number of resource units (e.g. GPUs) available from this point on.
    resource_units: int
class ResourceAvailabilityUpdater:
    """No-op base interface for driving resource availability over time."""

    def __init__(self, starting_resource_units: int = 0, resource_key: str = "GPU"):
        self._starting_resource_units = starting_resource_units
        self._resource_key = resource_key

    def execute_schedule(self, schedule: List[ResourceAvailabilityEvent]):
        """Apply the availability schedule (no-op in the base class)."""
        pass

    def shutdown(self):
        """Release anything held by the updater (no-op in the base class)."""
        pass
class MockResourceAvailabilityUpdater(ResourceAvailabilityUpdater):
    """Simulates fluctuating availability on a fixed-size cluster.

    Instead of resizing the cluster, it withholds resources by filing
    top-priority dummy requests with the Ray Data autoscaling coordinator:
    each dummy request pins one unit of ``resource_key``. Filing more
    requests shrinks effective availability; cancelling requests grows it.
    """

    def __init__(self, starting_resource_units: int = 0, resource_key: str = "GPU"):
        super().__init__(starting_resource_units, resource_key)
        self._coord = get_or_create_autoscaling_coordinator()
        # Start from a clean slate in case a previous run left requests behind.
        self._clear_all_requests()
        logging.basicConfig(level=logging.INFO)
        logger.info(
            "Initializing resource availability: '%s': %s",
            resource_key,
            starting_resource_units,
        )
        self._total_resource_units = int(ray.cluster_resources()[resource_key])
        # Withhold everything beyond the requested starting availability.
        self._dummy_requester_ids = [
            self._get_requester_id()
            for _ in range(self._total_resource_units - starting_resource_units)
        ]
        self._request(self._dummy_requester_ids)

    def _request(self, requester_ids):
        # File one 1-unit resource request per requester id; each pins one unit.
        futs = []
        for requester_id in requester_ids:
            fut = self._coord.request_resources.remote(
                requester_id=requester_id,
                resources=[{self._resource_key: 1.0}],
                expire_after_s=10000,
                priority=MockResourceRequestPriority.OVERRIDE,
            )
            futs.append(fut)
        ray.get(futs)

    def _cancel(self, requester_ids):
        # Cancel the given dummy requests, releasing one unit each.
        futs = []
        for requester_id in requester_ids:
            fut = self._coord.cancel_request.remote(requester_id=requester_id)
            futs.append(fut)
        ray.get(futs)

    def _clear_all_requests(self):
        # Reach into the coordinator actor and drop all ongoing requests.
        def clear_all_requests(coord_self):
            coord_self._ongoing_reqs = {}

        ray.get(self._coord.__ray_call__.remote(clear_all_requests))

    def _get_requester_id(self):
        return f"dummy_{uuid.uuid4().hex[:6]}"

    def _kill_random_train_worker(self):
        # Hard-kill one live RayTrainWorker process to force a recovery cycle.
        actors = list_actors(
            filters=[("class_name", "=", "RayTrainWorker"), ("state", "=", "ALIVE")]
        )
        if not actors:
            return
        actor_to_kill = random.choice(actors)
        logger.info("Killing random train worker: %s", actor_to_kill)
        # Run the kill task on the worker's own node so the PID is valid there.
        strategy = NodeAffinitySchedulingStrategy(
            node_id=actor_to_kill.node_id, soft=False
        )
        ray.get(
            kill_process.options(scheduling_strategy=strategy).remote(actor_to_kill.pid)
        )

    def execute_schedule(self, schedule: List[ResourceAvailabilityEvent]):
        """Walk the schedule in real time, adjusting withheld resources per event."""
        schedule_str = " -> ".join(
            f"({event.time_s:.0f}s, {self._resource_key}: {event.resource_units})"
            for event in schedule
        )
        logger.info("Executing availability schedule: %s", schedule_str)
        start_time = time.time()
        for event in schedule:
            # Sleep until this event's scheduled wall-clock offset.
            curr_time_s = time.time() - start_time
            time.sleep(max(0, event.time_s - curr_time_s))
            logger.info("Executing scheduled event: %s", event)
            curr_withheld = len(self._dummy_requester_ids)
            curr_available = self._total_resource_units - curr_withheld
            if curr_available == event.resource_units:
                logger.info(
                    "No change in availability: %s -> %s",
                    curr_available,
                    event.resource_units,
                )
                continue
            if curr_available > event.resource_units:
                # Shrinking availability: file more dummy requests.
                num_units_to_withhold = curr_available - event.resource_units
                new_requesters = [
                    self._get_requester_id() for _ in range(num_units_to_withhold)
                ]
                logger.info(
                    "Reducing availability from %s to %s",
                    curr_available,
                    event.resource_units,
                )
                # If reducing resources, kill a worker process to trigger recovery.
                self._kill_random_train_worker()
                self._request(new_requesters)
                self._dummy_requester_ids += new_requesters
            else:
                # Growing availability: cancel that many dummy requests.
                num_to_cancel = event.resource_units - curr_available
                self._dummy_requester_ids, ids_to_cancel = (
                    self._dummy_requester_ids[num_to_cancel:],
                    self._dummy_requester_ids[:num_to_cancel],
                )
                logger.info(
                    "Increasing availability from %s to %s",
                    curr_available,
                    event.resource_units,
                )
                self._cancel(ids_to_cancel)

    def shutdown(self):
        """Cancel all outstanding dummy requests, restoring full availability."""
        self._cancel(self._dummy_requester_ids)
def generate_schedule(
    resource_availability_options: list,
    duration_s: int = 60,
    interval_s: int = 5,
    seed: int = 42,
) -> List[ResourceAvailabilityEvent]:
    """Build a seeded random-walk availability schedule over the given options.

    Starting from a random option index, the walk takes a (biased) step of
    -1/0/+1 every ``interval_s`` seconds for ``duration_s`` seconds total.
    """
    random.seed(seed)
    num_events = duration_s // interval_s
    idx = random.choice(range(len(resource_availability_options)))
    events = [
        ResourceAvailabilityEvent(
            time_s=0, resource_units=resource_availability_options[idx]
        )
    ]
    for step in range(1, num_events):
        # Weights are chosen to bias schedules towards the max workers.
        if idx == 0:
            deltas, weights = [0, 1], None
        elif idx == len(resource_availability_options) - 1:
            deltas, weights = [-1, 0], [20, 80]
        else:
            deltas, weights = [-1, 0, 1], [25, 25, 50]
        idx += random.choices(deltas, weights=weights)[0]
        events.append(
            ResourceAvailabilityEvent(
                time_s=step * interval_s,
                resource_units=resource_availability_options[idx],
            )
        )
    return events
| {
"repo_id": "ray-project/ray",
"file_path": "release/train_tests/benchmark/elastic_training/resource_schedule.py",
"license": "Apache License 2.0",
"lines": 170,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:release/train_tests/elastic_training/elastic_util.py | import subprocess
import requests
from torch import nn
import ray
from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy
class NeuralNetwork(nn.Module):
    """Small MLP classifier for 28x28 FashionMNIST images (10 classes)."""

    def __init__(self):
        super().__init__()
        # NOTE: attribute names are part of the checkpoint state-dict keys.
        self.flatten = nn.Flatten()
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(28 * 28, 512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.ReLU(),
            nn.Linear(512, 10),
            nn.ReLU(),
        )

    def forward(self, x):
        """Flatten the image batch and return per-class logits."""
        flat = self.flatten(x)
        return self.linear_relu_stack(flat)
def terminate_current_instance():
    """Use AWS CLI to terminate current instance.

    Returns the CompletedProcess of the ``aws ec2 terminate-instances`` call;
    raises CalledProcessError if the CLI exits non-zero.
    """
    # IMDSv2: fetch a short-lived session token first, then use it for all
    # instance-metadata reads.
    token = requests.put(
        "http://169.254.169.254/latest/api/token",
        headers={"X-aws-ec2-metadata-token-ttl-seconds": "300"},
        timeout=10,
    ).text
    instance_id = requests.get(
        "http://169.254.169.254/latest/meta-data/instance-id",
        headers={"X-aws-ec2-metadata-token": token},
        timeout=10,
    ).text
    region = requests.get(
        "http://169.254.169.254/latest/meta-data/placement/region",
        headers={"X-aws-ec2-metadata-token": token},
        timeout=10,
    ).text
    # Terminate this very instance via the AWS CLI.
    return subprocess.run(
        [
            "aws",
            "ec2",
            "terminate-instances",
            "--instance-ids",
            instance_id,
            "--region",
            region,
        ],
        check=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
def terminate_node(node_id: str):
    """Terminate the cloud instance backing the given Ray node.

    Schedules ``terminate_current_instance`` onto exactly that node (hard
    node affinity) so the AWS call runs from the instance being killed.
    """
    killer_task = ray.remote(terminate_current_instance).options(
        num_cpus=0,
        scheduling_strategy=NodeAffinitySchedulingStrategy(node_id, soft=False),
    )
    ray.get(killer_task.remote())
| {
"repo_id": "ray-project/ray",
"file_path": "release/train_tests/elastic_training/elastic_util.py",
"license": "Apache License 2.0",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:release/train_tests/elastic_training/torch_elastic_e2e.py | import logging
import os
import tempfile
import time
from pathlib import Path
from typing import Dict, List, Tuple
import click
from elastic_util import NeuralNetwork, terminate_node
from filelock import FileLock
import ray
import ray.train as train
from ray.tune.utils import date_str
from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor
logger = logging.getLogger(__name__)
CONFIG = {"lr": 1e-3, "batch_size": 64}
LOG_FILE = "/tmp/driver.log"
DATA_DIR = "/tmp/fashion_mnist"
def get_default_storage_path():
    """Return the URI under which this test stores run artifacts.

    Uses the ANYSCALE_ARTIFACT_STORAGE env var when set, otherwise a
    relative "artifact_storage" prefix.
    """
    prefix = os.environ.get("ANYSCALE_ARTIFACT_STORAGE", "artifact_storage")
    return f"{prefix}/train_release_tests/elastic_e2e"


STORAGE_PATH = get_default_storage_path()
def load_data(data_dir):
    """Download (once) and return the FashionMNIST (train, test) datasets.

    The file lock serializes the download across worker processes on the
    same node so only one of them writes the dataset files.
    """
    # NOTE(review): the lock path uses the module-level DATA_DIR rather than
    # the data_dir argument — fine for the single call site, but confirm if
    # this is ever called with a different directory.
    with FileLock(f"{DATA_DIR}.data.lock"):
        trainset = datasets.FashionMNIST(
            root=data_dir, train=True, download=True, transform=ToTensor()
        )
        testset = datasets.FashionMNIST(
            root=data_dir, train=False, download=True, transform=ToTensor()
        )
    return trainset, testset
def train_epoch(
    dataloader, model, loss_fn, optimizer, world_size: int, world_rank: int
):
    """Run one optimization pass over ``dataloader``, logging every 100 batches."""
    per_worker_size = len(dataloader.dataset) // world_size
    model.train()
    for step, (features, targets) in enumerate(dataloader):
        loss = loss_fn(model(features), targets)
        # Standard backprop step.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if step % 100 == 0:
            seen = step * len(features)
            print(
                f"[rank={world_rank}] loss: {loss.item():>7f} [{seen:>5d}/{per_worker_size:>5d}]"
            )
def validate_epoch(dataloader, model, loss_fn, world_size: int, world_rank: int):
    """Evaluate ``model`` on ``dataloader`` and return the mean per-batch loss."""
    per_worker_size = len(dataloader.dataset) // world_size
    batch_count = len(dataloader)
    model.eval()
    total_loss = 0.0
    num_correct = 0.0
    with torch.no_grad():
        for features, targets in dataloader:
            logits = model(features)
            total_loss += loss_fn(logits, targets).item()
            num_correct += (logits.argmax(1) == targets).type(torch.float).sum().item()
    avg_loss = total_loss / batch_count
    accuracy = num_correct / per_worker_size
    print(
        f"[rank={world_rank}] Test Error: \n "
        f"Accuracy: {(100 * accuracy):>0.1f}%, "
        f"Avg loss: {avg_loss:>8f} \n"
    )
    return avg_loss
def save_checkpoint(local_dir, model, optimizer, epoch):
    """Write model/optimizer state and the epoch number to ``checkpoint.pt``."""
    state = {
        "model": model.state_dict(),
        "optimizer": optimizer.state_dict(),
        "epoch": epoch,
    }
    torch.save(state, os.path.join(local_dir, "checkpoint.pt"))
def load_checkpoint(local_ckpt_path, model, optimizer) -> int:
    """Restore model/optimizer state from ``local_ckpt_path``.

    Returns the epoch to resume training from (the saved epoch + 1).
    """
    # Load onto CPU first: after an elastic restart, the checkpoint may have
    # been saved from a GPU ordinal that does not exist on this worker's node,
    # in which case the default device-restoring load would fail.
    # `load_state_dict` then copies tensors onto each parameter's own device.
    checkpoint = torch.load(
        os.path.join(local_ckpt_path, "checkpoint.pt"), map_location="cpu"
    )
    model.load_state_dict(checkpoint["model"])
    optimizer.load_state_dict(checkpoint["optimizer"])
    return checkpoint["epoch"] + 1
def train_func(config: Dict):
    """Per-worker training loop: FashionMNIST classification with checkpointing.

    Expects ``config`` to contain ``batch_size``, ``lr``, ``epochs`` and
    optionally ``shuffle``. Resumes from the latest Ray Train checkpoint if
    one exists, and reports loss + a checkpoint at the end of every epoch.
    """
    local_start_time = time.monotonic()
    batch_size = config["batch_size"]
    lr = config["lr"]
    epochs = config["epochs"]
    shuffle = config.get("shuffle", False)
    world_size = train.get_context().get_world_size()
    world_rank = train.get_context().get_world_rank()
    # `batch_size` is the global batch size; split it across workers.
    worker_batch_size = batch_size // world_size
    if world_rank == 0:
        print(f"global batch size is {worker_batch_size * world_size}")
    training_data, test_data = load_data(DATA_DIR)
    train_dataloader = DataLoader(
        training_data, shuffle=shuffle, batch_size=worker_batch_size
    )
    test_dataloader = DataLoader(
        test_data, shuffle=shuffle, batch_size=worker_batch_size
    )
    # Wrap loaders/model for distributed execution (device moves, samplers, DDP).
    train_dataloader = train.torch.prepare_data_loader(train_dataloader)
    test_dataloader = train.torch.prepare_data_loader(test_dataloader)
    model = train.torch.prepare_model(NeuralNetwork())
    loss_fn = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=lr)
    start_epoch = 1
    # On elastic restart, pick up from the most recent reported checkpoint.
    checkpoint = ray.train.get_checkpoint()
    if checkpoint:
        with checkpoint.as_directory() as temp_ckpt_dir:
            print("Found checkpoint: ", checkpoint)
            start_epoch = load_checkpoint(temp_ckpt_dir, model, optimizer)
            print(f"Restoration done! Resuming training from {start_epoch=}")
    for epoch in range(start_epoch, epochs + 1):
        if world_size > 1:
            # Re-seed the distributed sampler so shards differ per epoch.
            train_dataloader.sampler.set_epoch(epoch)
        train_epoch(
            train_dataloader,
            model,
            loss_fn,
            optimizer,
            world_size=world_size,
            world_rank=world_rank,
        )
        loss = validate_epoch(
            test_dataloader,
            model,
            loss_fn,
            world_size=world_size,
            world_rank=world_rank,
        )
        local_time_taken = time.monotonic() - local_start_time
        # Only rank 0 writes the checkpoint; all ranks must report.
        with tempfile.TemporaryDirectory() as temp_checkpoint_dir:
            checkpoint = None
            if world_rank == 0:
                print("Saving checkpoint...")
                save_checkpoint(temp_checkpoint_dir, model, optimizer, epoch)
                checkpoint = train.Checkpoint.from_directory(temp_checkpoint_dir)
            train.report(
                metrics={"loss": loss, "local_time_taken": local_time_taken},
                checkpoint=checkpoint,
                checkpoint_dir_name=f"checkpoint-epoch={epoch}",
            )
def train_torch_ray_train(
    config: dict,
    num_workers: Tuple[int, int] = (4, 12),
    use_gpu: bool = True,
) -> train.Result:
    """Launch an elastic TorchTrainer run and return its Result.

    ``num_workers`` as a (min, max) tuple enables elastic scaling between
    the two bounds; up to 3 failures are tolerated before the run aborts.
    """
    from ray.train.torch import TorchTrainer

    trainer = TorchTrainer(
        train_loop_per_worker=lambda c: train_func(config=c),
        train_loop_config=config,
        scaling_config=ray.train.ScalingConfig(
            num_workers=num_workers, use_gpu=use_gpu
        ),
        run_config=ray.train.RunConfig(
            name=f"elastic_train_experiment-{date_str()}",
            storage_path=STORAGE_PATH,
            checkpoint_config=ray.train.CheckpointConfig(num_to_keep=2),
            failure_config=ray.train.FailureConfig(max_failures=3),
        ),
    )
    return trainer.fit()
@ray.remote(num_cpus=0)
def run_cluster_node_killing_events(target_gpu_count: int):
    """Drive three fault-injection rounds against the training cluster.

    Runs on the head node alongside training. Each round waits for the
    cluster to reach ``target_gpu_count`` GPUs, pauses 30s, then terminates
    worker nodes: (1) all workers, (2) the last two, (3) the last one.
    """
    logging.basicConfig(level=logging.INFO)
    terminator_logger = logging.getLogger(__name__)
    terminator_logger.addHandler(get_file_handler())
    start = time.time()
    head_node_id = ray.get_runtime_context().get_node_id()

    def get_cluster_resources() -> Dict[str, float]:
        # Only CPU/GPU totals are interesting for status logging.
        return {
            resource: value
            for resource, value in ray.cluster_resources().items()
            if resource in ("CPU", "GPU")
        }

    def get_worker_nodes() -> List[Dict]:
        # All alive nodes except the head (which hosts this task).
        return [
            node
            for node in ray.nodes()
            if node["Alive"] and node["NodeID"] != head_node_id
        ]

    def kill_nodes(nodes_to_kill):
        terminator_logger.info(
            "Nodes to kill: %s", [n["NodeID"] for n in nodes_to_kill]
        )
        for node in nodes_to_kill:
            terminator_logger.info(
                "Killing node: %s (alive=%s)", node["NodeID"], node["Alive"]
            )
            terminate_node(node["NodeID"])

    def all_nodes_dead(dying_nodes) -> bool:
        dying_node_ids = [n["NodeID"] for n in dying_nodes]
        return all(
            not node["Alive"]
            for node in ray.nodes()
            if node["NodeID"] in dying_node_ids
        )

    def log_status(message):
        elapsed = time.time() - start
        status_str = "\n"
        status_str += "-" * 80 + "\n"
        status_str += (
            f"[elapsed={elapsed:.1f}s] cluster_resources={get_cluster_resources()}\n"
        )
        status_str += message + "\n"
        status_str += "-" * 80 + "\n\n"
        terminator_logger.info(status_str)

    # Round 1: wait for full capacity, then kill every worker node.
    log_status(f"Waiting to upscale back to {target_gpu_count} GPUs...")
    while get_cluster_resources().get("GPU", 0) < target_gpu_count:
        time.sleep(1)
    log_status("Waiting for 30s before modifying cluster resources...")
    time.sleep(30)
    log_status("Killing all nodes in the current cluster...")
    nodes_to_kill = get_worker_nodes()
    kill_nodes(nodes_to_kill)
    while not all_nodes_dead(nodes_to_kill):
        time.sleep(1)
    # Round 2: wait for recovery, then kill the last two worker nodes.
    log_status(f"Waiting to upscale back to {target_gpu_count} GPUs...")
    while get_cluster_resources().get("GPU", 0) < target_gpu_count:
        time.sleep(1)
    log_status("Waiting for 30s before modifying cluster resources...")
    time.sleep(30)
    log_status("Killing two worker nodes...")
    nodes_to_kill = get_worker_nodes()[-2:]
    kill_nodes(nodes_to_kill)
    while not all_nodes_dead(nodes_to_kill):
        time.sleep(1)
    # Round 3: wait for recovery, then kill one worker node.
    log_status(f"Waiting to upscale back to {target_gpu_count} GPUs...")
    while get_cluster_resources().get("GPU", 0) < target_gpu_count:
        time.sleep(1)
    log_status("Waiting for 30s before modifying cluster resources...")
    time.sleep(30)
    log_status("Killing 1 worker node...")
    nodes_to_kill = [get_worker_nodes()[-1]]
    kill_nodes(nodes_to_kill)
    while not all_nodes_dead(nodes_to_kill):
        time.sleep(1)
    log_status("All node killing events generated, waiting for training finish...")
@click.group(help="Run Torch benchmarks")
def cli():
    """Top-level click group; subcommands register themselves via @cli.command."""
    pass
@cli.command(help="Kick off Ray Train elastic benchmark")
@click.option("--num-epochs", type=int, default=50)
# NOTE(review): click's `type=tuple` does not parse a CLI string like "4,12"
# into a tuple of ints — only the default works as intended. Confirm before
# passing --num-workers on the command line.
@click.option("--num-workers", type=tuple, default=(4, 12))
@click.option("--use-gpu", is_flag=True, default=True)
@click.option("--batch-size", type=int, default=64)
def run(
    num_epochs: int = 50,
    num_workers: Tuple[int, int] = (4, 12),
    use_gpu: bool = True,
    batch_size: int = 64,
):
    """Run elastic training while a head-node task injects node failures.

    Verifies that training survives all fault rounds and ends with a
    checkpoint from the final epoch.
    """
    config = CONFIG.copy()
    config["epochs"] = num_epochs
    config["batch_size"] = batch_size
    ray.init(log_to_driver=True, runtime_env={"working_dir": os.path.dirname(__file__)})
    head_node_id = ray.get_runtime_context().get_node_id()
    # Pin the fault injector to the head node so it survives worker kills.
    event_future = run_cluster_node_killing_events.options(
        scheduling_strategy=NodeAffinitySchedulingStrategy(
            node_id=head_node_id, soft=False
        ),
        runtime_env={"env_vars": {"RAY_TRAIN_V2_ENABLED": "1"}},
    ).remote(target_gpu_count=num_workers[1])
    result = train_torch_ray_train(
        config=config,
        num_workers=num_workers,
        use_gpu=use_gpu,
    )
    ray.get(event_future)
    logger.info(
        "`trainer.fit` finished with (error, checkpoint):\nerror = %s\ncheckpoint = %s",
        result.error,
        result.checkpoint,
    )
    assert not result.error, result.error
    assert result.checkpoint
    # The final checkpoint must come from the last epoch.
    checkpoint_dir_name = Path(result.checkpoint.path).name
    expected_checkpoint_dir_name = f"checkpoint-epoch={num_epochs}"
    assert (
        checkpoint_dir_name == expected_checkpoint_dir_name
    ), f"{checkpoint_dir_name=} != {expected_checkpoint_dir_name=}"
    # Echo the file log so it shows up in driver output.
    with open(LOG_FILE, "r") as f:
        print(f.read())
def get_file_handler() -> logging.FileHandler:
    """Create a handler that appends INFO-level records to LOG_FILE."""
    file_handler = logging.FileHandler(LOG_FILE)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(
        logging.Formatter("%(asctime)s [%(levelname)s] :: %(message)s")
    )
    return file_handler
def setup_logging():
    """Mirror this module's and Ray Train's log records into LOG_FILE."""
    file_handler = get_file_handler()
    logger.addHandler(file_handler)
    logging.getLogger("ray.train").addHandler(file_handler)
def main():
    """Entry point: configure file logging, then dispatch to the click CLI."""
    setup_logging()
    cli()


if __name__ == "__main__":
    main()
| {
"repo_id": "ray-project/ray",
"file_path": "release/train_tests/elastic_training/torch_elastic_e2e.py",
"license": "Apache License 2.0",
"lines": 299,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/serve/doc_code/async_inference_autoscaling.py | # __basic_example_begin__
from ray import serve
from ray.serve.config import AutoscalingConfig, AutoscalingPolicy
from ray.serve.schema import CeleryAdapterConfig, TaskProcessorConfig
from ray.serve.task_consumer import task_consumer, task_handler
# Celery-backed task queue the consumer deployment pulls work from.
processor_config = TaskProcessorConfig(
    queue_name="my_queue",
    adapter_config=CeleryAdapterConfig(
        broker_url="redis://localhost:6379/0",
        backend_url="redis://localhost:6379/1",
    ),
)


@serve.deployment(
    max_ongoing_requests=5,
    autoscaling_config=AutoscalingConfig(
        min_replicas=1,
        max_replicas=10,
        target_ongoing_requests=2,
        # Custom policy that scales replicas based on queue depth; it reads
        # the same Redis broker/queue the task processor is configured with.
        policy=AutoscalingPolicy(
            policy_function="ray.serve.async_inference_autoscaling_policy:AsyncInferenceAutoscalingPolicy",
            policy_kwargs={
                "broker_url": "redis://localhost:6379/0",
                "queue_name": "my_queue",
            },
        ),
    ),
)
@task_consumer(task_processor_config=processor_config)
class MyConsumer:
    # Registered as the queue task named "process".
    @task_handler(name="process")
    def process(self, data):
        return f"processed: {data}"


app = MyConsumer.bind()
serve.run(app)
# __basic_example_end__
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/doc_code/async_inference_autoscaling.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:rllib/core/rl_module/torch/tests/test_lstm_target_network_rl_module.py | import unittest
import gymnasium as gym
import numpy as np
import torch
import tree
from ray.rllib.core.columns import Columns
from ray.rllib.core.rl_module.apis import TARGET_NETWORK_ACTION_DIST_INPUTS
from ray.rllib.examples.rl_modules.classes.lstm_containing_rlm import (
LSTMContainingRLModuleWithTargetNetwork,
)
class TestLSTMContainingRLModuleWithTargetNetwork(unittest.TestCase):
    """Test cases for LSTMContainingRLModuleWithTargetNetwork.

    Covers target-network creation, main/target pairing, the target forward
    pass, and independence of target parameters from main-network updates.
    """

    def setUp(self):
        """Set up test fixtures."""
        self.obs_dim = 25
        self.action_dim = 4
        self.batch_size = 10
        self.seq_len = 5
        self.lstm_cell_size = 32
        self.module = LSTMContainingRLModuleWithTargetNetwork(
            observation_space=gym.spaces.Box(-1.0, 1.0, (self.obs_dim,), np.float32),
            action_space=gym.spaces.Discrete(self.action_dim),
            model_config={"lstm_cell_size": self.lstm_cell_size},
        )

    def _create_input_batch(self):
        """Helper method to create a dummy input batch."""
        # Random (batch, time, obs) observations.
        obs = torch.from_numpy(
            np.random.random_sample(
                size=(self.batch_size, self.seq_len, self.obs_dim)
            ).astype(np.float32)
        )
        state_in = self.module.get_initial_state()
        # Repeat state_in across batch.
        state_in = tree.map_structure(
            lambda s: torch.from_numpy(s).unsqueeze(0).repeat(self.batch_size, 1),
            state_in,
        )
        return {
            Columns.OBS: obs,
            Columns.STATE_IN: state_in,
        }

    def test_make_target_networks(self):
        """Test that target networks are created correctly."""
        # Initially, target networks should not exist
        self.assertFalse(hasattr(self.module, "_old_lstm"))
        self.assertFalse(hasattr(self.module, "_old_fc_net"))
        self.assertFalse(hasattr(self.module, "_old_pi_head"))
        # Create target networks
        self.module.make_target_networks()
        # After creation, target networks should exist
        self.assertTrue(hasattr(self.module, "_old_lstm"))
        self.assertTrue(hasattr(self.module, "_old_fc_net"))
        self.assertTrue(hasattr(self.module, "_old_pi_head"))
        # Target networks should be torch modules
        self.assertIsInstance(self.module._old_lstm, torch.nn.Module)
        self.assertIsInstance(self.module._old_fc_net, torch.nn.Module)
        self.assertIsInstance(self.module._old_pi_head, torch.nn.Module)
        # Target networks should be different objects from main networks
        self.assertIsNot(self.module._lstm, self.module._old_lstm)
        self.assertIsNot(self.module._fc_net, self.module._old_fc_net)
        self.assertIsNot(self.module._pi_head, self.module._old_pi_head)

    def test_get_target_network_pairs(self):
        """Test that get_target_network_pairs returns correct pairs."""
        # Create target networks first
        self.module.make_target_networks()
        # Get target network pairs
        pairs = self.module.get_target_network_pairs()
        # Should return exactly 3 pairs (LSTM, FC net, policy head)
        self.assertEqual(len(pairs), 3)
        # Check that pairs are tuples of (main_net, target_net)
        for main_net, target_net in pairs:
            self.assertIsInstance(main_net, torch.nn.Module)
            self.assertIsInstance(target_net, torch.nn.Module)
            self.assertIsNot(main_net, target_net)
        # Verify the specific pairs
        expected_pairs = [
            (self.module._lstm, self.module._old_lstm),
            (self.module._fc_net, self.module._old_fc_net),
            (self.module._pi_head, self.module._old_pi_head),
        ]
        self.assertEqual(pairs, expected_pairs)

    def test_forward_target(self):
        """Test that forward_target produces correct output structure."""
        # Create target networks first
        self.module.make_target_networks()
        # Create input batch
        input_dict = self._create_input_batch()
        # Forward through target networks
        output = self.module.forward_target(input_dict)
        # Output should be a dictionary
        self.assertIsInstance(output, dict)
        # Should contain TARGET_NETWORK_ACTION_DIST_INPUTS
        self.assertIn(TARGET_NETWORK_ACTION_DIST_INPUTS, output)
        # Action dist inputs should be a tensor with correct shape
        action_dist_inputs = output[TARGET_NETWORK_ACTION_DIST_INPUTS]
        self.assertIsInstance(action_dist_inputs, torch.Tensor)
        # Shape must be (batch, time, num_actions) for the discrete head.
        expected_shape = (self.batch_size, self.seq_len, self.action_dim)
        self.assertEqual(action_dist_inputs.shape, expected_shape)

    def test_target_networks_independence(self):
        """Test that target networks are independent from main networks."""
        # Create target networks
        self.module.make_target_networks()
        # Get initial parameters from both networks
        main_lstm_params = [p.clone().detach() for p in self.module._lstm.parameters()]
        target_lstm_params = [
            p.clone().detach() for p in self.module._old_lstm.parameters()
        ]
        # Initially, parameters should be equal (target is copied from main)
        for main_param, target_param in zip(main_lstm_params, target_lstm_params):
            self.assertTrue(torch.allclose(main_param, target_param))
        # Create input batch
        input_dict = self._create_input_batch()
        # Forward through main network and compute gradients
        main_output = self.module.forward_train(input_dict)
        main_loss = main_output[Columns.ACTION_DIST_INPUTS].sum()
        main_loss.backward()
        # Modify main network parameters (simulate training step)
        with torch.no_grad():
            for param in self.module._lstm.parameters():
                param.add_(0.1)
        # Target network parameters should remain unchanged
        for target_param, original_target_param in zip(
            self.module._old_lstm.parameters(), target_lstm_params
        ):
            self.assertTrue(torch.allclose(target_param, original_target_param))
        # Verify that main and target networks now have different parameters
        for main_param, target_param in zip(
            self.module._lstm.parameters(), self.module._old_lstm.parameters()
        ):
            self.assertFalse(torch.allclose(main_param, target_param))
# Allow running this module directly via pytest.
if __name__ == "__main__":
    import sys
    import pytest

    # One can specify the specific TestCase class to run.
    # None for all unittest.TestCase classes in this file.
    class_ = sys.argv[1] if len(sys.argv) > 1 else None
    sys.exit(pytest.main(["-v", __file__ + ("" if class_ is None else "::" + class_)]))
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/core/rl_module/torch/tests/test_lstm_target_network_rl_module.py",
"license": "Apache License 2.0",
"lines": 137,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:rllib/examples/algorithms/appo/multi_agent_footsies_appo.py | """
Multi-agent RLlib Footsies Example (APPO)
About:
- Example is based on the Footsies environment (https://github.com/chasemcd/FootsiesGym).
- Footsies is a two-player fighting game where each player controls a character and tries to hit the opponent while avoiding being hit.
- Footsies is a zero-sum game, when one player wins (+1 reward) the other loses (-1 reward).
Summary:
- Main policy is an LSTM-based policy.
- Training algorithm is APPO.
Training:
- Training is governed by adding new, more complex opponents to the mix as the main policy reaches a certain win rate threshold against the current opponent.
- Current opponent is always the newest opponent added to the mix.
- Training starts with a very simple opponent: "noop" (does nothing), then progresses to "back" (only moves backwards). These are the fixed (very simple) policies that are used to kick off the training.
- New opponents are frozen copies of the main policy at different training stages. They will be added to the mix as "lstm_v0", "lstm_v1", etc.
- In this way - after kick-starting the training with fixed simple opponents - the main policy will play against a version of itself from an earlier training stage.
- The main policy has to achieve the win rate threshold against the current opponent to add a new opponent to the mix.
- Training concludes when the target mix size is reached.
Evaluation:
- Evaluation is performed against the current (newest) opponent.
- Evaluation runs for a fixed number of episodes at the end of each training iteration.
"""
import functools
from pathlib import Path
from ray.rllib.algorithms.appo import APPOConfig
from ray.rllib.core.rl_module import MultiRLModuleSpec, RLModuleSpec
from ray.rllib.env.multi_agent_env_runner import MultiAgentEnvRunner
from ray.rllib.examples.envs.classes.multi_agent.footsies.fixed_rlmodules import (
BackFixedRLModule,
NoopFixedRLModule,
)
from ray.rllib.examples.envs.classes.multi_agent.footsies.footsies_env import (
env_creator,
)
from ray.rllib.examples.envs.classes.multi_agent.footsies.utils import (
Matchmaker,
Matchup,
MetricsLoggerCallback,
MixManagerCallback,
platform_for_binary_to_download,
)
from ray.rllib.examples.rl_modules.classes.lstm_containing_rlm import (
LSTMContainingRLModuleWithTargetNetwork,
)
from ray.rllib.utils.metrics import NUM_ENV_STEPS_SAMPLED_LIFETIME
from ray.rllib.utils.test_utils import (
add_rllib_example_script_args,
)
from ray.tune.registry import register_env
from ray.tune.result import TRAINING_ITERATION
# Command-line interface: standard RLlib example-script flags plus the
# Footsies-specific networking, binary-location, and curriculum knobs below.
parser = add_rllib_example_script_args(
    default_iters=500,
    default_timesteps=5_000_000,
)
parser.add_argument(
    "--train-start-port",
    type=int,
    default=45001,
    help="First port number for the Footsies training environment server (default: 45001). Each server gets its own port.",
)
parser.add_argument(
    "--eval-start-port",
    type=int,
    default=55001,
    help="First port number for the Footsies evaluation environment server (default: 55001) Each server gets its own port.",
)
parser.add_argument(
    "--binary-download-dir",
    type=Path,
    default="/tmp/ray/binaries/footsies",
    help="Directory to download Footsies binaries (default: /tmp/ray/binaries/footsies)",
)
parser.add_argument(
    "--binary-extract-dir",
    type=Path,
    default="/tmp/ray/binaries/footsies",
    help="Directory to extract Footsies binaries (default: /tmp/ray/binaries/footsies)",
)
parser.add_argument(
    "--win-rate-threshold",
    type=float,
    default=0.8,
    help="The main policy should have at least 'win-rate-threshold' win rate against the "
    "other policy to advance to the next level. Moving to the next level "
    "means adding a new policy to the mix.",
)
parser.add_argument(
    "--target-mix-size",
    type=int,
    default=5,
    help="Target number of policies (RLModules) in the mix to consider the test passed. "
    "The initial mix size is 2: 'main policy' vs. 'other'. "
    "`--target-mix-size=5` means that 3 new policies will be added to the mix. "
    "Whether to add new policy is decided by checking the '--win-rate-threshold' condition. ",
)
parser.add_argument(
    "--rollout-fragment-length",
    type=int,
    default=256,
    help="The length of each rollout fragment to be collected by the EnvRunners when sampling.",
)
parser.add_argument(
    "--log-unity-output",
    action="store_true",
    help="Whether to log Unity output (from the game engine). Default is False.",
    default=False,
)
parser.add_argument(
    "--render",
    action="store_true",
    default=False,
    help="Whether to render the Footsies environment. Default is False.",
)
# Name of the trainable (LSTM-based) policy; reused throughout the config.
main_policy = "lstm"
args = parser.parse_args()
# Make "FootsiesEnv" resolvable by name in the config below.
register_env(name="FootsiesEnv", env_creator=env_creator)
# Detect platform and choose appropriate binary
binary_to_download = platform_for_binary_to_download(args.render)
# Build the APPO configuration for self-play training on Footsies.
config = (
    APPOConfig()
    .reporting(
        # Aggregate results over at least 30s so win-rate metrics are stable.
        min_time_s_per_iteration=30,
    )
    .environment(
        env="FootsiesEnv",
        env_config={
            "max_t": 1000,
            "frame_skip": 4,
            "observation_delay": 16,
            "train_start_port": args.train_start_port,
            "eval_start_port": args.eval_start_port,
            "host": "localhost",
            "binary_download_dir": args.binary_download_dir,
            "binary_extract_dir": args.binary_extract_dir,
            "binary_to_download": binary_to_download,
            "log_unity_output": args.log_unity_output,
        },
    )
    .learners(
        num_learners=1,
        num_cpus_per_learner=1,
        num_gpus_per_learner=0,
        num_aggregator_actors_per_learner=2,
    )
    .env_runners(
        env_runner_cls=MultiAgentEnvRunner,
        num_env_runners=args.num_env_runners or 1,
        num_cpus_per_env_runner=1,
        num_envs_per_env_runner=1,
        batch_mode="truncate_episodes",
        rollout_fragment_length=args.rollout_fragment_length,
        episodes_to_numpy=True,
        create_env_on_local_worker=False,
    )
    .training(
        # Scale the train batch with the number of sampling workers.
        train_batch_size_per_learner=4096 * (args.num_env_runners or 1),
        lr=1e-4,
        entropy_coeff=0.01,
    )
    .multi_agent(
        policies={
            main_policy,
            "noop",
            "back",
        },
        # this is a starting policy_mapping_fn
        # It will be updated by the MixManagerCallback during training.
        policy_mapping_fn=Matchmaker(
            [Matchup(main_policy, "noop", 1.0)]
        ).agent_to_module_mapping_fn,
        # we only train the main policy, this doesn't change during training.
        policies_to_train=[main_policy],
    )
    .rl_module(
        rl_module_spec=MultiRLModuleSpec(
            rl_module_specs={
                main_policy: RLModuleSpec(
                    module_class=LSTMContainingRLModuleWithTargetNetwork,
                    model_config={
                        "lstm_cell_size": 128,
                        "dense_layers": [128, 128],
                        "max_seq_len": 64,
                    },
                ),
                # for simplicity, all fixed RLModules are added to the config at the start.
                # However, only "noop" is used at the start of training,
                # the others are added to the mix later by the MixManagerCallback.
                "noop": RLModuleSpec(module_class=NoopFixedRLModule),
                "back": RLModuleSpec(module_class=BackFixedRLModule),
            },
        )
    )
    .evaluation(
        evaluation_num_env_runners=args.evaluation_num_env_runners or 1,
        evaluation_sample_timeout_s=120,
        evaluation_interval=1,
        evaluation_duration=10,  # 10 episodes is enough to get a good win rate estimate
        evaluation_duration_unit="episodes",
        evaluation_parallel_to_training=False,
        # we may add new RLModules to the mix at the end of the evaluation stage.
        # Running evaluation in parallel may result in training for one more iteration on the old mix.
        evaluation_force_reset_envs_before_iteration=True,
        evaluation_config={
            "env_config": {"env-for-evaluation": True},
        },  # evaluation_config is used to add an argument to the env creator.
    )
    .callbacks(
        [
            functools.partial(
                MetricsLoggerCallback,
                main_policy=main_policy,
            ),
            functools.partial(
                MixManagerCallback,
                win_rate_threshold=args.win_rate_threshold,
                main_policy=main_policy,
                target_mix_size=args.target_mix_size,
                starting_modules=[main_policy, "noop"],
                fixed_modules_progression_sequence=(
                    "noop",
                    "back",
                ),
            ),
        ]
    )
)
# Stop conditions: whichever triggers first ends the run. "mix_size" is
# presumably reported by the MixManagerCallback; reaching the target mix
# size means the self-play curriculum completed — TODO confirm in utils.
stop = {
    NUM_ENV_STEPS_SAMPLED_LIFETIME: args.stop_timesteps,
    TRAINING_ITERATION: args.stop_iters,
    "mix_size": args.target_mix_size,
}
if __name__ == "__main__":
    from ray.rllib.utils.test_utils import run_rllib_example_script_experiment

    # Run the experiment through the standard RLlib example-script harness.
    results = run_rllib_example_script_experiment(
        base_config=config,
        args=args,
        stop=stop,
        success_metric={
            "mix_size": args.target_mix_size
        },  # pass the success metric for RLlib's testing framework
    )
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/examples/algorithms/appo/multi_agent_footsies_appo.py",
"license": "Apache License 2.0",
"lines": 240,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/data/tests/unit/test_resource_manager.py | import pytest
import ray
from ray.data._internal.execution.interfaces import PhysicalOperator, RefBundle
from ray.data._internal.execution.interfaces.execution_options import (
ExecutionOptions,
ExecutionResources,
)
from ray.data._internal.execution.operators.union_operator import UnionOperator
from ray.data._internal.execution.resource_manager import (
ResourceManager,
)
from ray.data._internal.execution.streaming_executor_state import (
build_streaming_topology,
)
from ray.data.block import BlockMetadata
from ray.data.context import DataContext
from ray.data.tests.conftest import * # noqa
def test_physical_operator_tracks_output_dependencies():
    """Constructing an operator registers it as an output dep of its inputs."""
    ctx = DataContext.get_current()
    upstream = PhysicalOperator("input", [], ctx)
    consumer = PhysicalOperator("downstream", [upstream], ctx)
    assert upstream.output_dependencies == [consumer]
def test_physical_apply_transform_rewires_all_input_output_dependencies():
    """_apply_transform must rewire deps on every input when one is swapped."""
    ctx = DataContext.get_current()
    old_left = PhysicalOperator("left_input", [], ctx)
    right = PhysicalOperator("right_input", [], ctx)
    root = PhysicalOperator("root", [old_left, right], ctx)
    new_left = PhysicalOperator("left_replacement", [], ctx)

    new_root = root._apply_transform(lambda op: new_left if op is old_left else op)

    # A fresh root node is produced, with its own id and metrics.
    assert new_root is not root
    assert new_root.id != root.id
    assert new_root.metrics is not root.metrics
    # Inputs point at the replacement, and both inputs know the new root.
    assert new_root.input_dependencies == [new_left, right]
    assert new_root in new_left.output_dependencies
    assert new_root in right.output_dependencies
    # The stale root was unlinked from both original inputs.
    assert root not in old_left.output_dependencies
    assert root not in right.output_dependencies
def test_physical_apply_transform_rewires_when_current_node_is_replaced():
    """Replacing the root itself rewires only the replacement's inputs."""
    ctx = DataContext.get_current()
    left = PhysicalOperator("left_input", [], ctx)
    right = PhysicalOperator("right_input", [], ctx)
    root = PhysicalOperator("root", [left, right], ctx)

    def swap_root(op):
        # Rebuild the root while dropping the right input entirely.
        if op is root:
            return PhysicalOperator("replacement", [left], ctx)
        return op

    new_root = root._apply_transform(swap_root)

    assert new_root is not root
    assert new_root in left.output_dependencies
    assert root not in left.output_dependencies
    assert root not in right.output_dependencies
    assert new_root not in right.output_dependencies
def test_physical_apply_transform_deep_chain_no_stale_downstream_refs():
    """Transforming a 3-deep chain leaves no dangling output references."""
    ctx = DataContext.get_current()
    bottom = PhysicalOperator("leaf", [], ctx)
    middle = PhysicalOperator("mid", [bottom], ctx)
    top = PhysicalOperator("root", [middle], ctx)

    def rebuild(op: PhysicalOperator) -> PhysicalOperator:
        # Swap out the leaf and the root; keep the middle node as-is.
        if op is bottom:
            return PhysicalOperator("leaf_replacement", [], ctx)
        if op.name == "root":
            return PhysicalOperator("root_replacement", op.input_dependencies, ctx)
        return op

    new_top = top._apply_transform(rebuild)
    new_middle = new_top.input_dependencies[0]
    new_bottom = new_middle.input_dependencies[0]

    assert new_top.name == "root_replacement"
    assert new_middle is not middle
    assert new_bottom.name == "leaf_replacement"
    assert top not in new_middle.output_dependencies
    assert new_middle.output_dependencies == [new_top]
def test_physical_apply_transform_rejects_in_place_input_mutation():
    """Mutating a node's inputs inside the transform must raise."""
    ctx = DataContext.get_current()
    original_input = PhysicalOperator("old_input", [], ctx)
    replacement_input = PhysicalOperator("new_input", [], ctx)
    root = PhysicalOperator("root", [original_input], ctx)

    def mutate_in_place(op):
        # Illegal: edits the node's inputs instead of returning a new node.
        if op is root:
            op._input_dependencies = [replacement_input]
        return op

    with pytest.raises(
        AssertionError,
        match="In-place input mutation is not supported; return a new node instead.",
    ):
        root._apply_transform(mutate_in_place)
def test_does_not_double_count_usage_from_union():
    """Regression test for https://github.com/ray-project/ray/pull/61040."""
    # Create a mock topology:
    #
    # input1 ───┐
    #           ├─▶ union_op
    # input2 ───┘
    input1 = PhysicalOperator("op1", [], DataContext.get_current())
    input2 = PhysicalOperator("op2", [], DataContext.get_current())
    union_op = UnionOperator(DataContext.get_current(), input1, input2)
    topology = build_streaming_topology(union_op, ExecutionOptions())
    # Create a resource manager. The budget (2 bytes) equals exactly the
    # memory we will add below, so any double counting shows up as > 2.
    total_resources = ExecutionResources(cpu=0, object_store_memory=2)
    resource_manager = ResourceManager(
        topology, ExecutionOptions(), lambda: total_resources, DataContext.get_current()
    )
    # Create a 1-byte `RefBundle`.
    # NOTE(review): the id is 28 raw bytes — presumably Ray's object-id
    # length; confirm against ray.ObjectRef if this ever fails.
    block_ref = ray.ObjectRef(b"1" * 28)
    block_metadata = BlockMetadata(
        num_rows=1, size_bytes=1, input_files=None, exec_stats=None
    )
    bundle = RefBundle([(block_ref, block_metadata)], owns_blocks=True, schema=None)
    # Add two 1-byte `RefBundle` to the union operator.
    topology[union_op].add_output(bundle)
    topology[union_op].add_output(bundle)
    resource_manager.update_usages()
    # The total object store memory usage should be 2. If the resource manager double-
    # counts the usage from the union operator, the total object store memory usage can
    # be greater than 2.
    total_object_store_memory = sum(
        [
            resource_manager.get_op_usage(
                op, include_ineligible_downstream=True
            ).object_store_memory
            for op in topology.keys()
        ]
    )
    assert total_object_store_memory == 2, total_object_store_memory
def test_per_input_inqueue_attribution_for_union():
    """Test that per-input attribution correctly charges each upstream operator
    only for the blocks it produced in the union's internal input queue.

    When preserve_order=True, the union operator buffers blocks per-input.
    The resource manager should attribute each input buffer's memory only to
    the corresponding upstream operator, not to all upstream operators.
    """
    # Create a mock topology:
    #
    # input1 ───┐
    #           ├─▶ union_op
    # input2 ───┘
    input1 = PhysicalOperator("op1", [], DataContext.get_current())
    input2 = PhysicalOperator("op2", [], DataContext.get_current())
    union_op = UnionOperator(DataContext.get_current(), input1, input2)
    options = ExecutionOptions()
    options.preserve_order = True  # forces per-input buffering in the union
    topology = build_streaming_topology(union_op, options)
    # Create a resource manager with a budget far above the 20 bytes added.
    total_resources = ExecutionResources(cpu=0, object_store_memory=200)
    resource_manager = ResourceManager(
        topology, options, lambda: total_resources, DataContext.get_current()
    )
    # Create two 10-byte RefBundles with distinct block refs (simulates real execution
    # where each block from a source has its own ObjectRef).
    # NOTE(review): 28-byte ids — presumably Ray's object-id length; confirm.
    block_ref1 = ray.ObjectRef(b"1" * 28)
    block_ref2 = ray.ObjectRef(b"2" * 28)
    block_metadata = BlockMetadata(
        num_rows=1, size_bytes=10, input_files=None, exec_stats=None
    )
    bundle1 = RefBundle([(block_ref1, block_metadata)], owns_blocks=True, schema=None)
    bundle2 = RefBundle([(block_ref2, block_metadata)], owns_blocks=True, schema=None)
    # Add blocks only to input2's buffer inside the union operator.
    # With preserve_order=True, _add_input_inner routes to _input_buffers[input_index].
    union_op.add_input(bundle1, input_index=1)
    union_op.add_input(bundle2, input_index=1)
    resource_manager.update_usages()
    # input2 should be charged for its blocks in the union's input buffer (20 bytes).
    input2_usage = resource_manager.get_op_usage(
        input2, include_ineligible_downstream=True
    ).object_store_memory
    # input1 should NOT be charged for input2's blocks (0 bytes from union inqueue).
    input1_usage = resource_manager.get_op_usage(
        input1, include_ineligible_downstream=True
    ).object_store_memory
    assert input1_usage == 0
    assert input2_usage == 20
if __name__ == "__main__":
    # Allow invoking this test module directly via `python <file>`.
    import sys

    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/unit/test_resource_manager.py",
"license": "Apache License 2.0",
"lines": 177,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:ci/ray_ci/ray_image.py | """
Shared RayImage dataclass for image identity logic.
Centralizes wanda image naming, validation, repo lookup, arch suffix,
and variation suffix computation used by both ci/build/build_image.py
and ci/ray_ci/automation/push_ray_image.py.
"""
from __future__ import annotations
from dataclasses import dataclass
from ci.ray_ci.configs import DEFAULT_ARCHITECTURE, DEFAULT_PYTHON_TAG_VERSION
from ci.ray_ci.docker_container import (
ARCHITECTURES_RAY,
ARCHITECTURES_RAY_LLM,
PLATFORMS_RAY,
PLATFORMS_RAY_LLM,
PYTHON_VERSIONS_RAY,
PYTHON_VERSIONS_RAY_LLM,
RAY_REPO_MAP,
RayType,
)
class RayImageError(Exception):
    """Raised when a RayImage field combination is invalid.

    See RayImage.validate() for the checks that raise this error.
    """
# Per-type configuration: valid python versions, platforms, architectures,
# and the default python/platform (used for nightly aliases and CLI defaults).
# Keys are RayType members; plain-string lookups appear to work too (see
# validate() below) — presumably RayType is a str-based enum, TODO confirm.
IMAGE_TYPE_CONFIG: dict[str, dict] = {
    RayType.RAY: {
        "python_versions": PYTHON_VERSIONS_RAY,
        "platforms": PLATFORMS_RAY,
        "architectures": ARCHITECTURES_RAY,
        "default_python": DEFAULT_PYTHON_TAG_VERSION,
        "default_platform": "cpu",
    },
    # The "-extra" variant shares the plain-ray support matrix.
    RayType.RAY_EXTRA: {
        "python_versions": PYTHON_VERSIONS_RAY,
        "platforms": PLATFORMS_RAY,
        "architectures": ARCHITECTURES_RAY,
        "default_python": DEFAULT_PYTHON_TAG_VERSION,
        "default_platform": "cpu",
    },
    RayType.RAY_LLM: {
        "python_versions": PYTHON_VERSIONS_RAY_LLM,
        "platforms": PLATFORMS_RAY_LLM,
        "architectures": ARCHITECTURES_RAY_LLM,
        "default_python": "3.11",
        "default_platform": "cu12.8.1-cudnn",
    },
    # The "-extra" variant shares the ray-llm support matrix.
    RayType.RAY_LLM_EXTRA: {
        "python_versions": PYTHON_VERSIONS_RAY_LLM,
        "platforms": PLATFORMS_RAY_LLM,
        "architectures": ARCHITECTURES_RAY_LLM,
        "default_python": "3.11",
        "default_platform": "cu12.8.1-cudnn",
    },
}
@dataclass(frozen=True)
class RayImage:
    """Immutable identity of a Ray Docker image variant.

    Fields:
        image_type: Image family, e.g. "ray" or "ray-llm". RayType enum
            values are normalized to their plain-string form in
            ``__post_init__``.
        python_version: Python tag, e.g. "3.11".
        platform: "cpu", "tpu", or a CUDA tag such as "cu12.8.1-cudnn".
        architecture: Target architecture; the default gets no name suffix.
    """

    image_type: str
    python_version: str
    platform: str
    architecture: str = DEFAULT_ARCHITECTURE

    def __post_init__(self):
        # Normalize RayType enum values to plain strings so f-strings
        # produce "ray" instead of "RayType.RAY" (Python 3.12+ changed
        # str(Enum) formatting). object.__setattr__ is required because
        # the dataclass is frozen.
        if isinstance(self.image_type, RayType):
            object.__setattr__(self, "image_type", self.image_type.value)

    @property
    def wanda_image_name(self) -> str:
        """Wanda output image name (without registry prefix).

        "cpu" needs no special casing: it formats exactly like any other
        platform tag, so one f-string covers all platforms. (The previous
        dedicated ``platform == "cpu"`` branch produced an identical
        string and was redundant.)
        """
        return (
            f"{self.image_type}-py{self.python_version}"
            f"-{self.platform}{self.arch_suffix}"
        )

    @property
    def arch_suffix(self) -> str:
        """Architecture suffix for image names (empty for default arch)."""
        if self.architecture == DEFAULT_ARCHITECTURE:
            return ""
        return f"-{self.architecture}"

    @property
    def repo(self) -> str:
        """Docker Hub repository name (e.g. 'ray', 'ray-ml', 'ray-llm').

        Raises KeyError for image types missing from RAY_REPO_MAP.
        """
        return RAY_REPO_MAP[self.image_type]

    @property
    def variation_suffix(self) -> str:
        """Variation suffix: '-extra' for extra types, '' otherwise."""
        # image_type is a plain string after __post_init__; comparing it
        # against RayType members works only if RayType members compare
        # equal to their string values (str-based enum) — matches the
        # string-keyed IMAGE_TYPE_CONFIG lookups in validate(). Note this
        # also covers RAY_ML_EXTRA, which IMAGE_TYPE_CONFIG does not list.
        if self.image_type in (
            RayType.RAY_EXTRA,
            RayType.RAY_ML_EXTRA,
            RayType.RAY_LLM_EXTRA,
        ):
            return "-extra"
        return ""

    def validate(self) -> None:
        """Validate image_type/python_version/platform/architecture.

        Raises:
            RayImageError: if the image type is unknown, or any of
                python_version / platform / architecture is unsupported
                for that image type.
        """
        if self.image_type not in IMAGE_TYPE_CONFIG:
            valid = ", ".join(IMAGE_TYPE_CONFIG.keys())
            raise RayImageError(
                f"Unknown image type {self.image_type!r}. Valid types: {valid}"
            )
        cfg = IMAGE_TYPE_CONFIG[self.image_type]
        if self.python_version not in cfg["python_versions"]:
            raise RayImageError(
                f"Invalid python version {self.python_version} "
                f"for {self.image_type}. "
                f"Valid versions: {', '.join(cfg['python_versions'])}"
            )
        if self.platform not in cfg["platforms"]:
            raise RayImageError(
                f"Invalid platform {self.platform} "
                f"for {self.image_type}. "
                f"Valid platforms: {', '.join(cfg['platforms'])}"
            )
        if self.architecture not in cfg["architectures"]:
            raise RayImageError(
                f"Invalid architecture {self.architecture} "
                f"for {self.image_type}. "
                f"Valid architectures: {', '.join(cfg['architectures'])}"
            )
| {
"repo_id": "ray-project/ray",
"file_path": "ci/ray_ci/ray_image.py",
"license": "Apache License 2.0",
"lines": 121,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:ci/ray_ci/test_ray_image.py | """Tests for ci.ray_ci.ray_image.RayImage."""
import sys
import pytest
from ci.ray_ci.configs import DEFAULT_ARCHITECTURE
from ci.ray_ci.docker_container import RayType
from ci.ray_ci.ray_image import IMAGE_TYPE_CONFIG, RayImage, RayImageError
class TestWandaImageName:
    """wanda_image_name composes image type, python tag, platform, and arch suffix."""

    # CUDA platform tag shared by the CUDA cases in the table below.
    DEFAULT_TEST_CUDA_PLATFORM = "cu12.1.1-cudnn8"

    @pytest.mark.parametrize(
        ("image_type", "python_version", "platform", "architecture", "expected"),
        [
            # CPU images
            (RayType.RAY, "3.10", "cpu", DEFAULT_ARCHITECTURE, "ray-py3.10-cpu"),
            (RayType.RAY, "3.10", "cpu", "aarch64", "ray-py3.10-cpu-aarch64"),
            (
                RayType.RAY_EXTRA,
                "3.10",
                "cpu",
                DEFAULT_ARCHITECTURE,
                "ray-extra-py3.10-cpu",
            ),
            # CUDA images
            (
                RayType.RAY,
                "3.11",
                DEFAULT_TEST_CUDA_PLATFORM,
                DEFAULT_ARCHITECTURE,
                f"ray-py3.11-{DEFAULT_TEST_CUDA_PLATFORM}",
            ),
            (
                RayType.RAY,
                "3.11",
                DEFAULT_TEST_CUDA_PLATFORM,
                "aarch64",
                f"ray-py3.11-{DEFAULT_TEST_CUDA_PLATFORM}-aarch64",
            ),
            (
                RayType.RAY_EXTRA,
                "3.11",
                DEFAULT_TEST_CUDA_PLATFORM,
                DEFAULT_ARCHITECTURE,
                f"ray-extra-py3.11-{DEFAULT_TEST_CUDA_PLATFORM}",
            ),
            (
                RayType.RAY_LLM,
                "3.11",
                DEFAULT_TEST_CUDA_PLATFORM,
                DEFAULT_ARCHITECTURE,
                f"ray-llm-py3.11-{DEFAULT_TEST_CUDA_PLATFORM}",
            ),
            (
                RayType.RAY_LLM_EXTRA,
                "3.11",
                DEFAULT_TEST_CUDA_PLATFORM,
                DEFAULT_ARCHITECTURE,
                f"ray-llm-extra-py3.11-{DEFAULT_TEST_CUDA_PLATFORM}",
            ),
            # TPU images
            (RayType.RAY, "3.10", "tpu", DEFAULT_ARCHITECTURE, "ray-py3.10-tpu"),
            # ray-ml types
            (RayType.RAY_ML, "3.10", "cpu", DEFAULT_ARCHITECTURE, "ray-ml-py3.10-cpu"),
            (
                RayType.RAY_ML_EXTRA,
                "3.10",
                DEFAULT_TEST_CUDA_PLATFORM,
                DEFAULT_ARCHITECTURE,
                f"ray-ml-extra-py3.10-{DEFAULT_TEST_CUDA_PLATFORM}",
            ),
        ],
    )
    def test_wanda_image_name(
        self, image_type, python_version, platform, architecture, expected
    ):
        # Name composition only; validity of the combination is not checked here.
        img = RayImage(image_type, python_version, platform, architecture)
        assert img.wanda_image_name == expected
class TestArchSuffix:
    """arch_suffix is empty for the default arch and "-<arch>" otherwise."""

    def test_default_architecture_empty(self):
        assert RayImage("ray", "3.10", "cpu", DEFAULT_ARCHITECTURE).arch_suffix == ""

    def test_aarch64(self):
        assert RayImage("ray", "3.10", "cpu", "aarch64").arch_suffix == "-aarch64"
class TestRepo:
    """repo maps each image type (including -extra variants) to its Docker Hub repo."""

    @pytest.mark.parametrize(
        ("image_type", "expected"),
        [
            (RayType.RAY, "ray"),
            (RayType.RAY_EXTRA, "ray"),
            (RayType.RAY_ML, "ray-ml"),
            (RayType.RAY_ML_EXTRA, "ray-ml"),
            (RayType.RAY_LLM, "ray-llm"),
            (RayType.RAY_LLM_EXTRA, "ray-llm"),
        ],
    )
    def test_repo(self, image_type, expected):
        img = RayImage(image_type, "3.10", "cpu")
        assert img.repo == expected
class TestVariationSuffix:
    """variation_suffix is "-extra" only for the *_EXTRA image types."""

    @pytest.mark.parametrize(
        ("image_type", "expected"),
        [
            (RayType.RAY, ""),
            (RayType.RAY_EXTRA, "-extra"),
            (RayType.RAY_ML, ""),
            (RayType.RAY_ML_EXTRA, "-extra"),
            (RayType.RAY_LLM, ""),
            (RayType.RAY_LLM_EXTRA, "-extra"),
        ],
    )
    def test_variation_suffix(self, image_type, expected):
        img = RayImage(image_type, "3.10", "cpu")
        assert img.variation_suffix == expected
class TestValidateValid:
    """validate() accepts every supported type/python/platform/arch combo
    without raising."""

    def test_ray_cpu(self):
        RayImage("ray", "3.10", "cpu").validate()

    def test_ray_tpu(self):
        RayImage("ray", "3.10", "tpu").validate()

    def test_ray_cuda(self):
        RayImage("ray", "3.13", "cu12.8.1-cudnn").validate()

    def test_ray_extra(self):
        RayImage("ray-extra", "3.12", "cu11.8.0-cudnn8").validate()

    def test_ray_llm(self):
        RayImage("ray-llm", "3.11", "cu12.8.1-cudnn").validate()

    def test_ray_llm_extra(self):
        RayImage("ray-llm-extra", "3.11", "cu12.8.1-cudnn").validate()

    def test_ray_aarch64(self):
        # Non-default architecture is valid for plain ray images.
        RayImage("ray", "3.10", "cpu", "aarch64").validate()
class TestValidateInvalid:
    """validate() raises RayImageError with a message naming the bad field.

    The `match` strings below mirror the messages built in
    RayImage.validate() and must be kept in sync with them.
    """

    def test_unknown_image_type(self):
        with pytest.raises(RayImageError, match="Unknown image type"):
            RayImage("ray-foo", "3.10", "cpu").validate()

    def test_invalid_python_for_ray_llm(self):
        with pytest.raises(
            RayImageError, match="Invalid python version 3.10 for ray-llm"
        ):
            RayImage("ray-llm", "3.10", "cu12.8.1-cudnn").validate()

    def test_invalid_platform_for_ray_llm(self):
        with pytest.raises(RayImageError, match="Invalid platform cpu for ray-llm"):
            RayImage("ray-llm", "3.11", "cpu").validate()

    def test_invalid_platform_for_ray(self):
        with pytest.raises(RayImageError, match="Invalid platform cu99.9.9 for ray"):
            RayImage("ray", "3.10", "cu99.9.9").validate()

    def test_invalid_architecture_for_ray_llm(self):
        with pytest.raises(
            RayImageError, match="Invalid architecture aarch64 for ray-llm"
        ):
            RayImage("ray-llm", "3.11", "cu12.8.1-cudnn", "aarch64").validate()
class TestImageTypeConfig:
    """IMAGE_TYPE_CONFIG covers exactly the four supported image types."""

    def test_expected_types_covered(self):
        assert set(IMAGE_TYPE_CONFIG.keys()) == {
            "ray",
            "ray-extra",
            "ray-llm",
            "ray-llm-extra",
        }
if __name__ == "__main__":
    # Allow invoking this test module directly via `python <file>`.
    sys.exit(pytest.main(["-vv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "ci/ray_ci/test_ray_image.py",
"license": "Apache License 2.0",
"lines": 153,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/serve/tests/test_backpressure_grpc.py | import sys
from typing import Tuple
import grpc
import pytest
import ray
from ray import serve
from ray._common.test_utils import SignalActor, wait_for_condition
from ray.serve._private.common import RequestProtocol
from ray.serve._private.test_utils import get_application_url
from ray.serve.generated import serve_pb2, serve_pb2_grpc
def test_grpc_backpressure(serve_instance):
    """Requests should be rejected with RESOURCE_EXHAUSTED once the queue limit
    is reached (the assertions below check that status and the backpressure
    error message)."""
    signal_actor = SignalActor.remote()

    @serve.deployment(max_ongoing_requests=1, max_queued_requests=1)
    class Deployment:
        async def __call__(self, request: serve_pb2.UserDefinedMessage):
            # Block until the test sends the signal, keeping the single
            # ongoing-request slot occupied.
            await signal_actor.wait.remote()
            return serve_pb2.UserDefinedResponse(greeting=request.name)

    serve.run(Deployment.bind())

    @ray.remote(num_cpus=0)
    def do_request(msg: str) -> Tuple[grpc.StatusCode, str]:
        # Issue one gRPC call; on rejection return the error code/details
        # instead of raising, so the test can assert on them.
        channel = grpc.insecure_channel(
            get_application_url(protocol=RequestProtocol.GRPC)
        )
        stub = serve_pb2_grpc.UserDefinedServiceStub(channel)
        try:
            response, call = stub.__call__.with_call(
                serve_pb2.UserDefinedMessage(name=msg)
            )
            return call.code(), response.greeting
        except grpc.RpcError as e:
            return e.code(), e.details()

    # First response should block. Until the signal is sent, all subsequent requests
    # will be queued in the handle.
    first_ref = do_request.remote("hi-1")
    wait_for_condition(lambda: ray.get(signal_actor.cur_num_waiters.remote()) == 1)
    _, pending = ray.wait([first_ref], timeout=0.1)
    assert len(pending) == 1
    # Check that beyond the 1st queued request, others are dropped due to backpressure.
    second_ref = do_request.remote("hi-2")
    _, pending = ray.wait([second_ref], timeout=0.1)
    for _ in range(10):
        status_code, text = ray.get(do_request.remote(("hi-err")))
        assert status_code == grpc.StatusCode.RESOURCE_EXHAUSTED
        assert text.startswith("Request dropped due to backpressure")
    # Send the signal; the first request will be unblocked and the second should
    # subsequently get scheduled and executed.
    ray.get(signal_actor.send.remote())
    assert ray.get(first_ref) == (grpc.StatusCode.OK, "hi-1")
    assert ray.get(second_ref) == (grpc.StatusCode.OK, "hi-2")
    # Reset the signal and wait for all replica waiters to drain.
    ray.get(signal_actor.send.remote(clear=True))
    wait_for_condition(lambda: ray.get(signal_actor.cur_num_waiters.remote()) == 0)
if __name__ == "__main__":
    # Allow invoking this test module directly via `python <file>`.
    sys.exit(pytest.main(["-v", "-s", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/test_backpressure_grpc.py",
"license": "Apache License 2.0",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/serve/tests/test_cli_4.py | import os
import subprocess
import sys
from tempfile import NamedTemporaryFile
import grpc
import httpx
import pytest
from ray._common.test_utils import wait_for_condition
from ray.serve._private.test_utils import (
get_application_url,
ping_fruit_stand,
ping_grpc_another_method,
ping_grpc_call_method,
ping_grpc_healthz,
ping_grpc_list_applications,
ping_grpc_model_multiplexing,
ping_grpc_streaming,
)
from ray.serve.generated import serve_pb2, serve_pb2_grpc
from ray.serve.tests.test_cli_2 import ping_endpoint
@pytest.mark.skipif(sys.platform == "win32", reason="File path incorrect on Windows.")
def test_build_multi_app(ray_start_stop):
    """`serve build` + `serve deploy` of three apps (two HTTP, one gRPC),
    then verify reachability and that `serve shutdown` removes them."""
    # The built config is written to a temp YAML file and fed to `serve deploy`.
    with NamedTemporaryFile(mode="w+", suffix=".yaml") as tmp:
        print('Building nodes "TestApp1Node" and "TestApp2Node".')
        # Build an app
        grpc_servicer_func_root = "ray.serve.generated.serve_pb2_grpc"
        subprocess.check_output(
            [
                "serve",
                "build",
                "ray.serve.tests.test_cli_3.TestApp1Node",
                "ray.serve.tests.test_cli_3.TestApp2Node",
                "ray.serve.tests.test_config_files.grpc_deployment.g",
                "--grpc-servicer-functions",
                f"{grpc_servicer_func_root}.add_UserDefinedServiceServicer_to_server",
                "-o",
                tmp.name,
            ]
        )
        print("Build succeeded! Deploying node.")
        subprocess.check_output(["serve", "deploy", tmp.name])
        print("Deploy succeeded!")
        wait_for_condition(
            lambda: ping_endpoint("app1") == "wonderful world", timeout=15
        )
        print("App 1 is live and reachable over HTTP.")
        wait_for_condition(
            lambda: ping_endpoint("app2") == "wonderful world", timeout=15
        )
        print("App 2 is live and reachable over HTTP.")

        # The third app is gRPC-only; call its servicer method directly.
        app_name = "app3"
        channel = grpc.insecure_channel(get_application_url("gRPC", app_name=app_name))
        stub = serve_pb2_grpc.UserDefinedServiceStub(channel)
        request = serve_pb2.UserDefinedMessage(name="foo", num=30, foo="bar")
        metadata = (("application", app_name),)
        response = stub.__call__(request=request, metadata=metadata)
        assert response.greeting == "Hello foo from bar"
        print("App 3 is live and reachable over gRPC.")

        print("Deleting applications.")
        # Capture the URLs before shutdown so we can probe them afterwards.
        app_urls = [
            get_application_url("HTTP", app_name=app) for app in ["app1", "app2"]
        ]
        subprocess.check_output(["serve", "shutdown", "-y"])

        def check_no_apps():
            # Every previously served URL must now fail to respond.
            for url in app_urls:
                with pytest.raises(httpx.HTTPError):
                    _ = httpx.get(url).text
            return True

        wait_for_condition(check_no_apps, timeout=15)
        print("Delete succeeded! Node is no longer reachable over HTTP.")
@pytest.mark.skipif(sys.platform == "win32", reason="File path incorrect on Windows.")
def test_serving_request_through_grpc_proxy(ray_start_stop):
    """Test serving request through gRPC proxy

    When Serve runs with a gRPC deployment, the app should be deployed successfully,
    both ListApplications and Healthz methods returning success response, and registered
    gRPC methods are routing to the correct replica and return the correct response.
    """
    config_file = os.path.join(
        os.path.dirname(__file__),
        "test_config_files",
        "deploy_grpc_app.yaml",
    )
    subprocess.check_output(["serve", "deploy", config_file], stderr=subprocess.STDOUT)
    app1 = "app1"
    app_names = [app1]
    channel = grpc.insecure_channel(get_application_url("gRPC", app_name=app1))
    # Ensures ListApplications method succeeding (retried until deploy settles).
    wait_for_condition(
        ping_grpc_list_applications, channel=channel, app_names=app_names
    )
    # Ensures Healthz method succeeding.
    ping_grpc_healthz(channel)
    # Ensures a custom defined method is responding correctly.
    ping_grpc_call_method(channel, app1)
    # Ensures another custom defined method is responding correctly.
    ping_grpc_another_method(channel, app1)
    # Ensures model multiplexing is responding correctly.
    ping_grpc_model_multiplexing(channel, app1)
    # Ensure Streaming method is responding correctly.
    ping_grpc_streaming(channel, app1)
@pytest.mark.skipif(sys.platform == "win32", reason="File path incorrect on Windows.")
def test_grpc_proxy_model_composition(ray_start_stop):
    """End-to-end check of model composition through the Serve gRPC proxy.

    Deploys a composed (multi-deployment) app from a YAML config via the
    `serve deploy` CLI, then verifies that ListApplications and Healthz return
    success and that a request through the composition graph produces the
    correct response.
    """
    # The YAML config lives next to this test file.
    config_file = os.path.join(
        os.path.dirname(__file__),
        "test_config_files",
        "deploy_grpc_model_composition.yaml",
    )
    # Deploy through the CLI exactly as a user would; raises on non-zero exit.
    subprocess.check_output(["serve", "deploy", config_file], stderr=subprocess.STDOUT)
    app = "app1"
    app_names = [app]
    channel = grpc.insecure_channel(get_application_url("gRPC", app_name=app))
    # Ensures ListApplications method succeeding.
    wait_for_condition(
        ping_grpc_list_applications, channel=channel, app_names=app_names
    )
    # Ensures Healthz method succeeding.
    ping_grpc_healthz(channel)
    # Ensure model composition is responding correctly.
    ping_fruit_stand(channel, app)
if __name__ == "__main__":
sys.exit(pytest.main(["-v", "-s", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/test_cli_4.py",
"license": "Apache License 2.0",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/serve/tests/test_failure_2.py | import sys
import httpx
import pytest
import ray
from ray import serve
from ray._common.test_utils import SignalActor
def test_no_available_replicas_does_not_block_proxy(serve_instance):
    """Test that handle blocking waiting for replicas doesn't block proxy.

    This is essential so that other requests and health checks can pass while a
    deployment is deploying/updating.

    See https://github.com/ray-project/ray/issues/36460.
    """
    @serve.deployment
    class SlowStarter:
        def __init__(self, starting_actor, finish_starting_actor):
            # Announce that the constructor has started, then block until the
            # test explicitly allows startup to complete.
            ray.get(starting_actor.send.remote())
            ray.get(finish_starting_actor.wait.remote())
        def __call__(self):
            return "hi"
    @ray.remote
    def make_blocked_request():
        # Issued from a separate Ray task so this request can sit blocked
        # while the test keeps probing the proxy from the driver.
        r = httpx.get("http://localhost:8000/")
        r.raise_for_status()
        return r.text
    # Loop twice: first iteration tests deploying from nothing, second iteration
    # tests updating the replicas of an existing deployment.
    for _ in range(2):
        starting_actor = SignalActor.remote()
        finish_starting_actor = SignalActor.remote()
        serve._run(
            SlowStarter.bind(starting_actor, finish_starting_actor), _blocking=False
        )
        # Ensure that the replica has been started (we use _blocking=False).
        ray.get(starting_actor.wait.remote())
        # The request shouldn't complete until the replica has finished started.
        blocked_ref = make_blocked_request.remote()
        with pytest.raises(TimeoutError):
            ray.get(blocked_ref, timeout=1)
        # If the proxy's loop was blocked, these would hang.
        httpx.get("http://localhost:8000/-/routes").raise_for_status()
        httpx.get("http://localhost:8000/-/healthz").raise_for_status()
        # Signal the replica to finish starting; request should complete.
        ray.get(finish_starting_actor.send.remote())
        assert ray.get(blocked_ref) == "hi"
if __name__ == "__main__":
sys.exit(pytest.main(["-v", "-s", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/test_failure_2.py",
"license": "Apache License 2.0",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/serve/tests/test_logging_2.py | import os
import sys
import uuid
import httpx
import pytest
from fastapi import FastAPI
from ray import serve
from ray._common.test_utils import wait_for_condition
from ray.serve._private.logging_utils import (
get_serve_logs_dir,
)
from ray.serve._private.utils import get_component_file_name
from ray.util.state import list_nodes
def test_http_access_log_in_proxy_logs_file(serve_instance):
    """Verify an HTTP access log line (keyed by X-Request-ID) reaches the
    proxy's log file when TEXT encoding is configured."""
    name = "deployment_name"
    fastapi_app = FastAPI()
    @serve.deployment(name=name)
    @serve.ingress(fastapi_app)
    class Handler:
        @fastapi_app.get("/")
        def get_root(self):
            return "Hello World!"
    serve.run(Handler.bind(), logging_config={"encoding": "TEXT"})
    # Get log file information
    nodes = list_nodes()
    serve_log_dir = get_serve_logs_dir()
    # Single-node test cluster: the proxy runs on the only node.
    node_ip_address = nodes[0].node_ip
    proxy_log_file_name = get_component_file_name(
        "proxy", node_ip_address, component_type=None, suffix=".log"
    )
    proxy_log_path = os.path.join(serve_log_dir, proxy_log_file_name)
    # Use a unique request id so the log line can be matched unambiguously.
    request_id = str(uuid.uuid4())
    response = httpx.get("http://localhost:8000", headers={"X-Request-ID": request_id})
    assert response.status_code == 200
    def verify_request_id_in_logs(proxy_log_path, request_id):
        # Log lines are flushed asynchronously, so scan the whole file each try.
        with open(proxy_log_path, "r") as f:
            for line in f:
                if request_id in line:
                    return True
        return False
    wait_for_condition(
        verify_request_id_in_logs, proxy_log_path=proxy_log_path, request_id=request_id
    )
if __name__ == "__main__":
sys.exit(pytest.main(["-v", "-s", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/test_logging_2.py",
"license": "Apache License 2.0",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/serve/async_inference_autoscaling_policy.py | import asyncio
import logging
import time
from typing import Any, Dict, Optional, Tuple, Union
from ray.serve._private.broker import Broker
from ray.serve._private.constants import SERVE_LOGGER_NAME
from ray.serve.config import AutoscalingContext
logger = logging.getLogger(SERVE_LOGGER_NAME)
DEFAULT_ASYNC_INFERENCE_QUEUE_POLL_INTERVAL_S = 10.0
class AsyncInferenceAutoscalingPolicy:
    """Autoscaling policy that scales replicas based on message queue length.

    Polls a message broker (Redis or RabbitMQ) for queue length and combines
    it with HTTP request load to compute the desired number of replicas.

    Polling uses one-shot async tasks instead of an infinite background loop.
    An infinite ``while True`` coroutine holds a strong reference to ``self``
    through the coroutine, and the event loop keeps the task alive, so
    ``__del__`` would never fire after the framework drops the policy on
    redeploy/deregistration — leaking both the poller and the broker
    connection. Instead, each poll is a single one-shot task kicked off from
    ``__call__`` when the poll interval has elapsed. The task completes
    naturally after one poll, so there is at most one short-lived in-flight
    task at any time and no cleanup is needed when the policy is
    garbage-collected.

    This policy is intended for use with ``@task_consumer`` deployments.
    Pass it as a class-based policy via ``AutoscalingPolicy``:

    .. code-block:: python

        from ray.serve.config import AutoscalingConfig, AutoscalingPolicy

        @serve.deployment(
            autoscaling_config=AutoscalingConfig(
                min_replicas=1,
                max_replicas=10,
                policy=AutoscalingPolicy(
                    policy_function=AsyncInferenceAutoscalingPolicy,
                    policy_kwargs={
                        "broker_url": "redis://localhost:6379/0",
                        "queue_name": "my_queue",
                    },
                ),
            ),
        )
        @task_consumer(task_processor_config=config)
        class MyConsumer: ...

    Args:
        broker_url: URL of the message broker (e.g. ``redis://localhost:6379/0``
            or ``amqp://guest:guest@localhost:5672//``).
        queue_name: Name of the queue to monitor.
        rabbitmq_management_url: RabbitMQ HTTP management API URL. Only required
            for RabbitMQ brokers (e.g. ``http://guest:guest@localhost:15672/api/``).
        poll_interval_s: How often (in seconds) to poll the broker for queue
            length. Defaults to 10s. Lower values increase responsiveness
            but add broker load.
    """
    def __init__(
        self,
        broker_url: str,
        queue_name: str,
        rabbitmq_management_url: Optional[str] = None,
        poll_interval_s: float = DEFAULT_ASYNC_INFERENCE_QUEUE_POLL_INTERVAL_S,
    ):
        self._broker_url = broker_url
        self._queue_name = queue_name
        self._rabbitmq_management_url = rabbitmq_management_url
        self._poll_interval_s = poll_interval_s
        # Last queue length observed by a completed poll (0 until first poll;
        # stale between polls by design).
        self._queue_length: int = 0
        # Broker connection is created lazily on first __call__ (_ensure_broker).
        self._broker: Optional[Broker] = None
        # The single in-flight one-shot poll task, if any.
        self._task: Optional[asyncio.Task] = None
        self._last_poll_time: float = 0.0
    def _ensure_broker(self) -> None:
        """Lazily initialize the broker connection."""
        if self._broker is not None:
            return
        # The management HTTP API is only passed for RabbitMQ-style brokers.
        if self._rabbitmq_management_url is not None:
            self._broker = Broker(
                self._broker_url, http_api=self._rabbitmq_management_url
            )
        else:
            self._broker = Broker(self._broker_url)
    async def _poll_once(self) -> None:
        """Single one-shot poll of the broker for queue length.

        On failure the previous queue length is kept (and a warning logged)
        so autoscaling degrades gracefully during broker hiccups.
        """
        try:
            queues = await self._broker.queues([self._queue_name])
            if queues is not None:
                for q in queues:
                    if q.get("name") == self._queue_name:
                        queue_length = q.get("messages")
                        if queue_length is not None:
                            self._queue_length = queue_length
                        break
        except Exception as e:
            logger.warning(f"Failed to get queue length for '{self._queue_name}': {e}")
    def __call__(
        self, ctx: AutoscalingContext
    ) -> Tuple[Union[int, float], Dict[str, Any]]:
        """Return ``(desired_num_replicas, extra_info)`` for this decision tick."""
        self._ensure_broker()
        # Clear completed poll task so a new one can be started.
        if self._task is not None and self._task.done():
            self._task = None
        # Start a new poll if the interval has elapsed and no poll is in-flight.
        now = time.monotonic()
        if self._task is None and (now - self._last_poll_time) >= self._poll_interval_s:
            self._last_poll_time = now
            self._task = asyncio.get_running_loop().create_task(self._poll_once())
        num_running_replicas = ctx.current_num_replicas
        # Treat queued broker messages as additional outstanding requests.
        total_workload = ctx.total_num_requests + self._queue_length
        config = ctx.config
        if num_running_replicas == 0:
            # Scale from zero as soon as there is any work at all.
            return 1 if total_workload > 0 else 0, {"queue_length": self._queue_length}
        target_num_requests = (
            config.get_target_ongoing_requests() * num_running_replicas
        )
        # Proportional control: scale replicas by how far actual load is from
        # the aggregate target.
        error_ratio = total_workload / target_num_requests
        desired_num_replicas = num_running_replicas * error_ratio
        return desired_num_replicas, {"queue_length": self._queue_length}
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/async_inference_autoscaling_policy.py",
"license": "Apache License 2.0",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/serve/tests/test_task_consumer_autoscaling.py | import os
import sys
import pytest
import ray
from ray import serve
from ray._common.test_utils import SignalActor, wait_for_condition
from ray.serve._private.common import DeploymentID, ReplicaState
from ray.serve.config import AutoscalingConfig, AutoscalingPolicy
from ray.serve.schema import CeleryAdapterConfig, TaskProcessorConfig
from ray.serve.task_consumer import (
instantiate_adapter_from_config,
task_consumer,
task_handler,
)
from ray.tests.conftest import external_redis # noqa: F401
@ray.remote
def enqueue_task(processor_config: TaskProcessorConfig, data, task_name="process"):
    """Ray task that enqueues one message onto the consumer's broker queue.

    Builds the adapter from ``processor_config`` (the same construction path a
    real producer would use) and returns the broker-assigned task id.
    """
    adapter = instantiate_adapter_from_config(task_processor_config=processor_config)
    result = adapter.enqueue_task_sync(task_name, args=[data])
    # The enqueue must yield a broker-assigned id; fail the task otherwise.
    assert result.id is not None
    return result.id
def get_num_running_replicas(controller, deployment_name, app_name):
    """Return how many RUNNING replicas the given deployment currently has."""
    dep_id = DeploymentID(name=deployment_name, app_name=app_name)
    # Dump the controller's replica-state table for just this deployment.
    state_dump_ref = controller._dump_replica_states_for_testing.remote(dep_id)
    replica_states = ray.get(state_dump_ref)
    running_replicas = replica_states.get([ReplicaState.RUNNING])
    return len(running_replicas)
@pytest.mark.skipif(sys.platform == "win32", reason="Flaky on Windows.")
class TestTaskConsumerQueueAutoscaling:
    """Test queue-based autoscaling for TaskConsumer deployments."""
    def test_task_consumer_queue_autoscaling(
        self, external_redis, serve_instance  # noqa: F811
    ):
        """Test that TaskConsumer deployments autoscale based on queue length.

        Verifies the full e2e flow:
        1. Replicas scale up when messages pile up in the queue
        2. Replicas scale down when the queue drains
        """
        # external_redis fixture exports the broker address via env var.
        redis_address = os.environ.get("RAY_REDIS_ADDRESS")
        app_name = "autoscaling_app"
        deployment_name = "AutoscalingConsumer"
        processor_config = TaskProcessorConfig(
            queue_name="autoscaling_test_queue",
            adapter_config=CeleryAdapterConfig(
                broker_url=f"redis://{redis_address}/0",
                backend_url=f"redis://{redis_address}/1",
            ),
        )
        signal = SignalActor.remote()
        @serve.deployment(
            name=deployment_name,
            max_ongoing_requests=1,
            autoscaling_config=AutoscalingConfig(
                min_replicas=1,
                max_replicas=5,
                target_ongoing_requests=1,
                # Aggressive timings so the test observes scaling quickly.
                upscale_delay_s=0,
                downscale_delay_s=0,
                metrics_interval_s=0.1,
                look_back_period_s=0.5,
                policy=AutoscalingPolicy(
                    policy_function="ray.serve.async_inference_autoscaling_policy:AsyncInferenceAutoscalingPolicy",
                    policy_kwargs={
                        "broker_url": f"redis://{redis_address}/0",
                        "queue_name": "autoscaling_test_queue",
                    },
                ),
            ),
        )
        @task_consumer(task_processor_config=processor_config)
        class AutoscalingConsumer:
            def __init__(self, signal_actor):
                self._signal = signal_actor
            @task_handler(name="process")
            def process(self, data):
                # Block until the test releases the signal, so tasks pile up.
                ray.get(self._signal.wait.remote())
        _ = serve.run(
            AutoscalingConsumer.bind(signal),
            name=app_name,
            route_prefix="/autoscaling",
        )
        controller = serve_instance._controller
        # Wait for initial replica to be running
        wait_for_condition(
            lambda: get_num_running_replicas(controller, deployment_name, app_name)
            == 1,
            timeout=30,
        )
        # Enqueue tasks to build up the queue (signal blocks processing)
        num_tasks = 10
        for i in range(num_tasks):
            enqueue_task.remote(processor_config, f"data_{i}")
        # Wait for replicas to scale up to max_replicas
        wait_for_condition(
            lambda: get_num_running_replicas(controller, deployment_name, app_name)
            == 5,
            timeout=60,
        )
        # Release the signal to let all tasks drain
        ray.get(signal.send.remote())
        # Wait for replicas to scale back down to min_replicas
        wait_for_condition(
            lambda: get_num_running_replicas(controller, deployment_name, app_name)
            == 1,
            timeout=60,
        )
        serve.delete(app_name)
    def test_task_consumer_scale_from_and_to_zero(
        self, external_redis, serve_instance  # noqa: F811
    ):
        """Test that TaskConsumer deployments can scale down to zero.

        Verifies:
        1. Replicas scale up when messages pile up in the queue
        2. Replicas scale down to 0 when the queue drains
        """
        redis_address = os.environ.get("RAY_REDIS_ADDRESS")
        app_name = "scale_to_zero_app"
        deployment_name = "ScaleToZeroConsumer"
        processor_config = TaskProcessorConfig(
            queue_name="scale_to_zero_queue",
            adapter_config=CeleryAdapterConfig(
                broker_url=f"redis://{redis_address}/0",
                backend_url=f"redis://{redis_address}/1",
            ),
        )
        signal = SignalActor.remote()
        @serve.deployment(
            name=deployment_name,
            max_ongoing_requests=1,
            autoscaling_config=AutoscalingConfig(
                # min_replicas=0 enables scale-to-zero for this deployment.
                min_replicas=0,
                max_replicas=5,
                target_ongoing_requests=1,
                upscale_delay_s=0,
                downscale_delay_s=0,
                downscale_to_zero_delay_s=5,
                metrics_interval_s=0.1,
                look_back_period_s=0.5,
                policy=AutoscalingPolicy(
                    policy_function="ray.serve.async_inference_autoscaling_policy:AsyncInferenceAutoscalingPolicy",
                    policy_kwargs={
                        "broker_url": f"redis://{redis_address}/0",
                        "queue_name": "scale_to_zero_queue",
                        # Short poll so scale-from-zero reacts within the test timeout.
                        "poll_interval_s": 1,
                    },
                ),
            ),
        )
        @task_consumer(task_processor_config=processor_config)
        class ScaleToZeroConsumer:
            def __init__(self, signal_actor):
                self._signal = signal_actor
            @task_handler(name="process")
            def process(self, data):
                ray.get(self._signal.wait.remote())
        _ = serve.run(
            ScaleToZeroConsumer.bind(signal),
            name=app_name,
            route_prefix="/scale_to_zero",
        )
        controller = serve_instance._controller
        # With an empty queue and no traffic, the deployment should idle at 0.
        wait_for_condition(
            lambda: get_num_running_replicas(controller, deployment_name, app_name)
            == 0,
            timeout=60,
        )
        # A single queued task must wake the deployment from zero.
        enqueue_task.remote(processor_config, "data_0")
        wait_for_condition(
            lambda: get_num_running_replicas(controller, deployment_name, app_name)
            == 1,
            timeout=60,
        )
        # Release the signal to let all tasks drain
        ray.get(signal.send.remote())
        # Wait for replicas to scale down to 0
        wait_for_condition(
            lambda: get_num_running_replicas(controller, deployment_name, app_name)
            == 0,
            timeout=60,
        )
        serve.delete(app_name)
if __name__ == "__main__":
sys.exit(pytest.main(["-v", "-s", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/test_task_consumer_autoscaling.py",
"license": "Apache License 2.0",
"lines": 184,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/tests/typing_files/check_typing_actor_async.py | import ray
from ray import ObjectRef
@ray.remote
class AsyncActor:
    """Typing fixture mixing async and sync methods, with and without
    ``@ray.method`` arguments, used to verify ``.remote()`` return typing."""
    @ray.method
    async def add(self, a: int, b: int) -> int:
        return a + b
    @ray.method(num_returns=1)
    async def mul(self, a: int, b: int) -> int:
        return a * b
    @ray.method(num_returns=1)
    def divide(self, a: int, b: int) -> int:
        # Raise a clearer error than ZeroDivisionError for callers.
        if b == 0:
            raise ValueError("Division by zero")
        return a // b
    @ray.method(num_returns=1)
    def echo(self, x: str) -> str:
        return x
# Exercise each method's .remote(); the annotations assert that the call
# returns ObjectRef parameterized with the method's declared return type.
actor = AsyncActor.remote()
ref_add: ObjectRef[int] = actor.add.remote(1, 2)
ref_mul: ObjectRef[int] = actor.mul.remote(2, 3)
ref_echo: ObjectRef[str] = actor.echo.remote("hello")
ref_divide: ObjectRef[int] = actor.divide.remote(10, 2)
# ray.get() should resolve each ref to the annotated return type.
result_add: int = ray.get(ref_add)
result_mul: int = ray.get(ref_mul)
result_echo: str = ray.get(ref_echo)
result_divide: int = ray.get(ref_divide)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/typing_files/check_typing_actor_async.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/test_map_transformer.py | import gc
import weakref
from collections import deque
import pandas as pd
import pytest
from ray.data._internal.execution.interfaces.task_context import TaskContext
from ray.data._internal.execution.operators.map_transformer import (
BatchMapTransformFn,
MapTransformer,
)
from ray.data._internal.output_buffer import OutputBlockSizeOption
from ray.data._internal.planner.plan_udf_map_op import (
_generate_transform_fn_for_map_batches,
)
from ray.data.block import DataBatch
def _create_chained_transformer(udf, n):
    """Build a MapTransformer that chains ``n`` identical batch transforms,
    each wrapping ``udf`` via the production map_batches code path."""
    chained_fns = []
    for _ in range(n):
        wrapped_udf = _generate_transform_fn_for_map_batches(udf)
        chained_fns.append(
            BatchMapTransformFn(
                wrapped_udf,
                batch_size=1,
                output_block_size_option=OutputBlockSizeOption.of(
                    target_max_block_size=1
                ),
            )
        )
    return MapTransformer(chained_fns)
def test_chained_transforms_release_intermediates_between_batches():
    """Test that chained transforms release intermediate refs when moving to next batch.

    This test uses `_generate_transform_fn_for_map_batches` to wrap UDFs,
    which is the same code path used in production by `map_batches`.
    """
    NUM_BATCHES = 1
    NUM_CHAINED_TRANSFORMS = 5
    # Weakrefs to every batch each chained UDF received, in call order.
    input_intermediates: deque = deque()
    def udf(batch: DataBatch) -> DataBatch:
        # Append received batch into a list
        #
        # NOTE: Every of the chained UDFs will be appending into this list in
        # order, meaning that in 1 iteration N refs will be added, where
        # N is the number of chained UDFs
        input_intermediates.append(weakref.ref(batch))
        return pd.DataFrame({"id": batch["id"] * 2})
    transformer = _create_chained_transformer(udf, NUM_CHAINED_TRANSFORMS)
    ctx = TaskContext(task_idx=0, op_name="test")
    # Use a generator instead of a list to avoid list_iterator holding references
    def make_input_blocks():
        for i in range(NUM_BATCHES):
            yield pd.DataFrame({"id": [i + 1]})
    result_iter = transformer.apply_transform(make_input_blocks(), ctx)
    for i in range(NUM_BATCHES):
        # Consume batch
        result = next(result_iter)
        assert result is not None
        # Each of the chained transforms doubles "id", hence the 2**N factor.
        pd.testing.assert_frame_equal(
            result, pd.DataFrame({"id": [(i + 1) * 2**NUM_CHAINED_TRANSFORMS]})
        )
        # Trigger GC
        gc.collect()
        # Extract current set of intermediate input refs
        cur_intermediates = [
            input_intermediates.popleft() for _ in range(NUM_CHAINED_TRANSFORMS)
        ]
        assert len(input_intermediates) == 0
        # After GC, no intermediate batch should still be reachable.
        alive_after_first = sum(1 for ref in cur_intermediates if ref() is not None)
        if alive_after_first > 0:
            print(">>> Found captured intermediate references!")
            _trace_back_refs(cur_intermediates, "After first batch")
            pytest.fail(
                f"Expected 0 intermediates alive after first batch, found {alive_after_first}"
            )
def _trace_back_refs(intermediates: list, label: str = ""):
"""Debug utility to show which intermediates are alive and what holds them.
Args:
intermediates: List of weakrefs to track
label: Optional label for the debug output
"""
if label:
print(f"\n{label}:")
for i, ref in enumerate(intermediates):
obj = ref()
print(f" intermediate[{i}]: {'ALIVE' if obj is not None else 'dead'}")
if obj is not None:
referrers = gc.get_referrers(obj)
for r in referrers:
if isinstance(r, list):
print(f" -> list (len={len(r)}, id={id(r)})")
# Find what holds this list - 2 levels up
list_referrers = gc.get_referrers(r)
for lr in list_referrers:
if hasattr(lr, "gi_frame") and lr.gi_frame:
print(
f" held by generator: {lr.__name__} at "
f"{lr.gi_frame.f_code.co_filename.split('/')[-1]}:"
f"{lr.gi_frame.f_lineno}"
)
elif hasattr(lr, "__class__") and not isinstance(
lr, (dict, list, tuple)
):
print(f" held by {type(lr).__name__}")
elif isinstance(r, dict):
# Skip frame dicts
pass
elif hasattr(r, "gi_frame"):
frame = r.gi_frame
if frame:
print(
f" -> generator: {r.__name__} at "
f"{frame.f_code.co_filename.split('/')[-1]}:{frame.f_lineno}"
)
else:
print(f" -> {type(r).__name__}")
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_map_transformer.py",
"license": "Apache License 2.0",
"lines": 114,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:doc/source/train/examples/pytorch/tensor_parallel_dtensor/ci/nb2py.py | #!/usr/bin/env python3
import argparse
import nbformat
def convert_notebook(input_path: str, output_path: str) -> None:
    """
    Read a Jupyter notebook and write a Python script, converting all %%bash
    cells and IPython "!" commands into subprocess.run calls that raise on error.
    Cells that load or autoreload extensions are ignored.

    Args:
        input_path: Path to the input ``.ipynb`` notebook.
        output_path: Path where the generated ``.py`` script is written.
    """
    nb = nbformat.read(input_path, as_version=4)
    # Notebook sources are UTF-8 by the nbformat spec; write with an explicit
    # encoding so conversion does not depend on the platform default.
    with open(output_path, "w", encoding="utf-8") as out:
        for cell in nb.cells:
            # Only process code cells
            if cell.cell_type != "code":
                continue
            lines = cell.source.splitlines()
            # Skip cells that load or autoreload extensions
            if any(
                line.strip().startswith(("%load_ext autoreload", "%autoreload all"))
                for line in lines
            ):
                continue
            # Detect a %%bash cell
            if lines and lines[0].strip().startswith("%%bash"):
                bash_script = "\n".join(lines[1:]).rstrip()
                # NOTE(review): a script containing ''' would break the raw
                # triple-quoted literal below — acceptable for these docs CI
                # notebooks, but worth knowing.
                out.write("import subprocess\n")
                out.write(
                    f"subprocess.run(r'''{bash_script}''',\n"
                    "               shell=True,\n"
                    "               check=True,\n"
                    "               executable='/bin/bash')\n\n"
                )
            else:
                # Detect any IPython '!' shell commands in code lines
                has_bang = any(line.lstrip().startswith("!") for line in lines)
                if has_bang:
                    out.write("import subprocess\n")
                    for line in lines:
                        stripped = line.lstrip()
                        if stripped.startswith("!"):
                            cmd = stripped[1:].lstrip()
                            out.write(
                                f"subprocess.run(r'''{cmd}''',\n"
                                "               shell=True,\n"
                                "               check=True,\n"
                                "               executable='/bin/bash')\n"
                            )
                        else:
                            out.write(line.rstrip() + "\n")
                    out.write("\n")
                else:
                    # Regular Python cell: dump as-is
                    out.write(cell.source.rstrip() + "\n\n")
def main() -> None:
    """CLI entry point: parse the two path arguments and run the conversion."""
    arg_parser = argparse.ArgumentParser(
        description="Convert a Jupyter notebook to a Python script, preserving bash cells and '!' commands as subprocess calls."
    )
    arg_parser.add_argument("input_nb", help="Path to the input .ipynb file")
    arg_parser.add_argument("output_py", help="Path for the output .py script")
    parsed = arg_parser.parse_args()
    convert_notebook(parsed.input_nb, parsed.output_py)
if __name__ == "__main__":
main()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/train/examples/pytorch/tensor_parallel_dtensor/ci/nb2py.py",
"license": "Apache License 2.0",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/serve/taskiq_task_processor.py | import logging
from typing import Any, Callable, Dict, List, Optional
from ray._common.pydantic_compat import BaseModel, Field
from ray._common.utils import import_attr
from ray.serve._private.constants import (
DEFAULT_CONSUMER_CONCURRENCY,
SERVE_LOGGER_NAME,
)
from ray.serve.schema import (
TaskProcessorAdapter,
TaskProcessorConfig,
TaskResult,
)
from ray.util.annotations import PublicAPI
logger = logging.getLogger(SERVE_LOGGER_NAME)
# Supported broker types and their required packages.
# Each entry maps a short broker_type name to the broker class import path,
# the pip package that provides it, and the constructor kwarg name for the
# queue/topic (so we can inject TaskProcessorConfig.queue_name automatically).
# "required_kwargs" lists keys that must be present in the user-supplied
# broker_kwargs before the broker can be constructed (see _create_broker).
_BROKER_REGISTRY: Dict[str, Dict[str, Any]] = {
    # Redis — standalone
    "redis_stream": {
        "import": "taskiq_redis.RedisStreamBroker",
        "package": "taskiq-redis",
        "queue_param": "queue_name",
        "required_kwargs": ["url"],
    },
    "redis_list": {
        "import": "taskiq_redis.ListQueueBroker",
        "package": "taskiq-redis",
        "queue_param": "queue_name",
        "required_kwargs": ["url"],
    },
    "redis_pubsub": {
        "import": "taskiq_redis.PubSubBroker",
        "package": "taskiq-redis",
        "queue_param": "queue_name",
        "required_kwargs": ["url"],
    },
    # Redis — cluster
    "redis_stream_cluster": {
        "import": "taskiq_redis.RedisStreamClusterBroker",
        "package": "taskiq-redis",
        "queue_param": "queue_name",
        "required_kwargs": ["url"],
    },
    "redis_list_cluster": {
        "import": "taskiq_redis.ListQueueClusterBroker",
        "package": "taskiq-redis",
        "queue_param": "queue_name",
        "required_kwargs": ["url"],
    },
    # Redis — sentinel
    "redis_stream_sentinel": {
        "import": "taskiq_redis.RedisStreamSentinelBroker",
        "package": "taskiq-redis",
        "queue_param": "queue_name",
        "required_kwargs": ["sentinels", "master_name"],
    },
    "redis_list_sentinel": {
        "import": "taskiq_redis.ListQueueSentinelBroker",
        "package": "taskiq-redis",
        "queue_param": "queue_name",
        "required_kwargs": ["sentinels", "master_name"],
    },
    "redis_pubsub_sentinel": {
        "import": "taskiq_redis.PubSubSentinelBroker",
        "package": "taskiq-redis",
        "queue_param": "queue_name",
        "required_kwargs": ["sentinels", "master_name"],
    },
    # RabbitMQ
    "rabbitmq": {
        "import": "taskiq_aio_pika.AioPikaBroker",
        "package": "taskiq-aio-pika",
        "queue_param": "queue_name",
        "required_kwargs": [],
    },
    # NATS
    "nats": {
        "import": "taskiq_nats.PullBasedJetStreamBroker",
        "package": "taskiq-nats",
        "queue_param": "subject",
        "required_kwargs": ["servers"],
    },
    # Kafka
    "kafka": {
        "import": "taskiq_aio_kafka.AioKafkaBroker",
        "package": "taskiq-aio-kafka",
        "queue_param": "kafka_topic",
        "required_kwargs": ["bootstrap_servers"],
    },
}
def _import_broker_class(broker_type: str):
    """Resolve ``broker_type`` to its broker class, importing it on demand."""
    spec = _BROKER_REGISTRY.get(broker_type)
    if spec is None:
        raise ValueError(
            f"Unsupported broker_type: {broker_type!r}. "
            f"Supported types: {list(_BROKER_REGISTRY.keys())}"
        )
    try:
        return import_attr(spec["import"])
    # ModuleNotFoundError subclasses ImportError, so this single handler
    # covers exactly the same failures as catching both explicitly.
    except ImportError as err:
        raise ImportError(
            f"Broker {broker_type!r} requires package {spec['package']!r}. "
            f"Install it with: pip install {spec['package']}"
        ) from err
def _create_broker(
    broker_type: str,
    queue_name: str,
    broker_kwargs: Optional[Dict[str, Any]] = None,
):
    """Create a broker instance from the given config.

    Args:
        broker_type: Key into ``_BROKER_REGISTRY`` selecting the broker class.
        queue_name: Queue/topic name; injected under the broker-specific
            constructor parameter named by the registry entry.
        broker_kwargs: Extra keyword arguments forwarded to the broker
            constructor (connection URLs and broker-specific options).

    Raises:
        ValueError: If ``broker_type`` is unknown or required kwargs are missing.
        ImportError: If the broker's backing package is not installed.
    """
    entry = _BROKER_REGISTRY.get(broker_type)
    if entry is None:
        raise ValueError(
            f"Unsupported broker_type: {broker_type!r}. "
            f"Supported types: {list(_BROKER_REGISTRY.keys())}"
        )
    # Copy so the caller's dict is never mutated by the queue-name injection.
    kwargs = dict(broker_kwargs) if broker_kwargs else {}
    # Validate required kwargs are present.
    required = entry.get("required_kwargs", [])
    missing = [k for k in required if k not in kwargs]
    if missing:
        raise ValueError(
            f"Broker {broker_type!r} requires the following keys in "
            f"broker_kwargs: {missing}"
        )
    broker_cls = _import_broker_class(broker_type)
    # Inject the queue/topic name under the broker-specific parameter name.
    queue_param = entry["queue_param"]
    kwargs[queue_param] = queue_name
    return broker_cls(**kwargs)
@PublicAPI(stability="beta")
class TaskiqAdapterConfig(BaseModel):
    """
    Taskiq adapter config for async task processing in Ray Serve.

    Supports multiple brokers via ``broker_type``. Connection URLs and
    broker-specific constructor arguments are passed through ``broker_kwargs``.

    Example — Redis Streams::

        TaskiqAdapterConfig(
            broker_type="redis_stream",
            broker_kwargs={"url": "redis://localhost:6379"},
        )

    Example — Redis Sentinel::

        TaskiqAdapterConfig(
            broker_type="redis_stream_sentinel",
            broker_kwargs={
                "sentinels": [("sentinel1", 26379), ("sentinel2", 26379)],
                "master_name": "mymaster",
            },
        )

    Example — RabbitMQ::

        TaskiqAdapterConfig(
            broker_type="rabbitmq",
            broker_kwargs={
                "url": "amqp://guest:guest@localhost:5672",
                "exchange_name": "my_exchange",
            },
        )

    Example — NATS::

        TaskiqAdapterConfig(
            broker_type="nats",
            broker_kwargs={"servers": ["nats://host1:4222", "nats://host2:4222"]},
        )

    Example — Kafka::

        TaskiqAdapterConfig(
            broker_type="kafka",
            broker_kwargs={"bootstrap_servers": ["localhost:9092"]},
        )
    """
    # Required: selects which taskiq broker implementation is constructed
    # (must be a key of the module-level broker registry).
    broker_type: str = Field(
        ...,
        description=(
            "Broker backend to use. Supported values: "
            "'redis_stream', 'redis_list', 'redis_pubsub', "
            "'redis_stream_cluster', 'redis_list_cluster', "
            "'redis_stream_sentinel', 'redis_list_sentinel', 'redis_pubsub_sentinel', "
            "'rabbitmq', 'nats', 'kafka'."
        ),
    )
    # Optional: forwarded verbatim to the broker constructor.
    broker_kwargs: Optional[Dict[str, Any]] = Field(
        default=None,
        description=(
            "Keyword arguments passed directly to the broker constructor. "
            "Includes connection URLs and broker-specific options — refer to "
            "the taskiq broker documentation for available parameters."
        ),
    )
    # TODO(harshit): Support additional result backends (e.g., MongoDB, PostgreSQL, DynamoDB).
    # See: https://taskiq-python.github.io/available-components/result-backends.html
    result_backend_url: Optional[str] = Field(
        default=None,
        description=(
            "Redis URL for the result backend (e.g., 'redis://localhost:6379'). "
            "Currently only Redis is supported as a result backend, regardless "
            "of the broker type."
        ),
    )
@PublicAPI(stability="beta")
class TaskiqTaskProcessorAdapter(TaskProcessorAdapter):
    """
    Taskiq-based task processor adapter for Ray Serve.

    Supports multiple brokers (Redis Streams, RabbitMQ, NATS, Kafka) via
    the ``broker_type`` field in ``TaskiqAdapterConfig``. Broker-specific
    options are passed through ``broker_kwargs``.
    """
    def __init__(self, config: TaskProcessorConfig, *args, **kwargs):
        """Validate the adapter config type and store it.

        No connections are made here — the broker and result backend are
        created lazily in ``initialize()``.
        """
        super().__init__(*args, **kwargs)
        if not isinstance(config.adapter_config, TaskiqAdapterConfig):
            raise TypeError(
                "TaskProcessorConfig.adapter_config must be an instance of "
                "TaskiqAdapterConfig"
            )
        self._config = config
        self._broker = None
        self._result_backend = None
    def initialize(self, consumer_concurrency: int = DEFAULT_CONSUMER_CONCURRENCY):
        """Initialize the taskiq broker and result backend."""
        self._consumer_concurrency = consumer_concurrency
        adapter_config: TaskiqAdapterConfig = self._config.adapter_config
        # Create the broker using the factory function
        self._broker = _create_broker(
            broker_type=adapter_config.broker_type,
            queue_name=self._config.queue_name,
            broker_kwargs=adapter_config.broker_kwargs,
        )
        # Create result backend only if explicitly configured
        if adapter_config.result_backend_url:
            # Imported lazily: taskiq-redis is optional and only needed when
            # a Redis result backend is actually configured.
            from taskiq_redis import RedisAsyncResultBackend
            self._result_backend = RedisAsyncResultBackend(
                redis_url=adapter_config.result_backend_url,
            )
            self._broker = self._broker.with_result_backend(self._result_backend)
        logger.info(
            f"Taskiq adapter initialized with broker_type={adapter_config.broker_type!r}, "
            f"queue: {self._config.queue_name}"
        )
    # ------------------------------------------------------------------
    # Abstract method stubs — full implementations in a follow-up PR.
    # ------------------------------------------------------------------
    def register_task_handle(self, func: Callable, name: Optional[str] = None):
        raise NotImplementedError
    def enqueue_task_sync(
        self, task_name: str, args=None, kwargs=None, **options
    ) -> TaskResult:
        raise NotImplementedError
    def get_task_status_sync(self, task_id: str) -> TaskResult:
        raise NotImplementedError
    def start_consumer(self, **kwargs):
        raise NotImplementedError
    def stop_consumer(self, timeout: float = 10.0):
        raise NotImplementedError
    def cancel_task_sync(self, task_id: str):
        raise NotImplementedError
    def get_metrics_sync(self) -> Dict[str, Any]:
        raise NotImplementedError
    def health_check_sync(self) -> List[Dict]:
        raise NotImplementedError
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/taskiq_task_processor.py",
"license": "Apache License 2.0",
"lines": 260,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/dashboard/modules/metrics/dashboards/llm_dashboard_panels.py | # ruff: noqa: E501
"""Unified LLM Dashboard panels.
Single dashboard for all LLM workloads (Ray Serve LLM and Ray Data LLM),
organized into two sections:
1. **vLLM Engine Metrics** — applicable to both Serve and Data LLM.
Latency (TTFT, TPOT, E2E), throughput, cache utilization,
scheduler state, NIXL, etc.
2. **Serve Orchestrator Metrics** — row with Serve-specific panels.
QPS per deployment, token statistics, etc. Only populated when
Ray Serve is the orchestrator.
"""
from ray.dashboard.modules.metrics.dashboards.common import (
DashboardConfig,
Panel,
PanelTemplate,
Row,
Target,
)
from ray.dashboard.modules.metrics.dashboards.vllm_engine_panels import (
build_vllm_engine_panels,
)
# vLLM Engine Metrics: Shared across Serve LLM and Data LLM workloads.
# _next_id is the first panel id not used by the engine panels; the Serve
# orchestrator row and its panels below allocate ids starting from it.
LLM_ENGINE_PANELS, _next_id, _engine_end_y = build_vllm_engine_panels(
    id_start=1, y_start=0
)
# Serve Orchestrator Metrics
_serve_row_id_start = _next_id + 1  # +1 to leave room for the row's own id
# Panels for the "Serve Orchestrator Metrics" row. Doubled braces ({{ }})
# survive the later .format() pass that substitutes {global_filters};
# $-prefixed names are Grafana template variables.
_SERVE_ORCHESTRATOR_PANELS = [
    Panel(
        id=_serve_row_id_start,
        title="QPS per vLLM worker",
        description="",
        unit="short",
        targets=[
            Target(
                expr='sum by (model_name, WorkerId, replica) (rate(ray_serve_deployment_request_counter_total{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", deployment=~"$deployment", {global_filters}}}[$interval]))',
                legend="replica {{replica}}, worker {{WorkerId}}",
            ),
            Target(
                expr='sum(rate(ray_serve_deployment_request_counter_total{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", deployment=~"$deployment", {global_filters}}}[$interval]))',
                legend="Total QPS",
            ),
        ],
        fill=1,
        linewidth=2,
        stack=False,
    ),
    Panel(
        id=_serve_row_id_start + 1,
        title="Tokens Last 24 Hours",
        description="",
        unit="short",
        targets=[
            Target(
                expr='(sum by (model_name) (delta(ray_vllm_prompt_tokens_total{{WorkerId=~"$workerid", {global_filters}}}[1d])))',
                legend="Input: {{model_name}}",
            ),
            Target(
                expr='(sum by (model_name) (delta(ray_vllm_generation_tokens_total{{WorkerId=~"$workerid", {global_filters}}}[1d])))',
                legend="Generated: {{model_name}}",
            ),
        ],
        fill=1,
        linewidth=2,
        stack=False,
        template=PanelTemplate.STAT,
    ),
    Panel(
        id=_serve_row_id_start + 2,
        title="Tokens Last Hour",
        description="",
        unit="short",
        targets=[
            Target(
                expr='sum by (model_name) (delta(ray_vllm_prompt_tokens_total{{WorkerId=~"$workerid", {global_filters}}}[1h]))',
                legend="Input: {{model_name}}",
            ),
            Target(
                expr='sum by (model_name) (delta(ray_vllm_generation_tokens_total{{WorkerId=~"$workerid", {global_filters}}}[1h]))',
                legend="Generated: {{model_name}}",
            ),
        ],
        fill=1,
        linewidth=2,
        stack=False,
        template=PanelTemplate.STAT,
    ),
    Panel(
        id=_serve_row_id_start + 3,
        title="Ratio Input: Generated Tokens Last 24 Hours",
        description="",
        unit="short",
        targets=[
            Target(
                expr='sum by (model_name) (delta(ray_vllm_prompt_tokens_total{{WorkerId=~"$workerid", {global_filters}}}[1d])) / sum by (model_name) (delta(ray_vllm_generation_tokens_total{{WorkerId=~"$workerid", {global_filters}}}[1d]))',
                legend="{{model_name}}",
            ),
        ],
        fill=1,
        linewidth=2,
        stack=False,
        template=PanelTemplate.STAT,
    ),
    Panel(
        id=_serve_row_id_start + 4,
        title="Distribution of Requests Per Model Last 24 Hours",
        description="",
        unit="Requests",
        targets=[
            Target(
                expr='sum by (model_name) (delta(ray_vllm_request_success_total{{WorkerId=~"$workerid", {global_filters}}}[1d]))',
                legend="{{model_name}}",
            ),
        ],
        fill=1,
        linewidth=2,
        stack=False,
        template=PanelTemplate.PIE_CHART,
    ),
    Panel(
        id=_serve_row_id_start + 5,
        title="Peak Tokens Per Second Per Model Last 24 Hours",
        description="",
        unit="short",
        targets=[
            Target(
                # Subquery: max over 24h of the 2m generation-token rate.
                expr='max_over_time(sum by (model_name) (rate(ray_vllm_generation_tokens_total{{WorkerId=~"$workerid", {global_filters}}}[2m]))[24h:1m])',
                legend="{{model_name}}",
            ),
        ],
        fill=1,
        linewidth=2,
        stack=False,
        template=PanelTemplate.STAT,
    ),
    Panel(
        id=_serve_row_id_start + 6,
        title="Tokens Per Model Last 24 Hours",
        description="",
        unit="short",
        targets=[
            Target(
                expr='sum by (model_name) (delta(ray_vllm_prompt_tokens_total{{WorkerId=~"$workerid", {global_filters}}}[1d])) + sum by (model_name) (delta(ray_vllm_generation_tokens_total{{WorkerId=~"$workerid", {global_filters}}}[1d]))',
                legend="{{model_name}}",
            ),
        ],
        fill=1,
        linewidth=2,
        stack=False,
        template=PanelTemplate.STAT,
    ),
    Panel(
        id=_serve_row_id_start + 7,
        title="Avg Total Tokens Per Request Last 7 Days",
        description="",
        unit="short",
        targets=[
            Target(
                expr='(sum by (model_name) (delta(ray_vllm_prompt_tokens_total{{WorkerId=~"$workerid", {global_filters}}}[1w])) +\nsum by (model_name) (delta(ray_vllm_generation_tokens_total{{WorkerId=~"$workerid", {global_filters}}}[1w]))) / sum by (model_name) (delta(ray_vllm_request_success_total{{WorkerId=~"$workerid", {global_filters}}}[1w]))',
                legend="{{ model_name}}",
            ),
        ],
        fill=1,
        linewidth=2,
        stack=False,
        template=PanelTemplate.GAUGE,
    ),
    Panel(
        id=_serve_row_id_start + 8,
        title="Requests Per Model Last Week",
        description="",
        unit="short",
        targets=[
            Target(
                expr='sum by (model_name) (delta(ray_vllm_request_success_total{{WorkerId=~"$workerid", {global_filters}}}[1w]))',
                legend="{{ model_name}}",
            ),
        ],
        fill=1,
        linewidth=2,
        stack=False,
        template=PanelTemplate.GAUGE,
    ),
    Panel(
        id=_serve_row_id_start + 9,
        title="Tokens Per Model Last 7 Days",
        description="",
        unit="short",
        targets=[
            Target(
                expr='sum by (model_name) (delta(ray_vllm_prompt_tokens_total{{WorkerId=~"$workerid", {global_filters}}}[1w]))',
                legend="In: {{ model_name}}",
            ),
            Target(
                expr='sum by (model_name) (delta(ray_vllm_generation_tokens_total{{WorkerId=~"$workerid", {global_filters}}}[1w]))',
                legend="Out: {{ model_name }}",
            ),
        ],
        fill=1,
        linewidth=2,
        stack=False,
        template=PanelTemplate.GAUGE,
    ),
    Panel(
        id=_serve_row_id_start + 10,
        title="Avg Total Tokens Per Request Per Model Last 7 Days",
        description="",
        unit="short",
        targets=[
            Target(
                expr='(sum by (model_name) (delta(ray_vllm_prompt_tokens_total{{WorkerId=~"$workerid", {global_filters}}}[1w])) + sum by (model_name) (delta(ray_vllm_generation_tokens_total{{WorkerId=~"$workerid", {global_filters}}}[1w])))/ sum by (model_name) (delta(ray_vllm_request_success_total{{WorkerId=~"$workerid", {global_filters}}}[1w]))',
                legend="{{ model_name}}",
            ),
        ],
        fill=1,
        linewidth=2,
        stack=False,
        template=PanelTemplate.GAUGE,
    ),
    Panel(
        id=_serve_row_id_start + 11,
        title="Tokens Per Request Per Model Last 7 Days",
        description="",
        unit="short",
        targets=[
            Target(
                expr='sum by (model_name) (delta(ray_vllm_prompt_tokens_total{{WorkerId=~"$workerid", {global_filters}}}[1w])) / sum by (model_name) (delta(ray_vllm_request_success_total{{WorkerId=~"$workerid", {global_filters}}}[1w]))',
                legend="In: {{ model_name}}",
            ),
            Target(
                expr='sum by (model_name) (delta(ray_vllm_generation_tokens_total{{WorkerId=~"$workerid", {global_filters}}}[1w])) / sum by (model_name) (delta(ray_vllm_request_success_total{{WorkerId=~"$workerid", {global_filters}}}[1w]))',
                legend="Out: {{ model_name}}",
            ),
        ],
        fill=1,
        linewidth=2,
        stack=False,
        template=PanelTemplate.GAUGE,
    ),
]
# Single collapsible row holding the Serve-specific panels; it takes the
# id reserved above (_serve_row_id_start is this id + 1).
LLM_GRAFANA_ROWS = [
    Row(
        title="Serve Orchestrator Metrics",
        id=_next_id,
        panels=_SERVE_ORCHESTRATOR_PANELS,
        collapsed=False,
    ),
]
# Import-time sanity check: no panel or row id may be used twice.
ids = [panel.id for panel in LLM_ENGINE_PANELS]
for row in LLM_GRAFANA_ROWS:
    ids.append(row.id)
    ids.extend(panel.id for panel in row.panels)
ids.sort()
assert len(ids) == len(
    set(ids)
), f"Duplicated id found. Use unique id for each panel/row. {ids}"
# Final dashboard definition consumed by the Grafana dashboard generator.
llm_dashboard_config = DashboardConfig(
    name="LLM",
    default_uid="rayLlmDashboard",
    panels=LLM_ENGINE_PANELS,
    rows=LLM_GRAFANA_ROWS,
    # No extra label filters beyond what each panel's PromQL already applies.
    standard_global_filters=[],
    base_json_file_name="llm_grafana_dashboard_base.json",
)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/dashboard/modules/metrics/dashboards/llm_dashboard_panels.py",
"license": "Apache License 2.0",
"lines": 264,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/dashboard/modules/metrics/dashboards/vllm_engine_panels.py | # ruff: noqa: E501
"""vLLM engine metric panels for the unified LLM Grafana dashboard.
All PromQL expressions use the {global_filters} placeholder, which gets
populated from DashboardConfig.standard_global_filters at generation time.
"""
from typing import List, Tuple
from ray.dashboard.modules.metrics.dashboards.common import (
GridPos,
Panel,
Target,
)
# Grafana grid units for every engine panel; panels are laid out two per
# row (at x=0 and x=PANEL_WIDTH), and y advances by PANEL_HEIGHT per row.
PANEL_HEIGHT = 8
PANEL_WIDTH = 12
def build_vllm_engine_panels(
    id_start: int = 1,
    y_start: int = 0,
) -> Tuple[List[Panel], int, int]:
    """Build the vLLM engine panels for the unified LLM dashboard.

    Args:
        id_start: First Grafana panel id to assign; ids increment from here.
        y_start: Grid y-coordinate at which the first row of panels is placed.

    Returns:
        A tuple ``(panels, next_id, next_y)`` where ``next_id`` is the first
        unused panel id and ``next_y`` is the grid y just below the last row.
    """
    _id = id_start
    y = y_start
    panels: List[Panel] = []
    # Hands out sequential panel ids so callers can continue the sequence.
    def _next_id() -> int:
        nonlocal _id
        result = _id
        _id += 1
        return result
    # --- Row: Token Throughput + TPOT ---
    panels.append(
        Panel(
            id=_next_id(),
            title="vLLM: Token Throughput",
            description="Number of tokens processed per second",
            unit="tokens/s",
            targets=[
                Target(
                    expr='sum by (model_name, WorkerId) (rate(ray_vllm_request_prompt_tokens_sum{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval]))',
                    legend="Prompt Tokens/Sec - {{model_name}} - {{WorkerId}}",
                ),
                Target(
                    expr='sum by (model_name, WorkerId) (rate(ray_vllm_generation_tokens_total{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval]))',
                    legend="Generation Tokens/Sec - {{model_name}} - {{WorkerId}}",
                ),
            ],
            fill=1,
            linewidth=2,
            stack=False,
            grid_pos=GridPos(0, y, PANEL_WIDTH, PANEL_HEIGHT),
        )
    )
    panels.append(
        Panel(
            id=_next_id(),
            title="vLLM: Time Per Output Token Latency",
            description="P50, P90, P95, P99, and Mean TPOT latency",
            unit="s",
            targets=[
                Target(
                    expr='histogram_quantile(0.99, sum by(le, model_name, WorkerId) (rate(ray_vllm_request_time_per_output_token_seconds_bucket{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval])))',
                    legend="P99 - {{model_name}} - {{WorkerId}}",
                ),
                Target(
                    expr='histogram_quantile(0.95, sum by(le, model_name, WorkerId) (rate(ray_vllm_request_time_per_output_token_seconds_bucket{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval])))',
                    legend="P95 - {{model_name}} - {{WorkerId}}",
                ),
                Target(
                    expr='histogram_quantile(0.9, sum by(le, model_name, WorkerId) (rate(ray_vllm_request_time_per_output_token_seconds_bucket{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval])))',
                    legend="P90 - {{model_name}} - {{WorkerId}}",
                ),
                Target(
                    expr='histogram_quantile(0.5, sum by(le, model_name, WorkerId) (rate(ray_vllm_request_time_per_output_token_seconds_bucket{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval])))',
                    legend="P50 - {{model_name}} - {{WorkerId}}",
                ),
                Target(
                    expr='(sum by(model_name, WorkerId) (rate(ray_vllm_request_time_per_output_token_seconds_sum{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval]))\n/\nsum by(model_name, WorkerId) (rate(ray_vllm_request_time_per_output_token_seconds_count{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval])))',
                    legend="Mean - {{model_name}} - {{WorkerId}}",
                ),
            ],
            fill=1,
            linewidth=2,
            stack=False,
            grid_pos=GridPos(PANEL_WIDTH, y, PANEL_WIDTH, PANEL_HEIGHT),
        )
    )
    y += PANEL_HEIGHT
    # --- Row: Cache Utilization + KV Cache Hit Rate ---
    panels.append(
        Panel(
            id=_next_id(),
            title="vLLM: Cache Utilization",
            description="Percentage of used KV cache blocks by vLLM.",
            unit="percentunit",
            targets=[
                Target(
                    expr='sum by (WorkerId) (ray_vllm_kv_cache_usage_perc{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}})',
                    legend="GPU Cache Usage - {{WorkerId}}",
                ),
            ],
            fill=1,
            linewidth=2,
            stack=False,
            grid_pos=GridPos(0, y, PANEL_WIDTH, PANEL_HEIGHT),
        )
    )
    panels.append(
        Panel(
            id=_next_id(),
            title="vLLM: KV Cache Hit Rate",
            description="Percentage of prefix cache hits. Higher is better for repeated prefixes.",
            unit="percent",
            targets=[
                Target(
                    expr='max(100 * (sum by (WorkerId) (rate(ray_vllm_prefix_cache_hits_total{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval])) / sum by (WorkerId) (rate(ray_vllm_prefix_cache_queries_total{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval]))))',
                    legend="Max Hit Rate",
                ),
                Target(
                    expr='min(100 * (sum by (WorkerId) (rate(ray_vllm_prefix_cache_hits_total{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval])) / sum by (WorkerId) (rate(ray_vllm_prefix_cache_queries_total{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval]))))',
                    legend="Min Hit Rate",
                ),
                Target(
                    expr='100 * (sum by (WorkerId) (rate(ray_vllm_prefix_cache_hits_total{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval])) / sum by (WorkerId) (rate(ray_vllm_prefix_cache_queries_total{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval])))',
                    legend="Hit Rate: worker {{WorkerId}}",
                ),
            ],
            fill=1,
            linewidth=2,
            stack=False,
            grid_pos=GridPos(PANEL_WIDTH, y, PANEL_WIDTH, PANEL_HEIGHT),
        )
    )
    y += PANEL_HEIGHT
    # --- Row: TTFT + E2E Request Latency ---
    panels.append(
        Panel(
            id=_next_id(),
            title="vLLM: Time To First Token Latency",
            description="P50, P90, P95, P99, and Mean TTFT latency",
            unit="s",
            targets=[
                Target(
                    expr='(sum by(model_name, WorkerId) (rate(ray_vllm_time_to_first_token_seconds_sum{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval]))\n/\nsum by(model_name, WorkerId) (rate(ray_vllm_time_to_first_token_seconds_count{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval])))',
                    legend="Average - {{model_name}} - {{WorkerId}}",
                ),
                Target(
                    expr='histogram_quantile(0.5, sum by(le, model_name, WorkerId)(rate(ray_vllm_time_to_first_token_seconds_bucket{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval])))',
                    legend="P50 - {{model_name}} - {{WorkerId}}",
                ),
                Target(
                    expr='histogram_quantile(0.9, sum by(le, model_name, WorkerId)(rate(ray_vllm_time_to_first_token_seconds_bucket{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval])))',
                    legend="P90 - {{model_name}} - {{WorkerId}}",
                ),
                Target(
                    expr='histogram_quantile(0.95, sum by(le, model_name, WorkerId) (rate(ray_vllm_time_to_first_token_seconds_bucket{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval])))',
                    legend="P95 - {{model_name}} - {{WorkerId}}",
                ),
                Target(
                    expr='histogram_quantile(0.99, sum by(le, model_name, WorkerId)(rate(ray_vllm_time_to_first_token_seconds_bucket{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval])))',
                    legend="P99 - {{model_name}} - {{WorkerId}}",
                ),
            ],
            fill=1,
            linewidth=2,
            stack=False,
            grid_pos=GridPos(0, y, PANEL_WIDTH, PANEL_HEIGHT),
        )
    )
    panels.append(
        Panel(
            id=_next_id(),
            title="vLLM: E2E Request Latency",
            description="End-to-end request latency from arrival to completion.",
            unit="s",
            targets=[
                Target(
                    expr='sum by(model_name, WorkerId) (rate(ray_vllm_e2e_request_latency_seconds_sum{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval]))\n/\nsum by(model_name, WorkerId) (rate(ray_vllm_e2e_request_latency_seconds_count{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval]))',
                    legend="Average - {{model_name}} - {{WorkerId}}",
                ),
                Target(
                    expr='histogram_quantile(0.5, sum by(le, model_name, WorkerId) (rate(ray_vllm_e2e_request_latency_seconds_bucket{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval])))',
                    legend="P50 - {{model_name}} - {{WorkerId}}",
                ),
                Target(
                    expr='histogram_quantile(0.9, sum by(le, model_name, WorkerId) (rate(ray_vllm_e2e_request_latency_seconds_bucket{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval])))',
                    legend="P90 - {{model_name}} - {{WorkerId}}",
                ),
                Target(
                    expr='histogram_quantile(0.95, sum by(le, model_name, WorkerId) (rate(ray_vllm_e2e_request_latency_seconds_bucket{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval])))',
                    legend="P95 - {{model_name}} - {{WorkerId}}",
                ),
                Target(
                    expr='histogram_quantile(0.99, sum by(le, model_name, WorkerId) (rate(ray_vllm_e2e_request_latency_seconds_bucket{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval])))',
                    legend="P99 - {{model_name}} - {{WorkerId}}",
                ),
            ],
            fill=1,
            linewidth=2,
            stack=False,
            grid_pos=GridPos(PANEL_WIDTH, y, PANEL_WIDTH, PANEL_HEIGHT),
        )
    )
    y += PANEL_HEIGHT
    # --- Row: Scheduler State + Queue Time ---
    panels.append(
        Panel(
            id=_next_id(),
            title="vLLM: Scheduler State",
            description="Number of requests in RUNNING, WAITING, and SWAPPED state",
            unit="Requests",
            targets=[
                Target(
                    expr='ray_vllm_num_requests_running{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}',
                    legend="Num Running - {{model_name}} - {{WorkerId}}",
                ),
                Target(
                    expr='ray_vllm_num_requests_swapped{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}',
                    legend="Num Swapped - {{model_name}} - {{WorkerId}}",
                ),
                Target(
                    expr='ray_vllm_num_requests_waiting{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}',
                    legend="Num Waiting - {{model_name}} - {{WorkerId}}",
                ),
            ],
            fill=1,
            linewidth=2,
            stack=False,
            grid_pos=GridPos(0, y, PANEL_WIDTH, PANEL_HEIGHT),
        )
    )
    panels.append(
        Panel(
            id=_next_id(),
            title="vLLM: Queue Time",
            description="P50, P90, P95, P99, and Mean time requests spend waiting in the queue.",
            unit="s",
            targets=[
                Target(
                    expr='(sum by(model_name, WorkerId) (rate(ray_vllm_request_queue_time_seconds_sum{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval]))\n/\nsum by(model_name, WorkerId) (rate(ray_vllm_request_queue_time_seconds_count{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval])))',
                    legend="Mean - {{model_name}} - {{WorkerId}}",
                ),
                Target(
                    expr='histogram_quantile(0.5, sum by(le, model_name, WorkerId) (rate(ray_vllm_request_queue_time_seconds_bucket{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval])))',
                    legend="P50 - {{model_name}} - {{WorkerId}}",
                ),
                Target(
                    expr='histogram_quantile(0.9, sum by(le, model_name, WorkerId) (rate(ray_vllm_request_queue_time_seconds_bucket{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval])))',
                    legend="P90 - {{model_name}} - {{WorkerId}}",
                ),
                Target(
                    expr='histogram_quantile(0.95, sum by(le, model_name, WorkerId) (rate(ray_vllm_request_queue_time_seconds_bucket{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval])))',
                    legend="P95 - {{model_name}} - {{WorkerId}}",
                ),
                Target(
                    expr='histogram_quantile(0.99, sum by(le, model_name, WorkerId) (rate(ray_vllm_request_queue_time_seconds_bucket{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval])))',
                    legend="P99 - {{model_name}} - {{WorkerId}}",
                ),
            ],
            fill=1,
            linewidth=2,
            stack=False,
            grid_pos=GridPos(PANEL_WIDTH, y, PANEL_WIDTH, PANEL_HEIGHT),
        )
    )
    y += PANEL_HEIGHT
    # --- Row: Prompt Length + Generation Length ---
    panels.append(
        Panel(
            id=_next_id(),
            title="vLLM: Prompt Length",
            description="Distribution of prompt token lengths.",
            unit="short",
            targets=[
                Target(
                    expr='histogram_quantile(0.5, sum by(le, model_name, WorkerId) (rate(ray_vllm_request_prompt_tokens_bucket{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval])))',
                    legend="P50-{{model_name}}-{{WorkerId}}",
                ),
                Target(
                    expr='histogram_quantile(0.90, sum by(le, model_name, WorkerId) (rate(ray_vllm_request_prompt_tokens_bucket{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval])))',
                    legend="P90-{{model_name}}-{{WorkerId}}",
                ),
                Target(
                    expr='(sum by(model_name, WorkerId) (rate(ray_vllm_request_prompt_tokens_sum{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval]))\n/\nsum by(model_name, WorkerId) (rate(ray_vllm_request_prompt_tokens_count{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval])))',
                    legend="Average-{{model_name}}-{{WorkerId}}",
                ),
            ],
            fill=1,
            linewidth=2,
            stack=False,
            grid_pos=GridPos(0, y, PANEL_WIDTH, PANEL_HEIGHT),
        )
    )
    panels.append(
        Panel(
            id=_next_id(),
            title="vLLM: Generation Length",
            description="Distribution of generated token lengths.",
            unit="short",
            targets=[
                Target(
                    expr='histogram_quantile(0.50, sum by(le, model_name, WorkerId) (rate(ray_vllm_request_generation_tokens_bucket{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval])))',
                    legend="P50-{{model_name}}-{{WorkerId}}",
                ),
                Target(
                    expr='histogram_quantile(0.90, sum by(le, model_name, WorkerId) (rate(ray_vllm_request_generation_tokens_bucket{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval])))',
                    legend="P90-{{model_name}}-{{WorkerId}}",
                ),
                Target(
                    expr=(
                        '(sum by(model_name, WorkerId) (rate(ray_vllm_request_generation_tokens_sum{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval])))'
                        "\n/\n"
                        '(sum by(model_name, WorkerId) (rate(ray_vllm_request_generation_tokens_count{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval])))'
                    ),
                    legend="Average-{{model_name}}-{{WorkerId}}",
                ),
            ],
            fill=1,
            linewidth=2,
            stack=False,
            grid_pos=GridPos(PANEL_WIDTH, y, PANEL_WIDTH, PANEL_HEIGHT),
        )
    )
    y += PANEL_HEIGHT
    # --- Row: Finish Reason + Prefill and Decode Time ---
    panels.append(
        Panel(
            id=_next_id(),
            title="vLLM: Finish Reason",
            description="Number of finished requests by their finish reason: EOS token or max length reached.",
            unit="Requests",
            targets=[
                Target(
                    expr='sum by(finished_reason, model_name, WorkerId) (increase(ray_vllm_request_success_total{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval]))',
                    legend="{{finished_reason}} - {{model_name}} - {{WorkerId}}",
                ),
            ],
            fill=1,
            linewidth=2,
            stack=False,
            grid_pos=GridPos(0, y, PANEL_WIDTH, PANEL_HEIGHT),
        )
    )
    panels.append(
        Panel(
            id=_next_id(),
            title="vLLM: Prefill and Decode Time",
            description="Time spent in prefill vs decode phases.",
            unit="s",
            targets=[
                Target(
                    expr='sum by(model_name, WorkerId) (rate(ray_vllm_request_decode_time_seconds_sum{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval]))',
                    legend="Decode - {{model_name}} - {{WorkerId}}",
                ),
                Target(
                    expr='sum by(model_name, WorkerId) (rate(ray_vllm_request_prefill_time_seconds_sum{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval]))',
                    legend="Prefill - {{model_name}} - {{WorkerId}}",
                ),
            ],
            fill=1,
            linewidth=2,
            stack=False,
            grid_pos=GridPos(PANEL_WIDTH, y, PANEL_WIDTH, PANEL_HEIGHT),
        )
    )
    y += PANEL_HEIGHT
    # --- Row: Max Generation Token (single panel) ---
    panels.append(
        Panel(
            id=_next_id(),
            title="vLLM: Max Generation Token in Sequence Group",
            description="",
            unit="none",
            targets=[
                Target(
                    expr='sum by(model_name, WorkerId) (rate(ray_vllm_request_max_num_generation_tokens_sum{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval]))',
                    legend="{{model_name}} - {{WorkerId}}",
                ),
            ],
            fill=1,
            linewidth=2,
            stack=False,
            grid_pos=GridPos(0, y, PANEL_WIDTH, PANEL_HEIGHT),
        )
    )
    y += PANEL_HEIGHT
    # --- NIXL Panels ---
    # Row: Transfer Latency + Transfer Throughput
    panels.append(
        Panel(
            id=_next_id(),
            title="NIXL: Transfer Latency",
            description="Average NIXL KV cache transfer latency in milliseconds.",
            unit="ms",
            targets=[
                Target(
                    expr='sum by(model_name, WorkerId) (rate(ray_vllm_nixl_xfer_time_seconds_sum{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval]))\n/\nsum by(model_name, WorkerId) (rate(ray_vllm_nixl_xfer_time_seconds_count{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval]))\n* 1000',
                    legend="Avg Latency - {{model_name}} - {{WorkerId}}",
                ),
            ],
            fill=1,
            linewidth=2,
            stack=False,
            grid_pos=GridPos(0, y, PANEL_WIDTH, PANEL_HEIGHT),
        )
    )
    panels.append(
        Panel(
            id=_next_id(),
            title="NIXL: Transfer Throughput",
            description="NIXL KV cache transfer throughput in GB/s (bytes transferred / transfer time).",
            unit="GBs",
            targets=[
                Target(
                    expr='sum by(model_name, WorkerId) (rate(ray_vllm_nixl_bytes_transferred_sum{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval]))\n/\nsum by(model_name, WorkerId) (rate(ray_vllm_nixl_xfer_time_seconds_sum{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval]))\n/ 1024 / 1024 / 1024',
                    legend="Throughput - {{model_name}} - {{WorkerId}}",
                ),
            ],
            fill=1,
            linewidth=2,
            stack=False,
            grid_pos=GridPos(PANEL_WIDTH, y, PANEL_WIDTH, PANEL_HEIGHT),
        )
    )
    y += PANEL_HEIGHT
    # Row: Transfer Rate + Avg Post Time
    panels.append(
        Panel(
            id=_next_id(),
            title="NIXL: Transfer Rate",
            description="Number of NIXL KV cache transfers per second.",
            unit="ops",
            targets=[
                Target(
                    expr='sum by (model_name, WorkerId) (rate(ray_vllm_nixl_xfer_time_seconds_count{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval]))',
                    legend="Transfers/s - {{model_name}} - {{WorkerId}}",
                ),
            ],
            fill=1,
            linewidth=2,
            stack=False,
            grid_pos=GridPos(0, y, PANEL_WIDTH, PANEL_HEIGHT),
        )
    )
    panels.append(
        Panel(
            id=_next_id(),
            title="NIXL: Avg Post Time",
            description="Average time to post/initiate a NIXL transfer in milliseconds.",
            unit="ms",
            targets=[
                Target(
                    expr='sum by(model_name, WorkerId) (rate(ray_vllm_nixl_post_time_seconds_sum{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval]))\n/\nsum by(model_name, WorkerId) (rate(ray_vllm_nixl_post_time_seconds_count{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval]))\n* 1000',
                    legend="Avg Post Time - {{model_name}} - {{WorkerId}}",
                ),
            ],
            fill=1,
            linewidth=2,
            stack=False,
            grid_pos=GridPos(PANEL_WIDTH, y, PANEL_WIDTH, PANEL_HEIGHT),
        )
    )
    y += PANEL_HEIGHT
    # Row: KV Transfer Failures + KV Expired Requests
    panels.append(
        Panel(
            id=_next_id(),
            title="NIXL: KV Transfer Failures",
            description="Number of failed NIXL KV cache transfers. Any non-zero value is concerning and indicates RDMA transfer errors.",
            unit="short",
            targets=[
                Target(
                    expr='sum by (model_name, WorkerId) (increase(ray_vllm_nixl_num_failed_transfers{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval]))',
                    legend="Failed Transfers - {{model_name}} - {{WorkerId}}",
                ),
            ],
            fill=1,
            linewidth=2,
            stack=False,
            grid_pos=GridPos(0, y, PANEL_WIDTH, PANEL_HEIGHT),
        )
    )
    panels.append(
        Panel(
            id=_next_id(),
            title="NIXL: KV Expired Requests",
            description="Number of requests whose KV blocks expired before decode consumed them. Spikes indicate prefill is outrunning decode or the timeout is too short.",
            unit="short",
            targets=[
                Target(
                    expr='sum by (model_name, WorkerId) (increase(ray_vllm_nixl_num_kv_expired_reqs{{model_name=~"$vllm_model_name", WorkerId=~"$workerid", {global_filters}}}[$interval]))',
                    legend="KV Expired - {{model_name}} - {{WorkerId}}",
                ),
            ],
            fill=1,
            linewidth=2,
            stack=False,
            grid_pos=GridPos(PANEL_WIDTH, y, PANEL_WIDTH, PANEL_HEIGHT),
        )
    )
    y += PANEL_HEIGHT
    return panels, _id, y
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/dashboard/modules/metrics/dashboards/vllm_engine_panels.py",
"license": "Apache License 2.0",
"lines": 497,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/serve/doc_code/class_based_autoscaling.py | # __serve_example_begin__
import json
import tempfile
from ray import serve
from ray.serve.config import AutoscalingConfig, AutoscalingPolicy
# Create a JSON file with the initial target replica count.
# In production this file would be written by an external system.
# delete=False keeps the file on disk after close so the autoscaling policy
# can re-read it by path; the context manager guarantees the handle is
# closed even if json.dump raises.
with tempfile.NamedTemporaryFile(
    mode="w", suffix=".json", delete=False
) as scaling_file:
    json.dump({"replicas": 2}, scaling_file)
@serve.deployment(
    autoscaling_config=AutoscalingConfig(
        min_replicas=1,
        max_replicas=10,
        upscale_delay_s=3,
        downscale_delay_s=10,
        # Custom policy resolved from the "module:attribute" import path;
        # policy_kwargs keys match FileBasedAutoscalingPolicy.__init__.
        policy=AutoscalingPolicy(
            policy_function="class_based_autoscaling_policy:FileBasedAutoscalingPolicy",
            policy_kwargs={
                "file_path": scaling_file.name,
                "poll_interval_s": 2.0,
            },
        ),
    ),
    max_ongoing_requests=100,
)
class MyDeployment:
    """Trivial deployment whose replica count is driven by the file-based policy."""
    async def __call__(self) -> str:
        return "Hello, world!"
app = MyDeployment.bind()
# __serve_example_end__
if __name__ == "__main__":
    import requests  # noqa

    # Smoke-test the example: deploy the app and hit it once over HTTP.
    serve.run(app)
    response = requests.get("http://localhost:8000/")
    assert response.text == "Hello, world!"
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/doc_code/class_based_autoscaling.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/serve/doc_code/class_based_autoscaling_policy.py | # __begin_class_based_autoscaling_policy__
import asyncio
import json
import logging
from pathlib import Path
from typing import Any, Dict, Optional, Tuple
from ray.serve.config import AutoscalingContext
logger = logging.getLogger("ray.serve")
class FileBasedAutoscalingPolicy:
    """Scale replicas based on a target written to a JSON file.

    A background asyncio task re-reads the file every ``poll_interval_s``
    seconds. ``__call__`` returns the latest value on every autoscaling
    tick. In production you could replace the file read with an HTTP
    call, a message-queue consumer, or any other async IO operation.
    """
    def __init__(self, file_path: str, poll_interval_s: float = 5.0):
        """
        Args:
            file_path: Path to a JSON file of the form ``{"replicas": <int>}``.
            poll_interval_s: Seconds between re-reads of the file.
        """
        self._file_path = Path(file_path)
        self._poll_interval_s = poll_interval_s
        # Last successfully parsed target; served until the next good read.
        self._desired_replicas: int = 1
        # Background polling task. Annotated Optional: it stays None until
        # _ensure_started() runs on the controller's event loop.
        self._task: Optional[asyncio.Task] = None
        self._started: bool = False
    def _ensure_started(self) -> None:
        """Lazily start the background poll on the controller event loop."""
        if self._started:
            return
        self._started = True
        loop = asyncio.get_running_loop()
        self._task = loop.create_task(self._poll_file())
    async def _poll_file(self) -> None:
        """Read the target replica count from the JSON file in a loop."""
        while True:
            try:
                text = self._file_path.read_text()
                data = json.loads(text)
                self._desired_replicas = int(data["replicas"])
            except Exception:
                pass  # Keep the last known value on failure.
            await asyncio.sleep(self._poll_interval_s)
    def __call__(
        self, ctx: "AutoscalingContext"
    ) -> Tuple[int, Dict[str, Any]]:
        """Return the desired replica count and debug metadata for this tick."""
        self._ensure_started()
        desired = self._desired_replicas
        return desired, {"last_polled_value": self._desired_replicas}
# __end_class_based_autoscaling_policy__
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/doc_code/class_based_autoscaling_policy.py",
"license": "Apache License 2.0",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/serve/tests/test_config_files/gang_scheduling.py | from ray import serve
@serve.deployment
class GangApp:
    """Minimal Serve deployment used by gang-scheduling config-file tests."""
    def __call__(self, *args):
        return "hello_from_gang_scheduling"
# Bound application imported by the test's Serve config.
app = GangApp.bind()
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/test_config_files/gang_scheduling.py",
"license": "Apache License 2.0",
"lines": 6,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/serve/tests/test_gang_scheduling.py | import os
import sys
import tempfile
import pytest
import ray
from ray import serve
from ray._common.test_utils import SignalActor, wait_for_condition
from ray.serve._private.common import GANG_PG_NAME_PREFIX, DeploymentID, ReplicaState
from ray.serve._private.constants import SERVE_DEFAULT_APP_NAME
from ray.serve._private.test_utils import check_apps_running
from ray.serve._private.utils import get_all_live_placement_group_names
from ray.serve.config import GangPlacementStrategy, GangSchedulingConfig
from ray.tests.conftest import * # noqa
from ray.util.placement_group import get_current_placement_group, placement_group_table
from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy
@ray.remote
class Collector:
    """Tiny actor that accumulates arbitrary items for later inspection."""

    def __init__(self):
        self._records = []

    def add(self, item):
        """Append one item to the collection."""
        self._records.append(item)

    def get(self):
        """Return every item collected so far, in insertion order."""
        return self._records
@ray.remote(num_cpus=0)
class FailedReplicaStore:
    """Stores the first replica ID that failed, for gang startup failure tests."""

    def __init__(self):
        self._failed_replica_id = None

    def set_if_first(self, replica_id: str) -> bool:
        """Record ``replica_id`` iff no failure was recorded yet.

        Returns True only for the first caller; actor method calls are
        serialized, so this check-then-set is atomic.
        """
        if self._failed_replica_id is not None:
            return False
        self._failed_replica_id = replica_id
        return True

    def get(self):
        """Return the recorded replica ID, or None if nothing failed yet."""
        return self._failed_replica_id
class TestGangScheduling:
    """Tests for gang scheduling with placement groups."""

    def test_sufficient_resources(self, ray_cluster):
        """Verifies that gang scheduling succeeds when cluster has sufficient resources."""
        cluster = ray_cluster
        # 2 nodes x 1 CPU is exactly enough for 8 replicas at 0.25 CPU each.
        cluster.add_node(num_cpus=1)
        cluster.add_node(num_cpus=1)
        cluster.wait_for_nodes()
        ray.init(address=cluster.address)
        serve.start()

        @serve.deployment(
            num_replicas=8,
            ray_actor_options={"num_cpus": 0.25},
            gang_scheduling_config=GangSchedulingConfig(gang_size=4),
        )
        class GangDeployment:
            def __call__(self):
                return ray.get_runtime_context().get_node_id()

        handle = serve.run(GangDeployment.bind(), name="gang_app_success")
        wait_for_condition(
            check_apps_running,
            apps=["gang_app_success"],
        )
        # Verify all replicas are running and responding
        refs = [handle.remote() for _ in range(8)]
        results = [ref.result() for ref in refs]
        assert len(results) == 8
        serve.delete("gang_app_success")
        serve.shutdown()

    def test_sufficient_resources_with_options(self, ray_cluster):
        """Verifies gang scheduling via .options() succeeds and responds to requests."""
        cluster = ray_cluster
        cluster.add_node(num_cpus=1)
        cluster.add_node(num_cpus=1)
        cluster.wait_for_nodes()
        ray.init(address=cluster.address)
        serve.start()

        @serve.deployment(num_replicas=1, ray_actor_options={"num_cpus": 0})
        class GangDeployment:
            def __call__(self):
                return ray.get_runtime_context().get_node_id()

        # Gang config is applied through .options() rather than the decorator.
        app = GangDeployment.options(
            num_replicas=8,
            ray_actor_options={"num_cpus": 0.25},
            gang_scheduling_config=GangSchedulingConfig(gang_size=4),
        ).bind()
        handle = serve.run(app, name="gang_app_options")
        wait_for_condition(
            check_apps_running,
            apps=["gang_app_options"],
        )
        # Verify all replicas are running and responding
        refs = [handle.remote() for _ in range(8)]
        results = [ref.result() for ref in refs]
        assert len(results) == 8
        serve.delete("gang_app_options")
        serve.shutdown()

    def test_incomplete_deployment(self, ray_cluster):
        """
        Verifies that schedulable gangs serve traffic while unschedulable gangs wait for resources.
        """
        cluster = ray_cluster
        # 2 CPUs can host 2 of the 3 gangs (each gang needs 4 x 0.25 CPU).
        cluster.add_node(num_cpus=1)
        cluster.add_node(num_cpus=1)
        cluster.wait_for_nodes()
        ray.init(address=cluster.address)
        serve.start()

        @serve.deployment
        class IncompleteGangDeployment:
            def __call__(self):
                return ray.get_runtime_context().get_node_id()

        app = IncompleteGangDeployment.options(
            num_replicas=12,
            ray_actor_options={"num_cpus": 0.25},
            gang_scheduling_config=GangSchedulingConfig(gang_size=4),
        ).bind()
        handle = serve._run(app, name="gang_partial_app", _blocking=False)

        # The deployment should NOT fail. 2 of 3 gangs should be scheduled,
        # and those 8 replicas should serve traffic. The deployment stays
        # DEPLOYING because it hasn't reached 12 replicas.
        def check_replicas_running(expected_count: int):
            try:
                app_status = serve.status().applications["gang_partial_app"]
                # Should be DEPLOYING
                if app_status.status == "DEPLOY_FAILED":
                    raise AssertionError(
                        "Deployment should not fail with partial gang scheduling"
                    )
                # Check that some replicas are running
                dep_status = list(app_status.deployments.values())[0]
                running = dep_status.replica_states.get("RUNNING", 0)
                assert running == expected_count
                return True
            except KeyError:
                # Status may not be published yet; retry.
                return False

        wait_for_condition(check_replicas_running, expected_count=8, timeout=60)
        # Verify the running replicas can serve traffic.
        results = set()
        for _ in range(40):
            results.add(handle.remote().result())
        assert len(results) > 0
        # Verify deployment is still DEPLOYING
        app_status = serve.status().applications["gang_partial_app"]
        assert app_status.status == "DEPLOYING"
        # Now add a 3rd node so the remaining gang can be scheduled.
        cluster.add_node(num_cpus=1)
        cluster.wait_for_nodes()
        # The deployment should become RUNNING with all 12 replicas.
        wait_for_condition(
            check_apps_running,
            apps=["gang_partial_app"],
            timeout=60,
        )
        # Traffic should now reach all 3 nodes (the set collects node IDs,
        # not replica IDs, so 3 distinct values means every node serves).
        results = set()
        for _ in range(100):
            results.add(handle.remote().result())
        assert len(results) == 3
        serve.delete("gang_partial_app")
        serve.shutdown()

    def test_no_partial_gang(self, ray_cluster):
        """Verifies atomic gang scheduling: no partial gangs are created."""
        cluster = ray_cluster
        # 2 CPUs total: enough for 2 full gangs (1.6 CPUs) but not 3 (2.4 CPUs).
        # The leftover 0.4 CPUs must NOT produce a partial gang.
        cluster.add_node(num_cpus=1)
        cluster.add_node(num_cpus=1)
        cluster.wait_for_nodes()
        ray.init(address=cluster.address)
        serve.start()

        @serve.deployment
        class AtomicGangDeployment:
            def __call__(self):
                return ray.get_runtime_context().get_node_id()

        app = AtomicGangDeployment.options(
            num_replicas=12,
            ray_actor_options={"num_cpus": 0.2},
            gang_scheduling_config=GangSchedulingConfig(gang_size=4),
        ).bind()
        handle = serve._run(app, name="atomic_gang_app", _blocking=False)

        # Wait until exactly 8 replicas (2 gangs) are running.
        def check_replicas_running(expected_count: int):
            try:
                app_status = serve.status().applications["atomic_gang_app"]
                if app_status.status == "DEPLOY_FAILED":
                    raise AssertionError(
                        "Deployment should not fail — partial gangs should "
                        "serve traffic while waiting for resources."
                    )
                dep_status = list(app_status.deployments.values())[0]
                running = dep_status.replica_states.get("RUNNING", 0)
                assert running == expected_count
                return True
            except KeyError:
                return False

        wait_for_condition(check_replicas_running, expected_count=8, timeout=60)
        # Deployment should still be DEPLOYING (not RUNNING, not DEPLOY_FAILED).
        app_status = serve.status().applications["atomic_gang_app"]
        assert app_status.status == "DEPLOYING"
        # Verify the 8 running replicas can serve traffic.
        results = set()
        for _ in range(80):
            results.add(handle.remote().result())
        assert len(results) > 0
        # Add 1 more CPU so the 3rd gang (0.8 CPUs) can be scheduled.
        cluster.add_node(num_cpus=1)
        cluster.wait_for_nodes()
        # The deployment should become RUNNING with all 12 replicas.
        wait_for_condition(check_apps_running, apps=["atomic_gang_app"], timeout=60)
        # All 12 replicas should now serve traffic.
        app_status = serve.status().applications["atomic_gang_app"]
        dep_status = list(app_status.deployments.values())[0]
        running = dep_status.replica_states.get("RUNNING", 0)
        assert running == 12
        serve.delete("atomic_gang_app")
        serve.shutdown()

    def test_pack_strategy(self, ray_cluster):
        """Verifies that PACK strategy places gang replicas on the same node."""
        cluster = ray_cluster
        cluster.add_node(num_cpus=1)
        cluster.add_node(num_cpus=1)
        cluster.wait_for_nodes()
        ray.init(address=cluster.address)
        serve.start()

        @serve.deployment
        def PackDeployment():
            return os.environ.get(
                "RAY_NODE_ID", ray.get_runtime_context().get_node_id()
            )

        # 1 gang with PACK strategy - all replicas should be on same node
        app = PackDeployment.options(
            num_replicas=4,
            ray_actor_options={"num_cpus": 0.25},
            gang_scheduling_config=GangSchedulingConfig(
                gang_size=4,
                gang_placement_strategy=GangPlacementStrategy.PACK,
            ),
        ).bind()
        handle = serve.run(app, name="gang_pack_app")
        wait_for_condition(check_apps_running, apps=["gang_pack_app"])
        # Query multiple times to hit all replicas and collect node IDs
        node_ids = set()
        for _ in range(40):
            result = handle.remote().result()
            node_ids.add(result)
        # With PACK strategy, all 4 replicas should be on the same node
        assert len(node_ids) == 1
        serve.delete("gang_pack_app")
        serve.shutdown()

    def test_gang_scheduling_spread_strategy(self, ray_cluster):
        """Verifies that SPREAD strategy places gang replicas on different nodes."""
        cluster = ray_cluster
        cluster.add_node(num_cpus=1)
        cluster.add_node(num_cpus=1)
        cluster.wait_for_nodes()
        ray.init(address=cluster.address)
        serve.start()

        @serve.deployment
        def SpreadDeployment():
            return os.environ.get(
                "RAY_NODE_ID", ray.get_runtime_context().get_node_id()
            )

        # 1 gang with SPREAD strategy - replicas should be on different nodes
        app = SpreadDeployment.options(
            num_replicas=2,
            ray_actor_options={"num_cpus": 0.25},
            gang_scheduling_config=GangSchedulingConfig(
                gang_size=2,
                gang_placement_strategy=GangPlacementStrategy.SPREAD,
            ),
        ).bind()
        handle = serve.run(app, name="gang_spread_app")
        wait_for_condition(check_apps_running, apps=["gang_spread_app"])
        # Query multiple times to hit all replicas and collect node IDs
        node_ids = set()
        for _ in range(40):
            result = handle.remote().result()
            node_ids.add(result)
        # With SPREAD strategy, 2 replicas should be on 2 different nodes
        assert len(node_ids) == 2
        serve.delete("gang_spread_app")
        serve.shutdown()

    def test_gang_context(self, ray_cluster):
        """Verifies GangContext is correctly populated in ReplicaContext."""
        cluster = ray_cluster
        cluster.add_node(num_cpus=1)
        cluster.wait_for_nodes()
        ray.init(address=cluster.address)
        serve.start()

        @serve.deployment
        class GangContextDeployment:
            def __call__(self):
                # Surface the replica's gang context so the test can
                # inspect it from outside the replica actor.
                ctx = ray.serve.context._get_internal_replica_context()
                gc = ctx.gang_context
                if gc is None:
                    return None
                return {
                    "gang_id": gc.gang_id,
                    "rank": gc.rank,
                    "world_size": gc.world_size,
                    "member_replica_ids": gc.member_replica_ids,
                    "replica_id": ctx.replica_id.unique_id,
                }

        app = GangContextDeployment.options(
            num_replicas=4,
            ray_actor_options={"num_cpus": 0.25},
            gang_scheduling_config=GangSchedulingConfig(gang_size=2),
        ).bind()
        handle = serve.run(app, name="gang_context_app")
        wait_for_condition(check_apps_running, apps=["gang_context_app"])
        # Collect gang contexts from all replicas
        # Query enough times to hit all 4 replicas
        contexts_by_replica = {}
        for _ in range(100):
            result = handle.remote().result()
            assert result is not None
            replica_id = result["replica_id"]
            if replica_id not in contexts_by_replica:
                contexts_by_replica[replica_id] = result
            if len(contexts_by_replica) == 4:
                break
        assert len(contexts_by_replica) == 4
        # Group replicas by gang_id
        gangs = {}
        for replica_id, ctx in contexts_by_replica.items():
            gang_id = ctx["gang_id"]
            gangs.setdefault(gang_id, []).append(ctx)
        assert len(gangs) == 2
        # Within each gang: consistent membership, world size, and ranks.
        for gang_id, members in gangs.items():
            assert len(members) == 2
            assert all(member["world_size"] == 2 for member in members)
            assert members[0]["member_replica_ids"] == members[1]["member_replica_ids"]
            expected_ids = sorted([m["replica_id"] for m in members])
            actual_ids = sorted(members[0]["member_replica_ids"])
            assert actual_ids == expected_ids
            ranks = sorted([m["rank"] for m in members])
            assert ranks == [0, 1]
        # Across gangs: gang_ids should be different
        gang_ids = list(gangs.keys())
        assert gang_ids[0] != gang_ids[1]
        # Across gangs: member_replica_ids should be different
        gang_members_list = list(gangs.values())
        assert sorted(gang_members_list[0][0]["member_replica_ids"]) != sorted(
            gang_members_list[1][0]["member_replica_ids"]
        )
        serve.delete("gang_context_app")
        serve.shutdown()

    def test_gang_placement_groups_cleanup_on_deletion(self, ray_cluster):
        """Verifies serve.delete() removes reserved gang placement groups."""
        cluster = ray_cluster
        cluster.add_node(num_cpus=1)
        cluster.add_node(num_cpus=1)
        cluster.wait_for_nodes()
        ray.init(address=cluster.address)
        serve.start()

        @serve.deployment(
            num_replicas=4,
            ray_actor_options={"num_cpus": 0.25},
            gang_scheduling_config=GangSchedulingConfig(gang_size=2),
        )
        class GangDeleteCleanupDeployment:
            def __call__(self):
                return "ok"

        app_name = "gang_delete_cleanup_app"
        deployment_name = "GangDeleteCleanupDeployment"
        # Gang PGs are named with this prefix; used to find them below.
        pg_name_prefix = f"{GANG_PG_NAME_PREFIX}{app_name}_{deployment_name}_"
        serve.run(GangDeleteCleanupDeployment.bind(), name=app_name)
        wait_for_condition(check_apps_running, apps=[app_name])
        # Gang placement groups must exist while the app is running...
        wait_for_condition(
            lambda: any(
                name.startswith(pg_name_prefix)
                for name in get_all_live_placement_group_names()
            ),
            timeout=60,
        )
        serve.delete(app_name)
        # ...and must all be released after the app is deleted.
        wait_for_condition(
            lambda: not any(
                name.startswith(pg_name_prefix)
                for name in get_all_live_placement_group_names()
            ),
            timeout=60,
        )
        serve.shutdown()

    def test_multiple_gang_deployments_in_one_app(self, ray_cluster):
        """Verifies two gang deployments run together under one Serve app."""
        cluster = ray_cluster
        cluster.add_node(num_cpus=1)
        cluster.add_node(num_cpus=1)
        cluster.wait_for_nodes()
        ray.init(address=cluster.address)
        serve.start()

        @serve.deployment(
            num_replicas=4,
            ray_actor_options={"num_cpus": 0.25},
            gang_scheduling_config=GangSchedulingConfig(gang_size=2),
        )
        class GangA:
            def __init__(self, gang_b):
                # Holds a handle to GangB to form a composed application.
                self._gang_b = gang_b

            def __call__(self):
                return "a"

        @serve.deployment(
            num_replicas=4,
            ray_actor_options={"num_cpus": 0.25},
            gang_scheduling_config=GangSchedulingConfig(gang_size=2),
        )
        class GangB:
            def __call__(self):
                return "b"

        app_name = "multi_gang_app"
        serve.run(GangA.bind(GangB.bind()), name=app_name)
        wait_for_condition(check_apps_running, apps=[app_name])
        app_status = serve.status().applications[app_name]
        assert app_status.deployments["GangA"].replica_states.get("RUNNING", 0) == 4
        assert app_status.deployments["GangB"].replica_states.get("RUNNING", 0) == 4
        serve.delete(app_name)
        serve.shutdown()
class TestGangResourceReservation:
    """Tests that the gang placement group reserves the expected resources."""

    @pytest.mark.parametrize(
        "ray_actor_options, placement_group_bundles, gang_placement_strategy, "
        "expected_bundles, expected_strategy, expect_same_node",
        [
            # Case 1: Only ray_actor_options — one flat bundle per replica, PACK
            (
                {"num_cpus": 0.25},
                None,
                "PACK",
                [{"CPU": 0.25}, {"CPU": 0.25}],
                "PACK",
                True,
            ),
            # Case 2: placement_group_bundles — flattened into the gang PG, PACK
            (
                {"num_cpus": 0},
                [{"CPU": 0.25}] * 2,
                "PACK",
                [{"CPU": 0.25}] * 4,
                "PACK",
                True,
            ),
            # Case 3: placement_group_bundles + SPREAD strategy
            (
                {"num_cpus": 0},
                [{"CPU": 0.25}] * 2,
                "SPREAD",
                [{"CPU": 0.25}] * 4,
                "SPREAD",
                False,
            ),
        ],
    )
    def test_gang_resource_reservation(
        self,
        ray_cluster,
        ray_actor_options,
        placement_group_bundles,
        gang_placement_strategy,
        expected_bundles,
        expected_strategy,
        expect_same_node,
    ):
        """Verifies the gang PG has the correct bundles, strategy, and
        that per-replica bundles are placed according to the strategy."""
        cluster = ray_cluster
        cluster.add_node(num_cpus=1)
        cluster.add_node(num_cpus=1)
        cluster.wait_for_nodes()
        ray.init(address=cluster.address)
        serve.start()
        deployment_kwargs = {
            "num_replicas": 2,
            "ray_actor_options": ray_actor_options,
            "gang_scheduling_config": GangSchedulingConfig(
                gang_size=2,
                gang_placement_strategy=gang_placement_strategy,
            ),
        }
        # placement_group_bundles=None must mean "absent", not "explicit None".
        if placement_group_bundles is not None:
            deployment_kwargs["placement_group_bundles"] = placement_group_bundles

        @serve.deployment(**deployment_kwargs)
        class GangDeployment:
            def get_pg_info(self):
                # Report this replica's placement group as seen from inside.
                pg = get_current_placement_group()
                if pg is None:
                    return None
                pg_table = placement_group_table(pg)
                return {
                    "bundle_specs": pg.bundle_specs,
                    "strategy": pg_table["strategy"],
                    "bundles_to_node_id": pg_table["bundles_to_node_id"],
                }

            def __call__(self):
                return "ok"

        app = GangDeployment.bind()
        handle = serve.run(app, name="gang_reservation_app")
        wait_for_condition(
            check_apps_running,
            apps=["gang_reservation_app"],
        )
        # 20 queries to sample both replicas; every response must agree.
        for _ in range(20):
            pg_info = handle.get_pg_info.remote().result()
            assert pg_info is not None
            assert pg_info["bundle_specs"] == expected_bundles
            assert pg_info["strategy"] == expected_strategy
            bundles_per_replica = (
                len(placement_group_bundles) if placement_group_bundles else 1
            )
            gang_size = 2
            # Check node placement of each replica's slice of the gang PG.
            for replica_idx in range(gang_size):
                start = replica_idx * bundles_per_replica
                replica_nodes = {
                    pg_info["bundles_to_node_id"][i]
                    for i in range(start, start + bundles_per_replica)
                }
                if expect_same_node:
                    assert len(replica_nodes) == 1
                else:
                    assert len(replica_nodes) == bundles_per_replica
        serve.delete("gang_reservation_app")
        serve.shutdown()

    def test_gang_label_selector(self, ray_cluster):
        """
        Verifies that placement_group_bundle_label_selector steers gang bundles
        onto the labeled node.
        """
        cluster = ray_cluster
        cluster.add_node(num_cpus=1)
        # Only this second node carries the label the selector targets.
        cluster.add_node(num_cpus=1, labels={"accelerator": "tpu"})
        cluster.wait_for_nodes()
        ray.init(address=cluster.address)
        serve.start()

        @serve.deployment(
            num_replicas=2,
            ray_actor_options={"num_cpus": 0},
            placement_group_bundles=[{"CPU": 0.25}],
            placement_group_bundle_label_selector=[{"accelerator": "tpu"}],
            gang_scheduling_config=GangSchedulingConfig(gang_size=2),
        )
        class LabeledGangDeployment:
            def get_pg_info(self):
                pg = get_current_placement_group()
                if pg is None:
                    return None
                pg_table = placement_group_table(pg)
                return {
                    "bundle_specs": pg.bundle_specs,
                    "bundles_to_node_id": pg_table["bundles_to_node_id"],
                    "node_labels": ray.get_runtime_context().get_node_labels(),
                }

            def __call__(self):
                return "ok"

        app = LabeledGangDeployment.bind()
        handle = serve.run(app, name="label_selector_app")
        wait_for_condition(
            check_apps_running,
            apps=["label_selector_app"],
        )
        # Find the node ID of the labeled node to compare against.
        labeled_node_id = None
        for node in ray.nodes():
            if node["Labels"].get("accelerator") == "tpu":
                labeled_node_id = node["NodeID"]
                break
        assert labeled_node_id is not None
        for _ in range(20):
            pg_info = handle.get_pg_info.remote().result()
            assert pg_info is not None
            assert pg_info["bundle_specs"] == [{"CPU": 0.25}, {"CPU": 0.25}]
            # Replica actor itself should be on the labeled node
            assert pg_info["node_labels"].get("accelerator") == "tpu"
            # All bundles in the gang PG should be on the labeled node
            for node_id in pg_info["bundles_to_node_id"].values():
                assert node_id == labeled_node_id
        serve.delete("label_selector_app")
        serve.shutdown()
class TestGangConstructorFailure:
    """Tests for gang scheduling with constructor failures."""

    def test_consistent_constructor_failure(self, ray_shutdown):
        """Validates gang deployment where all replicas consistently fail their constructor."""
        ray.init(num_cpus=1)
        serve.start()

        @serve.deployment(
            num_replicas=4,
            ray_actor_options={"num_cpus": 0.1},
            gang_scheduling_config=GangSchedulingConfig(gang_size=2),
        )
        class GangConstructorFailure:
            def __init__(self):
                raise RuntimeError("Intentionally failing gang replica constructor")

            async def __call__(self, request):
                return "hi"

        # serve.run surfaces the constructor failure to the caller.
        with pytest.raises(RuntimeError):
            serve.run(GangConstructorFailure.bind())
        client = serve.context._get_global_client()
        deployment_dict = ray.get(client._controller._all_running_replicas.remote())
        deployment_id = DeploymentID(name="GangConstructorFailure")
        # No replica may be left running after the failed deploy.
        assert len(deployment_dict[deployment_id]) == 0
        app_status = serve.status().applications[SERVE_DEFAULT_APP_NAME]
        assert app_status.status == "DEPLOY_FAILED"
        assert (
            app_status.deployments["GangConstructorFailure"].status == "DEPLOY_FAILED"
        )

    def test_partial_constructor_failure(self, ray_shutdown):
        """Validates gang deployment where one replica consistently fails."""
        ray.init(num_cpus=1)
        serve.start()
        with tempfile.TemporaryDirectory() as tmpdir:
            # The file records which replica ID must keep failing, so the
            # same replica fails deterministically across restarts.
            file_path = os.path.join(tmpdir, "test_deploy.txt")

            @serve.deployment(
                num_replicas=4,
                ray_actor_options={"num_cpus": 0.1},
                gang_scheduling_config=GangSchedulingConfig(gang_size=2),
            )
            class GangPartialConstructorFailure:
                def __init__(self):
                    if not os.path.exists(file_path):
                        with open(file_path, "w") as f:
                            f.write(serve.get_replica_context().replica_id.unique_id)
                        raise RuntimeError("Consistently throwing on same replica.")
                    else:
                        with open(file_path) as f:
                            content = f.read()
                        if content == serve.get_replica_context().replica_id.unique_id:
                            raise RuntimeError("Consistently throwing on same replica.")

                async def __call__(self, request):
                    return "hi"

            serve.run(GangPartialConstructorFailure.bind())
            client = serve.context._get_global_client()
            deployment_id = DeploymentID(name="GangPartialConstructorFailure")
            deployment_dict = ray.get(client._controller._all_running_replicas.remote())
            # Despite one replica repeatedly failing, the deployment must
            # converge to 4 RUNNING replicas (failed one gets replaced).
            assert len(deployment_dict[deployment_id]) == 4
            app_status = serve.status().applications[SERVE_DEFAULT_APP_NAME]
            assert app_status.status == "RUNNING"
            assert (
                app_status.deployments["GangPartialConstructorFailure"].status == "HEALTHY"
            )

    def test_transient_constructor_failure(self, ray_shutdown):
        """Validates gang deployment where the first constructor call fails then succeeds."""
        ray.init(num_cpus=1)
        serve.start()
        with tempfile.TemporaryDirectory() as tmpdir:
            # Marker file: absent on first attempt (fail), present after (succeed).
            file_path = os.path.join(tmpdir, "test_deploy.txt")

            @serve.deployment(
                num_replicas=4,
                ray_actor_options={"num_cpus": 0.1},
                gang_scheduling_config=GangSchedulingConfig(gang_size=2),
            )
            class GangTransientConstructorFailure:
                def __init__(self):
                    if os.path.exists(file_path):
                        return
                    with open(file_path, "w") as f:
                        f.write("ONE")
                    raise RuntimeError("Intentionally throw on first try.")

                async def __call__(self, request):
                    return "hi"

            serve.run(GangTransientConstructorFailure.bind())
            client = serve.context._get_global_client()
            deployment_id = DeploymentID(name="GangTransientConstructorFailure")
            deployment_dict = ray.get(client._controller._all_running_replicas.remote())
            assert len(deployment_dict[deployment_id]) == 4
            app_status = serve.status().applications[SERVE_DEFAULT_APP_NAME]
            assert app_status.status == "RUNNING"
            assert (
                app_status.deployments["GangTransientConstructorFailure"].status
                == "HEALTHY"
            )
class TestGangFailureRecovery:
    """Tests that a failure of one gang member tears down and restarts the whole gang."""

    def test_startup_failure_stops_entire_gang(self, ray_shutdown):
        """Startup failure stops both replicas in the affected gang."""
        ray.init(num_cpus=1)
        serve.start()
        failed_replica_store = FailedReplicaStore.remote()
        recovery_signal = SignalActor.remote()

        @serve.deployment(
            num_replicas=4,
            ray_actor_options={"num_cpus": 0.1},
            gang_scheduling_config=GangSchedulingConfig(gang_size=2),
        )
        class StartupFailureDeployment:
            def __init__(self, failed_replica_store, recovery_signal):
                replica_id = serve.get_replica_context().replica_id.unique_id
                is_first_failure = ray.get(
                    failed_replica_store.set_if_first.remote(replica_id)
                )
                if is_first_failure:
                    raise RuntimeError("Fail one startup to trigger gang cleanup.")
                failed_replica_id = ray.get(failed_replica_store.get.remote())
                if replica_id == failed_replica_id:
                    # Hold failed replica retry until the intermediate state is asserted.
                    ray.get(recovery_signal.wait.remote())

            def __call__(self):
                ctx = serve.get_replica_context()
                gc = ctx.gang_context
                return {
                    "replica_id": ctx.replica_id.unique_id,
                    "gang_id": gc.gang_id,
                }

        app_name = "gang_startup_cleanup_app"
        deployment_name = "StartupFailureDeployment"
        handle = serve._run(
            StartupFailureDeployment.bind(failed_replica_store, recovery_signal),
            name=app_name,
            _blocking=False,
        )
        # The unaffected gang should reach 2 RUNNING while the failed
        # gang is being cleaned up and retried.
        wait_for_condition(
            lambda: (
                serve.status()
                .applications[app_name]
                .deployments[deployment_name]
                .replica_states.get("RUNNING", 0)
                == 2
            ),
            timeout=60,
        )
        # The 2 running replicas must belong to the SAME gang,
        # proving no partial gang survived.
        contexts = {}
        for _ in range(50):
            result = handle.remote().result()
            contexts.setdefault(result["replica_id"], result)
            if len(contexts) == 2:
                break
        assert len(contexts) == 2
        assert len({ctx["gang_id"] for ctx in contexts.values()}) == 1
        # Release constructor retry gate so the failed gang can recover.
        ray.get(recovery_signal.send.remote())
        # After retry, all 4 replicas should be RUNNING.
        wait_for_condition(check_apps_running, apps=[app_name], timeout=60)
        app_status = serve.status().applications[app_name]
        dep_status = app_status.deployments[deployment_name]
        assert dep_status.replica_states.get("RUNNING", 0) == 4
        serve.delete(app_name)
        serve.shutdown()

    def test_health_failure_restarts_gang(self, ray_shutdown):
        """Single health check failure tears down and restarts the entire gang."""
        ray.init(num_cpus=1)
        serve.start()
        # Collector holds the replica ID whose health checks should fail.
        target_replica_collector = Collector.remote()

        @serve.deployment(
            num_replicas=4,
            ray_actor_options={"num_cpus": 0.1},
            health_check_period_s=1,
            health_check_timeout_s=1,
            gang_scheduling_config=GangSchedulingConfig(gang_size=2),
        )
        class HealthFailureDeployment:
            def __call__(self):
                ctx = serve.get_replica_context()
                gc = ctx.gang_context
                return {
                    "replica_id": ctx.replica_id.unique_id,
                    "gang_id": gc.gang_id,
                }

            def check_health(self):
                targets = ray.get(target_replica_collector.get.remote())
                if not targets:
                    return
                target_id = targets[-1]
                # Only 1 replica fails; its sibling stays healthy.
                # The gang-aware cleanup must stop the sibling too.
                ctx = serve.get_replica_context()
                if ctx.replica_id.unique_id == target_id:
                    raise RuntimeError("Intentional health check failure.")

        app_name = "gang_health_failure_app"
        deployment_name = "HealthFailureDeployment"
        handle = serve.run(HealthFailureDeployment.bind(), name=app_name)
        wait_for_condition(check_apps_running, apps=[app_name], timeout=60)
        # Discover all 4 replica contexts.
        contexts_by_replica = {}
        for _ in range(120):
            result = handle.remote().result()
            contexts_by_replica.setdefault(result["replica_id"], result)
            if len(contexts_by_replica) == 4:
                break
        assert len(contexts_by_replica) == 4
        # Pick 1 replica to fail health checks.
        target_ctx = next(iter(contexts_by_replica.values()))
        target_gang_id = target_ctx["gang_id"]
        target_gang_replica_ids = {
            ctx["replica_id"]
            for ctx in contexts_by_replica.values()
            if ctx["gang_id"] == target_gang_id
        }
        unaffected_replica_ids = (
            set(contexts_by_replica.keys()) - target_gang_replica_ids
        )
        assert len(target_gang_replica_ids) == 2
        assert len(unaffected_replica_ids) == 2
        # Trigger failure for only 1 replica in the target gang.
        ray.get(target_replica_collector.add.remote(target_ctx["replica_id"]))
        client = serve.context._get_global_client()
        deployment_id = DeploymentID(name=deployment_name, app_name=app_name)

        def check_target_gang_restarted():
            replicas = ray.get(
                client._controller._dump_replica_states_for_testing.remote(
                    deployment_id
                )
            )
            running_replicas = replicas.get([ReplicaState.RUNNING])
            running_ids = {r.replica_id.unique_id for r in running_replicas}
            # Both old gang members must be gone (not just the one that
            # failed), and the unaffected gang must be untouched.
            return (
                len(running_ids) == 4
                and len(running_ids & target_gang_replica_ids) == 0
                and len(running_ids & unaffected_replica_ids) == 2
            )

        wait_for_condition(check_target_gang_restarted, timeout=90)
        wait_for_condition(check_apps_running, apps=[app_name], timeout=60)
        serve.delete(app_name)
        serve.shutdown()
class TestGangChildSpawnPlacementGroup:
    """Tests for child actors/tasks spawned from inside gang replicas."""

    # Class-level fixtures shared by the tests below; rebound to locals
    # inside each test so the deployment closures can capture them.
    @ray.remote(num_cpus=0.1)
    class ChildActor:
        def get_pg(self):
            return get_current_placement_group()

    @ray.remote(num_cpus=0)
    def child_task_get_pg():
        return get_current_placement_group()

    @pytest.mark.parametrize("child_type", ["actor", "task"])
    def test_child_in_gang_pg(self, ray_cluster, child_type):
        """Spawn a child actor/task inside a gang replica and verify it shares the gang placement group."""
        cluster = ray_cluster
        cluster.add_node(num_cpus=2)
        cluster.wait_for_nodes()
        ray.init(address=cluster.address)
        serve.start()
        ChildActor = TestGangChildSpawnPlacementGroup.ChildActor
        child_task_get_pg = TestGangChildSpawnPlacementGroup.child_task_get_pg

        @serve.deployment(
            num_replicas=2,
            ray_actor_options={"num_cpus": 0.1},
            # Extra bundle per replica so the child actor has resources
            # inside the gang PG (the first bundle is consumed by the replica).
            placement_group_bundles=[{"CPU": 0.1}, {"CPU": 0.1}],
            gang_scheduling_config=GangSchedulingConfig(gang_size=2),
        )
        class GangWithChild:
            def test_child_in_pg(self):
                parent_pg = get_current_placement_group()
                if child_type == "actor":
                    child = ChildActor.remote()
                    child_pg = ray.get(child.get_pg.remote())
                else:
                    child_pg = ray.get(child_task_get_pg.remote())
                return {
                    "parent_pg_id": parent_pg.id.hex() if parent_pg else None,
                    "child_pg_id": child_pg.id.hex() if child_pg else None,
                }

            def __call__(self):
                return "ok"

        app_name = "gang_child_app"
        handle = serve.run(GangWithChild.bind(), name=app_name)
        wait_for_condition(check_apps_running, apps=[app_name])
        # Children must inherit the replica's gang placement group.
        for _ in range(20):
            result = handle.test_child_in_pg.remote().result()
            assert result["parent_pg_id"] is not None
            assert result["child_pg_id"] is not None
            assert result["child_pg_id"] == result["parent_pg_id"]
        serve.delete(app_name)
        serve.shutdown()

    def test_child_actor_gang_pg_bundles_bounded(self, ray_cluster):
        """Gang replicas with placement_group_bundles: verify child actors are resource-bounded by the gang PG."""
        cluster = ray_cluster
        cluster.add_node(num_cpus=2)
        cluster.wait_for_nodes()
        ray.init(address=cluster.address)
        serve.start()
        ChildActor = TestGangChildSpawnPlacementGroup.ChildActor

        @serve.deployment(
            num_replicas=1,
            ray_actor_options={"num_cpus": 0.1},
            # Replica consumes the first bundle (0.1 CPU). Worker bundle (0.1
            # CPU) fits exactly one ChildActor, so a second child is blocked.
            placement_group_bundles=[{"CPU": 0.1}, {"CPU": 0.1}],
            gang_scheduling_config=GangSchedulingConfig(gang_size=1),
        )
        class GangWithBundlesAndChild:
            def test_second_worker_blocked(self):
                """The second child actor shouldn't fit in this replica's bundle slice."""
                w1 = ChildActor.remote()
                w2 = ChildActor.remote()
                # w2 can never schedule; a 1s wait returning nothing proves it.
                ready, _ = ray.wait([w2.get_pg.remote()], timeout=1)
                ray.kill(w1)
                ray.kill(w2)
                return len(ready) == 0

            def __call__(self):
                return "ok"

        app_name = "gang_bundles_child_app"
        handle = serve.run(GangWithBundlesAndChild.bind(), name=app_name)
        wait_for_condition(check_apps_running, apps=[app_name])
        # Verify resource limits are enforced within the gang PG bundle slice.
        for _ in range(4):
            assert handle.test_second_worker_blocked.remote().result() is True
        serve.delete(app_name)
        serve.shutdown()

    def test_child_actor_opt_out_gang_pg(self, ray_cluster):
        """Verify a child actor can opt out of the gang PG by passing placement_group=None."""
        cluster = ray_cluster
        cluster.add_node(num_cpus=2)
        cluster.wait_for_nodes()
        ray.init(address=cluster.address)
        serve.start()
        ChildActor = TestGangChildSpawnPlacementGroup.ChildActor

        @serve.deployment(
            num_replicas=2,
            ray_actor_options={"num_cpus": 0.1},
            gang_scheduling_config=GangSchedulingConfig(gang_size=2),
        )
        class GangWithEscapedChild:
            def get_child_outside_pg(self):
                parent_pg = get_current_placement_group()
                child = ChildActor.options(
                    scheduling_strategy=PlacementGroupSchedulingStrategy(
                        placement_group=None,  # Explicitly schedule outside the placement group
                    )
                ).remote()
                child_pg = ray.get(child.get_pg.remote())
                return {
                    "parent_pg_id": parent_pg.id.hex() if parent_pg else None,
                    "child_pg_id": child_pg.id.hex() if child_pg else None,
                }

            def __call__(self):
                return "ok"

        app_name = "gang_escaped_child_app"
        handle = serve.run(GangWithEscapedChild.bind(), name=app_name)
        wait_for_condition(check_apps_running, apps=[app_name])
        # Parent is inside its gang PG; opted-out child reports no PG.
        for _ in range(20):
            result = handle.get_child_outside_pg.remote().result()
            assert result["parent_pg_id"] is not None
            assert result["child_pg_id"] is None
        serve.delete(app_name)
        serve.shutdown()
if __name__ == "__main__":
    # Allow running this test module directly; propagate pytest's exit code.
    sys.exit(pytest.main(["-v", "-s", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/test_gang_scheduling.py",
"license": "Apache License 2.0",
"lines": 934,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/train/v2/_internal/execution/scaling_policy/elastic.py | import logging
import uuid
from functools import cached_property
from typing import TYPE_CHECKING, Dict, List, Optional
import ray
from ray.train.v2._internal.execution.context import TrainRunContext
from ray.train.v2._internal.execution.scaling_policy import (
NoopDecision,
ResizeDecision,
ScalingDecision,
ScalingPolicy,
)
from ray.train.v2._internal.execution.worker_group import (
WorkerGroupPollStatus,
WorkerGroupState,
)
from ray.train.v2._internal.util import time_monotonic
from ray.train.v2.api.config import ScalingConfig
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from ray.data._internal.cluster_autoscaler.default_autoscaling_coordinator import (
ResourceDict,
)
class ElasticScalingPolicy(ScalingPolicy):
    """Scaling policy that elastically sizes the Train worker group.

    Requests resources for up to ``max_workers`` from the (Ray Data)
    AutoscalingCoordinator, then resizes the worker group based on the
    resources the coordinator reports as allocated, clamped to
    ``[min_workers, max_workers]`` from the ``ScalingConfig``.
    Remote calls to the coordinator are throttled by the class-level
    interval constants below.
    """

    # The time in seconds after which an autoscaling request will expire.
    AUTOSCALING_REQUESTS_EXPIRE_TIME_S = 180
    # Minimum interval in seconds between two consecutive autoscaling requests.
    AUTOSCALING_REQUESTS_INTERVAL_S = 20
    # Timeout in seconds for getting the result of a call to the AutoscalingCoordinator.
    AUTOSCALING_REQUESTS_GET_TIMEOUT_S = 5
    # Minimum interval in seconds between querying the AutoscalingCoordinator for allocated resources.
    GET_ALLOCATED_RESOURCES_INTERVAL_S = 1
    # Minimum interval in seconds between logging warnings about insufficient workers.
    INSUFFICIENT_WORKERS_WARNING_INTERVAL_S = 30

    def __init__(self, scaling_config: ScalingConfig):
        super().__init__(scaling_config)
        # Last time a resize was considered for a running worker group.
        self._latest_monitor_time = float("-inf")
        # Requester ID for AutoscalingCoordinator.
        # Prefer the Train run_id when available (set in after_controller_start).
        self._requester_id = "train-" + uuid.uuid4().hex
        # Timestamps used to throttle remote calls; "-inf" so the first
        # call is never throttled.
        self._latest_autoscaling_request_time = float("-inf")
        self._latest_insufficient_workers_warning_time = float("-inf")
        self._latest_allocated_resources_query_time = float("-inf")
        # Cached result of the last allocated-resources query.
        self._latest_allocated_resources: Optional[List["ResourceDict"]] = None

    def _count_possible_workers(
        self, allocated_resources: List[Dict[str, float]]
    ) -> int:
        """Count the number of workers that can be started/restarted with the given
        the list of node resources. The returned number is capped at the maximum
        number of workers.
        """
        # TODO: Fractional resources do not work well here.
        single_worker_resources = self.scaling_config._resources_per_worker_not_none
        total_num_workers = 0
        # If workers require no resources, we can run as many as we want.
        if sum(single_worker_resources.values()) == 0:
            return self.scaling_config.max_workers
        for resources in allocated_resources:
            # Per resource bundle, the binding (minimum) resource determines
            # how many workers fit: floor(available / required) per resource.
            num_workers = min(
                [
                    resources.get(resource, 0.0) // single_worker_resources[resource]
                    for resource in single_worker_resources
                    if single_worker_resources[resource] > 0
                ]
            )
            total_num_workers += num_workers
        return min(int(total_num_workers), self.scaling_config.max_workers)

    def _get_resize_decision(self, num_workers: int) -> ResizeDecision:
        """Build a ResizeDecision for `num_workers` with the configured per-worker resources."""
        return ResizeDecision(
            num_workers=num_workers,
            resources_per_worker=self.scaling_config._resources_per_worker_not_none,
        )

    def make_decision_for_non_running_worker_group(self) -> ScalingDecision:
        """Decide whether to start/restart a worker group that is not running.

        Returns a ResizeDecision as soon as the allocated resources fit at
        least ``min_workers``; otherwise a NoopDecision.
        """
        self._maybe_send_resource_request()

        allocated_resources = self._get_allocated_resources()
        if allocated_resources is None:
            # Could not query the coordinator; do nothing this round.
            return NoopDecision()

        num_workers = self._count_possible_workers(allocated_resources)
        if num_workers < self.scaling_config.min_workers:
            now = time_monotonic()
            # Only log this warning periodically to avoid spamming logs
            if (
                now - self._latest_insufficient_workers_warning_time
                >= self.INSUFFICIENT_WORKERS_WARNING_INTERVAL_S
            ):
                logger.info(
                    f"Detected ready resources for {num_workers} workers "
                    "in the cluster. "
                    "Deciding NOT to start/restart training due to the number of workers "
                    "falling below the minimum "
                    f"(min_workers={self.scaling_config.min_workers})."
                )
                self._latest_insufficient_workers_warning_time = now
            return NoopDecision()

        logger.info(
            f"Detected ready resources for {num_workers} workers "
            "in the cluster. "
            "Deciding to start/restart training with this worker group size."
        )
        return self._get_resize_decision(num_workers)

    def make_decision_for_running_worker_group(
        self,
        worker_group_state: WorkerGroupState,
        worker_group_status: WorkerGroupPollStatus,
    ) -> ScalingDecision:
        """Decide whether to resize an already-running worker group.

        Resize considerations are rate-limited by
        ``ScalingConfig.elastic_resize_monitor_interval_s``; within the
        interval this always returns a NoopDecision.
        """
        self._maybe_send_resource_request()

        # Ensure that we don't make resizing decisions too frequently.
        # The latest restart time and the latest monitor time (whichever is later)
        # determine the time of the next resize consideration.
        latest_consideration_time = max(
            worker_group_state.start_time, self._latest_monitor_time
        )
        now = time_monotonic()
        time_since_latest_consideration = now - latest_consideration_time
        if (
            time_since_latest_consideration
            < self.scaling_config.elastic_resize_monitor_interval_s
        ):
            logger.debug(
                "Skipping resize decision due to the latest resizing consideration "
                "happening too recently: "
                "%.2f seconds < ScalingConfig(elastic_resize_monitor_interval_s=%.2f seconds).",
                time_since_latest_consideration,
                self.scaling_config.elastic_resize_monitor_interval_s,
            )
            return NoopDecision()

        self._latest_monitor_time = now

        allocated_resources = self._get_allocated_resources()
        if allocated_resources is None:
            return NoopDecision()

        num_workers = self._count_possible_workers(allocated_resources)
        if num_workers == worker_group_state.num_workers:
            logger.info(
                "Did not detect any changes in the cluster resources. "
                "Training will continue with the same worker group size "
                f"({num_workers})."
            )
            return NoopDecision()
        elif num_workers < self.scaling_config.min_workers:
            # This covers an edge case where allocated resources decrease to less
            # than the minimum number of workers.
            # This situation is rare, since cluster downsizing typically involves
            # worker failures. However, this check is still useful to fully
            # avoid entering an invalid state with fewer workers than the minimum.
            return NoopDecision()

        logger.info(
            "Detected changes in the cluster resources. "
            "Deciding to resize the worker group from "
            f"{worker_group_state.num_workers} -> {num_workers} workers."
        )
        return self._get_resize_decision(num_workers)

    # ---------------------------------------------------
    # Methods for interacting with AutoscalingCoordinator
    # ---------------------------------------------------

    @cached_property
    def _autoscaling_coordinator(self):
        """Lazily create/fetch the shared AutoscalingCoordinator actor.

        Imported inside the method to defer the ray.data dependency until
        first use; cached_property ensures a single handle per policy.
        """
        from ray.data._internal.cluster_autoscaler.default_autoscaling_coordinator import (
            get_or_create_autoscaling_coordinator,
        )

        return get_or_create_autoscaling_coordinator()

    def _maybe_send_resource_request(self):
        """Send a resource request to AutoscalingCoordinator,
        if AUTOSCALING_REQUESTS_INTERVAL_S has passed since the last send."""
        now = time_monotonic()
        if (
            now - self._latest_autoscaling_request_time
            < self.AUTOSCALING_REQUESTS_INTERVAL_S
        ):
            return

        resources_per_worker = self.scaling_config._resources_per_worker_not_none
        max_workers = self.scaling_config.max_workers
        try:
            from ray.data._internal.cluster_autoscaler.default_autoscaling_coordinator import (
                ResourceRequestPriority,
            )

            ray.get(
                self._autoscaling_coordinator.request_resources.remote(
                    requester_id=self._requester_id,
                    resources=[resources_per_worker] * max_workers,
                    expire_after_s=self.AUTOSCALING_REQUESTS_EXPIRE_TIME_S,
                    priority=ResourceRequestPriority.HIGH,
                ),
                timeout=self.AUTOSCALING_REQUESTS_GET_TIMEOUT_S,
            )
            # Only advance the timestamp on success, so failed sends are
            # retried on the next call instead of waiting a full interval.
            self._latest_autoscaling_request_time = time_monotonic()
        except Exception:
            msg = (
                f"Failed to send resource request for {self._requester_id}."
                " If this only happens transiently during network partition or"
                " CPU being overloaded, it's safe to ignore this error."
                " If this error persists, file a GitHub issue."
            )
            logger.warning(msg, exc_info=True)

    def _get_allocated_resources(self) -> Optional[List["ResourceDict"]]:
        """Get allocated resources from AutoscalingCoordinator.
        Return None if there is an error.

        Results are cached for GET_ALLOCATED_RESOURCES_INTERVAL_S; within
        that window the cached value (possibly None after a failure) is
        returned without a remote call.
        """
        now = time_monotonic()
        time_since_last_call = now - self._latest_allocated_resources_query_time
        if time_since_last_call < self.GET_ALLOCATED_RESOURCES_INTERVAL_S:
            return self._latest_allocated_resources

        allocated_resources = None
        try:
            allocated_resources = ray.get(
                self._autoscaling_coordinator.get_allocated_resources.remote(
                    self._requester_id
                ),
                timeout=self.AUTOSCALING_REQUESTS_GET_TIMEOUT_S,
            )
        except Exception:
            msg = (
                f"Failed to get allocated resources for {self._requester_id}."
                " Will not resize the worker group."
                " If this only happens transiently during network partition or"
                " CPU being overloaded, it's safe to ignore this error."
                " If this error persists, file a GitHub issue."
            )
            logger.warning(msg, exc_info=True)
        finally:
            # Update the cache on both success and failure (None on failure).
            self._latest_allocated_resources_query_time = time_monotonic()
            self._latest_allocated_resources = allocated_resources
        return self._latest_allocated_resources

    def _cancel_resource_request(self):
        """Cancel the resource request to AutoscalingCoordinator."""
        try:
            ray.get(
                self._autoscaling_coordinator.cancel_request.remote(
                    requester_id=self._requester_id,
                ),
                timeout=self.AUTOSCALING_REQUESTS_GET_TIMEOUT_S,
            )
        except Exception:
            # Best-effort: the request self-expires, so failure is non-fatal.
            msg = (
                f"Failed to cancel resource request for {self._requester_id}."
                " The request will still expire after the timeout of"
                f" {self.AUTOSCALING_REQUESTS_EXPIRE_TIME_S} seconds."
            )
            logger.warning(msg, exc_info=True)

    # --------------------------
    # ControllerCallback
    # --------------------------

    def after_controller_start(self, train_run_context: TrainRunContext):
        """Send cluster autoscaling requests when the control loop starts."""
        # Rebind the requester ID to the stable Train run_id so requests
        # survive controller-side identity lookups.
        self._requester_id = f"train-{train_run_context.run_id}"
        resources_per_worker = self.scaling_config._resources_per_worker_not_none
        max_workers = self.scaling_config.max_workers
        logger.info(
            "Requesting resources to fit the maximum number of workers: "
            f"{resources_per_worker} * {max_workers}"
        )
        self._maybe_send_resource_request()

    def before_controller_shutdown(self):
        """Clear the autoscaling request eagerly when the control loop shuts down.
        So that cluster can scale down more quickly before the request timeout.
        """
        self._cancel_resource_request()

    def before_controller_abort(self):
        """Cancel the autoscaling request when the controller is aborted."""
        self._cancel_resource_request()
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/_internal/execution/scaling_policy/elastic.py",
"license": "Apache License 2.0",
"lines": 258,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/train/v2/tests/test_elastic_e2e.py | import sys
import time
from pathlib import Path
from typing import List
import pytest
import ray
import ray.train
from ray.cluster_utils import Cluster
from ray.train.tests.util import create_dict_checkpoint, load_dict_checkpoint
from ray.train.v2._internal.constants import HEALTH_CHECK_INTERVAL_S_ENV_VAR
from ray.train.v2.api.data_parallel_trainer import DataParallelTrainer
@pytest.fixture
def cluster():
    """Yield a Ray test cluster with a zero-CPU head node.

    Worker nodes are added by the test itself; the head node carries no
    CPUs so training workers cannot be placed on it.
    """
    cluster = Cluster(initialize_head=True, head_node_args=dict(num_cpus=0))
    cluster.wait_for_nodes()
    ray.init(
        address=cluster.address,
        # Ship this test directory so remote tasks can import local modules.
        runtime_env={"working_dir": str(Path(__file__).parent)},
    )

    yield cluster

    # Teardown: disconnect the driver, then tear down the cluster nodes.
    ray.shutdown()
    cluster.shutdown()
def train_fn(config: dict):
    """Per-worker training loop that reports metrics and checkpoints each epoch.

    Tracks the minimum/maximum world size observed across the run and
    persists both in the checkpoint, so elastic resizes survive restarts
    and the driver can assert on them from the final result.
    """
    train_context = ray.train.get_context()
    rank = train_context.get_world_rank()

    start_epoch = 1
    checkpoint = ray.train.get_checkpoint()
    min_world_size = None
    max_world_size = None
    if checkpoint:
        # Resume from the epoch after the last checkpointed one, carrying
        # over the world-size extremes recorded so far.
        checkpoint_data = load_dict_checkpoint(checkpoint)
        start_epoch = checkpoint_data["epoch"] + 1
        min_world_size = checkpoint_data.get("min_world_size")
        max_world_size = checkpoint_data.get("max_world_size")

        if rank == 0:
            print("Restoring from epoch: ", start_epoch)

    for epoch in range(start_epoch, config.get("num_epochs", 60) + 1):
        world_size = train_context.get_world_size()
        if min_world_size is None:
            min_world_size = world_size
        if max_world_size is None:
            max_world_size = world_size
        min_world_size = min(min_world_size, world_size)
        max_world_size = max(max_world_size, world_size)

        # TODO: This test injects errors by "killing nodes," which ungracefully
        # kills processes. This means that any backlog in the checkpoint queue
        # will not be flushed to the controller.
        # This means that the checkpoint populated on restore may not be
        # the most recent one.
        # Set the poll interval < health check interval to reduce the
        # backlog size to mitigate the issue.
        time.sleep(2 * config.get("health_check_interval_s", 1))

        with create_dict_checkpoint(
            {
                "epoch": epoch,
                "min_world_size": min_world_size,
                "max_world_size": max_world_size,
            }
        ) as checkpoint:
            # All ranks report metrics; only rank 0 attaches the checkpoint.
            ray.train.report(
                {
                    "epoch": epoch,
                    "world_size": world_size,
                    "min_world_size": min_world_size,
                    "max_world_size": max_world_size,
                },
                checkpoint=checkpoint if rank == 0 else None,
                checkpoint_dir_name=f"checkpoint-epoch={epoch}",
            )

        if rank == 0:
            print("Finished epoch: ", epoch)
def test_elastic_training(monkeypatch, tmp_path, cluster):
    """End to end test for elastic training.

    This test covers:
    * Elastic startup (0 resources -> min resources)
    * Elastic scale up while running (min resources -> max resources)
    * Elastic scale down due to failure while running
    * Checkpointing + restoration
    * Preemption failure handling
    """
    # All timings in this test are multiples of this base unit.
    unit_time_s = 0.1
    health_check_interval_s = unit_time_s
    elastic_resize_monitor_interval_s = unit_time_s * 10
    num_epochs = 30

    monkeypatch.setenv(HEALTH_CHECK_INTERVAL_S_ENV_VAR, str(health_check_interval_s))

    @ray.remote(num_cpus=0)
    def run_training():
        # Run the trainer in a zero-CPU task so it can start on the
        # zero-CPU head node while worker nodes come and go.
        trainer = DataParallelTrainer(
            train_fn,
            train_loop_config={
                "num_epochs": num_epochs,
                "health_check_interval_s": health_check_interval_s,
            },
            scaling_config=ray.train.ScalingConfig(
                num_workers=(4, 32),
                use_gpu=True,
                elastic_resize_monitor_interval_s=elastic_resize_monitor_interval_s,
            ),
            run_config=ray.train.RunConfig(
                storage_path=str(tmp_path),
                checkpoint_config=ray.train.CheckpointConfig(num_to_keep=2),
                # NOTE: The outer test script will inject 2 node failures.
                failure_config=ray.train.FailureConfig(max_failures=2),
            ),
        )
        return trainer.fit()

    run_training_future = run_training.remote()

    start = time.time()
    ALL_NODES = []

    def print_status(message):
        # Log elapsed time + current CPU/GPU totals for debugging the timeline.
        elapsed = time.time() - start
        print()
        print("-" * 80)
        cluster_resources = {
            resource: value
            for resource, value in ray.cluster_resources().items()
            if resource in ("CPU", "GPU")
        }
        print(f"[elapsed={elapsed:.1f}s] {cluster_resources=}")
        print(message)
        print("-" * 80)
        print()

    def sleep(num_units):
        # Sleep in base time units to keep the scenario timeline readable.
        time.sleep(unit_time_s * num_units)

    def add_nodes(gpus: List[int]) -> List:
        added_nodes = []
        for num_gpus in gpus:
            node = cluster.add_node(num_gpus=num_gpus, wait=False)
            added_nodes.append(node)
        print_status(f"Added {len(gpus)} node(s) with num_gpus: {gpus}")
        cluster.wait_for_nodes()
        return added_nodes

    def remove_nodes(nodes: List):
        for node in nodes:
            cluster.remove_node(node)
        cluster.wait_for_nodes()
        print_status(f"Removed nodes: {nodes}")

    # Wait a bit before adding resources.
    print_status("Waiting for training to start...")
    sleep(8)

    # Add a node with 4 GPUs
    ALL_NODES.extend(add_nodes([4]))

    # Wait a bit before adding more resources.
    sleep(8)

    print("Adding 4 GPU node.")
    ALL_NODES.extend(add_nodes([4]))
    sleep(1)
    ALL_NODES.extend(add_nodes([4]))
    # Should not upscale here due to the elastic resize monitor interval.
    # Should upscale to 12 during this sleep.
    sleep(20)

    # Kill a node.
    remove_nodes([ALL_NODES.pop(0)])
    sleep(12)

    # Kill all worker nodes.
    remove_nodes(ALL_NODES)
    ALL_NODES = []
    sleep(8)

    ALL_NODES.extend(add_nodes(gpus=[1] * 16))
    sleep(12)

    # 4 extra GPUs shouldn't be used.
    ALL_NODES.extend(add_nodes(gpus=[4] * 4 + [1] * 4))

    result: ray.train.Result = ray.get(run_training_future)
    print_status(f"Training finished with result: {result}")

    # The run must finish cleanly within the configured worker bounds and
    # end on the final epoch's checkpoint.
    assert not result.error
    assert result.metrics["min_world_size"] >= 4
    assert result.metrics["max_world_size"] <= 32
    assert result.metrics["max_world_size"] >= result.metrics["min_world_size"]
    assert result.checkpoint
    assert Path(result.checkpoint.path).name == f"checkpoint-epoch={num_epochs}"
if __name__ == "__main__":
    # Run this module's tests directly, propagating pytest's exit code.
    sys.exit(pytest.main(["-v", "-x", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/tests/test_elastic_e2e.py",
"license": "Apache License 2.0",
"lines": 172,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/train/v2/tests/test_elastic_scaling_policy.py | import sys
from unittest.mock import MagicMock, patch
import pytest
from freezegun import freeze_time
from ray.data._internal.cluster_autoscaler.default_autoscaling_coordinator import (
ResourceRequestPriority,
)
from ray.train.v2._internal.execution.callback import ControllerCallback
from ray.train.v2._internal.execution.scaling_policy import NoopDecision, ResizeDecision
from ray.train.v2._internal.execution.scaling_policy.elastic import (
ElasticScalingPolicy,
)
from ray.train.v2._internal.execution.worker_group import (
WorkerGroupPollStatus,
WorkerGroupState,
WorkerStatus,
)
from ray.train.v2._internal.util import time_monotonic
from ray.train.v2.api.config import ScalingConfig
@pytest.fixture(autouse=True)
def mock_autoscaling_coordinator(monkeypatch):
    """Replace the policy's AutoscalingCoordinator with a MagicMock.

    Tests control the reported allocation by setting
    ``mock_coordinator._allocated_resources`` directly.
    """
    mock_coordinator = MagicMock()
    mock_coordinator._allocated_resources = None
    # get_allocated_resources echoes whatever the test stored, regardless
    # of the requester id argument.
    mock_coordinator.get_allocated_resources.remote = MagicMock(
        side_effect=lambda _: mock_coordinator._allocated_resources
    )
    monkeypatch.setattr(
        ElasticScalingPolicy, "_autoscaling_coordinator", mock_coordinator
    )
@pytest.fixture(autouse=True)
def patch_ray_get():
    """Make ray.get a pass-through so mocked .remote() return values flow back."""
    with patch(
        "ray.get",
        # Ignore kwargs such as timeout and return the argument unchanged.
        side_effect=lambda x, **_: x,
    ):
        yield
def _get_mock_worker_group_status(num_workers: int) -> WorkerGroupPollStatus:
    """Build a poll status reporting `num_workers` healthy, running workers."""
    statuses = {}
    for rank in range(num_workers):
        statuses[rank] = WorkerStatus(running=True, error=None)
    return WorkerGroupPollStatus(worker_statuses=statuses)
def _get_mock_worker_group_state(
    num_workers: int, start_time: float
) -> WorkerGroupState:
    """Build a worker-group state with `num_workers` mocked workers."""
    mock_workers = [MagicMock() for _ in range(num_workers)]
    pg_handle = MagicMock()
    sync_actor = MagicMock()
    return WorkerGroupState(
        start_time=start_time,
        placement_group_handle=pg_handle,
        workers=mock_workers,
        sync_actor=sync_actor,
    )
@patch.object(ElasticScalingPolicy, "GET_ALLOCATED_RESOURCES_INTERVAL_S", 0.0)
def test_non_running_worker_group_decision():
    """Test decisions being made when the worker group is initializing/restarting.

    Ensures that the policy will resize the worker group as soon as resources are available.
    """
    min_workers, max_workers = 4, 64
    resources_per_worker = {"CPU": 8, "GPU": 1}
    scaling_config = ScalingConfig(
        num_workers=(min_workers, max_workers),
        resources_per_worker=resources_per_worker,
        use_gpu=True,
    )
    policy = ElasticScalingPolicy(scaling_config)
    mock_coordinator = policy._autoscaling_coordinator

    # No resources are available at the start -> no-op
    decision = policy.make_decision_for_non_running_worker_group()
    assert isinstance(decision, NoopDecision)

    # Resources for < min workers are available -> still no-op
    mock_coordinator._allocated_resources = [resources_per_worker] * (min_workers - 1)
    decision = policy.make_decision_for_non_running_worker_group()
    assert isinstance(decision, NoopDecision)

    # Resources for >= min workers are available -> start at min size
    mock_coordinator._allocated_resources = [resources_per_worker] * min_workers
    decision = policy.make_decision_for_non_running_worker_group()
    assert isinstance(decision, ResizeDecision)
    assert decision.num_workers == min_workers

    # Resources for >= max workers are available -> capped at max size
    mock_coordinator._allocated_resources = [resources_per_worker] * max_workers
    decision = policy.make_decision_for_non_running_worker_group()
    assert isinstance(decision, ResizeDecision)
    assert decision.num_workers == max_workers
def test_before_controller_abort():
    """before_controller_abort must cancel the pending coordinator request."""
    config = ScalingConfig(
        num_workers=(2, 4),
        resources_per_worker={"CPU": 4, "GPU": 1},
        use_gpu=True,
    )
    policy = ElasticScalingPolicy(config)
    coordinator = policy._autoscaling_coordinator

    policy.before_controller_abort()

    # Exactly one cancel call, keyed by this policy's requester id.
    coordinator.cancel_request.remote.assert_called_once_with(
        requester_id=policy._requester_id
    )
def test_get_allocated_resources_interval():
    """Tests that remote calls to the AutoscalingCoordinator are spaced out by a minimum time interval."""
    min_workers, max_workers = 4, 64
    resources_per_worker = {"CPU": 8, "GPU": 1}

    get_allocated_resources_interval_s = (
        ElasticScalingPolicy.GET_ALLOCATED_RESOURCES_INTERVAL_S
    )

    scaling_config = ScalingConfig(
        num_workers=(min_workers, max_workers),
        resources_per_worker=resources_per_worker,
        use_gpu=True,
    )
    policy = ElasticScalingPolicy(scaling_config)
    mock_coordinator = policy._autoscaling_coordinator

    with freeze_time() as frozen_time:
        # No resources are available at the start
        allocated_resources = policy._get_allocated_resources()
        assert allocated_resources is None

        # Resources for < min workers are available
        frozen_time.tick(get_allocated_resources_interval_s)
        mock_coordinator._allocated_resources = [resources_per_worker] * (
            min_workers - 1
        )
        allocated_resources = policy._get_allocated_resources()
        assert allocated_resources == [resources_per_worker] * (min_workers - 1)

        # Resources for >= min workers are available, but get_allocated_resources interval
        # has not yet passed -> the cached (stale) value is returned.
        mock_coordinator._allocated_resources = [resources_per_worker] * min_workers
        allocated_resources = policy._get_allocated_resources()
        assert allocated_resources == [resources_per_worker] * (min_workers - 1)

        # Resources for >= min workers are available and the get_allocated_resources
        # interval has passed -> fresh value is fetched.
        frozen_time.tick(get_allocated_resources_interval_s)
        mock_coordinator._allocated_resources = [resources_per_worker] * min_workers
        allocated_resources = policy._get_allocated_resources()
        assert allocated_resources == [resources_per_worker] * min_workers

        # Resources for >= max workers are available but the get_allocated_resources
        # interval has not yet passed -> cached value again.
        mock_coordinator._allocated_resources = [resources_per_worker] * max_workers
        allocated_resources = policy._get_allocated_resources()
        assert allocated_resources == [resources_per_worker] * min_workers

        # Resources for >= max workers are available and the get_allocated_resources
        # interval has passed.
        frozen_time.tick(get_allocated_resources_interval_s)
        allocated_resources = policy._get_allocated_resources()
        assert allocated_resources == [resources_per_worker] * max_workers
@patch.object(ElasticScalingPolicy, "GET_ALLOCATED_RESOURCES_INTERVAL_S", 0.0)
def test_running_worker_group_decision():
    """Test decisions being made when the worker group is running.

    Ensures that the policy will resize the worker group when there is a change
    in available resources.
    """
    min_workers, max_workers = 4, 64
    resources_per_worker = {"CPU": 8, "GPU": 1}
    scaling_config = ScalingConfig(
        num_workers=(min_workers, max_workers),
        resources_per_worker=resources_per_worker,
        use_gpu=True,
        # NOTE: This test just asserts the policy decisions, not the monitor interval.
        elastic_resize_monitor_interval_s=0.0,
    )
    policy = ElasticScalingPolicy(scaling_config)
    mock_coordinator = policy._autoscaling_coordinator

    # The worker group just started at min size
    worker_group_state = _get_mock_worker_group_state(min_workers, time_monotonic())
    worker_group_status = _get_mock_worker_group_status(min_workers)

    # No change in resources -> no-op
    mock_coordinator._allocated_resources = [resources_per_worker] * min_workers
    decision = policy.make_decision_for_running_worker_group(
        worker_group_state=worker_group_state,
        worker_group_status=worker_group_status,
    )
    assert isinstance(decision, NoopDecision)

    # Resources for < min workers are available -> no-op (never shrink below min)
    mock_coordinator._allocated_resources = [resources_per_worker] * (min_workers - 1)
    decision = policy.make_decision_for_running_worker_group(
        worker_group_state=worker_group_state,
        worker_group_status=worker_group_status,
    )
    assert isinstance(decision, NoopDecision)

    # More resources are available -> upscale to max
    mock_coordinator._allocated_resources = [resources_per_worker] * max_workers
    decision = policy.make_decision_for_running_worker_group(
        worker_group_state=worker_group_state,
        worker_group_status=worker_group_status,
    )
    assert isinstance(decision, ResizeDecision)
    assert decision.num_workers == max_workers
def test_monitor_recently_started_worker_group():
    """Test monitor decisions being made when the worker group is running.

    Ensures that resizing decisions are not made too soon after the worker group starts.
    """
    min_workers, max_workers = 4, 64
    monitor_interval_s = 60
    resources_per_worker = {"CPU": 8, "GPU": 1}
    scaling_config = ScalingConfig(
        num_workers=(min_workers, max_workers),
        resources_per_worker=resources_per_worker,
        use_gpu=True,
        elastic_resize_monitor_interval_s=monitor_interval_s,
    )
    policy = ElasticScalingPolicy(scaling_config)
    mock_coordinator = policy._autoscaling_coordinator

    with freeze_time() as frozen_time:
        # The worker group just started
        worker_group_state = _get_mock_worker_group_state(min_workers, time_monotonic())
        worker_group_status = _get_mock_worker_group_status(min_workers)

        # Advance time partway through the monitor interval
        frozen_time.tick(delta=monitor_interval_s / 2)

        # Even though there are new resources available, we should not resize yet
        # because the monitor interval has not passed since startup.
        mock_coordinator._allocated_resources = [resources_per_worker] * (
            max_workers - 1
        )
        assert isinstance(
            policy.make_decision_for_running_worker_group(
                worker_group_state=worker_group_state,
                worker_group_status=worker_group_status,
            ),
            NoopDecision,
        )

        frozen_time.tick(delta=monitor_interval_s / 2)

        # The monitor interval has passed, should detect resources and resize
        decision = policy.make_decision_for_running_worker_group(
            worker_group_state=worker_group_state,
            worker_group_status=worker_group_status,
        )
        assert isinstance(decision, ResizeDecision)
        assert decision.num_workers == max_workers - 1
def test_monitor_long_running_worker_group():
    """Test monitor decisions being made when the worker group is running.

    Ensures that the resizing considerations are not made too frequently.
    """
    min_workers, max_workers = 4, 64
    monitor_interval_s = 60
    resources_per_worker = {"CPU": 8, "GPU": 1}
    scaling_config = ScalingConfig(
        num_workers=(min_workers, max_workers),
        resources_per_worker=resources_per_worker,
        use_gpu=True,
        elastic_resize_monitor_interval_s=monitor_interval_s,
    )
    policy = ElasticScalingPolicy(scaling_config)
    mock_coordinator = policy._autoscaling_coordinator

    with freeze_time() as frozen_time:
        worker_group_state = _get_mock_worker_group_state(min_workers, time_monotonic())
        worker_group_status = _get_mock_worker_group_status(min_workers)
        mock_coordinator._allocated_resources = [resources_per_worker] * min_workers

        # The worker group has been running for a while at the same size
        frozen_time.tick(monitor_interval_s * 60)

        # Consider resizing. No resource change yet -> no-op, but this
        # refreshes the policy's internal "last considered" timestamp.
        decision = policy.make_decision_for_running_worker_group(
            worker_group_state=worker_group_state,
            worker_group_status=worker_group_status,
        )
        assert isinstance(decision, NoopDecision)

        # We recently considered resizing, so we should wait until the next interval
        # to consider again --> no-op even if new resources are available
        mock_coordinator._allocated_resources = [resources_per_worker] * max_workers
        frozen_time.tick(monitor_interval_s / 2)
        decision = policy.make_decision_for_running_worker_group(
            worker_group_state=worker_group_state,
            worker_group_status=worker_group_status,
        )
        assert isinstance(decision, NoopDecision)

        # A full interval has now elapsed -> the resize goes through.
        frozen_time.tick(monitor_interval_s / 2)
        decision = policy.make_decision_for_running_worker_group(
            worker_group_state=worker_group_state,
            worker_group_status=worker_group_status,
        )
        assert isinstance(decision, ResizeDecision)
        assert decision.num_workers == max_workers
def test_count_possible_workers():
    """Test counting the number of workers that can be started with
    available node resources."""
    resources_per_worker = {"CPU": 8, "GPU": 1}
    scaling_config = ScalingConfig(
        num_workers=(1, 8),
        use_gpu=True,
        resources_per_worker=resources_per_worker,
    )
    policy = ElasticScalingPolicy(scaling_config)

    # No resources
    assert policy._count_possible_workers([]) == 0

    # Single node: per-node count is floored by the binding resource.
    assert policy._count_possible_workers([{"CPU": 8, "GPU": 1}]) == 1
    assert policy._count_possible_workers([{"CPU": 16, "GPU": 2}]) == 2
    assert policy._count_possible_workers([{"CPU": 16, "GPU": 1}]) == 1

    # Multinode: counts are summed per node, not pooled across nodes.
    assert policy._count_possible_workers([{"CPU": 7, "GPU": 1}] * 2) == 0
    assert policy._count_possible_workers([{"CPU": 9, "GPU": 2}] * 8) == 8
    assert policy._count_possible_workers([{"CPU": 16, "GPU": 2}] * 2) == 4
    assert policy._count_possible_workers([{"CPU": 8, "GPU": 1}] * 4) == 4

    # If there are excess resources, the number of workers is still capped at max_workers
    assert policy._count_possible_workers([{"CPU": 16, "GPU": 2}] * 10) == 8
def test_count_possible_workers_with_zero_resources():
    """Workers that require no resources are only capped by max_workers."""
    cap = 4
    policy = ElasticScalingPolicy(
        ScalingConfig(
            num_workers=(1, cap),
            resources_per_worker={"CPU": 0, "GPU": 0, "memory": 0},
        )
    )
    node_resources = [{"CPU": 1, "GPU": 1, "memory": 1}]
    assert policy._count_possible_workers(node_resources) == cap
def test_request_and_clear():
    """Tests that the policy makes resource requests and clears the requests."""
    resources_per_worker = {"CPU": 8, "GPU": 1}
    policy = ElasticScalingPolicy(
        scaling_config=ScalingConfig(
            use_gpu=True, resources_per_worker=resources_per_worker, num_workers=(2, 4)
        )
    )
    # The policy doubles as a controller callback for request lifecycle hooks.
    assert isinstance(policy, ControllerCallback)
    mock_coordinator = policy._autoscaling_coordinator

    def assert_resource_request_called_with():
        # The request always asks for max_workers worth of resources.
        nonlocal mock_coordinator
        mock_coordinator.request_resources.remote.assert_called_with(
            requester_id=policy._requester_id,
            resources=[resources_per_worker] * 4,
            expire_after_s=ElasticScalingPolicy.AUTOSCALING_REQUESTS_EXPIRE_TIME_S,
            priority=ResourceRequestPriority.HIGH,
        )

    with freeze_time() as frozen_time:
        worker_group_state = _get_mock_worker_group_state(2, time_monotonic())
        worker_group_status = _get_mock_worker_group_status(2)

        # Test request_resources is called when the controller starts.
        policy.after_controller_start(train_run_context=MagicMock())
        assert mock_coordinator.request_resources.remote.call_count == 1
        assert_resource_request_called_with()

        # Test request_resources is only called in
        # `make_decision_for_running_worker_group`,
        # if `AUTOSCALING_REQUESTS_INTERVAL_S` has passed.
        frozen_time.tick(ElasticScalingPolicy.AUTOSCALING_REQUESTS_INTERVAL_S / 2)
        policy.make_decision_for_running_worker_group(
            worker_group_state=worker_group_state,
            worker_group_status=worker_group_status,
        )
        assert mock_coordinator.request_resources.remote.call_count == 1

        frozen_time.tick(ElasticScalingPolicy.AUTOSCALING_REQUESTS_INTERVAL_S / 2)
        policy.make_decision_for_running_worker_group(
            worker_group_state=worker_group_state,
            worker_group_status=worker_group_status,
        )
        assert mock_coordinator.request_resources.remote.call_count == 2
        assert_resource_request_called_with()

        # Test cancel_request is called when the controller is shutting down.
        policy.before_controller_shutdown()
        mock_coordinator.cancel_request.remote.assert_called_once()
if __name__ == "__main__":
    # Run this module's tests directly, propagating pytest's exit code.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/tests/test_elastic_scaling_policy.py",
"license": "Apache License 2.0",
"lines": 349,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:ci/ray_ci/automation/push_release_test_image.py | """
Push Wanda-cached release test images to ECR, GCP, and Azure registries used for
release tests:
- AWS ECR: anyscale/{image_type}:{tag}
- GCP Artifact Registry: anyscale/{image_type}:{tag}
- Azure Container Registry: anyscale/{image_type}:{tag}
Example:
bazel run //ci/ray_ci/automation:push_release_test_image -- \\
--python-version 3.10 \\
--platform cpu \\
--image-type ray \\
--upload
Run with --help to see all options.
"""
import logging
import subprocess
import sys
from typing import List
import click
import runfiles
from ci.ray_ci.automation.image_tags_lib import (
ImageTagsError,
copy_image,
get_platform_suffixes,
get_python_suffixes,
get_variation_suffix,
image_exists,
)
from ci.ray_ci.container import _AZURE_REGISTRY_NAME
from ci.ray_ci.utils import ci_init, ecr_docker_login
from ray_release.configs.global_config import get_global_config
logging.basicConfig(
level=logging.INFO,
format="%(message)s",
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
_runfiles = runfiles.Create()
class PushReleaseTestImageError(Exception):
    """Error raised when pushing release test images fails.

    Raised for registry authentication failures (gcloud / Azure login) and
    for missing or un-copyable source images.
    """
def _run_gcloud_docker_login() -> None:
    """Authenticate with GCP Artifact Registry using gcloud."""
    cred_file = _runfiles.Rlocation("io_ray/release/aws2gce_iam.json")
    logger.info("Authenticating with GCP Artifact Registry...")
    login = subprocess.run(
        ["gcloud", "auth", "login", "--cred-file", cred_file, "--quiet"],
        capture_output=True,
        text=True,
    )
    if login.returncode != 0:
        raise PushReleaseTestImageError(f"GCP auth login failed: {login.stderr}")
    # Configure docker against the registry hostname only (strip any path).
    registry_host = get_global_config()["byod_gcp_cr"].split("/")[0]
    configure = subprocess.run(
        ["gcloud", "auth", "configure-docker", registry_host, "--quiet"],
        capture_output=True,
        text=True,
    )
    if configure.returncode != 0:
        raise PushReleaseTestImageError(f"GCP docker config failed: {configure.stderr}")
def _run_azure_docker_login() -> None:
    """Authenticate with Azure Container Registry."""
    auth_script = _runfiles.Rlocation("io_ray/release/azure_docker_login.sh")
    logger.info("Authenticating with Azure Container Registry...")
    # The helper script performs the Azure credential setup.
    auth = subprocess.run(
        ["bash", auth_script],
        capture_output=True,
        text=True,
    )
    if auth.returncode != 0:
        raise PushReleaseTestImageError(f"Azure authentication failed: {auth.stderr}")
    logger.info(f"Logging into Azure ACR: {_AZURE_REGISTRY_NAME}...")
    acr = subprocess.run(
        ["az", "acr", "login", "--name", _AZURE_REGISTRY_NAME],
        capture_output=True,
        text=True,
    )
    if acr.returncode != 0:
        raise PushReleaseTestImageError(f"Azure ACR login failed: {acr.stderr}")
class ReleaseTestImagePushContext:
    """Context for publishing an anyscale image from Wanda cache to cloud registries."""

    image_type: str
    python_version: str
    platform: str
    branch: str
    commit: str
    rayci_build_id: str
    pull_request: str
    # Derived in __init__ from the build id and the wanda image name.
    wanda_tag: str

    def __init__(
        self,
        image_type: str,
        python_version: str,
        platform: str,
        branch: str,
        commit: str,
        rayci_build_id: str,
        pull_request: str,
    ) -> None:
        self.image_type = image_type
        self.python_version = python_version
        self.platform = platform
        self.branch = branch
        self.commit = commit
        self.rayci_build_id = rayci_build_id
        self.pull_request = pull_request
        self.wanda_tag = f"{rayci_build_id}-{self.wanda_image_name()}"

    def destination_tags(self) -> List[str]:
        """
        Compute the destination tags for this context.

        Tags are formed as:
            {version}{variation}{python_suffix}{platform_suffix}
        For example:
            - abc123-py310-cpu
            - abc123-py310-gpu
            - abc123-py310
            - 2.53.0.abc123-py310-cu121
        """
        variation = self._variation_suffix()
        # Nesting order (version, platform, python) preserves the exact tag
        # ordering that downstream tooling and tests depend on.
        return [
            f"{version}{variation}{python_suffix}{platform_suffix}"
            for version in self._versions()
            for platform_suffix in self._platform_suffixes()
            for python_suffix in self._python_suffixes()
        ]

    def wanda_image_name(self) -> str:
        """Get the wanda source image name for this context."""
        return f"{self.image_type}-anyscale-py{self.python_version}-{self.platform}"

    def _versions(self) -> List[str]:
        """Compute version tags based on branch/PR status.

        Priority matches original DockerContainer._get_image_version_tags:
        1. master branch -> sha_tag
        2. release branch -> release_version.sha_tag
        3. PR -> pr-{number}.sha_tag
        4. other branches -> sha_tag
        """
        sha_tag = self.commit[:6]
        if self.branch == "master":
            primary = sha_tag
        elif self.branch and self.branch.startswith("releases/"):
            release_version = self.branch[len("releases/") :]
            primary = f"{release_version}.{sha_tag}"
        elif self.pull_request != "false":
            primary = f"pr-{self.pull_request}.{sha_tag}"
        else:
            primary = sha_tag
        # The rayci build id doubles as an extra uniquely-addressable tag.
        return [primary, self.rayci_build_id] if self.rayci_build_id else [primary]

    def _variation_suffix(self) -> str:
        """Get -extra suffix for extra image types."""
        return get_variation_suffix(self.image_type)

    def _python_suffixes(self) -> List[str]:
        """Get python version suffixes (includes empty for default version)."""
        return get_python_suffixes(self.python_version)

    def _platform_suffixes(self) -> List[str]:
        """Get platform suffixes (includes aliases like -gpu for GPU_PLATFORM)."""
        return get_platform_suffixes(self.platform, self.image_type)
@click.command()
@click.option(
    "--python-version",
    type=str,
    required=True,
    help="Python version (e.g., '3.10')",
)
@click.option(
    "--platform",
    type=str,
    required=True,
    help="Platform (e.g., 'cpu', 'cu12.3.2-cudnn9')",
)
@click.option(
    "--image-type",
    type=str,
    default="ray",
    help="Image type (e.g., 'ray', 'ray-llm', 'ray-ml')",
)
@click.option(
    "--upload",
    is_flag=True,
    default=False,
    help="Actually push to registries. Without this flag, runs in dry-run mode.",
)
@click.option(
    "--rayci-work-repo",
    type=str,
    required=True,
    envvar="RAYCI_WORK_REPO",
    help="ECR work repo (e.g., '029272617770.dkr.ecr.us-west-2.amazonaws.com/rayproject/citemp')",
)
@click.option(
    "--rayci-build-id",
    type=str,
    required=True,
    envvar="RAYCI_BUILD_ID",
    help="Rayci build ID",
)
@click.option("--branch", type=str, required=True, envvar="BUILDKITE_BRANCH")
@click.option("--commit", type=str, required=True, envvar="BUILDKITE_COMMIT")
@click.option(
    "--pull-request", type=str, default="false", envvar="BUILDKITE_PULL_REQUEST"
)
def main(
    python_version: str,
    platform: str,
    image_type: str,
    upload: bool,
    rayci_work_repo: str,
    rayci_build_id: str,
    branch: str,
    commit: str,
    pull_request: str,
) -> None:
    """
    Push a Wanda-cached release test image to ECR, GCP, and Azure registries.
    Handles authentication for all three registries internally.
    """
    ci_init()
    dry_run = not upload
    if dry_run:
        logger.info("DRY RUN MODE - no images will be pushed")
    ctx = ReleaseTestImagePushContext(
        image_type=image_type,
        python_version=python_version,
        platform=platform,
        branch=branch,
        commit=commit,
        rayci_build_id=rayci_build_id,
        pull_request=pull_request,
    )
    ecr_registry = rayci_work_repo.split("/")[0]
    source_tag = f"{rayci_work_repo}:{ctx.wanda_tag}"
    logger.info(f"Source image (Wanda): {source_tag}")
    # Get image tags
    try:
        tags = ctx.destination_tags()
    except ImageTagsError as e:
        # Chain the cause (PEP 3134) so the original traceback is preserved
        # instead of being discarded when re-raising as our own error type.
        raise PushReleaseTestImageError(str(e)) from e
    canonical_tag = tags[0]
    logger.info(f"Canonical tag: {canonical_tag}")
    logger.info(f"All tags: {tags}")
    # Destination registries (from global config)
    global_config = get_global_config()
    registries = [
        (ecr_registry, "ECR"),
        (global_config["byod_gcp_cr"], "GCP"),
        (global_config["byod_azure_cr"], "Azure"),
    ]
    if dry_run:
        # Log what would be pushed, then stop before any auth or network work.
        for tag in tags:
            for registry, name in registries:
                dest_image = f"{registry}/anyscale/{image_type}:{tag}"
                logger.info(f"Would push to {name}: {dest_image}")
        return
    # Authenticate with all registries
    ecr_docker_login(ecr_registry)
    _run_gcloud_docker_login()
    _run_azure_docker_login()
    # Verify source image exists
    logger.info("Verifying source image in Wanda cache...")
    if not image_exists(source_tag):
        raise PushReleaseTestImageError(
            f"Source image not found in Wanda cache: {source_tag}"
        )
    # Push to all three registries
    try:
        for tag in tags:
            for registry, name in registries:
                dest_image = f"{registry}/anyscale/{image_type}:{tag}"
                logger.info(f"Pushing to {name}: {dest_image}")
                copy_image(source_tag, dest_image, dry_run=False)
    except ImageTagsError as e:
        # Chain the cause so the failing copy's traceback is preserved.
        raise PushReleaseTestImageError(str(e)) from e
    logger.info("Successfully pushed release test images to all registries")


if __name__ == "__main__":
    main()
| {
"repo_id": "ray-project/ray",
"file_path": "ci/ray_ci/automation/push_release_test_image.py",
"license": "Apache License 2.0",
"lines": 276,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:ci/ray_ci/automation/test_push_release_test_image.py | import sys
import pytest
from ci.ray_ci.automation.image_tags_lib import (
format_platform_tag,
format_python_tag,
)
from ci.ray_ci.automation.push_release_test_image import ReleaseTestImagePushContext
from ci.ray_ci.configs import DEFAULT_PYTHON_TAG_VERSION
from ci.ray_ci.docker_container import GPU_PLATFORM
def make_ctx(
    image_type: str = "ray",
    python_version: str = "3.11",
    platform: str = "cu12.3.2-cudnn9",
    branch: str = "master",
    commit: str = "abc123def456",
    rayci_build_id: str = "build-123",
    pull_request: str = "false",
) -> ReleaseTestImagePushContext:
    """Build a ReleaseTestImagePushContext, overriding only the fields given."""
    # Forward every parameter by name; defaults mirror a typical master build.
    params = {
        "image_type": image_type,
        "python_version": python_version,
        "platform": platform,
        "branch": branch,
        "commit": commit,
        "rayci_build_id": rayci_build_id,
        "pull_request": pull_request,
    }
    return ReleaseTestImagePushContext(**params)
class TestFormatPythonTag:
    """format_python_tag() maps a 'X.Y' python version to a '-pyXY' tag suffix."""

    @pytest.mark.parametrize(
        ("python_version", "expected"),
        [
            ("3.10", "-py310"),
            ("3.11", "-py311"),
            ("3.12", "-py312"),
            ("3.9", "-py39"),
        ],
    )
    def test_format_python_tag(self, python_version, expected):
        assert format_python_tag(python_version) == expected
class TestFormatPlatformTag:
    """format_platform_tag() maps a platform string to its tag suffix.

    CUDA platforms keep only major+minor digits (e.g. 'cu12.3.2-cudnn9' -> '-cu123').
    """

    @pytest.mark.parametrize(
        ("platform", "expected"),
        [
            ("cpu", "-cpu"),
            ("cu11.7.1-cudnn8", "-cu117"),
            ("cu11.8.0-cudnn8", "-cu118"),
            ("cu12.1.1-cudnn8", "-cu121"),
            ("cu12.3.2-cudnn9", "-cu123"),
            ("cu12.8.1-cudnn", "-cu128"),
        ],
    )
    def test_format_platform_tag(self, platform, expected):
        assert format_platform_tag(platform) == expected
class TestWandaImageName:
    """wanda_image_name() is '{image_type}-anyscale-py{python_version}-{platform}'."""

    @pytest.mark.parametrize(
        ("python_version", "platform", "image_type", "expected"),
        [
            ("3.10", "cpu", "ray", "ray-anyscale-py3.10-cpu"),
            ("3.11", "cu12.1.1-cudnn8", "ray", "ray-anyscale-py3.11-cu12.1.1-cudnn8"),
            ("3.10", "cpu", "ray-llm", "ray-llm-anyscale-py3.10-cpu"),
            (
                "3.11",
                "cu12.8.1-cudnn",
                "ray-llm",
                "ray-llm-anyscale-py3.11-cu12.8.1-cudnn",
            ),
            ("3.10", "cpu", "ray-ml", "ray-ml-anyscale-py3.10-cpu"),
            (
                "3.11",
                "cu12.3.2-cudnn9",
                "ray-ml",
                "ray-ml-anyscale-py3.11-cu12.3.2-cudnn9",
            ),
        ],
    )
    def test_wanda_image_name(self, python_version, platform, image_type, expected):
        ctx = make_ctx(
            python_version=python_version, platform=platform, image_type=image_type
        )
        assert ctx.wanda_image_name() == expected
class TestDestinationTags:
    """destination_tags() output for branch/PR state, platform and image type.

    Expected lists are ordered: primary version tag first, then the
    rayci build id tag.
    """

    def test_master_branch_tags(self):
        ctx = make_ctx(
            python_version="3.11",
            platform="cu12.3.2-cudnn9",
            image_type="ray",
            branch="master",
            commit="abc123def456",
            rayci_build_id="build-123",
        )
        tags = ctx.destination_tags()
        assert tags == [
            "abc123-py311-cu123",
            "build-123-py311-cu123",
        ]

    def test_release_branch_tags(self):
        ctx = make_ctx(
            python_version="3.11",
            platform="cu12.3.2-cudnn9",
            image_type="ray",
            branch="releases/2.44.0",
            commit="abc123def456",
            rayci_build_id="build-456",
        )
        tags = ctx.destination_tags()
        assert tags == [
            "2.44.0.abc123-py311-cu123",
            "build-456-py311-cu123",
        ]

    def test_gpu_platform_includes_alias(self):
        """GPU_PLATFORM gets -gpu alias, other platforms do not."""
        gpu_ctx = make_ctx(
            python_version="3.11", platform=GPU_PLATFORM, image_type="ray"
        )
        other_cuda_ctx = make_ctx(
            python_version="3.11", platform="cu12.3.2-cudnn9", image_type="ray"
        )
        cpu_ctx = make_ctx(python_version="3.11", platform="cpu", image_type="ray")
        gpu_tags = gpu_ctx.destination_tags()
        other_cuda_tags = other_cuda_ctx.destination_tags()
        cpu_tags = cpu_ctx.destination_tags()
        assert any("-gpu" in tag for tag in gpu_tags)
        assert not any("-gpu" in tag for tag in other_cuda_tags)
        assert not any("-gpu" in tag for tag in cpu_tags)

    def test_ray_ml_gpu_platform_includes_empty_platform_suffix(self):
        """ray-ml with GPU_PLATFORM gets -gpu alias AND empty platform suffix."""
        ctx = make_ctx(
            python_version="3.11", platform=GPU_PLATFORM, image_type="ray-ml"
        )
        tags = ctx.destination_tags()
        # Should have: -cu121, -gpu, and empty suffix (no platform)
        assert "abc123-py311-cu121" in tags
        assert "abc123-py311-gpu" in tags
        assert "abc123-py311" in tags
        assert "build-123-py311-cu121" in tags
        assert "build-123-py311-gpu" in tags
        assert "build-123-py311" in tags

    def test_ray_ml_non_gpu_platform_no_empty_suffix(self):
        """ray-ml with non-GPU_PLATFORM does NOT get empty suffix."""
        ctx = make_ctx(
            python_version="3.11", platform="cu12.3.2-cudnn9", image_type="ray-ml"
        )
        tags = ctx.destination_tags()
        assert tags == [
            "abc123-py311-cu123",
            "build-123-py311-cu123",
        ]

    def test_pr_branch_tags(self):
        ctx = make_ctx(
            python_version="3.12",
            platform="cu12.3.2-cudnn9",
            image_type="ray",
            branch="feature-branch",
            commit="abc123def456",
            rayci_build_id="build-789",
            pull_request="123",
        )
        tags = ctx.destination_tags()
        assert tags == [
            "pr-123.abc123-py312-cu123",
            "build-789-py312-cu123",
        ]

    def test_non_pr_feature_branch_tags(self):
        # pull_request="false" means no PR; falls back to the plain sha tag.
        ctx = make_ctx(
            python_version="3.11",
            platform="cu12.3.2-cudnn9",
            image_type="ray",
            branch="feature-branch",
            commit="abc123def456",
            rayci_build_id="build-789",
            pull_request="false",
        )
        tags = ctx.destination_tags()
        assert tags == [
            "abc123-py311-cu123",
            "build-789-py311-cu123",
        ]

    def test_default_python_version_includes_empty_suffix(self):
        """DEFAULT_PYTHON_TAG_VERSION (3.10) gets empty python suffix."""
        assert DEFAULT_PYTHON_TAG_VERSION == "3.10"
        ctx = make_ctx(
            python_version="3.10", platform="cu12.3.2-cudnn9", image_type="ray"
        )
        tags = ctx.destination_tags()
        # Should have both -py310 and empty python suffix
        assert "abc123-py310-cu123" in tags
        assert "abc123-cu123" in tags
        assert "build-123-py310-cu123" in tags
        assert "build-123-cu123" in tags

    def test_cpu_ray_includes_empty_platform_suffix(self):
        """ray with cpu gets empty platform suffix."""
        ctx = make_ctx(python_version="3.11", platform="cpu", image_type="ray")
        tags = ctx.destination_tags()
        # Should have both -cpu and empty platform suffix
        assert "abc123-py311-cpu" in tags
        assert "abc123-py311" in tags
        assert "build-123-py311-cpu" in tags
        assert "build-123-py311" in tags

    def test_cpu_ray_ml_no_empty_platform_suffix(self):
        """ray-ml with cpu does NOT get empty platform suffix."""
        ctx = make_ctx(python_version="3.11", platform="cpu", image_type="ray-ml")
        tags = ctx.destination_tags()
        assert tags == [
            "abc123-py311-cpu",
            "build-123-py311-cpu",
        ]

    def test_extra_image_type_includes_variation_suffix(self):
        """-extra image types get -extra variation suffix."""
        ray_extra_ctx = make_ctx(
            python_version="3.11", platform="cu12.3.2-cudnn9", image_type="ray-extra"
        )
        ray_ml_extra_ctx = make_ctx(
            python_version="3.11", platform="cu12.3.2-cudnn9", image_type="ray-ml-extra"
        )
        ray_extra_tags = ray_extra_ctx.destination_tags()
        ray_ml_extra_tags = ray_ml_extra_ctx.destination_tags()
        assert "abc123-extra-py311-cu123" in ray_extra_tags
        assert "abc123-extra-py311-cu123" in ray_ml_extra_tags

    def test_ray_extra_cpu_includes_empty_platform_suffix(self):
        """ray-extra with cpu gets empty platform suffix (like ray)."""
        ctx = make_ctx(python_version="3.11", platform="cpu", image_type="ray-extra")
        tags = ctx.destination_tags()
        # Should have both -cpu and empty platform suffix
        assert "abc123-extra-py311-cpu" in tags
        assert "abc123-extra-py311" in tags
# Allow running this test module directly with pytest.
if __name__ == "__main__":
    sys.exit(pytest.main(["-vv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "ci/ray_ci/automation/test_push_release_test_image.py",
"license": "Apache License 2.0",
"lines": 228,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:ci/ray_ci/automation/test_push_release_test_image_diff.py | """
Diff test to ensure ReleaseTestImagePushContext produces identical tags
to the original AnyscaleDockerContainer/DockerContainer implementation.
"""
import os
import sys
from unittest import mock
import pytest
from ci.ray_ci.anyscale_docker_container import AnyscaleDockerContainer
from ci.ray_ci.automation.push_release_test_image import ReleaseTestImagePushContext
from ci.ray_ci.docker_container import GPU_PLATFORM, RayType
# Test matrix covering various scenarios.
# Each tuple is fed to both the legacy AnyscaleDockerContainer path and the new
# ReleaseTestImagePushContext so the diff tests below can compare their output.
TEST_CASES = [
    # (python_version, platform, image_type, branch, commit, build_id, pr)
    # Master branch scenarios
    ("3.11", "cpu", "ray", "master", "abc123def456", "build-123", "false"),
    ("3.11", "cpu", "ray-ml", "master", "abc123def456", "build-123", "false"),
    ("3.11", GPU_PLATFORM, "ray", "master", "abc123def456", "build-123", "false"),
    ("3.11", GPU_PLATFORM, "ray-ml", "master", "abc123def456", "build-123", "false"),
    ("3.10", "cpu", "ray", "master", "abc123def456", "build-123", "false"),
    ("3.10", GPU_PLATFORM, "ray-ml", "master", "abc123def456", "build-123", "false"),
    # Release branch scenarios
    ("3.11", "cpu", "ray", "releases/2.44.0", "abc123def456", "build-456", "false"),
    (
        "3.11",
        GPU_PLATFORM,
        "ray-ml",
        "releases/2.44.0",
        "abc123def456",
        "build-456",
        "false",
    ),
    ("3.10", "cpu", "ray", "releases/2.44.0", "abc123def456", "build-456", "false"),
    # PR scenarios
    ("3.11", "cpu", "ray", "feature-branch", "abc123def456", "build-789", "123"),
    (
        "3.11",
        GPU_PLATFORM,
        "ray-ml",
        "feature-branch",
        "abc123def456",
        "build-789",
        "456",
    ),
    # Feature branch (no PR)
    ("3.11", "cpu", "ray", "feature-branch", "abc123def456", "build-789", "false"),
    # Other CUDA versions (not GPU_PLATFORM) - only valid for ray, not ray-ml
    ("3.11", "cu12.3.2-cudnn9", "ray", "master", "abc123def456", "build-123", "false"),
    # Extra image types
    ("3.11", "cpu", "ray-extra", "master", "abc123def456", "build-123", "false"),
    (
        "3.11",
        GPU_PLATFORM,
        "ray-ml-extra",
        "master",
        "abc123def456",
        "build-123",
        "false",
    ),
]
def image_type_to_ray_type(image_type: str) -> RayType:
    """Convert string image type to RayType enum."""
    # Presumably RayType is a value-backed Enum, so construction performs the
    # lookup (and would raise ValueError for an unknown image type) — TODO confirm.
    return RayType(image_type)
class TestAnyscaleImageTagsDiff:
    """Test that new implementation matches original exactly."""

    @pytest.mark.parametrize(
        (
            "python_version",
            "platform",
            "image_type",
            "branch",
            "commit",
            "build_id",
            "pr",
        ),
        TEST_CASES,
    )
    def test_tags_match_original(
        self,
        python_version: str,
        platform: str,
        image_type: str,
        branch: str,
        commit: str,
        build_id: str,
        pr: str,
    ):
        """Compare tags from new ReleaseTestImagePushContext with original DockerContainer."""
        # The legacy container reads its configuration from Buildkite/rayci
        # environment variables, so patch them in for the comparison.
        env = {
            "RAYCI_CHECKOUT_DIR": "/ray",
            "RAYCI_BUILD_ID": build_id,
            "RAYCI_WORK_REPO": "rayproject/citemp",
            "BUILDKITE_COMMIT": commit,
            "BUILDKITE_BRANCH": branch,
            "BUILDKITE_PIPELINE_ID": "123456",
            "BUILDKITE_PULL_REQUEST": pr,
        }
        with mock.patch.dict(os.environ, env, clear=False):
            # Create original container (AnyscaleDockerContainer inherits from DockerContainer)
            original = AnyscaleDockerContainer(
                python_version=python_version,
                platform=platform,
                image_type=image_type_to_ray_type(image_type),
                upload=False,
            )
            original_tags = original._get_image_tags()
            # Create new context
            new_ctx = ReleaseTestImagePushContext(
                image_type=image_type,
                python_version=python_version,
                platform=platform,
                branch=branch,
                commit=commit,
                rayci_build_id=build_id,
                pull_request=pr,
            )
            new_tags = new_ctx.destination_tags()
            # Compare - sort both to ignore order differences
            assert sorted(original_tags) == sorted(new_tags), (
                f"Tags mismatch for {image_type} py{python_version} {platform} on {branch}\n"
                f"Original: {sorted(original_tags)}\n"
                f"New: {sorted(new_tags)}"
            )

    @pytest.mark.parametrize(
        (
            "python_version",
            "platform",
            "image_type",
            "branch",
            "commit",
            "build_id",
            "pr",
        ),
        TEST_CASES,
    )
    def test_wanda_image_name_format(
        self,
        python_version: str,
        platform: str,
        image_type: str,
        branch: str,
        commit: str,
        build_id: str,
        pr: str,
    ):
        """Verify wanda image name follows expected format."""
        new_ctx = ReleaseTestImagePushContext(
            image_type=image_type,
            python_version=python_version,
            platform=platform,
            branch=branch,
            commit=commit,
            rayci_build_id=build_id,
            pull_request=pr,
        )
        wanda_name = new_ctx.wanda_image_name()
        # Wanda image name should be: {image_type}-anyscale-py{version}-{platform}
        assert wanda_name.startswith(f"{image_type}-anyscale-py{python_version}-")
        if platform == "cpu":
            assert wanda_name.endswith("-cpu")
        else:
            assert wanda_name.endswith(f"-{platform}")

    @pytest.mark.parametrize(
        (
            "python_version",
            "platform",
            "image_type",
            "branch",
            "commit",
            "build_id",
            "pr",
        ),
        TEST_CASES,
    )
    def test_canonical_tag_matches(
        self,
        python_version: str,
        platform: str,
        image_type: str,
        branch: str,
        commit: str,
        build_id: str,
        pr: str,
    ):
        """Verify the canonical (first) tag matches between implementations."""
        env = {
            "RAYCI_CHECKOUT_DIR": "/ray",
            "RAYCI_BUILD_ID": build_id,
            "RAYCI_WORK_REPO": "rayproject/citemp",
            "BUILDKITE_COMMIT": commit,
            "BUILDKITE_BRANCH": branch,
            "BUILDKITE_PIPELINE_ID": "123456",
            "BUILDKITE_PULL_REQUEST": pr,
        }
        with mock.patch.dict(os.environ, env, clear=False):
            original = AnyscaleDockerContainer(
                python_version=python_version,
                platform=platform,
                image_type=image_type_to_ray_type(image_type),
                upload=False,
            )
            original_canonical = original._get_canonical_tag()
            new_ctx = ReleaseTestImagePushContext(
                image_type=image_type,
                python_version=python_version,
                platform=platform,
                branch=branch,
                commit=commit,
                rayci_build_id=build_id,
                pull_request=pr,
            )
            # The new implementation defines the canonical tag as the first
            # destination tag.
            new_canonical = new_ctx.destination_tags()[0]
            assert original_canonical == new_canonical, (
                f"Canonical tag mismatch for {image_type} py{python_version} {platform}\n"
                f"Original: {original_canonical}\n"
                f"New: {new_canonical}"
            )
# Allow running this diff-test module directly with pytest.
if __name__ == "__main__":
    sys.exit(pytest.main(["-vv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "ci/ray_ci/automation/test_push_release_test_image_diff.py",
"license": "Apache License 2.0",
"lines": 219,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/test_monotonically_increasing_id.py | import pandas as pd
import pyarrow as pa
import pytest
import ray
from ray.data.expressions import monotonically_increasing_id
from ray.data.tests.conftest import * # noqa
from ray.tests.conftest import * # noqa
@pytest.mark.parametrize(
    "block_type",
    ["arrow", "pandas"],
)
def test_monotonically_increasing_id(ray_start_regular_shared, block_type):
    """Test monotonically_increasing_id() expression produces monotonically increasing IDs."""
    if block_type == "arrow":
        blocks = [pa.table({"a": [1, 2]}), pa.table({"a": [3, 4]})]
    else:
        blocks = [pd.DataFrame({"a": [1, 2]}), pd.DataFrame({"a": [3, 4]})]
    # Create dataset with 2 blocks of 2 rows each
    ds = ray.data.from_blocks(blocks)
    ds = ds.with_column("uid", monotonically_increasing_id())
    # First block yields rows {0, 1}; the second block's ids are offset by
    # 1 << 33.
    expected = {0, 1, (1 << 33) + 0, (1 << 33) + 1}
    all_ids = []
    for batch in ds.iter_batches(batch_size=None, batch_format="pyarrow"):
        block_ids = batch["uid"].to_pylist()
        all_ids.extend(block_ids)
        # Only checks monotonicity within a batch, not across the dataset.
        assert block_ids == sorted(block_ids), "block IDs are not monotonic"
    assert set(all_ids) == expected
def test_monotonically_increasing_id_multiple_expressions(ray_start_regular_shared):
    """
    Test that two monotonically_increasing_id() expressions are isolated
    if executed by the same task.
    """
    ds = ray.data.range(10, override_num_blocks=5)
    # Two monotonically_increasing_id() expressions should have isolated row counts
    ds = ds.with_column("uid1", monotonically_increasing_id())
    ds = ds.with_column("uid2", monotonically_increasing_id())
    result = ds.to_pandas()
    # Isolated counters mean both columns produce the same per-row sequence.
    assert list(result["uid1"]) == list(result["uid2"])
def test_monotonically_increasing_id_multi_block_per_task(ray_start_regular_shared):
    """Test that IDs are unique when a single task processes multiple blocks."""
    ctx = ray.data.DataContext.get_current()
    original_max_block_size = ctx.target_max_block_size
    try:
        # Set max block size to 32 bytes ~ 4 int64 rows per block.
        # With 5 read tasks of 20 rows each every task should see 5 blocks.
        ctx.target_max_block_size = 32
        ds = ray.data.range(100, override_num_blocks=5)
        ds = ds.with_column("uid", monotonically_increasing_id())
        result = ds.take_all()
        uids = [row["uid"] for row in result]
        assert len(uids) == 100, f"expected 100 rows, got {len(uids)}"
        assert len(uids) == len(set(uids)), "IDs are not unique across blocks"
    finally:
        # Restore the shared DataContext so other tests are unaffected.
        ctx.target_max_block_size = original_max_block_size
def test_monotonically_increasing_id_structurally_equals_always_false():
    """structurally_equals() is always False for monotonically_increasing_id().

    The expression is non-deterministic, so no two instances — not even the
    same instance compared with itself — are considered structurally equal.
    """
    first = monotonically_increasing_id()
    second = monotonically_increasing_id()
    for left, right in ((first, second), (first, first)):
        assert not left.structurally_equals(right)
def test_monotonically_increasing_id_shuffle_and_sort(ray_start_regular_shared):
    """Test monotonically_increasing_id() in shuffle and sort."""
    ds = ray.data.range(20, override_num_blocks=5)
    ds = ds.with_column("uid", monotonically_increasing_id())
    # Shuffle then sort by uid: ids must survive the round trip as a unique,
    # fully ordered set.
    ds = ds.random_shuffle()
    ds = ds.sort("uid")
    result = ds.take_all()
    uids = [row["uid"] for row in result]
    assert len(uids) == len(set(uids)), "ids are not unique"
    assert uids == sorted(uids), "ids are not sorted"
# Allow running this test module directly with pytest.
if __name__ == "__main__":
    import sys
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_monotonically_increasing_id.py",
"license": "Apache License 2.0",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/_internal/datasource/turbopuffer_datasink.py | """
TurbopufferDatasink - Ray Data datasink for Turbopuffer vector database
Implementation following the pattern of MongoDatasink and Daft's Turbopuffer sink.
This is based on [Turbopuffer Write API](https://turbopuffer.com/docs/write)
"""
import logging
import os
from typing import TYPE_CHECKING, Iterable, Literal, Optional, Union
import pyarrow as pa
import pyarrow.compute as pc
from ray._common.retry import call_with_retry
from ray.data._internal.arrow_block import ArrowBlockAccessor
from ray.data._internal.arrow_ops import transform_pyarrow
from ray.data._internal.execution.interfaces import TaskContext
from ray.data._internal.planner.exchange.sort_task_spec import SortKey
from ray.data._internal.util import _check_import
from ray.data.block import Block, BlockAccessor
from ray.data.datasource.datasink import Datasink
if TYPE_CHECKING:
import turbopuffer
logger = logging.getLogger(__name__)
# Reserved column names for Turbopuffer
_ID_COLUMN = "id"  # destination name for the document-identifier column
_VECTOR_COLUMN = "vector"  # destination name for the embedding column
# Environment variable consulted when no api_key argument is given.
TURBOPUFFER_API_KEY_ENV_VAR = "TURBOPUFFER_API_KEY"
class TurbopufferDatasink(Datasink):
"""Turbopuffer Ray Datasink.
A Ray :class:`~ray.data.datasource.Datasink` for writing data into the
`Turbopuffer <https://turbopuffer.com/>`_ vector database.
Supports two modes of operation:
* **Single namespace** -- provide ``namespace`` to write all rows into one
Turbopuffer namespace.
* **Multi-namespace** -- provide ``namespace_column`` to route each row to
the namespace whose name is stored in that column. The column is
automatically dropped before the data is sent to Turbopuffer.
Exactly one of ``namespace`` or ``namespace_column`` must be supplied.
Args:
namespace: Name of the Turbopuffer namespace to write into.
Mutually exclusive with ``namespace_column``.
namespace_column: Name of a column whose values determine the
target namespace for each row. Rows are grouped by this column
and each group is written to its corresponding namespace. The
column is removed from the data before writing. Mutually
exclusive with ``namespace``.
region: Turbopuffer region identifier (for example,
``"gcp-us-central1"``). Mutually exclusive with ``base_url``.
Exactly one of ``region`` or ``base_url`` must be supplied.
base_url: Base URL for the Turbopuffer API (for example,
``"https://gcp-us-central1.turbopuffer.com"``). Mutually
exclusive with ``region``. Exactly one of ``region`` or
``base_url`` must be supplied.
api_key: Turbopuffer API key. If omitted, the value is read from the
``TURBOPUFFER_API_KEY`` environment variable.
schema: Optional Turbopuffer schema definition to pass along with
writes. If provided, it is forwarded as the ``schema`` argument
to ``namespace.write``.
id_column: Name of the column to treat as the document identifier.
Rows with null IDs are dropped before writing. Defaults to ``"id"``.
vector_column: Name of the column containing embedding vectors.
If this differs from ``"vector"``, it is renamed to ``"vector"``
before writing. Defaults to ``"vector"``.
batch_size: Maximum number of rows to include in a single Turbopuffer
write call (logical row batching; subject to Turbopuffer's
256MiB request-size limit). Defaults to ``10000``.
distance_metric: Distance metric for the namespace. Passed to
``namespace.write`` as the ``distance_metric`` argument.
Defaults to ``"cosine_distance"``.
concurrency: Unused; Ray Data controls write parallelism via
:meth:`~ray.data.Dataset.write_datasink` ``concurrency``.
Examples:
Write to a single namespace using a region:
.. testcode::
:skipif: True
import ray
from ray.data._internal.datasource.turbopuffer_datasink import (
TurbopufferDatasink,
)
ds = ray.data.range(100)
ds = ds.map_batches(lambda batch: {"id": batch["id"], "vector": ...})
ds.write_datasink(
TurbopufferDatasink(
namespace="my-namespace",
api_key="<YOUR_API_KEY>",
region="gcp-us-central1",
)
)
Write using a base URL instead of a region:
.. testcode::
:skipif: True
ds.write_datasink(
TurbopufferDatasink(
namespace="my-namespace",
api_key="<YOUR_API_KEY>",
base_url="https://gcp-us-central1.turbopuffer.com",
)
)
Write to multiple namespaces driven by a column:
.. testcode::
:skipif: True
ds.write_datasink(
TurbopufferDatasink(
namespace_column="tenant",
api_key="<YOUR_API_KEY>",
region="gcp-us-central1",
)
)
"""
    def __init__(
        self,
        namespace: Optional[str] = None,
        *,
        namespace_column: Optional[str] = None,
        region: Optional[str] = None,
        base_url: Optional[str] = None,
        api_key: Optional[str] = None,
        schema: Optional[dict] = None,
        id_column: str = "id",
        vector_column: str = "vector",
        batch_size: int = 10000,
        distance_metric: Literal[
            "cosine_distance", "euclidean_distance"
        ] = "cosine_distance",
        concurrency: Optional[int] = None,
    ):
        # Fail fast with an actionable error if the turbopuffer SDK is absent.
        _check_import(self, module="turbopuffer", package="turbopuffer")
        # Validate namespace / namespace_column mutual exclusivity.
        if namespace and namespace_column:
            raise ValueError(
                "Specify exactly one of 'namespace' or 'namespace_column', " "not both."
            )
        if not namespace and not namespace_column:
            raise ValueError(
                "Either 'namespace' or 'namespace_column' must be provided."
            )
        # Validate region / base_url mutual exclusivity.
        if region is not None and base_url is not None:
            raise ValueError("Specify exactly one of 'region' or 'base_url', not both.")
        if region is None and base_url is None:
            raise ValueError("Either 'region' or 'base_url' must be provided.")
        # Store configuration
        self.namespace = namespace
        self.namespace_column = namespace_column
        # Fall back to the TURBOPUFFER_API_KEY environment variable.
        self.api_key = api_key or os.getenv(TURBOPUFFER_API_KEY_ENV_VAR)
        self.region = region
        self.base_url = base_url
        self.schema = schema
        self.id_column = id_column
        self.vector_column = vector_column
        self.batch_size = batch_size
        self.distance_metric = distance_metric
        # NOTE: `concurrency` is accepted but unused; Ray Data controls write
        # parallelism (see class docstring).
        # Validate column configuration
        if self.id_column == self.vector_column:
            raise ValueError(
                "id_column and vector_column refer to the same column "
                f"'{self.id_column}'. They must be distinct."
            )
        if self.namespace_column and self.namespace_column in (
            self.id_column,
            self.vector_column,
        ):
            raise ValueError(
                f"namespace_column '{self.namespace_column}' must not be the "
                f"same as id_column ('{self.id_column}') or vector_column "
                f"('{self.vector_column}')."
            )
        # Validate API key
        if not self.api_key:
            raise ValueError(
                "API key is required. Provide via api_key parameter or "
                "TURBOPUFFER_API_KEY environment variable"
            )
        # Client is created lazily on first _get_client() call.
        self._client = None
def __getstate__(self) -> dict:
"""Exclude `_client` during pickling."""
state = self.__dict__.copy()
state.pop("_client", None)
return state
def __setstate__(self, state: dict) -> None:
self.__dict__.update(state)
self._client = None
def _get_client(self):
"""Lazy initialize Turbopuffer client."""
if self._client is None:
import turbopuffer
kwargs = {"api_key": self.api_key}
if self.region is not None:
kwargs["region"] = self.region
else:
kwargs["base_url"] = self.base_url
self._client = turbopuffer.Turbopuffer(**kwargs)
return self._client
    def write(
        self,
        blocks: Iterable[Block],
        ctx: TaskContext,
    ) -> None:
        """
        Write blocks to Turbopuffer in a streaming fashion.
        For memory efficiency, blocks are processed one at a time rather than
        concatenating all blocks into a single large table. This follows the
        pattern used by ClickHouseDatasink.
        Each block is prepared (columns renamed, null IDs filtered), then
        written in batches of ``batch_size``.
        When ``namespace_column`` is set, each block is grouped by the
        namespace column and each group is written to its corresponding
        Turbopuffer namespace.
        """
        client = self._get_client()
        for block in blocks:
            accessor = BlockAccessor.for_block(block)
            table = accessor.to_arrow()
            # Nothing to send for empty blocks.
            if table.num_rows == 0:
                continue
            if self.namespace_column:
                # Multi-namespace: group by namespace column, write to each.
                self._write_multi_namespace(client, table)
            else:
                # Single namespace.
                table = self._prepare_arrow_table(table)
                # Preparation drops rows with null IDs, which may empty the table.
                if table.num_rows == 0:
                    continue
                ns = client.namespace(self.namespace)
                # max_chunksize caps rows per write call (logical row batching).
                for batch in table.to_batches(max_chunksize=self.batch_size):
                    self._write_batch_with_retry(ns, batch, self.namespace)
def _rename_column_if_needed(
self,
table: pa.Table,
source_column: str,
target_column: str,
column_type: str,
) -> pa.Table:
"""
Rename a column in the table if it differs from the target name.
Args:
table: The Arrow table to modify.
source_column: The current column name in the table.
target_column: The required column name for Turbopuffer.
column_type: Human-readable type for error messages (e.g., "ID", "Vector").
Returns:
The table with the column renamed, or the original table if no rename needed.
Raises:
ValueError: If source column is missing or target column already exists.
"""
if source_column not in table.column_names:
raise ValueError(
f"{column_type} column '{source_column}' not found in table"
)
# No rename needed if source and target are the same
if source_column == target_column:
return table
if target_column in table.column_names:
raise ValueError(
f"Table already has a '{target_column}' column; cannot also rename "
f"'{source_column}' to '{target_column}'. Please disambiguate your schema."
)
return BlockAccessor.for_block(table).rename_columns(
{source_column: target_column}
)
def _prepare_arrow_table(self, table: pa.Table) -> pa.Table:
"""
Prepare Arrow table for Turbopuffer write.
1. Rename ID column to "id" if needed
2. Rename vector column to "vector" if needed
3. Filter out rows with null IDs
"""
table = self._rename_column_if_needed(table, self.id_column, _ID_COLUMN, "ID")
table = self._rename_column_if_needed(
table, self.vector_column, _VECTOR_COLUMN, "Vector"
)
# Filter out rows with null IDs
if _ID_COLUMN in table.column_names:
table = table.filter(pc.is_valid(table.column(_ID_COLUMN)))
return table
def _write_multi_namespace(
self, client: "turbopuffer.Turbopuffer", table: pa.Table
) -> None:
"""Group rows by ``namespace_column`` and write each group to its namespace.
Uses :meth:`BlockAccessor._iter_groups_sorted` for efficient
zero-copy slicing by group.
"""
group_col_name = self.namespace_column
if group_col_name not in table.column_names:
raise ValueError(
f"Namespace column '{group_col_name}' not found in table. "
f"Available columns: {table.column_names}"
)
# Reject null namespace values early -- we cannot route them.
ns_col = table.column(group_col_name)
if pc.any(pc.is_null(ns_col)).as_py():
raise ValueError(
f"Namespace column '{group_col_name}' contains null values; "
"fill or drop them before writing with namespace_column."
)
# Sort by the namespace column so _iter_groups_sorted can yield
# contiguous zero-copy slices for each unique namespace value.
sort_key = SortKey(key=group_col_name, descending=False)
sorted_table = transform_pyarrow.sort(table, sort_key)
block_accessor = ArrowBlockAccessor.for_block(sorted_table)
for (namespace_name,), group_table in block_accessor._iter_groups_sorted(
sort_key
):
# Drop the namespace column -- it is routing metadata, not data.
group_table = group_table.drop(group_col_name)
# Prepare (rename id/vector columns, filter null IDs).
group_table = self._prepare_arrow_table(group_table)
if group_table.num_rows == 0:
continue
ns = client.namespace(namespace_name)
for batch in group_table.to_batches(max_chunksize=self.batch_size):
self._write_batch_with_retry(ns, batch, namespace_name)
def _transform_to_turbopuffer_format(
self, table: Union[pa.Table, pa.RecordBatch]
) -> dict:
if _ID_COLUMN not in table.column_names:
raise ValueError(f"Table must have '{_ID_COLUMN}' column")
# Cast 16-byte binary ID column to native UUID type for Turbopuffer performance.
# Native UUIDs are 16 bytes vs 36 bytes for string-encoded UUIDs.
# See: https://turbopuffer.com/docs/performance
id_col = table.column(_ID_COLUMN)
if pa.types.is_fixed_size_binary(id_col.type) and id_col.type.byte_width == 16:
# Cast fixed_size_binary(16) to uuid type
uuid_col = id_col.cast(pa.uuid())
table = table.set_column(
table.schema.get_field_index(_ID_COLUMN), _ID_COLUMN, uuid_col
)
# to_pydict() on UuidArray automatically returns uuid.UUID objects
return table.to_pydict()
def _write_batch_with_retry(
self,
namespace: "turbopuffer.Namespace",
batch: pa.Table,
namespace_name: Optional[str] = None,
):
"""Write a single batch with exponential backoff retry.
Args:
namespace: The Turbopuffer namespace object to write to.
batch: Arrow table or record-batch to write.
namespace_name: Human-readable namespace name for log messages.
Falls back to ``self.namespace`` when not provided.
"""
ns_label = namespace_name or self.namespace
try:
batch_data = self._transform_to_turbopuffer_format(batch)
call_with_retry(
lambda: namespace.write(
upsert_columns=batch_data,
schema=self.schema,
distance_metric=self.distance_metric,
),
description=f"write batch to namespace '{ns_label}'",
max_attempts=5,
max_backoff_s=32,
)
except Exception as e:
logger.error(f"Write failed for namespace '{ns_label}' after retries: {e}")
raise
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/datasource/turbopuffer_datasink.py",
"license": "Apache License 2.0",
"lines": 357,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/tests/datasource/test_turbopuffer_datasink.py | """Tests for TurbopufferDatasink.
Organized by critical paths:
1. Constructor validation
2. Client initialization
3. Arrow table preparation
4. Single-namespace batching
5. Transform to Turbopuffer format
6. Retry logic
7. End-to-end write orchestration
8. Streaming behavior
9. Multi-namespace writes
10. Serialization
"""
import pickle
import sys
import time
import uuid
from typing import List
from unittest.mock import MagicMock, patch
import pyarrow as pa
import pytest
from packaging.version import parse as parse_version
from ray.data._internal.datasource.turbopuffer_datasink import TurbopufferDatasink
from ray.data._internal.utils.arrow_utils import get_pyarrow_version
# Module-wide skip: the datasink under test calls pa.uuid(), so the whole
# file is skipped on PyArrow builds older than 19.0.
pytestmark = pytest.mark.skipif(
    get_pyarrow_version() < parse_version("19.0.0"),
    reason="TurbopufferDatasink tests require PyArrow >= 19.0",
)
# =============================================================================
# Fixtures
# =============================================================================
@pytest.fixture(autouse=True)
def mock_turbopuffer_module(monkeypatch):
    """Install a stub ``turbopuffer`` module so the datasink can import it."""
    stub = MagicMock()
    stub.Turbopuffer = MagicMock()
    with patch.dict(sys.modules, {"turbopuffer": stub}):
        yield stub
@pytest.fixture
def sink():
    """A sink configured with only the minimal required arguments."""
    kwargs = dict(
        namespace="default_ns",
        region="gcp-us-central1",
        api_key="test-api-key",
    )
    return TurbopufferDatasink(**kwargs)
@pytest.fixture
def mock_client():
    """A mocked Turbopuffer client whose ``namespace()`` returns a mock."""
    fake = MagicMock()
    fake.namespace.return_value = MagicMock()
    return fake
@pytest.fixture
def sample_table():
    """Three-row table with the standard id/vector columns."""
    ids = [1, 2, 3]
    vectors = [[0.1], [0.2], [0.3]]
    return pa.table({"id": ids, "vector": vectors})
def make_sink(**overrides) -> TurbopufferDatasink:
    """Build a sink from sensible defaults, overridden by ``overrides``."""
    defaults = {
        "namespace": "default_ns",
        "region": "gcp-us-central1",
        "api_key": "test-api-key",
    }
    return TurbopufferDatasink(**{**defaults, **overrides})
# =============================================================================
# 1. Constructor validation
# =============================================================================
class TestConstructorValidation:
    """Tests for constructor argument validation.

    Covers the mutual-exclusion rules (namespace vs namespace_column,
    region vs base_url), column-name collision checks, and API-key
    resolution from the environment.
    """

    def test_requires_namespace_or_namespace_column(self):
        """Must provide exactly one of namespace / namespace_column."""
        with pytest.raises(ValueError, match="Either.*must be provided"):
            TurbopufferDatasink(
                region="gcp-us-central1",
                api_key="k",
            )

    def test_rejects_both_namespace_and_namespace_column(self):
        """Cannot provide both namespace and namespace_column."""
        with pytest.raises(ValueError, match="exactly one"):
            TurbopufferDatasink(
                namespace="ns",
                namespace_column="ns_col",
                region="gcp-us-central1",
                api_key="k",
            )

    def test_namespace_column_cannot_be_id_or_vector(self):
        """namespace_column must not collide with id_column or vector_column."""
        # Default id_column is "id" and vector_column is "vector".
        with pytest.raises(ValueError, match="namespace_column.*must not be the same"):
            make_sink(namespace=None, namespace_column="id")
        with pytest.raises(ValueError, match="namespace_column.*must not be the same"):
            make_sink(namespace=None, namespace_column="vector")

    def test_api_key_from_env(self, monkeypatch):
        """API key can come from environment variable."""
        monkeypatch.delenv("TURBOPUFFER_API_KEY", raising=False)
        # No api_key and no env var -> error
        with pytest.raises(ValueError):
            TurbopufferDatasink(namespace="ns", region="gcp-us-central1")
        # With env var, init should succeed
        monkeypatch.setenv("TURBOPUFFER_API_KEY", "env-api-key")
        sink = TurbopufferDatasink(namespace="ns", region="gcp-us-central1")
        assert sink.api_key == "env-api-key"

    def test_rejects_same_id_and_vector_column(self):
        """id_column and vector_column must be distinct."""
        with pytest.raises(ValueError, match="id_column and vector_column"):
            make_sink(id_column="doc_id", vector_column="doc_id")

    def test_accepts_region_only(self):
        """Constructor succeeds with region and no base_url."""
        sink = make_sink(region="gcp-us-central1")
        assert sink.region == "gcp-us-central1"
        assert sink.base_url is None

    def test_accepts_base_url_only(self):
        """Constructor succeeds with base_url and no region."""
        sink = make_sink(
            region=None,
            base_url="https://gcp-us-central1.turbopuffer.com",
        )
        assert sink.base_url == "https://gcp-us-central1.turbopuffer.com"
        assert sink.region is None

    def test_rejects_both_region_and_base_url(self):
        """Cannot provide both region and base_url."""
        with pytest.raises(ValueError, match="exactly one of 'region' or 'base_url'"):
            make_sink(
                region="gcp-us-central1",
                base_url="https://gcp-us-central1.turbopuffer.com",
            )

    def test_rejects_neither_region_nor_base_url(self):
        """Must provide at least one of region or base_url."""
        with pytest.raises(ValueError, match="Either 'region' or 'base_url'"):
            TurbopufferDatasink(
                namespace="ns",
                api_key="k",
            )
# =============================================================================
# 2. Client initialization
# =============================================================================
class TestClientInitialization:
    """Tests for Turbopuffer client lazy initialization.

    The stubbed ``turbopuffer`` module (autouse fixture) lets these tests
    assert exactly which kwargs reach the ``Turbopuffer(...)`` constructor.
    """

    def test_lazy_initialization(self, sink, mock_turbopuffer_module):
        """Client is created lazily and cached."""
        client1 = sink._get_client()
        client2 = sink._get_client()
        # Second call must return the cached instance, not build a new one.
        assert client1 is client2
        mock_turbopuffer_module.Turbopuffer.assert_called_once_with(
            api_key="test-api-key",
            region="gcp-us-central1",
        )

    def test_uses_explicit_region(self, mock_turbopuffer_module):
        """Client uses the configured region."""
        sink = make_sink(region="custom-region")
        sink._get_client()
        mock_turbopuffer_module.Turbopuffer.assert_called_once_with(
            api_key="test-api-key",
            region="custom-region",
        )

    def test_uses_base_url(self, mock_turbopuffer_module):
        """Client uses base_url when region is not provided."""
        sink = make_sink(
            region=None,
            base_url="https://gcp-us-central1.turbopuffer.com",
        )
        sink._get_client()
        mock_turbopuffer_module.Turbopuffer.assert_called_once_with(
            api_key="test-api-key",
            base_url="https://gcp-us-central1.turbopuffer.com",
        )

    def test_base_url_does_not_pass_region(self, mock_turbopuffer_module):
        """When base_url is used, region is not passed to the client."""
        sink = make_sink(
            region=None,
            base_url="https://custom.turbopuffer.com",
        )
        sink._get_client()
        call_kwargs = mock_turbopuffer_module.Turbopuffer.call_args[1]
        assert "region" not in call_kwargs
        assert call_kwargs["base_url"] == "https://custom.turbopuffer.com"

    def test_region_does_not_pass_base_url(self, mock_turbopuffer_module):
        """When region is used, base_url is not passed to the client."""
        sink = make_sink(region="gcp-us-central1")
        sink._get_client()
        call_kwargs = mock_turbopuffer_module.Turbopuffer.call_args[1]
        assert "base_url" not in call_kwargs
        assert call_kwargs["region"] == "gcp-us-central1"
# =============================================================================
# 3. Arrow table preparation
# =============================================================================
class TestArrowTablePreparation:
    """Tests for _prepare_arrow_table.

    Verifies column renaming to the canonical "id"/"vector" names, null-ID
    filtering, and the error cases (missing source column, target-name
    collision).
    """

    def test_renames_columns_and_filters_null_ids(self):
        """Custom columns are renamed and null IDs filtered."""
        table = pa.table(
            {
                "doc_id": [1, 2, None],
                "emb": [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]],
            }
        )
        sink = make_sink(id_column="doc_id", vector_column="emb")
        prepared = sink._prepare_arrow_table(table)
        # Null ID row filtered, columns renamed to id/vector
        expected = pa.table(
            {
                "id": [1, 2],
                "vector": [[0.1, 0.2], [0.3, 0.4]],
            }
        )
        assert prepared.equals(expected)

    def test_missing_id_column_raises(self):
        """Missing custom ID column raises ValueError."""
        table = pa.table({"other": [1, 2, 3]})
        sink = make_sink(id_column="doc_id")
        with pytest.raises(ValueError):
            sink._prepare_arrow_table(table)

    def test_missing_vector_column_raises(self):
        """Missing vector column raises ValueError."""
        table = pa.table({"id": [1, 2, 3]})
        sink = make_sink(vector_column="embedding")
        with pytest.raises(ValueError, match="Vector column 'embedding' not found"):
            sink._prepare_arrow_table(table)

    @pytest.mark.parametrize(
        "existing_col,custom_col,expected_match",
        [
            ("id", "doc_id", "already has.*'id' column"),
            ("vector", "emb", "already has.*'vector' column"),
        ],
        ids=["id_conflict", "vector_conflict"],
    )
    def test_conflicting_column_names_raise(
        self, existing_col, custom_col, expected_match
    ):
        """Raise if table already has target column name."""
        # Build a table that contains BOTH the custom source column and a
        # column already using the reserved target name.
        if existing_col == "id":
            table = pa.table(
                {"id": [1, 2], "doc_id": [10, 20], "vector": [[0.1], [0.2]]}
            )
            sink = make_sink(id_column="doc_id")
        else:
            table = pa.table(
                {"id": [1, 2], "vector": [[0.1], [0.2]], "emb": [[0.3], [0.4]]}
            )
            sink = make_sink(vector_column="emb")
        with pytest.raises(ValueError, match=expected_match):
            sink._prepare_arrow_table(table)
# =============================================================================
# 4. Single-namespace batching
# =============================================================================
class TestSingleNamespaceBatching:
    """Tests for write batching behavior."""

    def test_batches_by_batch_size(self, mock_client):
        """Large tables are split into batches."""
        num_rows = 25
        table = pa.table(
            {
                "id": list(range(num_rows)),
                "vector": [[float(i)] for i in range(num_rows)],
            }
        )
        sink = make_sink(batch_size=10)
        batch_sizes: List[int] = []

        def track_batch(ns, batch, namespace_name=None):
            # batch is a RecordBatch, get its row count
            batch_sizes.append(batch.num_rows)

        with patch.object(sink, "_get_client", return_value=mock_client):
            with patch.object(sink, "_write_batch_with_retry", side_effect=track_batch):
                sink.write([table], ctx=None)
        # 25 rows at batch_size=10 -> two full batches plus a remainder.
        assert batch_sizes == [10, 10, 5]

    def test_skips_empty_blocks(self, sink):
        """Empty blocks don't trigger namespace writes."""
        empty_table = pa.table({"id": [], "vector": []})
        with patch.object(sink, "_get_client") as mock_get_client:
            with patch.object(sink, "_write_batch_with_retry") as mock_write:
                mock_get_client.return_value = MagicMock()
                sink.write([empty_table], ctx=None)
                mock_write.assert_not_called()
# =============================================================================
# 5. Transform to Turbopuffer format
# =============================================================================
class TestTransformToTurbopufferFormat:
    """Tests for _transform_to_turbopuffer_format."""

    def test_requires_id_column(self, sink):
        """Table must have 'id' column."""
        table = pa.table({"col": [1, 2, 3]})
        with pytest.raises(ValueError):
            sink._transform_to_turbopuffer_format(table)

    def test_converts_uuid_bytes_to_native_uuid(self, sink):
        """16-byte binary IDs become native uuid.UUID objects.

        Per Turbopuffer performance docs, native UUIDs (16 bytes) are more
        efficient than string UUIDs (36 bytes).
        """
        u = uuid.uuid4()
        # ID column must be binary(16) for UUID conversion
        table = pa.table(
            {
                "id": pa.array([u.bytes], type=pa.binary(16)),
                "vector": [[0.1, 0.2]],
            }
        )
        columns = sink._transform_to_turbopuffer_format(table)
        expected = {
            "id": [u],  # Native uuid.UUID, not bytes
            "vector": [[0.1, 0.2]],
        }
        assert columns == expected
        assert isinstance(columns["id"][0], uuid.UUID)
# =============================================================================
# 6. Retry logic
# =============================================================================
class TestRetryLogic:
    """Tests for _write_batch_with_retry.

    Backoff sleeps are patched out so the retry paths run instantly.
    """

    @pytest.fixture
    def sample_batch(self):
        """A simple batch for retry tests."""
        return pa.table({"id": [1], "vector": [[0.1]]})

    def test_success_first_try(self, sink, sample_batch):
        """Successful write on first attempt."""
        namespace = MagicMock()
        sink._write_batch_with_retry(namespace, sample_batch)
        namespace.write.assert_called_once_with(
            upsert_columns={"id": [1], "vector": [[0.1]]},
            schema=None,
            distance_metric="cosine_distance",
        )

    def test_retries_then_succeeds(self, sink, sample_batch, monkeypatch):
        """Transient failures are retried."""
        monkeypatch.setattr(time, "sleep", lambda _: None)
        namespace = MagicMock()
        attempts = {"count": 0}

        def flaky_write(*args, **kwargs):
            # Fail the first two calls, succeed on the third.
            attempts["count"] += 1
            if attempts["count"] < 3:
                raise RuntimeError("temporary error")

        namespace.write.side_effect = flaky_write
        sink._write_batch_with_retry(namespace, sample_batch)
        assert attempts["count"] == 3

    def test_exhausts_retries_and_raises(self, sink, sample_batch, monkeypatch):
        """Persistent failures exhaust retries and raise."""
        monkeypatch.setattr(time, "sleep", lambda _: None)
        namespace = MagicMock()
        namespace.write.side_effect = RuntimeError("persistent error")
        with pytest.raises(RuntimeError, match="persistent error"):
            sink._write_batch_with_retry(namespace, sample_batch)
        assert namespace.write.call_count == 5  # max_attempts=5

    @pytest.mark.parametrize(
        "schema,distance_metric",
        [
            ({"field": "value"}, "cosine_distance"),
            (None, "euclidean_squared"),
            ({"type": "string"}, "euclidean_squared"),
        ],
        ids=["with_schema", "alt_metric", "both"],
    )
    def test_configurable_options(self, schema, distance_metric):
        """Schema and distance_metric are passed to write."""
        sink = make_sink(schema=schema, distance_metric=distance_metric)
        namespace = MagicMock()
        batch = pa.table({"id": [1], "vector": [[0.1]]})
        sink._write_batch_with_retry(namespace, batch)
        namespace.write.assert_called_once_with(
            upsert_columns={"id": [1], "vector": [[0.1]]},
            schema=schema,
            distance_metric=distance_metric,
        )
# =============================================================================
# 7. End-to-end write orchestration
# =============================================================================
class TestWriteOrchestration:
    """Tests for the top-level ``write()`` entry point."""

    def test_write_multiple_blocks(self, sink):
        """Each input block is processed and written independently."""
        input_blocks = [
            pa.table({"id": [1, 2], "vector": [[1.0], [2.0]]}),
            pa.table({"id": [3], "vector": [[3.0]]}),
        ]
        row_counts = []

        def record(ns, batch, namespace_name=None):
            row_counts.append(batch.num_rows)

        fake_client = MagicMock()
        with patch.object(sink, "_get_client", return_value=fake_client):
            with patch.object(sink, "_write_batch_with_retry", side_effect=record):
                sink.write(input_blocks, ctx=None)
        # One write per block, preserving per-block row counts.
        assert len(row_counts) == 2
        assert row_counts == [2, 1]
        # The configured namespace is the one looked up on the client.
        fake_client.namespace.assert_called_with("default_ns")
# =============================================================================
# 8. Streaming behavior (memory efficiency)
# =============================================================================
class TestStreamingBehavior:
    """Tests that writes stream block-by-block (memory efficiency)."""

    def test_processes_blocks_independently(self, sink):
        """Five single-row blocks produce five independent writes."""
        single_row_blocks = [
            pa.table({"id": [i], "vector": [[float(i)]]}) for i in range(5)
        ]
        observed = []

        def record(ns, batch, namespace_name=None):
            observed.append(batch.num_rows)

        with patch.object(sink, "_get_client", return_value=MagicMock()):
            with patch.object(sink, "_write_batch_with_retry", side_effect=record):
                sink.write(single_row_blocks, ctx=None)
        # No concatenation: one write per block, one row per write.
        assert observed == [1, 1, 1, 1, 1]
# =============================================================================
# 9. Multi-namespace writes
# =============================================================================
class TestMultiNamespaceWrites:
    """Tests for namespace_column-driven multi-namespace writes."""

    def test_routes_rows_to_correct_namespaces(self):
        """Rows are grouped by namespace_column and written to the right ns."""
        sink = make_sink(namespace=None, namespace_column="tenant")
        table = pa.table(
            {
                "tenant": ["ns_a", "ns_b", "ns_a", "ns_b"],
                "id": [1, 2, 3, 4],
                "vector": [[0.1], [0.2], [0.3], [0.4]],
            }
        )
        writes = {}  # namespace_name -> list of row counts

        def track_write(ns, batch, namespace_name=None):
            writes.setdefault(namespace_name, []).append(batch.num_rows)

        mock_client = MagicMock()
        mock_client.namespace.return_value = MagicMock()
        with patch.object(sink, "_get_client", return_value=mock_client):
            with patch.object(sink, "_write_batch_with_retry", side_effect=track_write):
                sink.write([table], ctx=None)
        # Two rows of the four belong to each tenant.
        assert "ns_a" in writes
        assert "ns_b" in writes
        assert sum(writes["ns_a"]) == 2
        assert sum(writes["ns_b"]) == 2

    def test_drops_namespace_column_before_writing(self):
        """The namespace column is not included in the written data."""
        sink = make_sink(namespace=None, namespace_column="tenant")
        table = pa.table(
            {
                "tenant": ["ns_a"],
                "id": [1],
                "vector": [[0.1]],
            }
        )
        written_batches = []

        def capture_batch(ns, batch, namespace_name=None):
            written_batches.append(batch)

        mock_client = MagicMock()
        mock_client.namespace.return_value = MagicMock()
        with patch.object(sink, "_get_client", return_value=mock_client):
            with patch.object(
                sink, "_write_batch_with_retry", side_effect=capture_batch
            ):
                sink.write([table], ctx=None)
        assert len(written_batches) == 1
        # Routing metadata is stripped; payload columns remain.
        assert "tenant" not in written_batches[0].column_names
        assert "id" in written_batches[0].column_names

    def test_missing_namespace_column_raises(self):
        """Missing namespace column in data raises ValueError."""
        sink = make_sink(namespace=None, namespace_column="tenant")
        table = pa.table(
            {
                "id": [1],
                "vector": [[0.1]],
            }
        )
        mock_client = MagicMock()
        with patch.object(sink, "_get_client", return_value=mock_client):
            with pytest.raises(ValueError, match="Namespace column.*not found"):
                sink.write([table], ctx=None)

    def test_null_namespace_values_raise(self):
        """Null values in namespace column raise ValueError."""
        sink = make_sink(namespace=None, namespace_column="tenant")
        table = pa.table(
            {
                "tenant": ["ns_a", None],
                "id": [1, 2],
                "vector": [[0.1], [0.2]],
            }
        )
        mock_client = MagicMock()
        with patch.object(sink, "_get_client", return_value=mock_client):
            with pytest.raises(ValueError, match="contains null values"):
                sink.write([table], ctx=None)

    def test_skips_empty_blocks_in_multi_namespace(self):
        """Empty blocks are skipped in multi-namespace mode."""
        sink = make_sink(namespace=None, namespace_column="tenant")
        # Explicit types are needed: pa.table infers nothing from empty lists.
        empty_table = pa.table(
            {
                "tenant": pa.array([], type=pa.string()),
                "id": pa.array([], type=pa.int64()),
                "vector": pa.array([], type=pa.list_(pa.float64())),
            }
        )
        mock_client = MagicMock()
        with patch.object(sink, "_get_client", return_value=mock_client):
            with patch.object(sink, "_write_batch_with_retry") as mock_write:
                sink.write([empty_table], ctx=None)
                mock_write.assert_not_called()
# =============================================================================
# 10. Serialization behavior
# =============================================================================
class TestSerialization:
    """Tests for pickle serialization support.

    The sink excludes its client handle from pickled state and re-creates
    it lazily, so every round-trip must leave ``_client`` as None.
    """

    def test_preserves_configuration(self, sink, mock_turbopuffer_module):
        """Configuration is preserved after pickle round-trip."""
        pickled = pickle.dumps(sink)
        unpickled = pickle.loads(pickled)
        assert unpickled.namespace == sink.namespace
        assert unpickled.namespace_column == sink.namespace_column
        assert unpickled.api_key == sink.api_key
        assert unpickled.region == sink.region
        assert unpickled.base_url == sink.base_url
        assert unpickled.batch_size == sink.batch_size
        assert unpickled._client is None
        # Lazy initialization works after unpickling
        client = unpickled._get_client()
        assert client is not None
        mock_turbopuffer_module.Turbopuffer.assert_called()

    def test_preserves_namespace_column_configuration(self, mock_turbopuffer_module):
        """namespace_column configuration survives pickle round-trip."""
        sink = make_sink(namespace=None, namespace_column="tenant")
        pickled = pickle.dumps(sink)
        unpickled = pickle.loads(pickled)
        assert unpickled.namespace is None
        assert unpickled.namespace_column == "tenant"
        assert unpickled._client is None

    def test_preserves_base_url_configuration(self, mock_turbopuffer_module):
        """base_url configuration survives pickle round-trip."""
        sink = make_sink(
            region=None,
            base_url="https://gcp-us-central1.turbopuffer.com",
        )
        pickled = pickle.dumps(sink)
        unpickled = pickle.loads(pickled)
        assert unpickled.region is None
        assert unpickled.base_url == "https://gcp-us-central1.turbopuffer.com"
        assert unpickled._client is None
        # Lazy initialization works and uses base_url
        unpickled._get_client()
        mock_turbopuffer_module.Turbopuffer.assert_called_once_with(
            api_key="test-api-key",
            base_url="https://gcp-us-central1.turbopuffer.com",
        )
# Allow running this file directly (outside the Bazel/pytest harness).
if __name__ == "__main__":
    import sys
    import pytest
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/datasource/test_turbopuffer_datasink.py",
"license": "Apache License 2.0",
"lines": 549,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/serve/_private/haproxy.py | import asyncio
import csv
import io
import json
import logging
import os
import re
import time
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Set, Tuple
from jinja2 import Environment
import ray
from ray._common.utils import get_or_create_event_loop
from ray.serve._private.common import (
NodeId,
ReplicaID,
RequestMetadata,
)
from ray.serve._private.constants import (
DRAINING_MESSAGE,
HEALTHY_MESSAGE,
NO_REPLICAS_MESSAGE,
NO_ROUTES_MESSAGE,
PROXY_MIN_DRAINING_PERIOD_S,
RAY_SERVE_ENABLE_HAPROXY_OPTIMIZED_CONFIG,
RAY_SERVE_HAPROXY_CONFIG_FILE_LOC,
RAY_SERVE_HAPROXY_HARD_STOP_AFTER_S,
RAY_SERVE_HAPROXY_HEALTH_CHECK_DOWNINTER,
RAY_SERVE_HAPROXY_HEALTH_CHECK_FALL,
RAY_SERVE_HAPROXY_HEALTH_CHECK_FASTINTER,
RAY_SERVE_HAPROXY_HEALTH_CHECK_INTER,
RAY_SERVE_HAPROXY_HEALTH_CHECK_RISE,
RAY_SERVE_HAPROXY_MAXCONN,
RAY_SERVE_HAPROXY_METRICS_PORT,
RAY_SERVE_HAPROXY_NBTHREAD,
RAY_SERVE_HAPROXY_SERVER_STATE_BASE,
RAY_SERVE_HAPROXY_SERVER_STATE_FILE,
RAY_SERVE_HAPROXY_SOCKET_PATH,
RAY_SERVE_HAPROXY_SYSLOG_PORT,
RAY_SERVE_HAPROXY_TIMEOUT_CLIENT_S,
RAY_SERVE_HAPROXY_TIMEOUT_CONNECT_S,
RAY_SERVE_HAPROXY_TIMEOUT_SERVER_S,
SERVE_CONTROLLER_NAME,
SERVE_LOGGER_NAME,
SERVE_NAMESPACE,
)
from ray.serve._private.haproxy_templates import (
HAPROXY_CONFIG_TEMPLATE,
HAPROXY_HEALTHZ_RULES_TEMPLATE,
)
from ray.serve._private.logging_utils import get_component_logger_file_path
from ray.serve._private.long_poll import LongPollClient, LongPollNamespace
from ray.serve._private.proxy import ProxyActorInterface
from ray.serve.config import HTTPOptions, gRPCOptions
from ray.serve.schema import (
LoggingConfig,
Target,
TargetGroup,
)
logger = logging.getLogger(SERVE_LOGGER_NAME)
@dataclass
class ServerStats:
    """Per-server statistics parsed from HAProxy."""

    backend: str  # Name of the backend this server is registered under
    server: str  # Server identifier within that backend
    status: str  # HAProxy status string: "UP", "DOWN", "DRAIN", etc.
    current_sessions: int  # Currently active sessions (HAProxy 'scur')
    queued: int  # Requests waiting in the queue (HAProxy 'qcur')

    @property
    def is_up(self) -> bool:
        """Whether HAProxy reports this server as UP."""
        return self.status == "UP"

    @property
    def is_draining(self) -> bool:
        """Whether the server is in a draining state (DRAIN or NOLB)."""
        return self.status in ("DRAIN", "NOLB")

    @property
    def can_drain_safely(self) -> bool:
        """
        Return True if the server can be drained safely based on the current load.

        Safe to drain when:
        - No current active sessions (0)
        - No queued requests waiting

        This ensures no active user sessions are disrupted during draining.
        """
        return not self.current_sessions and not self.queued
@dataclass
class HAProxyStats:
    """Complete HAProxy statistics with both individual server data and aggregate metrics."""

    # Per-backend mapping: backend name -> {server name -> ServerStats}.
    backend_to_servers: Dict[str, Dict[str, ServerStats]] = field(default_factory=dict)

    def _iter_servers(self):
        """Yield every ServerStats across all backends."""
        for servers in self.backend_to_servers.values():
            yield from servers.values()

    # Aggregate metrics, computed on demand from the per-server data.
    @property
    def total_backends(self) -> int:
        """Total number of backends."""
        return len(self.backend_to_servers)

    @property
    def total_servers(self) -> int:
        """Total number of servers across all backends."""
        return sum(len(servers) for servers in self.backend_to_servers.values())

    @property
    def active_servers(self) -> int:
        """Number of servers currently UP."""
        return sum(1 for srv in self._iter_servers() if srv.is_up)

    @property
    def draining_servers(self) -> int:
        """Number of servers currently draining."""
        return sum(1 for srv in self._iter_servers() if srv.is_draining)

    @property
    def total_active_sessions(self) -> int:
        """Sum of all active sessions across all servers."""
        return sum(srv.current_sessions for srv in self._iter_servers())

    @property
    def total_queued_requests(self) -> int:
        """Sum of all queued requests across all servers."""
        return sum(srv.queued for srv in self._iter_servers())

    @property
    def is_system_idle(self) -> bool:
        """Return True if the entire system has no active load."""
        return self.total_active_sessions == 0 and self.total_queued_requests == 0

    @property
    def draining_progress_pct(self) -> float:
        """Return percentage of servers currently draining (0.0 to 100.0)."""
        total = self.total_servers
        if total == 0:
            return 0.0
        return (self.draining_servers / total) * 100.0
@dataclass
class HealthRouteInfo:
    """Information regarding how proxy should respond to health and routes requests."""

    # Whether the proxy should report itself as healthy.
    healthy: bool = True
    # HTTP status code to return for health/routes responses.
    status: int = 200
    # Response body for health requests; defaults to the standard healthy
    # message constant.
    health_message: str = HEALTHY_MESSAGE
    # Response body for routes requests; a JSON-encoded mapping
    # (empty object by default).
    routes_message: str = "{}"
    # Content-Type header for the routes response.
    routes_content_type: str = "application/json"
@dataclass
class ServerConfig:
    """Configuration for a single server."""

    name: str  # Identifier used for this server in the HAProxy config
    host: str  # IP address or hostname to connect to
    port: int  # TCP port to connect to

    def __str__(self) -> str:
        return (
            f"ServerConfig(name='{self.name}', host='{self.host}', port={self.port})"
        )

    def __repr__(self) -> str:
        return self.__str__()
@dataclass
class BackendConfig:
    """Configuration for a single application backend."""

    # Name of the target group.
    name: str
    # Path prefix used to route requests to this target group.
    path_prefix: str
    # Maximum time HAProxy will wait for a successful TCP connection to be established with the backend server.
    timeout_connect_s: Optional[int] = None
    # Maximum time that the backend server can be inactive while sending data back to HAProxy.
    # This is also active during the initial response phase.
    timeout_server_s: Optional[int] = None
    # Maximum time that the client can be inactive while sending data to HAProxy.
    # This is active during the initial request phase.
    timeout_client_s: Optional[int] = None
    timeout_http_request_s: Optional[int] = None
    # Maximum time HAProxy will wait for a request in the queue.
    timeout_queue_s: Optional[int] = None
    # Maximum time HAProxy will keep the connection alive.
    # This has to be the same or greater than the client side keep-alive timeout.
    timeout_http_keep_alive_s: Optional[int] = None
    # Inactivity timeout for established WebSocket (tunnel) connections, so
    # they are not killed by the general client/server timeouts, which are
    # meant for the initial phases of a connection.
    timeout_tunnel_s: Optional[int] = None
    # Consecutive failed health checks before a server is marked unhealthy.
    health_check_fall: Optional[int] = None
    # Consecutive successful health checks before an unhealthy server is
    # marked healthy again.
    health_check_rise: Optional[int] = None
    # Time between health check attempts.
    health_check_inter: Optional[str] = None
    # Check interval while a server is transitioning (UP -> transitionally
    # DOWN, or DOWN -> transitionally UP).
    health_check_fastinter: Optional[str] = None
    # Check interval while a server is in the DOWN state.
    health_check_downinter: Optional[str] = None
    # HTTP path the health check mechanism sends requests to.
    health_check_path: Optional[str] = "/-/healthz"
    # Servers belonging to this backend.
    servers: List[ServerConfig] = field(default_factory=list)
    # The app name for this backend.
    app_name: str = field(default_factory=str)

    def build_health_check_config(self, global_config: "HAProxyConfig") -> dict:
        """Build health check configuration for HAProxy backend.

        Returns a dict with:
          - health_path: path for HTTP health checks (or None)
          - default_server_directive: complete "default-server" line with all params
        """

        def resolve(local, global_default):
            # A backend-specific setting wins; otherwise use the global default.
            return local if local is not None else global_default

        fall = resolve(self.health_check_fall, global_config.health_check_fall)
        rise = resolve(self.health_check_rise, global_config.health_check_rise)
        inter = resolve(self.health_check_inter, global_config.health_check_inter)
        fastinter = resolve(
            self.health_check_fastinter, global_config.health_check_fastinter
        )
        downinter = resolve(
            self.health_check_downinter, global_config.health_check_downinter
        )
        health_path = resolve(
            self.health_check_path, global_config.health_check_path
        )

        # Assemble the "default-server" directive, skipping unset options.
        # Option order: fastinter, downinter, fall, rise, inter, then the
        # mandatory "check" keyword at the end.
        directive_parts = []
        for keyword, value in (
            ("fastinter", fastinter),
            ("downinter", downinter),
            ("fall", fall),
            ("rise", rise),
            ("inter", inter),
        ):
            if value is not None:
                directive_parts.append(f"{keyword} {value}")
        directive_parts.append("check")

        return {
            "health_path": health_path,
            "default_server_directive": "default-server " + " ".join(directive_parts),
        }

    def __repr__(self) -> str:
        return (
            f"BackendConfig(app_name='{self.app_name}', name='{self.name}', "
            f"path_prefix='{self.path_prefix}', servers={self.servers})"
        )

    def __str__(self) -> str:
        return repr(self)
@dataclass
class HAProxyConfig:
    """Configuration for HAProxy."""

    # Path to the HAProxy admin/stats UNIX socket (used for "show stat" etc.).
    socket_path: str = RAY_SERVE_HAPROXY_SOCKET_PATH
    server_state_base: str = RAY_SERVE_HAPROXY_SERVER_STATE_BASE
    # File where "show servers state" output is persisted across graceful reloads.
    server_state_file: str = RAY_SERVE_HAPROXY_SERVER_STATE_FILE
    # Enable HAProxy optimizations (server state persistence, etc.)
    # Disabled by default to prevent test suite interference
    enable_hap_optimization: bool = RAY_SERVE_ENABLE_HAPROXY_OPTIMIZED_CONFIG
    maxconn: int = RAY_SERVE_HAPROXY_MAXCONN
    nbthread: int = RAY_SERVE_HAPROXY_NBTHREAD
    # Port/URI for HAProxy's built-in stats page.
    stats_port: int = 8404
    stats_uri: str = "/stats"
    # Port/URI for the Prometheus-style metrics endpoint.
    metrics_port: int = RAY_SERVE_HAPROXY_METRICS_PORT
    metrics_uri: str = "/metrics"
    # All timeout values are in seconds
    timeout_queue_s: Optional[int] = None
    timeout_connect_s: Optional[int] = RAY_SERVE_HAPROXY_TIMEOUT_CONNECT_S
    timeout_client_s: Optional[int] = RAY_SERVE_HAPROXY_TIMEOUT_CLIENT_S
    timeout_server_s: Optional[int] = RAY_SERVE_HAPROXY_TIMEOUT_SERVER_S
    timeout_http_request_s: Optional[int] = None
    hard_stop_after_s: Optional[int] = RAY_SERVE_HAPROXY_HARD_STOP_AFTER_S
    # Extra directives for the "global"/"defaults" sections of the rendered
    # config — presumably consumed by the config template; verify there.
    custom_global: Dict[str, str] = field(default_factory=dict)
    custom_defaults: Dict[str, str] = field(default_factory=dict)
    # When True, start() assigns a fresh reload_id before generating config
    # (see HAProxyApi.start); presumably injected as a response header by the
    # template — confirm against HAPROXY_CONFIG_TEMPLATE.
    inject_process_id_header: bool = False
    reload_id: Optional[str] = None # Unique ID for each reload
    enable_so_reuseport: bool = (
        os.environ.get("SERVE_SOCKET_REUSE_PORT_ENABLED", "0") == "1"
    )
    # Set once the controller has pushed route / server information; both gate
    # the health endpoint (see build_health_route_info).
    has_received_routes: bool = False
    has_received_servers: bool = False
    # When False, the /healthz endpoint is forced to fail (used for draining).
    pass_health_checks: bool = True
    health_check_endpoint: str = "/-/healthz"
    # Global health check parameters (used as defaults for backends)
    # Number of consecutive failed health checks that must occur before a service instance is marked as unhealthy
    health_check_fall: Optional[int] = RAY_SERVE_HAPROXY_HEALTH_CHECK_FALL
    # Number of consecutive successful health checks required to mark an unhealthy service instance as healthy again
    health_check_rise: Optional[int] = RAY_SERVE_HAPROXY_HEALTH_CHECK_RISE
    # Interval, or the amount of time, between each health check attempt
    health_check_inter: Optional[str] = RAY_SERVE_HAPROXY_HEALTH_CHECK_INTER
    # The interval between two consecutive health checks when the server is in any of the transition states: UP - transitionally DOWN or DOWN - transitionally UP
    health_check_fastinter: Optional[str] = RAY_SERVE_HAPROXY_HEALTH_CHECK_FASTINTER
    # The interval between two consecutive health checks when the server is in the DOWN state
    health_check_downinter: Optional[str] = RAY_SERVE_HAPROXY_HEALTH_CHECK_DOWNINTER
    health_check_path: Optional[str] = "/-/healthz" # For HTTP health checks
    http_options: HTTPOptions = field(default_factory=HTTPOptions)
    syslog_port: int = RAY_SERVE_HAPROXY_SYSLOG_PORT

    @property
    def frontend_host(self) -> str:
        """Bind address for the frontend; "*" (all interfaces) for None/0.0.0.0."""
        if self.http_options.host is None or self.http_options.host == "0.0.0.0":
            return "*"
        return self.http_options.host

    @property
    def frontend_port(self) -> int:
        """Port the HAProxy frontend listens on."""
        return self.http_options.port

    @property
    def timeout_http_keep_alive_s(self) -> int:
        """Keep-alive timeout, mirrored from the HTTP options."""
        return self.http_options.keep_alive_timeout_s

    def build_health_route_info(self, backends: List[BackendConfig]) -> HealthRouteInfo:
        """Compute how the proxy should answer health and routes requests.

        Unhealthy (503) when health checks are forced to fail (draining) or
        when routes/servers have not yet been received from the controller.
        When healthy, the routes message is a JSON mapping of path prefix ->
        app name, escaped for embedding in an HAProxy double-quoted string.
        """
        if not self.has_received_routes:
            router_ready_for_traffic = False
            router_message = NO_ROUTES_MESSAGE
        elif not self.has_received_servers:
            router_ready_for_traffic = False
            router_message = NO_REPLICAS_MESSAGE
        else:
            router_ready_for_traffic = True
            router_message = ""
        if not self.pass_health_checks:
            healthy = False
            message = DRAINING_MESSAGE
        elif not router_ready_for_traffic:
            healthy = False
            message = router_message
        else:
            healthy = True
            message = HEALTHY_MESSAGE
        if healthy:
            # Build routes JSON mapping: {"<path_prefix>": "<app_name>", ...}
            routes = {
                be.path_prefix: be.app_name
                for be in backends
                if be.app_name and be.path_prefix
            }
            routes_json = json.dumps(routes, separators=(",", ":"), ensure_ascii=False)
            # Escape for haproxy double-quoted string literal
            routes_message = routes_json.replace("\\", "\\\\").replace('"', '\\"')
        else:
            routes_message = message
        return HealthRouteInfo(
            healthy=healthy,
            status=200 if healthy else 503,
            health_message=message,
            routes_message=routes_message,
            routes_content_type="application/json" if healthy else "text/plain",
        )
# TODO: support custom root_path and https
class ProxyApi(ABC):
    """Generic interface for load balancer management operations."""

    @abstractmethod
    async def start(self) -> None:
        """Initialize proxy configuration files and launch the proxy."""
        pass

    @abstractmethod
    async def get_all_stats(self) -> Dict[str, Dict[str, ServerStats]]:
        """Get statistics for all servers in all backends."""
        pass

    @abstractmethod
    async def stop(self) -> None:
        """Stop the proxy."""
        pass

    @abstractmethod
    async def disable(self) -> None:
        """Stop the proxy from accepting any HTTP requests."""
        pass

    @abstractmethod
    async def enable(self) -> None:
        """Allow the proxy to accept HTTP requests again."""
        pass

    @abstractmethod
    async def reload(self) -> None:
        """Gracefully reload the service."""
        pass
class HAProxyApi(ProxyApi):
    """ProxyApi implementation for HAProxy.

    Owns the HAProxy subprocess lifecycle: renders config files from the
    Jinja templates, starts the process, performs graceful reloads via
    HAProxy's ``-sf`` takeover flag, collects stats over the admin socket,
    and tears everything down on stop().
    """

    def __init__(
        self,
        cfg: HAProxyConfig,
        backend_configs: Dict[str, BackendConfig] = None,
        config_file_path: str = RAY_SERVE_HAPROXY_CONFIG_FILE_LOC,
    ):
        """Initialize the wrapper (does NOT start HAProxy; call start()).

        Args:
            cfg: Global HAProxy configuration.
            backend_configs: Mapping of backend name -> BackendConfig.
            config_file_path: Where the rendered haproxy.cfg is written.
        """
        self.cfg = cfg
        self.backend_configs = backend_configs or {}
        self.config_file_path = config_file_path
        # Lock to prevent concurrent config modifications
        self._config_lock = asyncio.Lock()
        self._proc = None
        # Track old processes from graceful reloads that may still be draining
        self._old_procs: List[asyncio.subprocess.Process] = []
        # Ensure required directories exist during initialization
        self._initialize_directories_and_error_files()

    def _initialize_directories_and_error_files(self) -> None:
        """
        Ensures all required directories exist, creates a unified 500 error file,
        and assigns its path to self.cfg.error_file_path. Called once during initialization.
        """
        # Create a config file directory
        config_dir = os.path.dirname(self.config_file_path)
        os.makedirs(config_dir, exist_ok=True)
        # Create a socket directory
        socket_dir = os.path.dirname(self.cfg.socket_path)
        os.makedirs(socket_dir, exist_ok=True)
        # Create a server state directory only if optimization is enabled
        if self.cfg.enable_hap_optimization:
            server_state_dir = os.path.dirname(self.cfg.server_state_file)
            os.makedirs(server_state_dir, exist_ok=True)
        # Create a single error file for both 502 and 504 errors
        # Both will be normalized to 500 Internal Server Error
        error_file_path = os.path.join(config_dir, "500.http")
        with open(error_file_path, "w") as ef:
            ef.write("HTTP/1.1 500 Internal Server Error\r\n")
            ef.write("Content-Type: text/plain\r\n")
            ef.write("Content-Length: 21\r\n")
            ef.write("\r\n")
            ef.write("Internal Server Error")
        self.cfg.error_file_path = error_file_path

    def _is_running(self) -> bool:
        """Check if the HAProxy process handle exists and has not exited."""
        return self._proc is not None and self._proc.returncode is None

    async def _start_and_wait_for_haproxy(
        self, *extra_args: str, timeout_s: int = 5
    ) -> asyncio.subprocess.Process:
        """Spawn an HAProxy process and wait until it answers on the socket.

        Args:
            *extra_args: Extra CLI args (e.g. "-sf <pid>" for graceful reload).
            timeout_s: Max seconds to wait for the process to become available.

        Raises:
            RuntimeError: If HAProxy crashes on startup or isn't up in time.
        """
        # Build command args
        args = ["haproxy", "-db", "-f", self.config_file_path]
        if not self.cfg.enable_so_reuseport:
            args.append("-dR")
        # Add any extra args (like -sf for graceful reload)
        args.extend(extra_args)
        logger.debug(f"Starting HAProxy with args: {args}")
        proc = await asyncio.create_subprocess_exec(
            *args,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
        try:
            # Forward timeout_s: previously this parameter was accepted but
            # never passed on, so the callee always used its own default.
            await self._wait_for_hap_availability(proc, timeout_s=timeout_s)
        except Exception:
            # If startup fails, ensure the process is killed to avoid orphaned processes
            if proc.returncode is None:
                proc.kill()
                await proc.wait()
            raise
        return proc

    async def _save_server_state(self) -> None:
        """Save the server state ("show servers state") to the state file."""
        server_state = await self._send_socket_command("show servers state")
        with open(self.cfg.server_state_file, "w") as f:
            f.write(server_state)

    async def _graceful_reload(self) -> None:
        """Perform a graceful reload of HAProxy by starting a new process with -sf."""
        try:
            old_proc = self._proc
            await self._wait_for_hap_availability(old_proc)
            # Save server state if optimization is enabled
            if self.cfg.enable_hap_optimization:
                await self._save_server_state()
            # Start new HAProxy process with -sf flag to gracefully take over from old process
            # Use -x socket transfer for seamless reloads if optimization is enabled
            reload_args = ["-sf", str(old_proc.pid)]
            if self.cfg.enable_hap_optimization:
                reload_args.extend(["-x", self.cfg.socket_path])
            self._proc = await self._start_and_wait_for_haproxy(*reload_args)
            # Track old process so we can ensure it's cleaned up during shutdown
            if old_proc is not None:
                self._old_procs.append(old_proc)
            logger.info(
                "Successfully performed graceful HAProxy reload with process restart."
            )
        except Exception as e:
            logger.error(f"HAProxy graceful reload failed: {e}")
            raise

    async def _wait_for_hap_availability(
        self, proc: asyncio.subprocess.Process, timeout_s: int = 5
    ) -> None:
        """Poll until HAProxy answers on the admin socket or timeout_s elapses.

        Raises:
            RuntimeError: If the process exits during startup or the deadline
                passes without the socket becoming responsive.
        """
        start_time = time.time()
        # TODO: update this to use health checks
        while time.time() - start_time < timeout_s:
            if proc.returncode is not None:
                # Process died: surface its output in the error for debugging.
                stdout = await proc.stdout.read() if proc.stdout else b""
                stderr = await proc.stderr.read() if proc.stderr else b""
                output = (
                    stderr.decode("utf-8", errors="ignore").strip()
                    or stdout.decode("utf-8", errors="ignore").strip()
                )
                raise RuntimeError(
                    f"HAProxy crashed during startup: {output or f'exit code {proc.returncode}'}"
                )
            if await self.is_running():
                return
            await asyncio.sleep(0.5)
        raise RuntimeError(
            f"HAProxy did not enter running state within {timeout_s} seconds."
        )

    def _generate_config_file_internal(self) -> None:
        """Internal config generation without locking (for use within locked sections)."""
        try:
            env = Environment()
            # Backends are sorted in decreasing order of length of path prefix
            # to ensure that the longest path prefix match is taken first.
            # Equal lengthed prefixes are then sorted alphabetically.
            backends = sorted(
                self.backend_configs.values(),
                key=lambda be: (-len(be.path_prefix), be.path_prefix),
            )
            # Enrich backends with precomputed health check configuration strings
            backends_with_health_config = [
                {
                    "backend": backend,
                    "health_config": backend.build_health_check_config(self.cfg),
                }
                for backend in backends
            ]
            health_route_info = self.cfg.build_health_route_info(backends)
            # Render healthz rules separately for readability/reuse
            healthz_template = env.from_string(HAPROXY_HEALTHZ_RULES_TEMPLATE)
            healthz_rules = healthz_template.render(
                {
                    "config": self.cfg,
                    "backends": backends,
                    "health_info": health_route_info,
                }
            )
            config_template = env.from_string(HAPROXY_CONFIG_TEMPLATE)
            config_content = config_template.render(
                {
                    "config": self.cfg,
                    "backends": backends,
                    "backends_with_health_config": backends_with_health_config,
                    "healthz_rules": healthz_rules,
                    "route_info": health_route_info,
                }
            )
            # Ensure the config ends with a newline
            if not config_content.endswith("\n"):
                config_content += "\n"
            # Use file locking to prevent concurrent writes from multiple processes
            # This is important in test environments where multiple nodes may run
            # on the same machine
            import fcntl

            lock_file_path = self.config_file_path + ".lock"
            with open(lock_file_path, "w") as lock_f:
                fcntl.flock(lock_f.fileno(), fcntl.LOCK_EX)
                try:
                    with open(self.config_file_path, "w") as f:
                        f.write(config_content)
                finally:
                    fcntl.flock(lock_f.fileno(), fcntl.LOCK_UN)
            logger.debug(
                f"Successfully generated HAProxy configuration: {self.config_file_path}."
            )
        except Exception as e:
            logger.error(f"Failed to create HAProxy configuration files: {e}")
            raise

    async def start(self) -> None:
        """
        Generate HAProxy configuration files and start the HAProxy server process.
        This method creates the necessary configuration files and launches the HAProxy
        process in foreground mode, ensuring that the proxy is running with the latest
        configuration and that the parent retains control of the subprocess handle.
        """
        try:
            async with self._config_lock:
                # Set initial reload ID if header injection is enabled and ID is not set
                if self.cfg.inject_process_id_header and self.cfg.reload_id is None:
                    self.cfg.reload_id = f"initial-{int(time.time() * 1000)}"
                self._generate_config_file_internal()
                logger.info("Successfully generated HAProxy config file.")
                self._proc = await self._start_and_wait_for_haproxy()
                logger.info("HAProxy started successfully.")
        except Exception as e:
            logger.error(f"Failed to initialize and start HAProxy configuration: {e}")
            raise

    async def get_all_stats(self) -> Dict[str, Dict[str, ServerStats]]:
        """Get statistics for all servers in all backends (implements abstract method).

        Returns only application backends configured in self.backend_configs,
        excluding HAProxy internal components (frontends, default_backend, stats).
        Also excludes BACKEND aggregate entries, returning only individual servers.
        """
        try:
            stats_output = await self._send_socket_command("show stat")
            all_stats = self._parse_haproxy_csv_stats(stats_output)
            # Filter to only return application backends (ones in backend_configs)
            # Exclude HAProxy internal components like frontends, default_backend, stats
            # Also exclude BACKEND aggregate entries, keep only individual servers
            return {
                backend_name: {
                    server_name: stats
                    for server_name, stats in servers.items()
                    if server_name != "BACKEND"
                }
                for backend_name, servers in all_stats.items()
                if backend_name in self.backend_configs
            }
        except Exception as e:
            logger.error(f"Failed to get HAProxy stats: {e}")
            return {}

    async def get_haproxy_stats(self) -> HAProxyStats:
        """Get complete HAProxy statistics including both individual and aggregate data."""
        server_stats = await self.get_all_stats()
        return HAProxyStats(backend_to_servers=server_stats)

    # TODO: use socket library instead of subprocess
    async def _send_socket_command(self, command: str) -> str:
        """Send a command to the HAProxy stats socket via subprocess.

        Raises:
            RuntimeError: If the socket is missing, socat fails, or the
                command times out.
        """
        try:
            # Check if a socket file exists
            if not os.path.exists(self.cfg.socket_path):
                raise RuntimeError(
                    f"HAProxy socket file does not exist: {self.cfg.socket_path}."
                )
            proc = await asyncio.create_subprocess_exec(
                "socat",
                "-",
                f"UNIX-CONNECT:{self.cfg.socket_path}",
                stdin=asyncio.subprocess.PIPE,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
            )
            try:
                stdout, stderr = await asyncio.wait_for(
                    proc.communicate(f"{command}\n".encode("utf-8")), timeout=5.0
                )
            except asyncio.TimeoutError:
                proc.kill()
                await proc.wait()
                raise RuntimeError(
                    f"Timeout while sending command '{command}' to HAProxy socket"
                )
            if proc.returncode != 0:
                err = stderr.decode("utf-8", errors="ignore").strip()
                raise RuntimeError(
                    f"Command '{command}' failed with code {proc.returncode}: {err}"
                )
            result = stdout.decode("utf-8", errors="ignore")
            logger.debug(f"Socket command '{command}' returned {len(result)} chars.")
            return result
        except Exception as e:
            # Chain the original exception so the root cause isn't lost.
            raise RuntimeError(f"Failed to send socket command '{command}': {e}") from e

    @staticmethod
    def _parse_haproxy_csv_stats(
        stats_output: str,
    ) -> Dict[str, Dict[str, ServerStats]]:
        """Parse HAProxy stats CSV output into structured data."""
        if not stats_output or not stats_output.strip():
            return {}
        # HAProxy stats start with '#' comment - replace with nothing for CSV parsing
        csv_data = stats_output.replace("# ", "", 1)
        backend_stats: Dict[str, Dict[str, ServerStats]] = {}

        def safe_int(v):
            # Missing or non-numeric stats fields default to 0.
            try:
                return int(v)
            except (TypeError, ValueError):
                return 0

        for row in csv.DictReader(io.StringIO(csv_data)):
            backend = row.get("pxname", "").strip()
            server = row.get("svname", "").strip()
            status = row.get("status", "").strip() or "UNKNOWN"
            if not backend or not server:
                continue
            backend_stats.setdefault(backend, {})
            backend_stats[backend][server] = ServerStats(
                backend=backend,
                server=server,
                status=status,
                current_sessions=safe_int(row.get("scur")),
                queued=safe_int(row.get("qcur")),
            )
        return backend_stats

    async def stop(self) -> None:
        """Kill the HAProxy process and any old processes left from reloads."""
        proc = self._proc
        if proc is None:
            logger.info("HAProxy process not running, skipping shutdown.")
            return
        try:
            # Kill the current process
            if proc.returncode is None:
                proc.kill()
                await proc.wait()
            self._proc = None
            # Also kill any old processes from graceful reloads that might still be running
            for old_proc in self._old_procs:
                try:
                    if old_proc.returncode is None:
                        old_proc.kill()
                        await old_proc.wait()
                        logger.info(f"Killed old HAProxy process (PID: {old_proc.pid})")
                except Exception as e:
                    logger.warning(f"Error killing old HAProxy process: {e}")
            self._old_procs.clear()
            logger.info("Stopped HAProxy process.")
        except RuntimeError as e:
            logger.error(f"Error during HAProxy shutdown: {e}")

    async def reload(self) -> None:
        """Regenerate the config file and gracefully reload HAProxy."""
        try:
            self._generate_config_file_internal()
            await self._graceful_reload()
        except Exception as e:
            # Chain the original exception so the root cause isn't lost.
            raise RuntimeError(f"Failed to update and reload HAProxy: {e}") from e

    async def disable(self) -> None:
        """Force haproxy health checks to fail."""
        try:
            # Disable health checks (set to fail)
            self.cfg.pass_health_checks = False
            # Regenerate the config file with the deny rule
            self._generate_config_file_internal()
            # Perform a graceful reload to apply changes
            await self._graceful_reload()
            logger.info("Successfully disabled health checks.")
        except Exception as e:
            logger.error(f"Failed to disable health checks: {e}")
            raise

    async def enable(self) -> None:
        """Force haproxy health checks to pass."""
        try:
            self.cfg.pass_health_checks = True
            self._generate_config_file_internal()
            # Perform a graceful reload to apply changes
            await self._graceful_reload()
            logger.info("Successfully enabled health checks.")
        except Exception as e:
            # Fixed copy-paste bug: previously logged "Failed to disable".
            logger.error(f"Failed to enable health checks: {e}")
            raise

    def set_backend_configs(
        self,
        backend_configs: Dict[str, BackendConfig],
    ) -> None:
        """Replace the backend configs and update the routes/servers flags.

        Note: does NOT regenerate the config file; callers must reload().
        """
        if backend_configs:
            self.cfg.has_received_routes = True
        self.backend_configs = backend_configs
        # Once servers have been seen, the flag stays True (sticky).
        self.cfg.has_received_servers = self.cfg.has_received_servers or any(
            len(bc.servers) > 0 for bc in backend_configs.values()
        )

    async def is_running(self) -> bool:
        """Return True if HAProxy responds on the admin socket."""
        try:
            await self._send_socket_command("show info")
            return True
        except Exception:
            # During reload or shutdown, socket can be temporarily unavailable.
            # Treat as unhealthy instead of raising.
            return False
@ray.remote(num_cpus=0)
class HAProxyManager(ProxyActorInterface):
def __init__(
self,
http_options: HTTPOptions,
grpc_options: gRPCOptions,
*,
node_id: NodeId,
node_ip_address: str,
logging_config: LoggingConfig,
long_poll_client: Optional[LongPollClient] = None,
): # noqa: F821
super().__init__(
node_id=node_id,
node_ip_address=node_ip_address,
logging_config=logging_config,
# HAProxyManager is not on the request path, so we can disable
# the buffer to ensure logs are immediately flushed.
log_buffer_size=1,
)
self._grpc_options = grpc_options
self._http_options = http_options
# The time when the node starts to drain.
# The node is not draining if it's None.
self._draining_start_time: Optional[float] = None
self.event_loop = get_or_create_event_loop()
self._target_groups: List[TargetGroup] = []
# Lock to serialize HAProxy reloads and prevent concurrent reload operations
# which can cause race conditions with SO_REUSEPORT
self._reload_lock = asyncio.Lock()
self.long_poll_client = long_poll_client or LongPollClient(
ray.get_actor(SERVE_CONTROLLER_NAME, namespace=SERVE_NAMESPACE),
{
LongPollNamespace.GLOBAL_LOGGING_CONFIG: self._update_logging_config,
LongPollNamespace.TARGET_GROUPS: self.update_target_groups,
},
call_in_event_loop=self.event_loop,
)
startup_msg = f"HAProxy starting on node {self._node_id} (HTTP port: {self._http_options.port})."
logger.info(startup_msg)
logger.debug(
f"Configure HAProxyManager actor {ray.get_runtime_context().get_actor_id()} "
f"logger with logging config: {logging_config}"
)
self._haproxy = HAProxyApi(cfg=HAProxyConfig(http_options=http_options))
self._haproxy_start_task = self.event_loop.create_task(self._haproxy.start())
async def shutdown(self) -> None:
"""Shutdown the HAProxyManager and clean up the HAProxy process.
This method should be called before the actor is killed to ensure
the HAProxy subprocess is properly terminated.
"""
try:
logger.info(
f"Shutting down HAProxyManager on node {self._node_id}.",
extra={"log_to_stderr": False},
)
await self._haproxy.stop()
logger.info(
f"Successfully stopped HAProxy process on node {self._node_id}.",
extra={"log_to_stderr": False},
)
except Exception as e:
raise RuntimeError(f"Error stopping HAProxy during shutdown: {e}")
async def ready(self) -> str:
try:
# Wait for haproxy to start. Internally, this starts the process and
# waits for it to be running by querying the stats socket.
await self._haproxy_start_task
except Exception as e:
logger.exception("Failed to start HAProxy.")
raise e from None
# Return proxy metadata used by the controller.
# NOTE(zcin): We need to convert the metadata to a json string because
# of cross-language scenarios. Java can't deserialize a Python tuple.
return json.dumps(
[
ray.get_runtime_context().get_worker_id(),
get_component_logger_file_path(),
]
)
async def serving(self, wait_for_applications_running: bool = True) -> None:
"""Wait for the HAProxy process to be ready to serve requests."""
if not wait_for_applications_running:
return
ready_to_serve = False
while not ready_to_serve:
if self._is_draining():
return
try:
all_backends = set()
ready_backends = set()
stats = await self._haproxy.get_all_stats()
for backend, servers in stats.items():
# The backend name is suffixed with the protocol. We omit
# grpc backends for now since they aren't supported yet.
if backend.lower().startswith("grpc"):
continue
all_backends.add(backend)
for server in servers.values():
if server.is_up:
ready_backends.add(backend)
ready_to_serve = all_backends == ready_backends
except Exception:
pass
if not ready_to_serve:
await asyncio.sleep(0.2)
def _is_draining(self) -> bool:
"""Whether is haproxy is in the draining status or not."""
return self._draining_start_time is not None
async def update_draining(
self, draining: bool, _after: Optional[Any] = None
) -> None:
"""Update the draining status of the proxy.
This is called by the proxy state manager
to drain or un-drain the haproxy.
"""
if draining and (not self._is_draining()):
logger.info(
f"Start to drain the HAProxy on node {self._node_id}.",
extra={"log_to_stderr": False},
)
# Use the reload lock to serialize with other HAProxy reload operations
async with self._reload_lock:
await self._haproxy.disable()
self._draining_start_time = time.time()
if (not draining) and self._is_draining():
logger.info(
f"Stop draining the HAProxy on node {self._node_id}.",
extra={"log_to_stderr": False},
)
# Use the reload lock to serialize with other HAProxy reload operations
async with self._reload_lock:
await self._haproxy.enable()
self._draining_start_time = None
async def is_drained(self, _after: Optional[Any] = None) -> bool:
"""Check whether the haproxy is drained or not.
An haproxy is drained if it has no ongoing requests
AND it has been draining for more than
`PROXY_MIN_DRAINING_PERIOD_S` seconds.
"""
if not self._is_draining():
return False
haproxy_stats = await self._haproxy.get_haproxy_stats()
return haproxy_stats.is_system_idle and (
(time.time() - self._draining_start_time) > PROXY_MIN_DRAINING_PERIOD_S
)
async def check_health(self) -> bool:
# If haproxy is already shutdown, return False.
if not self._haproxy or not self._haproxy._proc:
return False
logger.debug("Received health check.", extra={"log_to_stderr": False})
return await self._haproxy.is_running()
def pong(self) -> str:
pass
async def receive_asgi_messages(self, request_metadata: RequestMetadata) -> bytes:
raise NotImplementedError("Receive is handled by the ingress replicas.")
async def receive_grpc_messages(
self, session_id: str
) -> Tuple[bool, Optional[Any], bool]:
raise NotImplementedError("Receive is handled by the ingress replicas.")
def _get_http_options(self) -> HTTPOptions:
return self._http_options
def _get_logging_config(self) -> Optional[str]:
"""Get the logging configuration (for testing purposes)."""
log_file_path = None
for handler in logger.handlers:
if isinstance(handler, logging.handlers.MemoryHandler):
log_file_path = handler.target.baseFilename
return log_file_path
def _targets_to_servers(self, targets: List[Target]) -> List[ServerConfig]:
"""Convert a list of targets to a list of servers."""
# The server name is derived from the replica's actor name, with the
# format `SERVE_REPLICA::<app>#<deployment>#<replica_id>`, or the
# proxy's actor name, with the format `SERVE_PROXY_ACTOR-<node_id>`.
# Special characters in the names are converted to comply with haproxy
# config's allowed characters, e.g. `#` -> `-`.
return [
ServerConfig(
name=self.get_safe_name(target.name),
# Use localhost if target is on the same node as HAProxy
host="127.0.0.1" if target.ip == self._node_ip_address else target.ip,
port=target.port,
)
for target in targets
]
def _target_group_to_backend(self, target_group: TargetGroup) -> BackendConfig:
"""Convert a target group to a backend name."""
servers = self._targets_to_servers(target_group.targets)
# The name is lowercased and formatted as <protocol>-<app_name>. Special
# characters in the name are converted to comply with haproxy config's
# allowed characters, e.g. `#` -> `-`.
return BackendConfig(
name=self.get_safe_name(
f"{target_group.protocol.value.lower()}-{target_group.app_name}"
),
path_prefix=target_group.route_prefix,
servers=servers,
app_name=target_group.app_name,
)
async def _reload_haproxy(self) -> None:
# To avoid dropping updates from a long poll, we wait until HAProxy
# is up and running before attempting to generate config and reload.
# Use lock to serialize reloads and prevent race conditions with SO_REUSEPORT
async with self._reload_lock:
await self._haproxy_start_task
await self._haproxy.reload()
def update_target_groups(self, target_groups: List[TargetGroup]) -> None:
self._target_groups = target_groups
backend_configs = [
self._target_group_to_backend(target_group)
for target_group in target_groups
]
logger.info(
f"Got updated backend configs: {backend_configs}.",
extra={"log_to_stderr": True},
)
name_to_backend_configs = {
backend_config.name: backend_config for backend_config in backend_configs
}
self._haproxy.set_backend_configs(name_to_backend_configs)
self.event_loop.create_task(self._reload_haproxy())
def get_target_groups(self) -> List[TargetGroup]:
"""Get current target groups."""
return self._target_groups
@staticmethod
def get_safe_name(name: str) -> str:
"""Get a safe label name for the haproxy config."""
name = name.replace("#", "-").replace("/", ".")
# replace all remaining non-alphanumeric and non-{".", "_", "-"} with "_"
return re.sub(r"[^A-Za-z0-9._-]+", "_", name)
def _dump_ingress_replicas_for_testing(self, route: str) -> Set[ReplicaID]:
"""Return the set of replica IDs for targets matching the given route.
Args:
route: The route prefix to match against target groups.
Returns:
Set of ReplicaID objects for targets in the matching target group.
"""
replica_ids = set()
if self._target_groups is None:
return replica_ids
for target_group in self._target_groups:
if target_group.route_prefix == route:
for target in target_group.targets:
# Target names are in the format "SERVE_REPLICA::<app>#<deployment>#<replica_id>"
if ReplicaID.is_full_id_str(target.name):
replica_id = ReplicaID.from_full_id_str(target.name)
replica_ids.add(replica_id)
return replica_ids
def _dump_ingress_cache_for_testing(self, route: str) -> Set[ReplicaID]:
    """Return replica IDs that are cached/ready for the given route (for testing).

    For HAProxy, all registered replicas are immediately ready for routing
    (no warm-up cache like the internal router), so this returns the same
    set as _dump_ingress_replicas_for_testing.

    Args:
        route: The route prefix to match against target groups.

    Returns:
        Set of ReplicaID objects for targets in the matching target group.
    """
    # Pure delegation — see _dump_ingress_replicas_for_testing.
    return self._dump_ingress_replicas_for_testing(route)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/_private/haproxy.py",
"license": "Apache License 2.0",
"lines": 1015,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/serve/_private/haproxy_templates.py | HAPROXY_HEALTHZ_RULES_TEMPLATE = """ # Health check endpoint
acl healthcheck path -i {{ config.health_check_endpoint }}
# Suppress logging for health checks
http-request set-log-level silent if healthcheck
{%- if not health_info.healthy %}
# Override: force health checks to fail (used by drain/disable)
http-request return status {{ health_info.status }} content-type text/plain string "{{ health_info.health_message }}" if healthcheck
{%- elif backends %}
# 200 if any backend has at least one server UP
{%- for backend in backends %}
acl backend_{{ backend.name or 'unknown' }}_server_up nbsrv({{ backend.name or 'unknown' }}) ge 1
{%- endfor %}
# Any backend with a server UP passes the health check (OR logic)
{%- for backend in backends %}
http-request return status {{ health_info.status }} content-type text/plain string "{{ health_info.health_message }}" if healthcheck backend_{{ backend.name or 'unknown' }}_server_up
{%- endfor %}
http-request return status 503 content-type text/plain string "Service Unavailable" if healthcheck
{%- endif %}
"""
# Jinja2 template for the full haproxy.cfg. Context variables: `config`
# (global/timeout/port settings), `backends`, `backends_with_health_config`
# (pairs of backend + health-check config), `health_info`, `route_info`, and
# the pre-rendered `healthz_rules` fragment. Sections: global, defaults, a
# Prometheus exporter frontend, the main HTTP frontend with path-prefix
# routing, one backend per target group, and a stats listener. The string
# body is HAProxy config syntax — do not reformat.
HAPROXY_CONFIG_TEMPLATE = """global
# Log to the standard system log socket with debug level.
log /dev/log local0 debug
log 127.0.0.1:{{ config.syslog_port }} local0 debug
stats socket {{ config.socket_path }} mode 666 level admin expose-fd listeners
stats timeout 30s
maxconn {{ config.maxconn }}
nbthread {{ config.nbthread }}
{%- if config.enable_hap_optimization %}
server-state-base {{ config.server_state_base }}
server-state-file {{ config.server_state_file }}
{%- endif %}
{%- if config.hard_stop_after_s is not none %}
hard-stop-after {{ config.hard_stop_after_s }}s
{%- endif %}
defaults
mode http
option log-health-checks
{% if config.timeout_connect_s is not none %}timeout connect {{ config.timeout_connect_s }}s{% endif %}
{% if config.timeout_client_s is not none %}timeout client {{ config.timeout_client_s }}s{% endif %}
{% if config.timeout_server_s is not none %}timeout server {{ config.timeout_server_s }}s{% endif %}
{% if config.timeout_http_request_s is not none %}timeout http-request {{ config.timeout_http_request_s }}s{% endif %}
{% if config.timeout_http_keep_alive_s is not none %}timeout http-keep-alive {{ config.timeout_http_keep_alive_s }}s{% endif %}
{% if config.timeout_queue_s is not none %}timeout queue {{ config.timeout_queue_s }}s{% endif %}
log global
option httplog
option abortonclose
{%- if config.enable_hap_optimization %}
option idle-close-on-response
{%- endif %}
# Normalize 502 and 504 errors to 500 per Serve's default behavior
{%- if config.error_file_path %}
errorfile 502 {{ config.error_file_path }}
errorfile 504 {{ config.error_file_path }}
{%- endif %}
{%- if config.enable_hap_optimization %}
load-server-state-from-file global
{%- endif %}
frontend prometheus
bind :{{ config.metrics_port }}
mode http
http-request use-service prometheus-exporter if { path {{ config.metrics_uri }} }
no log
frontend http_frontend
bind {{ config.frontend_host }}:{{ config.frontend_port }}
{{ healthz_rules|safe }}
# Routes endpoint
acl routes path -i /-/routes
http-request return status {{ route_info.status }} content-type {{ route_info.routes_content_type }} string "{{ route_info.routes_message }}" if routes
{%- if config.inject_process_id_header and config.reload_id %}
# Inject unique reload ID as header to track which HAProxy instance handled the request (testing only)
http-request set-header x-haproxy-reload-id {{ config.reload_id }}
{%- endif %}
# Static routing based on path prefixes in decreasing length then alphabetical order
{%- for backend in backends %}
acl is_{{ backend.name or 'unknown' }} path_beg {{ '/' if not backend.path_prefix or backend.path_prefix == '/' else backend.path_prefix ~ '/' }}
acl is_{{ backend.name or 'unknown' }} path {{ backend.path_prefix or '/' }}
use_backend {{ backend.name or 'unknown' }} if is_{{ backend.name or 'unknown' }}
{%- endfor %}
default_backend default_backend
backend default_backend
http-request return status 404 content-type text/plain lf-string "Path \'%[path]\' not found. Ping http://.../-/routes for available routes."
{%- for item in backends_with_health_config %}
{%- set backend = item.backend %}
{%- set hc = item.health_config %}
backend {{ backend.name or 'unknown' }}
log global
balance leastconn
# Enable HTTP connection reuse for better performance
http-reuse always
# Set backend-specific timeouts, overriding defaults if specified
{%- if backend.timeout_connect_s is not none %}
timeout connect {{ backend.timeout_connect_s }}s
{%- endif %}
{%- if backend.timeout_server_s is not none %}
timeout server {{ backend.timeout_server_s }}s
{%- endif %}
{%- if backend.timeout_client_s is not none %}
timeout client {{ backend.timeout_client_s }}s
{%- endif %}
{%- if backend.timeout_http_request_s is not none %}
timeout http-request {{ backend.timeout_http_request_s }}s
{%- endif %}
{%- if backend.timeout_queue_s is not none %}
timeout queue {{ backend.timeout_queue_s }}s
{%- endif %}
# Set timeouts to support keep-alive connections
{%- if backend.timeout_http_keep_alive_s is not none %}
timeout http-keep-alive {{ backend.timeout_http_keep_alive_s }}s
{%- endif %}
{%- if backend.timeout_tunnel_s is not none %}
timeout tunnel {{ backend.timeout_tunnel_s }}s
{%- endif %}
# Health check configuration - use backend-specific or global defaults
{%- if hc.health_path %}
# HTTP health check with custom path
option httpchk GET {{ hc.health_path }}
http-check expect status 200
{%- endif %}
{{ hc.default_server_directive }}
# Servers in this backend
{%- for server in backend.servers %}
server {{ server.name }} {{ server.host }}:{{ server.port }} check
{%- endfor %}
{%- endfor %}
listen stats
bind *:{{ config.stats_port }}
stats enable
stats uri {{ config.stats_uri }}
stats refresh 1s
"""
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/_private/haproxy_templates.py",
"license": "Apache License 2.0",
"lines": 130,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:python/ray/serve/tests/test_haproxy.py | import asyncio
import logging
import subprocess
import sys
import threading
import time
from tempfile import NamedTemporaryFile
import httpx
import pytest
import requests
import ray
from ray import serve
from ray._common.test_utils import (
SignalActor,
wait_for_condition,
)
from ray.actor import ActorHandle
from ray.cluster_utils import Cluster
from ray.serve._private.constants import (
DEFAULT_UVICORN_KEEP_ALIVE_TIMEOUT_S,
RAY_SERVE_ENABLE_HA_PROXY,
SERVE_NAMESPACE,
)
from ray.serve._private.haproxy import HAProxyManager
from ray.serve._private.test_utils import get_application_url
from ray.serve.context import _get_global_client
from ray.serve.schema import (
ProxyStatus,
ServeDeploySchema,
ServeInstanceDetails,
)
from ray.serve.tests.conftest import * # noqa
from ray.serve.tests.test_cli_2 import ping_endpoint
from ray.tests.conftest import call_ray_stop_only # noqa: F401
from ray.util.state import list_actors
logger = logging.getLogger(__name__)
# Skip all tests in this module if the HAProxy feature flag is not enabled
pytestmark = pytest.mark.skipif(
    not RAY_SERVE_ENABLE_HA_PROXY,
    reason="RAY_SERVE_ENABLE_HA_PROXY not set.",
)
@pytest.fixture(autouse=True)
def clean_up_haproxy_processes():
    """Clean up haproxy processes before and after each test."""
    # Before test: kill any stray haproxy left over from a previous test run.
    subprocess.run(
        ["pkill", "-x", "haproxy"], capture_output=True, text=True, check=False
    )
    yield
    # After test: verify no haproxy processes are running
    result = subprocess.run(
        ["pgrep", "-x", "haproxy"], capture_output=True, text=True, check=False
    )
    # pgrep exits non-zero when nothing matched, i.e. no orphaned processes.
    assert (
        result.returncode != 0
    ), f"HAProxy processes still running after test: {result.stdout.strip()}"
@pytest.fixture
def shutdown_ray():
    """Guarantee Ray is stopped both before and after the test body runs."""
    if ray.is_initialized():
        ray.shutdown()
    yield
    if ray.is_initialized():
        ray.shutdown()
def test_deploy_with_no_applications(ray_shutdown):
    """Deploy an empty list of applications, serve should just be started."""
    ray.init(num_cpus=8)
    serve.start(http_options=dict(port=8003))
    client = _get_global_client()
    config = ServeDeploySchema.parse_obj({"applications": []})
    client.deploy_apps(config)

    def serve_running():
        # Parse purely for validation — raises if the details schema is broken.
        ServeInstanceDetails.parse_obj(
            ray.get(client._controller.get_serve_instance_details.remote())
        )
        actors = list_actors(
            filters=[
                ("ray_namespace", "=", SERVE_NAMESPACE),
                ("state", "=", "ALIVE"),
            ]
        )
        actor_names = [actor["class_name"] for actor in actors]
        # Controller and HAProxy manager must come up even with zero apps.
        return "ServeController" in actor_names and "HAProxyManager" in actor_names

    wait_for_condition(serve_running)
    client.shutdown()
def test_single_app_shutdown_actors(ray_shutdown):
    """Tests serve.shutdown() works correctly in single-app case

    Ensures that after deploying a (nameless) app using serve.run(), serve.shutdown()
    deletes all actors (controller, haproxy, all replicas) in the "serve" namespace.
    """
    address = ray.init(num_cpus=8)["address"]
    serve.start(http_options=dict(port=8003))

    @serve.deployment
    def f():
        pass

    serve.run(f.bind(), name="app")
    # Full actor set expected while the app is live.
    actor_names = {
        "ServeController",
        "HAProxyManager",
        "ServeReplica:app:f",
    }

    def check_alive():
        actors = list_actors(
            address=address,
            filters=[("ray_namespace", "=", SERVE_NAMESPACE), ("state", "=", "ALIVE")],
        )
        return {actor["class_name"] for actor in actors} == actor_names

    def check_dead():
        actors = list_actors(
            address=address,
            filters=[("ray_namespace", "=", SERVE_NAMESPACE), ("state", "=", "ALIVE")],
        )
        return len(actors) == 0

    wait_for_condition(check_alive)
    serve.shutdown()
    wait_for_condition(check_dead)
@pytest.mark.asyncio
async def test_single_app_shutdown_actors_async(ray_shutdown):
    """Tests serve.shutdown_async() works correctly in single-app case

    Ensures that after deploying a (nameless) app using serve.run(), serve.shutdown_async()
    deletes all actors (controller, haproxy, all replicas) in the "serve" namespace.
    """
    address = ray.init(num_cpus=8)["address"]
    serve.start(http_options=dict(port=8003))

    @serve.deployment
    def f():
        pass

    serve.run(f.bind(), name="app")
    # Full actor set expected while the app is live.
    actor_names = {
        "ServeController",
        "HAProxyManager",
        "ServeReplica:app:f",
    }

    def check_alive():
        actors = list_actors(
            address=address,
            filters=[("ray_namespace", "=", SERVE_NAMESPACE), ("state", "=", "ALIVE")],
        )
        return {actor["class_name"] for actor in actors} == actor_names

    def check_dead():
        actors = list_actors(
            address=address,
            filters=[("ray_namespace", "=", SERVE_NAMESPACE), ("state", "=", "ALIVE")],
        )
        return len(actors) == 0

    wait_for_condition(check_alive)
    # Async variant of serve.shutdown() — the only difference from the sync test.
    await serve.shutdown_async()
    wait_for_condition(check_dead)
def test_haproxy_subprocess_killed_on_manager_shutdown(ray_shutdown):
    """Test that the HAProxy subprocess is killed when the HAProxyManager actor is shutdown.

    This ensures proper cleanup of HAProxy processes when the manager is killed,
    preventing orphaned HAProxy processes.
    """

    def get_haproxy_pids():
        """Get all haproxy process PIDs."""
        result = subprocess.run(
            ["pgrep", "-x", "haproxy"], capture_output=True, text=True, timeout=2
        )
        if result.returncode == 0 and result.stdout.strip():
            return [int(pid) for pid in result.stdout.strip().split("\n")]
        return []

    # Baseline: no haproxy processes before the app is deployed.
    wait_for_condition(
        lambda: len(get_haproxy_pids()) == 0, timeout=5, retry_interval_ms=100
    )

    @serve.deployment
    def hello():
        return "hello"

    serve.run(hello.bind())
    # Deploy spawns exactly one haproxy subprocess.
    wait_for_condition(
        lambda: len(get_haproxy_pids()) == 1, timeout=10, retry_interval_ms=100
    )
    serve.shutdown()
    # Shutdown must reap the subprocess — no orphans left behind.
    wait_for_condition(
        lambda: len(get_haproxy_pids()) == 0, timeout=10, retry_interval_ms=100
    )
# TODO(alexyang): Delete these tests and run test_proxy.py instead once HAProxy is fully supported.
class TestTimeoutKeepAliveConfig:
    """Test setting keep_alive_timeout_s in config and env."""

    def get_proxy_actor(self) -> ActorHandle:
        # Exactly one HAProxyManager is expected; unpacking enforces that.
        [proxy_actor] = list_actors(filters=[("class_name", "=", "HAProxyManager")])
        return ray.get_actor(proxy_actor.name, namespace=SERVE_NAMESPACE)

    def test_default_keep_alive_timeout_s(self, ray_shutdown):
        """Test when no keep_alive_timeout_s is set.

        When the keep_alive_timeout_s is not set, the uvicorn keep alive is 5.
        """
        serve.start()
        proxy_actor = self.get_proxy_actor()
        assert (
            ray.get(proxy_actor._get_http_options.remote()).keep_alive_timeout_s
            == DEFAULT_UVICORN_KEEP_ALIVE_TIMEOUT_S
        )

    def test_set_keep_alive_timeout_in_http_configs(self, ray_shutdown):
        """Test when keep_alive_timeout_s is in http configs.

        When the keep_alive_timeout_s is set in http configs, the uvicorn keep alive
        is set correctly.
        """
        keep_alive_timeout_s = 222
        serve.start(http_options={"keep_alive_timeout_s": keep_alive_timeout_s})
        proxy_actor = self.get_proxy_actor()
        assert (
            ray.get(proxy_actor._get_http_options.remote()).keep_alive_timeout_s
            == keep_alive_timeout_s
        )

    @pytest.mark.parametrize(
        "ray_instance",
        [
            {"RAY_SERVE_HTTP_KEEP_ALIVE_TIMEOUT_S": "333"},
        ],
        indirect=True,
    )
    def test_set_keep_alive_timeout_in_env(self, ray_instance, ray_shutdown):
        """Test when keep_alive_timeout_s is in env.

        When the keep_alive_timeout_s is set in env, the uvicorn keep alive
        is set correctly.
        """
        serve.start()
        proxy_actor = self.get_proxy_actor()
        assert (
            ray.get(proxy_actor._get_http_options.remote()).keep_alive_timeout_s == 333
        )

    @pytest.mark.parametrize(
        "ray_instance",
        [
            {"RAY_SERVE_HTTP_KEEP_ALIVE_TIMEOUT_S": "333"},
        ],
        indirect=True,
    )
    def test_set_timeout_keep_alive_in_both_config_and_env(
        self, ray_instance, ray_shutdown
    ):
        """Test when keep_alive_timeout_s is in both http configs and env.

        When the keep_alive_timeout_s is set in env, the uvicorn keep alive
        is set to the one in env.
        """
        # Env var takes precedence over the http_options value.
        keep_alive_timeout_s = 222
        serve.start(http_options={"keep_alive_timeout_s": keep_alive_timeout_s})
        proxy_actor = self.get_proxy_actor()
        assert (
            ray.get(proxy_actor._get_http_options.remote()).keep_alive_timeout_s == 333
        )
@pytest.mark.asyncio
async def test_drain_and_undrain_haproxy_manager(
    monkeypatch, shutdown_ray, call_ray_stop_only  # noqa: F811
):
    """Test the state transition of the haproxy manager between
    HEALTHY, DRAINING and DRAINED
    """
    monkeypatch.setenv("RAY_SERVE_PROXY_MIN_DRAINING_PERIOD_S", "10")
    monkeypatch.setenv("SERVE_SOCKET_REUSE_PORT_ENABLED", "1")
    cluster = Cluster()
    head_node = cluster.add_node(num_cpus=0)
    cluster.add_node(num_cpus=1)
    cluster.add_node(num_cpus=1)
    cluster.wait_for_nodes()
    ray.init(address=head_node.address)
    serve.start(http_options={"location": "EveryNode"})
    signal_actor = SignalActor.remote()

    @serve.deployment
    class HelloModel:
        async def __call__(self):
            # Block until the signal is sent so a request can be held in
            # flight while a proxy drains.
            await signal_actor.wait.remote()
            return "hello"

    serve.run(HelloModel.options(num_replicas=2).bind())
    # 3 proxies, 1 controller, 2 replicas, 1 signal actor
    wait_for_condition(lambda: len(list_actors()) == 7)
    assert len(ray.nodes()) == 3
    client = _get_global_client()
    serve_details = ServeInstanceDetails(
        **ray.get(client._controller.get_serve_instance_details.remote())
    )
    proxy_actor_ids = {proxy.actor_id for _, proxy in serve_details.proxies.items()}
    assert len(proxy_actor_ids) == 3
    # Start a long-running request in background to test draining behavior
    request_result = []

    def make_blocking_request():
        try:
            response = httpx.get("http://localhost:8000/", timeout=5)
            request_result.append(("success", response.status_code))
        except Exception as e:
            request_result.append(("error", str(e)))

    request_thread = threading.Thread(target=make_blocking_request)
    request_thread.start()
    wait_for_condition(
        lambda: ray.get(signal_actor.cur_num_waiters.remote()) >= 1, timeout=10
    )
    # Scale down so one node loses its replica and its proxy starts draining.
    serve.run(HelloModel.options(num_replicas=1).bind())

    # 1 proxy should be draining
    def check_proxy_status(proxy_status_to_count):
        """Return True iff observed proxy-status counts match the expected map.

        BUG FIX: this previously returned a (bool, dict) tuple, which is
        always truthy — every `wait_for_condition(check_proxy_status, ...)`
        and the bare `assert check_proxy_status(...)` below passed vacuously.
        It must return a plain bool for both call sites to actually check.
        """
        serve_details = ServeInstanceDetails(
            **ray.get(client._controller.get_serve_instance_details.remote())
        )
        proxy_status_list = [proxy.status for _, proxy in serve_details.proxies.items()]
        current_status = {
            status: proxy_status_list.count(status) for status in proxy_status_list
        }
        if current_status != proxy_status_to_count:
            # Surface the mismatch for debugging timed-out waits.
            logger.info(f"Proxy status counts: {current_status}")
            return False
        return True

    wait_for_condition(
        condition_predictor=check_proxy_status,
        proxy_status_to_count={ProxyStatus.HEALTHY: 2, ProxyStatus.DRAINING: 1},
    )
    # should stay in draining status until the signal is sent
    await asyncio.sleep(1)
    assert check_proxy_status(
        proxy_status_to_count={ProxyStatus.HEALTHY: 2, ProxyStatus.DRAINING: 1}
    )
    serve.run(HelloModel.options(num_replicas=2).bind())
    # The proxy should return to healthy status
    wait_for_condition(
        condition_predictor=check_proxy_status,
        proxy_status_to_count={ProxyStatus.HEALTHY: 3},
    )
    # The same proxy actors must have been reused, not restarted.
    serve_details = ServeInstanceDetails(
        **ray.get(client._controller.get_serve_instance_details.remote())
    )
    assert {
        proxy.actor_id for _, proxy in serve_details.proxies.items()
    } == proxy_actor_ids
    serve.run(HelloModel.options(num_replicas=1).bind())
    await signal_actor.send.remote()
    # 1 proxy should be draining and eventually be drained.
    wait_for_condition(
        condition_predictor=check_proxy_status,
        timeout=40,
        proxy_status_to_count={ProxyStatus.HEALTHY: 2},
    )
    # Wait for the background request thread to finish before shutdown.
    request_thread.join(timeout=5)
    # Clean up serve.
    serve.shutdown()
def test_haproxy_failure(ray_shutdown):
    """Test HAProxyManager is successfully restarted after being killed."""
    ray.init(num_cpus=1)
    serve.start()

    @serve.deployment(name="proxy_failure")
    def function(_):
        return "hello1"

    serve.run(function.bind())

    def check_proxy_alive():
        actors = list_actors(
            filters=[("ray_namespace", "=", SERVE_NAMESPACE), ("state", "=", "ALIVE")],
        )
        return "HAProxyManager" in {actor["class_name"] for actor in actors}

    wait_for_condition(check_proxy_alive)
    [proxy_actor] = list_actors(
        filters=[("class_name", "=", "HAProxyManager"), ("state", "=", "ALIVE")]
    )
    proxy_actor_id = proxy_actor.actor_id
    proxy_actor = ray.get_actor(proxy_actor.name, namespace=SERVE_NAMESPACE)
    # no_restart=False lets Ray's actor fault tolerance bring the manager back.
    ray.kill(proxy_actor, no_restart=False)

    def check_new_proxy():
        proxies = list_actors(
            filters=[("class_name", "=", "HAProxyManager"), ("state", "=", "ALIVE")]
        )
        # A fresh actor ID proves the manager was actually restarted.
        return len(proxies) == 1 and proxies[0].actor_id != proxy_actor_id

    wait_for_condition(check_new_proxy, timeout=45)
    serve.shutdown()
def test_haproxy_get_target_groups(shutdown_ray):
    """Test that haproxy get_target_groups retrieves the correct target groups."""
    ray.init(num_cpus=4)
    serve.start()

    @serve.deployment
    def function(_):
        return "hello1"

    # Deploy the application
    serve.run(
        function.options(num_replicas=1).bind(), name="test_app", route_prefix="/test"
    )

    def check_proxy_alive():
        actors = list_actors(
            filters=[("ray_namespace", "=", SERVE_NAMESPACE), ("state", "=", "ALIVE")],
        )
        return "HAProxyManager" in {actor["class_name"] for actor in actors}

    wait_for_condition(check_proxy_alive)
    [proxy_actor] = list_actors(
        filters=[("class_name", "=", "HAProxyManager"), ("state", "=", "ALIVE")]
    )
    proxy_actor = ray.get_actor(proxy_actor.name, namespace=SERVE_NAMESPACE)

    def has_n_targets(route_prefix: str, n: int):
        # True iff some target group for this route has exactly n targets.
        target_groups = ray.get(proxy_actor.get_target_groups.remote())
        for tg in target_groups:
            if tg.route_prefix == route_prefix and len(tg.targets) == n:
                return True
        return False

    wait_for_condition(has_n_targets, route_prefix="/test", n=1)
    # Re-deploying the same app under a new route should update the groups.
    serve.run(
        function.options(num_replicas=2).bind(), name="test_app", route_prefix="/test2"
    )
    wait_for_condition(has_n_targets, route_prefix="/test2", n=2)
    serve.shutdown()
@pytest.mark.asyncio
async def test_haproxy_update_target_groups(ray_shutdown):
    """Test that the haproxy correctly updates the target groups."""
    ray.init(num_cpus=4)
    serve.start(http_options={"host": "0.0.0.0"})

    @serve.deployment
    def function(_):
        return "hello1"

    # Single app: its route resolves, others 404.
    serve.run(
        function.options(num_replicas=1).bind(), name="app1", route_prefix="/test"
    )
    assert httpx.get("http://localhost:8000/test").text == "hello1"
    assert httpx.get("http://localhost:8000/test2").status_code == 404
    # Second app: both routes resolve.
    serve.run(
        function.options(num_replicas=1).bind(), name="app2", route_prefix="/test2"
    )
    assert httpx.get("http://localhost:8000/test").text == "hello1"
    assert httpx.get("http://localhost:8000/test2").text == "hello1"
    # Deleting app1 removes only its route.
    serve.delete("app1")
    assert httpx.get("http://localhost:8000/test").status_code == 404
    assert httpx.get("http://localhost:8000/test2").text == "hello1"
    # Re-adding app1 restores it.
    serve.run(
        function.options(num_replicas=1).bind(), name="app1", route_prefix="/test"
    )
    assert httpx.get("http://localhost:8000/test").text == "hello1"
    assert httpx.get("http://localhost:8000/test2").text == "hello1"
    serve.shutdown()
@pytest.mark.asyncio
async def test_haproxy_update_draining_health_checks(ray_shutdown):
    """Test that the haproxy update_draining method updates the HAProxy health checks."""
    ray.init(num_cpus=4)
    serve.start()
    signal_actor = SignalActor.remote()

    @serve.deployment
    async def function(_):
        await signal_actor.wait.remote()
        return "hello1"

    serve.run(function.bind())

    def check_proxy_alive():
        actors = list_actors(
            filters=[("ray_namespace", "=", SERVE_NAMESPACE), ("state", "=", "ALIVE")],
        )
        return "HAProxyManager" in {actor["class_name"] for actor in actors}

    wait_for_condition(check_proxy_alive)
    [proxy_actor] = list_actors(
        filters=[("class_name", "=", "HAProxyManager"), ("state", "=", "ALIVE")]
    )
    proxy_actor = ray.get_actor(proxy_actor.name, namespace=SERVE_NAMESPACE)
    # Healthy before draining starts.
    assert httpx.get("http://localhost:8000/-/healthz").status_code == 200
    # Draining forces the health check endpoint to report 503.
    await proxy_actor.update_draining.remote(draining=True)
    wait_for_condition(
        lambda: httpx.get("http://localhost:8000/-/healthz").status_code == 503
    )
    # Undraining restores 200 and clears the internal draining flag.
    await proxy_actor.update_draining.remote(draining=False)
    wait_for_condition(
        lambda: httpx.get("http://localhost:8000/-/healthz").status_code == 200
    )
    assert not await proxy_actor._is_draining.remote()
    serve.shutdown()
def test_haproxy_http_options(ray_shutdown):
    """Test that the haproxy config file is generated correctly with http options."""
    ray.init(num_cpus=4)
    serve.start(
        http_options={
            "host": "0.0.0.0",
            "port": 8001,
            "keep_alive_timeout_s": 30,
        },
    )

    @serve.deployment
    def function(_):
        return "hello1"

    serve.run(function.bind(), name="test_app", route_prefix="/test")
    url = get_application_url(app_name="test_app", use_localhost=False)
    assert httpx.get(url).text == "hello1"
    # The default port 8000 must not be bound when 8001 was configured.
    with pytest.raises(httpx.ConnectError):
        _ = httpx.get(url.replace(":8001", ":8000")).status_code
    serve.shutdown()
def test_haproxy_metrics(ray_shutdown):
    """Test that the haproxy metrics are exported correctly."""
    ray.init(num_cpus=4)
    serve.start(
        http_options={
            "host": "0.0.0.0",
        },
    )

    @serve.deployment
    def function(_):
        return "hello1"

    serve.run(function.bind())
    assert httpx.get("http://localhost:8000/").text == "hello1"
    # Prometheus exporter frontend listens on port 9101.
    metrics_response = httpx.get("http://localhost:9101/metrics")
    assert metrics_response.status_code == 200
    # The single successful request above must be counted as a 2xx response.
    http_backend_metrics = (
        'haproxy_backend_http_responses_total{proxy="http-default",code="2xx"} 1'
    )
    assert http_backend_metrics in metrics_response.text
    serve.shutdown()
def test_haproxy_safe_name():
    """Test that the safe name is generated correctly."""
    # Allowed characters pass through untouched; "#" -> "-", "/" -> ".",
    # anything else -> "_".
    assert HAProxyManager.get_safe_name("HTTP-test_foo.bar") == "HTTP-test_foo.bar"
    assert HAProxyManager.get_safe_name("HTTP:test") == "HTTP_test"
    assert HAProxyManager.get_safe_name("HTTP:test/foo") == "HTTP_test.foo"
    assert HAProxyManager.get_safe_name("replica#abc") == "replica-abc"
@pytest.mark.skipif(sys.platform == "win32", reason="File path incorrect on Windows.")
def test_build_multi_app(ray_start_stop):
    """`serve build` + `serve deploy` round-trip for two apps via the CLI."""
    with NamedTemporaryFile(mode="w+", suffix=".yaml") as tmp:
        print('Building nodes "TestApp1Node" and "TestApp2Node".')
        # Build an app
        subprocess.check_output(
            [
                "serve",
                "build",
                "ray.serve.tests.test_cli_3.TestApp1Node",
                "ray.serve.tests.test_cli_3.TestApp2Node",
                "-o",
                tmp.name,
            ]
        )
        print("Build succeeded! Deploying node.")
        subprocess.check_output(["serve", "deploy", tmp.name])
        print("Deploy succeeded!")
        wait_for_condition(
            lambda: ping_endpoint("app1") == "wonderful world", timeout=15
        )
        print("App 1 is live and reachable over HTTP.")
        wait_for_condition(
            lambda: ping_endpoint("app2") == "wonderful world", timeout=15
        )
        print("App 2 is live and reachable over HTTP.")
        print("Deleting applications.")
        # Capture the URLs before shutdown so we can probe them afterwards.
        app_urls = [
            get_application_url("HTTP", app_name=app) for app in ["app1", "app2"]
        ]
        subprocess.check_output(["serve", "shutdown", "-y"])

        def check_no_apps():
            for url in app_urls:
                with pytest.raises(httpx.HTTPError):
                    _ = httpx.get(url).text
            return True

        wait_for_condition(check_no_apps, timeout=15)
        print("Delete succeeded! Node is no longer reachable over HTTP.")
def test_haproxy_manager_ready_with_application(ray_shutdown):
    """Test that HAProxyManager.ready() succeeds when an application is deployed."""
    ray.init(num_cpus=4)
    serve.start()

    @serve.deployment
    def function(_):
        return "hello"

    # Deploy application
    serve.run(function.bind(), name="test_app", route_prefix="/test")

    # Get HAProxyManager actor
    def check_proxy_alive():
        actors = list_actors(
            filters=[("ray_namespace", "=", SERVE_NAMESPACE), ("state", "=", "ALIVE")],
        )
        return "HAProxyManager" in {actor["class_name"] for actor in actors}

    wait_for_condition(check_proxy_alive)
    [proxy_actor] = list_actors(
        filters=[("class_name", "=", "HAProxyManager"), ("state", "=", "ALIVE")]
    )
    proxy_actor = ray.get_actor(proxy_actor.name, namespace=SERVE_NAMESPACE)
    # Call ready() - should succeed with active targets
    ready_result = ray.get(proxy_actor.ready.remote())
    assert ready_result is not None
    wait_for_condition(lambda: httpx.get("http://localhost:8000/test").text == "hello")
    serve.shutdown()
def test_504_error_translated_to_500(ray_shutdown, monkeypatch):
    """Test that HAProxy translates 504 Gateway Timeout errors to 500 Internal Server Error."""
    # Shrink HAProxy's server/connect timeouts so the 3s handler trips them.
    monkeypatch.setenv("RAY_SERVE_HAPROXY_TIMEOUT_SERVER_S", "2")
    monkeypatch.setenv("RAY_SERVE_HAPROXY_TIMEOUT_CONNECT_S", "1")
    ray.init(num_cpus=8)
    serve.start(http_options=dict(port=8003))

    @serve.deployment
    class TimeoutDeployment:
        def __call__(self, request):
            # Sleep for 3 seconds, longer than HAProxy's 2s timeout
            # Use regular time.sleep (not async) to avoid event loop issues
            time.sleep(3)
            return "This should not be reached"

    serve.run(TimeoutDeployment.bind(), name="timeout_app", route_prefix="/test")
    url = get_application_url("HTTP", app_name="timeout_app")
    # HAProxy should timeout after 2s and return 504->500
    # Client timeout is 10s to ensure HAProxy times out first
    response = requests.get(f"{url}/test", timeout=10)
    # Verify we got 500 (translated from 504), not 504 or 200
    assert (
        response.status_code == 500
    ), f"Expected 500 Internal Server Error (translated from 504), got {response.status_code}"
    assert (
        "Internal Server Error" in response.text
    ), f"Response should contain 'Internal Server Error' message, got: {response.text}"
def test_502_error_translated_to_500(ray_shutdown):
    """Test that HAProxy translates 502 Bad Gateway errors to 500 Internal Server Error."""
    ray.init(num_cpus=8)
    serve.start(http_options=dict(port=8003))

    @serve.deployment
    class BrokenDeployment:
        def __call__(self, request):
            # Always raise an exception to simulate backend failure
            raise RuntimeError("Simulated backend failure for 502 error")

    serve.run(
        BrokenDeployment.bind(), name="broken_app", route_prefix="/test", blocking=False
    )
    url = get_application_url("HTTP", app_name="broken_app")
    response = requests.get(f"{url}/test", timeout=5)
    # The 502 from the failing backend must surface to clients as a 500.
    assert (
        response.status_code == 500
    ), f"Expected 500 Internal Server Error, got {response.status_code}"
    assert (
        "Internal Server Error" in response.text
    ), "Response should contain 'Internal Server Error' message"
def test_haproxy_healthcheck_multiple_apps_and_backends(ray_shutdown):
    """Health check behavior with 3 apps and 2 servers per backend.

    Expectations:
    - With two servers per backend, healthz returns 200 (all backends have a primary UP).
    - Disabling one primary in each backend keeps health at 200 (the other primary is UP).
    - Disabling all servers in each backend results in healthz 503.
    """
    ray.init(num_cpus=8)
    serve.start()

    @serve.deployment
    def f(_):
        return "hello"

    # Helpers
    SOCKET_PATH = "/tmp/haproxy-serve/admin.sock"

    def app_to_backend(app: str) -> str:
        # Serve names HAProxy backends "http-<app_name>".
        return f"http-{app}"

    def haproxy_show_stat() -> str:
        # Query HAProxy's runtime API over its admin socket via socat.
        result = subprocess.run(
            f'echo "show stat" | socat - {SOCKET_PATH}',
            shell=True,
            capture_output=True,
            text=True,
            timeout=5,
        )
        if result.returncode != 0:
            raise RuntimeError(f"Failed to query HAProxy stats: {result.stderr}")
        return result.stdout

    def list_primary_servers(backend_name: str) -> list:
        # CSV rows: column 0 is the proxy name, column 1 the server name;
        # FRONTEND/BACKEND are aggregate rows, not real servers.
        lines = haproxy_show_stat().strip().split("\n")
        servers = []
        for line in lines:
            parts = line.split(",")
            if len(parts) < 2:
                continue
            pxname, svname = parts[0], parts[1]
            if pxname == backend_name and svname not in [
                "FRONTEND",
                "BACKEND",
            ]:
                servers.append(svname)
        return servers

    def set_server_state(backend: str, server: str, state: str) -> None:
        # state is "maint" (disable) or "ready" (re-enable).
        subprocess.run(
            f'echo "set server {backend}/{server} state {state}" | socat - {SOCKET_PATH}',
            shell=True,
            capture_output=True,
            timeout=5,
        )

    def wait_health(expected: int, timeout: float = 15.0) -> None:
        wait_for_condition(
            lambda: httpx.get("http://localhost:8000/-/healthz").status_code
            == expected,
            timeout=timeout,
        )

    # Deploy 3 apps, each with 2 replicas (servers) so each backend has 2 servers + 1 backup
    apps = [
        ("app_a", "/a"),
        ("app_b", "/b"),
        ("app_c", "/c"),
    ]
    for app_name, route in apps:
        serve.run(f.options(num_replicas=2).bind(), name=app_name, route_prefix=route)
    # Wait for all endpoints to be reachable
    for _, route in apps:
        wait_for_condition(
            lambda r=route: httpx.get(f"http://localhost:8000{r}").text == "hello"
        )
    # Wait until each backend shows 2 primary servers in HAProxy stats
    backends = [app_to_backend(app) for app, _ in apps]
    for be in backends:
        wait_for_condition(lambda b=be: len(list_primary_servers(b)) >= 2, timeout=20)
    # Initially healthy
    wait_health(200, timeout=20)
    # Disable one primary per backend, should remain healthy (one primary still UP)
    disabled_servers = []
    for be in backends:
        servers = list_primary_servers(be)
        set_server_state(be, servers[0], "maint")
        disabled_servers.append((be, servers[0]))
    wait_health(200, timeout=20)
    # Disable the remaining primary per backend, should become unhealthy (no servers UP)
    disabled_all = []
    for be in backends:
        servers = list_primary_servers(be)
        # Disable any remaining primary (skip ones already disabled)
        for sv in servers:
            if (be, sv) not in disabled_servers:
                set_server_state(be, sv, "maint")
                disabled_all.append((be, sv))
                break
    wait_health(503, timeout=20)
    # Re-enable all servers and expect health back to 200
    for be, sv in disabled_servers + disabled_all:
        set_server_state(be, sv, "ready")
    wait_health(200, timeout=20)
    # Sanity: all apps still respond
    for _, route in apps:
        resp = httpx.get(f"http://localhost:8000{route}")
        assert resp.status_code == 200 and resp.text == "hello"
    serve.shutdown()
def test_haproxy_empty_backends_for_scaled_down_apps(ray_shutdown):
    """Test that HAProxy has no backend servers for deleted apps.

    Verifies that when RAY_SERVE_ENABLE_HA_PROXY is True and apps are
    deleted, the HAProxy stats show the backend is removed or has no servers.
    """
    ray.init(num_cpus=4)
    serve.start()

    @serve.deployment
    def hello():
        return "hello"

    # Deploy app with 1 replica
    serve.run(
        hello.options(num_replicas=1).bind(), name="test_app", route_prefix="/test"
    )
    r = httpx.get("http://localhost:8000/test")
    assert r.status_code == 200
    assert r.text == "hello"
    # Delete the app - this should remove or empty the backend
    serve.delete("test_app")
    # The route must no longer resolve once the backend is gone.
    r = httpx.get("http://localhost:8000/test")
    assert r.status_code == 404
    serve.shutdown()
if __name__ == "__main__":
    # Allow running this test module directly (outside of a pytest invocation).
    sys.exit(pytest.main(["-v", "-s", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/test_haproxy.py",
"license": "Apache License 2.0",
"lines": 723,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/serve/tests/test_haproxy_api.py | import asyncio
import logging
import os
import subprocess
import sys
import tempfile
import threading
import time
from typing import Optional
from unittest import mock
import pytest
import pytest_asyncio
import requests
import uvicorn
from fastapi import FastAPI, Request, Response
from ray._common.test_utils import async_wait_for_condition, wait_for_condition
from ray.serve._private.constants import (
RAY_SERVE_ENABLE_HA_PROXY,
)
from ray.serve._private.haproxy import (
BackendConfig,
HAProxyApi,
HAProxyConfig,
ServerConfig,
)
from ray.serve.config import HTTPOptions
# Module-level logger used by the cleanup fixture below.
logger = logging.getLogger(__name__)

# Skip all tests in this module if the HAProxy feature flag is not enabled
pytestmark = pytest.mark.skipif(
    not RAY_SERVE_ENABLE_HA_PROXY,
    reason="RAY_SERVE_ENABLE_HA_PROXY not set.",
)

# ACL names emitted by the config generator that are not backend routing
# rules; config-parsing tests skip these entries.
EXCLUDED_ACL_NAMES = ("healthcheck", "routes")
def check_haproxy_ready(stats_port: int, timeout: int = 2) -> bool:
    """Return True once HAProxy's stats endpoint answers with HTTP 200."""
    url = f"http://127.0.0.1:{stats_port}/stats"
    try:
        resp = requests.get(url, timeout=timeout)
    except Exception:
        # Connection failures simply mean HAProxy is not up yet.
        return False
    return resp.status_code == 200
def create_test_backend_server(port: int):
    """Create a test backend server with slow and fast endpoints using uvicorn.

    Returns:
        Tuple of (uvicorn.Server, threading.Thread). Callers shut the server
        down by setting ``server.should_exit = True`` and joining the thread.
    """
    app = FastAPI()

    @app.get("/-/healthz")
    async def health_endpoint():
        # Same path HAProxy's default health checks probe.
        return {"status": "OK"}

    @app.get("/slow")
    async def slow_endpoint():
        await asyncio.sleep(3)  # 3-second delay
        return "Slow response completed"

    @app.get("/fast")
    async def fast_endpoint(req: Request, res: Response):
        # Echo the reload-id header so tests can tell which HAProxy process
        # (pre- or post-reload) proxied the request.
        res.headers["x-haproxy-reload-id"] = req.headers.get("x-haproxy-reload-id", "")
        return "Fast response"

    # Configure uvicorn server with 60s keep-alive timeout
    config = uvicorn.Config(
        app=app,
        host="127.0.0.1",
        port=port,
        log_level="error",  # Reduce log noise
        access_log=False,
        timeout_keep_alive=60,  # 60 seconds keep-alive timeout
    )
    server = uvicorn.Server(config)

    # Run server in a separate thread
    def run_server():
        asyncio.run(server.serve())

    thread = threading.Thread(target=run_server, daemon=True)
    thread.start()

    # Wait for the server to start
    def wait_for_server():
        r = requests.get(f"http://127.0.0.1:{port}/-/healthz")
        assert r.status_code == 200
        return True

    wait_for_condition(wait_for_server)
    return server, thread
def process_exists(pid: int) -> bool:
    """Return True if a process with the given PID currently exists.

    Sends signal 0, which performs the existence/permission check without
    actually delivering a signal to the target process.
    """
    try:
        os.kill(pid, 0)
    except ProcessLookupError:
        # ESRCH: no such process.
        return False
    except PermissionError:
        # EPERM: the process exists but is owned by another user. The
        # previous implementation lumped this into OSError and wrongly
        # reported the process as gone.
        return True
    except OSError:
        # Any other OS-level failure: treat as not running.
        return False
    return True
def make_test_request(
    url: str,
    track_results: Optional[list] = None,
    signal_started: Optional[threading.Event] = None,
    timeout: int = 10,
):
    """Issue a GET request to ``url``, optionally recording the outcome.

    Args:
        url: Target URL.
        track_results: If provided, a dict describing the response
            (``status``, ``duration``, ``content``) or the failure
            (``error``) is appended to it. (Annotation fixed to Optional:
            the default is None, per PEP 484.)
        signal_started: If provided, set just before the request is issued
            so a coordinating thread can tell the request is in flight.
        timeout: Per-request timeout in seconds.
    """
    try:
        if signal_started:
            signal_started.set()  # Signal that request has started
        start_time = time.time()
        response = requests.get(url, timeout=timeout)
        end_time = time.time()
        if track_results is not None:
            track_results.append(
                {
                    "status": response.status_code,
                    "duration": end_time - start_time,
                    "content": response.content,
                }
            )
    except Exception as ex:
        # Failures are reported through track_results rather than raised, so
        # background request threads never kill the test process.
        if track_results is not None:
            track_results.append({"error": str(ex)})
@pytest.fixture(autouse=True)
def clean_up_haproxy_processes():
    """Kill stray haproxy processes before each test; verify none remain after."""
    # Pre-test: best-effort kill of any leftover haproxy processes.
    subprocess.run(
        ["pkill", "-x", "haproxy"], capture_output=True, text=True, check=False
    )
    yield
    # Post-test: the test itself must have reaped every haproxy it spawned.
    leftover = subprocess.run(
        ["pgrep", "-x", "haproxy"], capture_output=True, text=True, check=False
    )
    assert (
        leftover.returncode != 0 or not leftover.stdout.strip()
    ), f"HAProxy processes still running after test: {leftover.stdout.strip()}"
@pytest_asyncio.fixture
async def haproxy_api_cleanup():
    """Yield a registration callback; stop registered HAProxyApi instances on teardown.

    Tests call the yielded function with each HAProxyApi they create so the
    underlying haproxy subprocess is always reaped, even when the test body
    fails. The original loop ended with a dead ``elif ...: continue`` branch
    (a no-op at the end of a loop body); it is replaced by a guard clause.
    """
    registered_apis = []

    def register(api: Optional[HAProxyApi]) -> None:
        if api is not None:
            registered_apis.append(api)

    yield register

    for api in registered_apis:
        proc = getattr(api, "_proc", None)
        # Only instances whose subprocess is still alive need cleanup.
        if proc is None or proc.returncode is not None:
            continue
        try:
            await api.stop()
        except Exception as exc:  # pragma: no cover - best effort cleanup
            logger.warning(f"Failed to stop HAProxy API cleanly: {exc}")
            try:
                proc.kill()
                await proc.wait()
            except Exception as kill_exc:
                logger.error(
                    f"Failed to kill HAProxy process {proc.pid}: {kill_exc}"
                )
def test_generate_config_file_internal(haproxy_api_cleanup):
    """Test that initialize writes the correct config_stub file content using the actual template."""
    with tempfile.TemporaryDirectory() as temp_dir:
        config_file_path = os.path.join(temp_dir, "haproxy.cfg")
        socket_path = os.path.join(temp_dir, "admin.sock")
        # Global HAProxy settings exercised by the template: timeouts, stats
        # page, health-check defaults, and frontend bind options.
        config_stub = HAProxyConfig(
            socket_path=socket_path,
            maxconn=1000,
            nbthread=2,
            timeout_connect_s=5,
            timeout_client_s=30,
            timeout_server_s=30,
            timeout_http_request_s=10,
            timeout_queue_s=1,
            stats_port=8080,
            stats_uri="/mystats",
            health_check_fall=3,
            health_check_rise=2,
            health_check_inter="2s",
            health_check_path="/health",
            http_options=HTTPOptions(
                host="0.0.0.0",
                port=8000,
                keep_alive_timeout_s=55,
            ),
            has_received_routes=True,
            has_received_servers=True,
            enable_hap_optimization=True,
        )
        # Two backends: "api_backend" overrides every per-backend health-check
        # knob; "web_backend" relies on the global defaults.
        backend_config_stub = {
            "api_backend": BackendConfig(
                name="api_backend",
                path_prefix="/api",
                app_name="api_backend",
                timeout_http_keep_alive_s=60,
                timeout_tunnel_s=60,
                health_check_path="/api/health",
                health_check_fall=2,
                health_check_rise=3,
                health_check_inter="5s",
                servers=[
                    ServerConfig(name="api_server1", host="127.0.0.1", port=8001),
                    ServerConfig(name="api_server2", host="127.0.0.1", port=8002),
                ],
            ),
            "web_backend": BackendConfig(
                name="web_backend",
                path_prefix="/web",
                app_name="web_backend",
                timeout_connect_s=3,
                timeout_server_s=25,
                timeout_http_keep_alive_s=45,
                timeout_tunnel_s=45,
                servers=[
                    ServerConfig(name="web_server1", host="127.0.0.1", port=8003),
                ]
                # No health check overrides - should use global defaults
            ),
        }
        with mock.patch(
            "ray.serve._private.constants.RAY_SERVE_HAPROXY_CONFIG_FILE_LOC",
            config_file_path,
        ):
            api = HAProxyApi(
                cfg=config_stub,
                backend_configs=backend_config_stub,
                config_file_path=config_file_path,
            )
            try:
                api._generate_config_file_internal()
                # Read and verify the generated file
                with open(config_file_path, "r") as f:
                    actual_content = f.read()
                routes = '{\\"/api\\":\\"api_backend\\",\\"/web\\":\\"web_backend\\"}'
                # Expected configuration stub (matching the actual template output)
                expected_config = f"""
global
# Log to the standard system log socket with debug level.
log /dev/log local0 debug
log 127.0.0.1:514 local0 debug
stats socket {socket_path} mode 666 level admin expose-fd listeners
stats timeout 30s
maxconn 1000
nbthread 2
server-state-base /tmp/haproxy-serve
server-state-file /tmp/haproxy-serve/server-state
hard-stop-after 120s
defaults
mode http
option log-health-checks
timeout connect 5s
timeout client 30s
timeout server 30s
timeout http-request 10s
timeout http-keep-alive 55s
timeout queue 1s
log global
option httplog
option abortonclose
option idle-close-on-response
# Normalize 502 and 504 errors to 500 per Serve's default behavior
errorfile 502 {temp_dir}/500.http
errorfile 504 {temp_dir}/500.http
load-server-state-from-file global
frontend prometheus
bind :9101
mode http
http-request use-service prometheus-exporter if {{ path /metrics }}
no log
frontend http_frontend
bind *:8000
# Health check endpoint
acl healthcheck path -i /-/healthz
# Suppress logging for health checks
http-request set-log-level silent if healthcheck
# 200 if any backend has at least one server UP
acl backend_api_backend_server_up nbsrv(api_backend) ge 1
acl backend_web_backend_server_up nbsrv(web_backend) ge 1
# Any backend with a server UP passes the health check (OR logic)
http-request return status 200 content-type text/plain string "success" if healthcheck backend_api_backend_server_up
http-request return status 200 content-type text/plain string "success" if healthcheck backend_web_backend_server_up
http-request return status 503 content-type text/plain string "Service Unavailable" if healthcheck
# Routes endpoint
acl routes path -i /-/routes
http-request return status 200 content-type application/json string "{routes}" if routes
# Static routing based on path prefixes in decreasing length then alphabetical order
acl is_api_backend path_beg /api/
acl is_api_backend path /api
use_backend api_backend if is_api_backend
acl is_web_backend path_beg /web/
acl is_web_backend path /web
use_backend web_backend if is_web_backend
default_backend default_backend
backend default_backend
http-request return status 404 content-type text/plain lf-string "Path \'%[path]\' not found. Ping http://.../-/routes for available routes."
backend api_backend
log global
balance leastconn
# Enable HTTP connection reuse for better performance
http-reuse always
# Set backend-specific timeouts, overriding defaults if specified
# Set timeouts to support keep-alive connections
timeout http-keep-alive 60s
timeout tunnel 60s
# Health check configuration - use backend-specific or global defaults
# HTTP health check with custom path
option httpchk GET /api/health
http-check expect status 200
default-server fastinter 250ms downinter 250ms fall 2 rise 3 inter 5s check
# Servers in this backend
server api_server1 127.0.0.1:8001 check
server api_server2 127.0.0.1:8002 check
backend web_backend
log global
balance leastconn
# Enable HTTP connection reuse for better performance
http-reuse always
# Set backend-specific timeouts, overriding defaults if specified
timeout connect 3s
timeout server 25s
# Set timeouts to support keep-alive connections
timeout http-keep-alive 45s
timeout tunnel 45s
# Health check configuration - use backend-specific or global defaults
# HTTP health check with custom path
option httpchk GET /-/healthz
http-check expect status 200
default-server fastinter 250ms downinter 250ms fall 3 rise 2 inter 2s check
# Servers in this backend
server web_server1 127.0.0.1:8003 check
listen stats
bind *:8080
stats enable
stats uri /mystats
stats refresh 1s
"""
                # Compare the entire configuration
                assert actual_content.strip() == expected_config.strip()
            finally:
                # Clean up any temporary files created by initialize()
                temp_files = ["haproxy.cfg", "routes.map"]
                for temp_file in temp_files:
                    try:
                        if os.path.exists(temp_file):
                            os.remove(temp_file)
                    except (FileNotFoundError, OSError):
                        pass  # File already removed or doesn't exist
def test_generate_backends_in_order(haproxy_api_cleanup):
    """Test that the backends are generated in the correct order.

    Route ACLs must be emitted in decreasing path-prefix length (then
    alphabetical) order so the most specific prefix wins during routing.
    """
    with tempfile.TemporaryDirectory() as temp_dir:
        config_file_path = os.path.join(temp_dir, "haproxy.cfg")
        backend_config_stub = {
            "foo": BackendConfig(
                name="foo",
                path_prefix="/foo",
                app_name="foo",
            ),
            "foobar": BackendConfig(
                name="foobar",
                path_prefix="/foo/bar",
                app_name="foobar",
            ),
            "bar": BackendConfig(
                name="bar",
                path_prefix="/bar",
                app_name="bar",
            ),
            "default": BackendConfig(
                name="default",
                path_prefix="/",
                app_name="default",
            ),
        }
        with mock.patch(
            "ray.serve._private.constants.RAY_SERVE_HAPROXY_CONFIG_FILE_LOC",
            config_file_path,
        ):
            api = HAProxyApi(
                cfg=HAProxyConfig(),
                config_file_path=config_file_path,
                backend_configs=backend_config_stub,
            )
            api._generate_config_file_internal()
            # Read and verify the generated file
            lines = []
            with open(config_file_path, "r") as f:
                lines = f.readlines()
            acl_names = []
            path_begs = []
            paths = []
            backend_lines = []
            for line in lines:
                line = line.strip()
                if line.startswith("acl"):
                    acl_name = line.split(" ")[1]
                    if acl_name in EXCLUDED_ACL_NAMES:
                        continue
                    acl_names.append(acl_name)
                    # Strip the prefix/suffix added for acl checks.
                    # BUGFIX: use removeprefix(), not lstrip("is_") --
                    # lstrip strips any leading characters from the set
                    # {i, s, _}, which would corrupt backend names that
                    # start with those letters (e.g. "internal" -> "nternal").
                    backend_name = (
                        acl_name.removeprefix("is_")
                        .replace("backend_", "")
                        .replace("_server_up", "")
                    )
                    assert backend_name in backend_config_stub
                    condition = line.split(" ")[-2]
                    if condition == "path_beg":
                        # path_beg ACLs carry a trailing slash; normalize it.
                        path_prefix = line.split(" ")[-1].rstrip("/") or "/"
                        assert backend_config_stub[backend_name].path_prefix == path_prefix
                        path_begs.append(path_prefix)
                    elif condition == "path":
                        path_prefix = line.split(" ")[-1]
                        assert backend_config_stub[backend_name].path_prefix == path_prefix
                        paths.append(path_prefix)
                    else:
                        # gt condition is used for health check, no need to check.
                        continue
                if line.startswith("use_backend"):
                    acl_name = line.split(" ")[-1]
                    assert acl_name in acl_names
                    backend_lines.append(acl_name)
            # Longest prefix first, ties broken alphabetically.
            expected_order = ["is_foobar", "is_bar", "is_foo", "is_default"]
            assert backend_lines == expected_order
@pytest.mark.asyncio
async def test_graceful_reload(haproxy_api_cleanup):
    """Test that graceful reload preserves long-running connections."""
    with tempfile.TemporaryDirectory() as temp_dir:
        # Setup ports
        haproxy_port = 8000
        backend_port = 8404
        stats_port = 8405
        # Create and start a backend server
        backend_server, backend_thread = create_test_backend_server(backend_port)
        # Configure HAProxy
        config = HAProxyConfig(
            http_options=HTTPOptions(
                host="127.0.0.1",
                port=haproxy_port,
                keep_alive_timeout_s=58,
            ),
            stats_port=stats_port,
            inject_process_id_header=True,  # Enable for testing graceful reload
            reload_id=f"initial-{int(time.time() * 1000)}",  # Set initial reload ID
            socket_path=os.path.join(temp_dir, "admin.sock"),
        )
        backend_config = BackendConfig(
            name="test_backend",
            path_prefix="/",
            app_name="test_app",
            servers=[ServerConfig(name="backend", host="127.0.0.1", port=backend_port)],
            timeout_http_keep_alive_s=58,
        )
        config_file_path = os.path.join(temp_dir, "haproxy.cfg")
        api = HAProxyApi(
            cfg=config,
            backend_configs={"test_backend": backend_config},
            config_file_path=config_file_path,
        )
        haproxy_api_cleanup(api)
        try:
            await api.start()

            # Wait for HAProxy to be ready (check stat endpoint)
            def check_stats_ready():
                try:
                    response = requests.get(
                        f"http://127.0.0.1:{config.stats_port}/stats", timeout=2
                    )
                    return response.status_code == 200
                except Exception:
                    return False

            wait_for_condition(check_stats_ready, timeout=10, retry_interval_ms=100)
            # Track slow request results
            slow_results = []
            request_started = threading.Event()
            # Kick off a 3-second request in the background; it must survive
            # the reload performed below.
            slow_thread = threading.Thread(
                target=make_test_request,
                args=[f"http://127.0.0.1:{haproxy_port}/slow"],
                kwargs={
                    "track_results": slow_results,
                    "signal_started": request_started,
                },
            )
            slow_thread.start()
            wait_for_condition(
                lambda: request_started.is_set(), timeout=5, retry_interval_ms=10
            )
            assert api._proc is not None
            original_pid = api._proc.pid
            # Reload while the slow request is still in flight.
            await api._graceful_reload()
            assert api._proc is not None
            new_pid = api._proc.pid

            def check_for_new_reload_id():
                fast_response = requests.get(
                    f"http://127.0.0.1:{haproxy_port}/fast", timeout=5
                )
                # Reload ID should always match what exists in the config.
                return (
                    fast_response.headers.get("x-haproxy-reload-id")
                    == api.cfg.reload_id
                    and fast_response.status_code == 200
                )

            wait_for_condition(
                check_for_new_reload_id, timeout=5, retry_interval_ms=100
            )
            slow_thread.join(timeout=10)
            assert (
                original_pid != new_pid
            ), "Process should have been reloaded with new PID"
            # The old process drains and exits once its connections finish.
            wait_for_condition(
                lambda: not process_exists(original_pid),
                timeout=15,
                retry_interval_ms=100,
            )
            assert len(slow_results) == 1, "Slow request should have completed"
            result = slow_results[0]
            assert "error" not in result, f"Slow request failed: {result.get('error')}"
            assert result["status"] == 200, "Slow request should have succeeded"
            assert result["duration"] >= 3.0, "Slow request should have taken full time"
            assert (
                b"Slow response completed" in result["content"]
            ), "Slow request should have completed"
        finally:
            # Backend server cleanup
            try:
                backend_server.should_exit = True
                backend_thread.join(timeout=5)  # Wait for thread to finish
            except Exception as e:
                print(f"Error occurred while shutting down server stub. Error: {e}")
@pytest.mark.asyncio
async def test_start(haproxy_api_cleanup):
    """Test HAProxy start functionality."""
    with tempfile.TemporaryDirectory() as temp_dir:
        config_file_path = os.path.join(temp_dir, "haproxy.cfg")
        socket_path = os.path.join(temp_dir, "admin.sock")
        # Create HAProxy config
        config = HAProxyConfig(
            http_options=HTTPOptions(
                host="127.0.0.1",
                port=8000,
                keep_alive_timeout_s=58,
            ),
            stats_port=8404,
            pass_health_checks=True,
            socket_path=socket_path,
            has_received_routes=True,
            has_received_servers=True,
        )
        # Add a backend so routes are populated
        backend = BackendConfig(
            name="test_backend",
            path_prefix="/",
            app_name="test_app",
            servers=[ServerConfig(name="server", host="127.0.0.1", port=9999)],
        )
        api = HAProxyApi(
            cfg=config,
            backend_configs={"test_backend": backend},
            config_file_path=config_file_path,
        )
        haproxy_api_cleanup(api)
        await api.start()
        assert api._proc is not None, "HAProxy process should exist"
        assert api._is_running(), "HAProxy should be running"
        # Verify config file contains expected content
        with open(config_file_path, "r") as f:
            config_content = f.read()
        assert "frontend http_frontend" in config_content
        assert f"bind 127.0.0.1:{config.frontend_port}" in config_content
        assert "acl healthcheck path -i /-/healthz" in config_content
        # Nothing listens on port 9999, so no server is UP and the aggregate
        # health endpoint must report 503.
        health_response = requests.get(
            f"http://127.0.0.1:{config.frontend_port}/-/healthz", timeout=5
        )
        assert (
            health_response.status_code == 503
        ), "Health check with no servers up should return 503"
        await api.stop()
        assert api._proc is None
        assert not api._is_running()
@pytest.mark.asyncio
async def test_stop(haproxy_api_cleanup):
    """Test HAProxy stop functionality.

    Starts a bare HAProxy instance, stops it, and asserts it reports as not
    running. (The duplicate ``haproxy_api_cleanup(api)`` registration that
    followed ``start()`` was removed: registering the same instance twice
    only causes a redundant second cleanup pass at teardown.)
    """
    with tempfile.TemporaryDirectory() as temp_dir:
        config_file_path = os.path.join(temp_dir, "haproxy.cfg")
        config = HAProxyConfig(
            http_options=HTTPOptions(
                host="127.0.0.1",
                port=8000,
            ),
            stats_port=8404,
            socket_path=os.path.join(temp_dir, "admin.sock"),
        )
        api = HAProxyApi(cfg=config, config_file_path=config_file_path)
        haproxy_api_cleanup(api)
        # Start HAProxy
        await api.start()
        await api.stop()
        # Verify it's stopped
        assert not api._is_running(), "HAProxy should be stopped after shutdown"
@pytest.mark.asyncio
async def test_stop_kills_haproxy_process(haproxy_api_cleanup):
    """Test that stop() properly kills the HAProxy subprocess."""
    with tempfile.TemporaryDirectory() as tmp:
        cfg = HAProxyConfig(
            http_options=HTTPOptions(
                host="127.0.0.1",
                port=8000,
            ),
            stats_port=8404,
            socket_path=os.path.join(tmp, "admin.sock"),
        )
        api = HAProxyApi(cfg=cfg, config_file_path=os.path.join(tmp, "haproxy.cfg"))
        haproxy_api_cleanup(api)

        # Launch HAProxy and capture the PID of the spawned subprocess.
        await api.start()
        assert api._proc is not None, "HAProxy process should exist after start"
        pid = api._proc.pid
        assert process_exists(pid), "HAProxy process should be running"

        # Tear it down.
        await api.stop()
        assert api._proc is None, "HAProxy proc should be None after stop"

        # The OS-level process must disappear shortly after stop() returns.
        wait_for_condition(
            lambda: not process_exists(pid),
            timeout=1,
            retry_interval_ms=100,
        )
@pytest.mark.asyncio
async def test_get_stats_integration(haproxy_api_cleanup):
    """End-to-end check of get_all_stats()/get_haproxy_stats() against a live HAProxy."""
    with tempfile.TemporaryDirectory() as temp_dir:
        config_file_path = os.path.join(temp_dir, "haproxy.cfg")
        socket_path = os.path.join(temp_dir, "admin.sock")
        # Create test backend servers
        backend_port1 = 9900
        backend_port2 = 9901
        backend_server1, backend_thread1 = create_test_backend_server(backend_port1)
        backend_server2, backend_thread2 = create_test_backend_server(backend_port2)
        # Configure HAProxy with multiple backends
        config = HAProxyConfig(
            http_options=HTTPOptions(
                port=8000,
                keep_alive_timeout_s=58,
            ),
            socket_path=socket_path,
            stats_port=8404,
        )
        backend_configs = {
            "test_backend1": BackendConfig(
                name="test_backend1",
                path_prefix="/api",
                app_name="test_app1",
                servers=[
                    ServerConfig(name="server1", host="127.0.0.1", port=backend_port1)
                ],
                timeout_http_keep_alive_s=58,
            ),
            "test_backend2": BackendConfig(
                name="test_backend2",
                path_prefix="/web",
                app_name="test_app2",
                servers=[
                    ServerConfig(name="server2", host="127.0.0.1", port=backend_port2)
                ],
                timeout_http_keep_alive_s=58,
            ),
        }
        api = HAProxyApi(
            cfg=config,
            backend_configs=backend_configs,
            config_file_path=config_file_path,
        )
        haproxy_api_cleanup(api)
        try:
            # Start HAProxy
            await api.start()
            # Wait for HAProxy to be ready
            wait_for_condition(
                lambda: check_haproxy_ready(config.stats_port),
                timeout=10,
                retry_interval_ms=500,
            )
            # Make some API calls to generate sessions and traffic
            # (fire-and-forget threads hitting the /slow endpoint via HAProxy).
            request_threads = []
            for i in range(3):
                thread = threading.Thread(
                    target=make_test_request,
                    args=[f"http://127.0.0.1:{config.frontend_port}/api/slow"],
                )
                thread.start()
                request_threads.append(thread)
            for i in range(3):
                thread = threading.Thread(
                    target=make_test_request,
                    args=[f"http://127.0.0.1:{config.frontend_port}/web/slow"],
                )
                thread.start()
                request_threads.append(thread)

            # Get actual stats
            async def two_servers_up():
                stats = await api.get_haproxy_stats()
                return stats.active_servers == 2

            await async_wait_for_condition(
                two_servers_up, timeout=10, retry_interval_ms=200
            )

            async def wait_for_running():
                return await api.is_running()

            await async_wait_for_condition(
                wait_for_running, timeout=10, retry_interval_ms=200
            )
            all_stats = await api.get_all_stats()
            haproxy_stats = await api.get_haproxy_stats()
            # Assert against the expected stub with exact values
            assert (
                len(all_stats) == 2
            ), f"Should have exactly 2 backends, got {len(all_stats)}"
            assert (
                haproxy_stats.total_backends == 2
            ), f"Should have exactly 2 backends, got {haproxy_stats.total_backends}"
            assert (
                haproxy_stats.total_servers == 2
            ), f"Should have exactly 2 servers, got {haproxy_stats.total_servers}"
            assert (
                haproxy_stats.active_servers == 2
            ), f"Should have exactly 2 active servers, got {haproxy_stats.active_servers}"
            # Wait for request threads to complete
            for thread in request_threads:
                thread.join(timeout=1)
        finally:
            # Stop HAProxy
            await api.stop()
            # Cleanup backend servers
            try:
                backend_server1.should_exit = True
                backend_server2.should_exit = True
                backend_thread1.join(timeout=5)  # Wait for the thread to finish
                backend_thread2.join(timeout=5)  # Wait for the thread to finish
            except Exception as e:
                print(f"Error cleaning up backend servers: {e}")
@pytest.mark.asyncio
async def test_update_and_reload(haproxy_api_cleanup):
    """Test that set_backend_configs() + reload() picks up new backends.

    Also verifies the reload swaps in a fresh haproxy process and that the
    pre-reload process eventually exits.
    """
    with tempfile.TemporaryDirectory() as temp_dir:
        config_file_path = os.path.join(temp_dir, "haproxy.cfg")
        socket_path = os.path.join(temp_dir, "admin.sock")
        backend = BackendConfig(
            name="backend",
            path_prefix="/",
            app_name="backend_app",
            servers=[ServerConfig(name="server", host="127.0.0.1", port=9999)],
        )
        config = HAProxyConfig(
            http_options=HTTPOptions(
                host="127.0.0.1",
                port=8000,
            ),
            stats_port=8404,
            socket_path=socket_path,
        )
        api = HAProxyApi(
            cfg=config,
            backend_configs={backend.name: backend},
            config_file_path=config_file_path,
        )
        # Register for cleanup BEFORE starting: if start() raises partway
        # through, the fixture can still reap any spawned process. This
        # matches the registration order used by the other tests here.
        haproxy_api_cleanup(api)
        await api.start()
        with open(config_file_path, "r") as f:
            actual_content = f.read()
        assert "backend_2" not in actual_content
        original_proc = api._proc
        original_pid = original_proc.pid
        # Add another backend
        backend2 = BackendConfig(
            name="backend_2",
            path_prefix="/",
            app_name="backend_app_2",
            servers=[ServerConfig(name="server", host="127.0.0.1", port=9999)],
        )
        api.set_backend_configs({backend.name: backend, backend2.name: backend2})
        await api.reload()
        assert api._proc is not None
        assert api._proc.pid != original_pid
        with open(config_file_path, "r") as f:
            actual_content = f.read()
        assert "backend_2" in actual_content
        # The pre-reload process should terminate once it finishes draining.
        wait_for_condition(
            lambda: not process_exists(original_pid),
            timeout=5,
            retry_interval_ms=100,
        )
@pytest.mark.asyncio
async def test_haproxy_start_should_throw_error_when_already_running(
    haproxy_api_cleanup,
):
    """Test that HAProxy throws an error when trying to start on an already-used port (SO_REUSEPORT disabled)."""
    with tempfile.TemporaryDirectory() as temp_dir:
        config_file_path = os.path.join(temp_dir, "haproxy.cfg")
        socket_path = os.path.join(temp_dir, "admin.sock")
        config = HAProxyConfig(
            http_options=HTTPOptions(
                host="127.0.0.1",
                port=8000,
            ),
            stats_port=8404,
            socket_path=socket_path,
            enable_so_reuseport=False,  # Disable SO_REUSEPORT
        )
        api = HAProxyApi(cfg=config, config_file_path=config_file_path)
        haproxy_api_cleanup(api)
        # Start HAProxy with SO_REUSEPORT disabled
        await api.start()
        assert api._proc is not None, "HAProxy process should be running"
        first_pid = api._proc.pid
        # Verify we can't start another instance on the same port (SO_REUSEPORT disabled)
        config2 = HAProxyConfig(
            http_options=HTTPOptions(
                host="127.0.0.1",
                port=config.frontend_port,  # Same port
            ),
            stats_port=8404,
            socket_path=os.path.join(temp_dir, "admin2.sock"),
            enable_so_reuseport=False,  # Disable SO_REUSEPORT
        )
        api2 = HAProxyApi(
            cfg=config2, config_file_path=os.path.join(temp_dir, "haproxy2.cfg")
        )
        # Register api2 too: if start() unexpectedly succeeds, or partially
        # spawns a process before failing, the fixture will clean it up
        # instead of leaking a haproxy process past the test.
        haproxy_api_cleanup(api2)
        # This should fail because SO_REUSEPORT is disabled
        with pytest.raises(RuntimeError, match="(Address already in use)"):
            await api2.start()
        # Cleanup first instance
        await api.stop()
        assert not process_exists(first_pid), "HAProxy process should be stopped"
@pytest.mark.asyncio
async def test_toggle_health_checks(haproxy_api_cleanup):
    """Test that disable()/enable() toggle HAProxy health checks end-to-end."""
    with tempfile.TemporaryDirectory() as temp_dir:
        config_file_path = os.path.join(temp_dir, "haproxy.cfg")
        socket_path = os.path.join(temp_dir, "admin.sock")
        backend = BackendConfig(
            name="backend",
            path_prefix="/",
            app_name="backend_app",
            servers=[ServerConfig(name="server", host="127.0.0.1", port=9999)],
        )
        config = HAProxyConfig(
            http_options=HTTPOptions(
                host="127.0.0.1",
                port=8000,
            ),
            stats_port=8404,
            socket_path=socket_path,
            inject_process_id_header=True,
            has_received_routes=True,
            has_received_servers=True,
        )
        # Start a real backend server so HAProxy can mark the server UP
        backend_server, backend_thread = create_test_backend_server(9999)
        try:
            api = HAProxyApi(
                cfg=config,
                backend_configs={backend.name: backend},
                config_file_path=config_file_path,
            )
            await api.start()
            haproxy_api_cleanup(api)
            # Verify HAProxy is running
            assert api._is_running(), "HAProxy should be running"

            # Health requires servers; wait until health passes
            def health_ok():
                resp = requests.get(
                    f"http://127.0.0.1:{config.frontend_port}{config.health_check_endpoint}",
                    timeout=5,
                )
                return resp.status_code == 200

            wait_for_condition(health_ok, timeout=10)
            # Verify a config file contains health check enabled
            with open(api.config_file_path, "r") as f:
                config_content = f.read()
            assert (
                "http-request return status 200" in config_content
            ), "Health checks should be enabled in config"
            # Disable health checks
            await api.disable()
            # Verify HAProxy is still running after calling disable()
            assert api._is_running(), "HAProxy should still be running after disable"
            # Config should now deny the health endpoint
            with open(api.config_file_path, "r") as f:
                config_content = f.read()
            assert (
                "http-request return status 503" in config_content
            ), "Health checks should be disabled in config"

            def health_check_condition(status_code: int):
                # Test health check endpoint now fails
                health_response = requests.get(
                    f"http://127.0.0.1:{config.frontend_port}{config.health_check_endpoint}",
                    timeout=5,
                )
                return health_response.status_code == status_code

            # After disable(), the endpoint must report 503 even though the
            # backend server is still up.
            wait_for_condition(health_check_condition, timeout=2, status_code=503)
            # Re-enable health checks
            await api.enable()
            # Config should contain the 200 response again
            with open(api.config_file_path, "r") as f:
                config_content = f.read()
            assert (
                "http-request return status 200" in config_content
            ), "Health checks should be re-enabled in config"
            wait_for_condition(health_check_condition, timeout=5, status_code=200)
        finally:
            backend_server.should_exit = True
            backend_thread.join(timeout=5)
@pytest.mark.asyncio
async def test_health_endpoint_or_logic_multiple_backends(haproxy_api_cleanup):
    """Test that the health endpoint returns 200 if ANY backend has at least one server UP (OR logic)."""
    with tempfile.TemporaryDirectory() as temp_dir:
        config_file_path = os.path.join(temp_dir, "haproxy.cfg")
        socket_path = os.path.join(temp_dir, "admin.sock")
        backend1_port = 9996
        backend2_port = 9997
        config = HAProxyConfig(
            http_options=HTTPOptions(
                host="127.0.0.1",
                port=8000,
            ),
            stats_port=8404,
            socket_path=socket_path,
            has_received_routes=True,
            has_received_servers=True,
        )
        # Aggressive fall/rise/inter so HAProxy notices server state changes
        # within the short waits used below.
        backend1 = BackendConfig(
            name="backend1",
            path_prefix="/api1",
            servers=[
                ServerConfig(name="server1", host="127.0.0.1", port=backend1_port)
            ],
            health_check_fall=1,
            health_check_rise=1,
            health_check_inter="1s",
        )
        backend2 = BackendConfig(
            name="backend2",
            path_prefix="/api2",
            servers=[
                ServerConfig(name="server2", host="127.0.0.1", port=backend2_port)
            ],
            health_check_fall=1,
            health_check_rise=1,
            health_check_inter="1s",
        )
        backend1_server, backend1_thread = create_test_backend_server(backend1_port)
        backend2_server, backend2_thread = create_test_backend_server(backend2_port)
        try:
            api = HAProxyApi(
                cfg=config,
                backend_configs={backend1.name: backend1, backend2.name: backend2},
                config_file_path=config_file_path,
            )
            await api.start()
            haproxy_api_cleanup(api)

            # Wait for health check to pass (both servers are UP)
            def health_ok():
                resp = requests.get(
                    f"http://127.0.0.1:{config.frontend_port}{config.health_check_endpoint}",
                    timeout=5,
                )
                return resp.status_code == 200

            wait_for_condition(health_ok, timeout=10, retry_interval_ms=200)
            # Verify health check returns 200 when both servers are UP
            health_response = requests.get(
                f"http://127.0.0.1:{config.frontend_port}{config.health_check_endpoint}",
                timeout=5,
            )
            assert (
                health_response.status_code == 200
            ), "Health check should return 200 when both servers are UP"
            assert b"success" in health_response.content
            # Stop backend1 server
            backend1_server.should_exit = True
            backend1_thread.join(timeout=5)
            # Wait a bit for HAProxy to detect backend1 is down
            await asyncio.sleep(2)
            # Verify health check STILL returns 200 (backend2 is still UP - OR logic)
            health_response = requests.get(
                f"http://127.0.0.1:{config.frontend_port}{config.health_check_endpoint}",
                timeout=5,
            )
            assert (
                health_response.status_code == 200
            ), "Health check should return 200 when at least one backend (backend2) is UP (OR logic)"
            assert b"success" in health_response.content
            # Stop backend2 server as well
            backend2_server.should_exit = True
            backend2_thread.join(timeout=5)

            # Wait for health check to fail (both servers are DOWN)
            def health_fails():
                resp = requests.get(
                    f"http://127.0.0.1:{config.frontend_port}{config.health_check_endpoint}",
                    timeout=5,
                )
                return resp.status_code == 503

            wait_for_condition(health_fails, timeout=10, retry_interval_ms=200)
            # Verify health check returns 503 when ALL servers are DOWN
            health_response = requests.get(
                f"http://127.0.0.1:{config.frontend_port}{config.health_check_endpoint}",
                timeout=5,
            )
            assert (
                health_response.status_code == 503
            ), "Health check should return 503 when all servers are DOWN"
            assert b"Service Unavailable" in health_response.content
            await api.stop()
        finally:
            # Cleanup: the guards avoid re-joining threads already shut down
            # in the happy path above.
            try:
                if not backend1_server.should_exit:
                    backend1_server.should_exit = True
                    backend1_thread.join(timeout=5)
            except Exception:
                pass
            try:
                if not backend2_server.should_exit:
                    backend2_server.should_exit = True
                    backend2_thread.join(timeout=5)
            except Exception:
                pass
@pytest.mark.asyncio
async def test_errorfile_creation_and_config(haproxy_api_cleanup):
    """Test that the errorfile is created and configured correctly for both 502 and 504.

    Phase 1 verifies the 500.http error file is written at HAProxyApi init time
    and that the generated config references it for 502 and 504. Phase 2
    restarts HAProxy with a real backend and verifies routing plus fallback to
    404 after the backend is removed via reload.
    """
    with tempfile.TemporaryDirectory() as temp_dir:
        config_file_path = os.path.join(temp_dir, "haproxy.cfg")
        socket_path = os.path.join(temp_dir, "admin.sock")
        # Launch a simple backend server with /fast endpoint
        backend_port = 9107
        backend_server, backend_thread = create_test_backend_server(backend_port)
        # Configure HAProxy with one backend under root ('/') so upstream sees '/fast'
        config = HAProxyConfig(
            http_options=HTTPOptions(
                host="127.0.0.1",
                port=8000,
                keep_alive_timeout_s=58,
            ),
            stats_port=8404,
            socket_path=socket_path,
        )
        api = HAProxyApi(cfg=config, config_file_path=config_file_path)
        haproxy_api_cleanup(api)
        # Verify the error file was created during initialization
        expected_error_file_path = os.path.join(temp_dir, "500.http")
        assert os.path.exists(
            expected_error_file_path
        ), "Error file 500.http should be created"
        assert (
            api.cfg.error_file_path == expected_error_file_path
        ), "Error file path should be set in config"
        # Verify the error file content
        with open(expected_error_file_path, "r") as ef:
            error_content = ef.read()
        assert (
            "HTTP/1.1 500 Internal Server Error" in error_content
        ), "Error file should contain 500 status"
        assert (
            "Content-Type: text/plain" in error_content
        ), "Error file should contain content-type header"
        assert (
            "Internal Server Error" in error_content
        ), "Error file should contain error message"
        # Start HAProxy and verify config contains errorfile directives
        await api.start()
        # Verify config file contains errorfile directives for both 502 and 504 pointing to the same file
        with open(config_file_path, "r") as f:
            config_content = f.read()
        assert (
            f"errorfile 502 {expected_error_file_path}" in config_content
        ), "HAProxy config should contain 502 errorfile directive"
        assert (
            f"errorfile 504 {expected_error_file_path}" in config_content
        ), "HAProxy config should contain 504 errorfile directive"
        await api.stop()
        # Phase 2: recreate the API with a real backend to exercise routing.
        backend = BackendConfig(
            name="app_backend",
            path_prefix="/",
            app_name="app",
            servers=[ServerConfig(name="server1", host="127.0.0.1", port=backend_port)],
            timeout_http_keep_alive_s=58,
        )
        api = HAProxyApi(
            cfg=config,
            backend_configs={backend.name: backend},
            config_file_path=config_file_path,
        )
        haproxy_api_cleanup(api)
        try:
            await api.start()
            # Ensure HAProxy is up (stats endpoint reachable)
            wait_for_condition(
                lambda: check_haproxy_ready(config.stats_port),
                timeout=10,
                retry_interval_ms=100,
            )
            # Route exists -> expect 200
            r = requests.get("http://127.0.0.1:8000/fast", timeout=5)
            assert r.status_code == 200
            # Remove backend (no targets for /app) and reload
            api.set_backend_configs({})
            await api.reload()
            # After removal, route should fall back to default backend -> 404
            def get_status():
                resp = requests.get("http://127.0.0.1:8000/fast", timeout=5)
                return resp.status_code
            # Allow a brief window for reload to take effect
            wait_for_condition(
                lambda: get_status() == 404, timeout=5, retry_interval_ms=100
            )
        finally:
            try:
                await api.stop()
            except Exception:
                pass
            try:
                backend_server.should_exit = True
                backend_thread.join(timeout=5)
            except Exception:
                pass
@pytest.mark.asyncio
async def test_routes_endpoint_returns_backends_and_respects_health(
    haproxy_api_cleanup,
):
    """Verify '/-/routes' reflects the backend configs and node health.

    Walks through the states: route table not populated -> routes received but
    no replicas -> fully populated (JSON route map) -> drained -> re-enabled.
    """
    with tempfile.TemporaryDirectory() as temp_dir:
        config_file_path = os.path.join(temp_dir, "haproxy.cfg")
        socket_path = os.path.join(temp_dir, "admin.sock")
        # Start two backend servers; health endpoint exists at '/-/healthz'.
        backend_port1 = 9910
        backend_port2 = 9911
        backend_server1, backend_thread1 = create_test_backend_server(backend_port1)
        backend_server2, backend_thread2 = create_test_backend_server(backend_port2)
        # Configure HAProxy with two prefixed backends
        config = HAProxyConfig(
            http_options=HTTPOptions(
                host="127.0.0.1",
                port=8013,
                keep_alive_timeout_s=58,
            ),
            stats_port=8413,
            socket_path=socket_path,
        )
        backend_api = BackendConfig(
            name="api_backend",
            path_prefix="/api",
            app_name="api_app",
            servers=[
                ServerConfig(name="server1", host="127.0.0.1", port=backend_port1)
            ],
            timeout_http_keep_alive_s=58,
        )
        backend_web = BackendConfig(
            name="web_backend",
            path_prefix="/web",
            app_name="web_app",
            servers=[
                ServerConfig(name="server2", host="127.0.0.1", port=backend_port2)
            ],
            timeout_http_keep_alive_s=58,
        )
        api = HAProxyApi(
            cfg=config,
            backend_configs={
                backend_api.name: backend_api,
                backend_web.name: backend_web,
            },
            config_file_path=config_file_path,
        )
        haproxy_api_cleanup(api)
        try:
            await api.start()
            # Wait for HAProxy to be ready
            wait_for_condition(
                lambda: check_haproxy_ready(config.stats_port),
                timeout=10,
                retry_interval_ms=100,
            )

            # Helper to get fresh routes response (avoids connection reuse)
            def get_routes():
                with requests.Session() as session:
                    return session.get("http://127.0.0.1:8013/-/routes", timeout=1)

            # Initial state: no routes
            r = requests.get("http://127.0.0.1:8013/-/routes", timeout=5)
            assert r.status_code == 503
            assert r.headers.get("content-type", "").startswith("text/plain")
            assert r.text == "Route table is not populated yet."
            # Set has_received_routes but not has_received_servers -> should show "No replicas available"
            api.cfg.has_received_routes = True
            api.cfg.has_received_servers = False
            await api.reload()

            # Reload is asynchronous: poll until the reloaded config serves the
            # "no replicas" message. (This was previously a bare comparison
            # expression that asserted nothing.)
            def routes_no_replicas():
                resp = get_routes()
                return (
                    resp.status_code == 503
                    and resp.text == "No replicas are available yet."
                )

            wait_for_condition(routes_no_replicas, timeout=5, retry_interval_ms=100)
            r = get_routes()
            assert r.status_code == 503
            assert r.headers.get("content-type", "").startswith("text/plain")
            # Set both flags -> should show routes JSON
            api.cfg.has_received_routes = True
            api.cfg.has_received_servers = True
            await api.reload()

            # Reload is not synchronous, so we need to wait for the config to be applied
            def check_json_routes():
                r = get_routes()
                return r.status_code == 200 and r.headers.get(
                    "content-type", ""
                ).startswith("application/json")

            wait_for_condition(check_json_routes, timeout=5, retry_interval_ms=50)
            r = get_routes()
            data = r.json()
            assert data == {"/api": "api_app", "/web": "web_app"}
            # Disable (simulate draining/unhealthy) -> wait for healthz to flip, then routes 503
            await api.disable()

            def health_is(code: int):
                resp = requests.get("http://127.0.0.1:8013/-/healthz", timeout=5)
                return resp.status_code == code

            wait_for_condition(health_is, timeout=5, retry_interval_ms=100, code=503)
            r = requests.get("http://127.0.0.1:8013/-/routes", timeout=5)
            assert r.status_code == 503
            assert r.headers.get("content-type", "").startswith("text/plain")
            assert r.text == "This node is being drained."
            # Re-enable -> wait for healthz to flip back, then routes 200
            await api.enable()
            wait_for_condition(health_is, timeout=5, retry_interval_ms=100, code=200)
            r = requests.get("http://127.0.0.1:8013/-/routes", timeout=5)
            assert r.status_code == 200
        finally:
            try:
                await api.stop()
            except Exception:
                pass
            # Shut down the backend servers; the original version leaked both
            # server threads.
            for server, thread in (
                (backend_server1, backend_thread1),
                (backend_server2, backend_thread2),
            ):
                try:
                    server.should_exit = True
                    thread.join(timeout=5)
                except Exception:
                    pass
@pytest.mark.asyncio
async def test_routes_endpoint_no_routes(haproxy_api_cleanup):
    """When no backends are configured, /-/routes should respect health gating.

    The route table is never populated in this test (the has_received_* flags
    are never set), so every state checked here expects a 503 response.
    """
    with tempfile.TemporaryDirectory() as temp_dir:
        config_file_path = os.path.join(temp_dir, "haproxy.cfg")
        socket_path = os.path.join(temp_dir, "admin.sock")
        config = HAProxyConfig(
            http_options=HTTPOptions(
                host="127.0.0.1",
                port=8014,
                keep_alive_timeout_s=58,
            ),
            stats_port=8414,
            socket_path=socket_path,
        )
        api = HAProxyApi(
            cfg=config,
            backend_configs={},
            config_file_path=config_file_path,
        )
        haproxy_api_cleanup(api)
        try:
            await api.start()
            # Wait for HAProxy to be ready
            wait_for_condition(
                lambda: check_haproxy_ready(config.stats_port),
                timeout=10,
                retry_interval_ms=100,
            )
            # No routes have been received yet -> 503 with "not populated".
            r = requests.get(
                f"http://127.0.0.1:{config.frontend_port}/-/routes", timeout=5
            )
            assert r.status_code == 503
            assert r.headers.get("content-type", "").startswith("text/plain")
            assert r.text == "Route table is not populated yet."
            # Disable -> wait for healthz to flip, then expect 503 with draining message
            await api.disable()
            def health_is(code: int):
                resp = requests.get(
                    f"http://127.0.0.1:{config.frontend_port}/-/healthz", timeout=5
                )
                return resp.status_code == code
            wait_for_condition(health_is, timeout=5, retry_interval_ms=100, code=503)
            # Wait for routes endpoint to also return draining message (graceful reload might take a moment)
            def routes_is_draining():
                try:
                    resp = requests.get(
                        f"http://127.0.0.1:{config.frontend_port}/-/routes", timeout=5
                    )
                    return (
                        resp.status_code == 503
                        and resp.text == "This node is being drained."
                    )
                except Exception:
                    return False
            wait_for_condition(routes_is_draining, timeout=5, retry_interval_ms=100)
            r = requests.get(
                f"http://127.0.0.1:{config.frontend_port}/-/routes", timeout=5
            )
            assert r.status_code == 503
            assert r.headers.get("content-type", "").startswith("text/plain")
            assert r.text == "This node is being drained."
            # Re-enable -> the node is no longer draining, but since the route
            # table was never populated, healthz and routes remain 503 with the
            # "not populated" message (NOT 200).
            await api.enable()
            wait_for_condition(health_is, timeout=5, retry_interval_ms=100, code=503)
            # NOTE(review): despite its name, this helper checks the 503
            # "route table not populated" state, which is the expected state
            # after re-enabling with no routes.
            def routes_is_healthy():
                try:
                    r = requests.get(
                        f"http://127.0.0.1:{config.frontend_port}/-/routes", timeout=5
                    )
                    return (
                        r.status_code == 503
                        and r.text == "Route table is not populated yet."
                    )
                except Exception:
                    return False
            wait_for_condition(routes_is_healthy, timeout=5, retry_interval_ms=100)
        finally:
            try:
                await api.stop()
            except Exception:
                pass
@pytest.mark.asyncio
async def test_404_error_message(haproxy_api_cleanup):
    """Test that HAProxy returns the correct 404 error message for non-existent paths.

    Only '/api' is configured, so a request to '/nonexistent' must fall through
    to the default backend and produce the custom 404 body with the routes hint.
    """
    with tempfile.TemporaryDirectory() as temp_dir:
        config_file_path = os.path.join(temp_dir, "haproxy.cfg")
        socket_path = os.path.join(temp_dir, "admin.sock")
        # Create a backend that serves /api
        backend = BackendConfig(
            name="api_backend",
            path_prefix="/api",
            servers=[],  # No servers, but we're testing the 404 path anyway
        )
        config = HAProxyConfig(
            http_options=HTTPOptions(
                host="127.0.0.1",
                port=8000,
            ),
            stats_port=8404,
            socket_path=socket_path,
        )
        api = HAProxyApi(
            cfg=config,
            backend_configs={"api_backend": backend},
            config_file_path=config_file_path,
        )
        await api.start()
        haproxy_api_cleanup(api)
        # Verify HAProxy is running
        assert api._is_running(), "HAProxy should be running"
        # Wait for HAProxy to be ready
        wait_for_condition(
            lambda: check_haproxy_ready(config.stats_port),
            timeout=10,
            retry_interval_ms=500,
        )
        # Request a non-existent path and verify the error message
        response = requests.get(
            f"http://127.0.0.1:{config.frontend_port}/nonexistent",
            timeout=5,
        )
        assert response.status_code == 404, "Should return 404 for non-existent path"
        assert (
            "Path '/nonexistent' not found" in response.text
        ), f"Error message should contain path. Got: {response.text}"
        assert (
            "Ping http://.../-/routes for available routes" in response.text
        ), f"Error message should contain routes hint. Got: {response.text}"
# Allow running this test module directly (outside of pytest discovery).
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", "-s", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/test_haproxy_api.py",
"license": "Apache License 2.0",
"lines": 1350,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/serve/tests/test_metrics_haproxy.py | """
HAProxy metrics tests for Ray Serve.
These tests verify that Ray Serve metrics work correctly when HAProxy is enabled
as a replacement for the default Serve HTTP proxy.
Key differences from the default Serve proxy:
1. When HAProxy is enabled, RAY_SERVE_ENABLE_DIRECT_INGRESS is automatically set.
2. HTTP proxy metrics (serve_num_http_requests, etc.) are emitted from replicas when
they receive direct ingress requests from HAProxy.
3. 404 errors for non-existent routes are handled by HAProxy itself (not forwarded to
replicas), so these won't generate Serve metrics. Tests that need to verify 404
metrics must deploy an application that returns 404s.
4. HAProxy has its own metrics exposed on a separate port (default 9101), but these
tests focus on Serve metrics exposed via the Ray metrics port (9999).
"""
import http
import json
import sys
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Dict, Optional
import httpx
import pytest
from fastapi import FastAPI
from starlette.requests import Request
from starlette.responses import PlainTextResponse
import ray
from ray import serve
from ray._common.network_utils import parse_address
from ray._common.test_utils import (
SignalActor,
fetch_prometheus_metrics,
wait_for_condition,
)
from ray._common.utils import reset_ray_address
from ray.serve import HTTPOptions
from ray.serve._private.long_poll import LongPollHost, UpdatedObject
from ray.serve._private.test_utils import get_application_url, get_metric_dictionaries
from ray.serve._private.utils import block_until_http_ready
from ray.serve.tests.conftest import TEST_METRICS_EXPORT_PORT
from ray.util.state import list_actors
@pytest.fixture
def metrics_start_shutdown(request):
    """Fixture provides a fresh Ray cluster to prevent metrics state sharing."""
    # An indirect parametrization value (if present) is interpreted as the
    # HTTP request timeout in seconds; otherwise no timeout is configured.
    param = request.param if hasattr(request, "param") else None
    request_timeout_s = param if param else None
    ray.init(
        _metrics_export_port=TEST_METRICS_EXPORT_PORT,
        # Report metrics frequently so tests can poll with short timeouts.
        _system_config={
            "metrics_report_interval_ms": 100,
            "task_retry_delay_ms": 50,
        },
    )
    yield serve.start(
        http_options=HTTPOptions(
            host="0.0.0.0",
            request_timeout_s=request_timeout_s,
        ),
    )
    # Teardown: stop Serve, tear down the cluster, and clear the cached
    # RAY_ADDRESS so the next test starts from a clean slate.
    serve.shutdown()
    ray.shutdown()
    reset_ray_address()
def extract_tags(line: str) -> Dict[str, str]:
    """Extract the Prometheus tags (labels) from a metrics exposition line.

    Example: ``'metric{a="1",b="2"} 3.0'`` -> ``{"a": "1", "b": "2"}``.

    Returns:
        A dict of tag name -> tag value; empty when the line has no tags
        (no braces, or an empty ``{}`` tag section).
    """
    try:
        # Everything between '{' and the matching '}' is the tag section.
        tags_string = line.replace("{", "}").split("}")[1]
    except IndexError:
        # No tags were found in this line.
        return {}
    if not tags_string:
        # 'metric{} 1.0' style line: braces present but no tags inside.
        return {}
    detected_tags = {}
    for tag_pair in tags_string.split(","):
        sanitized_pair = tag_pair.replace('"', "")
        # Split on the first '=' only so values that contain '=' survive.
        tag, value = sanitized_pair.split("=", 1)
        detected_tags[tag] = value
    return detected_tags
def contains_tags(line: str, expected_tags: Optional[Dict[str, str]] = None) -> bool:
    """Return True when ``line`` carries every tag in ``expected_tags``.

    A ``None`` filter matches any line.
    """
    if expected_tags is None:
        return True
    # Subset test: every expected (tag, value) pair must appear on the line.
    return expected_tags.items() <= extract_tags(line).items()
def get_metric_float(
    metric: str, expected_tags: Optional[Dict[str, str]] = None
) -> float:
    """Get the value of ``metric`` from the local Prometheus endpoint.

    If ``expected_tags`` is specified, only lines carrying all of those tags
    match. When the metric appears on multiple lines, the last match wins.

    Returns:
        The metric value as a float, or -1.0 if the metric isn't available.
    """
    metrics = httpx.get("http://127.0.0.1:9999").text
    metric_value = -1.0
    for line in metrics.split("\n"):
        # Skip '# HELP' / '# TYPE' comment lines: they contain the metric
        # name but their last token is prose, not a sample value.
        if line.startswith("#"):
            continue
        if metric in line and contains_tags(line, expected_tags):
            metric_value = float(line.split(" ")[-1])
    return metric_value
def check_metric_float_eq(
    metric: str, expected: float, expected_tags: Optional[Dict[str, str]] = None
) -> bool:
    """Assert that ``metric`` (filtered by ``expected_tags``) equals ``expected``.

    Always returns True so it can be used with ``wait_for_condition``; a
    mismatch raises AssertionError rather than returning False.
    """
    observed = float(get_metric_float(metric, expected_tags))
    assert observed == expected
    return True
def check_sum_metric_eq(
    metric_name: str,
    expected: float,
    tags: Optional[Dict[str, str]] = None,
) -> bool:
    """Assert that the samples of ``metric_name`` (filtered by ``tags``)
    sum to ``expected``.

    A missing metric counts as a sum of 0. Returns True on success so it can
    be used with ``wait_for_condition``.
    """
    if tags is None:
        tags = {}
    metrics = fetch_prometheus_metrics(["localhost:9999"])
    metrics = {k: v for k, v in metrics.items() if "ray_serve_" in k}
    metric_samples = metrics.get(metric_name, None)
    if metric_samples is None:
        metric_sum = 0
    else:
        # Keep only the samples whose labels contain all requested tags.
        metric_samples = [
            sample for sample in metric_samples if tags.items() <= sample.labels.items()
        ]
        metric_sum = sum(sample.value for sample in metric_samples)
    # Check the metrics sum to the expected number. Prometheus Sample objects
    # are not JSON-serializable, so use default=str when building the failure
    # message; otherwise the assert would die with a TypeError instead of an
    # AssertionError.
    assert float(metric_sum) == float(expected), (
        f"The following metrics don't sum to {expected}: "
        f"{json.dumps(metric_samples, indent=4, default=str)}\n."
        f"All metrics: {json.dumps(metrics, indent=4, default=str)}"
    )
    # For debugging
    if metric_samples:
        print(f"The following sum to {expected} for '{metric_name}' and tags {tags}:")
        for sample in metric_samples:
            print(sample)
    return True
def test_serve_metrics_for_successful_connection(metrics_start_shutdown):
    """Smoke test: successful HTTP and handle traffic emits all core Serve metrics."""
    @serve.deployment(name="metrics")
    async def f(request):
        return "hello"
    app_name = "app1"
    handle = serve.run(target=f.bind(), name=app_name)
    http_url = get_application_url(app_name=app_name)
    # send 10 concurrent requests
    ray.get([block_until_http_ready.remote(http_url) for _ in range(10)])
    # Also send 10 requests through the deployment handle (responses ignored).
    [handle.remote(http_url) for _ in range(10)]
    def verify_metrics(do_assert=False):
        try:
            resp = httpx.get("http://127.0.0.1:9999").text
        # Requests will fail if we are crashing the controller
        except httpx.HTTPError:
            return False
        # NOTE: These metrics should be documented at
        # https://docs.ray.io/en/latest/serve/monitoring.html#metrics
        # Any updates to here should be reflected there too.
        expected_metrics = [
            # counter
            "ray_serve_num_router_requests",
            "ray_serve_num_http_requests",
            "ray_serve_deployment_queued_queries",
            "ray_serve_deployment_request_counter",
            "ray_serve_deployment_replica_starts",
            # histogram
            "ray_serve_deployment_processing_latency_ms_bucket",
            "ray_serve_deployment_processing_latency_ms_count",
            "ray_serve_deployment_processing_latency_ms_sum",
            "ray_serve_deployment_processing_latency_ms",
            # gauge
            "ray_serve_replica_processing_queries",
            "ray_serve_deployment_replica_healthy",
            # handle
            "ray_serve_handle_request_counter",
        ]
        for metric in expected_metrics:
            # For the final error round
            if do_assert:
                assert metric in resp
            # For the wait_for_condition
            else:
                if metric not in resp:
                    return False
        return True
    try:
        wait_for_condition(verify_metrics, retry_interval_ms=500)
    except RuntimeError:
        # Re-run with assertions enabled to produce a useful failure message.
        verify_metrics(do_assert=True)
def test_http_replica_gauge_metrics(metrics_start_shutdown):
    """Test http replica gauge metrics.

    The single in-flight request blocks on the signal actor, so the
    processing-queries gauge should read exactly 1.0.
    """
    signal = SignalActor.remote()
    @serve.deployment(graceful_shutdown_timeout_s=0.0001)
    class A:
        async def __call__(self):
            await signal.wait.remote()
    handle = serve.run(A.bind(), name="app1")
    _ = handle.remote()
    processing_requests = get_metric_dictionaries(
        "ray_serve_replica_processing_queries", timeout=5
    )
    assert len(processing_requests) == 1
    assert processing_requests[0]["deployment"] == "A"
    assert processing_requests[0]["application"] == "app1"
    print("ray_serve_replica_processing_queries exists.")
    def ensure_request_processing():
        resp = httpx.get("http://127.0.0.1:9999").text
        resp = resp.split("\n")
        for metrics in resp:
            if "# HELP" in metrics or "# TYPE" in metrics:
                continue
            if "ray_serve_replica_processing_queries" in metrics:
                assert "1.0" in metrics
                return True
        # Implicitly returns None (falsy) when the gauge line is absent,
        # so wait_for_condition keeps retrying.
    wait_for_condition(ensure_request_processing, timeout=5)
def test_proxy_metrics_not_found(metrics_start_shutdown):
    """Verify 404s raised inside a deployed app produce HTTP error metrics."""
    # NOTE: When using HAProxy, 404 errors for non-existent routes are handled
    # by HAProxy itself (not forwarded to replicas), so we need to deploy an
    # application and test 404s within that application's context.
    # These metrics should be documented at
    # https://docs.ray.io/en/latest/serve/monitoring.html#metrics
    # Any updates here should be reflected there too.
    expected_metrics = [
        "ray_serve_num_http_requests",
        "ray_serve_num_http_error_requests_total",
        "ray_serve_num_deployment_http_error_requests",
        "ray_serve_http_request_latency_ms",
    ]
    app = FastAPI()

    @serve.deployment(name="A")
    @serve.ingress(app)
    class A:
        @app.get("/existing-path")  # Only this path is defined
        async def handler(self, request: Request):
            return {"message": "success"}

    app_name = "app"
    serve.run(A.bind(), name=app_name, route_prefix="/A")

    def verify_metrics(_expected_metrics, do_assert=False):
        try:
            resp = httpx.get("http://127.0.0.1:9999").text
        # Requests will fail if we are crashing the controller
        except httpx.HTTPError:
            return False
        for metric in _expected_metrics:
            if do_assert:
                assert metric in resp
            if metric not in resp:
                return False
        return True

    # Trigger HTTP 404 error via the deployed application
    httpx.get("http://127.0.0.1:8000/A/nonexistent")
    httpx.get("http://127.0.0.1:8000/A/nonexistent")
    # Ensure all expected metrics are present. The kwarg must match the
    # predicate's parameter name (_expected_metrics); the previous spelling
    # raised TypeError on every poll so the wait always burned its timeout.
    try:
        wait_for_condition(
            verify_metrics,
            retry_interval_ms=1000,
            timeout=10,
            _expected_metrics=expected_metrics,
        )
    except RuntimeError:
        verify_metrics(expected_metrics, True)

    def verify_error_count(do_assert=False):
        resp = httpx.get("http://127.0.0.1:9999").text
        resp = resp.split("\n")
        http_error_count = 0
        deployment_404_count = 0
        for metrics in resp:
            if "# HELP" in metrics or "# TYPE" in metrics:
                continue
            # Skip health check metrics
            if "/-/healthz" in metrics:
                continue
            if (
                "ray_serve_num_http_error_requests_total" in metrics
                and 'route="/A"' in metrics
            ):
                # Accumulate error counts from route "/A"
                http_error_count += int(float(metrics.split(" ")[-1]))
            elif (
                "ray_serve_num_deployment_http_error_requests_total" in metrics
                and 'route="/A"' in metrics
                and 'error_code="404"' in metrics
            ):
                # Count deployment 404 errors
                deployment_404_count += int(float(metrics.split(" ")[-1]))
        # We expect 2 requests total, both should be 404 errors from the deployment
        if do_assert:
            assert (
                http_error_count == 2
            ), f"Expected at least 2 HTTP errors, got {http_error_count}"
            assert (
                deployment_404_count == 2
            ), f"Expected 2 deployment 404 errors, got {deployment_404_count}"
        return http_error_count >= 2 and deployment_404_count == 2

    # There is a latency in updating the counter
    try:
        wait_for_condition(verify_error_count, retry_interval_ms=1000, timeout=20)
    except RuntimeError:
        verify_error_count(do_assert=True)
def test_proxy_metrics_internal_error(metrics_start_shutdown):
    """Verify explicit 500 responses produce HTTP error metrics."""
    # NOTE: When using HAProxy, we need the replica to stay alive to emit metrics.
    # Instead of crashing the actor (which prevents metric emission), we return
    # a 500 error explicitly.
    # These metrics should be documented at
    # https://docs.ray.io/en/latest/serve/monitoring.html#metrics
    # Any updates here should be reflected there too.
    expected_metrics = [
        "ray_serve_num_http_requests",
        "ray_serve_num_http_error_requests_total",
        "ray_serve_num_deployment_http_error_requests",
        "ray_serve_http_request_latency_ms",
    ]

    def verify_metrics(_expected_metrics, do_assert=False):
        try:
            resp = httpx.get("http://127.0.0.1:9999", timeout=None).text
        # Requests will fail if we are crashing the controller
        except httpx.HTTPError:
            return False
        for metric in _expected_metrics:
            if do_assert:
                assert metric in resp
            if metric not in resp:
                return False
        return True

    @serve.deployment(name="A")
    class A:
        # NOTE(review): async __init__ is accepted by Serve deployments
        # (the replica awaits it); plain Python classes would reject this.
        async def __init__(self):
            pass

        async def __call__(self, request: Request):
            # Return 500 Internal Server Error
            return PlainTextResponse("Internal Server Error", status_code=500)

    app_name = "app"
    serve.run(A.bind(), name=app_name, route_prefix="/")
    httpx.get("http://localhost:8000/", timeout=None)
    httpx.get("http://localhost:8000/", timeout=None)
    # Ensure all expected metrics are present. The kwarg must match the
    # predicate's parameter name (_expected_metrics); the previous spelling
    # raised TypeError on every poll so the wait always burned its timeout.
    try:
        wait_for_condition(
            verify_metrics,
            retry_interval_ms=1000,
            timeout=10,
            _expected_metrics=expected_metrics,
        )
    except RuntimeError:
        verify_metrics(expected_metrics, True)

    def verify_error_count(do_assert=False):
        resp = httpx.get("http://127.0.0.1:9999", timeout=None).text
        resp = resp.split("\n")
        for metrics in resp:
            if "# HELP" in metrics or "# TYPE" in metrics:
                continue
            if "ray_serve_num_http_error_requests_total" in metrics:
                # route "/" should have error count 2 (HTTP 500)
                if do_assert:
                    assert "2.0" in metrics
                if "2.0" not in metrics:
                    return False
            elif "ray_serve_num_deployment_http_error_requests" in metrics:
                # deployment A should have error count 2 (HTTP 500)
                if do_assert:
                    assert 'deployment="A"' in metrics and "2.0" in metrics
                if 'deployment="A"' not in metrics or "2.0" not in metrics:
                    return False
        return True

    # There is a latency in updating the counter
    try:
        wait_for_condition(verify_error_count, retry_interval_ms=1000, timeout=10)
    except RuntimeError:
        verify_error_count(do_assert=True)
def test_proxy_metrics_fields_not_found(metrics_start_shutdown):
    """Tests the proxy metrics' fields' behavior for not found.

    Note: When using HAProxy, we need to deploy an application that returns 404,
    as HAProxy handles non-existent route 404s itself without forwarding to replicas.
    """
    # These metrics should be documented at
    # https://docs.ray.io/en/latest/serve/monitoring.html#metrics
    # Any updates here should be reflected there too.
    expected_metrics = [
        "ray_serve_num_http_requests",
        "ray_serve_num_http_error_requests_total",
        "ray_serve_num_deployment_http_error_requests",
        "ray_serve_http_request_latency_ms",
    ]
    app = FastAPI()

    @serve.deployment(name="test_app")
    @serve.ingress(app)
    class NotFoundApp:
        @app.get("/existing-path")  # Only this path is defined
        async def handler(self, request: Request):
            return {"message": "success"}

    app_name = "app"
    serve.run(NotFoundApp.bind(), name=app_name, route_prefix="/test")

    def verify_metrics(_expected_metrics, do_assert=False):
        try:
            resp = httpx.get("http://127.0.0.1:9999").text
        # Requests will fail if we are crashing the controller
        except httpx.HTTPError:
            return False
        for metric in _expected_metrics:
            if do_assert:
                assert metric in resp
            if metric not in resp:
                return False
        return True

    # Trigger HTTP 404 error via the deployed application
    httpx.get("http://127.0.0.1:8000/test/nonexistent")
    httpx.get("http://127.0.0.1:8000/test/nonexistent")
    # Ensure all expected metrics are present. The kwarg must match the
    # predicate's parameter name (_expected_metrics); the previous spelling
    # raised TypeError on every poll so the wait always burned its timeout.
    try:
        wait_for_condition(
            verify_metrics,
            retry_interval_ms=1000,
            timeout=10,
            _expected_metrics=expected_metrics,
        )
    except RuntimeError:
        verify_metrics(expected_metrics, True)

    def verify_error_count(do_assert=False):
        resp = httpx.get("http://127.0.0.1:9999").text
        resp = resp.split("\n")
        http_error_count = 0
        deployment_404_count = 0
        for metrics in resp:
            if "# HELP" in metrics or "# TYPE" in metrics:
                continue
            # Skip health check metrics
            if "/-/healthz" in metrics:
                continue
            if (
                "ray_serve_num_http_error_requests_total" in metrics
                and 'route="/test"' in metrics
            ):
                # Accumulate error counts from route "/test"
                http_error_count += int(float(metrics.split(" ")[-1]))
            elif (
                "ray_serve_num_deployment_http_error_requests_total" in metrics
                and 'route="/test"' in metrics
                and 'error_code="404"' in metrics
            ):
                # Count deployment 404 errors
                deployment_404_count += int(float(metrics.split(" ")[-1]))
        # We expect 2 requests total, both should be 404 errors from the deployment
        if do_assert:
            assert (
                http_error_count == 2
            ), f"Expected at least 2 HTTP errors, got {http_error_count}"
            assert (
                deployment_404_count == 2
            ), f"Expected 2 deployment 404 errors, got {deployment_404_count}"
        return http_error_count >= 2 and deployment_404_count == 2

    # There is a latency in updating the counter
    try:
        wait_for_condition(verify_error_count, retry_interval_ms=1000, timeout=20)
    except RuntimeError:
        verify_error_count(do_assert=True)
@pytest.mark.parametrize(
    "metrics_start_shutdown",
    [
        1,
    ],
    indirect=True,
)
def test_proxy_timeout_metrics(metrics_start_shutdown):
    """Test that HTTP timeout metrics are reported correctly.

    The fixture is indirectly parametrized with 1, which it interprets as
    request_timeout_s=1, so the request blocked on the signal times out (408).
    """
    signal = SignalActor.remote()
    @serve.deployment
    async def return_status_code_with_timeout(request: Request):
        await signal.wait.remote()
        return
    serve.run(
        return_status_code_with_timeout.bind(),
        route_prefix="/status_code_timeout",
        name="status_code_timeout",
    )
    http_url = get_application_url("HTTP", app_name="status_code_timeout")
    r = httpx.get(http_url)
    assert r.status_code == 408
    # Unblock the (already timed-out) handler before checking metrics.
    ray.get(signal.send.remote(clear=True))
    num_errors = get_metric_dictionaries("ray_serve_num_http_error_requests_total")
    assert len(num_errors) == 1
    assert num_errors[0]["route"] == "/status_code_timeout"
    assert num_errors[0]["error_code"] == "408"
    assert num_errors[0]["method"] == "GET"
    assert num_errors[0]["application"] == "status_code_timeout"
@pytest.mark.skipif(sys.platform == "win32", reason="Flaky on Windows")
def test_proxy_disconnect_http_metrics(metrics_start_shutdown):
    """Test that HTTP disconnect metrics are reported correctly.

    A raw HTTP connection is closed while the handler is still blocked, which
    should be recorded as error code 499 (client closed request).
    """
    signal = SignalActor.remote()
    @serve.deployment
    class Disconnect:
        async def __call__(self, request: Request):
            await signal.wait.remote()
            return
    serve.run(
        Disconnect.bind(),
        route_prefix="/disconnect",
        name="disconnect",
    )
    # Simulate an HTTP disconnect
    http_url = get_application_url("HTTP", app_name="disconnect")
    ip_port = http_url.replace("http://", "").split("/")[0]  # remove the route prefix
    ip, port = parse_address(ip_port)
    conn = http.client.HTTPConnection(ip, int(port))
    conn.request("GET", "/disconnect")
    # Wait until the handler is actually blocked on the signal before closing.
    wait_for_condition(
        lambda: ray.get(signal.cur_num_waiters.remote()) == 1, timeout=10
    )
    conn.close()  # Forcefully close the connection
    ray.get(signal.send.remote(clear=True))
    num_errors = get_metric_dictionaries("ray_serve_num_http_error_requests_total")
    assert len(num_errors) == 1
    assert num_errors[0]["route"] == "/disconnect"
    assert num_errors[0]["error_code"] == "499"
    assert num_errors[0]["method"] == "GET"
    assert num_errors[0]["application"] == "disconnect"
@pytest.mark.skipif(sys.platform == "win32", reason="Flaky on Windows")
def test_no_499_misclassification_after_successful_response(metrics_start_shutdown):
    """Reproduce the race where response is sent (body without more_body) but
    response_finished stays False, then disconnect arrives and we incorrectly log 499.
    convert_object_to_asgi_messages omits more_body in the final body chunk (valid
    per ASGI spec). The fix treats omitted more_body as final so we don't
    misclassify successful responses as 499 when client disconnects after response.
    """
    @serve.deployment
    async def fast_return(request: Request):
        return "ok"
    serve.run(
        fast_return.bind(),
        route_prefix="/race_test",
        name="race_test",
    )
    http_url = get_application_url("HTTP", app_name="race_test")
    def _request_then_close_immediately():
        """Send request, read 1 byte of response, then close. This creates the race:
        server has sent full response (body without more_body) but client closes
        before request_task exits. Without the fix, response_finished stays False
        and we incorrectly log 499."""
        with httpx.Client() as client:
            with client.stream("GET", http_url) as response:
                next(
                    response.iter_bytes(1)
                )  # Read 1 byte, then exit - connection closes
    # Run many times to hit the race (disconnect arrives before request_task exits)
    num_requests = 500
    with ThreadPoolExecutor(max_workers=10) as executor:
        futures = [
            executor.submit(_request_then_close_immediately)
            for _ in range(num_requests)
        ]
        # Propagate any client-side exception from the workers.
        for f in as_completed(futures):
            f.result()
    # First assert all requests were processed
    def check_request_count_and_no_499_errors():
        check_sum_metric_eq(
            "ray_serve_num_http_requests_total",
            num_requests,
            tags={"route": "/race_test"},
        )
        check_sum_metric_eq(
            "ray_serve_num_http_error_requests_total",
            0,
            tags={"route": "/race_test", "error_code": "499"},
        )
        return True
    wait_for_condition(check_request_count_and_no_499_errors, timeout=30)
def test_proxy_metrics_fields_internal_error(metrics_start_shutdown):
    """Tests the proxy metrics' fields' behavior for internal error.

    Two apps are deployed but only "app" receives traffic; the single-sample
    assertions below rely on "app2" never being requested.
    """
    @serve.deployment()
    def f(*args):
        return 1 / 0
    real_app_name = "app"
    real_app_name2 = "app2"
    serve.run(f.bind(), name=real_app_name, route_prefix="/real_route")
    serve.run(f.bind(), name=real_app_name2, route_prefix="/real_route2")
    # Deployment should generate divide-by-zero errors
    correct_url = get_application_url("HTTP", real_app_name)
    _ = httpx.get(correct_url).text
    print("Sent requests to correct URL.")
    num_deployment_errors = get_metric_dictionaries(
        "ray_serve_num_deployment_http_error_requests_total"
    )
    assert len(num_deployment_errors) == 1
    assert num_deployment_errors[0]["deployment"] == "f"
    assert num_deployment_errors[0]["error_code"] == "500"
    assert num_deployment_errors[0]["method"] == "GET"
    assert num_deployment_errors[0]["application"] == "app"
    print("ray_serve_num_deployment_http_error_requests working as expected.")
    latency_metrics = get_metric_dictionaries("ray_serve_http_request_latency_ms_sum")
    # Filter out health check metrics - HAProxy generates health checks to /-/healthz
    latency_metrics = [m for m in latency_metrics if m["route"] != "/-/healthz"]
    assert len(latency_metrics) == 1
    assert latency_metrics[0]["method"] == "GET"
    assert latency_metrics[0]["route"] == "/real_route"
    assert latency_metrics[0]["application"] == "app"
    assert latency_metrics[0]["status_code"] == "500"
    print("ray_serve_http_request_latency_ms working as expected.")
@pytest.mark.skipif(sys.platform == "win32", reason="Flaky on Windows")
def test_proxy_metrics_http_status_code_is_error(metrics_start_shutdown):
    """Verify that 2xx and 3xx status codes aren't errors, others are."""
    def check_request_count_metrics(
        expected_error_count: int,
        expected_success_count: int,
    ):
        # Sums the error and total-request counters across all label sets.
        # NOTE: "success_count" is really the total request count, which is
        # why it keeps increasing even for the 4xx/5xx cases below.
        resp = httpx.get("http://127.0.0.1:9999").text
        error_count = 0
        success_count = 0
        for line in resp.split("\n"):
            # Skip health check metrics
            if "/-/healthz" in line:
                continue
            if line.startswith("ray_serve_num_http_error_requests_total"):
                error_count += int(float(line.split(" ")[-1]))
            if line.startswith("ray_serve_num_http_requests_total"):
                success_count += int(float(line.split(" ")[-1]))
        assert error_count == expected_error_count
        assert success_count == expected_success_count
        return True
    @serve.deployment
    async def return_status_code(request: Request):
        # The request body carries the status code to echo back.
        code = int((await request.body()).decode("utf-8"))
        return PlainTextResponse("", status_code=code)
    serve.run(return_status_code.bind())
    http_url = get_application_url("HTTP")
    # 200 is not an error.
    r = httpx.request("GET", http_url, content=b"200")
    assert r.status_code == 200
    wait_for_condition(
        check_request_count_metrics,
        expected_error_count=0,
        expected_success_count=1,
    )
    # 2xx is not an error.
    r = httpx.request("GET", http_url, content=b"250")
    assert r.status_code == 250
    wait_for_condition(
        check_request_count_metrics,
        expected_error_count=0,
        expected_success_count=2,
    )
    # 3xx is not an error.
    r = httpx.request("GET", http_url, content=b"300")
    assert r.status_code == 300
    wait_for_condition(
        check_request_count_metrics,
        expected_error_count=0,
        expected_success_count=3,
    )
    # 4xx is an error.
    r = httpx.request("GET", http_url, content=b"400")
    assert r.status_code == 400
    wait_for_condition(
        check_request_count_metrics,
        expected_error_count=1,
        expected_success_count=4,
    )
    # 5xx is an error.
    r = httpx.request("GET", http_url, content=b"500")
    assert r.status_code == 500
    wait_for_condition(
        check_request_count_metrics,
        expected_error_count=2,
        expected_success_count=5,
    )
def test_replica_metrics_fields(metrics_start_shutdown):
    """Test replica metrics fields"""
    # Two apps with one deployment each, so each metric should have exactly
    # one sample per (deployment, application) pair.
    @serve.deployment
    def f():
        return "hello"
    @serve.deployment
    def g():
        return "world"
    serve.run(f.bind(), name="app1", route_prefix="/f")
    serve.run(g.bind(), name="app2", route_prefix="/g")
    url_f = get_application_url("HTTP", "app1")
    url_g = get_application_url("HTTP", "app2")
    assert "hello" == httpx.post(url_f).text
    assert "world" == httpx.post(url_g).text
    # Request counter: one sample per app with route/deployment/application tags.
    wait_for_condition(
        lambda: len(
            get_metric_dictionaries("ray_serve_deployment_request_counter_total")
        )
        == 2,
        timeout=40,
    )
    metrics = get_metric_dictionaries("ray_serve_deployment_request_counter_total")
    assert len(metrics) == 2
    expected_output = {
        ("/f", "f", "app1"),
        ("/g", "g", "app2"),
    }
    assert {
        (
            metric["route"],
            metric["deployment"],
            metric["application"],
        )
        for metric in metrics
    } == expected_output
    # Replica start counter: tagged with deployment/application only.
    start_metrics = get_metric_dictionaries("ray_serve_deployment_replica_starts_total")
    assert len(start_metrics) == 2
    expected_output = {("f", "app1"), ("g", "app2")}
    assert {
        (start_metric["deployment"], start_metric["application"])
        for start_metric in start_metrics
    } == expected_output
    # Latency metrics
    wait_for_condition(
        lambda: len(
            get_metric_dictionaries("ray_serve_deployment_processing_latency_ms_count")
        )
        == 2,
        timeout=40,
    )
    for metric_name in [
        "ray_serve_deployment_processing_latency_ms_count",
        "ray_serve_deployment_processing_latency_ms_sum",
    ]:
        latency_metrics = get_metric_dictionaries(metric_name)
        print(f"checking metric {metric_name}, {latency_metrics}")
        assert len(latency_metrics) == 2
        expected_output = {("f", "app1"), ("g", "app2")}
        assert {
            (latency_metric["deployment"], latency_metric["application"])
            for latency_metric in latency_metrics
        } == expected_output
    wait_for_condition(
        lambda: len(get_metric_dictionaries("ray_serve_replica_processing_queries"))
        == 2
    )
    processing_queries = get_metric_dictionaries("ray_serve_replica_processing_queries")
    expected_output = {("f", "app1"), ("g", "app2")}
    assert {
        (processing_query["deployment"], processing_query["application"])
        for processing_query in processing_queries
    } == expected_output
    # Third app whose handler raises, to exercise the error counter.
    @serve.deployment
    def h():
        return 1 / 0
    serve.run(h.bind(), name="app3", route_prefix="/h")
    url_h = get_application_url("HTTP", "app3")
    assert 500 == httpx.get(url_h).status_code
    wait_for_condition(
        lambda: len(get_metric_dictionaries("ray_serve_deployment_error_counter_total"))
        == 1,
        timeout=40,
    )
    err_requests = get_metric_dictionaries("ray_serve_deployment_error_counter_total")
    assert len(err_requests) == 1
    expected_output = ("/h", "h", "app3")
    assert (
        err_requests[0]["route"],
        err_requests[0]["deployment"],
        err_requests[0]["application"],
    ) == expected_output
    # Health gauge: one sample per replica across all three apps.
    wait_for_condition(
        lambda: len(get_metric_dictionaries("ray_serve_deployment_replica_healthy"))
        == 3,
        timeout=40,
    )
    health_metrics = get_metric_dictionaries("ray_serve_deployment_replica_healthy")
    expected_output = {
        ("f", "app1"),
        ("g", "app2"),
        ("h", "app3"),
    }
    assert {
        (health_metric["deployment"], health_metric["application"])
        for health_metric in health_metrics
    } == expected_output
@pytest.mark.skipif(sys.platform == "win32", reason="Flaky on Windows")
def test_multiplexed_metrics(metrics_start_shutdown):
    """Tests multiplexed API corresponding metrics."""
    @serve.deployment
    class Model:
        # At most 2 models per replica, so loading a third forces an unload.
        @serve.multiplexed(max_num_models_per_replica=2)
        async def get_model(self, model_id: str):
            return model_id
        async def __call__(self, model_id: str):
            await self.get_model(model_id)
            return
    handle = serve.run(Model.bind(), name="app", route_prefix="/app")
    handle.remote("model1")
    handle.remote("model2")
    # Trigger model eviction.
    handle.remote("model3")
    # All of these metric names must eventually appear in the scraped output.
    expected_metrics = [
        "ray_serve_multiplexed_model_load_latency_ms",
        "ray_serve_multiplexed_model_unload_latency_ms",
        "ray_serve_num_multiplexed_models",
        "ray_serve_multiplexed_models_load_counter",
        "ray_serve_multiplexed_models_unload_counter",
    ]
    def verify_metrics():
        try:
            resp = httpx.get("http://127.0.0.1:9999").text
        # Requests will fail if we are crashing the controller
        except httpx.HTTPError:
            return False
        for metric in expected_metrics:
            assert metric in resp
        return True
    wait_for_condition(
        verify_metrics,
        timeout=40,
        retry_interval_ms=1000,
    )
def test_long_poll_host_sends_counted(serve_instance):
    """Check that the transmissions by the long_poll are counted."""
    # Short listen timeout so the final TIMEOUT case returns quickly.
    host = ray.remote(LongPollHost).remote(
        listen_for_change_request_timeout_s=(0.01, 0.01)
    )
    # Write a value.
    ray.get(host.notify_changed.remote({"key_1": 999}))
    object_ref = host.listen_for_change.remote({"key_1": -1})
    # Check that the result's size is reported.
    result_1: Dict[str, UpdatedObject] = ray.get(object_ref)
    wait_for_condition(
        check_metric_float_eq,
        timeout=15,
        metric="ray_serve_long_poll_host_transmission_counter",
        expected=1,
        expected_tags={"namespace_or_state": "key_1"},
    )
    # Write two new values.
    ray.get(host.notify_changed.remote({"key_1": 1000}))
    ray.get(host.notify_changed.remote({"key_2": 1000}))
    # Listen with key_1's previous snapshot id and a fresh key_2 (-1), so both
    # keys are stale and should be re-transmitted.
    object_ref = host.listen_for_change.remote(
        {"key_1": result_1["key_1"].snapshot_id, "key_2": -1}
    )
    # Check that the new objects are transmitted.
    result_2: Dict[str, UpdatedObject] = ray.get(object_ref)
    wait_for_condition(
        check_metric_float_eq,
        timeout=15,
        metric="ray_serve_long_poll_host_transmission_counter",
        expected=1,
        expected_tags={"namespace_or_state": "key_2"},
    )
    # key_1 has now been transmitted twice (initial value + update).
    wait_for_condition(
        check_metric_float_eq,
        timeout=15,
        metric="ray_serve_long_poll_host_transmission_counter",
        expected=2,
        expected_tags={"namespace_or_state": "key_1"},
    )
    # Check that a timeout result is counted.
    object_ref = host.listen_for_change.remote({"key_2": result_2["key_2"].snapshot_id})
    _ = ray.get(object_ref)
    wait_for_condition(
        check_metric_float_eq,
        timeout=15,
        metric="ray_serve_long_poll_host_transmission_counter",
        expected=1,
        expected_tags={"namespace_or_state": "TIMEOUT"},
    )
def test_actor_summary(serve_instance):
    """Deploying an app should surface the expected Serve actor classes."""
    @serve.deployment
    def f():
        pass
    serve.run(f.bind(), name="app")
    alive_actors = list_actors(filters=[("state", "=", "ALIVE")])
    observed_classes = set()
    for actor in alive_actors:
        observed_classes.add(actor["class_name"])
    required = {"ServeController", "HAProxyManager", "ServeReplica:app:f"}
    # Equivalent to observed_classes.issuperset(required).
    assert required <= observed_classes
if __name__ == "__main__":
sys.exit(pytest.main(["-v", "-s", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/test_metrics_haproxy.py",
"license": "Apache License 2.0",
"lines": 860,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/serve/tests/unit/test_controller_haproxy.py | import pytest
from ray.serve._private.common import (
DeploymentID,
ReplicaID,
RequestProtocol,
RunningReplicaInfo,
)
from ray.serve.schema import (
Target,
TargetGroup,
)
from ray.serve.tests.unit.test_controller_direct_ingress import (
FakeApplicationStateManager,
FakeDeploymentStateManager,
FakeDirectIngressController,
FakeKVStore,
FakeLongPollHost,
FakeProxyStateManager,
)
# Test Controller that overrides methods and dependencies for HAProxy testing
class FakeHAProxyController(FakeDirectIngressController):
    """Fake controller identical to the direct-ingress fake, except that
    HAProxy routing is enabled by default (``_ha_proxy_enabled = True``)."""
    def __init__(
        self,
        kv_store,
        long_poll_host,
        application_state_manager,
        deployment_state_manager,
        proxy_state_manager,
    ):
        super().__init__(
            kv_store=kv_store,
            long_poll_host=long_poll_host,
            application_state_manager=application_state_manager,
            deployment_state_manager=deployment_state_manager,
            proxy_state_manager=proxy_state_manager,
        )
        # Tests may flip this flag to exercise both code paths.
        self._ha_proxy_enabled = True
@pytest.fixture
def haproxy_controller():
    """Yield a FakeHAProxyController wired up with empty fake dependencies."""
    controller = FakeHAProxyController(
        kv_store=FakeKVStore(),
        long_poll_host=FakeLongPollHost(),
        application_state_manager=FakeApplicationStateManager({}, {}, {}),
        deployment_state_manager=FakeDeploymentStateManager({}),
        proxy_state_manager=FakeProxyStateManager(),
    )
    yield controller
@pytest.mark.parametrize("from_proxy_manager", [True, False])
@pytest.mark.parametrize("ha_proxy_enabled", [True, False])
def test_get_target_groups_haproxy(
    haproxy_controller: FakeHAProxyController,
    from_proxy_manager: bool,
    ha_proxy_enabled: bool,
):
    """Tests get_target_groups returns the appropriate target groups based on the
    ha_proxy_enabled and from_proxy_manager parameters."""
    haproxy_controller._ha_proxy_enabled = ha_proxy_enabled
    # Setup test data with running applications
    app_statuses = {"app1": {}}
    route_prefixes = {"app1": "/app1"}
    ingress_deployments = {"app1": "app1_ingress"}
    deployment_id1 = DeploymentID(name="app1_ingress", app_name="app1")
    # Create replica info
    replica_id1 = ReplicaID(unique_id="replica1", deployment_id=deployment_id1)
    replica_info1 = RunningReplicaInfo(
        replica_id=replica_id1,
        node_id="node1",
        node_ip="10.0.0.1",
        availability_zone="az1",
        actor_name="replica1",
        max_ongoing_requests=100,
    )
    running_replica_infos = {deployment_id1: [replica_info1]}
    # Setup test application state manager
    haproxy_controller.application_state_manager = FakeApplicationStateManager(
        app_statuses=app_statuses,
        route_prefixes=route_prefixes,
        ingress_deployments=ingress_deployments,
    )
    # Setup test deployment state manager
    haproxy_controller.deployment_state_manager = FakeDeploymentStateManager(
        running_replica_infos=running_replica_infos,
    )
    # Setup proxy state manager
    haproxy_controller.proxy_state_manager.add_proxy_details(
        "proxy_node1", "10.0.1.1", "proxy1"
    )
    haproxy_controller.proxy_state_manager.add_proxy_details(
        "proxy_node2", "10.0.1.2", "proxy2"
    )
    # Allocate ports for replicas using controller's methods
    http_port1 = haproxy_controller.allocate_replica_port(
        "node1", replica_id1.unique_id, RequestProtocol.HTTP
    )
    grpc_port1 = haproxy_controller.allocate_replica_port(
        "node1", replica_id1.unique_id, RequestProtocol.GRPC
    )
    target_groups = haproxy_controller.get_target_groups(
        from_proxy_manager=from_proxy_manager
    )
    # Create expected target groups
    # With HAProxy enabled and the caller NOT being the proxy manager, the
    # targets are the proxies (ports 8000/9000); otherwise they are the
    # replicas themselves via the allocated per-replica ports.
    if ha_proxy_enabled and not from_proxy_manager:
        expected_target_groups = [
            TargetGroup(
                protocol=RequestProtocol.HTTP,
                route_prefix="/",
                app_name="",
                targets=[
                    Target(ip="10.0.1.1", port=8000, instance_id="", name="proxy1"),
                    Target(ip="10.0.1.2", port=8000, instance_id="", name="proxy2"),
                ],
            ),
            TargetGroup(
                protocol=RequestProtocol.GRPC,
                route_prefix="/",
                app_name="",
                targets=[
                    Target(ip="10.0.1.1", port=9000, instance_id="", name="proxy1"),
                    Target(ip="10.0.1.2", port=9000, instance_id="", name="proxy2"),
                ],
            ),
        ]
    else:
        expected_target_groups = [
            TargetGroup(
                protocol=RequestProtocol.HTTP,
                route_prefix="/app1",
                app_name="app1",
                targets=[
                    Target(
                        ip="10.0.0.1", port=http_port1, instance_id="", name="replica1"
                    ),
                ],
            ),
            TargetGroup(
                protocol=RequestProtocol.GRPC,
                route_prefix="/app1",
                app_name="app1",
                targets=[
                    Target(
                        ip="10.0.0.1", port=grpc_port1, instance_id="", name="replica1"
                    ),
                ],
            ),
        ]
    # Sort both lists to ensure consistent comparison
    target_groups.sort(key=lambda g: (g.protocol, g.route_prefix))
    expected_target_groups.sort(key=lambda g: (g.protocol, g.route_prefix))
    assert target_groups == expected_target_groups
if __name__ == "__main__":
pytest.main()
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/unit/test_controller_haproxy.py",
"license": "Apache License 2.0",
"lines": 156,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/expressions/test_cast.py | import pandas as pd
import pyarrow as pa
import pytest
from packaging.version import parse as parse_version
import ray
from ray.data._internal.util import rows_same
from ray.data._internal.utils.arrow_utils import get_pyarrow_version
from ray.data.datatype import DataType
from ray.data.exceptions import UserCodeException
from ray.data.expressions import col
from ray.data.tests.conftest import * # noqa
from ray.tests.conftest import * # noqa
@pytest.mark.skipif(
    get_pyarrow_version() < parse_version("20.0.0"),
    reason="with_column requires PyArrow >= 20.0.0",
)
@pytest.mark.parametrize(
    "expr, target_type, expected_rows",
    [
        # Basic type conversions using Ray Data's DataType
        (col("id"), DataType.int64(), [{"id": i, "result": i} for i in range(5)]),
        (
            col("id"),
            DataType.float64(),
            [{"id": i, "result": float(i)} for i in range(5)],
        ),
        (
            col("id"),
            DataType.string(),
            [{"id": i, "result": str(i)} for i in range(5)],
        ),
        (
            col("id") / 2,
            DataType.int64(),
            [{"id": i, "result": i // 2} for i in range(5)],
        ),
        # col("id")/2 uses integer division in expression layer, then cast to float64
        (
            col("id") / 2,
            DataType.float64(),
            [{"id": i, "result": float(i // 2)} for i in range(5)],
        ),
    ],
)
def test_cast_expression_basic(
    ray_start_regular_shared,
    expr,
    target_type,
    expected_rows,
    target_max_block_size_infinite_or_default,
):
    """Test basic type casting with cast() method."""
    # Apply the cast expression to a 5-row range dataset and compare rows.
    ds = ray.data.range(5).with_column("result", expr.cast(target_type))
    actual = ds.take_all()
    assert rows_same(pd.DataFrame(actual), pd.DataFrame(expected_rows))
@pytest.mark.skipif(
    get_pyarrow_version() < parse_version("20.0.0"),
    reason="with_column requires PyArrow >= 20.0.0",
)
def test_cast_expression_usecase(
    ray_start_regular_shared, target_max_block_size_infinite_or_default
):
    """Test the user use case: converting float result from modulo to int64."""
    # The modulo expression yields floats; casting should produce int64 values.
    dataset = ray.data.range(10).with_column(
        "part", (col("id") % 2).cast(DataType.int64())
    )
    rows = dataset.take_all()
    expected = [{"id": n, "part": n % 2} for n in range(10)]
    assert rows_same(pd.DataFrame(rows), pd.DataFrame(expected))
    # The resulting schema must also advertise the casted int64 type.
    schema = dataset.schema()
    assert "part" in schema.names
    part_idx = schema.names.index("part")
    assert schema.types[part_idx] == pa.int64()
@pytest.mark.skipif(
    get_pyarrow_version() < parse_version("20.0.0"),
    reason="with_column requires PyArrow >= 20.0.0",
)
def test_cast_expression_chained(
    ray_start_regular_shared, target_max_block_size_infinite_or_default
):
    """Test that cast() can be chained with other expressions."""
    ds = ray.data.range(5)
    # Cast to float64 then multiply
    ds = ds.with_column("result", col("id").cast(DataType.float64()) * 2.5)
    actual = ds.take_all()
    expected_rows = [{"id": i, "result": i * 2.5} for i in range(5)]
    assert rows_same(pd.DataFrame(actual), pd.DataFrame(expected_rows))
    # Cast result of arithmetic operation
    ds = ray.data.range(5)
    ds = ds.with_column("result", (col("id") + 1).cast(DataType.string()))
    actual = ds.take_all()
    expected_rows = [{"id": i, "result": str(i + 1)} for i in range(5)]
    assert rows_same(pd.DataFrame(actual), pd.DataFrame(expected_rows))
@pytest.mark.skipif(
    get_pyarrow_version() < parse_version("20.0.0"),
    reason="with_column requires PyArrow >= 20.0.0",
)
def test_cast_expression_safe_mode(
    ray_start_regular_shared, target_max_block_size_infinite_or_default
):
    """Test that safe=True (default) raises errors on invalid conversions."""
    ds = ray.data.from_items([{"value": "not_a_number"}])
    # Attempting to cast non-numeric string to int should raise an error
    # (surfaced directly or wrapped in UserCodeException by the executor).
    with pytest.raises((UserCodeException, ValueError, pa.ArrowInvalid)):
        ds.with_column("result", col("value").cast(DataType.int64())).materialize()
@pytest.mark.skipif(
    get_pyarrow_version() < parse_version("20.0.0"),
    reason="with_column requires PyArrow >= 20.0.0",
)
def test_cast_expression_invalid_type(
    ray_start_regular_shared, target_max_block_size_infinite_or_default
):
    """Test that invalid type targets raise appropriate errors."""
    dataset = ray.data.range(5)
    # A plain string is not a DataType, so building/running the pipeline fails.
    expected_message = "target_type must be a ray.data.datatype.DataType"
    with pytest.raises(TypeError, match=expected_message):
        dataset.with_column("result", col("id").cast("invalid_type")).materialize()
@pytest.mark.skipif(
    get_pyarrow_version() < parse_version("20.0.0"),
    reason="with_column requires PyArrow >= 20.0.0",
)
def test_cast_expression_multiple_types(
    ray_start_regular_shared, target_max_block_size_infinite_or_default
):
    """Test casting with multiple different target types."""
    ds = ray.data.from_items([{"id": 42, "score": 3.14}])
    # Cast id to different types
    ds = ds.with_column("id_int", col("id").cast(DataType.int64()))
    ds = ds.with_column("id_float", col("id").cast(DataType.float64()))
    ds = ds.with_column("id_str", col("id").cast(DataType.string()))
    # Cast score to int (use safe=False to allow float truncation to int)
    ds = ds.with_column("score_int", col("score").cast(DataType.int64(), safe=False))
    # Use rows_same to compare the full row content (expects DataFrames).
    results = ds.take_all()
    expected = [
        {
            "id": 42,
            "score": 3.14,
            "id_int": 42,
            "id_float": 42.0,
            "id_str": "42",
            "score_int": 3,
        }
    ]
    assert rows_same(pd.DataFrame(results), pd.DataFrame(expected))
@pytest.mark.skipif(
    get_pyarrow_version() < parse_version("20.0.0"),
    reason="with_column requires PyArrow >= 20.0.0",
)
def test_cast_expression_python_type_datatype_error(
    ray_start_regular_shared, target_max_block_size_infinite_or_default
):
    """Test that using Python-type-backed DataType in cast() raises a clear error."""
    # cast() validates its target eagerly, so the error fires at expression
    # build time (not at materialize).
    expected_pattern = "Python-type-backed DataType.*requires.*values"
    for python_type in (int, str):
        with pytest.raises(TypeError, match=expected_pattern):
            col("id").cast(DataType(python_type))
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/expressions/test_cast.py",
"license": "Apache License 2.0",
"lines": 166,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:ci/ray_ci/automation/image_tags_lib.py | """
Shared utilities for generating Docker image tags for Ray and Anyscale images.
"""
import logging
from typing import List
from ci.ray_ci.automation.crane_lib import (
CraneError,
call_crane_copy,
call_crane_manifest,
)
from ci.ray_ci.configs import DEFAULT_PYTHON_TAG_VERSION
from ci.ray_ci.docker_container import GPU_PLATFORM
logger = logging.getLogger(__name__)
class ImageTagsError(Exception):
    """Error raised when image tag operations fail.

    Raised for unrecognized platform formats (see ``format_platform_tag``)
    and for failed crane copies (see ``copy_image``).
    """
def format_platform_tag(platform: str) -> str:
    """Convert a platform string into its image-tag suffix.

    "cpu" and "tpu" map to "-cpu"/"-tpu"; CUDA platforms such as
    "cu12.1.1-cudnn8" are shortened to "-cu121" (first two dotted version
    components, dots dropped).

    Raises:
        ImageTagsError: If a GPU platform string has no dotted version.
    """
    if platform in ("cpu", "tpu"):
        return f"-{platform}"
    # Drop any "-cudnnX" suffix, then keep the first two version components.
    version = platform.split("-", 1)[0]
    version_parts = version.split(".")
    if len(version_parts) < 2:
        raise ImageTagsError(f"Unrecognized GPU platform format: {platform}")
    return f"-{version_parts[0]}{version_parts[1]}"
def format_python_tag(python_version: str) -> str:
    """Return the "-pyXY" tag suffix for a python version.

    Examples: "3.10" -> "-py310", "3.11" -> "-py311".
    """
    digits = python_version.replace(".", "")
    return "-py" + digits
def get_python_suffixes(python_version: str) -> List[str]:
    """Return the python-version tag suffixes for an image.

    The explicit "-pyXY" suffix is always included. The default version
    (DEFAULT_PYTHON_TAG_VERSION) additionally gets the empty suffix so
    un-suffixed tags keep working for backward compatibility.

    Examples: "3.10" -> ["-py310", ""], "3.11" -> ["-py311"].
    """
    explicit_suffix = format_python_tag(python_version)
    if python_version == DEFAULT_PYTHON_TAG_VERSION:
        return [explicit_suffix, ""]
    return [explicit_suffix]
def get_platform_suffixes(platform: str, image_type: str) -> List[str]:
    """Return the platform tag suffixes for an image, including aliases.

    Aliases:
    - "cpu" with ray/ray-extra images also gets the empty suffix.
    - GPU_PLATFORM gets the "-gpu" alias; with ray-ml/ray-ml-extra images it
      also gets the empty suffix.

    Args:
        platform: The platform string (e.g., "cpu", "cu12.1.1-cudnn8").
        image_type: The image type (e.g., "ray", "ray-ml", "ray-extra").

    Returns:
        List of platform suffixes to use for tagging.
    """
    suffixes = [format_platform_tag(platform)]
    if platform == "cpu" and image_type in ("ray", "ray-extra"):
        # Un-suffixed ray tags are aliases for the cpu image.
        suffixes.append("")
    elif platform == GPU_PLATFORM:
        # "-gpu" is an alias for the default GPU platform.
        suffixes.append("-gpu")
        if image_type in ("ray-ml", "ray-ml-extra"):
            # Un-suffixed ray-ml tags are aliases for the gpu image.
            suffixes.append("")
    return suffixes
def get_variation_suffix(image_type: str) -> str:
    """Return "-extra" for the -extra image variants, otherwise "".

    Examples: "ray" -> "", "ray-extra" -> "-extra", "ray-ml-extra" -> "-extra".
    """
    extra_image_types = {"ray-extra", "ray-ml-extra", "ray-llm-extra"}
    return "-extra" if image_type in extra_image_types else ""
def image_exists(tag: str) -> bool:
    """Check if a container image manifest exists using crane.

    Returns False when the manifest lookup raises CraneError, True otherwise.
    """
    try:
        call_crane_manifest(tag)
    except CraneError:
        return False
    return True
def copy_image(source: str, destination: str, dry_run: bool = False) -> None:
    """Copy a container image from source to destination using crane.

    Args:
        source: Fully-qualified source image reference.
        destination: Fully-qualified destination image reference.
        dry_run: If True, only log what would be copied without copying.

    Raises:
        ImageTagsError: If the underlying crane copy fails.
    """
    if dry_run:
        logger.info(f"DRY RUN: Would copy {source} -> {destination}")
        return
    logger.info(f"Copying {source} -> {destination}")
    try:
        call_crane_copy(source, destination)
        logger.info(f"Successfully copied to {destination}")
    except CraneError as e:
        # Chain the CraneError so the root cause is preserved in tracebacks
        # (previously the original exception context was implicit).
        raise ImageTagsError(f"Crane copy failed: {e}") from e
| {
"repo_id": "ray-project/ray",
"file_path": "ci/ray_ci/automation/image_tags_lib.py",
"license": "Apache License 2.0",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:ci/ray_ci/automation/test_image_tags_lib.py | import sys
import pytest
from ci.ray_ci.automation.image_tags_lib import (
ImageTagsError,
format_platform_tag,
format_python_tag,
get_platform_suffixes,
get_python_suffixes,
get_variation_suffix,
)
from ci.ray_ci.configs import DEFAULT_PYTHON_TAG_VERSION
from ci.ray_ci.docker_container import GPU_PLATFORM
class TestFormatPlatformTag:
    """Unit tests for format_platform_tag."""
    # Table-driven check: each platform string maps to its expected suffix.
    @pytest.mark.parametrize(
        ("platform", "expected"),
        [
            ("cpu", "-cpu"),
            ("tpu", "-tpu"),
            ("cu11.7.1-cudnn8", "-cu117"),
            ("cu11.8.0-cudnn8", "-cu118"),
            ("cu12.1.1-cudnn8", "-cu121"),
            ("cu12.3.2-cudnn9", "-cu123"),
            ("cu12.8.1-cudnn", "-cu128"),
        ],
    )
    def test_format_platform_tag(self, platform, expected):
        assert format_platform_tag(platform) == expected
    def test_invalid_platform_raises_error(self):
        # A platform that is neither cpu/tpu nor a dotted CUDA version fails.
        with pytest.raises(ImageTagsError):
            format_platform_tag("invalid")
class TestFormatPythonTag:
    """Unit tests for format_python_tag."""
    @pytest.mark.parametrize(
        ("python_version", "expected"),
        [
            ("3.9", "-py39"),
            ("3.10", "-py310"),
            ("3.11", "-py311"),
            ("3.12", "-py312"),
        ],
    )
    def test_format_python_tag(self, python_version, expected):
        assert format_python_tag(python_version) == expected
class TestGetPythonSuffixes:
    """Unit tests for get_python_suffixes."""
    def test_default_python_version_includes_empty(self):
        """DEFAULT_PYTHON_TAG_VERSION gets both explicit and empty suffix."""
        # Guard: if the default version changes, this test must be updated.
        assert DEFAULT_PYTHON_TAG_VERSION == "3.10"
        suffixes = get_python_suffixes("3.10")
        assert suffixes == ["-py310", ""]
    def test_non_default_python_version(self):
        """Non-default python versions only get explicit suffix."""
        assert get_python_suffixes("3.11") == ["-py311"]
        assert get_python_suffixes("3.12") == ["-py312"]
class TestGetPlatformSuffixes:
    """Unit tests for get_platform_suffixes alias behavior."""
    def test_cpu_ray_includes_empty(self):
        """ray with cpu gets empty platform suffix."""
        assert get_platform_suffixes("cpu", "ray") == ["-cpu", ""]
    def test_cpu_ray_extra_includes_empty(self):
        """ray-extra with cpu gets empty platform suffix."""
        assert get_platform_suffixes("cpu", "ray-extra") == ["-cpu", ""]
    def test_cpu_ray_ml_no_empty(self):
        """ray-ml with cpu does NOT get empty platform suffix."""
        assert get_platform_suffixes("cpu", "ray-ml") == ["-cpu"]
    def test_cpu_ray_llm_no_empty(self):
        """ray-llm with cpu does NOT get empty platform suffix."""
        assert get_platform_suffixes("cpu", "ray-llm") == ["-cpu"]
    def test_gpu_platform_ray_includes_gpu_alias(self):
        """GPU_PLATFORM with ray gets -gpu alias but not empty suffix."""
        suffixes = get_platform_suffixes(GPU_PLATFORM, "ray")
        assert "-cu121" in suffixes
        assert "-gpu" in suffixes
        assert "" not in suffixes
    def test_gpu_platform_ray_ml_includes_empty(self):
        """GPU_PLATFORM with ray-ml gets -gpu alias AND empty suffix."""
        suffixes = get_platform_suffixes(GPU_PLATFORM, "ray-ml")
        assert "-cu121" in suffixes
        assert "-gpu" in suffixes
        assert "" in suffixes
    def test_gpu_platform_ray_ml_extra_includes_empty(self):
        """GPU_PLATFORM with ray-ml-extra gets -gpu alias AND empty suffix."""
        suffixes = get_platform_suffixes(GPU_PLATFORM, "ray-ml-extra")
        assert "-cu121" in suffixes
        assert "-gpu" in suffixes
        assert "" in suffixes
    def test_tpu_no_aliases(self):
        """TPU gets no aliases."""
        assert get_platform_suffixes("tpu", "ray") == ["-tpu"]
        assert get_platform_suffixes("tpu", "ray-extra") == ["-tpu"]
    def test_non_gpu_platform_cuda_no_aliases(self):
        """Non-GPU_PLATFORM CUDA versions get no aliases."""
        suffixes = get_platform_suffixes("cu12.3.2-cudnn9", "ray")
        assert suffixes == ["-cu123"]
    def test_non_gpu_platform_ray_ml_no_aliases(self):
        """Non-GPU_PLATFORM with ray-ml gets no aliases."""
        suffixes = get_platform_suffixes("cu12.3.2-cudnn9", "ray-ml")
        assert suffixes == ["-cu123"]
class TestGetVariationSuffix:
    """Unit tests for get_variation_suffix."""
    @pytest.mark.parametrize(
        ("image_type", "expected"),
        [
            ("ray", ""),
            ("ray-ml", ""),
            ("ray-llm", ""),
            ("ray-extra", "-extra"),
            ("ray-ml-extra", "-extra"),
            ("ray-llm-extra", "-extra"),
        ],
    )
    def test_variation_suffix(self, image_type, expected):
        assert get_variation_suffix(image_type) == expected
if __name__ == "__main__":
sys.exit(pytest.main(["-vv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "ci/ray_ci/automation/test_image_tags_lib.py",
"license": "Apache License 2.0",
"lines": 111,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/serve/_private/rolling_window_accumulator.py | import threading
import time
from typing import List
class _ThreadBuckets:
"""Per-thread bucket storage for rolling window accumulator.
Each thread gets its own instance to avoid lock contention on the hot path.
"""
# This is a performance optimization to avoid creating a dictionary for the instance.
__slots__ = ("buckets", "current_bucket_idx", "last_rotation_time")
def __init__(self, num_buckets: int):
self.buckets = [0.0] * num_buckets
self.current_bucket_idx = 0
self.last_rotation_time = time.time()
class _ThreadLocalRef(threading.local):
"""Thread-local reference to the thread's _ThreadBuckets instance."""
def __init__(self):
super().__init__()
# by using threading.local, each thread gets its own instance of _ThreadBuckets.
self.data: _ThreadBuckets = None
class RollingWindowAccumulator:
"""Tracks cumulative values over a rolling time window.
Uses bucketing for memory efficiency - divides the window into N buckets
and rotates them as time passes. This allows efficient tracking of values
over a sliding window without storing individual data points.
Uses thread-local storage for lock-free writes on the hot path (add()).
Only get_total() requires synchronization to aggregate across threads.
Example:
# Create a 10-minute rolling window with 60 buckets (10s each)
accumulator = RollingWindowAccumulator(
window_duration_s=600.0,
num_buckets=60,
)
# Add values (lock-free, safe from multiple threads)
accumulator.add(100.0)
accumulator.add(50.0)
# Get total (aggregates across all threads)
total = accumulator.get_total()
Thread Safety:
- add() is lock-free after the first call from each thread
- get_total() acquires a lock to aggregate across threads
- Safe to call from multiple threads concurrently
"""
    def __init__(
        self,
        window_duration_s: float,
        num_buckets: int = 60,
    ):
        """Initialize the rolling window accumulator.
        Args:
            window_duration_s: Total duration of the rolling window in seconds.
                Values older than this are automatically expired.
            num_buckets: Number of buckets to divide the window into. More buckets
                gives finer granularity but uses slightly more memory. Default is 60,
                which for a 10-minute window gives 10-second granularity.
        Raises:
            ValueError: If window_duration_s <= 0 or num_buckets <= 0.
        """
        if window_duration_s <= 0:
            raise ValueError(
                f"window_duration_s must be positive, got {window_duration_s}"
            )
        if num_buckets <= 0:
            raise ValueError(f"num_buckets must be positive, got {num_buckets}")
        self._window_duration_s = window_duration_s
        self._num_buckets = num_buckets
        # Each bucket covers an equal slice of the window.
        self._bucket_duration_s = window_duration_s / num_buckets
        # Thread-local reference to per-thread bucket data
        self._local = _ThreadLocalRef()
        # Track all per-thread bucket instances for aggregation
        self._all_thread_data: List[_ThreadBuckets] = []
        # Guards _all_thread_data registration/iteration; NOT taken on add().
        self._registry_lock = threading.Lock()
    # Read-only accessors for the accumulator's fixed configuration.
    @property
    def window_duration_s(self) -> float:
        """The total duration of the rolling window in seconds."""
        return self._window_duration_s
    @property
    def num_buckets(self) -> int:
        """The number of buckets in the rolling window."""
        return self._num_buckets
    @property
    def bucket_duration_s(self) -> float:
        """The duration of each bucket in seconds."""
        return self._bucket_duration_s
def _ensure_initialized(self) -> _ThreadBuckets:
"""Ensure thread-local storage is initialized for the current thread.
This is called on every add() but the fast path (already initialized)
is just a single attribute check with no locking.
Returns:
The _ThreadBuckets instance for the current thread.
"""
data = self._local.data
if data is not None:
return data
# Slow path: first call from this thread
data = _ThreadBuckets(self._num_buckets)
self._local.data = data
# Register for aggregation (only happens once per thread)
with self._registry_lock:
self._all_thread_data.append(data)
return data
    def _rotate_buckets_if_needed(self, data: _ThreadBuckets) -> None:
        """Rotate buckets for the given thread's storage.
        Advances the current bucket index and clears old buckets as time passes.
        """
        now = time.time()
        elapsed = now - data.last_rotation_time
        # Whole bucket intervals elapsed since the last rotation.
        buckets_to_advance = int(elapsed / self._bucket_duration_s)
        if buckets_to_advance > 0:
            if buckets_to_advance >= self._num_buckets:
                # All buckets have expired, reset everything
                data.buckets = [0.0] * self._num_buckets
                data.current_bucket_idx = 0
            else:
                # Clear old buckets as we advance
                for _ in range(buckets_to_advance):
                    data.current_bucket_idx = (
                        data.current_bucket_idx + 1
                    ) % self._num_buckets
                    data.buckets[data.current_bucket_idx] = 0.0
            # NOTE(review): the rotation clock resets to `now`, discarding the
            # fractional remainder of the last bucket interval, so bucket
            # boundaries drift slightly over time — confirm this is acceptable.
            data.last_rotation_time = now
def add(self, value: float) -> None:
    """Accumulate ``value`` into the current time bucket.

    After a thread's first call, this path touches only thread-local state
    and takes no locks, so concurrent calls from many threads are safe and
    cheap.

    Args:
        value: Amount to add to the accumulator.
    """
    thread_data = self._ensure_initialized()
    # Rotation and the write below only touch this thread's own buckets.
    self._rotate_buckets_if_needed(thread_data)
    idx = thread_data.current_bucket_idx
    thread_data.buckets[idx] = thread_data.buckets[idx] + value
def get_total(self) -> float:
    """Sum all non-expired bucket values across every registered thread.

    Buckets older than ``window_duration_s`` are excluded. The result may
    be slightly inaccurate because per-thread buckets are read without
    locking them; that trade-off is deliberate — this feeds utilization
    metrics only, and with the default 600s window and 10s report interval
    the error stays below 0.16%.

    Returns:
        The windowed sum over all threads that have called add().
    """
    grand_total = 0.0
    now = time.time()
    with self._registry_lock:
        for data in self._all_thread_data:
            # How many of this thread's buckets have aged out of the window?
            expired = int((now - data.last_rotation_time) / self._bucket_duration_s)
            if expired >= self._num_buckets:
                # Nothing valid remains for this thread.
                continue
            # Walk backwards through the circular buffer starting at the
            # most recent bucket, covering only the still-valid slots.
            for offset in range(self._num_buckets - expired):
                slot = (data.current_bucket_idx - offset) % self._num_buckets
                grand_total += data.buckets[slot]
    return grand_total
def get_num_registered_threads(self) -> int:
    """Count the threads that have registered storage via add().

    Primarily intended for debugging and tests.

    Returns:
        Number of per-thread bucket structures currently registered.
    """
    with self._registry_lock:
        registered = self._all_thread_data
        return len(registered)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/_private/rolling_window_accumulator.py",
"license": "Apache License 2.0",
"lines": 169,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:rllib/env/tests/test_env_runner.py | """Shared tests for SingleAgentEnvRunner and MultiAgentEnvRunner.
These tests are parameterized to run against both runner types, ensuring
consistent behavior across the EnvRunner interface.
Additionally, the tests are split into separate classes for different testing
components.
We attempted to share these fixtures via a conftest; however, Bazel and pytest
view the classes as different, causing some of the tests to fail.
"""
import math
from typing import Any, Optional
import gymnasium
import numpy as np
import pytest
import ray
from ray.rllib.algorithms import AlgorithmConfig, PPOConfig
from ray.rllib.callbacks.callbacks import RLlibCallback
from ray.rllib.connectors.connector_v2 import ConnectorV2
from ray.rllib.env.multi_agent_env_runner import MultiAgentEnvRunner
from ray.rllib.env.multi_agent_episode import MultiAgentEpisode
from ray.rllib.env.single_agent_env_runner import SingleAgentEnvRunner
from ray.rllib.env.single_agent_episode import SingleAgentEpisode
from ray.rllib.examples.envs.classes.multi_agent import MultiAgentCartPole
from ray.rllib.utils import check, override
from ray.rllib.utils.metrics import (
EPISODE_DURATION_SEC_MEAN,
EPISODE_LEN_MAX,
EPISODE_LEN_MEAN,
EPISODE_LEN_MIN,
EPISODE_RETURN_MAX,
EPISODE_RETURN_MEAN,
EPISODE_RETURN_MIN,
NUM_ENV_STEPS_SAMPLED,
NUM_ENV_STEPS_SAMPLED_LIFETIME,
NUM_EPISODES,
NUM_EPISODES_LIFETIME,
)
@pytest.fixture(scope="module")
def ray_init():
    """Initialize Ray once for the whole test module and shut it down after."""
    # ignore_reinit_error: tolerate Ray already being initialized by an
    # earlier test module running in the same process.
    ray.init(ignore_reinit_error=True)
    yield
    ray.shutdown()
# Parameter values for test generation
RUNNER_TYPES = ["single_agent", "multi_agent"]
# Sizes of the vectorized env batch exercised per runner.
NUM_ENVS_VALUES = [1, 3, 8]
# Gymnasium vectorization strategies to exercise; VECTOR_ENTRY_POINT is
# skipped for the multi-agent runner (see env_runner_config in this file).
GYM_VECTORIZE_MODES = [
    gymnasium.VectorizeMode.SYNC,
    gymnasium.VectorizeMode.VECTOR_ENTRY_POINT,
]
@pytest.fixture(params=RUNNER_TYPES)
def runner_type(request):
    """Parameterized fixture yielding each runner flavor ("single_agent"/"multi_agent")."""
    return request.param
@pytest.fixture(params=NUM_ENVS_VALUES)
def num_envs_per_env_runner(request):
    """Parameterized fixture yielding the number of environments per runner."""
    return request.param
@pytest.fixture(params=GYM_VECTORIZE_MODES)
def gym_env_vectorize_mode(request):
    """Parameterized fixture yielding each gymnasium vectorize mode under test."""
    return request.param
@pytest.fixture
def env_runner_config(runner_type, num_envs_per_env_runner, gym_env_vectorize_mode):
    """Build appropriate config for each runner type."""
    # Skip invalid combinations: the multi-agent runner does not use
    # gymnasium's vectorization entry point, so that mode is meaningless here.
    if (
        runner_type == "multi_agent"
        and gym_env_vectorize_mode is gymnasium.VectorizeMode.VECTOR_ENTRY_POINT
    ):
        pytest.skip("gym_env_vectorize_mode not applicable for multi_agent")
    if runner_type == "single_agent":
        return (
            PPOConfig()
            .environment("CartPole-v1")
            .env_runners(
                num_envs_per_env_runner=num_envs_per_env_runner,
                rollout_fragment_length=10,
                gym_env_vectorize_mode=gym_env_vectorize_mode,
            )
        )
    elif runner_type == "multi_agent":
        # We use MultiAgentCartPole for the parallel environment to ensure a fair comparison to the SingleAgent version.
        return (
            PPOConfig()
            .environment(MultiAgentCartPole, env_config={"num_agents": 2})
            .multi_agent(
                policies={"p0", "p1"},
                # Map agent id N to policy "pN".
                policy_mapping_fn=lambda aid, *a, **kw: f"p{aid}",
            )
            .env_runners(
                num_envs_per_env_runner=num_envs_per_env_runner,
                rollout_fragment_length=10,
            )
        )
    else:
        raise ValueError(f"Unknown runner type: {runner_type}")
@pytest.fixture
def env_runner_cls(runner_type):
    """Return the EnvRunner class matching the requested runner type."""
    classes = {
        "single_agent": SingleAgentEnvRunner,
        "multi_agent": MultiAgentEnvRunner,
    }
    if runner_type not in classes:
        raise ValueError(f"Unknown runner type: {runner_type}")
    return classes[runner_type]
@pytest.fixture
def env_runner(env_runner_cls, env_runner_config, ray_init):
    """Create an EnvRunner instance."""
    runner = env_runner_cls(config=env_runner_config)
    yield runner
    # Teardown: release the runner's env and module resources.
    runner.stop()
def get_t_started(episode, runner_type: str) -> int:
    """Return the timestep at which *episode* started.

    Hides the attribute-name difference between episode types:
    single-agent episodes expose ``t_started`` while multi-agent episodes
    expose ``env_t_started``.
    """
    attr_names = {"single_agent": "t_started", "multi_agent": "env_t_started"}
    if runner_type not in attr_names:
        raise ValueError(f"Unknown runner type: {runner_type}")
    return getattr(episode, attr_names[runner_type])
class CallbackTracker(RLlibCallback):
    """Helper callback class that tracks all callback invocations.

    Invocations are stored in the class-level ``calls`` list as
    ``(callback_name, info_dict)`` tuples. The list is shared across all
    instances; call ``reset()`` between tests to clear it.
    """

    # Class-level storage for callback calls (shared by all instances).
    calls: list[tuple[str, dict[str, Any]]] = []

    def on_episode_created(
        self,
        *,
        episode,
        env_runner=None,
        metrics_logger=None,
        env=None,
        env_index=None,
        rl_module=None,
        **kwargs,
    ):
        CallbackTracker.calls.append(
            ("on_episode_created", {"episode_id": episode.id_, "env_index": env_index})
        )

    def on_episode_start(
        self,
        *,
        episode,
        env_runner=None,
        metrics_logger=None,
        env=None,
        env_index=None,
        rl_module=None,
        **kwargs,
    ):
        CallbackTracker.calls.append(
            ("on_episode_start", {"episode_id": episode.id_, "env_index": env_index})
        )

    def on_episode_step(
        self,
        *,
        episode,
        env_runner=None,
        metrics_logger=None,
        env=None,
        env_index=None,
        rl_module=None,
        **kwargs,
    ):
        # Handle both SingleAgentEpisode (has .t) and MultiAgentEpisode (has
        # .env_t). Fix: select the attribute via hasattr instead of
        # `getattr(..., None) or getattr(...)` — the `or` form replaced a
        # legitimate t == 0 (falsy) with the env_t fallback (None here).
        if hasattr(episode, "t"):
            t_val = episode.t
        else:
            t_val = getattr(episode, "env_t", None)
        CallbackTracker.calls.append(
            (
                "on_episode_step",
                {"episode_id": episode.id_, "env_index": env_index, "t": t_val},
            )
        )

    def on_episode_end(
        self,
        *,
        episode,
        env_runner=None,
        metrics_logger=None,
        env=None,
        env_index=None,
        rl_module=None,
        **kwargs,
    ):
        CallbackTracker.calls.append(
            (
                "on_episode_end",
                {
                    "episode_id": episode.id_,
                    "env_index": env_index,
                    "length": len(episode),
                },
            )
        )

    def on_sample_end(
        self,
        *,
        env_runner=None,
        metrics_logger=None,
        samples=None,
        **kwargs,
    ):
        CallbackTracker.calls.append(
            ("on_sample_end", {"num_episodes": len(samples) if samples else 0})
        )

    @classmethod
    def reset(cls):
        """Clear the shared call log."""
        cls.calls = []

    @classmethod
    def get_calls(
        cls, callback_name: Optional[str] = None
    ) -> list[dict[str, Any]] | list[tuple[str, dict[str, Any]]]:
        """Return recorded calls.

        Args:
            callback_name: If given, return only the info dicts for that
                callback; otherwise return the full (name, info) list.
        """
        if callback_name:
            return [c[1] for c in cls.calls if c[0] == callback_name]
        return cls.calls
@pytest.fixture
def env_runner_with_callback(runner_type, ray_init):
    """Create a single-env EnvRunner wired with the CallbackTracker callback."""
    # Start from a clean shared call log.
    CallbackTracker.reset()
    if runner_type == "single_agent":
        # NOTE(review): the single-agent path uses plain AlgorithmConfig while
        # the multi-agent path uses PPOConfig — presumably intentional; confirm.
        config = (
            AlgorithmConfig()
            .environment("CartPole-v1")
            .env_runners(num_envs_per_env_runner=1)
            .callbacks(CallbackTracker)
        )
        runner = SingleAgentEnvRunner(config=config)
    elif runner_type == "multi_agent":
        config = (
            PPOConfig()
            .environment(MultiAgentCartPole, env_config={"num_agents": 2})
            .multi_agent(
                policies={"p0", "p1"},
                policy_mapping_fn=lambda aid, *a, **kw: f"p{aid}",
            )
            .env_runners(num_envs_per_env_runner=1)
            .callbacks(CallbackTracker)
        )
        runner = MultiAgentEnvRunner(config=config)
    else:
        raise ValueError(f"Unknown runner type: {runner_type}")
    yield runner
    # Teardown: clear shared tracker state, then stop the runner.
    CallbackTracker.reset()
    runner.stop()
class EnvToModuleConnectorTracker(ConnectorV2):
    """Tracks all env_to_module connector calls with detailed information."""

    # Class-level storage to track calls across instances; cleared via reset().
    call_records: list[dict[str, Any]] = []
    call_count: int = 0

    def __init__(self, input_observation_space, input_action_space, **kwargs):
        super().__init__(input_observation_space, input_action_space)

    @override(ConnectorV2)
    def __call__(
        self,
        *,
        rl_module,
        batch,
        episodes: list[MultiAgentEpisode | SingleAgentEpisode],
        explore,
        shared_data,
        metrics,
        **kwargs,
    ):
        # One call covers all episodes of the vectorized env batch; record
        # one entry per episode. The batch is passed through unchanged.
        EnvToModuleConnectorTracker.call_count += 1
        for episode in episodes:
            # For SingleAgentEpisode, use .t; for MultiAgentEpisode, use .env_t
            # NOTE(review): `or` falls through when .t == 0; the env_t default
            # of 0 keeps the recorded value correct in that case — confirm.
            t_val = getattr(episode, "t", None) or getattr(episode, "env_t", 0)
            record = {
                "call_number": EnvToModuleConnectorTracker.call_count,
                "episode_id": episode.id_,
                "is_done": episode.is_done,
                "is_reset": episode.is_reset if hasattr(episode, "is_reset") else None,
                "timestep": t_val,
                "explore": explore,
                "has_metrics": metrics is not None,
            }
            EnvToModuleConnectorTracker.call_records.append(record)
        return batch

    @classmethod
    def reset(cls):
        """Clear the shared call log and counter."""
        cls.call_records = []
        cls.call_count = 0

    @classmethod
    def get_records_for_episode(cls, episode_id: str) -> list[dict[str, Any]]:
        """Return the records belonging to one episode id."""
        return [r for r in cls.call_records if r["episode_id"] == episode_id]

    @classmethod
    def get_done_episode_records(cls) -> list[dict[str, Any]]:
        """Return the records captured for already-finished episodes."""
        return [r for r in cls.call_records if r["is_done"]]
class ModuleToEnvConnectorTracker(ConnectorV2):
    """Tracks all module_to_env connector calls with detailed information."""

    # Class-level storage to track calls across instances; cleared via reset().
    call_records: list[dict[str, Any]] = []
    call_count: int = 0

    def __init__(self, input_observation_space, input_action_space, **kwargs):
        super().__init__(input_observation_space, input_action_space)

    @override(ConnectorV2)
    def __call__(
        self,
        *,
        rl_module,
        batch,
        episodes: list[MultiAgentEpisode | SingleAgentEpisode],
        explore,
        shared_data,
        metrics,
        **kwargs,
    ):
        # One call per RLModule forward pass; record one entry per episode.
        # The batch is passed through unchanged.
        ModuleToEnvConnectorTracker.call_count += 1
        for episode in episodes:
            # .t for single-agent episodes, .env_t for multi-agent ones.
            t_val = getattr(episode, "t", None) or getattr(episode, "env_t", 0)
            record = {
                "call_number": ModuleToEnvConnectorTracker.call_count,
                "episode_id": episode.id_,
                "is_done": episode.is_done,
                "timestep": t_val,
                "explore": explore,
                "has_batch_actions": "actions" in batch if batch else False,
            }
            ModuleToEnvConnectorTracker.call_records.append(record)
        return batch

    @classmethod
    def reset(cls):
        """Clear the shared call log and counter."""
        cls.call_records = []
        cls.call_count = 0
def make_env_to_module_connector_tracker(env, spaces, device):
    """Factory function for EnvToModuleConnectorTracker.

    Signature matches what the ``env_to_module_connector`` config option
    expects; ``spaces`` and ``device`` are accepted but unused here.
    """
    return EnvToModuleConnectorTracker(
        input_observation_space=env.observation_space,
        input_action_space=env.action_space,
    )
def make_module_to_env_connector_tracker(env, spaces):
    """Factory function for ModuleToEnvConnectorTracker.

    Tolerates ``env`` being None by passing None spaces to the tracker.
    """
    return ModuleToEnvConnectorTracker(
        input_observation_space=env.observation_space if env else None,
        input_action_space=env.action_space if env else None,
    )
@pytest.fixture
def env_runner_with_env_to_module_tracker(runner_type, ray_init):
    """Create an EnvRunner with EnvToModuleConnectorTracker installed."""
    # Start from a clean shared call log.
    EnvToModuleConnectorTracker.reset()
    if runner_type == "single_agent":
        config = (
            PPOConfig()
            .environment("CartPole-v1")
            .env_runners(
                num_envs_per_env_runner=2,
                env_to_module_connector=make_env_to_module_connector_tracker,
            )
        )
        runner = SingleAgentEnvRunner(config=config)
    elif runner_type == "multi_agent":
        config = (
            PPOConfig()
            .environment(MultiAgentCartPole, env_config={"num_agents": 2})
            .multi_agent(
                policies={"p0", "p1"},
                policy_mapping_fn=lambda aid, *a, **kw: f"p{aid}",
            )
            .env_runners(
                num_envs_per_env_runner=2,
                env_to_module_connector=make_env_to_module_connector_tracker,
            )
        )
        runner = MultiAgentEnvRunner(config=config)
    else:
        raise ValueError(f"Unknown runner type: {runner_type}")
    yield runner
    # Teardown: clear shared tracker state, then stop the runner.
    EnvToModuleConnectorTracker.reset()
    runner.stop()
@pytest.fixture
def env_runner_with_module_to_env_tracker(runner_type, ray_init):
    """Create an EnvRunner with ModuleToEnvConnectorTracker installed."""
    # Start from a clean shared call log.
    ModuleToEnvConnectorTracker.reset()
    if runner_type == "single_agent":
        config = (
            PPOConfig()
            .environment("CartPole-v1")
            .env_runners(
                num_envs_per_env_runner=1,
                module_to_env_connector=make_module_to_env_connector_tracker,
            )
        )
        runner = SingleAgentEnvRunner(config=config)
    elif runner_type == "multi_agent":
        config = (
            PPOConfig()
            .environment(MultiAgentCartPole, env_config={"num_agents": 2})
            .multi_agent(
                policies={"p0", "p1"},
                policy_mapping_fn=lambda aid, *a, **kw: f"p{aid}",
            )
            .env_runners(
                num_envs_per_env_runner=1,
                module_to_env_connector=make_module_to_env_connector_tracker,
            )
        )
        runner = MultiAgentEnvRunner(config=config)
    else:
        raise ValueError(f"Unknown runner type: {runner_type}")
    yield runner
    # Teardown: clear shared tracker state, then stop the runner.
    ModuleToEnvConnectorTracker.reset()
    runner.stop()
class TestEnvRunnerSampling:
    """Tests for sampling functionality common to both runner types."""

    # Each sampling test repeats to cover episode boundaries landing in
    # different places across runs.
    repeats = 10

    def test_sample_num_episodes(self, env_runner, num_episodes=3):
        """Test sampling a specific number of episodes."""
        for _ in range(self.repeats):
            episodes = env_runner.sample(num_episodes=num_episodes, random_actions=True)
            assert len(episodes) == num_episodes
            assert all(e.is_done for e in episodes)

    def test_sample_num_timesteps(self, env_runner, num_timesteps=20):
        """Test sampling a number of timesteps."""
        for _ in range(self.repeats):
            episodes = env_runner.sample(
                num_timesteps=num_timesteps, random_actions=True
            )
            total_timesteps = sum(len(e) for e in episodes)
            # Allow some slack for vectorized envs (up to num_envs extra)
            assert (
                num_timesteps <= total_timesteps <= num_timesteps + env_runner.num_envs
            )

    def test_sample_default_rollout_fragment(self, env_runner, env_runner_config):
        """Test sampling with default rollout_fragment_length."""
        for _ in range(self.repeats):
            episodes = env_runner.sample(random_actions=True)
            total_timesteps = sum(len(e) for e in episodes)
            rollout_fragment_length = env_runner_config.rollout_fragment_length
            # Expected total is num_envs * fragment length, again with up to
            # num_envs slack from parallel stepping.
            assert (
                env_runner.num_envs * rollout_fragment_length
                <= total_timesteps
                <= (env_runner.num_envs * rollout_fragment_length + env_runner.num_envs)
            )

    def test_sample_force_reset_with_timesteps(self, env_runner, runner_type):
        """Test that force_reset starts fresh episodes when using num_timesteps."""
        for repeat in range(self.repeats):
            # Sample partial episode
            env_runner.sample(num_timesteps=5, random_actions=True)
            # Sample with force_reset
            episodes = env_runner.sample(
                num_timesteps=10, random_actions=True, force_reset=True
            )
            assert all(get_t_started(e, runner_type) == 0 for e in episodes)

    def test_sample_force_reset_with_episodes(self, env_runner, runner_type):
        """Test that force_reset works with num_episodes."""
        # Sample some episodes first
        env_runner.sample(num_episodes=1, random_actions=True)
        # Sample with force_reset (should still work fine)
        episodes = env_runner.sample(
            num_episodes=2, random_actions=True, force_reset=True
        )
        assert len(episodes) == 2
        assert all(get_t_started(e, runner_type) == 0 for e in episodes)

    def test_sample_zero_timesteps(self, env_runner):
        """Test sampling with zero timesteps."""
        # This might either return empty or raise - document the behavior
        episodes = env_runner.sample(num_timesteps=0, random_actions=True)
        # If it doesn't raise, should return empty or minimal
        assert isinstance(episodes, list)
        assert len(episodes) == 0

    def test_sample_zero_episodes(self, env_runner):
        """Test sampling with zero episodes."""
        # This might either return empty or raise - document the behavior
        episodes = env_runner.sample(num_episodes=0, random_actions=True)
        # If it doesn't raise, should return empty or minimal
        assert isinstance(episodes, list)
        assert len(episodes) == 0

    def test_sample_both_args_error(self, env_runner):
        """Test that providing both num_timesteps and num_episodes raises error."""
        with pytest.raises(AssertionError):
            env_runner.sample(num_timesteps=10, num_episodes=10, random_actions=True)

    def test_sample_negative_timesteps_error(self, env_runner):
        """Test that negative num_timesteps raises error."""
        with pytest.raises(AssertionError):
            env_runner.sample(num_timesteps=-1, random_actions=True)

    def test_sample_negative_episodes_error(self, env_runner):
        """Test that negative num_episodes raises error."""
        with pytest.raises(AssertionError):
            env_runner.sample(num_episodes=-1, random_actions=True)
class TestEnvRunnerEpisodeContinuation:
    """Tests for episode continuation across sample() calls."""

    def test_episode_continuation_between_samples(self, env_runner, runner_type):
        """Test that episodes continue correctly across sample() calls."""
        # Sample partial episode (fewer timesteps than episode length)
        episode_1 = env_runner.sample(num_timesteps=5, random_actions=True)
        episode_1_ids = {e.id_ for e in episode_1 if not e.is_done}
        assert len(episode_1_ids) > 0
        # Sample more timesteps - should continue the same episodes
        episodes_2 = env_runner.sample(num_timesteps=5, random_actions=True)
        # Episodes with a non-zero start were carried over from the prior call.
        continued_ids = {e.id_ for e in episodes_2 if get_t_started(e, runner_type) > 0}
        # The continued episodes should have IDs from the first batch
        if continued_ids:
            assert continued_ids.issubset(episode_1_ids)

    def test_force_reset_breaks_continuation(self, env_runner, runner_type):
        """Test that force_reset prevents episode continuation."""
        # Sample partial episode
        episodes_1 = env_runner.sample(num_timesteps=5, random_actions=True)
        # Sample with force_reset - should NOT continue
        episodes_2 = env_runner.sample(
            num_timesteps=5, random_actions=True, force_reset=True
        )
        # All episodes should start fresh
        assert all(get_t_started(e, runner_type) == 0 for e in episodes_2)
        # check there is no overlap in episode ids
        episode_1_ids = {e.id_ for e in episodes_1}
        episode_2_ids = {e.id_ for e in episodes_2}
        assert len(episode_1_ids.intersection(episode_2_ids)) == 0

    def test_complete_episodes_dont_continue(self, env_runner, runner_type):
        """Test that completed episodes are not continued."""
        episodes_1 = env_runner.sample(num_episodes=2, random_actions=True)
        assert all(e.is_done for e in episodes_1)
        # Sample more complete episodes
        episodes_2 = env_runner.sample(num_episodes=2, random_actions=True)
        # All episodes should start fresh
        assert all(get_t_started(e, runner_type) == 0 for e in episodes_2)
        # check there is no overlap in episode ids
        episode_1_ids = {e.id_ for e in episodes_1}
        episode_2_ids = {e.id_ for e in episodes_2}
        assert len(episode_1_ids.intersection(episode_2_ids)) == 0
class TestEnvRunnerStateManagement:
    """Tests for state management common to both runner types."""

    def test_get_state_returns_dict(
        self, env_runner, env_runner_cls, env_runner_config
    ):
        """Test that get_state returns a dictionary."""
        state = env_runner.get_state()
        assert isinstance(state, dict)
        assert "rl_module" in state
        env_runner.sample(num_episodes=1, random_actions=True)
        # recheck after sample
        state = env_runner.get_state()
        assert isinstance(state, dict)
        assert "rl_module" in state
        # check that a new env runner can be updated based on an older state
        new_runner = env_runner_cls(config=env_runner_config)
        try:
            # Check the states are not identical (fresh random weights differ)
            new_state = new_runner.get_state()
            assert set(state.keys()) == set(new_state.keys())
            with pytest.raises(
                AssertionError, match="Arrays are not almost equal to 5 decimal"
            ):
                check(state, new_state)
            new_runner.set_state(state)
            # roundtrip the runner state
            new_state = new_runner.get_state()
            assert set(state.keys()) == set(new_state.keys())
            check(state, new_state)
        finally:
            new_runner.stop()
class TestEnvRunnerMetrics:
    """Tests for metrics collection common to both runner types.

    Both SingleAgentEnvRunner and MultiAgentEnvRunner share a common metrics
    interface via `get_metrics()`. This test class verifies that:
    1. Metrics are properly initialized and returned as dicts
    2. Core metrics keys (env steps, episodes, returns) exist in both
    3. Episode metrics are only logged after completed episodes
    4. Metrics accumulate correctly over multiple sample calls
    5. Metrics are properly cleared after get_metrics() is called
    """

    # Shared metrics keys that should exist in both runner types
    SHARED_STEP_METRICS = [
        NUM_ENV_STEPS_SAMPLED,
        NUM_ENV_STEPS_SAMPLED_LIFETIME,
    ]
    SHARED_EPISODE_COUNT_METRICS = [NUM_EPISODES, NUM_EPISODES_LIFETIME]
    SHARED_EPISODE_STATS_METRICS = [
        EPISODE_LEN_MAX,
        EPISODE_LEN_MIN,
        EPISODE_LEN_MEAN,
        EPISODE_DURATION_SEC_MEAN,
        EPISODE_RETURN_MEAN,
        EPISODE_RETURN_MAX,
        EPISODE_RETURN_MIN,
    ]

    def test_get_metrics_returns_dict(self, env_runner):
        """Test that get_metrics returns a dictionary."""
        env_runner.sample(num_episodes=1, random_actions=True)
        metrics = env_runner.get_metrics()
        assert isinstance(metrics, dict)
        # All shared keys must be present after a completed episode.
        assert set(
            self.SHARED_STEP_METRICS
            + self.SHARED_EPISODE_COUNT_METRICS
            + self.SHARED_EPISODE_STATS_METRICS
        ) <= set(metrics.keys())

    def test_metrics_after_sampling_timesteps(self, env_runner, num_timesteps=100):
        """Test that step metrics exist after sampling timesteps."""
        episodes = env_runner.sample(num_timesteps=num_timesteps, random_actions=True)
        metrics = env_runner.get_metrics()
        # Check step metrics exist
        for key in self.SHARED_STEP_METRICS:
            assert key in metrics, f"Missing metric: {key}"
        # Verify env steps count: vectorized envs step together, so the count
        # rounds up to a multiple of num_envs.
        max_num_timesteps = (
            math.ceil(num_timesteps / env_runner.num_envs) * env_runner.num_envs
        )
        assert num_timesteps <= metrics[NUM_ENV_STEPS_SAMPLED] <= max_num_timesteps
        assert (
            num_timesteps
            <= metrics[NUM_ENV_STEPS_SAMPLED_LIFETIME]
            <= max_num_timesteps
        )
        num_completed_episodes = sum(eps.is_done for eps in episodes)
        if num_completed_episodes > 0:
            assert metrics[NUM_EPISODES] == num_completed_episodes
            assert metrics[EPISODE_LEN_MEAN] > 0
            # CartPole return is always positive
            assert metrics[EPISODE_RETURN_MEAN] > 0
            assert metrics[EPISODE_DURATION_SEC_MEAN] > 0

    def test_metrics_after_sampling_rollout_fragment(self, env_runner):
        """Test that step metrics match the default rollout-fragment sample."""
        episodes = env_runner.sample(random_actions=True)
        metrics = env_runner.get_metrics()
        # Check step metrics exist
        for key in self.SHARED_STEP_METRICS:
            assert key in metrics, f"Missing metric: {key}"
        # Verify env steps count equals the timesteps actually returned.
        expected_num_timesteps = sum(len(eps) for eps in episodes)
        assert metrics[NUM_ENV_STEPS_SAMPLED] == expected_num_timesteps
        assert metrics[NUM_ENV_STEPS_SAMPLED_LIFETIME] == expected_num_timesteps
        num_completed_episodes = sum(eps.is_done for eps in episodes)
        if num_completed_episodes > 0:
            assert metrics[NUM_EPISODES] == num_completed_episodes
            assert metrics[EPISODE_LEN_MEAN] > 0
            # CartPole return is always positive
            assert metrics[EPISODE_RETURN_MEAN] > 0
            assert metrics[EPISODE_DURATION_SEC_MEAN] > 0

    def test_metrics_after_sampling_episodes(self, env_runner, num_episodes=2):
        """Test that episode metrics exist after sampling complete episodes."""
        episodes = env_runner.sample(num_episodes=num_episodes, random_actions=True)
        metrics = env_runner.get_metrics()
        # Check episode count metrics
        for key in self.SHARED_EPISODE_COUNT_METRICS:
            assert key in metrics, f"Missing metric: {key}"
        # With multiple environments, if on the same timestep that the final episode is collected,
        # then other environment can also terminate causing greater than the number of episodes requested
        assert metrics[NUM_EPISODES] >= num_episodes
        assert metrics[NUM_EPISODES_LIFETIME] >= num_episodes
        episode_num_timesteps = sum(len(eps) for eps in episodes)
        # As some sub-environment stepped but didn't complete the episode, more steps might have been sampled than returned.
        assert metrics[NUM_ENV_STEPS_SAMPLED] >= episode_num_timesteps
        assert metrics[NUM_ENV_STEPS_SAMPLED_LIFETIME] >= episode_num_timesteps
        # Check episode stats metrics exist after complete episodes
        for key in self.SHARED_EPISODE_STATS_METRICS:
            assert key in metrics, f"Missing metric: {key}"
        # Episode return and length should be positive
        assert metrics[EPISODE_LEN_MEAN] > 0
        # CartPole return is always positive
        assert metrics[EPISODE_RETURN_MEAN] > 0
        assert metrics[EPISODE_DURATION_SEC_MEAN] > 0

    def test_metrics_accumulate_over_samples(self, env_runner):
        """Test that metrics accumulate correctly over multiple sample calls.

        As an env-runner metrics isn't root (algorithm will be), then lifetime metrics
        aren't aggregated over multiple samples.
        """
        # Zero sample
        metrics_0 = env_runner.get_metrics()
        assert metrics_0 == {}
        # First sample
        episodes_1 = env_runner.sample(num_episodes=1, random_actions=True)
        metrics_1 = env_runner.get_metrics()
        steps_sampled_1 = metrics_1[NUM_ENV_STEPS_SAMPLED]
        lifetime_1 = metrics_1[NUM_ENV_STEPS_SAMPLED_LIFETIME]
        episodes_lifetime_1 = metrics_1[NUM_EPISODES_LIFETIME]
        assert steps_sampled_1 >= sum(len(eps) for eps in episodes_1)
        assert steps_sampled_1 >= lifetime_1
        # on the final timestep sampled, if other environment also terminate then
        # they will count towards
        assert episodes_lifetime_1 >= sum(eps.is_done for eps in episodes_1)
        # Second sample
        episodes_2 = env_runner.sample(num_episodes=1, random_actions=True)
        metrics_2 = env_runner.get_metrics()
        steps_sampled_2 = metrics_2[NUM_ENV_STEPS_SAMPLED]
        lifetime_2 = metrics_2[NUM_ENV_STEPS_SAMPLED_LIFETIME]
        episodes_lifetime_2 = metrics_2[NUM_EPISODES_LIFETIME]
        assert steps_sampled_2 >= sum(len(eps) for eps in episodes_2)
        assert steps_sampled_2 >= lifetime_2
        assert episodes_lifetime_2 >= sum(eps.is_done for eps in episodes_2)

    def test_metrics_cleared_after_get_metrics(self, env_runner):
        """Test that per-iteration metrics are cleared after get_metrics."""
        # Sample some episodes
        env_runner.sample(num_episodes=2, random_actions=True)
        env_runner.get_metrics()
        # Get metrics again without sampling
        metrics = env_runner.get_metrics()
        # Per-iteration stats become NaN; lifetime counters reset to zero.
        assert np.isnan(metrics[NUM_ENV_STEPS_SAMPLED].peek())
        assert metrics[NUM_ENV_STEPS_SAMPLED_LIFETIME] == 0.0
        assert np.isnan(metrics[NUM_EPISODES].peek())
        assert metrics[NUM_EPISODES_LIFETIME] == 0.0

    def test_metrics_min_max_tracking(self, env_runner):
        """Test that min/max episode metrics are tracked correctly."""
        # Sample multiple episodes to get variation
        env_runner.sample(num_episodes=5, random_actions=True)
        metrics = env_runner.get_metrics()
        # Min should be <= mean <= max for episode length
        assert metrics[EPISODE_LEN_MIN] <= metrics[EPISODE_LEN_MEAN]
        assert metrics[EPISODE_LEN_MEAN] <= metrics[EPISODE_LEN_MAX]
        # Min should be <= mean <= max for episode return
        assert metrics[EPISODE_RETURN_MIN] <= metrics[EPISODE_RETURN_MEAN]
        assert metrics[EPISODE_RETURN_MEAN] <= metrics[EPISODE_RETURN_MAX]

    def test_metrics_consistency_across_sample_modes(self, env_runner):
        """Test that metrics structure is consistent regardless of sample mode."""
        # Sample by timesteps
        env_runner.sample(num_timesteps=20, random_actions=True, force_reset=True)
        metrics_timesteps = env_runner.get_metrics()
        # Sample by episodes
        env_runner.sample(num_episodes=1, random_actions=True, force_reset=True)
        metrics_episodes = env_runner.get_metrics()
        # Core step metrics should exist in both
        for key in self.SHARED_STEP_METRICS:
            assert key in metrics_timesteps, f"Missing in timesteps mode: {key}"
            assert key in metrics_episodes, f"Missing in episodes mode: {key}"
class TestEnvRunnerCallbacks:
    """Tests for callback invocations common to both runner types.

    Possible callbacks: on_episode_created, on_episode_start, on_episode_step, on_episode_end, on_sample_end
    """

    @pytest.mark.parametrize("num_timesteps", [8, 32])
    def test_callbacks_on_sample_timesteps(
        self, env_runner_with_callback, ray_init, num_timesteps
    ):
        """Test the callbacks for sample timesteps."""
        episodes = env_runner_with_callback.sample(
            num_timesteps=num_timesteps, random_actions=True
        )
        on_episode_created_calls = CallbackTracker.get_calls("on_episode_created")
        on_episode_start_calls = CallbackTracker.get_calls("on_episode_start")
        on_episode_end_calls = CallbackTracker.get_calls("on_episode_end")
        on_sample_end_calls = CallbackTracker.get_calls("on_sample_end")
        # One created/started per env at reset, plus one per finished episode
        # (its replacement episode is created and started immediately).
        assert (
            len(on_episode_created_calls)
            == sum(e.is_done for e in episodes) + env_runner_with_callback.num_envs
        )
        assert (
            len(on_episode_start_calls)
            == sum(e.is_done for e in episodes) + env_runner_with_callback.num_envs
        )
        assert len(on_episode_end_calls) == sum(e.is_done for e in episodes)
        assert len(on_sample_end_calls) == 1
        # NOTE(review): NUM_EPISODES is assumed to equal the "num_episodes" key
        # CallbackTracker stores in on_sample_end — confirm the constant value.
        assert on_sample_end_calls[0][NUM_EPISODES] == len(episodes)

    @pytest.mark.parametrize("num_episodes", [1, 8])
    def test_callbacks_on_sample_episodes(
        self, env_runner_with_callback, ray_init, num_episodes
    ):
        """Test the callbacks for completed episodes.

        When sampling by num_episodes, one extra replacement episode is still
        created after the final episode completes, but it is never started
        (see the assertions below): num_episodes + 1 created calls versus
        exactly num_episodes started/ended calls.
        """
        episodes = env_runner_with_callback.sample(
            num_episodes=num_episodes, random_actions=True
        )
        on_episode_created_calls = CallbackTracker.get_calls("on_episode_created")
        on_episode_start_calls = CallbackTracker.get_calls("on_episode_start")
        on_episode_end_calls = CallbackTracker.get_calls("on_episode_end")
        on_sample_end_calls = CallbackTracker.get_calls("on_sample_end")
        # One replacement episode is created (but never started) after the
        # final episode completes, hence the +1 on created only.
        assert len(on_episode_created_calls) == num_episodes + 1
        assert len(on_episode_start_calls) == num_episodes
        assert len(on_episode_end_calls) == num_episodes == len(episodes)
        assert len(on_sample_end_calls) == 1
        assert on_sample_end_calls[0][NUM_EPISODES] == num_episodes

    def test_callbacks_on_sample_rollout(self, env_runner_with_callback, ray_init):
        """Test the callbacks for sampling with default rollout fragment."""
        episodes = env_runner_with_callback.sample(random_actions=True)
        on_episode_created_calls = CallbackTracker.get_calls("on_episode_created")
        on_episode_start_calls = CallbackTracker.get_calls("on_episode_start")
        on_episode_end_calls = CallbackTracker.get_calls("on_episode_end")
        on_sample_end_calls = CallbackTracker.get_calls("on_sample_end")
        assert (
            len(on_episode_created_calls)
            == sum(e.is_done for e in episodes) + env_runner_with_callback.num_envs
        )
        assert (
            len(on_episode_start_calls)
            == sum(e.is_done for e in episodes) + env_runner_with_callback.num_envs
        )
        assert len(on_episode_end_calls) == sum(e.is_done for e in episodes)
        assert len(on_sample_end_calls) == 1
        assert on_sample_end_calls[0][NUM_EPISODES] == len(episodes)

    @pytest.mark.parametrize("num_episodes", [1, 8])
    def test_callbacks_multi_samples(
        self, env_runner_with_callback, ray_init, num_episodes, repeats=3
    ):
        """Test callbacks across multiple sample() calls.

        Each sample(num_episodes=...) call independently starts and ends
        exactly num_episodes episodes and additionally creates one
        never-started replacement episode, so created counts accumulate as
        (num_episodes + 1) per call.
        """
        for repeat in range(repeats):
            episodes = env_runner_with_callback.sample(
                num_episodes=num_episodes, random_actions=True
            )
            assert len(episodes) == num_episodes
            on_episode_created_calls = CallbackTracker.get_calls("on_episode_created")
            on_episode_start_calls = CallbackTracker.get_calls("on_episode_start")
            on_episode_end_calls = CallbackTracker.get_calls("on_episode_end")
            on_sample_end_calls = CallbackTracker.get_calls("on_sample_end")
            # Cumulative counts: num_episodes started/ended per call, plus one
            # extra created-but-unstarted episode per call.
            expected_created = (num_episodes + 1) * (repeat + 1)
            expected_started = num_episodes * (repeat + 1)
            expected_ended = num_episodes * (repeat + 1)
            assert len(on_episode_created_calls) == expected_created
            assert len(on_episode_start_calls) == expected_started
            assert len(on_episode_end_calls) == expected_ended
            assert len(on_sample_end_calls) == repeat + 1
            assert on_sample_end_calls[-1][NUM_EPISODES] == num_episodes
class TestEnvRunnerConnectors:
    """Tests for connector invocations in both runner types.

    Connectors are called in specific situations:

    env_to_module connector:
        - After environment reset (to process initial observations)
        - After each environment step (to process observations for next action)
        - For done episodes in MultiAgent (extra postprocessing call)

    module_to_env connector:
        - After RLModule forward pass (to process actions before sending to env)
        - NOT called when using random_actions=True
    """

    def test_env_to_module_called_on_reset(self, env_runner_with_env_to_module_tracker):
        """Test env_to_module connector is called during environment reset."""
        env_runner = env_runner_with_env_to_module_tracker
        records = EnvToModuleConnectorTracker.call_records
        # Tracker must start clean before the first sample() call.
        assert len(records) == 0
        # Initial reset happens during construction, sample triggers it
        env_runner.sample(num_timesteps=0, random_actions=True)
        # Should have records for each vectorized env after reset
        assert len(records) == env_runner.num_envs
        # First records should be at timestep 0 (reset)
        reset_records = [r for r in records if r["timestep"] == 0]
        assert len(reset_records) == env_runner.num_envs

    @pytest.mark.parametrize("num_timesteps", [8, 25, 50, 100])
    def test_env_to_module_called_per_step(
        self, env_runner_with_env_to_module_tracker, num_timesteps
    ):
        """Test env_to_module connector is called after each environment step."""
        env_runner = env_runner_with_env_to_module_tracker
        env_runner.sample(num_timesteps=num_timesteps, random_actions=True)
        # Connector is called once per loop iteration, not once per timestep
        # With vectorized envs, each iteration steps all envs in parallel
        # So: 1 reset call + ceil(num_timesteps / num_envs) step calls
        call_count = EnvToModuleConnectorTracker.call_count
        min_expected_calls = 1 + math.ceil(num_timesteps / env_runner.num_envs)
        # Lower bound only: the runner may take extra iterations beyond the
        # exact timestep budget.
        assert call_count >= min_expected_calls

    @pytest.mark.parametrize("num_timesteps", [8, 25, 50, 100])
    def test_module_to_env_called_only_with_rl_module(
        self, env_runner_with_module_to_env_tracker, num_timesteps
    ):
        """Test module_to_env connector is called only when RLModule is used.

        Verifies:
        1. module_to_env IS called when using random_actions=False (RLModule engaged)
        2. module_to_env is NOT called when using random_actions=True (RLModule bypassed)
        """
        env_runner = env_runner_with_module_to_env_tracker
        # With random_actions=True, the RLModule is bypassed
        env_runner.sample(num_timesteps=num_timesteps, random_actions=True)
        assert ModuleToEnvConnectorTracker.call_count == 0
        # Use random_actions=False to engage the RLModule and module_to_env
        env_runner.sample(num_timesteps=num_timesteps, random_actions=False)
        assert ModuleToEnvConnectorTracker.call_count >= num_timesteps

    def test_connector_sample_options(self, env_runner_with_env_to_module_tracker):
        """Test connector behavior with various sample options.

        This test verifies:
        1. Episode IDs are consistent across sample calls (continuity)
        2. force_reset triggers new reset calls and creates new episode IDs
        3. The explore flag is correctly passed to connectors
        """
        env_runner = env_runner_with_env_to_module_tracker
        # Part 1: Test episode ID continuity across samples
        episodes_1 = env_runner.sample(num_timesteps=3, random_actions=True)
        episode_ids_1 = {e.id_ for e in episodes_1}
        call_count_after_first = EnvToModuleConnectorTracker.call_count
        # Sample more - should continue same episodes
        env_runner.sample(num_timesteps=3, random_actions=True)
        # Verify episode IDs appear in connector records
        for ep_id in episode_ids_1:
            ep_records = EnvToModuleConnectorTracker.get_records_for_episode(ep_id)
            assert len(ep_records) >= 1
        # Part 2: Test force_reset creates new episodes
        env_runner.sample(num_timesteps=5, random_actions=True, force_reset=True)
        call_count_after_reset = EnvToModuleConnectorTracker.call_count
        assert call_count_after_reset > call_count_after_first
        # Should have reset records from initial + force_reset
        records = EnvToModuleConnectorTracker.call_records
        reset_records = [r for r in records if r["timestep"] == 0]
        assert len(reset_records) >= 2 * env_runner.num_envs
        # Part 3: Test explore flag is passed correctly
        # All records so far should have explore=True (default)
        assert all(r["explore"] for r in records)
        EnvToModuleConnectorTracker.reset()
        # Sample with explore=False
        env_runner.sample(num_timesteps=3, random_actions=True, explore=False)
        records = EnvToModuleConnectorTracker.call_records
        assert all(not r["explore"] for r in records)

    def test_env_to_module_postprocess_done_episodes_multi_agent(
        self, env_runner_with_env_to_module_tracker, runner_type
    ):
        """Test that MultiAgent runner calls env_to_module for done episode postprocessing.

        This is specific to MultiAgentEnvRunner which has an extra connector call
        for done episodes to postprocess artifacts like one-hot encoded observations.
        """
        if runner_type != "multi_agent":
            pytest.skip("Test only applicable to multi_agent runner")
        env_runner = env_runner_with_env_to_module_tracker
        num_episodes = 3
        episodes = env_runner.sample(num_episodes=num_episodes, random_actions=True)
        # With multiple envs, we may get more than num_episodes due to parallel completion
        assert len(episodes) >= num_episodes
        # Check that done episodes were recorded
        done_records = EnvToModuleConnectorTracker.get_done_episode_records()
        # Each done episode should have at least one record where is_done=True
        assert len(done_records) >= num_episodes
if __name__ == "__main__":
    import sys

    # Run this test module directly through pytest in verbose mode.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/env/tests/test_env_runner.py",
"license": "Apache License 2.0",
"lines": 910,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/_internal/arrow_utils.py | from __future__ import annotations
import itertools
from typing import List, Union
import numpy as np
import pyarrow as pa
import pyarrow.compute as pc
def _counts_to_offsets(counts: pa.Array) -> pa.Array:
    """Build list-array offsets from per-row element counts.

    The result is the running total of ``counts`` with a leading zero,
    i.e. ``[0, c0, c0 + c1, ...]``, matching the offsets layout expected
    by Arrow list arrays.
    """
    running_total = pc.cumulative_sum(counts)
    leading_zero = pa.array([0], type=running_total.type)
    return pa.concat_arrays([leading_zero, running_total])
def _combine_as_list_array(
    column_values: List[Union[pa.Array, pa.ChunkedArray]] | None = None,
    *,
    offsets: pa.Array | None = None,
    values: pa.Array | None = None,
    is_large: bool = False,
    null_mask: pa.Array | None = None,
) -> pa.Array:
    """Combine list arrays or build a list array from offsets and values.

    Either pass ``column_values`` (each input array becomes one list row of
    the result), or pass both ``offsets`` and ``values`` explicitly.

    Args:
        column_values: Arrays / chunked arrays to combine, one list row each.
        offsets: Precomputed list offsets (only when ``column_values`` is None).
        values: Flattened child values (only when ``column_values`` is None).
        is_large: If True, build a ``LargeListArray`` with int64 offsets;
            otherwise a ``ListArray`` with int32 offsets.
        null_mask: Optional boolean mask marking null list rows.

    Returns:
        A ``pa.ListArray`` or ``pa.LargeListArray``.

    Raises:
        ValueError: If neither ``column_values`` nor both ``offsets`` and
            ``values`` are provided.
    """
    # Offset width follows the list flavor: int64 for large_list, int32 for
    # list. Computed once here — the original computed it (and cast the
    # offsets) twice on the column_values path.
    offsets_type = pa.int64() if is_large else pa.int32()
    if column_values is None:
        if offsets is None or values is None:
            raise ValueError(
                "Either column_values or both offsets and values must be provided."
            )
    else:
        # One list row per input array: offsets are the cumulative lengths.
        lens = [len(v) for v in column_values]
        offsets = pa.array(np.concatenate([[0], np.cumsum(lens)]), type=offsets_type)
        # Flatten chunked arrays so everything concatenates as plain arrays.
        values = pa.concat_arrays(
            itertools.chain.from_iterable(
                v.chunks if isinstance(v, pa.ChunkedArray) else [v]
                for v in column_values
            )
        )
    # Normalize (possibly caller-provided) offsets to the required width.
    offsets = pc.cast(offsets, offsets_type)
    array_cls = pa.LargeListArray if is_large else pa.ListArray
    list_type = pa.large_list(values.type) if is_large else pa.list_(values.type)
    return array_cls.from_arrays(offsets, values, list_type, mask=null_mask)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/arrow_utils.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/train/v2/_internal/execution/callback_manager.py | import logging
from ray.train.v2.api.exceptions import ControllerError
logger = logging.getLogger(__name__)
class CallbackManager:
    """Dispatches a named hook to every registered callback, in order."""

    def __init__(self, callbacks):
        self._callbacks = callbacks

    # change this return type later
    def invoke(self, hook_name: str, *args, **context) -> None:
        """Call ``hook_name`` on each registered callback.

        Raises:
            ControllerError: If a callback is missing the hook, the hook is
                not callable, or the hook itself raises an exception.
        """
        for cb in self._callbacks:
            cb_name = type(cb).__name__
            hook = getattr(cb, hook_name, None)
            # `callable(None)` is False, so a missing hook is covered too.
            if not callable(hook):
                raise ControllerError(
                    AttributeError(
                        f"Callback '{cb_name}' hook '{hook_name}' is missing "
                        "or not callable."
                    )
                )
            try:
                hook(*args, **context)
            except Exception as exc:
                # TODO: Enable configuration to suppress exceptions.
                logger.exception(
                    f"Exception raised in callback hook '{hook_name}' from callback "
                    f"'{cb_name}'."
                )
                raise ControllerError(exc) from exc
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/_internal/execution/callback_manager.py",
"license": "Apache License 2.0",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/train/v2/tests/test_callback_manager.py | import sys
from unittest.mock import MagicMock
import pytest
from ray.train.v2._internal.execution.callback import ControllerCallback
from ray.train.v2._internal.execution.callback_manager import CallbackManager
from ray.train.v2.api.exceptions import ControllerError
def test_invoke_callback_without_hook():
    """A callback that lacks the requested hook raises a ControllerError."""
    hookless = type("CallbackWithoutHook", (), {})()
    mgr = CallbackManager([hookless])
    with pytest.raises(ControllerError) as raised:
        mgr.invoke("test_hook", "arg1")
    # The wrapped failure must be the underlying AttributeError.
    assert isinstance(raised.value.controller_failure, AttributeError)
def test_invoke_multiple_callbacks_all_succeed():
    """Every registered callback receives the hook call exactly once."""
    first = MagicMock()
    first.test_hook = MagicMock()
    second = MagicMock()
    second.test_hook = MagicMock()
    mgr = CallbackManager([first, second])
    outcome = mgr.invoke("test_hook", "arg")
    first.test_hook.assert_called_once_with("arg")
    second.test_hook.assert_called_once_with("arg")
    # invoke() carries no return value.
    assert outcome is None
def test_invoke_with_real_controller_callback_error_returned():
    """Test with a real ControllerCallback implementation that raises an error."""

    class FailingControllerCallback(ControllerCallback):
        def __init__(self):
            self.called = False

        def after_controller_start(self, train_run_context):
            self.called = True
            raise ValueError("Intentional error")

    failing = FailingControllerCallback()
    mgr = CallbackManager([failing])
    run_context = MagicMock()
    with pytest.raises(ControllerError) as raised:
        mgr.invoke("after_controller_start", run_context)
    # The hook actually ran before raising, and the original error is kept.
    assert failing.called is True
    assert isinstance(raised.value.controller_failure, ValueError)
def test_invoke_callback_error_returns_controller_error():
    """An exception escaping a hook is surfaced wrapped in ControllerError."""
    failing = MagicMock()
    failing.test_hook = MagicMock(side_effect=ValueError("Original hook error"))
    mgr = CallbackManager([failing])
    with pytest.raises(ControllerError) as raised:
        mgr.invoke("test_hook", "arg1", key1="value1")
    assert isinstance(raised.value.controller_failure, ValueError)
if __name__ == "__main__":
    # Run this test module directly: verbose, stop at first failure.
    sys.exit(pytest.main(["-v", "-x", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/tests/test_callback_manager.py",
"license": "Apache License 2.0",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:rllib/offline/tests/test_offline_policy_evaluation_runner.py | import unittest
from pathlib import Path
import gymnasium as gym
import ray
from ray.rllib.algorithms.bc.bc import BCConfig
from ray.rllib.core import ALL_MODULES, DEFAULT_MODULE_ID
from ray.rllib.offline.offline_policy_evaluation_runner import (
OfflinePolicyEvaluationRunner,
)
from ray.rllib.utils.metrics import (
DATASET_NUM_ITERS_EVALUATED,
DATASET_NUM_ITERS_EVALUATED_LIFETIME,
EPISODE_LEN_MAX,
EPISODE_LEN_MEAN,
EPISODE_LEN_MIN,
EVALUATION_RESULTS,
MODULE_SAMPLE_BATCH_SIZE_MEAN,
NUM_ENV_STEPS_SAMPLED,
NUM_ENV_STEPS_SAMPLED_LIFETIME,
NUM_MODULE_STEPS_SAMPLED,
NUM_MODULE_STEPS_SAMPLED_LIFETIME,
OFFLINE_EVAL_RUNNER_RESULTS,
WEIGHTS_SEQ_NO,
)
from ray.rllib.utils.typing import ResultDict
class TestOfflineEvaluationRunner(unittest.TestCase):
    """Tests for `OfflinePolicyEvaluationRunner` and offline evaluation in BC."""

    def setUp(self) -> None:
        # Path to the pre-recorded CartPole dataset shipped with the tests.
        data_path = "offline/tests/data/cartpole/cartpole-v1_large"
        self.base_path = Path(__file__).parents[2]
        self.data_path = "local://" + self.base_path.joinpath(data_path).as_posix()
        # Assign the observation and action spaces.
        env = gym.make("CartPole-v1")
        self.observation_space = env.observation_space
        self.action_space = env.action_space
        # Create a simple config.
        self.config = (
            BCConfig()
            .environment(
                observation_space=self.observation_space,
                action_space=self.action_space,
            )
            .api_stack(
                enable_env_runner_and_connector_v2=True,
                enable_rl_module_and_learner=True,
            )
            .offline_data(
                input_=[self.data_path],
                dataset_num_iters_per_learner=1,
            )
            .learners(
                num_learners=0,
            )
            .training(
                train_batch_size_per_learner=256,
            )
            .evaluation(
                offline_evaluation_interval=2,
                offline_evaluation_type="is",
                num_offline_eval_runners=0,
                offline_eval_batch_size_per_runner=256,
            )
        )

    def tearDown(self):
        # Pull down Ray after each test.
        ray.shutdown()

    def test_offline_policy_evaluation_runner_setup(self):
        """Test the setup of the `OfflinePolicyEvaluationRunner`.

        Checks that after instantiation, the runner has a valid config and
        a `MultiRLModule`.
        """
        # Create an `OfflinePolicyEvaluationRunner` instance.
        offline_policy_eval_runner = OfflinePolicyEvaluationRunner(config=self.config)
        # Ensure that the runner has a config.
        self.assertIsInstance(offline_policy_eval_runner.config, BCConfig)
        # Ensure that the runner has an `MultiRLModule`.
        from ray.rllib.core.rl_module.multi_rl_module import MultiRLModule

        self.assertIsInstance(offline_policy_eval_runner.module, MultiRLModule)

    def test_offline_policy_evaluation_runner_dataset_iterator(self):
        """Test setting the dataset iterator in the `OfflinePolicyEvaluationRunner`.

        Ensures that after setting the iterator, the internal `_dataset_iterator`
        is not `None`.
        """
        # Create an algorithm from the config.
        algo = self.config.build()
        # Create an `OfflinePolicyEvaluationRunner`.
        offline_eval_runner = OfflinePolicyEvaluationRunner(config=self.config)
        # Assign an iterator to the runner.
        iterators = algo.offline_eval_runner_group._offline_data_iterators
        offline_eval_runner.set_dataset_iterator(iterator=iterators[0])
        # Ensure the dataset iterator is set.
        self.assertIsNotNone(offline_eval_runner._dataset_iterator)
        # Clean up.
        algo.cleanup()

    def test_offline_policy_evaluation_runner_run(self):
        """Test the `OfflinePolicyEvaluationRunner.run()` method.

        Checks, that the correct number of env steps and dataset iterations
        were sampled. Furthermore, ensures that the returned metrics dict has the
        correct structure and types. Tests also that the internal `_batch_iterator`
        was built correctly.
        """
        # Build an algorithm.
        algo = self.config.build()
        # Build an `OfflinePolicyEvaluationRunner` instance.
        offline_eval_runner = OfflinePolicyEvaluationRunner(config=self.config)
        # Assign a data iterator to the runner.
        iterators = algo.offline_eval_runner_group._offline_data_iterators
        offline_eval_runner.set_dataset_iterator(iterator=iterators[0])
        # Run the runner and receive metrics.
        metrics = offline_eval_runner.run()
        # Ensure that we received a dictionary.
        self.assertIsInstance(metrics, ResultDict)
        # Ensure that the metrics of the `default_policy` are also a dict.
        self.assertIsInstance(metrics[DEFAULT_MODULE_ID], ResultDict)
        # Make sure that the metric for the total eval loss is a `Stats` instance.
        from ray.rllib.utils.metrics.stats import StatsBase

        for key in metrics[DEFAULT_MODULE_ID]:
            self.assertIsInstance(metrics[DEFAULT_MODULE_ID][key], StatsBase)
        # Ensure that we sampled exactly the desired number of env steps.
        self.assertEqual(
            metrics[DEFAULT_MODULE_ID][MODULE_SAMPLE_BATCH_SIZE_MEAN].peek(),
            self.config.offline_eval_batch_size_per_runner,
        )
        # Ensure that - in this case of 1-step episodes - the number of
        # module steps sampled equals the number of env steps sampled.
        for key in [DEFAULT_MODULE_ID, ALL_MODULES]:
            self.assertEqual(
                metrics[key][NUM_MODULE_STEPS_SAMPLED].peek(),
                self.config.offline_eval_batch_size_per_runner
                * self.config.dataset_num_iters_per_learner,
            )
            self.assertEqual(
                metrics[key][NUM_MODULE_STEPS_SAMPLED_LIFETIME].peek(),
                self.config.offline_eval_batch_size_per_runner
                * self.config.dataset_num_iters_per_learner,
            )
            # Ensure that we sampled the correct number of env steps.
            self.assertEqual(
                metrics[key][NUM_ENV_STEPS_SAMPLED].peek(),
                self.config.offline_eval_batch_size_per_runner
                * self.config.dataset_num_iters_per_learner,
            )
            # Ensure that the lifetime env steps sampled equal the number of
            # env steps sampled.
            self.assertEqual(
                metrics[key][NUM_ENV_STEPS_SAMPLED_LIFETIME].peek(),
                self.config.offline_eval_batch_size_per_runner
                * self.config.dataset_num_iters_per_learner,
            )
        # Make sure we also iterated only once over the dataset.
        self.assertEqual(
            metrics[ALL_MODULES][DATASET_NUM_ITERS_EVALUATED].peek(),
            self.config.dataset_num_iters_per_learner,
        )
        self.assertEqual(
            metrics[ALL_MODULES][DATASET_NUM_ITERS_EVALUATED_LIFETIME].peek(),
            self.config.dataset_num_iters_per_learner,
        )
        # Since we have 1-step episodes, ensure that min, max, mean episode
        # lengths are all equal to 1.
        self.assertEqual(metrics[EPISODE_LEN_MIN].peek().item(), 1)
        self.assertEqual(metrics[EPISODE_LEN_MAX].peek().item(), 1)
        self.assertEqual(metrics[EPISODE_LEN_MEAN].peek().item(), 1)
        # Ensure that the `_batch_iterator` instance was built. Note, this is
        # built in the first call to `OfflineEvaluationRunner.run()`.
        from ray.rllib.offline.offline_policy_evaluation_runner import (
            MiniBatchEpisodeRayDataIterator,
        )

        self.assertIsInstance(
            offline_eval_runner._batch_iterator, MiniBatchEpisodeRayDataIterator
        )
        # Clean up.
        algo.cleanup()

    def test_evaluation_in_algorithm_evaluate_offline(self):
        """Test using the algorithm's `evaluate_offline()` method.

        Checks, that the correct number of env steps and dataset iterations
        were sampled.
        """
        # Build an algorithm.
        algo = self.config.build()
        # Get evaluation metrics.
        eval_metrics = algo.evaluate_offline()
        # Ensure that we received a dictionary.
        self.assertIsInstance(eval_metrics, ResultDict)
        # Ensure that the metrics of the `default_policy` are also a dict.
        self.assertIsInstance(eval_metrics[OFFLINE_EVAL_RUNNER_RESULTS], ResultDict)
        eval_metrics = eval_metrics[OFFLINE_EVAL_RUNNER_RESULTS]
        # Ensure that we sampled exactly the desired number of env steps.
        self.assertEqual(
            eval_metrics[DEFAULT_MODULE_ID][MODULE_SAMPLE_BATCH_SIZE_MEAN],
            self.config.offline_eval_batch_size_per_runner,
        )
        # Ensure that - in this case of 1-step episodes - the number of
        # module steps sampled equals the number of env steps sampled.
        for key in [DEFAULT_MODULE_ID, ALL_MODULES]:
            self.assertEqual(
                eval_metrics[key][NUM_MODULE_STEPS_SAMPLED],
                self.config.offline_eval_batch_size_per_runner
                * self.config.dataset_num_iters_per_learner,
            )
            self.assertEqual(
                eval_metrics[key][NUM_MODULE_STEPS_SAMPLED_LIFETIME],
                self.config.offline_eval_batch_size_per_runner
                * self.config.dataset_num_iters_per_learner,
            )
            # Ensure that we sampled the correct number of env steps.
            self.assertEqual(
                eval_metrics[key][NUM_ENV_STEPS_SAMPLED],
                self.config.offline_eval_batch_size_per_runner
                * self.config.dataset_num_iters_per_learner,
            )
            # Ensure that the lifetime env steps sampled equal the number of
            # env steps sampled.
            self.assertEqual(
                eval_metrics[key][NUM_ENV_STEPS_SAMPLED_LIFETIME],
                self.config.offline_eval_batch_size_per_runner
                * self.config.dataset_num_iters_per_learner,
            )
        # Make sure we also iterated only once over the dataset.
        self.assertEqual(
            eval_metrics[ALL_MODULES][DATASET_NUM_ITERS_EVALUATED],
            self.config.dataset_num_iters_per_learner,
        )
        self.assertEqual(
            eval_metrics[ALL_MODULES][DATASET_NUM_ITERS_EVALUATED_LIFETIME],
            self.config.dataset_num_iters_per_learner,
        )
        # Clean up.
        algo.cleanup()

    def test_evaluation_in_algorithm_train(self):
        """Test using the algorithm's `train()` method with offline evaluation.

        Checks, that the correct number of env steps and dataset iterations
        were sampled. Furthermore, ensures that offline evaluation is run at the
        correct interval.
        """
        # Build an algorithm.
        algo = self.config.build()
        # Run a few training iterations.
        results = []
        for i in range(5):
            results.append(algo.train())
        # Ensure that we evaluated every 2 training iterations.
        self.assertEqual(self.config.offline_evaluation_interval, 2)
        self.assertIn(EVALUATION_RESULTS, results[1])
        self.assertIn(EVALUATION_RESULTS, results[2])
        self.assertIn(EVALUATION_RESULTS, results[3])
        self.assertIn(EVALUATION_RESULTS, results[4])
        # Also ensure we have no evaluation results in the first iteration.
        self.assertNotIn(EVALUATION_RESULTS, results[0])
        # Ensure that we did 2 iterations over the dataset in each evaluation.
        # TODO (simon): Add a test for the `weights_seq_no`.
        expected_weights_seq_no = 0
        for eval_idx in [1, 2, 3, 4]:
            # Evaluation ran at this iteration.
            evaluation_ran = (
                eval_idx + 1
            ) % self.config.offline_evaluation_interval == 0
            # Get evaluation metrics.
            eval_metrics = results[eval_idx][EVALUATION_RESULTS]
            eval_metrics = eval_metrics[OFFLINE_EVAL_RUNNER_RESULTS]
            # Ensure that we sampled exactly once from the dataset.
            self.assertEqual(
                eval_metrics[ALL_MODULES][DATASET_NUM_ITERS_EVALUATED],
                self.config.dataset_num_iters_per_learner,
            )
            # Update expected weights seq no.
            if evaluation_ran:
                expected_weights_seq_no = eval_idx + 1
            # Check weights seq no.
            self.assertEqual(
                eval_metrics[WEIGHTS_SEQ_NO],
                expected_weights_seq_no,
            )
            # Check lifetime dataset iterations one iteration after actual evaluation.
            if not evaluation_ran:
                # NOTE: In the first evaluation iteration the lifetime metrics are correct
                # right away, in the later evaluations they are only updated one iteration later,
                # due to compilation in `_run_one_training_iteration`. See also the note below.
                if eval_idx <= 2:
                    self.assertEqual(
                        eval_metrics[ALL_MODULES][DATASET_NUM_ITERS_EVALUATED_LIFETIME],
                        self.config.dataset_num_iters_per_learner,
                    )
                else:
                    self.assertEqual(
                        eval_metrics[ALL_MODULES][DATASET_NUM_ITERS_EVALUATED_LIFETIME],
                        results[eval_idx - 1][EVALUATION_RESULTS][
                            OFFLINE_EVAL_RUNNER_RESULTS
                        ][ALL_MODULES][DATASET_NUM_ITERS_EVALUATED_LIFETIME]
                        + self.config.dataset_num_iters_per_learner,
                    )
        # Get evaluation metrics from the last training iteration.
        # NOTE: Evaluation ran at one iteration before, but lifetime metrics
        # are updated only one iteration later. This is a known issue, but hard
        # to fix without breaking existing code.
        eval_metrics = results[4][EVALUATION_RESULTS]
        # Ensure that we received a dictionary.
        self.assertIsInstance(eval_metrics, ResultDict)
        # Ensure that the metrics of the `default_policy` are also a dict.
        self.assertIsInstance(eval_metrics[OFFLINE_EVAL_RUNNER_RESULTS], ResultDict)
        eval_metrics = eval_metrics[OFFLINE_EVAL_RUNNER_RESULTS]
        # Ensure that we sampled exactly the desired number of env steps.
        self.assertEqual(
            eval_metrics[DEFAULT_MODULE_ID][MODULE_SAMPLE_BATCH_SIZE_MEAN],
            self.config.offline_eval_batch_size_per_runner,
        )
        # Ensure that - in this case of 1-step episodes - the number of
        # module steps sampled equals the number of env steps sampled.
        for key in [DEFAULT_MODULE_ID, ALL_MODULES]:
            self.assertEqual(
                eval_metrics[key][NUM_MODULE_STEPS_SAMPLED],
                self.config.offline_eval_batch_size_per_runner
                * self.config.dataset_num_iters_per_learner,
            )
            self.assertEqual(
                eval_metrics[key][NUM_MODULE_STEPS_SAMPLED_LIFETIME],
                self.config.offline_eval_batch_size_per_runner
                * self.config.dataset_num_iters_per_learner
                * self.config.offline_evaluation_interval,
            )
            # Ensure that we sampled the correct number of env steps.
            self.assertEqual(
                eval_metrics[key][NUM_ENV_STEPS_SAMPLED],
                self.config.offline_eval_batch_size_per_runner
                * self.config.dataset_num_iters_per_learner,
            )
            # Ensure that the lifetime env steps sampled equal the number of
            # env steps sampled.
            self.assertEqual(
                eval_metrics[key][NUM_ENV_STEPS_SAMPLED_LIFETIME],
                self.config.offline_eval_batch_size_per_runner
                * self.config.dataset_num_iters_per_learner
                * self.config.offline_evaluation_interval,
            )
        # Clean up.
        algo.cleanup()
if __name__ == "__main__":
    import sys

    import pytest

    # Run this test module directly through pytest in verbose mode.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/offline/tests/test_offline_policy_evaluation_runner.py",
"license": "Apache License 2.0",
"lines": 339,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/_internal/operator_schema_exporter.py | """Exporter API for Ray Data operator schema."""
import logging
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Any, Dict, Optional
import ray
from ray._private.event.export_event_logger import (
EventLogType,
check_export_api_enabled,
get_export_event_logger,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class OperatorSchema:
    """Immutable record of one Ray Data operator's output schema.

    Attributes:
        operator_uuid: Unique identifier of the operator instance.
        schema_fields: Mapping from field name to field type name.
    """

    operator_uuid: str
    schema_fields: Dict[str, str]  # Mapping from name to type
def operator_schema_to_proto(operator_schema: OperatorSchema) -> Any:
    """Serialize an ``OperatorSchema`` into its export protobuf message.

    Args:
        operator_schema: The schema record to convert.

    Returns:
        An ``ExportDatasetOperatorSchema`` protobuf message.
    """
    # Imported lazily so the generated protobuf module is only loaded on use.
    from ray.core.generated.export_dataset_operator_schema_pb2 import (
        ExportDatasetOperatorSchema as ProtoOperatorSchema,
    )

    return ProtoOperatorSchema(
        operator_uuid=operator_schema.operator_uuid,
        schema_fields=operator_schema.schema_fields,
    )
def get_operator_schema_exporter() -> Optional["LoggerOperatorSchemaExporter"]:
    """Return an operator schema exporter, or None when exporting is disabled."""
    exporter = LoggerOperatorSchemaExporter.create_if_enabled()
    return exporter
class OperatorSchemaExporter(ABC):
    """Interface for exporting Ray Data operator schemas.

    Concrete implementations may ship schemas to destinations such as log
    files, databases, or monitoring systems.
    """

    @abstractmethod
    def export_operator_schema(self, operator_schema: OperatorSchema) -> None:
        """Export one operator schema to the destination.

        Args:
            operator_schema: OperatorSchema object containing operator schema details.
        """

    @classmethod
    @abstractmethod
    def create_if_enabled(cls) -> Optional["OperatorSchemaExporter"]:
        """Return an exporter instance if exporting is enabled, else None."""
class LoggerOperatorSchemaExporter(OperatorSchemaExporter):
    """Schema exporter backed by Ray's export event logger.

    Writes operator schemas into export-event log files.
    """

    def __init__(self, logger: logging.Logger):
        """Wrap a preconfigured export event logger.

        Args:
            logger: The export event logger used to emit schema events.
        """
        self._export_logger = logger

    def export_operator_schema(self, operator_schema: OperatorSchema) -> None:
        """Convert the schema to protobuf and emit it as an export event.

        Args:
            operator_schema: OperatorSchema object containing operator event details.
        """
        proto = operator_schema_to_proto(operator_schema)
        self._export_logger.send_event(proto)

    @classmethod
    def create_if_enabled(cls) -> Optional["LoggerOperatorSchemaExporter"]:
        """Build an exporter when the export API is enabled.

        Returns:
            A LoggerOperatorSchemaExporter instance, none otherwise.
        """
        from ray.core.generated.export_event_pb2 import ExportEvent

        if not check_export_api_enabled(
            ExportEvent.SourceType.EXPORT_DATASET_OPERATOR_SCHEMA
        ):
            # The export API is not enabled, so we shouldn't create an exporter
            return None

        log_directory = ray._private.worker._global_node.get_logs_dir_path()
        try:
            export_logger = get_export_event_logger(
                EventLogType.DATASET_OPERATOR_SCHEMA,
                log_directory,
            )
        except Exception:
            logger.exception(
                "Unable to initialize the export event logger, so no operator export "
                "schema will be written."
            )
            return None
        return LoggerOperatorSchemaExporter(export_logger)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/operator_schema_exporter.py",
"license": "Apache License 2.0",
"lines": 106,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:python/ray/data/tests/test_operator_schema_export.py | import json
import os
import sys
import pyarrow as pa
import pytest
import ray
from ray._private import ray_constants
from ray.data._internal.execution.operators.input_data_buffer import InputDataBuffer
from ray.data._internal.execution.streaming_executor import StreamingExecutor
from ray.data.context import DataContext
def _get_exported_data():
    """Read and parse the operator-schema export event log of this session."""
    session_dir = ray._private.worker._global_node.get_session_dir_path()
    exported_file = os.path.join(
        session_dir,
        "logs",
        "export_events",
        "event_EXPORT_DATASET_OPERATOR_SCHEMA.log",
    )
    assert os.path.isfile(exported_file)
    # One JSON document per line.
    with open(exported_file, "r") as f:
        return [json.loads(line) for line in f]
def test_export_operator_schema():
    """End-to-end check that operator schemas land in the export event log."""
    ray.init()
    # NOTE(review): the export-write flag is flipped after ray.init(); it
    # appears to be read lazily when the exporter is created — confirm this
    # ordering is intentional.
    ray_constants.RAY_ENABLE_EXPORT_API_WRITE = True
    ctx = DataContext.get_current()
    op = InputDataBuffer(ctx, [])
    executor = StreamingExecutor(ctx)
    # Do not export schema if it's None
    executor._export_operator_schema(op)
    data = _get_exported_data()
    assert len(data) == 0
    # Export if it's a valid schema
    schema = pa.schema([pa.field("id", pa.int32()), pa.field("name", pa.string())])
    executor._op_schema[op] = schema
    executor._export_operator_schema(op)
    data = _get_exported_data()
    assert len(data) == 1
    assert data[0]["event_data"]["schema_fields"] == {"name": "string", "id": "int32"}
    # Export updated schema of the same operator
    schema2 = pa.schema(
        [
            pa.field("id", pa.int32()),
            pa.field("name", pa.string()),
            pa.field("age", pa.int32()),
        ]
    )
    executor._op_schema[op] = schema2
    executor._export_operator_schema(op)
    data = _get_exported_data()
    assert len(data) == 2
    # Both events must reference the same operator instance.
    assert (
        data[0]["event_data"]["operator_uuid"] == data[1]["event_data"]["operator_uuid"]
    )
    assert data[0]["event_data"]["schema_fields"] == {"name": "string", "id": "int32"}
    assert data[1]["event_data"]["schema_fields"] == {
        "name": "string",
        "id": "int32",
        "age": "int32",
    }
if __name__ == "__main__":
    # Run this test module directly through pytest in verbose mode.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_operator_schema_export.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/_internal/datasource/databricks_credentials.py | """Databricks credential providers for Ray Data.
This module provides credential abstraction for Databricks authentication,
supporting static tokens with extensibility for future credential sources.
"""
import logging
import os
from abc import ABC, abstractmethod
from typing import Callable, Optional
import requests
logger = logging.getLogger(__name__)
# Default environment variable names for Databricks credentials
DEFAULT_TOKEN_ENV_VAR = "DATABRICKS_TOKEN"
DEFAULT_HOST_ENV_VAR = "DATABRICKS_HOST"
class DatabricksCredentialProvider(ABC):
    """Interface for supplying Databricks authentication credentials.

    Decouples consumers (e.g. DatabricksUCDatasource) from the credential
    source, so static tokens, file-based credentials, and other sources can
    be plugged in interchangeably.

    Implementations must provide:
        - get_token(): the current authentication token
        - get_host(): the Databricks host URL (optional)
        - invalidate(): drop any cached credentials
    """

    @abstractmethod
    def get_token(self) -> str:
        """Return the current authentication token.

        Raises:
            ValueError: If no valid token is available.
        """

    @abstractmethod
    def get_host(self) -> str:
        """Return the Databricks host URL.

        Raises:
            ValueError: If no valid host is available.
        """

    @abstractmethod
    def invalidate(self) -> None:
        """Discard any cached credentials.

        Call when credentials need refreshing, e.g. after an
        authentication error.
        """
class StaticCredentialProvider(DatabricksCredentialProvider):
    """Credential provider backed by fixed token/host values.

    The simplest provider: suitable whenever the token never needs to be
    refreshed during the lifetime of the datasource.

    Args:
        token: The Databricks authentication token.
        host: The Databricks host URL.

    Raises:
        ValueError: If ``token`` or ``host`` is empty or None.
    """

    def __init__(self, token: str, host: str):
        # Fail fast on missing credentials rather than surfacing a
        # confusing HTTP auth error later.
        if not token:
            raise ValueError("Token cannot be empty or None")
        if not host:
            raise ValueError("Host cannot be empty or None")
        self._token = token
        self._host = host

    def get_token(self) -> str:
        """Return the token supplied at construction."""
        return self._token

    def get_host(self) -> str:
        """Return the host URL supplied at construction."""
        return self._host

    def invalidate(self) -> None:
        """Do nothing: static credentials cannot be refreshed."""
class EnvironmentCredentialProvider(DatabricksCredentialProvider):
    """Credential provider that resolves credentials from the environment.

    The token is read from an environment variable at construction time.
    The host is read from an environment variable as well, falling back to
    Databricks-runtime auto-detection when that variable is unset.

    Args:
        token_env_var: Environment variable name for the token.
            Defaults to DEFAULT_TOKEN_ENV_VAR ("DATABRICKS_TOKEN").
        host_env_var: Environment variable name for the host.
            Defaults to DEFAULT_HOST_ENV_VAR ("DATABRICKS_HOST").

    Raises:
        ValueError: If the token or host cannot be resolved.
    """

    def __init__(
        self,
        token_env_var: str = DEFAULT_TOKEN_ENV_VAR,
        host_env_var: str = DEFAULT_HOST_ENV_VAR,
    ):
        self._token_env_var = token_env_var
        self._host_env_var = host_env_var

        # Fail fast if the token is missing at construction time.
        token = os.environ.get(self._token_env_var)
        if not token:
            raise ValueError(
                f"Environment variable '{self._token_env_var}' is not set. "
                "Please set it to your Databricks access token."
            )
        self._token = token

        # Host resolution order: explicit env var first, then runtime detection.
        host = os.environ.get(self._host_env_var) or self._detect_databricks_host()
        if not host:
            raise ValueError(
                "You are not in databricks runtime, please set environment variable "
                f"'{self._host_env_var}' to databricks workspace URL "
                '(e.g. "adb-<workspace-id>.<random-number>.azuredatabricks.net").'
            )
        self._host = host

    def _detect_databricks_host(self) -> Optional[str]:
        """Return the workspace host when running inside Databricks, else None."""
        try:
            from ray.util.spark.utils import is_in_databricks_runtime

            if not is_in_databricks_runtime():
                return None

            import IPython

            ip_shell = IPython.get_ipython()
            if ip_shell is None:
                return None

            dbutils = ip_shell.ns_table["user_global"]["dbutils"]
            ctx = dbutils.notebook.entry_point.getDbutils().notebook().getContext()
            return ctx.tags().get("browserHostName").get()
        except Exception as e:
            # Best-effort detection: never raise, just log and fall through.
            logger.warning(f"Failed to detect Databricks host from runtime: {e}")
        return None

    def get_token(self) -> str:
        """Return the most recently resolved token."""
        return self._token

    def get_host(self) -> str:
        """Return the resolved host URL."""
        return self._host

    def invalidate(self) -> None:
        """Re-read the token from the environment.

        Picks up refreshed tokens when the environment variable is updated
        by an external process. If the variable is now unset, the previous
        token is kept rather than failing.
        """
        refreshed = os.environ.get(self._token_env_var)
        if refreshed:
            self._token = refreshed
def resolve_credential_provider(
    credential_provider: Optional[DatabricksCredentialProvider] = None,
) -> DatabricksCredentialProvider:
    """Return the credential provider to use for Databricks authentication.

    Args:
        credential_provider: An explicit credential provider instance, or
            None to use the default resolution.

    Returns:
        The given provider when supplied; otherwise a fresh
        EnvironmentCredentialProvider reading the standard env vars.
    """
    if credential_provider is None:
        # No explicit provider: fall back to environment variables.
        return EnvironmentCredentialProvider()
    return credential_provider
def build_headers(
    credential_provider: DatabricksCredentialProvider,
) -> dict[str, str]:
    """Construct HTTP headers carrying a freshly fetched bearer token.

    Args:
        credential_provider: Source of the authentication token.

    Returns:
        Dictionary containing Content-Type and Authorization headers.
    """
    # Fetch the token on every call so refreshed credentials are picked up.
    token = credential_provider.get_token()
    headers = {"Content-Type": "application/json"}
    headers["Authorization"] = f"Bearer {token}"
    return headers
def request_with_401_retry(
    request_fn: Callable[..., requests.Response],
    url: str,
    credential_provider: DatabricksCredentialProvider,
    **kwargs,
) -> requests.Response:
    """Issue an HTTP request, retrying once with refreshed credentials on 401.

    Args:
        request_fn: Request function (e.g., requests.get or requests.post)
        url: Request URL
        credential_provider: Credential provider for authentication
        **kwargs: Additional arguments passed to requests

    Returns:
        Response object (after calling raise_for_status)
    """

    def _send() -> requests.Response:
        # Headers are rebuilt per attempt so a refreshed token is used.
        return request_fn(url, headers=build_headers(credential_provider), **kwargs)

    response = _send()
    if response.status_code == 401:
        logger.info("Received 401 response, invalidating credentials and retrying.")
        credential_provider.invalidate()
        response = _send()
    response.raise_for_status()
    return response
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/datasource/databricks_credentials.py",
"license": "Apache License 2.0",
"lines": 206,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:python/ray/data/tests/datasource/databricks_test_utils.py | """Test utilities for Databricks datasource tests."""
from dataclasses import dataclass, field
from typing import Optional
from ray.data._internal.datasource.databricks_credentials import (
DatabricksCredentialProvider,
)
@dataclass
class MockResponse:
    """Lightweight stand-in for an HTTP response in tests.

    Args:
        status_code: HTTP status code. Defaults to 200.
        content: Response content as bytes. Defaults to None.
        _json_data: JSON response data. Defaults to None.
        raise_on_error: If True, raise_for_status() raises for status >= 400.
            Defaults to True.
    """

    status_code: int = 200
    content: Optional[bytes] = None
    _json_data: Optional[dict] = None
    raise_on_error: bool = field(default=True, repr=False)

    def raise_for_status(self):
        """Raise a generic Exception for error status codes (>= 400)."""
        is_error = self.status_code >= 400
        if is_error and self.raise_on_error:
            raise Exception(f"HTTP Error {self.status_code}")

    def json(self):
        """Return the stored JSON payload (may be None)."""
        return self._json_data
class RefreshableCredentialProvider(DatabricksCredentialProvider):
    """Credential provider whose token changes after invalidation.

    Handy for exercising 401-retry logic: after invalidate() is called,
    the token switches from ``initial_token`` to "refreshed_token".

    Args:
        initial_token: The initial token value. Defaults to "expired_token".
        host: The host URL to return. Defaults to "https://test-host.databricks.com".
    """

    def __init__(
        self,
        initial_token: str = "expired_token",
        host: str = "https://test-host.databricks.com",
    ):
        # Public attributes inspected by tests.
        self.current_token = initial_token
        self.invalidate_count = 0
        self._host = host

    def get_token(self) -> str:
        """Return the token as of the most recent invalidation."""
        return self.current_token

    def get_host(self) -> str:
        """Return the configured host URL."""
        return self._host

    def invalidate(self) -> None:
        """Swap in "refreshed_token" and record the invalidation."""
        self.invalidate_count += 1
        self.current_token = "refreshed_token"
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/datasource/databricks_test_utils.py",
"license": "Apache License 2.0",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/datasource/test_databricks_credentials.py | """Unit tests for Databricks credential providers."""
import os
from unittest import mock
import pytest
from ray.data._internal.datasource.databricks_credentials import (
DatabricksCredentialProvider,
EnvironmentCredentialProvider,
StaticCredentialProvider,
resolve_credential_provider,
)
class TestDatabricksCredentialProvider:
    """Behavioral checks for the abstract DatabricksCredentialProvider base."""

    def test_cannot_instantiate_abstract_class(self):
        """Instantiating the ABC directly must raise TypeError."""
        with pytest.raises(TypeError, match="Can't instantiate abstract class"):
            DatabricksCredentialProvider()

    def test_abstract_methods_defined(self):
        """Every method of the credential contract must be abstract."""
        required = {"get_token", "get_host", "invalidate"}
        assert required <= set(DatabricksCredentialProvider.__abstractmethods__)
class TestStaticCredentialProvider:
    """Tests for StaticCredentialProvider."""

    def test_init_with_valid_token_and_host(self):
        """A provider built from valid values echoes them back."""
        creds = StaticCredentialProvider(
            token="test_token", host="https://my-workspace.cloud.databricks.com"
        )
        assert creds.get_token() == "test_token"
        assert creds.get_host() == "https://my-workspace.cloud.databricks.com"

    @pytest.mark.parametrize(
        "token,host,expected_error",
        [
            ("", "host", "Token cannot be empty"),
            (None, "host", "Token cannot be empty"),
            ("valid_token", "", "Host cannot be empty"),
            ("valid_token", None, "Host cannot be empty"),
        ],
    )
    def test_init_with_invalid_inputs_raises_error(self, token, host, expected_error):
        """Empty/None token or host must be rejected at construction."""
        with pytest.raises(ValueError, match=expected_error):
            StaticCredentialProvider(token=token, host=host)

    def test_invalidate_is_noop(self):
        """invalidate() must not alter static credentials."""
        creds = StaticCredentialProvider(token="test_token", host="test_host")
        creds.invalidate()
        assert (creds.get_token(), creds.get_host()) == ("test_token", "test_host")

    def test_get_token_returns_same_value(self):
        """Repeated get_token() calls yield a stable value."""
        creds = StaticCredentialProvider(token="consistent_token", host="host")
        assert all(creds.get_token() == "consistent_token" for _ in range(2))
class TestEnvironmentCredentialProvider:
    """Tests for EnvironmentCredentialProvider."""

    def test_get_token_from_env(self):
        """Test get_token reads from environment variable."""
        with mock.patch.dict(
            os.environ, {"DATABRICKS_TOKEN": "env_token", "DATABRICKS_HOST": "host"}
        ):
            provider = EnvironmentCredentialProvider()
            assert provider.get_token() == "env_token"

    def test_get_host_from_env(self):
        """Test get_host reads from environment variable."""
        with mock.patch.dict(
            os.environ, {"DATABRICKS_TOKEN": "token", "DATABRICKS_HOST": "env_host"}
        ):
            provider = EnvironmentCredentialProvider()
            assert provider.get_host() == "env_host"

    @pytest.mark.parametrize(
        "env_vars,expected_error",
        [
            ({"DATABRICKS_HOST": "host"}, "DATABRICKS_TOKEN.*not set"),
            (
                {"DATABRICKS_TOKEN": "token"},
                "set environment variable.*DATABRICKS_HOST",
            ),
        ],
    )
    def test_init_raises_when_env_var_not_set(self, env_vars, expected_error):
        """Test __init__ raises ValueError when required env var is not set."""
        # clear=True wipes any real Databricks env vars so only `env_vars` exist.
        with mock.patch.dict(os.environ, env_vars, clear=True):
            with pytest.raises(ValueError, match=expected_error):
                EnvironmentCredentialProvider()

    def test_host_detected_from_databricks_runtime(self):
        """Test host is detected from Databricks runtime when env var not set."""
        # Patch the detection hook directly; the real implementation needs a
        # Databricks notebook environment.
        with (
            mock.patch.dict(os.environ, {"DATABRICKS_TOKEN": "token"}, clear=True),
            mock.patch.object(
                EnvironmentCredentialProvider,
                "_detect_databricks_host",
                return_value="detected-host.databricks.com",
            ),
        ):
            provider = EnvironmentCredentialProvider()
            assert provider.get_host() == "detected-host.databricks.com"

    def test_custom_env_var_names(self):
        """Test using custom environment variable names."""
        with mock.patch.dict(
            os.environ, {"MY_TOKEN": "custom_token", "MY_HOST": "custom_host"}
        ):
            provider = EnvironmentCredentialProvider(
                token_env_var="MY_TOKEN", host_env_var="MY_HOST"
            )
            assert provider.get_token() == "custom_token"
            assert provider.get_host() == "custom_host"

    def test_invalidate_refreshes_token_from_env(self):
        """Test that invalidate re-reads token from environment."""
        with mock.patch.dict(
            os.environ, {"DATABRICKS_TOKEN": "initial_token", "DATABRICKS_HOST": "host"}
        ):
            provider = EnvironmentCredentialProvider()
            assert provider.get_token() == "initial_token"
            # Simulate external token refresh
            # (patch.dict restores the original value when the context exits).
            os.environ["DATABRICKS_TOKEN"] = "refreshed_token"
            provider.invalidate()
            assert provider.get_token() == "refreshed_token"

    def test_invalidate_keeps_token_if_env_unset(self):
        """Test that invalidate keeps existing token if env var is unset."""
        with mock.patch.dict(
            os.environ, {"DATABRICKS_TOKEN": "initial_token", "DATABRICKS_HOST": "host"}
        ):
            provider = EnvironmentCredentialProvider()
            # Remove env var after initialization
            del os.environ["DATABRICKS_TOKEN"]
            provider.invalidate()
            # Should keep the old token rather than failing
            assert provider.get_token() == "initial_token"
class TestResolveCredentialProvider:
    """Tests for resolve_credential_provider function."""

    def test_resolve_with_explicit_provider(self):
        """An explicitly supplied provider is passed through untouched."""
        explicit = StaticCredentialProvider(token="my_token", host="my_host")
        assert resolve_credential_provider(credential_provider=explicit) is explicit

    @pytest.mark.parametrize("credential_provider_arg", [None, "no_arg"])
    def test_resolve_with_none_returns_environment_provider(
        self, credential_provider_arg
    ):
        """With no provider given, an EnvironmentCredentialProvider is built."""
        env = {"DATABRICKS_TOKEN": "token", "DATABRICKS_HOST": "host"}
        with mock.patch.dict(os.environ, env):
            # Exercise both the keyword-argument and no-argument call shapes.
            if credential_provider_arg == "no_arg":
                resolved = resolve_credential_provider()
            else:
                resolved = resolve_credential_provider(
                    credential_provider=credential_provider_arg
                )
            assert isinstance(resolved, EnvironmentCredentialProvider)
class TestCredentialProviderSerialization:
    """Tests for credential provider serialization (needed for Ray workers)."""

    @pytest.mark.parametrize(
        "provider_type,expected_token,expected_host",
        [
            ("static", "test_token", "test_host"),
            ("environment", "env_token", "env_host"),
        ],
    )
    def test_provider_is_picklable(self, provider_type, expected_token, expected_host):
        """Verify credential providers can be pickled and unpickled."""
        import pickle

        env = {"DATABRICKS_TOKEN": expected_token, "DATABRICKS_HOST": expected_host}
        with mock.patch.dict(os.environ, env):
            if provider_type == "static":
                original = StaticCredentialProvider(
                    token=expected_token, host=expected_host
                )
            else:
                original = EnvironmentCredentialProvider()
            # Round-trip through pickle and verify credentials survive.
            clone = pickle.loads(pickle.dumps(original))
            assert clone.get_token() == expected_token
            assert clone.get_host() == expected_host
if __name__ == "__main__":
    # Run this module's tests directly; SystemExit carries pytest's exit code.
    raise SystemExit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/datasource/test_databricks_credentials.py",
"license": "Apache License 2.0",
"lines": 181,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/datasource/test_databricks_uc_datasource.py | """Tests for Databricks Unity Catalog datasource."""
import json
import os
import re
import tempfile
import uuid
from contextlib import contextmanager
from dataclasses import dataclass
from unittest import mock
import pandas as pd
import pyarrow as pa
import pytest
import ray
import ray.cloudpickle as pickle
from ray.data._internal.datasource.databricks_credentials import (
DatabricksCredentialProvider,
StaticCredentialProvider,
)
from ray.data._internal.datasource.databricks_uc_datasource import (
DatabricksUCDatasource,
)
from ray.data._internal.util import rows_same
from ray.data.tests.datasource.databricks_test_utils import (
MockResponse,
RefreshableCredentialProvider,
)
from ray.tests.conftest import * # noqa
# =============================================================================
# Dataclasses for mock objects
# =============================================================================
@dataclass
class MockChunk:
    """A single result chunk as served by the mocked Databricks SQL API."""

    index: int  # zero-based chunk position
    row_count: int  # number of rows serialized into ``data``
    byte_count: int  # size of ``data`` in bytes
    data: bytes  # serialized Arrow IPC stream payload
# =============================================================================
# Mock credential providers for testing
# =============================================================================
class TokenTrackingProvider(DatabricksCredentialProvider):
    """A credential provider that returns incrementing tokens to track fetches."""

    def __init__(self):
        # Number of times get_token() has been called so far (public, read
        # directly by tests).
        self.token_fetch_count = 0

    def get_token(self) -> str:
        # Each fetch yields a distinct token: "token_1", "token_2", ...
        self.token_fetch_count += 1
        return f"token_{self.token_fetch_count}"

    def get_host(self) -> str:
        return "test_host"

    def invalidate(self) -> None:
        pass
# =============================================================================
# Pytest fixtures
# =============================================================================
@pytest.fixture
def databricks_env():
    """Fixture that sets up Databricks environment variables for a test."""
    fake_env = {"DATABRICKS_HOST": "test_host", "DATABRICKS_TOKEN": "test_token"}
    with mock.patch.dict(os.environ, fake_env):
        yield
@pytest.fixture
def refreshable_credential_provider():
    """Fixture that provides a refreshable credential provider.

    The provider starts with an expired token and switches to a refreshed
    one when invalidate() is called (see RefreshableCredentialProvider).
    """
    return RefreshableCredentialProvider(host="test_host")
@pytest.fixture
def token_tracking_provider():
    """Fixture that provides a token tracking credential provider.

    The provider returns a new "token_N" string on every get_token() call
    and counts the calls (see TokenTrackingProvider).
    """
    return TokenTrackingProvider()
@pytest.fixture
def requests_mocker():
    """Fixture that mocks requests.get and requests.post for a test."""
    with mock.patch("requests.get") as mock_get, mock.patch(
        "requests.post"
    ) as mock_post:
        yield {"get": mock_get, "post": mock_post}
@pytest.fixture
def test_data():
    """Fixture that provides the test DataFrame and query configuration."""
    frame = pd.DataFrame(
        {"c1": range(10000), "c2": [f"str{i}" for i in range(10000)]}
    )
    return {
        "expected_df": frame,
        "token": "test_token",
        "warehouse_id": "test_warehouse_id",
        "catalog": "catalog1",
        "schema": "db1",
        "query": "select * from table1",
        "rows_per_chunk": 700,
    }
# =============================================================================
# Helper functions
# =============================================================================
def create_mock_chunks(df: pd.DataFrame, rows_per_chunk: int) -> list[MockChunk]:
    """Split ``df`` into MockChunk objects of at most ``rows_per_chunk`` rows.

    Each chunk's ``data`` holds its rows serialized as an Arrow IPC stream.
    """
    chunks = []
    total_rows = len(df)
    for chunk_index, start in enumerate(range(0, total_rows, rows_per_chunk)):
        rows_in_chunk = min(rows_per_chunk, total_rows - start)
        piece = df[start : start + rows_in_chunk]
        arrow_table = pa.Table.from_pandas(piece)
        # Serialize the slice into an in-memory Arrow IPC stream.
        sink = pa.BufferOutputStream()
        with pa.ipc.new_stream(sink, arrow_table.schema) as writer:
            writer.write_table(arrow_table)
        payload = sink.getvalue()
        chunks.append(
            MockChunk(
                index=chunk_index,
                row_count=rows_in_chunk,
                byte_count=len(payload),
                data=payload,
            )
        )
    return chunks
# =============================================================================
# Test classes
# =============================================================================
class TestDatabricksUCDatasourceIntegration:
    """Integration tests for DatabricksUCDatasource."""

    # Env var that the read path consults; its name suggests it points at a
    # pickled setup callable re-installing these mocks in worker processes —
    # NOTE(review): confirm against the datasource implementation.
    _MOCK_ENV_VAR = "RAY_DATABRICKS_UC_DATASOURCE_READ_FN_MOCK_TEST_SETUP_FN_PATH"

    @contextmanager
    def _setup_mock(self, test_data: dict, mock_chunks: list[MockChunk]):
        """Set up mocks for integration tests.

        Patches requests.get/requests.post with handlers emulating the
        Databricks SQL statement API plus the external-link chunk download,
        and sets DATABRICKS_HOST/DATABRICKS_TOKEN for the duration.
        """
        chunk_meta_json = [
            {
                "chunk_index": chunk.index,
                "row_count": chunk.row_count,
                "byte_count": chunk.byte_count,
            }
            for chunk in mock_chunks
        ]
        # NOTE(review): manifest metadata is served in reverse order —
        # presumably to verify the reader keys off chunk_index rather than
        # manifest position; confirm.
        chunk_meta_json.reverse()
        valid_statement_ids = set()

        def request_post_mock(url, data=None, json=None, **kwargs):
            # Emulates POST /api/2.0/sql/statements/ (statement submission).
            import json as jsonlib

            headers = kwargs["headers"]
            if url == "https://test_shard/api/2.0/sql/statements/":
                assert headers == {
                    "Content-Type": "application/json",
                    "Authorization": f"Bearer {test_data['token']}",
                }
                assert jsonlib.loads(data) == {
                    "statement": test_data["query"],
                    "warehouse_id": test_data["warehouse_id"],
                    "wait_timeout": "0s",
                    "disposition": "EXTERNAL_LINKS",
                    "format": "ARROW_STREAM",
                    "catalog": test_data["catalog"],
                    "schema": test_data["schema"],
                }
                # Issue a fresh statement id and remember it so the GET
                # handler can reject polls for unknown statements.
                statement_id = uuid.uuid4().hex
                valid_statement_ids.add(statement_id)
                return MockResponse(
                    status_code=200,
                    content=b"",
                    _json_data={
                        "statement_id": statement_id,
                        "status": {"state": "PENDING"},
                    },
                )
            assert False, "Invalid request."

        def request_get_mock(url, params=None, **kwargs):
            # Emulates statement polling, chunk-link resolution, and the
            # unauthenticated external-link data download.
            headers = kwargs["headers"]
            if match := re.match(
                r"^https://test_shard/api/2\.0/sql/statements/([^/]*)/$", url
            ):
                # Status poll: always report success with the full manifest.
                statement_id = match.group(1)
                assert headers == {
                    "Content-Type": "application/json",
                    "Authorization": f"Bearer {test_data['token']}",
                }
                assert statement_id in valid_statement_ids
                return MockResponse(
                    status_code=200,
                    _json_data={
                        "status": {"state": "SUCCEEDED"},
                        "manifest": {
                            "truncated": False,
                            "chunks": chunk_meta_json,
                        },
                    },
                )
            if match := re.match(
                r"^https://test_shard/api/2\.0/sql/"
                r"statements/([^/]*)/result/chunks/([^/]*)$",
                url,
            ):
                # Resolve a chunk index to its (mock) external download link.
                assert headers == {
                    "Content-Type": "application/json",
                    "Authorization": f"Bearer {test_data['token']}",
                }
                chunk_index = match.group(2)
                external_link = f"https://test_external_link/{chunk_index}"
                return MockResponse(
                    status_code=200,
                    _json_data={"external_links": [{"external_link": external_link}]},
                )
            if match := re.match(r"^https://test_external_link/([^/]*)$", url):
                # External link download: must carry no auth headers.
                assert headers is None
                chunk_index = int(match.group(1))
                return MockResponse(
                    status_code=200,
                    content=mock_chunks[chunk_index].data,
                )
            assert False, "Invalid request."

        with (
            mock.patch("requests.get", request_get_mock),
            mock.patch("requests.post", request_post_mock),
            mock.patch.dict(
                os.environ,
                {
                    "DATABRICKS_HOST": "test_shard",
                    "DATABRICKS_TOKEN": test_data["token"],
                },
            ),
        ):
            yield

    @contextmanager
    def _setup_integration_test(self, test_data: dict):
        """Set up complete integration test environment with mocks and Ray.

        A callable that rebuilds the HTTP mocks is pickled to disk and its
        path exported via _MOCK_ENV_VAR before Ray is restarted, so worker
        processes can set up the same mocks.
        """
        mock_chunks = create_mock_chunks(
            test_data["expected_df"], test_data["rows_per_chunk"]
        )
        setup_mock_fn_path = os.path.join(tempfile.mkdtemp(), "setup_mock_fn.pkl")
        with open(setup_mock_fn_path, "wb") as fp:
            pickle.dump(lambda: self._setup_mock(test_data, mock_chunks), fp)
        with (
            self._setup_mock(test_data, mock_chunks),
            mock.patch.dict(os.environ, {self._MOCK_ENV_VAR: setup_mock_fn_path}),
        ):
            # Restart Ray so workers inherit the env var set above.
            ray.shutdown()
            ray.init()
            yield

    def test_read_with_table_name(self, test_data):
        """Test reading data using table name."""
        with self._setup_integration_test(test_data):
            result = ray.data.read_databricks_tables(
                warehouse_id=test_data["warehouse_id"],
                table="table1",
                catalog=test_data["catalog"],
                schema=test_data["schema"],
                override_num_blocks=5,
            ).to_pandas()
            assert rows_same(result, test_data["expected_df"])

    def test_read_with_sql_query(self, test_data):
        """Test reading data using SQL query."""
        with self._setup_integration_test(test_data):
            result = ray.data.read_databricks_tables(
                warehouse_id=test_data["warehouse_id"],
                query=test_data["query"],
                catalog=test_data["catalog"],
                schema=test_data["schema"],
                override_num_blocks=5,
            ).to_pandas()
            assert rows_same(result, test_data["expected_df"])

    @pytest.mark.parametrize("num_blocks", [5, 100])
    def test_read_with_different_parallelism(self, test_data, num_blocks):
        """Test reading data with different parallelism settings."""
        with self._setup_integration_test(test_data):
            result = ray.data.read_databricks_tables(
                warehouse_id=test_data["warehouse_id"],
                query=test_data["query"],
                catalog=test_data["catalog"],
                schema=test_data["schema"],
                override_num_blocks=num_blocks,
            ).to_pandas()
            assert rows_same(result, test_data["expected_df"])
class TestDatabricksUCDatasourceCredentials:
    """Tests for credential provider handling."""

    def test_schema_name_does_not_shadow_datasource_fields(self, requests_mocker):
        """Test that schema name is stored without using the `schema` attribute.

        This is a regression test for https://github.com/ray-project/ray/issues/46481.
        """
        requests_mocker["post"].return_value = mock.Mock(
            status_code=200,
            raise_for_status=lambda: None,
            json=lambda: {"statement_id": "test_stmt", "status": {"state": "PENDING"}},
        )
        requests_mocker["get"].return_value = mock.Mock(
            status_code=200,
            raise_for_status=lambda: None,
            json=lambda: {
                "status": {"state": "SUCCEEDED"},
                "manifest": {"truncated": False, "chunks": []},
            },
        )
        provider = StaticCredentialProvider(token="my_provider_token", host="test_host")
        datasource = DatabricksUCDatasource(
            warehouse_id="test_warehouse",
            catalog="test_catalog",
            schema="test_schema",
            query="SELECT 1",
            credential_provider=provider,
        )
        # Stored under `schema_name`, never under a `schema` attribute ...
        assert datasource.schema_name == "test_schema"
        assert "schema" not in datasource.__dict__
        # ... yet still transmitted as "schema" in the request payload.
        call_kwargs = requests_mocker["post"].call_args[1]
        payload = json.loads(call_kwargs["data"])
        assert payload["schema"] == "test_schema"

    def test_with_credential_provider(self, requests_mocker):
        """Test DatabricksUCDatasource with credential_provider parameter."""
        requests_mocker["post"].return_value = mock.Mock(
            status_code=200,
            raise_for_status=lambda: None,
            json=lambda: {"statement_id": "test_stmt", "status": {"state": "PENDING"}},
        )
        requests_mocker["get"].return_value = mock.Mock(
            status_code=200,
            raise_for_status=lambda: None,
            json=lambda: {
                "status": {"state": "SUCCEEDED"},
                "manifest": {"truncated": False},
            },
        )
        provider = StaticCredentialProvider(token="my_provider_token", host="test_host")
        _datasource = DatabricksUCDatasource(
            warehouse_id="test_warehouse",
            catalog="test_catalog",
            schema="test_schema",
            query="SELECT 1",
            credential_provider=provider,
        )
        # Verify the token from provider was used in requests
        call_kwargs = requests_mocker["post"].call_args[1]
        assert "Authorization" in call_kwargs["headers"]
        assert "Bearer my_provider_token" in call_kwargs["headers"]["Authorization"]

    def test_fresh_token_per_request(self, requests_mocker, token_tracking_provider):
        """Test that fresh tokens are fetched for each request during polling."""
        tokens_used = []

        def capture_post(url, *args, **kwargs):
            # Record the Authorization header of the statement-creation POST.
            tokens_used.append(kwargs["headers"]["Authorization"])
            return mock.Mock(
                status_code=200,
                raise_for_status=lambda: None,
                json=lambda: {
                    "statement_id": "test_stmt",
                    "status": {"state": "PENDING"},
                },
            )

        poll_count = [0]

        def capture_get(url, *args, **kwargs):
            tokens_used.append(kwargs["headers"]["Authorization"])
            poll_count[0] += 1
            # Stay PENDING for two polls to force repeated token fetches.
            state = "PENDING" if poll_count[0] < 3 else "SUCCEEDED"
            return mock.Mock(
                status_code=200,
                raise_for_status=lambda: None,
                json=lambda: {
                    "status": {"state": state},
                    "manifest": {"truncated": False, "chunks": []},
                },
            )

        requests_mocker["post"].side_effect = capture_post
        requests_mocker["get"].side_effect = capture_get
        DatabricksUCDatasource(
            warehouse_id="test_warehouse",
            catalog="test_catalog",
            schema="test_schema",
            query="SELECT 1",
            credential_provider=token_tracking_provider,
        )
        # Verify fresh token was fetched for each request:
        # 1 POST (statement creation) + 3 GETs (polling)
        assert token_tracking_provider.token_fetch_count == 4
        assert tokens_used == [
            "Bearer token_1",  # POST
            "Bearer token_2",  # GET poll 1
            "Bearer token_3",  # GET poll 2
            "Bearer token_4",  # GET poll 3
        ]
class TestDatabricksUCDatasource401Retry:
    """Tests for 401 retry behavior.

    Each test makes one request path return HTTP 401 once and verifies the
    datasource invalidates its credentials and retries exactly once with the
    refreshed token.
    """

    def test_401_during_initial_post(
        self, requests_mocker, refreshable_credential_provider
    ):
        """Test that 401 during initial POST triggers credential invalidation and retry."""
        post_call_count = [0]
        post_headers_captured = []

        def post_side_effect(url, *args, **kwargs):
            post_call_count[0] += 1
            headers = kwargs.get("headers", {})
            post_headers_captured.append(headers.get("Authorization", ""))
            # First POST returns 401
            if post_call_count[0] == 1:
                return mock.Mock(status_code=401)
            # Retry succeeds
            return mock.Mock(
                status_code=200,
                json=lambda: {
                    "statement_id": "test_stmt",
                    "status": {"state": "SUCCEEDED"},
                    "manifest": {"truncated": False, "chunks": []},
                },
            )

        requests_mocker["post"].side_effect = post_side_effect
        DatabricksUCDatasource(
            warehouse_id="test_warehouse",
            catalog="test_catalog",
            schema="test_schema",
            query="SELECT 1",
            credential_provider=refreshable_credential_provider,
        )
        # Verify retry occurred
        assert (
            post_call_count[0] == 2
        ), "Expected POST to be called twice (initial + retry)"
        # Verify invalidate was called
        assert refreshable_credential_provider.invalidate_count == 1
        # Verify first request used expired token, retry used refreshed token
        assert "expired_token" in post_headers_captured[0]
        assert "refreshed_token" in post_headers_captured[1]

    def test_401_during_polling(self, requests_mocker, refreshable_credential_provider):
        """Test that 401 during polling triggers credential invalidation and retry."""
        poll_call_count = [0]
        poll_headers_captured = []
        # Statement creation succeeds; the failure is injected while polling.
        requests_mocker["post"].return_value = mock.Mock(
            status_code=200,
            json=lambda: {
                "statement_id": "test_stmt",
                "status": {"state": "PENDING"},
            },
        )

        def get_side_effect(url, *args, **kwargs):
            poll_call_count[0] += 1
            headers = kwargs.get("headers", {})
            poll_headers_captured.append(headers.get("Authorization", ""))
            # First poll returns 401 with expired token
            if poll_call_count[0] == 1:
                return mock.Mock(status_code=401)
            # Retry succeeds
            return mock.Mock(
                status_code=200,
                json=lambda: {
                    "status": {"state": "SUCCEEDED"},
                    "manifest": {"truncated": False, "chunks": []},
                },
            )

        requests_mocker["get"].side_effect = get_side_effect
        DatabricksUCDatasource(
            warehouse_id="test_warehouse",
            catalog="test_catalog",
            schema="test_schema",
            query="SELECT 1",
            credential_provider=refreshable_credential_provider,
        )
        # Verify retry occurred
        assert (
            poll_call_count[0] == 2
        ), "Expected GET to be called twice (initial + retry)"
        # Verify invalidate was called once
        assert refreshable_credential_provider.invalidate_count == 1
        # Verify first request used expired token, retry used refreshed token
        assert "expired_token" in poll_headers_captured[0]
        assert "refreshed_token" in poll_headers_captured[1]

    def test_401_during_chunk_fetch(
        self, requests_mocker, refreshable_credential_provider
    ):
        """Test that 401 during chunk fetch triggers credential invalidation and retry."""
        chunk_fetch_count = [0]
        chunk_fetch_headers = []
        # Create Arrow data for external URL response
        table = pa.Table.from_pydict({"col1": [1, 2, 3]})
        sink = pa.BufferOutputStream()
        with pa.ipc.new_stream(sink, table.schema) as writer:
            writer.write_table(table)
        arrow_data = sink.getvalue().to_pybytes()
        # POST for statement creation succeeds
        requests_mocker["post"].return_value = mock.Mock(
            status_code=200,
            json=lambda: {
                "statement_id": "test_stmt",
                "status": {"state": "SUCCEEDED"},
                "manifest": {
                    "truncated": False,
                    "chunks": [{"chunk_index": 0, "row_count": 10, "byte_count": 100}],
                },
            },
        )

        def get_side_effect(url, *args, **kwargs):
            # Dispatch on URL shape: external download, chunk-link
            # resolution, or status polling.
            headers = kwargs.get("headers", {})
            # External URL fetch (no auth headers)
            if url.startswith("https://external/"):
                return mock.Mock(status_code=200, content=arrow_data)
            if "/result/chunks/" in url:
                chunk_fetch_count[0] += 1
                chunk_fetch_headers.append(headers.get("Authorization", ""))
                # First chunk fetch returns 401
                if chunk_fetch_count[0] == 1:
                    return mock.Mock(status_code=401)
                # Retry succeeds
                return mock.Mock(
                    status_code=200,
                    json=lambda: {
                        "external_links": [{"external_link": "https://external/data"}]
                    },
                )
            else:
                # Polling response (already succeeded in POST)
                return mock.Mock(
                    status_code=200,
                    json=lambda: {
                        "status": {"state": "SUCCEEDED"},
                        "manifest": {
                            "truncated": False,
                            "chunks": [
                                {"chunk_index": 0, "row_count": 10, "byte_count": 100}
                            ],
                        },
                    },
                )

        requests_mocker["get"].side_effect = get_side_effect
        # Create datasource
        datasource = DatabricksUCDatasource(
            warehouse_id="test_warehouse",
            catalog="test_catalog",
            schema="test_schema",
            query="SELECT 1",
            credential_provider=refreshable_credential_provider,
        )
        # Get read tasks and execute the read function to trigger chunk fetch
        read_tasks = datasource.get_read_tasks(parallelism=1)
        assert len(read_tasks) == 1
        # Execute the read function - this triggers chunk fetch
        read_fn = read_tasks[0].read_fn
        results = list(read_fn())
        # Verify chunk fetch retry occurred
        assert (
            chunk_fetch_count[0] == 2
        ), "Expected chunk fetch to be called twice (initial + retry)"
        # Verify invalidate was called during chunk fetch
        assert refreshable_credential_provider.invalidate_count == 1
        # Verify first chunk fetch used expired token, retry used refreshed token
        assert "expired_token" in chunk_fetch_headers[0]
        assert "refreshed_token" in chunk_fetch_headers[1]
        # Verify we got results
        assert len(results) == 1
class TestDatabricksUCDatasourceEmptyResult:
    """Tests for empty result handling."""

    def test_empty_result_returns_zero_count(self, requests_mocker, databricks_env):
        """Test that empty result returns zero count."""
        # The POST submits the statement; it starts out PENDING so the
        # datasource has to poll for completion.
        requests_mocker["post"].side_effect = lambda url, *args, **kwargs: MockResponse(
            status_code=200,
            _json_data={
                "statement_id": "test_stmt",
                "status": {"state": "PENDING"},
            },
        )
        # Polling via GET reports success with a manifest containing no chunks,
        # i.e. an empty result set.
        requests_mocker["get"].side_effect = lambda url, *args, **kwargs: MockResponse(
            status_code=200,
            _json_data={
                "status": {"state": "SUCCEEDED"},
                "manifest": {"truncated": False},
            },
        )
        ds = ray.data.read_databricks_tables(
            warehouse_id="dummy_warehouse",
            query="select * from dummy_table",
            catalog="dummy_catalog",
            schema="dummy_schema",
            override_num_blocks=1,
        )
        assert ds.count() == 0
if __name__ == "__main__":
    # Run this module's tests verbosely when executed as a script.
    raise SystemExit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/datasource/test_databricks_uc_datasource.py",
"license": "Apache License 2.0",
"lines": 575,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/datasource/test_uc_datasource.py | """Tests for Unity Catalog datasource (uc_datasource.py)."""
from unittest import mock
import pytest
from ray.data._internal.datasource.databricks_credentials import (
StaticCredentialProvider,
build_headers,
request_with_401_retry,
)
from ray.data._internal.datasource.uc_datasource import (
UnityCatalogConnector,
)
from ray.data.tests.datasource.databricks_test_utils import (
MockResponse,
RefreshableCredentialProvider,
)
# =============================================================================
# Pytest fixtures
# =============================================================================
@pytest.fixture
def static_credential_provider():
    """Fixture that provides a static credential provider."""
    provider = StaticCredentialProvider(
        token="test_token", host="https://test-host.databricks.com"
    )
    return provider
@pytest.fixture
def refreshable_credential_provider():
    """Fixture that provides a refreshable credential provider."""
    provider = RefreshableCredentialProvider()
    return provider
@pytest.fixture
def requests_mocker():
    """Fixture that mocks requests.get and requests.post."""
    # Patch both verbs in a single `with` so they are torn down together.
    with mock.patch("requests.get") as mock_get, mock.patch(
        "requests.post"
    ) as mock_post:
        yield {"get": mock_get, "post": mock_post}
# =============================================================================
# Test classes
# =============================================================================
class TestBuildHeaders:
    """Tests for build_headers function."""

    def test_builds_correct_headers(self, static_credential_provider):
        """Test that headers contain correct token and content type."""
        result = build_headers(static_credential_provider)
        assert result["Authorization"] == "Bearer test_token"
        assert result["Content-Type"] == "application/json"

    def test_fetches_fresh_token(self, refreshable_credential_provider):
        """Test that token is fetched fresh each time."""
        before = build_headers(refreshable_credential_provider)
        assert "expired_token" in before["Authorization"]
        # Invalidation should force the next build to pick up a new token.
        refreshable_credential_provider.invalidate()
        after = build_headers(refreshable_credential_provider)
        assert "refreshed_token" in after["Authorization"]
class TestRequestWith401Retry:
    """Tests for request_with_401_retry function."""

    def test_successful_request_no_retry(self, static_credential_provider):
        """Test that successful request doesn't trigger retry."""
        requester = mock.Mock(return_value=MockResponse(status_code=200))
        resp = request_with_401_retry(
            requester,
            "https://test-url.com",
            static_credential_provider,
        )
        assert resp.status_code == 200
        assert requester.call_count == 1

    def test_401_triggers_invalidate_and_retry(self, refreshable_credential_provider):
        """Test that 401 response triggers credential invalidation and retry."""
        attempts = [0]
        seen_auth_headers = []

        def fake_request(url, headers=None, **kwargs):
            attempts[0] += 1
            seen_auth_headers.append(headers.get("Authorization", ""))
            # Fail the first attempt with 401; succeed afterwards.
            status = 401 if attempts[0] == 1 else 200
            return MockResponse(status_code=status)

        resp = request_with_401_retry(
            fake_request,
            "https://test-url.com",
            refreshable_credential_provider,
        )
        assert resp.status_code == 200
        assert attempts[0] == 2
        assert refreshable_credential_provider.invalidate_count == 1
        # First attempt used the stale token, the retry the refreshed one.
        assert "expired_token" in seen_auth_headers[0]
        assert "refreshed_token" in seen_auth_headers[1]

    def test_non_401_error_raises(self, static_credential_provider):
        """Test that non-401 errors are raised without retry."""
        requester = mock.Mock(return_value=MockResponse(status_code=500))
        with pytest.raises(Exception, match="HTTP Error 500"):
            request_with_401_retry(
                requester,
                "https://test-url.com",
                static_credential_provider,
            )
        assert requester.call_count == 1
class TestUnityCatalogConnectorInit:
    """Tests for UnityCatalogConnector initialization."""

    def test_init_with_credential_provider(self, static_credential_provider):
        """Test initialization with credential provider."""
        conn = UnityCatalogConnector(
            table_full_name="catalog.schema.table",
            credential_provider=static_credential_provider,
        )
        assert conn.table_full_name == "catalog.schema.table"
        assert conn.base_url == "https://test-host.databricks.com"

    @pytest.mark.parametrize(
        "input_host,expected_url",
        [
            ("test-host.databricks.com", "https://test-host.databricks.com"),
            ("https://test-host.databricks.com/", "https://test-host.databricks.com"),
            ("http://test-host.databricks.com", "http://test-host.databricks.com"),
        ],
        ids=["adds_https", "strips_trailing_slash", "preserves_http"],
    )
    def test_init_normalizes_host_url(self, input_host, expected_url):
        """Test that host URL is normalized correctly."""
        conn = UnityCatalogConnector(
            table_full_name="catalog.schema.table",
            credential_provider=StaticCredentialProvider(
                token="token", host=input_host
            ),
        )
        assert conn.base_url == expected_url
class TestUnityCatalogConnector401Retry:
    """Tests for 401 retry behavior in UnityCatalogConnector."""

    def test_401_during_get_table_info(
        self, requests_mocker, refreshable_credential_provider
    ):
        """Test that 401 during _get_table_info triggers retry."""
        # Mutable cells so the closure below can record per-call state.
        call_count = [0]
        headers_captured = []

        def get_side_effect(url, headers=None, **kwargs):
            # First GET fails with 401; the retry returns table info.
            call_count[0] += 1
            headers_captured.append(headers.get("Authorization", ""))
            if call_count[0] == 1:
                return MockResponse(status_code=401)
            return MockResponse(
                status_code=200,
                _json_data={"table_id": "test_table_id", "name": "table"},
            )
        requests_mocker["get"].side_effect = get_side_effect
        connector = UnityCatalogConnector(
            table_full_name="catalog.schema.table",
            credential_provider=refreshable_credential_provider,
        )
        result = connector._get_table_info()
        assert result["table_id"] == "test_table_id"
        # Exactly one retry: initial 401 + successful second attempt.
        assert call_count[0] == 2
        assert refreshable_credential_provider.invalidate_count == 1
        # The retry must carry the refreshed token, not the expired one.
        assert "expired_token" in headers_captured[0]
        assert "refreshed_token" in headers_captured[1]

    def test_401_during_get_creds(
        self, requests_mocker, refreshable_credential_provider
    ):
        """Test that 401 during _get_creds triggers retry."""
        # First set up table info
        requests_mocker["get"].return_value = MockResponse(
            status_code=200,
            _json_data={"table_id": "test_table_id", "name": "table"},
        )
        connector = UnityCatalogConnector(
            table_full_name="catalog.schema.table",
            credential_provider=refreshable_credential_provider,
        )
        connector._get_table_info()
        # Reset for _get_creds test
        refreshable_credential_provider.invalidate_count = 0
        refreshable_credential_provider.current_token = "expired_token"
        call_count = [0]
        post_headers_captured = []

        def post_side_effect(url, headers=None, **kwargs):
            # First POST fails with 401; the retry returns the signed URL.
            call_count[0] += 1
            post_headers_captured.append(headers.get("Authorization", ""))
            if call_count[0] == 1:
                return MockResponse(status_code=401)
            return MockResponse(
                status_code=200,
                _json_data={"url": "s3://bucket/path"},
            )
        requests_mocker["post"].side_effect = post_side_effect
        connector._get_creds()
        assert connector._table_url == "s3://bucket/path"
        assert call_count[0] == 2
        assert refreshable_credential_provider.invalidate_count == 1
        assert "expired_token" in post_headers_captured[0]
        assert "refreshed_token" in post_headers_captured[1]
class TestReadUnityCatalogAPI:
    """Tests for read_unity_catalog API function."""

    @pytest.mark.parametrize(
        "credential_provider, url, token",
        [
            # Either a provider object alone...
            (
                StaticCredentialProvider(
                    token="my_token", host="https://my-host.databricks.com"
                ),
                None,
                None,
            ),
            # ...or an explicit url + token pair is a complete credential set.
            (None, "https://my-host.databricks.com", "my_token"),
        ],
        ids=["with_credential_provider", "with_url_and_token"],
    )
    def test_successful_read_with_valid_credentials(
        self, requests_mocker, credential_provider, url, token
    ):
        """Test read_unity_catalog succeeds with valid credentials."""
        import ray.data
        # Stub out the connector's read() so no network/cluster is needed;
        # we only verify that the API wires credentials through to it.
        with mock.patch.object(
            UnityCatalogConnector, "read", return_value=mock.Mock()
        ) as mock_read:
            ray.data.read_unity_catalog(
                table="catalog.schema.table",
                credential_provider=credential_provider,
                url=url,
                token=token,
            )
            mock_read.assert_called_once()

    @pytest.mark.parametrize(
        "url,token",
        [
            (None, None),
            ("https://my-host.databricks.com", None),
            (None, "my_token"),
        ],
        ids=["no_credentials", "only_url", "only_token"],
    )
    def test_raises_with_incomplete_credentials(self, url, token):
        """Test that read_unity_catalog raises when credentials are incomplete."""
        import ray.data
        with pytest.raises(ValueError, match="Either 'credential_provider' or both"):
            ray.data.read_unity_catalog(
                table="catalog.schema.table",
                url=url,
                token=token,
            )
if __name__ == "__main__":
    # Run this module's tests verbosely when executed as a script.
    raise SystemExit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/datasource/test_uc_datasource.py",
"license": "Apache License 2.0",
"lines": 235,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/serve/_private/direct_ingress_http_util.py | import asyncio
import logging
from starlette.types import Message, Receive, Scope
from ray.serve._private.constants import (
SERVE_LOGGER_NAME,
)
logger = logging.getLogger(SERVE_LOGGER_NAME)
class ASGIDIReceiveProxy:
    """Proxies ASGI receive from an actor.

    The `receive_asgi_messages` callback will be called repeatedly to fetch messages
    until a disconnect message is received.

    Messages may arrive on a different (system) event loop than the one running
    user code; `_fetch_until_disconnect` bridges the two via
    `call_soon_threadsafe` when needed.
    """

    def __init__(
        self,
        scope: Scope,
        receive: Receive,
        user_event_loop: asyncio.AbstractEventLoop,
    ):
        self._type = scope["type"]  # Either 'http' or 'websocket'.
        # Lazy init the queue to ensure it is created in the user code event loop.
        self._queue = None
        self._receive = receive
        self._user_event_loop = user_event_loop
        # Once set, __call__ replays this message after the queue drains.
        self._disconnect_message = None

    def _get_default_disconnect_message(self) -> Message:
        """Return the appropriate disconnect message based on the connection type.

        HTTP ASGI spec:
            https://asgi.readthedocs.io/en/latest/specs/www.html#disconnect-receive-event
        WS ASGI spec:
            https://asgi.readthedocs.io/en/latest/specs/www.html#disconnect-receive-event-ws
        """
        if self._type == "websocket":
            return {
                "type": "websocket.disconnect",
                # 1005 is the default disconnect code according to the ASGI spec.
                "code": 1005,
            }
        else:
            return {"type": "http.disconnect"}

    @property
    def queue(self) -> asyncio.Queue:
        # Created on first access so it binds to the loop that touches it first
        # (intended to be the user code event loop; see __init__ comment).
        if self._queue is None:
            self._queue = asyncio.Queue()
        return self._queue

    def put_message(self, msg: Message):
        """Enqueue a message without blocking (asyncio.Queue is unbounded)."""
        self.queue.put_nowait(msg)

    def close_queue(self):
        # NOTE(review): asyncio.Queue has no `close()` method (only `shutdown()`,
        # added in Python 3.13), so this raises AttributeError if `self._queue`
        # is the asyncio.Queue created lazily by the `queue` property — confirm
        # whether a custom queue type is expected here.
        self.queue.close()

    def fetch_until_disconnect_task(self) -> asyncio.Task:
        """Schedule `_fetch_until_disconnect` on the running loop and return the task."""
        return asyncio.create_task(self._fetch_until_disconnect())

    async def _fetch_until_disconnect(self):
        """Fetch messages repeatedly until a disconnect message is received.

        If a disconnect message is received, this function exits and returns it.
        If an exception occurs, it will be raised on the next __call__ and no more
        messages will be received.

        Note that this is meant to be called in the system event loop.

        Returns None for HTTP disconnects and the close code for websocket
        disconnects.
        """
        while True:
            msg = await self._receive()
            # Hand the message to the user loop; use a threadsafe handoff when
            # we are running on a different loop than the consumer.
            if asyncio.get_running_loop() == self._user_event_loop:
                await self.queue.put(msg)
            else:
                self._user_event_loop.call_soon_threadsafe(self.put_message, msg)
            if msg["type"] == "http.disconnect":
                self._disconnect_message = msg
                return None
            if msg["type"] == "websocket.disconnect":
                self._disconnect_message = msg
                return msg["code"]

    async def __call__(self) -> Message:
        """Return the next message once available.

        This will repeatedly return a disconnect message once it's been received
        (per the ASGI spec, `receive` after disconnect keeps yielding it).
        """
        if self.queue.empty() and self._disconnect_message is not None:
            return self._disconnect_message
        message = await self.queue.get()
        # Exceptions from the fetch task are forwarded through the queue and
        # re-raised to the caller here.
        if isinstance(message, Exception):
            raise message
        return message
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/_private/direct_ingress_http_util.py",
"license": "Apache License 2.0",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/serve/_private/node_port_manager.py | import heapq
import logging
from typing import Dict, List, Optional, Set, Tuple
from ray.serve._private.common import RequestProtocol
from ray.serve._private.constants import (
RAY_SERVE_DIRECT_INGRESS_MAX_GRPC_PORT,
RAY_SERVE_DIRECT_INGRESS_MAX_HTTP_PORT,
RAY_SERVE_DIRECT_INGRESS_MIN_GRPC_PORT,
RAY_SERVE_DIRECT_INGRESS_MIN_HTTP_PORT,
SERVE_LOGGER_NAME,
)
logger = logging.getLogger(SERVE_LOGGER_NAME)
class NoAvailablePortError(Exception):
    """Raised when a node's port pool for a given protocol is exhausted."""

    def __init__(self, protocol: str, node_id: str):
        super().__init__(
            f"No available ports on node {node_id} for {protocol} protocol."
        )
class PortAllocator:
    """Manages a pool of ports for a specific protocol (e.g., HTTP or gRPC).

    Ports are handed out in ascending order via a min-heap so that all nodes
    tend to reuse the same low port numbers (see NodePortManager for why).
    Not thread-safe; intended to be used from the controller's single thread.
    """

    def __init__(self, min_port: int, max_port: int, protocol: str, node_id: str):
        self._protocol = protocol
        self._node_id = node_id
        # TODO(abrar): add a validation here to ensure min_port and max_port dont overlap with
        # ray params min_worker_port and max_worker_port.
        # NOTE(review): range(min_port, max_port) excludes max_port itself even
        # though update_port_if_missing accepts port == MAX — confirm whether
        # the upper bound is meant to be inclusive.
        self._available_ports = list(range(min_port, max_port))
        heapq.heapify(self._available_ports)
        # replica_id -> port currently held by that replica.
        self._allocated_ports: Dict[str, int] = {}
        # Ports permanently removed from circulation (e.g. taken by another process).
        self._blocked_ports: Set[int] = set()

    def update_port_if_missing(self, replica_id: str, port: Optional[int]):
        """Update port value for a replica.

        Used during controller recovery to re-register a port a live replica is
        already bound to. No-op if the replica already has a recorded port.

        NOTE(review): returns the port on the recovery path but (implicitly)
        None on the early-return path — callers currently ignore the return
        value, but confirm before relying on it.
        """
        if replica_id in self._allocated_ports:
            return
        assert (
            port is not None
        ), f"Port is None for {self._protocol} protocol on replica {replica_id} on node {self._node_id}"
        # Warn (but still record) if the recovered port falls outside the
        # configured range for its protocol.
        if self._protocol == RequestProtocol.HTTP:
            if not (
                RAY_SERVE_DIRECT_INGRESS_MIN_HTTP_PORT
                <= port
                <= RAY_SERVE_DIRECT_INGRESS_MAX_HTTP_PORT
            ):
                logger.warning(f"HTTP port out of range: {port}")
        elif self._protocol == RequestProtocol.GRPC:
            if not (
                RAY_SERVE_DIRECT_INGRESS_MIN_GRPC_PORT
                <= port
                <= RAY_SERVE_DIRECT_INGRESS_MAX_GRPC_PORT
            ):
                logger.warning(f"GRPC port out of range: {port}")
        self._allocated_ports[replica_id] = port
        logger.info(
            f"Recovered {self._protocol} port {port} for replica {replica_id} on node {self._node_id}"
        )
        return port

    def allocate(self, replica_id: str) -> int:
        """Pop the smallest free, non-blocked port and assign it to the replica.

        Idempotent: a replica that already holds a port gets the same one back.
        Raises NoAvailablePortError when the pool is exhausted.
        """
        if replica_id in self._allocated_ports:
            logger.warning(
                f"{self._protocol} port already allocated for replica {replica_id}"
            )
            return self._allocated_ports[replica_id]
        while self._available_ports:
            port = heapq.heappop(self._available_ports)
            # Blocked ports are discarded here; they never return to the heap.
            if port not in self._blocked_ports:
                self._allocated_ports[replica_id] = port
                logger.info(
                    f"Allocated {self._protocol} port {port} to replica {replica_id} on node {self._node_id}"
                )
                return port
        raise NoAvailablePortError(self._protocol, self._node_id)

    def release(self, replica_id: str, port: int, block_port: bool = False):
        """
        Releases a port for a replica.

        Args:
            replica_id: The ID of the replica to release the port for.
            port: The port to release.
            block_port: Whether to block the port from being allocated again. Use this in
                situations where the port is being released due some other process is using it.

        Raises:
            ValueError: if no port is recorded for the replica.
        """
        if replica_id not in self._allocated_ports:
            raise ValueError(
                f"{self._protocol} port not allocated for replica {replica_id} on node {self._node_id}"
            )
        expected_port = self._allocated_ports[replica_id]
        assert expected_port == port, (
            f"{self._protocol} port mismatch for replica {replica_id} on node {self._node_id}: "
            f"expected {expected_port}, got {port}"
        )
        # The port goes back on the heap even when blocked; allocate() filters
        # blocked ports out when popping.
        heapq.heappush(self._available_ports, port)
        del self._allocated_ports[replica_id]
        logger.info(
            f"Released {self._protocol} port {port} for replica {replica_id} on node {self._node_id}"
        )
        if block_port:
            self._blocked_ports.add(port)
            logger.info(f"Blocked {self._protocol} port {port} on node {self._node_id}")

    def prune(self, active_replica_ids: Set[str]):
        """Release ports held by replicas not in `active_replica_ids` (leak reclaim)."""
        # Iterate over a snapshot of keys since release() mutates the dict.
        for replica_id in list(self._allocated_ports.keys()):
            if replica_id not in active_replica_ids:
                port = self._allocated_ports[replica_id]
                logger.info(
                    f"Cleaning up {self._protocol} port {port} for stale replica {replica_id} on node {self._node_id}"
                )
                self.release(replica_id, port)

    def get_port(self, replica_id: str) -> int:
        """Return the port held by `replica_id`; raise ValueError if none."""
        if replica_id not in self._allocated_ports:
            raise ValueError(
                f"{self._protocol} port not allocated for replica {replica_id} on node {self._node_id}"
            )
        return self._allocated_ports[replica_id]

    def is_port_allocated(self, replica_id: str) -> bool:
        """Return True if `replica_id` currently holds a port."""
        return replica_id in self._allocated_ports
class NodePortManager:
    """
    This class is responsible for managing replica-specific port allocations on a node,
    and is only used in direct ingress mode, where each Serve replica is exposed individually
    via a Kubernetes or GCP or AWS Ingress.
    The primary goal of this class is to assign ports in a consistent and efficient manner,
    minimizing EndpointSlice fragmentation in Kubernetes. It uses a min-heap strategy to
    allocate ports incrementally, ensuring that all nodes tend to reuse the same port numbers.
    Background:
    Kubernetes groups endpoints into EndpointSlices based on the set of ports exposed by each Pod.
    If Pods expose different port combinations (e.g., due to random port assignment), Kubernetes
    generates separate EndpointSlices per unique port list. This leads to unnecessary fragmentation
    and increased resource consumption.
    By allocating ports deterministically, we ensure:
    - Consistent port usage across all nodes
    - Fewer unique port lists, reducing the number of EndpointSlices created
    - Improved performance and resource utilization
    Although Kubernetes does not allow users to explicitly configure the ports included in
    EndpointSlices, maintaining a uniform port layout across nodes is still beneficial.
    Port lifecycle:
    - Replicas are expected to release their ports when stopped
    - If a replica crashes without releasing its port, the controller loop will detect and
      reclaim leaked ports during reconciliation
    Note:
    Although this strategy is designed with Kubernetes in mind, it is applied uniformly
    across all platforms for consistency.
    """

    # Process-wide registry of per-node managers, keyed by node ID.
    _node_managers: Dict[str, "NodePortManager"] = {}

    @classmethod
    def get_node_manager(cls, node_id: str) -> "NodePortManager":
        """Return the (lazily created) manager for `node_id`."""
        # this doesn't need to be behind a lock because it will already be called from same thread
        if node_id not in cls._node_managers:
            logger.info(f"Creating node manager for node {node_id}")
            cls._node_managers[node_id] = cls(node_id)
        return cls._node_managers[node_id]

    @classmethod
    def prune(cls, node_id_to_alive_replica_ids: Dict[str, Set[str]]):
        """Drop managers for dead nodes and reclaim leaked ports on live ones.

        Args:
            node_id_to_alive_replica_ids: map of node ID -> set of replica IDs
                still alive on that node; nodes absent from the map are removed.
        """
        # this doesn't need to be behind a lock because it will already be called from same thread
        for node_id in list(cls._node_managers):
            if node_id not in node_id_to_alive_replica_ids:
                logger.info(f"Removing node manager for node {node_id}")
                del cls._node_managers[node_id]
            else:
                manager = cls._node_managers[node_id]
                manager._prune_replica_ports(node_id_to_alive_replica_ids[node_id])

    @classmethod
    def update_ports(cls, ingress_replicas_info: List[Tuple[str, str, int, int]]):
        """Update port values for ingress replicas.

        Args:
            ingress_replicas_info: (node_id, replica_id, http_port, grpc_port)
                tuples; entries with node_id None are skipped. Used during
                controller recovery to re-register ports replicas already hold.
        """
        for node_id, replica_id, http_port, grpc_port in ingress_replicas_info:
            if node_id is None:
                continue
            node_port_manager = cls.get_node_manager(node_id)
            if http_port is not None:
                node_port_manager._http_allocator.update_port_if_missing(
                    replica_id,
                    http_port,
                )
            if grpc_port is not None:
                node_port_manager._grpc_allocator.update_port_if_missing(
                    replica_id,
                    grpc_port,
                )

    def __init__(self, node_id: str):
        self._node_id = node_id
        # One independent allocator per protocol, each with its own port range.
        self._http_allocator = PortAllocator(
            RAY_SERVE_DIRECT_INGRESS_MIN_HTTP_PORT,
            RAY_SERVE_DIRECT_INGRESS_MAX_HTTP_PORT,
            protocol=RequestProtocol.HTTP,
            node_id=node_id,
        )
        self._grpc_allocator = PortAllocator(
            RAY_SERVE_DIRECT_INGRESS_MIN_GRPC_PORT,
            RAY_SERVE_DIRECT_INGRESS_MAX_GRPC_PORT,
            protocol=RequestProtocol.GRPC,
            node_id=node_id,
        )

    def _prune_replica_ports(self, active_replica_ids: Set[str]):
        """Release ports held by replicas no longer in `active_replica_ids`."""
        self._http_allocator.prune(active_replica_ids)
        self._grpc_allocator.prune(active_replica_ids)

    def allocate_port(self, replica_id: str, protocol: RequestProtocol) -> int:
        """Allocate and return a port for `replica_id` under `protocol`."""
        if protocol == RequestProtocol.HTTP:
            return self._http_allocator.allocate(replica_id)
        elif protocol == RequestProtocol.GRPC:
            return self._grpc_allocator.allocate(replica_id)
        else:
            raise ValueError(f"Unsupported protocol: {protocol}")

    def release_port(
        self,
        replica_id: str,
        port: int,
        protocol: RequestProtocol,
        block_port: bool = False,
    ):
        """Release `port` for `replica_id`; optionally block it from reuse."""
        if protocol == RequestProtocol.HTTP:
            self._http_allocator.release(replica_id, port, block_port)
        elif protocol == RequestProtocol.GRPC:
            self._grpc_allocator.release(replica_id, port, block_port)
        else:
            raise ValueError(f"Unsupported protocol: {protocol}")

    def get_port(self, replica_id: str, protocol: RequestProtocol) -> int:
        """Return the port held by `replica_id` for `protocol`; raise if none."""
        if protocol == RequestProtocol.HTTP:
            return self._http_allocator.get_port(replica_id)
        elif protocol == RequestProtocol.GRPC:
            return self._grpc_allocator.get_port(replica_id)
        else:
            raise ValueError(f"Unsupported protocol: {protocol}")

    def is_port_allocated(self, replica_id: str, protocol: RequestProtocol) -> bool:
        """Return True if `replica_id` holds a port for `protocol`."""
        if protocol == RequestProtocol.HTTP:
            return self._http_allocator.is_port_allocated(replica_id)
        elif protocol == RequestProtocol.GRPC:
            return self._grpc_allocator.is_port_allocated(replica_id)
        else:
            raise ValueError(f"Unsupported protocol: {protocol}")
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/_private/node_port_manager.py",
"license": "Apache License 2.0",
"lines": 223,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/serve/_private/replica_response_generator.py | import asyncio
import time
from typing import Any, AsyncGenerator, Callable, Optional
from ray.serve._private.proxy_response_generator import (
_ProxyResponseGeneratorBase,
swallow_cancelled,
)
from ray.serve._private.utils import calculate_remaining_timeout
class ReplicaResponseGenerator(_ProxyResponseGeneratorBase):
    """Generic wrapper that adds disconnect detection to any async generator.

    This can be used to wrap any async generator and add timeout and disconnect
    detection capabilities. When a disconnect is detected, the generator will
    raise asyncio.CancelledError.
    """

    def __init__(
        self,
        async_generator: AsyncGenerator[Any, None],
        *,
        timeout_s: Optional[float] = None,
        disconnected_task: Optional[asyncio.Task] = None,
        result_callback: Optional[Callable[[Any], Any]] = None,
    ):
        super().__init__(
            timeout_s=timeout_s,
            disconnected_task=disconnected_task,
            result_callback=result_callback,
        )
        self._async_generator = async_generator
        # Latch: once the wrapped generator finishes or errors, the wrapper
        # stays exhausted and never touches the generator again.
        self._done = False

    async def __anext__(self):
        if self._done:
            raise StopAsyncIteration
        try:
            result = await self._get_next_result()
            # Optional per-item transform supplied by the caller.
            if self._result_callback is not None:
                result = self._result_callback(result)
            return result
        # Any terminal condition (exhaustion, cancellation, error) latches
        # _done; `from None` drops the chained context for cleaner tracebacks.
        except (StopAsyncIteration, asyncio.CancelledError) as e:
            self._done = True
            raise e from None
        except Exception as e:
            self._done = True
            raise e from None

    async def _await_response_anext(self) -> Any:
        """Await the next item from the wrapped generator."""
        return await self._async_generator.__anext__()

    async def _get_next_result(self) -> Any:
        """Get the next result from the async generator with disconnect detection.

        Raises:
            TimeoutError: when the configured timeout budget is exhausted.
            asyncio.CancelledError: when the disconnect task fires first.
        """
        # If there's no disconnect detection needed, use direct await to preserve
        # cancellation propagation (important for gRPC cancellation)
        remaining_timeout = calculate_remaining_timeout(
            timeout_s=self._timeout_s,
            start_time_s=self._start_time_s,
            curr_time_s=time.time(),
        )
        if self._disconnected_task is None:
            try:
                return await asyncio.wait_for(
                    self._await_response_anext(), timeout=remaining_timeout
                )
            except asyncio.TimeoutError:
                # Normalize to the builtin TimeoutError used by callers.
                raise TimeoutError()
        # Otherwise use asyncio.wait for disconnect detection
        next_result_task = asyncio.create_task(self._await_response_anext())
        tasks = [next_result_task, self._disconnected_task]
        done, _ = await asyncio.wait(
            tasks,
            return_when=asyncio.FIRST_COMPLETED,
            timeout=remaining_timeout,
        )
        if next_result_task in done:
            # .result() re-raises any exception from the generator.
            return next_result_task.result()
        elif self._disconnected_task in done:
            # Client went away: cancel the in-flight fetch and swallow its
            # CancelledError so it doesn't surface as "exception never retrieved".
            next_result_task.cancel()
            next_result_task.add_done_callback(swallow_cancelled)
            raise asyncio.CancelledError()
        else:
            # Timeout occurred
            next_result_task.cancel()
            next_result_task.add_done_callback(swallow_cancelled)
            raise TimeoutError()
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/_private/replica_response_generator.py",
"license": "Apache License 2.0",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/serve/tests/test_direct_ingress.py | import asyncio
import json
import os
import sys
import time
from concurrent.futures import ThreadPoolExecutor
from typing import Optional, Tuple
from uuid import UUID
import grpc
import httpx
import pytest
from fastapi import FastAPI
from starlette.requests import Request
from starlette.responses import PlainTextResponse
import ray
from ray import serve
from ray._common.test_utils import Semaphore, SignalActor, wait_for_condition
from ray.actor import ActorHandle
from ray.dashboard.modules.serve.sdk import ServeSubmissionClient
from ray.serve._private.common import DeploymentID
from ray.serve._private.config import DeploymentConfig
from ray.serve._private.constants import (
DEFAULT_AUTOSCALING_POLICY_NAME,
HEALTHY_MESSAGE,
RAY_SERVE_DIRECT_INGRESS_MAX_HTTP_PORT,
RAY_SERVE_DIRECT_INGRESS_MIN_GRPC_PORT,
RAY_SERVE_DIRECT_INGRESS_MIN_HTTP_PORT,
RAY_SERVE_DIRECT_INGRESS_PORT_RETRY_COUNT,
RAY_SERVE_ENABLE_DIRECT_INGRESS,
RAY_SERVE_ENABLE_HA_PROXY,
SERVE_DEFAULT_APP_NAME,
)
from ray.serve._private.deployment_info import DeploymentInfo
from ray.serve._private.test_utils import (
check_deployment_status,
check_num_replicas_gte,
check_num_replicas_lte,
get_application_url,
get_application_urls,
ping_grpc_list_applications,
send_signal_on_cancellation,
)
from ray.serve.autoscaling_policy import default_autoscaling_policy
from ray.serve.config import ProxyLocation
from ray.serve.context import _get_global_client
from ray.serve.generated import serve_pb2, serve_pb2_grpc
from ray.serve.generated.serve_pb2 import DeploymentRoute
from ray.serve.schema import (
ApplicationStatus,
DeploymentStatus,
RequestProtocol,
ServeDeploySchema,
ServeInstanceDetails,
)
from ray.serve.tests.conftest import TEST_GRPC_SERVICER_FUNCTIONS
@ray.remote
class Collector:
    """Actor that collects items and returns everything gathered so far."""

    def __init__(self):
        self._items = []

    def add(self, item):
        self._items.append(item)

    def get(self):
        return self._items
@pytest.fixture
def _skip_if_ff_not_enabled():
    """Skip the requesting test unless direct ingress is enabled via env."""
    if not RAY_SERVE_ENABLE_DIRECT_INGRESS:
        pytest.skip(reason="RAY_SERVE_ENABLE_DIRECT_INGRESS not set.")
@pytest.fixture
def _skip_if_haproxy_enabled():
    """Skip the requesting test when the HA proxy feature flag is enabled."""
    if RAY_SERVE_ENABLE_HA_PROXY:
        pytest.skip(reason="RAY_SERVE_ENABLE_HA_PROXY is set.")
@pytest.fixture(scope="module")
def _shared_serve_instance():
    """Module-scoped serve instance fixture.

    Starts a Ray cluster and a Serve instance once for the whole module,
    yields the global Serve client, and tears everything down afterwards.
    Also temporarily overrides the min-draining-period env var in HA-proxy
    mode (restored on teardown).

    NOTE(review): if setup raises after the env var is set but before
    `yield`, the teardown below does not run and the override leaks —
    confirm whether a try/finally is warranted.
    """
    # Save original env var value
    env_var_name = "RAY_SERVE_DIRECT_INGRESS_MIN_DRAINING_PERIOD_S"
    original_value = os.environ.get(env_var_name)
    if RAY_SERVE_ENABLE_HA_PROXY:
        # Setting a longer minimum draining period ensures that the client connecting
        # to the uvicorn server closes the connection first. This prevents the socket
        # used by the uvicorn server from entering the TIME_WAIT tcp state, which blocks
        # the port from being immediately reused and causes failures in subsequent tests
        # that condition on specific ports assignments.
        os.environ[env_var_name] = "6"
    ray.init(
        num_cpus=36,
        namespace="default_test_namespace",
        _metrics_export_port=9999,
        _system_config={"metrics_report_interval_ms": 1000, "task_retry_delay_ms": 50},
    )
    serve.start(
        proxy_location=ProxyLocation.HeadOnly,
        http_options={"host": "0.0.0.0"},
        grpc_options={
            "port": 9000,
            "grpc_servicer_functions": TEST_GRPC_SERVICER_FUNCTIONS,
        },
    )
    yield _get_global_client()
    # Cleanup after all tests in this module complete
    serve.shutdown()
    ray.shutdown()
    # Restore original env var value
    if original_value is not None:
        os.environ[env_var_name] = original_value
    elif env_var_name in os.environ:
        del os.environ[env_var_name]
@pytest.fixture
def serve_instance(_shared_serve_instance):
    """Per-test view of the shared Serve client; resets app state on teardown."""
    yield _shared_serve_instance
    # Clear all state for 2.x applications and deployments.
    _shared_serve_instance.delete_all_apps()
    # Clear the ServeHandle cache between tests to avoid them piling up.
    _shared_serve_instance.shutdown_cached_handles()
@serve.deployment(name="default-deployment")
class Hybrid:
    """Test deployment serving both HTTP (__call__) and gRPC (Method1).

    Behavior is driven by optional signal actors so tests can control
    initialization, request blocking, health-check failure, and shutdown.
    """

    def __init__(
        self,
        *,
        message: str = "",
        raise_error: bool = False,
        wait_signal: Optional[ActorHandle] = None,
        fail_hc_signal: Optional[ActorHandle] = None,
        shutdown_signal: Optional[ActorHandle] = None,
        initialize_signal: Optional[ActorHandle] = None,
    ):
        self._message = message
        self._raise_error = raise_error
        self._wait_signal = wait_signal
        self._fail_hc_signal = fail_hc_signal
        self._shutdown_signal = shutdown_signal
        # Block the constructor (and thus replica startup) until signaled.
        if initialize_signal is not None:
            ray.get(initialize_signal.wait.remote())

    async def check_health(self):
        # Fail health check once the signal is sent, else pass.
        if self._fail_hc_signal is not None:
            try:
                # Short timeout: if the signal hasn't fired yet, wait_for times
                # out and the health check passes.
                await asyncio.wait_for(
                    asyncio.gather(self._fail_hc_signal.wait.remote()), timeout=0.1
                )
                raise RuntimeError("Failing health check!")
            except asyncio.TimeoutError:
                pass

    async def __del__(self):
        # Optionally block replica shutdown until signaled.
        if self._shutdown_signal is not None:
            await self._shutdown_signal.wait.remote()

    async def __call__(self, request: Request):
        """HTTP entrypoint: optionally error or block, then return the message."""
        if self._raise_error:
            raise RuntimeError("oops!")
        if self._wait_signal:
            await self._wait_signal.wait.remote()
        return self._message

    async def Method1(
        self, request: serve_pb2.UserDefinedMessage
    ) -> serve_pb2.UserDefinedResponse:
        """gRPC entrypoint: same behavior as __call__ but returns a proto."""
        if self._raise_error:
            raise RuntimeError("oops!")
        if self._wait_signal:
            await self._wait_signal.wait.remote()
        return serve_pb2.UserDefinedResponse(greeting=self._message)
def get_target_groups(
    app_name: str = SERVE_DEFAULT_APP_NAME,
    from_proxy_manager: bool = False,
):
    """Fetch target groups for `app_name` from the Serve controller."""
    client = _get_global_client(_health_check_controller=True)
    ref = client._controller.get_target_groups.remote(app_name, from_proxy_manager)
    return ray.get(ref)
def test_proxy_is_started_on_head_only_mode(_skip_if_ff_not_enabled, serve_instance):
    """In HeadOnly mode exactly one proxy (on the head node) should exist."""
    proxies = serve.status().proxies
    assert len(proxies) == 1
def get_http_ports(route_prefix=None, first_only=True):
    """Return HTTP target ports, optionally filtered by route prefix.

    With `first_only` (the default) only the first matching target group's
    ports are returned (raising StopIteration if none match); otherwise
    the ports of all matching groups are merged into one list.
    """
    target_groups = get_target_groups(app_name=None, from_proxy_manager=True)
    matching = (
        tg
        for tg in target_groups
        if tg.protocol == RequestProtocol.HTTP
        and (route_prefix is None or tg.route_prefix == route_prefix)
    )
    if first_only:
        # next() raises StopIteration when nothing matches, as before.
        return [target.port for target in next(matching).targets]
    ports = []
    for tg in matching:
        ports.extend(target.port for target in tg.targets)
    return ports
def get_grpc_ports(route_prefix=None, first_only=True):
    """Return gRPC target ports, optionally filtered by route prefix.

    With `first_only` (the default) only the first matching target group's
    ports are returned (raising StopIteration if none match); otherwise
    the ports of all matching groups are merged into one list.
    """
    target_groups = get_target_groups(app_name=None, from_proxy_manager=True)
    matching = (
        tg
        for tg in target_groups
        if tg.protocol == RequestProtocol.GRPC
        and (route_prefix is None or tg.route_prefix == route_prefix)
    )
    if first_only:
        # next() raises StopIteration when nothing matches, as before.
        return [target.port for target in next(matching).targets]
    ports = []
    for tg in matching:
        ports.extend(target.port for target in tg.targets)
    return ports
def test_basic(_skip_if_ff_not_enabled, serve_instance):
    """Sanity check: HTTP and gRPC requests reach the replica directly."""
    serve.run(Hybrid.bind(message="Hello world!"))
    http_urls = get_application_urls("HTTP")
    grpc_urls = get_application_urls("gRPC", from_proxy_manager=True)
    # Every HTTP target should serve the configured message.
    for url in http_urls:
        response = httpx.get(url)
        response.raise_for_status()
        assert response.text == "Hello world!"
    # Every gRPC target should serve the configured message.
    for url in grpc_urls:
        channel = grpc.insecure_channel(url)
        stub = serve_pb2_grpc.UserDefinedServiceStub(channel)
        reply = stub.Method1(serve_pb2.UserDefinedMessage())
        assert reply.greeting == "Hello world!"
        channel.close()
def test_internal_server_error(_skip_if_ff_not_enabled, serve_instance):
    """Replica errors surface as HTTP 500 / gRPC INTERNAL on direct ingress."""
    serve.run(Hybrid.bind(raise_error=True))
    http_urls = get_application_urls("HTTP")
    grpc_urls = get_application_urls("gRPC", from_proxy_manager=True)
    # HTTP requests should return a 500.
    for url in http_urls:
        response = httpx.get(url)
        assert response.status_code == 500
        assert response.text == "Internal Server Error"
    # gRPC requests should fail with StatusCode.INTERNAL.
    for url in grpc_urls:
        channel = grpc.insecure_channel(url)
        stub = serve_pb2_grpc.UserDefinedServiceStub(channel)
        try:
            with pytest.raises(grpc.RpcError) as exception_info:
                stub.Method1(serve_pb2.UserDefinedMessage())
            assert exception_info.value.code() == grpc.StatusCode.INTERNAL
        finally:
            # Force close the gRPC channel to ensure ports are released.
            channel.close()
def test_fastapi_app(_skip_if_ff_not_enabled, serve_instance):
    """A FastAPI ingress app works end-to-end over direct ingress."""
    app = FastAPI()

    @serve.deployment
    @serve.ingress(app)
    class FastAPIDeployment:
        @app.get("/")
        def root(self) -> PlainTextResponse:
            return PlainTextResponse("Hello from root!")

        @app.post("/{wildcard}")
        def post(self, wildcard: str) -> PlainTextResponse:
            return PlainTextResponse(
                f"Hello from {wildcard}!",
                status_code=201,
            )

    serve.run(FastAPIDeployment.bind())
    urls = get_application_urls("HTTP")
    # GET / returns the root greeting from every target.
    for url in urls:
        response = httpx.get(url)
        response.raise_for_status()
        assert response.text == "Hello from root!"
    # POST /{wildcard} echoes the path segment with a 201.
    for url in urls:
        response = httpx.post(f"{url}/foobar")
        assert response.status_code == 201
        assert response.text == "Hello from foobar!"
@pytest.mark.parametrize("use_fastapi", [False, True])
def test_http_request_id(_skip_if_ff_not_enabled, serve_instance, use_fastapi: bool):
    """The x-request-id header is generated (or echoed) for direct ingress HTTP."""
    if use_fastapi:
        fastapi_app = FastAPI()

        @serve.deployment
        @serve.ingress(fastapi_app)
        class EchoRequestID:
            @fastapi_app.get("/")
            async def root(self, request: Request) -> PlainTextResponse:
                # Echo back whatever request ID the server saw.
                return PlainTextResponse(request.headers.get("x-request-id", ""))

    else:

        @serve.deployment
        class EchoRequestID:
            async def __call__(self, request: Request) -> str:
                # Echo back whatever request ID the server saw.
                return PlainTextResponse(request.headers.get("x-request-id", ""))

    serve.run(EchoRequestID.bind())
    http_url = get_application_url("HTTP")
    # Case 1: no x-request-id passed, should get populated and returned as a header.
    r = httpx.get(http_url)
    r.raise_for_status()
    assert r.text != "" and r.text == r.headers["x-request-id"]
    # This call would raise if the request ID isn't a valid UUID.
    UUID(r.text, version=4)
    # Case 2: x-request-id passed, result and header should match it.
    r = httpx.get(http_url, headers={"x-request-id": "TEST-HEADER"})
    r.raise_for_status()
    assert r.text == "TEST-HEADER" and r.text == r.headers["x-request-id"]
def test_grpc_request_id(_skip_if_ff_not_enabled, serve_instance):
    # Placeholder: request-ID propagation is only covered for HTTP so far.
    pytest.skip("TODO: duplicate HTTP tests for gRPC")
def test_multiplexed_model_id(_skip_if_ff_not_enabled, serve_instance):
    # Placeholder: multiplexed model-ID handling is not yet covered here.
    pytest.skip("TODO: test that sends a MM ID and checks that it's set correctly")
def test_health_check(_skip_if_ff_not_enabled, serve_instance):
    """End-to-end health check lifecycle for direct ingress.

    Verifies that health checks:
      - are unavailable before the replica finishes initializing,
      - pass once initialization completes,
      - fail while the user health check raises,
      - recover when the user health check passes again,
      - report DRAINING during graceful shutdown.
    """
    wait_signal = SignalActor.remote()
    fail_hc_signal = SignalActor.remote()
    shutdown_signal = SignalActor.remote()
    initialize_signal = SignalActor.remote()
    # Use private `_run` API so we can test the behavior before replicas initialize.
    serve._run(
        # Set a high health check period so we have time to check behavior before the
        # controller restarts the replica.
        Hybrid.options(health_check_period_s=1).bind(
            wait_signal=wait_signal,
            fail_hc_signal=fail_hc_signal,
            shutdown_signal=shutdown_signal,
            initialize_signal=initialize_signal,
        ),
        _blocking=False,
    )
    # NOTE(review): assumes the min ports are the ones chosen — this may not hold
    # if a parallel test occupies them, but the actual port cannot be known before
    # the replica initializes. May need to revisit this in the future.
    http_port = RAY_SERVE_DIRECT_INGRESS_MIN_HTTP_PORT
    grpc_port = RAY_SERVE_DIRECT_INGRESS_MIN_GRPC_PORT

    def _do_grpc_hc() -> Tuple[grpc.StatusCode, str]:
        # Issue one gRPC health check and return (status code, message).
        channel = grpc.insecure_channel(f"localhost:{grpc_port}")
        stub = serve_pb2_grpc.RayServeAPIServiceStub(channel)
        try:
            response, call = stub.Healthz.with_call(serve_pb2.HealthzRequest())
            return call.code(), response.message
        except grpc.RpcError as e:
            return e.code(), ""
        finally:
            channel.close()

    # Wait for replica constructor to start. The direct ingress server should not be
    # listening on the port at all yet.
    wait_for_condition(lambda: ray.get(initialize_signal.cur_num_waiters.remote()) == 1)
    for _ in range(10):
        with pytest.raises(httpx.ConnectError):
            httpx.get(f"http://localhost:{http_port}/-/healthz")
        code, _ = _do_grpc_hc()
        assert code == grpc.StatusCode.UNAVAILABLE

    def _verify_health_check(
        *,
        passing: bool,
        message: str,
    ) -> bool:
        # Check HTTP health check.
        expected_status = 200 if passing else 503
        r = httpx.get(f"http://localhost:{http_port}/-/healthz")
        assert r.status_code == expected_status
        assert r.text == message
        # Check gRPC health check.
        expected_code = grpc.StatusCode.OK if passing else grpc.StatusCode.UNAVAILABLE
        code, response_message = _do_grpc_hc()
        assert code == expected_code
        # NOTE(edoakes): we can't access the response message if the gRPC call fails
        # due to StatusCode.UNAVAILABLE.
        if passing:
            assert response_message == message
        return True

    # Signal the constructor to finish and verify that health checks start to pass.
    ray.get(initialize_signal.send.remote())
    wait_for_condition(
        lambda: _verify_health_check(passing=True, message=HEALTHY_MESSAGE),
    )
    # Signal the health check method to fail and verify that health checks fail.
    ray.get(fail_hc_signal.send.remote())
    wait_for_condition(
        lambda: _verify_health_check(passing=False, message="UNHEALTHY"),
    )
    # Signal the health check method to pass and verify that health checks pass.
    ray.get(fail_hc_signal.send.remote(clear=True))
    wait_for_condition(
        lambda: _verify_health_check(passing=True, message=HEALTHY_MESSAGE),
    )
    # Initiate graceful shutdown and verify that health checks fail.
    serve.delete("default", _blocking=False)
    wait_for_condition(
        lambda: ray.get(shutdown_signal.cur_num_waiters.remote()) == 1,
    )
    for _ in range(10):
        assert _verify_health_check(passing=False, message="DRAINING")
    ray.get(shutdown_signal.send.remote())
    wait_for_condition(
        lambda: len(serve.status().applications) == 0,
    )
def test_port_retry_logic(_skip_if_ff_not_enabled, serve_instance):
    """Test that replicas retry port allocation when ports are in use."""
    import socket

    # Create a function to occupy a port
    def occupy_port(port: int, max_attempts: int = 10):
        # Bind and listen on `port`, retrying with backoff if it is transiently
        # in use. Returns the listening socket; the caller must close it.
        import errno
        attempts = 0
        while attempts < max_attempts:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            try:
                sock.bind(("localhost", port))
                sock.listen(1)
                return sock
            except OSError as exc:
                sock.close()
                # If the port is already in use, retry the same port after a
                # backoff; otherwise re-raise unexpected errors.
                if exc.errno != errno.EADDRINUSE:
                    raise
                attempts += 1
                # backoff to wait for the port to be released
                time.sleep(0.5)
        raise RuntimeError(
            f"Unable to bind a socket after {max_attempts} attempts at port {port}."
        )

    # Start occupying the min HTTP and gRPC ports
    http_sock = occupy_port(RAY_SERVE_DIRECT_INGRESS_MIN_HTTP_PORT)
    grpc_sock = occupy_port(RAY_SERVE_DIRECT_INGRESS_MIN_GRPC_PORT)
    try:
        # Deploy an app - it should retry port allocation and eventually fall back
        # to shared ingress since we're occupying the ports
        serve.run(Hybrid.bind(message="Hello world!"))
        target_groups = get_target_groups(from_proxy_manager=True)
        # Check HTTP target group: the replica must not have bound the min port.
        http_target_group = next(
            (tg for tg in target_groups if tg.protocol == RequestProtocol.HTTP)
        )
        assert (
            http_target_group.targets[0].port != RAY_SERVE_DIRECT_INGRESS_MIN_HTTP_PORT
        )
        # Check gRPC target group: same expectation for the gRPC min port.
        grpc_target_group = next(
            (tg for tg in target_groups if tg.protocol == RequestProtocol.GRPC)
        )
        assert (
            grpc_target_group.targets[0].port != RAY_SERVE_DIRECT_INGRESS_MIN_GRPC_PORT
        )
        http_url = get_application_url("HTTP")
        # Verify the service still works through shared ingress
        r = httpx.get(http_url)
        r.raise_for_status()
        assert r.text == "Hello world!"
    finally:
        # Clean up the sockets
        http_sock.close()
        grpc_sock.close()
def test_replica_gives_up_after_max_port_retries_for_http(
    _skip_if_ff_not_enabled, serve_instance
):
    """Test that replicas give up after max port retries.

    Occupies every port in the HTTP retry window so the replica's direct
    ingress server cannot bind anywhere, then verifies the deployment
    transitions to DEPLOY_FAILED.
    """
    import socket

    occupied_ports = []
    # TODO(sheikh): Control env variables
    try:
        for port in range(
            RAY_SERVE_DIRECT_INGRESS_MIN_HTTP_PORT,
            RAY_SERVE_DIRECT_INGRESS_MIN_HTTP_PORT
            + RAY_SERVE_DIRECT_INGRESS_PORT_RETRY_COUNT,
        ):
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.bind(("localhost", port))
            sock.listen(1)
            occupied_ports.append(sock)
        serve._run(Hybrid.bind(message="Hello world!"), _blocking=False)

        # Wait for the deployment to reach DEPLOY_FAILED.
        def _func():
            serve_details = ServeInstanceDetails(
                **ServeSubmissionClient("http://localhost:8265").get_serve_details()
            )
            status = (
                serve_details.applications["default"]
                .deployments["default-deployment"]
                .status
            )
            assert status == DeploymentStatus.DEPLOY_FAILED
            return True

        wait_for_condition(_func, timeout=20)
        serve.delete("default", _blocking=True)
    finally:
        # Release the occupied ports so later tests can bind them (the original
        # version leaked these sockets).
        for sock in occupied_ports:
            sock.close()
def test_replica_gives_up_after_max_port_retries_for_grpc(
    _skip_if_ff_not_enabled, serve_instance
):
    """Test that replicas give up after max port retries.

    Occupies every port in the gRPC retry window so the replica's direct
    ingress server cannot bind anywhere, then verifies the deployment
    transitions to DEPLOY_FAILED.
    """
    import socket

    occupied_ports = []
    try:
        for port in range(
            RAY_SERVE_DIRECT_INGRESS_MIN_GRPC_PORT,
            RAY_SERVE_DIRECT_INGRESS_MIN_GRPC_PORT
            + RAY_SERVE_DIRECT_INGRESS_PORT_RETRY_COUNT,
        ):
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            try:
                sock.bind(("localhost", port))
                sock.listen(1)
            except socket.error:
                # Port may already be in use (which still blocks the replica);
                # release our socket instead of keeping a dead handle around.
                sock.close()
                continue
            occupied_ports.append(sock)
        serve._run(Hybrid.bind(message="Hello world!"), _blocking=False)

        # Wait for the deployment to reach DEPLOY_FAILED.
        def _func():
            serve_details = ServeInstanceDetails(
                **ServeSubmissionClient("http://localhost:8265").get_serve_details()
            )
            status = (
                serve_details.applications["default"]
                .deployments["default-deployment"]
                .status
            )
            assert status == DeploymentStatus.DEPLOY_FAILED
            return True

        wait_for_condition(_func, timeout=20)
        serve.delete("default", _blocking=True)
    finally:
        # Release the occupied ports so later tests can bind them (the original
        # version leaked these sockets).
        for sock in occupied_ports:
            sock.close()
def test_no_port_available(_skip_if_ff_not_enabled, serve_instance):
    """Deployment fails when the entire HTTP port range is occupied.

    Binds every port in [MIN_HTTP_PORT, MAX_HTTP_PORT) so no direct ingress
    HTTP port can be allocated, then verifies both the deployment and the
    application end up in DEPLOY_FAILED.
    """
    import socket

    occupied_ports = []
    try:
        for port in range(
            RAY_SERVE_DIRECT_INGRESS_MIN_HTTP_PORT, RAY_SERVE_DIRECT_INGRESS_MAX_HTTP_PORT
        ):
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.bind(("localhost", port))
            sock.listen(1)
            occupied_ports.append(sock)
        # NOTE: a stray, unrelated docstring ("multiple replicas ... unique
        # ports") previously sat here; removed as copy-paste leftover.
        serve._run(
            Hybrid.options(name="default-deployment").bind(message="Hello world!"),
            _blocking=False,
        )

        # check that the deployment failed
        def _func():
            serve_details = ServeInstanceDetails(
                **ServeSubmissionClient("http://localhost:8265").get_serve_details()
            )
            assert (
                serve_details.applications["default"]
                .deployments["default-deployment"]
                .status
                == DeploymentStatus.DEPLOY_FAILED
            )
            assert (
                serve_details.applications["default"].status
                == ApplicationStatus.DEPLOY_FAILED
            )
            return True

        wait_for_condition(_func, timeout=20)
    finally:
        # Release the occupied ports so later tests can bind them (the original
        # version leaked these sockets).
        for sock in occupied_ports:
            sock.close()
def test_replica_releases_ports_on_shutdown(_skip_if_ff_not_enabled, serve_instance):
    """Test that replicas release ports on shutdown and reuse them on redeploy."""
    serve.run(Hybrid.options(num_replicas=4).bind(message="Hello world!"))
    http_ports = get_http_ports()
    grpc_ports = get_grpc_ports()
    assert set(http_ports) == {30000, 30001, 30002, 30003}
    assert set(grpc_ports) == {40000, 40001, 40002, 40003}
    assert len(http_ports) == 4
    assert len(grpc_ports) == 4

    def _is_port_in_use(port):
        # Probe with a fresh socket and always close it so the check itself
        # does not leak file descriptors (the original leaked one per call).
        import socket

        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            return sock.connect_ex(("0.0.0.0", port)) == 0

    # Check that the ports are occupied
    for http_port in http_ports:
        assert _is_port_in_use(http_port)
    for grpc_port in grpc_ports:
        assert _is_port_in_use(grpc_port)
    # make requests to the application
    for http_port in http_ports:
        req = httpx.get(f"http://localhost:{http_port}/")
        assert req.status_code == 200
        assert req.text == "Hello world!"
    for grpc_port in grpc_ports:
        channel = grpc.insecure_channel(f"localhost:{grpc_port}")
        stub = serve_pb2_grpc.UserDefinedServiceStub(channel)
        assert stub.Method1(serve_pb2.UserDefinedMessage()).greeting == "Hello world!"
        channel.close()
    # Shutdown the replica
    serve.delete("default", _blocking=True)
    # Check that the ports are released
    for http_port in http_ports:
        assert not _is_port_in_use(http_port)
    for grpc_port in grpc_ports:
        assert not _is_port_in_use(grpc_port)
    # redeploy the application; the released ports should be reused
    serve.run(Hybrid.options(num_replicas=4).bind(message="Hello world!"))
    http_ports = get_http_ports()
    grpc_ports = get_grpc_ports()
    assert set(http_ports) == {30000, 30001, 30002, 30003}
    assert set(grpc_ports) == {40000, 40001, 40002, 40003}
    assert len(http_ports) == 4
    assert len(grpc_ports) == 4
def test_get_serve_instance_details(_skip_if_ff_not_enabled, serve_instance):
    """Test that get_serve_instance_details returns the correct information."""
    serve.run(Hybrid.options(num_replicas=4).bind(message="Hello world!"))
    target_groups = get_target_groups(from_proxy_manager=True)
    # One HTTP and one gRPC target group, each with one target per replica.
    assert len(target_groups) == 2
    for group in target_groups:
        assert len(group.targets) == 4
def test_only_ingress_deployment_replicas_are_used_for_target_groups(
    _skip_if_ff_not_enabled, serve_instance
):
    """Only the ingress deployment's replicas should appear in target groups."""

    @serve.deployment(num_replicas=2)
    class DownstreamDeployment:
        def __init__(self):
            pass

        def __call__(self):
            return "downstream-deployment"

    @serve.deployment(num_replicas=3)
    class IngressDeployment:
        def __init__(self, downstream_deployment: DownstreamDeployment):
            self.downstream_deployment = downstream_deployment

        async def __call__(self):
            res = await self.downstream_deployment.remote()
            return f"ingress-deployment-{res}"

        async def Method1(
            self, request: serve_pb2.UserDefinedMessage
        ) -> serve_pb2.UserDefinedResponse:
            res = await self.downstream_deployment.remote()
            return serve_pb2.UserDefinedResponse(greeting=f"ingress-deployment-{res}")

    serve.run(
        IngressDeployment.options(name="ingress-deployment").bind(
            DownstreamDeployment.options(name="downstream-deployment").bind()
        )
    )
    target_groups = get_target_groups(from_proxy_manager=True)
    # One HTTP and one gRPC group; each should contain only the 3 ingress
    # replicas, not the 2 downstream replicas.
    assert len(target_groups) == 2
    assert len(target_groups[0].targets) == 3
    assert len(target_groups[1].targets) == 3
    # test that the target groups are unique and contain the correct ports for ingress deployment
    http_ports = get_http_ports()
    grpc_ports = get_grpc_ports()
    assert len(set(http_ports) & {30000, 30001, 30002, 30003, 30004}) == 3
    assert len(set(grpc_ports) & {40000, 40001, 40002, 40003, 40004}) == 3
    http_urls = get_application_urls("HTTP")
    grpc_urls = get_application_urls("gRPC", from_proxy_manager=True)
    # HTTP requests route through the ingress to the downstream deployment.
    for http_url in http_urls:
        req = httpx.get(http_url)
        assert req.status_code == 200
        assert req.text == "ingress-deployment-downstream-deployment"
    # Same routing check over gRPC.
    for grpc_url in grpc_urls:
        channel = grpc.insecure_channel(grpc_url)
        stub = serve_pb2_grpc.UserDefinedServiceStub(channel)
        assert (
            stub.Method1(serve_pb2.UserDefinedMessage()).greeting
            == "ingress-deployment-downstream-deployment"
        )
        channel.close()
def test_crashed_replica_port_is_released_and_reused(
    _skip_if_ff_not_enabled, serve_instance
):
    """Test that crashed replica port is released and reused."""
    serve.run(Hybrid.options(num_replicas=4).bind(message="Hello world!"))
    http_ports = get_http_ports()
    grpc_ports = get_grpc_ports()
    assert set(http_ports) == {30000, 30001, 30002, 30003}
    assert set(grpc_ports) == {40000, 40001, 40002, 40003}
    # delete the application
    serve.delete("default", _blocking=True)
    # run the deployment again; the same port sets should be reused
    serve.run(Hybrid.options(num_replicas=4).bind(message="Hello world!"))
    new_http_ports = get_http_ports()
    new_grpc_ports = get_grpc_ports()
    assert set(http_ports) == set(new_http_ports)
    assert set(grpc_ports) == set(new_grpc_ports)
    # get pid of the replicas
    serve_details = ServeInstanceDetails(
        **ServeSubmissionClient("http://localhost:8265").get_serve_details()
    )
    replicas = (
        serve_details.applications["default"].deployments["default-deployment"].replicas
    )
    pids = [replica.pid for replica in replicas]
    # kill the replicas
    import os
    import signal

    # force kill the replicas
    os.kill(pids[0], signal.SIGKILL)
    # keyboard interrupt the replicas
    os.kill(pids[1], signal.SIGINT)

    # TODO(sheikh): Find a way to gracefully stop the replicas
    def _func():
        # get pid of the replicas; the controller should have restarted the
        # killed replicas with new pids.
        serve_details = ServeInstanceDetails(
            **ServeSubmissionClient("http://localhost:8265").get_serve_details()
        )
        replicas = (
            serve_details.applications["default"]
            .deployments["default-deployment"]
            .replicas
        )
        new_pids = [replica.pid for replica in replicas]
        assert new_pids != pids and len(new_pids) == 4
        return True

    wait_for_condition(_func, timeout=20)

    # wait for deployment to be running
    def _func2():
        serve_details = ServeInstanceDetails(
            **ServeSubmissionClient("http://localhost:8265").get_serve_details()
        )
        assert (
            serve_details.applications["default"]
            .deployments["default-deployment"]
            .status
            == DeploymentStatus.HEALTHY
        )
        return True

    wait_for_condition(lambda: _func2(), timeout=30)
    # check that the ports are released
    after_crash_http_ports = get_http_ports()
    after_crash_grpc_ports = get_grpc_ports()
    assert len(after_crash_http_ports) == 4
    assert len(after_crash_grpc_ports) == 4
    # show that smart port selection is working even with crashed ports
    assert set(after_crash_http_ports) == set(http_ports)
    assert set(after_crash_grpc_ports) == set(grpc_ports)
    # make requests to the application
    for http_port in http_ports:
        req = httpx.get(f"http://localhost:{http_port}/")
        assert req.status_code == 200
        assert req.text == "Hello world!"
    for grpc_port in grpc_ports:
        channel = grpc.insecure_channel(f"localhost:{grpc_port}")
        stub = serve_pb2_grpc.UserDefinedServiceStub(channel)
        assert stub.Method1(serve_pb2.UserDefinedMessage()).greeting == "Hello world!"
        channel.close()
def test_multiple_applications_on_same_node(_skip_if_ff_not_enabled, serve_instance):
    """Multiple apps on one node each get their own direct ingress ports."""

    @serve.deployment(num_replicas=2)
    def deployment_1():
        return "deployment-1"

    @serve.deployment(num_replicas=2)
    def deployment_2():
        return "deployment-2"

    serve.run(
        deployment_1.options(name="deployment-1").bind(),
        name="app-1",
        route_prefix="/app-1",
    )
    serve.run(
        deployment_2.options(name="deployment-2").bind(),
        name="app-2",
        route_prefix="/app-2",
    )
    # Each app's target group should hold a disjoint pair of ports.
    expected = {
        "/app-1": ({30000, 30001}, {40000, 40001}, "deployment-1"),
        "/app-2": ({30002, 30003}, {40002, 40003}, "deployment-2"),
    }
    for prefix, (http_expected, grpc_expected, body) in expected.items():
        http_ports = get_http_ports(prefix)
        grpc_ports = get_grpc_ports(prefix)
        assert set(http_ports) == http_expected
        assert set(grpc_ports) == grpc_expected
        # Every replica port should answer requests for its own app.
        for port in http_ports:
            response = httpx.get(f"http://localhost:{port}{prefix}")
            assert response.status_code == 200
            assert response.text == body
def test_app_with_composite_deployments(_skip_if_ff_not_enabled, serve_instance):
    """Test that an app with composite deployments can be deployed. verify
    that ports are occupied by all deployments in the app but only the ingress
    deployment is used for the target groups"""

    @serve.deployment(num_replicas=3)
    class ChildDeployment:
        def __call__(self):
            return "child-deployment"

    @serve.deployment(num_replicas=2)
    class IngressDeployment:
        def __init__(self, child_deployment: ChildDeployment):
            self.child_deployment = child_deployment

        async def __call__(self):
            return await self.child_deployment.remote()

        async def Method1(
            self, request: serve_pb2.UserDefinedMessage
        ) -> serve_pb2.UserDefinedResponse:
            res = await self.child_deployment.remote()
            return serve_pb2.UserDefinedResponse(greeting=res)

    serve.run(
        IngressDeployment.options(name="ingress-deployment").bind(
            ChildDeployment.options(name="child-deployment").bind()
        ),
        name="app-1",
        route_prefix="/app-1",
    )
    # test that the target groups are unique and contain the correct ports for ingress deployment
    http_ports = get_http_ports()
    grpc_ports = get_grpc_ports()
    # difficult to say which ports are used for the target groups
    assert len(set(http_ports) & {30000, 30001, 30002, 30003, 30004}) == 2
    assert len(set(grpc_ports) & {40000, 40001, 40002, 40003, 40004}) == 2
    http_urls = get_application_urls("HTTP", app_name="app-1")
    grpc_urls = get_application_urls("gRPC", app_name="app-1", from_proxy_manager=True)
    # make a request to the ingress deployment
    for http_url in http_urls:
        req = httpx.get(http_url)
        assert req.status_code == 200
        assert req.text == "child-deployment"
    # grpc request
    for grpc_url in grpc_urls:
        channel = grpc.insecure_channel(grpc_url)
        stub = serve_pb2_grpc.UserDefinedServiceStub(channel)
        assert (
            stub.Method1(serve_pb2.UserDefinedMessage()).greeting == "child-deployment"
        )
        channel.close()

    def _is_port_in_use(ports):
        # Return True if any of `ports` accepts a connection. Use a fresh
        # socket per probe: a socket cannot be reused for another connect
        # attempt after connect_ex(), so the original single-socket loop gave
        # unreliable results for every port after the first — and it also
        # leaked the socket.
        import socket

        for port in ports:
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
                if sock.connect_ex(("0.0.0.0", port)) == 0:
                    return True
        return False

    # assert that child deployment is not occupying ports
    assert not _is_port_in_use(
        [30002, 30003, 30004]
    ), "Child deployment is occupying ports"
    assert not _is_port_in_use(
        [40002, 40003, 40004]
    ), "Child deployment is occupying ports"
def test_only_running_apps_are_used_for_target_groups(
    _skip_if_ff_not_enabled, serve_instance
):
    """Test that only running apps are used for target groups"""
    signal_actor = SignalActor.remote()

    @serve.deployment(num_replicas=2)
    def deployment_1():
        return "deployment-1"

    @serve.deployment(num_replicas=2)
    class Deployment2:
        # Constructor blocks on the signal so app-2 stays in DEPLOYING until
        # the test releases it.
        async def __init__(self, signal_actor: SignalActor):
            self.signal_actor = signal_actor
            await self.signal_actor.wait.remote()

        async def __call__(self):
            return "deployment-2"

    serve.run(
        deployment_1.options(name="deployment-1").bind(),
        name="app-1",
        route_prefix="/app-1",
    )
    serve._run(
        Deployment2.options(name="deployment-2").bind(signal_actor=signal_actor),
        name="app-2",
        route_prefix="/app-2",
        _blocking=False,
    )
    # Wait until both app-2 replicas are blocked in their constructors.
    wait_for_condition(
        lambda: ray.get(signal_actor.cur_num_waiters.remote()) == 2, timeout=10
    )
    serve_details = ServeInstanceDetails(
        **ServeSubmissionClient("http://localhost:8265").get_serve_details()
    )
    assert (
        serve_details.applications["app-2"].deployments["deployment-2"].status
        == DeploymentStatus.UPDATING
    )
    assert serve_details.applications["app-2"].status == ApplicationStatus.DEPLOYING
    assert serve_details.applications["app-1"].status == ApplicationStatus.RUNNING
    # Only the running app-1 replicas should appear as targets.
    http_ports = get_http_ports(first_only=False)
    grpc_ports = get_grpc_ports(first_only=False)
    # In HAProxy mode, we don't return itself or the Serve proxy as a target yet.
    # This will change when we support scale to/from zero.
    assert set(http_ports) == (
        {30000, 30001} if RAY_SERVE_ENABLE_HA_PROXY else {30000, 30001, 8000}
    )
    assert set(grpc_ports) == (
        {40000, 40001} if RAY_SERVE_ENABLE_HA_PROXY else {40000, 40001, 9000}
    )
    # Unblock app-2's constructors and wait for it to become RUNNING.
    ray.get(signal_actor.send.remote())

    def _func():
        serve_details = ServeInstanceDetails(
            **ServeSubmissionClient("http://localhost:8265").get_serve_details()
        )
        assert serve_details.applications["app-2"].status == ApplicationStatus.RUNNING
        return True

    wait_for_condition(_func, timeout=10)
    http_ports = get_http_ports("/app-1", first_only=False)
    grpc_ports = get_grpc_ports("/app-1", first_only=False)
    assert set(http_ports) == {30000, 30001}
    assert set(grpc_ports) == {40000, 40001}
    http_urls = get_application_urls("HTTP", app_name="app-1")
    # make requests to the application
    for http_url in http_urls:
        req = httpx.get(http_url)
        assert req.status_code == 200
        assert req.text == "deployment-1"
    # Once running, app-2 gets its own target ports too.
    http_ports = get_http_ports("/app-2", first_only=False)
    grpc_ports = get_grpc_ports("/app-2", first_only=False)
    assert set(http_ports) == {30002, 30003}
    assert set(grpc_ports) == {40002, 40003}
    http_urls = get_application_urls("HTTP", app_name="app-2")
    # make requests to the application
    for http_url in http_urls:
        req = httpx.get(http_url)
        assert req.status_code == 200
        assert req.text == "deployment-2"
def test_some_replicas_not_running(_skip_if_ff_not_enabled, serve_instance):
    """Only the replicas that finished initializing appear as targets."""
    # Semaphore with 2 permits: only 2 of the 4 replicas can finish __init__.
    signal_actor = Semaphore.remote(2)

    @serve.deployment(num_replicas=4)
    class Deployment1:
        async def __init__(self):
            await signal_actor.acquire.remote()

        def __call__(self):
            return "deployment-1"

    serve._run(
        Deployment1.options(name="deployment-1").bind(),
        name="app-1",
        route_prefix="/app-1",
        _blocking=False,
    )

    def _func():
        # Only the two initialized replicas should be registered as targets.
        http_ports = get_http_ports("/app-1", first_only=False)
        grpc_ports = get_grpc_ports("/app-1", first_only=False)
        assert set(http_ports) == {30000, 30001}
        assert set(grpc_ports) == {40000, 40001}
        return True

    wait_for_condition(_func, timeout=10)
    # check status of the deployment: still rolling out the blocked replicas.
    serve_details = ServeInstanceDetails(
        **ServeSubmissionClient("http://localhost:8265").get_serve_details()
    )
    assert (
        serve_details.applications["app-1"].deployments["deployment-1"].status
        == DeploymentStatus.UPDATING
    )
    assert serve_details.applications["app-1"].status == ApplicationStatus.DEPLOYING
def test_port_recovery_on_controller_restart(_skip_if_ff_not_enabled, serve_instance):
    """Test that ports are recovered on controller restart."""
    client = serve_instance
    serve.run(Hybrid.options(num_replicas=4).bind(message="Hello world!"))
    http_before = get_http_ports()
    grpc_before = get_grpc_ports()
    # Kill the controller; it should restart and recover the port assignments.
    ray.kill(client._controller, no_restart=False)

    def _ports_recovered():
        assert set(get_http_ports()) == set(http_before)
        assert set(get_grpc_ports()) == set(grpc_before)
        return True

    wait_for_condition(_ports_recovered)
class TestDirectIngressBackpressure:
def _do_http_request(self, url: str) -> bool:
r = httpx.get(url, timeout=10)
if r.status_code == 200:
return True
elif r.status_code == 503:
return False
else:
raise RuntimeError(f"Unexpected status code: {r.status_code}")
def _do_grpc_request(self, url: str) -> bool:
channel = grpc.insecure_channel(url)
stub = serve_pb2_grpc.UserDefinedServiceStub(channel)
try:
stub.Method1(serve_pb2.UserDefinedMessage(), timeout=20)
return True
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.RESOURCE_EXHAUSTED:
return False
raise RuntimeError(f"Unexpected status code: {e.code()}")
finally:
channel.close()
    def test_max_ongoing_requests(self, _skip_if_ff_not_enabled, serve_instance):
        """Requests beyond max_ongoing_requests queue instead of executing."""
        wait_signal = SignalActor.remote()
        serve.run(
            Hybrid.options(max_ongoing_requests=5).bind(
                message="done waiting!", wait_signal=wait_signal
            )
        )
        http_url = get_application_url("HTTP")
        grpc_url = get_application_url("gRPC", from_proxy_manager=True)
        for _do_request in [self._do_grpc_request, self._do_http_request]:
            url = grpc_url if _do_request == self._do_grpc_request else http_url
            num_requests = 5
            with ThreadPoolExecutor(num_requests + 5) as tpe:
                # Submit `max_ongoing_requests` blocking requests.
                futures = [tpe.submit(_do_request, url) for _ in range(num_requests)]
                wait_for_condition(
                    lambda: ray.get(wait_signal.cur_num_waiters.remote())
                    == num_requests
                )
                assert all(not f.done() for f in futures)
                # Submit more requests beyond `max_ongoing_requests`; they queue
                # and must not start executing (waiter count stays the same).
                queued_requests = [
                    tpe.submit(_do_request, url) for _ in range(num_requests + 5)
                ]
                wait_for_condition(
                    lambda: ray.get(wait_signal.cur_num_waiters.remote())
                    == num_requests
                )
                assert all(not f.done() for f in queued_requests)
                # Unblock the requests, check they finish successfully.
                ray.get(wait_signal.send.remote())
                assert all(f.result() is True for f in futures)
                assert all(f.result() is True for f in queued_requests)
                # Now a new request should succeed.
                assert _do_request(url) is True
                ray.get(wait_signal.send.remote(clear=True))
    def test_backpressure_queued_requests(
        self, _skip_if_ff_not_enabled, serve_instance
    ):
        """With unlimited queueing, all requests eventually succeed."""
        signal = SignalActor.remote()
        serve.run(
            Hybrid.options(max_ongoing_requests=1).bind(
                message="done waiting!", wait_signal=signal
            )
        )
        http_url = get_application_url("HTTP")
        grpc_url = get_application_url("gRPC", from_proxy_manager=True)
        for _do_request in [self._do_grpc_request, self._do_http_request]:
            url = grpc_url if _do_request == self._do_grpc_request else http_url
            num_requests = 1000
            with ThreadPoolExecutor(num_requests) as tpe:
                # Occupy the single ongoing-request slot first.
                futures = [tpe.submit(_do_request, url) for _ in range(1)]
                wait_for_condition(
                    lambda: ray.get(signal.cur_num_waiters.remote()) == 1
                )
                # Everything else queues behind it (no max_queued_requests set).
                futures.extend(
                    [tpe.submit(_do_request, url) for _ in range(num_requests - 1)]
                )
                ray.get(signal.send.remote())
                wait_for_condition(
                    lambda: ray.get(signal.cur_num_waiters.remote()) == 0
                )
                # No request is rejected: all of them drain through the queue.
                assert sum(f.result() is True for f in futures) == num_requests
                ray.get(signal.send.remote(clear=True))
def test_drop_after_max_queued_requests(
self, _skip_if_ff_not_enabled, serve_instance
):
"""Test that the backpressure logic works"""
signal = SignalActor.remote()
serve.run(
Hybrid.options(max_ongoing_requests=10, max_queued_requests=10).bind(
message="done waiting!", wait_signal=signal
)
)
http_url = get_application_url("HTTP")
grpc_url = get_application_url("gRPC", from_proxy_manager=True)
for _do_request in [self._do_grpc_request, self._do_http_request]:
url = grpc_url if _do_request == self._do_grpc_request else http_url
num_requests = 1000
with ThreadPoolExecutor(num_requests) as tpe:
futures = [tpe.submit(_do_request, url) for _ in range(10)]
wait_for_condition(
lambda: ray.get(signal.cur_num_waiters.remote()) == 10
)
futures.extend(
[tpe.submit(_do_request, url) for _ in range(num_requests - 10)]
)
def _func():
count = sum(
f.done() and f.result(timeout=0) is False for f in futures
)
assert count == num_requests - 20
return True
wait_for_condition(_func, timeout=10)
signal.send.remote()
wait_for_condition(
lambda: ray.get(signal.cur_num_waiters.remote()) == 0
)
# assert 2 requests succeeded
assert sum(f.result() is True for f in futures) == 20
signal.send.remote(clear=True)
    def test_mixed_http_grpc_backpressure(
        self, _skip_if_ff_not_enabled, serve_instance
    ):
        """Test backpressure with simultaneous HTTP and gRPC requests"""
        signal = SignalActor.remote()
        serve.run(
            Hybrid.options(max_ongoing_requests=5, max_queued_requests=5).bind(
                message="done waiting!", wait_signal=signal
            )
        )
        http_url = get_application_url("HTTP")
        grpc_url = get_application_url("gRPC", from_proxy_manager=True)
        num_requests = 500
        with ThreadPoolExecutor(num_requests) as tpe:
            # Submit mixed HTTP and gRPC requests
            http_futures = []
            grpc_futures = []
            http_futures.extend(
                [tpe.submit(self._do_http_request, http_url) for _ in range(5)]
            )
            grpc_futures.extend(
                [tpe.submit(self._do_grpc_request, grpc_url) for _ in range(5)]
            )
            # Wait for ongoing requests to block: max_ongoing_requests=5 is
            # shared across both protocols, so only 5 of the 10 can execute.
            wait_for_condition(lambda: ray.get(signal.cur_num_waiters.remote()) == 5)
            http_futures.extend(
                [
                    tpe.submit(self._do_http_request, http_url)
                    for _ in range((num_requests // 2) - 5)
                ]
            )
            grpc_futures.extend(
                [
                    tpe.submit(self._do_grpc_request, grpc_url)
                    for _ in range((num_requests // 2) - 5)
                ]
            )

            def _func():
                # Only check results for futures that are actually done
                http_rejected = sum(
                    f.done() and f.result(timeout=0) is False for f in http_futures
                )
                grpc_rejected = sum(
                    f.done() and f.result(timeout=0) is False for f in grpc_futures
                )
                total_rejected = http_rejected + grpc_rejected
                # 5 ongoing + 5 queued = 10 allowed in total, so everything
                # else (num_requests - 10) is rejected.
                assert total_rejected == num_requests - 10
                return True

            wait_for_condition(_func, timeout=20)
            # Unblock and verify
            ray.get(signal.send.remote())
            http_successful = sum(1 for f in http_futures if f.result() is True)
            grpc_successful = sum(1 for f in grpc_futures if f.result() is True)
            total_successful = http_successful + grpc_successful
            # Exactly 10 succeed overall (5 ongoing + 5 queued).
            assert total_successful == 10
    def test_health_check_during_backpressure(
        self, _skip_if_ff_not_enabled, serve_instance
    ):
        """Test that health checks work correctly during backpressure.

        The /-/healthz endpoint must keep responding (and honor forced
        health-check failures) while the replica is saturated with queued
        and rejected requests.
        """
        signal = SignalActor.remote()
        fail_hc_signal = SignalActor.remote()
        serve.run(
            Hybrid.options(
                max_ongoing_requests=1, max_queued_requests=2, health_check_period_s=0.5
            ).bind(
                message="done waiting!",
                wait_signal=signal,
                fail_hc_signal=fail_hc_signal,
            )
        )
        # this is specifically checking the health check on the replica
        http_url = get_application_url("HTTP", from_proxy_manager=True)
        num_requests = 100
        with ThreadPoolExecutor(num_requests) as tpe:
            # Submit requests to create backpressure
            futures = [tpe.submit(self._do_http_request, http_url) for _ in range(1)]
            # Wait for backpressure
            wait_for_condition(lambda: ray.get(signal.cur_num_waiters.remote()) == 1)
            futures.extend(
                [
                    tpe.submit(self._do_http_request, http_url)
                    for _ in range(num_requests - 1)
                ]
            )
            # Health check should still pass during backpressure
            hc_response = httpx.get(f"{http_url}/-/healthz")
            assert hc_response.status_code == 200
            assert hc_response.text == HEALTHY_MESSAGE
            # Fail health check
            ray.get(fail_hc_signal.send.remote())

            # Health check should fail even during backpressure
            def _check_unhealthy():
                hc_response = httpx.get(f"{http_url}/-/healthz")
                assert hc_response.status_code == 503
                assert hc_response.text == "UNHEALTHY"
                return True

            wait_for_condition(_check_unhealthy, timeout=2)
            # Restore health check
            ray.get(fail_hc_signal.send.remote(clear=True))
            # Unblock requests
            ray.get(signal.send.remote())
            # Verify some requests succeeded: 1 ongoing + 2 queued = 3 admitted
            successful = sum(1 for f in futures if f.result() is True)
            assert successful == 3
            # check remaining requests are rejected
            rejected = sum(1 for f in futures if f.done() and f.result() is False)
            assert rejected == num_requests - 3
    def test_multiple_deployment_backpressure_isolation(
        self, _skip_if_ff_not_enabled, serve_instance
    ):
        """Test that backpressure is isolated between different deployments.

        Saturating deployment-1 (limits 1/1) must not cause rejections on
        deployment-2 (limits 5/5), for both HTTP and gRPC.
        """
        signal1 = SignalActor.remote()
        signal2 = SignalActor.remote()

        @serve.deployment(name="deployment-1")
        class Deployment1:
            def __init__(self, signal):
                self.signal = signal

            async def __call__(self, request):
                await self.signal.wait.remote()
                return "deployment-1"

            async def Method1(self, request):
                await self.signal.wait.remote()
                return serve_pb2.UserDefinedResponse(greeting="deployment-1")

        @serve.deployment(name="deployment-2")
        class Deployment2:
            def __init__(self, signal):
                self.signal = signal

            async def __call__(self, request):
                await self.signal.wait.remote()
                return "deployment-2"

            async def Method1(self, request):
                await self.signal.wait.remote()
                return serve_pb2.UserDefinedResponse(greeting="deployment-2")

        # Deploy with different backpressure settings
        serve.run(
            Deployment1.options(max_ongoing_requests=1, max_queued_requests=1).bind(
                signal1
            ),
            name="app-1",
            route_prefix="/app-1",
        )
        serve.run(
            Deployment2.options(max_ongoing_requests=5, max_queued_requests=5).bind(
                signal2
            ),
            name="app-2",
            route_prefix="/app-2",
        )
        http_url_1 = get_application_url("HTTP", app_name="app-1")
        http_url_2 = get_application_url("HTTP", app_name="app-2")
        grpc_url_1 = get_application_url(
            "gRPC", app_name="app-1", from_proxy_manager=True
        )
        grpc_url_2 = get_application_url(
            "gRPC", app_name="app-2", from_proxy_manager=True
        )
        # Run the same scenario once per protocol.
        for do_request in [self._do_http_request, self._do_grpc_request]:
            url1 = http_url_1 if do_request == self._do_http_request else grpc_url_1
            url2 = http_url_2 if do_request == self._do_http_request else grpc_url_2
            num_requests = 20
            with ThreadPoolExecutor(num_requests) as tpe:
                # Saturate deployment-1 (should cause backpressure)
                futures_1 = [tpe.submit(do_request, url1) for _ in range(1)]
                # Wait for deployment-1's first request to start executing
                wait_for_condition(
                    lambda: ray.get(signal1.cur_num_waiters.remote()) == 1
                )
                futures_1.extend([tpe.submit(do_request, url1) for _ in range(9)])
                # Submit to deployment-2 (should not be affected by deployment-1's backpressure)
                futures_2 = [tpe.submit(do_request, url2) for _ in range(5)]
                wait_for_condition(
                    lambda: ray.get(signal2.cur_num_waiters.remote()) == 5
                )
                futures_2.extend([tpe.submit(do_request, url2) for _ in range(5)])

                def _func():
                    # deployment-1 should have rejected requests
                    rejected_1 = sum(
                        1
                        for f in futures_1
                        if f.done() and f.result(timeout=0) is False
                    )
                    assert rejected_1 == 8  # Most should be rejected (10 - 2 allowed)
                    # deployment-2 should not have rejected requests yet (higher limits)
                    rejected_2 = sum(
                        1
                        for f in futures_2
                        if f.done() and f.result(timeout=0) is False
                    )
                    assert rejected_2 == 0  # None should be rejected yet
                    return True

                wait_for_condition(_func, timeout=20)
                # Unblock both
                ray.get(signal1.send.remote())
                ray.get(signal2.send.remote())
                # Verify deployment-2 succeeded more than deployment-1
                successful_1 = sum(1 for f in futures_1 if f.result() is True)
                successful_2 = sum(1 for f in futures_2 if f.result() is True)
                assert successful_1 == 2  # 1 ongoing + 1 queued for deployment-1
                assert successful_2 == 10
                # Reset the signals so the next protocol iteration starts clean.
                ray.get(signal1.send.remote(clear=True))
                ray.get(signal2.send.remote(clear=True))
    def test_backpressure_with_composite_deployments(
        self, _skip_if_ff_not_enabled, serve_instance
    ):
        """Test backpressure with composite deployments.

        The ingress deployment has a very high limit (1000), so admission is
        governed by the child's limits (1 ongoing + 2 queued = 3 admitted).
        """
        signal = SignalActor.remote()

        @serve.deployment(max_ongoing_requests=1, max_queued_requests=2)
        class ChildDeployment:
            def __init__(self, signal):
                self.signal = signal

            async def __call__(self):
                await self.signal.wait.remote()
                return "child-deployment"

        @serve.deployment(max_ongoing_requests=1000)
        class CompositeDeployment:
            def __init__(self, child_deployment: ChildDeployment):
                self.child_deployment = child_deployment

            async def __call__(self):
                await self.child_deployment.remote()
                return "composite-deployment"

            async def Method1(self, request):
                await self.child_deployment.remote()
                return serve_pb2.UserDefinedResponse(greeting="composite-deployment")

        serve.run(
            CompositeDeployment.options(name="composite-deployment").bind(
                ChildDeployment.options(name="child-deployment").bind(signal)
            ),
            name="composite-app",
            route_prefix="/composite-app",
        )
        http_url = get_application_url("HTTP", app_name="composite-app")
        grpc_url = get_application_url(
            "gRPC", app_name="composite-app", from_proxy_manager=True
        )
        num_requests = 10
        for do_request in [self._do_http_request, self._do_grpc_request]:
            url = http_url if do_request == self._do_http_request else grpc_url
            with ThreadPoolExecutor(num_requests) as tpe:
                futures = []
                # there is a race condition in the router where if multiple requests
                # are submitted at the same time, then we could reject more requests
                # than strictly necessary. Hence we submit 1 request first and then
                # submit the rest of the requests.
                futures.append(tpe.submit(do_request, url))
                wait_for_condition(
                    lambda: ray.get(signal.cur_num_waiters.remote()) == 1
                )
                futures.extend(
                    [tpe.submit(do_request, url) for _ in range(num_requests - 1)]
                )

                def _func():
                    rejected = sum(
                        [f.done() and f.result(timeout=0) is False for f in futures]
                    )
                    # 1 ongoing + 2 queued at the child, so the rest are rejected.
                    assert rejected == num_requests - 3
                    return True

                wait_for_condition(_func, timeout=5)
                ray.get(signal.send.remote())
                successful = sum(1 for f in futures if f.result() is True)
                assert successful == 3
                # Reset the signal before the next protocol iteration.
                ray.get(signal.send.remote(clear=True))
    def test_client_disconnect_during_request(
        self, _skip_if_ff_not_enabled, serve_instance
    ):
        """Clients disconnecting (via timeout) should cancel their requests.

        Every request times out client-side before the handler is unblocked,
        so no handler should run to completion; the collector stays empty.
        """
        signal = SignalActor.remote()
        collector = Collector.remote()

        @serve.deployment(max_ongoing_requests=1, max_queued_requests=10)
        class A:
            async def __call__(self):
                await signal.wait.remote()
                # Only reached if the request was NOT cancelled.
                await collector.add.remote(
                    ray.serve.context._get_serve_request_context().request_id
                )

        serve.run(A.options(name="A").bind(), name="app-1", route_prefix="/app-1")
        http_url = get_application_url("HTTP", app_name="app-1")
        num_requests = 100
        with ThreadPoolExecutor(num_requests) as tpe:
            futures = [tpe.submit(httpx.get, http_url, timeout=0.5) for _ in range(1)]
            wait_for_condition(lambda: ray.get(signal.cur_num_waiters.remote()) == 1)
            futures.extend(
                [
                    tpe.submit(httpx.get, http_url, timeout=0.5)
                    for _ in range(num_requests - 1)
                ]
            )

            # wait for all futures to fail with a timeout
            def _func():
                for future in futures:
                    assert future.done()
                    try:
                        future.result()
                    except Exception as e:
                        assert isinstance(e, httpx.ReadTimeout)
                return True

            wait_for_condition(_func, timeout=10)
            ray.get(signal.send.remote())
            # check that the collector has the correct request ids: none,
            # because every client disconnected before its handler finished.
            assert len(ray.get(collector.get.remote())) == 0
def test_graceful_shutdown_wait_loop(self, _skip_if_ff_not_enabled, serve_instance):
"""Test that the graceful shutdown wait loop works"""
signal = SignalActor.remote()
@serve.deployment(
graceful_shutdown_timeout_s=20,
graceful_shutdown_wait_loop_s=0.01,
max_ongoing_requests=10,
max_queued_requests=10,
)
class A:
async def __call__(self):
await signal.wait.remote()
return "ok"
serve.run(A.options(name="A").bind(), name="app-1", route_prefix="/app-1")
http_url = get_application_url("HTTP", app_name="app-1")
num_requests = 20
with ThreadPoolExecutor(num_requests) as tpe:
futures = [tpe.submit(httpx.get, http_url, timeout=10) for _ in range(10)]
wait_for_condition(
lambda: ray.get(signal.cur_num_waiters.remote()) == 10, timeout=10
)
# Submit the remaining requests
futures = [tpe.submit(httpx.get, http_url, timeout=10) for _ in range(10)]
serve.delete("app-1", _blocking=False)
# send the signal to unblock all requests
ray.get(signal.send.remote())
# wait for all requests to finish
for future in futures:
assert future.result().status_code == 200
def test_requests_are_not_running_serially(
self, _skip_if_ff_not_enabled, serve_instance
):
"""Test that requests are processed concurrently, not serially"""
@serve.deployment(
max_ongoing_requests=20,
)
class A:
async def __call__(self):
await asyncio.sleep(1)
return "ok"
serve.run(A.options(name="A").bind(), name="app-1", route_prefix="/app-1")
http_url = get_application_url("HTTP", app_name="app-1")
num_requests = 20
with ThreadPoolExecutor(num_requests) as tpe:
futures = [
tpe.submit(httpx.get, http_url, timeout=None)
for _ in range(num_requests)
]
def _func():
for future in futures:
assert future.result().status_code == 200
return True
wait_for_condition(_func, timeout=5)
class TestDirectIngressAutoscaling:
    """Autoscaling behavior of deployments served via direct ingress."""

    @pytest.mark.parametrize("min_replicas", [1, 2])
    def test_autoscaling_scale_up_down_basic(
        self, _skip_if_ff_not_enabled, serve_instance, min_replicas
    ):
        """Send 100 requests and check that we autoscale up, and then back down."""
        signal = SignalActor.remote()

        @serve.deployment(
            autoscaling_config={
                "metrics_interval_s": 0.1,
                "min_replicas": min_replicas,
                "max_replicas": 3,
                "look_back_period_s": 0.2,
                "downscale_delay_s": 0.5,
                "upscale_delay_s": 0,
                "target_num_ongoing_requests": 100,
            },
            # We will send over a lot of queries. This will make sure replicas are
            # killed quickly during cleanup.
            graceful_shutdown_timeout_s=1,
            max_ongoing_requests=1000,
        )
        class A:
            async def __call__(self, request: Request):
                await signal.wait.remote()
                return "ok"

        serve.run(A.options(name="A").bind(), name="app-1", route_prefix="/app-1")
        wait_for_condition(
            check_deployment_status,
            name="A",
            expected_status=DeploymentStatus.HEALTHY,
            app_name="app-1",
        )
        http_url = get_application_url("HTTP", app_name="app-1")
        # Send 100 concurrent HTTP requests
        with ThreadPoolExecutor() as executor:
            futures = [
                executor.submit(httpx.get, http_url, timeout=None) for _ in range(100)
            ]
            # scale up one more replica from min_replicas
            wait_for_condition(
                check_num_replicas_gte,
                name="A",
                target=min_replicas + 1,
                app_name="app-1",
            )
            signal.send.remote()
            # verify that all requests completed successfully
            for future in futures:
                assert future.result().status_code == 200
        # As the queue is drained, we should scale back down.
        wait_for_condition(
            check_num_replicas_lte, name="A", target=min_replicas, app_name="app-1"
        )

    def test_autoscaling_scale_from_and_to_zero(
        self, _skip_if_ff_not_enabled, _skip_if_haproxy_enabled, serve_instance
    ):
        """Scale from zero replicas under load, then back down to zero.

        Also checks that replica HTTP/gRPC ports are bound while replicas
        exist and released (only proxy ports remain) after scale-to-zero.
        """
        signal = SignalActor.remote()

        @serve.deployment(
            autoscaling_config={
                "metrics_interval_s": 0.1,
                "min_replicas": 0,
                "max_replicas": 3,
                "look_back_period_s": 0.2,
                "downscale_delay_s": 0.5,
                "upscale_delay_s": 0,
                "target_num_ongoing_requests": 100,
            },
            # We will send over a lot of queries. This will make sure replicas are
            # killed quickly during cleanup.
            graceful_shutdown_timeout_s=1,
            max_ongoing_requests=1000,
        )
        class A:
            async def __call__(self, request: Request):
                await signal.wait.remote()
                return "ok"

        serve.run(A.options(name="A").bind(), name="app-1", route_prefix="/app-1")
        wait_for_condition(
            check_deployment_status,
            name="A",
            expected_status=DeploymentStatus.HEALTHY,
            app_name="app-1",
        )
        http_url = get_application_url("HTTP", app_name="app-1")
        # Send 100 concurrent HTTP requests
        with ThreadPoolExecutor() as executor:
            futures = [
                executor.submit(httpx.get, http_url, timeout=None) for _ in range(50)
            ]
            # scale up one more replica from min_replicas
            wait_for_condition(
                check_num_replicas_gte, name="A", target=1, app_name="app-1"
            )

            # now that replicas are running, check that http ports are occupied
            def _func():
                _ = get_application_url("HTTP", app_name="app-1")
                _ = get_application_url("gRPC", app_name="app-1")
                return True

            wait_for_condition(_func, timeout=10)
            signal.send.remote()
            # verify that all requests completed successfully
            for future in futures:
                assert future.result().status_code == 200
        # As the queue is drained, we should scale back down.
        wait_for_condition(check_num_replicas_lte, name="A", target=0, app_name="app-1")
        # check that http ports are released
        http_ports = get_http_ports(route_prefix="/app-1")
        assert len(http_ports) == 1
        assert http_ports[0] == 8000  # proxy port
        # check that grpc ports are released
        grpc_ports = get_grpc_ports(route_prefix="/app-1")
        assert len(grpc_ports) == 1
        assert grpc_ports[0] == 9000  # proxy port
def test_disconnect(_skip_if_ff_not_enabled, serve_instance):
    """Test gRPC client disconnect/cancellation behavior.

    Also exercises HTTP disconnect: in both cases the deployment must
    observe the cancellation (via `send_signal_on_cancellation`).
    """
    running_signal = SignalActor.remote()
    cancelled_signal = SignalActor.remote()

    @serve.deployment(name="disconnect-deployment")
    class DisconnectTest:
        async def wait_for_signal(self):
            # Fires `cancelled_signal` if this coroutine is cancelled.
            async with send_signal_on_cancellation(cancelled_signal):
                await running_signal.wait.remote()

        async def __call__(self, request: Request):
            await self.wait_for_signal()
            return "completed"

        async def Method1(
            self, request: serve_pb2.UserDefinedMessage
        ) -> serve_pb2.UserDefinedResponse:
            await self.wait_for_signal()
            return serve_pb2.UserDefinedResponse(greeting="completed")

    serve.run(DisconnectTest.bind())
    http_url = get_application_url("HTTP")
    grpc_url = get_application_url("gRPC", from_proxy_manager=True)

    # Test gRPC cancellation
    channel = grpc.insecure_channel(grpc_url)
    stub = serve_pb2_grpc.UserDefinedServiceStub(channel)
    # Send request and wait for it to start executing
    request = serve_pb2.UserDefinedMessage()
    future = stub.Method1.future(request=request)
    # Wait for the request to start processing
    wait_for_condition(
        lambda: ray.get(running_signal.cur_num_waiters.remote()) == 1, timeout=10
    )
    # Cancel the request
    future.cancel()
    # Verify that cancellation was detected by the deployment
    ray.get(cancelled_signal.wait.remote(), timeout=5)
    # Verify the future was cancelled
    with pytest.raises(grpc.FutureCancelledError):
        future.result()
    channel.close()
    # Clean up signals
    ray.get(running_signal.send.remote(clear=True))
    ray.get(cancelled_signal.send.remote(clear=True))

    # Test HTTP cancellation: the client times out (disconnects) after 1s
    # while the handler is still blocked on the signal.
    http_url = get_application_url("HTTP")
    try:
        httpx.get(http_url, timeout=1)
    except httpx.TimeoutException:
        pass
    else:
        raise RuntimeError("Request should have been cancelled")
    wait_for_condition(
        lambda: ray.get(running_signal.cur_num_waiters.remote()) == 1, timeout=10
    )
    try:
        ray.get(cancelled_signal.wait.remote(), timeout=5)
    except ray.exceptions.GetTimeoutError:
        assert False, "Cancelled signal should have been sent"
    ray.get(running_signal.send.remote(clear=True))
    ray.get(cancelled_signal.send.remote(clear=True))
def test_context_propagation(_skip_if_ff_not_enabled, serve_instance):
    """Verify the Serve request context reaches the deployment.

    Both the HTTP handler and the gRPC Method1 handler must see the app
    name in the request context.
    """

    @serve.deployment(name="context-propagation-deployment")
    class ContextPropagationTest:
        async def __call__(self):
            return ray.serve.context._get_serve_request_context().app_name

        async def Method1(
            self, request: serve_pb2.UserDefinedMessage
        ) -> serve_pb2.UserDefinedResponse:
            current_app = ray.serve.context._get_serve_request_context().app_name
            return serve_pb2.UserDefinedResponse(greeting=current_app)

    serve.run(
        ContextPropagationTest.bind(),
        name="context-propagation-deployment",
        route_prefix="/context-propagation-deployment",
    )

    # HTTP path: the body is the app name taken from the request context.
    http_url = get_application_url("HTTP", app_name="context-propagation-deployment")
    http_response = httpx.get(http_url)
    assert http_response.status_code == 200
    assert http_response.text == "context-propagation-deployment"

    # gRPC path: same check via the Method1 handler.
    grpc_url = get_application_url(
        "gRPC", app_name="context-propagation-deployment", from_proxy_manager=True
    )
    grpc_channel = grpc.insecure_channel(grpc_url)
    grpc_stub = serve_pb2_grpc.UserDefinedServiceStub(grpc_channel)
    grpc_reply = grpc_stub.Method1.future(request=serve_pb2.UserDefinedMessage())
    assert grpc_reply.result().greeting == "context-propagation-deployment"
def test_context_propagation_with_child(_skip_if_ff_not_enabled, serve_instance):
    """Verify the Serve request context propagates through a child deployment.

    The ingress deployment forwards to a child; the child reads the app
    name from the request context and both protocols must observe it.
    """

    @serve.deployment(name="child-deployment")
    class ChildDeployment:
        async def __call__(self):
            return ray.serve.context._get_serve_request_context().app_name

    @serve.deployment(name="context-propagation-deployment")
    class ContextPropagationTest:
        def __init__(self, child_deployment: ChildDeployment):
            self.child_deployment = child_deployment

        async def __call__(self):
            return await self.child_deployment.remote()

        async def Method1(
            self, request: serve_pb2.UserDefinedMessage
        ) -> serve_pb2.UserDefinedResponse:
            child_app_name = await self.child_deployment.remote()
            return serve_pb2.UserDefinedResponse(greeting=child_app_name)

    serve.run(
        ContextPropagationTest.bind(ChildDeployment.bind()),
        name="context-propagation-deployment",
        route_prefix="/context-propagation-deployment",
    )

    # HTTP path: the child's context app name is returned as the body.
    http_url = get_application_url("HTTP", app_name="context-propagation-deployment")
    http_response = httpx.get(http_url)
    assert http_response.status_code == 200
    assert http_response.text == "context-propagation-deployment"

    # gRPC path: same check via Method1.
    grpc_url = get_application_url(
        "gRPC", app_name="context-propagation-deployment", from_proxy_manager=True
    )
    grpc_channel = grpc.insecure_channel(grpc_url)
    grpc_stub = serve_pb2_grpc.UserDefinedServiceStub(grpc_channel)
    grpc_reply = grpc_stub.Method1.future(request=serve_pb2.UserDefinedMessage())
    assert grpc_reply.result().greeting == "context-propagation-deployment"
def test_shutdown_replica_only_after_draining_requests(
    _skip_if_ff_not_enabled, serve_instance
):
    """Test that the replica is shutdown correctly when the deployment is shutdown.

    In-flight requests issued before `serve.delete` must be drained and
    complete with a 200 before the application is removed.
    """
    signal = SignalActor.remote()

    # In direct ingress mode, graceful_shutdown_timeout_s is automatically bumped to
    # max(graceful_shutdown_timeout_s, RAY_SERVE_DIRECT_INGRESS_MIN_DRAINING_PERIOD_S)
    # to give external load balancers time to deregister the replica.
    @serve.deployment(name="replica-shutdown-deployment", graceful_shutdown_timeout_s=5)
    class ReplicaShutdownTest:
        async def __call__(self):
            await signal.wait.remote()
            return "ok"

    serve.run(ReplicaShutdownTest.bind(), name="replica-shutdown-deployment")
    http_url = get_application_url("HTTP", app_name="replica-shutdown-deployment")
    with ThreadPoolExecutor() as executor:
        futures = [executor.submit(httpx.get, http_url, timeout=10) for _ in range(4)]
        wait_for_condition(
            lambda: ray.get(signal.cur_num_waiters.remote()) == 4, timeout=10
        )
        serve.delete("replica-shutdown-deployment", _blocking=False)
        # Wait less than graceful_shutdown_timeout_s to ensure requests can complete
        time.sleep(0.5)
        ray.get(signal.send.remote(clear=True))
        # All 4 in-flight requests must have been drained successfully.
        for future in futures:
            assert future.result().status_code == 200
    wait_for_condition(
        lambda: "replica-shutdown-deployment" not in serve.status().applications,
        timeout=10,
    )
def test_http_routes_endpoint(_skip_if_ff_not_enabled, serve_instance):
    """Test that the routes endpoint returns pair of routes_prefix and
    app_name of which the replica is serving for.
    """

    @serve.deployment
    class D1:
        def __call__(self, *args):
            return "D1"

    @serve.deployment
    class D2:
        def __call__(self, *args):
            return "D2"

    serve.run(D1.bind(), name="app1", route_prefix="/D1")
    serve.run(D2.bind(), name="app2", route_prefix="/hello/world")

    # Each replica's /-/routes should report only its own app's route.
    expected_per_app = (
        ("app1", {"/D1": "app1"}),
        ("app2", {"/hello/world": "app2"}),
    )
    for app_name, expected_routes in expected_per_app:
        base_url = get_application_url(
            app_name=app_name,
            exclude_route_prefix=True,
            from_proxy_manager=True,
        )
        observed = httpx.get(f"{base_url}/-/routes").json()
        assert observed == expected_routes, observed

    # The proxy's /-/routes should report every app's route.
    proxy_routes = httpx.get("http://localhost:8000/-/routes").json()
    assert proxy_routes == {"/D1": "app1", "/hello/world": "app2"}, proxy_routes
# TODO: haproxy doesn't support gRPC ListApplications yet so skipping this test
def test_grpc_list_applications_endpoint(
    _skip_if_ff_not_enabled, _skip_if_haproxy_enabled, serve_instance
):
    """Each replica's gRPC `ListApplications` method should only report the
    single application that replica is serving.
    """

    @serve.deployment
    class D1:
        def __call__(self, *args):
            return "D1"

    @serve.deployment
    class D2:
        def __call__(self, *args):
            return "D2"

    serve.run(D1.bind(), name="app1", route_prefix="/D1")
    serve.run(D2.bind(), name="app2", route_prefix="/hello/world")

    # Each replica only lists its own application.
    for app_name in ("app1", "app2"):
        replica_url = get_application_url("gRPC", app_name=app_name)
        replica_channel = grpc.insecure_channel(replica_url)
        try:
            ping_grpc_list_applications(replica_channel, [app_name])
        finally:
            replica_channel.close()

    # The proxy lists every application.
    proxy_channel = grpc.insecure_channel("localhost:9000")
    try:
        ping_grpc_list_applications(proxy_channel, ["app1", "app2"])
    finally:
        proxy_channel.close()
# Copied from test_controller.py
def test_redeploy_start_time(_skip_if_ff_not_enabled, serve_instance):
    """Check that redeploying a deployment doesn't reset its start time."""
    controller = _get_global_client()._controller

    def _fetch_start_time_ms():
        # Decode the controller's deployment-info proto for the "test" deployment.
        route_proto = DeploymentRoute.FromString(
            ray.get(
                controller.get_deployment_info.remote("test", SERVE_DEFAULT_APP_NAME)
            )
        )
        return DeploymentInfo.from_proto(route_proto.deployment_info).start_time_ms

    @serve.deployment
    def test(_):
        return "1"

    serve.run(test.bind())
    initial_start_time_ms = _fetch_start_time_ms()

    time.sleep(0.1)

    # Redeploy the same deployment with a different body.
    @serve.deployment
    def test(_):
        return "2"

    serve.run(test.bind())
    # The start time must survive the redeploy.
    assert initial_start_time_ms == _fetch_start_time_ms()
# Copied from test_controller.py
def test_deploy_app_custom_exception(_skip_if_ff_not_enabled, serve_instance):
    """Check that controller doesn't deserialize an exception from deploy_app."""
    controller = _get_global_client()._controller
    deploy_config = ServeDeploySchema.parse_obj(
        {
            "applications": [
                {
                    "name": "broken_app",
                    "route_prefix": "/broken",
                    "import_path": "ray.serve.tests.test_config_files.broken_app:app",
                }
            ]
        }
    )
    ray.get(controller.apply_config.remote(config=deploy_config))

    def _deploy_failed_with_custom_message() -> bool:
        # The custom exception text must survive into the app status message.
        app_status = serve.status().applications["broken_app"]
        assert app_status.status == ApplicationStatus.DEPLOY_FAILED
        assert "custom exception info" in app_status.message
        return True

    wait_for_condition(_deploy_failed_with_custom_message, timeout=10)
# Copied from test_controller.py
@pytest.mark.parametrize(
    "policy_name", [None, DEFAULT_AUTOSCALING_POLICY_NAME, default_autoscaling_policy]
)
def test_get_serve_instance_details_json_serializable(
    _skip_if_ff_not_enabled, serve_instance, policy_name
):
    """Test the result from get_serve_instance_details is json serializable.

    Compares the serialized details against a fully-spelled-out expected
    JSON document, so any new or changed field in the details schema will
    fail this test and must be reflected here.
    """
    controller = _get_global_client()._controller
    autoscaling_config = {
        "min_replicas": 1,
        "max_replicas": 10,
        "_policy": {"name": policy_name},
    }
    # policy_name=None exercises the default (no explicit policy) path.
    if policy_name is None:
        autoscaling_config.pop("_policy")

    @serve.deployment(autoscaling_config=autoscaling_config)
    def autoscaling_app():
        return "1"

    serve.run(autoscaling_app.bind())
    details = ray.get(controller.get_serve_instance_details.remote())
    # Must not raise: everything in the details dict is JSON-serializable.
    details_json = json.dumps(details)
    # Gather the live values (node ids, actor ids, timestamps, ...) that
    # appear inside the expected document.
    controller_details = ray.get(controller.get_actor_details.remote())
    node_id = controller_details.node_id
    node_ip = controller_details.node_ip
    node_instance_id = controller_details.node_instance_id
    proxy_details = ray.get(controller.get_proxy_details.remote(node_id=node_id))
    deployment_timestamp = ray.get(
        controller.get_deployment_timestamps.remote(app_name="default")
    )
    deployment_details = ray.get(
        controller.get_deployment_details.remote("default", "autoscaling_app")
    )
    replica = deployment_details.replicas[0]
    expected_json = json.dumps(
        {
            "controller_info": {
                "node_id": node_id,
                "node_ip": node_ip,
                "node_instance_id": node_instance_id,
                "actor_id": controller_details.actor_id,
                "actor_name": controller_details.actor_name,
                "worker_id": controller_details.worker_id,
                "log_file_path": controller_details.log_file_path,
            },
            "proxy_location": "HeadOnly",
            "http_options": {"host": "0.0.0.0"},
            "grpc_options": {
                "port": 9000,
                "grpc_servicer_functions": TEST_GRPC_SERVICER_FUNCTIONS,
            },
            "proxies": {
                node_id: {
                    "node_id": node_id,
                    "node_ip": node_ip,
                    "node_instance_id": node_instance_id,
                    "actor_id": proxy_details.actor_id,
                    "actor_name": proxy_details.actor_name,
                    "worker_id": proxy_details.worker_id,
                    "log_file_path": proxy_details.log_file_path,
                    "status": proxy_details.status,
                }
            },
            "applications": {
                "default": {
                    "name": "default",
                    "route_prefix": "/",
                    "docs_path": None,
                    "status": "RUNNING",
                    "message": "",
                    "last_deployed_time_s": deployment_timestamp,
                    "deployed_app_config": None,
                    "source": "imperative",
                    "deployments": {
                        "autoscaling_app": {
                            "name": "autoscaling_app",
                            "status": "HEALTHY",
                            "status_trigger": "CONFIG_UPDATE_COMPLETED",
                            "message": "",
                            "deployment_config": {
                                "name": "autoscaling_app",
                                "max_ongoing_requests": 5,
                                "max_queued_requests": -1,
                                "user_config": None,
                                "autoscaling_config": {
                                    "min_replicas": 1,
                                    "initial_replicas": None,
                                    "max_replicas": 10,
                                    "target_ongoing_requests": 2.0,
                                    "metrics_interval_s": 10.0,
                                    "look_back_period_s": 30.0,
                                    "smoothing_factor": 1.0,
                                    "upscale_smoothing_factor": None,
                                    "downscale_smoothing_factor": None,
                                    "upscaling_factor": None,
                                    "downscaling_factor": None,
                                    "downscale_delay_s": 600.0,
                                    "downscale_to_zero_delay_s": None,
                                    "upscale_delay_s": 30.0,
                                    "aggregation_function": "mean",
                                    "policy": {
                                        "policy_function": "ray.serve.autoscaling_policy:default_autoscaling_policy",
                                        "policy_kwargs": {},
                                    },
                                },
                                "graceful_shutdown_wait_loop_s": 2.0,
                                "graceful_shutdown_timeout_s": 20.0,
                                "health_check_period_s": 10.0,
                                "health_check_timeout_s": 30.0,
                                "ray_actor_options": {
                                    "num_cpus": 1.0,
                                },
                                "request_router_config": {
                                    "request_router_class": "ray.serve._private.request_router:PowerOfTwoChoicesRequestRouter",
                                    "request_router_kwargs": {},
                                    "request_routing_stats_period_s": 10.0,
                                    "request_routing_stats_timeout_s": 30.0,
                                },
                            },
                            "target_num_replicas": 1,
                            "required_resources": {"CPU": 1},
                            "replicas": [
                                {
                                    "node_id": node_id,
                                    "node_ip": node_ip,
                                    "node_instance_id": node_instance_id,
                                    "actor_id": replica.actor_id,
                                    "actor_name": replica.actor_name,
                                    "worker_id": replica.worker_id,
                                    "log_file_path": replica.log_file_path,
                                    "replica_id": replica.replica_id,
                                    "state": "RUNNING",
                                    "pid": replica.pid,
                                    "start_time_s": replica.start_time_s,
                                }
                            ],
                        }
                    },
                    "external_scaler_enabled": False,
                    "deployment_topology": {
                        "app_name": "default",
                        "nodes": {
                            "autoscaling_app": {
                                "name": "autoscaling_app",
                                "app_name": "default",
                                "outbound_deployments": [],
                                "is_ingress": True,
                            },
                        },
                        "ingress_deployment": "autoscaling_app",
                    },
                }
            },
            "target_capacity": None,
            # Target groups differ depending on whether the HA proxy fronts
            # the replicas (proxy ports) or clients hit replicas directly.
            "target_groups": [
                {
                    "targets": [
                        {
                            "ip": node_ip,
                            "port": 8000 if RAY_SERVE_ENABLE_HA_PROXY else 30000,
                            "instance_id": node_instance_id,
                            "name": proxy_details.actor_name
                            if RAY_SERVE_ENABLE_HA_PROXY
                            else replica.actor_name,
                        },
                    ],
                    "route_prefix": "/",
                    "protocol": "HTTP",
                    "app_name": "" if RAY_SERVE_ENABLE_HA_PROXY else "default",
                },
                {
                    "targets": [
                        {
                            "ip": node_ip,
                            "port": 9000 if RAY_SERVE_ENABLE_HA_PROXY else 40000,
                            "instance_id": node_instance_id,
                            "name": proxy_details.actor_name
                            if RAY_SERVE_ENABLE_HA_PROXY
                            else replica.actor_name,
                        },
                    ],
                    "route_prefix": "/",
                    "protocol": "gRPC",
                    "app_name": "" if RAY_SERVE_ENABLE_HA_PROXY else "default",
                },
            ],
        }
    )
    assert details_json == expected_json
    # ensure internal field, serialized_policy_def, is not exposed
    application = details["applications"]["default"]
    deployment = application["deployments"]["autoscaling_app"]
    autoscaling_config = deployment["deployment_config"]["autoscaling_config"]
    assert "_serialized_policy_def" not in autoscaling_config
# Copied from test_controller.py
def test_get_deployment_config(_skip_if_ff_not_enabled, serve_instance):
    """Test getting deployment config."""
    controller = _get_global_client()._controller
    deployment_id = DeploymentID(name="App", app_name="default")

    def _fetch_config():
        # Ask the controller for the config of the "App" deployment.
        return ray.get(
            controller.get_deployment_config.remote(deployment_id=deployment_id)
        )

    # Before any deployment is created, the config should be None.
    assert _fetch_config() is None

    @serve.deployment
    class App:
        pass

    serve.run(App.bind())
    # After the deployment is created, the config should be DeploymentConfig.
    assert isinstance(_fetch_config(), DeploymentConfig)
def test_stuck_requests_are_force_killed(_skip_if_ff_not_enabled, serve_instance):
    """This test is really slow, because it waits for the ports to be released from TIME_WAIT state.
    The ports are in TIME_WAIT state because the replicas are force-killed and the ports are not
    released immediately."""
    import socket

    def _can_bind_to_port(port):
        """Check if we can bind to the port (not just if nothing is listening)."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            sock.bind(("0.0.0.0", port))
            sock.close()
            return True
        except OSError:
            sock.close()
            return False

    signal = SignalActor.remote()

    @serve.deployment(
        name="stuck-requests-deployment",
        graceful_shutdown_timeout_s=1,
    )
    class StuckRequestsTest:
        async def __call__(self):
            # This request will never complete - it waits forever
            await signal.wait.remote()
            return "ok"

    serve.run(
        StuckRequestsTest.bind(),
        name="stuck-requests-deployment",
        route_prefix="/stuck-requests-deployment",
    )
    # Collect all ports used by the application before deleting it
    http_ports = get_http_ports(route_prefix="/stuck-requests-deployment")
    grpc_ports = get_grpc_ports(route_prefix="/stuck-requests-deployment")
    http_url = get_application_url("HTTP", app_name="stuck-requests-deployment")
    with ThreadPoolExecutor() as executor:
        # Send requests that will hang forever (signal is never sent)
        futures = [executor.submit(httpx.get, http_url, timeout=60) for _ in range(2)]
        # Wait for requests to be received by the replica
        wait_for_condition(
            lambda: ray.get(signal.cur_num_waiters.remote()) == 2, timeout=10
        )
        # Delete the deployment - requests are still stuck
        serve.delete("stuck-requests-deployment", _blocking=False)
        # Verify the application is eventually deleted (replica was force-killed).
        # This should complete within graceful_shutdown_timeout_s (1s) + buffer.
        wait_for_condition(
            lambda: "stuck-requests-deployment" not in serve.status().applications,
            timeout=10,
        )
        # The stuck requests should fail (connection closed or similar)
        for future in futures:
            try:
                result = future.result(timeout=5)
                # If we get a response, it should be an error (not 200)
                assert result.status_code != 200
            except Exception:
                # Expected - request failed due to force-kill
                pass

    # Wait until all ports can be bound (not just until nothing is listening).
    # This ensures the ports are fully released from TIME_WAIT state.
    def all_ports_can_be_bound():
        for port in http_ports + grpc_ports:
            if not _can_bind_to_port(port):
                return False
        return True

    # TIME_WAIT can last up to 60s on Linux, so use a generous timeout
    wait_for_condition(all_ports_can_be_bound, timeout=120)
if __name__ == "__main__":
    # Allow running this test module directly (outside the pytest CLI).
    sys.exit(pytest.main(["-v", "-s", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/test_direct_ingress.py",
"license": "Apache License 2.0",
"lines": 2093,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/serve/tests/test_direct_ingress_standalone.py | import asyncio
import httpx
import pytest
from ray import serve
from ray._common.test_utils import SignalActor
from ray.serve._private.constants import (
RAY_SERVE_ENABLE_DIRECT_INGRESS,
SERVE_HTTP_REQUEST_DISCONNECT_DISABLED_HEADER,
SERVE_HTTP_REQUEST_TIMEOUT_S_HEADER,
)
from ray.serve._private.test_utils import get_application_url
from ray.serve.config import HTTPOptions
from ray.serve.tests.conftest import * # noqa
@pytest.fixture
def _skip_if_ff_not_enabled():
    """Skip the requesting test unless the direct-ingress feature flag is on."""
    if RAY_SERVE_ENABLE_DIRECT_INGRESS:
        return
    pytest.skip(
        reason="RAY_SERVE_ENABLE_DIRECT_INGRESS not set.",
    )
@pytest.mark.asyncio
@pytest.mark.parametrize("set_http_options_timeout", [False, True])
@pytest.mark.parametrize("timeout_header", [None, "abc", "0", "1"])
@pytest.mark.parametrize("disconnect_header", [None, "?0", "?1"])
async def test_http_request_timeout_disconnect_headers(
    _skip_if_ff_not_enabled,
    ray_instance,
    ray_shutdown,
    set_http_options_timeout,
    disconnect_header,
    timeout_header,
):
    """Test cases of request timeout and disconnect header.

    Exercises the full truth table of the per-request timeout header, the
    global ``request_timeout_s`` HTTP option, and the disconnect-disabled
    header against a handler that blocks until signaled.
    """
    http_request_timeout_s = None
    if set_http_options_timeout:
        http_request_timeout_s = 1
    serve.start(
        http_options=HTTPOptions(
            host="0.0.0.0",
            request_timeout_s=http_request_timeout_s,
        ),
    )
    signal = SignalActor.remote()
    @serve.deployment
    class Model:
        async def __call__(self):
            # Blocks until the test sends the signal (if it ever does).
            await signal.wait.remote()
    serve.run(Model.bind())
    http_url = get_application_url("HTTP")
    headers = {}
    if timeout_header is not None:
        headers[
            SERVE_HTTP_REQUEST_TIMEOUT_S_HEADER.encode("utf-8")
        ] = timeout_header.encode("utf-8")
    if disconnect_header is not None:
        headers[
            SERVE_HTTP_REQUEST_DISCONNECT_DISABLED_HEADER.encode("utf-8")
        ] = disconnect_header.encode("utf-8")
    # A server-side timeout is expected when the header asks for a 1s timeout,
    # or when the global request_timeout_s is set and the header does not
    # override it with "0". NOTE: `and` binds tighter than `or` here, i.e.
    # A or (B and C) — this grouping is intentional.
    expect_timeout = (
        timeout_header == "1"
        or set_http_options_timeout is True
        and timeout_header != "0"
    )
    # Without a server-side timeout, the client's 2s timeout aborts the
    # still-blocked request unless disconnect handling is disabled ("?1");
    # in that remaining case the signal below unblocks the handler instead.
    expect_disconnect = not expect_timeout and disconnect_header != "?1"
    async with httpx.AsyncClient() as client:
        response_task = asyncio.create_task(
            client.get(http_url, headers=headers, timeout=2)
        )
        await asyncio.sleep(0.01)
        if not expect_timeout and not expect_disconnect:
            # Unblock the handler so the request can complete with 200.
            signal.send.remote()
        if expect_disconnect:
            with pytest.raises(httpx.ReadTimeout):
                await response_task
        else:
            response = await response_task
            if expect_timeout:
                # 408 Request Timeout from the server-side deadline.
                assert response.status_code == 408
            else:
                assert response.status_code == 200
if __name__ == "__main__":
    # Allow running this test module directly (outside the pytest CLI).
    import sys
    sys.exit(pytest.main(["-v", "-s", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/test_direct_ingress_standalone.py",
"license": "Apache License 2.0",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/serve/tests/unit/test_controller_direct_ingress.py | import asyncio
from typing import Dict, List, Optional, Tuple
from unittest import mock
import pytest
from ray.serve._private.common import (
DeploymentID,
DeploymentStatus,
DeploymentStatusTrigger,
ReplicaID,
ReplicaState,
RequestProtocol,
RunningReplicaInfo,
)
from ray.serve._private.controller import ServeController
from ray.serve._private.node_port_manager import NodePortManager
from ray.serve.config import HTTPOptions, gRPCOptions
from ray.serve.schema import (
DeploymentDetails,
DeploymentSchema,
ReplicaDetails,
Target,
TargetGroup,
)
# Simple test KV store implementation
class FakeKVStore:
    """In-memory dict-backed stand-in for the controller's persistent KV store."""

    def __init__(self):
        self.data = {}

    def get(self, key):
        # Missing keys read as None, mirroring a KV store miss.
        return self.data.get(key)

    def put(self, key, value):
        self.data[key] = value

    def delete(self, key):
        # Deleting an absent key is a no-op.
        self.data.pop(key, None)
# Simplified LongPollHost for tests
class FakeLongPollHost:
    """Records long-poll change notifications instead of broadcasting them."""

    def __init__(self):
        self.notified_changes = {}

    def notify_changed(self, changes):
        # Merge the latest changes over any previously recorded ones.
        self.notified_changes |= changes
# Application State Manager for dependency injection
class FakeApplicationStateManager:
    """Serves canned application state to the controller under test."""

    def __init__(self, app_statuses, route_prefixes, ingress_deployments):
        self.app_statuses = app_statuses
        self.route_prefixes = route_prefixes
        self.ingress_deployments = ingress_deployments

    def list_app_statuses(self):
        return self.app_statuses

    def get_route_prefix(self, app_name):
        # Default mirrors Serve's "/<app_name>" convention.
        return self.route_prefixes.get(app_name, "/" + app_name)

    def get_ingress_deployment_name(self, app_name):
        # Default mirrors the "<app_name>_ingress" naming used in these tests.
        return self.ingress_deployments.get(app_name, app_name + "_ingress")
class FakeDeploymentReplica:
    """Minimal replica record: its ReplicaID plus the node it is scheduled on."""

    def __init__(self, node_id, replica_id: ReplicaID):
        self.replica_id = replica_id
        # Attribute name mirrors the real DeploymentReplica's actor_node_id.
        self.actor_node_id = node_id
class FakeProxyState:
    """Static snapshot of a single proxy actor's placement information."""

    def __init__(self, node_id, node_ip, name):
        self.name = name
        self.node_ip = node_ip
        self.node_id = node_id
class FakeProxyStateManager:
    """Fake proxy-state manager with a configurable set of proxy nodes.

    Exposes the subset of the real ProxyStateManager interface that the
    controller's target-group construction relies on.
    """

    def __init__(self):
        self.proxy_details = {}
        self._http_options = HTTPOptions()
        self._grpc_options = gRPCOptions(
            grpc_servicer_functions=["f1"],
        )

    def add_proxy_details(self, node_id, node_ip, name):
        """Register (or overwrite) the proxy running on ``node_id``."""
        self.proxy_details[node_id] = FakeProxyState(
            node_id=node_id,
            node_ip=node_ip,
            name=name,
        )

    def get_proxy_details(self):
        return self.proxy_details

    def get_targets(self, protocol: RequestProtocol):
        """Return one Target per registered proxy for the given protocol."""
        # Select the listen port matching the requested protocol; anything
        # that is not HTTP is treated as gRPC, as in the original.
        if protocol == RequestProtocol.HTTP:
            port = self._http_options.port
        else:
            port = self._grpc_options.port
        # The dict key (node_id) is not needed here, so iterate values only.
        return [
            Target(
                ip=proxy_details.node_ip,
                port=port,
                instance_id="",
                name=proxy_details.name,
            )
            for proxy_details in self.proxy_details.values()
        ]

    def get_grpc_config(self):
        return self._grpc_options

    def get_fallback_proxy_details(self):
        # The fake never provides a fallback proxy.
        return None
class FakeReplicaStateContainer:
    """Wraps a fixed list of RunningReplicaInfo as controller replica objects."""

    def __init__(self, replica_infos: List[RunningReplicaInfo]):
        self.replica_infos = replica_infos

    def get(self):
        # Materialize one FakeDeploymentReplica per known replica info.
        replicas = []
        for info in self.replica_infos:
            replicas.append(FakeDeploymentReplica(info.node_id, info.replica_id))
        return replicas
class FakeDeploymentState:
    """Deployment state holding a fixed container of replicas."""

    def __init__(self, id, replica_infos):
        # Underscore name matches the real DeploymentState attribute.
        self._replicas = FakeReplicaStateContainer(replica_infos)
        self.id = id
# Deployment State Manager for dependency injection
class FakeDeploymentStateManager:
    """Fake deployment-state manager backed by a fixed replica mapping."""

    def __init__(
        self,
        running_replica_infos: Dict[DeploymentID, List[RunningReplicaInfo]],
    ):
        self.running_replica_infos = running_replica_infos
        # One FakeDeploymentState per deployment, keyed by DeploymentID.
        self._deployment_states = {
            dep_id: FakeDeploymentState(dep_id, infos)
            for dep_id, infos in running_replica_infos.items()
        }

    def get_running_replica_infos(self):
        return self.running_replica_infos

    def get_replica_details(self, replica_info: RunningReplicaInfo) -> ReplicaDetails:
        """Project a RunningReplicaInfo into the schema's ReplicaDetails."""
        return ReplicaDetails(
            replica_id=replica_info.replica_id.unique_id,
            node_id=replica_info.node_id,
            node_ip=replica_info.node_ip,
            node_instance_id="",
            start_time_s=0,
            state=ReplicaState.RUNNING,
            actor_name=replica_info.replica_id.unique_id,
        )

    def get_deployment_details(self, id: DeploymentID) -> Optional[DeploymentDetails]:
        """Return details for ``id``, or None if the deployment is unknown."""
        if id not in self.running_replica_infos:
            return None
        return DeploymentDetails(
            name=id.name,
            status=DeploymentStatus.HEALTHY,
            status_trigger=DeploymentStatusTrigger.UNSPECIFIED,
            message="",
            deployment_config=mock.Mock(spec=DeploymentSchema),
            target_num_replicas=1,
            required_resources={},
            replicas=[
                self.get_replica_details(info)
                for info in self.running_replica_infos[id]
            ],
        )

    def get_ingress_replicas_info(self) -> List[Tuple[str, str, int, int]]:
        # This fake tracks no separate ingress replicas.
        return []
# Test Controller that overrides methods and dependencies
class FakeDirectIngressController(ServeController):
    """ServeController wired with fake dependencies for direct-ingress tests.

    Bypasses ServeController.__init__ entirely and sets only the attributes
    that the direct-ingress code paths under test read.
    """

    def __init__(
        self,
        kv_store,
        long_poll_host,
        application_state_manager,
        deployment_state_manager,
        proxy_state_manager,
    ):
        # Skip parent __init__ since we'll set dependencies directly
        self.kv_store = kv_store
        self.long_poll_host = long_poll_host
        self.application_state_manager = application_state_manager
        self.deployment_state_manager = deployment_state_manager
        self.proxy_state_manager = proxy_state_manager
        # Private flags the controller consults when building target groups;
        # names must match ServeController's internals exactly.
        self._direct_ingress_enabled = True
        self._ha_proxy_enabled = False
        self._controller_node_id = "head_node_id"
        self._shutting_down = False
        # Pre-set the event so control-loop steps treat recovery as finished.
        self.done_recovering_event = asyncio.Event()
        self.done_recovering_event.set()
        self.node_update_duration_gauge_s = mock.Mock()

    def _update_proxy_nodes(self):
        # No real cluster in unit tests: proxy node discovery is a no-op.
        pass
@pytest.fixture(autouse=True)
def setup_env(monkeypatch):
    """Enable direct ingress and pin the ingress port ranges for every test."""
    env_overrides = {
        "RAY_SERVE_ENABLE_DIRECT_INGRESS": "1",
        "RAY_SERVE_DIRECT_INGRESS_MIN_HTTP_PORT": "30000",
        "RAY_SERVE_DIRECT_INGRESS_MAX_HTTP_PORT": "30100",
        "RAY_SERVE_DIRECT_INGRESS_MIN_GRPC_PORT": "40000",
        "RAY_SERVE_DIRECT_INGRESS_MAX_GRPC_PORT": "40100",
    }
    for var_name, var_value in env_overrides.items():
        monkeypatch.setenv(var_name, var_value)
    yield
# Fixture to clear NodePortManager between tests
@pytest.fixture(autouse=True)
def reset_node_port_manager():
    """Give each test a clean NodePortManager registry and restore it after.

    ``_node_managers`` is class-level (shared process-wide), so leaking port
    allocations between tests would make port assertions order-dependent.
    """
    # Save original managers
    original_managers = NodePortManager._node_managers.copy()
    # Clear all managers
    NodePortManager._node_managers = {}
    try:
        yield
    finally:
        # Restore even if the test raised or the generator is closed early,
        # so one failing test cannot poison the registry for later tests.
        NodePortManager._node_managers = original_managers
# Fixture for a minimal controller setup
@pytest.fixture
def direct_ingress_controller():
    """Yield a FakeDirectIngressController built from all-fake dependencies."""
    controller = FakeDirectIngressController(
        kv_store=FakeKVStore(),
        long_poll_host=FakeLongPollHost(),
        application_state_manager=FakeApplicationStateManager({}, {}, {}),
        deployment_state_manager=FakeDeploymentStateManager({}),
        proxy_state_manager=FakeProxyStateManager(),
    )
    yield controller
def test_direct_ingress_is_disabled(
    direct_ingress_controller: FakeDirectIngressController,
):
    """Test get_target_groups with direct ingress disabled.

    With no proxies it returns an empty list; once proxies are registered,
    traffic falls back to the proxies at the root "/" prefix.
    """
    direct_ingress_controller._direct_ingress_enabled = False
    target_groups = direct_ingress_controller.get_target_groups()
    assert target_groups == []
    # proxy has nodes
    direct_ingress_controller.proxy_state_manager.add_proxy_details(
        "node1", "10.0.0.1", "proxy1"
    )
    direct_ingress_controller.proxy_state_manager.add_proxy_details(
        "node2", "10.0.0.2", "proxy2"
    )
    target_groups = direct_ingress_controller.get_target_groups()
    # Proxy fallback: HTTP on the default 8000 port, gRPC on 9000.
    expected_target_groups = [
        TargetGroup(
            protocol=RequestProtocol.HTTP,
            route_prefix="/",
            targets=[
                Target(ip="10.0.0.1", port=8000, instance_id="", name="proxy1"),
                Target(ip="10.0.0.2", port=8000, instance_id="", name="proxy2"),
            ],
        ),
        TargetGroup(
            protocol=RequestProtocol.GRPC,
            route_prefix="/",
            targets=[
                Target(ip="10.0.0.1", port=9000, instance_id="", name="proxy1"),
                Target(ip="10.0.0.2", port=9000, instance_id="", name="proxy2"),
            ],
        ),
    ]
    assert target_groups == expected_target_groups
# Test with empty applications
def test_get_target_groups_empty_when_no_apps(
    direct_ingress_controller: FakeDirectIngressController,
):
    """Test that get_target_groups returns empty list when no apps are running."""
    target_groups = direct_ingress_controller.get_target_groups()
    assert target_groups == []
    # proxy has nodes
    direct_ingress_controller.proxy_state_manager.add_proxy_details(
        "node1", "10.0.0.1", "proxy1"
    )
    direct_ingress_controller.proxy_state_manager.add_proxy_details(
        "node2", "10.0.0.2", "proxy2"
    )
    target_groups = direct_ingress_controller.get_target_groups()
    # With no apps deployed, all traffic routes to the proxies at the root
    # "/" prefix (default ports: HTTP 8000, gRPC 9000).
    expected_target_groups = [
        TargetGroup(
            protocol=RequestProtocol.HTTP,
            route_prefix="/",
            targets=[
                Target(ip="10.0.0.1", port=8000, instance_id="", name="proxy1"),
                Target(ip="10.0.0.2", port=8000, instance_id="", name="proxy2"),
            ],
        ),
        TargetGroup(
            protocol=RequestProtocol.GRPC,
            route_prefix="/",
            targets=[
                Target(ip="10.0.0.1", port=9000, instance_id="", name="proxy1"),
                Target(ip="10.0.0.2", port=9000, instance_id="", name="proxy2"),
            ],
        ),
    ]
    assert target_groups == expected_target_groups
# Test with running applications
def test_get_target_groups_with_running_apps(
    direct_ingress_controller: FakeDirectIngressController,
):
    """Test that get_target_groups properly returns target groups for running apps.

    Also verifies that releasing a replica's port removes the corresponding
    target group on the next call.
    """
    # Setup test data
    app_statuses = {
        "app1": {},
        "app2": {},
    }
    route_prefixes = {
        "app1": "/app1",
        "app2": "/app2",
    }
    ingress_deployments = {
        "app1": "app1_ingress",
        "app2": "app2_ingress",
    }
    deployment_id1 = DeploymentID(name="app1_ingress", app_name="app1")
    deployment_id2 = DeploymentID(name="app2_ingress", app_name="app2")
    # Create replica info
    replica_id1 = ReplicaID(unique_id="replica1", deployment_id=deployment_id1)
    replica_id2 = ReplicaID(unique_id="replica2", deployment_id=deployment_id2)
    replica_info1 = RunningReplicaInfo(
        replica_id=replica_id1,
        node_id="node1",
        node_ip="10.0.0.1",
        availability_zone="az1",
        actor_name="replica1",
        max_ongoing_requests=100,
    )
    replica_info2 = RunningReplicaInfo(
        replica_id=replica_id2,
        node_id="node2",
        node_ip="10.0.0.2",
        availability_zone="az2",
        actor_name="replica2",
        max_ongoing_requests=100,
    )
    running_replica_infos = {
        deployment_id1: [replica_info1],
        deployment_id2: [replica_info2],
    }
    # Setup test application state manager
    direct_ingress_controller.application_state_manager = FakeApplicationStateManager(
        app_statuses=app_statuses,
        route_prefixes=route_prefixes,
        ingress_deployments=ingress_deployments,
    )
    # Setup test deployment state manager
    direct_ingress_controller.deployment_state_manager = FakeDeploymentStateManager(
        running_replica_infos=running_replica_infos,
    )
    # setup proxy state manager
    direct_ingress_controller.proxy_state_manager.add_proxy_details(
        "node1", "10.0.0.1", "proxy1"
    )
    direct_ingress_controller.proxy_state_manager.add_proxy_details(
        "node2", "10.0.0.2", "proxy2"
    )
    # Allocate ports for replicas using controller's methods
    http_port1 = direct_ingress_controller.allocate_replica_port(
        "node1", replica_id1.unique_id, RequestProtocol.HTTP
    )
    grpc_port1 = direct_ingress_controller.allocate_replica_port(
        "node1", replica_id1.unique_id, RequestProtocol.GRPC
    )
    http_port2 = direct_ingress_controller.allocate_replica_port(
        "node2", replica_id2.unique_id, RequestProtocol.HTTP
    )
    grpc_port2 = direct_ingress_controller.allocate_replica_port(
        "node2", replica_id2.unique_id, RequestProtocol.GRPC
    )
    # Call get_target_groups
    target_groups = direct_ingress_controller.get_target_groups()
    # Create expected target groups for direct comparison: each app routes
    # directly to its ingress replica on the allocated ports.
    expected_target_groups = [
        TargetGroup(
            protocol=RequestProtocol.HTTP,
            route_prefix="/app1",
            app_name="app1",
            targets=[
                Target(ip="10.0.0.1", port=http_port1, instance_id="", name="replica1"),
            ],
        ),
        TargetGroup(
            protocol=RequestProtocol.GRPC,
            route_prefix="/app1",
            app_name="app1",
            targets=[
                Target(ip="10.0.0.1", port=grpc_port1, instance_id="", name="replica1"),
            ],
        ),
        TargetGroup(
            protocol=RequestProtocol.HTTP,
            route_prefix="/app2",
            app_name="app2",
            targets=[
                Target(ip="10.0.0.2", port=http_port2, instance_id="", name="replica2"),
            ],
        ),
        TargetGroup(
            protocol=RequestProtocol.GRPC,
            route_prefix="/app2",
            app_name="app2",
            targets=[
                Target(ip="10.0.0.2", port=grpc_port2, instance_id="", name="replica2"),
            ],
        ),
    ]
    # Sort both lists to ensure consistent comparison
    target_groups.sort(key=lambda g: (g.protocol, g.route_prefix))
    expected_target_groups.sort(key=lambda g: (g.protocol, g.route_prefix))
    # Direct comparison
    assert target_groups == expected_target_groups
    # now release some ports
    direct_ingress_controller.release_replica_port(
        "node1", replica_id1.unique_id, http_port1, RequestProtocol.HTTP
    )
    direct_ingress_controller.release_replica_port(
        "node2", replica_id2.unique_id, grpc_port2, RequestProtocol.GRPC
    )
    # verify the ports are released
    assert not direct_ingress_controller._is_port_allocated(
        direct_ingress_controller.deployment_state_manager.get_replica_details(
            replica_info1
        ),
        RequestProtocol.HTTP,
    )
    assert not direct_ingress_controller._is_port_allocated(
        direct_ingress_controller.deployment_state_manager.get_replica_details(
            replica_info2
        ),
        RequestProtocol.GRPC,
    )
    # get the target groups again
    target_groups = direct_ingress_controller.get_target_groups()
    # Released ports drop their groups: only app1 gRPC and app2 HTTP remain.
    expected_target_groups = [
        TargetGroup(
            protocol=RequestProtocol.GRPC,
            route_prefix="/app1",
            app_name="app1",
            targets=[
                Target(ip="10.0.0.1", port=grpc_port1, instance_id="", name="replica1"),
            ],
        ),
        TargetGroup(
            protocol=RequestProtocol.HTTP,
            route_prefix="/app2",
            app_name="app2",
            targets=[
                Target(ip="10.0.0.2", port=http_port2, instance_id="", name="replica2"),
            ],
        ),
    ]
    # Sort both lists again for the second comparison
    target_groups.sort(key=lambda g: (g.protocol, g.route_prefix))
    expected_target_groups.sort(key=lambda g: (g.protocol, g.route_prefix))
    assert target_groups == expected_target_groups
def test_get_target_groups_with_port_not_allocated(
    direct_ingress_controller: FakeDirectIngressController,
):
    """Test that get_target_groups correctly filters out targets with unallocated ports."""
    # Setup test data
    app_statuses = {"app1": {}}
    route_prefixes = {"app1": "/app1"}
    ingress_deployments = {"app1": "app1_ingress"}
    # Create replica info for two replicas
    deployment_id1 = DeploymentID(name="app1_ingress", app_name="app1")
    replica_id1 = ReplicaID(unique_id="replica1", deployment_id=deployment_id1)
    replica_id2 = ReplicaID(unique_id="replica2", deployment_id=deployment_id1)
    replica_info1 = RunningReplicaInfo(
        replica_id=replica_id1,
        node_id="node1",
        node_ip="10.0.0.1",
        availability_zone="az1",
        actor_name="replica1",
        max_ongoing_requests=100,
    )
    replica_info2 = RunningReplicaInfo(
        replica_id=replica_id2,
        node_id="node2",
        node_ip="10.0.0.2",
        availability_zone="az2",
        actor_name="replica2",
        max_ongoing_requests=100,
    )
    running_replica_infos = {
        deployment_id1: [replica_info1, replica_info2],
    }
    # Setup test application state manager
    direct_ingress_controller.application_state_manager = FakeApplicationStateManager(
        app_statuses=app_statuses,
        route_prefixes=route_prefixes,
        ingress_deployments=ingress_deployments,
    )
    # Setup test deployment state manager
    direct_ingress_controller.deployment_state_manager = FakeDeploymentStateManager(
        running_replica_infos=running_replica_infos,
    )
    # Only allocate ports for the first replica, leave the second one without ports
    http_port1 = direct_ingress_controller.allocate_replica_port(
        "node1", replica_id1.unique_id, RequestProtocol.HTTP
    )
    grpc_port1 = direct_ingress_controller.allocate_replica_port(
        "node1", replica_id1.unique_id, RequestProtocol.GRPC
    )
    # Call get_target_groups
    target_groups = direct_ingress_controller.get_target_groups()
    # Create expected target groups: replica2 has no allocated ports, so it
    # must not appear as a target in either protocol's group.
    expected_target_groups = [
        TargetGroup(
            protocol=RequestProtocol.HTTP,
            route_prefix="/app1",
            app_name="app1",
            targets=[
                Target(ip="10.0.0.1", port=http_port1, instance_id="", name="replica1"),
            ],
        ),
        TargetGroup(
            protocol=RequestProtocol.GRPC,
            route_prefix="/app1",
            app_name="app1",
            targets=[
                Target(ip="10.0.0.1", port=grpc_port1, instance_id="", name="replica1"),
            ],
        ),
    ]
    # Sort both lists to ensure consistent comparison
    target_groups.sort(key=lambda g: (g.protocol, g.route_prefix))
    expected_target_groups.sort(key=lambda g: (g.protocol, g.route_prefix))
    # Direct comparison
    assert target_groups == expected_target_groups
def test_get_target_groups_only_includes_ingress_deployments(
    direct_ingress_controller: FakeDirectIngressController,
):
    """Test that get_target_groups only includes ingress deployments and not regular deployments."""
    # Setup test data
    app_statuses = {"app1": {}}
    route_prefixes = {"app1": "/app1"}
    ingress_deployments = {"app1": "app1_ingress"}
    # Create replica info for ingress deployment
    ingress_deployment_id = DeploymentID(name="app1_ingress", app_name="app1")
    ingress_replica_id = ReplicaID(
        unique_id="ingress_replica", deployment_id=ingress_deployment_id
    )
    ingress_replica_info = RunningReplicaInfo(
        replica_id=ingress_replica_id,
        node_id="node1",
        node_ip="10.0.0.1",
        availability_zone="az1",
        actor_name="ingress_replica",
        max_ongoing_requests=100,
    )
    # Create replica info for regular non-ingress deployment
    regular_deployment_id = DeploymentID(name="app1_model", app_name="app1")
    regular_replica_id = ReplicaID(
        unique_id="regular_replica", deployment_id=regular_deployment_id
    )
    regular_replica_info = RunningReplicaInfo(
        replica_id=regular_replica_id,
        node_id="node2",
        node_ip="10.0.0.2",
        availability_zone="az2",
        actor_name="regular_replica",
        max_ongoing_requests=100,
    )
    # Set up running replica infos for both deployments
    running_replica_infos = {
        ingress_deployment_id: [ingress_replica_info],
        regular_deployment_id: [regular_replica_info],
    }
    # Setup test application state manager
    direct_ingress_controller.application_state_manager = FakeApplicationStateManager(
        app_statuses=app_statuses,
        route_prefixes=route_prefixes,
        ingress_deployments=ingress_deployments,
    )
    # Setup test deployment state manager
    direct_ingress_controller.deployment_state_manager = FakeDeploymentStateManager(
        running_replica_infos=running_replica_infos,
    )
    # Allocate ports for both replicas (the non-ingress one gets ports too,
    # to prove filtering is by deployment role, not port availability)
    ingress_http_port = direct_ingress_controller.allocate_replica_port(
        "node1", ingress_replica_id.unique_id, RequestProtocol.HTTP
    )
    ingress_grpc_port = direct_ingress_controller.allocate_replica_port(
        "node1", ingress_replica_id.unique_id, RequestProtocol.GRPC
    )
    _ = direct_ingress_controller.allocate_replica_port(
        "node2", regular_replica_id.unique_id, RequestProtocol.HTTP
    )
    _ = direct_ingress_controller.allocate_replica_port(
        "node2", regular_replica_id.unique_id, RequestProtocol.GRPC
    )
    # Call get_target_groups
    target_groups = direct_ingress_controller.get_target_groups()
    # Create expected target groups - only including the ingress deployment
    expected_target_groups = [
        TargetGroup(
            protocol=RequestProtocol.HTTP,
            route_prefix="/app1",
            app_name="app1",
            targets=[
                Target(
                    ip="10.0.0.1",
                    port=ingress_http_port,
                    instance_id="",
                    name="ingress_replica",
                ),
            ],
        ),
        TargetGroup(
            protocol=RequestProtocol.GRPC,
            route_prefix="/app1",
            app_name="app1",
            targets=[
                Target(
                    ip="10.0.0.1",
                    port=ingress_grpc_port,
                    instance_id="",
                    name="ingress_replica",
                ),
            ],
        ),
    ]
    # Sort both lists to ensure consistent comparison
    target_groups.sort(key=lambda g: (g.protocol, g.route_prefix))
    expected_target_groups.sort(key=lambda g: (g.protocol, g.route_prefix))
    # Direct comparison - regular deployment should not be included
    assert target_groups == expected_target_groups
    # Verify all ports are still allocated even though not all are included in target groups
    assert direct_ingress_controller._is_port_allocated(
        direct_ingress_controller.deployment_state_manager.get_replica_details(
            ingress_replica_info
        ),
        RequestProtocol.HTTP,
    )
    assert direct_ingress_controller._is_port_allocated(
        direct_ingress_controller.deployment_state_manager.get_replica_details(
            ingress_replica_info
        ),
        RequestProtocol.GRPC,
    )
    assert direct_ingress_controller._is_port_allocated(
        direct_ingress_controller.deployment_state_manager.get_replica_details(
            regular_replica_info
        ),
        RequestProtocol.HTTP,
    )
    assert direct_ingress_controller._is_port_allocated(
        direct_ingress_controller.deployment_state_manager.get_replica_details(
            regular_replica_info
        ),
        RequestProtocol.GRPC,
    )
def test_get_target_groups_app_with_no_running_replicas(
    direct_ingress_controller: FakeDirectIngressController,
):
    """Test that get_target_groups correctly handles apps with no running replicas.

    An app without ingress replicas should fall back to routing via the
    proxies rather than being dropped entirely.
    """
    # Setup test data for two apps - one with replicas, one without
    app_statuses = {
        "app1": {},  # App with replicas
        "app2": {},  # App with no replicas
    }
    route_prefixes = {
        "app1": "/app1",
        "app2": "/app2",
    }
    ingress_deployments = {
        "app1": "app1_ingress",
        "app2": "app2_ingress",
    }
    # Create replica info for app1's ingress deployment
    deployment_id1 = DeploymentID(name="app1_ingress", app_name="app1")
    replica_id1 = ReplicaID(unique_id="replica1", deployment_id=deployment_id1)
    replica_info1 = RunningReplicaInfo(
        replica_id=replica_id1,
        node_id="node1",
        node_ip="10.0.0.1",
        availability_zone="az1",
        actor_name="replica1",
        max_ongoing_requests=100,
    )
    # Note: No replicas for app2_ingress
    # Running replica infos only for app1
    running_replica_infos = {
        deployment_id1: [replica_info1],
        # No entry for app2_ingress
    }
    # Setup test application state manager
    direct_ingress_controller.application_state_manager = FakeApplicationStateManager(
        app_statuses=app_statuses,
        route_prefixes=route_prefixes,
        ingress_deployments=ingress_deployments,
    )
    # Setup test deployment state manager
    direct_ingress_controller.deployment_state_manager = FakeDeploymentStateManager(
        running_replica_infos=running_replica_infos,
    )
    # setup proxy state manager
    direct_ingress_controller.proxy_state_manager.add_proxy_details(
        "node1", "10.0.0.1", "proxy1"
    )
    direct_ingress_controller.proxy_state_manager.add_proxy_details(
        "node2", "10.0.0.2", "proxy2"
    )
    # Allocate ports for the only existing replica
    http_port = direct_ingress_controller.allocate_replica_port(
        "node1", replica_id1.unique_id, RequestProtocol.HTTP
    )
    grpc_port = direct_ingress_controller.allocate_replica_port(
        "node1", replica_id1.unique_id, RequestProtocol.GRPC
    )
    # Call get_target_groups
    target_groups = direct_ingress_controller.get_target_groups()
    # Expected: app1 routes directly to its replica; app2 (no replicas)
    # falls back to the proxies on the default ports.
    expected_target_groups = [
        TargetGroup(
            protocol=RequestProtocol.HTTP,
            route_prefix="/app1",
            app_name="app1",
            targets=[
                Target(ip="10.0.0.1", port=http_port, instance_id="", name="replica1"),
            ],
        ),
        TargetGroup(
            protocol=RequestProtocol.GRPC,
            route_prefix="/app1",
            app_name="app1",
            targets=[
                Target(ip="10.0.0.1", port=grpc_port, instance_id="", name="replica1"),
            ],
        ),
        TargetGroup(
            protocol=RequestProtocol.HTTP,
            route_prefix="/app2",
            app_name="app2",
            targets=[
                Target(ip="10.0.0.1", port=8000, instance_id="", name="proxy1"),
                Target(ip="10.0.0.2", port=8000, instance_id="", name="proxy2"),
            ],
        ),
        TargetGroup(
            protocol=RequestProtocol.GRPC,
            route_prefix="/app2",
            app_name="app2",
            targets=[
                Target(ip="10.0.0.1", port=9000, instance_id="", name="proxy1"),
                Target(ip="10.0.0.2", port=9000, instance_id="", name="proxy2"),
            ],
        ),
    ]
    # Sort both lists to ensure consistent comparison
    target_groups.sort(key=lambda g: (g.protocol, g.route_prefix))
    expected_target_groups.sort(key=lambda g: (g.protocol, g.route_prefix))
    # Direct comparison - app2 has no replicas, so its traffic is served by
    # the proxy fallback targets rather than direct-ingress targets.
    assert target_groups == expected_target_groups
def test_control_loop_pruning(
    direct_ingress_controller: FakeDirectIngressController,
):
    """Test that the controller loop properly prunes stale node port managers.

    Stale means: an allocation for a replica that is not actually running on
    that node (replica3 allocated on node1), or a node with no running
    replicas at all (node3).
    """
    # Setup replica info for testing
    deployment_id = DeploymentID(name="app1_ingress", app_name="app1")
    replica_id1 = ReplicaID(unique_id="replica1", deployment_id=deployment_id)
    replica_id2 = ReplicaID(unique_id="replica2", deployment_id=deployment_id)
    replica_id3 = ReplicaID(unique_id="replica3", deployment_id=deployment_id)
    replica_info1 = RunningReplicaInfo(
        replica_id=replica_id1,
        node_id="node1",
        node_ip="10.0.0.1",
        availability_zone="az1",
        actor_name="replica1",
        max_ongoing_requests=100,
    )
    replica_info2 = RunningReplicaInfo(
        replica_id=replica_id2,
        node_id="node1",
        node_ip="10.0.0.1",
        availability_zone="az1",
        actor_name="replica2",
        max_ongoing_requests=100,
    )
    replica_info3 = RunningReplicaInfo(
        replica_id=replica_id3,
        node_id="node2",
        node_ip="10.0.0.2",
        availability_zone="az2",
        actor_name="replica3",
        max_ongoing_requests=100,
    )
    running_replica_infos = {
        deployment_id: [replica_info1, replica_info2, replica_info3],
    }
    # Set up controller with the replica info
    direct_ingress_controller.deployment_state_manager = FakeDeploymentStateManager(
        running_replica_infos=running_replica_infos,
    )
    # Allocate ports for testing
    direct_ingress_controller.allocate_replica_port(
        "node1", replica_id1.unique_id, RequestProtocol.HTTP
    )
    direct_ingress_controller.allocate_replica_port(
        "node1", replica_id2.unique_id, RequestProtocol.HTTP
    )
    direct_ingress_controller.allocate_replica_port(
        "node1", replica_id3.unique_id, RequestProtocol.HTTP
    )  # This should be pruned (replica3 actually runs on node2)
    direct_ingress_controller.allocate_replica_port(
        "node2", replica_id3.unique_id, RequestProtocol.HTTP
    )
    direct_ingress_controller.allocate_replica_port(
        "node3", "replica4", RequestProtocol.HTTP
    )  # Node should be pruned (no running replica lives on node3)
    # Verify ports are initially allocated
    assert direct_ingress_controller._is_port_allocated(
        direct_ingress_controller.deployment_state_manager.get_replica_details(
            replica_info1
        ),
        RequestProtocol.HTTP,
    )
    assert direct_ingress_controller._is_port_allocated(
        direct_ingress_controller.deployment_state_manager.get_replica_details(
            replica_info2
        ),
        RequestProtocol.HTTP,
    )
    assert direct_ingress_controller._is_port_allocated(
        direct_ingress_controller.deployment_state_manager.get_replica_details(
            replica_info3
        ),
        RequestProtocol.HTTP,
    )
    # We need to use NodePortManager directly for this check since there is
    # no ReplicaDetails for the stale allocations (replica3 on node1,
    # replica4 on node3).
    node1_manager = NodePortManager.get_node_manager("node1")
    node3_manager = NodePortManager.get_node_manager("node3")
    assert node1_manager.is_port_allocated(replica_id3.unique_id, RequestProtocol.HTTP)
    assert node3_manager.is_port_allocated("replica4", RequestProtocol.HTTP)
    # Call the control loop step - this should trigger port pruning
    direct_ingress_controller._maybe_update_ingress_ports()
    # Verify the active replicas still have their ports
    assert direct_ingress_controller._is_port_allocated(
        direct_ingress_controller.deployment_state_manager.get_replica_details(
            replica_info1
        ),
        RequestProtocol.HTTP,
    )
    assert direct_ingress_controller._is_port_allocated(
        direct_ingress_controller.deployment_state_manager.get_replica_details(
            replica_info2
        ),
        RequestProtocol.HTTP,
    )
    assert direct_ingress_controller._is_port_allocated(
        direct_ingress_controller.deployment_state_manager.get_replica_details(
            replica_info3
        ),
        RequestProtocol.HTTP,
    )
    # Verify stale ports were pruned
    assert not node1_manager.is_port_allocated(
        replica_id3.unique_id, RequestProtocol.HTTP
    )
    assert "node3" not in NodePortManager._node_managers  # Entire node should be pruned
if __name__ == "__main__":
    # Allow running this test module directly (outside the pytest CLI).
    pytest.main()
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/unit/test_controller_direct_ingress.py",
"license": "Apache License 2.0",
"lines": 843,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/serve/_private/controller_health_metrics_tracker.py | import asyncio
import math
import sys
import time
from collections import deque
from dataclasses import dataclass, field
from typing import Deque, List, Optional
from ray._common.pydantic_compat import BaseModel
from ray.serve._private.constants import CONTROL_LOOP_INTERVAL_S
# Number of recent loop iterations to track for rolling averages
_HEALTH_METRICS_HISTORY_SIZE = 100
class DurationStats(BaseModel):
    """Summary statistics (mean/std/min/max) over a set of duration samples."""
    mean: float = 0.0
    std: float = 0.0
    min: float = 0.0
    max: float = 0.0
    @classmethod
    def from_values(cls, values: List[float]) -> "DurationStats":
        """Build stats from *values*; an empty list yields all-zero stats.

        Note: uses the population standard deviation (divides by N, not N-1).
        """
        if not values:
            return cls()
        count = len(values)
        avg = sum(values) / count
        # A single sample has zero spread by definition.
        if count > 1:
            spread = math.sqrt(sum((v - avg) ** 2 for v in values) / count)
        else:
            spread = 0.0
        return cls(mean=avg, std=spread, min=min(values), max=max(values))
class ControllerHealthMetrics(BaseModel):
    """Point-in-time health snapshot of the Ray Serve controller.

    Intended for diagnosing controller performance problems, which
    typically surface as the cluster grows.
    """
    # --- Timestamps ---
    timestamp: float = 0.0  # Wall-clock time this snapshot was taken.
    controller_start_time: float = 0.0  # Wall-clock time the controller started.
    uptime_s: float = 0.0  # Seconds elapsed since controller start.
    # --- Control loop ---
    num_control_loops: int = 0  # Total control-loop iterations executed so far.
    loop_duration_s: Optional[DurationStats] = None  # Rolling-window loop duration stats.
    loops_per_second: float = 0.0  # Average iterations/second over total uptime.
    # --- Sleep / scheduling ---
    last_sleep_duration_s: float = 0.0  # Measured sleep of the last iteration.
    expected_sleep_duration_s: float = 0.0  # Configured sleep (CONTROL_LOOP_INTERVAL_S).
    event_loop_delay_s: float = 0.0  # actual - expected; positive => loop overloaded.
    # --- Event loop health ---
    num_asyncio_tasks: int = 0  # Pending asyncio tasks on the controller's loop.
    # --- Per-component update durations (rolling-window stats, seconds) ---
    deployment_state_update_duration_s: Optional[DurationStats] = None
    application_state_update_duration_s: Optional[DurationStats] = None
    proxy_state_update_duration_s: Optional[DurationStats] = None
    node_update_duration_s: Optional[DurationStats] = None
    # --- Autoscaling metrics propagation delay (rolling-window stats, ms) ---
    # Delay between metric generation and its arrival at the controller.
    handle_metrics_delay_ms: Optional[DurationStats] = None
    replica_metrics_delay_ms: Optional[DurationStats] = None
    # Memory usage of the controller process, in MB.
    process_memory_mb: float = 0.0
@dataclass
class ControllerHealthMetricsTracker:
    """Tracker for collecting controller health metrics over time.

    Raw samples are recorded into bounded deques (at most
    ``_HEALTH_METRICS_HISTORY_SIZE`` recent entries each), so memory use stays
    constant no matter how long the controller runs. ``collect_metrics()``
    folds the current history into a ``ControllerHealthMetrics`` snapshot.
    """
    # Wall-clock time the controller started (defaults to tracker creation).
    controller_start_time: float = field(default_factory=time.time)
    # Rolling history of control-loop iteration durations (seconds).
    loop_durations: Deque[float] = field(
        default_factory=lambda: deque(maxlen=_HEALTH_METRICS_HISTORY_SIZE)
    )
    # Rolling history of autoscaling-metrics propagation delays (milliseconds).
    handle_metrics_delays: Deque[float] = field(
        default_factory=lambda: deque(maxlen=_HEALTH_METRICS_HISTORY_SIZE)
    )
    replica_metrics_delays: Deque[float] = field(
        default_factory=lambda: deque(maxlen=_HEALTH_METRICS_HISTORY_SIZE)
    )
    # Rolling history of per-component update durations (seconds).
    dsm_update_durations: Deque[float] = field(
        default_factory=lambda: deque(maxlen=_HEALTH_METRICS_HISTORY_SIZE)
    )
    asm_update_durations: Deque[float] = field(
        default_factory=lambda: deque(maxlen=_HEALTH_METRICS_HISTORY_SIZE)
    )
    proxy_update_durations: Deque[float] = field(
        default_factory=lambda: deque(maxlen=_HEALTH_METRICS_HISTORY_SIZE)
    )
    node_update_durations: Deque[float] = field(
        default_factory=lambda: deque(maxlen=_HEALTH_METRICS_HISTORY_SIZE)
    )
    # Latest point-in-time values, written directly by the control loop and
    # read back in collect_metrics().
    last_sleep_duration_s: float = 0.0
    num_control_loops: int = 0

    def record_loop_duration(self, duration: float) -> None:
        """Record one control-loop iteration duration (seconds)."""
        self.loop_durations.append(duration)

    def record_handle_metrics_delay(self, delay_ms: float) -> None:
        """Record a handle-metrics propagation delay (milliseconds)."""
        self.handle_metrics_delays.append(delay_ms)

    def record_replica_metrics_delay(self, delay_ms: float) -> None:
        """Record a replica-metrics propagation delay (milliseconds)."""
        self.replica_metrics_delays.append(delay_ms)

    def record_dsm_update_duration(self, duration: float) -> None:
        """Record one deployment-state-manager update duration (seconds)."""
        self.dsm_update_durations.append(duration)

    def record_asm_update_duration(self, duration: float) -> None:
        """Record one application-state-manager update duration (seconds)."""
        self.asm_update_durations.append(duration)

    def record_proxy_update_duration(self, duration: float) -> None:
        """Record one proxy-state update duration (seconds)."""
        self.proxy_update_durations.append(duration)

    def record_node_update_duration(self, duration: float) -> None:
        """Record one node update duration (seconds)."""
        self.node_update_durations.append(duration)

    def collect_metrics(self) -> "ControllerHealthMetrics":
        """Collect and return current health metrics.

        Returns:
            A ControllerHealthMetrics snapshot built from the rolling sample
            history and the latest point-in-time values.
        """
        now = time.time()
        # Rolling-window stats over recent control-loop iterations.
        loop_duration_stats = DurationStats.from_values(list(self.loop_durations))
        # Average throughput over the controller's entire lifetime.
        uptime = now - self.controller_start_time
        loops_per_second = self.num_control_loops / uptime if uptime > 0 else 0.0
        # Positive delay means the last sleep overshot its target, i.e. the
        # asyncio event loop could not wake the control loop on time.
        event_loop_delay = max(
            0.0, self.last_sleep_duration_s - CONTROL_LOOP_INTERVAL_S
        )
        # Prefer get_running_loop() over the deprecated get_event_loop():
        # get_event_loop() can silently create a brand-new (empty) event loop
        # when called from a thread with no running loop, leaking a loop while
        # still reporting a meaningless task count.
        try:
            num_asyncio_tasks = len(asyncio.all_tasks(asyncio.get_running_loop()))
        except RuntimeError:
            # No event loop running in this thread.
            num_asyncio_tasks = 0
        # Autoscaling metrics propagation delay stats (milliseconds).
        handle_delay_stats = DurationStats.from_values(list(self.handle_metrics_delays))
        replica_delay_stats = DurationStats.from_values(
            list(self.replica_metrics_delays)
        )
        # Per-component update duration stats (seconds).
        dsm_update_stats = DurationStats.from_values(list(self.dsm_update_durations))
        asm_update_stats = DurationStats.from_values(list(self.asm_update_durations))
        proxy_update_stats = DurationStats.from_values(
            list(self.proxy_update_durations)
        )
        node_update_stats = DurationStats.from_values(list(self.node_update_durations))
        # Peak resident memory in MB. getrusage reports ru_maxrss in bytes on
        # macOS but kilobytes on Linux; the resource module is Unix-only, so
        # fall back to 0.0 on Windows.
        try:
            import resource

            rusage = resource.getrusage(resource.RUSAGE_SELF)
            process_memory_mb = (
                rusage.ru_maxrss / (1024 * 1024)  # Convert bytes to MB on macOS
                if sys.platform == "darwin"
                else rusage.ru_maxrss / 1024  # Convert KB to MB on Linux
            )
        except ImportError:
            # resource module not available on Windows.
            process_memory_mb = 0.0
        return ControllerHealthMetrics(
            timestamp=now,
            controller_start_time=self.controller_start_time,
            uptime_s=uptime,
            num_control_loops=self.num_control_loops,
            loop_duration_s=loop_duration_stats,
            loops_per_second=loops_per_second,
            last_sleep_duration_s=self.last_sleep_duration_s,
            expected_sleep_duration_s=CONTROL_LOOP_INTERVAL_S,
            event_loop_delay_s=event_loop_delay,
            num_asyncio_tasks=num_asyncio_tasks,
            deployment_state_update_duration_s=dsm_update_stats,
            application_state_update_duration_s=asm_update_stats,
            proxy_state_update_duration_s=proxy_update_stats,
            node_update_duration_s=node_update_stats,
            handle_metrics_delay_ms=handle_delay_stats,
            replica_metrics_delay_ms=replica_delay_stats,
            process_memory_mb=process_memory_mb,
        )
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/_private/controller_health_metrics_tracker.py",
"license": "Apache License 2.0",
"lines": 174,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.