sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
ray-project/ray:python/ray/data/tests/unit/expressions/test_predicate.py | """Tests for predicate expression operations.
This module tests:
- Null predicates: is_null(), is_not_null()
- Membership predicates: is_in(), not_in()
"""
import pandas as pd
import pytest
from ray.data._internal.planner.plan_expression.expression_evaluator import eval_expr
from ray.data.expressions import BinaryExpr, Operation, UnaryExpr, col, lit
# ββββββββββββββββββββββββββββββββββββββ
# Null Predicate Operations
# ββββββββββββββββββββββββββββββββββββββ
class TestIsNull:
"""Tests for is_null() predicate."""
@pytest.fixture
def sample_data(self):
"""Sample data with null values for null predicate tests."""
return pd.DataFrame(
{
"value": [1.0, None, 3.0, None, 5.0],
"name": ["Alice", None, "Charlie", "Diana", None],
}
)
def test_is_null_numeric(self, sample_data):
"""Test is_null on numeric column."""
expr = col("value").is_null()
assert isinstance(expr, UnaryExpr)
assert expr.op == Operation.IS_NULL
result = eval_expr(expr, sample_data)
expected = pd.Series([False, True, False, True, False])
pd.testing.assert_series_equal(
result.reset_index(drop=True), expected, check_names=False
)
def test_is_null_string(self, sample_data):
"""Test is_null on string column."""
expr = col("name").is_null()
result = eval_expr(expr, sample_data)
expected = pd.Series([False, True, False, False, True])
pd.testing.assert_series_equal(
result.reset_index(drop=True), expected, check_names=False
)
def test_is_null_structural_equality(self):
"""Test structural equality for is_null expressions."""
expr1 = col("value").is_null()
expr2 = col("value").is_null()
expr3 = col("other").is_null()
assert expr1.structurally_equals(expr2)
assert not expr1.structurally_equals(expr3)
class TestIsNotNull:
"""Tests for is_not_null() predicate."""
@pytest.fixture
def sample_data(self):
"""Sample data with null values."""
return pd.DataFrame(
{
"value": [1.0, None, 3.0, None, 5.0],
"name": ["Alice", None, "Charlie", "Diana", None],
}
)
def test_is_not_null_numeric(self, sample_data):
"""Test is_not_null on numeric column."""
expr = col("value").is_not_null()
assert isinstance(expr, UnaryExpr)
assert expr.op == Operation.IS_NOT_NULL
result = eval_expr(expr, sample_data)
expected = pd.Series([True, False, True, False, True])
pd.testing.assert_series_equal(
result.reset_index(drop=True), expected, check_names=False
)
def test_is_not_null_string(self, sample_data):
"""Test is_not_null on string column."""
expr = col("name").is_not_null()
result = eval_expr(expr, sample_data)
expected = pd.Series([True, False, True, True, False])
pd.testing.assert_series_equal(
result.reset_index(drop=True), expected, check_names=False
)
def test_is_not_null_structural_equality(self):
"""Test structural equality for is_not_null expressions."""
expr1 = col("value").is_not_null()
expr2 = col("value").is_not_null()
expr3 = col("other").is_not_null()
assert expr1.structurally_equals(expr2)
assert not expr1.structurally_equals(expr3)
class TestNullPredicateCombinations:
"""Tests for null predicates combined with other operations."""
@pytest.fixture
def sample_data(self):
"""Sample data with null values and other columns."""
return pd.DataFrame(
{
"value": [10.0, None, 30.0, None, 50.0],
"threshold": [5.0, 20.0, 25.0, 10.0, 40.0],
}
)
def test_null_aware_comparison(self, sample_data):
"""Test null-aware comparison (is_not_null AND comparison)."""
# Filter: value is not null AND value > threshold
expr = col("value").is_not_null() & (col("value") > col("threshold"))
result = eval_expr(expr, sample_data)
expected = pd.Series([True, False, True, False, True])
pd.testing.assert_series_equal(
result.reset_index(drop=True), expected, check_names=False
)
def test_is_null_or_condition(self, sample_data):
"""Test is_null combined with OR."""
# value is null OR value > 40
expr = col("value").is_null() | (col("value") > 40)
result = eval_expr(expr, sample_data)
expected = pd.Series([False, True, False, True, True])
pd.testing.assert_series_equal(
result.reset_index(drop=True), expected, check_names=False
)
# ββββββββββββββββββββββββββββββββββββββ
# Membership Predicate Operations
# ββββββββββββββββββββββββββββββββββββββ
class TestIsIn:
"""Tests for is_in() predicate."""
@pytest.fixture
def sample_data(self):
"""Sample data for membership tests."""
return pd.DataFrame(
{
"status": ["active", "inactive", "pending", "active", "deleted"],
"category": ["A", "B", "C", "A", "D"],
"value": [1, 2, 3, 4, 5],
}
)
def test_is_in_string_list(self, sample_data):
"""Test is_in with string list."""
expr = col("status").is_in(["active", "pending"])
assert isinstance(expr, BinaryExpr)
assert expr.op == Operation.IN
result = eval_expr(expr, sample_data)
expected = pd.Series([True, False, True, True, False])
pd.testing.assert_series_equal(
result.reset_index(drop=True), expected, check_names=False
)
def test_is_in_single_value_list(self, sample_data):
"""Test is_in with single-value list."""
expr = col("status").is_in(["active"])
result = eval_expr(expr, sample_data)
expected = pd.Series([True, False, False, True, False])
pd.testing.assert_series_equal(
result.reset_index(drop=True), expected, check_names=False
)
def test_is_in_numeric_list(self, sample_data):
"""Test is_in with numeric list."""
expr = col("value").is_in([1, 3, 5])
result = eval_expr(expr, sample_data)
expected = pd.Series([True, False, True, False, True])
pd.testing.assert_series_equal(
result.reset_index(drop=True), expected, check_names=False
)
def test_is_in_empty_list(self, sample_data):
"""Test is_in with empty list (should return all False)."""
expr = col("status").is_in([])
result = eval_expr(expr, sample_data)
expected = pd.Series([False, False, False, False, False])
pd.testing.assert_series_equal(
result.reset_index(drop=True), expected, check_names=False
)
def test_is_in_with_literal_expr(self, sample_data):
"""Test is_in with LiteralExpr containing list."""
values_expr = lit(["A", "C"])
expr = col("category").is_in(values_expr)
result = eval_expr(expr, sample_data)
expected = pd.Series([True, False, True, True, False])
pd.testing.assert_series_equal(
result.reset_index(drop=True), expected, check_names=False
)
def test_is_in_structural_equality(self):
"""Test structural equality for is_in expressions."""
expr1 = col("status").is_in(["active", "pending"])
expr2 = col("status").is_in(["active", "pending"])
expr3 = col("status").is_in(["active"])
assert expr1.structurally_equals(expr2)
assert not expr1.structurally_equals(expr3)
class TestNotIn:
"""Tests for not_in() predicate."""
@pytest.fixture
def sample_data(self):
"""Sample data for membership tests."""
return pd.DataFrame(
{
"status": ["active", "inactive", "pending", "active", "deleted"],
"value": [1, 2, 3, 4, 5],
}
)
def test_not_in_string_list(self, sample_data):
"""Test not_in with string list."""
expr = col("status").not_in(["inactive", "deleted"])
assert isinstance(expr, BinaryExpr)
assert expr.op == Operation.NOT_IN
result = eval_expr(expr, sample_data)
expected = pd.Series([True, False, True, True, False])
pd.testing.assert_series_equal(
result.reset_index(drop=True), expected, check_names=False
)
def test_not_in_numeric_list(self, sample_data):
"""Test not_in with numeric list."""
expr = col("value").not_in([2, 4])
result = eval_expr(expr, sample_data)
expected = pd.Series([True, False, True, False, True])
pd.testing.assert_series_equal(
result.reset_index(drop=True), expected, check_names=False
)
def test_not_in_empty_list(self, sample_data):
"""Test not_in with empty list (should return all True)."""
expr = col("status").not_in([])
result = eval_expr(expr, sample_data)
expected = pd.Series([True, True, True, True, True])
pd.testing.assert_series_equal(
result.reset_index(drop=True), expected, check_names=False
)
def test_not_in_structural_equality(self):
"""Test structural equality for not_in expressions."""
expr1 = col("status").not_in(["deleted"])
expr2 = col("status").not_in(["deleted"])
expr3 = col("status").not_in(["deleted", "inactive"])
assert expr1.structurally_equals(expr2)
assert not expr1.structurally_equals(expr3)
class TestMembershipWithNulls:
"""Tests for membership predicates with null values."""
@pytest.fixture
def sample_data(self):
"""Sample data with null values for membership tests."""
return pd.DataFrame(
{
"status": ["active", None, "pending", None, "deleted"],
"value": [1, None, 3, None, 5],
}
)
def test_is_in_with_nulls_in_data(self, sample_data):
"""Test is_in when data contains nulls."""
expr = col("status").is_in(["active", "pending"])
result = eval_expr(expr, sample_data)
# Nulls should return False (null is not in any list)
expected = pd.Series([True, False, True, False, False])
pd.testing.assert_series_equal(
result.reset_index(drop=True), expected, check_names=False
)
def test_not_in_with_nulls_in_data(self, sample_data):
"""Test not_in when data contains nulls."""
expr = col("status").not_in(["active"])
result = eval_expr(expr, sample_data)
# Nulls should return True (null is not in the exclusion list)
expected = pd.Series([False, True, True, True, True])
pd.testing.assert_series_equal(
result.reset_index(drop=True), expected, check_names=False
)
class TestMembershipCombinations:
"""Tests for membership predicates combined with other operations."""
@pytest.fixture
def sample_data(self):
"""Sample data for combination tests."""
return pd.DataFrame(
{
"status": ["active", "inactive", "pending", "active", "deleted"],
"priority": ["high", "low", "high", "medium", "low"],
"value": [100, 50, 75, 200, 25],
}
)
def test_is_in_and_comparison(self, sample_data):
"""Test is_in combined with comparison."""
# status in ["active", "pending"] AND value > 50
expr = col("status").is_in(["active", "pending"]) & (col("value") > 50)
result = eval_expr(expr, sample_data)
expected = pd.Series([True, False, True, True, False])
pd.testing.assert_series_equal(
result.reset_index(drop=True), expected, check_names=False
)
def test_multiple_is_in(self, sample_data):
"""Test multiple is_in predicates."""
# status in ["active"] AND priority in ["high", "medium"]
expr = col("status").is_in(["active"]) & col("priority").is_in(
["high", "medium"]
)
result = eval_expr(expr, sample_data)
expected = pd.Series([True, False, False, True, False])
pd.testing.assert_series_equal(
result.reset_index(drop=True), expected, check_names=False
)
def test_is_in_or_not_in(self, sample_data):
"""Test is_in combined with not_in."""
# status in ["active"] OR priority not_in ["low"]
expr = col("status").is_in(["active"]) | col("priority").not_in(["low"])
result = eval_expr(expr, sample_data)
expected = pd.Series([True, False, True, True, False])
pd.testing.assert_series_equal(
result.reset_index(drop=True), expected, check_names=False
)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/unit/expressions/test_predicate.py",
"license": "Apache License 2.0",
"lines": 295,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:doc/source/serve/doc_code/mlflow_model_registry_integration.py | # __train_model_start__
from sklearn.datasets import make_regression
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
import mlflow
import mlflow.sklearn
import mlflow.pyfunc
from mlflow.entities import LoggedModelStatus
from mlflow.models import infer_signature
import numpy as np
def train_and_register_model():
# Initialize model in PENDING state
logged_model = mlflow.initialize_logged_model(
name="sk-learn-random-forest-reg-model",
model_type="sklearn",
tags={"model_type": "random_forest"},
)
try:
with mlflow.start_run() as run:
X, y = make_regression(n_features=4, n_informative=2, random_state=0, shuffle=False)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
params = {"max_depth": 2, "random_state": 42}
# Best Practice: Use sklearn Pipeline to persist preprocessing
# This ensures training and serving transformations stay aligned
pipeline = Pipeline([
("scaler", StandardScaler()),
("regressor", RandomForestRegressor(**params))
])
pipeline.fit(X_train, y_train)
# Log parameters and metrics
mlflow.log_params(params)
y_pred = pipeline.predict(X_test)
mlflow.log_metrics({"mse": mean_squared_error(y_test, y_pred)})
# Best Practice: Infer model signature for input validation
# Prevents silent failures from mismatched feature order or missing columns
signature = infer_signature(X_train, y_pred)
# Best Practice: Pin dependency versions explicitly
# Ensures identical behavior across training, evaluation, and serving
pip_requirements = [
f"scikit-learn=={__import__('sklearn').__version__}",
f"numpy=={np.__version__}",
]
# Log the sklearn pipeline with signature and dependencies
mlflow.sklearn.log_model(
sk_model=pipeline,
name="sklearn-model",
input_example=X_train[:1],
signature=signature,
pip_requirements=pip_requirements,
registered_model_name="sk-learn-random-forest-reg-model",
model_id=logged_model.model_id,
)
# Finalize model as READY
mlflow.finalize_logged_model(logged_model.model_id, LoggedModelStatus.READY)
mlflow.set_logged_model_tags(
logged_model.model_id,
tags={"production": "true"},
)
except Exception as e:
# Mark model as FAILED if issues occur
mlflow.finalize_logged_model(logged_model.model_id, LoggedModelStatus.FAILED)
raise
# Retrieve and work with the logged model
final_model = mlflow.get_logged_model(logged_model.model_id)
print(f"Model {final_model.name} is {final_model.status}")
# __train_model_end__
# __deployment_start__
from ray import serve
import mlflow.pyfunc
import numpy as np
@serve.deployment
class MLflowModelDeployment:
def __init__(self):
# Search for models with production tag
models = mlflow.search_logged_models(
filter_string="tags.production='true' AND name='sk-learn-random-forest-reg-model'",
order_by=[{"field_name": "creation_time", "ascending": False}],
)
if models.empty:
raise ValueError("No model with production tag found")
# Get the most recent production model
model_row = models.iloc[0]
artifact_location = model_row["artifact_location"]
# Best Practice: Load model once during initialization (warm-start)
# This eliminates first-request latency spikes
self.model = mlflow.pyfunc.load_model(artifact_location)
# Pre-warm the model with a dummy prediction
dummy_input = np.zeros((1, 4))
_ = self.model.predict(dummy_input)
async def __call__(self, request):
data = await request.json()
features = np.array(data["features"])
# MLflow validates input against the logged signature automatically
prediction = self.model.predict(features)
return {"prediction": prediction.tolist()}
app = MLflowModelDeployment.bind()
# __deployment_end__
if __name__ == "__main__":
import requests
from ray import serve
train_and_register_model()
serve.run(app)
# Test prediction
response = requests.post("http://localhost:8000/", json={"features": [[0.1, 0.2, 0.3, 0.4]]})
print(response.json())
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/doc_code/mlflow_model_registry_integration.py",
"license": "Apache License 2.0",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:rllib/algorithms/tqc/default_tqc_rl_module.py | """
Default TQC RLModule.
TQC uses distributional critics with quantile regression.
"""
from typing import List, Tuple
from ray.rllib.core.learner.utils import make_target_network
from ray.rllib.core.rl_module.apis import InferenceOnlyAPI, QNetAPI, TargetNetworkAPI
from ray.rllib.core.rl_module.rl_module import RLModule
from ray.rllib.utils.annotations import (
override,
)
from ray.rllib.utils.typing import NetworkType
from ray.util.annotations import DeveloperAPI
@DeveloperAPI
class DefaultTQCRLModule(RLModule, InferenceOnlyAPI, TargetNetworkAPI, QNetAPI):
"""RLModule for the TQC (Truncated Quantile Critics) algorithm.
TQC extends SAC by using distributional critics with quantile regression.
Each critic outputs n_quantiles values instead of a single Q-value.
Architecture:
- Policy (Actor): Same as SAC
[obs] -> [pi_encoder] -> [pi_head] -> [action_dist_inputs]
- Quantile Critics: Multiple critics, each outputting n_quantiles
[obs, action] -> [qf_encoder_i] -> [qf_head_i] -> [n_quantiles values]
- Target Quantile Critics: Target networks for each critic
[obs, action] -> [target_qf_encoder_i] -> [target_qf_head_i] -> [n_quantiles]
"""
@override(RLModule)
def setup(self):
# TQC-specific parameters from model_config
self.n_quantiles = self.model_config.get("n_quantiles", 25)
self.n_critics = self.model_config.get("n_critics", 2)
self.top_quantiles_to_drop_per_net = self.model_config.get(
"top_quantiles_to_drop_per_net", 2
)
# Total quantiles across all critics
self.quantiles_total = self.n_quantiles * self.n_critics
# Build the encoder for the policy (same as SAC)
self.pi_encoder = self.catalog.build_encoder(framework=self.framework)
if not self.inference_only or self.framework != "torch":
# Build multiple Q-function encoders and heads
self.qf_encoders = []
self.qf_heads = []
for i in range(self.n_critics):
qf_encoder = self.catalog.build_qf_encoder(framework=self.framework)
qf_head = self.catalog.build_qf_head(framework=self.framework)
self.qf_encoders.append(qf_encoder)
self.qf_heads.append(qf_head)
# Build the policy head (same as SAC)
self.pi = self.catalog.build_pi_head(framework=self.framework)
@override(TargetNetworkAPI)
def make_target_networks(self):
"""Creates target networks for all quantile critics."""
self.target_qf_encoders = []
self.target_qf_heads = []
for i in range(self.n_critics):
target_encoder = make_target_network(self.qf_encoders[i])
target_head = make_target_network(self.qf_heads[i])
self.target_qf_encoders.append(target_encoder)
self.target_qf_heads.append(target_head)
@override(InferenceOnlyAPI)
def get_non_inference_attributes(self) -> List[str]:
"""Returns attributes not needed for inference."""
return [
"qf_encoders",
"qf_heads",
"target_qf_encoders",
"target_qf_heads",
]
@override(TargetNetworkAPI)
def get_target_network_pairs(self) -> List[Tuple[NetworkType, NetworkType]]:
"""Returns pairs of (network, target_network) for updating targets."""
pairs = []
for i in range(self.n_critics):
pairs.append((self.qf_encoders[i], self.target_qf_encoders[i]))
pairs.append((self.qf_heads[i], self.target_qf_heads[i]))
return pairs
@override(RLModule)
def get_initial_state(self) -> dict:
"""TQC does not support RNNs yet."""
return {}
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/algorithms/tqc/default_tqc_rl_module.py",
"license": "Apache License 2.0",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:rllib/algorithms/tqc/tests/test_tqc.py | """Tests for the TQC (Truncated Quantile Critics) algorithm."""
import unittest
import gymnasium as gym
import numpy as np
from gymnasium.spaces import Box, Dict, Discrete, Tuple
import ray
from ray import tune
from ray.rllib.algorithms import tqc
from ray.rllib.connectors.env_to_module.flatten_observations import FlattenObservations
from ray.rllib.examples.envs.classes.random_env import RandomEnv
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.test_utils import check_train_results_new_api_stack
torch, _ = try_import_torch()
class SimpleEnv(gym.Env):
"""Simple continuous control environment for testing."""
def __init__(self, config):
self.action_space = Box(0.0, 1.0, (1,))
self.observation_space = Box(0.0, 1.0, (1,))
self.max_steps = config.get("max_steps", 100)
self.state = None
self.steps = None
def reset(self, *, seed=None, options=None):
self.state = self.observation_space.sample()
self.steps = 0
return self.state, {}
def step(self, action):
self.steps += 1
# Reward is 1.0 - (max(actions) - state).
[rew] = 1.0 - np.abs(np.max(action) - self.state)
terminated = False
truncated = self.steps >= self.max_steps
self.state = self.observation_space.sample()
return self.state, rew, terminated, truncated, {}
class TestTQC(unittest.TestCase):
"""Test cases for TQC algorithm."""
@classmethod
def setUpClass(cls) -> None:
np.random.seed(42)
torch.manual_seed(42)
ray.init()
@classmethod
def tearDownClass(cls) -> None:
ray.shutdown()
def setUp(self) -> None:
"""Set up base config for tests."""
self.base_config = (
tqc.TQCConfig()
.training(
n_step=3,
n_quantiles=25,
n_critics=2,
top_quantiles_to_drop_per_net=2,
replay_buffer_config={
"capacity": 40000,
},
num_steps_sampled_before_learning_starts=0,
store_buffer_in_checkpoints=True,
train_batch_size=10,
)
.env_runners(
num_env_runners=0,
rollout_fragment_length=10,
)
)
def test_tqc_compilation(self):
"""Test whether TQC can be built and trained."""
config = self.base_config.copy().env_runners(
env_to_module_connector=(lambda env, spaces, device: FlattenObservations()),
)
num_iterations = 1
image_space = Box(-1.0, 1.0, shape=(84, 84, 3))
simple_space = Box(-1.0, 1.0, shape=(3,))
tune.register_env(
"random_dict_env_tqc",
lambda _: RandomEnv(
{
"observation_space": Dict(
{
"a": simple_space,
"b": Discrete(2),
"c": image_space,
}
),
"action_space": Box(-1.0, 1.0, shape=(1,)),
}
),
)
tune.register_env(
"random_tuple_env_tqc",
lambda _: RandomEnv(
{
"observation_space": Tuple(
[simple_space, Discrete(2), image_space]
),
"action_space": Box(-1.0, 1.0, shape=(1,)),
}
),
)
# Test for different env types (dict and tuple observations).
for env in [
"random_dict_env_tqc",
"random_tuple_env_tqc",
]:
print("Env={}".format(env))
config.environment(env)
algo = config.build()
for i in range(num_iterations):
results = algo.train()
check_train_results_new_api_stack(results)
print(results)
algo.stop()
def test_tqc_simple_env(self):
"""Test TQC on a simple continuous control environment."""
tune.register_env("simple_env_tqc", lambda config: SimpleEnv(config))
config = (
tqc.TQCConfig()
.environment("simple_env_tqc", env_config={"max_steps": 50})
.training(
n_quantiles=10,
n_critics=2,
top_quantiles_to_drop_per_net=1,
replay_buffer_config={
"capacity": 10000,
},
num_steps_sampled_before_learning_starts=0,
train_batch_size=32,
)
.env_runners(
num_env_runners=0,
rollout_fragment_length=10,
)
)
algo = config.build()
for _ in range(2):
results = algo.train()
check_train_results_new_api_stack(results)
print(results)
algo.stop()
def test_tqc_quantile_parameters(self):
"""Test TQC with different quantile configurations."""
tune.register_env("simple_env_tqc_params", lambda config: SimpleEnv(config))
# Test with different n_quantiles and n_critics
for n_quantiles, n_critics, top_drop in [
(5, 2, 1),
(25, 3, 2),
(50, 2, 5),
]:
print(
f"Testing n_quantiles={n_quantiles}, n_critics={n_critics}, "
f"top_drop={top_drop}"
)
config = (
tqc.TQCConfig()
.environment("simple_env_tqc_params", env_config={"max_steps": 20})
.training(
n_quantiles=n_quantiles,
n_critics=n_critics,
top_quantiles_to_drop_per_net=top_drop,
replay_buffer_config={
"capacity": 5000,
},
num_steps_sampled_before_learning_starts=0,
train_batch_size=16,
)
.env_runners(
num_env_runners=0,
rollout_fragment_length=5,
)
)
algo = config.build()
results = algo.train()
check_train_results_new_api_stack(results)
algo.stop()
def test_tqc_config_validation(self):
"""Test that TQC config validation works correctly."""
# Test invalid n_quantiles
with self.assertRaises(ValueError):
config = tqc.TQCConfig().training(n_quantiles=0)
config.validate()
# Test invalid n_critics
with self.assertRaises(ValueError):
config = tqc.TQCConfig().training(n_critics=0)
config.validate()
# Test dropping too many quantiles
with self.assertRaises(ValueError):
# With n_quantiles=5, n_critics=2, total=10
# Dropping 6 per net = 12 total, which is > 10
config = tqc.TQCConfig().training(
n_quantiles=5,
n_critics=2,
top_quantiles_to_drop_per_net=6,
)
config.validate()
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/algorithms/tqc/tests/test_tqc.py",
"license": "Apache License 2.0",
"lines": 197,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:rllib/algorithms/tqc/torch/default_tqc_torch_rl_module.py | """
PyTorch implementation of the TQC RLModule.
"""
from typing import Any, Dict
from ray.rllib.algorithms.sac.sac_learner import QF_PREDS, QF_TARGET_NEXT
from ray.rllib.algorithms.tqc.default_tqc_rl_module import DefaultTQCRLModule
from ray.rllib.algorithms.tqc.tqc_catalog import TQCCatalog
from ray.rllib.core.columns import Columns
from ray.rllib.core.learner.utils import make_target_network
from ray.rllib.core.models.base import ENCODER_OUT
from ray.rllib.core.rl_module.torch.torch_rl_module import TorchRLModule
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_torch
torch, nn = try_import_torch()
class DefaultTQCTorchRLModule(TorchRLModule, DefaultTQCRLModule):
"""PyTorch implementation of the TQC RLModule.
TQC uses multiple quantile critics, each outputting n_quantiles values.
"""
framework: str = "torch"
def __init__(self, *args, **kwargs):
catalog_class = kwargs.pop("catalog_class", None)
if catalog_class is None:
catalog_class = TQCCatalog
super().__init__(*args, **kwargs, catalog_class=catalog_class)
@override(DefaultTQCRLModule)
def setup(self):
# Call parent setup to initialize TQC-specific parameters and build networks
super().setup()
# Convert lists to nn.ModuleList for proper PyTorch parameter tracking
if not self.inference_only or self.framework != "torch":
self.qf_encoders = nn.ModuleList(self.qf_encoders)
self.qf_heads = nn.ModuleList(self.qf_heads)
@override(DefaultTQCRLModule)
def make_target_networks(self):
"""Creates target networks for all quantile critics."""
self.target_qf_encoders = nn.ModuleList()
self.target_qf_heads = nn.ModuleList()
for i in range(self.n_critics):
target_encoder = make_target_network(self.qf_encoders[i])
target_head = make_target_network(self.qf_heads[i])
self.target_qf_encoders.append(target_encoder)
self.target_qf_heads.append(target_head)
@override(TorchRLModule)
def _forward_inference(self, batch: Dict[str, Any]) -> Dict[str, Any]:
"""Forward pass for inference (action selection).
Same as SAC - samples actions from the policy.
"""
output = {}
# Extract features from observations
pi_encoder_out = self.pi_encoder(batch)
pi_out = self.pi(pi_encoder_out[ENCODER_OUT])
output[Columns.ACTION_DIST_INPUTS] = pi_out
return output
@override(TorchRLModule)
def _forward_exploration(self, batch: Dict[str, Any], **kwargs) -> Dict[str, Any]:
"""Forward pass for exploration.
Same as inference for TQC (stochastic policy).
"""
return self._forward_inference(batch)
@override(TorchRLModule)
def _forward_train(self, batch: Dict[str, Any]) -> Dict[str, Any]:
"""Forward pass for training.
Computes:
- Action distribution inputs from current observations
- Q-values (quantiles) for current state-action pairs
- Q-values (quantiles) for next states with resampled actions
"""
output = {}
# Get action distribution inputs for current observations
pi_encoder_out = self.pi_encoder(batch)
pi_out = self.pi(pi_encoder_out[ENCODER_OUT])
output[Columns.ACTION_DIST_INPUTS] = pi_out
# Sample actions from current policy for current observations
action_dist_class = self.catalog.get_action_dist_cls(framework=self.framework)
action_dist_curr = action_dist_class.from_logits(pi_out)
actions_curr = action_dist_curr.rsample()
logp_curr = action_dist_curr.logp(actions_curr)
output["actions_curr"] = actions_curr
output["logp_curr"] = logp_curr
# Compute Q-values for actions from replay buffer
qf_out = self._qf_forward_all_critics(
batch[Columns.OBS],
batch[Columns.ACTIONS],
use_target=False,
)
output[QF_PREDS] = qf_out # (batch, n_critics, n_quantiles)
# Compute Q-values for resampled actions (for actor loss)
qf_curr = self._qf_forward_all_critics(
batch[Columns.OBS],
actions_curr,
use_target=False,
)
output["qf_curr"] = qf_curr
# For next state Q-values (target computation)
if Columns.NEXT_OBS in batch:
# Get action distribution for next observations
pi_encoder_out_next = self.pi_encoder(
{Columns.OBS: batch[Columns.NEXT_OBS]}
)
pi_out_next = self.pi(pi_encoder_out_next[ENCODER_OUT])
# Sample actions for next state
action_dist_next = action_dist_class.from_logits(pi_out_next)
actions_next = action_dist_next.rsample()
logp_next = action_dist_next.logp(actions_next)
output["actions_next"] = actions_next
output["logp_next"] = logp_next
# Compute target Q-values for next state
qf_target_next = self._qf_forward_all_critics(
batch[Columns.NEXT_OBS],
actions_next,
use_target=True,
)
output[QF_TARGET_NEXT] = qf_target_next
return output
def _qf_forward_all_critics(
self,
obs: torch.Tensor,
actions: torch.Tensor,
use_target: bool = False,
) -> torch.Tensor:
"""Forward pass through all critic networks.
Args:
obs: Observations tensor.
actions: Actions tensor.
use_target: Whether to use target networks.
Returns:
Stacked quantile values from all critics.
Shape: (batch_size, n_critics, n_quantiles)
"""
# Note: obs should already be a flat tensor at this point.
# Dict observations are handled by connectors (e.g., FlattenObservations)
# before reaching this method.
# Create batch dict for encoder input.
batch_dict = {Columns.OBS: obs, Columns.ACTIONS: actions}
encoders = self.target_qf_encoders if use_target else self.qf_encoders
heads = self.target_qf_heads if use_target else self.qf_heads
quantiles_list = []
for encoder, head in zip(encoders, heads):
encoder_out = encoder(batch_dict)
quantiles = head(encoder_out[ENCODER_OUT]) # (batch, n_quantiles)
quantiles_list.append(quantiles)
# Stack: (batch, n_critics, n_quantiles)
return torch.stack(quantiles_list, dim=1)
@override(DefaultTQCRLModule)
def compute_q_values(self, batch: Dict[str, Any]) -> Dict[str, Any]:
"""Computes Q-values (mean of quantiles) for the given batch.
Args:
batch: Dict containing observations and actions.
Returns:
Mean Q-value across all quantiles and critics.
"""
obs = batch[Columns.OBS]
actions = batch[Columns.ACTIONS]
# Get all quantiles from all critics
quantiles = self._qf_forward_all_critics(obs, actions, use_target=False)
# Return mean across all quantiles and critics
return quantiles.mean(dim=(1, 2))
@override(DefaultTQCRLModule)
def forward_target(self, batch: Dict[str, Any]) -> Dict[str, Any]:
"""Forward pass through target networks.
Args:
batch: Dict containing observations and actions.
Returns:
Target Q-values (mean of truncated quantiles).
"""
obs = batch[Columns.OBS]
actions = batch[Columns.ACTIONS]
# Get all quantiles from target critics
quantiles = self._qf_forward_all_critics(obs, actions, use_target=True)
# Flatten, sort, and truncate top quantiles
batch_size = quantiles.shape[0]
quantiles_flat = quantiles.reshape(batch_size, -1)
quantiles_sorted, _ = torch.sort(quantiles_flat, dim=1)
# Calculate number of quantiles to keep
n_target_quantiles = (
self.quantiles_total - self.top_quantiles_to_drop_per_net * self.n_critics
)
quantiles_truncated = quantiles_sorted[:, :n_target_quantiles]
# Return mean of truncated quantiles
return quantiles_truncated.mean(dim=1)
@staticmethod
def _get_catalog_class():
return TQCCatalog
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/algorithms/tqc/torch/default_tqc_torch_rl_module.py",
"license": "Apache License 2.0",
"lines": 183,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:rllib/algorithms/tqc/torch/tqc_torch_learner.py | """
PyTorch implementation of the TQC Learner.
Implements the TQC loss computation with quantile Huber loss.
"""
from typing import Any, Dict
from ray.rllib.algorithms.algorithm_config import AlgorithmConfig
from ray.rllib.algorithms.sac.sac_learner import (
LOGPS_KEY,
QF_PREDS,
QF_TARGET_NEXT,
)
from ray.rllib.algorithms.sac.torch.sac_torch_learner import SACTorchLearner
from ray.rllib.algorithms.tqc.tqc import TQCConfig
from ray.rllib.algorithms.tqc.tqc_learner import (
QF_LOSS_KEY,
QF_MAX_KEY,
QF_MEAN_KEY,
QF_MIN_KEY,
TD_ERROR_MEAN_KEY,
TQCLearner,
)
from ray.rllib.core.columns import Columns
from ray.rllib.core.learner.learner import POLICY_LOSS_KEY
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.metrics import TD_ERROR_KEY
from ray.rllib.utils.typing import ModuleID, TensorType
torch, nn = try_import_torch()
def quantile_huber_loss_per_sample(
    quantiles: torch.Tensor,
    target_quantiles: torch.Tensor,
    kappa: float = 1.0,
) -> torch.Tensor:
    """Computes the quantile Huber loss per sample (for importance sampling).

    Args:
        quantiles: Current quantile estimates. Shape: (batch, n_quantiles)
        target_quantiles: Target quantile values. Shape: (batch, n_target_quantiles)
        kappa: Huber loss threshold parameter.

    Returns:
        Per-sample quantile Huber loss. Shape: (batch,)
    """
    num_quantiles = quantiles.shape[1]
    # Midpoint cumulative probabilities tau_i = (i + 0.5) / N, one per quantile.
    taus = (
        torch.arange(num_quantiles, device=quantiles.device, dtype=quantiles.dtype)
        + 0.5
    ) / num_quantiles
    # Broadcast both sides to pairwise shape (batch, n_quantiles, n_target).
    current = quantiles.unsqueeze(2)
    target = target_quantiles.unsqueeze(1)
    pairwise_delta = target - current
    # Element-wise (unreduced) Huber loss for every (current, target) pair.
    elementwise_huber = nn.HuberLoss(reduction="none", delta=kappa)(current, target)
    # Asymmetric quantile weights |tau - 1{delta < 0}| (quantile regression).
    weights = torch.abs(taus.view(1, num_quantiles, 1) - (pairwise_delta < 0).float())
    # Sum the weighted losses over both quantile axes; keep the batch axis.
    return (weights * elementwise_huber).sum(dim=(1, 2))
class TQCTorchLearner(SACTorchLearner, TQCLearner):
    """PyTorch Learner for TQC algorithm.
    Implements the TQC loss computation:
    - Critic loss: Quantile Huber loss with truncated targets
    - Actor loss: Maximize mean Q-value (from truncated quantiles)
    - Alpha loss: Same as SAC (entropy regularization)
    """
    @override(SACTorchLearner)
    def build(self) -> None:
        """Builds the learner plus a scratch dict for per-module loss terms."""
        super().build()
        # Latest loss tensors keyed by (module_id, loss_key); filled in
        # `compute_loss_for_module()` for later use (e.g. gradient computation).
        self._temp_losses = {}
    @override(SACTorchLearner)
    def configure_optimizers_for_module(
        self, module_id: ModuleID, config: AlgorithmConfig = None
    ) -> None:
        """Configures optimizers for TQC.
        TQC has separate optimizers for:
        - All critic networks (shared optimizer)
        - Actor network
        - Temperature (alpha) parameter
        """
        module = self._module[module_id]
        # Collect all critic parameters
        # (all critic encoders and heads share a single optimizer).
        critic_params = []
        for encoder in module.qf_encoders:
            critic_params.extend(self.get_parameters(encoder))
        for head in module.qf_heads:
            critic_params.extend(self.get_parameters(head))
        # eps=1e-7 (vs. Adam's 1e-8 default) for numerical stability.
        optim_critic = torch.optim.Adam(critic_params, eps=1e-7)
        self.register_optimizer(
            module_id=module_id,
            optimizer_name="qf",
            optimizer=optim_critic,
            params=critic_params,
            lr_or_lr_schedule=config.critic_lr,
        )
        # Actor optimizer
        params_actor = self.get_parameters(module.pi_encoder) + self.get_parameters(
            module.pi
        )
        optim_actor = torch.optim.Adam(params_actor, eps=1e-7)
        self.register_optimizer(
            module_id=module_id,
            optimizer_name="policy",
            optimizer=optim_actor,
            params=params_actor,
            lr_or_lr_schedule=config.actor_lr,
        )
        # Temperature optimizer (optimizes log(alpha) directly, as in SAC).
        temperature = self.curr_log_alpha[module_id]
        optim_temperature = torch.optim.Adam([temperature], eps=1e-7)
        self.register_optimizer(
            module_id=module_id,
            optimizer_name="alpha",
            optimizer=optim_temperature,
            params=[temperature],
            lr_or_lr_schedule=config.alpha_lr,
        )
    @override(SACTorchLearner)
    def compute_loss_for_module(
        self,
        *,
        module_id: ModuleID,
        config: TQCConfig,
        batch: Dict[str, Any],
        fwd_out: Dict[str, TensorType],
    ) -> TensorType:
        """Computes the TQC loss.
        Args:
            module_id: The module ID.
            config: The TQC configuration.
            batch: The training batch.
            fwd_out: Forward pass outputs.
        Returns:
            Total loss (sum of critic, actor, and alpha losses).
        """
        # Get current alpha (temperature parameter)
        alpha = torch.exp(self.curr_log_alpha[module_id])
        # Get TQC parameters
        n_critics = config.n_critics
        n_target_quantiles = self._get_n_target_quantiles(module_id)
        batch_size = batch[Columns.OBS].shape[0]
        # === Critic Loss ===
        # Get current Q-value predictions (quantiles)
        # Shape: (batch, n_critics, n_quantiles)
        qf_preds = fwd_out[QF_PREDS]
        # Get target Q-values for next state
        # Shape: (batch, n_critics, n_quantiles)
        qf_target_next = fwd_out[QF_TARGET_NEXT]
        # NOTE(review): presumably the log-prob of the re-sampled next action
        # from the module's train forward pass — confirm against the RLModule.
        logp_next = fwd_out["logp_next"]
        # Flatten and sort quantiles across all critics
        # Shape: (batch, n_critics * n_quantiles)
        qf_target_next_flat = qf_target_next.reshape(batch_size, -1)
        # Sort and truncate top quantiles to control overestimation
        qf_target_next_sorted, _ = torch.sort(qf_target_next_flat, dim=1)
        qf_target_next_truncated = qf_target_next_sorted[:, :n_target_quantiles]
        # Compute target with entropy bonus
        # Shape: (batch, n_target_quantiles)
        target_quantiles = (
            qf_target_next_truncated - alpha.detach() * logp_next.unsqueeze(1)
        )
        # Compute TD targets
        rewards = batch[Columns.REWARDS].unsqueeze(1)
        terminateds = batch[Columns.TERMINATEDS].float().unsqueeze(1)
        gamma = config.gamma
        # `n_step` may be absent (default 1), a scalar, or a per-sample tensor
        # (e.g. when mixed n-step sampling is configured).
        n_step = batch.get("n_step", torch.ones_like(batch[Columns.REWARDS]))
        if isinstance(n_step, (int, float)):
            n_step = torch.full_like(batch[Columns.REWARDS], n_step)
        # Bellman target; detached so gradients only flow through `qf_preds`.
        target_quantiles = (
            rewards
            + (1.0 - terminateds) * (gamma ** n_step.unsqueeze(1)) * target_quantiles
        ).detach()
        # Get importance sampling weights for prioritized replay
        # (defaults to 1.0 when the replay buffer provides no weights).
        weights = batch.get("weights", torch.ones_like(batch[Columns.REWARDS]))
        # Compute critic loss for each critic
        critic_loss = torch.tensor(0.0, device=qf_preds.device)
        for i in range(n_critics):
            # Get quantiles for this critic: (batch, n_quantiles)
            critic_quantiles = qf_preds[:, i, :]
            # Compute per-sample quantile huber loss
            critic_loss_per_sample = quantile_huber_loss_per_sample(
                critic_quantiles,
                target_quantiles,
            )
            # Apply importance sampling weights
            critic_loss += torch.mean(weights * critic_loss_per_sample)
        # === Actor Loss ===
        # Get Q-values for resampled actions
        # NOTE(review): "qf_curr"/"logp_curr" appear to be Q-quantiles and
        # log-probs for actions re-sampled from the current policy — verify.
        qf_curr = fwd_out["qf_curr"]  # (batch, n_critics, n_quantiles)
        logp_curr = fwd_out["logp_curr"]
        # Mean over all quantiles and critics
        qf_curr_mean = qf_curr.mean(dim=(1, 2))
        # Actor loss: maximize Q-value while maintaining entropy
        actor_loss = (alpha.detach() * logp_curr - qf_curr_mean).mean()
        # === Alpha Loss ===
        # Standard SAC temperature loss; logp is detached so only alpha learns.
        alpha_loss = -torch.mean(
            self.curr_log_alpha[module_id]
            * (logp_curr.detach() + self.target_entropy[module_id])
        )
        # Total loss
        total_loss = critic_loss + actor_loss + alpha_loss
        # Compute TD error for prioritized replay
        # Use mean across critics and quantiles
        qf_preds_mean = qf_preds.mean(dim=(1, 2))
        target_mean = target_quantiles.mean(dim=1)
        td_error = torch.abs(qf_preds_mean - target_mean)
        # Log metrics
        self.metrics.log_value(
            key=(module_id, TD_ERROR_KEY),
            value=td_error,
            reduce="item_series",
        )
        self.metrics.log_dict(
            {
                POLICY_LOSS_KEY: actor_loss,
                QF_LOSS_KEY: critic_loss,
                "alpha_loss": alpha_loss,
                "alpha_value": alpha[0],
                "log_alpha_value": torch.log(alpha)[0],
                "target_entropy": self.target_entropy[module_id],
                LOGPS_KEY: torch.mean(logp_curr),
                QF_MEAN_KEY: torch.mean(qf_preds),
                QF_MAX_KEY: torch.max(qf_preds),
                QF_MIN_KEY: torch.min(qf_preds),
                TD_ERROR_MEAN_KEY: torch.mean(td_error),
            },
            key=module_id,
            window=1,
        )
        # Store losses for gradient computation
        self._temp_losses[(module_id, POLICY_LOSS_KEY)] = actor_loss
        self._temp_losses[(module_id, QF_LOSS_KEY)] = critic_loss
        self._temp_losses[(module_id, "alpha_loss")] = alpha_loss
        return total_loss
    # Note: compute_gradients is inherited from SACTorchLearner
# Note: compute_gradients is inherited from SACTorchLearner
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/algorithms/tqc/torch/tqc_torch_learner.py",
"license": "Apache License 2.0",
"lines": 236,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:rllib/algorithms/tqc/tqc.py | """
TQC (Truncated Quantile Critics) Algorithm.
Paper: https://arxiv.org/abs/2005.04269
"Controlling Overestimation Bias with Truncated Mixture of Continuous
Distributional Quantile Critics"
TQC extends SAC by using distributional RL with quantile regression to
control overestimation bias in the Q-function.
"""
import logging
from typing import Optional, Type, Union
from ray.rllib.algorithms.algorithm import Algorithm
from ray.rllib.algorithms.algorithm_config import AlgorithmConfig, NotProvided
from ray.rllib.algorithms.sac.sac import SAC, SACConfig
from ray.rllib.core.learner import Learner
from ray.rllib.core.rl_module.rl_module import RLModuleSpec
from ray.rllib.utils.annotations import override
from ray.rllib.utils.typing import RLModuleSpecType
logger = logging.getLogger(__name__)
class TQCConfig(SACConfig):
    """Configuration for the TQC algorithm.
    TQC extends SAC with distributional critics using quantile regression.
    Example:
        >>> from ray.rllib.algorithms.tqc import TQCConfig
        >>> config = (
        ...     TQCConfig()
        ...     .environment("Pendulum-v1")
        ...     .training(
        ...         n_quantiles=25,
        ...         n_critics=2,
        ...         top_quantiles_to_drop_per_net=2,
        ...     )
        ... )
        >>> algo = config.build()
    """
    def __init__(self, algo_class=None):
        """Initializes a TQCConfig instance."""
        super().__init__(algo_class=algo_class or TQC)
        # TQC-specific parameters
        # Number of quantiles each critic head outputs.
        self.n_quantiles = 25
        # Number of independent critic networks.
        self.n_critics = 2
        # Quantiles dropped (per critic) from the pooled, sorted target
        # distribution to curb overestimation bias.
        self.top_quantiles_to_drop_per_net = 2
    @override(SACConfig)
    def training(
        self,
        *,
        n_quantiles: Optional[int] = NotProvided,
        n_critics: Optional[int] = NotProvided,
        top_quantiles_to_drop_per_net: Optional[int] = NotProvided,
        **kwargs,
    ):
        """Sets the training-related configuration.
        Args:
            n_quantiles: Number of quantiles for each critic network.
                Default is 25.
            n_critics: Number of critic networks. Default is 2.
            top_quantiles_to_drop_per_net: Number of quantiles to drop per
                network when computing the target Q-value. This controls
                the overestimation bias. Default is 2.
            **kwargs: Additional arguments passed to SACConfig.training().
        Returns:
            This updated TQCConfig object.
        """
        super().training(**kwargs)
        # NotProvided sentinel distinguishes "not passed" from falsy values.
        if n_quantiles is not NotProvided:
            self.n_quantiles = n_quantiles
        if n_critics is not NotProvided:
            self.n_critics = n_critics
        if top_quantiles_to_drop_per_net is not NotProvided:
            self.top_quantiles_to_drop_per_net = top_quantiles_to_drop_per_net
        return self
    @override(AlgorithmConfig)
    def validate(self) -> None:
        """Validates the TQC configuration."""
        super().validate()
        # Validate TQC-specific parameters
        if self.n_quantiles < 1:
            raise ValueError(f"`n_quantiles` must be >= 1, got {self.n_quantiles}")
        if self.n_critics < 1:
            raise ValueError(f"`n_critics` must be >= 1, got {self.n_critics}")
        # Ensure top_quantiles_to_drop_per_net is non-negative
        if self.top_quantiles_to_drop_per_net < 0:
            raise ValueError(
                f"`top_quantiles_to_drop_per_net` must be >= 0, got "
                f"{self.top_quantiles_to_drop_per_net}"
            )
        # Ensure we don't drop more quantiles than we have
        # (at least one quantile must survive truncation for the target).
        total_quantiles = self.n_quantiles * self.n_critics
        quantiles_to_drop = self.top_quantiles_to_drop_per_net * self.n_critics
        if quantiles_to_drop >= total_quantiles:
            raise ValueError(
                f"Cannot drop {quantiles_to_drop} quantiles when only "
                f"{total_quantiles} total quantiles are available. "
                f"Reduce `top_quantiles_to_drop_per_net` or increase "
                f"`n_quantiles` or `n_critics`."
            )
    @override(AlgorithmConfig)
    def get_default_rl_module_spec(self) -> RLModuleSpecType:
        # Only a torch implementation exists for TQC.
        if self.framework_str == "torch":
            from ray.rllib.algorithms.tqc.torch.default_tqc_torch_rl_module import (
                DefaultTQCTorchRLModule,
            )
            return RLModuleSpec(module_class=DefaultTQCTorchRLModule)
        else:
            raise ValueError(
                f"The framework {self.framework_str} is not supported. Use `torch`."
            )
    @override(AlgorithmConfig)
    def get_default_learner_class(self) -> Union[Type["Learner"], str]:
        # Only a torch implementation exists for TQC.
        if self.framework_str == "torch":
            from ray.rllib.algorithms.tqc.torch.tqc_torch_learner import (
                TQCTorchLearner,
            )
            return TQCTorchLearner
        else:
            raise ValueError(
                f"The framework {self.framework_str} is not supported. Use `torch`."
            )
    @property
    @override(AlgorithmConfig)
    def _model_config_auto_includes(self):
        # Expose the TQC hyperparameters to the RLModule's model_config so the
        # catalog/module can size the quantile critic heads accordingly.
        return super()._model_config_auto_includes | {
            "n_quantiles": self.n_quantiles,
            "n_critics": self.n_critics,
            "top_quantiles_to_drop_per_net": self.top_quantiles_to_drop_per_net,
        }
class TQC(SAC):
    """TQC (Truncated Quantile Critics) Algorithm.

    An extension of SAC in which every critic is distributional: each one
    predicts a set of quantiles of the return distribution. For target
    computation the quantiles of all critics are pooled, sorted, and the
    largest ones discarded, which curbs overestimation bias. Critics are
    trained with the quantile Huber loss.

    See the paper for more details:
    https://arxiv.org/abs/2005.04269
    """

    @classmethod
    @override(Algorithm)
    def get_default_config(cls) -> TQCConfig:
        return TQCConfig()
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/algorithms/tqc/tqc.py",
"license": "Apache License 2.0",
"lines": 140,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:rllib/algorithms/tqc/tqc_catalog.py | """
TQC Catalog for building TQC-specific models.
TQC uses multiple quantile critics, each outputting n_quantiles values.
"""
import gymnasium as gym
from ray.rllib.algorithms.sac.sac_catalog import SACCatalog
from ray.rllib.core.models.configs import MLPHeadConfig
class TQCCatalog(SACCatalog):
    """Catalog class for building TQC models.
    TQC extends SAC by using distributional critics with quantile regression.
    Each critic outputs `n_quantiles` values instead of a single Q-value.
    The catalog builds:
    - Pi Encoder: Same as SAC (encodes observations for the actor)
    - Pi Head: Same as SAC (outputs mean and log_std for Squashed Gaussian)
    - QF Encoders: Multiple encoders for quantile critics
    - QF Heads: Multiple heads, each outputting n_quantiles values
    """
    def __init__(
        self,
        observation_space: gym.Space,
        action_space: gym.Space,
        model_config_dict: dict,
        view_requirements: dict = None,
    ):
        """Initializes the TQCCatalog.
        Args:
            observation_space: The observation space of the environment.
            action_space: The action space of the environment.
            model_config_dict: The model config dictionary containing
                TQC-specific parameters like n_quantiles and n_critics.
            view_requirements: Not used, kept for API compatibility.
        """
        # Extract TQC-specific parameters before calling super().__init__
        # so they are already set if the base init touches them.
        # NOTE(review): defaults (25/2) mirror TQCConfig's defaults — keep in sync.
        self.n_quantiles = model_config_dict.get("n_quantiles", 25)
        self.n_critics = model_config_dict.get("n_critics", 2)
        super().__init__(
            observation_space=observation_space,
            action_space=action_space,
            model_config_dict=model_config_dict,
            view_requirements=view_requirements,
        )
        # Override the QF head config to output n_quantiles instead of 1
        # For TQC, we always output n_quantiles (continuous action space)
        self.qf_head_config = MLPHeadConfig(
            input_dims=self.latent_dims,
            hidden_layer_dims=self.pi_and_qf_head_hiddens,
            hidden_layer_activation=self.pi_and_qf_head_activation,
            output_layer_activation="linear",
            output_layer_dim=self.n_quantiles,
        )
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/algorithms/tqc/tqc_catalog.py",
"license": "Apache License 2.0",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:rllib/algorithms/tqc/tqc_learner.py | """
TQC Learner base class.
Extends SAC Learner with quantile-specific loss computation.
"""
from ray.rllib.algorithms.sac.sac_learner import SACLearner
from ray.rllib.core.learner.learner import Learner
from ray.rllib.utils.annotations import override
from ray.rllib.utils.typing import ModuleID
# Metric/loss keys logged by the TQC learner (see TQCTorchLearner).
QF_LOSS_KEY = "qf_loss"  # critic (quantile Huber) loss
QF_MEAN_KEY = "qf_mean"  # mean of predicted quantiles
QF_MAX_KEY = "qf_max"  # max of predicted quantiles
QF_MIN_KEY = "qf_min"  # min of predicted quantiles
QUANTILES_KEY = "quantiles"  # raw quantile predictions
TD_ERROR_MEAN_KEY = "td_error_mean"  # mean absolute TD error
class TQCLearner(SACLearner):
    """Base Learner class for TQC algorithm.

    TQC extends SAC with distributional critics using quantile regression.
    The main differences are:
    - Uses quantile Huber loss instead of standard Huber/MSE loss
    - Computes target Q-values by sorting and truncating top quantiles
    """

    @override(Learner)
    def build(self) -> None:
        """Builds the TQC learner (alpha/entropy handling comes from SAC)."""
        super().build()

    def _get_n_target_quantiles(self, module_id: ModuleID) -> int:
        """Returns the number of target quantiles after truncation.

        Args:
            module_id: The module ID.

        Returns:
            Number of quantiles to use for target computation.
        """
        cfg = self.config.get_config_for_module(module_id)
        # All quantiles pooled across critics, minus the per-net drops.
        pooled_total = cfg.n_quantiles * cfg.n_critics
        dropped_total = cfg.top_quantiles_to_drop_per_net * cfg.n_critics
        return pooled_total - dropped_total
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/algorithms/tqc/tqc_learner.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:rllib/examples/algorithms/tqc/humanoid_tqc.py | """Example showing how to train TQC on the Humanoid-v4 MuJoCo environment.
TQC (Truncated Quantile Critics) is an extension of SAC that uses distributional
critics with quantile regression. By truncating the upper quantiles when computing
target values, TQC reduces overestimation bias that can plague actor-critic methods,
leading to more stable and efficient learning on complex continuous control tasks.
This example:
- Trains on Humanoid-v4, a challenging 17-DoF locomotion task
- Uses truncated quantile critics with 25 quantiles and 2 critics
- Drops the top 2 quantiles per network to reduce overestimation bias
- Employs prioritized experience replay with capacity of 1M transitions
- Uses a large network architecture (1024x1024) suitable for high-dimensional control
- Applies mixed n-step returns (1 to 3 steps) for variance reduction
- Expects to achieve episode returns >12000 with sufficient training
How to run this script
----------------------
`python humanoid_tqc.py --num-env-runners=4`
For faster training, use GPU acceleration and more parallelism:
`python humanoid_tqc.py --num-learners=1 --num-gpus-per-learner=1 --num-env-runners=8`
To scale up with distributed learning using multiple learners and env-runners:
`python humanoid_tqc.py --num-learners=2 --num-env-runners=16`
For debugging, use the following additional command line options
`--no-tune --num-env-runners=0`
which should allow you to set breakpoints anywhere in the RLlib code and
have the execution stop there for inspection and debugging.
For logging to your WandB account, use:
`--wandb-key=[your WandB API key] --wandb-project=[some project name]
--wandb-run-name=[optional: WandB run name (within the defined project)]`
Results to expect
-----------------
On a single-GPU machine with --num-gpus-per-learner=1, this example should learn
an episode return of >1000 within approximately 10 hours. With more hyperparameter
tuning, longer runs, and additional scale, returns of >12000 are achievable.
"""
from torch import nn
from ray.rllib.algorithms.tqc.tqc import TQCConfig
from ray.rllib.core.rl_module.default_model_config import DefaultModelConfig
from ray.rllib.examples.utils import (
add_rllib_example_script_args,
run_rllib_example_script_experiment,
)
parser = add_rllib_example_script_args(
    default_timesteps=1_000_000,
    default_reward=12_000.0,
    default_iters=2_000,
)
parser.set_defaults(
    num_env_runners=4,
    num_envs_per_env_runner=8,
    num_learners=1,
)
# Use `parser` to add your own custom command line options to this script
# and (if needed) use their values to set up `config` below.
args = parser.parse_args()

config = (
    TQCConfig()
    .environment("Humanoid-v4")
    .env_runners(
        num_env_runners=args.num_env_runners,
        num_envs_per_env_runner=args.num_envs_per_env_runner,
    )
    .learners(
        num_learners=args.num_learners,
        # Respect the `--num-gpus-per-learner` command line flag documented in
        # the module docstring (this was previously hard-coded to 1, which
        # ignored the flag and broke CPU-only runs).
        num_gpus_per_learner=args.num_gpus_per_learner,
        num_aggregator_actors_per_learner=2,
    )
    .training(
        initial_alpha=1.001,
        actor_lr=0.00005,
        critic_lr=0.00005,
        alpha_lr=0.00005,
        target_entropy="auto",
        # Mixed n-step returns: n sampled uniformly from [1, 3].
        n_step=(1, 3),
        tau=0.005,
        train_batch_size_per_learner=256,
        target_network_update_freq=1,
        # TQC-specific parameters
        n_quantiles=25,
        n_critics=2,
        top_quantiles_to_drop_per_net=2,
        replay_buffer_config={
            "type": "PrioritizedEpisodeReplayBuffer",
            "capacity": 1000000,
            "alpha": 0.6,
            "beta": 0.4,
        },
        num_steps_sampled_before_learning_starts=10000,
    )
    .rl_module(
        model_config=DefaultModelConfig(
            fcnet_hiddens=[1024, 1024],
            fcnet_activation="relu",
            fcnet_kernel_initializer=nn.init.xavier_uniform_,
            head_fcnet_hiddens=[],
            head_fcnet_activation=None,
            head_fcnet_kernel_initializer="orthogonal_",
            head_fcnet_kernel_initializer_kwargs={"gain": 0.01},
            fusionnet_hiddens=[256, 256, 256],
            fusionnet_activation="relu",
        )
    )
    .reporting(
        metrics_num_episodes_for_smoothing=5,
        min_sample_timesteps_per_iteration=1000,
    )
)

if __name__ == "__main__":
    run_rllib_example_script_experiment(config, args)
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/examples/algorithms/tqc/humanoid_tqc.py",
"license": "Apache License 2.0",
"lines": 107,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:rllib/examples/algorithms/tqc/pendulum_tqc.py | """Example showing how to train TQC on the Pendulum-v1 classic control environment.
TQC (Truncated Quantile Critics) is an extension of SAC that uses distributional
critics with quantile regression to reduce overestimation bias. This example
demonstrates TQC on a simple continuous control task suitable for quick experiments.
This example:
- Trains on Pendulum-v1, a classic swing-up control task with continuous actions
- Uses truncated quantile critics with 25 quantiles and 2 critics
- Drops the top 2 quantiles per network to reduce overestimation bias
- Employs prioritized experience replay with 100K capacity
- Scales learning rates based on the number of learners for distributed training
- Uses mixed n-step returns (2 to 5 steps) for improved sample efficiency
- Expects to achieve episode returns of approximately -250 within 20K timesteps
How to run this script
----------------------
`python pendulum_tqc.py`
To run with different configuration:
`python pendulum_tqc.py --num-env-runners=2`
To scale up with distributed learning using multiple learners and env-runners:
`python pendulum_tqc.py --num-learners=2 --num-env-runners=8`
To use a GPU-based learner add the number of GPUs per learners:
`python pendulum_tqc.py --num-learners=1 --num-gpus-per-learner=1`
For debugging, use the following additional command line options
`--no-tune --num-env-runners=0`
which should allow you to set breakpoints anywhere in the RLlib code and
have the execution stop there for inspection and debugging.
For logging to your WandB account, use:
`--wandb-key=[your WandB API key] --wandb-project=[some project name]
--wandb-run-name=[optional: WandB run name (within the defined project)]`
Results to expect
-----------------
With default settings, this example should achieve an episode return of around -250
within 20,000 timesteps. The Pendulum environment has a maximum possible return of 0
(perfect balancing), with typical good performance in the -200 to -300 range.
"""
from torch import nn
from ray.rllib.algorithms.tqc.tqc import TQCConfig
from ray.rllib.core.rl_module.default_model_config import DefaultModelConfig
from ray.rllib.examples.utils import (
add_rllib_example_script_args,
run_rllib_example_script_experiment,
)
parser = add_rllib_example_script_args(
    default_timesteps=20000,
    default_reward=-250.0,
)
parser.set_defaults(
    num_env_runners=4,
    num_envs_per_env_runner=8,
    num_learners=1,
)
# Use `parser` to add your own custom command line options to this script
# and (if needed) use their values to set up `config` below.
args = parser.parse_args()

config = (
    TQCConfig()
    .environment("Pendulum-v1")
    .env_runners(
        num_env_runners=args.num_env_runners,
        num_envs_per_env_runner=args.num_envs_per_env_runner,
    )
    .learners(
        num_learners=args.num_learners,
        # Respect the `--num-gpus-per-learner` command line flag documented in
        # the module docstring (this was previously hard-coded to 1, which
        # ignored the flag and broke CPU-only runs).
        num_gpus_per_learner=args.num_gpus_per_learner,
        num_aggregator_actors_per_learner=2,
    )
    .training(
        initial_alpha=1.001,
        # Use a smaller learning rate for the policy.
        # All rates are scaled by sqrt(num_learners) for distributed setups.
        actor_lr=2e-4 * (args.num_learners or 1) ** 0.5,
        critic_lr=8e-4 * (args.num_learners or 1) ** 0.5,
        alpha_lr=9e-4 * (args.num_learners or 1) ** 0.5,
        lr=None,
        target_entropy="auto",
        n_step=(2, 5),
        tau=0.005,
        train_batch_size_per_learner=256,
        target_network_update_freq=1,
        # TQC-specific parameters
        n_quantiles=25,
        n_critics=2,
        top_quantiles_to_drop_per_net=2,
        replay_buffer_config={
            "type": "PrioritizedEpisodeReplayBuffer",
            "capacity": 100000,
            "alpha": 1.0,
            "beta": 0.0,
        },
        num_steps_sampled_before_learning_starts=256 * (args.num_learners or 1),
    )
    .rl_module(
        model_config=DefaultModelConfig(
            fcnet_hiddens=[256, 256],
            fcnet_activation="relu",
            fcnet_kernel_initializer=nn.init.xavier_uniform_,
            head_fcnet_hiddens=[],
            head_fcnet_activation=None,
            head_fcnet_kernel_initializer="orthogonal_",
            head_fcnet_kernel_initializer_kwargs={"gain": 0.01},
            fusionnet_hiddens=[256, 256, 256],
            fusionnet_activation="relu",
        ),
    )
    .reporting(
        metrics_num_episodes_for_smoothing=5,
    )
)

if __name__ == "__main__":
    run_rllib_example_script_experiment(config, args)
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/examples/algorithms/tqc/pendulum_tqc.py",
"license": "Apache License 2.0",
"lines": 108,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:release/ray_release/byod/build_context.py | import hashlib
import json
import os
import shutil
from typing import Dict, List, Optional
from typing_extensions import TypedDict
_INSTALL_PYTHON_DEPS_SCRIPT = """\
#!/bin/bash
set -euo pipefail
LOCK_FILE="${1:-python_depset.lock}"
if [[ ! -f "${LOCK_FILE}" ]]; then
echo "Lock file ${LOCK_FILE} does not exist" >/dev/stderr
exit 1
fi
uv pip install --system --no-deps --index-strategy unsafe-best-match \\
-r "${LOCK_FILE}"
"""
class BuildContext(TypedDict, total=False):
    """
    Build context for custom BYOD image builds.
    Attributes:
        envs: Environment variables to set in the image.
        post_build_script: Filename of the post-build script.
        post_build_script_digest: SHA256 digest of the post-build script.
        python_depset: Filename of the Python dependencies lock file.
        python_depset_digest: SHA256 digest of the Python dependencies lock file.
        install_python_deps_script_digest: SHA256 digest of the install script.
    """
    # total=False: every key is optional; make_build_context() only fills in
    # the keys for inputs that were actually provided.
    envs: Dict[str, str]
    post_build_script: str
    post_build_script_digest: str
    python_depset: str
    python_depset_digest: str
    install_python_deps_script_digest: str
def make_build_context(
    base_dir: str,
    envs: Optional[Dict[str, str]] = None,
    post_build_script: Optional[str] = None,
    python_depset: Optional[str] = None,
) -> BuildContext:
    """
    Create a BuildContext with computed file digests.

    Args:
        base_dir: Directory containing the source files.
        envs: Environment variables to set in the image.
        post_build_script: Filename of the post-build script.
        python_depset: Filename of the Python dependencies lock file.

    Returns:
        A BuildContext with filenames and their SHA256 digests.
    """
    context: BuildContext = {}
    if envs:
        context["envs"] = envs
    if post_build_script:
        # Record the script name together with a digest of its content.
        context["post_build_script"] = post_build_script
        context["post_build_script_digest"] = _sha256_file(
            os.path.join(base_dir, post_build_script)
        )
    if python_depset:
        # Record the lock file plus digests of both the file and the
        # (constant) install script, so any change busts the build cache.
        context["python_depset"] = python_depset
        context["python_depset_digest"] = _sha256_file(
            os.path.join(base_dir, python_depset)
        )
        context["install_python_deps_script_digest"] = _sha256_str(
            _INSTALL_PYTHON_DEPS_SCRIPT
        )
    return context
def encode_build_context(ctx: BuildContext) -> str:
    """Encode a BuildContext to deterministic minified JSON."""
    # Sorted keys + compact separators give byte-stable output for hashing.
    return json.dumps(ctx, separators=(",", ":"), sort_keys=True)
def decode_build_context(data: str) -> BuildContext:
    """Decode a JSON string back into a BuildContext."""
    parsed = json.loads(data)
    return parsed
def build_context_digest(ctx: BuildContext) -> str:
    """Compute SHA256 digest of the encoded BuildContext."""
    payload = encode_build_context(ctx).encode()
    return f"sha256:{hashlib.sha256(payload).hexdigest()}"
def fill_build_context_dir(
    ctx: BuildContext,
    source_dir: str,
    context_dir: str,
) -> None:
    """
    Generate Dockerfile and copy source files to the build directory.

    Args:
        ctx: The BuildContext specifying what to include.
        source_dir: Source directory containing the original files.
        context_dir: Target directory for the generated Dockerfile and copied files.
    """
    lines: List[str] = [
        "# syntax=docker/dockerfile:1.3-labs",
        "ARG BASE_IMAGE",
        "FROM ${BASE_IMAGE}",
    ]
    if ctx.get("envs"):
        # One ENV instruction with sorted, line-continued key=value pairs.
        lines.append("ENV \\")
        lines.append(
            " \\\n".join(f"  {k}={v}" for k, v in sorted(ctx["envs"].items()))
        )
    if "python_depset" in ctx:
        # Copy the lock file and materialize the install script, then install
        # the pinned dependency set inside the image.
        shutil.copy(
            os.path.join(source_dir, ctx["python_depset"]),
            os.path.join(context_dir, "python_depset.lock"),
        )
        with open(os.path.join(context_dir, "install_python_deps.sh"), "w") as fp:
            fp.write(_INSTALL_PYTHON_DEPS_SCRIPT)
        lines.append("COPY install_python_deps.sh /tmp/install_python_deps.sh")
        lines.append("COPY python_depset.lock python_depset.lock")
        lines.append("RUN bash /tmp/install_python_deps.sh python_depset.lock")
    if "post_build_script" in ctx:
        # The post-build script runs last, after dependencies are installed.
        shutil.copy(
            os.path.join(source_dir, ctx["post_build_script"]),
            os.path.join(context_dir, "post_build_script.sh"),
        )
        lines.append("COPY post_build_script.sh /tmp/post_build_script.sh")
        lines.append("RUN bash /tmp/post_build_script.sh")
    with open(os.path.join(context_dir, "Dockerfile"), "w") as fp:
        fp.write("\n".join(lines) + "\n")
def _sha256_file(path: str) -> str:
with open(path, "rb") as f:
digest = hashlib.sha256(f.read()).hexdigest()
return f"sha256:{digest}"
def _sha256_str(content: str) -> str:
digest = hashlib.sha256(content.encode()).hexdigest()
return f"sha256:{digest}"
| {
"repo_id": "ray-project/ray",
"file_path": "release/ray_release/byod/build_context.py",
"license": "Apache License 2.0",
"lines": 122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:release/ray_release/tests/test_byod_build_context.py | import os
import sys
import tempfile
import pytest
from ray_release.byod.build_context import (
_INSTALL_PYTHON_DEPS_SCRIPT,
build_context_digest,
decode_build_context,
encode_build_context,
fill_build_context_dir,
make_build_context,
)
def test_make_build_context() -> None:
    """Full context: envs, post-build script, and python depset all present."""
    with tempfile.TemporaryDirectory() as tmpdir:
        script_path = os.path.join(tmpdir, "post_build.sh")
        with open(script_path, "w") as f:
            f.write("echo hello")
        depset_path = os.path.join(tmpdir, "deps.lock")
        with open(depset_path, "w") as f:
            f.write("numpy==1.0.0")
        ctx = make_build_context(
            base_dir=tmpdir,
            envs={"FOO": "bar"},
            post_build_script="post_build.sh",
            python_depset="deps.lock",
        )
        # Digest values depend on file content, so the dict comparison reuses
        # ctx's own digest entries; it still pins the exact key set returned.
        assert ctx == {
            "envs": {"FOO": "bar"},
            "post_build_script": "post_build.sh",
            "post_build_script_digest": ctx["post_build_script_digest"],
            "python_depset": "deps.lock",
            "python_depset_digest": ctx["python_depset_digest"],
            "install_python_deps_script_digest": ctx[
                "install_python_deps_script_digest"
            ],
        }
        # All digests must carry the canonical "sha256:" prefix.
        assert ctx["post_build_script_digest"].startswith("sha256:")
        assert ctx["python_depset_digest"].startswith("sha256:")
        assert ctx["install_python_deps_script_digest"].startswith("sha256:")
def test_make_build_context_partial() -> None:
    """With only a post-build script, the context omits all depset entries."""
    with tempfile.TemporaryDirectory() as base:
        with open(os.path.join(base, "post_build.sh"), "w") as fh:
            fh.write("echo hello")
        ctx = make_build_context(base_dir=base, post_build_script="post_build.sh")
        assert set(ctx) == {"post_build_script", "post_build_script_digest"}
        assert ctx["post_build_script"] == "post_build.sh"
        assert ctx["post_build_script_digest"].startswith("sha256:")
def test_make_build_context_empty() -> None:
    """Providing no inputs at all yields an empty build context."""
    with tempfile.TemporaryDirectory() as base:
        assert make_build_context(base_dir=base) == {}
def test_encode_build_context() -> None:
    """Encoding is minified and digesting is insertion-order independent."""
    with tempfile.TemporaryDirectory() as base:
        with open(os.path.join(base, "build.sh"), "w") as fh:
            fh.write("echo hello")
        ctx = make_build_context(
            base_dir=base,
            envs={"ZZZ": "last", "AAA": "first"},
            post_build_script="build.sh",
        )
        # Minified JSON must contain no whitespace.
        assert " " not in encode_build_context(ctx)
        # The same content presented in a different key order must digest
        # identically, proving the encoding is deterministic.
        shuffled = {
            "post_build_script_digest": ctx["post_build_script_digest"],
            "post_build_script": "build.sh",
            "envs": {"AAA": "first", "ZZZ": "last"},
        }
        assert build_context_digest(ctx) == build_context_digest(shuffled)
def test_decode_build_context() -> None:
    """Decoding a minified JSON build context restores its fields."""
    encoded = '{"envs":{"FOO":"bar"},"post_build_script":"build.sh"}'
    decoded = decode_build_context(encoded)
    assert decoded["envs"] == {"FOO": "bar"}
    assert decoded["post_build_script"] == "build.sh"
def test_encode_decode_roundtrip() -> None:
    """encode followed by decode returns a context equal to the original."""
    with tempfile.TemporaryDirectory() as base:
        for fname, content in (
            ("script.sh", "echo hello"),
            ("deps.lock", "numpy==1.0.0"),
        ):
            with open(os.path.join(base, fname), "w") as fh:
                fh.write(content)
        ctx = make_build_context(
            base_dir=base,
            envs={"KEY": "value"},
            post_build_script="script.sh",
            python_depset="deps.lock",
        )
        assert decode_build_context(encode_build_context(ctx)) == ctx
def test_build_context_digest() -> None:
    """Digest changes when the script content or the env vars change."""
    with tempfile.TemporaryDirectory() as base:
        for fname, content in (
            ("build.sh", "echo hello"),
            ("build2.sh", "echo world"),
        ):
            with open(os.path.join(base, fname), "w") as fh:
                fh.write(content)
        digests = {
            # Baseline context.
            build_context_digest(
                make_build_context(base_dir=base, post_build_script="build.sh")
            ),
            # Different script contents.
            build_context_digest(
                make_build_context(base_dir=base, post_build_script="build2.sh")
            ),
            # Same script, different envs.
            build_context_digest(
                make_build_context(
                    base_dir=base,
                    envs={"FOO": "bar"},
                    post_build_script="build.sh",
                )
            ),
        }
        # All three contexts differ, so all three digests must be distinct.
        assert len(digests) == 3
def test_fill_build_context_dir_empty() -> None:
    """An empty context produces only a minimal Dockerfile in the build dir."""
    with (
        tempfile.TemporaryDirectory() as source_dir,
        tempfile.TemporaryDirectory() as build_dir,
    ):
        empty_ctx = make_build_context(base_dir=source_dir)
        fill_build_context_dir(empty_ctx, source_dir, build_dir)
        # The Dockerfile must be the one and only file in the build context.
        assert os.listdir(build_dir) == ["Dockerfile"]
        with open(os.path.join(build_dir, "Dockerfile")) as fh:
            rendered = fh.read()
        assert rendered == (
            "# syntax=docker/dockerfile:1.3-labs\n"
            "ARG BASE_IMAGE\n"
            "FROM ${BASE_IMAGE}\n"
        )
def test_fill_build_context_dir() -> None:
    """A full context copies script/depset files and renders the complete Dockerfile.

    Verifies that env vars are emitted sorted by key, that the depset install
    step precedes the post-build script, and that the input files are copied
    into the build context under their canonical names.
    """
    with (
        tempfile.TemporaryDirectory() as source_dir,
        tempfile.TemporaryDirectory() as build_dir,
    ):
        # Create input files
        with open(os.path.join(source_dir, "post_build.sh"), "w") as f:
            f.write("#!/bin/bash\necho hello")
        with open(os.path.join(source_dir, "deps.lock"), "w") as f:
            f.write("numpy==1.0.0\npandas==2.0.0")
        ctx = make_build_context(
            base_dir=source_dir,
            envs={"FOO": "bar", "BAZ": "qux"},
            post_build_script="post_build.sh",
            python_depset="deps.lock",
        )
        fill_build_context_dir(ctx, source_dir, build_dir)
        # Check Dockerfile
        with open(os.path.join(build_dir, "Dockerfile")) as f:
            dockerfile = f.read()
        # NOTE: env keys are expected sorted alphabetically (BAZ before FOO)
        # regardless of the insertion order used above.
        expected_dockerfile = "\n".join(
            [
                "# syntax=docker/dockerfile:1.3-labs",
                "ARG BASE_IMAGE",
                "FROM ${BASE_IMAGE}",
                "ENV \\",
                "    BAZ=qux \\",
                "    FOO=bar",
                "COPY install_python_deps.sh /tmp/install_python_deps.sh",
                "COPY python_depset.lock python_depset.lock",
                "RUN bash /tmp/install_python_deps.sh python_depset.lock",
                "COPY post_build_script.sh /tmp/post_build_script.sh",
                "RUN bash /tmp/post_build_script.sh",
                "",
            ]
        )
        assert dockerfile == expected_dockerfile
        # Check copied files
        with open(os.path.join(build_dir, "post_build_script.sh")) as f:
            assert f.read() == "#!/bin/bash\necho hello"
        with open(os.path.join(build_dir, "python_depset.lock")) as f:
            assert f.read() == "numpy==1.0.0\npandas==2.0.0"
        with open(os.path.join(build_dir, "install_python_deps.sh")) as f:
            assert f.read() == _INSTALL_PYTHON_DEPS_SCRIPT
# Allow running this module directly: `python test_byod_build_context.py`.
if __name__ == "__main__":
    sys.exit(pytest.main(["-vv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "release/ray_release/tests/test_byod_build_context.py",
"license": "Apache License 2.0",
"lines": 191,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:rllib/offline/tests/test_offline_rl_stateful.py | import unittest
from pathlib import Path
import numpy as np
import ray
from ray.rllib.algorithms.bc import BCConfig
from ray.rllib.core.columns import Columns
from ray.rllib.core.learner.training_data import TrainingData
from ray.rllib.core.rl_module.default_model_config import DefaultModelConfig
from ray.rllib.env import INPUT_ENV_SPACES
from ray.rllib.env.single_agent_episode import SingleAgentEpisode
from ray.rllib.examples.envs.classes.stateless_cartpole import StatelessCartPole
from ray.rllib.offline.offline_prelearner import OfflinePreLearner
from ray.rllib.policy.sample_batch import MultiAgentBatch, SampleBatch
from ray.rllib.utils import unflatten_dict
class OfflineRLStatefulTest(unittest.TestCase):
    """Tests BC training on recorded stateful (LSTM) CartPole episodes.

    The three test cases share the same decode -> process -> overfit ->
    evaluate skeleton; the previously duplicated ~80-line bodies now live in
    private helpers. Test method names and unittest hooks are unchanged.
    """

    @classmethod
    def setUpClass(cls):
        ray.init()

    @classmethod
    def tearDownClass(cls):
        ray.shutdown()

    def setUp(self):
        # Define the path to the offline data.
        offline_data_path = Path(__file__).parent / "data/statelesscartpole"
        # Define the BC config.
        self.config = (
            BCConfig()
            .environment(StatelessCartPole)
            # Note, the `input_` argument is the major argument for the
            # new offline API. Via the `input_read_method_kwargs` the
            # arguments for the `ray.data.Dataset` read method can be
            # configured. The read method needs at least as many blocks
            # as remote learners.
            .offline_data(
                input_=[
                    offline_data_path.as_posix(),
                ],
                input_read_episodes=True,
                input_read_batch_size=1,
                # Concurrency defines the number of processes that run the
                # `map_batches` transformations. This should be aligned with
                # the 'prefetch_batches' argument in 'iter_batches_kwargs'.
                map_batches_kwargs={"concurrency": 2, "num_cpus": 1},
                # Default for this test: materialize both data and mapped data.
                materialize_data=True,
                materialize_mapped_data=True,
                # This data set is small so do not prefetch too many batches
                # and use no local shuffle.
                iter_batches_kwargs={"prefetch_batches": 1},
                # The number of iterations to be run per learner when in
                # multi-learner mode in a single RLlib training iteration.
                # Leave this to `None` to run an entire epoch on the dataset
                # during a single RLlib training iteration.
                dataset_num_iters_per_learner=5,
            )
            .training(
                train_batch_size_per_learner=256,
                lr=0.0008,
            )
            .rl_module(
                model_config=DefaultModelConfig(
                    max_seq_len=20,
                    use_lstm=True,
                ),
            )
            .evaluation(
                evaluation_interval=1,
                evaluation_num_env_runners=1,
                evaluation_duration=5,
                evaluation_duration_unit="episodes",
                evaluation_parallel_to_training=False,
            )
        )
        # Build the algorithm.
        self.algo = self.config.build()

    def tearDown(self):
        self.algo.stop()

    # ------------------------------------------------------------------
    # Private helpers shared by all test cases.
    # ------------------------------------------------------------------
    def _take_batch_and_decode_first_episode(self):
        """Take a 1-row batch from the offline dataset and decode its episode.

        Returns:
            Tuple of (raw batch, single-element episode list).

        The dataset contains only the best 5 recorded episodes (returns
        1349-1596), so the decoded episode must have a return > 350.
        """
        # Load these packages inline.
        import msgpack
        import msgpack_numpy as mnp

        batch = self.algo.offline_data.data.take_batch(1)
        episodes = [
            SingleAgentEpisode.from_state(
                msgpack.unpackb(state, object_hook=mnp.decode)
            )
            for state in batch["item"]
        ][:1]
        print(f"episodes[0].id_: {episodes[0].id_}")
        print(f"Found episode with return {episodes[0].get_return()}")
        # Assert the episode has a decent return.
        assert episodes[0].get_return() > 350.0, "Return must be >350.0"
        return batch, episodes

    def _run_learner_connector(self, episodes):
        """Build the learner connector pipeline and run it on `episodes`."""
        obs_space, action_space = self.algo.offline_data.spaces[INPUT_ENV_SPACES]
        learner_connector = self.algo.config.build_learner_connector(
            input_observation_space=obs_space,
            input_action_space=action_space,
        )
        return learner_connector(
            rl_module=self.algo.learner_group._learner.module,
            batch={},
            episodes=episodes,
            shared_data={},
            # TODO (simon): Add MetricsLogger to non-Learner components that
            # have a LearnerConnector pipeline.
            metrics=None,
        )

    def _make_training_data(self, processed_batch):
        """Wrap a processed per-policy batch into (MultiAgentBatch, TrainingData)."""
        ma_batch = MultiAgentBatch(
            policy_batches={
                "default_policy": SampleBatch(processed_batch["default_policy"])
            },
            env_steps=np.prod(processed_batch["default_policy"]["obs"].shape[:-1]),
        )
        return ma_batch, TrainingData(batch=ma_batch)

    def _overfit(self, training_data, minibatch_size):
        """Repeatedly update the learner until the policy loss converges.

        Checks the loss every 10 outer iterations and stops when it is close
        to 1e-4 or after 100 iterations, whichever comes first.
        """
        i = 0
        while True:
            i += 1
            learner_results = self.algo.learner_group.update(
                training_data=training_data,
                minibatch_size=minibatch_size,
                num_iters=self.algo.config.dataset_num_iters_per_learner,
                **self.algo.offline_data.iter_batches_kwargs,
            )
            if i % 10 == 0:
                loss = learner_results[0]["default_policy"]["policy_loss"].peek()
                print(f"Iteration {i}: policy_loss: {loss}")
                if np.isclose(loss, 1e-4, atol=1e-5) or i >= 100:
                    break

    def _sync_weights_and_evaluate(self):
        """Push the latest learner weights to the eval runners and evaluate.

        Samples `evaluation_duration` episodes without exploration and asserts
        the mean episode return is at least 100.
        """
        rl_module_state = self.algo.learner_group.get_state()["learner"]["rl_module"]
        self.algo.eval_env_runner_group.foreach_env_runner(
            func="set_state",
            local_env_runner=False,
            kwargs={"state": {"rl_module": rl_module_state}},
        )
        eval_episodes = self.algo.eval_env_runner_group.foreach_env_runner(
            func=lambda er, duration=self.config.evaluation_duration: er.sample(
                num_episodes=duration, explore=False
            ),
            local_env_runner=False,
        )
        # Assert the eval return is decent.
        episode_return_mean = np.mean([ep.get_return() for ep in eval_episodes[0]])
        self.assertGreaterEqual(
            episode_return_mean,
            100.0,
            f"Eval return must be >100.0 but is {episode_return_mean}",
        )
        print(f"Eval episodes returns: {episode_return_mean}")

    # ------------------------------------------------------------------
    # Test cases.
    # ------------------------------------------------------------------
    def test_training_on_single_episode_and_evaluate(self):
        """Trains on a single episode from the recorded dataset and evaluates.

        Uses a zero initial state for training (from `RLModule`).
        """
        _, episodes = self._take_batch_and_decode_first_episode()
        # Remove recorded states so the RLModule's zero initial state is used.
        episodes[0].extra_model_outputs.pop(Columns.STATE_OUT, None)
        episodes[0].extra_model_outputs.pop(Columns.STATE_IN, None)
        processed_batch = self._run_learner_connector(episodes)
        ma_batch, training_data = self._make_training_data(processed_batch)
        self._overfit(
            training_data, minibatch_size=ma_batch["default_policy"].count
        )
        self._sync_weights_and_evaluate()

    def test_training_with_recorded_states_on_single_episode_and_evaluate(self):
        """Trains on a single episode from the recorded dataset and evaluates.

        Uses recorded states for training.
        """
        _, episodes = self._take_batch_and_decode_first_episode()
        processed_batch = self._run_learner_connector(episodes)
        ma_batch, training_data = self._make_training_data(processed_batch)
        self._overfit(
            training_data, minibatch_size=ma_batch["default_policy"].count
        )
        self._sync_weights_and_evaluate()

    def test_training_with_recorded_states_on_single_batch_and_evaluate(self):
        """Trains on a single batch from the recorded dataset and evaluates.

        Uses recorded states for training; the batch is processed through the
        OfflinePreLearner instead of the learner connector directly.
        """
        # Initialize the OfflinePreLearner.
        oplr = OfflinePreLearner(
            config=self.algo.config,
            spaces=self.algo.offline_data.spaces[INPUT_ENV_SPACES],
            module_spec=self.algo.offline_data.module_spec,
            module_state=self.algo.learner_group._learner.get_state()["rl_module"],
        )
        batch, _ = self._take_batch_and_decode_first_episode()
        # Run the OfflinePreLearner on the raw batch and unflatten the result.
        processed_batch = unflatten_dict(oplr(batch))
        _, training_data = self._make_training_data(processed_batch)
        self._overfit(
            training_data,
            minibatch_size=self.algo.config.train_batch_size_per_learner,
        )
        self._sync_weights_and_evaluate()
if __name__ == "__main__":
    # Local imports keep sys/pytest out of the module namespace during
    # normal test discovery.
    import sys

    import pytest

    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/offline/tests/test_offline_rl_stateful.py",
"license": "Apache License 2.0",
"lines": 344,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/llm/_internal/batch/constants.py | from typing import Literal
class TaskType:
    """Base class for engine task-type enumerations.

    Subclasses declare their task types as public, string-valued class
    attributes; `values()` collects them.
    """

    @classmethod
    def values(cls):
        """Return a set of all valid task type values."""
        collected = set()
        for attr_name, attr_value in vars(cls).items():
            # Only public string attributes count as task types; methods and
            # dunder entries (e.g. __doc__, __module__) are skipped.
            if not attr_name.startswith("_") and isinstance(attr_value, str):
                collected.add(attr_value)
        return collected
class vLLMTaskType(TaskType):
    """The type of task to run on the vLLM engine.

    Values are collected by `TaskType.values()` from the public string
    attributes below.
    """

    # Generate text (decoder-style completion).
    GENERATE = "generate"
    # Generate embeddings.
    EMBED = "embed"
    # Classification (e.g., sequence classification models).
    CLASSIFY = "classify"
    # Scoring (e.g., cross-encoder models).
    SCORE = "score"
class SGLangTaskType(TaskType):
    """The type of task to run on the SGLang engine.

    Currently only text generation is supported.
    """

    # Generate text.
    GENERATE = "generate"
# NOTE(review): building Literal[...] from a runtime tuple cannot be resolved
# by static type checkers; presumably these aliases exist for runtime
# validation of task-type strings only — confirm before refactoring.
TypeVLLMTaskType = Literal[tuple(vLLMTaskType.values())]
TypeSGLangTaskType = Literal[tuple(SGLangTaskType.values())]
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/_internal/batch/constants.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/data/_internal/planner/checkpoint/plan_read_op.py | import functools
from typing import Callable, List, Optional
from ray import ObjectRef
from ray.data._internal.execution.interfaces import PhysicalOperator
from ray.data._internal.execution.operators.map_transformer import (
BlockMapTransformFn,
)
from ray.data._internal.logical.operators import Read
from ray.data._internal.output_buffer import OutputBlockSizeOption
from ray.data._internal.planner.plan_read_op import plan_read_op
from ray.data.checkpoint.util import (
CHECKPOINTED_IDS_KWARG_NAME,
filter_checkpointed_rows_for_blocks,
)
from ray.data.context import DataContext
def plan_read_op_with_checkpoint_filter(
    op: Read,
    physical_children: List[PhysicalOperator],
    data_context: DataContext,
    load_checkpoint: Optional[Callable[[], ObjectRef]] = None,
) -> PhysicalOperator:
    """Plan a Read op and append a transform that drops checkpointed rows.

    Args:
        op: The logical Read operator to plan.
        physical_children: Physical operators feeding into this one.
        data_context: Context carrying the checkpoint configuration.
        load_checkpoint: Optional zero-arg callable returning an ObjectRef to
            the checkpointed-IDs block; when given, each map task receives it
            via the checkpointed-IDs kwarg.

    Returns:
        The planned physical read operator with checkpoint filtering attached.
    """
    read_op = plan_read_op(op, physical_children, data_context)

    filter_fn = functools.partial(
        filter_checkpointed_rows_for_blocks,
        checkpoint_config=data_context.checkpoint_config,
    )
    block_size_option = OutputBlockSizeOption.of(
        target_max_block_size=data_context.target_max_block_size,
    )
    # TODO avoid modifying in-place
    read_op._map_transformer.add_transform_fns(
        [
            BlockMapTransformFn(
                filter_fn,
                output_block_size_option=block_size_option,
            )
        ]
    )

    if load_checkpoint is not None:
        read_op.add_map_task_kwargs_fn(
            lambda: {CHECKPOINTED_IDS_KWARG_NAME: load_checkpoint()}
        )
    return read_op
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/planner/checkpoint/plan_read_op.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/data/_internal/planner/checkpoint/plan_write_op.py | import itertools
from typing import Iterable, List
from ray.data._internal.execution.interfaces import PhysicalOperator
from ray.data._internal.execution.interfaces.task_context import TaskContext
from ray.data._internal.execution.operators.map_transformer import (
BlockMapTransformFn,
)
from ray.data._internal.logical.operators import Write
from ray.data._internal.planner.plan_write_op import (
_plan_write_op_internal,
generate_collect_write_stats_fn,
)
from ray.data.block import Block, BlockAccessor
from ray.data.checkpoint.checkpoint_writer import CheckpointWriter
from ray.data.checkpoint.interfaces import (
InvalidCheckpointingOperators,
)
from ray.data.context import DataContext
from ray.data.datasource.datasink import Datasink
def plan_write_op_with_checkpoint_writer(
    op: Write, physical_children: List[PhysicalOperator], data_context: DataContext
) -> PhysicalOperator:
    """Plan a Write op with a checkpoint-writing step appended.

    The write pipeline gains two extra transformations: one that persists a
    row-level checkpoint for every written block, followed by the standard
    write-stats collection step.
    """
    assert data_context.checkpoint_config is not None

    extra = [
        _generate_checkpoint_writing_transform(data_context, op),
        generate_collect_write_stats_fn(),
    ]
    return _plan_write_op_internal(
        op,
        physical_children,
        data_context,
        extra_transformations=extra,
    )
def _generate_checkpoint_writing_transform(
    data_context: DataContext, logical_op: Write
) -> BlockMapTransformFn:
    """Create a transform fn that writes a checkpoint for each written block.

    Args:
        data_context: Context holding the checkpoint configuration.
        logical_op: The logical Write op; its sink must be a `Datasink`.

    Returns:
        A `BlockMapTransformFn` that checkpoints every non-empty block and
        passes all blocks through unchanged.

    Raises:
        InvalidCheckpointingOperators: If the write target is a legacy
            Datasource rather than a Datasink.
    """
    datasink = logical_op.datasink_or_legacy_datasource
    if not isinstance(datasink, Datasink):
        raise InvalidCheckpointingOperators(
            f"To enable checkpointing, Write operation must use a "
            f"Datasink and not a legacy Datasource, but got: "
            f"{type(datasink)}"
        )

    checkpoint_writer = CheckpointWriter.create(data_context.checkpoint_config)

    # MapTransformFn for writing checkpoint files after write completes.
    def write_checkpoint_for_block(
        blocks: Iterable[Block], ctx: TaskContext
    ) -> Iterable[Block]:
        # Materialize once instead of `itertools.tee`: tee buffered every
        # block anyway (the first iterator was fully consumed before the
        # second was read), so a plain list is equivalent and simpler.
        block_list = list(blocks)
        for block in block_list:
            ba = BlockAccessor.for_block(block)
            if ba.num_rows() > 0:
                if data_context.checkpoint_config.id_column not in ba.column_names():
                    # Raised at execution time when the ID column was dropped
                    # or renamed upstream of the write.
                    raise ValueError(
                        f"ID column {data_context.checkpoint_config.id_column} is "
                        f"absent in the block to be written. Do not drop or rename "
                        f"this column."
                    )
                checkpoint_writer.write_block_checkpoint(ba)
        return block_list

    return BlockMapTransformFn(
        write_checkpoint_for_block,
        is_udf=False,
        # NOTE: No need for block-shaping
        disable_block_shaping=True,
    )
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/planner/checkpoint/plan_write_op.py",
"license": "Apache License 2.0",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/data/checkpoint/checkpoint_filter.py | import abc
import logging
import time
from typing import List, Optional
import numpy
import pyarrow
import ray
from ray.data._internal.arrow_ops import transform_pyarrow
from ray.data._internal.execution.interfaces.ref_bundle import RefBundle
from ray.data.block import Block, BlockAccessor, BlockMetadata, DataBatch, Schema
from ray.data.checkpoint import CheckpointConfig
from ray.data.datasource import PathPartitionFilter
from ray.data.datasource.path_util import _unwrap_protocol
from ray.types import ObjectRef
logger = logging.getLogger(__name__)
class CheckpointFilter(abc.ABC):
    """Abstract base for filtering out already-checkpointed rows.

    Concrete subclasses implement the filtering strategy for a specific
    checkpoint backend; this base class only unpacks shared configuration.
    """

    def __init__(self, config: CheckpointConfig):
        self.ckpt_config = config
        cfg = self.ckpt_config
        self.checkpoint_path = cfg.checkpoint_path
        # Protocol-stripped variant of the path (e.g. without "s3://").
        self.checkpoint_path_unwrapped = _unwrap_protocol(cfg.checkpoint_path)
        self.id_column = cfg.id_column
        self.filesystem = cfg.filesystem
        self.filter_num_threads = cfg.filter_num_threads
@ray.remote(max_retries=-1)
def _combine_chunks(ckpt_block: pyarrow.Table) -> pyarrow.Table:
    """Combine chunks for the checkpoint block.

    Args:
        ckpt_block: The checkpoint block to combine chunks for

    Returns:
        The combined checkpoint block
    """
    from ray.data._internal.arrow_ops.transform_pyarrow import combine_chunks

    combined = combine_chunks(ckpt_block)
    logger.debug(
        "Checkpoint block stats for id column checkpoint: "
        "Combined block: type=%s, %d rows, %d bytes",
        combined.schema.to_string(),
        combined.num_rows,
        combined.nbytes,
    )
    return combined
class CheckpointLoader:
    """Loads checkpointed row IDs from a checkpoint directory as one block."""

    def __init__(
        self,
        checkpoint_path: str,
        filesystem: pyarrow.fs.FileSystem,
        id_column: str,
        checkpoint_path_partition_filter: Optional[PathPartitionFilter] = None,
    ):
        """Initialize the CheckpointLoader.

        Args:
            checkpoint_path: The path to the checkpoint
            filesystem: The filesystem to use
            id_column: The name of the ID column
            checkpoint_path_partition_filter: Filter for checkpoint files to load during
                restoration when reading from `checkpoint_path`.
        """
        self.checkpoint_path = checkpoint_path
        self.filesystem = filesystem
        self.id_column = id_column
        self.checkpoint_path_partition_filter = checkpoint_path_partition_filter

    def load_checkpoint(self) -> ObjectRef[Block]:
        """Load the checkpoint data as a single block.

        Reads the checkpoint parquet files, runs the subclass's pre-processing
        pipeline, repartitions the result into exactly one block, combines its
        chunks, and validates the loaded checkpoint.

        Returns:
            ObjectRef[Block]: ObjectRef to the checkpointed IDs block.
        """
        start_t = time.time()
        # Load the checkpoint data
        checkpoint_ds: ray.data.Dataset = ray.data.read_parquet(
            self.checkpoint_path,
            filesystem=self.filesystem,
            partition_filter=self.checkpoint_path_partition_filter,
        )
        # Manually disable checkpointing for loading the checkpoint metadata
        # to avoid recursively restoring checkpoints.
        # TODO: Clean way to do this would be to introduce per Op config
        # [https://github.com/ray-project/ray/issues/54520]
        checkpoint_ds.context.checkpoint_config = None
        # Pre-process data pipeline (subclass hook, e.g. sorting by ID).
        checkpoint_ds: ray.data.Dataset = self._preprocess_data_pipeline(checkpoint_ds)
        # Repartition to 1 block.
        checkpoint_ds = checkpoint_ds.repartition(num_blocks=1)
        # Get the block reference; after repartition there must be exactly
        # one bundle holding exactly one block.
        ref_bundles: List[RefBundle] = list(checkpoint_ds.iter_internal_ref_bundles())
        assert len(ref_bundles) == 1
        ref_bundle: RefBundle = ref_bundles[0]
        schema: Schema = ref_bundle.schema
        assert len(ref_bundle.blocks) == 1
        block_ref: ObjectRef[Block] = ref_bundle.blocks[0][0]
        metadata: BlockMetadata = ref_bundle.blocks[0][1]
        # Post-process the block (combine chunks; runs as a remote task).
        checkpoint_block_ref: ObjectRef[Block] = self._postprocess_block(block_ref)
        # Validate the loaded checkpoint
        self._validate_loaded_checkpoint(schema, metadata)
        logger.info(
            "Checkpoint loaded for %s in %.2f seconds. SizeBytes = %d, Schema = %s",
            type(self).__name__,
            time.time() - start_t,
            metadata.size_bytes,
            schema.to_string(),
        )
        return checkpoint_block_ref

    @abc.abstractmethod
    def _preprocess_data_pipeline(
        self, checkpoint_ds: ray.data.Dataset
    ) -> ray.data.Dataset:
        """Pre-process the checkpoint dataset. To be implemented by subclasses."""
        raise NotImplementedError("Subclasses must implement this method")

    def _postprocess_block(self, block_ref: ObjectRef[Block]) -> ObjectRef[Block]:
        """Combine the block so it has fewer chunks."""
        return _combine_chunks.remote(block_ref)

    def _validate_loaded_checkpoint(
        self, schema: Schema, metadata: BlockMetadata
    ) -> None:
        """Validate the loaded checkpoint. Subclasses can override for custom validation."""
        pass
class IdColumnCheckpointLoader(CheckpointLoader):
    """Loader for regular ID columns."""

    def _preprocess_data_pipeline(
        self, checkpoint_ds: ray.data.Dataset
    ) -> ray.data.Dataset:
        """Sort the checkpoint dataset by the ID column.

        Sorting is required because `filter_rows_for_block` performs a binary
        search over the checkpointed IDs during restore.

        Args:
            checkpoint_ds: The checkpoint dataset to pre-process

        Returns:
            The checkpoint dataset sorted by the ID column.
        """
        sorted_ds = checkpoint_ds.sort(self.id_column)
        return sorted_ds
class BatchBasedCheckpointFilter(CheckpointFilter):
"""CheckpointFilter for batch-based backends."""
def load_checkpoint(self) -> ObjectRef[Block]:
"""Load checkpointed ids as a sorted block.
Returns:
ObjectRef[Block]: ObjectRef to the checkpointed IDs block.
"""
loader = IdColumnCheckpointLoader(
checkpoint_path=self.checkpoint_path,
filesystem=self.filesystem,
id_column=self.id_column,
checkpoint_path_partition_filter=self.ckpt_config.checkpoint_path_partition_filter,
)
return loader.load_checkpoint()
def delete_checkpoint(self) -> None:
self.filesystem.delete_dir(self.checkpoint_path_unwrapped)
def filter_rows_for_block(
self,
block: Block,
checkpointed_ids: Block,
) -> Block:
"""For the given block, filter out rows that have already
been checkpointed, and return the resulting block.
Args:
block: The input block to filter.
checkpointed_ids: A block containing IDs of all rows that have
been checkpointed.
Returns:
A new block with rows that have not been checkpointed.
"""
if len(checkpointed_ids) == 0 or len(block) == 0:
return block
assert isinstance(block, pyarrow.Table)
assert isinstance(checkpointed_ids, pyarrow.Table)
# The checkpointed_ids block is sorted (see load_checkpoint).
# We'll use binary search to filter out processed rows.
# And we process a single chunk at a time, otherwise `to_numpy` below
# will copy the data from shared memory to worker's heap memory.
import concurrent.futures
# Get all chunks of the checkpointed ID column.
ckpt_chunks = checkpointed_ids[self.id_column].chunks
# Convert the block's ID column to a numpy array for fast processing.
block_ids = block[self.id_column].to_numpy()
def filter_with_ckpt_chunk(ckpt_chunk: pyarrow.ChunkedArray) -> numpy.ndarray:
# Convert checkpoint chunk to numpy for fast search.
# Use internal helper function for consistency and robustness (handles null-typed arrays, etc.)
ckpt_ids = transform_pyarrow.to_numpy(ckpt_chunk, zero_copy_only=False)
# Start with a mask of all True (keep all rows).
mask = numpy.ones(len(block_ids), dtype=bool)
# Use binary search to find where block_ids would be in ckpt_ids.
sorted_indices = numpy.searchsorted(ckpt_ids, block_ids)
# Only consider indices that are within bounds.
valid_indices = sorted_indices < len(ckpt_ids)
# For valid indices, check for exact matches.
potential_matches = sorted_indices[valid_indices]
matched = ckpt_ids[potential_matches] == block_ids[valid_indices]
# Mark matched IDs as False (filter out these rows).
mask[valid_indices] = ~matched
# Delete the chunk to free memory.
del ckpt_chunk
return mask
# Use ThreadPoolExecutor to process each checkpoint chunk in parallel.
with concurrent.futures.ThreadPoolExecutor(
max_workers=self.filter_num_threads or None
) as executor:
masks = list(executor.map(filter_with_ckpt_chunk, ckpt_chunks))
# Combine all masks using logical AND (row must not be in any checkpoint chunk).
final_mask = numpy.logical_and.reduce(masks)
# Convert the final mask to a PyArrow array and filter the block.
mask_array = pyarrow.array(final_mask)
filtered_block = block.filter(mask_array)
return filtered_block
def filter_rows_for_batch(
    self,
    batch: DataBatch,
    checkpointed_ids: Block,
) -> DataBatch:
    """Remove rows of ``batch`` whose IDs have already been checkpointed.

    The batch is normalized to an Arrow block, filtered via
    :meth:`filter_rows_for_block`, and converted back to batch format.
    If you already hold a block, prefer calling
    :meth:`filter_rows_for_block` directly to avoid the conversions.
    """
    as_block = BlockAccessor.batch_to_block(batch)
    kept_block = self.filter_rows_for_block(as_block, checkpointed_ids)
    return BlockAccessor.for_block(kept_block).to_batch_format(None)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/checkpoint/checkpoint_filter.py",
"license": "Apache License 2.0",
"lines": 222,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/checkpoint/checkpoint_writer.py | import logging
import os
import uuid
from abc import abstractmethod
from pyarrow import parquet as pq
from ray.data._internal.util import call_with_retry
from ray.data.block import BlockAccessor
from ray.data.checkpoint import CheckpointBackend, CheckpointConfig
from ray.data.context import DataContext
from ray.data.datasource.path_util import _unwrap_protocol
logger = logging.getLogger(__name__)
class CheckpointWriter:
    """Abstract interface for writing row-level checkpoints to a
    backend-specific storage location.

    Subclasses must implement `.write_block_checkpoint()`."""

    def __init__(self, config: CheckpointConfig):
        """Cache the commonly used fields from the checkpoint config."""
        self.ckpt_config = config
        # Strip any URI scheme (e.g. "s3://") so path joins work.
        self.checkpoint_path_unwrapped = _unwrap_protocol(
            self.ckpt_config.checkpoint_path
        )
        self.id_col = self.ckpt_config.id_column
        self.filesystem = self.ckpt_config.filesystem
        self.write_num_threads = self.ckpt_config.write_num_threads

    @abstractmethod
    def write_block_checkpoint(self, block: BlockAccessor):
        """Write a checkpoint for all rows in a single block to the checkpoint
        output directory given by `self.checkpoint_path`.

        Subclasses of `CheckpointWriter` must implement this method."""
        ...

    @staticmethod
    def create(config: CheckpointConfig) -> "CheckpointWriter":
        """Factory method to create a `CheckpointWriter` based on the
        provided `CheckpointConfig`."""
        backend = config.backend
        batch_based_backends = (
            CheckpointBackend.CLOUD_OBJECT_STORAGE,
            CheckpointBackend.FILE_STORAGE,
        )
        if backend not in batch_based_backends:
            raise NotImplementedError(f"Backend {backend} not implemented")
        return BatchBasedCheckpointWriter(config)
class BatchBasedCheckpointWriter(CheckpointWriter):
    """CheckpointWriter for batch-based backends."""

    def __init__(self, config: CheckpointConfig):
        super().__init__(config)
        # Make sure the checkpoint directory exists before the first write.
        self.filesystem.create_dir(self.checkpoint_path_unwrapped, recursive=True)

    def write_block_checkpoint(self, block: BlockAccessor):
        """Write the ID column of ``block`` as a uniquely named Parquet
        file in the checkpoint output directory.

        Empty blocks are skipped. The write is retried on transient I/O
        errors; a final failure is logged and re-raised.
        """
        if block.num_rows() == 0:
            return

        file_name = f"{uuid.uuid4()}.parquet"
        ckpt_file_path = os.path.join(self.checkpoint_path_unwrapped, file_name)

        ids_block = block.select(columns=[self.id_col])
        # `pq.write_table` requires a PyArrow table; it errors on a pandas
        # DataFrame block, so normalize to Arrow first.
        ids_table = BlockAccessor.for_block(ids_block).to_arrow()

        def _write():
            pq.write_table(
                ids_table,
                ckpt_file_path,
                filesystem=self.filesystem,
            )

        try:
            return call_with_retry(
                _write,
                description=f"Write checkpoint file: {file_name}",
                match=DataContext.get_current().retried_io_errors,
            )
        except Exception:
            logger.exception(f"Checkpoint write failed: {file_name}")
            raise
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/checkpoint/checkpoint_writer.py",
"license": "Apache License 2.0",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/data/checkpoint/interfaces.py | import os
import warnings
from enum import Enum
from typing import TYPE_CHECKING, Optional, Tuple
import pyarrow
from ray.util.annotations import DeveloperAPI, PublicAPI
if TYPE_CHECKING:
from ray.data.datasource import PathPartitionFilter
@PublicAPI(stability="alpha")
class CheckpointBackend(Enum):
    """Supported backends for storing and reading checkpoint files.

    Currently, only one type of backend is supported:
    * Batch-based backends: CLOUD_OBJECT_STORAGE and FILE_STORAGE.

    Their characteristics are as follows:
    1. Writing checkpoints: Batch-based backends write a checkpoint file
       for each block.
    2. Loading checkpoints and filtering input data: Batch-based backends
       load all checkpoint data into memory prior to dataset execution.
       The checkpoint data is then passed to each read task to perform
       filtering.
    """

    CLOUD_OBJECT_STORAGE = "CLOUD_OBJECT_STORAGE"
    """
    Batch-based checkpoint backend that uses cloud object storage, such as
    AWS S3, Google Cloud Storage, etc.
    """

    FILE_STORAGE = "FILE_STORAGE"
    """
    Batch-based checkpoint backend that uses file system storage.
    Note, when using this backend, the checkpoint path must be a
    network-mounted file system (e.g. `/mnt/cluster_storage/`).
    """
@PublicAPI(stability="beta")
class CheckpointConfig:
    """Configuration for checkpointing.

    Args:
        id_column: Name of the ID column in the input dataset.
            ID values must be unique across all rows in the dataset and must
            persist during all operators.
        checkpoint_path: Path to store the checkpoint data. It can be a path to
            a cloud object storage (e.g. `s3://bucket/path`) or a file system
            path. If the latter, the path must be a network-mounted file system
            (e.g. `/mnt/cluster_storage/`) that is accessible to the entire
            cluster. If not set, defaults to
            `RAY_DATA_CHECKPOINT_PATH_BUCKET/ray_data_checkpoint`.
        delete_checkpoint_on_success: If true, automatically delete checkpoint
            data when the dataset execution succeeds. Only supported for
            batch-based backend currently.
        override_filesystem: Override the :class:`pyarrow.fs.FileSystem` object
            used to read/write checkpoint data. Use this when you want to use
            custom credentials.
        override_backend: Override the :class:`CheckpointBackend` object used to
            access the checkpoint backend storage. Deprecated.
        filter_num_threads: Number of threads used to filter checkpointed rows.
        write_num_threads: Number of threads used to write checkpoint files for
            completed rows.
        checkpoint_path_partition_filter: Filter for checkpoint files to load
            during restoration when reading from `checkpoint_path`.

    Raises:
        InvalidCheckpointingConfig: If the ID column is not a non-empty string,
            if no checkpoint path can be determined, or if the checkpoint path
            (or an override) is invalid.
    """

    DEFAULT_CHECKPOINT_PATH_BUCKET_ENV_VAR = "RAY_DATA_CHECKPOINT_PATH_BUCKET"
    DEFAULT_CHECKPOINT_PATH_DIR = "ray_data_checkpoint"

    def __init__(
        self,
        id_column: Optional[str] = None,
        checkpoint_path: Optional[str] = None,
        *,
        delete_checkpoint_on_success: bool = True,
        override_filesystem: Optional["pyarrow.fs.FileSystem"] = None,
        override_backend: Optional[CheckpointBackend] = None,
        filter_num_threads: int = 3,
        write_num_threads: int = 3,
        checkpoint_path_partition_filter: Optional["PathPartitionFilter"] = None,
    ):
        # Validate the ID column eagerly; everything downstream assumes a
        # usable column name.
        self.id_column: Optional[str] = id_column
        if not isinstance(self.id_column, str) or len(self.id_column) == 0:
            raise InvalidCheckpointingConfig(
                "Checkpoint ID column must be a non-empty string, "
                f"but got {self.id_column}"
            )
        if override_backend is not None:
            warnings.warn(
                "`override_backend` is deprecated and will be removed in August 2025.",
                FutureWarning,
                stacklevel=2,
            )
        self.checkpoint_path: str = (
            checkpoint_path or self._get_default_checkpoint_path()
        )
        inferred_backend, inferred_fs = self._infer_backend_and_fs(
            self.checkpoint_path,
            override_filesystem,
            override_backend,
        )
        self.filesystem: "pyarrow.fs.FileSystem" = inferred_fs
        self.backend: CheckpointBackend = inferred_backend
        self.delete_checkpoint_on_success: bool = delete_checkpoint_on_success
        self.filter_num_threads: int = filter_num_threads
        self.write_num_threads: int = write_num_threads
        self.checkpoint_path_partition_filter = checkpoint_path_partition_filter

    def _get_default_checkpoint_path(self) -> str:
        """Derive the default checkpoint path from the env var, or raise."""
        artifact_storage = os.environ.get(self.DEFAULT_CHECKPOINT_PATH_BUCKET_ENV_VAR)
        if artifact_storage is None:
            raise InvalidCheckpointingConfig(
                f"`{self.DEFAULT_CHECKPOINT_PATH_BUCKET_ENV_VAR}` env var is not set, "
                "please explicitly set `CheckpointConfig.checkpoint_path`."
            )
        return f"{artifact_storage}/{self.DEFAULT_CHECKPOINT_PATH_DIR}"

    def _infer_backend_and_fs(
        self,
        checkpoint_path: str,
        override_filesystem: Optional["pyarrow.fs.FileSystem"] = None,
        override_backend: Optional[CheckpointBackend] = None,
    ) -> Tuple[CheckpointBackend, "pyarrow.fs.FileSystem"]:
        """Infer the filesystem and backend from ``checkpoint_path``,
        honoring explicit overrides.

        Any failure (bad path, wrongly typed override) is wrapped in
        `InvalidCheckpointingConfig` with the original error chained.
        """
        try:
            if override_filesystem is not None:
                # Use explicit `raise` rather than `assert` so the validation
                # survives running Python with the -O flag (asserts are
                # stripped). TypeError is caught below and surfaced as
                # InvalidCheckpointingConfig, same as the old AssertionError.
                if not isinstance(override_filesystem, pyarrow.fs.FileSystem):
                    raise TypeError(
                        "override_filesystem must be an instance of "
                        f"`pyarrow.fs.FileSystem`, but got {type(override_filesystem)}"
                    )
                fs = override_filesystem
            else:
                fs, _ = pyarrow.fs.FileSystem.from_uri(checkpoint_path)
            if override_backend is not None:
                if not isinstance(override_backend, CheckpointBackend):
                    raise TypeError(
                        "override_backend must be an instance of `CheckpointBackend`, "
                        f"but got {type(override_backend)}"
                    )
                backend = override_backend
            else:
                # A local filesystem implies the network-mounted FILE_STORAGE
                # backend; anything else is treated as cloud object storage.
                if isinstance(fs, pyarrow.fs.LocalFileSystem):
                    backend = CheckpointBackend.FILE_STORAGE
                else:
                    backend = CheckpointBackend.CLOUD_OBJECT_STORAGE
            return backend, fs
        except Exception as e:
            raise InvalidCheckpointingConfig(
                f"Invalid checkpoint path: {checkpoint_path}. "
            ) from e
@DeveloperAPI
class InvalidCheckpointingConfig(Exception):
    """Raised when the checkpointing configuration is invalid."""
@DeveloperAPI
class InvalidCheckpointingOperators(Exception):
    """Raised when the DAG is not eligible for checkpointing because it
    contains one or more incompatible operators."""
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/checkpoint/interfaces.py",
"license": "Apache License 2.0",
"lines": 145,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/checkpoint/load_checkpoint_callback.py | import logging
from typing import Optional
from ray.data._internal.execution.execution_callback import (
ExecutionCallback,
remove_execution_callback,
)
from ray.data._internal.execution.streaming_executor import StreamingExecutor
from ray.data.block import Block
from ray.data.checkpoint import CheckpointConfig
from ray.data.checkpoint.checkpoint_filter import BatchBasedCheckpointFilter
from ray.types import ObjectRef
logger = logging.getLogger(__name__)
class LoadCheckpointCallback(ExecutionCallback):
    """ExecutionCallback that loads checkpoint data before execution starts
    and cleans it up after a successful run."""

    def __init__(self, config: CheckpointConfig):
        assert config is not None
        self._config = config
        self._ckpt_filter = self._create_checkpoint_filter(config)
        self._checkpoint_ref: Optional[ObjectRef[Block]] = None

    def _create_checkpoint_filter(
        self, config: CheckpointConfig
    ) -> BatchBasedCheckpointFilter:
        """Factory for the checkpoint filter; subclasses may override this
        to plug in a different filter implementation."""
        return BatchBasedCheckpointFilter(config)

    def _load_checkpoint_data(self) -> ObjectRef[Block]:
        """Load checkpoint data from storage (via the checkpoint filter)."""
        return self._ckpt_filter.load_checkpoint()

    def before_execution_starts(self, executor: StreamingExecutor):
        assert self._config is executor._data_context.checkpoint_config
        # Kick off the checkpoint load before any tasks run.
        self._checkpoint_ref = self._load_checkpoint_data()

    def after_execution_succeeds(self, executor: StreamingExecutor):
        assert self._config is executor._data_context.checkpoint_config
        # The callback is one-shot: detach it from the DataContext first.
        remove_execution_callback(self, executor._data_context)
        if not self._config.delete_checkpoint_on_success:
            return
        # Best-effort cleanup; a failed delete must not fail the job.
        try:
            self._ckpt_filter.delete_checkpoint()
        except Exception:
            logger.warning("Failed to delete checkpoint data.", exc_info=True)

    def after_execution_fails(self, executor: StreamingExecutor, error: Exception):
        assert self._config is executor._data_context.checkpoint_config
        # Keep checkpoint data so a retry can resume; just detach the callback.
        remove_execution_callback(self, executor._data_context)

    def load_checkpoint(self) -> ObjectRef[Block]:
        assert self._checkpoint_ref is not None
        return self._checkpoint_ref
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/checkpoint/load_checkpoint_callback.py",
"license": "Apache License 2.0",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/data/checkpoint/util.py | import logging
from typing import Iterable
from ray.data._internal.execution.interfaces.task_context import TaskContext
from ray.data.block import Block, BlockAccessor, DataBatch
from ray.data.checkpoint.interfaces import (
CheckpointConfig,
)
logger = logging.getLogger(__name__)
# Name of the kwarg under which `TaskContext.kwargs` carries the
# checkpointed-IDs block consumed by the filter functions below.
CHECKPOINTED_IDS_KWARG_NAME = "checkpointed_ids"
def filter_checkpointed_rows_for_blocks(
    blocks: Iterable[Block],
    task_context: TaskContext,
    checkpoint_config: CheckpointConfig,
) -> Iterable[Block]:
    """Yield each input block with already-checkpointed rows removed.

    Blocks that end up empty after filtering are dropped entirely.
    """
    # Imported lazily here (presumably to avoid an import cycle at module
    # load time — follows the original code's structure).
    from ray.data.checkpoint.checkpoint_filter import (
        BatchBasedCheckpointFilter,
    )

    ckpt_filter = BatchBasedCheckpointFilter(checkpoint_config)
    checkpointed_ids = task_context.kwargs[CHECKPOINTED_IDS_KWARG_NAME]

    for blk in blocks:
        remaining = ckpt_filter.filter_rows_for_block(
            block=blk,
            checkpointed_ids=checkpointed_ids,
        )
        if BlockAccessor.for_block(remaining).num_rows() > 0:
            yield remaining
def filter_checkpointed_rows_for_batches(
    batches: Iterable[DataBatch],
    task_context: TaskContext,
    checkpoint_config: CheckpointConfig,
) -> Iterable[DataBatch]:
    """Yield each input batch with already-checkpointed rows removed.

    Unlike the block variant, batches are always yielded — even when the
    filter leaves them empty.
    """
    from ray.data.checkpoint.checkpoint_filter import (
        BatchBasedCheckpointFilter,
    )

    ckpt_filter = BatchBasedCheckpointFilter(checkpoint_config)
    checkpointed_ids = task_context.kwargs[CHECKPOINTED_IDS_KWARG_NAME]

    for one_batch in batches:
        yield ckpt_filter.filter_rows_for_batch(
            batch=one_batch,
            checkpointed_ids=checkpointed_ids,
        )
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/checkpoint/util.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/data/tests/test_checkpoint.py | import csv
import os
import random
from typing import List, Union
import pandas as pd
import pyarrow
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from pyarrow.fs import FileSelector, LocalFileSystem
from pytest_lazy_fixtures import lf as lazy_fixture
import ray
from ray.data._internal.datasource.csv_datasource import CSVDatasource
from ray.data._internal.datasource.parquet_datasink import ParquetDatasink
from ray.data._internal.logical.interfaces.logical_plan import LogicalPlan
from ray.data._internal.logical.operators import Read, Write
from ray.data._internal.logical.optimizers import get_execution_plan
from ray.data.block import BlockAccessor
from ray.data.checkpoint import CheckpointConfig
from ray.data.checkpoint.checkpoint_filter import (
BatchBasedCheckpointFilter,
)
from ray.data.checkpoint.checkpoint_writer import (
BatchBasedCheckpointWriter,
)
from ray.data.checkpoint.interfaces import (
CheckpointBackend,
InvalidCheckpointingConfig,
)
from ray.data.context import DataContext
from ray.data.datasource.path_util import _unwrap_protocol
from ray.data.tests.conftest import * # noqa
from ray.tests.conftest import * # noqa
# User-provided ID column name used by every test dataset in this module.
ID_COL = "id"
# Number of rows in the generated sample data.
SAMPLE_DATA_NUM_ROWS = 10
# Auto-use `restore_data_context` for each test and apply 300-second timeout to all tests.
pytestmark = [
    pytest.mark.usefixtures("restore_data_context"),
    pytest.mark.timeout(300),
]
@pytest.fixture
def generate_sample_data_csv(tmp_path):
    """Return a factory that writes a small CSV dataset and returns its path."""

    def _generate():
        # Dummy dataset: SAMPLE_DATA_NUM_ROWS rows, columns [ID_COL, "col1"].
        rows = [
            {ID_COL: i, "col1": random.random()} for i in range(SAMPLE_DATA_NUM_ROWS)
        ]
        f_path = os.path.join(tmp_path, "sample_data.csv")
        with open(f_path, mode="w", newline="") as file:
            writer = csv.DictWriter(file, fieldnames=rows[0].keys())
            writer.writeheader()
            writer.writerows(rows)
        return f_path

    return _generate
@pytest.fixture
def checkpoint_path(tmp_path):
    """Temporary directory path to hold checkpoint data."""
    return os.path.join(str(tmp_path), "checkpoint")
@pytest.fixture
def data_output_path(data_path):
    """Fixture to provide a standardized data output path.

    ``data_path`` is supplied by a conftest fixture and may be a local or
    remote (e.g. S3) path.
    """
    return os.path.join(data_path, "output")
@pytest.fixture
def generate_sample_data_parquet(tmp_path):
    """Return a factory that writes a small Parquet dataset and returns its directory."""

    def _generate():
        out_dir = os.path.join(tmp_path, "sample_data_parquet")
        os.makedirs(out_dir, exist_ok=True)
        # Dummy dataset: SAMPLE_DATA_NUM_ROWS rows, columns [ID_COL, "col1"].
        frame = pd.DataFrame(
            [{ID_COL: i, "col1": random.random()} for i in range(SAMPLE_DATA_NUM_ROWS)]
        )
        out_file = os.path.join(out_dir, "sample_data.parquet")
        # Write 3 row groups per file with uneven distribution of rows per row group.
        arrow_table = pa.table(frame)
        row_group_size = max(1, SAMPLE_DATA_NUM_ROWS // 3)
        pq.write_table(arrow_table, out_file, row_group_size=row_group_size)
        return out_dir

    return _generate
@pytest.fixture
def generate_sample_physical_plan(generate_sample_data_csv, tmp_path):
    """Yield a physical plan for a simple read-CSV -> write-Parquet pipeline."""
    ctx = ray.data.DataContext.get_current()
    datasource = CSVDatasource(generate_sample_data_csv())
    read_op = Read(datasource, datasource, -1, None)
    write_op = Write(read_op, ParquetDatasink(os.path.join(tmp_path, "output")))
    yield get_execution_plan(LogicalPlan(write_op, ctx))
def _get_batch_based_files(ckpt_path: str, fs) -> List[str]:
"""Get checkpoint file paths for batch-based backends."""
if fs is None:
if not os.path.exists(ckpt_path):
return []
return [os.path.join(ckpt_path, f) for f in os.listdir(ckpt_path)]
else:
files = fs.get_file_info(
FileSelector(_unwrap_protocol(ckpt_path), allow_not_found=True)
)
return [file_info.path for file_info in files if file_info.is_file]
def _read_batch_file_ids(file_paths: List[str], id_column: str, fs) -> List[int]:
    """Read IDs from batch-based checkpoint files.

    With ``fs=None`` the files are read from local disk; otherwise they are
    opened through the given pyarrow filesystem.
    """
    collected = []
    for path in file_paths:
        if fs is None:
            table = pa.parquet.read_table(path)
        else:
            with fs.open_input_file(path) as f:
                table = pa.parquet.read_table(f)
        collected.extend(table.to_pandas()[id_column].tolist())
    return collected
def read_ids_from_checkpoint_files(config: CheckpointConfig) -> List[Union[int, str]]:
    """Reads the checkpoint files and returns a sorted list of IDs which have been checkpointed."""
    batch_based_backends = (
        CheckpointBackend.FILE_STORAGE,
        CheckpointBackend.CLOUD_OBJECT_STORAGE,
    )
    if config.backend not in batch_based_backends:
        raise ValueError(f"Invalid backend: {config.backend}")
    # Batch-based backends: one Parquet file of IDs per checkpointed block.
    paths = _get_batch_based_files(config.checkpoint_path, config.filesystem)
    return sorted(_read_batch_file_ids(paths, config.id_column, config.filesystem))
class TestCheckpointConfig:
    """Unit tests for `CheckpointConfig` validation, defaulting, and
    filesystem/backend inference."""

    @pytest.mark.parametrize("id_column", ["", 1])
    def test_invalid_id_column(self, id_column, local_path):
        # Empty or non-string ID columns must be rejected at construction.
        with pytest.raises(
            InvalidCheckpointingConfig,
            match="Checkpoint ID column",
        ):
            CheckpointConfig(id_column, local_path)

    def test_override_backend_emits_deprecation_warning(self):
        # `override_backend` is deprecated; constructing with it must warn.
        with pytest.warns(FutureWarning, match="deprecated"):
            CheckpointConfig(
                ID_COL,
                "s3://bucket/path",
                override_backend=CheckpointBackend.FILE_STORAGE,
            )

    def test_default_checkpoint_path(self, s3_path, monkeypatch):
        # Without the env var, omitting checkpoint_path is an error.
        with pytest.raises(
            InvalidCheckpointingConfig,
            match="CheckpointConfig.checkpoint_path",
        ):
            CheckpointConfig(ID_COL, None)
        default_bucket = s3_path
        monkeypatch.setenv(
            CheckpointConfig.DEFAULT_CHECKPOINT_PATH_BUCKET_ENV_VAR, default_bucket
        )
        # With the env var set, the default path is derived from it.
        config = CheckpointConfig(ID_COL, None)
        assert (
            config.checkpoint_path
            == f"{default_bucket}/{CheckpointConfig.DEFAULT_CHECKPOINT_PATH_DIR}"
        )

    @pytest.mark.parametrize("checkpoint_path", ["tmp/", "s3:/tmp", "s4://tmp"])
    def test_invalid_checkpoint_path(self, checkpoint_path):
        # Relative paths and malformed/unknown URI schemes are rejected.
        with pytest.raises(
            InvalidCheckpointingConfig,
            match="Invalid checkpoint path",
        ):
            CheckpointConfig(ID_COL, checkpoint_path)

    @pytest.mark.parametrize(
        "checkpoint_path",
        [
            lazy_fixture("local_path"),
            lazy_fixture("s3_path"),
        ],
    )
    def test_infer_filesystem_and_backend(self, checkpoint_path):
        # Without overrides, the filesystem and backend are inferred from
        # the path: absolute local path -> FILE_STORAGE, s3:// -> cloud.
        config = CheckpointConfig(ID_COL, checkpoint_path)
        if checkpoint_path.startswith("/"):
            assert isinstance(config.filesystem, pyarrow.fs.LocalFileSystem)
            assert config.backend == CheckpointBackend.FILE_STORAGE
        else:
            assert isinstance(config.filesystem, pyarrow.fs.S3FileSystem)
            assert config.backend == CheckpointBackend.CLOUD_OBJECT_STORAGE

    @pytest.mark.parametrize(
        "checkpoint_path,fs,backend",
        [
            (
                lazy_fixture("local_path"),
                lazy_fixture("local_fs"),
                CheckpointBackend.FILE_STORAGE,
            ),
            (
                lazy_fixture("s3_path"),
                lazy_fixture("s3_fs"),
                CheckpointBackend.FILE_STORAGE,
            ),
            (
                lazy_fixture("local_path"),
                lazy_fixture("local_fs"),
                CheckpointBackend.CLOUD_OBJECT_STORAGE,
            ),
            (
                lazy_fixture("s3_path"),
                lazy_fixture("s3_fs"),
                CheckpointBackend.CLOUD_OBJECT_STORAGE,
            ),
        ],
    )
    def test_override_filesystem_and_backend(self, checkpoint_path, fs, backend):
        # Explicit overrides always win over inference, even for
        # mismatched path/filesystem/backend combinations.
        config = CheckpointConfig(
            ID_COL, checkpoint_path, override_filesystem=fs, override_backend=backend
        )
        assert config.filesystem is fs
        assert config.backend is backend

    def test_skip_inference_with_overrides(self):
        """Test that filesystem inference is skipped when override is provided."""
        # Inferring filesystem will fail if the path doesn't exist.
        path = "s3://non-existing-bucket/"
        fs = pyarrow.fs.S3FileSystem()
        config = CheckpointConfig(
            ID_COL,
            path,
            override_filesystem=fs,
        )
        assert config.filesystem is fs
        assert config.backend is CheckpointBackend.CLOUD_OBJECT_STORAGE
@pytest.mark.parametrize(
    "backend,fs,data_path",
    [
        (CheckpointBackend.FILE_STORAGE, None, lazy_fixture("local_path")),
        (
            CheckpointBackend.FILE_STORAGE,
            lazy_fixture("local_fs"),
            lazy_fixture("local_path"),
        ),
        (
            CheckpointBackend.CLOUD_OBJECT_STORAGE,
            lazy_fixture("s3_fs"),
            lazy_fixture("s3_path"),
        ),
    ],
)
def test_checkpoint(
    ray_start_10_cpus_shared,
    generate_sample_data_csv,
    backend,
    fs,
    data_path,
    data_output_path,
):
    """End-to-end: a successful run with checkpointing enabled writes
    correct output and automatically cleans up its checkpoint data."""

    class TestActor:
        # Simple pass-through actor so map_batches runs through an actor pool.
        def __init__(self):
            pass

        def __call__(self, batch):
            return batch

    ctx = ray.data.DataContext.get_current()
    ckpt_path = os.path.join(data_path, "test_checkpoint_output_files")
    ctx.checkpoint_config = CheckpointConfig(
        id_column=ID_COL,
        checkpoint_path=ckpt_path,
        override_filesystem=fs,
        override_backend=backend,
    )
    csv_file = generate_sample_data_csv()
    ds = ray.data.read_csv(csv_file)
    # Execute the dataset with checkpointing enabled.
    ds = ds.map_batches(TestActor, concurrency=1)
    ds.write_parquet(data_output_path, filesystem=fs)
    # Ensure that the written data is correct.
    ds_readback = ray.data.read_parquet(data_output_path, filesystem=fs)
    actual_output = sorted([row[ID_COL] for row in ds_readback.iter_rows()])
    expected_output = sorted([row[ID_COL] for row in ds.iter_rows()])
    assert actual_output == expected_output
    # When execution succeeds, checkpoint data should be automatically deleted.
    # Check that the checkpoint directory is empty or doesn't exist.
    if ctx.checkpoint_config.delete_checkpoint_on_success:
        try:
            unwrapped_path = _unwrap_protocol(ckpt_path)
            # Try to get file info for the checkpoint directory.
            files = ctx.checkpoint_config.filesystem.get_file_info(
                pyarrow.fs.FileSelector(unwrapped_path, recursive=True)
            )
            # If we can get file info, the directory exists and should be empty.
            assert (
                len(files) == 0
            ), f"Checkpoint directory should be empty but contains {len(files)} files"
        except (FileNotFoundError, OSError):
            # If directory doesn't exist, that's also fine (cleanup worked)
            pass
@pytest.mark.parametrize(
    "backend,fs,data_path",
    [
        (CheckpointBackend.FILE_STORAGE, None, lazy_fixture("local_path")),
        (
            CheckpointBackend.FILE_STORAGE,
            lazy_fixture("local_fs"),
            lazy_fixture("local_path"),
        ),
        (
            CheckpointBackend.CLOUD_OBJECT_STORAGE,
            lazy_fixture("s3_fs"),
            lazy_fixture("s3_path"),
        ),
    ],
)
def test_full_dataset_executed_for_non_write(
    ray_start_10_cpus_shared,
    generate_sample_data_parquet,
    backend,
    fs,
    data_path,
    data_output_path,
):
    """Tests that for an already fully checkpointed Dataset,
    calling `schema()` and `count()` should not skip checkpointing
    and should execute the full Dataset to get the correct information.
    """
    ctx = ray.data.DataContext.get_current()
    ctx.default_hash_shuffle_parallelism = 1
    ckpt_path = os.path.join(data_path, "test_checkpoint_output_files")
    # NOTE(review): delete_checkpoint_on_success defaults to True, so the
    # checkpoint data may already be gone by the time ds2 runs — confirm
    # that is the intended setup for this test.
    ctx.checkpoint_config = CheckpointConfig(
        id_column=ID_COL,
        checkpoint_path=ckpt_path,
        override_filesystem=fs,
        override_backend=backend,
    )
    parquet_dir = generate_sample_data_parquet()
    ds = ray.data.read_parquet(parquet_dir)
    ds = ds.map(lambda row: row)
    # Get the schema and count prior to writing the dataset.
    schema_before_write = ds.schema()
    count_before_write = ds.count()
    ds.write_parquet(data_output_path, filesystem=fs)
    # Recreate the same dataset, so that it will skip checkpointed rows.
    ds2 = ray.data.read_parquet(parquet_dir)
    ds2 = ds2.map(lambda row: row)
    # Check that when re-running a dataset which has already been completely
    # checkpointed, it does not skip any rows during `schema()` and `count()` calls.
    assert ds2.schema() == schema_before_write
    assert ds2.count() == count_before_write
# NOTE(review): both ds_factory parameters below are the same fixture —
# it looks like one of them was meant to differ; confirm the intent.
@pytest.mark.parametrize(
    "ds_factory",
    [
        (lazy_fixture("generate_sample_data_parquet")),
        (lazy_fixture("generate_sample_data_parquet")),
    ],
)
@pytest.mark.parametrize(
    "backend,fs,data_path",
    [
        (CheckpointBackend.FILE_STORAGE, None, lazy_fixture("local_path")),
        (
            CheckpointBackend.FILE_STORAGE,
            lazy_fixture("local_fs"),
            lazy_fixture("local_path"),
        ),
        (
            CheckpointBackend.CLOUD_OBJECT_STORAGE,
            lazy_fixture("s3_fs"),
            lazy_fixture("s3_path"),
        ),
    ],
)
def test_recovery_skips_checkpointed_rows(
    ray_start_10_cpus_shared,
    ds_factory,
    backend,
    fs,
    data_path,
    data_output_path,
):
    """Tests that for a Dataset which fails partway and is recovered,
    it skips rows which have already been checkpointed."""
    ctx = ray.data.DataContext.get_current()
    ctx.execution_options.preserve_order = True
    ctx.default_hash_shuffle_parallelism = 1
    ckpt_path = os.path.join(data_path, "test_checkpoint_output_files")
    # Ensure checkpoint directory exists.
    os.makedirs(ckpt_path, exist_ok=True)
    ctx.checkpoint_config = CheckpointConfig(
        id_column=ID_COL,
        checkpoint_path=ckpt_path,
        override_filesystem=fs,
        override_backend=backend,
    )
    # Catch the custom TestException raised by FailActor.
    ctx.raise_original_map_exception = True

    @ray.remote(num_cpus=0)
    class Coordinator:
        # Cluster-wide switch so the injected failure happens only on the
        # first run; the retry flips it off via `disable_failure`.
        def __init__(self):
            self._should_fail = True

        def disable_failure(self):
            self._should_fail = False

        def should_fail(self):
            return self._should_fail

    coordinator_actor = Coordinator.remote()

    class TestException(Exception):
        pass

    class FailActor:
        """Simple passthrough actor, which fails after a certain number of rows."""

        def __init__(self, coordinator_actor, max_num_items, checkpoint_config):
            # Snapshot the failure switch once, at actor start-up.
            self._should_fail = ray.get(coordinator_actor.should_fail.remote())
            self._max_num_items = max_num_items
            self._checkpoint_config = checkpoint_config

        def __call__(self, batch):
            # Get the ID column name from the checkpoint config.
            id_col = self._checkpoint_config.id_column
            # Fail deterministically on the row with ID 2 (first run only).
            ids = batch[id_col]
            for _, id in enumerate(ids):
                if self._should_fail and id == 2:
                    raise TestException(f"FailActor: Failing on row {id}")
            return batch

    # Use the ds_factory to create the dataset.
    local_data_path = ds_factory()
    ds = ray.data.read_parquet(local_data_path)
    # Get the actual number of items from the dataset.
    max_num_items = ds.count()
    ds = ds.map_batches(
        FailActor,
        fn_constructor_args=[coordinator_actor, max_num_items, ctx.checkpoint_config],
        concurrency=1,
        batch_size=None,
        num_cpus=1.1,  # Use a different num_cpus to avoid operator fusion.
    )
    # Should fail in the middle.
    with pytest.raises(TestException):
        ds.write_parquet(data_output_path, filesystem=fs, concurrency=1)
    ray.get(coordinator_actor.disable_failure.remote())
    # When executing the same dataset again, this should skip the already
    # checkpointed rows.
    ds.write_parquet(data_output_path, filesystem=fs, concurrency=1)
    # When execution succeeds, checkpoint data should be automatically deleted.
    assert read_ids_from_checkpoint_files(ctx.checkpoint_config) == []
    # Get the ID column name from the checkpoint config.
    id_col = ctx.checkpoint_config.id_column
    # Disable checkpointing prior to reading back the data, so we don't skip any rows.
    ctx.checkpoint_config = None
    # Ensure that the written data is correct.
    ds_readback = ray.data.read_parquet(data_output_path, filesystem=fs)
    # For existing id column, expect integer IDs.
    actual_output = sorted([row[id_col] for row in ds_readback.iter_rows()])
    expected_output = sorted(range(max_num_items))
    assert actual_output == expected_output
@pytest.mark.parametrize(
    "backend,fs,data_path",
    [
        (CheckpointBackend.FILE_STORAGE, None, lazy_fixture("local_path")),
        (
            CheckpointBackend.FILE_STORAGE,
            lazy_fixture("local_fs"),
            lazy_fixture("local_path"),
        ),
        (
            CheckpointBackend.CLOUD_OBJECT_STORAGE,
            lazy_fixture("s3_fs"),
            lazy_fixture("s3_path"),
        ),
    ],
)
def test_skip_checkpoint_flag(
    ray_start_10_cpus_shared,
    generate_sample_data_csv,
    backend,
    fs,
    data_path,
):
    """Test that for a valid Dataset with checkpointing enabled, calling methods like
    `schema()` and `count()` should skip checkpointing and not create any checkpoint
    files. Subsequently calling `write_xxx()` on the same dataset should have
    checkpointing enabled."""
    ctx = ray.data.DataContext.get_current()
    ckpt_path = os.path.join(data_path, "test_checkpoint_output_files")
    # Keep checkpoint files around after success so they can be inspected.
    ctx.checkpoint_config = CheckpointConfig(
        ID_COL,
        ckpt_path,
        delete_checkpoint_on_success=False,
        override_filesystem=fs,
        override_backend=backend,
    )

    def generate_ds():
        ds = ray.data.read_csv(generate_sample_data_csv())
        ds = ds.map(lambda row: row)
        return ds

    ds = generate_ds()
    # Calling `ds.schema()` should skip checkpointing.
    assert ds.schema() is not None
    assert len(read_ids_from_checkpoint_files(ctx.checkpoint_config)) == 0
    # Calling `ds.count()` should skip checkpointing.
    ds = generate_ds()
    assert ds.count() is not None
    assert len(read_ids_from_checkpoint_files(ctx.checkpoint_config)) == 0
    # Calling `ds.write_xxx()` afterwards should enable checkpointing.
    ds.write_parquet(os.path.join(data_path, "output"), filesystem=fs)
    # Every row should now have a checkpointed ID.
    checkpoint_files = read_ids_from_checkpoint_files(ctx.checkpoint_config)
    assert len(checkpoint_files) == SAMPLE_DATA_NUM_ROWS
def test_checkpoint_with_missing_id_column(
    ray_start_10_cpus_shared,
    generate_sample_data_csv,
    tmp_path,
):
    """Test that checkpointing fails gracefully when the configured id_column doesn't exist in the data."""
    ctx = ray.data.DataContext.get_current()
    ckpt_path = os.path.join(tmp_path, "test_checkpoint_output_files")
    # Configure checkpointing with an id_column that doesn't exist in the CSV data.
    ctx.checkpoint_config = CheckpointConfig(
        id_column="nonexistent_column",
        checkpoint_path=ckpt_path,
        delete_checkpoint_on_success=False,
    )

    ds = ray.data.read_csv(generate_sample_data_csv()).map(lambda row: row)

    # The write operation should fail because the id_column doesn't exist.
    with pytest.raises(
        ValueError,
        match="ID column nonexistent_column is absent in the block to be written",
    ):
        ds.write_parquet(os.path.join(tmp_path, "output"))
def test_dict_checkpoint_config(checkpoint_path):
    """Test that a dict checkpoint config can be used to create a CheckpointConfig."""
    context = ray.data.DataContext.get_current()
    fs = LocalFileSystem()
    # Assigning a plain dict to `checkpoint_config` should be accepted and
    # coerced into a full CheckpointConfig.
    context.checkpoint_config = {
        "id_column": ID_COL,
        "checkpoint_path": checkpoint_path,
        "override_filesystem": fs,
        "override_backend": "CLOUD_OBJECT_STORAGE",
    }
    # Fields round-trip: the backend string is resolved to the enum value and
    # the override filesystem is kept by identity.
    assert context.checkpoint_config.id_column == ID_COL
    assert context.checkpoint_config.checkpoint_path == checkpoint_path
    assert context.checkpoint_config.filesystem is fs
    assert context.checkpoint_config.backend == CheckpointBackend.CLOUD_OBJECT_STORAGE
def test_write_block_checkpoint_with_pandas_df(restore_data_context, tmp_path):
    """Write a checkpoint for a pandas-backed block and read the IDs back."""
    data_context = ray.data.DataContext.get_current()
    data_context.checkpoint_config = CheckpointConfig(
        ID_COL,
        str(tmp_path),
    )

    expected_ids = [0, 1]
    input_df = pd.DataFrame({ID_COL: expected_ids})

    writer = BatchBasedCheckpointWriter(data_context.checkpoint_config)
    writer.write_block_checkpoint(BlockAccessor.for_block(input_df))

    # Exactly one checkpoint file should have been produced.
    entries = os.listdir(tmp_path)
    assert len(entries) == 1

    # The checkpoint is a Parquet file containing exactly the written IDs.
    round_tripped = pa.parquet.read_table(tmp_path / entries[0]).to_pandas()
    assert round_tripped[ID_COL].tolist() == expected_ids
def test_filter_rows_for_block():
    """Test BatchBasedCheckpointFilter.filter_rows_for_block."""
    # Config pointing at a dummy location; the filter never touches the
    # filesystem in this test.
    config = CheckpointConfig(
        id_column=ID_COL,
        checkpoint_path="/mock/path",
    )

    # A block with ten rows: IDs 0..9 plus a string payload column.
    block = pyarrow.table(
        {
            ID_COL: list(range(10)),
            "data": [str(i) for i in range(10)],
        }
    )

    # Build the checkpointed-ID table from several chunks to exercise
    # chunked-array handling; IDs 11-13 never appear in the block.
    pieces = [
        pyarrow.table({ID_COL: [1, 2, 4]}),
        pyarrow.table({ID_COL: [6, 8, 9, 11]}),
        pyarrow.table({ID_COL: [12, 13]}),
    ]
    checkpointed_ids = pyarrow.concat_tables(pieces)
    assert len(checkpointed_ids[ID_COL].chunks) == 3

    filtered_block = BatchBasedCheckpointFilter(config).filter_rows_for_block(
        block=block,
        checkpointed_ids=checkpointed_ids,
    )

    # Only rows whose IDs were never checkpointed should survive.
    expected_block = pyarrow.table(
        {
            ID_COL: [0, 3, 5, 7],
            "data": ["0", "3", "5", "7"],
        }
    )
    assert filtered_block.equals(expected_block)
def test_checkpoint_restore_after_full_execution(
    ray_start_10_cpus_shared,
    tmp_path,
    generate_sample_data_parquet,
    checkpoint_path,
):
    """Test checkpoint restore after full execution of data pipeline. This is
    done by retaining the checkpoint metadata files with
    delete_checkpoint_on_success=False.
    """

    def run_simple_pipeline(
        checkpoint_config: CheckpointConfig, input_path: str, output_path: str
    ) -> int:
        """Run a simple pipeline with checkpointing; return the rows written."""
        from ray.data.datasource import WriteResult

        ctx = DataContext.get_current()
        ctx.checkpoint_config = checkpoint_config
        ctx.default_hash_shuffle_parallelism = 1
        ds = ray.data.read_parquet(input_path)

        # Patch `on_write_complete` to capture the WriteResult. The patch is
        # restored in a `finally` block below so it cannot leak into other
        # tests (the original version left the class patched permanently).
        num_rows_written = None
        original_on_write_complete = ParquetDatasink.on_write_complete

        def patched_on_write_complete(self, write_result: WriteResult[None]):
            nonlocal num_rows_written
            num_rows_written = write_result.num_rows
            return original_on_write_complete(self, write_result)

        ParquetDatasink.on_write_complete = patched_on_write_complete
        try:
            ds.write_parquet(output_path)
        finally:
            # BUGFIX: always undo the class-level monkeypatch.
            ParquetDatasink.on_write_complete = original_on_write_complete
        return int(num_rows_written)

    # Create test paths
    input_data_path = generate_sample_data_parquet()
    data_output_path = str(tmp_path / "output")

    # Create checkpoint config; keep checkpoint files after success so the
    # second run can restore from them.
    checkpoint_config = CheckpointConfig(
        id_column=ID_COL,
        checkpoint_path=checkpoint_path,
        override_backend=CheckpointBackend.FILE_STORAGE,
        delete_checkpoint_on_success=False,
    )

    # First run: create checkpoint
    num_rows_first = run_simple_pipeline(
        checkpoint_config, input_data_path, data_output_path
    )
    assert (
        num_rows_first == SAMPLE_DATA_NUM_ROWS
    ), f"Expected {SAMPLE_DATA_NUM_ROWS} rows, got {num_rows_first}"

    # Check if checkpoint files were created
    assert os.path.exists(checkpoint_path), "No checkpoint directory created!"

    # Second run: every row is already checkpointed, so nothing is written.
    num_rows_second = run_simple_pipeline(
        checkpoint_config, input_data_path, data_output_path
    )
    assert (
        num_rows_second == 0  # No rows should be written
    ), f"Expected 0 rows, got {num_rows_second}"
if __name__ == "__main__":
    import sys

    # Allow running this test module directly with pytest in verbose mode.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_checkpoint.py",
"license": "Apache License 2.0",
"lines": 640,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/test_push_based_shuffle.py | from collections import defaultdict
import pytest
import ray
from ray._raylet import NodeID
from ray.data._internal.planner.exchange.push_based_shuffle_task_scheduler import (
PushBasedShuffleTaskScheduler,
)
def test_push_based_shuffle_schedule():
    """Validate `_compute_shuffle_schedule` invariants across many cluster
    shapes, input sizes, and (integer and float) merge factors."""

    def _test(num_input_blocks, merge_factor, num_cpus_per_node_map):
        # Total CPUs across the mocked cluster.
        num_cpus = sum(v for v in num_cpus_per_node_map.values())
        op_cls = PushBasedShuffleTaskScheduler
        schedule = op_cls._compute_shuffle_schedule(
            num_cpus_per_node_map, num_input_blocks, merge_factor, num_input_blocks
        )
        # All input blocks will be processed.
        assert (
            schedule.num_rounds * schedule.num_map_tasks_per_round >= num_input_blocks
        )
        # Each round of tasks does not over-subscribe CPUs.
        assert (
            schedule.num_map_tasks_per_round
            + schedule.merge_schedule.num_merge_tasks_per_round
            <= max(num_cpus, 2)
        )
        print(
            "map",
            schedule.num_map_tasks_per_round,
            "merge",
            schedule.merge_schedule.num_merge_tasks_per_round,
            "num_cpus",
            num_cpus,
            "merge_factor",
            merge_factor,
        )
        # Merge factor between map : merge tasks is approximately correct.
        if schedule.num_map_tasks_per_round > merge_factor:
            actual_merge_factor = (
                schedule.num_map_tasks_per_round
                / schedule.merge_schedule.num_merge_tasks_per_round
            )
            next_highest_merge_factor = schedule.num_map_tasks_per_round / (
                schedule.merge_schedule.num_merge_tasks_per_round + 1
            )
            assert actual_merge_factor - 1 <= merge_factor <= actual_merge_factor + 1, (
                next_highest_merge_factor,
                merge_factor,
                actual_merge_factor,
            )
        else:
            # Too few map tasks for the requested factor: exactly one merger.
            assert schedule.merge_schedule.num_merge_tasks_per_round == 1, (
                schedule.num_map_tasks_per_round,
                merge_factor,
            )
        # Tasks are evenly distributed: per-node merge-task counts differ by
        # at most one.
        tasks_per_node = defaultdict(int)
        for i in range(schedule.merge_schedule.num_merge_tasks_per_round):
            task_options = schedule.get_merge_task_options(i)
            node_id = task_options["scheduling_strategy"].node_id
            tasks_per_node[node_id] += 1
        low = min(tasks_per_node.values())
        high = low + 1
        assert low <= max(tasks_per_node.values()) <= high
        # Reducers are evenly distributed across mergers.
        num_reducers_per_merge_idx = [
            schedule.merge_schedule.get_num_reducers_per_merge_idx(i)
            for i in range(schedule.merge_schedule.num_merge_tasks_per_round)
        ]
        high = max(num_reducers_per_merge_idx)
        for num_reducers in num_reducers_per_merge_idx:
            assert num_reducers == high or num_reducers == high - 1
        # Every merger gets a positive integer reducer count.
        for merge_idx in range(schedule.merge_schedule.num_merge_tasks_per_round):
            assert isinstance(
                schedule.merge_schedule.get_num_reducers_per_merge_idx(merge_idx), int
            )
            assert schedule.merge_schedule.get_num_reducers_per_merge_idx(merge_idx) > 0
        # Walk the round-robin reducer iterator and tally reducers per merger.
        reduce_idxs = list(range(schedule.merge_schedule.output_num_blocks))
        actual_num_reducers_per_merge_idx = [
            0 for _ in range(schedule.merge_schedule.num_merge_tasks_per_round)
        ]
        for reduce_idx in schedule.merge_schedule.round_robin_reduce_idx_iterator():
            reduce_idxs.pop(reduce_idxs.index(reduce_idx))
            actual_num_reducers_per_merge_idx[
                schedule.merge_schedule.get_merge_idx_for_reducer_idx(reduce_idx)
            ] += 1
        # Check that each reduce task is submitted exactly once.
        assert len(reduce_idxs) == 0
        # Check that each merge and reduce task are correctly paired.
        for i, num_reducers in enumerate(actual_num_reducers_per_merge_idx):
            assert (
                num_reducers == num_reducers_per_merge_idx[i]
            ), f"""Merge task [{i}] has {num_reducers} downstream reduce tasks,
expected {num_reducers_per_merge_idx[i]}."""
            assert num_reducers > 0

    node_id_1 = NodeID.from_random().hex()
    node_id_2 = NodeID.from_random().hex()
    node_id_3 = NodeID.from_random().hex()
    # Single-node clusters with a range of CPU counts.
    for num_cpus in range(1, 20):
        _test(20, 3, {node_id_1: num_cpus})
    _test(20, 3, {node_id_1: 100})
    # Multi-node clusters.
    _test(100, 3, {node_id_1: 10, node_id_2: 10, node_id_3: 10})
    _test(100, 10, {node_id_1: 10, node_id_2: 10, node_id_3: 10})
    # Regression test for https://github.com/ray-project/ray/issues/25863.
    _test(1000, 2, {NodeID.from_random().hex(): 16 for i in range(20)})
    # Regression test for https://github.com/ray-project/ray/issues/37754.
    _test(260, 2, {node_id_1: 128})
    _test(1, 2, {node_id_1: 128})
    # Test float merge_factor.
    for cluster_config in [
        {node_id_1: 10},
        {node_id_1: 10, node_id_2: 10},
    ]:
        _test(100, 1, cluster_config)
        _test(100, 1.3, cluster_config)
        _test(100, 1.6, cluster_config)
        _test(100, 1.75, cluster_config)
        _test(100, 2, cluster_config)
        _test(1, 1.2, cluster_config)
        _test(2, 1.2, cluster_config)
def test_push_based_shuffle_stats(ray_start_cluster):
    """Random-shuffle on a multi-node cluster and verify the reported stats."""
    ctx = ray.data.context.DataContext.get_current()
    # BUGFIX: capture the original value BEFORE entering the try block;
    # previously a failure on this very line would have made the `finally`
    # clause raise NameError and mask the real error.
    original = ctx.use_push_based_shuffle
    try:
        ctx.use_push_based_shuffle = True

        cluster = ray_start_cluster
        cluster.add_node(
            resources={"bar:1": 100},
            num_cpus=10,
            _system_config={"max_direct_call_object_size": 0},
        )
        cluster.add_node(resources={"bar:2": 100}, num_cpus=10)
        # Zero-CPU node: should not run any shuffle tasks.
        cluster.add_node(resources={"bar:3": 100}, num_cpus=0)
        ray.init(cluster.address)

        parallelism = 100
        ds = ray.data.range(1000, override_num_blocks=parallelism).random_shuffle()
        ds = ds.materialize()
        assert "RandomShuffleMerge" in ds.stats()
        # Check all nodes used.
        assert "2 nodes used" in ds.stats()
        assert "1 nodes used" not in ds.stats()

        # Check all merge tasks are included in stats.
        internal_stats = ds._plan.stats()
        num_merge_tasks = len(internal_stats.metadata["RandomShuffleMerge"])
        # Merge factor is 2 for random_shuffle ops.
        merge_factor = 2
        assert (
            parallelism // (merge_factor + 1)
            <= num_merge_tasks
            <= parallelism // merge_factor
        )
    finally:
        # Restore the context flag so other tests see the original setting.
        ctx.use_push_based_shuffle = original
def test_sort_multinode(ray_start_cluster, configure_shuffle_method):
    """Shuffle then sort across a multi-node cluster and verify global order."""
    cluster = ray_start_cluster
    cluster.add_node(
        resources={"bar:1": 100},
        num_cpus=10,
        _system_config={"max_direct_call_object_size": 0},
    )
    cluster.add_node(resources={"bar:2": 100}, num_cpus=10)
    # Zero-CPU node: should receive no tasks.
    cluster.add_node(resources={"bar:3": 100}, num_cpus=0)
    ray.init(cluster.address)

    parallelism = 100
    ds = (
        ray.data.range(1000, override_num_blocks=parallelism)
        .random_shuffle()
        .sort("id")
    )
    # After sorting, rows must come back in exact ascending id order.
    for i, row in enumerate(ds.iter_rows()):
        assert row["id"] == i
def patch_ray_remote(condition, callback):
    """Monkeypatch ``ray.remote`` so that for remote functions matching
    ``condition``, ``callback`` observes the kwargs of every ``.options(...)``
    call.

    Returns the original ``ray.remote`` so the caller can restore it.
    """
    saved_remote = ray.remote

    def instrumented_remote(*remote_args, **remote_kwargs):
        def decorate(fn):
            remote_fn = saved_remote(*remote_args, **remote_kwargs)(fn)
            if not condition(fn):
                return remote_fn

            saved_options = remote_fn.options

            def spying_options(**task_options):
                # Report the task options before delegating to the real call.
                callback(task_options)
                return saved_options(**task_options)

            remote_fn.options = spying_options
            return remote_fn

        return decorate

    ray.remote = instrumented_remote
    return saved_remote
def patch_ray_get(callback):
    """Monkeypatch ``ray.get`` so ``callback`` sees every fetched ref set.

    Returns the original ``ray.get`` so the caller can restore it.
    """
    saved_get = ray.get

    def spying_get(object_refs, *args, **kwargs):
        callback(object_refs)
        return saved_get(object_refs, *args, **kwargs)

    ray.get = spying_get
    return saved_get
@pytest.mark.parametrize("pipeline", [False, True])
def test_push_based_shuffle_reduce_stage_scheduling(ray_start_cluster, pipeline):
    """Check how reduce tasks are submitted, with and without pipelining."""
    ctx = ray.data.context.DataContext.get_current()
    try:
        original = ctx.use_push_based_shuffle
        ctx.use_push_based_shuffle = True
        ctx.pipeline_push_based_shuffle_reduce_tasks = pipeline

        num_cpus_per_node = 8
        num_nodes = 3
        num_output_blocks = 100
        # Shared mutable state between the ray.remote/ray.get patches below.
        task_context = {
            "reduce_options_submitted": [],
            # The total number of CPUs available.
            "pipelined_parallelism": num_cpus_per_node * num_nodes,
            # The total number of reduce tasks.
            "total_parallelism": num_output_blocks,
            "num_instances_below_parallelism": 0,
        }

        def reduce_options_patch(task_options):
            # Invoked via patch_ray_remote for every reduce-task submission.
            task_context["reduce_options_submitted"].append(task_options)

        def check_pipelined(refs):
            # Invoked via patch_ray_get; inspects reduce tasks submitted so far.
            if task_context["reduce_options_submitted"]:
                # Check that we have the correct number of tasks in flight.
                if pipeline:
                    # When pipelining, we should limit the number of reduce
                    # tasks in flight based on how many CPUs are in the
                    # cluster.
                    if not (
                        task_context["pipelined_parallelism"]
                        <= len(task_context["reduce_options_submitted"])
                        <= 2 * task_context["pipelined_parallelism"]
                    ):
                        task_context["num_instances_below_parallelism"] += 1
                else:
                    # When not pipelining, we should submit all reduce tasks at
                    # once.
                    assert (
                        len(task_context["reduce_options_submitted"])
                        == task_context["total_parallelism"]
                    )
                # Check that tasks are close to evenly spread across the nodes.
                nodes = defaultdict(int)
                for options in task_context["reduce_options_submitted"]:
                    nodes[options["scheduling_strategy"].node_id] += 1
                assert len(nodes) > 1
                assert min(nodes.values()) >= max(nodes.values()) // 2
            # Reset between ray.get calls (no-op when already empty).
            task_context["reduce_options_submitted"].clear()

        ray_remote = patch_ray_remote(
            lambda fn: "reduce" in fn.__name__, reduce_options_patch
        )
        ray_get = patch_ray_get(check_pipelined)

        cluster = ray_start_cluster
        for _ in range(num_nodes):
            cluster.add_node(
                num_cpus=num_cpus_per_node,
            )
        ray.init(cluster.address)

        ds = ray.data.range(
            1000, override_num_blocks=num_output_blocks
        ).random_shuffle()
        # Only the last round should have fewer tasks in flight.
        assert task_context["num_instances_below_parallelism"] <= 1
        task_context["num_instances_below_parallelism"] = 0

        ds = ds.sort("id")
        # Only the last round should have fewer tasks in flight.
        assert task_context["num_instances_below_parallelism"] <= 1
        task_context["num_instances_below_parallelism"] = 0
        for i, row in enumerate(ds.iter_rows()):
            assert row["id"] == i
    finally:
        # Restore the context flag and the patched ray APIs.
        # NOTE(review): if an exception fires before ray_remote/ray_get are
        # assigned, this raises NameError — confirm that's acceptable here.
        ctx.use_push_based_shuffle = original
        ray.remote = ray_remote
        ray.get = ray_get
if __name__ == "__main__":
    import sys

    # Allow running this test module directly with pytest (-s: no capture).
    sys.exit(pytest.main(["-sv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_push_based_shuffle.py",
"license": "Apache License 2.0",
"lines": 268,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/test_shuffle_diagnostics.py | import logging
import pytest
import ray
from ray.data.context import DataContext, ShuffleStrategy
from ray.data.dataset import Dataset
# All-to-all shuffle operations under test; each entry is a callable that
# takes a Dataset and returns the shuffled/sorted/grouped Dataset.
SHUFFLE_ALL_TO_ALL_OPS = [
    Dataset.random_shuffle,
    lambda ds: ds.sort(key="id"),
    lambda ds: ds.groupby("id").map_groups(lambda group: group),
]
@pytest.mark.parametrize(
    "shuffle_op",
    SHUFFLE_ALL_TO_ALL_OPS,
)
def test_debug_limit_shuffle_execution_to_num_blocks(
    ray_start_regular, restore_data_context, configure_shuffle_method, shuffle_op
):
    """Test that the debug config caps shuffle execution to N blocks."""
    if configure_shuffle_method == ShuffleStrategy.HASH_SHUFFLE:
        pytest.skip("Not supported by hash-shuffle")
    shuffle_fn = shuffle_op

    parallelism = 100
    ds = ray.data.range(1000, override_num_blocks=parallelism)
    # Without the debug limit, the shuffle preserves input parallelism.
    shuffled_ds = shuffle_fn(ds).materialize()
    shuffled_ds = shuffled_ds.materialize()
    assert shuffled_ds._plan.initial_num_blocks() == parallelism

    # With the limit set to 1, only a single block should be processed.
    ds.context.set_config("debug_limit_shuffle_execution_to_num_blocks", 1)
    shuffled_ds = shuffle_fn(ds).materialize()
    shuffled_ds = shuffled_ds.materialize()
    assert shuffled_ds._plan.initial_num_blocks() == 1
def test_memory_usage(
    ray_start_regular, restore_data_context, configure_shuffle_method
):
    """Sanity-check per-operator peak memory reported after a random shuffle."""
    parallelism = 2
    ds = ray.data.range(int(1e8), override_num_blocks=parallelism)
    ds = ds.random_shuffle().materialize()
    stats = ds._get_stats_summary()
    # TODO(swang): Sort on this dataset seems to produce significant skew, so
    # one task uses much more memory than the other.
    for op_stats in stats.operators_stats:
        # Peak per-operator memory stays under 2000 (presumably MiB — confirm
        # against the stats summary's reporting unit).
        assert op_stats.memory["max"] < 2000
@pytest.mark.parametrize("under_threshold", [False, True])
def test_sort_object_ref_warnings(
    ray_start_regular,
    restore_data_context,
    configure_shuffle_method,
    under_threshold,
    propagate_logs,
    caplog,
):
    # Test that we warn iff expected driver memory usage from
    # storing ObjectRefs is higher than the configured
    # threshold.
    warning_str = "Execution is estimated to use"
    # The estimated byte count depends on the shuffle strategy in use.
    warning_str_with_bytes = (
        "Execution is estimated to use at least "
        f"{90 if configure_shuffle_method == ShuffleStrategy.SORT_SHUFFLE_PUSH_BASED else 300}KB"
    )

    if not under_threshold:
        # Lower the warning threshold so the estimate exceeds it.
        DataContext.get_current().warn_on_driver_memory_usage_bytes = 10_000
    ds = ray.data.range(int(1e8), override_num_blocks=10)
    with caplog.at_level(logging.WARNING, logger="ray.data.dataset"):
        ds = ds.random_shuffle().materialize()
    if under_threshold:
        assert warning_str not in caplog.text
        assert warning_str_with_bytes not in caplog.text
    else:
        assert warning_str in caplog.text
        assert warning_str_with_bytes in caplog.text
@pytest.mark.parametrize("under_threshold", [False, True])
def test_sort_inlined_objects_warnings(
    ray_start_regular,
    restore_data_context,
    configure_shuffle_method,
    under_threshold,
    propagate_logs,
    caplog,
):
    # Test that we warn iff expected driver memory usage from
    # storing tiny Ray objects on driver heap is higher than
    # the configured threshold.
    if configure_shuffle_method == ShuffleStrategy.SORT_SHUFFLE_PUSH_BASED:
        # Push-based shuffle is expected to emit incremental warnings.
        warning_strs = [
            "More than 3MB of driver memory used",
            "More than 7MB of driver memory used",
        ]
    else:
        warning_strs = [
            "More than 8MB of driver memory used",
        ]

    if not under_threshold:
        # Lower the threshold so driver heap usage exceeds it during shuffle.
        DataContext.get_current().warn_on_driver_memory_usage_bytes = 3_000_000
    ds = ray.data.range(int(1e6), override_num_blocks=10)
    with caplog.at_level(logging.WARNING, logger="ray.data.dataset"):
        ds = ds.random_shuffle().materialize()
    if under_threshold:
        assert all(warning_str not in caplog.text for warning_str in warning_strs)
    else:
        assert all(warning_str in caplog.text for warning_str in warning_strs)
if __name__ == "__main__":
    import sys

    # Allow running this test module directly with pytest (-s: no capture).
    sys.exit(pytest.main(["-sv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_shuffle_diagnostics.py",
"license": "Apache License 2.0",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/util/expression_utils.py | """Utility functions for expression-based operations."""
from typing import TYPE_CHECKING, Any, Callable, List, Optional
if TYPE_CHECKING:
from ray.data.expressions import Expr
def _get_setting_with_copy_warning() -> Optional[type]:
"""Get the SettingWithCopyWarning class from pandas, if available.
Pandas has moved/renamed this warning across versions, and pandas 3.x may not
expose it at all. This function handles the version differences gracefully
using hasattr checks instead of try-except blocks.
Returns:
The SettingWithCopyWarning class if found, None otherwise.
"""
import pandas as pd
# Use hasattr to avoid try-catch blocks as suggested
if hasattr(pd.core.common, "SettingWithCopyWarning"):
return pd.core.common.SettingWithCopyWarning
elif hasattr(pd.errors, "SettingWithCopyWarning"):
return pd.errors.SettingWithCopyWarning
else:
# Warning not available in this pandas version
return None
def _create_callable_class_udf_init_fn(
    exprs: List["Expr"],
) -> Optional[Callable[[], None]]:
    """Build an init_fn that initializes every callable-class UDF in `exprs`.

    All _CallableClassUDF instances are collected from the expressions and
    grouped by their `callable_class_spec` key (same class + same constructor
    args). The returned init_fn instantiates one instance per group at actor
    startup and shares it across the group, so every UDF object with that key
    ends up initialized.

    Args:
        exprs: Expressions to scan for callable-class UDFs.

    Returns:
        An init_fn that initializes all callable-class UDFs, or None when the
        expressions contain none.
    """
    from ray.data._internal.planner.plan_expression.expression_visitors import (
        _CallableClassUDFCollector,
    )

    collected_udfs = []
    for expression in exprs:
        visitor = _CallableClassUDFCollector()
        visitor.visit(expression)
        collected_udfs.extend(visitor.get_callable_class_udfs())

    if not collected_udfs:
        return None

    # Group by spec key: UDFs sharing a key must end up sharing one instance.
    udf_groups = {}
    for udf in collected_udfs:
        udf_groups.setdefault(udf.callable_class_spec.make_key(), []).append(udf)

    def init_fn():
        for group in udf_groups.values():
            leader, *followers = group
            # Instantiate once via the first UDF of the group...
            leader.init()
            # ...then share that instance with the rest of the group.
            for follower in followers:
                follower._instance = leader._instance

    return init_fn
def _call_udf_instance_with_async_bridge(
instance: Any,
*args,
**kwargs,
) -> Any:
"""Call a UDF instance, bridging from sync context to async if needed.
This handles the complexity of calling callable class UDF instances that may
be sync, async coroutine, or async generator functions.
Args:
instance: The callable instance to call
*args: Positional arguments
**kwargs: Keyword arguments
Returns:
The result of calling the instance
"""
import asyncio
import inspect
# Check if the instance's __call__ is async
if inspect.iscoroutinefunction(instance.__call__):
# Async coroutine: bridge from sync to async
return asyncio.run(instance(*args, **kwargs))
elif inspect.isasyncgenfunction(instance.__call__):
# Async generator: collect results
async def _collect():
results = []
async for item in instance(*args, **kwargs):
results.append(item)
# In expressions, the UDF must return a single array with the same
# length as the input (one output element per input row).
# If the async generator yields multiple arrays, we take the last one
# since expressions don't support multi-batch output semantics.
if not results:
return None
elif len(results) == 1:
return results[0]
else:
import logging
logging.warning(
f"Async generator yielded {len(results)} values in expression context; "
"only the last (most recent) is returned. Use map_batches for multi-yield support."
)
return results[-1]
return asyncio.run(_collect())
else:
# Synchronous instance - direct call
return instance(*args, **kwargs)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/util/expression_utils.py",
"license": "Apache License 2.0",
"lines": 109,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/tests/test_reservation_based_resource_allocator.py | from unittest.mock import MagicMock, patch
import pytest
import ray
from ray.data._internal.execution.interfaces.execution_options import (
ExecutionOptions,
ExecutionResources,
)
from ray.data._internal.execution.operators.input_data_buffer import InputDataBuffer
from ray.data._internal.execution.operators.limit_operator import LimitOperator
from ray.data._internal.execution.operators.map_operator import MapOperator
from ray.data._internal.execution.resource_manager import (
ReservationOpResourceAllocator,
ResourceManager,
)
from ray.data._internal.execution.streaming_executor_state import (
build_streaming_topology,
)
from ray.data._internal.execution.util import make_ref_bundles
from ray.data.context import DataContext
from ray.data.tests.conftest import * # noqa
from ray.data.tests.test_resource_manager import (
mock_join_op,
mock_map_op,
mock_union_op,
)
class TestReservationOpResourceAllocator:
"""Tests for ReservationOpResourceAllocator."""
    @pytest.fixture(scope="class", autouse=True)
    def enable_reservation_based_resource_allocator(self):
        """Force the V1 (reservation-based) op resource allocator for every
        test in this class."""
        # Switch to V1
        with patch(
            "ray.data._internal.execution.DEFAULT_USE_OP_RESOURCE_ALLOCATOR_VERSION",
            new="V1",
        ):
            yield
    def test_basic(self, restore_data_context):
        """End-to-end check of reservation-based budgets: initial reservation,
        budget computation, max task-output bytes to read, and allocation,
        across changing per-op usages and global limits."""
        DataContext.get_current().op_resource_reservation_enabled = True
        DataContext.get_current().op_resource_reservation_ratio = 0.5

        o1 = InputDataBuffer(DataContext.get_current(), [])
        # Use ray_remote_args to set CPU requirements instead of mocking
        o2 = mock_map_op(o1, ray_remote_args={"num_cpus": 1})
        o3 = mock_map_op(o2, ray_remote_args={"num_cpus": 1})
        o4 = LimitOperator(1, o3, DataContext.get_current())

        # Mock min_max_resource_requirements to return default unbounded behavior
        for op in [o2, o3]:
            op.min_max_resource_requirements = MagicMock(
                return_value=(ExecutionResources.zero(), ExecutionResources.inf())
            )

        # Per-op usage tables the mocked ResourceManager reads from.
        op_usages = {op: ExecutionResources.zero() for op in [o1, o2, o3, o4]}
        op_internal_usage = dict.fromkeys([o1, o2, o3, o4], 0)
        op_outputs_usages = dict.fromkeys([o1, o2, o3, o4], 0)

        topo = build_streaming_topology(o4, ExecutionOptions())

        global_limits = ExecutionResources.zero()

        def mock_get_global_limits():
            nonlocal global_limits
            return global_limits

        resource_manager = ResourceManager(
            topo, ExecutionOptions(), MagicMock(), DataContext.get_current()
        )
        resource_manager.get_op_usage = MagicMock(side_effect=lambda op: op_usages[op])
        resource_manager._mem_op_internal = op_internal_usage
        resource_manager._mem_op_outputs = op_outputs_usages
        resource_manager.get_global_limits = MagicMock(
            side_effect=mock_get_global_limits
        )

        assert resource_manager.op_resource_allocator_enabled()
        allocator = resource_manager._op_resource_allocator
        assert isinstance(allocator, ReservationOpResourceAllocator)

        # Test initial state when no resources are used.
        global_limits = ExecutionResources(cpu=16, gpu=0, object_store_memory=1000)
        allocator.update_budgets(
            limits=global_limits,
        )
        # +-----+------------------+------------------+--------------+
        # |     | _op_reserved     | _reserved_for    | used shared  |
        # |     | (used/remaining) | _op_outputs      | resources    |
        # |     |                  | (used/remaining) |              |
        # +-----+------------------+------------------+--------------+
        # | op2 | 0/125            | 0/125            | 0            |
        # +-----+------------------+------------------+--------------+
        # | op3 | 0/125            | 0/125            | 0            |
        # +-----+------------------+------------------+--------------+
        # o1 and o4 are not handled.
        assert o1 not in allocator._op_reserved
        assert o4 not in allocator._op_reserved
        assert o1 not in allocator._op_budgets
        assert o4 not in allocator._op_budgets
        # Test reserved resources for o2 and o3.
        assert allocator._op_reserved[o2] == ExecutionResources(4, 0, 125)
        assert allocator._op_reserved[o3] == ExecutionResources(4, 0, 125)
        assert allocator._reserved_for_op_outputs[o2] == 125
        assert allocator._reserved_for_op_outputs[o3] == 125
        # 50% of the global limits are shared.
        assert allocator._total_shared == ExecutionResources(8, 0, 500)
        # Test budgets.
        assert allocator._op_budgets[o2] == ExecutionResources(8, 0, 375)
        assert allocator._op_budgets[o3] == ExecutionResources(8, 0, 375)
        # Test max_task_output_bytes_to_read.
        assert allocator.max_task_output_bytes_to_read(o2) == 500
        assert allocator.max_task_output_bytes_to_read(o3) == 500
        # Test get_allocation.
        # Ineligible operators should return None.
        assert allocator.get_allocation(o1) is None
        assert allocator.get_allocation(o4) is None
        # allocation = op_reserved + op_shared = (4, 0, 125) + (4, 0, 250) = (8, 0, 375)
        # When usage is zero, allocation equals budget.
        assert allocator.get_allocation(o2) == ExecutionResources(8, 0, 375)
        assert allocator.get_allocation(o3) == ExecutionResources(8, 0, 375)

        # Test when each operator uses some resources.
        op_usages[o2] = ExecutionResources(6, 0, 500)
        op_internal_usage[o2] = 400
        op_outputs_usages[o2] = 100
        op_usages[o3] = ExecutionResources(2, 0, 125)
        op_internal_usage[o3] = 30
        op_outputs_usages[o3] = 25
        op_usages[o4] = ExecutionResources(0, 0, 50)
        allocator.update_budgets(
            limits=global_limits,
        )
        # +-----+------------------+------------------+--------------+
        # |     | _op_reserved     | _reserved_for    | used shared  |
        # |     | (used/remaining) | _op_outputs      | resources    |
        # |     |                  | (used/remaining) |              |
        # +-----+------------------+------------------+--------------+
        # | op2 | 125/0            | 100/25           | 400-125=275  |
        # +-----+------------------+------------------+--------------+
        # | op3 | 30/95            | (25+50)/50       | 0            |
        # +-----+------------------+------------------+--------------+
        # remaining shared = 1000/2 - 275 = 225
        # Test budgets.
        # memory_budget[o2] = 0 + 225/2 = 113 (rounded up)
        assert allocator._op_budgets[o2] == ExecutionResources(3, 0, 113)
        # memory_budget[o3] = 95 + 225/2 = 207 (rounded down)
        assert allocator._op_budgets[o3] == ExecutionResources(5, 0, 207)
        # Test max_task_output_bytes_to_read.
        # max_task_output_bytes_to_read(o2) = 112.5 + 25 = 138 (rounded up)
        assert allocator.max_task_output_bytes_to_read(o2) == 138
        # max_task_output_bytes_to_read(o3) = 207.5 + 50 = 257 (rounded down)
        assert allocator.max_task_output_bytes_to_read(o3) == 257
        # Test get_allocation.
        # allocation = budget + usage
        # budget[o2] = (3, 0, 113), budget[o3] = (5, 0, 207)
        # usage[o2] = (6, 0, 500), usage[o3] = (2, 0, 125)
        assert allocator.get_allocation(o2) == ExecutionResources(9, 0, 613)
        assert allocator.get_allocation(o3) == ExecutionResources(7, 0, 332)

        # Test global_limits updated.
        global_limits = ExecutionResources(cpu=12, gpu=0, object_store_memory=800)
        allocator.update_budgets(
            limits=global_limits,
        )
        # +-----+------------------+------------------+--------------+
        # |     | _op_reserved     | _reserved_for    | used shared  |
        # |     | (used/remaining) | _op_outputs      | resources    |
        # |     |                  | (used/remaining) |              |
        # +-----+------------------+------------------+--------------+
        # | op2 | 100/0            | 100/0            | 400-100=300  |
        # +-----+------------------+------------------+--------------+
        # | op3 | 30/70            | (25+50)/25       | 0            |
        # +-----+------------------+------------------+--------------+
        # remaining shared = 800/2 - 300 = 100
        # Test reserved resources for o2 and o3.
        assert allocator._op_reserved[o2] == ExecutionResources(3, 0, 100)
        assert allocator._op_reserved[o3] == ExecutionResources(3, 0, 100)
        assert allocator._reserved_for_op_outputs[o2] == 100
        assert allocator._reserved_for_op_outputs[o3] == 100
        # 50% of the global limits are shared.
        assert allocator._total_shared == ExecutionResources(6, 0, 400)
        # Test budgets.
        # memory_budget[o2] = 0 + 100/2 = 50
        assert allocator._op_budgets[o2] == ExecutionResources(1.5, 0, 50)
        # memory_budget[o3] = 70 + 100/2 = 120
        assert allocator._op_budgets[o3] == ExecutionResources(2.5, 0, 120)
        # Test max_task_output_bytes_to_read.
        # max_task_output_bytes_to_read(o2) = 50 + 0 = 50
        assert allocator.max_task_output_bytes_to_read(o2) == 50
        # max_task_output_bytes_to_read(o3) = 120 + 25 = 145
        assert allocator.max_task_output_bytes_to_read(o3) == 145
        # Test get_allocation.
        # allocation = budget + usage
        # budget[o2] = (1.5, 0, 50), budget[o3] = (2.5, 0, 120)
        # usage[o2] = (6, 0, 500), usage[o3] = (2, 0, 125)
        assert allocator.get_allocation(o2) == ExecutionResources(7.5, 0, 550)
        assert allocator.get_allocation(o3) == ExecutionResources(4.5, 0, 245)
    def test_reserve_min_resource_requirements(self, restore_data_context):
        """Test that we'll reserve at least min_resource_requirements
        for each operator."""
        DataContext.get_current().op_resource_reservation_enabled = True
        DataContext.get_current().op_resource_reservation_ratio = 0.5

        global_limits = ExecutionResources(cpu=7, gpu=0, object_store_memory=800)
        min_resources = ExecutionResources(cpu=3, gpu=0, object_store_memory=500)

        o1 = InputDataBuffer(DataContext.get_current(), [])
        # Use ray_remote_args to set CPU requirements
        o2 = mock_map_op(o1, ray_remote_args={"num_cpus": 3})
        o3 = mock_map_op(o2, ray_remote_args={"num_cpus": 3})
        o4 = mock_map_op(o3, ray_remote_args={"num_cpus": 3})
        o5 = mock_map_op(o4, ray_remote_args={"num_cpus": 3})
        # Set min_max_resource_requirements to specify minimum resources
        for op in [o2, o3, o4, o5]:
            op.min_max_resource_requirements = MagicMock(
                return_value=(
                    min_resources,
                    ExecutionResources(cpu=100, gpu=0, object_store_memory=10000),
                )
            )
        topo = build_streaming_topology(o5, ExecutionOptions())

        resource_manager = ResourceManager(
            topo, ExecutionOptions(), MagicMock(), DataContext.get_current()
        )
        resource_manager.get_op_usage = MagicMock(
            return_value=ExecutionResources.zero()
        )
        resource_manager.get_global_limits = MagicMock(return_value=global_limits)

        allocator = resource_manager._op_resource_allocator
        assert isinstance(allocator, ReservationOpResourceAllocator)
        allocator.update_budgets(
            limits=global_limits,
        )

        # min_resources should be reserved for o2.
        assert allocator._op_reserved[o2] == min_resources
        # Remaining resources are CPU = 7 - 3 = 4, object_store_memory = 800 - 500 = 300.
        # We have enough CPUs for o3's min_resources, but not enough
        # object_store_memory. We'll still reserve the min_resources by
        # oversubscribing object_store_memory.
        assert allocator._op_reserved[o3] == min_resources
        # Now the remaining resources are CPU = 4 - 3 = 1,
        # object_store_memory = 300 - 500 = -200.
        # We don't oversubscribe CPUs, we'll only reserve
        # min_resources.object_store_memory.
        assert allocator._op_reserved[o4] == ExecutionResources(
            0, 0, min_resources.object_store_memory
        )
        # Same for o5
        assert allocator._op_reserved[o5] == ExecutionResources(
            0, 0, min_resources.object_store_memory
        )
        # Only the single leftover CPU remains in the shared pool.
        assert allocator._total_shared == ExecutionResources(1, 0, 0)
        for op in [o2, o3, o4]:
            assert allocator._reserved_for_op_outputs[op] == 50
def test_reserve_min_resources_for_gpu_ops(self, restore_data_context):
"""Test that we'll reserve enough resources for ActorPoolMapOperator
that uses GPU."""
global_limits = ExecutionResources(cpu=6, gpu=8, object_store_memory=1600)
o1 = InputDataBuffer(DataContext.get_current(), [])
o2 = mock_map_op(
o1,
ray_remote_args={"num_cpus": 0, "num_gpus": 1},
compute_strategy=ray.data.ActorPoolStrategy(size=8),
)
# Mock min_max_resource_requirements to return a minimum of 800 bytes
# (simulating 8 actors * 100 bytes per pending output)
o2.min_max_resource_requirements = MagicMock(
return_value=(
ExecutionResources(cpu=0, gpu=8, object_store_memory=800),
ExecutionResources(cpu=0, gpu=8, object_store_memory=float("inf")),
)
)
topo = build_streaming_topology(o2, ExecutionOptions())
resource_manager = ResourceManager(
topo, ExecutionOptions(), MagicMock(), DataContext.get_current()
)
resource_manager.get_op_usage = MagicMock(
return_value=ExecutionResources.zero()
)
resource_manager.get_global_limits = MagicMock(return_value=global_limits)
allocator = resource_manager._op_resource_allocator
assert isinstance(allocator, ReservationOpResourceAllocator)
allocator.update_budgets(
limits=global_limits,
)
# With min_resource_requirements of 800 bytes, reservation should be at least 800
assert allocator._op_reserved[o2].object_store_memory == 800
def test_does_not_reserve_more_than_max_resource_usage(self):
o1 = InputDataBuffer(DataContext.get_current(), [])
o2 = MapOperator.create(
MagicMock(),
o1,
DataContext.get_current(),
)
o2.min_max_resource_requirements = MagicMock(
return_value=(
ExecutionResources(cpu=0, object_store_memory=0),
ExecutionResources(cpu=1, object_store_memory=1),
)
)
topo = build_streaming_topology(o2, ExecutionOptions())
resource_manager = ResourceManager(
topo, ExecutionOptions(), MagicMock(), DataContext.get_current()
)
resource_manager.get_op_usage = MagicMock(
return_value=ExecutionResources.zero()
)
# Mock an extremely large cluster.
resource_manager.get_global_limits = MagicMock(
return_value=ExecutionResources(cpu=1024, object_store_memory=1024**4)
)
allocator = resource_manager._op_resource_allocator
global_limits = resource_manager.get_global_limits()
allocator.update_budgets(
limits=global_limits,
)
# The operator's max resource usage is 1 CPU and 1 byte object store memory, so
# we'll reserve that despite the large global limits.
assert allocator._op_reserved[o2] == ExecutionResources(
cpu=1, object_store_memory=1
)
    def test_budget_capped_by_max_resource_usage(self, restore_data_context):
        """Test that the total allocation is capped by max_resource_usage.
        Total allocation = max(total_reserved, op_usage) + op_shared
        We cap op_shared so that total allocation <= max_resource_usage.
        Excess shared resources should remain available for other operators.
        """
        DataContext.get_current().op_resource_reservation_enabled = True
        DataContext.get_current().op_resource_reservation_ratio = 0.5
        # Pipeline: o1 (input) -> o2 -> o3; each map task needs 1 CPU.
        o1 = InputDataBuffer(DataContext.get_current(), [])
        o2 = mock_map_op(o1, ray_remote_args={"num_cpus": 1})
        o3 = mock_map_op(o2, ray_remote_args={"num_cpus": 1})
        # o2 has a small max CPU, so its CPU shared allocation will be capped.
        # o3 has unlimited max_resource_usage.
        o2.min_max_resource_requirements = MagicMock(
            return_value=(
                ExecutionResources.zero(),
                ExecutionResources(cpu=4, object_store_memory=float("inf")),
            )
        )
        o3.min_max_resource_requirements = MagicMock(
            return_value=(
                ExecutionResources.zero(),
                ExecutionResources.inf(),
            )
        )
        topo = build_streaming_topology(o3, ExecutionOptions())
        global_limits = ExecutionResources(cpu=20, object_store_memory=400)
        # Both map ops are mid-execution, each using 2 CPUs and 40 bytes.
        op_usages = {
            o1: ExecutionResources.zero(),
            o2: ExecutionResources(cpu=2, object_store_memory=40),
            o3: ExecutionResources(cpu=2, object_store_memory=40),
        }
        resource_manager = ResourceManager(
            topo, ExecutionOptions(), MagicMock(), DataContext.get_current()
        )
        resource_manager.get_op_usage = MagicMock(side_effect=lambda op: op_usages[op])
        resource_manager._mem_op_internal = {o1: 0, o2: 40, o3: 40}
        resource_manager._mem_op_outputs = {o1: 0, o2: 0, o3: 0}
        resource_manager.get_global_limits = MagicMock(return_value=global_limits)
        allocator = resource_manager._op_resource_allocator
        assert isinstance(allocator, ReservationOpResourceAllocator)
        allocator.update_budgets(limits=global_limits)
        # All tuples below are (cpu, object_store_memory).
        #
        # Reservation phase:
        # - default_reserved per op = global_limits * 0.5 / 2 = (5, 100)
        # - reserved_for_outputs per op = 100 / 2 = 50
        # - o2's reserved_for_tasks is capped by max (4, inf) -> (4, 50)
        # - o3's reserved_for_tasks = (5, 50)
        # - total_shared = global_limits - o2_total_reserved - o3_total_reserved
        #   = (20, 400) - (4, 100) - (5, 100) = (11, 200)
        #
        # Budget phase (first loop calculates reserved_remaining):
        # - o2: reserved_remaining = reserved_for_tasks - usage = (4, 50) - (2, 40) = (2, 10)
        # - o3: reserved_remaining = (5, 50) - (2, 40) = (3, 10)
        #
        # Shared allocation (second loop, reversed order):
        # - o3: op_shared = remaining_shared / 2 = (5.5, 100), no cap
        #   budget = reserved_remaining + op_shared = (3, 10) + (5.5, 100) = (8.5, 110)
        # - o2: op_shared = (5.5, 100), CPU capped to (0, 100)
        #   budget = (2, 10) + (0, 100) = (2, 110)
        #   remaining_shared = (5.5, 0)
        # - After loop, remaining (5.5, 0) given to most downstream op (o3):
        #   o3 budget = (8.5, 110) + (5.5, 0) = (14, 110)
        assert allocator._op_budgets[o2] == ExecutionResources(
            cpu=2, object_store_memory=110
        )
        assert allocator._op_budgets[o3] == ExecutionResources(
            cpu=14, object_store_memory=110
        )
def test_budget_capped_by_max_resource_usage_all_capped(self, restore_data_context):
"""Test when all operators are capped, remaining shared resources are not given."""
DataContext.get_current().op_resource_reservation_enabled = True
DataContext.get_current().op_resource_reservation_ratio = 0.5
o1 = InputDataBuffer(DataContext.get_current(), [])
o2 = mock_map_op(o1, ray_remote_args={"num_cpus": 1})
o3 = mock_map_op(o2, ray_remote_args={"num_cpus": 1})
# Both operators are capped.
o2.min_max_resource_requirements = MagicMock(
return_value=(
ExecutionResources.zero(),
ExecutionResources(cpu=4, object_store_memory=float("inf")),
)
)
o3.min_max_resource_requirements = MagicMock(
return_value=(
ExecutionResources.zero(),
ExecutionResources(cpu=4, object_store_memory=float("inf")),
)
)
topo = build_streaming_topology(o3, ExecutionOptions())
global_limits = ExecutionResources(cpu=20, object_store_memory=400)
op_usages = {
o1: ExecutionResources.zero(),
o2: ExecutionResources(cpu=2, object_store_memory=40),
o3: ExecutionResources(cpu=2, object_store_memory=40),
}
resource_manager = ResourceManager(
topo, ExecutionOptions(), MagicMock(), DataContext.get_current()
)
resource_manager.get_op_usage = MagicMock(side_effect=lambda op: op_usages[op])
resource_manager._mem_op_internal = {o1: 0, o2: 40, o3: 40}
resource_manager._mem_op_outputs = {o1: 0, o2: 0, o3: 0}
resource_manager.get_global_limits = MagicMock(return_value=global_limits)
allocator = resource_manager._op_resource_allocator
allocator.update_budgets(limits=global_limits)
# Both ops are capped (max cpu=4), so remaining CPU is not given to any op.
# o2: reserved_remaining (2, 10) + capped op_shared (0, 100) = (2, 110)
# o3: reserved_remaining (2, 10) + capped op_shared (0, 100) = (2, 110)
assert allocator._op_budgets[o2] == ExecutionResources(
cpu=2, object_store_memory=110
)
assert allocator._op_budgets[o3] == ExecutionResources(
cpu=2, object_store_memory=110
)
def test_only_handle_eligible_ops(self, restore_data_context):
"""Test that we only handle non-completed map ops."""
DataContext.get_current().op_resource_reservation_enabled = True
input = make_ref_bundles([[x] for x in range(1)])
o1 = InputDataBuffer(DataContext.get_current(), input)
o2 = mock_map_op(o1)
o3 = LimitOperator(1, o2, DataContext.get_current())
topo = build_streaming_topology(o3, ExecutionOptions())
resource_manager = ResourceManager(
topo, ExecutionOptions(), MagicMock(), DataContext.get_current()
)
resource_manager.get_op_usage = MagicMock(
return_value=ExecutionResources.zero()
)
resource_manager.get_global_limits = MagicMock(
return_value=ExecutionResources.zero()
)
assert resource_manager.op_resource_allocator_enabled()
allocator = resource_manager._op_resource_allocator
assert isinstance(allocator, ReservationOpResourceAllocator)
global_limits = resource_manager.get_global_limits()
allocator.update_budgets(
limits=global_limits,
)
assert o1 not in allocator._op_budgets
assert o2 in allocator._op_budgets
assert o3 not in allocator._op_budgets
o2.mark_execution_finished()
allocator.update_budgets(
limits=global_limits,
)
assert o2 not in allocator._op_budgets
def test_gpu_allocation(self, restore_data_context):
"""Test GPU allocation for GPU vs non-GPU operators.
With unified allocation (no GPU special-casing), GPU flows through
the normal shared allocation path just like CPU and memory.
"""
DataContext.get_current().op_resource_reservation_enabled = True
DataContext.get_current().op_resource_reservation_ratio = 0.5
o1 = InputDataBuffer(DataContext.get_current(), [])
# Non-GPU operator (unbounded)
o2 = mock_map_op(o1)
o2.min_max_resource_requirements = MagicMock(
return_value=(ExecutionResources(0, 0, 0), ExecutionResources.inf())
)
# GPU operator (unbounded)
o3 = mock_map_op(o2, ray_remote_args={"num_gpus": 1})
o3.min_max_resource_requirements = MagicMock(
return_value=(ExecutionResources(0, 1, 0), ExecutionResources.inf())
)
topo = build_streaming_topology(o3, ExecutionOptions())
global_limits = ExecutionResources(gpu=4)
op_usages = {
o1: ExecutionResources.zero(),
o2: ExecutionResources.zero(),
o3: ExecutionResources(gpu=1), # GPU op using 1 GPU
}
resource_manager = ResourceManager(
topo, ExecutionOptions(), MagicMock(), DataContext.get_current()
)
resource_manager.get_op_usage = MagicMock(side_effect=lambda op: op_usages[op])
resource_manager._mem_op_internal = dict.fromkeys([o1, o2, o3], 0)
resource_manager._mem_op_outputs = dict.fromkeys([o1, o2, o3], 0)
resource_manager.get_global_limits = MagicMock(return_value=global_limits)
allocator = resource_manager._op_resource_allocator
allocator.update_budgets(
limits=global_limits,
)
# Both unbounded operators get shared GPU allocation
# GPU flows through normal allocation, both get GPU budget > 0
assert allocator._op_budgets[o2].gpu > 0
assert allocator._op_budgets[o3].gpu > 0
def test_multiple_gpu_operators(self, restore_data_context):
"""Test GPU allocation for multiple GPU operators."""
DataContext.get_current().op_resource_reservation_enabled = True
DataContext.get_current().op_resource_reservation_ratio = 0.5
o1 = InputDataBuffer(DataContext.get_current(), [])
# Two GPU operators
o2 = mock_map_op(o1, ray_remote_args={"num_gpus": 1})
o2.min_max_resource_requirements = MagicMock(
return_value=(ExecutionResources(0, 1, 0), ExecutionResources(0, 1, 0))
)
o3 = mock_map_op(o2, ray_remote_args={"num_gpus": 1})
o3.min_max_resource_requirements = MagicMock(
return_value=(ExecutionResources(0, 1, 0), ExecutionResources(0, 1, 0))
)
topo = build_streaming_topology(o3, ExecutionOptions())
global_limits = ExecutionResources(gpu=4)
op_usages = {
o1: ExecutionResources.zero(),
o2: ExecutionResources(gpu=1), # Using 1 GPU
o3: ExecutionResources(gpu=0), # Not using GPU yet
}
resource_manager = ResourceManager(
topo, ExecutionOptions(), MagicMock(), DataContext.get_current()
)
resource_manager.get_op_usage = MagicMock(side_effect=lambda op: op_usages[op])
resource_manager._mem_op_internal = dict.fromkeys([o1, o2, o3], 0)
resource_manager._mem_op_outputs = dict.fromkeys([o1, o2, o3], 0)
resource_manager.get_global_limits = MagicMock(return_value=global_limits)
allocator = resource_manager._op_resource_allocator
allocator.update_budgets(
limits=global_limits,
)
# Both operators are capped at their max of 1 GPU
# o2: using 1 GPU, reserved 1, so reserved_remaining = 0, gets 0 shared (capped)
# o3: using 0 GPU, reserved 1, so reserved_remaining = 1, gets 0 shared (capped)
assert allocator._op_budgets[o2].gpu == 0
assert allocator._op_budgets[o3].gpu == 1
def test_gpu_usage_exceeds_global_limits(self, restore_data_context):
"""Test that GPU budget is 0 when usage exceeds limits."""
o1 = InputDataBuffer(DataContext.get_current(), [])
# One GPU operator
o2 = mock_map_op(o1, ray_remote_args={"num_gpus": 1})
o2.min_max_resource_requirements = MagicMock(
return_value=(ExecutionResources(0, 1, 0), ExecutionResources(0, 2, 0))
)
topo = build_streaming_topology(o2, ExecutionOptions())
global_limits = ExecutionResources(gpu=1)
op_usages = {
o1: ExecutionResources.zero(),
# o2 uses 2 GPUs but only 1 is available. This can happen if you set
# `concurrency` to 2 but there's only 1 GPU in the cluster. In this case,
# one actor will be running and the other will be stuck pending.
o2: ExecutionResources(gpu=2),
}
resource_manager = ResourceManager(
topo, ExecutionOptions(), MagicMock(), DataContext.get_current()
)
resource_manager.get_op_usage = MagicMock(side_effect=lambda op: op_usages[op])
resource_manager._mem_op_internal = dict.fromkeys([o1, o2], 0)
resource_manager._mem_op_outputs = dict.fromkeys([o1, o2], 0)
resource_manager.get_global_limits = MagicMock(return_value=global_limits)
allocator = resource_manager._op_resource_allocator
allocator.update_budgets(
limits=global_limits,
)
# When usage (2) exceeds limits (1), the budget should be 0
# because reserved_remaining = reserved - usage = negative, clamped to 0
assert allocator._op_budgets[o2].gpu == 0
def test_gpu_unbounded_operator_can_autoscale(self, restore_data_context):
"""Test that unbounded GPU operators (max_size=None) get GPU budget for autoscaling.
This is a regression test for the bug where ActorPoolStrategy(min_size=1, max_size=None)
with GPU actors would not get any GPU budget, preventing autoscaling.
"""
DataContext.get_current().op_resource_reservation_enabled = True
DataContext.get_current().op_resource_reservation_ratio = 0.5
o1 = InputDataBuffer(DataContext.get_current(), [])
# Unbounded GPU operator (simulating ActorPoolStrategy with max_size=None)
# min = 1 GPU (for 1 actor), max = inf (unbounded)
o2 = mock_map_op(o1, ray_remote_args={"num_gpus": 1})
o2.min_max_resource_requirements = MagicMock(
return_value=(ExecutionResources(0, 1, 0), ExecutionResources.inf())
)
topo = build_streaming_topology(o2, ExecutionOptions())
global_limits = ExecutionResources(gpu=8)
op_usages = {
o1: ExecutionResources.zero(),
o2: ExecutionResources(gpu=1), # Currently using 1 GPU
}
resource_manager = ResourceManager(
topo, ExecutionOptions(), MagicMock(), DataContext.get_current()
)
resource_manager.get_op_usage = MagicMock(side_effect=lambda op: op_usages[op])
resource_manager._mem_op_internal = dict.fromkeys([o1, o2], 0)
resource_manager._mem_op_outputs = dict.fromkeys([o1, o2], 0)
resource_manager.get_global_limits = MagicMock(return_value=global_limits)
allocator = resource_manager._op_resource_allocator
allocator.update_budgets(
limits=global_limits,
)
# The unbounded GPU operator should get GPU budget > 0 so it can autoscale
# With 8 GPUs available and 1 used, there should be budget for more
assert allocator._op_budgets[o2].gpu > 0, (
f"Unbounded GPU operator should get GPU budget for autoscaling, "
f"but got {allocator._op_budgets[o2].gpu}"
)
    def test_actor_pool_gpu_operator_gets_gpu_budget_in_cpu_pipeline(
        self, restore_data_context
    ):
        """Test GPU ActorPool gets budget in a pipeline with multiple CPU operators.
        Regression test for a following pipeline:
        Input -> ListFiles -> ReadFiles -> Preprocess -> Infer(GPU) -> Write
        The GPU inference operator (ActorPool with GPUs) was stuck at 1 actor
        because it had gpu_budget=0, preventing autoscaling.
        Root cause: The borrowing logic used incremental_resource_usage() which
        returns gpu=0 for ActorPoolMapOperator (since submitting tasks to existing
        actors doesn't need new GPUs). The fix uses min_scheduling_resources()
        which returns the per-actor GPU requirement.
        """
        DataContext.get_current().op_resource_reservation_enabled = True
        DataContext.get_current().op_resource_reservation_ratio = 0.5
        # Build pipeline: Input -> Read -> Preprocess -> Infer(GPU) -> Write
        # This mirrors the production pipeline structure
        o1 = InputDataBuffer(DataContext.get_current(), [])
        o2 = mock_map_op(o1, ray_remote_args={"num_cpus": 1}, name="ReadFiles")
        o3 = mock_map_op(o2, ray_remote_args={"num_cpus": 1}, name="Preprocess")
        # The GPU op is a bounded actor pool (1 to 4 actors, 1 GPU each).
        o4 = mock_map_op(
            o3,
            ray_remote_args={"num_cpus": 0, "num_gpus": 1},
            compute_strategy=ray.data.ActorPoolStrategy(min_size=1, max_size=4),
            name="Infer",
        )
        o5 = mock_map_op(o4, ray_remote_args={"num_cpus": 1}, name="Write")
        topo = build_streaming_topology(o5, ExecutionOptions())
        # Cluster with 2 GPUs available
        global_limits = ExecutionResources(
            cpu=16, gpu=2, object_store_memory=10_000_000
        )
        # Simulate state where GPU operator has 1 actor running
        op_usages = {
            o1: ExecutionResources.zero(),
            o2: ExecutionResources.zero(),
            o3: ExecutionResources.zero(),
            o4: ExecutionResources(gpu=1),  # 1 GPU actor running
            o5: ExecutionResources.zero(),
        }
        resource_manager = ResourceManager(
            topo, ExecutionOptions(), MagicMock(), DataContext.get_current()
        )
        resource_manager.get_op_usage = MagicMock(side_effect=lambda op: op_usages[op])
        resource_manager._mem_op_internal = dict.fromkeys([o1, o2, o3, o4, o5], 0)
        resource_manager._mem_op_outputs = dict.fromkeys([o1, o2, o3, o4, o5], 0)
        resource_manager.get_global_limits = MagicMock(return_value=global_limits)
        allocator = resource_manager._op_resource_allocator
        allocator.update_budgets(limits=global_limits)
        # Verify the GPU operator gets GPU budget to scale up.
        # With 2 GPUs total, 1 used, the operator should have budget for 1 more.
        # Before the fix: budget.gpu=0 (couldn't scale)
        # After the fix: budget.gpu=1 (can scale to 1 more actor)
        assert allocator.get_budget(o4) == ExecutionResources(
            cpu=0, gpu=1, object_store_memory=625000
        )
        # Allocation = budget + current usage, hence gpu=2 here.
        assert allocator.get_allocation(o4) == ExecutionResources(
            cpu=0, gpu=2, object_store_memory=625000
        )
def test_gpu_bounded_vs_unbounded_operators(self, restore_data_context):
"""Test GPU allocation when one operator is bounded and one is unbounded.
With unified allocation, bounded operator is capped, unbounded gets remaining.
"""
DataContext.get_current().op_resource_reservation_enabled = True
DataContext.get_current().op_resource_reservation_ratio = 0.5
o1 = InputDataBuffer(DataContext.get_current(), [])
# Bounded GPU operator (max 2 GPUs)
o2 = mock_map_op(o1, ray_remote_args={"num_gpus": 1})
o2.min_max_resource_requirements = MagicMock(
return_value=(ExecutionResources(0, 1, 0), ExecutionResources(0, 2, 0))
)
# Unbounded GPU operator
o3 = mock_map_op(o2, ray_remote_args={"num_gpus": 1})
o3.min_max_resource_requirements = MagicMock(
return_value=(ExecutionResources(0, 1, 0), ExecutionResources.inf())
)
topo = build_streaming_topology(o3, ExecutionOptions())
global_limits = ExecutionResources(gpu=8)
op_usages = {
o1: ExecutionResources.zero(),
o2: ExecutionResources.zero(),
o3: ExecutionResources.zero(),
}
resource_manager = ResourceManager(
topo, ExecutionOptions(), MagicMock(), DataContext.get_current()
)
resource_manager.get_op_usage = MagicMock(side_effect=lambda op: op_usages[op])
resource_manager._mem_op_internal = dict.fromkeys([o1, o2, o3], 0)
resource_manager._mem_op_outputs = dict.fromkeys([o1, o2, o3], 0)
resource_manager.get_global_limits = MagicMock(return_value=global_limits)
allocator = resource_manager._op_resource_allocator
allocator.update_budgets(
limits=global_limits,
)
# o2 is capped at 2 GPUs (its max)
assert allocator._op_budgets[o2].gpu == 2
# o3 (unbounded) gets remaining GPUs after o2's excess is returned
# With 8 total GPUs and o2 capped at 2, o3 gets 6
assert allocator._op_budgets[o3].gpu == 6
    # max_actors=4 exercises a bounded GPU pool; inf exercises an unbounded one.
    @pytest.mark.parametrize("max_actors", [4, float("inf")])
    def test_gpu_not_reserved_for_non_gpu_operators(
        self, restore_data_context, max_actors
    ):
        """Test that GPU budget is not reserved for operators that don't use GPUs.
        This tests a realistic inference pipeline DAG:
        Read (CPU) -> Infer1 (GPU) -> Infer2 (GPU) -> Write (CPU)
        Non-GPU operators (Read, Write) should have 0 GPUs reserved, ensuring
        all GPUs are available for GPU operators (Infer1, Infer2).
        """
        DataContext.get_current().op_resource_reservation_enabled = True
        DataContext.get_current().op_resource_reservation_ratio = 0.5
        o1 = InputDataBuffer(DataContext.get_current(), [])
        # Read: CPU-only operator (unbounded, gpu=0 in max since it doesn't use GPUs)
        read_op = mock_map_op(o1, name="Read")
        read_op.min_max_resource_requirements = MagicMock(
            return_value=(
                ExecutionResources(cpu=1, gpu=0, object_store_memory=0),
                ExecutionResources.for_limits(gpu=0),
            )
        )
        # Infer1: GPU operator
        infer1_op = mock_map_op(read_op, ray_remote_args={"num_gpus": 1}, name="Infer1")
        infer1_op.min_max_resource_requirements = MagicMock(
            return_value=(
                ExecutionResources(cpu=0, gpu=1, object_store_memory=0),
                ExecutionResources(
                    cpu=0, gpu=max_actors, object_store_memory=float("inf")
                ),
            )
        )
        # Infer2: GPU operator
        infer2_op = mock_map_op(
            infer1_op, ray_remote_args={"num_gpus": 1}, name="Infer2"
        )
        infer2_op.min_max_resource_requirements = MagicMock(
            return_value=(
                ExecutionResources(cpu=0, gpu=1, object_store_memory=0),
                ExecutionResources(
                    cpu=0, gpu=max_actors, object_store_memory=float("inf")
                ),
            )
        )
        # Write: CPU-only operator (unbounded, gpu=0 in max since it doesn't use GPUs)
        write_op = mock_map_op(infer2_op, name="Write")
        write_op.min_max_resource_requirements = MagicMock(
            return_value=(
                ExecutionResources(cpu=1, gpu=0, object_store_memory=0),
                ExecutionResources.for_limits(gpu=0),
            )
        )
        topo = build_streaming_topology(write_op, ExecutionOptions())
        global_limits = ExecutionResources(cpu=8, gpu=8, object_store_memory=10_000_000)
        ops = [o1, read_op, infer1_op, infer2_op, write_op]
        # No operator is using any resources yet.
        op_usages = {op: ExecutionResources.zero() for op in ops}
        resource_manager = ResourceManager(
            topo, ExecutionOptions(), MagicMock(), DataContext.get_current()
        )
        resource_manager.get_op_usage = MagicMock(side_effect=lambda op: op_usages[op])
        resource_manager._mem_op_internal = dict.fromkeys(ops, 0)
        resource_manager._mem_op_outputs = dict.fromkeys(ops, 0)
        resource_manager.get_global_limits = MagicMock(return_value=global_limits)
        allocator = resource_manager._op_resource_allocator
        allocator.update_budgets(limits=global_limits)
        # Non-GPU operators should have 0 GPUs reserved
        assert allocator._op_reserved[read_op].gpu == 0
        assert allocator._op_reserved[write_op].gpu == 0
        # GPU operators should have GPUs reserved
        assert allocator._op_reserved[infer1_op].gpu > 0
        assert allocator._op_reserved[infer2_op].gpu > 0
        # All 8 GPUs should be available (reserved for GPU ops + shared pool)
        total_gpu_reserved = sum(
            allocator._op_reserved[op].gpu
            for op in [read_op, infer1_op, infer2_op, write_op]
        )
        assert total_gpu_reserved + allocator._total_shared.gpu == 8
    def test_reservation_accounts_for_completed_ops(self, restore_data_context):
        """Test that resource reservation properly accounts for completed ops."""
        DataContext.get_current().op_resource_reservation_enabled = True
        DataContext.get_current().op_resource_reservation_ratio = 0.5
        o1 = InputDataBuffer(DataContext.get_current(), [])
        o2 = mock_map_op(o1, ray_remote_args={"num_cpus": 1})
        o3 = mock_map_op(o2, ray_remote_args={"num_cpus": 1})
        o4 = mock_map_op(o3, ray_remote_args={"num_cpus": 1})
        # Mock min_max_resource_requirements to return default unbounded behavior
        for op in [o2, o3, o4]:
            op.min_max_resource_requirements = MagicMock(
                return_value=(ExecutionResources.zero(), ExecutionResources.inf())
            )
        # o1 and o2 are finished, but o2 still reports resource usage below
        # (e.g. outputs lingering in the object store).
        o1.mark_execution_finished()
        o2.mark_execution_finished()
        op_usages = {
            o1: ExecutionResources.zero(),
            o2: ExecutionResources(cpu=2, object_store_memory=50),
            o3: ExecutionResources.zero(),
            o4: ExecutionResources.zero(),
        }
        op_internal_usage = dict.fromkeys([o1, o2, o3, o4], 0)
        op_outputs_usages = dict.fromkeys([o1, o2, o3, o4], 0)
        topo = build_streaming_topology(o4, ExecutionOptions())
        global_limits = ExecutionResources(cpu=10, object_store_memory=250)
        resource_manager = ResourceManager(
            topo, ExecutionOptions(), MagicMock(), DataContext.get_current()
        )
        resource_manager.get_op_usage = MagicMock(side_effect=lambda op: op_usages[op])
        resource_manager._mem_op_internal = op_internal_usage
        resource_manager._mem_op_outputs = op_outputs_usages
        resource_manager.get_global_limits = MagicMock(return_value=global_limits)
        # Update allocated budgets
        resource_manager._update_allocated_budgets()
        # Check that o2's usage was subtracted from remaining resources
        # global_limits (10 CPU, 250 mem) - o1 usage (0) - o2 usage (2 CPU, 50 mem) = remaining (8 CPU, 200 mem)
        # With 2 eligible ops (o3, o4) and 50% reservation ratio:
        # Each op gets reserved: (8 CPU, 200 mem) * 0.5 / 2 = (2 CPU, 50 mem)
        allocator = resource_manager._op_resource_allocator
        # Verify that reservations are calculated correctly
        assert allocator._op_reserved[o3].cpu == 2.0
        assert allocator._op_reserved[o4].cpu == 2.0
        # The total reserved memory should account for o2's usage being subtracted
        total_reserved_memory = (
            allocator._op_reserved[o3].object_store_memory
            + allocator._reserved_for_op_outputs[o3]
            + allocator._op_reserved[o4].object_store_memory
            + allocator._reserved_for_op_outputs[o4]
        )
        # Tolerance absorbs rounding inside the allocator's split.
        assert abs(total_reserved_memory - 100) < 1.0
    def test_reservation_accounts_for_completed_ops_complex_graph(
        self, restore_data_context
    ):
        """
        o1 (InputDataBuffer)
        |
        v
        o2 (MapOperator, completed)
        |
        v
        o3 (LimitOperator)
        |
        v                  o4 (InputDataBuffer)
        |                  |
        |                  v
        |                  o5 (MapOperator, completed)
        |                  |
        v                  v
        o6 (UnionOperator) <--
        |
        v
        o8 (ZipOperator) <-- o7 (InputDataBuffer, completed)
        """
        DataContext.get_current().op_resource_reservation_enabled = True
        DataContext.get_current().op_resource_reservation_ratio = 0.5
        o1 = InputDataBuffer(DataContext.get_current(), [])
        o2 = mock_map_op(o1, ray_remote_args={"num_cpus": 1})
        o3 = LimitOperator(1, o2, DataContext.get_current())
        o4 = InputDataBuffer(DataContext.get_current(), [])
        o5 = mock_map_op(o4, ray_remote_args={"num_cpus": 1})
        o6 = mock_union_op([o3, o5])
        o7 = InputDataBuffer(DataContext.get_current(), [])
        o8 = mock_join_op(o7, o6)
        # Everything upstream of o6/o8 is already finished; only o6 and o8
        # remain eligible for budgeting.
        o1.mark_execution_finished()
        o2.mark_execution_finished()
        o4.mark_execution_finished()
        o5.mark_execution_finished()
        o7.mark_execution_finished()
        # Completed ops still report usage (resources not yet released).
        op_usages = {
            o1: ExecutionResources.zero(),
            o2: ExecutionResources(cpu=2, object_store_memory=150),
            o3: ExecutionResources(cpu=2, object_store_memory=50),
            o4: ExecutionResources.zero(),
            o5: ExecutionResources(cpu=3, object_store_memory=100),
            o6: ExecutionResources.zero(),
            o7: ExecutionResources(cpu=1, object_store_memory=100),
            o8: ExecutionResources.zero(),
        }
        op_internal_usage = dict.fromkeys([o1, o2, o3, o4, o5, o6, o7, o8], 0)
        op_outputs_usages = dict.fromkeys([o1, o2, o3, o4, o5, o6, o7, o8], 0)
        topo = build_streaming_topology(o8, ExecutionOptions())
        global_limits = ExecutionResources.zero()
        # Closure so the mocked get_global_limits tracks reassignment of
        # `global_limits` below.
        def mock_get_global_limits():
            nonlocal global_limits
            return global_limits
        resource_manager = ResourceManager(
            topo, ExecutionOptions(), MagicMock(), DataContext.get_current()
        )
        resource_manager.get_op_usage = MagicMock(side_effect=lambda op: op_usages[op])
        resource_manager.get_global_limits = MagicMock(
            side_effect=mock_get_global_limits
        )
        resource_manager._mem_op_internal = op_internal_usage
        resource_manager._mem_op_outputs = op_outputs_usages
        global_limits = ExecutionResources(cpu=20, object_store_memory=2000)
        resource_manager._update_allocated_budgets()
        """
        global_limits (20 CPU, 2000 mem) - o2 usage (2 CPU, 150 mem) - o3 usage (2 CPU, 50 mem) - o5 usage (3 CPU, 100 mem) - o7 usage (1 CPU, 100 mem) = remaining (12 CPU, 1600 mem)
        +-----+------------------+------------------+--------------+
        |     | _op_reserved     | _reserved_for    | used shared  |
        |     | (used/remaining) | _op_outputs      | resources    |
        |     |                  | (used/remaining) |              |
        +-----+------------------+------------------+--------------+
        | op6 | 0/200            | 0/200            | 0            |
        +-----+------------------+------------------+--------------+
        | op8 | 0/200            | 0/200            | 0            |
        +-----+------------------+------------------+--------------+
        """
        allocator = resource_manager._op_resource_allocator
        assert set(allocator._op_budgets.keys()) == {o6, o8}
        assert set(allocator._op_reserved.keys()) == {o6, o8}
        assert allocator._op_reserved[o6] == ExecutionResources(
            cpu=3, object_store_memory=200
        )
        assert allocator._op_reserved[o8] == ExecutionResources(
            cpu=3, object_store_memory=200
        )
        assert allocator._reserved_for_op_outputs[o6] == 200
        assert allocator._reserved_for_op_outputs[o8] == 200
        assert allocator._total_shared == ExecutionResources(
            cpu=6, object_store_memory=800
        )
        assert allocator._op_budgets[o6] == ExecutionResources(
            cpu=6, object_store_memory=600
        )
        # object_store_memory budget is unlimited, since join is a materializing
        # operator
        assert allocator._op_budgets[o8] == ExecutionResources(
            cpu=6, object_store_memory=float("inf")
        )
        # Test when resources are used.
        op_usages[o6] = ExecutionResources(2, 0, 500)
        op_internal_usage[o6] = 300
        op_outputs_usages[o6] = 200
        op_usages[o8] = ExecutionResources(2, 0, 100)
        op_internal_usage[o8] = 50
        op_outputs_usages[o8] = 50
        """
        +-----+------------------+------------------+--------------+
        |     | _op_reserved     | _reserved_for    | used shared  |
        |     | (used/remaining) | _op_outputs      | resources    |
        |     |                  | (used/remaining) |              |
        +-----+------------------+------------------+--------------+
        | op6 | 200/0            | 200/0            | 100          |
        +-----+------------------+------------------+--------------+
        | op8 | 50/150           | 50/150           | 0            |
        +-----+------------------+------------------+--------------+
        """
        resource_manager._update_allocated_budgets()
        assert allocator._op_budgets[o6] == ExecutionResources(
            cpu=4, object_store_memory=350
        )
        # object_store_memory budget is unlimited, since join is a materializing
        # operator
        assert allocator._op_budgets[o8] == ExecutionResources(
            cpu=4, object_store_memory=float("inf")
        )
        # Test when completed ops update the usage.
        op_usages[o5] = ExecutionResources.zero()
        resource_manager._update_allocated_budgets()
        """
        global_limits (20 CPU, 2000 mem) - o2 usage (2 CPU, 150 mem) - o3 usage (2 CPU, 50 mem) - o5 usage (0 CPU, 0 mem) - o7 usage (1 CPU, 100 mem) = remaining (15 CPU, 1700 mem)
        +-----+------------------+------------------+--------------+
        |     | _op_reserved     | _reserved_for    | used shared  |
        |     | (used/remaining) | _op_outputs      | resources    |
        |     |                  | (used/remaining) |              |
        +-----+------------------+------------------+--------------+
        | op6 | 213/0            | 200/13           | 300-213=87   |
        +-----+------------------+------------------+--------------+
        | op8 | 50/163           | 50/163           | 0            |
        +-----+------------------+------------------+--------------+
        """
        assert set(allocator._op_budgets.keys()) == {o6, o8}
        assert set(allocator._op_reserved.keys()) == {o6, o8}
        assert allocator._op_reserved[o6] == ExecutionResources(
            cpu=3.75, object_store_memory=213
        )
        assert allocator._op_reserved[o8] == ExecutionResources(
            cpu=3.75, object_store_memory=213
        )
        assert allocator._reserved_for_op_outputs[o6] == 212
        assert allocator._reserved_for_op_outputs[o8] == 212
        assert allocator._total_shared == ExecutionResources(
            cpu=7.5, object_store_memory=850
        )
        # object_store_memory budget = 0 + (850 - 87) / 2 = 381 (rounded down)
        assert allocator._op_budgets[o6] == ExecutionResources(
            cpu=5.5, object_store_memory=381
        )
        # object_store_memory budget is unlimited, since join is a materializing
        # operator
        assert allocator._op_budgets[o8] == ExecutionResources(
            cpu=5.5, object_store_memory=float("inf")
        )
if __name__ == "__main__":
    # Run this module's tests verbosely; exit with pytest's status code.
    raise SystemExit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_reservation_based_resource_allocator.py",
"license": "Apache License 2.0",
"lines": 989,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/_internal/progress/base_progress.py | import logging
import threading
import typing
from abc import ABC, abstractmethod
from typing import Any, List, Optional
import ray
from ray.data._internal.execution.operators.sub_progress import SubProgressBarMixin
from ray.data._internal.progress.utils import truncate_operator_name
if typing.TYPE_CHECKING:
from ray.data._internal.execution.resource_manager import ResourceManager
from ray.data._internal.execution.streaming_executor_state import OpState, Topology
from ray.types import ObjectRef
logger = logging.getLogger(__name__)
# Used a signal to cancel execution.
_canceled_threads = set()
_canceled_threads_lock = threading.Lock()
def _extract_num_rows(result: Any) -> int:
"""Extract the number of rows from a result object.
Args:
result: The result object from which to extract the number of rows.
Returns:
The number of rows, defaulting to 1 if it cannot be determined.
"""
if hasattr(result, "num_rows"):
return result.num_rows
elif hasattr(result, "__len__"):
# For output is DataFrame,i.e. sort_sample
return len(result)
else:
return 1
class BaseProgressBar(ABC):
    """Base class to define a progress bar.

    Subclasses implement the rendering (``set_description``/``update``); the
    blocking helpers below poll Ray object refs every 0.1s and feed the number
    of completed rows into ``update`` until all refs finish or the calling
    thread is canceled via the module-level ``_canceled_threads`` set.
    """
    def block_until_complete(self, remaining: List["ObjectRef"]) -> None:
        # Poll until every ref is done; results are not fetched to the local
        # node by ray.wait (fetch_local=False).
        t = threading.current_thread()
        while remaining:
            done, remaining = ray.wait(
                remaining, num_returns=len(remaining), fetch_local=False, timeout=0.1
            )
            total_rows_processed = 0
            # NOTE(review): ray.get() here does fetch the results even though
            # fetch_local=False was passed to ray.wait above — confirm intended.
            for _, result in zip(done, ray.get(done)):
                num_rows = _extract_num_rows(result)
                total_rows_processed += num_rows
            self.update(total_rows_processed)
            # Cooperative cancellation: another thread may have added this
            # thread to `_canceled_threads` to stop the wait loop early.
            with _canceled_threads_lock:
                if t in _canceled_threads:
                    break
    def fetch_until_complete(self, refs: List["ObjectRef"]) -> List[Any]:
        # Wait for all refs, updating the bar as results complete, and return
        # the fetched results in the same order as `refs`.
        ref_to_result = {}
        remaining = refs
        t = threading.current_thread()
        # Triggering fetch_local redundantly for the same object is slower.
        # We only need to trigger the fetch_local once for each object,
        # raylet will persist these fetch requests even after ray.wait returns.
        # See https://github.com/ray-project/ray/issues/30375.
        fetch_local = True
        while remaining:
            done, remaining = ray.wait(
                remaining,
                num_returns=len(remaining),
                fetch_local=fetch_local,
                timeout=0.1,
            )
            if fetch_local:
                fetch_local = False
            total_rows_processed = 0
            for ref, result in zip(done, ray.get(done)):
                ref_to_result[ref] = result
                num_rows = _extract_num_rows(result)
                total_rows_processed += num_rows
            self.update(total_rows_processed)
            # NOTE(review): if canceled before all refs complete, the final
            # lookup below would raise KeyError for unfetched refs — presumably
            # the return value is unused after cancellation; confirm.
            with _canceled_threads_lock:
                if t in _canceled_threads:
                    break
        return [ref_to_result[ref] for ref in refs]
    @abstractmethod
    def set_description(self, name: str) -> None:
        """Set the text displayed for this bar."""
        ...
    @abstractmethod
    def get_description(self) -> str:
        """Return the currently displayed text."""
        ...
    @abstractmethod
    def update(self, increment: int = 0, total: Optional[int] = None) -> None:
        """Advance progress by `increment` rows, optionally updating the total."""
        ...
    def refresh(self):
        # No-op by default; subclasses may redraw here.
        pass
    def close(self):
        # No-op by default; subclasses may release display resources here.
        pass
class BaseExecutionProgressManager(ABC):
    """Base Data Execution Progress Display Manager.

    Defines the interface the streaming executor uses to report overall and
    per-operator progress; concrete subclasses decide how (or whether) the
    progress is displayed.
    """
    # If the name/description of the progress bar exceeds this length,
    # it will be truncated.
    MAX_NAME_LENGTH = 100
    # Total progress refresh rate (update interval in scheduling step)
    # refer to `streaming_executor.py::StreamingExecutor::_scheduling_loop_step`
    TOTAL_PROGRESS_REFRESH_EVERY_N_STEPS = 50
    # NOTE: __init__ itself is abstract so every subclass must define its own
    # construction logic for the given topology.
    @abstractmethod
    def __init__(
        self,
        dataset_id: str,
        topology: "Topology",
        show_op_progress: bool,
        verbose_progress: bool,
    ):
        """Initialize the progress manager, create all necessary progress bars
        and sub-progress bars for the given topology. Sub-progress bars are
        created for operators that implement the SubProgressBarMixin.

        Args:
            dataset_id: id of Dataset
            topology: operation topology built via `build_streaming_topology`
            show_op_progress: whether to show individual operator progress
                (only for non-AllToAll by default).
            verbose_progress: whether to show individual operator progress for
                non-AllToAll operators as well.
        """
        ...
    @abstractmethod
    def start(self) -> None:
        """Start the progress manager."""
        ...
    @abstractmethod
    def refresh(self) -> None:
        """Refresh displayed progress."""
        ...
    @abstractmethod
    def close_with_finishing_description(self, desc: str, success: bool) -> None:
        """Close the progress manager with a finishing message.

        Args:
            desc: description to display
            success: whether the dataset execution was successful
        """
        ...
    @abstractmethod
    def update_total_progress(self, new_rows: int, total_rows: Optional[int]) -> None:
        """Update the total progress rows.

        Args:
            new_rows: new rows processed by the streaming_executor
            total_rows: total rows to be processed (if known)
        """
        ...
    @abstractmethod
    def update_total_resource_status(self, resource_status: str) -> None:
        """Update the total resource usage statistics.

        Args:
            resource_status: resource status information string.
        """
        ...
    @abstractmethod
    def update_operator_progress(
        self, opstate: "OpState", resource_manager: "ResourceManager"
    ) -> None:
        """Update individual operator progress.

        Args:
            opstate: opstate of the operator.
            resource_manager: the ResourceManager.
        """
        ...
class NoopSubProgressBar(BaseProgressBar):
    """No-op sub-progress bar used when progress display is disabled.

    Only the (truncated) description is tracked; every progress update,
    refresh, and close is silently ignored.
    """
    def __init__(self, name: str, max_name_length: int):
        self._max_name_length = max_name_length
        self._desc = truncate_operator_name(name, max_name_length)
    def set_description(self, name: str) -> None:
        self._desc = truncate_operator_name(name, self._max_name_length)
    def get_description(self) -> str:
        return self._desc
    def update(self, increment: int = 0, total: Optional[int] = None) -> None:
        """Ignored; this bar renders nothing."""
    def refresh(self):
        """Ignored; this bar renders nothing."""
    def close(self):
        """Ignored; this bar renders nothing."""
class NoopExecutionProgressManager(BaseExecutionProgressManager):
    """Noop Data Execution Progress Display Manager (Progress Display Disabled)"""
    def __init__(
        self,
        dataset_id: str,
        topology: "Topology",
        show_op_progress: bool,
        verbose_progress: bool,
    ):
        # Operators exposing sub-progress bars still expect one to be
        # installed, so hand each declared bar name a no-op implementation.
        for state in topology.values():
            op = state.op
            if not isinstance(op, SubProgressBarMixin):
                continue
            for name in op.get_sub_progress_bar_names() or []:
                op.set_sub_progress_bar(
                    name,
                    NoopSubProgressBar(
                        name=name, max_name_length=self.MAX_NAME_LENGTH
                    ),
                )
    def start(self) -> None:
        """No-op: progress display is disabled."""
    def refresh(self) -> None:
        """No-op: progress display is disabled."""
    def close_with_finishing_description(self, desc: str, success: bool) -> None:
        """No-op: progress display is disabled."""
    def update_total_progress(self, new_rows: int, total_rows: Optional[int]) -> None:
        """No-op: progress display is disabled."""
    def update_total_resource_status(self, resource_status: str) -> None:
        """No-op: progress display is disabled."""
    def update_operator_progress(
        self, opstate: "OpState", resource_manager: "ResourceManager"
    ) -> None:
        """No-op: progress display is disabled."""
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/progress/base_progress.py",
"license": "Apache License 2.0",
"lines": 205,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/_internal/progress/utils.py | import logging
from ray.util.debug import log_once
logger = logging.getLogger(__name__)
def truncate_operator_name(name: str, max_name_length: int) -> str:
    """Truncate a chained operator name (e.g. ``A->B->C->D``) for display.

    Always keeps the first and last operator names so the DAG endpoints stay
    identifiable, and greedily includes middle operators until the joined
    string would (approximately) exceed ``max_name_length``; the rest are
    replaced with ``...``.

    Args:
        name: The full (possibly ``->``-chained) operator name.
        max_name_length: Approximate maximum length of the returned name.

    Returns:
        The original name if truncation is disabled via
        ``DataContext.enable_progress_bar_name_truncation`` or the name
        already fits; otherwise the truncated name.
    """
    from ray.data.context import DataContext

    ctx = DataContext.get_current()
    if not ctx.enable_progress_bar_name_truncation or len(name) <= max_name_length:
        return name

    op_names = name.split("->")
    if len(op_names) == 1:
        return op_names[0]

    # Include as many operators as possible without approximately
    # exceeding `max_name_length`. Always include the first and
    # last operator names so it is easy to identify the DAG.
    truncated_op_names = [op_names[0]]
    for op_name in op_names[1:-1]:
        if (
            len("->".join(truncated_op_names))
            + len("->")
            + len(op_name)
            + len("->")
            + len(op_names[-1])
        ) > max_name_length:
            truncated_op_names.append("...")
            if log_once("ray_data_truncate_operator_name"):
                # BUG FIX: the message previously pointed users at the
                # non-functional `DEFAULT_ENABLE_PROGRESS_BAR_NAME_TRUNCATION`
                # attribute; the flag actually read above is
                # `enable_progress_bar_name_truncation`.
                logger.warning(
                    f"Truncating long operator name to {max_name_length} "
                    "characters. To disable this behavior, set "
                    "`ray.data.DataContext.get_current()."
                    "enable_progress_bar_name_truncation = False`."
                )
            break
        truncated_op_names.append(op_name)
    truncated_op_names.append(op_names[-1])
    return "->".join(truncated_op_names)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/progress/utils.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/tests/test_local_port_service_discovery.py | import sys
import pytest
from ray._private.services import get_node
@pytest.mark.parametrize(
    "ray_start_cluster_head",
    [
        {
            "gcs_server_port": 0,
            "metrics_export_port": 0,
            "metrics_agent_port": 0,
            "runtime_env_agent_port": 0,
            "dashboard_agent_listen_port": 0,
        }
    ],
    indirect=True,
)
def test_local_port_service_discovery(ray_start_cluster_head):
    """
    Test that when all ports are set to 0 (auto-assign), all components
    self-bind to available ports and the port information is correctly
    reported to GCS.
    """
    cluster = ray_start_cluster_head
    # We won't be able to get node info if GCS port didn't report correctly
    # So, get_node implicitly validate GCS port reporting.
    node_info = get_node(cluster.gcs_address, cluster.head_node.node_id)
    for field in (
        "metrics_export_port",
        "metrics_agent_port",
        "runtime_env_agent_port",
        "dashboard_agent_listen_port",
    ):
        port = node_info.get(field)
        assert port and port > 0, f"{field} should be > 0, got {port}"
# Allow running this test module directly with `python`; delegate to pytest.
if __name__ == "__main__":
    sys.exit(pytest.main(["-sv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/test_local_port_service_discovery.py",
"license": "Apache License 2.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/_internal/utils/tensorflow_utils.py | from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
import numpy as np
import pyarrow
import tensorflow as tf
from ray.data._internal.tensor_extensions.arrow import get_arrow_extension_tensor_types
from ray.data.util.data_batch_conversion import _unwrap_ndarray_object_type_if_needed
if TYPE_CHECKING:
from ray.data._internal.pandas_block import PandasBlockSchema
def convert_ndarray_to_tf_tensor(
    ndarray: np.ndarray,
    dtype: Optional[tf.dtypes.DType] = None,
    type_spec: Optional[tf.TypeSpec] = None,
) -> tf.Tensor:
    """Convert a NumPy ndarray to a TensorFlow Tensor.

    Args:
        ndarray: A NumPy ndarray that we wish to convert to a TensorFlow Tensor.
        dtype: A TensorFlow dtype for the created tensor; if None, the dtype will be
            inferred from the NumPy ndarray data.
        type_spec: A type spec that specifies the shape and dtype of the returned
            tensor. If you specify ``dtype``, the dtype stored in the type spec is
            ignored.

    Returns:
        A TensorFlow Tensor.
    """
    # An explicitly supplied `dtype` takes precedence over the one carried by
    # `type_spec`.
    if dtype is None and type_spec is not None:
        dtype = type_spec.dtype

    values = _unwrap_ndarray_object_type_if_needed(ndarray)

    # Ragged specs need the ragged constructor; everything else goes through
    # the dense conversion path.
    if isinstance(type_spec, tf.RaggedTensorSpec):
        return tf.ragged.constant(values, dtype=dtype)
    return tf.convert_to_tensor(values, dtype=dtype)
def convert_ndarray_batch_to_tf_tensor_batch(
    ndarrays: Union[np.ndarray, Dict[str, np.ndarray]],
    dtypes: Optional[Union[tf.dtypes.DType, Dict[str, tf.dtypes.DType]]] = None,
) -> Union[tf.Tensor, Dict[str, tf.Tensor]]:
    """Convert a NumPy ndarray batch to a TensorFlow Tensor batch.

    Args:
        ndarrays: A (dict of) NumPy ndarray(s) that we wish to convert to a TensorFlow
            Tensor.
        dtypes: A (dict of) TensorFlow dtype(s) for the created tensor; if None, the
            dtype will be inferred from the NumPy ndarray data.

    Returns:
        A (dict of) TensorFlow Tensor(s).
    """
    if isinstance(ndarrays, np.ndarray):
        # Single-tensor batch: a dict of dtypes must contain exactly one entry.
        if isinstance(dtypes, dict):
            if len(dtypes) != 1:
                raise ValueError(
                    "When constructing a single-tensor batch, only a single dtype "
                    f"should be given, instead got: {dtypes}"
                )
            dtypes = next(iter(dtypes.values()))
        return convert_ndarray_to_tf_tensor(ndarrays, dtypes)

    # Multi-tensor batch: convert each column with its resolved dtype.
    def _dtype_for(col_name):
        return dtypes[col_name] if isinstance(dtypes, dict) else dtypes

    return {
        col_name: convert_ndarray_to_tf_tensor(col_ndarray, dtype=_dtype_for(col_name))
        for col_name, col_ndarray in ndarrays.items()
    }
def get_type_spec(
    schema: Union["pyarrow.lib.Schema", "PandasBlockSchema"],
    columns: Union[str, List[str]],
) -> Union[tf.TypeSpec, Dict[str, tf.TypeSpec]]:
    """Build TensorFlow type spec(s) for the given column(s) of a dataset schema.

    Returns a single spec when ``columns`` is a single column name, otherwise a
    dict mapping each requested column name to its spec. Columns with more than
    one unknown dimension (beyond the leading batch dimension) are described
    with ``tf.RaggedTensorSpec``; all others with ``tf.TensorSpec``.
    """
    import pyarrow as pa
    from ray.data.extensions import TensorDtype
    tensor_extension_types = get_arrow_extension_tensor_types()
    # A schema *instance* is required here, not a schema class.
    assert not isinstance(schema, type)
    dtypes: Dict[str, Union[np.dtype, pa.DataType]] = dict(
        zip(schema.names, schema.types)
    )
    def get_dtype(dtype: Union[np.dtype, pa.DataType]) -> tf.dtypes.DType:
        # Unwrap list/tensor wrappers down to the element dtype before
        # mapping to a TensorFlow dtype.
        if isinstance(dtype, pa.ListType):
            dtype = dtype.value_type
        if isinstance(dtype, pa.DataType):
            dtype = dtype.to_pandas_dtype()
        if isinstance(dtype, TensorDtype):
            dtype = dtype.element_dtype
        res = tf.dtypes.as_dtype(dtype)
        return res
    def get_shape(dtype: Union[np.dtype, pa.DataType]) -> Tuple[int, ...]:
        # Leading `None` is the batch dimension; list columns contribute one
        # extra unknown dimension, tensor columns their element shape.
        shape = (None,)
        if isinstance(dtype, tensor_extension_types):
            dtype = dtype.to_pandas_dtype()
        if isinstance(dtype, pa.ListType):
            shape += (None,)
        elif isinstance(dtype, TensorDtype):
            shape += dtype.element_shape
        return shape
    def get_tensor_spec(
        dtype: Union[np.dtype, pa.DataType], *, name: str
    ) -> tf.TypeSpec:
        shape, dtype = get_shape(dtype), get_dtype(dtype)
        # Batch dimension is always `None`. So, if there's more than one `None`-valued
        # dimension, then the tensor is ragged.
        is_ragged = sum(dim is None for dim in shape) > 1
        if is_ragged:
            type_spec = tf.RaggedTensorSpec(shape, dtype=dtype)
        else:
            type_spec = tf.TensorSpec(shape, dtype=dtype, name=name)
        return type_spec
    if isinstance(columns, str):
        # Single-column request: return the bare spec for that column.
        name, dtype = columns, dtypes[columns]
        return get_tensor_spec(dtype, name=name)
    return {
        name: get_tensor_spec(dtype, name=name)
        for name, dtype in dtypes.items()
        if name in columns
    }
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/utils/tensorflow_utils.py",
"license": "Apache License 2.0",
"lines": 114,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/constants.py | # Name to use for the column when representing tensors in table format.
TENSOR_COLUMN_NAME = "__value__"
# The maximum length of strings returned by `__repr__` for AIR objects constructed with
# default values.
MAX_REPR_LENGTH = int(80 * 1.5)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/constants.py",
"license": "Apache License 2.0",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/data/util/data_batch_conversion.py | import warnings
from enum import Enum
from typing import TYPE_CHECKING, Dict, List, Union
import numpy as np
from ray.air.data_batch_type import DataBatchType
from ray.data.constants import TENSOR_COLUMN_NAME
from ray.data.util.expression_utils import _get_setting_with_copy_warning
from ray.util.annotations import DeveloperAPI
if TYPE_CHECKING:
import pandas as pd
# TODO: Consolidate data conversion edges for arrow bug workaround.
try:
import pyarrow
except ImportError:
pyarrow = None
# Lazy import to avoid ray init failures without pandas installed and allow
# dataset to import modules in this file.
_pandas = None
def _lazy_import_pandas():
    """Return the pandas module, importing and caching it on first call.

    Deferring the import keeps `ray` importable without pandas installed.
    """
    global _pandas
    if _pandas is None:
        import pandas as _pandas_module

        _pandas = _pandas_module
    return _pandas
@DeveloperAPI
class BatchFormat(str, Enum):
    """User-facing batch formats; members compare equal to their string values."""
    PANDAS = "pandas"
    # TODO: Remove once Arrow is deprecated as user facing batch format
    ARROW = "arrow"
    NUMPY = "numpy"  # Either a single numpy array or a Dict of numpy arrays.
def _convert_batch_type_to_pandas(
    data: DataBatchType,
    cast_tensor_columns: bool = False,
) -> "pd.DataFrame":
    """Convert the provided data to a Pandas DataFrame.

    Args:
        data: Data of type DataBatchType
        cast_tensor_columns: Whether tensor columns should be cast to NumPy ndarrays.

    Returns:
        A pandas Dataframe representation of the input data.
    """
    pd = _lazy_import_pandas()

    if isinstance(data, np.ndarray):
        df = pd.DataFrame({TENSOR_COLUMN_NAME: _ndarray_to_column(data)})
    elif isinstance(data, dict):
        # Each dict value must be an ndarray; convert them column by column.
        columns = {}
        for col_name, col in data.items():
            if not isinstance(col, np.ndarray):
                raise ValueError(
                    "All values in the provided dict must be of type "
                    f"np.ndarray. Found type {type(col)} for key {col_name} "
                    f"instead."
                )
            columns[col_name] = _ndarray_to_column(col)
        df = pd.DataFrame(columns)
    elif pyarrow is not None and isinstance(data, pyarrow.Table):
        df = data.to_pandas()
    elif isinstance(data, pd.DataFrame):
        df = data
    else:
        raise ValueError(
            f"Received data of type: {type(data)}, but expected it to be one "
            f"of {DataBatchType}"
        )

    return _cast_tensor_columns_to_ndarrays(df) if cast_tensor_columns else df
def _convert_pandas_to_batch_type(
    data: "pd.DataFrame",
    type: BatchFormat,
    cast_tensor_columns: bool = False,
) -> DataBatchType:
    """Convert the provided Pandas dataframe to the provided ``type``.

    Args:
        data: A Pandas DataFrame
        type: The specific ``BatchFormat`` to convert to.
        cast_tensor_columns: Whether tensor columns should be cast to our tensor
            extension type.

    Returns:
        The input data represented with the provided type.

    Raises:
        ValueError: If ``type`` is not a supported ``BatchFormat``, or if an
            Arrow conversion is requested but pyarrow is not installed.
    """
    if cast_tensor_columns:
        data = _cast_ndarray_columns_to_tensor_extension(data)
    if type == BatchFormat.PANDAS:
        return data
    elif type == BatchFormat.NUMPY:
        if len(data.columns) == 1:
            # If just a single column, return as a single numpy array.
            return data.iloc[:, 0].to_numpy()
        else:
            # Else return as a dict of numpy arrays.
            output_dict = {}
            for column in data:
                output_dict[column] = data[column].to_numpy()
            return output_dict
    elif type == BatchFormat.ARROW:
        if not pyarrow:
            raise ValueError(
                "Attempted to convert data to Pyarrow Table but Pyarrow "
                "is not installed. Please do `pip install pyarrow` to "
                "install Pyarrow."
            )
        return pyarrow.Table.from_pandas(data)
    else:
        # BUG FIX: the error previously listed `DataBatchType` (the accepted
        # *data* types); the valid values here are the `BatchFormat` variants.
        raise ValueError(
            f"Received type {type}, but expected it to be one of {list(BatchFormat)}"
        )
def _convert_batch_type_to_numpy(
    data: DataBatchType,
) -> Union[np.ndarray, Dict[str, np.ndarray]]:
    """Convert the provided data to a NumPy ndarray or dict of ndarrays.

    Args:
        data: Data of type DataBatchType

    Returns:
        A numpy representation of the input data.
    """
    pd = _lazy_import_pandas()
    if isinstance(data, np.ndarray):
        return data
    elif isinstance(data, dict):
        # Already in the numpy batch format; just validate the values.
        for col_name, col in data.items():
            if not isinstance(col, np.ndarray):
                raise ValueError(
                    "All values in the provided dict must be of type "
                    f"np.ndarray. Found type {type(col)} for key {col_name} "
                    f"instead."
                )
        return data
    elif pyarrow is not None and isinstance(data, pyarrow.Table):
        from ray.data._internal.arrow_ops import transform_pyarrow
        from ray.data._internal.tensor_extensions.arrow import (
            get_arrow_extension_fixed_shape_tensor_types,
        )
        column_values_ndarrays = []
        for col in data.columns:
            # Combine columnar values arrays to make these contiguous
            # (making them compatible with numpy format)
            combined_array = transform_pyarrow.combine_chunked_array(col)
            column_values_ndarrays.append(
                transform_pyarrow.to_numpy(combined_array, zero_copy_only=False)
            )
        arrow_fixed_shape_tensor_types = get_arrow_extension_fixed_shape_tensor_types()
        # NOTE: This branch is here for backwards-compatibility
        # A single fixed-shape tensor column under the reserved tensor column
        # name is returned as a bare ndarray rather than a one-entry dict.
        if data.column_names == [TENSOR_COLUMN_NAME] and (
            isinstance(data.schema.types[0], arrow_fixed_shape_tensor_types)
        ):
            return column_values_ndarrays[0]
        return dict(zip(data.column_names, column_values_ndarrays))
    elif isinstance(data, pd.DataFrame):
        return _convert_pandas_to_batch_type(data, BatchFormat.NUMPY)
    else:
        raise ValueError(
            f"Received data of type: {type(data)}, but expected it to be one "
            f"of {DataBatchType}"
        )
def _ndarray_to_column(arr: np.ndarray) -> Union["pd.Series", List[np.ndarray]]:
    """Convert a NumPy ndarray into an appropriate column format for insertion into a
    pandas DataFrame.

    If conversion to a pandas Series fails (e.g. if the ndarray is multi-dimensional),
    fall back to a list of NumPy ndarrays.
    """
    pandas_mod = _lazy_import_pandas()
    try:
        series = pandas_mod.Series(arr)
    except ValueError:
        # Multi-dimensional data can't live in a Series; store one ndarray
        # per row instead.
        return list(arr)
    return series
def _unwrap_ndarray_object_type_if_needed(arr: np.ndarray) -> np.ndarray:
"""Unwrap an object-dtyped NumPy ndarray containing ndarray pointers into a single
contiguous ndarray, if needed/possible.
"""
if arr.dtype.type is np.object_:
try:
# Try to convert the NumPy ndarray to a non-object dtype.
arr = np.array([np.asarray(v) for v in arr])
except Exception:
# This may fail if the subndarrays are of heterogeneous shape
pass
return arr
def _cast_ndarray_columns_to_tensor_extension(df: "pd.DataFrame") -> "pd.DataFrame":
    """
    Cast all NumPy ndarray columns in df to our tensor extension type, TensorArray.

    Mutates ``df`` in place (column assignment) and also returns it.
    """
    # Get the SettingWithCopyWarning class if available
    SettingWithCopyWarning = _get_setting_with_copy_warning()
    from ray.data._internal.tensor_extensions.pandas import (
        TensorArray,
        column_needs_tensor_extension,
    )
    # Try to convert any ndarray columns to TensorArray columns.
    # TODO(Clark): Once Pandas supports registering extension types for type
    # inference on construction, implement as much for NumPy ndarrays and remove
    # this. See https://github.com/pandas-dev/pandas/issues/41848
    # TODO(Clark): Optimize this with propagated DataFrame metadata containing a list of
    # column names containing tensor columns, to make this an O(# of tensor columns)
    # check rather than the current O(# of columns) check.
    for col_name, col in df.items():
        if column_needs_tensor_extension(col):
            try:
                # Suppress Pandas warnings:
                # https://github.com/ray-project/ray/issues/29270
                # We actually want in-place operations so we suppress this warning.
                # https://stackoverflow.com/a/74193599
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore", category=FutureWarning)
                    if SettingWithCopyWarning is not None:
                        warnings.simplefilter("ignore", category=SettingWithCopyWarning)
                    df[col_name] = TensorArray(col)
            except Exception as e:
                # Surface a user-actionable error pointing at the context flag
                # that disables this automatic casting.
                raise ValueError(
                    f"Tried to cast column {col_name} to the TensorArray tensor "
                    "extension type but the conversion failed. To disable "
                    "automatic casting to this tensor extension, set "
                    "ctx = DataContext.get_current(); "
                    "ctx.enable_tensor_extension_casting = False."
                ) from e
    return df
def _cast_tensor_columns_to_ndarrays(df: "pd.DataFrame") -> "pd.DataFrame":
    """Cast all tensor extension columns in df to NumPy ndarrays.

    Mutates ``df`` in place (column assignment) and also returns it.
    """
    # Get the SettingWithCopyWarning class if available
    SettingWithCopyWarning = _get_setting_with_copy_warning()
    from ray.data._internal.tensor_extensions.pandas import TensorDtype
    # Try to convert any tensor extension columns to ndarray columns.
    # TODO(Clark): Optimize this with propagated DataFrame metadata containing a list of
    # column names containing tensor columns, to make this an O(# of tensor columns)
    # check rather than the current O(# of columns) check.
    for col_name, col in df.items():
        if isinstance(col.dtype, TensorDtype):
            # Suppress Pandas warnings:
            # https://github.com/ray-project/ray/issues/29270
            # We actually want in-place operations so we suppress this warning.
            # https://stackoverflow.com/a/74193599
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", category=FutureWarning)
                if SettingWithCopyWarning is not None:
                    warnings.simplefilter("ignore", category=SettingWithCopyWarning)
                # One ndarray per row; pandas stores this as an object column.
                df[col_name] = list(col.to_numpy())
    return df
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/util/data_batch_conversion.py",
"license": "Apache License 2.0",
"lines": 237,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/util/torch_utils.py | import warnings
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
import pyarrow
import torch
from ray._common.utils import env_bool
from ray.data.collate_fn import (
TensorBatchReturnType,
TensorBatchType,
_is_nested_tensor_sequence,
_is_tensor,
_is_tensor_mapping,
_is_tensor_sequence,
_is_tensor_sequence_mapping,
)
from ray.data.util.data_batch_conversion import _unwrap_ndarray_object_type_if_needed
# Default non-blocking transfer for tensors.
# Read once at import time from the environment; set the env var to "0"/"false"
# to disable. NOTE(review): presumably consumed as `non_blocking=` in
# host-to-device copies elsewhere — confirm at the call sites.
DEFAULT_TENSOR_NON_BLOCKING_TRANSFER = env_bool(
    "RAY_AIR_DEFAULT_TENSOR_NON_BLOCKING_TRANSFER",
    True,
)
def convert_pandas_to_torch_tensor(
    data_batch: pd.DataFrame,
    columns: Optional[Union[List[str], List[List[str]]]] = None,
    column_dtypes: Optional[Union[torch.dtype, List[torch.dtype]]] = None,
    unsqueeze: bool = True,
) -> Union[torch.Tensor, List[torch.Tensor]]:
    """Converts a Pandas dataframe to a torch Tensor or list of torch Tensors.

    The format of the return type will match the format of ``columns``. If a
    list of columns is provided, the return type will be a single tensor. If
    ``columns`` is a list of lists, then the return type will be a list of
    tensors.

    Args:
        data_batch: The pandas dataframe to convert to a torch tensor.
        columns: The names of the columns in the dataframe to include in the
            torch tensor. If this arg is a List[List[str]], then the return
            type will be a List of tensors. This is useful for multi-input
            models. If None, then use all columns in the ``data_batch``.
        column_dtypes: The torch dtype to use for the tensor. If set to None,
            then automatically infer the dtype.
        unsqueeze: If set to True, the tensors will be unsqueezed (reshaped to
            (N, 1)) before being concatenated into the final tensor. Otherwise,
            they will be left as is, that is (N, ). Defaults to True.

    Returns:
        Either a torch tensor of size (N, len(columns)) where N is the
        number of rows in the ``data_batch`` Dataframe, or a list of
        tensors, where the size of item i is (N, len(columns[i])).

    Raises:
        TypeError: If ``columns`` is a flat list of names but ``column_dtypes``
            is neither None nor a single ``torch.dtype``.
        ValueError: If a column cannot be converted to a tensor of the
            requested dtype.
    """
    multi_input = columns and (isinstance(columns[0], (list, tuple)))

    if not multi_input and column_dtypes and not isinstance(column_dtypes, torch.dtype):
        raise TypeError(
            "If `columns` is a list of strings, "
            "`column_dtypes` must be None or a single `torch.dtype`."
            f"Got {type(column_dtypes)} instead."
        )

    columns = columns if columns else []

    def tensorize(vals, dtype):
        """This recursive function allows to convert pyarrow List dtypes
        to multi-dimensional tensors."""
        if isinstance(vals, pd.api.extensions.ExtensionArray):
            # torch.as_tensor() does not yet support the __array__ protocol, so we need
            # to convert extension arrays to ndarrays manually before converting to a
            # Torch tensor.
            # See https://github.com/pytorch/pytorch/issues/51156.
            vals = vals.to_numpy()

        if vals.dtype.type is np.object_:
            # Column has an object dtype which Torch can't handle, so we try to
            # tensorize each column element and then stack the resulting tensors.
            tensors = [tensorize(x, dtype) for x in vals]
            try:
                return torch.stack(tensors)
            except RuntimeError:
                # NOTE: RuntimeError is raised when trying to stack ragged tensors.
                # Try to coerce the tensor to a nested tensor, if possible.
                # If this fails, the exception will be propagated up to the caller.
                return torch.nested_tensor(tensors)
        else:
            return torch.as_tensor(vals, dtype=dtype)

    def get_tensor_for_columns(columns, dtype):
        # Convert each requested column to a tensor, then concatenate along
        # the feature axis (after unsqueezing to (N, 1) if requested).
        feature_tensors = []

        if columns:
            batch = data_batch[columns]
        else:
            batch = data_batch

        for col in batch.columns:
            col_vals = batch[col].values
            try:
                t = tensorize(col_vals, dtype=dtype)
            except Exception as e:
                raise ValueError(
                    f"Failed to convert column {col} to a Torch Tensor of dtype "
                    f"{dtype}. See above exception chain for the exact failure."
                ) from e
            if unsqueeze:
                t = t.unsqueeze(1)
            feature_tensors.append(t)

        if len(feature_tensors) > 1:
            feature_tensor = torch.cat(feature_tensors, dim=1)
        else:
            feature_tensor = feature_tensors[0]
        return feature_tensor

    if multi_input:
        # BUG FIX: previously used an exact `type(...) not in [list, tuple]`
        # check, which wrongly re-broadcast list/tuple *subclasses*; isinstance
        # accepts them as per-group dtype sequences.
        if not isinstance(column_dtypes, (list, tuple)):
            column_dtypes = [column_dtypes] * len(columns)
        return [
            get_tensor_for_columns(columns=subcolumns, dtype=dtype)
            for subcolumns, dtype in zip(columns, column_dtypes)
        ]
    else:
        return get_tensor_for_columns(columns=columns, dtype=column_dtypes)
def convert_ndarray_to_torch_tensor(
    ndarray: np.ndarray,
    dtype: Optional[torch.dtype] = None,
    device: Optional[Union[str, "torch.device"]] = None,
    pin_memory: bool = False,
) -> torch.Tensor:
    """Convert a NumPy ndarray to a Torch Tensor.

    Args:
        ndarray: A NumPy ndarray that we wish to convert to a Torch Tensor.
        dtype: A Torch dtype for the created tensor; if None, the dtype will be
            inferred from the NumPy ndarray data.
        device: The device on which the tensor(s) should be placed; if None, the Torch
            tensor(s) will be constructed on the CPU.
        pin_memory: Whether to pin the memory of the created tensors.

    Returns:
        A Torch Tensor.

    Raises:
        RuntimeError: If the (unwrapped) ndarray still has object dtype.
    """
    ndarray = _unwrap_ndarray_object_type_if_needed(ndarray)

    # Object dtype cannot be converted into PyTorch Tensor.
    if ndarray.dtype.type is np.object_:
        raise RuntimeError(
            "Numpy array of object dtype cannot be converted to a Torch Tensor. This "
            "may because the numpy array is a ragged tensor--it contains items of "
            "different sizes. If using `iter_torch_batches()` API, you can pass in a "
            "`collate_fn` argument to specify custom logic to convert the Numpy array "
            "batch to a Torch tensor batch."
        )

    # The numpy array is not always writeable as it can come from the Ray object store.
    # Numpy will throw a verbose warning here, which we suppress, as we don't write
    # to the tensors. We also don't want to copy the array to avoid memory overhead.
    # Original warning: https://github.com/pytorch/pytorch/blob/v1.13.0/
    # torch/csrc/utils/tensor_numpy.cpp#L198-L206
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        tensor = torch.as_tensor(ndarray, dtype=dtype, device=device)

    if not pin_memory:
        return tensor

    assert tensor.device.type == "cpu", (
        "Pin memory is only supported for CPU tensors. "
        f"Got device: {tensor.device} and pin_memory: {pin_memory}."
    )
    return tensor.pin_memory()
def convert_ndarray_batch_to_torch_tensor_batch(
    ndarrays: Union[np.ndarray, Dict[str, np.ndarray]],
    dtypes: Optional[Union[torch.dtype, Dict[str, torch.dtype]]] = None,
    device: Optional[Union[str, "torch.device"]] = None,
    pin_memory: bool = False,
) -> Union[torch.Tensor, Dict[str, torch.Tensor]]:
    """Convert a NumPy ndarray batch to a Torch Tensor batch.

    Args:
        ndarrays: A (dict of) NumPy ndarray(s) that we wish to convert to a Torch Tensor.
        dtypes: A (dict of) Torch dtype(s) for the created tensor; if None, the dtype
            will be inferred from the NumPy ndarray data.
        device: The device on which the tensor(s) should be placed; if None, the Torch
            tensor(s) will be constructed on the CPU.
        pin_memory: Whether to pin the memory of the created tensors.

    Returns:
        A (dict of) Torch Tensor(s).
    """
    if isinstance(ndarrays, np.ndarray):
        # Single-tensor batch: a dict of dtypes must contain exactly one entry.
        if isinstance(dtypes, dict):
            if len(dtypes) != 1:
                raise ValueError(
                    "When constructing a single-tensor batch, only a single dtype "
                    f"should be given, instead got: {dtypes}"
                )
            dtypes = next(iter(dtypes.values()))
        return convert_ndarray_to_torch_tensor(
            ndarrays,
            dtype=dtypes,
            device=device,
            pin_memory=pin_memory,
        )

    # Multi-tensor batch: convert each column with its resolved dtype.
    def _dtype_for(col_name):
        return dtypes[col_name] if isinstance(dtypes, dict) else dtypes

    return {
        col_name: convert_ndarray_to_torch_tensor(
            col_ndarray,
            dtype=_dtype_for(col_name),
            device=device,
            pin_memory=pin_memory,
        )
        for col_name, col_ndarray in ndarrays.items()
    }
def convert_ndarray_list_to_torch_tensor_list(
    ndarrays: Dict[str, List[np.ndarray]],
    dtypes: Optional[Union[torch.dtype, Dict[str, torch.dtype]]] = None,
    device: Optional[Union[str, "torch.device"]] = None,
    pin_memory: bool = False,
) -> Dict[str, List[torch.Tensor]]:
    """Convert a dict mapping column names to lists of ndarrays to Torch Tensors.

    Args:
        ndarrays: A dict mapping column names to lists of ndarrays that we wish to convert
            to Torch Tensors.
        dtypes: A (dict of) Torch dtype(s) for the created tensors; if None, the dtype
            will be inferred from the NumPy ndarray data.
        device: The device on which the tensor(s) should be placed; if None, the Torch
            tensor(s) will be constructed on the CPU.
        pin_memory: Whether to pin the memory of the created tensors.

    Returns:
        A dict mapping column names to lists of Tensors.
    """
    # Resolve the per-column dtype once, then convert each ndarray in turn.
    def _dtype_for(col_name):
        return dtypes[col_name] if isinstance(dtypes, dict) else dtypes

    return {
        col_name: [
            convert_ndarray_batch_to_torch_tensor_batch(
                ndarray,
                dtypes=_dtype_for(col_name),
                device=device,
                pin_memory=pin_memory,
            )
            for ndarray in col_ndarrays
        ]
        for col_name, col_ndarrays in ndarrays.items()
    }
def arrow_batch_to_tensors(
    batch: pyarrow.Table,
    dtypes: Optional[Union[torch.dtype, Dict[str, torch.dtype]]] = None,
    combine_chunks: bool = False,
    pin_memory: bool = False,
    threadpool: Optional[ThreadPoolExecutor] = None,
) -> Union[Dict[str, torch.Tensor], Dict[str, List[torch.Tensor]]]:
    """Convert PyArrow batch to PyTorch tensors.

    Args:
        batch: PyArrow batch to convert.
        dtypes: A (dict of) Torch dtype(s) for the created tensors; if None, the dtype
            will be inferred from the NumPy ndarray data.
        combine_chunks: If True, combine chunks in Arrow batch before converting to
            tensors (one tensor per column). If False, each Arrow chunk becomes its
            own tensor.
        pin_memory: Whether to pin the memory of the created tensors.
        threadpool: Optional ThreadPoolExecutor for parallel processing. If provided,
            columns/arrays will be processed in parallel. If None, processing is
            sequential.

    Returns:
        When combine_chunks=True: A dictionary of column name to single tensor.
        When combine_chunks=False: A dictionary of column name to list of tensors.
    """
    from ray.data._internal.arrow_block import ArrowBlockAccessor
    from ray.data._internal.arrow_ops import transform_pyarrow

    if combine_chunks:
        # Materialize each column as a single contiguous ndarray first.
        numpy_batch = ArrowBlockAccessor(batch).to_batch_format("numpy")
        num_columns = len(numpy_batch)
        if num_columns > 1 and threadpool is not None:
            # Process columns in parallel using the provided threadpool.
            def process_column(
                col_name_col_array: Tuple[str, np.ndarray]
            ) -> Tuple[str, torch.Tensor]:
                col_name, col_array = col_name_col_array
                return col_name, convert_ndarray_batch_to_torch_tensor_batch(
                    col_array,
                    dtypes=dtypes[col_name] if isinstance(dtypes, dict) else dtypes,
                    pin_memory=pin_memory,
                )

            # Submit all columns to the threadpool and collect results.
            processed_cols = threadpool.map(process_column, numpy_batch.items())
            return dict(processed_cols)
        else:
            # Sequential processing for a single column or no threadpool.
            return {
                col_name: convert_ndarray_batch_to_torch_tensor_batch(
                    col_array,
                    dtypes=dtypes[col_name] if isinstance(dtypes, dict) else dtypes,
                    pin_memory=pin_memory,
                )
                for col_name, col_array in numpy_batch.items()
            }
    else:
        # One ndarray per Arrow chunk per column.
        numpy_list = transform_pyarrow.table_to_numpy_dict_chunked(
            batch,
        )
        # Count total number of arrays across all columns to decide whether
        # parallelism is worthwhile.
        total_arrays = sum(len(arrays) for arrays in numpy_list.values())
        if total_arrays > 1 and threadpool is not None:
            # Process arrays in parallel using the provided threadpool.
            def process_array(
                array_item: Tuple[str, int, np.ndarray]
            ) -> Tuple[str, int, torch.Tensor]:
                col_name, array_index, array = array_item
                return (
                    col_name,
                    array_index,
                    convert_ndarray_batch_to_torch_tensor_batch(
                        array,
                        dtypes=dtypes[col_name] if isinstance(dtypes, dict) else dtypes,
                        pin_memory=pin_memory,
                    ),
                )

            # Flatten arrays with column name and index so results can be
            # placed back into per-column order after the parallel map.
            array_items = [
                (col_name, idx, array)
                for col_name, arrays in numpy_list.items()
                for idx, array in enumerate(arrays)
            ]
            # Submit all arrays to the threadpool and collect results.
            processed_arrays = list(threadpool.map(process_array, array_items))
            # Initialize result with all columns from numpy_list, including empty
            # ones; pre-allocate lists of the correct size for each column.
            result: Dict[str, List[torch.Tensor]] = {
                col_name: [None] * len(arrays)
                for col_name, arrays in numpy_list.items()
            }
            # Populate result with processed tensors, preserving chunk order.
            for col_name, array_index, tensor in processed_arrays:
                result[col_name][array_index] = tensor
            return result
        else:
            # Sequential processing.
            return convert_ndarray_list_to_torch_tensor_list(
                numpy_list,
                dtypes=dtypes,
                pin_memory=pin_memory,
            )
@torch.no_grad()
def concat_tensors_to_device(
    tensor_sequence: Sequence[torch.Tensor],
    device: Optional[Union[str, "torch.device"]] = None,
    non_blocking: bool = DEFAULT_TENSOR_NON_BLOCKING_TRANSFER,
) -> torch.Tensor:
    """Stack a sequence of tensors into one contiguous tensor on the target device.

    All tensors must share the same dtype and the same shape[1:]; they are
    concatenated along the first (batch) dimension.

    Args:
        tensor_sequence: Sequence of tensors to stack.
        device: The device to move tensors to. If None, the result is allocated
            on the default device.
        non_blocking: If True, perform device transfer without forcing a
            synchronization.

    Returns:
        A contiguous tensor on the target device.
    """
    assert (
        tensor_sequence
    ), f"Cannot stack empty sequence of tensors. Received: {tensor_sequence}"
    assert all(
        isinstance(t, torch.Tensor) for t in tensor_sequence
    ), "All items must be torch.Tensor. Found invalid types: " + str(
        [type(t) for t in tensor_sequence if not isinstance(t, torch.Tensor)]
    )
    # Fast path: a single tensor already on the right device needs no copy.
    if len(tensor_sequence) == 1 and (
        device is None or tensor_sequence[0].device == torch.device(device)
    ):
        return tensor_sequence[0]
    # Extract dtype/trailing shape from the first tensor once; the previous
    # version computed these twice under different names.
    first = tensor_sequence[0]
    dtype = first.dtype
    shape_tail = first.shape[1:]
    assert all(t.dtype == dtype for t in tensor_sequence), (
        "All tensors must have the same dtype. "
        f"Expected: {dtype}, got: {[t.dtype for t in tensor_sequence]}"
    )
    assert all(t.shape[1:] == shape_tail for t in tensor_sequence), (
        "All tensors must have the same shape[1:]. "
        f"Expected: {shape_tail}, got: {[t.shape[1:] for t in tensor_sequence]}"
    )
    total_rows = sum(t.shape[0] for t in tensor_sequence)
    # Allocate the destination once on the target device, then copy each chunk
    # into its row slice — avoids an intermediate concatenation on the source
    # device.
    result = torch.empty((total_rows, *shape_tail), dtype=dtype, device=device)
    row_start = 0
    for t in tensor_sequence:
        row_end = row_start + t.shape[0]
        result[row_start:row_end].copy_(t, non_blocking=non_blocking)
        row_start = row_end
    return result
def _get_type_str(batch: Any) -> str:
"""Get a string representation of the possibly nested type of the batch.
>>> import torch
>>> _get_type_str([1, 2, "???"])
'list[int | str]'
>>> _get_type_str({"a": [1, 2, 3], "b": 4})
'dict[str, int | list[int]]'
>>> _get_type_str({"a": torch.tensor(1), "b": [torch.tensor(2)]})
'dict[str, Tensor | list[Tensor]]'
>>> _get_type_str({"a": torch.tensor(1), "b": {"c": torch.tensor(2)}})
'dict[str, Tensor | dict[str, Tensor]]'
"""
curr_type = type(batch).__name__
if isinstance(batch, (list, tuple)):
val_types = " | ".join(sorted({_get_type_str(v) for v in batch}))
invalid_type_str = f"{curr_type}[{val_types}]"
elif isinstance(batch, dict):
val_types = " | ".join(sorted({_get_type_str(v) for v in batch.values()}))
invalid_type_str = f"{curr_type}[str, {val_types}]"
else:
invalid_type_str = curr_type
return invalid_type_str
@torch.no_grad()
def move_tensors_to_device(
    batch: TensorBatchType,
    device: Optional[Union[str, "torch.device"]] = None,
    non_blocking: bool = DEFAULT_TENSOR_NON_BLOCKING_TRANSFER,
) -> TensorBatchReturnType:
    """Move tensors to the specified device.

    Concatenate nested lists/tuples of tensors along the first (batch) dimension.
    For example, for the input
    ((feature_0_chunk_0,), (feature_1_chunk_0, feature_1_chunk_1))
    the output will be (feature_0_chunk_0, feature_1_chunk_0+1)
    where each feature is concatenated along the batch dimension.

    Args:
        batch: A tensor or collection of tensors to move to device. Can be:
            - A single tensor
            - A sequence of tensors
            - A sequence of sequences of tensors. The inner sequence of tensors is
              combined during GPU transfer.
            - A mapping (e.g., dict) of keys to tensors or sequences of tensors. The
              sequence of tensors is combined during GPU transfer.
        device: The device to move tensors to. If None, tensors are not moved.
        non_blocking: If True, perform device transfer without forcing a
            synchronization.

    Returns:
        The input tensors moved to the specified device.

    Raises:
        ValueError: If `batch` matches none of the supported layouts.
    """
    # No target device: return the input unchanged (no copies are made).
    if device is None:
        return batch
    # NOTE(review): branch order matters here — the `_is_*` predicates are
    # defined elsewhere in this file; presumably tensor/sequence checks must
    # run before the mapping checks. Confirm before reordering.
    if _is_tensor(batch):
        return batch.to(device, non_blocking=non_blocking)
    elif _is_tensor_sequence(batch):
        # Preserve the caller's sequence type (list vs. tuple).
        return type(batch)([t.to(device, non_blocking=non_blocking) for t in batch])
    elif _is_nested_tensor_sequence(batch):
        # Each inner sequence of chunks is fused into one tensor on `device`.
        return type(batch)(
            [concat_tensors_to_device(t, device, non_blocking) for t in batch]
        )
    elif _is_tensor_mapping(batch):
        return {k: t.to(device, non_blocking=non_blocking) for k, t in batch.items()}
    elif _is_tensor_sequence_mapping(batch):
        # Values are sequences of chunks: fuse each into one tensor on `device`.
        return {
            k: concat_tensors_to_device(v, device, non_blocking)
            for k, v in batch.items()
        }
    else:
        raise ValueError(
            f"Invalid input type: {_get_type_str(batch)}.\n"
            "Expected one of the following: "
            "torch.Tensor, "
            "List/Tuple[torch.Tensor], "
            "Dict[str, torch.Tensor], "
            "Mapping[str, List/Tuple[torch.Tensor]]"
        )
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/util/torch_utils.py",
"license": "Apache License 2.0",
"lines": 455,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/_internal/progress/tqdm_progress.py | import logging
import typing
from typing import Dict, List, Optional
from ray.data._internal.execution.operators.input_data_buffer import InputDataBuffer
from ray.data._internal.execution.operators.sub_progress import SubProgressBarMixin
from ray.data._internal.execution.streaming_executor_state import (
format_op_state_summary,
)
from ray.data._internal.progress.base_progress import (
BaseExecutionProgressManager,
BaseProgressBar,
NoopSubProgressBar,
)
from ray.data._internal.progress.progress_bar import ProgressBar
if typing.TYPE_CHECKING:
from ray.data._internal.execution.resource_manager import ResourceManager
from ray.data._internal.execution.streaming_executor_state import OpState, Topology
logger = logging.getLogger(__name__)
class TqdmSubProgressBar(ProgressBar):
    """Thin wrapper to provide helper interface for TqdmExecutionProgressManager"""

    def __init__(
        self,
        name: str,
        total: Optional[int],
        unit: str,
        position: int = 0,
        enabled: Optional[bool] = None,
        max_name_length: int = 100,
    ):
        # patch to make max_name_length configurable from ProgressManager.
        # NOTE(review): assigned before super().__init__ so the base class
        # presumably reads MAX_NAME_LENGTH while formatting `name` — confirm
        # against ProgressBar.
        self.MAX_NAME_LENGTH = max_name_length
        super().__init__(name, total, unit, position, enabled)

    def update_absolute(self, completed: int, total_rows: Optional[int] = None) -> None:
        """Set the bar to an absolute completed count (not an increment).

        Args:
            completed: Absolute number of units completed so far.
            total_rows: New total, if known; replaces the bar's current total.
        """
        # No-op when the underlying tqdm bar is disabled/absent.
        if self._bar:
            self._progress = completed
            if total_rows is not None:
                self._bar.total = total_rows
            if self._bar.total is not None and self._progress > self._bar.total:
                # If the progress goes over 100%, update the total.
                self._bar.total = self._progress
            self._bar.n = self._progress
class TqdmExecutionProgressManager(BaseExecutionProgressManager):
    """Execution progress display using tqdm.

    Builds one top-level bar for the whole dataset plus, when enabled, one bar
    per operator and one per operator sub-stage. Bars are stacked vertically
    via tqdm's `position` argument.
    """

    def __init__(
        self,
        dataset_id: str,
        topology: "Topology",
        show_op_progress: bool,
        verbose_progress: bool,
    ):
        self._dataset_id = dataset_id
        # Flat list of every bar besides the total one, used for bulk
        # refresh/close.
        self._sub_progress_bars: List[BaseProgressBar] = []
        # Operator state -> its dedicated bar (only populated when per-op
        # progress is enabled for that operator).
        self._op_display: Dict["OpState", TqdmSubProgressBar] = {}
        # Running count that doubles as the next bar's terminal row position.
        num_progress_bars = 0
        # NOTE(review): MAX_NAME_LENGTH is not defined in this class; it
        # presumably comes from BaseExecutionProgressManager — confirm.
        self._total = TqdmSubProgressBar(
            name=f"Running Dataset {self._dataset_id}.",
            total=None,
            unit="row",
            position=num_progress_bars,
            max_name_length=self.MAX_NAME_LENGTH,
            enabled=True,
        )
        num_progress_bars += 1
        for state in topology.values():
            op = state.op
            # Input buffers emit no interesting progress; skip them.
            if isinstance(op, InputDataBuffer):
                continue
            # Fall back to 1 when the op cannot estimate its total output rows.
            total = op.num_output_rows_total() or 1
            contains_sub_progress_bars = isinstance(op, SubProgressBarMixin)
            sub_progress_bar_enabled = show_op_progress and (
                contains_sub_progress_bars or verbose_progress
            )
            # create operator progress bar
            if sub_progress_bar_enabled:
                pg = TqdmSubProgressBar(
                    name=f"- {op.name}",
                    total=total,
                    unit="row",
                    position=num_progress_bars,
                    max_name_length=self.MAX_NAME_LENGTH,
                )
                num_progress_bars += 1
                self._op_display[state] = pg
                self._sub_progress_bars.append(pg)
            if not contains_sub_progress_bars:
                continue
            sub_pg_names = op.get_sub_progress_bar_names()
            if sub_pg_names is None:
                continue
            # One bar per declared sub-stage; a Noop placeholder is installed
            # when display is disabled so the operator can still report to it.
            for name in sub_pg_names:
                if sub_progress_bar_enabled:
                    pg = TqdmSubProgressBar(
                        name=f"  *- {name}",
                        total=total,
                        unit="row",
                        position=num_progress_bars,
                        max_name_length=self.MAX_NAME_LENGTH,
                        enabled=True,
                    )
                    num_progress_bars += 1
                else:
                    pg = NoopSubProgressBar(
                        name=f"  *- {name}",
                        max_name_length=self.MAX_NAME_LENGTH,
                    )
                op.set_sub_progress_bar(name, pg)
                self._sub_progress_bars.append(pg)

    # Management
    def start(self):
        """No-op: tqdm bars begin rendering as soon as they are constructed."""
        # tqdm is automatically started
        pass

    def refresh(self):
        """Force a redraw of the total bar and every sub-bar."""
        self._total.refresh()
        for pg in self._sub_progress_bars:
            pg.refresh()

    def close_with_finishing_description(self, desc: str, success: bool):
        """Close all bars, leaving `desc` as the total bar's final label."""
        del success  # unused
        self._total.set_description(desc)
        self._total.close()
        for pg in self._sub_progress_bars:
            pg.close()

    # Total Progress
    def update_total_progress(self, new_rows: int, total_rows: Optional[int]):
        """Advance the dataset-level bar by `new_rows`, updating its total."""
        self._total.update(new_rows, total_rows)

    def update_total_resource_status(self, resource_status: str):
        """Show current resource usage in the total bar's description."""
        # NOTE(review): this label says "Running Dataset:" (with a colon) while
        # __init__ uses "Running Dataset ..." without one — likely an
        # unintentional inconsistency; confirm before changing either string.
        desc = f"Running Dataset: {self._dataset_id}. {resource_status}"
        self._total.set_description(desc)

    # Operator Progress
    def update_operator_progress(
        self, opstate: "OpState", resource_manager: "ResourceManager"
    ):
        """Refresh one operator's bar from its current metrics, if it has one."""
        pg = self._op_display.get(opstate)
        if pg is not None:
            pg.update_absolute(
                opstate.op.metrics.row_outputs_taken, opstate.op.num_output_rows_total()
            )
            summary_str = format_op_state_summary(opstate, resource_manager)
            pg.set_description(f"- {opstate.op.name}: {summary_str}")
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/progress/tqdm_progress.py",
"license": "Apache License 2.0",
"lines": 138,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/data/doc_code/working-with-llms/omni_audio_example.py | """
This file serves as a documentation example and CI test for VLM batch inference with audio.
Structure:
1. Infrastructure setup: Dataset compatibility patches, dependency handling
2. Docs example (between __vlm_audio_example_start/end__): Embedded in Sphinx docs via literalinclude
3. Test validation and cleanup
"""
'''
# __audio_message_format_example_start__
"""Supported audio input formats: audio URL, audio binary data"""
{
"messages": [
{
"role": "system",
"content": "Provide a detailed description of the audio."
},
{
"role": "user",
"content": [
{"type": "text", "text": "Describe what happens in this audio."},
# Option 1: Provide audio URL
{"type": "audio_url", "audio_url": {"url": "https://example.com/audio.wav"}},
# Option 2: Provide audio binary data
{"type": "input_audio", "input_audio": {"data": audio_base64, "format": "wav"}},
]
},
]
}
# __audio_message_format_example_end__
'''
# __omni_audio_example_start__
import ray
from ray.data.llm import (
vLLMEngineProcessorConfig,
build_processor,
)
# __omni_audio_config_example_start__
audio_processor_config = vLLMEngineProcessorConfig(
model_source="Qwen/Qwen2.5-Omni-3B",
task_type="generate",
engine_kwargs=dict(
limit_mm_per_prompt={"audio": 1},
),
batch_size=16,
accelerator_type="L4",
concurrency=1,
prepare_multimodal_stage={
"enabled": True,
"chat_template_content_format": "openai",
},
chat_template_stage=True,
tokenize_stage=True,
detokenize_stage=True,
)
# __omni_audio_config_example_end__
# __omni_audio_preprocess_example_start__
def audio_preprocess(row: dict) -> dict:
    """
    Preprocessing function for audio-language model inputs.

    Builds the payload expected by the Omni model from a dataset row:
    a system prompt with analysis instructions, a user turn carrying the
    text prompt plus the base64-encoded WAV audio, and sampling parameters.
    """
    system_message = {
        "role": "system",
        "content": "You are a helpful assistant that analyzes audio. "
        "Listen to the audio carefully and provide detailed descriptions.",
    }
    text_part = {
        "type": "text",
        "text": row["text"],
    }
    audio_part = {
        "type": "input_audio",
        "input_audio": {
            "data": row["audio_data"],
            "format": "wav",
        },
    }
    user_message = {
        "role": "user",
        "content": [text_part, audio_part],
    }
    return {
        "messages": [system_message, user_message],
        "sampling_params": {
            "temperature": 0.3,
            "max_tokens": 150,
            "detokenize": False,
        },
    }
def audio_postprocess(row: dict) -> dict:
    """Map the engine output row to a single `resp` field."""
    generated = row["generated_text"]
    return {"resp": generated}
# __omni_audio_preprocess_example_end__
def load_audio_dataset():
    # __omni_audio_load_dataset_example_start__
    """
    Load audio dataset from MRSAudio Hugging Face dataset.

    Returns a Ray Dataset of rows with `audio_data` (base64-encoded bytes) and
    `text` fields, or None if anything goes wrong while downloading.
    """
    try:
        from datasets import load_dataset
        from huggingface_hub import hf_hub_download
        import base64
        dataset_name = "MRSAudio/MRSAudio"
        dataset = load_dataset(dataset_name, split="train")
        audio_items = []
        # Limit to first 10 samples for the example
        num_samples = min(10, len(dataset))
        for i in range(num_samples):
            item = dataset[i]
            # Each row stores only a repo-relative path; download the actual
            # audio file from the dataset repo.
            audio_path = hf_hub_download(
                repo_id=dataset_name, filename=item["path"], repo_type="dataset"
            )
            with open(audio_path, "rb") as f:
                audio_bytes = f.read()
            # Base64-encode so the bytes survive the JSON-style row format.
            audio_base64 = base64.b64encode(audio_bytes).decode("utf-8")
            audio_items.append(
                {
                    "audio_data": audio_base64,
                    "text": item.get("text", "Describe this audio."),
                }
            )
        audio_dataset = ray.data.from_items(audio_items)
        return audio_dataset
    except Exception as e:
        # Broad catch is intentional for this docs example: any download or
        # parsing failure degrades to a skipped run rather than a crash.
        print(f"Error loading dataset: {e}")
        return None
    # __omni_audio_load_dataset_example_end__
def create_omni_audio_config():
    """Create Omni audio configuration.

    Returns a vLLMEngineProcessorConfig for Qwen2.5-Omni-3B with the
    multimodal preparation stage enabled.
    """
    # NOTE(review): mirrors the module-level `audio_processor_config` above but
    # additionally sets enforce_eager=True — keep the two in sync when editing.
    return vLLMEngineProcessorConfig(
        model_source="Qwen/Qwen2.5-Omni-3B",
        task_type="generate",
        engine_kwargs=dict(
            enforce_eager=True,
            # Allow at most one audio clip per prompt.
            limit_mm_per_prompt={"audio": 1},
        ),
        batch_size=16,
        accelerator_type="L4",
        concurrency=1,
        prepare_multimodal_stage={
            "enabled": True,
            "chat_template_content_format": "openai",
        },
        chat_template_stage=True,
        tokenize_stage=True,
        detokenize_stage=True,
    )
def run_omni_audio_example():
    # __omni_audio_run_example_start__
    """Run the complete Omni audio example workflow.

    Returns:
        A (config, processor, results) tuple, or (None, None, None) when the
        dataset could not be loaded.
    """
    config = create_omni_audio_config()
    audio_dataset = load_audio_dataset()
    # load_audio_dataset() returns a Dataset or None on failure; compare
    # against None explicitly instead of relying on Dataset truthiness.
    if audio_dataset is not None:
        # Build processor with preprocessing and postprocessing
        processor = build_processor(
            config, preprocess=audio_preprocess, postprocess=audio_postprocess
        )
        print("Omni audio processor configured successfully")
        print(f"Model: {config.model_source}")
        print(f"Has multimodal support: {config.prepare_multimodal_stage.get('enabled', False)}")
        result = processor(audio_dataset).take_all()
        return config, processor, result
    # __omni_audio_run_example_end__
    return None, None, None
# __omni_audio_example_end__
if __name__ == "__main__":
    # Run the example Omni audio workflow only if GPU is available
    try:
        import torch
        if torch.cuda.is_available():
            run_omni_audio_example()
        else:
            print("Skipping Omni audio example run (no GPU available)")
    except Exception as e:
        # Broad catch keeps CI green when torch/CUDA is absent or misconfigured.
        print(f"Skipping Omni audio example run due to environment error: {e}")
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/data/doc_code/working-with-llms/omni_audio_example.py",
"license": "Apache License 2.0",
"lines": 181,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/data/doc_code/working-with-llms/vlm_video_example.py | """
This file serves as a documentation example and CI test for VLM batch inference with videos.
Structure:
1. Infrastructure setup: Dataset compatibility patches, dependency handling
2. Docs example (between __vlm_video_example_start/end__): Embedded in Sphinx docs via literalinclude
3. Test validation and cleanup
"""
import os
'''
# __video_message_format_example_start__
"""Supported video input formats: video URL"""
{
"messages": [
{
"role": "system",
"content": "Provide a detailed description of the video."
},
{
"role": "user",
"content": [
{"type": "text", "text": "Describe what happens in this video."},
# Provide video URL
{"type": "video_url", "video_url": {"url": "https://example.com/video.mp4"}},
]
},
]
}
# __video_message_format_example_end__
'''
# __vlm_video_example_start__
import ray
from ray.data.llm import (
vLLMEngineProcessorConfig,
build_processor,
)
# __vlm_video_config_example_start__
video_processor_config = vLLMEngineProcessorConfig(
model_source="Qwen/Qwen3-VL-4B-Instruct",
engine_kwargs=dict(
tensor_parallel_size=4,
pipeline_parallel_size=1,
trust_remote_code=True,
limit_mm_per_prompt={"video": 1},
),
batch_size=1,
accelerator_type="L4",
concurrency=1,
prepare_multimodal_stage={
"enabled": True,
"model_config_kwargs": dict(
# See available model config kwargs at https://docs.vllm.ai/en/latest/api/vllm/config/#vllm.config.ModelConfig
allowed_local_media_path="/tmp",
),
},
chat_template_stage=True,
tokenize_stage=True,
detokenize_stage=True,
)
# __vlm_video_config_example_end__
# __vlm_video_preprocess_example_start__
def video_preprocess(row: dict) -> dict:
    """
    Preprocessing function for video-language model inputs.

    Builds the payload expected by the VLM from a dataset row: a system
    prompt with analysis instructions, a user turn carrying the text prompt
    plus the video URL, sampling parameters, and multimodal processor kwargs
    controlling frame/pixel budgets.
    """
    system_message = {
        "role": "system",
        "content": (
            "You are a helpful assistant that analyzes videos. "
            "Watch the video carefully and provide detailed descriptions."
        ),
    }
    text_part = {
        "type": "text",
        "text": row["text"],
    }
    video_part = {
        "type": "video_url",
        "video_url": {"url": row["video_url"]},
    }
    user_message = {
        "role": "user",
        "content": [text_part, video_part],
    }
    return {
        "messages": [system_message, user_message],
        "sampling_params": {
            "temperature": 0.3,
            "max_tokens": 150,
            "detokenize": False,
        },
        # Optional: Multimodal processor kwargs for video processing
        "mm_processor_kwargs": {
            "min_pixels": 28 * 28,
            "max_pixels": 1280 * 28 * 28,
            "fps": 1,
        },
    }
def video_postprocess(row: dict) -> dict:
    """Map the engine output row to a single `resp` field."""
    generated = row["generated_text"]
    return {"resp": generated}
# __vlm_video_preprocess_example_end__
def load_video_dataset():
    # __vlm_video_load_dataset_example_start__
    """
    Load video dataset from ShareGPTVideo Hugging Face dataset.

    Returns a Ray Dataset of rows with `video_path`, `video_url` and `text`
    fields, or None if the download or extraction fails.
    """
    try:
        from huggingface_hub import hf_hub_download
        import tarfile
        from pathlib import Path
        dataset_name = "ShareGPTVideo/train_raw_video"
        tar_path = hf_hub_download(
            repo_id=dataset_name,
            filename="activitynet/chunk_0.tar.gz",
            repo_type="dataset",
        )
        extract_dir = "/tmp/sharegpt_videos"
        os.makedirs(extract_dir, exist_ok=True)
        # Only extract once. Use rglob here to match the recursive discovery
        # below; the previous non-recursive glob never saw videos extracted
        # into subdirectories and re-extracted the archive on every call.
        if not any(Path(extract_dir).rglob("*.mp4")):
            # NOTE(review): extractall on a downloaded archive is exposed to
            # path traversal for untrusted archives; consider the
            # `filter="data"` argument (Python 3.12+).
            with tarfile.open(tar_path, "r:gz") as tar:
                tar.extractall(extract_dir)
        video_files = list(Path(extract_dir).rglob("*.mp4"))
        # Limit to first 10 videos for the example
        video_files = video_files[:10]
        video_dataset = ray.data.from_items(
            [
                {
                    "video_path": str(video_file),
                    # file:// URL form consumed by the vLLM multimodal stage.
                    "video_url": f"file://{video_file}",
                    "text": "Describe what happens in this video.",
                }
                for video_file in video_files
            ]
        )
        return video_dataset
    except Exception as e:
        # Broad catch is intentional for this docs example: any download or
        # extraction failure degrades to a skipped run rather than a crash.
        print(f"Error loading dataset: {e}")
        return None
    # __vlm_video_load_dataset_example_end__
def create_vlm_video_config():
    """Create VLM video configuration.

    Returns a vLLMEngineProcessorConfig for Qwen3-VL-4B-Instruct with the
    multimodal preparation stage enabled.
    """
    # NOTE(review): duplicates the module-level `video_processor_config` above;
    # keep the two in sync when editing either one.
    return vLLMEngineProcessorConfig(
        model_source="Qwen/Qwen3-VL-4B-Instruct",
        engine_kwargs=dict(
            tensor_parallel_size=4,
            pipeline_parallel_size=1,
            trust_remote_code=True,
            # Allow at most one video per prompt.
            limit_mm_per_prompt={"video": 1},
        ),
        batch_size=1,
        accelerator_type="L4",
        concurrency=1,
        prepare_multimodal_stage={
            "enabled": True,
            "model_config_kwargs": dict(
                # See available model config kwargs at https://docs.vllm.ai/en/latest/api/vllm/config/#vllm.config.ModelConfig
                allowed_local_media_path="/tmp",
            ),
        },
        chat_template_stage=True,
        tokenize_stage=True,
        detokenize_stage=True,
    )
def run_vlm_video_example():
    # __vlm_video_run_example_start__
    """Run the complete VLM video example workflow.

    Returns:
        A (config, processor, results) tuple, or (None, None, None) when the
        dataset could not be loaded.
    """
    config = create_vlm_video_config()
    video_dataset = load_video_dataset()
    # load_video_dataset() returns a Dataset or None on failure; compare
    # against None explicitly instead of relying on Dataset truthiness.
    if video_dataset is not None:
        # Build processor with preprocessing and postprocessing
        processor = build_processor(
            config, preprocess=video_preprocess, postprocess=video_postprocess
        )
        print("VLM video processor configured successfully")
        print(f"Model: {config.model_source}")
        print(f"Has multimodal support: {config.prepare_multimodal_stage.get('enabled', False)}")
        result = processor(video_dataset).take_all()
        return config, processor, result
    # __vlm_video_run_example_end__
    return None, None, None
# __vlm_video_example_end__
if __name__ == "__main__":
    # Run the example VLM video workflow only if GPU is available
    try:
        import torch
        if torch.cuda.is_available():
            run_vlm_video_example()
        else:
            print("Skipping VLM video example run (no GPU available)")
    except Exception as e:
        # Broad catch keeps CI green when torch/CUDA is absent or misconfigured.
        print(f"Skipping VLM video example run due to environment error: {e}")
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/data/doc_code/working-with-llms/vlm_video_example.py",
"license": "Apache License 2.0",
"lines": 199,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:rllib/examples/envs/classes/ten_step_error_env.py | import logging
import gymnasium as gym
logger = logging.getLogger(__name__)
class TenStepErrorEnv(gym.Env):
    """An environment that lets you sample 1 episode and raises an error during the next one.

    The expectation to the env runner is that it will sample one episode and recreate the env
    to sample the second one.
    """

    def __init__(self, config):
        super().__init__()
        self.step_count = 0
        self.last_eps_errored = False
        self.observation_space = gym.spaces.Box(low=0, high=1, shape=(1,))
        self.action_space = gym.spaces.Box(low=0, high=1, shape=(1,))

    def reset(self, seed=None, options=None):
        # Only the step counter rewinds; the error flag survives resets so
        # that the episode after the clean one can fail.
        self.step_count = 0
        info = {"last_eps_errored": self.last_eps_errored}
        return self.observation_space.sample(), info

    def step(self, action):
        self.step_count += 1
        # Second episode: blow up at step 10 before sampling an observation.
        if self.step_count == 10 and self.last_eps_errored:
            raise Exception("Test error")
        obs = self.observation_space.sample()
        if self.step_count == 10:
            # First episode: terminate cleanly and arm the error for the next one.
            self.last_eps_errored = True
            return obs, 0.0, True, False, {"last_eps_errored": False}
        return obs, 0.0, False, False, {"last_eps_errored": self.last_eps_errored}
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/examples/envs/classes/ten_step_error_env.py",
"license": "Apache License 2.0",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/llm/_internal/serve/core/ingress/mixins/collective_rpc.py | """Collective RPC ingress mixin.
Provides HTTP endpoint for collective RPC operations across all replicas
and their workers, enabling RLHF workflows where a trainer forms a single
NCCL process group with all TP/PP workers across all replicas.
"""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field
from ray.llm._internal.serve.core.ingress.mixins.broadcastable import (
ReplicaBroadcastable,
)
from ray.llm._internal.serve.observability.logging import get_logger
logger = get_logger(__name__)
# --- Pydantic Models ---
class CollectiveRpcRequest(BaseModel):
    """Request to execute a collective RPC on all replicas."""

    # Served model ID whose replicas should run the RPC.
    model: str
    # Name of the worker method to invoke on every worker.
    method: str
    # Positional arguments forwarded to the worker method.
    args: List[Any] = Field(default_factory=list)
    # Keyword arguments forwarded to the worker method.
    kwargs: Dict[str, Any] = Field(default_factory=dict)
    # Per-call timeout in seconds. NOTE(review): None presumably means no
    # timeout — confirm against the replica's collective_rpc implementation.
    timeout: Optional[float] = None
class ReplicaResult(BaseModel):
    """Result from a single replica containing all worker results."""

    # Index of the replica in broadcast order (for debugging).
    replica: int
    # One entry per worker on that replica.
    worker_results: List[Any]
class CollectiveRpcResponse(BaseModel):
    """Response containing results from all replicas."""

    # One ReplicaResult per replica that executed the RPC.
    results: List[ReplicaResult]
# --- Mixin ---
class CollectiveRpcIngressMixin(ReplicaBroadcastable):
    """Ingress mixin for /collective_rpc endpoint.

    Adds control plane endpoint for executing collective RPC calls across
    all replicas and their workers. This is used for RLHF workflows where
    a trainer needs to communicate with all TP/PP workers across all replicas.
    """

    # Endpoint name -> route decorator factory; presumably consumed by the
    # ingress base class to register the POST route — confirm against the
    # ENDPOINTS handling in the ingress implementation.
    ENDPOINTS = {
        "collective_rpc": lambda app: app.post("/collective_rpc"),
    }

    async def collective_rpc(self, body: CollectiveRpcRequest) -> CollectiveRpcResponse:
        """Execute a collective RPC on all replicas for the specified model.

        This broadcasts the RPC call to all replicas, and each replica
        executes the call on all its workers (TP/PP ranks).

        Args:
            body: Request containing the model ID, method name, args, kwargs,
                and optional timeout.

        Returns:
            CollectiveRpcResponse with results from all replicas.
        """
        logger.info(
            "Executing collective_rpc '%s' for model %s with args=%s, kwargs=%s",
            body.method,
            body.model,
            body.args,
            body.kwargs,
        )
        # Broadcast to all replicas - each replica returns a list of worker results
        replica_results = await self._broadcast_to_replicas(
            body.model,
            "collective_rpc",
            kwargs={
                # args arrives as a JSON list; the replica-side API presumably
                # expects a tuple — hence the conversion.
                "method": body.method,
                "args": tuple(body.args),
                "kwargs": body.kwargs,
                "timeout": body.timeout,
            },
        )
        # Format results with replica index for debugging; `or []` guards
        # against a None broadcast result or None per-replica payloads.
        results = [
            ReplicaResult(replica=i, worker_results=worker_results or [])
            for i, worker_results in enumerate(replica_results or [])
        ]
        return CollectiveRpcResponse(results=results)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/_internal/serve/core/ingress/mixins/collective_rpc.py",
"license": "Apache License 2.0",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:release/llm_tests/serve/rlhf_utils.py | import torch
import ray
import requests
from typing import Optional
from transformers import AutoModelForCausalLM
def stateless_init_process_group(master_address, master_port, rank, world_size, device):
    """Create a stateless process group for NCCL communication.

    vLLM provides StatelessProcessGroup to create a process group
    without considering the global process group in torch.distributed.

    Args:
        master_address: Host used for the group rendezvous.
        master_port: Port used for the group rendezvous.
        rank: This participant's rank within the new group.
        world_size: Total number of participants in the group.
        device: Torch device the NCCL communicator binds to.

    Returns:
        A PyNcclCommunicator wrapping the new stateless group.
    """
    from vllm.distributed.device_communicators.pynccl import PyNcclCommunicator
    from vllm.distributed.utils import StatelessProcessGroup
    pg = StatelessProcessGroup.create(
        host=master_address, port=master_port, rank=rank, world_size=world_size
    )
    pynccl = PyNcclCommunicator(pg, device=device)
    return pynccl
class WorkerExtension:
    """Extension class for vLLM workers to enable weight updates.

    This class is inherited by vLLM workers when worker_extension_cls is set.
    It provides methods for initializing NCCL process groups and receiving
    weight updates from an external trainer.
    """

    def init_weight_update_group(
        self, master_address, master_port, rank_offset, world_size
    ):
        """Initialize the NCCL process group for weight synchronization.

        Args:
            master_address: Rendezvous host of the trainer (rank 0).
            master_port: Rendezvous port of the trainer.
            rank_offset: Offset added to this worker's local rank so that
                inference workers occupy the ranks after the trainer.
            world_size: Total participants (trainer + all inference workers).
        """
        from vllm.distributed.parallel_state import get_world_group

        # Global rank = this worker's rank inside the engine + the offset.
        rank = get_world_group().rank + rank_offset
        self.model_update_group = stateless_init_process_group(
            master_address,
            master_port,
            rank,
            world_size,
            self.device,
        )

    def update_weight(self, name, dtype_name, shape):
        """Receive a weight tensor broadcast from the trainer and load it.

        Args:
            name: Parameter name as reported by the trainer model.
            dtype_name: Torch dtype name (e.g. "float16").
            shape: Shape of the incoming tensor.
        """
        dtype = getattr(torch, dtype_name)
        # Staging buffer on GPU; rank 0 (the trainer) broadcasts into it.
        weight = torch.empty(shape, dtype=dtype, device="cuda")
        self.model_update_group.broadcast(
            weight, src=0, stream=torch.cuda.current_stream()
        )
        self.model_runner.model.load_weights(weights=[(name, weight)])
        # Free the staging buffer eagerly to limit GPU memory pressure.
        del weight

    def check_weights_changed(self):
        """Check if weights have been updated to zero (for testing).

        Returns:
            True iff every model parameter is all-zeros.
        """
        # Short-circuit on the first non-zero parameter instead of scanning
        # every tensor unconditionally as the previous version did.
        return all(
            torch.allclose(p, torch.zeros_like(p))
            for _, p in self.model_runner.model.named_parameters()
        )
@ray.remote(num_gpus=1)
class TrainerActor:
    """Simulates a trainer that updates model weights via RLHF.

    This actor:
    1. Loads the same model as the inference engine
    2. Sets up an NCCL process group with all inference workers
    3. Broadcasts weight updates to all workers
    """

    def __init__(self, model_id: str, base_url: str):
        # HF model ID; must match the model served by the inference engine.
        self.model_id = model_id
        # Base URL of the serve ingress exposing /collective_rpc.
        self._base_url = base_url
        # PyNcclCommunicator; created lazily by setup_weight_sync_group().
        self.weight_sync_group = None
        self.model = AutoModelForCausalLM.from_pretrained(model_id)
        # The actor reserves one GPU (num_gpus=1); the trainer pins to it.
        self.model.to("cuda:0")

    def setup_weight_sync_group(
        self,
        tp_size: int,
        num_replicas: int,
    ):
        """Set up the NCCL process group between trainer and inference workers.

        Args:
            tp_size: Tensor parallel size of each replica
            num_replicas: Number of inference replicas
        """
        import concurrent.futures
        from vllm.utils.network_utils import get_ip, get_open_port

        # World size = 1 trainer + (tp_size * num_replicas) inference workers
        world_size = 1 + (tp_size * num_replicas)
        rank_offset = 1  # Inference workers start at rank 1
        master_address = get_ip()
        master_port = get_open_port()
        print(
            f"Setting up weight sync group: master={master_address}:{master_port}, "
            f"world_size={world_size}"
        )
        # Use ThreadPoolExecutor to run both operations concurrently
        # One thread calls the HTTP endpoint, another initializes local NCCL.
        # Both sides must enter the rendezvous concurrently — either call
        # blocks until the full group has joined.
        with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
            # Start HTTP call to init weight update group on inference workers
            http_future = executor.submit(
                self._call_collective_rpc_sync,
                "init_weight_update_group",
                [master_address, master_port, rank_offset, world_size],
            )
            # Initialize trainer's side of the process group (rank 0)
            nccl_future = executor.submit(
                stateless_init_process_group,
                master_address,
                master_port,
                0,
                world_size,
                torch.device("cuda:0"),
            )
            # Wait for both to complete
            self.weight_sync_group = nccl_future.result(timeout=120)
            http_result = http_future.result(timeout=120)
        print(f"Weight sync group initialized. HTTP response: {http_result}")

    def update_weights(self):
        """Zero out all weights and broadcast to inference workers.

        In a real RLHF loop, this would broadcast the actual trained weights.
        For testing, we zero out the weights to verify the sync worked.
        """
        import concurrent.futures

        # Use a single ThreadPoolExecutor for all parameters to avoid
        # creating/destroying many thread pools (one per parameter)
        with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
            for name, p in self.model.named_parameters():
                # Zero out weights for testing
                p.data.zero_()
                # e.g. "float16" from "torch.float16".
                dtype_name = str(p.dtype).split(".")[-1]
                # Start HTTP call to trigger update_weight on inference workers
                http_future = executor.submit(
                    self._call_collective_rpc_sync,
                    "update_weight",
                    [name, dtype_name, list(p.shape)],
                )
                # Broadcast the tensor via NCCL
                self.weight_sync_group.broadcast(
                    p, src=0, stream=torch.cuda.current_stream()
                )
                # Wait for HTTP call to complete before next parameter
                http_future.result(timeout=60)
        # Ensure all NCCL operations have completed
        torch.cuda.synchronize()

    def _call_collective_rpc_sync(
        self, method: str, args: Optional[list] = None, kwargs: Optional[dict] = None
    ):
        """Call the /collective_rpc endpoint synchronously.

        Args:
            method: Worker method name to execute on all replicas.
            args: Positional args for the worker method.
            kwargs: Keyword args for the worker method.

        Returns:
            The decoded JSON response from the ingress.
        """
        url = f"{self._base_url}/collective_rpc"
        data = {
            "model": self.model_id,
            "method": method,
            "args": args or [],
            "kwargs": kwargs or {},
        }
        response = requests.post(url, json=data, timeout=120)
        return response.json()
| {
"repo_id": "ray-project/ray",
"file_path": "release/llm_tests/serve/rlhf_utils.py",
"license": "Apache License 2.0",
"lines": 148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:release/llm_tests/serve/test_llm_serve_rlhf.py | """Test collective_rpc control plane API for Ray Serve LLM.
This test verifies that the DevIngress /collective_rpc endpoint works correctly
for RLHF-style weight synchronization workflows:
1. Server starts with worker_extension_cls for weight update methods
2. Trainer initializes NCCL process group with all inference workers
3. Trainer broadcasts weight updates to all workers via collective_rpc
4. Workers receive and apply the weight updates
5. Inference continues to work with updated weights
This demonstrates the core RLHF workflow where:
- Trainer and inference engine form a single NCCL communicator
- Weights are synchronized via high-bandwidth GPU-to-GPU transfer
- The /collective_rpc endpoint orchestrates the RPC across all replicas/workers
NOTE (Kourosh): This is part of a design in progress for integrating Ray Serve
LLM with RL workloads. The API is not public and won't be documented until the
end-to-end story is finalized. Class names and endpoint names may change.
"""
import time
import pytest
import ray
import requests
from openai import OpenAI
from ray import serve
from ray._common.test_utils import wait_for_condition
from ray.llm._internal.serve.core.ingress.dev_ingress import build_dev_openai_app
from ray.serve._private.constants import SERVE_DEFAULT_APP_NAME
from ray.serve.llm import LLMConfig, ModelLoadingConfig
from ray.serve.schema import ApplicationStatus
from rlhf_utils import TrainerActor
# Small model keeps the release test fast while still exercising weight sync.
MODEL_ID = "facebook/opt-125m"
# Default Ray Serve HTTP address.
BASE_URL = "http://localhost:8000"
# Two tensor-parallel workers per replica; with NUM_REPLICAS=1 the NCCL group
# has world size 3 (1 trainer + 2 workers).
TENSOR_PARALLEL_SIZE = 2
NUM_REPLICAS = 1
def get_llm_config() -> LLMConfig:
    """Build the LLMConfig used by the collective_rpc weight-sync test."""
    engine_kwargs = {
        "tensor_parallel_size": TENSOR_PARALLEL_SIZE,
        "enforce_eager": True,
        "enable_sleep_mode": True,
        # Worker extension for RLHF weight updates
        "worker_extension_cls": "rlhf_utils.WorkerExtension",
    }
    return LLMConfig(
        model_loading_config=ModelLoadingConfig(model_id=MODEL_ID),
        deployment_config={"num_replicas": NUM_REPLICAS},
        engine_kwargs=engine_kwargs,
    )
def is_default_app_running():
    """Return True once the default Serve application reports RUNNING."""
    try:
        app_status = serve.status().applications[SERVE_DEFAULT_APP_NAME].status
    except (KeyError, AttributeError):
        # App not registered yet, or status object not fully populated.
        return False
    return app_status == ApplicationStatus.RUNNING
def wait_for_server_ready(timeout: int = 240) -> None:
    """Wait for the server to be ready to handle requests.

    Polls POST /v1/completions every 2 seconds until it returns HTTP 200.

    Args:
        timeout: Maximum number of seconds to wait.

    Raises:
        TimeoutError: If no 200 response arrives within `timeout` seconds.
    """
    # The probe payload is loop-invariant; build it once.
    test_data = {
        "model": MODEL_ID,
        "prompt": "Hello",
        "max_tokens": 5,
    }
    start_time = time.time()
    while time.time() - start_time < timeout:
        try:
            response = requests.post(
                f"{BASE_URL}/v1/completions", json=test_data, timeout=10
            )
            if response.status_code == 200:
                print(f"Server at {BASE_URL} is ready!")
                return
            # Fix: previously only request exceptions slept, so a non-200
            # response (e.g. 503 while replicas start) caused a busy-poll loop.
            print(f"Waiting for server... (HTTP {response.status_code})")
        except Exception as e:
            print(f"Waiting for server... ({e})")
        time.sleep(2)
    raise TimeoutError(f"Server not ready within {timeout} seconds")
def call_collective_rpc_sync(method: str, args=None, kwargs=None) -> dict:
    """Synchronously call the /collective_rpc endpoint.

    Args:
        method: Name of the worker-extension method to invoke on all workers.
        args: Optional positional arguments for the method.
        kwargs: Optional keyword arguments for the method (added for parity
            with ``TrainerActor._call_collective_rpc_sync``).

    Returns:
        The parsed JSON response body.

    Raises:
        requests.HTTPError: If the endpoint returns an error status code.
    """
    # Note: the previous annotation `args: list = None` was incorrect for a
    # None default; the annotations are dropped rather than importing Optional.
    response = requests.post(
        f"{BASE_URL}/collective_rpc",
        json={
            "model": MODEL_ID,
            "method": method,
            "args": args or [],
            "kwargs": kwargs or {},
        },
        timeout=60,
    )
    # Fail fast with a clear HTTP error instead of trying to JSON-decode an
    # error body.
    response.raise_for_status()
    return response.json()
# =============================================================================
# Test
# =============================================================================
def test_collective_rpc_weight_sync():
    """Test the complete RLHF weight synchronization workflow.

    End-to-end flow: serve the model, verify inference, build the NCCL group
    between the TrainerActor and all inference workers, broadcast zeroed
    weights, confirm every worker observed the update, and verify the server
    still answers requests afterwards.
    """
    # Start Ray Serve with DevIngress
    llm_config = get_llm_config()
    app = build_dev_openai_app({"llm_configs": [llm_config]})
    serve.run(app, blocking=False)

    # Wait for application to be running
    wait_for_condition(is_default_app_running, timeout=300)
    wait_for_server_ready(timeout=240)

    trainer = None  # Initialize before try block to avoid NameError in finally
    try:
        # Step 1: Verify model serves requests before weight update
        print("\n=== Step 1: Verifying model serves requests before update ===")
        client = OpenAI(base_url=f"{BASE_URL}/v1", api_key="fake-key")
        # temperature=0 makes the completion deterministic so before/after
        # outputs are comparable.
        response = client.completions.create(
            model=MODEL_ID,
            prompt="Hello, my name is",
            max_tokens=10,
            temperature=0,
        )
        assert response.choices[0].text is not None
        original_output = response.choices[0].text
        print(f"β Original output: {original_output!r}")

        # Step 2: Create trainer and set up weight sync group
        print("\n=== Step 2: Setting up trainer and NCCL process group ===")
        trainer = TrainerActor.remote(MODEL_ID, BASE_URL)
        ray.get(
            trainer.setup_weight_sync_group.remote(
                tp_size=TENSOR_PARALLEL_SIZE,
                num_replicas=NUM_REPLICAS,
            )
        )
        print("β Weight sync group established")

        # Step 3: Broadcast weight updates (zero out weights)
        print("\n=== Step 3: Broadcasting weight updates ===")
        start_time = time.time()
        ray.get(trainer.update_weights.remote())
        elapsed = time.time() - start_time
        print(f"β Weight update completed in {elapsed:.2f}s")

        # Step 4: Verify weights changed on inference workers
        print("\n=== Step 4: Verifying weights changed on workers ===")
        result = call_collective_rpc_sync("check_weights_changed")
        print(f"check_weights_changed response: {result}")
        # Verify all workers report weights changed: the response nests one
        # entry per replica, each with a list of per-worker booleans.
        assert "results" in result, f"Expected 'results' in response: {result}"
        for replica_result in result["results"]:
            worker_results = replica_result.get("worker_results", [])
            for worker_result in worker_results:
                assert (
                    worker_result
                ), f"Worker reported weights not changed: {replica_result}"
        print("β All workers confirmed weights updated")

        # Step 5: Verify model still serves requests (with zeroed weights)
        print("\n=== Step 5: Verifying inference works with updated weights ===")
        response = client.completions.create(
            model=MODEL_ID,
            prompt="Hello, my name is",
            max_tokens=10,
            temperature=0,
        )
        assert response.choices[0].text is not None
        updated_output = response.choices[0].text
        print(f"β Output with zeroed weights: {updated_output!r}")
        # Output should be different since weights are now zero
        # (model produces garbage/different output)
        print(f"\nOriginal: {original_output!r}")
        print(f"Updated: {updated_output!r}")
        print("\n=== All tests passed! ===")
    finally:
        # Cleanup: kill the trainer first so it releases its GPU, then tear
        # down Serve and Ray.
        if trainer is not None:
            ray.kill(trainer)
        serve.shutdown()
        ray.shutdown()
        time.sleep(1)
# Allow running this release test directly: -x stops on first failure,
# -v is verbose, -s shows print output.
if __name__ == "__main__":
    pytest.main(["-xvs", __file__])
| {
"repo_id": "ray-project/ray",
"file_path": "release/llm_tests/serve/test_llm_serve_rlhf.py",
"license": "Apache License 2.0",
"lines": 168,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/llm/examples/sglang/modules/sglang_engine.py | import copy
import signal
import time
from typing import (
Any,
AsyncGenerator,
List,
Optional,
)
from ray.llm._internal.serve.core.configs.llm_config import LLMConfig
from ray.llm._internal.serve.core.configs.openai_api_models import (
ChatCompletionRequest,
ChatCompletionResponse,
CompletionRequest,
CompletionResponse,
EmbeddingCompletionRequest,
EmbeddingRequest,
EmbeddingResponse,
)
from ray.llm._internal.serve.core.protocol import RawRequestInfo
class SGLangServer:
    """Ray Serve LLM server backed by an in-process SGLang engine.

    Implements the chat / completions / embeddings protocol methods expected
    by Ray Serve LLM and translates OpenAI-style requests into calls on
    ``sglang.Engine``.
    """

    def __init__(self, llm_config: LLMConfig):
        """Construct the SGLang engine from ``llm_config.engine_kwargs``.

        Raises:
            ImportError: If the ``sglang`` package cannot be imported.
        """
        self._llm_config = llm_config
        self.engine_kwargs = llm_config.engine_kwargs
        try:
            import sglang
        except ImportError as e:
            raise ImportError(
                "SGLang is not installed or failed to import. Please run "
                "`pip install sglang[all]` to install required dependencies."
            ) from e
        # TODO(issue-61108): remove this once sglang#18752 is merged and included
        # in the minimum supported SGLang version for this example.
        # Temporarily no-op signal.signal while the engine is constructed.
        # NOTE(review): presumably the engine registers signal handlers, which
        # fails off the main thread (e.g. inside a Serve replica) — confirm
        # against sglang#18752.
        original_signal_func = signal.signal

        def noop_signal_handler(sig, action):
            # Returns default handler to satisfy signal.signal() return signature
            return signal.SIG_DFL

        try:
            # Override signal.signal with our no-op function
            signal.signal = noop_signal_handler
            self.engine = sglang.Engine(**self.engine_kwargs)
        finally:
            # Always restore the real signal.signal, even on engine failure.
            signal.signal = original_signal_func

    @staticmethod
    def _build_sampling_params(request: Any) -> dict[str, Any]:
        """Translate OpenAI-style request fields into SGLang sampling params.

        Only fields the caller explicitly set (per pydantic's
        ``model_fields_set``, when available) and that are non-None are
        forwarded, so engine defaults are not clobbered. ``max_tokens`` maps
        to SGLang's ``max_new_tokens``.
        """
        sampling_params: dict[str, Any] = {}
        model_fields_set = getattr(request, "model_fields_set", None)
        has_model_fields_set = model_fields_set is not None
        fields_set = set(model_fields_set) if has_model_fields_set else set()

        def was_explicitly_set(field_name: str) -> bool:
            # Use model_fields_set when available to avoid injecting defaults for
            # fields omitted by the caller.
            if has_model_fields_set:
                return field_name in fields_set
            return getattr(request, field_name, None) is not None

        temperature = getattr(request, "temperature", None)
        top_p = getattr(request, "top_p", None)
        max_tokens = getattr(request, "max_tokens", None)
        stop = getattr(request, "stop", None)
        if was_explicitly_set("temperature") and temperature is not None:
            sampling_params["temperature"] = temperature
        if was_explicitly_set("top_p") and top_p is not None:
            sampling_params["top_p"] = top_p
        if was_explicitly_set("max_tokens") and max_tokens is not None:
            sampling_params["max_new_tokens"] = max_tokens
        if was_explicitly_set("stop") and stop is not None:
            sampling_params["stop"] = stop
        return sampling_params

    @staticmethod
    def _build_chat_messages(messages: List[Any]) -> List[dict[str, Any]]:
        """Normalize heterogeneous message objects into role/content dicts.

        Accepts plain dicts, pydantic models (via ``model_dump``), or any
        object with ``role``/``content`` attributes.
        """
        converted_messages: List[dict[str, Any]] = []
        for message in messages:
            if isinstance(message, dict):
                message_dict = dict(message)
            elif hasattr(message, "model_dump") and callable(message.model_dump):
                message_dict = dict(message.model_dump())
            else:
                message_dict = {
                    "role": getattr(message, "role", "user"),
                    "content": getattr(message, "content", ""),
                }
            # Coerce role to str and default missing roles to "user".
            message_dict["role"] = str(message_dict.get("role", "user"))
            converted_messages.append(message_dict)
        return converted_messages

    @staticmethod
    def _build_chat_template_kwargs(request: ChatCompletionRequest) -> dict[str, Any]:
        """
        Build optional chat-template kwargs using request fields when present.

        This mirrors SGLang's chat-serving pipeline semantics without directly
        coupling to its internal server classes. Explicit
        ``chat_template_kwargs`` from the request override the derived
        ``tools`` / ``reasoning_effort`` entries.
        """
        kwargs: dict[str, Any] = {}
        tools = getattr(request, "tools", None)
        if tools is not None:
            kwargs["tools"] = tools
        reasoning_effort = getattr(request, "reasoning_effort", None)
        if reasoning_effort is not None:
            kwargs["reasoning_effort"] = reasoning_effort
        chat_template_kwargs = getattr(request, "chat_template_kwargs", None)
        if isinstance(chat_template_kwargs, dict):
            kwargs.update(chat_template_kwargs)
        return kwargs

    def _render_chat_prompt(
        self,
        request: ChatCompletionRequest,
        messages: List[dict[str, Any]],
    ) -> str:
        """Render chat messages into a single text prompt.

        Uses the engine tokenizer's chat template when available, otherwise
        falls back to a simple "role: content" transcript.
        """
        tokenizer = self.engine.tokenizer_manager.tokenizer
        # SGLang supports --skip-tokenizer-init, where tokenizer is intentionally
        # None and text prompt rendering is not available.
        if tokenizer is None:
            return self._render_fallback_prompt(messages)
        template_kwargs = self._build_chat_template_kwargs(request)
        return tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True,
            **template_kwargs,
        )

    @staticmethod
    def _render_fallback_prompt(messages: List[dict[str, Any]]) -> str:
        """Render messages as a plain "role: content" transcript."""
        # Fallback prompt format for tokenizers without chat-template support.
        prompt_lines: List[str] = []
        for message in messages:
            role = str(message.get("role", "user"))
            content = message.get("content", "")
            if content is None:
                content = ""
            prompt_lines.append(f"{role}: {content}")
        # Trailing "assistant:" cues the model to produce the reply.
        prompt_lines.append("assistant:")
        return "\n".join(prompt_lines)

    async def start(self) -> None:
        """No-op startup hook."""
        # Engine is initialized in __init__; keep start idempotent for protocol
        # compatibility.
        return

    async def check_health(self) -> None:
        """No-op health check."""
        # SGLang's in-process Engine API does not expose a health-check method.
        # Its health endpoints exist only in HTTP/gRPC server entrypoints, which
        # this integration does not run. Keep the protocol hook as a no-op.
        return

    async def _generate_raw(
        self,
        request: Any,
        prompt: Any,
    ) -> dict[str, Any]:
        """Run generation and return raw engine output payload."""
        sampling_params = self._build_sampling_params(request)
        # Streaming is not supported by this integration; always generate
        # the full response in one shot.
        generate_kwargs = {
            "prompt": prompt,
            "stream": False,
        }
        if sampling_params:
            generate_kwargs["sampling_params"] = sampling_params
        return await self.engine.async_generate(**generate_kwargs)

    @staticmethod
    def _extract_generation_metadata(raw: dict[str, Any]) -> dict[str, Any]:
        """Extract normalized generation metadata from one raw engine payload.

        Tolerates missing fields: finish_reason defaults to "length", token
        counts default to 0, and a synthetic id/created timestamp is produced
        when the engine does not supply one.
        """
        text: str = raw.get("text", "")
        meta: dict[str, Any] = raw.get("meta_info", {}) or {}
        finish_reason_info = meta.get("finish_reason", {}) or {}
        # finish_reason may be a {"type": ...} dict or a bare value.
        if isinstance(finish_reason_info, dict):
            finish_reason = finish_reason_info.get("type", "length")
        else:
            finish_reason = str(finish_reason_info)
        prompt_tokens = int(meta.get("prompt_tokens", 0))
        completion_tokens = int(meta.get("completion_tokens", 0))
        total_tokens = prompt_tokens + completion_tokens
        return {
            "text": text.strip(),
            "id": meta.get("id", f"sglang-gen-{int(time.time())}"),
            "created": int(time.time()),
            "finish_reason": finish_reason,
            "prompt_tokens": prompt_tokens,
            "completion_tokens": completion_tokens,
            "total_tokens": total_tokens,
        }

    async def _generate_and_extract_metadata(
        self,
        request: Any,
        prompt: Any,
    ) -> dict[str, Any]:
        """
        Handles parameter extraction, calls the SGLang engine, and processes the
        raw response to extract common metadata and generated text.
        """
        raw = await self._generate_raw(request, prompt)
        # The engine may return a list even for a single prompt; use the
        # first entry.
        if isinstance(raw, list):
            if not raw:
                raise RuntimeError(
                    "SGLang engine returned an empty response list during generation."
                )
            raw = raw[0]
        return self._extract_generation_metadata(raw)

    async def chat(
        self,
        request: ChatCompletionRequest,
        raw_request_info: Optional[RawRequestInfo] = None,
    ) -> AsyncGenerator[ChatCompletionResponse, None]:
        """Handle a chat completion request; yields exactly one response.

        Streaming is not supported: the full response is generated and
        yielded as a single ChatCompletionResponse.
        """
        chat_messages = self._build_chat_messages(request.messages)
        prompt = self._render_chat_prompt(request, chat_messages)
        metadata = await self._generate_and_extract_metadata(
            request,
            prompt,
        )
        usage_data = {
            "prompt_tokens": metadata["prompt_tokens"],
            "completion_tokens": metadata["completion_tokens"],
            "total_tokens": metadata["total_tokens"],
        }
        choice_data = {
            "index": 0,
            "message": {"role": "assistant", "content": metadata["text"]},
            "finish_reason": metadata["finish_reason"],
        }
        resp = ChatCompletionResponse(
            id=metadata["id"],
            object="chat.completion",
            created=metadata["created"],
            model=request.model,
            choices=[choice_data],
            usage=usage_data,
        )
        yield resp

    async def completions(
        self,
        request: CompletionRequest,
        raw_request_info: Optional[RawRequestInfo] = None,
    ) -> AsyncGenerator[CompletionResponse, None]:
        """Handle a (possibly batched) text completion request.

        A list-valued ``prompt`` is processed sequentially, producing one
        choice per prompt; usage counts are summed across the batch. Yields
        exactly one non-streaming CompletionResponse.
        """
        prompt_input = request.prompt
        prompts_to_process: List[str] = []
        if isinstance(prompt_input, list):
            # Check for empty list
            if not prompt_input:
                raise ValueError(
                    "The 'prompt' list cannot be empty for completion requests."
                )
            # Batched prompts: process all of them
            prompts_to_process = prompt_input
        else:
            # Single string prompt: wrap it in a list for iteration
            prompts_to_process = [prompt_input]
        all_choices = []
        total_prompt_tokens = 0
        total_completion_tokens = 0
        last_metadata = {}
        # Loop through all prompts in the batch (generations run one at a
        # time, not concurrently).
        for index, prompt_string in enumerate(prompts_to_process):
            metadata = await self._generate_and_extract_metadata(request, prompt_string)
            last_metadata = metadata  # Keep track of the metadata from the last run
            total_prompt_tokens += metadata["prompt_tokens"]
            total_completion_tokens += metadata["completion_tokens"]
            choice_data = {
                "index": index,
                "text": metadata["text"],
                "logprobs": None,
                "finish_reason": metadata["finish_reason"],
            }
            all_choices.append(choice_data)
        usage_data = {
            "prompt_tokens": total_prompt_tokens,
            "completion_tokens": total_completion_tokens,
            "total_tokens": total_prompt_tokens + total_completion_tokens,
        }
        # Use metadata from the last generation for shared fields (id, created)
        resp = CompletionResponse(
            id=last_metadata.get("id", f"sglang-batch-gen-{int(time.time())}"),
            object="text_completion",
            created=last_metadata.get("created", int(time.time())),
            model=getattr(request, "model", "default_model"),
            choices=all_choices,
            usage=usage_data,
        )
        yield resp

    async def embeddings(
        self, request: EmbeddingRequest, raw_request: Optional[Any] = None
    ) -> AsyncGenerator[EmbeddingResponse, None]:
        """Handle an embedding request; yields exactly one response."""
        # Input handling follows SGLang's OpenAIServingEmbedding pattern:
        # https://github.com/sgl-project/sglang/blob/main/python/sglang/srt/entrypoints/openai/serving_embedding.py
        if isinstance(request, EmbeddingCompletionRequest):
            prompt = request.input
        else:
            # Chat embedding request - convert messages to prompt
            prompt = self._render_fallback_prompt(
                self._build_chat_messages(request.messages)
            )
        # async_encode handles both single strings and lists of strings
        results = await self.engine.async_encode(prompt)
        if not isinstance(results, list):
            results = [results]
        if not results:
            raise RuntimeError(
                "SGLang engine returned an empty response for embedding request."
            )
        # Build response following SGLang's _build_embedding_response pattern
        data = []
        total_prompt_tokens = 0
        for idx, ret_item in enumerate(results):
            data.append(
                {
                    "index": idx,
                    "object": "embedding",
                    "embedding": ret_item.get("embedding", []),
                }
            )
            meta = ret_item.get("meta_info", {}) or {}
            total_prompt_tokens += int(meta.get("prompt_tokens", 0))
        resp = EmbeddingResponse(
            object="list",
            model=request.model or "",
            data=data,
            usage={
                "prompt_tokens": total_prompt_tokens,
                "total_tokens": total_prompt_tokens,
                "completion_tokens": 0,
            },
        )
        yield resp

    async def llm_config(self) -> Optional[LLMConfig]:
        """Return the LLMConfig this server was constructed with."""
        return self._llm_config

    @classmethod
    def get_deployment_options(cls, llm_config: "LLMConfig"):
        """Derive Serve deployment options (placement group, runtime env).

        Builds a default placement group of one CPU+GPU bundle plus one GPU
        bundle per extra tensor-parallel worker, unless the config supplies
        its own bundles. Also injects the LLM worker-process setup hook into
        the actor runtime_env.
        """
        deployment_options = copy.deepcopy(llm_config.deployment_config)
        pg_config = llm_config.placement_group_config or {}
        tp_size = llm_config.engine_kwargs.get("tp_size", 1)
        if "placement_group_bundles" not in pg_config:
            pg_bundles = [{"CPU": 1, "GPU": 1}]
            if tp_size > 1:  # TO DO: to support tp_size > 1 cases
                pg_bundles.extend([{"GPU": 1} for _ in range(tp_size - 1)])
            pg_strategy = "PACK"
        else:
            pg_bundles = pg_config.get("placement_group_bundles")
            pg_strategy = pg_config.get("placement_group_strategy", "PACK")
        deployment_options.update(
            {
                "placement_group_bundles": pg_bundles,
                "placement_group_strategy": pg_strategy,
            }
        )
        ray_actor_options = deployment_options.get("ray_actor_options", {})
        runtime_env = ray_actor_options.setdefault("runtime_env", {})
        # set as default without checking ENABLE_WORKER_PROCESS_SETUP_HOOK
        runtime_env.setdefault(
            "worker_process_setup_hook",
            "ray.llm._internal.serve._worker_process_setup_hook",
        )
        # User-provided runtime_env entries take precedence over defaults.
        if llm_config.runtime_env:
            runtime_env.update(llm_config.runtime_env)
        deployment_options["ray_actor_options"] = ray_actor_options
        return deployment_options
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/examples/sglang/modules/sglang_engine.py",
"license": "Apache License 2.0",
"lines": 349,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/llm/examples/sglang/serve_sglang_example.py | from modules.sglang_engine import SGLangServer
from ray import serve
from ray.serve.llm import LLMConfig, build_openai_app
# Configure a single LLM deployment served by the custom SGLangServer.
llm_config = LLMConfig(
    model_loading_config={
        # Model id exposed through the OpenAI-compatible API.
        "model_id": "Llama-3.1-8B-Instruct",
        # Where the weights are downloaded from.
        "model_source": "unsloth/Llama-3.1-8B-Instruct",
    },
    deployment_config={
        "autoscaling_config": {
            "min_replicas": 1,
            "max_replicas": 2,
        }
    },
    # Use the SGLang-backed server implementation instead of the default engine.
    server_cls=SGLangServer,
    engine_kwargs={
        "trust_remote_code": True,
        # SGLang loads weights via model_path (mirrors model_source above).
        "model_path": "unsloth/Llama-3.1-8B-Instruct",
        "tp_size": 1,
        "mem_fraction_static": 0.8,
    },
)

# Build the OpenAI-compatible app and run it in the foreground.
app = build_openai_app({"llm_configs": [llm_config]})
serve.start()
serve.run(app, blocking=True)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/examples/sglang/serve_sglang_example.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/serve/tutorials/asynchronous-inference/ci/nb2py.py | import argparse
import nbformat
def convert_notebook(
    input_path: str, output_path: str, ignore_cmds: bool = False
) -> None:
    """
    Read a Jupyter notebook and write a Python script, converting all %%bash
    cells and IPython "!" commands into subprocess.run calls that raise on error.
    Cells that load or autoreload extensions are ignored.

    Args:
        input_path: Path to the input .ipynb file.
        output_path: Path for the generated .py script.
        ignore_cmds: If True, drop bash cells and "!" commands entirely
            instead of converting them to subprocess.run calls.
    """
    nb = nbformat.read(input_path, as_version=4)
    # Fix: write with an explicit encoding; the platform default can break on
    # non-ASCII notebook content.
    with open(output_path, "w", encoding="utf-8") as out:
        for cell in nb.cells:
            # Only process code cells
            if cell.cell_type != "code":
                continue
            lines = cell.source.splitlines()
            # Skip cells that load or autoreload extensions
            if any(
                line.strip().startswith("%load_ext autoreload")
                or line.strip().startswith("%autoreload all")
                for line in lines
            ):
                continue
            # Detect a %%bash cell
            if lines and lines[0].strip().startswith("%%bash"):
                if ignore_cmds:
                    continue
                bash_script = "\n".join(lines[1:]).rstrip()
                out.write("import subprocess\n")
                out.write(
                    f"subprocess.run(r'''{bash_script}''',\n"
                    "               shell=True,\n"
                    "               check=True,\n"
                    "               executable='/bin/bash')\n\n"
                )
            else:
                # Detect any IPython '!' shell commands in code lines
                has_bang = any(line.lstrip().startswith("!") for line in lines)
                # Start with "serve run" "serve shutdown" "curl" or "anyscale service" commands
                to_ignore_cmd = (
                    "serve run",
                    "serve shutdown",
                    "curl",
                    "anyscale service",
                )
                has_ignored_start = any(
                    line.lstrip().startswith(to_ignore_cmd) for line in lines
                )
                if has_bang or has_ignored_start:
                    if ignore_cmds:
                        continue
                    out.write("import subprocess\n")
                    for line in lines:
                        stripped = line.lstrip()
                        if stripped.startswith("!"):
                            cmd = stripped[1:].lstrip()
                            out.write(
                                f"subprocess.run(r'''{cmd}''',\n"
                                "               shell=True,\n"
                                "               check=True,\n"
                                "               executable='/bin/bash')\n"
                            )
                        else:
                            out.write(line.rstrip() + "\n")
                    out.write("\n")
                else:
                    # Regular Python cell:
                    code = cell.source.rstrip()
                    if "client.chat.completions.create" in code:
                        continue  # Model isn't deployed in CI so skip cells calling the service
                    # else, dump as-is
                    out.write(cell.source.rstrip() + "\n\n")
def main() -> None:
    """CLI entry point: parse arguments and run the notebook conversion."""
    arg_parser = argparse.ArgumentParser(
        description="Convert a Jupyter notebook to a Python script, preserving bash cells and '!' commands as subprocess calls unless ignored with --ignore-cmds."
    )
    arg_parser.add_argument("input_nb", help="Path to the input .ipynb file")
    arg_parser.add_argument("output_py", help="Path for the output .py script")
    arg_parser.add_argument(
        "--ignore-cmds",
        action="store_true",
        help="Ignore bash cells and '!' commands",
    )
    opts = arg_parser.parse_args()
    convert_notebook(opts.input_nb, opts.output_py, ignore_cmds=opts.ignore_cmds)


if __name__ == "__main__":
    main()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/tutorials/asynchronous-inference/ci/nb2py.py",
"license": "Apache License 2.0",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/serve/tutorials/asynchronous-inference/content/client.py | """
Example client for testing asynchronous PDF processing.
Demonstrates:
1. Submitting PDF processing tasks
2. Polling for task status
3. Retrieving results when complete
"""
import time
import argparse
from typing import Dict, Any, Optional
import requests
class AsyncPDFClient:
    """Client for interacting with the async PDF processing API."""

    def __init__(
        self,
        base_url: str,
        auth_header: Optional[str] = None,
        request_timeout: float = 30.0,
    ):
        """
        Initialize the client.

        :param base_url: Base URL of the async PDF processing API
        :param auth_header: Optional Authorization header value, e.g. "Bearer TOKEN"
        :param request_timeout: Per-request HTTP timeout in seconds.
        """
        self.base_url = base_url.rstrip("/")
        self.headers: Dict[str, str] = {}
        # Fix: HTTP calls previously had no timeout and could hang forever if
        # the server stopped responding.
        self.request_timeout = request_timeout
        if auth_header:
            # Use the provided value as the Authorization header
            self.headers["Authorization"] = auth_header

    def process_pdf(self, pdf_url: str, max_summary_paragraphs: int = 3) -> str:
        """
        Submit a PDF processing task.

        :param pdf_url: URL of the PDF to process.
        :param max_summary_paragraphs: Number of summary paragraphs to request.
        :return: The server-assigned task id.
        :raises requests.HTTPError: If the server returns an error status.
        """
        response = requests.post(
            f"{self.base_url}/process",
            json={
                "pdf_url": pdf_url,
                "max_summary_paragraphs": max_summary_paragraphs,
            },
            headers=self.headers,
            timeout=self.request_timeout,
        )
        response.raise_for_status()
        return response.json()["task_id"]

    def get_task_status(self, task_id: str) -> Dict[str, Any]:
        """
        Get the current status of a task.

        :param task_id: Id returned by :meth:`process_pdf`.
        :return: JSON status payload (contains at least a "status" key).
        :raises requests.HTTPError: If the server returns an error status.
        """
        response = requests.get(
            f"{self.base_url}/status/{task_id}",
            headers=self.headers,
            timeout=self.request_timeout,
        )
        response.raise_for_status()
        return response.json()

    def wait_for_task(
        self,
        task_id: str,
        poll_interval: float = 2.0,
        timeout: float = 120.0,
    ) -> Dict[str, Any]:
        """
        Wait for a task to complete by polling its status.

        :param task_id: Id of the task to wait on.
        :param poll_interval: Seconds to sleep between status polls.
        :param timeout: Total seconds to wait before giving up.
        :return: The final status payload on SUCCESS.
        :raises TimeoutError: If the task does not finish within `timeout`.
        :raises RuntimeError: If the task reports FAILURE.
        """
        start_time = time.time()
        while True:
            # Check if we've exceeded the timeout
            if time.time() - start_time > timeout:
                raise TimeoutError(f"Task {task_id} timed out after {timeout}s")
            # Get current task status
            status = self.get_task_status(task_id)
            state = status["status"]
            if state == "SUCCESS":
                return status
            elif state == "FAILURE":
                raise RuntimeError(f"Task failed: {status.get('error')}")
            elif state in ["PENDING", "STARTED"]:
                print(f" Task status: {state}, waiting...")
                time.sleep(poll_interval)
            else:
                # Unknown states are treated as still-in-progress.
                print(f" Unknown status: {state}, waiting...")
                time.sleep(poll_interval)
def parse_args() -> argparse.Namespace:
    """Parse and return the command-line arguments for the example client."""
    cli = argparse.ArgumentParser(description="Asynchronous PDF Processing Client")
    cli.add_argument(
        "base_url",
        help="Base URL of the async PDF processing API, e.g. http://localhost:8000",
    )
    # Optional bearer-token style auth header.
    cli.add_argument(
        "-H",
        "--header",
        dest="auth_header",
        help="Authorization header value, e.g. 'Bearer YOUR_TOKEN'",
    )
    return cli.parse_args()
def main():
    """Run example PDF processing tasks.

    Submits two PDFs for asynchronous processing, then polls each task to
    completion and prints the results.
    """
    args = parse_args()
    client = AsyncPDFClient(base_url=args.base_url, auth_header=args.auth_header)
    print("=" * 70)
    print("Asynchronous PDF Processing Example")
    print("=" * 70)
    # Example: Process multiple PDFs in parallel
    print("\n" + "=" * 70)
    print("Step 1: Submitting PDF processing tasks")
    print("=" * 70)
    pdf_urls = [
        "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf",
        "https://arxiv.org/pdf/1706.03762.pdf",
    ]
    # Submit all tasks first so they are processed concurrently server-side.
    task_ids = []
    for i, url in enumerate(pdf_urls, 1):
        try:
            task_id = client.process_pdf(url)
            task_ids.append((task_id, url))
            print(f" β Task {i} submitted: {task_id}")
        except Exception as e:
            # Best-effort: a failed submission is reported but does not stop
            # the remaining submissions.
            print(f" β Task {i} failed to submit: {e}")
    # Wait for all tasks to complete
    print("\n" + "=" * 70)
    print("Step 2: Waiting for tasks to complete")
    print("=" * 70)
    for i, (task_id, url) in enumerate(task_ids, 1):
        print(f"\nTask {i} ({url.split('/')[-1]}):")
        try:
            result = client.wait_for_task(task_id, timeout=60.0)
            if result["result"]:
                res = result["result"]
                print(
                    f" β Complete: {res['page_count']} pages, {res['word_count']} words"
                )
                print(f" β Processing time: {res['processing_time_seconds']}s")
        except Exception as e:
            print(f" β Error: {e}")
    print("\n" + "=" * 70)
    print("Example complete!")
    print("=" * 70)


if __name__ == "__main__":
    main()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/tutorials/asynchronous-inference/content/client.py",
"license": "Apache License 2.0",
"lines": 135,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/serve/tutorials/asynchronous-inference/content/server.py | """
Ray Serve Asynchronous Inference - PDF Processing Example
This example shows how to build async inference services that handle long-running
tasks without blocking HTTP responses. Tasks are queued to Redis and processed by
background workers.
"""
import io
import logging
import time
from typing import Dict, Any
import requests
from fastapi import FastAPI
from pydantic import BaseModel, HttpUrl
from PyPDF2 import PdfReader
from ray import serve
from ray.serve.handle import DeploymentHandle
from ray.serve.schema import CeleryAdapterConfig, TaskProcessorConfig
from ray.serve.task_consumer import (
instantiate_adapter_from_config,
task_consumer,
task_handler,
)
logger = logging.getLogger("ray.serve")
# Shared task-processor configuration: Celery adapter with Redis as both the
# broker (task queue) and the result backend.
# NOTE(review): the Redis endpoint below is a hard-coded, environment-specific
# ElastiCache host used by the template's CI; replace it with your own
# broker/backend URL when reusing this example.
TASK_PROCESSOR_CONFIG = TaskProcessorConfig(
    queue_name="pdf_processing_queue",
    adapter_config=CeleryAdapterConfig(
        broker_url="redis://asynchronous-template-testing-redis.onvfjm.ng.0001.usw2.cache.amazonaws.com:6379/0",
        backend_url="redis://asynchronous-template-testing-redis.onvfjm.ng.0001.usw2.cache.amazonaws.com:6379/0",
    ),
    max_retries=3,
    # Queues for tasks that exhausted retries / were rejected as unprocessable
    # — presumably dead-letter destinations; confirm against the
    # TaskProcessorConfig docs.
    failed_task_queue_name="failed_pdfs",
    unprocessable_task_queue_name="invalid_pdfs",
)
# ============================================================================
# Request Model
# ============================================================================
class ProcessPDFRequest(BaseModel):
    """Request schema for PDF processing."""

    # URL of the PDF to download; HttpUrl validates the URL format.
    pdf_url: HttpUrl
    # Number of leading paragraphs to include in the generated summary.
    max_summary_paragraphs: int = 3
# ============================================================================
# Task Consumer - Background Worker
# ============================================================================
@serve.deployment(num_replicas=2, max_ongoing_requests=5)
@task_consumer(task_processor_config=TASK_PROCESSOR_CONFIG)
class PDFProcessor:
    """
    Background worker that processes PDF documents asynchronously.

    Configuration:
    - num_replicas=2: Run 2 worker instances
    - max_ongoing_requests=5: Each worker handles up to 5 concurrent tasks
    - max_retries=3: Retry failed tasks up to 3 times
    """

    @task_handler(name="process_pdf")
    def process_pdf(
        self, pdf_url: str, max_summary_paragraphs: int = 3
    ) -> Dict[str, Any]:
        """
        Download PDF, extract text, and generate summary.

        Args:
            pdf_url: URL to the PDF file
            max_summary_paragraphs: Number of paragraphs for summary (default: 3)

        Returns:
            Dictionary with extracted text, summary, and metadata

        Raises:
            ValueError: On download failure, invalid PDF, or a PDF with no
                pages / no extractable text.
        """
        start_time = time.time()
        logger.info(f"Processing PDF: {pdf_url}")
        try:
            # Download PDF from URL
            response = requests.get(pdf_url, timeout=30)
            response.raise_for_status()
            # Parse PDF content
            pdf_file = io.BytesIO(response.content)
            try:
                pdf_reader = PdfReader(pdf_file)
            except Exception as e:
                raise ValueError(f"Invalid PDF file: {str(e)}")
            if len(pdf_reader.pages) == 0:
                raise ValueError("PDF contains no pages")
            # Extract text from all pages
            full_text = ""
            for page in pdf_reader.pages:
                text = page.extract_text()
                if text:
                    full_text += text + "\n"
            if not full_text.strip():
                raise ValueError("PDF contains no extractable text")
            # Generate summary (first N paragraphs)
            paragraphs = [p.strip() for p in full_text.split("\n\n") if p.strip()]
            summary = "\n\n".join(paragraphs[:max_summary_paragraphs])
            # Calculate metadata
            result = {
                "status": "success",
                "pdf_url": pdf_url,
                "page_count": len(pdf_reader.pages),
                "word_count": len(full_text.split()),
                "full_text": full_text,
                "summary": summary,
                "processing_time_seconds": round(time.time() - start_time, 2),
            }
            logger.info(f"Processed PDF: {result['page_count']} pages, {result['word_count']} words")
            return result
        except requests.exceptions.RequestException as e:
            error_msg = f"Failed to download PDF: {str(e)}"
            logger.error(error_msg)
            raise ValueError(error_msg)
        except Exception as e:
            # NOTE(review): this generic handler also catches the ValueErrors
            # raised above, re-wrapping them with a second "Failed to process
            # PDF:" prefix; consider re-raising ValueError untouched and using
            # `raise ... from e` to preserve the traceback chain.
            error_msg = f"Failed to process PDF: {str(e)}"
            logger.error(error_msg)
            raise ValueError(error_msg)
# ============================================================================
# HTTP API - Ingress Deployment
# ============================================================================
# FastAPI app that @serve.ingress wires into the AsyncPDFAPI deployment below.
fastapi_app = FastAPI(title="Async PDF Processing API")
@serve.deployment()
@serve.ingress(fastapi_app)
class AsyncPDFAPI:
    """HTTP API for submitting and checking PDF processing tasks.

    Endpoints:
    - POST /process: Submit a PDF processing task
    - GET /status/{task_id}: Check task status and get results
    """

    def __init__(self, task_processor_config: TaskProcessorConfig, handler: DeploymentHandle):
        """Initialize the API with task adapter."""
        self.adapter = instantiate_adapter_from_config(task_processor_config)
        logger.info("AsyncPDFAPI initialized")

    @fastapi_app.post("/process")
    async def process_pdf(self, request: ProcessPDFRequest):
        """Submit a PDF processing task.

        Returns the task_id immediately without waiting for processing to
        complete; clients poll /status/{task_id} for progress.
        """
        enqueue_kwargs = {
            "pdf_url": str(request.pdf_url),
            "max_summary_paragraphs": request.max_summary_paragraphs,
        }
        task_result = self.adapter.enqueue_task_sync(
            task_name="process_pdf",
            kwargs=enqueue_kwargs,
        )
        logger.info(f"Enqueued task: {task_result}")
        submission_response = {
            "task_id": task_result.id,
            "status": task_result.status,
            "message": "PDF processing task submitted successfully",
        }
        return submission_response

    @fastapi_app.get("/status/{task_id}")
    async def get_status(self, task_id: str):
        """Get task status and results.

        Status values:
        - PENDING: Task queued, waiting for worker
        - STARTED: Worker is processing the task
        - SUCCESS: Task completed successfully (result available)
        - FAILURE: Task failed (error message available)
        """
        status = self.adapter.get_task_status_sync(task_id)
        succeeded = status.status == "SUCCESS"
        failed = status.status == "FAILURE"
        return {
            "task_id": task_id,
            "status": status.status,
            "result": status.result if succeeded else None,
            "error": str(status.result) if failed else None,
        }
# ============================================================================
# Application Setup
# ============================================================================
def build_app():
    """Build and configure the Ray Serve application.

    Wires the background worker deployment into the HTTP ingress deployment.
    """
    worker = PDFProcessor.bind()
    return AsyncPDFAPI.bind(TASK_PROCESSOR_CONFIG, worker)


# Entry point for Ray Serve
app = build_app()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/tutorials/asynchronous-inference/content/server.py",
"license": "Apache License 2.0",
"lines": 176,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/data/doc_code/working-with-llms/classification_example.py | """
Classification batch inference with Ray Data LLM.
Uses sequence classification models for content classifiers and sentiment analyzers.
"""
# Dependency setup
# NOTE(review): installing packages at import time via subprocess is only
# appropriate for a doc example script; the numpy pin matches the example env.
import subprocess
import sys

subprocess.check_call([sys.executable, "-m", "pip", "install", "--upgrade", "ray[llm]"])
subprocess.check_call(
    [sys.executable, "-m", "pip", "install", "--upgrade", "transformers"]
)
subprocess.check_call([sys.executable, "-m", "pip", "install", "numpy==1.26.4"])
# __classification_example_start__
import ray
from ray.data.llm import vLLMEngineProcessorConfig, build_processor

# Configure vLLM for a sequence classification model
classification_config = vLLMEngineProcessorConfig(
    model_source="nvidia/nemocurator-fineweb-nemotron-4-edu-classifier",
    task_type="classify",  # Use 'classify' for sequence classification models
    engine_kwargs=dict(
        max_model_len=512,
        enforce_eager=True,
    ),
    batch_size=8,
    concurrency=1,
    # Classification prompts are raw text: skip chat templating/detokenization.
    chat_template_stage=False,
    detokenize_stage=False,
)
classification_processor = build_processor(
    classification_config,
    # Each input row's "text" column becomes the prompt to classify.
    preprocess=lambda row: dict(prompt=row["text"]),
    postprocess=lambda row: {
        "text": row["prompt"],
        # Classification models return logits in the 'embeddings' field
        "edu_score": float(row["embeddings"][0])
        if row.get("embeddings") is not None and len(row["embeddings"]) > 0
        else None,
    },
)
# Sample texts with varying educational quality
texts = [
    "lol that was so funny haha",
    "Photosynthesis converts light energy into chemical energy.",
    "Newton's laws describe the relationship between forces and motion.",
]
ds = ray.data.from_items([{"text": text} for text in texts])
# Only run inference when a GPU is present; otherwise skip gracefully so the
# example remains runnable in CPU-only doc builds.
if __name__ == "__main__":
    try:
        import torch
        if torch.cuda.is_available():
            classified_ds = classification_processor(ds)
            classified_ds.show(limit=3)
        else:
            print("Skipping classification run (no GPU available)")
    except Exception as e:
        print(f"Skipping classification run due to environment error: {e}")
# __classification_example_end__
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/data/doc_code/working-with-llms/classification_example.py",
"license": "Apache License 2.0",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/serve/_private/event_loop_monitoring.py | import asyncio
import logging
import time
from typing import Dict, Optional
from ray.serve._private.constants import (
RAY_SERVE_EVENT_LOOP_MONITORING_INTERVAL_S,
SERVE_EVENT_LOOP_LATENCY_HISTOGRAM_BOUNDARIES_MS,
SERVE_LOGGER_NAME,
)
from ray.util import metrics
logger = logging.getLogger(SERVE_LOGGER_NAME)
def setup_event_loop_monitoring(
    loop: asyncio.AbstractEventLoop,
    scheduling_latency: metrics.Histogram,
    iterations: metrics.Counter,
    tasks: metrics.Gauge,
    tags: Dict[str, str],
    interval_s: Optional[float] = None,
) -> asyncio.Task:
    """Start a background task that records event-loop health metrics.

    The task periodically measures how long the loop takes to wake up after a
    sleep (scheduling latency, i.e. event-loop lag) and how many asyncio tasks
    are currently pending.

    Args:
        loop: The asyncio event loop to monitor.
        scheduling_latency: Histogram for the measured scheduling latency.
        iterations: Counter incremented once per monitoring iteration.
        tasks: Gauge tracking the number of pending tasks.
        tags: Tags applied to every metric sample.
        interval_s: Monitoring interval override; defaults to
            RAY_SERVE_EVENT_LOOP_MONITORING_INTERVAL_S.

    Returns:
        The asyncio Task running the monitoring loop.
    """
    effective_interval = (
        RAY_SERVE_EVENT_LOOP_MONITORING_INTERVAL_S if interval_s is None else interval_s
    )
    monitor_coro = _run_monitoring_loop(
        loop=loop,
        schedule_latency=scheduling_latency,
        iterations=iterations,
        task_gauge=tasks,
        tags=tags,
        interval_s=effective_interval,
    )
    return loop.create_task(monitor_coro, name="serve_event_loop_monitoring")
async def _run_monitoring_loop(
loop: asyncio.AbstractEventLoop,
schedule_latency: metrics.Histogram,
iterations: metrics.Counter,
task_gauge: metrics.Gauge,
tags: Dict[str, str],
interval_s: float,
) -> None:
"""Internal monitoring loop that runs until the event loop stops.
The scheduling latency is measured by comparing the actual elapsed time
after sleeping to the expected sleep duration. In an ideal scenario
with no blocking, the latency should be close to zero.
"""
while loop.is_running():
iterations.inc(1, tags)
num_tasks = len(asyncio.all_tasks(loop))
task_gauge.set(num_tasks, tags)
yield_time = time.monotonic()
await asyncio.sleep(interval_s)
elapsed_time = time.monotonic() - yield_time
# Historically, Ray's implementation of histograms are extremely finicky
# with non-positive values (https://github.com/ray-project/ray/issues/26698).
# Technically it shouldn't be possible for this to be negative, add the
# max just to be safe.
# Convert to milliseconds for the metric.
latency_ms = max(0.0, (elapsed_time - interval_s) * 1000)
schedule_latency.observe(latency_ms, tags)
class EventLoopMonitor:
    """Owns the metrics for one monitored event loop and manages its task.

    Creates the scheduling-latency histogram, iteration counter, and
    pending-task gauge up front, then starts/stops the background monitoring
    task on demand via ``start()`` / ``stop()``.
    """

    # Metric tag keys.
    TAG_KEY_COMPONENT = "component"
    TAG_KEY_LOOP_TYPE = "loop_type"
    TAG_KEY_ACTOR_ID = "actor_id"

    # Component types
    COMPONENT_PROXY = "proxy"
    COMPONENT_REPLICA = "replica"
    COMPONENT_UNKNOWN = "unknown"

    # Loop types
    LOOP_TYPE_MAIN = "main"
    LOOP_TYPE_USER_CODE = "user_code"
    LOOP_TYPE_ROUTER = "router"

    def __init__(
        self,
        component: str,
        loop_type: str,
        actor_id: str,
        interval_s: float = RAY_SERVE_EVENT_LOOP_MONITORING_INTERVAL_S,
        extra_tags: Optional[Dict[str, str]] = None,
    ):
        """Initialize the event loop monitor.

        Args:
            component: The component type ("proxy" or "replica").
            loop_type: The type of event loop ("main", "user_code", or "router").
            actor_id: The ID of the actor where this event loop runs.
            interval_s: Optional override for the monitoring interval.
            extra_tags: Optional dictionary of additional metric tags.
        """
        self._interval_s = interval_s
        tags: Dict[str, str] = {
            self.TAG_KEY_COMPONENT: component,
            self.TAG_KEY_LOOP_TYPE: loop_type,
            self.TAG_KEY_ACTOR_ID: actor_id,
        }
        if extra_tags:
            tags.update(extra_tags)
        self._tags = tags
        self._tag_keys = tuple(tags.keys())

        # All three metrics share the same tag set; defaults are attached so
        # the monitoring loop doesn't need to repeat them per observation.
        self._scheduling_latency = metrics.Histogram(
            "serve_event_loop_scheduling_latency_ms",
            description=(
                "Latency of getting yielded control on the event loop in milliseconds. "
                "High values indicate the event loop is blocked."
            ),
            boundaries=SERVE_EVENT_LOOP_LATENCY_HISTOGRAM_BOUNDARIES_MS,
            tag_keys=self._tag_keys,
        )
        self._scheduling_latency.set_default_tags(tags)

        self._iterations = metrics.Counter(
            "serve_event_loop_monitoring_iterations",
            description=(
                "Number of times the event loop monitoring task has run. "
                "Can be used as a heartbeat."
            ),
            tag_keys=self._tag_keys,
        )
        self._iterations.set_default_tags(tags)

        self._tasks = metrics.Gauge(
            "serve_event_loop_tasks",
            description="Number of pending asyncio tasks on the event loop.",
            tag_keys=self._tag_keys,
        )
        self._tasks.set_default_tags(tags)

        self._monitoring_task: Optional[asyncio.Task] = None

    def start(self, loop: asyncio.AbstractEventLoop) -> asyncio.Task:
        """Start monitoring ``loop``.

        Args:
            loop: The asyncio event loop to monitor.

        Returns:
            The asyncio Task running the monitoring loop.
        """
        self._monitoring_task = setup_event_loop_monitoring(
            loop=loop,
            scheduling_latency=self._scheduling_latency,
            iterations=self._iterations,
            tasks=self._tasks,
            tags=self._tags,
            interval_s=self._interval_s,
        )
        logger.debug(
            f"Started event loop monitoring for {self._tags[self.TAG_KEY_COMPONENT]} "
            f"({self._tags[self.TAG_KEY_LOOP_TYPE]}) actor {self._tags[self.TAG_KEY_ACTOR_ID]}"
        )
        return self._monitoring_task

    def stop(self):
        """Cancel the monitoring task if one is still in flight."""
        task = self._monitoring_task
        if task is not None and not task.done():
            task.cancel()
        self._monitoring_task = None

    @property
    def tags(self) -> Dict[str, str]:
        """A copy of the metric tags used by this monitor."""
        return self._tags.copy()
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/_private/event_loop_monitoring.py",
"license": "Apache License 2.0",
"lines": 166,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/llm/_internal/serve/core/ingress/mixins/pausable.py | """Pausable ingress mixin.
Provides HTTP endpoints for pause/resume control plane operations.
"""
from typing import Any, Dict
from fastapi import Query
from pydantic import BaseModel, Field
from starlette.responses import Response
from ray.llm._internal.serve.core.ingress.mixins.broadcastable import (
ReplicaBroadcastable,
)
from ray.llm._internal.serve.observability.logging import get_logger
logger = get_logger(__name__)
# --- Pydantic Models ---
class PauseRequest(BaseModel):
    """Request to pause generation on an engine."""

    # Model ID whose replicas should be paused.
    model: str
    # Forwarded verbatim as kwargs to each replica's pause() implementation.
    options: Dict[str, Any] = Field(
        default_factory=dict,
        description="Engine-specific pause options (e.g., wait_for_inflight_requests, clear_cache)",
    )
class ResumeRequest(BaseModel):
    """Request to resume generation on an engine."""

    # Model ID whose replicas should be resumed.
    model: str
    # Forwarded verbatim as kwargs to each replica's resume() implementation.
    options: Dict[str, Any] = Field(
        default_factory=dict,
        description="Engine-specific resume options",
    )
class IsPausedResponse(BaseModel):
    """Response indicating whether the engine is paused."""

    # True if ANY replica reports itself paused (logical OR across replicas).
    is_paused: bool
# --- Mixin ---
class PausableIngressMixin(ReplicaBroadcastable):
    """Ingress mixin exposing /pause, /resume, and /is_paused endpoints.

    These control-plane endpoints manage engine pause state. Pausing halts
    generation/encoding while keeping model weights resident in GPU memory;
    unlike sleep mode, nothing is offloaded to CPU.
    """

    ENDPOINTS = {
        "pause": lambda app: app.post("/pause"),
        "resume": lambda app: app.post("/resume"),
        "is_paused": lambda app: app.get("/is_paused"),
    }

    async def pause(self, body: PauseRequest) -> Response:
        """Pause generation on every replica serving the requested model.

        New requests are blocked until ``resume`` is called; weights remain
        in GPU memory.

        Args:
            body: Request with the model ID and engine-specific options.
                Options may include:
                - wait_for_inflight_requests (bool): Wait for in-flight
                  requests to finish before pausing. Default False.
                - clear_cache (bool): Clear KV cache after draining.
                  Default True.

        Returns:
            200 OK on success.
        """
        logger.info("Pausing model %s with options: %s", body.model, body.options)
        await self._broadcast_to_replicas(body.model, "pause", kwargs=body.options)
        return Response(status_code=200)

    async def resume(self, body: ResumeRequest) -> Response:
        """Resume generation on every replica serving the requested model.

        Args:
            body: Request with the model ID and engine-specific options.

        Returns:
            200 OK on success.
        """
        logger.info("Resuming model %s with options: %s", body.model, body.options)
        await self._broadcast_to_replicas(body.model, "resume", kwargs=body.options)
        return Response(status_code=200)

    async def is_paused(
        self, model: str = Query(..., description="The model ID to check")
    ) -> IsPausedResponse:
        """Report whether the engine is paused for the requested model.

        Checks every replica and returns True if ANY replica is paused
        (logical OR across replicas).

        Args:
            model: The model ID to check.

        Returns:
            IsPausedResponse with the aggregated pause state.
        """
        replica_states = await self._broadcast_to_replicas(model, "is_paused")
        paused = bool(replica_states) and any(replica_states)
        return IsPausedResponse(is_paused=paused)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/_internal/serve/core/ingress/mixins/pausable.py",
"license": "Apache License 2.0",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:release/llm_tests/serve/test_llm_serve_pause_resume.py | """Test pause/resume control plane API for Ray Serve LLM.
This test verifies that the DevIngress pause/resume endpoints work correctly:
1. Engine starts in unpaused state (is_paused=False)
2. Pause command halts generation (is_paused=True) while keeping weights in GPU
3. Resume command restores generation (is_paused=False)
4. Model can still serve requests after resume
Unlike sleep/wakeup which offloads weights to CPU, pause/resume keeps model
weights in GPU memory. This is useful for quick pause/resume cycles during
RL training where you want to pause generation for weight updates without
the overhead of offloading/reloading weights.
NOTE (Kourosh): This is part of a design in progress for integrating Ray Serve
LLM with RL workloads. The API is not public and won't be documented until the
end-to-end story is finalized. Class names and endpoint names may change.
"""
import time
import pytest
import requests
from openai import OpenAI
from ray import serve
from ray.llm._internal.serve.core.ingress.dev_ingress import build_dev_openai_app
from ray.serve.llm import LLMConfig, ModelLoadingConfig
from ray._common.test_utils import wait_for_condition
from ray.serve._private.constants import SERVE_DEFAULT_APP_NAME
from ray.serve.schema import ApplicationStatus
# Small instruct model and default Serve HTTP address used by this release test.
MODEL_ID = "Qwen/Qwen2-0.5B-Instruct"
BASE_URL = "http://localhost:8000"
def get_llm_config() -> LLMConfig:
    """Build the LLMConfig used for pause/resume testing."""
    loading_config = ModelLoadingConfig(model_id=MODEL_ID)
    # Two replicas, each sharded across two GPUs via tensor parallelism.
    return LLMConfig(
        model_loading_config=loading_config,
        deployment_config={"num_replicas": 2},
        engine_kwargs={"tensor_parallel_size": 2},
    )
def is_default_app_running():
    """Return True if the default Serve application reports RUNNING."""
    try:
        app_status = serve.status().applications[SERVE_DEFAULT_APP_NAME].status
    except (KeyError, AttributeError):
        # App not deployed yet (or status shape unexpected): treat as not running.
        return False
    return app_status == ApplicationStatus.RUNNING
def wait_for_server_ready(timeout: int = 240) -> None:
    """Poll the chat-completions endpoint until the server answers 200.

    Args:
        timeout: Maximum number of seconds to wait.

    Raises:
        TimeoutError: If the server is not ready within ``timeout`` seconds.
    """
    probe_payload = {
        "model": MODEL_ID,
        "messages": [{"role": "user", "content": "test"}],
        "max_tokens": 5,
        "temperature": 0,
    }
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            response = requests.post(
                f"{BASE_URL}/v1/chat/completions", json=probe_payload, timeout=10
            )
            if response.status_code == 200:
                print(f"Server at {BASE_URL} is ready to handle requests!")
                return
        except Exception as e:
            # Best-effort polling: connection errors are expected while the
            # server is still coming up.
            print(f"Waiting for server to be ready... (error: {e})")
        time.sleep(2)
    raise TimeoutError(
        f"Server at {BASE_URL} did not become ready within {timeout} seconds"
    )
def test_pause_resume_lifecycle():
    """Test the complete pause/resume lifecycle.

    End-to-end flow: deploy -> verify unpaused -> serve a request -> pause ->
    verify paused -> resume -> verify unpaused -> serve another request.
    """
    # Start Ray Serve with DevIngress
    llm_config = get_llm_config()
    app = build_dev_openai_app({"llm_configs": [llm_config]})
    serve.run(app, blocking=False)
    # Wait for application to be running
    wait_for_condition(is_default_app_running, timeout=300)
    wait_for_server_ready(timeout=240)
    # NOTE(review): the "β" in the prints below looks like a mis-encoded check
    # mark — confirm the source file's encoding.
    try:
        # Step 1: Verify initial state - engine should not be paused
        print("\n=== Step 1: Checking initial state ===")
        response = requests.get(
            f"{BASE_URL}/is_paused?model={MODEL_ID}",
            timeout=10,
        )
        assert response.status_code == 200, f"is_paused returned {response.status_code}"
        initial_pause_state = response.json().get("is_paused", None)
        assert (
            initial_pause_state is False
        ), f"Expected is_paused=False, got {initial_pause_state}"
        print(f"β Initial paused state: {initial_pause_state}")
        # Step 2: Verify model can serve requests before pause
        print("\n=== Step 2: Verifying model serves requests before pause ===")
        client = OpenAI(base_url=f"{BASE_URL}/v1", api_key="fake-key")
        chat_response = client.chat.completions.create(
            model=MODEL_ID,
            messages=[{"role": "user", "content": "Hello"}],
            max_tokens=10,
            temperature=0,
        )
        assert chat_response.choices[0].message.content is not None
        print(
            f"β Pre-pause response: {chat_response.choices[0].message.content[:50]}..."
        )
        # Step 3: Pause the engine (abort in-flight requests, clear KV cache)
        print("\n=== Step 3: Pausing engine ===")
        pause_response = requests.post(
            f"{BASE_URL}/pause",
            json={
                "model": MODEL_ID,
                "options": {"wait_for_inflight_requests": False, "clear_cache": True},
            },
            timeout=60,
        )
        assert (
            pause_response.status_code == 200
        ), f"pause returned {pause_response.status_code}"
        print("β Pause command executed successfully")
        # Step 4: Verify engine is paused
        print("\n=== Step 4: Verifying engine is paused ===")
        # Wait for pause to complete (pause may propagate asynchronously)
        wait_for_condition(
            lambda: requests.get(f"{BASE_URL}/is_paused?model={MODEL_ID}", timeout=5)
            .json()
            .get("is_paused")
            is True,
            timeout=30,
            retry_interval_ms=1000,
        )
        # Step 5: Resume the engine
        print("\n=== Step 5: Resuming engine ===")
        resume_response = requests.post(
            f"{BASE_URL}/resume",
            json={"model": MODEL_ID, "options": {}},
            timeout=60,
        )
        assert (
            resume_response.status_code == 200
        ), f"resume returned {resume_response.status_code}"
        print("β Resume command executed successfully")
        # Step 6: Verify engine is no longer paused
        print("\n=== Step 6: Verifying engine is resumed ===")
        wait_for_condition(
            lambda: requests.get(f"{BASE_URL}/is_paused?model={MODEL_ID}", timeout=5)
            .json()
            .get("is_paused")
            is False,
            timeout=30,
            retry_interval_ms=1000,
        )
        # Step 7: Verify model can still serve requests after resume
        print("\n=== Step 7: Verifying model can serve requests after resume ===")
        chat_response = client.chat.completions.create(
            model=MODEL_ID,
            messages=[{"role": "user", "content": "What is the capital of France?"}],
            max_tokens=10,
            temperature=0,
        )
        assert chat_response.choices[0].message.content is not None
        print(
            f"β Post-resume response: {chat_response.choices[0].message.content[:50]}..."
        )
        print("\n=== All tests passed! ===")
    finally:
        # Cleanup
        serve.shutdown()
        time.sleep(1)
# Allow running this test file directly, outside of a pytest invocation.
if __name__ == "__main__":
    pytest.main(["-xvs", __file__])
| {
"repo_id": "ray-project/ray",
"file_path": "release/llm_tests/serve/test_llm_serve_pause_resume.py",
"license": "Apache License 2.0",
"lines": 169,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/_internal/execution/callbacks/execution_idx_update_callback.py | from typing import TYPE_CHECKING
from ray.data._internal.execution.execution_callback import (
ExecutionCallback,
)
if TYPE_CHECKING:
from ray.data._internal.execution.streaming_executor import StreamingExecutor
class ExecutionIdxUpdateCallback(ExecutionCallback):
    """Advances the data context's execution index after a successful run."""

    def after_execution_succeeds(self, executor: "StreamingExecutor"):
        # Each successful execution bumps the per-context execution counter.
        executor._data_context._execution_idx += 1
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/execution/callbacks/execution_idx_update_callback.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/dashboard/modules/aggregator/task_events_metadata_buffer.py | from collections import deque
from typing import Dict, Optional
from ray._private.telemetry.open_telemetry_metric_recorder import (
OpenTelemetryMetricRecorder,
)
from ray.core.generated import events_event_aggregator_service_pb2
from ray.dashboard.modules.aggregator.constants import AGGREGATOR_AGENT_METRIC_PREFIX
class TaskEventsMetadataBuffer:
    """Accumulates dropped-task-attempt metadata into bounded batches.

    Builds TaskEventsMetadata protobuf messages (defined in
    events_event_aggregator_service.proto). Incoming attempts are appended to
    a "current" batch; once that batch reaches its per-entry limit it is
    pushed onto a bounded deque, evicting (and metering) the oldest batch on
    overflow.
    """

    def __init__(
        self,
        max_buffer_size: int = 1000,
        max_dropped_attempts_per_metadata_entry: int = 100,
        common_metric_tags: Optional[Dict[str, str]] = None,
    ):
        # Reserve one slot for the in-progress batch; never go below 1.
        self._buffer_maxlen = max(max_buffer_size - 1, 1)
        self._buffer = deque(maxlen=self._buffer_maxlen)
        self._current_metadata_batch = (
            events_event_aggregator_service_pb2.TaskEventsMetadata()
        )
        self._max_dropped_attempts = max_dropped_attempts_per_metadata_entry
        self._common_metric_tags = common_metric_tags or {}
        self._metric_recorder = OpenTelemetryMetricRecorder()
        self._dropped_metadata_count_metric_name = f"{AGGREGATOR_AGENT_METRIC_PREFIX}_task_metadata_buffer_dropped_attempts_total"
        self._metric_recorder.register_counter_metric(
            self._dropped_metadata_count_metric_name,
            "Total number of dropped task attempt metadata entries which were dropped due to buffer being full",
        )

    def _spill_current_batch(self) -> None:
        """Move the current batch into the deque, metering any evicted entry."""
        if len(self._buffer) >= self._buffer_maxlen:
            # Buffer full: drop the oldest entry and record how many
            # attempts were lost with it.
            evicted = self._buffer.popleft()
            self._metric_recorder.set_metric_value(
                self._dropped_metadata_count_metric_name,
                self._common_metric_tags,
                len(evicted.dropped_task_attempts),
            )
        snapshot = events_event_aggregator_service_pb2.TaskEventsMetadata()
        snapshot.CopyFrom(self._current_metadata_batch)
        self._buffer.append(snapshot)
        # Clear (not reassign) so existing references stay valid.
        self._current_metadata_batch.Clear()

    def merge(
        self,
        new_metadata: Optional[events_event_aggregator_service_pb2.TaskEventsMetadata],
    ) -> None:
        """Fold new metadata into the current batch, spilling full batches."""
        if new_metadata is None:
            return
        batch = self._current_metadata_batch
        for incoming in new_metadata.dropped_task_attempts:
            if len(batch.dropped_task_attempts) >= self._max_dropped_attempts:
                self._spill_current_batch()
            copied = batch.dropped_task_attempts.add()
            copied.CopyFrom(incoming)

    def get(self) -> events_event_aggregator_service_pb2.TaskEventsMetadata:
        """Pop the oldest buffered batch, or snapshot-and-reset the current one."""
        if self._buffer:
            return self._buffer.popleft()
        snapshot = events_event_aggregator_service_pb2.TaskEventsMetadata()
        snapshot.CopyFrom(self._current_metadata_batch)
        # Reset the current batch so subsequent merges start afresh.
        self._current_metadata_batch.Clear()
        return snapshot
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/dashboard/modules/aggregator/task_events_metadata_buffer.py",
"license": "Apache License 2.0",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/dashboard/modules/aggregator/tests/test_task_events_metadata_buffer.py | import sys
import pytest
from ray.core.generated.events_event_aggregator_service_pb2 import TaskEventsMetadata
from ray.dashboard.modules.aggregator.task_events_metadata_buffer import (
TaskEventsMetadataBuffer,
)
def _create_test_metadata(dropped_task_ids: list = None, attempt_number=1):
    """Build a TaskEventsMetadata with one dropped attempt per task id."""
    metadata = TaskEventsMetadata()
    for task_id in dropped_task_ids or []:
        attempt = metadata.dropped_task_attempts.add()
        attempt.task_id = task_id.encode()
        attempt.attempt_number = attempt_number
    return metadata
def _result_to_attempts_list(result):
"""Normalize return value from buffer.get() to a python list of attempts."""
if hasattr(result, "dropped_task_attempts"):
attempts = result.dropped_task_attempts
else:
attempts = result
return list(attempts)
def _drain_all_attempts(buffer: TaskEventsMetadataBuffer):
    """Drain the buffer completely via the public get() API.

    Calls get() until it yields no attempts.

    Returns:
        Tuple of (list of task_id bytes, number of non-empty metadata entries).
    """
    collected_ids = []
    num_metadata_entries = 0
    while True:
        attempts = _result_to_attempts_list(buffer.get())
        if not attempts:
            return collected_ids, num_metadata_entries
        num_metadata_entries += 1
        collected_ids.extend(a.task_id for a in attempts)
class TestTaskMetadataBuffer:
    """tests for TaskMetadataBuffer class"""

    def test_merge_and_get(self):
        """Test merging multiple metadata objects and verify task attempts are combined."""
        buffer = TaskEventsMetadataBuffer(
            max_buffer_size=100, max_dropped_attempts_per_metadata_entry=10
        )
        # Create two separate metadata objects with different task IDs
        metadata1 = _create_test_metadata(["task_1", "task_2"])
        metadata2 = _create_test_metadata(["task_3", "task_4"])
        # Merge both metadata objects
        buffer.merge(metadata1)
        buffer.merge(metadata2)
        # Get the merged results
        result = buffer.get()
        attempts = _result_to_attempts_list(result)
        # Verify we have all 4 task attempts
        assert len(attempts) == 4
        # Verify all expected task IDs are present (task_id is stored as bytes)
        task_ids = [attempt.task_id for attempt in attempts]
        assert sorted(task_ids) == [b"task_1", b"task_2", b"task_3", b"task_4"]

    @pytest.mark.parametrize(
        "max_attempts_per_metadata_entry,num_tasks,max_buffer_size,expected_drop_attempts,expected_num_metadata_entries",
        [
            # No overflow, two metadata entries should be created
            (2, 3, 100, 0, 2),
            # No overflow, three metadata entries should be created
            (5, 15, 100, 0, 3),
            # Overflow scenario: buffer too small, ensure drop count is tracked.
            (1, 4, 2, 2, 2),
        ],
    )
    def test_buffer_merge_and_overflow(
        self,
        max_attempts_per_metadata_entry,
        num_tasks,
        max_buffer_size,
        expected_drop_attempts,
        expected_num_metadata_entries,
    ):
        """Exercise batching and overflow: merge num_tasks single-attempt
        metadata objects and check how many survive draining."""
        buffer = TaskEventsMetadataBuffer(
            max_buffer_size=max_buffer_size,
            max_dropped_attempts_per_metadata_entry=max_attempts_per_metadata_entry,
        )
        # Each merge carries exactly one dropped attempt.
        for i in range(num_tasks):
            test_metadata = _create_test_metadata([f"task_{i}"])
            buffer.merge(test_metadata)
        # Drain everything and verify number of attempts in buffer is as expected
        drained_ids, num_metadata_entries = _drain_all_attempts(buffer)
        assert len(drained_ids) == num_tasks - expected_drop_attempts
        assert num_metadata_entries == expected_num_metadata_entries
        # Buffer should now be empty
        assert len(_result_to_attempts_list(buffer.get())) == 0
# Allow running this test file directly, outside of a pytest invocation.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/dashboard/modules/aggregator/tests/test_task_events_metadata_buffer.py",
"license": "Apache License 2.0",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/llm/_internal/serve/core/ingress/mixins/broadcastable.py | import asyncio
from typing import Any, List
from ray.llm._internal.serve.utils.broadcast import broadcast
class ReplicaBroadcastable:
    """Mixin that fans a method call out to every replica of a model.

    NOTE(review): relies on the concrete class providing ``_get_model_id``
    and ``_get_configured_serve_handle`` — confirm against the ingress
    implementation that mixes this in.
    """

    async def _broadcast_to_replicas(
        self, model: str, method: str, kwargs: dict | None = None
    ) -> List[Any]:
        """Invoke ``method`` on every replica serving ``model``.

        Args:
            model: The model ID to broadcast to.
            method: The method name to call on each replica.
            kwargs: Optional kwargs passed to the method.

        Returns:
            One result per replica.
        """
        resolved_model_id = await self._get_model_id(model)
        serve_handle = self._get_configured_serve_handle(resolved_model_id)
        # broadcast() blocks on ray.get() internally; run it in a worker
        # thread so the ingress event loop stays responsive.
        return await asyncio.to_thread(broadcast, serve_handle, method, kwargs=kwargs)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/_internal/serve/core/ingress/mixins/broadcastable.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/llm/_internal/serve/core/ingress/mixins/cache_manager.py | """Cache manager ingress mixin.
Provides HTTP endpoints for cache management control plane operations.
"""
from pydantic import BaseModel
from starlette.responses import Response
from ray.llm._internal.serve.core.ingress.mixins.broadcastable import (
ReplicaBroadcastable,
)
from ray.llm._internal.serve.observability.logging import get_logger
logger = get_logger(__name__)
# --- Pydantic Models ---
class ResetPrefixCacheRequest(BaseModel):
    """Request to reset the prefix cache."""

    # Model ID whose replicas should drop their KV prefix cache.
    model: str
# --- Mixin ---
class CacheManagerIngressMixin(ReplicaBroadcastable):
    """Ingress mixin exposing the /reset_prefix_cache control-plane endpoint."""

    ENDPOINTS = {
        "reset_prefix_cache": lambda app: app.post("/reset_prefix_cache"),
    }

    async def reset_prefix_cache(self, body: ResetPrefixCacheRequest) -> Response:
        """Clear the KV prefix cache on every replica serving the model.

        Drops key-value pairs cached from previous requests; useful for
        benchmarking or explicit cache invalidation.

        Args:
            body: Request containing the model ID.

        Returns:
            200 OK on success.
        """
        logger.info("Resetting prefix cache for model: %s", body.model)
        await self._broadcast_to_replicas(body.model, "reset_prefix_cache")
        return Response(status_code=200)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/_internal/serve/core/ingress/mixins/cache_manager.py",
"license": "Apache License 2.0",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:python/ray/llm/_internal/serve/core/ingress/mixins/sleepable.py | """Sleepable ingress mixin.
Provides HTTP endpoints for sleep/wakeup control plane operations.
"""
from typing import Any, Dict
from fastapi import Query
from pydantic import BaseModel, Field
from starlette.responses import Response
from ray.llm._internal.serve.core.ingress.mixins.broadcastable import (
ReplicaBroadcastable,
)
from ray.llm._internal.serve.observability.logging import get_logger
logger = get_logger(__name__)
# --- Pydantic Models ---
class SleepRequest(BaseModel):
    """Request to put an engine to sleep."""

    # Model ID whose replicas should be put to sleep.
    model: str
    # Forwarded verbatim as kwargs to each replica's sleep() implementation.
    options: Dict[str, Any] = Field(
        default_factory=dict,
        description="Engine-specific sleep options (e.g., level for vLLM)",
    )
class WakeupRequest(BaseModel):
    """Request body for POST /wakeup: wake an engine from sleep."""

    # Target model ID whose replicas should be woken up.
    model: str
    # Forwarded verbatim as kwargs to the engine's wakeup call.
    options: Dict[str, Any] = Field(
        default_factory=dict,
        description="Engine-specific wakeup options (e.g., tags for vLLM)",
    )
class IsSleepingResponse(BaseModel):
    """Response body for GET /is_sleeping."""

    # True if ANY replica of the queried model reports sleeping (OR-aggregated).
    is_sleeping: bool
# --- Mixin ---
class SleepableIngressMixin(ReplicaBroadcastable):
    """Mixin exposing the sleep-mode control-plane endpoints.

    Registers POST /sleep, POST /wakeup and GET /is_sleeping. Sleep mode
    offloads model weights to CPU and discards the KV cache.
    """

    ENDPOINTS = {
        "sleep": lambda app: app.post("/sleep"),
        "wakeup": lambda app: app.post("/wakeup"),
        "is_sleeping": lambda app: app.get("/is_sleeping"),
    }

    async def sleep(self, body: SleepRequest) -> Response:
        """Broadcast a sleep command to every replica of the model.

        Sleeping frees GPU memory by offloading weights to CPU and
        discarding the KV cache; a sleeping engine cannot serve requests.

        Args:
            body: Target model ID plus engine-specific sleep options.

        Returns:
            An empty 200 OK response once the broadcast completes.
        """
        logger.info(
            "Putting model %s to sleep with options: %s", body.model, body.options
        )
        await self._broadcast_to_replicas(body.model, "sleep", kwargs=body.options)
        return Response(status_code=200)

    async def wakeup(self, body: WakeupRequest) -> Response:
        """Broadcast a wakeup command to every replica of the model.

        Args:
            body: Target model ID plus engine-specific wakeup options.

        Returns:
            An empty 200 OK response once the broadcast completes.
        """
        logger.info("Waking up model %s with options: %s", body.model, body.options)
        await self._broadcast_to_replicas(body.model, "wakeup", kwargs=body.options)
        return Response(status_code=200)

    async def is_sleeping(
        self, model: str = Query(..., description="The model ID to check")
    ) -> IsSleepingResponse:
        """Report whether any replica of ``model`` is currently asleep.

        The status is collected from every replica and OR-ed together, so a
        single sleeping replica makes the whole model report True.

        Args:
            model: The model ID to check.

        Returns:
            IsSleepingResponse carrying the aggregated boolean.
        """
        replica_states = await self._broadcast_to_replicas(model, "is_sleeping")
        # `or []` guards both an empty result list and a None result.
        return IsSleepingResponse(is_sleeping=any(replica_states or []))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/_internal/serve/core/ingress/mixins/sleepable.py",
"license": "Apache License 2.0",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:python/ray/_private/telemetry/metric_types.py | from enum import Enum
class MetricType(Enum):
    """Metric kinds understood by the telemetry system.

    There is deliberately no SUMMARY member: SUMMARY is a Prometheus
    metric type without an explicit OpenTelemetry counterpart. HISTOGRAM
    covers the same use cases (e.g., latency distributions with
    quantiles).
    """

    GAUGE = 0
    COUNTER = 1
    SUM = 2
    HISTOGRAM = 3
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/_private/telemetry/metric_types.py",
"license": "Apache License 2.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:python/ray/llm/_internal/serve/core/ingress/dev_ingress.py | """Development/RL-focused ingress with control plane endpoints.
This module provides DevIngress, an extension of OpenAiIngress that adds
control plane endpoints for managing engine lifecycle. These endpoints
are useful for RL training workflows where engines need to be put to sleep
during training and woken up for inference.
Endpoints:
POST /sleep: Put engine to sleep (frees GPU memory)
POST /wakeup: Wake up engine from sleep
GET /is_sleeping: Check if engine is sleeping
POST /pause: Pause generation (keeps weights in GPU)
POST /resume: Resume generation after pause
GET /is_paused: Check if engine is paused
POST /reset_prefix_cache: Reset the KV prefix cache
POST /collective_rpc: Execute collective RPC on all workers
"""
import pprint
from typing import Dict
from ray import serve
from ray.llm._internal.common.dict_utils import (
maybe_apply_llm_deployment_config_defaults,
)
from ray.llm._internal.serve.core.ingress.builder import LLMServingArgs
from ray.llm._internal.serve.core.ingress.ingress import (
DEFAULT_ENDPOINTS,
OpenAiIngress,
make_fastapi_ingress,
)
from ray.llm._internal.serve.core.ingress.mixins import (
CacheManagerIngressMixin,
CollectiveRpcIngressMixin,
PausableIngressMixin,
SleepableIngressMixin,
)
from ray.llm._internal.serve.core.server.builder import build_llm_deployment
from ray.llm._internal.serve.observability.logging import get_logger
from ray.serve.deployment import Application
logger = get_logger(__name__)
# Endpoint map for DevIngress - includes all default endpoints plus control plane
# routes from the mixins. DEFAULT_ENDPOINTS is merged last, so on any key
# collision the standard OpenAI routes take precedence over the mixin routes.
DEV_ENDPOINTS = {
    **CacheManagerIngressMixin.ENDPOINTS,
    **CollectiveRpcIngressMixin.ENDPOINTS,
    **PausableIngressMixin.ENDPOINTS,
    **SleepableIngressMixin.ENDPOINTS,
    **DEFAULT_ENDPOINTS,
}
class DevIngress(
    OpenAiIngress,
    SleepableIngressMixin,
    PausableIngressMixin,
    CacheManagerIngressMixin,
    CollectiveRpcIngressMixin,
):
    """OpenAI-compatible ingress with additional control plane endpoints.

    This ingress extends the standard OpenAI endpoints with control plane
    operations for managing engine lifecycle. These are useful for:

    - RL training: Put engines to sleep during training, wake up for rollouts
    - Memory management: Free GPU memory between inference workloads
    - Benchmarking: Reset prefix cache between benchmark rounds
    - RLHF: Execute collective RPC on all workers for weight updates

    Control plane endpoints provided by mixins:

    - SleepableIngressMixin: /sleep, /wakeup, /is_sleeping
    - PausableIngressMixin: /pause, /resume, /is_paused
    - CacheManagerIngressMixin: /reset_prefix_cache
    - CollectiveRpcIngressMixin: /collective_rpc

    WARNING: These endpoints are intended for development and trusted
    environments. Consider access control in production deployments.
    """

    # All behavior comes from OpenAiIngress plus the mixins above; this
    # class only combines them and defines nothing of its own.
    pass
def build_dev_openai_app(builder_config: Dict) -> Application:
    """Build an OpenAI-compatible Serve app that also exposes dev endpoints.

    Identical to build_openai_app except the ingress is DevIngress, which
    adds the control plane routes:

    - /sleep, /wakeup, /is_sleeping (sleep mode - offloads weights to CPU)
    - /pause, /resume, /is_paused (pause mode - keeps weights in GPU)
    - /reset_prefix_cache (cache management)
    - /collective_rpc (RLHF - execute RPC on all workers)

    Args:
        builder_config: Configuration conforming to LLMServingArgs.
            See LLMServingArgs for details on the expected structure.

    Returns:
        The configured Ray Serve Application.

    Example:
        config = {
            "llm_configs": [llm_config],
            "ingress_deployment_config": {}
        }
        app = build_dev_openai_app(config)
        serve.run(app)
    """
    serving_args = LLMServingArgs.model_validate(builder_config)
    deployments = [build_llm_deployment(cfg) for cfg in serving_args.llm_configs]

    # User-supplied deployment config overrides the class defaults.
    default_options = DevIngress.get_deployment_options(serving_args.llm_configs)
    deployment_options = maybe_apply_llm_deployment_config_defaults(
        default_options, serving_args.ingress_deployment_config
    )

    fastapi_ingress = make_fastapi_ingress(DevIngress, endpoint_map=DEV_ENDPOINTS)

    logger.info("============== Ingress Options ==============")
    logger.info(pprint.pformat(deployment_options))

    return serve.deployment(fastapi_ingress, **deployment_options).bind(
        llm_deployments=deployments,
        **serving_args.ingress_cls_config.ingress_extra_kwargs,
    )
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/_internal/serve/core/ingress/dev_ingress.py",
"license": "Apache License 2.0",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:python/ray/llm/tests/serve/cpu/deployments/routers/test_dev_ingress.py | """Tests for DevIngress control plane endpoints.
This module tests the HTTP endpoints exposed by DevIngress:
- POST /sleep, POST /wakeup, GET /is_sleeping
- POST /pause, POST /resume, GET /is_paused
- POST /reset_prefix_cache
These tests verify:
1. Endpoints are correctly registered and accessible
2. Broadcast API correctly broadcasts to replicas
3. Sleep/wakeup and pause/resume isolation between different models
"""
import sys
import httpx
import pytest
import ray
from ray import serve
from ray.llm._internal.serve.core.ingress.dev_ingress import DEV_ENDPOINTS, DevIngress
from ray.llm._internal.serve.core.ingress.ingress import make_fastapi_ingress
from ray.llm._internal.serve.core.server.llm_server import LLMServer
from ray.llm.tests.serve.mocks.mock_vllm_engine import MockVLLMEngine
from ray.serve.llm import LLMConfig, ModelLoadingConfig
@pytest.fixture(scope="module")
def ray_instance():
"""Initialize Ray for the module."""
if not ray.is_initialized():
ray.init()
yield
serve.shutdown()
ray.shutdown()
@pytest.fixture
def single_model_dev_ingress(ray_instance, disable_placement_bundles):
    """Serve app exposing DevIngress endpoints, backed by one mock model."""
    model_id = "test-model-1"
    config = LLMConfig(
        model_loading_config=ModelLoadingConfig(model_id=model_id),
        runtime_env={},
        log_engine_metrics=False,
    )

    # Back the LLMServer deployment with the mock engine (no GPU needed).
    server_deployment = serve.deployment(LLMServer).bind(
        config, engine_cls=MockVLLMEngine
    )

    # Wire up DevIngress with the full dev endpoint map.
    app_cls = make_fastapi_ingress(DevIngress, endpoint_map=DEV_ENDPOINTS)
    options = DevIngress.get_deployment_options([config])
    app = serve.deployment(app_cls, **options).bind(
        llm_deployments=[server_deployment],
    )

    serve.run(app, name="single-model-app")
    yield model_id
    serve.delete("single-model-app", _blocking=True)
@pytest.fixture
def two_model_dev_ingress(ray_instance, disable_placement_bundles):
    """Serve app with TWO mock model deployments to test isolation."""
    model_ids = ("test-model-1", "test-model-2")

    configs = [
        LLMConfig(
            model_loading_config=ModelLoadingConfig(model_id=mid),
            runtime_env={},
            log_engine_metrics=False,
        )
        for mid in model_ids
    ]

    # One mock-engine LLMServer deployment per model.
    server_deployments = [
        serve.deployment(LLMServer).bind(cfg, engine_cls=MockVLLMEngine)
        for cfg in configs
    ]

    # Wire up DevIngress with the full dev endpoint map.
    app_cls = make_fastapi_ingress(DevIngress, endpoint_map=DEV_ENDPOINTS)
    options = DevIngress.get_deployment_options(configs)
    app = serve.deployment(app_cls, **options).bind(
        llm_deployments=server_deployments,
    )

    serve.run(app, name="two-model-app")
    yield model_ids
    serve.delete("two-model-app", _blocking=True)
class TestDevIngressEndpoints:
    """Test DevIngress endpoints against a single-model deployment.

    The cycle tests below are order-dependent: each request mutates the
    engine state that the following request asserts on, so steps must not
    be reordered.
    """

    @pytest.mark.asyncio
    async def test_reset_prefix_cache_endpoint(self, single_model_dev_ingress):
        """Test POST /reset_prefix_cache endpoint returns 200."""
        model_id = single_model_dev_ingress
        async with httpx.AsyncClient(timeout=60.0) as client:
            response = await client.post(
                "http://localhost:8000/reset_prefix_cache",
                json={"model": model_id},
            )
            assert response.status_code == 200

    @pytest.mark.asyncio
    async def test_sleep_wakeup_cycle(self, single_model_dev_ingress):
        """Test full sleep -> is_sleeping -> wakeup -> is_sleeping cycle."""
        model_id = single_model_dev_ingress
        async with httpx.AsyncClient(timeout=60.0) as client:
            # Initial state - should not be sleeping
            response = await client.get(
                f"http://localhost:8000/is_sleeping?model={model_id}",
            )
            assert response.status_code == 200
            # `is False` (not falsy) guards against a missing key -> None.
            assert response.json().get("is_sleeping") is False

            # Sleep the engine
            response = await client.post(
                "http://localhost:8000/sleep",
                json={"model": model_id, "options": {"level": 1}},
            )
            assert response.status_code == 200

            # Check is_sleeping - should be True
            response = await client.get(
                f"http://localhost:8000/is_sleeping?model={model_id}",
            )
            assert response.status_code == 200
            assert response.json().get("is_sleeping") is True

            # Wake up the engine
            response = await client.post(
                "http://localhost:8000/wakeup",
                json={"model": model_id, "options": {}},
            )
            assert response.status_code == 200

            # Check is_sleeping - should be False again
            response = await client.get(
                f"http://localhost:8000/is_sleeping?model={model_id}",
            )
            assert response.status_code == 200
            assert response.json().get("is_sleeping") is False

    @pytest.mark.asyncio
    async def test_pause_resume_cycle(self, single_model_dev_ingress):
        """Test full pause -> is_paused -> resume -> is_paused cycle."""
        model_id = single_model_dev_ingress
        async with httpx.AsyncClient(timeout=60.0) as client:
            # Initial state - should not be paused
            response = await client.get(
                f"http://localhost:8000/is_paused?model={model_id}",
            )
            assert response.status_code == 200
            assert response.json().get("is_paused") is False

            # Pause the engine
            response = await client.post(
                "http://localhost:8000/pause",
                json={"model": model_id, "options": {"clear_cache": True}},
            )
            assert response.status_code == 200

            # Check is_paused - should be True
            response = await client.get(
                f"http://localhost:8000/is_paused?model={model_id}",
            )
            assert response.status_code == 200
            assert response.json().get("is_paused") is True

            # Resume the engine
            response = await client.post(
                "http://localhost:8000/resume",
                json={"model": model_id, "options": {}},
            )
            assert response.status_code == 200

            # Check is_paused - should be False again
            response = await client.get(
                f"http://localhost:8000/is_paused?model={model_id}",
            )
            assert response.status_code == 200
            assert response.json().get("is_paused") is False
class TestDevIngressModelIsolation:
    """Test that control plane operations are isolated per model.

    Uses the two-model fixture; each test sleeps/pauses only model_1 and
    asserts model_2's state is untouched. Steps are order-dependent.
    """

    @pytest.mark.asyncio
    async def test_sleep_wakeup_isolation(self, two_model_dev_ingress):
        """Test that sleeping model_1 does NOT affect model_2."""
        model_1, model_2 = two_model_dev_ingress
        async with httpx.AsyncClient(timeout=60.0) as client:
            # Both models should start awake
            response = await client.get(
                f"http://localhost:8000/is_sleeping?model={model_1}",
            )
            assert response.json().get("is_sleeping") is False
            response = await client.get(
                f"http://localhost:8000/is_sleeping?model={model_2}",
            )
            assert response.json().get("is_sleeping") is False

            # Sleep model_1 only
            response = await client.post(
                "http://localhost:8000/sleep",
                json={"model": model_1, "options": {"level": 1}},
            )
            assert response.status_code == 200

            # model_1 should be sleeping
            response = await client.get(
                f"http://localhost:8000/is_sleeping?model={model_1}",
            )
            assert response.json().get("is_sleeping") is True

            # model_2 should NOT be sleeping
            response = await client.get(
                f"http://localhost:8000/is_sleeping?model={model_2}",
            )
            assert response.json().get("is_sleeping") is False

            # Wake up model_1
            response = await client.post(
                "http://localhost:8000/wakeup",
                json={"model": model_1, "options": {}},
            )
            assert response.status_code == 200

            # Both should now be awake
            response = await client.get(
                f"http://localhost:8000/is_sleeping?model={model_1}",
            )
            assert response.json().get("is_sleeping") is False
            response = await client.get(
                f"http://localhost:8000/is_sleeping?model={model_2}",
            )
            assert response.json().get("is_sleeping") is False

    @pytest.mark.asyncio
    async def test_pause_resume_isolation(self, two_model_dev_ingress):
        """Test that pausing model_1 does NOT affect model_2."""
        model_1, model_2 = two_model_dev_ingress
        async with httpx.AsyncClient(timeout=60.0) as client:
            # Both models should start unpaused
            response = await client.get(
                f"http://localhost:8000/is_paused?model={model_1}",
            )
            assert response.json().get("is_paused") is False
            response = await client.get(
                f"http://localhost:8000/is_paused?model={model_2}",
            )
            assert response.json().get("is_paused") is False

            # Pause model_1 only
            response = await client.post(
                "http://localhost:8000/pause",
                json={"model": model_1, "options": {"clear_cache": True}},
            )
            assert response.status_code == 200

            # model_1 should be paused
            response = await client.get(
                f"http://localhost:8000/is_paused?model={model_1}",
            )
            assert response.json().get("is_paused") is True

            # model_2 should NOT be paused
            response = await client.get(
                f"http://localhost:8000/is_paused?model={model_2}",
            )
            assert response.json().get("is_paused") is False

            # Resume model_1
            response = await client.post(
                "http://localhost:8000/resume",
                json={"model": model_1, "options": {}},
            )
            assert response.status_code == 200

            # Both should now be unpaused
            response = await client.get(
                f"http://localhost:8000/is_paused?model={model_1}",
            )
            assert response.json().get("is_paused") is False
            response = await client.get(
                f"http://localhost:8000/is_paused?model={model_2}",
            )
            assert response.json().get("is_paused") is False
# Allow running this test module directly: python test_dev_ingress.py
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/tests/serve/cpu/deployments/routers/test_dev_ingress.py",
"license": "Apache License 2.0",
"lines": 257,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:release/llm_tests/serve/test_llm_serve_sleep_wakeup.py | """Test sleep/wakeup control plane API for Ray Serve LLM.
This test verifies that the DevIngress sleep/wakeup endpoints work correctly:
1. Engine starts in awake state (is_sleeping=False)
2. Sleep command puts engine to sleep (is_sleeping=True) and frees GPU memory
3. Wakeup command restores engine (is_sleeping=False) and restores GPU memory
4. Model can still serve requests after wakeup
NOTE (Kourosh): This is part of a design in progress for integrating Ray Serve
LLM with RL workloads. The API is not public and won't be documented until the
end-to-end story is finalized. Class names and endpoint names may change.
"""
import subprocess
import time
from typing import List
import pytest
import requests
from openai import OpenAI
from ray import serve
from ray.llm._internal.serve.core.ingress.dev_ingress import build_dev_openai_app
from ray.serve.llm import LLMConfig, ModelLoadingConfig
from ray._common.test_utils import wait_for_condition
from ray.serve._private.constants import SERVE_DEFAULT_APP_NAME
from ray.serve.schema import ApplicationStatus
# Small instruct model, cheap enough for a release test.
MODEL_ID = "Qwen/Qwen2-0.5B-Instruct"
# Default Ray Serve HTTP proxy address.
BASE_URL = "http://localhost:8000"
def get_llm_config() -> LLMConfig:
    """Build the LLMConfig used by this test, with sleep mode turned on."""
    model_loading = ModelLoadingConfig(model_id=MODEL_ID)
    return LLMConfig(
        model_loading_config=model_loading,
        deployment_config=dict(num_replicas=2),
        engine_kwargs=dict(
            tensor_parallel_size=2,
            # Required for the /sleep and /wakeup endpoints to work.
            enable_sleep_mode=True,
        ),
    )
def is_default_app_running():
    """Return True once the default Serve application reports RUNNING."""
    try:
        # KeyError if the app is not deployed yet; AttributeError if the
        # status object is not populated.
        app_status = serve.status().applications[SERVE_DEFAULT_APP_NAME].status
    except (KeyError, AttributeError):
        return False
    return app_status == ApplicationStatus.RUNNING
def get_gpu_memory_used_mb() -> List[float]:
    """Query nvidia-smi for per-device GPU memory usage.

    Returns:
        Memory used, in MB, for each visible GPU device.

    Raises:
        subprocess.CalledProcessError: If nvidia-smi exits non-zero.
    """
    query = [
        "nvidia-smi",
        "--query-gpu=memory.used",
        "--format=csv,noheader,nounits",
    ]
    output = subprocess.run(query, capture_output=True, text=True, check=True).stdout
    # One line per device; skip any blank lines.
    return [float(line.strip()) for line in output.strip().split("\n") if line.strip()]
def get_total_gpu_memory_mb() -> float:
    """Sum GPU memory usage (MB) across every visible device."""
    per_device = get_gpu_memory_used_mb()
    return sum(per_device)
def wait_for_server_ready(timeout: int = 240) -> None:
    """Poll the chat completions endpoint until it answers 200 or we time out.

    Args:
        timeout: Maximum number of seconds to keep polling.

    Raises:
        TimeoutError: If no successful response arrives within ``timeout``.
    """
    probe_payload = {
        "model": MODEL_ID,
        "messages": [{"role": "user", "content": "test"}],
        "max_tokens": 5,
        "temperature": 0,
    }
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            response = requests.post(
                f"{BASE_URL}/v1/chat/completions", json=probe_payload, timeout=10
            )
            if response.status_code == 200:
                print(f"Server at {BASE_URL} is ready to handle requests!")
                return
        except Exception as e:
            print(f"Waiting for server to be ready... (error: {e})")
        # Back off between probes regardless of outcome.
        time.sleep(2)
    raise TimeoutError(
        f"Server at {BASE_URL} did not become ready within {timeout} seconds"
    )
def test_sleep_wakeup_lifecycle():
    """Test the complete sleep/wakeup lifecycle with GPU memory verification.

    Nine ordered steps: verify awake state, record baseline GPU memory,
    sleep, verify sleeping + memory drop, wake up, verify awake + memory
    restoration, and finally a real chat completion. Steps depend on the
    engine state mutated by earlier steps and must not be reordered.
    """
    # Start Ray Serve with DevIngress
    llm_config = get_llm_config()
    app = build_dev_openai_app({"llm_configs": [llm_config]})
    serve.run(app, blocking=False)

    # Wait for application to be running
    wait_for_condition(is_default_app_running, timeout=300)
    wait_for_server_ready(timeout=240)
    time.sleep(5)  # Buffer time for server to be fully ready

    try:
        # Step 1: Verify initial state - engine should be awake
        print("\n=== Step 1: Checking initial state ===")
        response = requests.get(
            f"{BASE_URL}/is_sleeping?model={MODEL_ID}",
            timeout=10,
        )
        assert (
            response.status_code == 200
        ), f"is_sleeping returned {response.status_code}"
        initial_sleep_state = response.json().get("is_sleeping", None)
        assert (
            initial_sleep_state is False
        ), f"Expected is_sleeping=False, got {initial_sleep_state}"
        print(f"β Initial sleeping state: {initial_sleep_state}")

        # Step 2: Record baseline GPU memory
        print("\n=== Step 2: Recording baseline GPU memory ===")
        baseline_memory_mb = get_total_gpu_memory_mb()
        print(f"Baseline GPU memory: {baseline_memory_mb:.2f} MB")
        assert baseline_memory_mb > 0, "Baseline GPU memory should be > 0"

        # Step 3: Put engine to sleep
        print("\n=== Step 3: Putting engine to sleep ===")
        sleep_response = requests.post(
            f"{BASE_URL}/sleep",
            json={"model": MODEL_ID, "options": {"level": 1}},
            timeout=60,
        )
        assert (
            sleep_response.status_code == 200
        ), f"sleep returned {sleep_response.status_code}"
        print("β Sleep command executed successfully")

        # Wait a bit for sleep to complete
        time.sleep(5)

        # Step 4: Verify engine is sleeping
        print("\n=== Step 4: Verifying engine is sleeping ===")
        response = requests.get(
            f"{BASE_URL}/is_sleeping?model={MODEL_ID}",
            timeout=10,
        )
        assert response.status_code == 200
        sleep_state = response.json().get("is_sleeping", None)
        assert (
            sleep_state is True
        ), f"Expected is_sleeping=True after sleep, got {sleep_state}"
        print(f"β Sleeping state: {sleep_state}")

        # Step 5: Verify GPU memory reduction
        print("\n=== Step 5: Verifying GPU memory reduction ===")
        sleep_memory_mb = get_total_gpu_memory_mb()
        memory_reduction_mb = baseline_memory_mb - sleep_memory_mb
        memory_reduction_pct = (memory_reduction_mb / baseline_memory_mb) * 100
        print(f"GPU memory after sleep: {sleep_memory_mb:.2f} MB")
        print(
            f"Memory reduction: {memory_reduction_mb:.2f} MB ({memory_reduction_pct:.1f}%)"
        )
        # Sleeping offloads weights to CPU, so a large drop is expected.
        assert (
            memory_reduction_pct > 50
        ), f"Expected >50% memory reduction, got {memory_reduction_pct:.1f}%"
        print("β GPU memory reduced significantly after sleep")

        # Step 6: Wake up the engine
        print("\n=== Step 6: Waking up engine ===")
        wakeup_response = requests.post(
            f"{BASE_URL}/wakeup",
            json={"model": MODEL_ID, "options": {"tags": ["weights", "kv_cache"]}},
            timeout=60,
        )
        assert (
            wakeup_response.status_code == 200
        ), f"wakeup returned {wakeup_response.status_code}"
        print("β Wakeup command executed successfully")

        # Wait a bit for wakeup to complete
        time.sleep(5)

        # Step 7: Verify engine is awake
        print("\n=== Step 7: Verifying engine is awake ===")
        response = requests.get(
            f"{BASE_URL}/is_sleeping?model={MODEL_ID}",
            timeout=10,
        )
        assert response.status_code == 200
        wake_state = response.json().get("is_sleeping", None)
        assert (
            wake_state is False
        ), f"Expected is_sleeping=False after wakeup, got {wake_state}"
        print(f"β Sleeping state: {wake_state}")

        # Step 8: Verify GPU memory restoration
        print("\n=== Step 8: Verifying GPU memory restoration ===")
        wake_memory_mb = get_total_gpu_memory_mb()
        memory_diff_mb = abs(wake_memory_mb - baseline_memory_mb)
        memory_diff_pct = (memory_diff_mb / baseline_memory_mb) * 100
        print(f"GPU memory after wakeup: {wake_memory_mb:.2f} MB")
        print(f"Baseline memory: {baseline_memory_mb:.2f} MB")
        print(f"Memory difference: {memory_diff_mb:.2f} MB ({memory_diff_pct:.1f}%)")
        assert (
            memory_diff_pct < 20
        ), f"Expected <20% memory difference from baseline, got {memory_diff_pct:.1f}%"
        print("β GPU memory restored to near baseline after wakeup")

        # Step 9: Verify model can still serve requests
        print("\n=== Step 9: Verifying model can serve requests ===")
        client = OpenAI(base_url=f"{BASE_URL}/v1", api_key="fake-key")
        chat_response = client.chat.completions.create(
            model=MODEL_ID,
            messages=[{"role": "user", "content": "What is the capital of France?"}],
            max_tokens=10,
            temperature=0,
        )
        assert chat_response.choices[0].message.content is not None
        print(
            f"β Model successfully generated response: {chat_response.choices[0].message.content[:50]}..."
        )

        print("\n=== All tests passed! ===")
    finally:
        # Cleanup
        serve.shutdown()
        time.sleep(1)
# Allow running this release test directly: python test_llm_serve_sleep_wakeup.py
if __name__ == "__main__":
    pytest.main(["-xvs", __file__])
| {
"repo_id": "ray-project/ray",
"file_path": "release/llm_tests/serve/test_llm_serve_sleep_wakeup.py",
"license": "Apache License 2.0",
"lines": 208,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:ci/ray_ci/automation/copy_wanda_image.py | """
Copy Wanda-cached container images to a destination registry using crane.
Example:
bazel run //ci/ray_ci/automation:copy_wanda_image -- \\
--wanda-image-name manylinux-cibase \\
--destination-repository rayproject/manylinux2014 \\
--tag-suffix -x86_64 \\
--upload
Tags are generated in the format: YYMMDD.{commit_prefix}.{suffix}
For example: 251215.abc1234-x86_64
Run with --help to see all options.
"""
import logging
import sys
from datetime import datetime, timezone as tz
import click
from ci.ray_ci.automation.crane_lib import (
CraneError,
call_crane_copy,
call_crane_manifest,
)
from ci.ray_ci.utils import ecr_docker_login
# Configure logging
# Plain message-only format to stdout so CI log capture shows output verbatim.
logging.basicConfig(
    level=logging.INFO,
    format="%(message)s",
    stream=sys.stdout,
)
logger = logging.getLogger(__name__)
class CopyWandaImageError(Exception):
    """Raised when copying a Wanda-cached image to the destination fails."""
def _generate_destination_tag(commit: str, tag_suffix: str) -> str:
"""
Generate a destination tag in the format: YYMMDD.{commit_prefix}{suffix}
Examples:
251215.abc1234-x86_64
251215.abc1234-jdk-x86_64
"""
date_str = datetime.now(tz.utc).strftime("%y%m%d")
commit_prefix = commit[:7]
return f"{date_str}.{commit_prefix}{tag_suffix}"
def _image_exists(tag: str) -> bool:
    """Return True when ``tag`` has a manifest in the registry (via crane)."""
    try:
        call_crane_manifest(tag)
    except CraneError:
        return False
    return True
def _copy_image(source: str, destination: str, dry_run: bool = False) -> None:
    """Copy a container image from source to destination using crane.

    Args:
        source: Fully qualified source image reference.
        destination: Fully qualified destination image reference.
        dry_run: When True, only log what would be copied and return.
    """
    if dry_run:
        logger.info(f"DRY RUN: Would copy {source} -> {destination}")
        return
    logger.info(f"Copying {source} -> {destination}")
    call_crane_copy(source, destination)
    logger.info(f"Successfully copied to {destination}")
@click.command()
@click.option(
    "--rayci-work-repo",
    envvar="RAYCI_WORK_REPO",
    required=True,
    type=str,
    help="RAYCI work repository URL. Falls back to reading from RAYCI_WORK_REPO.",
)
@click.option(
    "--rayci-build-id",
    envvar="RAYCI_BUILD_ID",
    required=True,
    type=str,
    help="RAYCI build ID. Falls back to reading from RAYCI_BUILD_ID.",
)
@click.option(
    "--wanda-image-name",
    envvar="WANDA_IMAGE_NAME",
    required=True,
    type=str,
    help="Name of the Wanda-cached image (e.g., 'forge'). Falls back to reading from WANDA_IMAGE_NAME.",
)
@click.option(
    "--destination-repository",
    envvar="DESTINATION_REPOSITORY",
    required=True,
    type=str,
    help="Destination repository to copy the image to. Falls back to reading from DESTINATION_REPOSITORY.",
)
@click.option(
    "--tag-suffix",
    type=str,
    envvar="TAG_SUFFIX",
    required=True,
    help="Suffix for the tag (e.g., '-x86_64', '-jdk-x86_64'). Falls back to reading from TAG_SUFFIX.",
)
@click.option(
    "--buildkite-commit",
    envvar="BUILDKITE_COMMIT",
    required=True,
    type=str,
    help="Buildkite commit. Falls back to reading from BUILDKITE_COMMIT.",
)
@click.option(
    "--upload",
    is_flag=True,
    default=False,
    help="Upload the image to the registry. Without this flag, runs in dry-run mode.",
)
def main(
    rayci_work_repo: str,
    rayci_build_id: str,
    wanda_image_name: str,
    destination_repository: str,
    tag_suffix: str,
    buildkite_commit: str,
    upload: bool,
) -> None:
    """
    Copy a Wanda-cached image to a destination registry.

    Handles authentication for both ECR (source/Wanda cache) and Docker Hub
    (destination). Requires BUILDKITE_JOB_ID for Docker Hub authentication.

    Tags are generated in the format: YYMMDD.{commit_prefix}{suffix}
    For example: 251215.abc1234-x86_64

    By default, runs in dry-run mode. Use --upload to actually copy images.
    """
    if not upload:
        logger.info("DRY RUN MODE - no images will be copied")

    # Wanda tags the cached image as {work_repo}:{build_id}-{image_name}.
    source_tag = f"{rayci_work_repo}:{rayci_build_id}-{wanda_image_name}"
    destination_tag = _generate_destination_tag(buildkite_commit, tag_suffix)
    full_destination = f"{destination_repository}:{destination_tag}"
    logger.info(f"Source tag (Wanda): {source_tag}")
    logger.info(f"Target tag: {full_destination}")

    # Authenticate with ECR (source registry). Docker Hub authentication is
    # handled by copy_files.py. The registry host is the part before the
    # first "/" of the work-repo URL.
    ecr_registry = rayci_work_repo.split("/")[0]
    ecr_docker_login(ecr_registry)

    # Verify the source exists even in dry-run mode, so a bad build ID
    # fails loudly before anything is copied.
    logger.info("Verifying source image in Wanda cache...")
    if not _image_exists(source_tag):
        raise CopyWandaImageError(
            f"Source image not found in Wanda cache: {source_tag}"
        )

    # Copy image
    _copy_image(source_tag, full_destination, dry_run=not upload)
# Script entry point (invoked via bazel run //ci/ray_ci/automation:copy_wanda_image).
if __name__ == "__main__":
    main()
| {
"repo_id": "ray-project/ray",
"file_path": "ci/ray_ci/automation/copy_wanda_image.py",
"license": "Apache License 2.0",
"lines": 142,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:release/ray_release/scripts/list_release_tests.py | from typing import Tuple
import click
from ray_release.config import (
RELEASE_TEST_CONFIG_FILES,
read_and_validate_release_test_collection,
)
@click.command()
@click.option(
    "--test-collection-file",
    type=str,
    multiple=True,
    help="Test collection file, relative path to ray repo.",
)
@click.option(
    "--show-disabled",
    is_flag=True,
    default=False,
    help="Show disabled tests.",
)
def main(
    test_collection_file: Tuple[str],
    show_disabled: bool,
):
    """List release tests from the given collection files.

    Tests whose frequency is "manual" are considered disabled and are
    hidden unless --show-disabled is passed.
    """
    files = test_collection_file or tuple(RELEASE_TEST_CONFIG_FILES)
    for test in read_and_validate_release_test_collection(files):
        if not show_disabled and test.get("frequency", "missing") == "manual":
            continue
        print(
            f"{test['name']} "
            f"python={test.get('python', '')} "
            f"team={test.get('team', 'missing')}"
        )
# Script entry point; click parses options from argv.
if __name__ == "__main__":
    main()
| {
"repo_id": "ray-project/ray",
"file_path": "release/ray_release/scripts/list_release_tests.py",
"license": "Apache License 2.0",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/llm/examples/serve/example_reset_kv_cache/reset_kv_cache_example.py | """
Example: Resetting KV Cache in Ray Serve LLM via Control Plane Messages.
This example demonstrates two approaches to reset the KV cache on all replicas
of a Ray Serve LLM deployment using DevIngress:
1. **HTTP Endpoint Path** (`--use-http`):
Calls the built-in `/reset_prefix_cache` HTTP endpoint provided by
DevIngress via CacheManagerIngressMixin. Useful for external clients.
2. **In-Cluster Serve Handle Path** (default):
Uses Ray Serve's deployment handles and the broadcast API to send control
plane messages directly to all replicas. This keeps cache reset logic
within the cluster, avoiding HTTP overhead.
Both approaches use the same DevIngress server which provides control plane
endpoints (/sleep, /wakeup, /is_sleeping, /reset_prefix_cache).
The example:
1. Starts a Serve application with DevIngress and 2 replicas
2. Populates the KV cache on both replicas by sending multiple requests
3. Measures request time for a cached request (control)
4. Resets the KV cache using the selected method
5. Measures request time after cache reset (test)
6. Verifies that the cache was cleared by comparing request times
Usage:
# In-cluster path (using serve handles directly)
python reset_kv_cache_example.py
# HTTP endpoint path
python reset_kv_cache_example.py --use-http
"""
import argparse
import asyncio
import time
import httpx
from ray import serve
from ray.llm._internal.serve.core.ingress.dev_ingress import build_dev_openai_app
from ray.llm._internal.serve.utils.broadcast import broadcast
from ray.serve.llm import LLMConfig
# =============================================================================
# Server Startup
# =============================================================================
def create_llm_config(model: str) -> LLMConfig:
    """Build the LLMConfig used by this example.

    Args:
        model: Hugging Face model id to serve.

    Returns:
        An LLMConfig running two replicas with prefix caching enabled.
    """
    engine_kwargs = dict(
        enable_prefix_caching=True,
        enforce_eager=True,
        max_num_batched_tokens=128,
    )
    return LLMConfig(
        model_loading_config=dict(model_id=model),
        deployment_config=dict(num_replicas=2, name="llm"),
        engine_kwargs=engine_kwargs,
    )
def start_server(llm_config: LLMConfig):
    """Deploy the example application behind DevIngress.

    DevIngress exposes the control plane endpoints used by this example:
    /reset_prefix_cache (via CacheManagerIngressMixin) and
    /sleep, /wakeup, /is_sleeping (via SleepableIngressMixin).
    """
    serve_app = build_dev_openai_app({"llm_configs": [llm_config]})
    print("Starting server with DevIngress...")
    serve.run(serve_app)
    print("Server started. Control plane endpoints available.")
# =============================================================================
# Cache Reset Functions
# =============================================================================
async def reset_cache_via_http(model: str):
    """Clear the KV cache through DevIngress's HTTP control endpoint.

    POSTs to the /reset_prefix_cache endpoint provided by DevIngress via
    CacheManagerIngressMixin; raises for non-2xx responses.
    """
    endpoint = "http://localhost:8000/reset_prefix_cache"
    async with httpx.AsyncClient(timeout=60.0) as http_client:
        resp = await http_client.post(endpoint, json={"model": model})
        resp.raise_for_status()
def reset_cache_via_handle(model: str):
    """Clear the KV cache from inside the cluster via serve handles.

    Uses the broadcast API to deliver the control plane message to every
    replica directly, without going through HTTP.
    """
    handle = serve.get_deployment_handle("LLMServer:llm", app_name="default")
    broadcast(handle, "reset_prefix_cache")
# =============================================================================
# Test Utilities
# =============================================================================
async def send_request(prompt: str, model: str, measure_time: bool = False):
    """Send one completion request and optionally measure its latency.

    Args:
        prompt: Prompt text for the completion request.
        model: Model id to route the request to.
        measure_time: When True, return the elapsed request time in
            seconds; otherwise return None.

    Returns:
        Elapsed seconds as a float when ``measure_time`` is True, else None.
    """
    url = "http://localhost:8000/v1/completions"
    data = {
        "model": model,
        "prompt": prompt,
        "max_tokens": 1,
    }
    # Use a monotonic clock for durations: time.time() is wall-clock time and
    # can jump (NTP adjustments), while perf_counter() is designed for
    # measuring elapsed intervals.
    start_time = time.perf_counter() if measure_time else None
    async with httpx.AsyncClient(timeout=60.0) as client:
        response = await client.post(url, json=data)
        response.raise_for_status()
    if measure_time:
        return time.perf_counter() - start_time
async def populate_cache(prompts: list[str], model: str, repeat: int = 20):
    """Warm the KV cache on all replicas by repeating the same prompts."""
    print(
        f"Populating cache with {len(prompts)} prompt(s), repeating {repeat} times..."
    )
    # Fire every request concurrently; repetition makes it very likely that
    # each replica serves the prompt at least once.
    pending = [
        send_request(prompt, model, measure_time=False)
        for _ in range(repeat)
        for prompt in prompts
    ]
    await asyncio.gather(*pending)
    print("Cache populated.")
# =============================================================================
# Main
# =============================================================================
async def main(use_http: bool):
    """Run the end-to-end cache-reset demonstration.

    Args:
        use_http: When True, reset the cache via the HTTP
            /reset_prefix_cache endpoint; otherwise use in-cluster
            serve handles (broadcast API).
    """
    model = "Qwen/Qwen2.5-0.5B-Instruct"
    # Create LLM config and start server
    llm_config = create_llm_config(model)
    start_server(llm_config)
    # Determine reset method
    reset_method = (
        "HTTP endpoint (/reset_prefix_cache)"
        if use_http
        else "in-cluster serve handle (broadcast API)"
    )
    print(f"\nUsing {reset_method} for cache reset.\n")
    # Use long prompts to ensure prefill time is significant
    TEST_PROMPT = "The quick brown fox jumps over the lazy dog." * 3000
    # Step 1: Populate cache on all replicas
    print("Step 1: Populating cache on all replicas...")
    await populate_cache([TEST_PROMPT], model, repeat=20)
    # Step 2: Measure request time for cached request (control)
    print("\nStep 2: Measuring request time for cached request (control)...")
    control_time = await send_request(TEST_PROMPT, model, measure_time=True)
    print(f"Request time (cached): {control_time:.4f}s")
    # Step 3: Reset the KV cache using the selected method
    print(f"\nStep 3: Resetting KV cache via {reset_method}...")
    if use_http:
        await reset_cache_via_http(model)
    else:
        reset_cache_via_handle(model)
    print("KV cache reset complete.")
    # Step 4: Measure request time after cache reset (test)
    print("\nStep 4: Measuring request time after cache reset (test)...")
    test_time = await send_request(TEST_PROMPT, model, measure_time=True)
    print(f"Request time (after reset): {test_time:.4f}s")
    # Step 5: Verify by comparing the two request times
    print("\nStep 5: Verifying results...")
    print(f"Control (cached) time: {control_time:.4f}s")
    print(f"Test (after reset) time: {test_time:.4f}s")
    print(f"Slowdown factor: {test_time / control_time:.2f}x slower after reset")
    if test_time > control_time * 10:  # At least 10x slower on L4 instances
        print(
            "β SUCCESS: Request time increased after cache reset, "
            "indicating cache was cleared."
        )
    else:
        print(
            "β WARNING: Request time did not increase significantly. "
            "Cache may not have been reset properly."
        )
    print("\nDone. Shutting down...")
    time.sleep(2)
    serve.shutdown()
    print("Shutdown complete.")
# Parse the demo's single flag and run the async workflow.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Demonstrate KV cache reset in Ray Serve LLM.",
    )
    parser.add_argument(
        "--use-http",
        action="store_true",
        help="Reset cache via HTTP /reset_prefix_cache endpoint instead of "
        "in-cluster serve handles. Both use the same DevIngress server.",
    )
    args = parser.parse_args()
    asyncio.run(main(use_http=args.use_http))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/examples/serve/example_reset_kv_cache/reset_kv_cache_example.py",
"license": "Apache License 2.0",
"lines": 171,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/_internal/actor_autoscaler/actor_pool_resizing_policy.py | import abc
import math
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .autoscaling_actor_pool import AutoscalingActorPool
class ActorPoolResizingPolicy(abc.ABC):
    """Strategy interface controlling actor-pool resize magnitudes.

    Implementations decide how many actors to add on scale-up
    (``compute_upscale_delta``) and how many to remove on scale-down
    (``compute_downscale_delta``).
    """

    @abc.abstractmethod
    def compute_upscale_delta(
        self, actor_pool: "AutoscalingActorPool", util: float
    ) -> int:
        """Return the number of actors to add when scaling up.

        Args:
            actor_pool: The pool being scaled.
            util: Current utilization of the pool.

        Returns:
            The number of actors to add (must be >= 1).
        """
        ...

    @abc.abstractmethod
    def compute_downscale_delta(self, actor_pool: "AutoscalingActorPool") -> int:
        """Return the number of actors to remove when scaling down.

        Args:
            actor_pool: The pool being scaled down.

        Returns:
            The number of actors to remove (must be >= 1).
        """
        ...
class DefaultResizingPolicy(ActorPoolResizingPolicy):
    """Utilization-driven resizing: proportional scale-up, scale-down by one.

    The scale-up delta grows with how far utilization exceeds the configured
    threshold; scale-down always removes a single actor.
    """

    def __init__(
        self,
        upscaling_threshold: float,
        max_upscaling_delta: int,
    ):
        """Store the tuning knobs for scale-up sizing.

        Args:
            upscaling_threshold: Utilization level above which the pool
                should grow.
            max_upscaling_delta: Cap on how many actors a single scale-up
                operation may add.
        """
        self._upscaling_threshold = upscaling_threshold
        self._max_upscaling_delta = max_upscaling_delta

    def compute_upscale_delta(
        self, actor_pool: "AutoscalingActorPool", util: float
    ) -> int:
        current = actor_pool.current_size()
        # Size the increase proportionally to how far utilization overshoots
        # the threshold.
        desired = math.ceil(current * (util / self._upscaling_threshold - 1))
        # Respect both the per-step cap and the pool's remaining headroom.
        headroom = actor_pool.max_size() - current
        capped = min(desired, self._max_upscaling_delta, headroom)
        # Always scale up by at least one actor.
        return max(1, capped)

    def compute_downscale_delta(self, actor_pool: "AutoscalingActorPool") -> int:
        return 1
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/actor_autoscaler/actor_pool_resizing_policy.py",
"license": "Apache License 2.0",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:python/ray/llm/tests/serve/cpu/deployments/routers/test_make_fastapi_ingress.py | """Tests for make_fastapi_ingress function."""
import inspect
import sys
import pytest
from fastapi import FastAPI, Request
from fastapi.routing import APIRoute
from ray.llm._internal.serve.core.ingress.ingress import (
DEFAULT_ENDPOINTS,
OpenAiIngress,
make_fastapi_ingress,
)
class TestMakeFastapiIngress:
    """Test suite for make_fastapi_ingress."""

    @staticmethod
    def _api_route_paths(fastapi_app):
        """Return the paths of every APIRoute registered on the app."""
        return [
            route.path for route in fastapi_app.routes if isinstance(route, APIRoute)
        ]

    def test_subclass_inherits_endpoints(self):
        """Test that subclassing OpenAiIngress works with make_fastapi_ingress."""

        class SubclassedIngress(OpenAiIngress):
            """Custom ingress that inherits all OpenAI endpoints."""

            pass

        fastapi_app = FastAPI()
        # Building the ingress class must not raise.
        ingress_cls = make_fastapi_ingress(SubclassedIngress, app=fastapi_app)
        assert ingress_cls is not None
        # Endpoints inherited from OpenAiIngress should be registered.
        paths = self._api_route_paths(fastapi_app)
        assert "/v1/models" in paths
        assert "/v1/completions" in paths

    def test_subclass_with_custom_method(self):
        """Test that custom methods added by subclass are also properly handled."""

        class SubclassedIngress(OpenAiIngress):
            """Custom ingress with an additional endpoint."""

            async def custom_endpoint(self, request: Request):
                """A custom endpoint added by the subclass."""
                return {"status": "ok"}

        endpoint_map = {
            "custom_endpoint": lambda app: app.post("/custom"),
            **DEFAULT_ENDPOINTS,
        }
        fastapi_app = FastAPI()
        ingress_cls = make_fastapi_ingress(
            SubclassedIngress, endpoint_map=endpoint_map, app=fastapi_app
        )
        assert ingress_cls is not None
        assert "/custom" in self._api_route_paths(fastapi_app)

    def test_routes_registered_correctly(self):
        """Test that routes are registered with the FastAPI app."""

        class SubclassedIngress(OpenAiIngress):
            pass

        fastapi_app = FastAPI()
        make_fastapi_ingress(SubclassedIngress, app=fastapi_app)
        paths = self._api_route_paths(fastapi_app)
        # All default OpenAI-compatible endpoints should be present.
        assert "/v1/models" in paths
        assert "/v1/completions" in paths
        assert "/v1/chat/completions" in paths

    def test_custom_endpoint_map_overrides_defaults(self):
        """Test that custom endpoint_map can override default endpoints."""

        class SubclassedIngress(OpenAiIngress):
            async def models(self):
                """Override the models endpoint."""
                return {"custom": True}

        # Register only the models endpoint, under a non-default path.
        endpoint_map = {
            "models": lambda app: app.get("/custom/models"),
        }
        fastapi_app = FastAPI()
        make_fastapi_ingress(
            SubclassedIngress, endpoint_map=endpoint_map, app=fastapi_app
        )
        paths = self._api_route_paths(fastapi_app)
        assert "/custom/models" in paths
        assert "/v1/models" not in paths

    def test_deeply_nested_inheritance(self):
        """Test that deeply nested inheritance works correctly."""

        class IntermediateIngress(OpenAiIngress):
            """Intermediate class in inheritance chain."""

            async def intermediate_method(self, request: Request):
                return {"level": "intermediate"}

        class FinalIngress(IntermediateIngress):
            """Final class in inheritance chain."""

            async def final_method(self, request: Request):
                return {"level": "final"}

        endpoint_map = {
            "intermediate_method": lambda app: app.post("/intermediate"),
            "final_method": lambda app: app.post("/final"),
            **DEFAULT_ENDPOINTS,
        }
        fastapi_app = FastAPI()
        make_fastapi_ingress(FinalIngress, endpoint_map=endpoint_map, app=fastapi_app)
        paths = self._api_route_paths(fastapi_app)
        # Routes from every level of the hierarchy should be registered.
        assert "/intermediate" in paths
        assert "/final" in paths
        assert "/v1/completions" in paths

    def test_method_signature_preserved(self):
        """Test that method signatures are preserved after decoration."""

        class SubclassedIngress(OpenAiIngress):
            pass

        ingress_cls = make_fastapi_ingress(SubclassedIngress)
        # The decorated completions method keeps its original parameters.
        params = list(inspect.signature(ingress_cls.completions).parameters)
        assert "self" in params
        assert "body" in params
# Allow running this test module directly with verbose pytest output.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/tests/serve/cpu/deployments/routers/test_make_fastapi_ingress.py",
"license": "Apache License 2.0",
"lines": 120,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/serve/tests/typing_files/check_handle_typing.py | """Type checking tests for DeploymentHandle, DeploymentResponse, and DeploymentResponseGenerator.
This file is used with mypy to verify that the generic type annotations
on handle classes work correctly. Run with:
mypy python/ray/serve/tests/typing_files/check_handle_typing.py python/ray/serve/handle.py \
--follow-imports=skip \
--ignore-missing-imports
mypy will fail if any assert_type() call doesn't match the expected type.
"""
from typing import Any, AsyncIterator, Iterator, Union
from typing_extensions import assert_type
from ray.serve.handle import (
DeploymentHandle,
DeploymentResponse,
DeploymentResponseGenerator,
)
def test_deployment_response_types() -> None:
    """Test that DeploymentResponse[R] preserves R through methods."""
    # None is assigned purely to give mypy a typed value; nothing runs here.
    response: DeploymentResponse[str] = None  # type: ignore[assignment]
    # result() should return R (str)
    result = response.result()
    assert_type(result, str)
    # Properties should work
    assert_type(response.request_id, str)
    assert_type(response.by_reference, bool)
async def test_deployment_response_await() -> None:
    """Test that awaiting DeploymentResponse[R] returns R."""
    # Static-only probe: this coroutine is type-checked, never executed.
    response: DeploymentResponse[str] = None  # type: ignore[assignment]
    # Awaiting should return R (str)
    awaited_result = await response
    assert_type(awaited_result, str)
def test_deployment_response_generator_sync() -> None:
    """Test that DeploymentResponseGenerator[R] iteration returns R."""
    # Static-only probe: this function is type-checked, never executed.
    gen: DeploymentResponseGenerator[int] = None  # type: ignore[assignment]
    # __iter__ should return Iterator[R]
    assert_type(iter(gen), Iterator[int])
    # __next__ should return R (int)
    item = next(gen)
    assert_type(item, int)
    # For loop iteration
    for value in gen:
        assert_type(value, int)
        break
async def test_deployment_response_generator_async() -> None:
    """Test that async iteration of DeploymentResponseGenerator[R] returns R."""
    # Static-only probe: this coroutine is type-checked, never executed.
    gen: DeploymentResponseGenerator[int] = None  # type: ignore[assignment]
    # __aiter__ should return AsyncIterator[R]
    assert_type(gen.__aiter__(), AsyncIterator[int])
    # __anext__ should return R (int)
    item = await gen.__anext__()
    assert_type(item, int)
    # Async for loop iteration
    async for value in gen:
        assert_type(value, int)
        break
def test_deployment_handle_types() -> None:
    """Test DeploymentHandle type annotations."""
    class MyDeployment:
        def get_string(self) -> str:
            return "hello"
        def get_int(self) -> int:
            return 42
    # Static-only probe: the handle is never used at runtime.
    handle: DeploymentHandle[MyDeployment] = None  # type: ignore[assignment]
    # Basic properties
    assert_type(handle.deployment_name, str)
    assert_type(handle.app_name, str)
    assert_type(handle.is_initialized, bool)
    # options() should return DeploymentHandle[T]
    new_handle = handle.options(method_name="get_string")
    assert_type(new_handle, DeploymentHandle[MyDeployment])
    # remote() returns Union[DeploymentResponse[Any], DeploymentResponseGenerator[Any]]
    # (until plugin is implemented to infer the actual return type)
    response = handle.remote()
    assert_type(
        response,
        Union[DeploymentResponse[Any], DeploymentResponseGenerator[Any]],
    )
def test_chained_handle_access() -> None:
    """Test that accessing methods on handle returns typed handle."""
    class MyDeployment:
        def my_method(self, x: int) -> str:
            return str(x)
    # Static-only probe: the handle is never used at runtime.
    handle: DeploymentHandle[MyDeployment] = None  # type: ignore[assignment]
    # Accessing a method via __getattr__ should return DeploymentHandle[T]
    method_handle = handle.my_method
    assert_type(method_handle, DeploymentHandle[MyDeployment])
# =============================================================================
# TESTS THAT REQUIRE MYPY PLUGIN
# =============================================================================
# The following tests verify that the mypy plugin correctly infers return types
# based on which deployment method is being called. These tests are commented
# out because they will fail without the plugin.
#
# To enable after plugin is implemented:
# 1. Uncomment the tests below
# 2. Run: mypy python/ray/serve/tests/typing_files/check_handle_typing.py
# 3. All assert_type() calls should pass
# =============================================================================
def test_plugin_infers_method_return_type() -> None:
    """[REQUIRES PLUGIN] Test that remote() infers return type from method."""
    from typing import Generator
    class MyDeployment:
        def get_user(self, user_id: int) -> str:
            return f"user_{user_id}"
        def get_count(self) -> int:
            return 42
        def stream_items(self) -> Generator[bytes, None, None]:
            yield b"chunk1"
            yield b"chunk2"
    # NOTE: rename "_" to "handle" when enabling the commented assertions below.
    _: DeploymentHandle[MyDeployment] = None  # type: ignore[assignment]
    # # Calling a method that returns str should give DeploymentResponse[str]
    # response_str = handle.get_user.remote(123)
    # assert_type(response_str, DeploymentResponse[str])
    # # result() should return str
    # user = response_str.result()
    # assert_type(user, str)
    # # Calling a method that returns int should give DeploymentResponse[int]
    # response_int = handle.get_count.remote()
    # assert_type(response_int, DeploymentResponse[int])
    # # result() should return int
    # count = response_int.result()
    # assert_type(count, int)
async def test_plugin_infers_await_return_type() -> None:
    """[REQUIRES PLUGIN] Test that await infers return type from method."""
    class MyDeployment:
        def process(self, data: str) -> dict:
            return {"data": data}
    # NOTE: rename "_" to "handle" when enabling the commented assertions below.
    _: DeploymentHandle[MyDeployment] = None  # type: ignore[assignment]
    # response = handle.process.remote("test")
    # assert_type(response, DeploymentResponse[dict])
    # # Awaiting should return dict
    # result = await response
    # assert_type(result, dict)
def test_plugin_infers_generator_yield_type() -> None:
    """[REQUIRES PLUGIN] Test that streaming methods infer yield type."""
    from typing import Generator
    class MyDeployment:
        def stream_strings(self) -> Generator[str, None, None]:
            yield "a"
            yield "b"
        def stream_ints(self) -> Generator[int, None, None]:
            yield 1
            yield 2
    # NOTE: rename "_" to "handle" when enabling the commented assertions below.
    _: DeploymentHandle[MyDeployment] = None  # type: ignore[assignment]
    # # Streaming handle with generator method should give DeploymentResponseGenerator
    # streaming_handle = handle.options(stream=True)
    # gen_str = streaming_handle.stream_strings.remote()
    # assert_type(gen_str, DeploymentResponseGenerator[str])
    # # Iteration should yield str
    # for item in gen_str:
    #     assert_type(item, str)
    #     break
    # gen_int = streaming_handle.stream_ints.remote()
    # assert_type(gen_int, DeploymentResponseGenerator[int])
    # # Iteration should yield int
    # for item in gen_int:
    #     assert_type(item, int)
    #     break
async def test_plugin_async_generator_iteration() -> None:
    """[REQUIRES PLUGIN] Test async iteration with inferred yield type."""
    from typing import Generator
    class MyDeployment:
        def stream_bytes(self) -> Generator[bytes, None, None]:
            yield b"chunk"
    # NOTE: rename "_" to "handle" when enabling the commented assertions below.
    _: DeploymentHandle[MyDeployment] = None  # type: ignore[assignment]
    # streaming_handle = handle.options(stream=True)
    # gen = streaming_handle.stream_bytes.remote()
    # assert_type(gen, DeploymentResponseGenerator[bytes])
    # # Async iteration should yield bytes
    # async for chunk in gen:
    #     assert_type(chunk, bytes)
    #     break
def test_plugin_complex_return_types() -> None:
    """[REQUIRES PLUGIN] Test that container/generic return types are preserved."""
    from typing import Dict, List, Optional, Tuple
    class User:
        name: str
        age: int
    class MyDeployment:
        def get_users(self) -> List[User]:
            return []
        def get_user_dict(self) -> Dict[str, User]:
            return {}
        def get_optional(self) -> Optional[User]:
            return None
        def get_tuple(self) -> Tuple[str, int, User]:
            return ("", 0, User())
    # NOTE: rename "_" to "handle" when enabling the commented assertions below.
    _: DeploymentHandle[MyDeployment] = None  # type: ignore[assignment]
    # response_list = handle.get_users.remote()
    # assert_type(response_list, DeploymentResponse[List[User]])
    # users = response_list.result()
    # assert_type(users, List[User])
    # response_dict = handle.get_user_dict.remote()
    # assert_type(response_dict, DeploymentResponse[Dict[str, User]])
    # response_optional = handle.get_optional.remote()
    # assert_type(response_optional, DeploymentResponse[Optional[User]])
    # response_tuple = handle.get_tuple.remote()
    # assert_type(response_tuple, DeploymentResponse[Tuple[str, int, User]])
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/typing_files/check_handle_typing.py",
"license": "Apache License 2.0",
"lines": 196,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:doc/source/serve/doc_code/replica_scheduling.py | import ray
# __max_replicas_per_node_start__
from ray import serve
# Spread 6 replicas across nodes, allowing at most 2 per node.
@serve.deployment(num_replicas=6, max_replicas_per_node=2, ray_actor_options={"num_cpus": 0.1})
class MyDeployment:
    def __call__(self, request):
        return "Hello!"
app = MyDeployment.bind()
# __max_replicas_per_node_end__
# __placement_group_start__
from ray import serve
# Each replica reserves a placement group of two CPU bundles packed onto
# the same node (STRICT_PACK).
@serve.deployment(
    ray_actor_options={"num_cpus": 0.1},
    placement_group_bundles=[{"CPU": 0.1}, {"CPU": 0.1}],
    placement_group_strategy="STRICT_PACK",
)
class MultiCPUModel:
    def __call__(self, request):
        return "Processed with 2 CPUs"
multi_cpu_app = MultiCPUModel.bind()
# __placement_group_end__
# __placement_group_labels_start__
# Pin the placement group bundle to nodes labeled with A100 accelerators.
@serve.deployment(
    ray_actor_options={"num_cpus": 0.1},
    placement_group_bundles=[{"CPU": 0.1, "GPU": 1}],
    placement_group_bundle_label_selector=[
        {"ray.io/accelerator-type": "A100"}
    ]
)
def PlacementGroupBundleLabelSelector(request):
    return "Running in PG on A100"
pg_label_app = PlacementGroupBundleLabelSelector.bind()
# __placement_group_labels_end__
# __label_selectors_start__
from ray import serve
# Schedule only on nodes with A100 GPUs
# Replicas are placed only on nodes carrying the A100 accelerator label.
@serve.deployment(ray_actor_options={"label_selector": {"ray.io/accelerator-type": "A100"}})
class A100Model:
    def __call__(self, request):
        return "Running on A100"
# Schedule only on nodes with T4 GPUs
# Replicas are placed only on nodes carrying the T4 accelerator label.
@serve.deployment(ray_actor_options={"label_selector": {"ray.io/accelerator-type": "T4"}})
class T4Model:
    def __call__(self, request):
        return "Running on T4"
a100_app = A100Model.bind()
t4_app = T4Model.bind()
# __label_selectors_end__
# __fallback_strategy_start__
@serve.deployment(
    ray_actor_options={
        # Prefer zone us-west-2a...
        "label_selector": {"zone": "us-west-2a"},
        # ...but fall back to us-west-2b when the primary selector cannot be met.
        "fallback_strategy": [{"label_selector": {"zone": "us-west-2b"}}]
    }
)
class SoftAffinityDeployment:
    def __call__(self, request):
        return "Scheduling to a zone with soft constraints!"
soft_affinity_app = SoftAffinityDeployment.bind()
# __fallback_strategy_end__
# __label_selector_main_start__
if __name__ == "__main__":
    # RayCluster with resources to run example tests.
    # The node labels below satisfy the A100 and fallback-zone selectors used
    # by the deployments above.
    ray.init(
        labels={
            "ray.io/accelerator-type": "A100",
            "zone": "us-west-2b",
        },
        num_cpus=16,
        num_gpus=1,
        resources={"my_custom_resource": 10},
    )
    serve.run(a100_app, name="a100", route_prefix="/a100")
    # __label_selector_main_end__
    # Run remaining doc code.
    # max_replicas_per_node is raised to 6 so all replicas fit on this single node.
    serve.run(MyDeployment.options(max_replicas_per_node=6).bind(), name="max_replicas", route_prefix="/max_replicas")
    serve.run(multi_cpu_app, name="multi_cpu", route_prefix="/multi_cpu")
    serve.run(pg_label_app, name="pg_label", route_prefix="/pg_label")
    serve.run(soft_affinity_app, name="soft_affinity", route_prefix="/soft_affinity")
    serve.shutdown()
    ray.shutdown()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/doc_code/replica_scheduling.py",
"license": "Apache License 2.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/llm/_internal/batch/stages/prepare_multimodal_stage.py | """Prepare Multimodal Stage"""
import asyncio
import copyreg
from typing import Any, AsyncIterator, Dict, List
from ray.llm._internal.batch.stages.base import StatefulStage, StatefulStageUDF
def _reconstruct_media_with_bytes(cls, media, original_bytes):
"""Reconstruct MediaWithBytes by setting __dict__ directly.
This avoids triggering __getattr__ during unpickling, which would cause
infinite recursion since vLLM's MediaWithBytes.__getattr__ accesses
self.media unconditionally.
"""
obj = object.__new__(cls)
obj.__dict__["media"] = media
obj.__dict__["original_bytes"] = original_bytes
return obj
def _register_vllm_pickle_reducers():
"""Register pickle reducer for vLLM's MediaWithBytes to fix unpickling.
vLLM's MediaWithBytes has a __getattr__ that delegates to self.media,
but this causes infinite recursion during pickle.load() because pickle
creates objects via __new__ (not __init__), so self.media isn't set
when __getattr__ is first called.
TODO(seiji): remove when https://github.com/vllm-project/vllm/issues/30818
is fixed
"""
try:
from vllm.multimodal.base import MediaWithBytes
except ImportError:
return
def _reduce(obj):
return (
_reconstruct_media_with_bytes,
(type(obj), obj.media, obj.original_bytes),
)
copyreg.pickle(MediaWithBytes, _reduce)
_register_vllm_pickle_reducers()
class PrepareMultimodalUDF(StatefulStageUDF):
    """Stateful UDF that parses multimodal content out of chat messages via vLLM."""

    def __init__(
        self,
        data_column: str,
        expected_input_keys: List[str],
        model_config_kwargs: Dict[str, Any],
        chat_template_content_format: str,
        apply_sys_msg_formatting: bool = False,
    ):
        """
        Initialize the PrepareMultimodalUDF.
        Args:
            data_column: The data column name.
            expected_input_keys: The expected input keys of the stage.
            model_config_kwargs: The kwargs to pass to the model config.
            chat_template_content_format: The format to render message content.
            apply_sys_msg_formatting: When True, system messages are extracted
                before parsing and kept as plain strings (not converted to the
                list content format), then re-prepended to the parsed
                conversation.
        """
        super().__init__(data_column, expected_input_keys)
        # vLLM is imported lazily so this module can load without it installed.
        try:
            from vllm.config import ModelConfig
        except ImportError as e:
            raise ImportError(
                "vLLM is not installed or failed to import. Please run "
                "`pip install ray[llm]` to install required dependencies."
            ) from e
        self.model_config = ModelConfig(**model_config_kwargs)
        self.chat_template_content_format = chat_template_content_format
        self.apply_sys_msg_formatting = apply_sys_msg_formatting
    def _extract_system_messages(
        self, messages: List[Dict[str, Any]]
    ) -> tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
        """Extract system messages from the message list.
        System messages are kept as strings (not converted to list format) to avoid
        issues with chat templates that expect string system messages, e.g. Pixtral.
        Args:
            messages: The full message list.
        Returns:
            A tuple of (system_messages, non_system_messages).
        """
        system_messages = []
        non_system_messages = []
        for msg in messages:
            if msg.get("role") == "system":
                system_content = msg.get("content")
                if isinstance(system_content, list):
                    # Flatten list-format content into one newline-joined
                    # string, keeping only non-empty textual parts.
                    text_parts = []
                    for part in system_content:
                        if isinstance(part, dict) and part.get("type") == "text":
                            text_value = part.get("text") or part.get("content")
                            if text_value:
                                text_parts.append(str(text_value))
                        elif isinstance(part, str) and part:
                            text_parts.append(part)
                    system_content = "\n".join(text_parts) if text_parts else ""
                system_messages.append({**msg, "content": system_content})
            else:
                non_system_messages.append(msg)
        return system_messages, non_system_messages
    async def udf(self, batch: List[Dict[str, Any]]) -> AsyncIterator[Dict[str, Any]]:
        """
        Process multimodal data from input messages.
        Args:
            batch: A list of rows to process.
        Yields:
            Dict[str, Any]: A dictionary containing the multimodal data
            along with processing metadata.
        """
        try:
            from vllm.entrypoints.chat_utils import parse_chat_messages_async
        except ImportError as e:
            raise ImportError(
                "vLLM is not installed or failed to import. Please run "
                "`pip install ray[llm]` to install required dependencies."
            ) from e
        async def _process_row(row: Dict[str, Any]):
            # Extract system messages to keep them as strings (not converted to list format)
            # This avoids issues with chat templates that expect string system messages.
            system_messages = []
            messages_to_parse = row["messages"]
            if self.apply_sys_msg_formatting:
                system_messages, messages_to_parse = self._extract_system_messages(
                    row["messages"]
                )
            # Users can provide stable IDs for each multimodal item from messages to
            # enable engine to cache and reuse work across requests.
            conversation, mm_data, mm_uuids = await parse_chat_messages_async(
                messages_to_parse,
                self.model_config,
                content_format=self.chat_template_content_format,
            )
            if system_messages:
                conversation = system_messages + conversation
            return row, conversation, mm_uuids, mm_data
        # Rows are processed concurrently and yielded in completion order;
        # IDX_IN_BATCH_COLUMN carries the mapping back to the input order.
        tasks = [asyncio.create_task(_process_row(row)) for row in batch]
        for task in asyncio.as_completed(tasks):
            row, conversation, uuid, multimodal_data = await task
            output = {
                k: v
                for k, v in row.items()
                if k not in ("messages", self.IDX_IN_BATCH_COLUMN)
            }
            output.update(
                {
                    self.IDX_IN_BATCH_COLUMN: row[self.IDX_IN_BATCH_COLUMN],
                    "multimodal_data": multimodal_data,
                    # Use the parsed conversation which has placeholders embedded instead of the original messages
                    "messages": conversation,
                    "multimodal_uuids": uuid,
                }
            )
            yield output
class PrepareMultimodalStage(StatefulStage):
    """
    A stage that prepares multimodal data from the input messages for a specific model.
    """

    fn: StatefulStageUDF = PrepareMultimodalUDF

    def get_required_input_keys(self) -> Dict[str, str]:
        """The required input keys of the stage and their descriptions."""
        messages_description = (
            "A list of messages in OpenAI chat format. "
            "See https://platform.openai.com/docs/api-reference/chat/create "
            "for details."
        )
        return {"messages": messages_description}
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/_internal/batch/stages/prepare_multimodal_stage.py",
"license": "Apache License 2.0",
"lines": 159,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/llm/tests/batch/cpu/stages/test_prepare_multimodal_stage.py | import sys
import pytest
from ray.llm._internal.batch.stages.prepare_multimodal_stage import (
PrepareMultimodalUDF,
)
@pytest.mark.asyncio
async def test_prepare_multimodal_udf_image_url(image_asset):
    """image_url content parts should yield multimodal data plus their uuids."""
    image_url, _ = image_asset
    udf = PrepareMultimodalUDF(
        data_column="__data",
        expected_input_keys=["messages"],
        model_config_kwargs={"model": "Qwen/Qwen2.5-VL-3B-Instruct"},
        chat_template_content_format="string",
    )
    batch = {
        "__data": [
            {
                "messages": [
                    {"role": "system", "content": "You are a helpful assistant."},
                    {
                        "role": "user",
                        "content": [
                            {
                                "type": "text",
                                "text": "Describe this image in 10 words.",
                            },
                            {
                                "type": "image_url",
                                "image_url": {"url": image_url},
                                "uuid": "image-1-id",
                            },
                            {
                                "type": "image_url",
                                "image_url": {"url": image_url},
                                "uuid": "image-2-id",
                            },
                        ],
                    },
                ]
            }
        ]
    }
    results = []
    async for result in udf(batch):
        results.append(result["__data"][0])
    assert len(results) == 1
    assert "multimodal_data" in results[0]
    # Both user-provided images should be extracted.
    assert len(results[0]["multimodal_data"]["image"]) == 2
    assert "multimodal_uuids" in results[0]
    # Caller-supplied uuids must be preserved in message order.
    assert results[0]["multimodal_uuids"] == {"image": ["image-1-id", "image-2-id"]}
    assert "messages" in results[0]
@pytest.mark.asyncio
async def test_prepare_multimodal_udf_pil_image(image_asset):
    """A PIL image content part should be extracted into multimodal data."""
    _, image_pil = image_asset
    udf = PrepareMultimodalUDF(
        data_column="__data",
        expected_input_keys=["messages"],
        model_config_kwargs={"model": "Qwen/Qwen2.5-VL-3B-Instruct"},
        chat_template_content_format="string",
    )
    batch = {
        "__data": [
            {
                "messages": [
                    {"role": "system", "content": "You are a helpful assistant."},
                    {
                        "role": "user",
                        "content": [
                            {
                                "type": "text",
                                "text": "Describe this image in 10 words.",
                            },
                            {
                                "type": "image_pil",
                                "image_pil": image_pil,
                            },
                        ],
                    },
                ]
            }
        ]
    }
    results = []
    async for result in udf(batch):
        results.append(result["__data"][0])
    assert len(results) == 1
    assert "multimodal_data" in results[0]
    assert "messages" in results[0]
@pytest.mark.asyncio
async def test_prepare_multimodal_udf_no_multimodal_content():
"""
Multimodal stage should proceed as normal if there is no multimodal content provided in messages.
"""
udf = PrepareMultimodalUDF(
data_column="__data",
expected_input_keys=["messages"],
model_config_kwargs={"model": "Qwen/Qwen2.5-VL-3B-Instruct"},
chat_template_content_format="string",
)
batch = {
"__data": [
{
"messages": [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Hello, how are you?"},
]
}
]
}
results = []
async for result in udf(batch):
results.append(result["__data"][0])
assert len(results) == 1
assert "multimodal_data" in results[0]
assert results[0]["multimodal_data"] is None
assert "messages" in results[0]
def test_prepare_multimodal_udf_expected_keys():
udf = PrepareMultimodalUDF(
data_column="__data",
expected_input_keys=["messages"],
model_config_kwargs={"model": "Qwen/Qwen2.5-VL-3B-Instruct"},
chat_template_content_format="string",
)
assert udf.expected_input_keys == {"messages"}
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/tests/batch/cpu/stages/test_prepare_multimodal_stage.py",
"license": "Apache License 2.0",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:release/nightly_tests/dataset/autoscaling/does_not_over_provision.py | import time
import ray
from benchmark import Benchmark
from cluster_resource_monitor import ClusterResourceMonitor
def main():
"""This tests check if the cluster doesn't scale up more than necessary."""
ray.init()
def sleep_task(row):
time.sleep(1)
return row
with ClusterResourceMonitor() as monitor:
ray.data.range(1024, override_num_blocks=1024, concurrency=1).map(
sleep_task
).materialize()
peak_resources = monitor.get_peak_cluster_resources()
# There are 8 CPUs on a single node. The autoscaler shouldn't provision more
# than one node.
assert peak_resources.cpu == 8, f"Expected 8 CPUs, got {peak_resources.cpu}"
assert peak_resources.gpu == 0, f"Expected 0 GPUs, got {peak_resources.gpu}"
if __name__ == "__main__":
benchmark = Benchmark()
benchmark.run_fn("main", main)
benchmark.write_result()
| {
"repo_id": "ray-project/ray",
"file_path": "release/nightly_tests/dataset/autoscaling/does_not_over_provision.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:release/nightly_tests/dataset/cluster_resource_monitor.py | import time
import threading
from typing import Tuple, Optional
import ray
from ray.data._internal.execution.interfaces import ExecutionResources
class ClusterResourceMonitor:
"""Monitor and validate cluster resources during benchmark execution.
This class tracks the peak number of cluster resources during execution.
This can be used to validate that the autoscaler behaves well.
"""
def __init__(self):
if not ray.is_initialized():
raise RuntimeError("You must start Ray before using this monitor")
self._background_thread: Optional[threading.Thread] = None
self._stop_background_thread_event: Optional[threading.Event] = None
self._peak_cpu_count: float = 0
self._peak_gpu_count: float = 0
def __repr__(self):
return "ClusterResourceMonitor()"
def __enter__(self):
(
self._background_thread,
self._stop_background_thread_event,
) = self._start_background_thread()
return self
def get_peak_cluster_resources(self) -> ExecutionResources:
return ExecutionResources(cpu=self._peak_cpu_count, gpu=self._peak_gpu_count)
def _start_background_thread(
self, interval_s: float = 5.0
) -> Tuple[threading.Thread, threading.Event]:
stop_event = threading.Event()
def monitor_cluster_resources():
while not stop_event.is_set():
resources = ray.cluster_resources()
self._peak_cpu_count = max(
self._peak_cpu_count, resources.get("CPU", 0)
)
self._peak_gpu_count = max(
self._peak_gpu_count, resources.get("GPU", 0)
)
time.sleep(interval_s)
thread = threading.Thread(target=monitor_cluster_resources, daemon=True)
thread.start()
return thread, stop_event
def __exit__(self, exc_type, exc_val, exc_tb):
if self._background_thread is not None:
self._stop_background_thread_event.set()
self._background_thread.join()
| {
"repo_id": "ray-project/ray",
"file_path": "release/nightly_tests/dataset/cluster_resource_monitor.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/ray-overview/examples/langchain_agent_ray_serve/ci/nb2py.py | #!/usr/bin/env python3
import argparse
import nbformat
def convert_notebook(
input_path: str, output_path: str, ignore_cmds: bool = False
) -> None:
"""
Read a Jupyter notebook and write a Python script, converting all %%bash
cells and IPython "!" commands into subprocess.run calls that raise on error.
Cells that load or autoreload extensions are ignored.
"""
nb = nbformat.read(input_path, as_version=4)
with open(output_path, "w") as out:
for cell in nb.cells:
# Only process code cells
if cell.cell_type != "code":
continue
lines = cell.source.splitlines()
# Skip cells that load or autoreload extensions
if any(
l.strip().startswith("%load_ext autoreload")
or l.strip().startswith("%autoreload all")
for l in lines
):
continue
# Detect a %%bash cell
if lines and lines[0].strip().startswith("%%bash"):
if ignore_cmds:
continue
bash_script = "\n".join(lines[1:]).rstrip()
out.write("import subprocess\n")
out.write(
f"subprocess.run(r'''{bash_script}''',\n"
" shell=True,\n"
" check=True,\n"
" executable='/bin/bash')\n\n"
)
else:
# Detect any IPython '!' shell commands in code lines
has_bang = any(line.lstrip().startswith("!") for line in lines)
# Start with "serve run" "serve shutdown" "curl" or "anyscale service" commands
to_ignore_cmd = (
"serve run",
"serve shutdown",
"curl",
"anyscale service",
)
has_ignored_start = any(
line.lstrip().startswith(to_ignore_cmd) for line in lines
)
if has_bang or has_ignored_start:
if ignore_cmds:
continue
out.write("import subprocess\n")
for line in lines:
stripped = line.lstrip()
if stripped.startswith("!"):
cmd = stripped[1:].lstrip()
out.write(
f"subprocess.run(r'''{cmd}''',\n"
" shell=True,\n"
" check=True,\n"
" executable='/bin/bash')\n"
)
else:
out.write(line.rstrip() + "\n")
out.write("\n")
else:
# Regular Python cell:
code = cell.source.rstrip()
# Remove the line that calls the chat function (no model deployed in CI anyway)
code = code.replace('chat("What\'s the weather in Palo Alto?")', "")
out.write(code.rstrip() + "\n\n")
def main() -> None:
parser = argparse.ArgumentParser(
description="Convert a Jupyter notebook to a Python script, preserving bash cells and '!' commands as subprocess calls unless ignored with --ignore-cmds."
)
parser.add_argument("input_nb", help="Path to the input .ipynb file")
parser.add_argument("output_py", help="Path for the output .py script")
parser.add_argument(
"--ignore-cmds", action="store_true", help="Ignore bash cells and '!' commands"
)
args = parser.parse_args()
convert_notebook(args.input_nb, args.output_py, ignore_cmds=args.ignore_cmds)
if __name__ == "__main__":
main()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/langchain_agent_ray_serve/ci/nb2py.py",
"license": "Apache License 2.0",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/ray-overview/examples/langchain_agent_ray_serve/content/agent_with_mcp.py | import asyncio
import os
import time
from typing import Any, Dict, List
from urllib.parse import urljoin
from langchain.agents import create_agent
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
# ========== CONFIG ==========
# Easy-to-edit default configurations.
API_KEY = "VrBDo0s-qNOaP9kugBQtJQhGAIA6EUszb6iJHbB1xDQ"
OPENAI_COMPAT_BASE_URL = (
"https://llm-deploy-qwen-service-jgz99.cld-kvedzwag2qa8i5bj.s.anyscaleuserdata.com"
)
MODEL = "Qwen/Qwen3-4B-Instruct-2507-FP8"
TEMPERATURE = 0.01
WEATHER_MCP_BASE_URL = (
"https://weather-mcp-service-jgz99.cld-kvedzwag2qa8i5bj.s.anyscaleuserdata.com"
)
WEATHER_MCP_TOKEN = "uyOArxwCNeTpxn0odOW7hGY57tXQNNrF16Yy8ziskrY"
# Environment variable overrides.
# For deployment, you can override the above settings with environment variables.
API_KEY = os.getenv("OPENAI_API_KEY", API_KEY)
OPENAI_COMPAT_BASE_URL = os.getenv("OPENAI_COMPAT_BASE_URL", OPENAI_COMPAT_BASE_URL)
WEATHER_MCP_BASE_URL = os.getenv("WEATHER_MCP_BASE_URL", WEATHER_MCP_BASE_URL)
WEATHER_MCP_TOKEN = os.getenv("WEATHER_MCP_TOKEN", WEATHER_MCP_TOKEN)
# ========== LLM ==========
llm = ChatOpenAI(
model=MODEL,
base_url=urljoin(
OPENAI_COMPAT_BASE_URL, "v1"
), ## urljoin automatically appends "/v1" to the base URL.
api_key=API_KEY,
temperature=TEMPERATURE,
streaming=False,
)
# ========== SYSTEM PROMPT ==========
PROMPT = (
"You are a research assistant that uses multiple tool calls to gather comprehensive information.\n"
"\n"
"Follow this process:\n"
"- Break tasks into sub-questions.\n"
"- Prefer calling tools when information is external or time-sensitive.\n"
"- After each tool call, decide whether more calls are needed.\n"
"- When sufficient, provide a concise, actionable answer.\n"
"\n"
"Only output final answers or tool calls (no hidden thoughts)."
)
# ========== MCP Tools ==========
async def get_mcp_tools() -> List[Any]:
"""Return tools discovered from the configured MCP server."""
try:
from langchain_mcp_adapters.client import MultiServerMCPClient
headers = {
"Authorization": f"Bearer {WEATHER_MCP_TOKEN}",
}
mcp_client = MultiServerMCPClient(
{
"weather": {
"url": urljoin(
WEATHER_MCP_BASE_URL, "mcp"
), ## urljoin automatically appends "/mcp" to the base URL.
"transport": "streamable_http",
"headers": headers,
}
}
)
tools = await mcp_client.get_tools()
print(f"\n[MCP] Discovered {len(tools)} tool(s) from MCP.")
for tool_obj in tools:
# Most MCP adapters expose .name and .description on LangChain tools.
name = getattr(tool_obj, "name", type(tool_obj).__name__)
desc = getattr(tool_obj, "description", "") or ""
print(f" - {name}: {desc[:120]}")
return tools
except Exception as exc:
print(f"[MCP] Skipping MCP tools (error): {exc}")
return []
# ========== BUILD AGENT ==========
async def build_agent():
"""Instantiate an agent with MCP tools when available."""
mcp_tools = await get_mcp_tools()
tools = list(mcp_tools)
print(f"\n[Agent] Using {len(tools)} tool(s).")
memory = MemorySaver()
agent = create_agent(
llm,
tools,
system_prompt=PROMPT,
checkpointer=memory,
)
return agent
# ========== MAIN ==========
if __name__ == "__main__":
from helpers.agent_runner import run_agent_with_trace
async def main():
# Example request.
start_time = time.time()
user_request = "what is the weather like in palo alto?"
# Build the agent
agent = await build_agent()
# Set show_model_messages to False to hide the messages sent to model
await run_agent_with_trace(
agent=agent,
user_request=user_request,
system_prompt=PROMPT,
max_iterations=5,
show_model_messages=True,
)
end_time = time.time()
print(f"Time taken: {end_time - start_time} seconds")
asyncio.run(main())
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/langchain_agent_ray_serve/content/agent_with_mcp.py",
"license": "Apache License 2.0",
"lines": 112,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/ray-overview/examples/langchain_agent_ray_serve/content/helpers/agent_client_anyscale.py | import json
import requests
base_url = "https://agent-service-langchain-jgz99.cld-kvedzwag2qa8i5bj.s.anyscaleuserdata.com" ## replace with your service url
token = "nZp2BEjdloNlwGyxoWSpdalYGtkhfiHtfXhmV4BQuyk" ## replace with your service bearer token
SERVER_URL = f"{base_url}/chat" # For Anyscale deployment.
HEADERS = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"}
def chat(user_request: str, thread_id: str | None = None) -> None:
"""Send a chat request to the agent and stream the response."""
payload = {"user_request": user_request}
if thread_id:
payload["thread_id"] = thread_id
with requests.post(SERVER_URL, headers=HEADERS, json=payload, stream=True) as resp:
resp.raise_for_status()
# Capture thread_id for multi-turn conversations.
server_thread = resp.headers.get("X-Thread-Id")
if not thread_id and server_thread:
print(f"[thread_id: {server_thread}]")
# Stream SSE events.
for line in resp.iter_lines():
if not line:
continue
txt = line.decode("utf-8")
if txt.startswith("data: "):
txt = txt[len("data: ") :]
print(txt, flush=True)
# Test the agent.
chat("What's the weather in Palo Alto?")
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/langchain_agent_ray_serve/content/helpers/agent_client_anyscale.py",
"license": "Apache License 2.0",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/ray-overview/examples/langchain_agent_ray_serve/content/helpers/agent_client_local.py | import json
import requests
SERVER_URL = "http://127.0.0.1:8000/chat" # For local deployment.
HEADERS = {"Content-Type": "application/json"}
def chat(user_request: str, thread_id: str | None = None) -> None:
"""Send a chat request to the agent and stream the response."""
payload = {"user_request": user_request}
if thread_id:
payload["thread_id"] = thread_id
with requests.post(SERVER_URL, headers=HEADERS, json=payload, stream=True) as resp:
resp.raise_for_status()
# Capture thread_id for multi-turn conversations.
server_thread = resp.headers.get("X-Thread-Id")
if not thread_id and server_thread:
print(f"[thread_id: {server_thread}]")
# Stream SSE events.
for line in resp.iter_lines():
if not line:
continue
txt = line.decode("utf-8")
if txt.startswith("data: "):
txt = txt[len("data: ") :]
print(txt, flush=True)
# Test the agent.
chat("What's the weather in Palo Alto?")
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/langchain_agent_ray_serve/content/helpers/agent_client_local.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/ray-overview/examples/langchain_agent_ray_serve/content/helpers/agent_runner.py | """Agent runner with trace logging for debugging and monitoring."""
from typing import Any
from logger_utils import (
extract_final_text,
log_final,
log_model_request,
log_tool_end,
log_tool_start,
truncate_preview,
)
async def run_agent_with_trace(
agent: Any,
user_request: str,
system_prompt: str,
max_iterations: int = 5,
show_model_messages: bool = False,
) -> int:
"""Run the agent for a single user request while streaming events for logging.
Streams model and tool events for structured logging, captures the final
response text, and returns the number of tool calls used.
Args:
agent: The agent instance to run.
user_request: The user's input request.
system_prompt: The system prompt used by the agent.
max_iterations: Maximum number of iterations the agent can run.
show_model_messages: If True, logs messages sent to the model. If False, hides them.
Returns:
The number of tool calls made during execution.
"""
print("\n" + "#" * 60)
print(f"USER REQUEST: {user_request}")
print("#" * 60)
print(f"Max iterations: {max_iterations}")
print(f"Show model messages: {show_model_messages}")
config = {
"configurable": {"thread_id": "1"},
"recursion_limit": max_iterations * 2,
}
inputs = {"messages": [{"role": "user", "content": user_request}]}
tool_calls = 0
final_text = None
# Use low-level event stream so we can log everything deterministically.
async for ev in agent.astream_events(inputs, version="v2", config=config):
kind = ev["event"]
if kind == "on_chat_model_start":
# Only log model request if show_model_messages is True
if show_model_messages:
input_data = ev["data"].get("input", {})
messages_raw = input_data.get("messages", [])
# messages_raw is a list containing one list of message objects
# Unwrap it to get the actual messages
messages = (
messages_raw[0]
if messages_raw and isinstance(messages_raw[0], list)
else messages_raw
)
log_model_request(
name=ev["name"],
payload={
"messages": messages,
"system": system_prompt,
},
)
# Tool lifecycle.
elif kind == "on_tool_start":
name = ev.get("name") or ev["data"].get("name") or "tool"
args = ev["data"].get("input") or {}
log_tool_start(name, args)
elif kind == "on_tool_end":
name = ev.get("name") or "tool"
out = ev["data"].get("output")
log_tool_end(name, truncate_preview(out))
tool_calls += 1
# Final output (model end often carries it).
elif kind in ("on_chain_end", "on_chat_model_end"):
out = ev["data"].get("output")
final_text = extract_final_text(out) or final_text
if final_text is None:
final_text = "(No final text returned.)"
log_final(final_text, tool_calls)
return tool_calls
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/langchain_agent_ray_serve/content/helpers/agent_runner.py",
"license": "Apache License 2.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/ray-overview/examples/langchain_agent_ray_serve/content/llm_deploy_qwen.py | from ray.serve.llm import LLMConfig, build_openai_app
llm_config = LLMConfig(
model_loading_config=dict(
# The name your clients will use in the OpenAI-compatible API.
model_id="Qwen/Qwen3-4B-Instruct-2507-FP8",
# Hugging Face repo to pull from.
model_source="Qwen/Qwen3-4B-Instruct-2507-FP8",
),
# L4 (Ada) is FP8-friendly. Prefer H100 for best FP8 throughput.
accelerator_type="L4",
deployment_config=dict(
autoscaling_config=dict(
num_replicas=1, # use 1 replica for now
)
),
# vLLM engine flags.
engine_kwargs=dict(
# Qwen3 supports 262,144 context natively; but you need a GPU with large memory to serve.
max_model_len=65536,
# Qwen models use custom chat templates; needed for some Hugging Face repos.
trust_remote_code=True,
gpu_memory_utilization=0.9,
enable_auto_tool_choice=True,
tool_call_parser="hermes",
),
)
app = build_openai_app({"llm_configs": [llm_config]})
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/langchain_agent_ray_serve/content/llm_deploy_qwen.py",
"license": "Apache License 2.0",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/ray-overview/examples/langchain_agent_ray_serve/content/logger_utils.py | import json
from typing import Any, Dict, Optional, Sequence, Union
PREVIEW_LIMIT = 800
def _pp(obj: Any) -> str:
"""Pretty-print helper for logging."""
try:
return json.dumps(obj, indent=2, ensure_ascii=False)
except Exception:
return str(obj)
def _iter_messages(payload: Dict[str, Any]) -> Sequence[Any]:
"""Return an iterable of messages from a LangChain payload."""
messages = payload.get("messages")
if messages:
return messages
inputs = payload.get("inputs", {})
return inputs.get("messages", []) if isinstance(inputs, dict) else []
def log_model_request(name: str, payload: Dict[str, Any]) -> None:
"""Print the payload sent to the chat model."""
print("\n" + "=" * 70)
print(f"MODEL CALL β {name}")
print("=" * 70)
messages = _iter_messages(payload)
if messages:
print("\n-- Messages sent to model --")
for message in messages:
role = getattr(message, "type", getattr(message, "role", ""))
content = getattr(message, "content", "")
print(f"[{role}] {content}")
system = payload.get("system") or payload.get("system_prompt")
if system:
print("\n-- System prompt --")
print(system)
def log_tool_start(name: str, args: Dict[str, Any]) -> None:
"""Print diagnostic information when a tool call starts."""
print("\n" + "-" * 70)
print(f"TOOL START β {name}")
print("-" * 70)
print(_pp(args))
def log_tool_end(name: str, result_preview: str) -> None:
"""Print diagnostic information when a tool call ends."""
print("\n" + "-" * 70)
print(f"TOOL END β {name}")
print("-" * 70)
print(result_preview)
def log_final(text: str, tool_calls_count: int) -> None:
"""Print the final model response plus summary statistics."""
print("\n" + "=" * 70)
print("FINAL RESPONSE")
print("=" * 70)
print(text)
print("\n" + "#" * 60)
print(f"COMPLETED - Total tool calls made: {tool_calls_count}")
print("#" * 60 + "\n")
def truncate_preview(text: Union[str, Any], limit: int = PREVIEW_LIMIT) -> str:
"""Shorten long tool outputs to keep logs readable."""
if isinstance(text, str) and len(text) > limit:
return text[:limit] + f"... (len={len(text)})"
return str(text)
def extract_final_text(output: Any) -> Optional[str]:
"""Extract the final string content from LangChain outputs."""
if isinstance(output, dict):
messages = output.get("messages")
if messages:
last_message = messages[-1]
return getattr(last_message, "content", None)
if isinstance(output, list) and output:
last_entry = output[-1]
return getattr(last_entry, "content", None)
if isinstance(output, str):
return output
return None
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/langchain_agent_ray_serve/content/logger_utils.py",
"license": "Apache License 2.0",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/ray-overview/examples/langchain_agent_ray_serve/content/ray_serve_agent_deployment.py | import asyncio
import json
from contextlib import asynccontextmanager
from typing import AsyncGenerator
from uuid import uuid4
from fastapi import FastAPI, Request
from fastapi.encoders import jsonable_encoder
from starlette.responses import StreamingResponse
from ray import serve
from agent_with_mcp import build_agent
# ----------------------------------------------------------------------
# FastAPI app with an async lifespan hook.
# ----------------------------------------------------------------------
@asynccontextmanager
async def lifespan(app: FastAPI):
agent = await build_agent()
app.state.agent = agent
try:
yield
finally:
if hasattr(agent, "aclose"):
await agent.aclose()
fastapi_app = FastAPI(lifespan=lifespan)
@fastapi_app.post("/chat")
async def chat(request: Request):
"""
POST /chat
Body: {"user_request": "<text>", "thread_id": "<optional>", "checkpoint_ns": "<optional>"}
Streams LangGraph 'update' dicts as Server-Sent Events (one JSON object per event).
SSE frames look like:
data: {"some": "update"}
Errors are emitted as:
event: error
data: {"error": "ErrorType", "detail": "..."}
"""
body = await request.json()
user_request: str = body.get("user_request") or ""
# Threading and checkpoint identifiers.
thread_id = (
body.get("thread_id")
or request.headers.get("X-Thread-Id")
or str(uuid4()) # New thread per request if none provided.
)
checkpoint_ns = body.get("checkpoint_ns") # Optional namespacing.
# Build config for LangGraph.
config = {"configurable": {"thread_id": thread_id}}
if checkpoint_ns:
config["configurable"]["checkpoint_ns"] = checkpoint_ns
async def event_stream() -> AsyncGenerator[str, None]:
agent = request.app.state.agent
inputs = {"messages": [{"role": "user", "content": user_request}]}
try:
# Stream updates from the agent.
async for update in agent.astream(
inputs, config=config, stream_mode="updates"
):
safe_update = jsonable_encoder(update)
chunk = json.dumps(safe_update, ensure_ascii=False)
# Proper SSE framing: "data: <json>\n\n".
yield f"data: {chunk}\n\n"
except asyncio.CancelledError:
# Client disconnected; exit quietly without sending an error frame.
return
except Exception as e:
# Surface one terminal error event and end.
err = {"error": type(e).__name__, "detail": str(e)}
err_chunk = json.dumps(err, ensure_ascii=False)
# SSE with a named event for clients that listen for "error".
yield f"event: error\ndata: {err_chunk}\n\n"
# Expose thread id so the client can reuse it on the next call.
# Also add headers commonly used for SSE behind proxies.
headers = {
"X-Thread-Id": thread_id,
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"X-Accel-Buffering": "no", # Disable buffering in nginx, if present.
}
return StreamingResponse(
event_stream(),
media_type="text/event-stream",
headers=headers,
)
# ----------------------------------------------------------------------
# Ray Serve deployment wrapper.
# ----------------------------------------------------------------------
@serve.deployment(ray_actor_options={"num_cpus": 1})
@serve.ingress(fastapi_app)
class LangGraphServeDeployment:
"""Ray Serve deployment that exposes the FastAPI app as ingress."""
pass
app = LangGraphServeDeployment.bind()
# Deploy the agent app locally:
# serve run ray_serve_agent_deployment:app
#
# Deploy the agent using Anyscale service:
# anyscale service deploy ray_serve_agent_deployment:app
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/langchain_agent_ray_serve/content/ray_serve_agent_deployment.py",
"license": "Apache License 2.0",
"lines": 96,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/ray-overview/examples/langchain_agent_ray_serve/content/weather_mcp_ray.py | # Save the following code as `weather_mcp_ray.py`.
from typing import Any
import httpx
from fastapi import FastAPI
from mcp.server.fastmcp import FastMCP
from ray import serve
from contextlib import asynccontextmanager
# Constants.
NWS_API_BASE = "https://api.weather.gov"
USER_AGENT = "weather-app/1.0"
# Helper functions.
async def make_nws_request(url: str) -> dict[str, Any] | None:
headers = {"User-Agent": USER_AGENT, "Accept": "application/geo+json"}
async with httpx.AsyncClient(timeout=30.0) as client:
try:
resp = await client.get(url, headers=headers)
resp.raise_for_status()
return resp.json()
except Exception:
return None
def format_alert(feature: dict) -> str:
props = feature["properties"]
return (
f"Event: {props.get('event', 'Unknown')}\n"
f"Area: {props.get('areaDesc', 'Unknown')}\n"
f"Severity: {props.get('severity', 'Unknown')}\n"
f"Description: {props.get('description', 'No description available')}\n"
f"Instructions: {props.get('instruction', 'No specific instructions provided')}"
)
# Instantiate FastMCP and register tools via decorators.
mcp = FastMCP("weather", stateless_http=True)
@mcp.tool()
async def get_alerts(state: str) -> str:
"""Fetch active alerts for a given state code (for example, 'CA')."""
url = f"{NWS_API_BASE}/alerts/active/area/{state}"
data = await make_nws_request(url)
if not data or "features" not in data:
return "Unable to fetch alerts or no alerts found."
features = data["features"]
if not features:
return "No active alerts for this state."
return "\n---\n".join(format_alert(f) for f in features)
@mcp.tool()
async def get_forecast(latitude: float, longitude: float) -> str:
"""Fetch a 5-period weather forecast for given latitude and longitude."""
points_url = f"{NWS_API_BASE}/points/{latitude},{longitude}"
points_data = await make_nws_request(points_url)
if not points_data or "properties" not in points_data:
return "Unable to fetch forecast data for this location."
forecast_url = points_data["properties"].get("forecast")
if not forecast_url:
return "No forecast URL found for this location."
forecast_data = await make_nws_request(forecast_url)
if not forecast_data or "properties" not in forecast_data:
return "Unable to fetch detailed forecast."
periods = forecast_data["properties"].get("periods", [])
if not periods:
return "No forecast periods available."
parts: list[str] = []
for p in periods[:5]:
parts.append(
f"{p['name']}:\nTemperature: {p['temperature']}Β°{p['temperatureUnit']}\n"
+ f"Wind: {p['windSpeed']} {p['windDirection']}\n"
+ f"Forecast: {p['detailedForecast']}"
)
return "\n---\n".join(parts)
# FastAPI app and Ray Serve setup.
@asynccontextmanager
async def lifespan(app: FastAPI):
# Mount the MCP app.
app.mount("/", mcp.streamable_http_app())
# Enter the session_manager's context.
async with mcp.session_manager.run():
yield
fastapi_app = FastAPI(lifespan=lifespan)
@serve.deployment(
autoscaling_config={
"min_replicas": 1,
"max_replicas": 20,
"target_ongoing_requests": 5,
},
ray_actor_options={"num_cpus": 0.2},
)
@serve.ingress(fastapi_app)
class WeatherMCP:
def __init__(self):
pass
# Ray Serve entry point.
app = WeatherMCP.bind()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/langchain_agent_ray_serve/content/weather_mcp_ray.py",
"license": "Apache License 2.0",
"lines": 89,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:ci/fossa/ray_oss_analysis.py | #!/usr/bin/env python3
import argparse
import glob
import json
import logging
import os
import re
import shutil
import subprocess
from typing import Dict, List, Optional, Set, Tuple
import yaml
logger = logging.getLogger("ray_oss_analysis")
def _setup_logger(log_file: Optional[str] = None, enable_debug: bool = False) -> None:
"""
Setup logger for the script.
You can either use default console logger or enable additional file logger if needed by passing the log_file.
Setting the log level to debug if enable_debug is true.
"""
logger.setLevel(logging.DEBUG if enable_debug else logging.INFO)
formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
console_handler = logging.StreamHandler()
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
if log_file:
file_handler = logging.FileHandler(log_file, mode="w")
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.info(f"Logging to file: {log_file}")
logger.info(f"Log Level Set to {logging.getLevelName(logger.level)}")
# not being used since we moved to filtering only for source files to get c, cpp libraries
def _is_excluded_kind(kind_str: str) -> bool:
"""
Check if the kind is excluded.
"""
# split the kind_str by whitespace and get the first element as the kind
kind = kind_str.split(" ")[0]
# list of non-target rule kinds
non_target_rule_kinds = [
"config_setting",
"pkg_files",
"pkg_zip_impl",
"int_flag",
"string_flag",
"bool_flag",
"bind",
"constraint_value",
"constraint_setting",
"GENERATED_FILE",
]
python_rule_kinds = ["py_library", "py_binary", "py_test"]
target_rule_kinds = non_target_rule_kinds + python_rule_kinds
return kind in target_rule_kinds
def _is_build_tool(label: str) -> bool:
"""
Check if the label is a build tool.
"""
# list of build package labels that are present in dependencies but not part of the target code
build_package_labels = [
"bazel_tools",
"local_config_python",
"cython",
"local_config_cc",
]
return any(
build_package_label in label for build_package_label in build_package_labels
)
def _is_own_code(location: str) -> bool:
"""
Check if it is own code or not.
"""
if location is None:
return False
else:
return location.startswith(os.getcwd())
def _is_cpp_code(location: str) -> bool:
"""
Check if the label is C/C++ code.
"""
# list of C/C++ file extensions
cExtensions = [".c", ".cc", ".cpp", ".cxx", ".c++", ".h", ".hpp", ".hxx"]
return any(location.endswith(ext) for ext in cExtensions)
def _get_dependency_info(line_json: Dict) -> Tuple[str, str, str, str]:
"""
Get dependency info from the json line.
"""
# earlier when we were getting all types of packages there was a need to get the type of the package,
# but now we are only getting source files and we are not interested in the type of the package
# Yet we are keeping the code here, for future needs
type = line_json["type"]
match type:
case "SOURCE_FILE":
return (
type,
type,
line_json["sourceFile"]["name"],
_clean_path(line_json["sourceFile"]["location"]),
)
case "GENERATED_FILE":
return (
type,
type,
line_json["generatedFile"]["name"],
_clean_path(line_json["generatedFile"]["location"]),
)
case "PACKAGE_GROUP":
return type, type, line_json["packageGroup"]["name"], None
case "RULE":
return (
type,
line_json["rule"]["ruleClass"],
line_json["rule"]["name"],
line_json["rule"]["location"],
)
case _:
return type, type, "unknown", "unknown"
def _clean_path(path: str) -> str:
"""
Clean the path by removing location info.
"""
# Remove location information (e.g., :line:column) from the path
# Format is typically: /path/to/file.ext:line:column
return path.split(":")[0]
def _get_package_name(label: str) -> Optional[str]:
"""
Extract package name from bazel label.
matches @repo//pkg:target to repo. regex breaks the string into groups and we return the first group.
separated out so that this can be tested.
Returns None if the label doesn't have an external package name (e.g., local targets like //pkg:target).
"""
match = re.search(r"(?:@([^/]+))?//", label)
if match:
return match.group(1)
return None
def _get_bazel_dependencies(
    package_name: str, bazel_command: str
) -> Tuple[Set[str], Set[str]]:
    """
    Return the external package names and C/C++ file paths of the dependencies
    of ``package_name``, minus build tools and this repository's own code.

    Currently works only for C/C++ source files.

    Args:
        package_name: Bazel label to query, e.g. "//:gen_ray_pkg".
        bazel_command: Bazel binary to invoke ("bazel" or "bazelisk").

    Returns:
        (set of external package names, set of C/C++ file paths).
    """
    # package names of dependencies
    dependency_package_names = set()
    # file paths for actual files used
    file_paths = set()
    # works for c, cpp, not sure if the kind based filter works for other languages
    command = [
        bazel_command,
        "query",
        "--output=streamed_jsonproto",
        f"kind('source file', deps({package_name}))",
    ]
    logger.debug(f"Running command: {command}")
    lines = subprocess.check_output(command, text=True).splitlines()
    logger.debug(f"Found {len(lines)} dependencies")
    for line in lines:
        line_json = json.loads(line)
        # NOTE: the previous version bound this tuple to `type` (shadowing the
        # builtin) and re-bound the `package_name` parameter inside the loop.
        dep_type, _kind, label, location = _get_dependency_info(line_json)
        logger.debug(
            f"Dependency type: {dep_type}, Label: {label}, Location: {location}"
        )
        if _is_build_tool(label) or _is_own_code(location):
            logger.debug(f"Skipping dependency: {line} because it is a bad kind")
            continue
        elif location is not None and _is_cpp_code(location):
            # Guard against records without a file location (e.g. package
            # groups), which would previously have crashed _is_cpp_code.
            file_paths.add(location)
        dep_package_name = _get_package_name(label)
        if dep_package_name is not None:
            dependency_package_names.add(dep_package_name)
    return dependency_package_names, file_paths
def _copy_single_file(source: str, destination: str) -> None:
"""
Copy a single file from source to destination.
"""
# Create parent directories if they don't exist
os.makedirs(os.path.dirname(destination), exist_ok=True)
# Copy the file
try:
shutil.copy(source, destination)
except FileNotFoundError:
logger.warning(f"File not found, skipping: {source}")
def _copy_files(file_paths: Set[str], output_folder: str) -> None:
"""
Copy files to output folder.
"""
for file_path in file_paths:
logger.debug(f"Copying file: {file_path}")
destination = os.path.join(output_folder, file_path.split("external/")[-1])
_copy_single_file(file_path, destination)
def _copy_licenses(
package_names: Set[str], bazel_output_base: str, output_folder: str
) -> None:
"""
Copy licenses to output folder.
"""
for package_name in package_names:
license_paths = _expand_license_files(
os.path.join(bazel_output_base, "external", package_name)
)
for license_path in license_paths:
_copy_single_file(
license_path,
os.path.join(output_folder, license_path.split("external/")[-1]),
)
def _askalono_crawl(path: str) -> List[Dict]:
    """
    Crawl ``path`` for licenses using the askalono CLI.

    Returns the parsed JSON records for successfully identified licenses;
    records askalono reported errors for are logged at DEBUG level and
    dropped.
    """
    crawl_output = subprocess.check_output(
        ["askalono", "--format=json", "crawl", path],
        text=True,
    ).strip()
    # askalono emits one JSON object per line.
    # NOTE: the previous version shadowed both the raw-output variable inside
    # its own comprehension and the `license` builtin.
    results = [json.loads(line) for line in crawl_output.splitlines()]
    identified = [result for result in results if "error" not in result]
    failed = [result for result in results if "error" in result]
    for failure in failed:
        logger.debug(
            f"License Crawl failed for {failure['path']}: {failure['error']}"
        )
    return identified
def _expand_license_files(path: str) -> List[str]:
    """
    Find license-like files beneath ``path`` using case-insensitive glob
    patterns. Only regular files are returned, never directories.
    """
    # Case-insensitive spellings of the usual license file names.
    patterns = [
        "**/[Ll][Ii][Cc][Ee][Nn][Ss][Ee]*",  # LICENSE
        "**/[Cc][Oo][Pp][Yy][Ii][Nn][Gg]*",  # COPYING
        "**/[Nn][Oo][Tt][Ii][Cc][Ee]*",  # NOTICE
        "**/[Cc][Oo][Pp][Yy][Rr][Ii][Gg][Hh][Tt]*",  # COPYRIGHT
        "**/[Rr][Ee][Aa][Dd][Mm][Ee]*",  # README
    ]
    found = set()
    for pattern in patterns:
        full_pattern = os.path.join(path, pattern)
        matches = glob.glob(full_pattern, recursive=True)
        logger.debug(f"Pattern {full_pattern} matched {len(matches)} files")
        # Directories can match the glob too; keep regular files only.
        found.update(match for match in matches if os.path.isfile(match))
    return list(found)
def _expand_and_crawl(path: str) -> List[Dict]:
    """
    Locate license-like files under ``path`` and run askalono on each one,
    aggregating all parsed results.
    """
    results: List[Dict] = []
    for license_path in _expand_license_files(path):
        results.extend(_askalono_crawl(license_path))
    return results
def _get_askalono_results(dependencies: Set[str], bazel_output_base: str) -> List[Dict]:
    """
    Run askalono over every dependency and collect license records.

    Falls back to crawling individual license-like files when the whole-package
    crawl finds nothing; dependencies with no identifiable license at all get
    a placeholder "unknown" record.
    """
    license_info = []
    for dependency in dependencies:
        dependency_path = os.path.join(bazel_output_base, "external", dependency)
        crawl_results = _askalono_crawl(dependency_path)
        if not crawl_results:
            logger.warning(
                f"No license text found for {dependency}, trying to crawl licenses and copying files manually"
            )
            crawl_results = _expand_and_crawl(dependency_path)
        if not crawl_results:
            logger.warning(f"No license text found for {dependency}")
            license_info.append(
                {
                    "dependency": dependency,
                    "path": "unknown",
                    "license": "unknown",
                }
            )
            continue
        for record in crawl_results:
            license_info.append(
                {
                    "dependency": dependency,
                    "path": record["path"].split("external/")[-1],
                    "license": record["result"]["license"]["name"],
                }
            )
    return license_info
def _generate_fossa_deps_file(askalono_results: List[Dict], output_folder: str) -> None:
    """
    Write a fossa_deps.yaml "custom-dependencies" file from askalono results.

    Licenses and license-file paths are aggregated per dependency; multiple
    licenses are joined with " or ".
    """
    # dependency -> {"licenses": set of names, "file_licenses": ["path: name"]}
    grouped: Dict[str, Dict] = {}
    for result in askalono_results:
        logger.debug("generating fossa deps file: result: %s", result)
        dependency = result["dependency"]
        license_name = result["license"]
        license_file_path = result.get("path", "N/A")
        entry = grouped.setdefault(
            dependency, {"licenses": set(), "file_licenses": []}
        )
        entry["licenses"].add(license_name)
        entry["file_licenses"].append(
            f"{license_file_path.split('external/')[-1]}: {license_name}"
        )
    # Create custom dependencies with aggregated licenses, sorted by name for
    # a stable output file.
    custom_dependencies = []
    for dependency, entry in sorted(grouped.items()):
        licenses = entry["licenses"]
        file_licenses = entry["file_licenses"]
        logger.debug(
            f"generating fossa deps file: Dependency: {dependency}, Licenses: {licenses}"
        )
        # The description records which file each license name came from.
        description_parts = ["generated by ray_oss_analysis.py, askalono scan results."]
        if file_licenses:
            description_parts.append("License files: " + "; ".join(file_licenses))
        custom_dependencies.append(
            {
                "name": dependency,
                "license": " or ".join(sorted(licenses)),
                "version": "Non-versioned",
                "metadata": {
                    "description": " ".join(description_parts),
                },
            }
        )
    # Write to YAML file
    with open(os.path.join(output_folder, "fossa_deps.yaml"), "w") as file:
        yaml.dump(
            {"custom-dependencies": custom_dependencies},
            file,
            indent=4,
            sort_keys=False,
        )
def _change_working_directory() -> None:
"""
Change working directory to the workspace in case being executed as bazel py_binary.
"""
workspace = os.environ.get("BUILD_WORKING_DIRECTORY")
if workspace:
os.chdir(workspace)
if __name__ == "__main__":
    # CLI entry point: parse options, locate bazel's output base, collect the
    # C/C++ dependencies of the target package, then scan their licenses with
    # askalono and emit fossa-compatible outputs into the output folder.
    parser = argparse.ArgumentParser(description="OSS Analysis Combo Tool")
    parser.add_argument("-o", "--output", help="Output folder path", required=True)
    parser.add_argument(
        "-v", "--verbose", action="store_true", help="Enable verbose output"
    )
    parser.add_argument(
        "-cmd", "--bazel-cmd", help="Bazel command bazel/bazelisk", default="bazel"
    )
    parser.add_argument(
        "-p", "--package", help="Bazel package", default="//:gen_ray_pkg"
    )
    parser.add_argument("--log-file", help="Log file path")
    parser.add_argument(
        "--copy-files-for-fossa",
        action="store_true",
        help="Copy files for fossa analysis on top of askalono",
    )
    parser.formatter_class = argparse.RawTextHelpFormatter
    parser.description = """
    Ray OSS Analysis Tool - Analyze Ray's open source components
    current status: scans only c, cpp libraries are scanned and scanned via askalono
    """
    parser.epilog = """
    Examples:
    %(prog)s --output oss_analysis -cmd bazelisk # if bazel is not present or you would prefer bazelisk
    %(prog)s --output oss_analysis # if bazel is present
    %(prog)s --verbose
    """
    args = parser.parse_args()
    # Run from the invoking workspace when launched via bazel.
    _change_working_directory()
    _setup_logger(args.log_file, args.verbose)
    # Ask bazel where external repositories are checked out on disk.
    bazel_output_base = subprocess.check_output(
        [args.bazel_cmd, "info", "output_base"], text=True
    ).strip()
    package_names, file_paths = _get_bazel_dependencies(args.package, args.bazel_cmd)
    logger.info(f"Found {len(file_paths)} file paths")
    logger.info(f"Found {len(package_names)} package names")
    if args.copy_files_for_fossa:
        # Optionally mirror sources and licenses for a file-based fossa scan.
        _copy_files(file_paths, args.output)
        _copy_licenses(package_names, bazel_output_base, args.output)
    askalono_results = _get_askalono_results(package_names, bazel_output_base)
    with open(os.path.join(args.output, "askalono_results.json"), "w") as file:
        json.dump(askalono_results, file, indent=4)
    _generate_fossa_deps_file(askalono_results, args.output)
| {
"repo_id": "ray-project/ray",
"file_path": "ci/fossa/ray_oss_analysis.py",
"license": "Apache License 2.0",
"lines": 374,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:ci/fossa/test_ray_oss_analysis.py | import logging
import os
import sys
from unittest.mock import mock_open, patch
import pytest
from ci.fossa import ray_oss_analysis
@pytest.fixture(autouse=True)
def reset_logger():
    """Reset logger level before each test."""
    ray_oss_analysis.logger.setLevel(logging.INFO)


@patch("ci.fossa.ray_oss_analysis.logging.FileHandler")
def test_setup_logger(mock_file_handler) -> None:
    """_setup_logger honors the debug flag and attaches a file handler."""
    # Configure mock to have a valid level so logging check works
    mock_file_handler.return_value.level = logging.NOTSET
    # Test default setup
    ray_oss_analysis._setup_logger()
    assert ray_oss_analysis.logger.level == logging.INFO
    # Test with debug and log file
    ray_oss_analysis._setup_logger(log_file="test.log", enable_debug=True)
    assert ray_oss_analysis.logger.level == logging.DEBUG
    mock_file_handler.assert_called_with("test.log", mode="w")
def test_is_excluded_kind() -> None:
    """Excluded and python rule kinds are rejected; C/C++ rule kinds pass."""
    assert ray_oss_analysis._is_excluded_kind("config_setting rule")
    assert ray_oss_analysis._is_excluded_kind("py_library rule")
    assert not ray_oss_analysis._is_excluded_kind("cc_library rule")
    assert not ray_oss_analysis._is_excluded_kind("unknown_kind rule")


def test_is_build_tool() -> None:
    """Build-machinery labels are flagged; real dependency labels are not."""
    assert ray_oss_analysis._is_build_tool("@bazel_tools//src:windows")
    assert ray_oss_analysis._is_build_tool(
        "@local_config_cc//:builtin_include_directory_paths"
    )
    assert not ray_oss_analysis._is_build_tool("//src/ray/util:subreaper.h")
    assert not ray_oss_analysis._is_build_tool(
        "@upb//upbc:stage0/google/protobuf/compiler/plugin.upb.c"
    )


@patch("ci.fossa.ray_oss_analysis.os.getcwd")
def test_is_own_code(mock_getcwd) -> None:
    """Paths under the (mocked) cwd are own code; others and None are not."""
    mock_getcwd.return_value = "/repo/root"
    assert ray_oss_analysis._is_own_code("/repo/root/file.py")
    assert not ray_oss_analysis._is_own_code("/other/root/file.py")
    assert not ray_oss_analysis._is_own_code(None)


def test_is_cpp_code() -> None:
    """Only C/C++ source and header extensions count as C/C++ code."""
    assert ray_oss_analysis._is_cpp_code("file.cc")
    assert ray_oss_analysis._is_cpp_code("file.h")
    assert not ray_oss_analysis._is_cpp_code("file.py")
    assert not ray_oss_analysis._is_cpp_code("file.java")
def test_get_dependency_info() -> None:
    """Each bazel record type maps to the expected (type, kind, name, location)."""
    # SOURCE_FILE
    info = ray_oss_analysis._get_dependency_info(
        {"type": "SOURCE_FILE", "sourceFile": {"name": "name", "location": "loc"}}
    )
    assert info == ("SOURCE_FILE", "SOURCE_FILE", "name", "loc")
    # RULE
    info = ray_oss_analysis._get_dependency_info(
        {
            "type": "RULE",
            "rule": {"ruleClass": "cc_library", "name": "name", "location": "loc"},
        }
    )
    assert info == ("RULE", "cc_library", "name", "loc")
    # UNKNOWN
    info = ray_oss_analysis._get_dependency_info({"type": "UNKNOWN"})
    assert info == ("UNKNOWN", "UNKNOWN", "unknown", "unknown")


def test_clean_path() -> None:
    """The :line:column suffix is stripped; bare paths are unchanged."""
    assert ray_oss_analysis._clean_path("/path/to/file:10:20") == "/path/to/file"
    assert ray_oss_analysis._clean_path("/path/to/file") == "/path/to/file"


def test_get_package_name() -> None:
    """External repo names are extracted; local labels yield None."""
    # Test extraction logic
    assert ray_oss_analysis._get_package_name("@repo//pkg:target") == "repo"
    assert ray_oss_analysis._get_package_name("@repo//:target") == "repo"
    # Should be None for local targets if regex matches but group 1 is empty
    assert ray_oss_analysis._get_package_name("//pkg:target") is None
    assert ray_oss_analysis._get_package_name("@//:target") is None
@patch("ci.fossa.ray_oss_analysis.subprocess.check_output")
def test_get_bazel_dependencies(mock_check_output) -> None:
# Mock bazel query output
mock_output = "\n".join(
[
'{"type": "SOURCE_FILE", "sourceFile": {"name": "//:file.cc", "location": "/abs/file.cc:1:1"}}',
'{"type": "SOURCE_FILE", "sourceFile": {"name": "@dep//:lib.h", "location": "/external/dep/lib.h:1:1"}}',
'{"type": "RULE", "rule": {"ruleClass": "py_library", "name": "//:py_lib", "location": "/abs/lib.py:1:1"}}',
]
)
mock_check_output.return_value = mock_output
# Mock _is_own_code to exclude local files
with patch("ci.fossa.ray_oss_analysis._is_own_code") as mock_is_own:
# First file is own code, second is external
mock_is_own.side_effect = [True, False, True]
package_names, file_paths = ray_oss_analysis._get_bazel_dependencies(
"//:target", "bazel"
)
assert "dep" in package_names
assert "/external/dep/lib.h" in file_paths
assert "/abs/file.cc" not in file_paths # Own code
@patch("ci.fossa.ray_oss_analysis.shutil.copy")
@patch("ci.fossa.ray_oss_analysis.os.makedirs")
def test_copy_single_file(mock_makedirs, mock_copy) -> None:
ray_oss_analysis._copy_single_file("src", "dst")
mock_makedirs.assert_called_with(os.path.dirname("dst"), exist_ok=True)
mock_copy.assert_called_with("src", "dst")
@patch("ci.fossa.ray_oss_analysis.glob.glob")
def test_expand_license_files(mock_glob) -> None:
mock_glob.side_effect = [
["/path/LICENSE"], # Match LICENSE
[],
[],
[],
[], # No match for others
]
with patch("os.path.isfile", return_value=True):
paths = ray_oss_analysis._expand_license_files("/path")
assert paths == ["/path/LICENSE"]
@patch("ci.fossa.ray_oss_analysis.subprocess.check_output")
def test_askalono_crawl(mock_check_output) -> None:
mock_check_output.return_value = (
'{"path": "p", "result": {"score": 1.0, "license": {"name": "MIT"}}}\n'
)
results = ray_oss_analysis._askalono_crawl("/path")
assert len(results) == 1
assert results[0]["result"]["license"]["name"] == "MIT"
@patch("ci.fossa.ray_oss_analysis.yaml.dump")
@patch("builtins.open", new_callable=mock_open)
def test_generate_fossa_deps_file(mock_file, mock_yaml_dump) -> None:
askalono_results = [
{"dependency": "dep1", "license": "MIT", "path": "external/dep1/LICENSE"}
]
ray_oss_analysis._generate_fossa_deps_file(askalono_results, "output")
mock_file.assert_called_with(os.path.join("output", "fossa_deps.yaml"), "w")
# Check structure of dumped yaml
args, _ = mock_yaml_dump.call_args
data = args[0]
assert "custom-dependencies" in data
assert data["custom-dependencies"][0]["name"] == "dep1"
assert data["custom-dependencies"][0]["license"] == "MIT"
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "ci/fossa/test_ray_oss_analysis.py",
"license": "Apache License 2.0",
"lines": 136,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/_internal/cluster_autoscaler/resource_utilization_gauge.py | import abc
from typing import Optional
from ray.data._internal.average_calculator import TimeWindowAverageCalculator
from ray.data._internal.execution.interfaces import ExecutionResources
from ray.data._internal.execution.resource_manager import ResourceManager
from ray.util.metrics import Gauge
ClusterUtil = ExecutionResources
class ResourceUtilizationGauge(abc.ABC):
    """Interface for observing and reporting cluster resource utilization."""

    @abc.abstractmethod
    def observe(self):
        """Observe the cluster utilization."""
        ...

    @abc.abstractmethod
    def get(self) -> ClusterUtil:
        """Get the resource cluster utilization."""
        ...
class RollingLogicalUtilizationGauge(ResourceUtilizationGauge):
    """Tracks logical cluster utilization (global usage / global limits) per
    resource type, smoothed over a rolling time window, and optionally exports
    the instantaneous readings as per-dataset metrics gauges.

    Fix: the local division helper was previously named ``save_div`` — a typo
    of ``safe_div`` (it guards against a zero/empty denominator).
    """

    # Default time window in seconds to calculate the average of cluster utilization.
    DEFAULT_CLUSTER_UTIL_AVG_WINDOW_S: int = 10

    def __init__(
        self,
        resource_manager: ResourceManager,
        *,
        cluster_util_avg_window_s: float = DEFAULT_CLUSTER_UTIL_AVG_WINDOW_S,
        execution_id: Optional[str] = None,
    ):
        """
        Args:
            resource_manager: Source of global resource usage and limits.
            cluster_util_avg_window_s: Averaging window length in seconds.
            execution_id: When set, per-dataset utilization gauges are
                exported with this id as the "dataset" tag.
        """
        self._resource_manager = resource_manager
        self._execution_id = execution_id
        # One rolling-average calculator per resource dimension.
        self._cluster_cpu_util_calculator = TimeWindowAverageCalculator(
            cluster_util_avg_window_s
        )
        self._cluster_gpu_util_calculator = TimeWindowAverageCalculator(
            cluster_util_avg_window_s
        )
        self._cluster_mem_util_calculator = TimeWindowAverageCalculator(
            cluster_util_avg_window_s
        )
        self._cluster_obj_mem_util_calculator = TimeWindowAverageCalculator(
            cluster_util_avg_window_s
        )
        # Metrics gauges are only created when an execution id is provided.
        self._cluster_cpu_utilization_gauge = None
        self._cluster_gpu_utilization_gauge = None
        self._cluster_mem_utilization_gauge = None
        self._cluster_object_store_memory_utilization_gauge = None
        if self._execution_id is not None:
            self._cluster_cpu_utilization_gauge = Gauge(
                "data_cluster_cpu_utilization",
                description="Cluster utilization % (CPU)",
                tag_keys=("dataset",),
            )
            self._cluster_gpu_utilization_gauge = Gauge(
                "data_cluster_gpu_utilization",
                description="Cluster utilization % (GPU)",
                tag_keys=("dataset",),
            )
            self._cluster_mem_utilization_gauge = Gauge(
                "data_cluster_mem_utilization",
                description="Cluster utilization % (Memory)",
                tag_keys=("dataset",),
            )
            self._cluster_object_store_memory_utilization_gauge = Gauge(
                "data_cluster_object_store_memory_utilization",
                description="Cluster utilization % (Object Store Memory)",
                tag_keys=("dataset",),
            )

    def observe(self):
        """Report the cluster utilization based on global usage / global limits."""

        def safe_div(numerator, denominator):
            # A zero/None limit yields 0% utilization instead of dividing by
            # zero.
            if not denominator:
                return 0
            else:
                return numerator / denominator

        global_usage = self._resource_manager.get_global_usage()
        global_limits = self._resource_manager.get_global_limits()
        cpu_util = safe_div(global_usage.cpu, global_limits.cpu)
        gpu_util = safe_div(global_usage.gpu, global_limits.gpu)
        mem_util = safe_div(global_usage.memory, global_limits.memory)
        obj_store_mem_util = safe_div(
            global_usage.object_store_memory, global_limits.object_store_memory
        )
        # Feed the rolling averages.
        self._cluster_cpu_util_calculator.report(cpu_util)
        self._cluster_gpu_util_calculator.report(gpu_util)
        self._cluster_mem_util_calculator.report(mem_util)
        self._cluster_obj_mem_util_calculator.report(obj_store_mem_util)
        # Export the instantaneous readings (as percentages) when enabled.
        if self._execution_id is not None:
            tags = {"dataset": self._execution_id}
            if self._cluster_cpu_utilization_gauge is not None:
                self._cluster_cpu_utilization_gauge.set(cpu_util * 100, tags=tags)
            if self._cluster_gpu_utilization_gauge is not None:
                self._cluster_gpu_utilization_gauge.set(gpu_util * 100, tags=tags)
            if self._cluster_mem_utilization_gauge is not None:
                self._cluster_mem_utilization_gauge.set(mem_util * 100, tags=tags)
            if self._cluster_object_store_memory_utilization_gauge is not None:
                self._cluster_object_store_memory_utilization_gauge.set(
                    obj_store_mem_util * 100, tags=tags
                )

    def get(self) -> ExecutionResources:
        """Get the average cluster utilization based on global usage / global limits."""
        return ExecutionResources(
            cpu=self._cluster_cpu_util_calculator.get_average(),
            gpu=self._cluster_gpu_util_calculator.get_average(),
            memory=self._cluster_mem_util_calculator.get_average(),
            object_store_memory=self._cluster_obj_mem_util_calculator.get_average(),
        )
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/cluster_autoscaler/resource_utilization_gauge.py",
"license": "Apache License 2.0",
"lines": 104,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/data/_internal/average_calculator.py | import time
from collections import deque
from typing import Deque, Tuple
class TimeWindowAverageCalculator:
    """A utility class to calculate the average of values reported in a time window.

    Fix: window arithmetic now uses ``time.monotonic()`` instead of
    ``time.time()`` — the wall clock can jump backwards (e.g. NTP
    adjustments), which would corrupt the window trimming.
    """

    def __init__(
        self,
        window_s: float,
    ):
        """
        Args:
            window_s: Length of the sliding window in seconds; must be > 0.
        """
        assert window_s > 0
        # Time window in seconds.
        self._window_s = window_s
        # Values reported within the window as (timestamp, value) pairs in
        # arrival order; trimmed from the left as entries expire.
        self._values: Deque[Tuple[float, float]] = deque()
        # Running sum of all buffered values, kept in sync with `_values`.
        self._sum: float = 0

    def report(self, value: float):
        """Report a value to the calculator."""
        now = time.monotonic()
        self._values.append((now, value))
        self._sum += value
        self._trim(now)

    def get_average(self):
        """Get the average of values reported in the time window,
        or None if no values reported in the last time window.
        """
        self._trim(time.monotonic())
        if not self._values:
            return None
        return self._sum / len(self._values)

    def _trim(self, now):
        """Remove the values reported outside of the time window."""
        while self._values and now - self._values[0][0] > self._window_s:
            _, value = self._values.popleft()
            self._sum -= value
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/average_calculator.py",
"license": "Apache License 2.0",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/data/_internal/cluster_autoscaler/default_cluster_autoscaler_v2.py | import logging
import math
import time
from collections import Counter, defaultdict
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional
import ray
from .base_autoscaling_coordinator import AutoscalingCoordinator
from .default_autoscaling_coordinator import (
DefaultAutoscalingCoordinator,
)
from .resource_utilization_gauge import (
ResourceUtilizationGauge,
RollingLogicalUtilizationGauge,
)
from .util import cap_resource_request_to_limits
from ray._common.utils import env_bool, env_float, env_integer
from ray.data._internal.cluster_autoscaler import ClusterAutoscaler
from ray.data._internal.execution.interfaces.execution_options import ExecutionResources
from ray.data._internal.execution.util import memory_string
if TYPE_CHECKING:
from ray.data._internal.execution.resource_manager import ResourceManager
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class _NodeResourceSpec:
cpu: int
gpu: int
mem: int
def __post_init__(self):
assert isinstance(self.cpu, int)
assert self.cpu >= 0
assert isinstance(self.gpu, int)
assert self.gpu >= 0
assert isinstance(self.mem, int)
assert self.mem >= 0
def __str__(self):
return (
"{"
+ f"CPU: {self.cpu}, GPU: {self.gpu}, memory: {memory_string(self.mem)}"
+ "}"
)
@classmethod
def of(cls, *, cpu=0, gpu=0, mem=0):
cpu = math.floor(cpu)
gpu = math.floor(gpu)
mem = math.floor(mem)
return cls(cpu=cpu, gpu=gpu, mem=mem)
@classmethod
def from_bundle(cls, bundle: Dict[str, Any]) -> "_NodeResourceSpec":
return _NodeResourceSpec.of(
cpu=bundle.get("CPU", 0),
gpu=bundle.get("GPU", 0),
mem=bundle.get("memory", 0),
)
def to_bundle(self):
return {"CPU": self.cpu, "GPU": self.gpu, "memory": self.mem}
def _get_node_resource_spec_and_count() -> Dict[_NodeResourceSpec, int]:
    """Get the unique node resource specs and their count in the cluster."""
    nodes_resource_spec_count = defaultdict(int)
    # Seed with node shapes from the cluster config (count 0) so that node
    # types that can exist but currently have no alive nodes still appear.
    cluster_config = ray._private.state.state.get_cluster_config()
    if cluster_config and cluster_config.node_group_configs:
        for node_group_config in cluster_config.node_group_configs:
            # Skip groups with no resources or that can never scale up.
            if not node_group_config.resources or node_group_config.max_count == 0:
                continue
            node_resource_spec = _NodeResourceSpec.from_bundle(
                node_group_config.resources
            )
            nodes_resource_spec_count[node_resource_spec] = 0
    # Filter out the head node.
    node_resources = [
        node["Resources"]
        for node in ray.nodes()
        if node["Alive"] and "node:__internal_head__" not in node["Resources"]
    ]
    # Count the alive worker nodes per resource shape.
    for r in node_resources:
        node_resource_spec = _NodeResourceSpec.from_bundle(r)
        nodes_resource_spec_count[node_resource_spec] += 1
    return nodes_resource_spec_count
class DefaultClusterAutoscalerV2(ClusterAutoscaler):
"""Ray Data's second cluster autoscaler implementation.
It works in the following way:
* Check the average cluster utilization (CPU and memory)
in a time window (by default 10s). If the utilization is above a threshold (by
default 0.75), send a request to Ray's autoscaler to scale up the cluster.
* Unlike previous implementation, each resource bundle in the request is a node
resource spec, rather than an `incremental_resource_usage()`. This allows us
to directly scale up nodes.
* Cluster scaling down isn't handled here. It depends on the idle node
termination.
Notes:
* It doesn't consider multiple concurrent Datasets for now, as the cluster
utilization is calculated by "dataset_usage / global_resources".
"""
# Default cluster utilization threshold to trigger scaling up.
DEFAULT_CLUSTER_SCALING_UP_UTIL_THRESHOLD: float = env_float(
"RAY_DATA_CLUSTER_SCALING_UP_UTIL_THRESHOLD",
0.75,
)
# Default time window in seconds to calculate the average of cluster utilization.
DEFAULT_CLUSTER_UTIL_AVG_WINDOW_S: int = env_integer(
"RAY_DATA_CLUSTER_UTIL_AVG_WINDOW_S",
10,
)
# Default number of nodes to add per node type.
DEFAULT_CLUSTER_SCALING_UP_DELTA: int = env_integer(
"RAY_DATA_CLUSTER_SCALING_UP_DELTA",
1,
)
# Min number of seconds between two autoscaling requests.
MIN_GAP_BETWEEN_AUTOSCALING_REQUESTS: int = env_integer(
"RAY_DATA_MIN_GAP_BETWEEN_AUTOSCALING_REQUESTS",
10,
)
# The time in seconds after which an autoscaling request will expire.
AUTOSCALING_REQUEST_EXPIRE_TIME_S: int = env_integer(
"RAY_DATA_AUTOSCALING_REQUEST_EXPIRE_TIME_S",
180,
)
# Whether to disable INFO-level logs.
RAY_DATA_DISABLE_AUTOSCALER_LOGGING = env_bool(
"RAY_DATA_DISABLE_AUTOSCALER_LOGGING", False
)
    def __init__(
        self,
        resource_manager: "ResourceManager",
        execution_id: str,
        resource_limits: ExecutionResources = ExecutionResources.inf(),
        resource_utilization_calculator: Optional[ResourceUtilizationGauge] = None,
        cluster_scaling_up_util_threshold: float = DEFAULT_CLUSTER_SCALING_UP_UTIL_THRESHOLD,  # noqa: E501
        cluster_scaling_up_delta: float = DEFAULT_CLUSTER_SCALING_UP_DELTA,
        cluster_util_avg_window_s: float = DEFAULT_CLUSTER_UTIL_AVG_WINDOW_S,
        min_gap_between_autoscaling_requests_s: float = MIN_GAP_BETWEEN_AUTOSCALING_REQUESTS,  # noqa: E501
        autoscaling_coordinator: Optional[AutoscalingCoordinator] = None,
        get_node_counts: Callable[[], Dict[_NodeResourceSpec, int]] = (
            _get_node_resource_spec_and_count
        ),
    ):
        """
        Args:
            resource_manager: Provider of global resource usage and limits.
            execution_id: Id of the dataset execution; used to build the
                autoscaling requester id ("data-<execution_id>").
            resource_limits: Upper bound on the total resources this dataset
                may request; defaults to unlimited.
            resource_utilization_calculator: Gauge used to smooth utilization;
                a RollingLogicalUtilizationGauge is created when omitted.
            cluster_scaling_up_util_threshold: Utilization fraction above
                which scaling up is triggered.
            cluster_scaling_up_delta: Number of nodes to add per node type
                (rounded up to an int).
            cluster_util_avg_window_s: Window length in seconds for
                utilization averaging.
            min_gap_between_autoscaling_requests_s: Minimum seconds between
                two requests to Ray's autoscaler.
            autoscaling_coordinator: Coordinator used to submit requests; a
                DefaultAutoscalingCoordinator is created when omitted.
            get_node_counts: Callable returning current node shapes/counts.
        """
        assert cluster_scaling_up_delta > 0
        assert cluster_util_avg_window_s > 0
        assert min_gap_between_autoscaling_requests_s >= 0
        if resource_utilization_calculator is None:
            resource_utilization_calculator = RollingLogicalUtilizationGauge(
                resource_manager,
                cluster_util_avg_window_s=cluster_util_avg_window_s,
                execution_id=execution_id,
            )
        if autoscaling_coordinator is None:
            autoscaling_coordinator = DefaultAutoscalingCoordinator()
        self._resource_limits = resource_limits
        self._resource_utilization_calculator = resource_utilization_calculator
        # Threshold of cluster utilization to trigger scaling up.
        self._cluster_scaling_up_util_threshold = cluster_scaling_up_util_threshold
        self._cluster_scaling_up_delta = int(math.ceil(cluster_scaling_up_delta))
        self._min_gap_between_autoscaling_requests_s = (
            min_gap_between_autoscaling_requests_s
        )
        # Last time when a request was sent to Ray's autoscaler.
        self._last_request_time = 0
        self._requester_id = f"data-{execution_id}"
        self._autoscaling_coordinator = autoscaling_coordinator
        self._get_node_counts = get_node_counts
        # Send an empty request to register ourselves as soon as possible,
        # so the first `get_total_resources` call can get the allocated resources.
        self._send_resource_request([])
    def try_trigger_scaling(self):
        """Observe current utilization and, when any logical resource is at or
        above the scale-up threshold, request additional nodes (rate-limited
        by `_min_gap_between_autoscaling_requests_s`)."""
        # Note, should call this method before checking `_last_request_time`,
        # in order to update the average cluster utilization.
        self._resource_utilization_calculator.observe()
        # Limit the frequency of autoscaling requests.
        now = time.time()
        if now - self._last_request_time < self._min_gap_between_autoscaling_requests_s:
            return
        util = self._resource_utilization_calculator.get()
        if (
            util.cpu < self._cluster_scaling_up_util_threshold
            and util.gpu < self._cluster_scaling_up_util_threshold
            and util.memory < self._cluster_scaling_up_util_threshold
            and util.object_store_memory < self._cluster_scaling_up_util_threshold
        ):
            logger.debug(
                "Cluster utilization is below threshold: "
                f"CPU={util.cpu:.2f}, GPU={util.gpu:.2f}, memory={util.memory:.2f}, "
                f"object_store_memory={util.object_store_memory:.2f}."
            )
            # Send current resources allocation when upscaling is not needed,
            # to renew our registration on AutoscalingCoordinator.
            curr_resources = self._autoscaling_coordinator.get_allocated_resources(
                requester_id=self._requester_id
            )
            self._send_resource_request(curr_resources)
            return
        # We separate active bundles (existing nodes) from pending bundles (scale-up delta)
        # to ensure existing nodes' resources are never crowded out by scale-up requests.
        # TODO(hchen): We scale up all nodes by the same delta for now.
        # We may want to distinguish different node types based on their individual
        # utilization.
        active_bundles = []
        pending_bundles = []
        node_resource_spec_count = self._get_node_counts()
        for node_resource_spec, count in node_resource_spec_count.items():
            bundle = node_resource_spec.to_bundle()
            # Bundles for existing nodes -> active (must include)
            active_bundles.extend([bundle] * count)
            # Bundles for scale-up delta -> pending (best-effort)
            pending_bundles.extend([bundle] * self._cluster_scaling_up_delta)
        # Cap the resource request to respect user-configured limits.
        # Active bundles (existing nodes) are always included; pending bundles
        # (scale-up requests) are best-effort.
        resource_request = cap_resource_request_to_limits(
            active_bundles, pending_bundles, self._resource_limits
        )
        # Only log when the capped request asks for more than we already have.
        if resource_request != active_bundles:
            self._log_resource_request(util, active_bundles, resource_request)
        self._send_resource_request(resource_request)
    def _log_resource_request(
        self,
        current_utilization: ExecutionResources,
        active_bundles: List[Dict[str, float]],
        resource_request: List[Dict[str, float]],
    ) -> None:
        """Log why scale-up triggered and what is being requested.

        Args:
            current_utilization: The utilization values that exceeded the
                scale-up threshold.
            active_bundles: Bundles representing currently existing nodes.
            resource_request: The full (capped) list of bundles requested.
        """
        message = (
            "The utilization of one or more logical resource is higher than the "
            f"specified threshold of {self._cluster_scaling_up_util_threshold:.0%}: "
            f"CPU={current_utilization.cpu:.0%}, GPU={current_utilization.gpu:.0%}, "
            f"memory={current_utilization.memory:.0%}, "
            f"object_store_memory={current_utilization.object_store_memory:.0%}. "
            f"Requesting {self._cluster_scaling_up_delta} node(s) of each shape:"
        )
        # Summarize bundles as per-node-shape counts so the log reads as
        # "[shape: current -> requested]" for each shape.
        current_node_counts = Counter(
            [_NodeResourceSpec.from_bundle(bundle) for bundle in active_bundles]
        )
        requested_node_counts = Counter(
            [_NodeResourceSpec.from_bundle(bundle) for bundle in resource_request]
        )
        for node_spec, requested_count in requested_node_counts.items():
            current_count = current_node_counts.get(node_spec, 0)
            message += f" [{node_spec}: {current_count} -> {requested_count}]"
        # The flag allows demoting this (potentially chatty) log to DEBUG.
        if self.RAY_DATA_DISABLE_AUTOSCALER_LOGGING:
            level = logging.DEBUG
        else:
            level = logging.INFO
        logger.log(level, message)
def _send_resource_request(self, resource_request):
# Make autoscaler resource request.
self._autoscaling_coordinator.request_resources(
requester_id=self._requester_id,
resources=resource_request,
expire_after_s=self.AUTOSCALING_REQUEST_EXPIRE_TIME_S,
request_remaining=True,
)
self._last_request_time = time.time()
def on_executor_shutdown(self):
# Cancel the resource request when the executor is shutting down.
try:
self._autoscaling_coordinator.cancel_request(self._requester_id)
except Exception:
msg = (
f"Failed to cancel resource request for {self._requester_id}."
" The request will still expire after the timeout of"
f" {self._min_gap_between_autoscaling_requests_s} seconds."
)
logger.warning(msg, exc_info=True)
def get_total_resources(self) -> ExecutionResources:
"""Get total resources available from the autoscaling coordinator."""
resources = self._autoscaling_coordinator.get_allocated_resources(
requester_id=self._requester_id
)
total = ExecutionResources.zero()
for res in resources:
total = total.add(ExecutionResources.from_resource_dict(res))
return total
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/cluster_autoscaler/default_cluster_autoscaler_v2.py",
"license": "Apache License 2.0",
"lines": 271,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/tests/test_default_cluster_autoscaler_v2.py | import logging
from unittest.mock import MagicMock, patch
import pytest
import ray
from ray.core.generated import autoscaler_pb2
from ray.data._internal.cluster_autoscaler.default_cluster_autoscaler_v2 import (
DefaultClusterAutoscalerV2,
_get_node_resource_spec_and_count,
_NodeResourceSpec,
)
from ray.data._internal.cluster_autoscaler.fake_autoscaling_coordinator import (
FakeAutoscalingCoordinator,
)
from ray.data._internal.cluster_autoscaler.resource_utilization_gauge import (
ResourceUtilizationGauge,
)
from ray.data._internal.execution.interfaces.execution_options import ExecutionResources
class StubUtilizationGauge(ResourceUtilizationGauge):
    """Utilization gauge stub that always reports one fixed value."""
    def __init__(self, utilization: ExecutionResources):
        self._fixed_value = utilization
    def observe(self):
        # No-op: there is nothing to sample for a constant gauge.
        pass
    def get(self):
        return self._fixed_value
class TestClusterAutoscaling:
    """Tests for cluster autoscaling functions in DefaultClusterAutoscalerV2."""
    def setup_class(self):
        # Fixture node shapes: three worker node types (small CPU, large CPU,
        # and GPU) plus a head node. The head node carries the
        # "node:__internal_head__" resource and must be excluded from
        # worker-node counting.
        self._node_type1 = {
            "CPU": 4,
            "memory": 1000,
            "object_store_memory": 500,
        }
        self._node_type2 = {
            "CPU": 8,
            "memory": 2000,
            "object_store_memory": 500,
        }
        self._node_type3 = {
            "CPU": 4,
            "GPU": 1,
            "memory": 1000,
            "object_store_memory": 500,
        }
        self._head_node = {
            "CPU": 4,
            "memory": 1000,
            "object_store_memory": 500,
            "node:__internal_head__": 1.0,
        }
        ray.init()
    def teardown_class(self):
        ray.shutdown()
    def test_get_node_resource_spec_and_count(self):
        # Test _get_node_resource_spec_and_count
        node_table = [
            {
                "Resources": self._head_node,
                "Alive": True,
            },
            {
                "Resources": self._node_type1,
                "Alive": True,
            },
            {
                "Resources": self._node_type2,
                "Alive": True,
            },
            {
                "Resources": self._node_type3,
                "Alive": True,
            },
            {
                "Resources": self._node_type1,
                "Alive": True,
            },
            {
                # Dead node: must not be counted below.
                "Resources": self._node_type2,
                "Alive": False,
            },
        ]
        expected = {
            _NodeResourceSpec.of(
                cpu=self._node_type1["CPU"],
                gpu=self._node_type1.get("GPU", 0),
                mem=self._node_type1["memory"],
            ): 2,
            _NodeResourceSpec.of(
                cpu=self._node_type2["CPU"],
                gpu=self._node_type2.get("GPU", 0),
                mem=self._node_type2["memory"],
            ): 1,
            _NodeResourceSpec.of(
                cpu=self._node_type3["CPU"],
                gpu=self._node_type3.get("GPU", 0),
                mem=self._node_type3["memory"],
            ): 1,
        }
        # Patch cluster config to return None
        with (
            patch("ray.nodes", return_value=node_table),
            patch(
                "ray._private.state.state.get_cluster_config",
                return_value=None,
            ),
        ):
            assert _get_node_resource_spec_and_count() == expected
    @pytest.mark.parametrize("cpu_util", [0.5, 0.75])
    @pytest.mark.parametrize("gpu_util", [0.5, 0.75])
    @pytest.mark.parametrize("mem_util", [0.5, 0.75])
    def test_try_scale_up_cluster(self, cpu_util, gpu_util, mem_util):
        # Test _try_scale_up_cluster
        # Parametrization covers all 8 combinations of below/at-threshold
        # utilization for CPU, GPU, and object store memory.
        scale_up_threshold = 0.75
        scale_up_delta = 1
        utilization = ExecutionResources(
            cpu=cpu_util, gpu=gpu_util, object_store_memory=mem_util
        )
        fake_coordinator = FakeAutoscalingCoordinator()
        resource_spec1 = _NodeResourceSpec.of(cpu=4, gpu=0, mem=1000)
        resource_spec2 = _NodeResourceSpec.of(cpu=8, gpu=1, mem=1000)
        autoscaler = DefaultClusterAutoscalerV2(
            resource_manager=MagicMock(),
            resource_limits=ExecutionResources.inf(),
            execution_id="test_execution_id",
            cluster_scaling_up_delta=scale_up_delta,
            resource_utilization_calculator=StubUtilizationGauge(utilization),
            cluster_scaling_up_util_threshold=scale_up_threshold,
            min_gap_between_autoscaling_requests_s=0,
            autoscaling_coordinator=fake_coordinator,
            get_node_counts=lambda: {resource_spec1: 2, resource_spec2: 1},
        )
        autoscaler.try_trigger_scaling()
        # Should scale up if any resource is above the threshold.
        should_scale_up = (
            cpu_util >= scale_up_threshold
            or gpu_util >= scale_up_threshold
            or mem_util >= scale_up_threshold
        )
        resources_allocated = autoscaler.get_total_resources()
        if not should_scale_up:
            assert resources_allocated == ExecutionResources.zero()
        else:
            # Each existing shape should be requested at its current count
            # plus the scale-up delta.
            expected_num_resource_spec1_requested = 2 + scale_up_delta
            expected_num_resource_spec2_requested = 1 + scale_up_delta
            expected_resources = ExecutionResources(
                cpu=(
                    resource_spec1.cpu * expected_num_resource_spec1_requested
                    + resource_spec2.cpu * expected_num_resource_spec2_requested
                ),
                gpu=(
                    resource_spec1.gpu * expected_num_resource_spec1_requested
                    + resource_spec2.gpu * expected_num_resource_spec2_requested
                ),
                memory=(
                    resource_spec1.mem * expected_num_resource_spec1_requested
                    + resource_spec2.mem * expected_num_resource_spec2_requested
                ),
            )
            assert resources_allocated == expected_resources
    def test_get_node_resource_spec_and_count_from_zero(self):
        """Test that get_node_resource_spec_and_count can discover node types
        from cluster config even when there are zero worker nodes."""
        # Simulate a cluster with only head node (no worker nodes)
        node_table = [
            {
                "Resources": self._head_node,
                "Alive": True,
            },
        ]
        # Create a mock cluster config with 2 worker node types
        cluster_config = autoscaler_pb2.ClusterConfig()
        # Node type 1: 4 CPU, 0 GPU, 1000 memory
        node_group_config1 = autoscaler_pb2.NodeGroupConfig()
        node_group_config1.resources["CPU"] = 4
        node_group_config1.resources["memory"] = 1000
        node_group_config1.max_count = 10
        cluster_config.node_group_configs.append(node_group_config1)
        # Node type 2: 8 CPU, 2 GPU, 2000 memory
        node_group_config2 = autoscaler_pb2.NodeGroupConfig()
        node_group_config2.resources["CPU"] = 8
        node_group_config2.resources["GPU"] = 2
        node_group_config2.resources["memory"] = 2000
        node_group_config2.max_count = 5
        cluster_config.node_group_configs.append(node_group_config2)
        # Both configured node types should be reported with a count of 0.
        expected = {
            _NodeResourceSpec.of(cpu=4, gpu=0, mem=1000): 0,
            _NodeResourceSpec.of(cpu=8, gpu=2, mem=2000): 0,
        }
        with patch("ray.nodes", return_value=node_table):
            with patch(
                "ray._private.state.state.get_cluster_config",
                return_value=cluster_config,
            ):
                result = _get_node_resource_spec_and_count()
                assert result == expected
    def test_try_scale_up_cluster_from_zero(self):
        """Test that the autoscaler can scale up from zero worker nodes."""
        scale_up_threshold = 0.75
        scale_up_delta = 1
        # High utilization to trigger scaling
        utilization = ExecutionResources(cpu=0.9, gpu=0.9, object_store_memory=0.9)
        # Mock the node resource spec with zero counts
        resource_spec1 = _NodeResourceSpec.of(cpu=4, gpu=0, mem=1000)
        resource_spec2 = _NodeResourceSpec.of(cpu=8, gpu=2, mem=2000)
        fake_coordinator = FakeAutoscalingCoordinator()
        autoscaler = DefaultClusterAutoscalerV2(
            resource_manager=MagicMock(),
            resource_limits=ExecutionResources.inf(),
            execution_id="test_execution_id",
            cluster_scaling_up_delta=scale_up_delta,
            resource_utilization_calculator=StubUtilizationGauge(utilization),
            cluster_scaling_up_util_threshold=scale_up_threshold,
            min_gap_between_autoscaling_requests_s=0,
            autoscaling_coordinator=fake_coordinator,
            get_node_counts=lambda: {
                resource_spec1: 0,
                resource_spec2: 0,
            },
        )
        autoscaler.try_trigger_scaling()
        # Should request scale_up_delta nodes of each type
        # Verify via get_total_resources which returns what was allocated
        resources_allocated = autoscaler.get_total_resources()
        expected_resources = ExecutionResources(
            cpu=resource_spec1.cpu * scale_up_delta
            + resource_spec2.cpu * scale_up_delta,
            gpu=resource_spec1.gpu * scale_up_delta
            + resource_spec2.gpu * scale_up_delta,
            memory=resource_spec1.mem * scale_up_delta
            + resource_spec2.mem * scale_up_delta,
        )
        assert resources_allocated == expected_resources
    def test_low_utilization_sends_current_allocation(self):
        """Test that low utilization sends current allocation.
        Test scenario:
        1. Dataset has already been allocated resources (1 nodes)
        2. Utilization is low (0%, below default threshold)
        3. Should send current allocation to preserve resource footprint
        """
        # Placeholder; rebound below before each try_trigger_scaling() call.
        # FakeUtilizationGauge.get() closes over this local, so rebinding the
        # variable changes what the gauge reports.
        utilization: ExecutionResources = ...
        class FakeUtilizationGauge(ResourceUtilizationGauge):
            def observe(self):
                pass
            def get(self):
                return utilization
        node_resource_spec = _NodeResourceSpec.of(cpu=1, gpu=0, mem=0)
        autoscaler = DefaultClusterAutoscalerV2(
            resource_manager=MagicMock(),
            resource_limits=ExecutionResources.inf(),
            execution_id="test_execution_id",
            resource_utilization_calculator=FakeUtilizationGauge(),
            min_gap_between_autoscaling_requests_s=0,
            autoscaling_coordinator=FakeAutoscalingCoordinator(),
            get_node_counts=lambda: {node_resource_spec: 0},
        )
        # Trigger scaling with high utilization. The cluster autoscaler should request
        # one node.
        utilization = ExecutionResources(cpu=1)
        autoscaler.try_trigger_scaling()
        assert autoscaler.get_total_resources() == ExecutionResources(cpu=1)
        # Trigger scaling with low utilization. The cluster autoscaler should re-request
        # one node rather than no resources.
        utilization = ExecutionResources(cpu=0)
        autoscaler.try_trigger_scaling()
        assert autoscaler.get_total_resources() == ExecutionResources(cpu=1)
    def test_get_node_resource_spec_and_count_skips_max_count_zero(self):
        """Test that node types with max_count=0 are skipped."""
        # Simulate a cluster with only head node (no worker nodes)
        node_table = [
            {
                "Resources": self._head_node,
                "Alive": True,
            },
        ]
        # Create a mock cluster config with one valid node type and one with max_count=0
        cluster_config = autoscaler_pb2.ClusterConfig()
        # Node type 1: 4 CPU, 0 GPU, 1000 memory, max_count=10
        node_group_config1 = autoscaler_pb2.NodeGroupConfig()
        node_group_config1.resources["CPU"] = 4
        node_group_config1.resources["memory"] = 1000
        node_group_config1.max_count = 10
        cluster_config.node_group_configs.append(node_group_config1)
        # Node type 2: 8 CPU, 2 GPU, 2000 memory, max_count=0 (should be skipped)
        node_group_config2 = autoscaler_pb2.NodeGroupConfig()
        node_group_config2.resources["CPU"] = 8
        node_group_config2.resources["GPU"] = 2
        node_group_config2.resources["memory"] = 2000
        node_group_config2.max_count = 0  # This should be skipped
        cluster_config.node_group_configs.append(node_group_config2)
        # Only the first node type should be discovered
        expected = {
            _NodeResourceSpec.of(cpu=4, gpu=0, mem=1000): 0,
        }
        with patch("ray.nodes", return_value=node_table):
            with patch(
                "ray._private.state.state.get_cluster_config",
                return_value=cluster_config,
            ):
                result = _get_node_resource_spec_and_count()
                assert result == expected
    def test_get_node_resource_spec_and_count_missing_all_resources(self):
        """Regression test for nodes with empty resources (ie missing CPU, GPU, and memory keys entirely)."""
        # Simulate a node with no standard resources defined
        node_empty_resources = {
            "Alive": True,
            "Resources": {
                "dummy_resource": 1,
            },
        }
        node_table = [
            {
                "Resources": self._head_node,
                "Alive": True,
            },
            node_empty_resources,
        ]
        # Expect everything to default to 0
        expected = {_NodeResourceSpec.of(cpu=0, gpu=0, mem=0): 1}
        with (
            patch("ray.nodes", return_value=node_table),
            patch(
                "ray._private.state.state.get_cluster_config",
                return_value=None,
            ),
        ):
            result = _get_node_resource_spec_and_count()
            assert result == expected
    @pytest.mark.parametrize(
        "resource_limits,node_spec,existing_nodes,scale_up_increment,expected_nodes",
        [
            # CPU limit: 8 CPUs allows 2 nodes (8 CPUs), not 3 (12 CPUs)
            (
                ExecutionResources.for_limits(cpu=8),
                _NodeResourceSpec.of(cpu=4, gpu=0, mem=1000),
                2,
                1,
                2,
            ),
            # GPU limit: 2 GPUs allows 2 nodes (2 GPUs), not 3 (3 GPUs)
            (
                ExecutionResources.for_limits(gpu=2),
                _NodeResourceSpec.of(cpu=4, gpu=1, mem=1000),
                2,
                1,
                2,
            ),
            # Memory limit: 4000 allows 2 nodes (4000 mem), not 3 (6000 mem)
            (
                ExecutionResources.for_limits(memory=4000),
                _NodeResourceSpec.of(cpu=4, gpu=0, mem=2000),
                2,
                1,
                2,
            ),
            # No limits: all 3 nodes (2 existing + 1 delta) should be requested
            (
                ExecutionResources.inf(),
                _NodeResourceSpec.of(cpu=4, gpu=0, mem=1000),
                2,
                1,
                3,
            ),
        ],
    )
    def test_try_scale_up_respects_resource_limits(
        self,
        resource_limits,
        node_spec,
        existing_nodes,
        scale_up_increment,
        expected_nodes,
    ):
        """Test that cluster autoscaling respects user-configured resource limits."""
        scale_up_threshold = 0.75
        # High utilization to trigger scaling
        utilization = ExecutionResources(cpu=0.9, gpu=0.9, object_store_memory=0.9)
        fake_coordinator = FakeAutoscalingCoordinator()
        autoscaler = DefaultClusterAutoscalerV2(
            resource_manager=MagicMock(),
            resource_limits=resource_limits,
            execution_id="test_execution_id",
            cluster_scaling_up_delta=scale_up_increment,
            resource_utilization_calculator=StubUtilizationGauge(utilization),
            cluster_scaling_up_util_threshold=scale_up_threshold,
            min_gap_between_autoscaling_requests_s=0,
            autoscaling_coordinator=fake_coordinator,
            get_node_counts=lambda: {node_spec: existing_nodes},
        )
        autoscaler.try_trigger_scaling()
        resources_allocated = autoscaler.get_total_resources()
        assert resources_allocated.cpu == node_spec.cpu * expected_nodes
        assert resources_allocated.gpu == node_spec.gpu * expected_nodes
        assert resources_allocated.memory == node_spec.mem * expected_nodes
    def test_try_scale_up_respects_resource_limits_heterogeneous_nodes(self):
        """Test that smaller bundles are included even when larger bundles exceed limits.
        This tests a scenario where:
        1. Initial cluster (1 small node, 4 CPUs) is within the budget (10 CPUs)
        2. Scaling up is triggered due to high utilization
        3. The autoscaler wants to add both large and small nodes
        4. Only small nodes are requested because large nodes would exceed the limit
        """
        # CPU limit of 10 allows the initial state (4 CPUs) plus room for growth
        resource_limits = ExecutionResources.for_limits(cpu=10)
        large_node_spec = _NodeResourceSpec.of(cpu=8, gpu=1, mem=4000)
        small_node_spec = _NodeResourceSpec.of(cpu=4, gpu=0, mem=2000)
        scale_up_threshold = 0.75
        utilization = ExecutionResources(cpu=0.9, gpu=0.9, object_store_memory=0.9)
        fake_coordinator = FakeAutoscalingCoordinator()
        # Initial cluster: 1 small node (4 CPUs) - within the 10 CPU budget
        # Node types available: large (8 CPUs) and small (4 CPUs)
        def get_heterogeneous_nodes():
            return {
                large_node_spec: 0,  # 0 existing large nodes
                small_node_spec: 1,  # 1 existing small node (4 CPUs)
            }
        autoscaler = DefaultClusterAutoscalerV2(
            resource_manager=MagicMock(),
            resource_limits=resource_limits,
            execution_id="test_execution_id",
            cluster_scaling_up_delta=1,
            resource_utilization_calculator=StubUtilizationGauge(utilization),
            cluster_scaling_up_util_threshold=scale_up_threshold,
            min_gap_between_autoscaling_requests_s=0,
            autoscaling_coordinator=fake_coordinator,
            get_node_counts=get_heterogeneous_nodes,
        )
        autoscaler.try_trigger_scaling()
        resources_allocated = autoscaler.get_total_resources()
        # With delta=1:
        # - Active bundles: 1 small (4 CPUs) - existing nodes, always included
        # - Pending bundles: 1 small (4 CPUs) + 1 large (8 CPUs) - scale-up delta
        # After capping to 10 CPUs:
        # - Active: 4 CPUs (always included)
        # - Sorted pending: [small (4), large (8)]
        # - Add small: 4 + 4 = 8 CPUs β
        # - Add large: 8 + 8 = 16 CPUs β (exceeds limit)
        # Result: 2 small bundles (8 CPUs)
        # Ray autoscaler would see: need 2 small nodes, have 1 β spin up 1 more
        assert resources_allocated.cpu == 8, (
            f"Expected 8 CPUs (2 small node bundles), got {resources_allocated.cpu}. "
            "Smaller bundles should be included even when larger ones exceed limits."
        )
        assert resources_allocated.gpu == 0
        assert resources_allocated.memory == 4000
    def test_try_scale_up_existing_nodes_prioritized_over_delta(self):
        """Test that existing node bundles are prioritized over scale-up delta bundles.
        This tests a scenario where:
        - Large existing node: 1 node at 6 CPUs (currently allocated)
        - Small node type available: can add nodes at 2 CPUs each
        - User limit: 8 CPUs
        - Scale-up delta: 2 (want to add 2 small nodes)
        The existing large node (6 CPUs) should always be included, and only
        scale-up bundles that fit within the remaining budget should be added.
        Without this prioritization, smaller scale-up bundles could crowd out
        the representation of existing nodes.
        """
        resource_limits = ExecutionResources.for_limits(cpu=8)
        large_node_spec = _NodeResourceSpec.of(cpu=6, gpu=0, mem=3000)
        small_node_spec = _NodeResourceSpec.of(cpu=2, gpu=0, mem=1000)
        scale_up_threshold = 0.75
        utilization = ExecutionResources(cpu=0.9, gpu=0.9, object_store_memory=0.9)
        fake_coordinator = FakeAutoscalingCoordinator()
        # Existing cluster: 1 large node (6 CPUs)
        # Scale-up delta: 2 (want to add 2 of each node type)
        def get_node_counts():
            return {
                large_node_spec: 1,  # 1 existing large node (6 CPUs)
                small_node_spec: 0,  # 0 existing small nodes
            }
        autoscaler = DefaultClusterAutoscalerV2(
            resource_manager=MagicMock(),
            resource_limits=resource_limits,
            execution_id="test_execution_id",
            cluster_scaling_up_delta=2,
            resource_utilization_calculator=StubUtilizationGauge(utilization),
            cluster_scaling_up_util_threshold=scale_up_threshold,
            min_gap_between_autoscaling_requests_s=0,
            autoscaling_coordinator=fake_coordinator,
            get_node_counts=get_node_counts,
        )
        autoscaler.try_trigger_scaling()
        resources_allocated = autoscaler.get_total_resources()
        # Active bundles: 1 large (6 CPUs) - must be included
        # Pending bundles: 2 large (12 CPUs) + 2 small (4 CPUs) = delta requests
        # After capping to 8 CPUs:
        # - Active: 6 CPUs (always included)
        # - Remaining budget: 2 CPUs
        # - Sorted pending: [small (2), small (2), large (6), large (6)]
        # - Add small: 6 + 2 = 8 CPUs β
        # - Add another small: 8 + 2 = 10 CPUs β
        # Result: 1 large (active) + 1 small (delta) = 8 CPUs
        assert resources_allocated.cpu == 8, (
            f"Expected 8 CPUs (1 existing large + 1 delta small), got {resources_allocated.cpu}. "
            "Existing node bundles should always be included before scale-up delta."
        )
        # Verify we have the large node's resources (it must be included)
        assert resources_allocated.memory >= large_node_spec.mem, (
            f"Existing large node (mem={large_node_spec.mem}) should be included. "
            f"Got total memory={resources_allocated.memory}"
        )
    def test_try_scale_up_logs_info_message(self, propagate_logs, caplog):
        """Test that a triggered scale-up emits the expected INFO log line."""
        fake_coordinator = FakeAutoscalingCoordinator()
        node_spec = _NodeResourceSpec.of(cpu=1, gpu=0, mem=8 * 1024**3)
        utilization = ExecutionResources(cpu=1, gpu=1, object_store_memory=1)
        autoscaler = DefaultClusterAutoscalerV2(
            resource_manager=MagicMock(),
            execution_id="test_execution_id",
            resource_utilization_calculator=StubUtilizationGauge(utilization),
            min_gap_between_autoscaling_requests_s=0,
            autoscaling_coordinator=fake_coordinator,
            get_node_counts=lambda: {node_spec: 1},
        )
        with caplog.at_level(logging.INFO):
            autoscaler.try_trigger_scaling()
        expected_message = (
            "The utilization of one or more logical resource is higher than the "
            "specified threshold of 75%: CPU=100%, GPU=100%, memory=0%, "
            "object_store_memory=100%. Requesting 1 node(s) of each shape: "
            "[{CPU: 1, GPU: 0, memory: 8.0GiB}: 1 -> 2]"
        )
        log_messages = [record.message for record in caplog.records]
        assert expected_message in log_messages, (
            f"Expected log message not found.\n"
            f"Expected: {expected_message}\n"
            f"Actual logs: {log_messages}"
        )
if __name__ == "__main__":
    # Allow running this test module directly with `python <file>`.
    import sys
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_default_cluster_autoscaler_v2.py",
"license": "Apache License 2.0",
"lines": 528,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/unit/test_average_calculator.py | from unittest.mock import patch
import pytest
from ray.data._internal.average_calculator import TimeWindowAverageCalculator
@pytest.fixture
def current_time():
    """Replace ``time.time`` with a manually advanced integer clock.

    Yields a mutable counter; tests advance it via ``increment()`` and the
    patched ``time.time`` reports its current value.
    """
    class MutableInt:
        def __init__(self, value: int = 0):
            self.value = value
        def __repr__(self):
            return f"MutableInt({self.value})"
        def increment(self):
            self.value += 1
        def get_value(self) -> int:
            return self.value
    clock = MutableInt()
    def _fake_time():
        return clock.get_value()
    with patch("time.time", _fake_time):
        yield clock
def test_calcuate_time_window_average(current_time):
    """Test TimeWindowAverageCalculator."""
    # NOTE(review): "calcuate" is a typo for "calculate"; consider renaming.
    window_s = 10
    values_to_report = [i + 1 for i in range(20)]
    calculator = TimeWindowAverageCalculator(window_s)
    # No values reported yet -> no average.
    assert calculator.get_average() is None
    for value in values_to_report:
        # Report values, test `get_average`.
        # and proceed the time by 1 second each time.
        calculator.report(value)
        avg = calculator.get_average()
        # Expected window: the values reported in the last `window_s` seconds
        # (one value per second), inclusive of the current second.
        values_in_window = values_to_report[
            max(current_time.get_value() - 10, 0) : current_time.get_value() + 1
        ]
        expected = sum(values_in_window) / len(values_in_window)
        assert avg == expected, current_time.get_value()
        current_time.increment()
    for _ in range(10):
        # Keep proceeding the time, and test `get_average`.
        avg = calculator.get_average()
        values_in_window = values_to_report[max(current_time.get_value() - 10, 0) : 20]
        expected = sum(values_in_window) / len(values_in_window)
        assert avg == expected, current_time.get_value()
        current_time.increment()
    # Now no values in the time window, `get_average` should return None.
    assert calculator.get_average() is None
if __name__ == "__main__":
    # Allow running this test module directly with `python <file>`.
    import sys
    sys.exit(pytest.main(["-v", "-x", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/unit/test_average_calculator.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:doc/source/train/doc_code/collate_utils.py | from dataclasses import dataclass
from typing import Dict, List, Tuple, Union
import torch
from ray import cloudpickle as pickle
import pyarrow as pa
# (dtype, shape, offset)
# Describes one packed tensor: its dtype, its original shape, and its byte
# offset into the shared packed buffer.
FEATURE_TYPE = Tuple[torch.dtype, torch.Size, int]
# Element type used to reinterpret tensor storage as raw bytes.
TORCH_BYTE_ELEMENT_TYPE = torch.uint8
def _create_binary_array_from_buffer(buffer: bytes) -> pa.BinaryArray:
    """Zero-copy create a binary array from a buffer."""
    data_buffer = pa.py_buffer(buffer)
    # A one-element binary array needs two offsets: [0, len(data)].
    offsets_buffer = pa.array([0, data_buffer.size], type=pa.int32()).buffers()[1]
    # Buffers are [validity (none), offsets, data]; the data buffer wraps the
    # caller's bytes without copying them.
    return pa.Array.from_buffers(
        pa.binary(),
        1,
        [None, offsets_buffer, data_buffer],
    )
@dataclass
class _Metadata:
    # Metadata describing how tensors are packed into a single byte buffer.
    # Per feature name: list of (dtype, shape, byte-offset) entries.
    features: Dict[str, List[FEATURE_TYPE]]
    # Total size in bytes of the packed buffer.
    total_buffer_size: int
@dataclass
class _TensorBatch:
    """Internal class for serializing/deserializing tensor batches.

    All tensors in a batch are flattened and packed back-to-back into one
    contiguous uint8 buffer; ``metadata`` records, per feature name, each
    tensor's (dtype, shape, byte offset) so the batch can be reconstructed
    with zero-copy views of the buffer.
    """
    # Packed uint8 buffer containing every tensor's bytes, back to back.
    buffer: torch.Tensor
    # Per-feature (dtype, shape, offset) records plus the total buffer size.
    metadata: _Metadata
    @classmethod
    def from_batch(cls, batch: Dict[str, Union[List[torch.Tensor], torch.Tensor]]) -> '_TensorBatch':
        """Serialize a batch of tensors into a single buffer.

        Args:
            batch: Mapping from feature name to a tensor or a list of tensors.

        Returns:
            A ``_TensorBatch`` holding one packed buffer plus the metadata
            needed to reconstruct the original tensors.
        """
        features: Dict[str, List[FEATURE_TYPE]] = {}
        flattened_binary_tensors = []
        total_buffer_size = 0
        for name, tensors in batch.items():
            features[name] = []
            # Normalize a bare tensor to a one-element list.
            if not isinstance(tensors, list):
                tensors = [tensors]
            for tensor in tensors:
                # Reinterpret the tensor's storage as raw bytes (no copy yet).
                flattened_tensor = tensor.flatten().contiguous().view(TORCH_BYTE_ELEMENT_TYPE)
                flattened_binary_tensors.append(flattened_tensor)
                features[name].append((tensor.dtype, tensor.shape, total_buffer_size))
                total_buffer_size += flattened_tensor.shape[0]
        # Copy every byte view into one contiguous buffer.
        buffer = torch.empty(total_buffer_size, dtype=TORCH_BYTE_ELEMENT_TYPE)
        cur_offset = 0
        for flattened_tensor in flattened_binary_tensors:
            buffer[cur_offset:cur_offset + flattened_tensor.shape[0]] = flattened_tensor
            cur_offset += flattened_tensor.shape[0]
        return _TensorBatch(
            buffer=buffer,
            metadata=_Metadata(
                features=features,
                total_buffer_size=total_buffer_size,
            ),
        )
    def to_table(self) -> pa.Table:
        """Convert to a single-row PyArrow table (zero-copy for the buffer)."""
        buffer_array = _create_binary_array_from_buffer(self.buffer.numpy().data)
        metadata_array = _create_binary_array_from_buffer(pickle.dumps(self.metadata))
        return pa.Table.from_arrays(
            arrays=[buffer_array, metadata_array],
            names=["_buffer", "_metadata"],
        )
    @classmethod
    def from_table(cls, table: pa.Table) -> '_TensorBatch':
        """Deserialize from a single-row PyArrow table.

        ``buffers()[2]`` is the binary array's data buffer, so wrapping it
        with ``torch.frombuffer`` avoids copying the packed bytes.
        """
        return _TensorBatch(
            buffer=torch.frombuffer(
                table["_buffer"].chunks[0].buffers()[2],
                dtype=TORCH_BYTE_ELEMENT_TYPE
            ),
            metadata=pickle.loads(table["_metadata"].chunks[0].buffers()[2]),
        )
    def to_batch(self, pin_memory: bool = False) -> Dict[str, List[torch.Tensor]]:
        """Deserialize back to a batch of tensors.

        Args:
            pin_memory: If True, pin each reconstructed tensor's memory.
                Otherwise each tensor is a zero-copy view of ``self.buffer``.

        Returns:
            Mapping from feature name to the list of reconstructed tensors.
        """
        # Fix: dropped the unused `storage_buffer = self.buffer.untyped_storage()`
        # local that the previous version assigned but never read.
        batch = {}
        # Collect every tensor's start offset in buffer order, then append the
        # total size so that offsets[i + 1] is always tensor i's end offset
        # (tensors are packed contiguously).
        offsets = []
        for name, features in self.metadata.features.items():
            for _, _, offset in features:
                offsets.append(offset)
        offsets.append(self.metadata.total_buffer_size)
        offset_id = 0
        for name, features in self.metadata.features.items():
            batch[name] = []
            for dtype, shape, _ in features:
                # Create a zero-copy view of the byte slice.
                byte_slice = self.buffer[offsets[offset_id]:offsets[offset_id + 1]]
                tensor = torch.frombuffer(
                    byte_slice.numpy().data, dtype=dtype
                ).view(shape)
                if pin_memory:
                    tensor = tensor.pin_memory()
                batch[name].append(tensor)
                offset_id += 1
        return batch
# Helper functions for use in your code
def serialize_tensors_to_table(batch: Dict[str, Union[List[torch.Tensor], torch.Tensor]]) -> pa.Table:
    """Serialize a batch of tensors to a PyArrow table."""
    tensor_batch = _TensorBatch.from_batch(batch)
    return tensor_batch.to_table()
def deserialize_table_to_tensors(table: pa.Table, pin_memory: bool = False) -> Dict[str, List[torch.Tensor]]:
    """Deserialize a PyArrow table back to tensors."""
    tensor_batch = _TensorBatch.from_table(table)
    return tensor_batch.to_batch(pin_memory=pin_memory)
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/train/doc_code/collate_utils.py",
"license": "Apache License 2.0",
"lines": 104,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/train/doc_code/random_text_generator.py | import random
import string
import ray
def random_text(length: int) -> str:
    """Generate random lowercase, space-separated text of exactly `length` chars.

    Args:
        length: Desired character count. Non-positive lengths yield "".

    Returns:
        A string of exactly `length` characters drawn from lowercase ASCII
        letters and spaces.
    """
    if length <= 0:
        return ""
    # Too short for multiple words: return a single run of letters.
    if length <= 3:
        return "".join(random.choices(string.ascii_lowercase, k=length))
    words = []
    # Running total of characters consumed, counting one separator per word.
    current_length = 0
    while current_length < length:
        remaining = length - current_length
        if remaining <= 4:
            # Fill the remainder with one final word.
            word_length = remaining
            words.append("".join(random.choices(string.ascii_lowercase, k=word_length)))
            break
        # Leave at least one char for a separator; words are 3-10 chars long.
        max_word_length = min(10, remaining - 1)
        if max_word_length >= 3:
            word_length = random.randint(3, max_word_length)
        else:
            word_length = remaining
        word = "".join(random.choices(string.ascii_lowercase, k=word_length))
        words.append(word)
        current_length += len(word) + 1  # +1 for the separating space
    text = " ".join(words)[:length]
    # BUG FIX: `current_length` counts a separator after the last word that
    # " ".join never emits, so the joined text could end up shorter than
    # `length` (e.g. a single 4-char word for length 5). Pad with random
    # letters so the result is always exactly `length` characters.
    if len(text) < length:
        text += "".join(random.choices(string.ascii_lowercase, k=length - len(text)))
    return text
def random_label() -> int:
    """Return one of the eight class labels (0-7), chosen uniformly."""
    return random.choice([0, 1, 2, 3, 4, 5, 6, 7])
def create_mock_ray_text_dataset(dataset_size: int = 96, min_len: int = 5, max_len: int = 100):
    """Create a mock Ray dataset with random text and labels.

    Args:
        dataset_size: Number of rows to generate.
        min_len: Minimum text length (inclusive).
        max_len: Maximum text length (inclusive).

    Returns:
        A Ray dataset whose rows have "length", "text", and "label" fields.
    """
    lengths = random.choices(range(min_len, max_len + 1), k=dataset_size)
    base_dataset = ray.data.from_items(lengths)
    def _to_record(row):
        n = row['item']
        # Note: text is generated before the label, matching the order in
        # which the RNG is consumed.
        return {
            "length": n,
            "text": random_text(n),
            "label": random_label(),
        }
    return base_dataset.map(_to_record)
"repo_id": "ray-project/ray",
"file_path": "doc/source/train/doc_code/random_text_generator.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/data/_internal/cluster_autoscaler/base_autoscaling_coordinator.py | import abc
from enum import Enum
from typing import Dict, List
# A resource bundle mapping resource name to amount, e.g. {"CPU": 1.0}.
ResourceDict = Dict[str, float]
class ResourceRequestPriority(Enum):
    """Priority of a resource request.

    A higher numeric value means higher priority when requests compete.
    """
    LOW = -10
    MEDIUM = 0
    HIGH = 10
class AutoscalingCoordinator(abc.ABC):
    """Abstract interface for coordinating cluster resource requests from
    multiple requesters."""
    @abc.abstractmethod
    def request_resources(
        self,
        requester_id: str,
        resources: List[ResourceDict],
        expire_after_s: float,
        request_remaining: bool = False,
        priority: ResourceRequestPriority = ResourceRequestPriority.MEDIUM,
    ) -> None:
        """Request cluster resources.
        The requested resources should represent the full set of resources needed,
        not just the incremental amount.
        A request with the same `requester_id` overwrites the previous one.
        Args:
            requester_id: A unique identifier for the component making the request.
            resources: The requested resources. This should match the format accepted
                by `ray.autoscaler.sdk.request_resources`.
            expire_after_s: Time in seconds after which this request will expire.
                The requester is responsible for periodically sending new requests
                to avoid the request being purged.
            request_remaining: If true, after allocating requested resources to each
                requester, remaining resources will also be allocated to this requester.
            priority: The priority of the request. Higher value means higher priority.
        """
        ...
    @abc.abstractmethod
    def cancel_request(self, requester_id: str):
        """Cancel the resource request from the given requester.
        Args:
            requester_id: The unique identifier of the requester.
        """
        ...
    @abc.abstractmethod
    def get_allocated_resources(self, requester_id: str) -> List[ResourceDict]:
        """Get the allocated resources for the given requester.
        Args:
            requester_id: The unique identifier of the requester.
        Returns:
            A list of dictionaries representing the allocated resources bundles.
        """
        ...
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/cluster_autoscaler/base_autoscaling_coordinator.py",
"license": "Apache License 2.0",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:python/ray/data/_internal/cluster_autoscaler/default_autoscaling_coordinator.py | import copy
import functools
import logging
import math
import threading
import time
from dataclasses import dataclass
from typing import Callable, Dict, List, Optional
import ray
import ray.exceptions
from .base_autoscaling_coordinator import (
AutoscalingCoordinator,
ResourceDict,
ResourceRequestPriority,
)
from ray.autoscaler._private.constants import env_integer
from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy
logger = logging.getLogger(__name__)
# Custom resource label that is present only on the Ray head node; used to
# distinguish the head node from worker nodes.
HEAD_NODE_RESOURCE_LABEL = "node:__internal_head__"
@dataclass
class OngoingRequest:
    """Represents an ongoing resource request from a requester."""
    # The time when the request was first received.
    first_request_time: float
    # Requested resources.
    requested_resources: List[ResourceDict]
    # The expiration time of the request.
    expiration_time: float
    # If true, after allocating requested resources to each requester,
    # remaining resources will also be allocated to this requester.
    request_remaining: bool
    # The priority of the request, higher value means higher priority.
    priority: int
    # Resources that are already allocated to the requester.
    allocated_resources: List[ResourceDict]
    def __lt__(self, other):
        """Ordering used when allocating resources.

        Higher priority sorts first; ties are broken by earlier
        first_request_time.
        """
        # Negating the priority turns "higher priority first" into a plain
        # ascending tuple comparison.
        return (-self.priority, self.first_request_time) < (
            -other.priority,
            other.first_request_time,
        )
def handle_timeout_errors(
    failure_counter_attr: str,
    operation_name: str,
    requester_id_param: str = "requester_id",
    error_msg_suffix: Optional[str] = None,
    on_error_return: Optional[Callable] = None,
):
    """Decorator to handle GetTimeoutError with consecutive failure tracking.

    Args:
        failure_counter_attr: Name of the instance attribute that tracks
            consecutive failures.
        operation_name: Name of the operation for error messages (e.g.,
            "send resource request", "cancel resource request").
        requester_id_param: Name of the parameter that contains the
            requester_id.
        error_msg_suffix: Optional suffix to append to the error message.
            If None, uses a default message.
        on_error_return: Optional callable that takes (self, requester_id)
            and returns a value to return on error. If None, no value is
            returned (method should return None).

    Returns:
        A decorator that wraps methods to handle timeout errors.
    """

    def decorator(func):
        # Inspect the wrapped function's signature once at decoration time
        # (previously this ran on every call) to find where the requester_id
        # parameter sits positionally. Subtract 1 because `self` is consumed
        # by `wrapper` and is therefore not part of `*args`.
        import inspect

        param_names = list(inspect.signature(func).parameters.keys())
        if requester_id_param in param_names:
            positional_index = param_names.index(requester_id_param) - 1
            if positional_index < 0:
                # Defensive: requester_id_param was the first parameter
                # (i.e. `self`); a negative index would read the wrong arg.
                positional_index = None
        else:
            positional_index = None

        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            # Extract requester_id: prefer keyword, fall back to positional.
            requester_id = kwargs.get(requester_id_param)
            if (
                requester_id is None
                and positional_index is not None
                and positional_index < len(args)
            ):
                requester_id = args[positional_index]
            failure_counter = getattr(self, failure_counter_attr)
            try:
                result = func(self, *args, **kwargs)
                # Reset counter on success
                setattr(self, failure_counter_attr, 0)
                return result
            except ray.exceptions.GetTimeoutError as exc:
                failure_counter += 1
                setattr(self, failure_counter_attr, failure_counter)
                consecutive_msg = (
                    f" (consecutive failures: {failure_counter})"
                    if failure_counter > 1
                    else ""
                )
                # Build error message
                base_msg = (
                    f"Failed to {operation_name} for {requester_id}.{consecutive_msg}"
                )
                if error_msg_suffix is not None:
                    msg = f"{base_msg} {error_msg_suffix}"
                else:
                    msg = (
                        f"{base_msg}"
                        " If this only happens transiently during network"
                        " partition or CPU being overloaded, it's safe to"
                        " ignore this error."
                        " If this error persists, file a GitHub issue."
                    )
                # Check max failures and raise if exceeded
                if failure_counter >= self.MAX_CONSECUTIVE_FAILURES:
                    raise RuntimeError(
                        f"Failed to {operation_name} for {requester_id} "
                        f"after {failure_counter} consecutive failures."
                    ) from exc
                logger.warning(msg, exc_info=True)
                # Return value on error if callback provided
                if on_error_return is not None:
                    return on_error_return(self, requester_id)

        return wrapper

    return decorator
class DefaultAutoscalingCoordinator(AutoscalingCoordinator):
    """Client-side implementation of ``AutoscalingCoordinator``.

    Each call is forwarded to the named ``AutoscalingCoordinator`` actor with
    a bounded ``ray.get`` timeout. Consecutive-timeout tracking (warn, then
    raise after ``MAX_CONSECUTIVE_FAILURES``) is applied by the
    ``handle_timeout_errors`` decorator on each method.
    """

    # Timeout (seconds) for each `ray.get` call on the coordinator actor.
    AUTOSCALING_REQUEST_GET_TIMEOUT_S = env_integer(
        "RAY_DATA_AUTOSCALING_COORDINATOR_REQUEST_GET_TIMEOUT_S", 5
    )
    # After this many consecutive timeouts of one operation, the decorator
    # raises RuntimeError instead of logging a warning.
    MAX_CONSECUTIVE_FAILURES = env_integer(
        "RAY_DATA_AUTOSCALING_COORDINATOR_MAX_CONSECUTIVE_FAILURES", 10
    )

    def __init__(self):
        # Last successfully fetched allocation per requester; served as a
        # fallback when `get_allocated_resources` times out.
        self._cached_allocated_resources: Dict[str, List[ResourceDict]] = {}
        # Consecutive-timeout counters, one per decorated operation.
        self._consecutive_failures_request_resources: int = 0
        self._consecutive_failures_cancel_request: int = 0
        self._consecutive_failures_get_allocated_resources: int = 0

    @functools.cached_property
    def _autoscaling_coordinator(self):
        # Create the coordinator actor lazily rather than eagerly in the constructor.
        return get_or_create_autoscaling_coordinator()

    @handle_timeout_errors(
        failure_counter_attr="_consecutive_failures_request_resources",
        operation_name="send resource request",
        error_msg_suffix=(
            "If this only happens transiently during network partition"
            " or CPU being overloaded, it's safe to ignore this error."
            " If this error persists, file a GitHub issue."
        ),
    )
    def request_resources(
        self,
        requester_id: str,
        resources: List[ResourceDict],
        expire_after_s: float,
        request_remaining: bool = False,
        priority: ResourceRequestPriority = ResourceRequestPriority.MEDIUM,
    ) -> None:
        """Forward the resource request to the coordinator actor."""
        ray.get(
            self._autoscaling_coordinator.request_resources.remote(
                requester_id=requester_id,
                resources=resources,
                expire_after_s=expire_after_s,
                request_remaining=request_remaining,
                priority=priority,
            ),
            timeout=self.AUTOSCALING_REQUEST_GET_TIMEOUT_S,
        )

    @handle_timeout_errors(
        failure_counter_attr="_consecutive_failures_cancel_request",
        operation_name="cancel resource request",
        error_msg_suffix=(
            "If this only happens transiently during network partition"
            " or CPU being overloaded, it's safe to ignore this error."
            " If this error persists, file a GitHub issue."
        ),
    )
    def cancel_request(self, requester_id: str):
        """Forward the cancellation to the coordinator actor."""
        ray.get(
            self._autoscaling_coordinator.cancel_request.remote(
                requester_id,
            ),
            timeout=self.AUTOSCALING_REQUEST_GET_TIMEOUT_S,
        )

    @handle_timeout_errors(
        failure_counter_attr="_consecutive_failures_get_allocated_resources",
        operation_name="get allocated resources",
        error_msg_suffix=(
            "Returning cached value."
            " If this only happens transiently during network partition"
            " or CPU being overloaded, it's safe to ignore this error."
            " If this error persists, file a GitHub issue."
        ),
        on_error_return=lambda self, requester_id: (
            self._cached_allocated_resources.get(requester_id, [])
        ),
    )
    def get_allocated_resources(self, requester_id: str) -> List[ResourceDict]:
        """Fetch the current allocation from the actor and cache it.

        On timeout, the decorator returns the last cached value instead.
        """
        result = ray.get(
            self._autoscaling_coordinator.get_allocated_resources.remote(
                requester_id,
            ),
            timeout=self.AUTOSCALING_REQUEST_GET_TIMEOUT_S,
        )
        self._cached_allocated_resources[requester_id] = result
        return result
class _AutoscalingCoordinatorActor:
    """An actor to coordinate autoscaling resource requests from different components.

    This actor is responsible for:
    * Merging received requests and dispatching them to Ray Autoscaler.
    * Allocating cluster resources to the requesters.
    """

    # Interval of the background maintenance thread, in seconds.
    TICK_INTERVAL_S = 20

    def __init__(
        self,
        get_current_time: Callable[[], float] = time.time,
        send_resources_request: Callable[
            [List[ResourceDict]], None
        ] = lambda bundles: ray.autoscaler.sdk.request_resources(bundles=bundles),
        get_cluster_nodes: Callable[[], List[Dict]] = ray.nodes,
    ):
        """Initialize the actor.

        Args:
            get_current_time: Clock function; injectable for tests.
            send_resources_request: Function used to send the merged request
                to the Ray Autoscaler; injectable for tests.
            get_cluster_nodes: Function returning the cluster node table;
                injectable for tests.
        """
        self._get_current_time = get_current_time
        self._send_resources_request = send_resources_request
        self._get_cluster_nodes = get_cluster_nodes
        # requester_id -> state of that requester's ongoing request.
        self._ongoing_reqs: Dict[str, OngoingRequest] = {}
        # Per-node resource totals of the eligible cluster nodes.
        self._cluster_node_resources: List[ResourceDict] = []
        # Lock for thread-safe access to shared state from the background
        self._lock = threading.Lock()
        self._update_cluster_node_resources()
        # This is an actor, so the following check should always be True.
        # It's only needed for unit tests.
        if ray.is_initialized():
            # Start a thread to perform periodical operations.
            def tick_thread_run():
                while True:
                    time.sleep(self.TICK_INTERVAL_S)
                    self._tick()

            self._tick_thread = threading.Thread(target=tick_thread_run, daemon=True)
            self._tick_thread.start()

    def _tick(self):
        """Used to perform periodical operations, e.g., purge expired requests,
        merge and send requests, check cluster resource updates, etc."""
        with self._lock:
            self._merge_and_send_requests()
            self._update_cluster_node_resources()
            self._reallocate_resources()

    def request_resources(
        self,
        requester_id: str,
        resources: List[ResourceDict],
        expire_after_s: float,
        request_remaining: bool = False,
        priority: ResourceRequestPriority = ResourceRequestPriority.MEDIUM,
    ) -> None:
        """Register or refresh a resource request from a requester.

        Args:
            requester_id: Unique id of the requesting component.
            resources: Requested resource bundles. Values are rounded up to
                integers in place.
            expire_after_s: Seconds from now until this request expires.
            request_remaining: Whether leftover cluster resources should also
                be allocated to this requester. May not change for an ongoing
                request.
            priority: Request priority. May not change for an ongoing request.

        Raises:
            ValueError: If `request_remaining` or `priority` differs from the
                requester's ongoing request.
        """
        logger.debug("Received request from %s: %s.", requester_id, resources)
        with self._lock:
            # Round up the resource values to integers,
            # because the Autoscaler SDK only accepts integer values.
            for r in resources:
                for k in r:
                    r[k] = math.ceil(r[k])
            now = self._get_current_time()
            request_updated = False
            old_req = self._ongoing_reqs.get(requester_id)
            if old_req is not None:
                if request_remaining != old_req.request_remaining:
                    raise ValueError(
                        "Cannot change request_remaining flag of an ongoing request."
                    )
                if priority.value != old_req.priority:
                    raise ValueError("Cannot change priority of an ongoing request.")
                request_updated = resources != old_req.requested_resources
                old_req.requested_resources = resources
                old_req.expiration_time = now + expire_after_s
            else:
                request_updated = True
                self._ongoing_reqs[requester_id] = OngoingRequest(
                    first_request_time=now,
                    requested_resources=resources,
                    request_remaining=request_remaining,
                    priority=priority.value,
                    expiration_time=now + expire_after_s,
                    allocated_resources=[],
                )
            if request_updated:
                # If the request has updated, immediately send
                # a new request and reallocate resources.
                self._merge_and_send_requests()
                self._reallocate_resources()

    def cancel_request(
        self,
        requester_id: str,
    ):
        """Cancel and remove the ongoing request of the given requester.

        No-op if the requester has no ongoing request.
        """
        logger.debug("Canceling request for %s.", requester_id)
        with self._lock:
            if requester_id not in self._ongoing_reqs:
                return
            del self._ongoing_reqs[requester_id]
            self._merge_and_send_requests()
            self._reallocate_resources()

    def _purge_expired_requests(self):
        # Drop requests whose expiration time has passed.
        # Must be called with `self._lock` held.
        now = self._get_current_time()
        self._ongoing_reqs = {
            requester_id: req
            for requester_id, req in self._ongoing_reqs.items()
            if req.expiration_time > now
        }

    def _merge_and_send_requests(self):
        """Merge requests and send them to Ray Autoscaler."""
        self._purge_expired_requests()
        merged_req = []
        for req in self._ongoing_reqs.values():
            merged_req.extend(req.requested_resources)
        self._send_resources_request(merged_req)

    def get_allocated_resources(self, requester_id: str) -> List[ResourceDict]:
        """Get the allocated resources for the requester."""
        with self._lock:
            if requester_id not in self._ongoing_reqs:
                return []
            return self._ongoing_reqs[requester_id].allocated_resources

    def _maybe_subtract_resources(self, res1: ResourceDict, res2: ResourceDict) -> bool:
        """If res2<=res1, subtract res2 from res1 in-place, and return True.
        Otherwise return False."""
        if any(res1.get(key, 0) < res2[key] for key in res2):
            return False
        for key in res2:
            if key in res1:
                res1[key] -= res2[key]
        return True

    def _update_cluster_node_resources(self) -> bool:
        """Update cluster's total resources. Return True if changed."""

        def _is_node_eligible(node):
            # Exclude dead nodes.
            if not node["Alive"]:
                return False
            resources = node["Resources"]
            # Exclude the head node if it doesn't have CPUs and GPUs,
            # because the object store is not usable.
            if HEAD_NODE_RESOURCE_LABEL in resources and (
                resources.get("CPU", 0) == 0 and resources.get("GPU", 0) == 0
            ):
                return False
            return True

        nodes = list(filter(_is_node_eligible, self._get_cluster_nodes()))
        # Sort by node id so the list compares stably across calls.
        nodes = sorted(nodes, key=lambda node: node.get("NodeID", ""))
        cluster_node_resources = [node["Resources"] for node in nodes]
        if cluster_node_resources == self._cluster_node_resources:
            return False
        else:
            logger.debug("Cluster resources updated: %s.", cluster_node_resources)
            self._cluster_node_resources = cluster_node_resources
            return True

    def _reallocate_resources(self):
        """Reallocate cluster resources."""
        now = self._get_current_time()
        cluster_node_resources = copy.deepcopy(self._cluster_node_resources)
        ongoing_reqs = sorted(
            [req for req in self._ongoing_reqs.values() if req.expiration_time >= now]
        )
        # Allocate resources to ongoing requests.
        # TODO(hchen): Optimize the following triple loop.
        for ongoing_req in ongoing_reqs:
            ongoing_req.allocated_resources = []
            for req in ongoing_req.requested_resources:
                for node_resource in cluster_node_resources:
                    if self._maybe_subtract_resources(node_resource, req):
                        # Append a copy so the allocation result doesn't alias
                        # the requester's own `requested_resources` dicts.
                        ongoing_req.allocated_resources.append(dict(req))
                        break
        # Allocate remaining resources.
        # NOTE: to handle the case where multiple datasets are running concurrently,
        # we divide remaining resources equally to all requesters with `request_remaining=True`.
        remaining_resource_requesters = [
            req for req in ongoing_reqs if req.request_remaining
        ]
        num_remaining_requesters = len(remaining_resource_requesters)
        if num_remaining_requesters > 0:
            for node_resource in cluster_node_resources:
                # Divide remaining resources equally among requesters.
                # NOTE: Integer division may leave some resources unallocated.
                divided_resource = {
                    k: v // num_remaining_requesters for k, v in node_resource.items()
                }
                # The emptiness check doesn't depend on the requester, so
                # evaluate it once per node instead of once per requester.
                if not any(v > 0 for v in divided_resource.values()):
                    continue
                for ongoing_req in remaining_resource_requesters:
                    # Give each requester its own copy: previously the same
                    # dict object was appended to every requester, so one
                    # consumer mutating its allocation corrupted the others'.
                    ongoing_req.allocated_resources.append(dict(divided_resource))
        if logger.isEnabledFor(logging.DEBUG):
            msg = "Allocated resources:\n"
            for requester_id, ongoing_req in self._ongoing_reqs.items():
                msg += f"Requester {requester_id}: {ongoing_req.allocated_resources}\n"
            logger.debug(msg)
# Serializes creation of the named coordinator actor (see the NOTE below).
_get_or_create_lock = threading.Lock()


def get_or_create_autoscaling_coordinator():
    """Get or create the AutoscalingCoordinator actor.

    Returns:
        A handle to the detached, named "AutoscalingCoordinator" actor
        (created on first call thanks to ``get_if_exists=True``).
    """
    # Create the actor on the local node,
    # to reduce network overhead.
    scheduling_strategy = NodeAffinitySchedulingStrategy(
        ray.get_runtime_context().get_node_id(),
        soft=False,
    )
    actor_cls = ray.remote(num_cpus=0, max_restarts=-1, max_task_retries=-1)(
        _AutoscalingCoordinatorActor
    ).options(
        name="AutoscalingCoordinator",
        namespace="AutoscalingCoordinator",
        get_if_exists=True,
        lifetime="detached",
        scheduling_strategy=scheduling_strategy,
    )
    # NOTE: Need the following lock, because Ray Core doesn't allow creating the same
    # actor from multiple threads simultaneously.
    with _get_or_create_lock:
        return actor_cls.remote()
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/cluster_autoscaler/default_autoscaling_coordinator.py",
"license": "Apache License 2.0",
"lines": 406,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/tests/test_autoscaling_coordinator.py | from unittest.mock import Mock, patch
import pytest
import ray
from ray.cluster_utils import Cluster
from ray.data._internal.cluster_autoscaler.default_autoscaling_coordinator import (
HEAD_NODE_RESOURCE_LABEL,
DefaultAutoscalingCoordinator,
_AutoscalingCoordinatorActor,
get_or_create_autoscaling_coordinator,
)
from ray.tests.conftest import wait_for_condition
def kill_autoscaling_coordinator():
    """Kill the AutoscalingCoordinator actor.

    We expose this to keep autoscaling coordinator tests isolated.
    If the AutoscalingCoordinator actor doesn't exist, this function is a no-op.
    """
    actor = None
    try:
        actor = ray.get_actor(
            "AutoscalingCoordinator", namespace="AutoscalingCoordinator"
        )
    except ValueError:
        # `ray.get_actor` raises a `ValueError` when the actor doesn't exist.
        actor = None
    if actor is not None:
        ray.kill(actor)
@pytest.fixture
def teardown_autoscaling_coordinator():
    """Fixture that kills the AutoscalingCoordinator actor after the test."""
    yield
    kill_autoscaling_coordinator()
# Synthetic node tables fed to `_AutoscalingCoordinatorActor` via its
# `get_cluster_nodes` hook in the unit tests below.
CLUSTER_NODES_WITH_HEAD = [
    # Head node should be included if it has non-zero CPUs or GPUs.
    {
        "Resources": {
            "CPU": 10,
            "GPU": 5,
            "object_store_memory": 1000,
            HEAD_NODE_RESOURCE_LABEL: 1,
        },
        "Alive": True,
    },
    # Dead node should be excluded.
    {
        "Resources": {
            "CPU": 10,
            "GPU": 5,
            "object_store_memory": 1000,
        },
        "Alive": False,
    },
]

CLUSTER_NODES_WITHOUT_HEAD = [
    {
        "Resources": {"CPU": 10, "GPU": 5, "object_store_memory": 1000},
        "Alive": True,
    },
    # Head node should be excluded if CPUs and GPUs are both 0.
    {
        "Resources": {
            "CPU": 0,
            "GPU": 0,
            "object_store_memory": 1000,
            HEAD_NODE_RESOURCE_LABEL: 1,
        },
        "Alive": True,
    },
]
@pytest.mark.parametrize(
    "cluster_nodes",
    [
        CLUSTER_NODES_WITH_HEAD,
        CLUSTER_NODES_WITHOUT_HEAD,
    ],
)
def test_basic(cluster_nodes):
    """End-to-end unit test of the coordinator actor with mocked time,
    mocked autoscaler requests, and a fixed node table: request, update,
    request_remaining, expiration, and cancellation."""
    mocked_time = 0
    mock_request_resources = Mock()
    as_coordinator = _AutoscalingCoordinatorActor(
        get_current_time=lambda: mocked_time,
        send_resources_request=mock_request_resources,
        get_cluster_nodes=lambda: cluster_nodes,
    )
    req1 = [{"CPU": 3, "GPU": 1, "object_store_memory": 100}]
    req1_timeout = 2
    as_coordinator.request_resources(
        requester_id="requester1",
        resources=req1,
        expire_after_s=req1_timeout,
    )
    mock_request_resources.assert_called_once_with(req1)
    res1 = as_coordinator.get_allocated_resources("requester1")

    def _remove_head_node_resources(res):
        # Normalize: drop the head-node label so allocations compare equal
        # to the plain request dicts.
        for r in res:
            if HEAD_NODE_RESOURCE_LABEL in r:
                del r[HEAD_NODE_RESOURCE_LABEL]

    _remove_head_node_resources(res1)
    assert res1 == req1
    # Send the same request again. `mock_request_resources` won't be called
    # since the request is not updated.
    as_coordinator.request_resources(
        requester_id="requester1",
        resources=req1,
        expire_after_s=req1_timeout,
    )
    assert mock_request_resources.call_count == 1
    # Send a request from requester2, with request_remaining=True.
    # requester2 should get the requested + the remaining resources.
    req2 = [{"CPU": 2, "GPU": 1, "object_store_memory": 100}]
    req2_timeout = 20
    as_coordinator.request_resources(
        requester_id="requester2",
        resources=req2,
        expire_after_s=req2_timeout,
        request_remaining=True,
    )
    mock_request_resources.assert_called_with(req1 + req2)
    res2 = as_coordinator.get_allocated_resources("requester2")
    _remove_head_node_resources(res2)
    assert res2 == req2 + [{"CPU": 5, "GPU": 3, "object_store_memory": 800}]
    # Test updating req1
    req1_updated = [{"CPU": 4, "GPU": 2, "object_store_memory": 300}]
    as_coordinator.request_resources(
        requester_id="requester1",
        resources=req1_updated,
        expire_after_s=req1_timeout,
    )
    mock_request_resources.assert_called_with(req1_updated + req2)
    res1 = as_coordinator.get_allocated_resources("requester1")
    _remove_head_node_resources(res1)
    assert res1 == req1_updated
    res2 = as_coordinator.get_allocated_resources("requester2")
    _remove_head_node_resources(res2)
    assert res2 == req2 + [{"CPU": 4, "GPU": 2, "object_store_memory": 600}]
    # After req1_timeout, req1 should be expired.
    # (The time lambdas above close over `mocked_time`, so reassigning it
    # advances the coordinator's clock.)
    mocked_time = req1_timeout + 0.1
    as_coordinator._tick()
    mock_request_resources.assert_called_with(req2)
    res1 = as_coordinator.get_allocated_resources("requester1")
    res2 = as_coordinator.get_allocated_resources("requester2")
    _remove_head_node_resources(res1)
    _remove_head_node_resources(res2)
    assert res1 == []
    assert res2 == req2 + [{"CPU": 8, "GPU": 4, "object_store_memory": 900}]
    # After req2_timeout, req2 should be expired.
    mocked_time = req2_timeout + 0.1
    as_coordinator._tick()
    mock_request_resources.assert_called_with([])
    res1 = as_coordinator.get_allocated_resources("requester1")
    res2 = as_coordinator.get_allocated_resources("requester2")
    _remove_head_node_resources(res1)
    _remove_head_node_resources(res2)
    assert res1 == []
    assert res2 == []
    # Test canceling a request
    as_coordinator.cancel_request("requester2")
    res2 = as_coordinator.get_allocated_resources("requester2")
    _remove_head_node_resources(res2)
    assert res2 == []
def test_double_allocation_with_multiple_request_remaining():
    """Test fair allocation when multiple requesters have request_remaining=True."""
    cluster_nodes = [
        {
            "Resources": {
                "CPU": 10,
                "GPU": 5,
                "object_store_memory": 1000,
            },
            "Alive": True,
        }
    ]
    mocked_time = 0
    mock_request_resources = Mock()
    coordinator = _AutoscalingCoordinatorActor(
        get_current_time=lambda: mocked_time,
        send_resources_request=mock_request_resources,
        get_cluster_nodes=lambda: cluster_nodes,
    )
    # Requester1: asks for CPU=2, GPU=1 with request_remaining=True
    req1 = [{"CPU": 2, "GPU": 1, "object_store_memory": 100}]
    coordinator.request_resources(
        requester_id="requester1",
        resources=req1,
        expire_after_s=100,
        request_remaining=True,
    )
    # Requester2: asks for CPU=3, GPU=1 with request_remaining=True
    req2 = [{"CPU": 3, "GPU": 1, "object_store_memory": 200}]
    coordinator.request_resources(
        requester_id="requester2",
        resources=req2,
        expire_after_s=100,
        request_remaining=True,
    )
    # Get allocated resources
    res1 = coordinator.get_allocated_resources("requester1")
    res2 = coordinator.get_allocated_resources("requester2")
    # After allocating specific requests (req1 and req2):
    # Remaining = CPU: 10-2-3=5, GPU: 5-1-1=3, memory: 1000-100-200=700
    # With fair allocation, each requester gets 1/2 of remaining resources
    # (integer division, so odd remainders go unallocated).
    expected_remaining_per_requester = {
        "CPU": 5 // 2,  # = 2
        "GPU": 3 // 2,  # = 1
        "object_store_memory": 700 // 2,  # = 350
    }
    # Both requesters should get their specific requests + fair share of remaining
    assert res1 == req1 + [expected_remaining_per_requester]
    assert res2 == req2 + [expected_remaining_per_requester]
@pytest.fixture
def cluster():
    """Initialize a Ray cluster with a 0 CPU head node and no workers."""
    cluster = Cluster()
    cluster.add_node(num_cpus=0)
    cluster.wait_for_nodes()
    cluster.connect()
    yield cluster
    # Teardown: disconnect the driver before tearing the cluster down.
    ray.shutdown()
    cluster.shutdown()
@pytest.mark.parametrize("gpu_tasks_include_cpu", [True, False])
def test_autoscaling_coordinator_e2e(cluster, gpu_tasks_include_cpu):
    """Integration test for AutoscalingCoordinator.

    This test creates 2 dummy components that request resources from
    AutoscalingCoordinator, and checks allocated resources are correct.
    """
    object_store_memory = 100 * 1024**2
    num_cpu_nodes = 4
    cpu_node_spec = {"num_cpus": 8, "object_store_memory": object_store_memory}
    num_gpu_nodes = 2
    gpu_node_spec = {
        "num_cpus": 4,
        "num_gpus": 1,
        "object_store_memory": object_store_memory,
    }
    for _ in range(num_cpu_nodes):
        cluster.add_node(**cpu_node_spec)
    for _ in range(num_gpu_nodes):
        cluster.add_node(**gpu_node_spec)
    cluster.wait_for_nodes()

    @ray.remote
    def request_and_check_resources(
        requester_id, resources, expected, request_remaining
    ):
        # Runs inside a Ray task: send the request, then poll until the
        # coordinator reports the expected allocation (or time out).
        as_coordinator = get_or_create_autoscaling_coordinator()
        ray.get(
            as_coordinator.request_resources.remote(
                requester_id=requester_id,
                resources=resources,
                expire_after_s=100,
                request_remaining=request_remaining,
            )
        )

        def check_allocated_resources():
            allocated = ray.get(
                as_coordinator.get_allocated_resources.remote(requester_id)
            )
            # Normalize: drop head-node bundles and zero/irrelevant entries so
            # the comparison only covers CPU/GPU/object_store_memory.
            allocated = [
                {
                    k: int(v)
                    for k, v in r.items()
                    if k in ["CPU", "GPU", "object_store_memory"] and v != 0
                }
                for r in allocated
                if "node:__internal_head__" not in r
            ]
            allocated = [r for r in allocated if len(r) > 0]
            if allocated != expected:
                print(
                    f"{requester_id}: Allocated resources: {allocated}, "
                    f"expected: {expected}. Retrying."
                )
                return False
            else:
                return True

        wait_for_condition(
            check_allocated_resources,
            retry_interval_ms=1000,
            timeout=5,
        )
        return "ok"

    res1_resources = [
        {
            "CPU": cpu_node_spec["num_cpus"],
            "object_store_memory": object_store_memory,
        }
    ] * num_cpu_nodes
    req2_resources = [
        {
            "GPU": gpu_node_spec["num_gpus"],
        }
    ] * num_gpu_nodes
    if gpu_tasks_include_cpu:
        # NOTE: the list above repeats one shared dict, so this loop sets
        # CPU=1 on the same object each time (the effect is idempotent).
        for r in req2_resources:
            r["CPU"] = 1
    remaining = [
        {
            "CPU": gpu_node_spec["num_cpus"] - (1 if gpu_tasks_include_cpu else 0),
            "object_store_memory": object_store_memory,
        }
    ] * num_gpu_nodes
    res1 = request_and_check_resources.remote(
        requester_id="requester1",
        resources=res1_resources,
        expected=res1_resources + remaining,
        request_remaining=True,
    )
    res2 = request_and_check_resources.remote(
        requester_id="requester2",
        resources=req2_resources,
        expected=req2_resources,
        request_remaining=False,
    )
    assert ray.get([res1, res2]) == ["ok"] * 2
def _test_consecutive_failures(
    coordinator,
    call_method,
    counter_attr,
    error_msg_prefix,
):
    """Test consecutive failures: increment counter, raise after max, reset on success.

    Args:
        coordinator: The DefaultAutoscalingCoordinator under test.
        call_method: Zero-arg callable invoking the decorated method.
        counter_attr: Name of the consecutive-failure counter attribute.
        error_msg_prefix: Expected prefix of the final RuntimeError message.
    """
    max_failures = coordinator.MAX_CONSECUTIVE_FAILURES
    timeout_error = ray.exceptions.GetTimeoutError("timeout")
    # Counter increments on each failure
    with patch("ray.get", side_effect=timeout_error):
        for attempt in range(1, max_failures):
            call_method()
            assert getattr(coordinator, counter_attr) == attempt
    # Exception raised after max consecutive failures
    expected_error_msg = f"{error_msg_prefix} after {max_failures} consecutive failures"
    with patch("ray.get", side_effect=timeout_error):
        with pytest.raises(RuntimeError, match=expected_error_msg):
            call_method()
    # Counter resets on success
    with patch("ray.get", return_value=None):
        call_method()
    assert getattr(coordinator, counter_attr) == 0
def test_get_allocated_resources_handles_timeout_error(
    teardown_autoscaling_coordinator,
):
    """Test get_allocated_resources handles timeout error."""
    coordinator = DefaultAutoscalingCoordinator()
    # Pre-populate the cache so timeouts have a fallback value to return.
    coordinator._cached_allocated_resources["test"] = [{"CPU": 1}]

    def call_method():
        return coordinator.get_allocated_resources("test")

    max_failures = coordinator.MAX_CONSECUTIVE_FAILURES
    timeout_error = ray.exceptions.GetTimeoutError("timeout")
    cached_value = [{"CPU": 1}]
    new_value = [{"CPU": 2}]
    # Counter increments on failures and returns cached value
    with patch("ray.get", side_effect=timeout_error):
        for attempt in range(1, max_failures):
            result = call_method()
            assert result == cached_value
            assert coordinator._consecutive_failures_get_allocated_resources == attempt
    # Exception raised after max consecutive failures
    expected_error_msg = (
        f"Failed to get allocated resources for test "
        f"after {max_failures} consecutive failures"
    )
    with patch("ray.get", side_effect=timeout_error):
        with pytest.raises(RuntimeError, match=expected_error_msg):
            call_method()
    # Counter resets on success and returns new value
    with patch("ray.get", return_value=new_value):
        result = call_method()
    assert result == new_value
    assert coordinator._consecutive_failures_get_allocated_resources == 0
def test_cancel_request_handles_timeout_error(teardown_autoscaling_coordinator):
    """Test cancel_request handles timeout error."""
    coordinator = DefaultAutoscalingCoordinator()
    _test_consecutive_failures(
        coordinator=coordinator,
        call_method=lambda: coordinator.cancel_request("test"),
        counter_attr="_consecutive_failures_cancel_request",
        error_msg_prefix="Failed to cancel resource request for test",
    )
def test_request_resources_handles_timeout_error(teardown_autoscaling_coordinator):
    """Test request_resources handles timeout error."""
    coordinator = DefaultAutoscalingCoordinator()
    _test_consecutive_failures(
        coordinator=coordinator,
        call_method=lambda: coordinator.request_resources(
            "test", [{"CPU": 1}], expire_after_s=1
        ),
        counter_attr="_consecutive_failures_request_resources",
        error_msg_prefix="Failed to send resource request for test",
    )
def test_coordinator_accepts_zero_resource_for_missing_resource_type(
    teardown_autoscaling_coordinator,
):
    """Zero-valued resource types absent from the cluster must be accepted."""
    # This is a regression test for a bug where the coordinator crashes when you request
    # a resource type (e.g., GPU: 0) that doesn't exist on the cluster.
    coordinator = DefaultAutoscalingCoordinator()
    coordinator.request_resources(
        requester_id="spam", resources=[{"CPU": 1, "GPU": 0}], expire_after_s=1
    )
    assert coordinator.get_allocated_resources("spam") == [{"CPU": 1, "GPU": 0}]
if __name__ == "__main__":
    # Allow running this test module directly with pytest.
    import sys

    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_autoscaling_coordinator.py",
"license": "Apache License 2.0",
"lines": 396,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:ci/ray_ci/automation/crane_lib.py | """
Wrapper library for using the crane tool for managing container images.
https://github.com/google/go-containerregistry/blob/v0.19.0/cmd/crane/doc/crane.md
Functions raise CraneError on failure.
"""
import os
import platform
import subprocess
import tarfile
import tempfile
from typing import List
import runfiles
from ci.ray_ci.utils import logger
class CraneError(Exception):
    """Exception raised when a crane operation fails.

    Raised by all the `call_crane_*` helpers in this module.
    """
def _crane_binary() -> str:
    """
    Get the path to the crane binary from bazel runfiles.

    Returns:
        Path to the crane binary.

    Raises:
        ValueError: If running on unsupported platform (non-Linux or non-x86_64).
    """
    r = runfiles.Create()
    system = platform.system()
    # Use platform.machine() rather than platform.processor():
    # processor() returns an empty string on many Linux systems, which
    # would incorrectly reject a supported x86_64 host.
    machine = platform.machine()
    if system != "Linux" or machine != "x86_64":
        raise ValueError(f"Unsupported platform: {system} ({machine})")
    return r.Rlocation("crane_linux_x86_64/crane")
def _run_crane_command(args: List[str], stdin_input: str | None = None) -> str:
    """
    Run a crane command that produces TEXT output.

    Args:
        args: Command arguments to pass to crane.
        stdin_input: Optional input to pass via stdin (e.g., for passwords).

    Returns:
        Command stdout output.

    Raises:
        CraneError: If the command fails.
    """
    # Local import: only needed for the stderr drain below.
    import threading

    command = [_crane_binary()] + args
    try:
        with subprocess.Popen(
            command,
            stdin=subprocess.PIPE if stdin_input else None,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            env=os.environ,
        ) as proc:
            if stdin_input:
                assert proc.stdin is not None
                proc.stdin.write(stdin_input)
                proc.stdin.close()
            # Drain stderr on a background thread while we stream stdout.
            # Reading stdout to EOF before touching stderr can deadlock if
            # the child fills the stderr pipe buffer.
            stderr_chunks: List[str] = []
            stderr_thread = None
            if proc.stderr:
                stderr_thread = threading.Thread(
                    target=lambda: stderr_chunks.append(proc.stderr.read()),
                    daemon=True,
                )
                stderr_thread.start()
            output = ""
            if proc.stdout:
                # Stream each stdout line to the log as it arrives.
                for line in proc.stdout:
                    logger.info(line.rstrip("\n"))
                    output += line
            return_code = proc.wait()
            if stderr_thread is not None:
                stderr_thread.join()
            if return_code:
                stderr = "".join(stderr_chunks)
                raise CraneError(
                    f"Crane command `{' '.join(command)}` failed "
                    f"(rc={return_code}): {stderr}"
                )
            return output
    except FileNotFoundError as e:
        # Chain the cause so the original traceback isn't lost.
        raise CraneError(f"Crane binary not found at {command[0]}") from e
    except CraneError:
        raise
    except Exception as e:
        raise CraneError(
            f"Unexpected error running crane command `{' '.join(command)}`: {e}"
        ) from e
def _extract_tar_to_dir(tar_path: str, output_dir: str) -> None:
    """
    Extract a tar archive into ``output_dir``, skipping members whose
    resolved paths would escape the target directory (path traversal).

    Args:
        tar_path: Path to the tar file to extract.
        output_dir: Directory to extract into.
    """
    os.makedirs(output_dir, exist_ok=True)
    target_root = os.path.realpath(output_dir)

    def _is_safe(member_name: str) -> bool:
        # Safe iff the member's resolved destination stays under the
        # resolved output directory.
        destination = os.path.realpath(os.path.join(target_root, member_name))
        return os.path.commonpath([target_root, destination]) == target_root

    with tarfile.open(tar_path, mode="r:*") as archive:
        for member in archive:
            try:
                safe = _is_safe(member.name)
            except ValueError:
                # commonpath raises ValueError for paths on different drives.
                logger.warning(f"Skipping path on different drive: {member.name}")
                continue
            if not safe:
                logger.warning(f"Skipping unsafe tar member: {member.name}")
                continue
            archive.extract(member, path=output_dir)
def call_crane_copy(source: str, destination: str) -> None:
    """
    Copy a container image from source to destination.

    Delegates to `_run_crane_command`, which streams crane's stdout to the log.

    Args:
        source: Source image reference (e.g., "registry.example.com/repo:tag").
        destination: Destination image reference.

    Raises:
        CraneError: If the copy fails.
    """
    _run_crane_command(["copy", source, destination])
def call_crane_cp(tag: str, source: str, dest_repo: str) -> None:
    """
    Copy a container image to a destination repository with a specified tag.

    Delegates to `_run_crane_command`, which streams crane's stdout to the log.

    Args:
        tag: Tag to apply to the destination image.
        source: Source image reference.
        dest_repo: Destination repository URL (tag will be appended as ":tag").

    Raises:
        CraneError: If the copy fails.
    """
    _run_crane_command(["cp", source, f"{dest_repo}:{tag}"])
def call_crane_index(index_name: str, tags: List[str]) -> None:
    """
    Create a multi-architecture image index from platform-specific images.

    Args:
        index_name: Name for the resulting multi-arch index.
        tags: List of exactly 2 platform-specific image tags to combine.

    Raises:
        CraneError: If the index creation fails.
        ValueError: If tags list doesn't contain exactly 2 tags.
    """
    if len(tags) != 2:
        raise ValueError("call_crane_index requires exactly 2 tags")
    first_tag, second_tag = tags
    _run_crane_command(
        ["index", "append", "-m", first_tag, "-m", second_tag, "-t", index_name]
    )
def call_crane_manifest(tag: str) -> str:
    """
    Fetch the manifest for a container image.

    Args:
        tag: Image reference to fetch manifest for (e.g., "registry.example.com/repo:tag").

    Returns:
        The image manifest as a string.

    Raises:
        CraneError: If the image doesn't exist or fetch fails.
    """
    return _run_crane_command(["manifest", tag])
def call_crane_export(tag: str, output_dir: str) -> None:
    """
    Export a container image to a tar file and extract it.

    Equivalent of:
        crane export <tag> output.tar && tar -xf output.tar -C <output_dir>

    Args:
        tag: Image reference to export.
        output_dir: Directory to extract the image filesystem into.

    Raises:
        CraneError: If the export or extraction fails.
    """
    os.makedirs(output_dir, exist_ok=True)
    # Keep the intermediate tarball in a temp dir so it is always cleaned up.
    with tempfile.TemporaryDirectory() as tmpdir:
        tar_path = os.path.join(tmpdir, "output.tar")
        crane_cmd = [_crane_binary(), "export", tag, tar_path]
        logger.info(f"Running: {' '.join(crane_cmd)}")
        try:
            subprocess.check_call(crane_cmd, env=os.environ)
        except subprocess.CalledProcessError as e:
            # Chain the original exception so the failing command's context
            # (traceback, return code) is preserved for debugging.
            raise CraneError(f"crane export failed (rc={e.returncode})") from e
        except FileNotFoundError as e:
            raise CraneError(f"Crane binary not found at {crane_cmd[0]}") from e
        try:
            logger.info(f"Extracting {tar_path} to {output_dir}")
            _extract_tar_to_dir(tar_path, output_dir)
        except Exception as e:
            raise CraneError(f"tar extraction failed: {e}") from e
| {
"repo_id": "ray-project/ray",
"file_path": "ci/ray_ci/automation/crane_lib.py",
"license": "Apache License 2.0",
"lines": 170,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:ci/ray_ci/automation/test_crane_lib.py | import os
import platform
import sys
import tempfile
from unittest import mock
import pytest
import requests
from ci.ray_ci.automation.crane_lib import (
CraneError,
_crane_binary,
call_crane_copy,
call_crane_export,
call_crane_index,
call_crane_manifest,
)
from ci.ray_ci.automation.test_utils import local_registry # noqa: F401, F811
TEST_IMAGE_AMD64 = "alpine:3.16@sha256:0db9d004361b106932f8c7632ae54d56e92c18281e2dd203127d77405020abf6"
TEST_IMAGE_ARM64 = "alpine:3.16@sha256:4bdb4ac63839546daabfe0a267a363b3effa17ce02ac5f42d222174484c5686c"
class TestCraneBinary:
    """Tests for _crane_binary function."""

    def test_crane_binary_returns_valid_path(self):
        """The resolved crane binary path is non-empty and points at crane."""
        # The binary is only vendored for Linux x86_64.
        if platform.system() != "Linux" or platform.processor() != "x86_64":
            pytest.skip("Only supported on Linux x86_64")
        resolved_path = _crane_binary()
        assert resolved_path is not None
        assert resolved_path.endswith("crane")

    def test_crane_binary_unsupported_platform(self):
        """An unsupported OS/arch combination is rejected with ValueError."""
        # Patch via context managers instead of decorators; same effect.
        with mock.patch("platform.system", return_value="Darwin"), mock.patch(
            "platform.processor", return_value="arm64"
        ):
            with pytest.raises(ValueError, match="Unsupported platform"):
                _crane_binary()
class TestCraneCopyIntegration:
    """Integration tests for crane copy operations using a local registry."""

    def test_copy_public_image_to_local_registry(self, local_registry):  # noqa: F811
        """Test copying a public image to local registry."""
        registry_port = local_registry
        # The source is pinned by digest for reproducibility.
        target = f"localhost:{registry_port}/test-alpine:copied"
        call_crane_copy(source=TEST_IMAGE_AMD64, destination=target)
        # The registry answers 200 on the manifest endpoint once the image landed.
        manifest_url = f"http://localhost:{registry_port}/v2/test-alpine/manifests/copied"
        assert requests.get(manifest_url).status_code == 200

    def test_copy_nonexistent_image_fails(self, local_registry):  # noqa: F811
        """Test that copying a non-existent image raises CraneError."""
        registry_port = local_registry
        with pytest.raises(CraneError):
            call_crane_copy(
                source="localhost:9999/nonexistent/image:tag",
                destination=f"localhost:{registry_port}/should-not-exist:tag",
            )
class TestCraneManifestIntegration:
    """Integration tests for crane manifest operations."""

    def test_get_manifest_from_local_registry(self, local_registry):  # noqa: F811
        """Test getting manifest from local registry."""
        registry_port = local_registry
        # Seed the registry with a known image before querying its manifest.
        dest = f"localhost:{registry_port}/manifest-test:v1"
        call_crane_copy(source=TEST_IMAGE_AMD64, destination=dest)
        manifest_text = call_crane_manifest(tag=dest)
        assert "schemaVersion" in manifest_text or "config" in manifest_text

    def test_get_manifest_nonexistent_tag_fails(self, local_registry):  # noqa: F811
        """Test that getting manifest for non-existent tag raises CraneError."""
        registry_port = local_registry
        missing_tag = f"localhost:{registry_port}/does-not-exist:missing"
        with pytest.raises(CraneError):
            call_crane_manifest(tag=missing_tag)
class TestCraneIndexIntegration:
    """Integration tests for crane index operations."""

    def test_create_multiarch_index(self, local_registry):  # noqa: F811
        """Test creating a multi-architecture index."""
        registry_port = local_registry
        # Push one image per architecture into the local registry.
        arch_sources = {"amd64": TEST_IMAGE_AMD64, "arm64": TEST_IMAGE_ARM64}
        pushed_tags = []
        for arch, src in arch_sources.items():
            dest = f"localhost:{registry_port}/index-test:{arch}"
            call_crane_copy(source=src, destination=dest)
            pushed_tags.append(dest)
        # Combine both architecture images into a single index.
        index_name = f"localhost:{registry_port}/index-test:multiarch"
        call_crane_index(index_name=index_name, tags=pushed_tags)
        # Verify index was created
        response = requests.get(
            f"http://localhost:{registry_port}/v2/index-test/manifests/multiarch"
        )
        assert response.status_code == 200
        manifest = response.json()
        assert "manifests" in manifest
        assert len(manifest["manifests"]) == 2
class TestCraneExportIntegration:
    """Integration tests for crane export+extract operations."""

    def test_export_extracts_into_subdir(self, local_registry):  # noqa: F811
        """
        Test that call_crane_export exports a container filesystem and extracts
        it into the provided directory.
        """
        registry_port = local_registry
        image = f"localhost:{registry_port}/export-test:alpine"
        call_crane_copy(source=TEST_IMAGE_AMD64, destination=image)
        with tempfile.TemporaryDirectory() as tmpdir:
            extract_dir = os.path.join(tmpdir, "nested", "wanda_fs")
            call_crane_export(tag=image, output_dir=extract_dir)
            # The nested output dir must have been created and populated.
            assert os.path.isdir(extract_dir)
            assert any(os.scandir(extract_dir)), "export dir is empty"
            # Alpine should have these paths in its root filesystem
            assert os.path.isdir(os.path.join(extract_dir, "bin"))
            assert os.path.isdir(os.path.join(extract_dir, "etc"))
            # /bin/sh and /bin/ash may be symlinks, hence lexists.
            shell_candidates = [
                os.path.join(extract_dir, "bin", "sh"),
                os.path.join(extract_dir, "bin", "ash"),
            ]
            assert any(os.path.lexists(p) for p in shell_candidates)
# Allow running this test module directly (outside of pytest/Bazel discovery);
# propagate pytest's exit code to the shell.
if __name__ == "__main__":
    sys.exit(pytest.main(["-vv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "ci/ray_ci/automation/test_crane_lib.py",
"license": "Apache License 2.0",
"lines": 118,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:ci/ray_ci/automation/test_utils.py | """Shared test utilities for ci/ray_ci/automation tests."""
import os
import platform
import random
import shutil
import subprocess
import tempfile
import threading
import time
import pytest
import requests
import runfiles
def _registry_binary():
    """Resolve the vendored local-registry binary via Bazel runfiles."""
    resolver = runfiles.Create()
    system = platform.system()
    # The registry binary is only bundled for Linux x86_64.
    if system != "Linux" or platform.processor() != "x86_64":
        raise ValueError(f"Unsupported platform: {system}")
    return resolver.Rlocation("registry_x86_64/registry")
def _start_local_registry():
    """Start local registry for testing.

    Returns:
        Tuple of (registry_proc, registry_thread, temp_dir, port)

    Raises:
        TimeoutError: If the registry does not answer on /v2/ within ~10
            seconds. The spawned process and temp directory are cleaned up
            before raising (previously they were leaked on failure).
    """
    # NOTE(review): a random port can collide with another process; callers
    # tolerate this because a collision just fails the fixture setup.
    port = random.randint(2000, 20000)
    temp_dir = tempfile.mkdtemp()
    config_content = "\n".join(
        [
            "version: 0.1",
            "storage:",
            "  filesystem:",
            f"    rootdirectory: {temp_dir}",
            "http:",
            f"  addr: :{port}",
        ]
    )
    config_path = os.path.join(temp_dir, "registry.yml")
    with open(config_path, "w") as config_file:
        config_file.write(config_content)
        config_file.flush()
    registry_proc = subprocess.Popen(
        [_registry_binary(), "serve", config_path],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    # Reap the process in the background so it never lingers as a zombie.
    registry_thread = threading.Thread(target=lambda: registry_proc.wait(), daemon=True)
    registry_thread.start()
    # Poll the registry's version endpoint until it responds.
    for _ in range(10):
        try:
            response = requests.get(f"http://localhost:{port}/v2/")
            if response.status_code == 200:
                return registry_proc, registry_thread, temp_dir, port
        except requests.exceptions.ConnectionError:
            pass
        time.sleep(1)
    # Startup failed: don't leak the subprocess or the temp directory.
    registry_proc.kill()
    shutil.rmtree(temp_dir, ignore_errors=True)
    raise TimeoutError("Registry failed to start within 10 seconds")
@pytest.fixture(scope="module")
def local_registry():
"""Fixture that provides a local Docker registry for testing.
Yields:
int: The port number of the local registry.
"""
registry_proc, registry_thread, temp_dir, port = _start_local_registry()
yield port
registry_proc.kill()
registry_thread.join(timeout=5)
shutil.rmtree(temp_dir, ignore_errors=True)
| {
"repo_id": "ray-project/ray",
"file_path": "ci/ray_ci/automation/test_utils.py",
"license": "Apache License 2.0",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:rllib/examples/utils.py | import argparse
import json
import logging
import os
import re
import time
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Type,
Union,
)
import numpy as np
import ray
from ray import tune
from ray.air.integrations.wandb import WANDB_ENV_VAR, WandbLoggerCallback
from ray.rllib.utils.metrics import (
ENV_RUNNER_RESULTS,
EPISODE_RETURN_MEAN,
EVALUATION_RESULTS,
NUM_ENV_STEPS_SAMPLED_LIFETIME,
)
from ray.rllib.utils.serialization import convert_numpy_to_python_primitives
from ray.rllib.utils.typing import ResultDict
from ray.tune.result import TRAINING_ITERATION
if TYPE_CHECKING:
from ray.rllib.algorithms import AlgorithmConfig
logger = logging.getLogger(__name__)
def add_rllib_example_script_args(
    parser: Optional[argparse.ArgumentParser] = None,
    default_reward: float = 100.0,
    default_iters: int = 200,
    default_timesteps: int = 100000,
) -> argparse.ArgumentParser:
    """Adds RLlib-typical (and common) examples scripts command line args to a parser.

    TODO (sven): This function should be used by most of our examples scripts, which
    already mostly have this logic in them (but written out).

    Args:
        parser: The parser to add the arguments to. If None, create a new one.
        default_reward: The default value for the --stop-reward option.
        default_iters: The default value for the --stop-iters option.
        default_timesteps: The default value for the --stop-timesteps option.

    Returns:
        The altered (or newly created) parser object.
    """
    if parser is None:
        parser = argparse.ArgumentParser()

    # Algo and Algo config options.
    parser.add_argument(
        "--algo", type=str, default="PPO", help="The RLlib-registered algorithm to use."
    )
    parser.add_argument(
        "--framework",
        choices=["tf", "tf2", "torch"],
        default="torch",
        help="The DL framework specifier.",
    )
    parser.add_argument(
        "--env",
        type=str,
        default=None,
        help="The gym.Env identifier to run the experiment with.",
    )
    parser.add_argument(
        "--num-env-runners",
        type=int,
        default=None,
        help="The number of (remote) EnvRunners to use for the experiment.",
    )
    parser.add_argument(
        "--num-envs-per-env-runner",
        type=int,
        default=None,
        help="The number of (vectorized) environments per EnvRunner. Note that "
        "this is identical to the batch size for (inference) action computations.",
    )
    parser.add_argument(
        "--num-agents",
        type=int,
        default=0,
        help="If 0 (default), will run as single-agent. If > 0, will run as "
        "multi-agent with the environment simply cloned n times and each agent acting "
        "independently at every single timestep. The overall reward for this "
        "experiment is then the sum over all individual agents' rewards.",
    )

    # Evaluation options.
    parser.add_argument(
        "--evaluation-num-env-runners",
        type=int,
        default=0,
        help="The number of evaluation (remote) EnvRunners to use for the experiment.",
    )
    parser.add_argument(
        "--evaluation-interval",
        type=int,
        default=0,
        help="Every how many iterations to run one round of evaluation. "
        "Use 0 (default) to disable evaluation.",
    )
    # `--evaluation-duration` accepts either the literal string "auto" or an int.
    parser.add_argument(
        "--evaluation-duration",
        type=lambda v: v if v == "auto" else int(v),
        default=10,
        help="The number of evaluation units to run each evaluation round. "
        "Use `--evaluation-duration-unit` to count either in 'episodes' "
        "or 'timesteps'. If 'auto', will run as many as possible during train pass ("
        "`--evaluation-parallel-to-training` must be set then).",
    )
    parser.add_argument(
        "--evaluation-duration-unit",
        type=str,
        default="episodes",
        choices=["episodes", "timesteps"],
        help="The evaluation duration unit to count by. One of 'episodes' or "
        "'timesteps'. This unit will be run `--evaluation-duration` times in each "
        "evaluation round. If `--evaluation-duration=auto`, this setting does not "
        "matter.",
    )
    parser.add_argument(
        "--evaluation-parallel-to-training",
        action="store_true",
        help="Whether to run evaluation parallel to training. This might help speed up "
        "your overall iteration time. Be aware that when using this option, your "
        "reported evaluation results are referring to one iteration before the current "
        "one.",
    )

    # RLlib logging options.
    parser.add_argument(
        "--output",
        type=str,
        default=None,
        help="The output directory to write trajectories to, which are collected by "
        "the algo's EnvRunners.",
    )
    parser.add_argument(
        "--log-level",
        type=str,
        default=None,  # None -> use default
        choices=["INFO", "DEBUG", "WARN", "ERROR"],
        help="The log-level to be used by the RLlib logger.",
    )

    # tune.Tuner options.
    parser.add_argument(
        "--no-tune",
        action="store_true",
        help="Whether to NOT use tune.Tuner(), but rather a simple for-loop calling "
        "`algo.train()` repeatedly until one of the stop criteria is met.",
    )
    parser.add_argument(
        "--num-samples",
        type=int,
        default=1,
        help="How many (tune.Tuner.fit()) experiments to execute - if possible in "
        "parallel.",
    )
    parser.add_argument(
        "--max-concurrent-trials",
        type=int,
        default=None,
        help="How many (tune.Tuner) trials to run concurrently.",
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=2,
        help="The verbosity level for the `tune.Tuner()` running the experiment.",
    )
    parser.add_argument(
        "--checkpoint-freq",
        type=int,
        default=0,
        help=(
            "The frequency (in training iterations) with which to create checkpoints. "
            "Note that if --wandb-key is provided, all checkpoints will "
            "automatically be uploaded to WandB."
        ),
    )
    parser.add_argument(
        "--checkpoint-at-end",
        action="store_true",
        help=(
            "Whether to create a checkpoint at the very end of the experiment. "
            "Note that if --wandb-key is provided, all checkpoints will "
            "automatically be uploaded to WandB."
        ),
    )
    parser.add_argument(
        "--tune-max-report-freq",
        type=int,
        default=5,  # tune default to 5
        help="The frequency (in seconds) at which to log the training performance.",
    )

    # WandB logging options.
    parser.add_argument(
        "--wandb-key",
        type=str,
        default=None,
        help="The WandB API key to use for uploading results.",
    )
    parser.add_argument(
        "--wandb-project",
        type=str,
        default=None,
        help="The WandB project name to use.",
    )
    parser.add_argument(
        "--wandb-run-name",
        type=str,
        default=None,
        help="The WandB run name to use.",
    )

    # Experiment stopping and testing criteria.
    parser.add_argument(
        "--stop-reward",
        type=float,
        default=default_reward,
        help="Reward at which the script should stop training.",
    )
    parser.add_argument(
        "--stop-iters",
        type=int,
        default=default_iters,
        help="The number of iterations to train.",
    )
    parser.add_argument(
        "--stop-timesteps",
        type=int,
        default=default_timesteps,
        help="The number of (environment sampling) timesteps to train.",
    )
    parser.add_argument(
        "--as-test",
        action="store_true",
        help="Whether this script should be run as a test. If set, --stop-reward must "
        "be achieved within --stop-timesteps AND --stop-iters, otherwise this "
        "script will throw an exception at the end.",
    )
    parser.add_argument(
        "--as-release-test",
        action="store_true",
        help="Whether this script should be run as a release test. If set, "
        "all that applies to the --as-test option is true, plus, a short JSON summary "
        "will be written into a results file whose location is given by the ENV "
        "variable `TEST_OUTPUT_JSON`.",
    )

    # Learner scaling options.
    parser.add_argument(
        "--num-learners",
        type=int,
        default=None,
        help="The number of Learners to use. If `None`, use the algorithm's default "
        "value.",
    )
    parser.add_argument(
        "--num-cpus-per-learner",
        type=float,
        default=None,
        help="The number of CPUs per Learner to use. If `None`, use the algorithm's "
        "default value.",
    )
    parser.add_argument(
        "--num-gpus-per-learner",
        type=float,
        default=None,
        help="The number of GPUs per Learner to use. If `None` and there are enough "
        "GPUs for all required Learners (--num-learners), use a value of 1, "
        "otherwise 0.",
    )
    parser.add_argument(
        "--num-aggregator-actors-per-learner",
        type=int,
        default=None,
        help="The number of Aggregator actors to use per Learner. If `None`, use the "
        "algorithm's default value.",
    )

    # Ray init options.
    # NOTE: 0 means "let Ray decide" (run_rllib_example_script_experiment passes
    # `num_cpus=args.num_cpus or None` to ray.init()).
    parser.add_argument("--num-cpus", type=int, default=0)
    # Old API stack: config.num_gpus.
    parser.add_argument(
        "--num-gpus",
        type=int,
        default=None,
        help="The number of GPUs to use (only on the old API stack).",
    )
    parser.add_argument(
        "--old-api-stack",
        action="store_true",
        help="Run this script on the old API stack of RLlib.",
    )

    # Deprecated options that are maintained to throw an error when still used.
    # Use `--old-api-stack` to disable the new API stack.
    # `help=argparse.SUPPRESS` hides them from --help output.
    parser.add_argument(
        "--enable-new-api-stack",
        action="store_true",
        help=argparse.SUPPRESS,
    )
    parser.add_argument(
        "--local-mode",
        action="store_true",
        help=argparse.SUPPRESS,
    )

    return parser
# TODO (simon): Use this function in the `run_rllib_example_experiment` when
# `no_tune` is `True`.
def should_stop(
    stop: Dict[str, Any], results: ResultDict, keep_ray_up: bool = False
) -> bool:
    """Checks stopping criteria on `ResultDict`

    Args:
        stop: Dictionary of stopping criteria. Each criterion is a mapping of
            a metric in the `ResultDict` of the algorithm to a certain criterion.
        results: An RLlib `ResultDict` containing all results from a training step.
        keep_ray_up: Optionally shutting down the running Ray instance.

    Returns: True, if any stopping criterion is fulfilled. Otherwise, False.
    """

    def _resolve(path: str):
        # Follow the "/"-separated metric path into the nested results dict;
        # return None as soon as any segment is missing.
        node = results
        for segment in (s.strip() for s in path.split("/")):
            if not (isinstance(node, dict) and segment in node):
                return None
            node = node[segment]
        return node

    for key, threshold in stop.items():
        value = _resolve(key)
        if value is None:
            # Metric not present in this result dict -> skip this criterion.
            continue
        try:
            criterion_met = not np.isnan(value) and value >= threshold
        except TypeError:
            # Non-numeric value -> this criterion can never fire.
            continue
        if criterion_met:
            print(f"Stop criterion ({key}={threshold}) fulfilled!")
            if not keep_ray_up:
                ray.shutdown()
            return True
    # If none of the criteria are fulfilled, return False.
    return False
# TODO (sven): Make this the de-facto, well documented, and unified utility for most of
# our tests:
# - CI (label: "learning_tests")
# - release tests (benchmarks)
# - example scripts
def run_rllib_example_script_experiment(
    base_config: "AlgorithmConfig",
    args: Optional[argparse.Namespace] = None,
    *,
    stop: Optional[Dict] = None,
    success_metric: Optional[Dict] = None,
    trainable: Optional[Type] = None,
    tune_callbacks: Optional[List] = None,
    keep_config: bool = False,
    keep_ray_up: bool = False,
    scheduler=None,
    progress_reporter=None,
) -> Union[ResultDict, tune.result_grid.ResultGrid]:
    """Given an algorithm config and some command line args, runs an experiment.

    There are some constraints on what properties must be defined in `args`.
    It should ideally be generated via calling
    `args = add_rllib_example_script_args()`, which can be found in this very module
    here.

    The function sets up an Algorithm object from the given config (altered by the
    contents of `args`), then runs the Algorithm via Tune (or manually, if
    `args.no_tune` is set to True) using the stopping criteria in `stop`.

    At the end of the experiment, if `args.as_test` is True, checks, whether the
    Algorithm reached the `success_metric` (if None, use `env_runners/
    episode_return_mean` with a minimum value of `args.stop_reward`).

    See https://github.com/ray-project/ray/tree/master/rllib/examples for an overview
    of all supported command line options.

    Args:
        base_config: The AlgorithmConfig object to use for this experiment. This base
            config will be automatically "extended" based on some of the provided
            `args`. For example, `args.num_env_runners` is used to set
            `config.num_env_runners`, etc.
        args: A argparse.Namespace object, ideally returned by calling
            `args = add_rllib_example_script_args()`. It must have the following
            properties defined: `stop_iters`, `stop_reward`, `stop_timesteps`,
            `no_tune`, `verbose`, `checkpoint_freq`, `as_test`. Optionally, for WandB
            logging: `wandb_key`, `wandb_project`, `wandb_run_name`.
        stop: An optional dict mapping ResultDict key strings (using "/" in case of
            nesting, e.g. "env_runners/episode_return_mean" for referring to
            `result_dict['env_runners']['episode_return_mean']` to minimum
            values, reaching of which will stop the experiment). Default is:
            {
            "env_runners/episode_return_mean": args.stop_reward,
            "training_iteration": args.stop_iters,
            "num_env_steps_sampled_lifetime": args.stop_timesteps,
            }
        success_metric: Only relevant if `args.as_test` is True.
            A dict mapping a single(!) ResultDict key string (using "/" in
            case of nesting, e.g. "env_runners/episode_return_mean" for referring
            to `result_dict['env_runners']['episode_return_mean']`) to a single(!)
            minimum value to be reached in order for the experiment to count as
            successful. If `args.as_test` is True AND this `success_metric` is not
            reached with the bounds defined by `stop`, will raise an Exception.
        trainable: The Trainable subclass to run in the tune.Tuner. If None (default),
            use the registered RLlib Algorithm class specified by args.algo.
        tune_callbacks: A list of Tune callbacks to configure with the tune.Tuner.
            In case `args.wandb_key` is provided, appends a WandB logger to this
            list.
        keep_config: Set this to True, if you don't want this utility to change the
            given `base_config` in any way and leave it as-is. This is helpful
            for those example scripts which demonstrate how to set config settings
            that are otherwise taken care of automatically in this function (e.g.
            `num_env_runners`).
        keep_ray_up: If True, do not call `ray.shutdown()` when the experiment
            ends (or when a stop criterion triggers in the `--no-tune` loop).
        scheduler: An optional Tune trial scheduler, passed through to
            `tune.TuneConfig(scheduler=...)`.
        progress_reporter: An optional Tune progress reporter. If None, a
            `tune.CLIReporter` is auto-configured (multi-agent runs additionally
            list per-module returns).

    Returns:
        The last ResultDict from a --no-tune run OR the tune.Tuner.fit()
        results.
    """
    if args is None:
        parser = add_rllib_example_script_args()
        args = parser.parse_args()

    # Deprecated args.
    if args.enable_new_api_stack:
        raise ValueError(
            "`--enable-new-api-stack` flag no longer supported (it's the default "
            "behavior now)! To switch back to the old API stack on your scripts, use "
            "the `--old-api-stack` flag."
        )
    if args.local_mode:
        raise ValueError("`--local-mode` is no longer supported.")

    # If run --as-release-test, --as-test must also be set.
    if args.as_release_test:
        args.as_test = True
    # Test runs force a quieter, slower-reporting console output.
    if args.as_test:
        args.verbose = 1
        args.tune_max_report_freq = 30

    # Initialize Ray.
    ray.init(
        num_cpus=args.num_cpus or None,
        ignore_reinit_error=True,
    )

    # Define one or more stopping criteria.
    if stop is None:
        stop = {
            f"{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}": args.stop_reward,
            f"{ENV_RUNNER_RESULTS}/{NUM_ENV_STEPS_SAMPLED_LIFETIME}": (
                args.stop_timesteps
            ),
            TRAINING_ITERATION: args.stop_iters,
        }

    # NOTE: `base_config` is mutated in place below (unless keep_config=True).
    config = base_config

    # Enhance the `base_config`, based on provided `args`.
    if not keep_config:
        # Set the framework.
        config.framework(args.framework)

        # Add an env specifier (only if not already set in config)?
        if args.env is not None and config.env is None:
            config.environment(args.env)

        # Disable the new API stack?
        if args.old_api_stack:
            config.api_stack(
                enable_rl_module_and_learner=False,
                enable_env_runner_and_connector_v2=False,
            )

        # Define EnvRunner scaling and behavior.
        if args.num_env_runners is not None:
            config.env_runners(num_env_runners=args.num_env_runners)
        if args.num_envs_per_env_runner is not None:
            config.env_runners(num_envs_per_env_runner=args.num_envs_per_env_runner)

        # Define compute resources used automatically (only using the --num-learners
        # and --num-gpus-per-learner args).
        # New stack.
        if config.enable_rl_module_and_learner:
            if args.num_gpus is not None and args.num_gpus > 0:
                raise ValueError(
                    "--num-gpus is not supported on the new API stack! To train on "
                    "GPUs, use the command line options `--num-gpus-per-learner=1` and "
                    "`--num-learners=[your number of available GPUs]`, instead."
                )

            # Do we have GPUs available in the cluster?
            num_gpus_available = ray.cluster_resources().get("GPU", 0)
            # Number of actual Learner instances (including the local Learner if
            # `num_learners=0`).
            num_actual_learners = (
                args.num_learners
                if args.num_learners is not None
                else config.num_learners
            ) or 1  # 1: There is always a local Learner, if num_learners=0.
            # How many were hard-requested by the user
            # (through explicit `--num-gpus-per-learner >= 1`).
            num_gpus_requested = (args.num_gpus_per_learner or 0) * num_actual_learners
            # Number of GPUs needed, if `num_gpus_per_learner=None` (auto).
            num_gpus_needed_if_available = (
                args.num_gpus_per_learner
                if args.num_gpus_per_learner is not None
                else 1
            ) * num_actual_learners
            # Define compute resources used.
            config.resources(num_gpus=0)  # @OldAPIStack
            if args.num_learners is not None:
                config.learners(num_learners=args.num_learners)

            # User wants to use aggregator actors per Learner.
            if args.num_aggregator_actors_per_learner is not None:
                config.learners(
                    num_aggregator_actors_per_learner=(
                        args.num_aggregator_actors_per_learner
                    )
                )

            # User wants to use GPUs if available, but doesn't hard-require them.
            if args.num_gpus_per_learner is None:
                if num_gpus_available >= num_gpus_needed_if_available:
                    config.learners(num_gpus_per_learner=1)
                else:
                    config.learners(num_gpus_per_learner=0)
            # User hard-requires n GPUs, but they are not available -> Error.
            elif num_gpus_available < num_gpus_requested:
                raise ValueError(
                    "You are running your script with --num-learners="
                    f"{args.num_learners} and --num-gpus-per-learner="
                    f"{args.num_gpus_per_learner}, but your cluster only has "
                    f"{num_gpus_available} GPUs!"
                )
            # All required GPUs are available -> Use them.
            else:
                config.learners(num_gpus_per_learner=args.num_gpus_per_learner)

            # Set CPUs per Learner.
            if args.num_cpus_per_learner is not None:
                config.learners(num_cpus_per_learner=args.num_cpus_per_learner)

        # Old stack (override only if arg was provided by user).
        elif args.num_gpus is not None:
            config.resources(num_gpus=args.num_gpus)

        # Evaluation setup.
        if args.evaluation_interval > 0:
            config.evaluation(
                evaluation_num_env_runners=args.evaluation_num_env_runners,
                evaluation_interval=args.evaluation_interval,
                evaluation_duration=args.evaluation_duration,
                evaluation_duration_unit=args.evaluation_duration_unit,
                evaluation_parallel_to_training=args.evaluation_parallel_to_training,
            )

        # Set the log-level (if applicable).
        if args.log_level is not None:
            config.debugging(log_level=args.log_level)

        # Set the output dir (if applicable).
        if args.output is not None:
            config.offline_data(output=args.output)

    # Run the experiment w/o Tune (directly operate on the RLlib Algorithm object).
    if args.no_tune:
        assert not args.as_test and not args.as_release_test
        algo = config.build()
        for i in range(stop.get(TRAINING_ITERATION, args.stop_iters)):
            results = algo.train()
            if ENV_RUNNER_RESULTS in results:
                mean_return = results[ENV_RUNNER_RESULTS].get(
                    EPISODE_RETURN_MEAN, np.nan
                )
                print(f"iter={i} R={mean_return}", end="")
            if (
                EVALUATION_RESULTS in results
                and ENV_RUNNER_RESULTS in results[EVALUATION_RESULTS]
            ):
                Reval = results[EVALUATION_RESULTS][ENV_RUNNER_RESULTS][
                    EPISODE_RETURN_MEAN
                ]
                print(f" R(eval)={Reval}", end="")
            print()
            # Check all stop criteria against this iteration's results.
            for key, threshold in stop.items():
                val = results
                for k in key.split("/"):
                    try:
                        val = val[k]
                    except KeyError:
                        val = None
                        break
                if val is not None and not np.isnan(val) and val >= threshold:
                    print(f"Stop criterion ({key}={threshold}) fulfilled!")
                    if not keep_ray_up:
                        ray.shutdown()
                    return results
        if not keep_ray_up:
            ray.shutdown()
        return results

    # Run the experiment using Ray Tune.

    # Log results using WandB.
    tune_callbacks = tune_callbacks or []
    if hasattr(args, "wandb_key") and (
        args.wandb_key is not None or WANDB_ENV_VAR in os.environ
    ):
        wandb_key = args.wandb_key or os.environ[WANDB_ENV_VAR]
        project = args.wandb_project or (
            args.algo.lower() + "-" + re.sub("\\W+", "-", str(config.env).lower())
        )
        tune_callbacks.append(
            WandbLoggerCallback(
                api_key=wandb_key,
                project=project,
                upload_checkpoints=True,
                **({"name": args.wandb_run_name} if args.wandb_run_name else {}),
            )
        )

    # Autoconfigure a tune.CLIReporter (to log the results to the console).
    # Use better ProgressReporter for multi-agent cases: List individual policy rewards.
    if progress_reporter is None:
        if args.num_agents == 0:
            progress_reporter = tune.CLIReporter(
                metric_columns={
                    TRAINING_ITERATION: "iter",
                    "time_total_s": "total time (s)",
                    NUM_ENV_STEPS_SAMPLED_LIFETIME: "ts",
                    f"{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}": "episode return mean",
                },
                max_report_frequency=args.tune_max_report_freq,
            )
        else:
            progress_reporter = tune.CLIReporter(
                metric_columns={
                    **{
                        TRAINING_ITERATION: "iter",
                        "time_total_s": "total time (s)",
                        NUM_ENV_STEPS_SAMPLED_LIFETIME: "ts",
                        f"{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}": "combined return",
                    },
                    # One extra column per multi-agent module's mean return.
                    **{
                        (
                            f"{ENV_RUNNER_RESULTS}/module_episode_returns_mean/{pid}"
                        ): f"return {pid}"
                        for pid in config.policies
                    },
                },
                max_report_frequency=args.tune_max_report_freq,
            )

    # Force Tuner to use old progress output as the new one silently ignores our custom
    # `tune.CLIReporter`.
    os.environ["RAY_AIR_NEW_OUTPUT"] = "0"

    # Run the actual experiment (using Tune).
    start_time = time.time()
    results = tune.Tuner(
        trainable or config.algo_class,
        param_space=config,
        run_config=tune.RunConfig(
            failure_config=tune.FailureConfig(max_failures=0, fail_fast="raise"),
            stop=stop,
            verbose=args.verbose,
            callbacks=tune_callbacks,
            checkpoint_config=tune.CheckpointConfig(
                checkpoint_frequency=args.checkpoint_freq,
                checkpoint_at_end=args.checkpoint_at_end,
            ),
            progress_reporter=progress_reporter,
        ),
        tune_config=tune.TuneConfig(
            num_samples=args.num_samples,
            max_concurrent_trials=args.max_concurrent_trials,
            scheduler=scheduler,
        ),
    ).fit()
    time_taken = time.time() - start_time

    if not keep_ray_up:
        ray.shutdown()

    # If run as a test, check whether we reached the specified success criteria.
    test_passed = False
    if args.as_test:
        # Success metric not provided, try extracting it from `stop`.
        if success_metric is None:
            for try_it in [
                f"{EVALUATION_RESULTS}/{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}",
                f"{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}",
            ]:
                if try_it in stop:
                    success_metric = {try_it: stop[try_it]}
                    break
            if success_metric is None:
                success_metric = {
                    f"{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}": args.stop_reward,
                }
        # TODO (sven): Make this work for more than one metric (AND-logic?).
        # Get maximum value of `metric` over all trials
        # (check if at least one trial achieved some learning, not just the final one).
        success_metric_key, success_metric_value = next(iter(success_metric.items()))
        best_value = max(
            row[success_metric_key] for _, row in results.get_dataframe().iterrows()
        )
        if best_value >= success_metric_value:
            test_passed = True
            print(f"`{success_metric_key}` of {success_metric_value} reached! ok")

        # Release tests additionally dump a JSON summary for the CI harness.
        if args.as_release_test:
            trial = results._experiment_analysis.trials[0]
            stats = trial.last_result
            stats.pop("config", None)
            json_summary = {
                "time_taken": float(time_taken),
                "trial_states": [trial.status],
                "last_update": float(time.time()),
                "stats": convert_numpy_to_python_primitives(stats),
                "passed": [test_passed],
                "not_passed": [not test_passed],
                "failures": {str(trial): 1} if not test_passed else {},
            }
            filename = os.environ.get("TEST_OUTPUT_JSON", "/tmp/learning_test.json")
            with open(filename, "wt") as f:
                json.dump(json_summary, f)

        # Release tests only print on failure (the JSON above carries the
        # verdict); plain --as-test runs raise.
        if not test_passed:
            if args.as_release_test:
                print(
                    f"`{success_metric_key}` of {success_metric_value} not reached! Best value reached is {best_value}"
                )
            else:
                raise ValueError(
                    f"`{success_metric_key}` of {success_metric_value} not reached! Best value reached is {best_value}"
                )

    return results
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/examples/utils.py",
"license": "Apache License 2.0",
"lines": 708,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:release/microbenchmark/experimental/rdt_single_node_microbenchmark.py | import argparse
import json
import os
import socket
import time
import torch
import ray
from ray.experimental.collective import (
create_collective_group,
destroy_all_collective_groups,
)
# Start Ray with env vars required for torch.distributed rendezvous: every
# worker inherits MASTER_ADDR/MASTER_PORT so init_process_group() can connect.
ray.init(
    runtime_env={
        "env_vars": {
            # Needed for torch distributed.
            "MASTER_ADDR": socket.gethostbyname(socket.gethostname()),
            "MASTER_PORT": "8888",
        }
    }
)
@ray.remote(num_gpus=1, enable_tensor_transport=True)
class GPUActor:
    """Actor owning one GPU; exposes RDT and raw torch.distributed transfers."""

    def send(self, size_in_bytes, device):
        """Allocate and return a ones-filled int8 tensor of `size_in_bytes` elements."""
        return torch.ones(size_in_bytes, dtype=torch.int8, device=device)

    def recv(self, rdt_tensor: torch.Tensor):
        """Consume an incoming tensor; return its first element as a Python int."""
        return rdt_tensor[0].item()

    def init_torch(self, rank):
        """Join a two-process NCCL group under the given rank."""
        self.rank = rank
        torch.distributed.init_process_group(
            backend="nccl",
            world_size=2,
            rank=rank,
        )

    def send_with_torch(self, size_in_bytes, device, other_rank):
        """Blocking torch.distributed.send of a freshly allocated tensor."""
        payload = torch.ones(size_in_bytes, dtype=torch.int8, device=device)
        torch.distributed.send(payload, other_rank)

    def recv_with_torch(self, size_in_bytes, device, other_rank):
        """Blocking torch.distributed.recv into a new buffer; return its first element."""
        payload = torch.empty(size_in_bytes, dtype=torch.int8, device=device)
        torch.distributed.recv(payload, other_rank)
        return payload[0].item()

    def send_many_with_torch(self, size_in_bytes, device, other_rank, num_transfers):
        """Perform `num_transfers` sends, allocating a new buffer for each one."""
        for _ in range(num_transfers):
            payload = torch.ones(size_in_bytes, dtype=torch.int8, device=device)
            torch.distributed.send(payload, other_rank)

    def recv_many_with_torch(self, size_in_bytes, device, other_rank, num_transfers):
        """Perform `num_transfers` receives; return the first element of each."""
        received = []
        for _ in range(num_transfers):
            payload = torch.empty(size_in_bytes, dtype=torch.int8, device=device)
            torch.distributed.recv(payload, other_rank)
            received.append(payload[0].item())
        return received
"""
THROUGHPUT
- NEW SEND OBJECT PER RECV
- SAME SEND OBJECT PER RECV
LATENCY
- JUST 1 TRANSFER
TORCH_LATENCY
- JUST 1 TRANSFER
TORCH THROUGHPUT
- NEW SEND PER RECV (all transfers done inside just 2 ray tasks)
"""
def throughput_new_send_per_recv(
    num_transfers, transport, size, device, sender, receiver
):
    """Time `num_transfers` transfers where every recv consumes its own send object.

    One untimed warmup transfer runs first so connection setup is excluded.
    Returns elapsed wall-clock seconds.
    """
    # Warmup (not timed).
    warmup_ref = sender.send.options(tensor_transport=transport).remote(size, device)
    ray.get(receiver.recv.remote(warmup_ref))
    started_at = time.perf_counter()
    recv_refs = [
        receiver.recv.remote(
            sender.send.options(tensor_transport=transport).remote(size, device)
        )
        for _ in range(num_transfers)
    ]
    ray.get(recv_refs)
    return time.perf_counter() - started_at
def throughput_same_send_per_recv(
    num_transfers, transport, size, device, sender, receiver
):
    """Time `num_transfers` recvs that all consume one shared send object.

    One untimed warmup transfer runs first so connection setup is excluded.
    Returns elapsed wall-clock seconds.
    """
    # Warmup (not timed).
    warmup_ref = sender.send.options(tensor_transport=transport).remote(size, device)
    ray.get(receiver.recv.remote(warmup_ref))
    started_at = time.perf_counter()
    shared_ref = sender.send.options(tensor_transport=transport).remote(size, device)
    recv_refs = [receiver.recv.remote(shared_ref) for _ in range(num_transfers)]
    ray.get(recv_refs)
    return time.perf_counter() - started_at
def latency_test(_num_transfers, transport, size, device, sender, receiver):
    """Average end-to-end latency of a single RDT transfer over 10 round trips.

    `_num_transfers` is ignored; latency always uses exactly one transfer per
    measurement. Returns the mean elapsed seconds of 10 runs.
    """
    total = 0.0
    for _ in range(10):
        started_at = time.perf_counter()
        ray.get(
            receiver.recv.remote(
                sender.send.options(tensor_transport=transport).remote(size, device)
            )
        )
        total += time.perf_counter() - started_at
    return total / 10
def torch_latency(_num_transfers, _transport, size, device, sender, receiver):
    """Average latency of one raw torch.distributed send/recv pair over 10 runs.

    `_num_transfers` and `_transport` are ignored (torch.distributed is used
    directly). Returns the mean elapsed seconds of 10 runs.
    """
    total = 0.0
    for _ in range(10):
        started_at = time.perf_counter()
        pending = [
            sender.send_with_torch.remote(size, device, 1),
            receiver.recv_with_torch.remote(size, device, 0),
        ]
        ray.get(pending)
        total += time.perf_counter() - started_at
    return total / 10
def torch_throughput(num_transfers, _transport, size, device, sender, receiver):
    """Time `num_transfers` raw torch.distributed transfers done inside two Ray tasks.

    `_transport` is ignored (torch.distributed is used directly). Returns the
    elapsed wall-clock seconds for the whole batch.
    """
    started_at = time.perf_counter()
    pending = [
        sender.send_many_with_torch.remote(size, device, 1, num_transfers),
        receiver.recv_many_with_torch.remote(size, device, 0, num_transfers),
    ]
    ray.get(pending)
    return time.perf_counter() - started_at
# torch_latency / torch_throughput bypass RDT and call torch.distributed
# directly; they are appended only when --enable_torch_bench is passed.
TEST_FUNCS = [
    throughput_new_send_per_recv,
    throughput_same_send_per_recv,
    latency_test,
    # torch_latency, added based on cli arg
    # torch_throughput, added based on cli arg
]
# Entries are (transport, device) pairs; more are appended based on CLI args.
TRANSPORTS_AND_DEVICE = [
    ("nccl", "cuda"),
    # ("nixl", "cuda"), # nixl enabled based on cli arg
    # ("nixl", "cpu"),
    ("gloo", "cpu"),
    # ("object_store", "cpu"),
    # ("object_store", "cuda"),
    # ("torch", "cuda") # only works with torch TEST_FUNCS, added based on cli arg
]
# Entries are (size_str, size_in_bytes, num_transfers); larger sizes use fewer
# transfers to keep GPU memory and total runtime bounded.
SIZES_AND_NUM_TRANSFERS = [
    ("4B", 4, 50),
    # ("1KB", (1024), 50),
    # ("50KB", (50 * 1024), 50),
    ("150KB", (150 * 1024), 50),
    # ("500KB", (500 * 1024), 50),
    ("1MB", (1024 * 1024), 50),
    # ("10MB", (10 * 1024 * 1024), 50),
    # ("50MB", (50 * 1024 * 1024), 50),
    ("100MB", (100 * 1024 * 1024), 50),
    # ("512MB", (512 * 1024 * 1024), 20),
    ("1GB", (1024 * 1024 * 1024), 10),
    # ("10GB", (10 * 1024 * 1024 * 1024), 1) - added based on cli arg
]
def do_benchmark(transport, device, test_func):
    """Run `test_func` across all configured sizes on a fresh pair of actors.

    Sets up the requested transport (collective group or torch process group),
    performs one tiny warmup transfer for non-torch transports, then records
    one timing entry per (size, num_transfers) configuration.

    Returns:
        A list of result dicts, one per entry in SIZES_AND_NUM_TRANSFERS.
    """
    # Fresh actor pair per benchmark run so transports don't interfere.
    sender, receiver = GPUActor.remote(), GPUActor.remote()
    if transport in ("nccl", "gloo"):
        create_collective_group([sender, receiver], transport)
    # Transport-specific initialization.
    if transport == "torch":
        ray.get([sender.init_torch.remote(0), receiver.init_torch.remote(1)])
    else:
        # Tiny 4-byte warmup transfer to initialize the RDT transport.
        ray.get(
            receiver.recv.remote(
                sender.send.options(tensor_transport=transport).remote(4, device)
            )
        )
    bench_times = []
    print(f"Benchmark times for transport {transport}, test_func {test_func.__name__}")
    for size_str, size, num_transfers in SIZES_AND_NUM_TRANSFERS:
        bench_time = test_func(num_transfers, transport, size, device, sender, receiver)
        bench_times.append(
            {
                "transport": transport,
                "test_func": test_func.__name__,
                "num_transfers": num_transfers,
                "size_str": size_str,
                "bench_time": bench_time,
            }
        )
        # Pad the size label so timings line up in the console output.
        extra_pad = (10 - len(size_str)) * " "
        if test_func in (latency_test, torch_latency):
            print(f"Size {size_str}{extra_pad}: {bench_time}")
        else:
            print(
                f"{num_transfers} Transfers, Size {size_str}{extra_pad}: {bench_time}"
            )
    # Cool off, GC time
    time.sleep(2)
    destroy_all_collective_groups()
    print()
    return bench_times
# ---- CLI -------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("--enable_10gb", action="store_true")
parser.add_argument("--enable_nixl", action="store_true")
parser.add_argument("--enable_torch_bench", action="store_true")
args = parser.parse_args()
if args.enable_10gb:
    SIZES_AND_NUM_TRANSFERS.append(("10GB", (10 * 1024 * 1024 * 1024), 1))
if args.enable_nixl:
    TRANSPORTS_AND_DEVICE.append(("nixl", "cuda"))
if args.enable_torch_bench:
    TEST_FUNCS.extend([torch_latency, torch_throughput])
    TRANSPORTS_AND_DEVICE.append(("torch", "cuda"))
# ---- Run every valid (test_func, transport) combination --------------------
bench_results = []
for test_func in TEST_FUNCS:
    for transport, device in TRANSPORTS_AND_DEVICE:
        # The raw-torch benchmark functions and the "torch" pseudo-transport
        # only pair with each other; skip any mismatched combination.
        is_torch_func = test_func in (torch_latency, torch_throughput)
        if is_torch_func != (transport == "torch"):
            continue
        bench_results.extend(do_benchmark(transport, device, test_func))
# ---- Optionally emit release-test JSON -------------------------------------
if "TEST_OUTPUT_JSON" in os.environ:
    with open(os.environ["TEST_OUTPUT_JSON"], "w") as out_file:
        # NOTE that throughput results are also reported as a time because we
        # have to fix the amount of memory being moved to avoid GPU OOMs.
        results = {
            "perf_metrics": [
                {
                    "perf_metric_name": f"{res['transport']}-{res['size_str']}-{res['test_func']}",
                    "perf_metric_value": res["bench_time"],
                    "perf_metric_type": "LATENCY",
                }
                for res in bench_results
            ]
        }
        json.dump(results, out_file)
| {
"repo_id": "ray-project/ray",
"file_path": "release/microbenchmark/experimental/rdt_single_node_microbenchmark.py",
"license": "Apache License 2.0",
"lines": 238,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:rllib/utils/metrics/stats/base.py | import threading
import time
from abc import ABCMeta, abstractmethod
from collections import deque
from typing import Any, Dict, List, Optional, Union
from ray.rllib.utils.annotations import (
OverrideToImplementCustomLogic,
OverrideToImplementCustomLogic_CallToSuperRecommended,
)
from ray.util.annotations import DeveloperAPI
@DeveloperAPI
class StatsBase(metaclass=ABCMeta):
    """A base class for Stats.
    Stats are meant to be used to log values to and then aggregate them in a tree.
    Therefore, we log to stats in two different ways:
    - On a leaf component, we log values directly by pushing.
    - On a non-leaf component, we only aggregate incoming values.
    Additionally, we pay special respect to Stats that live at the root of the tree.
    These may have a different behaviour (example: a lifetime sum).
    Note the tight coupling between StatsBase and MetricsLogger.
    """
    # In order to restore from a checkpoint, we need to know the class of the Stats object.
    # This is set in the subclass.
    stats_cls_identifier: str = None
    def __init__(
        self,
        is_root: bool = False,
        is_leaf: bool = True,
    ):
        """Initializes a StatsBase object.
        Args:
            is_root: If True, the Stats object is a root stats object.
            is_leaf: If True, the Stats object is a leaf stats object.
        Note: A stats object can be both root and leaf at the same time.
        Note: A stats object can also be neither root nor leaf ("intermediate" stats that only aggregate from other stats but are not at the root).
        """
        self.is_root = is_root
        self.is_leaf = is_leaf
        # Used to keep track of start times when using the `with` context manager.
        # This helps us measure times with threads in parallel.
        # Maps thread id -> time.perf_counter() at __enter__.
        self._start_times: Dict[int, float] = {}
        # For non-leaf stats (root or intermediate), track the latest merged values
        # This is overwritten on each merge operation
        if not self.is_leaf:
            self.latest_merged: Union[List[Any], Any] = None
        assert (
            self.stats_cls_identifier is not None
        ), "stats_cls_identifier must be set in the subclass"
    @property
    def has_throughputs(self) -> bool:
        """Returns True if the Stats object has throughput tracking enabled.
        Some Stats classes may have throughput tracking enabled, such as SumStats.
        """
        return False
    @OverrideToImplementCustomLogic
    def initialize_throughput_reference_time(self, time: float) -> None:
        """Sets the reference time for this Stats object.
        This is important because the component that tracks the time
        between reduce cycles is not Stats, but MetricsLogger.
        Args:
            time: The time to establish as the reference time for this Stats object.
        """
        # Subclasses with throughputs must override; reaching this body with
        # has_throughputs == True means the override is missing.
        if self.has_throughputs:
            raise ValueError(
                "initialize_throughput_reference_time must be overridden for stats objects that have throughputs."
            )
    @abstractmethod
    def __len__(self) -> int:
        """Returns the length of the internal values list."""
        ...
    # Numeric coercions and comparisons below operate on the compiled peek()
    # value; they raise if peek() yields a sequence rather than a scalar.
    def __float__(self):
        value = self.peek(compile=True)
        if isinstance(value, (list, tuple, deque)):
            raise ValueError(f"Can not convert {self} to float.")
        return float(value)
    def __int__(self):
        value = self.peek(compile=True)
        if isinstance(value, (list, tuple, deque)):
            raise ValueError(f"Can not convert {self} to int.")
        return int(value)
    # NOTE(review): defining __eq__ without __hash__ makes instances unhashable
    # (Python sets __hash__ to None) — confirm this is intended.
    def __eq__(self, other):
        return float(self) == float(other)
    def __le__(self, other):
        return float(self) <= float(other)
    def __ge__(self, other):
        return float(self) >= float(other)
    def __lt__(self, other):
        return float(self) < float(other)
    def __gt__(self, other):
        return float(self) > float(other)
    def __add__(self, other):
        return float(self) + float(other)
    def __sub__(self, other):
        return float(self) - float(other)
    def __mul__(self, other):
        return float(self) * float(other)
    def __format__(self, fmt):
        return f"{float(self):{fmt}}"
    def __enter__(self) -> "StatsBase":
        """Called when entering a context (with which users can measure a time delta).
        Returns:
            This stats instance.
        """
        # Keyed per thread so concurrent `with stats:` blocks don't clobber
        # each other's start times.
        thread_id = threading.get_ident()
        self._start_times[thread_id] = time.perf_counter()
        return self
    def __exit__(self, exc_type, exc_value, tb) -> None:
        """Called when exiting a context (with which users can measure a time delta).
        This pushes the time delta since __enter__ to this Stats object.
        """
        thread_id = threading.get_ident()
        assert self._start_times[thread_id] is not None
        time_delta_s = time.perf_counter() - self._start_times[thread_id]
        self.push(time_delta_s)
        del self._start_times[thread_id]
    @classmethod
    def from_state(cls, state: Dict[str, Any]) -> "StatsBase":
        """Creates a stats object from a state dictionary.
        Any implementation of this should call this base class's
        `stats_object.set_state()` to set the state of the stats object.
        Args:
            state: The state to set after instantiation.
        """
        init_args = cls._get_init_args(state=state)
        stats = cls(**init_args)
        stats.set_state(state)
        return stats
    @OverrideToImplementCustomLogic_CallToSuperRecommended
    def clone(
        self,
        init_overrides: Optional[Dict[str, Any]] = None,
    ) -> "StatsBase":
        """Returns a new stats object with the same settings as `self`.
        Args:
            init_overrides: Optional dict of initialization arguments to override. Can be used to change is_root, is_leaf, etc.
        Returns:
            A new stats object similar to `self` but missing internal values.
        """
        init_args = self.__class__._get_init_args(stats_object=self)
        if init_overrides:
            init_args.update(init_overrides)
        new_stats = self.__class__(**init_args)
        return new_stats
    @OverrideToImplementCustomLogic_CallToSuperRecommended
    def get_state(self) -> Dict[str, Any]:
        """Returns the state of the stats object."""
        state = {
            "stats_cls_identifier": self.stats_cls_identifier,
            "is_root": self.is_root,
            "is_leaf": self.is_leaf,
        }
        if not self.is_leaf:
            state["latest_merged"] = self.latest_merged
        return state
    @OverrideToImplementCustomLogic_CallToSuperRecommended
    def set_state(self, state: Dict[str, Any]) -> None:
        """Sets the state of the stats object.
        Args:
            state: The state to set on this StatsBase object.
        """
        # Handle legacy state that uses old attribute names
        self.is_root = state["is_root"]
        self.is_leaf = state["is_leaf"]
        # Prevent setting a state with a different stats class identifier
        assert self.stats_cls_identifier == state["stats_cls_identifier"]
        if not self.is_leaf:
            # NOTE(review): the original comment claimed legacy states without
            # "latest_merged" are handled, but a plain key access raises
            # KeyError for such states — confirm whether .get() was intended.
            self.latest_merged = state["latest_merged"]
    # NOTE(review): this custom decorator is applied on top of @staticmethod;
    # relies on the decorator tolerating staticmethod objects — confirm.
    @OverrideToImplementCustomLogic_CallToSuperRecommended
    @staticmethod
    def _get_init_args(stats_object=None, state=None) -> Dict[str, Any]:
        """Returns the initialization arguments for this Stats object."""
        # `state` takes precedence over `stats_object` when both are given.
        if state is not None:
            # Handle legacy state that uses old attribute names
            is_root = state["is_root"]
            is_leaf = state["is_leaf"]
            return {
                "is_root": is_root,
                "is_leaf": is_leaf,
            }
        elif stats_object is not None:
            return {
                "is_root": stats_object.is_root,
                "is_leaf": stats_object.is_leaf,
            }
        else:
            raise ValueError("Either stats_object or state must be provided")
    @abstractmethod
    def __repr__(self) -> str:
        ...
    @abstractmethod
    def merge(self, incoming_stats: List["StatsBase"]) -> None:
        """Merges StatsBase objects.
        Args:
            incoming_stats: The list of StatsBase objects to merge.
        """
    @abstractmethod
    def push(self, value: Any) -> None:
        """Pushes a value into this Stats object.
        Args:
            value: The value to push. Can be of any type.
            GPU tensors are moved to CPU memory.
        """
        # Enforces that only leaf stats accept pushed values; non-leaf stats
        # receive data exclusively through merge().
        assert (
            self.is_leaf
        ), "Cannot push values to non-leaf Stats. Non-leaf Stats can only receive values via merge()."
    @abstractmethod
    def peek(
        self, compile: bool = True, latest_merged_only: bool = False
    ) -> Union[Any, List[Any]]:
        """Returns the result of reducing the internal values list.
        Note that this method does NOT alter the internal values list in this process.
        Thus, users can call this method to get an accurate look at the reduced value(s)
        given the current internal values list.
        Args:
            compile: If True, the result is compiled into a single value if possible.
            latest_merged_only: If True, only considers the latest merged values.
                This parameter only works on aggregation stats objects (is_leaf=False).
                When enabled, peek() will only use the values from the most recent merge operation.
        Returns:
            The result of reducing the internal values list on CPU memory.
        """
    @abstractmethod
    def reduce(self, compile: bool = True) -> Union[Any, "StatsBase"]:
        """Reduces the internal values.
        This method should NOT be called directly by users.
        It can be used as a hook to prepare the stats object for sending it to the root metrics logger and starting a new 'reduce cycle'.
        The reduction logic depends on the implementation of the subclass.
        Meaning that some classes may reduce to a single value, while others do not or don't even contain values.
        Args:
            compile: If True, the result is compiled into a single value if possible.
                If False, the result is a Stats object similar to itself, but with the internal values reduced.
        Returns:
            The reduced value or a Stats object similar to itself, but with the internal values reduced.
        """
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/utils/metrics/stats/base.py",
"license": "Apache License 2.0",
"lines": 235,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:rllib/utils/metrics/stats/ema.py | import logging
import warnings
from typing import Any, Dict, List, Union
import numpy as np
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.metrics.stats.base import StatsBase
from ray.rllib.utils.metrics.stats.utils import safe_isnan, single_value_to_cpu
from ray.util import log_once
from ray.util.annotations import DeveloperAPI
logger = logging.getLogger(__name__)
torch, _ = try_import_torch()
_, tf, _ = try_import_tf()
@DeveloperAPI
class EmaStats(StatsBase):
    """A Stats object that tracks the exponential average of a series of singular values (not vectors)."""
    stats_cls_identifier = "ema"
    def __init__(
        self,
        ema_coeff: float = 0.01,
        *args,
        **kwargs,
    ):
        """Initializes a EmaStats instance.
        We calculate the EMA in parallel components.
        Also, we potentially aggregate them multiple times per reduction cycle.
        We therefore aggregate by taking the mean of all collected EMAs.
        We do this for simplicity and accept this limitation because EMAs
        inherently only approximate.
        Example to illustrate this limitation:
        Using an ema coefficient of 0.01:
        First incoming ema: [1, 2, 3, 4, 5] -> 1.1
        Second incoming ema: [15] -> 15
        Mean of both merged ema values: [1.1, 15] -> 8.05
        True mean of all values: [1, 2, 3, 4, 5, 15] -> 5
        Args:
            ema_coeff: The EMA coefficient to use. Defaults to 0.01.
        """
        super().__init__(*args, **kwargs)
        # NaN marks "no value logged yet" (see push()).
        self._value = np.nan
        # Non-leaf stats collect incoming EMA values here and average them on
        # reduce()/peek().
        if not self.is_leaf:
            self._values_to_merge = []
        self._ema_coeff = ema_coeff
    def _quiet_nanmean(self, values: List[Any]) -> float:
        """Compute the nanmean while ignoring warnings if all values are NaN.
        Args:
            values: The list of values to compute the nanmean of.
        Returns:
            The nanmean of the values.
        """
        # Torch path: stack and nanmean on the tensors' device.
        if torch and isinstance(values[0], torch.Tensor):
            stacked = torch.stack(list(values))
            return torch.nanmean(stacked)
        # NumPy path: suppress the "Mean of empty slice" RuntimeWarning that
        # np.nanmean emits when every value is NaN.
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", "Mean of empty slice", RuntimeWarning)
            return np.nanmean(values)
    def __len__(self) -> int:
        """Returns the length of the internal values list."""
        # An EMA always collapses to a single scalar.
        return 1
    def merge(self, incoming_stats: List["EmaStats"]) -> None:
        """Merges EmaStats objects.
        Args:
            incoming_stats: The list of EmaStats objects to merge.
        Returns:
            None. The merge operation modifies self in place.
        """
        assert (
            not self.is_leaf
        ), "EmaStats should only be merged at aggregation stages (root or intermediate)"
        all_values = [stat._value for stat in incoming_stats]
        if len(all_values) == 0:
            return
        self._values_to_merge.extend(all_values)
        # Track merged values for latest_merged_only peek functionality
        # (the assert above guarantees not self.is_leaf, so this always runs).
        if not self.is_leaf:
            # Store the values that were merged in this operation
            self.latest_merged = all_values
    def push(self, value: Any) -> None:
        """Pushes a value into this Stats object.
        Args:
            value: The value to be pushed. Can be of any type.
                PyTorch GPU tensors are kept on GPU until reduce() or peek().
                TensorFlow tensors are moved to CPU immediately.
        """
        # Convert TensorFlow tensors to CPU immediately
        if tf and tf.is_tensor(value):
            value = value.numpy()
        # If incoming value is NaN, do nothing
        if safe_isnan(value):
            return
        if torch and isinstance(value, torch.Tensor):
            # Detach the value from the graph to avoid unnecessary computation
            value = value.detach()
        # If internal value is NaN, replace it with the incoming value
        if safe_isnan(self._value):
            self._value = value
        else:
            # Otherwise, update the internal value using the EMA formula
            self._value = (
                self._ema_coeff * value + (1.0 - self._ema_coeff) * self._value
            )
    def _reduce_values_to_merge(self) -> float:
        """Reduces the internal values to merge."""
        # NOTE(review): uses np.isnan directly while push() uses safe_isnan;
        # presumably fine for scalar/CPU values, but may fail if _value is a
        # CUDA tensor — confirm and consider safe_isnan for consistency.
        if not np.isnan(self._value) and log_once("ema_stats_merge_push"):
            logger.warning(
                f"Merging values in {self} but self._value is not NaN. This leads to an inaccurate metric. Not erroring out to avoid breaking older checkpoints."
            )
        if len(self._values_to_merge) == 0:
            return np.nan
        return self._quiet_nanmean(self._values_to_merge)
    def peek(
        self, compile: bool = True, latest_merged_only: bool = False
    ) -> Union[Any, List[Any]]:
        """Returns the current EMA value.
        If value is a GPU tensor, it's converted to CPU.
        Args:
            compile: If True, the result is compiled into a single value if possible.
            latest_merged_only: If True, only considers the latest merged values.
                This parameter only works on aggregation stats (root or intermediate nodes).
                When enabled, peek() will only use the values from the most recent merge operation.
        """
        # Check latest_merged_only validity
        if latest_merged_only and self.is_leaf:
            raise ValueError(
                "latest_merged_only can only be used on aggregation stats objects (is_leaf=False)."
            )
        # If latest_merged_only is True, use only the latest merged values
        if latest_merged_only:
            if self.latest_merged is None:
                # No merged values yet, return NaN
                if compile:
                    return np.nan
                else:
                    return [np.nan]
            # Use only the latest merged values
            latest_merged = self.latest_merged
            if len(latest_merged) == 0:
                value = np.nan
            else:
                # Reduce latest merged values
                value = self._quiet_nanmean(latest_merged)
        else:
            # Normal peek behavior
            if hasattr(self, "_values_to_merge"):
                # If _values_to_merge is empty, use _value instead
                # This can happen after reduce(compile=False) returns a new stats object
                if len(self._values_to_merge) == 0:
                    value = self._value
                else:
                    value = self._reduce_values_to_merge()
            else:
                value = self._value
        value = single_value_to_cpu(value)
        return value if compile else [value]
    def reduce(self, compile: bool = True) -> Union[Any, "EmaStats"]:
        """Reduces the internal value.
        If value is a GPU tensor, it's converted to CPU.
        Args:
            compile: If True, the result is compiled into a single value if possible.
        Returns:
            The reduced value.
        """
        if hasattr(self, "_values_to_merge"):
            # If _values_to_merge is empty, use _value instead
            # This can happen when a non-leaf stats object logs values directly
            if len(self._values_to_merge) == 0:
                value = self._value
            else:
                value = self._reduce_values_to_merge()
            # Start a fresh aggregation window for the next reduce cycle.
            self._values_to_merge = []
        else:
            value = self._value
        # Convert GPU tensor to CPU
        if torch and isinstance(value, torch.Tensor):
            value = single_value_to_cpu(value)
        # Reset to "no value logged yet".
        self._value = np.nan
        if compile:
            return value
        return_stats = self.clone()
        return_stats._value = value
        return return_stats
    def __repr__(self) -> str:
        values_to_merge_len = (
            len(self._values_to_merge) if hasattr(self, "_values_to_merge") else 0
        )
        return (
            f"EmaStats({self.peek()}; number_of_values_to_merge=({values_to_merge_len}); "
            f"ema_coeff={self._ema_coeff}, value={self._value})"
        )
    def get_state(self) -> Dict[str, Any]:
        state = super().get_state()
        state["ema_coeff"] = self._ema_coeff
        state["value"] = self._value
        if not self.is_leaf:
            state["values_to_merge"] = self._values_to_merge
        return state
    def set_state(self, state: Dict[str, Any]) -> None:
        super().set_state(state)
        self._ema_coeff = state["ema_coeff"]
        self._value = state["value"]
        # Handle legacy state that doesn't have values_to_merge
        if not self.is_leaf:
            self._values_to_merge = state.get("values_to_merge", [])
    @staticmethod
    def _get_init_args(stats_object=None, state=None) -> Dict[str, Any]:
        """Returns the initialization arguments for this Stats object."""
        super_args = StatsBase._get_init_args(stats_object=stats_object, state=state)
        if state is not None:
            return {
                **super_args,
                "ema_coeff": state["ema_coeff"],
            }
        if stats_object is not None:
            return {
                **super_args,
                "ema_coeff": stats_object._ema_coeff,
            }
        else:
            raise ValueError("Either stats_object or state must be provided")
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/utils/metrics/stats/ema.py",
"license": "Apache License 2.0",
"lines": 218,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:rllib/utils/metrics/stats/item.py | from typing import Any, Dict, List, Union
from ray.rllib.utils.metrics.stats.base import StatsBase
from ray.rllib.utils.metrics.stats.utils import single_value_to_cpu
from ray.util.annotations import DeveloperAPI
@DeveloperAPI
class ItemStats(StatsBase):
    """A Stats object that tracks exactly one item.

    Limitation: `merge()` simply replaces the current item, since by
    definition only a single item is tracked at a time.

    Logged GPU tensors are converted to CPU memory.

    Use this to track a single value that should never be reduced — for
    example, the total loss.
    """
    stats_cls_identifier = "item"
    def __init__(self, *args, **kwargs):
        """Initializes a ItemStats instance."""
        super().__init__(*args, **kwargs)
        self._item = None
    def __len__(self) -> int:
        # There is always exactly one (possibly None) tracked item.
        return 1
    def push(self, item: Any) -> None:
        """Stores `item` as the tracked value.

        Args:
            item: The value to track. Can be of any type; GPU tensors are
                moved to CPU memory.

        Returns:
            None
        """
        # peek(), reduce() and merge() don't handle GPU tensors, so convert
        # to CPU right away.
        self._item = single_value_to_cpu(item)
    def merge(self, incoming_stats: List["ItemStats"]) -> None:
        """Replaces the tracked item with the incoming one.

        Args:
            incoming_stats: A single-element list of ItemStats objects.

        Returns:
            None. The merge operation modifies self in place.
        """
        assert (
            len(incoming_stats) == 1
        ), "ItemStats should only be merged with one other ItemStats object which replaces the current item"
        (incoming,) = incoming_stats
        self._item = incoming._item
    def peek(
        self, compile: bool = True, latest_merged_only: bool = False
    ) -> Union[Any, List[Any]]:
        """Returns the tracked item without modifying it.

        Args:
            compile: If True, return the item itself; otherwise return it
                wrapped in a single-element list.
            latest_merged_only: Ignored for ItemStats — it tracks a single
                item, not a series of merged values, so the current item is
                always returned.

        Returns:
            The tracked item (CPU-converted).
        """
        current = single_value_to_cpu(self._item)
        return current if compile else [current]
    def reduce(self, compile: bool = True) -> Union[Any, "ItemStats"]:
        # Hand out the (CPU-converted) item and clear the internal slot.
        current, self._item = single_value_to_cpu(self._item), None
        if compile:
            return current
        reduced = self.clone()
        reduced._item = current
        return reduced
    def get_state(self) -> Dict[str, Any]:
        state = super().get_state()
        state["item"] = self._item
        return state
    def set_state(self, state: Dict[str, Any]) -> None:
        super().set_state(state)
        self._item = state["item"]
    def __repr__(self) -> str:
        return f"ItemStats({self.peek()})"
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/utils/metrics/stats/item.py",
"license": "Apache License 2.0",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.