# Source: ray-project/ray — python/ray/data/tests/test_projection_fusion.py
from dataclasses import dataclass
from typing import Dict, List, Set
import pandas as pd
import pyarrow as pa
import pyarrow.compute as pc
import pytest
import ray
from ray.data._internal.logical.interfaces import LogicalPlan
from ray.data._internal.logical.operators.input_data_operator import InputData
from ray.data._internal.logical.operators.map_operator import Project
from ray.data._internal.logical.optimizers import LogicalOptimizer
from ray.data._internal.logical.rules import (
ProjectionPushdown,
)
from ray.data._internal.util import rows_same
from ray.data.context import DataContext
from ray.data.expressions import DataType, StarExpr, col, star, udf
@dataclass
class FusionTestCase:
    """Test case for projection fusion scenarios."""

    # Short identifier used in assertion failure messages.
    name: str
    # One dict per Project operator in the original chain; each dict maps an
    # output column name to a textual expression understood by
    # TestProjectionFusion._parse_expression.
    expressions_list: List[Dict[str, str]]  # List of {name: expression_desc}
    # Number of Project operators expected to remain after optimization.
    expected_levels: int
    # For each remaining operator, the expression names it must contain.
    expected_level_contents: List[Set[str]]  # Expected expressions in each level
    # Free-form explanation of what the scenario verifies.
    description: str
@dataclass
class DependencyTestCase:
    """Test case for dependency analysis."""

    # Short identifier used in assertion failure messages.
    name: str
    # Textual expression (same mini-language as FusionTestCase).
    expression_desc: str
    # Column names the expression is expected to reference.
    expected_refs: Set[str]
    # Free-form explanation of what the case verifies.
    description: str
class TestProjectionFusion:
"""Test topological sorting in projection pushdown fusion."""
    @pytest.fixture(autouse=True)
    def setup(self):
        """Set up test fixtures.

        Runs before every test (autouse): captures the current DataContext and
        registers three small PyArrow-vectorized UDFs under ``self.udfs`` so
        individual tests can reference them by name.
        """
        self.context = DataContext.get_current()

        # Create UDFs for testing
        @udf(return_dtype=DataType.int64())
        def multiply_by_two(x: pa.Array) -> pa.Array:
            return pc.multiply(x, 2)

        @udf(return_dtype=DataType.int64())
        def add_one(x: pa.Array) -> pa.Array:
            return pc.add(x, 1)

        @udf(return_dtype=DataType.float64())
        def divide_by_three(x: pa.Array) -> pa.Array:
            # Convert to float to ensure floating point division
            return pc.divide(pc.cast(x, pa.float64()), 3.0)

        # Expose the UDFs to tests by name.
        self.udfs = {
            "multiply_by_two": multiply_by_two,
            "add_one": add_one,
            "divide_by_three": divide_by_three,
        }
def _create_input_op(self):
"""Create a dummy input operator."""
return InputData(input_data=[])
def _parse_expression(self, expr_desc: str):
"""Parse expression description into actual expression object."""
# Enhanced parser for test expressions
expr_map = {
"col('id')": col("id"),
"col('id') + 10": col("id") + 10,
"col('id') * 2": col("id") * 2,
"col('id') - 5": col("id") - 5,
"col('id') + 1": col("id") + 1,
"col('id') - 1": col("id") - 1,
"col('id') - 3": col("id") - 3,
"col('step1') * 2": col("step1") * 2,
"col('step2') + 1": col("step2") + 1,
"col('a') + col('b')": col("a") + col("b"),
"col('c') + col('d')": col("c") + col("d"),
"col('e') * 3": col("e") * 3,
"col('a') + 1": col("a") + 1,
"multiply_by_two(col('id'))": self.udfs["multiply_by_two"](col("id")),
"multiply_by_two(col('id')) + col('plus_ten')": (
self.udfs["multiply_by_two"](col("id")) + col("plus_ten")
),
"col('times_three') > col('plus_ten')": (
col("times_three") > col("plus_ten")
),
"multiply_by_two(col('x'))": self.udfs["multiply_by_two"](col("x")),
"add_one(col('id'))": self.udfs["add_one"](col("id")),
"multiply_by_two(col('plus_one'))": self.udfs["multiply_by_two"](
col("plus_one")
),
"divide_by_three(col('times_two'))": self.udfs["divide_by_three"](
col("times_two")
),
}
if expr_desc in expr_map:
return expr_map[expr_desc]
else:
raise ValueError(f"Unknown expression: {expr_desc}")
def _create_project_chain(self, input_op, expressions_list: List[Dict[str, str]]):
"""Create a chain of Project operators from expression descriptions."""
current_op = input_op
for expr_dict in expressions_list:
# Convert dictionary to list of named expressions
exprs = []
for name, desc in expr_dict.items():
expr = self._parse_expression(desc)
named_expr = expr.alias(name)
exprs.append(named_expr)
current_op = Project(current_op, exprs=[star()] + exprs, ray_remote_args={})
return current_op
def _extract_levels_from_plan(self, plan: LogicalPlan) -> List[Set[str]]:
"""Extract expression levels from optimized plan."""
current = plan.dag
levels = []
while isinstance(current, Project):
# Extract names, ignoring StarExpr (not a named column)
levels.append(
{expr.name for expr in current.exprs if not isinstance(expr, StarExpr)}
)
current = current.input_dependency
return list(reversed(levels)) # Return bottom-up order
def _count_project_operators(self, plan: LogicalPlan) -> int:
"""Count the number of Project operators in the plan."""
current = plan.dag
count = 0
while current:
if isinstance(current, Project):
count += 1
current = getattr(current, "input_dependency", None)
return count
def _describe_plan_structure(self, plan: LogicalPlan) -> str:
"""Generate a description of the plan structure."""
current = plan.dag
operators = []
while current:
if isinstance(current, Project):
expr_count = len(current.exprs) if current.exprs else 0
operators.append(f"Project({expr_count} exprs)")
else:
operators.append(current.__class__.__name__)
current = getattr(current, "input_dependency", None)
return " -> ".join(operators)
    @pytest.mark.parametrize(
        "test_case",
        [
            # Two independent projections followed by a third; all can fuse.
            FusionTestCase(
                name="no_dependencies",
                expressions_list=[
                    {"doubled": "col('id') * 2", "plus_five": "col('id') + 10"},
                    {"minus_three": "col('id') - 3"},
                ],
                expected_levels=1,
                expected_level_contents=[{"doubled", "plus_five", "minus_three"}],
                description="Independent expressions should fuse into single operator",
            ),
            # step2 depends on step1, step3 on step2; evaluation order inside
            # the single fused operator must be preserved.
            FusionTestCase(
                name="simple_chain",
                expressions_list=[
                    {"step1": "col('id') + 10"},
                    {"step2": "col('step1') * 2"},
                    {"step3": "col('step2') + 1"},
                ],
                expected_levels=1,
                expected_level_contents=[
                    {"step1", "step2", "step3"}
                ],  # All in one level
                description="All expressions fuse into single operator with OrderedDict preservation",
            ),
            # Mix of UDF-based and plain arithmetic expressions.
            FusionTestCase(
                name="mixed_udf_regular",
                expressions_list=[
                    {"plus_ten": "col('id') + 10"},
                    {"times_three": "multiply_by_two(col('id'))"},
                    {"minus_five": "col('id') - 5"},
                    {
                        "udf_plus_regular": "multiply_by_two(col('id')) + col('plus_ten')"
                    },
                    {"comparison": "col('times_three') > col('plus_ten')"},
                ],
                expected_levels=1,
                expected_level_contents=[
                    {
                        "plus_ten",
                        "times_three",
                        "minus_five",
                        "udf_plus_regular",
                        "comparison",
                    }
                ],
                description="All expressions fuse into single operator",
            ),
            # Diamond-shaped dependency graph: a,b -> c; c,d -> e -> f.
            FusionTestCase(
                name="complex_graph",
                expressions_list=[
                    {"a": "col('id') + 1", "b": "col('id') * 2"},
                    {"c": "col('a') + col('b')"},
                    {"d": "col('id') - 1"},
                    {"e": "col('c') + col('d')"},
                    {"f": "col('e') * 3"},
                ],
                expected_levels=1,
                expected_level_contents=[{"a", "b", "c", "d", "e", "f"}],
                description="All expressions fuse into single operator",
            ),
            # Chain of three UDFs, each consuming the previous one's output.
            FusionTestCase(
                name="udf_dependency_chain",
                expressions_list=[
                    {"plus_one": "add_one(col('id'))"},
                    {"times_two": "multiply_by_two(col('plus_one'))"},
                    {"div_three": "divide_by_three(col('times_two'))"},
                ],
                expected_levels=1,  # Changed from 3 to 1
                expected_level_contents=[{"plus_one", "times_two", "div_three"}],
                description="All UDF expressions fuse into single operator with preserved order",
            ),
        ],
    )
    def test_fusion_scenarios(self, test_case: FusionTestCase):
        """Test various fusion scenarios with simplified single-operator fusion."""
        input_op = self._create_input_op()
        final_op = self._create_project_chain(input_op, test_case.expressions_list)

        # Apply projection pushdown
        plan = LogicalPlan(final_op, self.context)
        rule = ProjectionPushdown()
        optimized_plan = rule.apply(plan)

        # Extract levels from optimized plan
        actual_levels = self._extract_levels_from_plan(optimized_plan)

        # Verify number of levels
        assert len(actual_levels) == test_case.expected_levels, (
            f"{test_case.name}: Expected {test_case.expected_levels} operators, "
            f"got {len(actual_levels)}. Actual operators: {actual_levels}"
        )

        # Verify level contents (more flexible matching)
        for i, expected_content in enumerate(test_case.expected_level_contents):
            assert expected_content.issubset(actual_levels[i]), (
                f"{test_case.name}: Operator {i} missing expressions. "
                f"Expected {expected_content} to be subset of {actual_levels[i]}"
            )
def test_pairwise_fusion_behavior(self, ray_start_regular_shared):
"""Test to understand how pairwise fusion works in practice."""
input_data = [{"id": i} for i in range(10)]
# Test with 2 operations (should fuse to 1)
ds2 = ray.data.from_items(input_data)
ds2 = ds2.with_column("col1", col("id") + 1)
ds2 = ds2.with_column("col2", col("id") * 2)
count2 = self._count_project_operators(ds2._logical_plan)
print(f"2 operations -> {count2} operators")
# Test with 3 operations
ds3 = ray.data.from_items(input_data)
ds3 = ds3.with_column("col1", col("id") + 1)
ds3 = ds3.with_column("col2", col("id") * 2)
ds3 = ds3.with_column("col3", col("id") - 1)
count3 = self._count_project_operators(ds3._logical_plan)
print(f"3 operations -> {count3} operators")
# Test with 4 operations
ds4 = ray.data.from_items(input_data)
ds4 = ds4.with_column("col1", col("id") + 1)
ds4 = ds4.with_column("col2", col("id") * 2)
ds4 = ds4.with_column("col3", col("id") - 1)
ds4 = ds4.with_column("col4", col("id") + 5)
count4 = self._count_project_operators(ds4._logical_plan)
print(f"4 operations -> {count4} operators")
# Verify that fusion is happening (fewer operators than original)
assert count2 <= 2, f"2 operations should result in ≤2 operators, got {count2}"
assert count3 <= 3, f"3 operations should result in ≤3 operators, got {count3}"
assert count4 <= 4, f"4 operations should result in ≤4 operators, got {count4}"
# Verify correctness
result2 = ds2.take(1)[0]
result3 = ds3.take(1)[0]
result4 = ds4.take(1)[0]
assert result2 == {"id": 0, "col1": 1, "col2": 0}
assert result3 == {"id": 0, "col1": 1, "col2": 0, "col3": -1}
assert result4 == {"id": 0, "col1": 1, "col2": 0, "col3": -1, "col4": 5}
    def test_optimal_fusion_with_single_chain(self, ray_start_regular_shared):
        """Test fusion when all operations are added in a single chain (ideal case)."""
        input_data = [{"id": i} for i in range(10)]
        # Create a single Project operator with multiple expressions
        # This simulates what would happen with perfect fusion
        ds = ray.data.from_items(input_data)
        # Apply multiple operations that should all be independent
        expressions = {
            "col1": col("id") + 1,
            "col2": col("id") * 2,
            "col3": col("id") - 1,
            "col4": col("id") + 5,
            "col5": col("id") * 3,
        }

        # Use map_batches to create a single operation that does everything
        def apply_all_expressions(batch):
            import pyarrow.compute as pc

            # Mirrors the `expressions` dict above, computed in one pass.
            result = batch.to_pydict()
            result["col1"] = pc.add(batch["id"], 1)
            result["col2"] = pc.multiply(batch["id"], 2)
            result["col3"] = pc.subtract(batch["id"], 1)
            result["col4"] = pc.add(batch["id"], 5)
            result["col5"] = pc.multiply(batch["id"], 3)
            return pa.table(result)

        ds_optimal = ds.map_batches(apply_all_expressions, batch_format="pyarrow")

        # Compare with the with_column approach
        ds_with_column = ds
        for col_name, expr in expressions.items():
            ds_with_column = ds_with_column.with_column(col_name, expr)

        # Convert both to pandas for reliable comparison
        result_optimal_df = ds_optimal.to_pandas()
        result_with_column_df = ds_with_column.to_pandas()
        # Sort columns before comparison
        result_optimal_df = result_optimal_df[sorted(result_optimal_df.columns)]
        result_with_column_df = result_with_column_df[
            sorted(result_with_column_df.columns)
        ]
        # Compare using rows_same (deterministic, ignores order)
        assert rows_same(result_optimal_df, result_with_column_df)
def test_basic_fusion_works(self, ray_start_regular_shared):
"""Test that basic fusion of two independent operations works."""
input_data = [{"id": i} for i in range(5)]
# Create dataset with two independent operations
ds = ray.data.from_items(input_data)
ds = ds.with_column("doubled", col("id") * 2)
ds = ds.with_column("plus_one", col("id") + 1)
# Check before optimization
original_count = self._count_project_operators(ds._logical_plan)
print(f"Before optimization: {original_count} operators")
# Apply optimization
rule = ProjectionPushdown()
optimized_plan = rule.apply(ds._logical_plan)
# Check after optimization
optimized_count = self._count_project_operators(optimized_plan)
print(f"After optimization: {optimized_count} operators")
# Two independent operations should fuse into one
assert (
optimized_count == 1
), f"Two independent operations should fuse to 1 operator, got {optimized_count}"
# Verify correctness using rows_same
from ray.data.dataset import Dataset
optimized_ds = Dataset(ds._plan, optimized_plan)
result_df = optimized_ds.to_pandas()
expected_df = pd.DataFrame(
{
"id": [0, 1, 2, 3, 4],
"doubled": [0, 2, 4, 6, 8],
"plus_one": [1, 2, 3, 4, 5],
}
)
# Sort columns before comparison
result_df = result_df[sorted(result_df.columns)]
expected_df = expected_df[sorted(expected_df.columns)]
assert rows_same(result_df, expected_df)
    def test_dependency_prevents_fusion(self, ray_start_regular_shared):
        """Test that dependencies are handled in single operator with OrderedDict."""
        input_data = [{"id": i} for i in range(5)]

        # Create dataset with dependency chain
        ds = ray.data.from_items(input_data)
        ds = ds.with_column("doubled", col("id") * 2)
        ds = ds.with_column(
            "doubled_plus_one", col("doubled") + 1
        )  # Depends on doubled

        # Check before optimization
        original_count = self._count_project_operators(ds._logical_plan)
        print(f"Before optimization: {original_count} operators")

        # Apply optimization
        rule = ProjectionPushdown()
        optimized_plan = rule.apply(ds._logical_plan)

        # Check after optimization
        optimized_count = self._count_project_operators(optimized_plan)
        print(f"After optimization: {optimized_count} operators")

        # Should have 1 operator now (changed from 2)
        assert (
            optimized_count == 1
        ), f"All operations should fuse into 1 operator, got {optimized_count}"

        # Verify correctness using rows_same
        from ray.data.dataset import Dataset

        optimized_ds = Dataset(ds._plan, optimized_plan)
        result_df = optimized_ds.to_pandas()
        # doubled = id * 2; doubled_plus_one = doubled + 1 (dependent column).
        expected_df = pd.DataFrame(
            {
                "id": [0, 1, 2, 3, 4],
                "doubled": [0, 2, 4, 6, 8],
                "doubled_plus_one": [1, 3, 5, 7, 9],
            }
        )
        # Sort columns before comparison
        result_df = result_df[sorted(result_df.columns)]
        expected_df = expected_df[sorted(expected_df.columns)]
        assert rows_same(result_df, expected_df)
    def test_mixed_udf_regular_end_to_end(self, ray_start_regular_shared):
        """Test the exact failing scenario from the original issue."""
        input_data = [{"id": i} for i in range(5)]

        # Create dataset with mixed UDF and regular expressions (the failing test case)
        ds = ray.data.from_items(input_data)
        ds = ds.with_column("plus_ten", col("id") + 10)
        ds = ds.with_column(
            "times_three", self.udfs["multiply_by_two"](col("id"))
        )  # Actually multiply by 2
        ds = ds.with_column("minus_five", col("id") - 5)
        ds = ds.with_column(
            "udf_plus_regular",
            self.udfs["multiply_by_two"](col("id")) + col("plus_ten"),
        )
        ds = ds.with_column("comparison", col("times_three") > col("plus_ten"))

        # Apply optimization
        rule = ProjectionPushdown()
        optimized_plan = rule.apply(ds._logical_plan)

        # Verify execution correctness
        from ray.data.dataset import Dataset

        optimized_ds = Dataset(ds._plan, optimized_plan)
        result_df = optimized_ds.to_pandas()
        expected_df = pd.DataFrame(
            {
                "id": [0, 1, 2, 3, 4],
                "plus_ten": [10, 11, 12, 13, 14],  # id + 10
                "times_three": [0, 2, 4, 6, 8],  # id * 2 (multiply_by_two UDF)
                "minus_five": [-5, -4, -3, -2, -1],  # id - 5
                "udf_plus_regular": [10, 13, 16, 19, 22],  # (id * 2) + (id + 10)
                "comparison": [
                    False,
                    False,
                    False,
                    False,
                    False,
                ],  # times_three > plus_ten
            }
        )
        # Sort columns before comparison
        result_df = result_df[sorted(result_df.columns)]
        expected_df = expected_df[sorted(expected_df.columns)]
        assert rows_same(result_df, expected_df)

        # Verify that we have 1 operator (changed from multiple)
        optimized_count = self._count_project_operators(optimized_plan)
        assert (
            optimized_count == 1
        ), f"Expected 1 operator with all expressions fused, got {optimized_count}"
    def test_optimal_fusion_comparison(self, ray_start_regular_shared):
        """Compare optimized with_column approach against manual map_batches."""
        input_data = [{"id": i} for i in range(10)]

        # Create dataset using with_column (will be optimized)
        ds_with_column = ray.data.from_items(input_data)
        ds_with_column = ds_with_column.with_column("col1", col("id") + 1)
        ds_with_column = ds_with_column.with_column("col2", col("id") * 2)
        ds_with_column = ds_with_column.with_column("col3", col("id") - 1)
        ds_with_column = ds_with_column.with_column("col4", col("id") + 5)
        ds_with_column = ds_with_column.with_column("col5", col("id") * 3)

        # Apply optimization
        rule = ProjectionPushdown()
        optimized_plan = rule.apply(ds_with_column._logical_plan)
        from ray.data.dataset import Dataset

        optimized_ds = Dataset(ds_with_column._plan, optimized_plan)

        # Create dataset using single map_batches (optimal case)
        ds_optimal = ray.data.from_items(input_data)

        def apply_all_expressions(batch):
            import pyarrow.compute as pc

            # Same five derived columns as the with_column chain above.
            result = batch.to_pydict()
            result["col1"] = pc.add(batch["id"], 1)
            result["col2"] = pc.multiply(batch["id"], 2)
            result["col3"] = pc.subtract(batch["id"], 1)
            result["col4"] = pc.add(batch["id"], 5)
            result["col5"] = pc.multiply(batch["id"], 3)
            return pa.table(result)

        ds_optimal = ds_optimal.map_batches(
            apply_all_expressions, batch_format="pyarrow"
        )

        # Compare results using rows_same
        result_optimized = optimized_ds.to_pandas()
        result_optimal = ds_optimal.to_pandas()
        # Sort columns before comparison
        result_optimized = result_optimized[sorted(result_optimized.columns)]
        result_optimal = result_optimal[sorted(result_optimal.columns)]
        assert rows_same(result_optimized, result_optimal)
    def test_chained_udf_dependencies(self, ray_start_regular_shared):
        """Test multiple non-vectorized UDFs in a dependency chain."""
        input_data = [{"id": i} for i in range(5)]

        # Create dataset with chained UDF dependencies
        ds = ray.data.from_items(input_data)
        ds = ds.with_column("plus_one", self.udfs["add_one"](col("id")))
        ds = ds.with_column("times_two", self.udfs["multiply_by_two"](col("plus_one")))
        ds = ds.with_column("div_three", self.udfs["divide_by_three"](col("times_two")))

        # Apply optimization
        rule = ProjectionPushdown()
        optimized_plan = rule.apply(ds._logical_plan)

        # Verify 1 operator (changed from 3)
        assert self._count_project_operators(optimized_plan) == 1
        # 4 exprs = star() plus the three chained UDF columns.
        assert (
            self._describe_plan_structure(optimized_plan)
            == "Project(4 exprs) -> FromItems"  # Changed from multiple operators
        )

        # Verify execution correctness
        from ray.data.dataset import Dataset

        optimized_ds = Dataset(ds._plan, optimized_plan)
        result_df = optimized_ds.to_pandas()
        # plus_one = id + 1; times_two = plus_one * 2; div_three = times_two / 3.
        expected_df = pd.DataFrame(
            {
                "id": [0, 1, 2, 3, 4],
                "plus_one": [1, 2, 3, 4, 5],
                "times_two": [2, 4, 6, 8, 10],
                "div_three": [2 / 3, 4 / 3, 2.0, 8 / 3, 10 / 3],
            }
        )
        # Sort columns before comparison
        result_df = result_df[sorted(result_df.columns)]
        expected_df = expected_df[sorted(expected_df.columns)]
        assert rows_same(result_df, expected_df)
    def test_performance_impact_of_udf_chains(self, ray_start_regular_shared):
        """Test performance characteristics of UDF dependency chains vs independent UDFs."""
        input_data = [{"id": i} for i in range(100)]

        # Case 1: Independent UDFs (should fuse)
        ds_independent = ray.data.from_items(input_data)
        ds_independent = ds_independent.with_column(
            "udf1", self.udfs["add_one"](col("id"))
        )
        ds_independent = ds_independent.with_column(
            "udf2", self.udfs["multiply_by_two"](col("id"))
        )
        ds_independent = ds_independent.with_column(
            "udf3", self.udfs["divide_by_three"](col("id"))
        )

        # Case 2: Chained UDFs (should also fuse now)
        ds_chained = ray.data.from_items(input_data)
        ds_chained = ds_chained.with_column("step1", self.udfs["add_one"](col("id")))
        ds_chained = ds_chained.with_column(
            "step2", self.udfs["multiply_by_two"](col("step1"))
        )
        ds_chained = ds_chained.with_column(
            "step3", self.udfs["divide_by_three"](col("step2"))
        )

        # Apply optimization
        rule = ProjectionPushdown()
        optimized_independent = rule.apply(ds_independent._logical_plan)
        optimized_chained = rule.apply(ds_chained._logical_plan)

        # Verify fusion behavior (both should be 1 now)
        assert self._count_project_operators(optimized_independent) == 1
        assert (
            self._count_project_operators(optimized_chained) == 1
        )  # Changed from 3 to 1
        # 4 exprs = star() plus the three added columns in each case.
        assert (
            self._describe_plan_structure(optimized_independent)
            == "Project(4 exprs) -> FromItems"
        )
        assert (
            self._describe_plan_structure(optimized_chained)
            == "Project(4 exprs) -> FromItems"  # Changed from multiple operators
        )
    @pytest.mark.parametrize(
        "operations,expected",
        [
            # Single operations
            ([("rename", {"a": "A"})], {"A": 1, "b": 2, "c": 3}),
            ([("select", ["a", "b"])], {"a": 1, "b": 2}),
            ([("with_column", "d", 4)], {"a": 1, "b": 2, "c": 3, "d": 4}),
            # Two operations - rename then select
            ([("rename", {"a": "A"}), ("select", ["A"])], {"A": 1}),
            ([("rename", {"a": "A"}), ("select", ["b"])], {"b": 2}),
            (
                [("rename", {"a": "A", "b": "B"}), ("select", ["A", "B"])],
                {"A": 1, "B": 2},
            ),
            # Two operations - select then rename
            ([("select", ["a", "b"]), ("rename", {"a": "A"})], {"A": 1, "b": 2}),
            ([("select", ["a"]), ("rename", {"a": "x"})], {"x": 1}),
            # Two operations - with_column combinations
            ([("with_column", "d", 4), ("select", ["a", "d"])], {"a": 1, "d": 4}),
            ([("select", ["a"]), ("with_column", "d", 4)], {"a": 1, "d": 4}),
            (
                [("rename", {"a": "A"}), ("with_column", "d", 4)],
                {"A": 1, "b": 2, "c": 3, "d": 4},
            ),
            (
                [("with_column", "d", 4), ("rename", {"d": "D"})],
                {"a": 1, "b": 2, "c": 3, "D": 4},
            ),
            # Three operations
            (
                [
                    ("rename", {"a": "A"}),
                    ("select", ["A", "b"]),
                    ("with_column", "d", 4),
                ],
                {"A": 1, "b": 2, "d": 4},
            ),
            (
                [
                    ("with_column", "d", 4),
                    ("rename", {"a": "A"}),
                    ("select", ["A", "d"]),
                ],
                {"A": 1, "d": 4},
            ),
            (
                [
                    ("select", ["a", "b"]),
                    ("rename", {"a": "x"}),
                    ("with_column", "d", 4),
                ],
                {"x": 1, "b": 2, "d": 4},
            ),
            # Column swap (no actual changes)
            ([("rename", {"a": "b", "b": "a"}), ("select", ["a"])], {"a": 2}),
            ([("rename", {"a": "b", "b": "a"}), ("select", ["b"])], {"b": 1}),
            # Multiple same operations
            (
                [("rename", {"a": "x"}), ("rename", {"x": "y"})],
                {"y": 1, "b": 2, "c": 3},
            ),
            ([("select", ["a", "b"]), ("select", ["a"])], {"a": 1}),
            (
                [("with_column", "d", 4), ("with_column", "e", 5)],
                {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5},
            ),
            # Complex expressions with with_column
            (
                [("rename", {"a": "x"}), ("with_column_expr", "sum", "x", 10)],
                {"x": 1, "b": 2, "c": 3, "sum": 10},
            ),
            (
                [
                    ("with_column", "d", 4),
                    ("with_column", "e", 5),
                    ("select", ["d", "e"]),
                ],
                {"d": 4, "e": 5},
            ),
        ],
    )
    def test_projection_operations_comprehensive(
        self, ray_start_regular_shared, operations, expected
    ):
        """Comprehensive test for projection operations combinations."""
        from ray.data.expressions import col, lit

        # Create initial dataset
        ds = ray.data.range(1).map(lambda row: {"a": 1, "b": 2, "c": 3})

        # Apply operations
        # Each op is a tuple: (op_name, *args) — dispatched on op[0].
        for op in operations:
            if op[0] == "rename":
                ds = ds.rename_columns(op[1])
            elif op[0] == "select":
                ds = ds.select_columns(op[1])
            elif op[0] == "with_column":
                ds = ds.with_column(op[1], lit(op[2]))
            elif op[0] == "with_column_expr":
                # Special case for expressions referencing columns
                ds = ds.with_column(op[1], col(op[2]) * op[3])

        # Verify result using rows_same
        result_df = ds.to_pandas()
        expected_df = pd.DataFrame([expected])
        # Ensure columns are in the same order for comparison
        result_df = result_df[sorted(result_df.columns)]
        expected_df = expected_df[sorted(expected_df.columns)]
        assert rows_same(result_df, expected_df)
    @pytest.mark.parametrize(
        "operations,expected",
        [
            # Basic count operations
            ([("count",)], 3),  # All 3 rows
            ([("rename", {"a": "A"}), ("count",)], 3),
            ([("select", ["a", "b"]), ("count",)], 3),
            ([("with_column", "d", 4), ("count",)], 3),
            # Filter operations affecting count
            ([("filter", col("a") > 1), ("count",)], 2),  # 2 rows have a > 1
            ([("filter", col("b") == 2), ("count",)], 3),  # All rows have b == 2
            ([("filter", col("c") < 10), ("count",)], 3),  # All rows have c < 10
            ([("filter", col("a") == 1), ("count",)], 1),  # 1 row has a == 1
            # Projection then filter then count
            ([("rename", {"a": "A"}), ("filter", col("A") > 1), ("count",)], 2),
            ([("select", ["a", "b"]), ("filter", col("a") > 1), ("count",)], 2),
            ([("with_column", "d", 4), ("filter", col("d") == 4), ("count",)], 3),
            # Filter then projection then count
            ([("filter", col("a") > 1), ("rename", {"a": "A"}), ("count",)], 2),
            ([("filter", col("b") == 2), ("select", ["a", "b"]), ("count",)], 3),
            ([("filter", col("c") < 10), ("with_column", "d", 4), ("count",)], 3),
            # Multiple projections with filter and count
            (
                [
                    ("rename", {"a": "A"}),
                    ("select", ["A", "b"]),
                    ("filter", col("A") > 1),
                    ("count",),
                ],
                2,
            ),
            (
                [
                    ("with_column", "d", 4),
                    ("rename", {"d": "D"}),
                    ("filter", col("D") == 4),
                    ("count",),
                ],
                3,
            ),
            (
                [
                    ("select", ["a", "b"]),
                    ("filter", col("a") > 1),
                    ("rename", {"a": "x"}),
                    ("count",),
                ],
                2,
            ),
            # Complex combinations
            (
                [
                    ("filter", col("a") > 0),
                    ("rename", {"b": "B"}),
                    ("select", ["a", "B"]),
                    ("filter", col("B") == 2),
                    ("count",),
                ],
                3,
            ),
            (
                [
                    ("with_column", "sum", 99),
                    ("filter", col("a") > 1),
                    ("select", ["a", "sum"]),
                    ("count",),
                ],
                2,
            ),
            (
                [
                    ("rename", {"a": "A", "b": "B"}),
                    ("filter", (col("A") + col("B")) > 3),
                    ("select", ["A"]),
                    ("count",),
                ],
                2,
            ),
        ],
    )
    def test_projection_fusion_with_count_and_filter(
        self, ray_start_regular_shared, operations, expected
    ):
        """Test projection fusion with count operations including filters."""
        from ray.data.expressions import lit

        # Create dataset with 3 rows: {"a": 1, "b": 2, "c": 3}, {"a": 2, "b": 2, "c": 3}, {"a": 3, "b": 2, "c": 3}
        ds = ray.data.from_items(
            [
                {"a": 1, "b": 2, "c": 3},
                {"a": 2, "b": 2, "c": 3},
                {"a": 3, "b": 2, "c": 3},
            ]
        )

        # Apply operations
        # Each op is a tuple: (op_name, *args) — dispatched on op[0].
        for op in operations:
            if op[0] == "rename":
                ds = ds.rename_columns(op[1])
            elif op[0] == "select":
                ds = ds.select_columns(op[1])
            elif op[0] == "with_column":
                ds = ds.with_column(op[1], lit(op[2]))
            elif op[0] == "filter":
                # Use the predicate expression directly
                ds = ds.filter(expr=op[1])
            elif op[0] == "count":
                # Count returns a scalar, not a dataset
                result = ds.count()
                assert result == expected
                return  # Early return since count() terminates the pipeline

        # This should not be reached for count operations
        assert False, "Count operation should have returned early"
    @pytest.mark.parametrize(
        "invalid_operations,error_type,error_message_contains",
        [
            # Try to filter on a column that doesn't exist yet
            (
                [("filter", col("d") > 0), ("with_column", "d", 4)],
                (KeyError, ray.exceptions.RayTaskError),
                "d",
            ),
            # Try to filter on a renamed column before the rename
            (
                [("filter", col("A") > 1), ("rename", {"a": "A"})],
                (KeyError, ray.exceptions.RayTaskError),
                "A",
            ),
            # Try to use a column that was removed by select
            (
                [("select", ["a"]), ("filter", col("b") == 2)],
                (KeyError, ray.exceptions.RayTaskError),
                "b",
            ),
            # Try to filter on a column after it was removed by select
            (
                [("select", ["a", "b"]), ("filter", col("c") < 10)],
                (KeyError, ray.exceptions.RayTaskError),
                "c",
            ),
            # Try to use with_column referencing a non-existent column
            (
                [("select", ["a"]), ("with_column", "new_col", col("b") + 1)],
                (KeyError, ray.exceptions.RayTaskError),
                "b",
            ),
            # Try to filter on a column that was renamed away
            (
                [("rename", {"b": "B"}), ("filter", col("b") == 2)],
                (KeyError, ray.exceptions.RayTaskError),
                "b",
            ),
            # Try to use with_column with old column name after rename
            (
                [("rename", {"a": "A"}), ("with_column", "result", col("a") + 1)],
                (KeyError, ray.exceptions.RayTaskError),
                "a",
            ),
            # Try to select using old column name after rename
            (
                [("rename", {"b": "B"}), ("select", ["a", "b", "c"])],
                (KeyError, ray.exceptions.RayTaskError),
                "b",
            ),
            # Try to filter on a computed column that was removed by select
            (
                [
                    ("with_column", "d", 4),
                    ("select", ["a", "b"]),
                    ("filter", col("d") == 4),
                ],
                (KeyError, ray.exceptions.RayTaskError),
                "d",
            ),
            # Try to rename a column that was removed by select
            (
                [("select", ["a", "b"]), ("rename", {"c": "C"})],
                (KeyError, ray.exceptions.RayTaskError),
                "c",
            ),
            # Complex: rename, select (removing renamed source), then use old name
            (
                [
                    ("rename", {"a": "A"}),
                    ("select", ["b", "c"]),
                    ("filter", col("a") > 0),
                ],
                (KeyError, ray.exceptions.RayTaskError),
                "a",
            ),
            # Complex: with_column, select (keeping new column), filter on removed original
            (
                [
                    ("with_column", "sum", col("a") + col("b")),
                    ("select", ["sum"]),
                    ("filter", col("a") > 0),
                ],
                (KeyError, ray.exceptions.RayTaskError),
                "a",
            ),
            # Try to use column in with_column expression after it was removed
            (
                [
                    ("select", ["a", "c"]),
                    ("with_column", "result", col("a") + col("b")),
                ],
                (KeyError, ray.exceptions.RayTaskError),
                "b",
            ),
        ],
    )
    def test_projection_operations_invalid_order(
        self,
        ray_start_regular_shared,
        invalid_operations,
        error_type,
        error_message_contains,
    ):
        """Test that operations fail gracefully when referencing non-existent columns."""
        import ray
        from ray.data.expressions import lit

        # Create dataset with 3 rows: {"a": 1, "b": 2, "c": 3}, {"a": 2, "b": 2, "c": 3}, {"a": 3, "b": 2, "c": 3}
        ds = ray.data.from_items(
            [
                {"a": 1, "b": 2, "c": 3},
                {"a": 2, "b": 2, "c": 3},
                {"a": 3, "b": 2, "c": 3},
            ]
        )

        # Apply operations and expect them to fail
        # The error may surface either while building the plan (KeyError) or
        # at execution time inside a Ray task (RayTaskError).
        with pytest.raises(error_type) as exc_info:
            for op in invalid_operations:
                if op[0] == "rename":
                    ds = ds.rename_columns(op[1])
                elif op[0] == "select":
                    ds = ds.select_columns(op[1])
                elif op[0] == "with_column":
                    if len(op) == 3 and not isinstance(op[2], (int, float, str)):
                        # Expression-based with_column (op[2] is an expression)
                        ds = ds.with_column(op[1], op[2])
                    else:
                        # Literal-based with_column
                        ds = ds.with_column(op[1], lit(op[2]))
                elif op[0] == "filter":
                    ds = ds.filter(expr=op[1])
                elif op[0] == "count":
                    ds.count()
                    return

            # Force execution to trigger the error
            result = ds.take_all()
            print(f"Unexpected success: {result}")

        # Verify the error message contains the expected column name
        error_str = str(exc_info.value).lower()
        assert (
            error_message_contains.lower() in error_str
        ), f"Expected '{error_message_contains}' in error message: {error_str}"
@pytest.mark.parametrize(
"operations,expected_output",
[
# === Basic Select Operations ===
pytest.param(
[("select", ["a"])],
[{"a": 1}, {"a": 2}, {"a": 3}],
id="select_single_column",
),
pytest.param(
[("select", ["a", "b"])],
[{"a": 1, "b": 4}, {"a": 2, "b": 5}, {"a": 3, "b": 6}],
id="select_two_columns",
),
pytest.param(
[("select", ["a", "b", "c"])],
[
{"a": 1, "b": 4, "c": 7},
{"a": 2, "b": 5, "c": 8},
{"a": 3, "b": 6, "c": 9},
],
id="select_all_columns",
),
pytest.param(
[("select", ["c", "a"])],
[{"c": 7, "a": 1}, {"c": 8, "a": 2}, {"c": 9, "a": 3}],
id="select_reordered_columns",
),
# === Basic Rename Operations ===
pytest.param(
[("rename", {"a": "alpha"})],
[
{"alpha": 1, "b": 4, "c": 7},
{"alpha": 2, "b": 5, "c": 8},
{"alpha": 3, "b": 6, "c": 9},
],
id="rename_single_column",
),
pytest.param(
[("rename", {"a": "alpha", "b": "beta"})],
[
{"alpha": 1, "beta": 4, "c": 7},
{"alpha": 2, "beta": 5, "c": 8},
{"alpha": 3, "beta": 6, "c": 9},
],
id="rename_multiple_columns",
),
# === Basic with_column Operations ===
pytest.param(
[("with_column_expr", "sum", "add", "a", "b")],
[
{"a": 1, "b": 4, "c": 7, "sum": 5},
{"a": 2, "b": 5, "c": 8, "sum": 7},
{"a": 3, "b": 6, "c": 9, "sum": 9},
],
id="with_column_add_keep_all",
),
pytest.param(
[("with_column_expr", "product", "multiply", "b", "c")],
[
{"a": 1, "b": 4, "c": 7, "product": 28},
{"a": 2, "b": 5, "c": 8, "product": 40},
{"a": 3, "b": 6, "c": 9, "product": 54},
],
id="with_column_multiply_keep_all",
),
# === Chained Selects ===
pytest.param(
[("select", ["a", "b", "c"]), ("select", ["a", "b"])],
[{"a": 1, "b": 4}, {"a": 2, "b": 5}, {"a": 3, "b": 6}],
id="chained_selects_two_levels",
),
pytest.param(
[
("select", ["a", "b", "c"]),
("select", ["a", "b"]),
("select", ["a"]),
],
[{"a": 1}, {"a": 2}, {"a": 3}],
id="chained_selects_three_levels",
),
# === Rename → Select ===
pytest.param(
[("rename", {"a": "x"}), ("select", ["x", "b"])],
[{"x": 1, "b": 4}, {"x": 2, "b": 5}, {"x": 3, "b": 6}],
id="rename_then_select",
),
pytest.param(
[("rename", {"a": "x", "c": "z"}), ("select", ["x", "z"])],
[{"x": 1, "z": 7}, {"x": 2, "z": 8}, {"x": 3, "z": 9}],
id="rename_multiple_then_select",
),
# === Select → Rename ===
pytest.param(
[("select", ["a", "b"]), ("rename", {"a": "x"})],
[{"x": 1, "b": 4}, {"x": 2, "b": 5}, {"x": 3, "b": 6}],
id="select_then_rename",
),
pytest.param(
[("select", ["a", "b", "c"]), ("rename", {"a": "x", "b": "y"})],
[
{"x": 1, "y": 4, "c": 7},
{"x": 2, "y": 5, "c": 8},
{"x": 3, "y": 6, "c": 9},
],
id="select_all_then_rename_some",
),
# === Multiple Renames ===
pytest.param(
[("rename", {"a": "x"}), ("rename", {"x": "y"})],
[
{"y": 1, "b": 4, "c": 7},
{"y": 2, "b": 5, "c": 8},
{"y": 3, "b": 6, "c": 9},
],
id="chained_renames",
),
# === with_column → Select ===
pytest.param(
[("with_column_expr", "sum", "add", "a", "b"), ("select", ["sum"])],
[{"sum": 5}, {"sum": 7}, {"sum": 9}],
id="with_column_then_select_only_computed",
),
pytest.param(
[
("with_column_expr", "sum", "add", "a", "b"),
("select", ["a", "sum"]),
],
[{"a": 1, "sum": 5}, {"a": 2, "sum": 7}, {"a": 3, "sum": 9}],
id="with_column_then_select_mixed",
),
pytest.param(
[
("with_column_expr", "result", "multiply", "b", "c"),
("select", ["a", "result"]),
],
[
{"a": 1, "result": 28},
{"a": 2, "result": 40},
{"a": 3, "result": 54},
],
id="with_column_select_source_and_computed",
),
# === Multiple with_column Operations ===
pytest.param(
[
("with_column_expr", "sum", "add", "a", "b"),
("with_column_expr", "product", "multiply", "a", "c"),
],
[
{"a": 1, "b": 4, "c": 7, "sum": 5, "product": 7},
{"a": 2, "b": 5, "c": 8, "sum": 7, "product": 16},
{"a": 3, "b": 6, "c": 9, "sum": 9, "product": 27},
],
id="multiple_with_column_keep_all",
),
pytest.param(
[
("with_column_expr", "sum", "add", "a", "b"),
("with_column_expr", "product", "multiply", "a", "c"),
("select", ["sum", "product"]),
],
[
{"sum": 5, "product": 7},
{"sum": 7, "product": 16},
{"sum": 9, "product": 27},
],
id="multiple_with_column_then_select",
),
pytest.param(
[
("with_column_expr", "sum", "add", "a", "b"),
("with_column_expr", "diff", "add", "c", "a"),
("select", ["sum", "diff"]),
],
[{"sum": 5, "diff": 8}, {"sum": 7, "diff": 10}, {"sum": 9, "diff": 12}],
id="multiple_with_column_independent_sources",
),
# === with_column → Rename ===
pytest.param(
[
("with_column_expr", "sum", "add", "a", "b"),
("rename", {"sum": "total"}),
],
[
{"a": 1, "b": 4, "c": 7, "total": 5},
{"a": 2, "b": 5, "c": 8, "total": 7},
{"a": 3, "b": 6, "c": 9, "total": 9},
],
id="with_column_then_rename_computed",
),
# === Rename → with_column ===
pytest.param(
[
("rename", {"a": "x"}),
("with_column_expr", "x_plus_b", "add", "x", "b"),
],
[
{"x": 1, "b": 4, "c": 7, "x_plus_b": 5},
{"x": 2, "b": 5, "c": 8, "x_plus_b": 7},
{"x": 3, "b": 6, "c": 9, "x_plus_b": 9},
],
id="rename_then_with_column_using_renamed",
),
pytest.param(
[
("rename", {"a": "x"}),
("with_column_expr", "result", "add", "x", "b"),
("select", ["result"]),
],
[{"result": 5}, {"result": 7}, {"result": 9}],
id="rename_with_column_select_chain",
),
# === Select → with_column → Select ===
pytest.param(
[
("select", ["a", "b"]),
("with_column_expr", "sum", "add", "a", "b"),
("select", ["a", "sum"]),
],
[{"a": 1, "sum": 5}, {"a": 2, "sum": 7}, {"a": 3, "sum": 9}],
id="select_with_column_select_chain",
),
pytest.param(
[
("select", ["a", "b", "c"]),
("with_column_expr", "x", "add", "a", "b"),
("with_column_expr", "y", "multiply", "b", "c"),
("select", ["x", "y"]),
],
[{"x": 5, "y": 28}, {"x": 7, "y": 40}, {"x": 9, "y": 54}],
id="select_multiple_with_column_select_chain",
),
# === Complex Multi-Step Chains ===
pytest.param(
[
("select", ["a", "b", "c"]),
("rename", {"a": "x"}),
("with_column_expr", "result", "add", "x", "b"),
("select", ["result", "c"]),
],
[{"result": 5, "c": 7}, {"result": 7, "c": 8}, {"result": 9, "c": 9}],
id="complex_select_rename_with_column_select",
),
pytest.param(
[
("rename", {"a": "alpha", "b": "beta"}),
("select", ["alpha", "beta", "c"]),
("with_column_expr", "sum", "add", "alpha", "beta"),
("rename", {"sum": "total"}),
("select", ["total", "c"]),
],
[{"total": 5, "c": 7}, {"total": 7, "c": 8}, {"total": 9, "c": 9}],
id="complex_five_step_chain",
),
pytest.param(
[
("select", ["a", "b", "c"]),
("select", ["b", "c"]),
("select", ["c"]),
],
[{"c": 7}, {"c": 8}, {"c": 9}],
id="select_chain",
),
],
)
def test_projection_pushdown_into_parquet_read(
self, ray_start_regular_shared, tmp_path, operations, expected_output
):
"""Test that projection operations fuse and push down into parquet reads.
Verifies:
- Multiple projections fuse into single operator
- Fused projection pushes down into Read operator
- Only necessary columns are read from parquet
- Results are correct for select, rename, and with_column operations
"""
from ray.data.expressions import col
# Create test parquet file
df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
parquet_path = tmp_path / "test.parquet"
df.to_parquet(parquet_path, index=False)
# Build pipeline with operations
ds = ray.data.read_parquet(str(parquet_path))
for op_type, *op_args in operations:
if op_type == "select":
ds = ds.select_columns(op_args[0])
elif op_type == "rename":
ds = ds.rename_columns(op_args[0])
elif op_type == "with_column_expr":
col_name, operator, col1, col2 = op_args
if operator == "add":
ds = ds.with_column(col_name, col(col1) + col(col2))
elif operator == "multiply":
ds = ds.with_column(col_name, col(col1) * col(col2))
result_df = ds.to_pandas()
expected_df = pd.DataFrame(expected_output)
# Ensure columns are in the same order for comparison
result_df = result_df[sorted(result_df.columns)]
expected_df = expected_df[sorted(expected_df.columns)]
assert rows_same(result_df, expected_df)
@pytest.mark.parametrize("flavor", ["project_before", "project_after"])
def test_projection_pushdown_merge_rename_x(ray_start_regular_shared, flavor):
    """
    Test that valid select and renaming merges correctly.

    The select is placed either before the renames ("project_before") or after
    them ("project_after"); both flavors should optimize to a single Project
    operator with the same aliased expressions.
    """
    path = "example://iris.parquet"
    ds = ray.data.read_parquet(path)
    # NOTE(review): map_batches appears to act as a barrier so the projections
    # stay above it rather than pushing into the read -- confirm.
    ds = ds.map_batches(lambda d: d)
    if flavor == "project_before":
        ds = ds.select_columns(["sepal.length", "petal.width"])
    # First projection renames 'sepal.length' to 'length'
    ds = ds.rename_columns({"sepal.length": "length"})
    # Second projection renames 'petal.width' to 'width'
    ds = ds.rename_columns({"petal.width": "width"})
    if flavor == "project_after":
        ds = ds.select_columns(["length", "width"])
    logical_plan = ds._plan._logical_plan
    op = logical_plan.dag
    # The unoptimized plan should already end in a Project operator.
    assert isinstance(op, Project), op.name
    optimized_logical_plan = LogicalOptimizer().optimize(logical_plan)
    assert isinstance(optimized_logical_plan.dag, Project)
    select_op = optimized_logical_plan.dag
    # Check that both "sepal.length" and "petal.width" are present in the columns,
    # regardless of their order.
    assert select_op.exprs == [
        # TODO fix (renaming doesn't remove prev columns)
        col("sepal.length").alias("length"),
        col("petal.width").alias("width"),
    ]
# Allow running this test file directly (outside the normal test runner).
if __name__ == "__main__":
    pytest.main([__file__, "-v"])
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_projection_fusion.py",
"license": "Apache License 2.0",
"lines": 1227,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/_common/serialization.py | import io
import logging
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
pass
import ray._private.utils
import ray.cloudpickle as pickle
import ray.exceptions
from ray._private import ray_constants
from ray.util import inspect_serializability
logger = logging.getLogger(__name__)
# Read from the RAY_allow_out_of_band_object_ref_serialization env var;
# defaults to True. Presumably gates whether ObjectRefs may be serialized
# out-of-band -- verify against the call sites that consume this flag.
ALLOW_OUT_OF_BAND_OBJECT_REF_SERIALIZATION = ray_constants.env_bool(
    "RAY_allow_out_of_band_object_ref_serialization", True
)
def pickle_dumps(obj: Any, error_msg: str) -> bytes:
    """Wrap cloudpickle.dumps to provide a better error message
    when the object is not serializable.

    Args:
        obj: The object to serialize.
        error_msg: Context message prepended to the serializability report
            when serialization fails.

    Returns:
        The pickled bytes of ``obj``.

    Raises:
        TypeError: If ``obj`` is not picklable; the message includes the
            output of ``inspect_serializability``.
        ray.exceptions.OufOfBandObjectRefSerializationException: If an
            ObjectRef inside ``obj`` cannot be serialized out-of-band.
    """
    try:
        return pickle.dumps(obj)
    except (TypeError, ray.exceptions.OufOfBandObjectRefSerializationException) as e:
        sio = io.StringIO()
        # Walk the object graph and write a human-readable report of the
        # non-serializable parts into sio.
        inspect_serializability(obj, print_file=sio)
        msg = f"{error_msg}:\n{sio.getvalue()}"
        if isinstance(e, TypeError):
            raise TypeError(msg) from e
        else:
            # Chain the original exception (`from e`) so the root cause is
            # preserved in the traceback; previously it was dropped.
            raise ray.exceptions.OufOfBandObjectRefSerializationException(
                msg
            ) from e
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/_common/serialization.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/train/v2/_internal/execution/local_mode/torch.py | import logging
import os
from typing import Callable
import torch
import torch.distributed as dist
from ray.train import Result
from ray.train.v2._internal.execution.local_mode.utils import LocalController
from ray.train.v2._internal.execution.train_fn_utils import (
LocalTrainFnUtils,
get_train_fn_utils,
set_train_fn_utils,
)
logger = logging.getLogger(__name__)
def has_torchrun_env() -> bool:
    """Return True if this process has torch.distributed env vars set.

    For torch.distributed.init_process_group with init_method="env://", these variables are required:
    - RANK: The rank of the current process
    - LOCAL_RANK: The local rank of the current process
    - WORLD_SIZE: Total number of processes participating in the job
    - LOCAL_WORLD_SIZE: Total number of processes participating in the job on the current node
    - MASTER_ADDR: The IP address or hostname of the master node (rank 0)
    - MASTER_PORT: A free port on the master node for communication
    """
    required_env_vars = (
        "RANK",
        "LOCAL_RANK",
        "WORLD_SIZE",
        "LOCAL_WORLD_SIZE",
        "MASTER_ADDR",
        "MASTER_PORT",
    )
    # torchrun sets all of these; every one must be present.
    return all(name in os.environ for name in required_env_vars)
class LocalTorchController(LocalController):
    """Local-mode controller that runs the training function in-process,
    initializing torch.distributed first when launched under torchrun."""

    def _set_train_fn_utils(self) -> None:
        # Defaults for a plain single-process run (no torchrun env vars).
        world_size = 1
        global_rank = 0
        local_rank = 0
        nproc_per_node = 1
        node_rank = 0
        if has_torchrun_env():
            assert not dist.is_initialized(), "torch.distributed is already initialized"
            # init_method defaults to "env://", which reads MASTER_ADDR,
            # MASTER_PORT, RANK, WORLD_SIZE from the environment.
            torch.distributed.init_process_group(
                backend="nccl" if torch.cuda.is_available() else "gloo"
            )
            world_size = torch.distributed.get_world_size()
            global_rank = torch.distributed.get_rank()
            local_rank = int(os.environ["LOCAL_RANK"])
            if torch.cuda.is_available():
                # Pin this process to its per-node GPU.
                torch.cuda.set_device(local_rank)
            nproc_per_node = int(os.environ.get("LOCAL_WORLD_SIZE"))
            # Assumes global ranks are assigned contiguously per node -- the
            # standard torchrun layout. TODO(review): confirm.
            node_rank = global_rank // nproc_per_node
        if world_size != 1:
            assert (
                self.datasets is None or len(self.datasets) == 0
            ), "Ray Data is not supported in local mode with multiple workers."
        set_train_fn_utils(
            LocalTrainFnUtils(
                experiment_name=self.experiment_name,
                world_size=world_size,
                world_rank=global_rank,
                local_rank=local_rank,
                local_world_size=nproc_per_node,
                node_rank=node_rank,
                dataset_shards=self.datasets,
            )
        )

    def run(self, train_func: Callable[[], None]) -> Result:
        # Configure the (possibly distributed) train-fn utils, run the user
        # function, then package the last reported metrics and checkpoint.
        self._set_train_fn_utils()
        train_func()
        train_fn_utils = get_train_fn_utils()
        assert isinstance(train_fn_utils, LocalTrainFnUtils)
        result = Result(
            metrics=train_fn_utils._get_last_metrics(),
            checkpoint=train_fn_utils.get_checkpoint(),
            path=None,
            error=None,
        )
        # Tear down the process group so a subsequent run can re-initialize it.
        if dist.is_initialized():
            dist.destroy_process_group()
        return result
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/_internal/execution/local_mode/torch.py",
"license": "Apache License 2.0",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/dashboard/modules/aggregator/tests/test_ray_node_events.py | import base64
import json
import os
import socket
import sys
import pytest
import ray
from ray._common.test_utils import wait_for_condition
from ray._private.test_utils import wait_for_dashboard_agent_available
from ray.dashboard.tests.conftest import * # noqa
_RAY_EVENT_PORT = 12345
@pytest.fixture(scope="session")
def httpserver_listen_address():
    # Bind the httpserver fixture to the fixed port that the aggregator agent
    # is configured to export events to (see _RAY_EVENT_PORT above).
    return ("127.0.0.1", _RAY_EVENT_PORT)
def test_ray_node_events(ray_start_cluster, httpserver):
    """Verify the aggregator agent exports node definition/lifecycle events.

    Starts a one-node cluster configured to POST events to the local
    httpserver fixture, then checks the first exported batch contains a
    NODE_DEFINITION_EVENT followed by a NODE_LIFECYCLE_EVENT for the head node.
    """
    cluster = ray_start_cluster
    cluster.add_node(
        node_name="test-head-node",
        env_vars={
            "RAY_DASHBOARD_AGGREGATOR_AGENT_EVENTS_EXPORT_ADDR": f"http://127.0.0.1:{_RAY_EVENT_PORT}",
            "RAY_DASHBOARD_AGGREGATOR_AGENT_EXPOSABLE_EVENT_TYPES": "NODE_DEFINITION_EVENT,NODE_LIFECYCLE_EVENT",
        },
        _system_config={
            "enable_ray_event": True,
        },
    )
    cluster.wait_for_nodes()
    head_node_id = cluster.head_node.node_id
    ray.init(address=cluster.address)
    wait_for_dashboard_agent_available(cluster)
    # Check that a node definition and a node lifecycle event are published.
    httpserver.expect_request("/", method="POST").respond_with_data("", status=200)
    wait_for_condition(lambda: len(httpserver.log) >= 1)
    req, _ = httpserver.log[0]
    req_json = json.loads(req.data)
    assert len(req_json) == 2
    # Node ids are base64-encoded bytes in the JSON payload; decode to hex for
    # comparison with the cluster's node id.
    assert base64.b64decode(req_json[0]["nodeId"]).hex() == head_node_id
    assert (
        base64.b64decode(req_json[0]["nodeDefinitionEvent"]["nodeId"]).hex()
        == cluster.head_node.node_id
    )
    node_def_event = req_json[0]["nodeDefinitionEvent"]
    assert node_def_event["hostname"] == socket.gethostname()
    assert node_def_event["nodeName"] == "test-head-node"
    # instanceId and instanceTypeName are set via env vars by cloud providers.
    # In local/CI environments these are typically empty.
    assert node_def_event["instanceId"] == os.environ.get("RAY_CLOUD_INSTANCE_ID", "")
    assert node_def_event["instanceTypeName"] == os.environ.get(
        "RAY_CLOUD_INSTANCE_TYPE_NAME", ""
    )
    # The second event is the lifecycle event recording the ALIVE transition.
    assert base64.b64decode(req_json[1]["nodeId"]).hex() == head_node_id
    assert (
        base64.b64decode(req_json[1]["nodeLifecycleEvent"]["nodeId"]).hex()
        == cluster.head_node.node_id
    )
    assert req_json[1]["nodeLifecycleEvent"]["stateTransitions"][0]["state"] == "ALIVE"
    assert (
        req_json[1]["nodeLifecycleEvent"]["stateTransitions"][0]["aliveSubState"]
        == "UNSPECIFIED"
    )
# Allow running this test file directly (outside the normal test runner).
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/dashboard/modules/aggregator/tests/test_ray_node_events.py",
"license": "Apache License 2.0",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/tests/resource_isolation/test_resource_isolation_integration.py | import os
import platform
import subprocess
import sys
import textwrap
from pathlib import Path
from typing import Set
import pytest
from click.testing import CliRunner
import ray
import ray._common.utils as utils
import ray._private.ray_constants as ray_constants
import ray.scripts.scripts as scripts
from ray._common.test_utils import wait_for_condition
from ray._private.resource_isolation_config import ResourceIsolationConfig
# These tests are intended to run in CI inside a container.
#
# If you want to run this test locally, you will need to create a cgroup that
# the ray can manage and delegate to the correct user.
#
# Run these commands locally before running the test suite:
#
# sudo mkdir -p /sys/fs/cgroup/resource_isolation_test
# sudo chown -R $(whoami):$(whoami) /sys/fs/cgroup/resource_isolation_test/
# sudo chmod -R u+rwx /sys/fs/cgroup/resource_isolation_test/
# echo $$ | sudo tee /sys/fs/cgroup/resource_isolation_test/cgroup.procs
#
# Comment the following line out.
_ROOT_CGROUP = Path("/sys/fs/cgroup")
#
# To run locally, uncomment the following line.
# _ROOT_CGROUP = Path("/sys/fs/cgroup/resource_isolation_test")
# The integration tests assume that the _ROOT_CGROUP exists and that
# the process has read and write access.
#
# This test suite will create the following cgroup hierarchy for the tests
# starting with BASE_CGROUP.
#
# ROOT_CGROUP
# |
# BASE_CGROUP
# / \
# TEST_CGROUP LEAF_CGROUP
# |
# ray-node_<node_id>
# | |
# system user
# | | |
# leaf workers non-ray
#
# NOTE: The test suite does not assume that ROOT_CGROUP is the OS's root cgroup. Therefore,
# 1. setup will migrate all processes from the ROOT_CGROUP -> LEAF_CGROUP
# 2. teardown will migrate all processes from the LEAF_CGROUP -> ROOT_CGROUP
#
# NOTE: BASE_CGROUP will have a randomly generated name to isolate tests from each other.
#
# The test suite assumes that
# 1. cpu, memory controllers are available on ROOT_CGROUP i.e. in the ROOT_CGROUP/cgroup.controllers file.
# 2. All processes inside the base_cgroup can be migrated into the leaf_cgroup
# without violating the no-internal-processes constraint.
#
# All python tests should only have access to the TEST_CGROUP and nothing outside of it.
_BASE_CGROUP = _ROOT_CGROUP / ("testing_" + utils.get_random_alphanumeric_string(5))
_TEST_CGROUP = _BASE_CGROUP / "test"
_LEAF_GROUP = _BASE_CGROUP / "leaf"
_MOUNT_FILE_PATH = "/proc/mounts"
# The names are here to help debug test failures. Tests should
# only use the size of this list. These processes are expected to be moved
# into the system cgroup.
_EXPECTED_DASHBOARD_MODULES = [
"ray.dashboard.modules.usage_stats.usage_stats_head.UsageStatsHead",
"ray.dashboard.modules.metrics.metrics_head.MetricsHead",
"ray.dashboard.modules.data.data_head.DataHead",
"ray.dashboard.modules.event.event_head.EventHead",
"ray.dashboard.modules.job.job_head.JobHead",
"ray.dashboard.modules.node.node_head.NodeHead",
"ray.dashboard.modules.reporter.reporter_head.ReportHead",
"ray.dashboard.modules.serve.serve_head.ServeHead",
"ray.dashboard.modules.state.state_head.StateHead",
"ray.dashboard.modules.train.train_head.TrainHead",
]
# The list of processes expected to be started in the system cgroup
# with default params for 'ray start' and 'ray.init(...)'
_EXPECTED_SYSTEM_PROCESSES_RAY_START = [
ray_constants.PROCESS_TYPE_DASHBOARD,
ray_constants.PROCESS_TYPE_GCS_SERVER,
ray_constants.PROCESS_TYPE_MONITOR,
ray_constants.PROCESS_TYPE_LOG_MONITOR,
ray_constants.PROCESS_TYPE_RAY_CLIENT_SERVER,
ray_constants.PROCESS_TYPE_RAYLET,
ray_constants.PROCESS_TYPE_DASHBOARD_AGENT,
ray_constants.PROCESS_TYPE_RUNTIME_ENV_AGENT,
]
_EXPECTED_SYSTEM_PROCESSES_RAY_INIT = [
ray_constants.PROCESS_TYPE_DASHBOARD,
ray_constants.PROCESS_TYPE_GCS_SERVER,
ray_constants.PROCESS_TYPE_MONITOR,
ray_constants.PROCESS_TYPE_LOG_MONITOR,
ray_constants.PROCESS_TYPE_RAYLET,
ray_constants.PROCESS_TYPE_DASHBOARD_AGENT,
ray_constants.PROCESS_TYPE_RUNTIME_ENV_AGENT,
]
@pytest.fixture(scope="session", autouse=True)
def test_suite_fixture():
    """Sets up and tears down the cgroup hierarchy for the test suite."""
    setup_test_suite()
    yield
    cleanup_test_suite()
def setup_test_suite():
    """Creates the cgroup hierarchy and moves processes out of the _ROOT_CGROUP into the _LEAF_CGROUP.

    The setup involves the following steps:
    1) Check that the platform is Linux.
    2) Check that cgroupv2 is mounted with read-write permissions in unified mode (i.e. cgroupv1 is not mounted).
    3) Check that the current user has read-write access to the _ROOT_CGROUP.
    4) Check that the _ROOT_CGROUP has the [cpu, memory] controllers available.
    5) Create the _BASE_CGROUP, _TEST_CGROUP, and _LEAF_CGROUP respectively.
    6) Move processes from the _ROOT_CGROUP to the _LEAF_CGROUP because of the no-internal-processes constraint.
    7) Enable [cpu, memory] controllers in the _ROOT_CGROUP, _BASE_CGROUP, and _TEST_CGROUP respectively.

    If any of the steps fail, teardown will be run. Teardown will perform a subset of these steps (not the checks), in reverse order.
    """
    try:
        # 1) Resource isolation requires cgroupv2, which is Linux-only.
        assert (
            platform.system() == "Linux"
        ), f"Failed because resource isolation integration tests can only run on Linux and not on {platform.system()}."
        # 2) Check that cgroupv2 is mounted in read-write mode in unified mode.
        with open(_MOUNT_FILE_PATH, "r") as mount_file:
            lines = mount_file.readlines()
            found_cgroup_v1 = False
            found_cgroup_v2 = False
            for line in lines:
                found_cgroup_v1 = found_cgroup_v1 or ("cgroup r" in line.strip())
                found_cgroup_v2 = found_cgroup_v2 or ("cgroup2 rw" in line.strip())
            assert found_cgroup_v2, (
                "Failed because cgroupv2 is not mounted on the system in read-write mode."
                " See the following documentation for how to enable cgroupv2 properly:"
                " https://kubernetes.io/docs/concepts/architecture/cgroups/#linux-distribution-cgroup-v2-support"
            )
            assert not found_cgroup_v1, (
                "Failed because cgroupv2 and cgroupv1 is mounted on this system."
                " See the following documentation for how to enable cgroupv2 in properly in unified mode:"
                " https://kubernetes.io/docs/concepts/architecture/cgroups/#linux-distribution-cgroup-v2-support"
            )
        # 3) Check that the current user has read-write access to _ROOT_CGROUP by
        # attempting to write the current process into its cgroup.procs file.
        root_cgroup_procs_file = _ROOT_CGROUP / "cgroup.procs"
        with open(root_cgroup_procs_file, "w") as procs_file:
            procs_file.write(str(os.getpid()))
            procs_file.flush()
        # 4) Check that _ROOT_CGROUP has the [cpu, memory] controllers available.
        root_cgroup_controllers_path = _ROOT_CGROUP / "cgroup.controllers"
        expected_controllers = {"cpu", "memory"}
        with open(root_cgroup_controllers_path, "r") as available_controllers_file:
            available_controllers = set(
                available_controllers_file.readline().strip().split(" ")
            )
            assert expected_controllers.issubset(available_controllers), (
                f"Failed because the cpu and memory controllers are not available in {root_cgroup_controllers_path}."
                # BUGFIX: this segment was a plain string, so {_ROOT_CGROUP}
                # was emitted literally instead of being interpolated.
                f" To enable a controller, you need to add it to the cgroup.controllers file of the parent cgroup of {_ROOT_CGROUP}."
                " See: https://docs.kernel.org/admin-guide/cgroup-v2.html#enabling-and-disabling."
            )
        # 5) Create the base, test, and leaf cgroups.
        os.mkdir(_BASE_CGROUP)
        os.mkdir(_TEST_CGROUP)
        os.mkdir(_LEAF_GROUP)
        # 6) Move all processes from the _ROOT_CGROUP into the leaf cgroup.
        # The kernel accepts exactly one pid per write(2) to cgroup.procs, so
        # each pid must be written and flushed individually -- buffering them
        # into one write would concatenate the pids into a single bogus token.
        with open(_ROOT_CGROUP / "cgroup.procs", "r") as root_procs_file:
            pids_to_move = [line.strip() for line in root_procs_file.readlines()]
        with open(_LEAF_GROUP / "cgroup.procs", "w") as leaf_procs_file:
            for pid in pids_to_move:
                leaf_procs_file.write(pid)
                leaf_procs_file.flush()
        # 7) Enable [cpu, memory] controllers on the root, base, and test cgroups.
        with open(
            _ROOT_CGROUP / "cgroup.subtree_control", "w"
        ) as base_subtree_control_file:
            base_subtree_control_file.write("+cpu +memory")
            base_subtree_control_file.flush()
        with open(
            _BASE_CGROUP / "cgroup.subtree_control", "w"
        ) as base_subtree_control_file:
            base_subtree_control_file.write("+cpu +memory")
            base_subtree_control_file.flush()
        with open(
            _TEST_CGROUP / "cgroup.subtree_control", "w"
        ) as test_subtree_control_file:
            test_subtree_control_file.write("+cpu +memory")
            test_subtree_control_file.flush()
    except Exception as e:
        # NOTE: The failure is deliberately not re-raised here; the individual
        # tests will fail against the missing hierarchy instead.
        print(
            f"Failed to setup the test suite with error {str(e)}. Attempting to run teardown."
        )
        cleanup_test_suite()
def cleanup_test_suite():
    """Cleans up the cgroup hierarchy and moves processes out of the _LEAF_CGROUP into the _ROOT_CGROUP.

    The cleanup involves the following steps:
    1) Disable [cpu, memory] controllers in the _TEST_CGROUP, _BASE_CGROUP, and _ROOT_CGROUP respectively.
    2) Move processes from the _LEAF_CGROUP and _TEST_CGROUP back into the _ROOT_CGROUP so the hierarchy can be deleted.
    3) Delete the _LEAF_CGROUP, _TEST_CGROUP, and _BASE_CGROUP respectively.

    If any of the steps fail, teardown will fail an assertion.
    """
    try:
        # 1) Disable the controllers (children first, then the root).
        with open(
            _TEST_CGROUP / "cgroup.subtree_control", "w"
        ) as test_subtree_control_file:
            test_subtree_control_file.write("-cpu -memory")
            test_subtree_control_file.flush()
        with open(
            _BASE_CGROUP / "cgroup.subtree_control", "w"
        ) as base_subtree_control_file:
            base_subtree_control_file.write("-cpu -memory")
            base_subtree_control_file.flush()
        with open(
            _ROOT_CGROUP / "cgroup.subtree_control", "w"
        ) as base_subtree_control_file:
            base_subtree_control_file.write("-cpu -memory")
            base_subtree_control_file.flush()
        # 2a) Move processes from the leaf cgroup back into the root cgroup.
        # The kernel accepts exactly one pid per write(2) to cgroup.procs, so
        # each pid is written and flushed individually -- buffering them into
        # one write would concatenate the pids into a single bogus token.
        with open(_LEAF_GROUP / "cgroup.procs", "r") as leaf_procs_file:
            leaf_pids = [line.strip() for line in leaf_procs_file.readlines()]
        with open(_ROOT_CGROUP / "cgroup.procs", "w") as root_procs_file:
            for pid in leaf_pids:
                root_procs_file.write(pid)
                root_procs_file.flush()
        # 2b) Move the current test process (and anything else in the test
        # cgroup) back into the _ROOT_CGROUP.
        with open(_TEST_CGROUP / "cgroup.procs", "r") as test_procs_file:
            test_pids = [line.strip() for line in test_procs_file.readlines()]
        with open(_ROOT_CGROUP / "cgroup.procs", "w") as root_procs_file:
            for pid in test_pids:
                root_procs_file.write(pid)
                root_procs_file.flush()
        # 3) Delete the cgroups, leaves before their parent.
        os.rmdir(_LEAF_GROUP)
        os.rmdir(_TEST_CGROUP)
        os.rmdir(_BASE_CGROUP)
    except Exception as e:
        # BUGFIX: the second message segment was a plain string, so
        # "${_ROOT_CGROUP}" was emitted literally; it is now interpolated.
        assert False, (
            f"Failed to cleanup test suite's cgroup hierarchy because of {str(e)}. "
            f"You may have to manually clean up the hierarchy under {_ROOT_CGROUP}"
        )
@pytest.fixture
def cleanup_ray():
    """Shutdown all ray instances"""
    yield
    # Stop any processes launched via `ray start`, then disconnect this
    # process's driver session.
    runner = CliRunner()
    runner.invoke(scripts.stop)
    ray.shutdown()
@pytest.fixture
def ray_shutdown():
    # Disconnect the driver started with ray.init() after the test.
    yield
    ray.shutdown()
def generate_node_id() -> str:
    """Returns a random node id."""
    return ray.NodeID.from_random().hex()
def assert_cgroup_hierarchy_exists_for_node(
    node_id: str, resource_isolation_config: ResourceIsolationConfig
):
    """Asserts that the cgroup hierarchy was created correctly for the node.

    The cgroup hierarchy looks like:

        _TEST_CGROUP
            |
        ray-node_<node_id>
          |          |
        system      user
          |        |    |
        leaf   workers non-ray

    Args:
        node_id: used to find the path of the cgroup subtree
        resource_isolation_config: used to verify constraints enabled on the system, workers, and user cgroups
    """
    node_cgroup = Path(resource_isolation_config.cgroup_path) / f"ray-node_{node_id}"
    system_cgroup = node_cgroup / "system"
    user_cgroup = node_cgroup / "user"
    # 1) Every cgroup in the node's subtree must exist.
    expected_dirs = [
        node_cgroup,
        system_cgroup,
        system_cgroup / "leaf",
        user_cgroup,
        user_cgroup / "workers",
        user_cgroup / "non-ray",
    ]
    for expected_dir in expected_dirs:
        assert expected_dir.is_dir()
    # 2) The resource constraints must match the configuration: the system
    # cgroup gets the reserved memory floor and cpu weight, and the user
    # cgroup gets the remainder of the total cpu weight (10000).
    assert (system_cgroup / "memory.min").read_text().strip() == str(
        resource_isolation_config.system_reserved_memory
    )
    assert (system_cgroup / "cpu.weight").read_text().strip() == str(
        resource_isolation_config.system_reserved_cpu_weight
    )
    assert (user_cgroup / "cpu.weight").read_text().strip() == str(
        10000 - resource_isolation_config.system_reserved_cpu_weight
    )
def assert_process_in_not_moved_into_ray_cgroups(
    node_id: str,
    resource_isolation_config: ResourceIsolationConfig,
    pid: str,
):
    """Asserts that ``pid`` is absent from all of the node's ray-managed cgroups.

    Scans the system leaf, user non-ray, and user workers cgroup.procs files.
    (Name kept as-is for existing callers despite the grammar.)

    Args:
        node_id: used to construct the path of the cgroup subtree
        resource_isolation_config: used to construct the path of the cgroup
            subtree
        pid: the process id that must not appear in any ray cgroup
    """
    node_cgroup = Path(resource_isolation_config.cgroup_path) / f"ray-node_{node_id}"
    procs_files = [
        node_cgroup / "system" / "leaf" / "cgroup.procs",
        node_cgroup / "user" / "non-ray" / "cgroup.procs",
        node_cgroup / "user" / "workers" / "cgroup.procs",
    ]
    found_pid = False
    for procs_file in procs_files:
        for entry in procs_file.read_text().splitlines():
            if entry.strip() == pid:
                found_pid = True
    assert not found_pid
def assert_system_processes_are_in_system_cgroup(
    node_id: str,
    resource_isolation_config: ResourceIsolationConfig,
    expected_count: int,
):
    """Asserts that exactly ``expected_count`` processes are in the system leaf cgroup.

    Args:
        node_id: used to construct the path of the cgroup subtree
        resource_isolation_config: used to construct the path of the cgroup
            subtree
        expected_count: the number of expected system processes.
    """
    system_leaf_procs_path = (
        Path(resource_isolation_config.cgroup_path)
        / f"ray-node_{node_id}"
        / "system"
        / "leaf"
        / "cgroup.procs"
    )
    # At least the raylet process is always moved.
    with open(system_leaf_procs_path, "r") as cgroup_procs_file:
        lines = cgroup_procs_file.readlines()
    assert (
        len(lines) == expected_count
    ), f"Expected only system process passed into the raylet. Found {lines}. You may have added a new dashboard module in which case you need to update _EXPECTED_DASHBOARD_MODULES"
def assert_worker_processes_are_in_workers_cgroup(
    node_id: str,
    resource_isolation_config: ResourceIsolationConfig,
    worker_pids: Set[str],
):
    """Asserts that the workers cgroup contains exactly ``worker_pids``.

    Args:
        node_id: used to construct the path of the cgroup subtree
        resource_isolation_config: used to construct the path of the cgroup
            subtree
        worker_pids: a set of pids that are expected inside the workers
            leaf cgroup.
    """
    workers_procs_path = (
        Path(resource_isolation_config.cgroup_path)
        / f"ray-node_{node_id}"
        / "user"
        / "workers"
        / "cgroup.procs"
    )
    with open(workers_procs_path, "r") as cgroup_procs_file:
        pids_in_cgroup = {line.strip() for line in cgroup_procs_file}
    assert pids_in_cgroup == worker_pids
def assert_cgroup_hierarchy_cleaned_up_for_node(
    node_id: str, resource_isolation_config: ResourceIsolationConfig
):
    """Asserts that the cgroup hierarchy was deleted correctly for the node.

    Args:
        node_id: used to construct the path of the cgroup subtree
        resource_isolation_config: used to construct the path of the cgroup
            subtree
    """
    node_cgroup = Path(resource_isolation_config.cgroup_path) / f"ray-node_{node_id}"
    # If the root of the node's subtree is gone, nothing else can remain.
    assert not node_cgroup.is_dir(), (
        f"Root cgroup node at {node_cgroup} was not deleted. Cgroup cleanup failed."
        " You may have to manually delete the cgroup subtree."
    )
def create_driver_in_internal_namespace():
    """Launch a driver process that joins the '_ray_internal_' namespace.

    If the driver is part of the '_ray_internal_' namespace, it will NOT
    be moved into the workers cgroup by the raylet when it registers.
    The Dashboard ServeHead and JobHead modules are drivers that are
    technically system processes and use the '_ray_internal_' namespace and therefore
    must not be moved into the workers cgroup on registration.

    Returns:
        subprocess.Popen: handle to the spawned driver; the caller is
        responsible for terminating it.
    """
    driver_code = textwrap.dedent(
        """
        import ray
        import time
        ray.init(namespace='_ray_internal_')
        time.sleep(3600)
        """
    ).strip()
    # Use the current interpreter rather than whatever "python" resolves to
    # on PATH, so the driver runs in the same environment as the test.
    second_driver_proc = subprocess.Popen([sys.executable, "-c", driver_code])
    return second_driver_proc
# The following tests check for cgroup setup and cleanup with the
# ray cli.
def test_ray_cli_start_invalid_resource_isolation_config(cleanup_ray):
    """`ray start` must reject --cgroup-path when isolation is not enabled."""
    cli_runner = CliRunner()
    outcome = cli_runner.invoke(
        scripts.start,
        ["--cgroup-path=/doesnt/matter"],
    )
    # The CLI surfaces the validation failure as a non-zero exit code with a
    # ValueError attached to the result.
    assert outcome.exit_code != 0
    assert isinstance(outcome.exception, ValueError)
def test_ray_cli_start_resource_isolation_creates_cgroup_hierarchy_and_cleans_up(
    cleanup_ray,
):
    """End-to-end cgroup lifecycle test for `ray start` with isolation enabled.

    Starts a head node through the CLI with resource-isolation flags, checks
    that system, worker, and external-driver processes land in (or stay out
    of) the expected cgroups, and that `ray stop` deletes the subtree again.
    """
    cgroup_path = str(_TEST_CGROUP)
    object_store_memory = 1024**3
    system_reserved_memory = 1024**3
    num_cpus = 4
    system_reserved_cpu = 1
    # Mirrors the CLI flags passed below so the assertion helpers can
    # reconstruct the expected cgroup paths.
    resource_isolation_config = ResourceIsolationConfig(
        cgroup_path=cgroup_path,
        enable_resource_isolation=True,
        system_reserved_cpu=system_reserved_cpu,
        system_reserved_memory=system_reserved_memory,
        object_store_memory=object_store_memory,
    )
    # Pin the node id so the cgroup directory name is predictable.
    node_id = ray.NodeID.from_random().hex()
    os.environ["RAY_OVERRIDE_NODE_ID_FOR_TESTING"] = node_id
    runner = CliRunner()
    result = runner.invoke(
        scripts.start,
        [
            "--head",
            "--num-cpus",
            num_cpus,
            "--enable-resource-isolation",
            "--cgroup-path",
            cgroup_path,
            "--system-reserved-cpu",
            system_reserved_cpu,
            "--system-reserved-memory",
            system_reserved_memory,
            "--object-store-memory",
            object_store_memory,
        ],
    )
    assert result.exit_code == 0
    assert_cgroup_hierarchy_exists_for_node(node_id, resource_isolation_config)
    @ray.remote(num_cpus=1)
    class Actor:
        def __init__(self):
            pass
        def get_pid(self):
            # The reported pid is used below to verify worker cgroup membership.
            return os.getpid()
    second_driver_proc = create_driver_in_internal_namespace()
    actor_refs = []
    for _ in range(num_cpus):
        actor_refs.append(Actor.remote())
    worker_pids = set()
    # The current (driver) process is also expected in the workers cgroup.
    worker_pids.add(str(os.getpid()))
    for actor in actor_refs:
        worker_pids.add(str(ray.get(actor.get_pid.remote())))
    assert_system_processes_are_in_system_cgroup(
        node_id,
        resource_isolation_config,
        len(_EXPECTED_SYSTEM_PROCESSES_RAY_START) + len(_EXPECTED_DASHBOARD_MODULES),
    )
    assert_worker_processes_are_in_workers_cgroup(
        node_id, resource_isolation_config, worker_pids
    )
    # Drivers in the '_ray_internal_' namespace must NOT be moved into
    # the workers cgroup when they register.
    assert_process_in_not_moved_into_ray_cgroups(
        node_id, resource_isolation_config, second_driver_proc.pid
    )
    second_driver_proc.kill()
    wait_for_condition(lambda: second_driver_proc.wait(), timeout=5)
    runner.invoke(scripts.stop)
    assert_cgroup_hierarchy_cleaned_up_for_node(node_id, resource_isolation_config)
# The following tests will test integration of resource isolation
# with the ray.init() function.
def test_ray_init_resource_isolation_disabled_by_default(ray_shutdown):
    """Without explicit opt-in, ray.init() must leave resource isolation off."""
    ray.init(address="local")
    local_node = ray._private.worker._global_node
    assert local_node is not None
    assert not local_node.resource_isolation_config.is_enabled()
def test_ray_init_resource_isolation_creates_cgroup_hierarchy_and_cleans_up(
    ray_shutdown,
):
    """End-to-end cgroup lifecycle test for ray.init() with isolation enabled.

    Verifies the cgroup subtree is created on init, that system and worker
    processes are placed into their respective cgroups, and that
    ray.shutdown() removes the subtree again.
    """
    cgroup_path = str(_TEST_CGROUP)
    system_reserved_cpu = 1
    system_reserved_memory = 1024**3
    object_store_memory = 1024**3
    num_cpus = 4
    # Mirrors the ray.init() arguments below so the assertion helpers can
    # reconstruct the expected cgroup paths.
    resource_isolation_config = ResourceIsolationConfig(
        enable_resource_isolation=True,
        cgroup_path=cgroup_path,
        system_reserved_cpu=system_reserved_cpu,
        system_reserved_memory=system_reserved_memory,
        object_store_memory=object_store_memory,
    )
    # Pin the node id so the cgroup directory name is predictable.
    node_id = generate_node_id()
    os.environ["RAY_OVERRIDE_NODE_ID_FOR_TESTING"] = node_id
    ray.init(
        address="local",
        num_cpus=num_cpus,
        enable_resource_isolation=True,
        cgroup_path=cgroup_path,
        system_reserved_cpu=system_reserved_cpu,
        system_reserved_memory=system_reserved_memory,
        object_store_memory=object_store_memory,
    )
    assert_cgroup_hierarchy_exists_for_node(node_id, resource_isolation_config)
    @ray.remote(num_cpus=1)
    class Actor:
        def __init__(self):
            pass
        def get_pid(self):
            # The reported pid is used below to verify worker cgroup membership.
            return os.getpid()
    actor_refs = []
    for _ in range(num_cpus):
        actor_refs.append(Actor.remote())
    worker_pids = set()
    # The current (driver) process is also expected in the workers cgroup.
    worker_pids.add(str(os.getpid()))
    for actor in actor_refs:
        worker_pids.add(str(ray.get(actor.get_pid.remote())))
    assert_system_processes_are_in_system_cgroup(
        node_id,
        resource_isolation_config,
        len(_EXPECTED_SYSTEM_PROCESSES_RAY_INIT) + len(_EXPECTED_DASHBOARD_MODULES),
    )
    assert_worker_processes_are_in_workers_cgroup(
        node_id, resource_isolation_config, worker_pids
    )
    ray.shutdown()
    assert_cgroup_hierarchy_cleaned_up_for_node(node_id, resource_isolation_config)
if __name__ == "__main__":
    # Allow running this test module directly; delegate to pytest and
    # propagate its exit code.
    sys.exit(pytest.main(["-sv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/resource_isolation/test_resource_isolation_integration.py",
"license": "Apache License 2.0",
"lines": 538,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/tests/test_object_manager_fault_tolerance.py | import json
import sys
import numpy as np
import pytest
import ray
from ray._common.test_utils import wait_for_condition
from ray._private.internal_api import get_memory_info_reply, get_state_from_address
from ray._private.test_utils import (
RPC_FAILURE_MAP,
RPC_FAILURE_TYPES,
)
from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy
@pytest.mark.parametrize("deterministic_failure", RPC_FAILURE_TYPES)
def test_free_objects_idempotent(
    monkeypatch, shutdown_only, deterministic_failure, ray_start_cluster
):
    """FreeObjects RPC must be idempotent under injected transient failures.

    Injects a single deterministic failure into the
    ObjectManagerService.FreeObjects client RPC and verifies that all copies
    of a replicated object are still freed (cluster object store usage
    eventually drops to zero).
    """
    # Inject exactly one failure of the parametrized type into the
    # FreeObjects client RPC.
    failure = RPC_FAILURE_MAP[deterministic_failure].copy()
    failure["num_failures"] = 1
    monkeypatch.setenv(
        "RAY_testing_rpc_failure",
        json.dumps({"ObjectManagerService.grpc_client.FreeObjects": failure}),
    )
    @ray.remote
    def simple_task(big_object_ref_list):
        # Pulls the object into the plasma store of the node running this task.
        ray.get(big_object_ref_list[0])
        return "ok"
    cluster = ray_start_cluster
    remote_node_1 = cluster.add_node(num_cpus=1)
    remote_node_2 = cluster.add_node(num_cpus=1)
    ray.init(address=cluster.address)
    big_object_ref = ray.put(np.zeros(100 * 1024 * 1024))
    # Propagate the big object to the remote nodes' plasma stores
    result_ref_1 = simple_task.options(
        scheduling_strategy=NodeAffinitySchedulingStrategy(
            node_id=remote_node_1.node_id, soft=False
        )
    ).remote([big_object_ref])
    result_ref_2 = simple_task.options(
        scheduling_strategy=NodeAffinitySchedulingStrategy(
            node_id=remote_node_2.node_id, soft=False
        )
    ).remote([big_object_ref])
    assert ray.get([result_ref_1, result_ref_2]) == ["ok", "ok"]
    # Drop the last driver-side reference so the object becomes freeable.
    del big_object_ref
    def get_cluster_memory_usage():
        # Aggregate object store usage across the whole cluster.
        state = get_state_from_address(ray.get_runtime_context().gcs_address)
        reply = get_memory_info_reply(state)
        return reply.store_stats.object_store_bytes_used
    wait_for_condition(lambda: get_cluster_memory_usage() == 0, timeout=30)
if __name__ == "__main__":
    # Allow running this test module directly; delegate to pytest and
    # propagate its exit code.
    sys.exit(pytest.main(["-sv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/test_object_manager_fault_tolerance.py",
"license": "Apache License 2.0",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:doc/source/ray-core/doc_code/direct_transport_gloo.py | # flake8: noqa
# __normal_example_start__
import torch
import ray
@ray.remote
class MyActor:
    def random_tensor(self):
        # Default path: the result travels through Ray's object store.
        return torch.randn(1000, 1000)
# __normal_example_end__
# __gloo_example_start__
@ray.remote
class MyActor:
    # tensor_transport="gloo" opts this method's results into the Gloo
    # transport instead of Ray's object store.
    @ray.method(tensor_transport="gloo")
    def random_tensor(self):
        return torch.randn(1000, 1000)
# __gloo_example_end__
# __gloo_group_start__
import torch
import ray
from ray.experimental.collective import create_collective_group
@ray.remote
class MyActor:
    @ray.method(tensor_transport="gloo")
    def random_tensor(self):
        return torch.randn(1000, 1000)
    def sum(self, tensor: torch.Tensor):
        return torch.sum(tensor)
sender, receiver = MyActor.remote(), MyActor.remote()
# The tensor_transport specified here must match the one used in the @ray.method
# decorator.
group = create_collective_group([sender, receiver], backend="torch_gloo")
# __gloo_group_end__
# __gloo_group_destroy_start__
from ray.experimental.collective import destroy_collective_group
# Tear the group down once no more transfers are needed between its members.
destroy_collective_group(group)
# __gloo_group_destroy_end__
# __gloo_full_example_start__
import torch
import ray
from ray.experimental.collective import create_collective_group
@ray.remote
class MyActor:
    @ray.method(tensor_transport="gloo")
    def random_tensor(self):
        return torch.randn(1000, 1000)
    def sum(self, tensor: torch.Tensor):
        return torch.sum(tensor)
sender, receiver = MyActor.remote(), MyActor.remote()
group = create_collective_group([sender, receiver], backend="torch_gloo")
# The tensor will be stored by the `sender` actor instead of in Ray's object
# store.
tensor = sender.random_tensor.remote()
# Passing the ref to `receiver` transfers the tensor between the two actors.
result = receiver.sum.remote(tensor)
print(ray.get(result))
# __gloo_full_example_end__
# __gloo_multiple_tensors_example_start__
import torch
import ray
from ray.experimental.collective import create_collective_group
@ray.remote
class MyActor:
    @ray.method(tensor_transport="gloo")
    def random_tensor_dict(self):
        return {"tensor1": torch.randn(1000, 1000), "tensor2": torch.randn(1000, 1000)}
    def sum(self, tensor_dict: dict):
        return torch.sum(tensor_dict["tensor1"]) + torch.sum(tensor_dict["tensor2"])
sender, receiver = MyActor.remote(), MyActor.remote()
group = create_collective_group([sender, receiver], backend="torch_gloo")
# Both tensor values in the dictionary will be stored by the `sender` actor
# instead of in Ray's object store.
tensor_dict = sender.random_tensor_dict.remote()
result = receiver.sum.remote(tensor_dict)
print(ray.get(result))
# __gloo_multiple_tensors_example_end__
# __gloo_intra_actor_start__
import torch
import ray
import pytest
from ray.experimental.collective import create_collective_group
@ray.remote
class MyActor:
    @ray.method(tensor_transport="gloo")
    def random_tensor(self):
        return torch.randn(1000, 1000)
    def sum(self, tensor: torch.Tensor):
        return torch.sum(tensor)
sender, receiver = MyActor.remote(), MyActor.remote()
group = create_collective_group([sender, receiver], backend="torch_gloo")
tensor = sender.random_tensor.remote()
# Pass the ObjectRef back to the actor that produced it. The tensor will be
# passed back to the same actor without copying.
sum1 = sender.sum.remote(tensor)
sum2 = receiver.sum.remote(tensor)
# Both actors must compute the same sum over the same tensor.
assert torch.allclose(*ray.get([sum1, sum2]))
# __gloo_intra_actor_end__
# __gloo_get_start__
# Wrong example of ray.get(). Since the tensor transport in the @ray.method decorator is Gloo,
# ray.get() will try to use Gloo to fetch the tensor, which is not supported
# because the caller is not part of the collective group.
with pytest.raises(ValueError) as e:
    ray.get(tensor)
assert (
    "Trying to use two-sided tensor transport: GLOO for ray.get. This is only supported for one-sided transports such as NIXL or the OBJECT_STORE."
    in str(e.value)
)
# Correct example of ray.get(), using the object store to fetch the RDT object because the caller
# is not part of the collective group.
# _use_object_store=True forces the fetch through Ray's object store.
print(ray.get(tensor, _use_object_store=True))
# torch.Tensor(...)
# __gloo_get_end__
# __gloo_object_mutability_warning_start__
import torch
import ray
from ray.experimental.collective import create_collective_group
@ray.remote
class MyActor:
    @ray.method(tensor_transport="gloo")
    def random_tensor(self):
        return torch.randn(1000, 1000)
    def increment_and_sum(self, tensor: torch.Tensor):
        # In-place update.
        tensor += 1
        return torch.sum(tensor)
sender, receiver = MyActor.remote(), MyActor.remote()
group = create_collective_group([sender, receiver], backend="torch_gloo")
tensor = sender.random_tensor.remote()
# Intra-actor pass: the sender mutates Ray's stored copy in place (see the
# warning text below).
tensor1 = sender.increment_and_sum.remote(tensor)
tensor2 = receiver.increment_and_sum.remote(tensor)
# A warning will be printed:
# UserWarning: GPU ObjectRef(...) is being passed back to the actor that created it Actor(MyActor, ...). Note that GPU objects are mutable. If the tensor is modified, Ray's internal copy will also be updated, and subsequent passes to other actors will receive the updated version instead of the original.
try:
    # This assertion may fail because the tensor returned by sender.random_tensor
    # is modified in-place by sender.increment_and_sum while being sent to
    # receiver.increment_and_sum.
    assert torch.allclose(ray.get(tensor1), ray.get(tensor2))
except AssertionError:
    print("AssertionError: sender and receiver returned different sums.")
# __gloo_object_mutability_warning_end__
# __gloo_wait_tensor_freed_bad_start__
import torch
import ray
from ray.experimental.collective import create_collective_group
@ray.remote
class MyActor:
    @ray.method(tensor_transport="gloo")
    def random_tensor(self):
        self.tensor = torch.randn(1000, 1000)
        # After this function returns, Ray and this actor will both hold a
        # reference to the same tensor.
        return self.tensor
    def increment_and_sum_stored_tensor(self):
        # NOTE: In-place update, while Ray still holds a reference to the same tensor.
        self.tensor += 1
        return torch.sum(self.tensor)
    def increment_and_sum(self, tensor: torch.Tensor):
        return torch.sum(tensor + 1)
sender, receiver = MyActor.remote(), MyActor.remote()
group = create_collective_group([sender, receiver], backend="torch_gloo")
tensor = sender.random_tensor.remote()
# Anti-pattern: mutate the stored tensor without waiting for Ray to release it.
tensor1 = sender.increment_and_sum_stored_tensor.remote()
# Wait for sender.increment_and_sum_stored_tensor task to finish.
tensor1 = ray.get(tensor1)
# Receiver will now receive the updated value instead of the original.
tensor2 = receiver.increment_and_sum.remote(tensor)
try:
    # This assertion will fail because sender.increment_and_sum_stored_tensor
    # modified the tensor in place before sending it to
    # receiver.increment_and_sum.
    assert torch.allclose(tensor1, ray.get(tensor2))
except AssertionError:
    print("AssertionError: sender and receiver returned different sums.")
# __gloo_wait_tensor_freed_bad_end__
# __gloo_wait_tensor_freed_start__
import torch
import ray
from ray.experimental.collective import create_collective_group
# Corrected version of the previous example: synchronize in-place mutation
# with ray.experimental.wait_tensor_freed.
@ray.remote
class MyActor:
    @ray.method(tensor_transport="gloo")
    def random_tensor(self):
        self.tensor = torch.randn(1000, 1000)
        return self.tensor
    def increment_and_sum_stored_tensor(self):
        # 1. Sender actor waits for Ray to release all references to the tensor
        # before modifying the tensor in place.
        ray.experimental.wait_tensor_freed(self.tensor)
        # NOTE: In-place update, but Ray guarantees that it has already released
        # its references to this tensor.
        self.tensor += 1
        return torch.sum(self.tensor)
    def increment_and_sum(self, tensor: torch.Tensor):
        # Receiver task remains the same.
        return torch.sum(tensor + 1)
sender, receiver = MyActor.remote(), MyActor.remote()
group = create_collective_group([sender, receiver], backend="torch_gloo")
tensor = sender.random_tensor.remote()
tensor1 = sender.increment_and_sum_stored_tensor.remote()
# 2. Skip `ray.get`` because `wait_tensor_freed`` will block until all
# references to `tensor` are freed, so calling `ray.get` here would cause a
# deadlock.
# tensor1 = ray.get(tensor1)
tensor2 = receiver.increment_and_sum.remote(tensor)
# 3. Delete all references to `tensor`, to unblock wait_tensor_freed.
del tensor
# This assertion will now pass.
assert torch.allclose(ray.get(tensor1), ray.get(tensor2))
# __gloo_wait_tensor_freed_end__
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-core/doc_code/direct_transport_gloo.py",
"license": "Apache License 2.0",
"lines": 207,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/ray-core/doc_code/direct_transport_nccl.py | # flake8: noqa
# __nccl_full_example_start__
import torch
import ray
from ray.experimental.collective import create_collective_group
@ray.remote(num_gpus=1)
class MyActor:
    @ray.method(tensor_transport="nccl")
    def random_tensor(self):
        # The tensor is allocated on the GPU (.cuda()) for the NCCL transport.
        return torch.randn(1000, 1000).cuda()
    def sum(self, tensor: torch.Tensor):
        return torch.sum(tensor)
sender, receiver = MyActor.remote(), MyActor.remote()
group = create_collective_group([sender, receiver], backend="nccl")
# The tensor will be stored by the `sender` actor instead of in Ray's object
# store.
tensor = sender.random_tensor.remote()
result = receiver.sum.remote(tensor)
ray.get(result)
# __nccl_full_example_end__
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-core/doc_code/direct_transport_nccl.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/ray-core/doc_code/direct_transport_nixl.py | # flake8: noqa
# __nixl_full_example_start__
import torch
import ray
@ray.remote(num_gpus=1)
class MyActor:
    @ray.method(tensor_transport="nixl")
    def random_tensor(self):
        return torch.randn(1000, 1000).cuda()
    def sum(self, tensor: torch.Tensor):
        return torch.sum(tensor)
    def produce(self, tensors):
        # Store each tensor with the NIXL transport via ray.put.
        refs = []
        for t in tensors:
            refs.append(ray.put(t, _tensor_transport="nixl"))
        return refs
    def consume_with_nixl(self, refs):
        # ray.get will also use NIXL to retrieve the
        # result.
        tensors = [ray.get(ref) for ref in refs]
        sum = 0
        for t in tensors:
            assert t.device.type == "cuda"
            sum += t.sum().item()
        return sum
# No collective group is needed. The two actors just need to have NIXL
# installed.
sender, receiver = MyActor.remote(), MyActor.remote()
# The tensor will be stored by the `sender` actor instead of in Ray's object
# store.
tensor = sender.random_tensor.remote()
result = receiver.sum.remote(tensor)
ray.get(result)
# __nixl_full_example_end__
# __nixl_get_start__
# ray.get will also use NIXL to retrieve the
# result.
print(ray.get(tensor))
# torch.Tensor(...)
# __nixl_get_end__
# __nixl_put__and_get_start__
tensor1 = torch.randn(1000, 1000).cuda()
tensor2 = torch.randn(1000, 1000).cuda()
refs = sender.produce.remote([tensor1, tensor2])
ref1 = receiver.consume_with_nixl.remote(refs)
print(ray.get(ref1))
# __nixl_put__and_get_end__
# __nixl_limitations_start__
@ray.remote(num_gpus=1)
class Actor:
    def __init__(self):
        self.tensor1 = torch.tensor([1, 2, 3])
        self.tensor2 = torch.tensor([4, 5, 6])
        self.tensor3 = torch.tensor([7, 8, 9])
    @ray.method(tensor_transport="nixl")
    def send_dict1(self):
        return {"round1-1": self.tensor1, "round1-2": self.tensor2}
    @ray.method(tensor_transport="nixl")
    def send_dict2(self):
        # NOTE: re-sends self.tensor1 under a new key; see the error
        # handling below.
        return {"round2-1": self.tensor1, "round2-3": self.tensor3}
    def sum_dict(self, dict):
        return sum(v.sum().item() for v in dict.values())
sender, receiver = Actor.remote(), Actor.remote()
ref1 = sender.send_dict1.remote()
result1 = receiver.sum_dict.remote(ref1)
print(ray.get(result1))
ref2 = sender.send_dict2.remote()
result2 = receiver.sum_dict.remote(ref2)
# The second round may raise a ValueError, which is caught and printed.
try:
    print(ray.get(result2))
except ValueError as e:
    print("Error caught:", e)
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-core/doc_code/direct_transport_nixl.py",
"license": "Apache License 2.0",
"lines": 73,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/serve/tests/test_replica_ranks.py | import random
import sys
from typing import Any, Dict, List
import pytest
import ray
from ray import serve
from ray._common.test_utils import SignalActor, wait_for_condition
from ray.serve._private.common import (
DeploymentID,
DeploymentStatus,
ReplicaState,
)
from ray.serve._private.constants import (
SERVE_CONTROLLER_NAME,
SERVE_DEFAULT_APP_NAME,
SERVE_NAMESPACE,
)
from ray.serve._private.controller import ServeController
from ray.serve._private.test_utils import (
check_deployment_status,
check_num_replicas_eq,
)
from ray.serve.schema import ReplicaRank
def get_controller() -> ServeController:
    """Look up and return the running ServeController actor handle."""
    controller_handle = ray.get_actor(SERVE_CONTROLLER_NAME, namespace=SERVE_NAMESPACE)
    return controller_handle
def get_replica_ranks(deployment_name: str) -> Dict[str, ReplicaRank]:
    """Fetch the rank assigned to every replica of a deployment.

    Args:
        deployment_name: Name of the deployment to get ranks for

    Returns:
        Dict mapping replica_id to ReplicaRank object
    """
    deployment_id = DeploymentID(name=deployment_name, app_name=SERVE_DEFAULT_APP_NAME)
    # Query the controller's introspection endpoint for the mapping.
    return ray.get(get_controller()._get_replica_ranks_mapping.remote(deployment_id))
def get_running_replica_ids(deployment_name: str) -> List[str]:
    """Get the replica IDs of running replicas for given deployment."""
    deployment_id = DeploymentID(name=deployment_name, app_name=SERVE_DEFAULT_APP_NAME)
    # Dump the controller's replica state table and keep only RUNNING entries.
    state_dump = ray.get(
        get_controller()._dump_replica_states_for_testing.remote(deployment_id)
    )
    running = state_dump.get([ReplicaState.RUNNING])
    return [r.replica_id.unique_id for r in running]
def check_rank_contiguity(ranks: Dict[str, ReplicaRank]) -> bool:
    """Check that all rank types form contiguous sequences from 0 to N-1.

    Args:
        ranks: Dict mapping replica_id to ReplicaRank object

    Returns:
        True if all rank types (global, node, local) are contiguous
    """
    # An empty mapping is trivially contiguous.
    if not ranks:
        return True
    # Global ranks must be exactly 0..N-1.
    global_ranks = sorted(rank_obj.rank for rank_obj in ranks.values())
    expected_global = list(range(len(global_ranks)))
    if global_ranks != expected_global:
        print(
            f"Global ranks not contiguous. Expected {expected_global}, got {global_ranks}"
        )
        return False
    # Bucket local ranks by the node they live on.
    replicas_by_node = {}
    for rank_obj in ranks.values():
        replicas_by_node.setdefault(rank_obj.node_rank, []).append(rank_obj.local_rank)
    # Node ranks must be exactly 0..num_nodes-1.
    node_ranks_sorted = sorted(replicas_by_node)
    expected_node_ranks = list(range(len(node_ranks_sorted)))
    if node_ranks_sorted != expected_node_ranks:
        print(
            f"Node ranks not contiguous. Expected {expected_node_ranks}, got {node_ranks_sorted}"
        )
        return False
    # Within each node, local ranks must be exactly 0..num_local-1.
    for node_rank, local_ranks in replicas_by_node.items():
        local_ranks_sorted = sorted(local_ranks)
        expected_local = list(range(len(local_ranks_sorted)))
        if local_ranks_sorted != expected_local:
            print(
                f"Local ranks not contiguous on node {node_rank}. Expected {expected_local}, got {local_ranks_sorted}"
            )
            return False
    return True
def check_rank_assignment_complete(deployment_name: str, expected_count: int) -> bool:
    """Check that all replicas have been assigned ranks and they are contiguous.

    This validates global ranks, node ranks, and local ranks for all running replicas.
    """
    try:
        replica_ids = get_running_replica_ids(deployment_name)
        ranks = get_replica_ranks(deployment_name)
        # Every running replica must appear in the rank mapping.
        missing = [rid for rid in replica_ids if rid not in ranks]
        if missing:
            replica_id = missing[0]
            print(f"Replica {replica_id} not found in ranks: {ranks.keys()}")
            return False
        # The mapping must contain exactly the expected number of entries.
        if len(ranks) != expected_count:
            print(
                f"Expected {expected_count} ranks, got {len(ranks)}: {list(ranks.keys())}"
            )
            return False
        # Finally, all rank spaces must be gap-free.
        return check_rank_contiguity(ranks)
    except Exception as e:
        # Swallow transient errors so this can be polled via wait_for_condition.
        print(f"Error checking rank assignment: {e}")
        import traceback

        traceback.print_exc()
        return False
@pytest.mark.parametrize("num_replicas", [1, 3, 5])
def test_basic_rank_assignment(serve_instance, num_replicas):
    """Test basic rank assignment for different numbers of replicas."""
    @serve.deployment(num_replicas=num_replicas)
    class RankTracker:
        def __init__(self):
            # Populated on each request from the replica context.
            self.replica_rank = None
            self.world_size = None
        def __call__(self):
            context = serve.get_replica_context()
            # context.rank may be None if the rank has not been assigned yet.
            self.replica_rank = context.rank.rank if context.rank else None
            self.world_size = context.world_size
            return {
                "rank": self.replica_rank,
                "world_size": self.world_size,
            }
    handle = serve.run(RankTracker.bind())
    # Wait for all replicas to be running and have ranks assigned
    wait_for_condition(
        lambda: check_rank_assignment_complete("RankTracker", num_replicas),
    )
    # Verify ranks are correctly assigned
    ranks = get_replica_ranks("RankTracker")
    assert len(ranks) == num_replicas
    assert check_rank_contiguity(ranks)
    # Verify replicas can access their ranks via API
    responses = []
    for _ in range(10):  # Make multiple requests to hit different replicas
        response = handle.remote().result()
        responses.append(response)
    # Check that we got responses from all replicas
    seen_ranks = set()
    for response in responses:
        assert response["world_size"] == num_replicas
        if response["rank"] is not None:
            seen_ranks.add(response["rank"])
    # We should eventually see all ranks (though it might take multiple requests)
    assert len(seen_ranks) <= num_replicas
    for rank in seen_ranks:
        assert 0 <= rank < num_replicas
def test_node_and_local_rank_assignment(serve_instance):
    """Test node_rank and local_rank assignment in addition to global rank."""
    @serve.deployment(num_replicas=4)
    class NodeRankTracker:
        def __call__(self):
            context = serve.get_replica_context()
            if context.rank:
                return {
                    "rank": context.rank.rank,
                    "node_rank": context.rank.node_rank,
                    "local_rank": context.rank.local_rank,
                    "world_size": context.world_size,
                }
            return None
    handle = serve.run(NodeRankTracker.bind())
    # Wait for all replicas to be running
    wait_for_condition(
        lambda: check_rank_assignment_complete("NodeRankTracker", 4),
    )
    # Collect responses from all replicas
    responses = []
    max_attempts = 50
    # Requests are load-balanced, so retry until every replica has answered.
    for _ in range(max_attempts):
        response = handle.remote().result()
        if response and response not in responses:
            responses.append(response)
        if len(responses) == 4:
            break
    assert len(responses) == 4, f"Expected 4 unique responses, got {len(responses)}"
    # Verify all responses have valid ranks
    global_ranks = set()
    node_ranks = set()
    replicas_by_node = {}
    for response in responses:
        assert response["world_size"] == 4
        # Check global rank
        global_rank = response["rank"]
        assert 0 <= global_rank < 4
        assert global_rank not in global_ranks, "Duplicate global rank found"
        global_ranks.add(global_rank)
        # Check node_rank and local_rank
        node_rank = response["node_rank"]
        local_rank = response["local_rank"]
        assert node_rank >= 0
        assert local_rank >= 0
        node_ranks.add(node_rank)
        # Track replicas by node for local rank verification
        if node_rank not in replicas_by_node:
            replicas_by_node[node_rank] = []
        replicas_by_node[node_rank].append(local_rank)
    # Verify global ranks are contiguous 0..3
    assert global_ranks == {0, 1, 2, 3}
    # Verify node ranks are contiguous starting from 0
    assert min(node_ranks) == 0
    assert max(node_ranks) == len(node_ranks) - 1
    # Verify local ranks within each node are contiguous starting from 0
    for node_rank, local_ranks_list in replicas_by_node.items():
        local_ranks_set = set(local_ranks_list)
        expected_local_ranks = set(range(len(local_ranks_list)))
        assert local_ranks_set == expected_local_ranks, (
            f"Node {node_rank} has non-contiguous local ranks: {local_ranks_set}, "
            f"expected {expected_local_ranks}"
        )
def test_local_rank_contiguity_within_node(serve_instance):
    """Test that local ranks are contiguous within each node."""
    @serve.deployment(num_replicas=3)
    class LocalRankTracker:
        def __call__(self):
            context = serve.get_replica_context()
            if context.rank:
                return {
                    "rank": context.rank.rank,
                    "node_rank": context.rank.node_rank,
                    "local_rank": context.rank.local_rank,
                }
            return None
    handle = serve.run(LocalRankTracker.bind())
    # Wait for all replicas to be running
    wait_for_condition(
        lambda: check_rank_assignment_complete("LocalRankTracker", 3),
    )
    # Collect all responses
    responses = []
    # Requests are load-balanced, so retry until every replica has answered.
    for _ in range(30):
        response = handle.remote().result()
        if response and response not in responses:
            responses.append(response)
        if len(responses) == 3:
            break
    assert len(responses) == 3
    # Group by node_rank and check local_rank contiguity
    by_node = {}
    for r in responses:
        node_rank = r["node_rank"]
        if node_rank not in by_node:
            by_node[node_rank] = []
        by_node[node_rank].append(r["local_rank"])
    # Within each node, local ranks should start at 0 and be contiguous
    for node_rank, local_ranks in by_node.items():
        local_ranks_sorted = sorted(local_ranks)
        expected = list(range(len(local_ranks)))
        assert local_ranks_sorted == expected, (
            f"Node {node_rank} has non-contiguous local ranks: "
            f"{local_ranks_sorted}, expected {expected}"
        )
def test_rank_assignment_with_autoscaling(serve_instance):
    """Test rank assignment and reassignment during autoscaling."""
    # Signal used to hold requests open so autoscaling sees sustained load.
    signal_actor = SignalActor.remote()
    @serve.deployment(
        autoscaling_config={
            "target_ongoing_requests": 1,
            "metrics_interval_s": 0.1,
            "min_replicas": 2,
            "max_replicas": 4,
            "upscale_delay_s": 1,
            "downscale_delay_s": 1,
            "look_back_period_s": 10,
        },
        max_ongoing_requests=10,
    )
    class AutoscalingRankTracker:
        async def __call__(self):
            # Block until the test releases the signal.
            await signal_actor.wait.remote()
            context = serve.get_replica_context()
            return {
                "rank": context.rank.rank if context.rank else None,
                "node_rank": context.rank.node_rank if context.rank else None,
                "local_rank": context.rank.local_rank if context.rank else None,
                "world_size": context.world_size,
            }
    handle = serve.run(AutoscalingRankTracker.bind())
    # Wait for initial replicas
    wait_for_condition(
        lambda: check_rank_assignment_complete("AutoscalingRankTracker", 2),
    )
    initial_ranks = get_replica_ranks("AutoscalingRankTracker")
    assert len(initial_ranks) == 2
    assert check_rank_contiguity(initial_ranks)
    # Send concurrent requests to trigger autoscaling
    _ = [handle.remote() for _ in range(10)]
    # Wait for scale-up to happen and ranks to be reassigned
    wait_for_condition(
        lambda: check_num_replicas_eq("AutoscalingRankTracker", 4, use_controller=True),
        timeout=20,
    )
    # Check that ranks are still contiguous after scale-up
    wait_for_condition(
        lambda: check_rank_assignment_complete("AutoscalingRankTracker", 4),
    )
    scaled_ranks = get_replica_ranks("AutoscalingRankTracker")
    assert len(scaled_ranks) == 4
    assert check_rank_contiguity(scaled_ranks)
    # Release the in-flight requests so the load drops and downscaling starts.
    signal_actor.send.remote()
    # Wait for scale-down (no more load)
    wait_for_condition(
        lambda: check_num_replicas_eq("AutoscalingRankTracker", 2, use_controller=True),
    )
    # Check that ranks are reassigned and contiguous after scale-down
    wait_for_condition(
        lambda: check_rank_assignment_complete("AutoscalingRankTracker", 2),
    )
    final_ranks = get_replica_ranks("AutoscalingRankTracker")
    assert len(final_ranks) == 2
    assert check_rank_contiguity(final_ranks)
def test_rank_persistence_across_controller_restart(serve_instance):
    """Test that ranks are preserved across controller failures."""
    @serve.deployment(num_replicas=3)
    class PersistentRankTracker:
        def __call__(self):
            context = serve.get_replica_context()
            return {
                "rank": context.rank.rank if context.rank else None,
                "world_size": context.world_size,
            }
    serve.run(PersistentRankTracker.bind())
    # Wait for all replicas to be running
    wait_for_condition(
        lambda: check_rank_assignment_complete("PersistentRankTracker", 3),
    )
    # Record initial ranks
    initial_ranks = get_replica_ranks("PersistentRankTracker")
    assert len(initial_ranks) == 3
    assert check_rank_contiguity(initial_ranks)
    # Kill the controller to simulate failure
    # (no_restart=False lets Ray restart the controller automatically).
    controller = get_controller()
    ray.kill(controller, no_restart=False)
    # Wait for controller to be restarted and deployment to be recovered
    wait_for_condition(
        lambda: check_deployment_status(
            "PersistentRankTracker", DeploymentStatus.HEALTHY
        ),
    )
    # Wait for rank assignment to be restored
    wait_for_condition(
        lambda: check_rank_assignment_complete("PersistentRankTracker", 3),
    )
    # Check that ranks are preserved for surviving replicas
    recovered_ranks = get_replica_ranks("PersistentRankTracker")
    assert len(recovered_ranks) == 3
    assert check_rank_contiguity(recovered_ranks)
    # Check that the recovered ranks are the same as the initial ranks
    assert recovered_ranks == initial_ranks
def test_single_replica_deployment(serve_instance):
    """Test rank assignment for single replica deployment."""

    @serve.deployment(num_replicas=1)
    class SingleReplicaTracker:
        def __call__(self):
            context = serve.get_replica_context()
            # Rank may be None briefly before assignment completes.
            return {
                "rank": context.rank.rank if context.rank else None,
                "node_rank": context.rank.node_rank if context.rank else None,
                "local_rank": context.rank.local_rank if context.rank else None,
                "world_size": context.world_size,
            }

    handle = serve.run(SingleReplicaTracker.bind())
    # Wait for deployment
    wait_for_condition(
        lambda: check_rank_assignment_complete("SingleReplicaTracker", 1),
    )
    # Verify single replica has rank 0
    ranks = get_replica_ranks("SingleReplicaTracker")
    assert len(ranks) == 1
    rank_obj = list(ranks.values())[0]
    # A lone replica is rank 0 in every rank space (global/node/local).
    assert rank_obj.rank == 0
    assert rank_obj.node_rank == 0
    assert rank_obj.local_rank == 0
    # Verify API returns correct values for all rank types
    response = handle.remote().result()
    assert response["rank"] == 0
    assert response["node_rank"] == 0
    assert response["local_rank"] == 0
    assert response["world_size"] == 1
def test_multiple_deployments_independent_ranks(serve_instance):
    """Test that different deployments have independent rank spaces."""

    @serve.deployment(name="deployment1", num_replicas=2)
    class RankTracker1:
        def __call__(self):
            context = serve.get_replica_context()
            return {
                "deployment": "deployment1",
                "rank": context.rank.rank if context.rank else None,
                "world_size": context.world_size,
            }

    @serve.deployment(name="deployment2", num_replicas=3)
    class RankTracker2:
        # deployment2 takes a handle to deployment1 so both deploy in one app.
        def __init__(self, rank_tracker1):
            self.rank_tracker1 = rank_tracker1

        def __call__(self):
            context = serve.get_replica_context()
            return {
                "deployment": "deployment2",
                "rank": context.rank.rank if context.rank else None,
                "world_size": context.world_size,
            }

    serve.run(RankTracker2.bind(RankTracker1.bind()))
    # Wait for both deployments
    wait_for_condition(
        lambda: check_rank_assignment_complete("deployment1", 2),
    )
    wait_for_condition(
        lambda: check_rank_assignment_complete("deployment2", 3),
    )
    # Check ranks are independent
    ranks1 = get_replica_ranks("deployment1")
    ranks2 = get_replica_ranks("deployment2")
    assert len(ranks1) == 2
    assert len(ranks2) == 3
    assert check_rank_contiguity(ranks1)
    assert check_rank_contiguity(ranks2)
    # Both should have rank 0 (in their own space)
    ranks1_global = {r.rank for r in ranks1.values()}
    ranks2_global = {r.rank for r in ranks2.values()}
    assert 0 in ranks1_global
    assert 0 in ranks2_global
    assert 1 in ranks1_global
    assert 1 in ranks2_global
    assert 2 in ranks2_global  # Only deployment2 should have rank 2
    handle1 = serve.get_deployment_handle("deployment1", SERVE_DEFAULT_APP_NAME)
    handle2 = serve.get_deployment_handle("deployment2", SERVE_DEFAULT_APP_NAME)
    response1 = handle1.remote().result()
    response2 = handle2.remote().result()
    # world_size reflects each deployment's own replica count.
    assert response1["world_size"] == 2
    assert response2["world_size"] == 3
def test_rank_stability_on_replica_death(serve_instance):
    """Test that when one replica dies, other replicas keep their ranks.

    Kills one randomly chosen replica and verifies that, once the
    deployment is back to full size, every surviving replica still holds
    the rank it had before the failure.
    """

    @serve.deployment(num_replicas=4)
    class StableRankTracker:
        def __call__(self):
            return "hello"

    serve.run(StableRankTracker.bind())
    # Wait for all replicas to be running and have ranks
    wait_for_condition(
        lambda: check_rank_assignment_complete("StableRankTracker", 4),
    )
    # Snapshot rank assignments and replica ids before the failure.
    initial_ranks = get_replica_ranks("StableRankTracker")
    initial_replica_ids = get_running_replica_ids("StableRankTracker")
    assert len(initial_ranks) == 4
    assert check_rank_contiguity(initial_ranks)
    # Kill a randomly chosen replica (choose the id directly instead of
    # picking a random index into the list; previous comment wrongly said
    # "the replica with rank 1" — the victim is random).
    killed_replica_id = random.choice(initial_replica_ids)
    replica_handle = ray.get_actor(
        f"SERVE_REPLICA::default#StableRankTracker#{killed_replica_id}",
        namespace=SERVE_NAMESPACE,
    )
    ray.kill(replica_handle, no_restart=False)

    def _check():
        # The dead replica must be replaced, so the id set changes.
        new_running_replica_ids = get_running_replica_ids("StableRankTracker")
        assert len(new_running_replica_ids) == 4
        assert new_running_replica_ids != initial_replica_ids
        return True

    wait_for_condition(_check, timeout=20)
    # Ranks must still form a contiguous 0..3 assignment.
    final_ranks = get_replica_ranks("StableRankTracker")
    assert len(final_ranks) == 4
    assert check_rank_contiguity(final_ranks)
    # Every replica that was not killed must keep its original rank.
    for replica_id in initial_replica_ids:
        if replica_id != killed_replica_id:
            assert final_ranks[replica_id] == initial_ranks[replica_id]
def test_node_rank_stability_on_replica_death(serve_instance):
    """Test that node_rank and local_rank are correctly maintained when replicas die."""

    @serve.deployment(num_replicas=4)
    class NodeRankStabilityTracker:
        def __call__(self):
            context = serve.get_replica_context()
            if context.rank:
                return {
                    "rank": context.rank.rank,
                    "node_rank": context.rank.node_rank,
                    "local_rank": context.rank.local_rank,
                    "replica_id": context.replica_id.unique_id,
                }
            return None

    handle = serve.run(NodeRankStabilityTracker.bind())
    # Wait for all replicas to be running
    wait_for_condition(
        lambda: check_rank_assignment_complete("NodeRankStabilityTracker", 4),
    )
    # Collect initial rank information by repeating requests until all four
    # distinct replicas have answered (requests are load-balanced, so up to
    # 50 attempts are allowed).
    initial_responses = []
    for _ in range(50):
        response = handle.remote().result()
        if response and response not in initial_responses:
            initial_responses.append(response)
        if len(initial_responses) == 4:
            break
    assert len(initial_responses) == 4
    # Kill a random replica
    random_replica = random.choice(initial_responses)
    killed_replica_id = random_replica["replica_id"]
    replica_handle = ray.get_actor(
        f"SERVE_REPLICA::default#NodeRankStabilityTracker#{killed_replica_id}",
        namespace=SERVE_NAMESPACE,
    )
    ray.kill(replica_handle, no_restart=False)

    # Wait for the replica to be restarted (replaced by a new replica id)
    def _check_replica_restarted():
        replica_ids = get_running_replica_ids("NodeRankStabilityTracker")
        return len(replica_ids) == 4 and killed_replica_id not in replica_ids

    wait_for_condition(_check_replica_restarted, timeout=20)
    # Wait for rank assignment to be complete
    wait_for_condition(
        lambda: check_rank_assignment_complete("NodeRankStabilityTracker", 4),
    )
    # Collect final rank information (same sampling strategy as above)
    final_responses = []
    for _ in range(50):
        response = handle.remote().result()
        if response and response not in final_responses:
            final_responses.append(response)
        if len(final_responses) == 4:
            break
    assert len(final_responses) == 4
    # Create mappings for comparison
    initial_by_replica_id = {r["replica_id"]: r for r in initial_responses}
    final_by_replica_id = {r["replica_id"]: r for r in final_responses}
    # Verify that surviving replicas kept their ranks
    for replica_id in initial_by_replica_id:
        if replica_id != killed_replica_id and replica_id in final_by_replica_id:
            initial = initial_by_replica_id[replica_id]
            final = final_by_replica_id[replica_id]
            # All rank values should be preserved
            assert (
                initial["rank"] == final["rank"]
            ), f"Global rank changed for replica {replica_id}"
            assert (
                initial["node_rank"] == final["node_rank"]
            ), f"Node rank changed for replica {replica_id}"
            assert (
                initial["local_rank"] == final["local_rank"]
            ), f"Local rank changed for replica {replica_id}"
    # Verify all global ranks are still contiguous
    global_ranks = sorted([r["rank"] for r in final_responses])
    assert global_ranks == [0, 1, 2, 3]
def test_user_reconfigure_rank(serve_instance):
    """Test that user can reconfigure the rank of a deployment.

    Fix: the futures list was named ``f`` and then iterated as
    ``{f.result() for f in f}``, shadowing the list with its own loop
    variable; the two nested check helpers also shared the name ``_check``.
    Renamed for clarity — behavior is unchanged.
    """
    signal_actor = SignalActor.remote()

    @serve.deployment(
        num_replicas=4, user_config={"name": "Bob"}, max_ongoing_requests=1
    )
    class ReconfigureRankTracker:
        def __init__(self):
            self.my_rank = "Bob"

        async def __call__(self):
            await signal_actor.wait.remote()
            return self.my_rank

        async def reconfigure(self, user_config: Any, rank: ReplicaRank):
            # rank parameter is actually a ReplicaRank object, extract the integer value
            self.my_rank = rank.rank

    handle = serve.run(ReconfigureRankTracker.bind())
    wait_for_condition(
        lambda: check_rank_assignment_complete("ReconfigureRankTracker", 4),
    )
    # One request per replica; with max_ongoing_requests=1 each replica
    # serves exactly one blocked request.
    futures = [handle.remote() for _ in range(4)]
    wait_for_condition(
        lambda: ray.get(signal_actor.cur_num_waiters.remote()) == 4,
    )
    signal_actor.send.remote(clear=True)

    def _check_initial_ranks():
        assert {fut.result() for fut in futures} == {0, 1, 2, 3}
        return True

    wait_for_condition(_check_initial_ranks)
    # Redeploy with a new user_config; reconfigure() runs again with ranks.
    serve.run(ReconfigureRankTracker.options(user_config={"name": "Alice"}).bind())
    wait_for_condition(
        lambda: check_rank_assignment_complete("ReconfigureRankTracker", 4),
    )
    futures = [handle.remote() for _ in range(4)]
    wait_for_condition(
        lambda: ray.get(signal_actor.cur_num_waiters.remote()) == 4,
    )
    signal_actor.send.remote()

    def _check_ranks_after_reconfigure():
        assert {fut.result() for fut in futures} == {0, 1, 2, 3}
        return True

    wait_for_condition(_check_ranks_after_reconfigure)
def test_user_reconfigure_with_all_rank_fields(serve_instance):
    """Test that reconfigure receives all rank fields (rank, node_rank, local_rank)."""
    signal_actor = SignalActor.remote()

    @serve.deployment(num_replicas=3, max_ongoing_requests=1)
    class AllRanksTracker:
        def __init__(self):
            self.rank_info = None

        async def __call__(self):
            # Block so each replica serves exactly one in-flight request.
            await signal_actor.wait.remote()
            return self.rank_info

        async def reconfigure(self, user_config: Any, rank: ReplicaRank):
            # Store all rank information
            self.rank_info = {
                "rank": rank.rank,
                "node_rank": rank.node_rank,
                "local_rank": rank.local_rank,
            }

    handle = serve.run(AllRanksTracker.bind())
    wait_for_condition(
        lambda: check_rank_assignment_complete("AllRanksTracker", 3),
    )
    # Send requests to all replicas
    futures = [handle.remote() for _ in range(3)]
    wait_for_condition(
        lambda: ray.get(signal_actor.cur_num_waiters.remote()) == 3,
    )
    signal_actor.send.remote()
    # Collect results
    results = [f.result() for f in futures]
    # Verify all replicas received their rank information
    global_ranks = []
    node_ranks = []
    local_ranks = []
    for result in results:
        assert result is not None, "Replica did not receive rank information"
        assert "rank" in result
        assert "node_rank" in result
        assert "local_rank" in result
        # Validate rank values are in expected range
        assert result["rank"] in {0, 1, 2}, f"Invalid global rank: {result['rank']}"
        global_ranks.append(result["rank"])
        node_ranks.append(result["node_rank"])
        local_ranks.append(result["local_rank"])
    # Verify global ranks are unique and complete
    assert set(global_ranks) == {0, 1, 2}
    # Verify node ranks form contiguous sequence starting from 0
    node_ranks_sorted = sorted(set(node_ranks))
    expected_node_ranks = list(range(len(node_ranks_sorted)))
    assert (
        node_ranks_sorted == expected_node_ranks
    ), f"Node ranks not contiguous from 0: {node_ranks_sorted}"
    # Verify local ranks are valid (non-negative and reasonable)
    for local_rank in local_ranks:
        assert local_rank in range(3), f"Invalid local rank: {local_rank}"
# Allow running this test module directly: `python <module>.py`.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", "-s", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/test_replica_ranks.py",
"license": "Apache License 2.0",
"lines": 648,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:release/nightly_tests/dataset/wide_schema_pipeline_benchmark.py | import argparse
from typing import Dict, Any
import ray
from benchmark import Benchmark
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Wide schema pipeline benchmark")
parser.add_argument(
"--data-type",
choices=["primitives", "tensors", "objects", "nested_structs"],
default="primitives",
help="Type of pre-generated dataset to benchmark",
)
return parser.parse_args()
def main(args: argparse.Namespace) -> None:
    """Run the timed wide-schema read benchmark for the selected data type."""
    benchmark = Benchmark()
    # Each dataset contains about 500-600Mbs of data, except for objects,
    # which contain about 150Mb (this is because their pickle bloat is big).
    # Furthermore, the schema contains 5000 fields, and each column contains
    # 500 characters.
    input_path = (
        f"s3://ray-benchmark-data-internal-us-west-2/wide_schema/{args.data_type}"
    )
    print(f"Using pre-generated dataset: {input_path}")

    def run_pipeline() -> Dict[str, Any]:
        """Read the dataset and drain it, returning schema stats."""
        ds = ray.data.read_parquet(input_path)
        # Drain every block without materializing the dataset on the driver.
        for _ in ds.iter_internal_ref_bundles():
            pass
        # Report schema width and which dataset variant was read.
        return {
            "num_columns": len(ds.schema().base_schema),
            "data_type": args.data_type,
            "input_path": input_path,
        }

    # Time the full pipeline and persist the results.
    benchmark.run_fn("wide_schema_pipeline", run_pipeline)
    benchmark.write_result()
if __name__ == "__main__":
    # Parse CLI flags and run the benchmark when executed as a script.
    args = parse_args()
    main(args)
| {
"repo_id": "ray-project/ray",
"file_path": "release/nightly_tests/dataset/wide_schema_pipeline_benchmark.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:release/nightly_tests/dataset/text_embedding/create_dataset.py | import pyarrow as pa
import uuid
import random
import string
import ray
import pyarrow.parquet as pq
from tqdm import tqdm
# Placeholder values used for every metadata column; only span_text varies.
STRING_PLACEHOLDER = ""
UUID_PLACEHOLDER = uuid.UUID(int=0)
INT_PLACEHOLDER = 0
# Target UTF-8 byte length of each generated span_text value.
TARGET_SIZE_BYTES = 4096
# Number of parquet files to write (one Ray task each).
NUM_FILES = 50
# Table schema: opaque metadata columns plus the text span itself.
SCHEMA = pa.schema(
    [
        ("metadata00", pa.string()),
        ("metadata01", pa.list_(pa.binary(16))),
        ("metadata02", pa.string()),
        ("metadata03", pa.uint64()),
        ("metadata04", pa.list_(pa.binary(16))),
        ("metadata05", pa.list_(pa.binary(16))),
        ("metadata06", pa.binary(16)),
        ("metadata07", pa.string()),
        ("metadata08", pa.binary(16)),
        ("metadata09", pa.uint64()),
        ("metadata10", pa.binary(16)),
        ("metadata11", pa.list_(pa.binary(16))),
        ("metadata12", pa.uint64()),
        ("metadata13", pa.uint64()),
        ("metadata14", pa.list_(pa.binary(16))),
        ("span_text", pa.string()),
        ("metadata15", pa.binary(16)),
        ("metadata16", pa.string()),
        ("metadata17", pa.list_(pa.binary(16))),
        ("metadata18", pa.list_(pa.binary(16))),
    ]
)
def random_word(min_len=3, max_len=8):
    """Return a random lowercase ASCII word of length in [min_len, max_len]."""
    word_len = random.randint(min_len, max_len)
    letters = [random.choice(string.ascii_lowercase) for _ in range(word_len)]
    return "".join(letters)
def create_random_sentence():
    """Build a space-separated random sentence of exactly TARGET_SIZE_BYTES bytes.

    Words are appended until the UTF-8 size reaches the target; the result is
    then truncated at the byte boundary and decoded (ignoring any partial
    character — moot here since the words are pure ASCII).
    """
    pieces = []
    byte_count = 0
    while byte_count < TARGET_SIZE_BYTES:
        piece = random_word() + " "  # space between words
        pieces.append(piece)
        byte_count += len(piece.encode("utf-8"))
    # Trim to exact size
    trimmed = "".join(pieces).encode("utf-8")[:TARGET_SIZE_BYTES]
    return trimmed.decode("utf-8", errors="ignore")
def create_row():
    """Return one row dict matching SCHEMA, filled with placeholder values.

    Only ``span_text`` carries real (random) content; ``metadata13`` is null
    roughly 1% of the time to exercise nullable-column handling.
    """
    uuid_bytes = UUID_PLACEHOLDER.bytes
    # Evaluate the 1%-null draw before span_text, matching the original
    # dict-literal evaluation order.
    maybe_null_int = None if random.random() < 0.01 else INT_PLACEHOLDER
    return {
        "metadata00": STRING_PLACEHOLDER,
        "metadata01": [uuid_bytes],
        "metadata02": STRING_PLACEHOLDER,
        "metadata03": INT_PLACEHOLDER,
        "metadata04": [uuid_bytes],
        "metadata05": [uuid_bytes],
        "metadata06": uuid_bytes,
        "metadata07": STRING_PLACEHOLDER,
        "metadata08": uuid_bytes,
        "metadata09": INT_PLACEHOLDER,
        "metadata10": uuid_bytes,
        "metadata11": [uuid_bytes],
        "metadata12": INT_PLACEHOLDER,
        "metadata13": maybe_null_int,
        "metadata14": [uuid_bytes],
        "span_text": create_random_sentence(),
        "metadata15": uuid_bytes,
        "metadata16": STRING_PLACEHOLDER,
        "metadata17": [uuid_bytes],
        "metadata18": [uuid_bytes],
    }
@ray.remote
def write_table(i: int):
    """Generate 20k placeholder rows and write them as one parquet file to S3.

    Args:
        i: File index, used only to name the output object.
    """
    # Comprehension instead of the original append loop (same rows, clearer
    # and avoids the manual-accumulation pattern).
    rows = [create_row() for _ in range(20_000)]
    table = pa.Table.from_pylist(rows, schema=SCHEMA)
    pq.write_table(
        table, f"s3://ray-benchmark-data-internal-us-west-2/text-spans/{i}.parquet"
    )
# Launch all writer tasks in parallel and track completion with a progress bar.
refs = [write_table.remote(i) for i in range(NUM_FILES)]
pbar = tqdm(total=len(refs))
while refs:
    # Block until one more task finishes, then advance the bar.
    ready, refs = ray.wait(refs, num_returns=1)
    pbar.update(len(ready))
pbar.close()
| {
"repo_id": "ray-project/ray",
"file_path": "release/nightly_tests/dataset/text_embedding/create_dataset.py",
"license": "Apache License 2.0",
"lines": 85,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/dashboard/modules/aggregator/tests/test_ray_job_events.py | import base64
import json
import sys
import pytest
import ray
from ray._common.test_utils import wait_for_condition
from ray._private.test_utils import wait_for_dashboard_agent_available
from ray.dashboard.tests.conftest import * # noqa
# Port the local fake events-export HTTP server listens on; must match the
# export address configured via env var in the test below.
_RAY_EVENT_PORT = 12345


@pytest.fixture(scope="session")
def httpserver_listen_address():
    # Bind the pytest-httpserver fixture to the aggregator's export address.
    return ("127.0.0.1", _RAY_EVENT_PORT)
def test_ray_job_events(ray_start_cluster, httpserver):
    """Check that a driver job definition event is exported with correct ids."""
    cluster = ray_start_cluster
    cluster.add_node(
        env_vars={
            # Point the aggregator agent at the local test HTTP server and
            # restrict exports to the driver-job event types under test.
            "RAY_DASHBOARD_AGGREGATOR_AGENT_EVENTS_EXPORT_ADDR": f"http://127.0.0.1:{_RAY_EVENT_PORT}",
            "RAY_DASHBOARD_AGGREGATOR_AGENT_EXPOSABLE_EVENT_TYPES": "DRIVER_JOB_DEFINITION_EVENT,DRIVER_JOB_LIFECYCLE_EVENT",
        },
        _system_config={
            "enable_ray_event": True,
        },
    )
    cluster.wait_for_nodes()
    ray.init(address=cluster.address)
    wait_for_dashboard_agent_available(cluster)

    # Submit a ray job
    @ray.remote
    def f():
        return 1

    ray.get(f.remote())
    # Check that a driver job event with the correct job id is published.
    httpserver.expect_request("/", method="POST").respond_with_data("", status=200)
    wait_for_condition(lambda: len(httpserver.log) >= 1)
    req, _ = httpserver.log[0]
    req_json = json.loads(req.data)
    head_node_id = cluster.head_node.node_id
    # Ids are base64-encoded binary on the wire; compare their hex forms.
    assert base64.b64decode(req_json[0]["nodeId"]).hex() == head_node_id
    assert (
        base64.b64decode(req_json[0]["driverJobDefinitionEvent"]["jobId"]).hex()
        == ray.get_runtime_context().get_job_id()
    )
# Allow running this test module directly with pytest.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/dashboard/modules/aggregator/tests/test_ray_job_events.py",
"license": "Apache License 2.0",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/train/v2/api/report_config.py | from enum import Enum
from ray.util.annotations import PublicAPI
@PublicAPI(stability="alpha")
class CheckpointUploadMode(Enum):
    """The manner in which we want to upload the checkpoint.

    Members:
        ASYNC: Upload checkpoint asynchronously.
        SYNC: Upload checkpoint synchronously.
        NO_UPLOAD: Do not upload checkpoint.
    """

    ASYNC = "ASYNC"
    SYNC = "SYNC"
    NO_UPLOAD = "NO_UPLOAD"

    def _default_delete_local_checkpoint_after_upload(self) -> bool:
        # Only async uploads delete the local copy by default; enum members
        # are singletons, so identity comparison is equivalent to equality.
        return self is CheckpointUploadMode.ASYNC
@PublicAPI(stability="alpha")
class CheckpointConsistencyMode(Enum):
    """Read semantics for checkpoint retrieval during an ongoing run.

    Members:
        COMMITTED: Block until the checkpoint from the latest ray.train.report
            has been uploaded and committed.
        VALIDATED: Block until the checkpoint from the latest ray.train.report
            has been uploaded and validated.
    """

    # Values mirror the member names so they round-trip cleanly through
    # lookups by value and plain-string configuration.
    COMMITTED = "COMMITTED"
    VALIDATED = "VALIDATED"
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/api/report_config.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:python/ray/train/v2/tests/test_worker.py | import queue
import time
from unittest.mock import create_autospec
import pytest
from ray.actor import ActorHandle
from ray.train.v2._internal.constants import ENABLE_WORKER_STRUCTURED_LOGGING_ENV_VAR
from ray.train.v2._internal.execution.context import (
DistributedContext,
TrainRunContext,
get_train_context,
)
from ray.train.v2._internal.execution.storage import StorageContext
from ray.train.v2._internal.execution.worker_group.worker import RayTrainWorker
from ray.train.v2._internal.util import ObjectRefWrapper
@pytest.mark.parametrize("created_nested_threads", [True, False])
def test_worker_finished_after_all_threads_finish(monkeypatch, created_nested_threads):
    """Worker status must not report finished until nested threads complete."""
    # Disable this to avoid TypeError from logging MagicMock
    monkeypatch.setenv(ENABLE_WORKER_STRUCTURED_LOGGING_ENV_VAR, False)
    # Initialize RayTrainWorker state with autospec'd collaborators.
    worker = RayTrainWorker()
    worker.init_train_context(
        train_run_context=create_autospec(TrainRunContext, instance=True),
        distributed_context=DistributedContext(
            world_rank=0,
            world_size=1,
            local_rank=0,
            local_world_size=1,
            node_rank=0,
        ),
        synchronization_actor=create_autospec(ActorHandle, instance=True),
        storage_context=create_autospec(StorageContext, instance=True),
        worker_callbacks=[],
        controller_actor=create_autospec(ActorHandle, instance=True),
    )
    global_queue = queue.Queue()

    def train_fn():
        tc = get_train_context()

        def target():
            # Intentionally sleep longer than poll interval to test that we wait
            # for nested threads to finish
            time.sleep(0.1)
            global_queue.put("nested")

        if created_nested_threads:
            tc.checkpoint_upload_threadpool.submit(target)
        else:
            global_queue.put("main")

    # Run train fn and wait for it to finish
    train_fn_ref = create_autospec(ObjectRefWrapper, instance=True)
    train_fn_ref.get.return_value = train_fn
    worker.run_train_fn(train_fn_ref)
    while worker.poll_status().running:
        time.sleep(0.01)
    # Verify queue contents: by the time the worker reports not-running, the
    # nested thread's item (if any) must already be in the queue.
    queue_contents = []
    while not global_queue.empty():
        queue_contents.append(global_queue.get())
    if created_nested_threads:
        assert queue_contents == ["nested"]
    else:
        assert queue_contents == ["main"]
# Allow running this test module directly with pytest (fail fast with -x).
if __name__ == "__main__":
    import sys

    sys.exit(pytest.main(["-v", "-x", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/tests/test_worker.py",
"license": "Apache License 2.0",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/serve/_private/benchmarks/locust_utils.py | import argparse
import logging
import time
from dataclasses import asdict, dataclass
from typing import Any, Dict, List
from ray.serve._private.utils import generate_request_id
logger = logging.getLogger(__file__)
logging.basicConfig(level=logging.INFO)
# Port the Locust master listens on for worker connections.
MASTER_PORT = 5557
@dataclass
class LocustStage:
    """One stage of a staged Locust load test."""

    duration_s: int  # how long this stage runs, in seconds
    users: int  # target number of concurrent users
    spawn_rate: float  # users spawned per second while ramping


@dataclass
class PerformanceStats:
    """Latency percentiles (ms) and throughput for one measurement window."""

    p50_latency: float
    p90_latency: float
    p99_latency: float
    rps: float


@dataclass
class LocustTestResults:
    """Aggregated results for a whole load-test run."""

    history: List[Dict]  # periodic stats snapshots from the master runner
    total_requests: int
    num_failures: int
    avg_latency: float
    p50_latency: float
    p90_latency: float
    p99_latency: float
    avg_rps: float
    stats_in_stages: List[PerformanceStats]  # one snapshot per finished stage


@dataclass
class FailedRequest:
    """Details of a single failed HTTP request."""

    request_id: str
    status_code: int
    exception: str
    response_time_ms: float
    start_time_s: float
class LocustClient:
    """Builds a Locust FastHttpUser class targeting ``host_url``.

    Failed requests are accumulated in ``self.errors`` and per-stage
    performance snapshots in ``self.stats_in_stages``.
    """

    def __init__(
        self,
        host_url: str,
        token: str,
        data: Dict[str, Any] = None,
    ):
        # Locust is imported lazily so importing this module doesn't
        # require locust to be installed.
        from locust import FastHttpUser, constant, events, task
        from locust.contrib.fasthttp import FastResponse

        self.errors = []
        self.stats_in_stages: List[PerformanceStats] = []

        class EndpointUser(FastHttpUser):
            wait_time = constant(0)  # fire requests back-to-back
            failed_requests = []
            host = host_url

            @task
            def test(self):
                # Tag each request with a unique id so failures are traceable.
                request_id = generate_request_id()
                headers = (
                    {"Authorization": f"Bearer {token}", "X-Request-ID": request_id}
                    if token
                    else None
                )
                with self.client.get(
                    "", headers=headers, json=data, catch_response=True
                ) as r:
                    r.request_meta["context"]["request_id"] = request_id

        @events.request.add_listener
        def on_request(
            response: FastResponse,
            exception,
            context,
            start_time: float,
            response_time: float,
            **kwargs,
        ):
            # Fix: ``request_id`` was only assigned inside the exception
            # branch, so a non-zero status without an exception could hit an
            # unbound local. Hoist the lookup so both branches can use it.
            request_id = context["request_id"]
            if exception and response.status_code != 0:
                print(
                    f"Request '{request_id}' failed with exception:\n"
                    f"{exception}\n{response.text}"
                )
            if response.status_code != 0:
                response.encoding = "utf-8"
                err = FailedRequest(
                    request_id=request_id,
                    status_code=response.status_code,
                    exception=response.text,
                    response_time_ms=response_time,
                    start_time_s=start_time,
                )
                self.errors.append(err)
                print(
                    f"Request '{request_id}' failed with exception:\n"
                    f"{exception}\n{response.text}"
                )

        self.user_class = EndpointUser
def on_stage_finished(master_runner, stats_in_stages):
    """Snapshot current GET-endpoint latency/RPS stats at a stage boundary."""
    entry = master_runner.stats.entries.get(("", "GET"))
    snapshot = PerformanceStats(
        p50_latency=entry.get_current_response_time_percentile(0.5),
        p90_latency=entry.get_current_response_time_percentile(0.9),
        p99_latency=entry.get_current_response_time_percentile(0.99),
        rps=entry.current_rps,
    )
    stats_in_stages.append(snapshot)
def run_locust_worker(
    master_address: str, host_url: str, token: str, data: Dict[str, Any]
):
    """Start a Locust worker, connect it to the master, and run until quit.

    Raises:
        RuntimeError: if any request failed during the run.
    """
    import locust
    from locust.env import Environment
    from locust.log import setup_logging

    setup_logging("INFO")
    client = LocustClient(host_url=host_url, token=token, data=data)
    worker_env = Environment(user_classes=[client.user_class], events=locust.events)
    worker_runner = worker_env.create_worker_runner(
        master_host=master_address, master_port=MASTER_PORT
    )
    # Block until the master sends the quit signal.
    worker_runner.greenlet.join()
    if client.errors:
        raise RuntimeError(f"There were {len(client.errors)} errors: {client.errors}")
def run_locust_master(
    host_url: str,
    token: str,
    expected_num_workers: int,
    stages: List[LocustStage],
    wait_for_workers_timeout_s: float,
):
    """Run the Locust master: wait for workers, drive the staged load shape,
    and return aggregated results (``LocustTestResults``) as a dict.

    Raises:
        RuntimeError: if workers don't connect within the timeout.
    """
    import gevent
    import locust
    from locust import LoadTestShape
    from locust.env import Environment
    from locust.stats import (
        get_error_report_summary,
        get_percentile_stats_summary,
        get_stats_summary,
        stats_history,
        stats_printer,
    )

    client = LocustClient(host_url, token)

    class StagesShape(LoadTestShape):
        # Index of the stage currently being executed.
        curr_stage_ix = 0

        # NOTE(review): the first parameter is named ``cls`` but tick() is an
        # instance method — ``cls`` is actually the shape instance.
        def tick(cls):
            run_time = cls.get_run_time()
            prefix_time = 0
            for i, stage in enumerate(stages):
                prefix_time += stage.duration_s
                if run_time < prefix_time:
                    # Crossing into a new stage: snapshot the finished one.
                    # ``master_runner`` resolves lazily — tick() only runs
                    # after the assignment below.
                    if i != cls.curr_stage_ix:
                        on_stage_finished(master_runner, client.stats_in_stages)
                        cls.curr_stage_ix = i
                    current_stage = stages[cls.curr_stage_ix]
                    return current_stage.users, current_stage.spawn_rate
            # End of stage test
            # (falling through returns None, which tells Locust to stop).
            on_stage_finished(master_runner, client.stats_in_stages)

    master_env = Environment(
        user_classes=[client.user_class],
        shape_class=StagesShape(),
        events=locust.events,
    )
    master_runner = master_env.create_master_runner("*", MASTER_PORT)
    # Wait (with timeout) for all worker processes to connect.
    start = time.time()
    while len(master_runner.clients.ready) < expected_num_workers:
        if time.time() - start > wait_for_workers_timeout_s:
            raise RuntimeError(
                f"Timed out waiting for {expected_num_workers} workers to "
                "connect to Locust master."
            )
        print(
            f"Waiting for workers to be ready, "
            f"{len(master_runner.clients.ready)} "
            f"of {expected_num_workers} ready."
        )
        time.sleep(1)
    # Periodically output current stats (each entry is aggregated
    # stats over the past 10 seconds, by default)
    gevent.spawn(stats_printer(master_env.stats))
    gevent.spawn(stats_history, master_runner)
    # Start test & wait for the shape test to finish
    master_runner.start_shape()
    master_runner.shape_greenlet.join()
    # Send quit signal to all locust workers
    master_runner.quit()
    # Print stats
    for line in get_stats_summary(master_runner.stats, current=False):
        print(line)
    # Print percentile stats
    for line in get_percentile_stats_summary(master_runner.stats):
        print(line)
    # Print error report
    if master_runner.stats.errors:
        for line in get_error_report_summary(master_runner.stats):
            print(line)
    # Aggregate overall stats for the GET endpoint into the result payload.
    stats_entry_key = ("", "GET")
    stats_entry = master_runner.stats.entries.get(stats_entry_key)
    results = LocustTestResults(
        history=master_runner.stats.history,
        total_requests=master_runner.stats.num_requests,
        num_failures=master_runner.stats.num_failures,
        avg_latency=stats_entry.avg_response_time,
        p50_latency=stats_entry.get_response_time_percentile(0.5),
        p90_latency=stats_entry.get_response_time_percentile(0.9),
        p99_latency=stats_entry.get_response_time_percentile(0.99),
        avg_rps=stats_entry.total_rps,
        stats_in_stages=client.stats_in_stages,
    )
    return asdict(results)
def main():
    """CLI entry point: run as Locust master or worker based on --worker-type."""
    import json

    parser = argparse.ArgumentParser()
    parser.add_argument("--worker-type", type=str, required=True)
    parser.add_argument("--host-url", type=str, required=True)
    parser.add_argument("--token", type=str, required=True)
    parser.add_argument("--master-address", type=str, required=False)
    parser.add_argument("--expected-num-workers", type=int, required=False)
    parser.add_argument("--stages", type=str, required=False)
    parser.add_argument("--wait-for-workers-timeout-s", type=float, required=False)
    # Fix: the worker branch previously read ``args.data`` although no
    # ``--data`` argument was defined, raising AttributeError. The payload
    # is passed as a JSON object string.
    parser.add_argument("--data", type=str, required=False, default=None)
    args = parser.parse_args()
    host_url = args.host_url
    token = args.token
    if args.worker_type == "master":
        # ``--stages`` arrives as a JSON list of {duration_s, users,
        # spawn_rate} dicts; run_locust_master expects LocustStage objects.
        stages = [LocustStage(**stage) for stage in json.loads(args.stages)]
        results = run_locust_master(
            host_url,
            token,
            args.expected_num_workers,
            stages,
            args.wait_for_workers_timeout_s,
        )
    else:
        data = json.loads(args.data) if args.data else None
        results = run_locust_worker(args.master_address, host_url, token, data)
    print(results)
# Script entry point: dispatch to master or worker mode via CLI flags.
if __name__ == "__main__":
    main()
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/_private/benchmarks/locust_utils.py",
"license": "Apache License 2.0",
"lines": 232,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/_common/tests/test_filters.py | import logging
import logging.config
import sys
from concurrent.futures import ThreadPoolExecutor
import pytest
import ray
from ray._common.filters import CoreContextFilter
class TestCoreContextFilter:
    """Tests that CoreContextFilter attaches the right Ray context fields to
    log records in driver, task, actor, and actor-thread processes.

    Fix: the local variable holding the filter instance was named ``filter``
    in every method, shadowing the builtin; renamed to ``core_filter``.
    """

    def test_driver_process(self, shutdown_only):
        """Driver records carry job/worker/node ids only after ray.init()."""
        log_context = ["job_id", "worker_id", "node_id"]
        core_filter = CoreContextFilter()
        record = logging.makeLogRecord({})
        assert core_filter.filter(record)
        # Ray is not initialized so no context except PID which should be available
        for attr in log_context:
            assert not hasattr(record, attr)
        # PID should be available even when Ray is not initialized
        assert hasattr(record, "process")
        assert hasattr(record, "_ray_timestamp_ns")
        ray.init()
        record = logging.makeLogRecord({})
        assert core_filter.filter(record)
        runtime_context = ray.get_runtime_context()
        expected_values = {
            "job_id": runtime_context.get_job_id(),
            "worker_id": runtime_context.get_worker_id(),
            "node_id": runtime_context.get_node_id(),
            "process": record.process,
        }
        for attr in log_context:
            assert hasattr(record, attr)
            assert getattr(record, attr) == expected_values[attr]
        # This is not a worker process, so actor_id and task_id should not exist.
        for attr in ["actor_id", "task_id"]:
            assert not hasattr(record, attr)
        assert hasattr(record, "_ray_timestamp_ns")

    def test_task_process(self, shutdown_only):
        """Task records additionally carry task id/name/function name."""

        @ray.remote
        def f():
            core_filter = CoreContextFilter()
            record = logging.makeLogRecord({})
            assert core_filter.filter(record)
            should_exist = ["job_id", "worker_id", "node_id", "task_id", "process"]
            runtime_context = ray.get_runtime_context()
            expected_values = {
                "job_id": runtime_context.get_job_id(),
                "worker_id": runtime_context.get_worker_id(),
                "node_id": runtime_context.get_node_id(),
                "task_id": runtime_context.get_task_id(),
                "task_name": runtime_context.get_task_name(),
                "task_func_name": runtime_context.get_task_function_name(),
                "process": record.process,
            }
            for attr in should_exist:
                assert hasattr(record, attr)
                assert getattr(record, attr) == expected_values[attr]
            # A plain task runs outside an actor, so no actor fields.
            assert not hasattr(record, "actor_id")
            assert not hasattr(record, "actor_name")
            assert hasattr(record, "_ray_timestamp_ns")

        obj_ref = f.remote()
        ray.get(obj_ref)

    def test_actor_process(self, shutdown_only):
        """Actor-method records carry the actor id on top of the task fields."""

        @ray.remote
        class A:
            def f(self):
                core_filter = CoreContextFilter()
                record = logging.makeLogRecord({})
                assert core_filter.filter(record)
                should_exist = [
                    "job_id",
                    "worker_id",
                    "node_id",
                    "actor_id",
                    "task_id",
                    "process",
                ]
                runtime_context = ray.get_runtime_context()
                expected_values = {
                    "job_id": runtime_context.get_job_id(),
                    "worker_id": runtime_context.get_worker_id(),
                    "node_id": runtime_context.get_node_id(),
                    "actor_id": runtime_context.get_actor_id(),
                    "actor_name": runtime_context.get_actor_name(),
                    "task_id": runtime_context.get_task_id(),
                    "task_name": runtime_context.get_task_name(),
                    "task_func_name": runtime_context.get_task_function_name(),
                    "process": record.process,
                }
                for attr in should_exist:
                    assert hasattr(record, attr)
                    assert getattr(record, attr) == expected_values[attr]
                assert hasattr(record, "_ray_timestamp_ns")
                # Record should not have the attribute with a value of an empty string.
                assert runtime_context.get_actor_name() == ""
                assert not hasattr(record, "actor_name")

        actor = A.remote()
        ray.get(actor.f.remote())

    def test_actor_process_with_thread(self, shutdown_only):
        """Context fields survive when filtering happens in a worker thread."""

        @ray.remote
        class MockedRayDataWorker:
            def _check_log_record_in_thread(self):
                core_filter = CoreContextFilter()
                record = logging.makeLogRecord({})
                assert core_filter.filter(record)
                should_exist = [
                    "job_id",
                    "worker_id",
                    "node_id",
                    "actor_id",
                    "task_id",
                    "process",
                ]
                runtime_context = ray.get_runtime_context()
                expected_values = {
                    "job_id": runtime_context.get_job_id(),
                    "worker_id": runtime_context.get_worker_id(),
                    "node_id": runtime_context.get_node_id(),
                    "actor_id": runtime_context.get_actor_id(),
                    "task_id": runtime_context.get_task_id(),
                    "process": record.process,
                }
                for attr in should_exist:
                    assert hasattr(record, attr)
                    assert getattr(record, attr) == expected_values[attr]
                assert hasattr(record, "_ray_timestamp_ns")
                # Record should not have the attribute with a value of an empty string.
                assert runtime_context.get_actor_name() == ""
                assert not hasattr(record, "actor_name")
                assert runtime_context.get_task_name() == ""
                assert not hasattr(record, "task_name")
                assert runtime_context.get_task_function_name() == ""
                # NOTE(review): other methods check "task_func_name"; this one
                # checks "task_function_name" — confirm which attribute name
                # the filter actually sets; as written this assert is vacuous.
                assert not hasattr(record, "task_function_name")
                return record

            def map(self):
                with ThreadPoolExecutor(max_workers=1) as executor:
                    executor.submit(self._check_log_record_in_thread).result()

        actor = MockedRayDataWorker.remote()
        ray.get(actor.map.remote())
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/_common/tests/test_filters.py",
"license": "Apache License 2.0",
"lines": 140,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/_common/tests/test_formatters.py | import json
import logging
import logging.config
import sys
import pytest
from ray._common.formatters import JSONFormatter, TextFormatter
class TestJSONFormatter:
def test_empty_record(self, shutdown_only):
formatter = JSONFormatter()
record = logging.makeLogRecord({})
formatted = formatter.format(record)
record_dict = json.loads(formatted)
should_exist = [
"process",
"asctime",
"levelname",
"message",
"filename",
"lineno",
"timestamp_ns",
]
for key in should_exist:
assert key in record_dict
assert len(record_dict) == len(should_exist)
assert "exc_text" not in record_dict
def test_record_with_exception(self, shutdown_only):
formatter = JSONFormatter()
record = logging.makeLogRecord({})
try:
raise ValueError("test")
except ValueError:
record.exc_info = sys.exc_info()
formatted = formatter.format(record)
record_dict = json.loads(formatted)
should_exist = [
"process",
"asctime",
"levelname",
"message",
"filename",
"lineno",
"exc_text",
"timestamp_ns",
]
for key in should_exist:
assert key in record_dict
assert "Traceback (most recent call last):" in record_dict["exc_text"]
assert len(record_dict) == len(should_exist)
def test_record_with_user_provided_context(self, shutdown_only):
formatter = JSONFormatter()
record = logging.makeLogRecord({"user": "ray"})
formatted = formatter.format(record)
record_dict = json.loads(formatted)
should_exist = [
"process",
"asctime",
"levelname",
"message",
"filename",
"lineno",
"user",
"timestamp_ns",
]
for key in should_exist:
assert key in record_dict
assert record_dict["user"] == "ray"
assert len(record_dict) == len(should_exist)
assert "exc_text" not in record_dict
def test_record_with_flatten_keys_invalid_value(self, shutdown_only):
formatter = JSONFormatter()
record = logging.makeLogRecord({"ray_serve_extra_fields": "not_a_dict"})
with pytest.raises(ValueError):
formatter.format(record)
def test_record_with_flatten_keys_valid_dict(self, shutdown_only):
formatter = JSONFormatter()
record = logging.makeLogRecord(
{"ray_serve_extra_fields": {"key1": "value1", "key2": 2}}
)
formatted = formatter.format(record)
record_dict = json.loads(formatted)
should_exist = [
"process",
"asctime",
"levelname",
"message",
"filename",
"lineno",
"key1",
"key2",
"timestamp_ns",
]
for key in should_exist:
assert key in record_dict
assert record_dict["key1"] == "value1", record_dict
assert record_dict["key2"] == 2
assert "ray_serve_extra_fields" not in record_dict
assert len(record_dict) == len(should_exist)
assert "exc_text" not in record_dict
def test_record_with_valid_additional_log_standard_attrs(self, shutdown_only):
formatter = JSONFormatter()
formatter.set_additional_log_standard_attrs(["name"])
record = logging.makeLogRecord({})
formatted = formatter.format(record)
record_dict = json.loads(formatted)
should_exist = [
"process",
"asctime",
"levelname",
"message",
"filename",
"lineno",
"timestamp_ns",
"name",
]
for key in should_exist:
assert key in record_dict
assert len(record_dict) == len(should_exist)
class TestTextFormatter:
    """Tests for the plain-text log formatter."""

    def test_record_with_user_provided_context(self):
        # Extra dict keys passed to makeLogRecord should surface as
        # key=value pairs in the formatted output.
        record = logging.makeLogRecord({"user": "ray"})
        output = TextFormatter().format(record)
        assert "user=ray" in output

    def test_record_with_exception(self):
        record = logging.LogRecord(
            name="test_logger",
            level=logging.INFO,
            pathname="test.py",
            lineno=1000,
            msg="Test message",
            args=None,
            exc_info=None,
        )
        output = TextFormatter().format(record)
        # Level, message, location, and the separator must all be present.
        expected_fragments = ["INFO", "Test message", "test.py:1000", "--"]
        for fragment in expected_fragments:
            assert fragment in output

    def test_record_with_valid_additional_log_standard_attrs(self, shutdown_only):
        formatter = TextFormatter()
        formatter.set_additional_log_standard_attrs(["name"])
        output = formatter.format(logging.makeLogRecord({}))
        assert "name=" in output
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/_common/tests/test_formatters.py",
"license": "Apache License 2.0",
"lines": 145,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/_internal/operator_event_exporter.py | """Exporter API for Ray Data operator events."""
import logging
import os
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Any, Optional
import ray
from ray._private.event.export_event_logger import (
EventLogType,
check_export_api_enabled,
get_export_event_logger,
)
logger = logging.getLogger(__name__)
@dataclass
class OperatorEvent:
    """Represents a Ray Data operator event, such as an issue-detection event.

    Attributes:
        dataset_id: The id of the dataset.
        operator_id: The id of the operator within the DAG structure, typically
            incorporating a position or index (e.g., "ReadParquet_0")
        operator_name: The name of the operator.
        event_time: The timestamp when the event is emitted (in seconds since epoch).
        event_type: The type of the event.
        message: The content of the event message.
    """

    dataset_id: str
    operator_id: str
    operator_name: str
    event_time: float
    # Must be a valid name of the DatasetOperatorEventType protobuf enum;
    # operator_event_to_proto maps it back via ``...EventType.Value()``.
    event_type: str
    message: str
def operator_event_to_proto(operator_event: OperatorEvent) -> Any:
    """Convert the operator event to a protobuf message.

    Args:
        operator_event: OperatorEvent object containing the event details

    Returns:
        The protobuf message representing the operator event.
    """
    # Imported lazily so the generated-proto module is only loaded when
    # events are actually converted.
    from ray.core.generated.export_dataset_operator_event_pb2 import (
        ExportDatasetOperatorEventData as ProtoOperatorEventData,
    )

    # The event type travels as a string enum name; map it back to the
    # protobuf enum value (raises ValueError for unknown names).
    event_type_value = ProtoOperatorEventData.DatasetOperatorEventType.Value(
        operator_event.event_type
    )
    return ProtoOperatorEventData(
        dataset_id=operator_event.dataset_id,
        operator_id=operator_event.operator_id,
        operator_name=operator_event.operator_name,
        event_time=operator_event.event_time,
        event_type=event_type_value,
        message=operator_event.message,
    )
def format_export_issue_event_name(issue_name: str) -> str:
    """Build the export event-type name for a detected issue.

    The issue name is upper-cased, spaces become underscores, and the result
    is prefixed with ``ISSUE_DETECTION_``.
    """
    normalized = issue_name.upper().replace(" ", "_")
    return f"ISSUE_DETECTION_{normalized}"
def get_operator_event_exporter() -> Optional["OperatorEventExporter"]:
    """Get the operator event exporter instance.

    Returns:
        The operator event exporter instance, or None when the export API is
        not enabled (``create_if_enabled`` is documented to return None in
        that case, so the previous non-Optional annotation was wrong).
    """
    return LoggerOperatorEventExporter.create_if_enabled()
class OperatorEventExporter(ABC):
    """Abstract base class for operator event exporters.

    Implementations of this interface can export Ray Data operator events to
    various destinations like log files, databases, or monitoring systems.
    """

    @abstractmethod
    def export_operator_event(self, operator_event: OperatorEvent) -> None:
        """Export operator event to the destination.

        Args:
            operator_event: OperatorEvent object containing operator event details.
        """
        pass

    @classmethod
    @abstractmethod
    def create_if_enabled(cls) -> Optional["OperatorEventExporter"]:
        """Create an event exporter instance if the export functionality is enabled.

        Returns:
            An event exporter instance if enabled, None otherwise.
        """
        pass
class LoggerOperatorEventExporter(OperatorEventExporter):
    """Operator event exporter implementation that uses the Ray export event logger.

    This exporter writes operator events to log files using Ray's export
    event system.
    """

    def __init__(self, logger: logging.Logger):
        """Initialize with a configured export event logger.

        Args:
            logger: The export event logger to use for writing events.
        """
        self._export_logger = logger

    def export_operator_event(self, operator_event: OperatorEvent) -> None:
        """Export operator event using the export event logger.

        Args:
            operator_event: OperatorEvent object containing operator event details.
        """
        operator_event_proto = operator_event_to_proto(operator_event)
        self._export_logger.send_event(operator_event_proto)

    @classmethod
    def create_if_enabled(cls) -> Optional["LoggerOperatorEventExporter"]:
        """Create a logger-based exporter if the export API is enabled.

        Returns:
            A LoggerOperatorEventExporter instance, None otherwise.
        """
        from ray.core.generated.export_event_pb2 import ExportEvent

        is_operator_event_export_api_enabled = check_export_api_enabled(
            ExportEvent.SourceType.EXPORT_DATASET_OPERATOR_EVENT
        )
        if not is_operator_event_export_api_enabled:
            # The export API is not enabled, so we shouldn't create an exporter
            return None
        log_directory = os.path.join(
            ray._private.worker._global_node.get_session_dir_path(), "logs"
        )
        try:
            # BUGFIX: this local must NOT be named ``logger``. Assigning to
            # ``logger`` made the name function-local, so when
            # ``get_export_event_logger`` raised, the ``logger.exception``
            # call below hit UnboundLocalError and masked the real error.
            export_logger = get_export_event_logger(
                EventLogType.DATASET_OPERATOR_EVENT,
                log_directory,
            )
            return LoggerOperatorEventExporter(export_logger)
        except Exception:
            logger.exception(
                "Unable to initialize the export event logger, so no operator export "
                "events will be written."
            )
            return None
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/operator_event_exporter.py",
"license": "Apache License 2.0",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/_common/tests/test_deprecation.py | import sys
from unittest.mock import patch
import pytest
from ray._common.deprecation import (
DEPRECATED_VALUE,
Deprecated,
deprecation_warning,
)
def test_deprecation_warning_warn():
with patch("ray._common.deprecation.logger.warning") as mock_warning:
deprecation_warning("old_feature", "new_feature")
mock_warning.assert_called_once()
args, _ = mock_warning.call_args
assert (
"DeprecationWarning: `old_feature` has been deprecated. Use `new_feature` instead."
in args[0]
)
def test_deprecation_warning_error():
with pytest.raises(ValueError) as excinfo:
deprecation_warning("old_feature", error=True)
assert "`old_feature` has been deprecated." in str(excinfo.value)
def test_deprecated_decorator_function():
with patch("ray._common.deprecation.logger.warning") as mock_warning, patch(
"ray._common.deprecation.log_once"
) as mock_log_once:
mock_log_once.return_value = True
@Deprecated(old="old_func", new="new_func", error=False)
def old_func():
return "result"
result = old_func()
assert result == "result"
mock_warning.assert_called_once()
def test_deprecated_decorator_class():
with patch("ray._common.deprecation.logger.warning") as mock_warning, patch(
"ray._common.deprecation.log_once"
) as mock_log_once:
mock_log_once.return_value = True
@Deprecated(old="OldClass", new="NewClass", error=False)
class OldClass:
pass
instance = OldClass()
assert isinstance(instance, OldClass)
mock_warning.assert_called_once()
def test_deprecated_decorator_method():
with patch("ray._common.deprecation.logger.warning") as mock_warning, patch(
"ray._common.deprecation.log_once"
) as mock_log_once:
mock_log_once.return_value = True
class MyClass:
@Deprecated(old="old_method", new="new_method", error=False)
def old_method(self):
return "method_result"
instance = MyClass()
result = instance.old_method()
assert result == "method_result"
mock_warning.assert_called_once()
def test_deprecated_decorator_error():
with patch("ray._common.deprecation.log_once") as mock_log_once:
mock_log_once.return_value = True
@Deprecated(old="old_func", error=True)
def old_func():
pass
with pytest.raises(ValueError):
old_func()
def test_deprecated_value_constant():
assert (
DEPRECATED_VALUE == -1
), f"DEPRECATED_VALUE should be -1, but got {DEPRECATED_VALUE}"
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/_common/tests/test_deprecation.py",
"license": "Apache License 2.0",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/tests/test_raylet_fault_tolerance.py | import json
import os
import sys
import pytest
import ray
from ray._common.test_utils import SignalActor, wait_for_condition
from ray._private.test_utils import (
RPC_FAILURE_MAP,
RPC_FAILURE_TYPES,
)
from ray.core.generated import autoscaler_pb2
from ray.exceptions import GetTimeoutError, TaskCancelledError
from ray.util.placement_group import placement_group, remove_placement_group
from ray.util.scheduling_strategies import (
NodeAffinitySchedulingStrategy,
PlacementGroupSchedulingStrategy,
)
import psutil
@pytest.mark.parametrize("deterministic_failure", RPC_FAILURE_TYPES)
def test_request_worker_lease_idempotent(
monkeypatch, shutdown_only, deterministic_failure, ray_start_cluster
):
failure = RPC_FAILURE_MAP[deterministic_failure].copy()
failure["num_failures"] = 1
monkeypatch.setenv(
"RAY_testing_rpc_failure",
json.dumps({"NodeManagerService.grpc_client.RequestWorkerLease": failure}),
)
@ray.remote
def simple_task_1():
return 0
@ray.remote
def simple_task_2():
return 1
# Spin up a two-node cluster where we're targeting scheduling on the
# remote node via NodeAffinitySchedulingStrategy to test remote RequestWorkerLease
# calls.
cluster = ray_start_cluster
remote_node = cluster.add_node(num_cpus=1)
result_ref1 = simple_task_1.options(
scheduling_strategy=NodeAffinitySchedulingStrategy(
node_id=remote_node.node_id, soft=False
)
).remote()
result_ref2 = simple_task_2.options(
scheduling_strategy=NodeAffinitySchedulingStrategy(
node_id=remote_node.node_id, soft=False
)
).remote()
assert ray.get([result_ref1, result_ref2]) == [0, 1]
def test_drain_node_idempotent(monkeypatch, shutdown_only, ray_start_cluster):
# NOTE: not testing response failure since the node is already marked as draining and shuts down gracefully.
monkeypatch.setenv(
"RAY_testing_rpc_failure",
json.dumps(
{
"NodeManagerService.grpc_client.DrainRaylet": {
"num_failures": 1,
"req_failure_prob": 100,
"resp_failure_prob": 0,
"in_flight_failure_prob": 0,
}
}
),
)
cluster = ray_start_cluster
worker_node = cluster.add_node(num_cpus=1)
ray.init(address=cluster.address)
worker_node_id = worker_node.node_id
gcs_client = ray._raylet.GcsClient(address=cluster.address)
is_accepted = gcs_client.drain_node(
worker_node_id,
autoscaler_pb2.DrainNodeReason.DRAIN_NODE_REASON_IDLE_TERMINATION,
"Test drain",
0,
)
assert is_accepted
# After drain is accepted on an idle node since no tasks are running nor primary objects kept
# on that raylet, it should be marked idle and gracefully shut down.
def node_is_dead():
nodes = ray.nodes()
for node in nodes:
if node["NodeID"] == worker_node_id:
return not node["Alive"]
return True
wait_for_condition(node_is_dead, timeout=1)
# Bundles can be leaked if the gcs dies before the CancelResourceReserve RPCs are
# propagated to all the raylets. Since this is inherently racy, we block CancelResourceReserve RPCs
# from ever succeeding to make this test deterministic.
@pytest.fixture
def inject_release_unused_bundles_rpc_failure(monkeypatch, request):
deterministic_failure = request.param
failure = RPC_FAILURE_MAP[deterministic_failure].copy()
failure["num_failures"] = 1
monkeypatch.setenv(
"RAY_testing_rpc_failure",
json.dumps(
{
"NodeManagerService.grpc_client.ReleaseUnusedBundles": failure,
"NodeManagerService.grpc_client.CancelResourceReserve": {
"num_failures": -1,
"req_failure_prob": 100,
"resp_failure_prob": 0,
"in_flight_failure_prob": 0,
},
}
),
)
@pytest.mark.parametrize(
"inject_release_unused_bundles_rpc_failure",
RPC_FAILURE_TYPES,
indirect=True,
)
@pytest.mark.parametrize(
"ray_start_cluster_head_with_external_redis",
[{"num_cpus": 1}],
indirect=True,
)
def test_release_unused_bundles_idempotent(
inject_release_unused_bundles_rpc_failure,
ray_start_cluster_head_with_external_redis,
):
cluster = ray_start_cluster_head_with_external_redis
@ray.remote(num_cpus=1)
def task():
return "success"
pg = placement_group(name="test_pg", strategy="PACK", bundles=[{"CPU": 1}])
result_ref = task.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(
placement_group=pg,
placement_group_bundle_index=0,
)
).remote()
assert ray.get(result_ref) == "success"
# Remove the placement group. This will trigger CancelResourceReserve RPCs which need to be blocked
# for the placement group bundle to be leaked.
remove_placement_group(pg)
cluster.head_node.kill_gcs_server()
# ReleaseUnusedBundles only triggers after GCS restart to clean up potentially leaked bundles.
cluster.head_node.start_gcs_server()
# If the leaked bundle wasn't cleaned up, this task will hang due to resource unavailability
result = ray.get(task.remote())
assert result == "success"
@pytest.fixture
def inject_notify_gcs_restart_rpc_failure(monkeypatch, request):
deterministic_failure = request.param
failure = RPC_FAILURE_MAP[deterministic_failure].copy()
failure["num_failures"] = 1
monkeypatch.setenv(
"RAY_testing_rpc_failure",
json.dumps({"NodeManagerService.grpc_client.NotifyGCSRestart": failure}),
)
@pytest.mark.parametrize(
"inject_notify_gcs_restart_rpc_failure",
RPC_FAILURE_TYPES,
indirect=True,
)
@pytest.mark.parametrize(
"ray_start_cluster_head_with_external_redis",
[
{
"_system_config": {
# Extending the fallback timeout to focus on death
# notification received from GCS_ACTOR_CHANNEL pubsub
"timeout_ms_task_wait_for_death_info": 10000,
}
}
],
indirect=True,
)
def test_notify_gcs_restart_idempotent(
inject_notify_gcs_restart_rpc_failure,
ray_start_cluster_head_with_external_redis,
):
cluster = ray_start_cluster_head_with_external_redis
@ray.remote(num_cpus=1, max_restarts=0)
class DummyActor:
def get_pid(self):
return psutil.Process().pid
def ping(self):
return "pong"
actor = DummyActor.remote()
ray.get(actor.ping.remote())
actor_pid = ray.get(actor.get_pid.remote())
cluster.head_node.kill_gcs_server()
cluster.head_node.start_gcs_server()
p = psutil.Process(actor_pid)
p.kill()
# If the actor death notification is not received from the GCS pubsub, this will timeout since
# the fallback via wait_for_death_info_tasks in the actor task submitter will never trigger
# since it's set to 10 seconds.
with pytest.raises(ray.exceptions.RayActorError):
ray.get(actor.ping.remote(), timeout=5)
def test_kill_local_actor_rpc_retry_and_idempotency(monkeypatch, shutdown_only):
"""Test that KillLocalActor RPC retries work correctly and guarantee actor death.
Not testing response since the actor is killed either way.
"""
monkeypatch.setenv(
"RAY_testing_rpc_failure",
json.dumps(
{
"NodeManagerService.grpc_client.KillLocalActor": {
"num_failures": 1,
"req_failure_prob": 100,
"resp_failure_prob": 0,
"in_flight_failure_prob": 0,
}
}
),
)
ray.init()
@ray.remote
class SimpleActor:
def ping(self):
return "pong"
def get_pid(self):
return os.getpid()
actor = SimpleActor.remote()
result = ray.get(actor.ping.remote())
assert result == "pong"
worker_pid = ray.get(actor.get_pid.remote())
# NOTE: checking the process is still alive rather than checking the actor state from the GCS
# since as long as KillActor is sent the GCS will mark the actor as dead even though it may not actually be
assert psutil.pid_exists(worker_pid)
ray.kill(actor)
def verify_process_killed():
return not psutil.pid_exists(worker_pid)
wait_for_condition(verify_process_killed, timeout=30)
@pytest.fixture
def inject_cancel_local_task_rpc_failure(monkeypatch, request):
failure = RPC_FAILURE_MAP[request.param].copy()
failure["num_failures"] = 1
monkeypatch.setenv(
"RAY_testing_rpc_failure",
json.dumps(
{
"NodeManagerService.grpc_client.CancelLocalTask": failure,
}
),
)
@pytest.mark.parametrize(
"inject_cancel_local_task_rpc_failure", RPC_FAILURE_TYPES, indirect=True
)
@pytest.mark.parametrize("force_kill", [True, False])
def test_cancel_local_task_rpc_retry_and_idempotency(
inject_cancel_local_task_rpc_failure, force_kill, shutdown_only
):
"""Test that CancelLocalTask RPC retries work correctly.
Verify that the RPC is idempotent when network failures occur.
When force_kill=True, verify the worker process is actually killed using psutil.
"""
ray.init(num_cpus=1)
signaler = SignalActor.remote()
@ray.remote(num_cpus=1)
def get_pid():
return os.getpid()
@ray.remote(num_cpus=1)
def blocking_task():
return ray.get(signaler.wait.remote())
worker_pid = ray.get(get_pid.remote())
blocking_ref = blocking_task.remote()
with pytest.raises(GetTimeoutError):
ray.get(blocking_ref, timeout=1)
ray.cancel(blocking_ref, force=force_kill)
with pytest.raises(TaskCancelledError):
ray.get(blocking_ref, timeout=10)
if force_kill:
def verify_process_killed():
return not psutil.pid_exists(worker_pid)
wait_for_condition(verify_process_killed, timeout=30)
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/test_raylet_fault_tolerance.py",
"license": "Apache License 2.0",
"lines": 272,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/_private/gc_collect_manager.py | import gc
import logging
import threading
import time
from typing import Callable, Optional
logger = logging.getLogger(__name__)
class PythonGCThread(threading.Thread):
    """A background thread that triggers Python garbage collection.

    This thread waits for GC events from CoreWorker and triggers
    ``gc.collect()`` when requested.
    """

    def __init__(self, *, gc_collect_func: Optional[Callable] = None):
        """Initialize the thread; call ``start()`` to begin processing.

        Args:
            gc_collect_func: Replacement for ``gc.collect`` (only for
                testing). Expected to return the number of freed refs,
                though falsy returns (0 or None) are tolerated.
        """
        logger.debug("Starting Python GC thread")
        super().__init__(name="PythonGCThread", daemon=True)
        self._should_exit = False
        self._gc_event = threading.Event()
        # Sets the gc_collect_func (only for testing), defaults to gc.collect
        self._gc_collect_func = gc_collect_func or gc.collect

    def trigger_gc(self) -> None:
        """Request a collection pass (thread-safe, non-blocking)."""
        self._gc_event.set()

    def run(self):
        """Event loop: run a collection per trigger until ``stop()``."""
        while not self._should_exit:
            self._gc_event.wait()
            self._gc_event.clear()
            # Re-check after waking: stop() sets the event just to unblock us.
            if self._should_exit:
                break
            try:
                start = time.monotonic()
                num_freed = self._gc_collect_func()
                # Truthiness test instead of ``> 0``: a custom collect func
                # returning None would otherwise raise TypeError here.
                if num_freed:
                    logger.debug(
                        "gc.collect() freed {} refs in {} seconds".format(
                            num_freed, time.monotonic() - start
                        )
                    )
            except Exception:
                # logger.exception preserves the traceback, which
                # logger.error(f"...{e}") silently dropped.
                logger.exception("Error during GC")

    def stop(self):
        """Signal the thread to exit and block until it terminates."""
        logger.debug("Stopping Python GC thread")
        self._should_exit = True
        # Wake the loop so it can observe _should_exit.
        self._gc_event.set()
        self.join()
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/_private/gc_collect_manager.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:rllib/examples/envs/classes/multi_agent/footsies/encoder.py | import collections
import copy
from typing import Any, Optional, Union
import numpy as np
from ray.rllib.examples.envs.classes.multi_agent.footsies.game import constants
from ray.rllib.examples.envs.classes.multi_agent.footsies.game.proto import (
footsies_service_pb2 as footsies_pb2,
)
class FootsiesEncoder:
    """Encoder class to generate observations from the game state.

    Keeps a fixed-length history of per-player feature encodings so that each
    agent observes its own state undelayed but the opponent's state delayed
    by up to ``observation_delay`` frames.
    """

    def __init__(self, observation_delay: int):
        """Create an encoder.

        :param observation_delay: Number of frames by which the opponent's
            observation is delayed (also the history deque length).
        """
        # Per-agent ring buffers of past per-player encodings; the oldest
        # retained entry supplies the delayed opponent observation.
        self._encoding_history = {
            agent_id: collections.deque(maxlen=int(observation_delay))
            for agent_id in ["p1", "p2"]
        }
        self.observation_delay = observation_delay
        # Last encoded common (shared) state; None until encode() has run once.
        self._last_common_state: Optional[np.ndarray] = None
        # Fixed ordering of action ids used for one-hot encoding.
        self._action_id_values = list(constants.FOOTSIES_ACTION_IDS.values())

    @staticmethod
    def encode_common_state(game_state: footsies_pb2.GameState) -> np.ndarray:
        """Encode state shared by both players (currently only the
        normalized x-distance between them)."""
        p1_state, p2_state = game_state.player1, game_state.player2
        # NOTE(review): 8.0 presumably normalizes by the stage width in game
        # units -- confirm against the game constants.
        dist_x = np.abs(p1_state.player_position_x - p2_state.player_position_x) / 8.0
        return np.array(
            [
                dist_x,
            ],
            dtype=np.float32,
        )

    @staticmethod
    def _encode_input_buffer(
        input_buffer: list[int], last_n: Optional[int] = None
    ) -> np.ndarray:
        """Encodes the input buffer into a concatenation of one-hot vectors
        (one one-hot segment per buffered action).

        :param input_buffer: The input buffer to encode
        :type input_buffer: list[int]
        :return: The encoded one-hot vector
        :rtype: np.ndarray
        """
        if last_n is not None:
            # NOTE(review): for positive ``last_n`` this DROPS the first
            # ``last_n`` entries rather than keeping the last N. If "keep the
            # last N inputs" is intended, callers must pass a negative value
            # (or this should read ``input_buffer[-last_n:]``) -- confirm
            # against call sites.
            input_buffer = input_buffer[last_n:]
        ib_encoding = []
        for action_id in input_buffer:
            # One slot per entry of ACTION_TO_BITS plus one extra slot.
            arr = [0] * (len(constants.ACTION_TO_BITS) + 1)
            arr[action_id] = 1
            ib_encoding.extend(arr)
        input_buffer_vector = np.asarray(ib_encoding, dtype=np.float32)
        return input_buffer_vector

    def encode(
        self,
        game_state: footsies_pb2.GameState,
    ) -> dict[str, Any]:
        """Encodes the game state into observations for all agents.

        :param game_state: The game state to encode
        :type game_state: footsies_pb2.GameState
        :return: The encoded observations for all agents.
        :rtype: dict[str, Any]
        """
        common_state = self.encode_common_state(game_state)
        p1_encoding = self.encode_player_state(game_state.player1)
        p2_encoding = self.encode_player_state(game_state.player2)
        # Effective delay is capped by how much history has accumulated.
        observation_delay = min(
            self.observation_delay, len(self._encoding_history["p1"])
        )
        if observation_delay > 0:
            p1_delayed_encoding = self._encoding_history["p1"][-observation_delay]
            p2_delayed_encoding = self._encoding_history["p2"][-observation_delay]
        else:
            # No history yet (e.g. right after reset): fall back to the
            # current encodings; deep copies are taken defensively so the
            # history entry and the fallback never alias the same dict.
            p1_delayed_encoding = copy.deepcopy(p1_encoding)
            p2_delayed_encoding = copy.deepcopy(p2_encoding)
        self._encoding_history["p1"].append(p1_encoding)
        self._encoding_history["p2"].append(p2_encoding)
        self._last_common_state = common_state
        # Create features dictionary
        # NOTE(review): ``features``/``current_index`` below are built but
        # never used afterwards -- looks like dead code left over from a
        # feature-index map; consider removing.
        features = {}
        current_index = 0
        # Common state
        features["common_state"] = {
            "start": current_index,
            "length": len(common_state),
        }
        current_index += len(common_state)
        # Concatenate the observations for the undelayed encoding
        p1_encoding = np.hstack(list(p1_encoding.values()), dtype=np.float32)
        p2_encoding = np.hstack(list(p2_encoding.values()), dtype=np.float32)
        # Concatenate the observations for the delayed encoding
        p1_delayed_encoding = np.hstack(
            list(p1_delayed_encoding.values()), dtype=np.float32
        )
        p2_delayed_encoding = np.hstack(
            list(p2_delayed_encoding.values()), dtype=np.float32
        )
        # Each agent observes: shared state + own undelayed state +
        # opponent's delayed state.
        p1_centric_observation = np.hstack(
            [common_state, p1_encoding, p2_delayed_encoding]
        )
        p2_centric_observation = np.hstack(
            [common_state, p2_encoding, p1_delayed_encoding]
        )
        return {"p1": p1_centric_observation, "p2": p2_centric_observation}

    def encode_player_state(
        self,
        player_state: footsies_pb2.PlayerState,
    ) -> dict[str, Union[int, float, list, np.ndarray]]:
        """Encodes the player state into observations.

        :param player_state: The player state to encode
        :type player_state: footsies_pb2.PlayerState
        :return: The encoded observations for the player
        :rtype: dict[str, Any]
        """
        # Scalars are normalized by constants.FeatureDictNormalizers;
        # booleans are cast to 0/1; categorical fields are one-hot encoded.
        feature_dict = {
            "player_position_x": player_state.player_position_x
            / constants.FeatureDictNormalizers.PLAYER_POSITION_X,
            "velocity_x": player_state.velocity_x
            / constants.FeatureDictNormalizers.VELOCITY_X,
            "is_dead": int(player_state.is_dead),
            "vital_health": player_state.vital_health,
            "guard_health": one_hot_encoder(player_state.guard_health, [0, 1, 2, 3]),
            "current_action_id": self._encode_action_id(player_state.current_action_id),
            "current_action_frame": player_state.current_action_frame
            / constants.FeatureDictNormalizers.CURRENT_ACTION_FRAME,
            "current_action_frame_count": player_state.current_action_frame_count
            / constants.FeatureDictNormalizers.CURRENT_ACTION_FRAME_COUNT,
            "current_action_remaining_frames": (
                player_state.current_action_frame_count
                - player_state.current_action_frame
            )
            / constants.FeatureDictNormalizers.CURRENT_ACTION_REMAINING_FRAMES,
            "is_action_end": int(player_state.is_action_end),
            "is_always_cancelable": int(player_state.is_always_cancelable),
            "current_action_hit_count": player_state.current_action_hit_count,
            "current_hit_stun_frame": player_state.current_hit_stun_frame
            / constants.FeatureDictNormalizers.CURRENT_HIT_STUN_FRAME,
            "is_in_hit_stun": int(player_state.is_in_hit_stun),
            "sprite_shake_position": player_state.sprite_shake_position,
            "max_sprite_shake_frame": player_state.max_sprite_shake_frame
            / constants.FeatureDictNormalizers.MAX_SPRITE_SHAKE_FRAME,
            "is_face_right": int(player_state.is_face_right),
            "current_frame_advantage": player_state.current_frame_advantage
            / constants.FeatureDictNormalizers.CURRENT_FRAME_ADVANTAGE,
            # The below features leak some information about the opponent!
            "would_next_forward_input_dash": int(
                player_state.would_next_forward_input_dash
            ),
            "would_next_backward_input_dash": int(
                player_state.would_next_backward_input_dash
            ),
            "special_attack_progress": min(player_state.special_attack_progress, 1.0),
        }
        return feature_dict

    def get_last_encoding(self) -> Optional[dict[str, np.ndarray]]:
        """Return the most recent common/p1/p2 encodings as flat arrays, or
        None if ``encode()`` has not been called yet."""
        if self._last_common_state is None:
            return None
        return {
            "common_state": self._last_common_state.reshape(-1),
            "p1": np.hstack(
                list(self._encoding_history["p1"][-1].values()),
                dtype=np.float32,
            ),
            "p2": np.hstack(
                list(self._encoding_history["p2"][-1].values()),
                dtype=np.float32,
            ),
        }

    def reset(self):
        """Clear the per-agent encoding history (call at episode start)."""
        self._encoding_history = {
            agent_id: collections.deque(maxlen=int(self.observation_delay))
            for agent_id in ["p1", "p2"]
        }

    def _encode_action_id(self, action_id: int) -> np.ndarray:
        """Encodes the action id into a one-hot vector.

        :param action_id: The action id to encode
        :type action_id: int
        :return: The encoded one-hot vector
        :rtype: np.ndarray
        """
        action_vector = np.zeros(len(self._action_id_values), dtype=np.float32)
        # Get the index of the action id in constants.ActionID
        action_index = self._action_id_values.index(action_id)
        action_vector[action_index] = 1
        # Sanity check; note ``assert`` is stripped under ``python -O``.
        assert action_vector.max() == 1 and action_vector.min() == 0
        return action_vector
def one_hot_encoder(
    value: Union[int, float, str], collection: list[Union[int, float, str]]
) -> np.ndarray:
    """Return a float32 one-hot vector marking ``value``'s position in
    ``collection``.

    Raises ValueError (via ``list.index``) when ``value`` is not present,
    exactly like the direct zeros-and-assign approach.
    """
    # Row i of the identity matrix is precisely the one-hot vector for
    # index i.
    return np.eye(len(collection), dtype=np.float32)[collection.index(value)]
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/examples/envs/classes/multi_agent/footsies/encoder.py",
"license": "Apache License 2.0",
"lines": 186,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:rllib/examples/envs/classes/multi_agent/footsies/fixed_rlmodules.py | import tree # pip install dm_tree
from ray.rllib.core.rl_module import RLModule
from ray.rllib.examples.envs.classes.multi_agent.footsies.game import constants
from ray.rllib.policy import sample_batch
from ray.rllib.utils.spaces.space_utils import batch as batch_func
class FixedRLModule(RLModule):
    """Base RLModule that always plays a fixed action and is never trained.

    Subclasses override ``_fixed_forward`` to return their constant action.
    """

    def _forward_inference(self, batch, **kwargs):
        # Inference and exploration behave identically for a fixed policy.
        return self._fixed_forward(batch, **kwargs)

    def _forward_exploration(self, batch, **kwargs):
        return self._fixed_forward(batch, **kwargs)

    def _forward_train(self, *args, **kwargs):
        # Training a fixed policy is a usage error, not a no-op.
        raise NotImplementedError(
            f"RLlib: {self.__class__.__name__} should not be trained. "
            f"It is a fixed RLModule, returning a fixed action for all observations."
        )

    def _fixed_forward(self, batch, **kwargs):
        """Implements a fixed policy that always returns the same action."""
        raise NotImplementedError(
            "FixedRLModule: This method should be overridden by subclasses to implement a specific action."
        )
class NoopFixedRLModule(FixedRLModule):
    """Fixed policy that emits the NONE (no-op) action for every observation."""

    def _fixed_forward(self, batch, **kwargs):
        # Batch size = length of the first leaf of the (possibly nested) obs.
        first_leaf = tree.flatten(batch[sample_batch.SampleBatch.OBS])[0]
        noop_actions = batch_func([constants.EnvActions.NONE] * len(first_leaf))
        return {sample_batch.SampleBatch.ACTIONS: noop_actions}
class BackFixedRLModule(FixedRLModule):
    """Fixed module that always emits the BACK (retreat) action."""

    def _fixed_forward(self, batch, **kwargs):
        # Derive the batch size from the first leaf of the (possibly nested) obs.
        first_obs_leaf = tree.flatten(batch[sample_batch.SampleBatch.OBS])[0]
        back_actions = [constants.EnvActions.BACK] * len(first_obs_leaf)
        return {sample_batch.SampleBatch.ACTIONS: batch_func(back_actions)}
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/examples/envs/classes/multi_agent/footsies/fixed_rlmodules.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:rllib/examples/envs/classes/multi_agent/footsies/footsies_env.py | import logging
from typing import Any, Optional
import numpy as np
from gymnasium import spaces
from pettingzoo.utils.env import (
ActionType,
AgentID,
ObsType,
)
from ray.rllib.env import EnvContext
from ray.rllib.env.multi_agent_env import MultiAgentEnv
from ray.rllib.examples.envs.classes.multi_agent.footsies.encoder import FootsiesEncoder
from ray.rllib.examples.envs.classes.multi_agent.footsies.game import constants
from ray.rllib.examples.envs.classes.multi_agent.footsies.game.footsies_binary import (
FootsiesBinary,
)
from ray.rllib.examples.envs.classes.multi_agent.footsies.game.footsies_game import (
FootsiesGame,
)
import psutil
logger = logging.getLogger("ray.rllib")
class FootsiesEnv(MultiAgentEnv):
    """Two-player RLlib MultiAgentEnv wrapping the Footsies fighting game.

    Each env instance spawns its own game-server process (via
    `FootsiesBinary`) and communicates with it over gRPC through
    `FootsiesGame`. The two agents are "p1" and "p2".
    """
    metadata = {"render.modes": ["human"]}
    # Number of game frames ATTACK is held to perform a charged special.
    SPECIAL_CHARGE_FRAMES = 60
    # Reward bonus/penalty applied on guard-health loss when the
    # "reward_guard_break" config flag is set (see `step`).
    GUARD_BREAK_REWARD = 0.3
    observation_space = spaces.Dict(
        {
            agent: spaces.Box(
                low=-np.inf,
                high=np.inf,
                shape=(constants.OBSERVATION_SPACE_SIZE,),
            )
            for agent in ["p1", "p2"]
        }
    )
    action_space = spaces.Dict(
        {
            agent: spaces.Discrete(
                len(
                    [
                        constants.EnvActions.NONE,
                        constants.EnvActions.BACK,
                        constants.EnvActions.FORWARD,
                        constants.EnvActions.ATTACK,
                        constants.EnvActions.BACK_ATTACK,
                        constants.EnvActions.FORWARD_ATTACK,
                        # This is a special input that holds down
                        # attack for 60 frames. It's just too long of a sequence
                        # to easily learn by holding ATTACK for so long.
                        constants.EnvActions.SPECIAL_CHARGE,
                    ]
                )
            )
            for agent in ["p1", "p2"]
        }
    )
    def __init__(self, config: EnvContext, port: int):
        """Starts a game server on `port` and connects to it.

        :param config: Env config; keys used here: "max_t", "frame_skip",
            "observation_delay", "host", plus the binary/download keys
            consumed by `FootsiesBinary`.
        :param port: Port the dedicated game-server process listens on.
        """
        super().__init__()
        if config is None:
            config = {}
        self.config = config
        self.port = port
        self.footsies_process_pid = (
            None  # Store PID of the running footsies process (we assume one per env)
        )
        self.agents: list[AgentID] = ["p1", "p2"]
        self.possible_agents: list[AgentID] = self.agents.copy()
        self._agent_ids: set[AgentID] = set(self.agents)
        # Step counter within the current episode (drives truncation).
        self.t: int = 0
        self.max_t: int = config.get("max_t", 1000)
        self.frame_skip = config.get("frame_skip", 4)
        observation_delay = config.get("observation_delay", 16)
        assert (
            observation_delay % self.frame_skip == 0
        ), "observation_delay must be divisible by frame_skip"
        # The encoder works in env steps, so convert the delay from frames.
        self.encoder = FootsiesEncoder(
            observation_delay=observation_delay // self.frame_skip
        )
        # start the game server before initializing the communication between the
        # game server and the Python harness via gRPC
        self._prepare_and_start_game_server()
        self.game = FootsiesGame(
            host=config["host"],
            port=self.port,
        )
        self.last_game_state = None
        # Per-agent countdown of remaining steps in a charged special;
        # -1 means no special charge is in progress (see `step`).
        self.special_charge_queue = {
            "p1": -1,
            "p2": -1,
        }
    @staticmethod
    def _convert_to_charge_action(action: int) -> int:
        """Maps an action to its ATTACK-holding variant while charging a special."""
        if action == constants.EnvActions.BACK:
            return constants.EnvActions.BACK_ATTACK
        elif action == constants.EnvActions.FORWARD:
            return constants.EnvActions.FORWARD_ATTACK
        else:
            return constants.EnvActions.ATTACK
    def close(self):
        """Terminate Footsies game server process.
        Run to ensure no game servers are left running.
        """
        timeout = 2
        try:
            logger.info(
                f"RLlib {self.__class__.__name__}: Terminating Footsies "
                f"game server process with PID: {self.footsies_process_pid}..."
            )
            p = psutil.Process(self.footsies_process_pid)
            p.terminate()
            p.wait(timeout=timeout)
        except psutil.NoSuchProcess:
            logger.info(
                f"RLlib {self.__class__.__name__}: Process with PID {self.footsies_process_pid} not found, "
                f"it might have been already terminated."
            )
        except psutil.TimeoutExpired:
            # Graceful terminate timed out -- escalate to SIGKILL.
            logger.warning(
                f"RLlib {self.__class__.__name__}: Process with PID {self.footsies_process_pid} did not terminate "
                f"within {timeout} seconds. "
                f"Sending SIGKILL signal instead.",
            )
            p.kill()
            p.wait(timeout=timeout)
    def get_infos(self):
        """Returns an (empty) info dict per agent."""
        return {agent: {} for agent in self.agents}
    def get_obs(self, game_state):
        """Encodes a raw game state into per-agent observation vectors."""
        return self.encoder.encode(game_state)
    def reset(
        self,
        *,
        seed: Optional[int] = None,
        options: Optional[dict] = None,
    ) -> tuple[dict[AgentID, ObsType], dict[AgentID, Any]]:
        """Resets the environment to the starting state
        and returns the initial observations for all agents.
        :return: Tuple of observations and infos for each agent.
        :rtype: tuple[dict[AgentID, ObsType], dict[AgentID, Any]]
        """
        self.t = 0
        self.game.reset_game()
        self.game.start_game()
        self.encoder.reset()
        self.last_game_state = self.game.get_state()
        observations = self.get_obs(self.last_game_state)
        return observations, {agent: {} for agent in self.agents}
    def step(
        self, actions: dict[AgentID, ActionType]
    ) -> tuple[
        dict[AgentID, ObsType],
        dict[AgentID, float],
        dict[AgentID, bool],
        dict[AgentID, bool],
        dict[AgentID, dict[str, Any]],
    ]:
        """Step the environment with the provided actions for all agents.
        :param actions: Dictionary mapping agent ids to their actions for this step.
        :type actions: dict[AgentID, ActionType]
        :return: Tuple of observations, rewards, terminates, truncateds and infos for all agents.
        :rtype: tuple[ dict[AgentID, ObsType], dict[AgentID, float], dict[AgentID, bool], dict[AgentID, bool], dict[AgentID, dict[str, Any]], ]
        """
        self.t += 1
        # SPECIAL_CHARGE is expanded into a multi-step sequence: while the
        # per-agent countdown is active, the chosen action is overridden with
        # its ATTACK-holding variant. NOTE: this mutates the caller's
        # `actions` dict in place.
        for agent_id in self.agents:
            empty_queue = self.special_charge_queue[agent_id] < 0
            action_is_special_charge = (
                actions[agent_id] == constants.EnvActions.SPECIAL_CHARGE
            )
            # Refill the charge queue only if we're not already in a special charge.
            if action_is_special_charge and empty_queue:
                self.special_charge_queue[
                    agent_id
                ] = self._build_charged_special_queue()
            if self.special_charge_queue[agent_id] >= 0:
                self.special_charge_queue[agent_id] -= 1
                actions[agent_id] = self._convert_to_charge_action(actions[agent_id])
        p1_action = self.game.action_to_bits(actions["p1"], is_player_1=True)
        p2_action = self.game.action_to_bits(actions["p2"], is_player_1=False)
        game_state = self.game.step_n_frames(
            p1_action=p1_action, p2_action=p2_action, n_frames=self.frame_skip
        )
        observations = self.get_obs(game_state)
        terminated = game_state.player1.is_dead or game_state.player2.is_dead
        # Zero-sum game: 1 if other player is dead, -1 if you're dead:
        rewards = {
            "p1": int(game_state.player2.is_dead) - int(game_state.player1.is_dead),
            "p2": int(game_state.player1.is_dead) - int(game_state.player2.is_dead),
        }
        if self.config.get("reward_guard_break", False):
            # Optional shaping: reward/punish guard-health loss symmetrically.
            p1_prev_guard_health = self.last_game_state.player1.guard_health
            p2_prev_guard_health = self.last_game_state.player2.guard_health
            p1_guard_health = game_state.player1.guard_health
            p2_guard_health = game_state.player2.guard_health
            if p2_guard_health < p2_prev_guard_health:
                rewards["p1"] += self.GUARD_BREAK_REWARD
                rewards["p2"] -= self.GUARD_BREAK_REWARD
            if p1_guard_health < p1_prev_guard_health:
                rewards["p2"] += self.GUARD_BREAK_REWARD
                rewards["p1"] -= self.GUARD_BREAK_REWARD
        terminateds = {
            "p1": terminated,
            "p2": terminated,
            "__all__": terminated,
        }
        truncated = self.t >= self.max_t
        truncateds = {
            "p1": truncated,
            "p2": truncated,
            "__all__": truncated,
        }
        self.last_game_state = game_state
        return observations, rewards, terminateds, truncateds, self.get_infos()
    def _build_charged_special_queue(self):
        """Returns the number of env steps needed to hold ATTACK for
        SPECIAL_CHARGE_FRAMES game frames at the current frame skip."""
        assert self.SPECIAL_CHARGE_FRAMES % self.frame_skip == 0
        steps_to_apply_attack = int(self.SPECIAL_CHARGE_FRAMES // self.frame_skip)
        return steps_to_apply_attack
    def _prepare_and_start_game_server(self):
        """Downloads/extracts the game binary if needed and launches the server."""
        fb = FootsiesBinary(config=self.config, port=self.port)
        self.footsies_process_pid = fb.start_game_server()
def env_creator(env_config: EnvContext) -> FootsiesEnv:
    """Creates a FootsiesEnv on a port unique to this (worker, sub-env) slot.

    Each game server must run on its own port; training and evaluation env
    runners draw from separate port ranges. Typically passed to
    `register_env()`, e.g.
    `register_env(name="FootsiesEnv", env_creator=env_creator)`.
    """
    # One port slot per (worker, vectorized sub-env) pair.
    worker_offset = int(env_config.worker_index) * env_config.get(
        "num_envs_per_worker", 1
    )
    vector_offset = env_config.get("vector_index", 0)
    if env_config.get("env-for-evaluation", False):
        # Eval worker indices start at 1; shift down by one so that
        # eval_start_port is the first port actually used.
        base_port = env_config["eval_start_port"] - 1
    else:
        base_port = env_config["train_start_port"]
    return FootsiesEnv(
        config=env_config, port=base_port + worker_offset + vector_offset
    )
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/examples/envs/classes/multi_agent/footsies/footsies_env.py",
"license": "Apache License 2.0",
"lines": 238,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:rllib/examples/envs/classes/multi_agent/footsies/game/constants.py | from dataclasses import dataclass
# Length of the flat per-agent observation vector (used as the Box shape of
# FootsiesEnv.observation_space).
OBSERVATION_SPACE_SIZE: int = 81
@dataclass
class EnvActions:
    """Discrete actions exposed to the agents by FootsiesEnv.

    NOTE(review): fields are unannotated, so @dataclass treats them as plain
    class attributes (namespace-style constants), not dataclass fields.
    """
    NONE = 0
    BACK = 1
    FORWARD = 2
    ATTACK = 3
    BACK_ATTACK = 4
    FORWARD_ATTACK = 5
    # Multi-step input: hold ATTACK for SPECIAL_CHARGE_FRAMES (see FootsiesEnv).
    SPECIAL_CHARGE = 6
@dataclass
class GameActions:
    """Side-absolute actions understood by the game itself.

    Env-level BACK/FORWARD are translated to LEFT/RIGHT per player by
    FootsiesGame.action_to_bits.
    """
    NONE = 0
    LEFT = 1
    RIGHT = 2
    ATTACK = 3
    LEFT_ATTACK = 4
    RIGHT_ATTACK = 5
@dataclass
class ActionBits:
    """Bitmask encoding of game inputs; combined inputs OR single-input flags."""
    NONE: int = 0
    LEFT: int = 1 << 0
    RIGHT: int = 1 << 1
    ATTACK: int = 1 << 2
    LEFT_ATTACK: int = LEFT | ATTACK
    RIGHT_ATTACK: int = RIGHT | ATTACK
@dataclass
class ActionID:
    """Identifiers of in-game action/animation states reported in the game
    state (see FOOTSIES_ACTION_IDS for the name -> id mapping).

    NOTE(review): fields are unannotated, so @dataclass treats them as plain
    class attributes (namespace-style constants), not dataclass fields.
    """
    STAND = 0
    FORWARD = 1
    BACKWARD = 2
    DASH_FORWARD = 10
    DASH_BACKWARD = 11
    N_ATTACK = 100
    B_ATTACK = 105
    N_SPECIAL = 110
    B_SPECIAL = 115
    DAMAGE = 200
    GUARD_M = 301
    GUARD_STAND = 305
    GUARD_CROUCH = 306
    GUARD_BREAK = 310
    GUARD_PROXIMITY = 350
    DEAD = 500
    WIN = 510
@dataclass
class FeatureDictNormalizers:
    """Scale factors for raw game-state features.

    NOTE(review): presumably used by the observation encoder as divisors to
    bring feature magnitudes into a comparable range -- confirm against
    FootsiesEncoder.
    """
    PLAYER_POSITION_X = 4.0
    VELOCITY_X = 5.0
    CURRENT_ACTION_FRAME = 25
    CURRENT_ACTION_FRAME_COUNT = 25
    CURRENT_ACTION_REMAINING_FRAMES = 25
    CURRENT_HIT_STUN_FRAME = 10
    MAX_SPRITE_SHAKE_FRAME = 10
    CURRENT_FRAME_ADVANTAGE = 10
# GameActions -> ActionBits: translate a game action id into the input
# bitmask sent to the game server.
ACTION_TO_BITS = {
    GameActions.NONE: ActionBits.NONE,
    GameActions.LEFT: ActionBits.LEFT,
    GameActions.RIGHT: ActionBits.RIGHT,
    GameActions.ATTACK: ActionBits.ATTACK,
    GameActions.LEFT_ATTACK: ActionBits.LEFT_ATTACK,
    GameActions.RIGHT_ATTACK: ActionBits.RIGHT_ATTACK,
}
# Human-readable action-state name -> ActionID constant (used e.g. to label
# per-action metrics).
FOOTSIES_ACTION_IDS = {
    "STAND": ActionID.STAND,
    "FORWARD": ActionID.FORWARD,
    "BACKWARD": ActionID.BACKWARD,
    "DASH_FORWARD": ActionID.DASH_FORWARD,
    "DASH_BACKWARD": ActionID.DASH_BACKWARD,
    "N_ATTACK": ActionID.N_ATTACK,
    "B_ATTACK": ActionID.B_ATTACK,
    "N_SPECIAL": ActionID.N_SPECIAL,
    "B_SPECIAL": ActionID.B_SPECIAL,
    "DAMAGE": ActionID.DAMAGE,
    "GUARD_M": ActionID.GUARD_M,
    "GUARD_STAND": ActionID.GUARD_STAND,
    "GUARD_CROUCH": ActionID.GUARD_CROUCH,
    "GUARD_BREAK": ActionID.GUARD_BREAK,
    "GUARD_PROXIMITY": ActionID.GUARD_PROXIMITY,
    "DEAD": ActionID.DEAD,
    "WIN": ActionID.WIN,
}
# backup file location (uploaded July 29th, 2025):
# https://ray-example-data.s3.us-west-2.amazonaws.com/rllib/env-footsies/feature_indices.json
# Dictionary mapping feature names to their index ranges within a flat observation vector.
# Each key is a feature name, and its value is a dictionary with keys:
#   "start": the starting index in the observation array.
#   "length": its length in bytes
feature_indices = {
    "common_state": {"start": 0, "length": 1},
    "frame_count": {"start": 1, "length": 1},
    "player_position_x": {"start": 2, "length": 1},
    "velocity_x": {"start": 3, "length": 1},
    "is_dead": {"start": 4, "length": 1},
    "vital_health": {"start": 5, "length": 1},
    "guard_health": {"start": 6, "length": 4},
    "current_action_id": {"start": 10, "length": 17},
    "current_action_frame": {"start": 27, "length": 1},
    "current_action_frame_count": {"start": 28, "length": 1},
    "current_action_remaining_frames": {"start": 29, "length": 1},
    "is_action_end": {"start": 30, "length": 1},
    "is_always_cancelable": {"start": 31, "length": 1},
    "current_action_hit_count": {"start": 32, "length": 1},
    "current_hit_stun_frame": {"start": 33, "length": 1},
    "is_in_hit_stun": {"start": 34, "length": 1},
    "sprite_shake_position": {"start": 35, "length": 1},
    "max_sprite_shake_frame": {"start": 36, "length": 1},
    "is_face_right": {"start": 37, "length": 1},
    "current_frame_advantage": {"start": 38, "length": 1},
    "would_next_forward_input_dash": {"start": 39, "length": 1},
    "would_next_backward_input_dash": {"start": 40, "length": 1},
    "special_attack_progress": {"start": 41, "length": 1},
    "opponent_frame_count": {"start": 42, "length": 1},
    "opponent_player_position_x": {"start": 43, "length": 1},
    "opponent_velocity_x": {"start": 44, "length": 1},
    "opponent_is_dead": {"start": 45, "length": 1},
    "opponent_vital_health": {"start": 46, "length": 1},
    "opponent_guard_health": {"start": 47, "length": 4},
    "opponent_current_action_id": {"start": 51, "length": 17},
    "opponent_current_action_frame": {"start": 68, "length": 1},
    "opponent_current_action_frame_count": {"start": 69, "length": 1},
    "opponent_current_action_remaining_frames": {"start": 70, "length": 1},
    "opponent_is_action_end": {"start": 71, "length": 1},
    "opponent_is_always_cancelable": {"start": 72, "length": 1},
    "opponent_current_action_hit_count": {"start": 73, "length": 1},
    "opponent_current_hit_stun_frame": {"start": 74, "length": 1},
    "opponent_is_in_hit_stun": {"start": 75, "length": 1},
    "opponent_sprite_shake_position": {"start": 76, "length": 1},
    "opponent_max_sprite_shake_frame": {"start": 77, "length": 1},
    "opponent_is_face_right": {"start": 78, "length": 1},
    "opponent_current_frame_advantage": {"start": 79, "length": 1},
    "opponent_would_next_forward_input_dash": {"start": 80, "length": 1},
    "opponent_would_next_backward_input_dash": {"start": 81, "length": 1},
    "opponent_special_attack_progress": {"start": 82, "length": 1},
}
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/examples/envs/classes/multi_agent/footsies/game/constants.py",
"license": "Apache License 2.0",
"lines": 136,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:rllib/examples/envs/classes/multi_agent/footsies/game/footsies_binary.py | import logging
import os
import stat
import subprocess
import time
import zipfile
from dataclasses import dataclass
from pathlib import Path
import grpc
import requests
from filelock import FileLock
from ray.rllib.env import EnvContext
from ray.rllib.examples.envs.classes.multi_agent.footsies.game.proto import (
footsies_service_pb2 as footsies_pb2,
footsies_service_pb2_grpc as footsies_pb2_grpc,
)
from ray.util import log_once
logger = logging.getLogger(__name__)
@dataclass
class BinaryUrls:
    """Download URLs for the prebuilt Footsies game binaries hosted on S3."""
    # Uploaded 07.28.2025
    S3_ROOT = "https://ray-example-data.s3.us-west-2.amazonaws.com/rllib/env-footsies/binaries/"
    # Zip file names
    ZIP_LINUX_SERVER = "footsies_linux_server_021725.zip"
    ZIP_LINUX_WINDOWED = "footsies_linux_windowed_021725.zip"
    ZIP_MAC_HEADLESS = "footsies_mac_headless_5709b6d.zip"
    ZIP_MAC_WINDOWED = "footsies_mac_windowed_5709b6d.zip"
    # Full URLs
    URL_LINUX_SERVER_BINARIES = S3_ROOT + ZIP_LINUX_SERVER
    URL_LINUX_WINDOWED_BINARIES = S3_ROOT + ZIP_LINUX_WINDOWED
    URL_MAC_HEADLESS_BINARIES = S3_ROOT + ZIP_MAC_HEADLESS
    URL_MAC_WINDOWED_BINARIES = S3_ROOT + ZIP_MAC_WINDOWED
class FootsiesBinary:
    """Downloads, extracts, and launches the Footsies game-server binary.

    Multiple env actors may construct this concurrently; file locks serialize
    the download and extraction steps so only one actor does the work.
    """

    def __init__(self, config: EnvContext, port: int):
        """Resolve the download URL and target paths from `config`.

        :param config: Must contain "binary_to_download" (one of
            "linux_server", "linux_windowed", "mac_headless", "mac_windowed"),
            "binary_download_dir", and "binary_extract_dir".
        :param port: Port the launched game server will listen on.
        :raises ValueError: If "binary_to_download" is not a known target.
        """
        self._urls = BinaryUrls()
        self.config = config
        self.port = port
        self.binary_to_download = config["binary_to_download"]
        if self.binary_to_download == "linux_server":
            self.url = self._urls.URL_LINUX_SERVER_BINARIES
        elif self.binary_to_download == "linux_windowed":
            self.url = self._urls.URL_LINUX_WINDOWED_BINARIES
        elif self.binary_to_download == "mac_headless":
            self.url = self._urls.URL_MAC_HEADLESS_BINARIES
        elif self.binary_to_download == "mac_windowed":
            self.url = self._urls.URL_MAC_WINDOWED_BINARIES
        else:
            raise ValueError(f"Invalid target binary: {self.binary_to_download}")
        self.full_download_dir = Path(config["binary_download_dir"]).resolve()
        # Local zip path: download dir + last URL path segment.
        self.full_download_path = (
            self.full_download_dir / str.split(self.url, sep="/")[-1]
        )
        self.full_extract_dir = Path(config["binary_extract_dir"]).resolve()
        # Extracted contents are renamed to a stable directory name.
        self.renamed_path = self.full_extract_dir / "footsies_binaries"

    @staticmethod
    def _add_executable_permission(binary_path: Path) -> None:
        """Add the owner-execute bit to `binary_path` (preserving other bits)."""
        binary_path.chmod(binary_path.stat().st_mode | stat.S_IXUSR)

    def start_game_server(self) -> int:
        """Downloads, unzips, and starts the Footsies game server binary.
        Returns footsies process PID.
        """
        self._download_game_binary()
        self._unzip_game_binary()
        # Locate the executable inside the extracted directory per platform.
        if self.binary_to_download == "mac_windowed":
            game_binary_path = (
                Path(self.renamed_path) / "Contents" / "MacOS" / "FOOTSIES"
            )
        elif self.binary_to_download == "mac_headless":
            game_binary_path = Path(self.renamed_path) / "FOOTSIES"
        else:
            game_binary_path = Path(self.renamed_path) / "footsies.x86_64"
        if os.access(game_binary_path, os.X_OK):
            logger.info(
                f"Game binary has an 'executable' permission: {game_binary_path}"
            )
        else:
            self._add_executable_permission(game_binary_path)
        logger.info(f"Game binary path: {game_binary_path}")
        # The underlying game can be quite spammy. So when we are not debugging it, we can suppress the output.
        log_output = self.config.get("log_unity_output")
        if log_output is None:
            if log_once("log_unity_output_not_set"):
                logger.warning(
                    "`log_unity_output` not set in environment config, not logging output by default"
                )
            log_output = False
        if not log_output:
            stdout_dest = stderr_dest = subprocess.DEVNULL
        else:
            stdout_dest = stderr_dest = None  # Use parent's stdout/stderr
        if (
            self.binary_to_download == "linux_server"
            or self.binary_to_download == "linux_windowed"
        ):
            process = subprocess.Popen(
                [game_binary_path, "--port", str(self.port)],
                stdout=stdout_dest,
                stderr=stderr_dest,
            )
        else:
            # macOS binaries are x86_64; run under Rosetta via `arch`.
            process = subprocess.Popen(
                [
                    "arch",
                    "-x86_64",
                    game_binary_path,
                    "--port",
                    str(self.port),
                ],
                stdout=stdout_dest,
                stderr=stderr_dest,
            )
        # check if the game server is running correctly
        timeout = 2
        channel = grpc.insecure_channel(f"localhost:{self.port}")
        stub = footsies_pb2_grpc.FootsiesGameServiceStub(channel)
        # step 1: try to start the game (retry while the server is coming up)
        while True:
            try:
                stub.StartGame(footsies_pb2.Empty())
                logger.info("Game ready!")
                break
            except grpc.RpcError as e:
                code = e.code()
                if code in (
                    grpc.StatusCode.UNAVAILABLE,
                    grpc.StatusCode.DEADLINE_EXCEEDED,
                ):
                    logger.info(f"RLlib {self.__class__.__name__}: Game not ready...")
                    time.sleep(timeout)
                    continue
                # Any other RPC failure is unexpected -- propagate it.
                raise
        # step 2: check if the game is ready
        ready = False
        while not ready:
            try:
                ready = stub.IsReady(footsies_pb2.Empty()).value
                if not ready:
                    logger.info(f"RLlib {self.__class__.__name__}: Game not ready...")
                    time.sleep(timeout)
                    continue
                else:
                    logger.info("Game ready!")
                    break
            except grpc.RpcError as e:
                if e.code() in (
                    grpc.StatusCode.UNAVAILABLE,
                    grpc.StatusCode.DEADLINE_EXCEEDED,
                ):
                    time.sleep(timeout)
                    logger.info(f"RLlib {self.__class__.__name__}: Game not ready...")
                    continue
                raise
        channel.close()
        return process.pid

    def _download_game_binary(self):
        """Download the zipped binary to `full_download_path` (once, under a lock).

        :raises requests.exceptions.RequestException: If the download fails.
        """
        # As multiple actors might try to download all at the same time.
        # The file lock should force only one actor to download
        chunk_size = 1024 * 1024  # 1MB
        lock_path = self.full_download_path.parent / ".footsies-download.lock"
        with FileLock(lock_path, timeout=300):
            if self.full_download_path.exists():
                logger.info(
                    f"Game binary already exists at {self.full_download_path}, skipping download."
                )
            else:
                try:
                    with requests.get(self.url, stream=True) as response:
                        response.raise_for_status()
                        self.full_download_dir.mkdir(parents=True, exist_ok=True)
                        with open(self.full_download_path, "wb") as f:
                            for chunk in response.iter_content(chunk_size=chunk_size):
                                if chunk:
                                    f.write(chunk)
                    logger.info(
                        f"Downloaded game binary to {self.full_download_path}\n"
                        f"Binary size: {self.full_download_path.stat().st_size / 1024 / 1024:.1f} MB\n"
                    )
                except requests.exceptions.RequestException as e:
                    logger.error(f"Failed to download binary from {self.url}: {e}")
                    # A truncated file left behind would make later runs skip
                    # the download and fail confusingly at unzip time; remove
                    # it and surface the error instead of continuing.
                    self.full_download_path.unlink(missing_ok=True)
                    raise

    def _unzip_game_binary(self):
        """Extract the downloaded zip and rename it to `renamed_path` (once)."""
        # As multiple actors might try to unzip or rename the paths at the same time.
        # The file lock should force this function to be sequential
        lock_path = self.full_download_path.parent / ".footsies-unzip.lock"
        with FileLock(lock_path, timeout=300):
            if self.renamed_path.exists():
                logger.info(
                    f"Game binary already extracted at {self.renamed_path}, skipping extraction."
                )
            else:
                self.full_extract_dir.mkdir(parents=True, exist_ok=True)
                with zipfile.ZipFile(self.full_download_path, mode="r") as zip_ref:
                    zip_ref.extractall(self.full_extract_dir)
                # The zip's top-level entry matches the zip name (mac windowed
                # builds extract to a ".app" bundle); rename to a stable path.
                if self.binary_to_download == "mac_windowed":
                    self.full_download_path.with_suffix(".app").rename(
                        self.renamed_path
                    )
                else:
                    self.full_download_path.with_suffix("").rename(self.renamed_path)
                logger.info(f"Extracted game binary to {self.renamed_path}")
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/examples/envs/classes/multi_agent/footsies/game/footsies_binary.py",
"license": "Apache License 2.0",
"lines": 199,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:rllib/examples/envs/classes/multi_agent/footsies/game/footsies_game.py | import logging
import time
import grpc
import numpy as np
import ray.rllib.examples.envs.classes.multi_agent.footsies.game.proto.footsies_service_pb2 as footsies_pb2
import ray.rllib.examples.envs.classes.multi_agent.footsies.game.proto.footsies_service_pb2_grpc as footsies_pb2_grpc
from ray.rllib.examples.envs.classes.multi_agent.footsies.game import constants
logger = logging.getLogger(__name__)
class FootsiesGame:
    """Handles gRPC communication with the game server.

    This class establishes communication between the game server and the
    Python harness via gRPC. It provides methods to start the game, reset it,
    get the current state, and step the game by a certain number of frames.

    All RPC wrappers log failures and re-raise with a bare `raise` so the
    original exception and traceback propagate unchanged (the previous
    `raise e` form appended a redundant re-raise frame to the traceback).
    """

    def __init__(self, host: str, port: int):
        """Connect a gRPC stub to the game server at `host`:`port`."""
        self.host = host
        self.port = port
        self.stub = self._initialize_stub()

    @staticmethod
    def action_to_bits(action: int, is_player_1: bool) -> int:
        """Converts an action to its corresponding bit representation.

        Env-level BACK/FORWARD (and their ATTACK combos) are relative to the
        player, while the game expects absolute LEFT/RIGHT inputs, so the
        mapping is mirrored between player 1 and player 2.
        """
        if isinstance(action, np.ndarray):
            # Unwrap 0-d/1-element arrays coming from the policy.
            action = action.item()
        if is_player_1:
            if action == constants.EnvActions.BACK:
                action = constants.GameActions.LEFT
            elif action == constants.EnvActions.FORWARD:
                action = constants.GameActions.RIGHT
            elif action == constants.EnvActions.BACK_ATTACK:
                action = constants.GameActions.LEFT_ATTACK
            elif action == constants.EnvActions.FORWARD_ATTACK:
                action = constants.GameActions.RIGHT_ATTACK
        else:
            if action == constants.EnvActions.BACK:
                action = constants.GameActions.RIGHT
            elif action == constants.EnvActions.FORWARD:
                action = constants.GameActions.LEFT
            elif action == constants.EnvActions.BACK_ATTACK:
                action = constants.GameActions.RIGHT_ATTACK
            elif action == constants.EnvActions.FORWARD_ATTACK:
                action = constants.GameActions.LEFT_ATTACK
        return constants.ACTION_TO_BITS[action]

    def get_encoded_state(self) -> footsies_pb2.EncodedGameState:
        """Gets the current encoded game state by calling the GetEncodedState RPC."""
        try:
            return self.stub.GetEncodedState(footsies_pb2.Empty())
        except Exception as e:
            logger.error(f"Error calling GetEncodedState with exception: {e}")
            raise

    def get_state(self) -> footsies_pb2.GameState:
        """Gets the current game state by calling the GetState RPC."""
        try:
            return self.stub.GetState(footsies_pb2.Empty())
        except Exception as e:
            logger.error(f"Error calling GetState with exception: {e}")
            raise

    def is_ready(self) -> bool:
        """Checks if the game is ready by calling the IsReady RPC."""
        try:
            return self.stub.IsReady(footsies_pb2.Empty()).value
        except Exception as e:
            logger.error(f"Error calling IsReady with exception: {e}")
            raise

    def reset_game(self) -> None:
        """Resets the game by calling the ResetGame RPC."""
        try:
            self.stub.ResetGame(footsies_pb2.Empty())
        except Exception as e:
            logger.error(f"Error calling ResetGame with exception: {e}")
            raise

    def start_game(self) -> None:
        """Starts the game by calling the StartGame RPC and waits until ready."""
        try:
            self.stub.StartGame(footsies_pb2.Empty())
            # Poll until the server reports readiness.
            while not self.is_ready():
                logger.info("Game not ready...")
                time.sleep(0.5)
            logger.info("StartGame called successfully")
        except Exception as e:
            logger.error(f"Error calling StartGame with exception: {e}")
            raise

    def step_n_frames(
        self, p1_action: int, p2_action: int, n_frames: int
    ) -> footsies_pb2.GameState:
        """Steps the game by n_frames with the given player actions. The provided actions will be repeated for all n_frames."""
        try:
            step_input = footsies_pb2.StepInput(
                p1_action=p1_action, p2_action=p2_action, nFrames=n_frames
            )
            return self.stub.StepNFrames(step_input)
        except Exception as e:
            logger.error(f"Error calling StepNFrames with exception: {e}")
            raise

    def _initialize_stub(self) -> footsies_pb2_grpc.FootsiesGameServiceStub:
        """Create the gRPC channel and service stub for the configured address."""
        try:
            channel = grpc.insecure_channel(f"{self.host}:{self.port}")
            return footsies_pb2_grpc.FootsiesGameServiceStub(channel)
        except grpc.RpcError as e:
            logger.error(f"Error connecting to gRPC stub with exception: {e}")
            raise
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/examples/envs/classes/multi_agent/footsies/game/footsies_game.py",
"license": "Apache License 2.0",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:rllib/examples/envs/classes/multi_agent/footsies/utils.py | import collections
import logging
import platform
from dataclasses import dataclass
from typing import Dict, Optional
import gymnasium as gym
import numpy as np
from ray.rllib.algorithms.algorithm import Algorithm
from ray.rllib.algorithms.callbacks import RLlibCallback
from ray.rllib.core.rl_module import RLModuleSpec
from ray.rllib.env.env_runner import EnvRunner
from ray.rllib.env.multi_agent_episode import MultiAgentEpisode
from ray.rllib.examples.envs.classes.multi_agent.footsies.game.constants import (
FOOTSIES_ACTION_IDS,
)
from ray.rllib.utils.metrics import ENV_RUNNER_RESULTS
from ray.rllib.utils.metrics.metrics_logger import MetricsLogger
from ray.rllib.utils.typing import EpisodeType
logger = logging.getLogger("ray.rllib")
@dataclass
class Matchup:
    """A candidate pairing of two RLModules plus its sampling probability."""
    p1: str  # ModuleID of the first participant
    p2: str  # ModuleID of the second participant
    prob: float  # Probability that Matchmaker samples this matchup
class Matchmaker:
    """Samples a policy matchup per episode and maps agents to modules."""

    def __init__(self, matchups: list[Matchup]):
        self.matchups = matchups
        self.probs = [m.prob for m in matchups]
        # episode-id -> {"p1": module_id, "p2": module_id}; filled lazily on
        # the first mapping query for an episode, emptied as agents resolve.
        self.current_matchups = collections.defaultdict(dict)

    def agent_to_module_mapping_fn(
        self, agent_id: str, episode: EpisodeType, **kwargs
    ) -> str:
        """Mapping function that retrieves policy_id from the sampled matchup"""
        episode_id = episode.id_
        if self.current_matchups.get(episode_id) is None:
            # First query for this episode: draw a matchup per the configured
            # probabilities, then randomize which policy takes which side.
            sampled = np.random.choice(a=self.matchups, p=self.probs)
            side_p1, side_p2 = np.random.choice(
                [sampled.p1, sampled.p2], size=2, replace=False
            )
            self.current_matchups[episode_id]["p1"] = side_p1
            self.current_matchups[episode_id]["p2"] = side_p2
        module_id = self.current_matchups[episode_id].pop(agent_id)
        # Once both agents are resolved, drop the per-episode entry.
        if not self.current_matchups[episode_id]:
            del self.current_matchups[episode_id]
        return module_id
class MetricsLoggerCallback(RLlibCallback):
    """Logs per-action usage and win-rate metrics for the Footsies env."""
    def __init__(self, main_policy: str) -> None:
        """Log experiment metrics
        Logs metrics after each episode step and at the end of each (train or eval) episode.
        Metrics logged at the end of each episode will be later used by MixManagerCallback
        to decide whether to add a new opponent to the mix.

        :param main_policy: ModuleID of the policy whose win rate is tracked.
        """
        super().__init__()
        self.main_policy = main_policy
        # Invert FOOTSIES_ACTION_IDS: numeric action-state id -> readable name.
        self.action_id_to_str = {
            action_id: action_str
            for action_str, action_id in FOOTSIES_ACTION_IDS.items()
        }
    def on_episode_step(
        self,
        *,
        episode: MultiAgentEpisode,
        env_runner: Optional[EnvRunner] = None,
        metrics_logger: Optional[MetricsLogger] = None,
        env: Optional[gym.Env] = None,
        env_index: int,
        **kwargs,
    ) -> None:
        """Log action usage frequency
        Log actions performed by both players at each step of the (training or evaluation) episode.
        """
        stage = "eval" if env_runner.config.in_evaluation else "train"
        # get the ModuleID for each agent
        p1_module = episode.module_for("p1")
        p2_module = episode.module_for("p2")
        # get action string for each agent
        # (read the current action-state from the unwrapped sub-env's cached
        # game state)
        p1_action_id = env.envs[
            env_index
        ].unwrapped.last_game_state.player1.current_action_id
        p2_action_id = env.envs[
            env_index
        ].unwrapped.last_game_state.player2.current_action_id
        p1_action_str = self.action_id_to_str[p1_action_id]
        p2_action_str = self.action_id_to_str[p2_action_id]
        # Count occurrences over a sliding window of 100 logged values.
        metrics_logger.log_value(
            key=f"footsies/{stage}/actions/{p1_module}/{p1_action_str}",
            value=1,
            reduce="sum",
            window=100,
        )
        metrics_logger.log_value(
            key=f"footsies/{stage}/actions/{p2_module}/{p2_action_str}",
            value=1,
            reduce="sum",
            window=100,
        )
    def on_episode_end(
        self,
        *,
        episode: MultiAgentEpisode,
        env_runner: Optional[EnvRunner] = None,
        metrics_logger: Optional[MetricsLogger] = None,
        env: Optional[gym.Env] = None,
        env_index: int,
        **kwargs,
    ) -> None:
        """Log win rates
        Log win rates of the main policy against its opponent at the end of the (training or evaluation) episode.
        """
        stage = "eval" if env_runner.config.in_evaluation else "train"
        # check status of "p1" and "p2"
        last_game_state = env.envs[env_index].unwrapped.last_game_state
        p1_dead = last_game_state.player1.is_dead
        p2_dead = last_game_state.player2.is_dead
        # get the ModuleID for each agent
        p1_module = episode.module_for("p1")
        p2_module = episode.module_for("p2")
        # The main policy wins iff its opponent's character died.
        if self.main_policy == p1_module:
            opponent_id = p2_module
            main_policy_win = p2_dead
        elif self.main_policy == p2_module:
            opponent_id = p1_module
            main_policy_win = p1_dead
        else:
            logger.info(
                f"RLlib {self.__class__.__name__}: Main policy: '{self.main_policy}' not found in this episode. "
                f"Policies in this episode are: '{p1_module}' and '{p2_module}'. "
                f"Check your multi_agent 'policy_mapping_fn'. "
                f"Metrics logging for this episode will be skipped."
            )
            return
        # Draws (double-KO or timeout with both alive) are logged separately
        # and excluded from the win-rate metrics.
        if p1_dead and p2_dead:
            metrics_logger.log_value(
                key=f"footsies/{stage}/both_dead/{self.main_policy}/vs_{opponent_id}",
                value=1,
                reduce="mean",
                window=100,
            )
        elif not p1_dead and not p2_dead:
            metrics_logger.log_value(
                key=f"footsies/{stage}/both_alive/{self.main_policy}/vs_{opponent_id}",
                value=1,
                reduce="mean",
                window=100,
            )
        else:
            # log the win rate against the opponent with an 'opponent_id'
            metrics_logger.log_value(
                key=f"footsies/{stage}/win_rates/{self.main_policy}/vs_{opponent_id}",
                value=int(main_policy_win),
                reduce="mean",
                window=100,
            )
            # log the win rate, without specifying the opponent
            # this metric collected from the eval env runner
            # will be used to decide whether to add
            # a new opponent at the current level.
            metrics_logger.log_value(
                key=f"footsies/{stage}/win_rates/{self.main_policy}/vs_any",
                value=int(main_policy_win),
                reduce="mean",
                window=100,
            )
class MixManagerCallback(RLlibCallback):
    """Track win rates and manage the mix of self-play opponents.

    After every evaluation round, the main policy's win rate against the
    current opponent pool is checked. When it exceeds ``win_rate_threshold``,
    a new opponent is added to the mix: first any fixed (scripted) RL modules
    in ``fixed_modules_progression_sequence`` order, and once those are
    exhausted, frozen snapshots of the main policy itself.
    """

    def __init__(
        self,
        win_rate_threshold: float,
        main_policy: str,
        target_mix_size: int,
        # BUGFIX: the original signature used `starting_modules=list[str]` and
        # `fixed_modules_progression_sequence=tuple[str]`, i.e. the *type
        # objects* as default values (an annotation/default mix-up). Calling
        # `list(...)`/`tuple(...)` on those below raises TypeError. Use the
        # defaults documented in the original inline comments instead.
        starting_modules=("lstm", "noop"),
        fixed_modules_progression_sequence=("noop", "back"),
    ) -> None:
        """Track win rates and manage mix of opponents.

        Args:
            win_rate_threshold: Win rate the main policy must exceed against
                the current mix before a new opponent is added.
            main_policy: Module ID of the policy being trained.
            target_mix_size: Mix size reported via ``on_train_result`` so Ray
                Tune can stop once it is reached.
            starting_modules: Module IDs initially present in the mix.
            fixed_modules_progression_sequence: Order in which fixed (scripted)
                RL modules are added to the mix before trained snapshots of the
                main policy are used.
        """
        super().__init__()
        self.win_rate_threshold = win_rate_threshold
        self.main_policy = main_policy
        self.target_mix_size = target_mix_size
        # Order of fixed RL modules to be added to the mix.
        self.fixed_modules_progression_sequence = tuple(
            fixed_modules_progression_sequence
        )
        # RLModules that are currently in the mix.
        self.modules_in_mix = list(starting_modules)
        # Counter used to name new frozen snapshots of the main policy.
        self._trained_policy_idx = 0

    def on_evaluate_end(
        self,
        *,
        algorithm: Algorithm,
        metrics_logger: Optional[MetricsLogger] = None,
        evaluation_metrics: dict,
        **kwargs,
    ) -> None:
        """Check win rates and add new opponent if necessary.

        Check the win rate of the main policy against its current opponent.
        If the win rate exceeds the specified threshold, add a new opponent to
        the mix, by modifying:

        1. the policy_mapping_fn for (training and evaluation) env runners
        2. if the new policy is a trained one (not a fixed RL module), the
           Algorithm's state (the newly added RLModule is initialized from the
           main policy's weights)
        """
        _main_module = algorithm.get_module(self.main_policy)
        new_module_id = None
        new_module_spec = None
        # This metric is logged by the win-rate callback for every eval episode.
        win_rate = evaluation_metrics[ENV_RUNNER_RESULTS][
            f"footsies/eval/win_rates/{self.main_policy}/vs_any"
        ]
        if win_rate > self.win_rate_threshold:
            logger.info(
                f"RLlib {self.__class__.__name__}: Win rate for main policy '{self.main_policy}' "
                f"exceeded threshold ({win_rate} > {self.win_rate_threshold})."
                f" Adding new RL Module to the mix..."
            )
            # Check if a fixed RL module should be added to the mix next
            # (the first one in the progression sequence not already present).
            for module_id in self.fixed_modules_progression_sequence:
                if module_id not in self.modules_in_mix:
                    new_module_id = module_id
                    break
            # In case all fixed RL Modules are already in the mix (together
            # with the main policy), add a frozen snapshot of the main policy.
            if new_module_id is None:
                new_module_id = f"{self.main_policy}_v{self._trained_policy_idx}"
                new_module_spec = RLModuleSpec.from_module(_main_module)
                self._trained_policy_idx += 1
            # New policy mapping function: the main policy always plays against
            # the newly added policy.
            new_mapping_fn = Matchmaker(
                [
                    Matchup(
                        p1=self.main_policy,
                        p2=new_module_id,
                        prob=1.0,
                    )
                ]
            ).agent_to_module_mapping_fn
            # STEP 1: Add the new module first (if it's a trained module).
            if new_module_id not in self.fixed_modules_progression_sequence:
                # Add module to Learners and EnvRunners (but don't update mapping yet).
                algorithm.add_module(
                    module_id=new_module_id,
                    module_spec=new_module_spec,
                    new_agent_to_module_mapping_fn=None,  # Don't update mapping yet!
                )
                # Initialize the new module with main policy's weights.
                algorithm.set_state(
                    {
                        "learner_group": {
                            "learner": {
                                "rl_module": {
                                    new_module_id: _main_module.get_state(),
                                }
                            }
                        },
                    }
                )
                # STEP 2: CRITICAL - Update aggregator actors with the new module.
                # Aggregators run the learner connector pipeline which needs all modules.
                if (
                    hasattr(algorithm, "_aggregator_actor_manager")
                    and algorithm._aggregator_actor_manager
                ):
                    logger.info(
                        f"RLlib {self.__class__.__name__}: Updating aggregator actors "
                        f"with new module '{new_module_id}'..."
                    )
                    # Add the new module to each aggregator actor's MultiRLModule.
                    # Bind id/spec as lambda defaults to avoid late-binding issues.
                    algorithm._aggregator_actor_manager.foreach_actor(
                        func=lambda actor, mid=new_module_id, spec=new_module_spec: (
                            actor._module.add_module(
                                module_id=mid,
                                module=spec.build(),
                            )
                        )
                    )
                    # Sync weights from learner to aggregator actors.
                    weights = algorithm.learner_group.get_weights(
                        module_ids=[new_module_id]
                    )
                    algorithm._aggregator_actor_manager.foreach_actor(
                        func=lambda actor, w=weights: actor._module.set_state(w)
                    )
                    logger.info(
                        f"RLlib {self.__class__.__name__}: Aggregator actors updated successfully."
                    )
            # STEP 3: NOW update the policy mapping function on all EnvRunners.
            # At this point, the module exists everywhere (Learners, EnvRunners, Aggregators).
            algorithm.env_runner_group.foreach_env_runner(
                lambda er: er.config.multi_agent(policy_mapping_fn=new_mapping_fn),
                local_env_runner=True,
            )
            algorithm.eval_env_runner_group.foreach_env_runner(
                lambda er: er.config.multi_agent(policy_mapping_fn=new_mapping_fn),
                local_env_runner=True,
            )
            # Update algorithm's config to maintain consistency; the config is
            # frozen, so temporarily unfreeze it.
            algorithm.config._is_frozen = False
            algorithm.config.multi_agent(policy_mapping_fn=new_mapping_fn)
            algorithm.config.freeze()
            # Update the current mix list.
            self.modules_in_mix.append(new_module_id)
        else:
            logger.info(
                f"RLlib {self.__class__.__name__}: Win rate for main policy '{self.main_policy}' "
                f"did not exceed threshold ({win_rate} <= {self.win_rate_threshold})."
            )

    def on_train_result(
        self,
        *,
        algorithm: Algorithm,
        metrics_logger: Optional[MetricsLogger] = None,
        result: Dict,
        **kwargs,
    ) -> None:
        """Report the current mix size at the end of a training iteration.

        That tells Ray Tune whether to stop training (once the
        'target_mix_size' has been reached).
        """
        result["mix_size"] = len(self.modules_in_mix)
def platform_for_binary_to_download(render: bool) -> str:
    """Return the Footsies binary variant name for the current OS.

    Args:
        render: If True, pick the windowed (rendering) build; otherwise the
            headless/server build.

    Returns:
        One of "mac_windowed", "mac_headless", "linux_windowed",
        "linux_server".

    Raises:
        RuntimeError: If the host OS is neither macOS nor Linux.
    """
    # (windowed, headless) build names per supported OS.
    variants = {
        "Darwin": ("mac_windowed", "mac_headless"),
        "Linux": ("linux_windowed", "linux_server"),
    }
    system = platform.system()
    if system not in variants:
        raise RuntimeError(f"Unsupported platform: {platform.system()}")
    windowed, headless = variants[system]
    return windowed if render else headless
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/examples/envs/classes/multi_agent/footsies/utils.py",
"license": "Apache License 2.0",
"lines": 332,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:rllib/examples/multi_agent/self_play_footsies.py | """
Multi-agent RLlib Footsies Simplified Example (PPO)
About:
- This example as a simplified version of "rllib/examples/ppo/multi_agent_footsies_ppo.py",
which has more detailed comments and instructions. Please refer to that example for more information.
- This example is created to test the self-play training progression with footsies.
- Simplified version runs with single learner (cpu), single env runner, and single eval env runner.
"""
import platform
from pathlib import Path
from ray.rllib.examples.algorithms.ppo.multi_agent_footsies_ppo import (
config,
env_creator,
stop,
)
from ray.rllib.examples.utils import (
add_rllib_example_script_args,
run_rllib_example_script_experiment,
)
from ray.tune.registry import register_env
parser = add_rllib_example_script_args(
    default_iters=500,
    default_timesteps=5_000_000,
)

# Script-specific CLI flags, defined table-driven as (flag, add_argument-kwargs)
# pairs so each option occupies one entry; registration order matches the help
# output of the original script.
_EXTRA_CLI_ARGS = [
    (
        "--train-start-port",
        dict(
            type=int,
            default=45001,
            help="First port number for the Footsies training environment server (default: 45001). Each server gets its own port.",
        ),
    ),
    (
        "--eval-start-port",
        dict(
            type=int,
            default=55001,
            help="First port number for the Footsies evaluation environment server (default: 55001) Each server gets its own port.",
        ),
    ),
    (
        "--binary-download-dir",
        dict(
            type=Path,
            default="/tmp/ray/binaries/footsies",
            help="Directory to download Footsies binaries (default: /tmp/ray/binaries/footsies)",
        ),
    ),
    (
        "--binary-extract-dir",
        dict(
            type=Path,
            default="/tmp/ray/binaries/footsies",
            help="Directory to extract Footsies binaries (default: /tmp/ray/binaries/footsies)",
        ),
    ),
    (
        "--render",
        dict(
            action="store_true",
            default=False,
            help="Whether to render the Footsies environment. Default is False.",
        ),
    ),
    (
        "--win-rate-threshold",
        dict(
            type=float,
            default=0.55,
            help="The main policy should have at least 'win-rate-threshold' win rate against the "
            "other policy to advance to the next level. Moving to the next level "
            "means adding a new policy to the mix.",
        ),
    ),
    (
        "--target-mix-size",
        dict(
            type=int,
            default=4,
            help="Target number of policies (RLModules) in the mix to consider the test passed. "
            "The initial mix size is 2: 'main policy' vs. 'other'. "
            "`--target-mix-size=4` means that 2 new policies will be added to the mix. "
            "Whether to add new policy is decided by checking the '--win-rate-threshold' condition. ",
        ),
    ),
    (
        "--rollout-fragment-length",
        dict(
            type=int,
            default=256,
            help="The length of each rollout fragment to be collected by the EnvRunners when sampling.",
        ),
    ),
    (
        "--log-unity-output",
        dict(
            action="store_true",
            default=False,
            help="Whether to log Unity output (from the game engine). Default is False.",
        ),
    ),
]
for _flag, _kwargs in _EXTRA_CLI_ARGS:
    parser.add_argument(_flag, **_kwargs)

args = parser.parse_args()
register_env(name="FootsiesEnv", env_creator=env_creator)

# Stop once the opponent mix has grown to the requested size.
stop["mix_size"] = args.target_mix_size

# Detect platform and choose appropriate binary (windowed when --render is set,
# headless/server otherwise).
_system = platform.system()
if _system == "Darwin":
    binary_to_download = "mac_windowed" if args.render else "mac_headless"
elif _system == "Linux":
    binary_to_download = "linux_windowed" if args.render else "linux_server"
else:
    raise RuntimeError(f"Unsupported platform: {platform.system()}")

config.environment(
    env="FootsiesEnv",
    env_config={
        "train_start_port": args.train_start_port,
        "eval_start_port": args.eval_start_port,
        "binary_download_dir": args.binary_download_dir,
        "binary_extract_dir": args.binary_extract_dir,
        "binary_to_download": binary_to_download,
        "log_unity_output": args.log_unity_output,
    },
).training(
    # One rollout fragment per env runner per training batch.
    train_batch_size_per_learner=args.rollout_fragment_length
    * (args.num_env_runners or 1),
)
if __name__ == "__main__":
    # Train until the opponent mix reaches the target size (or the default
    # iteration/timestep limits from the shared example-script args kick in).
    _experiment_kwargs = dict(
        base_config=config,
        args=args,
        stop=stop,
        success_metric={"mix_size": args.target_mix_size},
    )
    results = run_rllib_example_script_experiment(**_experiment_kwargs)
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/examples/multi_agent/self_play_footsies.py",
"license": "Apache License 2.0",
"lines": 120,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/data/datatype.py | from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
from ray.data._internal.tensor_extensions.arrow import (
_infer_pyarrow_type,
)
from ray.util.annotations import PublicAPI
@PublicAPI(stability="alpha")
class TypeCategory(str, Enum):
    """High-level categories of data types.

    These categories correspond to groups of concrete data types.
    Use DataType.is_of(category) to check if a DataType belongs to a category.
    """

    # Matches DataType.is_list_type() (list, large_list, fixed_size_list, views).
    LIST = "list"
    # Matches only large_list / large_list_view Arrow types.
    LARGE_LIST = "large_list"
    # Matches DataType.is_struct_type().
    STRUCT = "struct"
    # Matches DataType.is_map_type().
    MAP = "map"
    # Matches DataType.is_tensor_type() (Ray's Arrow tensor extension types).
    TENSOR = "tensor"
    # Matches DataType.is_temporal_type() (date/time/timestamp/duration).
    TEMPORAL = "temporal"
# Maps generated factory-method name -> (zero-arg pyarrow type constructor,
# human-readable description interpolated into the generated docstring).
# Consumed by the _factory_methods class decorator below.
# NOTE(review): the annotation uses the builtin `callable` rather than
# typing.Callable; it works at runtime but a checker-friendly
# Callable[[], pa.DataType] would require importing Callable — confirm.
PYARROW_TYPE_DEFINITIONS: Dict[str, Tuple[callable, str]] = {
    "int8": (pa.int8, "an 8-bit signed integer"),
    "int16": (pa.int16, "a 16-bit signed integer"),
    "int32": (pa.int32, "a 32-bit signed integer"),
    "int64": (pa.int64, "a 64-bit signed integer"),
    "uint8": (pa.uint8, "an 8-bit unsigned integer"),
    "uint16": (pa.uint16, "a 16-bit unsigned integer"),
    "uint32": (pa.uint32, "a 32-bit unsigned integer"),
    "uint64": (pa.uint64, "a 64-bit unsigned integer"),
    "float32": (pa.float32, "a 32-bit floating point number"),
    "float64": (pa.float64, "a 64-bit floating point number"),
    "string": (pa.string, "a variable-length string"),
    "bool": (pa.bool_, "a boolean value"),
    "binary": (pa.binary, "variable-length binary data"),
}
def _factory_methods(cls: type):
    """Metaprogramming: Class decorator to generate factory methods for PyArrow types using from_arrow.

    This decorator automatically creates class methods for common PyArrow data types.
    Each generated method is a convenient factory that calls cls.from_arrow(pa.type()).

    Generated methods include:
    - Signed integers: int8, int16, int32, int64
    - Unsigned integers: uint8, uint16, uint32, uint64
    - Floating point: float32, float64
    - Other types: string, bool, binary

    Examples of generated methods::

        @classmethod
        def int32(cls):
            \"\"\"Create a DataType representing a 32-bit signed integer.

            Returns:
                DataType: A DataType with PyArrow int32 type
            \"\"\"
            return cls.from_arrow(pa.int32())

        @classmethod
        def string(cls):
            \"\"\"Create a DataType representing a variable-length string.

            Returns:
                DataType: A DataType with PyArrow string type
            \"\"\"
            return cls.from_arrow(pa.string())

    Usage:
        Instead of DataType.from_arrow(pa.int32()), you can use DataType.int32()
    """
    for method_name, (pa_func, description) in PYARROW_TYPE_DEFINITIONS.items():
        # Inner function so each generated method closes over its own
        # name/func/desc bindings (avoids the classic late-binding-closure
        # bug where every method would see the last loop iteration's values).
        def create_method(name, func, desc):
            def factory_method(cls):
                return cls.from_arrow(func())

            # Fill in metadata so the generated classmethod introspects
            # (help(), __qualname__) like a hand-written one.
            factory_method.__doc__ = f"""Create a DataType representing {desc}.

            Returns:
                DataType: A DataType with PyArrow {name} type
            """
            factory_method.__name__ = name
            factory_method.__qualname__ = f"{cls.__name__}.{name}"
            return classmethod(factory_method)

        setattr(cls, method_name, create_method(method_name, pa_func, description))
    return cls
@PublicAPI(stability="alpha")
@dataclass
@_factory_methods
class DataType:
    """A simplified Ray Data DataType supporting Arrow, NumPy, and Python types."""

    # Physical dtype: The concrete type implementation
    # (e.g., pa.list_(pa.int64()), np.float64, str).
    _physical_dtype: Union[pa.DataType, np.dtype, type]

    def __post_init__(self):
        """Validate the _physical_dtype after initialization."""
        # TODO: Support Pandas extension types
        if not isinstance(
            self._physical_dtype,
            (pa.DataType, np.dtype, type),
        ):
            raise TypeError(
                f"DataType supports only PyArrow DataType, NumPy dtype, or Python type, but was given type {type(self._physical_dtype)}."
            )

    def is_of(self, category: Union["TypeCategory", str]) -> bool:
        """Check if this DataType belongs to a specific type category.

        Args:
            category: The category to check against.

        Returns:
            True if the DataType belongs to the category.
        """
        if isinstance(category, str):
            try:
                category = TypeCategory(category)
            except ValueError:
                # Unknown category strings are simply "not a member".
                return False
        if category == TypeCategory.LIST:
            return self.is_list_type()
        elif category == TypeCategory.LARGE_LIST:
            if not self.is_arrow_type():
                return False
            pa_type = self._physical_dtype
            # is_large_list_view only exists on newer pyarrow versions.
            return pa.types.is_large_list(pa_type) or (
                hasattr(pa.types, "is_large_list_view")
                and pa.types.is_large_list_view(pa_type)
            )
        elif category == TypeCategory.STRUCT:
            return self.is_struct_type()
        elif category == TypeCategory.MAP:
            return self.is_map_type()
        elif category == TypeCategory.TENSOR:
            return self.is_tensor_type()
        elif category == TypeCategory.TEMPORAL:
            return self.is_temporal_type()
        return False

    # Type checking methods
    def is_arrow_type(self) -> bool:
        """Check if this DataType is backed by a PyArrow DataType.

        Returns:
            bool: True if the internal type is a PyArrow DataType
        """
        return isinstance(self._physical_dtype, pa.DataType)

    def is_numpy_type(self) -> bool:
        """Check if this DataType is backed by a NumPy dtype.

        Returns:
            bool: True if the internal type is a NumPy dtype
        """
        return isinstance(self._physical_dtype, np.dtype)

    def is_python_type(self) -> bool:
        """Check if this DataType is backed by a Python type.

        Returns:
            bool: True if the internal type is a Python type
        """
        return isinstance(self._physical_dtype, type)

    # Conversion methods
    def to_arrow_dtype(self, values: Optional[List[Any]] = None) -> pa.DataType:
        """
        Convert the DataType to a PyArrow DataType.

        Args:
            values: Optional list of values to infer the Arrow type from.
                Required if the DataType is a Python type.

        Returns:
            A PyArrow DataType
        """
        if self.is_arrow_type():
            return self._physical_dtype
        else:
            if isinstance(self._physical_dtype, np.dtype):
                return pa.from_numpy_dtype(self._physical_dtype)
            else:
                # Python types carry no Arrow mapping of their own, so the
                # Arrow type must be inferred from sample values.
                assert (
                    values is not None and len(values) > 0
                ), "Values are required to infer Arrow type if the provided type is a Python type"
                return _infer_pyarrow_type(values)

    def to_numpy_dtype(self) -> np.dtype:
        """Convert the DataType to a NumPy dtype.

        For PyArrow types, attempts to convert via pandas dtype.
        For Python types, returns object dtype.

        Returns:
            np.dtype: A NumPy dtype representation

        Examples:
            >>> import numpy as np
            >>> DataType.from_numpy(np.dtype('int64')).to_numpy_dtype()
            dtype('int64')
            >>> DataType.from_numpy(np.dtype('float32')).to_numpy_dtype()
            dtype('float32')
        """
        if self.is_numpy_type():
            return self._physical_dtype
        elif self.is_arrow_type():
            try:
                # For most basic arrow types, this will work
                pandas_dtype = self._physical_dtype.to_pandas_dtype()
                if isinstance(pandas_dtype, np.dtype):
                    return pandas_dtype
                else:
                    # If pandas returns an extension dtype, fall back to object
                    return np.dtype("object")
            except (TypeError, NotImplementedError, pa.ArrowNotImplementedError):
                return np.dtype("object")
        else:
            return np.dtype("object")

    def to_python_type(self) -> type:
        """Get the internal type if it's a Python type.

        This method doesn't perform conversion, it only returns the internal
        type if it's already a Python type.

        Returns:
            type: The internal Python type

        Raises:
            ValueError: If the DataType is not backed by a Python type

        Examples:
            >>> dt = DataType(int)
            >>> dt.to_python_type()
            <class 'int'>
            >>> DataType.int64().to_python_type()  # doctest: +SKIP
            ValueError: DataType is not backed by a Python type
        """
        if self.is_python_type():
            return self._physical_dtype
        else:
            raise ValueError(
                f"DataType {self} is not backed by a Python type. "
                f"Use to_arrow_dtype() or to_numpy_dtype() for conversion."
            )

    # Factory methods from external systems
    @classmethod
    def from_arrow(cls, arrow_type: pa.DataType) -> "DataType":
        """Create a DataType from a PyArrow DataType.

        Args:
            arrow_type: A PyArrow DataType to wrap

        Returns:
            DataType: A DataType wrapping the given PyArrow type

        Examples:
            >>> import pyarrow as pa
            >>> from ray.data.datatype import DataType
            >>> DataType.from_arrow(pa.timestamp('s'))
            DataType(arrow:timestamp[s])
            >>> DataType.from_arrow(pa.int64())
            DataType(arrow:int64)
        """
        return cls(_physical_dtype=arrow_type)

    @classmethod
    def from_numpy(cls, numpy_dtype: Union[np.dtype, str]) -> "DataType":
        """Create a DataType from a NumPy dtype.

        Args:
            numpy_dtype: A NumPy dtype object or string representation

        Returns:
            DataType: A DataType wrapping the given NumPy dtype

        Examples:
            >>> import numpy as np
            >>> from ray.data.datatype import DataType
            >>> DataType.from_numpy(np.dtype('int32'))
            DataType(numpy:int32)
            >>> DataType.from_numpy('float64')
            DataType(numpy:float64)
        """
        if isinstance(numpy_dtype, str):
            numpy_dtype = np.dtype(numpy_dtype)
        return cls(_physical_dtype=numpy_dtype)

    @classmethod
    def infer_dtype(cls, value: Any) -> "DataType":
        """Infer DataType from a Python value, handling numpy, Arrow, and Python types.

        Args:
            value: Any Python value to infer the type from

        Returns:
            DataType: The inferred data type

        Examples:
            >>> import numpy as np
            >>> from ray.data.datatype import DataType
            >>> DataType.infer_dtype(5)
            DataType(arrow:int64)
            >>> DataType.infer_dtype("hello")
            DataType(arrow:string)
            >>> DataType.infer_dtype(np.int32(42))
            DataType(numpy:32)  # doctest: +SKIP
        """
        # 1. Handle numpy arrays and scalars
        if isinstance(value, (np.ndarray, np.generic)):
            return cls.from_numpy(value.dtype)
        # 2. Try PyArrow type inference for regular Python values
        try:
            inferred_arrow_type = _infer_pyarrow_type([value])
        except Exception:
            inferred_arrow_type = None
        if inferred_arrow_type is not None:
            return cls.from_arrow(inferred_arrow_type)
        # 3. Fall back to the raw Python type.
        # BUGFIX: previously a *successful* inference that returned None fell
        # off the end of the function and implicitly returned None instead of
        # a DataType; only the exception path fell back to cls(type(value)).
        return cls(type(value))

    def __repr__(self) -> str:
        if self.is_arrow_type():
            return f"DataType(arrow:{self._physical_dtype})"
        elif self.is_numpy_type():
            return f"DataType(numpy:{self._physical_dtype})"
        else:
            return f"DataType(python:{self._physical_dtype.__name__})"

    def __eq__(self, other: "DataType") -> bool:
        if not isinstance(other, DataType):
            return False
        # Ensure they're from the same type system by checking the actual type
        # of the internal type object, not just the value
        if type(self._physical_dtype) is not type(other._physical_dtype):
            return False
        return self._physical_dtype == other._physical_dtype

    def __hash__(self) -> int:
        # Include the type of the internal type in the hash to ensure
        # different type systems don't collide
        return hash((type(self._physical_dtype), self._physical_dtype))

    @classmethod
    def list(cls, value_type: "DataType") -> "DataType":
        """Create a DataType representing a list with the given element type.

        Args:
            value_type: The DataType of the list elements.

        Returns:
            DataType: A DataType with PyArrow list type

        Examples:
            >>> from ray.data.datatype import DataType
            >>> DataType.list(DataType.int64())  # Exact match: list<int64>
            DataType(arrow:list<item: int64>)
        """
        value_arrow_type = value_type.to_arrow_dtype()
        return cls.from_arrow(pa.list_(value_arrow_type))

    @classmethod
    def large_list(cls, value_type: "DataType") -> "DataType":
        """Create a DataType representing a large_list with the given element type.

        Args:
            value_type: The DataType of the list elements.

        Returns:
            DataType: A DataType with PyArrow large_list type

        Examples:
            >>> DataType.large_list(DataType.int64())
            DataType(arrow:large_list<item: int64>)
        """
        value_arrow_type = value_type.to_arrow_dtype()
        return cls.from_arrow(pa.large_list(value_arrow_type))

    @classmethod
    def fixed_size_list(cls, value_type: "DataType", list_size: int) -> "DataType":
        """Create a DataType representing a fixed-size list.

        Args:
            value_type: The DataType of the list elements
            list_size: The fixed size of the list

        Returns:
            DataType: A DataType with PyArrow fixed_size_list type

        Examples:
            >>> from ray.data.datatype import DataType
            >>> DataType.fixed_size_list(DataType.float32(), 3)
            DataType(arrow:fixed_size_list<item: float>[3])
        """
        value_arrow_type = value_type.to_arrow_dtype()
        return cls.from_arrow(pa.list_(value_arrow_type, list_size))

    @classmethod
    def struct(cls, fields: List[Tuple[str, "DataType"]]) -> "DataType":
        """Create a DataType representing a struct with the given fields.

        Args:
            fields: List of (field_name, field_type) tuples.

        Returns:
            DataType: A DataType with PyArrow struct type

        Examples:
            >>> from ray.data.datatype import DataType
            >>> DataType.struct([("x", DataType.int64()), ("y", DataType.float64())])
            DataType(arrow:struct<x: int64, y: double>)
        """
        arrow_fields = [(name, dtype.to_arrow_dtype()) for name, dtype in fields]
        return cls.from_arrow(pa.struct(arrow_fields))

    @classmethod
    def map(
        cls,
        key_type: "DataType",
        value_type: "DataType",
    ) -> "DataType":
        """Create a DataType representing a map with the given key and value types.

        Args:
            key_type: The DataType of the map keys.
            value_type: The DataType of the map values.

        Returns:
            DataType: A DataType with PyArrow map type

        Examples:
            >>> from ray.data.datatype import DataType
            >>> DataType.map(DataType.string(), DataType.int64())
            DataType(arrow:map<string, int64>)
        """
        key_arrow_type = key_type.to_arrow_dtype()
        value_arrow_type = value_type.to_arrow_dtype()
        return cls.from_arrow(pa.map_(key_arrow_type, value_arrow_type))

    @classmethod
    def tensor(
        cls,
        shape: Tuple[int, ...],
        dtype: "DataType",
    ) -> "DataType":
        """Create a DataType representing a fixed-shape tensor.

        Args:
            shape: The fixed shape of the tensor.
            dtype: The DataType of the tensor elements.

        Returns:
            DataType: A DataType with Ray's ArrowTensorType

        Examples:
            >>> from ray.data.datatype import DataType
            >>> DataType.tensor(shape=(3, 4), dtype=DataType.float32())  # doctest: +ELLIPSIS
            DataType(arrow:ArrowTensorType(...))
        """
        # Imported lazily to avoid a module-level circular import.
        from ray.data._internal.tensor_extensions.arrow import ArrowTensorType

        element_arrow_type = dtype.to_arrow_dtype()
        return cls.from_arrow(ArrowTensorType(shape, element_arrow_type))

    @classmethod
    def variable_shaped_tensor(
        cls,
        dtype: "DataType",
        ndim: int = 2,
    ) -> "DataType":
        """Create a DataType representing a variable-shaped tensor.

        Args:
            dtype: The DataType of the tensor elements.
            ndim: The number of dimensions of the tensor. Defaults to 2.

        Returns:
            DataType: A DataType with Ray's ArrowVariableShapedTensorType

        Examples:
            >>> from ray.data.datatype import DataType
            >>> DataType.variable_shaped_tensor(dtype=DataType.float32(), ndim=2)  # doctest: +ELLIPSIS
            DataType(arrow:ArrowVariableShapedTensorType(...))
        """
        # Imported lazily to avoid a module-level circular import.
        from ray.data._internal.tensor_extensions.arrow import (
            ArrowVariableShapedTensorType,
        )

        element_arrow_type = dtype.to_arrow_dtype()
        return cls.from_arrow(ArrowVariableShapedTensorType(element_arrow_type, ndim))

    @classmethod
    def temporal(
        cls,
        temporal_type: str,
        unit: Optional[str] = None,
        tz: Optional[str] = None,
    ) -> "DataType":
        """Create a DataType representing a temporal type.

        Args:
            temporal_type: Type of temporal value - one of:

                - "timestamp": Timestamp with optional unit and timezone
                - "date32": 32-bit date (days since UNIX epoch)
                - "date64": 64-bit date (milliseconds since UNIX epoch)
                - "time32": 32-bit time of day (s or ms precision)
                - "time64": 64-bit time of day (us or ns precision)
                - "duration": Time duration with unit
            unit: Time unit for timestamp/time/duration types:

                - timestamp: "s", "ms", "us", "ns" (default: "us")
                - time32: "s", "ms" (default: "s")
                - time64: "us", "ns" (default: "us")
                - duration: "s", "ms", "us", "ns" (default: "us")
            tz: Optional timezone string for timestamp types (e.g., "UTC",
                "America/New_York")

        Returns:
            DataType: A DataType with PyArrow temporal type

        Raises:
            ValueError: If ``temporal_type`` is unknown, or ``unit`` is invalid
                for time32/time64.

        Examples:
            >>> from ray.data.datatype import DataType
            >>> DataType.temporal("timestamp", unit="s")
            DataType(arrow:timestamp[s])
            >>> DataType.temporal("timestamp", unit="us", tz="UTC")
            DataType(arrow:timestamp[us, tz=UTC])
            >>> DataType.temporal("date32")
            DataType(arrow:date32[day])
            >>> DataType.temporal("time64", unit="ns")
            DataType(arrow:time64[ns])
            >>> DataType.temporal("duration", unit="ms")
            DataType(arrow:duration[ms])
        """
        temporal_type_lower = temporal_type.lower()
        if temporal_type_lower == "timestamp":
            unit = unit or "us"
            return cls.from_arrow(pa.timestamp(unit, tz=tz))
        elif temporal_type_lower == "date32":
            return cls.from_arrow(pa.date32())
        elif temporal_type_lower == "date64":
            return cls.from_arrow(pa.date64())
        elif temporal_type_lower == "time32":
            unit = unit or "s"
            if unit not in ("s", "ms"):
                raise ValueError(f"time32 unit must be 's' or 'ms', got {unit}")
            return cls.from_arrow(pa.time32(unit))
        elif temporal_type_lower == "time64":
            unit = unit or "us"
            if unit not in ("us", "ns"):
                raise ValueError(f"time64 unit must be 'us' or 'ns', got {unit}")
            return cls.from_arrow(pa.time64(unit))
        elif temporal_type_lower == "duration":
            unit = unit or "us"
            return cls.from_arrow(pa.duration(unit))
        else:
            raise ValueError(
                f"Invalid temporal_type '{temporal_type}'. Must be one of: "
                f"'timestamp', 'date32', 'date64', 'time32', 'time64', 'duration'"
            )

    def is_list_type(self) -> bool:
        """Check if this DataType represents a list type

        Returns:
            True if this is any list variant (list, large_list, fixed_size_list)

        Examples:
            >>> DataType.list(DataType.int64()).is_list_type()
            True
            >>> DataType.int64().is_list_type()
            False
        """
        if not self.is_arrow_type():
            return False
        pa_type = self._physical_dtype
        return (
            pa.types.is_list(pa_type)
            or pa.types.is_large_list(pa_type)
            or pa.types.is_fixed_size_list(pa_type)
            # Pyarrow 16.0.0+ supports list views
            or (hasattr(pa.types, "is_list_view") and pa.types.is_list_view(pa_type))
            or (
                hasattr(pa.types, "is_large_list_view")
                and pa.types.is_large_list_view(pa_type)
            )
        )

    def is_tensor_type(self) -> bool:
        """Check if this DataType represents a tensor type.

        Returns:
            True if this is a tensor type
        """
        if not self.is_arrow_type():
            return False
        from ray.data._internal.tensor_extensions.arrow import (
            get_arrow_extension_tensor_types,
        )

        return isinstance(self._physical_dtype, get_arrow_extension_tensor_types())

    def is_struct_type(self) -> bool:
        """Check if this DataType represents a struct type.

        Returns:
            True if this is a struct type

        Examples:
            >>> DataType.struct([("x", DataType.int64())]).is_struct_type()
            True
            >>> DataType.int64().is_struct_type()
            False
        """
        if not self.is_arrow_type():
            return False
        return pa.types.is_struct(self._physical_dtype)

    def is_map_type(self) -> bool:
        """Check if this DataType represents a map type.

        Returns:
            True if this is a map type

        Examples:
            >>> DataType.map(DataType.string(), DataType.int64()).is_map_type()
            True
            >>> DataType.int64().is_map_type()
            False
        """
        if not self.is_arrow_type():
            return False
        return pa.types.is_map(self._physical_dtype)

    def is_nested_type(self) -> bool:
        """Check if this DataType represents a nested type.

        Nested types include: lists, structs, maps, unions

        Returns:
            True if this is any nested type

        Examples:
            >>> DataType.list(DataType.int64()).is_nested_type()
            True
            >>> DataType.struct([("x", DataType.int64())]).is_nested_type()
            True
            >>> DataType.int64().is_nested_type()
            False
        """
        if not self.is_arrow_type():
            return False
        return pa.types.is_nested(self._physical_dtype)

    def _get_underlying_arrow_type(self) -> pa.DataType:
        """Get the underlying Arrow type, handling dictionary and run-end encoding.

        Returns:
            The underlying PyArrow type, unwrapping dictionary/run-end encoding

        Raises:
            ValueError: If called on a non-Arrow type (NumPy or Python types)
        """
        if not self.is_arrow_type():
            raise ValueError(
                f"Cannot get Arrow type for non-Arrow DataType {self}. "
                f"Type is: {type(self._physical_dtype)}"
            )
        pa_type = self._physical_dtype
        if pa.types.is_dictionary(pa_type):
            return pa_type.value_type
        elif pa.types.is_run_end_encoded(pa_type):
            return pa_type.value_type
        return pa_type

    def is_numerical_type(self) -> bool:
        """Check if this DataType represents a numerical type.

        Numerical types support arithmetic operations and include:
        integers, floats, decimals

        Returns:
            True if this is a numerical type

        Examples:
            >>> DataType.int64().is_numerical_type()
            True
            >>> DataType.float32().is_numerical_type()
            True
            >>> DataType.string().is_numerical_type()
            False
        """
        if self.is_arrow_type():
            underlying = self._get_underlying_arrow_type()
            return (
                pa.types.is_integer(underlying)
                or pa.types.is_floating(underlying)
                or pa.types.is_decimal(underlying)
            )
        elif self.is_numpy_type():
            return (
                np.issubdtype(self._physical_dtype, np.integer)
                or np.issubdtype(self._physical_dtype, np.floating)
                or np.issubdtype(self._physical_dtype, np.complexfloating)
            )
        elif self.is_python_type():
            return self._physical_dtype in (int, float, complex)
        return False

    def is_string_type(self) -> bool:
        """Check if this DataType represents a string type.

        Includes: string, large_string, string_view

        Returns:
            True if this is a string type

        Examples:
            >>> DataType.string().is_string_type()
            True
            >>> DataType.int64().is_string_type()
            False
        """
        if self.is_arrow_type():
            underlying = self._get_underlying_arrow_type()
            return (
                pa.types.is_string(underlying)
                or pa.types.is_large_string(underlying)
                or (
                    hasattr(pa.types, "is_string_view")
                    and pa.types.is_string_view(underlying)
                )
            )
        elif self.is_numpy_type():
            # Check for Unicode (U) or byte string (S) types
            return self._physical_dtype.kind in ("U", "S")
        elif self.is_python_type():
            return self._physical_dtype is str
        return False

    def is_binary_type(self) -> bool:
        """Check if this DataType represents a binary type.

        Includes: binary, large_binary, binary_view, fixed_size_binary

        Returns:
            True if this is a binary type

        Examples:
            >>> DataType.binary().is_binary_type()
            True
            >>> DataType.string().is_binary_type()
            False
        """
        if self.is_arrow_type():
            underlying = self._get_underlying_arrow_type()
            return (
                pa.types.is_binary(underlying)
                or pa.types.is_large_binary(underlying)
                or (
                    hasattr(pa.types, "is_binary_view")
                    and pa.types.is_binary_view(underlying)
                )
                or pa.types.is_fixed_size_binary(underlying)
            )
        elif self.is_numpy_type():
            # NumPy doesn't have a specific binary type, but void or object dtypes might contain bytes
            return self._physical_dtype.kind == "V"  # void type (raw bytes)
        elif self.is_python_type():
            return self._physical_dtype in (bytes, bytearray)
        return False

    def is_temporal_type(self) -> bool:
        """Check if this DataType represents a temporal type.

        Includes: date, time, timestamp, duration, interval

        Returns:
            True if this is a temporal type

        Examples:
            >>> import pyarrow as pa
            >>> DataType.from_arrow(pa.timestamp('s')).is_temporal_type()
            True
            >>> DataType.int64().is_temporal_type()
            False
        """
        if self.is_arrow_type():
            underlying = self._get_underlying_arrow_type()
            return pa.types.is_temporal(underlying)
        elif self.is_numpy_type():
            return np.issubdtype(self._physical_dtype, np.datetime64) or np.issubdtype(
                self._physical_dtype, np.timedelta64
            )
        elif self.is_python_type():
            import datetime

            return self._physical_dtype in (
                datetime.datetime,
                datetime.date,
                datetime.time,
                datetime.timedelta,
            )
        return False
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/datatype.py",
"license": "Apache License 2.0",
"lines": 682,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:doc/source/serve/tutorials/deployment-serve-llm/ci/nb2py.py | #!/usr/bin/env python3
import argparse
import nbformat
def convert_notebook(
    input_path: str, output_path: str, ignore_cmds: bool = False
) -> None:
    """
    Read a Jupyter notebook and write a Python script, converting all %%bash
    cells and IPython "!" commands into subprocess.run calls that raise on error.
    Cells that load or autoreload extensions are ignored.

    Args:
        input_path: Path to the .ipynb notebook to read.
        output_path: Path of the .py script to write (overwritten if present).
        ignore_cmds: If True, drop bash cells and shell-command cells entirely
            instead of converting them to subprocess.run calls.
    """
    nb = nbformat.read(input_path, as_version=4)
    with open(output_path, "w") as out:
        for cell in nb.cells:
            # Only process code cells
            if cell.cell_type != "code":
                continue
            lines = cell.source.splitlines()
            # Skip cells that load or autoreload extensions
            if any(
                l.strip().startswith("%load_ext autoreload")
                or l.strip().startswith("%autoreload all")
                for l in lines
            ):
                continue
            # Detect a %%bash cell
            if lines and lines[0].strip().startswith("%%bash"):
                if ignore_cmds:
                    continue
                # The whole cell body (minus the %%bash line) becomes one shell
                # script; check=True makes a failing command fail the script.
                bash_script = "\n".join(lines[1:]).rstrip()
                out.write("import subprocess\n")
                out.write(
                    f"subprocess.run(r'''{bash_script}''',\n"
                    "               shell=True,\n"
                    "               check=True,\n"
                    "               executable='/bin/bash')\n\n"
                )
            else:
                # Detect any IPython '!' shell commands in code lines
                has_bang = any(line.lstrip().startswith("!") or line.lstrip().startswith("%") for line in lines)
                # Start with "serve run" "serve shutdown" "curl" or "anyscale service" commands
                to_ignore_cmd = (
                    "serve run",
                    "serve shutdown",
                    "curl",
                    "anyscale service",
                )
                has_ignored_start = any(
                    line.lstrip().startswith(to_ignore_cmd) for line in lines
                )
                if has_bang or has_ignored_start:
                    if ignore_cmds:
                        continue
                    out.write("import subprocess\n")
                    # NOTE(review): only "!" lines are rewritten below; "%"-magic
                    # and bare serve/curl lines fall into the else branch and are
                    # emitted verbatim, which may not be valid Python — confirm.
                    for line in lines:
                        stripped = line.lstrip()
                        if stripped.startswith("!"):
                            cmd = stripped[1:].lstrip()
                            out.write(
                                f"subprocess.run(r'''{cmd}''',\n"
                                "               shell=True,\n"
                                "               check=True,\n"
                                "               executable='/bin/bash')\n"
                            )
                        else:
                            out.write(line.rstrip() + "\n")
                    out.write("\n")
                else:
                    # Regular Python cell:
                    code = cell.source.rstrip()
                    if "client.chat.completions.create" in code:
                        continue  # Model isn't deployed in CI so skip cells calling the service
                    # else, dump as-is
                    out.write(cell.source.rstrip() + "\n\n")
def main() -> None:
    """CLI entry point: parse arguments and run the notebook conversion."""
    arg_parser = argparse.ArgumentParser(
        description="Convert a Jupyter notebook to a Python script, preserving bash cells and '!' commands as subprocess calls unless ignored with --ignore-cmds."
    )
    arg_parser.add_argument("input_nb", help="Path to the input .ipynb file")
    arg_parser.add_argument("output_py", help="Path for the output .py script")
    arg_parser.add_argument(
        "--ignore-cmds", action="store_true", help="Ignore bash cells and '!' commands"
    )
    parsed = arg_parser.parse_args()
    convert_notebook(parsed.input_nb, parsed.output_py, ignore_cmds=parsed.ignore_cmds)
if __name__ == "__main__":
    main()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/tutorials/deployment-serve-llm/ci/nb2py.py",
"license": "Apache License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:release/hello_world_tests/hello_world.py | import ray
@ray.remote
def hello_world():
    """Ray remote task that returns a constant greeting string."""
    return "Hello, world!"
def main():
    """Submit the hello_world task and print its result (ray.get blocks)."""
    print(ray.get(hello_world.remote()))
if __name__ == "__main__":
    main()
| {
"repo_id": "ray-project/ray",
"file_path": "release/hello_world_tests/hello_world.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:release/ray_release/custom_byod_build_init_helper.py | import hashlib
import os
from typing import Dict, List, Optional, Tuple
import yaml
from ray_release.configs.global_config import get_global_config
from ray_release.logger import logger
from ray_release.test import Test
from ray_release.util import ANYSCALE_RAY_IMAGE_PREFIX, AZURE_REGISTRY_NAME
def generate_custom_build_step_key(image: str) -> str:
    """Derive a stable 20-character Buildkite step key from an image name.

    The build id (first dash-separated tag component) is stripped before
    hashing so the key is identical across CI builds of the same image.
    """
    repository, tag = image.split(":")
    # Drop the leading build-id component of the tag.
    stable_tag = "-".join(tag.split("-")[1:])
    stable_name = f"{repository}:{stable_tag}"
    logger.info(f"Image: {stable_name}")
    step_key = hashlib.sha256(stable_name.encode()).hexdigest()[:20]
    logger.info(f"Result: {step_key}")
    return step_key
def get_images_from_tests(
    tests: List[Test], build_id: str
) -> Tuple[
    List[Tuple[str, str, Optional[str], Optional[str], Optional[Dict[str, str]]]],
    Dict[str, List[str]],
]:
    """Get a list of custom BYOD images to build from a list of tests.

    Args:
        tests: Release tests to scan for custom BYOD image requirements.
        build_id: CI build id used to construct the image tags.

    Returns:
        A tuple of:
        - Deduplicated build specs ``(image_tag, base_image, post_build_script,
          python_depset, runtime_env)`` in first-seen order.
        - Mapping from image tag to the names of all tests that use it.
    """
    custom_byod_images = {}
    custom_image_test_names_map = {}
    for test in tests:
        if not test.require_custom_byod_image():
            continue
        image_tag = test.get_anyscale_byod_image(build_id)
        if image_tag not in custom_byod_images:
            # `or None` normalizes an empty runtime-env dict to None.
            runtime_env = test.get_byod_runtime_env() or None
            custom_byod_images[image_tag] = (
                image_tag,
                test.get_anyscale_base_byod_image(build_id),
                test.get_byod_post_build_script(),
                test.get_byod_python_depset(),
                runtime_env,
            )
            logger.info(f"To be built: {image_tag}")
        # Every test sharing the image is recorded, not just the first one.
        custom_image_test_names_map.setdefault(image_tag, []).append(test.get_name())
    return list(custom_byod_images.values()), custom_image_test_names_map
def create_custom_build_yaml(destination_file: str, tests: List[Test]) -> None:
    """Create a yaml file for building custom BYOD images.

    Writes a Buildkite group with one step per unique custom image; images
    that require no customization (no post-build script, depset, or runtime
    env) are skipped. No file is written if there is nothing to build.

    Raises:
        ValueError: If byod_ecr_region / byod_ecr are missing from the config.
    """
    config = get_global_config()
    if not config or not config.get("byod_ecr_region") or not config.get("byod_ecr"):
        raise ValueError("byod_ecr_region and byod_ecr must be set in the config")
    # "$$RAYCI_BUILD_ID" is escaped for Buildkite and expanded at pipeline time.
    custom_byod_images, custom_image_test_names_map = get_images_from_tests(
        tests, "$$RAYCI_BUILD_ID"
    )
    if not custom_byod_images:
        return
    build_config = {"group": "Custom images build", "steps": []}
    ray_want_commit = os.getenv("RAY_WANT_COMMIT_IN_IMAGE", "")
    for (
        image,
        base_image,
        post_build_script,
        python_depset,
        runtime_env,
    ) in custom_byod_images:
        logger.info(
            f"Building custom BYOD image: {image}, base image: {base_image}, "
            f"post build script: {post_build_script}, runtime_env: {runtime_env}"
        )
        # Nothing customizes this image -> no build step needed.
        if not post_build_script and not python_depset and not runtime_env:
            continue
        step_key = generate_custom_build_step_key(image)
        step_name = _get_step_name(image, step_key, custom_image_test_names_map[image])
        env_args = ""
        if runtime_env:
            # Sorted so the command string (and thus yaml diffs) is deterministic.
            env_args = " ".join(
                f"--env {k}={v}" for k, v in sorted(runtime_env.items())
            )
        build_cmd_parts = [
            "bazelisk run //release:custom_byod_build --",
            f"--image-name {image}",
            f"--base-image {base_image}",
        ]
        if post_build_script:
            build_cmd_parts.append(f"--post-build-script {post_build_script}")
        if python_depset:
            build_cmd_parts.append(f"--python-depset {python_depset}")
        if env_args:
            build_cmd_parts.append(env_args)
        build_cmd = " ".join(build_cmd_parts)
        step = {
            "label": step_name,
            "key": step_key,
            "instance_type": "release-medium",
            "mount_buildkite_agent": True,
            # Login to GCP, Azure and AWS ECR registries before building.
            "commands": [
                f"export RAY_WANT_COMMIT_IN_IMAGE={ray_want_commit}",
                "bash release/gcloud_docker_login.sh release/aws2gce_iam.json",
                "export PATH=$(pwd)/google-cloud-sdk/bin:$$PATH",
                "bash release/azure_docker_login.sh",
                f"az acr login --name {AZURE_REGISTRY_NAME}",
                f"aws ecr get-login-password --region {config['byod_ecr_region']} | docker login --username AWS --password-stdin {config['byod_ecr']}",
                build_cmd,
            ],
        }
        step["depends_on"] = get_prerequisite_step(image, base_image)
        build_config["steps"].append(step)
    with open(destination_file, "w") as f:
        yaml.dump(build_config, f, default_flow_style=False, sort_keys=False)
def get_prerequisite_step(image: str, base_image: str) -> Optional[str]:
    """Get the base image build step for a job that depends on it."""
    config = get_global_config()
    # Image name is the last path component of the repository part of the tag.
    repository = image.split(":")[0]
    image_name = repository.split("/")[-1]
    if base_image.startswith(ANYSCALE_RAY_IMAGE_PREFIX):
        # Custom images on top of released anyscale/ray bases only need forge.
        return "forge"
    if image_name == "ray-ml":
        return config["release_image_step_ray_ml"]
    if image_name == "ray-llm":
        return config["release_image_step_ray_llm"]
    return config["release_image_step_ray"]
def _get_step_name(image: str, step_key: str, test_names: List[str]) -> str:
ecr, tag = image.split(":")
ecr_repo = ecr.split("/")[-1]
tag_without_build_id_and_custom_hash = tag.split("-")[1:-1]
step_name = f":tapioca: build custom: {ecr_repo}:{'-'.join(tag_without_build_id_and_custom_hash)} ({step_key})"
for test_name in test_names[:2]:
step_name += f" {test_name}"
return step_name
| {
"repo_id": "ray-project/ray",
"file_path": "release/ray_release/custom_byod_build_init_helper.py",
"license": "Apache License 2.0",
"lines": 128,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:release/ray_release/tests/test_custom_byod_build_init_helper.py | import os
import sys
import tempfile
from unittest import mock
import pytest
import yaml
from ray_release.bazel import bazel_runfile
from ray_release.configs.global_config import get_global_config, init_global_config
from ray_release.custom_byod_build_init_helper import (
_get_step_name,
create_custom_build_yaml,
generate_custom_build_step_key,
get_prerequisite_step,
)
from ray_release.test import Test
from ray_release.util import AZURE_REGISTRY_NAME
# Load the OSS release config once at import time; the tests below read it
# back through get_global_config().
init_global_config(bazel_runfile("release/ray_release/configs/oss_config.yaml"))
@mock.patch.dict(os.environ, {"RAY_WANT_COMMIT_IN_IMAGE": "abc123"})
@mock.patch("ray_release.custom_byod_build_init_helper.get_images_from_tests")
def test_create_custom_build_yaml(mock_get_images_from_tests):
    """End-to-end check of the yaml emitted by create_custom_build_yaml.

    Covers step labels/keys, registry-login and build commands, skipping of
    images with no customization, the "forge" dependency for anyscale-based
    images, and env-only images.
    """
    config = get_global_config()
    # (image, base_image, post_build_script, python_depset, runtime_env) specs,
    # mirroring the tuple shape produced by get_images_from_tests.
    custom_byod_images = [
        (
            "ray-project/ray-ml:abc123-custom-123456789abc123456789",
            "ray-project/ray-ml:abc123-base",
            "custom_script.sh",
            None,
            None,
        ),
        (
            "ray-project/ray-ml:abc123-custom1",
            "ray-project/ray-ml:abc123-base",
            "",
            None,
            None,
        ),
        (
            "ray-project/ray-ml:abc123-py37-cpu-custom-abcdef123456789abc123456789",
            "ray-project/ray-ml:abc123-py37-cpu-base",
            "custom_script.sh",
            None,
            None,
        ),  # longer than 40 chars
        (
            "ray-project/ray-ml:abc123-py37-cpu-custom-abcdef123456789abc987654321",
            "ray-project/ray-ml:abc123-py37-cpu-base",
            "custom_script.sh",
            "python_depset.lock",
            None,
        ),
        (
            "custom_ecr/ray-ml:abc123-py37-cpu-custom-abcdef123456789abc987654321",
            "anyscale/ray:2.50.0-py37-cpu",
            "custom_script.sh",
            "python_depset.lock",
            None,
        ),
        (
            "ray-project/ray-ml:abc123-envonly-abcdef123456789abc000000000",
            "ray-project/ray-ml:abc123-base",
            None,
            None,
            {"MY_ENV": "my_value", "OTHER_ENV": "other_value"},
        ),
    ]
    custom_image_test_names_map = {
        "ray-project/ray-ml:abc123-custom-123456789abc123456789": ["test_1"],
        "ray-project/ray-ml:abc123-custom1": ["test_2"],
        "ray-project/ray-ml:abc123-py37-cpu-custom-abcdef123456789abc123456789": [
            "test_1",
            "test_2",
        ],
        "ray-project/ray-ml:abc123-py37-cpu-custom-abcdef123456789abc987654321": [
            "test_1",
            "test_2",
        ],
        "custom_ecr/ray-ml:abc123-py37-cpu-custom-abcdef123456789abc987654321": [
            "test_3",
        ],
        "ray-project/ray-ml:abc123-envonly-abcdef123456789abc000000000": [
            "test_4",
        ],
    }
    mock_get_images_from_tests.return_value = (
        custom_byod_images,
        custom_image_test_names_map,
    )
    step_keys = [
        generate_custom_build_step_key(image)
        for image, _, _, _, _ in custom_byod_images
    ]
    # List of dummy tests
    tests = [
        Test(
            name="test_1",
            frequency="manual",
            group="test_group",
            team="test_team",
            working_dir="test_working_dir",
        ),
        Test(
            name="test_2",
            frequency="manual",
            group="test_group",
            team="test_team",
            working_dir="test_working_dir",
        ),
        Test(
            name="test_3",
            frequency="manual",
            group="test_group",
            team="test_team",
            working_dir="test_working_dir",
            cluster={
                "ray_version": "2.50.0",
            },
        ),
        Test(
            name="test_4",
            frequency="manual",
            group="test_group",
            team="test_team",
            working_dir="test_working_dir",
        ),
    ]
    with tempfile.TemporaryDirectory() as tmpdir:
        create_custom_build_yaml(
            os.path.join(tmpdir, "custom_byod_build.rayci.yml"), tests
        )
        with open(os.path.join(tmpdir, "custom_byod_build.rayci.yml"), "r") as f:
            content = yaml.safe_load(f)
            assert content["group"] == "Custom images build"
            # 6 specs, but the second one has no customization -> 5 steps.
            assert len(content["steps"]) == 5
            assert (
                content["steps"][0]["label"]
                == f":tapioca: build custom: ray-ml:custom ({step_keys[0]}) test_1"
            )
            assert (
                content["steps"][1]["label"]
                == f":tapioca: build custom: ray-ml:py37-cpu-custom ({step_keys[2]}) test_1 test_2"
            )
            assert (
                content["steps"][2]["label"]
                == f":tapioca: build custom: ray-ml:py37-cpu-custom ({step_keys[3]}) test_1 test_2"
            )
            assert (
                "export RAY_WANT_COMMIT_IN_IMAGE=abc123"
                in content["steps"][0]["commands"][0]
            )
            assert content["steps"][0]["commands"][4].startswith(
                "az acr login"
            ) and content["steps"][0]["commands"][4].endswith(AZURE_REGISTRY_NAME)
            assert (
                f"--region {config['byod_ecr_region']}"
                in content["steps"][0]["commands"][5]
            )
            assert f"{config['byod_ecr']}" in content["steps"][0]["commands"][5]
            assert (
                f"--image-name {custom_byod_images[0][0]}"
                in content["steps"][0]["commands"][6]
            )
            assert (
                f"--image-name {custom_byod_images[2][0]}"
                in content["steps"][1]["commands"][6]
            )
            assert (
                f"--image-name {custom_byod_images[3][0]}"
                in content["steps"][2]["commands"][6]
            )
            # anyscale/ray base image -> step only depends on forge.
            assert content["steps"][3]["depends_on"] == "forge"
            # Verify env-only image step has --env flags
            env_only_cmd = content["steps"][4]["commands"][6]
            assert f"--image-name {custom_byod_images[5][0]}" in env_only_cmd
            assert "--env MY_ENV=my_value" in env_only_cmd
            assert "--env OTHER_ENV=other_value" in env_only_cmd
            # Env-only step should not have --post-build-script or --python-depset
            assert "--post-build-script" not in env_only_cmd
            assert "--python-depset" not in env_only_cmd
def test_get_prerequisite_step():
    """Each image family maps to its release step; anyscale bases map to forge."""
    config = get_global_config()
    cases = [
        (
            "ray-project/ray-ml:abc123-custom",
            "ray-project/ray-ml:abc123-base",
            config["release_image_step_ray_ml"],
        ),
        (
            "ray-project/ray-llm:abc123-custom",
            "ray-project/ray-llm:abc123-base",
            config["release_image_step_ray_llm"],
        ),
        (
            "ray-project/ray:abc123-custom",
            "ray-project/ray:abc123-base",
            config["release_image_step_ray"],
        ),
        ("anyscale/ray:abc123-custom", "anyscale/ray:abc123-base", "forge"),
    ]
    for image, base_image, expected_step in cases:
        assert get_prerequisite_step(image, base_image) == expected_step
def test_get_step_name():
    """Label drops the build id and custom hash and lists at most two tests."""
    image = "ray-project/ray-ml:a1b2c3d4-py39-cpu-abcdef123456789abc123456789"
    expected = ":tapioca: build custom: ray-ml:py39-cpu (abc123) test_1 test_2"
    # A third test name must be truncated from the label.
    assert _get_step_name(image, "abc123", ["test_1", "test_2", "test_3"]) == expected
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "release/ray_release/tests/test_custom_byod_build_init_helper.py",
"license": "Apache License 2.0",
"lines": 218,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/train/v2/tests/test_local_mode.py | import math
import os
import sys
from unittest.mock import MagicMock, patch
import lightgbm
import pandas as pd
import pytest
import xgboost
from datasets import Dataset
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from transformers import AutoConfig, AutoModelForCausalLM, Trainer, TrainingArguments
import ray
from ray.data.preprocessors import Concatenator
from ray.tests.conftest import _ray_start_cluster
from ray.train import ScalingConfig
from ray.train.constants import TRAIN_DATASET_KEY
from ray.train.examples.pytorch.torch_linear_example import (
train_func as linear_train_func,
)
from ray.train.huggingface.transformers import (
RayTrainReportCallback as HuggingFaceRayTrainReportCallback,
prepare_trainer,
)
from ray.train.lightgbm import (
LightGBMTrainer,
RayTrainReportCallback as LightGBMRayTrainReportCallback,
)
from ray.train.lightning import (
RayDDPStrategy,
RayFSDPStrategy,
RayLightningEnvironment,
RayTrainReportCallback as LightningRayTrainReportCallback,
)
from ray.train.lightning._lightning_utils import import_lightning
from ray.train.tests._huggingface_data import train_data, validation_data
from ray.train.tests.lightning_test_utils import DummyDataModule, LinearModule
from ray.train.tests.util import create_dict_checkpoint
from ray.train.torch import TorchTrainer
from ray.train.v2._internal.execution.local_mode.torch import LocalTorchController
from ray.train.v2._internal.execution.train_fn_utils import get_train_fn_utils
from ray.train.v2.api.data_parallel_trainer import DataParallelTrainer
from ray.train.v2.jax import JaxTrainer
from ray.train.xgboost import (
RayTrainReportCallback as XGBoostRayTrainReportCallback,
XGBoostTrainer,
)
if sys.version_info >= (3, 12):
    # Tensorflow is not installed for Python 3.12 because of keras compatibility.
    pass
else:
    from ray.train.examples.tf.tensorflow_regression_example import (
        train_func as tensorflow_linear_train_func,
    )
    from ray.train.tensorflow import TensorflowTrainer
# Resolve the lightning module once at import time for the tests below.
# NOTE(review): presumably import_lightning handles the pytorch_lightning vs
# lightning package naming — confirm.
pl = import_lightning()
@pytest.fixture
def ray_start_6_cpus():
    """Start a local 6-CPU Ray instance for a test and shut it down afterwards."""
    address_info = ray.init(num_cpus=6)
    yield address_info
    # The code after the yield will run as teardown code.
    ray.shutdown()
@pytest.fixture
def ray_tpu_single_host(monkeypatch):
    """Start a mock single-host TPU Ray cluster with 2x4 v6e (8 chips per host)."""
    with _ray_start_cluster() as cluster:
        monkeypatch.setenv("TPU_ACCELERATOR_TYPE", "v6e-8")
        # Simulate one node with 8 TPU chips.
        cluster.add_node(
            num_cpus=4,
            resources={"TPU": 8},
        )
        ray.init(address=cluster.address)
        yield cluster
        # Disconnect the driver; the cluster context manager tears down nodes.
        ray.shutdown()
def test_data_parallel_trainer_local_mode():
    """num_workers=0 runs the train fn in-process yet still surfaces metrics."""
    def run_training():
        with create_dict_checkpoint({}) as checkpoint:
            ray.train.report(metrics={"test": 1}, checkpoint=checkpoint)
    trainer = DataParallelTrainer(
        run_training, scaling_config=ScalingConfig(num_workers=0)
    )
    result = trainer.fit()
    assert result.checkpoint
    assert result.metrics == {"test": 1}
def test_jax_trainer_local_mode(ray_tpu_single_host, monkeypatch):
    """JaxTrainer with num_workers=0 runs locally against a mocked jax module."""
    def jax_train_func():
        import jax
        devices = jax.devices()
        print(f"Devices on this worker: {devices}")
        ray.train.report({"result": [str(d) for d in devices]})
    # Replace jax entirely so the test needs no real TPU hardware.
    mock_jax = MagicMock()
    mock_jax.devices.return_value = ["TPU:0"]
    monkeypatch.setitem(sys.modules, "jax", mock_jax)
    trainer = JaxTrainer(
        train_loop_per_worker=jax_train_func,
        scaling_config=ScalingConfig(
            num_workers=0,
        ),
    )
    result = trainer.fit()
    assert result.error is None
    assert result.metrics == {"result": ["TPU:0"]}
def test_lightgbm_trainer_local_mode(ray_start_6_cpus):
    """LightGBMTrainer with num_workers=0 trains in-process and checkpoints."""
    def lightgbm_train_fn_per_worker(
        config: dict,
        label_column: str,
        dataset_keys: set,
        num_boost_round: int = 10,
    ):
        remaining_iters = num_boost_round
        train_ds_iter = ray.train.get_dataset_shard(TRAIN_DATASET_KEY)
        train_df = train_ds_iter.materialize().to_pandas()
        eval_ds_iters = {
            k: ray.train.get_dataset_shard(k)
            for k in dataset_keys
            if k != TRAIN_DATASET_KEY
        }
        eval_dfs = {k: d.materialize().to_pandas() for k, d in eval_ds_iters.items()}
        train_X, train_y = train_df.drop(label_column, axis=1), train_df[label_column]
        train_set = lightgbm.Dataset(train_X, label=train_y)
        # NOTE: Include the training dataset in the evaluation datasets.
        # This allows `train-*` metrics to be calculated and reported.
        valid_sets = [train_set]
        valid_names = [TRAIN_DATASET_KEY]
        for eval_name, eval_df in eval_dfs.items():
            eval_X, eval_y = eval_df.drop(label_column, axis=1), eval_df[label_column]
            valid_sets.append(lightgbm.Dataset(eval_X, label=eval_y))
            valid_names.append(eval_name)
        # Add network params of the worker group to enable distributed training.
        config.update(ray.train.lightgbm.get_network_params())
        lightgbm.train(
            params=config,
            train_set=train_set,
            num_boost_round=remaining_iters,
            valid_sets=valid_sets,
            valid_names=valid_names,
            init_model=None,
            callbacks=[LightGBMRayTrainReportCallback()],
        )
    data_raw = load_breast_cancer()
    dataset_df = pd.DataFrame(data_raw["data"], columns=data_raw["feature_names"])
    dataset_df["target"] = data_raw["target"]
    train_df, test_df = train_test_split(dataset_df, test_size=0.3)
    train_df_with_cat = train_df.copy()
    test_df_with_cat = test_df.copy()
    dataset_shard_size = 1
    # Alternate "A"/"B" labels so LightGBM sees a genuine categorical column.
    train_df_with_cat["categorical_column"] = pd.Series(
        (["A", "B"] * math.ceil(len(train_df_with_cat) / dataset_shard_size))[
            : len(train_df_with_cat)
        ]
    ).astype("category")
    test_df_with_cat["categorical_column"] = pd.Series(
        (["A", "B"] * math.ceil(len(test_df_with_cat) / dataset_shard_size))[
            : len(test_df_with_cat)
        ]
    ).astype("category")
    scale_config = ScalingConfig(num_workers=0)
    train_dataset = ray.data.from_pandas(train_df_with_cat)
    valid_dataset = ray.data.from_pandas(test_df_with_cat)
    trainer = LightGBMTrainer(
        train_loop_per_worker=lambda: lightgbm_train_fn_per_worker(
            config={},
            label_column="target",
            dataset_keys={TRAIN_DATASET_KEY, "valid"},
        ),
        train_loop_config={
            "objective": "binary",
            "metric": ["binary_logloss", "binary_error"],
        },
        scaling_config=scale_config,
        datasets={TRAIN_DATASET_KEY: train_dataset, "valid": valid_dataset},
    )
    result = trainer.fit()
    checkpoint = result.checkpoint
    assert checkpoint is not None
@pytest.mark.parametrize("datasource", ["dataloader", "datamodule"])
def test_lightning_trainer_local_mode(ray_start_6_cpus, datasource):
    """Lightning DDP training via TorchTrainer in local (num_workers=0) mode."""
    num_epochs = 1
    batch_size = 8
    dataset_size = 256
    dataset_shard_size = 1
    strategy_name = "ddp"
    accelerator = "cpu"
    strategy_map = {"ddp": RayDDPStrategy(), "fsdp": RayFSDPStrategy()}
    def train_loop():
        model = LinearModule(input_dim=32, output_dim=4, strategy=strategy_name)
        strategy = strategy_map[strategy_name]
        trainer = pl.Trainer(
            max_epochs=num_epochs,
            devices="auto",
            accelerator=accelerator,
            strategy=strategy,
            plugins=[RayLightningEnvironment()],
            callbacks=[LightningRayTrainReportCallback()],
        )
        datamodule = DummyDataModule(batch_size, dataset_size)
        # Feed data either as raw dataloaders or as a LightningDataModule.
        if datasource == "dataloader":
            trainer.fit(
                model,
                train_dataloaders=datamodule.train_dataloader(),
                val_dataloaders=datamodule.val_dataloader(),
            )
        if datasource == "datamodule":
            trainer.fit(model, datamodule=datamodule)
    trainer = TorchTrainer(
        train_loop_per_worker=train_loop,
        scaling_config=ScalingConfig(num_workers=0, use_gpu=(accelerator == "gpu")),
    )
    results = trainer.fit()
    assert results.metrics["epoch"] == num_epochs - 1
    # Single local shard: total steps = epochs * dataset_size / batch_size.
    assert (
        results.metrics["step"]
        == num_epochs * dataset_size / dataset_shard_size / batch_size
    )
    assert "loss" in results.metrics
    assert "val_loss" in results.metrics
@pytest.mark.skipif(
    sys.version_info >= (3, 12),
    reason="Tensorflow is not installed for Python 3.12 because of keras compatibility.",
)
def test_tensorflow_linear_local_mode(ray_start_4_cpus):
    """Also tests air Keras callback."""
    # NOTE(review): the `ray_start_4_cpus` fixture is not defined in this file;
    # presumably provided by a shared conftest — confirm.
    epochs = 1
    def train_func(config):
        result = tensorflow_linear_train_func(config)
        assert len(result) == epochs
    train_loop_config = {
        "lr": 1e-3,
        "batch_size": 32,
        "epochs": epochs,
    }
    scaling_config = ScalingConfig(num_workers=0)
    dataset = ray.data.read_csv("s3://anonymous@air-example-data/regression.csv")
    # Collapse the 100 x-columns into a single feature vector column "x".
    columns_to_concatenate = [f"x{i:03}" for i in range(100)]
    preprocessor = Concatenator(columns=columns_to_concatenate, output_column_name="x")
    dataset = preprocessor.transform(dataset)
    trainer = TensorflowTrainer(
        train_loop_per_worker=train_func,
        train_loop_config=train_loop_config,
        scaling_config=scaling_config,
        datasets={TRAIN_DATASET_KEY: dataset},
    )
    result = trainer.fit()
    assert not result.error
    assert result.checkpoint
def test_torch_trainer_local_mode(ray_start_6_cpus):
    """TorchTrainer local mode: loss decreases and a checkpoint is produced."""
    def train_func(config):
        result = linear_train_func(config)
        assert len(result) == epochs
        # Training should make progress: final loss below initial loss.
        assert result[-1]["loss"] < result[0]["loss"]
    epochs = 3
    scaling_config = ScalingConfig(num_workers=0)
    config = {"lr": 1e-2, "hidden_size": 1, "batch_size": 4, "epochs": epochs}
    trainer = TorchTrainer(
        train_loop_per_worker=train_func,
        train_loop_config=config,
        scaling_config=scaling_config,
    )
    result = trainer.fit()
    assert result.error is None
    assert result.metrics is not None
    assert result.metrics["loss"] is not None
    assert result.checkpoint
# Parameters for the Hugging Face Transformers end-to-end test below.
HF_BATCH_SIZE_PER_WORKER = 2
HF_MODEL_NAME = "hf-internal-testing/tiny-random-BloomForCausalLM"  # tiny model for CI speed
HF_MAX_EPOCHS = 1
HF_TRAIN_DATASET_SIZE = 16
@pytest.mark.parametrize("use_ray_data", [False, True])
def test_e2e_hf_local_mode(ray_start_4_cpus, use_ray_data):
    """End-to-end HF Transformers training in local mode, with and without Ray Data."""
    def get_transformers_configurations():
        """Get configurations with dynamic step calculations based on number of workers."""
        steps_per_epoch = HF_TRAIN_DATASET_SIZE // HF_BATCH_SIZE_PER_WORKER
        return {
            "epoch_gpu": {
                "evaluation_strategy": "epoch",
                "save_strategy": "epoch",
                "logging_strategy": "epoch",
                "eval_steps": None,
                "save_steps": None,
                "logging_steps": None,
                "no_cuda": False,
            },
            "steps_gpu": {
                "evaluation_strategy": "steps",
                "save_strategy": "steps",
                "logging_strategy": "steps",
                "eval_steps": steps_per_epoch,
                "save_steps": steps_per_epoch * 2,
                "logging_steps": 1,
                "no_cuda": False,
            },
            "steps_cpu": {
                "evaluation_strategy": "steps",
                "save_strategy": "steps",
                "logging_strategy": "steps",
                "eval_steps": steps_per_epoch,
                "save_steps": steps_per_epoch,
                "logging_steps": 1,
                "no_cuda": True,
            },
            "steps_cpu_local": {
                "evaluation_strategy": "steps",
                "save_strategy": "steps",
                "logging_strategy": "steps",
                "eval_steps": steps_per_epoch,
                "save_steps": steps_per_epoch,
                "logging_steps": 1,
                "no_cuda": True,
            },
        }
    # Local mode: CPU-only config, zero Ray Train workers.
    config_id = "steps_cpu_local"
    num_workers = 0
    def train_func(config):
        # Datasets
        if config["use_ray_data"]:
            train_ds_shard = ray.train.get_dataset_shard("train")
            eval_ds_shard = ray.train.get_dataset_shard("eval")
            train_dataset = train_ds_shard.iter_torch_batches(
                batch_size=HF_BATCH_SIZE_PER_WORKER
            )
            eval_dataset = eval_ds_shard.iter_torch_batches(
                batch_size=HF_BATCH_SIZE_PER_WORKER
            )
        else:
            train_df = pd.read_json(train_data)
            validation_df = pd.read_json(validation_data)
            train_dataset = Dataset.from_pandas(train_df)
            eval_dataset = Dataset.from_pandas(validation_df)
        # Model
        model_config = AutoConfig.from_pretrained(HF_MODEL_NAME)
        model = AutoModelForCausalLM.from_config(model_config)
        # HF Transformers Trainer
        training_args = TrainingArguments(
            f"{HF_MODEL_NAME}-wikitext2",
            evaluation_strategy=config["evaluation_strategy"],
            logging_strategy=config["logging_strategy"],
            save_strategy=config["save_strategy"],
            eval_steps=config["eval_steps"],
            save_steps=config["save_steps"],
            logging_steps=config["logging_steps"],
            num_train_epochs=config.get("num_train_epochs", HF_MAX_EPOCHS),
            max_steps=config.get("max_steps", -1),
            learning_rate=config.get("learning_rate", 2e-5),
            per_device_train_batch_size=HF_BATCH_SIZE_PER_WORKER,
            per_device_eval_batch_size=HF_BATCH_SIZE_PER_WORKER,
            weight_decay=0.01,
            disable_tqdm=True,
            no_cuda=config["no_cuda"],
            report_to="none",
        )
        trainer = Trainer(
            model=model,
            args=training_args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
        )
        # Report to Ray Train
        trainer.add_callback(HuggingFaceRayTrainReportCallback())
        trainer = prepare_trainer(trainer)
        # Start Training
        trainer.train()
    configurations = get_transformers_configurations()
    train_loop_config = configurations[config_id]
    # Calculate the num of Ray training iterations
    max_steps = HF_MAX_EPOCHS * HF_TRAIN_DATASET_SIZE // HF_BATCH_SIZE_PER_WORKER
    train_loop_config["use_ray_data"] = use_ray_data
    datasets = None
    if use_ray_data:
        # Must specify `max_steps` for Iterable Dataset
        train_loop_config["max_steps"] = max_steps
        train_df = pd.read_json(train_data)
        validation_df = pd.read_json(validation_data)
        ray_train_ds = ray.data.from_pandas(train_df)
        ray_eval_ds = ray.data.from_pandas(validation_df)
        datasets = {"train": ray_train_ds, "eval": ray_eval_ds}
    else:
        # Specify `num_train_epochs` for Map-style Dataset
        train_loop_config["num_train_epochs"] = HF_MAX_EPOCHS
    use_gpu = not train_loop_config["no_cuda"]
    trainer = TorchTrainer(
        train_func,
        train_loop_config=train_loop_config,
        scaling_config=ScalingConfig(num_workers=num_workers, use_gpu=use_gpu),
        datasets=datasets,
    )
    result = trainer.fit()
    assert result.metrics["step"] == max_steps
    assert "eval_loss" in result.metrics
    if not use_ray_data:
        assert result.metrics["epoch"] == HF_MAX_EPOCHS
def test_xgboost_trainer_local_mode(ray_start_4_cpus):
    """XGBoostTrainer local mode; get_model on the result checkpoint is deprecated."""
    def xgboost_train_fn_per_worker():
        label_column = "target"
        dataset_keys = {TRAIN_DATASET_KEY, "valid"}
        checkpoint = ray.train.get_checkpoint()
        starting_model = None
        remaining_iters = 10
        if checkpoint:
            # Resume: subtract already-boosted rounds from the round budget.
            starting_model = XGBoostRayTrainReportCallback.get_model(checkpoint)
            starting_iter = starting_model.num_boosted_rounds()
            remaining_iters = remaining_iters - starting_iter
        train_ds_iter = ray.train.get_dataset_shard(TRAIN_DATASET_KEY)
        train_df = train_ds_iter.materialize().to_pandas()
        eval_ds_iters = {
            k: ray.train.get_dataset_shard(k)
            for k in dataset_keys
            if k != TRAIN_DATASET_KEY
        }
        eval_dfs = {k: d.materialize().to_pandas() for k, d in eval_ds_iters.items()}
        train_X, train_y = train_df.drop(label_column, axis=1), train_df[label_column]
        dtrain = xgboost.DMatrix(train_X, label=train_y)
        # NOTE: Include the training dataset in the evaluation datasets.
        # This allows `train-*` metrics to be calculated and reported.
        evals = [(dtrain, TRAIN_DATASET_KEY)]
        for eval_name, eval_df in eval_dfs.items():
            eval_X, eval_y = eval_df.drop(label_column, axis=1), eval_df[label_column]
            evals.append((xgboost.DMatrix(eval_X, label=eval_y), eval_name))
        evals_result = {}
        # NOTE(review): no RayTrainReportCallback is attached here, so nothing
        # is reported back to Ray Train from this loop — confirm intended.
        xgboost.train(
            {},
            dtrain=dtrain,
            evals=evals,
            evals_result=evals_result,
            num_boost_round=remaining_iters,
            xgb_model=starting_model,
        )
    data_raw = load_breast_cancer()
    dataset_df = pd.DataFrame(data_raw["data"], columns=data_raw["feature_names"])
    dataset_df["target"] = data_raw["target"]
    train_df, test_df = train_test_split(dataset_df, test_size=0.3)
    train_dataset = ray.data.from_pandas(train_df)
    valid_dataset = ray.data.from_pandas(test_df)
    scale_config = ScalingConfig(num_workers=0)
    trainer = XGBoostTrainer(
        train_loop_per_worker=xgboost_train_fn_per_worker,
        train_loop_config={
            "tree_method": "approx",
            "objective": "binary:logistic",
            "eval_metric": ["logloss", "error"],
        },
        scaling_config=scale_config,
        datasets={TRAIN_DATASET_KEY: train_dataset, "valid": valid_dataset},
    )
    result = trainer.fit()
    with pytest.raises(DeprecationWarning):
        XGBoostTrainer.get_model(result.checkpoint)
def test_torch_distributed_variables_local_train_fn_utils():
    """Test that torch distributed variables are correctly used to create LocalTrainFnUtils."""
    # Test scenario 1: Without torch distributed environment variables
    with patch.dict(os.environ, {}, clear=True):
        controller = LocalTorchController("test_experiment")
        def dummy_train_func():
            train_fn_utils = get_train_fn_utils()
            # Verify default values when no torch distributed env vars are set
            context = train_fn_utils.get_context()
            assert context.get_world_size() == 1
            assert context.get_world_rank() == 0
            assert context.get_local_rank() == 0
            assert context.get_local_world_size() == 1
            assert context.get_node_rank() == 0
        controller.run(dummy_train_func)
    # Test scenario 2: With torch distributed environment variables (CPU)
    # Env vars follow the torchrun/torch.distributed launcher convention.
    torch_env_vars = {
        "RANK": "2",
        "LOCAL_RANK": "1",
        "WORLD_SIZE": "4",
        "LOCAL_WORLD_SIZE": "2",
        "MASTER_ADDR": "127.0.0.1",
        "MASTER_PORT": "29500",
    }
    with patch.dict(os.environ, torch_env_vars, clear=True), patch(
        "torch.distributed.is_initialized", return_value=False
    ), patch("torch.distributed.get_world_size", return_value=4), patch(
        "torch.distributed.get_rank", return_value=2
    ), patch(
        "torch.cuda.is_available", return_value=False
    ), patch(
        "torch.distributed.init_process_group"
    ) as mock_init_pg:
        controller = LocalTorchController("test_experiment")
        def dummy_train_func():
            train_fn_utils = get_train_fn_utils()
            # Verify torch distributed values are correctly passed
            context = train_fn_utils.get_context()
            assert context.get_world_size() == 4
            assert context.get_world_rank() == 2
            assert context.get_local_rank() == 1
            assert context.get_local_world_size() == 2
            assert (
                context.get_node_rank() == 1
            )  # global_rank // nproc_per_node = 2 // 2 = 1
        controller.run(dummy_train_func)
        # Verify torch.distributed methods were called with CPU backend
        mock_init_pg.assert_called_once_with(backend="gloo")
    # Test scenario 3: With torch distributed environment variables (GPU)
    with patch.dict(os.environ, torch_env_vars, clear=True), patch(
        "torch.distributed.is_initialized", return_value=False
    ), patch("torch.distributed.get_world_size", return_value=4), patch(
        "torch.distributed.get_rank", return_value=2
    ), patch(
        "torch.cuda.is_available", return_value=True
    ), patch(
        "torch.distributed.init_process_group"
    ) as mock_init_pg, patch(
        "torch.cuda.set_device"
    ) as mock_set_device:
        controller = LocalTorchController("test_experiment")
        def dummy_train_func():
            train_fn_utils = get_train_fn_utils()
            # Verify torch distributed values are correctly passed
            context = train_fn_utils.get_context()
            assert context.get_world_size() == 4
            assert context.get_world_rank() == 2
            assert context.get_local_rank() == 1
            assert context.get_local_world_size() == 2
            assert context.get_node_rank() == 1
        controller.run(dummy_train_func)
        # GPU path uses the nccl backend and pins the device to the local rank.
        mock_init_pg.assert_called_once_with(backend="nccl")
        mock_set_device.assert_called_once_with(1)
# Allow running this test module directly; -x stops at the first failure.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", "-x", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/tests/test_local_mode.py",
"license": "Apache License 2.0",
"lines": 520,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/train/v2/xgboost/config.py | from contextlib import contextmanager
from ray.train.v2._internal.execution.train_fn_utils import get_train_fn_utils
from ray.train.xgboost.config import XGBoostConfig as XGBoostConfigV1
class XGBoostConfig(XGBoostConfigV1):
    @property
    def train_func_context(self):
        """Context-manager factory that applies the parent config's
        distributed setup only when the run is actually distributed."""
        parent_context = super().train_func_context

        @contextmanager
        def collective_communication_context():
            # Local (non-distributed) runs need no collective setup/teardown.
            if not get_train_fn_utils().is_distributed():
                yield
                return
            with parent_context():
                yield

        return collective_communication_context
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/xgboost/config.py",
"license": "Apache License 2.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/train/v2/api/reported_checkpoint.py | from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Dict
from ray.util.annotations import PublicAPI
if TYPE_CHECKING:
from ray.train import Checkpoint
@dataclass
@PublicAPI(stability="alpha")
class ReportedCheckpoint:
    """A user-reported checkpoint and its associated metrics.
    Attributes:
        checkpoint: The checkpoint reported by the user.
        metrics: The metrics associated with that checkpoint.
    """

    # The checkpoint object reported by the user (lazy import via TYPE_CHECKING).
    checkpoint: "Checkpoint"
    # Free-form metrics dict reported together with this checkpoint.
    metrics: Dict[str, Any]
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/api/reported_checkpoint.py",
"license": "Apache License 2.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/data/_internal/planner/plan_download_op.py | import logging
import math
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Iterator, List, Optional
from urllib.parse import urlparse
import pyarrow as pa
import ray
from ray.data._internal.compute import ActorPoolStrategy, TaskPoolStrategy
from ray.data._internal.execution.interfaces import PhysicalOperator
from ray.data._internal.execution.operators.actor_pool_map_operator import (
ActorPoolMapOperator,
)
from ray.data._internal.execution.operators.map_operator import MapOperator
from ray.data._internal.execution.operators.map_transformer import (
BlockMapTransformFn,
MapTransformer,
)
from ray.data._internal.logical.operators import Download
from ray.data._internal.output_buffer import OutputBlockSizeOption
from ray.data._internal.util import RetryingPyFileSystem, make_async_gen
from ray.data.block import BlockAccessor
from ray.data.context import DataContext
from ray.data.datasource.path_util import _resolve_paths_and_filesystem
logger = logging.getLogger(__name__)
URI_DOWNLOAD_MAX_WORKERS = 16
def plan_download_op(
    op: Download,
    physical_children: List[PhysicalOperator],
    data_context: DataContext,
) -> MapOperator:
    """Plan the download operation with partitioning and downloading stages.

    Builds up to two physical operators:

    1. ``Partition(...)``: a single-actor stage that re-batches incoming blocks
       based on sampled file sizes. Skipped when the upstream operator is also
       a ``Download`` (see comment below).
    2. ``Download(...)``: a task-pool stage that downloads bytes for each URI
       column concurrently.

    Args:
        op: The logical ``Download`` operator to plan.
        physical_children: Already-planned inputs; exactly one is expected.
        data_context: Ray Data context providing sizing/retry configuration.

    Returns:
        The ``MapOperator`` that performs the actual downloads.
    """
    assert len(physical_children) == 1
    input_physical_dag = physical_children[0]
    # Detect a Download operator directly upstream so we can skip re-partitioning.
    upstream_op_is_download = False
    if len(input_physical_dag._logical_operators) == 1 and isinstance(
        input_physical_dag._logical_operators[0], Download
    ):
        upstream_op_is_download = True
    uri_column_names = op.uri_column_names
    uri_column_names_str = ", ".join(uri_column_names)
    output_bytes_column_names = op.output_bytes_column_names
    ray_remote_args = op.ray_remote_args
    filesystem = op.filesystem
    # Import _get_udf from the main planner file (kept as a function-local
    # import, presumably to avoid an import cycle — confirm before moving).
    from ray.data._internal.planner.plan_udf_map_op import (
        _generate_transform_fn_for_map_batches,
        _get_udf,
    )
    # If we have multiple download operators in a row, we should only include the partition actor
    # at the start of the chain. This is primarily done to prevent partition actors from bottlenecking
    # the chain because the interleaved operators would be a single actor. As a result, the
    # URIDownloader physical operator is responsible for outputting appropriately sized blocks.
    partition_map_operator = None
    if not upstream_op_is_download:
        # PartitionActor is a callable class, so we need ActorPoolStrategy
        partition_compute = ActorPoolStrategy(
            size=1, enable_true_multi_threading=True
        )  # Use single actor for partitioning
        fn, init_fn = _get_udf(
            PartitionActor,
            (),
            {},
            (uri_column_names, data_context, filesystem),
            {},
            compute=partition_compute,
        )
        block_fn = _generate_transform_fn_for_map_batches(fn)
        partition_transform_fns = [
            BlockMapTransformFn(
                block_fn,
                # NOTE: Disable block-shaping to produce blocks as is
                disable_block_shaping=True,
            ),
        ]
        partition_map_transformer = MapTransformer(
            partition_transform_fns,
            init_fn=init_fn,
        )
        partition_map_operator = ActorPoolMapOperator(
            partition_map_transformer,
            input_physical_dag,
            data_context,
            name=f"Partition({uri_column_names_str})",
            # NOTE: Partition actor doesn't use the user-provided `ray_remote_args`
            # since those only apply to the actual download tasks. Partitioning is
            # a lightweight internal operation that doesn't need custom resource
            # requirements.
            ray_remote_args=None,
            compute_strategy=partition_compute,  # Use actor-based compute for callable class
            # NOTE: We set `_generator_backpressure_num_objects` to -1 to unblock
            # backpressure since partitioning is extremely fast. Without this, the
            # partition actor gets bottlenecked by the Ray Data scheduler, which
            # can prevent Ray Data from launching enough download tasks.
            ray_actor_task_remote_args={"_generator_backpressure_num_objects": -1},
        )
    fn, init_fn = _get_udf(
        download_bytes_threaded,
        (uri_column_names, output_bytes_column_names, data_context, filesystem),
        {},
        None,
        None,
        None,
    )
    download_transform_fn = _generate_transform_fn_for_map_batches(fn)
    transform_fns = [
        BlockMapTransformFn(
            download_transform_fn,
            # Shape output blocks toward the context's target max block size.
            output_block_size_option=OutputBlockSizeOption.of(
                target_max_block_size=data_context.target_max_block_size
            ),
        ),
    ]
    download_compute = TaskPoolStrategy()
    download_map_transformer = MapTransformer(
        transform_fns,
        init_fn=init_fn,
    )
    # Wire the download stage to the partition stage when one was created,
    # otherwise directly to the upstream physical DAG.
    download_map_operator = MapOperator.create(
        download_map_transformer,
        partition_map_operator if partition_map_operator else input_physical_dag,
        data_context,
        name=f"Download({uri_column_names_str})",
        compute_strategy=download_compute,
        ray_remote_args=ray_remote_args,
    )
    return download_map_operator
def uri_to_path(uri: str) -> str:
    """Convert a URI into a filesystem path.

    For ``file://`` URIs only the path component is returned; for every other
    scheme the netloc (e.g. an S3 bucket) is prepended to the path.
    """
    # TODO(mowen): urlparse might be slow. in the future we could use a faster alternative.
    parts = urlparse(uri)
    prefix = "" if parts.scheme == "file" else parts.netloc
    return prefix + parts.path
def _arrow_batcher(table: pa.Table, output_batch_size: int):
    """Yield zero-copy slices of ``table`` with at most ``output_batch_size`` rows each."""
    total_rows = table.num_rows
    for start in range(0, total_rows, output_batch_size):
        # PyArrow's slice is zero-copy, so each batch is only a view.
        length = min(output_batch_size, total_rows - start)
        yield table.slice(start, length)
def download_bytes_threaded(
    block: pa.Table,
    uri_column_names: List[str],
    output_bytes_column_names: List[str],
    data_context: DataContext,
    filesystem: Optional["pa.fs.FileSystem"] = None,
) -> Iterator[pa.Table]:
    """Optimized version that uses make_async_gen for concurrent downloads.
    Supports downloading from multiple URI columns in a single operation.
    Args:
        block: Input PyArrow table containing URI columns.
        uri_column_names: Names of columns containing URIs to download.
        output_bytes_column_names: Names for the output columns containing downloaded bytes.
        data_context: Ray Data context for configuration.
        filesystem: PyArrow filesystem to use for reading remote files.
            If None, the filesystem is auto-detected from the path scheme.
    Yields:
        pa.Table: PyArrow table with the downloaded bytes added as new columns.
        Failed downloads yield ``None`` in the corresponding row.
    """
    if not isinstance(block, pa.Table):
        block = BlockAccessor.for_block(block).to_arrow()
    output_block = block
    # Download each URI column and add it to the output block
    for uri_column_name, output_bytes_column_name in zip(
        uri_column_names, output_bytes_column_names
    ):
        # Extract URIs from PyArrow table
        uris = output_block.column(uri_column_name).to_pylist()
        if len(uris) == 0:
            continue

        def load_uri_bytes(uri_iterator):
            """Resolve filesystem and download bytes for each URI.
            Takes an iterator of URIs and yields bytes for each.
            Uses lazy filesystem resolution - resolves once and reuses for subsequent URIs.
            If a filesystem was provided explicitly, it will be used for all URIs.
            """
            cached_fs = filesystem
            for uri in uri_iterator:
                read_bytes = None
                try:
                    # Use cached FS if available, otherwise resolve the filesystem for the uri.
                    resolved_paths, resolved_fs = _resolve_paths_and_filesystem(
                        uri, filesystem=cached_fs
                    )
                    cached_fs = resolved_fs
                    # Wrap with retrying filesystem
                    fs = RetryingPyFileSystem.wrap(
                        resolved_fs, retryable_errors=data_context.retried_io_errors
                    )
                    # We only pass one uri to resolve and unwrap it from the list of resolved paths,
                    # if fails, we will catch the index error and log it.
                    resolved_path = resolved_paths[0]
                    if resolved_path is None:
                        continue
                    # Download bytes
                    # Use open_input_stream to handle the rare scenario where the data source is not seekable.
                    with fs.open_input_stream(resolved_path) as f:
                        read_bytes = f.read()
                except OSError as e:
                    logger.debug(
                        f"OSError reading uri '{uri}' for column '{uri_column_name}': {e}"
                    )
                except Exception as e:
                    # Catch unexpected errors like pyarrow.lib.ArrowInvalid caused by an invalid uri like
                    # `foo://bar` to avoid failing because of one invalid uri.
                    logger.warning(
                        f"Unexpected error reading uri '{uri}' for column '{uri_column_name}': {e}"
                    )
                finally:
                    # The `finally: yield` guarantees exactly one output per
                    # input URI (None on failure), keeping rows aligned.
                    yield read_bytes

        # Use make_async_gen to resolve and download URI bytes concurrently
        # preserve_ordering=True ensures results are returned in the same order as input URIs
        uri_bytes = list(
            make_async_gen(
                base_iterator=iter(uris),
                fn=load_uri_bytes,
                preserve_ordering=True,
                num_workers=URI_DOWNLOAD_MAX_WORKERS,
            )
        )
        # Add the new column to the PyArrow table
        output_block = output_block.add_column(
            len(output_block.column_names),
            output_bytes_column_name,
            pa.array(uri_bytes),
        )
    output_block_size = output_block.nbytes
    # FIX: Use the planner-provided `data_context` (consistent with how
    # `retried_io_errors` is read above) instead of re-fetching the global
    # context through the deprecated `DatasetContext` alias; the passed-in
    # context may carry per-dataset overrides.
    max_bytes = data_context.target_max_block_size
    if max_bytes is not None and output_block_size > max_bytes:
        # Split oversized output into roughly equal row slices.
        num_blocks = math.ceil(output_block_size / max_bytes)
        num_rows = output_block.num_rows
        yield from _arrow_batcher(output_block, int(math.ceil(num_rows / num_blocks)))
    else:
        yield output_block
class PartitionActor:
    """Actor that partitions download operations based on estimated file sizes.
    For multiple URI columns, estimates the combined size across all columns.
    """

    # Number of leading URIs (per column) sampled to estimate average row size.
    INIT_SAMPLE_BATCH_SIZE = 25

    def __init__(
        self,
        uri_column_names: List[str],
        data_context: DataContext,
        filesystem: Optional["pa.fs.FileSystem"] = None,
    ):
        self._uri_column_names = uri_column_names
        self._data_context = data_context
        self._filesystem = filesystem
        # Lazily computed from the first block and reused afterwards.
        self._batch_size_estimate = None

    def __call__(self, block: pa.Table) -> Iterator[pa.Table]:
        """Split ``block`` into size-targeted sub-blocks for the download stage."""
        if not isinstance(block, pa.Table):
            block = BlockAccessor.for_block(block).to_arrow()
        # Validate all URI columns exist
        for uri_column_name in self._uri_column_names:
            if uri_column_name not in block.column_names:
                raise ValueError(
                    "Ray Data tried to download URIs from a column named "
                    f"{uri_column_name!r}, but a column with that name doesn't "
                    "exist. Is the specified download column correct?"
                )
        if self._batch_size_estimate is None:
            self._batch_size_estimate = self._estimate_nrows_per_partition(block)
        yield from _arrow_batcher(block, self._batch_size_estimate)

    def _estimate_nrows_per_partition(self, block: pa.Table) -> int:
        """Estimate how many rows fit in one partition given sampled file sizes.

        Always returns at least 1, since ``_arrow_batcher`` uses the result as
        a ``range`` step (a step of 0 would raise ``ValueError``).
        """
        sampled_file_sizes_by_column = {}
        for uri_column_name in self._uri_column_names:
            # Extract URIs from PyArrow table for sampling
            uris = block.column(uri_column_name).to_pylist()
            sample_uris = uris[: self.INIT_SAMPLE_BATCH_SIZE]
            sampled_file_sizes = self._sample_sizes(sample_uris)
            sampled_file_sizes_by_column[uri_column_name] = sampled_file_sizes
        # If we sample HTTP URIs, or if an error occurs during sampling, then the file
        # sizes might be `None`. In these cases, we replace the `file_size` with 0.
        sampled_file_sizes_by_column = {
            uri_column_name: [
                file_size if file_size is not None else 0
                for file_size in sampled_file_sizes
            ]
            for uri_column_name, sampled_file_sizes in sampled_file_sizes_by_column.items()
        }
        # Combine the per-column samples into an estimated size for each row.
        row_sizes = [
            sum(file_sizes_in_row)
            for file_sizes_in_row in zip(*sampled_file_sizes_by_column.values())
        ]
        if not row_sizes:
            # FIX: An empty block yields no samples; dividing by
            # `len(row_sizes)` below would raise ZeroDivisionError. Return a
            # safe, positive partition size instead.
            return max(len(block), 1)
        target_nbytes_per_partition = self._data_context.target_max_block_size
        avg_nbytes_per_row = sum(row_sizes) / len(row_sizes)
        if avg_nbytes_per_row == 0:
            logger.warning(
                "Estimated average row size is 0. Falling back to using the number of "
                "rows in the block as the partition size."
            )
            return max(len(block), 1)
        nrows_per_partition = math.floor(
            target_nbytes_per_partition / avg_nbytes_per_row
        )
        # FIX: When individual files exceed the target block size the floor is
        # 0; clamp to 1 so every partition still holds at least one row.
        return max(nrows_per_partition, 1)

    def _sample_sizes(self, uris: List[str]) -> List[int]:
        """Fetch file sizes in parallel using ThreadPoolExecutor."""

        def get_file_size(uri_path, fs):
            try:
                return fs.get_file_info(uri_path).size
            except Exception:
                return None

        # If no URIs, return empty list
        if not uris:
            return []
        # Get the filesystem from the URIs (assumes all URIs use same filesystem for sampling)
        # This is for sampling the file sizes which doesn't require a full resolution of the paths.
        try:
            paths, fs = _resolve_paths_and_filesystem(uris, filesystem=self._filesystem)
            fs = RetryingPyFileSystem.wrap(
                fs, retryable_errors=self._data_context.retried_io_errors
            )
        except Exception as e:
            logger.warning(f"Failed to resolve URIs for size sampling: {e}")
            # Return zeros for all URIs if resolution fails
            return [0] * len(uris)
        # Use ThreadPoolExecutor for concurrent size fetching
        file_sizes = [None] * len(paths)
        with ThreadPoolExecutor(max_workers=URI_DOWNLOAD_MAX_WORKERS) as executor:
            # Submit all size fetch tasks
            future_to_file_index = {
                executor.submit(get_file_size, uri_path, fs): file_index
                for file_index, uri_path in enumerate(paths)
            }
            # Collect results as they complete (order doesn't matter)
            for future in as_completed(future_to_file_index):
                file_index = future_to_file_index[future]
                try:
                    size = future.result()
                    file_sizes[file_index] = size if size is not None else 0
                except Exception as e:
                    logger.warning(f"Error fetching file size for download: {e}")
                    file_sizes[file_index] = 0
        # Renamed loop variable (was `fs`) to avoid shadowing the filesystem.
        assert all(
            size is not None for size in file_sizes
        ), "File size sampling did not complete for all paths"
        return file_sizes
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/planner/plan_download_op.py",
"license": "Apache License 2.0",
"lines": 341,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/tests/test_download_expression.py | import io
import pandas as pd
import pyarrow as pa
import pytest
from PIL import Image
import ray
from ray.data.expressions import DownloadExpr, col, download
class TestDownloadExpressionStructure:
    """Construction and structural-equality tests for DownloadExpr."""

    def test_download_expression_creation(self):
        """download() should yield a DownloadExpr recording the URI column."""
        expr = download("uri_column")
        assert isinstance(expr, DownloadExpr)
        assert expr.uri_column_name == "uri_column"

    def test_download_expression_structural_equality(self):
        """Structural equality is symmetric and sensitive to the URI column."""
        first, second = download("uri"), download("uri")
        other = download("different_uri")
        # Identical URI columns compare equal in both directions.
        assert first.structurally_equals(second)
        assert second.structurally_equals(first)
        # A different URI column breaks equality, in both directions.
        assert not first.structurally_equals(other)
        assert not other.structurally_equals(first)
        # A plain column reference is never structurally equal to a download.
        plain = col("uri")
        assert not first.structurally_equals(plain)
        assert not plain.structurally_equals(first)
class TestDownloadExpressionFunctionality:
    """Test actual download functionality with real and mocked data."""

    def test_download_expression_with_local_files(self, tmp_path):
        """Test basic download expression functionality with local files."""
        # Create sample files with different content types
        sample_data = [
            b"This is test file 1 content",
            b"Different content for file 2",
            b"File 3 has some binary data: \x00\x01\x02\x03",
        ]
        file_paths = []
        for i, data in enumerate(sample_data):
            file_path = tmp_path / f"test_file_{i}.txt"
            file_path.write_bytes(data)
            file_paths.append(str(file_path))
        # Create dataset with file URIs and metadata
        table = pa.Table.from_arrays(
            [
                pa.array([f"local://{path}" for path in file_paths]),
                pa.array([f"id_{i}" for i in range(len(file_paths))]),
                pa.array([f"metadata_{i}" for i in range(len(file_paths))]),
                pa.array(range(len(file_paths))),
            ],
            names=["file_uri", "file_id", "metadata", "index"],
        )
        ds = ray.data.from_arrow(table)
        # Add download column using expression
        ds_with_downloads = ds.with_column("file_bytes", download("file_uri"))
        # Verify results
        results = ds_with_downloads.take_all()
        assert len(results) == len(sample_data)
        for i, result in enumerate(results):
            # Download column should be added correctly
            assert "file_bytes" in result
            assert result["file_bytes"] == sample_data[i]
            # All original columns should be preserved
            assert result["file_id"] == f"id_{i}"
            assert result["metadata"] == f"metadata_{i}"
            assert result["index"] == i
            assert result["file_uri"] == f"local://{file_paths[i]}"

    def test_download_expression_empty_dataset(self):
        """Test download expression with empty dataset."""
        # Create empty dataset with correct schema
        table = pa.Table.from_arrays(
            [
                pa.array([], type=pa.string()),
            ],
            names=["uri"],
        )
        ds = ray.data.from_arrow(table)
        ds_with_downloads = ds.with_column("bytes", download("uri"))
        results = ds_with_downloads.take_all()
        assert len(results) == 0

    def test_download_expression_with_different_file_types(self, tmp_path):
        """Test download expression with various file types including actual images."""
        # Create a small 8x8 RGB image
        small_image = Image.new("RGB", (8, 8), color=(255, 0, 0))  # Red 8x8 image
        image_buffer = io.BytesIO()
        small_image.save(image_buffer, format="PNG")
        image_bytes = image_buffer.getvalue()
        # Create files with different types of content
        test_files = [
            ("text_file.txt", b"Simple text content"),
            ("binary_file.dat", b"\x00\x01\x02\x03\x04\x05"),
            ("json_file.json", b'{"key": "value", "number": 123}'),
            ("small_image.png", image_bytes),  # Actual PNG image (primary use case)
            ("empty_file.txt", b""),  # Empty file edge case
        ]
        file_paths = []
        expected_data = []
        for filename, content in test_files:
            file_path = tmp_path / filename
            file_path.write_bytes(content)
            file_paths.append(str(file_path))
            expected_data.append(content)
        # Create dataset
        table = pa.Table.from_arrays(
            [
                pa.array([f"local://{path}" for path in file_paths]),
                pa.array(
                    [f.split(".")[0] for f, _ in test_files]
                ),  # filename without extension
            ],
            names=["file_uri", "file_type"],
        )
        ds = ray.data.from_arrow(table)
        ds_with_downloads = ds.with_column("content", download("file_uri"))
        results = ds_with_downloads.take_all()
        assert len(results) == len(test_files)
        for i, result in enumerate(results):
            assert result["content"] == expected_data[i]
            assert result["file_type"] == test_files[i][0].split(".")[0]
            # Special verification for image file - ensure it can be loaded as an image
            if test_files[i][0].endswith(".png"):
                downloaded_image = Image.open(io.BytesIO(result["content"]))
                assert downloaded_image.size == (8, 8)
                assert downloaded_image.mode == "RGB"

    def test_chained_download_expressions(self, tmp_path):
        """Test chained download expressions functionality."""
        # Create sample files with different content
        sample_data = [
            b"Content for file 1",
            b"Content for file 2",
            b"Content for file 3",
        ]
        file_paths = []
        for i, data in enumerate(sample_data):
            file_path = tmp_path / f"test_file_{i}.txt"
            file_path.write_bytes(data)
            file_paths.append(str(file_path))
        # Create dataset with file URIs
        table = pa.Table.from_arrays(
            [
                pa.array([f"local://{path}" for path in file_paths]),
                pa.array([f"id_{i}" for i in range(len(file_paths))]),
            ],
            names=["file_uri", "file_id"],
        )
        ds = ray.data.from_arrow(table)
        # Chain multiple download expressions from the same URI column
        ds_with_chained_downloads = (
            ds.with_column("file_bytes_1", download("file_uri"))
            .with_column("file_bytes_2", download("file_uri"))
            .with_column("file_bytes_3", download("file_uri"))
        )
        # Verify results
        results = ds_with_chained_downloads.take_all()
        assert len(results) == len(sample_data)
        for i, result in enumerate(results):
            # All download columns should have the same content
            assert "file_bytes_1" in result
            assert "file_bytes_2" in result
            assert "file_bytes_3" in result
            assert result["file_bytes_1"] == sample_data[i]
            assert result["file_bytes_2"] == sample_data[i]
            assert result["file_bytes_3"] == sample_data[i]
            # Original columns should be preserved
            assert result["file_id"] == f"id_{i}"
            assert result["file_uri"] == f"local://{file_paths[i]}"

    def test_download_expression_with_pandas_blocks(self, tmp_path):
        """Test download with pandas blocks to ensure arrow conversion works.
        This tests the code path in PartitionActor.__call__ where non-arrow
        blocks are converted to arrow format before processing.
        """
        ctx = ray.data.context.DataContext.get_current()
        old_enable_pandas_block = ctx.enable_pandas_block
        ctx.enable_pandas_block = True
        try:
            # Create test files
            sample_data = [
                b"Pandas block test content 1",
                b"Pandas block test content 2",
            ]
            file_paths = []
            for i, data in enumerate(sample_data):
                file_path = tmp_path / f"pandas_test_{i}.txt"
                file_path.write_bytes(data)
                file_paths.append(str(file_path))
            # Create dataset with pandas blocks (not arrow)
            df = pd.DataFrame(
                {
                    "file_uri": [f"local://{path}" for path in file_paths],
                    "file_id": [f"id_{i}" for i in range(len(file_paths))],
                }
            )
            ds = ray.data.from_pandas(df)
            # Apply download - this should trigger arrow conversion in PartitionActor
            ds_with_downloads = ds.with_column("content", download("file_uri"))
            # Verify results
            results = ds_with_downloads.take_all()
            assert len(results) == len(sample_data)
            for i, result in enumerate(results):
                assert result["content"] == sample_data[i]
                assert result["file_id"] == f"id_{i}"
                assert result["file_uri"] == f"local://{file_paths[i]}"
        finally:
            # Restore the global context flag so other tests are unaffected.
            ctx.enable_pandas_block = old_enable_pandas_block

    def test_download_expression_with_custom_filesystem(self, tmp_path):
        """Test download with an explicit SubTreeFileSystem and relative URIs."""
        import pyarrow.fs as pafs

        # 1. Setup paths
        subdir = tmp_path / "data"
        subdir.mkdir()
        file_name = "test_file.txt"
        file_path = subdir / file_name
        sample_content = b"File content with custom fs"
        file_path.write_bytes(sample_content)
        # 2. Setup SubTreeFileSystem
        # This treats 'subdir' as the root '/'
        base_fs = pafs.LocalFileSystem()
        custom_fs = pafs.SubTreeFileSystem(str(subdir), base_fs)
        # 3. Create Dataset
        # Note: We use the relative 'file_name' because the FS is rooted at 'subdir'
        ds = ray.data.from_items([{"file_uri": file_name, "file_id": 0}])
        # 4. Execute Download
        ds_with_downloads = ds.with_column(
            "content", download("file_uri", filesystem=custom_fs)
        )
        # 5. Assertions
        results = ds_with_downloads.take_all()
        assert len(results) == 1
        assert results[0]["content"] == sample_content
        assert results[0]["file_id"] == 0
class TestDownloadExpressionErrors:
    """Test error conditions and edge cases for download expressions."""

    def test_download_expression_invalid_uri_column(self):
        """Test download expression with non-existent URI column."""
        table = pa.Table.from_arrays(
            [
                pa.array(["local://test.txt"]),
            ],
            names=["existing_column"],
        )
        ds = ray.data.from_arrow(table)
        ds_with_downloads = ds.with_column("bytes", download("non_existent_column"))
        # Should raise error when trying to execute
        # (the missing column is only detected lazily, at execution time).
        with pytest.raises(ValueError):
            ds_with_downloads.take_all()

    def test_download_expression_with_null_uris(self):
        """Test download expression handling of null/empty URIs."""
        table = pa.Table.from_arrays(
            [
                pa.array(["local://test.txt", None, ""]),
            ],
            names=["uri"],
        )
        ds = ray.data.from_arrow(table)
        ds_with_downloads = ds.with_column("bytes", download("uri"))
        # Should handle nulls gracefully (exact behavior may vary)
        # This test mainly ensures no crash occurs
        try:
            results = ds_with_downloads.take_all()
            # If it succeeds, verify structure is reasonable
            assert len(results) == 3
            for result in results:
                assert "bytes" in result
        except Exception as e:
            # If it fails, should be a reasonable error (not a crash)
            assert isinstance(e, (ValueError, KeyError, RuntimeError))

    def test_download_expression_with_malformed_uris(self, tmp_path):
        """Test download expression with malformed URIs.
        This tests that various malformed URIs are caught and return None
        instead of crashing.
        All of the URIs should be malformed in order to test the ZeroDivisionError
        described in https://github.com/ray-project/ray/issues/58462.
        """
        malformed_uris = [
            f"local://{tmp_path}/nonexistent.txt",  # File doesn't exist
            "local:///this/path/does/not/exist/file.txt",  # Invalid path
            "",  # Empty URI
            "foobar",  # Random string
            # TODO(xyuzh): Currently, using the below URIs raises an exception
            # in _resolve_paths_and_filesystem. We need to fix that issue and
            # add the tests in.
            # "file:///\x00/null/byte",  # Null byte
            # "http://host/path\n\r",  # Line breaks
            # "foo://bar",  # Invalid scheme
            # "://no-scheme",  # Missing scheme
            # "http://host/path?query=<script>",  # Injection attempts
        ]
        ds = ray.data.from_items([{"uri": uri} for uri in malformed_uris])
        ds_with_downloads = ds.with_column("bytes", download("uri"))
        results = ds_with_downloads.take_all()
        # All malformed URIs should return None
        assert len(results) == len(malformed_uris)
        for result in results:
            assert result["bytes"] is None

    def test_download_expression_mixed_valid_and_invalid_uris(self, tmp_path):
        """Test download expression when some but not all of the URIs are invalid."""
        # Create one valid file
        valid_file = tmp_path / "valid.txt"
        valid_file.write_bytes(b"valid content")
        # Create URIs: one valid and one non-existent file.
        ds = ray.data.from_items(
            [
                {"uri": str(valid_file), "id": 0},
                {"uri": str(tmp_path / "nonexistent.txt"), "id": 1},
            ]
        )
        ds_with_downloads = ds.with_column("bytes", download("uri"))
        # Should not crash - failed downloads return None
        results = sorted(ds_with_downloads.take_all(), key=lambda row: row["id"])
        assert len(results) == 2
        # First URI should succeed
        assert results[0]["bytes"] == b"valid content"
        # Second URI should fail gracefully (return None)
        assert results[1]["bytes"] is None
class TestDownloadExpressionIntegration:
    """Integration tests combining download expressions with other Ray Data operations."""

    def test_download_expression_with_map_batches(self, tmpdir):
        """Download bytes via an expression, then decode them with map_batches."""
        # Write a small file whose bytes we expect to round-trip.
        source_file = tmpdir.join("test.txt")
        expected_bytes = b"Hello, World!"
        source_file.write_binary(expected_bytes)

        # Single-row dataset pointing at the file.
        uri_table = pa.Table.from_arrays(
            [pa.array([f"local://{source_file}"])],
            names=["uri"],
        )
        dataset = ray.data.from_arrow(uri_table)

        # Attach the downloaded bytes, then decode them in a batch UDF.
        with_bytes = dataset.with_column("raw_bytes", download("uri"))

        def decode_bytes(batch):
            # Access the specific column containing the bytes data
            batch["decoded_text"] = [
                data.decode("utf-8") for data in batch["raw_bytes"]
            ]
            return batch

        rows = with_bytes.map_batches(decode_bytes).take_all()
        assert len(rows) == 1
        assert rows[0]["decoded_text"] == "Hello, World!"
        assert rows[0]["raw_bytes"] == expected_bytes
# Allow running this test module directly with pytest.
if __name__ == "__main__":
    import sys

    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_download_expression.py",
"license": "Apache License 2.0",
"lines": 347,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/test_unify_schemas_performance.py | import pyarrow as pa
import pytest
from ray.data._internal.arrow_ops.transform_pyarrow import (
unify_schemas,
)
from ray.data.extensions import (
ArrowPythonObjectType,
ArrowTensorType,
ArrowVariableShapedTensorType,
)
# Schema factory functions - just return schemas
def _create_simple_schema(num_columns):
    """Schema with ``num_columns`` plain int64 columns."""
    fields = [(f"col_{i}", pa.int64()) for i in range(num_columns)]
    return pa.schema(fields)
def _create_tensor_fixed_schema(num_columns):
    """Schema where every column is a fixed-shape (2, 2) float32 tensor."""
    fields = []
    for i in range(num_columns):
        fields.append((f"tensor_{i}", ArrowTensorType((2, 2), pa.float32())))
    return pa.schema(fields)
def _create_tensor_variable_schema(num_columns):
    """Schema where every column is a variable-shaped 2-D float32 tensor."""
    fields = []
    for i in range(num_columns):
        fields.append((f"tensor_{i}", ArrowVariableShapedTensorType(pa.float32(), 2)))
    return pa.schema(fields)
def _create_object_schema(num_columns):
    """Schema where every column is an ArrowPythonObjectType column."""
    fields = []
    for i in range(num_columns):
        fields.append((f"obj_{i}", ArrowPythonObjectType()))
    return pa.schema(fields)
def _create_nested_struct_schema(num_columns):
    """Schema where every column is a struct with x/y/z leaf fields."""
    inner_fields = [("x", pa.int32()), ("y", pa.string()), ("z", pa.float64())]
    return pa.schema(
        [(f"struct_{i}", pa.struct(inner_fields)) for i in range(num_columns)]
    )
def _create_deep_nested_schema(num_columns):
    """Schema where every column is a struct nested four levels deep."""

    def _deep_type():
        level4 = pa.struct([("data", pa.int32()), ("meta", pa.string())])
        level3 = pa.struct([("level4", level4), ("id3", pa.int64())])
        level2 = pa.struct([("level3", level3), ("id2", pa.int64())])
        return pa.struct([("level2", level2), ("id1", pa.int64())])

    return pa.schema([(f"deep_{i}", _deep_type()) for i in range(num_columns)])
def _create_mixed_complex_schema(num_columns):
    """Schema cycling through five heterogeneous column types (by index mod 5)."""

    def _type_for(i):
        kind = i % 5
        if kind == 0:
            return pa.int64()
        if kind == 1:
            return ArrowTensorType((3, 3), pa.int32())
        if kind == 2:
            return ArrowPythonObjectType()
        if kind == 3:
            return pa.struct([("a", pa.int32()), ("b", pa.string())])
        return pa.list_(pa.float64())

    return pa.schema([(f"col_{i}", _type_for(i)) for i in range(num_columns)])
@pytest.mark.parametrize("num_schemas", [10, 100])
@pytest.mark.parametrize("num_columns", [10, 100, 1000, 5000])
@pytest.mark.parametrize(
    "schema_factory,expected_time_per_schema_per_column",
    [
        (_create_simple_schema, 0.00001),
        (_create_tensor_fixed_schema, 0.00005),
        (_create_tensor_variable_schema, 0.00005),
        (_create_object_schema, 0.00005),
        (_create_nested_struct_schema, 0.0001),
        (_create_deep_nested_schema, 0.0002),
        (_create_mixed_complex_schema, 0.0002),
    ],
)
def test_unify_schemas_equivalent_performance(
    num_schemas, num_columns, schema_factory, expected_time_per_schema_per_column
):
    """Stress test for unify_schemas when ALL schemas are equivalent (identical).
    This tests the fast path where all schemas are the same and should be optimized
    to return quickly without expensive comparisons.
    """
    import time

    # Create the base schema
    base_schema = schema_factory(num_columns)
    # Create list of identical schemas
    schemas = [base_schema] * num_schemas
    # FIX: Time with `perf_counter` instead of `time.time`. `time.time()` is
    # wall-clock and can jump on system clock adjustments, which would make
    # this timing assertion flaky; `perf_counter` is monotonic and
    # high-resolution, the recommended clock for measuring elapsed intervals.
    start_time = time.perf_counter()
    unified = unify_schemas(schemas)
    elapsed_time = time.perf_counter() - start_time
    # Verify the result is correct (should be identical to base schema)
    assert unified == base_schema
    # Performance assertions with scaling based on complexity
    scale_factor = num_schemas * num_columns
    max_allowed_time = expected_time_per_schema_per_column * scale_factor
    # Allow 2x headroom to reduce flakiness on slow CI machines.
    buffer_factor = 2
    assert elapsed_time < buffer_factor * max_allowed_time, (
        f"unify_schemas took {elapsed_time:.4f}s for {num_schemas} identical "
        f"{schema_factory.__name__} schemas with {num_columns} columns, "
        f"should be < {max_allowed_time:.4f}s"
    )
    # Print timing info for large cases
    if num_schemas >= 1000 or num_columns >= 100:
        print(
            f"\n{schema_factory.__name__}: {num_schemas} schemas x {num_columns} cols = {elapsed_time:.4f}s"
        )
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_unify_schemas_performance.py",
"license": "Apache License 2.0",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/llm/_internal/batch/processor/serve_deployment_proc.py | """The processor that runs serve deployment."""
from typing import Any, Dict, Optional, Type
from pydantic import Field
from ray.data import ActorPoolStrategy
from ray.data.block import UserDefinedFunction
from ray.llm._internal.batch.processor.base import (
Processor,
ProcessorBuilder,
ProcessorConfig,
)
from ray.llm._internal.batch.stages import (
ServeDeploymentStage,
)
class ServeDeploymentProcessorConfig(ProcessorConfig):
    """The configuration for the serve deployment processor."""

    # Configurations used to build the serve deployment
    deployment_name: str = Field(
        description="The name of the serve deployment to use.",
    )
    app_name: str = Field(
        description="The name of the serve application to use.",
        default="default",
    )
    # Fix: the field defaults to None, so the annotation must be Optional —
    # the downstream UDF explicitly handles a None mapping (raw-dict requests).
    dtype_mapping: Optional[Dict[str, Type[Any]]] = Field(
        description="A dictionary mapping data type names to their corresponding request classes for the serve deployment.",
        default=None,
    )
    should_continue_on_error: bool = Field(
        default=False,
        description="If True, continue processing when inference fails for a row "
        "instead of raising an exception. Failed rows will have a non-null "
        "'__inference_error__' column containing the error message. Error rows "
        "bypass postprocess. If False (default), any inference error raises.",
    )
def build_serve_deployment_processor(
    config: ServeDeploymentProcessorConfig,
    preprocess: Optional[UserDefinedFunction] = None,
    postprocess: Optional[UserDefinedFunction] = None,
    preprocess_map_kwargs: Optional[Dict[str, Any]] = None,
    postprocess_map_kwargs: Optional[Dict[str, Any]] = None,
) -> Processor:
    """Construct a processor that runs a serve deployment.

    Args:
        config: The configuration for the processor.
        preprocess: An optional lambda function that takes a row (dict) as input
            and returns a preprocessed row (dict). The output row must contain the
            required fields for the following processing stages.
        postprocess: An optional lambda function that takes a row (dict) as input
            and returns a postprocessed row (dict).
        preprocess_map_kwargs: Optional kwargs to pass to Dataset.map() for the
            preprocess stage (e.g., num_cpus, memory, concurrency).
        postprocess_map_kwargs: Optional kwargs to pass to Dataset.map() for the
            postprocess stage (e.g., num_cpus, memory, concurrency).

    Returns:
        The constructed processor.
    """
    # Fixed-size actor pool: autoscaling is disabled for this stage.
    compute_strategy = ActorPoolStrategy(
        **config.get_concurrency(autoscaling_enabled=False),
    )
    serve_stage = ServeDeploymentStage(
        fn_constructor_kwargs=dict(
            deployment_name=config.deployment_name,
            app_name=config.app_name,
            dtype_mapping=config.dtype_mapping,
            should_continue_on_error=config.should_continue_on_error,
        ),
        map_batches_kwargs=dict(compute=compute_strategy),
    )
    # TODO (Kourosh): Add telemetry for ServeDeploymentStage
    return Processor(
        config,
        [serve_stage],
        preprocess=preprocess,
        postprocess=postprocess,
        preprocess_map_kwargs=preprocess_map_kwargs,
        postprocess_map_kwargs=postprocess_map_kwargs,
    )
# Register the factory so ProcessorBuilder.build() dispatches
# ServeDeploymentProcessorConfig instances to the builder above.
ProcessorBuilder.register(
    ServeDeploymentProcessorConfig, build_serve_deployment_processor
)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/_internal/batch/processor/serve_deployment_proc.py",
"license": "Apache License 2.0",
"lines": 84,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/llm/_internal/batch/stages/serve_deployment_stage.py | """The stage that runs serve deployment."""
import asyncio
import logging
import time
import uuid
from typing import Any, AsyncIterator, Dict, List, Optional, Tuple, Type
from pydantic import BaseModel, ValidationError
from ray import serve
from ray.exceptions import RayTaskError
from ray.llm._internal.batch.stages.base import (
StatefulStage,
StatefulStageUDF,
)
from ray.llm._internal.batch.stages.common import truncate_str
logger = logging.getLogger(__name__)
# Max characters of request_kwargs echoed back in an error row (debuggability).
_MAX_PROMPT_LENGTH_IN_ERROR = 200
# Request-level errors safe to catch. Unknown errors are treated as fatal.
_SERVE_RECOVERABLE_ERRORS = (
    ValueError,
    TypeError,
    ValidationError,
)
class ServeDeploymentStageUDF(StatefulStageUDF):
    """Stateful UDF that forwards each row to a method on a Ray Serve deployment.

    Each row names the target method, an optional request dtype, and the
    request_kwargs used to construct the request object.
    """

    def __init__(
        self,
        data_column: str,
        expected_input_keys: List[str],
        *,
        deployment_name: str,
        app_name: str,
        dtype_mapping: Dict[str, Type[Any]],
        should_continue_on_error: bool = False,
    ):
        """
        Initialize the ServeDeploymentStageUDF.

        Args:
            data_column: The data column name.
            expected_input_keys: The expected input keys of the stage.
            deployment_name: The name of the deployment.
            app_name: The name of the deployment app.
            dtype_mapping: The mapping of the request class name to the request class.
                May be None when the deployment accepts raw dict requests.
            should_continue_on_error: If True, continue processing when inference
                fails for a row instead of raising. Failed rows will have
                '__inference_error__' set to the error message.
        """
        super().__init__(data_column, expected_input_keys)
        self._dtype_mapping = dtype_mapping
        self.should_continue_on_error = should_continue_on_error
        # Using stream=True as LLM serve deployments return async generators.
        # TODO (Kourosh): Generalize this to support non-streaming deployments.
        self._dh = serve.get_deployment_handle(deployment_name, app_name).options(
            stream=True
        )
        # Monotonically increasing id assigned to each outgoing request.
        self.request_id = 0

    def _prepare_request(
        self, row: Dict[str, Any]
    ) -> Tuple[Dict[str, Any], Optional[Type[Any]], str]:
        """
        Decorate the request with metadata related to the batch.

        Args:
            row: The row. NOTE: its "request_kwargs" key is popped (mutated).

        Returns:
            A tuple of (decorated_request, dtype, method_name). dtype is the class of
            the request object and can be None if the serve deployment accepts a raw
            dict. method_name is the name of the method to invoke on the deployment.
        """
        method = row.get("method")
        dtype_name = row.get("dtype")
        dtype = None
        if dtype_name is not None:
            # A named dtype must resolve through the configured mapping.
            if not self._dtype_mapping or dtype_name not in self._dtype_mapping:
                raise ValueError(
                    f"{dtype_name} must be provided in "
                    "ServeDeploymentProcessorConfig's dtype_mapping."
                )
            dtype = self._dtype_mapping[dtype_name]
        request_kwargs = row.pop("request_kwargs")
        # Carry request_id / idx_in_batch so responses can be matched to rows.
        request = {
            "request_id": str(self.request_id),
            "idx_in_batch": row[self.IDX_IN_BATCH_COLUMN],
            **request_kwargs,
        }
        self.request_id += 1
        return request, dtype, method

    async def generate_async(
        self, row: Dict[str, Any]
    ) -> Tuple[Dict[str, Any], Dict[str, Any], float]:
        """
        Run the serve deployment.

        Args:
            row: The row to run the serve deployment on.

        Returns:
            A tuple of (request, output_data, time_taken_seconds).
        """
        request, dtype, method = self._prepare_request(row)
        request_obj = dtype(**request) if dtype else request
        if getattr(self._dh, method) is None:
            raise ValueError(f"Method {method} not found in the serve deployment.")
        t = time.perf_counter()
        # Directly using anext() requires python3.10 and above, so pull a single
        # item from the deployment's async generator via __anext__.
        output_data = await getattr(self._dh, method).remote(request_obj).__anext__()
        time_taken = time.perf_counter() - t
        # Convert the output data to a dict if it is a Pydantic model.
        if isinstance(output_data, BaseModel):
            output_data = output_data.model_dump()
        return request, output_data, time_taken

    def _is_recoverable_error(self, exc: Exception) -> bool:
        """Check if exception is recoverable. Unknown errors are treated as fatal."""
        if isinstance(exc, _SERVE_RECOVERABLE_ERRORS):
            return True
        # RayTaskError wraps remote exceptions - check the cause
        if isinstance(exc, RayTaskError) and hasattr(exc, "cause"):
            if isinstance(exc.cause, _SERVE_RECOVERABLE_ERRORS):
                return True
        return False

    async def _generate_with_error_handling(
        self,
        row: Dict[str, Any],
        batch_uuid: uuid.UUID,
    ) -> Dict[str, Any]:
        """Generate output for a row, yielding error row on recoverable failure."""
        idx_in_batch = row[self.IDX_IN_BATCH_COLUMN]
        # Save before generate_async pops it
        original_request_kwargs = row.get("request_kwargs", {})
        try:
            request, output, time_taken = await self.generate_async(row)
            # Success rows carry an empty __inference_error__ marker.
            return {
                **output,
                "request_id": request["request_id"],
                self.IDX_IN_BATCH_COLUMN: request["idx_in_batch"],
                "batch_uuid": batch_uuid.hex,
                "time_taken": time_taken,
                "__inference_error__": "",
            }
        except Exception as e:
            # Only recover from known recoverable errors; unknown errors propagate
            if not self._is_recoverable_error(e) or not self.should_continue_on_error:
                raise
            error_msg = f"{type(e).__name__}: {str(e)}"
            logger.warning(
                "[Serve Deployment] Inference failed for row %d in batch %s: %s",
                idx_in_batch,
                batch_uuid.hex,
                error_msg,
            )
            # Include request_kwargs snippet for debuggability
            request_str = truncate_str(
                str(original_request_kwargs), _MAX_PROMPT_LENGTH_IN_ERROR
            )
            return {
                self.IDX_IN_BATCH_COLUMN: idx_in_batch,
                "batch_uuid": batch_uuid.hex,
                "__inference_error__": error_msg,
                "request_kwargs": request_str,
            }

    async def udf(self, batch: List[Dict[str, Any]]) -> AsyncIterator[Dict[str, Any]]:
        """
        Run the serve deployment.

        Args:
            batch: A list of rows to run the serve deployment on.

        Yields:
            Dict[str, Any]: A dictionary containing the response from the serve
            deployment along with processing metadata. Rows are yielded in
            completion order, not input order.
        """
        batch_uuid = uuid.uuid4()
        t = time.perf_counter()
        # Fan out all rows concurrently; yield each as soon as it finishes.
        tasks = [
            asyncio.create_task(self._generate_with_error_handling(row, batch_uuid))
            for row in batch
        ]
        for resp in asyncio.as_completed(tasks):
            yield await resp
        batch_time_taken = time.perf_counter() - t
        logger.info(
            "[LLM Batch - Serve Deployment] Elapsed time for batch %s with size %d: %s",
            batch_uuid.hex,
            len(batch),
            batch_time_taken,
        )
class ServeDeploymentStage(StatefulStage):
    """Stage that routes each row to a method on a Ray Serve deployment."""

    # The stateful UDF class instantiated per actor for this stage.
    fn: Type[StatefulStageUDF] = ServeDeploymentStageUDF

    def get_required_input_keys(self) -> Dict[str, str]:
        """Describe the per-row keys this stage consumes."""
        required = {
            "method": "Name of the method to invoke on the serve deployment.",
            "request_kwargs": "The request_kwargs to construct the request.",
        }
        return required
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/_internal/batch/stages/serve_deployment_stage.py",
"license": "Apache License 2.0",
"lines": 186,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/llm/tests/batch/gpu/processor/test_serve_deployment_proc.py | import sys
from typing import Any, Dict
import pytest
import ray
from ray import serve
from ray.data import ActorPoolStrategy
from ray.data.llm import ServeDeploymentProcessorConfig, build_processor
from ray.llm._internal.batch.processor import ProcessorBuilder
from ray.serve.llm.openai_api_models import ChatCompletionRequest, CompletionRequest
@pytest.mark.parametrize(
    "dtype_mapping", [None, {"CompletionRequest": CompletionRequest}]
)
def test_serve_deployment_processor(dtype_mapping):
    """Config builds a processor with exactly one ServeDeploymentStage and a
    fixed-size (min == max == concurrency) actor pool."""
    app_name = "test_serve_deployment_processor_app"
    deployment_name = "test_serve_deployment_name"
    config_kwargs = dict(
        deployment_name=deployment_name,
        app_name=app_name,
        batch_size=16,
        concurrency=1,
    )
    # Omit dtype_mapping entirely in the None case to exercise its default.
    if dtype_mapping is not None:
        config_kwargs["dtype_mapping"] = dtype_mapping
    config = ServeDeploymentProcessorConfig(**config_kwargs)
    processor = ProcessorBuilder.build(config)
    assert processor.list_stage_names() == [
        "ServeDeploymentStage",
    ]
    stage = processor.get_stage_by_name("ServeDeploymentStage")
    assert stage.fn_constructor_kwargs == {
        "deployment_name": deployment_name,
        "app_name": app_name,
        "dtype_mapping": dtype_mapping,
        "should_continue_on_error": False,
    }
    assert "compute" in stage.map_batches_kwargs
    assert isinstance(stage.map_batches_kwargs["compute"], ActorPoolStrategy)
    assert stage.map_batches_kwargs["compute"].min_size == 1
    assert stage.map_batches_kwargs["compute"].max_size == 1
def test_simple_serve_deployment(serve_cleanup):
    """End-to-end: a trivial deployment increments x; every row round-trips."""
    @serve.deployment
    class SimpleServeDeployment:
        # ServeDeploymentStageUDF expects an async generator.
        async def add(self, request: Dict[str, Any]):
            yield {"result": request["x"] + 1}
    app_name = "simple_serve_deployment_app"
    deployment_name = "SimpleServeDeployment"
    serve.run(SimpleServeDeployment.bind(), name=app_name)
    config = ServeDeploymentProcessorConfig(
        deployment_name=deployment_name,
        app_name=app_name,
        batch_size=16,
        concurrency=1,
    )
    processor = build_processor(
        config,
        preprocess=lambda row: dict(
            method="add",
            dtype=None,  # Empty dtype since output is already dict format
            request_kwargs=dict(x=row["id"]),
        ),
        postprocess=lambda row: dict(
            resp=row["result"],
            id=row["id"],
        ),
    )
    ds = ray.data.range(60)
    ds = ds.map(lambda x: {"id": x["id"]})
    ds = processor(ds)
    outs = ds.take_all()
    assert len(outs) == 60
    assert all("resp" in out for out in outs)
    # The deployment adds 1 to each input id.
    assert all(out["resp"] == out["id"] + 1 for out in outs)
def test_serve_deployment_continue_on_error(serve_cleanup):
    """With should_continue_on_error=True, failing rows are emitted with
    __inference_error__ set instead of aborting the whole dataset."""
    @serve.deployment
    class FailingServeDeployment:
        async def process(self, request: Dict[str, Any]):
            x = request["x"]
            if x % 10 == 0:  # Fail every 10th row
                raise ValueError(f"Intentional failure for x={x}")
            yield {"result": x * 2}
    app_name = "failing_serve_deployment_app"
    deployment_name = "FailingServeDeployment"
    serve.run(FailingServeDeployment.bind(), name=app_name)
    config = ServeDeploymentProcessorConfig(
        deployment_name=deployment_name,
        app_name=app_name,
        batch_size=16,
        concurrency=1,
        should_continue_on_error=True,
    )
    processor = build_processor(
        config,
        preprocess=lambda row: dict(
            method="process",
            dtype=None,
            request_kwargs=dict(x=row["id"]),
        ),
        # Error rows will bypass this postprocess and return raw data with
        # __inference_error__ set. Only success rows get resp/id keys.
        postprocess=lambda row: dict(
            resp=row.get("result"),
            id=row.get("id"),
        ),
    )
    ds = ray.data.range(60)
    ds = ds.map(lambda x: {"id": x["id"]})
    ds = processor(ds)
    outs = ds.take_all()
    assert len(outs) == 60
    # Check __inference_error__ directly
    errors = [o for o in outs if o.get("__inference_error__", "")]
    successes = [o for o in outs if not o.get("__inference_error__", "")]
    # ids 0,10,20,30,40,50 fail -> 6 error rows out of 60.
    assert len(errors) == 6, f"Expected 6 errors, got {len(errors)}: {errors[:3]}..."
    assert len(successes) == 54
    for e in errors:
        error_msg = e["__inference_error__"]
        assert "ValueError" in error_msg, f"Expected ValueError in: {error_msg}"
        assert (
            "Intentional failure" in error_msg
        ), f"Expected 'Intentional failure' in: {error_msg}"
    for s in successes:
        assert s.get("resp") is not None, f"Missing resp in success row: {s}"
def test_completion_model(model_opt_125m, create_model_opt_125m_deployment):
    """End-to-end completions against a real OPT-125m serve deployment."""
    deployment_name, app_name = create_model_opt_125m_deployment
    config = ServeDeploymentProcessorConfig(
        deployment_name=deployment_name,
        app_name=app_name,
        dtype_mapping={
            "CompletionRequest": CompletionRequest,
        },
        batch_size=16,
        concurrency=1,
    )
    processor = build_processor(
        config,
        preprocess=lambda row: dict(
            method="completions",
            dtype="CompletionRequest",
            request_kwargs=dict(
                model=model_opt_125m,
                prompt=row["prompt"],
                stream=False,
            ),
        ),
        postprocess=lambda row: dict(
            resp=row["choices"][0]["text"],
        ),
    )
    ds = ray.data.range(60)
    ds = ds.map(lambda x: {"prompt": f"Hello {x['id']}"})
    ds = processor(ds)
    ds = ds.materialize()
    outs = ds.take_all()
    assert len(outs) == 60
    assert all("resp" in out for out in outs)
def test_multi_turn_completion_model(model_opt_125m, create_model_opt_125m_deployment):
    """Chain two processors: the first turn's completion feeds the second
    turn's prompt against the same deployment."""
    deployment_name, app_name = create_model_opt_125m_deployment
    config1 = ServeDeploymentProcessorConfig(
        deployment_name=deployment_name,
        app_name=app_name,
        dtype_mapping={
            "CompletionRequest": CompletionRequest,
        },
        # Use lower batch size to reduce resource usage as there are multiple processors
        batch_size=4,
        concurrency=1,
    )
    processor1 = build_processor(
        config1,
        preprocess=lambda row: dict(
            dtype="CompletionRequest",
            method="completions",
            request_kwargs=dict(
                model=model_opt_125m,
                prompt=row["prompt"],
                stream=False,
            ),
        ),
        # Emit the generated text as the next turn's "prompt".
        postprocess=lambda row: dict(
            prompt=row["choices"][0]["text"],
        ),
    )
    config2 = ServeDeploymentProcessorConfig(
        deployment_name=deployment_name,
        app_name=app_name,
        dtype_mapping={
            "CompletionRequest": CompletionRequest,
        },
        batch_size=4,
        concurrency=1,
    )
    processor2 = build_processor(
        config2,
        preprocess=lambda row: dict(
            dtype="CompletionRequest",
            method="completions",
            request_kwargs=dict(
                model=model_opt_125m,
                prompt=row["prompt"],
                stream=False,
            ),
        ),
        postprocess=lambda row: dict(
            resp=row["choices"][0]["text"],
        ),
    )
    ds = ray.data.range(60)
    ds = ds.map(lambda x: {"prompt": f"Hello {x['id']}"})
    ds = processor1(ds)
    ds = processor2(ds)
    ds = ds.materialize()
    outs = ds.take_all()
    assert len(outs) == 60
    assert all("resp" in out for out in outs)
def test_chat_model(model_opt_125m, create_model_opt_125m_deployment):
    """End-to-end chat completions against a real OPT-125m serve deployment."""
    deployment_name, app_name = create_model_opt_125m_deployment
    config = ServeDeploymentProcessorConfig(
        deployment_name=deployment_name,
        app_name=app_name,
        dtype_mapping={
            "ChatCompletionRequest": ChatCompletionRequest,
        },
        batch_size=16,
        concurrency=1,
    )
    processor = build_processor(
        config,
        preprocess=lambda row: dict(
            dtype="ChatCompletionRequest",
            method="chat",
            request_kwargs=dict(
                model=model_opt_125m,
                messages=[
                    {"role": "system", "content": "You are a helpful assistant"},
                    {"role": "user", "content": f"Hello {row['id']}"},
                ],
                stream=False,
            ),
        ),
        postprocess=lambda row: dict(
            resp=row["choices"][0]["message"]["content"],
        ),
    )
    ds = ray.data.range(60)
    ds = ds.map(lambda x: {"id": x["id"]})
    ds = processor(ds)
    ds = ds.materialize()
    outs = ds.take_all()
    assert len(outs) == 60
    assert all("resp" in out for out in outs)
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/tests/batch/gpu/processor/test_serve_deployment_proc.py",
"license": "Apache License 2.0",
"lines": 253,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/llm/tests/batch/gpu/stages/test_serve_deployment_stage.py | import sys
from unittest.mock import MagicMock, patch
import pytest
from ray.exceptions import RayActorError
from ray.llm._internal.batch.stages.serve_deployment_stage import (
ServeDeploymentStageUDF,
)
from ray.serve._private.common import DeploymentID
from ray.serve.exceptions import BackPressureError, DeploymentUnavailableError
from ray.serve.llm.openai_api_models import ChatCompletionRequest, CompletionRequest
@pytest.fixture
def mock_serve_deployment_handle():
    """Mock the serve deployment handle and its methods."""
    with patch("ray.serve.get_deployment_handle") as mock_get_handle:
        mock_handle = MagicMock()
        # .options(stream=True) must return the same handle the UDF stores.
        mock_handle.options.return_value = mock_handle
        # Mock the chat and completions methods
        mock_handle.chat = MagicMock()
        mock_handle.completions = MagicMock()
        mock_get_handle.return_value = mock_handle
        yield mock_handle
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "method,test_data",
    [
        (
            "completions",
            [
                {
                    "method": "completions",
                    "dtype": "CompletionRequest",
                    "request_kwargs": {"prompt": "Hello", "temperature": 0.7},
                },
            ],
        ),
        (
            "chat",
            [
                {
                    "method": "chat",
                    "dtype": "ChatCompletionRequest",
                    "request_kwargs": {
                        "messages": [
                            {
                                "role": "system",
                                "content": "You are a helpful assistant",
                            },
                            {"role": "user", "content": "Hello 1"},
                        ]
                    },
                },
            ],
        ),
    ],
)
async def test_serve_deployment_udf_methods(
    mock_serve_deployment_handle, method, test_data
):
    """Test both completions and chat methods."""
    # Create a mock response that will be returned directly
    mock_response = {"test": "response"}
    def mock_remote_call(*args, **kwargs):
        # The UDF awaits __anext__ on the returned async generator.
        async def mock_async_iterator():
            yield mock_response
        return mock_async_iterator()
    getattr(mock_serve_deployment_handle, method).remote.side_effect = mock_remote_call
    udf = ServeDeploymentStageUDF(
        data_column="__data",
        expected_input_keys=["method", "request_kwargs"],
        deployment_name="test_deployment",
        app_name="test_app",
        dtype_mapping={
            "CompletionRequest": CompletionRequest,
            "ChatCompletionRequest": ChatCompletionRequest,
        },
    )
    batch = {"__data": test_data}
    responses = []
    async for response in udf(batch):
        responses.append(response)
    assert len(responses) == 1
    assert "__data" in responses[0]
    assert len(responses[0]["__data"]) == len(test_data)
    for i, item in enumerate(responses[0]["__data"]):
        assert "batch_uuid" in item
        assert "time_taken" in item
        # request_id counts up from 0 within the UDF instance.
        assert item["request_id"] == str(i)
        assert "test" in item  # From the mock response
    assert getattr(mock_serve_deployment_handle, method).remote.call_count == len(
        test_data
    )
@pytest.mark.asyncio
async def test_serve_deployment_invalid_method(mock_serve_deployment_handle):
    """Test that invalid method raises error at runtime."""
    # Set up the mock to simulate a method that doesn't exist
    mock_serve_deployment_handle.invalid_method = None
    udf = ServeDeploymentStageUDF(
        data_column="__data",
        expected_input_keys=["method", "request_kwargs"],
        deployment_name="test_deployment",
        app_name="test_app",
        dtype_mapping={
            "CompletionRequest": CompletionRequest,
        },
    )
    batch = {
        "__data": [
            {
                "method": "invalid_method",
                "dtype": "CompletionRequest",
                "request_kwargs": {"prompt": "Hello", "temperature": 0.7},
            }
        ]
    }
    with pytest.raises(
        ValueError, match="Method invalid_method not found in the serve deployment."
    ):
        async for _ in udf(batch):
            pass
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "dtype_mapping", [None, {"ChatCompletionRequest": ChatCompletionRequest}]
)
async def test_serve_deployment_missing_dtype(
    mock_serve_deployment_handle, dtype_mapping
):
    """Test that missing dtype raises error at runtime."""
    # Covers both an absent mapping and a mapping lacking the requested name.
    udf = ServeDeploymentStageUDF(
        data_column="__data",
        expected_input_keys=["method", "request_kwargs"],
        deployment_name="test_deployment",
        app_name="test_app",
        dtype_mapping=dtype_mapping,
    )
    batch = {
        "__data": [
            {
                "method": "completions",
                "dtype": "CompletionRequest",
                "request_kwargs": {"prompt": "Hello", "temperature": 0.7},
            }
        ]
    }
    with pytest.raises(
        ValueError,
        match="CompletionRequest must be provided in ServeDeploymentProcessorConfig's dtype_mapping.",
    ):
        async for _ in udf(batch):
            pass
# ============================================================================
# Error handling tests for should_continue_on_error
# ============================================================================
@pytest.mark.asyncio
async def test_serve_udf_default_raises_on_error(mock_serve_deployment_handle):
    """Default behavior (should_continue_on_error=False) raises on inference error."""
    def mock_remote_call(*args, **kwargs):
        async def mock_async_iterator():
            raise ValueError("prompt too long")
            yield  # Make it a generator
        return mock_async_iterator()
    mock_serve_deployment_handle.completions.remote.side_effect = mock_remote_call
    udf = ServeDeploymentStageUDF(
        data_column="__data",
        expected_input_keys=["method", "request_kwargs"],
        deployment_name="test_deployment",
        app_name="test_app",
        dtype_mapping={"CompletionRequest": CompletionRequest},
        should_continue_on_error=False,
    )
    batch = {
        "__data": [
            {
                "method": "completions",
                "dtype": "CompletionRequest",
                "request_kwargs": {"prompt": "test", "temperature": 0.7},
            }
        ]
    }
    with pytest.raises(ValueError, match="prompt too long"):
        async for _ in udf(batch):
            pass
@pytest.mark.asyncio
async def test_serve_udf_continue_on_error_yields_error_row(
    mock_serve_deployment_handle,
):
    """With should_continue_on_error=True, errors yield rows with __inference_error__."""
    def mock_remote_call(*args, **kwargs):
        async def mock_async_iterator():
            raise ValueError("prompt too long")
            yield  # Make it a generator
        return mock_async_iterator()
    mock_serve_deployment_handle.completions.remote.side_effect = mock_remote_call
    udf = ServeDeploymentStageUDF(
        data_column="__data",
        expected_input_keys=["method", "request_kwargs"],
        deployment_name="test_deployment",
        app_name="test_app",
        dtype_mapping={"CompletionRequest": CompletionRequest},
        should_continue_on_error=True,
    )
    batch = {
        "__data": [
            {
                "method": "completions",
                "dtype": "CompletionRequest",
                "request_kwargs": {"prompt": "test prompt", "temperature": 0.7},
            }
        ]
    }
    results = []
    async for result in udf(batch):
        results.extend(result["__data"])
    assert len(results) == 1
    assert "__inference_error__" in results[0]
    # Error message format is "<ExceptionType>: <message>".
    assert "ValueError" in results[0]["__inference_error__"]
    assert "prompt too long" in results[0]["__inference_error__"]
    # Error rows include request_kwargs snippet for debuggability
    assert "request_kwargs" in results[0]
@pytest.mark.asyncio
async def test_serve_udf_mixed_success_and_error(mock_serve_deployment_handle):
    """Mixed batch: some rows succeed, some fail."""
    call_count = 0
    def mock_remote_call(*args, **kwargs):
        nonlocal call_count
        call_count += 1
        # Capture the call index so the generator closes over a stable value.
        current_call = call_count
        async def mock_async_iterator():
            # Second call fails
            if current_call == 2:
                raise ValueError("prompt too long")
            yield {"generated_text": f"Response {current_call}"}
        return mock_async_iterator()
    mock_serve_deployment_handle.completions.remote.side_effect = mock_remote_call
    udf = ServeDeploymentStageUDF(
        data_column="__data",
        expected_input_keys=["method", "request_kwargs"],
        deployment_name="test_deployment",
        app_name="test_app",
        dtype_mapping={"CompletionRequest": CompletionRequest},
        should_continue_on_error=True,
    )
    batch = {
        "__data": [
            {
                "method": "completions",
                "dtype": "CompletionRequest",
                "request_kwargs": {"prompt": "first", "temperature": 0.7},
            },
            {
                "method": "completions",
                "dtype": "CompletionRequest",
                "request_kwargs": {"prompt": "second", "temperature": 0.7},
            },
            {
                "method": "completions",
                "dtype": "CompletionRequest",
                "request_kwargs": {"prompt": "third", "temperature": 0.7},
            },
        ]
    }
    results = []
    async for result in udf(batch):
        results.extend(result["__data"])
    assert len(results) == 3
    errors = [r for r in results if r.get("__inference_error__", "") != ""]
    successes = [r for r in results if r.get("__inference_error__", "") == ""]
    assert len(errors) == 1
    assert len(successes) == 2
    assert "ValueError" in errors[0]["__inference_error__"]
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "fatal_error",
    [
        RayActorError(error_msg="Actor died"),
        BackPressureError(num_queued_requests=100, max_queued_requests=50),
        DeploymentUnavailableError(
            deployment_id=DeploymentID(name="test", app_name="test_app")
        ),
    ],
)
async def test_serve_udf_fatal_errors_always_propagate(
    mock_serve_deployment_handle, fatal_error
):
    """Fatal errors (RayActorError, BackPressureError, etc.) always propagate."""
    def mock_remote_call(*args, **kwargs):
        async def mock_async_iterator():
            raise fatal_error
            yield  # Make it a generator
        return mock_async_iterator()
    mock_serve_deployment_handle.completions.remote.side_effect = mock_remote_call
    udf = ServeDeploymentStageUDF(
        data_column="__data",
        expected_input_keys=["method", "request_kwargs"],
        deployment_name="test_deployment",
        app_name="test_app",
        dtype_mapping={"CompletionRequest": CompletionRequest},
        should_continue_on_error=True,  # Even with this True, fatal errors propagate
    )
    batch = {
        "__data": [
            {
                "method": "completions",
                "dtype": "CompletionRequest",
                "request_kwargs": {"prompt": "test", "temperature": 0.7},
            }
        ]
    }
    # Errors outside _SERVE_RECOVERABLE_ERRORS must not be swallowed.
    with pytest.raises(type(fatal_error)):
        async for _ in udf(batch):
            pass
@pytest.mark.asyncio
async def test_serve_udf_unknown_errors_propagate(mock_serve_deployment_handle):
    """Unknown errors propagate even with should_continue_on_error=True."""
    def mock_remote_call(*args, **kwargs):
        async def mock_async_iterator():
            # RuntimeError is not in _SERVE_RECOVERABLE_ERRORS.
            raise RuntimeError("unexpected system error")
            yield
        return mock_async_iterator()
    mock_serve_deployment_handle.completions.remote.side_effect = mock_remote_call
    udf = ServeDeploymentStageUDF(
        data_column="__data",
        expected_input_keys=["method", "request_kwargs"],
        deployment_name="test_deployment",
        app_name="test_app",
        dtype_mapping={"CompletionRequest": CompletionRequest},
        should_continue_on_error=True,
    )
    batch = {
        "__data": [
            {
                "method": "completions",
                "dtype": "CompletionRequest",
                "request_kwargs": {"prompt": "test", "temperature": 0.7},
            }
        ]
    }
    with pytest.raises(RuntimeError, match="unexpected system error"):
        async for _ in udf(batch):
            pass
@pytest.mark.asyncio
async def test_serve_udf_success_with_continue_on_error_includes_none_error(
    mock_serve_deployment_handle,
):
    """Successful rows with should_continue_on_error=True carry an empty
    __inference_error__ string (the success marker)."""
    mock_response = {"generated_text": "Hello!"}
    def mock_remote_call(*args, **kwargs):
        async def mock_async_iterator():
            yield mock_response
        return mock_async_iterator()
    mock_serve_deployment_handle.completions.remote.side_effect = mock_remote_call
    udf = ServeDeploymentStageUDF(
        data_column="__data",
        expected_input_keys=["method", "request_kwargs"],
        deployment_name="test_deployment",
        app_name="test_app",
        dtype_mapping={"CompletionRequest": CompletionRequest},
        should_continue_on_error=True,
    )
    batch = {
        "__data": [
            {
                "method": "completions",
                "dtype": "CompletionRequest",
                "request_kwargs": {"prompt": "test", "temperature": 0.7},
            }
        ]
    }
    results = []
    async for result in udf(batch):
        results.extend(result["__data"])
    assert len(results) == 1
    assert results[0]["__inference_error__"] == ""
    assert results[0]["generated_text"] == "Hello!"
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/tests/batch/gpu/stages/test_serve_deployment_stage.py",
"license": "Apache License 2.0",
"lines": 375,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/scripts/symmetric_run.py | """Symmetric Run for Ray."""
import socket
import subprocess
import sys
import time
from typing import List
import click
import ray
from ray._private.ray_constants import env_integer
from ray._raylet import GcsClient
from ray.exceptions import RpcError
import psutil
# Seconds to wait for cluster membership / GCS liveness before giving up;
# overridable via the RAY_SYMMETRIC_RUN_CLUSTER_WAIT_TIMEOUT env var.
CLUSTER_WAIT_TIMEOUT = env_integer("RAY_SYMMETRIC_RUN_CLUSTER_WAIT_TIMEOUT", 30)
def check_ray_already_started() -> bool:
    """Return True if a Ray instance is already running on this machine."""
    import ray._private.services as services

    # Auto-detect any running GCS instance; a non-empty result means Ray is up.
    return bool(services.find_gcs_addresses())
def check_cluster_ready(nnodes, timeout=CLUSTER_WAIT_TIMEOUT):
    """Wait until at least ``nnodes`` nodes have joined the cluster.

    Args:
        nnodes: Minimum number of nodes expected in the cluster.
        timeout: Seconds to wait before giving up.

    Returns:
        True once the cluster has at least ``nnodes`` nodes, False if the
        timeout elapses first.
    """
    start_time = time.time()
    ray.init(ignore_reinit_error=True)
    while time.time() - start_time < timeout:
        time.sleep(5)
        current_nodes = len(ray.nodes())
        # Fix: use >= rather than ==. The CLI option is --min-nodes, so more
        # nodes than requested may have joined; strict equality would then
        # spin until the timeout and spuriously report failure.
        if current_nodes >= nnodes:
            return True
        click.echo(
            f"Waiting for nodes to start... {current_nodes}/{nnodes} nodes started"
        )
    return False
def check_head_node_ready(address: str, timeout=CLUSTER_WAIT_TIMEOUT):
    """Poll the GCS at ``address`` until it responds or ``timeout`` expires.

    Returns True as soon as the GCS answers a liveness probe, False on timeout.
    """
    deadline = time.time() + timeout
    gcs_client = GcsClient(address=address)
    while time.time() < deadline:
        try:
            gcs_client.check_alive([], timeout=1)
        except RpcError:
            # GCS not reachable yet; back off before the next probe.
            time.sleep(5)
            continue
        click.echo("Ray cluster is ready!")
        return True
    return False
def curate_and_validate_ray_start_args(run_and_start_args: List[str]) -> List[str]:
    """Strip symmetric_run's own options and reject unsupported ray start args.

    Args:
        run_and_start_args: All arguments before the ``--`` separator.

    Returns:
        The remaining arguments to forward to ``ray start``.

    Raises:
        click.ClickException: If a disallowed option is present.
    """
    # Reparse the arguments to remove symmetric_run arguments.
    ctx = symmetric_run.make_context("_", run_and_start_args, resilient_parsing=True)
    cleaned_args = list(ctx.params["ray_args_and_entrypoint"])
    # Options that symmetric_run itself controls; allowing them would conflict
    # with the ray start commands this script builds.
    banned = ("--head", "--node-ip-address", "--port", "--block")
    for arg in cleaned_args:
        for opt in banned:
            # Also catch the "--opt=value" spelling: the previous exact-match
            # check let e.g. "--port=6379" slip through validation even though
            # "=" style arguments are supported and forwarded to ray start.
            if arg == opt or arg.startswith(opt + "="):
                raise click.ClickException(
                    f"Cannot use {opt} option in symmetric_run."
                )
    return cleaned_args
@click.command(
    name="symmetric_run",
    context_settings={"ignore_unknown_options": True, "allow_extra_args": True},
    help="""Command to start Ray across all nodes and execute an entrypoint command.

USAGE:
ray symmetric-run --address ADDRESS
[--min-nodes NUM_NODES] [RAY_START_OPTIONS] -- [ENTRYPOINT_COMMAND]

DESCRIPTION:
This command (1) starts a Ray cluster across all nodes,
(2) runs a command on the head node, and (3) stops the Ray cluster.
The '--' separator is required to distinguish between Ray start arguments
and the entrypoint command. The --min-nodes option is optional and
can be used to wait for a specific number of nodes to start.

EXAMPLES:
# Start Ray with default settings and run a Python script
ray symmetric-run --address 127.0.0.1:6379 -- python my_script.py

# Start Ray with specific head node and run a command
ray symmetric-run --address 127.0.0.1:6379 --min-nodes 4 -- python train_model.py --epochs=100

# Start Ray and run a multi-word command
ray symmetric-run --address 127.0.0.1:6379 --min-nodes 4 --num-cpus=4 -- python -m my_module --config=prod

RAY START OPTIONS:
Most ray start command options are supported. Arguments that are not
supported are: --head, --node-ip-address, --port, --block.

SEPARATOR REQUIREMENT:
The '--' separator is mandatory and must appear between Ray start
arguments and the entrypoint command. This ensures clear separation
between the two sets of arguments.
""",
)
@click.option(
    "--address", required=True, type=str, help="The address of the Ray cluster."
)
@click.option(
    "--min-nodes",
    type=int,
    help="If provided, wait for this number of nodes to start.",
)
@click.argument("ray_args_and_entrypoint", nargs=-1, type=click.UNPROCESSED)
def symmetric_run(address, min_nodes, ray_args_and_entrypoint):
    """Start Ray on this node (head or worker), run the entrypoint on the
    head node, then stop Ray.  See the click ``help`` text above for usage."""
    # Work from the raw argv rather than click's parsed params so the '--'
    # separator position is preserved exactly as the user typed it.
    all_args = sys.argv[1:]
    if all_args and all_args[0] == "symmetric-run":
        all_args = all_args[1:]
    try:
        separator = all_args.index("--")
    except ValueError:
        raise click.ClickException(
            "No separator '--' found in arguments. Please use '--' to "
            "separate Ray start arguments and the entrypoint command."
        )
    # Everything before '--' configures ray start; everything after is the
    # command to execute on the head node.
    run_and_start_args, entrypoint_on_head = (
        all_args[:separator],
        all_args[separator + 1 :],
    )
    ray_start_args = curate_and_validate_ray_start_args(run_and_start_args)
    min_nodes = 1 if min_nodes is None else min_nodes
    if not entrypoint_on_head:
        raise click.ClickException("No entrypoint command provided.")
    if check_ray_already_started():
        raise click.ClickException("Ray is already started on this node.")

    # 1. Parse address and check if we are on the head node.
    gcs_host_port = ray._common.network_utils.parse_address(address)
    if gcs_host_port is None:
        raise click.ClickException(
            f"Invalid address format: {address}, should be `host:port`"
        )
    gcs_host, gcs_port = gcs_host_port
    try:
        # AF_UNSPEC allows resolving both IPv4 and IPv6
        addrinfo = socket.getaddrinfo(
            gcs_host, gcs_port, socket.AF_UNSPEC, socket.SOCK_STREAM
        )
        resolved_gcs_host = addrinfo[0][4][0]
    except socket.gaierror:
        raise click.ClickException(f"Could not resolve hostname: {gcs_host}")

    # Collect every IP bound to a local interface; if the resolved GCS host
    # is among them, this node is the head node.
    my_ips = []
    for iface, addrs in psutil.net_if_addrs().items():
        for addr in addrs:
            # Look for AF_INET (IPv4) or AF_INET6 (IPv6)
            if addr.family in [
                socket.AddressFamily.AF_INET,
                socket.AddressFamily.AF_INET6,
            ]:
                my_ips.append(addr.address)
    if min_nodes > 1:
        # Ban localhost ips if we are not running on a single node
        # to avoid starting N head nodes
        my_ips = [ip for ip in my_ips if ip != "127.0.0.1" and ip != "::1"]
    is_head = resolved_gcs_host in my_ips

    result = None
    # 2. Start Ray and run commands.
    try:
        if is_head:
            # On the head node, start Ray, run the command, then stop Ray.
            click.echo("On head node. Starting Ray cluster head...")
            # Build the ray start command with all parameters
            ray_start_cmd = [
                "ray",
                "start",
                "--head",
                f"--node-ip-address={resolved_gcs_host}",
                f"--port={gcs_port}",
                *ray_start_args,
            ]
            # Start Ray head. This runs in the background and hides output.
            subprocess.run(ray_start_cmd, check=True, capture_output=True)
            click.echo("Head node started.")
            click.echo("=======================")
            if min_nodes > 1 and not check_cluster_ready(min_nodes):
                raise click.ClickException(
                    "Timed out waiting for other nodes to start."
                )
            click.echo(
                f"Running command on head node: {entrypoint_on_head}",
            )
            click.echo("=======================")
            result = subprocess.run(entrypoint_on_head)
            click.echo("=======================")
        else:
            # On a worker node, start Ray and connect to the head.
            click.echo(f"On worker node. Connecting to Ray cluster at {address}...")
            if not check_head_node_ready(address):
                raise click.ClickException("Timed out waiting for head node to start.")
            # Build the ray start command for worker nodes with all parameters
            ray_start_cmd = [
                "ray",
                "start",
                "--address",
                address,
                "--block",
                *ray_start_args,
            ]
            # This command will block until the Ray cluster is stopped.
            subprocess.run(ray_start_cmd, check=True)
    except subprocess.CalledProcessError as e:
        click.echo(f"Failed to start Ray: {e}", err=True)
        if e.stdout:
            click.echo(f"stdout:\n{e.stdout.decode()}", err=True)
        if e.stderr:
            click.echo(f"stderr:\n{e.stderr.decode()}", err=True)
    except KeyboardInterrupt:
        # This can be triggered by ctrl-c on the user's side.
        click.echo("Interrupted by user.", err=True)
    finally:
        # Stop Ray cluster.
        subprocess.run(["ray", "stop"])
    # Propagate the exit code of the user script.
    if result is not None and result.returncode != 0:
        click.echo(f"Command failed with return code {result.returncode}", err=True)
        sys.exit(result.returncode)
# Allow invoking this module directly as a CLI.
if __name__ == "__main__":
    symmetric_run()
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/scripts/symmetric_run.py",
"license": "Apache License 2.0",
"lines": 214,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/tests/test_symmetric_run.py | import sys
from contextlib import contextmanager
from unittest.mock import MagicMock, patch
import pytest
from click.testing import CliRunner
import ray
import ray.scripts.scripts as scripts
@contextmanager
def _setup_mock_network_utils(curr_ip, head_ip):
    """Patch DNS resolution and NIC enumeration for symmetric_run tests.

    ``socket.getaddrinfo`` resolves to ``curr_ip``, while
    ``psutil.net_if_addrs`` reports a single loopback interface bound to
    ``head_ip``.
    """
    import socket

    # Minimal stand-in for a psutil address record (needs .family/.address).
    fake_addr = type("addr", (), {"family": socket.AF_INET, "address": head_ip})()
    with patch("socket.getaddrinfo") as mock_getaddrinfo, patch(
        "psutil.net_if_addrs"
    ) as mock_net_if_addrs:
        mock_getaddrinfo.return_value = [("", "", "", "", (curr_ip, 6379))]
        mock_net_if_addrs.return_value = {"lo": [fake_addr]}
        yield
@pytest.fixture
def cleanup_ray():
    """Shutdown all ray instances"""
    yield
    # Teardown: force-stop any Ray processes the test started, then shut
    # down the in-process Ray client connection.
    runner = CliRunner()
    runner.invoke(scripts.stop, ["--force"])
    ray.shutdown()
def test_symmetric_run_basic_interface(monkeypatch, cleanup_ray):
    """Test basic symmetric_run interface with minimal arguments."""
    from ray.scripts.symmetric_run import symmetric_run

    runner = CliRunner()
    # Mock subprocess.run to avoid actually starting Ray
    with patch("subprocess.run") as mock_run:
        mock_run.return_value.returncode = 0
        # Resolve the address to this node's own IP so the head-node path runs.
        with _setup_mock_network_utils("127.0.0.1", "127.0.0.1"):
            args = ["--address", "127.0.0.1:6379", "--", "echo", "test"]
            # symmetric_run re-reads sys.argv to find the '--' separator.
            with patch("sys.argv", ["/bin/ray", "symmetric-run", *args]):
                # Test basic symmetric_run call using CliRunner
                result = runner.invoke(symmetric_run, args)
                assert result.exit_code == 0
                # Verify that subprocess.run was called for ray start
                assert mock_run.called
                calls = mock_run.call_args_list
                # Should have called ray start with --head
                ray_start_calls = [
                    call for call in calls if "ray" in str(call) and "start" in str(call)
                ]
                assert len(ray_start_calls) > 0
                # Should have called ray stop
                ray_stop_calls = [
                    call for call in calls if "ray" in str(call) and "stop" in str(call)
                ]
                assert len(ray_stop_calls) > 0
def test_symmetric_run_worker_node_behavior(monkeypatch, cleanup_ray):
    """Test symmetric_run behavior when not on the head node."""
    from ray.scripts.symmetric_run import symmetric_run

    runner = CliRunner()
    with patch("subprocess.run") as mock_run:
        mock_run.return_value.returncode = 0
        # Local IPs differ from the resolved GCS host, forcing the worker path.
        with _setup_mock_network_utils("192.168.1.100", "192.168.1.101"):
            # Mock socket connection check to simulate head node ready
            with patch("socket.socket") as mock_socket:
                mock_socket_instance = MagicMock()
                mock_socket_instance.connect_ex.return_value = 0
                mock_socket.return_value.__enter__.return_value = mock_socket_instance
                # Test worker node behavior
                args = ["--address", "192.168.1.100:6379", "--", "echo", "test"]
                with patch("sys.argv", ["/bin/ray", "symmetric-run", *args]):
                    # Skip the real GCS liveness poll; pretend the head is up.
                    with patch(
                        "ray.scripts.symmetric_run.check_head_node_ready"
                    ) as mock_check_head_node_ready:
                        mock_check_head_node_ready.return_value = True
                        result = runner.invoke(symmetric_run, args)
                        assert result.exit_code == 0
                        # Verify that subprocess.run was called
                        assert mock_run.called
                        calls = mock_run.call_args_list
                        # Should have called ray start with --address (worker mode)
                        ray_start_calls = [
                            call
                            for call in calls
                            if "ray" in str(call) and "start" in str(call)
                        ]
                        assert len(ray_start_calls) > 0
                        # Check that it's in worker mode (--address instead of --head)
                        start_call = ray_start_calls[0]
                        start_args = start_call[0][0]
                        assert "--address" in start_args
                        assert "192.168.1.100:6379" in start_args
                        assert "--head" not in start_args
                        assert "--block" in start_args  # Worker nodes should block
def test_symmetric_run_arg_validation(monkeypatch, cleanup_ray):
    """Test that symmetric_run validates arguments."""
    from ray.scripts.symmetric_run import symmetric_run

    runner = CliRunner()
    # Mock subprocess.run to avoid actually starting Ray
    with _setup_mock_network_utils("127.0.0.1", "127.0.0.1"):
        # Baseline: a well-formed invocation succeeds.
        with patch("subprocess.run") as mock_run:
            mock_run.return_value.returncode = 0
            args = ["--address", "127.0.0.1:6379", "--", "echo", "test"]
            with patch("sys.argv", ["/bin/ray", "symmetric-run", *args]):
                # Test basic symmetric_run call using CliRunner
                result = runner.invoke(symmetric_run, args)
                assert result.exit_code == 0

        # Test that invalid arguments are rejected: missing '--' separator.
        with patch("subprocess.run") as mock_run:
            mock_run.return_value.returncode = 0
            args = ["--address", "127.0.0.1:6379", "echo", "test"]
            with patch("sys.argv", ["/bin/ray", "symmetric-run", *args]):
                result = runner.invoke(symmetric_run, args)
                assert result.exit_code == 1
                assert "No separator" in result.output

        # Test that invalid arguments are rejected: --head is reserved.
        with patch("subprocess.run") as mock_run:
            mock_run.return_value.returncode = 0
            args = ["--address", "127.0.0.1:6379", "--head", "--", "echo", "test"]
            with patch("sys.argv", ["/bin/ray", "symmetric-run", *args]):
                result = runner.invoke(symmetric_run, args)
                assert result.exit_code == 1
                assert "Cannot use --head option in symmetric_run." in result.output

        with patch("subprocess.run") as mock_run:
            mock_run.return_value.returncode = 0
            # Test args with "=" are passed to ray start
            args = ["--address", "127.0.0.1:6379", "--num-cpus=4", "--", "echo", "test"]
            with patch("sys.argv", ["/bin/ray", "symmetric-run", *args]):
                result = runner.invoke(symmetric_run, args)
                assert result.exit_code == 0
                ray_start_calls = [
                    call
                    for call in mock_run.call_args_list
                    if "ray" in str(call) and "start" in str(call)
                ]
                assert len(ray_start_calls) > 0
                assert "--num-cpus=4" in ray_start_calls[0][0][0]
# Allow running this test module directly, forwarding pytest's exit code.
if __name__ == "__main__":
    sys.exit(pytest.main(["-sv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/test_symmetric_run.py",
"license": "Apache License 2.0",
"lines": 141,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:gen_py_proto.py | from bazel.gen_extract import gen_extract
if __name__ == "__main__":
    # Extract generated Python protobuf sources, clearing stale output first.
    archives = ["ray_py_proto.zip"]
    generated_dirs = [
        "ray/core/generated",
        "ray/serve/generated",
    ]
    gen_extract(archives, clear_dir_first=generated_dirs)
| {
"repo_id": "ray-project/ray",
"file_path": "gen_py_proto.py",
"license": "Apache License 2.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/serve/tests/test_https_proxy.py | import asyncio
import json
import os
import ssl
import tempfile
import pytest
import requests
import websockets
import ray
from ray import serve
from ray._private.tls_utils import generate_self_signed_tls_certs
from ray.serve.config import HTTPOptions
@pytest.fixture(scope="session")
def ssl_cert_and_key():
    """Session-scoped self-signed TLS cert/key pair written to a temp dir."""
    # Generate certificate and key using Ray's built-in utility.
    cert_contents, key_contents = generate_self_signed_tls_certs()

    # Directory persists for the whole test session; cleaned up in teardown.
    temp_dir = tempfile.mkdtemp(prefix="ray_serve_https_test_")
    cert_path = os.path.join(temp_dir, "server.crt")
    key_path = os.path.join(temp_dir, "server.key")
    for path, contents in ((cert_path, cert_contents), (key_path, key_contents)):
        with open(path, "w") as f:
            f.write(contents)

    yield {
        "key_path": key_path,
        "cert_path": cert_path,
        "temp_dir": temp_dir,
    }

    # Best-effort removal of the temp directory; ignore cleanup errors.
    import shutil

    try:
        shutil.rmtree(temp_dir)
    except Exception:
        pass
@pytest.fixture
def https_serve_instance(ssl_cert_and_key):
    """Start Ray Serve with HTTPS enabled."""
    # Ensure Ray is shutdown before starting
    try:
        ray.shutdown()
    except Exception:
        pass
    # Disable runtime env upload (dashboard should work now that it's built)
    ray.init(runtime_env={"working_dir": None})
    # Serve's HTTP proxy picks up TLS material from HTTPOptions.
    serve.start(
        http_options=HTTPOptions(
            ssl_keyfile=ssl_cert_and_key["key_path"],
            ssl_certfile=ssl_cert_and_key["cert_path"],
        )
    )
    yield serve
    # Teardown: stop Serve first, then the Ray runtime.
    serve.shutdown()
    ray.shutdown()
class TestHTTPSProxy:
    """End-to-end HTTPS tests against a live Serve instance with TLS enabled."""

    def test_https_basic_deployment(self, https_serve_instance):
        """Test basic HTTPS deployment functionality."""

        @serve.deployment
        def hello():
            return "Hello HTTPS!"

        serve.run(hello.bind())

        # Test HTTPS request with certificate verification disabled for self-signed cert
        response = requests.get(
            "https://localhost:8000/hello",
            verify=False,  # Skip cert verification for self-signed
        )
        assert response.status_code == 200
        assert response.text == "Hello HTTPS!"

    def test_https_vs_http_requests(self, https_serve_instance):
        """Test that HTTP requests fail when HTTPS is enabled."""

        @serve.deployment
        def echo():
            return "echo"

        serve.run(echo.bind())

        # HTTPS request should succeed
        https_response = requests.get("https://localhost:8000/echo", verify=False)
        assert https_response.status_code == 200

        # HTTP request should fail with connection error
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.get("http://localhost:8000/echo", timeout=5)

    def test_https_with_fastapi_deployment(self, https_serve_instance):
        """Test HTTPS with FastAPI-based deployment."""
        from fastapi import FastAPI

        app = FastAPI()

        @app.get("/items/{item_id}")
        async def read_item(item_id: int):
            return {"item_id": item_id, "secure": True}

        @serve.deployment
        @serve.ingress(app)
        class FastAPIDeployment:
            pass

        serve.run(FastAPIDeployment.bind())

        response = requests.get("https://localhost:8000/items/42", verify=False)
        assert response.status_code == 200
        assert response.json() == {"item_id": 42, "secure": True}

    def test_https_concurrent_requests(self, https_serve_instance):
        """Test HTTPS with concurrent requests."""
        import concurrent.futures

        @serve.deployment
        def concurrent_handler():
            import time

            time.sleep(0.1)  # Small delay to test concurrency
            return "concurrent"

        serve.run(concurrent_handler.bind())

        def make_request():
            return requests.get(
                "https://localhost:8000/concurrent_handler", verify=False
            )

        # Send 10 concurrent requests
        with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
            futures = [executor.submit(make_request) for _ in range(10)]
            responses = [f.result() for f in futures]

        # All requests should succeed
        for response in responses:
            assert response.status_code == 200
            assert response.text == "concurrent"

    def test_https_large_payload(self, https_serve_instance):
        """Test HTTPS with large payloads."""

        @serve.deployment
        class LargePayloadHandler:
            def __call__(self, request):
                # Return a large response (1MB)
                large_data = "x" * (1024 * 1024)  # 1MB string
                return {"data": large_data, "size": len(large_data)}

        serve.run(LargePayloadHandler.bind())

        response = requests.get(
            "https://localhost:8000/LargePayloadHandler", verify=False
        )
        assert response.status_code == 200
        data = response.json()
        assert data["size"] == 1024 * 1024
        assert len(data["data"]) == 1024 * 1024

    def test_https_websocket_with_fastapi(self, https_serve_instance):
        """Test WebSocket functionality with FastAPI over HTTPS."""
        from fastapi import FastAPI, WebSocket, WebSocketDisconnect

        app = FastAPI()

        @app.websocket("/ws")
        async def websocket_endpoint(websocket: WebSocket):
            await websocket.accept()
            try:
                while True:
                    # Receive message from client
                    data = await websocket.receive_text()
                    message = json.loads(data)
                    # Echo back with modification
                    response = {
                        "echo": message.get("message", ""),
                        "secure": True,
                        "protocol": "wss",
                    }
                    await websocket.send_text(json.dumps(response))
            except WebSocketDisconnect:
                pass

        @serve.deployment
        @serve.ingress(app)
        class WebSocketDeployment:
            pass

        serve.run(WebSocketDeployment.bind())

        # Test WebSocket connection over HTTPS (wss://)
        async def test_websocket():
            # Create SSL context that doesn't verify certificates (for self-signed certs)
            ssl_context = ssl.create_default_context()
            ssl_context.check_hostname = False
            ssl_context.verify_mode = ssl.CERT_NONE

            uri = "wss://localhost:8000/ws"
            async with websockets.connect(uri, ssl=ssl_context) as websocket:
                # Send test message
                test_message = {"message": "Hello WebSocket over HTTPS!"}
                await websocket.send(json.dumps(test_message))
                # Receive response
                response = await websocket.recv()
                data = json.loads(response)
                # Verify response
                assert data["echo"] == "Hello WebSocket over HTTPS!"
                assert data["secure"] is True
                assert data["protocol"] == "wss"
                # Send another message to test bidirectional communication
                test_message2 = {"message": "Second message"}
                await websocket.send(json.dumps(test_message2))
                response2 = await websocket.recv()
                data2 = json.loads(response2)
                assert data2["echo"] == "Second message"

        # Run the async test
        asyncio.run(test_websocket())

    def test_https_websocket_multiple_connections(self, https_serve_instance):
        """Test multiple WebSocket connections over HTTPS."""
        from fastapi import FastAPI, WebSocket, WebSocketDisconnect

        app = FastAPI()
        # Store active connections
        connections = []

        @app.websocket("/ws/broadcast")
        async def websocket_broadcast(websocket: WebSocket):
            await websocket.accept()
            connections.append(websocket)
            try:
                while True:
                    data = await websocket.receive_text()
                    message = json.loads(data)
                    # Broadcast to all connections
                    broadcast_message = {
                        "type": "broadcast",
                        "message": message.get("message", ""),
                        "connections": len(connections),
                        "secure": True,
                    }
                    # Send to all connected clients
                    disconnected = []
                    for conn in connections:
                        try:
                            await conn.send_text(json.dumps(broadcast_message))
                        except Exception:
                            disconnected.append(conn)
                    # Remove disconnected clients
                    for conn in disconnected:
                        connections.remove(conn)
            except WebSocketDisconnect:
                if websocket in connections:
                    connections.remove(websocket)

        @serve.deployment
        @serve.ingress(app)
        class WebSocketBroadcastDeployment:
            pass

        serve.run(WebSocketBroadcastDeployment.bind())

        async def test_multiple_websockets():
            # Self-signed certs: disable hostname check and verification.
            ssl_context = ssl.create_default_context()
            ssl_context.check_hostname = False
            ssl_context.verify_mode = ssl.CERT_NONE

            uri = "wss://localhost:8000/ws/broadcast"
            # Connect multiple clients
            websocket1 = await websockets.connect(uri, ssl=ssl_context)
            websocket2 = await websockets.connect(uri, ssl=ssl_context)
            try:
                # Send message from client 1
                test_message = {"message": "Hello from client 1"}
                await websocket1.send(json.dumps(test_message))
                # Both clients should receive the broadcast
                response1 = await websocket1.recv()
                response2 = await websocket2.recv()
                data1 = json.loads(response1)
                data2 = json.loads(response2)
                # Verify both received the same broadcast
                assert data1["type"] == "broadcast"
                assert data1["message"] == "Hello from client 1"
                assert data1["connections"] == 2
                assert data1["secure"] is True
                assert data2["type"] == "broadcast"
                assert data2["message"] == "Hello from client 1"
                assert data2["connections"] == 2
                assert data2["secure"] is True
            finally:
                await websocket1.close()
                await websocket2.close()

        # Run the async test
        asyncio.run(test_multiple_websockets())
class TestSSLConfiguration:
    """Validation tests for the SSL-related fields of ``HTTPOptions``."""

    def test_ssl_config_validation_success(self, ssl_cert_and_key):
        """Test successful SSL configuration validation."""
        key_path = ssl_cert_and_key["key_path"]
        cert_path = ssl_cert_and_key["cert_path"]
        # Should not raise exception
        options = HTTPOptions(ssl_keyfile=key_path, ssl_certfile=cert_path)
        assert options.ssl_keyfile == key_path
        assert options.ssl_certfile == cert_path

    def test_ssl_config_validation_missing_key(self):
        """Test SSL configuration validation with missing key file."""
        with tempfile.TemporaryDirectory() as temp_dir:
            cert_path = os.path.join(temp_dir, "test.crt")
            with open(cert_path, "w") as f:
                f.write("dummy cert")
            # Cert without key must be rejected by HTTPOptions validation.
            with pytest.raises(ValueError) as exc_info:
                HTTPOptions(ssl_keyfile=None, ssl_certfile=cert_path)
            assert "Both ssl_keyfile and ssl_certfile must be provided together" in str(
                exc_info.value
            )

    def test_ssl_config_validation_missing_cert(self):
        """Test SSL configuration validation with missing cert file."""
        with tempfile.TemporaryDirectory() as temp_dir:
            key_path = os.path.join(temp_dir, "test.key")
            with open(key_path, "w") as f:
                f.write("dummy key")
            # Key without cert must be rejected by HTTPOptions validation.
            with pytest.raises(ValueError) as exc_info:
                HTTPOptions(ssl_keyfile=key_path, ssl_certfile=None)
            assert "Both ssl_keyfile and ssl_certfile must be provided together" in str(
                exc_info.value
            )

    def test_ssl_config_with_password(self, ssl_cert_and_key):
        """Test SSL configuration with key file password."""
        key_path = ssl_cert_and_key["key_path"]
        cert_path = ssl_cert_and_key["cert_path"]
        options = HTTPOptions(
            ssl_keyfile=key_path, ssl_certfile=cert_path, ssl_keyfile_password="secret"
        )
        assert options.ssl_keyfile_password == "secret"

    def test_ssl_config_with_ca_certs(self, ssl_cert_and_key):
        """Test SSL configuration with CA certificates."""
        key_path = ssl_cert_and_key["key_path"]
        cert_path = ssl_cert_and_key["cert_path"]
        # Use cert as CA for testing purposes
        ca_path = cert_path
        options = HTTPOptions(
            ssl_keyfile=key_path, ssl_certfile=cert_path, ssl_ca_certs=ca_path
        )
        assert options.ssl_ca_certs == ca_path
class TestHTTPSErrorHandling:
    """Error-path tests for SSL configuration (no live server needed)."""

    def test_ssl_file_paths_validation(self):
        """Test that SSL file paths are properly configured in HTTPOptions."""
        with tempfile.TemporaryDirectory() as temp_dir:
            key_path = os.path.join(temp_dir, "test.key")
            cert_path = os.path.join(temp_dir, "test.crt")
            # Create dummy files (content doesn't matter for this test)
            with open(key_path, "w") as f:
                f.write("dummy key")
            with open(cert_path, "w") as f:
                f.write("dummy cert")
            # Test that HTTPOptions accepts valid file paths
            options = HTTPOptions(ssl_keyfile=key_path, ssl_certfile=cert_path)
            assert options.ssl_keyfile == key_path
            assert options.ssl_certfile == cert_path

    def test_https_requires_both_cert_and_key_files(self):
        """Test that HTTPS configuration requires both certificate and key files."""
        # This test validates our SSL validation logic works correctly
        # Should work with both files
        options = HTTPOptions(ssl_keyfile="key.pem", ssl_certfile="cert.pem")
        assert options.ssl_keyfile == "key.pem"
        assert options.ssl_certfile == "cert.pem"
        # Should work with neither file
        options = HTTPOptions()
        assert options.ssl_keyfile is None
        assert options.ssl_certfile is None
class TestHTTPSIntegration:
    """Integration scenarios: non-default ports and in-place redeployments."""

    def test_https_with_custom_port(self, ssl_cert_and_key):
        """Test HTTPS on custom port."""
        # Ensure Ray is shutdown before starting
        try:
            ray.shutdown()
        except Exception:
            pass
        # Disable dashboard to prevent SSL conflicts and disable runtime env upload
        ray.init(include_dashboard=False, runtime_env={"working_dir": None})
        try:
            serve.start(
                http_options=HTTPOptions(
                    host="127.0.0.1",
                    port=8443,
                    ssl_keyfile=ssl_cert_and_key["key_path"],
                    ssl_certfile=ssl_cert_and_key["cert_path"],
                )
            )

            @serve.deployment
            def custom_port_handler():
                return "custom port"

            serve.run(custom_port_handler.bind())
            response = requests.get(
                "https://127.0.0.1:8443/custom_port_handler", verify=False
            )
            assert response.status_code == 200
            assert response.text == "custom port"
        finally:
            # Always tear down Serve and Ray, even on assertion failure.
            try:
                serve.shutdown()
            except Exception:
                pass
            ray.shutdown()

    def test_https_deployment_update(self, https_serve_instance):
        """Test deployment updates work correctly with HTTPS."""

        @serve.deployment
        def updatable():
            return "version 1"

        serve.run(updatable.bind())
        # Test initial version
        response = requests.get("https://localhost:8000/updatable", verify=False)
        assert response.text == "version 1"

        # Update deployment
        @serve.deployment
        def updatable():
            return "version 2"

        serve.run(updatable.bind())
        # Test updated version
        response = requests.get("https://localhost:8000/updatable", verify=False)
        assert response.text == "version 2"
# Allow running this test module directly, forwarding pytest's exit code.
if __name__ == "__main__":
    import sys

    sys.exit(pytest.main(["-v", "-s", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/test_https_proxy.py",
"license": "Apache License 2.0",
"lines": 385,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/_internal/execution/backpressure_policy/downstream_capacity_backpressure_policy.py | import logging
from typing import TYPE_CHECKING, Optional
from .backpressure_policy import BackpressurePolicy
from ray._common.utils import env_float
from ray.data._internal.execution.resource_manager import (
ResourceManager,
)
from ray.data.context import DataContext
if TYPE_CHECKING:
from ray.data._internal.execution.interfaces.physical_operator import (
PhysicalOperator,
)
from ray.data._internal.execution.streaming_executor_state import Topology
# Module-level logger for backpressure diagnostics.
logger = logging.getLogger(__name__)
def get_available_object_store_budget_fraction(
    resource_manager: "ResourceManager",
    op: "PhysicalOperator",
    consider_downstream_ineligible_ops: bool,
) -> Optional[float]:
    """Fraction of the operator's object store memory budget still available.

    Args:
        resource_manager: The resource manager to query.
        op: The operator whose budget fraction is computed.
        consider_downstream_ineligible_ops: If True, fold downstream
            ineligible ops into the usage/budget calculation. If False,
            only this op's usage/budget is considered.

    Returns:
        ``budget / (usage + budget)``, or None when usage/budget are
        unavailable or both are zero.
    """
    usage = resource_manager.get_op_usage(
        op, include_ineligible_downstream=consider_downstream_ineligible_ops
    )
    budget = resource_manager.get_budget(op)
    if usage is None or budget is None:
        return None
    used_bytes = usage.object_store_memory
    budget_bytes = budget.object_store_memory
    denominator = used_bytes + budget_bytes
    # Guard against division by zero when neither usage nor budget is reported.
    return None if denominator == 0 else budget_bytes / denominator
def get_utilized_object_store_budget_fraction(
    resource_manager: "ResourceManager",
    op: "PhysicalOperator",
    consider_downstream_ineligible_ops: bool,
) -> Optional[float]:
    """Fraction of the operator's object store memory budget already used.

    Complement of :func:`get_available_object_store_budget_fraction`.

    Args:
        resource_manager: The resource manager to query.
        op: The operator whose utilized fraction is computed.
        consider_downstream_ineligible_ops: If True, fold downstream
            ineligible ops into the usage/budget calculation.

    Returns:
        ``1 - available fraction``, or None if the available fraction is
        unknown.
    """
    available = get_available_object_store_budget_fraction(
        resource_manager,
        op,
        consider_downstream_ineligible_ops=consider_downstream_ineligible_ops,
    )
    return None if available is None else 1 - available
class DownstreamCapacityBackpressurePolicy(BackpressurePolicy):
    """Backpressure policy based on downstream processing capacity.

    To backpressure a given operator, use queue size build up / downstream capacity ratio.
    This ratio represents the upper limit of buffering in object store between pipeline stages
    to optimize for throughput.
    """

    # Threshold for per-Op object store budget utilization vs total
    # (utilization / total) ratio to enable downstream capacity backpressure.
    OBJECT_STORE_BUDGET_UTIL_THRESHOLD = env_float(
        "RAY_DATA_DOWNSTREAM_CAPACITY_OBJECT_STORE_BUDGET_UTIL_THRESHOLD", 0.9
    )

    @property
    def name(self) -> str:
        # Short identifier used when reporting which policy applied backpressure.
        return "DownstreamCapacity"

    def __init__(
        self,
        data_context: DataContext,
        topology: "Topology",
        resource_manager: "ResourceManager",
    ):
        super().__init__(data_context, topology, resource_manager)
        # None disables this policy entirely (see _should_skip_backpressure).
        self._backpressure_capacity_ratio = (
            self._data_context.downstream_capacity_backpressure_ratio
        )
        if self._backpressure_capacity_ratio is not None:
            logger.debug(
                f"DownstreamCapacityBackpressurePolicy enabled with backpressure capacity ratio: {self._backpressure_capacity_ratio}"
            )

    def _get_queue_size_bytes(self, op: "PhysicalOperator") -> int:
        """Get the output current queue size
        (this operator + ineligible downstream operators) in bytes for the given operator.
        """
        op_outputs_usage = self._topology[op].output_queue_bytes()
        # Also account the downstream ineligible operators' memory usage.
        op_outputs_usage += sum(
            self._resource_manager.get_op_usage(next_op).object_store_memory
            for next_op in self._resource_manager._get_downstream_ineligible_ops(op)
        )
        return op_outputs_usage

    def _get_downstream_capacity_size_bytes(self, op: "PhysicalOperator") -> int:
        """Get the downstream capacity size for the given operator.

        Downstream capacity size is the sum of the pending task inputs of the
        downstream eligible operators.
        If an output dependency is ineligible, skip it and recurse down to find
        eligible output dependencies. If there are no output dependencies,
        return external consumer bytes.
        """
        if not op.output_dependencies:
            # No output dependencies, return external consumer bytes.
            return self._resource_manager.get_external_consumer_bytes()
        total_capacity_size_bytes = 0
        for output_dependency in op.output_dependencies:
            if self._resource_manager.is_op_eligible(output_dependency):
                # Output dependency is eligible, add its pending task inputs.
                total_capacity_size_bytes += (
                    output_dependency.metrics.obj_store_mem_pending_task_inputs or 0
                )
            else:
                # Output dependency is ineligible, recurse down to find eligible ops.
                total_capacity_size_bytes += self._get_downstream_capacity_size_bytes(
                    output_dependency
                )
        return total_capacity_size_bytes

    def _should_skip_backpressure(self, op: "PhysicalOperator") -> bool:
        """Check if backpressure should be skipped for the operator.

        TODO(srinathk10): Extract this to common logic to skip invoking BackpressurePolicy.
        """
        if self._backpressure_capacity_ratio is None:
            # Downstream capacity backpressure is disabled.
            return True
        if not self._resource_manager.is_op_eligible(op):
            # Operator is not eligible for backpressure.
            return True
        if self._resource_manager._is_blocking_materializing_op(op):
            # Operator is materializing, so no need to perform backpressure.
            return True
        return False

    def _get_queue_ratio(self, op: "PhysicalOperator") -> float:
        """Get queue/capacity ratio for the operator."""
        queue_size_bytes = self._get_queue_size_bytes(op)
        downstream_capacity_size_bytes = self._get_downstream_capacity_size_bytes(op)
        if downstream_capacity_size_bytes == 0:
            # No downstream capacity to backpressure against, so no backpressure.
            return 0
        return queue_size_bytes / downstream_capacity_size_bytes

    def _should_apply_backpressure(self, op: "PhysicalOperator") -> bool:
        """Check if backpressure should be applied for the operator.

        Returns True if backpressure should be applied, False otherwise.
        """
        if self._should_skip_backpressure(op):
            return False
        utilized_budget_fraction = get_utilized_object_store_budget_fraction(
            self._resource_manager, op, consider_downstream_ineligible_ops=True
        )
        if (
            utilized_budget_fraction is not None
            and utilized_budget_fraction <= self.OBJECT_STORE_BUDGET_UTIL_THRESHOLD
        ):
            # Utilized budget fraction is below threshold, so should skip backpressure.
            return False
        queue_ratio = self._get_queue_ratio(op)
        # Apply backpressure if queue ratio exceeds the threshold.
        return queue_ratio > self._backpressure_capacity_ratio

    def can_add_input(self, op: "PhysicalOperator") -> bool:
        """Determine if we can add input to the operator based on
        downstream capacity.
        """
        return not self._should_apply_backpressure(op)

    def max_task_output_bytes_to_read(self, op: "PhysicalOperator") -> Optional[int]:
        """Return the maximum bytes of pending task outputs can be read for
        the given operator. None means no limit."""
        if self._should_apply_backpressure(op):
            # Fully throttle reads while backpressured.
            return 0
        return None
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/execution/backpressure_policy/downstream_capacity_backpressure_policy.py",
"license": "Apache License 2.0",
"lines": 176,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/tests/test_downstream_capacity_backpressure_policy.py | import types
from unittest.mock import MagicMock, patch
import pytest
from ray.data._internal.execution.backpressure_policy.downstream_capacity_backpressure_policy import (
DownstreamCapacityBackpressurePolicy,
)
from ray.data._internal.execution.interfaces.physical_operator import (
OpRuntimeMetrics,
PhysicalOperator,
)
from ray.data._internal.execution.operators.actor_pool_map_operator import (
ActorPoolMapOperator,
)
from ray.data._internal.execution.operators.base_physical_operator import (
AllToAllOperator,
)
from ray.data._internal.execution.operators.task_pool_map_operator import (
TaskPoolMapOperator,
)
from ray.data._internal.execution.resource_manager import ResourceManager
from ray.data._internal.execution.streaming_executor_state import OpState
from ray.data.context import DataContext
class TestDownstreamCapacityBackpressurePolicy:
    """Unit tests for DownstreamCapacityBackpressurePolicy.

    All tests use mocked operators, operator states, and resource managers;
    the module-level get_utilized_object_store_budget_fraction is patched
    for every test via the autouse fixture below.
    """

    @pytest.fixture(autouse=True)
    def setup_budget_fraction_mock(self):
        """Fixture to patch get_utilized_object_store_budget_fraction for all tests."""
        with patch(
            "ray.data._internal.execution.backpressure_policy."
            "downstream_capacity_backpressure_policy."
            "get_utilized_object_store_budget_fraction"
        ) as mock_func:
            self._mock_get_utilized_budget_fraction = mock_func
            yield

    def _mock_operator(
        self,
        op_class: type = PhysicalOperator,
        num_tasks_running: int = 5,
        obj_store_mem_internal_inqueue: int = 1000,
        obj_store_mem_pending_task_inputs: int = 1000,
        throttling_disabled: bool = False,
        has_execution_finished: bool = False,
    ):
        """Helper method to create mock operator.
        Args:
            op_class: The operator class to mock.
            num_tasks_running: Number of tasks running.
            obj_store_mem_internal_inqueue: Object store memory in internal queue.
            obj_store_mem_pending_task_inputs: Object store memory for pending inputs.
            throttling_disabled: If True, operator is ineligible for backpressure.
            has_execution_finished: If True, operator is ineligible for backpressure.
        Returns:
            A mock operator with the specified configuration.
        """
        mock_operator = MagicMock(spec=op_class)
        mock_operator.metrics = MagicMock(spec=OpRuntimeMetrics)
        mock_operator.metrics.num_tasks_running = num_tasks_running
        mock_operator.metrics.obj_store_mem_internal_inqueue = (
            obj_store_mem_internal_inqueue
        )
        mock_operator.metrics.obj_store_mem_pending_task_inputs = (
            obj_store_mem_pending_task_inputs
        )
        mock_operator.metrics.obj_store_mem_pending_task_outputs = 0
        mock_operator.output_dependencies = []
        # Set up eligibility methods (used by ResourceManager.is_op_eligible)
        mock_operator.throttling_disabled.return_value = throttling_disabled
        mock_operator.has_execution_finished.return_value = has_execution_finished
        op_state = MagicMock(spec=OpState)
        op_state.output_queue_bytes.return_value = 0
        return mock_operator, op_state

    def _mock_materializing_operator(self):
        """Helper method to create mock materializing operator (e.g., AllToAllOperator).
        This creates a mock that passes isinstance(op, AllToAllOperator).
        We use __class__ assignment to make isinstance work with MagicMock.
        """
        mock_operator = MagicMock(spec=AllToAllOperator)
        mock_operator.__class__ = AllToAllOperator  # Make isinstance work
        mock_operator.metrics = MagicMock(spec=OpRuntimeMetrics)
        mock_operator.metrics.num_tasks_running = 0
        mock_operator.metrics.obj_store_mem_internal_inqueue = 0
        mock_operator.metrics.obj_store_mem_pending_task_inputs = 0
        mock_operator.metrics.obj_store_mem_pending_task_outputs = 0
        mock_operator.output_dependencies = []
        mock_operator.has_execution_finished.return_value = False
        # Bind the real throttling_disabled so the mock answers like a
        # genuine AllToAllOperator would.
        mock_operator.throttling_disabled = types.MethodType(
            AllToAllOperator.throttling_disabled, mock_operator
        )
        op_state = MagicMock(spec=OpState)
        op_state.output_queue_bytes.return_value = 0
        return mock_operator, op_state

    def _mock_task_pool_map_operator(
        self,
        num_tasks_running: int = 5,
        max_concurrency_limit: int = 10,
        obj_store_mem_internal_inqueue: int = 1000,
        obj_store_mem_pending_task_inputs: int = 1000,
    ):
        """Helper method to create mock TaskPoolMapOperator."""
        op, op_state = self._mock_operator(
            TaskPoolMapOperator,
            num_tasks_running,
            obj_store_mem_internal_inqueue,
            obj_store_mem_pending_task_inputs,
        )
        op.get_max_concurrency_limit.return_value = max_concurrency_limit
        return op, op_state

    def _mock_actor_pool_map_operator(
        self,
        num_tasks_running: int = 5,
        max_size: int = 5,
        max_tasks_in_flight_per_actor: int = 2,
        obj_store_mem_internal_inqueue: int = 1000,
        obj_store_mem_pending_task_inputs: int = 1000,
    ):
        """Helper method to create mock ActorPoolMapOperator."""
        op, op_state = self._mock_operator(
            ActorPoolMapOperator,
            num_tasks_running,
            obj_store_mem_internal_inqueue,
            obj_store_mem_pending_task_inputs,
        )
        actor_pool = MagicMock()
        actor_pool.max_size.return_value = max_size
        actor_pool.max_tasks_in_flight_per_actor.return_value = (
            max_tasks_in_flight_per_actor
        )
        op.get_autoscaling_actor_pools.return_value = [actor_pool]
        return op, op_state

    def _create_policy(
        self,
        topology,
        data_context=None,
        resource_manager=None,
    ):
        """Helper method to create policy instance."""
        context = data_context or DataContext()
        rm = resource_manager or MagicMock()
        return DownstreamCapacityBackpressurePolicy(
            data_context=context,
            topology=topology,
            resource_manager=rm,
        )

    def _create_context(self, backpressure_ratio=2.0):
        """Helper to create DataContext with backpressure ratio."""
        context = DataContext()
        context.downstream_capacity_backpressure_ratio = backpressure_ratio
        return context

    def _mock_resource_manager(
        self,
        internal_usage=100,
        outputs_usage=100,
        external_bytes=100,
    ):
        """Helper to create a resource manager mock with common settings."""
        rm = MagicMock()
        # Bind real methods from ResourceManager
        rm.is_op_eligible = types.MethodType(ResourceManager.is_op_eligible, rm)
        rm._get_downstream_ineligible_ops = types.MethodType(
            ResourceManager._get_downstream_ineligible_ops, rm
        )
        rm._is_blocking_materializing_op = types.MethodType(
            ResourceManager._is_blocking_materializing_op, rm
        )
        rm.get_op_internal_object_store_usage.return_value = internal_usage
        rm.get_op_outputs_object_store_usage_with_downstream.return_value = (
            outputs_usage
        )
        rm.get_external_consumer_bytes.return_value = external_bytes
        return rm

    def _set_utilized_budget_fraction(self, rm, fraction):
        """Helper to set utilized budget fraction.
        The policy checks: utilized_fraction <= OBJECT_STORE_BUDGET_UTIL_THRESHOLD
        With threshold=0.9, skip backpressure when utilized_fraction <= 0.9.
        To trigger backpressure, set utilized_fraction > 0.9.
        """
        self._mock_get_utilized_budget_fraction.return_value = fraction
        return fraction

    def _set_queue_ratio(self, op, op_state, rm, queue_size, downstream_capacity):
        """Helper to set queue ratio via mocks.
        Matches _get_queue_ratio logic:
        - queue_size_bytes = output_queue_bytes() + sum(get_op_usage(ineligible).object_store_memory)
        - downstream_capacity_size_bytes = sum(eligible_downstream.metrics.obj_store_mem_pending_task_inputs)
        - If downstream_capacity == 0, returns 0 (no backpressure)
        - Else returns queue_size / downstream_capacity
        Returns the calculated queue_ratio for assertions.
        """
        # Set queue size via output_queue_bytes
        op_state.output_queue_bytes.return_value = queue_size
        # Set downstream capacity on the first output dependency
        if op.output_dependencies:
            downstream_op = op.output_dependencies[0]
            downstream_op.metrics.obj_store_mem_pending_task_inputs = (
                downstream_capacity
            )
        if downstream_capacity == 0:
            return 0
        return queue_size / downstream_capacity

    def test_backpressure_disabled_when_ratio_is_none(self):
        """Test that backpressure is disabled when ratio is None."""
        op, op_state = self._mock_operator()
        topology = {op: op_state}
        context = self._create_context(backpressure_ratio=None)
        policy = self._create_policy(topology, data_context=context)
        assert policy.can_add_input(op) is True

    def test_backpressure_skipped_for_ineligible_op(self):
        """Test that backpressure is skipped for ineligible operators.
        An operator is ineligible when throttling_disabled=True or
        has_execution_finished=True.
        """
        # Create operator with throttling_disabled=True (ineligible)
        op, op_state = self._mock_operator(throttling_disabled=True)
        topology = {op: op_state}
        context = self._create_context()
        rm = self._mock_resource_manager()
        policy = self._create_policy(
            topology, data_context=context, resource_manager=rm
        )
        assert policy.can_add_input(op) is True

    def test_backpressure_skipped_for_materializing_downstream(self):
        """Test that backpressure is skipped when downstream is materializing.
        Creates topology: cur_op -> materializing_op (AllToAllOperator).
        """
        # Create the current operator
        op, op_state = self._mock_operator()
        # Create a materializing downstream operator
        materializing_op, materializing_op_state = self._mock_materializing_operator()
        # Set up topology: op -> materializing_op
        op.output_dependencies = [materializing_op]
        topology = {op: op_state, materializing_op: materializing_op_state}
        context = self._create_context()
        rm = self._mock_resource_manager()
        policy = self._create_policy(
            topology, data_context=context, resource_manager=rm
        )
        assert policy.can_add_input(op) is True

    def test_backpressure_skipped_for_low_utilization(self):
        """Test backpressure skipped when utilized budget fraction is low."""
        op, op_state = self._mock_task_pool_map_operator()
        topology = {op: op_state}
        context = self._create_context()
        rm = self._mock_resource_manager()
        # Utilized budget fraction below threshold = skip backpressure
        # With threshold=0.9, skip backpressure when utilized <= 0.9
        threshold = (
            DownstreamCapacityBackpressurePolicy.OBJECT_STORE_BUDGET_UTIL_THRESHOLD
        )
        self._set_utilized_budget_fraction(rm, threshold - 0.05)  # 0.85
        policy = self._create_policy(
            topology, data_context=context, resource_manager=rm
        )
        assert policy.can_add_input(op) is True

    def test_backpressure_skipped_at_threshold(self):
        """Test backpressure skipped when utilized fraction equals threshold."""
        op, op_state = self._mock_task_pool_map_operator()
        topology = {op: op_state}
        context = self._create_context()
        rm = self._mock_resource_manager()
        # Utilized budget fraction at threshold = skip backpressure
        # With threshold=0.9, utilized <= 0.9 skips backpressure
        threshold = (
            DownstreamCapacityBackpressurePolicy.OBJECT_STORE_BUDGET_UTIL_THRESHOLD
        )
        self._set_utilized_budget_fraction(rm, threshold)  # 0.9
        policy = self._create_policy(
            topology, data_context=context, resource_manager=rm
        )
        assert policy.can_add_input(op) is True

    def test_backpressure_triggered_high_utilization(self):
        """Test backpressure applied when utilized budget fraction is high."""
        op, op_state = self._mock_task_pool_map_operator()
        downstream_op, downstream_op_state = self._mock_operator()
        op.output_dependencies = [downstream_op]
        topology = {op: op_state, downstream_op: downstream_op_state}
        context = self._create_context(backpressure_ratio=2.0)
        rm = self._mock_resource_manager()
        # Utilized budget fraction above threshold = apply backpressure
        # With threshold=0.9, apply backpressure when utilized > 0.9
        threshold = (
            DownstreamCapacityBackpressurePolicy.OBJECT_STORE_BUDGET_UTIL_THRESHOLD
        )
        self._set_utilized_budget_fraction(rm, threshold + 0.05)  # 0.95
        # Queue ratio > 2.0: 1000 / 200 = 5
        queue_ratio = self._set_queue_ratio(
            op, op_state, rm, queue_size=1000, downstream_capacity=200
        )
        assert queue_ratio > 2.0
        policy = self._create_policy(
            topology, data_context=context, resource_manager=rm
        )
        assert policy.can_add_input(op) is False

    def test_backpressure_triggered_high_queue_ratio(self):
        """Test backpressure triggered when queue/capacity ratio is high."""
        op, op_state = self._mock_operator()
        downstream_op, downstream_op_state = self._mock_operator()
        op.output_dependencies = [downstream_op]
        topology = {op: op_state, downstream_op: downstream_op_state}
        context = self._create_context(backpressure_ratio=2.0)
        rm = self._mock_resource_manager()
        # Utilized budget fraction above threshold = check queue ratio
        threshold = (
            DownstreamCapacityBackpressurePolicy.OBJECT_STORE_BUDGET_UTIL_THRESHOLD
        )
        self._set_utilized_budget_fraction(rm, threshold + 0.05)  # 0.95
        # Queue ratio > 2.0: 1000 / 200 = 5
        queue_ratio = self._set_queue_ratio(
            op, op_state, rm, queue_size=1000, downstream_capacity=200
        )
        assert queue_ratio > 2.0
        policy = self._create_policy(
            topology, data_context=context, resource_manager=rm
        )
        assert policy.can_add_input(op) is False

    def test_no_backpressure_low_queue_ratio(self):
        """Test no backpressure when queue/capacity ratio is acceptable."""
        op, op_state = self._mock_operator()
        downstream_op, downstream_op_state = self._mock_operator()
        op.output_dependencies = [downstream_op]
        topology = {op: op_state, downstream_op: downstream_op_state}
        context = self._create_context(backpressure_ratio=2.0)
        rm = self._mock_resource_manager()
        # Utilized budget fraction below threshold = skip backpressure
        threshold = (
            DownstreamCapacityBackpressurePolicy.OBJECT_STORE_BUDGET_UTIL_THRESHOLD
        )
        self._set_utilized_budget_fraction(rm, threshold - 0.1)  # 0.8
        # Queue ratio < 2.0: 500 / 1000 = 0.5
        queue_ratio = self._set_queue_ratio(
            op, op_state, rm, queue_size=500, downstream_capacity=1000
        )
        assert queue_ratio < 2.0
        policy = self._create_policy(
            topology, data_context=context, resource_manager=rm
        )
        assert policy.can_add_input(op) is True

    def test_no_backpressure_zero_downstream_capacity(self):
        """Test backpressure skipped when downstream capacity is zero."""
        op, op_state = self._mock_task_pool_map_operator()
        downstream_op, downstream_op_state = self._mock_operator()
        op.output_dependencies = [downstream_op]
        topology = {op: op_state, downstream_op: downstream_op_state}
        context = self._create_context()
        rm = self._mock_resource_manager(internal_usage=0, outputs_usage=500)
        # Low utilized budget fraction = skip backpressure
        threshold = (
            DownstreamCapacityBackpressurePolicy.OBJECT_STORE_BUDGET_UTIL_THRESHOLD
        )
        self._set_utilized_budget_fraction(rm, threshold - 0.05)  # 0.85
        policy = self._create_policy(
            topology, data_context=context, resource_manager=rm
        )
        assert policy.can_add_input(op) is True

    def test_max_bytes_returns_none_when_backpressure_disabled(self):
        """Test max_task_output_bytes_to_read returns None when disabled."""
        op, op_state = self._mock_operator()
        topology = {op: op_state}
        context = self._create_context(backpressure_ratio=None)
        policy = self._create_policy(topology, data_context=context)
        assert policy.max_task_output_bytes_to_read(op) is None

    def test_max_bytes_returns_none_for_ineligible_op(self):
        """Test max_task_output_bytes_to_read returns None for ineligible op."""
        # Create operator with throttling_disabled=True (ineligible)
        op, op_state = self._mock_operator(throttling_disabled=True)
        topology = {op: op_state}
        context = self._create_context()
        rm = self._mock_resource_manager()
        policy = self._create_policy(
            topology, data_context=context, resource_manager=rm
        )
        assert policy.max_task_output_bytes_to_read(op) is None

    def test_max_bytes_returns_none_for_low_utilization(self):
        """Test max_task_output_bytes_to_read returns None for low utilization."""
        op, op_state = self._mock_task_pool_map_operator()
        topology = {op: op_state}
        context = self._create_context()
        rm = self._mock_resource_manager()
        # Low utilized budget fraction = skip backpressure
        threshold = (
            DownstreamCapacityBackpressurePolicy.OBJECT_STORE_BUDGET_UTIL_THRESHOLD
        )
        self._set_utilized_budget_fraction(rm, threshold - 0.05)  # 0.85
        policy = self._create_policy(
            topology, data_context=context, resource_manager=rm
        )
        assert policy.max_task_output_bytes_to_read(op) is None

    def test_max_bytes_returns_zero_for_high_utilization(self):
        """Test max_task_output_bytes_to_read returns 0 for high utilization."""
        op, op_state = self._mock_task_pool_map_operator()
        downstream_op, downstream_op_state = self._mock_operator()
        op.output_dependencies = [downstream_op]
        topology = {op: op_state, downstream_op: downstream_op_state}
        context = self._create_context(backpressure_ratio=2.0)
        rm = self._mock_resource_manager()
        # High utilized budget fraction = apply backpressure
        threshold = (
            DownstreamCapacityBackpressurePolicy.OBJECT_STORE_BUDGET_UTIL_THRESHOLD
        )
        self._set_utilized_budget_fraction(rm, threshold + 0.05)  # 0.95
        # Queue ratio > 2.0: 1000 / 200 = 5
        self._set_queue_ratio(
            op, op_state, rm, queue_size=1000, downstream_capacity=200
        )
        policy = self._create_policy(
            topology, data_context=context, resource_manager=rm
        )
        assert policy.max_task_output_bytes_to_read(op) == 0

    def test_max_bytes_returns_zero_for_high_queue_ratio(self):
        """Test max_task_output_bytes_to_read returns 0 for high queue ratio."""
        op, op_state = self._mock_task_pool_map_operator()
        downstream_op, downstream_op_state = self._mock_operator()
        op.output_dependencies = [downstream_op]
        topology = {op: op_state, downstream_op: downstream_op_state}
        context = self._create_context(backpressure_ratio=2.0)
        rm = self._mock_resource_manager()
        # High utilized budget fraction = check queue ratio
        threshold = (
            DownstreamCapacityBackpressurePolicy.OBJECT_STORE_BUDGET_UTIL_THRESHOLD
        )
        self._set_utilized_budget_fraction(rm, threshold + 0.05)  # 0.95
        # Queue ratio > 2.0: 1000 / 200 = 5
        self._set_queue_ratio(
            op, op_state, rm, queue_size=1000, downstream_capacity=200
        )
        policy = self._create_policy(
            topology, data_context=context, resource_manager=rm
        )
        assert policy.max_task_output_bytes_to_read(op) == 0

    def test_max_bytes_returns_none_when_no_backpressure(self):
        """Test max_task_output_bytes_to_read returns None when no backpressure."""
        op, op_state = self._mock_task_pool_map_operator()
        downstream_op, downstream_op_state = self._mock_operator()
        op.output_dependencies = [downstream_op]
        topology = {op: op_state, downstream_op: downstream_op_state}
        context = self._create_context(backpressure_ratio=2.0)
        rm = self._mock_resource_manager()
        # High utilized budget fraction = check queue ratio
        threshold = (
            DownstreamCapacityBackpressurePolicy.OBJECT_STORE_BUDGET_UTIL_THRESHOLD
        )
        self._set_utilized_budget_fraction(rm, threshold + 0.05)  # 0.95
        # Queue ratio < 2.0: 500 / 1000 = 0.5
        self._set_queue_ratio(
            op, op_state, rm, queue_size=500, downstream_capacity=1000
        )
        policy = self._create_policy(
            topology, data_context=context, resource_manager=rm
        )
        result = policy.max_task_output_bytes_to_read(op)
        # Queue ratio is below threshold, so no backpressure limit.
        assert result is None

    def test_backpressure_applied_fast_producer_slow_consumer(self):
        """Test backpressure IS applied when producer is faster than consumer.
        In a fast producer → slow consumer scenario:
        - Queue builds up (producer outputs faster than consumer can process)
        - Downstream capacity is low (slow consumer has fewer pending inputs)
        - Queue/capacity ratio exceeds threshold → backpressure applied
        """
        # Fast producer -> slow consumer topology
        producer_op, producer_state = self._mock_task_pool_map_operator(
            num_tasks_running=5,  # Fast producer, many concurrent tasks
            max_concurrency_limit=10,
        )
        consumer_op, consumer_state = self._mock_task_pool_map_operator(
            num_tasks_running=1,  # Slow consumer, few concurrent tasks
            max_concurrency_limit=2,
        )
        producer_op.output_dependencies = [consumer_op]
        topology = {
            producer_op: producer_state,
            consumer_op: consumer_state,
        }
        context = self._create_context(backpressure_ratio=2.0)
        rm = self._mock_resource_manager()
        # High utilization to trigger backpressure evaluation
        threshold = (
            DownstreamCapacityBackpressurePolicy.OBJECT_STORE_BUDGET_UTIL_THRESHOLD
        )
        self._set_utilized_budget_fraction(rm, threshold + 0.05)
        # Fast producer scenario: large queue, low downstream capacity
        # Queue ratio = 2000 / 200 = 10 (well above 2.0 threshold)
        queue_ratio = self._set_queue_ratio(
            producer_op,
            producer_state,
            rm,
            queue_size=2000,  # Large queue (producer outputting fast)
            downstream_capacity=200,  # Low capacity (slow consumer)
        )
        assert queue_ratio > 2.0  # Verify ratio exceeds backpressure threshold
        policy = self._create_policy(
            topology, data_context=context, resource_manager=rm
        )
        # Producer should be backpressured (cannot add more inputs)
        assert policy.can_add_input(producer_op) is False
        # Output bytes should be limited to 0 (full backpressure)
        assert policy.max_task_output_bytes_to_read(producer_op) == 0
if __name__ == "__main__":
    import sys

    # Allow running this test module directly with verbose pytest output.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_downstream_capacity_backpressure_policy.py",
"license": "Apache License 2.0",
"lines": 497,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/train/v2/_internal/data_integration/interfaces.py | from dataclasses import dataclass
from typing import TYPE_CHECKING, Callable, Protocol, Union
if TYPE_CHECKING:
from ray.data import DataIterator, Dataset
# A type representing either a ray.data.Dataset or a zero-argument callable
# that returns a ray.data.Dataset (for lazily constructed datasets).
GenDataset = Union["Dataset", Callable[[], "Dataset"]]
@dataclass
class DatasetShardMetadata:
    """Metadata about a dataset shard used for lookup and configuration."""

    # Name of the dataset this shard belongs to; used as the lookup key
    # by DatasetShardProvider implementations.
    dataset_name: str
class DatasetShardProvider(Protocol):
    """Structural (duck-typed) interface for objects that can serve a
    per-worker dataset shard given its metadata."""

    def get_dataset_shard(self, dataset_info: DatasetShardMetadata) -> "DataIterator":
        """Get the dataset shard for the given dataset info.
        Args:
            dataset_info: The metadata of the shard to retrieve,
                including the dataset name.
        Returns:
            The :class:`~ray.data.DataIterator` shard for the given dataset info.
        Raises:
            KeyError: If the dataset shard for the given dataset info is not found.
        """
        ...
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/_internal/data_integration/interfaces.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:python/ray/train/v2/_internal/execution/collective_impl.py | import logging
from typing import Any
import ray
import ray.cloudpickle as pickle
from ray.train.v2._internal.execution.context import get_train_context
# For reference, {1:1} is 19 bytes, {"1":"1"} is 21 bytes,
# and {"12345": "12345"} is 25 bytes.
# NOTE: this is a soft limit — broadcast_from_rank_zero only logs a warning
# when the pickled payload exceeds it; the data is still broadcast.
_MAX_BROADCAST_SIZE_BYTES = 1000
logger = logging.getLogger(__name__)
def barrier() -> None:
    """
    Create a barrier across all training workers.
    """
    ctx = get_train_context()
    sync_actor = ctx.get_synchronization_actor()
    # A barrier is implemented as a rank-zero broadcast of no data: every
    # worker blocks until all ranks have checked in.
    ref = sync_actor.broadcast_from_rank_zero.remote(
        world_rank=ctx.get_world_rank(),
        world_size=ctx.get_world_size(),
        data=None,
        caller_method_name="ray.train.collective.barrier",
    )
    return ray.get(ref)
def broadcast_from_rank_zero(data: Any) -> Any:
    """Broadcast data from the rank 0 worker to all other workers.
    This method is used by the public API function :func:`ray.train.collective.broadcast_from_rank_zero`.
    Users should typically call ``ray.train.collective.broadcast_from_rank_zero()`` instead of calling this method directly.
    """
    # Size check is advisory only: oversized payloads are warned about
    # but still broadcast.
    if data is not None:
        data_bytes = len(pickle.dumps(data))
        if data_bytes > _MAX_BROADCAST_SIZE_BYTES:
            logger.warning(
                f"Data size {data_bytes} bytes exceeds the maximum broadcast "
                f"size of {_MAX_BROADCAST_SIZE_BYTES} bytes"
            )

    ctx = get_train_context()
    sync_actor = ctx.get_synchronization_actor()
    ref = sync_actor.broadcast_from_rank_zero.remote(
        world_rank=ctx.get_world_rank(),
        world_size=ctx.get_world_size(),
        data=data,
        caller_method_name="ray.train.collective.broadcast_from_rank_zero",
    )
    return ray.get(ref)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/_internal/execution/collective_impl.py",
"license": "Apache License 2.0",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/serve/tests/unit/test_deployment_rank_manager.py | import pytest
from ray.serve._private.common import DeploymentID, ReplicaID
from ray.serve._private.deployment_state import DeploymentRankManager
from ray.serve.schema import ReplicaRank
@pytest.fixture
def rank_manager() -> DeploymentRankManager:
    """Fixture providing a fresh DeploymentRankManager instance for each test."""
    # Default function scope: each test gets isolated rank state.
    return DeploymentRankManager()
class MockDeploymentReplica:
    """Mock replica for testing without heavy dependencies."""

    def __init__(
        self,
        replica_id: str,
        deployment_name: str = "test_deployment",
        app_name: str = "test_app",
    ):
        # Build the composite ID in two steps for readability.
        deployment_id = DeploymentID(name=deployment_name, app_name=app_name)
        self.replica_id = ReplicaID(
            unique_id=replica_id,
            deployment_id=deployment_id,
        )

    def __str__(self):
        return f"MockDeploymentReplica(replica_id={self.replica_id})"
class TestDeploymentRankManager:
"""Test cases for DeploymentRankManager."""
def test_init(self, rank_manager):
"""Test initialization creates empty state."""
assert rank_manager.get_replica_ranks_mapping() == {}
def test_assign_rank_first_replica(self, rank_manager: DeploymentRankManager):
"""Test assigning rank to first replica."""
rank = rank_manager.assign_rank("replica_1", "node_1")
assert rank.rank == 0
assert rank_manager.has_replica_rank("replica_1")
assert rank_manager.get_replica_rank("replica_1").rank == 0
def test_assign_rank_multiple_replicas(self, rank_manager: DeploymentRankManager):
"""Test assigning ranks to multiple replicas."""
rank1 = rank_manager.assign_rank("replica_1", "node_1")
rank2 = rank_manager.assign_rank("replica_2", "node_1")
rank3 = rank_manager.assign_rank("replica_3", "node_1")
assert rank1.rank == 0
assert rank2.rank == 1
assert rank3.rank == 2
mapping = rank_manager.get_replica_ranks_mapping()
assert len(mapping) == 3
assert mapping == {
"replica_1": ReplicaRank(rank=0, node_rank=0, local_rank=0),
"replica_2": ReplicaRank(rank=1, node_rank=0, local_rank=1),
"replica_3": ReplicaRank(rank=2, node_rank=0, local_rank=2),
}
def test_assign_rank_reuses_released_ranks(
self, rank_manager: DeploymentRankManager
):
"""Test that released ranks are reused before assigning new ones."""
# Assign ranks to 3 replicas
rank_manager.assign_rank("replica_1", "node_1")
rank_manager.assign_rank("replica_2", "node_1")
rank_manager.assign_rank("replica_3", "node_1")
# Release middle rank
rank_manager.release_rank("replica_2")
assert not rank_manager.has_replica_rank("replica_2")
# New replica should get the released rank (1)
rank = rank_manager.assign_rank("replica_4", "node_1")
assert rank.rank == 1
assert rank_manager.get_replica_rank("replica_4") == ReplicaRank(
rank=1, node_rank=0, local_rank=1
)
def test_assign_rank_duplicate_fails(self, rank_manager: DeploymentRankManager):
"""Test assigning rank to replica that already has one fails."""
rank_manager.assign_rank("replica_1", "node_1")
with pytest.raises(RuntimeError, match="already assigned"):
rank_manager.assign_rank("replica_1", "node_1")
def test_release_rank(self, rank_manager: DeploymentRankManager):
"""Test releasing a rank makes it available for reuse."""
rank_manager.assign_rank("replica_1", "node_1")
rank_manager.assign_rank("replica_2", "node_1")
rank_manager.release_rank("replica_1")
assert not rank_manager.has_replica_rank("replica_1")
assert rank_manager.has_replica_rank("replica_2")
assert rank_manager.get_replica_rank("replica_2").rank == 1
def test_release_rank_nonexistent_replica(
self, rank_manager: DeploymentRankManager
):
"""Test releasing rank for non-existent replica fails."""
with pytest.raises(RuntimeError, match="not assigned"):
rank_manager.release_rank("nonexistent")
def test_recover_rank_basic(self, rank_manager: DeploymentRankManager):
"""Test basic rank recovery."""
rank_manager.recover_rank(
"replica_1", "node_1", ReplicaRank(rank=5, node_rank=0, local_rank=0)
)
assert rank_manager.has_replica_rank("replica_1")
assert rank_manager.get_replica_rank("replica_1").rank == 5
def test_recover_rank_updates_next_rank(self, rank_manager: DeploymentRankManager):
"""Test that recovering a high rank updates next_rank appropriately."""
rank_manager.assign_rank("replica_1", "node_1") # Gets rank 0
rank_manager.recover_rank(
"replica_2", "node_1", ReplicaRank(rank=10, node_rank=0, local_rank=0)
)
# New replica should get rank 11 (next available after 10)
rank = rank_manager.assign_rank("replica_3", "node_1")
assert rank.rank == 11
mapping = rank_manager.get_replica_ranks_mapping()
assert mapping == {
"replica_1": ReplicaRank(rank=0, node_rank=0, local_rank=0),
"replica_2": ReplicaRank(rank=10, node_rank=0, local_rank=0),
"replica_3": ReplicaRank(rank=11, node_rank=0, local_rank=1),
}
def test_recover_rank_removes_from_available(
self, rank_manager: DeploymentRankManager
):
"""Test that recovering a rank removes it from available ranks."""
rank_manager.assign_rank("replica_1", "node_1")
rank_manager.assign_rank("replica_2", "node_1")
rank_manager.release_rank("replica_1") # Rank 0 becomes available
# Recover rank 0 for a new replica
rank_manager.recover_rank(
"replica_3", "node_1", ReplicaRank(rank=0, node_rank=0, local_rank=0)
)
# Verify replica_3 has rank 0
assert rank_manager.has_replica_rank("replica_3")
assert rank_manager.get_replica_rank("replica_3").rank == 0
# Next assigned replica should get rank 2 (not 0, which is now taken)
rank = rank_manager.assign_rank("replica_4", "node_1")
assert rank.rank == 2
def test_recover_rank_duplicate_fails(self, rank_manager: DeploymentRankManager):
"""Test recovering rank for replica that already has one fails."""
rank_manager.assign_rank("replica_1", "node_1")
with pytest.raises(RuntimeError, match="already assigned"):
rank_manager.recover_rank(
"replica_1", "node_1", ReplicaRank(rank=5, node_rank=0, local_rank=0)
)
def test_get_replica_rank_existing(self, rank_manager: DeploymentRankManager):
"""Test getting rank for existing replica."""
rank_manager.assign_rank("replica_1", "node_1")
rank = rank_manager.get_replica_rank("replica_1")
assert rank.rank == 0
def test_get_replica_rank_nonexistent_fails(
self, rank_manager: DeploymentRankManager
):
"""Test getting rank for non-existent replica fails."""
with pytest.raises(RuntimeError, match="not assigned"):
rank_manager.get_replica_rank("nonexistent")
def test_get_replica_ranks_mapping(self, rank_manager: DeploymentRankManager):
"""Test getting copy of replica ranks mapping."""
rank_manager.assign_rank("replica_1", "node_1")
rank_manager.assign_rank("replica_2", "node_1")
mapping = rank_manager.get_replica_ranks_mapping()
expected = {
"replica_1": ReplicaRank(rank=0, node_rank=0, local_rank=0),
"replica_2": ReplicaRank(rank=1, node_rank=0, local_rank=1),
}
assert mapping == expected
# Verify it's a copy by modifying it
mapping["replica_3"] = ReplicaRank(rank=2, node_rank=0, local_rank=2)
# Get a fresh mapping to verify the original wasn't changed
fresh_mapping = rank_manager.get_replica_ranks_mapping()
assert "replica_3" not in fresh_mapping
assert fresh_mapping == expected
def test_clear(self, rank_manager: DeploymentRankManager):
"""Test clearing all rank data."""
rank_manager.assign_rank("replica_1", "node_1")
rank_manager.assign_rank("replica_2", "node_1")
rank_manager.release_rank("replica_1")
rank_manager.clear()
# After clearing, should have no ranks
assert rank_manager.get_replica_ranks_mapping() == {}
assert not rank_manager.has_replica_rank("replica_1")
assert not rank_manager.has_replica_rank("replica_2")
# Should be able to assign from 0 again
rank = rank_manager.assign_rank("replica_3", "node_1")
assert rank.rank == 0
def test_check_rank_consistency_empty_replicas(
self, rank_manager: DeploymentRankManager
):
"""Test consistency check with no active replicas."""
result = rank_manager.check_rank_consistency_and_reassign_minimally([])
assert result == []
def test_check_rank_consistency_contiguous_ranks(
self, rank_manager: DeploymentRankManager
):
"""Test consistency check with contiguous ranks (no reassignment needed)."""
# Set up contiguous ranks
replica1 = MockDeploymentReplica("replica_1")
replica2 = MockDeploymentReplica("replica_2")
replica3 = MockDeploymentReplica("replica_3")
rank_manager.assign_rank("replica_1", "node_1") # rank 0
rank_manager.assign_rank("replica_2", "node_1") # rank 1
rank_manager.assign_rank("replica_3", "node_1") # rank 2
result = rank_manager.check_rank_consistency_and_reassign_minimally(
[replica1, replica2, replica3]
)
assert result == []
def test_check_rank_consistency_non_contiguous_ranks(
self, rank_manager: DeploymentRankManager
):
"""Test consistency check with non-contiguous ranks (reassignment needed)."""
# Set up non-contiguous ranks (simulate a replica being removed)
replica1 = MockDeploymentReplica("replica_1")
replica2 = MockDeploymentReplica("replica_2")
replica3 = MockDeploymentReplica("replica_3")
# Manually assign non-contiguous ranks using recover_rank
rank_manager.recover_rank(
"replica_1", "node_1", ReplicaRank(rank=0, node_rank=0, local_rank=0)
)
rank_manager.recover_rank(
"replica_2", "node_1", ReplicaRank(rank=2, node_rank=0, local_rank=1)
) # Gap at rank 1
rank_manager.recover_rank(
"replica_3", "node_1", ReplicaRank(rank=3, node_rank=0, local_rank=2)
)
result = rank_manager.check_rank_consistency_and_reassign_minimally(
[replica1, replica2, replica3]
)
# Should reassign some replicas to make ranks contiguous
assert len(result) > 0
# After reassignment, ranks should be contiguous [0, 1, 2]
mapping = rank_manager.get_replica_ranks_mapping()
final_ranks = sorted([r.rank for r in mapping.values()])
expected_ranks = [0, 1, 2]
assert final_ranks == expected_ranks
def test_minimal_reassignment_keeps_existing_when_possible(
self, rank_manager: DeploymentRankManager
):
"""Test that minimal reassignment keeps existing ranks when possible."""
replica1 = MockDeploymentReplica("replica_1")
replica2 = MockDeploymentReplica("replica_2")
replica3 = MockDeploymentReplica("replica_3")
replica4 = MockDeploymentReplica("replica_4")
# Set up ranks: 0, 2, 5, 7 (non-contiguous) using recover_rank
rank_manager.recover_rank(
"replica_1", "node_1", ReplicaRank(rank=0, node_rank=0, local_rank=0)
) # Should keep this
rank_manager.recover_rank(
"replica_2", "node_1", ReplicaRank(rank=2, node_rank=0, local_rank=1)
) # Should keep this
rank_manager.recover_rank(
"replica_3", "node_1", ReplicaRank(rank=5, node_rank=0, local_rank=2)
) # Should be reassigned to 1
rank_manager.recover_rank(
"replica_4", "node_1", ReplicaRank(rank=7, node_rank=0, local_rank=3)
) # Should be reassigned to 3
result = rank_manager.check_rank_consistency_and_reassign_minimally(
[replica1, replica2, replica3, replica4]
)
# Verify minimal reassignment
assert len(result) == 2 # Only 2 replicas should be reassigned
reassigned_ids = {r.replica_id.unique_id for r in result}
assert reassigned_ids == {"replica_3", "replica_4"}
# Verify final ranks are contiguous
mapping = rank_manager.get_replica_ranks_mapping()
final_ranks = sorted([r.rank for r in mapping.values()])
assert final_ranks == [0, 1, 2, 3]
# Verify that replica_1 and replica_2 kept their original ranks
assert rank_manager.get_replica_rank("replica_1").rank == 0
assert rank_manager.get_replica_rank("replica_2").rank == 2
def test_check_rank_consistency_unranked_replicas_fails(
self, rank_manager: DeploymentRankManager
):
"""Test consistency check fails when active replicas have no ranks."""
replica1 = MockDeploymentReplica("replica_1")
with pytest.raises(RuntimeError, match="Rank system is in an invalid state"):
rank_manager.check_rank_consistency_and_reassign_minimally([replica1])
def test_check_rank_consistency_stale_ranks_fails(
self, rank_manager: DeploymentRankManager
):
"""Test consistency check fails when there are stale ranks."""
replica1 = MockDeploymentReplica("replica_1")
# Set up stale rank (replica not in active list)
rank_manager.assign_rank("replica_1", "node_1")
rank_manager.assign_rank("stale_replica", "node_1")
with pytest.raises(RuntimeError, match="Rank system is in an invalid state"):
rank_manager.check_rank_consistency_and_reassign_minimally([replica1])
def test_check_rank_consistency_duplicate_ranks_fails(
self, rank_manager: DeploymentRankManager
):
"""Test consistency check fails when there are duplicate ranks."""
replica1 = MockDeploymentReplica("replica_1")
replica2 = MockDeploymentReplica("replica_2")
# Manually create duplicate ranks using recover_rank (this should never happen in normal operation)
# Note: We can only test this with duplicate global ranks, not duplicate local ranks
# since local_rank uniqueness is enforced by the underlying RankManager
rank_manager.recover_rank(
"replica_1", "node_1", ReplicaRank(rank=0, node_rank=0, local_rank=0)
)
rank_manager.recover_rank(
"replica_2", "node_1", ReplicaRank(rank=0, node_rank=0, local_rank=1)
) # Duplicate global rank!
with pytest.raises(RuntimeError, match="Rank system is in an invalid state"):
rank_manager.check_rank_consistency_and_reassign_minimally(
[replica1, replica2]
)
class TestDeploymentRankManagerMultiNode:
    """Tests for rank bookkeeping when replicas span multiple nodes.

    Exercises the interplay of the three rank dimensions: the global
    ``rank`` (unique across the deployment), ``node_rank`` (one per
    node, reused after a node empties), and ``local_rank`` (a per-node
    sequence that restarts at 0 on every node).
    """

    def test_assign_rank_multiple_nodes(self):
        """Test that replicas on different nodes get correct node_rank and local_rank."""
        rank_manager = DeploymentRankManager()
        # Assign replicas across different nodes
        rank1 = rank_manager.assign_rank("replica_1", "node_1")
        rank2 = rank_manager.assign_rank("replica_2", "node_2")
        rank3 = rank_manager.assign_rank("replica_3", "node_1")
        rank4 = rank_manager.assign_rank("replica_4", "node_2")
        rank5 = rank_manager.assign_rank("replica_5", "node_3")
        # Verify global ranks are sequential (assignment order, independent of node)
        assert rank1.rank == 0
        assert rank2.rank == 1
        assert rank3.rank == 2
        assert rank4.rank == 3
        assert rank5.rank == 4
        # Verify node ranks are assigned correctly (one per distinct node,
        # in order of first appearance)
        assert rank1.node_rank == 0  # node_1 is first
        assert rank2.node_rank == 1  # node_2 is second
        assert rank3.node_rank == 0  # node_1 already has rank 0
        assert rank4.node_rank == 1  # node_2 already has rank 1
        assert rank5.node_rank == 2  # node_3 is third
        # Verify local ranks restart per node
        assert rank1.local_rank == 0  # First replica on node_1
        assert rank2.local_rank == 0  # First replica on node_2
        assert rank3.local_rank == 1  # Second replica on node_1
        assert rank4.local_rank == 1  # Second replica on node_2
        assert rank5.local_rank == 0  # First replica on node_3

    def test_local_rank_independence_across_nodes(self):
        """Test that local_rank sequences are independent per node."""
        rank_manager = DeploymentRankManager()
        # Add 3 replicas to node_1
        r1 = rank_manager.assign_rank("n1_r1", "node_1")
        r2 = rank_manager.assign_rank("n1_r2", "node_1")
        r3 = rank_manager.assign_rank("n1_r3", "node_1")
        # Add 2 replicas to node_2
        r4 = rank_manager.assign_rank("n2_r1", "node_2")
        r5 = rank_manager.assign_rank("n2_r2", "node_2")
        # Verify local ranks are independent
        assert r1.local_rank == 0
        assert r2.local_rank == 1
        assert r3.local_rank == 2
        assert r4.local_rank == 0  # Restarts at 0 for node_2
        assert r5.local_rank == 1

    def test_release_rank_removes_node_when_last_replica(self):
        """Test that releasing the last replica on a node releases the node rank."""
        rank_manager = DeploymentRankManager()
        # Assign replicas to two nodes
        rank_manager.assign_rank("n1_r1", "node_1")
        rank_manager.assign_rank("n2_r1", "node_2")
        rank_manager.assign_rank("n2_r2", "node_2")
        # Release the only replica on node_1
        rank_manager.release_rank("n1_r1")
        # Now add a replica to node_3 - it should get node_rank 0 (reused from node_1)
        new_rank = rank_manager.assign_rank("n3_r1", "node_3")
        assert new_rank.node_rank == 0  # Reused node rank

    def test_release_rank_keeps_node_when_replicas_remain(self):
        """Test that releasing a replica doesn't release node rank if other replicas remain."""
        rank_manager = DeploymentRankManager()
        # Assign 3 replicas to node_1
        rank_manager.assign_rank("r1", "node_1")
        rank_manager.assign_rank("r2", "node_1")
        rank_manager.assign_rank("r3", "node_1")
        # Assign replica to node_2
        node2_rank = rank_manager.assign_rank("r4", "node_2")
        assert node2_rank.node_rank == 1
        # Release one replica from node_1 (two replicas remain on it)
        rank_manager.release_rank("r2")
        # Add another replica to node_1 - should still have node_rank 0
        new_rank = rank_manager.assign_rank("r5", "node_1")
        assert new_rank.node_rank == 0  # Node rank preserved

    def test_node_rank_reuse_after_complete_release(self):
        """Test that node ranks are reused after all replicas are removed."""
        rank_manager = DeploymentRankManager()
        # Create replicas on 3 nodes
        rank_manager.assign_rank("n1_r1", "node_1")
        rank_manager.assign_rank("n2_r1", "node_2")
        rank_manager.assign_rank("n3_r1", "node_3")
        # Verify node ranks
        assert rank_manager.get_replica_rank("n1_r1").node_rank == 0
        assert rank_manager.get_replica_rank("n2_r1").node_rank == 1
        assert rank_manager.get_replica_rank("n3_r1").node_rank == 2
        # Remove all replicas from node_2 (middle node)
        rank_manager.release_rank("n2_r1")
        # Add replica to a new node - should reuse node_rank 1
        new_rank = rank_manager.assign_rank("n4_r1", "node_4")
        assert new_rank.node_rank == 1  # Reused from released node_2

    def test_local_rank_reuse_within_node(self):
        """Test that local ranks are reused within a node after release."""
        rank_manager = DeploymentRankManager()
        # Assign 3 replicas to node_1
        rank_manager.assign_rank("r1", "node_1")
        rank_manager.assign_rank("r2", "node_1")
        rank_manager.assign_rank("r3", "node_1")
        # Release middle replica (local_rank=1)
        rank_manager.release_rank("r2")
        # Add new replica to node_1 - should reuse local_rank 1
        new_rank = rank_manager.assign_rank("r4", "node_1")
        assert new_rank.local_rank == 1  # Reused local rank

    def test_recover_rank_multiple_nodes(self):
        """Test recovering ranks for replicas on different nodes."""
        rank_manager = DeploymentRankManager()
        # Recover replicas on different nodes
        rank_manager.recover_rank(
            "r1", "node_1", ReplicaRank(rank=0, node_rank=0, local_rank=0)
        )
        rank_manager.recover_rank(
            "r2", "node_2", ReplicaRank(rank=1, node_rank=1, local_rank=0)
        )
        rank_manager.recover_rank(
            "r3", "node_1", ReplicaRank(rank=2, node_rank=0, local_rank=1)
        )
        # Verify all ranks are recovered correctly (byte-for-byte equality
        # on all three rank dimensions)
        assert rank_manager.get_replica_rank("r1") == ReplicaRank(
            rank=0, node_rank=0, local_rank=0
        )
        assert rank_manager.get_replica_rank("r2") == ReplicaRank(
            rank=1, node_rank=1, local_rank=0
        )
        assert rank_manager.get_replica_rank("r3") == ReplicaRank(
            rank=2, node_rank=0, local_rank=1
        )

    def test_recover_rank_preserves_node_rank_when_node_exists(self):
        """Test that recovering a replica on an existing node doesn't create duplicate node ranks."""
        rank_manager = DeploymentRankManager()
        # Recover first replica on node_1
        rank_manager.recover_rank(
            "r1", "node_1", ReplicaRank(rank=0, node_rank=0, local_rank=0)
        )
        # Recover another replica on the same node
        rank_manager.recover_rank(
            "r2", "node_1", ReplicaRank(rank=1, node_rank=0, local_rank=1)
        )
        # Both should have the same node_rank
        assert rank_manager.get_replica_rank("r1").node_rank == 0
        assert rank_manager.get_replica_rank("r2").node_rank == 0

    def test_check_rank_consistency_across_multiple_nodes(self):
        """Test consistency checking with replicas spread across nodes."""
        rank_manager = DeploymentRankManager()
        replica1 = MockDeploymentReplica("r1")
        replica2 = MockDeploymentReplica("r2")
        replica3 = MockDeploymentReplica("r3")
        replica4 = MockDeploymentReplica("r4")
        # Set up non-contiguous global ranks (0, 5, 8, 10) across nodes
        rank_manager.recover_rank(
            "r1", "node_1", ReplicaRank(rank=0, node_rank=0, local_rank=0)
        )
        rank_manager.recover_rank(
            "r2", "node_2", ReplicaRank(rank=5, node_rank=1, local_rank=0)
        )
        rank_manager.recover_rank(
            "r3", "node_1", ReplicaRank(rank=8, node_rank=0, local_rank=1)
        )
        rank_manager.recover_rank(
            "r4", "node_2", ReplicaRank(rank=10, node_rank=1, local_rank=1)
        )
        result = rank_manager.check_rank_consistency_and_reassign_minimally(
            [replica1, replica2, replica3, replica4]
        )
        # Should reassign some replicas to make global ranks contiguous
        assert len(result) > 0
        # Verify global ranks are now contiguous
        mapping = rank_manager.get_replica_ranks_mapping()
        global_ranks = sorted([r.rank for r in mapping.values()])
        assert global_ranks == [0, 1, 2, 3]

    def test_check_rank_consistency_local_ranks_per_node(self):
        """Test that local rank consistency is checked independently per node."""
        rank_manager = DeploymentRankManager()
        replica1 = MockDeploymentReplica("r1")
        replica2 = MockDeploymentReplica("r2")
        replica3 = MockDeploymentReplica("r3")
        replica4 = MockDeploymentReplica("r4")
        # Set up non-contiguous local ranks on node_1, contiguous on node_2
        rank_manager.recover_rank(
            "r1", "node_1", ReplicaRank(rank=0, node_rank=0, local_rank=0)
        )
        rank_manager.recover_rank(
            "r2", "node_1", ReplicaRank(rank=1, node_rank=0, local_rank=5)
        )  # Gap in local rank
        rank_manager.recover_rank(
            "r3", "node_2", ReplicaRank(rank=2, node_rank=1, local_rank=0)
        )
        rank_manager.recover_rank(
            "r4", "node_2", ReplicaRank(rank=3, node_rank=1, local_rank=1)
        )
        result = rank_manager.check_rank_consistency_and_reassign_minimally(
            [replica1, replica2, replica3, replica4]
        )
        # Should reassign replica on node_1 with non-contiguous local rank
        assert len(result) > 0
        # Verify local ranks are contiguous per node
        r1_rank = rank_manager.get_replica_rank("r1")
        r2_rank = rank_manager.get_replica_rank("r2")
        r3_rank = rank_manager.get_replica_rank("r3")
        r4_rank = rank_manager.get_replica_rank("r4")
        # Node 1 local ranks should be [0, 1]
        node1_local_ranks = sorted([r1_rank.local_rank, r2_rank.local_rank])
        assert node1_local_ranks == [0, 1]
        # Node 2 local ranks should be [0, 1]
        node2_local_ranks = sorted([r3_rank.local_rank, r4_rank.local_rank])
        assert node2_local_ranks == [0, 1]

    def test_check_rank_consistency_node_ranks(self):
        """Test that node rank consistency is maintained."""
        rank_manager = DeploymentRankManager()
        replica1 = MockDeploymentReplica("r1")
        replica2 = MockDeploymentReplica("r2")
        replica3 = MockDeploymentReplica("r3")
        # Set up non-contiguous node ranks (0, 2, 5)
        rank_manager.recover_rank(
            "r1", "node_1", ReplicaRank(rank=0, node_rank=0, local_rank=0)
        )
        rank_manager.recover_rank(
            "r2", "node_2", ReplicaRank(rank=1, node_rank=2, local_rank=0)
        )
        rank_manager.recover_rank(
            "r3", "node_3", ReplicaRank(rank=2, node_rank=5, local_rank=0)
        )
        result = rank_manager.check_rank_consistency_and_reassign_minimally(
            [replica1, replica2, replica3]
        )
        # Should reassign replicas to make node ranks contiguous
        assert len(result) > 0
        # Verify node ranks are now contiguous [0, 1, 2]
        mapping = rank_manager.get_replica_ranks_mapping()
        node_ranks = sorted([r.node_rank for r in mapping.values()])
        assert node_ranks == [0, 1, 2]

    def test_clear_with_multiple_nodes(self):
        """Test that clear() removes all node-related state."""
        rank_manager = DeploymentRankManager()
        # Assign replicas to multiple nodes
        rank_manager.assign_rank("r1", "node_1")
        rank_manager.assign_rank("r2", "node_2")
        rank_manager.assign_rank("r3", "node_3")
        # Clear everything
        rank_manager.clear()
        # Verify all state is cleared
        assert rank_manager.get_replica_ranks_mapping() == {}
        # Verify we can assign fresh ranks starting from 0 on all three
        # rank dimensions
        new_rank = rank_manager.assign_rank("r4", "node_4")
        assert new_rank.rank == 0
        assert new_rank.node_rank == 0
        assert new_rank.local_rank == 0

    def test_get_replica_ranks_mapping_multiple_nodes(self):
        """Test that get_replica_ranks_mapping includes correct values for all nodes."""
        rank_manager = DeploymentRankManager()
        # Create replicas across multiple nodes
        rank_manager.assign_rank("r1", "node_1")
        rank_manager.assign_rank("r2", "node_2")
        rank_manager.assign_rank("r3", "node_1")
        rank_manager.assign_rank("r4", "node_3")
        mapping = rank_manager.get_replica_ranks_mapping()
        # Verify all replicas are in mapping with correct values
        assert len(mapping) == 4
        assert mapping["r1"] == ReplicaRank(rank=0, node_rank=0, local_rank=0)
        assert mapping["r2"] == ReplicaRank(rank=1, node_rank=1, local_rank=0)
        assert mapping["r3"] == ReplicaRank(rank=2, node_rank=0, local_rank=1)
        assert mapping["r4"] == ReplicaRank(rank=3, node_rank=2, local_rank=0)

    def test_complex_multi_node_lifecycle(self):
        """Test a complex scenario with adds, releases, and consistency checks across nodes."""
        rank_manager = DeploymentRankManager()
        # Phase 1: Initial deployment across 3 nodes
        rank_manager.assign_rank("n1_r1", "node_1")
        rank_manager.assign_rank("n1_r2", "node_1")
        rank_manager.assign_rank("n2_r1", "node_2")
        rank_manager.assign_rank("n3_r1", "node_3")
        rank_manager.assign_rank("n3_r2", "node_3")
        # Phase 2: Scale down - remove some replicas
        rank_manager.release_rank("n1_r2")  # Remove from node_1
        rank_manager.release_rank("n2_r1")  # Remove all from node_2
        # Phase 3: Scale up - add replicas to new and existing nodes
        rank_manager.assign_rank("n1_r3", "node_1")  # Add to existing node_1
        rank_manager.assign_rank("n4_r1", "node_4")  # New node
        # Verify state is consistent
        mapping = rank_manager.get_replica_ranks_mapping()
        assert len(mapping) == 5
        # Verify node ranks - node_2 was removed, so node_4 should reuse its rank
        assert mapping["n4_r1"].node_rank == 1  # Reused from node_2
        # Verify local ranks per node
        assert mapping["n1_r1"].local_rank == 0
        assert mapping["n1_r3"].local_rank == 1  # Reused local rank
        assert mapping["n3_r1"].local_rank == 0
        assert mapping["n3_r2"].local_rank == 1
        assert mapping["n4_r1"].local_rank == 0

    def test_scaling_up_and_down_across_nodes(self):
        """Test scaling operations across multiple nodes."""
        rank_manager = DeploymentRankManager()
        # Scale up: Add replicas to 4 nodes, 2 replicas each
        for node_idx in range(4):
            for replica_idx in range(2):
                replica_id = f"n{node_idx}_r{replica_idx}"
                rank_manager.assign_rank(replica_id, f"node_{node_idx}")
        # Verify 8 replicas total
        assert len(rank_manager.get_replica_ranks_mapping()) == 8
        # Scale down: Remove all replicas from nodes 1 and 3
        rank_manager.release_rank("n1_r0")
        rank_manager.release_rank("n1_r1")
        rank_manager.release_rank("n3_r0")
        rank_manager.release_rank("n3_r1")
        # Verify 4 replicas remain
        assert len(rank_manager.get_replica_ranks_mapping()) == 4
        # Scale up again: Add to new node
        new_rank = rank_manager.assign_rank("n5_r0", "node_5")
        # Should reuse a released node rank (1 or 3)
        assert new_rank.node_rank in [1, 3]

    def test_minimal_reassignment_preserves_node_assignments(self):
        """Test that minimal reassignment doesn't move replicas between nodes."""
        rank_manager = DeploymentRankManager()
        replica1 = MockDeploymentReplica("r1")
        replica2 = MockDeploymentReplica("r2")
        replica3 = MockDeploymentReplica("r3")
        replica4 = MockDeploymentReplica("r4")
        # Set up non-contiguous global ranks across nodes
        rank_manager.recover_rank(
            "r1", "node_1", ReplicaRank(rank=0, node_rank=0, local_rank=0)
        )
        rank_manager.recover_rank(
            "r2", "node_2", ReplicaRank(rank=5, node_rank=1, local_rank=0)
        )
        rank_manager.recover_rank(
            "r3", "node_1", ReplicaRank(rank=10, node_rank=0, local_rank=1)
        )
        rank_manager.recover_rank(
            "r4", "node_2", ReplicaRank(rank=15, node_rank=1, local_rank=1)
        )
        # Perform consistency check
        rank_manager.check_rank_consistency_and_reassign_minimally(
            [replica1, replica2, replica3, replica4]
        )
        # Verify replicas stayed on their original nodes
        mapping = rank_manager.get_replica_ranks_mapping()
        # r1 and r3 should still be on node_1 (node_rank=0)
        assert mapping["r1"].node_rank == mapping["r3"].node_rank == 0
        # r2 and r4 should still be on node_2 (node_rank=1)
        assert mapping["r2"].node_rank == mapping["r4"].node_rank == 1
        # Verify local ranks are contiguous per node
        assert sorted([mapping["r1"].local_rank, mapping["r3"].local_rank]) == [0, 1]
        assert sorted([mapping["r2"].local_rank, mapping["r4"].local_rank]) == [0, 1]
class TestDeploymentRankManagerEdgeCases:
    """Test edge cases and corner cases for correctness.

    Focuses on the interaction between `recover_rank` (which can inject
    arbitrary, possibly high or out-of-order ranks) and the normal
    assign/release bookkeeping, plus rank reuse ordering and scale.
    """

    def test_recover_rank_updates_next_rank_correctly(self):
        """When recovering a rank >= _next_rank, verify next assigned rank is correct."""
        rank_manager = DeploymentRankManager()
        # Assign first replica normally - gets rank 0
        rank_manager.assign_rank("r1", "node_1")
        # Recover a replica with a very high rank
        rank_manager.recover_rank(
            "r2", "node_1", ReplicaRank(rank=100, node_rank=0, local_rank=1)
        )
        # Now assign a new replica - should get rank 101 (next after 100)
        new_rank = rank_manager.assign_rank("r3", "node_1")
        assert new_rank.rank == 101

    def test_assign_after_recovery_with_very_high_rank(self):
        """Test that assignment after recovering a very high rank works correctly."""
        rank_manager = DeploymentRankManager()
        # Recover with rank 1000
        rank_manager.recover_rank(
            "r1", "node_1", ReplicaRank(rank=1000, node_rank=0, local_rank=0)
        )
        # Assign new replica - should get 1001
        new_rank = rank_manager.assign_rank("r2", "node_1")
        assert new_rank.rank == 1001
        assert new_rank.local_rank == 1  # Second replica on node_1

    def test_recover_multiple_high_ranks_out_of_order(self):
        """Test recovering multiple high ranks in non-sequential order."""
        rank_manager = DeploymentRankManager()
        # Recover ranks in random order: 50, 10, 100
        rank_manager.recover_rank(
            "r1", "node_1", ReplicaRank(rank=50, node_rank=0, local_rank=0)
        )
        rank_manager.recover_rank(
            "r2", "node_2", ReplicaRank(rank=10, node_rank=1, local_rank=0)
        )
        rank_manager.recover_rank(
            "r3", "node_1", ReplicaRank(rank=100, node_rank=0, local_rank=1)
        )
        # Next assignment should get 101 (max + 1)
        new_rank = rank_manager.assign_rank("r4", "node_3")
        assert new_rank.rank == 101

    def test_recover_rank_removes_from_released_ranks(self):
        """Test that recovering a rank that was released removes it from available set."""
        rank_manager = DeploymentRankManager()
        # Recover ranks 0 and 5, then release rank 5
        rank_manager.recover_rank(
            "r1", "node_1", ReplicaRank(rank=0, node_rank=0, local_rank=0)
        )
        rank_manager.recover_rank(
            "r2", "node_1", ReplicaRank(rank=5, node_rank=0, local_rank=1)
        )
        rank_manager.release_rank("r2")  # Rank 5 is now released
        # Recover a different replica with rank 5
        rank_manager.recover_rank(
            "r3", "node_1", ReplicaRank(rank=5, node_rank=0, local_rank=1)
        )
        # Assign new replica - should get rank 6, not 5 (since 5 is taken)
        new_rank = rank_manager.assign_rank("r4", "node_1")
        assert new_rank.rank == 6

    def test_reassignment_with_all_ranks_out_of_range(self):
        """Test reassignment when all replicas have ranks outside target range."""
        rank_manager = DeploymentRankManager()
        replica1 = MockDeploymentReplica("r1")
        replica2 = MockDeploymentReplica("r2")
        replica3 = MockDeploymentReplica("r3")
        # All replicas have ranks outside [0, 1, 2] range
        rank_manager.recover_rank(
            "r1", "node_1", ReplicaRank(rank=10, node_rank=0, local_rank=0)
        )
        rank_manager.recover_rank(
            "r2", "node_1", ReplicaRank(rank=20, node_rank=0, local_rank=1)
        )
        rank_manager.recover_rank(
            "r3", "node_1", ReplicaRank(rank=30, node_rank=0, local_rank=2)
        )
        result = rank_manager.check_rank_consistency_and_reassign_minimally(
            [replica1, replica2, replica3]
        )
        # All replicas should be reassigned
        assert len(result) == 3
        # Verify final ranks are exactly [0, 1, 2]
        mapping = rank_manager.get_replica_ranks_mapping()
        final_ranks = sorted([r.rank for r in mapping.values()])
        assert final_ranks == [0, 1, 2]

    def test_reassignment_preserves_target_ranks_exactly(self):
        """Test that after reassignment, ranks are exactly [0, N-1]."""
        rank_manager = DeploymentRankManager()
        replicas = []
        for i in range(5):
            replicas.append(MockDeploymentReplica(f"r{i}"))
        # Create non-contiguous ranks: 0, 3, 7, 11, 15
        rank_manager.recover_rank(
            "r0", "node_1", ReplicaRank(rank=0, node_rank=0, local_rank=0)
        )
        rank_manager.recover_rank(
            "r1", "node_1", ReplicaRank(rank=3, node_rank=0, local_rank=1)
        )
        rank_manager.recover_rank(
            "r2", "node_1", ReplicaRank(rank=7, node_rank=0, local_rank=2)
        )
        rank_manager.recover_rank(
            "r3", "node_1", ReplicaRank(rank=11, node_rank=0, local_rank=3)
        )
        rank_manager.recover_rank(
            "r4", "node_1", ReplicaRank(rank=15, node_rank=0, local_rank=4)
        )
        rank_manager.check_rank_consistency_and_reassign_minimally(replicas)
        # Verify ranks are exactly [0, 1, 2, 3, 4]
        mapping = rank_manager.get_replica_ranks_mapping()
        final_ranks = sorted([r.rank for r in mapping.values()])
        assert final_ranks == [0, 1, 2, 3, 4]
        # Verify no duplicates
        assert len(final_ranks) == len(set(final_ranks))

    def test_multiple_sequential_releases_reuse_in_order(self):
        """Test releasing multiple ranks in sequence maintains correct state."""
        rank_manager = DeploymentRankManager()
        # Assign ranks 0-5
        for i in range(6):
            rank_manager.assign_rank(f"r{i}", "node_1")
        # Release ranks 0, 2, 4
        rank_manager.release_rank("r0")
        rank_manager.release_rank("r2")
        rank_manager.release_rank("r4")
        # Assign new replicas - should reuse in ascending order (min first)
        new_rank1 = rank_manager.assign_rank("r6", "node_1")
        new_rank2 = rank_manager.assign_rank("r7", "node_1")
        new_rank3 = rank_manager.assign_rank("r8", "node_1")
        # Should reuse 0, 2, 4 in that order
        assert new_rank1.rank == 0
        assert new_rank2.rank == 2
        assert new_rank3.rank == 4

    def test_interleaved_assign_release_recover(self):
        """Test complex sequence of operations maintains consistency."""
        rank_manager = DeploymentRankManager()
        # Complex sequence interleaving all three mutation operations
        rank_manager.assign_rank("r1", "node_1")  # rank 0
        rank_manager.assign_rank("r2", "node_1")  # rank 1
        rank_manager.release_rank("r1")  # release 0
        rank_manager.recover_rank(
            "r3", "node_2", ReplicaRank(rank=5, node_rank=1, local_rank=0)
        )
        rank_manager.assign_rank("r4", "node_1")  # should get 0 (reused)
        rank_manager.release_rank("r2")  # release 1
        rank_manager.assign_rank("r5", "node_2")  # should get 1 (reused)
        # Verify final state
        assert rank_manager.get_replica_rank("r4").rank == 0
        assert rank_manager.get_replica_rank("r5").rank == 1
        assert rank_manager.get_replica_rank("r3").rank == 5

    def test_release_all_then_reassign_all(self):
        """Test releasing all replicas then reassigning maintains correct state."""
        rank_manager = DeploymentRankManager()
        # Assign replicas across nodes
        rank_manager.assign_rank("r1", "node_1")
        rank_manager.assign_rank("r2", "node_2")
        rank_manager.assign_rank("r3", "node_1")
        # Release all
        rank_manager.release_rank("r1")
        rank_manager.release_rank("r2")
        rank_manager.release_rank("r3")
        # Verify mapping is empty
        assert rank_manager.get_replica_ranks_mapping() == {}
        # Reassign new replicas - should reuse ranks 0, 1, 2
        new_rank1 = rank_manager.assign_rank("r4", "node_3")
        new_rank2 = rank_manager.assign_rank("r5", "node_3")
        new_rank3 = rank_manager.assign_rank("r6", "node_3")
        assert new_rank1.rank == 0
        assert new_rank2.rank == 1
        assert new_rank3.rank == 2

    def test_recover_rank_with_same_replica_different_rank(self):
        """Test that recovering the same replica_id twice raises an error."""
        rank_manager = DeploymentRankManager()
        # First recovery
        rank_manager.recover_rank(
            "r1", "node_1", ReplicaRank(rank=0, node_rank=0, local_rank=0)
        )
        # Try to recover same replica_id again - should raise error even
        # though the requested rank differs
        with pytest.raises(RuntimeError, match="already assigned"):
            rank_manager.recover_rank(
                "r1", "node_1", ReplicaRank(rank=1, node_rank=0, local_rank=1)
            )

    def test_large_scale_rank_management(self):
        """Test with many nodes and many replicas per node."""
        rank_manager = DeploymentRankManager()
        num_nodes = 50
        replicas_per_node = 10
        total_replicas = num_nodes * replicas_per_node
        # Assign replicas
        replica_ids = []
        for node_idx in range(num_nodes):
            for replica_idx in range(replicas_per_node):
                replica_id = f"n{node_idx}_r{replica_idx}"
                replica_ids.append(replica_id)
                rank_manager.assign_rank(replica_id, f"node_{node_idx}")
        # Verify total count
        mapping = rank_manager.get_replica_ranks_mapping()
        assert len(mapping) == total_replicas
        # Verify global ranks are contiguous [0, total-1]
        global_ranks = sorted([r.rank for r in mapping.values()])
        assert global_ranks == list(range(total_replicas))
        # Verify node ranks are contiguous [0, num_nodes-1]
        node_ranks = sorted({r.node_rank for r in mapping.values()})
        assert node_ranks == list(range(num_nodes))
        # Verify local ranks per node
        for node_idx in range(num_nodes):
            node_replicas = [
                rid for rid in replica_ids if rid.startswith(f"n{node_idx}_")
            ]
            local_ranks = sorted([mapping[rid].local_rank for rid in node_replicas])
            assert local_ranks == list(range(replicas_per_node))

    def test_consistency_check_with_released_ranks_in_system(self):
        """Test consistency check works correctly when released_ranks exist."""
        rank_manager = DeploymentRankManager()
        replica1 = MockDeploymentReplica("r1")
        replica2 = MockDeploymentReplica("r2")
        replica3 = MockDeploymentReplica("r3")
        # Assign 5 replicas
        rank_manager.assign_rank("r1", "node_1")  # 0
        rank_manager.assign_rank("r2", "node_1")  # 1
        rank_manager.assign_rank("r3", "node_1")  # 2
        rank_manager.assign_rank("r4", "node_1")  # 3
        rank_manager.assign_rank("r5", "node_1")  # 4
        # Release two (creating gaps in the released-rank pool, not in the
        # ranks held by the remaining replicas)
        rank_manager.release_rank("r4")  # Release 3
        rank_manager.release_rank("r5")  # Release 4
        # Check consistency with only remaining 3 replicas
        result = rank_manager.check_rank_consistency_and_reassign_minimally(
            [replica1, replica2, replica3]
        )
        # Should not need reassignment (ranks are already 0, 1, 2)
        assert result == []
        # Verify ranks are correct
        mapping = rank_manager.get_replica_ranks_mapping()
        assert sorted([r.rank for r in mapping.values()]) == [0, 1, 2]

    def test_has_replica_rank_returns_false_for_unassigned(self):
        """Test has_replica_rank returns False for various unassigned states."""
        rank_manager = DeploymentRankManager()
        # Completely unassigned replica
        assert not rank_manager.has_replica_rank("nonexistent")
        # Assign then release - released replicas are no longer ranked
        rank_manager.assign_rank("r1", "node_1")
        rank_manager.release_rank("r1")
        assert not rank_manager.has_replica_rank("r1")
class TestDeploymentRankManagerErrorHandling:
    """Tests for DeploymentRankManager's ``fail_on_rank_error`` flag.

    Kept self-contained so the whole class can be deleted wholesale once the
    error-handling feature is retired.
    """

    def test_assign_rank_error_with_fail_on_rank_error_true(self):
        """Duplicate assignment raises when fail_on_rank_error=True."""
        mgr = DeploymentRankManager(fail_on_rank_error=True)
        mgr.assign_rank("replica_1", "node_1")
        with pytest.raises(RuntimeError, match="already assigned"):
            mgr.assign_rank("replica_1", "node_1")

    def test_assign_rank_error_with_fail_on_rank_error_false(self):
        """Duplicate assignment yields a safe default when the flag is off."""
        mgr = DeploymentRankManager(fail_on_rank_error=False)
        mgr.assign_rank("replica_1", "node_1")
        duplicate = mgr.assign_rank("replica_1", "node_1")
        # Instead of raising, a default ReplicaRank(rank=0) comes back.
        assert duplicate is not None
        assert isinstance(duplicate, ReplicaRank)
        assert duplicate.rank == 0

    def test_release_rank_error_with_fail_on_rank_error_true(self):
        """Releasing an unknown replica raises when the flag is on."""
        mgr = DeploymentRankManager(fail_on_rank_error=True)
        with pytest.raises(RuntimeError, match="not assigned"):
            mgr.release_rank("nonexistent")

    def test_release_rank_error_with_fail_on_rank_error_false(self):
        """Releasing an unknown replica returns None when the flag is off."""
        mgr = DeploymentRankManager(fail_on_rank_error=False)
        assert mgr.release_rank("nonexistent") is None

    def test_recover_rank_error_with_fail_on_rank_error_true(self):
        """Recovering an already-assigned replica raises when the flag is on."""
        mgr = DeploymentRankManager(fail_on_rank_error=True)
        mgr.assign_rank("replica_1", "node_1")
        with pytest.raises(RuntimeError, match="already assigned"):
            mgr.recover_rank(
                "replica_1", "node_1", ReplicaRank(rank=5, node_rank=0, local_rank=0)
            )

    def test_recover_rank_error_with_fail_on_rank_error_false(self):
        """Recovering an already-assigned replica returns None when the flag is off."""
        mgr = DeploymentRankManager(fail_on_rank_error=False)
        mgr.assign_rank("replica_1", "node_1")
        recovered = mgr.recover_rank(
            "replica_1", "node_1", ReplicaRank(rank=5, node_rank=0, local_rank=0)
        )
        assert recovered is None

    def test_get_replica_rank_error_with_fail_on_rank_error_true(self):
        """Looking up an unknown replica raises when the flag is on."""
        mgr = DeploymentRankManager(fail_on_rank_error=True)
        with pytest.raises(RuntimeError, match="not assigned"):
            mgr.get_replica_rank("nonexistent")

    def test_get_replica_rank_error_with_fail_on_rank_error_false(self):
        """Looking up an unknown replica yields a default rank when the flag is off."""
        mgr = DeploymentRankManager(fail_on_rank_error=False)
        fallback = mgr.get_replica_rank("nonexistent")
        assert fallback is not None
        assert isinstance(fallback, ReplicaRank)
        assert fallback.rank == 0

    def test_check_rank_consistency_error_with_fail_on_rank_error_true(self):
        """An active replica without a rank is an invalid state and raises."""
        mgr = DeploymentRankManager(fail_on_rank_error=True)
        active_replica = MockDeploymentReplica("replica_1")
        with pytest.raises(RuntimeError, match="Rank system is in an invalid state"):
            mgr.check_rank_consistency_and_reassign_minimally([active_replica])

    def test_check_rank_consistency_error_with_fail_on_rank_error_false(self):
        """The same invalid state yields an empty list when the flag is off."""
        mgr = DeploymentRankManager(fail_on_rank_error=False)
        active_replica = MockDeploymentReplica("replica_1")
        assert (
            mgr.check_rank_consistency_and_reassign_minimally([active_replica]) == []
        )

    def test_check_rank_consistency_with_stale_ranks_error_handling(self):
        """Stale ranks produce an empty result when the flag is off."""
        mgr = DeploymentRankManager(fail_on_rank_error=False)
        active_replica = MockDeploymentReplica("replica_1")
        mgr.assign_rank("replica_1", "node_1")
        # "stale_replica" holds a rank but is absent from the active list.
        mgr.assign_rank("stale_replica", "node_1")
        assert (
            mgr.check_rank_consistency_and_reassign_minimally([active_replica]) == []
        )

    def test_check_rank_consistency_with_duplicate_ranks_error_handling(self):
        """Duplicate ranks produce an empty result when the flag is off."""
        mgr = DeploymentRankManager(fail_on_rank_error=False)
        first = MockDeploymentReplica("replica_1")
        second = MockDeploymentReplica("replica_2")
        # Force both replicas onto rank 0 via recovery.
        mgr.recover_rank(
            "replica_1", "node_1", ReplicaRank(rank=0, node_rank=0, local_rank=0)
        )
        mgr.recover_rank(
            "replica_2", "node_1", ReplicaRank(rank=0, node_rank=0, local_rank=0)
        )
        assert (
            mgr.check_rank_consistency_and_reassign_minimally([first, second]) == []
        )

    def test_normal_operations_work_with_fail_on_rank_error_false(self):
        """The happy path is unaffected by disabling fail_on_rank_error."""
        mgr = DeploymentRankManager(fail_on_rank_error=False)
        # Assign.
        assigned = mgr.assign_rank("replica_1", "node_1")
        assert assigned.rank == 0
        # Lookup.
        assert mgr.get_replica_rank("replica_1").rank == 0
        # Release.
        mgr.release_rank("replica_1")
        assert not mgr.has_replica_rank("replica_1")
        # Recover.
        mgr.recover_rank(
            "replica_2", "node_1", ReplicaRank(rank=5, node_rank=0, local_rank=0)
        )
        assert mgr.get_replica_rank("replica_2").rank == 5
        # Consistency check should reassign to make ranks contiguous.
        second = MockDeploymentReplica("replica_2")
        third = MockDeploymentReplica("replica_3")
        mgr.assign_rank("replica_3", "node_1")
        reassigned = mgr.check_rank_consistency_and_reassign_minimally([second, third])
        assert len(reassigned) > 0

    def test_multiple_errors_do_not_crash_with_fail_on_rank_error_false(self):
        """Consecutive failures all return safe defaults and leave the manager usable."""
        mgr = DeploymentRankManager(fail_on_rank_error=False)
        first_miss = mgr.get_replica_rank("nonexistent1")
        second_miss = mgr.get_replica_rank("nonexistent2")
        released = mgr.release_rank("nonexistent3")
        assert first_miss is not None
        assert second_miss is not None
        assert released is None
        # Normal operation still works after the errors.
        assert mgr.assign_rank("replica_1", "node_1").rank == 0
if __name__ == "__main__":
    import sys

    exit_code = pytest.main(["-v", "-s", __file__])
    sys.exit(exit_code)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/unit/test_deployment_rank_manager.py",
"license": "Apache License 2.0",
"lines": 1017,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:rllib/env/env_errors.py | """Error classes for RLlib environment operations."""
from ray.util.annotations import PublicAPI
@PublicAPI(stability="alpha")
class StepFailedRecreateEnvError(Exception):
    """An exception that signals that the environment step failed and the environment needs to be reset.

    This exception may be raised by the environment's `step` method.
    It is then caught by the `EnvRunner` and the environment is reset.

    This can be useful if your environment is unstable, regularly crashing in a
    certain way — for example, if you connect to an external simulator that you
    have little control over. You can detect such crashes in your step method and
    raise this error so that the crash is not logged as an error.

    Use this with caution, as it may lead to infinite loops of resetting the
    environment.
    """

    pass
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/env/env_errors.py",
"license": "Apache License 2.0",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:rllib/examples/learners/classes/vpg_torch_learner_shared_optimizer.py | from ray.rllib.core.learner.torch.torch_learner import TorchLearner
from ray.rllib.examples.learners.classes.vpg_torch_learner import VPGTorchLearner
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_torch
torch, _ = try_import_torch()
class VPGTorchLearnerSharedOptimizer(VPGTorchLearner):
    """Multi-agent VPG learner that trains all submodules with one optimizer.

    A single optimizer covering every submodule (e.g. a shared encoder plus n
    policy nets) avoids the instability that can arise when several optimizers
    alternately update the same shared submodule.
    """

    @override(TorchLearner)
    def configure_optimizers(self) -> None:
        # Collect the parameters of every compatible RLModule into one flat list.
        params = [
            param
            for rl_module in self.module.values()
            if self.rl_module_is_compatible(rl_module)
            for param in rl_module.parameters()
        ]
        self.register_optimizer(
            optimizer_name="shared_optimizer",
            optimizer=torch.optim.Adam(params=params),
            params=params,
            # Reuse the "main" learning rate from the AlgorithmConfig.
            lr_or_lr_schedule=self.config.lr,
        )
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/examples/learners/classes/vpg_torch_learner_shared_optimizer.py",
"license": "Apache License 2.0",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:rllib/examples/multi_agent/shared_encoder_cartpole.py | """A runnable example involving the use of a shared encoder module.
How to run this script
----------------------
`python [script file name].py --num-agents=2`
Control the number of agents and policies (RLModules) via --num-agents.
--encoder-emb-dim sets the encoder output dimension, and --no-shared-encoder
runs the experiment with independent encoders.
For debugging, use the following additional command line options
`--no-tune --num-env-runners=0`
which should allow you to set breakpoints anywhere in the RLlib code and
have the execution stop there for inspection and debugging.
For logging to your WandB account, use:
`--wandb-key=[your WandB API key] --wandb-project=[some project name]
--wandb-run-name=[optional: WandB run name (within the defined project)]`
Results to expect
-----------------
Under the shared encoder architecture, the target reward of 600 will typically be reached well before 100,000 timesteps. A trial concludes as below:
+---------------------+------------+-----------------+--------+------------------+-------+-------------------+-------------+-------------+
| Trial name | status | loc | iter | total time (s) | ts | combined return | return p1 | return p0 |
|---------------------+------------+-----------------+--------+------------------+-------+-------------------+-------------+-------------|
| VPG_env_ab318_00000 | TERMINATED | 127.0.0.1:37375 | 33 | 44.2689 | 74197 | 611.35 | 191.71 | 419.64 |
+---------------------+------------+-----------------+--------+------------------+-------+-------------------+-------------+-------------+
Without a shared encoder, a lower reward is typically achieved after training for the full 100,000 timesteps:
+---------------------+------------+-----------------+--------+------------------+--------+-------------------+-------------+-------------+
| Trial name | status | loc | iter | total time (s) | ts | combined return | return p0 | return p1 |
|---------------------+------------+-----------------+--------+------------------+--------+-------------------+-------------+-------------|
| VPG_env_2e79e_00000 | TERMINATED | 127.0.0.1:39076 | 37 | 52.127 | 103894 | 526.66 | 85.78 | 440.88 |
+---------------------+------------+-----------------+--------+------------------+--------+-------------------+-------------+-------------+
"""
import gymnasium as gym
from ray.rllib.core.rl_module.multi_rl_module import MultiRLModuleSpec
from ray.rllib.core.rl_module.rl_module import RLModuleSpec
from ray.rllib.examples.algorithms.classes.vpg import VPGConfig
from ray.rllib.examples.envs.classes.multi_agent import MultiAgentCartPole
from ray.rllib.examples.learners.classes.vpg_torch_learner_shared_optimizer import (
VPGTorchLearnerSharedOptimizer,
)
from ray.rllib.examples.rl_modules.classes.vpg_using_shared_encoder_rlm import (
SHARED_ENCODER_ID,
SharedEncoder,
VPGMultiRLModuleWithSharedEncoder,
VPGPolicyAfterSharedEncoder,
VPGPolicyNoSharedEncoder,
)
from ray.rllib.examples.utils import (
add_rllib_example_script_args,
run_rllib_example_script_experiment,
)
from ray.tune.registry import register_env
# CLI parser shared by all RLlib example scripts, with this example's defaults.
parser = add_rllib_example_script_args(
    default_iters=200, default_timesteps=100000, default_reward=600.0
)
parser.set_defaults(algo="VPG", num_agents=2)
# Encoder output dimension and an escape hatch to run without weight sharing.
parser.add_argument("--encoder-emb-dim", type=int, default=64)
parser.add_argument("--no-shared-encoder", action="store_true")
if __name__ == "__main__":
    args = parser.parse_args()
    # This script hard-codes a two-policy VPG setup ("p0"/"p1"); fail fast on
    # incompatible CLI overrides.
    assert args.algo == "VPG", "The shared encoder example is meant for VPG agents."
    assert args.num_agents == 2, "This example makes use of two agents."
    single_agent_env = gym.make(
        "CartPole-v1"
    ) # To allow instantiation of shared encoder
    EMBEDDING_DIM = args.encoder_emb_dim # encoder output dim
    if args.no_shared_encoder:
        # Baseline: two fully independent policy nets, no shared submodule.
        print("Running experiment without shared encoder")
        specs = MultiRLModuleSpec(
            rl_module_specs={
                # Large policy net.
                # NOTE(review): both policy nets use identical model_configs
                # (hidden_dim=64); the "Large"/"Small" labels look stale — confirm.
                "p0": RLModuleSpec(
                    module_class=VPGPolicyNoSharedEncoder,
                    model_config={
                        "embedding_dim": EMBEDDING_DIM,
                        "hidden_dim": 64,
                    },
                ),
                # Small policy net.
                "p1": RLModuleSpec(
                    module_class=VPGPolicyNoSharedEncoder,
                    model_config={
                        "embedding_dim": EMBEDDING_DIM,
                        "hidden_dim": 64,
                    },
                ),
            }
        )
    else:
        # Shared-encoder variant: one SharedEncoder submodule feeds both policies.
        specs = MultiRLModuleSpec(
            multi_rl_module_class=VPGMultiRLModuleWithSharedEncoder,
            rl_module_specs={
                # Shared encoder.
                SHARED_ENCODER_ID: RLModuleSpec(
                    module_class=SharedEncoder,
                    model_config={"embedding_dim": EMBEDDING_DIM},
                    # Spaces are taken from the single-agent env created above.
                    observation_space=single_agent_env.observation_space,
                    action_space=single_agent_env.action_space,
                ),
                # Large policy net.
                "p0": RLModuleSpec(
                    module_class=VPGPolicyAfterSharedEncoder,
                    model_config={
                        "embedding_dim": EMBEDDING_DIM,
                        "hidden_dim": 64,
                    },
                ),
                # Small policy net.
                "p1": RLModuleSpec(
                    module_class=VPGPolicyAfterSharedEncoder,
                    model_config={
                        "embedding_dim": EMBEDDING_DIM,
                        "hidden_dim": 64,
                    },
                ),
            },
        )
    # Register our environment with tune.
    register_env(
        "env",
        lambda _: MultiAgentCartPole(config={"num_agents": args.num_agents}),
    )
    base_config = (
        VPGConfig()
        .environment("env" if args.num_agents > 0 else "CartPole-v1")
        .training(
            # The shared-optimizer learner is only needed when submodules are
            # actually shared; None falls back to the algorithm's default learner.
            learner_class=VPGTorchLearnerSharedOptimizer
            if not args.no_shared_encoder
            else None,
            train_batch_size=2048,
            lr=1e-2,
        )
        .multi_agent(
            policies={"p0", "p1"},
            # Agent ids 0/1 map straight onto policies "p0"/"p1".
            policy_mapping_fn=lambda agent_id, episode, **kw: f"p{agent_id}",
        )
        .rl_module(
            rl_module_spec=specs,
        )
    )
    run_rllib_example_script_experiment(base_config, args)
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/examples/multi_agent/shared_encoder_cartpole.py",
"license": "Apache License 2.0",
"lines": 143,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:release/nightly_tests/dataset/text_embeddings_benchmark.py | """
Benchmark a text embeddings job
"""
import argparse
import uuid
import time
from typing import Dict, List
from numpy import ndarray
import ray
import torch
from sentence_transformers import SentenceTransformer
from langchain_text_splitters import (
RecursiveCharacterTextSplitter,
CharacterTextSplitter,
)
from benchmark import Benchmark, BenchmarkMetric
# Subset of the data so that benchmark completes in ~20 minutes.
DEFAULT_SOURCE_DIRECTORY_S3 = "s3://air-example-data/common-pile-mirror/arxiv_papers/arxiv_papers-train-00001-of-00042.parquet"
# Add a random prefix to avoid conflicts between different runs.
WRITE_PATH = f"s3://ray-data-write-benchmark/{uuid.uuid4().hex}/"
def parse_args():
    """Parse the CLI flags for the text-embeddings benchmark."""
    parser = argparse.ArgumentParser(
        description="Text Embeddings Batch Inference Benchmark"
    )
    # Input data.
    parser.add_argument(
        "--source-directory",
        type=str,
        default=DEFAULT_SOURCE_DIRECTORY_S3,
        help="S3 URI of source documents",
    )
    # Chunking stage.
    parser.add_argument(
        "--chunk-concurrency",
        type=int,
        default=20,
        help="Concurrency for Chunker stage",
    )
    parser.add_argument(
        "--chunk-cpus", type=int, default=None, help="Number of CPUs per Chunker"
    )
    parser.add_argument(
        "--chunk-method",
        choices=["fixed", "recursive"],
        default="recursive",
        help="Chunking method",
    )
    parser.add_argument(
        "--chunk-size", type=int, default=1200, help="Chunk size for text splitting"
    )
    parser.add_argument(
        "--chunk-overlap",
        type=int,
        default=100,
        help="Number of overlapping boundary characters between text chunks.",
    )
    # Embedding stage.
    parser.add_argument(
        "--embed-batch-size",
        type=int,
        default=256,
        help="Batch size for embedding inference",
    )
    parser.add_argument(
        "--embed-concurrency",
        type=int,
        default=15,
        help="Number of Embedder replicas",
    )
    parser.add_argument(
        "--num-gpus", type=int, default=1, help="Number of GPUs per Embedder"
    )
    parser.add_argument(
        "--model-name",
        type=str,
        default="Salesforce/SFR-Embedding-Code-400M_R",
        help="Embedding model name",
    )
    # Test modes.
    parser.add_argument(
        "--smoke-test",
        action="store_true",
        help="Runs a smoke test with a small subset of the data",
    )
    parser.add_argument(
        "--chaos-test",
        action="store_true",
        default=False,
        help="Enable chaos testing to simulate node failures",
    )
    return parser.parse_args()
class Chunker:
    """Splits a source document into overlapping text chunks."""

    def __init__(self, method: str, chunk_size: int, chunk_overlap: int):
        # "fixed" uses plain character splitting; anything else splits
        # recursively on natural text boundaries.
        if method == "fixed":
            splitter_cls = CharacterTextSplitter
        else:
            splitter_cls = RecursiveCharacterTextSplitter
        self.splitter = splitter_cls(
            chunk_size=chunk_size, chunk_overlap=chunk_overlap
        )

    def __call__(self, page: Dict) -> List[Dict]:
        """Return one record per chunk, each tagged with its source document."""
        records = []
        for chunk_text in self.splitter.split_text(page["text"]):
            records.append(
                {
                    "text": chunk_text,
                    "source": page["source"],
                    # Random suffix keeps chunk ids globally unique per document.
                    "chunk_id": f"{page['id']}_{str(uuid.uuid4())}",
                    "doc_id": page["id"],
                }
            )
        return records
class Embedder:
    """Computes dense embeddings for batches of text chunks."""

    def __init__(self, model_name: str):
        # Load the model once per replica; prefer GPU when one is visible.
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.model = SentenceTransformer(
            model_name,
            device=device,
            trust_remote_code=True,
        )

    def __call__(self, batch: Dict[str, ndarray]) -> Dict[str, ndarray]:
        """Attach an "embeddings" column computed from the "text" column."""
        texts = batch["text"]
        batch["embeddings"] = self.model.encode(
            texts, convert_to_numpy=True, batch_size=len(texts)
        )
        return batch
def main(args):
    """Run the chunk -> embed -> write pipeline and return benchmark metrics.

    Args:
        args: Parsed CLI namespace from parse_args().

    Returns:
        Dict of benchmark metrics keyed by BenchmarkMetric members and
        additional string keys with run configuration and derived timings.
    """
    start_time = time.time()
    ds = ray.data.read_parquet(
        args.source_directory,
        include_paths=True,
    )
    # read_parquet returns after metadata fetch; execution itself is lazy,
    # so this delta measures only the metadata-fetching phase.
    metadata_fetch_end = time.time()
    metadata_fetching_s = metadata_fetch_end - start_time
    if args.smoke_test:
        ds = ds.limit(100)
    # Stage 1: split each document into chunk records (fan-out).
    ds = ds.flat_map(
        Chunker(
            method=args.chunk_method,
            chunk_size=args.chunk_size,
            chunk_overlap=args.chunk_overlap,
        ),
        concurrency=args.chunk_concurrency,
        num_cpus=args.chunk_cpus,
    )
    # Stage 2: embed chunks in batches on GPU replicas.
    ds = ds.map_batches(
        Embedder,
        fn_constructor_kwargs={"model_name": args.model_name},
        batch_size=args.embed_batch_size,
        concurrency=args.embed_concurrency,
        num_gpus=args.num_gpus,
    )
    # Writing triggers execution of the whole (lazy) pipeline.
    ds.write_parquet(WRITE_PATH, num_rows_per_file=5_000)
    end_time = time.time()
    runtime_s = end_time - start_time
    # Row count is taken from the written output, after timing has stopped.
    num_rows = ray.data.read_parquet(WRITE_PATH).count()
    throughput_rows_s = num_rows / runtime_s
    # Compute metrics for time and throughput without metadata fetch
    runtime_s_wo_metadata_fetch = end_time - metadata_fetch_end
    throughput_rows_s_wo_metadata_fetch = num_rows / runtime_s_wo_metadata_fetch
    # Report chaos testing node failures
    if args.chaos_test:
        dead_nodes = [node["NodeID"] for node in ray.nodes() if not node["Alive"]]
        assert dead_nodes, "No dead nodes during chaos test"
        print(f"Total chaos killed: {dead_nodes}")
    return {
        BenchmarkMetric.RUNTIME: runtime_s,
        BenchmarkMetric.NUM_ROWS: num_rows,
        BenchmarkMetric.THROUGHPUT: throughput_rows_s,
        "source_directory": args.source_directory,
        "model_name": args.model_name,
        "chunk_method": args.chunk_method,
        "metadata_fetching_s": metadata_fetching_s,
        "runtime_s_wo_metadata_fetch": runtime_s_wo_metadata_fetch,
        "throughput_rows_s_wo_metadata_fetch": throughput_rows_s_wo_metadata_fetch,
        "chaos_test": args.chaos_test,
    }
if __name__ == "__main__":
    cli_args = parse_args()
    print(f"Writing to {WRITE_PATH}")
    bench = Benchmark()
    bench.run_fn("text-embeddings-benchmark", main, cli_args)
    bench.write_result()
| {
"repo_id": "ray-project/ray",
"file_path": "release/nightly_tests/dataset/text_embeddings_benchmark.py",
"license": "Apache License 2.0",
"lines": 176,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/_internal/actor_autoscaler/base_actor_autoscaler.py | from abc import ABC, abstractmethod
from typing import TYPE_CHECKING
from ray.util.annotations import DeveloperAPI
if TYPE_CHECKING:
from ray.data._internal.execution.resource_manager import ResourceManager
from ray.data._internal.execution.streaming_executor_state import Topology
@DeveloperAPI
class ActorAutoscaler(ABC):
    """Abstract interface for Ray Data actor autoscaler.

    Subclasses decide when and how `AutoscalingActorPool`s are scaled.
    """

    def __init__(
        self,
        topology: "Topology",
        resource_manager: "ResourceManager",
    ):
        # Operator topology of the executing dataset.
        self._topology = topology
        # Resource manager for the same execution; presumably consulted by
        # subclasses when making scaling decisions — not used by this base class.
        self._resource_manager = resource_manager

    @abstractmethod
    def try_trigger_scaling(self):
        """Try trigger autoscaling.

        This method will be called each time when StreamingExecutor makes
        a scheduling decision. A subclass should override this method to
        handle the autoscaling of `AutoscalingActorPool`s.
        """
        ...
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/actor_autoscaler/base_actor_autoscaler.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/data/_internal/cluster_autoscaler/default_cluster_autoscaler.py | import math
import time
import warnings
from typing import TYPE_CHECKING, Dict
import ray
from .base_cluster_autoscaler import ClusterAutoscaler
from .util import cap_resource_request_to_limits
from ray.data._internal.execution.autoscaling_requester import (
get_or_create_autoscaling_requester_actor,
)
from ray.data._internal.execution.interfaces import ExecutionResources
from ray.util.annotations import Deprecated, RayDeprecationWarning
if TYPE_CHECKING:
from ray.data._internal.execution.streaming_executor_state import Topology
@Deprecated(
    message="DefaultClusterAutoscaler (V1) is deprecated and will be removed "
    "after June 2026. Use DefaultClusterAutoscalerV2 instead by setting "
    "RAY_DATA_CLUSTER_AUTOSCALER=V2 or using the default."
)
class DefaultClusterAutoscaler(ClusterAutoscaler):
    """Deprecated V1 cluster autoscaler for Ray Data streaming execution.

    Sends rate-limited resource requests to Ray's autoscaler (via the
    autoscaling-requester actor) when execution is stalled.
    """

    # Min number of seconds between two autoscaling requests.
    MIN_GAP_BETWEEN_AUTOSCALING_REQUESTS = 20

    def __init__(
        self,
        topology: "Topology",
        resource_limits: ExecutionResources,
        *,
        execution_id: str,
    ):
        # Emit the deprecation warning at construction time so callers see it
        # even if they never trigger scaling.
        warnings.warn(
            "DefaultClusterAutoscaler (V1) is deprecated and will be removed "
            "after June 2026. Use DefaultClusterAutoscalerV2 instead by setting "
            "RAY_DATA_CLUSTER_AUTOSCALER=V2 or using the default.",
            RayDeprecationWarning,
            stacklevel=2,
        )
        self._topology = topology
        # User-configured limits (ExecutionOptions.resource_limits); requests
        # are capped to these in try_trigger_scaling.
        self._resource_limits = resource_limits
        self._execution_id = execution_id
        # Last time when a request was sent to Ray's autoscaler.
        self._last_request_time = 0

    def try_trigger_scaling(self):
        """Try to scale up the cluster to accommodate the provided in-progress workload.

        This makes a resource request to Ray's autoscaler consisting of the current,
        aggregate usage of all operators in the DAG + the incremental usage of all
        operators that are ready for dispatch (i.e. that have inputs queued). If the
        autoscaler were to grant this resource request, it would allow us to dispatch
        one task for every ready operator.

        The resource request is capped to not exceed user-configured resource limits
        set via ExecutionOptions.resource_limits.
        """
        # Limit the frequency of autoscaling requests.
        now = time.time()
        if now - self._last_request_time < self.MIN_GAP_BETWEEN_AUTOSCALING_REQUESTS:
            return
        # Scale up the cluster, if no ops are allowed to run, but there are still data
        # in the input queues.
        no_runnable_op = all(
            not op_state._scheduling_status.runnable
            for _, op_state in self._topology.items()
        )
        any_has_input = any(
            op_state.has_pending_bundles() for _, op_state in self._topology.items()
        )
        if not (no_runnable_op and any_has_input):
            return
        # Only update the timestamp when a request is actually going out, so a
        # non-stalled execution does not delay a later legitimate request.
        self._last_request_time = now
        # Get resource usage for all ops + additional resources needed to launch one
        # more task for each ready op.
        # We separate active bundles (running tasks) from pending bundles (future work)
        # to ensure running tasks' resources are never crowded out by pending work.
        active_bundles = []
        pending_bundles = []

        def to_bundle(resource: ExecutionResources) -> Dict:
            # Convert an ExecutionResources into a Ray bundle dict, rounding
            # fractional requirements up to whole units.
            req = {}
            if resource.cpu:
                req["CPU"] = math.ceil(resource.cpu)
            if resource.gpu:
                req["GPU"] = math.ceil(resource.gpu)
            if resource.memory:
                req["memory"] = math.ceil(resource.memory)
            return req

        for op, state in self._topology.items():
            per_task_resource = op.incremental_resource_usage()
            task_bundle = to_bundle(per_task_resource)
            # Bundles for running tasks -> active (must include)
            active_bundles.extend([task_bundle] * op.num_active_tasks())
            # Only include incremental resource usage for ops that are ready for
            # dispatch.
            if state.has_pending_bundles():
                # TODO(Clark): Scale up more aggressively by adding incremental resource
                # usage for more than one bundle in the queue for this op?
                # Bundle for pending work -> pending (best-effort)
                pending_bundles.append(task_bundle)
        # Cap the resource request to respect user-configured limits.
        # Active bundles are always included; pending bundles are best-effort.
        resource_request = cap_resource_request_to_limits(
            active_bundles, pending_bundles, self._resource_limits
        )
        self._send_resource_request(resource_request)

    def _send_resource_request(self, resource_request):
        """Forward a bundle-list request to the shared autoscaling-requester actor."""
        # Make autoscaler resource request.
        actor = get_or_create_autoscaling_requester_actor()
        actor.request_resources.remote(resource_request, self._execution_id)

    def on_executor_shutdown(self):
        """Release this execution's outstanding autoscaling request."""
        # Make request for zero resources to autoscaler for this execution.
        actor = get_or_create_autoscaling_requester_actor()
        actor.request_resources.remote({}, self._execution_id)

    def get_total_resources(self) -> ExecutionResources:
        """Get total resources available in the cluster."""
        return ExecutionResources.from_resource_dict(ray.cluster_resources())
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/cluster_autoscaler/default_cluster_autoscaler.py",
"license": "Apache License 2.0",
"lines": 111,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:release/ray_release/scripts/custom_byod_build.py | from typing import Optional, Tuple
import click
from ray_release.byod.build import build_anyscale_custom_byod_image
from ray_release.byod.build_context import BuildContext
@click.command()
@click.option("--image-name", type=str, required=True)
@click.option("--base-image", type=str, required=True)
@click.option("--post-build-script", type=str)
@click.option("--python-depset", type=str)
@click.option("--env", "envs", type=str, multiple=True)
def main(
    image_name: str,
    base_image: str,
    post_build_script: Optional[str],
    python_depset: Optional[str],
    envs: Tuple[str, ...],
):
    """Build a custom Anyscale BYOD image on top of a base image.

    At least one customization (--post-build-script, --python-depset, or
    --env) must be supplied; otherwise the build would be a no-op.

    Raises:
        click.UsageError: If no customization is provided, or if an --env
            value is not of the form KEY=VALUE.
    """
    if not post_build_script and not python_depset and not envs:
        raise click.UsageError(
            "At least one of post_build_script, python_depset, or env must be provided"
        )
    build_context: BuildContext = {}
    if envs:
        # Validate eagerly so a malformed value produces a clear CLI usage
        # error instead of an opaque ValueError from the dict() constructor.
        malformed = [e for e in envs if "=" not in e]
        if malformed:
            raise click.UsageError(
                "Each --env value must be of the form KEY=VALUE; "
                f"got: {', '.join(malformed)}"
            )
        build_context["envs"] = dict(e.split("=", 1) for e in envs)
    if post_build_script:
        build_context["post_build_script"] = post_build_script
    if python_depset:
        build_context["python_depset"] = python_depset
    build_anyscale_custom_byod_image(image_name, base_image, build_context)
# Allow invoking this module directly as a CLI script.
if __name__ == "__main__":
    main()
| {
"repo_id": "ray-project/ray",
"file_path": "release/ray_release/scripts/custom_byod_build.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:release/ray_release/tests/test_custom_byod_build.py | import sys
from unittest.mock import patch
import pytest
from click.testing import CliRunner
from ray_release.scripts.custom_byod_build import main
@patch("ray_release.scripts.custom_byod_build.build_anyscale_custom_byod_image")
def test_custom_byod_build(mock_build_anyscale_custom_byod_image):
    """A build providing both a post-build script and a depset succeeds."""
    mock_build_anyscale_custom_byod_image.return_value = None
    cli = CliRunner()
    invocation = [
        "--image-name", "test-image",
        "--base-image", "test-base-image",
        "--post-build-script", "test_post_build_script.sh",
        "--python-depset", "python_depset.lock",
    ]
    outcome = cli.invoke(main, invocation)
    assert outcome.exit_code == 0
@patch("ray_release.scripts.custom_byod_build.build_anyscale_custom_byod_image")
def test_custom_byod_build_without_lock_file(
    mock_build_anyscale_custom_byod_image,
):
    """A post-build script alone (no depset) is a sufficient customization."""
    mock_build_anyscale_custom_byod_image.return_value = None
    cli = CliRunner()
    outcome = cli.invoke(
        main,
        [
            "--image-name", "test-image",
            "--base-image", "test-base-image",
            "--post-build-script", "test_post_build_script.sh",
        ],
    )
    assert outcome.exit_code == 0
@patch("ray_release.scripts.custom_byod_build.build_anyscale_custom_byod_image")
def test_custom_byod_build_missing_arg(mock_build_anyscale_custom_byod_image):
    """Required options and the at-least-one-customization rule are enforced."""
    mock_build_anyscale_custom_byod_image.return_value = None
    cli = CliRunner()

    # Missing --image-name.
    outcome = cli.invoke(
        main,
        [
            "--base-image", "test-base-image",
            "--post-build-script", "test_post_build_script.sh",
        ],
    )
    assert outcome.exit_code == 2
    assert "Error: Missing option '--image-name'" in outcome.output

    # Missing --base-image.
    outcome = cli.invoke(
        main,
        [
            "--image-name", "test-image",
            "--post-build-script", "test_post_build_script.sh",
        ],
    )
    assert outcome.exit_code == 2
    assert "Error: Missing option '--base-image'" in outcome.output

    # No customization at all.
    outcome = cli.invoke(
        main, ["--image-name", "test-image", "--base-image", "test-base-image"]
    )
    assert outcome.exit_code == 2
    assert (
        "At least one of post_build_script, python_depset, or env must be provided"
        in outcome.output
    )

    # A depset alone is a sufficient customization.
    outcome = cli.invoke(
        main,
        [
            "--image-name", "test-image",
            "--base-image", "test-base-image",
            "--python-depset", "python_depset.lock",
        ],
    )
    assert outcome.exit_code == 0
@patch("ray_release.scripts.custom_byod_build.build_anyscale_custom_byod_image")
def test_custom_byod_build_with_env(mock_build_anyscale_custom_byod_image):
    """Repeated --env flags end up in the build context's env mapping."""
    mock_build_anyscale_custom_byod_image.return_value = None
    cli = CliRunner()
    outcome = cli.invoke(
        main,
        [
            "--image-name", "test-image",
            "--base-image", "test-base-image",
            "--env", "FOO=bar",
            "--env", "BAZ=qux",
        ],
    )
    assert outcome.exit_code == 0
    # Third positional argument of the build call is the build context.
    ctx = mock_build_anyscale_custom_byod_image.call_args[0][2]
    assert ctx["envs"] == {"FOO": "bar", "BAZ": "qux"}
    assert "post_build_script" not in ctx
    assert "python_depset" not in ctx
@patch("ray_release.scripts.custom_byod_build.build_anyscale_custom_byod_image")
def test_custom_byod_build_with_env_and_script(mock_build_anyscale_custom_byod_image):
    """--env composes with --post-build-script in the build context."""
    mock_build_anyscale_custom_byod_image.return_value = None
    cli = CliRunner()
    outcome = cli.invoke(
        main,
        [
            "--image-name", "test-image",
            "--base-image", "test-base-image",
            "--post-build-script", "test_post_build_script.sh",
            "--env", "KEY=value",
        ],
    )
    assert outcome.exit_code == 0
    ctx = mock_build_anyscale_custom_byod_image.call_args[0][2]
    assert ctx["envs"] == {"KEY": "value"}
    assert ctx["post_build_script"] == "test_post_build_script.sh"
# Run this test module directly under pytest when executed as a script.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "release/ray_release/tests/test_custom_byod_build.py",
"license": "Apache License 2.0",
"lines": 132,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:doc/source/llm/doc_code/serve/prefix_aware_router/prefix_aware_example.py | """
This file serves as a documentation example and CI test.
Structure:
1. Monkeypatch setup: Ensures serve.run is non-blocking and removes accelerator requirements for CI testing.
2. Docs example (between __prefix_aware_example_start/end__): Embedded in Sphinx docs via literalinclude.
3. Test validation (deployment status polling + cleanup)
"""
import time
from ray import serve
from ray.serve.schema import ApplicationStatus
from ray.serve._private.constants import SERVE_DEFAULT_APP_NAME
from ray.serve import llm
# Keep references to the real implementations before they are monkeypatched
# below, so the wrappers can delegate to them.
_original_serve_run = serve.run
_original_build_openai_app = llm.build_openai_app
def _non_blocking_serve_run(app, **kwargs):
    """Delegate to the real serve.run while forcing blocking=False (CI-friendly)."""
    overridden = {**kwargs, "blocking": False}
    return _original_serve_run(app, **overridden)
def _testing_build_openai_app(llm_serving_args):
    """Strip accelerator requirements so the app can run on CPU-only CI."""
    for llm_config in llm_serving_args["llm_configs"]:
        llm_config.accelerator_type = None
    return _original_build_openai_app(llm_serving_args)
# Install the CI-friendly shims before the docs example below executes.
serve.run = _non_blocking_serve_run
llm.build_openai_app = _testing_build_openai_app
# __prefix_aware_example_start__
from ray import serve
from ray.serve.llm import LLMConfig, build_openai_app
from ray.serve.llm.request_router import PrefixCacheAffinityRouter
llm_config = LLMConfig(
model_loading_config={
"model_id": "qwen-0.5b",
"model_source": "Qwen/Qwen2.5-0.5B-Instruct",
},
deployment_config={
"autoscaling_config": {
"min_replicas": 4,
"max_replicas": 4,
},
"request_router_config": {
"request_router_class": PrefixCacheAffinityRouter,
"request_router_kwargs": {
"imbalanced_threshold": 5, # More aggressive load balancing
"match_rate_threshold": 0.15, # Require 15% match rate
"do_eviction": True, # Enable memory management
"eviction_threshold_chars": 500_000,
"eviction_target_chars": 400_000,
"eviction_interval_secs": 30,
},
},
},
runtime_env={"env_vars": {"VLLM_USE_V1": "1"}},
)
app = build_openai_app({"llm_configs": [llm_config]})
serve.run(app, blocking=True)
# __prefix_aware_example_end__
# Poll the default Serve application until it reaches RUNNING, fails, or the
# timeout elapses.
status = ApplicationStatus.NOT_STARTED
timeout_seconds = 180
start_time = time.time()
while (
    status != ApplicationStatus.RUNNING and time.time() - start_time < timeout_seconds
):
    status = serve.status().applications[SERVE_DEFAULT_APP_NAME].status
    # Fail fast on terminal error states instead of waiting out the timeout.
    if status in [ApplicationStatus.DEPLOY_FAILED, ApplicationStatus.UNHEALTHY]:
        raise AssertionError(f"Deployment failed with status: {status}")
    time.sleep(1)
if status != ApplicationStatus.RUNNING:
    raise AssertionError(
        f"Deployment failed to reach RUNNING status within {timeout_seconds}s. Current status: {status}"
    )
# Clean up so subsequent tests start from a fresh Serve instance.
serve.shutdown()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/llm/doc_code/serve/prefix_aware_router/prefix_aware_example.py",
"license": "Apache License 2.0",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/data/_internal/execution/dataset_state.py | import enum
class DatasetState(enum.IntEnum):
    """States a dataset can be in while its execution plan runs."""

    UNKNOWN = 0
    RUNNING = 1
    FINISHED = 2
    FAILED = 3
    PENDING = 4

    def __str__(self):
        # Human-readable form is simply the member name.
        return self.name

    @classmethod
    def from_string(cls, text):
        """Look up a state by member name, falling back to UNKNOWN."""
        return cls.__members__.get(text, cls.UNKNOWN)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/execution/dataset_state.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/serve/task_consumer.py | import inspect
import logging
from functools import wraps
from typing import Callable, Optional
from ray._common.utils import import_attr
from ray.serve._private.constants import (
DEFAULT_CONSUMER_CONCURRENCY,
SERVE_LOGGER_NAME,
)
from ray.serve._private.task_consumer import TaskConsumerWrapper
from ray.serve._private.utils import copy_class_metadata
from ray.serve.schema import (
TaskProcessorAdapter,
TaskProcessorConfig,
)
from ray.util.annotations import PublicAPI
logger = logging.getLogger(SERVE_LOGGER_NAME)
def _instantiate_adapter(
    task_processor_config: TaskProcessorConfig,
    consumer_concurrency: int = DEFAULT_CONSUMER_CONCURRENCY,
) -> TaskProcessorAdapter:
    """Resolve, construct, and initialize the adapter named in the config.

    Args:
        task_processor_config: Config whose ``adapter`` field is either a
            dotted import path (string) or a class/callable.
        consumer_concurrency: Concurrency value passed to
            ``adapter.initialize()``.

    Returns:
        A fully initialized ``TaskProcessorAdapter`` instance.

    Raises:
        TypeError: If ``adapter`` is neither a string nor a callable, or the
            constructed object is not a ``TaskProcessorAdapter``.
        RuntimeError: If instantiation or initialization fails; the original
            exception is chained as ``__cause__``.
    """
    adapter = task_processor_config.adapter
    # Handle string-based adapter specification (module path)
    if isinstance(adapter, str):
        adapter_class = import_attr(adapter)
    elif callable(adapter):
        adapter_class = adapter
    else:
        raise TypeError(
            f"Adapter must be either a string path or a callable class, got {type(adapter).__name__}: {adapter}"
        )
    try:
        adapter_instance = adapter_class(task_processor_config)
    except Exception as e:
        # Chain the original exception so the root cause isn't lost.
        raise RuntimeError(
            f"Failed to instantiate {adapter_class.__name__}: {e}"
        ) from e
    if not isinstance(adapter_instance, TaskProcessorAdapter):
        raise TypeError(
            f"{adapter_class.__name__} must inherit from TaskProcessorAdapter, got {type(adapter_instance).__name__}"
        )
    try:
        adapter_instance.initialize(consumer_concurrency)
    except Exception as e:
        raise RuntimeError(
            f"Failed to initialize {adapter_class.__name__}: {e}"
        ) from e
    return adapter_instance
@PublicAPI(stability="alpha")
def instantiate_adapter_from_config(
    task_processor_config: TaskProcessorConfig,
) -> TaskProcessorAdapter:
    """
    Create a TaskProcessorAdapter instance from the provided configuration and call .initialize(). This function supports two ways to specify an adapter:
    1. String path: A fully qualified module path to an adapter class
       Example: "ray.serve.task_processor.CeleryTaskProcessorAdapter"
    2. Class reference: A direct reference to an adapter class
       Example: CeleryTaskProcessorAdapter
    Args:
        task_processor_config: Configuration object containing adapter specification.
    Returns:
        An initialized TaskProcessorAdapter instance ready for use.
    Raises:
        TypeError: If the adapter is not a string or callable class, or does
            not produce a TaskProcessorAdapter instance.
        RuntimeError: If the adapter fails to instantiate or initialize.
            (A malformed string path surfaces as an import error from the
            underlying import machinery.)
    Example:
        .. code-block:: python
            config = TaskProcessorConfig(
                adapter="my.module.CustomAdapter",
                adapter_config={"param": "value"},
                queue_name="my_queue"
            )
            adapter = instantiate_adapter_from_config(config)
    """
    return _instantiate_adapter(task_processor_config)
@PublicAPI(stability="alpha")
def task_consumer(*, task_processor_config: TaskProcessorConfig):
    """
    Decorator to mark a class as a TaskConsumer.
    Args:
        task_processor_config: Configuration for the task processor (required)
    Note:
        This decorator must be used with parentheses:
        @task_consumer(task_processor_config=config)
    Returns:
        A wrapper class that inherits from the target class and implements the task consumer functionality.
    Example:
        .. code-block:: python
            from ray import serve
            from ray.serve.task_consumer import task_consumer, task_handler
            @serve.deployment
            @task_consumer(task_processor_config=config)
            class MyTaskConsumer:
                @task_handler(name="my_task")
                def my_task(self, *args, **kwargs):
                    pass
    """

    def decorator(target_cls):
        class _TaskConsumerWrapper(target_cls, TaskConsumerWrapper):
            _adapter: TaskProcessorAdapter

            def __init__(self, *args, **kwargs):
                target_cls.__init__(self, *args, **kwargs)

            def initialize_callable(self, consumer_concurrency: int):
                # Build and initialize the adapter lazily, once the replica is
                # ready to start consuming.
                self._adapter = _instantiate_adapter(
                    task_processor_config, consumer_concurrency
                )
                # Register every method tagged by @task_handler.
                for name, method in inspect.getmembers(
                    target_cls, predicate=inspect.isfunction
                ):
                    if getattr(method, "_is_task_handler", False):
                        task_name = getattr(method, "_task_name", name)
                        # Create a callable that properly binds the method to this instance
                        bound_method = getattr(self, name)
                        self._adapter.register_task_handle(bound_method, task_name)
                try:
                    self._adapter.start_consumer()
                    logger.info("task consumer started successfully")
                except Exception as e:
                    logger.error(f"Failed to start task consumer: {e}")
                    raise

            def __del__(self):
                # Bug fix: _adapter only exists after initialize_callable()
                # has run. Without this guard, an instance garbage-collected
                # before initialization raises AttributeError inside __del__.
                adapter = getattr(self, "_adapter", None)
                if adapter is not None:
                    adapter.stop_consumer()
                if hasattr(target_cls, "__del__"):
                    target_cls.__del__(self)

        copy_class_metadata(_TaskConsumerWrapper, target_cls)
        return _TaskConsumerWrapper

    return decorator
@PublicAPI(stability="alpha")
def task_handler(
    _func: Optional[Callable] = None, *, name: Optional[str] = None
) -> Callable:
    """
    Decorator to mark a method as a task handler.
    Optionally specify a task name. Default is the method name.
    Arguments:
        _func: The function to decorate.
        name: The name of the task. Default is the method name.
    Returns:
        A wrapper function that is marked as a task handler.
    Example:
        .. code-block:: python
            from ray import serve
            from ray.serve.task_consumer import task_consumer, task_handler
            @serve.deployment
            @task_consumer(task_processor_config=config)
            class MyTaskConsumer:
                @task_handler(name="my_task")
                def my_task(self, *args, **kwargs):
                    pass
    """
    # Reject empty / non-string task names up front.
    if name is not None and (not isinstance(name, str) or not name.strip()):
        raise ValueError(f"Task name must be a non-empty string, got {name}")

    def decorator(f):
        # Pick the wrapper flavor matching the wrapped function: coroutine
        # functions must stay awaitable after decoration.
        if inspect.iscoroutinefunction(f):

            @wraps(f)
            async def handler(*args, **kwargs):
                return await f(*args, **kwargs)

        else:

            @wraps(f)
            def handler(*args, **kwargs):
                return f(*args, **kwargs)

        # Metadata consumed by the task_consumer wrapper at registration time.
        handler._is_task_handler = True  # type: ignore
        handler._task_name = name or f.__name__  # type: ignore
        return handler

    # Support both bare @task_handler and @task_handler(name="...") usage.
    return decorator(_func) if _func is not None else decorator
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/task_consumer.py",
"license": "Apache License 2.0",
"lines": 171,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/serve/task_processor.py | import asyncio
import logging
import threading
import time
from typing import Any, Dict, List, Optional
from celery import Celery
from celery.signals import task_failure, task_unknown
from ray.serve import get_replica_context
from ray.serve._private.constants import (
DEFAULT_CONSUMER_CONCURRENCY,
SERVE_LOGGER_NAME,
)
from ray.serve.schema import (
CeleryAdapterConfig,
TaskProcessorAdapter,
TaskProcessorConfig,
TaskResult,
)
from ray.util.annotations import PublicAPI
logger = logging.getLogger(SERVE_LOGGER_NAME)
# Celery app-level configuration keys that the adapter manages itself.
CELERY_WORKER_POOL = "worker_pool"
CELERY_WORKER_CONCURRENCY = "worker_concurrency"
CELERY_TASK_IGNORE_RESULT = "task_ignore_result"
CELERY_TASK_ACKS_LATE = "task_acks_late"
CELERY_TASK_REJECT_ON_WORKER_LOST = "task_reject_on_worker_lost"
# Keys users may not override via app_custom_config (enforced in __init__).
CELERY_DEFAULT_APP_CONFIG = [
    CELERY_WORKER_POOL,
    CELERY_WORKER_CONCURRENCY,
    CELERY_TASK_IGNORE_RESULT,
    CELERY_TASK_ACKS_LATE,
    CELERY_TASK_REJECT_ON_WORKER_LOST,
]
@PublicAPI(stability="alpha")
class CeleryTaskProcessorAdapter(TaskProcessorAdapter):
    """
    Celery-based task processor adapter.
    This adapter does NOT support any async operations.
    All operations must be performed synchronously.
    """

    # Celery application used both to enqueue tasks and to run the worker.
    _app: Celery
    _config: TaskProcessorConfig
    # Background thread running the Celery worker main loop (None until started).
    _worker_thread: Optional[threading.Thread] = None
    # Hostname assigned to the worker; used to target control commands.
    _worker_hostname: Optional[str] = None
    _worker_concurrency: int = DEFAULT_CONSUMER_CONCURRENCY

    def __init__(self, config: TaskProcessorConfig, *args, **kwargs):
        """Validate the config; no Celery objects are created until initialize()."""
        super().__init__(*args, **kwargs)
        if not isinstance(config.adapter_config, CeleryAdapterConfig):
            raise TypeError(
                "TaskProcessorConfig.adapter_config must be an instance of CeleryAdapterConfig"
            )
        # Check if any app_custom_config keys conflict with default Celery app config
        if config.adapter_config.app_custom_config:
            conflicting_keys = set(
                config.adapter_config.app_custom_config.keys()
            ) & set(CELERY_DEFAULT_APP_CONFIG)
            if conflicting_keys:
                raise ValueError(
                    f"The following configuration keys cannot be changed via app_custom_config: {sorted(conflicting_keys)}. "
                    f"These are managed internally by the CeleryTaskProcessorAdapter."
                )
        self._config = config
        # Celery adapter does not support any async capabilities
        # self._async_capabilities is already an empty set from parent class

    def initialize(self, consumer_concurrency: int = DEFAULT_CONSUMER_CONCURRENCY):
        """Create the Celery app, declare queues/routes, and hook DLQ signals."""
        self._app = Celery(
            self._config.queue_name,
            backend=self._config.adapter_config.backend_url,
            broker=self._config.adapter_config.broker_url,
        )
        app_configuration = {
            CELERY_WORKER_POOL: "threads",
            CELERY_WORKER_CONCURRENCY: consumer_concurrency,
            CELERY_TASK_IGNORE_RESULT: False,  # Store task results so they can be retrieved after completion
            CELERY_TASK_ACKS_LATE: True,  # Acknowledge tasks only after completion (not when received) for better reliability
            CELERY_TASK_REJECT_ON_WORKER_LOST: True,  # Reject and requeue tasks when worker is lost to prevent data loss
        }
        # User-supplied settings are applied on top (conflicts with the keys
        # above were already rejected in __init__).
        if self._config.adapter_config.app_custom_config:
            app_configuration.update(self._config.adapter_config.app_custom_config)
        self._app.conf.update(app_configuration)
        # Declare the main queue plus any configured dead-letter queues, each
        # with a direct exchange keyed by the queue name.
        queue_config = {
            self._config.queue_name: {
                "exchange": self._config.queue_name,
                "exchange_type": "direct",
                "routing_key": self._config.queue_name,
            },
        }
        if self._config.failed_task_queue_name:
            queue_config[self._config.failed_task_queue_name] = {
                "exchange": self._config.failed_task_queue_name,
                "exchange_type": "direct",
                "routing_key": self._config.failed_task_queue_name,
            }
        if self._config.unprocessable_task_queue_name:
            queue_config[self._config.unprocessable_task_queue_name] = {
                "exchange": self._config.unprocessable_task_queue_name,
                "exchange_type": "direct",
                "routing_key": self._config.unprocessable_task_queue_name,
            }
        self._app.conf.update(
            task_queues=queue_config,
            task_routes={
                # Default tasks go to main queue
                "*": {"queue": self._config.queue_name},
            },
        )
        if self._config.adapter_config.broker_transport_options is not None:
            self._app.conf.update(
                broker_transport_options=self._config.adapter_config.broker_transport_options,
            )
        # Dead-letter routing is signal-driven; only connect the handlers when
        # the corresponding queue is configured.
        if self._config.failed_task_queue_name:
            task_failure.connect(self._handle_task_failure)
        if self._config.unprocessable_task_queue_name:
            task_unknown.connect(self._handle_unknown_task)

    def register_task_handle(self, func, name=None):
        """Register a synchronous task handler with retry/backoff defaults."""
        # Celery does not support async task handlers
        if asyncio.iscoroutinefunction(func):
            raise NotImplementedError(
                "Async task handlers are not supported yet. "
                "Please use synchronous functions for task handlers."
            )
        task_options = {
            "autoretry_for": (Exception,),
            "retry_kwargs": {"max_retries": self._config.max_retries},
            "retry_backoff": True,
            "retry_backoff_max": 60,  # Max backoff of 60 seconds
            "retry_jitter": False,  # Disable jitter for predictable testing
        }
        if self._config.adapter_config.task_custom_config:
            task_options.update(self._config.adapter_config.task_custom_config)
        if name:
            self._app.task(name=name, **task_options)(func)
        else:
            self._app.task(**task_options)(func)

    def enqueue_task_sync(
        self, task_name, args=None, kwargs=None, **options
    ) -> TaskResult:
        """Send a task to the main queue and return its initial TaskResult."""
        task_response = self._app.send_task(
            task_name,
            args=args,
            kwargs=kwargs,
            queue=self._config.queue_name,
            **options,
        )
        return TaskResult(
            id=task_response.id,
            status=task_response.status,
            created_at=time.time(),
            result=task_response.result,
        )

    def get_task_status_sync(self, task_id) -> TaskResult:
        """Look up the current status/result of a task by id."""
        task_details = self._app.AsyncResult(task_id)
        return TaskResult(
            id=task_details.id,
            result=task_details.result,
            status=task_details.status,
        )

    def start_consumer(self, **kwargs):
        """Starts the Celery worker thread."""
        if self._worker_thread is not None and self._worker_thread.is_alive():
            logger.info("Celery worker thread is already running.")
            return
        # Use the Serve replica tag to give each worker a unique hostname so
        # control commands (e.g. shutdown) can be targeted at this worker only.
        unique_id = get_replica_context().replica_tag
        self._worker_hostname = f"{self._app.main}_{unique_id}"
        worker_args = [
            "worker",
            f"--hostname={self._worker_hostname}",
            "-Q",
            self._config.queue_name,
        ]
        self._worker_thread = threading.Thread(
            target=self._app.worker_main,
            args=(worker_args,),
        )
        self._worker_thread.start()
        logger.info(
            f"Celery worker thread started with hostname: {self._worker_hostname}"
        )

    def stop_consumer(self, timeout: float = 10.0):
        """Signals the Celery worker to shut down and waits for it to terminate."""
        if self._worker_thread is None or not self._worker_thread.is_alive():
            logger.info("Celery worker thread is not running.")
            return
        logger.info("Sending shutdown signal to Celery worker...")
        # Use the worker's hostname for targeted shutdown
        self._app.control.broadcast(
            "shutdown", destination=[f"celery@{self._worker_hostname}"]
        )
        self._worker_thread.join(timeout=timeout)
        if self._worker_thread.is_alive():
            logger.warning(f"Worker thread did not terminate after {timeout} seconds.")
        else:
            logger.info("Celery worker thread has stopped.")
            # Only forget the thread once it actually exited; a hung thread is
            # kept referenced so a later stop attempt can still join it.
            self._worker_thread = None

    def cancel_task_sync(self, task_id):
        """
        Cancels a task synchronously. Only supported for Redis and RabbitMQ brokers by Celery.
        More details can be found here: https://docs.celeryq.dev/en/stable/userguide/workers.html#revoke-revoking-tasks
        """
        self._app.control.revoke(task_id)

    def get_metrics_sync(self) -> Dict[str, Any]:
        """
        Returns the metrics of the Celery worker synchronously.
        More details can be found here: https://docs.celeryq.dev/en/stable/reference/celery.app.control.html#celery.app.control.Inspect.stats
        """
        return self._app.control.inspect().stats()

    def health_check_sync(self) -> List[Dict]:
        """
        Checks the health of the Celery worker synchronously.
        Returns a list of dictionaries, each containing the worker name and a dictionary with the health status.
        Example: [{'celery@192.168.1.100': {'ok': 'pong'}}]
        More details can be found here: https://docs.celeryq.dev/en/stable/reference/celery.app.control.html#celery.app.control.Control.ping
        """
        return self._app.control.ping()

    def _handle_task_failure(
        self,
        sender: Any = None,
        task_id: str = None,
        args: Any = None,
        kwargs: Any = None,
        einfo: Any = None,
        **kw,
    ):
        """Handle task failures and route them to appropriate dead letter queues.
        This method is called when a task fails after all retry attempts have been
        exhausted. It logs the failure and moves the task to failed_task_queue
        Args:
            sender: The task object that failed
            task_id: Unique identifier of the failed task
            args: Positional arguments passed to the task
            kwargs: Keyword arguments passed to the task
            einfo: Exception info object containing exception details and traceback
            **kw: Additional keyword arguments passed by Celery
        """
        logger.info(
            f"Task failure detected for task_id: {task_id}, einfo: {str(einfo)}"
        )
        # Stringify everything: DLQ messages must be serializable regardless
        # of the original task's argument types.
        dlq_args = [
            task_id,
            str(einfo.exception),
            str(args),
            str(kwargs),
            str(einfo),
        ]
        if self._config.failed_task_queue_name:
            self._move_task_to_queue(
                self._config.failed_task_queue_name,
                sender.name,
                dlq_args,
            )
            logger.error(
                f"Task {task_id} failed after max retries. Exception: {einfo}. Moved it to the {self._config.failed_task_queue_name} queue."
            )

    def _handle_unknown_task(
        self,
        sender: Any = None,
        name: str = None,
        id: str = None,
        message: Any = None,
        exc: Any = None,
        **kwargs,
    ):
        """Handle unknown or unregistered tasks received by Celery.
        This method is called when Celery receives a task that it doesn't recognize
        (i.e., a task that hasn't been registered with the Celery app). These tasks
        are moved to the unprocessable task queue if configured.
        Args:
            sender: The Celery app or worker that detected the unknown task
            name: Name of the unknown task
            id: Task ID of the unknown task
            message: The raw message received for the unknown task
            exc: The exception raised when trying to process the unknown task
            **kwargs: Additional context information from Celery
        """
        logger.info(
            f"Unknown task detected by Celery. Name: {name}, ID: {id}, Exc: {str(exc)}"
        )
        if self._config.unprocessable_task_queue_name:
            self._move_task_to_queue(
                self._config.unprocessable_task_queue_name,
                name,
                [
                    name,
                    id,
                    str(message),
                    str(exc),
                    str(kwargs),
                ],
            )

    def _move_task_to_queue(self, queue_name: str, task_name: str, args: list):
        """Helper function to move a task to a specified queue."""
        try:
            logger.info(
                f"Moving task: {task_name} to queue: {queue_name}, args: {args}"
            )
            self._app.send_task(
                name=task_name,
                queue=queue_name,
                args=args,
            )
        except Exception as e:
            logger.error(
                f"Failed to move task: {task_name} to queue: {queue_name}, error: {e}"
            )
            # NOTE(review): plain `raise` would be the conventional re-raise
            # here; `raise e` behaves equivalently in Python 3 but is unusual.
            raise e
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/task_processor.py",
"license": "Apache License 2.0",
"lines": 304,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/serve/tests/test_task_processor.py | import json
import os
import sys
import tempfile
from collections import defaultdict
from pathlib import Path
import pytest
import ray
from ray import serve
from ray._common.test_utils import SignalActor, wait_for_condition
from ray.serve.schema import CeleryAdapterConfig, TaskProcessorConfig
from ray.serve.task_consumer import (
instantiate_adapter_from_config,
task_consumer,
task_handler,
)
from ray.tests.conftest import external_redis # noqa: F401
@ray.remote
class ProcessedTasksTracker:
    """Actor that records the distinct task payloads processed across replicas."""

    def __init__(self):
        self._seen = set()

    def add_task(self, task_data):
        self._seen.add(task_data)

    def get_processed_tasks(self):
        return self._seen

    def get_count(self):
        return len(self._seen)
@ray.remote
def send_request_to_queue(
    processor_config: TaskProcessorConfig, data, task_name="process_request"
):
    """Enqueue one task through a freshly built adapter and return its task id."""
    adapter = instantiate_adapter_from_config(task_processor_config=processor_config)
    enqueued = adapter.enqueue_task_sync(task_name, args=[data])
    assert enqueued.id is not None
    return enqueued.id
@pytest.fixture(scope="function")
def temp_queue_directory():
    """Yield a temp dir with 'queue', 'results', and 'control' subdirectories
    for filesystem-broker task consumer tests."""
    with tempfile.TemporaryDirectory() as tmpdir:
        root = Path(tmpdir)
        paths = {}
        for sub in ("queue", "results", "control"):
            child = root / sub
            child.mkdir()
            paths[f"{sub}_path"] = child
        yield paths
@pytest.fixture(scope="function")
def transport_options(temp_queue_directory):
    """Standard transport options for Celery's filesystem broker."""
    queue_dir = str(temp_queue_directory["queue_path"])
    control_dir = str(temp_queue_directory["control_path"])
    return {
        # Incoming messages, outgoing results, and the processed-message
        # archive all share the queue directory.
        "data_folder_in": queue_dir,
        "data_folder_out": queue_dir,
        "data_folder_processed": queue_dir,
        # Celery management and control commands get their own folder.
        "control_folder": control_dir,
    }
@pytest.fixture(scope="function")
def create_processor_config(temp_queue_directory, transport_options):
    """Factory fixture producing TaskProcessorConfig objects with test defaults."""

    def _create(
        failed_task_queue_name=None, unprocessable_task_queue_name=None, **kwargs
    ):
        results_path = temp_queue_directory["results_path"]
        params = {
            "queue_name": "my_default_app_queue",
            "adapter_config": CeleryAdapterConfig(
                broker_url="filesystem://",
                backend_url=f"file://{results_path}",
                broker_transport_options=transport_options,
            ),
        }
        # Dead-letter queues are only configured when explicitly requested.
        if failed_task_queue_name is not None:
            params["failed_task_queue_name"] = failed_task_queue_name
        if unprocessable_task_queue_name is not None:
            params["unprocessable_task_queue_name"] = unprocessable_task_queue_name
        params.update(kwargs)
        return TaskProcessorConfig(**params)

    return _create
def _get_task_counts_by_routing_key(queue_path):
    """Counts tasks in a queue directory by reading the routing key from each message.

    Args:
        queue_path: Path to the filesystem broker's queue directory.

    Returns:
        defaultdict mapping routing key -> number of pending messages.
    """
    counts = defaultdict(int)
    if not queue_path.exists():
        return counts
    # Celery doesn't provide a way to get the queue size,
    # so we have to leverage the broker's API to get the queue size.
    # Since we are using the filesystem broker in tests, we can read the files in the queue directory to get the queue size.
    for msg_file in queue_path.iterdir():
        if msg_file.is_file():
            try:
                with open(msg_file, "r") as f:
                    data = json.load(f)
                routing_key = (
                    data.get("properties", {})
                    .get("delivery_info", {})
                    .get("routing_key")
                )
                if routing_key:
                    counts[routing_key] += 1
            except (json.JSONDecodeError, IOError):
                # Ignore files that aren't valid JSON or are otherwise unreadable
                continue
    return counts
@pytest.mark.skipif(sys.platform == "win32", reason="Flaky on Windows.")
class TestTaskConsumerWithRayServe:
"""Test task consumer integration with Ray Serve."""
    def test_task_consumer_as_serve_deployment(
        self, temp_queue_directory, serve_instance, create_processor_config
    ):
        """Test that task consumers can be used as Ray Serve deployments."""
        processor_config = create_processor_config()

        @serve.deployment(max_ongoing_requests=1)
        @task_consumer(task_processor_config=processor_config)
        class ServeTaskConsumer:
            def __init__(self):
                self.data_received = None
                self.task_received = False

            @task_handler(name="process_request")
            def process_request(self, data):
                self.task_received = True
                self.data_received = data

            def assert_task_received(self):
                assert self.task_received is True
                assert self.data_received is not None
                assert self.data_received == "test_data_1"

        # Deploy the consumer as a Serve deployment
        handle = serve.run(ServeTaskConsumer.bind())
        send_request_to_queue.remote(processor_config, "test_data_1")

        # Poll via the deployment handle: delivery through the queue is
        # asynchronous, so the assertion method is retried until it passes.
        def assert_result():
            try:
                # `assert_task_received` will throw AssertionError if the task was not received or data is not as expected
                handle.assert_task_received.remote().result()
                return True
            except Exception:
                return False

        wait_for_condition(assert_result)
    def test_task_consumer_as_serve_deployment_with_failed_task(
        self, temp_queue_directory, serve_instance, create_processor_config
    ):
        """A handler that always raises must end FAILURE after all retries."""
        processor_config = create_processor_config(
            failed_task_queue_name="my_failed_task_queue"
        )

        @serve.deployment(max_ongoing_requests=1)
        @task_consumer(task_processor_config=processor_config)
        class ServeTaskConsumer:
            def __init__(self):
                self.num_calls = 0

            @task_handler(name="process_request")
            def process_request(self, data):
                self.num_calls += 1
                raise ValueError("Task failed as expected")

            def get_num_calls(self):
                return self.num_calls

        handle = serve.run(ServeTaskConsumer.bind())
        task_id_ref = send_request_to_queue.remote(processor_config, "test_data_1")
        task_id = ray.get(task_id_ref)
        adapter_instance = instantiate_adapter_from_config(
            task_processor_config=processor_config
        )

        # Expect FAILURE status carrying the original exception, and exactly
        # one initial attempt plus max_retries retries.
        def assert_result():
            result = adapter_instance.get_task_status_sync(task_id)
            if (
                result.status == "FAILURE"
                and result.result is not None
                and isinstance(result.result, ValueError)
                and str(result.result) == "Task failed as expected"
                and handle.get_num_calls.remote().result()
                == 1 + processor_config.max_retries
            ):
                return True
            else:
                return False

        wait_for_condition(assert_result, timeout=20)
    def test_task_consumer_persistence_across_restarts(
        self, temp_queue_directory, serve_instance, create_processor_config
    ):
        """Test that tasks persist in queue and get executed after deployment restart."""
        # Setup
        config = create_processor_config()
        tracker = ProcessedTasksTracker.remote()
        signal1 = SignalActor.remote()

        @serve.deployment(
            num_replicas=1, graceful_shutdown_timeout_s=60, max_ongoing_requests=1
        )
        @task_consumer(task_processor_config=config)
        class TaskConsumer:
            def __init__(self, tracker_ref, signal_ref):
                self.tracker, self.signal = tracker_ref, signal_ref
                self.local_processed = []

            @task_handler(name="process_request")
            def process_request(self, data):
                ray.get(self.signal.wait.remote())  # Block until signal
                self.local_processed.append(data)
                ray.get(self.tracker.add_task.remote(data))
                return f"Processed: {data}"

            def get_local_processed(self):
                return self.local_processed

        # Deploy first version and send tasks
        serve.run(TaskConsumer.bind(tracker, signal1), name="app_v1")
        num_tasks = 20
        for i in range(num_tasks):
            ray.get(send_request_to_queue.remote(config, f"task_{i}"))
        # Process exactly 1 task, then restart deployment
        wait_for_condition(
            lambda: ray.get(signal1.cur_num_waiters.remote()) == 1, timeout=10
        )
        ray.get(signal1.send.remote(clear=True))  # Allow 1 task to complete
        wait_for_condition(lambda: ray.get(tracker.get_count.remote()) == 1, timeout=10)
        # Shutdown first deployment
        serve.delete("app_v1", _blocking=False)
        ray.get(signal1.send.remote())  # Release any stuck tasks
        wait_for_condition(
            lambda: "app_v1" not in serve.status().applications, timeout=100
        )
        # Graceful shutdown lets in-flight tasks drain, so more than one task
        # may have completed — but not all of them.
        tasks_before_restart = ray.get(tracker.get_count.remote())
        assert (
            tasks_before_restart >= 2 and tasks_before_restart < num_tasks
        ), f"Expected at least 2 tasks processed and atleast one less than num_tasks, got {tasks_before_restart}"
        # Deploy second version and process remaining tasks
        signal2 = SignalActor.remote()
        handle = serve.run(TaskConsumer.bind(tracker, signal2), name="app_v2")
        wait_for_condition(
            lambda: ray.get(signal2.cur_num_waiters.remote()) == 1, timeout=10
        )
        ray.get(signal2.send.remote())  # Process all remaining tasks
        wait_for_condition(
            lambda: ray.get(tracker.get_count.remote()) == num_tasks, timeout=100
        )
        # Verify all tasks were processed and distributed correctly
        expected_tasks = {f"task_{i}" for i in range(num_tasks)}
        final_tasks = ray.get(tracker.get_processed_tasks.remote())
        second_deployment_tasks = handle.get_local_processed.remote().result()
        assert (
            final_tasks == expected_tasks
        ), f"Missing tasks: {expected_tasks - final_tasks}"
        assert (
            len(second_deployment_tasks) == num_tasks - tasks_before_restart
        ), f"Second deployment processed {len(second_deployment_tasks)} tasks, expected {num_tasks - tasks_before_restart}"
    def test_task_consumer_as_serve_deployment_with_async_task_handler(
        self, temp_queue_directory, serve_instance, create_processor_config
    ):
        """Test that Celery adapter raises NotImplementedError for async task handlers."""
        processor_config = create_processor_config()

        @serve.deployment(max_ongoing_requests=1)
        @task_consumer(task_processor_config=processor_config)
        class ServeTaskConsumer:
            def __init__(self):
                self.data_received = None
                self.task_received = False

            # This async task handler should raise NotImplementedError when registered
            @task_handler(name="process_request")
            async def process_request(self, data):
                self.task_received = True
                self.data_received = data

        # Error is raised during deployment initialization when Celery adapter
        # tries to register the async handler. The deployment fails with a
        # RuntimeError (the underlying NotImplementedError is logged but wrapped).
        with pytest.raises(RuntimeError):
            serve.run(ServeTaskConsumer.bind())
    def test_task_consumer_metrics(
        self, temp_queue_directory, serve_instance, create_processor_config
    ):
        """Test that task processor metrics are collected and exposed correctly."""
        processor_config = create_processor_config()

        @serve.deployment(max_ongoing_requests=1)
        @task_consumer(task_processor_config=processor_config)
        class ServeTaskConsumer:
            def __init__(self):
                self.task_received = False

            @task_handler(name="process_request")
            def process_request(self, data):
                self.task_received = True

            def get_task_received(self) -> bool:
                return self.task_received

        handle = serve.run(ServeTaskConsumer.bind())
        send_request_to_queue.remote(processor_config, "test_data_1")

        def assert_task_received():
            return handle.get_task_received.remote().result()

        wait_for_condition(assert_task_received, timeout=20)
        adapter_instance = instantiate_adapter_from_config(
            task_processor_config=processor_config
        )
        # Worker stats come from Celery's inspect API; exactly one worker is
        # expected since the deployment has a single replica.
        metrics = adapter_instance.get_metrics_sync()
        assert len(metrics) == 1
        worker_name = next(iter(metrics))
        worker_stats = metrics[worker_name]
        # Check that the total number of processed tasks is correct.
        assert worker_stats["pool"]["threads"] == 1
        assert worker_stats["pool"]["max-concurrency"] == 1
        assert worker_stats["total"]["process_request"] == 1
        assert worker_stats["broker"]["transport"] == "filesystem"
    def test_task_consumer_health_check(
        self, temp_queue_directory, serve_instance, create_processor_config
    ):
        """Test that the health check for the task processor works correctly."""
        processor_config = create_processor_config()

        @serve.deployment(max_ongoing_requests=1)
        @task_consumer(task_processor_config=processor_config)
        class ServeTaskConsumer:
            pass

        serve.run(ServeTaskConsumer.bind())
        adapter_instance = instantiate_adapter_from_config(
            task_processor_config=processor_config
        )

        def check_health():
            health_status = adapter_instance.health_check_sync()
            return len(health_status) > 0

        # Wait for the worker to be ready
        wait_for_condition(check_health, timeout=20)
        # Exactly one worker should reply, and the ping reply format is
        # {worker_name: {"ok": "pong"}}.
        health_status = adapter_instance.health_check_sync()
        assert len(health_status) == 1
        worker_reply = health_status[0]
        assert len(worker_reply) == 1
        worker_name = next(iter(worker_reply))
        assert worker_reply[worker_name] == {"ok": "pong"}
def test_task_processor_with_cancel_tasks_and_app_custom_config(
    self, external_redis, serve_instance  # noqa: F811
):
    """Test the cancel task functionality with celery broker."""
    redis_address = os.environ.get("RAY_REDIS_ADDRESS")
    processor_config = TaskProcessorConfig(
        queue_name="my_app_queue",
        adapter_config=CeleryAdapterConfig(
            broker_url=f"redis://{redis_address}/0",
            backend_url=f"redis://{redis_address}/1",
            # prefetch=1 keeps the second task in the broker queue (not
            # reserved by the worker) so it can still be revoked before it
            # starts executing.
            app_custom_config={"worker_prefetch_multiplier": 1},
        ),
    )
    signal = SignalActor.remote()

    @serve.deployment(max_ongoing_requests=1)
    @task_consumer(task_processor_config=processor_config)
    class MyTaskConsumer:
        def __init__(self, signal_actor):
            self._signal = signal_actor
            self.message_received = []

        @task_handler(name="process")
        def process(self, data):
            # Block until the test releases the signal, pinning task 0
            # "in flight" while task 1 waits in the queue.
            ray.get(self._signal.wait.remote())
            self.message_received.append(data)

        def get_message_received(self):
            return self.message_received

    handle = serve.run(MyTaskConsumer.bind(signal), name="app_v1")
    task_ids = []
    for i in range(2):
        task_id_ref = send_request_to_queue.remote(
            processor_config, f"test_data_{i}", task_name="process"
        )
        task_ids.append(ray.get(task_id_ref))
    # Wait until the first task is blocked inside the handler.
    wait_for_condition(
        lambda: ray.get(signal.cur_num_waiters.remote()) == 1, timeout=10
    )
    adapter_instance = instantiate_adapter_from_config(
        task_processor_config=processor_config
    )
    # Cancel the still-queued second task, then unblock the first one.
    adapter_instance.cancel_task_sync(task_ids[1])
    ray.get(signal.send.remote())

    def check_revoked():
        status = adapter_instance.get_task_status_sync(task_ids[1])
        return status.status == "REVOKED"

    wait_for_condition(check_revoked, timeout=20)
    # Only the first task ran; the revoked one never reached the handler.
    assert "test_data_0" in handle.get_message_received.remote().result()
    assert "test_data_1" not in handle.get_message_received.remote().result()
    serve.delete("app_v1")
def test_task_consumer_with_task_custom_config(
    self, temp_queue_directory, serve_instance, create_processor_config
):
    """Test that task consumer works with app custom config."""
    processor_config = create_processor_config()
    # Retry aggressively: up to 10 retries with backoff capped at 1s, so the
    # always-failing handler below should be invoked 11 times in total
    # (1 original attempt + 10 retries).
    processor_config.adapter_config.task_custom_config = {
        "retry_backoff_max": 1,
        "retry_kwargs": {"max_retries": 10},
    }

    @serve.deployment(max_ongoing_requests=1)
    @task_consumer(task_processor_config=processor_config)
    class ServeTaskConsumer:
        def __init__(self):
            self.num_calls = 0

        @task_handler(name="process_request")
        def process_request(self, data):
            self.num_calls += 1
            # Always fail so every delivery triggers a retry.
            raise ValueError("Task failed as expected")

        def get_num_calls(self):
            return self.num_calls

    handle = serve.run(ServeTaskConsumer.bind())
    send_request_to_queue.remote(processor_config, "test_data_0")
    # 1 initial attempt + 10 retries = 11 calls.
    wait_for_condition(
        lambda: handle.get_num_calls.remote().result() == 11, timeout=20
    )
def test_task_consumer_failed_task_queue_consumption(
    self, temp_queue_directory, serve_instance, create_processor_config
):
    """Test that failed tasks can be consumed from the failed task queue with the correct arguments."""
    # Create first processor config with failed task queue
    failed_queue_name = "failed_task_queue"
    failing_processor_config = create_processor_config(
        failed_task_queue_name=failed_queue_name
    )
    # Create second processor config that consumes from the failed queue
    failed_processor_config = create_processor_config()
    failed_processor_config.queue_name = failed_queue_name

    # First consumer that always fails
    @serve.deployment(max_ongoing_requests=1)
    @task_consumer(task_processor_config=failing_processor_config)
    class FailingTaskConsumer:
        @task_handler(name="process_request")
        def process_request(self, data):
            raise ValueError("Test error message from first consumer")

    # Second consumer that processes failed tasks
    @serve.deployment(max_ongoing_requests=1)
    @task_consumer(task_processor_config=failed_processor_config)
    class FailedTaskConsumer:
        def __init__(self):
            self.received_error = None
            self.received_task_id = None
            self.received_original_args = None

        # Failed-queue messages carry the failure metadata as arguments:
        # original task id, exception text, original args/kwargs, and einfo.
        @task_handler(name="process_request")
        def process_request(self, task_id, exception_msg, args, kwargs, einfo):
            self.received_task_id = task_id
            self.received_error = exception_msg
            self.received_original_args = args

        def get_received_error(self):
            return self.received_error

        def get_received_task_id(self):
            return self.received_task_id

        def get_received_original_args(self):
            return self.received_original_args

    # Deploy both consumers
    serve.run(
        FailingTaskConsumer.bind(),
        name="failing_task_consumer",
        route_prefix="/failing_task_consumer",
    )
    handle_2 = serve.run(
        FailedTaskConsumer.bind(),
        name="failed_task_consumer",
        route_prefix="/failed_task_consumer",
    )
    # Send a task to the first consumer (which will fail)
    task_id = send_request_to_queue.remote(failing_processor_config, "test_data_1")

    # Verify the received data
    def assert_failed_task_received():
        received_error = handle_2.get_received_error.remote().result()
        received_task_id = handle_2.get_received_task_id.remote().result()
        received_original_args = (
            handle_2.get_received_original_args.remote().result()
        )
        # The original args arrive stringified, not as a Python list.
        args_data = "['test_data_1']"
        err_msg = "ValueError: Test error message from first consumer"
        assert err_msg in received_error
        assert received_task_id == ray.get(task_id)
        assert received_original_args == args_data
        return True

    wait_for_condition(assert_failed_task_received, timeout=20)
def test_multiple_task_consumers_in_single_app(
    self, temp_queue_directory, serve_instance, create_processor_config
):
    """Test that multiple task consumers can coexist in a single Ray Serve application."""
    # Two independent queues: one per consumer deployment.
    orchestrator_config = create_processor_config()
    orchestrator_config.queue_name = "orchestrator_queue"
    worker_config = create_processor_config()
    worker_config.queue_name = "worker_queue"

    @serve.deployment(name="worker-deployment")
    @task_consumer(task_processor_config=worker_config)
    class WorkerTaskConsumer:
        def __init__(self):
            self.task_count = 0

        @task_handler(name="process_data")
        def process_data(self, payload):
            self.task_count += 1
            return f"Worker processed: {payload}"

        def get_worker_task_count(self):
            return self.task_count

    @serve.deployment(name="orchestrator-deployment")
    @task_consumer(task_processor_config=orchestrator_config)
    class OrchestratorTaskConsumer:
        def __init__(self, worker_deployment):
            self.worker_deployment = worker_deployment
            self.message_received = []

        @task_handler(name="orchestrate_task")
        def orchestrate_task(self, payload):
            # Fan the work out to the worker consumer via its own queue.
            send_request_to_queue.remote(
                worker_config, payload, task_name="process_data"
            )
            self.message_received.append(payload)
            return f"Orchestrated complex task for payload: {payload}"

        async def get_worker_task_count(self):
            # Proxy the worker's counter through the orchestrator handle.
            return await self.worker_deployment.get_worker_task_count.remote()

        def get_message_received(self):
            return self.message_received

    worker_deployment = WorkerTaskConsumer.bind()
    orchestrator_deployment = OrchestratorTaskConsumer.bind(worker_deployment)
    handle = serve.run(orchestrator_deployment, name="multi_consumer_app")
    num_tasks_to_send = 3
    data_sent_to_orchestrator = []
    for i in range(num_tasks_to_send):
        data_id = f"data_{i}"
        send_request_to_queue.remote(
            orchestrator_config, data_id, task_name="orchestrate_task"
        )
        data_sent_to_orchestrator.append(data_id)

    # Wait for tasks to be processed properly
    def check_data_processed_properly():
        worker_count = handle.get_worker_task_count.remote().result()
        data_received_by_orchestrator = (
            handle.get_message_received.remote().result()
        )
        # Every payload must pass through the orchestrator AND reach the worker.
        return worker_count == num_tasks_to_send and set(
            data_received_by_orchestrator
        ) == set(data_sent_to_orchestrator)

    wait_for_condition(check_data_processed_properly, timeout=300)
def test_task_consumer_with_one_queue_and_multiple_different_tasks(
    self, temp_queue_directory, serve_instance, create_processor_config
):
    """Test that task consumers can handle multiple different tasks in the same queue."""
    processor_config = create_processor_config()

    @serve.deployment
    @task_consumer(task_processor_config=processor_config)
    class MyTaskConsumer:
        def __init__(self):
            self.message_received = []

        @task_handler(name="process_data")
        def process_data(self, data):
            self.message_received.append(data)

        @task_handler(name="process_data2")
        def process_data2(self, data):
            self.message_received.append(data)

        def get_message_received(self):
            return self.message_received

    handle = serve.run(MyTaskConsumer.bind())
    # Interleave the two task types on the same queue; dispatch to the right
    # handler is by task name.
    send_request_to_queue.remote(
        processor_config, "test_data_1", task_name="process_data"
    )
    send_request_to_queue.remote(
        processor_config, "test_data_2", task_name="process_data2"
    )
    send_request_to_queue.remote(
        processor_config, "test_data_3", task_name="process_data"
    )
    wait_for_condition(
        lambda: "test_data_1" in handle.get_message_received.remote().result()
    )
    wait_for_condition(
        lambda: "test_data_2" in handle.get_message_received.remote().result()
    )
    wait_for_condition(
        lambda: "test_data_3" in handle.get_message_received.remote().result()
    )
@pytest.mark.skipif(sys.platform == "win32", reason="Flaky on Windows.")
class TestTaskConsumerWithDLQsConfiguration:
    """Test task consumer with dead letter queues."""

    def _assert_queue_counts(
        self,
        temp_queue_directory,
        processor_config,
        expected_main=0,
        expected_unprocessable=0,
        expected_failed=0,
        timeout=15,
    ):
        """Helper to assert expected task counts in different queues.

        Polls the filesystem-broker directory until the main, unprocessable,
        and failed queues hold exactly the expected number of tasks, or
        ``wait_for_condition`` times out after ``timeout`` seconds.
        """

        def check_counts():
            queue_path = Path(temp_queue_directory["queue_path"])
            counts = _get_task_counts_by_routing_key(queue_path)
            main_count = counts.get(processor_config.queue_name, 0)
            # DLQ names are optional on the config, so default to "" (never a
            # real routing key) when the attribute is absent.
            unprocessable_count = counts.get(
                getattr(processor_config, "unprocessable_task_queue_name", ""), 0
            )
            failed_count = counts.get(
                getattr(processor_config, "failed_task_queue_name", ""), 0
            )
            return (
                main_count == expected_main
                and unprocessable_count == expected_unprocessable
                and failed_count == expected_failed
            )

        wait_for_condition(check_counts, timeout=timeout)

    def test_task_consumer_as_serve_deployment_with_unknown_task(
        self, temp_queue_directory, serve_instance, create_processor_config
    ):
        """Test that unknown tasks are sent to the unprocessable task queue."""
        processor_config = create_processor_config(
            unprocessable_task_queue_name="unprocessable_task_queue"
        )

        @serve.deployment(max_ongoing_requests=1)
        @task_consumer(task_processor_config=processor_config)
        class ServeTaskConsumer:
            @task_handler(name="process_request")
            def process_request(self, data):
                pass

        serve.run(ServeTaskConsumer.bind())
        # Send a task with an unknown name
        send_request_to_queue.remote(
            processor_config, "test_data_1", task_name="unregistered_task"
        )
        self._assert_queue_counts(
            temp_queue_directory,
            processor_config,
            expected_main=0,
            expected_unprocessable=1,
            timeout=10,
        )

    def test_task_consumer_as_serve_deployment_with_failed_task_and_dead_letter_queue(
        self, temp_queue_directory, serve_instance, create_processor_config
    ):
        """Test that failed tasks are sent to the failed task queue."""
        processor_config = create_processor_config(
            failed_task_queue_name="failed_task_queue"
        )

        @serve.deployment(max_ongoing_requests=1)
        @task_consumer(task_processor_config=processor_config)
        class ServeTaskConsumer:
            @task_handler(name="process_request")
            def process_request(self, data):
                raise ValueError("Task failed as expected")

        serve.run(ServeTaskConsumer.bind())
        send_request_to_queue.remote(processor_config, "test_data_1")
        self._assert_queue_counts(
            temp_queue_directory, processor_config, expected_main=0, expected_failed=1
        )

    def test_task_consumer_with_mismatched_arguments(
        self, temp_queue_directory, serve_instance, create_processor_config
    ):
        """Test that tasks invoked with the wrong argument count end up in the
        failed task queue.

        The TypeError is raised while executing the handler, so it counts as a
        task failure (failed queue), not an unprocessable message.
        """
        processor_config = create_processor_config(
            unprocessable_task_queue_name="unprocessable_task_queue",
            failed_task_queue_name="failed_task_queue",
        )

        @serve.deployment(max_ongoing_requests=1)
        @task_consumer(task_processor_config=processor_config)
        class ServeTaskConsumer:
            @task_handler(name="process_request")
            def process_request(self, arg1, arg2):  # Expects two arguments
                pass

        serve.run(ServeTaskConsumer.bind())
        # Send a task with only one argument, which should cause a TypeError
        send_request_to_queue.remote(processor_config, ["test_data_1"])
        self._assert_queue_counts(
            temp_queue_directory,
            processor_config,
            expected_main=0,
            expected_failed=1,
        )

    def test_task_consumer_with_argument_type_mismatch(
        self, temp_queue_directory, serve_instance, create_processor_config
    ):
        """Test that tasks whose argument type breaks the handler are sent to
        the failed task queue (len() raising TypeError is a task failure)."""
        processor_config = create_processor_config(
            unprocessable_task_queue_name="unprocessable_task_queue",
            failed_task_queue_name="failed_task_queue",
        )

        @serve.deployment(max_ongoing_requests=1)
        @task_consumer(task_processor_config=processor_config)
        class ServeTaskConsumer:
            @task_handler(name="process_request")
            def process_request(self, data: str):
                return len(data)  # This will fail if data is not a sequence

        serve.run(ServeTaskConsumer.bind())
        # Send an integer, for which len() is undefined, causing a TypeError
        send_request_to_queue.remote(processor_config, 12345)
        self._assert_queue_counts(
            temp_queue_directory,
            processor_config,
            expected_main=0,
            expected_failed=1,
        )
# Allow running this test file directly; delegate to pytest and propagate
# its exit code.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", "-s", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/test_task_processor.py",
"license": "Apache License 2.0",
"lines": 682,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/serve/tests/unit/test_task_consumer.py | import sys
import uuid
from typing import Any, Dict, List
from unittest.mock import MagicMock, call
import pytest
from ray.serve.api import deployment
from ray.serve.schema import (
CeleryAdapterConfig,
TaskProcessorAdapter,
TaskProcessorConfig,
TaskResult,
)
from ray.serve.task_consumer import task_consumer, task_handler
class MockTaskProcessorAdapter(TaskProcessorAdapter):
    """Mock adapter for testing task processor functionality."""

    # Flipped to True when start_consumer / stop_consumer run, so tests can
    # assert the consumer lifecycle was exercised.
    _start_consumer_received: bool = False
    _stop_consumer_received: bool = False

    def __init__(self, config: TaskProcessorConfig):
        self._config = config
        # Records every register_task_handle(...) call for later assertions.
        self.register_task_handle_mock = MagicMock()

    def initialize(self, consumer_concurrency: int = 3):
        # No real broker to connect to; nothing to initialize.
        pass

    def register_task_handle(self, func, name=None):
        # Delegate to the MagicMock so tests can inspect call order and args.
        self.register_task_handle_mock(func, name=name)

    def enqueue_task_sync(
        self, task_name, args=None, kwargs=None, **options
    ) -> TaskResult:
        # Not exercised by these tests; intentionally a no-op (returns None).
        pass

    def get_task_status_sync(self, task_id) -> TaskResult:
        pass

    def start_consumer(self, **kwargs):
        self._start_consumer_received = True

    def stop_consumer(self, timeout: float = 10.0):
        self._stop_consumer_received = True

    def cancel_task_sync(self, task_id) -> bool:
        pass

    def get_metrics_sync(self) -> Dict[str, Any]:
        pass

    def health_check_sync(self) -> List[Dict]:
        pass
@pytest.fixture
def config():
    """Provide a TaskProcessorConfig wired to the mock adapter.

    A unique queue name is generated on every invocation so tests never
    share queue state with each other.
    """
    unique_queue = "test_queue_" + uuid.uuid4().hex
    celery_cfg = CeleryAdapterConfig(broker_url="fake://", backend_url="fake://")
    return TaskProcessorConfig(
        queue_name=unique_queue,
        adapter_config=celery_cfg,
        adapter=MockTaskProcessorAdapter,
    )
class TestTaskHandlerDecorator:
    """Test the task_handler decorator."""

    def _create_and_test_handler(self, decorator_args=None, expected_name=None):
        """Helper to create and test a task handler.

        Builds a handler with ``@task_handler`` (bare when ``decorator_args``
        is None, otherwise passing the given kwargs), verifies the decorated
        function still executes, and checks the recorded task name.
        """
        mock = MagicMock()
        if decorator_args is None:

            @task_handler
            def test_handler():
                mock()

        else:

            @task_handler(**decorator_args)
            def test_handler():
                mock()

        # The decorator must not break normal invocation.
        test_handler()
        assert mock.call_count == 1
        assert test_handler._task_name == expected_name

    def test_task_handler_decorator_with_name(self):
        # An explicit name overrides the function's __name__.
        self._create_and_test_handler(
            decorator_args={"name": "my_task"}, expected_name="my_task"
        )

    def test_task_handler_decorator_without_name(self):
        # Without a name, the function's __name__ is used.
        self._create_and_test_handler(expected_name="test_handler")

    @pytest.mark.parametrize("invalid_name", ["", " ", 123])
    def test_task_handler_decorator_invalid_name(self, invalid_name):
        """Test various invalid task names."""
        with pytest.raises(
            ValueError,
            match=f"Task name must be a non-empty string, got {invalid_name}",
        ):

            @task_handler(name=invalid_name)
            def my_task_handler():
                pass

    def test_task_handler_on_callable_object_without_name_attr(self):
        """Test that AttributeError is raised for callables with no __name__."""

        class MyCallable:
            """A simple callable class without a __name__ attribute on instances."""

            def __call__(self):
                pass

        with pytest.raises(AttributeError):
            task_handler(MyCallable())
class TestTaskConsumerDecorator:
    """Test the task_consumer decorator."""

    def _verify_and_cleanup(self, instance, expected_calls=None):
        """Verify consumer and cleanup instance.

        Drives the decorator-generated lifecycle hook, then checks the mock
        adapter saw start_consumer and exactly the expected
        register_task_handle calls, in order. ``expected_calls=None`` skips
        the registration checks entirely; an empty list asserts that no
        handler was registered.
        """
        # Triggers adapter initialization, handler registration, and
        # consumer start on the wrapped instance.
        instance.initialize_callable(5)
        adapter = instance._adapter
        assert adapter._start_consumer_received
        if expected_calls is not None:
            if expected_calls:
                calls = [call(method, name=name) for method, name in expected_calls]
                adapter.register_task_handle_mock.assert_has_calls(
                    calls, any_order=False
                )
                # No extra registrations beyond the expected ones.
                assert adapter.register_task_handle_mock.call_count == len(
                    expected_calls
                )
            else:
                adapter.register_task_handle_mock.assert_not_called()
        del instance

    def _run_consumer_test(
        self, config, consumer_class_factory, expected_calls_factory=None
    ):
        """Run a consumer test with factory functions."""
        consumer_class = consumer_class_factory(config)
        instance = consumer_class()
        # expected_calls is derived from the live instance so that the bound
        # methods compare equal to what the decorator registered.
        expected_calls = (
            expected_calls_factory(instance) if expected_calls_factory else None
        )
        self._verify_and_cleanup(instance, expected_calls)

    def test_task_consumer_basic(self, config):
        """Test basic functionality of the task_consumer decorator."""

        def make_consumer(cfg):
            @task_consumer(task_processor_config=cfg)
            class MyConsumer:
                @task_handler
                def my_task(self):
                    pass

            return MyConsumer

        self._run_consumer_test(
            config, make_consumer, lambda inst: [(inst.my_task, "my_task")]
        )

    def test_task_consumer_multiple_handlers(self, config):
        """Test with multiple task handlers."""

        def make_consumer(cfg):
            @task_consumer(task_processor_config=cfg)
            class MyConsumer:
                @task_handler
                def task1(self):
                    pass

                @task_handler
                def task2(self):
                    pass

            return MyConsumer

        self._run_consumer_test(
            config,
            make_consumer,
            lambda inst: [(inst.task1, "task1"), (inst.task2, "task2")],
        )

    def test_task_consumer_custom_names(self, config):
        """Test task handlers with and without custom names."""

        def make_consumer(cfg):
            @task_consumer(task_processor_config=cfg)
            class MyConsumer:
                @task_handler(name="custom_task")
                def task1(self):
                    pass

                @task_handler
                def task2(self):
                    pass

            return MyConsumer

        self._run_consumer_test(
            config,
            make_consumer,
            lambda inst: [(inst.task1, "custom_task"), (inst.task2, "task2")],
        )

    def test_task_consumer_init_args(self, config):
        """Test that __init__ arguments are passed correctly."""

        @task_consumer(task_processor_config=config)
        class MyConsumer:
            def __init__(self, value):
                self.value = value

        instance = MyConsumer(value=42)
        assert instance.value == 42
        self._verify_and_cleanup(instance)

    def test_task_consumer_no_handlers(self, config):
        """Test with a class that has no task handlers."""

        def make_consumer(cfg):
            @task_consumer(task_processor_config=cfg)
            class MyConsumer:
                def some_method(self):
                    pass

            return MyConsumer

        # Empty expected_calls => register_task_handle must never be called.
        self._run_consumer_test(config, make_consumer, lambda inst: [])

    def test_task_consumer_inheritance(self, config):
        """Test that inherited task handlers are registered."""

        def make_consumer(cfg):
            class BaseConsumer:
                @task_handler
                def base_task(self):
                    pass

            @task_consumer(task_processor_config=cfg)
            class DerivedConsumer(BaseConsumer):
                @task_handler
                def derived_task(self):
                    pass

            return DerivedConsumer

        self._run_consumer_test(
            config,
            make_consumer,
            lambda inst: [
                (inst.base_task, "base_task"),
                (inst.derived_task, "derived_task"),
            ],
        )

    def test_task_consumer_no_args_decorator(self):
        """Test using @task_consumer without arguments raises TypeError."""
        with pytest.raises(TypeError):

            @task_consumer
            class MyConsumer:
                pass
def test_default_deployment_name_stays_same_with_task_consumer(config):
    """Test that the default deployment name is the class name when using task_consumer with serve.deployment."""

    @deployment
    @task_consumer(task_processor_config=config)
    class MyTaskConsumer:
        @task_handler
        def my_task(self):
            pass

    # The deployment name should default to the class name
    # (task_consumer must not obscure the class identity from @deployment).
    assert MyTaskConsumer.name == "MyTaskConsumer"
def test_task_consumer_preserves_metadata(config):
    """task_consumer should act like functools.wraps: the wrapped class keeps
    the original's name, qualname, module, docstring and annotations, gains
    the injected adapter annotation, and exposes the original class via
    ``__wrapped__``."""

    class OriginalConsumer:
        """Docstring for a task consumer."""

        value: int

    wrapped_cls = task_consumer(task_processor_config=config)(OriginalConsumer)
    assert wrapped_cls.__name__ == OriginalConsumer.__name__
    assert wrapped_cls.__qualname__ == OriginalConsumer.__qualname__
    assert wrapped_cls.__module__ == OriginalConsumer.__module__
    assert wrapped_cls.__doc__ == OriginalConsumer.__doc__
    assert (
        wrapped_cls.__annotations__["value"]
        == OriginalConsumer.__annotations__["value"]
    )
    # The decorator adds its own annotation for the injected adapter.
    assert wrapped_cls.__annotations__["_adapter"] is TaskProcessorAdapter
    assert getattr(wrapped_cls, "__wrapped__", None) is OriginalConsumer
# Allow running this test file directly; delegate to pytest and propagate
# its exit code.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", "-s", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/unit/test_task_consumer.py",
"license": "Apache License 2.0",
"lines": 239,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/llm/tests/serve/cpu/deployments/llm/vllm/kv_transfer_backends/test_lmcache_connector_v1.py | import sys
from unittest.mock import patch
import pytest
from ray.llm._internal.serve.engines.vllm.kv_transfer.lmcache import (
LMCacheConnectorV1Backend,
)
from ray.serve.llm import LLMConfig
class TestLMCacheConnectorV1Backend:
    """Tests for LMCacheConnectorV1Backend.setup() port/extra-config handling."""

    @pytest.fixture(autouse=True)
    def mock_lmcache_check(self):
        """Mock the lmcache installation check for all tests."""
        # lmcache isn't installed in the test environment; bypass the check.
        with patch(
            "ray.llm._internal.serve.engines.vllm.kv_transfer.lmcache._check_lmcache_installed"
        ):
            yield

    @pytest.fixture
    def lmcache_backend_basic(self):
        """Fixture for basic LMCacheConnectorV1Backend."""
        # No kv_connector_extra_config key at all.
        return LMCacheConnectorV1Backend(
            llm_config=LLMConfig(
                model_loading_config=dict(
                    model_id="Qwen/Qwen3-0.6B",
                ),
                engine_kwargs=dict(
                    kv_transfer_config=dict(
                        kv_connector="LMCacheConnectorV1",
                        kv_role="kv_both",
                    )
                ),
            ),
        )

    @pytest.fixture
    def lmcache_backend_with_extra(self):
        """Fixture for LMCacheConnectorV1Backend with extra config."""
        # kv_connector_extra_config present but empty.
        return LMCacheConnectorV1Backend(
            llm_config=LLMConfig(
                model_loading_config=dict(
                    model_id="Qwen/Qwen3-0.6B",
                ),
                engine_kwargs=dict(
                    kv_transfer_config=dict(
                        kv_connector="LMCacheConnectorV1",
                        kv_role="kv_both",
                        kv_connector_extra_config={},
                    )
                ),
            ),
        )

    @pytest.fixture
    def lmcache_backend_with_port(self):
        """Fixture for LMCacheConnectorV1Backend with port config."""
        # lmcache_rpc_port pre-set to the default port name.
        return LMCacheConnectorV1Backend(
            llm_config=LLMConfig(
                model_loading_config=dict(
                    model_id="Qwen/Qwen3-0.6B",
                ),
                engine_kwargs=dict(
                    kv_transfer_config=dict(
                        kv_connector="LMCacheConnectorV1",
                        kv_role="kv_both",
                        kv_connector_extra_config={
                            "lmcache_rpc_port": LMCacheConnectorV1Backend.DEFAULT_LMCACHE_RPC_PORT_NAME,
                        },
                    )
                ),
            ),
        )

    def test_setup_basic_config(self, lmcache_backend_basic):
        """Test setup with basic configuration (no kv_connector_extra_config)."""
        lmcache_backend_basic.setup()
        # Configuration should remain unchanged
        assert (
            "kv_connector_extra_config" not in lmcache_backend_basic.kv_transfer_config
        )

    def test_setup_with_extra_config_no_port(self, lmcache_backend_with_extra):
        """Test setup with extra config but no lmcache_rpc_port."""
        lmcache_backend_with_extra.setup()
        # Should add lmcache_rpc_port with default DEFAULT_LMCACHE_RPC_PORT_NAME + random string
        # (presumably to avoid RPC name collisions across replicas — the
        # randomization itself is asserted below, the motivation is not).
        assert (
            "lmcache_rpc_port"
            in lmcache_backend_with_extra.kv_transfer_config[
                "kv_connector_extra_config"
            ]
        )
        port_value = lmcache_backend_with_extra.kv_transfer_config[
            "kv_connector_extra_config"
        ]["lmcache_rpc_port"]
        assert port_value.startswith(
            LMCacheConnectorV1Backend.DEFAULT_LMCACHE_RPC_PORT_NAME
        )
        assert len(port_value) > len(
            LMCacheConnectorV1Backend.DEFAULT_LMCACHE_RPC_PORT_NAME
        )  # Should have random string appended

    def test_setup_with_existing_port(self, lmcache_backend_with_port):
        """Test setup with existing lmcache_rpc_port configuration."""
        original_port = lmcache_backend_with_port.kv_transfer_config[
            "kv_connector_extra_config"
        ]["lmcache_rpc_port"]
        lmcache_backend_with_port.setup()
        # Should modify the existing port by appending random string
        new_port = lmcache_backend_with_port.kv_transfer_config[
            "kv_connector_extra_config"
        ]["lmcache_rpc_port"]
        assert new_port.startswith(original_port)
        assert len(new_port) > len(original_port)  # Should have random string appended
# Allow running this test file directly; delegate to pytest and propagate
# its exit code.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/tests/serve/cpu/deployments/llm/vllm/kv_transfer_backends/test_lmcache_connector_v1.py",
"license": "Apache License 2.0",
"lines": 107,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/llm/tests/serve/cpu/deployments/llm/vllm/kv_transfer_backends/test_nixl_connector.py | import os
import sys
import uuid
from unittest.mock import patch
import pytest
from ray.llm._internal.serve.engines.vllm.kv_transfer.nixl import (
NixlConnectorBackend,
)
from ray.serve.llm import LLMConfig
@pytest.fixture
def engine_id():
    """Return a freshly generated engine ID string for each test."""
    generated = uuid.uuid4()
    return str(generated)
class TestNixlConnectorBackend:
    """Tests for NixlConnectorBackend.setup() side-channel env handling."""

    @pytest.fixture
    def nixl_backend(self, engine_id: str):
        """Fixture for the NixlConnectorBackend."""
        return NixlConnectorBackend(
            llm_config=LLMConfig(
                model_loading_config=dict(
                    model_id="Qwen/Qwen3-0.6B",
                ),
                engine_kwargs=dict(
                    kv_transfer_config=dict(
                        kv_connector="NixlConnector",
                        kv_role="kv_both",
                        engine_id=engine_id,
                    )
                ),
            ),
        )

    @pytest.mark.parametrize(
        "env_vars",
        [
            {},
            {"VLLM_NIXL_SIDE_CHANNEL_PORT": "8080"},
            {"VLLM_NIXL_SIDE_CHANNEL_HOST": "127.0.0.1"},
            {
                "VLLM_NIXL_SIDE_CHANNEL_PORT": "8080",
                "VLLM_NIXL_SIDE_CHANNEL_HOST": "127.0.0.1",
            },
        ],
    )
    def test_setup_environment_variables(self, nixl_backend, env_vars, engine_id: str):
        """Test that setup configures environment variables and overrides engine_id correctly."""
        # Each case starts from a clean environment containing only env_vars.
        with patch.dict("os.environ", env_vars, clear=True):
            nixl_backend.setup()
            # setup() must ensure both side-channel vars exist, whether or not
            # they were pre-set in the environment.
            assert "VLLM_NIXL_SIDE_CHANNEL_PORT" in os.environ
            assert "VLLM_NIXL_SIDE_CHANNEL_HOST" in os.environ
            # The configured engine_id must survive inside the (possibly
            # rewritten) engine_id value.
            assert engine_id in nixl_backend.kv_transfer_config["engine_id"]
# Allow running this test file directly; delegate to pytest and propagate
# its exit code.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/tests/serve/cpu/deployments/llm/vllm/kv_transfer_backends/test_nixl_connector.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/train/v2/jax/config.py | import logging
import os
from dataclasses import dataclass
from typing import Optional
import ray
from ray._private import ray_constants
from ray.train._internal.utils import get_address_and_port
from ray.train._internal.worker_group import WorkerGroup
from ray.train.backend import Backend, BackendConfig
from ray.train.constants import (
DEFAULT_JAX_DISTRIBUTED_SHUTDOWN_TIMEOUT_S,
JAX_DISTRIBUTED_SHUTDOWN_TIMEOUT_S,
)
from ray.util import PublicAPI
from ray.util.tpu import get_tpu_coordinator_env_vars, get_tpu_worker_resources
logger = logging.getLogger(__name__)
@PublicAPI(stability="alpha")
@dataclass
class JaxConfig(BackendConfig):
    """Configuration for the JAX distributed training backend.

    Consumed by ``_JaxBackend`` to decide whether (and how) to initialize
    ``jax.distributed`` on each worker.
    """

    # Initialize JAX distributed for TPU workers.
    use_tpu: bool = False
    # Initialize JAX distributed for CUDA/GPU workers.
    use_gpu: bool = False

    @property
    def backend_cls(self):
        # Backend implementation that performs per-worker setup/teardown.
        return _JaxBackend
def _setup_jax_distributed_environment(
    master_addr_with_port: str,
    num_workers: int,
    index: int,
    use_tpu: bool,
    use_gpu: bool,
    resources_per_worker: dict,
    jax_env_vars: Optional[dict] = None,
):
    """Set up distributed Jax training information.

    This function should be called on each worker. It sets JAX environment
    variables and initializes JAX distributed training.

    Args:
        master_addr_with_port: The master address with port for coordination.
        num_workers: Total number of workers.
        index: Index of this worker.
        use_tpu: Whether to configure for TPU. If True and JAX_PLATFORMS is not
            already set, it will be set to "tpu".
        use_gpu: Whether to configure for GPU. If True and JAX_PLATFORMS is not
            already set, it will be set to "cuda".
        resources_per_worker: The resources per worker.
        jax_env_vars: The JAX coordinator env vars to inject for multi-slice. These
            values do not override existing values if specified.
    """
    # Get JAX_PLATFORMS from environment if already set.
    # NOTE: all environment variables below are set *before* `import jax`,
    # which happens further down — keep that ordering.
    jax_platforms = os.environ.get("JAX_PLATFORMS", "").lower()
    if not jax_platforms and use_tpu:
        os.environ["JAX_PLATFORMS"] = "tpu"
        jax_platforms = "tpu"
    if jax_env_vars:
        for k, v in jax_env_vars.items():
            # Respect configured JAX env vars if set.
            if k not in os.environ:
                os.environ[k] = v
    if not jax_platforms and use_gpu:
        os.environ["JAX_PLATFORMS"] = "cuda"
        jax_platforms = "cuda"
    if "cuda" in jax_platforms.split(","):
        num_gpus_per_worker = resources_per_worker.get("GPU", 0)
        # Restrict this worker process to its assigned GPU indices.
        os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
            str(i) for i in range(num_gpus_per_worker)
        )
    import jax

    if "tpu" in jax_platforms.split(","):
        jax.distributed.initialize(master_addr_with_port, num_workers, index)
        logger.info("Initialized JAX distributed on TPU.")
    if "cuda" in jax_platforms.split(","):
        if num_gpus_per_worker > 0:
            local_device_ids = list(range(num_gpus_per_worker))
        else:
            # NOTE(review): falls back to the int 0 (a single device id), not
            # an empty list, when no GPUs are assigned — confirm intended.
            local_device_ids = 0
        jax.distributed.initialize(
            master_addr_with_port, num_workers, index, local_device_ids
        )
        logger.info("Initialized JAX distributed on CUDA.")
def _shutdown_jax_distributed():
    """Shutdown JAX distributed environment.

    This function should be called on each worker during cleanup.
    If JAX distributed was not initialized, this is a no-op.
    """
    try:
        import jax

        jax.distributed.shutdown()
    except Exception as e:
        # Best-effort teardown: never let shutdown errors crash the worker.
        logger.warning(f"Error during JAX distributed shutdown: {e}")
class _JaxBackend(Backend):
    """Train backend that bootstraps and tears down ``jax.distributed``
    across the worker group for TPU/GPU training."""

    def on_start(self, worker_group: WorkerGroup, backend_config: JaxConfig):
        # Nothing to coordinate for CPU-only JAX.
        if not backend_config.use_tpu and not backend_config.use_gpu:
            return
        # Worker 0 hosts the JAX coordination service; grab a free port there.
        master_addr, master_port = worker_group.execute_single(0, get_address_and_port)
        master_addr_with_port = f"{master_addr}:{master_port}"
        if backend_config.use_tpu and hasattr(worker_group, "get_worker_group_context"):
            num_slices = worker_group.get_worker_group_context().num_slices
        else:
            num_slices = 1
        # Calculate the number of workers per slice for multi-slice env setup.
        if backend_config.use_tpu and num_slices > 1:
            # Handle the case where a user requests less workers than the total
            # capacity of the TPU slice.
            scaling_config = worker_group._train_run_context.scaling_config
            workers_per_slice, _ = get_tpu_worker_resources(
                topology=scaling_config.topology,
                accelerator_type=scaling_config.accelerator_type,
                resources_per_unit=scaling_config.resources_per_worker,
                num_slices=1,
            )
        else:
            # Assume even distribution based on the requested number of workers.
            workers_per_slice = max(1, len(worker_group) // num_slices)
        # Set up JAX distributed environment on all workers
        num_workers_total = len(worker_group)
        setup_futures = []
        for i in range(num_workers_total):
            env_vars = {}
            if num_slices > 1:
                # Clamp so trailing workers never get an out-of-range slice id.
                slice_id = min(i // workers_per_slice, num_slices - 1)
                env_vars = get_tpu_coordinator_env_vars(
                    coordinator_address=master_addr,
                    num_slices=num_slices,
                    slice_id=slice_id,
                )
            setup_futures.append(
                worker_group.execute_single_async(
                    i,
                    _setup_jax_distributed_environment,
                    master_addr_with_port=master_addr_with_port,
                    num_workers=len(worker_group),
                    index=i,
                    use_tpu=backend_config.use_tpu,
                    use_gpu=backend_config.use_gpu,
                    resources_per_worker=worker_group.get_resources_per_worker(),
                    jax_env_vars=env_vars,
                )
            )
        # Block until every worker finished jax.distributed.initialize.
        ray.get(setup_futures)

    def on_shutdown(self, worker_group: WorkerGroup, backend_config: JaxConfig):
        """Cleanup JAX distributed resources when shutting down worker group."""
        if not backend_config.use_tpu and not backend_config.use_gpu:
            return
        # Shutdown JAX distributed on all workers
        shutdown_futures = worker_group.execute_async(_shutdown_jax_distributed)
        # Timeout is env-overridable so hung workers don't stall teardown forever.
        timeout_s = ray_constants.env_integer(
            JAX_DISTRIBUTED_SHUTDOWN_TIMEOUT_S,
            DEFAULT_JAX_DISTRIBUTED_SHUTDOWN_TIMEOUT_S,
        )
        try:
            ray.get(shutdown_futures, timeout=timeout_s)
            logger.debug("JAX distributed shutdown completed")
        except ray.exceptions.GetTimeoutError:
            logger.warning(
                f"JAX distributed shutdown timed out after {timeout_s} seconds. "
                "This may indicate workers are hung or unresponsive."
            )
        except Exception as e:
            logger.warning(f"Error during JAX distributed shutdown: {e}")
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/jax/config.py",
"license": "Apache License 2.0",
"lines": 159,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/train/v2/jax/jax_trainer.py | import logging
from typing import TYPE_CHECKING, Callable, Dict, Optional, Union
from ray.air._internal.config import ensure_only_allowed_dataclass_keys_updated
from ray.train import DataConfig
from ray.train.trainer import GenDataset
from ray.train.v2.api.config import RunConfig, ScalingConfig
from ray.train.v2.api.data_parallel_trainer import DataParallelTrainer
from ray.train.v2.api.validation_config import ValidationConfig
from ray.train.v2.jax.config import JaxConfig
from ray.util import PublicAPI
if TYPE_CHECKING:
pass
logger = logging.getLogger(__name__)
@PublicAPI(stability="alpha")
class JaxTrainer(DataParallelTrainer):
    """A Trainer for Single-Program Multi-Data (SPMD) JAX training.

    At a high level, this Trainer does the following:

    1. Launches multiple workers as defined by the ``scaling_config``.
    2. Sets up a distributed JAX environment for TPUs or GPUs
       on these workers as defined by the ``jax_config``.
    3. Ingests the input ``datasets`` based on the ``dataset_config``.
    4. Runs the input ``train_loop_per_worker(train_loop_config)``
       on all workers.

    For more details, see:

    * :ref:`Jax Guide <train-jax>`

    .. testcode::
        :skipif: True

        import os
        from absl import app
        import logging
        from typing import Sequence

        import ray
        from ray.train import ScalingConfig, RunConfig
        from ray.train.v2.jax import JaxTrainer

        from MaxText.train import main as maxtext_main

        def train_loop_per_worker(config):
            argv = config["argv"]
            maxtext_main(argv)

        def main(argv: Sequence[str]):
            ray.init()

            # If you want to use TPUs, specify the TPU topology and accelerator type.
            tpu_scaling_config = ScalingConfig(
                use_tpu=True,
                num_workers=4,
                topology="4x4",
                accelerator_type="TPU-V6E",
                placement_strategy="SPREAD",
                resources_per_worker={"TPU": 4},
            )
            # If you want to use GPUs, specify the GPU scaling config like below.
            # gpu_scaling_config = ScalingConfig(
            #     use_gpu=True,
            #     num_workers=4,
            #     resources_per_worker={"GPU": 1},
            # )
            trainer = JaxTrainer(
                train_loop_per_worker=train_loop_per_worker,
                train_loop_config={"argv": absolute_argv},
                scaling_config=tpu_scaling_config,
                run_config=RunConfig(
                    name="maxtext_jaxtrainer",
                    worker_runtime_env={
                        "env_vars": {
                            "JAX_PLATFORMS": "tpu",
                            # If you want to use GPUs, set the JAX_PLATFORMS to "cuda".
                            # "JAX_PLATFORMS": "cuda",
                        }
                    },
                ),
            )
            result = trainer.fit()

    If the ``datasets`` dict contains datasets (e.g. "train" and "val"), then it will be split into multiple dataset
    shards that can then be accessed by ``ray.train.get_dataset_shard("train")`` and ``ray.train.get_dataset_shard("val")``.

    Note:
        * If you are using TPUs, importing `jax` should occur within `train_loop_per_worker` to
          avoid driver-side TPU lock issues.

    Args:
        train_loop_per_worker: The training function to execute on each worker.
            This function can either take in zero arguments or a single ``Dict``
            argument which is set by defining ``train_loop_config``.
            Within this function you can use any of the
            :ref:`Ray Train Loop utilities <train-loop-api>`.
        train_loop_config: A configuration ``Dict`` to pass in as an argument to
            ``train_loop_per_worker``.
            This is typically used for specifying hyperparameters. Passing large
            datasets via `train_loop_config` is not recommended and may introduce
            large overhead and unknown issues with serialization and deserialization.
        jax_config: The configuration for setting up the JAX backend.
            If set to None, a default configuration will be used based on the ``scaling_config`` and ``JAX_PLATFORMS`` environment variable.
        scaling_config: Configuration for how to scale data parallel training
            with SPMD. ``num_workers`` should be set to the number of TPU hosts or GPU workers.
            If using TPUs, ``topology`` should be set to the TPU topology.
            See :class:`~ray.train.ScalingConfig` for more info.
        dataset_config: The configuration for ingesting the input ``datasets``.
            By default, all the Ray Dataset are split equally across workers.
            See :class:`~ray.train.DataConfig` for more details.
        run_config: The configuration for the execution of the training run.
            See :class:`~ray.train.RunConfig` for more info.
        datasets: The Ray Datasets to ingest for training.
            Datasets are keyed by name (``{name: dataset}``).
            Each dataset can be accessed from within the ``train_loop_per_worker``
            by calling ``ray.train.get_dataset_shard(name)``.
            Sharding and additional configuration can be done by
            passing in a ``dataset_config``.
        validation_config: [Alpha] Configuration for checkpoint validation.
            If provided and ``ray.train.report`` is called with the ``validation``
            argument, Ray Train will validate the reported checkpoint using
            the validation function specified in this config.
    """

    def __init__(
        self,
        train_loop_per_worker: Union[Callable[[], None], Callable[[Dict], None]],
        *,
        train_loop_config: Optional[Dict] = None,
        jax_config: Optional[JaxConfig] = None,
        scaling_config: Optional[ScalingConfig] = None,
        dataset_config: Optional[Dict[str, DataConfig]] = None,
        run_config: Optional[RunConfig] = None,
        datasets: Optional[Dict[str, GenDataset]] = None,
        validation_config: Optional[ValidationConfig] = None,
    ):
        if not jax_config:
            # Derive the backend config from the scaling config. `scaling_config`
            # is optional, so fall back to a CPU-only JaxConfig instead of
            # raising AttributeError when it was not provided.
            jax_config = JaxConfig(
                use_tpu=scaling_config.use_tpu if scaling_config else False,
                use_gpu=scaling_config.use_gpu if scaling_config else False,
            )
        # Zero-argument super() is equivalent and idiomatic in Python 3.
        super().__init__(
            train_loop_per_worker=train_loop_per_worker,
            train_loop_config=train_loop_config,
            backend_config=jax_config,
            scaling_config=scaling_config,
            dataset_config=dataset_config,
            run_config=run_config,
            datasets=datasets,
            validation_config=validation_config,
        )

    @classmethod
    def _validate_scaling_config(cls, scaling_config: ScalingConfig) -> ScalingConfig:
        """Return scaling config dataclass after validating updated keys."""
        ensure_only_allowed_dataclass_keys_updated(
            dataclass=scaling_config,
            allowed_keys=cls._scaling_config_allowed_keys,
        )
        return scaling_config
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/jax/jax_trainer.py",
"license": "Apache License 2.0",
"lines": 144,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:python/ray/train/v2/tests/test_jax_trainer.py | import os
import sys
import pytest
import ray
from ray.tests.conftest import _ray_start_cluster
from ray.train import RunConfig, ScalingConfig, UserCallback
from ray.train.v2._internal.constants import (
HEALTH_CHECK_INTERVAL_S_ENV_VAR,
is_v2_enabled,
)
from ray.train.v2.jax import JaxTrainer
assert is_v2_enabled()
@pytest.fixture
def ray_tpu_single_host():
    """Start a mock single-host TPU Ray cluster with 2x4 v6e (8 chips per host)."""
    node_resources = {"TPU": 8, "accelerator_type:TPU-V6E": 1}
    node_env = {"TPU_ACCELERATOR_TYPE": "v6e-8"}
    with _ray_start_cluster() as cluster:
        # One node that pretends to own all 8 TPU chips of a v6e-8 host.
        cluster.add_node(num_cpus=4, resources=node_resources, env_vars=node_env)
        ray.init(address=cluster.address)
        yield cluster
        ray.shutdown()
@pytest.fixture(scope="module")
def ray_tpu_multi_host():
    """
    Simulates a Ray cluster with two multi-host TPU v4-16 slices.

    Each slice consists of two hosts with 4 chips each; the first host of a
    slice carries the ``TPU-<pod_type>-head`` resource marker. The original
    four copy-pasted ``add_node`` setups are factored into a helper; node
    registration order (A-head, A-worker, B-head, B-worker) is preserved.
    """
    pod_type = "v4-16"
    topology = "2x2x2"

    def _add_slice(cluster, slice_name):
        # Add one v4-16 slice: hosts "0" (slice head) and "1".
        for worker_id in ("0", "1"):
            env_vars = {
                "TPU_NAME": slice_name,
                "TPU_WORKER_ID": worker_id,
                "TPU_ACCELERATOR_TYPE": pod_type,
                "TPU_TOPOLOGY": topology,
            }
            labels = {
                "ray.io/tpu-slice-name": slice_name,
                "ray.io/tpu-worker-id": worker_id,
                "ray.io/tpu-pod-type": pod_type,
            }
            resources = {"TPU": 4, "accelerator_type:TPU-V4": 1}
            if worker_id == "0":
                # Only the first host of a slice advertises the head resource.
                resources[f"TPU-{pod_type}-head"] = 1
            cluster.add_node(
                num_cpus=8,
                resources=resources,
                env_vars=env_vars,
                labels=labels,
            )

    with _ray_start_cluster() as cluster:
        # First TPU slice - v4-16 with 2 hosts and 4 chips/host.
        _add_slice(cluster, "slice-A")
        # Second TPU slice - v4-16 with 2 hosts and 4 chips/host.
        _add_slice(cluster, "slice-B")
        ray.init(address=cluster.address)
        yield cluster
        ray.shutdown()
@pytest.fixture(autouse=True)
def reduce_health_check_interval(monkeypatch):
    # Shorten the worker health-check interval so failures surface quickly
    # in tests; applied to every test in this module via autouse.
    monkeypatch.setenv(HEALTH_CHECK_INTERVAL_S_ENV_VAR, "0.2")
    yield
def train_func():
    """Per-worker loop: report rank, slice, devices, and megascale env vars."""
    # Import jax inside the worker to avoid driver-side TPU lock issues.
    import jax

    import ray
    from ray import train

    ctx = train.get_context()
    world_rank = ctx.get_world_rank()
    local_devices = jax.devices()
    labels = ray.get_runtime_context().get_node_labels()

    payload = {
        "worker_id": world_rank,
        "slice_name": labels.get("ray.io/tpu-slice-name"),
        "node_ip": ray.util.get_node_ip_address(),
        "devices": [str(d) for d in local_devices],
    }
    # Surface the megascale coordination vars (None when not set).
    for var in (
        "MEGASCALE_SLICE_ID",
        "MEGASCALE_NUM_SLICES",
        "MEGASCALE_COORDINATOR_ADDRESS",
    ):
        payload[var] = os.environ.get(var)

    train.report(payload)
class CustomMetricsCallback(UserCallback):
    """Forwards reported metrics to a named verification actor.

    Reporting metrics from all workers is a no-op in Ray Train V2, so tests
    use this callback to capture what was passed to ``train.report``.
    """

    def __init__(self, actor_name: str):
        self.actor_name = actor_name

    def after_report(self, run_context, metrics, checkpoint):
        # Hand the metrics off to this test's dedicated verification actor.
        ray.get_actor(self.actor_name).log.remote(metrics)
@ray.remote
class VerificationActor:
    """Accumulates metrics forwarded by the metrics callback for assertions."""

    def __init__(self):
        self.reports = []

    def log(self, metrics):
        # `metrics` is a list of per-worker dicts; flatten into one list.
        self.reports += metrics

    def get_reports(self):
        return self.reports
@pytest.mark.skipif(
    sys.version_info >= (3, 12),
    reason="Current jax version (0.4.13) is not supported in python 3.12+",
)
def test_tpu_single_host(ray_tpu_single_host, tmp_path):
    """
    Single-host scheduling with no topology value: the JaxTrainer should
    skip the multi-host slice scheduling logic and set up a single TPU worker.
    """
    actor_name = "test_tpu_single_host"
    sink = VerificationActor.options(name=actor_name).remote()

    scaling = ScalingConfig(
        use_tpu=True,
        num_workers=1,
        resources_per_worker={"TPU": 8},
        accelerator_type="TPU-V6E",
    )
    run_cfg = RunConfig(
        storage_path=str(tmp_path),
        callbacks=[CustomMetricsCallback(actor_name)],
        worker_runtime_env={"env_vars": {"JAX_PLATFORMS": "cpu"}},
    )
    result = JaxTrainer(
        train_loop_per_worker=train_func,
        scaling_config=scaling,
        run_config=run_cfg,
    ).fit()
    assert result.error is None

    # Pull the captured metrics back out of the verification actor.
    reports = ray.get(sink.get_reports.remote())

    # Exactly one single-host TPU worker should have run the train func.
    assert len(reports) == 1, f"Expected 1 report, got {len(reports)}"
    assert reports[0]["worker_id"] == 0

    # Single-slice runs must not receive megascale coordination env vars.
    for entry in reports:
        assert entry.get("MEGASCALE_SLICE_ID") is None
        assert entry.get("MEGASCALE_NUM_SLICES") is None
        assert entry.get("MEGASCALE_COORDINATOR_ADDRESS") is None
@pytest.mark.skipif(
    sys.version_info >= (3, 12),
    reason="Current jax version (0.4.13) is not supported in python 3.12+",
)
def test_tpu_single_slice_multi_host(ray_tpu_multi_host, tmp_path):
    """
    Scheduling on a single multi-host slice: num_workers matches the number
    of hosts in one slice, each worker consuming that host's full resources.
    """
    actor_name = "test_tpu_single_slice_multi_host"
    sink = VerificationActor.options(name=actor_name).remote()

    scaling = ScalingConfig(
        use_tpu=True,
        accelerator_type="TPU-V4",
        topology="2x2x2",
        num_workers=2,
    )
    run_cfg = RunConfig(
        storage_path=str(tmp_path),
        callbacks=[CustomMetricsCallback(actor_name)],
        worker_runtime_env={"env_vars": {"JAX_PLATFORMS": "cpu"}},
    )
    result = JaxTrainer(
        train_loop_per_worker=train_func,
        scaling_config=scaling,
        run_config=run_cfg,
    ).fit()
    assert result.error is None

    # Pull the captured metrics back out of the verification actor.
    reports = ray.get(sink.get_reports.remote())

    # Both TPU workers should have run, and on the same slice.
    assert (
        len(reports) == 2
    ), f"Expected 2 workers to report metrics, got {len(reports)}"
    assert {entry["worker_id"] for entry in reports} == {
        0,
        1,
    }, "Expected unique worker IDs from 0 to N-1."
    seen_slices = {entry["slice_name"] for entry in reports}
    assert len(seen_slices) == 1, "Expected workers to be scheduled to 1 slice."
    assert next(iter(seen_slices)) in ("slice-A", "slice-B")

    # Single-slice runs must not receive megascale coordination env vars.
    for entry in reports:
        assert entry.get("MEGASCALE_SLICE_ID") is None
        assert entry.get("MEGASCALE_NUM_SLICES") is None
        assert entry.get("MEGASCALE_COORDINATOR_ADDRESS") is None
@pytest.mark.skipif(
    sys.version_info >= (3, 12),
    reason="Current jax version (0.4.13) is not supported in python 3.12+",
)
def test_tpu_multi_slice_multi_host(ray_tpu_multi_host, tmp_path):
    """
    TPU workers spread over multiple multi-host slices: num_workers equals
    the total number of hosts across all slices.
    """
    actor_name = "test_tpu_multi_slice_multi_host"
    sink = VerificationActor.options(name=actor_name).remote()

    scaling = ScalingConfig(
        use_tpu=True,
        accelerator_type="TPU-V4",
        topology="2x2x2",
        num_workers=4,
    )
    run_cfg = RunConfig(
        storage_path=str(tmp_path),
        callbacks=[CustomMetricsCallback(actor_name)],
        worker_runtime_env={"env_vars": {"JAX_PLATFORMS": "cpu"}},
    )
    result = JaxTrainer(
        train_loop_per_worker=train_func,
        scaling_config=scaling,
        run_config=run_cfg,
    ).fit()
    assert result.error is None

    # Pull the captured metrics back out of the verification actor.
    reports = ray.get(sink.get_reports.remote())

    # All 4 TPU workers should have run, spanning both slices.
    assert (
        len(reports) == 4
    ), f"Expected 4 workers to report metrics, got {len(reports)}"
    assert {entry["worker_id"] for entry in reports} == {
        0,
        1,
        2,
        3,
    }, "Expected unique worker IDs from 0 to N-1."
    seen_slices = {entry["slice_name"] for entry in reports}
    assert len(seen_slices) == 2, "Expected workers to schedule across 2 slices."
    assert "slice-A" in seen_slices
    assert "slice-B" in seen_slices

    # The megascale coordinator must be worker 0's node IP on every worker.
    rank0 = next(entry for entry in reports if entry["worker_id"] == 0)
    coordinator_ip = rank0["node_ip"]
    for entry in reports:
        assert entry["MEGASCALE_COORDINATOR_ADDRESS"] == coordinator_ip
        assert entry["MEGASCALE_NUM_SLICES"] == "2"

    # MEGASCALE_SLICE_ID is assigned per indexed TPU Pod name.
    on_slice_a = [entry for entry in reports if entry["slice_name"] == "slice-A"]
    on_slice_b = [entry for entry in reports if entry["slice_name"] == "slice-B"]
    assert list({entry["MEGASCALE_SLICE_ID"] for entry in on_slice_a}) == ["0"]
    assert list({entry["MEGASCALE_SLICE_ID"] for entry in on_slice_b}) == ["1"]
@pytest.mark.skipif(
    sys.version_info >= (3, 12),
    reason="Current jax version (0.4.13) is not supported in python 3.12+",
)
def test_multi_slice_manual_resources(ray_tpu_multi_host, tmp_path):
    """
    Multi-slice execution with an explicit `resources_per_worker`: the
    JaxTrainer should run num_workers workers of that size across both slices.
    """
    actor_name = "test_multi_slice_manual_resources"
    sink = VerificationActor.options(name=actor_name).remote()

    scaling = ScalingConfig(
        use_tpu=True,
        accelerator_type="TPU-V4",
        topology="2x2x2",
        resources_per_worker={"TPU": 1},  # 1 CPU added by default per-bundle.
        num_workers=16,
    )
    run_cfg = RunConfig(
        storage_path=str(tmp_path),
        callbacks=[CustomMetricsCallback(actor_name)],
        worker_runtime_env={"env_vars": {"JAX_PLATFORMS": "cpu"}},
    )
    result = JaxTrainer(
        train_loop_per_worker=train_func,
        scaling_config=scaling,
        run_config=run_cfg,
    ).fit()
    assert result.error is None

    # Pull the captured metrics back out of the verification actor.
    reports = ray.get(sink.get_reports.remote())

    # All 16 single-chip workers should have run across both v4-16 slices.
    assert (
        len(reports) == 16
    ), f"Expected 16 workers to report metrics, got {len(reports)}"
    assert {entry["worker_id"] for entry in reports} == set(
        range(16)
    ), "Expected unique worker IDs from 0 to N-1."
    seen_slices = {entry["slice_name"] for entry in reports}
    assert len(seen_slices) == 2, "Expected workers to span 2 slices."
    assert "slice-A" in seen_slices
    assert "slice-B" in seen_slices

    # The megascale coordinator must be worker 0's node IP on every worker.
    rank0 = next(entry for entry in reports if entry["worker_id"] == 0)
    coordinator_ip = rank0["node_ip"]
    for entry in reports:
        assert entry["MEGASCALE_COORDINATOR_ADDRESS"] == coordinator_ip
        assert entry["MEGASCALE_NUM_SLICES"] == "2"

    # MEGASCALE_SLICE_ID is assigned per indexed TPU Pod name.
    on_slice_a = [entry for entry in reports if entry["slice_name"] == "slice-A"]
    on_slice_b = [entry for entry in reports if entry["slice_name"] == "slice-B"]
    assert list({entry["MEGASCALE_SLICE_ID"] for entry in on_slice_a}) == ["0"]
    assert list({entry["MEGASCALE_SLICE_ID"] for entry in on_slice_b}) == ["1"]
@pytest.mark.skipif(
    sys.version_info >= (3, 12),
    reason="Current jax version (0.4.13) is not supported in python 3.12+",
)
def test_tpu_multi_slice_uneven_workers(ray_tpu_multi_host, tmp_path):
    """
    ScalingConfig must raise a ValueError when the requested num_workers does
    not divide evenly across TPU slices of the requested topology.
    """
    invalid_configs = [
        # Default resources (1 worker per host): expect a multiple of 2.
        dict(
            use_tpu=True,
            accelerator_type="TPU-V4",
            topology="2x2x2",
            num_workers=3,
        ),
        # Explicit resources (1 TPU chip per worker): expect a multiple of 4.
        dict(
            use_tpu=True,
            accelerator_type="TPU-V4",
            topology="2x2x1",
            resources_per_worker={"TPU": 1},
            num_workers=6,
        ),
    ]
    for kwargs in invalid_configs:
        with pytest.raises(ValueError, match="must be a multiple of"):
            ScalingConfig(**kwargs)
def test_scaling_config_validation():
    """`label_selector` is incompatible with TPU slice scheduling."""
    with pytest.raises(
        ValueError, match="Cannot set `label_selector` when `use_tpu=True`"
    ):
        ScalingConfig(
            use_tpu=True,
            num_workers=2,
            topology="2x2x2",
            accelerator_type="TPU-V4",
            label_selector={"subcluster": "my_subcluster"},
        )
if __name__ == "__main__":
    # `sys` is already imported at module top; the local re-import was redundant.
    sys.exit(pytest.main(["-v", "-x", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/tests/test_jax_trainer.py",
"license": "Apache License 2.0",
"lines": 393,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:rllib/algorithms/iql/default_iql_rl_module.py | from ray.rllib.algorithms.sac.default_sac_rl_module import DefaultSACRLModule
from ray.rllib.core.models.configs import MLPHeadConfig
from ray.rllib.core.rl_module.apis.value_function_api import ValueFunctionAPI
from ray.rllib.utils.annotations import (
OverrideToImplementCustomLogic_CallToSuperRecommended,
override,
)
class DefaultIQLRLModule(DefaultSACRLModule, ValueFunctionAPI):
    """IQL RL module: SAC module plus a dedicated state-value network."""

    @override(DefaultSACRLModule)
    def setup(self):
        # Let SAC build its networks first (this also creates the catalog).
        super().setup()

        # Inference-only modules never train the value function, so the
        # value network is only built on `Learner`-side modules.
        if self.inference_only:
            return

        # Encoder feeding the value head.
        self.vf_encoder = self.catalog.build_encoder(framework=self.framework)
        # Scalar value head; mirrors the policy/Q-head architecture.
        vf_head_config = MLPHeadConfig(
            input_dims=self.catalog.latent_dims,
            hidden_layer_dims=self.catalog.pi_and_qf_head_hiddens,
            hidden_layer_activation=self.catalog.pi_and_qf_head_activation,
            output_layer_activation="linear",
            output_layer_dim=1,
        )
        self.vf = vf_head_config.build(framework=self.framework)

    @override(DefaultSACRLModule)
    @OverrideToImplementCustomLogic_CallToSuperRecommended
    def get_non_inference_attributes(self):
        """Extend SAC's training-only attributes with the value network."""
        return super().get_non_inference_attributes() + ["vf_encoder", "vf"]
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/algorithms/iql/default_iql_rl_module.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:rllib/algorithms/iql/iql.py | from typing import Optional, Type, Union
from ray.rllib.algorithms.algorithm_config import AlgorithmConfig, NotProvided
from ray.rllib.algorithms.marwil.marwil import MARWIL, MARWILConfig
from ray.rllib.connectors.common.add_observations_from_episodes_to_batch import (
AddObservationsFromEpisodesToBatch,
)
from ray.rllib.connectors.learner.add_next_observations_from_episodes_to_train_batch import ( # noqa
AddNextObservationsFromEpisodesToTrainBatch,
)
from ray.rllib.core.learner.learner import Learner
from ray.rllib.core.rl_module.rl_module import RLModuleSpec
from ray.rllib.utils.annotations import override
from ray.rllib.utils.typing import LearningRateOrSchedule, RLModuleSpecType
class IQLConfig(MARWILConfig):
    """Defines a configuration class from which a new IQL Algorithm can be built

    .. testcode::
        :skipif: True

        from ray.rllib.algorithms.iql import IQLConfig
        # Run this from the ray directory root.
        config = IQLConfig().training(actor_lr=0.00001, gamma=0.99)
        config = config.offline_data(
            input_="./rllib/offline/tests/data/pendulum/pendulum-v1_enormous")

        # Build an Algorithm object from the config and run 1 training iteration.
        algo = config.build()
        algo.train()

    .. testcode::
        :skipif: True

        from ray.rllib.algorithms.iql import IQLConfig
        from ray import tune

        config = IQLConfig()
        # Print out some default values.
        print(config.beta)
        # Update the config object.
        config.training(
            lr=tune.grid_search([0.001, 0.0001]), beta=0.75
        )
        # Set the config object's data path.
        # Run this from the ray directory root.
        config.offline_data(
            input_="./rllib/offline/tests/data/pendulum/pendulum-v1_enormous"
        )
        # Set the config object's env, used for evaluation.
        config.environment(env="Pendulum-v1")
        # Use to_dict() to get the old-style python config dict
        # when running with tune.
        tune.Tuner(
            "IQL",
            param_space=config.to_dict(),
        ).fit()
    """

    def __init__(self, algo_class=None):
        super().__init__(algo_class=algo_class or IQL)

        # fmt: off
        # __sphinx_doc_begin__
        # The temperature for the actor loss.
        self.beta = 0.1
        # The expectile to use in expectile regression.
        self.expectile = 0.8
        # The learning rates for the actor, critic and value network(s).
        self.actor_lr = 3e-4
        self.critic_lr = 3e-4
        self.value_lr = 3e-4
        # Set `lr` parameter to `None` and ensure it is not used.
        self.lr = None
        # If a twin-Q architecture should be used (advisable).
        self.twin_q = True
        # How often the target network should be updated.
        self.target_network_update_freq = 0
        # The weight for Polyak averaging.
        self.tau = 1.0
        # __sphinx_doc_end__
        # fmt: on

    @override(MARWILConfig)
    def training(
        self,
        *,
        twin_q: Optional[bool] = NotProvided,
        expectile: Optional[float] = NotProvided,
        actor_lr: Optional[LearningRateOrSchedule] = NotProvided,
        critic_lr: Optional[LearningRateOrSchedule] = NotProvided,
        value_lr: Optional[LearningRateOrSchedule] = NotProvided,
        target_network_update_freq: Optional[int] = NotProvided,
        tau: Optional[float] = NotProvided,
        **kwargs,
    ) -> "IQLConfig":
        """Sets the training related configuration.

        Args:
            beta: The temperature to scaling advantages in exponential terms.
                Must be >> 0.0. The higher this parameter the less greedy
                (exploitative) the policy becomes. It also means that the policy
                is fitting less to the best actions in the dataset. Note, this
                setting is handled by the parent (MARWIL) config via `kwargs`.
            twin_q: If a twin-Q architecture should be used (advisable).
            expectile: The expectile to use in expectile regression for the value
                function. For high expectiles the value function tries to match
                the upper tail of the Q-value distribution.
            actor_lr: The learning rate for the actor network. Actor learning rates
                greater than critic learning rates work well in experiments.
            critic_lr: The learning rate for the Q-network. Critic learning rates
                greater than value function learning rates work well in experiments.
            value_lr: The learning rate for the value function network.
            target_network_update_freq: The number of timesteps in between the target
                Q-network is fixed. Note, too high values here could harm convergence.
                The target network is updated via Polyak-averaging.
            tau: The update parameter for Polyak-averaging of the target Q-network.
                The higher this value the faster the weights move towards the actual
                Q-network.

        Returns:
            This updated `AlgorithmConfig` object.
        """
        # Non-IQL settings (e.g. `beta`, `gamma`) are handled by the parent.
        super().training(**kwargs)

        # Apply every explicitly provided IQL-specific setting.
        provided = {
            "twin_q": twin_q,
            "expectile": expectile,
            "actor_lr": actor_lr,
            "critic_lr": critic_lr,
            "value_lr": value_lr,
            "target_network_update_freq": target_network_update_freq,
            "tau": tau,
        }
        for attr_name, attr_value in provided.items():
            if attr_value is not NotProvided:
                setattr(self, attr_name, attr_value)

        return self

    @override(MARWILConfig)
    def get_default_learner_class(self) -> Union[Type["Learner"], str]:
        if self.framework_str != "torch":
            raise ValueError(
                f"The framework {self.framework_str} is not supported. "
                "Use `'torch'` instead."
            )
        from ray.rllib.algorithms.iql.torch.iql_torch_learner import IQLTorchLearner

        return IQLTorchLearner

    @override(MARWILConfig)
    def get_default_rl_module_spec(self) -> RLModuleSpecType:
        if self.framework_str != "torch":
            raise ValueError(
                f"The framework {self.framework_str} is not supported. "
                "Use `torch` instead."
            )
        from ray.rllib.algorithms.iql.torch.default_iql_torch_rl_module import (
            DefaultIQLTorchRLModule,
        )

        return RLModuleSpec(module_class=DefaultIQLTorchRLModule)

    @override(MARWILConfig)
    def build_learner_connector(
        self,
        input_observation_space,
        input_action_space,
        device=None,
    ):
        learner_pipeline = super().build_learner_connector(
            input_observation_space=input_observation_space,
            input_action_space=input_action_space,
            device=device,
        )

        # Drop MARWIL-only connector pieces that IQL does not need.
        learner_pipeline.remove("AddOneTsToEpisodesAndTruncate")
        learner_pipeline.remove("GeneralAdvantageEstimation")

        # Insert the "add-NEXT_OBS-from-episodes-to-train-batch" piece right
        # after the corresponding "add-OBS-..." default piece.
        learner_pipeline.insert_after(
            AddObservationsFromEpisodesToBatch,
            AddNextObservationsFromEpisodesToTrainBatch(),
        )

        return learner_pipeline

    @override(MARWILConfig)
    def validate(self) -> None:
        # Call super's validation method.
        super().validate()

        # Ensure hyperparameters are meaningful.
        if self.beta <= 0.0:
            self._value_error(
                "For meaningful results, `beta` (temperature) parameter must be >> 0.0!"
            )
        if not 0.0 < self.expectile < 1.0:
            self._value_error(
                "For meaningful results, `expectile` parameter must be in (0, 1)."
            )

    @property
    def _model_config_auto_includes(self):
        # The RLModule needs to know whether a twin-Q architecture is used.
        return super()._model_config_auto_includes | {"twin_q": self.twin_q}
class IQL(MARWIL):
    """Implicit Q-learning (derived from MARWIL).

    Uses MARWIL training step.
    """

    @classmethod
    @override(MARWIL)
    def get_default_config(cls) -> AlgorithmConfig:
        # IQL only customizes the config; the training step is MARWIL's.
        return IQLConfig()
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/algorithms/iql/iql.py",
"license": "Apache License 2.0",
"lines": 193,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:rllib/algorithms/iql/iql_learner.py | from typing import Dict
from ray.rllib.algorithms.dqn.dqn_learner import DQNLearner
from ray.rllib.utils.annotations import (
OverrideToImplementCustomLogic_CallToSuperRecommended,
override,
)
from ray.rllib.utils.lambda_defaultdict import LambdaDefaultDict
from ray.rllib.utils.typing import ModuleID, TensorType
QF_TARGET_PREDS = "qf_target_preds"
VF_PREDS_NEXT = "vf_preds_next"
VF_LOSS = "value_loss"
class IQLLearner(DQNLearner):
    """Learner for IQL.

    Maintains per-module constants for the expectile (value-function
    expectile regression) and the temperature `beta` (actor advantage
    weighting), both created lazily per module and kept non-trainable.
    """

    @OverrideToImplementCustomLogic_CallToSuperRecommended
    @override(DQNLearner)
    def build(self) -> None:
        # Build the `DQNLearner` (builds the target network).
        super().build()

        # Define the expectile parameter(s), one per module, lazily created.
        self.expectile: Dict[ModuleID, TensorType] = LambdaDefaultDict(
            lambda module_id: self._get_tensor_variable(
                # Note, we want to train with a certain expectile.
                [self.config.get_config_for_module(module_id).expectile],
                trainable=False,
            )
        )

        # Define the temperature for the actor advantage loss.
        self.temperature: Dict[ModuleID, TensorType] = LambdaDefaultDict(
            lambda module_id: self._get_tensor_variable(
                # Note, `beta` is the temperature of the advantage weighting.
                [self.config.get_config_for_module(module_id).beta],
                trainable=False,
            )
        )

        # Store loss tensors here temporarily inside the loss function for (exact)
        # consumption later by the compute gradients function.
        # Keys=(module_id, optimizer_name), values=loss tensors (in-graph).
        self._temp_losses = {}

    @override(DQNLearner)
    def remove_module(self, module_id: ModuleID) -> None:
        """Removes the expectile and temperature for removed modules."""
        # First call `super`'s `remove_module` method.
        super().remove_module(module_id)

        # Remove the expectile from the mapping.
        self.expectile.pop(module_id, None)
        # Remove the temperature from the mapping.
        self.temperature.pop(module_id, None)

    @override(DQNLearner)
    def add_module(
        self,
        *,
        module_id,
        module_spec,
        config_overrides=None,
        new_should_module_be_updated=None
    ):
        """Adds the expectile and temperature for new modules."""
        # First call `super`'s `add_module` method.
        super().add_module(
            module_id=module_id,
            module_spec=module_spec,
            config_overrides=config_overrides,
            new_should_module_be_updated=new_should_module_be_updated,
        )

        # Add the expectile to the mapping.
        # Bug fix: this previously read the module's `beta` (temperature)
        # instead of its `expectile`, so added modules trained with the wrong
        # expectile. Matches the lazy initialization in `build()`.
        self.expectile[module_id] = self._get_tensor_variable(
            [self.config.get_config_for_module(module_id).expectile],
            trainable=False,
        )
        # Add the temperature to the mapping.
        self.temperature[module_id] = self._get_tensor_variable(
            [self.config.get_config_for_module(module_id).beta],
            trainable=False,
        )
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/algorithms/iql/iql_learner.py",
"license": "Apache License 2.0",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:rllib/algorithms/iql/torch/default_iql_torch_rl_module.py | from typing import Any, Dict, Optional
import gymnasium as gym
from ray.rllib.algorithms.iql.default_iql_rl_module import DefaultIQLRLModule
from ray.rllib.algorithms.iql.iql_learner import QF_TARGET_PREDS, VF_PREDS_NEXT
from ray.rllib.algorithms.sac.torch.default_sac_torch_rl_module import (
DefaultSACTorchRLModule,
)
from ray.rllib.core.columns import Columns
from ray.rllib.core.models.base import ENCODER_OUT
from ray.rllib.core.rl_module.apis.value_function_api import ValueFunctionAPI
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.typing import TensorType
torch, nn = try_import_torch()
class DefaultIQLTorchRLModule(DefaultSACTorchRLModule, DefaultIQLRLModule):
    """Default torch RLModule for IQL.

    Extends the SAC torch module's train forward pass with target-Q
    predictions and value-function predictions (for both current and next
    observations), which the IQL losses consume.
    """

    framework: str = "torch"

    @override(DefaultSACTorchRLModule)
    def _forward_train(self, batch: Dict, **kwargs) -> Dict[str, Any]:
        # Right now, IQL runs only with continuous action spaces.
        # TODO (simon): Implement it also for discrete action spaces.
        if not isinstance(self.action_space, gym.spaces.Box):
            raise ValueError(
                f"Unsupported action space type: {type(self.action_space)}. "
                "Only continuous action spaces are supported."
            )

        # Start from the SAC module's train-forward output.
        fwd_out = super()._forward_train(batch, **kwargs)

        # Sub-batches for the target Q-networks (current obs + actions) and
        # for the value function on the next observations.
        obs_action_batch = {
            Columns.OBS: batch[Columns.OBS],
            Columns.ACTIONS: batch[Columns.ACTIONS],
        }
        next_obs_batch = {Columns.OBS: batch[Columns.NEXT_OBS]}

        # Target Q-values; needed for the value loss and the actor loss.
        target_q = self._qf_forward_train_helper(
            obs_action_batch, encoder=self.target_qf_encoder, head=self.target_qf
        )
        # With a twin-Q architecture, take the elementwise minimum of the two
        # target Q estimates.
        # NOTE(review): This pairs the *target* twin encoder with the *online*
        # twin head (`self.qf_twin`) — confirm whether `self.target_qf_twin`
        # was intended.
        if self.twin_q:
            twin_target_q = self._qf_forward_train_helper(
                obs_action_batch, encoder=self.target_qf_twin_encoder, head=self.qf_twin
            )
            target_q = torch.min(target_q, twin_target_q)
        fwd_out[QF_TARGET_PREDS] = target_q

        # Values for the current observations (value loss / actor loss).
        fwd_out[Columns.VF_PREDS] = self.compute_values(obs_action_batch)
        # Values for the next observations (critic loss).
        fwd_out[VF_PREDS_NEXT] = self.compute_values(next_obs_batch)
        return fwd_out

    @override(ValueFunctionAPI)
    def compute_values(
        self,
        batch: Dict[str, Any],
        embeddings: Optional[Any] = None,
    ) -> TensorType:
        """Computes value estimates for the observations in `batch`."""
        # Run the value encoder only if no embeddings were handed in.
        if embeddings is None:
            embeddings = self.vf_encoder(batch)[ENCODER_OUT]
        # Single-node value head; squeeze out the trailing dimension.
        return self.vf(embeddings).squeeze(-1)
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/algorithms/iql/torch/default_iql_torch_rl_module.py",
"license": "Apache License 2.0",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:rllib/algorithms/iql/torch/iql_torch_learner.py | from typing import Dict
from ray.rllib.algorithms.algorithm_config import AlgorithmConfig
from ray.rllib.algorithms.dqn.dqn_learner import QF_LOSS_KEY, QF_PREDS
from ray.rllib.algorithms.iql.iql_learner import (
QF_TARGET_PREDS,
VF_LOSS,
VF_PREDS_NEXT,
IQLLearner,
)
from ray.rllib.algorithms.sac.sac_learner import QF_TWIN_LOSS_KEY, QF_TWIN_PREDS
from ray.rllib.core import ALL_MODULES
from ray.rllib.core.columns import Columns
from ray.rllib.core.learner.learner import (
POLICY_LOSS_KEY,
)
from ray.rllib.core.learner.torch.torch_learner import TorchLearner
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.typing import ModuleID, ParamDict, TensorType
torch, nn = try_import_torch()
class IQLTorchLearner(TorchLearner, IQLLearner):
    """Implements the IQL loss on top of `IQLLearner`.

    This Learner implements configure_optimizers_for_module to define
    separate optimizers for the policy, Q-, and value networks. When
    using a twin-Q network architecture, each Q-network is assigned its
    own optimizer—consistent with the SAC algorithm.

    The IQL loss is defined in compute_loss_for_module and consists of
    three components: value loss, Q-loss (TD error), and actor (policy)
    loss.

    Note that the original IQL implementation performs separate backward
    passes for each network. However, due to RLlib's reliance on TorchDDP,
    all backward passes must be executed within a single update step. This
    constraint can lead to parameter lag and cyclical loss behavior, though
    it does not hinder convergence.
    """

    @override(TorchLearner)
    def configure_optimizers_for_module(
        self, module_id: ModuleID, config: AlgorithmConfig = None
    ) -> None:
        """Registers separate Adam optimizers for critic, (twin-)critic, actor, and value net.

        Args:
            module_id: The module to configure optimizers for.
            config: The AlgorithmConfig for this module, providing the
                learning rates (`critic_lr`, `actor_lr`, `value_lr`) and the
                `twin_q` flag.
        """
        # Note, we could have derived directly from SACTorchLearner to
        # inherit the setup of optimizers, but that learner comes with
        # additional parameters which we do not need.

        # Receive the module.
        module = self._module[module_id]

        # Define the optimizer for the critic.
        # TODO (sven): Maybe we change here naming to `qf` for unification.
        params_critic = self.get_parameters(module.qf_encoder) + self.get_parameters(
            module.qf
        )
        optim_critic = torch.optim.Adam(params_critic, eps=1e-7)

        self.register_optimizer(
            module_id=module_id,
            optimizer_name="qf",
            optimizer=optim_critic,
            params=params_critic,
            lr_or_lr_schedule=config.critic_lr,
        )
        # If necessary register also an optimizer for a twin Q network.
        if config.twin_q:
            params_twin_critic = self.get_parameters(
                module.qf_twin_encoder
            ) + self.get_parameters(module.qf_twin)
            optim_twin_critic = torch.optim.Adam(params_twin_critic, eps=1e-7)

            self.register_optimizer(
                module_id=module_id,
                optimizer_name="qf_twin",
                optimizer=optim_twin_critic,
                params=params_twin_critic,
                lr_or_lr_schedule=config.critic_lr,
            )

        # Define the optimizer for the actor.
        params_actor = self.get_parameters(module.pi_encoder) + self.get_parameters(
            module.pi
        )
        optim_actor = torch.optim.Adam(params_actor, eps=1e-7)

        self.register_optimizer(
            module_id=module_id,
            optimizer_name="policy",
            optimizer=optim_actor,
            params=params_actor,
            lr_or_lr_schedule=config.actor_lr,
        )

        # Define the optimizer for the value function.
        params_value = self.get_parameters(module.vf_encoder) + self.get_parameters(
            module.vf
        )
        optim_value = torch.optim.Adam(params_value, eps=1e-7)

        self.register_optimizer(
            module_id=module_id,
            optimizer_name="value",
            optimizer=optim_value,
            params=params_value,
            lr_or_lr_schedule=config.value_lr,
        )

    @override(TorchLearner)
    def compute_loss_for_module(
        self,
        *,
        module_id: ModuleID,
        config: AlgorithmConfig,
        batch: Dict,
        fwd_out: Dict
    ):
        """Computes the total IQL loss (value + actor + critic [+ twin critic]).

        Also stashes the individual loss tensors in `self._temp_losses` for
        the per-optimizer backward passes in `compute_gradients`.

        Args:
            module_id: The module to compute the loss for.
            config: The AlgorithmConfig for this module.
            batch: The training batch.
            fwd_out: The output of the module's `_forward_train`.

        Returns:
            The total loss tensor.
        """
        # Get the module and hyperparameters.
        module = self._module[module_id]
        expectile = self.expectile[module_id]
        temperature = self.temperature[module_id]

        # Get the action distribution for the actor loss.
        action_train_dist_class = module.get_train_action_dist_cls()
        action_train_dist = action_train_dist_class.from_logits(
            fwd_out[Columns.ACTION_DIST_INPUTS]
        )

        # First, compute the value loss via the target Q-network and current observations.
        value_loss = torch.mean(
            self._expectile_loss(
                fwd_out[QF_TARGET_PREDS] - fwd_out[Columns.VF_PREDS], expectile
            )
        )

        # Second, compute the actor loss using the target-Q network and values.
        # The exponentiated advantage weights are clipped at 100.0.
        # NOTE(review): `torch.Tensor([100.0]).to(self.device)` allocates a new
        # tensor on every loss call — consider caching it if this shows up in
        # profiles.
        exp_advantages = torch.minimum(
            torch.exp(
                temperature * (fwd_out[QF_TARGET_PREDS] - fwd_out[Columns.VF_PREDS])
            ),
            torch.Tensor([100.0]).to(self.device),
        )
        # Note, we are using here the actions from the data sample.
        action_logps = action_train_dist.logp(batch[Columns.ACTIONS])
        # Compute the actor loss (advantage weights are detached, so only the
        # policy receives gradients).
        actor_loss = -torch.mean(exp_advantages.detach() * action_logps)

        # Third, compute the critic loss.
        target_critic = (
            batch[Columns.REWARDS]
            + config.gamma
            * (1 - batch[Columns.TERMINATEDS].float())
            * fwd_out[VF_PREDS_NEXT].detach()
        )
        critic_loss = torch.mean(
            torch.nn.MSELoss(reduction="none")(target_critic, fwd_out[QF_PREDS])
        )
        # If we have a twin-Q architecture, calculate its loss, too.
        if config.twin_q:
            critic_twin_loss = (
                torch.mean(
                    torch.nn.MSELoss(reduction="none")(
                        target_critic, fwd_out[QF_TWIN_PREDS]
                    )
                )
                * 0.5
            )
            # Halve the main critic loss as well, so the sum of the two
            # halved losses averages the two critics.
            critic_loss *= 0.5

        # Compute the total loss.
        total_loss = value_loss + actor_loss + critic_loss
        # If we have a twin-Q architecture, add its loss.
        if config.twin_q:
            total_loss += critic_twin_loss

        # Log metrics.
        self.metrics.log_dict(
            {
                POLICY_LOSS_KEY: actor_loss,
                QF_LOSS_KEY: critic_loss,
            },
            key=module_id,
            window=1,  # <- single items (should not be mean/ema-reduced over time).
        )
        # Log the losses also in the temporary containers for gradient computation.
        self._temp_losses[(module_id, POLICY_LOSS_KEY)] = actor_loss
        self._temp_losses[(module_id, QF_LOSS_KEY)] = critic_loss
        self._temp_losses[(module_id, VF_LOSS)] = value_loss

        # If a twin-Q architecture is used add metrics and loss.
        if config.twin_q:
            self.metrics.log_value(
                key=(module_id, QF_TWIN_LOSS_KEY),
                value=critic_twin_loss,
                window=1,  # <- single items (should not be mean/ema-reduced over time).
            )
            self._temp_losses[(module_id, QF_TWIN_LOSS_KEY)] = critic_twin_loss

        return total_loss

    @override(TorchLearner)
    def compute_gradients(
        self, loss_per_module: Dict[ModuleID, TensorType], **kwargs
    ) -> ParamDict:
        """Runs one backward pass per registered optimizer and collects the grads.

        NOTE(review): This pops `self._temp_losses[(module_id, optim_name +
        "_loss")]`, so each optimizer name plus the "_loss" suffix must match
        the key used in `compute_loss_for_module` (presumably e.g. "policy" ->
        POLICY_LOSS_KEY) — verify the constants' values before renaming any
        optimizer.
        """
        grads = {}
        for module_id in set(loss_per_module.keys()) - {ALL_MODULES}:
            # Loop through optimizers registered for this module.
            for optim_name, optim in self.get_optimizers_for_module(module_id):
                # Zero the gradients. Note, we need to reset the gradients b/c
                # each component for a module operates on the same graph.
                optim.zero_grad(set_to_none=True)
                # Compute the gradients for the component and module.
                loss_tensor = self._temp_losses.pop((module_id, optim_name + "_loss"))
                loss_tensor.backward(retain_graph=True)
                # Store the gradients for the component and module.
                grads.update(
                    {
                        pid: p.grad
                        for pid, p in self.filter_param_dict_for_optimizer(
                            self._params, optim
                        ).items()
                    }
                )
        # Make sure we updated on all loss terms.
        assert not self._temp_losses
        return grads

    def _expectile_loss(self, diff: TensorType, expectile: TensorType) -> TensorType:
        """Computes the expectile loss.

        Args:
            diff: A tensor containing a difference loss.
            expectile: The expectile to use for the expectile loss.

        Returns:
            The expectile loss of `diff` using `expectile`.
        """
        # Asymmetric squared loss: positive diffs weighted by `expectile`,
        # negative ones by `1 - expectile`.
        weight = torch.where(diff > 0, expectile, 1 - expectile)
        return weight * torch.pow(diff, 2)
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/algorithms/iql/torch/iql_torch_learner.py",
"license": "Apache License 2.0",
"lines": 213,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:rllib/examples/curriculum/pong_curriculum_learning.py | """Example of using curriculum learning for Atari Pong by implementing a custom callback.
This example:
- demonstrates how to define a curriculum for an agent playing gymnasium's Atari
Pong.
- defines a custom callback that gets called once per iteration and - if the agent
performs well enough - increases the task difficulty, i.e. the `frameskip` for all
environments on all EnvRunners (the agent must act now faster).
- also demonstrates how to provide the callback with varying curriculum parameters
(like threshold maps, returns at which the curriculum ends, etc.).
- uses Ray Tune and RLlib to curriculum-learn Atari Pong with a high frameskip.
We use Atari Pong with a framestack of 4 images (i.e. observation dimensions of 64x64x4)
and start with a frameskip of 1. At a return of 15.0 we increase the frameskip to 2, at
a return of 17.0 to 3, at 19.0 to 4, and the task is solved at a frameskip of 21.0.
How to run this script
----------------------
`python [script file name].py`
Use the `--solved-return` flag to define the threshold at which curriculum learning ends.
Note that a PPO agent on Atari Pong will need a long time to learn.
To ensure the agent has not collapsed, but rather just had a bad seed, we only decrease
the frameskip when the agent performed worse than the next lower threshold. The margin by
which the agent has to be worse is defined by the `--demotion-margin` argument and defaults
to 2.0.
For debugging, use the following additional command line options
`--no-tune --num-env-runners=0`
which should allow you to set breakpoints anywhere in the RLlib code and
have the execution stop there for inspection and debugging.
For logging to your WandB account, use:
`--wandb-key=[your WandB API key] --wandb-project=[some project name]
--wandb-run-name=[optional: WandB run name (within the defined project)]`
"""
import functools
from typing import Callable
import gymnasium as gym
from ray import tune
from ray.rllib.algorithms.algorithm import Algorithm
from ray.rllib.algorithms.ppo import PPOConfig
from ray.rllib.callbacks.callbacks import RLlibCallback
from ray.rllib.connectors.env_to_module.frame_stacking import FrameStackingEnvToModule
from ray.rllib.connectors.learner.frame_stacking import FrameStackingLearner
from ray.rllib.core.rl_module.default_model_config import DefaultModelConfig
from ray.rllib.env.wrappers.atari_wrappers import wrap_atari_for_new_api_stack
from ray.rllib.examples.utils import (
add_rllib_example_script_args,
run_rllib_example_script_experiment,
)
from ray.rllib.utils.metrics import ENV_RUNNER_RESULTS, EPISODE_RETURN_MEAN
# Build the shared example-script argument parser; run (practically) forever
# and stop via the timestep budget instead of an iteration/reward limit.
parser = add_rllib_example_script_args(
    default_reward=float("inf"),
    default_timesteps=3000000,
    default_iters=100000000000,
)
parser.set_defaults(
    env="ale_py:ALE/Pong-v5",
)
parser.add_argument(
    "--solved-return",
    type=float,
    default=21.0,
    help=("The mean episode return at which we consider the task to be fully solved."),
)
parser.add_argument(
    "--demotion-margin",
    type=float,
    default=2.0,
    help=(
        "The margin below the next lower task threshold, beneath which the agent "
        " is considered to have collapsed, prompting a downgrade of the task."
    ),
)
# Use `parser` to add your own custom command line options to this script
# and (if needed) use their values to set up `config` below.
args = parser.parse_args()

# Convenience constants used when building the config below.
NUM_LEARNERS = args.num_learners or 1
ENV = args.env
class PongEnvTaskCallback(RLlibCallback):
    """Custom callback changing the frameskip in Atari Pong dependent on return."""

    def __init__(
        self,
        task_threshold_map: dict,
        remote_fn: Callable,
        demotion_margin: float = 0.0,
        solved_return: float = float("inf"),
    ):
        """Initializes a PongEnvTaskCallback instance.

        Args:
            task_threshold_map: Maps task (frameskip) -> mean episode return
                required to advance to the next task.
            remote_fn: Function executed on every EnvRunner to switch its env
                to another task (called with `new_task=...`).
            demotion_margin: How far the return must fall below the previous
                task's threshold before the task is downgraded again.
            solved_return: The mean episode return at which the final task
                (and thus the whole curriculum) counts as solved.
        """
        self.task_threshold_map = task_threshold_map
        self.remote_fn = remote_fn
        self.demotion_margin = demotion_margin
        self.solved_return = solved_return

    def on_algorithm_init(
        self,
        *,
        algorithm: "Algorithm",
        **kwargs,
    ) -> None:
        """Logs the initial task once the Algorithm has been created."""
        # Set the initial task to 1, which corresponds to a frameskip of 1.
        algorithm.metrics.log_value("current_env_task", 1, reduce="sum")

    def on_train_result(
        self,
        *,
        algorithm: Algorithm,
        metrics_logger=None,
        result: dict,
        **kwargs,
    ) -> None:
        """Promotes/demotes the env task based on the mean episode return."""
        # Store the current task inside the metrics logger in our Algorithm.
        current_task = metrics_logger.peek("current_env_task")

        # If episode return is consistently above `task_threshold_map[current_task]`,
        # we switch to a more difficult task (i.e. higher `frameskip`` if possible).
        # If we already mastered the most difficult task, we publish our victory in
        # the result dict.
        result["task_solved"] = 0.0

        # Note, in the first callback executions there may be no completed episode
        # (and therefore no episode return) reported. In this case we will skip
        # the logic to manage task difficulty.
        if EPISODE_RETURN_MEAN in result[ENV_RUNNER_RESULTS]:
            current_return = result[ENV_RUNNER_RESULTS][EPISODE_RETURN_MEAN]
        else:
            return

        # Get the threshold of the current task from the threshold map.
        threshold = self.task_threshold_map.get(current_task, float("inf"))

        # Check, if curriculum is solved.
        final_task = max(self.task_threshold_map.keys())
        if current_task == final_task and current_return >= self.solved_return:
            # Hardest task was solved -> report this in the results dict.
            result["task_solved"] = 1.0

        # Check promotion (increasing task). Note, we could use here also a promotion_patience
        # that ensures that the return is collected in a stable manner instead of a lucky shot.
        if (
            current_return >= threshold
        ):  # & result[ENV_RUNNER_RESULTS][NUM_EPISODES] > promotion_patience.
            next_task = current_task + 1
            if next_task in self.task_threshold_map:
                print(
                    f"Switching task on all EnvRunners up to #{next_task} (1=easiest, "
                    f"4=hardest), b/c R={current_return} on current task."
                )
                # Increase task.
                algorithm.env_runner_group.foreach_env_runner(
                    func=functools.partial(self.remote_fn, new_task=next_task)
                )
                metrics_logger.log_value("current_env_task", next_task, window=1)

        # Check demotion (decreasing task). The demotion margin is used to avoid decreasing
        # the task in case of an unlucky episode run. Only if the return is significantly
        # lower we decrease the task.
        previous_task = current_task - 1
        if previous_task in self.task_threshold_map:
            previous_threshold = self.task_threshold_map[previous_task]
            if current_return < previous_threshold - self.demotion_margin:
                print(
                    f"Switching task on all EnvRunners back to #{previous_task} (1=easiest, "
                    f"4=hardest), b/c R={current_return} on current task."
                )
                # Decrease to previous level.
                algorithm.env_runner_group.foreach_env_runner(
                    func=functools.partial(self.remote_fn, new_task=previous_task)
                )
                metrics_logger.log_value("current_env_task", previous_task, window=1)
# These tags allow extracting portions of this script on Anyscale.
# ws-template-code-start
def _make_env_to_module_connector(env, spaces, device):
    """Builds the env-to-module connector stacking the last 4 frames."""
    return FrameStackingEnvToModule(num_frames=4)
def _make_learner_connector(input_observation_space, input_action_space):
    """Builds the matching 4-frame-stacking connector on the Learner side."""
    return FrameStackingLearner(num_frames=4)
# Create a custom Atari setup (w/o the usual RLlib-hard-coded framestacking in it).
# We would like our frame stacking connector to do this job.
def _env_creator(cfg):
    """Creates the wrapped Atari Pong env; `cfg` is the env_config dict."""
    return wrap_atari_for_new_api_stack(
        gym.make(ENV, **cfg, render_mode="rgb_array"),
        # Perform frame-stacking through ConnectorV2 API.
        framestack=None,
    )
# Simple function sent to an EnvRunner to change the map of all its gym. Envs from
# the current one to a new (tougher) one, in which the frameskip is higher
# and the agent must therefore act faster.
def _remote_fn(env_runner, new_task: int):
    """Switches the given EnvRunner's env to frameskip=`new_task`."""
    # Override the env_config with the new setting.
    env_runner.config.env_config.update(
        {
            "frameskip": new_task,
        }
    )
    # We recreate the entire env object by changing the env_config on the worker,
    # then calling its `make_env()` method.
    env_runner.make_env()
# Task threshold map keeps track of thresholds for each task. If the threshold has
# been surpassed the task difficulty is increased.
task_threshold_map = {
    # Frameskip: Return.
    1: 15.0,
    2: 17.0,
    3: 19.0,
    4: float("inf"),
}

tune.register_env("env", _env_creator)

config = (
    PPOConfig()
    .environment(
        "env",
        env_config={
            # Make analogous to old v4 + NoFrameskip.
            "frameskip": 1,
            "full_action_space": False,
            "repeat_action_probability": 0.0,
        },
        clip_rewards=True,
    )
    .env_runners(
        env_to_module_connector=_make_env_to_module_connector,
    )
    .training(
        learner_connector=_make_learner_connector,
        train_batch_size_per_learner=4000,
        minibatch_size=128,
        lambda_=0.95,
        kl_coeff=0.5,
        clip_param=0.1,
        vf_clip_param=10.0,
        entropy_coeff=0.01,
        num_epochs=10,
        # Scale the learning rate with the number of Learner workers.
        lr=0.00015 * NUM_LEARNERS,
        grad_clip=100.0,
        grad_clip_by="global_norm",
    )
    .rl_module(
        model_config=DefaultModelConfig(
            conv_filters=[[16, 4, 2], [32, 4, 2], [64, 4, 2], [128, 4, 2]],
            conv_activation="relu",
            head_fcnet_hiddens=[256],
            vf_share_layers=True,
        ),
    )
    .callbacks(
        functools.partial(
            PongEnvTaskCallback,
            task_threshold_map=task_threshold_map,
            remote_fn=_remote_fn,
            # Avoids downgrading the task too early when the agent had an unlucky run.
            demotion_margin=args.demotion_margin,
            # The return at which the task is learned.
            solved_return=args.solved_return,
        )
    )
)


if __name__ == "__main__":
    run_rllib_example_script_experiment(config, args=args)
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/examples/curriculum/pong_curriculum_learning.py",
"license": "Apache License 2.0",
"lines": 244,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/llm/doc_code/serve/qwen/llm_yaml_config_example.py | """
This file serves as a documentation example and CI test for YAML config deployment.
Structure:
1. Config preparation: Loads the YAML config, strips accelerator requirements, and disables the vLLM compile cache for CI testing.
2. Deployment: Converts the config into a Serve app via build_openai_app and deploys it non-blocking.
3. Test validation (deployment status polling)
"""
import time
import os
import yaml
from ray import serve
from ray.serve.schema import ApplicationStatus
from ray.serve._private.constants import SERVE_DEFAULT_APP_NAME
from ray.serve import llm
# Load the YAML Serve config that sits next to this file.
config_path = os.path.join(os.path.dirname(__file__), "llm_config_example.yaml")
with open(config_path, "r") as f:
    config_dict = yaml.safe_load(f)

# Extract the `llm_configs` list of the first (and only) application.
llm_configs = config_dict["applications"][0]["args"]["llm_configs"]
for config in llm_configs:
    # Drop accelerator requirements so the deployment can run on CI nodes.
    config.pop("accelerator_type", None)
    # Disable compile cache to avoid cache corruption in CI
    if "runtime_env" not in config:
        config["runtime_env"] = {}
    if "env_vars" not in config["runtime_env"]:
        config["runtime_env"]["env_vars"] = {}
    config["runtime_env"]["env_vars"]["VLLM_DISABLE_COMPILE_CACHE"] = "1"

# Build the OpenAI-compatible Serve app from the patched configs and deploy
# it without blocking, so its status can be polled below.
app = llm.build_openai_app({"llm_configs": llm_configs})
serve.run(app, blocking=False)

# Poll the default app's status until RUNNING (or the timeout expires);
# fail fast on DEPLOY_FAILED/UNHEALTHY.
status = ApplicationStatus.NOT_STARTED
timeout_seconds = 180
start_time = time.time()
while (
    status != ApplicationStatus.RUNNING and time.time() - start_time < timeout_seconds
):
    status = serve.status().applications[SERVE_DEFAULT_APP_NAME].status
    if status in [ApplicationStatus.DEPLOY_FAILED, ApplicationStatus.UNHEALTHY]:
        raise AssertionError(f"Deployment failed with status: {status}")
    time.sleep(1)

if status != ApplicationStatus.RUNNING:
    raise AssertionError(
        f"Deployment failed to reach RUNNING status within {timeout_seconds}s. Current status: {status}"
    )
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/llm/doc_code/serve/qwen/llm_yaml_config_example.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/llm/doc_code/serve/qwen/qwen_example.py | """
This file serves as a documentation example and CI test.
Structure:
1. Monkeypatch setup: Ensures serve.run is non-blocking and removes accelerator requirements for CI testing.
2. Docs example (between __qwen_example_start/end__): Embedded in Sphinx docs via literalinclude.
3. Test validation (deployment status polling + cleanup)
"""
import time
from ray import serve
from ray.serve.schema import ApplicationStatus
from ray.serve._private.constants import SERVE_DEFAULT_APP_NAME
from ray.serve import llm
# Keep references to the unpatched implementations so the CI-testing wrappers
# below can delegate to them.
_original_serve_run = serve.run
_original_build_openai_app = llm.build_openai_app
def _non_blocking_serve_run(app, **kwargs):
    """Wrapper around `serve.run` that always deploys non-blocking (CI only)."""
    # Override any caller-provided `blocking` setting before delegating.
    patched_kwargs = {**kwargs, "blocking": False}
    return _original_serve_run(app, **patched_kwargs)
def _testing_build_openai_app(llm_serving_args):
    """Removes accelerator requirements for testing.

    Also disables the vLLM compile cache on each LLMConfig so CI runs cannot
    corrupt a shared cache, then delegates to the real `build_openai_app`.
    """
    for config in llm_serving_args["llm_configs"]:
        # Allow the deployment to schedule on CI nodes without accelerators.
        config.accelerator_type = None
        # Disable compile cache to avoid cache corruption in CI
        if not config.runtime_env:
            config.runtime_env = {}
        if "env_vars" not in config.runtime_env:
            config.runtime_env["env_vars"] = {}
        config.runtime_env["env_vars"]["VLLM_DISABLE_COMPILE_CACHE"] = "1"
    return _original_build_openai_app(llm_serving_args)
# Install the CI patches: non-blocking deploys, no accelerator requirement.
serve.run = _non_blocking_serve_run
llm.build_openai_app = _testing_build_openai_app

# __qwen_example_start__
from ray import serve
from ray.serve.llm import LLMConfig, build_openai_app

llm_config = LLMConfig(
    model_loading_config={
        "model_id": "qwen-0.5b",
        "model_source": "Qwen/Qwen2.5-0.5B-Instruct",
    },
    deployment_config={
        "autoscaling_config": {
            "min_replicas": 1,
            "max_replicas": 2,
        }
    },
    # Pass the desired accelerator type (e.g. A10G, L4, etc.)
    accelerator_type="A10G",
    # You can customize the engine arguments (e.g. vLLM engine kwargs)
    engine_kwargs={
        "tensor_parallel_size": 2,
    },
)

app = build_openai_app({"llm_configs": [llm_config]})
serve.run(app, blocking=True)
# __qwen_example_end__

# Poll the default app until RUNNING (or timeout); fail fast on bad states.
status = ApplicationStatus.NOT_STARTED
timeout_seconds = 180
start_time = time.time()
while (
    status != ApplicationStatus.RUNNING and time.time() - start_time < timeout_seconds
):
    status = serve.status().applications[SERVE_DEFAULT_APP_NAME].status
    if status in [ApplicationStatus.DEPLOY_FAILED, ApplicationStatus.UNHEALTHY]:
        raise AssertionError(f"Deployment failed with status: {status}")
    time.sleep(1)

if status != ApplicationStatus.RUNNING:
    raise AssertionError(
        f"Deployment failed to reach RUNNING status within {timeout_seconds}s. Current status: {status}"
    )

serve.shutdown()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/llm/doc_code/serve/qwen/qwen_example.py",
"license": "Apache License 2.0",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/train/v2/tests/test_worker_group_poll_status.py | import pytest
from ray.train.v2._internal.execution.worker_group.poll import (
ERR_CHAR_LIMIT,
WorkerGroupPollStatus,
WorkerStatus,
_normalize_error_string,
)
def test_get_error_string_basic():
    """
    Simulate four workers, two with the same error, one with a different error,
    and one without an error.
    """
    worker_errors = (
        (0, ValueError("An error")),
        (1, None),
        (2, RuntimeError("Different error")),
        (3, ValueError("An error")),
    )
    poll_status = WorkerGroupPollStatus(
        worker_statuses={
            rank: WorkerStatus(running=False, error=err)
            for rank, err in worker_errors
        }
    )
    # Identical errors (ranks 0 and 3) are grouped into one snippet.
    assert poll_status.get_error_string() == (
        "[Rank 0,3 Error Snippet]:\nAn error\n[Rank 2 Error Snippet]:\nDifferent error"
    )
def test_get_error_string_with_numbers():
    """
    Simulate workers with similar errors that differ only by numbers.
    These should be grouped together.
    """
    addresses = ("0x7f8b12345678", "0x7f8b12345679")
    poll_status = WorkerGroupPollStatus(
        worker_statuses={
            rank: WorkerStatus(
                running=False, error=ValueError(f"Error parsing object at {addr}")
            )
            for rank, addr in enumerate(addresses)
        }
    )
    # Both errors normalize to the same string, so they share one snippet.
    assert (
        poll_status.get_error_string()
        == "[Rank 0,1 Error Snippet]:\nError parsing object at 0x7f8b12345678"
    )
def test_get_error_string_long_error():
    """
    Simulate two workers with identical long error string.
    """
    long_error = "test string" * 200
    poll_status = WorkerGroupPollStatus(
        worker_statuses={
            rank: WorkerStatus(running=False, error=long_error) for rank in (0, 1)
        }
    )
    # The snippet keeps the first and last ERR_CHAR_LIMIT // 2 characters and
    # inserts a truncation notice between them.
    half = ERR_CHAR_LIMIT // 2
    expected = (
        "[Rank 0,1 Error Snippet]:\n"
        + long_error[:half]
        + "...\n... (Output truncated. See individual worker logs for full details) ...\n"
        + long_error[len(long_error) - half :]
    )
    assert poll_status.get_error_string() == expected
def test_normalize_error_string():
    """Test that _normalize_error_string properly handles all types of numbers."""
    # A realistic traceback containing line numbers, timestamps, and hex-like
    # package hashes; every digit run should be replaced by "<NUM>".
    error = """Traceback (most recent call last):
File "/home/ray/default/train_benchmark.py", line 35, in train_fn_per_worker
File "/tmp/ray/session_2025-08-07_23-49-55_617067_2585/runtime_resources/working_dir_files/_ray_pkg_5abd79ca51ba0ed4/runner.py", line 282, in run"""
    result = _normalize_error_string(error)
    assert (
        result
        == """Traceback (most recent call last):
File "/home/ray/default/train_benchmark.py", line <NUM>, in train_fn_per_worker
File "/tmp/ray/session_<NUM>-<NUM>-<NUM>_<NUM>-<NUM>-<NUM>_<NUM>_<NUM>/runtime_resources/working_dir_files/_ray_pkg_<NUM>abd<NUM>ca<NUM>ba<NUM>ed<NUM>/runner.py", line <NUM>, in run"""
    )
if __name__ == "__main__":
    import sys

    # Run this file's tests directly: verbose, stop at the first failure.
    sys.exit(pytest.main(["-v", "-x", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/tests/test_worker_group_poll_status.py",
"license": "Apache License 2.0",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:rllib/env/external/env_runner_server_for_external_inference.py | import pickle
import socket
import threading
import time
from collections import defaultdict
from typing import Collection, DefaultDict, List, Optional, Union
from ray.rllib.core import (
COMPONENT_RL_MODULE,
DEFAULT_AGENT_ID,
DEFAULT_MODULE_ID,
)
from ray.rllib.env import INPUT_ENV_SPACES
from ray.rllib.env.env_runner import EnvRunner
from ray.rllib.env.external.rllink import (
RLlink,
get_rllink_message,
send_rllink_message,
)
from ray.rllib.env.single_agent_env_runner import SingleAgentEnvRunner
from ray.rllib.env.single_agent_episode import SingleAgentEpisode
from ray.rllib.utils.annotations import override
from ray.rllib.utils.checkpoints import Checkpointable
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.metrics import (
EPISODE_AGENT_RETURN_MEAN,
EPISODE_DURATION_SEC_MEAN,
EPISODE_LEN_MAX,
EPISODE_LEN_MEAN,
EPISODE_LEN_MIN,
EPISODE_MODULE_RETURN_MEAN,
EPISODE_RETURN_MAX,
EPISODE_RETURN_MEAN,
EPISODE_RETURN_MIN,
WEIGHTS_SEQ_NO,
)
from ray.rllib.utils.metrics.metrics_logger import MetricsLogger
from ray.rllib.utils.typing import EpisodeID, StateDict
from ray.util.annotations import DeveloperAPI
torch, _ = try_import_torch()
@DeveloperAPI
class EnvRunnerServerForExternalInference(EnvRunner, Checkpointable):
"""An EnvRunner communicating with an external env through a TCP socket.
This implementation assumes:
- Only one external client ever connects to this env runner.
- The external client owns the connector pipelines (env-to-module and module-to-env)
as well as the RLModule and thus performs inference locally. Samples are sent in
bulk as lists of RLlib episodes once a certain number of timesteps has been executed
on the client's side.
- A copy of the RLModule is kept at all times on this EnvRunner, but is never used
for inference, only as a weights container.
TODO (sven): The above might be inefficient as we have to store basically two
models, one in this EnvRunner, one in the env (as ONNX).
- As a consequence, there are no environment and no connectors on this env runner.
The external env is responsible for generating all the data to create episodes.
"""
@override(EnvRunner)
def __init__(self, *, config, **kwargs):
    """Initializes an EnvRunnerServerForExternalInference instance.

    Args:
        config: The AlgorithmConfig to use for setup.

    Keyword Args:
        port: The base port number. The server socket is then actually bound to
            `port` + self.worker_index.
    """
    super().__init__(config=config, **kwargs)

    self.worker_index: int = kwargs.get("worker_index", 0)
    # Version counter for the weights currently held by `self.module`.
    self._weights_seq_no = 0

    # Build the module from its spec. It serves as a weights container only;
    # inference happens on the external client (see class docstring).
    module_spec = self.config.get_rl_module_spec(
        spaces=self.get_spaces(), inference_only=True
    )
    self.module = module_spec.build()

    # TCP server address: each worker listens on base port + worker_index.
    self.host = "localhost"
    self.port = int(self.config.env_config.get("port", 5555)) + self.worker_index
    self.server_socket = None
    self.client_socket = None
    self.address = None

    self.metrics: MetricsLogger = MetricsLogger(
        stats_cls_lookup=config.stats_cls_lookup, root=False
    )

    # Episode chunks deposited by the listener thread; consumed by `sample()`.
    self._episode_chunks_to_return: Optional[List[SingleAgentEpisode]] = None
    # Bookkeeping consumed by `get_metrics()`.
    self._done_episodes_for_metrics: List[SingleAgentEpisode] = []
    self._ongoing_episodes_for_metrics: DefaultDict[
        EpisodeID, List[SingleAgentEpisode]
    ] = defaultdict(list)

    # Guards `_episode_chunks_to_return` (written by the listener thread,
    # read/cleared by `sample()`).
    self._sample_lock = threading.Lock()
    self._on_policy_lock = threading.Lock()
    self._blocked_on_state = False

    # Start a background thread for client communication.
    self.thread = threading.Thread(
        target=self._client_message_listener, daemon=True
    )
    self.thread.start()
@override(EnvRunner)
def assert_healthy(self):
    """Raises AssertionError if the server socket was never opened."""
    msg = "Server socket is None (not connected, not listening)."
    assert self.server_socket is not None, msg
@override(EnvRunner)
def sample(self, **kwargs):
    """Waits for the client to send episodes.

    Blocks (polling every 10ms) until the listener thread has deposited a
    list of episode chunks, updates the episode/metric bookkeeping, and
    returns those chunks.
    """
    while True:
        with self._sample_lock:
            if self._episode_chunks_to_return is not None:
                num_env_steps = 0
                num_episodes_completed = 0
                # Sort chunks into done vs. ongoing for `get_metrics()`.
                for eps in self._episode_chunks_to_return:
                    if eps.is_done:
                        self._done_episodes_for_metrics.append(eps)
                        num_episodes_completed += 1
                    else:
                        self._ongoing_episodes_for_metrics[eps.id_].append(eps)
                    num_env_steps += len(eps)

                ret = self._episode_chunks_to_return
                # Clear the slot so the listener thread can deposit new data.
                self._episode_chunks_to_return = None
                # Reuse SingleAgentEnvRunner's sampled-metrics logging.
                SingleAgentEnvRunner._increase_sampled_metrics(
                    self, num_env_steps, num_episodes_completed
                )
                return ret
        time.sleep(0.01)
@override(EnvRunner)
def get_metrics(self):
    """Logs per-episode stats for all completed episodes and returns the
    reduced results from the internal MetricsLogger."""
    # TODO (sven): We should probably make this a utility function to be called
    # from within Single/MultiAgentEnvRunner and other EnvRunner subclasses, as
    # needed.
    # Compute per-episode metrics (only on already completed episodes).
    for eps in self._done_episodes_for_metrics:
        assert eps.is_done
        episode_length = len(eps)
        episode_return = eps.get_return()
        episode_duration_s = eps.get_duration_s()
        # Don't forget about the already returned chunks of this episode:
        # fold their lengths/returns/durations into the totals, then drop
        # the ongoing-cache entry.
        if eps.id_ in self._ongoing_episodes_for_metrics:
            for eps2 in self._ongoing_episodes_for_metrics[eps.id_]:
                episode_length += len(eps2)
                episode_return += eps2.get_return()
                episode_duration_s += eps2.get_duration_s()
            del self._ongoing_episodes_for_metrics[eps.id_]
        self._log_episode_metrics(
            episode_length, episode_return, episode_duration_s
        )
    # Now that we have logged everything, clear cache of done episodes.
    self._done_episodes_for_metrics.clear()
    # Return reduced metrics.
    return self.metrics.reduce()
def get_spaces(self):
    """Returns the spaces dict (env-level and per-module) used for module building."""
    spaces = {
        INPUT_ENV_SPACES: (self.config.observation_space, self.config.action_space),
        DEFAULT_MODULE_ID: (
            self.config.observation_space,
            self.config.action_space,
        ),
    }
    return spaces
@override(EnvRunner)
def stop(self):
    """Shuts this EnvRunner down by closing its client and server sockets."""
    self._close_sockets_if_necessary()
@override(Checkpointable)
def get_ctor_args_and_kwargs(self):
    """Returns the (args, kwargs) needed to re-construct this EnvRunner."""
    args = ()
    kwargs = {"config": self.config}
    return args, kwargs
@override(Checkpointable)
def get_checkpointable_components(self):
    """Returns the (name, component) pairs checkpointed with this EnvRunner."""
    return [(COMPONENT_RL_MODULE, self.module)]
@override(Checkpointable)
def get_state(
    self,
    components: Optional[Union[str, Collection[str]]] = None,
    *,
    not_components: Optional[Union[str, Collection[str]]] = None,
    **kwargs,
) -> StateDict:
    """Returns this EnvRunner's state: module weights plus weights seq-no."""
    state = {
        COMPONENT_RL_MODULE: self.module.get_state(),
        WEIGHTS_SEQ_NO: self._weights_seq_no,
    }
    return state
@override(Checkpointable)
def set_state(self, state: StateDict) -> None:
    """Updates the RLModule weights from `state`.

    If a client is currently blocked waiting for new weights (on-policy
    mode), the fresh state is forwarded to it right away.
    """
    # Update the RLModule state.
    if COMPONENT_RL_MODULE in state:
        # A missing value for WEIGHTS_SEQ_NO or a value of 0 means: Force the
        # update.
        weights_seq_no = state.get(WEIGHTS_SEQ_NO, 0)
        # Only update the weights, if this is the first synchronization or
        # if the weights of this `EnvRunner` lag behind the actual ones.
        if weights_seq_no == 0 or self._weights_seq_no < weights_seq_no:
            rl_module_state = state[COMPONENT_RL_MODULE]
            # Unwrap a possible {module_id: state} mapping down to this
            # (single, default) module's state.
            if (
                isinstance(rl_module_state, dict)
                and DEFAULT_MODULE_ID in rl_module_state
            ):
                rl_module_state = rl_module_state[DEFAULT_MODULE_ID]
            self.module.set_state(rl_module_state)
        # Update our weights_seq_no, if the new one is > 0.
        if weights_seq_no > 0:
            self._weights_seq_no = weights_seq_no
        # On-policy client is waiting -> send the new weights and unblock
        # the listener loop.
        if self._blocked_on_state is True:
            self._send_set_state_message()
            self._blocked_on_state = False
def _client_message_listener(self):
    """Entry point for the background listener thread."""
    # Set up the server socket and bind to the specified host and port.
    self._recycle_sockets()
    # Enter an endless message receival- and processing loop.
    while True:
        # As long as we are blocked on a new state, sleep a bit and continue.
        # Do NOT process any incoming messages (until we send out the new state
        # back to the client).
        if self._blocked_on_state is True:
            time.sleep(0.01)
            continue
        try:
            # Blocking call to get next message.
            msg_type, msg_body = get_rllink_message(self.client_socket)
            # Process the message received based on its type.
            # Initial handshake.
            if msg_type == RLlink.PING:
                self._send_pong_message()
            # Episode data from the client.
            elif msg_type in [
                RLlink.EPISODES,
                RLlink.EPISODES_AND_GET_STATE,
            ]:
                self._process_episodes_message(msg_type, msg_body)
            # Client requests the state (model weights).
            elif msg_type == RLlink.GET_STATE:
                self._send_set_state_message()
            # Clients requests config information.
            elif msg_type == RLlink.GET_CONFIG:
                self._send_set_config_message()
        except ConnectionError as e:
            # Any socket/protocol error -> drop this connection and wait for a
            # new client (with a 5s grace period before re-binding).
            print(f"Messaging/connection error {e}! Recycling sockets ...")
            self._recycle_sockets(5.0)
            continue
def _recycle_sockets(self, sleep: float = 0.0):
    """(Re)creates the server socket and blocks until a client connects.

    Args:
        sleep: Seconds to wait between closing the old sockets and re-binding.
    """
    # Close all old sockets, if they exist.
    self._close_sockets_if_necessary()
    time.sleep(sleep)
    # Start listening on the configured port.
    self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow reuse of the address (avoids bind errors while the old socket
    # lingers in TIME_WAIT).
    self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    self.server_socket.bind((self.host, self.port))
    # Listen for a single connection.
    self.server_socket.listen(1)
    print(f"Waiting for client to connect to port {self.port}...")
    # Blocks until the external env connects.
    self.client_socket, self.address = self.server_socket.accept()
    print(f"Connected to client at {self.address}")
def _close_sockets_if_necessary(self):
    """Closes the client and server sockets (if open) and resets them to None.

    Resetting the attributes to None makes this method idempotent (no
    double-close on repeated recycling) and lets `assert_healthy()` correctly
    fail after `stop()` has been called.
    """
    if self.client_socket:
        self.client_socket.close()
        self.client_socket = None
    if self.server_socket:
        self.server_socket.close()
        self.server_socket = None
def _send_pong_message(self):
    """Replies to a client PING with a PONG (completes the handshake)."""
    pong = {"type": RLlink.PONG.name}
    send_rllink_message(self.client_socket, pong)
def _process_episodes_message(self, msg_type, msg_body):
    """Deserializes episodes from `msg_body` and queues them for `sample()`."""
    # On-policy training -> we have to block until we get a new `set_state` call
    # (b/c the learning step is done and we can send new weights back to all
    # clients).
    if msg_type == RLlink.EPISODES_AND_GET_STATE:
        self._blocked_on_state = True
    episodes = []
    for episode_state in msg_body["episodes"]:
        episode = SingleAgentEpisode.from_state(episode_state)
        # Convert per-step data to numpy before handing off.
        episodes.append(episode.to_numpy())
    # Push episodes into the to-be-returned list (for `sample()` requests).
    # Extend if a previous batch hasn't been picked up yet; otherwise start
    # a new list.
    with self._sample_lock:
        if isinstance(self._episode_chunks_to_return, list):
            self._episode_chunks_to_return.extend(episodes)
        else:
            self._episode_chunks_to_return = episodes
def _send_set_state_message(self):
    """Sends the current module weights to the client as a SET_STATE message."""
    send_rllink_message(
        self.client_socket,
        {
            "type": RLlink.SET_STATE.name,
            # NOTE(review): `inference_only` is absorbed by `get_state()`'s
            # **kwargs (its signature doesn't declare it) -- confirm it is
            # honored downstream.
            "state": self.get_state(inference_only=True),
        },
    )
def _send_set_config_message(self):
    """Sends the (pickled) AlgorithmConfig to the client as a SET_CONFIG message."""
    send_rllink_message(
        self.client_socket,
        {
            "type": RLlink.SET_CONFIG.name,
            # TODO (sven): We need AlgorithmConfig to be a `Checkpointable` with a
            # msgpack'able state.
            # NOTE(review): pickle requires the client to fully trust this
            # server; never unpickle data from untrusted peers.
            "config": pickle.dumps(self.config),
        },
    )
def _log_episode_metrics(self, length, ret, sec):
    """Logs one finished episode's length/return/duration into `self.metrics`."""
    # Mimic the old API stack behavior: use a `window` for these particular
    # stats (instead of the default EMA).
    win = self.config.metrics_num_episodes_for_smoothing
    # Mean stats: general, per-agent, and per-RLModule.
    self.metrics.log_value(EPISODE_LEN_MEAN, length, window=win)
    self.metrics.log_value(EPISODE_RETURN_MEAN, ret, window=win)
    self.metrics.log_value(EPISODE_DURATION_SEC_MEAN, sec, window=win)
    self.metrics.log_value(
        (EPISODE_AGENT_RETURN_MEAN, DEFAULT_AGENT_ID), ret, window=win
    )
    self.metrics.log_value(
        (EPISODE_MODULE_RETURN_MEAN, DEFAULT_MODULE_ID), ret, window=win
    )
    # For some metrics, also track min/max extremes.
    extremes = (
        (EPISODE_LEN_MIN, length, "min"),
        (EPISODE_RETURN_MIN, ret, "min"),
        (EPISODE_LEN_MAX, length, "max"),
        (EPISODE_RETURN_MAX, ret, "max"),
    )
    for key, value, reduce_op in extremes:
        self.metrics.log_value(key, value, reduce=reduce_op, window=win)
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/env/external/env_runner_server_for_external_inference.py",
"license": "Apache License 2.0",
"lines": 316,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:rllib/env/external/rllink.py | from enum import Enum
from packaging.version import Version
from ray.rllib.utils.checkpoints import try_import_msgpack
from ray.util.annotations import DeveloperAPI
msgpack = None
@DeveloperAPI
class RLlink(Enum):
    """Message types of the RLlink protocol between RLlib and external envs."""
    # Version of the wire protocol implemented by this enum.
    PROTOCOL_VERSION = Version("0.0.1")
    # Requests: Client (external env) -> Server (RLlib).
    # ----
    # Ping command (initial handshake).
    PING = "PING"
    # List of episodes (similar to what an EnvRunner.sample() call would return).
    EPISODES = "EPISODES"
    # Request state (e.g. model weights).
    GET_STATE = "GET_STATE"
    # Request Algorithm config.
    GET_CONFIG = "GET_CONFIG"
    # Send episodes and request the next state update right after that.
    # Clients sending this message should wait for a SET_STATE message as an immediate
    # response. Useful for external samplers that must collect on-policy data.
    EPISODES_AND_GET_STATE = "EPISODES_AND_GET_STATE"
    # Responses: Server (RLlib) -> Client (external env).
    # ----
    # Pong response (initial handshake).
    PONG = "PONG"
    # Set state (e.g. model weights).
    SET_STATE = "SET_STATE"
    # Set Algorithm config.
    SET_CONFIG = "SET_CONFIG"
    # @OldAPIStack (to be deprecated soon).
    ACTION_SPACE = "ACTION_SPACE"
    OBSERVATION_SPACE = "OBSERVATION_SPACE"
    GET_WORKER_ARGS = "GET_WORKER_ARGS"
    GET_WEIGHTS = "GET_WEIGHTS"
    REPORT_SAMPLES = "REPORT_SAMPLES"
    START_EPISODE = "START_EPISODE"
    GET_ACTION = "GET_ACTION"
    LOG_ACTION = "LOG_ACTION"
    LOG_RETURNS = "LOG_RETURNS"
    END_EPISODE = "END_EPISODE"
    def __str__(self):
        # Print the bare member name (e.g. "PING"), not "RLlink.PING".
        return self.name
@DeveloperAPI
def send_rllink_message(sock_, message: dict):
    """Sends a single RLlink message over `sock_`.

    Wire format: an 8-byte zero-padded ASCII length header, followed by the
    msgpack-encoded message body.

    Args:
        sock_: The (connected) socket to send through.
        message: The message dict to encode and send.

    Raises:
        ConnectionError: If encoding/sending over the socket fails.
    """
    global msgpack
    # Lazily import msgpack on first use (hard error if unavailable).
    if msgpack is None:
        msgpack = try_import_msgpack(error=True)
    body = msgpack.packb(message, use_bin_type=True)
    header = str(len(body)).zfill(8).encode("utf-8")
    try:
        sock_.sendall(header + body)
    except Exception as e:
        # Chain the original exception so the root cause stays visible.
        raise ConnectionError(
            f"Error sending message {message} to server on socket {sock_}! "
            f"Original error was: {e}"
        ) from e
@DeveloperAPI
def get_rllink_message(sock_):
    """Receives one RLlink message from the peer (length-header protocol).

    Wire format: an 8-byte zero-padded ASCII length header, followed by a
    msgpack-encoded body that must contain a "type" field.

    Returns:
        Tuple of (RLlink message type, remaining message dict with "type"
        removed).

    Raises:
        ConnectionError: On any socket, decoding, or protocol error.
    """
    global msgpack
    # Lazily import msgpack on first use (hard error if unavailable).
    if msgpack is None:
        msgpack = try_import_msgpack(error=True)
    try:
        # Read the length header (8 bytes).
        header = _get_num_bytes(sock_, 8)
        msg_length = int(header.decode("utf-8"))
        # Read the message body.
        body = _get_num_bytes(sock_, msg_length)
        # Decode the msgpack body.
        message = msgpack.unpackb(body, raw=False)
        # Check for proper protocol.
        if "type" not in message:
            raise ConnectionError(
                "Protocol Error! Message from peer does not contain `type` field."
            )
        return RLlink(message.pop("type")), message
    except Exception as e:
        # Chain the original exception so the root cause stays visible.
        raise ConnectionError(
            f"Error receiving message from peer on socket {sock_}! "
            f"Original error was: {e}"
        ) from e
def _get_num_bytes(sock_, num_bytes):
"""Helper function to receive a specific number of bytes."""
data = b""
while len(data) < num_bytes:
packet = sock_.recv(num_bytes - len(data))
if not packet:
raise ConnectionError(f"No data received from socket {sock_}!")
data += packet
return data
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/env/external/rllink.py",
"license": "Apache License 2.0",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:rllib/examples/envs/classes/utils/dummy_external_client.py | import pickle
import socket
import time
import gymnasium as gym
import numpy as np
from ray.rllib.core import (
COMPONENT_RL_MODULE,
Columns,
)
from ray.rllib.env.external.rllink import (
RLlink,
get_rllink_message,
send_rllink_message,
)
from ray.rllib.env.single_agent_episode import SingleAgentEpisode
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.numpy import softmax
torch, _ = try_import_torch()
def _dummy_external_client(port: int = 5556):
    """A dummy client that runs CartPole and acts as a testing external env.

    Connects to an RLlib EnvRunner server on `localhost:port`, fetches the
    AlgorithmConfig and initial weights, then loops forever: act in CartPole,
    collect episodes, and send them to the server on-policy (waiting for new
    weights after each batch).

    Args:
        port: The port of the RLlib server to connect to.
    """
    def _set_state(msg_body, rl_module):
        # Load received weights into the local RLModule copy.
        rl_module.set_state(msg_body[COMPONENT_RL_MODULE])
        # return msg_body[WEIGHTS_SEQ_NO]
    # Connect to server (retry every 5s until it's up).
    while True:
        try:
            print(f"Trying to connect to localhost:{port} ...")
            sock_ = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock_.connect(("localhost", port))
            break
        except ConnectionRefusedError:
            time.sleep(5)
    # Send ping-pong (handshake).
    send_rllink_message(sock_, {"type": RLlink.PING.name})
    msg_type, msg_body = get_rllink_message(sock_)
    assert msg_type == RLlink.PONG
    # Request config.
    send_rllink_message(sock_, {"type": RLlink.GET_CONFIG.name})
    msg_type, msg_body = get_rllink_message(sock_)
    assert msg_type == RLlink.SET_CONFIG
    config = pickle.loads(msg_body["config"])
    # Create the RLModule.
    rl_module = config.get_rl_module_spec().build()
    # Request state/weights.
    send_rllink_message(sock_, {"type": RLlink.GET_STATE.name})
    msg_type, msg_body = get_rllink_message(sock_)
    assert msg_type == RLlink.SET_STATE
    _set_state(msg_body["state"], rl_module)
    env_steps_per_sample = config.get_rollout_fragment_length()
    # Start actual env loop.
    env = gym.make("CartPole-v1")
    obs, _ = env.reset()
    episode = SingleAgentEpisode(observations=[obs])
    episodes = [episode]
    while True:
        # Perform action inference using the RLModule.
        logits = rl_module.forward_exploration(
            batch={
                Columns.OBS: torch.tensor(np.array([obs], np.float32)),
            }
        )[Columns.ACTION_DIST_INPUTS][
            0
        ].numpy()  # [0]=batch size 1
        # Stochastic sample from the action distribution.
        action_probs = softmax(logits)
        action = int(np.random.choice(list(range(env.action_space.n)), p=action_probs))
        logp = float(np.log(action_probs[action]))
        # Perform the env step.
        obs, reward, terminated, truncated, _ = env.step(action)
        # Collect step data (incl. dist inputs/logp for the learner).
        episode.add_env_step(
            action=action,
            reward=reward,
            observation=obs,
            terminated=terminated,
            truncated=truncated,
            extra_model_outputs={
                Columns.ACTION_DIST_INPUTS: logits,
                Columns.ACTION_LOGP: logp,
            },
        )
        # We collected enough samples -> Send them to server.
        if sum(map(len, episodes)) == env_steps_per_sample:
            # Send the data to the server.
            send_rllink_message(
                sock_,
                {
                    "type": RLlink.EPISODES_AND_GET_STATE.name,
                    "episodes": [e.get_state() for e in episodes],
                    "timesteps": env_steps_per_sample,
                },
            )
            # We are forced to sample on-policy. Have to wait for a response
            # with the state (weights) in it.
            msg_type, msg_body = get_rllink_message(sock_)
            assert msg_type == RLlink.SET_STATE
            _set_state(msg_body["state"], rl_module)
            # Start a fresh batch; continue the current (unfinished) episode
            # as a new chunk.
            episodes = []
            if not episode.is_done:
                episode = episode.cut()
                episodes.append(episode)
        # If episode is done, reset env and create a new episode.
        if episode.is_done:
            obs, _ = env.reset()
            episode = SingleAgentEpisode(observations=[obs])
            episodes.append(episode)
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/examples/envs/classes/utils/dummy_external_client.py",
"license": "Apache License 2.0",
"lines": 106,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:ci/raydepsets/tests/test_workspace.py | import sys
import tempfile
import unittest
from pathlib import Path
import pytest
from ci.raydepsets.tests.utils import (
copy_data_to_tmpdir,
get_depset_by_name,
write_to_config_file,
)
from ci.raydepsets.workspace import (
BuildArgSet,
Depset,
Workspace,
_substitute_build_args,
)
def test_workspace_init():
    """A Workspace constructed from a tmpdir exposes a non-None `dir`."""
    with tempfile.TemporaryDirectory() as tmpdir:
        ws = Workspace(tmpdir)
        assert ws.dir is not None
def test_parse_build_arg_sets():
    """Build-arg-set expansion produces the expected per-variant depset names."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        config = Workspace(dir=tmpdir).load_config(
            config_path=Path(tmpdir) / "test.depsets.yaml"
        )
        names = [depset.name for depset in config.depsets]
        assert "general_depset__py311_cpu" in names
        assert "build_args_test_depset__py311_cpu" in names
        assert "expanded_depset__py311_cpu" in names
def test_substitute_build_args():
    """${VAR} placeholders in depset fields get replaced from the build args."""
    build_arg_set = BuildArgSet(
        build_args={
            "PYTHON_VERSION": "py311",
            "CUDA_VERSION": "cu128",
        },
    )
    raw_depset = {
        "name": "test_depset_${PYTHON_VERSION}_${CUDA_VERSION}",
        "operation": "compile",
        "requirements": ["requirements_test.txt"],
        "output": "requirements_compiled_test_${PYTHON_VERSION}_${CUDA_VERSION}.txt",
    }
    result = _substitute_build_args(raw_depset, build_arg_set)
    assert result["name"] == "test_depset_py311_cu128"
    assert result["output"] == "requirements_compiled_test_py311_cu128.txt"
def test_invalid_build_arg_set():
    # Referencing a build arg set that is not defined anywhere must raise a
    # KeyError when the config is loaded.
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        with open(Path(tmpdir) / "test.depsets.yaml", "w") as f:
            # NOTE(review): the indentation inside this literal is YAML
            # content -- keep list items/fields consistently aligned.
            f.write(
                """
depsets:
  - name: invalid_build_arg_set
    operation: compile
    requirements:
      - requirements_test.txt
    output: requirements_compiled_invalid_build_arg_set.txt
    build_arg_sets:
      - invalid_build_arg_set
"""
            )
        with pytest.raises(KeyError):
            workspace = Workspace(dir=tmpdir)
            workspace.load_config(config_path=Path(tmpdir) / "test.depsets.yaml")
def test_parse_pre_hooks():
    """`pre_hooks` entries from the config land on the parsed Depset."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        config = Workspace(dir=tmpdir).load_config(
            config_path=Path(tmpdir) / "test2.depsets.yaml"
        )
        depset = get_depset_by_name(config.depsets, "pre_hook_test_depset")
        assert depset.pre_hooks == ["pre-hook-test.sh"]
def test_load_first_config():
    """Loading test.depsets.yaml alone yields exactly its 7 depsets."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        config = Workspace(dir=tmpdir).load_config(
            config_path=Path(tmpdir) / "test.depsets.yaml"
        )
        assert config.depsets is not None
        assert len(config.depsets) == 7
def test_load_second_config():
    """Loading test2.depsets.yaml alone yields exactly its 3 depsets."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        config = Workspace(dir=tmpdir).load_config(
            config_path=Path(tmpdir) / "test2.depsets.yaml"
        )
        assert config.depsets is not None
        assert len(config.depsets) == 3
# `load_configs` should always load ALL depsets, no matter which config file
# is passed as the entry point.
def test_load_all_configs_first_config():
    """load_configs returns all 10 depsets regardless of the entry config."""
    for entry_config in ("test.depsets.yaml", "test2.depsets.yaml"):
        with tempfile.TemporaryDirectory() as tmpdir:
            copy_data_to_tmpdir(tmpdir)
            config = Workspace(dir=tmpdir).load_configs(
                config_path=Path(tmpdir) / entry_config
            )
            assert config.depsets is not None
            assert len(config.depsets) == 10
def test_merge_configs():
    """Merging two loaded configs combines all of their depsets (7 + 3)."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        workspace = Workspace(dir=tmpdir)
        first = workspace.load_config(config_path=Path(tmpdir) / "test.depsets.yaml")
        second = workspace.load_config(config_path=Path(tmpdir) / "test2.depsets.yaml")
        merged = workspace.merge_configs([first, second])
        assert merged.depsets is not None
        assert len(merged.depsets) == 10
def test_get_configs_dir():
    """get_configs_dir discovers both *.depsets.yaml files next to the entry."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        workspace = Workspace(dir=tmpdir)
        found = workspace.get_configs_dir(
            configs_path=Path(tmpdir) / "test.depsets.yaml"
        )
        assert len(found) == 2
        assert f"{tmpdir}/test.depsets.yaml" in found
        assert f"{tmpdir}/test2.depsets.yaml" in found
def test_load_configs_with_wildcard_config_path():
    """A glob pattern as config_path loads every matching config file."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        config = Workspace(dir=tmpdir).load_configs(
            config_path=f"{tmpdir}/*.depsets.yaml"
        )
        assert config.depsets is not None
        assert len(config.depsets) == 10
def test_invalid_build_arg_set_in_config():
    """An undefined build arg set referenced from a config raises KeyError."""
    with tempfile.TemporaryDirectory() as tmpdir:
        copy_data_to_tmpdir(tmpdir)
        depset = Depset(
            name="invalid_build_arg_set",
            operation="compile",
            requirements=["requirements_test.txt"],
            output="requirements_compiled_invalid_build_arg_set.txt",
            config_name="test.depsets.yaml",
        )
        write_to_config_file(
            tmpdir,
            [depset],
            "test.depsets.yaml",
            build_arg_sets=["invalid_build_arg_set"],
        )
        workspace = Workspace(dir=tmpdir)
        # Use pytest.raises for consistency with the rest of this module
        # (instead of instantiating a unittest.TestCase just for assertRaises).
        # The assertion on the message also runs unconditionally now, outside
        # of the raising context.
        with pytest.raises(KeyError) as e:
            workspace.load_config(config_path=Path(tmpdir) / "test.depsets.yaml")
        assert (
            "Build arg set invalid_build_arg_set not found in config test.depsets.yaml"
            in str(e.value)
        )
# Allow running this test module directly: `python test_workspace.py`.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "ci/raydepsets/tests/test_workspace.py",
"license": "Apache License 2.0",
"lines": 154,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:ci/raydepsets/tests/utils.py | """Shared test utilities for raydepsets tests."""
import shutil
from pathlib import Path
from typing import List, Optional
import runfiles
from ci.raydepsets.workspace import Depset
_REPO_NAME = "io_ray"
_runfiles = runfiles.Create()
def copy_data_to_tmpdir(tmpdir, ignore_patterns: Optional[str] = None):
    """Copy test data to a temporary directory.

    Args:
        tmpdir: Destination directory (may already exist).
        ignore_patterns: A single glob pattern of files to skip during the
            copy, or None to copy everything.
    """
    shutil.copytree(
        # Resolve the test-data directory through Bazel runfiles.
        _runfiles.Rlocation(f"{_REPO_NAME}/ci/raydepsets/tests/test_data"),
        tmpdir,
        dirs_exist_ok=True,
        ignore=shutil.ignore_patterns(ignore_patterns) if ignore_patterns else None,
    )
def replace_in_file(filepath, old, new):
    """Replaces every occurrence of `old` with `new` in the file at `filepath`."""
    with open(filepath, "r") as f:
        text = f.read()
    with open(filepath, "w") as f:
        f.write(text.replace(old, new))
def save_packages_to_file(filepath, packages):
    """Writes one package name per line to `filepath` (overwriting it)."""
    with open(filepath, "w") as f:
        f.writelines(package + "\n" for package in packages)
def save_file_as(input_file, output_file):
    """Copies the contents of `input_file` to `output_file` (binary-safe).

    Delegates to `shutil.copyfile`, which streams the data in chunks instead
    of reading the whole file into memory.
    """
    shutil.copyfile(input_file, output_file)
def append_to_file(filepath, new):
    """Appends `new` plus a trailing newline to the file at `filepath`."""
    with open(filepath, "a") as f:
        print(new, file=f)
def get_depset_by_name(depsets, name):
    """Returns the first depset whose name matches `name`, or None if absent."""
    return next((depset for depset in depsets if depset.name == name), None)
def write_to_config_file(
    tmpdir: str,
    depsets: List[Depset],
    config_name: str,
    build_arg_sets: List[str] = None,
):
    """Serializes `depsets` into a `*.depsets.yaml` config file under `tmpdir`.

    Optional Depset fields are only emitted when set; unset fields produce
    blank (whitespace-only) lines, which YAML ignores.

    NOTE(review): the indentation inside the f-string template below is
    significant -- it becomes the YAML indentation of the written file.
    """
    with open(Path(tmpdir) / config_name, "w") as f:
        # Top-level mapping key.
        f.write(
            """
depsets:\n"""
        )
        for depset in depsets:
            f.write(
                f"""\n
  - name: {depset.name}
    operation: {depset.operation}
    {f"constraints: {depset.constraints}" if depset.constraints else ""}
    {f"requirements: {depset.requirements}" if depset.requirements else ""}
    output: {depset.output}
    {f"pre_hooks: {depset.pre_hooks}" if depset.pre_hooks else ""}
    {f"depsets: {depset.depsets}" if depset.depsets else ""}
    {f"source_depset: {depset.source_depset}" if depset.source_depset else ""}
    {f"append_flags: {depset.append_flags}" if depset.append_flags else ""}
    {f"override_flags: {depset.override_flags}" if depset.override_flags else ""}
    {f"packages: {depset.packages}" if depset.packages else ""}
    {f"build_arg_sets: {build_arg_sets}" if build_arg_sets else ""}
"""
            )
| {
"repo_id": "ray-project/ray",
"file_path": "ci/raydepsets/tests/utils.py",
"license": "Apache License 2.0",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/tests/test_util_helpers.py | import sys
import pytest
import ray
from ray._common.test_utils import SignalActor
from ray.util import as_completed, map_unordered
@pytest.fixture(scope="module")
def ray_init_4_cpu_shared():
    # Module-scoped Ray cluster with 4 CPUs, shared by all tests in this file;
    # torn down once after the last test in the module.
    ray.init(num_cpus=4)
    yield
    ray.shutdown()
@pytest.mark.parametrize("yield_obj_refs", [True, False])
def test_as_completed_chunk_size_1(ray_init_4_cpu_shared, yield_obj_refs):
    """as_completed with chunk_size=1 yields results one-by-one in completion
    order. Completion order is driven via SignalActor (stand-in for
    time.sleep(x))."""
    inputs = [10, 8, 6, 4, 2]
    # One signal per task, used to release each task on demand.
    signals = [SignalActor.remote() for _ in inputs]

    @ray.remote
    def f(x, signal):
        ray.get(signal.wait.remote())
        return x

    # Submit tasks with their corresponding signals in the original order.
    refs = [f.remote(x, sig) for x, sig in zip(inputs, signals)]
    # Consume as_completed() lazily.
    it = as_completed(refs, chunk_size=1, yield_obj_refs=yield_obj_refs)
    # Release tasks smallest-value-first (reverse of submission order) and
    # pull exactly one result per released signal.
    results = []
    for sig in reversed(signals):
        ray.get(sig.send.remote())
        results.append(next(it))
    if yield_obj_refs:
        results = ray.get(results)
    assert results == [2, 4, 6, 8, 10]
@pytest.mark.parametrize("yield_obj_refs", [True, False])
def test_as_completed_chunk_size_2(ray_init_4_cpu_shared, yield_obj_refs):
    """as_completed with chunk_size=2 yields results in completed batches of
    two. Completion order is driven via SignalActor (stand-in for
    time.sleep(x))."""
    inputs = [10, 8, 6, 4, 2]
    # One signal per task, used to release each task on demand.
    signals = [SignalActor.remote() for _ in inputs]

    @ray.remote
    def f(x, signal):
        ray.get(signal.wait.remote())
        return x

    # Submit tasks with their corresponding signals in the original order.
    refs = [f.remote(x, sig) for x, sig in zip(inputs, signals)]
    # Consume as_completed() lazily.
    it = as_completed(refs, chunk_size=2, yield_obj_refs=yield_obj_refs)
    # Release tasks two at a time (smallest values first), then drain a full
    # chunk of two results per released pair.
    results = []
    for i, j in ((4, 3), (2, 1)):
        ray.get(signals[i].send.remote())
        ray.get(signals[j].send.remote())
        results.append(next(it))
        results.append(next(it))
    # The final, left-over task completes alone.
    ray.get(signals[0].send.remote())
    results.append(next(it))
    if yield_obj_refs:
        results = ray.get(results)
    assert results == [4, 2, 8, 6, 10]
@pytest.mark.parametrize("yield_obj_refs", [True, False])
def test_map_unordered_chunk_size_1(ray_init_4_cpu_shared, yield_obj_refs):
    """map_unordered with chunk_size=1 yields results one-by-one in completion
    order. Completion order is driven via SignalActor (stand-in for
    time.sleep(x))."""
    inputs = [10, 8, 6, 4, 2]
    # One signal per task, used to release each task on demand.
    signals = [SignalActor.remote() for _ in inputs]

    @ray.remote
    def f(args):
        x, signal = args
        ray.get(signal.wait.remote())
        return x

    # Submit tasks (value, signal) pairs in the original order.
    it = map_unordered(
        f, zip(inputs, signals), chunk_size=1, yield_obj_refs=yield_obj_refs
    )
    # Release tasks smallest-value-first (reverse of submission order) and
    # pull exactly one result per released signal.
    results = []
    for sig in reversed(signals):
        ray.get(sig.send.remote())
        results.append(next(it))
    if yield_obj_refs:
        results = ray.get(results)
    assert results == [2, 4, 6, 8, 10]
@pytest.mark.parametrize("yield_obj_refs", [True, False])
def test_map_unordered_chunk_size_2(ray_init_4_cpu_shared, yield_obj_refs):
    """map_unordered with chunk_size=2 yields results in completed batches of
    two. Completion order is driven via SignalActor (stand-in for
    time.sleep(x))."""
    inputs = [10, 8, 6, 4, 2]
    # One signal per task, used to release each task on demand.
    signals = [SignalActor.remote() for _ in inputs]

    @ray.remote
    def f(args):
        x, signal = args
        ray.get(signal.wait.remote())
        return x

    # Submit (value, signal) pairs in the original order.
    it = map_unordered(
        f, zip(inputs, signals), chunk_size=2, yield_obj_refs=yield_obj_refs
    )
    # Release tasks two at a time (smallest values first), then drain a full
    # chunk of two results per released pair.
    results = []
    for i, j in ((4, 3), (2, 1)):
        ray.get(signals[i].send.remote())
        ray.get(signals[j].send.remote())
        results.append(next(it))
        results.append(next(it))
    # The final, left-over task completes alone.
    ray.get(signals[0].send.remote())
    results.append(next(it))
    if yield_obj_refs:
        results = ray.get(results)
    assert results == [4, 2, 8, 6, 10]
# Allow running this test module directly: `python test_util_helpers.py`.
if __name__ == "__main__":
    sys.exit(pytest.main(["-sv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/test_util_helpers.py",
"license": "Apache License 2.0",
"lines": 132,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/util/helpers.py | from typing import TYPE_CHECKING, Any, Iterable, Iterator, Optional, Sequence, Union
import ray
from ray.util.annotations import PublicAPI
if TYPE_CHECKING:
from ray import ObjectRef
from ray.remote_function import RemoteFunction
# ray.wait() has a default num_returns of 1.
# Using a slightly larger batch until the optimization is fully implemented, see
# https://github.com/ray-project/ray/issues/49905
DEFAULT_CHUNK_SIZE = 10
DEFAULT_BACKPRESSURE_SIZE = 100
def _wait_and_get_single_batch(
refs: "Sequence[ObjectRef]",
*,
chunk_size: int,
yield_obj_refs: bool = False,
**kwargs,
) -> tuple[list[Union[Any, "ObjectRef"]], "list[ObjectRef]"]:
"""Call ray.wait and explicitly return the ready objects/results
and remaining Ray remote refs.
Args:
refs: A list of Ray object refs.
chunk_size: The `num_returns` parameter to pass to `ray.wait()`.
yield_obj_refs: If True, return Ray remote refs instead of results (by calling :meth:`~ray.get`).
**kwargs: Additional keyword arguments to pass to `ray.wait()`.
Returns:
A tuple of two lists, ready and not ready. This is the same as the return value of `ray.wait()`.
"""
if chunk_size < 1:
raise ValueError("`chunk_size` must be >= 1")
kwargs = kwargs or {}
# num_returns must be <= len(refs)
ready, refs = ray.wait(
refs,
num_returns=min(chunk_size, len(refs)),
**kwargs,
)
if not yield_obj_refs:
return ray.get(ready), refs
return ready, refs
@PublicAPI(stability="alpha")
def as_completed(
    refs: "Sequence[ObjectRef]",
    *,
    chunk_size: int = DEFAULT_CHUNK_SIZE,
    yield_obj_refs: bool = False,
    **kwargs,
) -> Iterator[Union[Any, "ObjectRef"]]:
    """Given a list of Ray task references, yield results as they become available.

    Unlike calling :meth:`~ray.get` on a list of references (i.e., `ray.get(refs)`) which
    waits for all results to be ready, this function begins to yield result as soon as
    a batch of `chunk_size` results are ready.

    .. note::
        Generally there is no guarantee on the order of results. For example, the first result
        is not necessarily the first one completed, but rather the first one submitted in the
        first available batch (See :meth:`~ray.wait` for more details about
        preservation of submission order).

    .. note::
        Use this function instead of calling :meth:`~ray.get` inside a for loop. See
        https://docs.ray.io/en/latest/ray-core/patterns/ray-get-loop.html for more details.

    Example:
        Suppose we have a function that sleeps for x seconds depending on the input.
        We expect to obtain a partially sorted list of results.

        .. testcode:: python

            import ray
            import time

            @ray.remote
            def f(x):
                time.sleep(x)
                return x

            refs = [f.remote(i) for i in [10, 4, 6, 8, 2]]
            for x in ray.util.as_completed(refs, chunk_size=2):
                print(x)

        .. testoutput::
            :options: +MOCK

            # Output:
            4
            2
            6
            8
            10

    Args:
        refs: A list of Ray object refs.
        chunk_size: The number of tasks to wait for in each iteration (default 10).
            The parameter is passed as `num_returns` to :meth:`~ray.wait` internally.
        yield_obj_refs: If True, return Ray remote refs instead of results (by calling :meth:`~ray.get`).
        **kwargs: Additional keyword arguments to pass to :meth:`~ray.wait`, e.g.,
            `timeout` and `fetch_local`.

    Yields:
        Union[Any, ObjectRef]: The results (or optionally their Ray references) of the Ray tasks as they complete.

    Raises:
        ValueError: If `chunk_size` < 1 or `num_returns` is passed in `kwargs`.
    """
    if chunk_size < 1:
        raise ValueError("`chunk_size` must be >= 1")
    if "num_returns" in kwargs:
        # BUGFIX: the message previously referred to a nonexistent
        # "`chunksize`" argument; the actual parameter is `chunk_size`.
        raise ValueError("Use the `chunk_size` argument instead of `num_returns`.")
    while refs:
        results, refs = _wait_and_get_single_batch(
            refs,
            chunk_size=chunk_size,
            yield_obj_refs=yield_obj_refs,
            **kwargs,
        )
        yield from results
@PublicAPI(stability="alpha")
def map_unordered(
    fn: "RemoteFunction",
    items: Iterable[Any],
    *,
    backpressure_size: Optional[int] = DEFAULT_BACKPRESSURE_SIZE,
    chunk_size: int = DEFAULT_CHUNK_SIZE,
    yield_obj_refs: bool = False,
    **kwargs,
) -> Iterator[Union[Any, "ObjectRef"]]:
    """Apply a Ray remote function to a list of items and return an iterator that yields
    the completed results as they become available.

    This helper function applies backpressure to control the number of pending tasks, following the
    design pattern described in
    https://docs.ray.io/en/latest/ray-core/patterns/limit-pending-tasks.html.

    .. note::
        There is generally no guarantee on the order of results.

    Example:
        Suppose we have a function that sleeps for x seconds depending on the input.
        We expect to obtain a partially sorted list of results.

        .. testcode:: python

            import ray
            import time

            @ray.remote
            def f(x):
                time.sleep(x)
                return x

            # Example 1: chunk_size=2
            for x in ray.util.map_unordered(f, [10, 4, 6, 8, 2], chunk_size=2):
                print(x)

        .. testoutput::
            :options: +MOCK

            4
            2
            6
            8
            10

        .. testcode:: python

            # Example 2: backpressure_size=2, chunk_size=1
            for x in ray.util.map_unordered(f, [10, 4, 6, 8, 2], backpressure_size=2, chunk_size=1):
                print(x)

        .. testoutput::
            :options: +MOCK

            4
            10
            6
            8
            2

    Args:
        fn: A remote function to apply to the list of items. For more complex use cases, use Ray Data's
            :meth:`~ray.data.Dataset.map` / :meth:`~ray.data.Dataset.map_batches` instead.
        items: An iterable of items to apply the function to.
        backpressure_size: Maximum number of in-flight tasks allowed before
            calling a blocking :meth:`~ray.wait` (default 100). If None, no backpressure is applied.
        chunk_size: The number of tasks to wait for when the number of in-flight tasks exceeds
            `backpressure_size`. The parameter is passed as `num_returns` to :meth:`~ray.wait` internally.
        yield_obj_refs: If True, return Ray remote refs instead of results (by calling :meth:`~ray.get`).
        **kwargs: Additional keyword arguments to pass to :meth:`~ray.wait`, e.g.,
            `timeout` and `fetch_local`.

    Yields:
        Union[Any, ObjectRef]: The results (or optionally their Ray references) of the Ray tasks as they complete.

    .. seealso::
        :meth:`~ray.util.as_completed`
            Call this method for an existing list of Ray object refs.

        :meth:`~ray.data.Dataset.map`
            Use Ray Data APIs (e.g., :meth:`~ray.data.Dataset.map` and :meth:`~ray.data.Dataset.map_batches`)
            for better control and complex use cases, e.g., functions with multiple arguments.

    .. note::
        This is an alternative to `pool.imap_unordered()` in Ray's Actor-based `multiprocessing.Pool`.
        See https://docs.ray.io/en/latest/ray-more-libs/multiprocessing.html for more details.
    """
    if backpressure_size is None:
        # No limit requested: treat the in-flight budget as unbounded.
        backpressure_size: float = float("inf")
    elif backpressure_size <= 0:
        raise ValueError("backpressure_size must be positive.")
    if chunk_size < 1:
        raise ValueError("`chunk_size` must be >= 1")
    if "num_returns" in kwargs:
        # `num_returns` is managed internally via `chunk_size`.
        raise ValueError("Use the `chunk_size` argument instead of `num_returns`.")
    refs = []
    for item in items:
        refs.append(fn.remote(item))
        if len(refs) >= backpressure_size:
            # At the in-flight limit: block until one chunk of tasks
            # completes and yield those results before submitting more.
            results, refs = _wait_and_get_single_batch(
                refs,
                chunk_size=chunk_size,
                yield_obj_refs=yield_obj_refs,
                **kwargs,
            )
            yield from results
    else:
        # `for`/`else`: runs after all items were submitted (there is no
        # `break` above), draining whatever refs are still in flight.
        yield from as_completed(
            refs,
            chunk_size=chunk_size,
            yield_obj_refs=yield_obj_refs,
            **kwargs,
        )
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/util/helpers.py",
"license": "Apache License 2.0",
"lines": 200,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:python/ray/train/v2/_internal/execution/train_fn_utils.py | import logging
import threading
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union
from ray.train.v2._internal.data_integration.interfaces import DatasetShardMetadata
from ray.train.v2._internal.execution import collective_impl
from ray.train.v2._internal.execution.context import (
get_train_context as get_internal_train_context,
)
from ray.train.v2.api.context import (
DistributedTrainContext,
LocalTrainContext,
TrainContext as ExternalTrainContext,
)
from ray.train.v2.api.report_config import (
CheckpointConsistencyMode,
CheckpointUploadMode,
)
from ray.train.v2.api.validation_config import ValidationTaskConfig
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from ray.data import DataIterator
from ray.train import Checkpoint
from ray.train.v2.api.reported_checkpoint import ReportedCheckpoint
class TrainFnUtils(ABC):
    """Utility class providing an abstraction layer between user-facing APIs
    and :class:`~ray.train.v2.api.context.TrainContext`.

    It should be set before the user's training function is called.
    This class can be patched if new user-API behavior is wanted.
    """

    @abstractmethod
    def report(
        self,
        metrics: Dict[str, Any],
        checkpoint: Optional["Checkpoint"] = None,
        checkpoint_dir_name: Optional[str] = None,
        checkpoint_upload_mode: CheckpointUploadMode = CheckpointUploadMode.SYNC,
        delete_local_checkpoint_after_upload: Optional[bool] = None,
        checkpoint_upload_fn: Optional[
            Callable[["Checkpoint", str], "Checkpoint"]
        ] = None,
        validation: Union[bool, ValidationTaskConfig] = False,
    ) -> None:
        """Upload checkpoint to remote storage and put a training result on the result queue.

        Args:
            metrics: The metrics to report.
            checkpoint: The checkpoint to report.
            checkpoint_dir_name: The name of the checkpoint dir
                in this iteration. Note: If not set, the checkpoint will
                be stored in the default storage path. If set, make sure
                this value is unique for each iteration.
            checkpoint_upload_mode: The manner in which we want to upload the checkpoint.
                Defaults to uploading the checkpoint synchronously.
                This works when no checkpoint is provided but is not useful in that case.
            delete_local_checkpoint_after_upload: Whether to delete the checkpoint
                after it is uploaded.
            checkpoint_upload_fn: A user defined function that will be called with the
                checkpoint to upload it. If not provided, defaults to using the
                `pyarrow.fs.copy_files` utility for copying to the destination
                `storage_path`.
            validation: [Alpha] If True, triggers validation with default kwargs from
                validation_config. If a ValidationTaskConfig, validation is run using
                fn_kwargs merged with validation_config defaults, with fn_kwargs
                taking precedence on conflicts. If False, no validation.
        """
        pass

    @abstractmethod
    def get_checkpoint(self) -> Optional["Checkpoint"]:
        """Get the latest checkpoint to resume training from.

        Returns:
            The latest checkpoint if available, None otherwise.
        """
        pass

    @abstractmethod
    def get_all_reported_checkpoints(
        self,
        consistency_mode: CheckpointConsistencyMode = CheckpointConsistencyMode.VALIDATED,
    ) -> List["ReportedCheckpoint"]:
        """Get all the checkpoints reported by the workers.

        Args:
            consistency_mode: Read semantics for checkpoint retrieval.
                Defaults to VALIDATED.

        Returns:
            A list of ReportedCheckpoint objects that represent the checkpoints and
            corresponding metrics reported by the workers.
        """
        pass

    @abstractmethod
    def get_dataset_shard(self, dataset_info: DatasetShardMetadata) -> "DataIterator":
        """Get the dataset shard for this training process.

        Args:
            dataset_info: The metadata of the dataset to get the shard for.

        Returns:
            The DataIterator shard for this worker.
        """
        pass

    @abstractmethod
    def get_context(self) -> ExternalTrainContext:
        """Get the TrainContext for this training process.

        The specific type of TrainContext returned depends on the
        implementation of TrainFnUtils.

        Returns:
            The train context for this training process.
        """
        pass

    @abstractmethod
    def is_distributed(self) -> bool:
        # True for multi-worker (distributed) runs, False for local runs
        # (see DistributedTrainFnUtils / LocalTrainFnUtils below).
        pass

    @abstractmethod
    def barrier(self) -> None:
        """Create a barrier across all workers.

        All workers must call this method before the training function can continue.
        This method is used by the public API function :func:`ray.train.collective.barrier`.
        Users should typically call ``ray.train.collective.barrier()`` instead of
        calling this method directly.
        """
        pass

    @abstractmethod
    def broadcast_from_rank_zero(self, data: Any) -> Any:
        """Broadcast data from the rank 0 worker to all other workers.

        This method is used by the public API function
        :func:`ray.train.collective.broadcast_from_rank_zero`.
        Users should typically call ``ray.train.collective.broadcast_from_rank_zero()``
        instead of calling this method directly.
        """
        pass
class DistributedTrainFnUtils(TrainFnUtils):
    """TrainFnUtils implementation for distributed (multi-worker) runs.

    Every call is forwarded to the worker's internal train context or to the
    collective-communication primitives in ``collective_impl``.
    """

    def report(
        self,
        metrics: Dict[str, Any],
        checkpoint: Optional["Checkpoint"] = None,
        checkpoint_dir_name: Optional[str] = None,
        checkpoint_upload_mode: CheckpointUploadMode = CheckpointUploadMode.SYNC,
        delete_local_checkpoint_after_upload: Optional[bool] = None,
        checkpoint_upload_fn: Optional[
            Callable[["Checkpoint", str], "Checkpoint"]
        ] = None,
        validation: Union[bool, ValidationTaskConfig] = False,
    ) -> None:
        # Forward verbatim to the per-worker internal train context.
        ctx = get_internal_train_context()
        return ctx.report(
            metrics,
            checkpoint,
            checkpoint_dir_name,
            checkpoint_upload_mode,
            delete_local_checkpoint_after_upload,
            checkpoint_upload_fn,
            validation,
        )

    def get_checkpoint(self):
        return get_internal_train_context().get_checkpoint()

    def get_all_reported_checkpoints(
        self,
        consistency_mode: CheckpointConsistencyMode = CheckpointConsistencyMode.VALIDATED,
    ) -> List["ReportedCheckpoint"]:
        ctx = get_internal_train_context()
        return ctx.get_all_reported_checkpoints(consistency_mode=consistency_mode)

    def get_dataset_shard(self, dataset_info: DatasetShardMetadata) -> "DataIterator":
        return get_internal_train_context().get_dataset_shard(dataset_info)

    def get_context(self) -> DistributedTrainContext:
        # A fresh facade object; state lives in the internal context.
        return DistributedTrainContext()

    def is_distributed(self) -> bool:
        return True

    def barrier(self) -> None:
        return collective_impl.barrier()

    def broadcast_from_rank_zero(self, data: Any) -> Any:
        return collective_impl.broadcast_from_rank_zero(data)
class LocalTrainFnUtils(TrainFnUtils):
    """TrainFnUtils implementation for single-process ("local") runs.

    Collective operations degenerate to no-ops and reported results are
    simply cached on the instance.
    """

    def __init__(
        self,
        experiment_name: str,
        dataset_shards: Optional[Dict[str, "DataIterator"]] = None,
        world_size: int = 1,
        world_rank: int = 0,
        local_rank: int = 0,
        local_world_size: int = 1,
        node_rank: int = 0,
    ):
        self._context = LocalTrainContext(
            experiment_name=experiment_name,
            world_size=world_size,
            world_rank=world_rank,
            local_rank=local_rank,
            local_world_size=local_world_size,
            node_rank=node_rank,
        )
        self._dataset_shards = dataset_shards
        # Most recent values passed to `report()`.
        self._last_metrics = None
        self._last_checkpoint = None

    def report(
        self,
        metrics: Dict[str, Any],
        checkpoint: Optional["Checkpoint"] = None,
        checkpoint_dir_name: Optional[str] = None,
        checkpoint_upload_mode: CheckpointUploadMode = CheckpointUploadMode.SYNC,
        delete_local_checkpoint_after_upload: Optional[bool] = None,
        checkpoint_upload_fn: Optional[
            Callable[["Checkpoint", str], "Checkpoint"]
        ] = None,
        validation: Union[bool, ValidationTaskConfig] = False,
    ) -> None:
        # Local runs have no storage path or result queue: remember the
        # latest report so `get_checkpoint`/`_get_last_metrics` can serve it.
        self._last_checkpoint = checkpoint
        self._last_metrics = metrics
        logger.info(f"Reported metrics: {metrics}")

    def get_checkpoint(self) -> Optional["Checkpoint"]:
        return self._last_checkpoint

    def get_all_reported_checkpoints(
        self,
        consistency_mode: CheckpointConsistencyMode = CheckpointConsistencyMode.VALIDATED,
    ) -> List["ReportedCheckpoint"]:
        return []

    def get_dataset_shard(self, dataset_info: DatasetShardMetadata) -> "DataIterator":
        dataset_name = dataset_info.dataset_name
        assert (
            self._dataset_shards is not None and dataset_name in self._dataset_shards
        ), f"Dataset shard {dataset_name} not found."
        return self._dataset_shards[dataset_name]

    def get_context(self) -> LocalTrainContext:
        return self._context

    def is_distributed(self) -> bool:
        return False

    def barrier(self) -> None:
        # Single worker: nothing to synchronize with.
        pass

    def broadcast_from_rank_zero(self, data: Any) -> Any:
        # Single worker: "broadcast" is the identity function.
        return data

    def _get_last_metrics(self) -> Optional[Dict[str, Any]]:
        """Return the last metrics reported by the training function.

        This function should only be called by LocalController.
        """
        return self._last_metrics
# Process-wide singleton holding the active TrainFnUtils implementation.
# Installed via `set_train_fn_utils()` and read back via `get_train_fn_utils()`.
_train_fn_utils: Optional[TrainFnUtils] = None
# Guards reads/writes of `_train_fn_utils` across threads.
_train_fn_utils_lock = threading.Lock()
def get_train_fn_utils() -> TrainFnUtils:
    """Return the Ray Train function utilities.

    Returns:
        The TrainFnUtils instance for the current worker.

    Raises:
        RuntimeError: If the Ray Train function utilities are not initialized.
    """
    # Snapshot the singleton under the lock, then validate outside of it.
    with _train_fn_utils_lock:
        utils = _train_fn_utils
    if utils is None:
        raise RuntimeError("Ray Train function utilities not initialized.")
    return utils
def set_train_fn_utils(train_fn_utils: TrainFnUtils) -> None:
    """Install the process-wide TrainFnUtils singleton.

    Args:
        train_fn_utils: The TrainFnUtils implementation that subsequent
            calls to `get_train_fn_utils()` should return.
    """
    global _train_fn_utils
    with _train_fn_utils_lock:
        _train_fn_utils = train_fn_utils
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/_internal/execution/train_fn_utils.py",
"license": "Apache License 2.0",
"lines": 240,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/tests/test_core_worker_fault_tolerance.py | import json
import sys
import numpy as np
import pytest
import ray
from ray._common.test_utils import (
SignalActor,
wait_for_condition,
)
from ray._private.test_utils import (
RPC_FAILURE_MAP,
RPC_FAILURE_TYPES,
)
from ray.core.generated import common_pb2, gcs_pb2
from ray.exceptions import GetTimeoutError, TaskCancelledError
from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy
@pytest.mark.parametrize(
    "allow_out_of_order_execution",
    [True, False],
)
@pytest.mark.parametrize("deterministic_failure", RPC_FAILURE_TYPES)
def test_push_actor_task_failure(
    monkeypatch,
    ray_start_cluster,
    allow_out_of_order_execution: bool,
    deterministic_failure: str,
):
    """Actor task submission survives injected PushTask RPC failures: every
    task result is still returned exactly once and in submission order."""
    with monkeypatch.context() as m:
        # Inject two deterministic failures into the PushTask RPC. The env
        # vars must be set before any node starts.
        failure = RPC_FAILURE_MAP[deterministic_failure].copy()
        failure["num_failures"] = 2
        m.setenv(
            "RAY_testing_rpc_failure",
            json.dumps({"CoreWorkerService.grpc_client.PushTask": failure}),
        )
        # Don't wait for out-of-order submissions to be reordered.
        m.setenv("RAY_actor_scheduling_queue_max_reorder_wait_seconds", "0")
        cluster = ray_start_cluster
        cluster.add_node(num_cpus=1)
        ray.init(address=cluster.address)

        @ray.remote(
            max_task_retries=-1,
            allow_out_of_order_execution=allow_out_of_order_execution,
        )
        class RetryActor:
            def echo(self, value):
                return value

        refs = []
        actor = RetryActor.remote()
        for i in range(10):
            refs.append(actor.echo.remote(i))
        # Despite the injected RPC failures, results must match the inputs.
        assert ray.get(refs) == list(range(10))
@pytest.mark.parametrize("deterministic_failure", RPC_FAILURE_TYPES)
def test_update_object_location_batch_failure(
    monkeypatch, ray_start_cluster, deterministic_failure
):
    """A single UpdateObjectLocationBatch RPC failure must not break
    cross-node object transfer."""
    with monkeypatch.context() as m:
        failure = RPC_FAILURE_MAP[deterministic_failure].copy()
        failure["num_failures"] = 1
        m.setenv(
            "RAY_testing_rpc_failure",
            json.dumps(
                {"CoreWorkerService.grpc_client.UpdateObjectLocationBatch": failure}
            ),
        )
        cluster = ray_start_cluster
        # Head node gets no CPUs so the producer task cannot run there.
        head_node_id = cluster.add_node(
            num_cpus=0,
        ).node_id
        ray.init(address=cluster.address)
        worker_node_id = cluster.add_node(num_cpus=1).node_id

        @ray.remote(num_cpus=1)
        def create_large_object():
            # 100 MiB payload so the object lives in the object store and
            # must be transferred between nodes.
            return np.zeros(100 * 1024 * 1024, dtype=np.uint8)

        @ray.remote(num_cpus=0)
        def consume_large_object(obj):
            return sys.getsizeof(obj)

        # Pin producer and consumer to different nodes so the object's
        # location must be propagated via UpdateObjectLocationBatch.
        obj_ref = create_large_object.options(
            scheduling_strategy=NodeAffinitySchedulingStrategy(
                node_id=worker_node_id, soft=False
            )
        ).remote()
        consume_ref = consume_large_object.options(
            scheduling_strategy=NodeAffinitySchedulingStrategy(
                node_id=head_node_id, soft=False
            )
        ).remote(obj_ref)
        assert ray.get(consume_ref, timeout=10) > 0
@pytest.mark.parametrize("deterministic_failure", RPC_FAILURE_TYPES)
def test_get_object_status_rpc_retry_and_idempotency(
    monkeypatch, shutdown_only, deterministic_failure
):
    """Test that GetObjectStatus RPC retries work correctly.

    Verify that the RPC is idempotent when network failures occur.
    Cross_worker_access_task triggers GetObjectStatus because it does
    not own objects and needs to request it from the driver.
    """
    failure = RPC_FAILURE_MAP[deterministic_failure].copy()
    failure["num_failures"] = 1
    monkeypatch.setenv(
        "RAY_testing_rpc_failure",
        json.dumps({"CoreWorkerService.grpc_client.GetObjectStatus": failure}),
    )
    ray.init()

    @ray.remote
    def test_task(i):
        return i * 2

    @ray.remote
    def cross_worker_access_task(objects):
        # `ray.get` on refs owned by the driver forces a GetObjectStatus
        # request from this worker to the driver.
        data = ray.get(objects)
        return data

    object_refs = [test_task.remote(i) for i in range(5)]
    result_object_ref = cross_worker_access_task.remote(object_refs)
    final_result = ray.get(result_object_ref)
    assert final_result == [0, 2, 4, 6, 8]
@pytest.mark.parametrize("deterministic_failure", RPC_FAILURE_TYPES)
def test_wait_for_actor_ref_deleted_rpc_retry_and_idempotency(
    monkeypatch, shutdown_only, deterministic_failure
):
    """Test that WaitForActorRefDeleted RPC retries work correctly.

    Verify that the RPC is idempotent when network failures occur.
    The GCS actor manager will trigger this RPC during actor initialization
    to monitor when the actor handles have gone out of scope and the actor
    should be destroyed.
    """
    failure = RPC_FAILURE_MAP[deterministic_failure].copy()
    failure["num_failures"] = 1
    monkeypatch.setenv(
        "RAY_testing_rpc_failure",
        json.dumps({"CoreWorkerService.grpc_client.WaitForActorRefDeleted": failure}),
    )
    ray.init()

    @ray.remote(max_restarts=1)
    class SimpleActor:
        def ping(self):
            return "pong"

    actor = SimpleActor.remote()
    result = ray.get(actor.ping.remote())
    assert result == "pong"

    actor_id = actor._actor_id
    # Drop the only handle so the actor ref goes out of scope.
    del actor

    def verify_actor_ref_deleted():
        # Poll the GCS until the actor is DEAD with reason REF_DELETED.
        actor_info = ray._private.state.state.get_actor_info(actor_id)
        if actor_info is None:
            return False
        actor_info = gcs_pb2.ActorTableData.FromString(actor_info)
        return (
            actor_info.state == gcs_pb2.ActorTableData.ActorState.DEAD
            and actor_info.death_cause.actor_died_error_context.reason
            == common_pb2.ActorDiedErrorContext.Reason.REF_DELETED
        )

    wait_for_condition(verify_actor_ref_deleted, timeout=30)
@pytest.fixture
def inject_cancel_remote_task_rpc_failure(monkeypatch, request):
    """Inject one deterministic failure into the RequestOwnerToCancelTask RPC.

    The failure type is supplied via indirect parametrization (`request.param`).
    """
    deterministic_failure = request.param
    failure = RPC_FAILURE_MAP[deterministic_failure].copy()
    failure["num_failures"] = 1
    monkeypatch.setenv(
        "RAY_testing_rpc_failure",
        json.dumps({"CoreWorkerService.grpc_client.RequestOwnerToCancelTask": failure}),
    )
@pytest.mark.parametrize(
    "inject_cancel_remote_task_rpc_failure",
    RPC_FAILURE_TYPES,
    indirect=True,
)
def test_cancel_remote_task_rpc_retry_and_idempotency(
    inject_cancel_remote_task_rpc_failure, ray_start_cluster
):
    """Cancelling a task owned by another worker still succeeds when the
    RequestOwnerToCancelTask RPC fails once (failure injected by fixture)."""
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=0)
    ray.init(address=cluster.address)
    # Two worker nodes so the owner and the executor of `inner` differ.
    cluster.add_node(num_cpus=1, resources={"worker1": 1})
    cluster.add_node(num_cpus=1, resources={"worker2": 1})
    signaler = SignalActor.remote()

    @ray.remote(num_cpus=1, resources={"worker1": 1})
    def wait_for(y):
        return ray.get(y[0])

    @ray.remote(num_cpus=1, resources={"worker2": 1})
    def remote_wait(sg):
        return [wait_for.remote([sg[0]])]

    sig = signaler.wait.remote()
    # `inner` is submitted (and thus owned) by the `remote_wait` worker, so
    # cancelling it from the driver must go through the owner.
    outer = remote_wait.remote([sig])
    inner = ray.get(outer)[0]
    # `inner` blocks on the signal that is never sent.
    with pytest.raises(GetTimeoutError):
        ray.get(inner, timeout=1)
    ray.cancel(inner)
    with pytest.raises(TaskCancelledError):
        ray.get(inner, timeout=10)
def test_double_borrowing_with_rpc_failure(monkeypatch, shutdown_only):
    """Regression test for https://github.com/ray-project/ray/issues/57997"""
    # Drop the first three PushTask RPC *responses* (not requests) so the
    # actor task is retried even though it already executed once.
    monkeypatch.setenv(
        "RAY_testing_rpc_failure",
        json.dumps(
            {
                "CoreWorkerService.grpc_client.PushTask": {
                    "num_failures": 3,
                    "req_failure_prob": 0,
                    "resp_failure_prob": 100,
                    "in_flight_failure_prob": 0,
                }
            }
        ),
    )
    ray.init()

    @ray.remote(max_task_retries=-1, max_restarts=-1)
    class Actor:
        def __init__(self, objs):
            # Actor is a borrower of obj
            self.obj = objs[0]

        def test(self):
            # Return the borrowed object inside the list
            # so the caller is a borrower as well.
            # This actor task will be retried since
            # the first PushTask RPC response will be lost.
            return [self.obj]

    obj = ray.put(31)
    actor = Actor.remote([obj])
    result = ray.get(actor.test.remote())
    assert ray.get(result[0]) == 31
if __name__ == "__main__":
    # Allow running this test module directly via `python <file>`.
    pytest.main([__file__, "-v", "-s"])
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/test_core_worker_fault_tolerance.py",
"license": "Apache License 2.0",
"lines": 219,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/serve/_private/constants_utils.py | import os
import warnings
from typing import Callable, List, Optional, Type, TypeVar
def str_to_list(s: str) -> List[str]:
    """Split a comma-separated string into a list of trimmed, non-empty items."""
    items = []
    for piece in s.split(","):
        piece = piece.strip()
        if piece:
            items.append(piece)
    return items
def parse_latency_buckets(bucket_str: str, default_buckets: List[float]) -> List[float]:
    """Parse a comma-separated string of latency bucket values.

    Args:
        bucket_str: A comma-separated string of positive numbers in strictly
            ascending order. Whitespace around entries is ignored.
        default_buckets: Default bucket values to use if bucket_str is empty.

    Returns:
        A list of parsed float values.

    Raises:
        ValueError: If the format is invalid or values don't meet requirements.
    """
    if bucket_str.strip() == "":
        return default_buckets
    try:
        # Convert string to list of floats. Note that splitting a non-empty
        # string always yields at least one piece, and an empty piece fails
        # float() below, so `buckets` is never empty past this line (the old
        # "Empty bucket list" check was unreachable and has been removed).
        buckets = [float(x.strip()) for x in bucket_str.split(",")]
        if any(x <= 0 for x in buckets):
            raise ValueError("Bucket values must be positive")
        # Deduplicating + sorting only matches the input when it was already
        # strictly increasing.
        if sorted(set(buckets)) != buckets:
            raise ValueError("Bucket values must be in strictly ascending order")
        return buckets
    except Exception as e:
        # Wrap any parsing/validation error in a uniform, descriptive message.
        raise ValueError(
            f"Invalid format for `{bucket_str}`. "
            f"Expected comma-separated positive numbers in ascending order. Error: {str(e)}"
        ) from e
# Generic parameter for `_get_env_value` and the typed wrappers below.
T = TypeVar("T")

# Legacy environment-variable names (missing the `RAY_SERVE_` prefix or using
# an outdated spelling) that are still accepted for backward compatibility.
# todo: remove for the '3.0.0' release.
_wrong_names_white_list = {
    "REQUEST_LATENCY_BUCKETS_MS",
    "MODEL_LOAD_LATENCY_BUCKETS_MS",
    "MAX_CACHED_HANDLES",
    "SERVE_REQUEST_PROCESSING_TIMEOUT_S",
}
def _validate_name(name: str) -> None:
"""Validate Ray Serve environment variable name."""
required_prefix = "RAY_SERVE_"
if not name.startswith(required_prefix):
if name in _wrong_names_white_list:
return
raise ValueError(
f"Got unexpected environment variable name `{name}`! "
f"Ray Serve environment variables require prefix `{required_prefix}`. "
)
def _get_env_value(
    name: str,
    default: Optional[T],
    value_type: Type[T],
    validation_func: Optional[Callable[[T], bool]] = None,
    expected_value_description: Optional[str] = None,
) -> Optional[T]:
    """Get environment variable with type conversion and validation.

    Retrieves an environment variable, converts it to the requested type, and
    optionally validates the converted value.

    Args:
        name: The name of the environment variable.
        default: Default value to use if the environment variable is not set.
            If None, the function returns None without validation.
        value_type: Type to convert the value to (e.g., int, float, str).
        validation_func: Optional predicate applied to the converted value.
        expected_value_description: Description of the expected value (e.g.,
            "positive") used in error messages; expected only when
            validation_func is provided.

    Returns:
        The converted and validated value, or the default when the variable
        is not set.

    Raises:
        ValueError: If the name is invalid, the value cannot be converted to
            the requested type, or the value fails validation.
    """
    _validate_name(name)
    env_value = os.environ.get(name)
    if env_value is not None:
        # Explicitly set: emit a deprecation warning for legacy names.
        _deprecation_warning(name)
        raw = env_value
    elif default is None:
        # Unset and no default: nothing to convert or validate.
        return None
    else:
        raw = default
    try:
        value = value_type(raw)
    except ValueError as e:
        raise ValueError(
            f"Environment variable `{name}` value `{raw}` cannot be converted to `{value_type.__name__}`!"
        ) from e
    if validation_func and not validation_func(value):
        raise ValueError(
            f"Got unexpected value `{value}` for `{name}` environment variable! "
            f"Expected {expected_value_description} `{value_type.__name__}`."
        )
    return value
def get_env_int(name: str, default: Optional[int]) -> Optional[int]:
    """Read an environment variable as an ``int``.

    Args:
        name: The name of the environment variable.
        default: Value used when the variable is unset (may be ``None``).

    Returns:
        The parsed integer, or ``default`` when the variable is unset.

    Raises:
        ValueError: If the value cannot be converted to an integer.
    """
    return _get_env_value(name, default, value_type=int)
def get_env_int_positive(name: str, default: Optional[int]) -> Optional[int]:
    """Read an environment variable as a positive ``int``.

    Args:
        name: The name of the environment variable.
        default: Value used when the variable is unset (may be ``None``).

    Returns:
        The parsed positive integer, or ``default`` when the variable is unset.

    Raises:
        ValueError: If the value cannot be converted to an integer or is not
            positive.
    """

    def _is_positive(value: int) -> bool:
        return value > 0

    return _get_env_value(name, default, int, _is_positive, "positive")
def get_env_int_non_negative(name: str, default: Optional[int]) -> Optional[int]:
    """Read an environment variable as a non-negative ``int``.

    Args:
        name: The name of the environment variable.
        default: Value used when the variable is unset (may be ``None``).

    Returns:
        The parsed non-negative integer, or ``default`` when the variable is
        unset.

    Raises:
        ValueError: If the value cannot be converted to an integer or is
            negative.
    """

    def _is_non_negative(value: int) -> bool:
        return value >= 0

    return _get_env_value(name, default, int, _is_non_negative, "non negative")
def get_env_float(name: str, default: Optional[float]) -> Optional[float]:
    """Read an environment variable as a ``float``.

    Args:
        name: The name of the environment variable.
        default: Value used when the variable is unset (may be ``None``).

    Returns:
        The parsed float, or ``default`` when the variable is unset.

    Raises:
        ValueError: If the value cannot be converted to a float.
    """
    return _get_env_value(name, default, value_type=float)
def get_env_float_positive(name: str, default: Optional[float]) -> Optional[float]:
    """Read an environment variable as a positive ``float``.

    Args:
        name: The name of the environment variable.
        default: Value used when the variable is unset (may be ``None``).

    Returns:
        The parsed positive float, or ``default`` when the variable is unset.

    Raises:
        ValueError: If the value cannot be converted to a float or is not
            positive.
    """

    def _is_positive(value: float) -> bool:
        return value > 0

    return _get_env_value(name, default, float, _is_positive, "positive")
def get_env_float_non_negative(name: str, default: Optional[float]) -> Optional[float]:
    """Read an environment variable as a non-negative ``float``.

    Args:
        name: The name of the environment variable.
        default: Value used when the variable is unset (may be ``None``).

    Returns:
        The parsed non-negative float, or ``default`` when the variable is
        unset.

    Raises:
        ValueError: If the value cannot be converted to a float or is
            negative.
    """

    def _is_non_negative(value: float) -> bool:
        return value >= 0

    return _get_env_value(name, default, float, _is_non_negative, "non negative")
def get_env_str(name: str, default: Optional[str]) -> Optional[str]:
    """Read an environment variable as a string.

    Args:
        name: The name of the environment variable.
        default: Value used when the variable is unset.

    Returns:
        The value as a string, or ``None`` when the variable is unset and
        ``default`` is ``None``.
    """
    return _get_env_value(name, default, value_type=str)
def get_env_bool(name: str, default: str) -> bool:
    """Read an environment variable as a boolean.

    A value of ``"1"`` is interpreted as True; anything else is False.

    Args:
        name: The name of the environment variable.
        default: Value used when the variable is unset; expects "0" or "1".

    Returns:
        True if the effective value is "1", False otherwise.
    """
    return _get_env_value(name, default, str) == "1"
# Environment variables that are fully deprecated and will be ignored.
# Maps each deprecated variable name to the Serve config option replacing it.
_fully_deprecated_env_vars = {
    "RAY_SERVE_HTTP_KEEP_ALIVE_TIMEOUT_S": "http_options.keep_alive_timeout_s",
}
def _deprecation_warning(name: str) -> None:
    """Log replacement warning for wrong or legacy environment variables.

    TODO: remove this function for the '3.0.0' release.

    :param name: environment variable name
    """
    change_version = "3.0.0"
    required_prefix = "RAY_SERVE_"
    # Only legacy names and the renamed push-interval variable get a warning.
    if (
        name not in _wrong_names_white_list
        and name != "RAY_SERVE_HANDLE_METRIC_PUSH_INTERVAL_S"
    ):
        return
    # Special renames; any other legacy name just gains the required prefix.
    renames = {
        "RAY_SERVE_HANDLE_METRIC_PUSH_INTERVAL_S": "RAY_SERVE_HANDLE_AUTOSCALING_METRIC_PUSH_INTERVAL_S",
        "SERVE_REQUEST_PROCESSING_TIMEOUT_S": "RAY_SERVE_REQUEST_PROCESSING_TIMEOUT_S",
    }
    new_name = renames.get(name, f"{required_prefix}{name}")
    # stacklevel=4 preserved from the original call depth.
    warnings.warn(
        f"Starting from version `{change_version}` environment variable "
        f"`{name}` will be deprecated. Please use `{new_name}` instead.",
        FutureWarning,
        stacklevel=4,
    )
def warn_if_deprecated_env_var_set(name: str) -> None:
    """Warn if a fully deprecated environment variable is set.

    :param name: environment variable name
    """
    config_option = _fully_deprecated_env_vars.get(name)
    if config_option is None or not os.environ.get(name):
        # Not a fully deprecated variable, or not set (empty counts as unset).
        return
    warnings.warn(
        f"`{name}` environment variable will be deprecated in the future. "
        f"Use `{config_option}` in the Serve config instead.",
        DeprecationWarning,
        stacklevel=2,
    )
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/_private/constants_utils.py",
"license": "Apache License 2.0",
"lines": 231,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:python/ray/serve/tests/unit/test_constants_utils.py | import os
from unittest.mock import patch
import pytest
from ray.serve._private.constants_utils import (
_validate_name,
get_env_bool,
get_env_float,
get_env_float_non_negative,
get_env_float_positive,
get_env_int,
get_env_int_non_negative,
get_env_int_positive,
get_env_str,
parse_latency_buckets,
str_to_list,
)
class TestStrToList:
    """Unit tests for `str_to_list`."""

    def test_str_to_list_basic(self):
        assert str_to_list("a,b,c") == ["a", "b", "c"]

    def test_str_to_list_with_whitespace(self):
        # Whitespace around entries is trimmed.
        assert str_to_list(" a , b , c ") == ["a", "b", "c"]

    def test_str_to_list_empty_string(self):
        assert str_to_list("") == []

    def test_str_to_list_with_empty_entries(self):
        # Empty entries between/after commas are dropped.
        assert str_to_list("a,,b,c,") == ["a", "b", "c"]

    def test_str_to_list_only_whitespace(self):
        assert str_to_list("   ") == []

    def test_str_to_list_single_entry(self):
        assert str_to_list("single") == ["single"]

    def test_str_to_list_only_commas(self):
        assert str_to_list(",,,,") == []

    def test_str_to_list_whitespace_entries(self):
        # Entries that are only whitespace are dropped after trimming.
        assert str_to_list("a, ,b") == ["a", "b"]
class TestParseLatencyBuckets:
    """Unit tests for `parse_latency_buckets`."""

    def test_parse_latency_buckets(self):
        # Test valid inputs with different formats
        assert parse_latency_buckets("1,2,3", []) == [1.0, 2.0, 3.0]
        assert parse_latency_buckets("1,2,3,4 ", []) == [1.0, 2.0, 3.0, 4.0]
        assert parse_latency_buckets(" 1,2,3,4,5", []) == [1.0, 2.0, 3.0, 4.0, 5.0]
        assert parse_latency_buckets(" 1, 2,3 ,4,5 ,6 ", []) == [
            1.0,
            2.0,
            3.0,
            4.0,
            5.0,
            6.0,
        ]
        # Test decimal numbers
        assert parse_latency_buckets("0.5,1.5,2.5", []) == [0.5, 1.5, 2.5]

    def test_parse_latency_buckets_invalid(self):
        # Test negative numbers
        with pytest.raises(ValueError, match=".*must be positive.*"):
            parse_latency_buckets("-1,1,2,3,4", [])
        # Test non-ascending order
        with pytest.raises(ValueError, match=".*be in strictly ascending order*"):
            parse_latency_buckets("4,3,2,1", [])
        # Test duplicate values
        with pytest.raises(ValueError, match=".*be in strictly ascending order.*"):
            parse_latency_buckets("1,2,2,3,4", [])
        # Test invalid number format
        with pytest.raises(ValueError, match=".*Invalid.*format.*"):
            parse_latency_buckets("1,2,3,4,a", [])
        # Test empty list (empty pieces fail float() conversion)
        with pytest.raises(ValueError, match=".*could not convert.*"):
            parse_latency_buckets(",,,", [])
        # Test invalid separators
        with pytest.raises(ValueError, match=".*could not convert.*"):
            parse_latency_buckets("1;2;3;4", [])
@pytest.fixture
def mock_environ():
    """Provide an isolated, initially-empty `os.environ` for each test."""
    with patch.dict(os.environ, {}, clear=True) as mock_env:
        yield mock_env
class TestEnvValueFunctions:
    """Unit tests for the typed `get_env_*` accessors of RAY_SERVE_* env vars."""

    def test_get_env_int(self, mock_environ):
        # Falls back to the default when the variable is unset.
        assert get_env_int("RAY_SERVE_TEST_VAR", 0) == 0
        mock_environ["RAY_SERVE_TEST_VAR"] = "42"
        assert get_env_int("RAY_SERVE_TEST_VAR", 0) == 42
        # The plain accessor accepts negative values.
        mock_environ["RAY_SERVE_TEST_VAR"] = "-1"
        assert get_env_int("RAY_SERVE_TEST_VAR", 0) == -1
        # NOTE(review): the remaining assertions call get_env_int_positive,
        # not get_env_int — possibly intentional (shared conversion path),
        # but confirm this is not a copy/paste slip.
        mock_environ["RAY_SERVE_TEST_VAR"] = "0.1"
        with pytest.raises(ValueError, match=".*`0.1` cannot be converted to `int`!*"):
            get_env_int_positive("RAY_SERVE_TEST_VAR", 5)
        mock_environ["RAY_SERVE_TEST_VAR"] = "abc"
        with pytest.raises(ValueError, match=".*`abc` cannot be converted to `int`!*"):
            get_env_int_positive("RAY_SERVE_TEST_VAR", 5)
        # Variable names must carry the RAY_SERVE_ prefix.
        with pytest.raises(ValueError, match=".*require prefix `RAY_SERVE_`*"):
            get_env_int_positive("NO_PREFIX", 5)

    def test_get_env_int_positive(self, mock_environ):
        assert get_env_int_positive("RAY_SERVE_TEST_VAR", 1) == 1
        mock_environ["RAY_SERVE_TEST_VAR"] = "42"
        assert get_env_int_positive("RAY_SERVE_TEST_VAR", 1) == 42
        # Negative values are rejected by the positive-only accessor.
        mock_environ["RAY_SERVE_TEST_VAR"] = "-1"
        with pytest.raises(ValueError, match=".*Expected positive `int`.*"):
            get_env_int_positive("RAY_SERVE_TEST_VAR", 5)

    def test_get_env_int_non_negative(self, mock_environ):
        # Zero is allowed, unlike the positive-only variant.
        assert get_env_int_non_negative("RAY_SERVE_TEST_VAR", 0) == 0
        assert get_env_int_non_negative("RAY_SERVE_TEST_VAR", 1) == 1
        mock_environ["RAY_SERVE_TEST_VAR"] = "42"
        assert get_env_int_non_negative("RAY_SERVE_TEST_VAR", 0) == 42
        mock_environ["RAY_SERVE_TEST_VAR"] = "-1"
        with pytest.raises(ValueError, match=".*Expected non negative `int`.*"):
            get_env_int_non_negative("RAY_SERVE_TEST_VAR", 5)
        # The default value itself is validated as well.
        with pytest.raises(ValueError, match=".*Expected non negative `int`.*"):
            get_env_int_non_negative("RAY_SERVE_TEST_VAR_FROM_DEFAULT", -1)

    def test_get_env_float(self, mock_environ):
        assert get_env_float("RAY_SERVE_TEST_VAR", 0.0) == 0.0
        mock_environ["RAY_SERVE_TEST_VAR"] = "3.14"
        assert get_env_float("RAY_SERVE_TEST_VAR", 0.0) == 3.14
        mock_environ["RAY_SERVE_TEST_VAR"] = "-2.5"
        assert get_env_float("RAY_SERVE_TEST_VAR", 0.0) == -2.5
        # Non-numeric strings raise a descriptive conversion error.
        mock_environ["RAY_SERVE_TEST_VAR"] = "abc"
        with pytest.raises(
            ValueError, match=".*`abc` cannot be converted to `float`!*"
        ):
            get_env_float("RAY_SERVE_TEST_VAR", 0.0)

    def test_get_env_float_positive(self, mock_environ):
        assert get_env_float_positive("RAY_SERVE_TEST_VAR", 1.5) == 1.5
        # A `None` default is passed through without validation.
        assert get_env_float_positive("RAY_SERVE_TEST_VAR", None) is None
        mock_environ["RAY_SERVE_TEST_VAR"] = "42.5"
        assert get_env_float_positive("RAY_SERVE_TEST_VAR", 1.0) == 42.5
        mock_environ["RAY_SERVE_TEST_VAR"] = "-1.2"
        with pytest.raises(ValueError, match=".*Expected positive `float`.*"):
            get_env_float_positive("RAY_SERVE_TEST_VAR", 5.0)
        # Zero and negative defaults are rejected too.
        with pytest.raises(ValueError, match=".*Expected positive `float`.*"):
            get_env_float_positive("RAY_SERVE_TEST_VAR_FROM_DEFAULT", 0.0)
        with pytest.raises(ValueError, match=".*Expected positive `float`.*"):
            get_env_float_positive("RAY_SERVE_TEST_VAR_FROM_DEFAULT", -1)

    def test_get_env_float_non_negative(self, mock_environ):
        assert get_env_float_non_negative("RAY_SERVE_TEST_VAR", 0.0) == 0.0
        assert get_env_float_non_negative("RAY_SERVE_TEST_VAR", 1.5) == 1.5
        mock_environ["RAY_SERVE_TEST_VAR"] = "42.5"
        assert get_env_float_non_negative("RAY_SERVE_TEST_VAR", 0.0) == 42.5
        mock_environ["RAY_SERVE_TEST_VAR"] = "-1.2"
        with pytest.raises(ValueError, match=".*Expected non negative `float`.*"):
            get_env_float_non_negative("RAY_SERVE_TEST_VAR", 5.0)

    def test_get_env_str(self, mock_environ):
        mock_environ["RAY_SERVE_TEST_STR"] = "hello"
        assert get_env_str("RAY_SERVE_TEST_STR", "default") == "hello"
        assert get_env_str("RAY_SERVE_NONEXISTENT_VAR", "default_str") == "default_str"
        assert get_env_str("RAY_SERVE_NONEXISTENT_VAR", None) is None

    def test_get_env_bool(self, mock_environ):
        # Only the literal string "1" is treated as True.
        mock_environ["RAY_SERVE_TEST_BOOL_TRUE"] = "1"
        assert get_env_bool("RAY_SERVE_TEST_BOOL_TRUE", "0") is True
        # Test with any other value (False)
        mock_environ["RAY_SERVE_TEST_BOOL_FALSE"] = "true"
        assert get_env_bool("RAY_SERVE_TEST_BOOL_FALSE", "0") is False
        mock_environ["RAY_SERVE_TEST_BOOL_FALSE2"] = "yes"
        assert get_env_bool("RAY_SERVE_TEST_BOOL_FALSE2", "0") is False
        # Test with default when environment variable not set
        assert get_env_bool("RAY_SERVE_NONEXISTENT_VAR", "1") is True
        assert get_env_bool("RAY_SERVE_NONEXISTENT_VAR", "0") is False
class TestValidation:
    """Tests for `_validate_name`, which enforces the RAY_SERVE_ name prefix."""

    @pytest.mark.parametrize(
        "name",
        [
            "RAY_SERVE_FOO",
            "RAY_SERVE__DOUBLE_UNDERSCORE",
            "RAY_SERVE_123",
            "RAY_SERVE_VAR_NAME",
        ],
    )
    def test_validate_name_accepts_valid_prefix(self, name):
        # Should not raise
        assert _validate_name(name) is None

    @pytest.mark.parametrize(
        "name",
        [
            "",
            "RAY_SERVE",  # missing trailing underscore and name
            "SERVE_VAR",
            "ray_SERVE_BAR",
            "RAY_service_VAR",
        ],
    )
    def test_validate_name_rejects_invalid_prefix(self, name):
        # The prefix check is case-sensitive and requires the trailing underscore.
        with pytest.raises(ValueError, match=".*require prefix `RAY_SERVE_`*"):
            _validate_name(name)
if __name__ == "__main__":
    import sys

    # Allow running this test module directly: verbose, stdout capture disabled.
    sys.exit(pytest.main(["-v", "-s", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/unit/test_constants_utils.py",
"license": "Apache License 2.0",
"lines": 180,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/_common/network_utils.py | import socket
from contextlib import closing
from functools import lru_cache
from typing import Optional, Tuple, Union
from ray._raylet import (
build_address as _build_address,
is_ipv6 as _is_ipv6,
node_ip_address_from_perspective as _node_ip_address_from_perspective,
parse_address as _parse_address,
)
def parse_address(address: str) -> Optional[Tuple[str, str]]:
    """Split a network address string into its host and port components.

    Args:
        address: Address string such as "localhost:8000" or "[::1]:8000".

    Returns:
        A (host, port) tuple when a port separator is present; None when the
        string contains no colon separator.
    """
    parsed = _parse_address(address)
    return parsed
def build_address(host: str, port: Union[int, str]) -> str:
    """Join a host and port into a single address string.

    Args:
        host: Hostname or IP address (IPv6 hosts get bracketed).
        port: Port number, as an int or a string.

    Returns:
        An address string such as "localhost:8000" or "[::1]:8000".
    """
    formatted = _build_address(host, port)
    return formatted
def node_ip_address_from_perspective(address: Optional[str] = None) -> str:
    """Determine the local node's IP as seen *from* the given address.

    When no address is supplied, public DNS servers are used for detection.

    Args:
        address: IP address and port of any known live service on the
            network of interest, or None to use the default.

    Returns:
        The IP address by which the local node is reachable from `address`.
    """
    local_ip = _node_ip_address_from_perspective(address)
    return local_ip
def is_ipv6(host: str) -> bool:
    """Report whether a host resolves to an IPv6 address.

    Args:
        host: IP address or domain name, without a port.

    Returns:
        True when the host resolves to IPv6, False when it resolves to IPv4.
    """
    resolved_ipv6 = _is_ipv6(host)
    return resolved_ipv6
@lru_cache(maxsize=1)
def get_localhost_ip() -> str:
    """Resolve the localhost loopback IP, preferring IPv4 over IPv6.

    The result is cached for the lifetime of the process.

    Returns:
        The IP that "localhost" resolves to for the first address family
        that succeeds, or "127.0.0.1" when resolution fails for both.
    """
    for address_family in (socket.AF_INET, socket.AF_INET6):
        try:
            resolved = socket.getaddrinfo(
                "localhost", None, address_family, socket.SOCK_STREAM
            )
            # getaddrinfo entries are (family, type, proto, canonname, sockaddr);
            # sockaddr[0] is the IP string for both IPv4 and IPv6.
            return resolved[0][4][0]
        except Exception:
            continue
    # Neither family resolved: fall back to the IPv4 loopback literal.
    return "127.0.0.1"
def is_localhost(host: str) -> bool:
    """Report whether `host` names the local loopback interface.

    Recognizes the literal hostname "localhost" plus the IPv4 and IPv6
    loopback addresses; any other string returns False.

    Args:
        host: Hostname or IP address to check.

    Returns:
        True for a loopback identifier, False otherwise.
    """
    return host in {"localhost", "127.0.0.1", "::1"}
def find_free_port(family: socket.AddressFamily = socket.AF_INET) -> int:
    """Return a port number that was free at the moment of the call.

    Binds a throwaway socket to port 0 so the OS assigns an unused port,
    reads the assignment back, and closes the socket. Note the port is only
    guaranteed free at bind time; another process may claim it afterwards.

    Args:
        family: Socket address family (AF_INET for IPv4, AF_INET6 for IPv6).
            Defaults to AF_INET.

    Returns:
        The ephemeral port number chosen by the OS.
    """
    probe = socket.socket(family, socket.SOCK_STREAM)
    with closing(probe):
        probe.bind(("", 0))
        return probe.getsockname()[1]
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/_common/network_utils.py",
"license": "Apache License 2.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:python/ray/_common/tests/test_network_utils.py | import sys
import pytest
from ray._common.network_utils import is_localhost
def test_is_localhost():
    """is_localhost accepts loopback identifiers and rejects remote IPs."""
    for loopback in ("localhost", "127.0.0.1", "::1"):
        assert is_localhost(loopback)
    for remote in ("8.8.8.8", "2001:db8::1"):
        assert not is_localhost(remote)
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/_common/tests/test_network_utils.py",
"license": "Apache License 2.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/_private/telemetry/metric_cardinality.py | from enum import Enum
from typing import Callable, Dict, List
from ray._private.ray_constants import RAY_METRIC_CARDINALITY_LEVEL
from ray._private.telemetry.metric_types import MetricType
# Keep in sync with the WorkerIdKey in src/ray/stats/tag_defs.cc
WORKER_ID_TAG_KEY = "WorkerId"
# Keep in sync with the NameKey in src/ray/stats/tag_defs.cc
TASK_OR_ACTOR_NAME_TAG_KEY = "Name"
# Aggregation functions for high-cardinality gauge metrics when labels are dropped.
# Counter and Sum metrics always use sum() aggregation.
HIGH_CARDINALITY_GAUGE_AGGREGATION: Dict[str, Callable[[List[float]], float]] = {
"tasks": sum,
"actors": sum,
}
_CARDINALITY_LEVEL = None
_HIGH_CARDINALITY_LABELS: Dict[str, List[str]] = {}
class MetricCardinality(str, Enum):
    """Cardinality level configuration for all Ray metrics (ray_tasks, ray_actors,
    etc.). This configuration is used to determine whether to globally drop high
    cardinality labels. This is important for high scale clusters that might consist
    of thousands of workers, millions of tasks.

    - LEGACY: Keep all labels. This is the default behavior.
    - RECOMMENDED: Drop high cardinality labels. The set of high cardinality labels
    are determined internally by Ray and not exposed to users. Currently, this includes
    the following labels: WorkerId
    - LOW: Same as RECOMMENDED, but also drop the Name label for tasks and actors.
    """

    LEGACY = "legacy"
    RECOMMENDED = "recommended"
    LOW = "low"

    @staticmethod
    def get_cardinality_level() -> "MetricCardinality":
        """Return the configured cardinality level, computed once per process.

        Reads RAY_METRIC_CARDINALITY_LEVEL on the first call and caches the
        result in the module-level `_CARDINALITY_LEVEL`; unrecognized values
        silently fall back to LEGACY.
        """
        global _CARDINALITY_LEVEL
        if _CARDINALITY_LEVEL is not None:
            return _CARDINALITY_LEVEL
        try:
            _CARDINALITY_LEVEL = MetricCardinality(RAY_METRIC_CARDINALITY_LEVEL.lower())
        except ValueError:
            # Unknown setting: keep the historical behavior of retaining all labels.
            _CARDINALITY_LEVEL = MetricCardinality.LEGACY
        return _CARDINALITY_LEVEL

    @staticmethod
    def get_aggregation_function(
        metric_name: str, metric_type: MetricType = MetricType.GAUGE
    ) -> Callable[[List[float]], float]:
        """Get the aggregation function for a metric when labels are dropped. This method does not currently support histogram metrics.

        Args:
            metric_name: The name of the metric.
            metric_type: The type of the metric. If provided, Counter and Sum
                metrics always use sum() aggregation.

        Returns:
            A function that takes a list of values and returns the aggregated value.

        Raises:
            ValueError: If `metric_type` is HISTOGRAM.
        """
        # Counter and Sum metrics always aggregate by summing
        if metric_type in (MetricType.COUNTER, MetricType.SUM):
            return sum
        # Histogram metrics are not supported by this method
        if metric_type == MetricType.HISTOGRAM:
            raise ValueError("No Aggregation function for histogram metrics.")
        # Gauge metrics use metric-specific aggregation or default to first value
        if metric_name in HIGH_CARDINALITY_GAUGE_AGGREGATION:
            return HIGH_CARDINALITY_GAUGE_AGGREGATION[metric_name]
        return lambda values: values[0]

    @staticmethod
    def get_high_cardinality_metrics() -> List[str]:
        # Metric names that carry high-cardinality labels (keys of the
        # module-level aggregation table, currently "tasks" and "actors").
        return list(HIGH_CARDINALITY_GAUGE_AGGREGATION.keys())

    @staticmethod
    def get_high_cardinality_labels_to_drop(metric_name: str) -> List[str]:
        """
        Get the high cardinality labels of the metric.

        The answer is memoized per metric name in the module-level
        `_HIGH_CARDINALITY_LABELS`, so later changes to the cardinality
        level are not picked up by subsequent calls.
        """
        if metric_name in _HIGH_CARDINALITY_LABELS:
            return _HIGH_CARDINALITY_LABELS[metric_name]
        cardinality_level = MetricCardinality.get_cardinality_level()
        if (
            cardinality_level == MetricCardinality.LEGACY
            or metric_name not in MetricCardinality.get_high_cardinality_metrics()
        ):
            _HIGH_CARDINALITY_LABELS[metric_name] = []
            return []
        # RECOMMENDED and LOW both drop the worker ID label.
        _HIGH_CARDINALITY_LABELS[metric_name] = [WORKER_ID_TAG_KEY]
        if cardinality_level == MetricCardinality.LOW and metric_name in [
            "tasks",
            "actors",
        ]:
            _HIGH_CARDINALITY_LABELS[metric_name].append(TASK_OR_ACTOR_NAME_TAG_KEY)
        return _HIGH_CARDINALITY_LABELS[metric_name]
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/_private/telemetry/metric_cardinality.py",
"license": "Apache License 2.0",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/serve/llm/deployment.py | from ray.llm._internal.serve.core.server.llm_server import (
LLMServer as InternalLLMServer,
)
from ray.llm._internal.serve.serving_patterns.data_parallel.dp_server import (
DPServer as _DPServer,
)
from ray.llm._internal.serve.serving_patterns.prefill_decode.pd_server import (
PDProxyServer as _PDProxyServer,
)
from ray.util.annotations import PublicAPI
#############
# Deployments
#############
@PublicAPI(stability="alpha")
class LLMServer(InternalLLMServer):
    """The implementation of the vLLM engine deployment.

    To build a Deployment object you should use the `build_llm_deployment`
    function. We also expose a lower level API for more control over the
    deployment class through the `serve.deployment` function.

    Examples:
        .. testcode::
            :skipif: True

            from ray import serve
            from ray.serve.llm import LLMConfig
            from ray.serve.llm.deployment import LLMServer

            # Configure the model
            llm_config = LLMConfig(
                model_loading_config=dict(
                    served_model_name="llama-3.1-8b",
                    model_source="meta-llama/Llama-3.1-8b-instruct",
                ),
                deployment_config=dict(
                    autoscaling_config=dict(
                        min_replicas=1,
                        max_replicas=8,
                    )
                ),
            )

            # Build the deployment directly
            serve_options = LLMServer.get_deployment_options(llm_config)
            llm_app = serve.deployment(LLMServer).options(
                **serve_options).bind(llm_config)
            model_handle = serve.run(llm_app)

            # Query the model via `chat` api
            from ray.serve.llm.openai_api_models import ChatCompletionRequest
            request = ChatCompletionRequest(
                model="llama-3.1-8b",
                messages=[
                    {
                        "role": "user",
                        "content": "Hello, world!"
                    }
                ]
            )
            response = ray.get(model_handle.chat(request))
            print(response)
    """

    # Public re-export of the internal server implementation; no behavior added.
    pass
@PublicAPI(stability="alpha")
class PDProxyServer(_PDProxyServer):
    """A proxy server for prefill-decode disaggregation.

    This server acts as a proxy in a prefill-decode disaggregated system.
    For chat and completions, the proxy sends the request to the prefill
    server with max_tokens=1 and then sends the returned metadata to the
    decode server.

    Args:
        prefill_server: The prefill server deployment handle.
        decode_server: The decode server deployment handle.
    """

    # Public re-export of the internal proxy implementation; no behavior added.
    pass
@PublicAPI(stability="alpha")
class DPServer(_DPServer):
    """Data Parallel LLM Server.

    This class is used to serve the data parallel attention (DP Attention)
    deployment paradigm, where the attention layers are replicated and
    the MoE layers are sharded. DP Attention is typically used for models
    like DeepSeek-V3.

    To build a Deployment object you should use the `build_dp_deployment`
    function. We also expose a lower level API for more control over the
    deployment class through the `serve.deployment` function.

    Examples:
        .. testcode::
            :skipif: True

            from ray import serve
            from ray.serve.llm import LLMConfig, build_dp_deployment

            # Configure the model
            llm_config = LLMConfig(
                model_loading_config=dict(
                    model_id="Qwen/Qwen2.5-0.5B-Instruct",
                ),
                engine_kwargs=dict(
                    data_parallel_size=2,
                    tensor_parallel_size=1,
                ),
                experimental_configs=dict(
                    dp_size_per_node=2,
                ),
                accelerator_type="A10G",
            )

            # Build the deployment
            dp_app = build_dp_deployment(llm_config)

            # Deploy the application
            model_handle = serve.run(dp_app)
    """

    # Public re-export of the internal DP server implementation; no behavior added.
    pass
__all__ = ["LLMServer", "PDProxyServer", "DPServer"]
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/llm/deployment.py",
"license": "Apache License 2.0",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:python/ray/serve/llm/ingress.py | from ray.llm._internal.serve.core.ingress.ingress import (
OpenAiIngress as _OpenAiIngress,
make_fastapi_ingress,
)
from ray.util.annotations import PublicAPI
@PublicAPI(stability="alpha")
class OpenAiIngress(_OpenAiIngress):
    """The implementation of the OpenAI compatible model router.

    This deployment creates the following endpoints:
      - /v1/chat/completions: Chat interface (OpenAI-style)
      - /v1/completions: Text completion
      - /v1/models: List available models
      - /v1/models/{model}: Model information
      - /v1/embeddings: Text embeddings
      - /v1/audio/transcriptions: Audio transcription
      - /v1/score: Text scoring

    Examples:
        .. testcode::
            :skipif: True

            from ray import serve
            from ray.serve.llm import LLMConfig
            from ray.serve.llm.deployment import LLMServer
            from ray.serve.llm.ingress import OpenAiIngress, make_fastapi_ingress

            llm_config1 = LLMConfig(
                model_loading_config=dict(
                    model_id="qwen-0.5b",
                    model_source="Qwen/Qwen2.5-0.5B-Instruct",
                ),
                deployment_config=dict(
                    autoscaling_config=dict(
                        min_replicas=1, max_replicas=2,
                    )
                ),
                accelerator_type="A10G",
            )
            llm_config2 = LLMConfig(
                model_loading_config=dict(
                    model_id="qwen-1.5b",
                    model_source="Qwen/Qwen2.5-1.5B-Instruct",
                ),
                deployment_config=dict(
                    autoscaling_config=dict(
                        min_replicas=1, max_replicas=2,
                    )
                ),
                accelerator_type="A10G",
            )

            # deployment #1
            server_options1 = LLMServer.get_deployment_options(llm_config1)
            server_deployment1 = serve.deployment(LLMServer).options(
                **server_options1).bind(llm_config1)

            # deployment #2
            server_options2 = LLMServer.get_deployment_options(llm_config2)
            server_deployment2 = serve.deployment(LLMServer).options(
                **server_options2).bind(llm_config2)

            # ingress
            ingress_options = OpenAiIngress.get_deployment_options(
                llm_configs=[llm_config1, llm_config2])
            ingress_cls = make_fastapi_ingress(OpenAiIngress)
            ingress_deployment = serve.deployment(ingress_cls).options(
                **ingress_options).bind([server_deployment1, server_deployment2])

            # run
            serve.run(ingress_deployment, blocking=True)
    """

    # Public re-export of the internal ingress implementation; no behavior added.
    pass
__all__ = ["OpenAiIngress", "make_fastapi_ingress"]
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/llm/ingress.py",
"license": "Apache License 2.0",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:python/ray/serve/llm/request_router.py | from ray.llm._internal.serve.routing_policies.prefix_aware.prefix_aware_router import (
PrefixCacheAffinityRouter as _PrefixCacheAffinityRouter,
)
from ray.util.annotations import PublicAPI
@PublicAPI(stability="alpha")
class PrefixCacheAffinityRouter(_PrefixCacheAffinityRouter):
    """A request router that is aware of the KV cache.

    This router optimizes request routing by considering KV cache locality,
    directing requests with similar prefixes to the same replica to improve
    cache hit rates.

    The internal policy is this (it may change in the future):

    1. Mixes between three strategies to balance prefix cache hit rate and load
       balancing:

       - When load is balanced (queue length difference < threshold), it
         selects replicas with the highest prefix match rate for the input text
       - When load is balanced but match rate is below 10%, it falls back to
         the smallest tenants (i.e. the replica with the least kv cache)
       - When load is imbalanced, it uses the default Power of Two selection

    2. Maintains a prefix tree to track which replicas have processed similar
       inputs:

       - Inserts prompt text into the prefix tree after routing
       - Uses this history to inform future routing decisions

    Parameters:
        imbalanced_threshold: The threshold for considering the load imbalanced.
        match_rate_threshold: The threshold for considering the match rate.
        do_eviction: Whether to do eviction.
        eviction_threshold_chars: Number of characters in the tree to trigger
            eviction.
        eviction_target_chars: Number of characters in the tree to target for
            eviction.
        eviction_interval_secs: How often (in seconds) to run the eviction
            policy.
    """

    # Public re-export of the internal router implementation; no behavior added.
    pass
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/llm/request_router.py",
"license": "Apache License 2.0",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:python/ray/data/_internal/logical/operators/streaming_split_operator.py | from typing import TYPE_CHECKING, List, Optional
from ray.data._internal.logical.interfaces import LogicalOperator
if TYPE_CHECKING:
from ray.data._internal.execution.interfaces import NodeIdStr
__all__ = [
"StreamingSplit",
]
class StreamingSplit(LogicalOperator):
    """Logical operator that represents splitting the input data to `n` splits."""

    def __init__(
        self,
        input_op: LogicalOperator,
        num_splits: int,
        equal: bool,
        locality_hints: Optional[List["NodeIdStr"]] = None,
    ):
        """Create a streaming-split logical operator.

        Args:
            input_op: Upstream operator whose output will be split.
            num_splits: Number of output splits to produce.
            equal: Whether the splits should be equalized.
            locality_hints: Optional node IDs used as placement hints,
                one per split.
        """
        super().__init__(input_dependencies=[input_op])
        self.num_splits = num_splits
        self.equal = equal
        self.locality_hints = locality_hints

    @property
    def num_outputs(self) -> Optional[int]:
        # NOTE(review): `_num_outputs` is never assigned in this class; it is
        # presumably initialized by the LogicalOperator base class — confirm,
        # otherwise accessing this property raises AttributeError.
        return self._num_outputs
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/logical/operators/streaming_split_operator.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.