sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
ray-project/ray:release/nightly_tests/dataset/tpch/tpch_q6.py | import ray
from ray.data.aggregate import Sum
from ray.data.expressions import col
from common import parse_tpch_args, load_table, to_f64, run_tpch_benchmark
def main(args):
    """Run TPC-H Q6 (forecasting revenue change) as a Ray Data benchmark."""

    def benchmark_fn():
        from datetime import datetime

        lineitem = load_table("lineitem", args.sf)

        # Q6 parameters.
        start_date = datetime(1994, 1, 1)
        end_date = datetime(start_date.year + 1, start_date.month, start_date.day)
        target_discount = 0.06
        max_quantity = 24

        # Keep rows shipped within the one-year window, with a discount close
        # to the target and a quantity below the threshold.
        filtered = lineitem.filter(
            expr=(
                (col("l_shipdate") >= start_date)
                & (col("l_shipdate") < end_date)
                & (col("l_discount") >= target_discount - 0.01)
                & (col("l_discount") <= target_discount + 0.01)
                & (col("l_quantity") < max_quantity)
            )
        )

        # revenue = l_extendedprice * l_discount, computed in float64.
        with_revenue = filtered.with_column(
            "revenue", to_f64(col("l_extendedprice")) * to_f64(col("l_discount"))
        )

        # Sum the revenue column into the final aggregate result.
        return with_revenue.aggregate(Sum(on="revenue", alias_name="revenue"))

    run_tpch_benchmark("tpch_q6", benchmark_fn)
if __name__ == "__main__":
    # Connect to (or start) a Ray cluster before running the benchmark.
    ray.init()
    args = parse_tpch_args()
    main(args)
| {
"repo_id": "ray-project/ray",
"file_path": "release/nightly_tests/dataset/tpch/tpch_q6.py",
"license": "Apache License 2.0",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/data/_internal/execution/bundle_queue/reordering.py | from __future__ import annotations
from collections import defaultdict, deque
from typing import TYPE_CHECKING, DefaultDict, Deque, Optional, Set
from typing_extensions import override
from ray.data._internal.execution.bundle_queue import BaseBundleQueue
if TYPE_CHECKING:
from ray.data._internal.execution.interfaces import RefBundle
class ReorderingBundleQueue(BaseBundleQueue):
    """A queue that iterates over the bundles in the order of provided "keys" rather than
    insertion order (for bundles inserted with the same key, insertion order is used)

    User of this queue has to adhere to following invariants of this queue:

        1. (!) Used keys have to be a *contiguous* range of `[0, N]`

    Failure to follow this requirement might result in this queue getting
    irreversibly stuck.
    """

    def __init__(self):
        super().__init__()
        # Per-key FIFO of bundles; a defaultdict so that lookups for a
        # not-yet-seen key transparently create an empty deque.
        self._inner: DefaultDict[int, Deque[RefBundle]] = defaultdict(lambda: deque())
        # The key currently being drained: only bundles under this key are
        # visible to get/peek until the key is finalized and exhausted.
        self._current_key: int = 0
        # Keys for which `finalize` has been called (no further bundles
        # expected under them).
        self._finalized_keys: Set[int] = set()

    def _move_to_next_key(self):
        """Move the output index to the next task.

        This method should only be called when the current task is complete and all
        outputs have been taken.
        """
        assert len(self._inner[self._current_key]) == 0
        assert self._current_key in self._finalized_keys
        self._current_key += 1

    @override
    def _add_inner(self, bundle: RefBundle, key: int) -> None:
        assert key is not None
        self._inner[key].append(bundle)

    @override
    def has_next(self) -> bool:
        # Skip past keys that are finalized and fully drained. This relies on
        # keys forming a contiguous range (see class docstring); a gap in the
        # key range would stall this loop on a never-finalized key.
        while (
            self._current_key in self._finalized_keys
            and len(self._inner[self._current_key]) == 0
        ):
            self._move_to_next_key()
        return len(self._inner[self._current_key]) > 0

    @override
    def _get_next_inner(self) -> RefBundle:
        # It's vital to invoke `has_next` here, to potentially advance the pointer
        # to the next key
        if not self.has_next():
            raise ValueError("Cannot pop from empty queue.")
        return self._inner[self._current_key].popleft()

    @override
    def peek_next(self) -> Optional[RefBundle]:
        # It's vital to invoke `has_next` here, to potentially advance the pointer
        # to the next key
        if not self.has_next():
            return None
        return self._inner[self._current_key][0]

    @override
    def finalize(self, key: int):
        # A key may only be finalized while it is still at or ahead of the
        # read pointer (already-drained keys cannot be re-finalized).
        assert key is not None and key >= self._current_key
        self._finalized_keys.add(key)

    @override
    def clear(self):
        self._reset_metrics()
        self._inner.clear()
        self._finalized_keys.clear()
        self._current_key = 0
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/execution/bundle_queue/reordering.py",
"license": "Apache License 2.0",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:rllib/algorithms/dqn/tests/test_dqn_rl_module.py | import dataclasses
import numpy as np
import pytest
import tree
from gymnasium.spaces import Box, Dict, Discrete
from ray.rllib.algorithms.dqn.dqn_catalog import DQNCatalog
from ray.rllib.algorithms.dqn.torch.default_dqn_torch_rl_module import (
DefaultDQNTorchRLModule,
)
from ray.rllib.core.columns import Columns
from ray.rllib.core.models.base import ENCODER_OUT
from ray.rllib.core.rl_module.default_model_config import DefaultModelConfig
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.torch_utils import convert_to_torch_tensor
torch, nn = try_import_torch()
# Custom encoder, config and catalog to test Dict observation spaces.
# RLlib does not build encoders for Dict observation spaces out of the box so we define our own.
class DictFlattenEncoder(nn.Module):
    """Encoder that flattens and concatenates Dict-space observations.

    Each sub-space is flattened to (batch, -1), the pieces are concatenated
    in sorted-key order, and the result is passed through a small MLP.
    """

    def __init__(self, obs_space, output_dim=64):
        super().__init__()
        # Total number of scalar inputs across all sub-spaces.
        flat_dim = sum(
            int(np.prod(space.shape)) for space in obs_space.spaces.values()
        )
        self.net = nn.Sequential(
            nn.Linear(flat_dim, output_dim),
            nn.ReLU(),
        )

    def forward(self, inputs):
        obs = inputs[Columns.OBS]
        # Flatten each component and concatenate in deterministic (sorted) order.
        pieces = [obs[key].reshape(obs[key].shape[0], -1) for key in sorted(obs)]
        return {ENCODER_OUT: self.net(torch.cat(pieces, dim=-1))}
class DictEncoderConfig:
    """Minimal encoder-config shim exposing the interface the catalog expects."""

    def __init__(self, obs_space, output_dim=64):
        self.obs_space = obs_space
        # Catalogs read `output_dims` as a tuple of output sizes.
        self.output_dims = (output_dim,)

    def build(self, framework):
        # `framework` is accepted for interface compatibility; this test
        # helper always builds the torch encoder.
        return DictFlattenEncoder(self.obs_space, output_dim=self.output_dims[0])
class DictObsDQNCatalog(DQNCatalog):
    """DQN catalog that always uses the custom Dict-flattening encoder."""

    @classmethod
    def _get_encoder_config(
        cls, observation_space, model_config_dict, action_space=None
    ):
        # Bypass the stock encoder selection (which does not build encoders
        # for Dict observation spaces) and return our flattening config.
        return DictEncoderConfig(observation_space, output_dim=64)
# Observation space definitions.
# `dict` deliberately mixes Box and Discrete sub-spaces to exercise the
# custom Dict encoder path above.
OBS_SPACES = {
    "box": Box(low=-1.0, high=1.0, shape=(8,), dtype=np.float32),
    "image": Box(low=0, high=255, shape=(64, 64, 3), dtype=np.uint8),
    "dict": Dict(
        {
            "sensors": Box(low=-1.0, high=1.0, shape=(4,), dtype=np.float32),
            "position": Box(low=-10.0, high=10.0, shape=(3,), dtype=np.float32),
            "mode": Discrete(4),
        }
    ),
}
def _get_dqn_module(observation_space, action_space, **config_overrides):
    """Build a DefaultDQNTorchRLModule (with target nets) for the given spaces.

    Keyword overrides are merged on top of the default DQN model config.
    """
    base_config = dataclasses.asdict(DefaultModelConfig())
    base_config.update(
        {
            "double_q": True,
            "dueling": True,
            "epsilon": [(0, 1.0), (10000, 0.05)],
            "num_atoms": 1,
            "v_min": -10.0,
            "v_max": 10.0,
        }
    )
    base_config.update(config_overrides)

    # Dict observation spaces need the custom catalog; everything else can
    # use the stock DQNCatalog.
    if isinstance(observation_space, Dict):
        catalog_class = DictObsDQNCatalog
    else:
        catalog_class = DQNCatalog

    module = DefaultDQNTorchRLModule(
        observation_space=observation_space,
        action_space=action_space,
        model_config=base_config,
        catalog_class=catalog_class,
        inference_only=False,
    )
    # Target networks are normally created by the learner; do it manually here.
    module.make_target_networks()
    return module
class TestDQNRLModule:
    @pytest.mark.parametrize("obs_space_name", ["box", "image", "dict"])
    @pytest.mark.parametrize("forward_method", ["train", "exploration", "inference"])
    @pytest.mark.parametrize("double_q", [True, False])
    @pytest.mark.parametrize("dueling", [True, False])
    def test_forward(self, obs_space_name, forward_method, double_q, dueling):
        """Test forward methods with different obs spaces and config settings."""
        obs_space = OBS_SPACES[obs_space_name]
        action_space = Discrete(4)
        module = _get_dqn_module(
            obs_space, action_space, double_q=double_q, dueling=dueling
        )
        if (
            forward_method == "train"
        ):  # forward train needs batching, exploration and inference don't
            module.train()
            # Create a batch first
            batch_size = 4
            obs_list = [obs_space.sample() for _ in range(batch_size)]
            next_obs_list = [obs_space.sample() for _ in range(batch_size)]
            # Stack per-sample observations into (batch, ...) arrays; Dict
            # observations are stacked leaf-by-leaf via tree.map_structure.
            obs_batch = tree.map_structure(
                lambda *x: np.stack(x, axis=0, dtype=np.float32), *obs_list
            )
            next_obs_batch = tree.map_structure(
                lambda *x: np.stack(x, axis=0, dtype=np.float32), *next_obs_list
            )
            batch = {
                Columns.OBS: convert_to_torch_tensor(obs_batch),
                Columns.NEXT_OBS: convert_to_torch_tensor(next_obs_batch),
                Columns.ACTIONS: convert_to_torch_tensor(
                    np.array([0] * batch_size, dtype=np.int64)
                ),
                Columns.REWARDS: convert_to_torch_tensor(
                    np.array([1.0] * batch_size, dtype=np.float32)
                ),
                Columns.TERMINATEDS: convert_to_torch_tensor(
                    np.array([False] * batch_size, dtype=np.bool_)
                ),
                Columns.TRUNCATEDS: convert_to_torch_tensor(
                    np.array([False] * batch_size, dtype=np.bool_)
                ),
            }
            # Forward pass and check outputs
            output = module.forward_train(batch)
            assert "qf_preds" in output
            assert output["qf_preds"].shape == (4, action_space.n)
            # Double-Q additionally produces Q-values for the next obs.
            if double_q:
                assert "qf_next_preds" in output
                assert output["qf_next_preds"].shape == (4, action_space.n)
            else:
                assert "qf_next_preds" not in output
        else:
            module.eval()
            # Create a single observation batch (leading [None] adds the
            # batch dimension).
            obs = obs_space.sample()
            if isinstance(obs_space, Dict):
                obs_tensor = tree.map_structure(
                    lambda x: convert_to_torch_tensor(x.astype(np.float32)[None]),
                    obs,
                )
            else:
                obs_tensor = convert_to_torch_tensor(obs.astype(np.float32)[None])
            batch = {Columns.OBS: obs_tensor}
            # Forward pass and check outputs
            if forward_method == "exploration":
                output = module.forward_exploration(batch, t=0)
            else:
                output = module.forward_inference(batch)
            assert Columns.ACTIONS in output
            assert output[Columns.ACTIONS].shape == (1,)
if __name__ == "__main__":
    import sys

    # Run this file's tests directly and propagate pytest's exit code.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/algorithms/dqn/tests/test_dqn_rl_module.py",
"license": "Apache License 2.0",
"lines": 158,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:release/nightly_tests/dataset/tpch/common.py | import argparse
import pyarrow as pa
import pyarrow.compute as pc
import ray
from ray.data.datatype import DataType
from ray.data.expressions import udf
from benchmark import Benchmark
# Define schemas for TPC-H tables
# The benchmark parquet files use positional names ("column00", ...); this
# maps them to the canonical TPC-H column names, per table.
TABLE_COLUMNS = {
    "lineitem": {
        "column00": "l_orderkey",
        "column01": "l_partkey",
        "column02": "l_suppkey",
        "column03": "l_linenumber",
        "column04": "l_quantity",
        "column05": "l_extendedprice",
        "column06": "l_discount",
        "column07": "l_tax",
        "column08": "l_returnflag",
        "column09": "l_linestatus",
        "column10": "l_shipdate",
        "column11": "l_commitdate",
        "column12": "l_receiptdate",
        "column13": "l_shipinstruct",
        "column14": "l_shipmode",
        "column15": "l_comment",
    }
}
@udf(return_dtype=DataType.float64())
def to_f64(arr: pa.Array) -> pa.Array:
    """Cast any numeric type to float64 (used for revenue arithmetic in queries)."""
    return pc.cast(arr, pa.float64())
def parse_tpch_args(description: str = "TPC-H Benchmark") -> argparse.Namespace:
    """Parse the command-line arguments shared by the TPC-H benchmark scripts.

    Args:
        description: Text shown in the --help output.

    Returns:
        Namespace with a single `sf` attribute (the TPC-H scale factor).
    """
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument(
        "--sf",
        type=int,
        default=1,
        choices=[1, 10, 100, 1000, 10000],
        help="Scale factor",
    )
    return parser.parse_args()
def load_table(
    table_name: str,
    scale_factor: int,
    base_uri: str = "s3://ray-benchmark-data/tpch/parquet",
) -> ray.data.Dataset:
    """Read one TPC-H table from S3 at the given scale factor.

    When a mapping is known for the table, columns are renamed from the
    generic parquet names ("column00", ...) to the canonical TPC-H names.
    """
    dataset = ray.data.read_parquet(f"{base_uri}/sf{scale_factor}/{table_name}")
    rename_map = TABLE_COLUMNS.get(table_name)
    if rename_map is not None:
        dataset = dataset.rename_columns(rename_map)
    return dataset
def run_tpch_benchmark(name: str, benchmark_fn):
    """Execute `benchmark_fn` under the release-test Benchmark harness and persist results."""
    bench = Benchmark()
    bench.run_fn(name, benchmark_fn)
    bench.write_result()
| {
"repo_id": "ray-project/ray",
"file_path": "release/nightly_tests/dataset/tpch/common.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:release/train_tests/benchmark/image_classification/s3_url/factory.py | # Standard library imports
import logging
from typing import Dict
# Third-party imports
import ray.data
# Local imports
from constants import DatasetKey
from config import BenchmarkConfig
from image_classification.factory import ImageClassificationRayDataLoaderFactory
from .imagenet import (
create_s3_url_dataset,
)
logger = logging.getLogger(__name__)
class ImageClassificationS3UrlRayDataLoaderFactory(
    ImageClassificationRayDataLoaderFactory
):
    """Factory for creating Ray DataLoader that downloads images from S3 URLs.

    This factory:
    1. Lists JPEG files from S3 using boto3
    2. Creates a Ray dataset from the file records
    3. Uses map_batches to download and process images from S3

    This approach separates file listing from image downloading, which can be
    more efficient for certain workloads as it allows parallel downloads during
    the map_batches execution on CPU workers.
    """

    def __init__(
        self, benchmark_config: BenchmarkConfig, data_dirs: Dict[str, str]
    ) -> None:
        super().__init__(benchmark_config)
        # Mapping of DatasetKey -> S3 directory for each split.
        self._data_dirs = data_dirs

    def get_ray_datasets(self) -> Dict[str, ray.data.Dataset]:
        """Get Ray datasets for training and validation.

        Returns:
            Dictionary containing:
                - "train": Training dataset with random transforms
                - "val": Validation dataset without transforms
        """
        dataloader_config = self.get_dataloader_config()

        # Create training dataset; a non-positive limit means "no limit".
        train_limit = (
            dataloader_config.limit_training_rows
            if dataloader_config.limit_training_rows > 0
            else None
        )
        train_ds = create_s3_url_dataset(
            data_dir=self._data_dirs[DatasetKey.TRAIN],
            random_transforms=True,
            limit_rows=train_limit,
        )

        # Create validation dataset
        val_limit = (
            dataloader_config.limit_validation_rows
            if dataloader_config.limit_validation_rows > 0
            else None
        )
        # NOTE(review): the validation dataset also reads from the TRAIN
        # directory. This may be intentional (class labels are derived from
        # per-class subdirectories, which the ImageNet val split may lack),
        # but confirm it is not a copy-paste of the training branch above.
        val_ds = create_s3_url_dataset(
            data_dir=self._data_dirs[DatasetKey.TRAIN],
            random_transforms=False,
            limit_rows=val_limit,
        )

        return {
            DatasetKey.TRAIN: train_ds,
            DatasetKey.VALID: val_ds,
        }
| {
"repo_id": "ray-project/ray",
"file_path": "release/train_tests/benchmark/image_classification/s3_url/factory.py",
"license": "Apache License 2.0",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:release/train_tests/benchmark/image_classification/s3_url/imagenet.py | """ImageNet dataset loading via S3 URL download with Ray Data expressions.
This module provides dataset loading that:
1. Lists JPEG files from S3 using boto3 (parallelized via Ray tasks)
2. Creates a Ray dataset from the file records
3. Uses Ray Data expressions (alpha) to download image bytes efficiently
4. Uses map_batches to decode and process images
This approach leverages Ray Data's expressions API for optimized parallel I/O,
separating the download step from image processing for better throughput.
"""
import io
import logging
from functools import lru_cache
from typing import Callable, Dict, List, Optional, Tuple
import boto3
import numpy as np
from PIL import Image
from torchvision.transforms.functional import pil_to_tensor
import ray.data
from ray.data.expressions import download
from constants import DatasetKey
from image_classification.imagenet import (
get_transform,
IMAGENET_WNID_TO_ID,
)
logger = logging.getLogger(__name__)
# S3 configuration for ImageNet JPEG data
AWS_REGION = "us-west-2"
S3_ROOT = "s3://anyscale-imagenet/ILSVRC/Data/CLS-LOC"

# Mapping from benchmark split keys to the S3 "directories" holding each split.
IMAGENET_S3_URL_SPLIT_DIRS = {
    DatasetKey.TRAIN: f"{S3_ROOT}/train",
    DatasetKey.VALID: f"{S3_ROOT}/val",
    DatasetKey.TEST: f"{S3_ROOT}/test",
}
def _get_class_labels(bucket: str, prefix: str) -> List[str]:
    """Get all class label directories from S3.

    Uses the S3 list-objects delimiter to enumerate the immediate
    "subdirectories" under `prefix`, which correspond to class labels.

    Args:
        bucket: S3 bucket name
        prefix: S3 prefix path

    Returns:
        Sorted list of class label directory names
    """
    # Ensure prefix ends with "/" so the delimiter listing returns children
    # of the prefix rather than siblings.
    if prefix and not prefix.endswith("/"):
        prefix += "/"

    # List directories using delimiter
    s3_client = boto3.client("s3", region_name=AWS_REGION)
    paginator = s3_client.get_paginator("list_objects_v2")

    # With Delimiter="/", S3 groups keys by their next path component and
    # reports each group in CommonPrefixes -- i.e. the "directory" level.
    labels = set()
    for page in paginator.paginate(Bucket=bucket, Prefix=prefix, Delimiter="/"):
        for common_prefix in page.get("CommonPrefixes", []):
            # Extract the directory name: "a/b/c/" -> "c".
            labels.add(common_prefix["Prefix"].rstrip("/").split("/")[-1])
    return sorted(labels)
@ray.remote
def _list_files_for_label(
    bucket: str, prefix: str, label: str
) -> List[Tuple[str, str]]:
    """Ray task to list all image files for a specific label.

    Args:
        bucket: S3 bucket name
        prefix: S3 prefix (parent directory)
        label: Class label (subdirectory name)

    Returns:
        List of tuples with (file_path, class_name)
    """
    # The client is created inside the task (it is not captured from the
    # driver process).
    s3_client = boto3.client("s3", region_name=AWS_REGION)
    paginator = s3_client.get_paginator("list_objects_v2")

    # Construct the full prefix for this label
    label_prefix = f"{prefix}/{label}/" if prefix else f"{label}/"

    file_records = []
    for page in paginator.paginate(Bucket=bucket, Prefix=label_prefix):
        for obj in page.get("Contents", []):
            key = obj["Key"]
            # Case-insensitive extension filter for JPEG files only.
            if key.lower().endswith((".jpg", ".jpeg")):
                file_path = f"s3://{bucket}/{key}"
                file_records.append((file_path, label))
    return file_records
@lru_cache(maxsize=8)
def _list_s3_image_files_cached(data_dir: str) -> Tuple[Tuple[str, str], ...]:
    """Cached implementation of S3 file listing using Ray tasks for parallelism.

    Returns a tuple of (path, label) tuples for hashability (required by
    lru_cache).
    """
    logger.info(f"Listing JPEG files from {data_dir}...")

    # Parse "s3://bucket/prefix" into bucket and (slash-trimmed) prefix.
    path = data_dir[5:] if data_dir.startswith("s3://") else data_dir
    bucket, _, rest = path.partition("/")
    prefix = rest.rstrip("/")

    # Get all class labels, then launch one Ray task per label so the
    # per-label listings run in parallel.
    labels = _get_class_labels(bucket, prefix)
    logger.info(
        f"Found {len(labels)} class labels, launching Ray tasks for parallel listing..."
    )
    futures = [_list_files_for_label.remote(bucket, prefix, label) for label in labels]

    # Wait for all tasks, then flatten the per-label lists into one.
    file_records = [record for records in ray.get(futures) for record in records]

    logger.info(f"Listed and cached {len(file_records)} JPEG files")
    return tuple(file_records)
def list_s3_image_files(data_dir: str) -> List[Dict[str, str]]:
    """List JPEG files from S3 with class labels extracted from path.

    Results are cached to avoid repeated S3 listings.

    Args:
        data_dir: S3 path to list files from (e.g., "s3://bucket/prefix")

    Returns:
        List of dicts with "path" (S3 URL) and "class" (WNID) keys
    """
    return [
        {"path": path, "class": label}
        for path, label in _list_s3_image_files_cached(data_dir)
    ]
def get_process_batch_fn(
    random_transforms: bool = True,
    label_to_id_map: Optional[Dict[str, int]] = None,
) -> Callable[[Dict[str, np.ndarray]], Dict[str, np.ndarray]]:
    """Get a map_batches function that processes pre-downloaded image bytes.

    This function expects image bytes to already be downloaded (via Ray Data
    expressions) and handles decoding and transformations.

    Args:
        random_transforms: Whether to use random transforms for training
        label_to_id_map: Mapping from WNID strings to integer IDs

    Returns:
        A function suitable for use with dataset.map_batches()
    """
    if label_to_id_map is None:
        label_to_id_map = IMAGENET_WNID_TO_ID

    # Build the transform once here and close over it, instead of rebuilding
    # it for every batch.
    transform = get_transform(
        to_torch_tensor=False, random_transforms=random_transforms
    )

    def process_batch(
        batch: Dict[str, np.ndarray],
    ) -> Dict[str, np.ndarray]:
        """Process pre-downloaded image bytes.

        Args:
            batch: Dict with "bytes" (image data) and "class" arrays

        Returns:
            Dict with "image" (numpy array) and "label" (int) arrays
        """
        processed_images = []
        labels = []

        image_bytes_list = list(batch["bytes"])
        classes = list(batch["class"])

        for data, wnid in zip(image_bytes_list, classes):
            # Decode and transform image; uint8 [0, 255] is scaled to [0, 1]
            # floats before the transform is applied.
            image_pil = Image.open(io.BytesIO(data)).convert("RGB")
            image_tensor = pil_to_tensor(image_pil) / 255.0
            processed_image = np.array(transform(image_tensor))
            processed_images.append(processed_image)

            # Convert label (WNID string -> integer class id)
            labels.append(label_to_id_map[wnid])

        return {
            "image": np.stack(processed_images),
            "label": np.array(labels),
        }

    return process_batch
def create_s3_url_dataset(
    data_dir: str,
    random_transforms: bool = True,
    limit_rows: Optional[int] = None,
) -> ray.data.Dataset:
    """Create a Ray dataset that downloads images from S3 URLs.

    Uses Ray Data expressions (alpha) for efficient parallel downloads,
    then map_batches for image decoding and transformations.

    Args:
        data_dir: S3 path to the image directory
        random_transforms: Whether to use random transforms
        limit_rows: Optional row limit

    Returns:
        Ray dataset with "image" and "label" columns
    """
    ds = ray.data.from_items(list_s3_image_files(data_dir))

    if limit_rows is not None and limit_rows > 0:
        ds = ds.limit(limit_rows)

    # Fetch raw image bytes via the Ray Data `download` expression (alpha),
    # which performs optimized parallel I/O managed by Ray Data.
    ds = ds.with_column("bytes", download("path"))

    # Decode and transform the downloaded bytes into model-ready batches.
    return ds.map_batches(get_process_batch_fn(random_transforms=random_transforms))
| {
"repo_id": "ray-project/ray",
"file_path": "release/train_tests/benchmark/image_classification/s3_url/imagenet.py",
"license": "Apache License 2.0",
"lines": 190,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/serve/tests/test_metrics_3.py | import asyncio
import concurrent.futures
import os
import sys
import threading
import time
from typing import Dict, List
import httpx
import pytest
import redis
from starlette.requests import Request
import ray
from ray import serve
from ray._common.test_utils import PrometheusTimeseries, SignalActor, wait_for_condition
from ray.serve._private.common import DeploymentID
from ray.serve._private.constants import (
RAY_SERVE_RUN_ROUTER_IN_SEPARATE_LOOP,
RAY_SERVE_RUN_USER_CODE_IN_SEPARATE_THREAD,
SERVE_CONTROLLER_NAME,
SERVE_NAMESPACE,
)
from ray.serve._private.long_poll import LongPollClient, LongPollHost, UpdatedObject
from ray.serve._private.queue_monitor import (
create_queue_monitor_actor,
kill_queue_monitor_actor,
)
from ray.serve._private.test_utils import (
check_metric_float_eq,
get_application_url,
get_metric_dictionaries,
get_metric_float,
)
from ray.tests.conftest import external_redis # noqa: F401
from ray.util.state import list_actors
def test_deployment_and_application_status_metrics(metrics_start_shutdown):
    """Test that deployment and application status metrics are exported correctly.

    These metrics track the numeric status of deployments and applications:
    - serve_deployment_status: 0=UNKNOWN, 1=DEPLOY_FAILED, 2=UNHEALTHY,
      3=UPDATING, 4=UPSCALING, 5=DOWNSCALING, 6=HEALTHY
    - serve_application_status: 0=UNKNOWN, 1=NOT_STARTED, 2=DEPLOYING,
      3=DEPLOY_FAILED, 4=RUNNING, 5=UNHEALTHY, 6=DELETING
    """
    signal = SignalActor.remote()

    @serve.deployment(name="deployment_a")
    class DeploymentA:
        async def __init__(self):
            # Block replica initialization until the signal is sent, holding
            # app1 in a not-yet-healthy state.
            await signal.wait.remote()

        async def __call__(self):
            return "hello"

    @serve.deployment
    def deployment_b():
        return "world"

    # Deploy two applications with different deployments
    serve._run(DeploymentA.bind(), name="app1", route_prefix="/app1", _blocking=False)
    serve._run(deployment_b.bind(), name="app2", route_prefix="/app2", _blocking=False)
    timeseries = PrometheusTimeseries()

    # Wait for deployments to become healthy
    def check_status_metrics():
        # Check deployment status metrics
        deployment_metrics = get_metric_dictionaries(
            "ray_serve_deployment_status", timeseries=timeseries
        )
        if len(deployment_metrics) < 2:
            return False
        # Check application status metrics
        app_metrics = get_metric_dictionaries(
            "ray_serve_application_status", timeseries=timeseries
        )
        if len(app_metrics) < 2:
            return False
        return True

    wait_for_condition(check_status_metrics, timeout=30)

    # NOTE(review): the numeric application-status expectations below (5, then
    # 6) do not match the mapping in this test's docstring (5=UNHEALTHY,
    # 6=DELETING); one of the two appears stale -- confirm against the
    # ApplicationStatus enum.
    wait_for_condition(
        check_metric_float_eq,
        metric="ray_serve_deployment_status",
        expected=3,  # UPDATING
        expected_tags={"deployment": "deployment_a", "application": "app1"},
        timeseries=timeseries,
    )
    wait_for_condition(
        check_metric_float_eq,
        metric="ray_serve_application_status",
        expected=5,  # DEPLOYING
        expected_tags={"application": "app1"},
        timeseries=timeseries,
    )
    wait_for_condition(
        check_metric_float_eq,
        metric="ray_serve_deployment_status",
        expected=6,
        expected_tags={"deployment": "deployment_b", "application": "app2"},
        timeseries=timeseries,
    )
    wait_for_condition(
        check_metric_float_eq,
        metric="ray_serve_application_status",
        expected=6,
        expected_tags={"application": "app2"},
        timeseries=timeseries,
    )

    # Unblock DeploymentA's __init__ so app1 can finish deploying.
    ray.get(signal.send.remote())

    wait_for_condition(
        check_metric_float_eq,
        metric="ray_serve_deployment_status",
        expected=6,
        expected_tags={"deployment": "deployment_a", "application": "app1"},
        timeseries=timeseries,
    )
    wait_for_condition(
        check_metric_float_eq,
        metric="ray_serve_application_status",
        expected=6,
        expected_tags={"application": "app1"},
        timeseries=timeseries,
    )
def test_replica_startup_and_initialization_latency_metrics(metrics_start_shutdown):
    """Test that replica startup and initialization latency metrics are recorded."""

    @serve.deployment(num_replicas=2)
    class MyDeployment:
        def __init__(self):
            # Make initialization measurably slow so the > 500ms latency
            # assertion below has headroom.
            time.sleep(1)

        def __call__(self):
            return "hello"

    serve.run(MyDeployment.bind(), name="app", route_prefix="/f")
    url = get_application_url("HTTP", "app")
    assert "hello" == httpx.get(url).text

    # Verify startup latency metric count is exactly 1 (one replica started)
    # NOTE(review): the deployment uses num_replicas=2 and the final check
    # below expects two per-replica timeseries with count 1 each -- confirm
    # how check_metric_float_eq resolves multiple matching series here.
    wait_for_condition(
        check_metric_float_eq,
        timeout=20,
        metric="ray_serve_replica_startup_latency_ms_count",
        expected=1,
        expected_tags={"deployment": "MyDeployment", "application": "app"},
    )

    # Verify initialization latency metric count is exactly 1
    wait_for_condition(
        check_metric_float_eq,
        timeout=20,
        metric="ray_serve_replica_initialization_latency_ms_count",
        expected=1,
        expected_tags={"deployment": "MyDeployment", "application": "app"},
    )

    # Verify initialization latency metric value is greater than 500ms
    def check_initialization_latency_value():
        value = get_metric_float(
            "ray_serve_replica_initialization_latency_ms_sum",
            expected_tags={"deployment": "MyDeployment", "application": "app"},
        )
        assert (
            value > 500
        ), f"Initialization latency value is {value}, expected to be greater than 500ms"
        return True

    wait_for_condition(check_initialization_latency_value, timeout=20)

    # Assert that 2 metrics are recorded (one per replica)
    def check_metrics_count():
        metrics = get_metric_dictionaries(
            "ray_serve_replica_initialization_latency_ms_count"
        )
        assert len(metrics) == 2, f"Expected 2 metrics, got {len(metrics)}"
        # All metrics should have same deployment and application
        for metric in metrics:
            assert metric["deployment"] == "MyDeployment"
            assert metric["application"] == "app"
        # Each replica should have a unique replica tag
        replica_ids = {metric["replica"] for metric in metrics}
        assert (
            len(replica_ids) == 2
        ), f"Expected 2 unique replica IDs, got {replica_ids}"
        return True

    wait_for_condition(check_metrics_count, timeout=20)
def test_replica_reconfigure_latency_metrics(metrics_start_shutdown):
    """Test that replica reconfigure latency metrics are recorded when user_config changes."""

    @serve.deployment(version="1")
    class Configurable:
        def __init__(self):
            self.config = None

        def reconfigure(self, config):
            # Slow reconfigure so the > 500ms latency assertion below holds.
            time.sleep(1)
            self.config = config

        def __call__(self):
            return self.config

    # Initial deployment with version specified to enable in-place reconfigure
    serve.run(
        Configurable.options(user_config={"version": 1}).bind(),
        name="app",
        route_prefix="/config",
    )
    url = get_application_url("HTTP", "app")
    assert httpx.get(url).json() == {"version": 1}

    # Update user_config to trigger in-place reconfigure (same version, different config)
    serve.run(
        Configurable.options(user_config={"version": 2}).bind(),
        name="app",
        route_prefix="/config",
    )

    # Wait for the new config to take effect
    def config_updated():
        return httpx.get(url).json() == {"version": 2}

    wait_for_condition(config_updated, timeout=20)

    # Verify reconfigure latency metric count is exactly 1 (one reconfigure happened)
    wait_for_condition(
        check_metric_float_eq,
        timeout=20,
        metric="ray_serve_replica_reconfigure_latency_ms_count",
        expected=1,
        expected_tags={"deployment": "Configurable", "application": "app"},
    )

    # Verify reconfigure latency metric value is greater than 500ms (we slept for 1s)
    def check_reconfigure_latency_value():
        value = get_metric_float(
            "ray_serve_replica_reconfigure_latency_ms_sum",
            expected_tags={"deployment": "Configurable", "application": "app"},
        )
        assert value > 500, f"Reconfigure latency value is {value}, expected > 500ms"
        return True

    wait_for_condition(check_reconfigure_latency_value, timeout=20)
def test_health_check_latency_metrics(metrics_start_shutdown):
    """Test that health check latency metrics are recorded."""

    @serve.deployment(health_check_period_s=1)
    class MyDeployment:
        def __call__(self):
            return "hello"

        def check_health(self):
            # Slow health check so the > 500ms latency assertion below holds.
            time.sleep(1)

    serve.run(MyDeployment.bind(), name="app", route_prefix="/f")
    url = get_application_url("HTTP", "app")
    assert "hello" == httpx.get(url).text

    # Wait for at least one health check to complete and verify metric is recorded
    def check_health_check_latency_metrics():
        value = get_metric_float(
            "ray_serve_health_check_latency_ms_count",
            expected_tags={"deployment": "MyDeployment", "application": "app"},
        )
        # Health check count should be at least 1
        assert value >= 1, f"Health check count is {value}, expected to be 1"
        return True

    wait_for_condition(check_health_check_latency_metrics, timeout=30)

    # Verify health check latency metric value is greater than 500ms
    def check_health_check_latency_value():
        value = get_metric_float(
            "ray_serve_health_check_latency_ms_sum",
            expected_tags={"deployment": "MyDeployment", "application": "app"},
        )
        assert (
            value > 500
        ), f"Health check latency value is {value}, expected to be greater than 500ms"
        return True

    wait_for_condition(check_health_check_latency_value, timeout=30)
def test_health_check_failures_metrics(metrics_start_shutdown):
    """Test that health check failure metrics are recorded when health checks fail."""

    @serve.deployment(health_check_period_s=1, health_check_timeout_s=2)
    class FailingHealthCheck:
        def __init__(self):
            self.should_fail = False

        async def check_health(self):
            if self.should_fail:
                raise Exception("Health check failed!")

        async def __call__(self, request):
            # The request body acts as a control channel: sending "fail"
            # makes every subsequent health check raise.
            action = (await request.body()).decode("utf-8")
            if action == "fail":
                self.should_fail = True
            return "ok"

    serve.run(FailingHealthCheck.bind(), name="app", route_prefix="/health")
    url = get_application_url("HTTP", "app")

    # Verify deployment is healthy initially
    assert httpx.get(url).text == "ok"

    # Trigger health check failure (a GET with a body, handled by __call__).
    httpx.request("GET", url, content=b"fail")

    # Wait for at least one health check failure to be recorded
    def check_health_check_failure_metrics():
        value = get_metric_float(
            "ray_serve_health_check_failures_total",
            expected_tags={"deployment": "FailingHealthCheck", "application": "app"},
        )
        # Should have at least 1 failure
        return value >= 1

    wait_for_condition(check_health_check_failure_metrics, timeout=30)
def test_replica_shutdown_duration_metrics(metrics_start_shutdown):
    """Test that replica shutdown duration metrics are recorded."""

    @serve.deployment
    class MyDeployment:
        def __call__(self):
            return "hello"

        def __del__(self):
            # Slow teardown so the > 500ms shutdown-duration assertion holds.
            time.sleep(1)

    # Deploy the application
    serve.run(MyDeployment.bind(), name="app", route_prefix="/f")
    url = get_application_url("HTTP", "app")
    assert "hello" == httpx.get(url).text

    # Delete the application to trigger shutdown
    serve.delete("app", _blocking=True)

    # Verify shutdown duration metric count is exactly 1 (one replica stopped)
    wait_for_condition(
        check_metric_float_eq,
        timeout=30,
        metric="ray_serve_replica_shutdown_duration_ms_count",
        expected=1,
        expected_tags={"deployment": "MyDeployment", "application": "app"},
    )
    print("serve_replica_shutdown_duration_ms working as expected.")

    # Verify shutdown duration metric value is greater than 500ms
    def check_shutdown_duration_value():
        value = get_metric_float(
            "ray_serve_replica_shutdown_duration_ms_sum",
            expected_tags={"deployment": "MyDeployment", "application": "app"},
        )
        assert (
            value > 500
        ), f"Shutdown duration value is {value}, expected to be greater than 500ms"
        return True

    wait_for_condition(check_shutdown_duration_value, timeout=30)
def test_batching_metrics(metrics_start_shutdown):
    """Test that @serve.batch emits batching metrics with expected values/tags.

    Sends 8 concurrent requests with max_batch_size=4, which should form
    exactly 2 full batches; all batch-related counters/histograms are then
    checked for a count of 2 with the deployment/application/function_name
    tags attached.
    """

    @serve.deployment
    class BatchedDeployment:
        @serve.batch(max_batch_size=4, batch_wait_timeout_s=0.5)
        async def batch_handler(self, requests: List[str]) -> List[str]:
            # Simulate some processing time
            await asyncio.sleep(0.05)
            return [f"processed:{r}" for r in requests]

        async def __call__(self, request: Request):
            data = await request.body()
            return await self.batch_handler(data.decode())

    app_name = "batched_app"
    serve.run(BatchedDeployment.bind(), name=app_name, route_prefix="/batch")

    http_url = "http://localhost:8000/batch"

    # Send multiple concurrent requests to trigger batching
    with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:
        futures = [
            executor.submit(lambda i=i: httpx.post(http_url, content=f"req{i}"))
            for i in range(8)
        ]
        results = [f.result() for f in futures]

    # Verify all requests succeeded
    assert all(r.status_code == 200 for r in results)

    # Verify specific metric values and tags
    timeseries = PrometheusTimeseries()
    expected_tags = {
        "deployment": "BatchedDeployment",
        "application": app_name,
        "function_name": "batch_handler",
    }

    # Check batches_processed_total counter exists and has correct tags
    wait_for_condition(
        lambda: check_metric_float_eq(
            "ray_serve_batches_processed_total",
            expected=2,
            expected_tags=expected_tags,
            timeseries=timeseries,
        ),
        timeout=10,
    )

    # Check batch_wait_time_ms histogram was recorded for 2 batches
    wait_for_condition(
        lambda: check_metric_float_eq(
            "ray_serve_batch_wait_time_ms_count",
            expected=2,
            expected_tags=expected_tags,
            timeseries=timeseries,
        ),
        timeout=10,
    )

    # Check batch_execution_time_ms histogram was recorded for 2 batches
    wait_for_condition(
        lambda: check_metric_float_eq(
            "ray_serve_batch_execution_time_ms_count",
            expected=2,
            expected_tags=expected_tags,
            timeseries=timeseries,
        ),
        timeout=10,
    )

    # Check batch_utilization_percent histogram: 2 batches at 100% each = 200 sum
    wait_for_condition(
        lambda: check_metric_float_eq(
            "ray_serve_batch_utilization_percent_count",
            expected=2,
            expected_tags=expected_tags,
            timeseries=timeseries,
        ),
        timeout=10,
    )

    # Check actual_batch_size histogram: 2 batches of 4 requests each = 8 sum
    wait_for_condition(
        lambda: check_metric_float_eq(
            "ray_serve_actual_batch_size_count",
            expected=2,
            expected_tags=expected_tags,
            timeseries=timeseries,
        ),
        timeout=10,
    )

    # Check batch_queue_length gauge exists (should be 0 after processing)
    wait_for_condition(
        lambda: check_metric_float_eq(
            "ray_serve_batch_queue_length",
            expected=0,
            expected_tags=expected_tags,
            timeseries=timeseries,
        ),
        timeout=10,
    )
def test_autoscaling_metrics(metrics_start_shutdown):
    """Test that autoscaling metrics are emitted correctly.

    This tests the following metrics:
    - ray_serve_autoscaling_target_replicas: Target number of replicas
        Tags: deployment, application
    - ray_serve_autoscaling_desired_replicas: Raw decision before bounds
        Tags: deployment, application
    - ray_serve_autoscaling_total_requests: Total requests seen by autoscaler
        Tags: deployment, application
    - ray_serve_autoscaling_policy_execution_time_ms: Policy execution time
        Tags: deployment, application, policy_scope
    - ray_serve_autoscaling_replica_metrics_delay_ms: Replica metrics delay
        Tags: deployment, application, replica
    - ray_serve_autoscaling_handle_metrics_delay_ms: Handle metrics delay
        Tags: deployment, application, handle
    """
    signal = SignalActor.remote()

    @serve.deployment(
        autoscaling_config={
            "metrics_interval_s": 0.1,
            "min_replicas": 1,
            "max_replicas": 5,
            "target_ongoing_requests": 2,
            "upscale_delay_s": 0,
            "downscale_delay_s": 5,
            "look_back_period_s": 1,
        },
        max_ongoing_requests=10,
        graceful_shutdown_timeout_s=0.1,
    )
    class AutoscalingDeployment:
        async def __call__(self):
            # Block until the test releases the signal so requests stay ongoing.
            await signal.wait.remote()

    serve.run(AutoscalingDeployment.bind(), name="autoscaling_app")

    # Send requests to trigger autoscaling
    handle = serve.get_deployment_handle("AutoscalingDeployment", "autoscaling_app")
    # The response refs are deliberately discarded; the 10 requests just need
    # to be in flight (blocked on the signal) while metrics are checked.
    [handle.remote() for _ in range(10)]

    timeseries = PrometheusTimeseries()
    base_tags = {
        "deployment": "AutoscalingDeployment",
        "application": "autoscaling_app",
    }

    # Test 1: Check that target_replicas metric is 5 (10 requests / target_ongoing_requests=2)
    wait_for_condition(
        check_metric_float_eq,
        timeout=15,
        metric="ray_serve_autoscaling_target_replicas",
        expected=5,
        expected_tags=base_tags,
        timeseries=timeseries,
    )
    print("Target replicas metric verified.")

    # Test 2: Check that autoscaling decision metric is 5 (10 requests / target_ongoing_requests=2)
    wait_for_condition(
        check_metric_float_eq,
        timeout=15,
        metric="ray_serve_autoscaling_desired_replicas",
        expected=5,
        expected_tags=base_tags,
        timeseries=timeseries,
    )
    print("Autoscaling decision metric verified.")

    # Test 3: Check that total requests metric is 10
    wait_for_condition(
        check_metric_float_eq,
        timeout=15,
        metric="ray_serve_autoscaling_total_requests",
        expected=10,
        expected_tags=base_tags,
        timeseries=timeseries,
    )
    print("Total requests metric verified.")

    # Test 4: Check that policy execution time metric is emitted with policy_scope=deployment
    def check_policy_execution_time_metric():
        value = get_metric_float(
            "ray_serve_autoscaling_policy_execution_time_ms",
            expected_tags={**base_tags, "policy_scope": "deployment"},
            timeseries=timeseries,
        )
        assert value >= 0
        return True

    wait_for_condition(check_policy_execution_time_metric, timeout=15)
    print("Policy execution time metric verified.")

    # Test 5: Check that metrics delay gauges are emitted with proper tags.
    # Either the handle-side or the replica-side delay gauge may be present,
    # depending on where metrics are collected; accept whichever appears.
    def check_metrics_delay_metrics():
        # Check for handle metrics delay (depends on where metrics are collected)
        value = get_metric_float(
            "ray_serve_autoscaling_handle_metrics_delay_ms",
            expected_tags=base_tags,
            timeseries=timeseries,
        )
        if value >= 0:
            # Verify handle tag exists by checking metric dictionaries
            metrics_dicts = get_metric_dictionaries(
                "ray_serve_autoscaling_handle_metrics_delay_ms",
                timeout=5,
                timeseries=timeseries,
            )
            for m in metrics_dicts:
                if (
                    m.get("deployment") == "AutoscalingDeployment"
                    and m.get("application") == "autoscaling_app"
                ):
                    assert m.get("handle") is not None
                    print(
                        f"Handle delay metric verified with handle tag: {m.get('handle')}"
                    )
                    return True

        # Fallback: Check for replica metrics delay
        value = get_metric_float(
            "ray_serve_autoscaling_replica_metrics_delay_ms",
            expected_tags=base_tags,
            timeseries=timeseries,
        )
        if value >= 0:
            metrics_dicts = get_metric_dictionaries(
                "ray_serve_autoscaling_replica_metrics_delay_ms",
                timeout=5,
                timeseries=timeseries,
            )
            for m in metrics_dicts:
                if (
                    m.get("deployment") == "AutoscalingDeployment"
                    and m.get("application") == "autoscaling_app"
                ):
                    assert m.get("replica") is not None
                    print(
                        f"Replica delay metric verified with replica tag: {m.get('replica')}"
                    )
                    return True
        return False

    wait_for_condition(check_metrics_delay_metrics, timeout=15)
    print("Metrics delay metrics verified.")

    # Release signal to complete requests
    ray.get(signal.send.remote())
@pytest.mark.skipif(
    sys.platform == "win32",
    reason="Async Inference feature testing is flaky on Windows.",
)
def test_async_inference_task_queue_metrics_delay(
    metrics_start_shutdown, external_redis  # noqa: F811
):
    """Test that async inference task queue metrics delay is emitted correctly.

    This tests the metric:
    - ray_serve_autoscaling_async_inference_task_queue_metrics_delay_ms
        Tags: deployment, application

    The QueueMonitor periodically pushes queue length metrics to the controller,
    and the controller records the delay between when the metrics were collected
    and when they were received.
    """
    # Setup Redis client
    # NOTE(review): assumes RAY_REDIS_ADDRESS is populated (by the
    # external_redis fixture, presumably) in "host:port" form — verify.
    redis_address = os.environ.get("RAY_REDIS_ADDRESS")
    host, port = redis_address.split(":")
    redis_client = redis.Redis(host=host, port=int(port), db=0)
    redis_broker_url = f"redis://{redis_address}/0"

    test_deployment_id = DeploymentID("test_deployment", "test_app")
    test_queue_name = "test_metrics_queue"

    try:
        # Create QueueMonitor with the Serve controller
        controller = ray.get_actor(SERVE_CONTROLLER_NAME, namespace=SERVE_NAMESPACE)
        queue_monitor = create_queue_monitor_actor(
            deployment_id=test_deployment_id,
            broker_url=redis_broker_url,
            queue_name=test_queue_name,
            controller_handle=controller,
            namespace=SERVE_NAMESPACE,
        )

        # Push some messages to the queue so the metrics pusher has data to report
        for i in range(5):
            redis_client.lpush(test_queue_name, f"message_{i}")

        # Wait for the queue length to be picked up
        def check_length():
            return ray.get(queue_monitor.get_queue_length.remote()) == 5

        wait_for_condition(check_length, timeout=30)

        timeseries = PrometheusTimeseries()
        base_tags = {
            "deployment": test_deployment_id.name,
            "application": test_deployment_id.app_name,
        }

        # Wait for the metrics delay metric to be emitted with correct tags and value
        def check_metrics_delay_metric():
            value = get_metric_float(
                "ray_serve_autoscaling_async_inference_task_queue_metrics_delay_ms",
                expected_tags=base_tags,
                timeseries=timeseries,
            )
            if value <= 0:
                return False

            # Verify correct tags are attached
            metrics_dicts = get_metric_dictionaries(
                "ray_serve_autoscaling_async_inference_task_queue_metrics_delay_ms",
                timeout=5,
                timeseries=timeseries,
            )
            for m in metrics_dicts:
                if (
                    m.get("deployment") == test_deployment_id.name
                    and m.get("application") == test_deployment_id.app_name
                ):
                    # Verify both required tags exist
                    assert "deployment" in m, "Missing 'deployment' tag"
                    assert "application" in m, "Missing 'application' tag"
                    return True
            return False

        wait_for_condition(check_metrics_delay_metric, timeout=30)
    finally:
        # Cleanup
        redis_client.delete(test_queue_name)
        redis_client.close()
        try:
            kill_queue_monitor_actor(test_deployment_id, namespace=SERVE_NAMESPACE)
        except ValueError:
            pass  # Actor may already be killed
def test_user_autoscaling_stats_metrics(metrics_start_shutdown):
    """Test that user-defined autoscaling stats metrics are emitted correctly.

    This tests the following metrics:
    - ray_serve_user_autoscaling_stats_latency_ms: Time to execute user stats function
        Tags: application, deployment, replica
    - ray_serve_record_autoscaling_stats_failed_total: Failed stats collection
        Tags: application, deployment, replica, exception_name
    """

    @serve.deployment(
        autoscaling_config={
            "metrics_interval_s": 0.1,
            "min_replicas": 1,
            "max_replicas": 5,
            "target_ongoing_requests": 2,
        },
    )
    class DeploymentWithCustomStats:
        def __init__(self):
            self.call_count = 0

        async def record_autoscaling_stats(self):
            """Custom autoscaling stats function."""
            self.call_count += 1
            return {"custom_metric": self.call_count}

        def __call__(self):
            return "ok"

    serve.run(DeploymentWithCustomStats.bind(), name="custom_stats_app")

    # Make a request to ensure the deployment is running
    handle = serve.get_deployment_handle(
        "DeploymentWithCustomStats", "custom_stats_app"
    )
    handle.remote().result()

    timeseries = PrometheusTimeseries()
    base_tags = {
        "deployment": "DeploymentWithCustomStats",
        "application": "custom_stats_app",
    }

    # Test: Check that user autoscaling stats latency metric is emitted
    def check_user_stats_latency_metric():
        value = get_metric_float(
            "ray_serve_user_autoscaling_stats_latency_ms_sum",
            expected_tags=base_tags,
            timeseries=timeseries,
        )
        if value >= 0:
            # Verify replica tag exists
            metrics_dicts = get_metric_dictionaries(
                "ray_serve_user_autoscaling_stats_latency_ms_sum",
                timeout=5,
                timeseries=timeseries,
            )
            for m in metrics_dicts:
                if (
                    m.get("deployment") == "DeploymentWithCustomStats"
                    and m.get("application") == "custom_stats_app"
                ):
                    assert m.get("replica") is not None
                    print(
                        f"User stats latency metric verified with replica tag: {m.get('replica')}"
                    )
                    return True
        return False

    wait_for_condition(check_user_stats_latency_metric, timeout=15)
    print("User autoscaling stats latency metric verified.")
def test_user_autoscaling_stats_failure_metrics(metrics_start_shutdown):
    """Test that user autoscaling stats failure metrics are emitted on error.

    The deployment's record_autoscaling_stats always raises ValueError, so the
    failure counter should appear tagged with exception_name="ValueError".
    """

    @serve.deployment(
        autoscaling_config={
            "metrics_interval_s": 0.1,
            "min_replicas": 1,
            "max_replicas": 5,
            "target_ongoing_requests": 2,
        },
    )
    class DeploymentWithFailingStats:
        async def record_autoscaling_stats(self):
            """Custom autoscaling stats function that raises an error."""
            raise ValueError("Intentional error for testing")

        def __call__(self):
            return "ok"

    serve.run(DeploymentWithFailingStats.bind(), name="failing_stats_app")

    # Make a request to ensure the deployment is running
    handle = serve.get_deployment_handle(
        "DeploymentWithFailingStats", "failing_stats_app"
    )
    handle.remote().result()

    timeseries = PrometheusTimeseries()

    # Test: Check that failure counter is incremented
    def check_stats_failure_metric():
        metrics_dicts = get_metric_dictionaries(
            "ray_serve_record_autoscaling_stats_failed_total",
            timeout=5,
            timeseries=timeseries,
        )
        for m in metrics_dicts:
            if (
                m.get("deployment") == "DeploymentWithFailingStats"
                and m.get("application") == "failing_stats_app"
            ):
                assert m.get("replica") is not None
                assert m.get("exception_name") == "ValueError"
                print(
                    f"Stats failure metric verified with exception_name: {m.get('exception_name')}"
                )
                return True
        return False

    wait_for_condition(check_stats_failure_metric, timeout=15)
    print("User autoscaling stats failure metric verified.")
def test_long_poll_pending_clients_metric(metrics_start_shutdown):
    """Check that pending clients gauge is tracked correctly."""
    timeseries = PrometheusTimeseries()
    # Create a LongPollHost with a longer timeout so we can observe pending state
    host = ray.remote(LongPollHost).remote(
        listen_for_change_request_timeout_s=(5.0, 5.0)
    )

    # Write initial values
    ray.get(host.notify_changed.remote({"key_1": 100}))
    ray.get(host.notify_changed.remote({"key_2": 200}))

    # Get the current snapshot IDs
    result = ray.get(host.listen_for_change.remote({"key_1": -1, "key_2": -1}))
    key_1_snapshot_id = result["key_1"].snapshot_id
    key_2_snapshot_id = result["key_2"].snapshot_id

    # Start a listen call that will block waiting for updates
    # (since we're using up-to-date snapshot IDs)
    pending_ref = host.listen_for_change.remote(
        {"key_1": key_1_snapshot_id, "key_2": key_2_snapshot_id}
    )

    # Check that pending clients gauge shows 1 for each key
    # (wait_for_condition will retry until the metric is available)
    wait_for_condition(
        check_metric_float_eq,
        timeout=15,
        metric="ray_serve_long_poll_pending_clients",
        expected=1,
        expected_tags={"namespace": "key_1"},
        timeseries=timeseries,
    )
    wait_for_condition(
        check_metric_float_eq,
        timeout=15,
        metric="ray_serve_long_poll_pending_clients",
        expected=1,
        expected_tags={"namespace": "key_2"},
        timeseries=timeseries,
    )

    # Trigger an update for key_1
    ray.get(host.notify_changed.remote({"key_1": 101}))

    # Wait for the pending call to complete
    ray.get(pending_ref)

    # After update, pending clients for key_1 should be 0
    wait_for_condition(
        check_metric_float_eq,
        timeout=15,
        metric="ray_serve_long_poll_pending_clients",
        expected=0,
        expected_tags={"namespace": "key_1"},
        timeseries=timeseries,
    )
def test_long_poll_latency_metric(metrics_start_shutdown):
    """Check that long poll latency histogram is recorded on the client side."""
    timeseries = PrometheusTimeseries()
    # Create a LongPollHost
    host = ray.remote(LongPollHost).remote(
        listen_for_change_request_timeout_s=(0.5, 0.5)
    )

    # Write initial value so the key exists
    ray.get(host.notify_changed.remote({"test_key": "initial_value"}))

    # Track received updates
    received_updates = []
    update_event = threading.Event()

    def on_update(value):
        # Listener callback invoked by the LongPollClient on each update.
        received_updates.append(value)
        update_event.set()

    # Create event loop for the client (runs on a daemon thread)
    loop = asyncio.new_event_loop()

    def run_loop():
        asyncio.set_event_loop(loop)
        loop.run_forever()

    loop_thread = threading.Thread(target=run_loop, daemon=True)
    loop_thread.start()

    # Create the LongPollClient
    client = LongPollClient(
        host_actor=host,
        key_listeners={"test_key": on_update},
        call_in_event_loop=loop,
    )

    # Wait for initial update (client starts with snapshot_id -1)
    assert update_event.wait(timeout=10), "Timed out waiting for initial update"
    assert len(received_updates) == 1
    assert received_updates[0] == "initial_value"

    # Clear event and trigger another update
    update_event.clear()
    ray.get(host.notify_changed.remote({"test_key": "updated_value"}))

    # Wait for the update to be received
    assert update_event.wait(timeout=10), "Timed out waiting for update"
    assert len(received_updates) == 2
    assert received_updates[1] == "updated_value"

    # Stop the client
    client.stop()
    loop.call_soon_threadsafe(loop.stop)
    loop_thread.join(timeout=5)

    # Check that latency metric was recorded
    # The metric should have at least 2 observations (initial + update)
    def check_latency_metric_exists():
        metric_value = get_metric_float(
            "ray_serve_long_poll_latency_ms_count",
            expected_tags={"namespace": "test_key"},
            timeseries=timeseries,
        )
        # Should have at least 2 observations
        return metric_value == 2

    wait_for_condition(check_latency_metric_exists, timeout=15)

    # Verify the latency sum is positive (latency > 0)
    latency_sum = get_metric_float(
        "ray_serve_long_poll_latency_ms_sum",
        expected_tags={"namespace": "test_key"},
        timeseries=timeseries,
    )
    assert latency_sum > 0, "Latency sum should be positive"
def test_long_poll_host_sends_counted(metrics_start_shutdown):
    """Check that the transmissions by the long_poll are counted.

    Exercises the per-key transmission counter for successful sends as well as
    the dedicated "TIMEOUT" label when a listen call expires with no update.
    """
    timeseries = PrometheusTimeseries()
    host = ray.remote(LongPollHost).remote(
        listen_for_change_request_timeout_s=(0.01, 0.01)
    )

    # Write a value.
    ray.get(host.notify_changed.remote({"key_1": 999}))
    object_ref = host.listen_for_change.remote({"key_1": -1})

    # Check that the result's size is reported.
    result_1: Dict[str, UpdatedObject] = ray.get(object_ref)
    wait_for_condition(
        check_metric_float_eq,
        timeout=15,
        metric="ray_serve_long_poll_host_transmission_counter_total",
        expected=1,
        expected_tags={"namespace_or_state": "key_1"},
        timeseries=timeseries,
    )

    # Write two new values.
    ray.get(host.notify_changed.remote({"key_1": 1000}))
    ray.get(host.notify_changed.remote({"key_2": 1000}))
    object_ref = host.listen_for_change.remote(
        {"key_1": result_1["key_1"].snapshot_id, "key_2": -1}
    )

    # Check that the new objects are transmitted.
    result_2: Dict[str, UpdatedObject] = ray.get(object_ref)
    wait_for_condition(
        check_metric_float_eq,
        timeout=15,
        metric="ray_serve_long_poll_host_transmission_counter_total",
        expected=1,
        expected_tags={"namespace_or_state": "key_2"},
        timeseries=timeseries,
    )
    wait_for_condition(
        check_metric_float_eq,
        timeout=15,
        metric="ray_serve_long_poll_host_transmission_counter_total",
        expected=2,
        expected_tags={"namespace_or_state": "key_1"},
        timeseries=timeseries,
    )

    # Check that a timeout result is counted.
    # (key_2 is already up to date, so the 0.01s listen timeout fires.)
    object_ref = host.listen_for_change.remote({"key_2": result_2["key_2"].snapshot_id})
    _ = ray.get(object_ref)
    wait_for_condition(
        check_metric_float_eq,
        timeout=15,
        metric="ray_serve_long_poll_host_transmission_counter_total",
        expected=1,
        expected_tags={"namespace_or_state": "TIMEOUT"},
        timeseries=timeseries,
    )
def test_event_loop_monitoring_metrics(metrics_start_shutdown):
    """Test that event loop monitoring metrics are emitted correctly.

    This tests the following metrics:
    - serve_event_loop_scheduling_latency_ms: Event loop lag in milliseconds
        Tags: component, loop_type, actor_id
    - serve_event_loop_monitoring_iterations: Heartbeat counter
        Tags: component, loop_type, actor_id
    - serve_event_loop_tasks: Number of pending asyncio tasks
        Tags: component, loop_type, actor_id

    Components monitored:
    - Proxy: main loop only
    - Replica: main loop + user_code loop (when separate thread enabled)
    - Router: router loop (when separate loop enabled, runs on replica)
    """

    @serve.deployment(name="g")
    class ChildDeployment:
        def __call__(self):
            return "child"

    @serve.deployment(name="f")
    class SimpleDeployment:
        def __init__(self, child):
            self.child = child

        async def __call__(self):
            return await self.child.remote()

    serve.run(
        SimpleDeployment.bind(ChildDeployment.bind()), name="app", route_prefix="/test"
    )

    # Make a request to ensure everything is running
    url = get_application_url("HTTP", "app")
    assert httpx.get(url).text == "child"

    timeseries = PrometheusTimeseries()

    # Test 1: Check proxy main loop metrics
    def check_proxy_main_loop_metrics():
        metrics = get_metric_dictionaries(
            "ray_serve_event_loop_monitoring_iterations_total",
            timeout=10,
            timeseries=timeseries,
        )
        for m in metrics:
            if m.get("component") == "proxy" and m.get("loop_type") == "main":
                assert "actor_id" in m, "actor_id tag should be present"
                print(f"Proxy main loop metric found: {m}")
                return True
        return False

    wait_for_condition(check_proxy_main_loop_metrics, timeout=30)
    print("Proxy main loop monitoring metrics verified.")

    # Test 1a: Check proxy router loop metrics
    def check_proxy_router_loop_metrics():
        metrics = get_metric_dictionaries(
            "ray_serve_event_loop_monitoring_iterations_total",
            timeout=10,
            timeseries=timeseries,
        )
        for m in metrics:
            if m.get("component") == "proxy" and m.get("loop_type") == "router":
                assert "actor_id" in m, "actor_id tag should be present"
                print(f"Proxy router loop metric found: {m}")
                return True
        return False

    # Router-loop metrics only exist when the router runs in its own loop.
    if RAY_SERVE_RUN_ROUTER_IN_SEPARATE_LOOP:
        wait_for_condition(check_proxy_router_loop_metrics, timeout=30)
        print("Proxy router loop monitoring metrics verified.")
    else:
        print("Proxy router loop monitoring metrics not verified.")

    # Test 2: Check replica main loop metrics
    def check_replica_main_loop_metrics():
        metrics = get_metric_dictionaries(
            "ray_serve_event_loop_monitoring_iterations_total",
            timeout=10,
            timeseries=timeseries,
        )
        for m in metrics:
            if m.get("component") == "replica" and m.get("loop_type") == "main":
                assert "actor_id" in m, "actor_id tag should be present"
                assert m.get("deployment") in [
                    "f",
                    "g",
                ], "deployment tag should be 'f' or 'g'"
                assert m.get("application") == "app", "application tag should be 'app'"
                print(f"Replica main loop metric found: {m}")
                return True
        return False

    wait_for_condition(check_replica_main_loop_metrics, timeout=30)
    print("Replica main loop monitoring metrics verified.")

    # Test 3: Check replica user_code loop metrics (enabled by default)
    def check_replica_user_code_loop_metrics():
        metrics = get_metric_dictionaries(
            "ray_serve_event_loop_monitoring_iterations_total",
            timeout=10,
            timeseries=timeseries,
        )
        for m in metrics:
            if m.get("component") == "replica" and m.get("loop_type") == "user_code":
                assert "actor_id" in m, "actor_id tag should be present"
                assert m.get("deployment") in [
                    "f",
                    "g",
                ], "deployment tag should be 'f' or 'g'"
                assert m.get("application") == "app", "application tag should be 'app'"
                print(f"Replica user_code loop metric found: {m}")
                return True
        return False

    if RAY_SERVE_RUN_USER_CODE_IN_SEPARATE_THREAD:
        wait_for_condition(check_replica_user_code_loop_metrics, timeout=30)
        print("Replica user_code loop monitoring metrics verified.")
    else:
        print("Replica user_code loop monitoring metrics not verified.")

    # Test 4: Check router loop metrics (enabled by default)
    def check_router_loop_metrics():
        metrics = get_metric_dictionaries(
            "ray_serve_event_loop_monitoring_iterations_total",
            timeout=10,
            timeseries=timeseries,
        )
        for m in metrics:
            if m.get("component") == "replica" and m.get("loop_type") == "router":
                assert "actor_id" in m, "actor_id tag should be present"
                print(f"Router loop metric found: {m}")
                return True
        return False

    if RAY_SERVE_RUN_ROUTER_IN_SEPARATE_LOOP:
        wait_for_condition(check_router_loop_metrics, timeout=30)
        print("Router loop monitoring metrics verified.")
    else:
        print("Router loop monitoring metrics not verified.")

    # Test 5: Check that scheduling latency histogram exists and has reasonable values
    def check_scheduling_latency_metric():
        # Check for the histogram count metric
        metrics = get_metric_dictionaries(
            "ray_serve_event_loop_scheduling_latency_ms_count",
            timeout=10,
            timeseries=timeseries,
        )
        # Should have metrics for proxy main, replica main, replica user_code, router
        component_loop_pairs = set()
        for m in metrics:
            component = m.get("component")
            loop_type = m.get("loop_type")
            if component and loop_type:
                component_loop_pairs.add((component, loop_type))
        expected_pairs = {
            ("proxy", "main"),
            ("replica", "main"),
        }
        if RAY_SERVE_RUN_USER_CODE_IN_SEPARATE_THREAD:
            expected_pairs.add(("replica", "user_code"))
        if RAY_SERVE_RUN_ROUTER_IN_SEPARATE_LOOP:
            expected_pairs.add(("replica", "router"))
            expected_pairs.add(("proxy", "router"))
        return expected_pairs.issubset(component_loop_pairs)

    wait_for_condition(check_scheduling_latency_metric, timeout=30)
    print("Scheduling latency histogram metrics verified.")

    # Test 6: Check that tasks gauge exists
    def check_tasks_gauge_metric():
        metrics = get_metric_dictionaries(
            "ray_serve_event_loop_tasks",
            timeout=10,
            timeseries=timeseries,
        )
        # Should have metrics for proxy main, replica main, replica user_code, router
        component_loop_pairs = set()
        for m in metrics:
            component = m.get("component")
            loop_type = m.get("loop_type")
            if component and loop_type:
                component_loop_pairs.add((component, loop_type))
        expected_pairs = {
            ("proxy", "main"),
            ("replica", "main"),
        }
        if RAY_SERVE_RUN_USER_CODE_IN_SEPARATE_THREAD:
            expected_pairs.add(("replica", "user_code"))
        if RAY_SERVE_RUN_ROUTER_IN_SEPARATE_LOOP:
            expected_pairs.add(("replica", "router"))
            expected_pairs.add(("proxy", "router"))
        return expected_pairs.issubset(component_loop_pairs)

    wait_for_condition(check_tasks_gauge_metric, timeout=30)
    print("Event loop tasks gauge metrics verified.")
def test_actor_summary(serve_instance):
    """A running app should surface controller, proxy, and replica actors."""

    @serve.deployment
    def f():
        pass

    serve.run(f.bind(), name="app")
    actors = list_actors(filters=[("state", "=", "ALIVE")])
    class_names = {actor.class_name for actor in actors}
    # issuperset: other Serve-internal actors may also be alive.
    assert class_names.issuperset(
        {"ServeController", "ProxyActor", "ServeReplica:app:f"}
    )
# Allow running this test module directly, forwarding pytest's exit code.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", "-s", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/test_metrics_3.py",
"license": "Apache License 2.0",
"lines": 1076,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/_internal/execution/bundle_queue/fifo.py | from __future__ import annotations
from collections import deque
from typing import TYPE_CHECKING, Any, Deque, Iterator, List, Optional
from typing_extensions import override
from .base import BaseBundleQueue
if TYPE_CHECKING:
from ray.data._internal.execution.interfaces import RefBundle
class FIFOBundleQueue(BaseBundleQueue):
    """First-in-first-out bundle queue.

    Bundles are appended at the tail and consumed from the head, so the
    oldest bundle is always the next one returned::

        [ ] <- [ ] <- [ ] ...
        ^ head: popped first
    """

    def __init__(self, bundles: Optional[List[RefBundle]] = None):
        super().__init__()
        # Deque gives O(1) appends at the tail and pops at the head.
        self._queue: Deque[RefBundle] = deque()
        for bundle in bundles or []:
            self.add(bundle)

    @override
    def _add_inner(self, bundle: RefBundle, **kwargs: Any):
        self._queue.append(bundle)

    @override
    def _get_next_inner(self) -> RefBundle:
        if not self.has_next():
            raise ValueError(
                f"Popping from empty {self.__class__.__name__} is prohibited"
            )
        return self._queue.popleft()

    @override
    def peek_next(self) -> Optional[RefBundle]:
        # Head of the queue, or None when nothing is buffered.
        return self._queue[0] if self.has_next() else None

    @override
    def has_next(self) -> bool:
        return bool(len(self))

    @override
    def finalize(self, **kwargs: Any):
        # FIFO ordering needs no end-of-input bookkeeping.
        pass

    def __iter__(self) -> Iterator[RefBundle]:
        # Non-consuming iteration in queue order.
        return iter(self._queue)

    def to_list(self) -> List[RefBundle]:
        return list(self._queue)

    @override
    def clear(self):
        self._reset_metrics()
        self._queue.clear()
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/execution/bundle_queue/fifo.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/data/tests/unit/test_fifo_bundle_queue.py | from typing import Any
from uuid import uuid4
import pandas as pd
import pytest
import ray
from ray.data._internal.execution.bundle_queue import FIFOBundleQueue
from ray.data._internal.execution.interfaces import RefBundle
from ray.data.block import BlockAccessor
def _create_bundle(data: Any) -> RefBundle:
    """Build a one-row RefBundle around ``data`` using a synthetic object ref.

    The block ref is fabricated from a random UUID rather than produced via
    ``ray.put``, so no running Ray cluster is required.
    """
    frame = pd.DataFrame({"data": [data]})
    accessor = BlockAccessor.for_block(frame)
    # Create artificial object ref without calling ray.put()
    fake_ref = ray.ObjectRef(uuid4().hex[:28].encode())
    return RefBundle(
        [(fake_ref, accessor.get_metadata())],
        owns_blocks=False,
        schema=accessor.schema(),
    )
def test_fifo_queue_add_and_length():
    """Length grows by one for each bundle added."""
    q = FIFOBundleQueue()
    payloads = [_create_bundle("data1"), _create_bundle("data11")]
    for expected_len, bundle in enumerate(payloads, start=1):
        q.add(bundle)
        assert len(q) == expected_len
def test_fifo_queue_get_next_fifo_order():
    """Bundles come back in exactly the order they were inserted."""
    q = FIFOBundleQueue()
    inserted = [_create_bundle(d) for d in ("data1", "data11", "data111")]
    for bundle in inserted:
        q.add(bundle)
    popped = [q.get_next() for _ in range(len(inserted))]
    assert all(got is want for got, want in zip(popped, inserted))
def test_fifo_queue_init_with_bundles():
    """The constructor seeds the queue in list order."""
    first = _create_bundle("data1")
    second = _create_bundle("data11")
    q = FIFOBundleQueue(bundles=[first, second])
    assert len(q) == 2
    assert q.get_next() is first
    assert q.get_next() is second
def test_fifo_queue_peek_next():
    """peek_next returns the head repeatedly without consuming it."""
    q = FIFOBundleQueue()
    head = _create_bundle("data1")
    q.add(head)
    q.add(_create_bundle("data11"))
    # Peeking is idempotent: same bundle, nothing removed.
    for _ in range(2):
        assert q.peek_next() is head
        assert len(q) == 2
def test_fifo_queue_peek_next_empty():
    """An empty queue peeks as None rather than raising."""
    assert FIFOBundleQueue().peek_next() is None
def test_fifo_queue_has_next():
    """has_next flips with the queue's emptiness."""
    q = FIFOBundleQueue()
    assert not q.has_next()
    q.add(_create_bundle("data1"))
    assert q.has_next()
    q.get_next()
    assert not q.has_next()
def test_fifo_queue_get_next_empty_raises():
    """Draining an empty queue is an error."""
    empty = FIFOBundleQueue()
    with pytest.raises(ValueError, match="Popping from empty"):
        empty.get_next()
def test_fifo_queue_clear():
    """clear() empties the queue and zeroes all tracked metrics."""
    q = FIFOBundleQueue()
    for payload in ("data1", "data11"):
        q.add(_create_bundle(payload))

    q.clear()

    assert len(q) == 0
    assert q.estimate_size_bytes() == 0
    assert q.num_blocks() == 0
    assert not q.has_next()
def test_fifo_queue_metrics():
    """Byte-size and block-count metrics track adds and removals."""
    q = FIFOBundleQueue()
    first = _create_bundle("data1")
    second = _create_bundle("data11")

    q.add(first)
    assert q.num_blocks() == 1
    assert q.estimate_size_bytes() == first.size_bytes()

    q.add(second)
    assert q.num_blocks() == 2
    assert q.estimate_size_bytes() == first.size_bytes() + second.size_bytes()

    # Removing the head subtracts its contribution.
    q.get_next()
    assert q.num_blocks() == 1
    assert q.estimate_size_bytes() == second.size_bytes()
def test_fifo_queue_iter():
    """Iteration yields bundles in order without consuming them."""
    expected = [_create_bundle(d) for d in ("data1", "data11", "data111")]
    q = FIFOBundleQueue()
    for bundle in expected:
        q.add(bundle)

    assert list(q) == expected
    # Iterating must leave the queue intact.
    assert len(q) == 3
def test_fifo_queue_to_list():
    """to_list snapshots the queue contents without draining it."""
    first = _create_bundle("data1")
    second = _create_bundle("data11")
    q = FIFOBundleQueue()
    q.add(first)
    q.add(second)

    assert q.to_list() == [first, second]
    # Snapshotting must leave the queue intact.
    assert len(q) == 2
def test_fifo_queue_finalize_is_noop():
    """finalize() neither raises nor disturbs queued bundles."""
    only = _create_bundle("data1")
    q = FIFOBundleQueue()
    q.add(only)

    q.finalize()  # No-op for the FIFO queue.

    assert len(q) == 1
    assert q.get_next() is only
# Allow running this test module directly, forwarding pytest's exit code.
if __name__ == "__main__":
    import sys

    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/unit/test_fifo_bundle_queue.py",
"license": "Apache License 2.0",
"lines": 135,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/train/v2/api/validation_config.py | from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Dict, Optional, Protocol
from ray.util.annotations import PublicAPI
if TYPE_CHECKING:
from ray.train import Checkpoint
@PublicAPI(stability="alpha")
class ValidationFn(Protocol):
    """Protocol for a function that validates a checkpoint.

    Implementations receive the checkpoint to validate as the first argument,
    plus any task-specific keyword arguments, and return a dict of metrics.
    """

    def __call__(self, checkpoint: "Checkpoint", **kwargs: Any) -> Dict:
        ...
@dataclass
@PublicAPI(stability="alpha")
class ValidationTaskConfig:
    """Configuration for a specific validation task, passed to report().

    Args:
        fn_kwargs: json-serializable keyword arguments to pass to the validation function.
            Note that we always pass `checkpoint` as the first argument to the
            validation function.
    """

    # None means "no extra kwargs"; normalized to an empty dict after init.
    fn_kwargs: Optional[Dict[str, Any]] = None

    def __post_init__(self):
        # Guarantee a dict so downstream code can unpack `**self.fn_kwargs`.
        self.fn_kwargs = {} if self.fn_kwargs is None else self.fn_kwargs
@PublicAPI(stability="alpha")
class ValidationConfig:
    """Configuration for validation, passed to the trainer.

    Args:
        fn: The validation function to run on checkpoints.
            This function should accept a checkpoint as the first argument
            and return a dictionary of metrics.
        task_config: Default configuration for validation tasks.
            The fn_kwargs in this config can be overridden by
            ValidationTaskConfig passed to report().
        ray_remote_kwargs: Keyword arguments to pass to `ray.remote()` for the validation task.
            This can be used to specify resource requirements, number of retries, etc.
    """

    def __init__(
        self,
        fn: ValidationFn,
        task_config: Optional[ValidationTaskConfig] = None,
        ray_remote_kwargs: Optional[Dict[str, Any]] = None,
    ):
        self.fn = fn
        # Fall back to an all-defaults task config when none is supplied.
        self.task_config = (
            ValidationTaskConfig() if task_config is None else task_config
        )
        # TODO: ray_remote_kwargs is not json-serializable because retry_exceptions
        # can be a list of exception types. If ray core makes ray_remote_kwargs
        # json-serializable we can move this to ValidationTaskConfig.
        self.ray_remote_kwargs = {} if ray_remote_kwargs is None else ray_remote_kwargs
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/api/validation_config.py",
"license": "Apache License 2.0",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:rllib/examples/ray_tune/appo_hyperparameter_tune.py | """Hyperparameter tuning script for APPO on CartPole using BasicVariantGenerator.
This script uses Ray Tune's BasicVariantGenerator to perform grid/random search
over APPO hyperparameters for CartPole-v1 (though it is applicable to any RLlib algorithm).
BasicVariantGenerator is Tune's default search algorithm that generates trial
configurations from the search space without using historical trial results.
It supports grid search (tune.grid_search), random sampling (tune.uniform, etc.),
and combinations thereof.
Alternative Search Algorithms
-----------------------------
Ray Tune supports many search algorithms that can leverage results from previous
trials to guide the search more efficiently:
- HyperOptSearch: Bayesian optimization using Tree-structured Parzen Estimators (TPE)
- OptunaSearch: Bayesian optimization with pruning support via Optuna
- BayesOptSearch: Gaussian process-based Bayesian optimization
- AxSearch: Adaptive experimentation platform from Meta
- BlendSearch/CFO: Cost-aware optimization algorithms from Microsoft FLAML
- BOHB: Bayesian Optimization and HyperBand
- Nevergrad: Derivative-free optimization
- ZOOpt: Zeroth-order optimization
See the full list and usage examples at:
https://docs.ray.io/en/latest/tune/api/suggestion.html
Note: When using these advanced search algorithms, wrap them with ConcurrencyLimiter
to control parallelism (e.g., `ConcurrencyLimiter(HyperOptSearch(), max_concurrent=4)`).
BasicVariantGenerator has built-in concurrency control via its `max_concurrent` parameter.
The script runs 4 parallel trials by default.
For each trial, it defaults to using 1 GPU per learner, meaning that
you need to be running on a cluster with 4 GPUs available.
Otherwise, we recommend users change `num_gpus_per_learner` to zero
or `max_concurrent_trials` to one (if only single GPU is available).
Key hyperparameters being tuned:
- lr: Learning rate
- entropy_coeff: Entropy coefficient for exploration
- vf_loss_coeff: Value function loss coefficient
- train_batch_size_per_learner: Batch size per learner
- circular_buffer_num_batches: Number of batches in circular buffer
- circular_buffer_iterations_per_batch: Replay iterations per batch
- target_network_update_freq: Target network update frequency
- broadcast_interval: Weight synchronization interval
Note on storage for multi-node clusters
---------------------------------------
Ray Tune requires centralized storage accessible by all nodes in a multi-node cluster.
This can be an S3 bucket or local storage accessible to all nodes.
If running on an Anyscale job, it has an internal S3 bucket defined by the
ANYSCALE_ARTIFACT_STORAGE environment variable.
See https://docs.ray.io/en/latest/train/user-guides/persistent-storage.html for more details.
How to run this script
----------------------
Run with 4 parallel trials (default):
`python appo_hyperparameter_tune.py`
Run with custom number of parallel trials (max-concurrent-trials) and
the total number of trials (num_samples):
`python appo_hyperparameter_tune.py --max-concurrent-trials=2 --num_samples=20`
Run on a cluster with cloud or local filesystem storage:
`python appo_hyperparameter_tune.py --storage-path=s3://my-bucket/appo-hyperopt`
`python appo_hyperparameter_tune.py --storage-path=/mnt/nfs/appo-hyperopt`
Run locally with only a single GPU
`python appo_hyperparameter_tune.py --max-concurrent-trials=1 --num_samples=5 --storage-path=/mnt/nfs/appo-hyperopt`
Results to expect
-----------------
The tuner will explore the hyperparameter space via random sampling and find
configurations that achieve reward of 475+ on CartPole within 2 million timesteps.
The best trial's hyperparameters will be logged at the end of training.
"""
from ray import tune
from ray.air.constants import TRAINING_ITERATION
from ray.rllib.algorithms.appo import APPOConfig
from ray.rllib.examples.utils import (
add_rllib_example_script_args,
)
from ray.rllib.utils.metrics import (
ENV_RUNNER_RESULTS,
EPISODE_RETURN_MEAN,
NUM_ENV_STEPS_SAMPLED_LIFETIME,
)
from ray.tune import CLIReporter
from ray.tune.search import BasicVariantGenerator
# CLI setup: shared RLlib example flags (stop criteria, scaling knobs,
# num_samples, max_concurrent_trials) plus a storage-path override.
parser = add_rllib_example_script_args(
    default_reward=475.0,
    default_timesteps=2_000_000,
)
parser.add_argument(
    "--storage-path",
    default="~/ray_results",
    type=str,
    help="The storage path for checkpoints and related tuning data.",
)
# Defaults sized for a 4-GPU cluster (4 concurrent trials x 1 GPU learner).
parser.set_defaults(
    num_env_runners=4,
    num_envs_per_env_runner=6,
    num_learners=1,
    num_gpus_per_learner=1,
    num_samples=12,  # Run 12 training trials
    max_concurrent_trials=4,  # Run 4 trials in parallel
)
args = parser.parse_args()
# APPO config with the Tune search space declared inline: each tune.* value
# below is sampled once per trial by the Tuner.
config = (
    APPOConfig()
    .environment("CartPole-v1")
    .env_runners(
        num_env_runners=args.num_env_runners,
        num_envs_per_env_runner=args.num_envs_per_env_runner,
    )
    .learners(
        num_learners=args.num_learners,
        num_gpus_per_learner=args.num_gpus_per_learner,
        num_aggregator_actors_per_learner=2,
    )
    .training(
        # Hyperparameters to tune with initial random values
        # Use tune.uniform for continuous params
        lr=tune.loguniform(0.0001, 0.005),
        vf_loss_coeff=tune.uniform(0.5, 2.0),
        entropy_coeff=tune.uniform(0.001, 0.02),
        # Use tune.qrandint(a, b, q) for discrete params in [a, b) with step q (defaults to 1)
        train_batch_size_per_learner=tune.qrandint(256, 2048, 64),
        target_network_update_freq=tune.qrandint(1, 6),
        broadcast_interval=tune.qrandint(2, 11),
        circular_buffer_num_batches=tune.qrandint(2, 6),
        circular_buffer_iterations_per_batch=tune.qrandint(1, 5),
    )
)
# Stopping criteria: either reach target reward or max timesteps
stop = {
    f"{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}": args.stop_reward,
    NUM_ENV_STEPS_SAMPLED_LIFETIME: args.stop_timesteps,
}
if __name__ == "__main__":
    # BasicVariantGenerator generates trial configurations from the search space
    # without using historical trial results. It's Tune's default search algorithm
    # and supports grid search, random sampling, and combinations.
    # max_concurrent limits how many trials run in parallel.
    search_alg = BasicVariantGenerator(max_concurrent=args.max_concurrent_trials)
    tuner = tune.Tuner(
        config.algo_class,
        param_space=config,
        run_config=tune.RunConfig(
            stop=stop,
            storage_path=args.storage_path,
            checkpoint_config=tune.CheckpointConfig(
                checkpoint_at_end=True,
            ),
            # Console progress table; max_report_frequency caps prints to
            # one every 30 seconds.
            progress_reporter=CLIReporter(
                metric_columns={
                    TRAINING_ITERATION: "iter",
                    "time_total_s": "total time (s)",
                    NUM_ENV_STEPS_SAMPLED_LIFETIME: "ts",
                    f"{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}": "episode return mean",
                },
                max_report_frequency=30,
            ),
        ),
        # The best trial is the one maximizing mean episode return.
        tune_config=tune.TuneConfig(
            metric=f"{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}",
            mode="max",
            num_samples=args.num_samples,
            search_alg=search_alg,
        ),
    )
    results = tuner.fit()
    print("Best hyperparameters:", results.get_best_result().config)
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/examples/ray_tune/appo_hyperparameter_tune.py",
"license": "Apache License 2.0",
"lines": 160,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/train/tutorials/ci/py_scripts/01_02_03_intro_to_ray_train.py | # 00. Runtime setup — install same deps as build.sh and set env vars
import os
import sys
import subprocess
# Non-secret env var
os.environ["RAY_TRAIN_V2_ENABLED"] = "1"
# Install Python dependencies (
subprocess.check_call(
[
sys.executable,
"-m",
"pip",
"install",
"--no-cache-dir",
"torch==2.8.0",
"torchvision==0.23.0",
"matplotlib==3.10.6",
"pyarrow==14.0.2",
]
)
# 01. Imports
# --- Standard library: file IO, paths, timestamps, temp dirs, cleanup ---
import csv # Simple CSV logging for metrics in single-GPU section
import datetime # Timestamps for run directories / filenames
import os # Filesystem utilities (paths, env vars)
import tempfile # Ephemeral dirs for checkpoint staging with ray.train.report()
import shutil # Cleanup of artifacts (later cells)
import gc # Manual garbage collection to cleanup after inference
from pathlib import Path # Convenient, cross-platform path handling
# --- Visualization & data wrangling ---
import matplotlib.pyplot as plt # Plot sample digits and metrics curves
from PIL import Image # Image utilities for inspection/debug
import numpy as np # Numeric helpers (random sampling, arrays)
import pandas as pd # Read metrics.csv into a DataFrame
# --- PyTorch & TorchVision (model + dataset) ---
import torch
from torch.nn import CrossEntropyLoss # Classification loss for MNIST
from torch.optim import Adam # Optimizer
from torchvision.models import (
resnet18,
) # Baseline CNN (we’ll adapt for 1-channel input)
from torchvision.datasets import MNIST # Dataset
from torchvision.transforms import (
ToTensor,
Normalize,
Compose,
) # Preprocessing pipeline
# --- Ray Train (distributed orchestration) ---
import ray
from ray.train import ScalingConfig, RunConfig # Configure scale and storage
from ray.train.torch import TorchTrainer # Multi-GPU PyTorch trainer (DDP/FSDP)
# 02. Download MNIST Dataset
# Stored on shared cluster storage so every node can read it; no transform is
# passed here, so indexing the dataset yields (PIL image, int label) pairs.
dataset = MNIST(root="/mnt/cluster_storage/data", train=True, download=True)
# 03. Visualize Sample Digits
# Create a square figure for plotting 9 samples (3x3 grid)
figure = plt.figure(figsize=(8, 8))
cols, rows = 3, 3
# Loop through grid slots and plot a random digit each time
for i in range(1, cols * rows + 1):
    # Randomly select an index from the dataset
    sample_idx = np.random.randint(0, len(dataset.data))
    img, label = dataset[sample_idx]  # image (PIL) and its digit label
    # Add subplot to the figure
    figure.add_subplot(rows, cols, i)
    plt.title(label)  # show the digit label above each subplot
    plt.axis("off")  # remove axes for cleaner visualization
    plt.imshow(img, cmap="gray")  # display as grayscale image
# 04. Define ResNet-18 Model for MNIST
def build_resnet18():
    """Return a ResNet-18 adapted for single-channel MNIST digit images."""
    # 10 output classes: digits 0-9.
    net = resnet18(num_classes=10)

    # Swap the stock RGB stem (3 input channels) for a grayscale one, keeping
    # the original ResNet kernel size, stride, and padding.
    net.conv1 = torch.nn.Conv2d(
        in_channels=1,  # grayscale input
        out_channels=64,  # same filter count as stock ResNet
        kernel_size=(7, 7),
        stride=(2, 2),
        padding=(3, 3),
        bias=False,
    )
    return net
# 05. Define the Ray Train per-worker training loop
def train_loop_ray_train(config: dict):
    """Per-worker training loop executed by TorchTrainer.

    ``config`` carries hyperparameters from the trainer
    (``num_epochs``, ``global_batch_size``).
    """
    # Classification loss for the 10 MNIST classes.
    loss_fn = CrossEntropyLoss()

    # load_model_ray_train() runs prepare_model(): device placement + DDP wrap.
    model = load_model_ray_train()
    optimizer = Adam(model.parameters(), lr=1e-5)

    # Split the configured global batch evenly across all workers.
    global_batch_size = config["global_batch_size"]
    world_size = ray.train.get_context().get_world_size()
    batch_size = global_batch_size // world_size
    print(f"{world_size=}\n{batch_size=}")

    # prepare_data_loader() adds a DistributedSampler and device placement.
    loader = build_data_loader_ray_train(batch_size=batch_size)

    for epoch in range(config["num_epochs"]):
        # Reshuffle each worker's shard differently every epoch.
        loader.sampler.set_epoch(epoch)

        for images, labels in loader:
            predictions = model(images)  # forward
            loss = loss_fn(predictions, labels)
            optimizer.zero_grad()
            loss.backward()  # DDP averages gradients across workers
            optimizer.step()

        # End of epoch: log metrics and persist a checkpoint (rank 0 only).
        metrics = print_metrics_ray_train(loss, epoch)
        save_checkpoint_and_metrics_ray_train(model, metrics)
# 06. Define the configuration dictionary passed into the training loop
# train_loop_config is provided to TorchTrainer and injected into
# train_loop_ray_train(config) as the "config" argument.
# → Any values defined here are accessible inside the training loop.
train_loop_config = {
    "num_epochs": 2,  # Number of full passes through the dataset
    "global_batch_size": 128  # Effective batch size across ALL workers
    # (Ray will split this evenly per worker, e.g.
    # with 8 workers → 16 samples/worker/step)
}
# 07. Configure the scaling of the training job
# ScalingConfig defines how many parallel training workers Ray should launch
# and whether each worker should be assigned a GPU or CPU.
# → Each worker runs train_loop_ray_train(config) independently,
#   with Ray handling synchronization via DDP under the hood.
scaling_config = ScalingConfig(
    num_workers=8,  # Launch 8 training workers (1 process per worker)
    use_gpu=True,  # Allocate 1 GPU to each worker
)
# 08. Build and prepare the model for Ray Train
def load_model_ray_train() -> torch.nn.Module:
    """Construct the MNIST ResNet-18 and wrap it for distributed training."""
    # prepare_model() moves the model to this worker's device and wraps it
    # in DistributedDataParallel.
    return ray.train.torch.prepare_model(build_resnet18())
# 09. Build a Ray Train–ready DataLoader for MNIST
def build_data_loader_ray_train(batch_size: int) -> torch.utils.data.DataLoader:
    """Create a distributed-ready MNIST DataLoader for this worker."""
    # Convert to tensor and scale pixel values into [-1, 1].
    preprocessing = Compose([ToTensor(), Normalize((0.5,), (0.5,))])

    # MNIST training split from persistent cluster storage.
    mnist_train = MNIST(
        root="/mnt/cluster_storage/data",
        train=True,
        download=True,
        transform=preprocessing,
    )

    # Plain PyTorch loader: batching, shuffling, drop the last partial batch.
    loader = torch.utils.data.DataLoader(
        mnist_train, batch_size=batch_size, shuffle=True, drop_last=True
    )

    # prepare_data_loader():
    #   - adds a DistributedSampler when running with multiple workers
    #   - moves batches to the correct device automatically
    return ray.train.torch.prepare_data_loader(loader)
# 10. Report training metrics from each worker
def print_metrics_ray_train(loss: torch.Tensor, epoch: int) -> dict:
    """Print and return this worker's metrics for the finished epoch.

    Fix: the return annotation previously claimed ``None``, but the function
    returns the metrics dict, which callers assign and forward to
    ``ray.train.report``.

    Args:
        loss: Final batch loss of the epoch (scalar tensor).
        epoch: Zero-based epoch index.

    Returns:
        Dict with ``loss`` (float) and ``epoch`` (int).
    """
    metrics = {"loss": loss.item(), "epoch": epoch}
    world_rank = ray.train.get_context().get_world_rank()  # report from all workers
    print(f"{metrics=} {world_rank=}")
    return metrics
# 11. Save checkpoint and report metrics with Ray Train
def save_checkpoint_and_metrics_ray_train(
    model: torch.nn.Module, metrics: dict[str, float]
) -> None:
    """Stage the model weights in a temp dir and report them with metrics."""
    with tempfile.TemporaryDirectory() as staging_dir:
        # The model is DDP-wrapped, so unwrap via `.module` before grabbing
        # its state dict.
        weights_path = os.path.join(staging_dir, "model.pt")
        torch.save(model.module.state_dict(), weights_path)

        # Report metrics centrally and attach the staged checkpoint so it can
        # be used later for resuming training or inference.
        ray.train.report(
            metrics,
            checkpoint=ray.train.Checkpoint.from_directory(staging_dir),
        )
# 12. Save checkpoint only from the rank-0 worker
def save_checkpoint_and_metrics_ray_train(
    model: torch.nn.Module, metrics: dict[str, float]
) -> None:
    """Report metrics from every worker; persist a checkpoint from rank 0 only."""
    with tempfile.TemporaryDirectory() as staging_dir:
        checkpoint = None

        # Only the rank-0 worker writes files; other ranks attach no checkpoint.
        if ray.train.get_context().get_world_rank() == 0:
            torch.save(
                model.module.state_dict(),  # unwrap DDP before saving
                os.path.join(staging_dir, "model.pt"),
            )
            checkpoint = ray.train.Checkpoint.from_directory(staging_dir)

        # Every worker still reports, keeping the training loop synchronized
        # and metrics logged per rank.
        ray.train.report(metrics, checkpoint=checkpoint)
# 13. Configure persistent storage and run name
storage_path = "/mnt/cluster_storage/training/"
run_config = RunConfig(
    storage_path=storage_path,  # where to store checkpoints/logs
    name="distributed-mnist-resnet18",  # identifier for this run
)
# 14. Set up the TorchTrainer
trainer = TorchTrainer(
    train_loop_ray_train,  # training loop to run on each worker
    scaling_config=scaling_config,  # number of workers and resource config
    run_config=run_config,  # storage path + run name for artifacts
    train_loop_config=train_loop_config,  # hyperparameters passed to the loop
)
# 15. Launch distributed training
# trainer.fit() starts the training job:
#   - Spawns workers according to scaling_config
#   - Runs train_loop_ray_train() on each worker
#   - Collects metrics and checkpoints into result
result = trainer.fit()
# 16. Show the training results
# NOTE: these bare expressions only display output in a notebook; in a plain
# script they are no-ops (this file is a converted notebook).
result  # contains metrics, checkpoints, and run history
# 17. Display the full metrics history as a pandas DataFrame
result.metrics_dataframe
# 18. Define a Ray actor to load the trained model and run inference
@ray.remote(num_gpus=1)  # allocate 1 GPU to this actor
class ModelWorker:
    def __init__(self, checkpoint):
        """Load the checkpointed weights and stage the model on this actor's GPU."""
        # Load model weights from the Ray checkpoint (on CPU first).
        # weights_only=True restricts torch.load to plain tensors/state dicts,
        # avoiding arbitrary-object unpickling.
        with checkpoint.as_directory() as ckpt_dir:
            model_path = os.path.join(ckpt_dir, "model.pt")
            state_dict = torch.load(
                model_path,
                map_location=torch.device("cpu"),
                weights_only=True,
            )
        # Rebuild the model, load weights, move to GPU, and set to eval mode
        self.model = build_resnet18()
        self.model.load_state_dict(state_dict)
        self.model.to("cuda")
        self.model.eval()
    @torch.inference_mode()  # disable autograd for faster inference
    def predict(self, batch):
        """
        batch: torch.Tensor or numpy array with shape [B,C,H,W] or [C,H,W]
        returns: list[int] predicted class indices
        """
        x = torch.as_tensor(batch)
        if x.ndim == 3:  # single image → add batch dimension
            x = x.unsqueeze(0)  # shape becomes [1,C,H,W]
        x = x.to("cuda", non_blocking=True)
        logits = self.model(x)
        preds = torch.argmax(logits, dim=1)
        return preds.detach().cpu().tolist()
# Create a fresh actor instance (avoid naming conflicts); the actor loads the
# best checkpoint produced by the training run above.
worker = ModelWorker.remote(result.checkpoint)
# 19. CPU preprocessing + GPU inference via Ray actor
# Shared ToTensor instance reused by normalize_cpu() below.
to_tensor = ToTensor()
def normalize_cpu(img):
    """Convert a PIL image into a normalized CPU tensor of shape [C, H, W]."""
    tensor = to_tensor(img)  # [C,H,W] on CPU
    channels = tensor.shape[0]
    # Channel-wise normalization: three means/stds for RGB, one for grayscale.
    if channels == 3:
        normalizer = Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    else:
        normalizer = Normalize((0.5,), (0.5,))
    return normalizer(tensor)
figure = plt.figure(figsize=(8, 8))
cols, rows = 3, 3
# Plot a 3x3 grid of random MNIST samples with predictions
for i in range(1, cols * rows + 1):
    idx = np.random.randint(0, len(dataset))
    img, label = dataset[idx]
    # Preprocess on CPU, add batch dim → [1,C,H,W]
    x = normalize_cpu(img).unsqueeze(0)
    # Run inference on GPU via Ray actor, fetch result.
    # NOTE: ray.get() blocks per image here — fine for a 9-image demo, but a
    # real pipeline would batch the requests.
    pred = ray.get(worker.predict.remote(x))[0]  # int
    # Plot image with true label and predicted label
    figure.add_subplot(rows, cols, i)
    plt.title(f"label: {label}; pred: {int(pred)}")
    plt.axis("off")
    arr = np.array(img)
    plt.imshow(arr, cmap="gray" if arr.ndim == 2 else None)
plt.tight_layout()
plt.show()
# 20.
# stop the actor process and free its GPU
ray.kill(worker, no_restart=True)
# drop local references so nothing pins it
del worker
# Forcing garbage collection is optional:
# - Cluster resources are already freed by ray.kill()
# - Python will clean up the local handle eventually
# - gc.collect() is usually unnecessary unless debugging memory issues
gc.collect()
# 01. Training loop using Ray Data
def train_loop_ray_train_ray_data(config: dict):
    """Per-worker training loop that consumes a Ray Data shard instead of a
    PyTorch DataLoader."""
    # Same setup as the DataLoader version: loss, DDP-wrapped model, optimizer.
    loss_fn = CrossEntropyLoss()
    model = load_model_ray_train()
    optimizer = Adam(model.parameters(), lr=1e-3)

    # Split the global batch evenly across workers.
    world_size = ray.train.get_context().get_world_size()
    per_worker_batch = config["global_batch_size"] // world_size
    loader = build_data_loader_ray_train_ray_data(batch_size=per_worker_batch)

    for epoch in range(config["num_epochs"]):
        # No sampler.set_epoch() here: Ray Data handles shuffling internally.
        # Batches arrive as dicts: {"image": ..., "label": ...}.
        for batch in loader:
            loss = loss_fn(model(batch["image"]), batch["label"])
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # Same epilogue as before: log metrics, checkpoint from rank 0.
        metrics = print_metrics_ray_train(loss, epoch)
        save_checkpoint_and_metrics_ray_train(model, metrics)
# 02. Build a Ray Data–backed data loader
def build_data_loader_ray_train_ray_data(batch_size: int, prefetch_batches: int = 2):
    """Turn this worker's "train" Ray Data shard into a torch batch iterator."""
    # Fetch the dataset shard assigned to this worker by the trainer.
    shard = ray.train.get_dataset_shard("train")
    # iter_torch_batches() yields dict batches ({"image": ..., "label": ...});
    # prefetch_batches controls how many batches are buffered ahead of use.
    return shard.iter_torch_batches(
        batch_size=batch_size, prefetch_batches=prefetch_batches
    )
# 03. Convert MNIST dataset into Parquet for Ray Data
# Build a DataFrame with image arrays and labels
df = pd.DataFrame(
    {
        "image": dataset.data.tolist(),  # raw image pixels (as lists)
        "label": dataset.targets,  # digit labels 0–9
    }
)
# Persist the dataset in Parquet format (columnar, efficient for Ray Data)
df.to_parquet("/mnt/cluster_storage/MNIST.parquet")
# 04. Load the Parquet dataset into a Ray Dataset
# Read the Parquet file → creates a distributed Ray Dataset whose rows match
# the DataFrame columns above ("image", "label")
train_ds = ray.data.read_parquet("/mnt/cluster_storage/MNIST.parquet")
# 05. Define preprocessing transform for Ray Data
# Build the transform pipeline once at module scope instead of once per row:
# transform_images() runs on every dataset row via ray.data.map(), so
# reconstructing Compose/ToTensor/Normalize per call is wasted work.
_MNIST_TRANSFORM = Compose(
    [
        ToTensor(),  # convert to tensor
        Normalize((0.5,), (0.5,)),  # normalize to [-1, 1]
    ]
)


def transform_images(row: dict) -> dict:
    """Replace row["image"] (nested pixel lists) with a normalized tensor.

    Args:
        row: A Ray Data row with "image" (pixel array) and "label" fields.

    Returns:
        The same row with "image" replaced by a normalized [C, H, W] tensor.
    """
    # Ensure the pixels are uint8 before building a PIL image from them.
    image_arr = np.array(row["image"], dtype=np.uint8)
    row["image"] = _MNIST_TRANSFORM(Image.fromarray(image_arr))
    return row
# 06. Apply the preprocessing transform across the Ray Dataset
# Run transform_images() on each row (parallelized across cluster workers)
train_ds = train_ds.map(transform_images)
# 07. Configure TorchTrainer with Ray Data integration
# Wrap Ray Dataset in a dict → the "train" key here is what
# ray.train.get_dataset_shard("train") looks up inside the training loop.
datasets = {"train": train_ds}
trainer = TorchTrainer(
    train_loop_ray_train_ray_data,  # training loop consuming Ray Data
    train_loop_config={  # hyperparameters
        "num_epochs": 1,
        "global_batch_size": 512,
    },
    scaling_config=scaling_config,  # number of workers + GPU/CPU resources
    run_config=RunConfig(
        storage_path=storage_path, name="dist-MNIST-res18-ray-data"
    ),  # where to store checkpoints/logs
    datasets=datasets,  # provide Ray Dataset shards to workers
)
# 08. Start the distributed training job with Ray Data integration
# Launches the training loop across all workers
# - Streams preprocessed Ray Dataset batches into each worker
# - Reports metrics and checkpoints to cluster storage
trainer.fit()
# 01. Training loop with checkpoint loading for fault tolerance
def train_loop_ray_train_with_checkpoint_loading(config: dict):
    """Training loop that resumes from the latest checkpoint when one exists.

    The checkpoint file names loaded here (model.pt, optimizer.pt,
    extra_state.pt) must match the names written by
    save_checkpoint_and_metrics_ray_train_with_extra_state().
    """
    # Same setup as before: loss, model, optimizer
    criterion = CrossEntropyLoss()
    model = load_model_ray_train()
    optimizer = Adam(model.parameters(), lr=1e-3)
    # Same data loader logic as before
    global_batch_size = config["global_batch_size"]
    batch_size = global_batch_size // ray.train.get_context().get_world_size()
    data_loader = build_data_loader_ray_train_ray_data(batch_size=batch_size)
    # Default: start at epoch 0 unless a checkpoint is available
    start_epoch = 0
    # Attempt to load from latest checkpoint
    checkpoint = ray.train.get_checkpoint()
    if checkpoint:
        # Continue training from a previous checkpoint
        with checkpoint.as_directory() as ckpt_dir:
            # Restore model + optimizer state
            model_state_dict = torch.load(
                os.path.join(ckpt_dir, "model.pt"),
            )
            # Load the model and optimizer state
            # (.module unwraps the DDP wrapper applied by prepare_model)
            model.module.load_state_dict(model_state_dict)
            optimizer.load_state_dict(
                torch.load(os.path.join(ckpt_dir, "optimizer.pt"))
            )
            # Resume from last epoch + 1
            start_epoch = (
                torch.load(os.path.join(ckpt_dir, "extra_state.pt"))["epoch"] + 1
            )
    # Same training loop as before except it starts at a parameterized start_epoch
    for epoch in range(start_epoch, config["num_epochs"]):
        for batch in data_loader:
            outputs = model(batch["image"])
            loss = criterion(outputs, batch["label"])
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # Report metrics and save model + optimizer + epoch state
        metrics = print_metrics_ray_train(loss, epoch)
        # We now save the optimizer and epoch state in addition to the model
        save_checkpoint_and_metrics_ray_train_with_extra_state(
            model, metrics, optimizer, epoch
        )
# 02. Save checkpoint with model, optimizer, and epoch state
def save_checkpoint_and_metrics_ray_train_with_extra_state(
    model: torch.nn.Module,
    metrics: dict[str, float],
    optimizer: torch.optim.Optimizer,
    epoch: int,
) -> None:
    """Checkpoint model + optimizer + epoch from rank 0; report from all ranks."""
    with tempfile.TemporaryDirectory() as staging_dir:
        checkpoint = None

        # Only the rank-0 worker persists files to disk.
        if ray.train.get_context().get_world_rank() == 0:
            # Everything needed to resume exactly where training stopped:
            # weights, optimizer state, and the last completed epoch.
            artifacts = {
                "model.pt": model.module.state_dict(),  # unwrap DDP first
                "optimizer.pt": optimizer.state_dict(),
                "extra_state.pt": {"epoch": epoch},
            }
            for filename, state in artifacts.items():
                torch.save(state, os.path.join(staging_dir, filename))
            checkpoint = ray.train.Checkpoint.from_directory(staging_dir)

        # All ranks report metrics; only rank 0 attaches a checkpoint.
        ray.train.report(metrics, checkpoint=checkpoint)
# 03. Configure TorchTrainer with fault-tolerance enabled
# Allow up to 3 automatic retries if workers fail
failure_config = ray.train.FailureConfig(max_failures=3)
# Single source of truth for the run name. Fix: this variable was previously
# defined but unused, with the literal string duplicated in both RunConfigs
# below — the names must match exactly for checkpoint restoration to work.
experiment_name = "fault-tolerant-MNIST-vit"
trainer = TorchTrainer(
    train_loop_per_worker=train_loop_ray_train_with_checkpoint_loading,  # fault-tolerant loop
    train_loop_config={  # hyperparameters
        "num_epochs": 1,
        "global_batch_size": 512,
    },
    scaling_config=scaling_config,  # resource scaling as before
    run_config=ray.train.RunConfig(
        name=experiment_name,
        storage_path=storage_path,  # persistent checkpoint storage
        failure_config=failure_config,  # enable automatic retries
    ),
    datasets=datasets,  # Ray Dataset shard for each worker
)
# 04. Start the fault-tolerant training job
# Launches training with checkpointing + automatic retries enabled
# If workers fail, Ray will reload the latest checkpoint and resume
trainer.fit()
# 05. Manually restore a trainer from the last checkpoint
restored_trainer = TorchTrainer(
    train_loop_per_worker=train_loop_ray_train_with_checkpoint_loading,  # loop supports checkpoint loading
    train_loop_config={  # hyperparameters must match
        "num_epochs": 1,
        "global_batch_size": 512,
    },
    scaling_config=scaling_config,  # same resource setup as before
    run_config=ray.train.RunConfig(
        name=experiment_name,  # must match previous run name
        storage_path=storage_path,  # path where checkpoints are saved
        failure_config=failure_config,  # still allow retries
    ),
    datasets=datasets,  # same dataset as before
)
# 06. Resume training from the last checkpoint
# Fit the restored trainer → continues from last saved epoch
# If all epochs are already complete, training ends immediately
result = restored_trainer.fit()
# Display final training results (metrics, checkpoints, etc.)
# NOTE: the bare expression only displays in a notebook; no-op in a script.
result
# 07. Cleanup Cluster Storage
# Remove the MNIST data, the training outputs, and the Parquet copy of MNIST.
paths_to_delete = [
    "/mnt/cluster_storage/MNIST",
    "/mnt/cluster_storage/training",
    "/mnt/cluster_storage/MNIST.parquet",
]

for path in paths_to_delete:
    target = Path(path)
    if not target.exists():
        print(f"Not found: {path}")
        continue
    # Directories need a recursive delete; files just an unlink.
    if target.is_dir():
        shutil.rmtree(target)
    else:
        target.unlink()
    print(f"Deleted: {path}")
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/train/tutorials/ci/py_scripts/01_02_03_intro_to_ray_train.py",
"license": "Apache License 2.0",
"lines": 530,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/train/tutorials/ci/py_scripts/04a_vision_pattern.py | # 00. Runtime setup
import os
import sys
import subprocess
# Non-secret env var
os.environ["RAY_TRAIN_V2_ENABLED"] = "1"
# Install Python dependencies
subprocess.check_call([
sys.executable, "-m", "pip", "install", "--no-cache-dir",
"torch==2.8.0",
"torchvision==0.23.0",
"matplotlib==3.10.6",
"pyarrow==14.0.2",
"datasets==2.19.2",
])
# 01. Imports
# ————————————————————————
# Standard Library Utilities
# ————————————————————————
import os
import io
import tempfile
import shutil # file I/O and temp dirs
import json # reading/writing configs
import random, uuid # randomness and unique IDs
# ————————————————————————
# Core Data & Storage Libraries
# ————————————————————————
import pandas as pd # tabular data handling
import numpy as np # numerical ops
import pyarrow as pa # in-memory columnar format
import pyarrow.parquet as pq # reading/writing Parquet files
from tqdm import tqdm # progress bars
# ————————————————————————
# Image Handling & Visualization
# ————————————————————————
from PIL import Image
import matplotlib.pyplot as plt # plotting loss curves, images
# ————————————————————————
# PyTorch + TorchVision Core
# ————————————————————————
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms as T
from torchvision.models import resnet18
from torchvision.transforms import Compose, Resize, CenterCrop
# ————————————————————————
# Ray Train: Distributed Training Primitives
# ————————————————————————
import ray
import ray.train as train
from ray.train.torch import (
prepare_model,
prepare_data_loader,
TorchTrainer,
)
from ray.train import (
ScalingConfig,
RunConfig,
FailureConfig,
CheckpointConfig,
Checkpoint,
get_checkpoint,
get_context,
)
# ————————————————————————
# Dataset Access
# ————————————————————————
from datasets import load_dataset # Hugging Face Datasets
from ray.data import DataContext
# Disable the streaming executor so datasets materialize eagerly in this demo.
DataContext.get_current().use_streaming_executor = False

# 02. Load 10% of food101 (~7,500 images)
ds = load_dataset("ethz/food101", split="train[:10%]")

# 03. Resize and encode as JPEG bytes
transform = Compose([Resize(256), CenterCrop(224)])
records = []
for example in tqdm(ds, desc="Preprocessing images", unit="img"):
    try:
        img = transform(example["image"])
        buf = io.BytesIO()
        img.save(buf, format="JPEG")
        records.append({
            "image_bytes": buf.getvalue(),
            "label": example["label"]
        })
    except Exception as e:
        # NOTE(review): decode/resize failures are silently dropped; consider
        # logging `e` so data loss stays visible.
        continue

# 04. Visualize the dataset: 3x3 grid of random preprocessed samples
label_names = ds.features["label"].names  # maps int → string
samples = random.sample(records, 9)
fig, axs = plt.subplots(3, 3, figsize=(8, 8))
fig.suptitle("Sample Resized Images from food101-lite", fontsize=16)
for ax, rec in zip(axs.flatten(), samples):
    img = Image.open(io.BytesIO(rec["image_bytes"]))
    label_name = label_names[rec["label"]]
    ax.imshow(img)
    ax.set_title(label_name)
    ax.axis("off")
plt.tight_layout()
plt.show()

# 05. Write Dataset to Parquet (single shard on shared cluster storage)
output_dir = "/mnt/cluster_storage/food101_lite/parquet_256"
os.makedirs(output_dir, exist_ok=True)
table = pa.Table.from_pydict({
    "image_bytes": [r["image_bytes"] for r in records],
    "label": [r["label"] for r in records]
})
pq.write_table(table, os.path.join(output_dir, "shard_0.parquet"))
print(f"Wrote {len(records)} records to {output_dir}")
# 06. Define PyTorch Dataset that loads from Parquet
class Food101Dataset(Dataset):
    """PyTorch Dataset backed by a Parquet file of JPEG bytes + labels.

    Each item decodes one image from the ``image_bytes`` column and applies
    the optional transform. A precomputed map of global row index ->
    (row group, local row) lets ``__getitem__`` read only the containing
    row group.
    """

    def __init__(self, parquet_path: str, transform=None):
        self.parquet_file = pq.ParquetFile(parquet_path)
        self.transform = transform
        # Precompute a global row index to (row_group_idx, local_idx) map
        self.row_group_map = []
        for rg_idx in range(self.parquet_file.num_row_groups):
            rg_meta = self.parquet_file.metadata.row_group(rg_idx)
            num_rows = rg_meta.num_rows
            self.row_group_map.extend([(rg_idx, i) for i in range(num_rows)])

    def __len__(self):
        return len(self.row_group_map)

    def __getitem__(self, idx):
        row_group_idx, local_idx = self.row_group_map[idx]
        # Read only the relevant row group (memory-efficient for scalability).
        table = self.parquet_file.read_row_group(
            row_group_idx, columns=["image_bytes", "label"]
        )
        # Slice the single row out in Arrow instead of converting the whole
        # row group to pandas on every access (the previous to_pandas() call
        # paid an O(row_group) conversion per item).
        row = table.slice(local_idx, 1).to_pylist()[0]
        img = Image.open(io.BytesIO(row["image_bytes"])).convert("RGB")
        if self.transform:
            img = self.transform(img)
        return img, row["label"]
# 07. Define data preprocessing transform
# Standard ImageNet channel statistics used by the Normalize step below.
IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225]
transform = T.Compose([
    T.ToTensor(),
    T.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
])

# 08. Create train/val Parquet splits
full_path = "/mnt/cluster_storage/food101_lite/parquet_256/shard_0.parquet"
df = (
    pq.read_table(full_path)
    .to_pandas()
    .sample(frac=1.0, random_state=42)  # shuffle for reproducibility
)
# Last 500 shuffled rows become validation; everything else is training data.
df[:-500].to_parquet("/mnt/cluster_storage/food101_lite/train.parquet")  # training
df[-500:].to_parquet("/mnt/cluster_storage/food101_lite/val.parquet")  # validation

# 09. Observe data shape
loader = DataLoader(
    Food101Dataset("/mnt/cluster_storage/food101_lite/train.parquet", transform=transform),
    batch_size=16,
    shuffle=True,
    num_workers=4,
)
# Pull one batch to sanity-check shapes (images should be 16 x 3 x 224 x 224
# given the 224 center-crop applied at preprocessing time).
for images, labels in loader:
    print(images.shape, labels.shape)
    break
def build_dataloader(parquet_path: str, batch_size: int, shuffle=True):
    """Create a DataLoader over the Parquet dataset and wrap it for Ray Train.

    ``prepare_data_loader`` installs a DistributedSampler and handles device
    placement, so the underlying loader stays plain.
    """
    return prepare_data_loader(
        DataLoader(
            Food101Dataset(parquet_path, transform=transform),
            batch_size=batch_size,
            shuffle=shuffle,
            num_workers=2,
        )
    )
# 11. Define Ray Train train_loop_per_worker (tempdir checkpoints + Ray-managed metrics)
def train_loop_per_worker(config):
    """Per-worker Ray Train loop: DDP fine-tune of ResNet-18 on food101-lite.

    Reads ``config`` keys "lr", "batch_size", "epochs". Every epoch reports
    metrics; rank 0 additionally attaches a checkpoint (model/optimizer/epoch)
    so an interrupted run can resume from where it stopped.
    """
    import tempfile
    rank = get_context().get_world_rank()
    # === Model ===
    net = resnet18(num_classes=101)
    model = prepare_model(net)  # wraps for DDP + moves to worker device
    # === Optimizer / Loss ===
    optimizer = optim.Adam(model.parameters(), lr=config["lr"])
    criterion = nn.CrossEntropyLoss()
    # === Resume from Checkpoint ===
    start_epoch = 0
    ckpt = get_checkpoint()
    if ckpt:
        with ckpt.as_directory() as ckpt_dir:
            # Map to CPU is fine; prepare_model will handle device placement.
            model.load_state_dict(torch.load(os.path.join(ckpt_dir, "model.pt"), map_location="cpu"))
            opt_path = os.path.join(ckpt_dir, "optimizer.pt")
            if os.path.exists(opt_path):
                optimizer.load_state_dict(torch.load(opt_path, map_location="cpu"))
            meta_path = os.path.join(ckpt_dir, "meta.pt")
            if os.path.exists(meta_path):
                # Continue from the next epoch after the saved one
                start_epoch = int(torch.load(meta_path).get("epoch", -1)) + 1
        if rank == 0:
            print(f"[Rank {rank}] Resumed from checkpoint at epoch {start_epoch}")
    # === DataLoaders ===
    train_loader = build_dataloader(
        "/mnt/cluster_storage/food101_lite/train.parquet", config["batch_size"], shuffle=True
    )
    val_loader = build_dataloader(
        "/mnt/cluster_storage/food101_lite/val.parquet", config["batch_size"], shuffle=False
    )
    # === Training Loop ===
    for epoch in range(start_epoch, config["epochs"]):
        # Required when using DistributedSampler so shuffling differs per epoch
        if hasattr(train_loader, "sampler") and hasattr(train_loader.sampler, "set_epoch"):
            train_loader.sampler.set_epoch(epoch)
        model.train()
        train_loss_total, train_batches = 0.0, 0
        for xb, yb in train_loader:
            optimizer.zero_grad()
            loss = criterion(model(xb), yb)
            loss.backward()
            optimizer.step()
            train_loss_total += loss.item()
            train_batches += 1
        # max(..., 1) guards against a zero-batch shard (division by zero)
        train_loss = train_loss_total / max(train_batches, 1)
        # === Validation Loop ===
        model.eval()
        val_loss_total, val_batches = 0.0, 0
        with torch.no_grad():
            for val_xb, val_yb in val_loader:
                val_loss_total += criterion(model(val_xb), val_yb).item()
                val_batches += 1
        val_loss = val_loss_total / max(val_batches, 1)
        metrics = {"epoch": epoch, "train_loss": train_loss, "val_loss": val_loss}
        if rank == 0:
            print(metrics)
        # ---- Save checkpoint to fast local temp dir; Ray persists it via report() ----
        if rank == 0:
            with tempfile.TemporaryDirectory() as tmpdir:
                torch.save(model.state_dict(), os.path.join(tmpdir, "model.pt"))
                torch.save(optimizer.state_dict(), os.path.join(tmpdir, "optimizer.pt"))
                torch.save({"epoch": epoch}, os.path.join(tmpdir, "meta.pt"))
                ckpt_out = Checkpoint.from_directory(tmpdir)
                # report() must run while tmpdir still exists (inside the with)
                train.report(metrics, checkpoint=ckpt_out)
        else:
            # Non-zero ranks report metrics only (no checkpoint attachment);
            # all ranks must call report() each epoch to stay in lockstep.
            train.report(metrics)
    # === Final validation accuracy (distributed via TorchMetrics) ===
    from torchmetrics.classification import MulticlassAccuracy
    model.eval()
    device = next(model.parameters()).device
    # Sync across DDP workers when computing the final value
    acc_metric = MulticlassAccuracy(
        num_classes=101, average="micro", sync_on_compute=True
    ).to(device)
    with torch.no_grad():
        for xb, yb in val_loader:
            logits = model(xb)
            preds = torch.argmax(logits, dim=1)
            acc_metric.update(preds, yb)
    dist_val_acc = acc_metric.compute().item()
    if rank == 0:
        print(f"Val Accuracy (distributed): {dist_val_acc:.2%}")
# 12. Run Training with Ray Train
trainer = TorchTrainer(
    train_loop_per_worker=train_loop_per_worker,
    train_loop_config={"lr": 1e-3, "batch_size": 64, "epochs": 5},
    scaling_config=ScalingConfig(num_workers=8, use_gpu=True),
    run_config=RunConfig(
        name="food101_ft_resume",
        storage_path="/mnt/cluster_storage/food101_lite/results",
        # Keep the 5 best checkpoints ranked by lowest validation loss.
        checkpoint_config=CheckpointConfig(
            num_to_keep=5,
            checkpoint_score_attribute="val_loss",
            checkpoint_score_order="min"
        ),
        failure_config=FailureConfig(max_failures=3),
    ),
)
result = trainer.fit()
print("Final metrics:", result.metrics)
best_ckpt = result.checkpoint  # this is the one with lowest val_loss

# 13. Plot training / validation loss curves
# Pull the full metrics history Ray stored for this run
df = result.metrics_dataframe.copy()
# Keep only the columns we need (guard against extra columns)
cols = [c for c in ["epoch", "train_loss", "val_loss"] if c in df.columns]
df = df[cols].dropna()
# If multiple rows per epoch exist, keep the last report per epoch
if "epoch" in df.columns:
    df = df.sort_index().groupby("epoch", as_index=False).last()
# Plot
plt.figure(figsize=(8, 5))
if "train_loss" in df.columns:
    plt.plot(df["epoch"], df["train_loss"], marker="o", label="Train Loss")
if "val_loss" in df.columns:
    plt.plot(df["epoch"], df["val_loss"], marker="o", label="Val Loss")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.title("Train/Val Loss across Epochs")
plt.grid(True)
plt.legend()
plt.tight_layout()
plt.show()

# 14. Run the trainer again to demonstrate resuming from latest checkpoint
# (if all epochs already completed, this returns almost immediately)
result = trainer.fit()
print("Final metrics:", result.metrics)
# 15. Batch inference with Ray Data (force GPU actors if available on the cluster)
import ray.data as rdata
class ImageBatchPredictor:
    """Per-actor batch predictor: the ResNet is loaded once and reused."""

    def __init__(self, checkpoint_path: str):
        # Device is chosen on the worker actor, not the driver (which may be
        # CPU-only).
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # Load model weights a single time per actor.
        net = resnet18(num_classes=101)
        ckpt = Checkpoint.from_directory(checkpoint_path)
        with ckpt.as_directory() as ckpt_dir:
            weights = torch.load(
                os.path.join(ckpt_dir, "model.pt"),
                map_location=self.device,
            )
        # Checkpoints saved under DDP carry a "module." prefix; remove it.
        weights = {k.replace("module.", "", 1): v for k, v in weights.items()}
        net.load_state_dict(weights)
        self.model = net.eval().to(self.device)
        self.transform = T.Compose([
            T.ToTensor(),
            T.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
        ])
        torch.set_grad_enabled(False)

    def __call__(self, batch):
        """batch: Pandas DataFrame with columns ['image_bytes', 'label']"""
        tensors = [
            self.transform(Image.open(io.BytesIO(raw)).convert("RGB")).numpy()
            for raw in batch["image_bytes"]
        ]  # each (C,H,W)
        inputs = torch.from_numpy(np.stack(tensors, axis=0)).to(self.device)  # (N,C,H,W)
        predicted = torch.argmax(self.model(inputs), dim=1).cpu().numpy()
        out = batch.copy()
        out["predicted_label"] = predicted.astype(int)
        return out[["predicted_label", "label"]]
def build_inference_dataset(
    checkpoint_path: str,
    parquet_path: str,
    *,
    num_actors: int = 1,
    batch_size: int = 64,
    use_gpu_actors: bool = True,  # default to GPU actors on the cluster
):
    """Build a Ray Dataset batch-inference pipeline backed by stateful actors.

    Each actor constructs an ImageBatchPredictor once and reuses it across
    batches. By default one GPU is requested per actor, so inference lands on
    GPU workers even when the driver has no GPU.
    """
    source = rdata.read_parquet(parquet_path, columns=["image_bytes", "label"])
    gpus_per_actor = 1 if use_gpu_actors else 0
    return source.map_batches(
        ImageBatchPredictor,                     # the CLASS → stateful actors
        fn_constructor_args=(checkpoint_path,),  # ctor args for each actor
        batch_size=batch_size,
        batch_format="pandas",
        concurrency=num_actors,                  # number of actor workers
        num_gpus=gpus_per_actor,                 # force GPU placement on workers
    )
# 16. Perform inference with Ray Data using the best checkpoint
checkpoint_root = "/mnt/cluster_storage/food101_lite/results/food101_ft_resume"
checkpoint_dirs = sorted(
    [
        d for d in os.listdir(checkpoint_root)
        if d.startswith("checkpoint_") and os.path.isdir(os.path.join(checkpoint_root, d))
    ],
    reverse=True,
)
# NOTE(review): `checkpoint_dirs` is only used for this existence check; the
# actual checkpoint comes from `result.checkpoint` below.
if not checkpoint_dirs:
    raise FileNotFoundError("No checkpoint directories found.")
# Use the best checkpoint from the training result
with result.checkpoint.as_directory() as ckpt_dir:
    print("Best checkpoint contents:", os.listdir(ckpt_dir))
    best_ckpt_path = ckpt_dir
# NOTE(review): `best_ckpt_path` escapes the `with` block above. For
# local-filesystem checkpoints `as_directory` yields the persistent path, but
# for remote storage the yielded directory is temporary — confirm the path is
# still valid here before relying on it.
parquet_path = "/mnt/cluster_storage/food101_lite/val.parquet"
# Which item to visualize
idx = 2
import itertools
pred_ds = build_inference_dataset(
    checkpoint_path=best_ckpt_path,
    parquet_path=parquet_path,
    num_actors=1,  # adjust to scale out
    batch_size=64,  # adjust for throughput
)
# Avoid .take() / limit(); stream rows and grab the idx-th one.
row_iter = pred_ds.iter_rows()
inference_row = next(itertools.islice(row_iter, idx, idx + 1))
print(inference_row)  # {"predicted_label": ..., "label": ...}
# Load label map from Hugging Face (for pretty titles)
ds_tmp = load_dataset("ethz/food101", split="train[:1%]")  # just to get label names
label_names = ds_tmp.features["label"].names
# Load the raw image locally for visualization
dataset = Food101Dataset(parquet_path, transform=None)
img, _ = dataset[idx]
# Plot the image with predicted and true labels
plt.imshow(img)
plt.axis("off")
plt.title(
    f"Pred: {label_names[int(inference_row['predicted_label'])]}\n"
    f"True: {label_names[int(inference_row['label'])]}"
)
plt.show()
# 17. Cleanup---delete checkpoints and metrics from model training
# Root directory of every artifact written by this tutorial.
BASE_DIR = "/mnt/cluster_storage/food101_lite"
# Artifacts to remove: ad-hoc checkpoints, the metrics history file, and the
# Ray trainer run directories.
paths_to_delete = [
    os.path.join(BASE_DIR, "tmp_checkpoints"),
    os.path.join(BASE_DIR, "results", "history.csv"),
    os.path.join(BASE_DIR, "results", "food101_ft_resume"),
    os.path.join(BASE_DIR, "results", "food101_ft_run"),
    os.path.join(BASE_DIR, "results", "food101_single_run"),
]
# Best-effort removal: skip anything that does not exist.
for target in paths_to_delete:
    if not os.path.exists(target):
        print(f"Not found (skipped): {target}")
        continue
    if os.path.isfile(target):
        os.remove(target)
        print(f"Deleted file: {target}")
    else:
        shutil.rmtree(target)
        print(f"Deleted directory: {target}")
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/train/tutorials/ci/py_scripts/04a_vision_pattern.py",
"license": "Apache License 2.0",
"lines": 424,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/train/tutorials/ci/py_scripts/04b_tabular_workload_pattern.py | # 00. Runtime setup
import os
import sys
import subprocess

# Non-secret env var: opt in to the Ray Train v2 API before importing ray.train.
os.environ["RAY_TRAIN_V2_ENABLED"] = "1"

# Install pinned Python dependencies (CPU-only XGBoost build).
subprocess.check_call([
    sys.executable, "-m", "pip", "install", "--no-cache-dir",
    "matplotlib==3.10.6",
    "scikit-learn==1.7.2",
    "pyarrow==14.0.2",
    "xgboost-cpu==3.0.5",
    "seaborn==0.13.2",
])
# 01. Imports
import os
import shutil
import json
import uuid
import tempfile
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import fetch_covtype
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.model_selection import train_test_split
import xgboost as xgb
import pyarrow as pa
import ray
import ray.data as rd
from ray.data import ActorPoolStrategy
from ray.train import RunConfig, ScalingConfig, CheckpointConfig, FailureConfig, get_dataset_shard, get_checkpoint, get_context
from ray.train.xgboost import XGBoostTrainer, RayTrainReportCallback
# 02. Load the UCI Cover type dataset (~580k rows, 54 features)
data = fetch_covtype(as_frame=True)
df = data.frame
df.rename(columns={"Cover_Type": "label"}, inplace=True)  # Ray expects "label"
df["label"] = df["label"] - 1  # 1-7 → 0-6 (XGBoost wants zero-based classes)
assert df["label"].between(0, 6).all()
print(df.shape, df.label.value_counts(normalize=True).head())

# 03. Visualize class distribution
df.label.value_counts().plot(kind="bar", figsize=(6,3), title="Cover Type distribution")
plt.ylabel("Frequency"); plt.show()

# 04. Write separate train/val Parquets to /mnt/cluster_storage/covtype/
PARQUET_DIR = "/mnt/cluster_storage/covtype/parquet"
os.makedirs(PARQUET_DIR, exist_ok=True)
TRAIN_PARQUET = os.path.join(PARQUET_DIR, "train.parquet")
VAL_PARQUET = os.path.join(PARQUET_DIR, "val.parquet")
# Stratified 80/20 split for reproducibility
train_df, val_df = train_test_split(
    df, test_size=0.2, random_state=42, stratify=df["label"]
)
train_df.to_parquet(TRAIN_PARQUET, index=False)
val_df.to_parquet(VAL_PARQUET, index=False)
print(f"Wrote Train → {TRAIN_PARQUET} ({len(train_df):,} rows)")
print(f"Wrote Val → {VAL_PARQUET} ({len(val_df):,} rows)")

# 05. Load the two splits as Ray Datasets (lazy, columnar)
train_ds = rd.read_parquet(TRAIN_PARQUET).random_shuffle()
val_ds = rd.read_parquet(VAL_PARQUET)
print(train_ds)
print(val_ds)
print(f"Train rows: {train_ds.count():,}, Val rows: {val_ds.count():,}")  # Note that this will materialize the dataset (skip at scale)

# 07. Look into one batch to confirm feature dimensionality
batch = train_ds.take_batch(batch_size=5, batch_format="pandas")
print(batch.head())
# Every non-label column is a model feature.
feature_columns = [c for c in batch.columns if c != "label"]
# Index columns pandas sometimes writes into Parquet; stripped before training.
INDEX_COLS = {"__index_level_0__"}  # extend if needed
def _arrow_table_from_shard(name: str) -> pa.Table:
    """Materialize this worker's Ray Dataset shard as one pyarrow.Table.

    Accidental index columns (e.g. pandas' ``__index_level_0__``) are
    stripped so they never become model features.
    """
    shard = get_dataset_shard(name)
    chunks = [ray.get(ref) for ref in shard.materialize().to_arrow_refs()]
    if chunks:
        combined = pa.concat_tables(chunks, promote_options="none")
    else:
        combined = pa.table({})
    # Drop index columns if present
    wanted = [col for col in combined.column_names if col not in INDEX_COLS]
    if len(wanted) != len(combined.column_names):
        combined = combined.select(wanted)
    return combined
def _dmat_from_arrow(table: pa.Table, feature_cols, label_col: str):
    """Convert a pyarrow.Table into an XGBoost DMatrix with named features."""
    features = np.column_stack(
        [table[name].to_numpy(zero_copy_only=False) for name in feature_cols]
    )
    labels = table[label_col].to_numpy(zero_copy_only=False)
    return xgb.DMatrix(features, label=labels, feature_names=feature_cols)
def train_func(config):
    """Per-worker XGBoost training loop for Ray's XGBoostTrainer.

    config keys used: "label_column", "params", "num_boost_round".
    Resumes boosting from a Ray checkpoint when one is provided;
    RayTrainReportCallback reports metrics and saves a checkpoint each round.
    """
    label_col = config["label_column"]
    # -------- 1) Build DMatrices from this worker's dataset shards ----------
    train_arrow = _arrow_table_from_shard("train")
    eval_arrow = _arrow_table_from_shard("evaluation")
    # Use the SAME ordered feature list for both splits
    feature_cols = [c for c in train_arrow.column_names if c != label_col]
    dtrain = _dmat_from_arrow(train_arrow, feature_cols, label_col)
    deval = _dmat_from_arrow(eval_arrow, feature_cols, label_col)
    # -------- 2) Optional resume from checkpoint ------------------------------
    ckpt = get_checkpoint()
    if ckpt:
        with ckpt.as_directory() as d:
            model_path = os.path.join(d, RayTrainReportCallback.CHECKPOINT_NAME)
            booster = xgb.Booster()
            booster.load_model(model_path)
        print(f"[Rank {get_context().get_world_rank()}] Resumed from checkpoint")
    else:
        booster = None
    # -------- 3) Train with per-round reporting & checkpointing ---------------
    # NOTE(review): evals_result is filled by xgb.train but never read or
    # returned here — metrics reach Ray through RayTrainReportCallback.
    evals_result = {}
    xgb.train(
        params = config["params"],
        dtrain = dtrain,
        evals = [(dtrain, "train"), (deval, "validation")],
        num_boost_round = config["num_boost_round"],
        xgb_model = booster,  # continue from previous booster when resuming
        evals_result = evals_result,
        callbacks = [RayTrainReportCallback()],
    )
# 09. XGBoost config and Trainer (full-node CPU workers)
# Adjust this to your node size if different (e.g., 16, 32, etc.)
CPUS_PER_WORKER = 4
xgb_params = {
    "objective": "multi:softprob",   # per-class probabilities
    "num_class": 7,
    "eval_metric": "mlogloss",
    "tree_method": "hist",
    "eta": 0.3,
    "max_depth": 8,
    "nthread": CPUS_PER_WORKER,      # match the per-worker CPU reservation
    "device": "cpu",  # use CPU for training
}
trainer = XGBoostTrainer(
    train_func,
    scaling_config=ScalingConfig(
        num_workers=2,
        use_gpu=False,
        resources_per_worker={"CPU": CPUS_PER_WORKER},
    ),
    datasets={"train": train_ds, "evaluation": val_ds},
    train_loop_config={
        "label_column": "label",
        "params": xgb_params,
        "num_boost_round": 50,
    },
    run_config=RunConfig(
        name="covtype_xgb_cpu",
        storage_path="/mnt/cluster_storage/covtype/results",
        checkpoint_config=CheckpointConfig(
            num_to_keep=1,
            checkpoint_score_attribute="validation-mlogloss",  # score by val loss
            checkpoint_score_order="min",
        ),
        failure_config=FailureConfig(max_failures=1),
    ),
)

# 10. Fit the trainer (reports eval metrics every boosting round)
result = trainer.fit()
best_ckpt = result.checkpoint  # saved automatically by Trainer

# 11. Retrieve Booster object from Ray checkpoint
booster = RayTrainReportCallback.get_model(best_ckpt)
# Convert Ray Dataset to pandas for quick local scoring
val_pd = val_ds.to_pandas()
dmatrix = xgb.DMatrix(val_pd[feature_columns])
pred_prob = booster.predict(dmatrix)  # (rows, 7) probabilities
pred_labels = np.argmax(pred_prob, axis=1)
acc = accuracy_score(val_pd.label, pred_labels)
print(f"Validation accuracy: {acc:.3f}")

# 12. Confusion matrix (raw counts, then row-normalized)
cm = confusion_matrix(val_pd.label, pred_labels)  # or sample_batch.label if used
sns.heatmap(cm, annot=True, fmt="d", cmap="viridis")
plt.title("Confusion Matrix with Counts")
plt.xlabel("Predicted")
plt.ylabel("True")
plt.show()
cm_norm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
sns.heatmap(cm_norm, annot=True, fmt=".2f", cmap="viridis")
plt.title("Normalized Confusion Matrix")
plt.xlabel("Predicted")
plt.ylabel("True")
plt.show()
# 13. CPU batch inference with Ray Data
# Assumes: val_ds, feature_columns, best_ckpt already defined.
class XGBPredictor:
    """Stateful actor: load the Booster once, reuse it across batches."""

    def __init__(self, ckpt, feature_cols):
        self.model = RayTrainReportCallback.get_model(ckpt)
        self.feature_cols = feature_cols

    def __call__(self, batch: pd.DataFrame) -> pd.DataFrame:
        # Per-class probabilities → hard class predictions.
        probs = self.model.predict(xgb.DMatrix(batch[self.feature_cols]))
        hard_preds = np.argmax(probs, axis=1)
        columns = {
            "pred": hard_preds.astype(np.int32),
            "label": batch["label"].astype(np.int32),
        }
        return pd.DataFrame(columns)
# Use an ActorPoolStrategy instead of compute="actors"
pred_ds = val_ds.map_batches(
    XGBPredictor,
    fn_constructor_args=(best_ckpt, feature_columns),
    batch_format="pandas",
    compute=ActorPoolStrategy(),
    num_cpus=1,  # per-actor CPU; tune as needed
)
# Aggregate accuracy without collecting to driver: each batch is reduced to a
# (correct, n) pair, then summed across the dataset.
stats_ds = pred_ds.map_batches(
    lambda df: pd.DataFrame({
        "correct": [int((df["pred"].to_numpy() == df["label"].to_numpy()).sum())],
        "n": [int(len(df))]
    }),
    batch_format="pandas",
)
correct = int(stats_ds.sum("correct"))
n = int(stats_ds.sum("n"))
print(f"Validation accuracy (Ray Data inference): {correct / n:.3f}")

# 14. Gain‑based feature importance (top 15)
importances = booster.get_score(importance_type="gain")
keys, gains = zip(*sorted(importances.items(), key=lambda kv: kv[1], reverse=True)[:15])
plt.barh(range(len(gains)), gains)
plt.yticks(range(len(gains)), keys)
plt.gca().invert_yaxis()
plt.title("Top-15 Feature Importances (gain)"); plt.xlabel("Average gain"); plt.show()

# 15. Run 50 more training iterations from the last saved checkpoint
result = trainer.fit()
best_ckpt = result.checkpoint  # Saved automatically by Trainer

# 16. Rerun Ray Data inference to verify improved accuracy after continued training
# Reuse the existing Ray Data inference setup with the latest checkpoint
pred_ds = val_ds.map_batches(
    XGBPredictor,
    fn_constructor_args=(best_ckpt, feature_columns),
    batch_format="pandas",
    compute=ActorPoolStrategy(),
    num_cpus=1,
)
# Aggregate accuracy across all batches
stats_ds = pred_ds.map_batches(
    lambda df: pd.DataFrame({
        "correct": [int((df["pred"] == df["label"]).sum())],
        "n": [int(len(df))]
    }),
    batch_format="pandas",
)
correct = int(stats_ds.sum("correct"))
n = int(stats_ds.sum("n"))
print(f"Validation accuracy after continued training: {correct / n:.3f}")

# 17. Optional cleanup to free space (removes data, results, and checkpoints)
ARTIFACT_DIR = "/mnt/cluster_storage/covtype"
if os.path.exists(ARTIFACT_DIR):
    shutil.rmtree(ARTIFACT_DIR)
    print(f"Deleted {ARTIFACT_DIR}")
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/train/tutorials/ci/py_scripts/04b_tabular_workload_pattern.py",
"license": "Apache License 2.0",
"lines": 247,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/train/tutorials/ci/py_scripts/04c_time_series_workload_pattern.py | # 00. Runtime setup
import os
import sys
import subprocess

# Non-secret env var: opt in to the Ray Train v2 API before importing ray.train.
os.environ["RAY_TRAIN_V2_ENABLED"] = "1"

# Install pinned Python dependencies.
subprocess.check_call([
    sys.executable, "-m", "pip", "install", "--no-cache-dir",
    "torch==2.8.0",
    "matplotlib==3.10.6",
    "pyarrow==14.0.2",
    "datasets==2.19.2",
])
# 01. Imports
import os
import io
import math
import uuid
import shutil
import random
import requests
import sys
from pathlib import Path
from datetime import datetime, timedelta
from datasets import load_dataset
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import torch.optim as optim
import ray
import ray.data as rdata
import ray.train as train
from ray.train import (
ScalingConfig, RunConfig, FailureConfig,
CheckpointConfig, Checkpoint, get_checkpoint, get_context
)
from ray.train.torch import prepare_model, prepare_data_loader, TorchTrainer
# 02. Load NYC taxi passenger counts (30-min) from GitHub raw – no auth, ~1 MB
DATA_DIR = "/mnt/cluster_storage/nyc_taxi_ts"
os.makedirs(DATA_DIR, exist_ok=True)
url = "https://raw.githubusercontent.com/numenta/NAB/master/data/realKnownCause/nyc_taxi.csv"
csv_path = os.path.join(DATA_DIR, "nyc_taxi.csv")
# Download once; later runs reuse the cached CSV.
if not os.path.exists(csv_path):
    print("Downloading nyc_taxi.csv …")
    df = pd.read_csv(url)
    df.to_csv(csv_path, index=False)
else:
    print("File already present.")
df = pd.read_csv(csv_path)
# Parse timestamp and tidy
df["timestamp"] = pd.to_datetime(df["timestamp"])
df = df.set_index("timestamp").rename(columns={"value": "passengers"})
print("Rows:", len(df), "| Time span:", df.index.min(), "→", df.index.max())
df.head()

# 03. Resample to 30-minute bins, then normalize (z-score over the series).
# NOTE(review): the variable is named `hourly` but `resample("30min")` keeps
# half-hour bins, matching the source data's native frequency.
hourly = df.resample("30min").mean()
mean, std = hourly["passengers"].mean(), hourly["passengers"].std()
hourly["norm"] = (hourly["passengers"] - mean) / std
print(f"Half-Hourly rows: {len(hourly)} | mean={mean:.1f}, std={std:.1f}")
hourly.head()

# 04. Quick visual sanity-check — first two weeks
# NOTE(review): 24*14 = 336 half-hour samples ≈ 7 days at this frequency, so
# the slice (and plot title) actually cover about one week — confirm intent.
plt.figure(figsize=(10, 4))
hourly["passengers"].iloc[:24*14].plot()
plt.title("NYC-Taxi passengers - first 2 weeks of 2014")
plt.ylabel("# trips in hour")
plt.grid(True)
plt.tight_layout()
plt.show()
# 05. Build sliding-window dataset and write to Parquet
# ----------------------------------------------------
INPUT_WINDOW = 24 * 7  # 168 half-hour steps of history (~3.5 days)
HORIZON = 48           # predict next 24 h (48 half-hour steps)
STRIDE = 12            # slide 6 hours at a time
values = hourly["norm"].to_numpy(dtype="float32")  # already normalised
# ---- Time-aware split to avoid leakage between train and val ----
cut = int(0.9 * len(values))  # split by time index on the original series
train_records, val_records = [], []
for s in range(0, len(values) - INPUT_WINDOW - HORIZON + 1, STRIDE):
    past = values[s : s + INPUT_WINDOW]
    future = values[s + INPUT_WINDOW : s + INPUT_WINDOW + HORIZON]
    end = s + INPUT_WINDOW + HORIZON  # last index consumed by this window
    rec = {
        "series_id": 0,  # single series; kept for multi-series extensions
        "past": past.tolist(),
        "future": future.tolist(),
    }
    if end <= cut:      # entire window ends before the cut → train
        train_records.append(rec)
    elif s >= cut:      # window starts after the cut → val
        val_records.append(rec)
    # else: window crosses the cut → dropped to prevent leakage
print(f"Windows → train: {len(train_records)}, val: {len(val_records)}")

# Write both splits to Parquet with an explicit schema.
DATA_DIR = "/mnt/cluster_storage/nyc_taxi_ts"
PARQUET_DIR = os.path.join(DATA_DIR, "parquet")
os.makedirs(PARQUET_DIR, exist_ok=True)
schema = pa.schema([
    ("series_id", pa.int32()),
    ("past", pa.list_(pa.float32())),
    ("future", pa.list_(pa.float32()))
])

def write_parquet(records, fname):
    """Write a list of window dicts to `fname` using the shared schema."""
    pq.write_table(pa.Table.from_pylist(records, schema=schema), fname, version="2.6")

write_parquet(train_records, os.path.join(PARQUET_DIR, "train.parquet"))
write_parquet(val_records, os.path.join(PARQUET_DIR, "val.parquet"))
print("Parquet shards written →", PARQUET_DIR)
# 06. PyTorch Dataset that reads the Parquet shards
class TaxiWindowDataset(Dataset):
    """Sliding-window dataset read eagerly from one Parquet shard.

    Each item is ``(past, future)`` where ``past`` has shape (T, 1) and
    ``future`` has shape (H,), both float32.
    """

    def __init__(self, parquet_path):
        self.table = pq.read_table(parquet_path)
        # Decode both list columns once up front; items index into these.
        self.past = self.table.column("past").to_pylist()
        self.future = self.table.column("future").to_pylist()

    def __len__(self):
        return len(self.past)

    def __getitem__(self, idx):
        history = torch.tensor(self.past[idx], dtype=torch.float32).unsqueeze(-1)  # (T, 1)
        target = torch.tensor(self.future[idx], dtype=torch.float32)               # (H,)
        return history, target
# 07. Inspect one random batch
# Expected shapes given INPUT_WINDOW=168, HORIZON=48:
# Past (4, 168, 1), Future (4, 48).
loader = DataLoader(TaxiWindowDataset(os.path.join(PARQUET_DIR, "train.parquet")),
                    batch_size=4, shuffle=True)
xb, yb = next(iter(loader))
print("Past:", xb.shape, "Future:", yb.shape)
# 08. Helper to build Ray-prepared DataLoader
from ray.train.torch import prepare_data_loader
def build_dataloader(parquet_path, batch_size, shuffle=True):
    """Wrap a TaxiWindowDataset in a Ray-prepared DataLoader."""
    windows = TaxiWindowDataset(parquet_path)
    plain_loader = DataLoader(
        windows,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=2,
        drop_last=False,
    )
    # prepare_data_loader adds a DistributedSampler and device placement.
    return prepare_data_loader(plain_loader)
# 09. PositionalEncoding and Transformer model (univariate)
class PositionalEncoding(nn.Module):
    """Additive sinusoidal position encoding with dropout.

    A fixed (1, max_len, d_model) table of interleaved sin/cos waves is
    registered as a buffer; forward() adds its first T rows to the input.
    """

    def __init__(self, d_model, dropout=0.2, max_len=1024):
        super().__init__()
        self.dropout = nn.Dropout(dropout)
        positions = torch.arange(0, max_len, dtype=torch.float32).unsqueeze(1)
        frequencies = torch.exp(
            torch.arange(0, d_model, 2, dtype=torch.float32)
            * -(math.log(10000.0) / d_model)
        )
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(positions * frequencies)
        table[:, 1::2] = torch.cos(positions * frequencies)
        # Buffer, not a Parameter: saved with state_dict but never trained.
        self.register_buffer("pe", table.unsqueeze(0))

    def forward(self, x):
        # x: (B, T, d_model) — add the encodings for the first T positions.
        return self.dropout(x + self.pe[:, : x.size(1)])
class TimeSeriesTransformer(nn.Module):
    """Encoder-decoder Transformer for univariate multi-step forecasting.

    The 1-d series is projected into d_model dimensions, position-encoded,
    run through nn.Transformer, and the decoder output is projected back to
    one value per future step.
    """

    def __init__(self, input_window, horizon, d_model=64, nhead=8, num_layers=2):
        super().__init__()
        self.horizon = horizon
        self.d_model = d_model
        self.in_proj = nn.Linear(1, d_model)
        self.pos_enc = PositionalEncoding(d_model)
        self.tr_model = nn.Transformer(
            d_model=d_model,
            nhead=nhead,
            num_encoder_layers=num_layers,
            num_decoder_layers=num_layers,
            batch_first=True,
        )
        self.out_proj = nn.Linear(d_model, 1)

    def forward(self, past, decoder_input=None):
        """
        Args:
            past : (B, T, 1) — encoder input
            decoder_input : (B, F, 1) — optional decoder input (teacher forcing)
        Returns:
            preds : (B, F) — predicted future values
        """
        scale = math.sqrt(self.d_model)
        # Encoder input: project, scale, position-encode.
        src = self.pos_enc(self.in_proj(past) * scale)
        # Without teacher forcing, feed the last observed value F times.
        if decoder_input is None:
            decoder_input = past[:, -1:, :].repeat(1, self.horizon, 1)
        tgt = self.pos_enc(self.in_proj(decoder_input) * scale)
        decoded = self.tr_model(src, tgt)          # (B, F, d_model)
        return self.out_proj(decoded).squeeze(-1)  # (B, F)
# 10. Ray Train train_loop_per_worker with checkpointing, teacher forcing, and clean structure
def train_loop_per_worker(config):
    """Per-worker loop executed by Ray TorchTrainer.

    Builds a DDP-prepared TimeSeriesTransformer, optionally resumes from the
    latest Ray checkpoint, then alternates train/validate epochs. Rank 0
    attaches a checkpoint (model/optimizer/epoch) to every report so the run
    can resume after a failure; other ranks report metrics only.

    Args:
        config: dict with "lr", "bs", "epochs", "d_model", "nhead",
            "num_layers" (supplied via ``train_loop_config``).
    """
    import tempfile
    import torch
    import torch.nn as nn
    import torch.optim as optim
    from ray import train
    from ray.train import Checkpoint, get_context
    from ray.train import get_checkpoint

    # Same seed on every worker so initial weights agree before DDP syncs.
    torch.manual_seed(0)
    # ─────────────────────────────────────────────────────────────
    # 1) Model (DDP-prepared)
    # ─────────────────────────────────────────────────────────────
    model = TimeSeriesTransformer(
        input_window=INPUT_WINDOW,
        horizon=HORIZON,
        d_model=config["d_model"],
        nhead=config["nhead"],
        num_layers=config["num_layers"],
    )
    model = train.torch.prepare_model(model)
    # ─────────────────────────────────────────────────────────────
    # 2) Optimizer / Loss
    # ─────────────────────────────────────────────────────────────
    optimizer = optim.Adam(model.parameters(), lr=config["lr"])
    loss_fn = nn.SmoothL1Loss()
    # ─────────────────────────────────────────────────────────────
    # 3) Resume from checkpoint (if provided by Ray)
    # ─────────────────────────────────────────────────────────────
    rank = get_context().get_world_rank()
    start_epoch = 0
    ckpt = get_checkpoint()
    if ckpt:
        with ckpt.as_directory() as ckpt_dir:
            # Safe CPU load in case of device mismatch on resume
            model.load_state_dict(torch.load(os.path.join(ckpt_dir, "model.pt"), map_location="cpu"))
            opt_state_path = os.path.join(ckpt_dir, "optim.pt")
            if os.path.exists(opt_state_path):
                optimizer.load_state_dict(torch.load(opt_state_path, map_location="cpu"))
            meta = torch.load(os.path.join(ckpt_dir, "meta.pt"))
            # Resume at the epoch *after* the one stored in the checkpoint.
            start_epoch = int(meta.get("epoch", -1)) + 1
        if rank == 0:
            print(f"[Rank {rank}] ✅ Resumed from checkpoint at epoch {start_epoch}")
    # ─────────────────────────────────────────────────────────────
    # 4) Dataloaders for this worker
    # ─────────────────────────────────────────────────────────────
    train_loader = build_dataloader(
        os.path.join(PARQUET_DIR, "train.parquet"),
        batch_size=config["bs"],
        shuffle=True,
    )
    val_loader = build_dataloader(
        os.path.join(PARQUET_DIR, "val.parquet"),
        batch_size=config["bs"],
        shuffle=False,
    )
    # ─────────────────────────────────────────────────────────────
    # 5) Epoch loop
    # ─────────────────────────────────────────────────────────────
    for epoch in range(start_epoch, config["epochs"]):
        # ---- Train ----
        model.train()
        train_loss_sum = 0.0
        for past, future in train_loader:
            optimizer.zero_grad()
            # Teacher forcing: shift future targets to use as decoder input
            future = future.unsqueeze(-1)  # (B, F, 1)
            start_token = torch.zeros_like(future[:, :1])  # (B, 1, 1)
            decoder_input = torch.cat([start_token, future[:, :-1]], 1)  # (B, F, 1)
            # NOTE(review): training seeds the decoder with a zeros start token,
            # but inference (model(past)) repeats the last observation instead —
            # confirm this train/inference mismatch is intended.
            pred = model(past, decoder_input)  # (B, F)
            loss = loss_fn(pred, future.squeeze(-1))  # (B, F) vs (B, F)
            loss.backward()
            optimizer.step()
            train_loss_sum += float(loss.item())
        avg_train_loss = train_loss_sum / max(1, len(train_loader))
        # ---- Validate ----
        model.eval()
        val_loss_sum = 0.0
        with torch.no_grad():
            for past, future in val_loader:
                pred = model(past)  # default decoder-input path (repeats last obs)
                loss = loss_fn(pred, future)
                val_loss_sum += float(loss.item())
        avg_val_loss = val_loss_sum / max(1, len(val_loader))
        if rank == 0:
            print({"epoch": epoch, "train_loss": avg_train_loss, "val_loss": avg_val_loss})
        metrics = {
            "epoch": epoch,
            "train_loss": avg_train_loss,
            "val_loss": avg_val_loss,
        }
        # ─────────────────────────────────────────────────────────────
        # 6) Report + temp checkpoint (rank 0 attaches; others metrics-only)
        # ─────────────────────────────────────────────────────────────
        if rank == 0:
            with tempfile.TemporaryDirectory() as tmpdir:
                torch.save(model.state_dict(), os.path.join(tmpdir, "model.pt"))
                torch.save(optimizer.state_dict(), os.path.join(tmpdir, "optim.pt"))
                torch.save({"epoch": epoch}, os.path.join(tmpdir, "meta.pt"))
                ckpt_out = Checkpoint.from_directory(tmpdir)
                train.report(metrics, checkpoint=ckpt_out)
        else:
            train.report(metrics, checkpoint=None)
# 11. Launch training
# TorchTrainer fans `train_loop_per_worker` out to 8 GPU workers; RunConfig
# controls where results/checkpoints persist and how failures are retried.
trainer = TorchTrainer(
    train_loop_per_worker=train_loop_per_worker,
    train_loop_config={"lr": 1e-3, "bs": 4, "epochs": 20,
                       "d_model": 128, "nhead": 4, "num_layers": 3},
    scaling_config=ScalingConfig(num_workers=8, use_gpu=True),
    run_config=RunConfig(
        name="nyc_taxi_transformer",
        storage_path=os.path.join(DATA_DIR, "results"),
        checkpoint_config=CheckpointConfig(
            num_to_keep=20,
            # Let your loop decide when to checkpoint (each epoch). Scoring still applies.
            checkpoint_score_attribute="val_loss",
            checkpoint_score_order="min",
            # (Optional) If you want the last epoch’s checkpoint regardless of score:
            # checkpoint_at_end=True,
        ),
        failure_config=FailureConfig(max_failures=3),
    ),
)
result = trainer.fit()
print("Final metrics:", result.metrics)
# NOTE(review): `result.checkpoint` is the *latest* reported checkpoint; the
# val_loss-ranked ones live in `result.best_checkpoints` — confirm which is
# intended here.
best_ckpt = result.checkpoint
# 12. Plot train/val loss curves (from Ray Train results)
# Pull full metrics history Ray stored for this run
df = result.metrics_dataframe.copy()
# Keep only relevant columns (defensive in case Ray adds extras)
cols = [c for c in ["epoch", "train_loss", "val_loss"] if c in df.columns]
df = df[cols].dropna()
# If multiple reports per epoch exist, keep the latest one
if "epoch" in df.columns:
    df = df.sort_index().groupby("epoch", as_index=False).last()
# Plot
plt.figure(figsize=(7, 4))
if "train_loss" in df.columns:
    plt.plot(df["epoch"], df["train_loss"], marker="o", label="Train")
if "val_loss" in df.columns:
    plt.plot(df["epoch"], df["val_loss"], marker="o", label="Val")
plt.xlabel("Epoch")
plt.ylabel("SmoothL1 Loss")
plt.title("TimeSeriesTransformer — Train vs. Val Loss")
plt.grid(True)
plt.legend()
plt.tight_layout()
plt.show()
# 13. Demonstrate fault-tolerant resume
# Calling fit() again with the same RunConfig name/storage lets workers pick
# up the latest checkpoint via get_checkpoint() inside the loop.
result = trainer.fit()
print("Metrics after resume run:", result.metrics)
# 14. Ray Data inference helper — stateful per-actor predictor
class TimeSeriesBatchPredictor:
    """
    Stateful Ray Data predictor: keeps the TimeSeriesTransformer in memory per
    actor (GPU if available).

    Expects a Pandas batch with a 'past' column containing np.ndarray of shape
    (INPUT_WINDOW,). Returns a batch with a 'pred' column (np.ndarray of shape
    (HORIZON,)).
    """

    def __init__(self, checkpoint_path: str, model_kwargs: dict):
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # Recreate the model with the *same* hyperparams used during training.
        self.model = TimeSeriesTransformer(
            input_window=model_kwargs["input_window"],
            horizon=model_kwargs["horizon"],
            d_model=model_kwargs["d_model"],
            nhead=model_kwargs["nhead"],
            num_layers=model_kwargs["num_layers"],
        ).to(self.device).eval()
        # Load checkpoint weights once per actor.
        ckpt = Checkpoint.from_directory(checkpoint_path)
        with ckpt.as_directory() as ckpt_dir:
            state_dict = torch.load(os.path.join(ckpt_dir, "model.pt"), map_location="cpu")
        # Strip the DDP "module." prefix if the weights came from a prepared model.
        state_dict = {k.replace("module.", "", 1): v for k, v in state_dict.items()}
        self.model.load_state_dict(state_dict)

    def __call__(self, batch):
        # batch["past"]: each entry is an np.ndarray of shape (INPUT_WINDOW,).
        x = np.stack([p.astype(np.float32) for p in batch["past"]], axis=0)
        x = torch.from_numpy(x).unsqueeze(-1).to(self.device)  # (B, INPUT_WINDOW, 1)
        # Fix: scope gradient disabling to the forward pass with torch.no_grad()
        # instead of flipping the process-wide autograd switch in __init__, and
        # drop the unused `import pandas as pd`.
        with torch.no_grad():
            preds = self.model(x).cpu().numpy()  # (B, HORIZON)
        out = batch.copy()
        out["pred"] = list(preds)  # each row: np.ndarray (HORIZON,)
        return out[["pred"]]
# 15. Run inference on the latest window with Ray Data and plot
# 1) Prepare the latest window on the driver
past_norm = hourly["norm"].iloc[-INPUT_WINDOW:].to_numpy().astype(np.float32)
future_true = hourly["passengers"].iloc[-HORIZON:].to_numpy()  # for visualization only
# 2) Get the best checkpoint directory selected by Ray
# NOTE(review): as_directory() can materialize the checkpoint into a temporary
# directory that is removed when this `with` block exits, in which case
# best_ckpt_path would dangle by the time workers read it. Works for local
# directory-backed checkpoints — confirm for remote storage.
with result.checkpoint.as_directory() as ckpt_dir:
    best_ckpt_path = ckpt_dir  # path visible to workers
# 3) Build a tiny Ray Dataset and run inference on a GPU actor
model_kwargs = {
    "input_window": INPUT_WINDOW,
    "horizon": HORIZON,
    "d_model": 128,
    "nhead": 4,
    "num_layers": 3,
}
ds = rdata.from_items([{"past": past_norm}])
pred_ds = ds.map_batches(
    TimeSeriesBatchPredictor,
    fn_constructor_args=(best_ckpt_path, model_kwargs),
    batch_size=1,
    batch_format="pandas",
    concurrency=1,
    num_gpus=1,  # force placement on a GPU worker if available
)
pred_row = pred_ds.take(1)[0]
pred_norm = pred_row["pred"]  # np.ndarray (HORIZON,)
# 4) De-normalize on the driver (invert the z-score applied during prep)
mean, std = hourly["passengers"].mean(), hourly["passengers"].std()
pred = pred_norm * std + mean
past = past_norm * std + mean
# 5) Plot
t_past = np.arange(-INPUT_WINDOW, 0)
STEP_SIZE_HOURS = 0.5  # you mentioned 30-min data
t_future = np.arange(0, HORIZON) * STEP_SIZE_HOURS
plt.figure(figsize=(10, 4))
plt.plot(t_past, past, label="History", marker="o")
plt.plot(t_future, future_true, "--", label="Ground Truth")
plt.plot(t_future, pred, "-.", label="Forecast")
plt.axvline(0)
plt.xlabel("Hours relative")
plt.ylabel("# trips")
plt.title("NYC-Taxi Forecast (Ray Data Inference)")
plt.legend()
plt.grid(True)
plt.tight_layout()
plt.show()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/train/tutorials/ci/py_scripts/04c_time_series_workload_pattern.py",
"license": "Apache License 2.0",
"lines": 425,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/train/tutorials/ci/py_scripts/04d1_generative_cv_pattern.py | # 00. Runtime setup
import os
import sys
import subprocess
# Non-secret env var (safe to set here)
os.environ["RAY_TRAIN_V2_ENABLED"] = "1"
# Install Python dependencies (same pinned versions as build.sh)
subprocess.check_call([
sys.executable, "-m", "pip", "install", "--no-cache-dir",
"torch==2.8.0",
"torchvision==0.23.0",
"matplotlib==3.10.6",
"pyarrow==14.0.2",
"datasets==2.19.2",
"lightning==2.5.5",
])
# 01. Imports
# Standard libraries
import os
import io
import json
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from PIL import Image
# Ray
import ray, ray.data
from ray.train import ScalingConfig, get_context, RunConfig, FailureConfig, CheckpointConfig, Checkpoint, get_checkpoint
from ray.train.torch import TorchTrainer
from ray.train.lightning import RayLightningEnvironment
# PyTorch / Lightning
import lightning.pytorch as pl
import torch
from torch import nn
# Dataset
from datasets import load_dataset
import pyarrow as pa
import pyarrow.parquet as pq
from tqdm import tqdm
from torchvision.transforms import Compose, Resize, CenterCrop
import random
# 02. Load 10% of food101 (~7,500 images)
hf_ds = load_dataset("ethz/food101", split="train[:10%]")
# 03. Resize + encode as JPEG bytes (Ray Data; BYTES-BASED)
# Build Ray items with RAW BYTES (serializable) + label
rows = []
buf = io.BytesIO()
for ex in hf_ds:
img = ex["image"].convert("RGB")
buf.seek(0); buf.truncate(0)
img.save(buf, format="JPEG")
rows.append({"image_bytes_raw": buf.getvalue(), "label": ex["label"]})
# Create a Ray Dataset from serializable dicts
ds = ray.data.from_items(rows)
# Define preprocessing (runs on Ray workers)
transform = Compose([Resize(256), CenterCrop(224)])
def preprocess_images(batch_df):
    """Resize/crop each raw JPEG and re-encode it; drop rows that fail to decode.

    Returns a dict with parallel 'image_bytes' and 'label' lists.
    """
    kept_bytes = []
    kept_labels = []
    for raw, label in zip(batch_df["image_bytes_raw"], batch_df["label"]):
        try:
            image = Image.open(io.BytesIO(raw)).convert("RGB")
            image = transform(image)
            encoded = io.BytesIO()
            image.save(encoded, format="JPEG")
        except Exception:
            # Unreadable/corrupt row: skip it rather than failing the batch.
            continue
        kept_bytes.append(encoded.getvalue())
        kept_labels.append(label)
    return {"image_bytes": kept_bytes, "label": kept_labels}
# Parallel preprocessing
processed_ds = ds.map_batches(
preprocess_images,
batch_format="pandas",
num_cpus=1,
)
print("✅ Processed records:", processed_ds.count())
processed_ds.show(3)
# 04. Visualize the dataset (Ray Data version)
label_names = hf_ds.features["label"].names # int -> class name
samples = processed_ds.random_shuffle().take(9)
fig, axs = plt.subplots(3, 3, figsize=(8, 8))
fig.suptitle("Sample Resized Images from food101-lite", fontsize=16)
for ax, rec in zip(axs.flatten(), samples):
img = Image.open(io.BytesIO(rec["image_bytes"]))
ax.imshow(img)
ax.set_title(label_names[rec["label"]])
ax.axis("off")
plt.tight_layout()
plt.show()
# 05. Persist Ray Dataset to Parquet
import os
output_dir = "/mnt/cluster_storage/food101_lite/parquet_256"
os.makedirs(output_dir, exist_ok=True)
# Write each block as its own Parquet shard
processed_ds.write_parquet(output_dir)
print(f"✅ Wrote {processed_ds.count()} records to {output_dir}")
# 06. Load & Decode Food-101-Lite
# Path to Parquet shards written earlier
PARQUET_PATH = "/mnt/cluster_storage/food101_lite/parquet_256"
# Read the Parquet files (≈7 500 rows with JPEG bytes + label)
ds = ray.data.read_parquet(PARQUET_PATH)
print("Raw rows:", ds.count())
# Decode JPEG → CHW float32 in [‑1, 1]
def decode_and_normalize(batch_df):
    """Decode each JPEG into a CHW float32 array scaled to [-1, 1]."""

    def to_chw(jpeg_bytes):
        rgb = Image.open(io.BytesIO(jpeg_bytes)).convert("RGB")
        pixels = np.asarray(rgb, dtype=np.float32) / 255.0  # H x W x 3, 0-1
        pixels = (pixels - 0.5) / 0.5                       # shift to -1 ... 1
        return pixels.transpose(2, 0, 1)                    # HWC -> CHW

    return {"image": [to_chw(b) for b in batch_df["image_bytes"]]}
# Apply in parallel
# batch_format="pandas" → batch_df is a DataFrame, return dict of lists.
# default task‑based compute is sufficient for a stateless function.
ds = ds.map_batches(
decode_and_normalize,
batch_format="pandas",
# Use the default (task‑based) compute strategy since `decode_and_normalize` is a plain function.
num_cpus=1,
)
# Drop the original JPEG column to save memory
if "image_bytes" in ds.schema().names:
ds = ds.drop_columns(["image_bytes", "label"])
print("Decoded rows:", ds.count())
# 07. Shuffle & Train/Val Split
# Typical 80 / 20 split
TOTAL = ds.count()
train_count = int(TOTAL * 0.8)
ds = ds.random_shuffle() # expensive operation -- for large datasets, consider file shuffling or local shuffling. Ray offers both options
train_ds, val_ds = ds.split_at_indices([train_count])
print("Train rows:", train_ds.count())
print("Val rows:", val_ds.count())
# 08. Pixel De-noising Diffusion Model — final, logging via Lightning/Ray
class PixelDiffusion(pl.LightningModule):
    """Tiny CNN that predicts noise ϵ given noisy image + timestep."""

    def __init__(self, max_t=1000):
        super().__init__()
        self.max_t = max_t
        # (3 image + 1 timestep) channels in → 3-channel noise prediction out.
        self.net = nn.Sequential(
            nn.Conv2d(4, 32, 3, padding=1), nn.ReLU(),
            nn.Conv2d(32, 32, 3, padding=1), nn.ReLU(),
            nn.Conv2d(32, 3, 3, padding=1),
        )
        self.loss_fn = nn.MSELoss()

    def forward(self, noisy_img, t):
        """noisy_img: Bx3xHxW, t: B (int) or Bx1 scalar"""
        batch, _, height, width = noisy_img.shape
        # Broadcast the normalized timestep across one extra image channel.
        t_channel = (t / self.max_t).view(-1, 1, 1, 1).float().to(noisy_img.device)
        t_channel = t_channel.expand(-1, 1, height, width)
        return self.net(torch.cat([noisy_img, t_channel], dim=1))

    def _shared_step(self, batch):
        """Noise-prediction MSE on one batch (shared by train and val)."""
        clean = batch["image"].to(self.device)  # Bx3xHxW, -1…1
        noise = torch.randn_like(clean)         # ϵ ~ N(0, 1)
        t = torch.randint(0, self.max_t, (clean.size(0),), device=self.device)
        predicted = self(clean + noise, t)      # x_t = x_0 + ϵ
        return self.loss_fn(predicted, noise)

    def training_step(self, batch, batch_idx):
        loss = self._shared_step(batch)
        # Lightning aggregates per epoch; the Ray callback forwards the metric.
        self.log("train_loss", loss, on_epoch=True, prog_bar=False, sync_dist=True)
        return loss

    def validation_step(self, batch, batch_idx):
        loss = self._shared_step(batch)
        self.log("val_loss", loss, on_epoch=True, prog_bar=False, sync_dist=True)
        return loss

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=2e-4)
# 09. Train loop for Ray TorchTrainer (RayDDP + Lightning-native, NEW API aligned)
def train_loop(config):
    """
    Lightning-owned loop with Ray integration:
      - RayDDPStrategy for multi-worker DDP
      - RayLightningEnvironment for ranks/addrs
      - RayTrainReportCallback to forward metrics + checkpoints to Ray
      - Resume from the Ray-provided Lightning checkpoint ("checkpoint.ckpt")

    Args:
        config: dict; reads optional "batch_size" (default 32) and
            "epochs" (default 10).
    """
    import warnings
    warnings.filterwarnings(
        "ignore",
        message="barrier.*using the device under current context",
    )
    import os
    import torch
    import lightning.pytorch as pl
    from ray.train import get_checkpoint, get_context
    from ray.train.lightning import (
        RayLightningEnvironment,
        RayDDPStrategy,
        RayTrainReportCallback,
        prepare_trainer,
    )

    # ---- Data shards from Ray Data → iterable loaders ----
    train_ds = ray.train.get_dataset_shard("train")
    val_ds = ray.train.get_dataset_shard("val")
    train_loader = train_ds.iter_torch_batches(batch_size=config.get("batch_size", 32))
    val_loader = val_ds.iter_torch_batches(batch_size=config.get("batch_size", 32))
    # ---- Model ----
    model = PixelDiffusion()
    # ---- Lightning Trainer configured for Ray ----
    # Local scratch for Lightning's own checkpoints; Ray persists its copies
    # to RunConfig.storage_path separately.
    CKPT_ROOT = os.path.join(tempfile.gettempdir(), "ray_pl_ckpts")
    os.makedirs(CKPT_ROOT, exist_ok=True)
    trainer = pl.Trainer(
        max_epochs=config.get("epochs", 10),
        devices="auto",
        accelerator="auto",
        strategy=RayDDPStrategy(),
        plugins=[RayLightningEnvironment()],
        callbacks=[
            RayTrainReportCallback(),
            pl.callbacks.ModelCheckpoint(
                dirpath=CKPT_ROOT,  # local scratch is fine (or leave None to use default)
                filename="epoch-{epoch:03d}",
                every_n_epochs=1,
                save_top_k=-1,
                save_last=True,
            ),
        ],
        default_root_dir=CKPT_ROOT,  # also local
        enable_progress_bar=False,
        check_val_every_n_epoch=1,
    )
    # Wire up ranks/world size with Ray
    trainer = prepare_trainer(trainer)
    # ---- Resume from latest Ray-provided Lightning checkpoint (if any) ----
    ckpt_path = None
    ckpt = get_checkpoint()
    if ckpt:
        with ckpt.as_directory() as d:
            candidate = os.path.join(d, "checkpoint.ckpt")
            if os.path.exists(candidate):
                ckpt_path = candidate
        # NOTE(review): ckpt_path points inside the as_directory() context that
        # has already exited — confirm the directory outlives the context for
        # this checkpoint backend.
        if get_context().get_world_rank() == 0:
            print(f"✅ Resuming from Lightning checkpoint: {ckpt_path}")
    # ---- Let Lightning own the loop ----
    trainer.fit(
        model,
        train_dataloaders=train_loader,
        val_dataloaders=val_loader,
        ckpt_path=ckpt_path,
    )
# 10. Launch distributed training (same API, now Lightning-native inside)
trainer = TorchTrainer(
train_loop_per_worker=train_loop,
scaling_config=ScalingConfig(num_workers=8, use_gpu=True),
datasets={"train": train_ds, "val": val_ds},
run_config=RunConfig(
name="food101_diffusion_ft",
storage_path="/mnt/cluster_storage/generative_cv/food101_diffusion_results",
checkpoint_config=CheckpointConfig(
num_to_keep=5,
checkpoint_score_attribute="epoch",
checkpoint_score_order="max",
),
failure_config=FailureConfig(max_failures=1),
),
)
result = trainer.fit()
print("Training complete →", result.metrics)
best_ckpt = result.checkpoint
# 11. Plot train/val loss curves (Ray + Lightning integration)
# Ray stores all metrics emitted by Lightning in a dataframe
df = result.metrics_dataframe
# Display first few rows (optional sanity check)
print(df.head())
# Convert and clean up
if "train_loss" not in df.columns or "val_loss" not in df.columns:
raise ValueError("Expected train_loss and val_loss in metrics. "
"Did you call self.log('train_loss') / self.log('val_loss') in PixelDiffusion?")
plt.figure(figsize=(7, 4))
plt.plot(df["epoch"], df["train_loss"], marker="o", label="Train")
plt.plot(df["epoch"], df["val_loss"], marker="o", label="Val")
plt.xlabel("Epoch")
plt.ylabel("MSE Loss")
plt.title("Pixel Diffusion – Loss per Epoch (Ray Train + Lightning)")
plt.grid(True)
plt.legend()
plt.tight_layout()
plt.show()
# 12. Run the trainer again to demonstrate resuming from latest checkpoint
result = trainer.fit()
print("Training complete →", result.metrics)
# 13. Reverse diffusion sampling
def sample_image(model, steps=50, device="cpu"):
    """Run reverse diffusion from pure noise and return an HWC image in [0, 1]."""
    model.eval()
    with torch.no_grad():
        canvas = torch.randn(1, 3, 224, 224, device=device)
        for timestep in range(steps - 1, -1, -1):
            t = torch.tensor([timestep], device=device)
            # Simple Euler update: subtract a fraction of the predicted noise.
            canvas = canvas - model(canvas, t) * 0.1
        # Map from roughly [-1, 1] back into displayable [0, 1].
        canvas = torch.clamp(canvas * 0.5 + 0.5, 0.0, 1.0)
    return canvas.squeeze(0).cpu().permute(1, 2, 0).numpy()
# 14. Generate and display samples
import glob
from ray.train import Checkpoint
assert best_ckpt is not None, "Checkpoint is missing. Did training run and complete?"
# Restore model weights from Ray Train checkpoint (Lightning-first)
model = PixelDiffusion()
with best_ckpt.as_directory() as ckpt_dir:
# Prefer Lightning checkpoints (*.ckpt) saved by ModelCheckpoint
ckpt_files = glob.glob(os.path.join(ckpt_dir, "*.ckpt"))
if ckpt_files:
pl_ckpt = torch.load(ckpt_files[0], map_location="cpu")
state = pl_ckpt.get("state_dict", pl_ckpt)
model.load_state_dict(state, strict=False)
elif os.path.exists(os.path.join(ckpt_dir, "model.pt")):
# Fallback for older/manual checkpoints
state = torch.load(os.path.join(ckpt_dir, "model.pt"), map_location="cpu")
model.load_state_dict(state, strict=False)
else:
raise FileNotFoundError(
f"No Lightning .ckpt or model.pt found in: {ckpt_dir}"
)
# Move to device and sample
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)
# Generate three images
samples = [sample_image(model, steps=50, device=device) for _ in range(3)]
fig, axs = plt.subplots(1, 3, figsize=(9, 3))
for ax, img in zip(axs, samples):
ax.imshow(img)
ax.axis("off")
plt.suptitle("Food-101 Diffusion Samples (unconditional)")
plt.tight_layout()
plt.show()
# 15. Cleanup -- delete checkpoints and metrics from model training
TARGET_PATH = "/mnt/cluster_storage/generative_cv"
if os.path.exists(TARGET_PATH):
shutil.rmtree(TARGET_PATH)
print(f"✅ Deleted everything under {TARGET_PATH}")
else:
print(f"⚠️ Path does not exist: {TARGET_PATH}")
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/train/tutorials/ci/py_scripts/04d1_generative_cv_pattern.py",
"license": "Apache License 2.0",
"lines": 345,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/train/tutorials/ci/py_scripts/04d2_policy_learning_pattern.py | # 00. Runtime setup
import os
import sys
import subprocess
# Non-secret env var
os.environ["RAY_TRAIN_V2_ENABLED"] = "1"
# Install Python dependencies
subprocess.check_call([
sys.executable, "-m", "pip", "install", "--no-cache-dir",
"torch==2.8.0",
"matplotlib==3.10.6",
"lightning==2.5.5",
"pyarrow==14.0.2",
])
# 01. Imports
# Standard Python packages for math, plotting, and data handling
import os
import shutil
import glob
import json
import uuid
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import gymnasium as gym
# Ray libraries for distributed data and training
import ray
import ray.data
from ray.train.lightning import RayLightningEnvironment
from ray.train import ScalingConfig, RunConfig, FailureConfig, CheckpointConfig, get_context, get_checkpoint, report, Checkpoint
from ray.train.torch import TorchTrainer
# PyTorch Lightning and base PyTorch for model definition and training
import lightning.pytorch as pl
import torch
from torch.utils.data import DataLoader
from torch import nn
# 02. Generate Pendulum offline dataset
def make_pendulum_dataset(n_steps: int = 10_000, seed: int = 0):
    """
    Roll out a random policy in Pendulum-v1 and log (obs, noisy_action, noise, timestep).

    Args:
        n_steps: number of environment transitions to record.
        seed: reset seed for the first episode (generalized from the previous
            hard-coded ``seed=0``; the default preserves old behavior).

    Returns a Ray Dataset ready for sharding.
    """
    env = gym.make("Pendulum-v1")
    obs, _ = env.reset(seed=seed)
    data = []
    for _ in range(n_steps):
        action = env.action_space.sample().astype(np.float32)  # shape (1,)
        noise = np.random.randn(*action.shape).astype(np.float32)
        noisy_action = action + noise  # add Gaussian noise
        timestep = np.random.randint(0, 1000, dtype=np.int64)
        data.append(
            {
                "obs": obs.astype(np.float32),  # shape (3,)
                "noisy_action": noisy_action,   # shape (1,)
                "noise": noise,                 # shape (1,)
                "timestep": timestep,
            }
        )
        # Advance the environment; start a fresh episode when it ends.
        obs, _, terminated, truncated, _ = env.step(action)
        if terminated or truncated:
            obs, _ = env.reset()
    return ray.data.from_items(data)
ds = make_pendulum_dataset()
# 03. Normalize and split (vector obs ∈ [-π, π])
# Normalize pixel values from [0, 1] to [-1, 1] for training
def normalize(batch):
    """Scale 'obs' from roughly [-π, π] down to [-1, 1]; mutates and returns batch."""
    batch.update(obs=batch["obs"] / np.pi)
    return batch
# Apply normalization in parallel using Ray Data
ds = ds.map_batches(normalize, batch_format="numpy")
# Count total number of items (triggers actual execution)
total = ds.count()
print("Total dataset size:", total)
# Shuffle and split dataset into 80% training and 20% validation
split_idx = int(total * 0.8)
ds = ds.random_shuffle()
train_ds, val_ds = ds.split_at_indices([split_idx])
print("Train size:", train_ds.count())
print("Val size:", val_ds.count())
# 04. DiffusionPolicy for low-dim observation (3D) and action (1D)
class DiffusionPolicy(pl.LightningModule):
    """Tiny MLP that predicts injected noise ϵ given (obs, noisy_action, timestep)."""

    def __init__(self, obs_dim: int = 3, act_dim: int = 1, max_t: int = 1000):
        super().__init__()
        self.max_t = max_t
        # 3D obs + 1D action + 1 timestep → 1D noise
        self.net = nn.Sequential(
            nn.Linear(obs_dim + act_dim + 1, 128),
            nn.ReLU(),
            nn.Linear(128, act_dim),
        )
        self.loss_fn = nn.MSELoss()

    # ---------- forward ----------
    def forward(self, obs, noisy_action, timestep):
        """Predict the noise. obs: (B, obs_dim); noisy_action: (B, act_dim);
        timestep: (B,) ints in [0, max_t)."""
        # Normalize the integer timestep to [0, 1) and append as one feature.
        t = timestep.view(-1, 1).float() / self.max_t
        x = torch.cat([obs, noisy_action, t], dim=1)
        return self.net(x)

    # ---------- shared loss ----------
    def _shared_step(self, batch):
        # MSE between predicted and actually injected noise.
        pred = self.forward(
            batch["obs"].float(),
            batch["noisy_action"],
            batch["timestep"],
        )
        return self.loss_fn(pred, batch["noise"])

    # ---------- training / validation ----------
    def training_step(self, batch, batch_idx):
        loss = self._shared_step(batch)
        self.log("train_loss", loss, on_epoch=True, prog_bar=False, sync_dist=True)
        return loss

    def validation_step(self, batch, batch_idx):
        loss = self._shared_step(batch)
        self.log("val_loss", loss, on_epoch=True, prog_bar=False, sync_dist=True)
        return loss

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=1e-3)
# 05. Ray Train Lightning-native training loop
def train_loop(config):
    """Per-worker Lightning loop for DiffusionPolicy under Ray Train.

    Uses RayDDPStrategy + RayLightningEnvironment + RayTrainReportCallback,
    and resumes from the Ray-provided "checkpoint.ckpt" when one exists.

    Args:
        config: dict; reads optional "batch_size" (default 32) and
            "epochs" (default 10).
    """
    import os, tempfile, torch, warnings
    import lightning.pytorch as pl
    from ray.train import get_checkpoint, get_context
    from ray.train.lightning import (
        RayLightningEnvironment,
        RayDDPStrategy,
        RayTrainReportCallback,
        prepare_trainer,
    )

    warnings.filterwarnings(
        "ignore", message="barrier.*using the device under current context"
    )
    # ---- Ray Dataset shards → iterable torch batches ----
    train_ds = ray.train.get_dataset_shard("train")
    val_ds = ray.train.get_dataset_shard("val")
    train_loader = train_ds.iter_torch_batches(batch_size=config.get("batch_size", 32))
    val_loader = val_ds.iter_torch_batches(batch_size=config.get("batch_size", 32))
    # ---- Model ----
    model = DiffusionPolicy()
    # ---- Local scratch for PL checkpoints (Ray will persist to storage_path) ----
    CKPT_ROOT = os.path.join(tempfile.gettempdir(), "ray_pl_ckpts")
    os.makedirs(CKPT_ROOT, exist_ok=True)
    # ---- Lightning Trainer configured for Ray ----
    trainer = pl.Trainer(
        max_epochs=config.get("epochs", 10),
        devices="auto",
        accelerator="auto",
        strategy=RayDDPStrategy(),
        plugins=[RayLightningEnvironment()],
        callbacks=[
            RayTrainReportCallback(),  # forwards metrics + ckpt to Ray
            pl.callbacks.ModelCheckpoint(  # local PL checkpoints each epoch
                dirpath=CKPT_ROOT,
                filename="epoch-{epoch:03d}",
                every_n_epochs=1,
                save_top_k=-1,
                save_last=True,
            ),
        ],
        default_root_dir=CKPT_ROOT,
        enable_progress_bar=False,
        check_val_every_n_epoch=1,
    )
    # ---- Prepare trainer for Ray environment ----
    trainer = prepare_trainer(trainer)
    # ---- Resume from Ray checkpoint if available ----
    ckpt_path = None
    ckpt = get_checkpoint()
    if ckpt:
        with ckpt.as_directory() as d:
            candidate = os.path.join(d, "checkpoint.ckpt")
            if os.path.exists(candidate):
                ckpt_path = candidate
        # NOTE(review): ckpt_path points inside the as_directory() context that
        # has already exited — confirm the directory outlives the context for
        # this checkpoint backend.
        if get_context().get_world_rank() == 0:
            print(f"✅ Resuming from Lightning checkpoint: {ckpt_path}")
    # ---- Run training (Lightning owns the loop) ----
    trainer.fit(model, train_dataloaders=train_loader, val_dataloaders=val_loader, ckpt_path=ckpt_path)
# 06. Launch distributed training with Ray TorchTrainer
trainer = TorchTrainer(
train_loop,
scaling_config=ScalingConfig(num_workers=8, use_gpu=True),
datasets={"train": train_ds, "val": val_ds},
run_config=RunConfig(
name="pendulum_diffusion_ft",
storage_path="/mnt/cluster_storage/pendulum_diffusion/pendulum_diffusion_results",
checkpoint_config=CheckpointConfig(
num_to_keep=5,
checkpoint_score_attribute="epoch",
checkpoint_score_order="max",
),
failure_config=FailureConfig(max_failures=3),
),
)
result = trainer.fit()
print("Training complete →", result.metrics)
best_ckpt = result.checkpoint # latest Ray-managed Lightning checkpoint
# 07. Plot training and validation loss (Ray + Lightning integration)
df = result.metrics_dataframe
print(df.head()) # optional sanity check
if "train_loss" not in df.columns or "val_loss" not in df.columns:
raise ValueError("train_loss / val_loss missing. Did you log them via self.log()?")
plt.figure(figsize=(7, 4))
plt.plot(df["epoch"], df["train_loss"], marker="o", label="Train")
plt.plot(df["epoch"], df["val_loss"], marker="o", label="Val")
plt.xlabel("Epoch")
plt.ylabel("MSE Loss")
plt.title("Pendulum Diffusion - Loss per Epoch (Ray Train + Lightning)")
plt.grid(True)
plt.legend()
plt.tight_layout()
plt.show()
# 08. Reverse diffusion sampling for 1-D action
# Function to simulate reverse diffusion process
def sample_action(model, obs, n_steps=50, device="cpu"):
    """Reverse-diffuse Gaussian noise into a Pendulum action.

    obs: torch.Tensor of shape (3,); returns torch.Tensor of shape (1,).
    """
    model.eval()
    with torch.no_grad():
        # Batch the observation and apply the same [-π, π] → [-1, 1] scaling
        # used during training.
        conditioned = obs.unsqueeze(0).to(device) / np.pi  # [1, 3]
        action = torch.randn(1, 1).to(device)  # start from pure noise
        for step in range(n_steps - 1, -1, -1):
            t = torch.tensor([step], device=device)
            action = action - model(conditioned, action, t) * 0.1
        return action.squeeze(0)
# 09. In-notebook sampling from trained model (Ray Lightning checkpoint)
# A plausible pendulum state: [cos(theta), sin(theta), theta_dot]
obs_sample = torch.tensor([1.0, 0.0, 0.0], dtype=torch.float32) # shape (3,)
assert best_ckpt is not None, "No checkpoint found — did training complete successfully?"
# Load the trained model from Ray's latest Lightning checkpoint
model = DiffusionPolicy(obs_dim=3, act_dim=1)
with best_ckpt.as_directory() as ckpt_dir:
# RayTrainReportCallback saves a file named "checkpoint.ckpt"
ckpt_file = os.path.join(ckpt_dir, "checkpoint.ckpt")
if not os.path.exists(ckpt_file):
# Fallback: search any .ckpt file if name differs
candidates = glob.glob(os.path.join(ckpt_dir, "*.ckpt"))
ckpt_file = candidates[0] if candidates else None
assert ckpt_file is not None, f"No Lightning checkpoint found in {ckpt_dir}"
state = torch.load(ckpt_file, map_location="cpu")
model.load_state_dict(state.get("state_dict", state), strict=False)
# Move to device
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)
# Run reverse diffusion sampling
action = sample_action(model, obs_sample, n_steps=50, device=device)
print("Sampled action:", action)
# 10. Cleanup -- delete checkpoints and metrics from model training
TARGET_PATH = "/mnt/cluster_storage/pendulum_diffusion"
if os.path.exists(TARGET_PATH):
shutil.rmtree(TARGET_PATH)
print(f"✅ Deleted everything under {TARGET_PATH}")
else:
print(f"⚠️ Path does not exist: {TARGET_PATH}")
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/train/tutorials/ci/py_scripts/04d2_policy_learning_pattern.py",
"license": "Apache License 2.0",
"lines": 258,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/train/tutorials/ci/py_scripts/04e_rec_sys_workload_pattern.py | # 00. Runtime setup
import os
import sys
import subprocess

# Non-secret env var
os.environ["RAY_TRAIN_V2_ENABLED"] = "1"

# Pinned dependency versions for this tutorial.
_PINNED_DEPS = ("torch==2.8.0", "matplotlib==3.10.6", "pyarrow==14.0.2")
# Install the dependencies into the interpreter that runs this script.
subprocess.check_call(
    [sys.executable, "-m", "pip", "install", "--no-cache-dir", *_PINNED_DEPS]
)
# 01. Imports
# Standard libraries
import os
import uuid
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import zipfile
import shutil
import tempfile
# PyTorch
import torch
from torch import nn
import torch.nn.functional as F
# Ray
import ray
import ray.data
from ray.train import ScalingConfig, RunConfig, CheckpointConfig, FailureConfig, Checkpoint, get_checkpoint, get_context, get_dataset_shard, report
from ray.train.torch import TorchTrainer, prepare_model
# Other
from tqdm import tqdm
import subprocess
# 02. Load MovieLens 100K Dataset and store in /mnt/cluster_storage/ as CSV + Parquet
# Define clean working paths
DATA_URL = "http://files.grouplens.org/datasets/movielens/ml-100k.zip"
LOCAL_ZIP = "/mnt/cluster_storage/rec_sys_tutorial/ml-100k.zip"
EXTRACT_DIR = "/mnt/cluster_storage/rec_sys_tutorial/ml-100k"
OUTPUT_CSV = "/mnt/cluster_storage/rec_sys_tutorial/raw/ratings.csv"
PARQUET_DIR = "/mnt/cluster_storage/rec_sys_tutorial/raw/ratings_parquet"
# Ensure target directories exist
os.makedirs("/mnt/cluster_storage/rec_sys_tutorial/raw", exist_ok=True)
# Download only if not already done (relies on the external `wget` binary being available)
if not os.path.exists(LOCAL_ZIP):
    subprocess.run(
        ["wget", "-q", DATA_URL, "-O", LOCAL_ZIP],
        check=True,
    )
# Extract cleanly (skipped when the extracted directory already exists)
if not os.path.exists(EXTRACT_DIR):
    import zipfile
    with zipfile.ZipFile(LOCAL_ZIP, 'r') as zip_ref:
        zip_ref.extractall("/mnt/cluster_storage/rec_sys_tutorial")
# Load raw file (u.data is tab-separated: user, item, rating, unix timestamp)
raw_path = os.path.join(EXTRACT_DIR, "u.data")
df = pd.read_csv(raw_path, sep="\t", names=["user_id", "item_id", "rating", "timestamp"])
# Persist CSV (kept for later inference cell that expects CSV)
df.to_csv(OUTPUT_CSV, index=False)
# Persist a Parquet *dataset* (multiple files) to simulate blob storage layout
if os.path.exists(PARQUET_DIR):
    shutil.rmtree(PARQUET_DIR)
os.makedirs(PARQUET_DIR, exist_ok=True)
NUM_PARQUET_SHARDS = 8
for i, shard in enumerate(np.array_split(df, NUM_PARQUET_SHARDS)):
    shard.to_parquet(os.path.join(PARQUET_DIR, f"part-{i:02d}.parquet"), index=False)
print(f"✅ Loaded {len(df):,} ratings → CSV: {OUTPUT_CSV}")
print(f"✅ Wrote Parquet dataset with {NUM_PARQUET_SHARDS} shards → {PARQUET_DIR}")
# 03. Point to Parquet dataset URI (overridable via the RATINGS_PARQUET_URI env var)
_DEFAULT_PARQUET_URI = "/mnt/cluster_storage/rec_sys_tutorial/raw/ratings_parquet"
DATASET_URI = os.environ.get("RATINGS_PARQUET_URI", _DEFAULT_PARQUET_URI)
print("Parquet dataset URI:", DATASET_URI)
# 04. Visualize dataset: ratings, user and item activity
# Use encoded indices if present; otherwise fall back to raw IDs
# (at this point `df` still holds raw IDs from cell 02 — the encoded columns
# only exist on the Ray Dataset — so the fallback branch is the one taken)
user_col = "user_idx" if "user_idx" in df.columns else "user_id"
item_col = "item_idx" if "item_idx" in df.columns else "item_id"
plt.figure(figsize=(12, 4))
# Rating distribution (bin edges centered on the integer ratings 1..5)
plt.subplot(1, 3, 1)
df["rating"].hist(bins=[0.5,1.5,2.5,3.5,4.5,5.5], edgecolor='black')
plt.title("Rating Distribution")
plt.xlabel("Rating"); plt.ylabel("Frequency")
# Number of ratings per user
plt.subplot(1, 3, 2)
df[user_col].value_counts().hist(bins=30, edgecolor='black')
plt.title("Ratings per User")
plt.xlabel("# Ratings"); plt.ylabel("Users")
# Number of ratings per item
plt.subplot(1, 3, 3)
df[item_col].value_counts().hist(bins=30, edgecolor='black')
plt.title("Ratings per Item")
plt.xlabel("# Ratings"); plt.ylabel("Items")
plt.tight_layout()
plt.show()
# 05. Create Ray Dataset by reading Parquet, then encode IDs via Ray
# Read Parquet dataset directly
ratings_ds = ray.data.read_parquet(DATASET_URI)
print("✅ Parquet dataset loaded (streaming, non-materialized)")
ratings_ds.show(3)
# ---- Build global ID mappings on the driver ----
# groupby().count() is used to enumerate the distinct IDs; take_all() pulls the
# (small) per-key result to the driver. NOTE(review): this scans the dataset
# twice just to list keys — fine at 100K rows, revisit for larger data.
user_ids = sorted([r["user_id"] for r in ratings_ds.groupby("user_id").count().take_all()])
item_ids = sorted([r["item_id"] for r in ratings_ds.groupby("item_id").count().take_all()])
# Dense, contiguous 0..N-1 indices sized for the embedding tables
user2idx = {uid: j for j, uid in enumerate(user_ids)}
item2idx = {iid: j for j, iid in enumerate(item_ids)}
NUM_USERS = len(user2idx)
NUM_ITEMS = len(item2idx)
print(f"Users: {NUM_USERS:,} | Items: {NUM_ITEMS:,}")
def encode_batch(pdf: pd.DataFrame) -> pd.DataFrame:
    """Encode raw user/item IDs to contiguous 0-based indices.

    Uses the driver-built ``user2idx``/``item2idx`` mappings (captured by
    closure and shipped to the workers by Ray) and keeps only the columns
    the trainer needs.

    Note: builds a *new* DataFrame instead of assigning columns on the
    input — Ray Data may hand ``map_batches`` a zero-copy (read-only)
    batch, and mutating the input batch in place is unsupported.
    """
    return pd.DataFrame(
        {
            "user_idx": pdf["user_id"].map(user2idx).astype("int64"),
            "item_idx": pdf["item_id"].map(item2idx).astype("int64"),
            "rating": pdf["rating"],
            "timestamp": pdf["timestamp"],
        }
    )
# Apply the encoding lazily across the dataset (pandas batch format to match encode_batch)
ratings_ds = ratings_ds.map_batches(encode_batch, batch_format="pandas")
print("✅ Encoded Ray Dataset schema:", ratings_ds.schema())
ratings_ds.show(3)
# 06. Train/val split using Ray Data (lazy, avoids materialization)
TRAIN_FRAC = 0.8
SEED = 42  # for reproducibility
# Block-level shuffle + proportional split (approximate by block, lazy)
train_ds, val_ds = (
    ratings_ds
    .randomize_block_order(seed=SEED)  # lightweight; no row-level materialization
    .split_proportionately([TRAIN_FRAC])  # returns [train, remainder]
)
# count() triggers execution here, so the split sizes are known up front
print("✅ Train/Val Split:")
print(f" Train → {train_ds.count():,} rows")
print(f" Val → {val_ds.count():,} rows")
# 07. Define matrix factorization model
class MatrixFactorizationModel(nn.Module):
    """Classic matrix-factorization recommender.

    Learns one embedding per user and per item; the predicted rating for a
    (user, item) pair is the dot product of the two embedding vectors.
    """

    def __init__(self, num_users: int, num_items: int, embedding_dim: int = 64):
        super().__init__()
        self.user_embedding = nn.Embedding(num_users, embedding_dim)
        self.item_embedding = nn.Embedding(num_items, embedding_dim)

    def forward(self, user_idx, item_idx):
        """Return per-pair dot-product scores for equal-length index tensors."""
        u_vecs = self.user_embedding(user_idx)
        i_vecs = self.item_embedding(item_idx)
        return (u_vecs * i_vecs).sum(dim=1)
# 08. Define Ray Train loop (with val loss, checkpointing, and Ray-managed metrics)
def train_loop_per_worker(config):
    """Per-worker training loop executed by Ray Train's TorchTrainer.

    Reads the "train"/"val" dataset shards, trains the matrix-factorization
    model with MSE loss, reports metrics every epoch, and (on rank 0 only)
    attaches a checkpoint so the run can auto-resume after a failure.
    """
    import tempfile
    # ---------------- Dataset shards -> PyTorch-style iterators ---------------- #
    train_ds = get_dataset_shard("train")
    val_ds = get_dataset_shard("val")
    # NOTE(review): the loaders are created once, outside the epoch loop —
    # confirm iter_torch_batches is re-iterable across epochs on this Ray version.
    train_loader = train_ds.iter_torch_batches(batch_size=512, dtypes=torch.float32)
    val_loader = val_ds.iter_torch_batches(batch_size=512, dtypes=torch.float32)
    # ---------------- Model / Optimizer ---------------- #
    model = MatrixFactorizationModel(
        num_users=config["num_users"],
        num_items=config["num_items"],
        embedding_dim=config.get("embedding_dim", 64),
    )
    model = prepare_model(model)  # moves to device and wraps for distributed training
    optimizer = torch.optim.Adam(model.parameters(), lr=config.get("lr", 1e-3))
    # ---------------- Checkpointing setup ---------------- #
    rank = get_context().get_world_rank()
    start_epoch = 0
    # If a checkpoint exists (auto-resume), load it
    ckpt = get_checkpoint()
    if ckpt:
        with ckpt.as_directory() as ckpt_dir:
            model.load_state_dict(
                torch.load(os.path.join(ckpt_dir, "model.pt"), map_location="cpu")
            )
            # meta.pt records the last completed epoch; resume from the next one
            start_epoch = torch.load(os.path.join(ckpt_dir, "meta.pt")).get("epoch", 0) + 1
            if rank == 0:
                print(f"[Rank {rank}] ✅ Resumed from checkpoint at epoch {start_epoch}")
    # ---------------- Training loop ---------------- #
    for epoch in range(start_epoch, config.get("epochs", 5)):
        # ---- Train ----
        model.train()
        train_losses = []
        for batch in train_loader:
            user = batch["user_idx"].long()
            item = batch["item_idx"].long()
            rating = batch["rating"].float()
            pred = model(user, item)
            loss = F.mse_loss(pred, rating)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_losses.append(loss.item())
        # max(1, ...) guards against a zero-batch shard dividing by zero
        avg_train_loss = sum(train_losses) / max(1, len(train_losses))
        # ---- Validate ----
        model.eval()
        val_losses = []
        with torch.no_grad():
            for batch in val_loader:
                user = batch["user_idx"].long()
                item = batch["item_idx"].long()
                rating = batch["rating"].float()
                pred = model(user, item)
                loss = F.mse_loss(pred, rating)
                val_losses.append(loss.item())
        avg_val_loss = sum(val_losses) / max(1, len(val_losses))
        # Console log (optional)
        if rank == 0:
            print(f"[Epoch {epoch}] Train MSE: {avg_train_loss:.4f} | Val MSE: {avg_val_loss:.4f}")
        metrics = {
            "epoch": epoch,
            "train_loss": avg_train_loss,
            "val_loss": avg_val_loss,
        }
        # ---- Save checkpoint & report (rank 0 attaches checkpoint; others report metrics only) ----
        if rank == 0:
            with tempfile.TemporaryDirectory() as tmpdir:
                torch.save(model.state_dict(), os.path.join(tmpdir, "model.pt"))
                torch.save({"epoch": epoch}, os.path.join(tmpdir, "meta.pt"))
                ckpt_out = Checkpoint.from_directory(tmpdir)
                report(metrics, checkpoint=ckpt_out)
        else:
            report(metrics, checkpoint=None)
# 09. Launch distributed training with Ray TorchTrainer
# Define config params (use Ray-derived counts)
train_config = {
    "num_users": NUM_USERS,
    "num_items": NUM_ITEMS,
    "embedding_dim": 64,
    "lr": 1e-3,
    "epochs": 20,
}
trainer = TorchTrainer(
    train_loop_per_worker=train_loop_per_worker,
    train_loop_config=train_config,
    scaling_config=ScalingConfig(
        num_workers=8, # Increase as needed
        use_gpu=True # Requires GPU nodes; set False for CPU-only clusters
    ),
    datasets={"train": train_ds, "val": val_ds},
    run_config=RunConfig(
        name="mf_ray_train",
        storage_path="/mnt/cluster_storage/rec_sys_tutorial/results",
        checkpoint_config=CheckpointConfig(num_to_keep=20),  # one per epoch is retained
        failure_config=FailureConfig(max_failures=2)  # retry the run up to twice
    )
)
# Run distributed training
result = trainer.fit()
# 10. Plot train/val loss curves (from Ray Train results)
# Pull the full metrics history Ray stored for this run
df = result.metrics_dataframe.copy()
# Keep only the columns we need (guard against extra columns)
cols = [c for c in ["epoch", "train_loss", "val_loss"] if c in df.columns]
df = df[cols].dropna()
# If multiple rows per epoch exist, keep the last report per epoch
if "epoch" in df.columns:
    df = df.sort_index().groupby("epoch", as_index=False).last()
# Plot
plt.figure(figsize=(7, 4))
if "train_loss" in df.columns:
    plt.plot(df["epoch"], df["train_loss"], marker="o", label="Train")
if "val_loss" in df.columns:
    plt.plot(df["epoch"], df["val_loss"], marker="o", label="Val")
plt.xlabel("Epoch")
plt.ylabel("MSE Loss")
plt.title("Matrix Factorization - Loss per Epoch")
plt.grid(True)
plt.legend()
plt.tight_layout()
plt.show()
# 11. Run trainer.fit() again to resume from last checkpoint
# NOTE(review): resumption relies on RunConfig name/storage_path matching the
# previous run plus the loop's get_checkpoint() logic — verify on your Ray version.
result = trainer.fit()
# 12. Inference: recommend top-N items for a user
# ---------------------------------------------
# Step 1: Reload original ratings CSV and mappings
# ---------------------------------------------
df = pd.read_csv("/mnt/cluster_storage/rec_sys_tutorial/raw/ratings.csv")
# Recompute ID mappings (same as during preprocessing: sorted IDs -> dense indices)
unique_users = sorted(df["user_id"].unique())
unique_items = sorted(df["item_id"].unique())
user2idx = {uid: j for j, uid in enumerate(unique_users)}
item2idx = {iid: j for j, iid in enumerate(unique_items)}
idx2item = {v: k for k, v in item2idx.items()}
# ---------------------------------------------
# Step 2: Load model from checkpoint
# ---------------------------------------------
model = MatrixFactorizationModel(
    num_users=len(user2idx),
    num_items=len(item2idx),
    embedding_dim=train_config["embedding_dim"]
)
with result.checkpoint.as_directory() as ckpt_dir:
    state_dict = torch.load(os.path.join(ckpt_dir, "model.pt"), map_location="cpu")
    # Remove 'module.' prefix if using DDP-trained model
    # NOTE(review): replace() strips "module." anywhere in the key, not just the
    # prefix — fine for this model's key names, but brittle in general.
    if any(k.startswith("module.") for k in state_dict):
        state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
    model.load_state_dict(state_dict)
model.eval()
# ---------------------------------------------
# Step 3: Select a user and generate recommendations
# ---------------------------------------------
# Choose a random user from the original dataset
original_user_id = df["user_id"].sample(1).iloc[0]
user_idx = user2idx[original_user_id]
print(f"Generating recommendations for user_id={original_user_id} (internal idx={user_idx})")
# Compute scores for all items for this user (one matmul against the full item table)
with torch.no_grad():
    user_vector = model.user_embedding(torch.tensor([user_idx]))  # [1, D]
    item_vectors = model.item_embedding.weight  # [num_items, D]
    scores = torch.matmul(user_vector, item_vectors.T).squeeze(0)  # [num_items]
    topk = torch.topk(scores, k=10)
    top_item_ids = [idx2item[j.item()] for j in topk.indices]
    top_scores = topk.values.tolist()
# ---------------------------------------------
# Step 4: Print top-N recommendations
# ---------------------------------------------
print("\nTop 10 Recommended Item IDs:")
for i, (item_id, score) in enumerate(zip(top_item_ids, top_scores), 1):
    print(f"{i:2d}. Item ID: {item_id} | Score: {score:.2f}")
# 13. Join top-N item IDs with movie titles from u.item
# u.item is pipe-separated, latin-1 encoded; we only need the first two columns.
item_metadata = pd.read_csv(
    "/mnt/cluster_storage/rec_sys_tutorial/ml-100k/u.item",
    sep="|",
    encoding="latin-1",
    header=None,
    usecols=[0, 1], # Only item_id and title
    names=["item_id", "title"]
)
# Join with top-N items (left join keeps recommendation order; missing titles become NaN)
top_items_df = pd.DataFrame({
    "item_id": top_item_ids,
    "score": top_scores
})
merged = top_items_df.merge(item_metadata, on="item_id", how="left")
print("\nTop 10 Recommended Movies:")
for j, row in merged.iterrows():
    print(f"{j+1:2d}. {row['title']} | Score: {row['score']:.2f}")
# 14. Cleanup -- delete checkpoints and metrics from model training.
# NOTE: /mnt/cluster_storage/ only exists on Anyscale.
TARGET_PATH = "/mnt/cluster_storage/rec_sys_tutorial"
target_exists = os.path.exists(TARGET_PATH)
if target_exists:
    shutil.rmtree(TARGET_PATH)
print(
    f"✅ Deleted everything under {TARGET_PATH}"
    if target_exists
    else f"⚠️ Path does not exist: {TARGET_PATH}"
)
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/train/tutorials/ci/py_scripts/04e_rec_sys_workload_pattern.py",
"license": "Apache License 2.0",
"lines": 338,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/tune/examples/tune_pytorch_asha/ci/tune_pytorch_asha.py | # 00. Runtime setup — install same deps as build.sh and set env vars
import os
import sys
import subprocess

# Non-secret env var
os.environ["RAY_TRAIN_V2_ENABLED"] = "1"

# Pinned Python dependencies (kept in sync with build.sh).
_REQUIRED_PACKAGES = [
    "torch==2.8.0",
    "torchvision==0.23.0",
    "matplotlib==3.10.6",
    "pyarrow==14.0.2",
]
# Install into the same interpreter that runs this script.
subprocess.check_call(
    [sys.executable, "-m", "pip", "install", "--no-cache-dir"] + _REQUIRED_PACKAGES
)
# 01. Imports
# --- Standard libraries ---
import os # Filesystem utilities (paths, directories)
import tempfile # Temporary directories for checkpoints
import shutil # Cleanup of files and directories
# --- Analytics / plotting ---
import pandas as pd # Converting output to dataframe for plotting
import matplotlib.pyplot as plt # For generating plots
# --- Scientific computing ---
import numpy as np # Numerical operations, used for random sampling in search space
# --- PyTorch (deep learning) ---
import torch
import torch.nn as nn # Neural network modules (layers, models)
import torch.nn.functional as F # Functional API for activations/losses
import torch.optim as optim # Optimizers (e.g., SGD, Adam)
import torchvision # Popular vision datasets and pretrained models
import torchvision.transforms as transforms # Image preprocessing pipelines
from torch.utils.data import random_split # Train/validation dataset splitting
# --- Utilities ---
from filelock import (
FileLock,
) # Prevents race conditions when multiple workers download CIFAR-10
# --- Ray (tuning and orchestration) ---
from ray import train, tune # Core APIs for metric reporting and trial execution
from ray.tune.schedulers import (
ASHAScheduler,
) # Asynchronous HyperBand for early stopping
from ray.air.config import (
RunConfig,
) # Configure experiment metadata (name, storage, logging)
# 02. Load and prepare CIFAR-10 data
def load_data(data_dir="/mnt/cluster_storage/cifar10"):
    """
    Download (if needed) and return the CIFAR-10 train and test sets,
    preprocessed with tensor conversion and per-channel normalization.
    """
    # Per-channel CIFAR-10 statistics used for normalization.
    cifar_mean = (0.4914, 0.4822, 0.4465)
    cifar_std = (0.2023, 0.1994, 0.2010)
    transform = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize(cifar_mean, cifar_std)]
    )
    # Serialize the download across concurrent workers so they don't
    # clobber each other's partially-written files.
    lock_path = os.path.expanduser("~/.data.lock")
    with FileLock(lock_path):
        datasets = [
            torchvision.datasets.CIFAR10(
                root=data_dir, train=is_train, download=True, transform=transform
            )
            for is_train in (True, False)
        ]
    return datasets[0], datasets[1]
def create_dataloaders(trainset, batch_size, num_workers=8):
    """
    Partition ``trainset`` 80/20 into train/validation subsets and wrap
    each in a DataLoader (train shuffled, validation deterministic).
    """
    n_train = int(len(trainset) * 0.8)
    train_subset, val_subset = random_split(
        trainset, [n_train, len(trainset) - n_train]
    )
    # Shared loader settings; only `shuffle` differs between the two.
    loader_kwargs = dict(batch_size=batch_size, num_workers=num_workers)
    train_loader = torch.utils.data.DataLoader(
        train_subset, shuffle=True, **loader_kwargs
    )
    val_loader = torch.utils.data.DataLoader(
        val_subset, shuffle=False, **loader_kwargs
    )
    return train_loader, val_loader
# 03. Load synthetic test data
def load_test_data():
    """
    Build tiny synthetic train/test sets shaped like CIFAR-10.

    Lets the training loop and Ray Tune integration be smoke-tested
    without downloading or processing the real dataset.
    """
    # Shared FakeData settings: CIFAR-10 image shape, class count, tensor output.
    fake_kwargs = dict(
        image_size=(3, 32, 32),
        num_classes=10,
        transform=transforms.ToTensor(),
    )
    # 128 training samples and a smaller 16-sample test split.
    trainset = torchvision.datasets.FakeData(size=128, **fake_kwargs)
    testset = torchvision.datasets.FakeData(size=16, **fake_kwargs)
    return trainset, testset
# 04. Define CNN model
class Net(nn.Module):
    """
    Small CNN for CIFAR-10: two conv+pool stages followed by a three-layer
    MLP head. The hidden widths ``l1`` and ``l2`` are the tunable
    hyperparameters.
    """

    def __init__(self, l1=120, l2=84):
        super(Net, self).__init__()
        # Feature extractor: 3 -> 6 -> 16 channels, 5x5 kernels, 2x2 max-pool.
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # Classifier head: the conv stack leaves 16 maps of 5x5 -> 400 inputs.
        self.fc1 = nn.Linear(16 * 5 * 5, l1)
        self.fc2 = nn.Linear(l1, l2)
        self.fc3 = nn.Linear(l2, 10)

    def forward(self, x):
        """Compute class logits for a batch of 3x32x32 images."""
        # Both conv stages share the same conv -> ReLU -> pool shape.
        for conv in (self.conv1, self.conv2):
            x = self.pool(F.relu(conv(x)))
        # Flatten (batch, 16, 5, 5) -> (batch, 400) for the dense head.
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)
# 05. Define a training function
def train_cifar(config):
    """
    Train a CIFAR-10 CNN with the hyperparameters in ``config``.

    Expected keys: l1, l2, lr, batch_size, max_num_epochs, smoke_test, device.
    Reports validation loss/accuracy to Ray Tune every epoch and saves a
    checkpoint (model + optimizer state) so interrupted trials can resume.
    """
    # --- Model setup ---
    # Initialize network with tunable hidden sizes (l1, l2).
    net = Net(config["l1"], config["l2"])
    device = config["device"]
    # If using CUDA with multiple GPUs, wrap in DataParallel for multi-GPU training.
    if device == "cuda":
        net = nn.DataParallel(net)
    net.to(device)
    # --- Loss and optimizer ---
    criterion = nn.CrossEntropyLoss()  # standard classification loss
    optimizer = optim.SGD(
        net.parameters(),
        lr=config["lr"],  # learning rate (tunable)
        momentum=0.9,  # helps accelerate gradients
        weight_decay=5e-5,  # L2 regularization
    )
    # --- Resume from checkpoint (if available) ---
    # This allows interrupted or failed trials to pick up from the last saved state.
    if tune.get_checkpoint():
        loaded_checkpoint = tune.get_checkpoint()
        with loaded_checkpoint.as_directory() as loaded_checkpoint_dir:
            # checkpoint.pt holds a (model_state, optimizer_state) tuple (see below)
            model_state, optimizer_state = torch.load(
                os.path.join(loaded_checkpoint_dir, "checkpoint.pt")
            )
            net.load_state_dict(model_state)  # restore model weights
            optimizer.load_state_dict(optimizer_state)  # restore optimizer state
    # --- Data setup ---
    # Use synthetic data for quick smoke tests, otherwise load full CIFAR-10.
    if config["smoke_test"]:
        trainset, _ = load_test_data()
    else:
        trainset, _ = load_data()
    # Create train/validation DataLoaders
    train_loader, val_loader = create_dataloaders(
        trainset,
        config["batch_size"],  # tunable batch size
        num_workers=0 if config["smoke_test"] else 8,  # no workers for synthetic data
    )
    # --- Training loop ---
    for epoch in range(config["max_num_epochs"]):  # loop over epochs
        net.train()  # set model to training mode
        running_loss = 0.0
        # Iterate over training batches
        for inputs, labels in train_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            # forward, backward, and optimize
            optimizer.zero_grad()  # reset gradients
            outputs = net(inputs)  # forward pass
            loss = criterion(outputs, labels)  # compute loss
            loss.backward()  # backpropagation
            optimizer.step()  # update weights
            running_loss += loss.item()
        # --- Validation loop ---
        net.eval()  # set model to eval mode
        val_loss = 0.0
        correct = total = 0
        with torch.no_grad():  # no gradients during evaluation
            for inputs, labels in val_loader:
                inputs, labels = inputs.to(device), labels.to(device)
                outputs = net(inputs)
                val_loss += criterion(outputs, labels).item()
                # Compute classification accuracy
                _, predicted = outputs.max(1)  # predicted class = argmax
                total += labels.size(0)
                correct += predicted.eq(labels).sum().item()
        # --- Report metrics to Ray Tune ---
        metrics = {
            "loss": val_loss / len(val_loader),  # average validation loss
            "accuracy": correct / total,  # validation accuracy
        }
        # --- Save checkpoint ---
        # Store model and optimizer state so trial can resume later if needed.
        with tempfile.TemporaryDirectory() as temp_checkpoint_dir:
            path = os.path.join(temp_checkpoint_dir, "checkpoint.pt")
            torch.save((net.state_dict(), optimizer.state_dict()), path)
            checkpoint = tune.Checkpoint.from_directory(temp_checkpoint_dir)
            # Report both metrics and checkpoint to Ray Tune
            tune.report(metrics, checkpoint=checkpoint)
    print("Finished Training!")  # Final message at end of training
# 06. Evaluate the best model
def test_best_model(best_result, smoke_test=False):
    """
    Evaluate the best model found during Ray Tune search on the test set.
    Restores the trained weights from the best trial's checkpoint and
    prints classification accuracy.
    """
    # --- Rebuild the best model architecture ---
    # Use the trial’s hyperparameters (hidden layer sizes l1 and l2)
    best_trained_model = Net(best_result.config["l1"], best_result.config["l2"])
    device = best_result.config["device"]
    # If running on GPU(s), wrap the model in DataParallel for multi-GPU support
    if device == "cuda":
        best_trained_model = nn.DataParallel(best_trained_model)
    best_trained_model.to(device)
    # --- Load weights from checkpoint ---
    # Convert checkpoint object to a directory, then restore model state_dict
    # NOTE(review): to_directory() materializes files that are never cleaned up
    # here, and torch.load has no map_location — assumes eval device matches
    # the training device. Confirm before reusing outside this tutorial.
    checkpoint_path = os.path.join(
        best_result.checkpoint.to_directory(), "checkpoint.pt"
    )
    model_state, _optimizer_state = torch.load(
        checkpoint_path
    )  # optimizer state not needed here
    best_trained_model.load_state_dict(model_state)
    # --- Select test dataset ---
    # Use synthetic FakeData if in smoke test mode, otherwise use real CIFAR-10 test set
    if smoke_test:
        _trainset, testset = load_test_data()
    else:
        _trainset, testset = load_data()
    # --- Prepare DataLoader for evaluation ---
    # Small testing batch size (4) is fine since evaluation isn’t performance-critical
    testloader = torch.utils.data.DataLoader(
        testset, batch_size=4, shuffle=False, num_workers=2
    )
    # --- Run evaluation ---
    correct = 0
    total = 0
    with torch.no_grad():  # disable gradients for faster inference
        for data in testloader:
            images, labels = data
            images, labels = images.to(device), labels.to(device)
            # Forward pass through trained model
            outputs = best_trained_model(images)
            # Get predicted class = index of max logit
            _, predicted = outputs.max(1)
            # Update totals for accuracy calculation
            total += labels.size(0)
            correct += predicted.eq(labels).sum().item()
    # --- Print final accuracy ---
    print(f"Best trial test set accuracy: {correct / total}")
# 07. Smoke test flag
# Set this to True for a smoke test that runs with a small synthetic dataset.
SMOKE_TEST = False
# 08. Define the hyperparameter search space and configuration
config = {
    "l1": tune.sample_from(
        lambda _: 2 ** np.random.randint(2, 9)
    ),  # size of 1st FC layer: a power of two in [4, 256]
    "l2": tune.sample_from(
        lambda _: 2 ** np.random.randint(2, 9)
    ),  # size of 2nd FC layer: a power of two in [4, 256]
    "lr": tune.loguniform(1e-4, 1e-1),  # learning rate
    "batch_size": tune.choice([2, 4, 8, 16]),  # training batch size
    "smoke_test": SMOKE_TEST,  # toggle for FakeData vs CIFAR-10
    "num_trials": 10 if not SMOKE_TEST else 2,  # number of hyperparam trials
    "max_num_epochs": 10 if not SMOKE_TEST else 2,  # training epochs per trial
    "device": "cuda" if torch.cuda.is_available() else "cpu",  # use GPU if available
}
# 09. Run hyperparameter tuning with Ray Tune
def main(config, gpus_per_trial=1):
    """
    Run Ray Tune hyperparameter search on CIFAR-10 with the given config.
    Uses ASHAScheduler for early stopping, prints the best trial, and
    evaluates it on the test set.

    Returns:
        (results, best_result): all trial results and the best trial.
    """
    # --- Define scheduler ---
    # ASHAScheduler prunes bad trials early and promotes promising ones.
    scheduler = ASHAScheduler(
        time_attr="training_iteration",  # metric for progress = epoch count
        max_t=config["max_num_epochs"],  # maximum epochs per trial
        grace_period=1,  # min epochs before pruning is allowed
        reduction_factor=2,  # at each rung, keep ~1/2 of trials
    )
    # --- Define Ray Tune tuner ---
    tuner = tune.Tuner(
        # Wrap training function and specify trial resources
        tune.with_resources(
            tune.with_parameters(train_cifar),  # training loop
            resources={"cpu": 2, "gpu": gpus_per_trial},  # per-trial resources
        ),
        tune_config=tune.TuneConfig(
            metric="loss",  # optimize validation loss
            mode="min",  # minimize the metric
            scheduler=scheduler,  # use ASHA for early stopping
            num_samples=config["num_trials"],  # number of hyperparam trials
        ),
        run_config=RunConfig(
            name="cifar10_tune_demo",  # experiment name
            storage_path="/mnt/cluster_storage/ray-results",  # save results here
        ),
        param_space=config,  # hyperparameter search space
    )
    # --- Execute trials ---
    results = tuner.fit()  # launch tuning job
    # --- Retrieve best result ---
    best_result = results.get_best_result("loss", "min")  # lowest validation loss
    # --- Print summary of best trial ---
    print(f"Best trial config: {best_result.config}")
    print(f"Best trial final validation loss: {best_result.metrics['loss']}")
    print(f"Best trial final validation accuracy: {best_result.metrics['accuracy']}")
    # --- Evaluate best model on test set ---
    test_best_model(best_result, smoke_test=config["smoke_test"])
    return results, best_result
# --- Run main entry point ---
# Use 1 GPU per trial if available, otherwise run on CPU only
results, best_result = main(
    config, gpus_per_trial=1 if torch.cuda.is_available() else 0
)
# 10. Analyze and visualize tuning results
# Convert all trial results into a DataFrame
df = results.get_dataframe()
# Show the top 5 trials by validation accuracy
# NOTE(review): `top5` only displays implicitly when run as a notebook cell;
# as a script it is computed but never printed.
top5 = df.sort_values("accuracy", ascending=False).head(5)
# Plot learning rate versus validation accuracy (log-x since lr spans decades)
plt.figure(figsize=(6, 4))
plt.scatter(df["config/lr"], df["accuracy"], alpha=0.7)
plt.xscale("log")
plt.xlabel("Learning Rate")
plt.ylabel("Validation Accuracy")
plt.title("Learning Rate vs Accuracy")
plt.grid(True)
plt.show()
# Plot batch size versus validation accuracy
plt.figure(figsize=(6, 4))
plt.scatter(df["config/batch_size"], df["accuracy"], alpha=0.7)
plt.xlabel("Batch Size")
plt.ylabel("Validation Accuracy")
plt.title("Batch Size vs Accuracy")
plt.grid(True)
plt.show()
# 11. Plot learning curves across all trials
# Expect: results, best_result already defined from:
#   results, best_result = main(config, gpus_per_trial=1 if torch.cuda.is_available() else 0)
# NOTE(review): matplotlib/pandas are already imported at the top of the file;
# these re-imports are harmless but redundant (notebook-cell style).
import matplotlib.pyplot as plt
import pandas as pd
fig, axes = plt.subplots(2, 1, figsize=(8, 8), sharex=True)
# --- Plot validation loss (every trial, faded) ---
for res in results:
    hist = res.metrics_dataframe
    if hist is None or hist.empty:
        continue
    # Prefer Tune's training_iteration counter; fall back to row order
    epoch = (
        hist["training_iteration"]
        if "training_iteration" in hist
        else pd.Series(range(1, len(hist) + 1))
    )
    axes[0].plot(epoch, hist["loss"], color="blue", alpha=0.15)
best_hist = best_result.metrics_dataframe
epoch_best = (
    best_hist["training_iteration"]
    if "training_iteration" in best_hist
    else pd.Series(range(1, len(best_hist) + 1))
)
# Overlay the best trial in bold
axes[0].plot(
    epoch_best,
    best_hist["loss"],
    marker="o",
    linewidth=2.5,
    color="blue",
    label="Best — Val Loss",
)
axes[0].set_ylabel("Validation Loss")
axes[0].set_title("All Trials (faded) + Best Trial (bold)")
axes[0].grid(True)
axes[0].legend()
# --- Plot validation accuracy (same layout as the loss panel) ---
for res in results:
    hist = res.metrics_dataframe
    if hist is None or hist.empty:
        continue
    epoch = (
        hist["training_iteration"]
        if "training_iteration" in hist
        else pd.Series(range(1, len(hist) + 1))
    )
    axes[1].plot(epoch, hist["accuracy"], color="orange", alpha=0.15)
axes[1].plot(
    epoch_best,
    best_hist["accuracy"],
    marker="s",
    linewidth=2.5,
    color="orange",
    label="Best — Val Accuracy",
)
axes[1].set_xlabel("Epoch")
axes[1].set_ylabel("Validation Accuracy")
axes[1].grid(True)
axes[1].legend()
plt.tight_layout()
plt.show()
# 12. Cleanup cluster storage
# Paths you used in the script
paths_to_clean = [
    "/mnt/cluster_storage/cifar10",  # dataset
    "/mnt/cluster_storage/ray-results",  # Tune results & checkpoints
]
for p in paths_to_clean:
    if not os.path.exists(p):
        print(f"{p} does not exist, skipping.")
        continue
    print(f"Removing {p} ...")
    shutil.rmtree(p, ignore_errors=True)
print("Cleanup complete ✅")
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/tune/examples/tune_pytorch_asha/ci/tune_pytorch_asha.py",
"license": "Apache License 2.0",
"lines": 456,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:release/nightly_tests/dataset/training_ingest_benchmark.py | """Benchmark script for training data ingest with Ray Data.
This script benchmarks different approaches for loading and preprocessing images:
- Loads images from S3 (parquet or JPEG format)
- Applies image transforms (crop, scale, flip)
- Iterates through batches with configurable batch sizes and prefetch settings
- Tests all hyperparameter combinations:
- transform_type: Image transform (random_crop, large_crop, small_crop, etc.)
- batch_size: Batch size for processing
- prefetch_batches: Number of batches to prefetch
- num_image_columns: Number of image columns per row
Supported data loaders:
- s3_parquet: Uses ray.data.read_parquet() with embedded image bytes
- s3_url_image: Lists JPEG files via boto3, downloads with map_batches
- s3_read_images: Uses ray.data.read_images() with Partitioning
"""
import argparse
import io
import itertools
import logging
import time
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Dict, List, Optional
import boto3
import numpy as np
import ray
import ray.data
import torchvision.transforms as transforms
from PIL import Image
from pyarrow import fs
from ray.data.datasource.partitioning import Partitioning
from tabulate import tabulate
from benchmark import Benchmark
from dataset_benchmark_util import IMAGENET_WNID_TO_ID
logger = logging.getLogger(__name__)
# S3 configuration for image datasets
# (shared by S3UrlImageDataLoader and S3ReadImagesDataLoader)
S3_IMAGE_AWS_REGION = "us-west-2"  # region hosting the ImageNet bucket
S3_IMAGE_ROOT = "s3://anyscale-imagenet/ILSVRC/Data/CLS-LOC"
# Split name -> S3 directory holding that split's JPEG files.
S3_IMAGE_SPLIT_DIRS = {
    "train": f"{S3_IMAGE_ROOT}/train",
    "val": f"{S3_IMAGE_ROOT}/val",
    "test": f"{S3_IMAGE_ROOT}/test",
}
@dataclass
class BenchmarkConfig:
    """Configuration for the training ingest benchmark.

    The list-valued fields define a hyperparameter grid; run_benchmark()
    executes the cartesian product of all of them.
    """

    # Data loader options
    data_loader: str = "s3_parquet"
    # Transform types to benchmark
    transform_types: List[str] = field(
        default_factory=lambda: [
            "random_crop",
            "large_crop",
            "small_crop",
            "center_crop",
            "scale_up",
            "scale_down",
        ]
    )
    # Batch sizes to test
    batch_sizes: List[int] = field(default_factory=lambda: [32, 256])
    # Prefetch batch counts to test
    prefetch_batches_list: List[int] = field(default_factory=lambda: [1, 4])
    # Number of image columns per row to test
    num_image_columns_list: List[int] = field(default_factory=lambda: [32])
    # Number of batches to process per benchmark run
    num_batches: int = 32
    # Optional simulated training time (seconds) per batch
    simulated_training_time: Optional[float] = None
    # Data split to use
    split: str = "train"
    # Device for tensor placement ("cpu" or "cuda")
    device: str = "cpu"
    # Pin memory for faster GPU transfer
    pin_memory: bool = False

    @property
    def supported_data_loaders(self) -> List[str]:
        """Return list of supported data loaders."""
        return [
            "s3_parquet",
            "s3_url_image",
            "s3_read_images",
        ]

    def validate(self):
        """Validate configuration values.

        Raises:
            ValueError: If ``data_loader`` is not one of the supported loaders.
        """
        if self.data_loader not in self.supported_data_loaders:
            raise ValueError(
                f"Unknown data loader: {self.data_loader}. "
                f"Supported: {self.supported_data_loaders}"
            )

    def log_config(self):
        """Log the current configuration at INFO level."""
        logger.info("=" * 80)
        logger.info("BENCHMARK CONFIGURATION")
        logger.info("=" * 80)
        logger.info(f"Data loader: {self.data_loader}")
        logger.info(f"Split: {self.split}")
        logger.info(f"Transform types: {self.transform_types}")
        logger.info(f"Batch sizes: {self.batch_sizes}")
        logger.info(f"Prefetch batches: {self.prefetch_batches_list}")
        logger.info(f"Number of image columns: {self.num_image_columns_list}")
        logger.info(f"Number of batches: {self.num_batches}")
        logger.info(f"Simulated training time: {self.simulated_training_time}")
        logger.info(f"Device: {self.device}")
        logger.info(f"Pin memory: {self.pin_memory}")
        logger.info("=" * 80)
class BaseDataLoader(ABC):
    """Abstract base class for benchmark data loaders.

    Provides shared functionality for loading and transforming image datasets.
    Subclasses implement format-specific data loading logic.
    """

    # Transform configurations: {name: (base_transforms, use_horizontal_flip)}
    # Each value pairs a zero-arg factory (so torchvision transforms are built
    # lazily, per call) with a flag for appending RandomHorizontalFlip.
    TRANSFORM_CONFIGS = {
        "random_crop": (
            lambda: transforms.RandomResizedCrop(
                antialias=True, size=224, scale=(0.05, 1.0), ratio=(0.75, 1.33)
            ),
            True,
        ),
        "large_crop": (
            lambda: transforms.RandomResizedCrop(
                antialias=True, size=224, scale=(0.2, 1.0), ratio=(0.5, 2.0)
            ),
            True,
        ),
        "small_crop": (
            lambda: transforms.RandomResizedCrop(
                antialias=True, size=224, scale=(0.05, 0.5), ratio=(0.9, 1.1)
            ),
            True,
        ),
        "center_crop": (
            lambda: transforms.Compose(
                [transforms.Resize(256), transforms.CenterCrop(224)]
            ),
            False,
        ),
        "scale_up": (
            lambda: transforms.Compose(
                [transforms.Resize(320), transforms.RandomCrop(224)]
            ),
            True,
        ),
        "scale_down": (
            lambda: transforms.Compose(
                [
                    transforms.Resize(180),
                    transforms.RandomCrop(180),
                    transforms.Resize(224),
                ]
            ),
            True,
        ),
    }

    def __init__(self, data_dir: str, label_to_id_map: Dict[str, int] = None):
        """Initialize the data loader.

        Args:
            data_dir: Path to data directory
            label_to_id_map: Mapping from label strings to integer IDs;
                defaults to the shared ImageNet WNID mapping.
        """
        self.data_dir = data_dir
        self.label_to_id_map = label_to_id_map or IMAGENET_WNID_TO_ID

    @classmethod
    def get_transform(cls, transform_type: str) -> transforms.Compose:
        """Get an image transform pipeline for the specified transform type.

        Raises:
            ValueError: If ``transform_type`` is not a TRANSFORM_CONFIGS key.
        """
        if transform_type not in cls.TRANSFORM_CONFIGS:
            raise ValueError(f"Unknown transform_type: {transform_type}")

        base_fn, use_flip = cls.TRANSFORM_CONFIGS[transform_type]
        transform_list = [base_fn()]
        if use_flip:
            transform_list.append(transforms.RandomHorizontalFlip())

        # Crop/flip first, then tensor conversion and normalization
        # (mean/std values as hard-coded below).
        return transforms.Compose(
            [
                transforms.Compose(transform_list),
                transforms.ToTensor(),
                transforms.Normalize(
                    mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
                ),
            ]
        )

    @staticmethod
    def tensor_to_numpy(tensor) -> np.ndarray:
        """Convert a tensor to numpy array."""
        # Torch tensors: detach from autograd and move to CPU before converting.
        if hasattr(tensor, "detach"):
            return tensor.detach().cpu().numpy()
        elif hasattr(tensor, "numpy"):
            return tensor.numpy()
        return np.array(tensor)

    @staticmethod
    def add_image_columns(result: Dict, processed_image: np.ndarray, num_columns: int):
        """Add multiple image columns to result dict.

        The first column is named "image"; extra columns ("image_1", ...) hold
        independent copies of the same processed image.
        """
        result["image"] = processed_image
        for i in range(1, num_columns):
            result[f"image_{i}"] = processed_image.copy()

    @staticmethod
    def make_split_dirs(s3_root: str) -> Dict[str, str]:
        """Generate split directories from an S3 root path."""
        return {
            "train": f"{s3_root}/train",
            "val": f"{s3_root}/val",
            "test": f"{s3_root}/test",
        }

    @staticmethod
    def compute_limit(batch_size: int, num_batches: int) -> int:
        """Compute the row limit for a benchmark run."""
        return batch_size * num_batches

    @abstractmethod
    def create_dataset(
        self,
        transform_type: str,
        batch_size: int,
        num_batches: int,
        num_image_columns: int,
    ) -> ray.data.Dataset:
        """Create a Ray dataset with the specified configuration.

        Args:
            transform_type: Type of image transform to apply
            batch_size: Batch size for processing
            num_batches: Number of batches to prepare (for limiting data)
            num_image_columns: Number of image columns per row

        Returns:
            Configured Ray dataset ready for iteration
        """
        raise NotImplementedError
class S3ParquetDataLoader(BaseDataLoader):
    """Loads ImageNet rows stored as parquet on S3 via ``read_parquet``.

    The base dataset (before any per-row map) is cached on the instance so
    repeated benchmark runs do not re-list the parquet files.
    """

    # S3 configuration
    S3_ROOT = "s3://ray-benchmark-data-internal-us-west-2/imagenet/parquet_split"
    SPLIT_DIRS = BaseDataLoader.make_split_dirs(S3_ROOT)

    def __init__(self, data_dir: str, label_to_id_map: Dict[str, int] = None):
        """Initialize the loader; the base-dataset cache starts empty."""
        super().__init__(data_dir, label_to_id_map)
        self._base_dataset_cache: Optional[ray.data.Dataset] = None

    @classmethod
    def get_data_dir(cls, split: str = "train") -> str:
        """Return the S3 directory for ``split``.

        Raises:
            ValueError: If ``split`` is not a known split name.
        """
        if split in cls.SPLIT_DIRS:
            return cls.SPLIT_DIRS[split]
        raise ValueError(f"Unknown split: {split}")

    def get_base_dataset(self) -> ray.data.Dataset:
        """Return the cached base dataset, reading parquet on first use."""
        cached = self._base_dataset_cache
        if cached is not None:
            logger.info("Using cached base dataset")
            return cached

        logger.info(f"Reading parquet from {self.data_dir}...")
        base = ray.data.read_parquet(self.data_dir, columns=["image", "label"])
        self._base_dataset_cache = base
        logger.info("Created and cached base dataset")
        return base

    def create_dataset(
        self,
        transform_type: str,
        batch_size: int,
        num_batches: int,
        num_image_columns: int,
    ) -> ray.data.Dataset:
        """Create dataset by mapping a decode+transform over the cached base."""
        row_cap = self.compute_limit(batch_size, num_batches)
        pipeline = self.get_transform(transform_type)
        labels = self.label_to_id_map  # bind for the map closure

        def decode_and_transform(row: Dict) -> Dict:
            # Decode embedded JPEG bytes, apply the torchvision pipeline, and
            # fan the result out into the requested number of image columns.
            pil_image = Image.open(io.BytesIO(row["image"])).convert("RGB")
            arr = BaseDataLoader.tensor_to_numpy(pipeline(pil_image))
            BaseDataLoader.add_image_columns(row, arr, num_image_columns)
            row["label"] = labels.get(row["label"], -1)
            return row

        return self.get_base_dataset().limit(row_cap).map(decode_and_transform)
class S3UrlImageDataLoader(BaseDataLoader):
    """Data loader that reads images from S3 URLs using map_batches.

    Uses boto3 for S3 file listing.
    Caches the file listing and base dataset to avoid repeated slow listings.
    """

    def __init__(self, data_dir: str, label_to_id_map: Dict[str, int] = None):
        """Initialize the data loader with file listing cache."""
        super().__init__(data_dir, label_to_id_map)
        # Both caches start empty and are filled lazily on first access.
        self._file_records_cache: Optional[List[Dict[str, str]]] = None
        self._base_dataset_cache: Optional[ray.data.Dataset] = None

    @classmethod
    def get_data_dir(cls, split: str = "train") -> str:
        """Get the data directory for the specified split.

        Raises:
            ValueError: If ``split`` is not "train", "val", or "test".
        """
        if split not in S3_IMAGE_SPLIT_DIRS:
            raise ValueError(f"Unknown split: {split}")
        return S3_IMAGE_SPLIT_DIRS[split]

    def _list_files(self) -> List[Dict[str, str]]:
        """List JPEG files from S3 with class labels extracted from path.

        Uses boto3's S3 listing. Results are cached.

        Returns:
            Records of the form ``{"path": "s3://...", "class": wnid}``.
        """
        if self._file_records_cache is not None:
            logger.info(
                f"Using cached file list ({len(self._file_records_cache)} files)"
            )
            return self._file_records_cache

        logger.info(f"Listing JPEG files from {self.data_dir}...")

        # Parse S3 URL: s3://bucket/prefix
        s3_path = self.data_dir
        if s3_path.startswith("s3://"):
            s3_path = s3_path[5:]
        parts = s3_path.split("/", 1)
        bucket = parts[0]
        prefix = parts[1] if len(parts) > 1 else ""

        # List all files using boto3; the paginator transparently follows
        # continuation tokens across listing pages.
        s3_client = boto3.client("s3", region_name=S3_IMAGE_AWS_REGION)
        paginator = s3_client.get_paginator("list_objects_v2")

        # Extract class labels from path structure: .../class_name/image.jpg
        file_records = []
        for page in paginator.paginate(Bucket=bucket, Prefix=prefix):
            for obj in page.get("Contents", []):
                key = obj["Key"]
                # Only keep JPEG objects; skip everything else.
                if not key.lower().endswith((".jpg", ".jpeg")):
                    continue
                # Extract class from path: prefix/class/image.jpg
                key_parts = key.rstrip("/").split("/")
                if len(key_parts) >= 2:
                    class_name = key_parts[-2]  # Parent directory is the class
                    file_path = f"s3://{bucket}/{key}"
                    file_records.append({"path": file_path, "class": class_name})

        logger.info(f"Listed and cached {len(file_records)} JPEG files")
        self._file_records_cache = file_records
        return file_records

    def get_base_dataset(self) -> ray.data.Dataset:
        """Get the base dataset (from_items with file records), creating and caching if needed."""
        if self._base_dataset_cache is not None:
            logger.info("Using cached base dataset")
            return self._base_dataset_cache

        file_records = self._list_files()
        ds = ray.data.from_items(file_records)
        self._base_dataset_cache = ds
        logger.info("Created and cached base dataset")
        return ds

    def create_dataset(
        self,
        transform_type: str,
        batch_size: int,
        num_batches: int,
        num_image_columns: int,
    ) -> ray.data.Dataset:
        """Create dataset by applying map_batches to the cached base dataset."""
        limit = self.compute_limit(batch_size, num_batches)
        transform = self.get_transform(transform_type)
        # Bind for the closure shipped to worker processes.
        label_to_id_map = self.label_to_id_map

        def download_and_process_batch(
            batch: Dict[str, np.ndarray]
        ) -> Dict[str, np.ndarray]:
            # Client is created per batch invocation (boto3 clients are not
            # assumed to survive serialization to workers).
            s3_client = boto3.client("s3", region_name=S3_IMAGE_AWS_REGION)

            processed_images = []
            labels = []

            # Download files using boto3
            paths = list(batch["path"])
            classes = list(batch["class"])

            for s3_url, wnid in zip(paths, classes):
                # Parse S3 URL: s3://bucket/key
                url_path = s3_url[5:] if s3_url.startswith("s3://") else s3_url
                bucket, key = url_path.split("/", 1)

                response = s3_client.get_object(Bucket=bucket, Key=key)
                data = response["Body"].read()

                image_pil = Image.open(io.BytesIO(data)).convert("RGB")
                processed_images.append(
                    BaseDataLoader.tensor_to_numpy(transform(image_pil))
                )
                labels.append(label_to_id_map.get(wnid, -1))

            # Unknown WNIDs map to label -1 (see label_to_id_map.get above).
            result = {"label": np.array(labels)}
            BaseDataLoader.add_image_columns(
                result, np.stack(processed_images), num_image_columns
            )
            return result

        return (
            self.get_base_dataset()
            .limit(limit)
            .map_batches(download_and_process_batch, batch_size=batch_size)
        )
class S3ReadImagesDataLoader(BaseDataLoader):
    """Data loader that reads images from S3 using read_images.

    Uses the same approach as multi_node_train_benchmark.py for reading images.
    Caches the base dataset (before map) to avoid repeated file listings.
    """

    def __init__(self, data_dir: str, label_to_id_map: Dict[str, int] = None):
        """Initialize the data loader with base dataset cache."""
        super().__init__(data_dir, label_to_id_map)
        self._base_dataset_cache: Optional[ray.data.Dataset] = None

    @classmethod
    def get_data_dir(cls, split: str = "train") -> str:
        """Get the data directory for the specified split.

        Raises:
            ValueError: If ``split`` is not "train", "val", or "test".
        """
        if split not in S3_IMAGE_SPLIT_DIRS:
            raise ValueError(f"Unknown split: {split}")
        return S3_IMAGE_SPLIT_DIRS[split]

    @staticmethod
    def _get_s3fs_with_boto_creds():
        """Get S3 filesystem with boto credentials.

        Same as multi_node_train_benchmark.py to avoid ACCESS_DENIED errors.
        """
        # Resolve credentials through boto's default chain, then hand them to
        # the pyarrow S3 filesystem explicitly.
        credentials = boto3.Session().get_credentials()

        s3fs = fs.S3FileSystem(
            access_key=credentials.access_key,
            secret_key=credentials.secret_key,
            session_token=credentials.token,
            region=S3_IMAGE_AWS_REGION,
        )
        return s3fs

    def get_base_dataset(self) -> ray.data.Dataset:
        """Get the base dataset, creating and caching if needed."""
        if self._base_dataset_cache is not None:
            logger.info("Using cached base dataset")
            return self._base_dataset_cache

        # Use partitioning to extract class from directory structure
        # (the parent directory name becomes the "class" column).
        partitioning = Partitioning(
            "dir",
            field_names=["class"],
            base_dir=self.data_dir,
        )

        # Use S3 filesystem with boto credentials
        filesystem = self._get_s3fs_with_boto_creds()

        logger.info(f"Reading images from {self.data_dir} using read_images()...")
        ds = ray.data.read_images(
            self.data_dir,
            filesystem=filesystem,
            mode="RGB",
            partitioning=partitioning,
        )

        self._base_dataset_cache = ds
        logger.info("Created and cached base dataset")
        return ds

    def create_dataset(
        self,
        transform_type: str,
        batch_size: int,
        num_batches: int,
        num_image_columns: int,
    ) -> ray.data.Dataset:
        """Create dataset by applying map to the cached base dataset."""
        limit = self.compute_limit(batch_size, num_batches)
        transform = self.get_transform(transform_type)
        # Bind for the closure shipped to worker processes.
        label_to_id_map = self.label_to_id_map

        def process_row(row: Dict) -> Dict:
            # Image is already loaded as numpy array by read_images
            image_pil = Image.fromarray(row["image"])
            processed = BaseDataLoader.tensor_to_numpy(transform(image_pil))
            BaseDataLoader.add_image_columns(row, processed, num_image_columns)
            # Map WNID -> integer label (-1 for unknown), then drop the raw
            # partition column so the schema matches the other loaders.
            row["label"] = label_to_id_map.get(row["class"], -1)
            del row["class"]
            return row

        return self.get_base_dataset().limit(limit).map(process_row)
def create_data_loader(data_loader: str, split: str = "train") -> BaseDataLoader:
    """Factory: instantiate the data loader implementation named by ``data_loader``.

    Args:
        data_loader: One of "s3_parquet", "s3_url_image", or "s3_read_images"
        split: Data split to use ("train", "val", or "test")

    Returns:
        Configured data loader instance

    Raises:
        ValueError: If ``data_loader`` names no known implementation.
    """
    loader_classes = {
        "s3_parquet": S3ParquetDataLoader,
        "s3_url_image": S3UrlImageDataLoader,
        "s3_read_images": S3ReadImagesDataLoader,
    }
    if data_loader not in loader_classes:
        raise ValueError(f"Unknown data loader: {data_loader}")
    loader_cls = loader_classes[data_loader]
    return loader_cls(loader_cls.get_data_dir(split))
@ray.remote
def benchmark_iteration(
    dataset: ray.data.Dataset,
    batch_size: int,
    prefetch_batches: int,
    num_batches: int = 100,
    simulated_training_time: float = None,
    device: str = "auto",
    pin_memory: bool = False,
) -> Dict[str, float]:
    """Benchmark iterating through batches.

    Args:
        dataset: Ray dataset to iterate through
        batch_size: Batch size for iter_torch_batches
        prefetch_batches: Number of batches to prefetch
        num_batches: Number of batches to iterate through for timing
        simulated_training_time: Time in seconds to sleep per batch to simulate training.
            If None, no sleep is performed.
        device: Device for tensor placement ("cpu" or "cuda"); the default
            "auto" is passed straight through to iter_torch_batches.
        pin_memory: Pin memory for faster GPU transfer

    Returns:
        Dictionary with timing metrics
    """
    # Timing covers iterator construction plus the full consumption loop.
    start_time = time.time()

    iterator = dataset.iter_torch_batches(
        batch_size=batch_size,
        prefetch_batches=prefetch_batches,
        drop_last=True,
        device=device,
        pin_memory=pin_memory,
    )

    # Iterate through batches
    batch_count = 0
    total_rows = 0

    for batch in iterator:
        batch_count += 1
        # Row count is only tallied when an "image" column is present.
        if "image" in batch:
            total_rows += len(batch["image"])

        # Simulate training time if configured
        if simulated_training_time is not None:
            time.sleep(simulated_training_time)

        if batch_count >= num_batches:
            break

    elapsed_time = time.time() - start_time

    # Rate metrics guard against a zero elapsed time.
    return {
        "elapsed_time": elapsed_time,
        "batches_processed": batch_count,
        "rows_processed": total_rows,
        "rows_per_second": total_rows / elapsed_time if elapsed_time > 0 else 0,
        "batches_per_second": batch_count / elapsed_time if elapsed_time > 0 else 0,
    }
def run_benchmark(config: BenchmarkConfig) -> List[Dict]:
    """Run benchmarks with all hyperparameter combinations.

    Executes the cartesian product of transform types, batch sizes, prefetch
    counts, and image-column counts defined by ``config``.

    Args:
        config: Benchmark configuration

    Returns:
        List of benchmark results, one dict per combination
    """
    config.validate()
    results = []

    # Create data loader for the specified format
    data_loader = create_data_loader(config.data_loader, config.split)
    logger.info(
        f"Using {data_loader.__class__.__name__} with "
        f"{len(data_loader.label_to_id_map)} classes"
    )
    logger.info(f"Data directory: {data_loader.data_dir}")

    # Generate all combinations
    combinations = list(
        itertools.product(
            config.transform_types,
            config.batch_sizes,
            config.prefetch_batches_list,
            config.num_image_columns_list,
        )
    )

    logger.info(f"Running {len(combinations)} benchmark combinations...")

    for transform_type, batch_size, prefetch_batches, num_image_columns in combinations:
        logger.info(
            f"Benchmarking: transform={transform_type}, "
            f"batch_size={batch_size}, prefetch_batches={prefetch_batches}, "
            f"num_image_columns={num_image_columns}"
        )

        # Create dataset using the data loader
        ds = data_loader.create_dataset(
            transform_type=transform_type,
            batch_size=batch_size,
            num_batches=config.num_batches,
            num_image_columns=num_image_columns,
        )

        # Run benchmark (request GPU if device is cuda)
        num_gpus = 1 if config.device == "cuda" else 0
        metrics = ray.get(
            benchmark_iteration.options(num_gpus=num_gpus).remote(
                dataset=ds,
                batch_size=batch_size,
                prefetch_batches=prefetch_batches,
                num_batches=config.num_batches,
                simulated_training_time=config.simulated_training_time,
                device=config.device,
                pin_memory=config.pin_memory,
            )
        )

        # Store results: hyperparameters merged with the timing metrics.
        result = {
            "transform_type": transform_type,
            "batch_size": batch_size,
            "prefetch_batches": prefetch_batches,
            "num_image_columns": num_image_columns,
            **metrics,
        }
        results.append(result)

        logger.info(
            f"  Results: {metrics['rows_per_second']:.2f} rows/sec, "
            f"{metrics['batches_per_second']:.2f} batches/sec"
        )

    return results
def print_summary(results: List[Dict]):
    """Log a grid-formatted summary table of benchmark results."""
    if not results:
        logger.warning("No results to display.")
        return

    # Column headers, in the order the rows below are built.
    headers = [
        "Transform",
        "Batch Size",
        "Prefetch",
        "Image Cols",
        "Rows/sec",
        "Batches/sec",
        "Rows",
        "Batches",
        "Time (s)",
    ]

    # Order rows by batch size, then prefetch count, then image-column count.
    def row_order(entry: Dict) -> tuple:
        return (entry["batch_size"], entry["prefetch_batches"], entry["num_image_columns"])

    table_data = [
        [
            entry["transform_type"],
            entry["batch_size"],
            entry["prefetch_batches"],
            entry["num_image_columns"],
            f"{entry['rows_per_second']:.2f}",
            f"{entry['batches_per_second']:.2f}",
            entry["rows_processed"],
            entry["batches_processed"],
            f"{entry['elapsed_time']:.2f}",
        ]
        for entry in sorted(results, key=row_order)
    ]

    # Emit the rendered table through the logger.
    logger.info("\n" + tabulate(table_data, headers=headers, tablefmt="grid"))
def main():
    """Main entry point for the benchmark.

    Parses CLI arguments into a BenchmarkConfig, runs all combinations, and
    returns a result payload for the release-test harness (or None when no
    combination produced results).
    """
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    )

    # Create default config to get supported formats
    default_config = BenchmarkConfig()

    parser = argparse.ArgumentParser(
        description="Benchmark Ray Data image loading with parquet or JPEG formats."
    )
    parser.add_argument(
        "--num-batches",
        type=int,
        default=default_config.num_batches,
        help=f"Number of batches to process. Default: {default_config.num_batches}",
    )
    parser.add_argument(
        "--simulated-training-time",
        type=float,
        default=default_config.simulated_training_time,
        help="Time in seconds to sleep per batch to simulate training.",
    )
    parser.add_argument(
        "--data-loader",
        type=str,
        choices=default_config.supported_data_loaders,
        default=default_config.data_loader,
        help=f"Data loader. Default: {default_config.data_loader}",
    )
    parser.add_argument(
        "--split",
        type=str,
        choices=["train", "val", "test"],
        default=default_config.split,
        help=f"Data split to use. Default: {default_config.split}",
    )
    parser.add_argument(
        "--device",
        type=str,
        default=default_config.device,
        help=f"Device for tensor placement. Default: {default_config.device}",
    )
    parser.add_argument(
        "--pin-memory",
        action="store_true",
        default=default_config.pin_memory,
        help="Pin memory for faster GPU transfer.",
    )
    parser.add_argument(
        "--batch-sizes",
        type=int,
        nargs="+",
        default=default_config.batch_sizes,
        help=f"Batch sizes to test. Default: {default_config.batch_sizes}",
    )
    parser.add_argument(
        "--prefetch-batches",
        type=int,
        nargs="+",
        default=default_config.prefetch_batches_list,
        help=f"Prefetch batch counts to test. Default: {default_config.prefetch_batches_list}",
    )
    args = parser.parse_args()

    # Build configuration from CLI args; fields without a CLI flag (e.g.
    # transform_types) keep their dataclass defaults.
    config = BenchmarkConfig(
        data_loader=args.data_loader,
        num_batches=args.num_batches,
        simulated_training_time=args.simulated_training_time,
        split=args.split,
        device=args.device,
        pin_memory=args.pin_memory,
        batch_sizes=args.batch_sizes,
        prefetch_batches_list=args.prefetch_batches,
    )

    # Log benchmark configuration
    config.log_config()

    # Run benchmarks
    results = run_benchmark(config)

    # Print summary table
    print_summary(results)

    if results:
        return {
            "results": results,
            "data_loader": config.data_loader,
            "transform_types": config.transform_types,
            "batch_sizes": config.batch_sizes,
            "prefetch_batches_list": config.prefetch_batches_list,
            "num_image_columns_list": config.num_image_columns_list,
            "num_batches": config.num_batches,
            "device": config.device,
            "pin_memory": config.pin_memory,
        }
if __name__ == "__main__":
    # Run under the release-test harness and persist the returned payload.
    benchmark = Benchmark()
    benchmark.run_fn("training-ingest-benchmark", main)
    benchmark.write_result()
| {
"repo_id": "ray-project/ray",
"file_path": "release/nightly_tests/dataset/training_ingest_benchmark.py",
"license": "Apache License 2.0",
"lines": 717,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/_internal/cluster_autoscaler/util.py | import logging
from typing import Dict, List
from ray.data._internal.execution.interfaces import ExecutionResources
logger = logging.getLogger(__name__)
def cap_resource_request_to_limits(
    active_bundles: List[Dict],
    pending_bundles: List[Dict],
    resource_limits: ExecutionResources,
) -> List[Dict]:
    """Cap the resource request to not exceed user-configured resource limits.

    Active bundles (running tasks / existing nodes) represent resources already
    in use, so they are always kept and placed first in the output. Pending
    bundles (future work / scale-up requests) are then admitted best-effort,
    smallest-first, so that large bundles appearing early in arbitrary
    iteration order cannot crowd out smaller ones.

    Args:
        active_bundles: Bundles for already-running tasks or existing nodes
            (always included).
        pending_bundles: Bundles for pending work or scale-up requests
            (included only while the running total stays within limits).
        resource_limits: The user-configured resource limits.

    Returns:
        A list of resource bundles that respects user limits, with active
        bundles always included first.
    """
    # No explicit limits configured (all infinite): nothing to cap.
    if resource_limits == ExecutionResources.inf():
        return active_bundles + pending_bundles

    # Active bundles are unconditionally kept; tally their combined usage.
    capped = list(active_bundles)
    running_total = ExecutionResources.zero()
    for active in active_bundles:
        running_total = running_total.add(ExecutionResources.from_resource_dict(active))

    # Size key for smallest-first packing of pending bundles.
    def _bundle_size(bundle: Dict) -> tuple:
        return (
            bundle.get("CPU", 0),
            bundle.get("GPU", 0),
            bundle.get("memory", 0),
        )

    included_pending = 0
    for pending in sorted(pending_bundles, key=_bundle_size):
        candidate_total = running_total.add(
            ExecutionResources.from_resource_dict(pending)
        )
        # Admit the bundle only if the new total still fits; keep scanning the
        # remaining (larger) bundles regardless, matching best-effort packing.
        if candidate_total.satisfies_limit(resource_limits):
            capped.append(pending)
            running_total = candidate_total
            included_pending += 1

    requested = len(active_bundles) + len(pending_bundles)
    if len(capped) < requested:
        logger.debug(
            f"Capped autoscaling resource request from {requested} "
            f"bundles to {len(capped)} bundles to respect "
            f"user-configured resource limits: {resource_limits}. "
            f"({len(active_bundles)} active bundles kept, "
            f"{included_pending}/{len(pending_bundles)} "
            f"pending bundles included)."
        )

    return capped
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/cluster_autoscaler/util.py",
"license": "Apache License 2.0",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/data/examples/llm_batch_inference_text/ci/nb2py.py | #!/usr/bin/env python3
import argparse
import nbformat
def convert_notebook(
    input_path: str, output_path: str, ignore_cmds: bool = False
) -> None:
    """
    Read a Jupyter notebook and write a Python script, converting all %%bash
    cells and IPython "!" commands into subprocess.run calls that raise on error.
    Cells that load or autoreload extensions are ignored.

    Args:
        input_path: Path to the source ``.ipynb`` notebook.
        output_path: Path of the ``.py`` script to write.
        ignore_cmds: If True, drop shell-command cells entirely instead of
            converting them to ``subprocess.run`` calls.
    """
    nb = nbformat.read(input_path, as_version=4)
    with open(output_path, "w") as out:
        for cell in nb.cells:
            # Only process code cells
            if cell.cell_type != "code":
                continue
            lines = cell.source.splitlines()
            # Detect a %%bash cell (NOTE(review): empty cells write nothing)
            if lines:
                # Detect any IPython '!' shell commands in code lines
                has_bang = any(line.lstrip().startswith("!") for line in lines)
                # Detect %pip magic commands
                has_pip_magic = any(line.lstrip().startswith("%pip") for line in lines)
                # Start with "serve run" "serve shutdown" "curl" or "anyscale service" commands
                to_ignore_cmd = (
                    "serve run",
                    "serve shutdown",
                    "curl",
                    "anyscale service",
                )
                has_ignored_start = any(
                    line.lstrip().startswith(to_ignore_cmd) for line in lines
                )
                # Skip %pip cells entirely
                if has_pip_magic:
                    continue
                if has_bang or has_ignored_start:
                    if ignore_cmds:
                        continue
                    # Emit an import per converted cell; duplicates are harmless.
                    out.write("import subprocess\n")
                    for line in lines:
                        stripped = line.lstrip()
                        if stripped.startswith("!"):
                            # Rewrite `!cmd` as a checked bash subprocess call.
                            cmd = stripped[1:].lstrip()
                            out.write(
                                f"subprocess.run(r'''{cmd}''',\n"
                                " shell=True,\n"
                                " check=True,\n"
                                " executable='/bin/bash')\n"
                            )
                        else:
                            out.write(line.rstrip() + "\n")
                    out.write("\n")
                else:
                    # Regular Python cell:
                    code = cell.source.rstrip()
                    if "ds_large = ds.limit(1_000_000)" in code:
                        # Instead of testing a large dataset in CI, test a small dataset
                        code = code.replace("ds.limit(1_000_000)", "ds.limit(10_000)")
                    # else, dump as-is
                    out.write(code + "\n\n")
def main() -> None:
    """Build the CLI, parse arguments, and run the notebook conversion."""
    arg_parser = argparse.ArgumentParser(
        description=(
            "Convert a Jupyter notebook to a Python script, preserving bash "
            "cells and '!' commands as subprocess calls unless ignored with "
            "--ignore-cmds."
        )
    )
    arg_parser.add_argument("input_nb", help="Path to the input .ipynb file")
    arg_parser.add_argument("output_py", help="Path for the output .py script")
    arg_parser.add_argument(
        "--ignore-cmds",
        action="store_true",
        help="Ignore bash cells and '!' commands",
    )
    parsed = arg_parser.parse_args()
    convert_notebook(parsed.input_nb, parsed.output_py, ignore_cmds=parsed.ignore_cmds)
# Script entry point.
if __name__ == "__main__":
    main()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/data/examples/llm_batch_inference_text/ci/nb2py.py",
"license": "Apache License 2.0",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/data/examples/llm_batch_inference_text/content/batch_inference_text.py | from typing import Any
from pprint import pprint
import ray
from ray.data.llm import build_llm_processor, vLLMEngineProcessorConfig
# Number of rows to take from the 2M-row source CSV for this run.
DATASET_LIMIT = 10_000

# Define the path to the sample CSV file hosted on S3.
# This dataset contains 2 million rows of synthetic customer data.
path = "https://llm-guide.s3.us-west-2.amazonaws.com/data/ray-data-llm/customers-2000000.csv"

# Load the CSV file into a Ray Dataset.
print("Loading dataset from remote URL...")
ds = ray.data.read_csv(path)

# Limit the dataset. If DATASET_LIMIT > dataset size, the entire dataset will be processed.
print(f"Limiting dataset to {DATASET_LIMIT} rows for initial processing.")
ds_small = ds.limit(DATASET_LIMIT)

# Repartition the dataset to enable parallelism across multiple workers (GPUs).
# By default, streaming datasets might not be optimally partitioned. Repartitioning
# splits the data into a specified number of blocks, allowing Ray to process them
# in parallel.
# Tip: Repartition count should typically be 2-4x your worker (GPU) count.
# Example: 4 GPUs → 8-16 partitions, 10 GPUs → 20-40 partitions.
# This ensures enough parallelism while avoiding excessive overhead.
num_partitions = 128
print(f"Repartitioning dataset into {num_partitions} blocks for parallelism...")
ds_small = ds_small.repartition(num_blocks=num_partitions)

# vLLM engine configuration for the batch-inference processor.
processor_config = vLLMEngineProcessorConfig(
    model_source="unsloth/Llama-3.1-8B-Instruct",
    engine_kwargs=dict(
        max_model_len=256,  # Hard cap: system prompt + user prompt + output tokens must fit within this limit
    ),
    batch_size=256,  # rows per inference batch
    accelerator_type="L4",
    concurrency=4,  # number of parallel engine workers
)
# For better output token control, restrain generation to these choices
CHOICES = [
    "Law Firm",
    "Healthcare",
    "Technology",
    "Retail",
    "Consulting",
    "Manufacturing",
    "Finance",
    "Real Estate",
    "Other",
]


# Build the payload consumed by the vLLM engine: only `messages` and
# `sampling_params` are read by the engine; other fields are ignored.
def preprocess(row: dict[str, Any]) -> dict[str, Any]:
    system_prompt = (
        "You are a helpful assistant that infers company industries. "
        "Based on the company name provided, output only the industry category. "
        f"Choose from: {', '.join(CHOICES)}."
    )
    chat = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": f"What industry is this company in: {row['Company']}"},
    ]
    decoding = dict(
        temperature=0,  # Use 0 for deterministic output
        max_tokens=16,  # Max output tokens. Industry names are short
        structured_outputs=dict(choice=CHOICES),  # Constraint generation
    )
    return dict(messages=chat, sampling_params=decoding)
# Pull the generated text out of the engine output while carrying every
# original column through unchanged.
def postprocess(row: dict[str, Any]) -> dict[str, Any]:
    out = {"inferred_industry": row["generated_text"]}
    # Same key order and override semantics as spreading **row after the key.
    out.update(row)
    return out
# Build the LLM processor with the configuration and functions.
processor = build_llm_processor(
    processor_config,
    preprocess=preprocess,
    postprocess=postprocess,
)

# Run the processor on the small dataset (lazy until materialized).
processed_small = processor(ds_small)

# Materialize the dataset to memory.
processed_small = processed_small.materialize()

print(f"\nProcessed {processed_small.count()} rows successfully.")

# Display the first 3 entries to verify the output.
sampled = processed_small.take(3)
print("\n==================GENERATED OUTPUT===============\n")
pprint(sampled)
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/data/examples/llm_batch_inference_text/content/batch_inference_text.py",
"license": "Apache License 2.0",
"lines": 89,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/data/examples/llm_batch_inference_text/content/batch_inference_text_scaled.py | from typing import Any
from pprint import pprint
import ray
from ray.data.llm import build_llm_processor, vLLMEngineProcessorConfig
# Number of rows to process in this scaled run.
DATASET_LIMIT = 1_000_000
# Define the path to the sample CSV file hosted on S3.
# This dataset contains 2 million rows of synthetic customer data.
path = "https://llm-guide.s3.us-west-2.amazonaws.com/data/ray-data-llm/customers-2000000.csv"
# Load the CSV file into a Ray Dataset.
print("Loading dataset from remote URL...")
ds = ray.data.read_csv(path)
# Limit the dataset. If DATASET_LIMIT > dataset size, the entire dataset will be processed.
print(f"Limiting dataset to {DATASET_LIMIT} rows for initial processing.")
ds_large = ds.limit(DATASET_LIMIT)
# With more compute available, more partitions enable more parallelism.
num_partitions_large = 256
print(f"Repartitioning dataset into {num_partitions_large} blocks for parallelism...")
ds_large = ds_large.repartition(num_blocks=num_partitions_large)
# vLLM engine configuration sized for the scaled run.
processor_config_large = vLLMEngineProcessorConfig(
    model_source="unsloth/Llama-3.1-8B-Instruct",
    engine_kwargs=dict(
        max_model_len=256,  # Hard cap: system prompt + user prompt + output tokens must fit within this limit
    ),
    batch_size=256,
    accelerator_type="L4",  # Or upgrade to larger GPU
    concurrency=10,  # Deploy 10 workers across 10 GPUs to maximize throughput
)
# For better output token control, restrain generation to these choices
CHOICES = [
    "Law Firm",
    "Healthcare",
    "Technology",
    "Retail",
    "Consulting",
    "Manufacturing",
    "Finance",
    "Real Estate",
    "Other",
]
# Build the vLLM request for one row. The engine consumes only `messages`
# and `sampling_params`; any other field in the returned dict is ignored.
def preprocess(row: dict[str, Any]) -> dict[str, Any]:
    system_prompt = (
        "You are a helpful assistant that infers company industries. "
        "Based on the company name provided, output only the industry category. "
        f"Choose from: {', '.join(CHOICES)}."
    )
    user_prompt = f"What industry is this company in: {row['Company']}"
    return {
        "messages": [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ],
        "sampling_params": {
            "temperature": 0,  # deterministic decoding
            "max_tokens": 16,  # industry labels are short
            "structured_outputs": {"choice": CHOICES},  # constrain generation
        },
    }
# Pull the model's answer out of the engine output row. Merging the input
# row back in carries every original dataset column alongside the new field.
def postprocess(row: dict[str, Any]) -> dict[str, Any]:
    out = {"inferred_industry": row["generated_text"]}
    out.update(row)  # keep all original columns
    return out
# Build the LLM processor with the configuration and functions.
processor_large = build_llm_processor(
    processor_config_large,
    preprocess=preprocess,
    postprocess=postprocess,
)
# Run the processor on the large dataset (lazy until materialized).
processed_large = processor_large(ds_large)
# Materialize the dataset to memory, triggering execution.
processed_large = processed_large.materialize()
print(f"\nProcessed {processed_large.count()} rows successfully.")
# Display the first 3 entries to verify the output.
sampled = processed_large.take(3)
print("\n==================GENERATED OUTPUT===============\n")
pprint(sampled)
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/data/examples/llm_batch_inference_text/content/batch_inference_text_scaled.py",
"license": "Apache License 2.0",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/data/examples/llm_batch_inference_vision/ci/nb2py.py | #!/usr/bin/env python3
import argparse
import nbformat
def convert_notebook(
    input_path: str, output_path: str, ignore_cmds: bool = False
) -> None:
    """
    Read a Jupyter notebook and write a Python script, converting all %%bash
    cells and IPython "!" commands into subprocess.run calls that raise on error.

    %pip cells are skipped entirely. When ``ignore_cmds`` is True, cells that
    contain "!" commands or start with serve/curl/anyscale commands are
    dropped instead of converted. As a CI affordance, a cell assigning
    ``ds_large = ds.limit(1_000_000)`` is rewritten to use ``ds.limit(10_000)``.

    Args:
        input_path: Path to the input ``.ipynb`` file.
        output_path: Path of the ``.py`` script to write.
        ignore_cmds: Drop shell-command cells instead of converting them.
    """
    nb = nbformat.read(input_path, as_version=4)
    with open(output_path, "w") as out:
        for cell in nb.cells:
            # Only process code cells
            if cell.cell_type != "code":
                continue
            lines = cell.source.splitlines()
            if lines:
                # Detect any IPython '!' shell commands in code lines
                has_bang = any(line.lstrip().startswith("!") for line in lines)
                # Detect %pip magic commands
                has_pip_magic = any(line.lstrip().startswith("%pip") for line in lines)
                # Start with "serve run" "serve shutdown" "curl" or "anyscale service" commands
                to_ignore_cmd = (
                    "serve run",
                    "serve shutdown",
                    "curl",
                    "anyscale service",
                )
                has_ignored_start = any(
                    line.lstrip().startswith(to_ignore_cmd) for line in lines
                )
                # Skip %pip cells entirely
                if has_pip_magic:
                    continue
                if has_bang or has_ignored_start:
                    if ignore_cmds:
                        continue
                    # Shell-command cell: emit subprocess calls that raise on error.
                    out.write("import subprocess\n")
                    for line in lines:
                        stripped = line.lstrip()
                        if stripped.startswith("!"):
                            cmd = stripped[1:].lstrip()
                            out.write(
                                f"subprocess.run(r'''{cmd}''',\n"
                                " shell=True,\n"
                                " check=True,\n"
                                " executable='/bin/bash')\n"
                            )
                        else:
                            out.write(line.rstrip() + "\n")
                    out.write("\n")
                else:
                    # Regular Python cell:
                    code = cell.source.rstrip()
                    if "ds_large = ds.limit(1_000_000)" in code:
                        # Instead of testing a large dataset in CI, test a small dataset
                        code = code.replace("ds.limit(1_000_000)", "ds.limit(10_000)")
                    # else, dump as-is
                    out.write(code + "\n\n")
def main() -> None:
    """CLI entry point: parse arguments and run the notebook conversion."""
    arg_parser = argparse.ArgumentParser(
        description="Convert a Jupyter notebook to a Python script, preserving bash cells and '!' commands as subprocess calls unless ignored with --ignore-cmds."
    )
    arg_parser.add_argument("input_nb", help="Path to the input .ipynb file")
    arg_parser.add_argument("output_py", help="Path for the output .py script")
    arg_parser.add_argument(
        "--ignore-cmds",
        action="store_true",
        help="Ignore bash cells and '!' commands",
    )
    opts = arg_parser.parse_args()
    convert_notebook(opts.input_nb, opts.output_py, ignore_cmds=opts.ignore_cmds)
# Run the converter only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/data/examples/llm_batch_inference_vision/ci/nb2py.py",
"license": "Apache License 2.0",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/data/examples/llm_batch_inference_vision/content/batch_inference_vision.py | from io import BytesIO
from typing import Any
import datasets
from PIL import Image
from pprint import pprint
import ray
from ray.data.llm import build_llm_processor, vLLMEngineProcessorConfig
# Number of images to process in this run.
DATASET_LIMIT = 10_000
# Load the BLIP3o/BLIP3o-Pretrain-Short-Caption dataset from Hugging Face with ~5M images.
print("Loading BLIP3o/BLIP3o-Pretrain-Short-Caption dataset from Hugging Face...")
hf_dataset = datasets.load_dataset("BLIP3o/BLIP3o-Pretrain-Short-Caption", split="train", streaming=True)
# Only the JPEG column is needed downstream.
hf_dataset = hf_dataset.select_columns(["jpg"])
ds = ray.data.from_huggingface(hf_dataset)
print("Dataset loaded successfully.")
# Limit the dataset. If DATASET_LIMIT > dataset size, the entire dataset will be processed.
print(f"Limiting dataset to {DATASET_LIMIT} images for initial processing.")
ds_small = ds.limit(DATASET_LIMIT)
# Repartition the dataset to enable parallelism across multiple workers (GPUs).
# By default, streaming datasets might not be optimally partitioned. Repartitioning
# splits the data into a specified number of blocks, allowing Ray to process them
# in parallel.
# Tip: Repartition count should typically be 2-4x your worker (GPU) count.
# Example: 4 GPUs → 8-16 partitions, 10 GPUs → 20-40 partitions.
# This ensures enough parallelism while avoiding excessive overhead.
num_partitions = 64
print(f"Repartitioning dataset into {num_partitions} blocks for parallelism...")
ds_small = ds_small.repartition(num_blocks=num_partitions)
# vLLM engine configuration for the vision-language model.
processor_config = vLLMEngineProcessorConfig(
    model_source="Qwen/Qwen2.5-VL-3B-Instruct",
    engine_kwargs=dict(
        max_model_len=8192
    ),
    batch_size=16,
    accelerator_type="L4",
    concurrency=4,
    has_image=True,  # Enable image input.
)
# Predicate used with Dataset.filter: keep rows whose image bytes PIL can open.
# NOTE(review): Image.open parses lazily, so some truncated files may still
# pass this check — confirm whether a full decode/verify is required.
def is_valid_image(row: dict[str, Any]) -> bool:
    raw_bytes = row['jpg']['bytes']
    try:
        Image.open(BytesIO(raw_bytes))
    except Exception:
        return False
    return True
# Turn one dataset row into a VLM captioning request (messages + sampling).
def preprocess(row: dict[str, Any]) -> dict[str, Any]:
    # Decode the raw JPEG bytes into a PIL image.
    pil_image = Image.open(BytesIO(row['jpg']['bytes']))
    # Resize to 225x225 for consistency and a predictable vision-token budget;
    # this balances quality with GPU memory. Adjust for your model if needed.
    pil_image = pil_image.resize((225, 225), Image.Resampling.BICUBIC)
    user_content = [
        {
            "type": "text",
            "text": "Describe this image in detail. Focus on the main subjects, actions, and setting."
        },
        {
            "type": "image",
            "image": pil_image  # Ray Data accepts PIL Image or image URL.
        }
    ]
    return {
        "messages": [
            {
                "role": "system",
                "content": "You are a helpful assistant that generates accurate and descriptive captions for images."
            },
            {"role": "user", "content": user_content},
        ],
        "sampling_params": {"temperature": 0.3, "max_tokens": 256},
    }
# Keep only the caption; deliberately drop the rest of the row so the large
# image payload is not carried into the output dataset.
def postprocess(row: dict[str, Any]) -> dict[str, Any]:
    caption = row["generated_text"]
    return {"generated_caption": caption}
# Build the LLM processor with the configuration and functions.
processor = build_llm_processor(
    processor_config,
    preprocess=preprocess,
    postprocess=postprocess,
)
# Filter out invalid images before processing.
ds_small_filtered = ds_small.filter(is_valid_image)
# Run the processor on the filtered dataset (lazy until materialized).
processed_small = processor(ds_small_filtered)
# Materialize the dataset to memory, triggering execution.
processed_small = processed_small.materialize()
print(f"\nProcessed {processed_small.count()} rows successfully.")
# Display the first 3 entries to verify the output.
sampled = processed_small.take(3)
print("\n==================GENERATED OUTPUT===============\n")
pprint(sampled)
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/data/examples/llm_batch_inference_vision/content/batch_inference_vision.py",
"license": "Apache License 2.0",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/data/examples/llm_batch_inference_vision/content/batch_inference_vision_scaled.py | from io import BytesIO
from typing import Any
import datasets
from PIL import Image
from pprint import pprint
import ray
from ray.data.llm import build_llm_processor, vLLMEngineProcessorConfig
# Dataset limit for this scaled example.
DATASET_LIMIT = 1_000_000
# Load the BLIP3o/BLIP3o-Pretrain-Short-Caption dataset from Hugging Face with ~5M images.
print("Loading BLIP3o/BLIP3o-Pretrain-Short-Caption dataset from Hugging Face...")
hf_dataset = datasets.load_dataset("BLIP3o/BLIP3o-Pretrain-Short-Caption", split="train", streaming=True)
# Only the JPEG column is needed downstream.
hf_dataset = hf_dataset.select_columns(["jpg"])
ds = ray.data.from_huggingface(hf_dataset)
print("Dataset loaded successfully.")
# Limit the dataset. If DATASET_LIMIT > dataset size, the entire dataset will be processed.
print(f"Limiting dataset to {DATASET_LIMIT} images for initial processing.")
ds_large = ds.limit(DATASET_LIMIT)
# With more compute available, more partitions enable more parallelism.
num_partitions_large = 128
print(f"Repartitioning dataset into {num_partitions_large} blocks for parallelism...")
ds_large = ds_large.repartition(num_blocks=num_partitions_large)
# vLLM engine configuration sized for the scaled run.
processor_config_large = vLLMEngineProcessorConfig(
    model_source="Qwen/Qwen2.5-VL-3B-Instruct",
    engine_kwargs=dict(
        max_model_len=8192,
    ),
    batch_size=16,
    accelerator_type="L4",  # Or upgrade to larger GPU
    concurrency=10,  # Increase the number of parallel workers
    has_image=True,  # Enable image input
)
# Predicate used with Dataset.filter: keep rows whose image bytes PIL can open.
# NOTE(review): Image.open parses lazily, so some truncated files may still
# pass this check — confirm whether a full decode/verify is required.
def is_valid_image(row: dict[str, Any]) -> bool:
    raw_bytes = row['jpg']['bytes']
    try:
        Image.open(BytesIO(raw_bytes))
    except Exception:
        return False
    return True
# Turn one dataset row into a VLM captioning request (messages + sampling).
def preprocess(row: dict[str, Any]) -> dict[str, Any]:
    # Decode the raw JPEG bytes into a PIL image.
    pil_image = Image.open(BytesIO(row['jpg']['bytes']))
    # Resize to 225x225 for consistency and a predictable vision-token budget;
    # this balances quality with GPU memory. Adjust for your model if needed.
    pil_image = pil_image.resize((225, 225), Image.Resampling.BICUBIC)
    user_content = [
        {
            "type": "text",
            "text": "Describe this image in detail. Focus on the main subjects, actions, and setting."
        },
        {
            "type": "image",
            "image": pil_image  # Ray Data accepts PIL Image or image URL.
        }
    ]
    return {
        "messages": [
            {
                "role": "system",
                "content": "You are a helpful assistant that generates accurate and descriptive captions for images."
            },
            {"role": "user", "content": user_content},
        ],
        "sampling_params": {"temperature": 0.3, "max_tokens": 256},
    }
# Keep only the caption; deliberately drop the rest of the row so the large
# image payload is not carried into the output dataset.
def postprocess(row: dict[str, Any]) -> dict[str, Any]:
    caption = row["generated_text"]
    return {"generated_caption": caption}
# Build the LLM processor with the configuration and functions.
processor_large = build_llm_processor(
    processor_config_large,
    preprocess=preprocess,
    postprocess=postprocess,
)
# Filter out invalid images before processing.
ds_large_filtered = ds_large.filter(is_valid_image)
# Run the processor on the filtered dataset (lazy until materialized).
processed_large = processor_large(ds_large_filtered)
# Materialize the dataset to memory, triggering execution.
processed_large = processed_large.materialize()
print(f"\nProcessed {processed_large.count()} rows successfully.")
# Display the first 3 entries to verify the output.
sampled = processed_large.take(3)
print("\n==================GENERATED OUTPUT===============\n")
pprint(sampled)
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/data/examples/llm_batch_inference_vision/content/batch_inference_vision_scaled.py",
"license": "Apache License 2.0",
"lines": 98,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/_internal/dataset_repr.py | from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import numpy as np
import ray
from ray.data.block import Block, BlockAccessor, BlockMetadata
from ray.exceptions import RayError
from ray.types import ObjectRef
if TYPE_CHECKING:
from ray.data.dataset import Dataset, Schema
# Tunables for the tabular dataset repr below.
_DATASET_REPR_ELLIPSIS = "…"  # Ellipsis marker for truncated cells/rows.
_DATASET_REPR_MAX_ROWS = 10  # Total preview row budget when materialized.
_DATASET_REPR_HEAD_ROWS = 5  # Number of head rows to show before the gap.
_DATASET_REPR_MAX_COLUMN_WIDTH = 40  # Max width per column cell in the table.
_DATASET_REPR_GET_TIMEOUT_S = 30.0  # Timeout for fetching preview blocks.
__all__ = [
    "_build_dataset_ascii_repr",
]
def _build_dataset_ascii_repr(
    dataset: "Dataset",
    schema: "Schema",
    is_materialized: bool,
) -> str:
    """Render the dataset as a multi-line tabular string.

    Args:
        dataset: The dataset being repr'd.
        schema: The dataset's schema; its columns drive the table header.
        is_materialized: Whether preview rows may be fetched from blocks.

    Returns:
        The formatted table, or the execution-plan string when the schema
        has no columns.
    """
    columns = list(schema.names)
    if not columns:
        # Nothing to tabulate: fall back to the logical plan description.
        return dataset._plan.get_plan_as_string(dataset.__class__)
    num_rows = dataset._meta_count()
    head_rows: List[List[str]] = []
    tail_rows: List[List[str]] = []
    if is_materialized:
        # Preview fetching is best-effort: on any Ray error we still render
        # the header-only table instead of failing the repr.
        try:
            head_data, tail_data, _ = _collect_materialized_rows_for_repr(
                dataset, num_rows
            )
            head_rows = _format_rows_for_repr(head_data, columns)
            tail_rows = _format_rows_for_repr(tail_data, columns)
        except RayError:
            head_rows = []
            tail_rows = []
    return _build_dataset_ascii_repr_from_rows(
        schema=schema,
        num_rows=num_rows,
        dataset_name=dataset.name,
        is_materialized=is_materialized,
        head_rows=head_rows,
        tail_rows=tail_rows,
    )
def _build_dataset_ascii_repr_from_rows(
    *,
    schema: "Schema",
    num_rows: Optional[int],
    dataset_name: Optional[str],
    is_materialized: bool,
    head_rows: List[List[str]],
    tail_rows: List[List[str]],
) -> str:
    """Render the dataset repr given schema metadata and preview rows.

    Args:
        schema: Schema providing column names and dtypes for the header.
        num_rows: Total row count, or None when unknown.
        dataset_name: Optional dataset name printed on the first line.
        is_materialized: Controls the trailing summary line.
        head_rows: Pre-formatted string cells for leading preview rows.
        tail_rows: Pre-formatted string cells for trailing preview rows.

    Returns:
        The multi-line repr: optional name line, shape line, table, summary.
    """
    columns = list(schema.names)
    num_cols = len(columns)
    shape_line = f"shape: ({num_rows if num_rows is not None else '?'}, {num_cols})"
    # Build header rows from schema.
    dtype_strings = [_repr_format_dtype(t) for t in schema.types]
    column_headers = [
        _truncate_to_cell_width(str(col), _DATASET_REPR_MAX_COLUMN_WIDTH)
        for col in columns
    ]
    dtype_headers = [
        _truncate_to_cell_width(dtype, _DATASET_REPR_MAX_COLUMN_WIDTH)
        for dtype in dtype_strings
    ]
    separator_row = ["---"] * len(columns)
    # Assemble rows, including an ellipsis gap row between head and tail
    # when both are present.
    show_gap = bool(head_rows) and bool(tail_rows)
    display_rows: List[List[str]] = []
    display_rows.extend(head_rows)
    if show_gap:
        display_rows.append([_DATASET_REPR_ELLIPSIS] * len(columns))
    display_rows.extend(tail_rows)
    # Render the table with computed column widths.
    column_widths = _compute_column_widths(
        column_headers, dtype_headers, separator_row, display_rows
    )
    table_lines = _render_table_lines(
        column_headers,
        dtype_headers,
        separator_row,
        display_rows,
        column_widths,
    )
    # Append a summary line describing row coverage.
    num_rows_shown = len(head_rows) + len(tail_rows)
    summary_line = (
        f"(Showing {num_rows_shown} of {num_rows} rows)"
        if is_materialized
        else "(Dataset isn't materialized)"
    )
    # Materialized datasets with an unknown count fall back to a '?' total.
    if is_materialized and num_rows is None:
        summary_line = f"(Showing {num_rows_shown} of ? rows)"
    components = []
    if dataset_name is not None:
        components.append(f"name: {dataset_name}")
    components.extend([shape_line, "\n".join(table_lines), summary_line])
    return "\n".join(components)
def _repr_format_dtype(dtype: object) -> str:
"""Format a dtype into a compact string for the schema row.
Dtypes may come from PyArrow, pandas/NumPy, or be plain Python types.
"""
if isinstance(dtype, type):
return dtype.__name__
name = getattr(dtype, "name", None)
if isinstance(name, str):
return name
return str(dtype)
def _collect_materialized_rows_for_repr(
    dataset: "Dataset",
    num_rows: Optional[int],
) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]], bool]:
    """Collect head/tail rows for preview and whether to show a gap row.

    Args:
        dataset: Materialized dataset whose blocks are fetched (with a
            timeout) via ``ray.get``.
        num_rows: Total row count if known; determines the head/tail split.

    Returns:
        Tuple of (head row dicts, tail row dicts, show-gap flag).
    """
    block_entries: List[Tuple[ObjectRef, BlockMetadata]] = []
    for ref_bundle in dataset.iter_internal_ref_bundles():
        block_entries.extend(zip(ref_bundle.block_refs, ref_bundle.metadata))
    if not block_entries:
        return [], [], False
    # Compute how many head/tail rows to show within the preview budget.
    head_row_limit, tail_row_limit = _determine_preview_row_targets(num_rows)
    # Cache fetched blocks so a block needed by both head and tail is only
    # transferred once.
    block_cache: Dict[ObjectRef, Block] = {}
    def _resolve_block(block_ref: ObjectRef) -> Block:
        # Fetch (with timeout) on first use; reuse the cached block after.
        if block_ref not in block_cache:
            block_cache[block_ref] = ray.get(
                block_ref, timeout=_DATASET_REPR_GET_TIMEOUT_S
            )
        return block_cache[block_ref]
    # Walk blocks front-to-back until the head budget is spent.
    head_rows: List[Dict[str, Any]] = []
    head_remaining = head_row_limit
    for block_ref, _ in block_entries:
        if head_remaining <= 0:
            break
        block = _resolve_block(block_ref)
        accessor = BlockAccessor.for_block(block)
        for row in accessor.iter_rows(public_row_format=True):
            head_rows.append(row)
            head_remaining -= 1
            if head_remaining <= 0:
                break
    # Walk blocks back-to-front for the tail; per-block slices are collected
    # in reverse and flipped at the end to restore dataset order.
    tail_rows: List[Dict[str, Any]] = []
    tail_remaining = tail_row_limit
    tail_parts: List[List[Dict[str, Any]]] = []
    if tail_remaining > 0:
        for block_ref, metadata in reversed(block_entries):
            if tail_remaining <= 0:
                break
            block = _resolve_block(block_ref)
            accessor = BlockAccessor.for_block(block)
            total_rows = metadata.num_rows
            if total_rows is None:
                # Metadata may not carry a count; fall back to the block.
                total_rows = accessor.num_rows()
            if total_rows == 0:
                continue
            # Take only the last `tail_remaining` rows of this block.
            start = max(0, total_rows - tail_remaining)
            sliced_block = accessor.slice(start, total_rows, copy=False)
            slice_accessor = BlockAccessor.for_block(sliced_block)
            block_rows = list(slice_accessor.iter_rows(public_row_format=True))
            tail_parts.append(block_rows)
            tail_remaining -= len(block_rows)
            if tail_remaining <= 0:
                break
    for part in reversed(tail_parts):
        tail_rows.extend(part)
    show_gap = bool(head_rows) and bool(tail_rows)
    return head_rows, tail_rows, show_gap
def _determine_preview_row_targets(num_rows: Optional[int]) -> Tuple[int, int]:
    """Split the preview budget into (head, tail) row counts.

    Datasets that fit in the budget (or have unknown size) are shown
    head-only; larger ones get a fixed head plus the remaining budget as
    the tail.
    """
    budget = _DATASET_REPR_MAX_ROWS
    if num_rows is None:
        return budget, 0
    if num_rows <= budget:
        return num_rows, 0
    head_count = min(_DATASET_REPR_HEAD_ROWS, budget)
    return head_count, budget - head_count
def _format_rows_for_repr(
    rows: List[Dict[str, Any]],
    column_names: List[str],
) -> List[List[str]]:
    """Convert row dicts into truncated string cells, one list per row.

    Missing columns render as ``None`` via ``dict.get``.
    """
    def _cell(row: Dict[str, Any], column: str) -> str:
        return _truncate_to_cell_width(
            _format_value(row.get(column)), _DATASET_REPR_MAX_COLUMN_WIDTH
        )

    return [[_cell(row, column) for column in column_names] for row in rows]
def _format_value(value: Any) -> str:
if isinstance(value, np.generic):
value = value.item()
return str(value).replace("\n", " ").replace("\r", " ")
def _truncate_to_cell_width(value: str, max_width: Optional[int]) -> str:
    """Truncate a single cell to the configured max width.

    Args:
        value: The cell text.
        max_width: Maximum cell width; ``None`` disables truncation.
            (Annotation fixed: the code explicitly handles ``None``.)

    Returns:
        The original text when it fits; otherwise ``max_width`` characters
        ending with an ellipsis. Non-positive widths collapse non-empty
        values to a bare ellipsis.
    """
    if max_width is None:
        return value
    if max_width <= 0:
        return _DATASET_REPR_ELLIPSIS if value else ""
    if len(value) <= max_width:
        return value
    if max_width == 1:
        # No room for content at all; show just the marker.
        return _DATASET_REPR_ELLIPSIS
    return value[: max_width - 1] + _DATASET_REPR_ELLIPSIS
def _compute_column_widths(
headers: List[str],
dtype_headers: List[str],
separator_row: List[str],
data_rows: List[List[str]],
) -> List[int]:
"""Compute per-column widths for table rendering."""
column_widths: List[int] = []
for idx in range(len(headers)):
widths = [
len(headers[idx]),
len(dtype_headers[idx]),
len(separator_row[idx]),
]
for row in data_rows:
widths.append(len(row[idx]))
column_widths.append(max(widths))
return column_widths
def _render_table_lines(
    headers: List[str],
    dtype_headers: List[str],
    separator_row: List[str],
    data_rows: List[List[str]],
    column_widths: List[int],
) -> List[str]:
    """Assemble the complete table: top border, header section, an optional
    double-ruled data section, and the bottom border."""
    lines = [
        _render_border("╭", "┬", "╮", "─", column_widths),
        _render_row(headers, column_widths),
        _render_row(separator_row, column_widths),
        _render_row(dtype_headers, column_widths),
    ]
    if data_rows:
        # A heavier rule separates the schema header from the data rows.
        lines.append(_render_border("╞", "╪", "╡", "═", column_widths))
        lines.extend(_render_row(row, column_widths) for row in data_rows)
    lines.append(_render_border("╰", "┴", "╯", "─", column_widths))
    return lines
def _render_border(
left: str, middle: str, right: str, fill: str, column_widths: List[int]
) -> str:
"""Render a table border line given column widths."""
segments = [fill * (width + 2) for width in column_widths]
return f"{left}{middle.join(segments)}{right}"
def _render_row(values: List[str], column_widths: List[int]) -> str:
"""Render a single table row with padding."""
cells = []
for idx, value in enumerate(values):
padded = value.ljust(column_widths[idx])
cells.append(f" {padded} ")
return f"│{'┆'.join(cells)}│"
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/dataset_repr.py",
"license": "Apache License 2.0",
"lines": 262,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/tests/unit/test_dataset_repr.py | import pyarrow as pa
from ray.data._internal.dataset_repr import _build_dataset_ascii_repr_from_rows
from ray.data.dataset import Schema
def test_dataset_repr_from_rows_not_materialized():
    """Unmaterialized datasets render a header-only table plus a notice."""
    schema = Schema(pa.schema([("a", pa.int64()), ("b", pa.string())]))
    text = _build_dataset_ascii_repr_from_rows(
        schema=schema,
        num_rows=5,
        dataset_name="test_ds",
        is_materialized=False,
        head_rows=[],
        tail_rows=[],
    )
    # Name and shape lines, the schema table with no data section, and the
    # not-materialized notice instead of a row-coverage summary.
    assert text == (
        "name: test_ds\n"
        "shape: (5, 2)\n"
        "╭───────┬────────╮\n"
        "│ a     ┆ b      │\n"
        "│ ---   ┆ ---    │\n"
        "│ int64 ┆ string │\n"
        "╰───────┴────────╯\n"
        "(Dataset isn't materialized)"
    )
def test_dataset_repr_from_rows_gap():
    """Head and tail previews are separated by an ellipsis gap row."""
    schema = Schema(pa.schema([("id", pa.int64())]))
    text = _build_dataset_ascii_repr_from_rows(
        schema=schema,
        num_rows=12,
        dataset_name=None,
        is_materialized=True,
        head_rows=[["0"], ["1"]],
        tail_rows=[["10"], ["11"]],
    )
    # No name line (dataset_name=None); data section is double-ruled and the
    # gap row "…" sits between head and tail rows.
    assert text == (
        "shape: (12, 1)\n"
        "╭───────╮\n"
        "│ id    │\n"
        "│ ---   │\n"
        "│ int64 │\n"
        "╞═══════╡\n"
        "│ 0     │\n"
        "│ 1     │\n"
        "│ …     │\n"
        "│ 10    │\n"
        "│ 11    │\n"
        "╰───────╯\n"
        "(Showing 4 of 12 rows)"
    )
# Allow running this test module directly with pytest.
if __name__ == "__main__":
    import sys
    import pytest
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/unit/test_dataset_repr.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/_internal/progress/logging_progress.py | import dataclasses
import logging
import time
import typing
from collections import defaultdict
from typing import Callable, Dict, List, Optional
from ray._common.utils import env_integer
from ray.data._internal.execution.operators.input_data_buffer import InputDataBuffer
from ray.data._internal.execution.operators.sub_progress import SubProgressBarMixin
from ray.data._internal.execution.streaming_executor_state import (
format_op_state_summary,
)
from ray.data._internal.progress.base_progress import (
BaseExecutionProgressManager,
BaseProgressBar,
NoopSubProgressBar,
)
from ray.data._internal.progress.utils import truncate_operator_name
if typing.TYPE_CHECKING:
from ray.data._internal.execution.resource_manager import ResourceManager
from ray.data._internal.execution.streaming_executor_state import OpState, Topology
logger = logging.getLogger(__name__)
@dataclasses.dataclass
class _LoggingMetrics:
    """Counters backing one logged progress line."""

    name: str  # display label (sub-progress rows carry a " - " prefix)
    desc: Optional[str]  # extra status line logged under the counter, if any
    completed: int  # rows completed so far
    total: Optional[int]  # expected total rows; None when unknown
class LoggingSubProgressBar(BaseProgressBar):
    """Progress-bar shim that only accumulates counters for log output.

    Sub-progress updates arrive through the standard ``BaseProgressBar``
    interface, but nothing is rendered here: operators drive sub-progress
    while operator/total progress is driven by the ``StreamingExecutor``.
    The manager reads the counters via :meth:`get_logging_metrics` so all
    progress is logged centrally, keeping log lines ordered.
    """

    def __init__(
        self,
        name: str,
        total: Optional[int] = None,
        max_name_length: int = 100,
    ):
        """Track counters for one sub-progress bar.

        Args:
            name: Name of the sub-progress bar.
            total: Total number of output rows; None when unknown.
            max_name_length: Accepted for interface parity; not used here.
        """
        del max_name_length  # unused
        self._name = name
        self._completed = 0
        self._total = total

    def set_description(self, name: str) -> None:
        """No-op: descriptions are not rendered by the logging backend."""
        pass

    def get_description(self) -> str:
        """Always empty: descriptions are not tracked by the logging backend."""
        return ""

    def update(self, increment: int = 0, total: Optional[int] = None):
        """Advance the completed count and optionally refresh the total."""
        self._completed += increment
        if total is not None:
            self._total = total

    def get_logging_metrics(self) -> _LoggingMetrics:
        """Snapshot the counters, indenting the name as a sub-progress row."""
        return _LoggingMetrics(
            name=f" - {self._name}",
            desc=None,
            completed=self._completed,
            total=self._total,
        )
class LoggingExecutionProgressManager(BaseExecutionProgressManager):
    """Execution progress display for non-tty situations, preventing
    spamming of progress reporting."""

    # Refer to following issues for more context about this feature:
    # https://github.com/ray-project/ray/issues/60083
    # https://github.com/ray-project/ray/issues/57734

    # This progress manager needs to refresh (log) based on elapsed time
    # not scheduling steps. This elapsed time handling is done within
    # this class.
    TOTAL_PROGRESS_REFRESH_EVERY_N_STEPS = 1
    # Time interval (seconds) in which progress is logged to console again.
    LOG_REPORT_INTERVAL_SEC = env_integer("RAY_DATA_NON_TTY_PROGRESS_LOG_INTERVAL", 10)

    def __init__(
        self,
        dataset_id: str,
        topology: "Topology",
        show_op_progress: bool,
        verbose_progress: bool,
        *,
        _get_time: Callable[[], float] = time.time,
    ):
        """Set up per-operator and sub-progress metric trackers.

        Args:
            dataset_id: Identifier printed in the log delimiter lines.
            topology: Executor topology whose operator states are tracked.
            show_op_progress: Whether to track operator-level progress.
            verbose_progress: Also track operators without sub-progress bars.
            _get_time: Clock override for testing; defaults to ``time.time``.
        """
        self._dataset_id = dataset_id
        self._topology = topology
        self._get_time = _get_time
        # Backdate the last log so the first refresh() logs immediately.
        self._last_log_time = self._get_time() - self.LOG_REPORT_INTERVAL_SEC
        self._global_progress_metric = _LoggingMetrics(
            name="Total Progress", desc=None, completed=0, total=None
        )
        self._op_progress_metrics: Dict["OpState", _LoggingMetrics] = {}
        self._sub_progress_metrics: Dict[
            "OpState", List[LoggingSubProgressBar]
        ] = defaultdict(list)
        for state in self._topology.values():
            op = state.op
            if isinstance(op, InputDataBuffer):
                continue
            # Falls back to 1 when the total is unknown or zero.
            total = op.num_output_rows_total() or 1
            contains_sub_progress_bars = isinstance(op, SubProgressBarMixin)
            sub_progress_bar_enabled = show_op_progress and (
                contains_sub_progress_bars or verbose_progress
            )
            if sub_progress_bar_enabled:
                # NOTE(review): MAX_NAME_LENGTH is assumed to come from the
                # BaseExecutionProgressManager base class — confirm.
                self._op_progress_metrics[state] = _LoggingMetrics(
                    name=truncate_operator_name(op.name, self.MAX_NAME_LENGTH),
                    desc=None,
                    completed=0,
                    total=total,
                )
            if not contains_sub_progress_bars:
                continue
            sub_pg_names = op.get_sub_progress_bar_names()
            if sub_pg_names is None:
                continue
            # Register a counting bar per sub-progress name when enabled;
            # otherwise hand the operator a no-op bar.
            for name in sub_pg_names:
                if sub_progress_bar_enabled:
                    pg = LoggingSubProgressBar(
                        name=name, total=total, max_name_length=self.MAX_NAME_LENGTH
                    )
                    self._sub_progress_metrics[state].append(pg)
                else:
                    pg = NoopSubProgressBar(
                        name=name, max_name_length=self.MAX_NAME_LENGTH
                    )
                op.set_sub_progress_bar(name, pg)

    # Management

    def start(self):
        """No-op: the logging backend needs no separate startup step."""
        # logging progress manager doesn't need separate start
        pass

    def refresh(self):
        """Log all progress metrics, at most once per LOG_REPORT_INTERVAL_SEC."""
        current_time = self._get_time()
        if current_time - self._last_log_time < self.LOG_REPORT_INTERVAL_SEC:
            return
        self._last_log_time = current_time
        # starting delimiter
        firstline = f"======= Running Dataset: {self._dataset_id} ======="
        lastline = "=" * len(firstline)
        logger.info(firstline)
        # log global progress
        _log_global_progress(self._global_progress_metric)
        # log operator-level progress
        if len(self._op_progress_metrics.keys()) > 0:
            logger.info("")
        for opstate in self._topology.values():
            metrics = self._op_progress_metrics.get(opstate)
            if metrics is None:
                continue
            _log_op_or_sub_progress(metrics)
            for pg in self._sub_progress_metrics[opstate]:
                _log_op_or_sub_progress(pg.get_logging_metrics())
        # finish logging
        logger.info(lastline)

    def close_with_finishing_description(self, desc: str, success: bool):
        """No-op: the final summary is logged by the StreamingExecutor."""
        # We log in StreamingExecutor. No need for duplicate logging.
        pass

    # Total Progress

    def update_total_progress(self, new_rows: int, total_rows: Optional[int]):
        """Accumulate global completed rows; refresh the total when known."""
        if total_rows is not None:
            self._global_progress_metric.total = total_rows
        self._global_progress_metric.completed += new_rows

    def update_total_resource_status(self, resource_status: str):
        """Record the resource summary logged under total progress."""
        self._global_progress_metric.desc = resource_status

    # Operator Progress

    def update_operator_progress(
        self, opstate: "OpState", resource_manager: "ResourceManager"
    ):
        """Sync one operator's metrics from its state; no-op if untracked."""
        op_metrics = self._op_progress_metrics.get(opstate)
        if op_metrics is not None:
            op_metrics.completed = opstate.op.metrics.row_outputs_taken
            total = opstate.op.num_output_rows_total()
            if total is not None:
                op_metrics.total = total
            op_metrics.desc = format_op_state_summary(opstate, resource_manager)
def _format_progress(m: _LoggingMetrics) -> str:
    """Format one counter as "name: completed/total".

    NOTE: a total of 0 (like None) renders as '?' because of the `or`.
    """
    return f"{m.name}: {m.completed}/{m.total or '?'}"
def _log_global_progress(m: _LoggingMetrics):
    """Log the overall progress counter, then its status line, if any."""
    logger.info(_format_progress(m))
    if m.desc is not None:
        logger.info(m.desc)
def _log_op_or_sub_progress(m: _LoggingMetrics):
    """Log an operator/sub-progress counter; its status line is indented."""
    logger.info(_format_progress(m))
    if m.desc is not None:
        logger.info(f" {m.desc}")
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/progress/logging_progress.py",
"license": "Apache License 2.0",
"lines": 189,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/tests/test_progress_manager.py | import builtins
from unittest.mock import MagicMock, patch
import pytest
import ray
from ray.data._internal.progress import get_progress_manager
from ray.data._internal.progress.base_progress import (
NoopExecutionProgressManager,
)
from ray.data._internal.progress.logging_progress import (
LoggingExecutionProgressManager,
)
from ray.data._internal.progress.rich_progress import (
RichExecutionProgressManager,
)
from ray.data._internal.progress.tqdm_progress import (
TqdmExecutionProgressManager,
)
from ray.data.context import DataContext
from ray.data.tests.conftest import * # noqa
class TestGetProgressManager:
    """Tests for `get_progress_manager`'s manager-selection logic.

    Each test forces a combination of DataContext flags / terminal state and
    asserts which ExecutionProgressManager implementation is chosen.
    """

    @pytest.fixture
    def mock_topology(self):
        """Create a mock Topology object that supports iteration."""
        topology = MagicMock()
        # Make it iterable by having .values() return an empty list
        topology.values.return_value = []
        return topology

    @pytest.fixture
    def setup_ray_worker(self):
        """Setup Ray worker state."""
        # Pretend the current process is a Ray worker (not a driver script).
        with patch("ray._private.worker.global_worker") as mock_worker:
            mock_worker.mode = ray._private.worker.WORKER_MODE
            yield mock_worker

    def test_progress_bars_disabled_uses_noop(
        self, mock_topology, restore_data_context
    ):
        """Test that NoopExecutionProgressManager is returned when progress bars are disabled."""
        ctx = DataContext.get_current()
        ctx.enable_progress_bars = False
        manager = get_progress_manager(ctx, "test_id", mock_topology, False)
        assert isinstance(manager, NoopExecutionProgressManager)

    @patch("ray.data._internal.progress.logger")
    def test_operator_progress_disabled_logs_warning(
        self, mock_logger, mock_topology, restore_data_context
    ):
        """Test warning when operator progress bars are disabled."""
        ctx = DataContext.get_current()
        ctx.enable_operator_progress_bars = False
        with patch("sys.stdout.isatty", return_value=True):
            manager = get_progress_manager(ctx, "test_id", mock_topology, False)
            # should still create some non-noop progress manager.
            assert not isinstance(manager, NoopExecutionProgressManager)
            mock_logger.warning.assert_any_call(
                "Progress bars for operators disabled. To enable, "
                "set `ray.data.DataContext.get_current()."
                "enable_operator_progress_bars = True`."
            )

    @patch("sys.stdout.isatty", return_value=False)
    def test_non_atty_uses_logging_progress(
        self, mock_isatty, mock_topology, restore_data_context
    ):
        """Test that LoggingExecutionProgressManager is used for non-interactive terminals."""
        ctx = DataContext.get_current()
        ctx.use_ray_tqdm = False
        # SCRIPT_MODE: a plain driver script, not a Ray worker.
        with patch("ray._private.worker.global_worker") as mock_worker:
            mock_worker.mode = ray._private.worker.SCRIPT_MODE
            manager = get_progress_manager(ctx, "test_id", mock_topology, False)
            assert isinstance(manager, LoggingExecutionProgressManager)

    @patch("sys.stdout.isatty", return_value=False)
    def test_ray_tqdm_in_worker_uses_tqdm(
        self, mock_isatty, mock_topology, setup_ray_worker, restore_data_context
    ):
        """Test that TqdmExecutionProgressManager is used when use_ray_tqdm is True in Ray worker."""
        ctx = DataContext.get_current()
        ctx.use_ray_tqdm = True
        manager = get_progress_manager(ctx, "test_id", mock_topology, False)
        assert isinstance(manager, TqdmExecutionProgressManager)

    @patch("sys.stdout.isatty", return_value=True)
    def test_tqdm_when_rich_disabled(
        self, mock_isatty, mock_topology, restore_data_context
    ):
        """Test that TqdmExecutionProgressManager is used when rich is disabled."""
        ctx = DataContext.get_current()
        ctx.enable_rich_progress_bars = False
        ctx.use_ray_tqdm = False  # this combo was tested above.
        manager = get_progress_manager(ctx, "test_id", mock_topology, False)
        assert isinstance(manager, TqdmExecutionProgressManager)

    @patch("sys.stdout.isatty", return_value=True)
    def test_tqdm_when_use_ray_tqdm_enabled(
        self, mock_isatty, mock_topology, restore_data_context
    ):
        """Test that TqdmExecutionProgressManager is used when use_ray_tqdm is True,
        even if RichExecutionProgressManager is enabled."""
        ctx = DataContext.get_current()
        ctx.enable_rich_progress_bars = True
        ctx.use_ray_tqdm = True
        manager = get_progress_manager(ctx, "test_id", mock_topology, False)
        assert isinstance(manager, TqdmExecutionProgressManager)

    @patch("sys.stdout.isatty", return_value=True)
    def test_tqdm_progress_default(self, mock_isatty, mock_topology):
        """Test that TqdmExecutionProgressManager is used by default in interactive terminal.
        Currently, RichExecutionProgressManager is considered experimental. Change this test
        to have default as rich progress reporting once it becomes an official api."""
        # Fresh DataContext (not get_current) so only defaults apply.
        ctx = DataContext()
        manager = get_progress_manager(ctx, "test_id", mock_topology, False)
        assert isinstance(manager, TqdmExecutionProgressManager)

    @patch("sys.stdout.isatty", return_value=True)
    @patch("ray.data._internal.progress.logger")
    def test_rich_import_error_fallback(
        self, mock_logger, mock_isatty, mock_topology, restore_data_context
    ):
        """Test fallback to NoopExecutionProgressManager when rich import fails."""
        real_import = builtins.__import__

        # Fail only the rich_progress import; delegate everything else.
        def mock_import(name, *args, **kwargs):
            if "rich_progress" in name:
                raise ImportError("No module named 'rich'")
            return real_import(name, *args, **kwargs)

        with patch("builtins.__import__", side_effect=mock_import):
            ctx = DataContext.get_current()
            ctx.enable_progress_bars = True
            ctx.enable_rich_progress_bars = True
            ctx.use_ray_tqdm = False
            manager = get_progress_manager(ctx, "test_id", mock_topology, False)
            assert isinstance(manager, NoopExecutionProgressManager)
            mock_logger.warning.assert_any_call(
                "[dataset]: Run `pip install rich` to enable progress reporting."
            )

    @pytest.mark.parametrize(
        "enable_progress,enable_op_progress,expected_type",
        [
            (False, False, NoopExecutionProgressManager),
            (False, True, NoopExecutionProgressManager),
            (True, False, RichExecutionProgressManager),
            (True, True, RichExecutionProgressManager),
        ],
    )
    @patch("sys.stdout.isatty", return_value=True)
    def test_progress_toggle_flag_combinations(
        self,
        mock_isatty,
        mock_topology,
        enable_progress,
        enable_op_progress,
        expected_type,
        restore_data_context,
    ):
        """Test various combinations of progress bar settings."""
        ctx = DataContext.get_current()
        ctx.enable_rich_progress_bars = True
        ctx.use_ray_tqdm = False
        ctx.enable_progress_bars = enable_progress
        ctx.enable_operator_progress_bars = enable_op_progress
        manager = get_progress_manager(ctx, "test_id", mock_topology, False)
        assert isinstance(manager, expected_type)
class TestLoggingProgressManager:
    """Behavioral tests for LoggingExecutionProgressManager's log throttling."""

    @pytest.fixture
    def mock_topology(self):
        """Create a mock Topology object that supports iteration."""
        topology = MagicMock()
        # Make it iterable by having .values() return an empty list
        topology.values.return_value = []
        return topology

    @patch("sys.stdout.isatty", return_value=False)
    @patch("ray.data._internal.progress.logging_progress.logger")
    def test_logging_progress_manager_properly_logs_per_interval(
        self, mock_logger, mock_isatty, mock_topology, restore_data_context
    ):
        """Test logging progress manager logs correct output based on time intervals."""
        ctx = DataContext.get_current()
        ctx.enable_progress_bars = True
        ctx.enable_operator_progress_bars = True

        # Fake clock: the lambda closes over `current_time`, so bumping the
        # local below advances the manager's notion of time.
        current_time = 0
        pg = LoggingExecutionProgressManager(
            "dataset_123", mock_topology, False, False, _get_time=lambda: current_time
        )

        # Initial logging of progress
        mock_logger.info.reset_mock()
        pg.refresh()
        mock_logger.info.assert_any_call("======= Running Dataset: dataset_123 =======")
        mock_logger.info.assert_any_call("Total Progress: 0/?")

        # Only 5 seconds passed from previous log, so logging doesn't occur
        current_time += 5
        mock_logger.info.reset_mock()
        pg.update_total_progress(1, 10)
        pg.refresh()
        assert mock_logger.info.call_count == 0

        # 10 seconds has passed, so must log previous progress.
        current_time += 10
        mock_logger.info.reset_mock()
        pg.refresh()
        mock_logger.info.assert_any_call("======= Running Dataset: dataset_123 =======")
        mock_logger.info.assert_any_call("Total Progress: 1/10")
if __name__ == "__main__":
    import sys

    # Allow running this test module directly: `python test_progress_manager.py`.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_progress_manager.py",
"license": "Apache License 2.0",
"lines": 195,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/test_torch_iter_batches.py | import numpy as np
import pandas as pd
import pytest
import torch
import ray
from ray.data.tests.conftest import * # noqa
from ray.tests.conftest import * # noqa
def test_iter_torch_batches(ray_start_10_cpus_shared):
    """iter_torch_batches must yield every input row exactly once, each epoch."""
    frames = [
        pd.DataFrame(
            {"one": [1, 2, 3], "two": [1.0, 2.0, 3.0], "label": [1.0, 2.0, 3.0]}
        ),
        pd.DataFrame(
            {"one": [4, 5, 6], "two": [4.0, 5.0, 6.0], "label": [4.0, 5.0, 6.0]}
        ),
        pd.DataFrame({"one": [7, 8], "two": [7.0, 8.0], "label": [7.0, 8.0]}),
    ]
    expected = pd.concat(frames)
    ds = ray.data.from_pandas(frames)

    # Iterate twice to verify the dataset can be consumed across epochs.
    for _ in range(2):
        collected = [
            torch.stack(
                (batch["one"], batch["two"], batch["label"]),
                dim=1,
            ).numpy()
            for batch in ds.iter_torch_batches(batch_size=3)
        ]
        stacked = np.concatenate(collected)
        # Batch ordering is not guaranteed, so compare sorted values.
        np.testing.assert_array_equal(np.sort(expected.values), np.sort(stacked))
def test_iter_torch_batches_tensor_ds(ray_start_10_cpus_shared):
    """Tensor datasets must round-trip through iter_torch_batches unchanged."""
    first = np.arange(12).reshape((3, 2, 2))
    second = np.arange(12, 24).reshape((3, 2, 2))
    expected = np.concatenate((first, second))
    ds = ray.data.from_numpy([first, second])

    # Two epochs: the dataset must be re-iterable.
    for _ in range(2):
        chunks = [
            batch["data"].numpy() for batch in ds.iter_torch_batches(batch_size=2)
        ]
        np.testing.assert_array_equal(expected, np.concatenate(chunks))
# This test catches an error in stream_split_iterator dealing with empty blocks,
# which is difficult to reproduce outside of TorchTrainer.
def test_torch_trainer_crash(ray_start_10_cpus_shared):
    """Regression test: splitting a materialized dataset across 2 train workers."""
    from ray import train
    from ray.train import ScalingConfig
    from ray.train.torch import TorchTrainer

    ray.data.DataContext.get_current().execution_options.verbose_progress = True

    train_ds = ray.data.range_tensor(100).materialize()

    def train_loop_per_worker():
        shard = train.get_dataset_shard("train")
        for _ in range(2):
            rows_seen = 0
            for batch in shard.iter_batches():
                rows_seen += len(batch["data"])
            # Each of the 2 workers must see exactly half the 100 rows.
            assert rows_seen == 50

    TorchTrainer(
        train_loop_per_worker,
        scaling_config=ScalingConfig(num_workers=2),
        datasets={"train": train_ds},
    ).fit()
if __name__ == "__main__":
    import sys

    # Allow running this test module directly: `python test_torch_iter_batches.py`.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_torch_iter_batches.py",
"license": "Apache License 2.0",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:rllib/examples/algorithms/impala/tictactoe_impala.py | """Example showing how to run multi-agent IMPALA on TicTacToe with self-play.
This example demonstrates multi-agent reinforcement learning using IMPALA on a
TicTacToe environment. The setup includes trainable policies that learn to play
against each other and a frozen random policy that provides diverse opponents.
This self-play with random opponents approach helps prevent overfitting to a
single opponent strategy.
This example:
- trains multiple policies on the TicTacToe multi-agent environment
- uses a RandomRLModule as a frozen opponent that is not trained
- randomly maps agents to policies (including the random policy) each episode
- demonstrates MultiRLModuleSpec for configuring multiple policies
- uses 4 env runners by default for parallel experience collection
How to run this script
----------------------
`python tictactoe_impala.py [options]`
To run with default settings (5 trainable agents):
`python tictactoe_impala.py`
To run with a different number of trainable agents:
`python tictactoe_impala.py --num-agents=4`
To scale up with distributed learning using multiple learners and env-runners:
`python tictactoe_impala.py --num-learners=2 --num-env-runners=8`
To use a GPU-based learner add the number of GPUs per learner:
`python tictactoe_impala.py --num-learners=1 --num-gpus-per-learner=1`
For debugging, use the following additional command line options
`--no-tune --num-env-runners=0 --num-learners=0`
which should allow you to set breakpoints anywhere in the RLlib code and
have the execution stop there for inspection and debugging.
By setting `--num-learners=0` and `--num-env-runners=0` will make them run locally
instead of as remote Ray Actors where breakpoints aren't possible.
For logging to your WandB account, use:
`--wandb-key=[your WandB API key]
--wandb-project=[some project name]
--wandb-run-name=[optional: WandB run name (within the defined project)]`
Results to expect
-----------------
Five trainable policies (p0..p4 with the default `--num-agents=5`) plus the frozen
random policy are randomly paired against each other each episode. Training stops
once policy p0 reaches a mean episode return of -0.5 (the default `--stop-reward`)
or after 2 million timesteps. A return close to 0 or positive indicates
the policies are learning to win or draw more often than they lose.
"""
import random
from ray.air.constants import TRAINING_ITERATION
from ray.rllib.algorithms.impala import IMPALAConfig
from ray.rllib.core.rl_module import MultiRLModuleSpec, RLModuleSpec
from ray.rllib.core.rl_module.default_model_config import DefaultModelConfig
from ray.rllib.examples.envs.classes.multi_agent.tic_tac_toe import TicTacToe
from ray.rllib.examples.rl_modules.classes.random_rlm import RandomRLModule
from ray.rllib.examples.utils import (
add_rllib_example_script_args,
run_rllib_example_script_experiment,
)
from ray.rllib.utils.metrics import (
ENV_RUNNER_RESULTS,
EPISODE_MODULE_RETURN_MEAN,
NUM_ENV_STEPS_SAMPLED_LIFETIME,
)
# CLI: shared example-script flags, with defaults tuned for this example.
parser = add_rllib_example_script_args(
    default_reward=-0.5,
    default_timesteps=2_000_000,
)
parser.set_defaults(
    num_env_runners=4,
    num_envs_per_env_runner=3,
    num_learners=1,
    num_agents=5,
)
args = parser.parse_args()

config = (
    IMPALAConfig()
    .environment(TicTacToe)
    .env_runners(
        num_env_runners=args.num_env_runners,
        num_envs_per_env_runner=args.num_envs_per_env_runner,
    )
    .learners(
        num_learners=args.num_learners,
    )
    .training(
        train_batch_size_per_learner=1000,
        grad_clip=30.0,
        grad_clip_by="global_norm",
        lr=0.0005,
        vf_loss_coeff=0.01,
        entropy_coeff=0.0,
    )
    .rl_module(
        # One RLModule per trainable policy (p0..p{n-1}) plus a frozen
        # RandomRLModule acting as a diverse opponent.
        rl_module_spec=MultiRLModuleSpec(
            rl_module_specs=(
                {
                    f"p{i}": RLModuleSpec(
                        model_config=DefaultModelConfig(vf_share_layers=True),
                    )
                    for i in range(args.num_agents)
                }
                | {"random": RLModuleSpec(module_class=RandomRLModule)}
            ),
        ),
    )
    .multi_agent(
        policies={f"p{i}" for i in range(args.num_agents)} | {"random"},
        # Map each agent to a uniformly random policy (including "random").
        policy_mapping_fn=lambda aid, eps, **kw: (
            random.choice([f"p{i}" for i in range(args.num_agents)] + ["random"])
        ),
        # The random opponent stays frozen; only p0..p{n-1} receive updates.
        policies_to_train=[f"p{i}" for i in range(args.num_agents)],
    )
)

# Stop on p0's mean return, the timestep budget, or the iteration budget,
# whichever is hit first.
stop = {
    f"{ENV_RUNNER_RESULTS}/{EPISODE_MODULE_RETURN_MEAN}/p0": args.stop_reward,
    f"{ENV_RUNNER_RESULTS}/{NUM_ENV_STEPS_SAMPLED_LIFETIME}": args.stop_timesteps,
    TRAINING_ITERATION: args.stop_iters,
}
# p0's mean return is also the success criterion reported by the runner.
success_metric = {
    f"{ENV_RUNNER_RESULTS}/{EPISODE_MODULE_RETURN_MEAN}/p0": args.stop_reward
}


if __name__ == "__main__":
    run_rllib_example_script_experiment(
        config, args, stop=stop, success_metric=success_metric
    )
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/examples/algorithms/impala/tictactoe_impala.py",
"license": "Apache License 2.0",
"lines": 118,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:ci/ray_ci/automation/push_ray_image.py | import logging
import sys
from datetime import datetime
from typing import List
import click
from ci.ray_ci.automation.image_tags_lib import (
ImageTagsError,
copy_image,
format_platform_tag,
get_platform_suffixes,
get_python_suffixes,
image_exists,
)
from ci.ray_ci.configs import (
ARCHITECTURE,
PYTHON_VERSIONS,
)
from ci.ray_ci.docker_container import (
PLATFORMS_RAY,
RayType,
)
from ci.ray_ci.ray_image import IMAGE_TYPE_CONFIG, RayImage, RayImageError
from ci.ray_ci.utils import ci_init, ecr_docker_login
from ray_release.configs.global_config import get_global_config
# Image types that may be published (the keys of IMAGE_TYPE_CONFIG).
VALID_IMAGE_TYPES = list(IMAGE_TYPE_CONFIG.keys())

# Message-only log lines on stdout, so CI logs stay clean and readable.
logging.basicConfig(
    level=logging.INFO,
    format="%(message)s",
    stream=sys.stdout,
)
logger = logging.getLogger(__name__)
class PushRayImageError(Exception):
    """Error raised when pushing ray images fails.

    Raised for invalid image configurations, source images missing from the
    Wanda cache, and failed crane copies.
    """
# Re-export for backward compatibility with tests
def compact_cuda_suffix(platform: str) -> str:
    """Convert a CUDA platform string to compact suffix (e.g. cu12.1.1-cudnn8 -> -cu121)."""
    # Thin delegation to the shared implementation in image_tags_lib.
    return format_platform_tag(platform)
class RayImagePushContext:
    """Context for publishing a ray image from Wanda cache to Docker Hub."""

    # Inputs describing the image variant and the CI build that produced it.
    ray_type: RayType
    python_version: str
    platform: str
    architecture: str
    branch: str
    commit: str
    rayci_schedule: str
    rayci_build_id: str
    pull_request: str  # buildkite uses "false" or number string

    # Computed fields (set in __init__)
    arch_suffix: str
    wanda_tag: str
    docker_hub_repo: str

    def __init__(
        self,
        ray_type: RayType,
        python_version: str,
        platform: str,
        architecture: str,
        branch: str,
        commit: str,
        rayci_schedule: str,
        rayci_build_id: str,
        pull_request: str,
    ) -> None:
        self.ray_type = ray_type
        self.python_version = python_version
        self.platform = platform
        self.architecture = architecture
        self.branch = branch
        self.commit = commit
        self.rayci_schedule = rayci_schedule
        self.rayci_build_id = rayci_build_id
        self.pull_request = pull_request

        # RayImage encapsulates naming rules (repo, suffixes) for this variant.
        self.ray_image = RayImage(
            image_type=ray_type.value,
            python_version=python_version,
            platform=platform,
            architecture=architecture,
        )
        self.arch_suffix = self.ray_image.arch_suffix
        # Tag under which Wanda cached this image for the current CI build.
        self.wanda_tag = f"{rayci_build_id}-{self.wanda_image_name()}"
        self.docker_hub_repo = f"rayproject/{self.ray_image.repo}"

    def assert_published_image_type(self) -> None:
        """Raise PushRayImageError if this image configuration is not publishable."""
        try:
            self.ray_image.validate()
        except RayImageError as e:
            raise PushRayImageError(str(e)) from e

    def destination_tags(self) -> List[str]:
        """
        Compute the destination tags for this context.

        Tags are formed as:
            {version}{variation}{python_suffix}{platform}{architecture_suffix}

        For example:
            - nightly.260107.abc123-py310-cpu
            - nightly-extra-py310-cu121
            - nightly.260107.abc123-extra-py310-gpu
            - 2.53.0.abc123-py310-cu121
            - 2.53.0.abc123-extra-py310-cu121
        """
        # Cartesian product of all version, platform, and python variants.
        tags = []
        for version in self._versions():
            for plat in self._platform_suffixes():
                for py in self._python_suffixes():
                    tags.append(
                        f"{version}{self._variation_suffix()}{py}{plat}{self.arch_suffix}"
                    )
        return tags

    def _versions(self) -> List[str]:
        """Compute version tags based on branch/schedule/PR status."""
        is_master = self.branch == "master"
        is_nightly = self.rayci_schedule == "nightly"
        is_pull_request = self.pull_request != "false"
        is_release = self.branch and self.branch.startswith("releases/")

        sha_tag = self.commit[:6]
        formatted_date = datetime.now().strftime("%y%m%d")

        # Note: master is checked before the PR case, so master builds always
        # get sha/build-id tags even if BUILDKITE_PULL_REQUEST is set.
        if is_master:
            if is_nightly:
                return [f"nightly.{formatted_date}.{sha_tag}", "nightly"]
            return [sha_tag, self.rayci_build_id]
        elif is_release:
            release_name = self.branch[len("releases/") :]
            return [f"{release_name}.{sha_tag}"]
        elif is_pull_request:
            return [f"pr-{self.pull_request}.{sha_tag}", self.rayci_build_id]
        else:
            return [sha_tag, self.rayci_build_id]

    def wanda_image_name(self) -> str:
        """Get the wanda source image name for this context."""
        return self.ray_image.wanda_image_name

    def _variation_suffix(self) -> str:
        """Get -extra suffix for extra image types."""
        return self.ray_image.variation_suffix

    def _python_suffixes(self) -> List[str]:
        """Get python version suffixes (includes empty for default version)."""
        return get_python_suffixes(self.python_version)

    def _platform_suffixes(self) -> List[str]:
        """Get platform suffixes (includes aliases like -gpu for GPU_PLATFORM)."""
        return get_platform_suffixes(self.platform, self.ray_type.value)
def _image_exists(tag: str) -> bool:
    """Check if a container image manifest exists using crane.

    Thin module-level wrapper around image_tags_lib.image_exists.
    """
    return image_exists(tag)
def _copy_image(reference: str, destination: str, dry_run: bool = False) -> None:
    """Copy a container image from source to destination using crane.

    Args:
        reference: Fully qualified source image reference.
        destination: Fully qualified destination image reference.
        dry_run: Forwarded to `copy_image` to suppress the actual push.

    Raises:
        PushRayImageError: If the underlying crane copy fails.
    """
    try:
        copy_image(reference, destination, dry_run)
    except ImageTagsError as e:
        # Chain the cause so the crane failure's traceback is preserved
        # (consistent with RayImagePushContext.assert_published_image_type).
        raise PushRayImageError(str(e)) from e
def _should_upload(pipeline_id: str, branch: str, rayci_schedule: str) -> bool:
    """
    Decide whether pushing images is allowed in the current CI context.

    Mirrors the logic from RayDockerContainer._should_upload() to prevent
    accidental pushes from feature branches or non-postmerge pipelines.

    Returns True only when the pipeline is a postmerge pipeline AND the
    branch is releases/* OR (master with a nightly schedule).
    """
    if pipeline_id not in get_global_config()["ci_pipeline_postmerge"]:
        logger.info(
            f"Pipeline {pipeline_id} is not a postmerge pipeline, skipping upload"
        )
        return False

    is_release_branch = branch.startswith("releases/")
    is_nightly_master = branch == "master" and rayci_schedule == "nightly"
    if is_release_branch or is_nightly_master:
        return True

    logger.info(
        f"Branch '{branch}' with schedule '{rayci_schedule}' is not eligible for upload. "
        "Upload is only allowed for releases/* branches or master with nightly schedule."
    )
    return False
@click.command()
@click.option(
    "--python-version", type=click.Choice(list(PYTHON_VERSIONS.keys())), required=True
)
@click.option(
    "--platform",
    type=click.Choice(list(PLATFORMS_RAY)),
    required=True,
    multiple=True,
    help="Platform(s) to push. Can be specified multiple times.",
)
@click.option(
    "--image-type",
    type=click.Choice(VALID_IMAGE_TYPES),
    required=True,
)
@click.option("--architecture", type=click.Choice(ARCHITECTURE), required=True)
@click.option("--rayci-work-repo", type=str, required=True, envvar="RAYCI_WORK_REPO")
@click.option("--rayci-build-id", type=str, required=True, envvar="RAYCI_BUILD_ID")
@click.option("--pipeline-id", type=str, required=True, envvar="BUILDKITE_PIPELINE_ID")
@click.option("--branch", type=str, required=True, envvar="BUILDKITE_BRANCH")
@click.option("--commit", type=str, required=True, envvar="BUILDKITE_COMMIT")
@click.option("--rayci-schedule", type=str, default="", envvar="RAYCI_SCHEDULE")
@click.option(
    "--pull-request", type=str, default="false", envvar="BUILDKITE_PULL_REQUEST"
)
def main(
    python_version: str,
    platform: tuple,
    image_type: str,
    architecture: str,
    rayci_work_repo: str,
    rayci_build_id: str,
    pipeline_id: str,
    branch: str,
    commit: str,
    rayci_schedule: str,
    pull_request: str,
) -> None:
    """
    Publish Wanda-cached ray image(s) to Docker Hub.

    Tags are generated matching the original RayDockerContainer format:
        {version}{variation}{python_suffix}{platform}{architecture_suffix}

    Multiple platforms can be specified to push in a single invocation.
    """
    ci_init()

    # Dry-run unless the CI context (postmerge pipeline + eligible branch)
    # permits a real push; dry-run still exercises tag generation/verification.
    dry_run = not _should_upload(pipeline_id, branch, rayci_schedule)
    if dry_run:
        logger.info(
            "DRY RUN MODE - upload conditions not met, no images will be pushed"
        )

    platforms = list(platform)
    logger.info(f"Processing {len(platforms)} platform(s): {platforms}")

    # Log in to the ECR registry hosting the Wanda cache (the repo host part).
    ecr_registry = rayci_work_repo.split("/")[0]
    ecr_docker_login(ecr_registry)

    all_tags = []
    for plat in platforms:
        logger.info(f"\n{'='*60}\nProcessing platform: {plat}\n{'='*60}")
        ctx = RayImagePushContext(
            ray_type=RayType(image_type),
            python_version=python_version,
            platform=plat,
            architecture=architecture,
            branch=branch,
            commit=commit,
            rayci_schedule=rayci_schedule,
            rayci_build_id=rayci_build_id,
            pull_request=pull_request,
        )
        ctx.assert_published_image_type()

        # The source must already exist in the Wanda cache; fail fast if not.
        src_ref = f"{rayci_work_repo}:{ctx.wanda_tag}"
        logger.info(f"Verifying source image in Wanda cache: {src_ref}")
        if not _image_exists(src_ref):
            raise PushRayImageError(f"Source image not found in Wanda cache: {src_ref}")

        destination_tags = ctx.destination_tags()
        for tag in destination_tags:
            dest_ref = f"{ctx.docker_hub_repo}:{tag}"
            _copy_image(src_ref, dest_ref, dry_run=dry_run)

        all_tags.extend(destination_tags)
        logger.info(f"Completed platform {plat} with tags: {destination_tags}")

    logger.info(
        f"\nSuccessfully processed {len(platforms)} platform(s) for {image_type}"
    )
    logger.info(f"Total tags: {len(all_tags)}")


if __name__ == "__main__":
    main()
| {
"repo_id": "ray-project/ray",
"file_path": "ci/ray_ci/automation/push_ray_image.py",
"license": "Apache License 2.0",
"lines": 257,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:ci/ray_ci/automation/test_push_ray_image.py | import sys
from unittest import mock
import pytest
from ci.ray_ci.automation.push_ray_image import RayImagePushContext, compact_cuda_suffix
from ci.ray_ci.configs import DEFAULT_ARCHITECTURE, DEFAULT_PYTHON_TAG_VERSION
from ci.ray_ci.docker_container import GPU_PLATFORM, RayType
def make_ctx(**overrides) -> RayImagePushContext:
    """Build a RayImagePushContext for tests, overriding any of the defaults."""
    base = {
        "ray_type": RayType.RAY,
        "python_version": DEFAULT_PYTHON_TAG_VERSION,
        "platform": "cpu",
        "architecture": DEFAULT_ARCHITECTURE,
        "branch": "master",
        "commit": "abc123",
        "rayci_schedule": "",
        "rayci_build_id": "build123",
        "pull_request": "false",
    }
    return RayImagePushContext(**{**base, **overrides})
class TestWandaImageName:
    """wanda_image_name() must match the name Wanda used when caching the image."""

    # CUDA platform string used by the parametrized cases below.
    DEFAULT_TEST_CUDA_PLATFORM = "cu12.1.1-cudnn8"

    @pytest.mark.parametrize(
        ("ray_type", "python_version", "platform", "architecture", "expected"),
        [
            # CPU images
            (RayType.RAY, "3.10", "cpu", DEFAULT_ARCHITECTURE, "ray-py3.10-cpu"),
            (RayType.RAY, "3.10", "cpu", "aarch64", "ray-py3.10-cpu-aarch64"),
            (
                RayType.RAY_EXTRA,
                "3.10",
                "cpu",
                DEFAULT_ARCHITECTURE,
                "ray-extra-py3.10-cpu",
            ),
            # TPU images
            (RayType.RAY, "3.10", "tpu", DEFAULT_ARCHITECTURE, "ray-py3.10-tpu"),
            # CUDA images
            (
                RayType.RAY,
                "3.11",
                DEFAULT_TEST_CUDA_PLATFORM,
                DEFAULT_ARCHITECTURE,
                f"ray-py3.11-{DEFAULT_TEST_CUDA_PLATFORM}",
            ),
            (
                RayType.RAY,
                "3.11",
                DEFAULT_TEST_CUDA_PLATFORM,
                "aarch64",
                f"ray-py3.11-{DEFAULT_TEST_CUDA_PLATFORM}-aarch64",
            ),
            (
                RayType.RAY_EXTRA,
                "3.11",
                DEFAULT_TEST_CUDA_PLATFORM,
                DEFAULT_ARCHITECTURE,
                f"ray-extra-py3.11-{DEFAULT_TEST_CUDA_PLATFORM}",
            ),
            (
                RayType.RAY_LLM,
                "3.11",
                DEFAULT_TEST_CUDA_PLATFORM,
                DEFAULT_ARCHITECTURE,
                f"ray-llm-py3.11-{DEFAULT_TEST_CUDA_PLATFORM}",
            ),
            (
                RayType.RAY_LLM_EXTRA,
                "3.11",
                DEFAULT_TEST_CUDA_PLATFORM,
                DEFAULT_ARCHITECTURE,
                f"ray-llm-extra-py3.11-{DEFAULT_TEST_CUDA_PLATFORM}",
            ),
        ],
    )
    def test_wanda_image_name(
        self, ray_type, python_version, platform, architecture, expected
    ):
        ctx = make_ctx(
            ray_type=ray_type,
            python_version=python_version,
            platform=platform,
            architecture=architecture,
        )
        assert ctx.wanda_image_name() == expected
class TestVariationSuffix:
    """'-extra' image variants must carry the '-extra' tag component."""

    @pytest.mark.parametrize(
        ("ray_type", "expected"),
        [
            (RayType.RAY, ""),
            (RayType.RAY_EXTRA, "-extra"),
            (RayType.RAY_ML, ""),
            (RayType.RAY_ML_EXTRA, "-extra"),
            (RayType.RAY_LLM, ""),
            (RayType.RAY_LLM_EXTRA, "-extra"),
        ],
    )
    def test_variation_suffix(self, ray_type, expected):
        ctx = make_ctx(ray_type=ray_type)
        assert ctx._variation_suffix() == expected
class TestPythonSuffixes:
    """Only the default python version also gets the empty (version-less) suffix."""

    @pytest.mark.parametrize(
        ("python_version", "expected"),
        [
            (
                DEFAULT_PYTHON_TAG_VERSION,
                ["-py" + DEFAULT_PYTHON_TAG_VERSION.replace(".", ""), ""],
            ),  # default gets empty suffix too
            ("3.99", ["-py399"]),  # non-default gets no empty suffix
        ],
    )
    def test_python_suffixes(self, python_version, expected):
        ctx = make_ctx(python_version=python_version)
        assert ctx._python_suffixes() == expected
class TestPlatformSuffixes:
    """Platform tag components, including the cpu/gpu aliases per image type."""

    @pytest.mark.parametrize(
        ("platform", "ray_type", "expected"),
        [
            # CPU images
            ("cpu", RayType.RAY, ["-cpu", ""]),
            ("cpu", RayType.RAY_EXTRA, ["-cpu", ""]),
            ("cpu", RayType.RAY_ML, ["-cpu"]),  # ray-ml doesn't get empty for cpu
            # TPU images
            ("tpu", RayType.RAY, ["-tpu"]),
            # CUDA images
            ("cu11.7.1-cudnn8", RayType.RAY, ["-cu117"]),
            ("cu11.8.0-cudnn8", RayType.RAY, ["-cu118"]),
            (GPU_PLATFORM, RayType.RAY, [compact_cuda_suffix(GPU_PLATFORM), "-gpu"]),
            (
                GPU_PLATFORM,
                RayType.RAY_ML,
                [compact_cuda_suffix(GPU_PLATFORM), "-gpu", ""],
            ),  # ray-ml gets empty for GPU_PLATFORM
        ],
    )
    def test_platform_suffixes(self, platform, ray_type, expected):
        ctx = make_ctx(platform=platform, ray_type=ray_type)
        assert ctx._platform_suffixes() == expected
class TestVersions:
    """_versions() must pick tags based on branch / schedule / PR status."""

    @mock.patch("ci.ray_ci.automation.push_ray_image.datetime")
    def test_nightly_master(self, mock_datetime):
        # Pin the date so the nightly tag is deterministic.
        mock_datetime.now.return_value.strftime.return_value = "260107"
        ctx = make_ctx(branch="master", commit="abc123def456", rayci_schedule="nightly")
        assert ctx._versions() == ["nightly.260107.abc123", "nightly"]

    def test_release_branch(self):
        ctx = make_ctx(branch="releases/2.44.0", commit="abc123def456")
        assert ctx._versions() == ["2.44.0.abc123"]

    def test_pull_request(self):
        ctx = make_ctx(
            branch="feature-branch", commit="abc123def456", pull_request="12345"
        )
        assert ctx._versions() == ["pr-12345.abc123", "build123"]

    def test_other_branch(self):
        ctx = make_ctx(branch="feature-branch", commit="abc123def456")
        assert ctx._versions() == ["abc123", "build123"]

    def test_master_non_nightly(self):
        """Master branch without nightly schedule returns sha tags, not PR tags."""
        ctx = make_ctx(
            branch="master",
            commit="abc123def456",
            rayci_schedule="",
            pull_request="123",
        )
        # Even with pull_request set, master branch should return sha tags
        assert ctx._versions() == ["abc123", "build123"]
class TestDestinationTags:
    """
    Test destination_tags method.

    Tags are formed as: {version}{variation}{python_suffix}{platform}{architecture_suffix}
    """

    @mock.patch("ci.ray_ci.automation.push_ray_image.datetime")
    def test_nightly_cpu_default_python(self, mock_datetime):
        """Test: nightly.260107.abc123-py310-cpu"""
        mock_datetime.now.return_value.strftime.return_value = "260107"
        ctx = make_ctx(branch="master", commit="abc123def456", rayci_schedule="nightly")
        tags = ctx.destination_tags()
        # nightly versions x cpu suffixes x python suffixes
        # ["nightly.260107.abc123", "nightly"] x ["-cpu", ""] x ["-py310", ""]
        assert "nightly.260107.abc123-py310-cpu" in tags
        assert "nightly.260107.abc123-cpu" in tags
        assert "nightly.260107.abc123-py310" in tags
        assert "nightly.260107.abc123" in tags
        assert "nightly-py310-cpu" in tags
        assert "nightly-cpu" in tags
        assert "nightly-py310" in tags
        assert "nightly" in tags

    @mock.patch("ci.ray_ci.automation.push_ray_image.datetime")
    def test_nightly_extra_gpu(self, mock_datetime):
        """Test: nightly-extra-py310-cu121 and nightly.260107.abc123-extra-py310-gpu"""
        mock_datetime.now.return_value.strftime.return_value = "260107"
        ctx = make_ctx(
            ray_type=RayType.RAY_EXTRA,
            platform=GPU_PLATFORM,
            branch="master",
            commit="abc123def456",
            rayci_schedule="nightly",
        )
        tags = ctx.destination_tags()
        # Should include -extra variation and -gpu alias
        assert "nightly.260107.abc123-extra-py310-cu121" in tags
        assert "nightly.260107.abc123-extra-py310-gpu" in tags
        assert "nightly-extra-py310-cu121" in tags
        assert "nightly-extra-py310-gpu" in tags
        assert "nightly.260107.abc123-extra-cu121" in tags
        assert "nightly-extra-gpu" in tags

    @mock.patch("ci.ray_ci.automation.push_ray_image.datetime")
    def test_nightly_gpu_platform_non_default_python(self, mock_datetime):
        """Test: nightly.260107.abc123-py311-cu121"""
        mock_datetime.now.return_value.strftime.return_value = "260107"
        ctx = make_ctx(
            python_version="3.11",
            platform=GPU_PLATFORM,
            branch="master",
            commit="abc123def456",
            rayci_schedule="nightly",
        )
        tags = ctx.destination_tags()
        # Should include -cu121, -gpu aliases but NOT empty python suffix (3.11 is not default)
        assert "nightly.260107.abc123-py311-cu121" in tags
        assert "nightly.260107.abc123-py311-gpu" in tags
        assert "nightly-py311-cu121" in tags
        assert "nightly-py311-gpu" in tags
        # Should NOT have empty python suffix variants
        assert "nightly.260107.abc123-cu121" not in tags
        assert "nightly-gpu" not in tags

    def test_release_gpu(self):
        """Test: 2.53.0.abc123-py310-cu121"""
        ctx = make_ctx(
            platform=GPU_PLATFORM, branch="releases/2.53.0", commit="abc123def456"
        )
        tags = ctx.destination_tags()
        assert "2.53.0.abc123-py310-cu121" in tags
        assert "2.53.0.abc123-py310-gpu" in tags
        # Default python suffix variants
        assert "2.53.0.abc123-cu121" in tags
        assert "2.53.0.abc123-gpu" in tags

    def test_release_extra_gpu(self):
        """Test: 2.53.0.abc123-extra-py310-cu121"""
        ctx = make_ctx(
            ray_type=RayType.RAY_EXTRA,
            platform=GPU_PLATFORM,
            branch="releases/2.53.0",
            commit="abc123def456",
        )
        tags = ctx.destination_tags()
        assert "2.53.0.abc123-extra-py310-cu121" in tags
        assert "2.53.0.abc123-extra-py310-gpu" in tags
        # Default python suffix variants
        assert "2.53.0.abc123-extra-cu121" in tags
        assert "2.53.0.abc123-extra-gpu" in tags

    def test_release_non_gpu_platform_cuda(self):
        """Test release with non-GPU_PLATFORM CUDA version (no -gpu alias)."""
        ctx = make_ctx(
            python_version="3.11",
            platform="cu12.3.2-cudnn9",  # Not GPU_PLATFORM
            branch="releases/2.44.0",
            commit="abc123def456",
        )
        tags = ctx.destination_tags()
        assert "2.44.0.abc123-py311-cu123" in tags
        # Should NOT have -gpu alias since this isn't GPU_PLATFORM
        assert "2.44.0.abc123-py311-gpu" not in tags

    def test_release_cpu_aarch64(self):
        """Test release with architecture suffix."""
        ctx = make_ctx(
            architecture="aarch64",
            branch="releases/2.44.0",
            commit="abc123def456",
        )
        tags = ctx.destination_tags()
        assert "2.44.0.abc123-py310-cpu-aarch64" in tags
        assert "2.44.0.abc123-cpu-aarch64" in tags
        # Empty platform suffix variant (ray cpu alias)
        assert "2.44.0.abc123-py310-aarch64" in tags
        assert "2.44.0.abc123-aarch64" in tags

    def test_pull_request_tags(self):
        """Test PR builds include pr-{number} prefix."""
        ctx = make_ctx(
            branch="feature-branch", commit="abc123def456", pull_request="12345"
        )
        tags = ctx.destination_tags()
        assert "pr-12345.abc123-py310-cpu" in tags
        assert "build123-py310-cpu" in tags

    def test_feature_branch_non_pr(self):
        """Test non-PR feature branch uses sha and build_id."""
        ctx = make_ctx(python_version="3.12", commit="abc123def456")
        tags = ctx.destination_tags()
        assert "abc123-py312-cpu" in tags
        assert "build123-py312-cpu" in tags
class TestShouldUpload:
    """Test _should_upload function.

    Each test patches ``get_global_config`` and checks one (pipeline, branch,
    schedule) combination. The repeated mock/config/call boilerplate is
    centralized in ``_call_should_upload``.
    """

    POSTMERGE_PIPELINE_ID = "test-postmerge-pipeline-id"
    NON_POSTMERGE_PIPELINE_ID = "some-other-pipeline-id"

    def _call_should_upload(
        self, mock_config, *, branch, rayci_schedule, pipeline_id=None
    ):
        """Configure the postmerge pipeline list and invoke _should_upload.

        Args:
            mock_config: The patched ``get_global_config`` mock.
            branch: Branch name to test.
            rayci_schedule: Schedule string ("" when unscheduled).
            pipeline_id: Pipeline to test; defaults to the postmerge pipeline.

        Returns:
            The boolean result of ``_should_upload``.
        """
        from ci.ray_ci.automation.push_ray_image import _should_upload

        mock_config.return_value = {
            "ci_pipeline_postmerge": [self.POSTMERGE_PIPELINE_ID]
        }
        return _should_upload(
            pipeline_id=pipeline_id or self.POSTMERGE_PIPELINE_ID,
            branch=branch,
            rayci_schedule=rayci_schedule,
        )

    @mock.patch("ci.ray_ci.automation.push_ray_image.get_global_config")
    def test_non_postmerge_pipeline_returns_false(self, mock_config):
        """Non-postmerge pipelines should not upload."""
        result = self._call_should_upload(
            mock_config,
            pipeline_id=self.NON_POSTMERGE_PIPELINE_ID,
            branch="master",
            rayci_schedule="nightly",
        )
        assert result is False

    @mock.patch("ci.ray_ci.automation.push_ray_image.get_global_config")
    def test_release_branch_returns_true(self, mock_config):
        """Release branches on postmerge should upload."""
        result = self._call_should_upload(
            mock_config, branch="releases/2.44.0", rayci_schedule=""
        )
        assert result is True

    @mock.patch("ci.ray_ci.automation.push_ray_image.get_global_config")
    def test_master_nightly_returns_true(self, mock_config):
        """Master branch with nightly schedule on postmerge should upload."""
        result = self._call_should_upload(
            mock_config, branch="master", rayci_schedule="nightly"
        )
        assert result is True

    @mock.patch("ci.ray_ci.automation.push_ray_image.get_global_config")
    def test_master_non_nightly_returns_false(self, mock_config):
        """Master branch without nightly schedule should not upload."""
        result = self._call_should_upload(
            mock_config, branch="master", rayci_schedule=""
        )
        assert result is False

    @mock.patch("ci.ray_ci.automation.push_ray_image.get_global_config")
    def test_feature_branch_returns_false(self, mock_config):
        """Feature branches should not upload even on postmerge."""
        result = self._call_should_upload(
            mock_config, branch="andrew/revup/master/feature", rayci_schedule=""
        )
        assert result is False

    @mock.patch("ci.ray_ci.automation.push_ray_image.get_global_config")
    def test_pr_branch_returns_false(self, mock_config):
        """PR branches should not upload even on postmerge."""
        result = self._call_should_upload(
            mock_config, branch="feature-branch", rayci_schedule=""
        )
        assert result is False

    @mock.patch("ci.ray_ci.automation.push_ray_image.get_global_config")
    def test_master_with_other_schedule_returns_false(self, mock_config):
        """Master branch with non-nightly schedule should not upload."""
        result = self._call_should_upload(
            mock_config, branch="master", rayci_schedule="weekly"
        )
        assert result is False
class TestCopyImage:
    """Test _copy_image function."""

    @mock.patch("ci.ray_ci.automation.image_tags_lib.call_crane_copy")
    def test_copy_image_dry_run_skips_crane(self, mock_copy):
        """Dry-run mode must never invoke crane copy."""
        from ci.ray_ci.automation.push_ray_image import _copy_image

        _copy_image("src", "dest", dry_run=True)
        assert mock_copy.call_count == 0

    @mock.patch("ci.ray_ci.automation.image_tags_lib.call_crane_copy")
    def test_copy_image_calls_crane(self, mock_copy):
        """Non-dry-run mode performs exactly one crane copy src -> dest."""
        from ci.ray_ci.automation.push_ray_image import _copy_image

        _copy_image("src", "dest", dry_run=False)
        assert mock_copy.call_args_list == [mock.call("src", "dest")]

    @mock.patch("ci.ray_ci.automation.image_tags_lib.call_crane_copy")
    def test_copy_image_raises_on_crane_error(self, mock_copy):
        """Crane errors are wrapped in PushRayImageError."""
        from ci.ray_ci.automation.crane_lib import CraneError
        from ci.ray_ci.automation.push_ray_image import PushRayImageError, _copy_image

        mock_copy.side_effect = CraneError("Copy failed")
        with pytest.raises(PushRayImageError, match="Crane copy failed"):
            _copy_image("src", "dest", dry_run=False)
class TestMultiplePlatforms:
    """Test main function handling of multiple platforms.

    Both tests drive the CLI with the same two platforms (cpu and
    cu12.1.1-cudnn8); the shared invocation lives in ``_invoke_cli``.
    """

    POSTMERGE_PIPELINE_ID = "test-postmerge-pipeline-id"
    WORK_REPO = "123456789.dkr.ecr.us-west-2.amazonaws.com/rayci-work"

    def _invoke_cli(self):
        """Run the push_ray_image CLI for a release build with two platforms.

        Returns:
            The click ``Result`` of the invocation.
        """
        from click.testing import CliRunner

        from ci.ray_ci.automation.push_ray_image import main

        return CliRunner().invoke(
            main,
            [
                "--python-version",
                "3.10",
                "--platform",
                "cpu",
                "--platform",
                "cu12.1.1-cudnn8",
                "--image-type",
                "ray",
                "--architecture",
                "x86_64",
                "--rayci-work-repo",
                self.WORK_REPO,
                "--rayci-build-id",
                "build123",
                "--pipeline-id",
                self.POSTMERGE_PIPELINE_ID,
                "--branch",
                "releases/2.44.0",
                "--commit",
                "abc123def456",
            ],
        )

    @mock.patch("ci.ray_ci.automation.push_ray_image.ci_init")
    @mock.patch("ci.ray_ci.automation.push_ray_image.ecr_docker_login")
    @mock.patch("ci.ray_ci.automation.push_ray_image._copy_image")
    @mock.patch("ci.ray_ci.automation.push_ray_image._image_exists")
    @mock.patch("ci.ray_ci.automation.push_ray_image.get_global_config")
    def test_multiple_platforms_processed(
        self, mock_config, mock_exists, mock_copy, mock_ecr_login, mock_ci_init
    ):
        """Test that multiple platforms are each processed with correct source refs."""
        mock_config.return_value = {
            "ci_pipeline_postmerge": [self.POSTMERGE_PIPELINE_ID]
        }
        mock_exists.return_value = True

        result = self._invoke_cli()

        assert result.exit_code == 0, f"CLI failed: {result.output}"
        # Should check image exists for both platforms
        assert mock_exists.call_count == 2
        exists_calls = [call[0][0] for call in mock_exists.call_args_list]
        assert any("ray-py3.10-cpu" in call for call in exists_calls)
        assert any("ray-py3.10-cu12.1.1-cudnn8" in call for call in exists_calls)
        # Should have tags from both platforms
        copy_calls = [call.args for call in mock_copy.call_args_list]
        assert any(
            "ray-py3.10-cpu" in src and "-cpu" in dest for src, dest in copy_calls
        )
        assert any(
            "ray-py3.10-cu12.1.1-cudnn8" in src and "-cu121" in dest
            for src, dest in copy_calls
        )

    @mock.patch("ci.ray_ci.automation.push_ray_image.ci_init")
    @mock.patch("ci.ray_ci.automation.push_ray_image.ecr_docker_login")
    @mock.patch("ci.ray_ci.automation.push_ray_image._copy_image")
    @mock.patch("ci.ray_ci.automation.push_ray_image._image_exists")
    @mock.patch("ci.ray_ci.automation.push_ray_image.get_global_config")
    def test_multiple_platforms_fails_if_one_missing(
        self, mock_config, mock_exists, mock_copy, mock_ecr_login, mock_ci_init
    ):
        """Test that processing fails if any platform's source image is missing."""
        from ci.ray_ci.automation.push_ray_image import PushRayImageError

        mock_config.return_value = {
            "ci_pipeline_postmerge": [self.POSTMERGE_PIPELINE_ID]
        }
        mock_exists.side_effect = [True, False]  # First exists, second doesn't

        result = self._invoke_cli()

        assert result.exit_code != 0
        assert isinstance(result.exception, PushRayImageError)
        assert "Source image not found" in str(result.exception)


if __name__ == "__main__":
    sys.exit(pytest.main(["-vv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "ci/ray_ci/automation/test_push_ray_image.py",
"license": "Apache License 2.0",
"lines": 513,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/experimental/gpu_object_manager/cuda_ipc_transport.py | from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, List, Optional
import ray
from ray.experimental.gpu_object_manager.tensor_transport_manager import (
CommunicatorMetadata,
TensorTransportManager,
TensorTransportMetadata,
)
if TYPE_CHECKING:
import torch
@dataclass
class CudaIpcCommunicatorMetadata(CommunicatorMetadata):
    """Metadata for the CUDA IPC communicator.

    No fields: CUDA IPC is a one-sided transport (the receiver reconstructs
    tensors from IPC handles), so no per-transfer communicator state is
    needed beyond what the base class provides.
    """
@dataclass
class CudaIpcTransportMetadata(TensorTransportMetadata):
    """Metadata for tensors stored in the GPU object store for CUDA IPC transport."""

    # List of tuples, each containing the function and metadata to reconstruct the
    # tensor (as produced by torch.multiprocessing.reductions.reduce_tensor).
    cuda_ipc_handles: Optional[List[Any]] = None
    # The IPC handle of the CUDA event that is used to synchronize the sender and
    # receiver: the receiver waits on this event before reading the tensors.
    cuda_ipc_event_ipc_handle: Optional[bytes] = None
    # The index of the GPU that the tensors are on. This requires that the GPU is
    # assigned by Ray, e.g., using @ray.remote(num_gpus=1).
    ray_gpu_idx: Optional[int] = None
    # The node that the GPU that the tensors are on is on. Used by the receiver
    # to reject cross-node transfers, which CUDA IPC cannot do.
    ray_node_id: Optional[str] = None
class CudaIpcTransport(TensorTransportManager):
    """One-sided tensor transport that shares GPU tensors via CUDA IPC.

    The sender exports IPC handles for its tensors plus a CUDA event for
    synchronization; the receiver reconstructs the tensors directly from the
    handles. Transfers are only valid when sender and receiver are on the
    same node and were assigned the same GPU by Ray.
    """

    def __init__(self):
        pass

    @property
    def tensor_transport_backend(self) -> str:
        # Backend identifier used to select this transport.
        return "CUDA_IPC"

    @staticmethod
    def is_one_sided() -> bool:
        # One-sided: the receiver pulls via IPC handles; no explicit send
        # operation ever runs (see send_multiple_tensors below).
        return True

    @staticmethod
    def can_abort_transport() -> bool:
        return False

    def actor_has_tensor_transport(self, actor: "ray.actor.ActorHandle") -> bool:
        # TODO: Ideally we would check if torch.cuda.is_available() on the actor
        # and if so, return True. But we want to avoid blocking in ray.get() in
        # this method since it gets called before submitting an actor task.
        return True

    def extract_tensor_transport_metadata(
        self,
        obj_id: str,
        gpu_object: List["torch.Tensor"],
    ) -> CudaIpcTransportMetadata:
        """Export IPC handles (and a sync event) for all tensors in ``gpu_object``.

        All tensors must share a single device. Also records the Ray-assigned
        GPU id and node id so the receiver can validate colocation.

        Raises:
            ValueError: If the tensors are not all on the same device.
        """
        tensor_meta = []
        device = None
        cuda_ipc_handles = []
        event_ipc_handle = None
        ray_gpu_idx = None
        ray_node_id = None
        if gpu_object:
            import torch
            from torch.multiprocessing.reductions import reduce_tensor

            device = gpu_object[0].device
            # Map the local device index back to the Ray-assigned GPU id so the
            # receiver can translate it into its own device mapping.
            ray_gpu_idx = ray.get_gpu_ids()[device.index]
            ray_node_id = ray.get_runtime_context().get_node_id()
            # Create an interprocess-shareable CUDA event so that the receiver
            # can wait for the sender's computations to complete.
            event = torch.cuda.Event(interprocess=True)
            torch.cuda.current_stream(device).record_event(event)
            for t in gpu_object:
                if t.device.type != device.type:
                    raise ValueError(
                        "All tensors in an RDT object must have the same device type."
                    )
                if t.device.index != device.index:
                    raise ValueError(
                        "All tensors in an RDT object must be on the same GPU."
                    )
                tensor_meta.append((t.shape, t.dtype))
                ipc_handle = reduce_tensor(t)
                cuda_ipc_handles.append(ipc_handle)
            event_ipc_handle = event.ipc_handle()
        return CudaIpcTransportMetadata(
            tensor_meta=tensor_meta,
            tensor_device=device.type if device else None,
            cuda_ipc_handles=cuda_ipc_handles,
            cuda_ipc_event_ipc_handle=event_ipc_handle,
            ray_gpu_idx=ray_gpu_idx,
            ray_node_id=ray_node_id,
        )

    def get_communicator_metadata(
        self,
        src_actor: "ray.actor.ActorHandle",
        dst_actor: "ray.actor.ActorHandle",
        backend: Optional[str] = None,
    ) -> CudaIpcCommunicatorMetadata:
        # CUDA IPC requires no communicator state; return an empty metadata
        # object (src/dst/backend are ignored).
        communicator_metadata = CudaIpcCommunicatorMetadata()
        return communicator_metadata

    def recv_multiple_tensors(
        self,
        obj_id: str,
        tensor_transport_metadata: TensorTransportMetadata,
        communicator_metadata: CommunicatorMetadata,
        target_buffers: Optional[List["torch.Tensor"]] = None,
    ) -> List["torch.Tensor"]:
        """Reconstruct the sender's tensors from their CUDA IPC handles.

        Validates that the receiver is on the sender's node and was assigned
        the same Ray GPU, then waits (on-stream) for the sender's recorded
        event before mapping the tensors.

        Raises:
            ValueError: On cross-node / cross-GPU transfers, or if
                ``target_buffers`` is provided (unsupported here).
            RuntimeError: If an IPC handle cannot be reconstructed (e.g. the
                source actor died).
        """
        assert isinstance(
            tensor_transport_metadata, CudaIpcTransportMetadata
        ), "metadata must be a CudaIpcTransportMetadata object for CUDA IPC transport"
        assert isinstance(
            communicator_metadata, CudaIpcCommunicatorMetadata
        ), "metadata must be a CudaIpcCommunicatorMetadata object for CUDA IPC transport"
        if target_buffers:
            raise ValueError(
                "The CUDA IPC transport does not support receiving into buffers."
            )
        tensors = []
        if tensor_transport_metadata.tensor_meta:
            import torch

            cur_node_id = ray.get_runtime_context().get_node_id()
            if cur_node_id != tensor_transport_metadata.ray_node_id:
                raise ValueError(
                    f"CUDA IPC transport only supports tensors on the same node, but the current node ID: {cur_node_id} and the sender node ID: {tensor_transport_metadata.ray_node_id} are different."
                )
            try:
                # Translate the sender's Ray GPU id into this process's local
                # device index; fails if we weren't assigned that GPU.
                device_idx = ray.get_gpu_ids().index(
                    tensor_transport_metadata.ray_gpu_idx
                )
            except ValueError:
                raise ValueError(
                    f"CUDA IPC transport only supports tensors on the same GPU, but the receiver was not allocated the same GPUs by Ray as the sender (GPU: {tensor_transport_metadata.ray_gpu_idx}). To use the CUDA IPC RDT transport, ensure that the receiver is allocated the same GPU by Ray as the sender, and that CUDA_VISIBLE_DEVICES is set to `ray.get_gpu_ids()`, the GPUs assigned by Ray (this is the default behavior)."
                )
            device = torch.device(f"cuda:{device_idx}")

            event_ipc_handle = tensor_transport_metadata.cuda_ipc_event_ipc_handle
            if event_ipc_handle is not None:
                # Reconstruct the event from IPC handle
                event_remote = torch.cuda.Event.from_ipc_handle(
                    device=device, handle=event_ipc_handle
                )
                # Make current stream wait for the sender's event
                # This ensures sender's computation is complete before we use the tensor
                # This is asynchronous - doesn't block CPU, only GPU stream
                torch.cuda.current_stream(device).wait_event(event_remote)

            for i, ipc_handle in enumerate(tensor_transport_metadata.cuda_ipc_handles):
                # Reconstruct the tensor
                func, args = ipc_handle
                list_args = list(args)
                # Fields specified in https://github.com/pytorch/pytorch/blob/1495b35d29512f303ab37780760c5e692158514b/torch/multiprocessing/reductions.py#L155
                # Update device ID to match current process's device mapping
                if not isinstance(list_args[6], int):
                    raise RuntimeError(
                        f"Expected CUDA IPC tensor reconstruction list_args[6] to be device ID, but got {list_args[6]}. Please file an issue at https://github.com/ray-project/ray/issues/new/choose."
                    )
                list_args[6] = device.index
                try:
                    tensor = func(*list_args)
                except Exception as e:
                    raise RuntimeError(
                        "Error reconstructing CUDA IPC tensor. Source actor may have failed."
                    ) from e
                tensors.append(tensor)
        return tensors

    def send_multiple_tensors(
        self,
        tensors: List["torch.Tensor"],
        tensor_transport_metadata: CudaIpcTransportMetadata,
        communicator_metadata: CudaIpcCommunicatorMetadata,
    ):
        # Never used: this transport is one-sided (see is_one_sided()).
        raise NotImplementedError(
            "CUDA IPC transport does not support send_multiple_tensors, since it is a one-sided transport."
        )

    def garbage_collect(
        self,
        obj_id: str,
        tensor_transport_meta: CudaIpcTransportMetadata,
        tensors: List["torch.Tensor"],
    ):
        # No-op: no extra per-object resources are tracked by this transport.
        pass

    def abort_transport(
        self,
        obj_id: str,
        communicator_metadata: CudaIpcCommunicatorMetadata,
    ):
        # TODO: Implement CUDA IPC abort transport.
        raise NotImplementedError(
            "CUDA IPC transport does not support abort_transport for now."
        )
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/experimental/gpu_object_manager/cuda_ipc_transport.py",
"license": "Apache License 2.0",
"lines": 180,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/tests/gpu_objects/test_gpu_objects_ipc.py | import sys
import pytest
import torch
import ray
@ray.remote(enable_tensor_transport=True)
class GPUTestActor:
    """Minimal actor used to exercise the CUDA IPC tensor transport."""

    def __init__(self):
        # Last tensor stored via echo(); inspected by wait_tensor_freed().
        self.tensor = None

    @ray.method(tensor_transport="cuda_ipc")
    def echo(self, data):
        """Move the input tensor to GPU, keep a reference, and return it
        through the CUDA IPC transport."""
        self.tensor = data.to("cuda")
        return self.tensor

    def double(self, data):
        """Double the received tensor in place and return it."""
        data.mul_(2)
        return data

    def wait_tensor_freed(self):
        """Block until the echoed tensor is freed from this actor's GPU
        object store, then verify it is gone."""
        gpu_manager = ray.worker.global_worker.gpu_object_manager
        ray.experimental.wait_tensor_freed(self.tensor, timeout=10)
        assert not gpu_manager.gpu_object_store.has_tensor(self.tensor)
        return "freed"
@pytest.mark.parametrize("ray_start_regular", [{"num_gpus": 1}], indirect=True)
def test_colocated_actors(ray_start_regular):
    """Two actors sharing one GPU: an in-place mutation on the receiver is
    visible through the sender's ref, showing both sides reference the same
    GPU memory."""
    world_size = 2
    actors = [
        GPUTestActor.options(num_gpus=0.5, num_cpus=0).remote()
        for _ in range(world_size)
    ]
    src_actor, dst_actor = actors[0], actors[1]

    # Create test tensor
    tensor = torch.tensor([1, 2, 3])
    gpu_ref = src_actor.echo.remote(tensor)

    # Trigger tensor transfer from src to dst actor
    ray.get(dst_actor.double.remote(gpu_ref))

    # Check that the tensor is modified in place, and is reflected on the source actor
    assert torch.equal(
        ray.get(gpu_ref, _use_object_store=True),
        torch.tensor([2, 4, 6], device="cuda"),
    )
@pytest.mark.parametrize("ray_start_regular", [{"num_gpus": 2}], indirect=True)
def test_different_devices(ray_start_regular):
    """A transfer between actors assigned different GPUs must raise
    ValueError (CUDA IPC only works on the same GPU)."""
    world_size = 2
    actors = [
        GPUTestActor.options(num_gpus=1, num_cpus=0).remote() for _ in range(world_size)
    ]
    src_actor, dst_actor = actors[0], actors[1]

    # Create test tensor
    tensor = torch.tensor([1, 2, 3])
    gpu_ref = src_actor.echo.remote(tensor)

    # Trigger tensor transfer from src to dst actor. Since CUDA IPC transport does not
    # support cross-device tensor transfers, this should raise a ValueError.
    with pytest.raises(
        ValueError, match="CUDA IPC transport only supports tensors on the same GPU*"
    ):
        ray.get(dst_actor.double.remote(gpu_ref))
def test_different_nodes(ray_start_cluster):
    """Inter-node CUDA IPC transfers must raise ValueError."""
    cluster = ray_start_cluster
    num_nodes = 2
    num_cpus = 1
    num_gpus = 1
    # One single-GPU node per actor so the two actors land on different nodes.
    for _ in range(num_nodes):
        cluster.add_node(num_cpus=num_cpus, num_gpus=num_gpus)
    ray.init(address=cluster.address)

    world_size = 2
    actors = [
        GPUTestActor.options(num_gpus=1, num_cpus=0).remote() for _ in range(world_size)
    ]
    src_actor, dst_actor = actors[0], actors[1]

    # Create test tensor
    tensor = torch.tensor([1, 2, 3])
    gpu_ref = src_actor.echo.remote(tensor)

    # Trigger tensor transfer from src to dst actor. Since CUDA IPC transport does not
    # support cross-node tensor transfers, this should raise a ValueError.
    with pytest.raises(
        ValueError, match="CUDA IPC transport only supports tensors on the same node.*"
    ):
        ray.get(dst_actor.double.remote(gpu_ref))
@pytest.mark.parametrize("ray_start_regular", [{"num_gpus": 1}], indirect=True)
def test_ref_freed(ray_start_regular):
    """Dropping the last ObjectRef lets the sender free its tensor, while the
    receiver's result stays valid."""
    world_size = 2
    actors = [
        GPUTestActor.options(num_gpus=0.5, num_cpus=0).remote()
        for _ in range(world_size)
    ]
    src_actor, dst_actor = actors[0], actors[1]

    # Create test tensor
    tensor = torch.tensor([1, 2, 3])
    gpu_ref = src_actor.echo.remote(tensor)

    # Trigger tensor transfer from src to dst actor
    res_ref = dst_actor.double.remote(gpu_ref)
    # Drop the last reference so the sender is allowed to free the tensor.
    del gpu_ref
    free_res = ray.get(src_actor.wait_tensor_freed.remote())
    assert free_res == "freed"
    # The receiver's result is unaffected by the sender-side free.
    assert torch.equal(
        ray.get(res_ref, _use_object_store=True),
        torch.tensor([2, 4, 6], device="cuda"),
    )
@pytest.mark.parametrize("ray_start_regular", [{"num_gpus": 1}], indirect=True)
def test_source_actor_fails_after_transfer(ray_start_regular):
    """The receiver's result remains readable after the source actor dies,
    as long as the transfer completed first."""
    world_size = 2
    actors = [
        GPUTestActor.options(num_gpus=0.5, num_cpus=0).remote()
        for _ in range(world_size)
    ]
    src_actor, dst_actor = actors[0], actors[1]

    # Create test tensor
    tensor = torch.tensor([1, 2, 3])
    gpu_ref = src_actor.echo.remote(tensor)

    # Trigger tensor transfer from src to dst actor
    res_ref = dst_actor.double.remote(gpu_ref)
    assert torch.equal(
        ray.get(res_ref, _use_object_store=True),
        torch.tensor([2, 4, 6], device="cuda"),
    )

    # Kill the source actor.
    ray.kill(src_actor)
    with pytest.raises(ray.exceptions.RayActorError):
        ray.get(src_actor.wait_tensor_freed.remote())

    # Check that the tensor is still available on the destination actor.
    assert torch.equal(
        ray.get(res_ref, _use_object_store=True),
        torch.tensor([2, 4, 6], device="cuda"),
    )
@pytest.mark.parametrize("ray_start_regular", [{"num_gpus": 1}], indirect=True)
def test_source_actor_fails_before_transfer(ray_start_regular):
    """If the source actor dies before the transfer happens, consuming the
    ref on the receiver raises RayTaskError."""
    world_size = 2
    actors = [
        GPUTestActor.options(num_gpus=0.5, num_cpus=0).remote()
        for _ in range(world_size)
    ]
    src_actor, dst_actor = actors[0], actors[1]

    # Create test tensor
    tensor = torch.tensor([1, 2, 3])
    gpu_ref = src_actor.echo.remote(tensor)
    # Wait for object to be created.
    assert torch.equal(
        ray.get(gpu_ref, _use_object_store=True),
        torch.tensor([1, 2, 3], device="cuda"),
    )

    # Kill the source actor.
    ray.kill(src_actor)
    with pytest.raises(ray.exceptions.RayActorError):
        ray.get(src_actor.wait_tensor_freed.remote())

    # Check that the tensor is still available on the destination actor.
    with pytest.raises(ray.exceptions.RayTaskError):
        res_ref = dst_actor.double.remote(gpu_ref)
        ray.get(res_ref, _use_object_store=True)


if __name__ == "__main__":
    sys.exit(pytest.main(["-sv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/gpu_objects/test_gpu_objects_ipc.py",
"license": "Apache License 2.0",
"lines": 149,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/_internal/execution/bundle_queue/base.py | from __future__ import annotations
import abc
from typing import (
TYPE_CHECKING,
Any,
Optional,
)
from ray.data._internal.execution.util import memory_string
if TYPE_CHECKING:
from ray.data._internal.execution.interfaces import RefBundle
class BaseBundleQueue:
    """Base class for storing bundles. Here and subclasses should adhere to the mental
    model that "first", "front", or "head" is the next bundle to be dequeued. Consequently,
    "last", "back", or "tail" is the last bundle to be dequeued.

    Subclasses may choose to use the _on_dequeue_bundle and _on_enqueue_bundle methods to
    track num_blocks, nbytes, etc... If not, they should override those methods.
    """

    def __init__(self):
        # Aggregate metrics over all currently queued bundles.
        self._nbytes: int = 0
        self._num_blocks: int = 0
        self._num_bundles: int = 0
        self._num_rows: int = 0

    def _on_enqueue_bundle(self, bundle: RefBundle):
        """Update the aggregate metrics when `bundle` enters the queue."""
        self._nbytes += bundle.size_bytes()
        self._num_blocks += len(bundle.block_refs)
        self._num_bundles += 1
        # Bundles with an unknown row count (num_rows() is None) count as 0.
        self._num_rows += bundle.num_rows() or 0

    def _on_dequeue_bundle(self, bundle: RefBundle):
        """Update the aggregate metrics when `bundle` leaves the queue."""
        self._nbytes -= bundle.size_bytes()
        self._num_blocks -= len(bundle.block_refs)
        self._num_bundles -= 1
        # Must mirror _on_enqueue_bundle: unknown row counts count as 0.
        self._num_rows -= bundle.num_rows() or 0

    def estimate_size_bytes(self) -> int:
        """Return the estimated size in bytes of all bundles."""
        return self._nbytes

    def num_blocks(self) -> int:
        """Return the total # of blocks across all bundles."""
        return self._num_blocks

    def num_bundles(self) -> int:
        """Return the total # of bundles currently in the queue."""
        return self._num_bundles

    def num_rows(self) -> int:
        """Return the total # of rows across all bundles."""
        return self._num_rows

    def _reset_metrics(self):
        """Zero out all aggregate metrics (e.g., after a subclass clears itself)."""
        self._num_rows = 0
        self._num_blocks = 0
        self._num_bundles = 0
        self._nbytes = 0

    def __len__(self) -> int:
        """Return the total # bundles."""
        return self._num_bundles

    def __repr__(self) -> str:
        """Return a string representation showing queue metrics."""
        nbytes = memory_string(self.estimate_size_bytes())
        return (
            f"{self.__class__.__name__}("
            f"num_bundles={len(self)}, "
            f"num_blocks={self.num_blocks()}, "
            f"num_rows={self.num_rows()}, "
            f"nbytes={nbytes})"
        )

    def add(self, bundle: RefBundle, **kwargs: Any):
        """Add a bundle to the tail(end) of the queue. Subclasses should override
        the `_add_inner` method for simple use cases. For more complex metrics tracking,
        they can override this method.

        Args:
            bundle: The bundle to add.
            **kwargs: Additional queue-specific parameters (e.g., `key` for ordered queues).
                This is used for `finalize`.
        """
        # Metrics are updated before handing the bundle to the subclass.
        self._on_enqueue_bundle(bundle)
        self._add_inner(bundle, **kwargs)

    def _add_inner(self, bundle: RefBundle, **kwargs: Any):
        # Subclass hook: actually store the bundle.
        raise NotImplementedError

    def get_next(self) -> RefBundle:
        """Remove and return the head of the queue. Subclasses should override
        the `_get_next_inner` method for simple use cases. For more complex metrics tracking,
        they can override this method.

        Raises:
            IndexError: If the queue is empty.

        Returns:
            The `RefBundle` at the head of the queue.
        """
        bundle = self._get_next_inner()
        self._on_dequeue_bundle(bundle)
        return bundle

    def _get_next_inner(self) -> RefBundle:
        # Subclass hook: actually remove and return the head bundle.
        raise NotImplementedError

    @abc.abstractmethod
    def peek_next(self) -> Optional[RefBundle]:
        """Return the head of the queue. The only invariant is
        that the # of blocks, rows, and bytes must remain unchanged
        before and after this method call.

        If queue.has_next() == False, return `None`.
        """
        ...

    @abc.abstractmethod
    def has_next(self) -> bool:
        """Check if the queue has a valid bundle."""
        ...

    @abc.abstractmethod
    def clear(self):
        """Remove all bundles from the queue."""
        ...

    def finalize(self, **kwargs: Any):
        """Signal that no additional bundles will be added to the bundler so
        the bundler can be finalized. The keys of kwargs provided should be the same
        as the ones passed into the `add()` method. This is important for ordered
        queues."""
        return None
class QueueWithRemoval(BaseBundleQueue):
    """Base class for storing bundles AND supporting remove(bundle)
    and contains(bundle) operations."""

    @abc.abstractmethod
    def __contains__(self, bundle: RefBundle) -> bool:
        """Return whether the bundle is in the queue.

        Marked abstract (matching peek_next/has_next/clear on the base class):
        the previous bare `...` body silently returned None, which would make
        `bundle in queue` always False for a subclass that forgot to override.
        """
        ...

    def remove(self, bundle: RefBundle) -> RefBundle:
        """Remove the specified bundle from the queue. If multiple instances exist, remove the first one."""
        bundle = self._remove_inner(bundle)
        # Keep the aggregate metrics in sync with the removal.
        self._on_dequeue_bundle(bundle)
        return bundle

    def _remove_inner(self, bundle: RefBundle) -> RefBundle:
        # Subclass hook: locate, remove, and return the stored bundle.
        raise NotImplementedError
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/execution/bundle_queue/base.py",
"license": "Apache License 2.0",
"lines": 123,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/serve/tutorials/model_composition_recsys/ci/nb2py.py | #!/usr/bin/env python3
import argparse
import nbformat
def convert_notebook(input_path: str, output_path: str) -> None:
    """
    Read a Jupyter notebook and write its code cells to a Python script.

    Only code cells are emitted, each followed by a blank line. Cells whose
    first line marks them as Anyscale-service client code
    (``# client_anyscale_service.py``) are skipped, since they are
    expensive/redundant to run in CI.

    Args:
        input_path: Path to the input .ipynb file.
        output_path: Path for the generated .py script.
    """
    nb = nbformat.read(input_path, as_version=4)
    # Notebooks are UTF-8; write the script with an explicit encoding so the
    # output doesn't depend on the platform's locale default.
    with open(output_path, "w", encoding="utf-8") as out:
        for cell in nb.cells:
            # Only process code cells.
            if cell.cell_type != "code":
                continue
            lines = cell.source.splitlines()
            if not lines:
                continue
            # Skip anyscale client cells (expensive/redundant to test in CI).
            if lines[0].lstrip().startswith("# client_anyscale_service.py"):
                continue
            # Regular Python cell: dump as-is.
            out.write(cell.source.rstrip() + "\n\n")
def main() -> None:
    """Parse CLI arguments and run the notebook-to-script conversion."""
    parser = argparse.ArgumentParser(
        description="Convert a Jupyter notebook to a Python script."
    )
    parser.add_argument("input_nb", help="Path to the input .ipynb file")
    parser.add_argument("output_py", help="Path for the output .py script")
    ns = parser.parse_args()

    convert_notebook(ns.input_nb, ns.output_py)


if __name__ == "__main__":
    main()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/tutorials/model_composition_recsys/ci/nb2py.py",
"license": "Apache License 2.0",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/serve/tutorials/model_composition_recsys/content/client.py | # client.py
import requests

# Request the top 5 recommendations for user_42 from the locally served
# pipeline (Serve's default HTTP endpoint).
response = requests.post(
    "http://localhost:8000",
    json={
        "user_id": "user_42",
        "top_k": 5
    }
)
print(response.json())
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/tutorials/model_composition_recsys/content/client.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/serve/tutorials/model_composition_recsys/content/client_anyscale_service.py | # client_anyscale_service.py
import requests

# Fill these in from the `anyscale service deploy` output.
ENDPOINT = "<YOUR-ENDPOINT>"  # From the deployment output
TOKEN = "<YOUR-TOKEN>"  # From the deployment output

# Same request as the local client, but authenticated with the service's
# bearer token.
response = requests.post(
    ENDPOINT,
    headers={"Authorization": f"Bearer {TOKEN}"},
    json={
        "user_id": "user_42",
        "top_k": 5
    }
)
print(response.json())
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/tutorials/model_composition_recsys/content/client_anyscale_service.py",
"license": "Apache License 2.0",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/serve/tutorials/model_composition_recsys/content/client_multiple_requests.py | # client_concurrent_requests.py
import requests
import random
from concurrent.futures import ThreadPoolExecutor, as_completed


def send_request(user_id):
    """Request top-3 recommendations for ``user_id``; return (user_id, payload)."""
    response = requests.post(
        "http://localhost:8000",
        json={"user_id": user_id, "top_k": 3}
    )
    return user_id, response.json()


# Fan out 100 requests over 10 worker threads to exercise the service under
# concurrent load; results are printed as they complete (arbitrary order).
user_ids = [f"user_{random.randint(1, 1000)}" for _ in range(100)]
with ThreadPoolExecutor(max_workers=10) as executor:
    futures = [executor.submit(send_request, uid) for uid in user_ids]
    for future in as_completed(futures):
        user_id, result = future.result()
        top_items = [rec["item_id"] for rec in result["recommendations"]]
        print(f"{user_id}: {top_items}")
print("\nSent 100 concurrent requests")
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/tutorials/model_composition_recsys/content/client_multiple_requests.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/serve/tutorials/model_composition_recsys/content/serve_recommendation_pipeline.py | # serve_recommendation_pipeline.py
import asyncio
import zlib
from typing import Any, Dict, List

import numpy as np
from starlette.requests import Request

from ray import serve
from ray.serve.handle import DeploymentHandle
# Component 1: User Feature Extractor
@serve.deployment(num_replicas=2)
class UserFeatureExtractor:
    """Extracts user features from user ID.

    In production, this queries a database or feature store.
    For this example, the code generates mock features that are
    deterministic per user id, so both replicas agree.
    """

    async def extract_features(self, user_id: str) -> Dict[str, float]:
        """Extract (mock) user features for ``user_id``."""
        # Simulate database lookup latency.
        await asyncio.sleep(0.01)

        # In production:
        # features = await db.query("SELECT * FROM user_features WHERE user_id = ?", user_id)
        # return features

        # Seed with a stable checksum of the user id. The builtin hash() is
        # randomized per process (PYTHONHASHSEED), which would make the two
        # replicas return different features for the same user.
        np.random.seed(zlib.crc32(user_id.encode()) % 10000)
        return {
            "age_group": float(np.random.randint(18, 65)),
            "avg_session_duration": float(np.random.uniform(5, 60)),
            "total_purchases": float(np.random.randint(0, 100)),
            "engagement_score": float(np.random.uniform(0, 1)),
        }
# Component 2: Item Ranking Model
@serve.deployment(
    autoscaling_config={
        "min_replicas": 1,
        "max_replicas": 5,
        "target_ongoing_requests": 3
    },
    ray_actor_options={"num_cpus": 2}
)
class ItemRankingModel:
    """Ranks items for a user based on features.

    In production, this runs a trained ML model (XGBoost, neural network, etc.).
    For this example, the code uses a simple scoring function.
    """

    # Mock item catalog. In production, this comes from a database query.
    CANDIDATE_ITEMS = [f"item_{i}" for i in range(1000)]

    def __init__(self):
        # In production, this is your cloud storage path or model registry
        # self.model = load_model("/models/ranking_model.pkl")
        pass

    def _score_items(self, user_features: Dict[str, float]) -> List[Dict[str, Any]]:
        """Score and rank all candidate items for one user, highest score first."""
        ranked_items = []
        for item_id in self.CANDIDATE_ITEMS:
            # Stable pseudo-popularity in [0, 1). The builtin hash() is
            # randomized per process (PYTHONHASHSEED), which would make
            # autoscaled replicas rank the catalog differently; crc32 is
            # deterministic everywhere.
            item_popularity = (zlib.crc32(item_id.encode()) % 100) / 100.0
            score = (
                user_features["engagement_score"] * 0.6 +
                item_popularity * 0.4
            )
            ranked_items.append({
                "item_id": item_id,
                "score": round(score, 3)
            })
        ranked_items.sort(key=lambda x: x["score"], reverse=True)
        return ranked_items

    @serve.batch(max_batch_size=32, batch_wait_timeout_s=0.01)
    async def rank_items(
        self,
        user_features_batch: List[Dict[str, float]]
    ) -> List[List[Dict[str, Any]]]:
        """Rank candidate items for a batch of users.

        Returns one ranked item list per input feature dict, in order.
        """
        # Simulate model inference time.
        await asyncio.sleep(0.05)

        # In production, use vectorized batch inference:
        # return self.model.batch_predict(user_features_batch, self.CANDIDATE_ITEMS)
        return [self._score_items(features) for features in user_features_batch]
# Component 3: Recommendation Service (Orchestrator)
@serve.deployment
class RecommendationService:
    """HTTP entry point that chains feature extraction into item ranking."""

    def __init__(
        self,
        user_feature_extractor: DeploymentHandle,
        ranking_model: DeploymentHandle
    ):
        self.user_feature_extractor = user_feature_extractor
        self.ranking_model = ranking_model

    async def __call__(self, request: Request) -> Dict:
        """Generate recommendations for a user."""
        payload = await request.json()
        user_id = payload["user_id"]
        top_k = payload.get("top_k", 5)
        # Look up the user's features first...
        features = await self.user_feature_extractor.extract_features.remote(user_id)
        # ...then score the catalog against those features.
        scored_items = await self.ranking_model.rank_items.remote(features)
        # Trim to the requested number of recommendations.
        return {
            "user_id": user_id,
            "recommendations": scored_items[:top_k]
        }
# Build the application
# Compose the app graph: the orchestrator receives handles to both
# sub-deployments so it can call them through Serve's handle API.
app = RecommendationService.bind(
    user_feature_extractor=UserFeatureExtractor.bind(),
    ranking_model=ItemRankingModel.bind()
)
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/tutorials/model_composition_recsys/content/serve_recommendation_pipeline.py",
"license": "Apache License 2.0",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/serve/tutorials/model_multiplexing_forecast/ci/nb2py.py | #!/usr/bin/env python3
import argparse
import nbformat
def convert_notebook(
    input_path: str, output_path: str, ignore_cmds: bool = False
) -> None:
    """
    Convert a Jupyter notebook to a Python script for CI testing.

    Concatenates every code cell into ``output_path``, separated by blank
    lines, with one exception: cells whose first line is a
    ``# client_anyscale_service.py`` comment are dropped, because hitting a
    deployed Anyscale service is expensive and redundant in CI.

    (Docstring fixed: this function does NOT rewrite ``!serve run`` /
    ``!serve shutdown`` commands, contrary to what it previously claimed.)

    Args:
        input_path: Path to the source ``.ipynb`` file.
        output_path: Path of the ``.py`` script to write.
        ignore_cmds: Accepted for interface compatibility; currently unused.
    """
    nb = nbformat.read(input_path, as_version=4)
    with open(output_path, "w") as out:
        for cell in nb.cells:
            # Markdown/raw cells never make it into the script.
            if cell.cell_type != "code":
                continue
            lines = cell.source.splitlines()
            # Skip empty code cells.
            if not lines:
                continue
            # Skip cells that exercise a deployed Anyscale service
            # (expensive/redundant to test in CI).
            if lines[0].lstrip().startswith("# client_anyscale_service.py"):
                continue
            # Dump the cell verbatim, terminated by a blank line.
            out.write(cell.source.rstrip() + "\n\n")
def main() -> None:
    """CLI wrapper: parse the two path arguments and run the conversion."""
    parser = argparse.ArgumentParser(
        description="Convert a Jupyter notebook to a Python script, converting serve commands to Python API calls."
    )
    parser.add_argument("input_nb", help="Path to the input .ipynb file")
    parser.add_argument("output_py", help="Path for the output .py script")
    cli_args = parser.parse_args()
    convert_notebook(cli_args.input_nb, cli_args.output_py)


if __name__ == "__main__":
    main()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/tutorials/model_multiplexing_forecast/ci/nb2py.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/serve/tutorials/model_multiplexing_forecast/content/client.py | # client.py
import requests
# time series data
sequence_data = [100, 102, 98, 105, 110, 108, 112, 115, 118, 120]
# Send request with customer_id in header
response = requests.post(
"http://localhost:8000",
headers={"serve_multiplexed_model_id": "customer_123"},
json={"sequence_data": sequence_data}
)
print(response.json())
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/tutorials/model_multiplexing_forecast/content/client.py",
"license": "Apache License 2.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/serve/tutorials/model_multiplexing_forecast/content/client_anyscale_service.py | # client_anyscale_service.py
import requests
# Fill these in with the values printed when the Anyscale service is deployed.
ENDPOINT = "<YOUR-ENDPOINT>"  # From the deployment output
TOKEN = "<YOUR-TOKEN>"  # From the deployment output

# Authenticated request against the deployed service; the multiplexed model id
# header selects which customer's model handles it.
auth_headers = {
    "Authorization": f"Bearer {TOKEN}",
    "serve_multiplexed_model_id": "customer_123",
}
response = requests.post(
    ENDPOINT,
    headers=auth_headers,
    json={"sequence_data": [100, 102, 98, 105, 110]},
)
print(response.json())
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/tutorials/model_multiplexing_forecast/content/client_anyscale_service.py",
"license": "Apache License 2.0",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/serve/tutorials/model_multiplexing_forecast/content/client_multiple_requests.py | # client_multiple_requests.py
import random
import requests
customer_ids = ["customer_123", "customer_456", "customer_789", "customer_abc", "customer_def", "customer_hij"]
# Build a random sequence of 100 requests drawn from those customer ids.
random_requests = [random.choice(customer_ids) for _ in range(100)]
# Fire the requests one at a time, printing the head of each forecast.
for i, customer_id in enumerate(random_requests):
    # Fresh "live" observations for every request.
    live_sequence_data = [random.uniform(90, 110) for _ in range(10)]
    resp = requests.post(
        "http://localhost:8000",
        headers={"serve_multiplexed_model_id": customer_id},
        json={"sequence_data": live_sequence_data},
    )
    forecast = resp.json()["forecast"]
    print(f"Request {i+1} - {customer_id}: {forecast[:3]}...")
print(f"\nSent {len(random_requests)} requests total")
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/tutorials/model_multiplexing_forecast/content/client_multiple_requests.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/serve/tutorials/model_multiplexing_forecast/content/serve_forecast_multiplex.py | # serve_forecast_multiplex.py
import asyncio
import numpy as np
import pickle
from ray import serve
from starlette.requests import Request
# Simple forecasting model
class ForecastModel:
    """Mock customer-specific forecaster: linear trend from the last observation."""

    def __init__(self, customer_id: str):
        self.customer_id = customer_id
        # Seed from the customer id so each customer gets stable, distinct
        # parameters (within a single process).
        np.random.seed(hash(customer_id) % 1000)
        self.trend = np.random.uniform(-1, 3)
        self.base_level = np.random.uniform(90, 110)

    def predict(self, sequence_data: list) -> list:
        """Return a 7-day forecast anchored on the last observed value."""
        if sequence_data:
            anchor = sequence_data[-1]
        else:
            anchor = self.base_level
        # Day k ahead = anchor + trend * k, rounded to cents.
        return [round(anchor + self.trend * step, 2) for step in range(1, 8)]
@serve.deployment
class ForecastingService:
    """Multiplexed deployment: each replica caches up to 4 customer models."""

    def __init__(self):
        # In production, this is your cloud storage path or model registry
        self.model_storage_path = "/customer-models"

    @serve.multiplexed(max_num_models_per_replica=4)
    async def get_model(self, customer_id: str):
        """Load (and cache) the forecasting model for one customer.

        In production this would download from cloud storage or a database;
        the network I/O is mocked with a short sleep here.
        """
        await asyncio.sleep(0.1)  # Mock network I/O delay
        # In production:
        # model_bytes = await download_from_storage(f"{self.model_storage_path}/{customer_id}/model.pkl")
        # return pickle.loads(model_bytes)
        return ForecastModel(customer_id)

    async def __call__(self, request: Request):
        """Forecast seven days ahead for the customer named in the header."""
        # Serve populates this from the serve_multiplexed_model_id request header.
        customer_id = serve.get_multiplexed_model_id()
        # Cached per replica via @serve.multiplexed.
        model = await self.get_model(customer_id)
        body = await request.json()
        history = body.get("sequence_data", [])
        forecast = model.predict(history)
        return {"customer_id": customer_id, "forecast": forecast}


app = ForecastingService.bind()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/tutorials/model_multiplexing_forecast/content/serve_forecast_multiplex.py",
"license": "Apache License 2.0",
"lines": 55,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/serve/_private/serialization.py | import logging
import pickle
from typing import Any, Dict, Tuple
from ray import cloudpickle
from ray.serve._private.constants import SERVE_LOGGER_NAME
try:
import orjson
except ImportError:
orjson = None
try:
import ormsgpack
except ImportError:
ormsgpack = None
logger = logging.getLogger(SERVE_LOGGER_NAME)
class SerializationMethod:
    """Available serialization methods for RPC communication."""
    # Ray's cloudpickle (the default for both requests and responses).
    CLOUDPICKLE = "cloudpickle"
    # Stdlib pickle using the highest available protocol.
    PICKLE = "pickle"
    # ormsgpack-based binary encoding (optional dependency).
    MSGPACK = "msgpack"
    # orjson-based JSON encoding; payloads must be JSON-serializable.
    ORJSON = "orjson"
    # Pass-through; payloads must already be bytes.
    NOOP = "noop"
# Process-wide cache of serializer instances keyed by
# (request_method, response_method) to avoid per-request instantiation overhead.
_serializer_cache: Dict[Tuple[str, str], "RPCSerializer"] = {}
class RPCSerializer:
    """Serializer for RPC communication with configurable serialization methods.

    Request and response payloads may use different methods (see
    ``SerializationMethod``). Method names are validated and resolved to
    concrete dumps/loads functions at construction time, so the per-call
    ``dumps_*``/``loads_*`` helpers involve no branching.
    """

    def __init__(
        self,
        request_method: str = SerializationMethod.CLOUDPICKLE,
        response_method: str = SerializationMethod.CLOUDPICKLE,
    ):
        # Normalize case up front so validation and cache keys are consistent.
        self.request_method = request_method.lower()
        self.response_method = response_method.lower()
        self._validate_methods()
        self._setup_serializers()

    @classmethod
    def get_cached_serializer(
        cls,
        request_method: str = SerializationMethod.CLOUDPICKLE,
        response_method: str = SerializationMethod.CLOUDPICKLE,
    ) -> "RPCSerializer":
        """Get a cached serializer instance to avoid per-request instantiation overhead.

        This method maintains a cache of serializer instances based on
        (request_method, response_method) pairs, significantly reducing overhead
        in high-throughput systems.

        NOTE(review): the check-then-insert below is not atomic; concurrent
        callers may briefly construct duplicate instances. Instances are
        immutable after construction, so the last write winning is benign.
        """
        # Normalize so "Pickle" and "pickle" share one cache entry.
        req_method = request_method.lower()
        resp_method = response_method.lower()
        cache_key = (req_method, resp_method)
        if cache_key not in _serializer_cache:
            _serializer_cache[cache_key] = cls(req_method, resp_method)
        return _serializer_cache[cache_key]

    def _validate_methods(self):
        """Validate that the serialization methods are supported.

        Raises:
            ValueError: if either configured method is not a known
                ``SerializationMethod`` value.
        """
        valid_methods = {
            SerializationMethod.CLOUDPICKLE,
            SerializationMethod.PICKLE,
            SerializationMethod.MSGPACK,
            SerializationMethod.ORJSON,
            SerializationMethod.NOOP,
        }
        if self.request_method not in valid_methods:
            raise ValueError(
                f"Unsupported request serialization method: {self.request_method}. "
                f"Valid options: {valid_methods}"
            )
        if self.response_method not in valid_methods:
            raise ValueError(
                f"Unsupported response serialization method: {self.response_method}. "
                f"Valid options: {valid_methods}"
            )

    def _setup_serializers(self):
        """Resolve and bind the dumps/loads function pairs for both directions."""
        self._request_dumps, self._request_loads = self._get_serializer_funcs(
            self.request_method
        )
        self._response_dumps, self._response_loads = self._get_serializer_funcs(
            self.response_method
        )

    def _get_serializer_funcs(self, method: str) -> Tuple[Any, Any]:
        """Get (dumps, loads) functions for a given serialization method."""
        if method == SerializationMethod.CLOUDPICKLE:
            return cloudpickle.dumps, cloudpickle.loads
        elif method == SerializationMethod.PICKLE:
            return self._get_pickle_funcs()
        elif method == SerializationMethod.MSGPACK:
            return self._get_msgpack_funcs()
        elif method == SerializationMethod.ORJSON:
            return self._get_orjson_funcs()
        elif method == SerializationMethod.NOOP:
            return self._get_noop_funcs()
        else:
            # Unreachable after _validate_methods(), but fail loudly instead of
            # silently returning None if a new method is ever added without a
            # corresponding branch here.
            raise ValueError(f"Unsupported serialization method: {method}")

    def _get_noop_funcs(self) -> Tuple[Any, Any]:
        """Get no-op serialization functions for binary data.

        Both directions require the payload to already be ``bytes``.
        """

        def _noop_dumps(obj: Any) -> bytes:
            if not isinstance(obj, bytes):
                raise TypeError(
                    f"a bytes-like object is required, got {type(obj).__name__}. "
                    "Use a different serialization method for non-binary data."
                )
            return obj

        def _noop_loads(data: bytes) -> Any:
            if not isinstance(data, bytes):
                raise TypeError(
                    f"a bytes-like object is required, got {type(data).__name__}. "
                    "Use a different serialization method for non-binary data."
                )
            return data

        return _noop_dumps, _noop_loads

    def _get_pickle_funcs(self) -> Tuple[Any, Any]:
        """Get pickle serialization functions with highest protocol."""

        def _pickle_dumps(obj: Any) -> bytes:
            return pickle.dumps(obj, protocol=pickle.HIGHEST_PROTOCOL)

        def _pickle_loads(data: bytes) -> Any:
            return pickle.loads(data)

        return _pickle_dumps, _pickle_loads

    def _get_msgpack_funcs(self) -> Tuple[Any, Any]:
        """Get msgpack serialization functions.

        Raises:
            ImportError: if the optional ``ormsgpack`` dependency is missing.
        """
        if ormsgpack is None:
            raise ImportError(
                "ormsgpack is not installed. Please install it with `pip install ormsgpack`."
            )

        def _msgpack_dumps(obj: Any) -> bytes:
            return ormsgpack.packb(obj)

        def _msgpack_loads(data: bytes) -> Any:
            return ormsgpack.unpackb(data)

        return _msgpack_dumps, _msgpack_loads

    def _get_orjson_funcs(self) -> Tuple[Any, Any]:
        """Get orjson serialization functions.

        Raises:
            ImportError: if the optional ``orjson`` dependency is missing.
        """
        if orjson is None:
            raise ImportError(
                "orjson is not installed. Please install it with `pip install orjson`."
            )

        # orjson only supports JSON-serializable types
        def _orjson_dumps(obj: Any) -> bytes:
            try:
                return orjson.dumps(obj)
            except TypeError as e:
                # Chain explicitly so the original orjson error is preserved
                # as the cause (previously chained only implicitly).
                raise TypeError(
                    f"orjson serialization failed: {e}. "
                    "Only JSON-serializable types are supported with orjson. "
                    "Consider using 'cloudpickle' or 'pickle' for complex objects."
                ) from e

        def _orjson_loads(data: bytes) -> Any:
            return orjson.loads(data)

        return _orjson_dumps, _orjson_loads

    def dumps_request(self, obj: Any) -> bytes:
        """Serialize a request object to bytes."""
        return self._request_dumps(obj)

    def loads_request(self, data: bytes) -> Any:
        """Deserialize bytes to a request object."""
        return self._request_loads(data)

    def dumps_response(self, obj: Any) -> bytes:
        """Serialize a response object to bytes."""
        return self._response_dumps(obj)

    def loads_response(self, data: bytes) -> Any:
        """Deserialize bytes to a response object."""
        return self._response_loads(data)
def clear_serializer_cache():
    """Clear the cached serializer instances. Useful for testing or memory management."""
    # dict.clear() mutates the existing dict in place, so the previous
    # `global` declaration was unnecessary (only rebinding the module-level
    # name would require it).
    _serializer_cache.clear()
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/_private/serialization.py",
"license": "Apache License 2.0",
"lines": 162,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/serve/tests/test_grpc_e2e.py | import os
import signal
import subprocess
import sys
from pathlib import Path
import pytest
import requests
from ray import serve
from ray._common.test_utils import wait_for_condition
from ray.serve._private.constants import SERVE_DEFAULT_APP_NAME
from ray.serve.schema import ApplicationStatus, LoggingConfig
from ray.serve.tests.conftest import * # noqa
from ray.serve.tests.conftest import _shared_serve_instance # noqa
@serve.deployment
class Downstream:
    # Minimal leaf deployment used across these tests: always returns "hi".
    def __call__(self):
        return "hi"
# Pre-built app node so `serve run` can import it by module path
# ("ray.serve.tests.test_grpc_e2e.downstream_node").
downstream_node = Downstream.bind()
@serve.deployment
class Ingress:
    """Ingress deployment that forwards calls to a downstream handle.

    Extra keyword arguments are applied to the handle via its private
    ``_init`` hook so tests can tweak handle behavior.
    """

    def __init__(self, handle, **handle_options):
        self._handle = handle
        self._handle._init(**handle_options)

    async def __call__(self):
        # Force by-value (gRPC) transport for the downstream call.
        response = self._handle.options(_by_reference=False).remote()
        return await response
@pytest.mark.skipif(
    sys.platform == "win32", reason="Temp directory cleanup fails on Windows"
)
@pytest.mark.parametrize(
    "ray_instance",
    [{"RAY_SERVE_LOG_TO_STDERR": "1"}],
    indirect=True,
)
def test_no_spammy_errors_in_composed_app(ray_instance, tmp_dir):
    """Direct all stdout/stderr to logs, and check that the false errors
    from gRPC are not there in replica logs."""
    logs_dir = Path(tmp_dir)
    logging_config = LoggingConfig(encoding="JSON", logs_dir=str(logs_dir))
    h = serve.run(
        Ingress.options(logging_config=logging_config).bind(
            Downstream.options(logging_config=logging_config).bind(),
        )
    )
    # Drive some traffic through the composed app first.
    for _ in range(10):
        assert h.options(_by_reference=False).remote().result() == "hi"
    # Known-spurious gRPC noise that must never show up in replica logs.
    spam_markers = (
        "Exception in callback",
        "PollerCompletionQueue._handle_events",
        "BlockingIOError",
        "Resource temporarily unavailable",
    )
    for entry in logs_dir.iterdir():
        if not entry.name.startswith("replica_default"):
            continue
        contents = entry.read_text()
        for marker in spam_markers:
            assert marker not in contents
def check_running():
    """Assert the default Serve app is RUNNING; returns True for wait_for_condition."""
    app_status = serve.status().applications[SERVE_DEFAULT_APP_NAME].status
    assert app_status == ApplicationStatus.RUNNING
    return True
@pytest.mark.skipif(
    sys.platform == "win32", reason="Windows signal handling not compatible"
)
@pytest.mark.parametrize(
    "ray_instance",
    [{"RAY_SERVE_PROXY_USE_GRPC": "1"}],
    indirect=True,
)
def test_no_spammy_errors_in_grpc_proxy(ray_instance, tmp_dir):
    """Direct all stdout/stderr to logs, and check that the false errors
    from gRPC are not there in proxy logs."""
    serve.start(
        http_options={"host": "0.0.0.0"},
        grpc_options={
            "port": 9000,
            "grpc_servicer_functions": [
                "ray.serve.generated.serve_pb2_grpc.add_UserDefinedServiceServicer_to_server",  # noqa
            ],
        },
    )
    proc = subprocess.Popen(
        [
            "serve",
            "run",
            "--address=auto",
            "ray.serve.tests.test_grpc_e2e.downstream_node",
        ],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    # `serve run` starts in a non-blocking subprocess, so there is no direct
    # signal for when the app is serving; poll the endpoint until it answers.
    wait_for_condition(
        lambda: requests.post("http://localhost:8000").status_code == 200,
    )
    for _ in range(10):
        assert requests.post("http://localhost:8000").text == "hi"
    # Interrupt the subprocess and collect everything it printed.
    proc.send_signal(signal.SIGINT)
    proc.wait()
    captured, _ = proc.communicate()
    logs = captured.decode("utf-8").strip()
    for marker in (
        "Exception in callback",
        "PollerCompletionQueue._handle_events",
        "BlockingIOError",
        "Resource temporarily unavailable",
    ):
        assert marker not in logs
def test_same_loop_handle(serve_instance):
    """_run_router_in_separate_loop=False requires a running asyncio loop."""
    # Outside of a replica there is no running loop, so this must fail.
    h = serve.run(Downstream.bind())
    with pytest.raises(RuntimeError, match="No event loop running"):
        h._init(_run_router_in_separate_loop=False)
    # Inside a replica (the Ingress deployment) an event loop is running,
    # so the same option is accepted there.
    app_handle = serve.run(
        Ingress.bind(
            Downstream.bind(),
            _run_router_in_separate_loop=False,
        )
    )
    assert app_handle.options(_by_reference=False).remote().result() == "hi"
def test_custom_serialization_method(serve_instance):
    """Requests/responses round-trip with non-default serialization methods."""

    @serve.deployment
    class Downstream:
        def __call__(self, message: str):
            return f"Hello {message}!"

    h = serve.run(Downstream.bind())
    # (request serialization, response serialization, argument) combos.
    cases = [
        ("pickle", "pickle", "world1"),
        ("pickle", "cloudpickle", "world2"),
    ]
    for req_ser, resp_ser, name in cases:
        result = (
            h.options(
                _by_reference=False,
                request_serialization=req_ser,
                response_serialization=resp_ser,
            )
            .remote(name)
            .result()
        )
        assert result == f"Hello {name}!"


if __name__ == "__main__":
    sys.exit(pytest.main(["-v", "-s", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/test_grpc_e2e.py",
"license": "Apache License 2.0",
"lines": 149,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/serve/tests/test_grpc_replica_wrapper.py | import asyncio
import pickle
import sys
import grpc
import pytest
import ray
from ray import cloudpickle
from ray._common.test_utils import SignalActor
from ray._common.utils import get_or_create_event_loop
from ray.serve._private.common import (
DeploymentID,
ReplicaID,
ReplicaQueueLengthInfo,
RequestMetadata,
RunningReplicaInfo,
)
from ray.serve._private.constants import SERVE_NAMESPACE
from ray.serve._private.request_router.common import PendingRequest
from ray.serve._private.request_router.replica_wrapper import (
RunningReplica,
)
from ray.serve._private.test_utils import send_signal_on_cancellation
from ray.serve.generated import serve_pb2, serve_pb2_grpc
from ray.serve.tests.conftest import * # noqa
@ray.remote(num_cpus=0)
class FakeReplicaActor:
    """Fake Serve replica: a Ray actor hosting a real grpc.aio ASGIService server.

    Lets the tests exercise RunningReplica's by-value (gRPC) transport without
    starting an actual deployment.
    """
    def __init__(self):
        # Queue-length info returned by the *WithRejection handlers; must be
        # set via set_replica_queue_length_info() before those are called.
        self._replica_queue_length_info = None
        self._server = grpc.aio.server()
    async def start(self):
        """Start the gRPC server on an OS-assigned port and return that port."""
        serve_pb2_grpc.add_ASGIServiceServicer_to_server(self, self._server)
        self._internal_grpc_port = self._server.add_insecure_port("[::]:0")
        await self._server.start()
        return self._internal_grpc_port
    def set_replica_queue_length_info(self, info: ReplicaQueueLengthInfo):
        """Configure the accepted/num_ongoing_requests reply for rejection RPCs."""
        self._replica_queue_length_info = info
    async def HandleRequest(
        self,
        request: serve_pb2.ASGIRequest,
        context: grpc.aio.ServicerContext,
    ):
        """Unary echo: returns the first positional argument unchanged."""
        args = cloudpickle.loads(request.request_args)
        return serve_pb2.ASGIResponse(serialized_message=cloudpickle.dumps(args[0]))
    async def HandleRequestStreaming(
        self,
        request: serve_pb2.ASGIRequest,
        context: grpc.aio.ServicerContext,
    ):
        """Streaming echo: yields "<arg>-0" .. "<arg>-4".

        HTTP requests get the raw string payload; others get a cloudpickled one.
        """
        request_metadata = pickle.loads(request.pickled_request_metadata)
        args = cloudpickle.loads(request.request_args)
        message = args[0]
        for i in range(5):
            if request_metadata.is_http_request:
                yield serve_pb2.ASGIResponse(serialized_message=f"{message}-{i}")
            else:
                yield serve_pb2.ASGIResponse(
                    serialized_message=cloudpickle.dumps(f"{message}-{i}")
                )
    async def HandleRequestWithRejection(
        self,
        request: serve_pb2.ASGIRequest,
        context: grpc.aio.ServicerContext,
    ):
        """Unary echo that first reports accept/queue-length via initial metadata.

        If `cancelled_signal_actor` is present in kwargs, the handler instead
        signals that it is executing and hangs until the RPC is cancelled
        (used by the cancellation test).
        """
        args = cloudpickle.loads(request.request_args)
        kwargs = cloudpickle.loads(request.request_kwargs)
        cancelled_signal_actor = kwargs.pop("cancelled_signal_actor", None)
        if cancelled_signal_actor is not None:
            executing_signal_actor = kwargs.pop("executing_signal_actor")
            async with send_signal_on_cancellation(cancelled_signal_actor):
                await executing_signal_actor.send.remote()
            return
        # Report acceptance and current queue length before any payload.
        await context.send_initial_metadata(
            [
                ("accepted", str(int(self._replica_queue_length_info.accepted))),
                (
                    "num_ongoing_requests",
                    str(self._replica_queue_length_info.num_ongoing_requests),
                ),
            ]
        )
        if not self._replica_queue_length_info.accepted:
            # NOTE(edoakes): in gRPC, it's not guaranteed that the initial metadata sent
            # by the server will be delivered for a stream with no messages. Therefore,
            # we send a dummy message here to ensure it is populated in every case.
            # The same is done in the actual gRPC replica implementation.
            return serve_pb2.ASGIResponse(serialized_message=b"")
        message = args[0]
        return serve_pb2.ASGIResponse(serialized_message=cloudpickle.dumps(message))
    async def HandleRequestWithRejectionStreaming(
        self,
        request: serve_pb2.ASGIRequest,
        context: grpc.aio.ServicerContext,
    ):
        """Streaming variant of HandleRequestWithRejection (five echoed messages)."""
        request_metadata = pickle.loads(request.pickled_request_metadata)
        args = cloudpickle.loads(request.request_args)
        kwargs = cloudpickle.loads(request.request_kwargs)
        cancelled_signal_actor = kwargs.pop("cancelled_signal_actor", None)
        if cancelled_signal_actor is not None:
            executing_signal_actor = kwargs.pop("executing_signal_actor")
            async with send_signal_on_cancellation(cancelled_signal_actor):
                await executing_signal_actor.send.remote()
            return
        # Report acceptance and current queue length before any payload.
        await context.send_initial_metadata(
            [
                ("accepted", str(int(self._replica_queue_length_info.accepted))),
                (
                    "num_ongoing_requests",
                    str(self._replica_queue_length_info.num_ongoing_requests),
                ),
            ]
        )
        if not self._replica_queue_length_info.accepted:
            # NOTE(edoakes): in gRPC, it's not guaranteed that the initial metadata sent
            # by the server will be delivered for a stream with no messages. Therefore,
            # we send a dummy message here to ensure it is populated in every case.
            # The same is done in the actual gRPC replica implementation.
            yield serve_pb2.ASGIResponse(serialized_message=b"")
            return
        message = args[0]
        for i in range(5):
            if request_metadata.is_http_request:
                yield serve_pb2.ASGIResponse(
                    serialized_message=pickle.dumps(f"{message}-{i}")
                )
            else:
                yield serve_pb2.ASGIResponse(
                    serialized_message=cloudpickle.dumps(f"{message}-{i}")
                )
@pytest.fixture
def setup_fake_replica(ray_instance, request) -> RunningReplicaInfo:
    """Start a FakeReplicaActor and describe it as a running replica."""
    replica_id = ReplicaID(
        "fake_replica", deployment_id=DeploymentID(name="fake_deployment")
    )
    actor_name = replica_id.to_full_id_str()
    # The actor must be named so RunningReplicaInfo.get_actor_handle() can
    # find it later.
    # NOTE(review): no explicit teardown for this detached actor — presumably
    # torn down with the per-test ray_instance; confirm.
    fake_replica = FakeReplicaActor.options(
        name=actor_name, namespace=SERVE_NAMESPACE, lifetime="detached"
    ).remote()
    grpc_port = ray.get(fake_replica.start.remote())
    return RunningReplicaInfo(
        replica_id=replica_id,
        node_id=None,
        # The fake server listens locally, so use the loopback address.
        node_ip="127.0.0.1",
        availability_zone=None,
        actor_name=actor_name,
        max_ongoing_requests=10,
        is_cross_language=False,
        # Port the FakeReplicaActor's gRPC server bound to.
        port=grpc_port,
    )
@pytest.mark.asyncio
@pytest.mark.parametrize("is_streaming", [False, True])
@pytest.mark.parametrize("on_separate_loop", [True, False])
async def test_to_object_ref_not_supported(
    setup_fake_replica: RunningReplicaInfo, is_streaming: bool, on_separate_loop: bool
):
    """By-value (gRPC) results cannot be converted to Ray ObjectRefs."""
    replica = RunningReplica(setup_fake_replica)
    pending = PendingRequest(
        args=["Hello"],
        kwargs={"is_streaming": is_streaming},
        metadata=RequestMetadata(
            request_id="abc",
            internal_request_id="def",
            is_streaming=is_streaming,
            _by_reference=False,  # use gRPC transport
            _on_separate_loop=on_separate_loop,
        ),
    )
    expected = "Converting by-value DeploymentResponses to ObjectRefs is not supported."
    result = replica.try_send_request(pending, with_rejection=False)
    if is_streaming:
        with pytest.raises(RuntimeError, match=expected):
            result.to_object_ref_gen()
    else:
        with pytest.raises(RuntimeError, match=expected):
            await result.to_object_ref_async()
        with pytest.raises(RuntimeError, match=expected):
            result.to_object_ref(timeout_s=0.0)
@pytest.mark.asyncio
@pytest.mark.parametrize("is_streaming", [False, True])
@pytest.mark.parametrize("on_separate_loop", [True, False])
async def test_send_request(
    setup_fake_replica: RunningReplicaInfo, is_streaming: bool, on_separate_loop: bool
):
    """Round-trip a request through the fake replica over the gRPC transport."""
    replica = RunningReplica(setup_fake_replica)
    pending = PendingRequest(
        args=["Hello"],
        kwargs={"is_streaming": is_streaming},
        metadata=RequestMetadata(
            request_id="abc",
            internal_request_id="def",
            is_streaming=is_streaming,
            _by_reference=False,  # use gRPC transport
            _on_separate_loop=on_separate_loop,
        ),
    )
    result = replica.try_send_request(pending, with_rejection=False)
    if is_streaming:
        # The fake replica echoes five indexed messages.
        for idx in range(5):
            assert await result.__anext__() == f"Hello-{idx}"
    else:
        assert await result.get_async() == "Hello"
@pytest.mark.asyncio
@pytest.mark.parametrize("accepted", [False, True])
@pytest.mark.parametrize("is_streaming", [False, True])
@pytest.mark.parametrize("on_separate_loop", [True, False])
async def test_send_request_with_rejection(
    setup_fake_replica: RunningReplicaInfo,
    accepted: bool,
    is_streaming: bool,
    on_separate_loop: bool,
):
    """Rejection metadata is surfaced, and accepted requests still yield data."""
    actor_handle = setup_fake_replica.get_actor_handle()
    replica = RunningReplica(setup_fake_replica)
    # Configure what the fake replica reports in its initial metadata.
    ray.get(
        actor_handle.set_replica_queue_length_info.remote(
            ReplicaQueueLengthInfo(accepted=accepted, num_ongoing_requests=10),
        )
    )
    pending = PendingRequest(
        args=["Hello"],
        kwargs={"is_streaming": is_streaming},
        metadata=RequestMetadata(
            request_id="abc",
            internal_request_id="def",
            is_streaming=is_streaming,
            _by_reference=False,  # use gRPC transport
            _on_separate_loop=on_separate_loop,
        ),
    )
    replica_result = replica.try_send_request(pending, with_rejection=True)
    info = await replica_result.get_rejection_response()
    assert info.accepted == accepted
    assert info.num_ongoing_requests == 10
    if accepted:
        if is_streaming:
            for idx in range(5):
                assert await replica_result.__anext__() == f"Hello-{idx}"
        else:
            assert await replica_result.__anext__() == "Hello"
@pytest.mark.asyncio
@pytest.mark.parametrize("on_separate_loop", [True, False])
async def test_send_request_with_rejection_cancellation(
    setup_fake_replica: RunningReplicaInfo, on_separate_loop: bool
):
    """
    Verify that the downstream actor method call is cancelled if the call to send the
    request to the replica is cancelled.
    """
    replica = RunningReplica(setup_fake_replica)
    # executing -> set by the replica when its handler starts running;
    # cancelled -> set by the replica when its handler observes cancellation.
    executing_signal_actor = SignalActor.remote()
    cancelled_signal_actor = SignalActor.remote()
    # Passing cancelled_signal_actor makes the fake replica hang until the RPC
    # is cancelled instead of responding (see FakeReplicaActor above).
    pr = PendingRequest(
        args=["Hello"],
        kwargs={
            "cancelled_signal_actor": cancelled_signal_actor,
            "executing_signal_actor": executing_signal_actor,
        },
        metadata=RequestMetadata(
            request_id="abc",
            internal_request_id="def",
            _by_reference=False,  # use gRPC transport
            _on_separate_loop=on_separate_loop,
        ),
    )
    # Send request should hang because the downstream actor method call blocks
    # before sending the system message.
    replica_result = replica.try_send_request(pr, with_rejection=True)
    request_task = get_or_create_event_loop().create_task(
        replica_result.get_rejection_response()
    )
    # Check that the downstream actor method call has started.
    await executing_signal_actor.wait.remote()
    # Short-timeout wait: the task must still be pending (handler is hanging).
    _, pending = await asyncio.wait([request_task], timeout=0.001)
    assert len(pending) == 1
    # Cancel the task. This should cause the downstream actor method call to
    # be cancelled (verified via signal actor).
    request_task.cancel()
    with pytest.raises(asyncio.CancelledError):
        await request_task
    await cancelled_signal_actor.wait.remote()
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", "-s", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/test_grpc_replica_wrapper.py",
"license": "Apache License 2.0",
"lines": 285,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/serve/tests/unit/test_grpc_replica_result.py | import asyncio
import sys
import threading
import pytest
from ray import ActorID, cloudpickle
from ray._common.test_utils import wait_for_condition
from ray.serve._private.common import RequestMetadata
from ray.serve._private.replica_result import gRPCReplicaResult
from ray.serve.generated import serve_pb2
class FakegRPCUnaryCall:
    """Awaitable stand-in for a unary grpc.aio call.

    Remembers the loop it was created on and raises if awaited from any
    other loop — which is exactly what these tests assert on.
    """

    def __init__(self, item, is_error: bool = False):
        self._loop = asyncio.get_running_loop()
        self._item = item
        self._is_error = is_error

    def __await__(self):
        # Must be consumed on the loop that created the call.
        if asyncio.get_running_loop() != self._loop:
            raise RuntimeError("Tried to fetch from a different loop!")
        yield
        response = serve_pb2.ASGIResponse(
            serialized_message=cloudpickle.dumps(self._item),
            is_error=self._is_error,
        )
        return response

    def add_done_callback(self, cb):
        # Real gRPC calls accept done callbacks; the fake ignores them.
        pass
class FakegRPCStreamCall:
    """Async-iterable stand-in for a streaming grpc.aio call.

    Yields one ASGIResponse per (item, is_error) pair, optionally blocking on
    a threading.Event before each message; raises if consumed from a loop
    other than the one that created it.
    """

    def __init__(self, items, *, event: threading.Event = None):
        self._loop = asyncio.get_running_loop()
        self._items = items
        self._event = event

    def is_empty(self) -> bool:
        assert len(self._items) == 0
        return True

    def __aiter__(self):
        return self

    async def __anext__(self):
        # Must be consumed on the loop that created the call.
        if asyncio.get_running_loop() != self._loop:
            raise RuntimeError("Tried to fetch from a different loop!")
        if not self._items:
            raise StopAsyncIteration
        if self._event:
            # Block in a worker thread so the event loop stays responsive.
            await self._loop.run_in_executor(None, self._event.wait)
        payload, error_flag = self._items.pop(0)
        return serve_pb2.ASGIResponse(
            serialized_message=cloudpickle.dumps(payload),
            is_error=error_flag,
        )

    def add_done_callback(self, cb):
        # Real gRPC calls accept done callbacks; the fake ignores them.
        pass
@pytest.fixture
def create_asyncio_event_loop_in_thread():
    """Run a fresh asyncio event loop on a daemon background thread.

    Yields:
        ``(loop, event)``: the running loop, and a ``threading.Event`` tests
        may use to block/unblock coroutines scheduled on that loop.
    """
    async_loop = asyncio.new_event_loop()
    thread = threading.Thread(daemon=True, target=async_loop.run_forever)
    thread.start()
    event = threading.Event()
    yield async_loop, event
    # Unblock event in case it's blocking shutdown
    event.set()
    # Stop the loop and join the thread so loops/threads don't leak across
    # tests; close() releases the loop's selector resources.
    async_loop.call_soon_threadsafe(async_loop.stop)
    thread.join(timeout=5)
    async_loop.close()
@pytest.mark.asyncio
class TestSameLoop:
    """gRPCReplicaResult behavior when the call runs on the caller's own loop
    (``_on_separate_loop=False``)."""
    def make_fake_call(self, is_streaming: bool, *, data=None, error=None):
        """Build a gRPCReplicaResult over a fake unary or streaming call.

        ``error`` (unary only) wraps the payload with ``is_error=True`` so the
        result surfaces it as a raised exception.
        """
        if is_streaming:
            fake_call = FakegRPCStreamCall(data)
        else:
            if error:
                fake_call = FakegRPCUnaryCall(error, is_error=True)
            else:
                fake_call = FakegRPCUnaryCall(data, is_error=False)
        return gRPCReplicaResult(
            fake_call,
            metadata=RequestMetadata(
                request_id="",
                internal_request_id="",
                is_streaming=False,
                _on_separate_loop=False,
            ),
            actor_id=ActorID(b"2" * 16),
            # Same-loop case: bind the result to the currently running loop.
            loop=asyncio.get_running_loop(),
        )
    async def test_unary(self):
        replica_result = self.make_fake_call(is_streaming=False, data="hello")
        assert await replica_result.get_async() == "hello"
    async def test_streaming(self):
        replica_result = self.make_fake_call(
            is_streaming=True, data=[(1, False), (2, False), (3, False), (4, False)]
        )
        assert [r async for r in replica_result] == [1, 2, 3, 4]
    async def test_unary_with_gen(self):
        # A single-item stream should be retrievable like a unary result.
        replica_result = self.make_fake_call(is_streaming=True, data=[("hello", False)])
        assert await replica_result.get_async() == "hello"
    async def test_unary_error(self):
        """Test error is raised correctly."""
        replica_result = self.make_fake_call(
            is_streaming=False, error=RuntimeError("oh no!")
        )
        with pytest.raises(RuntimeError, match="oh no!"):
            await replica_result.get_async()
    async def test_streaming_error(self):
        """Test error is raised correctly."""
        replica_result = self.make_fake_call(
            is_streaming=True, data=[(RuntimeError("oh no!"), True)]
        )
        with pytest.raises(RuntimeError, match="oh no!"):
            await replica_result.__anext__()
class TestSeparateLoop:
    """gRPCReplicaResult behavior when the underlying gRPC call lives on a
    dedicated event loop running in a background thread
    (``_on_separate_loop=True``)."""
    async def make_fake_unary_request(self, data, loop: asyncio.AbstractEventLoop):
        """Build a unary-call result bound to ``loop``; must run on that loop."""
        fake_call = FakegRPCUnaryCall(data)
        replica_result = gRPCReplicaResult(
            fake_call,
            metadata=RequestMetadata(
                request_id="",
                internal_request_id="",
                is_streaming=False,
                _on_separate_loop=True,
            ),
            actor_id=ActorID(b"2" * 16),
            loop=loop,
        )
        return replica_result
    async def make_fake_streaming_request(
        self,
        data,
        loop: asyncio.AbstractEventLoop,
        on_separate_loop: bool,
        *,
        is_streaming: bool = True,
        event: threading.Event = None,
        error=None,
    ):
        """Build a streaming-call result bound to ``loop``.

        ``error`` replaces the data with one error item; ``event`` gates each
        item so tests can hold the stream blocked.
        """
        if error:
            fake_call = FakegRPCStreamCall([(error, True)], event=event)
        else:
            fake_call = FakegRPCStreamCall([(d, False) for d in data], event=event)
        return gRPCReplicaResult(
            fake_call,
            metadata=RequestMetadata(
                request_id="",
                internal_request_id="",
                is_streaming=is_streaming,
                _on_separate_loop=on_separate_loop,
            ),
            actor_id=ActorID(b"2" * 16),
            loop=loop,
        )
    def test_unary_sync(self, create_asyncio_event_loop_in_thread):
        """Synchronous get() fetches a unary result produced on the other loop."""
        loop, _ = create_asyncio_event_loop_in_thread
        fut = asyncio.run_coroutine_threadsafe(
            self.make_fake_unary_request("hello", loop), loop=loop
        )
        replica_result = fut.result()
        assert replica_result.get(None) == "hello"
    @pytest.mark.asyncio
    async def test_unary_async(self, create_asyncio_event_loop_in_thread):
        """get_async() fetches a unary result produced on the other loop."""
        loop, _ = create_asyncio_event_loop_in_thread
        fut = asyncio.run_coroutine_threadsafe(
            self.make_fake_unary_request("hello", loop), loop=loop
        )
        replica_result = fut.result()
        assert await replica_result.get_async() == "hello"
    def test_streaming_sync(self, create_asyncio_event_loop_in_thread):
        loop, _ = create_asyncio_event_loop_in_thread
        # Instantiate gRPCReplicaResult with FakegRPCStreamCall. This needs
        # to be run on the "other loop"
        fut = asyncio.run_coroutine_threadsafe(
            self.make_fake_streaming_request([1, 2, 3, 4], loop, on_separate_loop=True),
            loop=loop,
        )
        replica_result = fut.result()
        # The async generator should be consumed even if we don't fetch
        # the items explicitly through the ReplicaResult object
        wait_for_condition(replica_result._call.is_empty, retry_interval_ms=10)
        # Finally, check results given by gRPCReplicaResult fetched from
        # the queue are correct
        assert list(replica_result) == [1, 2, 3, 4]
    @pytest.mark.asyncio
    async def test_streaming_async(self, create_asyncio_event_loop_in_thread):
        loop, _ = create_asyncio_event_loop_in_thread
        fut = asyncio.run_coroutine_threadsafe(
            self.make_fake_streaming_request([1, 2, 3, 4], loop, on_separate_loop=True),
            loop=loop,
        )
        replica_result = fut.result()
        # Check async generator is consumed on its own
        wait_for_condition(replica_result._call.is_empty, retry_interval_ms=10)
        assert [r async for r in replica_result] == [1, 2, 3, 4]
    @pytest.mark.asyncio
    async def test_streaming_blocked(self, create_asyncio_event_loop_in_thread):
        """Use threading event to block async generator, check everything works"""
        loop, event = create_asyncio_event_loop_in_thread
        fut = asyncio.run_coroutine_threadsafe(
            self.make_fake_streaming_request(
                [1, 2, 3, 4], loop, on_separate_loop=True, event=event
            ),
            loop=loop,
        )
        replica_result = fut.result()
        async def fetch():
            return [r async for r in replica_result]
        t = asyncio.create_task(fetch())
        # While the event is unset the stream can't progress, so the fetch
        # task must still be pending after a short wait (shield keeps the
        # timeout from cancelling it).
        with pytest.raises(asyncio.TimeoutError):
            await asyncio.wait_for(asyncio.shield(t), 0.01)
        event.set()
        assert await t == [1, 2, 3, 4]
    def test_unary_with_gen_sync(self, create_asyncio_event_loop_in_thread):
        loop, _ = create_asyncio_event_loop_in_thread
        fut = asyncio.run_coroutine_threadsafe(
            self.make_fake_streaming_request(
                ["hello"], loop, on_separate_loop=True, is_streaming=False
            ),
            loop=loop,
        )
        replica_result = fut.result()
        # Check async generator is consumed on its own
        wait_for_condition(replica_result._call.is_empty, retry_interval_ms=10)
        assert replica_result.get(None) == "hello"
    @pytest.mark.asyncio
    async def test_unary_with_gen_async(self, create_asyncio_event_loop_in_thread):
        loop, _ = create_asyncio_event_loop_in_thread
        fut = asyncio.run_coroutine_threadsafe(
            self.make_fake_streaming_request(
                ["hello"], loop, on_separate_loop=True, is_streaming=False
            ),
            loop=loop,
        )
        replica_result = fut.result()
        # Check async generator is consumed on its own
        wait_for_condition(replica_result._call.is_empty, retry_interval_ms=10)
        assert await replica_result.get_async() == "hello"
    @pytest.mark.asyncio
    async def test_unary_with_gen_blocked(self, create_asyncio_event_loop_in_thread):
        """Use threading event to block async generator, check everything works"""
        loop, event = create_asyncio_event_loop_in_thread
        fut = asyncio.run_coroutine_threadsafe(
            self.make_fake_streaming_request(
                ["hello"], loop, on_separate_loop=True, event=event
            ),
            loop=loop,
        )
        replica_result = fut.result()
        t = asyncio.create_task(replica_result.get_async())
        with pytest.raises(asyncio.TimeoutError):
            await asyncio.wait_for(asyncio.shield(t), 0.01)
        event.set()
        assert await t == "hello"
    def test_unary_with_timeout(self, create_asyncio_event_loop_in_thread):
        """Test get() with timeout."""
        loop, event = create_asyncio_event_loop_in_thread
        fut = asyncio.run_coroutine_threadsafe(
            self.make_fake_streaming_request(
                ["hello"], loop, on_separate_loop=True, event=event
            ),
            loop=loop,
        )
        replica_result = fut.result()
        # Blocked stream: get() must time out rather than hang.
        with pytest.raises(TimeoutError):
            replica_result.get(timeout_s=0.01)
        event.set()
        assert replica_result.get(timeout_s=0.01) == "hello"
    def test_unary_error_sync(self, create_asyncio_event_loop_in_thread):
        """Test error is raised correctly."""
        loop, _ = create_asyncio_event_loop_in_thread
        fut = asyncio.run_coroutine_threadsafe(
            self.make_fake_streaming_request(
                None, loop, on_separate_loop=True, error=RuntimeError("oh no!")
            ),
            loop=loop,
        )
        replica_result = fut.result()
        with pytest.raises(RuntimeError, match="oh no!"):
            replica_result.get(None)
    @pytest.mark.asyncio
    async def test_unary_error_async(self, create_asyncio_event_loop_in_thread):
        """Test error is raised correctly."""
        loop, _ = create_asyncio_event_loop_in_thread
        fut = asyncio.run_coroutine_threadsafe(
            self.make_fake_streaming_request(
                None, loop, on_separate_loop=True, error=RuntimeError("oh no!")
            ),
            loop=loop,
        )
        replica_result = fut.result()
        with pytest.raises(RuntimeError, match="oh no!"):
            await replica_result.get_async()
    def test_streaming_error_sync(self, create_asyncio_event_loop_in_thread):
        """Test error is raised correctly."""
        loop, _ = create_asyncio_event_loop_in_thread
        fut = asyncio.run_coroutine_threadsafe(
            self.make_fake_streaming_request(
                None, loop, on_separate_loop=True, error=RuntimeError("oh no!")
            ),
            loop=loop,
        )
        replica_result = fut.result()
        with pytest.raises(RuntimeError, match="oh no!"):
            replica_result.__next__()
    @pytest.mark.asyncio
    async def test_streaming_error_async(self, create_asyncio_event_loop_in_thread):
        """Test error is raised correctly."""
        loop, _ = create_asyncio_event_loop_in_thread
        fut = asyncio.run_coroutine_threadsafe(
            self.make_fake_streaming_request(
                None, loop, on_separate_loop=True, error=RuntimeError("oh no!")
            ),
            loop=loop,
        )
        replica_result = fut.result()
        with pytest.raises(RuntimeError, match="oh no!"):
            await replica_result.__anext__()
if __name__ == "__main__":
sys.exit(pytest.main(["-v", "-s", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/unit/test_grpc_replica_result.py",
"license": "Apache License 2.0",
"lines": 308,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/_internal/logical/rules/combine_shuffles.py | from ray.data._internal.logical.interfaces import (
LogicalOperator,
LogicalPlan,
Plan,
Rule,
)
from ray.data._internal.logical.operators import (
Aggregate,
Repartition,
Sort,
StreamingRepartition,
)
__all__ = [
"CombineShuffles",
]
class CombineShuffles(Rule):
    """Logical rule that fuses chains of shuffle-like operators.

    Adjacent operators that each imply a shuffle (e.g. two ``Repartition``
    ops, or a ``Repartition`` feeding an ``Aggregate``) are collapsed into a
    single operator, keeping the downstream op's output configuration.
    """

    def apply(self, plan: Plan) -> Plan:
        """Apply the rule; returns ``plan`` unchanged when nothing fused."""
        assert isinstance(plan, LogicalPlan)
        original_dag = plan.dag
        transformed_dag = original_dag._apply_transform(self._combine)
        # Avoid rebuilding the plan when the transform was a no-op.
        if transformed_dag is original_dag:
            return plan
        # TODO replace w/ Plan.copy
        return LogicalPlan(
            dag=transformed_dag,
            context=plan.context,
        )

    @classmethod
    def _combine(cls, op: LogicalOperator) -> LogicalOperator:
        """Fuse ``op`` with its single upstream operator when both shuffle.

        Returns the fused operator, or ``op`` unchanged when no fusion
        applies.
        """
        # Fusion only applies to linear chains (exactly one upstream op).
        if len(op.input_dependencies) != 1:
            return op
        input_op = op.input_dependencies[0]
        if isinstance(input_op, Repartition) and isinstance(op, Repartition):
            # Back-to-back repartitions: keep the downstream configuration,
            # but preserve a shuffle if either op requested one.
            shuffle = input_op.shuffle or op.shuffle
            return Repartition(
                input_op.input_dependencies[0],
                num_outputs=op.num_outputs,
                shuffle=shuffle,
                keys=op.keys,
                sort=op.sort,
            )
        elif isinstance(input_op, StreamingRepartition) and isinstance(
            op, StreamingRepartition
        ):
            # Keep strictness if either op demanded it.
            strict = input_op._strict or op._strict
            return StreamingRepartition(
                input_op.input_dependencies[0],
                target_num_rows_per_block=op.target_num_rows_per_block,
                strict=strict,
            )
        elif isinstance(input_op, Repartition) and isinstance(op, Aggregate):
            # Drop a repartition directly feeding an aggregate; the
            # aggregate's own num_partitions is kept.
            return Aggregate(
                input_op=input_op.input_dependencies[0],
                key=op.key,
                aggs=op.aggs,
                num_partitions=op.num_partitions,
                batch_format=op.batch_format,
            )
        elif isinstance(input_op, StreamingRepartition) and isinstance(op, Repartition):
            # A full repartition supersedes an upstream streaming repartition.
            return Repartition(
                input_op.input_dependencies[0],
                num_outputs=op.num_outputs,
                shuffle=op.shuffle,
                keys=op.keys,
                sort=op.sort,
            )
        elif isinstance(input_op, Sort) and isinstance(op, Sort):
            # Consecutive sorts: keep only the downstream sort's key.
            return Sort(
                input_op.input_dependencies[0],
                sort_key=op.sort_key,
                batch_format=op.batch_format,
            )
        return op
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/logical/rules/combine_shuffles.py",
"license": "Apache License 2.0",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:ci/ray_ci/automation/extract_wanda_wheels.py | import os
import shutil
import tempfile
from pathlib import Path
import click
from ci.ray_ci.automation.crane_lib import call_crane_export
from ci.ray_ci.utils import ecr_docker_login, logger
def _default_output_dir() -> str:
    """Resolve the default wheel output directory.

    Uses ``$BUILD_WORKSPACE_DIRECTORY/.whl`` when that env var is set (e.g.
    under Bazel), otherwise a relative ``.whl`` directory.
    """
    root = os.environ.get("BUILD_WORKSPACE_DIRECTORY")
    return os.path.join(root, ".whl") if root else ".whl"
class ExtractWandaWheelsError(Exception):
    """Raised when extracting wheels from a Wanda-cached image fails."""
@click.command()
@click.option(
    "--wanda-image-name",
    type=str,
    required=True,
    help="Name of the Wanda-cached image (e.g., 'forge').",
)
@click.option(
    "--rayci-work-repo",
    type=str,
    envvar="RAYCI_WORK_REPO",
    required=True,
    help="RAYCI work repository URL. Read from RAYCI_WORK_REPO env var if not set.",
)
@click.option(
    "--rayci-build-id",
    type=str,
    envvar="RAYCI_BUILD_ID",
    required=True,
    help="RAYCI build ID. Read from RAYCI_BUILD_ID env var if not set.",
)
@click.option(
    "--output-dir",
    default=None,
    help="Directory to output extracted wheels (default: .whl in workspace)",
)
def main(
    wanda_image_name: str,
    rayci_work_repo: str,
    rayci_build_id: str,
    output_dir: str | None,
) -> None:
    """
    Extract wheels from a Wanda-cached image to the specified output directory.
    """
    # Resolve and reset the destination directory so stale wheels never mix
    # with freshly extracted ones.
    dest = Path(output_dir) if output_dir is not None else Path(_default_output_dir())
    if dest.exists():
        shutil.rmtree(dest)
    dest.mkdir(parents=True)
    wanda_image = f"{rayci_work_repo}:{rayci_build_id}-{wanda_image_name}"
    logger.info(f"Extracting wheels from: {wanda_image}")
    # The registry host is the part of the repo URL before the first slash.
    ecr_docker_login(rayci_work_repo.split("/")[0])
    with tempfile.TemporaryDirectory() as tmpdir:
        call_crane_export(wanda_image, tmpdir)
        # Materialize the listing before moving so we don't mutate the tree
        # while scanning it.
        for found in list(Path(tmpdir).rglob("*.whl")):
            shutil.move(found, dest / found.name)
    # Verify that wheels were actually extracted by looking at the output directory
    wheels = list(dest.rglob("*.whl"))
    if not wheels:
        raise ExtractWandaWheelsError(
            f"No wheel files were extracted from image: {wanda_image}."
        )
    logger.info(
        f"Extracted {len(wheels)} wheel(s) to: {dest.absolute().resolve()}"
    )
    for wheel in wheels:
        logger.info(f"  {wheel.name}")
if __name__ == "__main__":
main()
| {
"repo_id": "ray-project/ray",
"file_path": "ci/ray_ci/automation/extract_wanda_wheels.py",
"license": "Apache License 2.0",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/train/v2/_internal/execution/worker_group/placement_group_handle.py | import logging
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Union
from ray.types import ObjectRef
from ray.util.placement_group import PlacementGroup, remove_placement_group
if TYPE_CHECKING:
from ray.util.tpu import SlicePlacementGroup
logger = logging.getLogger(__name__)
class PlacementGroupHandle(ABC):
    """Common interface over the placement-group variants used by Ray Train.

    Both a plain ``PlacementGroup`` and a ``SlicePlacementGroup`` are wrapped
    behind this interface so ``WorkerGroup`` can schedule workers and tear
    down resources without branching on the concrete type.
    """

    @property
    @abstractmethod
    def placement_group(self) -> PlacementGroup:
        """The underlying PlacementGroup used for worker scheduling."""
        ...

    @abstractmethod
    def ready(self) -> ObjectRef:
        """Return an ObjectRef that resolves once the group is scheduled.

        The ref is compatible with ``ray.get()`` and ``ray.wait()``.
        """
        ...

    @abstractmethod
    def wait(self, timeout_seconds: Union[float, int] = 30) -> bool:
        """Block until the group is ready or the timeout elapses.

        Args:
            timeout_seconds: Timeout in seconds.

        Returns:
            True if the placement group was created in time, False otherwise.
        """
        ...

    @abstractmethod
    def shutdown(self) -> None:
        """Release all resources held by this placement group.

        The handle must not be used after this call.
        """
        ...
class DefaultPlacementGroupHandle(PlacementGroupHandle):
    """Adapter exposing a plain ``PlacementGroup`` through the handle API."""

    def __init__(self, pg: PlacementGroup):
        self._pg = pg

    @property
    def placement_group(self) -> PlacementGroup:
        return self._pg

    def ready(self) -> ObjectRef:
        return self._pg.ready()

    def wait(self, timeout_seconds: Union[float, int] = 30) -> bool:
        # Any failure (e.g. the PG was already removed) is reported as
        # "not ready" rather than propagated to the caller.
        try:
            is_ready = self._pg.wait(timeout_seconds)
        except Exception:
            logger.warning(
                "Placement group wait failed; treating as not ready.",
                exc_info=True,
            )
            return False
        return is_ready

    def shutdown(self) -> None:
        remove_placement_group(self._pg)
class SlicePlacementGroupHandle(PlacementGroupHandle):
    """Adapter that delegates handle operations to a ``SlicePlacementGroup``'s
    underlying ``PlacementGroup`` (and to the slice itself for shutdown)."""

    def __init__(self, spg: "SlicePlacementGroup"):
        self._spg = spg

    @property
    def placement_group(self) -> PlacementGroup:
        return self._spg.placement_group

    def ready(self) -> ObjectRef:
        return self._spg.placement_group.ready()

    def wait(self, timeout_seconds: Union[float, int] = 30) -> bool:
        # Any failure is reported as "not ready" rather than propagated.
        try:
            is_ready = self._spg.placement_group.wait(timeout_seconds)
        except Exception:
            logger.warning(
                "Slice placement group wait failed; treating as not ready.",
                exc_info=True,
            )
            return False
        return is_ready

    def shutdown(self) -> None:
        # The slice owns extra resources beyond the PG, so delegate to it.
        self._spg.shutdown()
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/_internal/execution/worker_group/placement_group_handle.py",
"license": "Apache License 2.0",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/train/v2/tests/test_placement_group_handle.py | from unittest.mock import MagicMock
import pytest
import ray
from ray.train.v2._internal.execution.worker_group.placement_group_handle import (
DefaultPlacementGroupHandle,
PlacementGroupHandle,
SlicePlacementGroupHandle,
)
from ray.util.placement_group import placement_group
@pytest.fixture(autouse=True)
def ray_start():
    """Start a small local Ray cluster for each test and shut it down after."""
    ray.init(num_cpus=4)
    yield
    ray.shutdown()
# DefaultPlacementGroupHandle tests
def test_default_handle_is_placement_group_handle():
    """DefaultPlacementGroupHandle should be a PlacementGroupHandle."""
    handle = DefaultPlacementGroupHandle(placement_group([{"CPU": 1}]))
    assert isinstance(handle, PlacementGroupHandle)
    handle.shutdown()
def test_default_handle_placement_group_property():
    """placement_group property should return the underlying PlacementGroup."""
    underlying = placement_group([{"CPU": 1}])
    handle = DefaultPlacementGroupHandle(underlying)
    assert handle.placement_group is underlying
    handle.shutdown()
def test_default_handle_ready():
    """ready() should return an ObjectRef that can be waited on."""
    handle = DefaultPlacementGroupHandle(placement_group([{"CPU": 1}]))
    # ray.get succeeding proves ready() yields a resolvable ObjectRef.
    ray.get(handle.ready(), timeout=10)
    handle.shutdown()
def test_default_handle_wait():
    """wait() should return True if the placement group is ready."""
    handle = DefaultPlacementGroupHandle(placement_group([{"CPU": 1}]))
    assert handle.wait(timeout_seconds=10)
    handle.shutdown()
def test_default_handle_wait_not_found_returns_false():
    """wait() should return False if the placement group no longer exists."""
    broken_pg = MagicMock()
    broken_pg.wait.side_effect = Exception(
        "Placement group PlacementGroupID(abc) does not exist."
    )
    assert DefaultPlacementGroupHandle(broken_pg).wait(timeout_seconds=0) is False
def test_default_handle_shutdown():
    """shutdown() should remove the placement group."""
    handle = DefaultPlacementGroupHandle(placement_group([{"CPU": 1}]))
    # Wait until the PG is actually scheduled before removing it.
    ray.get(handle.ready(), timeout=10)
    handle.shutdown()
# SlicePlacementGroupHandle tests
def test_slice_handle_is_placement_group_handle():
    """SlicePlacementGroupHandle should be a PlacementGroupHandle."""
    fake_spg = MagicMock()
    fake_spg.placement_group = MagicMock()
    assert isinstance(SlicePlacementGroupHandle(fake_spg), PlacementGroupHandle)
def test_slice_handle_placement_group_property():
    """placement_group property should return the underlying PlacementGroup."""
    fake_pg = MagicMock()
    fake_spg = MagicMock()
    fake_spg.placement_group = fake_pg
    assert SlicePlacementGroupHandle(fake_spg).placement_group is fake_pg
def test_slice_handle_ready():
    """ready() should delegate to the underlying PlacementGroup."""
    fake_pg = MagicMock()
    ready_ref = MagicMock()
    fake_pg.ready.return_value = ready_ref
    fake_spg = MagicMock()
    fake_spg.placement_group = fake_pg
    assert SlicePlacementGroupHandle(fake_spg).ready() is ready_ref
    fake_pg.ready.assert_called_once()
def test_slice_handle_wait():
    """wait() should delegate to the underlying PlacementGroup."""
    fake_pg = MagicMock()
    fake_pg.wait.return_value = True
    fake_spg = MagicMock()
    fake_spg.placement_group = fake_pg
    assert SlicePlacementGroupHandle(fake_spg).wait(timeout_seconds=10)
    fake_pg.wait.assert_called_once()
def test_slice_handle_shutdown():
    """shutdown() should call shutdown on the SlicePlacementGroup."""
    fake_spg = MagicMock()
    fake_spg.placement_group = MagicMock()
    SlicePlacementGroupHandle(fake_spg).shutdown()
    fake_spg.shutdown.assert_called_once()
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/tests/test_placement_group_handle.py",
"license": "Apache License 2.0",
"lines": 102,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/namespace_expressions/arr_namespace.py | """Array namespace for expression operations on array-typed columns."""
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING
import pyarrow
from ray.data.datatype import DataType
from ray.data.expressions import pyarrow_udf
if TYPE_CHECKING:
from ray.data.expressions import Expr, UDFExpr
@dataclass
class _ArrayNamespace:
    """Accessor exposing array-typed operations on an expression column.

    Example:
        >>> from ray.data.expressions import col
        >>> # Convert fixed-size lists to variable-length lists
        >>> expr = col("features").arr.to_list()
    """

    _expr: Expr

    def to_list(self) -> "UDFExpr":
        """Convert FixedSizeList columns into variable-length lists."""
        source_dtype = self._expr.data_type
        # Default to an opaque dtype when the input isn't statically
        # known to be list-typed.
        result_dtype = DataType(object)
        if source_dtype.is_list_type():
            arrow_dtype = source_dtype.to_arrow_dtype()
            if pyarrow.types.is_fixed_size_list(arrow_dtype):
                result_dtype = DataType.from_arrow(
                    pyarrow.list_(arrow_dtype.value_type)
                )
            else:
                # Already variable-length; the dtype is unchanged.
                result_dtype = source_dtype

        @pyarrow_udf(return_dtype=result_dtype)
        def _to_list(arr: pyarrow.Array) -> pyarrow.Array:
            if not DataType.from_arrow(arr.type).is_list_type():
                raise pyarrow.lib.ArrowInvalid(
                    "to_list() can only be called on list-like columns, "
                    f"but got {arr.type}"
                )
            if isinstance(arr.type, pyarrow.FixedSizeListType):
                return arr.cast(pyarrow.list_(arr.type.value_type))
            return arr

        return _to_list(self._expr)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/namespace_expressions/arr_namespace.py",
"license": "Apache License 2.0",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/data/tests/expressions/test_namespace_arr.py | """Integration tests for array namespace expressions.
These tests require Ray and test end-to-end array namespace expression evaluation.
"""
import pandas as pd
import pyarrow as pa
import pytest
from packaging import version
import ray
from ray.data._internal.util import rows_same
from ray.data.expressions import col
from ray.data.tests.conftest import * # noqa
from ray.tests.conftest import * # noqa
pytestmark = pytest.mark.skipif(
version.parse(pa.__version__) < version.parse("19.0.0"),
reason="Namespace expressions tests require PyArrow >= 19.0",
)
def _make_fixed_size_list_table() -> pa.Table:
    """Build a one-column table whose ``features`` column is FixedSizeList[2]."""
    flat = pa.array([1, 2, 3, 4, 5, 6], type=pa.int64())
    fixed = pa.FixedSizeListArray.from_arrays(flat, list_size=2)
    return pa.Table.from_arrays([fixed], names=["features"])
def test_arr_to_list_fixed_size(ray_start_regular_shared):
    """arr.to_list() should widen FixedSizeList[2] into variable-length lists."""
    ds = ray.data.from_arrow(_make_fixed_size_list_table())
    actual = (
        ds.with_column("features", col("features").arr.to_list())
        .select_columns(["features"])
        .to_pandas()
    )
    expected = pd.DataFrame(
        [
            {"features": [1, 2]},
            {"features": [3, 4]},
            {"features": [5, 6]},
        ]
    )
    assert rows_same(actual, expected)
def test_arr_to_list_invalid_dtype_raises(ray_start_regular_shared):
    """arr.to_list() on a scalar column should surface an informative error."""
    ds = ray.data.from_items([{"value": 1}, {"value": 2}])
    expected_errors = (ray.exceptions.RayTaskError, ray.exceptions.UserCodeException)
    with pytest.raises(expected_errors) as exc_info:
        ds.with_column("value_list", col("value").arr.to_list()).to_pandas()
    assert "to_list() can only be called on list-like columns" in str(exc_info.value)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/expressions/test_namespace_arr.py",
"license": "Apache License 2.0",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:doc/source/serve/tutorials/video-analysis/app.py | """
Ray Serve application: Video Embedding → Multi-Decoder.
Processes entire videos by chunking into segments.
Videos are downloaded from S3 to temp file, then processed locally (faster than streaming).
Encoder refs are passed directly to decoder; Ray Serve resolves dependencies automatically.
Usage:
serve run app:app
# With custom bucket:
S3_BUCKET=my-bucket serve run app:app
"""
import logging
import os
import tempfile
import time
from collections import defaultdict
from pathlib import Path
from urllib.parse import urlparse
import aioboto3
import numpy as np
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from ray import serve
from ray.serve.handle import DeploymentResponse
from deployments.encoder import VideoEncoder
from deployments.decoder import MultiDecoder
from utils.video import chunk_video_async
from constants import DEFAULT_NUM_FRAMES, DEFAULT_CHUNK_DURATION, FFMPEG_THREADS, NUM_WORKERS
logger = logging.getLogger(__name__)
def parse_s3_uri(s3_uri: str) -> tuple[str, str]:
    """Parse ``s3://bucket/key`` into ``(bucket, key)``.

    Args:
        s3_uri: Fully qualified S3 object URI.

    Returns:
        Tuple of (bucket name, object key).

    Raises:
        ValueError: If the scheme is not ``s3`` or the bucket/key is missing.
    """
    parsed = urlparse(s3_uri)
    if parsed.scheme != "s3":
        raise ValueError(f"Invalid S3 URI: {s3_uri}")
    bucket = parsed.netloc
    key = parsed.path.lstrip("/")
    # Reject URIs like "s3://" or "s3://bucket" up front; empty components
    # would otherwise only fail later inside the S3 client with a less
    # actionable error.
    if not bucket or not key:
        raise ValueError(f"Invalid S3 URI: {s3_uri}")
    return bucket, key
class AnalyzeRequest(BaseModel):
    """Request schema for /analyze endpoint."""
    stream_id: str
    video_path: str  # S3 URI: s3://bucket/key
    num_frames: int = DEFAULT_NUM_FRAMES  # Frames sampled per chunk
    chunk_duration: float = DEFAULT_CHUNK_DURATION  # Target chunk length (seconds)
    use_batching: bool = False  # Set False to compare unbatched performance
class TagResult(BaseModel):
    """A single predicted tag with its score."""
    text: str
    score: float
class CaptionResult(BaseModel):
    """A retrieved caption with its score."""
    text: str
    score: float
class TimingResult(BaseModel):
    """Per-stage latency breakdown (milliseconds) for one /analyze request."""
    s3_download_ms: float  # Time to download the video from S3
    decode_video_ms: float  # Time to chunk the video and extract frames
    encode_ms: float  # Time to dispatch encoder calls
    decode_ms: float  # Time spent decoding embeddings into results
    total_ms: float  # End-to-end request time
class SceneChange(BaseModel):
    """Detected scene change event."""
    timestamp: float  # Seconds from video start
    score: float  # Scene change score (higher = bigger change)
    chunk_index: int  # Index of the chunk the change was detected in
    frame_index: int  # Frame index within chunk
class ChunkResult(BaseModel):
    """Result for a single chunk."""
    chunk_index: int  # Position of the chunk within the video
    start_time: float  # Chunk start offset (seconds)
    duration: float  # Chunk length (seconds)
    tags: list[TagResult]
    retrieval_caption: CaptionResult
    # Detected scene changes in this chunk
    scene_changes: list[SceneChange]
class AnalyzeResponse(BaseModel):
    """Response schema for /analyze endpoint."""
    stream_id: str
    # Aggregated results (across all chunks)
    tags: list[TagResult]
    retrieval_caption: CaptionResult
    # Scene change detection
    scene_changes: list[SceneChange]  # All detected scene changes
    num_scene_changes: int
    # Per-chunk results
    chunks: list[ChunkResult]
    num_chunks: int
    video_duration: float  # Total duration covered by the chunks (seconds)
    timing_ms: TimingResult  # Per-stage latency breakdown
timing_ms: TimingResult
# FastAPI app
fastapi_app = FastAPI(
title="Video Embedding API",
description="GPU encoder → CPU multi-decoder using SigLIP embeddings",
)
@serve.deployment(
    # max_ongoing_requests is set to twice the encoder's, so that requests can
    # finish their upfront CPU work here and queue up for GPU processing.
num_replicas="auto",
ray_actor_options={"num_cpus": FFMPEG_THREADS},
max_ongoing_requests=4,
autoscaling_config={
"min_replicas": 2,
"max_replicas": 20,
"target_num_ongoing_requests": 2,
},
)
@serve.ingress(fastapi_app)
class VideoAnalyzer:
"""
Main ingress deployment that orchestrates VideoEncoder and MultiDecoder.
Encoder refs are passed directly to decoder; Ray Serve resolves dependencies.
Downloads video from S3 to temp file for fast local processing.
"""
    def __init__(self, encoder: VideoEncoder, decoder: MultiDecoder):
        """Store deployment handles and prepare lazy S3 access."""
        self.encoder = encoder
        self.decoder = decoder
        self._s3_session = aioboto3.Session()
        self._s3_client = None  # Cached client for reuse across requests
        logger.info("VideoAnalyzer ready")
    async def _get_s3_client(self):
        """Get or create a reusable S3 client."""
        # NOTE(review): the client context manager is entered manually and
        # never exited, so it lives for the replica's lifetime; concurrent
        # first calls could also race and create two clients — confirm this
        # is acceptable for the deployment.
        if self._s3_client is None:
            self._s3_client = await self._s3_session.client("s3").__aenter__()
        return self._s3_client
async def _download_video(self, s3_uri: str) -> Path:
"""Download video from S3 to temp file. Returns local path."""
bucket, key = parse_s3_uri(s3_uri)
# Create temp file with video extension
suffix = Path(key).suffix or ".mp4"
temp_file = tempfile.NamedTemporaryFile(suffix=suffix, delete=False)
temp_path = Path(temp_file.name)
temp_file.close()
try:
s3 = await self._get_s3_client()
await s3.download_file(bucket, key, str(temp_path))
except Exception:
# Clean up temp file if download fails
temp_path.unlink(missing_ok=True)
raise
return temp_path
def _aggregate_results(
self,
chunk_results: list[dict],
top_k_tags: int = 5,
) -> dict:
"""
Aggregate results from multiple chunks.
Strategy:
- Tags: Average scores across chunks, return top-k
- Caption: Return the one with highest score across all chunks
"""
# Aggregate tag scores
tag_scores = defaultdict(list)
for result in chunk_results:
for tag in result["tags"]:
tag_scores[tag["text"]].append(tag["score"])
# Average tag scores and sort
aggregated_tags = [
{"text": text, "score": np.mean(scores)}
for text, scores in tag_scores.items()
]
aggregated_tags.sort(key=lambda x: x["score"], reverse=True)
top_tags = aggregated_tags[:top_k_tags]
# Best caption across all chunks
best_caption = max(
(r["retrieval_caption"] for r in chunk_results),
key=lambda x: x["score"],
)
return {
"tags": top_tags,
"retrieval_caption": best_caption,
}
    def _encode_chunk(self, frames: np.ndarray, use_batching: bool = False) -> DeploymentResponse:
        """Encode a single chunk's frames to embeddings. Returns DeploymentResponse ref."""
        # Not awaited here: callers pass the ref straight to the decoder so
        # Ray Serve resolves the dependency when it's needed.
        return self.encoder.remote(frames, use_batching=use_batching)
    async def _decode_chunk(
        self,
        encoder_output: dict,
        chunk_index: int,
        chunk_start_time: float,
        chunk_duration: float,
        ema_state=None,
    ) -> dict:
        """Decode embeddings to tags, caption, scene changes.

        Args:
            encoder_output: Encoder deployment output; may be an unresolved
                DeploymentResponse ref that Ray Serve resolves on delivery.
            chunk_index: Position of the chunk within the video.
            chunk_start_time: Chunk start offset (seconds).
            chunk_duration: Chunk length (seconds).
            ema_state: Caller-held EMA state threaded across chunks so scene
                detection stays continuous even across replicas.
        """
        return await self.decoder.remote(
            encoder_output=encoder_output,
            chunk_index=chunk_index,
            chunk_start_time=chunk_start_time,
            chunk_duration=chunk_duration,
            ema_state=ema_state,
        )
    @fastapi_app.post("/analyze", response_model=AnalyzeResponse)
    async def analyze(self, request: AnalyzeRequest) -> AnalyzeResponse:
        """
        Analyze a video from S3 and return tags, caption, and scene changes.
        Downloads video to temp file for fast local processing.
        Chunks the entire video and aggregates results.
        Encoder refs are passed directly to decoder for dependency resolution.

        Raises:
            HTTPException (400): if the S3 download fails, the video cannot
                be chunked, or chunking yields no chunks.
        """
        total_start = time.perf_counter()
        temp_path = None
        try:
            # Download video from S3 to temp file
            download_start = time.perf_counter()
            try:
                temp_path = await self._download_video(request.video_path)
            except Exception as e:
                raise HTTPException(status_code=400, detail=f"Cannot download S3 video: {e}")
            s3_download_ms = (time.perf_counter() - download_start) * 1000
            # Chunk video with PARALLEL frame extraction from local file
            decode_start = time.perf_counter()
            try:
                chunks = await chunk_video_async(
                    str(temp_path),
                    chunk_duration=request.chunk_duration,
                    num_frames_per_chunk=request.num_frames,
                    ffmpeg_threads=FFMPEG_THREADS,
                    use_single_ffmpeg=True,
                )
            except Exception as e:
                raise HTTPException(status_code=400, detail=f"Cannot process video: {e}")
            decode_video_ms = (time.perf_counter() - decode_start) * 1000
            if not chunks:
                raise HTTPException(status_code=400, detail="No chunks extracted from video")
            # Calculate video duration from chunks
            video_duration = chunks[-1].start_time + chunks[-1].duration
            # Fire off all encoder calls (returns refs, not awaited).
            # NOTE: encode_ms therefore measures only call-dispatch time; the
            # actual GPU work overlaps with the serial decode loop below.
            encode_start = time.perf_counter()
            encode_refs = [
                self._encode_chunk(chunk.frames, use_batching=request.use_batching)
                for chunk in chunks
            ]
            encode_ms = (time.perf_counter() - encode_start) * 1000
            # Decode chunks SERIALLY, passing encoder refs directly.
            # Ray Serve resolves the encoder result when decoder needs it.
            # EMA state is tracked here (not in decoder) to ensure continuity
            # even when autoscaling routes requests to different replicas.
            decode_start = time.perf_counter()
            decode_results = []
            ema_state = None  # Will be initialized from first chunk's first frame
            for chunk, enc_ref in zip(chunks, encode_refs):
                dec_result = await self._decode_chunk(
                    encoder_output=enc_ref,
                    chunk_index=chunk.index,
                    chunk_start_time=chunk.start_time,
                    chunk_duration=chunk.duration,
                    ema_state=ema_state,
                )
                decode_results.append(dec_result)
                ema_state = dec_result["ema_state"]  # Carry forward for next chunk
            decode_ms = (time.perf_counter() - decode_start) * 1000
            # Collect results
            chunk_results = []
            per_chunk_results = []
            all_scene_changes = []
            for chunk, decoder_result in zip(chunks, decode_results):
                chunk_results.append(decoder_result)
                # Scene changes come directly from decoder
                chunk_scene_changes = [
                    SceneChange(**sc) for sc in decoder_result["scene_changes"]
                ]
                all_scene_changes.extend(chunk_scene_changes)
                per_chunk_results.append(ChunkResult(
                    chunk_index=chunk.index,
                    start_time=chunk.start_time,
                    duration=chunk.duration,
                    tags=[TagResult(**t) for t in decoder_result["tags"]],
                    retrieval_caption=CaptionResult(**decoder_result["retrieval_caption"]),
                    scene_changes=chunk_scene_changes,
                ))
            # Aggregate results
            aggregated = self._aggregate_results(chunk_results)
            total_ms = (time.perf_counter() - total_start) * 1000
            return AnalyzeResponse(
                stream_id=request.stream_id,
                tags=[TagResult(**t) for t in aggregated["tags"]],
                retrieval_caption=CaptionResult(**aggregated["retrieval_caption"]),
                scene_changes=all_scene_changes,
                num_scene_changes=len(all_scene_changes),
                chunks=per_chunk_results,
                num_chunks=len(chunks),
                video_duration=video_duration,
                timing_ms=TimingResult(
                    s3_download_ms=round(s3_download_ms, 2),
                    decode_video_ms=round(decode_video_ms, 2),
                    encode_ms=round(encode_ms, 2),
                    decode_ms=round(decode_ms, 2),
                    total_ms=round(total_ms, 2),
                ),
            )
        finally:
            # Clean up temp file (runs on success and on every raised HTTPException)
            if temp_path and temp_path.exists():
                temp_path.unlink(missing_ok=True)
@fastapi_app.get("/health")
async def health(self):
"""Health check endpoint."""
return {"status": "healthy"}
# Wire up the deployment graph: the analyzer fans work out to a GPU encoder
# and a CPU decoder. The decoder's embedding bucket comes from $S3_BUCKET.
encoder = VideoEncoder.bind()
decoder = MultiDecoder.bind(bucket=os.environ.get("S3_BUCKET"))
app = VideoAnalyzer.bind(encoder=encoder, decoder=decoder)
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/tutorials/video-analysis/app.py",
"license": "Apache License 2.0",
"lines": 296,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/serve/tutorials/video-analysis/autoscaling_policy.py | """
Application-level autoscaling policy for video processing pipeline.
Scaling strategy:
- VideoAnalyzer: scales based on its load (error_ratio = requests / capacity)
- VideoEncoder: scales based on its load, with floor = VideoAnalyzer replicas
- MultiDecoder: 0.5x VideoEncoder replicas
Error ratio formula:
error_ratio = total_ongoing_requests / (target_per_replica × current_replicas)
- error_ratio > 1.0 → over capacity → scale up
- error_ratio < 1.0 → under capacity → scale down
- error_ratio = 1.0 → at capacity → no change
"""
import math
from typing import Dict, Tuple
from ray.serve._private.common import DeploymentID
from ray.serve.config import AutoscalingContext
def _get_error_ratio(ctx: AutoscalingContext) -> float:
    """
    Ratio of observed load to provisioned capacity for one deployment.

    Above 1.0 means over capacity (scale up), below 1.0 under capacity
    (scale down). When the deployment is completely idle, 1.0 is returned
    so the current replica count is held steady.
    """
    in_flight = ctx.total_num_requests
    if in_flight == 0:
        # Idle: report "exactly at capacity" to avoid scaling on no signal.
        return 1.0
    per_replica_target = ctx.config.target_ongoing_requests or 1
    capacity = per_replica_target * ctx.current_num_replicas
    return in_flight / capacity
def _scale_by_error_ratio(ctx: AutoscalingContext, floor: int = 0) -> int:
    """
    Propose a replica count for one deployment from its error ratio.

    The current replica count is multiplied by the error ratio and rounded
    up, raised to at least ``floor`` (e.g. the encoder should never have
    fewer replicas than the analyzer), and finally clamped to the
    deployment's capacity-adjusted min/max limits.
    """
    proposed = int(math.ceil(ctx.current_num_replicas * _get_error_ratio(ctx)))
    proposed = max(proposed, floor)
    lower = ctx.capacity_adjusted_min_replicas
    upper = ctx.capacity_adjusted_max_replicas
    return max(lower, min(upper, proposed))
def _find_deployment(
    contexts: Dict[DeploymentID, AutoscalingContext],
    name: str,
) -> Tuple[DeploymentID, AutoscalingContext]:
    """Return the (id, context) pair whose deployment name equals ``name``.

    Raises:
        KeyError: if no deployment with that name exists in ``contexts``.
    """
    match = next(
        (entry for entry in contexts.items() if entry[0].name == name),
        None,
    )
    if match is None:
        raise KeyError(f"Deployment '{name}' not found")
    return match
def coordinated_scaling_policy(
    contexts: Dict[DeploymentID, AutoscalingContext],
) -> Tuple[Dict[DeploymentID, int], Dict]:
    """
    Coordinated scaling for the video processing pipeline.

    Rules:
      * VideoAnalyzer scales purely on its own load.
      * VideoEncoder scales on its own load but never drops below the
        analyzer's replica count.
      * MultiDecoder tracks the encoder at a 0.5x ratio (at least one),
        clamped to its own configured limits.
    """
    targets: Dict[DeploymentID, int] = {}

    # VideoAnalyzer first: its count doubles as the encoder's floor.
    analyzer_id, analyzer_ctx = _find_deployment(contexts, "VideoAnalyzer")
    num_analyzers = _scale_by_error_ratio(analyzer_ctx)
    targets[analyzer_id] = num_analyzers

    # VideoEncoder: load-driven, floored at the analyzer count.
    encoder_id, encoder_ctx = _find_deployment(contexts, "VideoEncoder")
    num_encoders = _scale_by_error_ratio(encoder_ctx, floor=num_analyzers)
    targets[encoder_id] = num_encoders

    # MultiDecoder: half the encoder count, then clamp to its own limits.
    decoder_id, decoder_ctx = _find_deployment(contexts, "MultiDecoder")
    half_encoders = max(1, math.ceil(num_encoders / 2))
    targets[decoder_id] = max(
        decoder_ctx.capacity_adjusted_min_replicas,
        min(decoder_ctx.capacity_adjusted_max_replicas, half_encoders),
    )

    return targets, {}
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/tutorials/video-analysis/autoscaling_policy.py",
"license": "Apache License 2.0",
"lines": 84,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/serve/tutorials/video-analysis/ci/nb2py.py | #!/usr/bin/env python3
import argparse
import nbformat
def convert_notebook(input_path: str, output_path: str) -> None:
    """
    Convert a Jupyter notebook into a plain Python script.

    Only code cells are emitted. Cells that enable IPython's autoreload
    machinery (``%load_ext autoreload`` / ``%autoreload all``) are dropped,
    since those magics are meaningless outside a notebook.

    Args:
        input_path: Path to the input ``.ipynb`` file.
        output_path: Path for the generated ``.py`` script.
    """
    nb = nbformat.read(input_path, as_version=4)
    # Notebook sources are UTF-8 JSON; write UTF-8 explicitly so the output
    # does not depend on the platform's default encoding (e.g. cp1252 on
    # Windows, which would crash on non-ASCII cell content).
    with open(output_path, "w", encoding="utf-8") as out:
        for cell in nb.cells:
            if cell.cell_type != "code":
                continue
            lines = cell.source.splitlines()
            # Skip cells that load or autoreload extensions
            if any(
                line.strip().startswith(("%load_ext autoreload", "%autoreload all"))
                for line in lines
            ):
                continue
            out.write(cell.source.rstrip() + "\n\n")
def main() -> None:
    """CLI entry point: parse the two path arguments and run the conversion."""
    arg_parser = argparse.ArgumentParser(
        description="Convert a Jupyter notebook to a Python script."
    )
    arg_parser.add_argument("input_nb", help="Path to the input .ipynb file")
    arg_parser.add_argument("output_py", help="Path for the output .py script")
    parsed = arg_parser.parse_args()
    convert_notebook(parsed.input_nb, parsed.output_py)
# Script entry point; importing this module has no side effects.
if __name__ == "__main__":
    main()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/tutorials/video-analysis/ci/nb2py.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/serve/tutorials/video-analysis/client/load_test.py | #!/usr/bin/env python3
"""
Load testing script for the Ray Serve video API.
Usage:
python -m client.load_test --video s3://bucket/path/to/video.mp4 --concurrency 4
python -m client.load_test --video s3://bucket/video.mp4 --concurrency 8 --url http://localhost:8000
python -m client.load_test --video s3://bucket/video.mp4 --concurrency 4 --token YOUR_TOKEN
Press Ctrl+C to stop and save results to CSV.
"""
import argparse
import asyncio
import csv
import signal
import sys
import time
import uuid
from dataclasses import dataclass, field, asdict
from datetime import datetime
from pathlib import Path
from typing import Optional
import httpx
@dataclass
class RequestResult:
    """Outcome of one /analyze request, as recorded by the load tester."""

    request_id: str
    stream_id: str
    start_time: float
    end_time: float
    latency_ms: float
    success: bool
    status_code: int
    error: Optional[str] = None
    # Server-side timing breakdown (populated only on success)
    s3_download_ms: Optional[float] = None
    decode_video_ms: Optional[float] = None
    encode_ms: Optional[float] = None
    decode_ms: Optional[float] = None
    server_total_ms: Optional[float] = None
    # Selected response payload fields
    num_chunks: Optional[int] = None
    video_duration: Optional[float] = None
    num_scene_changes: Optional[int] = None


@dataclass
class LoadTestStats:
    """Running aggregate statistics over all completed requests."""

    total_requests: int = 0
    successful_requests: int = 0
    failed_requests: int = 0
    total_latency_ms: float = 0.0
    min_latency_ms: float = float('inf')
    max_latency_ms: float = 0.0
    latencies: list = field(default_factory=list)

    def add_result(self, result: RequestResult):
        """Fold a single request outcome into the running totals."""
        self.total_requests += 1
        if not result.success:
            self.failed_requests += 1
            return
        self.successful_requests += 1
        latency = result.latency_ms
        self.total_latency_ms += latency
        if latency < self.min_latency_ms:
            self.min_latency_ms = latency
        if latency > self.max_latency_ms:
            self.max_latency_ms = latency
        self.latencies.append(latency)

    @property
    def avg_latency_ms(self) -> float:
        """Mean latency over successful requests (0.0 when there are none)."""
        return self.total_latency_ms / len(self.latencies) if self.latencies else 0.0

    @property
    def p50_latency_ms(self) -> float:
        return self._percentile(50)

    @property
    def p95_latency_ms(self) -> float:
        return self._percentile(95)

    @property
    def p99_latency_ms(self) -> float:
        return self._percentile(99)

    def _percentile(self, p: float) -> float:
        """Nearest-rank percentile of the successful-request latencies."""
        if not self.latencies:
            return 0.0
        ordered = sorted(self.latencies)
        rank = min(int(len(ordered) * p / 100), len(ordered) - 1)
        return ordered[rank]
class LoadTester:
    """Drives a continuous stream of /analyze requests at fixed concurrency.

    Requests are issued until ``stop()`` flips ``running``; results and
    aggregate stats are accumulated under an asyncio lock and can be dumped
    to CSV or printed as a summary afterwards.
    """

    def __init__(
        self,
        video_path: str,
        url: str,
        concurrency: int,
        num_frames: int = 16,
        chunk_duration: float = 10.0,
        timeout: float = 300.0,
        token: Optional[str] = None,
    ):
        self.video_path = video_path
        self.url = url
        self.concurrency = concurrency
        self.num_frames = num_frames
        self.chunk_duration = chunk_duration
        self.timeout = timeout
        self.token = token
        self.results: list[RequestResult] = []
        self.stats = LoadTestStats()
        self.running = True
        self.start_time: Optional[float] = None
        self.request_counter = 0
        # Protects results/stats, which are mutated from concurrent tasks.
        self._lock = asyncio.Lock()
        # Caps in-flight requests at `concurrency`; created in run() so it is
        # bound to the running event loop.
        self._semaphore: Optional[asyncio.Semaphore] = None

    def _build_payload(self) -> dict:
        """Build the request payload (fresh random stream_id per request)."""
        stream_id = uuid.uuid4().hex[:8]
        return {
            "stream_id": stream_id,
            "video_path": self.video_path,
            "num_frames": self.num_frames,
            "chunk_duration": self.chunk_duration,
            "use_batching": False,
        }

    async def _make_request(self, client: httpx.AsyncClient, request_id: str) -> RequestResult:
        """Make a single request and return the result (never raises)."""
        payload = self._build_payload()
        stream_id = payload["stream_id"]
        start = time.perf_counter()
        start_timestamp = time.time()
        headers = {}
        if self.token:
            headers["Authorization"] = f"Bearer {self.token}"
        try:
            response = await client.post(f"{self.url}/analyze", json=payload, headers=headers)
            end = time.perf_counter()
            latency_ms = (end - start) * 1000
            if response.status_code == 200:
                data = response.json()
                timing = data.get("timing_ms", {})
                return RequestResult(
                    request_id=request_id,
                    stream_id=stream_id,
                    start_time=start_timestamp,
                    end_time=time.time(),
                    latency_ms=latency_ms,
                    success=True,
                    status_code=response.status_code,
                    s3_download_ms=timing.get("s3_download_ms"),
                    decode_video_ms=timing.get("decode_video_ms"),
                    encode_ms=timing.get("encode_ms"),
                    decode_ms=timing.get("decode_ms"),
                    server_total_ms=timing.get("total_ms"),
                    num_chunks=data.get("num_chunks"),
                    video_duration=data.get("video_duration"),
                    num_scene_changes=data.get("num_scene_changes"),
                )
            else:
                return RequestResult(
                    request_id=request_id,
                    stream_id=stream_id,
                    start_time=start_timestamp,
                    end_time=time.time(),
                    latency_ms=latency_ms,
                    success=False,
                    status_code=response.status_code,
                    error=response.text[:200],
                )
        except Exception as e:
            end = time.perf_counter()
            latency_ms = (end - start) * 1000
            return RequestResult(
                request_id=request_id,
                stream_id=stream_id,
                start_time=start_timestamp,
                end_time=time.time(),
                latency_ms=latency_ms,
                success=False,
                # status_code 0 marks transport-level failures (no HTTP reply)
                status_code=0,
                error=str(e)[:200],
            )

    async def _request_task(self, client: httpx.AsyncClient, request_id: str):
        """Execute a single request and record the result. Releases semaphore when done."""
        try:
            result = await self._make_request(client, request_id)
            async with self._lock:
                self.results.append(result)
                self.stats.add_result(result)
            # Print progress
            status = "✓" if result.success else "✗"
            print(
                f"{status} {result.request_id} | "
                f"{result.latency_ms:7.1f}ms | "
                f"Total: {self.stats.total_requests} "
                f"(OK: {self.stats.successful_requests}, Fail: {self.stats.failed_requests})"
            )
        finally:
            self._semaphore.release()

    async def run(self):
        """Run the load test until interrupted."""
        self.start_time = time.time()
        self._semaphore = asyncio.Semaphore(self.concurrency)
        pending_tasks: set[asyncio.Task] = set()
        print("=" * 70)
        print("🚀 Starting Load Test")
        print("=" * 70)
        print(f"  Video:       {self.video_path}")
        print(f"  URL:         {self.url}")
        print(f"  Concurrency: {self.concurrency}")
        print(f"  Chunk dur:   {self.chunk_duration}s")
        print(f"  Frames/chunk:{self.num_frames}")
        print()
        print("Press Ctrl+C to stop and save results...")
        print("-" * 70)
        async with httpx.AsyncClient(timeout=self.timeout) as client:
            while self.running:
                # Block until a slot is available
                await self._semaphore.acquire()
                if not self.running:
                    # stop() was called while we waited; return the slot.
                    self._semaphore.release()
                    break
                # Spawn task (it will release semaphore when done)
                self.request_counter += 1
                request_id = f"req_{self.request_counter:06d}"
                task = asyncio.create_task(self._request_task(client, request_id))
                pending_tasks.add(task)
                # Keep a strong reference until completion, then drop it.
                task.add_done_callback(pending_tasks.discard)
            # Wait for in-flight requests
            if pending_tasks:
                print(f"\n⏳ Waiting for {len(pending_tasks)} in-flight requests...")
                await asyncio.gather(*pending_tasks, return_exceptions=True)

    def stop(self):
        """Stop the load test (run() drains in-flight requests before returning)."""
        self.running = False

    def save_results(self, output_path: str):
        """Save per-request results to CSV (one row per RequestResult)."""
        if not self.results:
            print("No results to save.")
            return
        fieldnames = [
            "request_id", "stream_id", "start_time", "end_time", "latency_ms",
            "success", "status_code", "error",
            "s3_download_ms", "decode_video_ms", "encode_ms", "decode_ms", "server_total_ms",
            "num_chunks", "video_duration", "num_scene_changes"
        ]
        with open(output_path, "w", newline="") as f:
            writer = csv.DictWriter(f, fieldnames=fieldnames)
            writer.writeheader()
            for result in self.results:
                writer.writerow(asdict(result))
        print(f"📁 Results saved to: {output_path}")

    def print_summary(self):
        """Print summary statistics (no-op when there are no results)."""
        if not self.results:
            print("No results to summarize.")
            return
        duration = time.time() - self.start_time if self.start_time else 0
        throughput = self.stats.total_requests / duration if duration > 0 else 0
        print()
        print("=" * 70)
        print("📊 Load Test Summary")
        print("=" * 70)
        print(f"  Duration:         {duration:.1f}s")
        print(f"  Total requests:   {self.stats.total_requests}")
        print(f"  Successful:       {self.stats.successful_requests}")
        print(f"  Failed:           {self.stats.failed_requests}")
        print(f"  Success rate:     {self.stats.successful_requests / self.stats.total_requests * 100:.1f}%")
        print(f"  Throughput:       {throughput:.2f} req/s")
        print()
        print("⏱️  Latency (successful requests):")
        if self.stats.latencies:
            print(f"  Min:              {self.stats.min_latency_ms:8.1f} ms")
            print(f"  Max:              {self.stats.max_latency_ms:8.1f} ms")
            print(f"  Avg:              {self.stats.avg_latency_ms:8.1f} ms")
            print(f"  P50:              {self.stats.p50_latency_ms:8.1f} ms")
            print(f"  P95:              {self.stats.p95_latency_ms:8.1f} ms")
            print(f"  P99:              {self.stats.p99_latency_ms:8.1f} ms")
        else:
            print("  (no successful requests)")
        print("=" * 70)
def main():
    """CLI entry point: configure the tester, run until Ctrl+C, then report."""
    parser = argparse.ArgumentParser(description="Load test the Ray Serve video API")
    parser.add_argument("--video", type=str, required=True, help="S3 URI: s3://bucket/key")
    parser.add_argument("--url", type=str, default="http://127.0.0.1:8000", help="Server URL")
    parser.add_argument("--concurrency", type=int, default=4, help="Number of concurrent workers")
    parser.add_argument("--num-frames", type=int, default=16, help="Frames per chunk")
    parser.add_argument("--chunk-duration", type=float, default=10.0, help="Chunk duration in seconds")
    parser.add_argument("--timeout", type=float, default=300.0, help="Request timeout in seconds")
    parser.add_argument("--output", type=str, default=None, help="Output CSV path (default: load_test_<timestamp>.csv)")
    parser.add_argument("--token", type=str, default=None, help="Bearer token for Authorization header")
    args = parser.parse_args()
    # Generate output path if not provided
    if args.output is None:
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        args.output = f"load_test_{timestamp}_{args.concurrency}.csv"
    tester = LoadTester(
        video_path=args.video,
        url=args.url,
        concurrency=args.concurrency,
        num_frames=args.num_frames,
        chunk_duration=args.chunk_duration,
        timeout=args.timeout,
        token=args.token,
    )
    # Track interrupt count for force exit
    interrupt_count = 0
    # Handle Ctrl+C gracefully: first signal drains in-flight requests,
    # second signal saves what we have and force-exits.
    def signal_handler(sig, frame):
        nonlocal interrupt_count
        interrupt_count += 1
        if interrupt_count == 1:
            print("\n\n🛑 Stopping load test (press Ctrl+C again to force exit)...")
            tester.stop()
        else:
            print("\n\n⚠️  Force exiting...")
            tester.print_summary()
            tester.save_results(args.output)
            sys.exit(1)
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    try:
        asyncio.run(tester.run())
    except KeyboardInterrupt:
        pass
    finally:
        # Always report, even if run() raised.
        tester.print_summary()
        tester.save_results(args.output)
# Script entry point; importing this module has no side effects.
if __name__ == "__main__":
    main()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/tutorials/video-analysis/client/load_test.py",
"license": "Apache License 2.0",
"lines": 323,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:doc/source/serve/tutorials/video-analysis/client/send_video.py | #!/usr/bin/env python3
"""
Client script to send video to the Ray Serve API.
Usage:
python -m client.send_video --video s3://bucket/path/to/video.mp4
python -m client.send_video --video s3://bucket/video.mp4 --chunk-duration 5.0
python -m client.send_video --video s3://bucket/video.mp4 --token YOUR_TOKEN
"""
import argparse
import time
import uuid
import httpx
def main():
    """Send one video-analysis request and pretty-print the response."""
    parser = argparse.ArgumentParser(description="Send video to Ray Serve API")
    parser.add_argument("--video", type=str, required=True, help="S3 URI: s3://bucket/key")
    parser.add_argument("--stream-id", type=str, default=None, help="Stream ID (random if not provided)")
    parser.add_argument("--num-frames", type=int, default=16, help="Frames per chunk")
    parser.add_argument("--chunk-duration", type=float, default=10.0, help="Chunk duration in seconds")
    parser.add_argument("--url", type=str, default="http://127.0.0.1:8000", help="Server URL")
    parser.add_argument("--token", type=str, default=None, help="Bearer token for Authorization header")
    args = parser.parse_args()
    # Generate random stream ID if not provided
    stream_id = args.stream_id or uuid.uuid4().hex[:8]
    payload = {
        "stream_id": stream_id,
        "video_path": args.video,
        "num_frames": args.num_frames,
        "chunk_duration": args.chunk_duration,
        "use_batching": True
    }
    print(f"📹 Processing video: {args.video}")
    print(f"   Stream ID: {stream_id}")
    print(f"   Chunk duration: {args.chunk_duration}s, Frames/chunk: {args.num_frames}")
    print()
    start = time.perf_counter()
    headers = {}
    if args.token:
        headers["Authorization"] = f"Bearer {args.token}"
    # Generous timeout: the server downloads and processes the whole video.
    with httpx.Client(timeout=300.0) as client:
        response = client.post(f"{args.url}/analyze", json=payload, headers=headers)
    latency_ms = (time.perf_counter() - start) * 1000
    if response.status_code != 200:
        print(f"❌ Error {response.status_code}: {response.text}")
        return
    result = response.json()
    print("=" * 60)
    print("✅ Response")
    print("=" * 60)
    print(f"Stream ID: {result['stream_id']}")
    print(f"Video duration: {result['video_duration']:.1f}s")
    print(f"Chunks processed: {result['num_chunks']}")
    print()
    print("🏷️  Top Tags (aggregated):")
    for tag in result["tags"]:
        print(f"   {tag['score']:.3f}  {tag['text']}")
    print()
    print("📝 Best Caption:")
    caption = result["retrieval_caption"]
    print(f"   {caption['score']:.3f}  {caption['text']}")
    print()
    # Scene changes
    scene_changes = result["scene_changes"]
    print(f"🎬 Scene Changes Detected: {result['num_scene_changes']}")
    if scene_changes:
        for sc in scene_changes:
            print(f"   {sc['timestamp']:6.2f}s  score={sc['score']:.3f}  (chunk {sc['chunk_index']}, frame {sc['frame_index']})")
    else:
        print("   (none detected)")
    print()
    # Show per-chunk results
    print("📊 Per-Chunk Results:")
    print("-" * 60)
    for chunk in result["chunks"]:
        print(f"   Chunk {chunk['chunk_index']}: {chunk['start_time']:.1f}s - {chunk['start_time'] + chunk['duration']:.1f}s")
        print(f"      Top tag: {chunk['tags'][0]['text']} ({chunk['tags'][0]['score']:.3f})")
        print(f"      Caption: {chunk['retrieval_caption']['text'][:50]}...")
        num_changes = len(chunk["scene_changes"])
        print(f"      Scene changes: {num_changes}")
    print()
    timing = result["timing_ms"]
    print("⏱️  Timing:")
    print(f"   S3 download:    {timing['s3_download_ms']:.1f} ms")
    print(f"   Video decode:   {timing['decode_video_ms']:.1f} ms")
    print(f"   Encode (GPU):   {timing['encode_ms']:.1f} ms")
    print(f"   Decode (CPU):   {timing['decode_ms']:.1f} ms")
    print(f"   Total server:   {timing['total_ms']:.1f} ms")
    print(f"   Round-trip:     {latency_ms:.1f} ms")
    if result['num_chunks'] > 1:
        avg_per_chunk = timing['total_ms'] / result['num_chunks']
        print(f"   Avg/chunk:      {avg_per_chunk:.1f} ms")
# Script entry point; importing this module has no side effects.
if __name__ == "__main__":
    main()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/tutorials/video-analysis/client/send_video.py",
"license": "Apache License 2.0",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/serve/tutorials/video-analysis/constants.py | # Project constants
# SigLIP model
MODEL_NAME = "google/siglip-so400m-patch14-384"
# S3 paths
S3_VIDEOS_PREFIX = "stock-videos/"
S3_EMBEDDINGS_PREFIX = "embeddings/"
# Scene change detection
SCENE_CHANGE_THRESHOLD = 0.15 # EMA score threshold for detecting scene changes
EMA_ALPHA = 0.9 # EMA decay factor (higher = slower adaptation)
# Pexels API
PEXELS_API_BASE = "https://api.pexels.com/videos"
# Concurrency limits
MAX_CONCURRENT_DOWNLOADS = 5
MAX_CONCURRENT_UPLOADS = 5
# Video normalization defaults (384x384 matches model input size for fastest inference)
NORMALIZE_WIDTH = 384
NORMALIZE_HEIGHT = 384
NORMALIZE_FPS = 30
# Video search queries (for downloading stock videos)
SEARCH_QUERIES = [
"kitchen cooking",
"office meeting",
"street city traffic",
"living room home",
"restaurant cafe",
"parking lot cars",
"classroom students",
"warehouse industrial",
"grocery store shopping",
"gym exercise workout",
"person speaking",
"crowd people walking",
"laptop computer work",
"outdoor nature",
"presentation business",
"conversation talking",
"running jogging",
"dining food",
"shopping mall",
"park outdoor",
]
# Video chunking defaults
DEFAULT_NUM_FRAMES = 16
DEFAULT_CHUNK_DURATION = 10.0
# FFmpeg configuration, restricting to 2 threads to avoid over subscription.
FFMPEG_THREADS = 6
# Setting this to 2 because i am assuming average
# video length in my corpus is 20 seconds.
# 20/10(default chunk duration) = 2
# this means we want to set num_cpus to 4 for deployment.
NUM_WORKERS = 3
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/tutorials/video-analysis/constants.py",
"license": "Apache License 2.0",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/serve/tutorials/video-analysis/deployments/decoder.py | """MultiDecoder deployment - CPU-based classification, retrieval, and scene detection."""
import io
import logging
import os
import aioboto3
import numpy as np
from ray import serve
from constants import (
S3_EMBEDDINGS_PREFIX,
SCENE_CHANGE_THRESHOLD,
EMA_ALPHA,
)
from utils.s3 import get_s3_region
logger = logging.getLogger(__name__)
@serve.deployment(
    num_replicas="auto",
    ray_actor_options={"num_cpus": 1},
    # NOTE(review): an earlier comment claimed the encoder is limited to 4
    # concurrent requests, but VideoEncoder sets max_ongoing_requests=2 —
    # confirm the intended pairing before tuning this value.
    max_ongoing_requests=4,
    autoscaling_config={
        "min_replicas": 1,
        "max_replicas": 10,
        # Key fixed from "target_num_ongoing_requests" for consistency with
        # VideoEncoder's autoscaling_config and with autoscaling_policy.py,
        # which reads ctx.config.target_ongoing_requests.
        "target_ongoing_requests": 2,
    },
)
class MultiDecoder:
    """
    Decodes video embeddings into tags, captions, and scene changes.
    Uses precomputed text embeddings loaded from S3.
    This deployment is stateless - EMA state for scene detection is passed
    in and returned with each call, allowing the caller to maintain state
    continuity across multiple replicas.
    """
    async def __init__(self, bucket: str, s3_prefix: str = S3_EMBEDDINGS_PREFIX):
        """Initialize decoder with text embeddings from S3.

        Args:
            bucket: S3 bucket holding the precomputed embedding files.
            s3_prefix: Key prefix under which the .npz files live.
        """
        self.bucket = bucket
        self.ema_alpha = EMA_ALPHA
        self.scene_threshold = SCENE_CHANGE_THRESHOLD
        self.s3_prefix = s3_prefix
        logger.info(f"MultiDecoder initializing (bucket={self.bucket}, ema_alpha={self.ema_alpha}, threshold={self.scene_threshold})")
        await self._load_embeddings()
        logger.info(f"MultiDecoder ready (tags={len(self.tag_texts)}, descriptions={len(self.desc_texts)})")

    async def _load_embeddings(self):
        """Load precomputed text embeddings (tags + descriptions) from S3."""
        session = aioboto3.Session(region_name=get_s3_region(self.bucket))
        async with session.client("s3") as s3:
            # Load tag embeddings
            tag_key = f"{self.s3_prefix}tag_embeddings.npz"
            response = await s3.get_object(Bucket=self.bucket, Key=tag_key)
            tag_data = await response["Body"].read()
            tag_npz = np.load(io.BytesIO(tag_data), allow_pickle=True)
            self.tag_embeddings = tag_npz["embeddings"]
            self.tag_texts = tag_npz["texts"].tolist()
            # Load description embeddings
            desc_key = f"{self.s3_prefix}description_embeddings.npz"
            response = await s3.get_object(Bucket=self.bucket, Key=desc_key)
            desc_data = await response["Body"].read()
            desc_npz = np.load(io.BytesIO(desc_data), allow_pickle=True)
            self.desc_embeddings = desc_npz["embeddings"]
            self.desc_texts = desc_npz["texts"].tolist()

    def _cosine_similarity(self, embedding: np.ndarray, bank: np.ndarray) -> np.ndarray:
        """Compute cosine similarity between embedding and all vectors in bank.

        A plain matrix-vector product; assumes both sides are L2-normalized
        (frame embeddings are normalized upstream) — TODO confirm the stored
        text embeddings are normalized too.
        """
        return bank @ embedding

    def _get_top_tags(self, embedding: np.ndarray, top_k: int = 5) -> list[dict]:
        """Get top-k matching tags with scores."""
        scores = self._cosine_similarity(embedding, self.tag_embeddings)
        top_indices = np.argsort(scores)[::-1][:top_k]
        return [
            {"text": self.tag_texts[i], "score": float(scores[i])}
            for i in top_indices
        ]

    def _get_retrieval_caption(self, embedding: np.ndarray) -> dict:
        """Get best matching description."""
        scores = self._cosine_similarity(embedding, self.desc_embeddings)
        best_idx = np.argmax(scores)
        return {
            "text": self.desc_texts[best_idx],
            "score": float(scores[best_idx]),
        }

    def _detect_scene_changes(
        self,
        frame_embeddings: np.ndarray,
        chunk_index: int,
        chunk_start_time: float,
        chunk_duration: float,
        ema_state: np.ndarray | None = None,
    ) -> tuple[list[dict], np.ndarray]:
        """
        Detect scene changes using EMA-based scoring.
        score_t = 1 - cosine(E_t, ema_t)
        ema_t = α * ema_{t-1} + (1-α) * E_t
        Args:
            frame_embeddings: (T, D) normalized embeddings
            chunk_index: Index of this chunk in the video
            chunk_start_time: Start time of chunk in video (seconds)
            chunk_duration: Duration of chunk (seconds)
            ema_state: EMA state from previous chunk, or None for first chunk
        Returns:
            Tuple of (scene_changes list, updated ema_state)
        """
        num_frames = len(frame_embeddings)
        if num_frames == 0:
            # Return empty changes and unchanged state (or zeros if no state).
            # NOTE(review): the zero-length fallback is not a valid EMA vector
            # for a subsequent chunk — confirm callers never hit this path
            # with more frames arriving later.
            return [], ema_state if ema_state is not None else np.zeros(0)
        # Initialize EMA from first frame if no prior state
        ema = ema_state.copy() if ema_state is not None else frame_embeddings[0].copy()
        scene_changes = []
        for frame_idx, embedding in enumerate(frame_embeddings):
            # Compute score: how different is current frame from recent history
            similarity = float(np.dot(embedding, ema))
            score = max(0.0, 1.0 - similarity)
            # Detect scene change if score exceeds threshold
            if score >= self.scene_threshold:
                # Calculate timestamp within video (frames assumed evenly
                # spaced across the chunk)
                frame_offset = (frame_idx / max(1, num_frames - 1)) * chunk_duration
                timestamp = chunk_start_time + frame_offset
                scene_changes.append({
                    "timestamp": round(timestamp, 3),
                    "score": round(score, 4),
                    "chunk_index": chunk_index,
                    "frame_index": frame_idx,
                })
            # Update EMA
            ema = self.ema_alpha * ema + (1 - self.ema_alpha) * embedding
            # Re-normalize so the cosine score stays meaningful
            ema = ema / np.linalg.norm(ema)
        return scene_changes, ema

    def __call__(
        self,
        encoder_output: dict,
        chunk_index: int,
        chunk_start_time: float,
        chunk_duration: float,
        top_k_tags: int = 5,
        ema_state: np.ndarray | None = None,
    ) -> dict:
        """
        Decode embeddings into tags, caption, and scene changes.
        Args:
            encoder_output: Dict with 'frame_embeddings' and 'embedding_dim'
            chunk_index: Index of this chunk in the video
            chunk_start_time: Start time of chunk (seconds)
            chunk_duration: Duration of chunk (seconds)
            top_k_tags: Number of top tags to return
            ema_state: EMA state from previous chunk for scene detection continuity.
                Pass None for the first chunk of a stream.
        Returns:
            Dict containing tags, retrieval_caption, scene_changes, and updated ema_state.
            The caller should pass the returned ema_state to the next chunk's call.
        """
        # Get frame embeddings from encoder output
        frame_embeddings = encoder_output["frame_embeddings"]
        # Calculate pooled embedding (mean across frames, normalized)
        pooled_embedding = frame_embeddings.mean(axis=0)
        pooled_embedding = pooled_embedding / np.linalg.norm(pooled_embedding)
        # Classification and retrieval on pooled embedding
        tags = self._get_top_tags(pooled_embedding, top_k=top_k_tags)
        caption = self._get_retrieval_caption(pooled_embedding)
        # Scene change detection on frame embeddings
        scene_changes, new_ema_state = self._detect_scene_changes(
            frame_embeddings=frame_embeddings,
            chunk_index=chunk_index,
            chunk_start_time=chunk_start_time,
            chunk_duration=chunk_duration,
            ema_state=ema_state,
        )
        return {
            "tags": tags,
            "retrieval_caption": caption,
            "scene_changes": scene_changes,
            "ema_state": new_ema_state,
        }
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/tutorials/video-analysis/deployments/decoder.py",
"license": "Apache License 2.0",
"lines": 171,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/serve/tutorials/video-analysis/deployments/encoder.py | """VideoEncoder deployment - GPU-based frame encoding using SigLIP."""
import asyncio
import logging
from typing import List
import numpy as np
import torch
from ray import serve
from transformers import AutoModel, AutoProcessor
from constants import MODEL_NAME
from utils.video import frames_to_pil_list
logger = logging.getLogger(__name__)
@serve.deployment(
    num_replicas="auto",
    ray_actor_options={"num_gpus": 1, "num_cpus": 2},
    # Cap on concurrent chunks per replica; GPU utilization hits 100% at 2
    # on an L4, so a higher value would only queue work on the device.
    max_ongoing_requests=2,
    autoscaling_config={
        "min_replicas": 1,
        "max_replicas": 10,
        # NOTE(review): newer Ray Serve versions spell this key
        # "target_ongoing_requests" — confirm against the pinned Ray version.
        "target_num_ongoing_requests": 2,
    },
)
class VideoEncoder:
    """
    Encodes video frames into embeddings using SigLIP.
    Returns L2-normalized per-frame embeddings plus the embedding dimension.
    """
    def __init__(self):
        # Prefer GPU when available; all tensors are moved to this device.
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        print(f"VideoEncoder initializing on {self.device}")
        # Load SigLIP model and processor once per replica.
        self.processor = AutoProcessor.from_pretrained(MODEL_NAME)
        self.model = AutoModel.from_pretrained(MODEL_NAME).to(self.device)
        self.model.eval()
        # Embedding width (D), returned to callers alongside the embeddings.
        self.embedding_dim = self.model.config.vision_config.hidden_size
        print(f"VideoEncoder ready (embedding_dim={self.embedding_dim})")
    def encode_frames(self, frames: np.ndarray) -> np.ndarray:
        """
        Encode frames and return per-frame embeddings.
        Args:
            frames: np.ndarray of shape (T, H, W, 3) uint8 RGB
        Returns:
            np.ndarray of shape (T, D) float32, L2-normalized per-frame embeddings
        """
        # Convert to PIL images for the HF processor.
        pil_images = frames_to_pil_list(frames)
        # Preprocess (resize/normalize) and move tensors to the model device.
        inputs = self.processor(images=pil_images, return_tensors="pt").to(self.device)
        # Forward pass; mixed precision only when running on CUDA.
        with torch.no_grad():
            with torch.amp.autocast(device_type=self.device, enabled=self.device == "cuda"):
                outputs = self.model.get_image_features(**inputs)
                # NOTE(review): assumes get_image_features returns an object with
                # .pooler_output — some transformers versions return a raw tensor
                # instead (the text-embedding job guards this with hasattr);
                # confirm against the pinned transformers version.
                frame_embeddings = torch.nn.functional.normalize(outputs.pooler_output, p=2, dim=1)
        # Move to CPU and convert to numpy
        result = frame_embeddings.cpu().numpy().astype(np.float32)
        return result
    async def encode_unbatched(self, frames: np.ndarray) -> dict:
        """
        Unbatched entry point - processes single request directly.
        Args:
            frames: np.ndarray of shape (T, H, W, 3)
        Returns:
            dict with 'frame_embeddings' and 'embedding_dim'
        """
        print(f"Unbatched: {frames.shape[0]} frames")
        # Run the blocking GPU call off the event-loop thread.
        frame_embeddings = await asyncio.to_thread(self.encode_frames, frames)
        return {
            "frame_embeddings": frame_embeddings,
            "embedding_dim": self.embedding_dim,
        }
    @serve.batch(max_batch_size=2, batch_wait_timeout_s=0.1)
    async def encode_batched(self, frames_batch: List[np.ndarray]) -> List[dict]:
        """
        Batched entry point - collects multiple requests into single GPU call.
        Args:
            frames_batch: List of frame arrays, each of shape (T, H, W, 3)
        Returns:
            List of dicts, each with 'frame_embeddings' and 'embedding_dim'
        """
        # Remember each request's frame count so results can be split back out.
        frame_counts = [f.shape[0] for f in frames_batch]
        total_frames = sum(frame_counts)
        print(f"Batched: {len(frames_batch)} requests ({total_frames} total frames)")
        # Concatenate all frames into single batch
        all_frames = np.concatenate(frames_batch, axis=0)
        # Single forward pass for all frames
        all_embeddings = await asyncio.to_thread(self.encode_frames, all_frames)
        # Split results back per request
        results = []
        offset = 0
        for n_frames in frame_counts:
            chunk_embeddings = all_embeddings[offset:offset + n_frames]
            results.append({
                "frame_embeddings": chunk_embeddings,
                "embedding_dim": self.embedding_dim,
            })
            offset += n_frames
        return results
    async def __call__(self, frames: np.ndarray, use_batching: bool = False) -> dict:
        """
        Main entry point. Set use_batching=False for direct comparison.
        """
        if use_batching:
            return await self.encode_batched(frames)
        else:
            return await self.encode_unbatched(frames)
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/tutorials/video-analysis/deployments/encoder.py",
"license": "Apache License 2.0",
"lines": 108,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/serve/tutorials/video-analysis/jobs/generate_text_embeddings.py | #!/usr/bin/env python3
"""
Ray job to generate SigLIP text embeddings for tags and descriptions.
Usage:
ray job submit --working-dir . -- python jobs/generate_text_embeddings.py --bucket my-bucket
"""
import argparse
import asyncio
import io
import json
import time
import aioboto3
import numpy as np
import ray
import torch
from transformers import AutoModel, AutoProcessor
from constants import MODEL_NAME, S3_EMBEDDINGS_PREFIX
from textbanks import TAGS, DESCRIPTIONS
from utils.s3 import get_s3_region
def load_textbanks() -> tuple[list[str], list[str]]:
    """Return the (tags, descriptions) text banks from the textbanks module."""
    tags = TAGS
    descriptions = DESCRIPTIONS
    return tags, descriptions
def compute_text_embeddings(
    texts: list[str],
    processor,
    model,
    device: str,
    batch_size: int = 32,
) -> np.ndarray:
    """Compute L2-normalized SigLIP text embeddings, batching the inputs.

    Args:
        texts: Strings to embed.
        processor: SigLIP processor used to tokenize the texts.
        model: SigLIP model providing ``get_text_features``.
        device: Torch device the model lives on (e.g. "cuda" or "cpu").
        batch_size: Number of texts per forward pass.

    Returns:
        float32 array of shape (len(texts), D), one unit-norm row per text.
    """
    batches = []
    for start in range(0, len(texts), batch_size):
        chunk = texts[start:start + batch_size]

        # Tokenize; SigLIP expects fixed-length ("max_length") padding.
        encoded = processor(
            text=chunk,
            padding="max_length",
            truncation=True,
            return_tensors="pt",
        )
        encoded = {name: tensor.to(device) for name, tensor in encoded.items()}

        with torch.no_grad():
            features = model.get_text_features(**encoded)

        # Some transformers versions return a model-output object rather
        # than a raw tensor; unwrap the pooled output in that case.
        if hasattr(features, 'pooler_output'):
            raw = features.pooler_output.cpu().numpy()
        else:
            raw = features.cpu().numpy()

        # L2-normalize each row so cosine similarity is a plain dot product.
        raw = raw / np.linalg.norm(raw, axis=1, keepdims=True)
        batches.append(raw)

    return np.vstack(batches).astype(np.float32)
async def save_to_s3(
    session: aioboto3.Session,
    embeddings: np.ndarray,
    texts: list[str],
    bucket: str,
    key: str,
) -> str:
    """Serialize embeddings + texts to a compressed ``.npz`` and upload to S3.

    Returns:
        The ``s3://bucket/key`` URI of the uploaded object.
    """
    # Bundle both arrays into one compressed archive held in memory.
    payload = io.BytesIO()
    np.savez_compressed(
        payload,
        embeddings=embeddings,
        texts=np.array(texts, dtype=object),
    )
    payload.seek(0)

    async with session.client("s3") as s3:
        await s3.put_object(
            Bucket=bucket,
            Key=key,
            Body=payload.getvalue(),
            ContentType="application/octet-stream",
        )

    return f"s3://{bucket}/{key}"
async def generate_and_upload(
    bucket: str,
    s3_prefix: str = S3_EMBEDDINGS_PREFIX,
) -> dict:
    """Generate embeddings and upload to S3.

    End-to-end pipeline: load text banks, load the SigLIP model (GPU when
    available), embed tags and descriptions, upload both archives to S3
    concurrently, and return a summary dict (S3 URIs, shapes, counts, timings).
    """
    print("=" * 60)
    print("Starting text embedding generation")
    print("=" * 60)
    # Load textbanks
    print("\n📚 Loading text banks...")
    tags, descriptions = load_textbanks()
    print(f" Tags: {len(tags)}")
    print(f" Descriptions: {len(descriptions)}")
    # Load model
    print(f"\n🤖 Loading SigLIP model: {MODEL_NAME}")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    print(f" Device: {device}")
    start = time.time()
    processor = AutoProcessor.from_pretrained(MODEL_NAME)
    model = AutoModel.from_pretrained(MODEL_NAME).to(device)
    # Inference mode: disables dropout and other training-only behavior.
    model.eval()
    load_time = time.time() - start
    print(f" Model loaded in {load_time:.1f}s")
    # Generate tag embeddings
    print("\n🏷️ Generating tag embeddings...")
    start = time.time()
    tag_embeddings = compute_text_embeddings(tags, processor, model, device)
    tag_time = time.time() - start
    print(f" Shape: {tag_embeddings.shape}")
    print(f" Time: {tag_time:.2f}s")
    # Generate description embeddings
    print("\n📝 Generating description embeddings...")
    start = time.time()
    desc_embeddings = compute_text_embeddings(descriptions, processor, model, device)
    desc_time = time.time() - start
    print(f" Shape: {desc_embeddings.shape}")
    print(f" Time: {desc_time:.2f}s")
    # Upload to S3 concurrently; gather preserves argument order, so the
    # first result is the tag URI and the second the description URI.
    print(f"\n☁️ Uploading to S3 bucket: {bucket}")
    session = aioboto3.Session(region_name=get_s3_region(bucket))
    tag_uri, desc_uri = await asyncio.gather(
        save_to_s3(session, tag_embeddings, tags, bucket, f"{s3_prefix}tag_embeddings.npz"),
        save_to_s3(session, desc_embeddings, descriptions, bucket, f"{s3_prefix}description_embeddings.npz"),
    )
    print(f" Tags: {tag_uri}")
    print(f" Descriptions: {desc_uri}")
    print("\n✅ Done!")
    return {
        "tag_embeddings": {
            "s3_uri": tag_uri,
            "shape": list(tag_embeddings.shape),
            "count": len(tags),
        },
        "description_embeddings": {
            "s3_uri": desc_uri,
            "shape": list(desc_embeddings.shape),
            "count": len(descriptions),
        },
        "model": MODEL_NAME,
        "timing": {
            "model_load_s": load_time,
            "tag_embed_s": tag_time,
            "desc_embed_s": desc_time,
        },
    }
@ray.remote(num_gpus=1)
def generate_embeddings_task(bucket: str, s3_prefix: str = S3_EMBEDDINGS_PREFIX) -> dict:
    """Ray task wrapper for async embedding generation."""
    # Runs on a GPU worker; asyncio.run drives the async pipeline to
    # completion inside this (synchronous) Ray task.
    return asyncio.run(generate_and_upload(bucket, s3_prefix))
def main():
    """CLI entry point: parse args, run the embedding task, print results."""
    arg_parser = argparse.ArgumentParser(description="Generate text embeddings for decoder")
    arg_parser.add_argument("--bucket", type=str, required=True, help="S3 bucket name")
    cli_args = arg_parser.parse_args()

    # Submit the GPU task to the already-running Ray cluster and block on it.
    summary = ray.get(generate_embeddings_task.remote(cli_args.bucket))

    separator = "=" * 60
    print("\n" + separator)
    print("Results:")
    print(separator)
    print(json.dumps(summary, indent=2))


if __name__ == "__main__":
    main()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/tutorials/video-analysis/jobs/generate_text_embeddings.py",
"license": "Apache License 2.0",
"lines": 157,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/serve/tutorials/video-analysis/scripts/download_stock_videos.py | #!/usr/bin/env python3
"""
Download stock videos from Pexels and upload to S3 (async version).
Videos are normalized to consistent specs before upload for predictable performance.
Usage:
python scripts/download_stock_videos.py --api-key YOUR_KEY --bucket YOUR_BUCKET
"""
import argparse
import asyncio
import json
import os
import shutil
import subprocess
import sys
import tempfile
from pathlib import Path
from typing import Optional
import aioboto3
import httpx
from botocore.exceptions import ClientError
from constants import (
SEARCH_QUERIES,
PEXELS_API_BASE,
MAX_CONCURRENT_DOWNLOADS,
MAX_CONCURRENT_UPLOADS,
S3_VIDEOS_PREFIX,
NORMALIZE_WIDTH,
NORMALIZE_HEIGHT,
NORMALIZE_FPS,
)
from utils.s3 import get_s3_region
def normalize_video(
    input_path: Path,
    output_path: Path,
    width: int = NORMALIZE_WIDTH,
    height: int = NORMALIZE_HEIGHT,
    fps: int = NORMALIZE_FPS,
    preset: str = "fast",
) -> bool:
    """Re-encode a video to consistent specs with ffmpeg.

    Scales with letterboxing (aspect ratio preserved), forces a constant
    frame rate, encodes H.264 main profile with a 1-second GOP, and strips
    the audio track.

    Args:
        input_path: Source video file.
        output_path: Destination for the normalized file.
        width: Target width in pixels.
        height: Target height in pixels.
        fps: Target frame rate.
        preset: x264 speed/quality preset.

    Returns:
        True when ffmpeg exits successfully, False otherwise.
    """
    # Scale to fit, pad to the exact frame size, then resample the frame rate.
    video_filter = (
        f"scale={width}:{height}:force_original_aspect_ratio=decrease,"
        f"pad={width}:{height}:(ow-iw)/2:(oh-ih)/2,fps={fps}"
    )
    command = [
        "ffmpeg", "-y",
        "-i", str(input_path),
        "-vf", video_filter,
        "-c:v", "libx264",
        "-pix_fmt", "yuv420p",
        "-profile:v", "main",
        "-preset", preset,
        "-g", str(fps),  # GOP size = 1 second
        "-keyint_min", str(fps),
        "-sc_threshold", "0",
        "-movflags", "+faststart",
        "-an",  # Remove audio
        str(output_path),
    ]

    proc = subprocess.run(command, capture_output=True, text=True)
    if proc.returncode == 0:
        return True
    print(f" ⚠️ ffmpeg normalization failed: {proc.stderr[:200]}")
    return False
async def search_videos(
    client: httpx.AsyncClient,
    api_key: str,
    query: str,
    per_page: int = 5
) -> list[dict]:
    """Query the Pexels search API and return the matching video records.

    Returns an empty list (after logging) on any HTTP failure.
    """
    request_headers = {"Authorization": api_key}
    request_params = {
        "query": query,
        "per_page": per_page,
        "orientation": "landscape",
        "size": "medium",
    }
    try:
        resp = await client.get(
            f"{PEXELS_API_BASE}/search",
            headers=request_headers,
            params=request_params,
            timeout=30.0
        )
        resp.raise_for_status()
    except httpx.HTTPError as e:
        print(f" ⚠️ Error searching for '{query}': {e}")
        return []
    # Missing "videos" key degrades to an empty result rather than a KeyError.
    return resp.json().get("videos", [])
def get_best_video_file(video: dict, max_width: int = 1280) -> Optional[dict]:
    """Pick the highest-resolution video file no wider than ``max_width``.

    Prefers files tagged "hd" or "sd" within the width limit; if none
    qualify, falls back to considering all available files.

    Args:
        video: Pexels video record containing a "video_files" list.
        max_width: Maximum acceptable width for the preferred selection.

    Returns:
        The chosen video-file dict, or None when the record has no files.
    """
    candidates = video.get("video_files", [])

    preferred = [
        vf for vf in candidates
        if vf.get("width", 0) <= max_width and vf.get("quality") in ("hd", "sd")
    ]
    # Fall back to every file when nothing matched the quality/width filter.
    pool = preferred or candidates
    if not pool:
        return None

    # BUG FIX: the previous in-place sort mutated the caller's
    # video["video_files"] list on the fallback path (the list was aliased,
    # not copied). max() selects the widest file without side effects; ties
    # keep the earliest entry, matching the old stable descending sort.
    return max(pool, key=lambda vf: vf.get("width", 0))
async def download_video(
    client: httpx.AsyncClient,
    url: str,
    dest_path: Path
) -> bool:
    """Stream a video from ``url`` to ``dest_path``; True on success."""
    try:
        async with client.stream("GET", url, timeout=120.0) as response:
            response.raise_for_status()
            # Write as chunks arrive so large files never sit fully in memory.
            with open(dest_path, "wb") as out:
                async for part in response.aiter_bytes(chunk_size=8192):
                    out.write(part)
    except Exception as e:
        print(f" ⚠️ Download error: {e}")
        return False
    return True
def sanitize_metadata(metadata: dict) -> dict:
    """Coerce metadata values to ASCII-only strings (S3 metadata requirement).

    Every value is stringified; non-ASCII characters become '?'.
    """
    return {
        key: str(value).encode("ascii", errors="replace").decode("ascii")
        for key, value in metadata.items()
    }
async def upload_to_s3(
    s3_client,
    local_path: Path,
    bucket: str,
    s3_key: str,
    metadata: dict
) -> bool:
    """Upload ``local_path`` to ``s3://bucket/s3_key`` with sanitized metadata.

    Returns True on success; logs and returns False on S3 client errors.
    """
    upload_args = {
        "ContentType": "video/mp4",
        "Metadata": sanitize_metadata(metadata)
    }
    try:
        await s3_client.upload_file(str(local_path), bucket, s3_key, ExtraArgs=upload_args)
    except ClientError as e:
        print(f" ⚠️ S3 upload error: {e}")
        return False
    return True
def generate_filename(video: dict, query: str, index: int) -> str:
    """Build a descriptive, filesystem-safe filename for a Pexels video."""
    # Spaces -> underscores, slashes -> dashes, capped at 30 characters.
    safe_query = query.replace(" ", "_").replace("/", "-")[:30]
    pexels_id = video.get("id", "unknown")
    return f"{safe_query}_{pexels_id}_{index:02d}.mp4"
async def process_video(
    http_client: httpx.AsyncClient,
    s3_client,
    video: dict,
    index: int,
    temp_dir: Path,
    local_dir: Optional[Path],
    bucket: Optional[str],
    s3_prefix: str,
    download_sem: asyncio.Semaphore,
    upload_sem: asyncio.Semaphore,
    normalize: bool = True,
    normalize_width: int = NORMALIZE_WIDTH,
    normalize_height: int = NORMALIZE_HEIGHT,
    normalize_fps: int = NORMALIZE_FPS,
) -> Optional[dict]:
    """Download, normalize, and upload a single video.

    Args:
        http_client: Shared async HTTP client for the download.
        s3_client: Async S3 client, or None when S3 upload is disabled.
        video: Pexels video record (must carry a "_query" key).
        index: Position of this video in the batch (used in the filename).
        temp_dir: Scratch directory for the raw and normalized files.
        local_dir: Optional directory to also keep a local copy in.
        bucket: Target S3 bucket, or None when not uploading.
        s3_prefix: Key prefix under which the video is stored.
        download_sem / upload_sem: Concurrency limits for network transfers.
        normalize: Whether to re-encode to consistent specs.
        normalize_width / normalize_height / normalize_fps: Target specs.

    Returns:
        A manifest entry dict on success, or None if the video was skipped
        or the download/upload failed.
    """
    video_file = get_best_video_file(video)
    if not video_file:
        print(f" {index+1:2d}. ⚠️ No suitable video file found, skipping")
        return None
    filename = generate_filename(video, video["_query"], index)
    # BUG FIX: these f-strings previously interpolated a stray "(unknown)"
    # literal instead of the computed filename, so every video in a batch
    # collided on the same temp path and S3 key.
    raw_path = temp_dir / f"raw_{filename}"
    normalized_path = temp_dir / filename
    print(f" {index+1:2d}. Downloading: {filename}")
    download_url = video_file.get("link")
    if not download_url:
        print(f" ⚠️ No download URL, skipping")
        return None
    # Download with semaphore to bound concurrent transfers
    async with download_sem:
        if not await download_video(http_client, download_url, raw_path):
            return None
    raw_size_mb = raw_path.stat().st_size / (1024 * 1024)
    print(f" ✅ Downloaded ({raw_size_mb:.1f} MB)")
    # Normalize video
    if normalize:
        print(f" 🔄 Normalizing to {normalize_width}x{normalize_height}@{normalize_fps}fps...")
        # Run normalization in a thread pool to not block the event loop
        success = await asyncio.to_thread(
            normalize_video,
            raw_path,
            normalized_path,
            normalize_width,
            normalize_height,
            normalize_fps,
        )
        if not success:
            # Best-effort: fall back to the original file rather than failing.
            print(f" ⚠️ Normalization failed, using original")
            shutil.move(raw_path, normalized_path)
        else:
            normalized_size_mb = normalized_path.stat().st_size / (1024 * 1024)
            print(f" ✅ Normalized ({raw_size_mb:.1f} MB → {normalized_size_mb:.1f} MB)")
            raw_path.unlink(missing_ok=True)
        # Update dimensions to normalized values
        final_width = normalize_width
        final_height = normalize_height
    else:
        # No normalization, just rename
        shutil.move(raw_path, normalized_path)
        final_width = video_file.get("width")
        final_height = video_file.get("height")
    # Copy to local dir if specified
    if local_dir:
        local_path = local_dir / filename
        shutil.copy2(normalized_path, local_path)
        print(f" 📁 Saved locally: {local_path}")
    result = None
    # Upload to S3
    if s3_client and bucket:
        s3_key = f"{s3_prefix}{filename}"
        metadata = {
            "pexels_id": str(video.get("id", "")),
            "query": video["_query"],
            "width": str(final_width),
            "height": str(final_height),
            "duration": str(video.get("duration", "")),
            "photographer": video.get("user", {}).get("name", ""),
            "normalized": str(normalize),
        }
        async with upload_sem:
            if await upload_to_s3(s3_client, normalized_path, bucket, s3_key, metadata):
                print(f" ☁️ Uploaded to: s3://{bucket}/{s3_key}")
                result = {
                    "filename": filename,
                    "s3_uri": f"s3://{bucket}/{s3_key}",
                    "s3_key": s3_key,
                    "pexels_id": video.get("id"),
                    "query": video["_query"],
                    "duration": video.get("duration"),
                    "width": final_width,
                    "height": final_height,
                }
    elif local_dir:
        # Local only mode
        result = {
            "filename": filename,
            "local_path": str(local_dir / filename),
            "pexels_id": video.get("id"),
            "query": video["_query"],
            "duration": video.get("duration"),
            "width": final_width,
            "height": final_height,
        }
    # Clean up temp file
    normalized_path.unlink(missing_ok=True)
    return result
async def download_sample_videos(
    api_key: str | None = None,
    bucket: str | None = None,
    total: int = 20,
    per_query: int = 1,
    local_dir: str | None = None,
    dry_run: bool = False,
    skip_s3: bool = False,
    normalize: bool = True,
    width: int = NORMALIZE_WIDTH,
    height: int = NORMALIZE_HEIGHT,
    fps: int = NORMALIZE_FPS,
    s3_prefix: str = S3_VIDEOS_PREFIX,
    overwrite: bool = True,
) -> list[str]:
    """Download videos from Pexels, normalize them, and upload to S3.
    If a manifest already exists in S3, returns the existing video paths
    without downloading new videos.
    Args:
        api_key: Pexels API key. Falls back to PEXELS_API_KEY env var.
        bucket: S3 bucket name. Falls back to S3_BUCKET env var.
        total: Total number of videos to download.
        per_query: Number of videos per search query.
        local_dir: Optional local directory to save videos.
        dry_run: If True, only show what would be downloaded.
        skip_s3: If True, skip S3 upload (local download only).
        normalize: If True, normalize videos to consistent specs.
        width: Normalized video width.
        height: Normalized video height.
        fps: Normalized video FPS.
        s3_prefix: Key prefix for uploaded videos and the manifest.
        overwrite: If True, delete any existing manifest before checking.
    Returns:
        List of video paths (S3 URIs or local paths).
    """
    # Get configuration from args or environment
    api_key = api_key or os.environ.get("PEXELS_API_KEY")
    bucket = bucket or os.environ.get("S3_BUCKET")
    if not bucket and not skip_s3:
        print("❌ Error: S3 bucket required (--bucket or S3_BUCKET)")
        print(" Set it or use --skip-s3 to only download locally")
        sys.exit(1)
    # Setup S3 session only when uploading.
    # BUG FIX: the session (and the bucket-region lookup inside it) was
    # previously created unconditionally, which crashed --skip-s3 runs that
    # had no bucket configured.
    session = None
    s3_client = None
    if not skip_s3:
        session = aioboto3.Session(region_name=get_s3_region(bucket))
        async with session.client("s3") as s3:
            try:
                await s3.head_bucket(Bucket=bucket)
                print(f"✅ S3 bucket '{bucket}' accessible")
            except ClientError as e:
                print(f"❌ Error: Cannot access S3 bucket '{bucket}': {e}")
                sys.exit(1)
            if overwrite:
                print("🔄 Overwriting existing manifest")
                await s3.delete_object(Bucket=bucket, Key=f"{s3_prefix}manifest.json")
            # Check if manifest already exists - return early if so
            manifest_key = f"{s3_prefix}manifest.json"
            try:
                response = await s3.get_object(Bucket=bucket, Key=manifest_key)
                manifest_data = await response["Body"].read()
                manifest = json.loads(manifest_data.decode("utf-8"))
                # Extract paths from existing manifest
                video_paths = []
                for v in manifest.get("videos", []):
                    if "s3_uri" in v:
                        video_paths.append(v["s3_uri"])
                    elif "local_path" in v:
                        video_paths.append(v["local_path"])
                print(f"✅ Found existing manifest with {len(video_paths)} videos in S3")
                print(f" Skipping Pexels API download")
                return video_paths
            except ClientError as e:
                if e.response["Error"]["Code"] != "NoSuchKey":
                    raise
                # Manifest doesn't exist, continue with download
                print("📥 No existing manifest found, will download from Pexels")
    # Need API key for downloading
    if not api_key:
        print("❌ Error: Pexels API key required (--api-key or PEXELS_API_KEY)")
        print(" Get your free API key at: https://www.pexels.com/api/")
        sys.exit(1)
    # Create local directory if specified
    local_dir_path = None
    if local_dir:
        local_dir_path = Path(local_dir)
        local_dir_path.mkdir(parents=True, exist_ok=True)
        print(f"📁 Local directory: {local_dir_path}")
    # Create temp directory for downloads
    temp_dir = Path(tempfile.mkdtemp(prefix="pexels_videos_"))
    print(f"📁 Temp directory: {temp_dir}")
    # Track downloaded videos (dedupe by Pexels id across queries)
    video_ids_seen = set()
    print(f"\n🔍 Searching for {total} videos across {len(SEARCH_QUERIES)} queries...\n")
    # Search and collect videos concurrently
    all_videos = []
    async with httpx.AsyncClient() as http_client:
        # Search all queries concurrently; over-fetch slightly so dedupe
        # still leaves enough candidates per query.
        search_tasks = [
            search_videos(http_client, api_key, query, per_page=per_query + 2)
            for query in SEARCH_QUERIES
        ]
        results = await asyncio.gather(*search_tasks)
        for query, videos in zip(SEARCH_QUERIES, results):
            print(f" Found {len(videos)} for '{query}'")
            for video in videos:
                if len(all_videos) >= total:
                    break
                video_id = video.get("id")
                if video_id and video_id not in video_ids_seen:
                    video_ids_seen.add(video_id)
                    video["_query"] = query
                    all_videos.append(video)
    all_videos = all_videos[:total]
    print(f"\n📹 Selected {len(all_videos)} unique videos\n")
    if dry_run:
        print("🔍 DRY RUN - Would download these videos:\n")
        for i, video in enumerate(all_videos):
            video_file = get_best_video_file(video)
            if video_file:
                filename = generate_filename(video, video["_query"], i)
                # BUG FIX: previously printed a literal "(unknown)" instead
                # of the generated filename.
                print(f" {i+1:2d}. {filename}")
                print(f" URL: {video_file.get('link', 'N/A')[:80]}...")
                print(f" Size: {video_file.get('width')}x{video_file.get('height')}")
                print()
        return []
    # Download and upload videos concurrently
    if normalize:
        print(f"⬇️ Downloading, normalizing ({width}x{height}@{fps}fps), and uploading videos...\n")
    else:
        print("⬇️ Downloading and uploading videos (no normalization)...\n")
    download_sem = asyncio.Semaphore(MAX_CONCURRENT_DOWNLOADS)
    upload_sem = asyncio.Semaphore(MAX_CONCURRENT_UPLOADS)
    downloaded_videos = []
    async with httpx.AsyncClient() as http_client:
        if skip_s3:
            # No S3, just download
            tasks = [
                process_video(
                    http_client, None, video, i, temp_dir, local_dir_path,
                    None, s3_prefix, download_sem, upload_sem,
                    normalize=normalize,
                    normalize_width=width,
                    normalize_height=height,
                    normalize_fps=fps,
                )
                for i, video in enumerate(all_videos)
            ]
            results = await asyncio.gather(*tasks, return_exceptions=True)
            downloaded_videos = [r for r in results if r is not None and not isinstance(r, Exception)]
            for r in results:
                if isinstance(r, Exception):
                    print(f" ⚠️ Task failed: {r}")
        else:
            # With S3
            async with session.client("s3") as s3_client:
                tasks = [
                    process_video(
                        http_client, s3_client, video, i, temp_dir, local_dir_path,
                        bucket, s3_prefix, download_sem, upload_sem,
                        normalize=normalize,
                        normalize_width=width,
                        normalize_height=height,
                        normalize_fps=fps,
                    )
                    for i, video in enumerate(all_videos)
                ]
                results = await asyncio.gather(*tasks, return_exceptions=True)
                downloaded_videos = [r for r in results if r is not None and not isinstance(r, Exception)]
                # Log any exceptions
                for r in results:
                    if isinstance(r, Exception):
                        print(f" ⚠️ Task failed: {r}")
    # Save manifest
    manifest = {
        "total_videos": len(downloaded_videos),
        "s3_bucket": bucket if not skip_s3 else None,
        "s3_prefix": s3_prefix if not skip_s3 else None,
        "local_dir": str(local_dir_path) if local_dir_path else None,
        "normalized": normalize,
        "normalize_settings": {
            "width": width,
            "height": height,
            "fps": fps,
        } if normalize else None,
        "videos": downloaded_videos,
    }
    manifest_path = Path("video_manifest.json")
    with open(manifest_path, "w") as f:
        json.dump(manifest, f, indent=2)
    print(f"\n📋 Manifest saved to: {manifest_path}")
    # Also upload manifest to S3
    if not skip_s3 and bucket:
        async with session.client("s3") as s3_client:
            manifest_s3_key = f"{s3_prefix}manifest.json"
            try:
                await s3_client.put_object(
                    Bucket=bucket,
                    Key=manifest_s3_key,
                    Body=json.dumps(manifest, indent=2),
                    ContentType="application/json"
                )
                print(f"☁️ Manifest uploaded to: s3://{bucket}/{manifest_s3_key}")
            except ClientError as e:
                print(f"⚠️ Failed to upload manifest: {e}")
    # Cleanup temp dir
    try:
        temp_dir.rmdir()
    except OSError:
        pass  # May not be empty if some downloads failed
    print(f"\n✅ Done! Processed {len(downloaded_videos)} videos.")
    # Extract paths from downloaded videos
    video_paths = []
    for v in downloaded_videos:
        if "s3_uri" in v:
            video_paths.append(v["s3_uri"])
        elif "local_path" in v:
            video_paths.append(v["local_path"])
    if video_paths:
        print("\n📝 Sample paths for testing:")
        for path in video_paths[:5]:
            print(f" {path}")
    return video_paths
def main():
    """Parse CLI arguments and run the async download/normalize/upload pipeline."""
    parser = argparse.ArgumentParser(
        description="Download Pexels videos, normalize them, and upload to S3",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Normalization applies:
- Resolution scaling with letterboxing (preserves aspect ratio)
- Consistent FPS
- H.264 codec (libx264, main profile)
- 1-second GOP for fast seeking
- Removes audio
Examples:
# Download and normalize to default 1280x720@30fps
python scripts/download_stock_videos.py --api-key KEY --bucket BUCKET
# Custom resolution
python scripts/download_stock_videos.py --api-key KEY --bucket BUCKET --width 1920 --height 1080 --fps 24
# Skip normalization (upload original files)
python scripts/download_stock_videos.py --api-key KEY --bucket BUCKET --no-normalize
"""
    )
    parser.add_argument("--api-key", type=str, help="Pexels API key (or set PEXELS_API_KEY)")
    parser.add_argument("--bucket", type=str, help="S3 bucket name (or set S3_BUCKET)")
    parser.add_argument("--total", type=int, default=20, help="Total videos to download")
    parser.add_argument("--per-query", type=int, default=1, help="Videos per search query")
    parser.add_argument("--local-dir", type=str, help="Also save videos locally to this directory")
    parser.add_argument("--dry-run", action="store_true", help="Just show what would be downloaded")
    parser.add_argument("--skip-s3", action="store_true", help="Skip S3 upload, only download locally")
    # Normalization options
    parser.add_argument("--no-normalize", action="store_true",
                        help="Skip video normalization (upload original files)")
    parser.add_argument("--width", type=int, default=NORMALIZE_WIDTH,
                        help=f"Normalized video width (default: {NORMALIZE_WIDTH})")
    parser.add_argument("--height", type=int, default=NORMALIZE_HEIGHT,
                        help=f"Normalized video height (default: {NORMALIZE_HEIGHT})")
    parser.add_argument("--fps", type=int, default=NORMALIZE_FPS,
                        help=f"Normalized video FPS (default: {NORMALIZE_FPS})")
    args = parser.parse_args()
    # The pipeline is async end-to-end; drive it with a fresh event loop.
    asyncio.run(download_sample_videos(
        api_key=args.api_key,
        bucket=args.bucket,
        total=args.total,
        per_query=args.per_query,
        local_dir=args.local_dir,
        dry_run=args.dry_run,
        skip_s3=args.skip_s3,
        normalize=not args.no_normalize,
        width=args.width,
        height=args.height,
        fps=args.fps,
    ))

if __name__ == "__main__":
    main()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/tutorials/video-analysis/scripts/download_stock_videos.py",
"license": "Apache License 2.0",
"lines": 546,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:doc/source/serve/tutorials/video-analysis/textbanks/descriptions.py | # Descriptions for caption retrieval
# Candidate captions for retrieval: the decoder matches each chunk's pooled
# SigLIP embedding against text embeddings of these sentences and returns
# the closest one as the chunk's "retrieval_caption".
DESCRIPTIONS = [
    "A person cooking in a kitchen",
    "Someone preparing food on a counter",
    "A chef working in a professional kitchen",
    "People eating at a dining table",
    "A group having a meal together",
    "A person working at a desk",
    "Someone typing on a laptop",
    "A business meeting in progress",
    "A presentation being given",
    "People collaborating in an office",
    "A teacher lecturing in a classroom",
    "Students sitting at desks",
    "A person giving a speech",
    "Someone writing on a whiteboard",
    "A customer shopping in a store",
    "People browsing products on shelves",
    "A cashier at a checkout counter",
    "A person exercising at a gym",
    "Someone lifting weights",
    "A person running on a treadmill",
    "People walking on a city sidewalk",
    "Pedestrians crossing a street",
    "Traffic moving through an intersection",
    "Cars driving on a road",
    "A vehicle parked in a lot",
    "People walking through a park",
    "Someone jogging outdoors",
    "A group having a conversation",
    "Two people talking face to face",
    "A person on a phone call",
    "Someone reading a book",
    "A person watching television",
    "People waiting in line",
    "A crowded public space",
    "An empty hallway or corridor",
    "A person entering a building",
    "Someone opening a door",
    "A delivery being made",
    "A person carrying boxes",
    "Workers in a warehouse",
]
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/tutorials/video-analysis/textbanks/descriptions.py",
"license": "Apache License 2.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/serve/tutorials/video-analysis/textbanks/tags.py | # Tags for zero-shot scene classification (short category labels)
# Closed vocabulary of scene labels for zero-shot classification: these are
# embedded once by the text-embedding job and compared against pooled frame
# embeddings to produce each chunk's top-k tags.
TAGS = [
    "kitchen",
    "living room",
    "office",
    "meeting room",
    "classroom",
    "restaurant",
    "cafe",
    "grocery store",
    "gym",
    "warehouse",
    "parking lot",
    "city street",
    "park",
    "shopping mall",
    "beach",
    "sports field",
    "hallway",
    "lobby",
    "bathroom",
    "bedroom",
]
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/tutorials/video-analysis/textbanks/tags.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/serve/tutorials/video-analysis/utils/s3.py | import boto3
def get_s3_region(bucket: str) -> str:
    """Return the AWS region a bucket lives in.

    S3 reports ``LocationConstraint`` as None for us-east-1, so a falsy
    value is mapped to that explicit region name.
    """
    client = boto3.client("s3")
    location = client.get_bucket_location(Bucket=bucket)["LocationConstraint"]
    return location if location else "us-east-1"
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/tutorials/video-analysis/utils/s3.py",
"license": "Apache License 2.0",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/serve/tutorials/video-analysis/utils/video.py | """Video loading and frame sampling utilities using ffmpeg."""
import asyncio
import json
import subprocess
from dataclasses import dataclass
from typing import Optional
import numpy as np
from PIL import Image
from constants import NUM_WORKERS
@dataclass
class VideoMetadata:
    """Video metadata extracted from ffprobe."""
    duration: float  # total length in seconds (may come from the container)
    fps: float  # reported frame rate (parsed from r_frame_rate)
    width: int  # frame width in pixels
    height: int  # frame height in pixels
    num_frames: int  # total frames (derived as duration * fps when not reported)
def get_video_metadata(video_path: str) -> VideoMetadata:
    """Probe a video (local path or URL) with ffprobe and return its metadata.

    Falls back to the container-level duration when the stream does not
    report one, and derives the frame count from duration * fps when
    ``nb_frames`` is unavailable.
    """
    # JSON output gives reliable field names (CSV column order is not stable).
    probe_cmd = [
        "ffprobe",
        "-v", "error",
        "-select_streams", "v:0",
        "-show_entries", "stream=width,height,r_frame_rate,nb_frames,duration",
        "-of", "json",
        video_path,
    ]
    probe = subprocess.run(probe_cmd, capture_output=True, text=True, check=True)
    stream_info = json.loads(probe.stdout)["streams"][0]

    frame_w = int(stream_info["width"])
    frame_h = int(stream_info["height"])

    # r_frame_rate may be a fraction ("30/1") or a plain decimal ("29.97").
    rate = stream_info["r_frame_rate"]
    if "/" in rate:
        numerator, denominator = rate.split("/")
        frame_rate = float(numerator) / float(denominator)
    else:
        frame_rate = float(rate)

    # Some containers report nb_frames as "N/A" or omit it entirely.
    try:
        total_frames = int(stream_info.get("nb_frames", 0))
    except (ValueError, TypeError):
        total_frames = 0

    # Stream-level duration is equally unreliable; treat parse failures as 0.
    try:
        length_s = float(stream_info.get("duration", 0))
    except (ValueError, TypeError):
        length_s = 0

    if length_s == 0:
        # Fallback: ask the container (format) for the duration instead.
        fallback_cmd = [
            "ffprobe",
            "-v", "error",
            "-show_entries", "format=duration",
            "-of", "json",
            video_path,
        ]
        fallback = subprocess.run(fallback_cmd, capture_output=True, text=True, check=True)
        length_s = float(json.loads(fallback.stdout)["format"]["duration"])

    if total_frames == 0:
        total_frames = int(length_s * frame_rate)

    return VideoMetadata(
        duration=length_s,
        fps=frame_rate,
        width=frame_w,
        height=frame_h,
        num_frames=total_frames,
    )
def extract_frames_ffmpeg(
    video_path: str,
    start_time: float,
    duration: float,
    num_frames: int,
    target_size: int = 384,
    ffmpeg_threads: int = 0,
) -> np.ndarray:
    """
    Extract frames from a video segment using ffmpeg.
    Works with local files and URLs (including presigned S3 URLs).

    Args:
        video_path: Path to video file or URL
        start_time: Start time in seconds
        duration: Duration to extract in seconds
        num_frames: Number of frames to extract (uniformly sampled)
        target_size: Output frame size (square)
        ffmpeg_threads: Number of threads for FFmpeg (0 = auto)

    Returns:
        np.ndarray of shape (num_frames, target_size, target_size, 3) uint8 RGB
    """
    # Choose an output fps so that ffmpeg emits roughly num_frames frames
    # over the requested window.
    output_fps = num_frames / duration if duration > 0 else num_frames
    cmd = [
        "ffmpeg",
        "-threads", str(ffmpeg_threads),
        "-ss", str(start_time),
        "-t", str(duration),
        "-i", video_path,
        "-vf", f"fps={output_fps},scale={target_size}:{target_size}",
        "-pix_fmt", "rgb24",
        "-f", "rawvideo",
        "-",
    ]
    proc = subprocess.run(
        cmd,
        capture_output=True,
        check=True,
    )
    # Decode the raw RGB stream: each frame is a fixed-size byte run.
    bytes_per_frame = target_size * target_size * 3
    payload = proc.stdout
    decoded_count = len(payload) // bytes_per_frame
    if decoded_count == 0:
        raise ValueError(f"No frames extracted from {video_path} at {start_time}s")
    frames = np.frombuffer(
        payload[: decoded_count * bytes_per_frame], dtype=np.uint8
    ).reshape(decoded_count, target_size, target_size, 3)
    # Normalize to exactly num_frames: repeat the last frame if short,
    # drop trailing frames if long.
    shortfall = num_frames - decoded_count
    if shortfall > 0:
        tail = np.tile(frames[-1:], (shortfall, 1, 1, 1))
        frames = np.concatenate([frames, tail], axis=0)
    elif shortfall < 0:
        frames = frames[:num_frames]
    return frames
@dataclass
class VideoChunk:
    """Represents a chunk of video to process."""

    index: int  # 0-based position of the chunk within the video
    start_time: float  # chunk start offset in seconds
    duration: float  # chunk length in seconds
    frames: Optional[np.ndarray] = None  # (num_frames, size, size, 3) uint8 RGB once extracted
async def extract_frames_async(
    video_path: str,
    start_time: float,
    duration: float,
    num_frames: int,
    target_size: int = 384,
    ffmpeg_threads: int = 0,
) -> np.ndarray:
    """Run extract_frames_ffmpeg in a worker thread so the event loop stays free."""
    return await asyncio.to_thread(
        extract_frames_ffmpeg,
        video_path,
        start_time=start_time,
        duration=duration,
        num_frames=num_frames,
        target_size=target_size,
        ffmpeg_threads=ffmpeg_threads,
    )
def _extract_all_chunks_single_ffmpeg(
    video_path: str,
    chunk_defs: list[tuple[int, float, float]],
    num_frames_per_chunk: int,
    target_size: int,
    ffmpeg_threads: int = 0,
) -> list[np.ndarray]:
    """
    Extract frames for ALL chunks in a single FFmpeg call.

    Uses the select filter to pick specific frame timestamps, avoiding
    multiple process spawns and file seeks.

    Args:
        video_path: Path to video file or URL
        chunk_defs: List of (index, start_time, duration) tuples
        num_frames_per_chunk: Frames to extract per chunk
        target_size: Output frame size (square)
        ffmpeg_threads: Number of threads for FFmpeg (0 = auto)

    Returns:
        List of numpy arrays, one per chunk
    """
    # Uniformly sample num_frames_per_chunk timestamps inside each chunk.
    all_timestamps = []
    for _idx, start, duration in chunk_defs:
        for i in range(num_frames_per_chunk):
            all_timestamps.append(start + (i * duration / num_frames_per_chunk))
    if not all_timestamps:
        return []
    # Select frames whose presentation time falls within +/- eps of any sampled
    # timestamp. One select-filter pass replaces per-chunk ffmpeg invocations.
    eps = 0.02  # 20ms tolerance for frame selection
    select_parts = [f"between(t,{t-eps},{t+eps})" for t in all_timestamps]
    select_expr = "+".join(select_parts)
    cmd = [
        "ffmpeg",
        "-threads", str(ffmpeg_threads),
        "-i", video_path,
        "-vf", f"select='{select_expr}',scale={target_size}:{target_size}",
        "-vsync", "vfr",  # Variable frame rate to preserve selected frames
        "-pix_fmt", "rgb24",
        "-f", "rawvideo",
        "-",
    ]
    result = subprocess.run(cmd, capture_output=True, check=True)
    # Parse raw video frames
    frame_size = target_size * target_size * 3
    raw_data = result.stdout
    total_frames = len(raw_data) // frame_size
    if total_frames == 0:
        raise ValueError(f"No frames extracted from {video_path}")
    all_frames = np.frombuffer(raw_data[:total_frames * frame_size], dtype=np.uint8)
    all_frames = all_frames.reshape(total_frames, target_size, target_size, 3)
    # Partition the decoded frames back into per-chunk arrays. Chunks that got
    # fewer frames than requested are padded with their last frame, or with
    # black frames when they got none.
    # NOTE(review): this assumes the select filter emitted frames in chunk
    # order, num_frames_per_chunk at a time. If the eps window misses frames
    # for an early chunk, later chunks shift — confirm against real inputs.
    chunk_frames = []
    frame_idx = 0
    for _idx, start, duration in chunk_defs:
        end_idx = min(frame_idx + num_frames_per_chunk, total_frames)
        chunk_data = all_frames[frame_idx:end_idx]
        if len(chunk_data) < num_frames_per_chunk:
            if len(chunk_data) == 0:
                # No frames for this chunk, create black frames
                chunk_data = np.zeros(
                    (num_frames_per_chunk, target_size, target_size, 3),
                    dtype=np.uint8,
                )
            else:
                padding = np.tile(
                    chunk_data[-1:], (num_frames_per_chunk - len(chunk_data), 1, 1, 1)
                )
                chunk_data = np.concatenate([chunk_data, padding], axis=0)
        chunk_frames.append(chunk_data)
        frame_idx = end_idx
    return chunk_frames
async def chunk_video_async(
    video_path: str,
    chunk_duration: float = 10.0,
    num_frames_per_chunk: int = 16,
    target_size: int = 384,
    use_single_ffmpeg: bool = False,
    ffmpeg_threads: int = 0,
) -> list[VideoChunk]:
    """
    Split video into fixed-duration chunks with frame extraction.
    Works with local files and URLs (including presigned S3 URLs).

    Args:
        video_path: Path to video file or URL
        chunk_duration: Duration of each chunk in seconds
        num_frames_per_chunk: Frames to extract per chunk
        target_size: Frame size
        use_single_ffmpeg: If True, extract all chunks in one FFmpeg call (faster).
            If False, use parallel FFmpeg calls per chunk.
        ffmpeg_threads: Number of threads for FFmpeg decoding (0 = auto)

    Returns:
        List of VideoChunk with frames loaded
    """
    # Get metadata (sync call, fast)
    metadata = await asyncio.to_thread(get_video_metadata, video_path)
    # Build chunk definitions: (index, start, duration) tuples covering the video.
    chunk_defs = []
    start = 0.0
    index = 0
    while start < metadata.duration:
        duration = min(chunk_duration, metadata.duration - start)
        # Skip very short final chunks
        if duration < 0.5:
            break
        chunk_defs.append((index, start, duration))
        start += chunk_duration
        index += 1
    if not chunk_defs:
        return []
    if use_single_ffmpeg:
        # Single FFmpeg call - more efficient, especially for URLs
        frame_results = await asyncio.to_thread(
            _extract_all_chunks_single_ffmpeg,
            video_path,
            chunk_defs,
            num_frames_per_chunk,
            target_size,
            ffmpeg_threads,
        )
    else:
        # Multiple parallel FFmpeg calls, limited to NUM_WORKERS concurrency
        semaphore = asyncio.Semaphore(NUM_WORKERS)
        async def extract_with_limit(idx, start, duration):
            # Cap concurrent ffmpeg subprocesses at NUM_WORKERS.
            async with semaphore:
                return await extract_frames_async(
                    video_path,
                    start_time=start,
                    duration=duration,
                    num_frames=num_frames_per_chunk,
                    target_size=target_size,
                    ffmpeg_threads=ffmpeg_threads,
                )
        extraction_tasks = [
            extract_with_limit(idx, start, duration)
            for idx, start, duration in chunk_defs
        ]
        # gather preserves input order, so frame_results aligns with chunk_defs.
        frame_results = await asyncio.gather(*extraction_tasks)
    # Build chunk objects
    chunks = [
        VideoChunk(
            index=idx,
            start_time=start,
            duration=duration,
            frames=frames,
        )
        for (idx, start, duration), frames in zip(chunk_defs, frame_results)
    ]
    return chunks
def chunk_video(
    video_path: str,
    chunk_duration: float = 10.0,
    num_frames_per_chunk: int = 16,
    target_size: int = 384,
    use_single_ffmpeg: bool = True,
    ffmpeg_threads: int = 0,
) -> list[VideoChunk]:
    """
    Split video into fixed-duration chunks.

    Synchronous counterpart of chunk_video_async; same chunking logic but
    sequential (or single-call) extraction.

    Args:
        video_path: Path to video file or URL
        chunk_duration: Duration of each chunk in seconds
        num_frames_per_chunk: Frames to extract per chunk
        target_size: Frame size
        use_single_ffmpeg: If True, extract all chunks in one FFmpeg call (faster).
            If False, use sequential FFmpeg calls per chunk.
        ffmpeg_threads: Number of threads for FFmpeg decoding (0 = auto)

    Returns:
        List of VideoChunk with frames loaded
    """
    metadata = get_video_metadata(video_path)
    # Build chunk definitions: (index, start, duration) tuples covering the video.
    chunk_defs = []
    start = 0.0
    index = 0
    while start < metadata.duration:
        duration = min(chunk_duration, metadata.duration - start)
        # Skip very short final chunks
        if duration < 0.5:
            break
        chunk_defs.append((index, start, duration))
        start += chunk_duration
        index += 1
    if not chunk_defs:
        return []
    if use_single_ffmpeg:
        # Single FFmpeg call - more efficient, especially for URLs
        frame_results = _extract_all_chunks_single_ffmpeg(
            video_path,
            chunk_defs,
            num_frames_per_chunk,
            target_size,
            ffmpeg_threads,
        )
    else:
        # Sequential FFmpeg calls (original approach)
        frame_results = []
        for idx, start, duration in chunk_defs:
            frames = extract_frames_ffmpeg(
                video_path,
                start_time=start,
                duration=duration,
                num_frames=num_frames_per_chunk,
                target_size=target_size,
                ffmpeg_threads=ffmpeg_threads,
            )
            frame_results.append(frames)
    # Build chunk objects
    chunks = [
        VideoChunk(
            index=idx,
            start_time=start,
            duration=duration,
            frames=frames,
        )
        for (idx, start, duration), frames in zip(chunk_defs, frame_results)
    ]
    return chunks
def frames_to_pil_list(frames: np.ndarray) -> list[Image.Image]:
    """Convert an (N, H, W, C) uint8 frames array into one PIL Image per frame."""
    return list(map(Image.fromarray, frames))
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/serve/tutorials/video-analysis/utils/video.py",
"license": "Apache License 2.0",
"lines": 389,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/_internal/cluster_autoscaler/fake_autoscaling_coordinator.py | import time
from dataclasses import dataclass
from typing import Callable, Dict, List, Optional
from .base_autoscaling_coordinator import (
AutoscalingCoordinator,
ResourceDict,
ResourceRequestPriority,
)
class FakeAutoscalingCoordinator(AutoscalingCoordinator):
    """A lightweight implementation for testing.

    This implementation always allocates the requested resources to the
    requester. It doesn't support the `priority` parameter.
    """

    @dataclass
    class Allocation:
        # Record of one requester's outstanding allocation.
        resources: List[ResourceDict]  # resources granted verbatim from the request
        expiration_time_s: float  # absolute time (per get_time clock) when the grant lapses
        request_remaining: bool  # whether to also hand out the configured `remaining` pool

    def __init__(
        self,
        get_time: Callable[[], float] = time.time,
        remaining: Optional[List[ResourceDict]] = None,
    ):
        """Create the fake.

        Args:
            get_time: Injectable clock, so tests can control expiration.
            remaining: Extra resources handed to every requester that set
                ``request_remaining=True``.
        """
        if remaining is None:
            remaining = []
        self._get_time = get_time
        self._remaining = remaining
        # Maps requester_id -> its latest (and only) Allocation.
        self._allocations: Dict[str, FakeAutoscalingCoordinator.Allocation] = {}

    def request_resources(
        self,
        requester_id: str,
        resources: List[ResourceDict],
        expire_after_s: float,
        request_remaining: bool = False,
        priority: ResourceRequestPriority = ResourceRequestPriority.MEDIUM,
    ) -> None:
        """Record the request; the fake grants everything asked for."""
        if priority != ResourceRequestPriority.MEDIUM:
            raise NotImplementedError(
                "This fake implementation doesn't support the `priority` parameter."
            )
        # Always accept the request and record it. A new request for the same
        # requester replaces the previous allocation.
        self._allocations[requester_id] = self.Allocation(
            resources=resources,
            expiration_time_s=self._get_time() + expire_after_s,
            request_remaining=request_remaining,
        )

    def cancel_request(self, requester_id: str):
        """Drop the requester's allocation, if any (no-op otherwise)."""
        if requester_id in self._allocations:
            del self._allocations[requester_id]

    def get_allocated_resources(self, requester_id: str) -> List[ResourceDict]:
        """Return the allocated resources if they haven't expired."""
        allocation = self._allocations.get(requester_id)
        # Case 1: no allocation.
        if allocation is None:
            return []
        # Case 2: request expired.
        if allocation.expiration_time_s < self._get_time():
            del self._allocations[requester_id]
            return []
        # Case 3: allocation still valid.
        allocated_resources = list(allocation.resources)
        if allocation.request_remaining:
            # Unlike DefaultAutoscalingCoordinator, this fake returns all remaining
            # resources to each requester to keep tests simple.
            allocated_resources.extend(self._remaining)
        return allocated_resources
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/cluster_autoscaler/fake_autoscaling_coordinator.py",
"license": "Apache License 2.0",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:release/nightly_tests/dataset/model_inference_pipeline_benchmark.py | """
Model Inference Pipeline Benchmark
This benchmark mimics a production ML inference pipeline with the following structure:
1. Read parquet data with configurable columns
2. Preprocessing with map_batches (CPU tasks) using Pandas
3. Inference with map_batches using actors (GPU) with concurrency control
4. Consume output
Key features mirrored from production:
- Separate worker configurations for preprocessing and inference
- Metadata column passthrough
- Extra output columns added during inference
"""
import argparse
import time
from dataclasses import dataclass
from typing import Any, Dict, List, Optional
import numpy as np
import pandas as pd
import torch
from benchmark import Benchmark, BenchmarkMetric
from transformers import AutoModel, AutoTokenizer
import ray
from ray.data import Dataset, ActorPoolStrategy
# Default HuggingFace model for inference
DEFAULT_MODEL_NAME = "sentence-transformers/all-MiniLM-L6-v2"
@dataclass
class WorkerConfig:
    """Configuration for a worker pool (preprocessing or inference)."""

    batch_size: int  # rows per map_batches batch
    num_cpus: float  # CPUs reserved per task/actor
    num_gpus: float  # GPUs reserved per task/actor (0 for CPU-only stages)
    # Actor pool sizing (only for inference actors)
    min_actors: Optional[int] = None
    max_actors: Optional[int] = None
@dataclass
class PipelineConfig:
    """Full pipeline configuration."""

    input_path: str  # parquet source path (local or S3)
    preprocessing_config: WorkerConfig  # CPU task pool settings
    inference_config: WorkerConfig  # GPU actor pool settings
    metadata_columns: List[str]  # columns passed through with a "metadata_" prefix
    feature_columns: List[str]  # numeric columns normalized during preprocessing
    text_columns: List[str]  # columns concatenated and tokenized for the model
    extra_output_columns: Dict[str, Any]  # static key/value pairs appended to every output row
    tokenizer_max_length: int = 128  # truncation length for tokenization
    model_name: str = DEFAULT_MODEL_NAME  # HuggingFace model id
def parse_args():
    """Build and parse the benchmark's command-line arguments."""
    parser = argparse.ArgumentParser(description="Model Inference Pipeline Benchmark")
    # Declarative argument table: (flag, add_argument kwargs).
    arg_specs = [
        (
            "--input-path",
            dict(
                default="s3://ray-benchmark-data/tpch/parquet/sf10/lineitem",
                help="Path to the input parquet data.",
            ),
        ),
        (
            "--preprocessing-batch-size",
            dict(type=int, default=4096, help="Batch size for preprocessing step."),
        ),
        (
            "--preprocessing-num-cpus",
            dict(type=float, default=1.0, help="CPUs per preprocessing task."),
        ),
        (
            "--inference-batch-size",
            dict(type=int, default=1024, help="Batch size for inference step."),
        ),
        (
            "--inference-num-cpus",
            dict(type=float, default=1.0, help="CPUs per inference actor."),
        ),
        (
            "--inference-num-gpus",
            dict(type=float, default=1.0, help="GPUs per inference actor."),
        ),
        (
            "--inference-min-actors",
            dict(type=int, default=1, help="Minimum number of inference actors."),
        ),
        (
            "--inference-max-actors",
            dict(type=int, default=10, help="Maximum number of inference actors."),
        ),
        (
            "--tokenizer-max-length",
            dict(type=int, default=128, help="Max sequence length for tokenization."),
        ),
        (
            "--model-name",
            dict(
                type=str,
                default=DEFAULT_MODEL_NAME,
                help="HuggingFace model name for inference.",
            ),
        ),
    ]
    for flag, kwargs in arg_specs:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
# =============================================================================
# Preprocessing Function (Pandas)
# =============================================================================
def preprocessing_task_pandas(
    batch: pd.DataFrame,
    metadata_columns: List[str],
    feature_columns: List[str],
    text_columns: List[str],
    metadata_prefix: str = "metadata_",
) -> pd.DataFrame:
    """
    Preprocessing task using Pandas.

    Mimics production preprocessing with:
    - Metadata columns passed through with prefix
    - Text columns passed through for model tokenization
    - Feature columns normalized (per-batch z-score)

    Columns missing from the batch are silently skipped.
    """
    present = batch.columns
    out = {}
    # Metadata passthrough, renamed with the configured prefix.
    out.update(
        {
            f"{metadata_prefix}{name}": batch[name]
            for name in metadata_columns
            if name in present
        }
    )
    # Text passthrough; tokenization happens later in the inference actor.
    out.update(
        {
            f"text_{name}": batch[name].fillna("").astype(str)
            for name in text_columns
            if name in present
        }
    )

    def _zscore(name: str) -> np.ndarray:
        # Coerce to numeric first: DECIMAL parquet columns arrive as object dtype.
        values = pd.to_numeric(batch[name], errors="coerce").values
        return (values - np.nanmean(values)) / (np.nanstd(values) + 1e-8)

    out.update(
        {f"feature_{name}": _zscore(name) for name in feature_columns if name in present}
    )
    # Stamp every row with the batch's preprocessing wall-clock time.
    out["preprocessing_timestamp"] = np.full(len(batch), time.time())
    return pd.DataFrame(out)
# =============================================================================
# Inference Actor with HuggingFace Model
# =============================================================================
class InferenceActor:
    """
    Stateful inference actor that performs GPU inference using HuggingFace models.

    Downloads model weights on initialization and performs inference on batches
    using the configured device (GPU or CPU).
    Supports metadata passthrough and extra output columns.
    """

    def __init__(
        self,
        model_name: str,
        text_columns: List[str],
        metadata_columns: List[str],
        extra_output_columns: Dict[str, Any],
        max_length: int = 128,
        device: str = "cuda",
    ):
        self.model_name = model_name
        self.text_columns = text_columns
        self.metadata_columns = metadata_columns
        self.extra_output_columns = extra_output_columns
        self.max_length = max_length
        # Fall back to CPU when CUDA is unavailable, regardless of `device`.
        self.device = torch.device(device if torch.cuda.is_available() else "cpu")
        self._init_model()

    def _init_model(self):
        """Download and initialize HuggingFace model on the appropriate device."""
        print(f"Loading HuggingFace model: {self.model_name}")
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        self.model = AutoModel.from_pretrained(self.model_name)
        self.model.to(self.device)
        # eval() disables dropout/batch-norm updates for deterministic inference.
        self.model.eval()
        print(f"Model loaded on device: {self.device}")

    def _mean_pooling(self, model_output, attention_mask):
        """Apply mean pooling to get sentence embeddings."""
        token_embeddings = model_output.last_hidden_state
        input_mask_expanded = (
            attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
        )
        # clamp guards against division by zero for fully-padded rows.
        return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(
            input_mask_expanded.sum(1), min=1e-9
        )

    @torch.inference_mode()
    def __call__(self, batch: pd.DataFrame) -> pd.DataFrame:
        """
        Run inference on a batch using HuggingFace model.

        Performs:
        - Text concatenation from configured columns
        - Tokenization using HuggingFace tokenizer
        - Model inference on GPU
        - Mean pooling to get embeddings
        - Metadata column passthrough
        - Extra output columns addition
        """
        batch_size = len(batch)
        result = {}
        # Pass through metadata columns (prefixed from preprocessing)
        for col in batch.columns:
            if col.startswith("metadata_"):
                result[col] = batch[col].values
        # Concatenate text columns into single text for each row
        text_col_names = [f"text_{col}" for col in self.text_columns]
        available_text_cols = [c for c in text_col_names if c in batch.columns]
        if available_text_cols:
            texts = (
                batch[available_text_cols].astype(str).agg(" ".join, axis=1).tolist()
            )
        else:
            # No text columns present: tokenize empty strings so shapes stay valid.
            texts = [""] * batch_size
        # Tokenize with HuggingFace tokenizer
        encoded = self.tokenizer(
            texts,
            padding=True,
            truncation=True,
            max_length=self.max_length,
            return_tensors="pt",
        )
        # Move to device
        input_ids = encoded["input_ids"].to(self.device)
        attention_mask = encoded["attention_mask"].to(self.device)
        # Run model inference
        model_output = self.model(input_ids=input_ids, attention_mask=attention_mask)
        # Get embeddings via mean pooling
        embeddings = self._mean_pooling(model_output, attention_mask)
        # Move results back to CPU
        embeddings_np = embeddings.cpu().numpy()
        # Store embeddings as list of arrays
        result["embeddings"] = [emb.tolist() for emb in embeddings_np]
        result["embedding_dim"] = np.full(batch_size, embeddings_np.shape[1])
        # Add extra output columns (static values from config)
        for col_name, col_value in self.extra_output_columns.items():
            result[col_name] = np.full(batch_size, col_value)
        # Add inference timestamp
        result["inference_timestamp"] = np.full(batch_size, time.time())
        return pd.DataFrame(result)
# =============================================================================
# Pipeline Execution
# =============================================================================
def preprocess_dataset(
    dataset: Dataset,
    config: PipelineConfig,
) -> Dataset:
    """Apply preprocessing to dataset using Pandas task.

    Stateless CPU tasks run preprocessing_task_pandas over pandas batches sized
    and resourced per config.preprocessing_config.
    """
    preprocessed = dataset.map_batches(
        preprocessing_task_pandas,
        fn_kwargs=dict(
            metadata_columns=config.metadata_columns,
            feature_columns=config.feature_columns,
            text_columns=config.text_columns,
            metadata_prefix="metadata_",
        ),
        batch_format="pandas",
        batch_size=config.preprocessing_config.batch_size,
        num_cpus=config.preprocessing_config.num_cpus,
    )
    # Name the dataset (presumably surfaces in Ray Data stats/dashboard — verify).
    preprocessed._set_name("preprocessed_data")
    return preprocessed
def infer_dataset(
    dataset: Dataset,
    config: PipelineConfig,
) -> Dataset:
    """Run inference on dataset using configured inference actor.

    Uses an autoscaling actor pool (min/max from config.inference_config); each
    actor loads the model once in its constructor and serves many batches.
    """
    inferred = dataset.map_batches(
        InferenceActor,
        fn_constructor_kwargs=dict(
            model_name=config.model_name,
            text_columns=config.text_columns,
            metadata_columns=config.metadata_columns,
            extra_output_columns=config.extra_output_columns,
            max_length=config.tokenizer_max_length,
            # Request CUDA only when GPUs are actually reserved for the actors.
            device="cuda" if config.inference_config.num_gpus > 0 else "cpu",
        ),
        batch_format="pandas",
        batch_size=config.inference_config.batch_size,
        compute=ActorPoolStrategy(
            min_size=config.inference_config.min_actors,
            max_size=config.inference_config.max_actors,
        ),
        num_cpus=config.inference_config.num_cpus,
        num_gpus=config.inference_config.num_gpus,
    )
    inferred._set_name("inference_output")
    return inferred
def execute_pipeline(
    dataset: Dataset,
    config: PipelineConfig,
) -> Dataset:
    """Execute the full end-to-end pipeline: preprocessing, then inference."""
    return infer_dataset(preprocess_dataset(dataset, config), config)
# =============================================================================
# Main Benchmark
# =============================================================================
def main(args):
    """Run the benchmark once and return a metrics dict for the harness."""
    print("Running model inference pipeline benchmark")
    print(f"  Input path: {args.input_path}")
    print(f"  Preprocessing batch size: {args.preprocessing_batch_size}")
    print(f"  Inference batch size: {args.inference_batch_size}")
    print(
        f"  Inference actors: min={args.inference_min_actors}, max={args.inference_max_actors}"
    )
    print(f"  Tokenizer max length: {args.tokenizer_max_length}")
    print(f"  Model: {args.model_name}")
    # Build pipeline configuration
    # Use TPC-H lineitem columns:
    # - column00, column01: metadata (l_orderkey, l_partkey)
    # - column04-07: numeric features (l_quantity, l_extendedprice, l_discount, l_tax)
    # - column08, column09: text columns (l_returnflag, l_linestatus) for tokenization
    config = PipelineConfig(
        input_path=args.input_path,
        preprocessing_config=WorkerConfig(
            batch_size=args.preprocessing_batch_size,
            num_cpus=args.preprocessing_num_cpus,
            num_gpus=0,
        ),
        inference_config=WorkerConfig(
            batch_size=args.inference_batch_size,
            num_cpus=args.inference_num_cpus,
            num_gpus=args.inference_num_gpus,
            min_actors=args.inference_min_actors,
            max_actors=args.inference_max_actors,
        ),
        metadata_columns=["column00", "column01"],
        feature_columns=["column04", "column05", "column06", "column07"],
        text_columns=["column08", "column09"],
        extra_output_columns={
            "model_version": "v1.0.0",
            "pipeline_id": "benchmark_run",
        },
        tokenizer_max_length=args.tokenizer_max_length,
        model_name=args.model_name,
    )
    start_time = time.time()
    # Load input data; only project the columns the pipeline actually uses.
    columns_to_load = list(
        set(config.metadata_columns + config.feature_columns + config.text_columns)
    )
    ds = ray.data.read_parquet(
        config.input_path,
        columns=columns_to_load,
    ).limit(15_000_000)  # cap input so runtime stays comparable across scale factors
    ds._set_name("input_data")
    # Execute end-to-end pipeline
    output_ds = execute_pipeline(ds, config)
    # Consume output by streaming batches; this is what triggers execution.
    total_rows = 0
    for batch in output_ds.iter_batches(batch_size=None, batch_format="pandas"):
        total_rows += len(batch)
    end_time = time.time()
    total_time = end_time - start_time
    throughput = total_rows / total_time if total_time > 0 else 0
    print(f"Total rows processed: {total_rows}")
    print(f"Total time (sec): {total_time:.2f}")
    print(f"Throughput (rows/sec): {throughput:.2f}")
    return {
        BenchmarkMetric.RUNTIME: total_time,
        BenchmarkMetric.THROUGHPUT: throughput,
        BenchmarkMetric.NUM_ROWS: total_rows,
        "preprocessing_batch_size": args.preprocessing_batch_size,
        "inference_batch_size": args.inference_batch_size,
        "inference_min_actors": args.inference_min_actors,
        "inference_max_actors": args.inference_max_actors,
        "tokenizer_max_length": args.tokenizer_max_length,
    }
if __name__ == "__main__":
    # Parse CLI flags, run the pipeline once under the Benchmark harness, and
    # persist the collected metrics.
    args = parse_args()
    benchmark = Benchmark()
    benchmark.run_fn("model-inference-pipeline", main, args)
    benchmark.write_result()
| {
"repo_id": "ray-project/ray",
"file_path": "release/nightly_tests/dataset/model_inference_pipeline_benchmark.py",
"license": "Apache License 2.0",
"lines": 376,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/tests/test_hash_shuffle_aggregator.py | """Unit tests for HashShuffleAggregator."""
from typing import Dict, Iterator, List
import pyarrow as pa
import pytest
from ray.data._internal.arrow_ops import transform_pyarrow
from ray.data._internal.execution.operators.hash_shuffle import (
HashShuffleAggregator,
ShuffleAggregation,
)
from ray.data._internal.planner.exchange.sort_task_spec import SortKey
from ray.data.block import Block
# Access underlying class for direct instantiation (bypassing Ray actor).
# This lets tests call methods synchronously without .remote()/ray.get().
_HashShuffleAggregatorClass = HashShuffleAggregator.__ray_actor_class__
def make_block(n: int = 10, offset: int = 0) -> pa.Table:
    """Build a one-column table of n consecutive ints starting at offset."""
    values = list(range(offset, offset + n))
    return pa.Table.from_pydict({"x": values})
def split_block(block: pa.Table, chunk_size: int) -> List[pa.Table]:
    """Split block into chunks of given size."""
    chunks = []
    start = 0
    while start < block.num_rows:
        chunks.append(block.slice(start, chunk_size))
        start += chunk_size
    return chunks
class MockCompactingAggregation(ShuffleAggregation):
    """Tracks compact/finalize calls and input blocks."""

    def __init__(self):
        # Shard count passed to each compact() call, in call order.
        self.compact_calls: List[int] = []
        # Last shards dict passed to finalize(), keyed by input sequence id.
        self.finalize_input: Dict[int, List[Block]] = {}

    @classmethod
    def is_compacting(cls):
        return True

    def compact(self, shards: List[Block]) -> Block:
        # Record how many shards were compacted, then merge them into one block.
        self.compact_calls.append(len(shards))
        return pa.concat_tables(shards) if shards else make_block(0)

    def finalize(self, shards: Dict[int, List[Block]]) -> Iterator[Block]:
        # Capture the input for assertions, then yield a single merged block.
        self.finalize_input = dict(shards)
        blocks = [b for bs in shards.values() for b in bs]
        yield pa.concat_tables(blocks) if blocks else make_block(0)
class MockNonCompactingAggregation(ShuffleAggregation):
    """Tracks finalize input blocks."""

    def __init__(self):
        # Last shards dict passed to finalize(), keyed by input sequence id.
        self.finalize_input: Dict[int, List[Block]] = {}

    @classmethod
    def is_compacting(cls):
        return False

    def compact(self, shards: List[Block]) -> Block:
        # Non-compacting aggregations must never be asked to compact.
        raise RuntimeError("Should not be called")

    def finalize(self, shards: Dict[int, List[Block]]) -> Iterator[Block]:
        # Capture the input for assertions, then yield a single merged block.
        self.finalize_input = dict(shards)
        blocks = [b for bs in shards.values() for b in bs]
        yield pa.concat_tables(blocks) if blocks else make_block(0)
class TestHashShuffleAggregator:
    """Exercises HashShuffleAggregator directly (no Ray actor indirection)."""

    def test_compacting_workflow(self, ray_start_regular_shared):
        """Tests compaction triggers, threshold doubling, multi-partition/sequence."""
        agg = MockCompactingAggregation()
        aggregator = _HashShuffleAggregatorClass(
            aggregator_id=0,
            num_input_seqs=2,
            target_partition_ids=[0, 1, 2],
            agg_factory=lambda: agg,
            target_max_block_size=None,
            # min threshold 3 so compaction triggers quickly in the test.
            min_max_shards_compaction_thresholds=(3, 2000),
        )
        # Pre-generate blocks: split a 100-row block into 10 chunks of 10 rows
        full_block = make_block(80)
        input_seq0_part0 = split_block(full_block, 10)

        def get_compaction_thresholds():
            """Helper to extract compaction thresholds from partition buckets."""
            # Thresholds are now per-partition in PartitionBucket
            return {
                part_id: bucket.compaction_threshold
                for part_id, bucket in aggregator._input_seq_partition_buckets[
                    0
                ].items()
                if bucket.compaction_threshold is not None
            }

        # Submit 2 blocks (below threshold=3) - no compaction
        for b in input_seq0_part0[:2]:
            aggregator.submit(0, 0, b)
        assert agg.compact_calls == []
        assert get_compaction_thresholds() == {0: 3, 1: 3, 2: 3}
        # Submit 3rd block - triggers compaction, threshold doubles
        aggregator.submit(0, 0, input_seq0_part0[2])
        assert agg.compact_calls == [3]
        assert get_compaction_thresholds() == {0: 6, 1: 3, 2: 3}
        # Submit 5 more (queue: 1+5=6) - triggers at new threshold
        for b in input_seq0_part0[3:8]:
            aggregator.submit(0, 0, b)
        assert agg.compact_calls == [3, 6]
        assert get_compaction_thresholds() == {0: 12, 1: 3, 2: 3}
        # Partition 1 has independent threshold (starts at 3)
        for b in split_block(make_block(30, offset=1000), 10):
            aggregator.submit(0, 1, b)
        assert agg.compact_calls == [3, 6, 3]
        assert get_compaction_thresholds() == {0: 12, 1: 6, 2: 3}
        # Multiple sequences (join scenario) - seq_id=1 for partition 0
        input_seq1_part0 = split_block(make_block(20, offset=2000), 10)
        for b in input_seq1_part0:
            aggregator.submit(1, 0, b)
        # Finalize partition 0 - receives blocks from both sequences
        results = list(aggregator.finalize(0))
        block, metadata = results
        assert len(agg.finalize_input) == 2  # dict with 2 sequences
        # Verify output equals concatenation of seq0 (first 8 chunks) + seq1.
        # Sort both sides since aggregation ordering isn't guaranteed.
        expected = transform_pyarrow.sort(
            pa.concat_tables(tables=[*input_seq0_part0, *input_seq1_part0]),
            sort_key=SortKey("x"),
        )
        assert transform_pyarrow.sort(block, sort_key=SortKey("x")) == expected
        # Empty partition
        results = list(aggregator.finalize(2))
        assert results[0] == make_block(0)

    def test_non_compacting_workflow(self, ray_start_regular_shared):
        """Tests non-compacting aggregation with and without block splitting."""
        # Without splitting
        full_block = make_block(50)
        input_seq = split_block(full_block, 10)
        aggregator = _HashShuffleAggregatorClass(
            aggregator_id=1,
            num_input_seqs=1,
            target_partition_ids=[0],
            agg_factory=MockNonCompactingAggregation,
            target_max_block_size=None,
        )
        for b in input_seq:
            aggregator.submit(0, 0, b)
        results = list(aggregator.finalize(0))
        block, metadata = results
        assert block == full_block
        # With splitting - output blocks should reconstruct to original
        full_block = make_block(500)
        input_seq = split_block(full_block, 100)
        aggregator = _HashShuffleAggregatorClass(
            aggregator_id=2,
            num_input_seqs=1,
            target_partition_ids=[0],
            agg_factory=MockNonCompactingAggregation,
            target_max_block_size=50,
        )
        for b in input_seq:
            aggregator.submit(0, 0, b)
        results = list(aggregator.finalize(0))
        # finalize() yields alternating (block, metadata) pairs; keep the blocks.
        output_blocks = [results[i] for i in range(0, len(results), 2)]
        assert pa.concat_tables(output_blocks) == full_block
if __name__ == "__main__":
    # Allow running this test module directly with `python <file>`.
    import sys
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_hash_shuffle_aggregator.py",
"license": "Apache License 2.0",
"lines": 142,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/tests/gpu_objects/test_gpu_objects_custom.py | import multiprocessing.shared_memory as shm
import pickle
import sys
from dataclasses import dataclass
from typing import Any, Dict, List, Optional
import numpy
import pytest
import ray
from ray.experimental import (
CommunicatorMetadata,
TensorTransportManager,
TensorTransportMetadata,
register_tensor_transport,
)
@dataclass
class ShmTransportMetadata(TensorTransportMetadata):
    """Transport metadata extended with a shared-memory segment handle."""

    # Name and byte size of the shared-memory segment holding the pickled
    # tensors; filled in by extract_tensor_transport_metadata().
    shm_name: Optional[str] = None
    shm_size: Optional[int] = None
@dataclass
class ShmCommunicatorMetadata(CommunicatorMetadata):
    """No extra communicator state is needed for this one-sided transport."""

    pass
class SharedMemoryTransport(TensorTransportManager):
    """Tensor transport that moves CPU arrays via named shared memory.

    The sender pickles the array list into a named shared-memory segment;
    the receiver attaches to the segment by name and unpickles it. Since the
    receiver pulls the data itself, the transport is one-sided and
    ``send_multiple_tensors`` is a no-op.
    """

    def __init__(self):
        # Maps object id -> SharedMemory segment created by this process, so
        # the segment can be closed and unlinked in garbage_collect().
        self.shared_memory_objects: Dict[str, shm.SharedMemory] = {}

    def tensor_transport_backend(self) -> str:
        return "shared_memory"

    @staticmethod
    def is_one_sided() -> bool:
        return True

    @staticmethod
    def can_abort_transport() -> bool:
        return False

    def actor_has_tensor_transport(self, actor: "ray.actor.ActorHandle") -> bool:
        # Every actor is assumed able to attach to shared memory.
        return True

    def extract_tensor_transport_metadata(
        self,
        obj_id: str,
        gpu_object: List[numpy.ndarray],
    ) -> TensorTransportMetadata:
        """Pickle ``gpu_object`` into a new shm segment and describe it."""
        tensor_meta = []
        if gpu_object:
            for tensor in gpu_object:
                tensor_meta.append((tensor.shape, tensor.dtype))
        serialized_gpu_object = pickle.dumps(gpu_object)
        size = len(serialized_gpu_object)
        # Shm name can't be as long as the obj_id, so we truncate it.
        # NOTE(review): truncating to 20 chars could collide for ids sharing
        # a prefix — acceptable for a test transport, verify before reuse.
        name = obj_id[:20]
        shm_obj = shm.SharedMemory(name=name, create=True, size=size)
        shm_obj.buf[:size] = serialized_gpu_object
        self.shared_memory_objects[obj_id] = shm_obj
        return ShmTransportMetadata(
            tensor_meta=tensor_meta, tensor_device="cpu", shm_name=name, shm_size=size
        )

    def get_communicator_metadata(
        self,
        src_actor: "ray.actor.ActorHandle",
        dst_actor: "ray.actor.ActorHandle",
        backend: Optional[str] = None,
    ) -> CommunicatorMetadata:
        # No rendezvous state required; the shm name travels in the
        # transport metadata instead.
        return ShmCommunicatorMetadata()

    def recv_multiple_tensors(
        self,
        obj_id: str,
        tensor_transport_metadata: TensorTransportMetadata,
        communicator_metadata: CommunicatorMetadata,
        target_buffers: Optional[List[Any]] = None,
    ):
        """Attach to the sender's shm segment and unpickle the arrays."""
        shm_name = tensor_transport_metadata.shm_name
        size = tensor_transport_metadata.shm_size
        shm_block = shm.SharedMemory(name=shm_name)
        recv_tensors = pickle.loads(shm_block.buf[:size])
        # Close (but do not unlink) the segment: the sender owns its
        # lifetime and destroys it in garbage_collect().
        shm_block.close()
        return recv_tensors

    def send_multiple_tensors(
        self,
        tensors: List[numpy.ndarray],
        tensor_transport_metadata: TensorTransportMetadata,
        communicator_metadata: CommunicatorMetadata,
    ):
        # One-sided transport: the receiver reads shared memory directly.
        pass

    def garbage_collect(
        self,
        obj_id: str,
        tensor_transport_meta: TensorTransportMetadata,
        tensors: List[numpy.ndarray],
    ):
        # Release and destroy the segment created for this object id.
        self.shared_memory_objects[obj_id].close()
        self.shared_memory_objects[obj_id].unlink()
        del self.shared_memory_objects[obj_id]

    def abort_transport(
        self,
        obj_id: str,
        communicator_metadata: CommunicatorMetadata,
    ):
        pass
def test_register_and_use_custom_transport(ray_start_regular):
    """End-to-end check that a user-registered transport moves actor results."""
    register_tensor_transport(
        "shared_memory", ["cpu"], SharedMemoryTransport, numpy.ndarray
    )

    @ray.remote
    class Actor:
        @ray.method(tensor_transport="shared_memory")
        def echo(self, data):
            return data

        def non_rdt_echo(self, data):
            return data

        def sum(self, data):
            return data.sum().item()

    # Classes defined in test files get pickled by ref. So we need to
    # explicitly pickle the transport class in this module by value.
    # Note that this doesn't happen if you define the transport class on the
    # driver, something with pytest convinces cloudpickle to pickle by ref.
    from ray import cloudpickle

    cloudpickle.register_pickle_by_value(sys.modules[SharedMemoryTransport.__module__])
    actors = [Actor.remote() for _ in range(2)]
    # A result produced on actor 0 is consumed by actor 1 via the transport.
    ref = actors[0].echo.remote(numpy.array([1, 2, 3]))
    result = actors[1].sum.remote(ref)
    assert ray.get(result) == 6
    # Test that non-rdt methods that return the data type still work.
    ref = actors[0].non_rdt_echo.remote(numpy.array([1, 2, 3]))
    result = actors[1].sum.remote(ref)
    assert ray.get(result) == 6
if __name__ == "__main__":
    # Allow running this test file directly, forwarding pytest's exit code.
    sys.exit(pytest.main(["-sv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/gpu_objects/test_gpu_objects_custom.py",
"license": "Apache License 2.0",
"lines": 124,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/dashboard/modules/metrics/default_impl.py | from ray.dashboard.modules.metrics.dashboards.common import DashboardConfig
def get_serve_dashboard_config() -> DashboardConfig:
    """Return the default dashboard panel configuration for Ray Serve."""
    # Imported lazily so the panels module is only loaded when requested.
    from ray.dashboard.modules.metrics.dashboards.serve_dashboard_panels import (
        serve_dashboard_config,
    )

    return serve_dashboard_config
# Anyscale overrides
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/dashboard/modules/metrics/default_impl.py",
"license": "Apache License 2.0",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/serve/tests/test_serve_with_tracing.py | import glob
import json
import os
import shutil
import sys
import pytest
from opentelemetry import trace
import ray
from ray import serve
from ray._common.test_utils import wait_for_condition
from ray.serve.schema import ReplicaState
from ray.util.tracing.setup_local_tmp_tracing import spans_dir
setup_tracing_path = "ray.util.tracing.setup_local_tmp_tracing:setup_tracing"
@pytest.fixture
def cleanup_spans():
    """Cleanup temporary spans_dir folder at beginning and end of test."""
    # Start each test from an empty spans directory.
    if os.path.exists(spans_dir):
        shutil.rmtree(spans_dir)
    os.makedirs(spans_dir, exist_ok=True)
    yield
    # Enable tracing only sets up tracing once per driver process.
    # We set ray.__traced__ to False here so that each
    # test will re-set up tracing.
    ray.__traced__ = False
    if os.path.exists(spans_dir):
        shutil.rmtree(spans_dir)
@pytest.fixture
def ray_serve_with_tracing(cleanup_spans):
    """Start Ray with tracing enabled and Serve."""
    # Tear down any prior cluster so the tracing hook takes effect on init.
    ray.shutdown()
    ray.init(_tracing_startup_hook=setup_tracing_path)
    yield
    serve.shutdown()
    # Shutdown the tracer provider to close file handles before cleanup_spans
    # tries to delete the spans directory.
    tracer_provider = trace.get_tracer_provider()
    if hasattr(tracer_provider, "shutdown"):
        tracer_provider.shutdown()
    ray.shutdown()
def get_span_list():
    """Collect every parseable JSON span recorded under the spans directory.

    Lines that fail to parse as JSON are skipped silently.
    """
    spans = []
    for path in glob.glob(f"{spans_dir}**/*.txt", recursive=True):
        with open(path) as span_file:
            for raw_line in span_file:
                try:
                    parsed = json.loads(raw_line)
                except json.JSONDecodeError:
                    continue
                spans.append(parsed)
    return spans
@pytest.mark.skipif(
    sys.platform == "win32", reason="Temp directory cleanup fails on Windows"
)
def test_deployment_remote_calls_with_tracing(ray_serve_with_tracing):
    """Remote calls on a traced deployment succeed and emit replica spans."""
    serve.start()

    # Create a deployment with custom methods
    @serve.deployment
    class TracedDeployment:
        def __init__(self):
            self.counter = 0

        def get_value(self):
            # The request context must carry a trace context when tracing is on.
            _ray_trace_ctx = serve.context._get_serve_request_context()._ray_trace_ctx
            assert _ray_trace_ctx is not None
            return 42

        def increment(self):
            _ray_trace_ctx = serve.context._get_serve_request_context()._ray_trace_ctx
            assert _ray_trace_ctx is not None
            self.counter += 1
            return self.counter

    # Deploy the application
    handle = serve.run(TracedDeployment.bind(), name="traced_app")

    # Wait for deployment to be ready
    def check_deployment_ready():
        status = serve.status()
        assert "traced_app" in status.applications
        app_status = status.applications["traced_app"]
        deployment_status = list(app_status.deployments.values())[0]
        num_running = deployment_status.replica_states.get(ReplicaState.RUNNING, 0)
        assert num_running == 1
        return True

    wait_for_condition(check_deployment_ready, timeout=15)
    # Make remote calls to the deployment methods
    # These should work without TypeError about _ray_trace_ctx
    result1 = handle.get_value.remote().result()
    assert result1 == 42
    result2 = handle.increment.remote().result()
    assert result2 == 1
    result3 = handle.increment.remote().result()
    assert result3 == 2

    # Verify that spans were generated for the calls
    def check_spans_generated():
        spans = get_span_list()
        assert len(spans) > 0, "No spans were generated"
        # ServeReplica actor spans should exist
        replica_spans = [s for s in spans if "ServeReplica" in s.get("name", "")]
        assert (
            len(replica_spans) > 0
        ), f"No ServeReplica spans found. Generated {len(spans)} total spans"
        return True

    wait_for_condition(check_spans_generated, timeout=10, retry_interval_ms=500)
if __name__ == "__main__":
    # Allow running this test file directly, forwarding pytest's exit code.
    sys.exit(pytest.main(["-v", "-s", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/test_serve_with_tracing.py",
"license": "Apache License 2.0",
"lines": 102,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/test_block_ref_bundler.py | from typing import Any, List
import numpy as np
import pandas as pd
import pytest
import ray
from ray.data._internal.execution.interfaces import RefBundle
from ray.data._internal.execution.operators.map_operator import BlockRefBundler
from ray.data._internal.execution.util import make_ref_bundles
from ray.data.block import BlockAccessor
from ray.tests.conftest import * # noqa
def _get_bundles(bundle: RefBundle):
    """Materialize each block in ``bundle`` as the list of its "id" values."""
    return [list(ray.get(block_ref)["id"]) for block_ref in bundle.block_refs]
def _make_ref_bundles(raw_bundles: List[List[List[Any]]]) -> List[RefBundle]:
    """Build RefBundles from nested lists of row values.

    Each innermost list becomes one single-column ("id") pandas block; each
    middle list becomes one bundle of those blocks.

    Args:
        raw_bundles: Nested row values, one inner list per block.

    Returns:
        One RefBundle per input bundle, owning its blocks.
    """
    rbs = []
    for raw_bundle in raw_bundles:
        blocks = []
        schema = None
        for raw_block in raw_bundle:
            block = pd.DataFrame({"id": raw_block})
            blocks.append(
                (ray.put(block), BlockAccessor.for_block(block).get_metadata())
            )
            # All blocks in a bundle share the single-column "id" schema, so
            # keeping the last one seen is sufficient.
            schema = BlockAccessor.for_block(block).schema()
        rb = RefBundle(blocks=blocks, owns_blocks=True, schema=schema)
        rbs.append(rb)
    return rbs
@pytest.mark.parametrize(
    "target,in_bundles,expected_bundles",
    [
        (
            # Unit target, should leave unchanged.
            1,
            [
                # Input bundles
                [[1]],
                [[2]],
                [[3, 4]],
                [[5]],
            ],
            [
                # Output bundles
                [[1]],
                [[2]],
                [[3, 4]],
                [[5]],
            ],
        ),
        (
            # No target, should leave unchanged.
            None,
            [
                # Input bundles
                [[1]],
                [[2]],
                [[3, 4]],
                [[5]],
            ],
            [
                # Output bundles
                [[1]],
                [[2]],
                [[3, 4]],
                [[5]],
            ],
        ),
        (
            # Proper handling of empty blocks
            2,
            [
                # Input bundles
                [[1]],
                [[]],
                [[]],
                [[2, 3]],
                [[]],
                [[]],
            ],
            [
                # Output bundles
                [[1], [], [], [2, 3]],
                [[], []],
            ],
        ),
        (
            # Test bundling, finalizing, passing, leftovers, etc.
            2,
            [
                # Input bundles
                [[1], [2]],
                [[3, 4, 5]],
                [[6], [7]],
                [[8]],
                [[9, 10], [11]],
            ],
            [[[1], [2]], [[3, 4, 5]], [[6], [7]], [[8], [9, 10], [11]]],
        ),
        (
            # Test bundling, finalizing, passing, leftovers, etc.
            3,
            [
                # Input bundles
                [[1]],
                [[2, 3]],
                [[4, 5, 6, 7]],
                [[8, 9], [10, 11]],
            ],
            [
                # Output bundles
                [[1], [2, 3]],
                [[4, 5, 6, 7]],
                [[8, 9], [10, 11]],
            ],
        ),
    ],
)
def test_block_ref_bundler_basic(target, in_bundles, expected_bundles):
    """Feed bundles through a BlockRefBundler and check the emitted grouping."""
    # Test that the bundler creates the expected output bundles.
    bundler = BlockRefBundler(target)
    bundles = _make_ref_bundles(in_bundles)
    out_bundles = []
    for bundle in bundles:
        bundler.add_bundle(bundle)
        while bundler.has_bundle():
            out_bundle = _get_bundles(bundler.get_next_bundle()[1])
            out_bundles.append(out_bundle)
    bundler.done_adding_bundles()
    # Flush any leftover rows retained while waiting to reach the target.
    if bundler.has_bundle():
        out_bundle = _get_bundles(bundler.get_next_bundle()[1])
        out_bundles.append(out_bundle)
    # Assert expected output
    assert out_bundles == expected_bundles
    # Assert that all bundles have been ingested
    assert bundler.num_blocks() == 0
    for bundle, expected in zip(out_bundles, expected_bundles):
        assert bundle == expected
@pytest.mark.parametrize(
    "target,n,num_bundles,num_out_bundles,out_bundle_size",
    [
        (5, 20, 20, 4, 5),
        (5, 24, 10, 4, 6),
        (8, 16, 4, 2, 8),
    ],
)
def test_block_ref_bundler_uniform(
    target, n, num_bundles, num_out_bundles, out_bundle_size
):
    # Test that the bundler creates the expected number of bundles with the expected
    # size.
    bundler = BlockRefBundler(target)
    data = np.arange(n)
    # Split 0..n-1 into `num_bundles` roughly equal input bundles.
    pre_bundles = [arr.tolist() for arr in np.array_split(data, num_bundles)]
    bundles = make_ref_bundles(pre_bundles)
    out_bundles = []
    for bundle in bundles:
        bundler.add_bundle(bundle)
        while bundler.has_bundle():
            _, out_bundle = bundler.get_next_bundle()
            out_bundles.append(out_bundle)
    bundler.done_adding_bundles()
    # Flush any remainder after all inputs are added.
    if bundler.has_bundle():
        _, out_bundle = bundler.get_next_bundle()
        out_bundles.append(out_bundle)
    assert len(out_bundles) == num_out_bundles
    for out_bundle in out_bundles:
        assert out_bundle.num_rows() == out_bundle_size
    # Flatten all emitted rows and verify nothing was lost or reordered.
    flat_out = [
        i
        for bundle in out_bundles
        for block, _ in bundle.blocks
        for i in list(ray.get(block)["id"])
    ]
    assert flat_out == list(range(n))
def test_block_ref_bundler_get_next_regression():
    """Test that all remaining bundles are appropriately preserved after `get_next_bundle`."""
    # Create 4 blocks, each with 1 row
    bundler = BlockRefBundler(min_rows_per_bundle=2)
    bundles = _make_ref_bundles([[[1]], [[2]], [[3]], [[4]]])
    # Add all bundles at once
    for b in bundles:
        bundler.add_bundle(b)
    # Buffer now has 4 rows total
    assert bundler.num_blocks() == 4
    # First get_next_bundle should return bundles with 2 rows
    assert bundler.has_bundle()
    _, out_bundle = bundler.get_next_bundle()
    assert out_bundle.num_rows() == 2
    assert _get_bundles(out_bundle) == [[1], [2]]
    # Remainder should have 2 bundles with 2 rows total
    # BUG: Without the break statement, only 1 bundle (1 row) remains
    assert bundler.num_blocks() == 2, (
        f"Expected 2 rows remaining, got {bundler._bundle_buffer_size}. "
        "This indicates the remainder was overwritten in the loop."
    )
    # Second get_next_bundle (after finalization) should return remaining bundles
    bundler.done_adding_bundles()
    assert bundler.has_bundle()
    _, out_bundle = bundler.get_next_bundle()
    assert out_bundle.num_rows() == 2
    assert _get_bundles(out_bundle) == [[3], [4]]
    # Buffer should now be empty
    assert bundler.num_blocks() == 0
if __name__ == "__main__":
    import sys

    # Allow running this test file directly, forwarding pytest's exit code.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_block_ref_bundler.py",
"license": "Apache License 2.0",
"lines": 208,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/test_limit_operator.py | from unittest.mock import MagicMock
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import ray
from ray.data._internal.execution.interfaces.task_context import TaskContext
from ray.data._internal.execution.operators.input_data_buffer import InputDataBuffer
from ray.data._internal.execution.operators.limit_operator import LimitOperator
from ray.data._internal.execution.operators.map_operator import _per_block_limit_fn
from ray.data._internal.execution.streaming_executor import StreamingExecutor
from ray.data._internal.execution.util import make_ref_bundles
from ray.data._internal.logical.optimizers import get_execution_plan
from ray.data.context import DataContext
from ray.data.tests.util import run_op_tasks_sync
from ray.tests.conftest import * # noqa
def test_limit_operator(ray_start_regular_shared):
    """Test basic functionalities of LimitOperator."""
    num_refs = 3
    num_rows_per_block = 3
    total_rows = num_refs * num_rows_per_block
    # Test limits with different values, from 0 to more than input size.
    limits = list(range(0, total_rows + 2))
    for limit in limits:
        refs = make_ref_bundles([[i] * num_rows_per_block for i in range(num_refs)])
        input_op = InputDataBuffer(DataContext.get_current(), refs)
        limit_op = LimitOperator(limit, input_op, DataContext.get_current())
        # Wrap (not replace) the method so real behavior runs while calls
        # are counted.
        limit_op.mark_execution_finished = MagicMock(
            wraps=limit_op.mark_execution_finished
        )
        if limit == 0:
            # If the limit is 0, the operator should be completed immediately.
            assert limit_op.has_completed()
            assert limit_op._limit_reached()
        cur_rows = 0
        loop_count = 0
        while input_op.has_next() and not limit_op._limit_reached():
            loop_count += 1
            assert not limit_op.has_completed(), limit
            assert not limit_op.has_execution_finished(), limit
            limit_op.add_input(input_op.get_next(), 0)
            while limit_op.has_next():
                # Drain the outputs. So the limit operator
                # will be completed when the limit is reached.
                limit_op.get_next()
            cur_rows += num_rows_per_block
            if cur_rows >= limit:
                assert limit_op.mark_execution_finished.call_count == 1, limit
                assert limit_op.has_completed(), limit
                assert limit_op._limit_reached(), limit
                assert limit_op.has_execution_finished(), limit
            else:
                assert limit_op.mark_execution_finished.call_count == 0, limit
                assert not limit_op.has_completed(), limit
                assert not limit_op._limit_reached(), limit
                assert not limit_op.has_execution_finished(), limit
        limit_op.mark_execution_finished()
        # After inputs done, the number of output bundles
        # should be the same as the number of `add_input`s.
        assert limit_op.num_outputs_total() == loop_count, limit
        assert limit_op.has_completed(), limit
def test_limit_operator_memory_leak_fix(ray_start_regular_shared, tmp_path):
    """Test that LimitOperator properly drains upstream output queues.

    This test verifies the memory leak fix by directly using StreamingExecutor
    to access the actual topology and check queued blocks after execution.
    """
    # Write 100 small parquet files (5 rows each) so the read produces many
    # blocks, most of which the limit should discard.
    for i in range(100):
        data = [{"id": i * 5 + j, "value": f"row_{i * 5 + j}"} for j in range(5)]
        table = pa.Table.from_pydict(
            {"id": [row["id"] for row in data], "value": [row["value"] for row in data]}
        )
        parquet_file = tmp_path / f"test_data_{i}.parquet"
        pq.write_table(table, str(parquet_file))
    parquet_files = [str(tmp_path / f"test_data_{i}.parquet") for i in range(100)]
    ds = (
        ray.data.read_parquet(parquet_files, override_num_blocks=100)
        .limit(5)
        .map(lambda x: x)
    )
    execution_plan = ds._plan
    physical_plan = get_execution_plan(execution_plan._logical_plan)
    # Use StreamingExecutor directly to have access to the actual topology
    executor = StreamingExecutor(DataContext.get_current())
    output_iterator = executor.execute(physical_plan.dag)
    # Collect all results and count rows
    total_rows = 0
    for bundle in output_iterator:
        for block_ref in bundle.block_refs:
            block = ray.get(block_ref)
            total_rows += block.num_rows
    assert (
        total_rows == 5
    ), f"Expected exactly 5 rows after limit(5), but got {total_rows}"
    # Find the ReadParquet operator's OpState
    topology = executor._topology
    read_parquet_op_state = None
    for op, op_state in topology.items():
        if "ReadParquet" in op.name:
            read_parquet_op_state = op_state
            break
    # Fail with a clear message instead of an AttributeError on None if the
    # operator naming ever changes.
    assert read_parquet_op_state is not None, (
        f"ReadParquet operator not found in topology: "
        f"{[op.name for op in topology]}"
    )
    # The upstream output queue must be fully drained once the limit is hit;
    # leftover bundles here indicate the memory leak has regressed.
    output_queue_size = len(read_parquet_op_state.output_queue)
    assert output_queue_size == 0, f"Expected 0 items, but got {output_queue_size}."
def test_limit_estimated_num_output_bundles():
    """Check LimitOperator's output-bundle estimate before and after input end."""
    # Test limit operator estimation
    input_op = InputDataBuffer(
        DataContext.get_current(), make_ref_bundles([[i, i] for i in range(100)])
    )
    op = LimitOperator(100, input_op, DataContext.get_current())
    while input_op.has_next():
        op.add_input(input_op.get_next(), 0)
        run_op_tasks_sync(op)
    assert op._estimated_num_output_bundles == 50
    op.all_inputs_done()
    # 2 rows per bundle, 100 / 2 = 50 blocks output
    assert op._estimated_num_output_bundles == 50
    # Test limit operator estimation where: limit > # of rows
    input_op = InputDataBuffer(
        DataContext.get_current(), make_ref_bundles([[i, i] for i in range(100)])
    )
    op = LimitOperator(300, input_op, DataContext.get_current())
    while input_op.has_next():
        op.add_input(input_op.get_next(), 0)
        run_op_tasks_sync(op)
    assert op._estimated_num_output_bundles == 100
    op.all_inputs_done()
    # all blocks are outputted
    assert op._estimated_num_output_bundles == 100
@pytest.mark.parametrize(
    "blocks_data,per_block_limit,expected_output",
    [
        # Test case 1: Single block, limit less than block size
        ([[1, 2, 3, 4, 5]], 3, [[1, 2, 3]]),
        # Test case 2: Single block, limit equal to block size
        ([[1, 2, 3]], 3, [[1, 2, 3]]),
        # Test case 3: Single block, limit greater than block size
        ([[1, 2]], 5, [[1, 2]]),
        # Test case 4: Multiple blocks, limit spans across blocks
        ([[1, 2], [3, 4], [5, 6]], 3, [[1, 2], [3]]),
        # Test case 5: Multiple blocks, limit exactly at block boundary
        ([[1, 2], [3, 4]], 2, [[1, 2]]),
        # Test case 6: Empty blocks
        ([], 5, []),
        # Test case 7: Zero limit
        ([[1, 2, 3]], 0, []),
    ],
)
def test_per_block_limit_fn(blocks_data, per_block_limit, expected_output):
    """Test the _per_block_limit_fn function with various inputs."""
    import pandas as pd

    # One single-column pandas block per input list.
    input_blocks = [pd.DataFrame({"value": rows}) for rows in blocks_data]
    # Minimal mock TaskContext carrying only the required fields.
    task_ctx = TaskContext(
        op_name="test", task_idx=0, target_max_block_size_override=None
    )
    limited = list(_per_block_limit_fn(input_blocks, task_ctx, per_block_limit))
    # Compare the surviving rows block-by-block.
    assert [blk["value"].tolist() for blk in limited] == expected_output
if __name__ == "__main__":
    import sys

    # Allow running this test file directly, forwarding pytest's exit code.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_limit_operator.py",
"license": "Apache License 2.0",
"lines": 164,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/test_map_operator.py | import time
from typing import Iterable
import numpy as np
import pandas as pd
import pytest
import ray
from ray._common.test_utils import wait_for_condition
from ray.data._internal.compute import ActorPoolStrategy, TaskPoolStrategy
from ray.data._internal.execution.interfaces import (
ExecutionOptions,
)
from ray.data._internal.execution.interfaces.task_context import TaskContext
from ray.data._internal.execution.operators.actor_pool_map_operator import (
ActorPoolMapOperator,
)
from ray.data._internal.execution.operators.input_data_buffer import InputDataBuffer
from ray.data._internal.execution.operators.map_operator import (
MapOperator,
)
from ray.data._internal.execution.operators.task_pool_map_operator import (
TaskPoolMapOperator,
)
from ray.data._internal.execution.util import make_ref_bundles
from ray.data._internal.output_buffer import OutputBlockSizeOption
from ray.data._internal.stats import Timer
from ray.data.block import Block
from ray.data.context import (
DataContext,
)
from ray.data.tests.util import (
_get_blocks,
_mul2_transform,
_take_outputs,
create_map_transformer_from_block_fn,
run_one_op_task,
run_op_tasks_sync,
)
from ray.tests.conftest import * # noqa
_mul2_map_data_prcessor = create_map_transformer_from_block_fn(_mul2_transform)
def _run_map_operator_test(
    ray_start_regular_shared,
    use_actors,
    preserve_order,
    transform_fn,
    output_block_size_option,
    expected_blocks,
    test_name="TestMapper",
):
    """Shared test function for MapOperator output unbundling tests.

    Runs ``transform_fn`` over 10 single-row blocks bundled together and
    asserts the operator emits ``expected_blocks`` output bundles.
    """
    # Create with inputs.
    input_op = InputDataBuffer(
        DataContext.get_current(), make_ref_bundles([[i] for i in range(10)])
    )
    compute_strategy = ActorPoolStrategy() if use_actors else TaskPoolStrategy()
    transformer = create_map_transformer_from_block_fn(
        transform_fn,
        output_block_size_option=output_block_size_option,
    )
    op = MapOperator.create(
        transformer,
        input_op=input_op,
        data_context=DataContext.get_current(),
        name=test_name,
        compute_strategy=compute_strategy,
        # Send everything in a single bundle of 10 blocks.
        min_rows_per_bundle=10,
    )
    # Feed data and block on exec.
    op.start(ExecutionOptions(preserve_order=preserve_order))
    if use_actors:
        # Wait for actors to be ready before adding inputs.
        run_op_tasks_sync(op, only_existing=True)
    while input_op.has_next():
        assert op.can_add_input()
        op.add_input(input_op.get_next(), 0)
    op.all_inputs_done()
    run_op_tasks_sync(op)
    # Check that bundles are unbundled in the output queue.
    outputs = []
    while op.has_next():
        outputs.append(op.get_next())
    assert len(outputs) == expected_blocks
    assert op.has_completed()
@pytest.mark.parametrize("use_actors", [False, True])
def test_map_operator_streamed(ray_start_regular_shared, use_actors):
    """Streaming execution doubles every block and reports memory metrics."""
    # Create with inputs.
    input_op = InputDataBuffer(
        DataContext.get_current(),
        make_ref_bundles([[np.ones(1024) * i] for i in range(100)]),
    )
    compute_strategy = ActorPoolStrategy() if use_actors else TaskPoolStrategy()
    op = MapOperator.create(
        _mul2_map_data_prcessor,
        input_op,
        DataContext.get_current(),
        name="TestMapper",
        compute_strategy=compute_strategy,
    )
    # Feed data and implement streaming exec.
    output = []
    op.start(ExecutionOptions(actor_locality_enabled=True))
    if use_actors:
        # Wait for actors to be ready before adding inputs.
        run_op_tasks_sync(op, only_existing=True)
    while input_op.has_next():
        # If actor pool at capacity run 1 task and allow it to complete
        while not op.can_add_input():
            run_one_op_task(op)
        op.add_input(input_op.get_next(), 0)
    # Complete ingesting inputs
    op.all_inputs_done()
    run_op_tasks_sync(op)
    assert op.has_execution_finished()
    # NOTE: Op is not considered completed until its outputs are drained
    assert not op.has_completed()
    # Fetch all outputs
    while op.has_next():
        ref = op.get_next()
        assert ref.owns_blocks, ref
        _get_blocks(ref, output)
    assert op.has_completed()
    expected = [[np.ones(1024) * i * 2] for i in range(100)]
    # Sort both sides by the first element since output order is not
    # guaranteed here.
    output_sorted = sorted(output, key=lambda x: np.asarray(x[0]).flat[0])
    expected_sorted = sorted(expected, key=lambda x: np.asarray(x[0]).flat[0])
    assert np.array_equal(output_sorted, expected_sorted)
    metrics = op.metrics.as_dict()
    assert metrics["obj_store_mem_freed"] == pytest.approx(832200, 0.5), metrics
    # Locality metrics only exist for the actor-pool strategy.
    if use_actors:
        assert "locality_hits" in metrics, metrics
        assert "locality_misses" in metrics, metrics
    else:
        assert "locality_hits" not in metrics, metrics
        assert "locality_misses" not in metrics, metrics
def test_map_operator_actor_locality_stats(ray_start_regular_shared):
    """With locality enabled, all bundles should hit a local actor."""
    # Create with inputs.
    input_op = InputDataBuffer(
        DataContext.get_current(),
        make_ref_bundles([[np.ones(100) * i] for i in range(100)]),
    )
    compute_strategy = ActorPoolStrategy()
    op = MapOperator.create(
        _mul2_map_data_prcessor,
        input_op=input_op,
        data_context=DataContext.get_current(),
        name="TestMapper",
        compute_strategy=compute_strategy,
        min_rows_per_bundle=None,
    )
    # Feed data and implement streaming exec.
    output = []
    options = ExecutionOptions()
    options.preserve_order = True
    options.actor_locality_enabled = True
    op.start(options)
    # Wait for actors to be ready before adding inputs.
    run_op_tasks_sync(op, only_existing=True)
    while input_op.has_next():
        # If actor pool at capacity run 1 task and allow it to complete
        while not op.can_add_input():
            run_one_op_task(op)
        op.add_input(input_op.get_next(), 0)
    # Complete ingesting inputs
    op.all_inputs_done()
    run_op_tasks_sync(op)
    assert op.has_execution_finished()
    # NOTE: Op is not considered completed until its outputs are drained
    assert not op.has_completed()
    # Fetch all outputs
    while op.has_next():
        ref = op.get_next()
        assert ref.owns_blocks, ref
        _get_blocks(ref, output)
    assert op.has_completed()
    # Check equivalent to bulk execution in order.
    assert np.array_equal(output, [[np.ones(100) * i * 2] for i in range(100)])
    metrics = op.metrics.as_dict()
    assert metrics["obj_store_mem_freed"] == pytest.approx(92900, 0.5), metrics
    # Check e2e locality manager working.
    assert metrics["locality_hits"] == 100, metrics
    assert metrics["locality_misses"] == 0, metrics
@pytest.mark.parametrize("use_actors", [False, True])
def test_map_operator_min_rows_per_bundle(ray_start_regular_shared, use_actors):
    """min_rows_per_bundle=5 must group the 10 one-row blocks into two 5-row tasks."""

    # Simple sanity check of batching behavior.
    def _check_batch(block_iter: Iterable[Block], ctx) -> Iterable[Block]:
        block_iter = list(block_iter)
        # Each task must see exactly one 5-block bundle (ids 0-4 or 5-9).
        assert len(block_iter) == 5, block_iter
        data = [block["id"][0] for block in block_iter]
        assert data == list(range(5)) or data == list(range(5, 10)), data
        for block in block_iter:
            yield block

    # Create with inputs.
    input_op = InputDataBuffer(
        DataContext.get_current(), make_ref_bundles([[i] for i in range(10)])
    )
    compute_strategy = ActorPoolStrategy() if use_actors else TaskPoolStrategy()
    op = MapOperator.create(
        create_map_transformer_from_block_fn(_check_batch),
        input_op=input_op,
        data_context=DataContext.get_current(),
        name="TestMapper",
        compute_strategy=compute_strategy,
        min_rows_per_bundle=5,
    )
    # Feed data and block on exec.
    op.start(ExecutionOptions())
    if use_actors:
        # Wait for actors to be ready before adding inputs.
        run_op_tasks_sync(op, only_existing=True)
    while input_op.has_next():
        # Should be able to launch 2 tasks:
        # - Input: 10 blocks of 1 row each
        # - Bundled into 2 bundles (5 rows each)
        assert op.can_add_input()
        op.add_input(input_op.get_next(), 0)
    op.all_inputs_done()
    run_op_tasks_sync(op)
    _take_outputs(op)
    assert op.has_completed()
@pytest.mark.parametrize("use_actors", [False, True])
@pytest.mark.parametrize("preserve_order", [False, True])
@pytest.mark.parametrize(
    "target_max_block_size,num_expected_blocks", [(1, 10), (2**20, 1), (None, 1)]
)
def test_map_operator_output_unbundling(
    ray_start_regular_shared,
    use_actors,
    preserve_order,
    target_max_block_size,
    num_expected_blocks,
):
    """Test that MapOperator's output queue unbundles bundles from tasks."""

    # Identity transform: output block count is driven purely by the
    # target_max_block_size setting.
    def noop(block_iter: Iterable[Block], ctx) -> Iterable[Block]:
        for block in block_iter:
            yield block

    _run_map_operator_test(
        ray_start_regular_shared,
        use_actors,
        preserve_order,
        noop,
        OutputBlockSizeOption.of(target_max_block_size=target_max_block_size),
        num_expected_blocks,
    )
@pytest.mark.parametrize("preserve_order", [False, True])
@pytest.mark.parametrize(
    "output_block_size_option,expected_blocks",
    [
        # Test target_max_block_size
        (OutputBlockSizeOption.of(target_max_block_size=1), 10),
        (OutputBlockSizeOption.of(target_max_block_size=2**20), 1),
        (OutputBlockSizeOption.of(target_max_block_size=None), 1),
        # Test target_num_rows_per_block
        (OutputBlockSizeOption.of(target_num_rows_per_block=1), 10),
        (OutputBlockSizeOption.of(target_num_rows_per_block=5), 2),
        (OutputBlockSizeOption.of(target_num_rows_per_block=10), 1),
        (OutputBlockSizeOption.of(target_num_rows_per_block=None), 1),
        # Test disable_block_shaping
        (OutputBlockSizeOption.of(disable_block_shaping=True), 10),
        (OutputBlockSizeOption.of(disable_block_shaping=False), 1),
        # Test combinations
        (
            OutputBlockSizeOption.of(
                target_max_block_size=1, target_num_rows_per_block=5
            ),
            10,
        ),
        (
            OutputBlockSizeOption.of(
                target_max_block_size=2**20, disable_block_shaping=True
            ),
            10,
        ),
        (
            OutputBlockSizeOption.of(
                target_num_rows_per_block=5, disable_block_shaping=True
            ),
            10,
        ),
    ],
)
def test_map_operator_output_block_size_options(
    ray_start_regular_shared,
    preserve_order,
    output_block_size_option,
    expected_blocks,
):
    """Test MapOperator with various OutputBlockSizeOption configurations."""

    # Identity transform so the block count depends only on the option.
    def noop(block_iter: Iterable[Block], ctx) -> Iterable[Block]:
        for block in block_iter:
            yield block

    _run_map_operator_test(
        ray_start_regular_shared,
        use_actors=False,
        preserve_order=preserve_order,
        transform_fn=noop,
        output_block_size_option=output_block_size_option,
        expected_blocks=expected_blocks,
    )
@pytest.mark.parametrize("preserve_order", [False, True])
def test_map_operator_disable_block_shaping_with_batches(
    ray_start_regular_shared,
    preserve_order,
):
    """Test MapOperator with disable_block_shaping=True using batch operations."""

    def batch_transform(batch_iter, ctx):
        for batch in batch_iter:
            # Simple transformation: add 1 to each value
            if hasattr(batch, "to_pandas"):
                df = batch.to_pandas()
                df = df + 1
                yield df
            else:
                # Pass through batches that are not Arrow-like.
                yield batch

    _run_map_operator_test(
        ray_start_regular_shared,
        use_actors=False,
        preserve_order=preserve_order,
        transform_fn=batch_transform,
        output_block_size_option=OutputBlockSizeOption.of(disable_block_shaping=True),
        expected_blocks=10,  # With disable_block_shaping=True, we expect 10 blocks
        test_name="TestBatchMapper",
    )
@pytest.mark.parametrize("use_actors", [False, True])
def test_map_operator_ray_args(shutdown_only, use_actors):
    """Custom ray_remote_args (num_gpus=1) must be honored without hanging."""
    ray.shutdown()
    # Only one GPU and zero CPUs, so tasks must request the GPU to run.
    ray.init(num_cpus=0, num_gpus=1)
    # Create with inputs.
    input_op = InputDataBuffer(
        DataContext.get_current(), make_ref_bundles([[i] for i in range(10)])
    )
    compute_strategy = ActorPoolStrategy(size=1) if use_actors else TaskPoolStrategy()
    op = MapOperator.create(
        _mul2_map_data_prcessor,
        input_op=input_op,
        data_context=DataContext.get_current(),
        name="TestMapper",
        compute_strategy=compute_strategy,
        ray_remote_args={"num_cpus": 0, "num_gpus": 1},
    )
    # Feed data and block on exec.
    op.start(ExecutionOptions())
    if use_actors:
        # Wait for the actor to start.
        run_op_tasks_sync(op)
    while input_op.has_next():
        if use_actors:
            # For actors, we need to check capacity before adding input
            # and process tasks when the actor pool is at capacity.
            while not op.can_add_input():
                run_one_op_task(op)
        assert op.can_add_input()
        op.add_input(input_op.get_next(), 0)
    op.all_inputs_done()
    run_op_tasks_sync(op)
    # Check we don't hang and complete with num_gpus=1.
    outputs = _take_outputs(op)
    expected = [[i * 2] for i in range(10)]
    assert sorted(outputs) == expected, f"Expected {expected}, got {outputs}"
    assert op.has_completed()
@pytest.mark.parametrize("use_actors", [False, True])
def test_map_operator_shutdown(shutdown_only, use_actors):
    """Shutting down a MapOperator mid-task must release its resources.

    The transform sleeps effectively forever, so the single GPU is only
    returned to the cluster if shutdown actually cancels the task (or
    force-kills the actor).
    """
    ray.shutdown()
    ray.init(num_cpus=0, num_gpus=1)
    def _sleep(block_iter: Iterable[Block]) -> Iterable[Block]:
        # Simulates a hung task; never yields any output.
        time.sleep(999)
    # Create with inputs.
    input_op = InputDataBuffer(
        DataContext.get_current(), make_ref_bundles([[i] for i in range(10)])
    )
    compute_strategy = ActorPoolStrategy(size=1) if use_actors else TaskPoolStrategy()
    op = MapOperator.create(
        create_map_transformer_from_block_fn(_sleep),
        input_op=input_op,
        data_context=DataContext.get_current(),
        name="TestMapper",
        compute_strategy=compute_strategy,
        ray_remote_args={"num_cpus": 0, "num_gpus": 1},
    )
    # Start one task and then cancel.
    op.start(ExecutionOptions())
    if use_actors:
        # Wait for the actor to start.
        run_op_tasks_sync(op)
    op.add_input(input_op.get_next(), 0)
    assert op.num_active_tasks() == 1
    # Regular Ray tasks can be interrupted/cancelled, so graceful shutdown works.
    # Actors running time.sleep() cannot be interrupted gracefully and need ray.kill() to release resources.
    # After proper shutdown, both should return the GPU to ray.available_resources().
    force_shutdown = use_actors
    op.shutdown(timer=Timer(), force=force_shutdown)
    # Tasks/actors should be cancelled/killed.
    wait_for_condition(lambda: (ray.available_resources().get("GPU", 0) == 1.0))
@pytest.mark.parametrize(
    "compute,expected",
    [
        (TaskPoolStrategy(), TaskPoolMapOperator),
        (ActorPoolStrategy(), ActorPoolMapOperator),
    ],
)
def test_map_operator_pool_delegation(compute, expected):
    """``MapOperator.create`` should return the pool implementation that
    matches the supplied compute strategy."""
    buffer = InputDataBuffer(
        DataContext.get_current(), make_ref_bundles([[i] for i in range(100)])
    )
    created = MapOperator.create(
        _mul2_map_data_prcessor,
        input_op=buffer,
        data_context=DataContext.get_current(),
        name="TestMapper",
        compute_strategy=compute,
    )
    assert isinstance(created, expected)
@pytest.mark.parametrize("use_actors", [False, True])
def test_map_kwargs(ray_start_regular_shared, use_actors):
    """Test propagating additional kwargs to map tasks."""
    foo = 1
    # Large-ish payload so passing by ObjectRef is meaningful.
    bar = np.random.random(1024 * 1024)
    kwargs = {
        "foo": foo,  # Pass by value
        "bar": ray.put(bar),  # Pass by ObjectRef
    }
    def map_fn(block_iter: Iterable[Block], ctx: TaskContext) -> Iterable[Block]:
        # The kwargs must arrive via the task context inside the task.
        nonlocal foo, bar
        assert ctx.kwargs["foo"] == foo
        # bar should be automatically deref'ed.
        assert np.array_equal(ctx.kwargs["bar"], bar)
        yield from block_iter
    input_op = InputDataBuffer(
        DataContext.get_current(),
        make_ref_bundles([[i] for i in range(10)]),
    )
    compute_strategy = ActorPoolStrategy() if use_actors else TaskPoolStrategy()
    op = MapOperator.create(
        create_map_transformer_from_block_fn(map_fn),
        input_op=input_op,
        data_context=DataContext.get_current(),
        name="TestMapper",
        compute_strategy=compute_strategy,
    )
    op.add_map_task_kwargs_fn(lambda: kwargs)
    op.start(ExecutionOptions())
    if use_actors:
        # Wait for the actor to start.
        run_op_tasks_sync(op)
    while input_op.has_next():
        if use_actors:
            # For actors, we need to check capacity before adding input
            # and process tasks when the actor pool is at capacity.
            while not op.can_add_input():
                run_one_op_task(op)
            assert op.can_add_input()
        op.add_input(input_op.get_next(), 0)
    op.all_inputs_done()
    run_op_tasks_sync(op)
    # Draining outputs raises if any in-task assertion above failed.
    _take_outputs(op)
    assert op.has_completed()
@pytest.mark.parametrize(
    "target_max_block_size, expected_num_outputs_per_task",
    [
        # 5 blocks (8b each) // 1 = 5 outputs / task
        [1, 5],
        # 5 blocks (8b each) // 1024 = 1 output / task
        [1024, 1],
        # All outputs combined in a single output
        [None, 1],
    ],
)
def test_map_estimated_num_output_bundles(
    target_max_block_size,
    expected_num_outputs_per_task,
):
    """The operator's output-bundle estimate should converge to
    (outputs per task) * (number of tasks) once tasks start finishing."""
    # Test map operator estimation
    input_op = InputDataBuffer(
        DataContext.get_current(), make_ref_bundles([[i] for i in range(100)])
    )
    def yield_five(block_iter: Iterable[Block], ctx) -> Iterable[Block]:
        # Ignores its input and always emits five 1-row blocks.
        for i in range(5):
            yield pd.DataFrame({"id": [i]})
    min_rows_per_bundle = 10
    # 100 inputs -> 100 / 10 = 10 tasks
    num_tasks = 10
    op = MapOperator.create(
        create_map_transformer_from_block_fn(
            yield_five,
            # Limit single block to hold no more than 1 byte
            output_block_size_option=OutputBlockSizeOption.of(
                target_max_block_size=target_max_block_size,
            ),
        ),
        input_op=input_op,
        data_context=DataContext.get_current(),
        name="TestEstimatedNumBlocks",
        min_rows_per_bundle=min_rows_per_bundle,
    )
    op.start(ExecutionOptions())
    while input_op.has_next():
        op.add_input(input_op.get_next(), 0)
        if op.metrics.num_inputs_received % min_rows_per_bundle == 0:
            # enough inputs for a task bundle
            run_op_tasks_sync(op)
            # Estimate should already be correct after the first task batch.
            assert (
                op._estimated_num_output_bundles
                == expected_num_outputs_per_task * num_tasks
            )
    op.all_inputs_done()
    assert op._estimated_num_output_bundles == expected_num_outputs_per_task * num_tasks
def test_map_estimated_blocks_split():
    """With an additional split factor of 2, the estimated number of output
    bundles should stay at 100 (10 tasks * 5 outputs * 2 splits)."""
    # Test read output splitting
    min_rows_per_bundle = 10
    input_op = InputDataBuffer(
        DataContext.get_current(),
        make_ref_bundles(
            [[i, i + 1] for i in range(100)]
        ),  # create 2-row blocks so split_blocks can split into 2 blocks
    )
    def yield_five(block_iter: Iterable[Block], ctx) -> Iterable[Block]:
        # Ignores its input and always emits five 1-row blocks.
        for i in range(5):
            yield pd.DataFrame({"id": [i]})
    op = MapOperator.create(
        create_map_transformer_from_block_fn(
            yield_five,
            # NOTE: Disable output block-shaping to keep blocks from being
            # combined
            # NOTE(review): sibling tests pass this via
            # OutputBlockSizeOption.of(...); presumably the factory accepts
            # both forms -- verify against create_map_transformer_from_block_fn.
            disable_block_shaping=True,
        ),
        input_op=input_op,
        data_context=DataContext.get_current(),
        name="TestEstimatedNumBlocksSplit",
        min_rows_per_bundle=min_rows_per_bundle,
    )
    op.set_additional_split_factor(2)
    op.start(ExecutionOptions())
    while input_op.has_next():
        op.add_input(input_op.get_next(), 0)
        if op.metrics.num_inputs_received % min_rows_per_bundle == 0:
            # enough inputs for a task bundle
            run_op_tasks_sync(op)
            assert op._estimated_num_output_bundles == 100
    op.all_inputs_done()
    # Each output block is split in 2, so the number of blocks double.
    assert op._estimated_num_output_bundles == 100
def test_operator_metrics():
    """Exhaustively checks MapOperator metrics (inputs, outputs, tasks,
    object-store accounting) after each input is added, all tasks are run,
    and all outputs are drained."""
    NUM_INPUTS = 100
    NUM_BLOCKS_PER_TASK = 5
    MIN_ROWS_PER_BUNDLE = 10
    inputs = make_ref_bundles([[i] for i in range(NUM_INPUTS)])
    input_op = InputDataBuffer(DataContext.get_current(), inputs)
    def map_fn(block_iter: Iterable[Block], ctx) -> Iterable[Block]:
        # Ignores its input and always emits NUM_BLOCKS_PER_TASK 1-row blocks.
        for i in range(NUM_BLOCKS_PER_TASK):
            yield pd.DataFrame({"id": [i]})
    op = MapOperator.create(
        create_map_transformer_from_block_fn(
            map_fn,
            output_block_size_option=OutputBlockSizeOption.of(
                target_max_block_size=1,
            ),
        ),
        input_op=input_op,
        data_context=DataContext.get_current(),
        name="TestEstimatedNumBlocks",
        min_rows_per_bundle=MIN_ROWS_PER_BUNDLE,
    )
    op.start(ExecutionOptions())
    num_outputs_taken = 0
    bytes_outputs_taken = 0
    for i in range(len(inputs)):
        # Add an input, run all tasks, and take all outputs.
        op.add_input(input_op.get_next(), 0)
        run_op_tasks_sync(op)
        while op.has_next():
            output = op.get_next()
            num_outputs_taken += 1
            bytes_outputs_taken += output.size_bytes()
        # A task is only submitted once MIN_ROWS_PER_BUNDLE inputs accumulate.
        num_tasks_submitted = (i + 1) // MIN_ROWS_PER_BUNDLE
        metrics = op.metrics
        # Check input metrics
        assert metrics.num_inputs_received == i + 1, i
        assert metrics.bytes_inputs_received == sum(
            inputs[k].size_bytes() for k in range(i + 1)
        ), i
        assert (
            metrics.num_task_inputs_processed
            == num_tasks_submitted * MIN_ROWS_PER_BUNDLE
        ), i
        assert metrics.bytes_task_inputs_processed == sum(
            inputs[k].size_bytes()
            for k in range(num_tasks_submitted * MIN_ROWS_PER_BUNDLE)
        ), i
        # Check outputs metrics
        assert num_outputs_taken == num_tasks_submitted * NUM_BLOCKS_PER_TASK, i
        assert metrics.num_task_outputs_generated == num_outputs_taken, i
        assert metrics.bytes_task_outputs_generated == bytes_outputs_taken, i
        assert metrics.num_outputs_taken == num_outputs_taken, i
        assert metrics.bytes_outputs_taken == bytes_outputs_taken, i
        assert metrics.num_outputs_of_finished_tasks == num_outputs_taken, i
        assert metrics.bytes_outputs_of_finished_tasks == bytes_outputs_taken, i
        # Check task metrics
        assert metrics.num_tasks_submitted == num_tasks_submitted, i
        assert metrics.num_tasks_running == 0, i
        assert metrics.num_tasks_have_outputs == num_tasks_submitted, i
        assert metrics.num_tasks_finished == num_tasks_submitted, i
        # Check object store metrics
        assert metrics.obj_store_mem_freed == metrics.bytes_task_inputs_processed, i
if __name__ == "__main__":
    import sys
    # Allow running this test file directly with `python <file>`.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_map_operator.py",
"license": "Apache License 2.0",
"lines": 606,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/test_output_splitter.py | import collections
import itertools
import random
import pytest
import ray
from ray.data._internal.execution.interfaces import ExecutionOptions
from ray.data._internal.execution.operators.input_data_buffer import InputDataBuffer
from ray.data._internal.execution.operators.output_splitter import OutputSplitter
from ray.data._internal.execution.util import make_ref_bundles
from ray.data.context import DataContext
from ray.tests.conftest import * # noqa
@pytest.mark.parametrize("equal", [False, True])
@pytest.mark.parametrize("chunk_size", [1, 10])
def test_split_operator(ray_start_regular_shared, equal, chunk_size):
    """Without locality hints, OutputSplitter should assign block i to split
    i % num_splits; with equal=True all splits are truncated to the same
    length."""
    num_input_blocks = 100
    num_splits = 3
    # Add this many input blocks each time.
    # Make sure it is greater than num_splits * 2,
    # so we can test the output order of `OutputSplitter.get_next`.
    num_add_input_blocks = 10
    input_op = InputDataBuffer(
        DataContext.get_current(),
        make_ref_bundles([[i] * chunk_size for i in range(num_input_blocks)]),
    )
    op = OutputSplitter(
        input_op,
        num_splits,
        equal=equal,
        data_context=DataContext.get_current(),
    )
    # Feed data and implement streaming exec.
    output_splits = [[] for _ in range(num_splits)]
    op.start(ExecutionOptions())
    while input_op.has_next():
        # Interleave adding inputs with draining outputs to exercise
        # streaming behavior.
        for _ in range(num_add_input_blocks):
            if not input_op.has_next():
                break
            op.add_input(input_op.get_next(), 0)
        while op.has_next():
            ref = op.get_next()
            assert ref.owns_blocks, ref
            for block_ref in ref.block_refs:
                assert ref.output_split_idx is not None
                output_splits[ref.output_split_idx].extend(
                    list(ray.get(block_ref)["id"])
                )
    op.all_inputs_done()
    # Expected: round-robin assignment of block j to split j % num_splits.
    expected_splits = [[] for _ in range(num_splits)]
    for i in range(num_splits):
        for j in range(i, num_input_blocks, num_splits):
            expected_splits[i].extend([j] * chunk_size)
    if equal:
        min_len = min(len(expected_splits[i]) for i in range(num_splits))
        for i in range(num_splits):
            expected_splits[i] = expected_splits[i][:min_len]
    for i in range(num_splits):
        assert output_splits[i] == expected_splits[i], (
            output_splits[i],
            expected_splits[i],
        )
@pytest.mark.parametrize("equal", [False, True])
@pytest.mark.parametrize("random_seed", list(range(10)))
def test_split_operator_random(ray_start_regular_shared, equal, random_seed):
    """Randomized block sizes: equal mode must yield exactly num_rows // 3
    rows per split; non-equal mode must not drop any rows."""
    random.seed(random_seed)
    bundles = make_ref_bundles([[i] * random.randint(0, 10) for i in range(100)])
    total_rows = sum(b.num_rows() for b in bundles)
    source = InputDataBuffer(DataContext.get_current(), bundles)
    splitter = OutputSplitter(
        source, 3, equal=equal, data_context=DataContext.get_current()
    )
    # Push every input through, then drain all outputs.
    collected = collections.defaultdict(list)
    splitter.start(ExecutionOptions())
    while source.has_next():
        splitter.add_input(source.get_next(), 0)
    splitter.all_inputs_done()
    while splitter.has_next():
        bundle = splitter.get_next()
        assert bundle.owns_blocks, bundle
        for block_ref in bundle.block_refs:
            collected[bundle.output_split_idx].extend(list(ray.get(block_ref)["id"]))
    if equal:
        sizes = [len(collected[i]) for i in range(3)]
        assert sizes == [total_rows // 3] * 3
    else:
        assert sum(len(collected[i]) for i in range(3)) == total_rows, collected
def test_split_operator_locality_hints(ray_start_regular_shared):
    """With locality hints and a faked location oracle, every row must land
    on the split whose hint matches the row's (fake) node."""
    input_op = InputDataBuffer(
        DataContext.get_current(), make_ref_bundles([[i] for i in range(10)])
    )
    op = OutputSplitter(
        input_op,
        2,
        equal=False,
        data_context=DataContext.get_current(),
        locality_hints=["node1", "node2"],
    )
    def get_fake_loc(item):
        # Deterministic fake placement: 5 rows per node.
        assert isinstance(item, int), item
        if item in [0, 1, 4, 5, 8]:
            return "node1"
        else:
            return "node2"
    def get_bundle_loc(bundle):
        # NOTE(review): accesses `bundle.blocks[0][0]` while sibling tests use
        # `bundle.block_refs[0]` -- presumably equivalent accessors; verify.
        block = ray.get(bundle.blocks[0][0])
        fval = list(block["id"])[0]
        return [get_fake_loc(fval)]
    # Monkey-patch the splitter's location lookup with the fake oracle.
    op._get_locations = get_bundle_loc
    # Feed data and implement streaming exec.
    output_splits = collections.defaultdict(list)
    op.start(ExecutionOptions(actor_locality_enabled=True))
    while input_op.has_next():
        op.add_input(input_op.get_next(), 0)
    op.all_inputs_done()
    while op.has_next():
        ref = op.get_next()
        assert ref.owns_blocks, ref
        for block_ref in ref.block_refs:
            output_splits[ref.output_split_idx].extend(list(ray.get(block_ref)["id"]))
    # Every row must be routed to the split matching its fake node.
    total = 0
    for i in range(2):
        if i == 0:
            node = "node1"
        else:
            node = "node2"
        split = output_splits[i]
        for item in split:
            assert get_fake_loc(item) == node
            total += 1
    assert total == 10, total
    assert "all objects local" in op.progress_str()
@pytest.mark.parametrize("equal", [False, True])
@pytest.mark.parametrize("random_seed", list(range(10)))
def test_split_operator_with_locality(ray_start_regular_shared, equal, random_seed):
    """Test locality-based dispatching with equal=True and equal=False modes.
    This test verifies that the OutputSplitter:
    1. Correctly buffers data to ensure equal distribution when equal=True
    2. Respects locality hints in both modes
    3. Yields blocks incrementally when locality is matched (streaming behavior)
    4. The fix ensures that _can_safely_dispatch correctly calculates remaining
       buffer requirements.
    """
    random.seed(random_seed)
    # Create bundles with varying sizes to test buffer management
    input_bundles = make_ref_bundles([[i] * random.randint(1, 10) for i in range(100)])
    num_inputs = sum(x.num_rows() for x in input_bundles)
    input_op = InputDataBuffer(DataContext.get_current(), input_bundles)
    op = OutputSplitter(
        input_op,
        3,
        equal=equal,
        data_context=DataContext.get_current(),
        locality_hints=["node0", "node1", "node2"],
    )
    # Mock locality function: distribute items across 3 nodes
    def _map_row_to_node(first_row_id_val) -> str:
        return f"node{first_row_id_val % 3}"
    def _get_fake_bundle_loc(bundle):
        block = ray.get(bundle.block_refs[0])
        first_row_id_val = block["id"][0]
        return [_map_row_to_node(first_row_id_val)]
    # Monkey-patch the splitter's location lookup with the fake oracle.
    op._get_locations = _get_fake_bundle_loc
    # Feed data and implement streaming exec
    output_splits = [[] for _ in range(3)]
    yielded_incrementally = 0
    op.start(ExecutionOptions(actor_locality_enabled=True))
    while input_op.has_next():
        op.add_input(input_op.get_next(), 0)
        # Drain some outputs to simulate streaming consumption
        while op.has_next():
            yielded_incrementally += 1
            ref = op.get_next()
            assert ref.owns_blocks, ref
            for block_ref in ref.block_refs:
                output_splits[ref.output_split_idx].extend(
                    list(ray.get(block_ref)["id"])
                )
    op.all_inputs_done()
    # Collect remaining outputs
    while op.has_next():
        ref = op.get_next()
        assert ref.owns_blocks, ref
        for block_ref in ref.block_refs:
            output_splits[ref.output_split_idx].extend(list(ray.get(block_ref)["id"]))
    # Verify streaming behavior: outputs should be yielded before all inputs are done
    # With locality hints, we should see outputs during input phase
    assert yielded_incrementally > 0, (
        f"Expected incremental output with locality hints, but got 0 outputs during "
        f"{len(input_bundles)} input blocks. This suggests buffering all data instead of streaming."
    )
    # Verify equal distribution when equal=True
    if equal:
        actual = [len(output_splits[i]) for i in range(3)]
        expected = [num_inputs // 3] * 3
        assert (
            actual == expected
        ), f"Expected equal distribution {expected}, got {actual}"
    else:
        # In non-equal mode, verify all data is output with correct row IDs
        all_output_row_ids = set(itertools.chain.from_iterable(output_splits))
        # Reconstruct expected row IDs from the input bundles
        expected_row_ids = set()
        for b in input_bundles:
            id_col = ray.get(b.block_refs[0])["id"]
            expected_row_ids.update(list(id_col))
        assert all_output_row_ids == expected_row_ids
    # Verify locality was respected (most items should be on their preferred node)
    locality_hits = 0
    total = 0
    for split_idx in range(3):
        actual_node = f"node{split_idx}"
        for row_id in output_splits[split_idx]:
            total += 1
            expected_node = _map_row_to_node(row_id)
            assert expected_node in ["node0", "node1", "node2"], expected_node
            if expected_node == actual_node:
                locality_hits += 1
    # Should have excellent locality since bundles are dispatched based on locality hints.
    # With perfect locality we'd get 100%, but buffering for equal distribution and
    # occasional forced dispatches when buffer is full may cause some misses.
    # We expect at least 85% locality hit rate, which validates the feature is working.
    locality_ratio = locality_hits / total if total > 0 else 0
    # NOTE: 90% is an observed locality ratio that should be fixed for this test
    assert locality_ratio >= 0.85, (
        f"Locality ratio {locality_ratio:.2f} too low. "
        f"Expected >=85% with locality-aware dispatching. "
        f"Hits: {locality_hits}/{total}"
    )
if __name__ == "__main__":
    import sys
    # Allow running this test file directly with `python <file>`.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_output_splitter.py",
"license": "Apache License 2.0",
"lines": 232,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/serve/_private/broker.py | # This module provides broker clients for querying queue lengths from message brokers.
# Adapted from Flower's broker.py (https://github.com/mher/flower/blob/master/flower/utils/broker.py)
# with the following modification:
# - Added close() method to BrokerBase and RedisBase for resource cleanup
import json
import logging
import numbers
import socket
from urllib.parse import quote, unquote, urljoin, urlparse
from tornado import httpclient, ioloop
from ray.serve._private.constants import SERVE_LOGGER_NAME
try:
import redis
except ImportError:
redis = None
logger = logging.getLogger(SERVE_LOGGER_NAME)
class BrokerBase:
    """Common broker-URL parsing shared by all broker clients.

    Splits ``broker_url`` into host, port, vhost (URL path minus the leading
    slash) and URL-unquoted credentials. Subclasses implement :meth:`queues`.
    """

    def __init__(self, broker_url, *_, **__):
        parsed = urlparse(broker_url)
        self.host = parsed.hostname
        self.port = parsed.port
        # The path, minus its leading "/", names the vhost / database.
        self.vhost = parsed.path[1:]
        user = parsed.username
        pwd = parsed.password
        # Credentials may be percent-encoded in the URL; None stays None.
        self.username = unquote(user) if user else user
        self.password = unquote(pwd) if pwd else pwd

    async def queues(self, names):
        raise NotImplementedError

    def close(self):
        """Close any open connections. Override in subclasses as needed."""
        pass
class RabbitMQ(BrokerBase):
    """RabbitMQ broker client backed by the HTTP management API (not AMQP)."""
    def __init__(self, broker_url, http_api, io_loop=None, **__):
        super().__init__(broker_url)
        self.io_loop = io_loop or ioloop.IOLoop.instance()
        self.host = self.host or "localhost"
        self.port = self.port or 15672
        # NOTE: operator precedence -- this parses as
        # `(quote(self.vhost, "") or "/") if self.vhost != "/" else self.vhost`,
        # i.e. the default vhost "/" is kept verbatim, an empty vhost becomes
        # "/", and any other vhost is fully percent-encoded.
        self.vhost = quote(self.vhost, "") or "/" if self.vhost != "/" else self.vhost
        self.username = self.username or "guest"
        self.password = self.password or "guest"
        if not http_api:
            http_api = f"http://{self.username}:{self.password}@{self.host}:{self.port}/api/{self.vhost}"
        # Validation is lenient by design: a bad URL is logged but still kept.
        try:
            self.validate_http_api(http_api)
        except ValueError:
            logger.error("Invalid broker api url: %s", http_api)
        self.http_api = http_api
    async def queues(self, names):
        """Return management-API queue dicts for the queues in ``names``.

        Returns [] on connection/HTTP errors; raises via ``rethrow()`` on a
        non-200 response.
        """
        url = urljoin(self.http_api, "queues/" + self.vhost)
        # Credentials embedded in the API URL take precedence over the
        # broker-URL credentials.
        api_url = urlparse(self.http_api)
        username = unquote(api_url.username or "") or self.username
        password = unquote(api_url.password or "") or self.password
        http_client = httpclient.AsyncHTTPClient()
        try:
            response = await http_client.fetch(
                url,
                auth_username=username,
                auth_password=password,
                connect_timeout=1.0,
                request_timeout=2.0,
                validate_cert=False,
            )
        except (socket.error, httpclient.HTTPError) as e:
            logger.error("RabbitMQ management API call failed: %s", e)
            return []
        finally:
            http_client.close()
        if response.code == 200:
            info = json.loads(response.body.decode())
            return [x for x in info if x["name"] in names]
        response.rethrow()
    @classmethod
    def validate_http_api(cls, http_api):
        """Raise ValueError unless ``http_api`` is an http(s) URL."""
        url = urlparse(http_api)
        if url.scheme not in ("http", "https"):
            raise ValueError(f"Invalid http api schema: {url.scheme}")
class RedisBase(BrokerBase):
    """Shared logic for Redis-backed broker clients.

    A logical queue maps to several Redis lists, one per priority step; the
    reported queue length is the sum of their LLENs.
    """

    DEFAULT_SEP = "\x06\x16"
    DEFAULT_PRIORITY_STEPS = [0, 3, 6, 9]

    def __init__(self, broker_url, *_, **kwargs):
        super().__init__(broker_url)
        self.redis = None
        if not redis:
            raise ImportError("redis library is required")
        options = kwargs.get("broker_options", {})
        self.priority_steps = options.get(
            "priority_steps", self.DEFAULT_PRIORITY_STEPS
        )
        self.sep = options.get("sep", self.DEFAULT_SEP)
        self.broker_prefix = options.get("global_keyprefix", "")

    def _q_for_pri(self, queue, pri):
        """Return the Redis list name for ``queue`` at priority ``pri``."""
        if pri not in self.priority_steps:
            raise ValueError("Priority not in priority steps")
        # Priority 0 (falsy) uses the bare queue name; others get "<sep><pri>".
        return f"{queue}{self.sep}{pri}" if pri else queue

    async def queues(self, names):
        """Return [{"name": ..., "messages": ...}] for each queue in ``names``."""
        return [
            {
                "name": name,
                "messages": sum(
                    self.redis.llen(self.broker_prefix + self._q_for_pri(name, pri))
                    for pri in self.priority_steps
                ),
            }
            for name in names
        ]

    def close(self):
        """Close the Redis connection."""
        if self.redis is not None:
            self.redis.close()
            self.redis = None
class Redis(RedisBase):
    """Plain (non-TLS) Redis broker client over TCP."""

    def __init__(self, broker_url, *args, **kwargs):
        super().__init__(broker_url, *args, **kwargs)
        self.host = self.host or "localhost"
        self.port = self.port or 6379
        self.vhost = self._prepare_virtual_host(self.vhost)
        self.redis = self._get_redis_client()

    def _prepare_virtual_host(self, vhost):
        """Map the URL vhost to an integer Redis database index."""
        if isinstance(vhost, numbers.Integral):
            return vhost
        if not vhost or vhost == "/":
            return 0
        if vhost.startswith("/"):
            vhost = vhost[1:]
        try:
            return int(vhost)
        except ValueError as exc:
            raise ValueError(
                f"Database is int between 0 and limit - 1, not {vhost}"
            ) from exc

    def _get_redis_client_args(self):
        # Connection parameters derived from the parsed broker URL.
        return {
            "host": self.host,
            "port": self.port,
            "db": self.vhost,
            "username": self.username,
            "password": self.password,
        }

    def _get_redis_client(self):
        return redis.Redis(**self._get_redis_client_args())
class RedisSentinel(RedisBase):
    """Redis broker client that resolves the master through Redis Sentinel."""

    def __init__(self, broker_url, *args, **kwargs):
        super().__init__(broker_url, *args, **kwargs)
        options = kwargs.get("broker_options", {})
        use_ssl = kwargs.get("broker_use_ssl", None)
        self.host = self.host or "localhost"
        self.port = self.port or 26379
        self.vhost = self._prepare_virtual_host(self.vhost)
        self.master_name = self._prepare_master_name(options)
        self.redis = self._get_redis_client(options, use_ssl)

    def _prepare_virtual_host(self, vhost):
        """Map the URL vhost to an integer Redis database index."""
        if isinstance(vhost, numbers.Integral):
            return vhost
        if not vhost or vhost == "/":
            return 0
        if vhost.startswith("/"):
            vhost = vhost[1:]
        try:
            return int(vhost)
        except ValueError as exc:
            raise ValueError(
                f"Database is int between 0 and limit - 1, not {vhost}"
            ) from exc

    def _prepare_master_name(self, broker_options):
        """Return the required Sentinel master name from broker options."""
        try:
            return broker_options["master_name"]
        except KeyError as exc:
            raise ValueError("master_name is required for Sentinel broker") from exc

    def _get_redis_client(self, broker_options, broker_use_ssl):
        conn_kwargs = {
            "password": self.password,
            "sentinel_kwargs": broker_options.get("sentinel_kwargs"),
        }
        if isinstance(broker_use_ssl, dict):
            conn_kwargs["ssl"] = True
            conn_kwargs.update(broker_use_ssl)
        # Ask Sentinel for the current master and connect to it.
        sentinel = redis.sentinel.Sentinel(
            [(self.host, self.port)], **conn_kwargs
        )
        return sentinel.master_for(self.master_name)
class RedisSocket(RedisBase):
    """Redis broker client over a Unix domain socket.

    The URL path (stored as ``vhost`` without its leading slash) is the
    socket path.
    """

    def __init__(self, broker_url, *args, **kwargs):
        super().__init__(broker_url, *args, **kwargs)
        socket_path = "/" + self.vhost
        self.redis = redis.Redis(
            unix_socket_path=socket_path, password=self.password
        )
class RedisSsl(Redis):
    """Redis broker client that connects over SSL/TLS.

    SSL settings are taken from the ``broker_use_ssl`` configuration only;
    URL-embedded SSL options are not currently supported.
    """

    def __init__(self, broker_url, *args, **kwargs):
        if "broker_use_ssl" not in kwargs:
            raise ValueError("rediss broker requires broker_use_ssl")
        # Stash the SSL options before Redis.__init__ builds the client,
        # since _get_redis_client_args (below) reads them.
        self.broker_use_ssl = kwargs.get("broker_use_ssl", {})
        super().__init__(broker_url, *args, **kwargs)

    def _get_redis_client_args(self):
        args = super()._get_redis_client_args()
        args["ssl"] = True
        if isinstance(self.broker_use_ssl, dict):
            args.update(self.broker_use_ssl)
        return args
class Broker:
    """Factory returning the appropriate broker client based on URL scheme.

    Supported schemes:
        ``amqp`` or ``amqps`` -> :class:`RabbitMQ`
        ``redis``             -> :class:`Redis`
        ``rediss``            -> :class:`RedisSsl`
        ``redis+socket``      -> :class:`RedisSocket`
        ``sentinel``          -> :class:`RedisSentinel`
    """

    def __new__(cls, broker_url, *args, **kwargs):
        """Instantiate the client class matching ``broker_url``'s scheme.

        Args:
            broker_url: Broker connection URL; its scheme selects the client.

        Returns:
            An instance of the matching broker client class (never a plain
            ``Broker``).

        Raises:
            NotImplementedError: If the scheme is not one of the supported
                ones.
        """
        scheme = urlparse(broker_url).scheme
        if scheme in ("amqp", "amqps"):
            return RabbitMQ(broker_url, *args, **kwargs)
        if scheme == "redis":
            return Redis(broker_url, *args, **kwargs)
        if scheme == "rediss":
            return RedisSsl(broker_url, *args, **kwargs)
        if scheme == "redis+socket":
            return RedisSocket(broker_url, *args, **kwargs)
        if scheme == "sentinel":
            return RedisSentinel(broker_url, *args, **kwargs)
        # Name the offending scheme so misconfigured URLs are easy to debug
        # (a bare NotImplementedError gives no hint which URL failed).
        raise NotImplementedError(f"Unsupported broker scheme: {scheme!r}")

    async def queues(self, names):
        raise NotImplementedError
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/_private/broker.py",
"license": "Apache License 2.0",
"lines": 234,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/serve/_private/queue_monitor.py | import logging
import time
import ray
from ray._common.constants import HEAD_NODE_RESOURCE_NAME
from ray.actor import ActorHandle
from ray.serve._private.broker import Broker
from ray.serve._private.common import (
AsyncInferenceTaskQueueMetricReport,
DeploymentID,
)
from ray.serve._private.constants import (
RAY_SERVE_ASYNC_INFERENCE_TASK_QUEUE_METRIC_PUSH_INTERVAL_S,
SERVE_LOGGER_NAME,
)
from ray.serve._private.metrics_utils import MetricsPusher
logger = logging.getLogger(SERVE_LOGGER_NAME)
# Prefix shared by every QueueMonitor actor name.
QUEUE_MONITOR_ACTOR_PREFIX = "QUEUE_MONITOR::"


def get_queue_monitor_actor_name(deployment_id: DeploymentID) -> str:
    """Build the Ray actor name for a deployment's QueueMonitor.

    Args:
        deployment_id: ID of the deployment (contains app_name and name)

    Returns:
        The full actor name, "QUEUE_MONITOR::<app_name>#<deployment_name>#".
    """
    suffix = f"{deployment_id.app_name}#{deployment_id.name}#"
    return QUEUE_MONITOR_ACTOR_PREFIX + suffix
@ray.remote(num_cpus=0)
class QueueMonitorActor:
    """
    Actor that monitors queue length by directly querying the broker.
    Returns pending tasks in the queue.
    Uses native broker clients:
    - Redis: Uses redis-py library with LLEN command
    - RabbitMQ: Uses HTTP management API
    Periodically pushes queue length metrics to the controller for autoscaling.
    """
    # Task name registered with the MetricsPusher for the periodic push loop.
    PUSH_METRICS_TO_CONTROLLER_TASK_NAME = "push_metrics_to_controller"
    async def __init__(
        self,
        broker_url: str,
        queue_name: str,
        deployment_id: DeploymentID,
        controller_handle: ActorHandle,
        rabbitmq_http_url: str = "http://guest:guest@localhost:15672/api/",
    ):
        """Connect to the broker and start the periodic metrics push loop.

        Args:
            broker_url: URL of the message broker; its scheme selects the
                broker client (see `Broker`).
            queue_name: Name of the queue whose length is monitored.
            deployment_id: Deployment this monitor reports metrics for.
            controller_handle: Serve controller that receives metric reports.
            rabbitmq_http_url: Management-API URL; only used for RabbitMQ.
        """
        self._broker_url = broker_url
        self._queue_name = queue_name
        self._deployment_id = deployment_id
        self._controller_handle = controller_handle
        self._rabbitmq_http_url = rabbitmq_http_url
        self._broker = Broker(self._broker_url, http_api=self._rabbitmq_http_url)
        self._metrics_pusher = MetricsPusher()
        self._start_metrics_pusher()
    def _start_metrics_pusher(self):
        """Start the metrics pusher to periodically push metrics to the controller."""
        self._metrics_pusher.register_or_update_task(
            self.PUSH_METRICS_TO_CONTROLLER_TASK_NAME,
            self._push_metrics_to_controller,
            RAY_SERVE_ASYNC_INFERENCE_TASK_QUEUE_METRIC_PUSH_INTERVAL_S,
        )
        self._metrics_pusher.start()
    def __ray_shutdown__(self):
        # Note: This must be synchronous (not async) because Ray's core code
        # in _raylet.pyx calls __ray_shutdown__() without awaiting.
        # Fields are nulled out so a double-shutdown is a no-op.
        if self._metrics_pusher is not None:
            self._metrics_pusher.stop_tasks()
            self._metrics_pusher = None
        if self._broker is not None:
            self._broker.close()
            self._broker = None
    async def get_queue_length(self) -> int:
        """
        Fetch queue length from the broker.
        Returns:
            Number of pending tasks in the queue.
        Raises:
            ValueError: If queue is not found in broker response or
                if queue data is missing the 'messages' field.
        """
        self._broker.queues(...) -> [{"name": ..., "messages": ...}, ...]
        queues = await self._broker.queues([self._queue_name])
        if queues is not None:
            for q in queues:
                if q.get("name") == self._queue_name:
                    queue_length = q.get("messages")
                    if queue_length is None:
                        raise ValueError(
                            f"Queue '{self._queue_name}' is missing 'messages' field"
                        )
                    return queue_length
        raise ValueError(f"Queue '{self._queue_name}' not found in broker response")
    async def _push_metrics_to_controller(self) -> None:
        """Push queue length metrics to the controller for autoscaling."""
        try:
            queue_length = await self.get_queue_length()
        except Exception as e:
            # Re-raise so the pusher sees the failure; nothing stale is pushed.
            logger.warning(
                f"[{self._deployment_id}] Failed to get queue length for metrics push: {e}"
            )
            raise e
        report = AsyncInferenceTaskQueueMetricReport(
            deployment_id=self._deployment_id,
            queue_length=queue_length,
            timestamp_s=time.time(),
        )
        # Fire-and-forget push to controller
        self._controller_handle.record_autoscaling_metrics_from_async_inference_task_queue.remote(
            report
        )
def create_queue_monitor_actor(
    deployment_id: DeploymentID,
    broker_url: str,
    queue_name: str,
    controller_handle: ActorHandle,
    rabbitmq_http_url: str = "http://guest:guest@localhost:15672/api/",
    namespace: str = "serve",
) -> ray.actor.ActorHandle:
    """Create the named QueueMonitor actor for a deployment (idempotent).

    If an actor with the deployment's well-known name already exists, it is
    returned as-is instead of creating a duplicate.

    Args:
        deployment_id: ID of the deployment (contains name and app_name)
        broker_url: URL of the message broker
        queue_name: Name of the queue to monitor
        controller_handle: Handle to the Serve controller for pushing metrics
        rabbitmq_http_url: HTTP API URL for RabbitMQ management (only for RabbitMQ)
        namespace: Ray namespace for the actor

    Returns:
        ActorHandle for the QueueMonitor actor
    """
    try:
        existing = get_queue_monitor_actor(deployment_id, namespace=namespace)
        logger.info(
            f"QueueMonitor actor for deployment '{deployment_id}' already exists, reusing"
        )
        return existing
    except ValueError:
        # No actor with this name yet -- create a fresh one.
        actor_name = get_queue_monitor_actor_name(deployment_id)
        monitor_cls = QueueMonitorActor.options(
            name=actor_name,
            namespace=namespace,
            # Restart/retry forever so monitoring survives node failures.
            max_restarts=-1,
            max_task_retries=-1,
            # Pin to the head node.
            resources={HEAD_NODE_RESOURCE_NAME: 0.001},
        )
        handle = monitor_cls.remote(
            broker_url=broker_url,
            queue_name=queue_name,
            deployment_id=deployment_id,
            controller_handle=controller_handle,
            rabbitmq_http_url=rabbitmq_http_url,
        )
        logger.info(
            f"Created QueueMonitor actor '{actor_name}' in namespace '{namespace}'"
        )
        return handle
def get_queue_monitor_actor(
    deployment_id: DeploymentID,
    namespace: str = "serve",
) -> ray.actor.ActorHandle:
    """Look up an existing QueueMonitor actor by its well-known name.

    Args:
        deployment_id: ID of the deployment (contains app_name and name)
        namespace: Ray namespace

    Returns:
        ActorHandle for the QueueMonitor actor

    Raises:
        ValueError: If actor doesn't exist
    """
    name = get_queue_monitor_actor_name(deployment_id)
    return ray.get_actor(name, namespace=namespace)
def kill_queue_monitor_actor(
    deployment_id: DeploymentID,
    namespace: str = "serve",
) -> None:
    """Kill a deployment's QueueMonitor actor by name.

    Args:
        deployment_id: ID of the deployment (contains app_name and name)
        namespace: Ray namespace

    Raises:
        ValueError: If actor doesn't exist
    """
    name = get_queue_monitor_actor_name(deployment_id)
    handle = get_queue_monitor_actor(deployment_id, namespace=namespace)
    # no_restart=True so the max_restarts=-1 actor is not resurrected.
    ray.kill(handle, no_restart=True)
    logger.info(f"Deleted QueueMonitor actor '{name}'")
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/_private/queue_monitor.py",
"license": "Apache License 2.0",
"lines": 185,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/serve/tests/test_queue_monitor.py | import os
import sys
import pytest
import redis
import ray
from ray._common.test_utils import wait_for_condition
from ray.serve._private.common import DeploymentID
from ray.serve._private.constants import SERVE_CONTROLLER_NAME, SERVE_NAMESPACE
from ray.serve._private.queue_monitor import (
create_queue_monitor_actor,
kill_queue_monitor_actor,
)
from ray.tests.conftest import external_redis # noqa: F401
TEST_DEPLOYMENT_ID = DeploymentID("test_deployment", "test_app")
TEST_QUEUE_NAME = "test_queue"
@pytest.fixture
def redis_client(external_redis):  # noqa: F811
    """Yield a Redis client for the external Redis; clean the test queue on teardown."""
    host, port = os.environ.get("RAY_REDIS_ADDRESS").split(":")
    conn = redis.Redis(host=host, port=int(port), db=0)
    yield conn
    # Teardown: drop any leftover test messages, then disconnect.
    conn.delete(TEST_QUEUE_NAME)
    conn.close()
@pytest.fixture
def redis_broker_url(external_redis):  # noqa: F811
    """Build a redis:// broker URL (db 0) pointing at the external Redis."""
    address = os.environ.get("RAY_REDIS_ADDRESS")
    return f"redis://{address}/0"
@pytest.fixture
def queue_monitor(serve_instance, redis_broker_url):  # noqa: F811
    """Yield a QueueMonitor actor wired to the real Serve controller."""
    serve_controller = ray.get_actor(SERVE_CONTROLLER_NAME, namespace=SERVE_NAMESPACE)
    actor = create_queue_monitor_actor(
        deployment_id=TEST_DEPLOYMENT_ID,
        broker_url=redis_broker_url,
        queue_name=TEST_QUEUE_NAME,
        controller_handle=serve_controller,
        namespace=SERVE_NAMESPACE,
    )
    yield actor
    # Teardown: remove the monitor so subsequent tests start from a clean slate.
    kill_queue_monitor_actor(TEST_DEPLOYMENT_ID, namespace=SERVE_NAMESPACE)
class TestQueueMonitorActor:
    """Integration tests for QueueMonitorActor with real Redis and Serve controller."""
    def test_queue_length_fetch(self, redis_client, queue_monitor):
        """Test QueueMonitor correctly fetches queue length from broker."""
        # Seed the broker queue with 30 messages.
        for i in range(30):
            redis_client.lpush(TEST_QUEUE_NAME, f"message_{i}")
        def check_length():
            return ray.get(queue_monitor.get_queue_length.remote()) == 30
        # Poll until the monitor reports all 30 (it may lag the broker briefly).
        wait_for_condition(check_length, timeout=30)
    def test_queue_length_updates_on_change(self, redis_client, queue_monitor):
        """Test QueueMonitor returns updated length when queue changes."""
        for i in range(10):
            redis_client.lpush(TEST_QUEUE_NAME, f"message_{i}")
        def check_initial_length():
            return ray.get(queue_monitor.get_queue_length.remote()) == 10
        wait_for_condition(check_initial_length, timeout=30)
        # Push 15 more messages; the reported length should catch up to 25.
        for i in range(10, 25):
            redis_client.lpush(TEST_QUEUE_NAME, f"message_{i}")
        def check_updated_length():
            return ray.get(queue_monitor.get_queue_length.remote()) == 25
        wait_for_condition(check_updated_length, timeout=30)
# Entry point so this test module can be run directly with `python`.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", "-s", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/test_queue_monitor.py",
"license": "Apache License 2.0",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/tests/unit/test_runtime_env_working_dir.py | """Unit tests for working_dir runtime environment functionality."""
import sys
import pytest
from ray._private.ray_constants import get_runtime_env_default_excludes
ENV_VAR = "RAY_OVERRIDE_RUNTIME_ENV_DEFAULT_EXCLUDES"
class TestGetRuntimeEnvDefaultExcludes:
    """Tests for get_runtime_env_default_excludes()."""
    def test_returns_defaults_when_env_var_not_set(self, monkeypatch):
        # Without the override variable, the built-in defaults apply.
        monkeypatch.delenv(ENV_VAR, raising=False)
        defaults = get_runtime_env_default_excludes()
        assert ".git" in defaults
        assert ".venv" in defaults
    def test_empty_env_var_disables_defaults(self, monkeypatch):
        # An explicitly empty override yields no excludes at all.
        monkeypatch.setenv(ENV_VAR, "")
        assert get_runtime_env_default_excludes() == []
    def test_custom_env_var_overrides_defaults(self, monkeypatch):
        # Entries are comma-separated; surrounding whitespace gets stripped.
        monkeypatch.setenv(ENV_VAR, "foo, bar ,baz")
        assert get_runtime_env_default_excludes() == ["foo", "bar", "baz"]
# Entry point so this test module can be run directly with `python`.
if __name__ == "__main__":
    sys.exit(pytest.main(["-vv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/unit/test_runtime_env_working_dir.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/tests/accelerators/test_intel_gpu_e2e.py | """
Manual Intel GPU validation tests, not executed in automated runs.
These tests are basic acceptance tests to validate Intel GPU support in Ray. They
require a suitable Intel GPU environment with dpctl installed. They are intended to
serve as an approved method to verify Intel GPU-based Ray deployments.
"""
import os
import re
from typing import Any, Dict, List
import pytest
import ray
try:
import dpctl
except ImportError:
pytest.skip(
"dpctl is not installed, skipping Intel GPU tests.", allow_module_level=True
)
# Default cluster/device sizes exercised by the parametrized tests below.
DEFAULT_SCALE_OUT_NODES = 2
DEFAULT_SCALE_UP_DEVICES = 2
# NOTE(review): any non-empty value (even "0") enables the GPU tests, since
# bool() of a non-empty string is True; only an unset variable disables them.
USE_GPU = bool(os.environ.get("RAY_PYTEST_USE_GPU", 0))
if not USE_GPU:
    pytest.skip("Skipping, these tests require GPUs.", allow_module_level=True)
@pytest.fixture
def ray_gpu_session():
    """Yield a callable that (re)starts Ray with caller-provided init kwargs."""
    def _restart(**init_kwargs):
        # Tear down any pre-existing session before starting a fresh one.
        if ray.is_initialized():
            ray.shutdown()
        ray.init(**init_kwargs)
    try:
        yield _restart
    finally:
        # Always leave the process without an active Ray session.
        if ray.is_initialized():
            ray.shutdown()
def _is_cluster_configured(address: str = "auto") -> bool:
    """Return True if ray.init() can attach to an existing cluster at *address*."""
    try:
        ray.init(address=address)
    except (ray.exceptions.RaySystemError, ConnectionError, TimeoutError):
        return False
    else:
        return True
    finally:
        # Never leave the probe session running.
        if ray.is_initialized():
            ray.shutdown()
def _detect_available_gpu_count() -> int:
    """Return how many Level Zero GPU devices dpctl can enumerate (0 on failure)."""
    try:
        gpu_context = dpctl.SyclContext("level_zero:gpu")
        return gpu_context.device_count
    except Exception:
        # dpctl could not enumerate devices; treat as "no GPUs available".
        return 0
def _require_min_gpus(required: int, context: str) -> None:
    """Skip the current test unless at least *required* GPUs are detected."""
    detected = _detect_available_gpu_count()
    if detected >= required:
        return
    pytest.skip(
        f"Skipping {context}: requires {required} GPUs, detected {detected} via dpctl."
    )
def _require_min_cluster_nodes(required_nodes: int, context: str) -> None:
    """Skip the current test unless the cluster has enough distinct alive nodes."""
    unique_node_ids = {
        node.get("NodeID")
        for node in ray.nodes()
        if node.get("Alive") and node.get("NodeID")
    }
    if len(unique_node_ids) < required_nodes:
        pytest.skip(
            f"Skipping {context}: requires {required_nodes} alive Ray nodes, detected {len(unique_node_ids)}."
        )
@ray.remote(num_gpus=1)
def gpu_task() -> Dict[str, Any]:
    """Report the GPU binding seen by the worker this task landed on."""
    runtime_ctx = ray.get_runtime_context()
    return {
        "gpu_ids": runtime_ctx.get_accelerator_ids().get("GPU", []),
        "pid": os.getpid(),
        "oneapi_selector": os.environ.get("ONEAPI_DEVICE_SELECTOR"),
    }
@ray.remote(num_gpus=1)
def cluster_probe_task() -> Dict[str, Any]:
    """Report node identity and GPU binding of the worker running this task."""
    rt_ctx = ray.get_runtime_context()
    return {
        "node_id": rt_ctx.get_node_id(),
        "node_ip": ray.util.get_node_ip_address(),
        "worker_id": rt_ctx.get_worker_id(),
        "gpu_ids": rt_ctx.get_accelerator_ids().get("GPU", []),
        "selector": os.environ.get("ONEAPI_DEVICE_SELECTOR"),
    }
def assert_valid_gpu_binding(result: Dict[str, Any], label: str) -> None:
    """Assert a single task result shows a valid, non-negative GPU binding."""
    gpu_id = _validate_gpu_binding_common(result, label)
    assert gpu_id >= 0, (
        f"Expected {label} to bind to a valid GPU, got {result.get('gpu_ids')}"
    )
def _validate_gpu_binding_common(
result: Dict[str, Any], label: str, selector_key: str = "oneapi_selector"
) -> int:
"""Validate basic GPU binding properties shared by single- and multi-GPU tests."""
gpu_ids = result.get("gpu_ids")
assert gpu_ids, f"No GPU IDs assigned for {label}."
primary_gpu_id = int(gpu_ids[0])
selector = result.get(selector_key)
assert selector, f"ONEAPI_DEVICE_SELECTOR not set in environment for {label}."
selector_lower = selector.lower()
assert (
"level_zero:" in selector_lower
), f"ONEAPI_DEVICE_SELECTOR should target GPU devices for {label}, got: {selector}."
selector_gpu_ids = {int(match) for match in re.findall(r"\b\d+\b", selector_lower)}
assert (
primary_gpu_id in selector_gpu_ids
), f"ONEAPI_DEVICE_SELECTOR does not reference bound GPU id for {label}: {selector}."
return primary_gpu_id
def assert_valid_multi_gpu_binding(
    results: List[Dict[str, Any]], num_gpus: int, label: str
) -> None:
    """Assert that multiple GPU tasks bind to different GPUs correctly."""
    assert len(results) == num_gpus, (
        f"Expected {num_gpus} results for {label}, got {len(results)}."
    )
    # Validate each result individually and collect its primary GPU id.
    gpu_ids = [
        _validate_gpu_binding_common(result, f"{label} instance {i}")
        for i, result in enumerate(results)
    ]
    assert len(set(gpu_ids)) == num_gpus, (
        f"Expected {label} to bind to {num_gpus} distinct GPUs, got bindings to GPU IDs: {gpu_ids}."
    )
@pytest.mark.skipif(
    _is_cluster_configured(),
    reason="Environment setup for scale-out, skipping single-node test.",
)
def test_gpu_task_binding(ray_gpu_session) -> None:
    """A single num_gpus=1 task gets a valid GPU binding on a local session."""
    _require_min_gpus(1, "single GPU task binding test")
    ray_gpu_session(num_gpus=1)
    task_result = ray.get(gpu_task.remote())
    assert_valid_gpu_binding(task_result, "GPU task")
@pytest.mark.skipif(
    _is_cluster_configured(),
    reason="Environment setup for scale-out, skipping single-node test.",
)
@pytest.mark.parametrize(
    "num_gpus", [DEFAULT_SCALE_UP_DEVICES]
)  # To be extended to required configurations
def test_multi_gpu_task_binding(ray_gpu_session, num_gpus) -> None:
    """Test that multiple GPU tasks bind to different GPUs correctly."""
    _require_min_gpus(num_gpus, "multi-GPU task binding test")
    ray_gpu_session(num_gpus=num_gpus)
    # Launch one single-GPU task per device; each must bind to a distinct GPU.
    task_futures = [gpu_task.remote() for _ in range(num_gpus)]
    task_results = ray.get(task_futures)
    assert_valid_multi_gpu_binding(task_results, num_gpus, f"GPU tasks (n={num_gpus})")
@pytest.mark.skipif(
    not _is_cluster_configured(), reason="Environment not setup for scale-out test."
)
@pytest.mark.parametrize(
    "num_nodes", [DEFAULT_SCALE_OUT_NODES]
)  # To be extended to required configurations
def test_scale_out_task_distribution(ray_gpu_session, num_nodes) -> None:
    """Ensure tasks can be scheduled across multiple nodes in the cluster."""
    # Attach to the pre-configured cluster rather than starting a local session.
    ray_gpu_session(address="auto")
    _require_min_cluster_nodes(num_nodes, "scale-out task distribution test")
    # SPREAD scheduling asks the scheduler to distribute the probes across nodes.
    probe_handles = [
        cluster_probe_task.options(scheduling_strategy="SPREAD").remote()
        for _ in range(num_nodes)
    ]
    probe_results = ray.get(probe_handles)
    node_ids = {
        result.get("node_id") for result in probe_results if result.get("node_id")
    }
    node_ips = {
        result.get("node_ip") for result in probe_results if result.get("node_ip")
    }
    # Every probe must report a well-formed GPU binding (selector under "selector").
    for result in probe_results:
        _validate_gpu_binding_common(result, "scale-out probe task", "selector")
    assert len(node_ids) == num_nodes or len(node_ips) == num_nodes, (
        f"Expected probe tasks to execute on {num_nodes} distinct nodes, "
        f"got node_ids={node_ids} node_ips={node_ips}."
    )
    gpu_capable_results = [result for result in probe_results if result.get("gpu_ids")]
    assert (
        len(gpu_capable_results) == num_nodes
    ), "Not all probe tasks reported GPU accelerator bindings in the cluster."
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/accelerators/test_intel_gpu_e2e.py",
"license": "Apache License 2.0",
"lines": 175,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/expressions/test_arithmetic.py | """Integration tests for arithmetic expression operations.
These tests require Ray and test end-to-end arithmetic expression evaluation.
"""
import math
import pandas as pd
import pytest
from packaging.version import parse as parse_version
import ray
from ray.data._internal.util import rows_same
from ray.data._internal.utils.arrow_utils import get_pyarrow_version
from ray.data.expressions import col, lit
from ray.data.tests.conftest import * # noqa
from ray.tests.conftest import * # noqa
pytestmark = pytest.mark.skipif(
get_pyarrow_version() < parse_version("20.0.0"),
reason="Expression integration tests require PyArrow >= 20.0.0",
)
class TestArithmeticIntegration:
    """Integration tests for arithmetic expressions with Ray Dataset."""
    def test_arithmetic_with_dataset(self, ray_start_regular_shared):
        """Test arithmetic expressions work correctly with Ray Dataset."""
        ds = ray.data.from_items(
            [
                {"price": 10.0, "quantity": 2},
                {"price": 20.0, "quantity": 3},
                {"price": 15.0, "quantity": 4},
            ]
        )
        result = ds.with_column("total", col("price") * col("quantity")).to_pandas()
        expected = pd.DataFrame(
            {
                "price": [10.0, 20.0, 15.0],
                "quantity": [2, 3, 4],
                "total": [20.0, 60.0, 60.0],
            }
        )
        assert rows_same(result, expected)
    def test_chained_arithmetic_with_dataset(self, ray_start_regular_shared):
        """Test chained arithmetic expressions with Ray Dataset."""
        ds = ray.data.from_items(
            [
                {"a": 10, "b": 5},
                {"a": 20, "b": 3},
            ]
        )
        # Three successive with_column calls, each derived from the same inputs.
        result = (
            ds.with_column("sum", col("a") + col("b"))
            .with_column("diff", col("a") - col("b"))
            .with_column("product", col("a") * col("b"))
            .to_pandas()
        )
        expected = pd.DataFrame(
            {
                "a": [10, 20],
                "b": [5, 3],
                "sum": [15, 23],
                "diff": [5, 17],
                "product": [50, 60],
            }
        )
        assert rows_same(result, expected)
    def test_floor_division_with_dataset(self, ray_start_regular_shared):
        """Test floor division operations with Ray Dataset."""
        ds = ray.data.range(5)
        result = ds.with_column("result", col("id") // 2).to_pandas()
        expected = pd.DataFrame({"id": [0, 1, 2, 3, 4], "result": [0, 0, 1, 1, 2]})
        assert rows_same(result, expected)
    def test_literal_floor_division_with_dataset(self, ray_start_regular_shared):
        """Test literal floor division by expression with Ray Dataset."""
        ds = ray.data.range(5)
        # 10 // (id + 2) for id in 0..4 -> [5, 3, 2, 2, 1].
        result = ds.with_column("result", lit(10) // (col("id") + 2)).to_pandas()
        expected = pd.DataFrame({"id": [0, 1, 2, 3, 4], "result": [5, 3, 2, 2, 1]})
        assert rows_same(result, expected)
    # Each case pairs an expression factory with the expected per-row results.
    @pytest.mark.parametrize(
        "expr_factory,expected_values",
        [
            pytest.param(lambda: col("value").ceil(), [-1, 0, 0, 1, 2], id="ceil"),
            pytest.param(lambda: col("value").floor(), [-2, -1, 0, 0, 1], id="floor"),
            pytest.param(lambda: col("value").round(), [-2, 0, 0, 0, 2], id="round"),
            pytest.param(lambda: col("value").trunc(), [-1, 0, 0, 0, 1], id="trunc"),
        ],
    )
    def test_rounding_with_dataset(
        self, ray_start_regular_shared, expr_factory, expected_values
    ):
        """Test rounding operations with Ray Dataset."""
        values = [-1.75, -0.25, 0.0, 0.25, 1.75]
        ds = ray.data.from_items([{"value": v} for v in values])
        result = ds.with_column("result", expr_factory()).to_pandas()
        expected = pd.DataFrame({"value": values, "result": expected_values})
        assert rows_same(result, expected)
    @pytest.mark.parametrize(
        "expr_factory,expected_fn",
        [
            pytest.param(lambda: col("value").ln(), math.log, id="ln"),
            pytest.param(lambda: col("value").log10(), math.log10, id="log10"),
            pytest.param(lambda: col("value").log2(), math.log2, id="log2"),
            pytest.param(lambda: col("value").exp(), math.exp, id="exp"),
        ],
    )
    def test_logarithmic_with_dataset(
        self, ray_start_regular_shared, expr_factory, expected_fn
    ):
        """Test logarithmic operations with Ray Dataset."""
        values = [1.0, math.e, 10.0, 4.0]
        ds = ray.data.from_items([{"value": v} for v in values])
        # Ground truth computed with the math module on the same inputs.
        expected_values = [expected_fn(v) for v in values]
        result = ds.with_column("result", expr_factory()).to_pandas()
        expected = pd.DataFrame({"value": values, "result": expected_values})
        assert rows_same(result, expected)
    @pytest.mark.parametrize(
        "expr_factory,expected_fn",
        [
            pytest.param(lambda: col("value").sin(), math.sin, id="sin"),
            pytest.param(lambda: col("value").cos(), math.cos, id="cos"),
            pytest.param(lambda: col("value").tan(), math.tan, id="tan"),
            pytest.param(lambda: col("value").atan(), math.atan, id="atan"),
        ],
    )
    def test_trigonometric_with_dataset(
        self, ray_start_regular_shared, expr_factory, expected_fn
    ):
        """Test trigonometric operations with Ray Dataset."""
        values = [0.0, math.pi / 6, math.pi / 4, math.pi / 3]
        ds = ray.data.from_items([{"value": v} for v in values])
        expected_values = [expected_fn(v) for v in values]
        result = ds.with_column("result", expr_factory()).to_pandas()
        expected = pd.DataFrame({"value": values, "result": expected_values})
        assert rows_same(result, expected)
    # Each case supplies its own rows, an expression factory, and the expected
    # per-row results for the derived "result" column.
    @pytest.mark.parametrize(
        "test_data,expr_factory,expected_results",
        [
            pytest.param(
                [{"x": 5}, {"x": -3}, {"x": 0}],
                lambda: col("x").negate(),
                [-5, 3, 0],
                id="negate",
            ),
            pytest.param(
                [{"x": 5}, {"x": -3}, {"x": 0}],
                lambda: col("x").sign(),
                [1, -1, 0],
                id="sign",
            ),
            pytest.param(
                [{"x": 5}, {"x": -3}, {"x": 0}],
                lambda: col("x").abs(),
                [5, 3, 0],
                id="abs",
            ),
            pytest.param(
                [{"x": 2}, {"x": 3}, {"x": 4}],
                lambda: col("x").power(2),
                [4, 9, 16],
                id="power_int",
            ),
            pytest.param(
                [{"x": 4}, {"x": 9}, {"x": 16}],
                lambda: col("x").power(0.5),
                [2.0, 3.0, 4.0],
                id="power_sqrt",
            ),
        ],
    )
    def test_arithmetic_helpers_with_dataset(
        self, ray_start_regular_shared, test_data, expr_factory, expected_results
    ):
        """Test arithmetic helper operations with Ray Dataset."""
        ds = ray.data.from_items(test_data)
        result = ds.with_column("result", expr_factory()).to_pandas()
        expected = pd.DataFrame(test_data)
        expected["result"] = expected_results
        assert rows_same(result, expected)
    def test_age_group_calculation_with_dataset(self, ray_start_regular_shared):
        """Test floor division for grouping values (e.g., age into decades)."""
        test_data = [
            {"age": 25},
            {"age": 17},
            {"age": 30},
        ]
        ds = ray.data.from_items(test_data)
        # Floor-divide by 10 then multiply back to bucket each age into its decade.
        result = ds.with_column("age_group", col("age") // 10 * 10).to_pandas()
        expected = pd.DataFrame({"age": [25, 17, 30], "age_group": [20, 10, 30]})
        assert rows_same(result, expected)
# Entry point so this test module can be run directly with `python`.
if __name__ == "__main__":
    import sys
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/expressions/test_arithmetic.py",
"license": "Apache License 2.0",
"lines": 187,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/expressions/test_boolean.py | """Integration tests for boolean/logical expression operations.
These tests require Ray and test end-to-end boolean expression evaluation.
"""
import pandas as pd
import pytest
from packaging.version import parse as parse_version
import ray
from ray.data._internal.utils.arrow_utils import get_pyarrow_version
from ray.data.expressions import col, lit
from ray.data.tests.conftest import * # noqa
from ray.tests.conftest import * # noqa
pytestmark = pytest.mark.skipif(
get_pyarrow_version() < parse_version("20.0.0"),
reason="Expression integration tests require PyArrow >= 20.0.0",
)
class TestBooleanIntegration:
    """Integration tests for boolean expressions with Ray Dataset."""
    def test_boolean_filter_with_dataset(self, ray_start_regular_shared):
        """Test boolean expressions used for filtering with Ray Dataset."""
        ds = ray.data.from_items(
            [
                {"age": 17, "is_student": True, "name": "Alice"},
                {"age": 21, "is_student": True, "name": "Bob"},
                {"age": 25, "is_student": False, "name": "Charlie"},
                {"age": 30, "is_student": False, "name": "Diana"},
            ]
        )
        # Add boolean columns using expressions
        result = (
            ds.with_column("is_adult", col("age") >= 18)
            .with_column("adult_student", (col("age") >= 18) & col("is_student"))
            .with_column("minor_or_student", (col("age") < 18) | col("is_student"))
            .to_pandas()
        )
        expected = pd.DataFrame(
            {
                "age": [17, 21, 25, 30],
                "is_student": [True, True, False, False],
                "name": ["Alice", "Bob", "Charlie", "Diana"],
                "is_adult": [False, True, True, True],
                "adult_student": [False, True, False, False],
                "minor_or_student": [True, True, False, False],
            }
        )
        # check_dtype=False: only the values matter, not the exact bool dtype.
        pd.testing.assert_frame_equal(result, expected, check_dtype=False)
    def test_complex_boolean_with_dataset(self, ray_start_regular_shared):
        """Test complex boolean expressions with Ray Dataset."""
        ds = ray.data.from_items(
            [
                {"score": 85, "passed": True, "bonus": False},
                {"score": 70, "passed": True, "bonus": True},
                {"score": 45, "passed": False, "bonus": False},
            ]
        )
        # Complex: (score > 80) OR (passed AND bonus)
        result = ds.with_column(
            "eligible", (col("score") > 80) | (col("passed") & col("bonus"))
        ).to_pandas()
        expected = pd.DataFrame(
            {
                "score": [85, 70, 45],
                "passed": [True, True, False],
                "bonus": [False, True, False],
                "eligible": [True, True, False],
            }
        )
        pd.testing.assert_frame_equal(result, expected, check_dtype=False)
    def test_logical_not_with_dataset(self, ray_start_regular_shared):
        """Test logical NOT operation with Ray Dataset."""
        ds = ray.data.range(5)
        # ~ negates the equality: False only where id == 2.
        result = ds.with_column("result", ~(col("id") == 2)).to_pandas()
        expected = pd.DataFrame(
            {"id": [0, 1, 2, 3, 4], "result": [True, True, False, True, True]}
        )
        pd.testing.assert_frame_equal(result, expected, check_dtype=False)
    # Each case: (factory building the expression, expected per-row results, id).
    @pytest.mark.parametrize(
        "expression_factory,expected_results,test_id",
        [
            pytest.param(
                lambda: (col("age") > 18) & (col("country") == "USA"),
                [True, False, False],
                "complex_and",
            ),
            pytest.param(
                lambda: (col("age") < 18) | (col("country") == "USA"),
                [True, True, False],
                "complex_or",
            ),
            pytest.param(
                lambda: ~((col("age") < 25) & (col("country") != "USA")),
                [True, False, True],
                "complex_not",
            ),
            pytest.param(
                lambda: (col("age") >= 21)
                & (col("score") >= 10)
                & col("active").is_not_null()
                & (col("active") == lit(True)),
                [True, False, False],
                "eligibility_flag",
            ),
        ],
    )
    def test_complex_boolean_expressions_with_dataset(
        self, ray_start_regular_shared, expression_factory, expected_results, test_id
    ):
        """Test complex boolean expressions with Ray Dataset."""
        # The third row has active=None to exercise is_not_null() handling.
        test_data = [
            {"age": 25, "country": "USA", "active": True, "score": 20},
            {"age": 17, "country": "Canada", "active": False, "score": 10},
            {"age": 30, "country": "UK", "active": None, "score": 20},
        ]
        ds = ray.data.from_items(test_data)
        expression = expression_factory()
        result = ds.with_column("result", expression).to_pandas()
        expected = pd.DataFrame(
            {
                "age": [25, 17, 30],
                "country": ["USA", "Canada", "UK"],
                "active": [True, False, None],
                "score": [20, 10, 20],
                "result": expected_results,
            }
        )
        pd.testing.assert_frame_equal(result, expected, check_dtype=False)
# Entry point so this test module can be run directly with `python`.
if __name__ == "__main__":
    import sys
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/expressions/test_boolean.py",
"license": "Apache License 2.0",
"lines": 128,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/expressions/test_comparison.py | """Integration tests for comparison expression operations.
These tests require Ray and test end-to-end comparison expression evaluation.
"""
import pandas as pd
import pytest
from packaging.version import parse as parse_version
import ray
from ray.data._internal.utils.arrow_utils import get_pyarrow_version
from ray.data.expressions import col
from ray.data.tests.conftest import * # noqa
from ray.tests.conftest import * # noqa
pytestmark = pytest.mark.skipif(
get_pyarrow_version() < parse_version("20.0.0"),
reason="Expression integration tests require PyArrow >= 20.0.0",
)
class TestComparisonIntegration:
    """Integration tests for comparison expressions with Ray Dataset."""
    def test_comparison_with_dataset(self, ray_start_regular_shared):
        """Test comparison expressions work correctly with Ray Dataset."""
        ds = ray.data.from_items(
            [
                {"age": 17, "name": "Alice"},
                {"age": 21, "name": "Bob"},
                {"age": 25, "name": "Charlie"},
                {"age": 18, "name": "Diana"},
            ]
        )
        # Column-vs-literal comparison; the boundary value 18 is included.
        result = ds.with_column("is_adult", col("age") >= 18).to_pandas()
        expected = pd.DataFrame(
            {
                "age": [17, 21, 25, 18],
                "name": ["Alice", "Bob", "Charlie", "Diana"],
                "is_adult": [False, True, True, True],
            }
        )
        # check_dtype=False: only the values matter, not the exact bool dtype.
        pd.testing.assert_frame_equal(result, expected, check_dtype=False)
    def test_multiple_comparisons_with_dataset(self, ray_start_regular_shared):
        """Test multiple comparison expressions with Ray Dataset."""
        ds = ray.data.from_items(
            [
                {"score": 45, "passing": 50},
                {"score": 75, "passing": 50},
                {"score": 50, "passing": 50},
            ]
        )
        # Column-vs-column comparisons (>=, <, ==) against the same threshold column.
        result = (
            ds.with_column("passed", col("score") >= col("passing"))
            .with_column("failed", col("score") < col("passing"))
            .with_column("borderline", col("score") == col("passing"))
            .to_pandas()
        )
        expected = pd.DataFrame(
            {
                "score": [45, 75, 50],
                "passing": [50, 50, 50],
                "passed": [False, True, True],
                "failed": [True, False, False],
                "borderline": [False, False, True],
            }
        )
        pd.testing.assert_frame_equal(result, expected, check_dtype=False)
# Entry point so this test module can be run directly with `python`.
if __name__ == "__main__":
    import sys
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/expressions/test_comparison.py",
"license": "Apache License 2.0",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/expressions/test_namespace_datetime.py | """Integration tests for datetime namespace expressions.
These tests require Ray and test end-to-end datetime namespace expression evaluation.
"""
import datetime
import pandas as pd
import pyarrow as pa
import pytest
from packaging import version
import ray
from ray.data._internal.util import rows_same
from ray.data.expressions import col
from ray.data.tests.conftest import * # noqa
from ray.tests.conftest import * # noqa
pytestmark = pytest.mark.skipif(
version.parse(pa.__version__) < version.parse("19.0.0"),
reason="Namespace expressions tests require PyArrow >= 19.0",
)
class TestDatetimeNamespace:
    """Tests for datetime namespace operations."""
    def test_datetime_namespace_all_operations(self, ray_start_regular_shared):
        """Test all datetime namespace operations on a datetime column."""
        ts = datetime.datetime(2024, 1, 2, 10, 30, 0)
        ds = ray.data.from_items([{"ts": ts}])
        result_ds = (
            ds.with_column("year", col("ts").dt.year())
            .with_column("month", col("ts").dt.month())
            .with_column("day", col("ts").dt.day())
            .with_column("hour", col("ts").dt.hour())
            .with_column("minute", col("ts").dt.minute())
            .with_column("second", col("ts").dt.second())
            .with_column("date_str", col("ts").dt.strftime("%Y-%m-%d"))
            .with_column("ts_floor", col("ts").dt.floor("day"))
            .with_column("ts_ceil", col("ts").dt.ceil("day"))
            .with_column("ts_round", col("ts").dt.round("day"))
            # Drop the source column so the comparison covers only derived columns.
            .drop_columns(["ts"])
        )
        actual = result_ds.to_pandas()
        expected = pd.DataFrame(
            [
                {
                    "year": 2024,
                    "month": 1,
                    "day": 2,
                    "hour": 10,
                    "minute": 30,
                    "second": 0,
                    "date_str": "2024-01-02",
                    "ts_floor": pd.Timestamp("2024-01-02"),
                    "ts_ceil": pd.Timestamp("2024-01-03"),
                    # round("day") rounds to nearest day; 10:30 < 12:00 so rounds down
                    "ts_round": pd.Timestamp("2024-01-02"),
                }
            ]
        )
        assert rows_same(actual, expected)
    def test_dt_namespace_invalid_dtype_raises(self, ray_start_regular_shared):
        """Test that dt namespace on non-datetime column raises an error."""
        # "value" is an integer column, so .dt accessors should fail at evaluation.
        ds = ray.data.from_items([{"value": 1}])
        with pytest.raises(Exception):
            ds.with_column("year", col("value").dt.year()).to_pandas()
# Entry point so this test module can be run directly with `python`.
if __name__ == "__main__":
    import sys
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/expressions/test_namespace_datetime.py",
"license": "Apache License 2.0",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/expressions/test_namespace_list.py | """Integration tests for list namespace expressions.
These tests require Ray and test end-to-end list namespace expression evaluation.
"""
import pandas as pd
import pyarrow as pa
import pytest
from packaging import version
import ray
from ray.data._internal.util import rows_same
from ray.data.expressions import col
from ray.data.tests.conftest import * # noqa
from ray.exceptions import RayTaskError
from ray.tests.conftest import * # noqa
pytestmark = pytest.mark.skipif(
version.parse(pa.__version__) < version.parse("19.0.0"),
reason="Namespace expressions tests require PyArrow >= 19.0",
)
def _create_dataset(items_data, dataset_format, arrow_table=None):
    """Build a Ray Dataset backed by either Arrow or pandas blocks.

    Args:
        items_data: Row dicts used when ``arrow_table`` is not provided.
        dataset_format: Either ``"arrow"`` or ``"pandas"``.
        arrow_table: Optional pre-built ``pa.Table``; takes precedence over
            ``items_data`` when given.

    Returns:
        A Ray Dataset whose blocks are in the requested format.

    Raises:
        ValueError: If ``dataset_format`` is not a supported format.
    """
    if dataset_format == "arrow":
        if arrow_table is not None:
            table = arrow_table
        else:
            table = pa.Table.from_pylist(items_data)
        return ray.data.from_arrow(table)
    if dataset_format == "pandas":
        if arrow_table is not None:
            df = arrow_table.to_pandas()
        else:
            df = pd.DataFrame(items_data)
        return ray.data.from_blocks([df])
    # Previously an unknown format fell through and raised a confusing
    # UnboundLocalError on `ds`; fail fast with a clear message instead.
    raise ValueError(f"Unsupported dataset_format: {dataset_format!r}")
DATASET_FORMATS = ["pandas", "arrow"]
@pytest.mark.parametrize("dataset_format", DATASET_FORMATS)
class TestListNamespace:
    """Tests for list namespace operations."""
    def test_list_len(self, ray_start_regular_shared, dataset_format):
        """Test list.len() returns length of each list."""
        # Includes an empty list to cover the zero-length edge case.
        data = [
            {"items": [1, 2, 3]},
            {"items": [4, 5]},
            {"items": []},
        ]
        ds = _create_dataset(data, dataset_format)
        result = ds.with_column("len", col("items").list.len()).to_pandas()
        expected = pd.DataFrame(
            {
                "items": [[1, 2, 3], [4, 5], []],
                "len": [3, 2, 0],
            }
        )
        assert rows_same(result, expected)
    def test_list_get(self, ray_start_regular_shared, dataset_format):
        """Test list.get() extracts element at index."""
        data = [
            {"items": [10, 20, 30]},
            {"items": [40, 50, 60]},
        ]
        ds = _create_dataset(data, dataset_format)
        result = ds.with_column("first", col("items").list.get(0)).to_pandas()
        expected = pd.DataFrame(
            {
                "items": [[10, 20, 30], [40, 50, 60]],
                "first": [10, 40],
            }
        )
        assert rows_same(result, expected)
    def test_list_bracket_index(self, ray_start_regular_shared, dataset_format):
        """Test list[i] bracket notation for element access."""
        data = [{"items": [10, 20, 30]}]
        ds = _create_dataset(data, dataset_format)
        result = ds.with_column("elem", col("items").list[1]).to_pandas()
        expected = pd.DataFrame(
            {
                "items": [[10, 20, 30]],
                "elem": [20],
            }
        )
        assert rows_same(result, expected)
    def test_list_with_arithmetic(self, ray_start_regular_shared, dataset_format):
        """Test list operations combined with arithmetic."""
        data = [{"items": [1, 2, 3]}]
        ds = _create_dataset(data, dataset_format)
        result = ds.with_column("len_plus_one", col("items").list.len() + 1).to_pandas()
        expected = pd.DataFrame({"items": [[1, 2, 3]], "len_plus_one": [4]})
        assert rows_same(result, expected)
    def test_list_sort(self, ray_start_regular_shared, dataset_format):
        """Test list.sort() sorts each list with custom options."""
        data = [
            {"items": [3, 1, 2]},
            {"items": [None, 4, 2]},
        ]
        ds = _create_dataset(data, dataset_format)
        # Descending order with nulls placed at the start of each sorted list.
        method = col("items").list.sort(order="descending", null_placement="at_start")
        result = ds.with_column("sorted", method).to_pandas()
        expected = pd.DataFrame(
            {
                "items": [[3, 1, 2], [None, 4, 2]],
                "sorted": [[3, 2, 1], [None, 4, 2]],
            }
        )
        assert rows_same(result, expected)
    def test_list_flatten(self, ray_start_regular_shared, dataset_format):
        """Test list.flatten() removes one nesting level."""
        data = [
            {"items": [[1, 2], [3]]},
            {"items": [[], [4, 5]]},
        ]
        ds = _create_dataset(data, dataset_format)
        result = ds.with_column("flattened", col("items").list.flatten()).to_pandas()
        expected = pd.DataFrame(
            {
                "items": [[[1, 2], [3]], [[], [4, 5]]],
                "flattened": [[1, 2, 3], [4, 5]],
            }
        )
        assert rows_same(result, expected)
    def test_list_flatten_requires_nested_lists(
        self, ray_start_regular_shared, dataset_format
    ):
        """list.flatten() should raise if elements aren't lists."""
        # Flat (non-nested) lists: the failure surfaces when the task executes.
        data = [{"items": [1, 2]}, {"items": [3, 4]}]
        ds = _create_dataset(data, dataset_format)
        with pytest.raises(RayTaskError):
            ds.with_column("flattened", col("items").list.flatten()).materialize()
    def test_list_flatten_large_list_type(
        self, ray_start_regular_shared, dataset_format
    ):
        """Flatten should preserve LargeList type when present."""
        if dataset_format != "arrow":
            pytest.skip("LargeList type only available via Arrow tables.")
        arrow_type = pa.large_list(pa.list_(pa.int64()))
        table = pa.Table.from_arrays(
            [
                pa.array(
                    [
                        [[1, 2], [3]],
                        [[], [4, 5]],
                    ],
                    type=arrow_type,
                )
            ],
            names=["items"],
        )
        ds = _create_dataset(None, dataset_format, arrow_table=table)
        result = ds.with_column("flattened", col("items").list.flatten())
        # Materialize as Arrow tables so the physical schema can be inspected.
        arrow_refs = result.to_arrow_refs()
        tables = ray.get(arrow_refs)
        result_table = pa.concat_tables(tables) if len(tables) > 1 else tables[0]
        flattened_type = result_table.schema.field("flattened").type
        assert flattened_type == pa.large_list(pa.int64())
        expected = pa.Table.from_arrays(
            [
                pa.array([[1, 2, 3], [4, 5]], type=pa.large_list(pa.int64())),
            ],
            names=["flattened"],
        )
        assert result_table.select(["flattened"]).combine_chunks().equals(expected)
# Entry point so this test module can be run directly with `python`.
if __name__ == "__main__":
    import sys
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/expressions/test_namespace_list.py",
"license": "Apache License 2.0",
"lines": 160,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/expressions/test_namespace_string.py | """Integration tests for string namespace expressions.
These tests require Ray and test end-to-end string namespace expression evaluation.
"""
import pandas as pd
import pyarrow as pa
import pytest
from packaging import version
import ray
from ray.data._internal.util import rows_same
from ray.data.expressions import col
from ray.data.tests.conftest import * # noqa
from ray.tests.conftest import * # noqa
# Skip the whole module on older PyArrow: the string-namespace expression
# kernels exercised here require PyArrow >= 19.0.
pytestmark = pytest.mark.skipif(
    version.parse(pa.__version__) < version.parse("19.0.0"),
    reason="Namespace expressions tests require PyArrow >= 19.0",
)
def _create_dataset(items_data, dataset_format, arrow_table=None):
if dataset_format == "arrow":
if arrow_table is not None:
ds = ray.data.from_arrow(arrow_table)
else:
table = pa.Table.from_pylist(items_data)
ds = ray.data.from_arrow(table)
elif dataset_format == "pandas":
if arrow_table is not None:
df = arrow_table.to_pandas()
else:
df = pd.DataFrame(items_data)
ds = ray.data.from_blocks([df])
return ds
# Block formats every test in this module is parametrized over.
DATASET_FORMATS = ["pandas", "arrow"]
@pytest.mark.parametrize("dataset_format", DATASET_FORMATS)
@pytest.mark.parametrize(
"method_name,input_values,expected_results",
[
("len", ["Alice", "Bob"], [5, 3]),
("byte_len", ["ABC"], [3]),
],
)
class TestStringLength:
"""Tests for string length operations."""
def test_string_length(
self,
ray_start_regular_shared,
dataset_format,
method_name,
input_values,
expected_results,
):
"""Test string length methods."""
data = [{"name": v} for v in input_values]
ds = _create_dataset(data, dataset_format)
method = getattr(col("name").str, method_name)
result = ds.with_column("result", method()).to_pandas()
expected = pd.DataFrame({"name": input_values, "result": expected_results})
assert rows_same(result, expected)
@pytest.mark.parametrize("dataset_format", DATASET_FORMATS)
@pytest.mark.parametrize(
"method_name,input_values,expected_values",
[
("upper", ["alice", "bob"], ["ALICE", "BOB"]),
("lower", ["ALICE", "BOB"], ["alice", "bob"]),
("capitalize", ["alice", "bob"], ["Alice", "Bob"]),
("title", ["alice smith", "bob jones"], ["Alice Smith", "Bob Jones"]),
("swapcase", ["AlIcE"], ["aLiCe"]),
],
)
class TestStringCase:
"""Tests for string case conversion."""
def test_string_case(
self,
ray_start_regular_shared,
dataset_format,
method_name,
input_values,
expected_values,
):
"""Test string case conversion methods."""
data = [{"name": v} for v in input_values]
ds = _create_dataset(data, dataset_format)
method = getattr(col("name").str, method_name)
result = ds.with_column("result", method()).to_pandas()
expected = pd.DataFrame({"name": input_values, "result": expected_values})
assert rows_same(result, expected)
@pytest.mark.parametrize("dataset_format", DATASET_FORMATS)
@pytest.mark.parametrize(
"method_name,input_values,expected_results",
[
("is_alpha", ["abc", "abc123", "123"], [True, False, False]),
("is_alnum", ["abc123", "abc-123"], [True, False]),
("is_digit", ["123", "12a"], [True, False]),
("is_space", [" ", " a "], [True, False]),
("is_lower", ["abc", "Abc"], [True, False]),
("is_upper", ["ABC", "Abc"], [True, False]),
("is_ascii", ["hello", "hello😊"], [True, False]),
],
)
class TestStringPredicates:
"""Tests for string predicate methods (is_*)."""
def test_string_predicate(
self,
ray_start_regular_shared,
dataset_format,
method_name,
input_values,
expected_results,
):
"""Test string predicate methods."""
data = [{"val": v} for v in input_values]
ds = _create_dataset(data, dataset_format)
method = getattr(col("val").str, method_name)
result = ds.with_column("result", method()).to_pandas()
expected = pd.DataFrame({"val": input_values, "result": expected_results})
assert rows_same(result, expected)
@pytest.mark.parametrize("dataset_format", DATASET_FORMATS)
@pytest.mark.parametrize(
"method_name,method_args,input_values,expected_values",
[
("strip", (), [" hello ", " world "], ["hello", "world"]),
("strip", ("x",), ["xxxhelloxxx"], ["hello"]),
("lstrip", (), [" hello "], ["hello "]),
("rstrip", (), [" hello "], [" hello"]),
],
)
class TestStringTrimming:
"""Tests for string trimming operations."""
def test_string_trimming(
self,
ray_start_regular_shared,
dataset_format,
method_name,
method_args,
input_values,
expected_values,
):
"""Test string trimming methods."""
data = [{"val": v} for v in input_values]
ds = _create_dataset(data, dataset_format)
method = getattr(col("val").str, method_name)
result = ds.with_column("result", method(*method_args)).to_pandas()
expected = pd.DataFrame({"val": input_values, "result": expected_values})
assert rows_same(result, expected)
@pytest.mark.parametrize("dataset_format", DATASET_FORMATS)
@pytest.mark.parametrize(
"method_name,method_kwargs,expected_value",
[
("pad", {"width": 5, "fillchar": "*", "side": "right"}, "hi***"),
("pad", {"width": 5, "fillchar": "*", "side": "left"}, "***hi"),
("pad", {"width": 6, "fillchar": "*", "side": "both"}, "**hi**"),
("lpad", {"width": 5, "padding": "*"}, "***hi"),
("rpad", {"width": 5, "padding": "*"}, "hi***"),
("center", {"width": 6, "padding": "*"}, "**hi**"),
],
)
class TestStringPadding:
"""Tests for string padding operations."""
def test_string_padding(
self,
ray_start_regular_shared,
dataset_format,
method_name,
method_kwargs,
expected_value,
):
"""Test string padding methods."""
data = [{"val": "hi"}]
ds = _create_dataset(data, dataset_format)
method = getattr(col("val").str, method_name)
result = ds.with_column("result", method(**method_kwargs)).to_pandas()
expected = pd.DataFrame({"val": ["hi"], "result": [expected_value]})
assert rows_same(result, expected)
@pytest.mark.parametrize("dataset_format", DATASET_FORMATS)
@pytest.mark.parametrize(
"method_name,method_args,method_kwargs,input_values,expected_results",
[
("starts_with", ("A",), {}, ["Alice", "Bob", "Alex"], [True, False, True]),
("starts_with", ("A",), {"ignore_case": True}, ["alice", "bob"], [True, False]),
("ends_with", ("e",), {}, ["Alice", "Bob"], [True, False]),
("contains", ("li",), {}, ["Alice", "Bob", "Charlie"], [True, False, True]),
("find", ("i",), {}, ["Alice", "Bob"], [2, -1]),
("count", ("a",), {}, ["banana", "apple"], [3, 1]),
("match", ("Al%",), {}, ["Alice", "Bob", "Alex"], [True, False, True]),
],
)
class TestStringSearch:
"""Tests for string searching operations."""
def test_string_search(
self,
ray_start_regular_shared,
dataset_format,
method_name,
method_args,
method_kwargs,
input_values,
expected_results,
):
"""Test string searching methods."""
data = [{"val": v} for v in input_values]
ds = _create_dataset(data, dataset_format)
method = getattr(col("val").str, method_name)
result = ds.with_column(
"result", method(*method_args, **method_kwargs)
).to_pandas()
expected = pd.DataFrame({"val": input_values, "result": expected_results})
assert rows_same(result, expected)
@pytest.mark.parametrize("dataset_format", DATASET_FORMATS)
class TestStringTransform:
"""Tests for string transformation operations."""
def test_reverse(self, ray_start_regular_shared, dataset_format):
"""Test str.reverse() reverses strings."""
data = [{"val": "hello"}, {"val": "world"}]
ds = _create_dataset(data, dataset_format)
result = ds.with_column("rev", col("val").str.reverse()).to_pandas()
expected = pd.DataFrame({"val": ["hello", "world"], "rev": ["olleh", "dlrow"]})
assert rows_same(result, expected)
def test_slice(self, ray_start_regular_shared, dataset_format):
"""Test str.slice() extracts substring."""
data = [{"val": "hello"}]
ds = _create_dataset(data, dataset_format)
result = ds.with_column("sliced", col("val").str.slice(1, 4)).to_pandas()
expected = pd.DataFrame({"val": ["hello"], "sliced": ["ell"]})
assert rows_same(result, expected)
def test_replace(self, ray_start_regular_shared, dataset_format):
"""Test str.replace() replaces substring."""
data = [{"val": "hello world"}]
ds = _create_dataset(data, dataset_format)
result = ds.with_column(
"replaced", col("val").str.replace("world", "universe")
).to_pandas()
expected = pd.DataFrame(
{"val": ["hello world"], "replaced": ["hello universe"]}
)
assert rows_same(result, expected)
def test_replace_with_max(self, ray_start_regular_shared, dataset_format):
"""Test str.replace() with max_replacements."""
data = [{"val": "aaa"}]
ds = _create_dataset(data, dataset_format)
result = ds.with_column(
"replaced", col("val").str.replace("a", "X", max_replacements=2)
).to_pandas()
expected = pd.DataFrame({"val": ["aaa"], "replaced": ["XXa"]})
assert rows_same(result, expected)
def test_repeat(self, ray_start_regular_shared, dataset_format):
"""Test str.repeat() repeats strings."""
data = [{"val": "A"}]
ds = _create_dataset(data, dataset_format)
result = ds.with_column("repeated", col("val").str.repeat(3)).to_pandas()
expected = pd.DataFrame({"val": ["A"], "repeated": ["AAA"]})
assert rows_same(result, expected)
def test_string_with_comparison(self, ray_start_regular_shared, dataset_format):
"""Test string operations combined with comparison."""
data = [{"name": "Alice"}, {"name": "Bo"}]
ds = _create_dataset(data, dataset_format)
result = ds.with_column("long_name", col("name").str.len() > 3).to_pandas()
expected = pd.DataFrame({"name": ["Alice", "Bo"], "long_name": [True, False]})
assert rows_same(result, expected)
def test_multiple_string_operations(self, ray_start_regular_shared, dataset_format):
"""Test multiple namespace operations in single pipeline."""
data = [{"name": "alice"}]
ds = _create_dataset(data, dataset_format)
result = (
ds.with_column("upper", col("name").str.upper())
.with_column("len", col("name").str.len())
.with_column("starts_a", col("name").str.starts_with("a"))
.to_pandas()
)
expected = pd.DataFrame(
{
"name": ["alice"],
"upper": ["ALICE"],
"len": [5],
"starts_a": [True],
}
)
assert rows_same(result, expected)
if __name__ == "__main__":
    import sys

    # Allow running this test module directly (outside the pytest runner).
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/expressions/test_namespace_string.py",
"license": "Apache License 2.0",
"lines": 278,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/expressions/test_namespace_struct.py | """Integration tests for struct namespace expressions.
These tests require Ray and test end-to-end struct namespace expression evaluation.
"""
import pandas as pd
import pyarrow as pa
import pytest
from packaging import version
import ray
from ray.data._internal.util import rows_same
from ray.data.expressions import col
from ray.data.tests.conftest import * # noqa
from ray.tests.conftest import * # noqa
# Skip the whole module on older PyArrow: the struct-namespace expression
# kernels exercised here require PyArrow >= 19.0.
pytestmark = pytest.mark.skipif(
    version.parse(pa.__version__) < version.parse("19.0.0"),
    reason="Namespace expressions tests require PyArrow >= 19.0",
)
def _create_dataset(items_data, dataset_format, arrow_table=None):
if dataset_format == "arrow":
if arrow_table is not None:
ds = ray.data.from_arrow(arrow_table)
else:
table = pa.Table.from_pylist(items_data)
ds = ray.data.from_arrow(table)
elif dataset_format == "pandas":
if arrow_table is not None:
df = arrow_table.to_pandas()
else:
df = pd.DataFrame(items_data)
ds = ray.data.from_blocks([df])
return ds
# Block formats every test in this module is parametrized over.
DATASET_FORMATS = ["pandas", "arrow"]
@pytest.mark.parametrize("dataset_format", DATASET_FORMATS)
class TestStructNamespace:
"""Tests for struct namespace operations."""
def test_struct_field(self, ray_start_regular_shared, dataset_format):
"""Test struct.field() extracts field."""
arrow_table = pa.table(
{
"user": pa.array(
[
{"name": "Alice", "age": 30},
{"name": "Bob", "age": 25},
],
type=pa.struct(
[
pa.field("name", pa.string()),
pa.field("age", pa.int32()),
]
),
)
}
)
items_data = [
{"user": {"name": "Alice", "age": 30}},
{"user": {"name": "Bob", "age": 25}},
]
ds = _create_dataset(items_data, dataset_format, arrow_table)
result = ds.with_column("age", col("user").struct.field("age")).to_pandas()
expected = pd.DataFrame(
{
"user": [{"name": "Alice", "age": 30}, {"name": "Bob", "age": 25}],
"age": [30, 25],
}
)
assert rows_same(result, expected)
def test_struct_bracket(self, ray_start_regular_shared, dataset_format):
"""Test struct['field'] bracket notation."""
arrow_table = pa.table(
{
"user": pa.array(
[
{"name": "Alice", "age": 30},
{"name": "Bob", "age": 25},
],
type=pa.struct(
[
pa.field("name", pa.string()),
pa.field("age", pa.int32()),
]
),
)
}
)
items_data = [
{"user": {"name": "Alice", "age": 30}},
{"user": {"name": "Bob", "age": 25}},
]
ds = _create_dataset(items_data, dataset_format, arrow_table)
result = ds.with_column("name", col("user").struct["name"]).to_pandas()
expected = pd.DataFrame(
{
"user": [{"name": "Alice", "age": 30}, {"name": "Bob", "age": 25}],
"name": ["Alice", "Bob"],
}
)
assert rows_same(result, expected)
def test_struct_nested_field(self, ray_start_regular_shared, dataset_format):
"""Test nested struct field access with .field()."""
arrow_table = pa.table(
{
"user": pa.array(
[
{"name": "Alice", "address": {"city": "NYC", "zip": "10001"}},
{"name": "Bob", "address": {"city": "LA", "zip": "90001"}},
],
type=pa.struct(
[
pa.field("name", pa.string()),
pa.field(
"address",
pa.struct(
[
pa.field("city", pa.string()),
pa.field("zip", pa.string()),
]
),
),
]
),
)
}
)
items_data = [
{"user": {"name": "Alice", "address": {"city": "NYC", "zip": "10001"}}},
{"user": {"name": "Bob", "address": {"city": "LA", "zip": "90001"}}},
]
ds = _create_dataset(items_data, dataset_format, arrow_table)
result = ds.with_column(
"city", col("user").struct.field("address").struct.field("city")
).to_pandas()
expected = pd.DataFrame(
{
"user": [
{"name": "Alice", "address": {"city": "NYC", "zip": "10001"}},
{"name": "Bob", "address": {"city": "LA", "zip": "90001"}},
],
"city": ["NYC", "LA"],
}
)
assert rows_same(result, expected)
def test_struct_nested_bracket(self, ray_start_regular_shared, dataset_format):
"""Test nested struct field access with brackets."""
arrow_table = pa.table(
{
"user": pa.array(
[
{"name": "Alice", "address": {"city": "NYC", "zip": "10001"}},
{"name": "Bob", "address": {"city": "LA", "zip": "90001"}},
],
type=pa.struct(
[
pa.field("name", pa.string()),
pa.field(
"address",
pa.struct(
[
pa.field("city", pa.string()),
pa.field("zip", pa.string()),
]
),
),
]
),
)
}
)
items_data = [
{"user": {"name": "Alice", "address": {"city": "NYC", "zip": "10001"}}},
{"user": {"name": "Bob", "address": {"city": "LA", "zip": "90001"}}},
]
ds = _create_dataset(items_data, dataset_format, arrow_table)
result = ds.with_column(
"zip", col("user").struct["address"].struct["zip"]
).to_pandas()
expected = pd.DataFrame(
{
"user": [
{"name": "Alice", "address": {"city": "NYC", "zip": "10001"}},
{"name": "Bob", "address": {"city": "LA", "zip": "90001"}},
],
"zip": ["10001", "90001"],
}
)
assert rows_same(result, expected)
if __name__ == "__main__":
    import sys

    # Allow running this test module directly (outside the pytest runner).
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/expressions/test_namespace_struct.py",
"license": "Apache License 2.0",
"lines": 187,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/expressions/test_predicate.py | """Integration tests for predicate expression operations.
These tests require Ray and test end-to-end predicate expression evaluation.
"""
import pandas as pd
import pytest
from packaging.version import parse as parse_version
import ray
from ray.data._internal.util import rows_same
from ray.data._internal.utils.arrow_utils import get_pyarrow_version
from ray.data.expressions import col
from ray.data.tests.conftest import * # noqa
from ray.tests.conftest import * # noqa
# Skip every test in this module unless PyArrow >= 20.0.0, per the
# module docstring's requirement for the predicate-evaluation path.
pytestmark = pytest.mark.skipif(
    get_pyarrow_version() < parse_version("20.0.0"),
    reason="Expression integration tests require PyArrow >= 20.0.0",
)
class TestPredicateIntegration:
    """Integration tests for predicate expressions with Ray Dataset."""

    def test_null_predicates_with_dataset(self, ray_start_regular_shared):
        """Test null predicate expressions with Ray Dataset."""
        ds = ray.data.from_items(
            [
                {"value": 10, "name": "Alice"},
                {"value": None, "name": "Bob"},
                {"value": 30, "name": None},
                {"value": None, "name": None},
            ]
        )

        result = (
            ds.with_column("value_is_null", col("value").is_null())
            .with_column("name_not_null", col("name").is_not_null())
            .with_column(
                "both_present", col("value").is_not_null() & col("name").is_not_null()
            )
            .to_pandas()
        )

        expected = pd.DataFrame(
            {
                "value": [10, None, 30, None],
                "name": ["Alice", "Bob", None, None],
                "value_is_null": [False, True, False, True],
                "name_not_null": [True, True, False, False],
                "both_present": [True, False, False, False],
            }
        )
        assert rows_same(result, expected)

    def test_membership_predicates_with_dataset(self, ray_start_regular_shared):
        """Test membership predicate expressions with Ray Dataset."""
        ds = ray.data.from_items(
            [
                {"status": "active", "category": "A"},
                {"status": "inactive", "category": "B"},
                {"status": "pending", "category": "A"},
                {"status": "deleted", "category": "C"},
            ]
        )

        result = (
            ds.with_column(
                "is_valid_status", col("status").is_in(["active", "pending"])
            )
            .with_column("not_deleted", col("status").not_in(["deleted"]))
            .with_column("category_a", col("category").is_in(["A"]))
            .to_pandas()
        )

        expected = pd.DataFrame(
            {
                "status": ["active", "inactive", "pending", "deleted"],
                "category": ["A", "B", "A", "C"],
                "is_valid_status": [True, False, True, False],
                "not_deleted": [True, True, True, False],
                "category_a": [True, False, True, False],
            }
        )
        # Consistency fix: every sibling test in this class compares with the
        # order-insensitive `rows_same` helper; the previous order-sensitive
        # `pd.testing.assert_frame_equal` implicitly depended on row order
        # being preserved through materialization.
        assert rows_same(result, expected)

    @pytest.mark.parametrize(
        "test_data,expression,expected_results,test_id",
        [
            pytest.param(
                [{"value": 1}, {"value": None}, {"value": 3}],
                col("value").is_null(),
                [False, True, False],
                "is_null_with_actual_nulls",
            ),
            pytest.param(
                [{"value": 1}, {"value": None}, {"value": 3}],
                col("value").is_not_null(),
                [True, False, True],
                "is_not_null_with_actual_nulls",
            ),
            pytest.param(
                [{"value": 1}, {"value": 2}, {"value": 3}],
                col("value").is_in([1, 3]),
                [True, False, True],
                "isin_operation",
            ),
            pytest.param(
                [{"value": 1}, {"value": 2}, {"value": 3}],
                col("value").not_in([1, 3]),
                [False, True, False],
                "not_in_operation",
            ),
            pytest.param(
                [{"name": "Alice"}, {"name": "Bob"}, {"name": "Charlie"}],
                col("name") == "Bob",
                [False, True, False],
                "string_equality",
            ),
            pytest.param(
                [{"name": "Alice"}, {"name": "Bob"}, {"name": "Charlie"}],
                col("name") != "Bob",
                [True, False, True],
                "string_not_equal",
            ),
            pytest.param(
                [{"name": "included"}, {"name": "excluded"}, {"name": None}],
                col("name").is_not_null() & (col("name") != "excluded"),
                [True, False, False],
                "string_filter",
            ),
        ],
    )
    def test_null_and_membership_with_dataset(
        self, ray_start_regular_shared, test_data, expression, expected_results, test_id
    ):
        """Test null checking and membership operations with Ray Dataset."""
        ds = ray.data.from_items(test_data)
        result = ds.with_column("result", expression).to_pandas()

        # Rebuild the input columns from the row dicts and append the
        # expected predicate output as the "result" column.
        expected_data = {}
        for key in test_data[0].keys():
            expected_data[key] = [row[key] for row in test_data]
        expected_data["result"] = expected_results

        expected = pd.DataFrame(expected_data)
        assert rows_same(result, expected)

    @pytest.mark.parametrize(
        "filter_expr,test_data,expected_flags,test_id",
        [
            pytest.param(
                col("age") >= 21,
                [
                    {"age": 20, "name": "Alice"},
                    {"age": 21, "name": "Bob"},
                    {"age": 25, "name": "Charlie"},
                ],
                [False, True, True],
                "age_filter",
            ),
            pytest.param(
                col("score") > 50,
                [
                    {"score": 30, "status": "fail"},
                    {"score": 50, "status": "pass"},
                    {"score": 70, "status": "pass"},
                ],
                [False, False, True],
                "score_filter",
            ),
            pytest.param(
                (col("age") >= 18) & col("active"),
                [
                    {"age": 17, "active": True},
                    {"age": 18, "active": False},
                    {"age": 25, "active": True},
                ],
                [False, False, True],
                "complex_and_filter",
            ),
            pytest.param(
                (col("status") == "approved") | (col("priority") == "high"),
                [
                    {"status": "pending", "priority": "low"},
                    {"status": "approved", "priority": "low"},
                    {"status": "pending", "priority": "high"},
                ],
                [False, True, True],
                "complex_or_filter",
            ),
            pytest.param(
                col("value").is_not_null() & (col("value") > 0),
                [{"value": None}, {"value": -5}, {"value": 10}],
                [False, False, True],
                "null_aware_filter",
            ),
            pytest.param(
                col("name").is_not_null() & (col("name") != "excluded"),
                [{"name": "included"}, {"name": "excluded"}, {"name": None}],
                [True, False, False],
                "string_filter",
            ),
            pytest.param(
                col("category").is_in(["A", "B"]),
                [
                    {"category": "A"},
                    {"category": "B"},
                    {"category": "C"},
                    {"category": "D"},
                ],
                [True, True, False, False],
                "membership_filter",
            ),
            pytest.param(
                (col("score") >= 50) & (col("grade") != "F"),
                [
                    {"score": 45, "grade": "F"},
                    {"score": 55, "grade": "D"},
                    {"score": 75, "grade": "B"},
                    {"score": 30, "grade": "F"},
                ],
                [False, True, True, False],
                "nested_filters",
            ),
        ],
    )
    def test_filter_expressions_with_dataset(
        self, ray_start_regular_shared, filter_expr, test_data, expected_flags, test_id
    ):
        """Test filter expressions with Ray Dataset."""
        ds = ray.data.from_items(test_data)
        result = ds.with_column("is_filtered", filter_expr).to_pandas()

        expected = pd.DataFrame(test_data)
        expected["is_filtered"] = expected_flags
        assert rows_same(result, expected)

    def test_filter_in_pipeline_with_dataset(self, ray_start_regular_shared):
        """Test filter expressions in a data processing pipeline."""
        test_data = [
            {"product": "A", "quantity": 10, "price": 100, "region": "North"},
            {"product": "B", "quantity": 5, "price": 200, "region": "South"},
            {"product": "C", "quantity": 20, "price": 50, "region": "North"},
            {"product": "D", "quantity": 15, "price": 75, "region": "East"},
            {"product": "E", "quantity": 3, "price": 300, "region": "West"},
        ]
        ds = ray.data.from_items(test_data)

        result = (
            ds.with_column("revenue", col("quantity") * col("price"))
            .with_column("is_high_value", col("revenue") >= 1000)
            .with_column("is_bulk_order", col("quantity") >= 10)
            .with_column("is_premium", col("price") >= 100)
            .with_column(
                "needs_special_handling",
                (col("is_high_value")) | (col("is_bulk_order") & col("is_premium")),
            )
            .with_column("is_north_region", col("region") == "North")
            .to_pandas()
        )

        expected = pd.DataFrame(
            {
                "product": ["A", "B", "C", "D", "E"],
                "quantity": [10, 5, 20, 15, 3],
                "price": [100, 200, 50, 75, 300],
                "region": ["North", "South", "North", "East", "West"],
                "revenue": [1000, 1000, 1000, 1125, 900],
                "is_high_value": [True, True, True, True, False],
                "is_bulk_order": [True, False, True, True, False],
                "is_premium": [True, True, False, False, True],
                "needs_special_handling": [True, True, True, True, False],
                "is_north_region": [True, False, True, False, False],
            }
        )
        assert rows_same(result, expected)
if __name__ == "__main__":
    import sys

    # Allow running this test module directly (outside the pytest runner).
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/expressions/test_predicate.py",
"license": "Apache License 2.0",
"lines": 261,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/unit/expressions/test_arithmetic.py | """Tests for arithmetic expression operations.
This module tests:
- Basic arithmetic: ADD, SUB, MUL, DIV, FLOORDIV
- Reverse arithmetic: radd, rsub, rmul, rtruediv, rfloordiv
- Rounding helpers: ceil, floor, round, trunc
- Logarithmic helpers: ln, log10, log2, exp
- Trigonometric helpers: sin, cos, tan, asin, acos, atan
- Arithmetic helpers: negate, sign, power, abs
"""
import math
import pandas as pd
import pyarrow as pa
import pytest
from pkg_resources import parse_version
from ray.data._internal.planner.plan_expression.expression_evaluator import eval_expr
from ray.data.expressions import BinaryExpr, Operation, UDFExpr, col, lit
from ray.data.tests.conftest import get_pyarrow_version
# Skip the whole module unless PyArrow >= 20.0.0, matching the requirement
# stated in the module docstring for these expression unit tests.
pytestmark = pytest.mark.skipif(
    get_pyarrow_version() < parse_version("20.0.0"),
    reason="Expression unit tests require PyArrow >= 20.0.0",
)
# ──────────────────────────────────────
# Basic Arithmetic Operations
# ──────────────────────────────────────
class TestBasicArithmetic:
    """Tests for basic arithmetic operations (+, -, *, /, //)."""

    @pytest.fixture
    def sample_data(self):
        """Sample data for arithmetic tests."""
        # "a" and "b" are integer columns; "c" is a float column so that
        # mixed int/float arithmetic (e.g. float modulo) is exercised too.
        return pd.DataFrame(
            {
                "a": [10, 20, 30, 40],
                "b": [2, 4, 5, 8],
                "c": [1.5, 2.5, 3.5, 4.5],
            }
        )

    # ── Addition ──
    @pytest.mark.parametrize(
        "expr,expected_name,expected_values",
        [
            (col("a") + 5, "add_literal", [15, 25, 35, 45]),
            (col("a") + col("b"), "add_cols", [12, 24, 35, 48]),
            (col("a") + lit(10), "add_lit", [20, 30, 40, 50]),
        ],
        ids=["col_plus_int", "col_plus_col", "col_plus_lit"],
    )
    def test_addition(self, sample_data, expr, expected_name, expected_values):
        """Test addition operations."""
        # The built expression must be a BinaryExpr tagged with Operation.ADD
        # before evaluation is even attempted.
        assert isinstance(expr, BinaryExpr)
        assert expr.op == Operation.ADD

        result = eval_expr(expr, sample_data)
        # reset_index / check_names=False: only values matter, not labels.
        pd.testing.assert_series_equal(
            result.reset_index(drop=True),
            pd.Series(expected_values, name=None),
            check_names=False,
        )

    def test_reverse_addition(self, sample_data):
        """Test reverse addition (literal + expr)."""
        # 5 + col(...) routes through __radd__ and must still build an ADD node.
        expr = 5 + col("a")
        assert isinstance(expr, BinaryExpr)
        assert expr.op == Operation.ADD

        result = eval_expr(expr, sample_data)
        expected = pd.Series([15, 25, 35, 45])
        pd.testing.assert_series_equal(
            result.reset_index(drop=True), expected, check_names=False
        )

    def test_string_concat_invalid_input_type(self):
        """Reject non-string-like inputs in string concatenation."""
        table = pa.table({"name": ["a", "b"], "age": [1, 2]})
        expr = col("name") + col("age")
        # "+" against a string column means concatenation, so the numeric
        # operand must be rejected with a TypeError naming the bad type.
        with pytest.raises(TypeError, match="string-like pyarrow.*int64"):
            eval_expr(expr, table)

    # ── Subtraction ──
    @pytest.mark.parametrize(
        "expr,expected_values",
        [
            (col("a") - 5, [5, 15, 25, 35]),
            (col("a") - col("b"), [8, 16, 25, 32]),
        ],
        ids=["col_minus_int", "col_minus_col"],
    )
    def test_subtraction(self, sample_data, expr, expected_values):
        """Test subtraction operations."""
        assert isinstance(expr, BinaryExpr)
        assert expr.op == Operation.SUB

        result = eval_expr(expr, sample_data)
        pd.testing.assert_series_equal(
            result.reset_index(drop=True),
            pd.Series(expected_values, name=None),
            check_names=False,
        )

    def test_reverse_subtraction(self, sample_data):
        """Test reverse subtraction (literal - expr)."""
        # 100 - col(...) routes through __rsub__; operand order must be kept.
        expr = 100 - col("a")
        assert isinstance(expr, BinaryExpr)
        assert expr.op == Operation.SUB

        result = eval_expr(expr, sample_data)
        expected = pd.Series([90, 80, 70, 60])
        pd.testing.assert_series_equal(
            result.reset_index(drop=True), expected, check_names=False
        )

    # ── Multiplication ──
    @pytest.mark.parametrize(
        "expr,expected_values",
        [
            (col("a") * 2, [20, 40, 60, 80]),
            (col("a") * col("b"), [20, 80, 150, 320]),
        ],
        ids=["col_times_int", "col_times_col"],
    )
    def test_multiplication(self, sample_data, expr, expected_values):
        """Test multiplication operations."""
        assert isinstance(expr, BinaryExpr)
        assert expr.op == Operation.MUL

        result = eval_expr(expr, sample_data)
        pd.testing.assert_series_equal(
            result.reset_index(drop=True),
            pd.Series(expected_values, name=None),
            check_names=False,
        )

    def test_reverse_multiplication(self, sample_data):
        """Test reverse multiplication (literal * expr)."""
        expr = 3 * col("b")
        assert isinstance(expr, BinaryExpr)
        assert expr.op == Operation.MUL

        result = eval_expr(expr, sample_data)
        expected = pd.Series([6, 12, 15, 24])
        pd.testing.assert_series_equal(
            result.reset_index(drop=True), expected, check_names=False
        )

    # ── Division ──
    @pytest.mark.parametrize(
        "expr,expected_values",
        [
            (col("a") / 2, [5.0, 10.0, 15.0, 20.0]),
            (col("a") / col("b"), [5.0, 5.0, 6.0, 5.0]),
        ],
        ids=["col_div_int", "col_div_col"],
    )
    def test_division(self, sample_data, expr, expected_values):
        """Test division operations."""
        # True division always produces floats, even for int inputs.
        assert isinstance(expr, BinaryExpr)
        assert expr.op == Operation.DIV

        result = eval_expr(expr, sample_data)
        pd.testing.assert_series_equal(
            result.reset_index(drop=True),
            pd.Series(expected_values, name=None),
            check_names=False,
        )

    def test_reverse_division(self, sample_data):
        """Test reverse division (literal / expr)."""
        expr = 100 / col("a")
        assert isinstance(expr, BinaryExpr)
        assert expr.op == Operation.DIV

        result = eval_expr(expr, sample_data)
        expected = pd.Series([10.0, 5.0, 100 / 30, 2.5])
        pd.testing.assert_series_equal(
            result.reset_index(drop=True), expected, check_names=False
        )

    # ── Floor Division ──
    @pytest.mark.parametrize(
        "expr,expected_values",
        [
            (col("a") // 3, [3, 6, 10, 13]),
            (col("a") // col("b"), [5, 5, 6, 5]),
        ],
        ids=["col_floordiv_int", "col_floordiv_col"],
    )
    def test_floor_division(self, sample_data, expr, expected_values):
        """Test floor division operations."""
        assert isinstance(expr, BinaryExpr)
        assert expr.op == Operation.FLOORDIV

        result = eval_expr(expr, sample_data)
        pd.testing.assert_series_equal(
            result.reset_index(drop=True),
            pd.Series(expected_values, name=None),
            check_names=False,
        )

    def test_reverse_floor_division(self, sample_data):
        """Test reverse floor division (literal // expr)."""
        expr = 100 // col("a")
        assert isinstance(expr, BinaryExpr)
        assert expr.op == Operation.FLOORDIV

        result = eval_expr(expr, sample_data)
        expected = pd.Series([10, 5, 3, 2])
        pd.testing.assert_series_equal(
            result.reset_index(drop=True), expected, check_names=False
        )

    # ── Modulo ──
    @pytest.mark.parametrize(
        "expr,expected_values",
        [
            (col("a") % 3, [1, 2, 0, 1]),
            (col("a") % col("c"), [1.0, 0.0, 2.0, 4.0]),
            (10 % col("b"), [0, 2, 0, 2]),
        ],
        ids=["col_mod_int", "col_mod_fp", "col_rmod_int"],
    )
    def test_modulo(self, sample_data, expr, expected_values):
        """Test modulo operations."""
        # Covers int % literal, int % float column, and reverse (__rmod__).
        assert isinstance(expr, BinaryExpr)
        assert expr.op == Operation.MOD

        result = eval_expr(expr, sample_data)
        pd.testing.assert_series_equal(
            result.reset_index(drop=True),
            pd.Series(expected_values, name=None),
            check_names=False,
        )
# ──────────────────────────────────────
# Complex Arithmetic Expressions
# ──────────────────────────────────────
class TestComplexArithmetic:
    """Tests for compound arithmetic expressions built from several operators."""

    @pytest.fixture
    def sample_data(self):
        """Float frame shared by every test in this class."""
        return pd.DataFrame(
            {
                "x": [1.0, 2.0, 3.0, 4.0],
                "y": [4.0, 3.0, 2.0, 1.0],
                "z": [2.0, 2.0, 2.0, 2.0],
            }
        )

    @staticmethod
    def _check(expression, frame, want_values):
        """Evaluate `expression` on `frame`; compare values ignoring labels."""
        actual = eval_expr(expression, frame)
        pd.testing.assert_series_equal(
            actual.reset_index(drop=True),
            pd.Series(want_values),
            check_names=False,
        )

    def test_chained_operations(self, sample_data):
        """(x + y) * z applies the parenthesized sum first."""
        self._check(
            (col("x") + col("y")) * col("z"),
            sample_data,
            [10.0, 10.0, 10.0, 10.0],
        )

    def test_nested_operations(self, sample_data):
        """Nested scaled/offset sub-expressions compose correctly."""
        self._check(
            ((col("x") * 2) + (col("y") / 2)) - 1,
            sample_data,
            [3.0, 4.5, 6.0, 7.5],
        )

    def test_order_of_operations(self, sample_data):
        """Multiplication binds tighter than addition: x + (y * z)."""
        # Expected: 1+8, 2+6, 3+4, 4+2
        self._check(
            col("x") + col("y") * col("z"),
            sample_data,
            [9.0, 8.0, 7.0, 6.0],
        )
# ──────────────────────────────────────
# Rounding Operations
# ──────────────────────────────────────
class TestRoundingOperations:
    """Tests for rounding helper methods."""

    @pytest.fixture
    def sample_data(self):
        """Mixed positive/negative decimals covering both rounding directions."""
        return pd.DataFrame(
            {
                "value": [1.2, 2.5, 3.7, -1.3, -2.5, -3.8],
            }
        )

    @pytest.mark.parametrize(
        "method,expected_values",
        [
            ("ceil", [2, 3, 4, -1, -2, -3]),
            ("floor", [1, 2, 3, -2, -3, -4]),
            ("trunc", [1, 2, 3, -1, -2, -3]),
        ],
        ids=["ceil", "floor", "trunc"],
    )
    def test_rounding_methods(self, sample_data, method, expected_values):
        """ceil/floor/trunc produce exact, direction-correct integers."""
        rounding_expr = getattr(col("value"), method)()
        assert isinstance(rounding_expr, UDFExpr)

        evaluated = eval_expr(rounding_expr, sample_data)
        # Compare as plain lists: the backend may return a different dtype.
        assert evaluated.tolist() == expected_values

    def test_round_method(self, sample_data):
        """round() executes; exact halves are backend-dependent."""
        round_expr = col("value").round()
        assert isinstance(round_expr, UDFExpr)

        evaluated = eval_expr(round_expr, sample_data)
        # PyArrow uses round-half-to-even, so only sanity-check the length.
        assert len(evaluated) == len(sample_data)
# ──────────────────────────────────────
# Logarithmic Operations
# ──────────────────────────────────────
class TestLogarithmicOperations:
    """Tests for logarithmic helper methods."""

    # Absolute tolerance for float comparisons.
    _TOL = 1e-10

    @pytest.fixture
    def sample_data(self):
        """Strictly positive values so every logarithm is defined."""
        return pd.DataFrame({"value": [1.0, math.e, 10.0, 100.0]})

    def _check_close(self, series, expected):
        """Assert element-wise closeness within the shared tolerance."""
        for got, want in zip(series.tolist(), expected):
            assert abs(got - want) < self._TOL

    def test_ln(self, sample_data):
        """Natural logarithm of each row."""
        log_expr = col("value").ln()
        assert isinstance(log_expr, UDFExpr)
        self._check_close(
            eval_expr(log_expr, sample_data), [0.0, 1.0, math.log(10), math.log(100)]
        )

    def test_log10(self, sample_data):
        """Base-10 logarithm of each row."""
        log_expr = col("value").log10()
        assert isinstance(log_expr, UDFExpr)
        self._check_close(
            eval_expr(log_expr, sample_data), [0.0, math.log10(math.e), 1.0, 2.0]
        )

    def test_log2(self):
        """Base-2 logarithm on powers of two."""
        frame = pd.DataFrame({"value": [1.0, 2.0, 4.0, 8.0]})
        log_expr = col("value").log2()
        assert isinstance(log_expr, UDFExpr)
        self._check_close(eval_expr(log_expr, frame), [0.0, 1.0, 2.0, 3.0])

    def test_exp(self):
        """Exponential function e**x."""
        frame = pd.DataFrame({"value": [0.0, 1.0, 2.0]})
        exp_expr = col("value").exp()
        assert isinstance(exp_expr, UDFExpr)
        self._check_close(eval_expr(exp_expr, frame), [1.0, math.e, math.e**2])
# ──────────────────────────────────────
# Trigonometric Operations
# ──────────────────────────────────────
class TestTrigonometricOperations:
    """Tests for trigonometric helper methods."""

    @pytest.fixture
    def sample_data(self):
        """Common reference angles (radians) from 0 through pi/2."""
        return pd.DataFrame(
            {"angle": [0.0, math.pi / 6, math.pi / 4, math.pi / 3, math.pi / 2]}
        )

    @staticmethod
    def _assert_all_close(series, expected, tol=1e-10):
        """Element-wise |got - want| < tol over the whole series."""
        for got, want in zip(series.tolist(), expected):
            assert abs(got - want) < tol

    def test_sin(self, sample_data):
        """Sine of the reference angles."""
        trig_expr = col("angle").sin()
        assert isinstance(trig_expr, UDFExpr)
        self._assert_all_close(
            eval_expr(trig_expr, sample_data),
            [0.0, 0.5, math.sqrt(2) / 2, math.sqrt(3) / 2, 1.0],
        )

    def test_cos(self, sample_data):
        """Cosine of the reference angles."""
        trig_expr = col("angle").cos()
        assert isinstance(trig_expr, UDFExpr)
        self._assert_all_close(
            eval_expr(trig_expr, sample_data),
            [1.0, math.sqrt(3) / 2, math.sqrt(2) / 2, 0.5, 0.0],
        )

    def test_tan(self):
        """Tangent at 0 and pi/4."""
        frame = pd.DataFrame({"angle": [0.0, math.pi / 4]})
        trig_expr = col("angle").tan()
        assert isinstance(trig_expr, UDFExpr)
        self._assert_all_close(eval_expr(trig_expr, frame), [0.0, 1.0])

    def test_asin(self):
        """Arcsine of 0, 0.5, and 1."""
        frame = pd.DataFrame({"value": [0.0, 0.5, 1.0]})
        trig_expr = col("value").asin()
        assert isinstance(trig_expr, UDFExpr)
        self._assert_all_close(eval_expr(trig_expr, frame), [0.0, math.pi / 6, math.pi / 2])

    def test_acos(self):
        """Arccosine of 1, 0.5, and 0."""
        frame = pd.DataFrame({"value": [1.0, 0.5, 0.0]})
        trig_expr = col("value").acos()
        assert isinstance(trig_expr, UDFExpr)
        self._assert_all_close(eval_expr(trig_expr, frame), [0.0, math.pi / 3, math.pi / 2])

    def test_atan(self):
        """Arctangent of 0 and 1."""
        frame = pd.DataFrame({"value": [0.0, 1.0]})
        trig_expr = col("value").atan()
        assert isinstance(trig_expr, UDFExpr)
        self._assert_all_close(eval_expr(trig_expr, frame), [0.0, math.pi / 4])
# ──────────────────────────────────────
# Arithmetic Helper Operations
# ──────────────────────────────────────
class TestArithmeticHelpers:
    """Tests for arithmetic helper methods (negate, sign, power, abs)."""
    @pytest.fixture
    def sample_data(self):
        """Sample data for arithmetic helper tests."""
        # Mix of positive, negative, and zero so sign/abs/negate all branch.
        return pd.DataFrame(
            {
                "value": [5, -3, 0, 10, -7],
            }
        )
    def test_negate(self, sample_data):
        """Test negate method."""
        expr = col("value").negate()
        assert isinstance(expr, UDFExpr)
        result = eval_expr(expr, sample_data)
        expected = [-5, 3, 0, -10, 7]
        assert result.tolist() == expected
    def test_sign(self, sample_data):
        """Test sign method."""
        # sign maps negative -> -1, zero -> 0, positive -> 1.
        expr = col("value").sign()
        assert isinstance(expr, UDFExpr)
        result = eval_expr(expr, sample_data)
        expected = [1, -1, 0, 1, -1]
        assert result.tolist() == expected
    def test_abs(self, sample_data):
        """Test abs method."""
        expr = col("value").abs()
        assert isinstance(expr, UDFExpr)
        result = eval_expr(expr, sample_data)
        expected = [5, 3, 0, 10, 7]
        assert result.tolist() == expected
    @pytest.mark.parametrize(
        "base_values,exponent,expected",
        [
            ([2, 3, 4], 2, [4, 9, 16]),
            ([2, 3, 4], 3, [8, 27, 64]),
            ([4, 9, 16], 0.5, [2.0, 3.0, 4.0]),
        ],
        ids=["square", "cube", "sqrt"],
    )
    def test_power(self, base_values, exponent, expected):
        """Test power method with various exponents."""
        data = pd.DataFrame({"value": base_values})
        expr = col("value").power(exponent)
        assert isinstance(expr, UDFExpr)
        result = eval_expr(expr, data)
        # Tolerance-based compare: fractional exponents yield floats.
        for r, e in zip(result.tolist(), expected):
            assert abs(r - e) < 1e-10
if __name__ == "__main__":
    # Allow running this test module directly; propagate pytest's exit code.
    raise SystemExit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/unit/expressions/test_arithmetic.py",
"license": "Apache License 2.0",
"lines": 452,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/unit/expressions/test_boolean.py | """Tests for boolean/logical expression operations.
This module tests:
- Logical operators: AND (&), OR (|), NOT (~)
- Boolean expression combinations
- Complex nested boolean expressions
"""
import pandas as pd
import pytest
from ray.data._internal.planner.plan_expression.expression_evaluator import eval_expr
from ray.data.expressions import BinaryExpr, Operation, UnaryExpr, col, lit
# ──────────────────────────────────────
# Logical AND Operations
# ──────────────────────────────────────
class TestLogicalAnd:
    """Tests for logical AND (&) operations."""

    @pytest.fixture
    def sample_data(self):
        """Every combination of two flags plus an age column."""
        return pd.DataFrame(
            {
                "is_active": [True, True, False, False],
                "is_verified": [True, False, True, False],
                "age": [25, 17, 30, 15],
            }
        )

    @staticmethod
    def _expect(series, values):
        """Assert the boolean series equals `values`, ignoring index and name."""
        pd.testing.assert_series_equal(
            series.reset_index(drop=True), pd.Series(values), check_names=False
        )

    def test_and_two_booleans(self, sample_data):
        """AND of two boolean columns is True only when both flags are set."""
        combined = col("is_active") & col("is_verified")
        assert isinstance(combined, BinaryExpr)
        assert combined.op == Operation.AND
        self._expect(eval_expr(combined, sample_data), [True, False, False, False])

    def test_and_two_comparisons(self, sample_data):
        """AND of two comparison sub-expressions."""
        combined = (col("is_active") == lit(True)) & (col("age") >= 18)
        self._expect(eval_expr(combined, sample_data), [True, False, False, False])

    def test_and_chained(self, sample_data):
        """Three AND-ed conditions combine into one boolean series."""
        combined = (col("is_active")) & (col("is_verified")) & (col("age") >= 18)
        self._expect(eval_expr(combined, sample_data), [True, False, False, False])
# ──────────────────────────────────────
# Logical OR Operations
# ──────────────────────────────────────
class TestLogicalOr:
    """Tests for logical OR (|) operations."""

    @pytest.fixture
    def sample_data(self):
        """Role flags plus an age column; at most one role flag set per row."""
        return pd.DataFrame(
            {
                "is_admin": [True, False, False, False],
                "is_moderator": [False, True, False, False],
                "age": [25, 17, 30, 15],
            }
        )

    @staticmethod
    def _expect(series, values):
        """Index/name-insensitive comparison against the expected booleans."""
        pd.testing.assert_series_equal(
            series.reset_index(drop=True), pd.Series(values), check_names=False
        )

    def test_or_two_booleans(self, sample_data):
        """OR of two boolean columns is True when either flag is set."""
        combined = col("is_admin") | col("is_moderator")
        assert isinstance(combined, BinaryExpr)
        assert combined.op == Operation.OR
        self._expect(eval_expr(combined, sample_data), [True, True, False, False])

    def test_or_two_comparisons(self, sample_data):
        """OR of two comparison sub-expressions."""
        combined = (col("is_admin") == lit(True)) | (col("age") >= 18)
        self._expect(eval_expr(combined, sample_data), [True, False, True, False])

    def test_or_chained(self, sample_data):
        """Three OR-ed conditions combine into one boolean series."""
        combined = (col("is_admin")) | (col("is_moderator")) | (col("age") >= 21)
        self._expect(eval_expr(combined, sample_data), [True, True, True, False])
# ──────────────────────────────────────
# Logical NOT Operations
# ──────────────────────────────────────
class TestLogicalNot:
    """Tests for logical NOT (~) operations."""

    @pytest.fixture
    def sample_data(self):
        """Activity/ban flags and ages used by the negation tests."""
        return pd.DataFrame(
            {
                "is_active": [True, False, True, False],
                "is_banned": [False, False, True, True],
                "age": [25, 17, 30, 15],
            }
        )

    @staticmethod
    def _expect(series, values):
        """Index/name-insensitive comparison against the expected booleans."""
        pd.testing.assert_series_equal(
            series.reset_index(drop=True), pd.Series(values), check_names=False
        )

    def test_not_boolean_column(self, sample_data):
        """~ on a boolean column builds a NOT UnaryExpr and flips each row."""
        negated = ~col("is_active")
        assert isinstance(negated, UnaryExpr)
        assert negated.op == Operation.NOT
        self._expect(eval_expr(negated, sample_data), [False, True, False, True])

    def test_not_comparison(self, sample_data):
        """~ applied to a comparison negates its result row-wise."""
        negated = ~(col("age") >= 18)
        self._expect(eval_expr(negated, sample_data), [False, True, False, True])

    def test_double_negation(self, sample_data):
        """~~x restores the original boolean values."""
        restored = ~~col("is_active")
        self._expect(eval_expr(restored, sample_data), [True, False, True, False])
# ──────────────────────────────────────
# Complex Boolean Combinations
# ──────────────────────────────────────
class TestComplexBooleanExpressions:
    """Tests for complex boolean expression combinations."""
    @pytest.fixture
    def sample_data(self):
        """Sample data for complex boolean tests."""
        # Five rows spanning minors/adults, students/members, three countries.
        return pd.DataFrame(
            {
                "age": [17, 21, 25, 30, 65],
                "is_student": [True, True, False, False, False],
                "is_member": [False, True, True, False, True],
                "country": ["USA", "UK", "USA", "Canada", "USA"],
            }
        )
    def test_and_or_combination(self, sample_data):
        """Test combination of AND and OR."""
        # (age >= 21) AND (is_student OR is_member)
        expr = (col("age") >= 21) & (col("is_student") | col("is_member"))
        result = eval_expr(expr, sample_data)
        # Rows: 17 fails the age test; 30 has neither flag set.
        expected = pd.Series([False, True, True, False, True])
        pd.testing.assert_series_equal(
            result.reset_index(drop=True), expected, check_names=False
        )
    def test_not_with_and_or(self, sample_data):
        """Test NOT combined with AND and OR."""
        # NOT(age < 18) AND (is_member OR is_student)
        expr = ~(col("age") < 18) & (col("is_member") | col("is_student"))
        result = eval_expr(expr, sample_data)
        expected = pd.Series([False, True, True, False, True])
        pd.testing.assert_series_equal(
            result.reset_index(drop=True), expected, check_names=False
        )
    def test_demorgans_law_and(self, sample_data):
        """Test De Morgan's law: ~(A & B) == (~A) | (~B)."""
        # Both formulations must evaluate to the same boolean series.
        # ~(is_student & is_member)
        expr1 = ~(col("is_student") & col("is_member"))
        # (~is_student) | (~is_member)
        expr2 = (~col("is_student")) | (~col("is_member"))
        result1 = eval_expr(expr1, sample_data)
        result2 = eval_expr(expr2, sample_data)
        pd.testing.assert_series_equal(
            result1.reset_index(drop=True),
            result2.reset_index(drop=True),
            check_names=False,
        )
    def test_demorgans_law_or(self, sample_data):
        """Test De Morgan's law: ~(A | B) == (~A) & (~B)."""
        # ~(is_student | is_member)
        expr1 = ~(col("is_student") | col("is_member"))
        # (~is_student) & (~is_member)
        expr2 = (~col("is_student")) & (~col("is_member"))
        result1 = eval_expr(expr1, sample_data)
        result2 = eval_expr(expr2, sample_data)
        pd.testing.assert_series_equal(
            result1.reset_index(drop=True),
            result2.reset_index(drop=True),
            check_names=False,
        )
    def test_deeply_nested_boolean(self, sample_data):
        """Test deeply nested boolean expression."""
        # ((age >= 21) & (country == "USA")) | ((is_student) & (is_member))
        expr = ((col("age") >= 21) & (col("country") == "USA")) | (
            (col("is_student")) & (col("is_member"))
        )
        result = eval_expr(expr, sample_data)
        # Row 0: (17>=21 & USA) | (True & False) = False | False = False
        # Row 1: (21>=21 & UK) | (True & True) = False | True = True
        # Row 2: (25>=21 & USA) | (False & True) = True | False = True
        # Row 3: (30>=21 & Canada) | (False & False) = False | False = False
        # Row 4: (65>=21 & USA) | (False & True) = True | False = True
        expected = pd.Series([False, True, True, False, True])
        pd.testing.assert_series_equal(
            result.reset_index(drop=True), expected, check_names=False
        )
# ──────────────────────────────────────
# Boolean Expression Structural Equality
# ──────────────────────────────────────
class TestBooleanStructuralEquality:
    """Tests for structural equality of boolean expressions."""

    def test_and_structural_equality(self):
        """AND expressions compare structurally; operand order matters."""
        left = col("a") & col("b")
        same = col("a") & col("b")
        flipped = col("b") & col("a")  # Order matters
        assert left.structurally_equals(same)
        assert not left.structurally_equals(flipped)

    def test_or_structural_equality(self):
        """OR expressions compare structurally on both operands."""
        left = col("a") | col("b")
        same = col("a") | col("b")
        other = col("a") | col("c")
        assert left.structurally_equals(same)
        assert not left.structurally_equals(other)

    def test_not_structural_equality(self):
        """NOT expressions compare structurally on their single operand."""
        negated = ~col("a")
        same = ~col("a")
        other = ~col("b")
        assert negated.structurally_equals(same)
        assert not negated.structurally_equals(other)

    def test_complex_boolean_structural_equality(self):
        """Nested boolean trees must match node-for-node to be equal."""
        tree = (col("a") > 10) & ((col("b") < 5) | ~col("c"))
        identical = (col("a") > 10) & ((col("b") < 5) | ~col("c"))
        differs = (col("a") > 10) & ((col("b") < 6) | ~col("c"))
        assert tree.structurally_equals(identical)
        assert not tree.structurally_equals(differs)
if __name__ == "__main__":
    # Allow running this test module directly; propagate pytest's exit code.
    raise SystemExit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/unit/expressions/test_boolean.py",
"license": "Apache License 2.0",
"lines": 245,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/unit/expressions/test_comparison.py | """Tests for comparison expression operations.
This module tests:
- Comparison operators: GT (>), LT (<), GE (>=), LE (<=), EQ (==), NE (!=)
- Comparison with columns and literals
- Reverse comparisons (literal compared to column)
"""
import pandas as pd
import pytest
from ray.data._internal.planner.plan_expression.expression_evaluator import eval_expr
from ray.data.expressions import BinaryExpr, Operation, col, lit
# ──────────────────────────────────────
# Basic Comparison Operations
# ──────────────────────────────────────
class TestComparisonOperators:
    """Tests for comparison operators (>, <, >=, <=, ==, !=)."""
    @pytest.fixture
    def sample_data(self):
        """Sample data for comparison tests."""
        # Five rows mixing numeric columns with a string status column.
        return pd.DataFrame(
            {
                "age": [18, 21, 25, 30, 16],
                "score": [50, 75, 100, 60, 85],
                "status": ["active", "inactive", "active", "pending", "active"],
            }
        )
    # ── Greater Than ──
    @pytest.mark.parametrize(
        "expr,expected_values",
        [
            (col("age") > 21, [False, False, True, True, False]),
            (col("age") > col("score") / 10, [True, True, True, True, True]),
        ],
        ids=["col_gt_literal", "col_gt_col_expr"],
    )
    def test_greater_than(self, sample_data, expr, expected_values):
        """Test greater than (>) comparisons."""
        assert isinstance(expr, BinaryExpr)
        assert expr.op == Operation.GT
        result = eval_expr(expr, sample_data)
        pd.testing.assert_series_equal(
            result.reset_index(drop=True),
            pd.Series(expected_values),
            check_names=False,
        )
    def test_greater_than_reverse(self, sample_data):
        """Test reverse greater than (literal > col)."""
        # Python dispatches `22 > col(...)` through the expression's reflected
        # operator, which normalizes the comparison to the mirrored op.
        expr = 22 > col("age")
        assert isinstance(expr, BinaryExpr)
        assert expr.op == Operation.LT  # Reverse: 22 > age becomes age < 22
        result = eval_expr(expr, sample_data)
        expected = pd.Series([True, True, False, False, True])
        pd.testing.assert_series_equal(
            result.reset_index(drop=True), expected, check_names=False
        )
    # ── Less Than ──
    @pytest.mark.parametrize(
        "expr,expected_values",
        [
            (col("age") < 21, [True, False, False, False, True]),
            (col("score") < 70, [True, False, False, True, False]),
        ],
        ids=["col_lt_literal", "score_lt_70"],
    )
    def test_less_than(self, sample_data, expr, expected_values):
        """Test less than (<) comparisons."""
        assert isinstance(expr, BinaryExpr)
        assert expr.op == Operation.LT
        result = eval_expr(expr, sample_data)
        pd.testing.assert_series_equal(
            result.reset_index(drop=True),
            pd.Series(expected_values),
            check_names=False,
        )
    def test_less_than_reverse(self, sample_data):
        """Test reverse less than (literal < col)."""
        expr = 20 < col("age")
        assert isinstance(expr, BinaryExpr)
        assert expr.op == Operation.GT  # Reverse: 20 < age becomes age > 20
        result = eval_expr(expr, sample_data)
        expected = pd.Series([False, True, True, True, False])
        pd.testing.assert_series_equal(
            result.reset_index(drop=True), expected, check_names=False
        )
    # ── Greater Than or Equal ──
    @pytest.mark.parametrize(
        "expr,expected_values",
        [
            (col("age") >= 21, [False, True, True, True, False]),
            (col("score") >= 75, [False, True, True, False, True]),
        ],
        ids=["col_ge_21", "score_ge_75"],
    )
    def test_greater_equal(self, sample_data, expr, expected_values):
        """Test greater than or equal (>=) comparisons."""
        assert isinstance(expr, BinaryExpr)
        assert expr.op == Operation.GE
        result = eval_expr(expr, sample_data)
        pd.testing.assert_series_equal(
            result.reset_index(drop=True),
            pd.Series(expected_values),
            check_names=False,
        )
    def test_greater_equal_reverse(self, sample_data):
        """Test reverse greater equal (literal >= col)."""
        expr = 21 >= col("age")
        assert isinstance(expr, BinaryExpr)
        assert expr.op == Operation.LE  # Reverse
        result = eval_expr(expr, sample_data)
        expected = pd.Series([True, True, False, False, True])
        pd.testing.assert_series_equal(
            result.reset_index(drop=True), expected, check_names=False
        )
    # ── Less Than or Equal ──
    @pytest.mark.parametrize(
        "expr,expected_values",
        [
            (col("age") <= 21, [True, True, False, False, True]),
            (col("score") <= 60, [True, False, False, True, False]),
        ],
        ids=["col_le_21", "score_le_60"],
    )
    def test_less_equal(self, sample_data, expr, expected_values):
        """Test less than or equal (<=) comparisons."""
        assert isinstance(expr, BinaryExpr)
        assert expr.op == Operation.LE
        result = eval_expr(expr, sample_data)
        pd.testing.assert_series_equal(
            result.reset_index(drop=True),
            pd.Series(expected_values),
            check_names=False,
        )
    def test_less_equal_reverse(self, sample_data):
        """Test reverse less equal (literal <= col)."""
        expr = 25 <= col("age")
        assert isinstance(expr, BinaryExpr)
        assert expr.op == Operation.GE  # Reverse
        result = eval_expr(expr, sample_data)
        expected = pd.Series([False, False, True, True, False])
        pd.testing.assert_series_equal(
            result.reset_index(drop=True), expected, check_names=False
        )
    # ── Equality ──
    @pytest.mark.parametrize(
        "expr,expected_values",
        [
            (col("age") == 21, [False, True, False, False, False]),
            (col("status") == "active", [True, False, True, False, True]),
            (col("score") == lit(100), [False, False, True, False, False]),
        ],
        ids=["age_eq_21", "status_eq_active", "score_eq_100"],
    )
    def test_equality(self, sample_data, expr, expected_values):
        """Test equality (==) comparisons."""
        assert isinstance(expr, BinaryExpr)
        assert expr.op == Operation.EQ
        result = eval_expr(expr, sample_data)
        pd.testing.assert_series_equal(
            result.reset_index(drop=True),
            pd.Series(expected_values),
            check_names=False,
        )
    # ── Not Equal ──
    @pytest.mark.parametrize(
        "expr,expected_values",
        [
            (col("age") != 21, [True, False, True, True, True]),
            (col("status") != "active", [False, True, False, True, False]),
        ],
        ids=["age_ne_21", "status_ne_active"],
    )
    def test_not_equal(self, sample_data, expr, expected_values):
        """Test not equal (!=) comparisons."""
        assert isinstance(expr, BinaryExpr)
        assert expr.op == Operation.NE
        result = eval_expr(expr, sample_data)
        pd.testing.assert_series_equal(
            result.reset_index(drop=True),
            pd.Series(expected_values),
            check_names=False,
        )
# ──────────────────────────────────────
# Column vs Column Comparisons
# ──────────────────────────────────────
class TestColumnToColumnComparison:
    """Tests for comparing columns against other columns."""

    @pytest.fixture
    def sample_data(self):
        """Three numeric columns with varied per-row orderings."""
        return pd.DataFrame(
            {
                "value_a": [10, 20, 30, 40],
                "value_b": [15, 20, 25, 45],
                "threshold": [12, 18, 35, 35],
            }
        )

    # Each case builds the comparison lazily so parametrization does not
    # depend on import-time expression construction order.
    @pytest.mark.parametrize(
        "expr_fn,expected_values",
        [
            (lambda: col("value_a") > col("value_b"), [False, False, True, False]),
            (lambda: col("value_a") < col("threshold"), [True, False, True, False]),
            (lambda: col("value_a") == col("value_b"), [False, True, False, False]),
            (lambda: col("value_a") >= col("threshold"), [False, True, False, True]),
        ],
        ids=["a_gt_b", "a_lt_threshold", "a_eq_b", "a_ge_threshold"],
    )
    def test_column_to_column_comparisons(self, sample_data, expr_fn, expected_values):
        """Each comparison is evaluated row-wise across the two columns."""
        comparison = expr_fn()
        evaluated = eval_expr(comparison, sample_data)
        pd.testing.assert_series_equal(
            evaluated.reset_index(drop=True),
            pd.Series(expected_values),
            check_names=False,
        )
# ──────────────────────────────────────
# Comparison with Expressions
# ──────────────────────────────────────
class TestComparisonWithExpressions:
    """Tests for comparing expressions against other expressions."""

    @pytest.fixture
    def sample_data(self):
        """Prices, discounts, and per-row minimum prices."""
        return pd.DataFrame(
            {
                "price": [100, 200, 150],
                "discount": [10, 50, 30],
                "min_price": [80, 160, 130],
            }
        )

    @staticmethod
    def _expect_bools(series, values):
        """Assert the evaluated series matches `values` regardless of index/name."""
        pd.testing.assert_series_equal(
            series.reset_index(drop=True), pd.Series(values), check_names=False
        )

    def test_compare_computed_values(self, sample_data):
        """A derived value (price - discount) can sit on either side of >."""
        comparison = (col("price") - col("discount")) > col("min_price")
        # Row-wise: 90>80, 150>160, 120>130.
        self._expect_bools(eval_expr(comparison, sample_data), [True, False, False])

    def test_compare_scaled_values(self, sample_data):
        """Check whether a 10% discount still meets each row's minimum."""
        comparison = col("price") * 0.9 >= col("min_price")
        # Row-wise: 90>=80, 180>=160, 135>=130.
        self._expect_bools(eval_expr(comparison, sample_data), [True, True, True])
# ──────────────────────────────────────
# String Comparisons
# ──────────────────────────────────────
class TestStringComparison:
    """Tests for string equality and inequality."""

    @pytest.fixture
    def sample_data(self):
        """Names (with one duplicate) and cities."""
        return pd.DataFrame(
            {
                "name": ["Alice", "Bob", "Charlie", "Alice"],
                "city": ["NYC", "LA", "NYC", "SF"],
            }
        )

    def test_string_equality(self, sample_data):
        """== matches every row whose name equals the literal string."""
        matches = eval_expr(col("name") == "Alice", sample_data)
        pd.testing.assert_series_equal(
            matches.reset_index(drop=True),
            pd.Series([True, False, False, True]),
            check_names=False,
        )

    def test_string_inequality(self, sample_data):
        """!= flags rows whose city differs from the literal string."""
        matches = eval_expr(col("city") != "NYC", sample_data)
        pd.testing.assert_series_equal(
            matches.reset_index(drop=True),
            pd.Series([False, True, False, True]),
            check_names=False,
        )
# ──────────────────────────────────────
# Boolean Comparisons
# ──────────────────────────────────────
class TestBooleanComparison:
    """Tests for boolean value comparisons."""

    @pytest.fixture
    def sample_data(self):
        """All four combinations of two boolean flags."""
        return pd.DataFrame(
            {
                "is_active": [True, False, True, False],
                "is_verified": [True, True, False, False],
            }
        )

    @staticmethod
    def _expect(series, values):
        """Index/name-insensitive equality check for a boolean series."""
        pd.testing.assert_series_equal(
            series.reset_index(drop=True), pd.Series(values), check_names=False
        )

    def test_boolean_equality_true(self, sample_data):
        """Comparing a flag column against lit(True) keeps the flag values."""
        self._expect(
            eval_expr(col("is_active") == lit(True), sample_data),
            [True, False, True, False],
        )

    def test_boolean_equality_false(self, sample_data):
        """Comparing against lit(False) inverts the flag values."""
        self._expect(
            eval_expr(col("is_active") == lit(False), sample_data),
            [False, True, False, True],
        )

    def test_boolean_column_to_column(self, sample_data):
        """Row-wise equality of two boolean columns."""
        self._expect(
            eval_expr(col("is_active") == col("is_verified"), sample_data),
            [True, False, False, True],
        )
if __name__ == "__main__":
    # Allow running this test module directly; propagate pytest's exit code.
    raise SystemExit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/unit/expressions/test_comparison.py",
"license": "Apache License 2.0",
"lines": 318,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/unit/expressions/test_core.py | """Tests for core expression types and basic functionality.
This module tests:
- ColumnExpr, LiteralExpr, BinaryExpr, UnaryExpr, AliasExpr, StarExpr
- Structural equality for all expression types
- Expression tree repr (string representation)
- UDFExpr structural equality
"""
import pyarrow as pa
import pyarrow.compute as pc
import pytest
from ray.data._internal.planner.plan_expression.expression_visitors import (
_InlineExprReprVisitor,
)
from ray.data.datatype import DataType
from ray.data.expressions import (
BinaryExpr,
ColumnExpr,
Expr,
LiteralExpr,
Operation,
StarExpr,
UDFExpr,
UnaryExpr,
col,
download,
lit,
star,
udf,
)
# ──────────────────────────────────────
# Column Expression Tests
# ──────────────────────────────────────
class TestColumnExpr:
    """Tests for ColumnExpr functionality."""

    def test_column_creation(self):
        """col() wraps the given name in a ColumnExpr."""
        age_ref = col("age")
        assert isinstance(age_ref, ColumnExpr)
        assert age_ref.name == "age"

    def test_column_name_property(self):
        """The name property round-trips the column name."""
        assert col("my_column").name == "my_column"

    @pytest.mark.parametrize(
        "name1,name2,expected",
        [
            ("a", "a", True),
            ("a", "b", False),
            ("column_name", "column_name", True),
            ("COL", "col", False),  # Case sensitive
        ],
        ids=["same_name", "different_name", "long_name", "case_sensitive"],
    )
    def test_column_structural_equality(self, name1, name2, expected):
        """Two column refs are structurally equal iff names match exactly."""
        assert col(name1).structurally_equals(col(name2)) is expected
# ──────────────────────────────────────
# Literal Expression Tests
# ──────────────────────────────────────
class TestLiteralExpr:
    """Tests for LiteralExpr functionality."""
    @pytest.mark.parametrize(
        "value",
        [42, 3.14, "hello", True, False, None, [1, 2, 3]],
        ids=["int", "float", "string", "bool_true", "bool_false", "none", "list"],
    )
    def test_literal_creation(self, value):
        """Test that lit() creates a LiteralExpr with correct value."""
        expr = lit(value)
        assert isinstance(expr, LiteralExpr)
        assert expr.value == value
    # Structural equality is type-sensitive: 1 and 1.0 are distinct literals.
    @pytest.mark.parametrize(
        "val1,val2,expected",
        [
            (1, 1, True),
            (1, 2, False),
            ("x", "y", False),
            ("x", "x", True),
            (1, 1.0, False),  # Different types
            (True, True, True),
            (True, False, False),
            ([1, 2], [1, 2], True),
            ([1, 2], [1, 3], False),
        ],
        ids=[
            "same_int",
            "different_int",
            "different_str",
            "same_str",
            "int_vs_float",
            "same_bool",
            "different_bool",
            "same_list",
            "different_list",
        ],
    )
    def test_literal_structural_equality(self, val1, val2, expected):
        """Test structural equality for literal expressions."""
        assert lit(val1).structurally_equals(lit(val2)) is expected
# ──────────────────────────────────────
# Binary Expression Tests
# ──────────────────────────────────────
class TestBinaryExpr:
    """Tests for BinaryExpr structure (not operation semantics)."""
    def test_binary_expression_structure(self):
        """Test that binary expressions have correct structure."""
        expr = col("a") + lit(1)
        assert isinstance(expr, BinaryExpr)
        assert expr.op == Operation.ADD
        assert isinstance(expr.left, ColumnExpr)
        assert isinstance(expr.right, LiteralExpr)
    # Structural equality compares the expression trees node-for-node, so
    # operand order matters even for mathematically commutative operations.
    @pytest.mark.parametrize(
        "expr1,expr2,expected",
        [
            (col("a") + 1, col("a") + 1, True),
            (col("a") + 1, col("a") + 2, False),  # Different literal
            (col("a") + 1, col("b") + 1, False),  # Different column
            (col("a") + 1, col("a") - 1, False),  # Different operator
            # Nested binary expressions
            ((col("a") * 2) + (col("b") / 3), (col("a") * 2) + (col("b") / 3), True),
            ((col("a") * 2) + (col("b") / 3), (col("a") * 2) - (col("b") / 3), False),
            ((col("a") * 2) + (col("b") / 3), (col("c") * 2) + (col("b") / 3), False),
            ((col("a") * 2) + (col("b") / 3), (col("a") * 2) + (col("b") / 4), False),
            # Commutative operations are not structurally equal
            (col("a") + col("b"), col("b") + col("a"), False),
            (lit(1) * col("c"), col("c") * lit(1), False),
        ],
        ids=[
            "same_simple",
            "different_literal",
            "different_column",
            "different_operator",
            "same_nested",
            "nested_diff_op",
            "nested_diff_col",
            "nested_diff_lit",
            "commutative_add",
            "commutative_mul",
        ],
    )
    def test_binary_structural_equality(self, expr1, expr2, expected):
        """Test structural equality for binary expressions."""
        assert expr1.structurally_equals(expr2) is expected
        # Test symmetry
        assert expr2.structurally_equals(expr1) is expected
# ──────────────────────────────────────
# Unary Expression Tests
# ──────────────────────────────────────
class TestUnaryExpr:
    """Tests for UnaryExpr structure."""

    @pytest.mark.parametrize(
        "expr,expected_op",
        [
            (col("age").is_null(), Operation.IS_NULL),
            (col("name").is_not_null(), Operation.IS_NOT_NULL),
            (~col("active"), Operation.NOT),
        ],
        ids=["is_null", "is_not_null", "not"],
    )
    def test_unary_expression_structure(self, expr, expected_op):
        """Each helper builds a UnaryExpr carrying the matching Operation."""
        assert isinstance(expr, UnaryExpr)
        assert expr.op == expected_op
        assert isinstance(expr.operand, Expr)

    def test_unary_structural_equality(self):
        """Unary expressions match only on identical op and operand."""
        # Identical op + operand -> equal.
        assert col("age").is_null().structurally_equals(col("age").is_null())
        assert (
            col("active").is_not_null().structurally_equals(col("active").is_not_null())
        )
        assert (~col("flag")).structurally_equals(~col("flag"))
        # Same operand, different op -> not equal.
        assert not col("age").is_null().structurally_equals(col("age").is_not_null())
        # Same op, different operand -> not equal.
        assert not col("age").is_null().structurally_equals(col("name").is_null())
# ──────────────────────────────────────
# Alias Expression Tests
# ──────────────────────────────────────
class TestAliasExpr:
    """Tests for AliasExpr functionality.

    Covers alias creation over simple and compound expressions, structural
    equality between aliased expressions, the alias-vs-rename distinction,
    and that aliasing does not change evaluation results.
    """
    @pytest.mark.parametrize(
        "expr,alias_name,expected_alias",
        [
            (col("price"), "product_price", "product_price"),
            (lit(42), "answer", "answer"),
            (col("a") + col("b"), "sum", "sum"),
            ((col("price") * col("qty")) + lit(5), "total_with_fee", "total_with_fee"),
            (col("age") >= lit(18), "is_adult", "is_adult"),
        ],
        ids=["col_alias", "lit_alias", "binary_alias", "complex_alias", "comparison"],
    )
    def test_alias_functionality(self, expr, alias_name, expected_alias):
        """Test alias creation and properties.

        An alias must expose the requested name, wrap an expression
        structurally identical to the input, and keep the input's data type.
        """
        aliased_expr = expr.alias(alias_name)
        assert aliased_expr.name == expected_alias
        # The wrapped expression is reachable via .expr and must match the input.
        assert aliased_expr.expr.structurally_equals(expr)
        # Data type should be preserved
        assert aliased_expr.data_type == expr.data_type
    @pytest.mark.parametrize(
        "expr1,expr2,expected",
        [
            (col("a").alias("b"), col("a").alias("b"), True),
            (col("a").alias("b"), col("a").alias("c"), False),  # Different alias
            (col("a").alias("b"), col("b").alias("b"), False),  # Different column
            ((col("a") + 1).alias("result"), (col("a") + 1).alias("result"), True),
            (
                (col("a") + 1).alias("result"),
                (col("a") + 2).alias("result"),
                False,
            ),  # Different expr
            (col("a").alias("b"), col("a"), False),  # Alias vs non-alias
        ],
        ids=[
            "same_alias",
            "different_alias_name",
            "different_column",
            "same_complex",
            "different_expr",
            "alias_vs_non_alias",
        ],
    )
    def test_alias_structural_equality(self, expr1, expr2, expected):
        """Test structural equality for alias expressions.

        Equality requires both the alias name and the wrapped expression to
        match; an aliased expression never equals its unaliased form.
        """
        assert expr1.structurally_equals(expr2) is expected
    def test_alias_structural_equality_respects_rename_flag(self):
        # .alias("b") and the internal _rename("b") both attach the name "b"
        # to col("a"), yet must NOT compare structurally equal — presumably an
        # internal is-rename flag differentiates the two node kinds (per the
        # test name); TODO confirm against AliasExpr's definition.
        expr = col("a")
        aliased = expr.alias("b")
        renamed = expr._rename("b")
        # Each form is self-equal...
        assert aliased.structurally_equals(aliased)
        assert renamed.structurally_equals(renamed)
        # ...but alias vs rename (and differing alias names) are unequal.
        assert not aliased.structurally_equals(renamed)
        assert not aliased.structurally_equals(expr.alias("c"))
    def test_alias_evaluation_equivalence(self):
        """Test that alias evaluation produces same result as original.

        Aliasing only renames the output; the computed values must be
        identical to evaluating the unaliased expression.
        """
        import pandas as pd
        from ray.data._internal.planner.plan_expression.expression_evaluator import (
            eval_expr,
        )
        test_data = pd.DataFrame({"price": [10, 20], "qty": [2, 3]})
        expr = col("price") * col("qty")
        aliased = expr.alias("total")
        original_result = eval_expr(expr, test_data)
        aliased_result = eval_expr(aliased, test_data)
        assert original_result.equals(aliased_result)
# ──────────────────────────────────────
# Star Expression Tests
# ──────────────────────────────────────
class TestStarExpr:
    """Behavioral checks for the star() (all-columns) expression."""

    def test_star_creation(self):
        """star() must construct a StarExpr instance."""
        assert isinstance(star(), StarExpr)

    def test_star_structural_equality(self):
        """Two star() expressions compare equal; star vs. a column does not."""
        first = star()
        second = star()
        assert first.structurally_equals(second)
        assert not first.structurally_equals(col("a"))
# ──────────────────────────────────────
# UDF Expression Tests
# ──────────────────────────────────────
class TestUDFExpr:
    """Tests for UDFExpr structural equality.

    Covers plain-function UDFs (compared by their fn), callable-class UDFs
    (compared by their class spec and constructor args), and the fact that
    the two kinds never compare equal to each other.
    """
    def test_regular_function_udf_structural_equality(self):
        """Test that regular function UDFs compare fn correctly."""
        @udf(return_dtype=DataType.int32())
        def add_one(x: pa.Array) -> pa.Array:
            return pc.add(x, 1)
        @udf(return_dtype=DataType.int32())
        def add_two(x: pa.Array) -> pa.Array:
            return pc.add(x, 2)
        # Two invocations of the same decorated function with the same args.
        expr1 = add_one(col("value"))
        expr2 = add_one(col("value"))
        expr3 = add_two(col("value"))
        # Same function should be equal
        assert expr1.structurally_equals(expr2)
        # Different functions should not be equal
        assert not expr1.structurally_equals(expr3)
    def test_callable_class_udf_structural_equality(self):
        """Test that callable class UDFs with same spec are structurally equal."""
        @udf(return_dtype=DataType.int32())
        class AddOffset:
            def __init__(self, offset):
                # Constructor args are part of the UDF's identity (see expr3 below).
                self.offset = offset
            def __call__(self, x: pa.Array) -> pa.Array:
                return pc.add(x, self.offset)
        # Create the same callable class instance
        add_five = AddOffset(5)
        # Each call creates a new _placeholder function internally,
        # but the callable_class_spec should be the same
        expr1 = add_five(col("value"))
        expr2 = add_five(col("value"))
        # These should be structurally equal
        assert expr1.structurally_equals(expr2)
        # Equality must also be symmetric.
        assert expr2.structurally_equals(expr1)
        # Different constructor args should not be equal
        add_ten = AddOffset(10)
        expr3 = add_ten(col("value"))
        assert not expr1.structurally_equals(expr3)
        # Different column args should not be equal
        expr4 = add_five(col("other"))
        assert not expr1.structurally_equals(expr4)
    def test_callable_class_vs_regular_function_udf(self):
        """Test that callable class UDFs are not equal to regular function UDFs."""
        @udf(return_dtype=DataType.int32())
        class AddOne:
            def __call__(self, x: pa.Array) -> pa.Array:
                return pc.add(x, 1)
        @udf(return_dtype=DataType.int32())
        def add_one(x: pa.Array) -> pa.Array:
            return pc.add(x, 1)
        # Same behavior, same args — but the underlying UDF kind differs.
        class_expr = AddOne()(col("value"))
        func_expr = add_one(col("value"))
        # Different types of UDFs should not be equal
        assert not class_expr.structurally_equals(func_expr)
        assert not func_expr.structurally_equals(class_expr)
# ──────────────────────────────────────
# Cross-type Equality Tests
# ──────────────────────────────────────
class TestCrossTypeEquality:
    """Test that different expression types are not structurally equal.

    Also documents the distinction between the ``==`` operator (which builds
    a comparison expression) and ``structurally_equals`` (which returns bool).
    """
    @pytest.mark.parametrize(
        "expr1,expr2",
        [
            (col("a"), lit("a")),
            (col("a"), col("a") + 0),
            (lit(1), lit(1) + 0),
            (col("a"), col("a").alias("a")),
            (col("a"), star()),
        ],
        ids=[
            "col_vs_lit",
            "col_vs_binary",
            "lit_vs_binary",
            "col_vs_alias",
            "col_vs_star",
        ],
    )
    def test_different_types_not_equal(self, expr1, expr2):
        """Test that different expression types are not structurally equal."""
        # Inequality must hold in both directions (symmetry).
        assert not expr1.structurally_equals(expr2)
        assert not expr2.structurally_equals(expr1)
    def test_operator_eq_is_not_structural_eq(self):
        """Confirms that == builds an expression, while structurally_equals compares."""
        # `==` returns a BinaryExpr, not a boolean
        op_eq_expr = col("a") == col("a")
        assert isinstance(op_eq_expr, Expr)
        assert not isinstance(op_eq_expr, bool)
        # `structurally_equals` returns a boolean
        struct_eq_result = col("a").structurally_equals(col("a"))
        assert isinstance(struct_eq_result, bool)
        assert struct_eq_result is True
# ──────────────────────────────────────
# Expression Repr Tests
# ──────────────────────────────────────
def _build_complex_expr():
    """Build a convoluted expression that exercises all visitor code paths.

    NOTE: TestExpressionRepr asserts the exact repr of the returned tree, so
    any change to the structure below must be mirrored in those expected
    strings.
    """
    def custom_udf(x, y):
        return x + y
    # Create UDF expression
    udf_expr = UDFExpr(
        fn=custom_udf,
        args=[col("value"), lit(10)],
        kwargs={"z": col("multiplier")},
        data_type=DataType(int),
    )
    # Build the mega-complex expression: arithmetic (+, *, /, -, //),
    # comparisons (>=, <=, !=, <, >), boolean AND/OR/NOT, null checks,
    # is_in, download(), star(), a UDF with positional and keyword args,
    # and nested aliases.
    inner_expr = (
        ((col("age") + lit(10)) * col("rate") / lit(2.5) >= lit(100))
        & (
            col("name").is_not_null()
            | (col("status").is_in(["active", "pending"]) & col("verified"))
        )
        & ((col("count") - lit(5)) // lit(2) <= col("limit"))
        & ~(col("deleted").is_null() | (col("score") != lit(0)))
        & (download("uri") < star())
        & (udf_expr.alias("udf_result") > lit(50))
    ).alias("complex_filter")
    return ~inner_expr
class TestExpressionRepr:
    """Test expression string representations.

    Both tests consume the shared fixture built by _build_complex_expr(); the
    expected strings below are pinned byte-for-byte against that tree.
    """
    def test_tree_repr(self):
        """Test tree representation of expressions.

        repr() is expected to render the full expression tree with
        box-drawing connectors; the literal must match exactly.
        """
        expr = _build_complex_expr()
        expected = """NOT
└── operand: ALIAS('complex_filter')
    └── AND
        ├── left: AND
        │   ├── left: AND
        │   │   ├── left: AND
        │   │   │   ├── left: AND
        │   │   │   │   ├── left: GE
        │   │   │   │   │   ├── left: DIV
        │   │   │   │   │   │   ├── left: MUL
        │   │   │   │   │   │   │   ├── left: ADD
        │   │   │   │   │   │   │   │   ├── left: COL('age')
        │   │   │   │   │   │   │   │   └── right: LIT(10)
        │   │   │   │   │   │   │   └── right: COL('rate')
        │   │   │   │   │   │   └── right: LIT(2.5)
        │   │   │   │   │   └── right: LIT(100)
        │   │   │   │   └── right: OR
        │   │   │   │       ├── left: IS_NOT_NULL
        │   │   │   │       │   └── operand: COL('name')
        │   │   │   │       └── right: AND
        │   │   │   │           ├── left: IN
        │   │   │   │           │   ├── left: COL('status')
        │   │   │   │           │   └── right: LIT(['active', 'pending'])
        │   │   │   │           └── right: COL('verified')
        │   │   │   └── right: LE
        │   │   │       ├── left: FLOORDIV
        │   │   │       │   ├── left: SUB
        │   │   │       │   │   ├── left: COL('count')
        │   │   │       │   │   └── right: LIT(5)
        │   │   │       │   └── right: LIT(2)
        │   │   │       └── right: COL('limit')
        │   │   └── right: NOT
        │   │       └── operand: OR
        │   │           ├── left: IS_NULL
        │   │           │   └── operand: COL('deleted')
        │   │           └── right: NE
        │   │               ├── left: COL('score')
        │   │               └── right: LIT(0)
        │   └── right: LT
        │       ├── left: DOWNLOAD('uri')
        │       └── right: COL(*)
        └── right: GT
            ├── left: ALIAS('udf_result')
            │   └── UDF(custom_udf)
            │       ├── arg[0]: COL('value')
            │       ├── arg[1]: LIT(10)
            │       └── kwarg['z']: COL('multiplier')
            └── right: LIT(50)"""
        assert repr(expr) == expected
    def test_inline_repr_prefix(self):
        """Test that inline representation starts correctly.

        The full inline form is very long, so only its prefix and suffix are
        pinned here rather than the whole string.
        """
        expr = _build_complex_expr()
        visitor = _InlineExprReprVisitor()
        inline_repr = visitor.visit(expr)
        expected_prefix = "~((((((((col('age') + 10) * col('rate')) / 2.5) >= 100) & (col('name').is_not_null() | ((col('status')"
        assert inline_repr.startswith(expected_prefix)
        # The outermost alias should close the inline rendering.
        assert inline_repr.endswith(".alias('complex_filter')")
if __name__ == "__main__":
    # Allow running this test module directly; exit with pytest's status code.
    raise SystemExit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/unit/expressions/test_core.py",
"license": "Apache License 2.0",
"lines": 444,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/unit/expressions/test_namespace_list.py | """Unit tests for list namespace expressions.
These tests verify expression construction logic without requiring Ray.
"""
import pytest
from ray.data.expressions import col
class TestListNamespaceErrors:
    """Error-handling checks for the ``.list`` expression namespace."""

    def test_list_invalid_index_type(self):
        """Indexing ``.list`` with a non-int, non-slice key raises TypeError."""
        bad_key = "invalid"
        with pytest.raises(TypeError, match="List indices must be integers or slices"):
            col("items").list[bad_key]
if __name__ == "__main__":
    # Run this module's tests directly, propagating pytest's exit status.
    raise SystemExit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/unit/expressions/test_namespace_list.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.