| sample_id (string, 21–196 chars) | text (string, 105–936k chars) | metadata (dict) | category (6 classes) |
|---|---|---|---|
ray-project/ray:doc/source/ray-overview/examples/e2e-timeseries/e2e_timeseries/metrics.py | import numpy as np
def RSE(pred, true):
    """Relative squared error between predictions and ground truth.

    Returns a Python float: the L2 norm of the residuals divided by the
    L2 norm of the ground truth's deviation from its own mean.
    """
    residual_norm = np.sqrt(np.sum((true - pred) ** 2))
    deviation_norm = np.sqrt(np.sum((true - true.mean()) ** 2))
    return (residual_norm / deviation_norm).item()
def MAE(pred, true):
    """Mean absolute error, returned as a plain Python float."""
    abs_err = np.abs(pred - true)
    return float(np.mean(abs_err))
def MSE(pred, true):
    """Mean squared error, returned as a plain Python float."""
    sq_err = (pred - true) ** 2
    return float(np.mean(sq_err))


def RMSE(pred, true):
    """Root mean squared error: square root of MSE, as a Python float."""
    return float(np.sqrt(MSE(pred, true)))
def MAPE(pred, true):
    """Mean absolute percentage error (as a fraction, not a percent)."""
    rel_err = (pred - true) / true
    return float(np.mean(np.abs(rel_err)))


def MSPE(pred, true):
    """Mean squared percentage error (as a fraction, not a percent)."""
    rel_err = (pred - true) / true
    return float(np.mean(rel_err ** 2))
def metric(pred, true):
    """Compute every supported metric at once.

    Returns:
        Tuple of Python floats: (MAE, MSE, RMSE, MAPE, MSPE, RSE).
    """
    return (
        MAE(pred, true),
        MSE(pred, true),
        RMSE(pred, true),
        MAPE(pred, true),
        MSPE(pred, true),
        RSE(pred, true),
    )
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/e2e-timeseries/e2e_timeseries/metrics.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/ray-overview/examples/e2e-timeseries/e2e_timeseries/model.py | from typing import Any, Dict, Tuple
import torch
import torch.nn as nn
# Default pooling window / stride for the trend extractor below.
KERNEL_SIZE = 25
STRIDE = 1


class moving_avg(nn.Module):
    """Moving-average block that smooths a series to expose its trend.

    Implemented as 1D average pooling over the time axis; the series is
    replicated at both ends so that, for an odd kernel with stride 1, the
    output keeps the input sequence length.
    """

    def __init__(self, kernel_size: int = KERNEL_SIZE, stride: int = STRIDE):
        super().__init__()
        self.kernel_size = kernel_size
        self.avg = nn.AvgPool1d(kernel_size=kernel_size, stride=stride, padding=0)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the moving average.

        Args:
            x: Tensor of shape (batch, seq_len, features).

        Returns:
            Tensor of shape (batch, seq_len, features) holding the
            smoothed series.
        """
        pad = (self.kernel_size - 1) // 2
        # Replicate the first/last time step so pooling does not shrink
        # the sequence or bias the edges toward zero.
        head = x[:, 0:1, :].repeat(1, pad, 1)
        tail = x[:, -1:, :].repeat(1, pad, 1)
        padded = torch.cat([head, x, tail], dim=1)
        # AvgPool1d pools over the last dimension, so move time there,
        # pool, then move it back.
        smoothed = self.avg(padded.permute(0, 2, 1))
        return smoothed.permute(0, 2, 1)
class series_decomp(nn.Module):
    """Decompose a time series into seasonal and trend components.

    The trend is a moving average of the input; the seasonal component is
    the residual left after subtracting that trend.
    """

    def __init__(self, kernel_size: int):
        super().__init__()
        # stride=1 keeps the trend the same length as the input series.
        self.moving_avg = moving_avg(kernel_size, stride=1)

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Split ``x`` into its seasonal and trend parts.

        Args:
            x: Tensor of shape (batch, seq_len, num_features).

        Returns:
            Tuple of (seasonal, trend), each shaped like the input.
        """
        trend = self.moving_avg(x)
        seasonal = x - trend
        return seasonal, trend
class DLinear(nn.Module):
    """
    Decomposition-Linear (DLinear) model.

    The input series is split into trend and seasonal parts, and each part
    is projected from `seq_len` to `pred_len` with a linear layer — either
    one shared layer per part, or one pair of layers per channel when
    `individual` is set.
    """
    def __init__(self, configs: Dict[str, Any]):
        super().__init__()
        # Input window length and forecast horizon.
        self.seq_len: int = configs["seq_len"]
        self.pred_len: int = configs["pred_len"]
        # NOTE(review): "decompsition" is a typo for "decomposition"; kept
        # as-is because external code may reference the attribute name.
        self.decompsition = series_decomp(kernel_size=KERNEL_SIZE)
        self.individual: bool = configs["individual"]
        self.channels: int = configs["enc_in"]
        if self.individual:
            # One independent linear projection per input channel.
            self.Linear_Seasonal = nn.ModuleList()
            self.Linear_Trend = nn.ModuleList()
            for _ in range(self.channels):
                self.Linear_Seasonal.append(nn.Linear(self.seq_len, self.pred_len))
                self.Linear_Trend.append(nn.Linear(self.seq_len, self.pred_len))
        else:
            # A single projection shared across all channels.
            self.Linear_Seasonal = nn.Linear(self.seq_len, self.pred_len)
            self.Linear_Trend = nn.Linear(self.seq_len, self.pred_len)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass for the DLinear model.

        Args:
            x (torch.Tensor): Input tensor of shape [Batch, SeqLen, Channels].
                (The decomposition step indexes a channel dimension, so a
                3D input is required; 2D input would fail in decompsition.)
        Returns:
            torch.Tensor: Output tensor of shape [Batch, PredLen, Channels].
        """
        # DLinear model (and many time series models) expect input of shape:
        # (batch_size, sequence_length, num_input_features).
        # seasonal_init, trend_init shapes: [Batch, SeqLen, Channel].
        seasonal_init, trend_init = self.decompsition(x)
        # Permute to [Batch, Channel, SeqLen] for Linear layers.
        seasonal_init = seasonal_init.permute(0, 2, 1)
        trend_init = trend_init.permute(0, 2, 1)
        if self.individual:
            # Per-channel path: fill preallocated outputs channel by channel.
            seasonal_output = torch.zeros(
                [seasonal_init.size(0), seasonal_init.size(1), self.pred_len],
                dtype=seasonal_init.dtype,
            ).to(seasonal_init.device)
            trend_output = torch.zeros(
                [trend_init.size(0), trend_init.size(1), self.pred_len],
                dtype=trend_init.dtype,
            ).to(trend_init.device)
            for i in range(self.channels):
                seasonal_output[:, i, :] = self.Linear_Seasonal[i](
                    seasonal_init[:, i, :]
                )
                trend_output[:, i, :] = self.Linear_Trend[i](trend_init[:, i, :])
        else:
            # seasonal_init shape: [Batch, Channel, SeqLen].
            # Linear layer applies to the last dim (SeqLen).
            seasonal_output = self.Linear_Seasonal(
                seasonal_init
            )  # Output: [Batch, Channel, PredLen].
            trend_output = self.Linear_Trend(
                trend_init
            )  # Output: [Batch, Channel, PredLen].
        # Recombine the components; the decomposition is additive.
        output_x = seasonal_output + trend_output  # Shape: [Batch, Channel, PredLen].
        return output_x.permute(0, 2, 1)  # Transform to [Batch, PredLen, Channel].
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/e2e-timeseries/e2e_timeseries/model.py",
"license": "Apache License 2.0",
"lines": 118,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/ray-overview/examples/e2e-timeseries/e2e_timeseries/tools.py | def adjust_learning_rate(optimizer, epoch, config: dict):
    """Set the optimizer's learning rate according to the configured schedule.

    Args:
        optimizer: Torch-style optimizer whose ``param_groups`` are updated
            in place.
        epoch: Current epoch number (the schedules assume 1-based epochs).
        config: Must contain "lradj" (schedule name) and "learning_rate"
            (base rate used by the epoch-indexed schedules).
    """
    # Each branch builds a {epoch: lr} map; the update below only fires when
    # the map has an entry keyed by the *current* epoch.
    if config["lradj"] == "type1":
        # Halve the base rate every epoch. NOTE(review): `// 1` is a no-op,
        # so this is simply learning_rate * 0.5 ** (epoch - 1).
        lr_adjust = {epoch: config["learning_rate"] * (0.5 ** ((epoch - 1) // 1))}
    elif config["lradj"] == "type2":
        # Fixed absolute rates at specific milestone epochs only.
        lr_adjust = {2: 5e-5, 4: 1e-5, 6: 5e-6, 8: 1e-6, 10: 5e-7, 15: 1e-7, 20: 5e-8}
    elif config["lradj"] == "3":
        # Step decay: base rate until epoch 10, then 10x smaller.
        lr_adjust = {
            epoch: (
                config["learning_rate"] if epoch < 10 else config["learning_rate"] * 0.1
            )
        }
    elif config["lradj"] == "4":
        # Step decay with the drop at epoch 15.
        lr_adjust = {
            epoch: (
                config["learning_rate"] if epoch < 15 else config["learning_rate"] * 0.1
            )
        }
    elif config["lradj"] == "5":
        # Step decay with the drop at epoch 25.
        lr_adjust = {
            epoch: (
                config["learning_rate"] if epoch < 25 else config["learning_rate"] * 0.1
            )
        }
    elif config["lradj"] == "6":
        # Step decay with the drop at epoch 5.
        lr_adjust = {
            epoch: (
                config["learning_rate"] if epoch < 5 else config["learning_rate"] * 0.1
            )
        }
    else:
        # Unknown schedule name: warn and leave the optimizer untouched.
        print(
            f"Warning: learning rate adjustment type '{config['lradj']}' not recognized. Learning rate not adjusted."
        )
        return
    if epoch in lr_adjust:
        lr = lr_adjust[epoch]
        # Apply the new rate to every parameter group.
        for param_group in optimizer.param_groups:
            param_group["lr"] = lr
        print("Updating learning rate to {}".format(lr))
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/e2e-timeseries/e2e_timeseries/tools.py",
"license": "Apache License 2.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/ray-overview/examples/object-detection/ci/test_myst_doc.py | """Convert a jupytext-compliant format in to a python script
and execute it with parsed arguments.
Any cell with 'remove-cell-ci' tag in metadata will not be included
in the converted python script.
"""
import argparse
import subprocess
import sys
import tempfile
from pathlib import Path
import jupytext
import os
# CLI for the conversion/execution script below; the module docstring is
# reused as the help text.
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
    "--path",
    help="path to the jupytext-compatible file",
)
parser.add_argument(
    "--find-recursively",
    action="store_true",
    help="if true, will attempt to find path recursively in cwd",
)
parser.add_argument(
    "--no-postprocess",
    action="store_true",
    help="if true, will not postprocess the notebook",
)
def filter_out_cells_with_remove_cell_ci_tag(cells: list):
    """Return *cells* minus any cell tagged for CI removal.

    A cell is dropped when its metadata tags contain 'remove-cell-ci' or
    'remove_cell_ci' (both separators accepted, matching built-in tags).
    """

    def keep(cell) -> bool:
        tags = cell.metadata.get("tags")
        if not tags:
            return True
        return "remove_cell_ci" not in tags and "remove-cell-ci" not in tags

    return [cell for cell in cells if keep(cell)]
def postprocess_notebook(notebook):
    """Strip CI-excluded cells from *notebook* in place and return it."""
    filtered = filter_out_cells_with_remove_cell_ci_tag(notebook.cells)
    notebook.cells = filtered
    return notebook
# Shim prepended to the converted script: notebooks have `display` built in,
# plain Python scripts do not.
DISPLAY_FUNCTION = """
def display(*args, **kwargs):
    print(*args, **kwargs)
"""
if __name__ == "__main__":
    args, remainder = parser.parse_known_args()
    path = Path(args.path)
    cwd = Path.cwd()
    if args.find_recursively and not path.exists():
        # Fall back to searching the working tree for any file whose path
        # ends with the requested suffix.
        path = next((p for p in cwd.rglob("*") if str(p).endswith(args.path)), None)
    assert path and path.exists()
    with open(path, "r") as f:
        notebook = jupytext.read(f)
    if not args.no_postprocess:
        notebook = postprocess_notebook(notebook)
    name = ""
    with tempfile.NamedTemporaryFile("w", delete=False) as f:
        # Define the display function, which is available in notebooks,
        # but not in normal Python scripts.
        f.write(DISPLAY_FUNCTION)
        jupytext.write(notebook, f, fmt="py:percent")
        name = f.name
    # Execute the converted script with the current interpreter, forwarding
    # all unparsed CLI arguments to it.
    remainder.insert(0, name)
    remainder.insert(0, sys.executable)
    # Run the notebook.
    try:
        subprocess.run(remainder, check=True)
    finally:
        # Clean up the temp script no matter what.
        os.unlink(name)
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/object-detection/ci/test_myst_doc.py",
"license": "Apache License 2.0",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:doc/source/ray-overview/examples/object-detection/object_detection.py | import os
import io
from io import BytesIO
from typing import Dict
import boto3
from botocore import UNSIGNED
from botocore.config import Config
import torch
import requests
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from fastapi import FastAPI
from fastapi.responses import Response
import torchvision
from torchvision import models
from ray import serve
from ray.serve.handle import DeploymentHandle
from smart_open import open as smart_open
# Mapping from mask-detection class names to the integer labels the model
# was trained with.
CLASS_TO_LABEL: Dict[str, int] = {
    "background": 0,
    "with_mask": 1,
    "without_mask": 2,
    "mask_weared_incorrect": 3,
}
# Create the reverse mapping (label integer to class name) from CLASS_TO_LABEL.
LABEL_TO_CLASS: Dict[int, str] = {value: key for key, value in CLASS_TO_LABEL.items()}
# Box/label colors used when drawing detections ("background" gets no box).
LABEL_COLORS: Dict[str, str] = {
    "with_mask": "green",
    "without_mask": "red",
    "mask_weared_incorrect": "yellow",
}
# Model paths can be overridden using environment variables.
REMOTE_MODEL_PATH: str = os.getenv(
    "REMOTE_MODEL_PATH",
    "s3://face-masks-data/finetuned-models/fasterrcnn_model_mask_detection.pth",
)
# Local cache location on shared cluster storage for the downloaded weights.
CLUSTER_MODEL_PATH: str = os.getenv(
    "CLUSTER_MODEL_PATH", "/mnt/cluster_storage/fasterrcnn_model_mask_detection.pth"
)
# FastAPI app served by the APIIngress deployment below.
app = FastAPI()
@serve.deployment(num_replicas=1)
@serve.ingress(app)
class APIIngress:
    """HTTP front end: forwards /detect requests to the detection deployment."""
    def __init__(self, object_detection_handle: DeploymentHandle):
        # Handle used to call the ObjectDetection deployment.
        self.handle = object_detection_handle
    @app.get(
        "/detect",
        responses={200: {"content": {"image/jpeg": {}}}},
        response_class=Response,
    )
    async def detect(self, image_url: str) -> Response:
        """Run detection on the image at *image_url*; respond with JPEG bytes."""
        # Call the object detection service and return the processed image as JPEG.
        image = await self.handle.detect.remote(image_url)
        file_stream = BytesIO()
        image.save(file_stream, "jpeg")
        return Response(content=file_stream.getvalue(), media_type="image/jpeg")
@serve.deployment(
    ray_actor_options={"num_gpus": 1},
    autoscaling_config={"min_replicas": 1, "max_replicas": 10},
)
class ObjectDetection:
    """Serve deployment that runs Faster R-CNN face-mask detection on images.

    Weights are fetched from S3 to shared cluster storage on first use; the
    model runs on GPU when one is available.
    """
    def __init__(self):
        # Load the pre-trained Faster R-CNN model for mask detection.
        self.model = self._load_faster_rcnn_model()
        if torch.cuda.is_available():
            self.model = self.model.cuda()
    def _load_faster_rcnn_model(self):
        """Loads the Faster R-CNN model from a remote source if not already available locally."""
        # Download model only once from the remote storage to the cluster path.
        if not os.path.exists(CLUSTER_MODEL_PATH):
            os.makedirs(os.path.dirname(CLUSTER_MODEL_PATH), exist_ok=True)
            # Create S3 client, falling back to unsigned for public buckets
            session = boto3.Session()
            # session.get_credentials() will return None if no credentials can be found.
            if session.get_credentials():
                # If credentials are found, use a standard signed client.
                s3_client = session.client("s3")
            else:
                # No credentials found, fall back to an unsigned client for public buckets.
                s3_client = boto3.client(
                    "s3", config=Config(signature_version=UNSIGNED)
                )
            transport_params = {"client": s3_client}
            # Stream-download from S3 to cluster storage in 1 MiB chunks so the
            # whole checkpoint is never held in memory.
            with smart_open(
                REMOTE_MODEL_PATH, "rb", transport_params=transport_params
            ) as src, open(CLUSTER_MODEL_PATH, "wb") as dst:
                for chunk in iter(lambda: src.read(1024 * 1024), b""):
                    dst.write(chunk)
        # Load the model with the correct number of classes and weights.
        loaded_model = models.detection.fasterrcnn_resnet50_fpn(
            num_classes=len(LABEL_TO_CLASS)
        )
        loaded_model.load_state_dict(torch.load(CLUSTER_MODEL_PATH, map_location="cpu"))
        loaded_model.eval()
        return loaded_model
    def _load_image_from_url(self, url: str) -> Image.Image:
        """
        Loads an image from the given URL and converts it to RGB format.
        :param url: URL of the image.
        :return: PIL Image in RGB format.
        :raises requests.HTTPError: if the download fails.
        """
        response = requests.get(url)
        response.raise_for_status()
        return Image.open(BytesIO(response.content)).convert("RGB")
    def _predict_and_visualize(
        self, image: Image.Image, confidence_threshold: float = 0.5
    ) -> Image.Image:
        """
        Runs the detection model on the provided image and draws bounding boxes with labels.
        :param image: Input PIL Image (annotated in place).
        :param confidence_threshold: Score threshold to filter predictions.
        :return: PIL Image with visualized detections.
        """
        draw = ImageDraw.Draw(image)
        font = ImageFont.load_default()
        # Convert image to a CHW float tensor in [0, 1] and move to GPU if available.
        image_np = np.array(image)
        image_tensor = torch.from_numpy(image_np).permute(2, 0, 1).float() / 255.0
        image_tensor = image_tensor.to(
            torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        )
        with torch.no_grad():
            predictions = self.model([image_tensor])[0]
        # Filter predictions by confidence threshold.
        keep = predictions["scores"] > confidence_threshold
        boxes = predictions["boxes"][keep].cpu().numpy()
        labels = predictions["labels"][keep].cpu().numpy()
        scores = predictions["scores"][keep].cpu().numpy()
        for box, label, score in zip(boxes, labels, scores):
            x1, y1, x2, y2 = box
            class_name = LABEL_TO_CLASS.get(label, "unknown")
            box_color = LABEL_COLORS.get(class_name, "white")
            # Draw bounding box.
            draw.rectangle([x1, y1, x2, y2], outline=box_color, width=2)
            # Prepare and draw label text.
            text = f"{class_name} {score:.2f}"
            text_bbox = draw.textbbox((0, 0), text, font=font)
            text_height = text_bbox[3] - text_bbox[1]
            # Draw a filled background strip just above the box for the text.
            draw.rectangle(
                [x1, y1 - text_height - 2, x1 + (text_bbox[2] - text_bbox[0]), y1],
                fill=box_color,
            )
            # Draw text on top of the background; black on yellow for contrast.
            draw.text(
                (x1, y1 - text_height - 2),
                text,
                fill="black" if box_color == "yellow" else "white",
                font=font,
            )
        return image
    def detect(self, image_url: str) -> Image.Image:
        """
        Orchestrates the detection process: loads an image from a URL, runs prediction and visualization,
        and returns the annotated image.
        :param image_url: URL of the image to process.
        :return: Annotated PIL Image.
        """
        pil_image = self._load_image_from_url(image_url)
        result_image = self._predict_and_visualize(pil_image)
        return result_image
# Bind the deployments: the ingress gets a handle to the detector deployment.
entrypoint = APIIngress.bind(ObjectDetection.bind())
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/object-detection/object_detection.py",
"license": "Apache License 2.0",
"lines": 165,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/serve/tests/test_record_routing_stats.py | import asyncio
from typing import Any, Dict, Optional
import pytest
import ray
from ray import serve
from ray._common.test_utils import wait_for_condition
from ray.serve._private.common import ReplicaID
from ray.serve.config import RequestRouterConfig
from ray.serve.context import _get_internal_replica_context
from ray.serve.handle import DeploymentHandle
@serve.deployment(
    request_router_config=RequestRouterConfig(
        request_routing_stats_period_s=0.1, request_routing_stats_timeout_s=0.1
    )
)
class Patient:
    """Test deployment whose routing-stats callback can be made to fail or hang."""
    def __init__(self):
        # Stats returned by record_routing_stats (set via set_routing_stats).
        self.routing_stats: Dict[str, Any] = {}
        # When set (an Event), record_routing_stats blocks until it fires.
        self.should_hang: Optional[asyncio.Event] = None
        # When True, record_routing_stats raises instead of returning.
        self.should_fail: bool = False
        context = _get_internal_replica_context()
        self.replica_id: ReplicaID = context.replica_id
    async def record_routing_stats(self):
        """Callback polled by Serve; returns this replica's routing stats."""
        if self.should_hang:
            await self.should_hang.wait()
        if self.should_fail:
            raise Exception("intended to fail")
        return self.routing_stats
    def __call__(self, *args) -> ReplicaID:
        # Plain request handler: identifies which replica served the call.
        return self.replica_id
    def set_routing_stats(self, routing_stats: Dict[str, Any]) -> ReplicaID:
        """Set the stats to report and return this replica's ID."""
        self.routing_stats = routing_stats
        return self.replica_id
    def set_should_fail(self):
        # Subsequent record_routing_stats calls will raise.
        self.should_fail = True
    def set_should_hang(self):
        # Subsequent record_routing_stats calls will block forever (the
        # event is never set).
        self.should_hang = asyncio.Event()
def check_routing_stats_recorded(
    handle: DeploymentHandle,
    expected_stats: Dict[str, Any],
    replica_id: Optional[ReplicaID] = None,
) -> bool:
    """Assert the router has recorded *expected_stats* for a replica.

    When *replica_id* is not given, an arbitrary replica is checked.
    Returns True so this can be polled with wait_for_condition.
    """
    replicas = handle._router._asyncio_router.request_router._replicas
    target_running_replica = (
        replicas[replica_id] if replica_id else next(iter(replicas.values()))
    )
    assert (
        target_running_replica.routing_stats == expected_stats
    ), f"{target_running_replica.routing_stats=} != {expected_stats=}"
    return True
@pytest.mark.parametrize("use_class", [True, False])
def test_no_user_defined_method(serve_instance, use_class):
    """Check the default behavior: with no record_routing_stats defined on the
    deployment (class- or function-based), recorded routing stats are empty."""
    if use_class:
        @serve.deployment
        class A:
            def __call__(self, *args):
                return ray.get_runtime_context().current_actor
    else:
        @serve.deployment
        def A(*args):
            return ray.get_runtime_context().current_actor
    h = serve.run(A.bind())
    # Issue one request so the router has seen the replica.
    _ = h.remote().result()
    replicas = list(h._router._asyncio_router.request_router._replicas.values())
    assert len(replicas) == 1
    # No user-defined stats method => stats default to an empty dict.
    assert replicas[0].routing_stats == {}
@pytest.mark.asyncio
async def test_user_defined_method_fails(serve_instance):
    """Check the behavior when a user-defined method fails: previously
    recorded routing stats remain available."""
    expected_stats = {"foo": "bar"}
    h = serve.run(Patient.bind())
    await h.set_routing_stats.remote(expected_stats)
    replica_id = await h.remote()
    # Ensure the routing stats are recorded correctly before the failure.
    wait_for_condition(
        check_routing_stats_recorded,
        handle=h,
        expected_stats=expected_stats,
        replica_id=replica_id,
    )
    # Make record_routing_stats raise from now on, then keep serving traffic.
    await h.set_should_fail.remote()
    await asyncio.gather(*[h.remote() for _ in range(100)])
    # After the failure, the previous routing stats should still be accessible.
    wait_for_condition(
        check_routing_stats_recorded,
        handle=h,
        expected_stats=expected_stats,
        replica_id=replica_id,
    )
@pytest.mark.asyncio
async def test_user_defined_method_hangs(serve_instance):
    """Check the behavior when a user-defined method hangs: previously
    recorded routing stats remain available."""
    expected_stats = {"foo": "bar"}
    h = serve.run(Patient.bind())
    await h.set_routing_stats.remote(expected_stats)
    replica_id = await h.remote()
    # Ensure the routing stats are recorded correctly before the hang.
    wait_for_condition(
        check_routing_stats_recorded,
        handle=h,
        expected_stats=expected_stats,
        replica_id=replica_id,
    )
    # Make record_routing_stats block forever, then keep serving traffic.
    await h.set_should_hang.remote()
    await asyncio.gather(*[h.remote() for _ in range(100)])
    # After the hang, the previous routing stats should still be accessible.
    wait_for_condition(
        check_routing_stats_recorded,
        handle=h,
        expected_stats=expected_stats,
        replica_id=replica_id,
    )
@pytest.mark.asyncio
async def test_multiple_replicas(serve_instance):
    """Check that routing stats are tracked per replica: updating one
    replica's stats does not affect the other's."""
    h = serve.run(Patient.options(num_replicas=2).bind())
    # Fan out enough requests to hit both replicas.
    replica_ids = set(await asyncio.gather(*[h.remote() for _ in range(100)]))
    assert len(replica_ids) == 2
    # Ensure that the routing stats is set for one of the replicas.
    expected_stats = {"foo": "bar"}
    updated_stats_replica_id = await h.set_routing_stats.remote(expected_stats)
    wait_for_condition(
        check_routing_stats_recorded,
        handle=h,
        expected_stats=expected_stats,
        replica_id=updated_stats_replica_id,
    )
    # Ensure that the routing stats is not set for the other replica.
    replica_ids.remove(updated_stats_replica_id)
    unupdated_stats_replica_id = replica_ids.pop()
    wait_for_condition(
        check_routing_stats_recorded,
        handle=h,
        expected_stats={},
        replica_id=unupdated_stats_replica_id,
    )
if __name__ == "__main__":
    import sys
    # Allow running this test file directly with `python <file>`.
    sys.exit(pytest.main(["-v", "-s", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/tests/test_record_routing_stats.py",
"license": "Apache License 2.0",
"lines": 141,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:doc/source/ray-core/doc_code/anti_pattern_nested_ray_get.py | # __anti_pattern_start__
import ray
import time
@ray.remote
def f():
    """Trivial task whose result the other tasks aggregate."""
    return 1
@ray.remote
def pass_via_nested_ref(refs):
    """Anti-pattern: receives a list of ObjectRefs and must call ray.get."""
    print(sum(ray.get(refs)))
@ray.remote
def pass_via_direct_arg(*args):
    """Preferred: Ray resolves top-level ObjectRef args to values automatically."""
    print(sum(args))
# Anti-pattern: Passing nested refs requires `ray.get` in a nested task.
ray.get(pass_via_nested_ref.remote([f.remote() for _ in range(3)]))
# Better approach: Pass refs as direct arguments. Use *args syntax to unpack
# multiple arguments.
ray.get(pass_via_direct_arg.remote(*[f.remote() for _ in range(3)]))
# __anti_pattern_end__
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-core/doc_code/anti_pattern_nested_ray_get.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/tests/test_runtime_env_standalone.py | """runtime_env tests that require their own custom fixture.
The other runtime_env tests use a shared Ray instance across the test module
to reduce overheads & overall test runtime.
"""
import fnmatch
import logging
import os
import sys
import time
from pathlib import Path
from typing import List
import pytest
import ray
from ray._common.test_utils import wait_for_condition
from ray._private.runtime_env.context import RuntimeEnvContext
from ray._private.runtime_env.plugin import RuntimeEnvPlugin
from ray._private.test_utils import (
get_error_message,
get_log_sources,
)
from ray.exceptions import RuntimeEnvSetupError
from ray.job_submission import JobStatus, JobSubmissionClient
from ray.runtime_env import RuntimeEnv
@pytest.mark.skipif(sys.platform == "win32", reason="Flaky on Windows.")
def test_no_spurious_worker_startup(shutdown_only, monkeypatch):
    """Test that no extra workers start up during a long env installation."""
    # Causes agent to sleep for 15 seconds to simulate creating a runtime env.
    monkeypatch.setenv("RAY_RUNTIME_ENV_SLEEP_FOR_TESTING_S", "15")
    ray.init(num_cpus=1)
    @ray.remote
    class Counter(object):
        def __init__(self):
            self.value = 0
        def get(self):
            return self.value
    # Set a nonempty runtime env so that the runtime env setup hook is called.
    runtime_env = RuntimeEnv(env_vars={"a": "b"})
    # Instantiate an actor that requires the long runtime env installation.
    a = Counter.options(runtime_env=runtime_env).remote()
    assert ray.get(a.get.remote()) == 0
    # Check "debug_state.txt" to ensure no extra workers were started.
    session_dir = ray._private.worker.global_worker.node.address_info["session_dir"]
    session_path = Path(session_dir)
    debug_state_path = session_path / "logs" / "debug_state.txt"
    def get_num_workers():
        # Parse the worker count out of the raylet's debug state dump;
        # returns None if the line hasn't appeared yet.
        with open(debug_state_path) as f:
            for line in f.readlines():
                num_workers_prefix = "- num PYTHON workers: "
                if num_workers_prefix in line:
                    return int(line[len(num_workers_prefix) :])
        return None
    # Wait for "debug_state.txt" to be updated to reflect the started worker.
    start = time.time()
    wait_for_condition(lambda: get_num_workers() is not None and get_num_workers() > 0)
    time_waited = time.time() - start
    print(f"Waited {time_waited} for debug_state.txt to be updated")
    # If any workers were unnecessarily started during the initial env
    # installation, they will bypass the runtime env setup hook (because the
    # created env will have been cached) and should be added to num_workers
    # within a few seconds. Adjusting the default update period for
    # debut_state.txt via this cluster_utils pytest fixture seems to be broken,
    # so just check it for the next 10 seconds (the default period).
    start = time.time()
    got_num_workers = False
    while time.time() - start < 10:
        # Check that no more than one extra worker is started. We add one
        # because Ray will prestart an idle worker for the one available CPU.
        num_workers = get_num_workers()
        if num_workers is not None:
            got_num_workers = True
            assert num_workers <= 2
        time.sleep(0.1)
    assert got_num_workers, "failed to read num workers for 10 seconds"
@pytest.fixture
def runtime_env_local_dev_env_var(monkeypatch):
    # Enable runtime_env local dev mode for the duration of a test.
    monkeypatch.setenv("RAY_RUNTIME_ENV_LOCAL_DEV_MODE", "1")
    yield
@pytest.mark.skipif(sys.platform == "win32", reason="very slow on Windows.")
def test_runtime_env_no_spurious_resource_deadlock_msg(
    runtime_env_local_dev_env_var, ray_start_regular, error_pubsub
):
    """A slow pip-based runtime env must not trigger a resource deadlock warning."""
    p = error_pubsub
    runtime_env = RuntimeEnv(pip=["tensorflow", "torch"])
    @ray.remote(runtime_env=runtime_env)
    def f():
        pass
    # Check no warning printed.
    ray.get(f.remote())
    errors = get_error_message(
        p, 5, ray._private.ray_constants.RESOURCE_DEADLOCK_ERROR, timeout=5
    )
    assert len(errors) == 0
# Constants for the slow-startup plugin below. The class path is resolved by
# the runtime env agent at startup. (A duplicate assignment of the class-path
# constant with an identical value was removed.)
RT_ENV_AGENT_SLOW_STARTUP_PLUGIN_CLASS_PATH = (
    "ray.tests.test_runtime_env_standalone.RtEnvAgentSlowStartupPlugin"  # noqa
)
RT_ENV_AGENT_SLOW_STARTUP_PLUGIN_NAME = "RtEnvAgentSlowStartupPlugin"
class RtEnvAgentSlowStartupPlugin(RuntimeEnvPlugin):
    """Plugin whose import-time construction is slow, to simulate a runtime
    env agent that takes a long time to start."""
    name = RT_ENV_AGENT_SLOW_STARTUP_PLUGIN_NAME
    def __init__(self):
        # This happens in Runtime Env Agent start up process. Make it slow.
        time.sleep(5)
        print("starting...")
@pytest.mark.parametrize(
    "set_runtime_env_plugins",
    [
        '[{"class":"' + RT_ENV_AGENT_SLOW_STARTUP_PLUGIN_CLASS_PATH + '"}]',
    ],
    indirect=True,
)
def test_slow_runtime_env_agent_startup_on_task_pressure(
    shutdown_only, set_runtime_env_plugins
):
    """
    Starts nodes with runtime env agent and a slow plugin. Then when the runtime env
    agent is still starting up, we submit a lot of tasks to the cluster. The tasks
    should wait for the runtime env agent to start up and then run.
    https://github.com/ray-project/ray/issues/45353
    """
    @ray.remote(num_cpus=0.1)
    def get_foo():
        return os.environ.get("foo")
    print("Submitting 20 tasks...")
    # Each task has a different runtime env to ensure the agent is invoked for each.
    vals = ray.get(
        [
            get_foo.options(runtime_env={"env_vars": {"foo": f"bar{i}"}}).remote()
            for i in range(20)
        ]
    )
    print("20 tasks done.")
    # Every task must have observed its own env var value.
    assert vals == [f"bar{i}" for i in range(20)]
MY_PLUGIN_CLASS_PATH = "ray.tests.test_runtime_env_standalone.MyPlugin"
MY_PLUGIN_NAME = "MyPlugin"
# Number of creation attempts after which MyPlugin stops failing.
success_retry_number = 3
# Mutated by MyPlugin.modify_context; counts creation attempts so far.
runtime_env_retry_times = 0
# This plugin makes runtime env creation fail until the retry count
# reaches `success_retry_number`.
class MyPlugin(RuntimeEnvPlugin):
    """Fault-injection plugin: fails env creation until enough retries occur."""

    name = MY_PLUGIN_NAME

    @staticmethod
    def validate(runtime_env_dict: dict) -> str:
        """Return this plugin's config entry from the runtime env dict."""
        return runtime_env_dict[MY_PLUGIN_NAME]

    @staticmethod
    def modify_context(
        uris: List[str],
        runtime_env: dict,
        ctx: RuntimeEnvContext,
        logger: logging.Logger,
    ) -> None:
        """Raise until the global retry counter reaches success_retry_number.

        Each call bumps the module-level `runtime_env_retry_times` counter;
        only the `success_retry_number`-th call succeeds. (A redundant
        trailing `pass` was removed.)
        """
        global runtime_env_retry_times
        runtime_env_retry_times += 1
        if runtime_env_retry_times != success_retry_number:
            raise ValueError(f"Fault injection {runtime_env_retry_times}")
@pytest.mark.parametrize(
    "set_runtime_env_retry_times",
    [
        str(success_retry_number - 1),
        str(success_retry_number),
    ],
    indirect=True,
)
@pytest.mark.parametrize(
    "set_runtime_env_plugins",
    [
        '[{"class":"' + MY_PLUGIN_CLASS_PATH + '"}]',
    ],
    indirect=True,
)
def test_runtime_env_retry(
    set_runtime_env_retry_times, set_runtime_env_plugins, ray_start_regular
):
    """Env creation succeeds iff the configured retry budget covers the
    number of injected MyPlugin failures."""
    @ray.remote
    def f():
        return "ok"
    # Local shadow of the module-level counter: here it is the *configured*
    # retry budget, not the plugin's call count.
    runtime_env_retry_times = int(set_runtime_env_retry_times)
    if runtime_env_retry_times >= success_retry_number:
        # Enough retry times
        output = ray.get(
            f.options(runtime_env={MY_PLUGIN_NAME: {"key": "value"}}).remote()
        )
        assert output == "ok"
    else:
        # No enough retry times
        with pytest.raises(
            RuntimeEnvSetupError, match=f"Fault injection {runtime_env_retry_times}"
        ):
            ray.get(f.options(runtime_env={MY_PLUGIN_NAME: {"key": "value"}}).remote())
@pytest.fixture
def enable_dev_mode(local_env_var_enabled, monkeypatch):
    # Toggle runtime_env log-to-driver based on the parametrized flag.
    enabled = "1" if local_env_var_enabled else "0"
    monkeypatch.setenv("RAY_RUNTIME_ENV_LOG_TO_DRIVER_ENABLED", enabled)
    yield
@pytest.mark.skipif(
    sys.platform == "win32", reason="conda in runtime_env unsupported on Windows."
)
@pytest.mark.parametrize("local_env_var_enabled", [False, True])
def test_runtime_env_log_msg(
    local_env_var_enabled,
    enable_dev_mode,
    ray_start_cluster_head,
    log_pubsub,
):
    """Runtime env logs are forwarded to the driver iff dev mode is enabled."""
    p = log_pubsub
    @ray.remote
    def f():
        pass
    good_env = RuntimeEnv(pip=["requests"])
    ray.get(f.options(runtime_env=good_env).remote())
    sources = get_log_sources(p, 5)
    if local_env_var_enabled:
        assert "runtime_env" in sources
    else:
        assert "runtime_env" not in sources
def assert_no_user_info_in_logs(user_info: str, file_whitelist: List[str] = None):
    """Assert that *user_info* appears nowhere in the session's log files.

    Files whose names glob-match any pattern in *file_whitelist* are skipped.
    """
    patterns = file_whitelist if file_whitelist is not None else []
    node = ray._private.worker.global_worker.node
    log_dir = os.path.join(node.get_session_dir_path(), "logs")
    for root, _dirs, files in os.walk(log_dir):
        for file in files:
            whitelisted = any(fnmatch.fnmatch(file, pat) for pat in patterns)
            if whitelisted:
                continue
            # Some lines contain hex IDs, so ignore the UTF decoding errors.
            with open(os.path.join(root, file), "r", errors="ignore") as f:
                for line in f:
                    assert user_info not in line, (file, user_info, line)
class TestNoUserInfoInLogs:
    """Test that no user info (e.g. runtime env env vars) show up in the logs."""
    def test_assert_no_user_info_in_logs(self, shutdown_only):
        """Test assert_no_user_info_in_logs does not spuriously pass."""
        ray.init()
        # "ray" definitely appears in the logs, so the checker must trip...
        with pytest.raises(AssertionError):
            assert_no_user_info_in_logs("ray")
        # ...unless every file is whitelisted.
        assert_no_user_info_in_logs("ray", file_whitelist=["*"])
    def test_basic(self, tmp_path, shutdown_only):
        """Test that no user info shows up in the logs."""
        # Runtime env logs may still appear in debug logs. Check the debug flag is off.
        assert os.getenv("RAY_BACKEND_LOG_LEVEL") != "debug"
        # Reuse the same "secret" for working_dir, pip, env_vars for convenience.
        USER_SECRET = "pip-install-test"
        working_dir = tmp_path / USER_SECRET
        working_dir.mkdir()
        runtime_env = {
            "working_dir": str(working_dir),
            "pip": [USER_SECRET],
            "env_vars": {USER_SECRET: USER_SECRET},
        }
        ray.init(runtime_env=runtime_env, include_dashboard=True)
        # Run a function to ensure the runtime env is set up.
        @ray.remote
        def f():
            return os.environ.get(USER_SECRET)
        assert USER_SECRET in ray.get(f.remote())
        @ray.remote
        class Foo:
            def __init__(self):
                self.x = os.environ.get(USER_SECRET)
            def get_x(self):
                return self.x
        foo = Foo.remote()
        assert USER_SECRET in ray.get(foo.get_x.remote())
        # Generate runtime env failure logs too.
        bad_runtime_env = {
            "pip": ["pkg-which-sadly-does-not-exist"],
            "env_vars": {USER_SECRET: USER_SECRET},
        }
        with pytest.raises(Exception):
            ray.get(f.options(runtime_env=bad_runtime_env).remote())
        with pytest.raises(Exception):
            foo2 = Foo.options(runtime_env=bad_runtime_env).remote()
            ray.get(foo2.get_x.remote())
        # Test Ray Jobs API codepath.
        # Skip for Windows because Dashboard and Ray Jobs are not tested on Windows.
        if sys.platform != "win32":
            client = JobSubmissionClient()
            job_id_good_runtime_env = client.submit_job(
                entrypoint="echo 'hello world'", runtime_env=runtime_env
            )
            job_id_bad_runtime_env = client.submit_job(
                entrypoint="echo 'hello world'", runtime_env=bad_runtime_env
            )
            def job_succeeded(job_id):
                job_status = client.get_job_status(job_id)
                return job_status == JobStatus.SUCCEEDED
            def job_failed(job_id):
                job_status = client.get_job_status(job_id)
                return job_status == JobStatus.FAILED
            wait_for_condition(lambda: job_succeeded(job_id_good_runtime_env))
            wait_for_condition(lambda: job_failed(job_id_bad_runtime_env), timeout=30)
        # The secret is expected in runtime_env setup/export logs, which are
        # whitelisted; it must not leak anywhere else.
        with pytest.raises(AssertionError):
            assert_no_user_info_in_logs(USER_SECRET)
        assert_no_user_info_in_logs(
            USER_SECRET, file_whitelist=["runtime_env*.log", "event_EXPORT*.log"]
        )
@pytest.mark.skipif(sys.platform == "win32", reason="Hangs on windows.")
def test_failed_job_env_no_hang(shutdown_only):
    """Test that after a failed job-level env, tasks can still be run."""
    # Start the driver with a job-level runtime env that can never be
    # installed (the pip package does not exist).
    bad_job_env = RuntimeEnv(pip=["ray-doesnotexist-123"])
    ray.init(runtime_env=bad_job_env)

    @ray.remote
    def f():
        import pip_install_test  # noqa: F401

        return True

    # A task that overrides the job env with a valid one must still succeed.
    good_task_env = RuntimeEnv(pip=["pip-install-test==0.5"])
    assert ray.get(f.options(runtime_env=good_task_env).remote())

    # Task with no runtime env should inherit the bad job env.
    with pytest.raises(RuntimeEnvSetupError):
        ray.get(f.remote())
# Allow running this test file directly; exit with pytest's status code.
if __name__ == "__main__":
    sys.exit(pytest.main(["-sv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/test_runtime_env_standalone.py",
"license": "Apache License 2.0",
"lines": 317,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:doc/source/ray-overview/examples/e2e-multimodal-ai-workloads/ci/nb2py.py | #!/usr/bin/env python3
import argparse
import nbformat
def convert_notebook(input_path: str, output_path: str) -> None:
    """
    Read a Jupyter notebook and write a Python script, converting all %%bash
    cells and IPython "!" commands into subprocess.run calls that raise on error.
    Cells that load or autoreload extensions are ignored.

    Args:
        input_path: Path to the input .ipynb file.
        output_path: Path for the generated .py script (written as UTF-8).
    """
    nb = nbformat.read(input_path, as_version=4)
    # Notebooks are JSON (UTF-8); write the script explicitly as UTF-8 so the
    # conversion does not fail on non-ASCII cell content under a non-UTF-8
    # default locale (e.g. the legacy code page on Windows).
    with open(output_path, "w", encoding="utf-8") as out:
        for cell in nb.cells:
            # Only process code cells
            if cell.cell_type != "code":
                continue
            lines = cell.source.splitlines()
            # Skip cells that load or autoreload extensions
            if any(
                line.strip().startswith("%load_ext autoreload")
                or line.strip().startswith("%autoreload all")
                for line in lines
            ):
                continue
            if lines and lines[0].strip().startswith("%%bash"):
                _write_bash_cell(out, lines)
            elif any(line.lstrip().startswith("!") for line in lines):
                _write_bang_cell(out, lines)
            else:
                # Regular Python cell: dump as-is
                out.write(cell.source.rstrip() + "\n\n")


def _write_bash_cell(out, lines) -> None:
    """Emit a %%bash cell as a single subprocess.run(...) call."""
    # NOTE(review): the script is embedded in an r'''...''' literal, so a cell
    # that itself contains ''' would produce a broken script — unchanged from
    # the original behavior.
    bash_script = "\n".join(lines[1:]).rstrip()
    out.write("import subprocess\n")
    out.write(
        f"subprocess.run(r'''{bash_script}''',\n"
        " shell=True,\n"
        " check=True,\n"
        " executable='/bin/bash')\n\n"
    )


def _write_bang_cell(out, lines) -> None:
    """Emit a cell containing IPython '!' commands, converting each '!' line
    into a subprocess.run(...) call and passing other lines through."""
    out.write("import subprocess\n")
    for line in lines:
        stripped = line.lstrip()
        if stripped.startswith("!"):
            cmd = stripped[1:].lstrip()
            out.write(
                f"subprocess.run(r'''{cmd}''',\n"
                " shell=True,\n"
                " check=True,\n"
                " executable='/bin/bash')\n"
            )
        else:
            out.write(line.rstrip() + "\n")
    out.write("\n")
def main() -> None:
    """Parse command-line arguments and run the notebook conversion."""
    arg_parser = argparse.ArgumentParser(
        description="Convert a Jupyter notebook to a Python script, preserving bash cells and '!' commands as subprocess calls."
    )
    arg_parser.add_argument("input_nb", help="Path to the input .ipynb file")
    arg_parser.add_argument("output_py", help="Path for the output .py script")
    parsed = arg_parser.parse_args()
    convert_notebook(parsed.input_nb, parsed.output_py)
# Script entry point: convert the notebook given on the command line.
if __name__ == "__main__":
    main()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/e2e-multimodal-ai-workloads/ci/nb2py.py",
"license": "Apache License 2.0",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/_common/test_utils.py | """Test utilities for Ray.
This module contains test utility classes that are distributed with the Ray package
and can be used by external libraries and tests. These utilities must remain in
_common/ (not in tests/) to be accessible in the Ray package distribution.
"""
import asyncio
import inspect
import logging
import os
import subprocess
import sys
import threading
import time
import traceback
import uuid
from collections import defaultdict
from collections.abc import Awaitable
from contextlib import contextmanager
from dataclasses import dataclass, field
from enum import Enum
from typing import Any, Callable, Dict, Iterator, List, Optional, Set
import ray
import ray._common.usage.usage_lib as ray_usage_lib
import ray._private.utils
from ray._common.network_utils import build_address
from ray._common.utils import decode
logger = logging.getLogger(__name__)
# `prometheus_client` is an optional dependency: keep these names importable
# even when it is absent, so that only code that actually parses metrics
# fails (at call time) rather than this whole module failing at import time.
try:
    from prometheus_client.core import Metric
    from prometheus_client.parser import Sample, text_string_to_metric_families
except (ImportError, ModuleNotFoundError):
    Metric = None
    Sample = None

    # Fallback stub with the same name; raises only if parsing is attempted.
    def text_string_to_metric_families(*args, **kwargs):
        raise ModuleNotFoundError("`prometheus_client` not found")
@ray.remote(num_cpus=0)
class SignalActor:
    """A Ray actor for coordinating test execution through signals.

    Useful for testing async coordination, waiting for specific states,
    and synchronizing multiple actors or tasks in tests.
    """

    def __init__(self):
        self.ready_event = asyncio.Event()
        self.num_waiters = 0

    def send(self, clear: bool = False):
        """Wake all current waiters; optionally re-arm the event right away."""
        self.ready_event.set()
        if clear:
            self.ready_event.clear()

    async def wait(self, should_wait: bool = True):
        """Block until the signal is sent (no-op when should_wait is False)."""
        if not should_wait:
            return
        self.num_waiters += 1
        await self.ready_event.wait()
        self.num_waiters -= 1

    async def cur_num_waiters(self) -> int:
        """Return how many callers are currently blocked in wait()."""
        return self.num_waiters
@ray.remote(num_cpus=0)
class Semaphore:
    """A Ray actor implementing a semaphore for test coordination.

    Useful for testing resource limiting, concurrency control,
    and coordination between multiple actors or tasks.
    """

    def __init__(self, value: int = 1):
        # Backing asyncio semaphore; all methods simply delegate to it.
        self._sema = asyncio.Semaphore(value=value)

    async def acquire(self):
        """Take one permit, waiting until one is available."""
        await self._sema.acquire()

    async def release(self):
        """Return one permit."""
        self._sema.release()

    async def locked(self) -> bool:
        """Return True when no permits remain."""
        return self._sema.locked()
__all__ = ["SignalActor", "Semaphore"]
def wait_for_condition(
    condition_predictor: Callable[..., bool],
    timeout: float = 10,
    retry_interval_ms: float = 100,
    raise_exceptions: bool = False,
    **kwargs: Any,
):
    """Poll ``condition_predictor`` until it returns True or the timeout expires.

    Args:
        condition_predictor: A function that predicts the condition.
        timeout: Maximum timeout in seconds.
        retry_interval_ms: Retry interval in milliseconds.
        raise_exceptions: If true, exceptions that occur while executing
            condition_predictor won't be caught and instead will be raised.
        **kwargs: Arguments to pass to the condition_predictor.

    Returns:
        None: Returns when the condition is met.

    Raises:
        RuntimeError: If the condition is not met before the timeout expires.
    """
    deadline = time.monotonic() + timeout
    last_failure = None
    while time.monotonic() <= deadline:
        try:
            satisfied = condition_predictor(**kwargs)
        except Exception:
            if raise_exceptions:
                raise
            last_failure = ray._private.utils.format_error_message(
                traceback.format_exc()
            )
        else:
            if satisfied:
                return
        time.sleep(retry_interval_ms / 1000.0)
    message = "The condition wasn't met before the timeout expired."
    if last_failure is not None:
        message += f" Last exception: {last_failure}"
    raise RuntimeError(message)
async def async_wait_for_condition(
    condition_predictor: Callable[..., Awaitable[bool]],
    timeout: float = 10,
    retry_interval_ms: float = 100,
    **kwargs: Any,
):
    """Async variant: poll ``condition_predictor`` until True or timeout.

    The predictor may be either a plain callable or a coroutine function.

    Args:
        condition_predictor: A function that predicts the condition.
        timeout: Maximum timeout in seconds.
        retry_interval_ms: Retry interval in milliseconds.
        **kwargs: Arguments to pass to the condition_predictor.

    Returns:
        None: Returns when the condition is met.

    Raises:
        RuntimeError: If the condition is not met before the timeout expires.
    """
    deadline = time.monotonic() + timeout
    last_failure = None
    while time.monotonic() <= deadline:
        try:
            if inspect.iscoroutinefunction(condition_predictor):
                satisfied = await condition_predictor(**kwargs)
            else:
                satisfied = condition_predictor(**kwargs)
            if satisfied:
                return
        except Exception as exc:
            # Remember the failure; keep polling until the deadline.
            last_failure = exc
        await asyncio.sleep(retry_interval_ms / 1000.0)
    message = "The condition wasn't met before the timeout expired."
    if last_failure is not None:
        message += f" Last exception: {last_failure}"
    raise RuntimeError(message)
@contextmanager
def simulate_s3_bucket(
    port: int = 5002,
    region: str = "us-west-2",
) -> Iterator[str]:
    """Context manager that simulates an S3 bucket and yields the URI.

    Args:
        port: The port of the localhost endpoint where S3 is being served.
        region: The S3 region.

    Yields:
        str: URI for the simulated S3 bucket.
    """
    from moto.server import ThreadedMotoServer

    # BUG FIX: snapshot the environment with .copy(). The previous code bound
    # `old_env = os.environ` (an alias, not a copy) and then rebound the
    # `os.environ` attribute, which restored nothing — the fake AWS
    # credentials leaked into the process env permanently.
    old_env = os.environ.copy()
    os.environ["AWS_ACCESS_KEY_ID"] = "testing"
    os.environ["AWS_SECRET_ACCESS_KEY"] = "testing"
    os.environ["AWS_SECURITY_TOKEN"] = "testing"
    os.environ["AWS_SESSION_TOKEN"] = "testing"

    s3_server = f"http://{build_address('localhost', port)}"
    server = ThreadedMotoServer(port=port)
    server.start()
    try:
        yield f"s3://{uuid.uuid4().hex}?region={region}&endpoint_override={s3_server}"
    finally:
        # Always stop the server and restore the environment, even when the
        # body of the `with` block raises.
        server.stop()
        os.environ.clear()
        os.environ.update(old_env)
class TelemetryCallsite(Enum):
    """Where `check_library_usage_telemetry` invokes the library-using callable."""

    DRIVER = "driver"
    ACTOR = "actor"
    TASK = "task"
def _get_library_usages() -> Set[str]:
    """Return the set of library usages currently recorded in the GCS."""
    return set(
        ray_usage_lib.get_library_usages_to_report(
            ray.experimental.internal_kv.internal_kv_get_gcs_client()
        )
    )
def _get_extra_usage_tags() -> Dict[str, str]:
    """Return the extra usage tags currently recorded in the GCS."""
    return ray_usage_lib.get_extra_usage_tags_to_report(
        ray.experimental.internal_kv.internal_kv_get_gcs_client()
    )
def check_library_usage_telemetry(
    use_lib_fn: Callable[[], None],
    *,
    callsite: TelemetryCallsite,
    expected_library_usages: List[Set[str]],
    expected_extra_usage_tags: Optional[Dict[str, str]] = None,
):
    """Helper for writing tests to validate library usage telemetry.

    `use_lib_fn` is a callable that will be called from the provided callsite.
    After calling it, the telemetry data to export will be validated against
    expected_library_usages and expected_extra_usage_tags.
    """
    # Precondition: no library usage recorded before the callable runs.
    assert len(_get_library_usages()) == 0, _get_library_usages()

    if callsite == TelemetryCallsite.DRIVER:
        use_lib_fn()
    elif callsite == TelemetryCallsite.ACTOR:

        @ray.remote
        class A:
            def __init__(self):
                use_lib_fn()

        a = A.remote()
        # Block until the actor constructor (and thus use_lib_fn) has run.
        ray.get(a.__ray_ready__.remote())
    elif callsite == TelemetryCallsite.TASK:

        @ray.remote
        def f():
            use_lib_fn()

        ray.get(f.remote())
    else:
        assert False, f"Unrecognized callsite: {callsite}"

    library_usages = _get_library_usages()
    extra_usage_tags = _get_extra_usage_tags()
    # `expected_library_usages` is a list of acceptable usage sets.
    assert library_usages in expected_library_usages, library_usages
    if expected_extra_usage_tags:
        # Only the expected keys are checked; extra recorded tags are allowed.
        assert all(
            [extra_usage_tags[k] == v for k, v in expected_extra_usage_tags.items()]
        ), extra_usage_tags
class FakeTimer:
    """A manually-advanced clock with a ``time.time``-like interface.

    Mutations are guarded by an internal lock so the timer can be shared
    across threads in tests.
    """

    def __init__(self, start_time: Optional[float] = None):
        self._lock = threading.Lock()
        self.reset(start_time=start_time)

    def reset(self, start_time: Optional[float] = None):
        """Rewind the clock to ``start_time`` (defaults to the real wall time)."""
        with self._lock:
            self._curr = time.time() if start_time is None else start_time

    def time(self) -> float:
        """Return the current fake time."""
        return self._curr

    def advance(self, by: float):
        """Move the clock forward by ``by`` seconds."""
        with self._lock:
            self._curr += by

    def realistic_sleep(self, amt: float):
        """Advance by ``amt`` plus a small epsilon, mimicking a real sleep."""
        with self._lock:
            self._curr += amt + 0.001
def is_named_tuple(cls):
    """Return True if cls is a namedtuple and False otherwise."""
    # A namedtuple class subclasses exactly `tuple` and carries a `_fields`
    # tuple whose entries are all plain strings.
    if cls.__bases__ != (tuple,):
        return False
    fields = getattr(cls, "_fields", None)
    if not isinstance(fields, tuple):
        return False
    return all(type(name) is str for name in fields)
def _assert_tensor_pair_equivalent(torch, t1, t2):
    """Assert two tensors agree on dtype, shape, device type, and values."""
    assert t1.dtype == t2.dtype, f"dtype mismatch: {t1.dtype} vs {t2.dtype}"
    assert t1.shape == t2.shape, f"shape mismatch: {t1.shape} vs {t2.shape}"
    # Device *type* must match (cpu vs cuda); the device index is ignored.
    assert (
        t1.device.type == t2.device.type
    ), f"Device type mismatch: {t1.device} vs {t2.device}"
    # Compare values safely on CPU.
    left = t1.cpu()
    right = t2.cpu()
    if t1.dtype.is_floating_point or t1.dtype.is_complex:
        assert torch.allclose(
            left, right, atol=1e-6, rtol=1e-5
        ), "Floating-point tensors not close"
    else:
        assert torch.equal(left, right), "Integer/bool tensors not equal"


def assert_tensors_equivalent(obj1, obj2):
    """
    Recursively compare objects with special handling for torch.Tensor.

    Tensors are considered equivalent if:
    - Same dtype and shape
    - Same device type (e.g., both 'cpu' or both 'cuda'), index ignored
    - Values are equal (or close for floats)
    """
    import torch

    if isinstance(obj1, torch.Tensor) and isinstance(obj2, torch.Tensor):
        _assert_tensor_pair_equivalent(torch, obj1, obj2)
        return

    # Non-tensor values must share the exact same type.
    if type(obj1) is not type(obj2):
        raise AssertionError(f"Type mismatch: {type(obj1)} vs {type(obj2)}")

    if is_named_tuple(type(obj1)) or isinstance(obj1, (list, tuple)):
        # Sequences (including namedtuples) compare element-wise.
        assert len(obj1) == len(obj2)
        for left, right in zip(obj1, obj2):
            assert_tensors_equivalent(left, right)
    elif isinstance(obj1, dict):
        assert obj1.keys() == obj2.keys()
        for key in obj1:
            assert_tensors_equivalent(obj1[key], obj2[key])
    elif hasattr(obj1, "__dict__") and hasattr(obj2, "__dict__"):
        # Compare user-defined objects by their attributes, skipping
        # Ray-injected bookkeeping attributes.
        keys1 = {
            k
            for k in obj1.__dict__.keys()
            if not k.startswith("_ray_") and k != "_pytype_"
        }
        keys2 = {
            k
            for k in obj2.__dict__.keys()
            if not k.startswith("_ray_") and k != "_pytype_"
        }
        assert keys1 == keys2, f"Object attribute keys differ: {keys1} vs {keys2}"
        for k in keys1:
            assert_tensors_equivalent(obj1.__dict__[k], obj2.__dict__[k])
    else:
        # Fallback for primitives: int, float, str, bool, etc.
        assert obj1 == obj2, f"Non-tensor values differ: {obj1} vs {obj2}"
def run_string_as_driver(
    driver_script: str, env: Dict = None, encode: str = "utf-8"
) -> str:
    """Run a driver as a separate process.

    Args:
        driver_script: A string to run as a Python script.
        env: The environment variables for the driver.
        encode: The encoding to use for the driver script.

    Returns:
        The script's combined stdout+stderr output.

    Raises:
        subprocess.CalledProcessError: If the driver exits non-zero. The
            combined output is attached as ``output``; there is no separate
            ``stderr`` because it is redirected into stdout below.
    """
    proc = subprocess.Popen(
        [sys.executable, "-"],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        # Merge stderr into stdout so callers get a single ordered stream.
        stderr=subprocess.STDOUT,
        env=env,
    )
    with proc:
        raw_output = proc.communicate(driver_script.encode(encoding=encode))[0]
        if proc.returncode:
            # Surface the combined output for debugging before raising.
            # BUG FIX: the old code also did `logger.error(proc.stderr)` and
            # passed `proc.stderr` to CalledProcessError, but with
            # stderr=STDOUT `proc.stderr` is always None — that logged the
            # useless string "None" and attached nothing.
            print(decode(raw_output, encode_type=encode))
            raise subprocess.CalledProcessError(
                proc.returncode, proc.args, output=raw_output
            )
    return decode(raw_output, encode_type=encode)
@dataclass
class MetricSamplePattern:
    """A partial matcher for Prometheus samples.

    Any criterion left as None acts as a wildcard.
    """

    name: Optional[str] = None
    value: Optional[str] = None
    partial_label_match: Optional[Dict[str, str]] = None

    def matches(self, sample: "Sample"):
        """Return True if ``sample`` satisfies every non-None criterion."""
        if self.name is not None and self.name != sample.name:
            return False
        if self.value is not None and self.value != sample.value:
            return False
        if self.partial_label_match is not None:
            # Every listed label must be present with the expected value;
            # labels not listed are ignored.
            for label, expected in self.partial_label_match.items():
                if sample.labels.get(label) != expected:
                    return False
        return True
@dataclass
class PrometheusTimeseries:
    """A collection of timeseries from multiple addresses. Each timeseries is a
    collection of samples with the same metric name and labels. Concretely:

    - components_dict: a dictionary of addresses to the Component labels
    - metric_descriptors: a dictionary of metric names to the Metric object
    - metric_samples: the latest value of each label
    """

    components_dict: Dict[str, Set[str]] = field(default_factory=dict)
    metric_descriptors: Dict[str, "Metric"] = field(default_factory=dict)
    metric_samples: Dict[frozenset, "Sample"] = field(default_factory=dict)

    def flush(self):
        """Drop all accumulated data in place (the dict objects are reused)."""
        for store in (
            self.components_dict,
            self.metric_descriptors,
            self.metric_samples,
        ):
            store.clear()
def fetch_raw_prometheus(prom_addresses, timeout=None):
    """Yield (address, metrics_text) for each reachable Prometheus endpoint.

    Addresses that refuse the connection or time out are skipped silently.
    """
    # Local import so minimal dependency tests can run without requests
    import requests

    for address in prom_addresses:
        request_kwargs = {} if timeout is None else {"timeout": timeout}
        try:
            response = requests.get(f"http://{address}/metrics", **request_kwargs)
        except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
            continue
        yield address, response.text
def fetch_prometheus(prom_addresses, timeout=None):
    """Scrape the given addresses and aggregate the parsed metrics.

    Returns (components_dict, metric_descriptors, metric_samples), where
    components_dict maps each address to the set of Component labels seen.
    """
    components_dict = {address: set() for address in prom_addresses}
    metric_descriptors = {}
    metric_samples = []
    for address, body in fetch_raw_prometheus(prom_addresses, timeout=timeout):
        for metric in text_string_to_metric_families(body):
            for sample in metric.samples:
                metric_descriptors[sample.name] = metric
                metric_samples.append(sample)
                if "Component" in sample.labels:
                    components_dict[address].add(sample.labels["Component"])
    return components_dict, metric_descriptors, metric_samples
def fetch_prometheus_timeseries(
    prom_addreses: List[str],
    result: PrometheusTimeseries,
    timeout=None,
) -> PrometheusTimeseries:
    """Scrape the addresses and merge the data into ``result`` in place."""
    components_dict, metric_descriptors, metric_samples = fetch_prometheus(
        prom_addreses, timeout=timeout
    )
    for address, components in components_dict.items():
        result.components_dict.setdefault(address, set()).update(components)
    result.metric_descriptors.update(metric_descriptors)
    # Keep only the latest sample per (labels, metric-name) combination.
    for sample in metric_samples:
        key = frozenset(
            list(sample.labels.items()) + [("_metric_name_", sample.name)]
        )
        result.metric_samples[key] = sample
    return result
def fetch_prometheus_metrics(prom_addresses: List[str]) -> Dict[str, List[Any]]:
    """Return prometheus metrics from the given addresses.

    Args:
        prom_addresses: List of metrics_agent addresses to collect metrics from.

    Returns:
        Dict mapping from metric name to list of samples for the metric.
    """
    _, _, samples = fetch_prometheus(prom_addresses)
    grouped = defaultdict(list)
    for sample in samples:
        grouped[sample.name].append(sample)
    return grouped
def fetch_prometheus_metric_timeseries(
    prom_addresses: List[str],
    result: PrometheusTimeseries,
    timeout=None,
) -> Dict[str, List[Any]]:
    """Scrape into ``result`` and return its samples grouped by metric name."""
    merged = fetch_prometheus_timeseries(prom_addresses, result, timeout=timeout)
    grouped = defaultdict(list)
    for sample in merged.metric_samples.values():
        grouped[sample.name].append(sample)
    return grouped
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/_common/test_utils.py",
"license": "Apache License 2.0",
"lines": 432,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/tests/test_runtime_env_get_wheel_names.py | import sys
import pytest
import requests
import ray._private.ray_constants as ray_constants
from ray._private.utils import (
get_master_wheel_url,
get_release_wheel_url,
get_wheel_filename,
)
def test_get_wheel_filename():
    """Test the code that generates the filenames of the `latest` wheels."""
    # NOTE: These should not be changed for releases.
    ray_version = "3.0.0.dev0"
    for arch in ["x86_64", "aarch64", "arm64"]:
        for sys_platform in ["darwin", "linux", "win32"]:
            # Windows only has x86_64 wheels
            if sys_platform == "win32" and arch != "x86_64":
                continue
            # MacOS only has arm64 wheels
            if sys_platform == "darwin" and arch == "x86_64":
                continue
            for py_version in ray_constants.RUNTIME_ENV_CONDA_PY_VERSIONS:
                filename = get_wheel_filename(
                    sys_platform, ray_version, py_version, arch
                )
                prefix = "https://s3-us-west-2.amazonaws.com/ray-wheels/latest/"
                # BUG FIX: interpolate the generated wheel filename into the
                # URL. Previously a literal placeholder string was embedded,
                # so `filename` was unused and the HEAD request never checked
                # the real wheel.
                url = f"{prefix}{filename}"
                assert requests.head(url).status_code == 200, url
def test_get_master_wheel_url():
    """Test the code that generates the filenames of `master` commit wheels."""
    # NOTE: These should not be changed for releases.
    ray_version = "3.0.0.dev0"
    # This should be a commit for which wheels have already been built for
    # all platforms and python versions at
    # `s3://ray-wheels/master/<test_commit>/`.
    #
    # Link to commit:
    # https://github.com/ray-project/ray/commit/faf06e09e55558fb36c72e91a5cf8a7e3da8b8c6
    test_commit = "faf06e09e55558fb36c72e91a5cf8a7e3da8b8c6"
    for platform_name in ["darwin", "linux", "win32"]:
        for python_version in ray_constants.RUNTIME_ENV_CONDA_PY_VERSIONS:
            wheel_url = get_master_wheel_url(
                test_commit, platform_name, ray_version, python_version
            )
            assert requests.head(wheel_url).status_code == 200, wheel_url
def test_get_release_wheel_url():
    """Test the code that generates the filenames of the `release` branch wheels."""
    # This should be a commit for which wheels have already been built for
    # all platforms and python versions at
    # `s3://ray-wheels/releases/2.2.0/<commit>/`.
    release_commits = {"2.49.2": "479fa716904109d9df4b56b98ca3c3350e1ec13c"}
    for platform_name in ["darwin", "linux", "win32"]:
        for python_version in ray_constants.RUNTIME_ENV_CONDA_PY_VERSIONS:
            for release_version, commit_sha in release_commits.items():
                wheel_url = get_release_wheel_url(
                    commit_sha, platform_name, release_version, python_version
                )
                assert requests.head(wheel_url).status_code == 200, wheel_url
# Allow running this test file directly; exit with pytest's status code.
if __name__ == "__main__":
    sys.exit(pytest.main(["-sv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/test_runtime_env_get_wheel_names.py",
"license": "Apache License 2.0",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/tests/unit/test_runtime_env.py | import dataclasses
import json
import os
import subprocess
import sys
import tempfile
from dataclasses import dataclass
from typing import Any, Dict
from unittest import mock
import pytest
import ray
import ray._private.ray_constants as ray_constants
from ray._private.runtime_env.uri_cache import URICache
from ray._private.runtime_env.utils import (
SubprocessCalledProcessError,
check_output_cmd,
)
from ray._private.test_utils import (
chdir,
)
from ray.runtime_env import RuntimeEnv
from ray.runtime_env.runtime_env import (
RuntimeEnvConfig,
_merge_runtime_env,
)
def test_runtime_env_merge():
    """Exercise _merge_runtime_env with empty, one-sided, disjoint, and
    conflicting inputs, with and without override."""
    # Any combination of None/{} parents and children merges to {}.
    for parent, child in [(None, None), ({}, None), (None, {}), ({}, {})]:
        assert _merge_runtime_env(parent, child) == {}

    # Only one side given: the merge is that side unchanged.
    one_sided = {"conda": ["requests"], "env_vars": {"A": "1"}}
    assert _merge_runtime_env(one_sided, None) == one_sided
    assert _merge_runtime_env(None, one_sided) == one_sided

    # Disjoint fields merge; the env_vars dictionaries are unioned.
    assert _merge_runtime_env(
        {"conda": ["requests"], "env_vars": {"A": "1"}},
        {"pip": ["requests"], "env_vars": {"B": "2"}},
    ) == {
        "conda": ["requests"],
        "pip": ["requests"],
        "env_vars": {"A": "1", "B": "2"},
    }

    # Conflicting top-level field without override -> merge fails (None).
    assert (
        _merge_runtime_env(
            {"pip": ["requests"], "env_vars": {"A": "1"}},
            {"pip": ["colors"], "env_vars": {"B": "2"}},
        )
        is None
    )

    # Conflicting env var without override -> merge fails (None).
    assert (
        _merge_runtime_env(
            {"pip": ["requests"], "env_vars": {"A": "1"}},
            {"conda": ["requests"], "env_vars": {"A": "2"}},
        )
        is None
    )

    # override=True: the child's fields win on conflict.
    assert _merge_runtime_env(
        {"pip": ["requests"], "env_vars": {"A": "1"}},
        {"pip": ["colors"], "env_vars": {"B": "2"}},
        override=True,
    ) == {
        "pip": ["colors"],
        "env_vars": {"A": "1", "B": "2"},
    }

    # override=True also applies to individual env vars.
    assert _merge_runtime_env(
        {"pip": ["requests"], "env_vars": {"A": "1"}},
        {"pip": ["colors"], "conda": ["requests"], "env_vars": {"A": "2"}},
        override=True,
    ) == {
        "pip": ["colors"],
        "env_vars": {"A": "2"},
        "conda": ["requests"],
    }
def test_current_py_version_supported():
    """Test that the running python version is supported.

    This is run as a check in the Ray `runtime_env` `conda` code
    before downloading the Ray wheel into the conda environment.
    If Ray wheels are not available for this python version, then
    the `conda` environment installation will fail.

    When a new python version is added to the Ray wheels, please update
    `ray_constants.RUNTIME_ENV_CONDA_PY_VERSIONS`. In a subsequent commit,
    once wheels have been built for the new python version, please update
    the tests test_get_wheel_filename, test_get_master_wheel_url, and
    (after the first Ray release with the new python version)
    test_get_release_wheel_url.
    """
    # Only (major, minor) matters for wheel availability.
    assert sys.version_info[:2] in ray_constants.RUNTIME_ENV_CONDA_PY_VERSIONS
def test_compatible_with_dataclasses():
    """Test that the output of RuntimeEnv.to_dict() can be used as a dataclass field."""
    env_config = RuntimeEnvConfig(setup_timeout_seconds=1)
    env = RuntimeEnv(
        pip={
            "packages": ["tensorflow", "requests"],
            "pip_check": False,
            "pip_version": "==23.3.2;python_version=='3.9.16'",
        },
        env_vars={"FOO": "BAR"},
        config=env_config,
    )

    @dataclass
    class EnvHolder:
        runtime_env: Dict[str, Any]

    @dataclass
    class ConfigHolder:
        config: Dict[str, Any]

    # asdict() must succeed on both serialized forms without raising.
    dataclasses.asdict(EnvHolder(env.to_dict()))
    dataclasses.asdict(ConfigHolder(env_config.to_dict()))
@pytest.mark.parametrize("runtime_env_class", [dict, RuntimeEnv])
def test_container_option_serialize(runtime_env_class):
    """The container image and run options survive JobConfig serialization."""
    env = runtime_env_class(
        container={"image": "ray:latest", "run_options": ["--name=test"]}
    )
    serialized = ray.job_config.JobConfig(runtime_env=env)._serialize()
    # The serialized JobConfig protobuf embeds
    # job_config.runtime_env_info.serialized_runtime_env, which must carry
    # the container options exactly once each.
    assert serialized.count(b"ray:latest") == 1
    assert serialized.count(b"--name=test") == 1
class TestURICache:
    """Unit tests for URICache size accounting and eviction of unused URIs."""

    def test_zero_cache_size(self):
        """With max size 0, any URI is evicted as soon as it is unused."""
        uris_to_sizes = {"5": 5, "3": 3}

        def delete_fn(uri, logger):
            return uris_to_sizes[uri]

        cache = URICache(delete_fn, max_total_size_bytes=0, debug_mode=True)
        cache.add("5", 5)
        assert cache.get_total_size_bytes() == 5
        cache.mark_unused("5")
        assert cache.get_total_size_bytes() == 0
        cache.add("3", 3)
        cache.add("5", 5)
        assert cache.get_total_size_bytes() == 8
        cache.mark_unused("3")
        cache.mark_unused("5")
        assert cache.get_total_size_bytes() == 0

    def test_nonzero_cache_size(self):
        """Unused URIs are only evicted once the max size is exceeded."""
        uris_to_sizes = {"a": 4, "b": 4, "c": 4}

        def delete_fn(uri, logger):
            return uris_to_sizes[uri]

        cache = URICache(delete_fn, max_total_size_bytes=10, debug_mode=True)
        cache.add("a", 4)
        cache.add("b", 4)
        cache.mark_unused("a")
        assert "a" in cache
        cache.add("c", 4)
        # Now we have total size 12, which exceeds the max size 10.
        assert cache.get_total_size_bytes() == 8
        # "a" was the only unused URI, so it must have been deleted.
        # BUG FIX: the original assertion was `"b" and "c" in cache and ...`,
        # where the literal "b" is truthy on its own, so membership of "b"
        # was never actually checked.
        assert "b" in cache and "c" in cache and "a" not in cache

    def test_mark_used_nonadded_uri_error(self):
        """Marking a never-added URI as used is a ValueError."""
        cache = URICache(debug_mode=True)
        with pytest.raises(ValueError):
            cache.mark_used("nonadded_uri")

    def test_mark_used(self):
        """URIs marked used are pinned and never evicted."""
        uris_to_sizes = {"a": 3, "b": 3, "big": 300}

        def delete_fn(uri, logger):
            return uris_to_sizes[uri]

        cache = URICache(delete_fn, max_total_size_bytes=10, debug_mode=True)
        cache.add("a", 3)
        cache.add("b", 3)
        cache.mark_unused("a")
        cache.mark_unused("b")
        assert "a" in cache and "b" in cache
        assert cache.get_total_size_bytes() == 6

        cache.mark_used("a")
        cache.add("big", 300)
        # We are over capacity and the only unused URI is "b", so we delete it
        assert "a" in cache and "big" in cache and "b" not in cache
        assert cache.get_total_size_bytes() == 303

        cache.mark_unused("big")
        assert "big" not in cache
        assert cache.get_total_size_bytes() == 3

    def test_many_URIs(self):
        """An unbounded cache retains every unused URI."""
        uris_to_sizes = {str(i): i for i in range(1000)}

        def delete_fn(uri, logger):
            return uris_to_sizes[uri]

        cache = URICache(delete_fn, debug_mode=True)
        for i in range(1000):
            cache.add(str(i), i)
        for i in range(1000):
            cache.mark_unused(str(i))
        for i in range(1000):
            assert str(i) in cache

    def test_delete_fn_called(self):
        """delete_fn is invoked exactly once per eviction."""
        num_delete_fn_calls = 0
        uris_to_sizes = {"a": 8, "b": 6, "c": 4, "d": 20}

        def delete_fn(uri, logger):
            nonlocal num_delete_fn_calls
            num_delete_fn_calls += 1
            return uris_to_sizes[uri]

        cache = URICache(delete_fn, max_total_size_bytes=10, debug_mode=True)
        cache.add("a", 8)
        cache.add("b", 6)
        cache.mark_unused("b")
        # Total size is 14 > 10, so we need to delete "b".
        assert num_delete_fn_calls == 1
        cache.add("c", 4)
        cache.mark_unused("c")
        # Total size is 12 > 10, so we delete "c".
        assert num_delete_fn_calls == 2
        cache.mark_unused("a")
        # Total size is 8 <= 10, so we shouldn't delete anything.
        assert num_delete_fn_calls == 2
        cache.add("d", 20)
        # Total size is 28 > 10, so we delete "a".
        assert num_delete_fn_calls == 3
        cache.mark_unused("d")
        # Total size is 20 > 10, so we delete "d".
        assert num_delete_fn_calls == 4
@pytest.fixture
def enable_dev_mode(local_env_var_enabled, monkeypatch):
    """Set RAY_RUNTIME_ENV_LOG_TO_DRIVER_ENABLED per the
    `local_env_var_enabled` fixture param for the duration of a test."""
    enabled = "1" if local_env_var_enabled else "0"
    monkeypatch.setenv("RAY_RUNTIME_ENV_LOG_TO_DRIVER_ENABLED", enabled)
    yield
def test_subprocess_error():
    """The exception message includes stdout/stderr only when provided."""
    # (kwargs, expect "test_out" in message, expect "test_err" in message)
    cases = [
        ({}, False, False),
        ({"stderr": "test_err"}, False, True),
        ({"output": "test_out"}, True, False),
        ({"output": "test_out", "stderr": "test_err"}, True, True),
    ]
    for kwargs, expect_out, expect_err in cases:
        with pytest.raises(subprocess.SubprocessError) as excinfo:
            raise SubprocessCalledProcessError(123, "abc", **kwargs)
        message = str(excinfo.value)
        assert ("test_out" in message) == expect_out
        assert ("test_err" in message) == expect_err
def test_subprocess_error_with_last_n_lines():
    """Only the last LAST_N_LINES of stdout/stderr appear in the message."""
    out_text = "1\n2\n3\n4\n5\n"
    err_text = "5\n4\n3\n2\n1\n"
    error = SubprocessCalledProcessError(888, "abc", output=out_text, stderr=err_text)
    error.LAST_N_LINES = 3
    message = str(error)
    assert "cmd" not in message
    assert "Last 3 lines" in message
    # Collapse to one string so the trailing lines can be matched regardless
    # of indentation.
    collapsed = "".join(line.strip() for line in message.splitlines())
    assert "345" in collapsed
    assert "321" in collapsed
@pytest.mark.asyncio
async def test_check_output_cmd():
    """Exercise check_output_cmd's logging, failure modes, and the global
    per-call command index embedded in logs and exception messages."""
    # Use a trivial, universally-available command.
    cmd = "dir" if sys.platform.startswith("win") else "pwd"

    logs = []

    # Captures every logger call (any level) as a formatted string.
    class _FakeLogger:
        def __getattr__(self, item):
            def _log(formatter, *args):
                logs.append(formatter % args)

            return _log

    for _ in range(2):
        output = await check_output_cmd([cmd], logger=_FakeLogger())
        assert len(output) > 0

    all_log_string = "\n".join(logs)
    # Check the cmd index generator works.
    # NOTE(review): these indices assume the counter starts at 1 for this
    # test process and increments once per check_output_cmd call below.
    assert "cmd[1]" in all_log_string
    assert "cmd[2]" in all_log_string

    # Test communicate fails.
    with mock.patch(
        "asyncio.subprocess.Process.communicate",
        side_effect=Exception("fake exception"),
    ):
        with pytest.raises(RuntimeError) as e:
            await check_output_cmd([cmd], logger=_FakeLogger())
        # Make sure the exception has cmd trace info.
        assert "cmd[3]" in str(e.value)

    # Test asyncio.create_subprocess_exec fails.
    with pytest.raises(RuntimeError) as e:
        await check_output_cmd(["not_exist_cmd"], logger=_FakeLogger())
    # Make sure the exception has cmd trace info.
    assert "cmd[4]" in str(e.value)

    # Test returncode != 0.
    with pytest.raises(SubprocessCalledProcessError) as e:
        await check_output_cmd([cmd, "--abc"], logger=_FakeLogger())
    # Make sure the exception has cmd trace info.
    assert "cmd[5]" in str(e.value)
@pytest.mark.parametrize(
    "option",
    ["pip_list", "pip_dict", "conda_name", "conda_dict", "container"],
)
def test_serialize_deserialize(option):
    """RuntimeEnv round-trips through serialize()/deserialize() per field."""
    option_to_env = {
        "pip_list": {"pip": ["pkg1", "pkg2"]},
        "pip_dict": {
            "pip": {
                "packages": ["pkg1", "pkg2"],
                "pip_check": False,
                "pip_version": "<22,>20",
            }
        },
        "conda_name": {"conda": "env_name"},
        "conda_dict": {"conda": {"dependencies": ["dep1", "dep2"]}},
        "container": {
            "container": {
                "image": "anyscale/ray-ml:nightly-py38-cpu",
                "worker_path": "/root/python/ray/_private/workers/default_worker.py",
                "run_options": ["--cap-drop SYS_ADMIN", "--log-level=debug"],
            }
        },
    }
    if option not in option_to_env:
        raise ValueError("unexpected option " + str(option))
    typed_runtime_env = RuntimeEnv(**option_to_env[option])

    roundtripped = RuntimeEnv.deserialize(typed_runtime_env.serialize()).to_dict()

    if "pip" in typed_runtime_env and isinstance(typed_runtime_env["pip"], list):
        # A bare list of packages is normalized into the dict form with
        # pip_check defaulting to False.
        assert roundtripped.pop("pip") == {
            "packages": typed_runtime_env.pop("pip"),
            "pip_check": False,
        }
    assert roundtripped == typed_runtime_env
def test_runtime_env_interface():
    """Exercise RuntimeEnv's dict-like accessors for every supported field.

    For each field (working_dir, py_modules, env_vars, conda, pip, container)
    the test mutates the typed RuntimeEnv and an equivalently mutated plain
    dict in lockstep, asserting to_dict() stays in sync, then pops the field
    and asserts the env is empty again.
    """
    # Test the interface related to working_dir
    default_working_dir = "s3://bucket/key.zip"
    modify_working_dir = "s3://bucket/key_A.zip"
    runtime_env = RuntimeEnv(working_dir=default_working_dir)
    runtime_env_dict = runtime_env.to_dict()
    assert runtime_env.working_dir_uri() == default_working_dir
    runtime_env["working_dir"] = modify_working_dir
    runtime_env_dict["working_dir"] = modify_working_dir
    assert runtime_env.working_dir_uri() == modify_working_dir
    assert runtime_env.to_dict() == runtime_env_dict
    runtime_env.pop("working_dir")
    assert runtime_env.to_dict() == {}

    # Test the interface related to py_modules
    init_py_modules = ["s3://bucket/key_1.zip", "s3://bucket/key_2.zip"]
    addition_py_modules = ["s3://bucket/key_3.zip", "s3://bucket/key_4.zip"]
    runtime_env = RuntimeEnv(py_modules=init_py_modules)
    runtime_env_dict = runtime_env.to_dict()
    assert set(runtime_env.py_modules_uris()) == set(init_py_modules)
    runtime_env["py_modules"].extend(addition_py_modules)
    runtime_env_dict["py_modules"].extend(addition_py_modules)
    assert set(runtime_env.py_modules_uris()) == set(
        init_py_modules + addition_py_modules
    )
    assert runtime_env.to_dict() == runtime_env_dict
    runtime_env.pop("py_modules")
    assert runtime_env.to_dict() == {}

    # Test the interface related to env_vars
    init_env_vars = {"A": "a", "B": "b"}
    update_env_vars = {"C": "c"}
    runtime_env = RuntimeEnv(env_vars=init_env_vars)
    runtime_env_dict = runtime_env.to_dict()
    runtime_env["env_vars"].update(update_env_vars)
    runtime_env_dict["env_vars"].update(update_env_vars)
    init_env_vars_copy = init_env_vars.copy()
    init_env_vars_copy.update(update_env_vars)
    assert runtime_env["env_vars"] == init_env_vars_copy
    assert runtime_env_dict == runtime_env.to_dict()
    runtime_env.pop("env_vars")
    assert runtime_env.to_dict() == {}

    # Test the interface related to conda
    conda_name = "conda"
    modify_conda_name = "conda_A"
    conda_config = {"dependencies": ["dep1", "dep2"]}
    runtime_env = RuntimeEnv(conda=conda_name)
    runtime_env_dict = runtime_env.to_dict()
    assert runtime_env.has_conda()
    assert runtime_env.conda_env_name() == conda_name
    assert runtime_env.conda_config() is None
    runtime_env["conda"] = modify_conda_name
    runtime_env_dict["conda"] = modify_conda_name
    assert runtime_env_dict == runtime_env.to_dict()
    assert runtime_env.has_conda()
    assert runtime_env.conda_env_name() == modify_conda_name
    assert runtime_env.conda_config() is None
    runtime_env["conda"] = conda_config
    runtime_env_dict["conda"] = conda_config
    assert runtime_env_dict == runtime_env.to_dict()
    assert runtime_env.has_conda()
    assert runtime_env.conda_env_name() is None
    assert runtime_env.conda_config() == json.dumps(conda_config, sort_keys=True)
    runtime_env.pop("conda")
    # NOTE(review): after popping conda the commit marker remains, unlike the
    # other fields above which leave a fully empty dict — presumably inserted
    # by the conda-config path; verify against RuntimeEnv internals.
    assert runtime_env.to_dict() == {"_ray_commit": "{{RAY_COMMIT_SHA}}"}

    # Test the interface related to pip
    with tempfile.TemporaryDirectory() as tmpdir, chdir(tmpdir):
        requirement_file = os.path.join(tmpdir, "requirements.txt")
        requirement_packages = ["dep5", "dep6"]
        with open(requirement_file, "wt") as f:
            for package in requirement_packages:
                f.write(package)
                f.write("\n")
        pip_packages = ["dep1", "dep2"]
        addition_pip_packages = ["dep3", "dep4"]
        runtime_env = RuntimeEnv(pip=pip_packages)
        runtime_env_dict = runtime_env.to_dict()
        assert runtime_env.has_pip()
        assert set(runtime_env.pip_config()["packages"]) == set(pip_packages)
        assert runtime_env.virtualenv_name() is None
        runtime_env["pip"]["packages"].extend(addition_pip_packages)
        runtime_env_dict["pip"]["packages"].extend(addition_pip_packages)
        # The default value of pip_check is False
        runtime_env_dict["pip"]["pip_check"] = False
        assert runtime_env_dict == runtime_env.to_dict()
        assert runtime_env.has_pip()
        assert set(runtime_env.pip_config()["packages"]) == set(
            pip_packages + addition_pip_packages
        )
        assert runtime_env.virtualenv_name() is None
        # Assigning a requirements-file path should expand to its packages.
        runtime_env["pip"] = requirement_file
        runtime_env_dict["pip"] = requirement_packages
        assert runtime_env.has_pip()
        assert set(runtime_env.pip_config()["packages"]) == set(requirement_packages)
        assert runtime_env.virtualenv_name() is None
        # The default value of pip_check is False
        runtime_env_dict["pip"] = dict(
            packages=runtime_env_dict["pip"], pip_check=False
        )
        assert runtime_env_dict == runtime_env.to_dict()
        runtime_env.pop("pip")
        assert runtime_env.to_dict() == {"_ray_commit": "{{RAY_COMMIT_SHA}}"}

    # Test conflict
    with pytest.raises(ValueError):
        RuntimeEnv(pip=pip_packages, conda=conda_name)
    runtime_env = RuntimeEnv(pip=pip_packages)
    runtime_env["conda"] = conda_name
    with pytest.raises(ValueError):
        runtime_env.serialize()

    # Test the interface related to container
    container_init = {
        "image": "anyscale/ray-ml:nightly-py38-cpu",
        "run_options": ["--cap-drop SYS_ADMIN", "--log-level=debug"],
    }
    update_container = {"image": "test_modify"}
    runtime_env = RuntimeEnv(container=container_init)
    runtime_env_dict = runtime_env.to_dict()
    assert runtime_env.has_py_container()
    assert runtime_env.py_container_image() == container_init["image"]
    assert runtime_env.py_container_run_options() == container_init["run_options"]
    runtime_env["container"].update(update_container)
    runtime_env_dict["container"].update(update_container)
    # NOTE: container_copy aliases container_init (no copy is taken), so the
    # update below mutates both names; the assertions rely only on the merged
    # values, so this is harmless here.
    container_copy = container_init
    container_copy.update(update_container)
    assert runtime_env_dict == runtime_env.to_dict()
    assert runtime_env.has_py_container()
    assert runtime_env.py_container_image() == container_copy["image"]
    assert runtime_env.py_container_run_options() == container_copy["run_options"]
    runtime_env.pop("container")
    assert runtime_env.to_dict() == {}
if __name__ == "__main__":
    # Allow running this test module directly via pytest.
    sys.exit(pytest.main(["-sv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/unit/test_runtime_env.py",
"license": "Apache License 2.0",
"lines": 452,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/tests/test_bundle_label_selector.py | import os
import sys
import pytest
import ray
from ray._private.test_utils import placement_group_assert_no_leak
from ray.util.placement_group import placement_group, placement_group_table
from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy
def test_bundle_label_selector_with_repeated_labels(ray_start_cluster):
    """Both bundles use the same label selector, so both must land on the TPU node."""
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=4, labels={"ray.io/accelerator-type": "A100"})
    node = cluster.add_node(num_cpus=4, labels={"ray.io/accelerator-type": "TPU"})
    ray.init(address=cluster.address)
    bundles = [{"CPU": 1}, {"CPU": 1}]
    label_selector = [{"ray.io/accelerator-type": "TPU"}] * 2
    pg = placement_group(
        name="repeated_labels_pg",
        bundles=bundles,
        bundle_label_selector=label_selector,
    )
    ray.get(pg.ready())
    bundles_to_node_id = placement_group_table()[pg.id.hex()]["bundles_to_node_id"]
    # Every bundle should have been scheduled onto the TPU-labeled node.
    assert bundles_to_node_id[0] == node.node_id
    assert bundles_to_node_id[1] == node.node_id
    placement_group_assert_no_leak([pg])
def test_unschedulable_bundle_label_selector(ray_start_cluster):
    """A selector that over-subscribes the only matching node must leave the PG pending."""
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=1, labels={"ray.io/accelerator-type": "A100"})
    cluster.add_node(num_cpus=1, labels={"ray.io/accelerator-type": "TPU"})
    ray.init(address=cluster.address)
    # request 2 CPUs total, but only 1 CPU available with label ray.io/accelerator-type=A100
    bundles = [{"CPU": 1}, {"CPU": 1}]
    label_selector = [{"ray.io/accelerator-type": "A100"}] * 2
    pg = placement_group(
        name="unschedulable_labels_pg",
        bundles=bundles,
        bundle_label_selector=label_selector,
    )
    # ready() never resolves; the scheduler should report a resource shortage.
    with pytest.raises(ray.exceptions.GetTimeoutError):
        ray.get(pg.ready(), timeout=3)
    state = placement_group_table()[pg.id.hex()]["stats"]["scheduling_state"]
    assert state == "NO_RESOURCES"
def test_bundle_label_selectors_match_bundle_resources(ray_start_cluster):
    """Each bundle's label selector and custom resource must map to the same node."""
    cluster = ray_start_cluster
    # Add nodes with unique labels and custom resources
    cluster.add_node(
        num_cpus=1, resources={"resource-0": 1}, labels={"region": "us-west4"}
    )
    cluster.add_node(
        num_cpus=1, resources={"resource-1": 1}, labels={"region": "us-east5"}
    )
    cluster.add_node(
        num_cpus=1, resources={"resource-2": 1}, labels={"region": "us-central2"}
    )
    cluster.wait_for_nodes()
    ray.init(address=cluster.address)
    # Bundle label selectors to match the node labels above
    bundle_label_selectors = [
        {"region": "us-west4"},
        {"region": "us-east5"},
        {"region": "us-central2"},
    ]
    # Each bundle requests CPU and a unique custom resource
    bundles = [
        {"CPU": 1, "resource-0": 1},
        {"CPU": 1, "resource-1": 1},
        {"CPU": 1, "resource-2": 1},
    ]
    pg = placement_group(
        name="label_selectors_match_resources",
        bundles=bundles,
        bundle_label_selector=bundle_label_selectors,
    )
    ray.get(pg.ready())

    @ray.remote
    def get_assigned_resources():
        # Runs inside the PG: report which node the task landed on and what
        # resources were actually assigned to it.
        return (
            ray.get_runtime_context().get_node_id(),
            ray.get_runtime_context().get_assigned_resources(),
        )

    node_id_to_label = {
        node["NodeID"]: node["Labels"]["region"] for node in ray.nodes()
    }
    # Launch one task per bundle to check resource mapping
    for i in range(len(bundles)):
        result = ray.get(
            get_assigned_resources.options(
                num_cpus=1,
                resources={f"resource-{i}": 1},
                scheduling_strategy=PlacementGroupSchedulingStrategy(
                    placement_group=pg, placement_group_bundle_index=i
                ),
            ).remote()
        )
        node_id, assigned = result
        # Check node label matches expected
        assert node_id_to_label[node_id] == bundle_label_selectors[i]["region"]
        # Check resource assignment includes the expected custom resource
        assert f"resource-{i}" in assigned
        assert assigned[f"resource-{i}"] == 1.0
        # Check CPU was assigned
        assert "CPU" in assigned and assigned["CPU"] == 1.0
def test_strict_pack_bundle_label_selector(ray_start_cluster):
    """
    Verifies that placement groups with STRICT_PACK strategy respect bundle_label_selector.
    If the PG is ready, it should schedule all bundles to the same node which satisfies the
    label constraints. If the `bundle_label_selector` is unsatisfiable on a single node,
    the PG should remain pending.
    """
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=4, labels={"region": "us-east"})
    cluster.add_node(num_cpus=4, labels={"region": "us-west"})
    ray.init(address=cluster.address)
    # Success case - both label selectors can be satisfied on a single node.
    success_pg = placement_group(
        bundles=[{"CPU": 1}, {"CPU": 1}],
        strategy="STRICT_PACK",
        bundle_label_selector=[{"region": "us-east"}, {"region": "us-east"}],
    )
    ray.get(success_pg.ready(), timeout=5)
    table = placement_group_table(success_pg)
    assert table["state"] == "CREATED"
    # Failure case - conflicting label selectors match two distinct nodes.
    fail_pg = placement_group(
        bundles=[{"CPU": 1}, {"CPU": 1}],
        strategy="STRICT_PACK",
        bundle_label_selector=[{"region": "us-east"}, {"region": "us-west"}],
    )
    with pytest.raises(ray.exceptions.GetTimeoutError):
        ray.get(fail_pg.ready(), timeout=5)
    pg_info = placement_group_table(fail_pg)
    assert pg_info["state"] == "PENDING"
    # Clean up so the cluster fixture is left without leaked PGs.
    ray.util.remove_placement_group(success_pg)
    ray.util.remove_placement_group(fail_pg)
def test_strict_spread_bundle_label_selector(ray_start_cluster):
    """
    Verifies that placement groups with STRICT_SPREAD strategy respect bundle_label_selector.
    If the PG is ready, it should schedule all bundles to different nodes which each satisfy
    their respective label constraints. If the `bundle_label_selector` is unsatisfiable on
    len(bundles) unique nodes, the PG should remain pending.
    """
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=4, labels={"type": "A"})
    cluster.add_node(num_cpus=4, labels={"type": "A"})
    cluster.add_node(num_cpus=4, labels={"type": "B"})
    ray.init(address=cluster.address)
    # Success case - label selectors can be satisfied on different nodes.
    success_pg = placement_group(
        bundles=[{"CPU": 1}, {"CPU": 1}],
        strategy="STRICT_SPREAD",
        bundle_label_selector=[{"type": "A"}, {"type": "A"}],
    )
    ray.get(success_pg.ready(), timeout=5)
    assert placement_group_table(success_pg)["state"] == "CREATED"
    # Failure case - conflicting label selectors only satisfied by one node.
    fail_pg = placement_group(
        bundles=[{"CPU": 1}, {"CPU": 1}],
        strategy="STRICT_SPREAD",
        bundle_label_selector=[{"type": "B"}, {"type": "B"}],
    )
    with pytest.raises(ray.exceptions.GetTimeoutError):
        ray.get(fail_pg.ready(), timeout=5)
    pg_info = placement_group_table(fail_pg)
    assert pg_info["state"] == "PENDING"
    ray.util.remove_placement_group(success_pg)
    ray.util.remove_placement_group(fail_pg)
def test_pack_strategy_bundle_label_selector(ray_start_cluster):
    """
    Verifies that PACK strategy respects bundle_label_selector.

    PACK is best-effort: bundles may spread across nodes when their selectors
    force it, but the PG must still fail when no node satisfies a selector.
    """
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=4, labels={"type": "A"})
    cluster.add_node(num_cpus=4, labels={"type": "B"})
    ray.init(address=cluster.address)
    # Success case - label selectors satisfied on one node.
    success_pg_1 = placement_group(
        bundles=[{"CPU": 1}, {"CPU": 1}],
        strategy="PACK",
        bundle_label_selector=[{"type": "A"}, {"type": "A"}],
    )
    ray.get(success_pg_1.ready(), timeout=5)
    assert placement_group_table(success_pg_1)["state"] == "CREATED"
    # Success case (best effort) - label selectors satisfied on different nodes.
    success_pg_2 = placement_group(
        bundles=[{"CPU": 1}, {"CPU": 1}],
        strategy="PACK",
        bundle_label_selector=[{"type": "A"}, {"type": "B"}],
    )
    ray.get(success_pg_2.ready(), timeout=5)
    assert placement_group_table(success_pg_2)["state"] == "CREATED"
    # Failure case - label selectors unsatisfiable by any node.
    fail_pg = placement_group(
        bundles=[{"CPU": 1}, {"CPU": 1}],
        strategy="PACK",
        bundle_label_selector=[{"type": "A"}, {"type": "C"}],
    )
    with pytest.raises(ray.exceptions.GetTimeoutError):
        ray.get(fail_pg.ready(), timeout=3)
    pg_info = placement_group_table(fail_pg)
    assert pg_info["state"] == "PENDING"
    ray.util.remove_placement_group(success_pg_1)
    ray.util.remove_placement_group(success_pg_2)
    ray.util.remove_placement_group(fail_pg)
def test_spread_strategy_bundle_label_selector(ray_start_cluster):
    """
    Verifies that SPREAD strategy respects bundle_label_selector.

    SPREAD prefers distinct nodes but may co-locate bundles when their
    selectors only match one node; an unmatchable selector leaves the PG pending.
    """
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=4, labels={"type": "A"})
    cluster.add_node(num_cpus=4, labels={"type": "B"})
    ray.init(address=cluster.address)
    # Success case - label selectors satisfied and SPREAD across nodes.
    success_pg_spread = placement_group(
        bundles=[{"CPU": 1}, {"CPU": 1}],
        strategy="SPREAD",
        bundle_label_selector=[{"type": "A"}, {"type": "B"}],
    )
    ray.get(success_pg_spread.ready(), timeout=5)
    assert placement_group_table(success_pg_spread)["state"] == "CREATED"
    # Success case - label selectors satisfied but forced to use same node.
    success_pg_packed = placement_group(
        bundles=[{"CPU": 1}, {"CPU": 1}],
        strategy="SPREAD",
        bundle_label_selector=[{"type": "A"}, {"type": "A"}],
    )
    ray.get(success_pg_packed.ready(), timeout=5)
    assert placement_group_table(success_pg_packed)["state"] == "CREATED"
    # Failure case - label selectors unsatisfiable by any node.
    fail_pg = placement_group(
        bundles=[{"CPU": 1}], strategy="SPREAD", bundle_label_selector=[{"type": "C"}]
    )
    with pytest.raises(ray.exceptions.GetTimeoutError):
        ray.get(fail_pg.ready(), timeout=3)
    pg_info = placement_group_table(fail_pg)
    assert pg_info["state"] == "PENDING"
    ray.util.remove_placement_group(success_pg_spread)
    ray.util.remove_placement_group(success_pg_packed)
    ray.util.remove_placement_group(fail_pg)
if __name__ == "__main__":
    # PARALLEL_CI runs tests in isolated subprocesses via pytest-xdist.
    if os.environ.get("PARALLEL_CI"):
        sys.exit(pytest.main(["-n", "auto", "--boxed", "-vs", __file__]))
    else:
        sys.exit(pytest.main(["-sv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/test_bundle_label_selector.py",
"license": "Apache License 2.0",
"lines": 242,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:ci/ray_ci/automation/generate_index.py | import click
from ci.ray_ci.automation.docker_tags_lib import generate_index, list_image_tags
from ci.ray_ci.docker_container import (
ARCHITECTURES_RAY,
PLATFORMS_RAY,
PYTHON_VERSIONS_RAY,
RayType,
)
@click.command()
@click.option("--prefix", required=True, type=str)
def main(prefix):
    """Publish multi-arch manifest indexes for rayproject/ray image tags.

    For every x86 tag that has a matching "-aarch64" twin, publish one index
    (named after the x86 tag) combining the two platform-specific images.

    Args:
        prefix: Tag prefix passed through to list_image_tags to select which
            image tags to consider.
    """
    tags = list_image_tags(
        prefix, RayType.RAY, PYTHON_VERSIONS_RAY, PLATFORMS_RAY, ARCHITECTURES_RAY
    )
    tags = [f"rayproject/ray:{tag}" for tag in tags]
    # Use a set for O(1) membership checks instead of scanning the list per tag.
    tag_set = set(tags)
    indexes_to_publish = []
    for tag in tags:
        # Pair each x86 tag with its aarch64 counterpart when one exists.
        if not tag.endswith("-aarch64") and tag + "-aarch64" in tag_set:
            indexes_to_publish.append((tag, tag + "-aarch64"))
    # Fix: the loop variable used to be named `tags`, shadowing the tag list
    # built above; use a distinct name so the code reads unambiguously.
    for index_tags in indexes_to_publish:
        generate_index(index_name=index_tags[0], tags=index_tags)
if __name__ == "__main__":
    # Click parses --prefix from the command line.
    main()
| {
"repo_id": "ray-project/ray",
"file_path": "ci/ray_ci/automation/generate_index.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:release/llm_tests/serve/test_llm_serve_integration.py | import pytest
import sys
from ray import serve
from ray.serve.llm import LLMConfig, build_openai_app
from vllm import AsyncEngineArgs
from vllm.v1.engine.async_llm import AsyncLLM
from vllm.v1.metrics.ray_wrappers import RayPrometheusStatLogger
from vllm.sampling_params import SamplingParams
from ray._common.test_utils import wait_for_condition
from ray.serve._private.constants import SERVE_DEFAULT_APP_NAME
from ray.serve.schema import ApplicationStatus
import time
@pytest.mark.asyncio(scope="function")
async def test_engine_metrics():
    """
    Test that the stat logger can be created successfully.
    Keeping this test small to focus on instantiating the
    derived class correctly.
    """
    engine_args = AsyncEngineArgs(
        model="Qwen/Qwen2.5-0.5B-Instruct",
        dtype="auto",
        disable_log_stats=False,
        enforce_eager=True,
    )
    engine = AsyncLLM.from_engine_args(
        engine_args, stat_loggers=[RayPrometheusStatLogger]
    )
    for i, prompt in enumerate(["What is the capital of France?", "What is 2+2?"]):
        results = engine.generate(
            request_id=f"request-id-{i}",
            prompt=prompt,
            sampling_params=SamplingParams(max_tokens=10),
        )
        # Drain the async generator so generation runs to completion and
        # the stat logger actually records something.
        async for _ in results:
            pass
@pytest.mark.asyncio(scope="function")
async def test_engine_metrics_with_lora():
    """
    Test that the stat logger can be created successfully with LoRA configuration.
    This test validates LoRA-enabled engine initialization and basic functionality.
    """
    engine_args = AsyncEngineArgs(
        model="Qwen/Qwen2.5-0.5B-Instruct",  # Using smaller model for testing
        disable_log_stats=False,
        enforce_eager=True,
        enable_prefix_caching=True,
        max_model_len=512,
        max_lora_rank=64,
        enable_lora=True,
        max_loras=3,
        max_cpu_loras=5,
    )
    engine = AsyncLLM.from_engine_args(
        engine_args, stat_loggers=[RayPrometheusStatLogger]
    )
    for i, prompt in enumerate(["What is the capital of France?", "What is 2+2?"]):
        results = engine.generate(
            request_id=f"lora-request-id-{i}",
            prompt=prompt,
            sampling_params=SamplingParams(max_tokens=10),
        )
        # Drain the async generator so generation runs to completion.
        async for _ in results:
            pass
@pytest.mark.asyncio(scope="function")
async def test_engine_metrics_with_spec_decode():
    """
    Test that the stat logger can be created successfully with speculative decoding configuration.
    This test validates speculative decoding engine initialization and basic functionality.
    """
    engine_args = AsyncEngineArgs(
        model="Qwen/Qwen2.5-0.5B-Instruct",
        dtype="auto",
        disable_log_stats=False,
        enforce_eager=True,
        trust_remote_code=True,
        enable_prefix_caching=True,
        max_model_len=256,
        # ngram speculation needs no draft model, keeping the test lightweight.
        speculative_config={
            "method": "ngram",
            "num_speculative_tokens": 5,
            "prompt_lookup_max": 4,
        },
    )
    engine = AsyncLLM.from_engine_args(
        engine_args, stat_loggers=[RayPrometheusStatLogger]
    )
    for i, prompt in enumerate(["What is the capital of France?", "What is 2+2?"]):
        results = engine.generate(
            request_id=f"spec-request-id-{i}",
            prompt=prompt,
            sampling_params=SamplingParams(max_tokens=10),
        )
        # Drain the async generator so generation runs to completion.
        async for _ in results:
            pass
def is_default_app_running():
    """Return True when the Serve default application has reached RUNNING."""
    try:
        app_status = serve.status().applications[SERVE_DEFAULT_APP_NAME].status
    except (KeyError, AttributeError):
        # No default app deployed yet, or status object not populated.
        return False
    return app_status == ApplicationStatus.RUNNING
@pytest.mark.parametrize("model_name", ["deepseek-ai/DeepSeek-V2-Lite"])
def test_deepseek_model(model_name):
    """
    Test that the deepseek model can be loaded successfully.
    """
    llm_config = LLMConfig(
        model_loading_config=dict(
            model_id=model_name,
        ),
        deployment_config=dict(
            autoscaling_config=dict(min_replicas=1, max_replicas=1),
        ),
        # TP=2 x PP=2 — requires 4 GPUs on the test cluster.
        engine_kwargs=dict(
            tensor_parallel_size=2,
            pipeline_parallel_size=2,
            gpu_memory_utilization=0.92,
            dtype="auto",
            max_num_seqs=40,
            max_model_len=8192,
            enable_chunked_prefill=True,
            enable_prefix_caching=True,
            enforce_eager=True,
            trust_remote_code=True,
        ),
    )
    app = build_openai_app({"llm_configs": [llm_config]})
    serve.run(app, blocking=False)
    wait_for_condition(is_default_app_running, timeout=300)
    # Give Serve a moment to tear down before the next test starts.
    serve.shutdown()
    time.sleep(1)
@pytest.mark.parametrize("model_name", ["openai/whisper-small"])
def test_transcription_model(model_name):
    """
    Test that the transcription models can be loaded successfully.
    """
    llm_config = LLMConfig(
        model_loading_config=dict(
            model_id=model_name,
            model_source=model_name,
        ),
        deployment_config=dict(
            autoscaling_config=dict(min_replicas=1, max_replicas=4),
        ),
        engine_kwargs=dict(
            trust_remote_code=True,
            gpu_memory_utilization=0.9,
            enable_prefix_caching=True,
        ),
    )
    app = build_openai_app({"llm_configs": [llm_config]})
    serve.run(app, blocking=False)
    wait_for_condition(is_default_app_running, timeout=180)
    # Give Serve a moment to tear down before the next test starts.
    serve.shutdown()
    time.sleep(1)
@pytest.fixture
def remote_model_app(request):
    """
    Fixture that creates an app with a remote code model for testing.
    The remote_code parameter controls whether trust_remote_code is enabled.
    This helps avoid regressions for pickling issues for custom huggingface configs,
    since this custom code needs to be registered and imported across processes and workers.
    """
    # request.param is a bool supplied via indirect parametrization.
    remote_code = request.param
    base_config = {
        "model_loading_config": dict(
            model_id="hmellor/Ilama-3.2-1B",
        ),
        "deployment_config": dict(
            autoscaling_config=dict(min_replicas=1, max_replicas=1),
        ),
        "engine_kwargs": dict(
            trust_remote_code=remote_code,
        ),
    }
    llm_config = LLMConfig(**base_config)
    app = build_openai_app({"llm_configs": [llm_config]})
    yield app
    # Cleanup
    serve.shutdown()
    time.sleep(1)
class TestRemoteCode:
    """Tests for remote code model loading behavior."""

    @pytest.mark.parametrize("remote_model_app", [False], indirect=True)
    def test_remote_code_failure(self, remote_model_app):
        """
        Tests that a remote code model fails to load when trust_remote_code=False.
        If it loads successfully without remote code, the fixture should be changed to one that does require remote code.
        """
        app = remote_model_app
        with pytest.raises(RuntimeError, match="Deploying application default failed"):
            serve.run(app, blocking=False)

        def check_for_failed_deployment():
            """Check if the application deployment has failed."""
            try:
                default_app = serve.status().applications[SERVE_DEFAULT_APP_NAME]
                return default_app.status == ApplicationStatus.DEPLOY_FAILED
            except (KeyError, AttributeError):
                return False

        # Wait for either failure or success (timeout after 2 minutes)
        try:
            wait_for_condition(check_for_failed_deployment, timeout=120)
        except TimeoutError:
            # If deployment didn't fail, check if it succeeded
            if is_default_app_running():
                pytest.fail(
                    "App deployed successfully without trust_remote_code=True. "
                    "This model may not actually require remote code. "
                    "Consider using a different model that requires remote code."
                )
            else:
                pytest.fail("Deployment did not fail or succeed within timeout period.")

    @pytest.mark.parametrize("remote_model_app", [True], indirect=True)
    def test_remote_code_success(self, remote_model_app):
        """
        Tests that a remote code model succeeds to load when trust_remote_code=True.
        """
        app = remote_model_app
        serve.run(app, blocking=False)
        # Wait for the application to be running (timeout after 5 minutes)
        wait_for_condition(is_default_app_running, timeout=300)
def test_nested_engine_kwargs_structured_outputs():
    """Regression test for https://github.com/ray-project/ray/pull/60380

    Ensures nested dict values in engine_kwargs (here structured_outputs_config)
    survive config handling and the app still deploys.
    """
    llm_config = LLMConfig(
        model_loading_config=dict(
            model_id="Qwen/Qwen2.5-0.5B-Instruct",
        ),
        deployment_config=dict(
            autoscaling_config=dict(min_replicas=1, max_replicas=1),
        ),
        engine_kwargs=dict(
            enforce_eager=True,
            max_model_len=512,
            structured_outputs_config={
                "backend": "xgrammar",
            },
        ),
    )
    app = build_openai_app({"llm_configs": [llm_config]})
    serve.run(app, blocking=False)
    wait_for_condition(is_default_app_running, timeout=180)
    serve.shutdown()
    time.sleep(1)
if __name__ == "__main__":
    # Allow running this test module directly via pytest.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "release/llm_tests/serve/test_llm_serve_integration.py",
"license": "Apache License 2.0",
"lines": 246,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/tests/test_worker_graceful_shutdown.py | import os
import signal
import sys
from typing import List
import pytest
import ray
from ray._common.test_utils import SignalActor, wait_for_condition
@pytest.mark.skipif(
    sys.platform == "win32", reason="Windows doesn't handle SIGTERM gracefully."
)
@pytest.mark.parametrize("actor_type", ["asyncio", "threaded"])
def test_ray_get_during_graceful_shutdown(ray_start_regular_shared, actor_type: str):
    """Test that ray.get works as expected when draining tasks during shutdown.

    This currently only applies to concurrent actors, because single-threaded actors do
    not allow tasks to finish exiting after SIGTERM.
    """
    signal_actor = SignalActor.remote()
    assert actor_type in {"asyncio", "threaded"}
    if actor_type == "asyncio":

        @ray.remote
        class A:
            def exit(self):
                # SIGTERM ourselves to trigger graceful shutdown.
                os.kill(os.getpid(), signal.SIGTERM)

            async def wait_then_get(self, nested_ref: List[ray.ObjectRef]) -> str:
                print("Waiting for signal...")
                await signal_actor.wait.remote()
                print("Got signal, calling ray.get")
                return await nested_ref[0]

    elif actor_type == "threaded":

        @ray.remote(max_concurrency=2)
        class A:
            def exit(self):
                os.kill(os.getpid(), signal.SIGTERM)

            def wait_then_get(self, nested_ref: List[ray.ObjectRef]):
                print("Waiting for signal...")
                ray.get(signal_actor.wait.remote())
                print("Got signal, calling ray.get")
                return ray.get(nested_ref[0])

    # Start the actor and wait for the method to begin executing and then block.
    actor = A.remote()
    wait_ref = actor.wait_then_get.remote([ray.put("hi")])
    wait_for_condition(lambda: ray.get(signal_actor.cur_num_waiters.remote()) == 1)
    # SIGTERM the process and then signal the method to unblock.
    ray.get(actor.exit.remote())
    ray.get(signal_actor.send.remote())
    # Check that the method succeeds as expected.
    assert ray.get(wait_ref) == "hi"
if __name__ == "__main__":
    # Allow running this test module directly via pytest.
    sys.exit(pytest.main(["-sv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/test_worker_graceful_shutdown.py",
"license": "Apache License 2.0",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/serve/request_router.py | from ray.serve._private.common import ReplicaID # noqa: F401
from ray.serve._private.replica_result import ReplicaResult # noqa: F401
from ray.serve._private.request_router.common import ( # noqa: F401
PendingRequest,
)
from ray.serve._private.request_router.replica_wrapper import ( # noqa: F401
RunningReplica,
)
from ray.serve._private.request_router.request_router import ( # noqa: F401
FIFOMixin,
LocalityMixin,
MultiplexMixin,
RequestRouter,
)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/request_router.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/_private/telemetry/open_telemetry_metric_recorder.py | import logging
import threading
from collections import defaultdict
from typing import Callable, List
from opentelemetry import metrics
from opentelemetry.exporter.prometheus import PrometheusMetricReader
from opentelemetry.metrics import Observation
from opentelemetry.sdk.metrics import MeterProvider
from ray._private.metrics_agent import Record
from ray._private.telemetry.metric_cardinality import MetricCardinality
from ray._private.telemetry.metric_types import MetricType
logger = logging.getLogger(__name__)
NAMESPACE = "ray"
class OpenTelemetryMetricRecorder:
"""
A class to record OpenTelemetry metrics. This is the main entry point for exporting
all ray telemetries to Prometheus server.
It uses OpenTelemetry's Prometheus exporter to export metrics.
"""
_metrics_initialized = False
_metrics_initialized_lock = threading.Lock()
    def __init__(self):
        # Guards the observation dicts below: OpenTelemetry collection
        # callbacks and application-side recorders run on different threads.
        self._lock = threading.Lock()
        # metric name -> OpenTelemetry instrument handle.
        self._registered_instruments = {}
        # metric name -> {frozenset of (tag_key, tag_value) -> value}.
        self._gauge_observations_by_name = defaultdict(dict)
        self._counter_observations_by_name = defaultdict(dict)
        self._sum_observations_by_name = defaultdict(dict)
        # metric name -> list of histogram bucket midpoints.
        self._histogram_bucket_midpoints = defaultdict(list)
        self._init_metrics()
        self.meter = metrics.get_meter(__name__)
    def _create_observable_callback(
        self, metric_name: str, metric_type: MetricType
    ) -> Callable[[dict], List[Observation]]:
        """
        Factory method to create callbacks for observable metrics.

        Args:
            metric_name: name of the metric for which the callback is being created
            metric_type: type of the metric for which the callback is being created

        Returns:
            Callable: A callback function that can be used to record observations for the metric.
        """

        # The returned callback is invoked by the OpenTelemetry SDK on each
        # metric collection cycle.
        def callback(options):
            with self._lock:
                # Select appropriate storage based on metric type
                if metric_type == MetricType.GAUGE:
                    observations = self._gauge_observations_by_name.get(metric_name, {})
                    # Clear after reading (gauges report last value)
                    self._gauge_observations_by_name[metric_name] = {}
                elif metric_type == MetricType.COUNTER:
                    observations = self._counter_observations_by_name.get(
                        metric_name, {}
                    )
                    # Don't clear - counters are cumulative
                elif metric_type == MetricType.SUM:
                    observations = self._sum_observations_by_name.get(metric_name, {})
                    # Don't clear - sums are cumulative
                else:
                    return []
                # Aggregate by filtered tags (drop high cardinality labels)
                high_cardinality_labels = (
                    MetricCardinality.get_high_cardinality_labels_to_drop(metric_name)
                )
                # First, collect all values that share the same filtered tag set
                values_by_filtered_tags = defaultdict(list)
                for tag_set, val in observations.items():
                    filtered = frozenset(
                        (k, v) for k, v in tag_set if k not in high_cardinality_labels
                    )
                    values_by_filtered_tags[filtered].append(val)
                # Then aggregate each group using the appropriate aggregation function
                agg_fn = MetricCardinality.get_aggregation_function(
                    metric_name, metric_type
                )
                return [
                    Observation(agg_fn(values), attributes=dict(filtered))
                    for filtered, values in values_by_filtered_tags.items()
                ]

        return callback
def _init_metrics(self):
    """Install the global OpenTelemetry meter provider exactly once.

    Wires a Prometheus reader into the provider so registered instruments
    are exposed for scraping. Guarded by a flag + lock (shared across
    instances — NOTE(review): the flag is read before any instance ever
    sets it, so it is presumably a class attribute; confirm at the class
    definition).
    """
    # Initialize the global metrics provider and meter. We only do this once on
    # the first initialization of the class, because re-setting the meter provider
    # can result in loss of metrics.
    with self._metrics_initialized_lock:
        if self._metrics_initialized:
            return
        prometheus_reader = PrometheusMetricReader()
        provider = MeterProvider(metric_readers=[prometheus_reader])
        metrics.set_meter_provider(provider)
        self._metrics_initialized = True
def register_gauge_metric(self, name: str, description: str) -> None:
    """Register an observable gauge under ``name`` (idempotent).

    The gauge's values are pulled asynchronously through the callback
    created here; re-registering an existing name is a no-op.
    """
    with self._lock:
        already_registered = name in self._registered_instruments
        if already_registered:
            # Another component beat us to it; nothing to do.
            return
        gauge_callback = self._create_observable_callback(name, MetricType.GAUGE)
        gauge = self.meter.create_observable_gauge(
            name=f"{NAMESPACE}_{name}",
            description=description,
            unit="1",
            callbacks=[gauge_callback],
        )
        self._registered_instruments[name] = gauge
        self._gauge_observations_by_name[name] = {}
def register_counter_metric(self, name: str, description: str) -> None:
    """
    Register an observable counter metric with the given name and description.

    Idempotent: multiple Ray components on one node (raylet, workers, ...)
    may export the same metric name, so repeat registrations are ignored.
    """
    with self._lock:
        if name in self._registered_instruments:
            # Already registered by another component — keep the first one.
            return
        counter_callback = self._create_observable_callback(
            name, MetricType.COUNTER
        )
        counter = self.meter.create_observable_counter(
            name=f"{NAMESPACE}_{name}",
            description=description,
            unit="1",
            callbacks=[counter_callback],
        )
        self._registered_instruments[name] = counter
        self._counter_observations_by_name[name] = {}
def register_sum_metric(self, name: str, description: str) -> None:
    """
    Register an observable sum metric with the given name and description.

    Backed by an up/down counter so the value may decrease. Idempotent:
    repeated registrations from different Ray components are ignored.
    """
    with self._lock:
        if name in self._registered_instruments:
            # Name already taken by an earlier registration — no-op.
            return
        sum_callback = self._create_observable_callback(name, MetricType.SUM)
        up_down_counter = self.meter.create_observable_up_down_counter(
            name=f"{NAMESPACE}_{name}",
            description=description,
            unit="1",
            callbacks=[sum_callback],
        )
        self._registered_instruments[name] = up_down_counter
        self._sum_observations_by_name[name] = {}
def register_histogram_metric(
    self, name: str, description: str, buckets: List[float]
) -> None:
    """
    Register a histogram metric with the given name and description.

    Idempotent like the other register_* methods. Also precomputes one
    midpoint per bucket (including the implicit +Inf bucket) so that
    pre-aggregated bucket counts can later be replayed as approximate
    observations.
    """
    with self._lock:
        if name in self._registered_instruments:
            # Duplicate registration from another Ray component — ignore.
            return
        histogram = self.meter.create_histogram(
            name=f"{NAMESPACE}_{name}",
            description=description,
            unit="1",
            explicit_bucket_boundaries_advisory=buckets,
        )
        self._registered_instruments[name] = histogram
        # First bucket: synthesize a lower bound (zero when the first
        # boundary is positive, otherwise twice the boundary) and take the
        # midpoint up to that boundary.
        first = buckets[0]
        synthetic_floor = 0.0 if first > 0 else first * 2.0
        midpoints = [(synthetic_floor + first) / 2.0]
        # Interior buckets: halfway between consecutive boundaries.
        for lower, upper in zip(buckets, buckets[1:]):
            midpoints.append((lower + upper) / 2.0)
        # Implicit +Inf bucket (not part of `buckets`): approximate with
        # twice the last boundary, or 1.0 when boundaries are non-positive.
        last = buckets[-1]
        midpoints.append(1.0 if last <= 0 else last * 2.0)
        self._histogram_bucket_midpoints[name] = midpoints
def get_histogram_bucket_midpoints(self, name: str) -> List[float]:
    """
    Get the bucket midpoints for a histogram metric with the given name.

    One midpoint per bucket, including the implicit +Inf bucket appended
    at registration time.
    """
    midpoints = self._histogram_bucket_midpoints[name]
    return midpoints
def set_metric_value(self, name: str, tags: dict, value: float):
    """
    Set the value of a metric with the given name and tags.

    For observable metrics (gauge, counter, sum), this stores the value internally
    and returns immediately. The value will be exported asynchronously when
    OpenTelemetry collects metrics.
    For histograms, this calls record() synchronously since there is no observable
    histogram in OpenTelemetry.
    If the metric is not registered, it lazily records the value for observable metrics or is a no-op for
    synchronous metrics.

    Dispatch order matters: gauge, then counter, then sum, then histogram —
    a name registered as one kind never reaches the later branches.
    """
    with self._lock:
        # Tags become a hashable frozenset of (key, value) pairs so they can
        # key the per-metric observation dicts.
        tag_key = frozenset(tags.items())
        # NOTE(review): gauges use `.get(name) is not None` while counters/
        # sums use `in`; with the defaultdict backing these are equivalent
        # membership tests, just written inconsistently.
        if self._gauge_observations_by_name.get(name) is not None:
            # Gauge - store the most recent value for the given tags.
            self._gauge_observations_by_name[name][tag_key] = value
        elif name in self._counter_observations_by_name:
            # Counter - increment the value for the given tags.
            self._counter_observations_by_name[name][tag_key] = (
                self._counter_observations_by_name[name].get(tag_key, 0) + value
            )
        elif name in self._sum_observations_by_name:
            # Sum - add the value for the given tags.
            self._sum_observations_by_name[name][tag_key] = (
                self._sum_observations_by_name[name].get(tag_key, 0) + value
            )
        else:
            # Histogram - record the value synchronously.
            instrument = self._registered_instruments.get(name)
            if isinstance(instrument, metrics.Histogram):
                # Filter out high cardinality labels.
                filtered_tags = {
                    k: v
                    for k, v in tags.items()
                    if k
                    not in MetricCardinality.get_high_cardinality_labels_to_drop(
                        name
                    )
                }
                instrument.record(value, attributes=filtered_tags)
            else:
                # Unknown name or unsupported instrument type: warn, don't raise.
                logger.warning(
                    f"Metric {name} is not registered or unsupported type."
                )
def record_histogram_aggregated_batch(
    self,
    name: str,
    data_points: List[dict],
) -> None:
    """
    Record pre-aggregated histogram data for multiple data points in a single batch.

    This method takes pre-aggregated bucket counts and reconstructs individual
    observations using bucket midpoints. It acquires the lock once and performs
    all record() calls for ALL data points, minimizing lock contention.

    Each entry of ``data_points`` is a dict with keys ``"tags"`` (label dict)
    and ``"bucket_counts"`` (one count per bucket, including the implicit
    +Inf bucket, matching the midpoints computed at registration).

    Note: The histogram sum value will be an approximation since we use bucket midpoints instead of actual values.
    """
    with self._lock:
        instrument = self._registered_instruments.get(name)
        if not isinstance(instrument, metrics.Histogram):
            # The name is unknown or registered as a different instrument
            # type; skip silently apart from the warning.
            logger.warning(
                f"Metric {name} is not a registered histogram, skipping recording."
            )
            return
        bucket_midpoints = self._histogram_bucket_midpoints[name]
        # Drop high-cardinality labels once per batch, not per data point.
        high_cardinality_labels = (
            MetricCardinality.get_high_cardinality_labels_to_drop(name)
        )
        for dp in data_points:
            tags = dp["tags"]
            bucket_counts = dp["bucket_counts"]
            assert len(bucket_counts) == len(
                bucket_midpoints
            ), "Number of bucket counts and midpoints must match"
            filtered_tags = {
                k: v for k, v in tags.items() if k not in high_cardinality_labels
            }
            for i, bucket_count in enumerate(bucket_counts):
                if bucket_count == 0:
                    continue
                midpoint = bucket_midpoints[i]
                # Replay each counted observation at the bucket's midpoint.
                for _ in range(bucket_count):
                    instrument.record(midpoint, attributes=filtered_tags)
def record_and_export(self, records: List[Record], global_tags=None):
    """
    Record a list of telemetry records and export them to Prometheus.

    Every record is registered as a gauge (a no-op when already
    registered) and its value stored under the union of the record's own
    tags and ``global_tags``. A failure on one record is logged and does
    not abort the rest of the batch.
    """
    shared_tags = global_tags or {}
    for rec in records:
        gauge = rec.gauge
        combined_tags = dict(rec.tags)
        combined_tags.update(shared_tags)
        try:
            self.register_gauge_metric(gauge.name, gauge.description or "")
            self.set_metric_value(gauge.name, combined_tags, rec.value)
        except Exception as e:
            logger.error(
                f"Failed to record metric {gauge.name} with value {rec.value} with tags {combined_tags!r} and global tags {shared_tags!r} due to: {e!r}"
            )
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/_private/telemetry/open_telemetry_metric_recorder.py",
"license": "Apache License 2.0",
"lines": 286,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/tests/test_open_telemetry_metric_recorder.py | import sys
from unittest.mock import MagicMock, patch
import pytest
from opentelemetry.metrics import NoOpHistogram
from ray._private.metrics_agent import Gauge, Record
from ray._private.telemetry.open_telemetry_metric_recorder import (
OpenTelemetryMetricRecorder,
)
@patch("opentelemetry.metrics.set_meter_provider")
@patch("opentelemetry.metrics.get_meter")
def test_register_gauge_metric(mock_get_meter, mock_set_meter_provider):
    """
    Exercise OpenTelemetryMetricRecorder.register_gauge_metric.
    - A gauge registers successfully against a mocked meter.
    - A recorded value lands in the gauge observation store keyed by the
      frozenset of its tags.
    """
    mock_get_meter.return_value = MagicMock()
    rec = OpenTelemetryMetricRecorder()
    rec.register_gauge_metric(name="test_gauge", description="Test Gauge")
    # Store one observation for the gauge.
    rec.set_metric_value(
        name="test_gauge", tags={"label_key": "label_value"}, value=42.0
    )
    expected_key = frozenset({("label_key", "label_value")})
    assert rec._gauge_observations_by_name == {"test_gauge": {expected_key: 42.0}}
@patch("ray._private.telemetry.open_telemetry_metric_recorder.logger.warning")
@patch("opentelemetry.metrics.set_meter_provider")
@patch("opentelemetry.metrics.get_meter")
def test_register_counter_metric(
    mock_get_meter, mock_set_meter_provider, mock_logger_warning
):
    """
    Test the register_counter_metric method of OpenTelemetryMetricRecorder.
    - Test that it registers an observable counter metric with the correct name and description.
    - Test that values are accumulated in _counter_observations.
    - Test that setting a value on an unregistered metric warns instead of raising.
    """
    mock_meter = MagicMock()
    mock_get_meter.return_value = mock_meter
    recorder = OpenTelemetryMetricRecorder()
    recorder.register_counter_metric(name="test_counter", description="Test Counter")
    assert "test_counter" in recorder._registered_instruments
    assert "test_counter" in recorder._counter_observations_by_name
    recorder.set_metric_value(
        name="test_counter",
        tags={"label_key": "label_value"},
        value=10.0,
    )
    assert recorder._counter_observations_by_name["test_counter"] == {
        frozenset({("label_key", "label_value")}): 10.0
    }
    # Ensure that the value is accumulated correctly
    recorder.set_metric_value(
        name="test_counter",
        tags={"label_key": "label_value"},
        value=5.0,
    )
    assert recorder._counter_observations_by_name["test_counter"] == {
        frozenset({("label_key", "label_value")}): 15.0  # 10 + 5 = 15
    }
    mock_logger_warning.assert_not_called()
    # Unknown metric name: expect a warning, no exception.
    recorder.set_metric_value(
        name="test_counter_unregistered",
        tags={"label_key": "label_value"},
        value=10.0,
    )
    mock_logger_warning.assert_called_once_with(
        "Metric test_counter_unregistered is not registered or unsupported type."
    )
@patch("ray._private.telemetry.open_telemetry_metric_recorder.logger.warning")
@patch("opentelemetry.metrics.set_meter_provider")
@patch("opentelemetry.metrics.get_meter")
def test_register_sum_metric(
    mock_get_meter, mock_set_meter_provider, mock_logger_warning
):
    """
    Exercise OpenTelemetryMetricRecorder.register_sum_metric.
    - An observable up/down counter is registered under the given name.
    - Values accumulate (and may decrease) in _sum_observations_by_name
      without emitting warnings.
    """
    mock_get_meter.return_value = MagicMock()
    rec = OpenTelemetryMetricRecorder()
    rec.register_sum_metric(name="test_sum", description="Test Sum")
    assert "test_sum" in rec._registered_instruments
    assert "test_sum" in rec._sum_observations_by_name
    key = frozenset({("label_key", "label_value")})
    rec.set_metric_value(
        name="test_sum", tags={"label_key": "label_value"}, value=10.0
    )
    assert rec._sum_observations_by_name["test_sum"] == {key: 10.0}
    # An up_down_counter can go down: 10 - 3 = 7.
    rec.set_metric_value(
        name="test_sum", tags={"label_key": "label_value"}, value=-3.0
    )
    assert rec._sum_observations_by_name["test_sum"] == {key: 7.0}
    mock_logger_warning.assert_not_called()
@patch("ray._private.telemetry.open_telemetry_metric_recorder.logger.warning")
@patch("opentelemetry.metrics.set_meter_provider")
@patch("opentelemetry.metrics.get_meter")
def test_register_histogram_metric(
    mock_get_meter, mock_set_meter_provider, mock_logger_warning
):
    """
    Test the register_histogram_metric method of OpenTelemetryMetricRecorder.
    - Test that it registers a histogram metric with the correct name and description.
    - Test that a value can be set for the histogram metric successfully without warnings.
    - Test midpoint computation for boundaries that include negative values.
    """
    mock_meter = MagicMock()
    # A real no-op Histogram so isinstance(instrument, metrics.Histogram) passes.
    mock_meter.create_histogram.return_value = NoOpHistogram(name="test_histogram")
    mock_get_meter.return_value = mock_meter
    recorder = OpenTelemetryMetricRecorder()
    recorder.register_histogram_metric(
        name="test_histogram", description="Test Histogram", buckets=[1.0, 2.0, 3.0]
    )
    assert "test_histogram" in recorder._registered_instruments
    recorder.set_metric_value(
        name="test_histogram",
        tags={"label_key": "label_value"},
        value=10.0,
    )
    mock_logger_warning.assert_not_called()
    mock_meter.create_histogram.return_value = NoOpHistogram(name="neg_histogram")
    recorder.register_histogram_metric(
        name="neg_histogram",
        description="Histogram with negative first boundary",
        buckets=[-5.0, 0.0, 10.0],
    )
    # First midpoint uses the synthetic lower bound 2 * (-5) = -10, and the
    # trailing value approximates the implicit +Inf bucket (2 * 10 = 20).
    mids = recorder.get_histogram_bucket_midpoints("neg_histogram")
    assert mids == pytest.approx([-7.5, -2.5, 5.0, 20.0])
@patch("opentelemetry.metrics.set_meter_provider")
@patch("opentelemetry.metrics.get_meter")
def test_record_and_export(mock_get_meter, mock_set_meter_provider):
    """
    Test the record_and_export method of OpenTelemetryMetricRecorder. Test that
    - The state of _observations_by_gauge_name is correct after recording a metric.
    - If there are multiple records with the same gauge name and tags, only the last
      value is kept.
    - If there are multiple records with the same gauge name but different tags, all
      values are kept.
    - Global tags are merged into every record's tag set.
    """
    mock_get_meter.return_value = MagicMock()
    recorder = OpenTelemetryMetricRecorder()
    # Four records: "hi" appears twice with identical tags (last value wins),
    # "w00t" twice with different tags (both kept).
    recorder.record_and_export(
        [
            Record(
                gauge=Gauge(
                    name="hi",
                    description="Hi",
                    unit="unit",
                    tags={},
                ),
                value=1.0,
                tags={"label_key": "label_value"},
            ),
            Record(
                gauge=Gauge(
                    name="w00t",
                    description="w00t",
                    unit="unit",
                    tags={},
                ),
                value=2.0,
                tags={"label_key": "label_value"},
            ),
            Record(
                gauge=Gauge(
                    name="w00t",
                    description="w00t",
                    unit="unit",
                    tags={},
                ),
                value=20.0,
                tags={"another_label_key": "another_label_value"},
            ),
            Record(
                gauge=Gauge(
                    name="hi",
                    description="Hi",
                    unit="unit",
                    tags={},
                ),
                value=3.0,
                tags={"label_key": "label_value"},
            ),
        ],
        global_tags={"global_label_key": "global_label_value"},
    )
    assert recorder._gauge_observations_by_name == {
        "hi": {
            frozenset(
                {
                    ("label_key", "label_value"),
                    ("global_label_key", "global_label_value"),
                }
            ): 3.0
        },
        "w00t": {
            frozenset(
                {
                    ("label_key", "label_value"),
                    ("global_label_key", "global_label_value"),
                }
            ): 2.0,
            frozenset(
                {
                    ("another_label_key", "another_label_value"),
                    ("global_label_key", "global_label_value"),
                }
            ): 20.0,
        },
    }
@patch("ray._private.telemetry.open_telemetry_metric_recorder.logger.warning")
@patch("opentelemetry.metrics.set_meter_provider")
@patch("opentelemetry.metrics.get_meter")
def test_record_histogram_aggregated_batch(
    mock_get_meter, mock_set_meter_provider, mock_logger_warning
):
    """
    Test the record_histogram_aggregated_batch method of OpenTelemetryMetricRecorder.
    - Test that it records histogram data for multiple data points in a single batch.
    - Test that it calls instrument.record() for each observation.
    - Test that it warns if the histogram is not registered.
    """
    mock_meter = MagicMock()
    # Wrap a real NoOpHistogram so the isinstance check inside the recorder
    # passes while record() calls are still counted by the mock.
    real_histogram = NoOpHistogram(name="test_histogram")
    mock_histogram = MagicMock(wraps=real_histogram, spec=real_histogram)
    mock_meter.create_histogram.return_value = mock_histogram
    mock_get_meter.return_value = mock_meter
    recorder = OpenTelemetryMetricRecorder()
    # Test warning when histogram not registered
    recorder.record_histogram_aggregated_batch(
        name="unregistered_histogram",
        data_points=[{"tags": {"key": "value"}, "bucket_counts": [1, 2, 3]}],
    )
    mock_logger_warning.assert_called_once_with(
        "Metric unregistered_histogram is not a registered histogram, skipping recording."
    )
    mock_logger_warning.reset_mock()
    # Register histogram
    recorder.register_histogram_metric(
        name="test_histogram",
        description="Test Histogram",
        buckets=[1.0, 10.0, 100.0],
    )
    # Record batch data - 2 data points with different tags
    # bucket_counts: [2, 3, 0, 1] means:
    #   2 observations in bucket 0-1 (midpoint 0.5)
    #   3 observations in bucket 1-10 (midpoint 5.5)
    #   0 observations in bucket 10-100 (midpoint 55.0)
    #   1 observation in bucket 100-Inf+ (midpoint 200.0)
    recorder.record_histogram_aggregated_batch(
        name="test_histogram",
        data_points=[
            {"tags": {"endpoint": "/api/v1"}, "bucket_counts": [2, 3, 0, 1]},
            {"tags": {"endpoint": "/api/v2"}, "bucket_counts": [1, 0, 1, 0]},
        ],
    )
    # Verify record() was called the correct number of times
    # First data point: 2 + 3 + 0 + 1 = 6 calls
    # Second data point: 1 + 0 + 1 + 0 = 2 calls
    # Total: 8 calls
    assert mock_histogram.record.call_count == 8
    # No warnings should be logged for registered histogram
    mock_logger_warning.assert_not_called()
if __name__ == "__main__":
    # Allow running this test module directly: delegate to pytest.
    sys.exit(pytest.main(["-svv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/test_open_telemetry_metric_recorder.py",
"license": "Apache License 2.0",
"lines": 278,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:release/benchmarks/object_store/test_large_objects.py | import json
import os
from time import perf_counter
import numpy as np
from tqdm import tqdm
import ray
# Benchmark scale: one actor per node, one large object per actor.
NUM_NODES = 9
# 2**32 bytes = 4 GiB per object.
OBJECT_SIZE = 2**32
def test_object_many_to_one():
    """Time pulling one 4 GiB object from each of NUM_NODES remote actors.

    Returns the wall-clock seconds between task submission and all
    results being fetched by the driver.
    """

    @ray.remote(num_cpus=1, resources={"node": 1})
    class Actor:
        def foo(self):
            # No-op used only to confirm the actor is alive.
            pass

        def send_objects(self):
            return np.ones(OBJECT_SIZE, dtype=np.uint8)

    workers = [Actor.remote() for _ in range(NUM_NODES)]
    for worker in tqdm(workers, desc="Ensure all actors have started."):
        ray.get(worker.foo.remote())

    begin = perf_counter()
    refs = [
        worker.send_objects.remote()
        for worker in tqdm(workers, desc="Tasks kickoff")
    ]
    payloads = ray.get(refs)
    finish = perf_counter()

    # Sanity-check every transferred payload before reporting the timing.
    for payload in payloads:
        assert len(payload) == OBJECT_SIZE
    return finish - begin
def test_object_one_to_many():
    """Time broadcasting one 4 GiB driver object to NUM_NODES actors.

    Returns the wall-clock seconds from task submission until every actor
    has received (and measured) the shared object.
    """

    @ray.remote(num_cpus=1, resources={"node": 1})
    class Actor:
        def foo(self):
            # No-op used only to confirm the actor is alive.
            pass

        def data_len(self, arr):
            return len(arr)

    actors = [Actor.remote() for _ in range(NUM_NODES)]
    # Put the object once; all actors receive the same ObjectRef.
    arr = np.ones(OBJECT_SIZE, dtype=np.uint8)
    ref = ray.put(arr)
    for actor in tqdm(actors, desc="Ensure all actors have started."):
        ray.get(actor.foo.remote())
    start = perf_counter()
    result_refs = []
    for actor in tqdm(actors, desc="Tasks kickoff"):
        result_refs.append(actor.data_len.remote(ref))
    results = ray.get(result_refs)
    end = perf_counter()
    # Every actor must have seen the full-size object.
    for result in results:
        assert result == OBJECT_SIZE
    return end - start
# --- Script entry: run both benchmarks against an already-running cluster. ---
ray.init(address="auto")

many_to_one_duration = test_object_many_to_one()
print(f"many_to_one time: {many_to_one_duration} ({OBJECT_SIZE} B x {NUM_NODES} nodes)")
one_to_many_duration = test_object_one_to_many()
print(f"one_to_many time: {one_to_many_duration} ({OBJECT_SIZE} B x {NUM_NODES} nodes)")

# Release-test harness: when TEST_OUTPUT_JSON is set, persist results there.
if "TEST_OUTPUT_JSON" in os.environ:
    with open(os.environ["TEST_OUTPUT_JSON"], "w") as out_file:
        results = {
            "many_to_one_time": many_to_one_duration,
            "one_to_many_time": one_to_many_duration,
            "object_size": OBJECT_SIZE,
            "num_nodes": NUM_NODES,
        }
        # perf_metrics follows the release-test result schema.
        results["perf_metrics"] = [
            {
                "perf_metric_name": f"time_many_to_one_{OBJECT_SIZE}_bytes_from_{NUM_NODES}_nodes",
                "perf_metric_value": many_to_one_duration,
                "perf_metric_type": "LATENCY",
            },
            {
                "perf_metric_name": f"time_one_to_many_{OBJECT_SIZE}_bytes_to_{NUM_NODES}_nodes",
                "perf_metric_value": one_to_many_duration,
                "perf_metric_type": "LATENCY",
            },
        ]
        json.dump(results, out_file)
| {
"repo_id": "ray-project/ray",
"file_path": "release/benchmarks/object_store/test_large_objects.py",
"license": "Apache License 2.0",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:release/benchmarks/object_store/test_small_objects.py | import json
import os
import time
import numpy as np
import ray
def test_small_objects_many_to_one():
    """Measure actor->driver throughput of small plasma objects.

    Runs for 60 seconds and returns messages per second.
    """

    @ray.remote(num_cpus=1)
    class Actor:
        def send(self, _, actor_idx):
            # this size is chosen because it's >100kb so big enough to be stored in plasma
            numpy_arr = np.ones((20, 1024))
            return (numpy_arr, actor_idx)

    actors = [Actor.remote() for _ in range(64)]
    # Prime one in-flight send per actor.
    not_ready = []
    for index, actor in enumerate(actors):
        not_ready.append(actor.send.remote(0, index))
    num_messages = 0
    start_time = time.time()
    while time.time() - start_time < 60:
        # num_returns=10 blocks until exactly 10 refs are ready, so each
        # loop iteration accounts for 10 messages.
        ready, not_ready = ray.wait(not_ready, num_returns=10)
        for ready_ref in ready:
            # Re-issue a send to the actor that just completed, keeping the
            # pipeline full.
            _, actor_idx = ray.get(ready_ref)
            not_ready.append(actors[actor_idx].send.remote(0, actor_idx))
        num_messages += 10
    return num_messages / 60
def test_small_objects_one_to_many():
    """Measure driver->actor throughput of one shared small plasma object.

    Runs for 60 seconds and returns messages per second.
    """

    @ray.remote(num_cpus=1)
    class Actor:
        def receive(self, numpy_arr, actor_idx):
            return actor_idx

    actors = [Actor.remote() for _ in range(64)]
    # Single shared object; every task receives the same ObjectRef.
    numpy_arr_ref = ray.put(np.ones((20, 1024)))
    not_ready = []
    num_messages = 0
    start_time = time.time()
    for idx, actor in enumerate(actors):
        not_ready.append(actor.receive.remote(numpy_arr_ref, idx))
    while time.time() - start_time < 60:
        # Each iteration waits for exactly 10 completions and refills them.
        ready, not_ready = ray.wait(not_ready, num_returns=10)
        actor_idxs = ray.get(ready)
        for actor_idx in actor_idxs:
            not_ready.append(actors[actor_idx].receive.remote(numpy_arr_ref, actor_idx))
        num_messages += 10
    return num_messages / 60
# --- Script entry: run both throughput benchmarks on an existing cluster. ---
ray.init(address="auto")

many_to_one_throughput = test_small_objects_many_to_one()
print(f"Number of messages per second many_to_one: {many_to_one_throughput}")
one_to_many_throughput = test_small_objects_one_to_many()
print(f"Number of messages per second one_to_many: {one_to_many_throughput}")

# Release-test harness: when TEST_OUTPUT_JSON is set, persist results there.
if "TEST_OUTPUT_JSON" in os.environ:
    with open(os.environ["TEST_OUTPUT_JSON"], "w") as out_file:
        results = {
            "num_messages_many_to_one": many_to_one_throughput,
            "num_messages_one_to_many": one_to_many_throughput,
        }
        results["perf_metrics"] = [
            {
                "perf_metric_name": "num_small_objects_many_to_one",
                "perf_metric_value": many_to_one_throughput,
                "perf_metric_type": "THROUGHPUT",
            },
            {
                "perf_metric_name": "num_small_objects_one_to_many_per_second",
                "perf_metric_value": one_to_many_throughput,
                "perf_metric_type": "THROUGHPUT",
            },
        ]
        json.dump(results, out_file)
| {
"repo_id": "ray-project/ray",
"file_path": "release/benchmarks/object_store/test_small_objects.py",
"license": "Apache License 2.0",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:doc/source/ray-overview/examples/e2e-xgboost/dist_xgboost/constants.py | import os
# on Anyscale, /mnt/cluster_storage/ is persisted for the cluster
# across jobs and clusters
# https://docs.anyscale.com/configuration/storage/#storage-shared-across-nodes
if os.path.exists("/mnt/cluster_storage/"):
    storage_path = "/mnt/cluster_storage/"
    print(f"Using Anyscale storage path: {storage_path}")
else:
    # Fallback for local runs; /tmp is not shared across nodes.
    storage_path = "/tmp/"
    print(
        f"/mnt/cluster_storage/ not available, using local storage path: {storage_path}"
    )

# Artifact names/paths shared by the training, inference, and serving scripts.
preprocessor_fname = "preprocessor.pkl"
preprocessor_path = os.path.join(storage_path, preprocessor_fname)
model_fname = "model.ubj"  # name used by XGBoost
model_registry = os.path.join(storage_path, "mlflow")
experiment_name = "breast_cancer_all_features"
# Repository root: two directory levels above this file.
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/e2e-xgboost/dist_xgboost/constants.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/ray-overview/examples/e2e-xgboost/dist_xgboost/data.py | import os
import pickle
import shutil
from tempfile import TemporaryDirectory
from typing import Tuple
from urllib.parse import urlparse
import mlflow
import ray
from ray.data import Dataset
from ray.train import Checkpoint
from ray.train.xgboost import RayTrainReportCallback
from dist_xgboost.constants import (
experiment_name,
model_fname,
model_registry,
preprocessor_fname,
)
def prepare_data() -> Tuple[Dataset, Dataset, Dataset]:
    """Load the breast-cancer CSV and split it into 70/15/15 datasets.

    Returns:
        (train, validation, test) Ray Datasets. Both splits are shuffled
        with a fixed seed so the partitioning is reproducible.
    """
    split_seed = 42
    full_dataset = ray.data.read_csv("s3://anonymous@air-example-data/breast_cancer.csv")
    # 70% train, 30% held out for validation + test.
    train_ds, holdout = full_dataset.train_test_split(
        test_size=0.3, shuffle=True, seed=split_seed
    )
    # Split the holdout evenly: 15% validation, 15% testing.
    valid_ds, test_ds = holdout.train_test_split(
        test_size=0.5, shuffle=True, seed=split_seed
    )
    return train_ds, valid_ds, test_ds
def get_best_model_from_registry():
    """Locate the MLflow run with the lowest validation error.

    Returns:
        (best_run, best_artifacts_dir): the winning run's DataFrame row and
        the local filesystem path of its artifact directory.
    """
    mlflow.set_tracking_uri(f"file:{model_registry}")
    # search_runs returns a DataFrame; ascending sort puts the best run first.
    runs = mlflow.search_runs(
        experiment_names=[experiment_name],
        order_by=["metrics.validation_error ASC"],
    )
    winner = runs.iloc[0]
    # artifact_uri is a file:// URI; strip the scheme to get a plain path.
    artifacts_dir = urlparse(winner.artifact_uri).path
    return winner, artifacts_dir
def load_model_and_preprocessor():
    """Load the best run's fitted preprocessor and XGBoost model.

    Returns:
        (preprocessor, model) deserialized from the best run's artifacts.
    """
    _, artifacts_dir = get_best_model_from_registry()
    # The preprocessor was pickled alongside the checkpoint at training time.
    preprocessor_file = os.path.join(artifacts_dir, preprocessor_fname)
    with open(preprocessor_file, "rb") as f:
        preprocessor = pickle.load(f)
    # The model is recovered through Ray Train's checkpoint helper.
    checkpoint = Checkpoint.from_directory(artifacts_dir)
    model = RayTrainReportCallback.get_model(checkpoint)
    return preprocessor, model
def clean_up_old_runs():
    """Reset MLflow state so a fresh experiment can be recorded.

    Deletes the experiment (if present) and clears the on-disk registry,
    then recreates an empty registry directory.
    """
    mlflow.set_tracking_uri(f"file:{model_registry}")
    # mlflow.delete_experiment expects the experiment *id*, not its name, so
    # resolve the name first; skip quietly when the experiment doesn't exist.
    # Delete before removing the backing store, while it is still readable.
    experiment = mlflow.get_experiment_by_name(experiment_name)
    if experiment is not None:
        mlflow.delete_experiment(experiment.experiment_id)
    # Clear the on-disk registry and recreate an empty directory.
    # (Plain `if` instead of the `isdir(...) and rmtree(...)` expression trick.)
    if os.path.isdir(model_registry):
        shutil.rmtree(model_registry)
    os.makedirs(model_registry, exist_ok=True)
def log_run_to_mlflow(model_config, result, preprocessor_path):
    """Record one training run (params, metrics, artifacts) in MLflow.

    Args:
        model_config: hyperparameter dict logged as run params.
        result: Ray Train result whose metrics and checkpoint are logged.
        preprocessor_path: path to the pickled preprocessor to attach.
    """
    # create a model registry in our user storage
    mlflow.set_tracking_uri(f"file:{model_registry}")
    # create a new experiment and log metrics and artifacts
    mlflow.set_experiment(experiment_name)
    with mlflow.start_run(
        description="xgboost breast cancer classifier on all features"
    ):
        mlflow.log_params(model_config)
        mlflow.log_metrics(result.metrics)
        # Selectively log just the preprocessor and model weights: stage both
        # files into a temp dir so log_artifacts uploads only those two.
        with TemporaryDirectory() as tmp_dir:
            shutil.copy(
                os.path.join(result.checkpoint.path, model_fname),
                os.path.join(tmp_dir, model_fname),
            )
            shutil.copy(
                preprocessor_path,
                os.path.join(tmp_dir, preprocessor_fname),
            )
            mlflow.log_artifacts(tmp_dir)
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/e2e-xgboost/dist_xgboost/data.py",
"license": "Apache License 2.0",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/ray-overview/examples/e2e-xgboost/dist_xgboost/infer.py | # Note: requires train.py to be run first for the model and preprocessor to be saved to MLFlow
import os

# Enable Ray Train V2 before any ray/train modules are imported below.
os.environ["RAY_TRAIN_V2_ENABLED"] = "1"

import pandas as pd
import ray
import xgboost
from sklearn.metrics import confusion_matrix

from dist_xgboost.data import load_model_and_preprocessor, prepare_data
def transform_with_preprocessor(batch_df, preprocessor):
    """Apply the fitted preprocessor to a batch while carrying the label through.

    The preprocessor only knows the feature columns, so ``target`` is
    detached (in place, via ``pop``) before transformation and re-attached
    to the transformed output afterwards.
    """
    labels = batch_df.pop("target")
    transformed = preprocessor.transform_batch(batch_df)
    transformed["target"] = labels
    return transformed
class Validator:
    """Callable batch scorer for Ray Data ``map_batches``.

    Loads the trained model once per actor and converts feature batches
    into (prediction, target) frames.
    """

    def __init__(self, loader):
        # The loader is injected from the outer scope so tests can swap in
        # a mock; only the model half of the (preprocessor, model) pair is kept.
        _, self.model = loader()

    def __call__(self, batch: pd.DataFrame) -> pd.DataFrame:
        # Detach the label column (in place) so only features reach the model.
        labels = batch.pop("target")
        scores = self.model.predict(xgboost.DMatrix(batch))
        return pd.DataFrame({"prediction": scores, "target": labels})
def confusion_matrix_batch(batch, threshold=0.5):
    """Binarize predictions at ``threshold`` and tally this batch's confusion matrix.

    Mutates ``batch["prediction"]`` in place, then returns a single-row
    DataFrame with TN/FP/FN/TP counts so per-batch results can be summed.
    """
    # apply a threshold to the predictions to get binary labels
    batch["prediction"] = (batch["prediction"] > threshold).astype(int)
    cm = confusion_matrix(batch["target"], batch["prediction"], labels=[0, 1])
    # Row 0 is the true-negative class, row 1 the true-positive class.
    (tn, fp), (fn, tp) = cm
    return pd.DataFrame({"TN": tn, "FP": fp, "FN": fn, "TP": tp}, index=[0])
def main():
    """Run distributed batch inference and report aggregate metrics.

    Pipeline: preprocess the test split, score it with a pool of model
    replicas, reduce per-batch confusion matrices, and print
    precision/recall/F1/accuracy.
    """
    _, _, test_dataset = prepare_data()
    preprocessor, _ = load_model_and_preprocessor()
    # Apply the transformation to each batch
    test_dataset = test_dataset.map_batches(
        transform_with_preprocessor,
        fn_kwargs={"preprocessor": preprocessor},
        batch_format="pandas",
        batch_size=1000,
    )
    # Make predictions
    test_predictions = test_dataset.map_batches(
        Validator,
        fn_constructor_kwargs={"loader": load_model_and_preprocessor},
        compute=ray.data.ActorPoolStrategy(size=4),  # Number of model replicas
        batch_format="pandas",
    )
    # Calculate confusion matrix
    test_results = test_predictions.map_batches(
        confusion_matrix_batch, batch_format="pandas", batch_size=1000
    )
    # Calculate metrics
    # Sum all confusion matrix values across batches
    cm_sums = test_results.sum(["TN", "FP", "FN", "TP"])
    # Extract confusion matrix components
    tn = cm_sums["sum(TN)"]
    fp = cm_sums["sum(FP)"]
    fn = cm_sums["sum(FN)"]
    tp = cm_sums["sum(TP)"]
    # Calculate metrics; guard each ratio against a zero denominator.
    accuracy = (tp + tn) / (tp + tn + fp + fn)
    precision = tp / (tp + fp) if (tp + fp) > 0 else 0
    recall = tp / (tp + fn) if (tp + fn) > 0 else 0
    f1 = (
        2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0
    )
    metrics = {"precision": precision, "recall": recall, "f1": f1, "accuracy": accuracy}
    print("Validation results:")
    for key, value in metrics.items():
        print(f"{key}: {value:.4f}")
if __name__ == "__main__":
    # Run the batch-inference evaluation when executed as a script.
    main()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/e2e-xgboost/dist_xgboost/infer.py",
"license": "Apache License 2.0",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/ray-overview/examples/e2e-xgboost/dist_xgboost/serve.py | # Note: requires train.py to be run first for the model and preprocessor to be saved to MLFlow
import os

# Enable Ray Train V2 before any ray modules are imported below.
os.environ["RAY_TRAIN_V2_ENABLED"] = "1"

import asyncio

import aiohttp
import pandas as pd
import requests
import xgboost
from ray import serve
from ray.serve.handle import DeploymentHandle
from starlette.requests import Request

from dist_xgboost.data import load_model_and_preprocessor
@serve.deployment(num_replicas=2, ray_actor_options={"num_cpus": 2})
class XGBoostModel:
    """Ray Serve deployment wrapping the trained XGBoost classifier.

    Incoming requests are micro-batched (up to 16 requests or 100 ms wait)
    into a single vectorized predict call.
    """

    def __init__(self, loader):
        # pass in loader function from the outer context to
        # make it easier to mock during testing
        self.preprocessor, self.model = loader()

    @serve.batch(max_batch_size=16, batch_wait_timeout_s=0.1)
    async def predict_batch(self, input_data: list[dict]) -> list[float]:
        """Preprocess and score a micro-batch of feature dicts.

        Under @serve.batch each awaiting caller receives its own element
        of the returned list.
        """
        print(f"Batch size: {len(input_data)}")
        # Convert list of dictionaries to DataFrame
        input_df = pd.DataFrame(input_data)
        # Preprocess the input
        preprocessed_batch = self.preprocessor.transform_batch(input_df)
        # Create DMatrix for prediction
        dmatrix = xgboost.DMatrix(preprocessed_batch)
        # Get predictions
        predictions = self.model.predict(dmatrix)
        return predictions.tolist()

    async def __call__(self, request: Request):
        """HTTP entry point: the JSON body is one feature dict; returns its score."""
        # Parse the request body as JSON
        input_data = await request.json()
        return await self.predict_batch(input_data)
# Build the deployment graph and start serving it as soon as this module is
# imported; the handle is kept only to hold the app alive / for local use.
xgboost_model = XGBoostModel.bind(load_model_and_preprocessor)
_handle: DeploymentHandle = serve.run(
    xgboost_model, name="xgboost-breast-cancer-classifier"
)
def main():
    """Smoke-test the running Serve endpoint.

    Sends one synchronous request via ``requests``, then fires 100 concurrent
    requests with ``aiohttp`` to exercise Serve's request batching.
    """
    sample_input = {
        "mean radius": 14.9,
        "mean texture": 22.53,
        "mean perimeter": 102.1,
        "mean area": 685.0,
        "mean smoothness": 0.09947,
        "mean compactness": 0.2225,
        "mean concavity": 0.2733,
        "mean concave points": 0.09711,
        "mean symmetry": 0.2041,
        "mean fractal dimension": 0.06898,
        "radius error": 0.253,
        "texture error": 0.8749,
        "perimeter error": 3.466,
        "area error": 24.19,
        "smoothness error": 0.006965,
        "compactness error": 0.06213,
        "concavity error": 0.07926,
        "concave points error": 0.02234,
        "symmetry error": 0.01499,
        "fractal dimension error": 0.005784,
        "worst radius": 16.35,
        "worst texture": 27.57,
        "worst perimeter": 125.4,
        "worst area": 832.7,
        "worst smoothness": 0.1419,
        "worst compactness": 0.709,
        "worst concavity": 0.9019,
        "worst concave points": 0.2475,
        "worst symmetry": 0.2866,
        "worst fractal dimension": 0.1155,
    }
    sample_target = 0
    # create a batch of 100 requests and send them at once
    url = "http://127.0.0.1:8000/"
    # Example with a single request (synchronous call)
    prediction = requests.post(url, json=sample_input).json()
    print(f"Prediction: {prediction:.4f}")
    print(f"Ground truth: {sample_target}")

    # Send many requests at once instead of blocking after each request
    # using asyncio and aiohttp
    sample_input_list = [sample_input] * 100

    async def fetch(session, url, data):
        async with session.post(url, json=data) as response:
            return await response.json()

    # FIX: the parameter was previously named `requests` (shadowing the
    # `requests` module) and the comprehension variable was `input`
    # (shadowing the builtin); renamed to avoid both shadows.
    async def fetch_all(payloads: list):
        async with aiohttp.ClientSession() as session:
            tasks = [fetch(session, url, payload) for payload in payloads]
            responses = await asyncio.gather(*tasks)
            return responses

    responses = asyncio.run(fetch_all(sample_input_list))
    print(f"First prediction: {responses[0]:.4f}")
# Script entry point: exercise the already-deployed Serve application.
if __name__ == "__main__":
    main()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/e2e-xgboost/dist_xgboost/serve.py",
"license": "Apache License 2.0",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/ray-overview/examples/e2e-xgboost/dist_xgboost/tests.py | from unittest.mock import patch, MagicMock
import ray
import numpy as np
from dist_xgboost.serve import main as serve_main
from dist_xgboost.train import main as train_main
from dist_xgboost.infer import main as inference_main
def mock_load_model_and_preprocessor():
    """Return (preprocessor, model) stand-ins for the MLflow artifacts.

    The fake preprocessor passes batches through unchanged; the fake model
    emits one random prediction per DMatrix row.
    """
    preprocessor_stub = MagicMock()
    preprocessor_stub.transform_batch.side_effect = lambda batch: batch

    model_stub = MagicMock()

    def _predict(dmatrix):
        return np.random.random(size=(dmatrix.num_row(),))

    model_stub.predict.side_effect = _predict
    return preprocessor_stub, model_stub
@patch("dist_xgboost.train.log_run_to_mlflow")
@patch("dist_xgboost.train.save_preprocessor")
# @patch("dist_xgboost.train.NUM_WORKERS", new=1) # uncomment to run the test locally
# @patch("dist_xgboost.train.USE_GPU", new=False) # uncomment to run the test locally
def test_train_main(mock_save_preprocessor, mock_log_run_to_mlflow):
    """End-to-end training smoke test with persistence side effects mocked out.

    FIX: @patch decorators inject mocks bottom-up, so the innermost patch
    (save_preprocessor) is the FIRST positional argument; the previous
    parameter order had the two mock names swapped.
    """
    ray.data.DataContext.log_internal_stack_trace_to_stdout = True
    train_main()
    mock_save_preprocessor.assert_called_once()
    mock_log_run_to_mlflow.assert_called_once()
@patch(
    "dist_xgboost.serve.load_model_and_preprocessor", mock_load_model_and_preprocessor
)
def test_serve_main():
    # Smoke test: the Serve app should deploy and answer requests using the
    # mocked model/preprocessor instead of the real MLflow artifacts.
    serve_main()
@patch(
    "dist_xgboost.infer.load_model_and_preprocessor", mock_load_model_and_preprocessor
)
def test_infer_main():
    # Smoke test: batch inference should run end to end with the mocked
    # model/preprocessor in place.
    inference_main()
# Run the smoke tests sequentially when invoked as a script
# (the @patch decorators still inject the mocks on direct calls).
if __name__ == "__main__":
    test_train_main()
    test_serve_main()
    test_infer_main()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/e2e-xgboost/dist_xgboost/tests.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/ray-overview/examples/e2e-xgboost/dist_xgboost/train.py | import os
import pickle
# Enable Ray Train v2. This will be the default in an upcoming release.
os.environ["RAY_TRAIN_V2_ENABLED"] = "1"
# It is now safe to import Ray Train.
import ray
import xgboost
from ray.data.preprocessors import StandardScaler
from ray.train import CheckpointConfig, Result, RunConfig, ScalingConfig
from ray.train.xgboost import RayTrainReportCallback, XGBoostTrainer
from dist_xgboost.constants import storage_path, preprocessor_path
from dist_xgboost.data import log_run_to_mlflow, prepare_data
import dist_xgboost
NUM_WORKERS = 5
USE_GPU = True
def train_preprocessor(train_dataset: ray.data.Dataset) -> StandardScaler:
    """Fit a ``StandardScaler`` on every feature column of the training set.

    The label column ("target") is excluded from scaling.
    """
    feature_columns = [
        column for column in train_dataset.columns() if column != "target"
    ]
    scaler = StandardScaler(columns=feature_columns)
    scaler.fit(train_dataset)
    return scaler
def save_preprocessor(preprocessor: StandardScaler) -> None:
    """Pickle the fitted preprocessor to ``preprocessor_path`` so serving and
    inference can reuse the exact same feature scaling."""
    with open(preprocessor_path, "wb") as f:
        pickle.dump(preprocessor, f)
def train_fn_per_worker(config: dict):
    """Training loop executed on each Ray Train worker.

    Args:
        config: Train loop config; expects key "xgboost_params" whose value is
            forwarded as the parameter dict to ``xgboost.train``.
    """
    # Get this worker's dataset shard convert
    train_ds, val_ds = (
        ray.train.get_dataset_shard("train"),
        ray.train.get_dataset_shard("validation"),
    )
    # Materialize this worker's shard into pandas for the XGBoost API.
    train_ds = train_ds.materialize().to_pandas()
    val_ds = val_ds.materialize().to_pandas()
    # Separate the labels from the features
    train_X, train_y = train_ds.drop("target", axis=1), train_ds["target"]
    eval_X, eval_y = val_ds.drop("target", axis=1), val_ds["target"]
    # Convert the data into a DMatrix
    dtrain = xgboost.DMatrix(train_X, label=train_y)
    deval = xgboost.DMatrix(eval_X, label=eval_y)
    # Do distributed data-parallel training.
    # Ray Train sets up the necessary coordinator processes and
    # environment variables for your workers to communicate with each other.
    # it also handles checkpointing via the `RayTrainReportCallback`
    _booster = xgboost.train(
        config["xgboost_params"],
        dtrain=dtrain,
        evals=[(dtrain, "train"), (deval, "validation")],
        num_boost_round=10,
        callbacks=[RayTrainReportCallback()],
    )
def main():
    """End-to-end training driver.

    Fits the preprocessor, transforms the datasets, runs distributed XGBoost
    training with Ray Train, and logs the run (model + preprocessor) to MLflow.
    """
    # Ship the local `dist_xgboost` package to all workers.
    ray.init(runtime_env={"py_modules": [dist_xgboost]})
    # Load and split the dataset
    train_dataset, valid_dataset, _test_dataset = prepare_data()
    # Train the preprocessor
    preprocessor = train_preprocessor(train_dataset)
    # Save the preprocessor
    save_preprocessor(preprocessor)
    train_dataset = preprocessor.transform(train_dataset)
    valid_dataset = preprocessor.transform(valid_dataset)
    # Configure checkpointing to save progress during training
    run_config = RunConfig(
        checkpoint_config=CheckpointConfig(
            # Only keep the latest checkpoint.
            num_to_keep=1,
        ),
        ## If running in a multi-node cluster, this is where you
        ## should configure the run's persistent storage that is accessible
        ## across all worker nodes with `storage_path="s3://..."`
        storage_path=storage_path,
    )
    # Define the scaling config
    scaling_config = ScalingConfig(
        # Number of workers to use for data parallelism.
        num_workers=NUM_WORKERS,
        # Whether to use GPU acceleration. Set to True to schedule GPU workers.
        use_gpu=USE_GPU,
    )
    # Params that will be passed to the base XGBoost model.
    model_config = {
        "xgboost_params": {
            "objective": "binary:logistic",
            "eval_metric": ["logloss", "error"],
        }
    }
    trainer = XGBoostTrainer(
        train_fn_per_worker,
        train_loop_config=model_config,
        # Register the data subsets.
        datasets={"train": train_dataset, "validation": valid_dataset},
        scaling_config=scaling_config,
        run_config=run_config,
    )
    result: Result = trainer.fit()
    print(f"Training metrics: {result.metrics}")
    log_run_to_mlflow(model_config, result, preprocessor_path)
    print("Training complete")
# Script entry point: run the full distributed training pipeline.
if __name__ == "__main__":
    main()
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/e2e-xgboost/dist_xgboost/train.py",
"license": "Apache License 2.0",
"lines": 100,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:doc/source/ray-overview/examples/e2e-xgboost/notebooks/clean_cell_nums.py | from pathlib import Path
import nbformat
def clear_execution_numbers(nb_path):
    """Strip execution counts from all code cells (and their outputs) of a
    Jupyter notebook, rewriting the file only if something changed.

    Args:
        nb_path: Path to the ``.ipynb`` file to clean in place.
    """
    with open(nb_path, "r", encoding="utf-8") as f:
        nb = nbformat.read(f, as_version=4)
    cleaned = False
    for cell in nb["cells"]:
        if cell["cell_type"] != "code":
            continue
        # FIX: previously the cell-level execution_count was nulled without
        # setting `cleaned`, so notebooks whose only execution counts were on
        # cells (not outputs) were never written back.
        if cell["execution_count"] is not None:
            cell["execution_count"] = None
            cleaned = True
        for output in cell["outputs"]:
            if output.get("execution_count") is not None:
                output["execution_count"] = None
                cleaned = True
    if cleaned:
        with open(nb_path, "w", encoding="utf-8") as f:
            nbformat.write(nb, f)
        print(f"Cleared execution numbers from {nb_path}")
if __name__ == "__main__":
    # Clean every notebook found (recursively) next to this script.
    NOTEBOOK_DIR = Path(__file__).parent
    notebook_fps = list(NOTEBOOK_DIR.glob("**/*.ipynb"))
    for fp in notebook_fps:
        clear_execution_numbers(fp)
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/ray-overview/examples/e2e-xgboost/notebooks/clean_cell_nums.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/tests/test_reference_counting_standalone.py | """Reference counting tests that require their own custom fixture.
The other reference counting tests use a shared Ray instance across the test module
to reduce overheads & overall test runtime.
"""
# coding: utf-8
import logging
import platform
import random
import sys
import time
import numpy as np
import pytest
import ray
import ray.cluster_utils
from ray._common.test_utils import (
SignalActor,
wait_for_condition,
)
from ray._private.internal_api import memory_summary
logger = logging.getLogger(__name__)
def _fill_object_store_and_get(obj, succeed=True, object_MiB=20, num_objects=5):
    """Fill the plasma store with throwaway objects, then check ``obj``'s fate.

    Args:
        obj: Object ref (or its binary id as ``bytes``) to check.
        succeed: If True, wait until the object is present in the local store;
            otherwise wait until it is absent (evicted / never pinned).
        object_MiB: Size of each filler object in MiB.
        num_objects: Number of filler objects to put.
    """
    for _ in range(num_objects):
        ray.put(np.zeros(object_MiB * 1024 * 1024, dtype=np.uint8))

    # FIX: use isinstance instead of `type(obj) is bytes` — idiomatic and
    # accepts bytes subclasses as raw binary object ids.
    if isinstance(obj, bytes):
        obj = ray.ObjectRef(obj)

    if succeed:
        wait_for_condition(
            lambda: ray._private.worker.global_worker.core_worker.object_exists(obj)
        )
    else:
        wait_for_condition(
            lambda: not ray._private.worker.global_worker.core_worker.object_exists(obj)
        )
@pytest.mark.skipif(platform.system() in ["Windows"], reason="Failing on Windows.")
def test_object_unpin(ray_start_cluster):
    """Objects pinned by actors are freed when popped, when their node dies,
    or when the owning actor is killed."""
    nodes = []
    cluster = ray_start_cluster
    head_node = cluster.add_node(
        num_cpus=0,
        object_store_memory=100 * 1024 * 1024,
        _system_config={
            # Tight health-check settings so node death is detected quickly.
            "subscriber_timeout_ms": 100,
            "health_check_initial_delay_ms": 0,
            "health_check_period_ms": 1000,
            "health_check_failure_threshold": 5,
        },
    )
    ray.init(address=cluster.address)
    # Add worker nodes.
    for i in range(2):
        nodes.append(
            cluster.add_node(
                num_cpus=1,
                resources={f"node_{i}": 1},
                object_store_memory=100 * 1024 * 1024,
            )
        )
    cluster.wait_for_nodes()
    one_mb_array = np.ones(1 * 1024 * 1024, dtype=np.uint8)
    ten_mb_array = np.ones(10 * 1024 * 1024, dtype=np.uint8)

    @ray.remote
    class ObjectsHolder:
        # Actor that pins 1 MiB / 10 MiB objects in its node's object store.
        def __init__(self):
            self.ten_mb_objs = []
            self.one_mb_objs = []

        def put_10_mb(self):
            self.ten_mb_objs.append(ray.put(ten_mb_array))

        def put_1_mb(self):
            self.one_mb_objs.append(ray.put(one_mb_array))

        def pop_10_mb(self):
            if len(self.ten_mb_objs) == 0:
                return False
            self.ten_mb_objs.pop()
            return True

        def pop_1_mb(self):
            if len(self.one_mb_objs) == 0:
                return False
            self.one_mb_objs.pop()
            return True

    # Head node contains 11MB of data.
    one_mb_arrays = []
    ten_mb_arrays = []
    one_mb_arrays.append(ray.put(one_mb_array))
    ten_mb_arrays.append(ray.put(ten_mb_array))

    def check_memory(mb):
        # Parse the cluster memory summary for the expected plasma usage.
        return f"Plasma memory usage {mb} MiB" in memory_summary(
            address=head_node.address, stats_only=True
        )

    def wait_until_node_dead(node):
        for n in ray.nodes():
            if n["ObjectStoreSocketName"] == node.address_info["object_store_address"]:
                return not n["Alive"]
        return False

    wait_for_condition(lambda: check_memory(11))
    # Pop one mb array and see if it works.
    one_mb_arrays.pop()
    wait_for_condition(lambda: check_memory(10))
    # Pop 10 MB.
    ten_mb_arrays.pop()
    wait_for_condition(lambda: check_memory(0))
    # Put 11 MB for each actor.
    # actor 1: 1MB + 10MB
    # actor 2: 1MB + 10MB
    actor_on_node_1 = ObjectsHolder.options(resources={"node_0": 1}).remote()
    actor_on_node_2 = ObjectsHolder.options(resources={"node_1": 1}).remote()
    ray.get(actor_on_node_1.put_1_mb.remote())
    ray.get(actor_on_node_1.put_10_mb.remote())
    ray.get(actor_on_node_2.put_1_mb.remote())
    ray.get(actor_on_node_2.put_10_mb.remote())
    wait_for_condition(lambda: check_memory(22))
    # actor 1: 10MB
    # actor 2: 1MB
    ray.get(actor_on_node_1.pop_1_mb.remote())
    ray.get(actor_on_node_2.pop_10_mb.remote())
    wait_for_condition(lambda: check_memory(11))
    # The second node is dead, and actor 2 is dead.
    cluster.remove_node(nodes[1], allow_graceful=False)
    wait_for_condition(lambda: wait_until_node_dead(nodes[1]))
    wait_for_condition(lambda: check_memory(10))
    # The first actor is dead, so object should be GC'ed.
    ray.kill(actor_on_node_1)
    wait_for_condition(lambda: check_memory(0))
@pytest.mark.skipif(platform.system() in ["Windows"], reason="Failing on Windows.")
def test_object_unpin_stress(ray_start_cluster):
    """Randomized put/pop workload across three actors; after one node dies,
    plasma usage must converge to exactly what the survivors still pin."""
    nodes = []
    cluster = ray_start_cluster
    cluster.add_node(
        num_cpus=1, resources={"head": 1}, object_store_memory=1000 * 1024 * 1024
    )
    ray.init(address=cluster.address)
    # Add worker nodes.
    for i in range(2):
        nodes.append(
            cluster.add_node(
                num_cpus=1,
                resources={f"node_{i}": 1},
                object_store_memory=1000 * 1024 * 1024,
            )
        )
    cluster.wait_for_nodes()
    one_mb_array = np.ones(1 * 1024 * 1024, dtype=np.uint8)
    ten_mb_array = np.ones(10 * 1024 * 1024, dtype=np.uint8)

    @ray.remote
    class ObjectsHolder:
        # Actor that pins 1 MiB / 10 MiB objects and reports its pinned size.
        def __init__(self):
            self.ten_mb_objs = []
            self.one_mb_objs = []

        def put_10_mb(self):
            self.ten_mb_objs.append(ray.put(ten_mb_array))

        def put_1_mb(self):
            self.one_mb_objs.append(ray.put(one_mb_array))

        def pop_10_mb(self):
            if len(self.ten_mb_objs) == 0:
                return False
            self.ten_mb_objs.pop()
            return True

        def pop_1_mb(self):
            if len(self.one_mb_objs) == 0:
                return False
            self.one_mb_objs.pop()
            return True

        def get_obj_size(self):
            # Total pinned size in MiB.
            return len(self.ten_mb_objs) * 10 + len(self.one_mb_objs)

    actor_on_node_1 = ObjectsHolder.options(resources={"node_0": 1}).remote()
    actor_on_node_2 = ObjectsHolder.options(resources={"node_1": 1}).remote()
    actor_on_head_node = ObjectsHolder.options(resources={"head": 1}).remote()
    # Warm up / ensure all actors are alive before the random workload.
    ray.get(actor_on_node_1.get_obj_size.remote())
    ray.get(actor_on_node_2.get_obj_size.remote())
    ray.get(actor_on_head_node.get_obj_size.remote())

    def random_ops(actors):
        # Apply one randomly chosen op (put/pop, 1 MiB/10 MiB) to every actor.
        r = random.random()
        for actor in actors:
            if r <= 0.25:
                actor.put_10_mb.remote()
            elif r <= 0.5:
                actor.put_1_mb.remote()
            elif r <= 0.75:
                actor.pop_10_mb.remote()
            else:
                actor.pop_1_mb.remote()

    total_iter = 15
    for _ in range(total_iter):
        random_ops([actor_on_node_1, actor_on_node_2, actor_on_head_node])
    # Simulate node dead.
    cluster.remove_node(nodes[1])
    for _ in range(total_iter):
        random_ops([actor_on_node_1, actor_on_head_node])
    total_size = sum(
        [
            ray.get(actor_on_node_1.get_obj_size.remote()),
            ray.get(actor_on_head_node.get_obj_size.remote()),
        ]
    )
    wait_for_condition(
        lambda: (
            (f"Plasma memory usage {total_size} MiB") in memory_summary(stats_only=True)
        )
    )
@pytest.mark.parametrize("inline_args", [True, False])
def test_inlined_nested_refs(ray_start_cluster, inline_args):
    """A nested ref returned by an actor stays usable after the outer ref and
    the local nested ref are deleted, with and without argument inlining."""
    cluster = ray_start_cluster
    config = {}
    if not inline_args:
        # Force all task args to be passed by reference rather than inlined.
        config["max_direct_call_object_size"] = 0
    cluster.add_node(
        num_cpus=2, object_store_memory=100 * 1024 * 1024, _system_config=config
    )
    ray.init(address=cluster.address)

    @ray.remote
    class Actor:
        def __init__(self):
            return

        def nested(self):
            return ray.put("x")

    @ray.remote
    def nested_nested(a):
        return a.nested.remote()

    @ray.remote
    def foo(ref):
        # Sleep so the caller's deletions happen while the task is pending.
        time.sleep(1)
        return ray.get(ref)

    a = Actor.remote()
    nested_nested_ref = nested_nested.remote(a)
    # We get nested_ref's value directly from its owner.
    nested_ref = ray.get(nested_nested_ref)
    del nested_nested_ref
    x = foo.remote(nested_ref)
    del nested_ref
    ray.get(x)
# https://github.com/ray-project/ray/issues/17553
@pytest.mark.parametrize("inline_args", [True, False])
def test_return_nested_ids(shutdown_only, inline_args):
    """Regression test for #17553: refs nested inside a returned object must
    not be freed while the enclosing object is still reachable."""
    config = dict()
    if inline_args:
        config["max_direct_call_object_size"] = 100 * 1024 * 1024
    else:
        config["max_direct_call_object_size"] = 0
    ray.init(object_store_memory=100 * 1024 * 1024, _system_config=config)

    class Nested:
        # Plain container holding a list of object refs.
        def __init__(self, blocks):
            self._blocks = blocks

    @ray.remote
    def echo(fn):
        return fn()

    @ray.remote
    def create_nested():
        refs = [ray.put(np.random.random(1024 * 1024)) for _ in range(10)]
        return Nested(refs)

    @ray.remote
    def test():
        ref = create_nested.remote()
        result1 = ray.get(ref)
        del ref
        # Capture the nested object in a closure passed to another task.
        result = echo.remote(lambda: result1)  # noqa
        del result1
        time.sleep(5)
        block = ray.get(result)._blocks[0]
        print(ray.get(block))

    ray.get(test.remote())
def _check_refcounts(expected):
    """Assert the core worker's reference counts match ``expected`` exactly.

    ``expected`` maps ObjectRef -> (local_count, submitted_count).
    """
    counts = ray._private.worker.global_worker.core_worker.get_all_reference_counts()
    assert len(counts) == len(expected)
    for ref, (want_local, want_submitted) in expected.items():
        key = ref.hex().encode("ascii")
        assert key in counts
        entry = counts[key]
        assert entry["local"] == want_local
        assert entry["submitted"] == want_submitted
def test_out_of_band_serialized_object_ref(ray_start_regular):
    """Cloudpickling an ObjectRef adds a local reference that keeps the object
    alive even after the original ref is deleted."""
    assert (
        len(ray._private.worker.global_worker.core_worker.get_all_reference_counts())
        == 0
    )
    obj_ref = ray.put("hello")
    _check_refcounts({obj_ref: (1, 0)})
    obj_ref_str = ray.cloudpickle.dumps(obj_ref)
    # Serializing the ref registers a second local reference.
    _check_refcounts({obj_ref: (2, 0)})
    del obj_ref
    # The pickled copy still holds one reference.
    assert (
        len(ray._private.worker.global_worker.core_worker.get_all_reference_counts())
        == 1
    )
    assert ray.get(ray.cloudpickle.loads(obj_ref_str)) == "hello"
def test_captured_object_ref(ray_start_regular):
    """Object refs captured in a remote function/actor closure must stay
    pinned even after all local references are deleted."""
    captured_id = ray.put(np.zeros(1024, dtype=np.uint8))

    @ray.remote
    def f(signal):
        ray.get(signal.wait.remote())
        ray.get(captured_id)  # noqa: F821

    signal = SignalActor.remote()
    obj_ref = f.remote(signal)
    # Delete local references.
    del f
    del captured_id
    # Test that the captured object ref is pinned despite having no local
    # references.
    ray.get(signal.send.remote())
    _fill_object_store_and_get(obj_ref)

    # Repeat the same check with the ref captured by an actor class.
    captured_id = ray.put(np.zeros(1024, dtype=np.uint8))

    @ray.remote
    class Actor:
        def get(self, signal):
            ray.get(signal.wait.remote())
            ray.get(captured_id)  # noqa: F821

    signal = SignalActor.remote()
    actor = Actor.remote()
    obj_ref = actor.get.remote(signal)
    # Delete local references.
    del Actor
    del captured_id
    # Test that the captured object ref is pinned despite having no local
    # references.
    ray.get(signal.send.remote())
    _fill_object_store_and_get(obj_ref)
# Run this test module directly with pytest when invoked as a script.
if __name__ == "__main__":
    sys.exit(pytest.main(["-sv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/test_reference_counting_standalone.py",
"license": "Apache License 2.0",
"lines": 315,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/llm/tests/serve/cpu/deployments/prefill_decode_disagg/test_prefill_decode_disagg.py | import sys
import pytest
from ray.llm._internal.serve.core.configs.llm_config import ModelLoadingConfig
from ray.llm._internal.serve.core.ingress.builder import (
IngressClsConfig,
)
from ray.llm._internal.serve.core.ingress.ingress import OpenAiIngress
from ray.llm._internal.serve.serving_patterns.prefill_decode.builder import (
PDServingArgs,
ProxyClsConfig,
build_pd_openai_app,
)
from ray.llm._internal.serve.serving_patterns.prefill_decode.pd_server import (
PDProxyServer,
)
from ray.serve.llm import LLMConfig
class TestPDServingArgs:
    """Test suite for PDServingArgs data model."""

    @pytest.fixture
    def pd_configs(self):
        """Prefill and decode configs with required kv_transfer_config."""
        base_config = {
            "model_loading_config": {
                "model_id": "test-model",
                "model_source": "test-source",
            },
            "engine_kwargs": {
                "kv_transfer_config": {
                    "kv_connector": "NixlConnector",
                    "kv_role": "kv_both",
                },
            },
        }
        prefill = LLMConfig.model_validate(base_config)
        decode = LLMConfig.model_validate(base_config)
        return prefill, decode

    def test_basic_creation_and_defaults(self, pd_configs):
        """Test creation with minimal config and verify defaults."""
        prefill, decode = pd_configs
        args = PDServingArgs(prefill_config=prefill, decode_config=decode)
        # Verify configs
        assert isinstance(args.prefill_config, LLMConfig)
        assert isinstance(args.decode_config, LLMConfig)
        # Verify defaults
        assert isinstance(args.proxy_cls_config, ProxyClsConfig)
        assert args.proxy_cls_config.proxy_cls == PDProxyServer
        assert isinstance(args.ingress_cls_config, IngressClsConfig)
        assert args.ingress_cls_config.ingress_cls == OpenAiIngress
        assert args.proxy_deployment_config == {}
        assert args.ingress_deployment_config == {}

    def test_flexible_input_types(self):
        """Test accepts dicts for prefill and decode configs."""
        config_dict = {
            "model_loading_config": {
                "model_id": "test-model",
                "model_source": "test-source",
            },
            "engine_kwargs": {
                "kv_transfer_config": {
                    "kv_connector": "NixlConnector",
                    "kv_role": "kv_both",
                },
            },
        }
        # Plain dicts should be coerced to LLMConfig by validation.
        args = PDServingArgs(prefill_config=config_dict, decode_config=config_dict)
        assert isinstance(args.prefill_config, LLMConfig)
        assert isinstance(args.decode_config, LLMConfig)

    def test_proxy_config_flexibility(self, pd_configs):
        """Test proxy_cls_config: defaults, dict input, object input, and class loading."""
        prefill, decode = pd_configs
        # Test defaults
        args_default = PDServingArgs(prefill_config=prefill, decode_config=decode)
        assert isinstance(args_default.proxy_cls_config, ProxyClsConfig)
        assert args_default.proxy_cls_config.proxy_cls == PDProxyServer
        assert args_default.proxy_cls_config.proxy_extra_kwargs == {}
        # Test as dict with custom kwargs
        args_dict = PDServingArgs(
            prefill_config=prefill,
            decode_config=decode,
            proxy_cls_config={"proxy_extra_kwargs": {"key": "value"}},
        )
        assert isinstance(args_dict.proxy_cls_config, ProxyClsConfig)
        assert args_dict.proxy_cls_config.proxy_extra_kwargs == {"key": "value"}
        # Test as object
        args_obj = PDServingArgs(
            prefill_config=prefill,
            decode_config=decode,
            proxy_cls_config=ProxyClsConfig(proxy_extra_kwargs={"key": "value"}),
        )
        assert isinstance(args_obj.proxy_cls_config, ProxyClsConfig)
        assert args_obj.proxy_cls_config.proxy_extra_kwargs == {"key": "value"}
        # Test class loading from string ("module.path:ClassName" form)
        args_str = PDServingArgs(
            prefill_config=prefill,
            decode_config=decode,
            proxy_cls_config={
                "proxy_cls": "ray.llm._internal.serve.serving_patterns.prefill_decode.pd_server:PDProxyServer"
            },
        )
        assert args_str.proxy_cls_config.proxy_cls == PDProxyServer

    def test_ingress_config_flexibility(self, pd_configs):
        """Test ingress_cls_config: defaults, dict input, object input, and class loading."""
        prefill, decode = pd_configs
        # Test defaults
        args_default = PDServingArgs(prefill_config=prefill, decode_config=decode)
        assert isinstance(args_default.ingress_cls_config, IngressClsConfig)
        assert args_default.ingress_cls_config.ingress_cls == OpenAiIngress
        assert args_default.ingress_cls_config.ingress_extra_kwargs == {}
        # Test as dict with custom kwargs
        args_dict = PDServingArgs(
            prefill_config=prefill,
            decode_config=decode,
            ingress_cls_config={"ingress_extra_kwargs": {"key": "value"}},
        )
        assert isinstance(args_dict.ingress_cls_config, IngressClsConfig)
        assert args_dict.ingress_cls_config.ingress_extra_kwargs == {"key": "value"}
        # Test as object
        args_obj = PDServingArgs(
            prefill_config=prefill,
            decode_config=decode,
            ingress_cls_config=IngressClsConfig(ingress_extra_kwargs={"key": "value"}),
        )
        assert isinstance(args_obj.ingress_cls_config, IngressClsConfig)
        assert args_obj.ingress_cls_config.ingress_extra_kwargs == {"key": "value"}
        # Test class loading from string ("module.path:ClassName" form)
        args_str = PDServingArgs(
            prefill_config=prefill,
            decode_config=decode,
            ingress_cls_config={
                "ingress_cls": "ray.llm._internal.serve.core.ingress.ingress:OpenAiIngress"
            },
        )
        assert args_str.ingress_cls_config.ingress_cls == OpenAiIngress

    def test_validation_rules(self):
        """Test validation: matching model IDs and required kv_transfer_config."""
        # Mismatched model IDs
        prefill = LLMConfig(
            model_loading_config=ModelLoadingConfig(
                model_id="model-1", model_source="source"
            ),
            engine_kwargs={"kv_transfer_config": {"kv_connector": "NixlConnector"}},
        )
        decode = LLMConfig(
            model_loading_config=ModelLoadingConfig(
                model_id="model-2", model_source="source"
            ),
            engine_kwargs={"kv_transfer_config": {"kv_connector": "NixlConnector"}},
        )
        with pytest.raises(ValueError, match="P/D model id mismatch"):
            PDServingArgs(prefill_config=prefill, decode_config=decode)
        # Missing kv_transfer_config
        prefill_no_kv = LLMConfig(
            model_loading_config=ModelLoadingConfig(
                model_id="test-model", model_source="test-source"
            )
        )
        decode_no_kv = LLMConfig(
            model_loading_config=ModelLoadingConfig(
                model_id="test-model", model_source="test-source"
            )
        )
        with pytest.raises(ValueError, match="kv_transfer_config is required"):
            PDServingArgs(prefill_config=prefill_no_kv, decode_config=decode_no_kv)
class TestServingArgsParsing:
    """Parsing smoke tests: a plain dict of configs should build a PD app."""

    @pytest.mark.parametrize("kv_connector", ["NixlConnector", "LMCacheConnectorV1"])
    def test_parse_dict(self, kv_connector: str):
        # Prefill side: 2 fixed replicas.
        prefill_config = LLMConfig(
            model_loading_config=dict(
                model_id="qwen-0.5b",
                model_source="Qwen/Qwen2.5-0.5B-Instruct",
            ),
            deployment_config=dict(
                autoscaling_config=dict(
                    min_replicas=2,
                    max_replicas=2,
                )
            ),
            engine_kwargs=dict(
                tensor_parallel_size=1,
                kv_transfer_config=dict(
                    kv_connector=kv_connector,
                    kv_role="kv_both",
                ),
            ),
        )
        # Decode side: 1 fixed replica.
        decode_config = LLMConfig(
            model_loading_config=dict(
                model_id="qwen-0.5b",
                model_source="Qwen/Qwen2.5-0.5B-Instruct",
            ),
            deployment_config=dict(
                autoscaling_config=dict(
                    min_replicas=1,
                    max_replicas=1,
                )
            ),
            engine_kwargs=dict(
                tensor_parallel_size=1,
                kv_transfer_config=dict(
                    kv_connector=kv_connector,
                    kv_role="kv_both",
                ),
            ),
        )
        pd_config = {"prefill_config": prefill_config, "decode_config": decode_config}
        app = build_pd_openai_app(pd_config)
        assert app is not None
class TestBuildPDOpenaiApp:
    """Test suite for build_pd_openai_app function."""

    @pytest.fixture
    def pd_configs(self):
        """Prefill and decode configs with required kv_transfer_config."""
        base_config = {
            "model_loading_config": {
                "model_id": "test-model",
                "model_source": "test-source",
            },
            "engine_kwargs": {
                "kv_transfer_config": {
                    "kv_connector": "NixlConnector",
                    "kv_role": "kv_both",
                },
            },
        }
        prefill = LLMConfig.model_validate(base_config)
        decode = LLMConfig.model_validate(base_config)
        return prefill, decode

    def test_deployment_config_merging(self, pd_configs):
        """Test that deployment configs are properly merged with default options.

        This test ensures that deep_merge_dicts return value is properly assigned
        for both proxy and ingress deployments, and that nested dictionaries are
        properly deep-merged without losing default values.
        """
        prefill, decode = pd_configs
        # Build app with custom configs for both proxy and ingress including nested options
        app = build_pd_openai_app(
            {
                "prefill_config": prefill,
                "decode_config": decode,
                "proxy_deployment_config": {
                    "num_replicas": 2,
                    "ray_actor_options": {
                        "num_cpus": 4,
                        "memory": 2048,
                    },
                    "max_ongoing_requests": 150,  # Override default
                },
                "ingress_deployment_config": {
                    "num_replicas": 5,
                    "ray_actor_options": {
                        "num_cpus": 8,
                        "memory": 4096,
                    },
                    "max_ongoing_requests": 300,  # Override default
                },
            }
        )
        # The app should have an ingress deployment bound to a proxy deployment
        # The proxy is passed as an Application via llm_deployments in init_kwargs
        ingress_deployment = app._bound_deployment
        proxy_app = ingress_deployment.init_kwargs["llm_deployments"][0]
        proxy_deployment = proxy_app._bound_deployment
        # Verify proxy config was applied with deep merge
        assert proxy_deployment._deployment_config.num_replicas == 2
        assert proxy_deployment.ray_actor_options["num_cpus"] == 4
        assert proxy_deployment.ray_actor_options["memory"] == 2048
        assert proxy_deployment._deployment_config.max_ongoing_requests == 150
        # Verify ingress config was applied with deep merge
        assert ingress_deployment._deployment_config.num_replicas == 5
        assert ingress_deployment.ray_actor_options["num_cpus"] == 8
        assert ingress_deployment.ray_actor_options["memory"] == 4096
        assert ingress_deployment._deployment_config.max_ongoing_requests == 300
# Run this test module directly with pytest when invoked as a script.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/tests/serve/cpu/deployments/prefill_decode_disagg/test_prefill_decode_disagg.py",
"license": "Apache License 2.0",
"lines": 274,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/llm/tests/serve/gpu/deployments/llm/prefill_decode_disagg/test_prefill_decode_disagg_gpu.py | import sys
from unittest.mock import MagicMock
import pytest
from ray.llm._internal.serve.core.configs.llm_config import (
LLMConfig,
)
from ray.llm._internal.serve.engines.vllm.vllm_engine import (
VLLMEngine,
)
class TestPDDisaggVLLMEngine:
    """Test vLLM engine under PD disagg."""

    @pytest.mark.asyncio
    @pytest.mark.parametrize("kv_connector", ["NixlConnector", "LMCacheConnectorV1"])
    async def test_pd_disagg_vllm_engine(
        self,
        # llm_config is a fixture defined in serve.tests.conftest.py
        llm_config: LLMConfig,
        kv_connector: str,
        monkeypatch,
    ):
        """Test vLLM engine under PD disagg."""
        if kv_connector == "LMCacheConnectorV1":
            # Stub the lmcache module so the connector import succeeds even
            # when the package is not installed.
            lmcache_mock = MagicMock()
            monkeypatch.setitem(sys.modules, "lmcache", lmcache_mock)
        # Copy before mutating so the shared fixture stays pristine.
        llm_config = llm_config.model_copy(deep=True)
        llm_config.engine_kwargs.update(
            {
                "kv_transfer_config": dict(
                    kv_connector=kv_connector,
                    kv_role="kv_both",
                ),
            }
        )
        vllm_engine = VLLMEngine(llm_config)
        assert vllm_engine is not None
# Run this test module directly with pytest when invoked as a script.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/tests/serve/gpu/deployments/llm/prefill_decode_disagg/test_prefill_decode_disagg_gpu.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/dashboard/modules/reporter/gpu_profile_manager.py | import asyncio
import functools
import logging
import os
import shutil
import subprocess
from datetime import datetime
from pathlib import Path
from typing import Optional, Tuple
from ray.dashboard.modules.reporter.profile_manager import (
_format_failed_profiler_command,
)
import psutil
logger = logging.getLogger(__name__)
class GpuProfilingManager:
    """GPU profiling manager for Ray Dashboard.
    NOTE: The current implementation is based on the `dynolog` OSS project,
    but these are mostly implementation details that can be changed in the future.
    `dynolog` needs to be installed on the nodes where profiling is being done.
    This only supports Torch training scripts with KINETO_USE_DAEMON=1 set.
    It is not supported for other frameworks.
    """
    # Port for the monitoring daemon.
    # This port was chosen arbitrarily to avoid conflicts.
    _DYNOLOG_PORT = 65406
    # Default timeout for the profiling operation.
    _DEFAULT_TIMEOUT_S = 5 * 60
    # Substring of the `dyno` client's stdout indicating the daemon matched
    # no processes for the requested pid.
    _NO_PROCESSES_MATCHED_ERROR_MESSAGE_PREFIX = "No processes were matched"
    # User-facing error templates; formatted with pid / ip_address below.
    _DISABLED_ERROR_MESSAGE = (
        "GPU profiling is not enabled on node {ip_address}. "
        "This is the case if no GPUs are detected on the node or if "
        "the profiling dependency `dynolog` is not installed on the node.\n"
        "Please ensure that GPUs are available on the node and that "
        "`dynolog` is installed."
    )
    _NO_PROCESSES_MATCHED_ERROR_MESSAGE = (
        "The profiling command failed for pid={pid} on node {ip_address}. "
        "There are a few potential reasons for this:\n"
        "1. The `KINETO_USE_DAEMON=1 KINETO_DAEMON_INIT_DELAY_S=5` environment variables "
        "are not set for the training worker processes.\n"
        "2. The process requested for profiling is not running a "
        "PyTorch training script. GPU profiling is only supported for "
        "PyTorch training scripts, typically launched via "
        "`ray.train.torch.TorchTrainer`."
    )
    _DEAD_PROCESS_ERROR_MESSAGE = (
        "The requested process to profile with pid={pid} on node "
        "{ip_address} is no longer running. "
        "GPU profiling is not available for this process."
    )
    def __init__(self, profile_dir_path: str, *, ip_address: str):
        """Initialize the manager.

        Args:
            profile_dir_path: Root log directory. Trace files and the daemon
                log are written to a `profiles/` subdirectory underneath it.
            ip_address: IP address of this node; used in trace filenames
                and error messages.
        """
        # Dump trace files to: /tmp/ray/session_latest/logs/profiles/
        self._root_log_dir = Path(profile_dir_path)
        self._profile_dir_path = self._root_log_dir / "profiles"
        self._daemon_log_file_path = (
            self._profile_dir_path / f"dynolog_daemon_{os.getpid()}.log"
        )
        self._ip_address = ip_address
        # Resolved binary paths (None when not found on PATH); both are
        # required for `enabled` to be True.
        self._dynolog_bin = shutil.which("dynolog")
        self._dyno_bin = shutil.which("dyno")
        self._dynolog_daemon_process: Optional[subprocess.Popen] = None
        if not self.node_has_gpus():
            logger.warning(
                "[GpuProfilingManager] No GPUs found on this node, GPU profiling will not be setup."
            )
        if not self._dynolog_bin or not self._dyno_bin:
            logger.warning(
                "[GpuProfilingManager] `dynolog` is not installed, GPU profiling will not be available."
            )
        self._profile_dir_path.mkdir(parents=True, exist_ok=True)
    @property
    def enabled(self) -> bool:
        # Profiling requires GPUs plus both the `dynolog` daemon and
        # `dyno` client binaries.
        return (
            self.node_has_gpus()
            and self._dynolog_bin is not None
            and self._dyno_bin is not None
        )
    @property
    def is_monitoring_daemon_running(self) -> bool:
        # Popen.poll() returns None while the child is still alive.
        return (
            self._dynolog_daemon_process is not None
            and self._dynolog_daemon_process.poll() is None
        )
    @classmethod
    @functools.cache
    def node_has_gpus(cls) -> bool:
        """Return True if `nvidia-smi` runs successfully (cached per process)."""
        try:
            subprocess.check_output(["nvidia-smi"], stderr=subprocess.DEVNULL)
            return True
        except Exception:
            return False
    @classmethod
    def is_pid_alive(cls, pid: int) -> bool:
        """Return True if a process with the given pid currently exists."""
        try:
            return psutil.pid_exists(pid) and psutil.Process(pid).is_running()
        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
            return False
    def start_monitoring_daemon(self):
        """Start the GPU profiling monitoring daemon if it's possible.
        This must be called before profiling.
        """
        if not self.enabled:
            logger.warning(
                "[GpuProfilingManager] GPU profiling is disabled, skipping daemon setup."
            )
            return
        if self.is_monitoring_daemon_running:
            logger.warning(
                "[GpuProfilingManager] GPU profiling monitoring daemon is already running."
            )
            return
        try:
            # Open in append mode so a restarted daemon does not clobber
            # earlier log output.
            with open(self._daemon_log_file_path, "ab") as log_file:
                daemon = subprocess.Popen(
                    [
                        self._dynolog_bin,
                        "--enable_ipc_monitor",
                        "--port",
                        str(self._DYNOLOG_PORT),
                    ],
                    stdout=log_file,
                    stderr=log_file,
                    stdin=subprocess.DEVNULL,
                    # Detach into its own session so signals sent to this
                    # process group don't kill the daemon.
                    start_new_session=True,
                )
        except (FileNotFoundError, PermissionError, OSError) as e:
            logger.error(
                f"[GpuProfilingManager] Failed to launch GPU profiling monitoring daemon: {e}\n"
                f"Check error log for more details: {self._daemon_log_file_path}"
            )
            return
        logger.info(
            "[GpuProfilingManager] Launched GPU profiling monitoring daemon "
            f"(pid={daemon.pid}, port={self._DYNOLOG_PORT})\n"
            f"Redirecting logs to: {self._daemon_log_file_path}"
        )
        self._dynolog_daemon_process = daemon
    def _get_trace_filename(self) -> str:
        # Timestamped name so repeated profiling runs don't collide.
        timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
        return f"gputrace_{self._ip_address}_{timestamp}.json"
    async def gpu_profile(
        self, pid: int, num_iterations: int, _timeout_s: int = _DEFAULT_TIMEOUT_S
    ) -> Tuple[bool, str]:
        """
        Perform GPU profiling on a specified process.
        Args:
            pid: The process ID (PID) of the target process to be profiled.
            num_iterations: The number of iterations to profile.
            _timeout_s: Maximum time in seconds to wait for profiling to complete.
                This is an advanced parameter that catches edge cases where the
                profiling request never completes and hangs indefinitely.
        Returns:
            Tuple[bool, str]: A tuple containing a boolean indicating the success
                of the profiling operation and a string with the
                filepath of the trace file relative to the root log directory,
                or an error message.
        """
        # Pre-flight checks: feature enabled, daemon started and alive,
        # target process alive.
        if not self.enabled:
            return False, self._DISABLED_ERROR_MESSAGE.format(
                ip_address=self._ip_address
            )
        if not self._dynolog_daemon_process:
            raise RuntimeError("Must call `start_monitoring_daemon` before profiling.")
        if not self.is_monitoring_daemon_running:
            error_msg = (
                f"GPU monitoring daemon (pid={self._dynolog_daemon_process.pid}) "
                f"is not running on node {self._ip_address}. "
                f"See log for more details: {self._daemon_log_file_path}"
            )
            logger.error(f"[GpuProfilingManager] {error_msg}")
            return False, error_msg
        if not self.is_pid_alive(pid):
            error_msg = self._DEAD_PROCESS_ERROR_MESSAGE.format(
                pid=pid, ip_address=self._ip_address
            )
            logger.error(f"[GpuProfilingManager] {error_msg}")
            return False, error_msg
        trace_file_name = self._get_trace_filename()
        trace_file_path = self._profile_dir_path / trace_file_name
        # `dyno` client command that asks the daemon to trace the target pid.
        cmd = [
            self._dyno_bin,
            "--port",
            str(self._DYNOLOG_PORT),
            "gputrace",
            "--pids",
            str(pid),
            "--log-file",
            str(trace_file_path),
            "--process-limit",
            str(1),
            "--iterations",
            str(num_iterations),
        ]
        process = await asyncio.create_subprocess_exec(
            *cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        stdout, stderr = await process.communicate()
        if process.returncode != 0:
            return False, _format_failed_profiler_command(cmd, "dyno", stdout, stderr)
        stdout_str = stdout.decode("utf-8")
        logger.info(f"[GpuProfilingManager] Launched profiling: {stdout_str}")
        # The initial launch command returns immediately,
        # so wait for the profiling to actually finish before returning.
        # The indicator of the profiling finishing is the creation of the trace file,
        # when the completed trace is moved from <prefix>.tmp.json -> <prefix>.json
        # If the profiling request is invalid (e.g. "No processes were matched"),
        # the trace file will not be created and this will hang indefinitely,
        # up until the timeout is reached.
        # TODO(ml-team): This logic is brittle, we should find a better way to do this.
        if self._NO_PROCESSES_MATCHED_ERROR_MESSAGE_PREFIX in stdout_str:
            error_msg = self._NO_PROCESSES_MATCHED_ERROR_MESSAGE.format(
                pid=pid, ip_address=self._ip_address
            )
            logger.error(f"[GpuProfilingManager] {error_msg}")
            return False, error_msg
        # The actual trace file gets dumped with a suffix of `_{pid}.json`,
        # so glob for any file sharing the requested prefix.
        trace_file_name_pattern = trace_file_name.replace(".json", "*.json")
        return await self._wait_for_trace_file(pid, trace_file_name_pattern, _timeout_s)
    async def _wait_for_trace_file(
        self,
        pid: int,
        trace_file_name_pattern: str,
        timeout_s: int,
        sleep_interval_s: float = 0.25,
    ) -> Tuple[bool, str]:
        """Wait for the trace file to be created.
        Args:
            pid: The target process to be profiled.
            trace_file_name_pattern: The pattern of the trace file to be created
                within the `<log_dir>/profiles` directory.
            timeout_s: Maximum time in seconds to wait for profiling to complete.
            sleep_interval_s: Time in seconds to sleep between checking for the trace file.
        Returns:
            Tuple[bool, str]: (success, trace file path relative to the *root* log directory)
        """
        remaining_timeout_s = timeout_s
        logger.info(
            "[GpuProfilingManager] Waiting for trace file to be created "
            f"with the pattern: {trace_file_name_pattern}"
        )
        # Poll the filesystem until the trace file appears, the timeout
        # elapses, or the target process dies.
        while True:
            dumped_trace_file_path = next(
                self._profile_dir_path.glob(trace_file_name_pattern), None
            )
            if dumped_trace_file_path is not None:
                break
            await asyncio.sleep(sleep_interval_s)
            remaining_timeout_s -= sleep_interval_s
            if remaining_timeout_s <= 0:
                return (
                    False,
                    f"GPU profiling timed out after {timeout_s} seconds, please try again.",
                )
            # If the process has already exited, return an error.
            if not self.is_pid_alive(pid):
                return (
                    False,
                    self._DEAD_PROCESS_ERROR_MESSAGE.format(
                        pid=pid, ip_address=self._ip_address
                    ),
                )
        logger.info(
            f"[GpuProfilingManager] GPU profiling finished, trace file: {dumped_trace_file_path}"
        )
        return True, str(dumped_trace_file_path.relative_to(self._root_log_dir))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/dashboard/modules/reporter/gpu_profile_manager.py",
"license": "Apache License 2.0",
"lines": 267,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/dashboard/modules/reporter/tests/test_gpu_profiler_manager.py | """Unit tests for the GPU profiler manager.
All GPU and dynolog dependencies are mocked out.
This test just verifies that commands are launched correctly and that
validations are correctly performed.
"""
import asyncio
import sys
from pathlib import Path
from unittest.mock import AsyncMock, MagicMock
import pytest
from ray.dashboard.modules.reporter.gpu_profile_manager import GpuProfilingManager
@pytest.fixture
def mock_node_has_gpus(monkeypatch):
    """Force GpuProfilingManager to report that GPUs are present.

    The patched value is wrapped in ``classmethod`` so it supports both
    instance access (``self.node_has_gpus()``) and class-level access
    (``GpuProfilingManager.node_has_gpus()``), matching how
    ``test_disabled_no_gpus`` patches the same attribute. A bare function
    would raise TypeError when called on the class.
    """
    monkeypatch.setattr(
        GpuProfilingManager, "node_has_gpus", classmethod(lambda cls: True)
    )
    yield
@pytest.fixture
def mock_dynolog_binaries(monkeypatch):
    """Pretend every binary (in particular `dynolog`/`dyno`) exists on PATH."""

    def fake_which(cmd):
        return f"/usr/bin/fake_{cmd}"

    monkeypatch.setattr("shutil.which", fake_which)
    yield
@pytest.fixture
def mock_subprocess_popen(monkeypatch):
    """Replace subprocess.Popen with a MagicMock.

    Yields a ``(popen_mock, process_mock)`` pair where ``process_mock`` is
    what the patched ``Popen`` returns.
    """
    popen_mock = MagicMock()
    process_mock = MagicMock()
    popen_mock.return_value = process_mock
    monkeypatch.setattr("subprocess.Popen", popen_mock)
    yield (popen_mock, process_mock)
# Loopback address used as the fake node IP throughout these tests.
LOCALHOST = "127.0.0.1"
@pytest.fixture
def mock_asyncio_create_subprocess_exec(monkeypatch):
    # Replace asyncio.create_subprocess_exec with an AsyncMock whose spawned
    # "process" succeeds (returncode 0) with canned stdout/stderr bytes.
    # Yields (create_subprocess_exec mock, async process mock).
    mock_create_subprocess_exec = AsyncMock()
    mock_async_proc = mock_create_subprocess_exec.return_value = AsyncMock()
    mock_async_proc.communicate.return_value = b"mock stdout", b"mock stderr"
    mock_async_proc.returncode = 0
    monkeypatch.setattr("asyncio.create_subprocess_exec", mock_create_subprocess_exec)
    yield (mock_create_subprocess_exec, mock_async_proc)
def test_enabled(tmp_path, mock_node_has_gpus, mock_dynolog_binaries):
    """With GPUs visible and dynolog binaries on PATH, profiling is enabled."""
    manager = GpuProfilingManager(tmp_path, ip_address=LOCALHOST)
    assert manager.enabled
def test_disabled_no_gpus(tmp_path, monkeypatch):
    """No GPUs detected on the node => profiling is disabled."""
    monkeypatch.setattr(
        GpuProfilingManager, "node_has_gpus", classmethod(lambda cls: False)
    )
    manager = GpuProfilingManager(tmp_path, ip_address=LOCALHOST)
    assert not manager.enabled
def test_disabled_no_dynolog_bin(tmp_path, mock_node_has_gpus):
    # NOTE(review): this relies on the real `shutil.which` not finding
    # `dynolog`/`dyno` in the test environment — confirm for CI images that
    # happen to have dynolog installed.
    gpu_profiler = GpuProfilingManager(tmp_path, ip_address=LOCALHOST)
    assert not gpu_profiler.enabled
def test_start_monitoring_daemon(
    tmp_path, mock_node_has_gpus, mock_dynolog_binaries, mock_subprocess_popen
):
    # Verifies the exact daemon launch command and the liveness property.
    gpu_profiler = GpuProfilingManager(tmp_path, ip_address=LOCALHOST)
    mocked_popen, mocked_proc = mock_subprocess_popen
    mocked_proc.pid = 123
    # poll() -> None simulates a live daemon process.
    mocked_proc.poll.return_value = None
    gpu_profiler.start_monitoring_daemon()
    assert gpu_profiler.is_monitoring_daemon_running
    assert mocked_popen.call_count == 1
    assert mocked_popen.call_args[0][0] == [
        "/usr/bin/fake_dynolog",
        "--enable_ipc_monitor",
        "--port",
        str(gpu_profiler._DYNOLOG_PORT),
    ]
    # "Terminate" the daemon
    mocked_proc.poll.return_value = 0
    assert not gpu_profiler.is_monitoring_daemon_running
@pytest.mark.asyncio
async def test_gpu_profile_disabled(tmp_path):
    """gpu_profile on a disabled manager fails with the 'disabled' message."""
    manager = GpuProfilingManager(tmp_path, ip_address=LOCALHOST)
    assert not manager.enabled
    ok, message = await manager.gpu_profile(pid=123, num_iterations=1)
    assert not ok
    assert message == manager._DISABLED_ERROR_MESSAGE.format(
        ip_address=manager._ip_address
    )
@pytest.mark.asyncio
async def test_gpu_profile_without_starting_daemon(
    tmp_path, mock_node_has_gpus, mock_dynolog_binaries
):
    """Calling gpu_profile before start_monitoring_daemon raises RuntimeError."""
    manager = GpuProfilingManager(tmp_path, ip_address=LOCALHOST)
    assert not manager.is_monitoring_daemon_running
    with pytest.raises(RuntimeError, match="start_monitoring_daemon"):
        await manager.gpu_profile(pid=123, num_iterations=1)
@pytest.mark.asyncio
async def test_gpu_profile_with_dead_daemon(
    tmp_path, mock_node_has_gpus, mock_dynolog_binaries, mock_subprocess_popen
):
    """Profiling fails with a daemon error when the monitoring daemon died.

    Fixes: removed a leftover debug ``print(output)`` and the unused
    ``mocked_popen`` local from the fixture unpacking.
    """
    gpu_profiler = GpuProfilingManager(tmp_path, ip_address=LOCALHOST)
    gpu_profiler.start_monitoring_daemon()
    _, mocked_proc = mock_subprocess_popen
    mocked_proc.pid = 123
    # "Terminate" the daemon: a non-None poll() result means it exited.
    mocked_proc.poll.return_value = 0
    assert not gpu_profiler.is_monitoring_daemon_running
    success, output = await gpu_profiler.gpu_profile(pid=456, num_iterations=1)
    assert not success
    assert "GPU monitoring daemon" in output
@pytest.mark.asyncio
async def test_gpu_profile_on_dead_process(
    tmp_path,
    monkeypatch,
    mock_node_has_gpus,
    mock_dynolog_binaries,
    mock_subprocess_popen,
):
    # A dead *target* process (daemon is alive) yields the dead-process error.
    gpu_profiler = GpuProfilingManager(tmp_path, ip_address=LOCALHOST)
    gpu_profiler.start_monitoring_daemon()
    _, mocked_proc = mock_subprocess_popen
    mocked_proc.pid = 123
    mocked_proc.poll.return_value = None
    # Make the pid-liveness check report the target process as dead.
    monkeypatch.setattr(GpuProfilingManager, "is_pid_alive", lambda cls, pid: False)
    success, output = await gpu_profiler.gpu_profile(pid=456, num_iterations=1)
    assert not success
    assert output == gpu_profiler._DEAD_PROCESS_ERROR_MESSAGE.format(
        pid=456, ip_address=gpu_profiler._ip_address
    )
@pytest.mark.asyncio
async def test_gpu_profile_no_matched_processes(
    tmp_path,
    monkeypatch,
    mock_node_has_gpus,
    mock_dynolog_binaries,
    mock_subprocess_popen,
    mock_asyncio_create_subprocess_exec,
):
    # When `dyno` reports "No processes were matched", gpu_profile should
    # fail fast with the dedicated error instead of waiting for a trace file.
    gpu_profiler = GpuProfilingManager(tmp_path, ip_address=LOCALHOST)
    gpu_profiler.start_monitoring_daemon()
    # Mock the daemon process
    _, mocked_daemon_proc = mock_subprocess_popen
    mocked_daemon_proc.pid = 123
    mocked_daemon_proc.poll.return_value = None
    monkeypatch.setattr(GpuProfilingManager, "is_pid_alive", lambda cls, pid: True)
    # Mock the asyncio.create_subprocess_exec
    (
        mocked_create_subprocess_exec,
        mocked_async_proc,
    ) = mock_asyncio_create_subprocess_exec
    # Have the dyno client emit the "no processes matched" marker on stdout.
    mocked_async_proc.communicate.return_value = (
        f"{gpu_profiler._NO_PROCESSES_MATCHED_ERROR_MESSAGE_PREFIX}".encode(),
        b"dummy stderr",
    )
    process_pid = 456
    num_iterations = 1
    success, output = await gpu_profiler.gpu_profile(
        pid=process_pid, num_iterations=num_iterations
    )
    assert mocked_create_subprocess_exec.call_count == 1
    assert not success
    assert output == gpu_profiler._NO_PROCESSES_MATCHED_ERROR_MESSAGE.format(
        pid=process_pid, ip_address=gpu_profiler._ip_address
    )
@pytest.mark.asyncio
async def test_gpu_profile_timeout(
    tmp_path,
    monkeypatch,
    mock_node_has_gpus,
    mock_dynolog_binaries,
    mock_subprocess_popen,
    mock_asyncio_create_subprocess_exec,
):
    # No trace file ever appears, so gpu_profile should give up once the
    # (shortened) timeout elapses.
    gpu_profiler = GpuProfilingManager(tmp_path, ip_address=LOCALHOST)
    gpu_profiler.start_monitoring_daemon()
    # Mock the daemon process
    _, mocked_daemon_proc = mock_subprocess_popen
    mocked_daemon_proc.pid = 123
    mocked_daemon_proc.poll.return_value = None
    monkeypatch.setattr(GpuProfilingManager, "is_pid_alive", lambda cls, pid: True)
    process_pid = 456
    num_iterations = 1
    task = asyncio.create_task(
        gpu_profiler.gpu_profile(
            pid=process_pid, num_iterations=num_iterations, _timeout_s=0.1
        )
    )
    # Sleep past the 0.1s timeout before collecting the result.
    await asyncio.sleep(0.2)
    success, output = await task
    assert not success
    assert "timed out" in output
@pytest.mark.asyncio
async def test_gpu_profile_process_dies_during_profiling(
    tmp_path,
    monkeypatch,
    mock_node_has_gpus,
    mock_dynolog_binaries,
    mock_subprocess_popen,
    mock_asyncio_create_subprocess_exec,
):
    # The target process passes the pre-flight liveness check, then "dies"
    # while gpu_profile is waiting for the trace file.
    gpu_profiler = GpuProfilingManager(tmp_path, ip_address=LOCALHOST)
    gpu_profiler.start_monitoring_daemon()
    # Mock the daemon process
    _, mocked_daemon_proc = mock_subprocess_popen
    mocked_daemon_proc.pid = 123
    mocked_daemon_proc.poll.return_value = None
    monkeypatch.setattr(GpuProfilingManager, "is_pid_alive", lambda cls, pid: True)
    process_pid = 456
    num_iterations = 1
    task = asyncio.create_task(
        gpu_profiler.gpu_profile(pid=process_pid, num_iterations=num_iterations)
    )
    # Flip liveness to False before the task's polling loop checks it
    # (the task body has not started running yet at this point).
    monkeypatch.setattr(GpuProfilingManager, "is_pid_alive", lambda cls, pid: False)
    await asyncio.sleep(0.2)
    success, output = await task
    assert not success
    assert output == gpu_profiler._DEAD_PROCESS_ERROR_MESSAGE.format(
        pid=process_pid, ip_address=gpu_profiler._ip_address
    )
@pytest.mark.asyncio
async def test_gpu_profile_success(
    tmp_path,
    monkeypatch,
    mock_node_has_gpus,
    mock_dynolog_binaries,
    mock_subprocess_popen,
    mock_asyncio_create_subprocess_exec,
):
    # Happy path: verifies the dyno command line and the returned relative
    # trace path when the trace file already exists on disk.
    gpu_profiler = GpuProfilingManager(tmp_path, ip_address=LOCALHOST)
    gpu_profiler.start_monitoring_daemon()
    # Mock the daemon process
    _, mocked_daemon_proc = mock_subprocess_popen
    mocked_daemon_proc.pid = 123
    mocked_daemon_proc.poll.return_value = None
    monkeypatch.setattr(GpuProfilingManager, "is_pid_alive", lambda cls, pid: True)
    # Pin the trace filename so the test can pre-create it.
    monkeypatch.setattr(
        GpuProfilingManager, "_get_trace_filename", lambda cls: "dummy_trace.json"
    )
    dumped_trace_filepath = gpu_profiler._profile_dir_path / "dummy_trace.json"
    # Pre-create the trace file so the wait loop finds it immediately.
    dumped_trace_filepath.touch()
    # Mock the asyncio.create_subprocess_exec
    (
        mocked_create_subprocess_exec,
        mocked_async_proc,
    ) = mock_asyncio_create_subprocess_exec
    process_pid = 456
    num_iterations = 1
    success, output = await gpu_profiler.gpu_profile(
        pid=process_pid, num_iterations=num_iterations
    )
    # Verify the command was launched correctly
    assert mocked_create_subprocess_exec.call_count == 1
    profile_launch_args = list(mocked_create_subprocess_exec.call_args[0])
    assert profile_launch_args[:6] == [
        "/usr/bin/fake_dyno",
        "--port",
        str(gpu_profiler._DYNOLOG_PORT),
        "gputrace",
        "--pids",
        str(process_pid),
    ]
    assert "--log-file" in profile_launch_args
    profile_log_file_arg = profile_launch_args[
        profile_launch_args.index("--log-file") + 1
    ]
    assert Path(profile_log_file_arg).is_relative_to(tmp_path)
    assert "--iterations" in profile_launch_args
    assert profile_launch_args[profile_launch_args.index("--iterations") + 1] == str(
        num_iterations
    )
    assert success
    # gpu_profile returns the path relative to the *root* log directory.
    assert output == str(dumped_trace_filepath.relative_to(tmp_path))
# Allow running this test module directly: `python test_gpu_profiler_manager.py`.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/dashboard/modules/reporter/tests/test_gpu_profiler_manager.py",
"license": "Apache License 2.0",
"lines": 262,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:release/llm_tests/serve/test_llm_serve_correctness.py | import subprocess
import time
from typing import Literal
import requests
import pytest
from openai import OpenAI
from ray import serve
from ray.serve.llm import LLMConfig, build_openai_app, ModelLoadingConfig
# Hugging Face model that both backends (Ray Serve LLM and vLLM) load.
MODEL_ID = "Qwen/Qwen2.5-0.5B-Instruct"
# Served model name registered with Ray Serve LLM.
RAY_MODEL_ID = "qwen-0.5b"
# Completion length cap shared by both backends so outputs are comparable.
MAX_OUTPUT_TOKENS = 256
# Fixed sampling seed so both backends produce deterministic output.
SEED = 42
def get_llm_config(
    tensor_parallel_size: int = 1, pipeline_parallel_size: int = 1
) -> LLMConfig:
    """Create LLMConfig with specified parallelism parameters."""
    return LLMConfig(
        model_loading_config=ModelLoadingConfig(
            model_id=RAY_MODEL_ID,
            model_source=MODEL_ID,
        ),
        # Exactly one replica: autoscaling pinned to min=max=1.
        deployment_config=dict(
            autoscaling_config=dict(
                min_replicas=1,
                max_replicas=1,
            ),
        ),
        # Parallelism settings forwarded to the underlying vLLM engine.
        engine_kwargs=dict(
            tensor_parallel_size=tensor_parallel_size,
            pipeline_parallel_size=pipeline_parallel_size,
        ),
        runtime_env=None,
    )
def start_ray_serve(
    tensor_parallel_size: int = 1, pipeline_parallel_size: int = 1
) -> str:
    """Start Ray Serve with specified parallelism parameters.

    Returns:
        The base URL of the Serve HTTP endpoint.
    """
    ray_url = "http://localhost:8000"
    llm_config: LLMConfig = get_llm_config(tensor_parallel_size, pipeline_parallel_size)
    app = build_openai_app({"llm_configs": [llm_config]})
    # Non-blocking so the caller can continue and poll for readiness.
    serve.run(app, blocking=False)
    return ray_url
def create_openai_client(server_url: str) -> OpenAI:
    """Build an OpenAI client pointed at ``server_url``'s /v1 API."""
    return OpenAI(base_url=f"{server_url}/v1", api_key="fake-key")
def generate_completion(client: OpenAI, model_id: str, test_prompt: str) -> str:
    """Request a deterministic (temperature 0, fixed seed) completion text."""
    completion = client.completions.create(
        model=model_id,
        prompt=test_prompt,
        temperature=0.0,
        max_tokens=MAX_OUTPUT_TOKENS,
        seed=SEED,
    )
    first_choice = completion.choices[0]
    return first_choice.text
def generate_chat_completion(client: OpenAI, model_id: str, test_message: str) -> str:
    """Request a deterministic chat completion for a single user message."""
    response = client.chat.completions.create(
        model=model_id,
        messages=[{"role": "user", "content": test_message}],
        temperature=0.0,
        max_tokens=MAX_OUTPUT_TOKENS,
        seed=SEED,
    )
    first_choice = response.choices[0]
    return first_choice.message.content
class VllmServer:
    """Thin wrapper that launches a standalone vLLM OpenAI server subprocess."""

    def __init__(
        self,
        tensor_parallel_size: int = 1,
        pipeline_parallel_size: int = 1,
        model_id: str = MODEL_ID,
    ):
        self.tensor_parallel_size = tensor_parallel_size
        self.pipeline_parallel_size = pipeline_parallel_size
        self.model_id = model_id
        # Launch the server and block until it answers completion requests.
        self.vllm_url = self._start_vllm_server()
        self.openai_client = create_openai_client(self.vllm_url)
        wait_for_server_ready(self.vllm_url, server_type="vllm", timeout=240)
    def _start_vllm_server(self) -> str:
        """Start vLLM server with specified parallelism parameters."""
        # Port 8001 to avoid colliding with Ray Serve's default port 8000.
        vllm_port = 8001
        cmd = [
            "vllm",
            "serve",
            self.model_id,
            "--port",
            str(vllm_port),
            "--distributed-executor-backend=ray",
            "--tensor-parallel-size",
            str(self.tensor_parallel_size),
            "--pipeline-parallel-size",
            str(self.pipeline_parallel_size),
        ]
        self.process = subprocess.Popen(cmd)
        return f"http://localhost:{vllm_port}"
    def generate_completion(self, test_prompt: str) -> str:
        """Generate completion using the provided OpenAI client."""
        return generate_completion(self.openai_client, self.model_id, test_prompt)
    def generate_chat_completion(self, test_message: str) -> str:
        """Generate chat completion using the provided OpenAI client."""
        return generate_chat_completion(self.openai_client, self.model_id, test_message)
    def shutdown(self):
        """Shutdown the vLLM server."""
        # SIGTERM first, wait up to ~5s, then SIGKILL as a last resort.
        self.process.terminate()
        for _ in range(5):
            if self.process.poll() is not None:
                break
            time.sleep(1)
        if self.process.poll() is None:
            self.process.kill()
def wait_for_server_ready(
    url: str,
    server_type: Literal["ray", "vllm"] = "ray",
    timeout: int = 120,
    retry_interval: int = 2,
) -> bool:
    """Poll the server until it's ready or timeout is reached.
    Args:
        url: The server URL to check
        server_type: Either "ray" or "vllm"
        timeout: Maximum time to wait in seconds
        retry_interval: Time between retry attempts
    Returns:
        True once the server successfully handles a completion request.
    Raises:
        TimeoutError: If the server does not become ready within `timeout`.
    """
    start_time = time.time()
    while time.time() - start_time < timeout:
        try:
            # Directly test if the server can handle a completion request
            # (vLLM serves the HF model id; Ray Serve uses the registered id).
            model_id = MODEL_ID if server_type == "vllm" else RAY_MODEL_ID
            test_data = {
                "model": model_id,
                "prompt": "test",
                "max_tokens": 5,
                "temperature": 0,
            }
            completion_response = requests.post(
                f"{url}/v1/completions", json=test_data, timeout=10
            )
            if completion_response.status_code == 200:
                print(
                    f"{server_type.upper()} server at {url} is ready to handle requests!"
                )
                return True
        except Exception:
            # Deliberate best-effort polling: connection errors just mean
            # the server isn't up yet, so swallow and retry.
            pass
        print(f"Waiting for {server_type.upper()} server at {url} to be ready...")
        time.sleep(retry_interval)
    raise TimeoutError(
        f"{server_type.upper()} server at {url} did not become ready within {timeout} seconds"
    )
@pytest.mark.parametrize(
    "tensor_parallel_size, pipeline_parallel_size",
    [
        (1, 1),
        (2, 1),
        (1, 2),
        (2, 2),
    ],
)
def test_llm_serve_correctness(
    tensor_parallel_size: int, pipeline_parallel_size: int
) -> None:
    """Test that Ray Serve and vLLM produce the same completion output for the same input."""
    test_prompt = "Two households, both alike in dignity,"
    test_message = "What is the capital of France?"
    # Phase 1: collect deterministic outputs from Ray Serve LLM.
    print(
        f"Starting Ray Serve LLM with tensor_parallel_size={tensor_parallel_size}, pipeline_parallel_size={pipeline_parallel_size}"
    )
    ray_url = start_ray_serve(tensor_parallel_size, pipeline_parallel_size)
    ray_client = create_openai_client(ray_url)
    wait_for_server_ready(ray_url, server_type="ray", timeout=240)
    time.sleep(5)  # Buffer time for server to be ready
    ray_completion_output = generate_completion(ray_client, RAY_MODEL_ID, test_prompt)
    ray_chat_output = generate_chat_completion(ray_client, RAY_MODEL_ID, test_message)
    # Tear down Serve before starting vLLM so the GPUs are free.
    serve.shutdown()
    # Phase 2: collect the same outputs from a standalone vLLM server.
    print(
        f"Starting vLLM server with tensor_parallel_size={tensor_parallel_size}, pipeline_parallel_size={pipeline_parallel_size}"
    )
    vllm_server = VllmServer(tensor_parallel_size, pipeline_parallel_size)
    time.sleep(5)  # Buffer time for server to be ready
    vllm_completion_output = vllm_server.generate_completion(test_prompt)
    vllm_chat_output = vllm_server.generate_chat_completion(test_message)
    vllm_server.shutdown()
    # Phase 3: outputs must match exactly (temperature 0, fixed seed).
    assert ray_completion_output == vllm_completion_output, (
        f"Ray and vLLM outputs do not match with TP={tensor_parallel_size}, PP={pipeline_parallel_size}\n"
        f"Ray output: {ray_completion_output}\n"
        f"vLLM output: {vllm_completion_output}"
    )
    assert ray_chat_output == vllm_chat_output, (
        f"Ray and vLLM chat outputs do not match with TP={tensor_parallel_size}, PP={pipeline_parallel_size}\n"
        f"Ray output: {ray_chat_output}\n"
        f"vLLM output: {vllm_chat_output}"
    )
# Allow running this release test directly as a script.
if __name__ == "__main__":
    pytest.main(["-xvs", __file__])
| {
"repo_id": "ray-project/ray",
"file_path": "release/llm_tests/serve/test_llm_serve_correctness.py",
"license": "Apache License 2.0",
"lines": 196,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/util/dask/tests/test_dask_multi_node.py | import sys
import dask
import dask.dataframe as dd
import numpy as np
import pandas as pd
import pytest
import ray
from ray.tests.conftest import * # noqa: F403, F401
from ray.util.dask import enable_dask_on_ray
@pytest.fixture
def ray_enable_dask_on_ray():
    # Activate the Dask-on-Ray scheduler for the duration of a test.
    with enable_dask_on_ray():
        yield
def test_ray_dask_resources(ray_start_cluster, ray_enable_dask_on_ray):
    # Exercises the ways Ray remote args (resources) can be attached to
    # Dask-on-Ray computations: dask.annotate on the collection, on compute,
    # and via the ray_remote_args= kwarg of .compute().
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=1)
    cluster.add_node(num_cpus=1, resources={"other_pin": 1})
    # Tasks requesting the "pin" resource (or the GPU) must land here.
    pinned_node = cluster.add_node(num_cpus=1, num_gpus=1, resources={"pin": 1})
    ray.init(address=cluster.address)
    def get_node_id():
        return ray._private.worker.global_worker.node.unique_id
    # Test annotations on collection.
    with dask.annotate(ray_remote_args=dict(num_cpus=1, resources={"pin": 0.01})):
        c = dask.delayed(get_node_id)()
    result = c.compute(optimize_graph=False)
    assert result == pinned_node.unique_id
    # Test annotations on compute.
    c = dask.delayed(get_node_id)()
    with dask.annotate(ray_remote_args=dict(num_gpus=1, resources={"pin": 0.01})):
        result = c.compute(optimize_graph=False)
    assert result == pinned_node.unique_id
    # Test compute global Ray remote args.
    c = dask.delayed(get_node_id)
    result = c().compute(ray_remote_args={"resources": {"pin": 0.01}})
    assert result == pinned_node.unique_id
    # Test annotations on collection override global resource.
    with dask.annotate(ray_remote_args=dict(resources={"pin": 0.01})):
        c = dask.delayed(get_node_id)()
    result = c.compute(
        ray_remote_args=dict(resources={"other_pin": 0.01}), optimize_graph=False
    )
    assert result == pinned_node.unique_id
    # Test top-level resources raises an error.
    with pytest.raises(ValueError):
        with dask.annotate(resources={"pin": 0.01}):
            c = dask.delayed(get_node_id)()
        result = c.compute(optimize_graph=False)
    with pytest.raises(ValueError):
        c = dask.delayed(get_node_id)
        result = c().compute(resources={"pin": 0.01})
    # Deliberate redefinition: the DataFrame .apply path needs a row-wise
    # variant that returns a pandas Series.
    def get_node_id(row):
        return pd.Series(ray._private.worker.global_worker.node.unique_id)
    # Test annotations on compute.
    df = dd.from_pandas(
        pd.DataFrame(np.random.randint(0, 2, size=(2, 2)), columns=["age", "grade"]),
        npartitions=2,
    )
    c = df.apply(get_node_id, axis=1, meta={0: str})
    with dask.annotate(ray_remote_args=dict(num_gpus=1, resources={"pin": 0.01})):
        result = c.compute(optimize_graph=False)
    assert result[0].iloc[0] == pinned_node.unique_id
    # Test compute global Ray remote args.
    c = df.apply(get_node_id, axis=1, meta={0: str})
    result = c.compute(
        ray_remote_args={"resources": {"pin": 0.01}}, optimize_graph=False
    )
    assert result[0].iloc[0] == pinned_node.unique_id
# Allow running this test module directly: `python test_dask_multi_node.py`.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/util/dask/tests/test_dask_multi_node.py",
"license": "Apache License 2.0",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/tests/test_channel_serialization.py | # coding: utf-8
import logging
import os
import sys
import pytest
import torch
from ray.experimental.channel.serialization_context import _SerializationContext
from ray.experimental.util.types import Device
logger = logging.getLogger(__name__)
@pytest.mark.parametrize(
    "scalar_and_dtype",
    [
        # Basic tests
        (1.23456, torch.float16),
        (1.23456, torch.bfloat16),
        (1.23456, torch.float32),
        (1.23456, torch.float64),
        (123, torch.int8),
        (123, torch.int16),
        (123456, torch.int32),
        (123456, torch.int64),
        (123, torch.uint8),
        (123, torch.uint16),
        (123456, torch.uint32),
        (123456, torch.uint64),
        (True, torch.bool),
        # Boundary values tests - integers
        (127, torch.int8),  # INT8_MAX
        (-128, torch.int8),  # INT8_MIN
        (32767, torch.int16),  # INT16_MAX
        (-32768, torch.int16),  # INT16_MIN
        (2147483647, torch.int32),  # INT32_MAX
        (-2147483648, torch.int32),  # INT32_MIN
        (9223372036854775807, torch.int64),  # INT64_MAX
        (-9223372036854775808, torch.int64),  # INT64_MIN
        # Boundary values tests - unsigned integers
        (255, torch.uint8),  # UINT8_MAX
        (0, torch.uint8),  # UINT8_MIN
        (65535, torch.uint16),  # UINT16_MAX
        (0, torch.uint16),  # UINT16_MIN
        (4294967295, torch.uint32),  # UINT32_MAX
        (0, torch.uint32),  # UINT32_MIN
        (18446744073709551615, torch.uint64),  # UINT64_MAX
        (0, torch.uint64),  # UINT64_MIN
        # Floating point special values
        (float("inf"), torch.float32),
        (float("-inf"), torch.float32),
        (float("nan"), torch.float32),
        (float("inf"), torch.float64),
        (float("-inf"), torch.float64),
        (float("nan"), torch.float64),
        # Float precision tests
        (1.2345678901234567, torch.float32),  # Beyond float32 precision
        (1.2345678901234567, torch.float64),  # Within float64 precision
        (1e-45, torch.float32),  # Near float32 smallest positive normal
        (
            2.2250738585072014e-308,
            torch.float64,
        ),  # Near float64 smallest positive normal
    ],
)
def test_scalar_tensor(scalar_and_dtype):
    # Round-trips a 0-dim tensor through the channel serialization context
    # and checks value and dtype are preserved.
    scalar, dtype = scalar_and_dtype
    context = _SerializationContext()
    scalar_tensor = torch.tensor(scalar, dtype=dtype)
    np_array, tensor_dtype, tensor_device_type = context.serialize_to_numpy_or_scalar(
        scalar_tensor
    )
    assert tensor_dtype == dtype
    deserialized_tensor = context.deserialize_from_numpy_or_scalar(
        np_array, dtype, tensor_device_type, Device.CPU
    )
    # Special handling for NaN values (NaN != NaN under ==).
    if torch.is_floating_point(scalar_tensor) and torch.isnan(scalar_tensor):
        assert torch.isnan(deserialized_tensor)
    else:
        assert (deserialized_tensor == scalar_tensor).all()
@pytest.mark.parametrize(
    "tensor_shape_and_dtype",
    [
        ((10, 10), torch.float16),
        ((10, 10), torch.bfloat16),
        ((10, 10, 10), torch.float32),
        ((10, 10, 10, 10), torch.float64),
        ((10, 10), torch.int8),
        ((10, 10), torch.int16),
        ((10, 10), torch.int32),
        ((10, 10), torch.int64),
        ((10, 10), torch.uint8),
        ((10, 10), torch.uint16),
        ((10, 10), torch.uint32),
        ((10, 10), torch.uint64),
    ],
)
def test_non_scalar_tensor(tensor_shape_and_dtype):
    # Round-trips multi-dimensional tensors of each dtype through the
    # serialization context and checks elementwise equality.
    tensor_shape, dtype = tensor_shape_and_dtype
    context = _SerializationContext()
    # Create tensor based on dtype with varying values
    if dtype in [torch.float16, torch.bfloat16, torch.float32, torch.float64]:
        # For floating point types, use randn
        tensor = torch.randn(*tensor_shape).to(dtype)
    else:
        # For integer types, create varying values within appropriate ranges
        total_elements = torch.prod(torch.tensor(tensor_shape)).item()
        if dtype == torch.uint8:
            # Range: 0 to 255
            values = torch.arange(0, min(total_elements, 256), dtype=torch.int32) % 256
        elif dtype == torch.uint16:
            # Range: 0 to 65535
            values = (
                torch.arange(0, min(total_elements, 65536), dtype=torch.int32) % 65536
            )
        elif dtype == torch.int8:
            # Range: -128 to 127
            values = (
                torch.arange(0, min(total_elements, 256), dtype=torch.int32) % 256
            ) - 128
        elif dtype == torch.int16:
            # Range: -32768 to 32767
            values = (
                torch.arange(0, min(total_elements, 65536), dtype=torch.int32) % 65536
            ) - 32768
        elif dtype == torch.int32:
            # Use a smaller range to avoid overflow
            values = torch.arange(0, total_elements, dtype=torch.int32) % 10000 - 5000
        else:  # int64
            # Use a smaller range to avoid overflow
            values = torch.arange(0, total_elements, dtype=torch.int64) % 10000 - 5000
        # Reshape the values to match the target shape
        tensor = values.reshape(tensor_shape).to(dtype)
    np_array, tensor_dtype, tensor_device_type = context.serialize_to_numpy_or_scalar(
        tensor
    )
    deserialized_tensor = context.deserialize_from_numpy_or_scalar(
        np_array, tensor_dtype, tensor_device_type, Device.CPU
    )
    assert (tensor == deserialized_tensor).all()
if __name__ == "__main__":
    # PARALLEL_CI selects a parallel, process-isolated pytest invocation
    # (-n auto --boxed; presumably pytest-xdist — confirm against CI config).
    if os.environ.get("PARALLEL_CI"):
        sys.exit(pytest.main(["-n", "auto", "--boxed", "-vs", __file__]))
    else:
        sys.exit(pytest.main(["-sv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/test_channel_serialization.py",
"license": "Apache License 2.0",
"lines": 142,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:rllib/examples/algorithms/appo_custom_algorithm_w_shared_data_actor.py | """Example of how to write a custom APPO that uses a global shared data actor.
The actor is custom code and its remote APIs can be designed as the user requires.
It is created inside the Algorithm's `setup` method and then shared through its
reference with all of the Algorithm's other actors, like EnvRunners, Learners, and
aggregator actors.
During sampling and through using callbacks, each EnvRunner assigns a unique ID
to each sampled episode chunk, then sends manipulated reward data for each sampled
episode chunk to the shared data actor. In particular, the manipulation consists of
each individual reward being multiplied by the EnvRunner's index (from 1 to ...).
Note that the actual reward in the episode is not altered and thus the metrics
reporting continues to show the original reward.
In the learner connector, which creates the train batch from episode data, a custom
connector piece then gets the manipulated rewards from the shared data actor using
the episode chunk's unique ID (see above) and uses the manipulated reward for training.
Note that because of this, different EnvRunners provide different reward signals, which
should make it slightly harder for the value function to learn consistently.
Nevertheless, because the default config here only uses 2 EnvRunners, each multiplying
their rewards by 1 and 2, respectively, this effect is negligible here and the example
should learn how to solve the CartPole-v1 env either way.
This example shows:
- how to write a custom, global shared data actor class with a custom remote API.
- how an instance of this shared data actor is created upon algorithm
initialization.
- how to distribute the actor reference of the shared actor to all other actors
in the Algorithm, for example EnvRunners, AggregatorActors, and Learners
- how to subclass an existing algorithm class (APPO) to implement a custom
Algorithm, overriding the `setup` method to control, which additional actors
should be created (and shared) by the algo, the `get_state/set_state` methods
to include the state of the new actor.
- how - through custom callbacks - the new actor can be written to and queried
from anywhere within the algorithm, for example its EnvRunner actors or Learners.
How to run this script
----------------------
`python [script file name].py`
For debugging, use the following additional command line options
`--no-tune --num-env-runners=0`
which should allow you to set breakpoints anywhere in the RLlib code and
have the execution stop there for inspection and debugging.
For logging to your WandB account, use:
`--wandb-key=[your WandB API key] --wandb-project=[some project name]
--wandb-run-name=[optional: WandB run name (within the defined project)]`
Results to expect
-----------------
The experiment should work regardless of whether you are using aggregator
actors or not. By default, the experiment provides one agg. actor per Learner,
but you can set `--num-aggregator-actors-per-learner=0` to have the learner
connector pipeline work directly inside the Learner actor(s).
+-------------------------------------------------+------------+--------+
| Trial name | status | iter |
| | | |
|-------------------------------------------------+------------+--------+
| APPOWithSharedDataActor_CartPole-v1_4e860_00000 | TERMINATED | 7 |
+-------------------------------------------------+------------+--------+
+------------------+------------------------+
| total time (s) | episode_return_mean |
| | |
|------------------+------------------------+
| 70.0315 | 468.42 |
+------------------+------------------------+
"""
import uuid
import ray
from ray.rllib.algorithms.appo import APPOConfig
from ray.rllib.connectors.connector_v2 import ConnectorV2
from ray.rllib.core import Columns
from ray.rllib.env.single_agent_episode import SingleAgentEpisode
from ray.rllib.examples.algorithms.classes.appo_w_shared_data_actor import (
APPOWithSharedDataActor,
)
from ray.rllib.examples.utils import (
add_rllib_example_script_args,
run_rllib_example_script_experiment,
)
# Command line parser, pre-populated with the common RLlib example options.
parser = add_rllib_example_script_args(
    default_reward=450.0,
    default_iters=200,
    default_timesteps=2000000,
)
parser.set_defaults(
    # One aggregator actor per Learner by default; set to 0 on the command
    # line to run the learner connector pipeline inside the Learner actor(s).
    num_aggregator_actors_per_learner=1,
)
# Key under which the manipulated (index-scaled) rewards are stashed in an
# episode's `custom_data` dict before being moved to the shared data actor.
SPECIAL_REWARDS_KEY = "special_(double)_rewards"
# Key for the index of the EnvRunner that sampled an episode chunk.
ENV_RUNNER_IDX_KEY = "env_runner_index"
# Key for the unique ID identifying an episode chunk in the shared actor.
UNIQUE_EPISODE_CHUNK_KEY = "unique_eps_chunk"
# Define 2 simple EnvRunner-based callbacks:
def on_episode_step(*, episode, env_runner, **kwargs):
    """Scales the most recent reward by the EnvRunner's worker index.

    The scaled rewards accumulate in the episode's `custom_data` under
    `SPECIAL_REWARDS_KEY`; the episode's actual rewards stay untouched.
    """
    scaled_reward = episode.get_rewards(-1) * env_runner.worker_index
    episode.custom_data.setdefault(SPECIAL_REWARDS_KEY, []).append(scaled_reward)
def on_sample_end(*, samples, env_runner, **kwargs):
    """Ships each episode chunk's manipulated rewards to the shared data actor.

    Each chunk receives a fresh UUID that is stored both in the episode's
    `custom_data` and as the storage key in the shared actor, so a Learner can
    later retrieve exactly this chunk's (manipulated) reward sequence.
    Alternatively, one could also just keep the data in `custom_data`.
    """
    for episode_chunk in samples:
        # Unique key identifying this chunk in episode AND shared data actor.
        chunk_id = str(uuid.uuid4())
        # Remember which EnvRunner sampled this chunk and under which key
        # its rewards are stored.
        episode_chunk.custom_data[ENV_RUNNER_IDX_KEY] = env_runner.worker_index
        episode_chunk.custom_data[UNIQUE_EPISODE_CHUNK_KEY] = chunk_id
        # Move the manipulated rewards out of the episode and into the shared
        # actor (fire-and-forget remote call).
        env_runner._shared_data_actor.put.remote(
            key=chunk_id,
            value=episode_chunk.custom_data.pop(SPECIAL_REWARDS_KEY),
        )
class ManipulatedRewardConnector(ConnectorV2):
    """Learner connector piece swapping in the manipulated rewards.

    Looks up each episode chunk's manipulated reward sequence in the shared
    data actor (keyed by the chunk's unique ID) and writes it into the train
    batch's rewards column instead of the original rewards.
    """

    def __call__(self, *, episodes, batch, metrics, **kwargs):
        if not isinstance(episodes[0], SingleAgentEpisode):
            raise ValueError("This connector only works on `SingleAgentEpisodes`.")
        # Fetch the manipulated rewards from the shared actor and add them to
        # the train batch.
        for episode in self.single_agent_episode_iterator(episodes):
            chunk_id = episode.custom_data[UNIQUE_EPISODE_CHUNK_KEY]
            rewards = ray.get(
                self._shared_data_actor.get.remote(chunk_id, delete=True)
            )
            if rewards is None:
                continue
            # Sanity check: rewards were scaled by the EnvRunner's index.
            assert int(rewards[0]) == episode.custom_data[ENV_RUNNER_IDX_KEY]
            # Append one artificial reward, b/c PPO-style algos extend every
            # episode by one timestep for GAE/v-trace computation purposes.
            rewards = rewards + [0.0]
            self.add_n_batch_items(
                batch=batch,
                column=Columns.REWARDS,
                items_to_add=rewards[-len(episode):],
                num_items=len(episode),
                single_agent_episode=episode,
            )
        return batch
if __name__ == "__main__":
    args = parser.parse_args()

    # Configure APPO, but run it through the custom Algorithm subclass that
    # creates (and shares) the `SharedDataActor`.
    base_config = APPOConfig(algo_class=APPOWithSharedDataActor)
    base_config.environment("CartPole-v1")
    # Hook up the two EnvRunner callbacks defined above.
    base_config.callbacks(
        on_episode_step=on_episode_step,
        on_sample_end=on_sample_end,
    )
    # Insert the custom learner connector piece reading the manipulated
    # rewards from the shared actor.
    base_config.training(
        learner_connector=lambda obs_sp, act_sp: ManipulatedRewardConnector(),
    )

    run_rllib_example_script_experiment(base_config, args)
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/examples/algorithms/appo_custom_algorithm_w_shared_data_actor.py",
"license": "Apache License 2.0",
"lines": 150,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:rllib/examples/algorithms/classes/appo_w_shared_data_actor.py | from typing import List
import ray
from ray.rllib.algorithms import AlgorithmConfig
from ray.rllib.algorithms.appo import APPO
from ray.rllib.env.env_runner_group import EnvRunnerGroup
@ray.remote
class SharedDataActor:
    """Simple example of an actor that's accessible from all other actors of an algo.

    Exposes remote APIs `put` and `get` to other actors for storing and
    retrieving arbitrary data.
    """

    def __init__(self):
        # Plain dict mapping user-provided keys to arbitrary values.
        self.storage = {}

    def get(self, key, delete: bool = False):
        """Returns the value under `key` (None if absent); optionally removes it."""
        if delete:
            return self.storage.pop(key, None)
        return self.storage.get(key)

    def put(self, key, value):
        """Stores `value` under `key`, overwriting any previous value."""
        self.storage[key] = value

    def get_state(self):
        """Returns the entire storage dict (used for checkpointing)."""
        return self.storage

    def set_state(self, state):
        """Replaces the entire storage dict (used when restoring)."""
        self.storage = state
class APPOWithSharedDataActor(APPO):
    """APPO subclass that creates and shares a `SharedDataActor`.

    The shared actor is created in `setup` and its handle distributed to all
    EnvRunners (incl. eval ones), Learners, aggregator actors, and - where
    present - the individual learner connector pieces. The actor's storage is
    included in the algorithm's checkpoint state via `get_state`/`set_state`.
    """

    def setup(self, config: AlgorithmConfig):
        # Call to parent `setup`.
        super().setup(config)
        # Create shared data actor.
        self.shared_data_actor = SharedDataActor.remote()

        # Share the actor handle with all other relevant actors.
        def _share(actor, shared_act=self.shared_data_actor):
            actor._shared_data_actor = shared_act
            # Also add shared actor reference to all the learner connector
            # pieces, if applicable.
            if hasattr(actor, "_learner_connector") and actor._learner_connector:
                for conn in actor._learner_connector:
                    conn._shared_data_actor = shared_act

        self.env_runner_group.foreach_env_runner(func=_share)
        if self.eval_env_runner_group:
            self.eval_env_runner_group.foreach_env_runner(func=_share)
        self.learner_group.foreach_learner(func=_share)
        if self._aggregator_actor_manager:
            self._aggregator_actor_manager.foreach_actor(func=_share)

    def get_state(self, *args, **kwargs):
        """Returns the algo state, including the shared actor's storage."""
        state = super().get_state(*args, **kwargs)
        # Add shared actor's state.
        state["shared_data_actor"] = ray.get(self.shared_data_actor.get_state.remote())
        return state

    def set_state(self, state, *args, **kwargs):
        """Restores the algo state, including the shared actor's storage."""
        super().set_state(state, *args, **kwargs)
        # Set shared actor's state.
        if "shared_data_actor" in state:
            self.shared_data_actor.set_state.remote(state["shared_data_actor"])

    def restore_env_runners(self, env_runner_group: EnvRunnerGroup) -> List[int]:
        """Restores crashed EnvRunners, then re-sends them the shared state."""
        restored = super().restore_env_runners(env_runner_group)
        # For the restored EnvRunners, send them the latest shared, global
        # state from the `SharedDataActor`.
        # NOTE(review): nothing in this example stores values under
        # f"EnvRunner_{idx}" keys, so `state` is typically None here - confirm
        # the intended key schema against the callers.
        for restored_idx in restored:
            state = ray.get(
                self.shared_data_actor.get.remote(key=f"EnvRunner_{restored_idx}")
            )
            # Fix: the previous lambda only *read* `_global_state` (a no-op
            # whose result was discarded); actually assign the fetched state
            # to the restored EnvRunner.
            env_runner_group.foreach_env_runner(
                lambda env_runner, s=state: setattr(env_runner, "_global_state", s),
                remote_worker_ids=[restored_idx],
                timeout_seconds=0.0,
            )
        return restored
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/examples/algorithms/classes/appo_w_shared_data_actor.py",
"license": "Apache License 2.0",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/_private/path_utils.py | import pathlib
import urllib
"""Cross-platform utilities for manipulating paths and URIs.
NOTE: All functions in this file must support POSIX and Windows.
"""
def is_path(path_or_uri: str) -> bool:
    """Returns True if `path_or_uri` is a filesystem path, False if it is a URI.

    Windows paths start with a drive name (e.g. 'C:\\Users\\mp5n6ul72w') which
    `urllib.parse.urlparse` interprets as a URI scheme ('c'), so they need to
    be treated differently from POSIX paths: on Windows, a string only counts
    as a URI if its parsed scheme differs from its (lowercased) drive letter.

    Args:
        path_or_uri: The string to classify.

    Returns:
        True if the string is a local path, False if it is a URI.

    Raises:
        TypeError: If `path_or_uri` is not a string.
    """
    if not isinstance(path_or_uri, str):
        # Fix: removed the stray leading space from the error message.
        raise TypeError(f"path_or_uri must be a string, got {type(path_or_uri)}.")
    parsed_path = pathlib.Path(path_or_uri)
    parsed_uri = urllib.parse.urlparse(path_or_uri)
    if isinstance(parsed_path, pathlib.PurePosixPath):
        # On POSIX, any string with a URI scheme is not a path.
        return not parsed_uri.scheme
    elif isinstance(parsed_path, pathlib.PureWindowsPath):
        # urlparse lowercases the scheme, so compare it against the lowercased
        # drive letter with the trailing ':' removed.
        return parsed_uri.scheme == parsed_path.drive.strip(":").lower()
    else:
        # This should never happen.
        raise TypeError(f"Unsupported path type: {type(parsed_path).__name__}")
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/_private/path_utils.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:python/ray/tests/test_path_utils.py | import sys
import pytest
import ray._private.path_utils as PathUtils
# NOTE: Since PathUtils is cross-platform, each function _must_
# have tests for Windows and Posix (Linux/MacOS).
# True on Linux/macOS, False on Windows; selects platform-specific tests below.
_is_posix = sys.platform in {"linux", "darwin"}
@pytest.mark.skipif(_is_posix, reason="Cannot create a Windows Path on POSIX systems.")
def test_is_path_returns_true_for_windows_path():
    # A drive-prefixed Windows path must be classified as a path, not a URI.
    assert PathUtils.is_path("C:\\Some\\Dir")
@pytest.mark.skipif(not _is_posix, reason="Cannot create a POSIX Path on Windows.")
def test_is_path_returns_true_for_posix_path():
    # An absolute POSIX path must be classified as a path, not a URI.
    assert PathUtils.is_path("/home/some/dir")
def test_is_path_returns_false_for_uri():
    # Anything carrying a URI scheme (here: s3) is not a local path.
    assert not PathUtils.is_path("s3://some/remote/file")
def test_is_path_raises_error_for_non_string_input():
    # Non-string inputs must be rejected with a TypeError.
    non_string_input = 1
    with pytest.raises(TypeError, match="must be a string"):
        PathUtils.is_path(non_string_input)
# Allow running this test module directly (outside a plain `pytest` call).
if __name__ == "__main__":
    sys.exit(pytest.main(["-sv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/test_path_utils.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:rllib/utils/metrics/tests/test_metrics_logger.py | import time
import numpy as np
import pytest
from ray.rllib.utils.metrics.metrics_logger import MetricsLogger
from ray.rllib.utils.metrics.stats import (
EmaStats,
LifetimeSumStats,
MeanStats,
SumStats,
)
from ray.rllib.utils.test_utils import check
@pytest.fixture
def root_logger():
    # Root logger: the final aggregation endpoint (e.g. the Algorithm itself).
    return MetricsLogger(root=True)
@pytest.fixture
def leaf1():
    # First non-root (leaf) logger, e.g. an EnvRunner-level logger.
    return MetricsLogger(root=False)
@pytest.fixture
def leaf2():
    # Second non-root (leaf) logger, for multi-source aggregation tests.
    return MetricsLogger(root=False)
@pytest.fixture
def intermediate():
    # Non-root logger sitting between leafs and root (e.g. an aggregator).
    return MetricsLogger(root=False)
@pytest.mark.parametrize(
    "reduce_method,values,expected",
    [
        ("mean", [0.1, 0.2], 0.15),
        ("min", [0.3, 0.1, 0.2], 0.1),
        ("sum", [10, 20], 30),
        ("lifetime_sum", [10, 20], 30),
        ("ema", [1.0, 2.0], 1.01),
        ("item", [0.1, 0.2], 0.2),
        ("item_series", [0.1, 0.2], [0.1, 0.2]),
    ],
)
def test_basic_peek_and_reduce(root_logger, reduce_method, values, expected):
    """Test different reduction methods (mean, min, sum) with parameterization."""
    key = f"{reduce_method}_metric"
    for value in values:
        root_logger.log_value(key, value, reduce=reduce_method)
    # peek() and reduce() must agree on the reduced value.
    check(root_logger.peek(key), expected)
    check(root_logger.reduce()[key], expected)
@pytest.mark.parametrize(
"reduce_method,leaf1_values,leaf2_values,intermediate_values,"
"leaf1_expected,leaf2_expected,intermediate_expected_after_aggregate,"
"intermediate_expected_after_log,root_expected_leafs,root_expected_intermediate",
[
# MeanStats
(
"mean", # reduction method name
[1.0, 2.0], # values logged to leaf1 logger
[3.0, 4.0], # values logged to leaf2 logger
[5.0, 6.0], # values logged at intermediate logger
1.5, # result from leaf1 after logging (mean of [1, 2])
3.5, # result from leaf2 after logging (mean of [3, 4])
2.5, # result at intermediate after aggregating from leafs (mean of [1.5, 3.5])
5.5, # result at intermediate after logging values (mean of [5.0, 6.0])
2.5, # result at root from aggregated leafs (mean of [1.5, 3.5])
5.5, # result at root from intermediate logged values (mean of [5.0, 6.0])
),
# EmaStats with default coefficient (0.01)
(
"ema", # reduction method name
[1.0, 2.0], # values logged to leaf1 logger
[3.0, 4.0], # values logged to leaf2 logger
[5.0, 6.0], # values logged at intermediate logger
1.01, # result from leaf1 after logging (EMA of [1, 2] with coeff 0.01)
3.01, # result from leaf2 after logging (EMA of [3, 4] with coeff 0.01)
2.01, # result at intermediate after aggregating from leafs (mean of [1.01, 3.01])
5.01, # result at intermediate after logging values (EMA of [5.0, 6.0] with coeff 0.01)
2.01, # result at root from aggregated leafs (mean of [1.01, 3.01])
5.01, # result at root from intermediate logged values (EMA of [5.0, 6.0] with coeff 0.01)
),
# SumStats
(
"sum", # reduction method name
[10, 20], # values logged to leaf1 logger
[30, 40], # values logged to leaf2 logger
[50, 60], # values logged at intermediate logger
30, # result from leaf1 after logging (sum of [10, 20])
70, # result from leaf2 after logging (sum of [30, 40])
100, # result at intermediate after aggregating from leafs (sum of [30, 70])
110, # result at intermediate after logging values (sum of [50, 60])
100, # result at root from aggregated leafs (sum of [30, 70])
110, # result at root from intermediate logged values (sum of [50, 60])
),
# LifetimeSumStats
(
"lifetime_sum", # reduction method name
[10, 20], # values logged to leaf1 logger
[30, 40], # values logged to leaf2 logger
[50, 60], # values logged at intermediate logger
[
30
], # result from leaf1 after logging (lifetime sum of [10, 20], returns list)
[
70
], # result from leaf2 after logging (lifetime sum of [30, 40], returns list)
[
100
], # result at intermediate after aggregating from leafs (sum of [30, 70], returns list)
[
110
], # result at intermediate after logging values (sum of [50, 60], returns list)
100, # result at root from aggregated leafs (root logger converts list to scalar)
110, # result at root from intermediate logged values (root logger converts list to scalar)
),
# MinStats
(
"min", # reduction method name
[5.0, 3.0], # values logged to leaf1 logger
[4.0, 2.0], # values logged to leaf2 logger
[1.0, 0.5], # values logged at intermediate logger
3.0, # result from leaf1 after logging (min of [5.0, 3.0])
2.0, # result from leaf2 after logging (min of [4.0, 2.0])
2.0, # result at intermediate after aggregating from leafs (min of [3.0, 2.0])
0.5, # result at intermediate after logging values (min of [1.0, 0.5])
2.0, # result at root from aggregated leafs (min of [3.0, 2.0])
0.5, # result at root from intermediate logged values (min of [1.0, 0.5])
),
# MaxStats
(
"max", # reduction method name
[5.0, 7.0], # values logged to leaf1 logger
[4.0, 6.0], # values logged to leaf2 logger
[8.0, 9.0], # values logged at intermediate logger
7.0, # result from leaf1 after logging (max of [5.0, 7.0])
6.0, # result from leaf2 after logging (max of [4.0, 6.0])
7.0, # result at intermediate after aggregating from leafs (max of [7.0, 6.0])
9.0, # result at intermediate after logging values (max of [8.0, 9.0])
7.0, # result at root from aggregated leafs (max of [7.0, 6.0])
9.0, # result at root from intermediate logged values (max of [8.0, 9.0])
),
# PercentilesStats
(
"percentiles", # reduction method name
[10.0, 20.0], # values logged to leaf1 logger
[30.0, 40.0], # values logged to leaf2 logger
[50.0, 60.0], # values logged at intermediate logger
{
0.5: 10.05
}, # result from leaf1 after logging (percentile 0.5 of [10.0, 20.0])
{
0.5: 30.05
}, # result from leaf2 after logging (percentile 0.5 of [30.0, 40.0])
{
0.5: 10.15
}, # result at intermediate after aggregating from leafs (percentile 0.5 of merged [10.0, 20.0, 30.0, 40.0])
{
0.5: 50.05
}, # result at intermediate after logging values (percentile 0.5 of [50.0, 60.0])
{
0.5: 10.15
}, # result at root from aggregated leafs (same as intermediate after aggregate)
{
0.5: 50.05
}, # result at root from intermediate logged values (percentile 0.5 of [50.0, 60.0])
),
# ItemSeriesStats
(
"item_series", # reduction method name
[1.0, 2.0], # values logged to leaf1 logger
[3.0, 4.0], # values logged to leaf2 logger
[5.0, 6.0], # values logged at intermediate logger
[
1.0,
2.0,
], # result from leaf1 after logging (series of [1.0, 2.0])
[
3.0,
4.0,
], # result from leaf2 after logging (series of [3.0, 4.0])
[
1.0,
2.0,
3.0,
4.0,
], # result at intermediate after aggregating from leafs (concatenated series from leafs)
[
5.0,
6.0,
], # result at intermediate after logging values (series of [5.0, 6.0])
[
1.0,
2.0,
3.0,
4.0,
], # result at root from aggregated leafs (concatenated series from leafs)
[
5.0,
6.0,
], # result at root from intermediate logged values (series of [5.0, 6.0])
),
],
)
def test_multi_stage_aggregation(
    root_logger,
    leaf1,
    leaf2,
    intermediate,
    reduce_method,
    leaf1_values,
    leaf2_values,
    intermediate_values,
    leaf1_expected,
    leaf2_expected,
    intermediate_expected_after_aggregate,
    intermediate_expected_after_log,
    root_expected_leafs,
    root_expected_intermediate,
):
    """Test multi-stage aggregation for different Stats classes.
    This is a comprehensive test of how we envision MetricsLogger to be used in RLlib.
    It also creates a bunch of test coverage for Stats classes, which are tightly coupled with MetricsLogger.
    Tests the aggregation flow:
    1. Two leaf loggers log values
    2. One intermediate logger aggregates from leaf loggers and logs values
    3. One root logger aggregates only
    """
    metric_name_leafs = reduce_method + "_metric_leaf"
    metric_name_intermediate = reduce_method + "_metric_intermediate"
    # Helper function to check values (handles PercentilesStats specially)
    def check_value(actual, expected):
        if reduce_method == "percentiles":
            # If actual is a PercentilesStats object (from reduce(compile=False)), call peek() to get dict
            if hasattr(actual, "peek"):
                actual = actual.peek()
            assert isinstance(actual, dict)
            assert 0.5 in actual
            if expected is not None:
                check(actual[0.5], expected[0.5], atol=0.01)
        elif expected is not None:
            check(actual, expected)
    # Leaf stage
    # Prepare kwargs for PercentileStats if needed
    if reduce_method == "percentiles":
        log_kwargs = {"window": 10, "percentiles": [0.5]}
    else:
        log_kwargs = {}
    # Log the raw values into both leaf loggers and verify their local
    # (per-leaf) reductions.
    for val in leaf1_values:
        leaf1.log_value(metric_name_leafs, val, reduce=reduce_method, **log_kwargs)
    for val in leaf2_values:
        leaf2.log_value(metric_name_leafs, val, reduce=reduce_method, **log_kwargs)
    check_value(leaf1.peek(metric_name_leafs), leaf1_expected)
    check_value(leaf2.peek(metric_name_leafs), leaf2_expected)
    leaf1_metrics = leaf1.reduce(compile=False)
    leaf2_metrics = leaf2.reduce(compile=False)
    # Intermediate stage
    # Note: For percentiles, intermediate loggers cannot log values directly
    # So we skip intermediate logging for percentiles and only test aggregation
    if reduce_method != "percentiles":
        for val in intermediate_values:
            intermediate.log_value(
                metric_name_intermediate, val, reduce=reduce_method, **log_kwargs
            )
    # The intermediate logger merges both leafs' (non-compiled) results.
    intermediate.aggregate([leaf1_metrics, leaf2_metrics])
    intermediate_metrics_after_aggregate = intermediate.reduce(compile=False)
    check_value(
        intermediate_metrics_after_aggregate[metric_name_leafs],
        intermediate_expected_after_aggregate,
    )
    if reduce_method != "percentiles":
        check_value(
            intermediate_metrics_after_aggregate[metric_name_intermediate],
            intermediate_expected_after_log,
        )
    # Aggregate at root level
    root_logger.aggregate([intermediate_metrics_after_aggregate])
    root_value_leafs = root_logger.peek(metric_name_leafs)
    check_value(root_value_leafs, root_expected_leafs)
    if reduce_method != "percentiles":
        root_value_intermediate = root_logger.peek(metric_name_intermediate)
        check_value(root_value_intermediate, root_expected_intermediate)
def test_windowed_reduction(root_logger, leaf1, leaf2):
    """Test window-based reduction with various window sizes."""
    # Each case: (key, reduce method, values logged per leaf, expected root
    # value after aggregating both leafs over a window of the last 2 values).
    cases = [
        ("window_loss", "mean", [0.1, 0.2, 0.3], 0.25),  # mean of [0.2, 0.3]
        ("window_min", "min", [0.3, 0.1, 0.2], 0.1),  # min of [0.1, 0.2]
        ("window_sum", "sum", [10, 20, 30], 100),  # sum of [20, 30] from 2 leafs
    ]
    for key, reduce_method, values, expected in cases:
        for leaf in (leaf1, leaf2):
            # The first log call establishes reduce method and window size.
            leaf.log_value(key, values[0], reduce=reduce_method, window=2)
            for value in values[1:]:
                leaf.log_value(key, value)
        root_logger.aggregate(
            [leaf1.reduce(compile=False), leaf2.reduce(compile=False)]
        )
        check(root_logger.peek(key), expected)
def test_nested_keys(root_logger):
    """Test logging with nested key structures."""
    nested_key = ("nested", "key")
    for value in (1.0, 2.0):
        root_logger.log_value(nested_key, value)
    # Default reduction is EMA (coeff 0.01): 1.0 + 0.01 * (2.0 - 1.0) = 1.01.
    check(root_logger.peek(nested_key), 1.01)
    # reduce() returns a nested dict mirroring the key tuple's structure.
    check(root_logger.reduce()["nested"]["key"], 1.01)
def test_time_logging(root_logger):
    # Time two blocks under the same key, reduced as a mean over a window of 2.
    for sleep_secs in (0.01, 0.02):
        with root_logger.log_time("mean_time", reduce="mean", window=2):
            time.sleep(sleep_secs)
    # Expect roughly mean([0.01, 0.02]); generous tolerance for timer jitter.
    check(root_logger.peek("mean_time"), 0.015, atol=0.05)
def test_state_management(root_logger):
    """Test state management (get_state and set_state)."""
    root_logger.log_value("state_test", 0.1)
    root_logger.log_value("state_test", 0.2)
    # Round-trip the state through a freshly created logger.
    restored_logger = MetricsLogger()
    restored_logger.set_state(root_logger.get_state())
    # EMA(0.01) of [0.1, 0.2] = 0.1 + 0.01 * (0.2 - 0.1) = 0.101.
    check(restored_logger.peek("state_test"), 0.101)
def test_throughput_tracking(root_logger, leaf1, leaf2):
    """Test throughput tracking functionality."""
    # Reset the root logger's init time so throughput math starts "now".
    root_logger._time_when_initialized = time.perf_counter()
    start_time = time.perf_counter()
    leaf1.log_value("value", 1, reduce="sum", with_throughput=True)
    leaf1.log_value("value", 2)
    leaf2.log_value("value", 3, reduce="sum", with_throughput=True)
    leaf2.log_value("value", 4)
    metrics = [leaf1.reduce(compile=False), leaf2.reduce(compile=False)]
    time.sleep(0.1)
    # 1 + 2 + 3 + 4 = 10 units logged since start_time.
    expected_throughput = 10 / (time.perf_counter() - start_time)
    root_logger.aggregate(metrics)
    check(root_logger.peek("value"), 10)
    check(root_logger.stats["value"].throughputs, expected_throughput, rtol=0.1)
    # Second round: don't reset the init time since we are not starting a new
    # experiment - the lifetime throughput keeps accumulating.
    for leaf, vals in ((leaf1, (5, 6)), (leaf2, (7, 8))):
        for v in vals:
            leaf.log_value("value", v)
    metrics = [leaf1.reduce(compile=False), leaf2.reduce(compile=False)]
    time.sleep(0.1)
    # Total so far: 10 + 5 + 6 + 7 + 8 = 36 units since start_time.
    expected_throughput = 36 / (time.perf_counter() - start_time)
    root_logger.aggregate(metrics)
    check(root_logger.peek("value"), 36)
    check(root_logger.peek("value", throughput=True), expected_throughput, rtol=0.1)
def test_reset_and_delete(root_logger):
    """Test reset and delete functionality."""
    root_logger.log_value("test1", 0.1)
    root_logger.log_value("test2", 0.2)
    # Deleting a key makes subsequent peeks on it raise a KeyError.
    root_logger.delete("test1")
    with pytest.raises(KeyError):
        root_logger.peek("test1")
    # Resetting wipes all remaining stats.
    root_logger.reset()
    check(root_logger.reduce(), {})
def test_compile(root_logger):
    """Test the compile method that combines values and throughputs."""
    # Override the initialization time to make the test more accurate.
    root_logger._time_when_initialized = time.perf_counter()
    start_time = time.perf_counter()
    # Log some values with throughput tracking
    root_logger.log_value("count", 1, reduce="sum", with_throughput=True)
    root_logger.log_value("count", 2)
    # Log some nested values with throughput tracking
    root_logger.log_value(
        ["nested", "count"], 1, reduce="lifetime_sum", with_throughput=True
    )
    root_logger.log_value(["nested", "count"], 2)
    # Log some values without throughput tracking
    root_logger.log_value("simple", 1)
    root_logger.log_value("simple", 2)
    time.sleep(0.1)
    end_time = time.perf_counter()
    # 3 units (1 + 2) were logged under each throughput-tracked key.
    throughput = 3 / (end_time - start_time)
    # Get compiled results
    compiled = root_logger.compile()
    # Check that values and throughputs are correctly combined
    check(compiled["count"], 3)  # sum of [1, 2]
    check(compiled["count_throughput"], throughput, rtol=0.1)  # initial throughput
    check(compiled["nested"]["count"], 3)  # sum of [1, 2]
    check(
        compiled["nested"]["count_throughput"]["throughput_since_last_reduce"],
        throughput,
        rtol=0.1,
    )  # initial throughput
    check(
        compiled["nested"]["count_throughput"]["throughput_since_last_restore"],
        throughput,
        rtol=0.1,
    )  # initial throughput
    # Default reduce is EMA(0.01): 1 + 0.01 * (2 - 1) = 1.01.
    check(compiled["simple"], 1.01)
    assert (
        "simple_throughput" not in compiled
    )  # no throughput for non-throughput metric
def test_peek_with_default(root_logger):
    """Test peek method with default argument."""
    # Unknown keys fall back to the provided default.
    check(root_logger.peek("non_existent", default=0.0), 0.0)
    # Known keys ignore the default and return the logged value.
    root_logger.log_value("existing", 1.0)
    check(root_logger.peek("existing", default=0.0), 1.0)
def test_edge_cases(root_logger):
    """Test edge cases and error handling."""
    # Unknown reduction methods are rejected.
    with pytest.raises(ValueError):
        root_logger.log_value("invalid_reduce", 0.1, reduce="invalid")
    # `window` and `ema_coeff` are mutually exclusive.
    with pytest.raises(ValueError):
        root_logger.log_value("invalid_window_ema", 0.1, window=2, ema_coeff=0.1)
    # After reduce(), the stat is cleared and peek returns NaN.
    root_logger.log_value("clear_test", 0.1)
    root_logger.log_value("clear_test", 0.2)
    check(root_logger.reduce()["clear_test"], 0.101)
    check(root_logger.peek("clear_test"), np.nan)
def test_legacy_stats_conversion():
    """Test converting legacy Stats objects to MetricsLogger state dict."""
    from ray.rllib.utils.metrics.legacy_stats import Stats

    # Nested structure of legacy Stats objects with various configurations:
    # different reduce methods, window sizes, an EMA coefficient, both
    # clear_on_reduce flags, and one nested (one-level-deep) sub-dict.
    legacy_stats = {
        # Mean with window.
        "mean_metric": Stats(init_values=[1.0, 2.0, 3.0], reduce="mean", window=10),
        # Mean with EMA coefficient.
        "ema_metric": Stats(init_values=[5.0, 6.0], reduce="mean", ema_coeff=0.1),
        # Min/max with window.
        "min_metric": Stats(init_values=[10.0, 5.0, 15.0], reduce="min", window=5),
        "max_metric": Stats(init_values=[10.0, 25.0, 15.0], reduce="max", window=5),
        # Sum with window and clearing.
        "sum_metric": Stats(
            init_values=[1.0, 2.0, 3.0],
            reduce="sum",
            window=10,
            clear_on_reduce=True,
        ),
        # Lifetime sum (sum with clear_on_reduce=False).
        "lifetime_sum_metric": Stats(
            init_values=[10.0, 20.0, 30.0],
            reduce="sum",
            window=None,
            clear_on_reduce=False,
        ),
        # Nested stats (one level deep).
        "nested": {
            "loss": Stats(init_values=[0.5, 0.4, 0.3], reduce="mean", window=100),
            "reward": Stats(
                init_values=[10.0, 15.0, 20.0], reduce="mean", window=50
            ),
        },
    }

    def _legacy_to_state(legacy_stats_dict):
        """Recursively converts legacy stats to MetricsLogger state format."""
        flat_stats = {}

        def _walk(d, path_parts):
            for key, value in d.items():
                current_path = path_parts + [key]
                if isinstance(value, Stats):
                    # Flatten the nested path into a "--"-joined key.
                    flat_stats["--".join(current_path)] = value.get_state()
                elif isinstance(value, dict):
                    _walk(value, current_path)

        _walk(legacy_stats_dict, [])
        return {"stats": flat_stats}

    # Load the converted legacy state into a fresh (non-root) MetricsLogger.
    logger = MetricsLogger(root=False)
    logger.set_state(_legacy_to_state(legacy_stats))

    # Top-level stats must reflect the legacy values.
    check(logger.peek("mean_metric"), 2.0)  # mean of [1, 2, 3]
    check(logger.peek("min_metric"), 5.0)  # min of [10, 5, 15]
    check(logger.peek("max_metric"), 25.0)  # max of [10, 25, 15]
    check(logger.peek("sum_metric"), 6.0)  # sum of [1, 2, 3]
    # Non-root loggers report 0 for lifetime sums.
    check(logger.peek("lifetime_sum_metric"), 0.0)
    # Nested stats.
    check(logger.peek(("nested", "loss")), 0.4)  # mean of [0.5, 0.4, 0.3]
    check(logger.peek(("nested", "reward")), 15.0)  # mean of [10, 15, 20]

    # The restored logger must accept further logging ...
    logger.log_value("mean_metric", 4.0, reduce="mean", window=10)
    logger.log_value(("nested", "loss"), 0.2, reduce="mean", window=100)
    # ... and integrate the new values into its reduced output.
    results = logger.reduce(compile=True)
    assert "mean_metric" in results
    assert "nested" in results
    assert "loss" in results["nested"]
def test_log_dict():
    """Test logging dictionaries of values.
    MetricsLogger.log_dict is a thin wrapper around MetricsLogger.log_value.
    We therefore don't test extensively here.
    """
    logger = MetricsLogger(root=False)
    # The first dict establishes the keys and their reduce method ...
    logger.log_dict({"metric1": 1.0, "metric2": 2.0}, reduce="mean")
    check(logger.peek("metric1"), 1.0)
    check(logger.peek("metric2"), 2.0)
    # ... a second dict folds new values into the running means.
    logger.log_dict({"metric1": 2.0, "metric2": 3.0}, reduce="mean")
    check(logger.peek("metric1"), 1.5)
    check(logger.peek("metric2"), 2.5)
def test_log_dict_root_logger(root_logger):
    """Test that root loggers can use log_dict and create leaf stats."""
    # Root loggers accept log_dict directly ...
    root_logger.log_dict({"metric1": 1.0, "metric2": 2.0}, reduce="mean")
    check(root_logger.peek("metric1"), 1.0)
    check(root_logger.peek("metric2"), 2.0)
    # ... and the created leaf stats accept further pushed values.
    root_logger.log_value("metric1", 2.0)
    check(root_logger.peek("metric1"), 1.5)
    root_logger.log_value("metric3", 3.0)
    check(root_logger.peek("metric3"), 3.0)
def test_compatibility_logic(root_logger):
    """Test compatibility logic that supersedes the 'legacy usage of MetricsLogger' comment.

    Each numbered case asserts both the reduced value and the concrete Stats
    subclass the logger chose for a given combination of ``reduce``,
    ``window``, ``clear_on_reduce`` and ``ema_coeff`` arguments.
    """
    # Test behavior 1: No reduce method + window -> should use mean reduction
    root_logger.log_value("metric_with_window", 1, window=2)
    root_logger.log_value("metric_with_window", 2)
    root_logger.log_value("metric_with_window", 3)
    # window=2 keeps only the last two values: mean([2, 3]) == 2.5.
    check(root_logger.peek("metric_with_window"), 2.5)
    assert isinstance(root_logger.stats["metric_with_window"], MeanStats)
    # Test behavior 2: No reduce method (and no window) -> should default to "ema"
    root_logger.log_value("metric_no_reduce", 1.0)
    root_logger.log_value("metric_no_reduce", 2.0)
    # Default EMA coeff 0.01: 0.99 * 1.0 + 0.01 * 2.0 == 1.01.
    check(root_logger.peek("metric_no_reduce"), 1.01)
    assert isinstance(root_logger.stats["metric_no_reduce"], EmaStats)
    # Test behavior 3: reduce=sum + clear_on_reduce=False -> should use lifetime_sum
    root_logger.log_value("metric_lifetime", 10, reduce="sum", clear_on_reduce=False)
    root_logger.log_value("metric_lifetime", 20)
    check(root_logger.peek("metric_lifetime"), 30)
    assert isinstance(root_logger.stats["metric_lifetime"], LifetimeSumStats)
    # Test behavior 4: reduce=sum + clear_on_reduce=True -> should use SumStats (not lifetime_sum)
    root_logger.log_value("metric_sum_clear", 10, reduce="sum", clear_on_reduce=True)
    root_logger.log_value("metric_sum_clear", 20)
    check(root_logger.peek("metric_sum_clear"), 30)
    assert isinstance(root_logger.stats["metric_sum_clear"], SumStats)
    # Test behavior 5: reduce=sum + clear_on_reduce=None -> should use SumStats
    root_logger.log_value("metric_sum_default", 10, reduce="sum")
    root_logger.log_value("metric_sum_default", 20)
    check(root_logger.peek("metric_sum_default"), 30)
    assert isinstance(root_logger.stats["metric_sum_default"], SumStats)
    # Test behavior 6: clear_on_reduce=True with other reduce methods -> should warn but still work
    root_logger.log_value(
        "metric_mean_clear", 1.0, reduce="mean", clear_on_reduce=True, window=5
    )
    root_logger.log_value("metric_mean_clear", 2.0)
    check(root_logger.peek("metric_mean_clear"), 1.5)
    assert isinstance(root_logger.stats["metric_mean_clear"], MeanStats)
    # Test behavior 7: Compatibility logic works with log_dict (requires a
    # non-root logger, since root loggers only aggregate).
    logger = MetricsLogger(root=False)
    logger.log_dict({"metric_dict": 1.0}, window=3)
    logger.log_dict({"metric_dict": 2.0})
    logger.log_dict({"metric_dict": 3.0})
    check(logger.peek("metric_dict"), 2.0)  # mean of [1, 2, 3]
    assert isinstance(logger.stats["metric_dict"], MeanStats)
    # NOTE(review): numbering jumps from 7 to 9 — there is no "behavior 8" case.
    # Test behavior 9: Default EMA coefficient (0.01) is used when not specified
    root_logger.log_value("metric_ema_default", 1.0)
    assert root_logger.stats["metric_ema_default"]._ema_coeff == 0.01
    # Test behavior 10: Custom EMA coefficient is preserved
    root_logger.log_value("metric_ema_custom", 1.0, reduce="ema", ema_coeff=0.1)
    assert root_logger.stats["metric_ema_custom"]._ema_coeff == 0.1
    # Test behavior 11: reduce=None with window -> should use mean (not ema)
    root_logger.log_value("metric_none_window", 1.0, reduce=None, window=2)
    root_logger.log_value("metric_none_window", 2.0)
    check(root_logger.peek("metric_none_window"), 1.5)
    assert isinstance(root_logger.stats["metric_none_window"], MeanStats)
if __name__ == "__main__":
    # Allow running this test module directly (outside of a pytest invocation).
    import sys

    import pytest

    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/utils/metrics/tests/test_metrics_logger.py",
"license": "Apache License 2.0",
"lines": 635,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:rllib/utils/metrics/tests/test_stats.py | """Tests for RLlib's Stats classes.
This file mostly tests Stats atomically.
However, Stats are supposed to be used to aggregate data in a tree-like structure.
Therefore, we achieve a more comprehensive test coverage by testing tree-like aggregation of Stats in the MetricsLogger tests.
"""
import time
import warnings
import numpy as np
import pytest
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.metrics.stats import (
EmaStats,
ItemSeriesStats,
ItemStats,
LifetimeSumStats,
MaxStats,
MeanStats,
MinStats,
PercentilesStats,
SeriesStats,
SumStats,
)
from ray.rllib.utils.test_utils import check
# torch is None when PyTorch is not installed; tensor test paths guard on this.
torch, _ = try_import_torch()
def get_device(use_gpu):
    """Helper to get device based on GPU availability and test parameter."""
    if not use_gpu:
        return torch.device("cpu")
    # GPU requested: skip the test entirely when CUDA is unavailable.
    if not torch.cuda.is_available():
        pytest.skip("GPU not available")
    return torch.device("cuda")
@pytest.mark.parametrize(
    "stats_class,init_kwargs_list,setup_values,expected_reduced",
    [
        (ItemStats, [{}], [5], 5),
        (MeanStats, [{"window": 4}, {}], [2, 4, 6], 4.0),
        (MaxStats, [{"window": 4}, {}], [1, 5, 3], 5),
        (MinStats, [{"window": 4}, {}], [1, 5, 3], 1),
        (SumStats, [{"window": 4}, {}], [1, 5, 3], 9),
        (LifetimeSumStats, [{}], [10, 20], 30),
        (EmaStats, [{"ema_coeff": 0.01}], [10, 20], 10.1),
        (ItemSeriesStats, [{"window": 4}], [1, 2, 3, 4, 5], [2, 3, 4, 5]),
        # Don't test Percentile Stats because reduce behaviour is quite different from other stats
    ],
)
@pytest.mark.parametrize("use_gpu", [False, True])
def test_peek_and_reduce(
    stats_class, init_kwargs_list, setup_values, expected_reduced, use_gpu
):
    """Push values, then verify peek() and reduce() for each stats class.

    Runs once with plain Python numbers and, when torch is available, once per
    torch dtype on CPU or GPU — including a pass with a NaN value mixed in for
    the numeric stats classes.
    """
    for init_kwargs in init_kwargs_list:
        stats = stats_class(**init_kwargs)
        for value in setup_values:
            stats.push(value)
        # peek() and reduce(compile=True) must agree on the reduced value.
        check(stats.peek(), expected_reduced)
        result = stats.reduce(compile=True)
        check(result, expected_reduced)
        if stats_class != LifetimeSumStats:
            # After clear, peek should return the class-specific default value
            if stats_class == ItemStats:
                expected_cleared = None
            else:
                expected_cleared = np.nan
            check(stats.peek(), expected_cleared)
        # Test with PyTorch tensors of different dtypes (for numeric stats only)
        if torch is not None:
            device = get_device(use_gpu)
            dtypes_to_test = [
                torch.float32,
                torch.float64,
                torch.int32,
                torch.int64,
                torch.float16,
            ]
            for dtype in dtypes_to_test:
                if dtype == torch.float16 and stats_class is EmaStats:
                    # float16 values are less precise and errors add up quickly when calculating EMA
                    decimals = 1
                else:
                    decimals = 5
                tensor_stats = stats_class(**init_kwargs)
                for val in setup_values:
                    tensor_val = torch.tensor(val, dtype=dtype, device=device)
                    tensor_stats.push(tensor_val)
                # Verify tensors stay on device before reduce
                if isinstance(tensor_stats, SeriesStats) or isinstance(
                    tensor_stats, PercentilesStats
                ):
                    for value in tensor_stats.values:
                        if torch and isinstance(value, torch.Tensor):
                            assert value.device.type == device.type
                elif (
                    isinstance(tensor_stats, EmaStats)
                    and torch
                    and isinstance(tensor_stats._value, torch.Tensor)
                ):
                    assert tensor_stats._value.device.type == device.type
                elif (
                    isinstance(tensor_stats, LifetimeSumStats)
                    and torch
                    and isinstance(tensor_stats._lifetime_sum, torch.Tensor)
                ):
                    assert tensor_stats._lifetime_sum.device.type == device.type
                # reduce(compile=True) must return plain Python numbers (off-device).
                result = tensor_stats.reduce(compile=True)
                if stats_class is ItemSeriesStats:
                    assert isinstance(result, list)
                    assert isinstance(result[0], (int, float))
                else:
                    assert isinstance(result, (int, float))
                check(result, expected_reduced, decimals=decimals)
                tensor_stats_with_nan = stats_class(**init_kwargs)
                if stats_class not in (ItemSeriesStats, ItemStats):
                    # Test with some NaN values mixed in.
                    # This part of the test is not applicable to ItemSeriesStats and ItemStats because
                    # their reduced values are explicitly expected to change when adding NaNs.
                    for val in setup_values:
                        tensor_val = torch.tensor(val, dtype=dtype, device=device)
                        tensor_stats_with_nan.push(tensor_val)
                    nan_tensor_val = torch.tensor(float("nan"), device=device)
                    tensor_stats_with_nan.push(nan_tensor_val)
                    result_with_nan = tensor_stats_with_nan.reduce(compile=True)
                    # Result should still be valid (stats should handle NaN)
                    assert isinstance(result_with_nan, (int, float))
                    check(result_with_nan, expected_reduced, decimals=decimals)
@pytest.mark.parametrize("use_gpu", [False, True])
def test_peek_and_reduce_percentiles_stats(use_gpu):
    """Peek/reduce for PercentilesStats with Python numbers and torch tensors."""
    # Test with regular Python values
    values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    stats = PercentilesStats(percentiles=[0, 50, 100], window=10)
    for value in values:
        stats.push(value)
    # compile=False peeks the raw windowed values; compile=True the percentiles.
    check(stats.peek(compile=False), values)
    check(stats.peek(compile=True), {0: 1, 50: 5.5, 100: 10})
    result = stats.reduce(compile=True)
    check(result, {0: 1, 50: 5.5, 100: 10})
    # Test with PyTorch tensors on the specified device
    if torch is not None:
        device = get_device(use_gpu)
        dtypes_to_test = [
            torch.float32,
            torch.float64,
            torch.int32,
            torch.int64,
            torch.float16,
        ]
        for dtype in dtypes_to_test:
            tensor_stats = PercentilesStats(percentiles=[0, 50, 100], window=10)
            for val in values:
                tensor_val = torch.tensor(val, dtype=dtype, device=device)
                tensor_stats.push(tensor_val)
            # Verify tensors stay on device before reduce
            for value in tensor_stats.values:
                if torch and isinstance(value, torch.Tensor):
                    assert value.device.type == device.type
            result = tensor_stats.reduce(compile=True)
            # Check the percentile values with tolerance (low-precision dtypes
            # like float16 can deviate slightly).
            check(result[0], 1, decimals=1)
            check(result[50], 5.5, decimals=1)
            check(result[100], 10, decimals=1)
def test_peek_and_reduce_item_series_stats():
    # GPU behaviour for these stats is covered elsewhere.
    stats = ItemSeriesStats(window=10)
    items = list("abcdefghij")
    for item in items:
        stats.push(item)

    # Both the compiled and uncompiled peeks, as well as reduce(), return the
    # raw series of pushed items unchanged.
    assert stats.peek(compile=False) == items
    assert stats.peek(compile=True) == items
    assert stats.reduce(compile=True) == items
@pytest.mark.parametrize(
    "stats_class,init_kwargs,test_values",
    [
        (ItemStats, {}, 123),
        (MeanStats, {"window": 5}, [1, 2, 3]),
        (MaxStats, {"window": 5}, [1, 5, 3]),
        (MinStats, {"window": 5}, [5, 1, 3]),
        (SumStats, {"window": 5}, [1, 2, 3]),
        (LifetimeSumStats, {}, [10, 20]),
        (EmaStats, {"ema_coeff": 0.01}, [10, 20]),
        (PercentilesStats, {"percentiles": [50], "window": 10}, [1, 2, 3]),
        (ItemSeriesStats, {"window": 5}, ["a", "b", "c"]),
    ],
)
def test_state_save_and_load(stats_class, init_kwargs, test_values):
    """Round-trip a stats object through get_state()/from_state()."""
    original = stats_class(**init_kwargs)

    # Normalize scalar test values to a one-element list and push everything.
    values = test_values if isinstance(test_values, list) else [test_values]
    for value in values:
        original.push(value)

    # The serialized state must record which class produced it.
    state = original.get_state()
    check(state["stats_cls_identifier"], original.stats_cls_identifier)

    # Restoring from state must reproduce the same peeked value(s).
    restored = stats_class.from_state(state)
    expected = original.peek()
    actual = restored.peek()
    if isinstance(expected, dict):
        check(isinstance(actual, dict), True)
        for key, value in expected.items():
            check(actual[key], value)
    else:
        check(actual, expected)
@pytest.mark.parametrize(
    "stats_class,init_kwargs,values1,values2,expected_result",
    [
        (MeanStats, {"window": 10}, [1, 2, 3], [4, 5], 3.0),
        (MaxStats, {"window": 10}, [1, 2, 3], [4, 5], 5),
        (MinStats, {"window": 10}, [1, 2, 3], [4, 5], 1),
        (SumStats, {"window": 10}, [1, 2, 3], [4, 5], 15),
        (EmaStats, {"ema_coeff": 0.01}, [1, 2], [3, 4], 2.01),
        (ItemSeriesStats, {"window": 10}, [1, 2], [3, 4], [1, 2, 3, 4]),
        (LifetimeSumStats, {}, [10, 20], [30, 40], 100),
        # Merging multiple stats is not intended to work for ItemStats (because it only tracks a single item)
    ],
)
def test_merge(stats_class, init_kwargs, values1, values2, expected_result):
    """Merging two leaf stats into a root stat yields the expected reduction."""
    aggregator = stats_class(**init_kwargs, is_root=True, is_leaf=False)

    # Build one leaf stat per value batch.
    leaves = []
    for batch in (values1, values2):
        leaf = stats_class(**init_kwargs, is_root=False, is_leaf=True)
        for value in batch:
            leaf.push(value)
        leaves.append(leaf)

    aggregator.merge(leaves)
    check(aggregator.peek(), expected_result)
# Items stats only allow us to log a single item that should not be reduced.
def test_merge_item_stats():
    aggregator = ItemStats(is_root=True, is_leaf=False)

    # Merging a single incoming stat adopts its item; a later merge replaces it.
    for item in (42, 100):
        incoming = ItemStats(is_root=False, is_leaf=True)
        incoming.push(item)
        aggregator.merge([incoming])
        check(aggregator.peek(), item)

    # Merging more than one stats object at once must raise.
    first = ItemStats(is_root=False, is_leaf=True)
    first.push(1)
    second = ItemStats()
    second.push(2)
    with pytest.raises(AssertionError, match="should only be merged with one other"):
        aggregator.merge([first, second])
@pytest.mark.parametrize(
    "stats_class,init_kwargs",
    [
        (ItemStats, {}),
        (MeanStats, {"window": 10}),
        (MaxStats, {"window": 5}),
        (MinStats, {"window": 5}),
        (SumStats, {"window": 5}),
        (LifetimeSumStats, {}),
        (EmaStats, {"ema_coeff": 0.1}),
        (PercentilesStats, {"percentiles": [50], "window": 10}),
        (ItemSeriesStats, {"window": 5}),
    ],
)
@pytest.mark.parametrize("is_root", [True, False])
@pytest.mark.parametrize("is_leaf", [True, False])
def test_clone(stats_class, init_kwargs, is_root, is_leaf):
    """clone() must copy configuration (window, coeffs, root/leaf flags) but
    start with an empty value state."""
    original = stats_class(**init_kwargs, is_root=is_root, is_leaf=is_leaf)
    # Non-leaf stats can't be pushed to directly — feed them via merge instead.
    if original.is_leaf:
        original.push(123)
    else:
        # Create another stats object to merge from
        merge_from = stats_class(**init_kwargs, is_root=False, is_leaf=True)
        merge_from.push(123)
        original.merge([merge_from])
    # Create similar stats
    similar = original.clone()
    # Check class-specific attributes
    # Note: PercentilesStats._get_init_args() doesn't preserve window (implementation issue)
    if hasattr(original, "_window") or hasattr(similar, "_window"):
        check(similar._window, original._window)
    if hasattr(original, "_ema_coeff") or hasattr(similar, "_ema_coeff"):
        check(similar._ema_coeff, original._ema_coeff)
    if hasattr(original, "_percentiles") or hasattr(similar, "_percentiles"):
        check(similar._percentiles, original._percentiles)
    if hasattr(original, "is_root") or hasattr(similar, "is_root"):
        check(similar.is_root, original.is_root)
    if hasattr(original, "is_leaf") or hasattr(similar, "is_leaf"):
        check(similar.is_leaf, original.is_leaf)
    # The clone must not inherit any of the pushed/merged values — it peeks
    # to the class-specific "empty" default.
    result = similar.peek()
    if stats_class == ItemStats:
        check(result, None)
    elif stats_class == LifetimeSumStats:
        check(result, 0)
    elif stats_class == ItemSeriesStats:
        check(result, [])
    elif stats_class == PercentilesStats:
        # Should have dict with percentile keys, but empty
        check(list(result.keys()), original._percentiles)
        check(list(result.values()), [None])
    elif isinstance(result, float):
        # All others should be NaN
        check(result, np.nan)
# Series stats allow us to set a window size and reduce the values in the window.
@pytest.mark.parametrize(
    "stats_class,window,values,expected_result",
    [
        # Basic tests with window=5
        (MeanStats, 5, [1, 2, 3], 2.0),
        (MaxStats, 5, [1, 2, 3], 3),
        (MinStats, 5, [1, 2, 3], 1),
        (SumStats, 5, [1, 2, 3], 6),
        # Window tests with window=3, values exceeding window size (fills window)
        (MeanStats, 3, [1, 2, 3, 4, 5], 4.0),  # Mean of 3, 4, 5
        (MaxStats, 3, [1, 2, 3, 4, 5], 5),  # Max of 3, 4, 5
        (MinStats, 3, [1, 2, 3, 4, 5], 3),  # Min of 3, 4, 5
        (SumStats, 3, [1, 2, 3, 4, 5], 12),  # Sum of 3, 4, 5
    ],
)
def test_series_stats_windowed(stats_class, window, values, expected_result):
    """Pushing past the window keeps only the newest ``window`` values."""
    windowed = stats_class(window=window)
    for value in values:
        windowed.push(value)
    # All parametrized examples are chosen to retain exactly three values.
    check(len(windowed), 3)
    check(windowed.peek(), expected_result)
# Series stats without a window are used to track running values that are not reduced.
@pytest.mark.parametrize(
    "stats_class,values,expected_results",
    [
        (MeanStats, [10, 20, 30], [10.0, 15.0, 20.0]),  # Running mean
        (MaxStats, [5, 10, 3], [5, 10, 10]),  # Running max
        (MinStats, [5, 2, 10], [5, 2, 2]),  # Running min
        (SumStats, [10, 20, 30], [10, 30, 60]),  # Running sum
    ],
)
def test_series_stats_no_window(stats_class, values, expected_results):
    """Without a window, each push updates a running reduction over all values."""
    running = stats_class(window=None)
    for step, value in enumerate(values):
        running.push(value)
        check(running.peek(), expected_results[step])
def test_sum_stats_throughput():
    """Test SumStats with throughput for different node types."""
    stats = SumStats(window=None, with_throughput=True)
    check(stats.has_throughputs, True)

    # Each batch pushes two values spread over ~0.3 seconds, so the expected
    # throughput is (batch total) / 0.3; reduce() starts a fresh measurement.
    batches = [
        (10, 20, 100),  # 30 over ~0.3s -> ~100
        (20, 40, 200),  # 60 over ~0.3s -> ~200
    ]
    for index, (first, second, expected) in enumerate(batches):
        if index > 0:
            stats.reduce()
        stats.push(first)
        time.sleep(0.1)
        stats.push(second)
        time.sleep(0.2)
        check(stats.throughputs, expected, atol=20)
@pytest.mark.parametrize(
    "is_root,is_leaf",
    [
        (True, True),  # Root + Leaf: standalone, never resets
        (False, True),  # Non-root + Leaf: worker, resets after reduce
    ],
)
def test_lifetime_sum_stats_throughput(is_root, is_leaf):
    """Test LifetimeSumStats with throughput for different node types.

    Uses short sleeps to create a known time base; all throughput checks use a
    generous absolute tolerance to absorb scheduling jitter.
    """
    stats = LifetimeSumStats(with_throughput=True, is_root=is_root, is_leaf=is_leaf)
    check(stats.has_throughputs, True)
    # First batch: push 10, then 20 (total: 30)
    stats.push(10)
    time.sleep(0.1)
    stats.push(20)
    time.sleep(0.2)
    throughputs = stats.throughputs
    # 30 over ~0.3 seconds = ~100
    check(throughputs["throughput_since_last_reduce"], 100, atol=20)
    if is_root:
        # Only root stats track throughput_since_last_restore
        check(throughputs["throughput_since_last_restore"], 100, atol=20)
    else:
        # Non-root stats should not have throughput_since_last_restore
        assert "throughput_since_last_restore" not in throughputs
    stats.reduce()
    # Second batch: push 20, then 40 (total: 60)
    stats.push(20)
    time.sleep(0.1)
    stats.push(40)
    time.sleep(0.2)
    throughputs = stats.throughputs
    # 60 over ~0.3 seconds = ~200
    check(throughputs["throughput_since_last_reduce"], 200, atol=20)
    if is_root:
        # Root stats never reset, so lifetime total is 30 + 60 = 90 over ~0.6 seconds = ~150
        check(throughputs["throughput_since_last_restore"], 150, atol=20)
    else:
        # Non-root stats should not have throughput_since_last_restore
        assert "throughput_since_last_restore" not in throughputs
@pytest.mark.parametrize(
    "stats_class,setup_values,expected_value",
    [
        (MeanStats, [10, 20], 15.0),  # Mean of 10, 20
        (MaxStats, [10, 20], 20),  # Max of 10, 20
        (MinStats, [10, 20], 10),  # Min of 10, 20
        (SumStats, [10, 20], 30),  # Sum of 10, 20
        (EmaStats, [10, 20], 10.1),  # EMA with coeff 0.01: 0.99*10 + 0.01*20
        (LifetimeSumStats, [10, 20], 30),  # Lifetime sum of 10, 20
    ],
)
def test_stats_numeric_operations(stats_class, setup_values, expected_value):
    """Test numeric operations on stats objects."""
    # Construct the stats object with class-appropriate settings.
    if stats_class is EmaStats:
        stats = stats_class(ema_coeff=0.01)
    elif stats_class is LifetimeSumStats:
        stats = stats_class()
    else:
        stats = stats_class(window=5)

    for value in setup_values:
        stats.push(value)

    # Arithmetic and comparison operators must act on the reduced value.
    check(float(stats), expected_value)
    for actual, expected in (
        (stats + 5, expected_value + 5),
        (stats - 5, expected_value - 5),
        (stats * 2, expected_value * 2),
        (stats == expected_value, True),
        (stats > expected_value - 1, True),
        (stats < expected_value + 1, True),
        (stats >= expected_value, True),
        (stats <= expected_value, True),
    ):
        check(actual, expected)
@pytest.mark.parametrize(
    "stats_class,init_kwargs,expected_result",
    [
        # SeriesStats return NaN when empty
        (MeanStats, {"window": 5}, np.nan),
        (MaxStats, {"window": 5}, np.nan),
        (MinStats, {"window": 5}, np.nan),
        # SumStats returns NaN when empty (with window)
        (SumStats, {"window": 5}, np.nan),
        # LifetimeSumStats returns 0 when empty
        (LifetimeSumStats, {}, 0),
        # EmaStats returns NaN when empty
        (EmaStats, {"ema_coeff": 0.01}, np.nan),
        # ItemStats returns None when empty
        (ItemStats, {}, None),
        # PercentilesStats returns dict with NaN values when empty
        (PercentilesStats, {"percentiles": [50], "window": 10}, {50: None}),
        # ItemSeriesStats returns empty list when empty
        (ItemSeriesStats, {"window": 5}, []),
    ],
)
def test_stats_empty_reduce(stats_class, init_kwargs, expected_result):
    """Test reducing stats with no values across all stats types."""
    empty = stats_class(**init_kwargs)
    # Peeking empty stats must return the type-specific default value.
    observed = empty.peek()
    if isinstance(expected_result, float) and np.isnan(expected_result):
        # NaN != NaN, so compare via isnan.
        check(np.isnan(observed), True)
    elif isinstance(expected_result, dict):
        # Only PercentilesStats peeks to a dict.
        assert isinstance(empty, PercentilesStats)
        assert isinstance(observed, dict)
        check(list(observed.keys()), list(expected_result.keys()))
        check(list(observed.values()), list(expected_result.values()))
    else:
        check(observed, expected_result)
@pytest.mark.parametrize(
    "stats_class,kwargs,expected_first,expected_first_compile_false,expected_second_normal,expected_second_latest,expected_second_compile_false",
    [
        (
            MeanStats,
            {},
            2.5,
            [2.5],
            10.0,
            20.0,
            [20.0],
        ),
        (
            EmaStats,
            {"ema_coeff": 0.1},
            2.1,  # mean of EMA values [1.1, 3.1] from first merge
            [2.1],
            11.3,  # mean of all EMA values [1.1, 3.1, 11.0, 30.0] (approximate)
            20.5,  # mean of [11.0, 30.0] (second merge)
            [20.5],
        ),
        (
            ItemSeriesStats,
            {"window": 10},
            [1.0, 2.0, 3.0, 4.0],
            [
                1.0,
                2.0,
                3.0,
                4.0,
            ],  # compile=False is the same as compile=True for ItemSeriesStats
            [1.0, 2.0, 3.0, 4.0, 10.0, 20.0, 30.0],
            [10.0, 20.0, 30.0],
            [
                10.0,
                20.0,
                30.0,
            ],  # compile=False is the same as compile=True for ItemSeriesStats
        ),
        (
            PercentilesStats,
            {"window": 10},
            {
                0: 1.0,
                50: 2.5,
                75: 3.25,
                90: 3.7,
                95: 3.85,
                99: 3.97,
                100: 4.0,
            },
            [1.0, 2.0, 3.0, 4.0],  # compile=False returns sorted list of values
            {
                0: 1.0,
                50: 4.0,
                75: 15.0,
                90: 24.0,
                95: 27.0,
                99: 29.4,
                100: 30.0,
            },  # compile=True returns percentiles of [1, 2, 3, 4, 10, 20, 30]
            {
                0: 10.0,
                50: 20.0,
                75: 25.0,
                90: 28.0,
                95: 29.0,
                99: 29.8,
                100: 30.0,
            },  # percentiles of [10, 20, 30]
            [10.0, 20.0, 30.0],  # compile=False returns sorted list of values
        ),
        (
            LifetimeSumStats,
            {},
            10.0,
            [10.0],
            70.0,
            60.0,
            [60.0],
        ),
    ],
)
def test_latest_merged_only_stats_types(
    stats_class,
    kwargs,
    expected_first,
    expected_first_compile_false,
    expected_second_normal,
    expected_second_latest,
    expected_second_compile_false,
):
    """Test latest_merged_only parameter for various Stats types.

    Two batches of child (leaf) stats are merged into a root stat. After each
    merge, ``peek(latest_merged_only=False)`` reflects everything merged so
    far, while ``peek(latest_merged_only=True)`` reflects only the most
    recent merge.
    """
    # Each batch has values for two child stats
    first_batch_values = [[1.0, 2.0], [3.0, 4.0]]
    second_batch_values = [[10.0, 20.0], [30.0]]
    root_stats = stats_class(**kwargs, is_root=True, is_leaf=False)
    first_batch_stats = []
    for values in first_batch_values:
        child_stats = stats_class(**kwargs, is_root=False, is_leaf=True)
        for value in values:
            child_stats.push(value)
        first_batch_stats.append(child_stats)
    root_stats.merge(first_batch_stats)
    # Normal peek should include all merged values
    first_normal_result = root_stats.peek(compile=True, latest_merged_only=False)
    check(first_normal_result, expected_first)
    # Latest merged only should only consider the latest merge (same as normal after first merge)
    first_latest_result = root_stats.peek(compile=True, latest_merged_only=True)
    check(first_latest_result, expected_first)
    # Test compile=False behavior after first merge
    first_latest_result_compile_false = root_stats.peek(
        compile=False, latest_merged_only=True
    )
    check(first_latest_result_compile_false, expected_first_compile_false)
    # Create and merge second batch
    second_batch_stats = []
    for values in second_batch_values:
        child_stats = stats_class(**kwargs, is_root=False, is_leaf=True)
        for value in values:
            child_stats.push(value)
        second_batch_stats.append(child_stats)
    root_stats.merge(second_batch_stats)
    # Normal peek should include all values
    second_normal_result = root_stats.peek(compile=True, latest_merged_only=False)
    check(second_normal_result, expected_second_normal)
    # Latest merged only should only consider the latest merge
    second_latest_result = root_stats.peek(compile=True, latest_merged_only=True)
    check(second_latest_result, expected_second_latest)
    # Test compile=False behavior after second merge
    second_latest_result_compile_false = root_stats.peek(
        compile=False, latest_merged_only=True
    )
    check(second_latest_result_compile_false, expected_second_compile_false)
def test_latest_merged_only_no_merge_yet():
    """Test latest_merged_only when no merge has occurred yet."""
    aggregator = MeanStats(window=10, is_root=True, is_leaf=False)
    # Before any merge, both views of the empty aggregator peek to NaN.
    for latest_merged_only in (True, False):
        peeked = aggregator.peek(compile=True, latest_merged_only=latest_merged_only)
        check(np.isnan(peeked), True)
def test_latest_merged_only_non_root_stats():
    """Test that latest_merged_only raises error on non-root stats."""
    leaf = MeanStats(window=10)
    leaf.push(1.0)
    # latest_merged_only is only meaningful on aggregation (root) stats and
    # must be rejected on a leaf.
    with pytest.raises(
        ValueError,
        match="latest_merged_only can only be used on aggregation stats objects",
    ):
        leaf.peek(compile=True, latest_merged_only=True)
def test_ema_stats_quiet_nanmean():
    """Test that EmaStats suppresses 'Mean of empty slice' warnings.

    ``np.nanmean`` can emit a "Mean of empty slice" RuntimeWarning when
    peeking an aggregator whose children hold no values; EmaStats should
    silence it.
    """
    aggregator = EmaStats(ema_coeff=0.01, is_root=True, is_leaf=False)
    children = [
        EmaStats(ema_coeff=0.01, is_root=False, is_leaf=True) for _ in range(2)
    ]
    aggregator.merge(children)

    with warnings.catch_warnings(record=True) as caught_warnings:
        warnings.simplefilter("always")
        aggregator.peek(compile=True)

    # Collect only the RuntimeWarnings about an empty slice.
    empty_slice_warnings = [
        w
        for w in caught_warnings
        if issubclass(w.category, RuntimeWarning)
        and "Mean of empty slice" in str(w.message)
    ]
    assert not empty_slice_warnings, (
        f"Expected no 'Mean of empty slice' warning but got: {empty_slice_warnings}"
    )
if __name__ == "__main__":
    import sys

    # Allow running this test module directly (outside of a pytest invocation).
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/utils/metrics/tests/test_stats.py",
"license": "Apache License 2.0",
"lines": 670,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/train/v2/_internal/metrics/base.py | from abc import ABC, abstractmethod
from enum import Enum
from typing import Dict, Generic, Optional, Tuple, TypeVar
from ray.util.metrics import Gauge
# Tag keys attached to every Ray Train metric so samples can be attributed
# to a specific training run.
RUN_NAME_TAG_KEY = "ray_train_run_name"
RUN_ID_TAG_KEY = "ray_train_run_id"

# Generic type of the value a Metric tracks.
T = TypeVar("T")
# Enum type tracked by EnumMetric.
E = TypeVar("E", bound=Enum)
class Metric(ABC):
    """Abstract base class for Ray Train metrics backed by a ``Gauge``.

    Subclasses define how values are accumulated (``record``), read back
    (``get_value``), and cleared (``reset``). Every gauge update carries
    ``base_tags`` so samples can be filtered by run.
    """

    def __init__(
        self,
        name: str,
        default: T,
        description: str,
        base_tags: Dict[str, str],
    ):
        """
        Initialize a new metric.

        Args:
            name: The name of the metric.
            default: The default value of the metric.
            description: The description of the metric.
            base_tags: The base tags for the metric.
        """
        self._default = default
        self._base_tags = base_tags
        # Underlying Ray gauge; its tag keys are fixed at construction time.
        self._gauge = Gauge(
            name,
            description=description,
            tag_keys=self._get_tag_keys(),
        )

    @abstractmethod
    def record(self, value: T):
        """Update the metric value.

        Args:
            value: The value to update the metric with.
        """
        pass

    @abstractmethod
    def get_value(self) -> T:
        """Get the value of the metric.

        Returns:
            The value of the metric. If the metric has not been recorded,
            the default value is returned.
        """
        pass

    @abstractmethod
    def reset(self):
        """Reset values and clean up resources."""
        pass

    def _get_tag_keys(self) -> Tuple[str, ...]:
        # Subclasses may override to report additional tag keys
        # (e.g. EnumMetric adds its enum tag key).
        return tuple(self._base_tags.keys())
class TimeMetric(Metric):
    """A metric for tracking elapsed time.

    Values passed to ``record`` are accumulated into a running total, which
    is mirrored to the underlying gauge on every update.
    """

    def __init__(
        self,
        name: str,
        description: str,
        base_tags: Dict[str, str],
    ):
        # Running total of all recorded time; this is also the gauge's value.
        self._current_value = 0.0
        super().__init__(
            name=name,
            default=0.0,
            description=description,
            base_tags=base_tags,
        )

    def record(self, value: float):
        """Update the time metric value by accumulating the time.

        Args:
            value: The time value to increment the metric by.
        """
        self._current_value += value
        self._gauge.set(self._current_value, self._base_tags)

    def get_value(self) -> float:
        """Return the accumulated time recorded so far (0.0 if never recorded)."""
        return self._current_value

    def reset(self):
        """Reset the accumulated time to the default and mirror it to the gauge."""
        self._current_value = self._default
        self._gauge.set(self._default, self._base_tags)
class EnumMetric(Metric, Generic[E]):
    """A metric for tracking enum values.

    The active enum member is encoded one-hot on the gauge: the tag set for
    the newly recorded member is set to ``RECORDED_VALUE`` (1) while the tag
    set for the previously recorded member is reset to ``DEFAULT_VALUE`` (0).
    """

    # Gauge value reported for members that are not currently active.
    DEFAULT_VALUE = 0
    # Gauge value reported for the currently active member.
    RECORDED_VALUE = 1

    def __init__(
        self,
        name: str,
        description: str,
        base_tags: Dict[str, str],
        enum_tag_key: str,
    ):
        # Extra tag key that carries the enum member's name on each sample.
        self._enum_tag_key = enum_tag_key
        # Currently recorded member; None until record() is first called.
        self._current_value: Optional[E] = None
        super().__init__(
            name=name,
            default=self.DEFAULT_VALUE,
            description=description,
            base_tags=base_tags,
        )

    def record(self, enum_value: E):
        """Record a specific enum value.

        The metric will be reset to 0 for the previous value and set to 1 for
        the new value. Recording the already-active value is a no-op.

        Args:
            enum_value: The enum value to record for.
        """
        if enum_value == self._current_value:
            return
        if self._current_value is not None:
            previous_tags = self._get_tags(self._current_value)
            self._gauge.set(self._default, previous_tags)
        current_tags = self._get_tags(enum_value)
        self._gauge.set(self.RECORDED_VALUE, current_tags)
        self._current_value = enum_value

    def get_value(self, enum_value: E) -> int:
        """Get the value for a specific enum value.

        NOTE(review): this overrides the abstract ``Metric.get_value(self)``
        with an extra parameter — confirm no caller relies on the base
        signature.

        Args:
            enum_value: The enum value to get the value for

        Returns:
            1 if ``enum_value`` is the currently recorded value, else 0.
        """
        return int(enum_value == self._current_value)

    def reset(self):
        """Clear the gauge entry for the active member and forget it."""
        if self._current_value is not None:
            tags = self._get_tags(self._current_value)
            self._gauge.set(self._default, tags)
        self._current_value = None

    def _get_tag_keys(self) -> Tuple[str, ...]:
        # Base tag keys plus the enum-name tag key.
        return tuple(self._base_tags.keys()) + (self._enum_tag_key,)

    def _get_tags(self, enum_value: E) -> Dict[str, str]:
        """Return the base tags extended with the member's name under the enum tag key."""
        tags = self._base_tags.copy()
        tags[self._enum_tag_key] = enum_value.name
        return tags
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/_internal/metrics/base.py",
"license": "Apache License 2.0",
"lines": 131,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/train/v2/_internal/metrics/controller.py | from typing import Dict, Union
from ray.train.v2._internal.execution.controller.state import TrainControllerStateType
from ray.train.v2._internal.metrics.base import (
RUN_ID_TAG_KEY,
RUN_NAME_TAG_KEY,
EnumMetric,
TimeMetric,
)
class ControllerMetrics:
    """Factory for creating controller-specific metrics.

    Defines the metric names and tag keys used to track the state and
    performance of the training controller, and builds the corresponding
    metric objects tagged with the run's name and id.
    """

    # ===== Metric Names =====
    CONTROLLER_STATE = "train_controller_state"
    WORKER_GROUP_START_TOTAL_TIME_S = "train_worker_group_start_total_time_s"
    WORKER_GROUP_SHUTDOWN_TOTAL_TIME_S = "train_worker_group_shutdown_total_time_s"

    # ===== Tag Keys =====
    CONTROLLER_STATE_TAG_KEY = "ray_train_controller_state"

    @classmethod
    def _create_time_metric(
        cls, name: str, description: str, base_tags: Dict[str, str]
    ) -> TimeMetric:
        """Build a cumulative-time metric with the given name, description, and tags."""
        return TimeMetric(name=name, description=description, base_tags=base_tags)

    @classmethod
    def _create_controller_state_metric(
        cls, base_tags: Dict[str, str]
    ) -> EnumMetric[TrainControllerStateType]:
        """Build the enum metric that tracks the controller's current state."""
        return EnumMetric[TrainControllerStateType](
            name=cls.CONTROLLER_STATE,
            description="Current state of the Ray Train controller",
            base_tags=base_tags,
            enum_tag_key=cls.CONTROLLER_STATE_TAG_KEY,
        )

    @classmethod
    def get_controller_metrics(
        cls, run_name: str, run_id: str
    ) -> Dict[str, Union[TimeMetric, EnumMetric[TrainControllerStateType]]]:
        """Return all controller metrics for the given run, keyed by metric name."""
        base_tags = {RUN_NAME_TAG_KEY: run_name, RUN_ID_TAG_KEY: run_id}
        time_metric_descriptions = {
            cls.WORKER_GROUP_START_TOTAL_TIME_S: (
                "Total time taken to start the worker group"
            ),
            cls.WORKER_GROUP_SHUTDOWN_TOTAL_TIME_S: (
                "Total time taken to shutdown the worker group"
            ),
        }
        metrics = {
            name: cls._create_time_metric(name, description, base_tags)
            for name, description in time_metric_descriptions.items()
        }
        metrics[cls.CONTROLLER_STATE] = cls._create_controller_state_metric(base_tags)
        return metrics
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/_internal/metrics/controller.py",
"license": "Apache License 2.0",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/train/v2/_internal/metrics/worker.py | from typing import Dict
from ray.train.v2._internal.metrics.base import (
RUN_ID_TAG_KEY,
RUN_NAME_TAG_KEY,
TimeMetric,
)
WORKER_WORLD_RANK_TAG_KEY = "ray_train_worker_world_rank"
WORKER_ACTOR_ID_TAG_KEY = "ray_train_worker_actor_id"
class WorkerMetrics:
    """Factory for creating worker-specific metrics.

    Defines every metric used to track the state and performance of the
    training workers, together with the tags attached to each one.
    """

    # ===== Metric Names =====
    REPORT_TOTAL_BLOCKED_TIME_S = "train_report_total_blocked_time_s"

    @classmethod
    def _create_time_metric(
        cls, name: str, description: str, base_tags: Dict[str, str]
    ) -> TimeMetric:
        """Build a TimeMetric carrying the worker's base tags."""
        metric = TimeMetric(name=name, description=description, base_tags=base_tags)
        return metric

    @classmethod
    def get_worker_metrics(
        cls, run_name: str, run_id: str, world_rank: int, worker_actor_id: str
    ) -> Dict[str, TimeMetric]:
        """Construct all worker metrics, keyed by metric name.

        Tags identify the run plus this specific worker (rank and actor id).
        """
        shared_tags = {
            RUN_NAME_TAG_KEY: run_name,
            RUN_ID_TAG_KEY: run_id,
            WORKER_WORLD_RANK_TAG_KEY: str(world_rank),
            WORKER_ACTOR_ID_TAG_KEY: worker_actor_id,
        }
        report_metric = cls._create_time_metric(
            cls.REPORT_TOTAL_BLOCKED_TIME_S,
            "Cumulative time in seconds to report a checkpoint to the storage.",
            shared_tags,
        )
        return {cls.REPORT_TOTAL_BLOCKED_TIME_S: report_metric}
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/_internal/metrics/worker.py",
"license": "Apache License 2.0",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/llm/tests/serve/cpu/deployments/test_prefix_tree.py | import asyncio
from typing import List, Set
import pytest
import ray
from ray.llm._internal.serve.routing_policies.prefix_aware.prefix_tree import (
Node,
PrefixTree,
PrefixTreeActor,
)
# Fixtures
@pytest.fixture
def tree() -> PrefixTree:
    """Provide a brand-new, empty PrefixTree for each local test."""
    fresh_tree = PrefixTree()
    return fresh_tree
@pytest.fixture
def tree_actor():
    """Provide a brand-new PrefixTreeActor handle for each ray.remote test."""
    actor_handle = PrefixTreeActor.remote()
    return actor_handle
# Helper to get LRU chain texts
def get_lru_texts_from_tree(tree: PrefixTree, tenant_id: str) -> List[str]:
    """Return the text of each node in the tenant's LRU chain, in chain order."""
    return [lru_node.text for lru_node in tree._get_lru_chain(tenant_id)]
async def get_lru_texts_from_tree_actor(
    tree_actor: PrefixTreeActor, tenant_id: str
) -> List[str]:
    """Return the text of each LRU-chain node for a tenant, fetched from the actor."""
    lru_chain = ray.get(tree_actor._get_lru_chain.remote(tenant_id))
    texts = [lru_node.text for lru_node in lru_chain]
    return texts
class TestPrefixTreeInitialization:
    """Tests for the PrefixTree class initialization and basic tenant management."""

    def test_initial_state(self, tree: PrefixTree) -> None:
        """Test the initial state of a new PrefixTree."""
        assert tree.tenant_to_char_count == {}
        assert tree.tenant_to_lru_tail == {}
        # A fresh tree has only an empty-text root with no tenants and no children.
        assert tree.root is not None
        assert tree.root.text == ""
        assert tree.root.parent is None
        assert tree.root.tenant_to_last_access_time == {}
        assert tree.root.edge_label_to_child == {}

    def test_add_tenant(self, tree: PrefixTree) -> None:
        """Test adding a new tenant via add_tenants."""
        tree.add_tenants(["tenant_1"], 0)
        assert tree.tenant_to_char_count == {"tenant_1": 0}
        assert tree.tenant_to_lru_tail.get("tenant_1") == tree.root
        assert tree.root.tenant_to_last_access_time == {"tenant_1": 0}
        # The LRU chain of a fresh tenant contains only the root (empty text).
        assert get_lru_texts_from_tree(tree, "tenant_1") == [""]

    def test_add_existing_tenant_noop(self, tree: PrefixTree) -> None:
        """Test that adding an existing tenant via add_tenants is a no-op."""
        tree.add_tenants(["tenant_1"], 0)
        assert tree.tenant_to_char_count == {"tenant_1": 0}
        assert tree.tenant_to_lru_tail.get("tenant_1") == tree.root
        assert tree.root.tenant_to_last_access_time == {"tenant_1": 0}
        assert get_lru_texts_from_tree(tree, "tenant_1") == [""]
        tree.add_tenants(["tenant_1"], 0)  # Add again
        # State must be identical to before the second add.
        assert tree.tenant_to_char_count == {"tenant_1": 0}
        assert tree.tenant_to_lru_tail.get("tenant_1") == tree.root
        assert tree.root.tenant_to_last_access_time == {"tenant_1": 0}
        assert get_lru_texts_from_tree(tree, "tenant_1") == [""]

    def test_add_multiple_tenants(self, tree: PrefixTree) -> None:
        """Test adding multiple tenants at once."""
        tree.add_tenants(["tenant_1", "tenant_2", "tenant_3"], 0)
        assert tree.tenant_to_char_count == {
            "tenant_1": 0,
            "tenant_2": 0,
            "tenant_3": 0,
        }
        for tenant in ["tenant_1", "tenant_2", "tenant_3"]:
            assert tree.tenant_to_lru_tail.get(tenant) == tree.root
            assert tree.root.tenant_to_newer_node.get(tenant) is None
            assert tree.root.tenant_to_older_node.get(tenant) is None
            assert tree.root.tenant_to_last_access_time == {
                "tenant_1": 0,
                "tenant_2": 0,
                "tenant_3": 0,
            }
            assert get_lru_texts_from_tree(tree, tenant) == [""]

    def test_add_multiple_tenants_with_existing(self, tree: PrefixTree) -> None:
        """Test adding multiple tenants when some already exist."""
        tree.add_tenants(["tenant_1"], 0)
        assert tree.root.tenant_to_last_access_time == {"tenant_1": 0}
        assert tree.tenant_to_char_count == {"tenant_1": 0}
        assert "tenant_1" in tree.tenant_to_lru_tail
        # Add a mix of new and existing tenants
        tree.add_tenants(["tenant_1", "tenant_2", "tenant_3"], 0)
        # Existing tenants should remain unchanged
        assert tree.root.tenant_to_last_access_time == {
            "tenant_1": 0,
            "tenant_2": 0,
            "tenant_3": 0,
        }
        assert tree.tenant_to_char_count == {
            "tenant_1": 0,
            "tenant_2": 0,
            "tenant_3": 0,
        }
        assert all(
            tenant in tree.tenant_to_lru_tail
            for tenant in ["tenant_1", "tenant_2", "tenant_3"]
        )
class TestPrefixTreeInsert:
    """Tests for PrefixTree.insert: tenant checks, node splits, and LRU updates."""

    def test_insert_non_existent_tenant(self, tree: PrefixTree) -> None:
        """Test inserting a string for a non-existent tenant fails."""
        # Insert without adding tenant first
        tree.insert("hello", "nonexistent", 1)
        # Verify insert did nothing since tenant doesn't exist
        assert "nonexistent" not in tree.tenant_to_char_count
        assert get_lru_texts_from_tree(tree, "nonexistent") == []
        assert "h" not in tree.root.edge_label_to_child

    def test_insert_single_string(self, tree: PrefixTree) -> None:
        """Test inserting a single string after adding a tenant."""
        tree.add_tenants(["tenant_1"], 0)
        tree.insert("hello", "tenant_1", 1)
        assert tree.tenant_to_char_count == {"tenant_1": 5}
        assert get_lru_texts_from_tree(tree, "tenant_1") == ["", "hello"]
        root_node = tree.root
        assert root_node.tenant_to_last_access_time == {"tenant_1": 1}
        assert set(root_node.edge_label_to_child.keys()) == {"h"}
        # Children are keyed by the first character of the edge label.
        hello_node = root_node.edge_label_to_child["h"]
        assert hello_node.text == "hello"
        assert hello_node.parent == root_node
        assert hello_node.tenant_to_last_access_time == {"tenant_1": 1}
        assert hello_node.edge_label_to_child == {}

    def test_insert_duplicate_string(self, tree: PrefixTree) -> None:
        """Test inserting a duplicate string for the same tenant."""
        tree.add_tenants(["tenant_1"], 0)
        tree.insert("hello", "tenant_1", 1)  # Initial insert
        tree.insert("hello", "tenant_1", 1)  # Duplicate insert with the same timestamp
        assert tree.tenant_to_char_count == {"tenant_1": 5}  # Char count unchanged
        assert get_lru_texts_from_tree(tree, "tenant_1") == [
            "",
            "hello",
        ]  # LRU order same
        hello_node = tree.root.edge_label_to_child["h"]
        assert tree.root.tenant_to_last_access_time == {"tenant_1": 1}
        assert hello_node.tenant_to_last_access_time == {"tenant_1": 1}
        tree.insert("hello", "tenant_1", 2)  # Duplicate insert with new timestamp
        assert tree.tenant_to_char_count == {"tenant_1": 5}  # Char count unchanged
        assert get_lru_texts_from_tree(tree, "tenant_1") == [
            "",
            "hello",
        ]  # LRU order same
        hello_node = tree.root.edge_label_to_child["h"]
        # Access timestamps are refreshed even though structure is unchanged.
        assert tree.root.tenant_to_last_access_time == {"tenant_1": 2}
        assert hello_node.tenant_to_last_access_time == {"tenant_1": 2}

    def test_insert_multiple_tenants(self, tree: PrefixTree) -> None:
        """Test inserting the same string for different tenants."""
        tree.add_tenants(["tenant_1", "tenant_2"], 0)
        tree.insert("hello", "tenant_1", 1)
        tree.insert("hello", "tenant_2", 2)
        # The single "hello" node is shared; both tenants are recorded on it.
        assert tree.tenant_to_char_count == {"tenant_1": 5, "tenant_2": 5}
        assert get_lru_texts_from_tree(tree, "tenant_1") == ["", "hello"]
        assert get_lru_texts_from_tree(tree, "tenant_2") == ["", "hello"]
        hello_node = tree.root.edge_label_to_child["h"]
        assert tree.root.tenant_to_last_access_time == {"tenant_1": 1, "tenant_2": 2}
        assert hello_node.tenant_to_last_access_time == {"tenant_1": 1, "tenant_2": 2}

    def test_insert_node_split(self, tree: PrefixTree) -> None:
        """Test insertion that causes an existing node to split due to differing suffixes."""
        tree.add_tenants(["tenant_1", "tenant_2"], 0)
        tree.insert("helloworld", "tenant_1", 1)
        tree.insert("hellothere", "tenant_2", 2)  # "hello" is common prefix
        assert tree.tenant_to_char_count == {"tenant_1": 10, "tenant_2": 10}
        assert get_lru_texts_from_tree(tree, "tenant_1") == ["", "hello", "world"]
        assert get_lru_texts_from_tree(tree, "tenant_2") == ["", "there", "hello"]
        # After the split: "hello" parent node with "world" and "there" children.
        hello_node = tree.root.edge_label_to_child["h"]
        assert hello_node.text == "hello"
        assert hello_node.tenant_to_last_access_time == {"tenant_1": 1, "tenant_2": 2}
        assert set(hello_node.edge_label_to_child.keys()) == {"w", "t"}
        world_node = hello_node.edge_label_to_child["w"]
        assert world_node.text == "world"
        assert world_node.tenant_to_last_access_time == {"tenant_1": 1}
        there_node = hello_node.edge_label_to_child["t"]
        assert there_node.text == "there"
        assert there_node.tenant_to_last_access_time == {"tenant_2": 2}

    def test_insert_longer_string_with_shared_prefix(self, tree: PrefixTree) -> None:
        """Test inserting a longer string that shares a prefix with an existing node string."""
        tree.add_tenants(["tenant_1", "tenant_2"], 0)
        tree.insert("hello", "tenant_1", 1)
        tree.insert("helloworld", "tenant_2", 2)  # "hello" is prefix of "helloworld"
        assert tree.tenant_to_char_count == {"tenant_1": 5, "tenant_2": 10}
        assert get_lru_texts_from_tree(tree, "tenant_1") == ["", "hello"]
        assert get_lru_texts_from_tree(tree, "tenant_2") == ["", "world", "hello"]
        hello_node = tree.root.edge_label_to_child["h"]
        assert hello_node.text == "hello"
        assert hello_node.tenant_to_last_access_time == {"tenant_1": 1, "tenant_2": 2}
        assert set(hello_node.edge_label_to_child.keys()) == {"w"}
        world_node = hello_node.edge_label_to_child["w"]
        assert world_node.text == "world"
        assert world_node.tenant_to_last_access_time == {"tenant_2": 2}
        # Ensure no empty non-root nodes created
        empty_text_nodes: List[Node] = []
        nodes_to_check: List[Node] = [tree.root]
        visited_nodes: Set[Node] = {tree.root}
        # Depth-first traversal of the whole tree looking for empty-text nodes.
        while nodes_to_check:
            node: Node = nodes_to_check.pop()
            if node.text == "" and node != tree.root:  # check for non-root empty nodes
                empty_text_nodes.append(node)
            for child in node.edge_label_to_child.values():
                if child not in visited_nodes:
                    nodes_to_check.append(child)
                    visited_nodes.add(child)
        assert not empty_text_nodes

    def test_insert_shorter_string_with_shared_prefix(self, tree: PrefixTree) -> None:
        """Test inserting a shorter string that is a prefix of an existing longer string, causing split."""
        tree.add_tenants(["tenant_1", "tenant_2"], 0)
        tree.insert("helloworld", "tenant_1", 1)
        tree.insert(
            "hello", "tenant_2", 2
        )  # "hello" is prefix, causes "helloworld" to split
        assert tree.tenant_to_char_count == {"tenant_1": 10, "tenant_2": 5}
        assert get_lru_texts_from_tree(tree, "tenant_1") == ["", "hello", "world"]
        assert get_lru_texts_from_tree(tree, "tenant_2") == ["", "hello"]
        hello_node = tree.root.edge_label_to_child["h"]
        assert hello_node.text == "hello"
        assert hello_node.tenant_to_last_access_time == {"tenant_1": 1, "tenant_2": 2}
        assert set(hello_node.edge_label_to_child.keys()) == {"w"}
        world_node = hello_node.edge_label_to_child["w"]
        assert world_node.text == "world"
        # Only tenant_1 owns the "world" suffix.
        assert world_node.tenant_to_last_access_time == {"tenant_1": 1}
class TestPrefixTreeMatch:
    """Tests for PrefixTree.prefix_match, with and without tenant filters."""

    def test_prefix_match_empty_tree(self, tree: PrefixTree) -> None:
        """Test prefix_match on an empty tree returns empty string and None tenants."""
        matched_text, matched_tenants = tree.prefix_match("hello")
        assert matched_text == ""
        assert matched_tenants is None

    def test_prefix_match_no_match(self, tree: PrefixTree) -> None:
        """Test prefix_match for a non-matching prefix returns empty string and all tenants."""
        tree.add_tenants(["tenant_1", "tenant_2"], 0)
        tree.insert("hello", "tenant_1", 1)
        tree.insert("world", "tenant_2", 2)
        matched_text, matched_tenants = tree.prefix_match("foobar")
        assert matched_text == ""
        # No prefix matched: every known tenant is returned as a candidate.
        assert matched_tenants is not None
        assert sorted(matched_tenants) == sorted(["tenant_1", "tenant_2"])

    def test_prefix_match_query_longer_than_stored_strings(
        self, tree: PrefixTree
    ) -> None:
        """Test prefix_match where query is longer than any stored string but matches a full path."""
        tree.add_tenants(["tenant_1", "tenant_2"], 0)
        tree.insert("helloworld", "tenant_1", 1)
        tree.insert("hellothere", "tenant_2", 2)
        matched_text, matched_tenants = tree.prefix_match("hellothereextra")
        assert matched_text == "hellothere"
        assert matched_tenants == ["tenant_2"]

    def test_prefix_match_exact_match(self, tree: PrefixTree) -> None:
        """Test prefix_match with an exact match for a single tenant."""
        tree.add_tenants(["tenant_1"], 0)
        tree.insert("hello", "tenant_1", 1)
        matched_text, matched_tenants = tree.prefix_match("hello")
        assert matched_text == "hello"
        assert matched_tenants == ["tenant_1"]

    def test_prefix_match_partial_match(self, tree: PrefixTree) -> None:
        """Test prefix_match with a partial query matching the longest common part of a branch."""
        tree.add_tenants(["tenant_1", "tenant_2"], 0)
        tree.insert("apple", "tenant_1", 1)
        tree.insert("apricot", "tenant_2", 2)
        matched_text, matched_tenants = tree.prefix_match("application")
        assert matched_text == "appl"  # Longest of ("appl", "ap")
        assert matched_tenants == ["tenant_1"]

    def test_prefix_match_with_tenant_filter(self, tree: PrefixTree) -> None:
        """Test prefix_match with a tenant filter selecting a specific branch."""
        tree.add_tenants(["tenant_1", "tenant_2"], 0)
        tree.insert("apple", "tenant_1", 1)
        tree.insert("apricot", "tenant_2", 2)
        # Restricting to tenant_2 limits the match to tenant_2's branch ("ap...").
        matched_text, matched_tenants = tree.prefix_match("application", ["tenant_2"])
        assert matched_text == "ap"
        assert matched_tenants == ["tenant_2"]

    def test_prefix_match_with_shared_prefix_tenant_filter(
        self, tree: PrefixTree
    ) -> None:
        """Test prefix_match with a tenant filter when one tenant has a prefix of a longer string."""
        tree.add_tenants(["tenant_1", "tenant_2"], 0)
        tree.insert("apple", "tenant_1", 1)
        tree.insert("applepie", "tenant_2", 2)
        # Match the longer string but only allow tenant_1
        matched_text, matched_tenants = tree.prefix_match("applepie", ["tenant_1"])
        # Should only match up to "apple" as that's what tenant_1 owns
        assert matched_text == "apple"
        assert matched_tenants == ["tenant_1"]
        # Verify that using both tenants would match the full string for tenant_2 only
        matched_text, matched_tenants = tree.prefix_match(
            "applepie", ["tenant_1", "tenant_2"]
        )
        assert matched_text == "applepie"
        assert matched_tenants == ["tenant_2"]
        # And both tenants should be returned for "apple"
        matched_text, matched_tenants = tree.prefix_match(
            "apple", ["tenant_1", "tenant_2"]
        )
        assert matched_text == "apple"
        assert set(matched_tenants) == {"tenant_1", "tenant_2"}

    def test_prefix_match_with_non_existent_tenant_filter(
        self, tree: PrefixTree
    ) -> None:
        """Test prefix_match with a filter for a non-existent tenant returns no match."""
        tree.add_tenants(["tenant_1"], 0)
        tree.insert("apple", "tenant_1", 1)
        matched_text, matched_tenants = tree.prefix_match(
            "application", ["non_existent_tenant"]
        )
        assert matched_text == ""
        assert matched_tenants is None
class TestPrefixTreeRemove:
    """Tests for _remove_tenant_single_node and remove_tenants, including pruning."""

    def test_remove_single_leaf_node_pruned(self, tree: PrefixTree) -> None:
        """Test _remove_tenant_single_node for a leaf node; node should be pruned."""
        tree.add_tenants(["tenant_1"], 0)
        tree.insert("hello", "tenant_1", 1)
        hello_node = tree.root.edge_label_to_child["h"]
        assert hello_node.tenant_to_last_access_time == {"tenant_1": 1}
        assert tree.tenant_to_char_count == {"tenant_1": 5}
        assert tree.root.edge_label_to_child == {"h": hello_node}
        removed_chars = tree._remove_tenant_single_node("tenant_1", hello_node)
        assert removed_chars == 5
        # Node had no other tenants and no children, so it was pruned.
        assert hello_node.tenant_to_last_access_time == {}
        assert tree.tenant_to_char_count == {"tenant_1": 0}
        assert tree.root.edge_label_to_child == {}  # Node pruned

    def test_remove_single_leaf_node_not_pruned(self, tree: PrefixTree) -> None:
        """Test _remove_tenant_single_node for a leaf node; node should not be pruned."""
        tree.add_tenants(["tenant_1", "tenant_2"], 0)
        tree.insert("hello", "tenant_1", 1)
        tree.insert("hello", "tenant_2", 2)
        hello_node = tree.root.edge_label_to_child["h"]
        assert hello_node.tenant_to_last_access_time == {"tenant_1": 1, "tenant_2": 2}
        assert tree.tenant_to_char_count == {"tenant_1": 5, "tenant_2": 5}
        assert tree.root.edge_label_to_child == {"h": hello_node}
        removed_chars = tree._remove_tenant_single_node("tenant_1", hello_node)
        assert removed_chars == 5
        # Node survives because tenant_2 still references it.
        assert hello_node.tenant_to_last_access_time == {"tenant_2": 2}
        assert tree.tenant_to_char_count == {"tenant_1": 0, "tenant_2": 5}
        assert tree.root.edge_label_to_child == {"h": hello_node}  # Node not pruned

    def test_remove_single_node_with_non_existent_tenant(
        self, tree: PrefixTree
    ) -> None:
        """Test _remove_tenant_single_node for a non-existent tenant is a no-op."""
        tree.add_tenants(["tenant_1"], 0)
        tree.insert("hello", "tenant_1", 1)
        hello_node = tree.root.edge_label_to_child["h"]
        removed_chars = tree._remove_tenant_single_node(
            "non_existent_tenant", hello_node
        )
        assert removed_chars == 0

    def test_remove_single_node_with_non_matching_tenant(
        self, tree: PrefixTree
    ) -> None:
        """Test _remove_tenant_single_node if node doesn't belong to specified tenant is a no-op."""
        tree.add_tenants(["tenant_1", "tenant_2"], 0)
        tree.insert("hello", "tenant_1", 1)
        tree.insert("world", "tenant_2", 2)  # Node for tenant_2
        hello_node = tree.root.edge_label_to_child["h"]  # Belongs to tenant_1
        removed_chars = tree._remove_tenant_single_node(
            "tenant_2", hello_node
        )  # Try removing tenant_2 from tenant_1's node
        assert removed_chars == 0

    def test_remove_tenant(self, tree: PrefixTree) -> None:
        """Test remove_tenant for a tree with multiple tenants only removes the specified tenant."""
        tree.add_tenants(["tenant_1", "tenant_2"], 0)
        tree.insert("hello", "tenant_1", 1)
        tree.insert("foobar", "tenant_1", 2)
        tree.insert("helloworld", "tenant_2", 3)
        removed_chars = tree.remove_tenants(["tenant_1"])
        # 5 ("hello") + 6 ("foobar") chars removed for tenant_1.
        assert removed_chars == {"tenant_1": 11}
        hello_node = tree.root.edge_label_to_child["h"]
        assert hello_node.tenant_to_last_access_time == {"tenant_2": 3}
        assert tree.tenant_to_char_count == {"tenant_2": 10}
        assert set(tree.tenant_to_lru_tail.keys()) == {"tenant_2"}
        tenant_2_lru_texts = get_lru_texts_from_tree(tree, "tenant_2")
        assert tenant_2_lru_texts == ["", "world", "hello"]

    def test_remove_non_existent_tenant(self, tree: PrefixTree) -> None:
        """Test remove_tenant for a non-existent tenant returns 0."""
        tree.add_tenants(["tenant_1"], 0)
        tree.insert("hello", "tenant_1", 1)
        removed_chars = tree.remove_tenants(["non_existent_tenant"])
        assert removed_chars == {"non_existent_tenant": 0}

    def test_remove_tenant_prunes_nodes(self, tree: PrefixTree) -> None:
        """Test remove_tenant prunes nodes that become tenant-less and childless."""
        tree.add_tenants(["tenant_1", "tenant_2"], 0)
        tree.insert("helloworld", "tenant_1", 1)  # Creates "helloworld"
        tree.insert(
            "hellothere", "tenant_2", 2
        )  # Splits into "hello" -> "world" and "hello" -> "there"
        tree.remove_tenants(["tenant_1"])
        # "world" node should be pruned. "hello" and "there" remain for tenant_2.
        hello_node = tree.root.edge_label_to_child["h"]
        assert set(hello_node.edge_label_to_child.keys()) == {"t"}
        assert hello_node.edge_label_to_child["t"].text == "there"
        assert hello_node.edge_label_to_child["t"].tenant_to_last_access_time == {
            "tenant_2": 2
        }

    def test_remove_tenants(self, tree: PrefixTree) -> None:
        """Test remove_tenants for multiple tenants with different structures."""
        tree.add_tenants(["tenant_1", "tenant_2", "tenant_3"], 0)
        tree.insert("hello", "tenant_1", 1)  # 5 chars
        tree.insert("foobar", "tenant_1", 2)  # 6 chars
        tree.insert("helloworld", "tenant_2", 3)  # 10 chars
        tree.insert("test", "tenant_3", 4)  # 4 chars
        removed_chars = tree.remove_tenants(["tenant_1", "tenant_3"])
        # Check return value contains correct char counts
        assert removed_chars == {"tenant_1": 11, "tenant_3": 4}
        # Check tree state is correct
        assert "tenant_1" not in tree.tenant_to_char_count
        assert "tenant_3" not in tree.tenant_to_char_count
        assert "tenant_2" in tree.tenant_to_char_count
        assert tree.tenant_to_char_count == {"tenant_2": 10}
        # Check nodes are correctly maintained
        assert (
            "h" in tree.root.edge_label_to_child
        )  # hello node still exists for tenant_2
        assert "t" not in tree.root.edge_label_to_child  # test node removed
        assert "f" not in tree.root.edge_label_to_child  # foobar node removed
        # Check LRU structure
        assert set(tree.tenant_to_lru_tail.keys()) == {"tenant_2"}
        tenant_2_lru_texts = get_lru_texts_from_tree(tree, "tenant_2")
        assert tenant_2_lru_texts == ["", "world", "hello"]

    def test_remove_tenants_with_nonexistent(self, tree: PrefixTree) -> None:
        """Test remove_tenants with a mix of existing and non-existent tenants."""
        tree.add_tenants(["tenant_1", "tenant_2"], 0)
        tree.insert("hello", "tenant_1", 1)
        tree.insert("world", "tenant_2", 2)
        removed_chars = tree.remove_tenants(["tenant_1", "nonexistent", "alsonotfound"])
        # Check return value
        assert removed_chars == {"tenant_1": 5, "nonexistent": 0, "alsonotfound": 0}
        # Check tree state
        assert "tenant_1" not in tree.tenant_to_char_count
        assert tree.tenant_to_char_count == {"tenant_2": 5}
        assert "h" not in tree.root.edge_label_to_child  # hello node removed
        assert "w" in tree.root.edge_label_to_child  # world node still exists
class TestPrefixTreeEviction:
    """Tests for evict_tenant_by_lru, which frees at least min_remove_size chars."""

    def test_eviction_non_existent_tenant(self, tree: PrefixTree) -> None:
        """Test evict_tenant_by_lru for a non-existent tenant returns 0."""
        assert tree.evict_tenant_by_lru("nonexistent_tenant", 5) == 0

    def test_eviction_exact_min_remove_size_single_node(self, tree: PrefixTree) -> None:
        """Test evicting exactly min_remove_size characters from a single oldest node."""
        tree.add_tenants(["tenant_1"], 0)
        tree.insert("a", "tenant_1", 1)  # Oldest (1 char)
        tree.insert("bb", "tenant_1", 2)
        tree.insert("ccc", "tenant_1", 3)
        assert get_lru_texts_from_tree(tree, "tenant_1") == ["", "ccc", "bb", "a"]
        evicted_count = tree.evict_tenant_by_lru("tenant_1", 1)  # Evict "a"
        assert evicted_count == 1
        assert tree.tenant_to_char_count == {"tenant_1": 5}  # 6 - 1
        assert get_lru_texts_from_tree(tree, "tenant_1") == ["", "ccc", "bb"]

    def test_eviction_exceed_min_remove_size_single_node(
        self, tree: PrefixTree
    ) -> None:
        """Test evicting more than min_remove_size characters from a single oldest node."""
        tree.add_tenants(["tenant_1"], 0)
        tree.insert("aaa", "tenant_1", 1)  # Oldest (3 chars)
        tree.insert("bb", "tenant_1", 2)
        tree.insert("c", "tenant_1", 3)
        assert get_lru_texts_from_tree(tree, "tenant_1") == ["", "c", "bb", "aaa"]
        # Requesting 1 char still evicts the full 3-char oldest node.
        evicted_count = tree.evict_tenant_by_lru("tenant_1", 1)  # Evict "aaa"
        assert evicted_count == 3
        assert tree.tenant_to_char_count == {"tenant_1": 3}  # 6 - 3
        assert get_lru_texts_from_tree(tree, "tenant_1") == ["", "c", "bb"]

    def test_eviction_multiple_nodes(self, tree: PrefixTree) -> None:
        """Test evicting multiple oldest nodes to meet min_remove_size."""
        tree.add_tenants(["tenant_1"], 0)
        tree.insert("a", "tenant_1", 1)  # Oldest (1 char)
        tree.insert("bb", "tenant_1", 2)  # Next oldest (2 chars)
        tree.insert("ccc", "tenant_1", 3)
        assert get_lru_texts_from_tree(tree, "tenant_1") == ["", "ccc", "bb", "a"]
        evicted_count = tree.evict_tenant_by_lru("tenant_1", 2)  # Evict "a" and "bb"
        assert evicted_count == 3  # 1 ("a") + 2 ("bb")
        assert tree.tenant_to_char_count["tenant_1"] == 3  # 6 - 3
        assert get_lru_texts_from_tree(tree, "tenant_1") == ["", "ccc"]

    def test_eviction_same_timestamps(self, tree: PrefixTree) -> None:
        """Test evicting more than min_remove_size if multiple nodes share the oldest timestamp."""
        tree.add_tenants(["tenant_1", "tenant_2"], 0)
        tree.insert("helloworld", "tenant_1", 1)
        tree.insert("hellothere", "tenant_2", 2)
        assert get_lru_texts_from_tree(tree, "tenant_1") == ["", "hello", "world"]
        assert get_lru_texts_from_tree(tree, "tenant_2") == ["", "there", "hello"]
        # Should remove both "hello" and "world" because they have the same timestamp
        evicted_count = tree.evict_tenant_by_lru("tenant_1", 1)  # Request 1 char
        assert evicted_count == 10  # Removes "hello" and "world"
        assert tree.tenant_to_char_count == {"tenant_1": 0, "tenant_2": 10}
        assert get_lru_texts_from_tree(tree, "tenant_1") == [""]
        assert get_lru_texts_from_tree(tree, "tenant_2") == ["", "there", "hello"]

    def test_eviction_insufficient_chars_evicts_all(self, tree: PrefixTree) -> None:
        """Test evicting when min_remove_size is larger than available; evicts all."""
        tree.add_tenants(["tenant_1"], 0)
        tree.insert("xyz", "tenant_1", 1)  # 3 chars available
        evicted_count = tree.evict_tenant_by_lru("tenant_1", 10)
        assert evicted_count == 3
        assert tree.tenant_to_char_count == {"tenant_1": 0}
        assert get_lru_texts_from_tree(tree, "tenant_1") == [""]
class TestPrefixTreeGetSmallestTenants:
    """Tests for the get_smallest_tenants method."""

    def test_get_smallest_tenants(self, tree: PrefixTree) -> None:
        """Test get_smallest_tenants identifies the tenant with the fewest characters."""
        tree.add_tenants(["tenant_1", "tenant_2", "tenant_3"], 0)
        tree.insert("aaaa", "tenant_1", 1)  # 4 chars
        tree.insert("bb", "tenant_2", 2)  # 2 chars
        tree.insert("c", "tenant_3", 3)  # 1 char
        smallest_tenants = tree.get_smallest_tenants()
        assert smallest_tenants == ["tenant_3"]

    def test_get_smallest_tenants_empty_tree(self, tree: PrefixTree) -> None:
        """Test get_smallest_tenants on an empty tree returns None."""
        assert tree.get_smallest_tenants() is None

    def test_get_smallest_tenants_after_update(self, tree: PrefixTree) -> None:
        """Test get_smallest_tenants after removing the current smallest tenant."""
        tree.add_tenants(["tenant_1", "tenant_2", "tenant_3"], 0)
        tree.insert("aaaa", "tenant_1", 1)
        tree.insert("bb", "tenant_2", 2)
        tree.insert("c", "tenant_3", 3)
        tree.remove_tenants(["tenant_3"])  # Remove "c" (1 char)
        smallest_tenants = tree.get_smallest_tenants()
        assert smallest_tenants == ["tenant_2"]  # "bb" (2 chars) is now smallest

    def test_get_smallest_tenants_with_ties(self, tree: PrefixTree) -> None:
        """Test get_smallest_tenants when multiple tenants have the same minimum count."""
        tree.add_tenants(["tenant_1", "tenant_2", "tenant_3"], 0)
        tree.insert("aa", "tenant_1", 1)  # 2 chars
        tree.insert("bb", "tenant_2", 2)  # 2 chars
        tree.insert("cccc", "tenant_3", 3)  # 4 chars
        smallest_tenants = tree.get_smallest_tenants()
        # All tenants tied at the minimum count are returned.
        assert set(smallest_tenants) == {"tenant_1", "tenant_2"}
class TestPrefixTreeComprehensive:
    """Comprehensive tests for the PrefixTree"""

    def test_tree_structure_multiple_insertions(self, tree: PrefixTree) -> None:
        """Test tree structure after multiple insertions."""
        tree.add_tenants(["tenant_1", "tenant_2"], 0)
        tree.insert("helloworld", "tenant_1", 1)
        tree.insert("hellothere", "tenant_2", 2)
        tree.insert("hellothomas", "tenant_2", 3)
        # Access tree directly
        root: Node = tree.root
        # Test tree structure - validate each node
        # Root node
        assert root.text == ""
        assert root.parent is None
        assert root.tenant_to_last_access_time == {"tenant_1": 1, "tenant_2": 3}
        assert set(root.edge_label_to_child.keys()) == {"h"}
        # Hello node (shared prefix of all three inserts)
        hello_node: Node = root.edge_label_to_child["h"]
        assert hello_node.text == "hello"
        assert hello_node.parent.text == ""
        assert hello_node.tenant_to_last_access_time == {"tenant_1": 1, "tenant_2": 3}
        assert set(hello_node.edge_label_to_child.keys()) == {"w", "t"}
        # World node
        world_node: Node = hello_node.edge_label_to_child["w"]
        assert world_node.text == "world"
        assert world_node.parent.text == "hello"
        assert world_node.tenant_to_last_access_time == {"tenant_1": 1}
        assert set(world_node.edge_label_to_child.keys()) == set()
        # Th node ("there" and "thomas" diverge after their shared "th")
        th_node: Node = hello_node.edge_label_to_child["t"]
        assert th_node.text == "th"
        assert th_node.parent.text == "hello"
        assert th_node.tenant_to_last_access_time == {"tenant_2": 3}
        assert set(th_node.edge_label_to_child.keys()) == {"e", "o"}
        # Ere node
        ere_node: Node = th_node.edge_label_to_child["e"]
        assert ere_node.text == "ere"
        assert ere_node.parent.text == "th"
        assert ere_node.tenant_to_last_access_time == {"tenant_2": 2}
        assert set(ere_node.edge_label_to_child.keys()) == set()
        # Omas node
        omas_node: Node = th_node.edge_label_to_child["o"]
        assert omas_node.text == "omas"
        assert omas_node.parent.text == "th"
        assert omas_node.tenant_to_last_access_time == {"tenant_2": 3}
        assert set(omas_node.edge_label_to_child.keys()) == set()

    def test_multiple_evictions_maintains_lru_order(self, tree: PrefixTree) -> None:
        """Test multiple evictions maintain LRU order."""
        tree.add_tenants(["tenant_1", "tenant_2"], 0)
        tree.insert("helloworld", "tenant_1", 1)
        tree.insert("hellothere", "tenant_2", 2)
        tree.insert("hellothomas", "tenant_2", 3)
        assert tree.tenant_to_char_count == {"tenant_1": 10, "tenant_2": 14}
        assert get_lru_texts_from_tree(tree, "tenant_1") == ["", "hello", "world"]
        assert get_lru_texts_from_tree(tree, "tenant_2") == [
            "",
            "omas",
            "th",
            "hello",
            "ere",
        ]
        # Eviction 1 (tenant_1): min_remove_size=1. "hello" and "world" removed.
        evicted_1 = tree.evict_tenant_by_lru("tenant_1", 1)
        assert evicted_1 == 10
        assert tree.tenant_to_char_count == {"tenant_1": 0, "tenant_2": 14}
        assert get_lru_texts_from_tree(tree, "tenant_1") == [""]
        assert get_lru_texts_from_tree(tree, "tenant_2") == [
            "",
            "omas",
            "th",
            "hello",
            "ere",
        ]  # T2 unchanged
        # Eviction 2 (tenant_2): min_remove_size=1. "ere" is oldest timestamp, removed.
        evicted_2 = tree.evict_tenant_by_lru("tenant_2", 1)
        assert evicted_2 == 3  # "ere" is 3 chars
        assert tree.tenant_to_char_count == {"tenant_1": 0, "tenant_2": 11}  # 14 - 3
        assert get_lru_texts_from_tree(tree, "tenant_2") == ["", "omas", "th", "hello"]
        # Eviction 3 (tenant_2): min_remove_size=1. "omas"(ts3), "th"(ts3), "hello"(ts3) removed.
        evicted_3 = tree.evict_tenant_by_lru("tenant_2", 1)
        assert evicted_3 == 11  # 4+2+5 chars
        assert tree.tenant_to_char_count == {"tenant_1": 0, "tenant_2": 0}
        assert get_lru_texts_from_tree(tree, "tenant_2") == [""]
@pytest.mark.asyncio
class TestPrefixTreeActorComprehensive:
"""Comprehensive tests for the PrefixTreeActor"""
async def test_tree_structure_multiple_insertions_actor(
self, tree_actor: PrefixTreeActor
) -> None:
# Add tenants and insert strings in specified order
ray.get(tree_actor.add_tenants.remote(["tenant_1", "tenant_2"], 0))
ray.get(tree_actor.insert.remote("helloworld", "tenant_1", 1))
ray.get(tree_actor.insert.remote("hellothere", "tenant_2", 2))
ray.get(tree_actor.insert.remote("hellothomas", "tenant_2", 3))
assert await get_lru_texts_from_tree_actor(tree_actor, "tenant_1") == [
"",
"hello",
"world",
]
# Access tree directly
root: Node = ray.get(tree_actor.getattr.remote("root"))
# Test tree structure - validate each node
# Root node
assert root.text == ""
assert root.parent is None
assert root.tenant_to_last_access_time == {"tenant_1": 1, "tenant_2": 3}
assert set(root.edge_label_to_child.keys()) == {"h"}
# Hello node
hello_node: Node = root.edge_label_to_child["h"]
assert hello_node.text == "hello"
assert hello_node.parent.text == ""
assert hello_node.tenant_to_last_access_time == {"tenant_1": 1, "tenant_2": 3}
assert set(hello_node.edge_label_to_child.keys()) == {"w", "t"}
# World node
world_node: Node = hello_node.edge_label_to_child["w"]
assert world_node.text == "world"
assert world_node.parent.text == "hello"
assert world_node.tenant_to_last_access_time == {"tenant_1": 1}
assert set(world_node.edge_label_to_child.keys()) == set()
# Th node
th_node: Node = hello_node.edge_label_to_child["t"]
assert th_node.text == "th"
assert th_node.parent.text == "hello"
assert th_node.tenant_to_last_access_time == {"tenant_2": 3}
assert set(th_node.edge_label_to_child.keys()) == {"e", "o"}
# Ere node
ere_node: Node = th_node.edge_label_to_child["e"]
assert ere_node.text == "ere"
assert ere_node.parent.text == "th"
assert ere_node.tenant_to_last_access_time == {"tenant_2": 2}
assert set(ere_node.edge_label_to_child.keys()) == set()
# Omas node
omas_node: Node = th_node.edge_label_to_child["o"]
assert omas_node.text == "omas"
assert omas_node.parent.text == "th"
assert omas_node.tenant_to_last_access_time == {"tenant_2": 3}
assert set(omas_node.edge_label_to_child.keys()) == set()
async def test_multiple_evictions_maintains_lru_order_actor(
    self, tree_actor: PrefixTreeActor
) -> None:
    """Test multiple evictions maintain LRU order.

    Inserts three strings across two tenants (with increasing access
    timestamps), then evicts repeatedly, checking the evicted char counts,
    the per-tenant char counters, and the remaining LRU node ordering.
    """
    # Add tenants and insert test data
    ray.get(tree_actor.add_tenants.remote(["tenant_1", "tenant_2"], 0))
    ray.get(tree_actor.insert.remote("helloworld", "tenant_1", 1))
    ray.get(tree_actor.insert.remote("hellothere", "tenant_2", 2))
    ray.get(tree_actor.insert.remote("hellothomas", "tenant_2", 3))
    assert ray.get(tree_actor.getattr.remote("tenant_to_char_count")) == {
        "tenant_1": 10,
        "tenant_2": 14,
    }
    assert await get_lru_texts_from_tree_actor(tree_actor, "tenant_1") == [
        "",
        "hello",
        "world",
    ]
    assert await get_lru_texts_from_tree_actor(tree_actor, "tenant_2") == [
        "",
        "omas",
        "th",
        "hello",
        "ere",
    ]
    # Eviction 1 (tenant_1): min_remove_size=1. "hello" and "world" removed.
    evicted_1 = ray.get(tree_actor.evict_tenant_by_lru.remote("tenant_1", 1))
    assert evicted_1 == 10
    assert ray.get(tree_actor.getattr.remote("tenant_to_char_count")) == {
        "tenant_1": 0,
        "tenant_2": 14,
    }
    assert await get_lru_texts_from_tree_actor(tree_actor, "tenant_1") == [""]
    assert await get_lru_texts_from_tree_actor(tree_actor, "tenant_2") == [
        "",
        "omas",
        "th",
        "hello",
        "ere",
    ]  # T2 unchanged
    # Eviction 2 (tenant_2): min_remove_size=1. "ere" is oldest timestamp, removed.
    evicted_2 = ray.get(tree_actor.evict_tenant_by_lru.remote("tenant_2", 1))
    assert evicted_2 == 3  # "ere" is 3 chars
    assert ray.get(tree_actor.getattr.remote("tenant_to_char_count")) == {
        "tenant_1": 0,
        "tenant_2": 11,
    }  # 14 - 3
    assert await get_lru_texts_from_tree_actor(tree_actor, "tenant_2") == [
        "",
        "omas",
        "th",
        "hello",
    ]
    # Eviction 3 (tenant_2): min_remove_size=1. "omas"(ts3), "th"(ts3), "hello"(ts3) removed.
    evicted_3 = ray.get(tree_actor.evict_tenant_by_lru.remote("tenant_2", 1))
    assert evicted_3 == 11  # 4+2+5 chars
    assert ray.get(tree_actor.getattr.remote("tenant_to_char_count")) == {
        "tenant_1": 0,
        "tenant_2": 0,
    }
    assert await get_lru_texts_from_tree_actor(tree_actor, "tenant_2") == [""]
@pytest.mark.asyncio
class TestPrefixTreeActorEvictionLoop:
    """Tests for the automatic eviction loop in PrefixTreeActor"""

    # NOTE(review): these tests rely on wall-clock sleeps (interval + buffer)
    # to let the background loop fire; they may be flaky on very slow machines.

    async def test_eviction_loop_triggers_automatically(
        self, tree_actor: PrefixTreeActor
    ) -> None:
        """Test that the eviction loop automatically evicts data when threshold is exceeded."""
        # Set up eviction parameters
        eviction_threshold = 10  # Low threshold for testing
        eviction_target = 8  # Target to evict down to
        interval_secs = 0.1  # Short interval for testing
        # Start the eviction loop
        ray.get(
            tree_actor.start_eviction_loop.remote(
                eviction_threshold, eviction_target, interval_secs
            )
        )
        # Add tenant and insert data over the threshold
        ray.get(tree_actor.add_tenants.remote(["tenant_1"], 0))
        ray.get(tree_actor.insert.remote("hello", "tenant_1", 1))  # 5 chars
        ray.get(
            tree_actor.insert.remote("excess", "tenant_1", 2)
        )  # 6 more chars, total: 11
        # Verify initial count
        assert ray.get(tree_actor.getattr.remote("tenant_to_char_count")) == {
            "tenant_1": 11
        }
        # Wait for eviction loop to run (interval + small buffer)
        await asyncio.sleep(interval_secs + 0.2)
        # Verify data was automatically evicted down to target (8 chars)
        # The eviction should have removed 5 chars, so we should be at 6, which is <= 8
        char_count = ray.get(tree_actor.getattr.remote("tenant_to_char_count"))
        assert char_count["tenant_1"] == 6

    async def test_eviction_loop_multiple_tenants(
        self, tree_actor: PrefixTreeActor
    ) -> None:
        """Test that eviction loop evicts from each tenant that exceeds the threshold."""
        # Set up eviction parameters
        eviction_threshold = 10
        eviction_target = 8
        interval_secs = 0.1
        # Start the eviction loop
        ray.get(
            tree_actor.start_eviction_loop.remote(
                eviction_threshold, eviction_target, interval_secs
            )
        )
        # Add two tenants with data over threshold
        ray.get(tree_actor.add_tenants.remote(["tenant_1", "tenant_2"], 0))
        ray.get(tree_actor.insert.remote("hello", "tenant_1", 1))  # 5 chars
        ray.get(
            tree_actor.insert.remote("excess", "tenant_1", 2)
        )  # 6 more chars, total: 11
        ray.get(tree_actor.insert.remote("bigstring", "tenant_2", 3))  # 9 chars
        ray.get(
            tree_actor.insert.remote("more", "tenant_2", 4)
        )  # 4 more chars, total: 13
        # Verify initial counts
        initial_count = ray.get(tree_actor.getattr.remote("tenant_to_char_count"))
        assert initial_count["tenant_1"] == 11
        assert initial_count["tenant_2"] == 13
        # Wait for eviction loop to run
        await asyncio.sleep(interval_secs + 0.2)
        # Verify both tenants were evicted to target
        char_count = ray.get(tree_actor.getattr.remote("tenant_to_char_count"))
        # Tenant 1 should have "hello" evicted, so 11 - 5 = 6
        assert char_count["tenant_1"] == 6
        # Tenant 2 should have "bigstring" evicted, so 13 - 9 = 4
        assert char_count["tenant_2"] == 4

    async def test_eviction_loop_respects_threshold(
        self, tree_actor: PrefixTreeActor
    ) -> None:
        """Test that eviction loop only evicts tenants that exceed the threshold."""
        # Set up eviction parameters
        eviction_threshold = 10
        eviction_target = 8
        interval_secs = 0.1
        # Start the eviction loop
        ray.get(
            tree_actor.start_eviction_loop.remote(
                eviction_threshold, eviction_target, interval_secs
            )
        )
        # Add two tenants - one over threshold, one under
        ray.get(tree_actor.add_tenants.remote(["over_tenant", "under_tenant"], 0))
        ray.get(tree_actor.insert.remote("hello", "over_tenant", 1))  # 5 chars
        ray.get(
            tree_actor.insert.remote("excess", "over_tenant", 2)
        )  # 6 more chars, total: 11
        ray.get(tree_actor.insert.remote("small", "under_tenant", 3))  # 5 chars
        # Verify initial counts
        initial_count = ray.get(tree_actor.getattr.remote("tenant_to_char_count"))
        assert initial_count["over_tenant"] == 11
        assert initial_count["under_tenant"] == 5
        # Wait for eviction loop to run
        await asyncio.sleep(interval_secs + 0.2)
        # Verify only the tenant over threshold was evicted
        char_count = ray.get(tree_actor.getattr.remote("tenant_to_char_count"))
        # Tenant 1 should have "hello" evicted, so 11 - 5 = 6
        assert char_count["over_tenant"] == 6
        # Tenant 2 should be unchanged
        assert char_count["under_tenant"] == 5

    async def test_eviction_loop_can_be_started_multiple_times(
        self, tree_actor: PrefixTreeActor
    ) -> None:
        """Test that only the first call to start_eviction_loop starts a new loop."""
        # Call start_eviction_loop multiple times
        eviction_task_1 = ray.get(tree_actor.start_eviction_loop.remote(10, 8, 0.1))
        eviction_task_2 = ray.get(tree_actor.start_eviction_loop.remote(10, 0, 0.1))
        # Only the first call may report that it started a loop.
        assert eviction_task_1 and not eviction_task_2
        # Add tenant and insert data over the threshold
        ray.get(tree_actor.add_tenants.remote(["tenant_1"], 0))
        ray.get(tree_actor.insert.remote("hello", "tenant_1", 1))  # 5 chars
        ray.get(
            tree_actor.insert.remote("excess", "tenant_1", 2)
        )  # 6 more chars, total: 11
        # Wait for eviction loop to run
        await asyncio.sleep(0.3)
        # Verify the first eviction_target_chars is respected.
        # Should evict "hello" to bring the char count down from 11 to 6.
        char_count = ray.get(tree_actor.getattr.remote("tenant_to_char_count"))
        assert char_count["tenant_1"] == 6
if __name__ == "__main__":
    # Run this module's tests verbosely and propagate pytest's status code
    # as the process exit status.
    import sys

    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/tests/serve/cpu/deployments/test_prefix_tree.py",
"license": "Apache License 2.0",
"lines": 840,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/_internal/savemode.py | from enum import Enum
from ray.util.annotations import PublicAPI
@PublicAPI(stability="alpha")
class SaveMode(str, Enum):
    """Enum of possible modes for saving/writing data.

    Members:
        APPEND: Add new data without modifying existing data.
        OVERWRITE: Replace all existing data with new data.
        IGNORE: Don't write if data already exists.
        ERROR: Raise an error if data already exists.
        UPSERT: Update existing rows that match on key fields, or insert new
            rows. Requires identifier/key fields to be specified.
    """

    # The original code placed bare string literals *before* each member as
    # pseudo-docstrings; those are no-op statements and are mis-associated by
    # doc tooling, so the per-member docs now live in the class docstring and
    # in comments here.
    APPEND = "append"
    OVERWRITE = "overwrite"
    IGNORE = "ignore"
    ERROR = "error"
    UPSERT = "upsert"
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/savemode.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:python/ray/data/collate_fn.py | import abc
from concurrent.futures import ThreadPoolExecutor
from typing import (
TYPE_CHECKING,
Any,
Dict,
Generic,
List,
Mapping,
Optional,
Tuple,
TypeVar,
Union,
)
import numpy as np
from ray._common.utils import env_integer
from ray.util.annotations import DeveloperAPI
if TYPE_CHECKING:
import pandas
import pyarrow
import torch
from ray.data.block import DataBatch
from ray.data.dataset import CollatedData, TorchDeviceType
DataBatchType = TypeVar("DataBatchType", bound="DataBatch")
TensorSequenceType = Union[
List["torch.Tensor"],
Tuple["torch.Tensor", ...],
]
TensorBatchType = Union[
"torch.Tensor",
TensorSequenceType,
# For nested sequences of tensors, the inner sequence of tensors is combined during
# GPU transfer in `move_tensors_to_device`.
List[TensorSequenceType],
Tuple[TensorSequenceType, ...],
Mapping[str, "torch.Tensor"],
# For mapping (e.g., dict) of keys to sequences of tensors, the sequence of tensors
# is combined during GPU transfer in `move_tensors_to_device`.
Mapping[str, TensorSequenceType],
]
def _is_tensor(batch: Any) -> bool:
    """Return True iff ``batch`` is a single ``torch.Tensor``."""
    # Import lazily so importing this module does not require torch.
    import torch

    is_single_tensor = isinstance(batch, torch.Tensor)
    return is_single_tensor
def _is_tensor_sequence(batch: Any) -> bool:
    """Check if a batch is a sequence of torch.Tensors.

    >>> import torch
    >>> _is_tensor_sequence(torch.ones(1))
    False
    >>> _is_tensor_sequence([torch.ones(1), torch.ones(1)])
    True
    >>> _is_tensor_sequence((torch.ones(1), torch.ones(1)))
    True
    >>> _is_tensor_sequence([torch.ones(1), 1])
    False
    """
    # Guard clause: only lists/tuples qualify as sequences here.
    if not isinstance(batch, (list, tuple)):
        return False
    return all(_is_tensor(element) for element in batch)
def _is_nested_tensor_sequence(batch: Any) -> bool:
    """Check if a batch is a sequence of sequences of torch.Tensors.

    Stops at one level of nesting.

    >>> import torch
    >>> _is_nested_tensor_sequence([torch.ones(1), torch.ones(1)])
    False
    >>> _is_nested_tensor_sequence(
    ...     ([torch.ones(1), torch.ones(1)], [torch.ones(1)])
    ... )
    True
    """
    # Guard clause: the outer container must itself be a list or tuple.
    if not isinstance(batch, (list, tuple)):
        return False
    return all(_is_tensor_sequence(inner) for inner in batch)
def _is_tensor_mapping(batch: Any) -> bool:
    """Check if a batch is a mapping of keys to torch.Tensors.

    >>> import torch
    >>> _is_tensor_mapping({"a": torch.ones(1), "b": torch.ones(1)})
    True
    >>> _is_tensor_mapping({"a": torch.ones(1), "b": [torch.ones(1), torch.ones(1)]})
    False
    """
    if not isinstance(batch, Mapping):
        return False
    return all(_is_tensor(value) for value in batch.values())
def _is_tensor_sequence_mapping(batch: Any) -> bool:
    """Check if a batch is a mapping of keys to sequences of torch.Tensors.

    >>> import torch
    >>> _is_tensor_sequence_mapping({"a": torch.ones(1), "b": torch.ones(1)})
    False
    >>> _is_tensor_sequence_mapping(
    ...     {"a": (torch.ones(1), torch.ones(1)), "b": [torch.ones(1), torch.ones(1)]}
    ... )
    True
    """
    if not isinstance(batch, Mapping):
        return False
    return all(_is_tensor_sequence(value) for value in batch.values())
@DeveloperAPI
def is_tensor_batch_type(batch: Any) -> bool:
    """Check if a batch matches any of the TensorBatchType variants.

    This function checks if the input batch is one of the following types:

    1. A single torch.Tensor
    2. A sequence of torch.Tensors
    3. A sequence of sequences of torch.Tensors
    4. A mapping (e.g., dict) of keys to torch.Tensors
    5. A mapping (e.g., dict) of keys to sequences of torch.Tensors

    Args:
        batch: The input batch to check. Can be any type.

    Returns:
        bool: True if the batch matches any TensorBatchType variant, False otherwise.
    """
    # Equivalent to an or-chain over the predicates; any() short-circuits the
    # same way the original chained `or` expression did.
    variant_checks = (
        _is_tensor,
        _is_tensor_sequence,
        _is_nested_tensor_sequence,
        _is_tensor_mapping,
        _is_tensor_sequence_mapping,
    )
    return any(check(batch) for check in variant_checks)
# Shapes a collate_fn's output may take after conversion to torch tensors.
TensorBatchReturnType = Union[
    "torch.Tensor",
    Tuple["torch.Tensor", ...],
    Dict[str, "torch.Tensor"],
]
@DeveloperAPI
class CollateFn(Generic[DataBatchType]):
    """Abstract interface for collate_fn for `iter_torch_batches`. See doc-string of
    `collate_fn` in `iter_torch_batches` API for more details.

    Subclasses fix the input batch format via the `DataBatchType` parameter
    (e.g. Arrow table, numpy dict, pandas DataFrame).
    """

    @abc.abstractmethod
    def __call__(self, batch: DataBatchType) -> "CollatedData":
        """Convert a batch of data to collated format.

        Args:
            batch: The input batch to collate.

        Returns:
            The collated data in the format expected by the model.
        """
        ...
@DeveloperAPI
class ArrowBatchCollateFn(CollateFn["pyarrow.Table"]):
    """Collate function that takes pyarrow.Table as the input batch type.

    Arrow tables with chunked arrays can be efficiently transferred to GPUs without
    combining the chunks with the `arrow_batch_to_tensors` utility function.
    See `DefaultCollateFn` for example.
    """

    # NOTE(review): `__call__` is not re-marked @abc.abstractmethod here; the
    # `...` body means subclasses are still expected to override it.
    def __call__(self, batch: "pyarrow.Table") -> "CollatedData":
        """Convert a batch of pyarrow.Table to collated format.

        Args:
            batch: The input pyarrow.Table batch to collate.

        Returns:
            The collated data in the format expected by the model.
        """
        ...
@DeveloperAPI
class NumpyBatchCollateFn(CollateFn[Dict[str, np.ndarray]]):
    """Collate function that takes a dictionary of numpy arrays as the input batch type."""

    # Subclasses override; the `...` body is a stub, not an implementation.
    def __call__(self, batch: Dict[str, np.ndarray]) -> "CollatedData":
        """Convert a batch of numpy arrays to collated format.

        Args:
            batch: The input dictionary of numpy arrays batch to collate.

        Returns:
            The collated data in the format expected by the model.
        """
        ...
@DeveloperAPI
class PandasBatchCollateFn(CollateFn["pandas.DataFrame"]):
    """Collate function that takes a pandas.DataFrame as the input batch type."""

    # Subclasses override; the `...` body is a stub, not an implementation.
    def __call__(self, batch: "pandas.DataFrame") -> "CollatedData":
        """Convert a batch of pandas.DataFrame to collated format.

        Args:
            batch: The input pandas.DataFrame batch to collate.

        Returns:
            The collated data in the format expected by the model.
        """
        ...
@DeveloperAPI
class DefaultCollateFn(ArrowBatchCollateFn):
    """Default collate function for converting Arrow batches to PyTorch tensors."""

    # Thread-pool size for parallel column->tensor conversion; overridable via
    # the RAY_DATA_DEFAULT_COLLATE_FN_THREADPOOL_MAX_WORKERS env var.
    _DEFAULT_NUM_WORKERS = env_integer(
        "RAY_DATA_DEFAULT_COLLATE_FN_THREADPOOL_MAX_WORKERS",
        4,
    )

    def __init__(
        self,
        dtypes: Optional[Union["torch.dtype", Dict[str, "torch.dtype"]]] = None,
        device: Optional["TorchDeviceType"] = None,
        pin_memory: bool = False,
        num_workers: int = _DEFAULT_NUM_WORKERS,
    ):
        """Initialize the collate function.

        Args:
            dtypes: The torch dtype(s) for the created tensor(s); if None, the dtype
                will be inferred from the tensor data.
            device: The device on which the tensor should be placed. Can be a string
                (e.g. "cpu", "cuda:0") or a torch.device object.
            pin_memory: Whether to pin the memory of the created tensors.
            num_workers: Number of worker threads for parallel tensor conversion.
                Defaults to `RAY_DATA_DEFAULT_COLLATE_FN_THREADPOOL_MAX_WORKERS`.
        """
        import torch

        super().__init__()
        self.dtypes = dtypes
        # Normalize string/int device specs (e.g. "cuda:0", 0) to torch.device.
        if isinstance(device, (str, int)):
            self.device = torch.device(device)
        else:
            self.device = device
        self.pin_memory = pin_memory
        self.num_workers = num_workers
        # Created lazily on first __call__ (see below), not in the constructor.
        self._threadpool: Optional[ThreadPoolExecutor] = None

    def __del__(self):
        """Clean up threadpool on destruction."""
        # getattr guard: __del__ may run even if __init__ did not complete.
        if getattr(self, "_threadpool", None):
            self._threadpool.shutdown(wait=False)

    def __call__(
        self, batch: "pyarrow.Table"
    ) -> Union[Dict[str, "torch.Tensor"], Dict[str, List["torch.Tensor"]]]:
        """Convert an Arrow batch to PyTorch tensors.

        Args:
            batch: PyArrow Table to convert

        Returns:
            Dictionary mapping column names to lists of tensors
        """
        from ray.data.util.torch_utils import (
            arrow_batch_to_tensors,
        )

        # Lazily create the thread pool on first use.
        if self.num_workers > 0 and self._threadpool is None:
            self._threadpool = ThreadPoolExecutor(max_workers=self.num_workers)
        # For GPU transfer, we can skip the combining chunked arrays. This is because
        # we can convert the chunked arrays to corresponding numpy format and then to
        # Tensors and transfer the corresponding list of Tensors to GPU directly.
        # However, for CPU transfer, we need to combine the chunked arrays first
        # before converting to numpy format and then to Tensors.
        combine_chunks = self.device is not None and self.device.type == "cpu"
        return arrow_batch_to_tensors(
            batch,
            dtypes=self.dtypes,
            combine_chunks=combine_chunks,
            pin_memory=self.pin_memory,
            threadpool=self._threadpool,
        )
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/collate_fn.py",
"license": "Apache License 2.0",
"lines": 235,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ray-project/ray:python/ray/train/tests/test_iter_torch_batches_gpu.py | from typing import Dict, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
import torch
import ray
import ray.train.torch
from ray.data.iterator import (
ArrowBatchCollateFn,
NumpyBatchCollateFn,
PandasBatchCollateFn,
)
from ray.data.util.torch_utils import (
arrow_batch_to_tensors,
convert_ndarray_batch_to_torch_tensor_batch,
)
@pytest.fixture(scope="module")
def ray_start_4_cpus_1_gpu():
    # Module-scoped Ray cluster with 4 CPUs and 1 GPU; shut down after the
    # whole module's tests finish.
    address_info = ray.init(num_cpus=4, num_gpus=1)
    yield address_info
    ray.shutdown()
def _chunk_table_in_half(table: pa.Table) -> pa.Table:
    """Rebuild ``table`` so every column is a two-chunk ChunkedArray.

    Each column is split at the midpoint row; schema and row order are
    preserved, only the physical chunking changes.
    """
    split_at = table.num_rows // 2
    chunked_columns = [
        pa.chunked_array(
            [column.slice(0, split_at), column.slice(split_at)], type=column.type
        )
        for column in table.itercolumns()
    ]
    return pa.Table.from_arrays(chunked_columns, schema=table.schema)
class SingleTensorArrowBatchCollateFn(ArrowBatchCollateFn):
    """Collate function that returns only the id column as a tensor."""

    def __call__(self, batch: pa.Table) -> torch.Tensor:
        """Convert the table to tensors and return just the "id" column."""
        assert isinstance(batch, pa.Table)
        converted = arrow_batch_to_tensors(batch, combine_chunks=True)
        return converted["id"]
class TupleArrowBatchCollateFn(ArrowBatchCollateFn):
    """Collate function that returns id and value as a tuple of tensors."""

    def __call__(self, batch: pa.Table) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return the ("id", "value") columns as a pair of tensors."""
        assert isinstance(batch, pa.Table)
        converted = arrow_batch_to_tensors(batch, combine_chunks=True)
        return (converted["id"], converted["value"])
class ListArrowBatchCollateFn(TupleArrowBatchCollateFn):
    """Collate function that returns id and value as a list of tensors."""

    def __call__(self, batch: pa.Table) -> List[torch.Tensor]:
        # Reuse the tuple implementation; only the container type differs.
        id_tensor, value_tensor = super().__call__(batch)
        return [id_tensor, value_tensor]
class DictArrowBatchCollateFn(ArrowBatchCollateFn):
    """Collate function that returns id and value as a dictionary of tensors."""

    def __call__(self, batch: pa.Table) -> Dict[str, torch.Tensor]:
        """Convert every column of the table to a tensor keyed by column name."""
        assert isinstance(batch, pa.Table)
        converted = arrow_batch_to_tensors(batch, combine_chunks=True)
        return converted
class ChunkedDictArrowBatchCollateFn(ArrowBatchCollateFn):
    """Collate function that returns id and value as a dictionary of chunked tensors."""

    def __call__(self, batch: pa.Table) -> Dict[str, List[torch.Tensor]]:
        assert isinstance(batch, pa.Table)
        # Re-chunk every column into two pieces, then convert without combining
        # so each column maps to a list of per-chunk tensors.
        rechunked = _chunk_table_in_half(batch)
        return arrow_batch_to_tensors(rechunked, combine_chunks=False)
class SingleTensorNumpyBatchCollateFn(NumpyBatchCollateFn):
    """Collate function that returns only the id array as a tensor."""

    def __call__(self, batch: Dict[str, np.ndarray]) -> torch.Tensor:
        """Convert the batch to tensors and return just the "id" entry."""
        assert isinstance(batch, dict)
        converted = convert_ndarray_batch_to_torch_tensor_batch(batch)
        return converted["id"]
class TupleNumpyBatchCollateFn(NumpyBatchCollateFn):
    """Collate function that returns id and value as a tuple of tensors."""

    def __call__(
        self, batch: Dict[str, np.ndarray]
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        assert isinstance(batch, dict)
        converted = convert_ndarray_batch_to_torch_tensor_batch(batch)
        return (converted["id"], converted["value"])
class ListNumpyBatchCollateFn(TupleNumpyBatchCollateFn):
    """Collate function that returns id and value as a list of tensors."""

    def __call__(self, batch: Dict[str, np.ndarray]) -> List[torch.Tensor]:
        # Delegate to the tuple variant and repackage as a list.
        id_tensor, value_tensor = super().__call__(batch)
        return [id_tensor, value_tensor]
class DictNumpyBatchCollateFn(NumpyBatchCollateFn):
    """Collate function that returns id and value as a dictionary of tensors."""

    def __call__(self, batch: Dict[str, np.ndarray]) -> Dict[str, torch.Tensor]:
        assert isinstance(batch, dict)
        # Every key in the numpy batch becomes a tensor under the same key.
        return convert_ndarray_batch_to_torch_tensor_batch(batch)
class BasePandasBatchCollateFn(PandasBatchCollateFn):
    """Base class for Pandas batch collate functions that process and convert to tensors.

    This class provides common functionality for processing Pandas DataFrames and converting
    them to PyTorch tensors. It handles device placement and dtype conversion.

    Attributes:
        device: Optional device to place tensors on. Can be a string (e.g. "cpu", "cuda:0")
            or a torch.device object.
    """

    device: Optional[str]

    def __init__(
        self,
        device: Optional[Union[str, torch.device]] = None,
    ) -> None:
        super().__init__()
        # Normalize string device specs to torch.device; pass through otherwise.
        if isinstance(device, str):
            self.device = torch.device(device)
        else:
            self.device = device

    def _process_batch(self, batch: pd.DataFrame) -> pd.DataFrame:
        """Process the batch by adding 5 to the id column.

        Args:
            batch: Input Pandas DataFrame.

        Returns:
            A new DataFrame with modified "id" column and original "value" column.
        """
        return pd.DataFrame({"id": batch["id"] + 5, "value": batch["id"]})

    def _get_tensors(self, batch: pd.DataFrame) -> Dict[str, torch.Tensor]:
        """Convert batch to tensors.

        Args:
            batch: Input Pandas DataFrame to convert to tensors.

        Returns:
            Dictionary mapping column names to PyTorch tensors.
        """
        # NOTE(review): device=None is hard-coded here, so the device stored in
        # __init__ is never used by this helper — confirm whether intentional.
        return convert_ndarray_batch_to_torch_tensor_batch(
            batch.to_dict("series"), dtypes=None, device=None
        )
class SingleTensorPandasBatchCollateFn(PandasBatchCollateFn):
    """Collate function that returns only the id column as a tensor."""

    def __call__(self, batch: pd.DataFrame) -> torch.Tensor:
        series_dict = batch.to_dict("series")
        converted = convert_ndarray_batch_to_torch_tensor_batch(series_dict)
        return converted["id"]
class TuplePandasBatchCollateFn(PandasBatchCollateFn):
    """Collate function that returns id and value as a tuple of tensors."""

    def __call__(self, batch: pd.DataFrame) -> Tuple[torch.Tensor, torch.Tensor]:
        series_dict = batch.to_dict("series")
        converted = convert_ndarray_batch_to_torch_tensor_batch(series_dict)
        return (converted["id"], converted["value"])
class ListPandasBatchCollateFn(TuplePandasBatchCollateFn):
    """Collate function that returns id and value as a list of tensors."""

    def __call__(self, batch: pd.DataFrame) -> List[torch.Tensor]:
        # Same tensors as the tuple variant, packaged as a list.
        id_tensor, value_tensor = super().__call__(batch)
        return [id_tensor, value_tensor]
class DictPandasBatchCollateFn(PandasBatchCollateFn):
    """Collate function that returns id and value as a dictionary of tensors."""

    def __call__(self, batch: pd.DataFrame) -> Dict[str, torch.Tensor]:
        # Convert the frame column-wise; dict keys are the column names.
        series_dict = batch.to_dict("series")
        return convert_ndarray_batch_to_torch_tensor_batch(series_dict)
@pytest.fixture
def collate_fn_map():
    """Fixture that provides Arrow, Numpy, Pandas custom collate functions.

    Keyed first by batch type ("arrow"/"numpy"/"pandas"), then by return
    shape; `None` entries mean "use the iterator's default collate_fn".
    """
    return {
        "arrow": {
            "default": None,
            "single": SingleTensorArrowBatchCollateFn(),
            "tuple": TupleArrowBatchCollateFn(),
            "list": ListArrowBatchCollateFn(),
            "dict": DictArrowBatchCollateFn(),
            "chunked_dict": ChunkedDictArrowBatchCollateFn(),
        },
        "numpy": {
            "single": SingleTensorNumpyBatchCollateFn(),
            "tuple": TupleNumpyBatchCollateFn(),
            "dict": DictNumpyBatchCollateFn(),
            "list": ListNumpyBatchCollateFn(),
        },
        "pandas": {
            "single": SingleTensorPandasBatchCollateFn(),
            "tuple": TuplePandasBatchCollateFn(),
            "dict": DictPandasBatchCollateFn(),
            "list": ListPandasBatchCollateFn(),
        },
    }
@pytest.mark.parametrize("collate_batch_type", ["arrow", "numpy", "pandas"])
@pytest.mark.parametrize(
    "return_type", ["single", "tuple", "dict", "list", "chunked_dict", "default"]
)
@pytest.mark.parametrize("device", ["cpu", "cuda:0"])
@pytest.mark.parametrize("pin_memory", [True, False])
def test_custom_batch_collate_fn(
    ray_start_4_cpus_1_gpu,
    monkeypatch,
    collate_batch_type,
    return_type,
    device,
    collate_fn_map,
    pin_memory,
):
    """Tests that custom batch collate functions can be used to modify
    the batch before it is converted to a PyTorch tensor.

    Note that the collate_fn doesn't move the tensors to the device --
    that happens in the iterator (finalize_fn).
    """
    # Skip GPU tests if CUDA is not available
    if device == "cuda:0" and not torch.cuda.is_available():
        pytest.skip("CUDA is not available")
    # Skip pin_memory tests if CUDA is not available
    if pin_memory and not torch.cuda.is_available():
        pytest.skip("pin_memory is set to True, but CUDA is not available.")
    # Skip tests if pin_memory is set to True and the collate function is not the
    # DefaultCollateFn.
    if pin_memory and not (collate_batch_type == "arrow" and return_type == "default"):
        pytest.skip(
            "pin_memory is set to True, but the collate function is not the DefaultCollateFn."
        )
    collate_fn = collate_fn_map[collate_batch_type].get(return_type)
    if collate_fn is None:
        pytest.skip(
            f"Collate function not found for ({collate_batch_type}, {return_type})"
        )
    # Set the device that's returned by device="auto" -> get_device()
    # This is used in `finalize_fn` to move the tensors to the correct device.
    device = torch.device(device)
    monkeypatch.setattr(ray.train.utils, "_in_ray_train_worker", lambda: True)
    monkeypatch.setattr(ray.train.torch, "get_device", lambda: device)
    # Five rows; "id" is shifted by 5 relative to "value" so the two columns
    # are distinguishable in the assertions below.
    ds = ray.data.from_items(
        [{"id": i + 5, "value": i} for i in range(5)],
    )
    it = ds.iterator()
    for batch in it.iter_torch_batches(collate_fn=collate_fn, pin_memory=pin_memory):
        if return_type == "single":
            assert isinstance(batch, torch.Tensor)
            assert sorted(batch.tolist()) == list(range(5, 10))
            assert batch.device == device
            if pin_memory and device.type == "cpu":
                assert batch.is_pinned()
        elif return_type == "dict" or return_type == "chunked_dict":
            # Chunked dicts get concatenated to single Tensors on the device,
            # so the assertions are shared with the dict case.
            assert isinstance(batch, dict)
            assert sorted(batch["id"].tolist()) == list(range(5, 10))
            assert sorted(batch["value"].tolist()) == list(range(5))
            assert batch["id"].device == device
            assert batch["value"].device == device
            if pin_memory and device.type == "cpu":
                assert batch["id"].is_pinned()
                assert batch["value"].is_pinned()
        else:  # tuple or list
            assert isinstance(batch, (tuple, list))
            assert len(batch) == 2
            assert sorted(batch[0].tolist()) == list(range(5, 10))
            assert sorted(batch[1].tolist()) == list(range(5))
            assert batch[0].device == device
            assert batch[1].device == device
            if pin_memory and device.type == "cpu":
                assert batch[0].is_pinned()
                assert batch[1].is_pinned()
if __name__ == "__main__":
    import sys

    # Surface pytest's return code as the process exit status.
    status = pytest.main(["-v", __file__])
    sys.exit(status)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/tests/test_iter_torch_batches_gpu.py",
"license": "Apache License 2.0",
"lines": 249,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/data/tests/test_ref_bundle.py | from unittest.mock import patch
import pytest
from ray import ObjectRef
from ray.data._internal.execution.interfaces import BlockSlice, RefBundle
from ray.data.block import BlockMetadata
def test_get_preferred_locations():
    """get_preferred_object_locations() sums object sizes per node holding them."""
    first_block_ref = ObjectRef(b"1" * 28)
    second_block_ref = ObjectRef(b"2" * 28)
    third_block_ref = ObjectRef(b"3" * 28)
    meta = BlockMetadata(num_rows=None, size_bytes=1, exec_stats=None, input_files=None)
    bundle = RefBundle(
        blocks=[
            (first_block_ref, meta),
            (second_block_ref, meta),
            (third_block_ref, meta),
        ],
        owns_blocks=True,
        schema=None,
    )

    def _get_obj_locs(obj_refs):
        # Stub replacing ray.experimental.get_local_object_locations.
        assert obj_refs == [first_block_ref, second_block_ref, third_block_ref]
        return {
            first_block_ref: {
                "object_size": 1024,
                "did_spill": False,
                "node_ids": ["1", "2", "3"],
            },
            second_block_ref: {
                "object_size": 2048,
                "did_spill": False,
                "node_ids": ["2", "3"],
            },
            third_block_ref: {
                "object_size": 4096,
                "did_spill": False,
                "node_ids": ["2"],
            },
        }

    with patch("ray.experimental.get_local_object_locations", _get_obj_locs):
        preferred_object_locs = bundle.get_preferred_object_locations()

    assert {
        "1": 1024,  # first_block_ref
        "2": 7168,  # first_block_ref, second_block_ref, third_block_ref
        "3": 3072,  # first_block_ref, second_block_ref
    } == preferred_object_locs
def test_ref_bundle_num_rows_size_bytes():
    """num_rows()/size_bytes() cover whole blocks, then only the sliced rows."""
    block_ref_one = ObjectRef(b"1" * 28)
    block_ref_two = ObjectRef(b"2" * 28)
    meta_one = BlockMetadata(
        num_rows=10, size_bytes=100, exec_stats=None, input_files=None
    )
    meta_two = BlockMetadata(
        num_rows=5, size_bytes=50, exec_stats=None, input_files=None
    )
    # Before slice
    bundle = RefBundle(
        blocks=[
            (block_ref_one, meta_one),
            (block_ref_two, meta_two),
        ],
        owns_blocks=True,
        schema=None,
    )
    assert bundle.num_rows() == 15
    assert bundle.size_bytes() == 150
    # After slice
    bundle.slices = [
        BlockSlice(start_offset=2, end_offset=6),  # 4 rows
        BlockSlice(start_offset=0, end_offset=2),  # 2 rows
    ]
    assert bundle.num_rows() == 6
    # 60 bytes == 6/15 of the 150-byte total for the 6 sliced rows.
    assert bundle.size_bytes() == 60
@pytest.mark.parametrize(
    "start_offset, end_offset",
    [
        (-1, 0),  # Negative start_offset
        (0, 11),  # end_offset > num_rows
        (1, 0),  # start_offset > end_offset
    ],
)
def test_ref_bundle_with_invalid_slices(start_offset, end_offset):
    """Constructing a RefBundle with an out-of-range slice raises AssertionError."""
    block_ref = ObjectRef(b"1" * 28)
    metadata = BlockMetadata(
        num_rows=10, size_bytes=100, exec_stats=None, input_files=None
    )
    with pytest.raises(AssertionError):
        RefBundle(
            blocks=[(block_ref, metadata)],
            owns_blocks=True,
            schema=None,
            slices=[
                BlockSlice(start_offset=start_offset, end_offset=end_offset),
            ],
        )
def test_slice_ref_bundle_basic():
    """slice(n) splits a bundle into the first n rows and the remainder."""
    block_ref_one = ObjectRef(b"1" * 28)
    block_ref_two = ObjectRef(b"2" * 28)
    meta_one = BlockMetadata(
        num_rows=6, size_bytes=60, exec_stats=None, input_files=None
    )
    meta_two = BlockMetadata(
        num_rows=4, size_bytes=40, exec_stats=None, input_files=None
    )
    bundle = RefBundle(
        blocks=[
            (block_ref_one, meta_one),
            (block_ref_two, meta_two),
        ],
        owns_blocks=True,
        schema="schema",
    )
    consumed, remaining = bundle.slice(8)
    assert consumed.num_rows() == 8
    assert remaining.num_rows() == 2
    # Consumed: all 6 rows of block one, plus the first 2 rows of block two.
    assert consumed.slices == [
        BlockSlice(start_offset=0, end_offset=6),
        BlockSlice(start_offset=0, end_offset=2),
    ]
    # Remaining: the last 2 rows of block two.
    assert remaining.slices == [
        BlockSlice(start_offset=2, end_offset=4),
    ]
def test_slice_ref_bundle_should_raise_error_if_needed_rows_is_not_less_than_num_rows():
    """slice(n) requires n to be strictly less than the bundle's row count."""
    block_ref = ObjectRef(b"1" * 28)
    metadata = BlockMetadata(
        num_rows=5, size_bytes=50, exec_stats=None, input_files=None
    )
    bundle = RefBundle(
        blocks=[(block_ref, metadata)],
        owns_blocks=True,
        schema=None,
    )
    with pytest.raises(AssertionError):
        bundle.slice(5)
def test_slice_ref_bundle_with_existing_slices():
    """Slicing an already-sliced bundle composes offsets relative to the blocks."""
    block_ref_one = ObjectRef(b"1" * 28)
    block_ref_two = ObjectRef(b"2" * 28)
    meta_one = BlockMetadata(
        num_rows=10, size_bytes=100, exec_stats=None, input_files=None
    )
    meta_two = BlockMetadata(
        num_rows=5, size_bytes=50, exec_stats=None, input_files=None
    )
    # Bundle already restricted to rows 2-10 of block one and 0-3 of block two
    # (11 rows total).
    bundle = RefBundle(
        blocks=[
            (block_ref_one, meta_one),
            (block_ref_two, meta_two),
        ],
        owns_blocks=True,
        schema="schema",
        slices=[
            BlockSlice(start_offset=2, end_offset=10),
            BlockSlice(start_offset=0, end_offset=3),
        ],
    )
    consumed, remaining = bundle.slice(7)
    assert consumed.num_rows() == 7
    # Consumed: rows 2-9 of block one (7 rows, 70 of its 100 bytes).
    assert consumed.slices == [
        BlockSlice(start_offset=2, end_offset=9),
    ]
    assert consumed.size_bytes() == 70
    assert remaining.num_rows() == 4
    # Remaining: row 9 of block one plus rows 0-3 of block two.
    assert remaining.slices == [
        BlockSlice(start_offset=9, end_offset=10),
        BlockSlice(start_offset=0, end_offset=3),
    ]
    assert remaining.size_bytes() == 40
@pytest.mark.parametrize(
    "num_rows,slice_rows",
    [
        (5, 0),  # Zero rows requested
        (5, 5),  # Equal to total (must be less than)
        (5, 6),  # More than available
    ],
)
def test_slice_ref_bundle_invalid_rows(num_rows, slice_rows):
    """Test that slicing with invalid row counts raises appropriate errors."""
    block_ref = ObjectRef(b"1" * 28)
    metadata = BlockMetadata(
        num_rows=num_rows, size_bytes=num_rows * 10, exec_stats=None, input_files=None
    )
    bundle = RefBundle(
        blocks=[(block_ref, metadata)],
        owns_blocks=True,
        schema=None,
    )
    with pytest.raises(AssertionError):
        bundle.slice(slice_rows)
def test_ref_bundle_with_none_slices():
    """None entries in `slices` stand for whole (unsliced) blocks."""
    refs = [ObjectRef(b"1" * 28), ObjectRef(b"2" * 28)]
    metas = [
        BlockMetadata(num_rows=10, size_bytes=100, exec_stats=None, input_files=None),
        BlockMetadata(num_rows=5, size_bytes=50, exec_stats=None, input_files=None),
    ]
    bundle = RefBundle(
        blocks=list(zip(refs, metas)),
        owns_blocks=True,
        schema=None,
        slices=[None, None],
    )
    # Full-block semantics: totals are simply the per-block metadata summed.
    assert bundle.num_rows() == 10 + 5
    assert bundle.size_bytes() == 100 + 50
def test_ref_bundle_with_mixed_none_and_explicit_slices():
    """A bundle may mix None (full block) with explicit BlockSlice entries."""
    block_meta_pairs = [
        (
            ObjectRef(b"1" * 28),
            BlockMetadata(num_rows=10, size_bytes=100, exec_stats=None, input_files=None),
        ),
        (
            ObjectRef(b"2" * 28),
            BlockMetadata(num_rows=8, size_bytes=80, exec_stats=None, input_files=None),
        ),
        (
            ObjectRef(b"3" * 28),
            BlockMetadata(num_rows=6, size_bytes=60, exec_stats=None, input_files=None),
        ),
    ]
    bundle = RefBundle(
        blocks=block_meta_pairs,
        owns_blocks=True,
        schema=None,
        slices=[
            None,  # Full block: 10 rows, 100 bytes
            BlockSlice(start_offset=2, end_offset=6),  # 4 rows, ~40 bytes
            None,  # Full block: 6 rows, 60 bytes
        ],
    )
    assert bundle.num_rows() == 10 + 4 + 6
    assert bundle.size_bytes() == 100 + 40 + 60
def test_slice_ref_bundle_with_none_slices():
    """Slicing a bundle whose blocks start out unsliced (None slices)."""
    ref_a, ref_b = ObjectRef(b"1" * 28), ObjectRef(b"2" * 28)
    meta_a = BlockMetadata(
        num_rows=6, size_bytes=60, exec_stats=None, input_files=None
    )
    meta_b = BlockMetadata(
        num_rows=4, size_bytes=40, exec_stats=None, input_files=None
    )
    bundle = RefBundle(
        blocks=[(ref_a, meta_a), (ref_b, meta_b)],
        owns_blocks=True,
        schema="schema",
        slices=[None, None],
    )
    # Take the first 8 of 10 total rows.
    consumed, remaining = bundle.slice(8)
    assert (consumed.num_rows(), remaining.num_rows()) == (8, 2)
    # Slicing materializes the implicit full-block slices as explicit ones.
    assert consumed.slices == [
        BlockSlice(start_offset=0, end_offset=6),
        BlockSlice(start_offset=0, end_offset=2),
    ]
    assert remaining.slices == [
        BlockSlice(start_offset=2, end_offset=4),
    ]
def test_ref_bundle_str():
    """Test the __str__ method returns a readable representation."""
    block_ref_one = ObjectRef(b"1" * 28)
    block_ref_two = ObjectRef(b"2" * 28)
    block_ref_three = ObjectRef(b"3" * 28)
    meta_one = BlockMetadata(
        num_rows=10, size_bytes=100, exec_stats=None, input_files=None
    )
    meta_two = BlockMetadata(
        num_rows=5, size_bytes=50, exec_stats=None, input_files=None
    )
    meta_three = BlockMetadata(
        num_rows=3, size_bytes=30, exec_stats=None, input_files=None
    )
    slice_three = BlockSlice(start_offset=0, end_offset=3)
    bundle = RefBundle(
        blocks=[
            (block_ref_one, meta_one),
            (block_ref_two, meta_two),
            (block_ref_three, meta_three),
        ],
        owns_blocks=True,
        schema="test_schema",
        slices=[None, None, slice_three],
    )
    # NOTE: the expected string is whitespace-sensitive; 18 rows = 10 + 5 + 3.
    # None slices render as "None (full block)".
    expected = """RefBundle(3 blocks,
    18 rows,
    schema=test_schema,
    owns_blocks=True,
    blocks=(
        0: 10 rows, 100 bytes, slice=None (full block)
        1: 5 rows, 50 bytes, slice=None (full block)
        2: 3 rows, 30 bytes, slice=BlockSlice(start_offset=0, end_offset=3)
    )
)"""
    assert str(bundle) == expected
def test_merge_ref_bundles():
    """Merging concatenates blocks/slices and ANDs block ownership."""
    ref_owned = ObjectRef(b"1" * 28)
    ref_borrowed = ObjectRef(b"2" * 28)
    meta_owned = BlockMetadata(
        num_rows=10, size_bytes=100, exec_stats=None, input_files=None
    )
    meta_borrowed = BlockMetadata(
        num_rows=10, size_bytes=10, exec_stats=None, input_files=None
    )
    owning_bundle = RefBundle(
        blocks=[(ref_owned, meta_owned), (ref_owned, meta_owned)],
        owns_blocks=True,
        schema="schema",
        slices=[
            BlockSlice(start_offset=0, end_offset=1),
            BlockSlice(start_offset=1, end_offset=2),
        ],
    )
    borrowing_bundle = RefBundle(
        blocks=[(ref_borrowed, meta_borrowed), (ref_borrowed, meta_borrowed)],
        owns_blocks=False,
        schema="schema",
        slices=[
            BlockSlice(start_offset=2, end_offset=3),
            BlockSlice(start_offset=3, end_offset=4),
        ],
    )
    merged = RefBundle.merge_ref_bundles([owning_bundle, borrowing_bundle])
    assert merged.schema == "schema"
    # Ownership is the conjunction over inputs: a single non-owning bundle
    # makes the merged bundle non-owning as well.
    assert merged.owns_blocks is False
    assert len(merged.blocks) == 4
    # Slices are concatenated in input order.
    assert merged.slices == [
        BlockSlice(start_offset=0, end_offset=1),
        BlockSlice(start_offset=1, end_offset=2),
        BlockSlice(start_offset=2, end_offset=3),
        BlockSlice(start_offset=3, end_offset=4),
    ]
if __name__ == "__main__":
    import sys

    import pytest

    # Allow running this module directly: delegate to pytest and propagate
    # its exit code to the shell.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_ref_bundle.py",
"license": "Apache License 2.0",
"lines": 352,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/llm/tests/serve/cpu/deployments/llm/test_llm_server.py | import asyncio
import sys
import time
from typing import AsyncGenerator, Optional
from unittest.mock import patch
import numpy as np
import pytest
from ray import serve
from ray.llm._internal.serve.core.configs.llm_config import (
LLMConfig,
LoraConfig,
ModelLoadingConfig,
)
from ray.llm._internal.serve.core.server.llm_server import LLMServer
from ray.llm.tests.serve.mocks.mock_vllm_engine import (
FakeLoraModelLoader,
MockVLLMEngine,
)
from ray.llm.tests.serve.utils.testing_utils import LLMResponseValidator
@pytest.fixture
def serve_handle(mock_llm_config, stream_batching_interval_ms=0):
    """Deploy an LLMServer backed by MockVLLMEngine and yield a stream handle.

    Tears the Serve application down after the test. The batching interval is
    injected through the config's ``experimental_configs``.
    """
    mock_llm_config.experimental_configs = {
        "stream_batching_interval_ms": stream_batching_interval_ms,
    }
    app = serve.deployment(LLMServer).bind(mock_llm_config, engine_cls=MockVLLMEngine)
    handle = serve.run(app)
    # We set stream=True because the interfaces are async generators regardless
    # of the stream flag on request.
    handle = handle.options(stream=True)
    yield handle
    serve.shutdown()
@pytest.fixture
def multiplexed_serve_handle(mock_llm_config, stream_batching_interval_ms=0):
    """Like ``serve_handle`` but with multiplexing (LoRA) enabled.

    The returned handle is pinned to multiplexed model id ``test_model_id``
    and uses a fake LoRA downloader so no network/S3 access happens.
    """
    mock_llm_config.experimental_configs = {
        "stream_batching_interval_ms": stream_batching_interval_ms,
    }
    # Set minimal lora_config to enable multiplexing but avoid telemetry S3 calls
    mock_llm_config.lora_config = LoraConfig(
        dynamic_lora_loading_path=None,  # No S3 path = no telemetry S3 calls
        download_timeout_s=60,
        max_download_tries=3,
    )
    app = serve.deployment(LLMServer).bind(
        mock_llm_config,
        engine_cls=MockVLLMEngine,
        model_downloader=FakeLoraModelLoader,
    )
    handle = serve.run(app)
    handle = handle.options(stream=True, multiplexed_model_id="test_model_id")
    yield handle
    serve.shutdown()
async def count_tpot_ms_from_stream(stream: AsyncGenerator) -> list[float]:
    """Drain *stream* and return the time-per-output-token gaps in milliseconds.

    The first item only establishes the reference timestamp, so a stream of
    N items yields N - 1 intervals.
    """
    intervals_ms: list[float] = []
    previous = None
    async for _ in stream:
        current = time.perf_counter()
        if previous is not None:
            intervals_ms.append((current - previous) * 1e3)
        previous = current
    return intervals_ms
class TestLLMServer:
    """End-to-end tests for LLMServer deployed over Ray Serve with a mock engine.

    All tests go through the deployed handle (``serve_handle`` /
    ``multiplexed_serve_handle`` fixtures) except the engine-delegation tests,
    which construct the server directly via ``LLMServer.sync_init``.
    """

    @pytest.mark.parametrize("api_type", ["chat", "completion"])
    @pytest.mark.parametrize("stream", [False, True])
    @pytest.mark.parametrize("max_tokens", [5])
    @pytest.mark.parametrize("stream_batching_interval_ms", [0, 10000])
    @pytest.mark.asyncio
    async def test_unified_llm_server(
        self,
        serve_handle,
        mock_llm_config,
        mock_chat_request,
        mock_completion_request,
        api_type: str,
        stream: bool,
        max_tokens: int,
        stream_batching_interval_ms: int,
    ):
        """Unified test for both chat and completion APIs, streaming and non-streaming."""
        # Create request based on API type
        if api_type == "chat":
            request = mock_chat_request
            batched_chunks = serve_handle.chat.remote(request)
        elif api_type == "completion":
            request = mock_completion_request
            batched_chunks = serve_handle.completions.remote(request)
        print(
            f"\n\n_____ {api_type.upper()} ({'STREAMING' if stream else 'NON-STREAMING'}) max_tokens={max_tokens} batching_interval_ms={stream_batching_interval_ms} _____\n\n"
        )
        if stream:
            # Collect responses from the stream
            chunks = []
            async for batch in batched_chunks:
                chunks.extend(batch)
            # Check that we got responses
            assert len(chunks) > 0
            # Validate streaming response
            LLMResponseValidator.validate_streaming_chunks(chunks, api_type, max_tokens)
        else:
            # Collect non-streaming response
            chunks = []
            async for batch in batched_chunks:
                chunks.append(batch)
            # Check that we got one response
            assert len(chunks) == 1
            # Validate non-streaming response
            LLMResponseValidator.validate_non_streaming_response(
                chunks[0], api_type, max_tokens
            )

    @pytest.mark.parametrize("dimensions", [None, 512])
    @pytest.mark.asyncio
    async def test_embedding_llm_server(
        self,
        serve_handle,
        mock_llm_config,
        mock_embedding_request,
        dimensions: Optional[int],
    ):
        """Test embedding API from LLMServer perspective."""
        # Create embedding request
        request = mock_embedding_request
        print(f"\n\n_____ EMBEDDING SERVER dimensions={dimensions} _____\n\n")
        # Get the response
        batched_chunks = serve_handle.embeddings.remote(request)
        # Collect responses (should be just one)
        chunks = []
        async for batch in batched_chunks:
            chunks.append(batch)
        # Check that we got one response
        assert len(chunks) == 1
        # Validate embedding response
        LLMResponseValidator.validate_embedding_response(chunks[0], dimensions)

    @pytest.mark.parametrize("stream", [False, True])
    @pytest.mark.parametrize("temperature", [0.0])
    @pytest.mark.parametrize("language", ["en", "hi"])
    @pytest.mark.asyncio
    async def test_transcription_llm_server(
        self,
        serve_handle,
        mock_llm_config,
        mock_transcription_request,
        stream: bool,
        temperature: float,
        language: Optional[str],
    ):
        """Test transcription API from LLMServer perspective."""
        # Create transcription request
        request = mock_transcription_request
        print(
            f"\n\n_____ TRANSCRIPTION SERVER ({'STREAMING' if stream else 'NON-STREAMING'}) language={language} temperature={temperature} _____\n\n"
        )
        # Get the response
        batched_chunks = serve_handle.transcriptions.remote(request)
        if stream:
            # Collect streaming responses
            chunks = []
            async for batch in batched_chunks:
                if isinstance(batch, list):
                    chunks.extend(batch)
                else:
                    chunks.append(batch)
            # Check that we got responses
            assert len(chunks) > 0
            # Validate streaming response
            LLMResponseValidator.validate_transcription_response(
                chunks, temperature, language
            )
        else:
            # Collect non-streaming response
            chunks = []
            async for batch in batched_chunks:
                chunks.append(batch)
            # Check that we got one response
            assert len(chunks) == 1
            # Validate non-streaming response
            LLMResponseValidator.validate_transcription_response(
                chunks[0], temperature, language
            )

    @pytest.mark.asyncio
    async def test_score_llm_server(
        self,
        serve_handle,
        mock_llm_config,
        mock_score_request,
    ):
        """Test score API from LLMServer perspective."""
        # Create score request
        request = mock_score_request
        print("\n\n_____ SCORE SERVER _____\n\n")
        # Get the response
        batched_chunks = serve_handle.score.remote(request)
        # Collect responses (should be just one)
        chunks = []
        async for batch in batched_chunks:
            chunks.append(batch)
        # Check that we got one response
        assert len(chunks) == 1
        # Validate score response
        LLMResponseValidator.validate_score_response(chunks[0])

    @pytest.mark.parametrize("return_token_strs", [False, True])
    @pytest.mark.asyncio
    async def test_tokenize_llm_server(
        self,
        serve_handle,
        mock_llm_config,
        mock_tokenize_request,
        return_token_strs: bool,
    ):
        """Test tokenize API from LLMServer perspective."""
        # Create tokenize request
        request = mock_tokenize_request
        print(
            f"\n\n_____ TOKENIZE SERVER return_token_strs={return_token_strs} _____\n\n"
        )
        # Get the response
        batched_chunks = serve_handle.tokenize.remote(request)
        # Collect responses (should be just one)
        chunks = []
        async for batch in batched_chunks:
            chunks.append(batch)
        # Check that we got one response
        assert len(chunks) == 1
        # Validate tokenize response
        LLMResponseValidator.validate_tokenize_response(
            chunks[0],
            expected_prompt="Hello, world!",
            return_token_strs=return_token_strs,
        )

    @pytest.mark.asyncio
    async def test_detokenize_llm_server(
        self,
        serve_handle,
        mock_llm_config,
        mock_detokenize_request,
    ):
        """Test detokenize API from LLMServer perspective."""
        # Create detokenize request
        request = mock_detokenize_request
        print("\n\n_____ DETOKENIZE SERVER _____\n\n")
        # Get the response
        batched_chunks = serve_handle.detokenize.remote(request)
        # Collect responses (should be just one)
        chunks = []
        async for batch in batched_chunks:
            chunks.append(batch)
        # Check that we got one response
        assert len(chunks) == 1
        # Validate detokenize response
        LLMResponseValidator.validate_detokenize_response(
            chunks[0],
            expected_text="Hello",  # [72, 101, 108, 108, 111] = "Hello"
        )

    @pytest.mark.asyncio
    async def test_check_health(self, mock_llm_config):
        """Test health check functionality."""

        # Mock the engine's check_health method
        class LocalMockEngine(MockVLLMEngine):
            def __init__(self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                self.check_health_called = False

            async def check_health(self):
                self.check_health_called = True

        # Create a server with a mocked engine
        server = LLMServer.sync_init(mock_llm_config, engine_cls=LocalMockEngine)
        await server.start()
        # Perform the health check, no exceptions should be raised
        await server.check_health()
        # Check that the health check method was called
        assert server.engine.check_health_called

    @pytest.mark.asyncio
    async def test_reset_prefix_cache(self, mock_llm_config):
        """Test reset prefix cache functionality."""

        # Mock the engine's reset_prefix_cache method
        class LocalMockEngine(MockVLLMEngine):
            def __init__(self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                self.reset_prefix_cache_called = False

            async def reset_prefix_cache(self):
                self.reset_prefix_cache_called = True

        # Create a server with a mocked engine
        server = LLMServer.sync_init(mock_llm_config, engine_cls=LocalMockEngine)
        await server.start()
        # Reset prefix cache, no exceptions should be raised
        await server.reset_prefix_cache()
        # Check that the reset prefix cache method was called
        assert server.engine.reset_prefix_cache_called

    @pytest.mark.asyncio
    async def test_start_profile(self, mock_llm_config):
        """Test start profile functionality."""

        # Mock the engine's start_profile method
        class LocalMockEngine(MockVLLMEngine):
            def __init__(self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                self.start_profile_called = False

            async def start_profile(self):
                self.start_profile_called = True

        # Create a server with a mocked engine
        server = LLMServer.sync_init(mock_llm_config, engine_cls=LocalMockEngine)
        await server.start()
        # Start profile, no exceptions should be raised
        await server.start_profile()
        # Check that the start profile method was called
        assert server.engine.start_profile_called

    @pytest.mark.asyncio
    async def test_stop_profile(self, mock_llm_config):
        """Test stop profile functionality."""

        # Mock the engine's stop_profile method
        class LocalMockEngine(MockVLLMEngine):
            def __init__(self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                self.stop_profile_called = False

            async def stop_profile(self):
                self.stop_profile_called = True

        # Create a server with a mocked engine
        server = LLMServer.sync_init(mock_llm_config, engine_cls=LocalMockEngine)
        await server.start()
        # Stop profile, no exceptions should be raised
        await server.stop_profile()
        # Check that the stop profile method was called
        assert server.engine.stop_profile_called

    @pytest.mark.asyncio
    async def test_llm_config_property(self, mock_llm_config):
        """Test the llm_config property."""
        server = LLMServer.sync_init(mock_llm_config, engine_cls=MockVLLMEngine)
        await server.start()
        llm_config = await server.llm_config()
        assert isinstance(llm_config, type(mock_llm_config))

    @pytest.mark.parametrize("stream", [False])
    @pytest.mark.parametrize("max_tokens", [5])
    @pytest.mark.asyncio
    async def test_request_id_handling(
        self,
        serve_handle,
        mock_llm_config,
        mock_chat_request,
        stream: bool,
        max_tokens: int,
    ):
        """Test that the request id is handled correctly."""
        # Create a chat completion request
        # We should patch get_server_request_id to return a test_request_id
        serve.context._serve_request_context.set(
            serve.context._RequestContext(**{"request_id": "test_request_id"})
        )
        # Get the response
        chunks = []
        async for chunk in serve_handle.chat.remote(mock_chat_request):
            chunks.append(chunk)
        assert len(chunks) == 1
        # The response id should echo the Serve request context's request id.
        assert chunks[0].id == "test_request_id"

    @pytest.mark.parametrize("api_type", ["chat", "completion"])
    @pytest.mark.parametrize("stream", [False, True])
    @pytest.mark.parametrize("max_tokens", [5])
    @pytest.mark.parametrize("stream_batching_interval_ms", [0, 10000])
    @pytest.mark.asyncio
    async def test_multiplexed_request_handling(
        self,
        multiplexed_serve_handle,
        mock_chat_request,
        mock_completion_request,
        api_type: str,
        stream: bool,
        max_tokens: int,
        stream_batching_interval_ms: int,
    ):
        """Unified test for multiplexed (LoRA) requests - both chat and completion APIs, streaming and non-streaming."""
        # Create request based on API type and set model ID for multiplexing
        if api_type == "chat":
            request = mock_chat_request
            batched_chunks = multiplexed_serve_handle.chat.remote(request)
        elif api_type == "completion":
            request = mock_completion_request
            batched_chunks = multiplexed_serve_handle.completions.remote(request)
        request.model = "test_model_id"
        print(
            f"\n\n_____ MULTIPLEXED {api_type.upper()} ({'STREAMING' if stream else 'NON-STREAMING'}) max_tokens={max_tokens} batching_interval_ms={stream_batching_interval_ms} _____\n\n"
        )
        if stream:
            # Collect responses from the stream
            chunks = []
            async for batch in batched_chunks:
                if isinstance(batch, list):
                    chunks.extend(batch)
                else:
                    chunks.append(batch)
            # Check that we got responses
            assert len(chunks) > 0
            # Validate streaming response with LoRA model ID
            LLMResponseValidator.validate_streaming_chunks(
                chunks, api_type, max_tokens, lora_model_id=request.model
            )
        else:
            # Collect non-streaming response
            chunks = []
            async for batch in batched_chunks:
                if isinstance(batch, list):
                    chunks.extend(batch)
                else:
                    chunks.append(batch)
            # Check that we got one response
            assert len(chunks) == 1
            # Validate non-streaming response with LoRA model ID
            LLMResponseValidator.validate_non_streaming_response(
                chunks[0], api_type, max_tokens, lora_model_id=request.model
            )

    @pytest.mark.asyncio
    async def test_push_telemetry(self, mock_llm_config):
        """Test that the telemetry push is called properly."""
        with patch(
            "ray.llm._internal.serve.core.server.llm_server.push_telemetry_report_for_all_models"
        ) as mock_push_telemetry:
            server = LLMServer.sync_init(mock_llm_config, engine_cls=MockVLLMEngine)
            await server.start()
            mock_push_telemetry.assert_called_once()

    @pytest.mark.parametrize("api_type", ["chat", "completions"])
    @pytest.mark.parametrize("stream", [True])
    @pytest.mark.parametrize("max_tokens", [64])
    @pytest.mark.parametrize("concurrency", [1, 16])
    @pytest.mark.parametrize("stream_batching_interval_ms", [0])
    @pytest.mark.asyncio
    async def test_stable_streaming_tpot(
        self,
        serve_handle,
        mock_llm_config,
        mock_chat_request,
        mock_completion_request,
        api_type: str,
        stream: bool,
        max_tokens: int,
        concurrency: int,
        stream_batching_interval_ms: int,
    ):
        """Test that the streaming TPOT is stable when batching is disabled."""
        # Create request based on API type
        if api_type == "chat":
            request = mock_chat_request
        elif api_type == "completions":
            request = mock_completion_request
        batched_chunks: list[AsyncGenerator] = [
            getattr(serve_handle, api_type).remote(request) for _ in range(concurrency)
        ]
        print(
            f"\n\n_____ {api_type.upper()} ({'STREAMING' if stream else 'NON-STREAMING'}) max_tokens={max_tokens} batching_interval_ms={stream_batching_interval_ms} _____\n\n"
        )
        # Collect responses from llm_server
        tpots_ms = await asyncio.gather(
            *[
                count_tpot_ms_from_stream(server_stream)
                for server_stream in batched_chunks
            ]
        )
        mean_llm_server = np.mean(tpots_ms)
        std_var_llm_server = np.std(tpots_ms)
        # Run same request with vllm engine
        vllm_engine = MockVLLMEngine(llm_config=mock_llm_config)
        await vllm_engine.start()
        engine_streams: list[AsyncGenerator] = [
            getattr(vllm_engine, api_type)(request) for _ in range(concurrency)
        ]
        tpots_ms_engine = await asyncio.gather(
            *[
                count_tpot_ms_from_stream(engine_stream)
                for engine_stream in engine_streams
            ]
        )
        mean_engine = np.mean(tpots_ms_engine)
        std_var_engine = np.std(tpots_ms_engine)
        # Server-side timing should match the raw engine's timing closely:
        # the server layer must not add significant jitter.
        assert np.isclose(
            mean_llm_server, mean_engine, rtol=0.1
        ), f"{mean_llm_server=}, {mean_engine=}"
        assert np.isclose(
            std_var_llm_server, std_var_engine, atol=1.0
        ), f"{std_var_llm_server=}, {std_var_engine=}"
class TestGetDeploymentOptions:
    """Tests for LLMServer.get_deployment_options placement-group and
    runtime-env handling."""

    def test_placement_group_config(self):
        """Test that placement_group_config is correctly parsed."""
        # Test the default resource bundle
        llm_config = LLMConfig(
            model_loading_config=dict(model_id="test_model"),
            engine_kwargs=dict(tensor_parallel_size=3, pipeline_parallel_size=2),
        )
        serve_options = LLMServer.get_deployment_options(llm_config)
        # TP=3 x PP=2 -> 6 workers total: first bundle also hosts the replica
        # actor (hence the CPU), the remaining 5 are GPU-only.
        assert serve_options["placement_group_bundles"] == [{"CPU": 1, "GPU": 1}] + [
            {"GPU": 1} for _ in range(5)
        ]
        # Test the custom placement group config
        # Note: The first bundle gets merged with replica actor resources (CPU: 1, GPU: 0)
        llm_config = LLMConfig(
            model_loading_config=dict(model_id="test_model"),
            engine_kwargs=dict(tensor_parallel_size=3, pipeline_parallel_size=2),
            placement_group_config={
                "bundles": [{"CPU": 1, "XPU": 1}] + [{"XPU": 1}] * 5,
                "strategy": "PACK",
            },
        )
        serve_options = LLMServer.get_deployment_options(llm_config)
        # First bundle has replica actor resources merged in (CPU: 1 from config + 1 from replica = 2)
        # All bundles get GPU: 1.0 added as accelerator hint (and CPU: 0.0 for workers)
        assert serve_options["placement_group_bundles"] == [
            {"CPU": 2.0, "GPU": 1.0, "XPU": 1}
        ] + [{"CPU": 0.0, "GPU": 1.0, "XPU": 1} for _ in range(5)]
        assert serve_options["placement_group_strategy"] == "PACK"

    def test_get_serve_options_with_accelerator_type(self):
        """Test that get_serve_options returns the correct options when accelerator_type is set."""
        llm_config = LLMConfig(
            model_loading_config=ModelLoadingConfig(model_id="test_model"),
            accelerator_type="A100-40G",
            deployment_config={
                "autoscaling_config": {
                    "min_replicas": 0,
                    "initial_replicas": 1,
                    "max_replicas": 10,
                },
            },
            runtime_env={"env_vars": {"FOO": "bar"}},
        )
        serve_options = LLMServer.get_deployment_options(llm_config)
        # Test the core functionality without being strict about Ray's automatic runtime env additions
        assert serve_options["autoscaling_config"] == {
            "min_replicas": 0,
            "initial_replicas": 1,
            "max_replicas": 10,
        }
        # Accelerator type surfaces as a fractional custom resource on the bundle.
        assert serve_options["placement_group_bundles"] == [
            {"CPU": 1, "GPU": 1, "accelerator_type:A100-40G": 0.001},
        ]
        # Default strategy is PACK (cross-node allowed by default)
        assert serve_options["placement_group_strategy"] == "PACK"
        # Check that our custom env vars are present
        assert (
            serve_options["ray_actor_options"]["runtime_env"]["env_vars"]["FOO"]
            == "bar"
        )
        assert (
            "worker_process_setup_hook"
            in serve_options["ray_actor_options"]["runtime_env"]
        )

    def test_get_serve_options_without_accelerator_type(self):
        """Test that get_serve_options returns the correct options when accelerator_type is not set."""
        llm_config = LLMConfig(
            model_loading_config=ModelLoadingConfig(model_id="test_model"),
            deployment_config={
                "autoscaling_config": {
                    "min_replicas": 0,
                    "initial_replicas": 1,
                    "max_replicas": 10,
                },
            },
            runtime_env={"env_vars": {"FOO": "bar"}},
        )
        serve_options = LLMServer.get_deployment_options(llm_config)
        # Test the core functionality without being strict about Ray's automatic runtime env additions
        assert serve_options["autoscaling_config"] == {
            "min_replicas": 0,
            "initial_replicas": 1,
            "max_replicas": 10,
        }
        assert serve_options["placement_group_bundles"] == [{"CPU": 1, "GPU": 1}]
        # Default strategy is PACK (cross-node allowed by default)
        assert serve_options["placement_group_strategy"] == "PACK"
        # Check that our custom env vars are present
        assert (
            serve_options["ray_actor_options"]["runtime_env"]["env_vars"]["FOO"]
            == "bar"
        )
        assert (
            "worker_process_setup_hook"
            in serve_options["ray_actor_options"]["runtime_env"]
        )
if __name__ == "__main__":
    # Allow running this module directly: delegate to pytest and propagate
    # its exit code to the shell.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/tests/serve/cpu/deployments/llm/test_llm_server.py",
"license": "Apache License 2.0",
"lines": 570,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/llm/tests/serve/cpu/deployments/utils/test_batcher.py | import asyncio
import sys
import time
from typing import List, Optional
import numpy as np
import pytest
from ray.llm._internal.serve.constants import MODEL_RESPONSE_BATCH_TIMEOUT_MS
from ray.llm._internal.serve.utils.batcher import Batcher
# Token text emitted by the fake generators below; tests assert on the
# concatenation of these values. FINAL_TEXT_VALUE marks the last item of
# fake_generator_slow_last_return_immediate.
TEXT_VALUE = "foo"
FINAL_TEXT_VALUE = "bar"
async def fake_generator():
    """Yield 100 single-token responses back-to-back with no delay."""
    for _ in range(100):
        yield {"num_generated_tokens": 1, "generated_text": TEXT_VALUE}
async def fake_generator_slow(num_batches: int):
    """Yield 100 single-token responses, paced so the Batcher coalesces them
    into roughly *num_batches* batches."""
    delay_s = MODEL_RESPONSE_BATCH_TIMEOUT_MS / 1000 / num_batches
    for _ in range(100):
        await asyncio.sleep(delay_s)
        yield {"num_generated_tokens": 1, "generated_text": TEXT_VALUE}
async def fake_generator_slow_last_return_immediate():
    """Yield 10 delayed responses, then one final response with no delay."""
    for _ in range(10):
        await asyncio.sleep(MODEL_RESPONSE_BATCH_TIMEOUT_MS / 1000)
        yield {"num_generated_tokens": 1, "generated_text": TEXT_VALUE}
    # Last item is emitted immediately after the 10th, with the sentinel text.
    yield {"num_generated_tokens": 1, "generated_text": FINAL_TEXT_VALUE}
async def count_interval_ms_from_stream(stream) -> list[float]:
    """Drain *stream*, returning the milliseconds elapsed between consecutive
    items (the first item only establishes the reference timestamp)."""
    gaps_ms: list[float] = []
    last_seen = None
    async for _ in stream:
        now = time.perf_counter()
        if last_seen is not None:
            gaps_ms.append((now - last_seen) * 1e3)
        last_seen = now
    return gaps_ms
class TestBatcher(Batcher):
    """Batcher subclass whose merge step concatenates/sums fields so tests
    can assert on the combined batch content."""

    def _merge_results(self, results: List[dict]) -> dict:
        # Token counts add numerically; generated text concatenates.
        combined = {"num_generated_tokens": 0, "generated_text": ""}
        for item in results:
            for field, value in item.items():
                combined[field] = combined[field] + value
        return combined
class TestBatching:
    """Tests for Batcher: batch merging, flush timing, exception propagation,
    cancellation, and timing stability with batching disabled."""

    @pytest.mark.asyncio
    async def test_batch(self):
        # A fast generator should be merged into a single batch of all 100 items.
        count = 0
        batcher = TestBatcher(fake_generator())
        async for x in batcher.stream():
            count += 1
            assert x["num_generated_tokens"] == 100
            assert x["generated_text"] == TEXT_VALUE * 100
        # Should only have been called once
        assert count == 1
        assert batcher.queue.empty()

    @pytest.mark.asyncio
    async def test_batch_timing(self):
        count = 0
        batcher = TestBatcher(fake_generator_slow(num_batches=10))
        async for _x in batcher.stream():
            count += 1
        assert 9 <= count <= 12, (
            "Count should have been called between 9 and 12 times, "
            "because each iteration takes 1/10th of an interval to yield."
        )
        assert batcher.queue.empty()

    @pytest.mark.asyncio
    async def test_batch_last_return_is_immediate(self):
        """Test that we don't wait the entire interval for
        the last response if it returns quickly."""
        count = 0
        token_count = 0
        batcher = TestBatcher(fake_generator_slow_last_return_immediate())
        last_response = None
        async for _x in batcher.stream():
            count += 1
            token_count += _x["num_generated_tokens"]
            last_response = _x
        assert (
            last_response["generated_text"] == TEXT_VALUE + FINAL_TEXT_VALUE
        ), "the last generated response should be batched with previous one"
        assert token_count == 11, "token_count should be exactly 11"
        assert (
            count == 10
        ), "Count should have been called exactly 10 times (as many as we generated - 1)"
        assert batcher.queue.empty()

    @pytest.mark.asyncio
    async def test_batch_no_interval(self):
        """Check that the class creates only one batch if there's no interval."""
        batcher = TestBatcher(fake_generator_slow(num_batches=10), interval_ms=None)
        count = 0
        async for _x in batcher.stream():
            count += 1
        assert count == 1
        assert batcher.queue.empty()

    @pytest.mark.asyncio
    @pytest.mark.parametrize("interval_ms", [100, None])
    async def test_exception_propagation(self, interval_ms: Optional[float]):
        """Test that exceptions are propagated correctly to parent."""

        async def generator_should_raise():
            for _i in range(100):
                await asyncio.sleep(0.01)
                yield dict(num_generated_tokens=1, generated_text=TEXT_VALUE)
                # Raise right after the first item so the consumer sees one
                # batch and then the error.
                raise ValueError()

        count = 0
        batched = TestBatcher(generator_should_raise(), interval_ms=interval_ms)

        async def parent():
            nonlocal count
            nonlocal batched
            async for _x in batched.stream():
                count += 1

        task = asyncio.create_task(parent())
        await asyncio.sleep(0.2)
        with pytest.raises(ValueError):
            task.result()
        assert count == 1

    @pytest.mark.asyncio
    @pytest.mark.parametrize("interval_ms", [100, None])
    @pytest.mark.parametrize("to_cancel", ["parent", "inner", "stream"])
    async def test_cancellation(self, interval_ms: Optional[float], to_cancel: str):
        """There are 3 ways cancellation can happen:
        1. The parent is cancelled
        2. The generator is cancelled
        3. The stream task is directly cancelled.
        Make sure all associated tasks are cancelled in each instance.
        """

        async def generator_should_raise():
            with pytest.raises(asyncio.CancelledError):
                for _i in range(100):
                    await asyncio.sleep(0.01)
                    yield dict(num_generated_tokens=1, generated_text=TEXT_VALUE)
                    if to_cancel == "inner":
                        raise asyncio.CancelledError()

        batched = TestBatcher(generator_should_raise(), interval_ms=interval_ms)

        async def parent():
            nonlocal batched
            async for _x in batched.stream():
                pass

        task = asyncio.create_task(parent())
        await asyncio.sleep(0.2)
        # For "inner" the generator has already cancelled itself by now, so
        # there is nothing left to cancel externally.
        cancel_task = {
            "parent": task,
            "stream": batched.read_task,
        }.get(to_cancel)
        if cancel_task:
            assert not task.done()
            assert not batched.read_task.done()
            cancel_task.cancel()
        await asyncio.sleep(0.3)
        assert batched.read_task.done(), "Read task should be completed"
        assert task.done(), "All tasks should be done"
        # Inner task is checked automatically with pytest.raises

    @pytest.mark.asyncio
    async def test_stable_streaming(self):
        """Test that the batcher does not add jitter to the stream when interval_ms is 0"""

        async def generator():
            for i in range(100):
                await asyncio.sleep(0.01)
                yield i

        concurrency = 10
        output_intervals = await asyncio.gather(
            *[
                count_interval_ms_from_stream(
                    Batcher(generator(), interval_ms=0).stream()
                )
                for _ in range(concurrency)
            ]
        )
        mean_batcher_interval = np.mean(output_intervals)
        std_batcher_interval = np.std(output_intervals)
        generator_intervals = await asyncio.gather(
            *[count_interval_ms_from_stream(generator()) for _ in range(concurrency)]
        )
        mean_generator_interval = np.mean(generator_intervals)
        std_generator_interval = np.std(generator_intervals)
        # Inter-item timing through the batcher should match the bare
        # generator's timing in both mean and spread.
        assert np.isclose(
            mean_batcher_interval, mean_generator_interval, rtol=0.1
        ), f"{mean_batcher_interval=}, {mean_generator_interval=}"
        assert np.isclose(
            std_batcher_interval, std_generator_interval, atol=0.1
        ), f"{std_batcher_interval=}, {std_generator_interval=}"
if __name__ == "__main__":
    # Allow running this module directly: delegate to pytest and propagate
    # its exit code to the shell.
    sys.exit(pytest.main(["-v", __file__]))
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/tests/serve/cpu/deployments/utils/test_batcher.py",
"license": "Apache License 2.0",
"lines": 186,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:rllib/examples/envs/classes/multi_agent/double_row_corridor_env.py | import gymnasium as gym
import numpy as np
from ray.rllib.env.multi_agent_env import MultiAgentEnv
from ray.rllib.utils.annotations import override
from ray.rllib.utils.typing import AgentID
class DoubleRowCorridorEnv(MultiAgentEnv):
    """A MultiAgentEnv with a single, global observation space for all agents.

    There are two agents in this grid-world-style environment, `agent_0` and `agent_1`.
    The grid has two-rows and multiple columns and agents must, each
    separately, reach their individual goal position to receive a final reward of +10:

    +---------------+
    |0              |
    |              1|
    +---------------+

    Legend:
    0 = agent_0 + goal state for agent_1
    1 = agent_1 + goal state for agent_0

    You can change the length of the grid through providing the "length" key in the
    `config` dict passed to the env's constructor.

    The action space for both agents is Discrete(4), which encodes to moving up, down,
    left, or right in the grid.

    If the two agents collide, meaning they end up in the exact same field after both
    taking their actions at any timestep, an additional reward of +5 is given to both
    agents. Thus, optimal policies aim at seeking the respective other agent first, and
    only then proceeding to their agent's goal position.

    Each agent in the env has an observation space of a 2-tuple containing its own
    x/y-position, where x is the row index, being either 0 (1st row) or 1 (2nd row),
    and y is the column index (starting from 0).
    """

    def __init__(self, config=None):
        """Initializes the env.

        Args:
            config: Optional config dict; supports the "length" key
                (number of grid columns, default 15).
        """
        super().__init__()

        config = config or {}
        # Number of columns in the 2-row grid.
        self.length = config.get("length", 15)
        # Per-agent done flags; (re)populated in `reset()`.
        # Fix: was assigned twice in the original (once here and once again
        # after `self.agents` below); the duplicate assignment was removed.
        self.terminateds = {}
        # Whether the agents have already collided this episode (the +5
        # collision bonus is granted at most once per episode).
        self.collided = False

        # Provide information about agents and possible agents.
        self.agents = self.possible_agents = ["agent_0", "agent_1"]

        # Observations: x/y, where the first number is the row index, the second number
        # is the column index, for both agents.
        # For example: [0.0, 2.0] means the agent is in row 0 and column 2.
        self._obs_spaces = gym.spaces.Box(
            0.0, self.length - 1, shape=(2,), dtype=np.int32
        )
        self._act_spaces = gym.spaces.Discrete(4)

    @override(MultiAgentEnv)
    def reset(self, *, seed=None, options=None):
        # agent_0 starts top-left, agent_1 bottom-right; each agent's goal is
        # the other agent's start position.
        self.agent_pos = {
            "agent_0": [0, 0],
            "agent_1": [1, self.length - 1],
        }
        self.goals = {
            "agent_0": [0, self.length - 1],
            "agent_1": [1, 0],
        }
        self.terminateds = {agent_id: False for agent_id in self.agent_pos}
        self.collided = False
        return self._get_obs(), {}

    @override(MultiAgentEnv)
    def step(self, action_dict):
        # Small per-step penalty for every still-active agent.
        rewards = {
            agent_id: -0.1
            for agent_id in self.agent_pos
            if not self.terminateds[agent_id]
        }

        # Apply movements; moves off the grid are no-ops.
        for agent_id, action in action_dict.items():
            row, col = self.agent_pos[agent_id]
            # up
            if action == 0 and row > 0:
                row -= 1
            # down
            elif action == 1 and row < 1:
                row += 1
            # left
            elif action == 2 and col > 0:
                col -= 1
            # right
            elif action == 3 and col < self.length - 1:
                col += 1
            # Update positions.
            self.agent_pos[agent_id] = [row, col]

        obs = self._get_obs()

        # Check for collision (only if both agents are still active).
        if (
            not any(self.terminateds.values())
            and self.agent_pos["agent_0"] == self.agent_pos["agent_1"]
        ):
            # Bonus is granted at most once per episode.
            if not self.collided:
                rewards["agent_0"] += 5
                rewards["agent_1"] += 5
                self.collided = True

        # Check goals: reaching one terminates that agent with +10.
        for agent_id in self.agent_pos:
            if (
                self.agent_pos[agent_id] == self.goals[agent_id]
                and not self.terminateds[agent_id]
            ):
                rewards[agent_id] += 10
                self.terminateds[agent_id] = True

        terminateds = {
            agent_id: self.terminateds[agent_id] for agent_id in self.agent_pos
        }
        terminateds["__all__"] = all(self.terminateds.values())

        return obs, rewards, terminateds, {}, {}

    @override(MultiAgentEnv)
    def get_observation_space(self, agent_id: AgentID) -> gym.Space:
        # All agents share the same observation space.
        return self._obs_spaces

    @override(MultiAgentEnv)
    def get_action_space(self, agent_id: AgentID) -> gym.Space:
        # All agents share the same action space.
        return self._act_spaces

    def _get_obs(self):
        """Returns per-agent [row, col] observations for all active agents."""
        obs = {}
        pos = self.agent_pos
        for agent_id in self.agent_pos:
            # Terminated agents are excluded from the obs dict.
            if self.terminateds[agent_id]:
                continue
            obs[agent_id] = np.array(pos[agent_id], dtype=np.int32)
        return obs
| {
"repo_id": "ray-project/ray",
"file_path": "rllib/examples/envs/classes/multi_agent/double_row_corridor_env.py",
"license": "Apache License 2.0",
"lines": 118,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/tests/test_multiprocessing_standalone.py | """Tests for ray.util.multiprocessing that require a standalone Ray cluster per test.
Tests that can run on a shared Ray cluster fixture should go in test_multiprocessing.py
"""
import math
import multiprocessing as mp
import os
import sys
import pytest
import ray
from ray._private.test_utils import external_redis_test_enabled
from ray.util.multiprocessing import Pool
@pytest.fixture(scope="module")
def ray_init_4_cpu_shared():
    # Module-scoped Ray instance with 4 CPUs, shared by all tests that use it.
    # NOTE(review): no explicit ray.shutdown() after the yield; presumably
    # relies on Ray's atexit handling — confirm.
    yield ray.init(num_cpus=4)
@pytest.fixture
def pool_4_processes(ray_init_4_cpu_shared):
    # Ray-backed multiprocessing Pool with 4 processes, torn down per test.
    pool = Pool(processes=4)
    yield pool
    pool.terminate()
    pool.join()
@pytest.fixture
def pool_4_processes_python_multiprocessing_lib():
    # Vanilla stdlib multiprocessing Pool, used for behavior comparison
    # against the Ray-backed Pool.
    pool = mp.Pool(processes=4)
    yield pool
    pool.terminate()
    pool.join()
@pytest.mark.skipif(
    external_redis_test_enabled(),
    reason="Starts multiple Ray instances in parallel with the same namespace.",
)
def test_ray_init(shutdown_only):
    """Pool() starts Ray on demand, reuses a running cluster, and rejects
    pool sizes larger than the cluster's CPU count."""

    def getpid(i: int):
        return os.getpid()

    def check_pool_size(pool, size: int):
        # `size` distinct worker PIDs imply the pool really has that many
        # processes.
        assert len(set(pool.map(getpid, range(size)))) == size

    # Check that starting a pool starts ray if not initialized.
    assert not ray.is_initialized()
    with Pool(processes=4) as pool:
        assert ray.is_initialized()
        check_pool_size(pool, 4)
        assert int(ray.cluster_resources()["CPU"]) == 4
    # join() is only legal after terminate(), which the `with` exit performs.
    pool.join()

    # Check that starting a pool doesn't affect ray if there is a local
    # ray cluster running.
    assert ray.is_initialized()
    assert int(ray.cluster_resources()["CPU"]) == 4
    with Pool(processes=2) as pool:
        assert ray.is_initialized()
        check_pool_size(pool, 2)
        assert int(ray.cluster_resources()["CPU"]) == 4
    pool.join()

    # Check that trying to start a pool on an existing ray cluster throws an
    # error if there aren't enough CPUs for the number of processes.
    assert ray.is_initialized()
    assert int(ray.cluster_resources()["CPU"]) == 4
    with pytest.raises(ValueError):
        Pool(processes=8)
    assert int(ray.cluster_resources()["CPU"]) == 4
@pytest.mark.skipif(
    external_redis_test_enabled(),
    reason="Starts multiple Ray instances in parallel with the same namespace.",
)
@pytest.mark.parametrize(
    "ray_start_cluster",
    [
        {
            "num_cpus": 1,
            "num_nodes": 1,
            "do_init": False,
        }
    ],
    indirect=True,
)
def test_connect_to_ray(monkeypatch, ray_start_cluster):
    """Pool() address resolution: connect to an existing cluster by default,
    via the `ray_address` argument, or via RAY_ADDRESS; start a local cluster
    only when the address is "local"."""

    def getpid(args):
        return os.getpid()

    def check_pool_size(pool, size):
        args = [tuple() for _ in range(size)]
        assert len(set(pool.map(getpid, args))) == size

    # Use different numbers of CPUs to distinguish between starting a local
    # ray cluster and connecting to an existing one.
    ray.init(address=ray_start_cluster.address)
    existing_cluster_cpus = int(ray.cluster_resources()["CPU"])
    local_cluster_cpus = existing_cluster_cpus + 1
    ray.shutdown()

    # Check that starting a pool connects to the running ray cluster by default.
    assert not ray.is_initialized()
    with Pool() as pool:
        assert ray.is_initialized()
        check_pool_size(pool, existing_cluster_cpus)
        assert int(ray.cluster_resources()["CPU"]) == existing_cluster_cpus
    pool.join()
    ray.shutdown()

    # Check that starting a pool connects to a running ray cluster if
    # ray_address is set to the cluster address.
    assert not ray.is_initialized()
    with Pool(ray_address=ray_start_cluster.address) as pool:
        check_pool_size(pool, existing_cluster_cpus)
        assert int(ray.cluster_resources()["CPU"]) == existing_cluster_cpus
    pool.join()
    ray.shutdown()

    # Check that starting a pool connects to a running ray cluster if
    # RAY_ADDRESS is set to the cluster address.
    assert not ray.is_initialized()
    monkeypatch.setenv("RAY_ADDRESS", ray_start_cluster.address)
    with Pool() as pool:
        check_pool_size(pool, existing_cluster_cpus)
        assert int(ray.cluster_resources()["CPU"]) == existing_cluster_cpus
    pool.join()
    ray.shutdown()

    # Check that trying to start a pool on an existing ray cluster throws an
    # error if there aren't enough CPUs for the number of processes.
    assert not ray.is_initialized()
    with pytest.raises(Exception):
        Pool(processes=existing_cluster_cpus + 1)
    assert int(ray.cluster_resources()["CPU"]) == existing_cluster_cpus
    ray.shutdown()

    # Check that starting a pool starts a local ray cluster if ray_address="local".
    assert not ray.is_initialized()
    with Pool(processes=local_cluster_cpus, ray_address="local") as pool:
        check_pool_size(pool, local_cluster_cpus)
        assert int(ray.cluster_resources()["CPU"]) == local_cluster_cpus
    pool.join()
    ray.shutdown()

    # Check that starting a pool starts a local ray cluster if RAY_ADDRESS="local".
    assert not ray.is_initialized()
    monkeypatch.setenv("RAY_ADDRESS", "local")
    with Pool(processes=local_cluster_cpus) as pool:
        check_pool_size(pool, local_cluster_cpus)
        assert int(ray.cluster_resources()["CPU"]) == local_cluster_cpus
    pool.join()
    ray.shutdown()
def test_maxtasksperchild(shutdown_only):
    """With maxtasksperchild=1 every worker serves exactly one task, so 20
    tasks must come back from 20 distinct worker PIDs."""
    with Pool(processes=5, maxtasksperchild=1) as pool:
        worker_pids = pool.map(lambda _: os.getpid(), range(20))
        assert len(set(worker_pids)) == 20
    pool.join()
def test_deadlock_avoidance_in_recursive_tasks(shutdown_only):
    """Nested Pool usage must not deadlock, even on a single-CPU cluster."""
    ray.init(num_cpus=1)

    def inner_level(_):
        # Each inner call opens its own pool and maps sqrt over [0, 1].
        with Pool() as pool:
            return list(pool.map(math.sqrt, range(0, 2, 1)))

    def outer_level():
        # The outer pool fans out two inner calls, nesting pool creation.
        with Pool() as pool:
            return list(pool.map(inner_level, range(2, 4, 1)))

    assert outer_level() == [[0.0, 1.0], [0.0, 1.0]]
# Allow invoking this test module directly via `python <file>`.
if __name__ == "__main__":
    sys.exit(pytest.main(["-sv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/test_multiprocessing_standalone.py",
"license": "Apache License 2.0",
"lines": 148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/dashboard/modules/event/tests/test_export_task.py | import json
import os
import sys
import pytest
import ray
from ray._common.test_utils import wait_for_condition
from ray._private.test_utils import wait_until_server_available
from ray.dashboard.tests.conftest import * # noqa
# Enable the export-event file writer and disable the core-worker event
# aggregator before any Ray session is started by the fixtures below.
# NOTE(review): these are set after `import ray` above; assumes the flags are
# read at session start, not at import time — confirm.
os.environ["RAY_enable_export_api_write"] = "1"
os.environ["RAY_enable_core_worker_ray_event_to_aggregator"] = "0"
@pytest.mark.asyncio
async def test_task_labels(disable_aiohttp_cache, ray_start_with_dashboard):
    """
    Test task events are correctly generated and written to file
    """
    assert wait_until_server_available(ray_start_with_dashboard["webui_url"])

    export_event_path = os.path.join(
        ray_start_with_dashboard["session_dir"], "logs", "export_events"
    )

    # A simple task to trigger the export event
    @ray.remote
    def hi_w00t_task():
        return 1

    ray.get(hi_w00t_task.options(_labels={"hi": "w00t"}).remote())

    def _verify():
        # Verify export events are written
        events = []
        for filename in os.listdir(export_event_path):
            if not filename.startswith("event_EXPORT_TASK"):
                continue
            # One JSON event per line in each export-event file.
            # Fix: open the file currently being iterated — the path was a
            # hard-coded literal before, so the `filename` loop variable was
            # never used and no event file was ever read.
            with open(f"{export_event_path}/{filename}", "r") as f:
                for line in f.readlines():
                    events.append(json.loads(line))

        # Find the export event for our task and check its labels.
        hi_w00t_event = next(
            (
                event
                for event in events
                if event["source_type"] == "EXPORT_TASK"
                and event["event_data"].get("task_info", {}).get("func_or_class_name")
                == "hi_w00t_task"
            ),
            None,
        )
        return (
            hi_w00t_event is not None
            and hi_w00t_event["event_data"]
            .get("task_info", {})
            .get("labels", {})
            .get("hi")
            == "w00t"
        )

    # Event writing is asynchronous; poll until the event shows up.
    wait_for_condition(_verify, timeout=30)
# Allow invoking this test module directly via `python <file>`.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/dashboard/modules/event/tests/test_export_task.py",
"license": "Apache License 2.0",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/tests/test_label_scheduling.py | import sys
import pytest
import ray
@ray.remote
class MyActor:
    """Minimal actor used as a scheduling target by the tests below."""

    def __init__(self):
        # Dummy state; only `get_node_id` is exercised by the tests.
        self.value = 0

    def value(self):
        # NOTE(review): the instance attribute `self.value` (set to 0 in
        # __init__) shadows this method on instances, so this returns the int
        # attribute rather than anything method-like. The method is unused by
        # the tests; consider renaming the attribute or the method.
        return self.value

    def get_node_id(self):
        # Id of the node this actor is running on; used to verify placement.
        return ray.get_runtime_context().get_node_id()
@ray.remote
def get_node_id():
    # Helper task used to discover the id of the node it lands on.
    return ray.get_runtime_context().get_node_id()
@pytest.fixture
def cluster_with_labeled_nodes(ray_start_cluster):
    """Starts a 3-node cluster with distinct labels and returns the node ids.

    Returns:
        (node_1, node_2, node_3) where:
            node_1: A100, region=us-west4
            node_2: B200, market-type=spot
            node_3: TPU, market-type=on-demand, region=us-east4
    """
    cluster = ray_start_cluster
    cluster.add_node(
        resources={"worker1": 1},
        num_cpus=3,
        labels={"ray.io/accelerator-type": "A100", "region": "us-west4"},
    )
    ray.init(address=cluster.address)
    # Resolve each node's id by pinning a probe task to that node's
    # unique custom resource.
    node_1 = ray.get(get_node_id.options(resources={"worker1": 1}).remote())

    cluster.add_node(
        resources={"worker2": 1},
        num_cpus=3,
        labels={"ray.io/accelerator-type": "B200", "market-type": "spot"},
    )
    node_2 = ray.get(get_node_id.options(resources={"worker2": 1}).remote())

    cluster.add_node(
        resources={"worker3": 1},
        num_cpus=3,
        labels={
            "ray.io/accelerator-type": "TPU",
            "market-type": "on-demand",
            "region": "us-east4",
        },
    )
    node_3 = ray.get(get_node_id.options(resources={"worker3": 1}).remote())
    cluster.wait_for_nodes()
    return node_1, node_2, node_3
def test_label_selector_equals(cluster_with_labeled_nodes):
    """An exact-match label selector places the actor on the matching node."""
    a100_node, _, _ = cluster_with_labeled_nodes
    handle = MyActor.options(label_selector={"ray.io/accelerator-type": "A100"}).remote()
    assert ray.get(handle.get_node_id.remote(), timeout=3) == a100_node
def test_label_selector_not_equals(cluster_with_labeled_nodes):
    """A negated value ("!A100") combined with "spot" pins the actor to node 2."""
    _, spot_node, _ = cluster_with_labeled_nodes
    selector = {"ray.io/accelerator-type": "!A100", "market-type": "spot"}
    handle = MyActor.options(label_selector=selector).remote()
    assert ray.get(handle.get_node_id.remote(), timeout=3) == spot_node
def test_label_selector_in(cluster_with_labeled_nodes):
    """An in(...) selector matches any node whose label is in the listed set."""
    west_node, _, _ = cluster_with_labeled_nodes
    handle = MyActor.options(
        label_selector={"region": "in(us-west4, us-central2)"}
    ).remote()
    assert ray.get(handle.get_node_id.remote(), timeout=3) == west_node
def test_label_selector_not_in(cluster_with_labeled_nodes):
    """A !in(...) selector excludes every node carrying a listed label value."""
    _, b200_node, _ = cluster_with_labeled_nodes
    selector = {
        "ray.io/accelerator-type": "!in(A100)",
        "region": "!in(us-east4, us-west4)",
    }
    handle = MyActor.options(label_selector=selector).remote()
    assert ray.get(handle.get_node_id.remote(), timeout=3) == b200_node
def test_label_selector_multiple(cluster_with_labeled_nodes):
    """Every entry of a multi-key selector must match on the same node."""
    _, _, tpu_node = cluster_with_labeled_nodes
    handle = MyActor.options(
        label_selector={"ray.io/accelerator-type": "TPU", "region": "us-east4"}
    ).remote()
    assert ray.get(handle.get_node_id.remote(), timeout=3) == tpu_node
def test_fallback_strategy(cluster_with_labeled_nodes):
    """When the primary selector is unsatisfiable, the scheduler falls back to
    the first feasible fallback option ("A100")."""
    a100_node, _, _ = cluster_with_labeled_nodes
    handle = MyActor.options(
        # Unsatisfiable primary selector forces the fallback path.
        label_selector={"ray.io/accelerator-type": "does-not-exist"},
        fallback_strategy=[
            {"label_selector": {"ray.io/accelerator-type": "A100"}},
            {"label_selector": {"ray.io/accelerator-type": "TPU"}},
        ],
    ).remote()
    assert ray.get(handle.get_node_id.remote(), timeout=5) == a100_node
def test_empty_selector_fallback_strategy(cluster_with_labeled_nodes):
    """An empty {} fallback selector matches any node once the primary and
    earlier fallbacks turn out infeasible."""
    handle = MyActor.options(
        label_selector={"ray.io/accelerator-type": "does-not-exist"},
        fallback_strategy=[
            # First fallback is also unsatisfiable, so the empty selector wins.
            {"label_selector": {"ray.io/accelerator-type": "also-does-not-exist"}},
            {"label_selector": {}},
        ],
    ).remote()
    assert ray.get(handle.get_node_id.remote(), timeout=5) in set(
        cluster_with_labeled_nodes
    )
def test_infeasible_fallback_strategy(cluster_with_labeled_nodes):
    """If neither the primary selector nor any fallback matches a node, the
    actor never schedules and ray.get times out."""
    handle = MyActor.options(
        label_selector={"ray.io/accelerator-type": "does-not-exist"},
        fallback_strategy=[
            {"label_selector": {"ray.io/accelerator-type": "does-not-exist-either"}},
            {"label_selector": {"ray.io/accelerator-type": "also-nonexistant"}},
        ],
    ).remote()
    with pytest.raises(TimeoutError):
        ray.get(handle.get_node_id.remote(), timeout=3)
def test_fallback_with_feasible_primary_selector(cluster_with_labeled_nodes):
    """A satisfiable primary selector is used directly; fallbacks are ignored."""
    a100_node, _, _ = cluster_with_labeled_nodes
    handle = MyActor.options(
        label_selector={"ray.io/accelerator-type": "A100"},
        fallback_strategy=[
            # Would place on node 2; must not be taken since primary matches.
            {"label_selector": {"ray.io/accelerator-type": "B200"}},
        ],
    ).remote()
    assert ray.get(handle.get_node_id.remote(), timeout=5) == a100_node
# Allow invoking this test module directly via `python <file>`.
if __name__ == "__main__":
    sys.exit(pytest.main(["-sv", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/tests/test_label_scheduling.py",
"license": "Apache License 2.0",
"lines": 142,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:doc/source/train/doc_code/xgboost_quickstart.py | # flake8: noqa
# isort: skip_file
# __xgboost_start__
import pandas as pd
import xgboost
# 1. Load your data as an `xgboost.DMatrix`.
train_df = pd.read_csv("s3://ray-example-data/iris/train/1.csv")
eval_df = pd.read_csv("s3://ray-example-data/iris/val/1.csv")
train_X = train_df.drop("target", axis=1)
train_y = train_df["target"]
eval_X = eval_df.drop("target", axis=1)
eval_y = eval_df["target"]
dtrain = xgboost.DMatrix(train_X, label=train_y)
deval = xgboost.DMatrix(eval_X, label=eval_y)
# 2. Define your xgboost model training parameters.
params = {
"tree_method": "approx",
"objective": "reg:squarederror",
"eta": 1e-4,
"subsample": 0.5,
"max_depth": 2,
}
# 3. Do non-distributed training.
bst = xgboost.train(
params,
dtrain=dtrain,
evals=[(deval, "validation")],
num_boost_round=10,
)
# __xgboost_end__
# __xgboost_ray_start__
import xgboost

import ray.train
from ray.train.xgboost import XGBoostTrainer, RayTrainReportCallback

# 1. Load your data as a Ray Data Dataset.
# `anonymous@` enables unauthenticated access to the public example bucket.
train_dataset = ray.data.read_csv("s3://anonymous@ray-example-data/iris/train")
eval_dataset = ray.data.read_csv("s3://anonymous@ray-example-data/iris/val")
def train_func():
    """Per-worker training loop executed by each Ray Train worker."""
    # 2. Load your data shard as an `xgboost.DMatrix`.

    # Get dataset shards for this worker
    train_shard = ray.train.get_dataset_shard("train")
    eval_shard = ray.train.get_dataset_shard("eval")

    # Convert shards to pandas DataFrames
    train_df = train_shard.materialize().to_pandas()
    eval_df = eval_shard.materialize().to_pandas()

    train_X = train_df.drop("target", axis=1)
    train_y = train_df["target"]
    eval_X = eval_df.drop("target", axis=1)
    eval_y = eval_df["target"]

    dtrain = xgboost.DMatrix(train_X, label=train_y)
    deval = xgboost.DMatrix(eval_X, label=eval_y)

    # 3. Define your xgboost model training parameters.
    params = {
        "tree_method": "approx",
        "objective": "reg:squarederror",
        "eta": 1e-4,
        "subsample": 0.5,
        "max_depth": 2,
    }

    # 4. Do distributed data-parallel training.
    # Ray Train sets up the necessary coordinator processes and
    # environment variables for your workers to communicate with each other.
    bst = xgboost.train(
        params,
        dtrain=dtrain,
        evals=[(deval, "validation")],
        num_boost_round=10,
        # Optional: Use the `RayTrainReportCallback` to save and report checkpoints.
        callbacks=[RayTrainReportCallback()],
    )
# 5. Configure scaling and resource requirements.
scaling_config = ray.train.ScalingConfig(num_workers=2, resources_per_worker={"CPU": 2})

# 6. Launch distributed training job.
trainer = XGBoostTrainer(
    train_func,
    scaling_config=scaling_config,
    datasets={"train": train_dataset, "eval": eval_dataset},
    # If running in a multi-node cluster, this is where you
    # should configure the run's persistent storage that is accessible
    # across all worker nodes.
    # run_config=ray.train.RunConfig(storage_path="s3://..."),
)
result = trainer.fit()

# 7. Load the trained model
import os

# The checkpoint directory holds the model file saved by RayTrainReportCallback.
with result.checkpoint.as_directory() as checkpoint_dir:
    model_path = os.path.join(checkpoint_dir, RayTrainReportCallback.CHECKPOINT_NAME)
    model = xgboost.Booster()
    model.load_model(model_path)
# __xgboost_ray_end__
| {
"repo_id": "ray-project/ray",
"file_path": "doc/source/train/doc_code/xgboost_quickstart.py",
"license": "Apache License 2.0",
"lines": 90,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/data/_internal/execution/operators/join.py | import logging
import math
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Set, Tuple, Type
from ray.data._internal.arrow_block import ArrowBlockAccessor
from ray.data._internal.arrow_ops.transform_pyarrow import (
MIN_PYARROW_VERSION_RUN_END_ENCODED_TYPES,
MIN_PYARROW_VERSION_VIEW_TYPES,
)
from ray.data._internal.execution.interfaces import PhysicalOperator
from ray.data._internal.execution.operators.hash_shuffle import (
HashShufflingOperatorBase,
ShuffleAggregation,
_combine,
)
from ray.data._internal.logical.operators import JoinType
from ray.data._internal.util import GiB, MiB
from ray.data._internal.utils.arrow_utils import get_pyarrow_version
from ray.data._internal.utils.transform_pyarrow import _is_pa_extension_type
from ray.data.block import Block
from ray.data.context import DataContext
if TYPE_CHECKING:
import pyarrow as pa
@dataclass(frozen=True)
class _DatasetPreprocessingResult:
    """Result of join preprocessing containing split tables.

    Separates tables into supported (directly joinable) and unsupported
    (requires indexing) column projections.
    """

    # Projection of the columns whose types Arrow can join on directly.
    supported_projection: "pa.Table"
    # Projection of the columns with unjoinable types (maps, lists, structs,
    # ...); these are re-attached to the join result via index columns.
    unsupported_projection: "pa.Table"
# Maps Ray Data's JoinType enum values to the join-type verbs accepted by
# `pyarrow.Table.join`.
_JOIN_TYPE_TO_ARROW_JOIN_VERB_MAP = {
    JoinType.INNER: "inner",
    JoinType.LEFT_OUTER: "left outer",
    JoinType.RIGHT_OUTER: "right outer",
    JoinType.FULL_OUTER: "full outer",
    JoinType.LEFT_SEMI: "left semi",
    JoinType.RIGHT_SEMI: "right semi",
    JoinType.LEFT_ANTI: "left anti",
    JoinType.RIGHT_ANTI: "right anti",
}

logger = logging.getLogger(__name__)
class JoiningAggregation(ShuffleAggregation):
    """Stateless aggregation for distributed joining of 2 sequences.

    This implementation performs hash-based distributed joining by:

        - Accumulating identical keys from both sequences into the same partition
        - Performing join on individual partitions independently

    For actual joining, Pyarrow native joining functionality is utilised.
    """

    def __init__(
        self,
        *,
        join_type: JoinType,
        left_key_col_names: Tuple[str, ...],
        right_key_col_names: Tuple[str, ...],
        left_columns_suffix: Optional[str] = None,
        right_columns_suffix: Optional[str] = None,
        data_context: DataContext,
    ):
        # NOTE(review): asserts are stripped under `python -O`; kept as-is
        # since callers may rely on AssertionError here — confirm before
        # converting to explicit raises.
        assert (
            len(left_key_col_names) > 0
        ), "At least 1 column to join on has to be provided"
        assert len(right_key_col_names) == len(
            left_key_col_names
        ), "Number of columns for both left and right join operands has to match"
        assert join_type in _JOIN_TYPE_TO_ARROW_JOIN_VERB_MAP, (
            f"Join type is not currently supported (got: {join_type}; "  # noqa: C416
            f"supported: {[jt for jt in JoinType]})"  # noqa: C416
        )

        self._left_key_col_names: Tuple[str, ...] = left_key_col_names
        self._right_key_col_names: Tuple[str, ...] = right_key_col_names
        self._join_type: JoinType = join_type

        self._left_columns_suffix: Optional[str] = left_columns_suffix
        self._right_columns_suffix: Optional[str] = right_columns_suffix

    def finalize(self, partition_shards_map: Dict[int, List[Block]]) -> Iterator[Block]:
        """Performs join on blocks from left (seq 0) and right (seq 1) sequences."""
        assert (
            len(partition_shards_map) == 2
        ), f"Two input-sequences are expected (got {len(partition_shards_map)})"

        left_partition_shards = partition_shards_map[0]
        right_partition_shards = partition_shards_map[1]

        # Concatenate the shards of each side into a single Arrow table.
        left_table = _combine(left_partition_shards)
        right_table = _combine(right_partition_shards)

        left_on = list(self._left_key_col_names)
        right_on = list(self._right_key_col_names)

        # Preprocess: split unsupported columns and add index columns if needed
        preprocess_result_l, preprocess_result_r = self._preprocess(
            left_table, right_table, left_on, right_on
        )

        # Perform the join on supported columns
        arrow_join_type = _JOIN_TYPE_TO_ARROW_JOIN_VERB_MAP[self._join_type]

        supported = preprocess_result_l.supported_projection.join(
            preprocess_result_r.supported_projection,
            join_type=arrow_join_type,
            keys=left_on,
            right_keys=right_on,
            left_suffix=self._left_columns_suffix,
            right_suffix=self._right_columns_suffix,
        )

        # Add back unsupported columns
        result = self._postprocess(
            supported,
            preprocess_result_l.unsupported_projection,
            preprocess_result_r.unsupported_projection,
        )

        yield result

    def _preprocess(
        self,
        left_table: "pa.Table",
        right_table: "pa.Table",
        left_on: List[str],
        right_on: List[str],
    ) -> Tuple[_DatasetPreprocessingResult, _DatasetPreprocessingResult]:
        """Preprocesses tables by splitting unsupported columns and adding indices."""
        # Get supported columns
        supported_l, unsupported_l = self._split_unsupported_columns(left_table)
        supported_r, unsupported_r = self._split_unsupported_columns(right_table)

        # Handle joins on unsupported columns: join *keys* must always be of
        # a joinable type.
        conflicting_columns: Set[str] = set(unsupported_l.column_names) & set(left_on)
        if conflicting_columns:
            raise ValueError(
                f"Cannot join on columns with unjoinable types. "
                f"Left join key columns {conflicting_columns} have unjoinable types "
                f"(map, union, list, struct, etc.) which cannot be used for join operations."
            )

        conflicting_columns: Set[str] = set(unsupported_r.column_names) & set(right_on)
        if conflicting_columns:
            raise ValueError(
                f"Cannot join on columns with unjoinable types. "
                f"Right join key columns {conflicting_columns} have unjoinable types "
                f"(map, union, list, struct, etc.) which cannot be used for join operations."
            )

        # Index if we have unsupported columns
        should_index_l = self._should_index_side("left", supported_l, unsupported_l)
        should_index_r = self._should_index_side("right", supported_r, unsupported_r)

        # Add index columns for back-referencing if we have unsupported columns
        if should_index_l:
            supported_l = self._append_index_column(
                table=supported_l, col_name=self._index_name("left")
            )
        if should_index_r:
            supported_r = self._append_index_column(
                table=supported_r, col_name=self._index_name("right")
            )

        left = _DatasetPreprocessingResult(
            supported_projection=supported_l,
            unsupported_projection=unsupported_l,
        )
        right = _DatasetPreprocessingResult(
            supported_projection=supported_r,
            unsupported_projection=unsupported_r,
        )
        return left, right

    def _postprocess(
        self,
        supported: "pa.Table",
        unsupported_l: "pa.Table",
        unsupported_r: "pa.Table",
    ) -> "pa.Table":
        """Re-attaches unjoinable columns to the joined table via the index columns."""
        # Index if we have unsupported columns
        should_index_l = self._index_name("left") in supported.schema.names
        should_index_r = self._index_name("right") in supported.schema.names

        # Add back unsupported columns (join type logic is in should_index_* variables)
        if should_index_l:
            supported = self._add_back_unsupported_columns(
                joined_table=supported,
                unsupported_table=unsupported_l,
                index_col_name=self._index_name("left"),
            )
        if should_index_r:
            supported = self._add_back_unsupported_columns(
                joined_table=supported,
                unsupported_table=unsupported_r,
                index_col_name=self._index_name("right"),
            )
        return supported

    def _index_name(self, suffix: str) -> str:
        """Name of the synthetic row-index column for the given join side."""
        return f"__rd_index_level_{suffix}__"

    def _should_index_side(
        self, side: str, supported_table: "pa.Table", unsupported_table: "pa.Table"
    ) -> bool:
        """
        Determine whether to create an index column for a given side of the join.

        A column is "supported" if it is "joinable", and "unsupported" otherwise.
        A supported_table is a table with only "supported" columns. Index columns are
        needed when we have both supported and unsupported columns in a table, and
        that table's columns will appear in the final result.

        Args:
            side: "left" or "right" to indicate which side of the join
            supported_table: Table containing ONLY joinable columns
            unsupported_table: Table containing ONLY unjoinable columns

        Returns:
            True if an index column should be created for this side
        """
        # Must have both supported and unsupported columns to need indexing.
        # We cannot rely on row_count because it can return a non-zero row count
        # for an empty-schema.
        if not supported_table.schema or not unsupported_table.schema:
            return False

        # For semi/anti joins, only index the side that appears in the result
        if side == "left":
            # Left side appears in result for all joins except right_semi/right_anti
            return self._join_type not in [JoinType.RIGHT_SEMI, JoinType.RIGHT_ANTI]
        else:  # side == "right"
            # Right side appears in result for all joins except left_semi/left_anti
            return self._join_type not in [JoinType.LEFT_SEMI, JoinType.LEFT_ANTI]

    def _split_unsupported_columns(
        self, table: "pa.Table"
    ) -> Tuple["pa.Table", "pa.Table"]:
        """
        Split a PyArrow table into two tables based on column joinability.

        Separates columns into supported types and unsupported types that cannot be
        directly joined on but should be preserved in results.

        Args:
            table: Input PyArrow table to split

        Returns:
            Tuple of (supported_table, unsupported_table) where:
                - supported_table contains columns with primitive/joinable types
                - unsupported_table contains columns with complex/unjoinable types
        """
        supported, unsupported = [], []
        for idx in range(len(table.columns)):
            col: "pa.ChunkedArray" = table.column(idx)
            col_type: "pa.DataType" = col.type
            if _is_pa_extension_type(col_type) or self._is_pa_join_not_supported(
                col_type
            ):
                unsupported.append(idx)
            else:
                supported.append(idx)
        return table.select(supported), table.select(unsupported)

    def _add_back_unsupported_columns(
        self,
        joined_table: "pa.Table",
        unsupported_table: "pa.Table",
        index_col_name: str,
    ) -> "pa.Table":
        """Projects unjoinable columns by the post-join row indices and stacks them on."""
        # Extract the index column array and drop the column from the joined table
        i = joined_table.schema.get_field_index(index_col_name)
        indices = joined_table.column(i)
        joined_table = joined_table.remove_column(i)

        # Project the unsupported columns using the indices and combine with joined table
        projected = ArrowBlockAccessor(unsupported_table).take(indices)
        return ArrowBlockAccessor(joined_table).hstack(projected)

    def _append_index_column(self, table: "pa.Table", col_name: str) -> "pa.Table":
        """Appends a 0..num_rows-1 row-index column used for back-referencing."""
        import numpy as np
        import pyarrow as pa

        index_col = pa.array(np.arange(table.num_rows))
        return table.append_column(col_name, index_col)

    def _is_pa_join_not_supported(self, dtype: "pa.DataType") -> bool:
        """
        The latest pyarrow versions do not support joins where the
        tables contain the following types below (lists,
        structs, maps, unions, extension types, etc.)

        Args:
            dtype: The input type of column.
                (Renamed from `type`, which shadowed the builtin.)

        Returns:
            True if the type cannot be present (non join-key) during joins.
            False if the type can be present.
        """
        import pyarrow as pa

        pyarrow_version = get_pyarrow_version()
        # Some unjoinable types only exist in newer pyarrow versions; gate the
        # checks on the installed version.
        is_v12 = pyarrow_version >= MIN_PYARROW_VERSION_RUN_END_ENCODED_TYPES
        is_v16 = pyarrow_version >= MIN_PYARROW_VERSION_VIEW_TYPES

        return (
            pa.types.is_map(dtype)
            or pa.types.is_union(dtype)
            or pa.types.is_list(dtype)
            or pa.types.is_struct(dtype)
            or pa.types.is_null(dtype)
            or pa.types.is_large_list(dtype)
            or pa.types.is_fixed_size_list(dtype)
            or (is_v12 and pa.types.is_run_end_encoded(dtype))
            or (
                is_v16
                and (
                    pa.types.is_binary_view(dtype)
                    or pa.types.is_string_view(dtype)
                    or pa.types.is_list_view(dtype)
                )
            )
        )
class JoinOperator(HashShufflingOperatorBase):
    def __init__(
        self,
        data_context: DataContext,
        left_input_op: PhysicalOperator,
        right_input_op: PhysicalOperator,
        left_key_columns: Tuple[str],
        right_key_columns: Tuple[str],
        join_type: JoinType,
        *,
        num_partitions: Optional[int] = None,
        left_columns_suffix: Optional[str] = None,
        right_columns_suffix: Optional[str] = None,
        partition_size_hint: Optional[int] = None,
        aggregator_ray_remote_args_override: Optional[Dict[str, Any]] = None,
        shuffle_aggregation_type: Optional[Type[ShuffleAggregation]] = None,
    ):
        """Physical operator performing a hash-shuffle based distributed join.

        Args:
            data_context: DataContext for this operator.
            left_input_op / right_input_op: Upstream operators producing the
                left (seq 0) and right (seq 1) input sequences.
            left_key_columns / right_key_columns: Join key column names.
            join_type: One of the supported JoinType values.
            num_partitions: Optional number of hash partitions.
            left_columns_suffix / right_columns_suffix: Optional suffixes to
                disambiguate colliding column names in the join output.
            partition_size_hint: Optional size hint (bytes) per partition.
            aggregator_ray_remote_args_override: Optional overrides for the
                aggregator actors' ray.remote args.
            shuffle_aggregation_type: Optional ShuffleAggregation subclass to
                use instead of the default JoiningAggregation.
        """
        # Use new stateless JoiningAggregation factory
        def _create_joining_aggregation() -> JoiningAggregation:
            # NOTE: an invalid `shuffle_aggregation_type` is only detected
            # lazily here, when the aggregation is first created — not at
            # operator-construction time.
            if shuffle_aggregation_type is not None:
                if not issubclass(shuffle_aggregation_type, ShuffleAggregation):
                    raise TypeError(
                        f"shuffle_aggregation_type must be a subclass of {ShuffleAggregation}, "
                        f"got {shuffle_aggregation_type}"
                    )
            # Fall back to the default JoiningAggregation when no override
            # is provided.
            aggregation_class = shuffle_aggregation_type or JoiningAggregation

            return aggregation_class(
                join_type=join_type,
                left_key_col_names=left_key_columns,
                right_key_col_names=right_key_columns,
                left_columns_suffix=left_columns_suffix,
                right_columns_suffix=right_columns_suffix,
                data_context=data_context,
            )

        super().__init__(
            name_factory=(
                lambda num_partitions: f"Join(num_partitions={num_partitions})"
            ),
            input_ops=[left_input_op, right_input_op],
            data_context=data_context,
            # Keys are provided per input sequence (left, then right).
            key_columns=[left_key_columns, right_key_columns],
            num_input_seqs=2,
            num_partitions=num_partitions,
            partition_size_hint=partition_size_hint,
            partition_aggregation_factory=_create_joining_aggregation,
            aggregator_ray_remote_args_override=aggregator_ray_remote_args_override,
            shuffle_progress_bar_name="Shuffle",
            finalize_progress_bar_name="Join",
        )
def _get_operator_num_cpus_override(self) -> float:
return self.data_context.join_operator_actor_num_cpus_override
@classmethod
def _estimate_aggregator_memory_allocation(
cls,
*,
num_aggregators: int,
num_partitions: int,
estimated_dataset_bytes: int,
) -> int:
partition_byte_size_estimate = math.ceil(
estimated_dataset_bytes / num_partitions
)
# Estimate of object store memory required to accommodate all partitions
# handled by a single aggregator
aggregator_shuffle_object_store_memory_required: int = math.ceil(
estimated_dataset_bytes / num_aggregators
)
# Estimate of memory required to perform actual (in-memory) join
# operation (inclusive of 50% overhead allocated for Pyarrow join
# implementation)
#
# NOTE:
# - 2x due to budgeted 100% overhead of Arrow's in-memory join
join_memory_required: int = math.ceil(partition_byte_size_estimate * 2)
# Estimate of memory required to accommodate single partition as an output
# (inside Object Store)
#
# NOTE: x2 due to 2 sequences involved in joins
output_object_store_memory_required: int = partition_byte_size_estimate
aggregator_total_memory_required: int = (
# Inputs (object store)
aggregator_shuffle_object_store_memory_required
+
# Join (heap)
join_memory_required
+
# Output (object store)
output_object_store_memory_required
)
logger.info(
f"Estimated memory requirement for joining aggregator "
f"(partitions={num_partitions}, "
f"aggregators={num_aggregators}, "
f"dataset (estimate)={estimated_dataset_bytes / GiB:.1f}GiB): "
f"shuffle={aggregator_shuffle_object_store_memory_required / MiB:.1f}MiB, "
f"joining={join_memory_required / MiB:.1f}MiB, "
f"output={output_object_store_memory_required / MiB:.1f}MiB, "
f"total={aggregator_total_memory_required / MiB:.1f}MiB, "
)
return aggregator_total_memory_required
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/execution/operators/join.py",
"license": "Apache License 2.0",
"lines": 382,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/_internal/logical/operators/join_operator.py | from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Sequence, Tuple
from ray.data._internal.logical.interfaces import (
LogicalOperator,
LogicalOperatorSupportsPredicatePassThrough,
PredicatePassThroughBehavior,
)
from ray.data._internal.logical.operators.n_ary_operator import NAry
if TYPE_CHECKING:
from ray.data.dataset import Schema
from ray.data.expressions import Expr
__all__ = [
"Join",
"JoinSide",
"JoinType",
]
class JoinType(Enum):
    """Supported join kinds.

    Member values are the public string spellings accepted by the
    ``join_type`` argument (e.g. ``"inner"``, ``"left_outer"``).
    """

    INNER = "inner"
    LEFT_OUTER = "left_outer"
    RIGHT_OUTER = "right_outer"
    FULL_OUTER = "full_outer"
    LEFT_SEMI = "left_semi"
    RIGHT_SEMI = "right_semi"
    LEFT_ANTI = "left_anti"
    RIGHT_ANTI = "right_anti"
class JoinSide(Enum):
    """Represents which side of a join to push a predicate to.

    The enum values correspond to branch indices (0 for left, 1 for right).
    """

    LEFT = 0  # index of the left input branch
    RIGHT = 1  # index of the right input branch
class Join(NAry, LogicalOperatorSupportsPredicatePassThrough):
    """Logical operator for join.

    Carries the join configuration (type, key columns, suffixes, partitioning
    hints) and implements the predicate pass-through protocol so filters can
    be pushed below the join when that is semantically safe.
    """

    def __init__(
        self,
        left_input_op: LogicalOperator,
        right_input_op: LogicalOperator,
        join_type: str,
        left_key_columns: Tuple[str],
        right_key_columns: Tuple[str],
        *,
        num_partitions: int,
        left_columns_suffix: Optional[str] = None,
        right_columns_suffix: Optional[str] = None,
        partition_size_hint: Optional[int] = None,
        aggregator_ray_remote_args: Optional[Dict[str, Any]] = None,
    ):
        """
        Args:
            left_input_op: The input operator at left hand side.
            right_input_op: The input operator at right hand side.
            join_type: The kind of join that should be performed, one of ("inner",
                "left_outer", "right_outer", "full_outer", "left_semi", "right_semi",
                "left_anti", "right_anti").
            left_key_columns: The columns from the left Dataset that should be used as
                keys of the join operation.
            right_key_columns: The columns from the right Dataset that should be used as
                keys of the join operation.
            num_partitions: Total number of expected blocks outputted by this
                operator.
            left_columns_suffix: Optional suffix appended to left-side column
                names to disambiguate overlapping names.
            right_columns_suffix: Optional suffix appended to right-side column
                names to disambiguate overlapping names.
            partition_size_hint: Hint to joining operator about the estimated
                avg expected size of the resulting partition (in bytes)
            aggregator_ray_remote_args: Optional Ray remote args forwarded to
                the shuffle aggregator actors.

        Raises:
            ValueError: If ``join_type`` is not a valid :class:`JoinType` value.
        """
        try:
            join_type_enum = JoinType(join_type)
        except ValueError:
            raise ValueError(
                f"Invalid join type: '{join_type}'. "
                f"Supported join types are: {', '.join(jt.value for jt in JoinType)}."
            )

        super().__init__(left_input_op, right_input_op, num_outputs=num_partitions)

        self.left_key_columns = left_key_columns
        self.right_key_columns = right_key_columns
        self.join_type = join_type_enum
        self.left_columns_suffix = left_columns_suffix
        self.right_columns_suffix = right_columns_suffix
        self.partition_size_hint = partition_size_hint
        self.aggregator_ray_remote_args = aggregator_ray_remote_args

    @staticmethod
    def _validate_schemas(
        left_op_schema: "Schema",
        right_op_schema: "Schema",
        left_key_column_names: Tuple[str],
        right_key_column_names: Tuple[str],
    ):
        """Validate that join keys exist on both sides with matching types.

        Raises:
            ValueError: If no key columns were provided, if the left/right key
                column counts differ, or if the key columns' types do not
                match between the two schemas.
        """

        def _col_names_as_str(keys: Sequence[str]):
            keys_joined = ", ".join(map(lambda k: f"'{k}'", keys))
            return f"[{keys_joined}]"

        if len(left_key_column_names) < 1:
            raise ValueError(
                f"At least 1 column name to join on has to be provided (got "
                f"{_col_names_as_str(left_key_column_names)})"
            )

        if len(left_key_column_names) != len(right_key_column_names):
            raise ValueError(
                f"Number of columns provided for left and right datasets has to match "
                f"(got {_col_names_as_str(left_key_column_names)} and "
                f"{_col_names_as_str(right_key_column_names)})"
            )

        def _get_key_column_types(schema: "Schema", keys: Tuple[str]):
            # Returns the types of the key columns in *schema order* (not key
            # order); None when the schema itself is unknown/empty.
            return (
                [
                    _type
                    for name, _type in zip(schema.names, schema.types)
                    if name in keys
                ]
                if schema
                else None
            )

        # BUGFIX: Each schema must be probed with its *own* key-column names.
        # Previously the names were swapped (the left schema was checked for
        # the right-side keys and vice versa), which silently validated joins
        # whose keys have different names on each side: both lookups came
        # back empty and compared equal.
        left_op_key_cols = _get_key_column_types(
            left_op_schema, left_key_column_names
        )
        right_op_key_cols = _get_key_column_types(
            right_op_schema, right_key_column_names
        )

        if left_op_key_cols != right_op_key_cols:
            raise ValueError(
                f"Key columns are expected to be present and have the same types "
                "in both left and right operands of the join operation: "
                f"left has {left_op_schema}, but right has {right_op_schema}"
            )

    def predicate_passthrough_behavior(self) -> PredicatePassThroughBehavior:
        # Whether a filter may move below this join depends on the join type
        # and which columns it references; see which_side_to_push_predicate.
        return PredicatePassThroughBehavior.CONDITIONAL

    def which_side_to_push_predicate(
        self, predicate_expr: "Expr"
    ) -> Optional[JoinSide]:
        """Determine which side of the join to push a predicate to.

        Returns the side to push to, or None if pushdown is not safe.

        Predicate pushdown is safe for:
        - INNER: Can push to either side
        - LEFT_OUTER/SEMI/ANTI: Can push to left side (preserved/output side)
        - RIGHT_OUTER/SEMI/ANTI: Can push to right side (preserved/output side)
        - FULL_OUTER: Cannot push (both sides can generate nulls)

        The predicate must reference columns from exactly one side of the join,
        OR reference only join key columns that all exist on one side.
        """
        # Get predicate columns and schemas
        predicate_columns = self._get_referenced_columns(predicate_expr)
        left_schema = self.input_dependencies[0].infer_schema()
        right_schema = self.input_dependencies[1].infer_schema()

        # Without both schemas we cannot prove the pushdown is safe.
        if not left_schema or not right_schema:
            return None

        # Get column sets for each side
        left_columns = set(left_schema.names)
        right_columns = set(right_schema.names)
        left_join_keys = set(self.left_key_columns)
        right_join_keys = set(self.right_key_columns)

        # Get pushdown rules for this join type
        can_push_left, can_push_right = self._get_pushdown_rules()

        # Check if predicate can be evaluated on left side
        # Condition: ALL predicate columns must exist on left (either as regular columns or join keys)
        can_evaluate_on_left = predicate_columns.issubset(
            left_columns
        ) or predicate_columns.issubset(left_join_keys)
        if can_evaluate_on_left and can_push_left:
            return JoinSide.LEFT

        # Check if predicate can be evaluated on right side
        can_evaluate_on_right = predicate_columns.issubset(
            right_columns
        ) or predicate_columns.issubset(right_join_keys)
        if can_evaluate_on_right and can_push_right:
            return JoinSide.RIGHT

        # Cannot push down
        return None

    def _get_pushdown_rules(self) -> Tuple[bool, bool]:
        """Get pushdown rules for the current join type.

        Returns:
            Tuple of (can_push_left, can_push_right) indicating which sides
            can accept predicate pushdown for this join type.
        """
        pushdown_rules = {
            JoinType.INNER: (True, True),
            JoinType.LEFT_OUTER: (True, False),
            JoinType.RIGHT_OUTER: (False, True),
            JoinType.LEFT_SEMI: (True, False),
            JoinType.RIGHT_SEMI: (False, True),
            JoinType.LEFT_ANTI: (True, False),
            JoinType.RIGHT_ANTI: (False, True),
            JoinType.FULL_OUTER: (False, False),
        }
        # Default to (False, False) — never push for unknown join types.
        return pushdown_rules.get(self.join_type, (False, False))

    def _get_referenced_columns(self, expr: "Expr") -> set[str]:
        """Extract all column names referenced in an expression."""
        from ray.data._internal.planner.plan_expression.expression_visitors import (
            _ColumnReferenceCollector,
        )

        visitor = _ColumnReferenceCollector()
        visitor.visit(expr)
        return set(visitor.get_column_refs())
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/logical/operators/join_operator.py",
"license": "Apache License 2.0",
"lines": 187,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/tests/test_join.py | from typing import Optional
import numpy as np
import pandas as pd
import pytest
from packaging.version import parse as parse_version
import ray
from ray.data._internal.logical.operators import JoinType
from ray.data._internal.util import MiB, rows_same
from ray.data._internal.utils.arrow_utils import get_pyarrow_version
from ray.data.context import DataContext
from ray.data.dataset import Dataset
from ray.exceptions import RayTaskError
from ray.tests.conftest import * # noqa
@pytest.mark.parametrize(
    "num_rows_left,num_rows_right,partition_size_hint",
    [
        (32, 32, 1 * MiB),
        (32, 16, None),
        (16, 32, None),
        # "Degenerate" cases with mostly empty partitions
        (32, 1, None),
        (1, 32, None),
    ],
)
def test_simple_inner_join(
    ray_start_regular_shared_2_cpus,
    num_rows_left: int,
    num_rows_right: int,
    partition_size_hint: Optional[int],
):
    """Inner join of two datasets must match the equivalent Pandas join,
    including asymmetric and mostly-empty partition configurations."""
    # NOTE: We override max-block size to make sure that in cases when a partition
    # size hint is not provided, we're not over-estimating amount of memory
    # required for the aggregators
    DataContext.get_current().target_max_block_size = 1 * MiB

    doubles = ray.data.range(num_rows_left).map(
        lambda row: {"id": row["id"], "double": int(row["id"]) * 2}
    )
    squares = ray.data.range(num_rows_right).map(
        lambda row: {"id": row["id"], "square": int(row["id"]) ** 2}
    )

    doubles_pd = doubles.to_pandas()
    squares_pd = squares.to_pandas()

    # Join using Pandas (to assert against)
    expected_pd = doubles_pd.join(squares_pd.set_index("id"), on="id", how="inner")
    expected_pd_sorted = expected_pd.sort_values(by=["id"]).reset_index(drop=True)

    # Join using Ray Data
    joined: Dataset = doubles.join(
        squares,
        join_type="inner",
        num_partitions=16,
        on=("id",),
        partition_size_hint=partition_size_hint,
    )

    # TODO use native to_pandas() instead
    joined_pd = pd.DataFrame(joined.take_all())

    # Sort resulting frame and reset index (to be able to compare with expected one)
    joined_pd_sorted = joined_pd.sort_values(by=["id"]).reset_index(drop=True)

    pd.testing.assert_frame_equal(expected_pd_sorted, joined_pd_sorted)
@pytest.mark.parametrize(
    "join_type",
    [
        "left_outer",
        "right_outer",
        "left_semi",
        "right_semi",
        "left_anti",
        "right_anti",
    ],
)
@pytest.mark.parametrize(
    "num_rows_left,num_rows_right",
    [
        (32, 32),
        (32, 16),
        (16, 32),
        # "Degenerate" cases with mostly empty partitions
        (1, 32),
        (32, 1),
    ],
)
def test_simple_left_right_outer_semi_anti_join(
    ray_start_regular_shared_2_cpus,
    join_type,
    num_rows_left,
    num_rows_right,
):
    """Left/right outer, semi and anti joins must match the equivalent
    Pandas computation across overlapping and degenerate input sizes."""
    # NOTE: We override max-block size to make sure that in cases when a partition
    # size hint is not provided, we're not over-estimating amount of memory
    # required for the aggregators
    DataContext.get_current().target_max_block_size = 1 * MiB

    doubles = ray.data.range(num_rows_left).map(
        lambda row: {"id": row["id"], "double": int(row["id"]) * 2}
    )
    squares = ray.data.range(num_rows_right).map(
        lambda row: {"id": row["id"], "square": int(row["id"]) ** 2}
    )

    doubles_pd = doubles.to_pandas()
    squares_pd = squares.to_pandas()

    # Join using Pandas (to assert against)
    if join_type == "left_outer":
        expected_pd = doubles_pd.join(
            squares_pd.set_index("id"), on="id", how="left"
        ).reset_index(drop=True)
    elif join_type == "right_outer":
        expected_pd = (
            doubles_pd.set_index("id")
            .join(squares_pd, on="id", how="right")
            .reset_index(drop=True)
        )
    elif join_type == "left_semi":
        # Left semi: left rows that have matches in right (left columns only)
        merged = doubles_pd.merge(squares_pd, on="id", how="inner")
        expected_pd = merged[["id", "double"]].drop_duplicates().reset_index(drop=True)
    elif join_type == "right_semi":
        # Right semi: right rows that have matches in left (right columns only)
        merged = doubles_pd.merge(squares_pd, on="id", how="inner")
        expected_pd = merged[["id", "square"]].drop_duplicates().reset_index(drop=True)
    elif join_type == "left_anti":
        # Left anti: left rows that don't have matches in right
        merged = doubles_pd.merge(squares_pd, on="id", how="left", indicator=True)
        expected_pd = merged[merged["_merge"] == "left_only"][
            ["id", "double"]
        ].reset_index(drop=True)
    elif join_type == "right_anti":
        # Right anti: right rows that don't have matches in left
        merged = doubles_pd.merge(squares_pd, on="id", how="right", indicator=True)
        expected_pd = merged[merged["_merge"] == "right_only"][
            ["id", "square"]
        ].reset_index(drop=True)
    else:
        raise ValueError(f"Unsupported join type: {join_type}")

    # Join using Ray Data
    joined: Dataset = doubles.join(
        squares,
        join_type=join_type,
        num_partitions=16,
        on=("id",),
    )

    joined_pd = pd.DataFrame(joined.take_all())

    # Handle empty results from Ray Data which may not preserve schema
    if len(joined_pd) == 0 and len(expected_pd) == 0:
        pass
    else:
        # Sort resulting frame and reset index (to be able to compare with expected one)
        joined_pd_sorted = joined_pd.sort_values(by=["id"]).reset_index(drop=True)
        expected_pd_sorted = expected_pd.sort_values(by=["id"]).reset_index(drop=True)

        pd.testing.assert_frame_equal(expected_pd_sorted, joined_pd_sorted)
@pytest.mark.parametrize(
    "num_rows_left,num_rows_right",
    [
        (32, 32),
        (32, 16),
        (16, 32),
        # # "Degenerate" cases with mostly empty partitions
        (1, 32),
        (32, 1),
    ],
)
def test_simple_full_outer_join(
    ray_start_regular_shared_2_cpus,
    num_rows_left,
    num_rows_right,
):
    """Full-outer join with disjoint key ranges (right ids are shifted by
    ``num_rows_left``) must match the equivalent Pandas outer join."""
    # NOTE: We override max-block size to make sure that in cases when a partition
    # size hint is not provided, we're not over-estimating amount of memory
    # required for the aggregators
    DataContext.get_current().target_max_block_size = 1 * MiB

    doubles = ray.data.range(num_rows_left).map(
        lambda row: {"id": row["id"], "double": int(row["id"]) * 2}
    )
    # Right-side ids are offset by num_rows_left, so the key ranges do not
    # overlap and every row needs null-filling from the other side.
    squares = ray.data.range(num_rows_right).map(
        lambda row: {"id": row["id"] + num_rows_left, "square": int(row["id"]) ** 2}
    )

    doubles_pd = doubles.to_pandas()
    squares_pd = squares.to_pandas()

    # Join using Pandas (to assert against)
    expected_pd = doubles_pd.join(
        squares_pd.set_index("id"), on="id", how="outer"
    ).reset_index(drop=True)

    # Join using Ray Data
    joined: Dataset = doubles.join(
        squares,
        join_type="full_outer",
        num_partitions=16,
        on=("id",),
        # NOTE: We override this to reduce hardware requirements
        # for every aggregator (by default requiring 1 logical CPU)
        aggregator_ray_remote_args={"num_cpus": 0.01},
    )

    joined_pd = pd.DataFrame(joined.take_all())

    # Handle empty results from Ray Data which may not preserve schema
    if len(joined_pd) == 0 and len(expected_pd) == 0:
        pass
    else:
        # Sort resulting frame and reset index (to be able to compare with expected one)
        joined_pd_sorted = joined_pd.sort_values(by=["id"]).reset_index(drop=True)
        expected_pd_sorted = expected_pd.sort_values(by=["id"]).reset_index(drop=True)

        pd.testing.assert_frame_equal(expected_pd_sorted, joined_pd_sorted)
@pytest.mark.parametrize("left_suffix", [None, "_left"])
@pytest.mark.parametrize("right_suffix", [None, "_right"])
def test_simple_self_join(ray_start_regular_shared_2_cpus, left_suffix, right_suffix):
    """Self-join: without any suffix the duplicated non-key column must fail
    with a schema error; with a suffix the result must match Pandas."""
    # NOTE: We override max-block size to make sure that in cases when a partition
    # size hint is not provided, we're not over-estimating amount of memory
    # required for the aggregators
    DataContext.get_current().target_max_block_size = 1 * MiB

    doubles = ray.data.range(100).map(
        lambda row: {"id": row["id"], "double": int(row["id"]) * 2}
    )

    doubles_pd = doubles.to_pandas()

    # Self-join
    joined: Dataset = doubles.join(
        doubles,
        join_type="inner",
        num_partitions=16,
        on=("id",),
        left_suffix=left_suffix,
        right_suffix=right_suffix,
        # NOTE: We override this to reduce hardware requirements
        # for every aggregator (by default requiring 1 logical CPU)
        aggregator_ray_remote_args={"num_cpus": 0.01},
    )

    if left_suffix is None and right_suffix is None:
        # The duplicated "double" column cannot be disambiguated, so the
        # (lazy) join is expected to fail at execution time.
        with pytest.raises(RayTaskError) as exc_info:
            joined.count()

        assert 'Field "double" exists 2 times' in str(exc_info.value.cause)
    else:
        joined_pd = joined.to_pandas()

        # Join using Pandas (to assert against)
        expected_pd = doubles_pd.join(
            doubles_pd.set_index("id"),
            on="id",
            how="inner",
            lsuffix=left_suffix,
            rsuffix=right_suffix,
        ).reset_index(drop=True)

        assert rows_same(expected_pd, joined_pd), "Expected contents to be same"
def test_invalid_join_config(ray_start_regular_shared_2_cpus):
    """Passing a bare string (instead of a tuple/list) for the key-column
    arguments must raise a ValueError with a precise message."""
    dataset = ray.data.range(32)

    # `on` must be a tuple or a list, not a bare string.
    with pytest.raises(ValueError) as exc_info:
        dataset.join(
            dataset,
            "inner",
            num_partitions=16,
            on="id",
            validate_schemas=True,
        )
    assert str(exc_info.value) == "Expected tuple or list as `on` (got str)"

    # Same constraint applies to `right_on`.
    with pytest.raises(ValueError) as exc_info:
        dataset.join(
            dataset,
            "inner",
            num_partitions=16,
            on=("id",),
            right_on="id",
            validate_schemas=True,
        )
    assert str(exc_info.value) == "Expected tuple or list as `right_on` (got str)"
@pytest.mark.parametrize("join_type", [jt for jt in JoinType])  # noqa: C416
def test_invalid_join_not_matching_key_columns(
    ray_start_regular_shared_2_cpus, join_type
):
    """Schema validation must reject missing key columns and key columns
    whose types differ between the two sides, for every join type."""
    # Case 1: Check on missing key column
    empty_ds = ray.data.range(0)
    non_empty_ds = ray.data.range(32)

    with pytest.raises(ValueError) as exc_info:
        empty_ds.join(
            non_empty_ds,
            join_type,
            num_partitions=16,
            on=("id",),
            validate_schemas=True,
        )

    assert (
        str(exc_info.value)
        == "Key columns are expected to be present and have the same types in both "
        "left and right operands of the join operation: left has None, but right "
        "has Column Type\n------ ----\nid int64"
    )

    # Case 2: Check mismatching key column
    id_int_type_ds = ray.data.range(32).map(lambda row: {"id": int(row["id"])})
    id_float_type_ds = ray.data.range(32).map(lambda row: {"id": float(row["id"])})

    with pytest.raises(ValueError) as exc_info:
        id_int_type_ds.join(
            id_float_type_ds,
            join_type,
            num_partitions=16,
            on=("id",),
            validate_schemas=True,
        )

    assert (
        str(exc_info.value)
        == "Key columns are expected to be present and have the same types in both "
        "left and right operands of the join operation: left has "
        "Column Type\n------ ----\nid int64, but right has "
        "Column Type\n------ ----\nid double"
    )
@pytest.mark.parametrize("join_type", ["left_anti", "right_anti"])
def test_anti_join_no_matches(
    ray_start_regular_shared_2_cpus,
    join_type,
):
    """Test anti-join when there are no matches - should return all rows from respective side"""
    DataContext.get_current().target_max_block_size = 1 * MiB

    doubles = ray.data.range(32).map(
        lambda row: {"id": row["id"], "double": int(row["id"]) * 2}
    )
    # Create squares with completely different keys
    squares = ray.data.range(32).map(
        lambda row: {"id": row["id"] + 100, "square": int(row["id"]) ** 2}
    )

    # Anti-join should return all rows from respective side
    joined: Dataset = doubles.join(
        squares,
        join_type=join_type,
        num_partitions=4,
        on=("id",),
    )

    joined_pd = pd.DataFrame(joined.take_all())

    # With zero key overlap, an anti-join preserves its own side untouched.
    if join_type == "left_anti":
        expected_pd = doubles.to_pandas()
    else:  # right_anti
        expected_pd = squares.to_pandas()

    # Should get all rows from the respective table
    joined_pd_sorted = joined_pd.sort_values(by=["id"]).reset_index(drop=True)
    expected_pd_sorted = expected_pd.sort_values(by=["id"]).reset_index(drop=True)

    pd.testing.assert_frame_equal(expected_pd_sorted, joined_pd_sorted)
@pytest.mark.parametrize("join_type", ["left_anti", "right_anti"])
def test_anti_join_all_matches(
    ray_start_regular_shared_2_cpus,
    join_type,
):
    """An anti-join in which every key matches must yield an empty result."""
    DataContext.get_current().target_max_block_size = 1 * MiB

    left_side = ray.data.range(32).map(
        lambda row: {"id": row["id"], "double": int(row["id"]) * 2}
    )
    right_side = ray.data.range(32).map(
        lambda row: {"id": row["id"], "square": int(row["id"]) ** 2}
    )

    # Every key exists on both sides, so no row survives the anti-join.
    joined: Dataset = left_side.join(
        right_side,
        join_type=join_type,
        num_partitions=4,
        on=("id",),
    )

    result_rows = joined.take_all()
    assert len(pd.DataFrame(result_rows)) == 0
@pytest.mark.parametrize("join_type", ["left_anti", "right_anti"])
def test_anti_join_multi_key(
    ray_start_regular_shared_2_cpus,
    join_type,
):
    """Test anti-join with multiple join keys"""
    DataContext.get_current().target_max_block_size = 1 * MiB

    # Create left dataset using ray.data.range for consistency
    left_ds = ray.data.range(32).map(
        lambda row: {
            "id": row["id"],
            "oddness": row["id"] % 2,  # Even
            "10x": row["id"] * 10,
        }
    )

    # Create right dataset with partial matches (16 vs 32 for partial overlap)
    right_ds = ray.data.range(16).map(
        lambda row: {
            "id": row["id"] % 2,
            "oddness": row["id"] % 2 + 1,  # odd
            "100x": row["id"] * 100,
        }
    )

    # Anti-join should return rows that don't have matching key1,key2 in the other dataset
    joined: Dataset = left_ds.join(
        right_ds,
        join_type=join_type,
        num_partitions=4,
        on=("id", "oddness"),
    )

    joined_pd = pd.DataFrame(joined.take_all())

    # Create expected data for pandas comparison
    left_pd = left_ds.to_pandas()
    right_pd = right_ds.to_pandas()

    # Calculate expected result using pandas (merge indicator marks which
    # side each row came from; anti-join keeps only the "own side only" rows)
    if join_type == "left_anti":
        expected_cols = ["id", "oddness", "10x"]

        merged = left_pd.merge(
            right_pd, on=["id", "oddness"], how="left", indicator=True
        )
        expected_pd = merged[merged["_merge"] == "left_only"][expected_cols]
    else:
        expected_cols = ["id", "oddness", "100x"]

        merged = left_pd.merge(
            right_pd, on=["id", "oddness"], how="right", indicator=True
        )
        expected_pd = merged[merged["_merge"] == "right_only"][expected_cols]

    # Sort resulting frames and reset index (to be able to compare with expected one)
    expected_pd_sorted = expected_pd.sort_values(by=expected_cols).reset_index(
        drop=True
    )
    joined_pd_sorted = joined_pd.sort_values(by=expected_cols).reset_index(drop=True)

    pd.testing.assert_frame_equal(expected_pd_sorted, joined_pd_sorted)
# Helper functions to reduce test code bloat
def _assert_columns_match(result, expected_columns):
"""Assert that result has the expected column schema."""
actual_columns = set(result[0].keys())
assert actual_columns == expected_columns
def _assert_list_values(result_by_id, expected_values):
"""Assert list column values match expected values."""
for row_id, expected_list in expected_values.items():
assert result_by_id[row_id]["list_col"] == expected_list
def _assert_tensor_values(result_by_id, expected_values):
"""Assert tensor column values match expected tensor data."""
for row_id, expected_tensor in expected_values.items():
assert np.array_equal(result_by_id[row_id]["tensor_col"], expected_tensor)
def _assert_none_values(result_by_id, none_checks):
"""Assert that specified columns are None for specified row IDs."""
for row_id, columns in none_checks.items():
for column in columns:
assert result_by_id[row_id][column] is None
def _assert_scalar_values(result_by_id, expected_values):
"""Assert scalar column values match expected values."""
for row_id, column_values in expected_values.items():
for column, expected_value in column_values.items():
assert result_by_id[row_id][column] == expected_value
@pytest.mark.skipif(
    get_pyarrow_version() < parse_version("10.0.0"),
    reason="""Joins use empty arrays with type coercion. This pyarrow
    version does not support type coercion of extension types, which
    are needed for tensors.""",
)
@pytest.mark.parametrize(
    "join_type",
    [
        "inner",
        "left_outer",
        "right_outer",
        "full_outer",
        "left_semi",
        "right_semi",
        "left_anti",
        "right_anti",
    ],
)
def test_join_with_unjoinable_non_key_columns(
    ray_start_regular_shared_2_cpus, join_type
):
    """Test that joins work correctly when non-key columns have unjoinable types."""
    # Left dataset with joinable key but unjoinable non-key columns
    # Create test data - centralized for clarity and maintainability
    list_data = [
        [1, 2, 3],  # list for id=0
        [4, 5, 6],  # list for id=1
        [7, 8, 9],  # list for id=2
    ]
    tensor_data = [
        np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32),  # 2x2 tensor for id=0
        np.array([[5.0, 6.0], [7.0, 8.0]], dtype=np.float32),  # 2x2 tensor for id=1
        np.array([[9.0, 10.0], [11.0, 12.0]], dtype=np.float32),  # 2x2 tensor for id=2
    ]
    scalar_data = ["a", "b", "c"]  # scalar data for id=0,1,2

    left_ds = ray.data.from_items(
        [
            {
                "id": 0,
                "list_col": list_data[0],
                "tensor_col": tensor_data[0],
                "data": scalar_data[0],
            },
            {
                "id": 1,
                "list_col": list_data[1],
                "tensor_col": tensor_data[1],
                "data": scalar_data[1],
            },
            {
                "id": 2,
                "list_col": list_data[2],
                "tensor_col": tensor_data[2],
                "data": scalar_data[2],
            },
        ]
    )

    # Right dataset with joinable key and columns
    # ids: 0, 1, 3 (so id=2 from left won't match, id=3 from right won't match)
    right_ds = ray.data.from_items(
        [
            {"id": 0, "value": "x", "score": 10},
            {"id": 1, "value": "y", "score": 20},
            {"id": 3, "value": "z", "score": 30},
        ]
    )

    # Verify the join worked and includes unjoinable columns
    joined = left_ds.join(right_ds, join_type=join_type, on=("id",), num_partitions=2)

    result = joined.take_all()
    result_by_id = {row["id"]: row for row in result}

    # Basic validation - join should succeed with unjoinable non-key columns
    if join_type == "inner":
        # Should have 2 rows (id=0 and id=1 match)
        assert len(result) == 2
        # Verify unjoinable columns are preserved
        _assert_list_values(result_by_id, {i: list_data[i] for i in [0, 1]})
        _assert_tensor_values(result_by_id, {i: tensor_data[i] for i in [0, 1]})
    elif join_type == "left_outer":
        # Should have 3 rows (all from left: id=0, 1, 2)
        assert len(result) == 3
        # All left unjoinable columns preserved
        _assert_list_values(result_by_id, {i: list_data[i] for i in [0, 1, 2]})
        _assert_tensor_values(result_by_id, {i: tensor_data[i] for i in [0, 1, 2]})
        # Unmatched left row (id=2) should have None for right columns
        _assert_none_values(result_by_id, {2: ["value"]})
    elif join_type == "right_outer":
        # Should have 3 rows (all from right: id=0, 1, 3)
        assert len(result) == 3
        # Matched rows should have unjoinable columns from left
        _assert_list_values(result_by_id, {i: list_data[i] for i in [0, 1]})
        _assert_tensor_values(result_by_id, {i: tensor_data[i] for i in [0, 1]})
        _assert_scalar_values(result_by_id, {3: {"value": "z"}})
        # Unmatched right row (id=3) should have None for left unjoinable columns
        _assert_none_values(result_by_id, {3: ["list_col", "tensor_col"]})
    elif join_type == "full_outer":
        # Should have 4 rows (all from both sides: id=0, 1, 2, 3)
        assert len(result) == 4
        # Matched rows (id=0, 1) should have data from both sides
        _assert_list_values(result_by_id, {i: list_data[i] for i in [0, 1, 2]})
        _assert_tensor_values(result_by_id, {i: tensor_data[i] for i in [0, 1, 2]})
        _assert_scalar_values(
            result_by_id,
            {
                0: {"value": "x"},
                1: {"value": "y"},
                2: {"data": scalar_data[2]},
                3: {"value": "z", "score": 30},
            },
        )
        # Unmatched rows should have None for columns from the other side
        _assert_none_values(
            result_by_id, {2: ["value", "score"], 3: ["list_col", "tensor_col", "data"]}
        )
    elif join_type == "left_semi":
        # Should return left rows that have matches in right (id=0, 1)
        assert len(result) == 2
        _assert_columns_match(result, {"id", "list_col", "tensor_col", "data"})
        _assert_list_values(result_by_id, {i: list_data[i] for i in [0, 1]})
        _assert_tensor_values(result_by_id, {i: tensor_data[i] for i in [0, 1]})
    elif join_type == "left_anti":
        # Should return left rows that DON'T have matches in right (id=2)
        assert len(result) == 1
        _assert_columns_match(result, {"id", "list_col", "tensor_col", "data"})
        _assert_list_values(result_by_id, {2: list_data[2]})
        _assert_tensor_values(result_by_id, {2: tensor_data[2]})
        _assert_scalar_values(result_by_id, {2: {"data": scalar_data[2]}})
    elif join_type == "right_semi":
        # Should return right rows that have matches in left (id=0, 1)
        assert len(result) == 2
        _assert_columns_match(result, {"id", "value", "score"})
        _assert_scalar_values(result_by_id, {0: {"value": "x"}, 1: {"value": "y"}})
    elif join_type == "right_anti":
        # Should return right rows that DON'T have matches in left (id=3)
        assert len(result) == 1
        _assert_columns_match(result, {"id", "value", "score"})
        _assert_scalar_values(result_by_id, {3: {"value": "z", "score": 30}})

    # For outer joins, ensure unjoinable columns are present
    if join_type in ["inner", "left_outer", "right_outer", "full_outer"]:
        _assert_columns_match(
            result, {"id", "list_col", "tensor_col", "data", "value", "score"}
        )
@pytest.mark.parametrize(
    "join_type,filter_side,should_push",
    [
        ("inner", "left", True),
        ("inner", "right", True),
        ("left_outer", "left", True),
        ("left_outer", "right", False),
    ],
    ids=["inner_left", "inner_right", "left_outer_left", "left_outer_right"],
)
def test_join_with_predicate_pushdown(
    ray_start_regular_shared_2_cpus, join_type, filter_side, should_push
):
    """Test that predicate pushdown works correctly with different join types.

    Filters on single-side predicates should push past the join when appropriate:
    - Inner join: can push to either side
    - Left outer: can push to left (preserved) side only
    - Right outer: can push to right (preserved) side only
    """
    from ray.data._internal.logical.optimizers import LogicalOptimizer
    from ray.data._internal.util import MiB
    from ray.data.expressions import col

    DataContext.get_current().target_max_block_size = 1 * MiB

    # Create datasets directly without map to allow filter pushdown through join
    # Both have ids 0-31 with different value columns
    left_data = [{"id": i, "left_val": i * 10} for i in range(32)]
    right_data = [{"id": i, "right_val": i * 100} for i in range(32)]

    left_ds = ray.data.from_items(left_data)
    right_ds = ray.data.from_items(right_data)

    # Join then filter
    joined = left_ds.join(
        right_ds,
        join_type=join_type,
        num_partitions=4,
        on=("id",),
        aggregator_ray_remote_args={"num_cpus": 0.01},
    )

    # Filter on column from specified side
    if filter_side == "left":
        filtered_ds = joined.filter(expr=col("left_val") < 100)
    else:
        filtered_ds = joined.filter(expr=col("right_val") < 1000)

    # Verify correctness by computing expected result with pandas
    from ray.data._internal.util import rows_same

    left_pd = left_ds.to_pandas()
    right_pd = right_ds.to_pandas()

    # Compute expected join result
    if join_type == "inner":
        expected_pd = left_pd.merge(right_pd, on="id", how="inner")
    elif join_type == "left_outer":
        expected_pd = left_pd.merge(right_pd, on="id", how="left")
    else:
        raise ValueError(f"Unsupported join type for this test: {join_type}")

    # Apply filter (must match what we filtered in Ray Data)
    if filter_side == "left":
        # For left-side filter, use notna() to include NaN rows from outer joins
        expected_pd = expected_pd[expected_pd["left_val"] < 100]
    else:
        # For right-side filter in outer joins, NaN values fail the comparison
        # and are filtered out (matching Ray Data behavior)
        expected_pd = expected_pd[expected_pd["right_val"] < 1000]

    actual_df = filtered_ds.to_pandas()
    expected_df = expected_pd.reset_index(drop=True)

    assert rows_same(actual_df, expected_df), (
        f"Results don't match for {join_type} join with {filter_side} filter:\n"
        f"Actual:\n{actual_df}\n\nExpected:\n{expected_df}"
    )

    # Check plan to verify pushdown behavior
    logical_plan = filtered_ds._plan._logical_plan
    optimized_plan = LogicalOptimizer().optimize(logical_plan)
    plan_str = optimized_plan.dag.dag_str

    join_idx = plan_str.find("Join[Join]")
    filter_idx = plan_str.find("Filter[Filter(")

    if should_push:
        # Filter should be pushed before join
        assert filter_idx != -1, f"Filter should exist in plan: {plan_str}"
        assert filter_idx < join_idx, (
            f"Filter should be pushed before Join for {join_type} with {filter_side} "
            f"predicate. Plan: {plan_str}"
        )
    else:
        # Filter should remain after join
        if filter_idx != -1:
            assert filter_idx > join_idx, (
                f"Filter should stay after Join for {join_type} with {filter_side} "
                f"predicate. Plan: {plan_str}"
            )
def test_join_cross_side_column_comparison_no_pushdown(ray_start_regular_shared_2_cpus):
    """Test PR bug: comparing differently-named join keys from both sides.

    When join keys have different names on left and right
    sides (e.g., left.id and right.user_id), a predicate like col("id") > col("user_id")
    references both sides but cannot be pushed to either side alone since each side
    only has one of these columns.

    Setup:
    - Left has columns: {id, user_id, left_val} - join on "id"
    - Right has columns: {id, user_id, right_val} - join on "user_id"
    - Join: left.id = right.user_id
    - Filter: col("id") > col("user_id") (with suffixes to avoid collision)
    """
    from ray.data._internal.logical.operators import Filter, Join
    from ray.data._internal.logical.optimizers import LogicalOptimizer
    from ray.data._internal.util import MiB
    from ray.data.expressions import col
    from ray.data.tests.test_util import plan_operator_comes_before

    DataContext.get_current().target_max_block_size = 1 * MiB

    # Left: has both id and user_id as columns, joins on "id"
    left_data = [{"id": i, "user_id": i + 5, "left_val": i * 10} for i in range(10)]
    # Right: has both id and user_id as columns, joins on "user_id"
    right_data = [{"id": i + 20, "user_id": i, "right_val": i * 5} for i in range(10)]

    left_ds = ray.data.from_items(left_data)
    right_ds = ray.data.from_items(right_data)

    # Join on left.id = right.user_id (different column names used as keys)
    # Need suffixes to avoid column name collision
    joined = left_ds.join(
        right_ds,
        join_type="inner",
        num_partitions=2,
        on=("id",),
        right_on=("user_id",),
        left_suffix="_l",
        right_suffix="_r",
        aggregator_ray_remote_args={"num_cpus": 0.01},
    )

    # Filter comparing non-join-key columns from both sides:
    # left_val exists only on left, right_val exists only on right.
    # Neither side can evaluate this alone.
    filtered_ds = joined.filter(expr=col("left_val") > col("right_val"))

    # Verify correctness
    result = filtered_ds.take_all()

    # left.id = right.user_id means they match (both 0-9)
    # left_val = id * 10, right_val = user_id * 5 = id * 5
    # So left_val > right_val means id*10 > id*5, true for all id > 0
    assert len(result) == 9, f"Should have 9 rows (id 1-9), got {len(result)}"
    assert all(row["left_val"] > row["right_val"] for row in result)

    # Check plan: filter should NOT be pushed down (should stay after join)
    logical_plan = filtered_ds._plan._logical_plan
    optimized_plan = LogicalOptimizer().optimize(logical_plan)

    # Filter should come AFTER Join (not pushed down).
    # Before join: left has left_val but not right_val, right has right_val but not left_val
    assert not plan_operator_comes_before(optimized_plan, Filter, Join), (
        "Filter comparing columns from both sides should NOT be pushed before Join "
        "since neither side has both columns"
    )
if __name__ == "__main__":
    import sys

    # Run this test module directly, propagating pytest's exit code.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_join.py",
"license": "Apache License 2.0",
"lines": 713,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/llm/tests/serve/cpu/deployments/routers/test_router.py | import sys
from typing import Optional
from unittest.mock import AsyncMock, MagicMock
import openai
import pytest
from ray import serve
from ray.llm._internal.serve.core.configs.llm_config import (
LLMConfig,
ModelLoadingConfig,
)
from ray.llm._internal.serve.core.ingress.ingress import (
OpenAiIngress,
make_fastapi_ingress,
)
from ray.llm._internal.serve.core.server.llm_server import LLMServer
from ray.llm.tests.serve.mocks.mock_vllm_engine import MockVLLMEngine
@pytest.fixture(name="llm_config")
def create_llm_config(stream_batching_interval_ms: Optional[int] = None):
    """Build an LLMConfig for the mock model used throughout these tests.

    NOTE(review): `stream_batching_interval_ms` appears to be injected from
    the tests' `@pytest.mark.parametrize("stream_batching_interval_ms", ...)`
    (falling back to the default of None) — confirm this wiring if the tests
    are reorganized.
    """
    if stream_batching_interval_ms is not None:
        return LLMConfig(
            model_loading_config=ModelLoadingConfig(
                model_id="llm_model_id",
            ),
            # Forward the batching cadence for streamed response chunks.
            experimental_configs={
                "stream_batching_interval_ms": stream_batching_interval_ms,
            },
        )
    else:
        return LLMConfig(
            model_loading_config=ModelLoadingConfig(
                model_id="llm_model_id",
            ),
        )
@pytest.fixture(name="client")
def create_oai_client(llm_config: LLMConfig):
    """Deploy the mock LLM server behind an OpenAI-compatible ingress and
    yield an OpenAI client pointed at it; Serve is shut down on teardown."""
    ServerDeployment = serve.deployment(LLMServer)

    ingress_options = OpenAiIngress.get_deployment_options(llm_configs=[llm_config])
    ingress_cls = make_fastapi_ingress(OpenAiIngress)
    RouterDeployment = serve.deployment(ingress_cls, **ingress_options)

    # Wire the router in front of a server backed by the mock vLLM engine.
    server = ServerDeployment.bind(llm_config, engine_cls=MockVLLMEngine)
    router = RouterDeployment.bind(llm_deployments=[server])
    serve.run(router)

    client = openai.Client(base_url="http://localhost:8000/v1", api_key="foo")
    yield client

    serve.shutdown()
class TestOpenAiIngress:
    """Tests for the OpenAI-compatible ingress in front of a mock vLLM engine."""

    @pytest.mark.asyncio
    @pytest.mark.parametrize("stream_batching_interval_ms", [None, 0, 10000])
    @pytest.mark.parametrize("stream", [True, False])
    async def test_chat(self, stream_batching_interval_ms, client, stream):
        """Tests chat streaming with different stream_batching_interval_ms values.

        0ms super fast batching (no batching)
        10000ms basically should be equivalent to non-streaming
        None is default, which is some fixed non-zero value.
        """
        # Generate 1000 chunks
        n_tokens = 1000
        response = client.chat.completions.create(
            model="llm_model_id",
            messages=[dict(role="user", content="Hello")],
            stream=stream,
            max_tokens=n_tokens,
        )

        if stream:
            text = ""
            role = None
            for chunk in response:
                # The role is only present on the first delta; capture it once.
                if chunk.choices[0].delta.role is not None and role is None:
                    role = chunk.choices[0].delta.role
                if chunk.choices[0].delta.content:
                    text += chunk.choices[0].delta.content
        else:
            text = response.choices[0].message.content
            role = response.choices[0].message.role

        assert role == "assistant"
        # The mock engine deterministically emits "test_0 test_1 ...".
        assert text.strip() == " ".join([f"test_{i}" for i in range(n_tokens)])

    @pytest.mark.asyncio
    @pytest.mark.parametrize("stream_batching_interval_ms", [None, 0, 10000])
    @pytest.mark.parametrize("stream", [True, False])
    async def test_completion(self, stream_batching_interval_ms, client, stream):
        """Tests text completions streaming with different stream_batching_interval_ms values."""
        # Generate tokens
        n_tokens = 1000
        response = client.completions.create(
            model="llm_model_id",
            prompt="Hello",
            stream=stream,
            max_tokens=n_tokens,
        )

        if stream:
            text = ""
            for chunk in response:
                text += chunk.choices[0].text
        else:
            text = response.choices[0].text

        # The mock engine produces "test_0 test_1 test_2 ..." pattern
        expected_text = " ".join([f"test_{i}" for i in range(n_tokens)])
        assert text.strip() == expected_text

    @pytest.mark.asyncio
    @pytest.mark.parametrize("stream", [True, False])
    async def test_tool_call(self, client, stream):
        # Replays a tool-call conversation (assistant tool_calls plus the tool
        # result) and only asserts that some assistant text comes back.
        response = client.chat.completions.create(
            model="llm_model_id",
            messages=[
                {
                    "role": "user",
                    "content": "Can you tell me what the temperate will be in Dallas, in fahrenheit?",
                },
                {
                    "content": None,
                    "role": "assistant",
                    "tool_calls": [
                        {
                            "id": "RBS92VTjJ",
                            "function": {
                                "arguments": '{"city": "Dallas", "state": "TX", "unit": "fahrenheit"}',
                                "name": "get_current_weather",
                            },
                            "type": "function",
                        }
                    ],
                },
                {
                    "role": "tool",
                    "content": "The weather in Dallas, TX is 85 degrees fahrenheit. It is partly cloudly, with highs in the 90's.",
                    "tool_call_id": "n3OMUpydP",
                },
            ],
            stream=stream,
            max_tokens=200,
        )

        if stream:
            text = ""
            role = None
            for chunk in response:
                if chunk.choices[0].delta.role is not None and role is None:
                    role = chunk.choices[0].delta.role
                if chunk.choices[0].delta.content:
                    text += chunk.choices[0].delta.content
        else:
            text = response.choices[0].message.content
            role = response.choices[0].message.role

        assert text

    @pytest.mark.asyncio
    async def test_check_health(self, llm_config: LLMConfig):
        """Test health check functionality."""
        # Mock a server deployment whose remote calls resolve immediately.
        server = MagicMock()
        server.llm_config = MagicMock()
        server.llm_config.remote = AsyncMock(return_value=llm_config)
        server.check_health = MagicMock()
        server.check_health.remote = AsyncMock()

        router = OpenAiIngress(llm_deployments=[server])
        await router.check_health()

    @pytest.mark.asyncio
    async def test_raw_request_info_passed_to_deployment_handle(
        self, llm_config: LLMConfig
    ):
        """Test that raw_request_info is passed to the deployment handle."""
        from ray.llm._internal.serve.core.configs.openai_api_models import (
            ChatCompletionRequest,
            ChatCompletionResponse,
        )
        from ray.llm._internal.serve.core.protocol import RawRequestInfo

        # Track if raw_request_info was received
        captured_raw_request_infos = []

        # Create a mock deployment handle that captures raw_request_info
        async def mock_chat_generator(request, raw_request_info):
            captured_raw_request_infos.append(raw_request_info)
            # Return a valid response
            yield ChatCompletionResponse(
                id="test_id",
                choices=[
                    {
                        "index": 0,
                        "message": {"role": "assistant", "content": "Hello!"},
                        "finish_reason": "stop",
                    }
                ],
                model="llm_model_id",
                object="chat.completion",
                usage={
                    "prompt_tokens": 1,
                    "completion_tokens": 1,
                    "total_tokens": 2,
                },
            )

        mock_handle = MagicMock()
        mock_handle.llm_config = MagicMock()
        mock_handle.llm_config.remote = AsyncMock(return_value=llm_config)
        mock_handle.chat = MagicMock()
        mock_handle.chat.remote = mock_chat_generator
        # Make options() return the same mock so chat.remote is preserved
        mock_handle.options.return_value = mock_handle

        # Create router with mock handle
        router = OpenAiIngress(llm_deployments=[mock_handle])
        await router._init_completed.wait()

        # Create a mock FastAPI request
        from starlette.datastructures import Headers

        mock_request = MagicMock()
        mock_headers = {
            "content-type": "application/json",
            "x-ray-serve-llm-test-header": "router-raw-request-info",
        }
        mock_request.headers = Headers(mock_headers)

        # Make a request through the router
        request_body = ChatCompletionRequest(
            model="llm_model_id",
            messages=[{"role": "user", "content": "Hello"}],
            stream=False,
        )
        await router.chat(request_body, mock_request)

        # Verify that raw_request_info was passed to the deployment handle
        assert len(captured_raw_request_infos) == 1
        assert isinstance(captured_raw_request_infos[0], RawRequestInfo)
        assert captured_raw_request_infos[0].headers == mock_headers
if __name__ == "__main__":
    # Run this test module directly, propagating pytest's exit code.
    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/llm/tests/serve/cpu/deployments/routers/test_router.py",
"license": "Apache License 2.0",
"lines": 217,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:release/llm_tests/serve/benchmark/benchmark_vllm.py | # There is a dead-lock issue that arises due to gevent's monkey-patching
# https://github.com/ipython/ipython/issues/11730
# Fix: We do this import first before anything else
# Thus the # noqa tags are needed below
import gevent.monkey
gevent.monkey.patch_all()
import sys # noqa: E402
import argparse # noqa: E402
import json # noqa: E402
import logging # noqa: E402
import os # noqa: E402
import subprocess # noqa: E402
import threading # noqa: E402
import time # noqa: E402
from datetime import datetime # noqa: E402
from bm import run_bm # noqa: E402
from common import write_to_s3, get_llm_config # noqa: E402
RAYLLM_RELEASE_TEST_PERF_SERVICE_NAME = "rayllm_release_test_perf_service"
# Seconds allotted to the server process / log-streaming threads at shutdown.
THREAD_CLEANUP_TIMEOUT_S = 10

logger = logging.getLogger(__file__)
logging.basicConfig(
    format="%(asctime)s %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)
def get_timestamp():
    """Return the current wall-clock time formatted as ``HH:MM:SS.mmm``."""
    now = datetime.now()
    # Millisecond precision: floor the microseconds to three digits.
    return f"{now:%H:%M:%S}.{now.microsecond // 1000:03d}"
class ColoredLogger:
    """ANSI-colored console logging helpers for the server/client subprocesses."""

    HEADER = "\033[95m"
    SERVER = "\033[94m"
    CLIENT = "\033[92m"
    WARNING = "\033[93m"
    ERROR = "\033[91m"
    ENDC = "\033[0m"

    @staticmethod
    def _emit(color, label, msg):
        # Shared formatter: <color>[LABEL hh:mm:ss.mmm] msg<reset>
        print(f"{color}[{label} {get_timestamp()}] {msg}{ColoredLogger.ENDC}")

    @staticmethod
    def log_server(msg):
        ColoredLogger._emit(ColoredLogger.SERVER, "SERVER", msg)

    @staticmethod
    def log_client(msg):
        ColoredLogger._emit(ColoredLogger.CLIENT, "CLIENT", msg)

    @staticmethod
    def log_error(msg):
        ColoredLogger._emit(ColoredLogger.ERROR, "ERROR", msg)
def stream_process_output(process, logger_func, stop_event, is_error: bool = False):
    """Pump lines from a child process's stdout (or stderr when ``is_error``)
    into ``logger_func`` until the stop event fires, or the process has exited
    and its pipe has run dry."""
    pipe = process.stderr if is_error else process.stdout
    while not stop_event.is_set():
        line = pipe.readline()
        if line:
            logger_func(line.strip())
        elif process.poll() is not None:
            # Empty read and the process has exited: nothing more will arrive.
            break
def get_vllm_cli_args(llm_config):
    """Translate an llm_config dict into CLI args for vLLM's OpenAI API server.

    Args:
        llm_config: Parsed LLM config with ``model_loading_config.model_id``
            and an ``engine_kwargs`` mapping. Note the tokenizer-pool entries
            are popped from ``engine_kwargs`` in place.

    Returns:
        A flat list of CLI tokens, e.g.
        ``["--model", "...", "--tensor-parallel-size", "2"]``.
    """
    engine_kwargs = llm_config["engine_kwargs"]

    # When we define tokenizer_pool size, vllm, by default, uses Ray
    # that breaks the assumption that this script should not use ray
    # TODO (Kourosh): When the job issue with non driver ray
    # subprocesses are resolved we can remove these constraints
    engine_kwargs.pop("tokenizer_pool_extra_config", None)
    engine_kwargs.pop("tokenizer_pool_size", None)
    engine_kwargs.pop("tokenizer_pool_type", None)

    cli_args = ["--model", llm_config["model_loading_config"]["model_id"]]
    for key, value in engine_kwargs.items():
        flag = "--" + key.replace("_", "-")
        if isinstance(value, bool):
            # Boolean engine kwargs map to store_true flags: only emit the
            # bare flag when True. (Previously the flag was appended even for
            # False values, which would wrongly enable the option.)
            if value:
                cli_args.append(flag)
        elif isinstance(value, dict):
            cli_args.extend([flag, json.dumps(value)])
        else:
            cli_args.extend([flag, str(value)])

    # NOTE: --tensor-parallel-size and --max-model-len are already emitted by
    # the loop above; the previous explicit re-append produced duplicate args
    # (argparse kept the last occurrence, so effective behavior is unchanged).
    return cli_args
def get_ray_options(llm_config):
    """Build Ray remote-args (GPU count plus an accelerator-type resource tag)
    from an LLM config."""
    degree = llm_config["tensor_parallelism"]["degree"]
    accelerator = llm_config["accelerator_type"]
    return {
        "num_gpus": degree,
        # Tiny fractional resource used only to pin onto the right node type.
        "resources": {f"accelerator_type:{accelerator}": 0.001},
    }
def start_vllm_process(vllm_cli_args):
    """Launch vLLM's OpenAI-compatible API server as a subprocess.

    stdout/stderr are piped in line-buffered text mode so the caller can
    stream them via ``stream_process_output``. Returns the Popen handle.
    """
    server_process = subprocess.Popen(
        ["python", "-m", "vllm.entrypoints.openai.api_server"] + vllm_cli_args,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        universal_newlines=True,  # Return strings from stdout/stderr
        bufsize=1,  # Line buffered
        env=os.environ.copy(),
    )
    return server_process
def run_vllm_benchmark(vllm_cli_args):
    """Start a vLLM server, wait until it is healthy, run the load benchmark
    against it, and always tear down the server and log-streaming threads.

    Returns the benchmark results dict ({} if the benchmark never ran).
    """
    stop_event = threading.Event()
    results = {}

    try:
        # Start the server process
        ColoredLogger.log_server("Starting vLLM server...")
        server_process = start_vllm_process(vllm_cli_args)

        # Start server output streaming thread
        server_thread = threading.Thread(
            target=stream_process_output,
            args=(server_process, ColoredLogger.log_server, stop_event),
            daemon=True,  # Daemonize the thread so it stops when the main thread stops.
        )
        server_thread.start()

        # Start server error streaming thread.
        # NOTE(review): stderr is also routed through log_server (not
        # log_error) — presumably intentional so both appear as SERVER output;
        # confirm.
        server_error_thread = threading.Thread(
            target=stream_process_output,
            args=(server_process, ColoredLogger.log_server, stop_event),
            kwargs={"is_error": True},
            daemon=True,  # Daemonize the thread so it stops when the main thread stops.
        )
        server_error_thread.start()

        # Wait for server to be ready
        server_ready = False
        start_time = time.time()
        timeout = 300  # 5 minutes timeout

        while not server_ready and time.time() - start_time < timeout:
            if server_process.poll() is not None:
                raise Exception("Server process terminated unexpectedly")

            # Check if server is responding
            try:
                import requests

                response = requests.get("http://localhost:8000/health")
                if response.status_code == 200:
                    server_ready = True
                    ColoredLogger.log_server("Server is ready!")
            except Exception:
                # Server not accepting connections yet; back off and retry.
                time.sleep(1)
                continue

        if not server_ready:
            raise TimeoutError("Server failed to start within timeout period")

        # Start benchmark
        ColoredLogger.log_client("Starting benchmark...")
        results = run_bm(
            api_url="http://localhost:8000",
            api_key="NONE",
            concurrency=[1, 2, 4, 8, 16, 32],
            run_time="1m",
            prompt_tokens=256,
            max_tokens=64,
            stream=False,
            summary_file="./results.csv",
        )

        print(
            "Writing final result to AWS Firehose:",
            json.dumps(results, indent=4, sort_keys=True),
            sep="\n",
        )

        ColoredLogger.log_client("Benchmark completed successfully")

    except Exception as e:
        ColoredLogger.log_error(f"Error during benchmark: {str(e)}")
        raise

    finally:
        # Clean up. `locals()` checks guard against failures that occurred
        # before the corresponding variable was bound.
        if "server_process" in locals():
            ColoredLogger.log_server("Shutting down server...")
            server_process.terminate()
            # NOTE(review): wait() may raise TimeoutExpired if the server
            # ignores SIGTERM — confirm whether a kill() fallback is wanted.
            server_process.wait(timeout=THREAD_CLEANUP_TIMEOUT_S)

        stop_event.set()

        # Wait for all threads to complete
        if "server_thread" in locals():
            server_thread.join(timeout=THREAD_CLEANUP_TIMEOUT_S)
        if "server_error_thread" in locals():
            server_error_thread.join(timeout=THREAD_CLEANUP_TIMEOUT_S)

        # Wait some time to make sure everything is cleaned up.
        time.sleep(5)

    return results
def upload_results_to_s3(s3_path, results, service_metadata):
    """Attach the service metadata to every benchmark record and write them to S3.

    Raises:
        ValueError: If any record in ``results`` is None.
    """
    for result in results:
        if result is None:
            raise ValueError(
                "Found None results during benchmarking. "
                "This should not have happened."
            )

    annotated = [{**result, **service_metadata} for result in results]
    write_to_s3(annotated, s3_path)
def main(pargs):
    """Benchmark a bare vLLM server described by the given config and upload
    the results, tagged with service metadata, to S3."""
    llm_config = get_llm_config(pargs.llm_config)
    vllm_cli_args = get_vllm_cli_args(llm_config)

    results = run_vllm_benchmark(vllm_cli_args)

    # e.g. "A10G-TP2": accelerator type plus tensor-parallel degree.
    tag = f"{llm_config['accelerator_type']}-TP{llm_config['engine_kwargs']['tensor_parallel_size']}"
    service_metadata = {
        "cloud_name": "",
        "service_name": "",
        "py_version": f"py{sys.version_info.major}{sys.version_info.minor}",
        "tag": tag,
        "vllm_engine": f"V{os.environ.get('VLLM_USE_V1', '')}",
    }

    # Post the results to S3
    if results:
        print(
            "Writing final result to AWS S3:",
            json.dumps(results, indent=4, sort_keys=True),
            sep="\n",
        )
        upload_results_to_s3(pargs.remote_result_path, results, service_metadata)
    else:
        raise ValueError(
            "For some reason the benchmarking results are empty. Something is wrong."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--llm-config",
        type=str,
        required=True,
        # This string was previously passed as `default=`, which is dead for a
        # required argument — it is clearly the intended help text.
        help="The LLM config to start vLLM engine",
    )
    parser.add_argument(
        "--remote-result-path",
        type=str,
        required=True,
        help="The remote s3 path to store intermediate results on.",
    )

    main(parser.parse_args())
| {
"repo_id": "ray-project/ray",
"file_path": "release/llm_tests/serve/benchmark/benchmark_vllm.py",
"license": "Apache License 2.0",
"lines": 221,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:release/llm_tests/serve/benchmark/common.py | """Common utilities shared between release tests"""
import json
import logging
import os
from typing import Dict, List, Any
from urllib.parse import urlparse
import boto3
import yaml
logger = logging.getLogger(__file__)
# Configure the root logger once at import time so log lines are timestamped.
logging.basicConfig(
    format="%(asctime)s %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)
def get_llm_config(serve_config_file: str) -> Dict:
    """Get the first llm_config from a Serve config file.

    The first entry of ``applications[0].args.llm_configs`` may be inlined as
    a dict or be a path to a standalone llm-config YAML file; in the latter
    case that file is loaded and returned.

    (Fixed previously wrong annotations: the argument is a YAML file path and
    the result is a single config dict, not ``List[Dict]`` / ``List[Any]``.)
    """
    with open(serve_config_file, "r") as f:
        loaded_llm_config = yaml.safe_load(f)

    applications = loaded_llm_config["applications"]
    config = applications[0]["args"]["llm_configs"][0]

    if isinstance(config, dict):
        return config

    # Otherwise the entry must be a path to an llm-config YAML file.
    assert isinstance(config, str)
    with open(config, "r") as f:
        loaded_llm_config = yaml.safe_load(f)
    return loaded_llm_config
def read_yaml(file_path: str) -> Dict:
    """Load a Serve config from ``file_path``, which must be an existing
    ``.yaml`` file.

    Raises:
        RuntimeError: If the path lacks a ``.yaml`` extension or does not exist.
    """
    if not file_path.endswith(".yaml"):
        message = (
            "Must pass in a Serve config yaml file using the -f option. Got "
            f'file path "{file_path}", which does not end in ".yaml".'
        )
        raise RuntimeError(message)
    if not os.path.exists(file_path):
        raise RuntimeError(f"File path {file_path} does not exist.")

    with open(file_path, "r") as config_file:
        return yaml.safe_load(config_file)
def write_to_s3(data_to_write: List[Dict], s3_path: str):
    """Serialize records as JSONL and upload them to the given ``s3://`` path.

    Args:
        data_to_write: Records to persist; each becomes one JSON line.
        s3_path: Destination, e.g. ``s3://bucket/key`` (``.jsonl`` is appended
            when the key has no recognized extension).

    Raises:
        Exception: Propagates any boto3/S3 failure after logging it.
    """
    # Parse S3 path using urllib
    parsed_url = urlparse(s3_path)
    bucket_name = parsed_url.netloc
    # Remove leading slash and get the rest of the path
    key = parsed_url.path.lstrip("/")

    # If no recognized file extension provided, append .jsonl
    if not key.endswith((".jsonl", ".json")):
        key += ".jsonl"

    s3_client = boto3.client("s3")

    try:
        # Convert data to JSONL (one JSON document per line)
        jsonl_data = "\n".join(json.dumps(record) for record in data_to_write)

        # Upload to S3
        s3_client.put_object(
            Bucket=bucket_name,
            Key=key,
            Body=jsonl_data,
            ContentType="application/x-jsonlines",  # MIME type for JSONL
        )
        # Use the module-level logger defined at the top of this file rather
        # than the root logger.
        logger.info(f"Successfully wrote {len(data_to_write)} records to {s3_path}")
    except Exception as e:
        logger.error(f"Failed to write to S3: {str(e)}")
        raise
def read_from_s3(s3_path: str) -> List[Dict]:
    """Download a JSONL object from S3 and parse it into a list of dicts.

    Args:
        s3_path: Source location, e.g. ``s3://bucket/key.jsonl``.

    Returns:
        One dict per non-empty line of the object.

    Raises:
        Exception: Propagates any boto3/S3 failure after logging it.
    """
    # Parse S3 path using urllib
    parsed_url = urlparse(s3_path)
    bucket_name = parsed_url.netloc
    key = parsed_url.path.lstrip("/")

    # Initialize S3 client
    s3_client = boto3.client("s3")

    try:
        # Get the object from S3
        response = s3_client.get_object(Bucket=bucket_name, Key=key)

        # Read the data
        data = response["Body"].read().decode("utf-8")
        records = [
            json.loads(line)
            for line in data.splitlines()
            if line.strip()  # Skip empty lines
        ]

        # Use the module-level logger defined at the top of this file rather
        # than the root logger.
        logger.info(f"Successfully read {len(records)} records from {s3_path}")
        return records
    except Exception as e:
        logger.error(f"Failed to read from S3: {str(e)}")
        raise
| {
"repo_id": "ray-project/ray",
"file_path": "release/llm_tests/serve/benchmark/common.py",
"license": "Apache License 2.0",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:release/llm_tests/serve/benchmark/constants.py | """This file contains constants used throughout the Buildkite files."""
# Config for the rayllm release test.
RAYLLM_RELEASE_TEST_SERVICE_NAME = "rayllm_release_test_service"
RAYLLM_RELEASE_TEST_COMPUTE_CONFIG_NAME = "rayllm-release-test"
DEFAULT_CLOUD = "serve_release_tests_cloud"
CLOUD_PROVIDER_TO_CLOUD_NAME = {
"aws": DEFAULT_CLOUD,
"gcp": "anyscale_gcp_public_default_cloud_us_west_1",
}
| {
"repo_id": "ray-project/ray",
"file_path": "release/llm_tests/serve/benchmark/constants.py",
"license": "Apache License 2.0",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:release/llm_tests/serve/benchmark/firehose_utils.py | import json
import os
import time
from enum import Enum
from typing import Any, Dict
import ray
import boto3
from pydantic import BaseModel, field_validator
# Kinesis Firehose delivery stream that receives CI benchmark records.
STREAM_NAME = "rayllm-ci-results"
# Destination table tag embedded in every record (the "_table" field).
DEFAULT_TABLE_NAME = "release_test_result"
# Time to sleep in-between firehose writes to make sure the timestamp between
# records are distinct
SLEEP_BETWEEN_FIREHOSE_WRITES_MS = 50
class RecordName(str, Enum):
    """Known record names (one per release-test flavor) written to the stream."""

    STARTUP_TEST = "service-startup-test"
    STARTUP_TEST_GCP = "service-startup-test-gcp"
    STARTUP_TEST_AWS = "service-startup-test-aws"
    RAYLLM_PERF_TEST = "rayllm-perf-test"
    VLLM_PERF_TEST = "vllm-perf-test"
class FirehoseRecord(BaseModel):
    """A single release-test result record destined for the Firehose stream.

    ``record_name`` identifies which release test produced the record;
    ``record_metrics`` is an arbitrary metric-name -> value mapping.
    """

    record_name: RecordName
    record_metrics: Dict[str, Any]

    @field_validator("record_name", mode="before")
    def validate_record_name(cls, v):
        # Accept plain strings and coerce them into the RecordName enum.
        if isinstance(v, str):
            return RecordName(v)
        return v

    def write(self, verbose: bool = False):
        """Serialize this record and put it onto the CI results Firehose stream.

        Requires AWS credentials in the environment that are allowed to assume
        the Firehose service role. Sleeps briefly afterwards so consecutive
        records get distinct millisecond timestamps.
        """
        final_result = {
            "_table": DEFAULT_TABLE_NAME,
            "name": str(self.record_name.value),
            "branch": os.environ.get("BUILDKITE_BRANCH", ""),
            "commit": ray.__commit__,
            "report_timestamp_ms": int(time.time() * 1000),
            "results": {**self.record_metrics},
        }

        if verbose:
            print(
                "Writing final result to AWS Firehose:",
                json.dumps(final_result, indent=4, sort_keys=True),
                sep="\n",
            )

        # Add newline character to separate records
        data = json.dumps(final_result) + "\n"

        # Need to assume the role in order to share access to the Firehose
        sts_client = boto3.client("sts")
        assumed_role = sts_client.assume_role(
            RoleArn="arn:aws:iam::830883877497:role/service-role/KinesisFirehoseServiceRole-rayllm-ci-res-us-west-2-1728664186256",
            RoleSessionName="FirehosePutRecordSession",
        )
        credentials = assumed_role["Credentials"]

        # Use the assumed credentials to create a Firehose client
        firehose_client = boto3.client(
            "firehose",
            region_name="us-west-2",
            aws_access_key_id=credentials["AccessKeyId"],
            aws_secret_access_key=credentials["SecretAccessKey"],
            aws_session_token=credentials["SessionToken"],
        )

        response = firehose_client.put_record(
            DeliveryStreamName=STREAM_NAME, Record={"Data": data}
        )

        if verbose:
            print("PutRecord response:")
            print(response)

        # Add some delay to make sure timestamps are unique ints.
        time.sleep(SLEEP_BETWEEN_FIREHOSE_WRITES_MS / 1000)
| {
"repo_id": "ray-project/ray",
"file_path": "release/llm_tests/serve/benchmark/firehose_utils.py",
"license": "Apache License 2.0",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/data/_internal/execution/operators/hash_aggregate.py | import logging
import math
from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Tuple
from ray.data._internal.execution.interfaces import PhysicalOperator
from ray.data._internal.execution.operators.hash_shuffle import (
BlockTransformer,
HashShufflingOperatorBase,
ShuffleAggregation,
)
from ray.data._internal.util import GiB, MiB
from ray.data.aggregate import AggregateFn
from ray.data.block import Block, BlockAccessor
from ray.data.context import DataContext
if TYPE_CHECKING:
from ray.data._internal.planner.exchange.sort_task_spec import SortKey
logger = logging.getLogger(__name__)
class ReducingAggregation(ShuffleAggregation):
    """Stateless shuffle aggregation reducing partially-aggregated blocks.

    Intermediate compaction merges a partition's shards without finalizing
    the aggregation state; the terminal reduction happens in ``finalize``.
    """

    def __init__(
        self,
        key_columns: Tuple[str, ...],
        aggregation_fns: Tuple[AggregateFn, ...],
    ):
        self._sort_key: "SortKey" = self._get_sort_key(key_columns)
        self._aggregation_fns: Tuple[AggregateFn, ...] = aggregation_fns

    @classmethod
    def is_compacting(cls):
        # Shards of a partition may be merged eagerly, before finalization.
        return True

    def compact(self, partition_shards: List[Block]) -> Block:
        assert len(partition_shards) > 0, "Provided sequence must be non-empty"
        # Merge without finalizing, so the result can be combined again later.
        return self._combine(partition_shards, finalize=False)

    def finalize(self, partition_shards_map: Dict[int, List[Block]]) -> Iterator[Block]:
        assert (
            len(partition_shards_map) == 1
        ), f"Single input-sequence is expected (got {len(partition_shards_map)})"

        shards = partition_shards_map[0]
        # Empty partitions yield nothing.
        if shards:
            yield self._combine(shards, finalize=True)

    def _combine(self, blocks: List[Block], *, finalize: bool) -> Block:
        """Reduce ``blocks`` into one, optionally applying the final aggregation."""
        assert len(blocks) > 0

        accessor = BlockAccessor.for_block(blocks[0])
        merged, _ = accessor._combine_aggregated_blocks(
            blocks,
            sort_key=self._sort_key,
            aggs=self._aggregation_fns,
            finalize=finalize,
        )
        return merged

    @staticmethod
    def _get_sort_key(key_columns: Tuple[str, ...]) -> "SortKey":
        from ray.data._internal.planner.exchange.sort_task_spec import SortKey

        return SortKey(key=list(key_columns), descending=False)
class HashAggregateOperator(HashShufflingOperatorBase):
    """Physical operator performing a hash-shuffle-based aggregation.

    Input blocks are partially aggregated before shuffling (to reduce the
    number of bytes shuffled), hash-partitioned on the key columns, then
    reduced per partition by ``ReducingAggregation``.
    """

    # Bounds on accumulated partition-shard counts that drive intermediate
    # compaction in the shuffle aggregators.
    _DEFAULT_MIN_NUM_SHARDS_COMPACTION_THRESHOLD = 100
    _DEFAULT_MAX_NUM_SHARDS_COMPACTION_THRESHOLD = 2000

    def __init__(
        self,
        data_context: DataContext,
        input_op: PhysicalOperator,
        key_columns: Tuple[str],
        aggregation_fns: Tuple[AggregateFn],
        *,
        num_partitions: Optional[int] = None,
        aggregator_ray_remote_args_override: Optional[Dict[str, Any]] = None,
    ):
        # Use new stateless ReducingAggregation factory
        def _create_reducing_aggregation() -> ReducingAggregation:
            return ReducingAggregation(
                key_columns=key_columns,
                aggregation_fns=aggregation_fns,
            )

        super().__init__(
            name_factory=(
                lambda num_partitions: f"HashAggregate(key_columns={key_columns}, "
                f"num_partitions={num_partitions})"
            ),
            input_ops=[input_op],
            data_context=data_context,
            key_columns=[key_columns],
            num_input_seqs=1,
            num_partitions=(
                # NOTE: In case of global aggregations (ie with no key columns specified),
                # we override number of partitions to 1, since the whole dataset
                # will be reduced to just a single row
                num_partitions
                if len(key_columns) > 0
                else 1
            ),
            partition_aggregation_factory=_create_reducing_aggregation,
            input_block_transformer=_create_aggregating_transformer(
                key_columns, aggregation_fns
            ),
            aggregator_ray_remote_args_override=aggregator_ray_remote_args_override,
            shuffle_progress_bar_name="Shuffle",
            finalize_progress_bar_name="Aggregation",
        )

    def _get_operator_num_cpus_override(self) -> float:
        # Lets DataContext pin the CPU allocation of aggregating actors.
        return self.data_context.hash_aggregate_operator_actor_num_cpus_override

    @classmethod
    def _estimate_aggregator_memory_allocation(
        cls,
        *,
        num_aggregators: int,
        num_partitions: int,
        estimated_dataset_bytes: int,
    ) -> int:
        """Estimate total memory (bytes) required by one aggregator actor."""
        partition_byte_size_estimate = math.ceil(
            estimated_dataset_bytes / num_partitions
        )

        # Estimate of object store memory required to accommodate all partitions
        # handled by a single aggregator
        aggregator_shuffle_object_store_memory_required: int = math.ceil(
            estimated_dataset_bytes / num_aggregators
        )
        # Estimate of memory required to accommodate single partition as an output
        # (inside Object Store)
        output_object_store_memory_required: int = partition_byte_size_estimate

        aggregator_total_memory_required: int = (
            # Inputs (object store)
            aggregator_shuffle_object_store_memory_required
            +
            # Output (object store)
            output_object_store_memory_required
        )

        logger.info(
            f"Estimated memory requirement for aggregating aggregator "
            f"(partitions={num_partitions}, "
            f"aggregators={num_aggregators}, "
            f"dataset (estimate)={estimated_dataset_bytes / GiB:.1f}GiB): "
            f"shuffle={aggregator_shuffle_object_store_memory_required / MiB:.1f}MiB, "
            f"output={output_object_store_memory_required / MiB:.1f}MiB, "
            f"total={aggregator_total_memory_required / MiB:.1f}MiB, "
        )

        return aggregator_total_memory_required

    @classmethod
    def _get_min_max_partition_shards_compaction_thresholds(
        cls,
    ) -> Optional[Tuple[int, int]]:
        return (
            cls._DEFAULT_MIN_NUM_SHARDS_COMPACTION_THRESHOLD,
            cls._DEFAULT_MAX_NUM_SHARDS_COMPACTION_THRESHOLD,
        )
def _create_aggregating_transformer(
    key_columns: Tuple[str], aggregation_fns: Tuple[AggregateFn]
) -> BlockTransformer:
    """Build a pre-shuffle transformer that partially aggregates each block
    locally, shrinking the number of bytes that have to be shuffled."""
    sort_key = ReducingAggregation._get_sort_key(key_columns)

    def _partially_aggregate(block: Block) -> Block:
        from ray.data._internal.planner.exchange.aggregate_task_spec import (
            SortAggregateTaskSpec,
        )

        # TODO unify blocks schemas, to avoid validating every block
        # Check that the block's schema is compatible with every aggregation.
        for agg_fn in aggregation_fns:
            agg_fn._validate(BlockAccessor.for_block(block).schema())

        # Keep only the columns the keys/aggregations actually read.
        pruned = SortAggregateTaskSpec._prune_unused_columns(
            block,
            sort_key,
            aggregation_fns,
        )

        # Keyed aggregations need the block sorted on the key columns first;
        # global aggregations (no keys) aggregate the block as-is.
        if sort_key.get_columns():
            pruned = BlockAccessor.for_block(pruned).sort(sort_key)

        return BlockAccessor.for_block(pruned)._aggregate(sort_key, aggregation_fns)

    return _partially_aggregate
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/execution/operators/hash_aggregate.py",
"license": "Apache License 2.0",
"lines": 178,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/_internal/execution/operators/hash_shuffle.py | import abc
import functools
import itertools
import logging
import math
import queue
import random
import threading
import time
import typing
from collections import defaultdict, deque
from dataclasses import dataclass
from typing import (
Any,
Callable,
DefaultDict,
Deque,
Dict,
Generator,
Iterator,
List,
Optional,
Set,
Tuple,
Union,
)
import numpy as np
import pyarrow as pa
import ray
from ray import ObjectRef
from ray._private.ray_constants import (
env_integer,
)
from ray._raylet import StreamingGeneratorStats
from ray.actor import ActorHandle
from ray.data._internal.arrow_block import ArrowBlockBuilder
from ray.data._internal.arrow_ops.transform_pyarrow import (
_create_empty_table,
hash_partition,
)
from ray.data._internal.execution.interfaces import (
ExecutionOptions,
ExecutionResources,
PhysicalOperator,
RefBundle,
)
from ray.data._internal.execution.interfaces.physical_operator import (
DataOpTask,
MetadataOpTask,
OpTask,
TaskExecDriverStats,
estimate_total_num_of_blocks,
)
from ray.data._internal.execution.operators.sub_progress import SubProgressBarMixin
from ray.data._internal.logical.interfaces import LogicalOperator
from ray.data._internal.output_buffer import BlockOutputBuffer, OutputBlockSizeOption
from ray.data._internal.stats import OpRuntimeMetrics
from ray.data._internal.table_block import TableBlockAccessor
from ray.data._internal.util import GiB, MiB
from ray.data.block import (
Block,
BlockAccessor,
BlockExecStats,
BlockMetadata,
BlockMetadataWithSchema,
BlockStats,
BlockType,
TaskExecWorkerStats,
to_stats,
)
from ray.data.context import (
DEFAULT_MAX_HASH_SHUFFLE_AGGREGATORS,
DEFAULT_TARGET_MAX_BLOCK_SIZE,
DataContext,
)
if typing.TYPE_CHECKING:
from ray.data._internal.progress.base_progress import BaseProgressBar
logger = logging.getLogger(__name__)

# Transformation applied to a block (e.g. partial pre-aggregation applied
# before a block is shuffled)
BlockTransformer = Callable[[Block], Block]

# Max number of concurrent tasks a single shuffle aggregator actor may run
# (overridable via the environment variable below)
DEFAULT_HASH_SHUFFLE_AGGREGATOR_MAX_CONCURRENCY = env_integer(
    "RAY_DATA_DEFAULT_HASH_SHUFFLE_AGGREGATOR_MAX_CONCURRENCY", 8
)

# Default memory allocation (bytes) reserved for each aggregator actor
# (overridable via the environment variable below)
DEFAULT_HASH_SHUFFLE_AGGREGATOR_MEMORY_ALLOCATION = env_integer(
    "RAY_DATA_DEFAULT_HASH_SHUFFLE_AGGREGATOR_MEMORY_ALLOCATION", 1 * GiB
)
class ShuffleAggregation:
    """Stateless "aggregation" applied to hash-shuffled partitions.

    Concrete implementations realize operations such as:

      - Concatenation: merging shuffled partition shards into a single
        block (used by ``repartition`` for ex).
      - Join: joining corresponding shuffled partitions.
      - Group Aggregation: applying grouped aggregations (``sum``,
        ``count``, ``unique``, etc).

    Implementations are *stateless*: accumulation and state management are
    owned by the ``HashShuffleAggregator`` actor, which invokes these methods
    as pure transformations on the partition shards it has collected.

    Implementations must provide:

      - ``compact``: "compacts" an accumulated, *partial* list of partition
        shards; used extensively by reducing aggregations (``sum``, ``count``,
        etc) to aggregate periodically during the shuffle stage itself.
      - ``finalize``: finalizes the *complete* list of a partition's shards
        (per input sequence).
    """

    @classmethod
    def is_compacting(cls):
        """Whether this aggregation supports compaction of a partial list
        of a partition's shards (see ``compact``)."""
        return False

    def compact(self, partial_partition_shards: List[Block]) -> Block:
        """Incrementally "compacts" shards of a *single* partition originating
        from a single input blocks sequence.

        This hook processes a *partial* (incomplete) list of the partition's
        shards and may be invoked multiple times while the shuffle is still in
        flight. Reducing aggregations (``sum``, ``count``, ``unique``, etc)
        benefit from it by continuously aggregating partial partitions,
        shrinking the work left for finalization. For transformations where no
        meaningful compaction exists, returning the provided shards unchanged
        is perfectly fine.

        Args:
            partial_partition_shards: Partial (incomplete) list of partition shards.

        Returns:
            Potentially "compacted" block (if it's advantageous to do so).
        """
        raise NotImplementedError()

    def finalize(self, partition_shards_map: Dict[int, List[Block]]) -> Iterator[Block]:
        """Produces this aggregation's output blocks from the final, complete
        set of partition shards. Called once after all shards for a partition
        have been received.

        Args:
            partition_shards_map: maps input-sequence id to that sequence's
                final, complete list of shards for the partition.
                Single input-sequence operations see a single entry;
                multi input-sequence operations (e.g., joins) see one entry
                per input sequence for the same partition.

        Returns:
            Iterator of incrementally yielded output blocks for this partition.
        """
        raise NotImplementedError()
# Factory callable producing fresh (stateless) ``ShuffleAggregation`` instances
ShuffleAggregationFactory = Callable[[], ShuffleAggregation]
class ConcatAggregation(ShuffleAggregation):
    """Hash-shuffle aggregation that simply concatenates partition shards.

    All shards of a partition are merged into a single block; the result is
    optionally sorted on the key columns.
    """

    def __init__(
        self,
        *,
        should_sort: bool = False,
        key_columns: Optional[Tuple[str, ...]] = None,
    ):
        # Sorting is meaningless without columns to sort on
        if should_sort and not key_columns:
            raise ValueError("Key columns must be specified when should_sort=True")

        self._should_sort = should_sort
        self._key_columns = key_columns

    def finalize(self, partition_shards_map: Dict[int, List[Block]]) -> Iterator[Block]:
        """Concatenates blocks and optionally sorts by key columns."""
        assert (
            len(partition_shards_map) == 1
        ), f"Single input-sequence is expected (got {len(partition_shards_map)})"

        shards = partition_shards_map[0]
        if not shards:
            return

        combined = _combine(shards)
        if self._should_sort and combined.num_rows > 0:
            sort_spec = [(col, "ascending") for col in self._key_columns]
            combined = combined.sort_by(sort_spec)

        yield combined
def _combine(partition_shards: List[Block]) -> Block:
    """Merges the given partition shards into a single (Arrow) block."""
    combined = ArrowBlockBuilder()
    for shard in partition_shards:
        combined.add_block(shard)
    return combined.build()
@ray.remote
def _shuffle_block(
    block: Block,
    input_index: int,
    key_columns: List[str],
    pool: "AggregatorPool",
    block_transformer: Optional[BlockTransformer] = None,
    send_empty_blocks: bool = False,
    override_partition_id: Optional[int] = None,
) -> Tuple[BlockMetadata, Dict[int, "_PartitionStats"]]:
    """Shuffles provided block following the algorithm:

    1. Hash-partitions provided block into N partitions (where N is determined by
    the number of receiving aggregators)
    2. Individual (non-empty) partitions are subsequently submitted to respective
    aggregators

    Args:
        block: Incoming block (in the form of Pyarrow's `Table`) to be shuffled
        input_index: Id of the input sequence block belongs to
        key_columns: Columns to be used by hash-partitioning algorithm
        pool: Hash-shuffling operator's pool of aggregators that are due to receive
            corresponding partitions (of the block)
        send_empty_blocks: If set to true, empty blocks will NOT be filtered and
            still be fanned out to individual aggregators to distribute schemas
            (only known once we receive incoming block)
        override_partition_id: Target (overridden) partition id that input block will be
            assigned to
        block_transformer: Block transformer that will be applied to every block prior
            to shuffling

    Returns:
        A tuple of
            - Metadata for the block being shuffled
            - Map of partition ids to partition shard stats produced from the
            shuffled block
    """
    stats = BlockExecStats.builder()

    # Exactly one of key columns / partition-id override must be provided
    assert (len(key_columns) > 0) ^ (override_partition_id is not None), (
        f"Either list of key columns to hash-partition by (got {key_columns} or "
        f"target partition id override (got {override_partition_id}) must be provided!"
    )

    # Apply block transformer prior to shuffling (if any)
    if block_transformer is not None:
        block = block_transformer(block)

    # Make sure we're handling Arrow blocks
    block: Block = TableBlockAccessor.try_convert_block_type(
        block, block_type=BlockType.ARROW
    )

    # Empty block: nothing to shuffle, return early with empty shard stats
    if block.num_rows == 0:
        empty = BlockAccessor.for_block(block).get_metadata(
            block_exec_stats=stats.build(block_ser_time_s=0),
        )
        return (empty, {})

    num_partitions = pool.num_partitions

    assert isinstance(block, pa.Table), f"Expected Pyarrow's `Table`, got {type(block)}"

    # In case when no target key columns have been provided shuffling is
    # reduced to just forwarding whole block to the target aggregator
    if key_columns:
        block_partitions = hash_partition(
            block, hash_cols=key_columns, num_partitions=num_partitions
        )
    else:
        assert (
            0 <= override_partition_id < num_partitions
        ), f"Expected override partition id < {num_partitions} (got {override_partition_id})"

        block_partitions = {override_partition_id: block}

    partition_shards_stats = {}
    awaitable_to_partition_map = {}

    for partition_id in range(num_partitions):
        partition_shard = block_partitions.get(partition_id)
        if partition_shard is None:
            # NOTE: Hash-based shuffle operator uses empty blocks to disseminate
            # schema to aggregators that otherwise might not receive it,
            # in cases when corresponding target partition is resulting into
            # empty one during hash-partitioning
            if not send_empty_blocks:
                continue
            partition_shard = _create_empty_table(block.schema)

        # Capture partition shard metadata
        #
        # NOTE: We're skipping over empty shards as these are used for schema
        # broadcasting and aren't relevant to keep track of
        if partition_shard.num_rows > 0:
            partition_shards_stats[partition_id] = _PartitionStats.for_table(
                partition_shard
            )

        aggregator = pool.get_aggregator_for_partition(partition_id)
        # Put target partition shard into the Object Store to make sure partition shards
        # are managed t/h Object Store irrespective of their size
        partition_ref = ray.put(partition_shard)
        # NOTE: Shuffling task is only considered completed upon target aggregator
        # accepting its respective partition shard
        awaitable = aggregator.submit.remote(input_index, partition_id, partition_ref)

        awaitable_to_partition_map[awaitable] = partition_id

    pending_submissions = list(awaitable_to_partition_map.keys())

    # Before completing shuffling task await for all the blocks
    # to get accepted by corresponding aggregators
    #
    # NOTE: This synchronization is crucial to make sure aggregations are not
    # getting finalized before they receive corresponding partitions
    #
    # NOTE: An unused loop counter previously tracked iterations here; it has
    # been removed as dead code
    while len(pending_submissions) > 0:
        _, pending_submissions = ray.wait(
            pending_submissions, num_returns=len(pending_submissions), timeout=1
        )

    original_block_metadata = BlockAccessor.for_block(block).get_metadata(
        block_exec_stats=stats.build(block_ser_time_s=0)
    )

    if logger.isEnabledFor(logging.DEBUG):
        num_rows_series, byte_sizes_series = zip(
            *[(s.num_rows, s.byte_size) for s in partition_shards_stats.values()]
        )

        quantiles = [0, 50, 100]
        num_rows_quantiles = np.percentile(num_rows_series, quantiles)
        byte_sizes_quantiles = np.percentile(byte_sizes_series, quantiles)

        logger.debug(
            f"Shuffled block (rows={original_block_metadata.num_rows}, "
            f"bytes={original_block_metadata.size_bytes/MiB:.1f}MB) "
            f"into {len(partition_shards_stats)} partitions ("
            f"quantiles={'/'.join(map(str, quantiles))}, "
            f"rows={'/'.join(map(str, num_rows_quantiles))}, "
            f"bytes={'/'.join(map(str, byte_sizes_quantiles))})"
        )

    # Return metadata for the original, shuffled block
    return original_block_metadata, partition_shards_stats
@dataclass
class PartitionBucket:
    """Per-partition state for thread-safe block accumulation.

    Every partition owns its own lock and queue, so the accept (submit) path
    incurs no cross-partition contention. Blocks are accumulated lock-free via
    the queue (``Queue.put`` is thread-safe); the lock is taken only during
    compaction, guaranteeing at most one compaction runs per partition at a
    time.
    """

    # Guards compaction (NOT block accumulation)
    lock: threading.Lock
    # Thread-safe accumulation buffer for incoming partition shards
    queue: queue.Queue
    # Shard count that triggers compaction (None disables it)
    compaction_threshold: Optional[int]

    def drain_queue(self) -> List[Block]:
        """Removes and returns every block currently buffered in the queue."""
        drained = []
        while True:
            try:
                drained.append(self.queue.get_nowait())
            except queue.Empty:
                return drained

    @staticmethod
    def create(compaction_threshold: Optional[int]) -> "PartitionBucket":
        """Builds a bucket with a fresh lock and an empty queue."""
        return PartitionBucket(
            lock=threading.Lock(),
            queue=queue.Queue(),
            compaction_threshold=compaction_threshold,
        )
@dataclass
class _PartitionStats:
    """Lightweight row/byte counters tracked per partition shard."""

    num_rows: int
    byte_size: int

    @staticmethod
    def from_block_metadata(block_metadata: BlockMetadata) -> "_PartitionStats":
        """Derives stats from a block's metadata."""
        return _PartitionStats(
            num_rows=block_metadata.num_rows,
            byte_size=block_metadata.size_bytes,
        )

    @staticmethod
    def for_table(table: pa.Table) -> "_PartitionStats":
        """Derives stats directly from a Pyarrow table."""
        return _PartitionStats(num_rows=table.num_rows, byte_size=table.nbytes)

    @staticmethod
    def combine(one: "_PartitionStats", other: "_PartitionStats") -> "_PartitionStats":
        """Element-wise sum of the two provided stats."""
        return _PartitionStats(
            num_rows=one.num_rows + other.num_rows,
            byte_size=one.byte_size + other.byte_size,
        )
def _derive_max_shuffle_aggregators(
    total_cluster_resources: ExecutionResources,
    data_context: DataContext,
) -> int:
    """Derives the max number of shuffle aggregator actors (workers).

    Rationale:

      - Shuffle is necessarily a terminal operation: it terminates the current
        shuffle stage (set of operators that can execute concurrently)
      - Shuffle has a very low computation footprint until all preceding
        operations complete (ie until shuffle finalization)
      - During finalization only the shuffle operator is executing, so it has
        all of the cluster's resources at its disposal

    Hence the number of aggregators should not exceed the total number of CPUs
    (fully utilizing the cluster while avoiding thrashing from
    over-allocation), and should be capped at a fixed size (128 by default).
    """
    cpu_bound = math.ceil(total_cluster_resources.cpu)
    configured_cap = (
        data_context.max_hash_shuffle_aggregators
        or DEFAULT_MAX_HASH_SHUFFLE_AGGREGATORS
    )
    return min(cpu_bound, configured_cap)
class HashShufflingOperatorBase(PhysicalOperator, SubProgressBarMixin):
    """Physical operator base-class for any operators requiring hash-based
    shuffling.

    Hash-based shuffling follows standard map-reduce architecture:

        1. Every incoming block is mapped (using provided `input_block_transformer`,
        if any)

        2. After mapping, every block is hash-partitioned into `num_partitions`
        partitions and distributed (shuffled) to corresponding aggregators.

        3. Aggregators perform "reducing" stage, aggregating individual partitions
        (using configured `StatefulAggregation`), and ultimately yield resulting
        blocks.

    NOTE: This operator can perform hash-based shuffling for multiple sequences
    simultaneously (as required by Join operator for ex).
    """

    # CPUs requested by every `_shuffle_block` task
    _DEFAULT_SHUFFLE_BLOCK_NUM_CPUS = 1.0
    # Minimum CPU allocation per aggregator actor
    _DEFAULT_AGGREGATORS_MIN_CPUS = 0.01
    def __init__(
        self,
        name_factory: Callable[[int], str],
        input_ops: List[PhysicalOperator],
        data_context: DataContext,
        *,
        key_columns: List[Tuple[str]],
        partition_aggregation_factory: ShuffleAggregationFactory,
        num_input_seqs: int,
        num_partitions: Optional[int] = None,
        partition_size_hint: Optional[int] = None,
        input_block_transformer: Optional[BlockTransformer] = None,
        aggregator_ray_remote_args_override: Optional[Dict[str, Any]] = None,
        shuffle_progress_bar_name: Optional[str] = None,
        finalize_progress_bar_name: Optional[str] = None,
        disallow_block_splitting: bool = False,
    ):
        """Initializes the hash-shuffling operator.

        Args:
            name_factory: Produces the operator's display name given the
                derived target number of partitions.
            input_ops: Upstream physical operators (one per input sequence).
            data_context: Data context carrying shuffle configuration.
            key_columns: Per-input tuple of columns used as hashing keys
                (must match ``input_ops`` in length).
            partition_aggregation_factory: Factory producing the stateless
                ``ShuffleAggregation`` applied by aggregators.
            num_input_seqs: Number of input sequences being shuffled.
            num_partitions: Target number of hash partitions (derived when
                not provided).
            partition_size_hint: Optional estimate of a single partition's
                byte size (used for memory estimation).
            input_block_transformer: Optional transformer applied to every
                block prior to shuffling.
            aggregator_ray_remote_args_override: Overrides for aggregator
                actors' ``ray.remote`` args.
            shuffle_progress_bar_name: Shuffle sub-progress bar name
                (defaults to "Shuffle").
            finalize_progress_bar_name: Finalize sub-progress bar name
                (defaults to "Reduce").
            disallow_block_splitting: When True, aggregators do not split
                output blocks to the target max block size.
        """
        input_logical_ops = [
            input_physical_op._logical_operators[0] for input_physical_op in input_ops
        ]
        estimated_input_blocks = [
            input_op.estimated_num_outputs() for input_op in input_logical_ops
        ]
        # Derive target num partitions as either of
        # - Requested target number of partitions
        # - Max estimated target number of blocks generated by the input op(s)
        # - Default configured hash-shuffle parallelism (200)
        target_num_partitions: int = (
            num_partitions
            or (max(estimated_input_blocks) if all(estimated_input_blocks) else None)
            or data_context.default_hash_shuffle_parallelism
        )
        super().__init__(
            name=name_factory(target_num_partitions),
            input_dependencies=input_ops,
            data_context=data_context,
        )
        assert partition_size_hint is None or partition_size_hint > 0
        if shuffle_progress_bar_name is None:
            shuffle_progress_bar_name = "Shuffle"
        if finalize_progress_bar_name is None:
            finalize_progress_bar_name = "Reduce"
        self._shuffle_name = shuffle_progress_bar_name
        self._reduce_name = finalize_progress_bar_name
        assert len(key_columns) == len(input_ops), (
            "Each input operation has to specify matching tuple of columns used as "
            "its hashing keys"
        )
        self._key_column_names: List[Tuple[str]] = key_columns
        self._num_partitions: int = target_num_partitions
        # Determine max number of shuffle aggregators (defaults to
        # `DataContext.min_parallelism`)
        total_available_cluster_resources = _get_total_cluster_resources()
        max_shuffle_aggregators = _derive_max_shuffle_aggregators(
            total_available_cluster_resources, data_context
        )
        # Cap number of aggregators to not exceed max configured
        num_aggregators = min(target_num_partitions, max_shuffle_aggregators)
        # Target dataset's size estimated as either of
        # 1. ``partition_size_hint`` multiplied by target number of partitions
        # 2. Estimation of input ops' outputs bytes
        if partition_size_hint is not None:
            # TODO replace with dataset-byte-size hint
            estimated_dataset_bytes = partition_size_hint * target_num_partitions
        else:
            estimated_dataset_bytes = _try_estimate_output_bytes(
                input_logical_ops,
            )
        ray_remote_args = self._get_default_aggregator_ray_remote_args(
            num_partitions=target_num_partitions,
            num_aggregators=num_aggregators,
            total_available_cluster_resources=total_available_cluster_resources,
            estimated_dataset_bytes=estimated_dataset_bytes,
        )
        if aggregator_ray_remote_args_override is not None:
            # Set default values missing for configs missing in the override
            ray_remote_args.update(aggregator_ray_remote_args_override)
        self._aggregator_pool: AggregatorPool = AggregatorPool(
            num_input_seqs=num_input_seqs,
            num_partitions=target_num_partitions,
            num_aggregators=num_aggregators,
            aggregation_factory=partition_aggregation_factory,
            aggregator_ray_remote_args=ray_remote_args,
            target_max_block_size=(
                None if disallow_block_splitting else data_context.target_max_block_size
            ),
            min_max_shards_compaction_thresholds=(
                self._get_min_max_partition_shards_compaction_thresholds()
            ),
        )
        # We track the running usage total because iterating
        # and summing over all shuffling tasks can be expensive
        # if the # of shuffling tasks is large
        self._shuffling_resource_usage = ExecutionResources.zero()
        self._input_block_transformer = input_block_transformer
        self._next_shuffle_tasks_idx: int = 0
        # Shuffling tasks are mapped like following
        # - Input sequence id -> Task id -> Task
        #
        # NOTE: Input sequences correspond to the outputs of the input operators
        self._shuffling_tasks: DefaultDict[
            int, Dict[int, MetadataOpTask]
        ] = defaultdict(dict)
        self._next_aggregate_task_idx: int = 0
        # Aggregating tasks are mapped like following
        # - Task id -> Task
        #
        # NOTE: Aggregating tasks are invariant of the # of input operators, as
        # aggregation is assumed to always produce a single sequence
        self._finalizing_tasks: Dict[int, DataOpTask] = dict()
        # This is a workaround to be able to distribute schemas to individual
        # aggregators (keeps track which input sequences have already broadcasted
        # their schemas)
        self._has_schemas_broadcasted: DefaultDict[int, bool] = defaultdict(bool)
        # Set of partitions still pending finalization
        self._pending_finalization_partition_ids: Set[int] = set(
            range(target_num_partitions)
        )
        self._output_queue: Deque[RefBundle] = deque()
        self._output_blocks_stats: List[BlockStats] = list()
        self._shuffled_blocks_stats: List[BlockStats] = list()
        # Incremental individual partition metadata accumulated separately for
        # individual input sequences during shuffling. Maps
        #
        #   input_sequence_id -> partition_id -> _PartitionStats
        #
        self._partitions_stats: DefaultDict[
            int, Dict[int, _PartitionStats]
        ] = defaultdict(dict)
        self._health_monitoring_started: bool = False
        self._health_monitoring_start_time: float = 0.0
        self._pending_aggregators_refs: Optional[List[ObjectRef[ActorHandle]]] = None
        # sub-progress bar initializations
        self._shuffle_bar = None
        self._shuffle_metrics = OpRuntimeMetrics(self)
        self._reduce_bar = None
        self._reduce_metrics = OpRuntimeMetrics(self)
    def start(self, options: ExecutionOptions) -> None:
        """Starts the operator and launches the pool of aggregator actors."""
        super().start(options)
        self._aggregator_pool.start()
    @property
    def shuffle_name(self) -> str:
        """Name of the shuffle (map) sub-progress bar."""
        return self._shuffle_name
    @property
    def reduce_name(self) -> str:
        """Name of the finalize (reduce) sub-progress bar."""
        return self._reduce_name
    def _add_input_inner(self, input_bundle: RefBundle, input_index: int) -> None:
        """Accepts an input bundle, recording shuffle metrics before
        dispatching shuffling tasks for its blocks."""
        # TODO move to base class
        self._shuffle_metrics.on_input_received(input_bundle)
        self._do_add_input_inner(input_bundle, input_index)
    def _do_add_input_inner(self, input_bundle: RefBundle, input_index: int):
        """Launches one `_shuffle_block` task per block of the incoming bundle.

        For every block this method:
          - Composes a resource bundle for the shuffling task (CPU + estimated
            memory)
          - Submits the `_shuffle_block` task (which hash-partitions the block
            and pushes shards to the aggregators)
          - Registers a completion callback that collects the resulting
            metadata and updates metrics/progress
        """
        input_blocks_refs: List[ObjectRef[Block]] = input_bundle.block_refs
        input_blocks_metadata: List[BlockMetadata] = input_bundle.metadata
        for block_ref, block_metadata in zip(input_blocks_refs, input_blocks_metadata):
            # If operator hasn't propagated schemas (for this sequence) to its
            # aggregator pool, it will need to do that upon receiving of the
            # first block
            should_broadcast_schemas = not self._has_schemas_broadcasted[input_index]
            input_key_column_names = self._key_column_names[input_index]
            # Compose shuffling task resource bundle
            shuffle_task_resource_bundle = {
                "num_cpus": self._DEFAULT_SHUFFLE_BLOCK_NUM_CPUS,
                "memory": self._estimate_shuffling_memory_req(
                    block_metadata,
                    target_max_block_size=(
                        self._data_context.target_max_block_size
                        or DEFAULT_TARGET_MAX_BLOCK_SIZE
                    ),
                ),
            }
            cur_shuffle_task_idx = self._next_shuffle_tasks_idx
            self._next_shuffle_tasks_idx += 1
            # NOTE: In cases when NO key-columns are provided for hash-partitioning
            # to be performed on (legitimate scenario for global aggregations),
            # shuffling is essentially reduced to round-robin'ing of the blocks
            # among the aggregators
            override_partition_id = (
                cur_shuffle_task_idx % self._num_partitions
                if not input_key_column_names
                else None
            )
            # Fan out provided input blocks to "shuffle" it
            # - Block is first hash-partitioned into N partitions
            # - Individual partitions then are submitted to the corresponding
            # aggregators
            #
            # TODO HSA needs to be idempotent for _shuffle_block to be retriable
            # https://anyscale1.atlassian.net/browse/DATA-1763
            input_block_partition_shards_metadata_tuple_ref: ObjectRef[
                Tuple[BlockMetadata, Dict[int, _PartitionStats]]
            ] = _shuffle_block.options(
                **shuffle_task_resource_bundle,
                num_returns=1,
            ).remote(
                block_ref,
                input_index,
                input_key_column_names,
                self._aggregator_pool,
                block_transformer=self._input_block_transformer,
                send_empty_blocks=should_broadcast_schemas,
                override_partition_id=override_partition_id,
            )
            if should_broadcast_schemas:
                self._has_schemas_broadcasted[input_index] = True

            def _on_partitioning_done(cur_shuffle_task_idx: int):
                # Release the task's resources from the running-usage total
                task = self._shuffling_tasks[input_index].pop(cur_shuffle_task_idx)
                self._shuffling_resource_usage = (
                    self._shuffling_resource_usage.subtract(
                        task.get_requested_resource_bundle()
                    )
                )
                # Fetch input block and resulting partition shards block metadata and
                # handle obtained metadata
                #
                # NOTE: We set timeout equal to 1m here as an upper-bound to make
                # sure that `ray.get(...)` invocation couldn't stall the pipeline
                # indefinitely
                input_block_metadata, partition_shards_stats = ray.get(
                    task.get_waitable(), timeout=60
                )
                self._handle_shuffled_block_metadata(
                    input_index, input_block_metadata, partition_shards_stats
                )
                # Update Shuffle metrics on task output generated
                #
                # NOTE(review): the waitable's ref (a metadata tuple, not an
                # actual block) stands in as a block ref here — the bundle
                # exists purely for metrics accounting
                blocks = [(task.get_waitable(), input_block_metadata)]
                # NOTE: schema doesn't matter because we are creating a ref bundle
                # for metrics recording purposes
                out_bundle = RefBundle(blocks, schema=None, owns_blocks=False)
                # NOTE(review): records the *input* bundle (closure capture) as
                # taken rather than `out_bundle` — confirm this is intended
                self._shuffle_metrics.on_output_taken(input_bundle)
                self._shuffle_metrics.on_task_output_generated(
                    cur_shuffle_task_idx, out_bundle
                )
                # TODO wire in stats & exceptions
                self._shuffle_metrics.on_task_finished(
                    cur_shuffle_task_idx,
                    None,
                    task_exec_stats=None,
                    task_exec_driver_stats=None,
                )
                # Update Shuffle progress bar
                self._shuffle_bar.update(increment=input_block_metadata.num_rows or 0)

            # TODO update metrics
            task = self._shuffling_tasks[input_index][
                cur_shuffle_task_idx
            ] = MetadataOpTask(
                task_index=cur_shuffle_task_idx,
                object_ref=input_block_partition_shards_metadata_tuple_ref,
                task_done_callback=functools.partial(
                    _on_partitioning_done, cur_shuffle_task_idx
                ),
                task_resource_bundle=ExecutionResources.from_resource_dict(
                    shuffle_task_resource_bundle
                ),
            )
            if task.get_requested_resource_bundle() is not None:
                self._shuffling_resource_usage = self._shuffling_resource_usage.add(
                    task.get_requested_resource_bundle()
                )
            # Update Shuffle Metrics on task submission
            self._shuffle_metrics.on_task_submitted(
                cur_shuffle_task_idx,
                RefBundle(
                    [(block_ref, block_metadata)], schema=None, owns_blocks=False
                ),
                task_id=task.get_task_id(),
            )
            # Update Shuffle progress bar
            _, _, num_rows = estimate_total_num_of_blocks(
                cur_shuffle_task_idx + 1,
                self.upstream_op_num_outputs(),
                self._shuffle_metrics,
                total_num_tasks=None,
            )
            self._shuffle_bar.update(total=num_rows)
def has_next(self) -> bool:
self._try_finalize()
return len(self._output_queue) > 0
    def _get_next_inner(self) -> RefBundle:
        """Pops and returns the next finalized output bundle, recording
        dequeue/taken metrics and accumulating its block stats."""
        bundle: RefBundle = self._output_queue.popleft()
        # TODO move to base class
        self._reduce_metrics.on_output_dequeued(bundle)
        self._reduce_metrics.on_output_taken(bundle)
        self._output_blocks_stats.extend(to_stats(bundle.metadata))
        return bundle
def get_active_tasks(self) -> List[OpTask]:
# Collect shuffling tasks for every input sequence
shuffling_tasks = self._get_active_shuffling_tasks()
# Collect aggregating tasks for every input sequence
finalizing_tasks: List[DataOpTask] = list(self._finalizing_tasks.values())
return shuffling_tasks + finalizing_tasks
def _get_active_shuffling_tasks(self) -> List[MetadataOpTask]:
return list(
itertools.chain.from_iterable(
[
input_shuffling_task_map.values()
for input_shuffling_task_map in self._shuffling_tasks.values()
]
)
)
def _is_shuffling_done(self):
return self._inputs_complete and all(
[
len(self._shuffling_tasks[input_seq_idx]) == 0
for input_seq_idx in range(len(self._input_dependencies))
]
)
    def _try_finalize(self):
        """Schedules the next batch of partition-finalization tasks.

        No-ops until all inputs are ingested and all shuffling tasks have
        completed. Finalization then proceeds in batches (bounded by
        ``DataContext.max_hash_shuffle_finalization_batch_size``), sampling
        partitions randomly to spread load across cluster nodes.
        """
        # Skip if finalization of all partitions had been already scheduled
        if self._is_finalized():
            return
        # Finalization can only proceed once
        # - All input sequences have been ingested
        # - All outstanding shuffling tasks have completed
        if not self._is_shuffling_done():
            return

        def _on_bundle_ready(partition_id: int, bundle: RefBundle):
            # Add finalized block to the output queue
            self._output_queue.append(bundle)
            # Update Finalize Metrics on task output generated
            self._reduce_metrics.on_output_queued(bundle)
            self._reduce_metrics.on_task_output_generated(
                task_index=partition_id, output=bundle
            )
            # Refresh output-size estimates used by progress reporting
            _, num_outputs, num_rows = estimate_total_num_of_blocks(
                partition_id + 1,
                self.upstream_op_num_outputs(),
                self._reduce_metrics,
                total_num_tasks=self._num_partitions,
            )
            self._estimated_num_output_bundles = num_outputs
            self._estimated_output_num_rows = num_rows
            # Update Finalize progress bar
            self._reduce_bar.update(
                increment=bundle.num_rows() or 0, total=self.num_output_rows_total()
            )

        def _on_aggregation_done(
            partition_id: int,
            exc: Optional[Exception],
            task_exec_stats: Optional[TaskExecWorkerStats],
            task_exec_driver_stats: Optional[TaskExecDriverStats],
        ):
            # NOTE: `TaskExecStats` could be null in case there's no blocks
            # emitted (current limitation, since it's emitted along with
            # `BlockMetadata`)
            assert exc or (
                task_exec_driver_stats
            ), "Driver's task execution stats must be provided on task's successful completion"
            if partition_id in self._finalizing_tasks:
                self._finalizing_tasks.pop(partition_id)
            # Update Finalize Metrics on task completion
            self._reduce_metrics.on_task_finished(
                task_index=partition_id,
                exception=exc,
                task_exec_stats=task_exec_stats,
                task_exec_driver_stats=task_exec_driver_stats,
            )
            if exc:
                logger.error(
                    f"Aggregation of the {partition_id} partition "
                    f"failed with: {exc}",
                    exc_info=exc,
                )

        # NOTE: Unless explicitly set finalization batch size defaults to the #
        # of shuffle aggregators
        max_batch_size = (
            self.data_context.max_hash_shuffle_finalization_batch_size
            or self._aggregator_pool.num_aggregators
        )
        num_running_finalizing_tasks = len(self._finalizing_tasks)
        num_remaining_partitions = len(self._pending_finalization_partition_ids)
        # Finalization is executed in batches of no more than
        # `DataContext.max_hash_shuffle_finalization_batch_size` tasks at a time.
        #
        # Batch size is used as a lever to limit memory pressure on the nodes
        # where aggregators are run by limiting # of finalization tasks running
        # concurrently
        next_batch_size = min(
            num_remaining_partitions,
            max_batch_size - num_running_finalizing_tasks,
        )
        # NOTE(review): the message says "greater than 0", but the check
        # permits 0 (0 is handled by the early return below) — message and
        # condition appear out of sync
        assert next_batch_size >= 0, (
            f"Finalization batch size must be greater than 0 "
            f"(got {next_batch_size}; "
            f"remaining={num_remaining_partitions}, "
            f"finalizing={num_running_finalizing_tasks}, "
            f"max_batch_size={max_batch_size})"
        )
        if next_batch_size == 0:
            return
        # We're sampling randomly next set of partitions to be finalized
        # to distribute finalization window uniformly across the nodes of the cluster
        # and avoid effect of "sliding lense" effect where we finalize the batch of
        # N *adjacent* partitions that may be co-located on the same node:
        #
        #   - Adjacent partitions i and i+1 are handled by adjacent
        #   aggregators (since membership is determined as i % num_aggregators)
        #
        #   - Adjacent aggregators have high likelihood of running on the
        #   same node (when num aggregators > num nodes)
        #
        # NOTE: This doesn't affect determinism, since this only impacts order
        # of finalization (hence not required to be seeded)
        target_partition_ids = random.sample(
            list(self._pending_finalization_partition_ids), next_batch_size
        )
        logger.debug(
            f"Scheduling partitions {target_partition_ids} for finalization: "
            f"{[self._get_partition_stats(pid) for pid in target_partition_ids]}"
        )
        for partition_id in target_partition_ids:
            aggregator = self._aggregator_pool.get_aggregator_for_partition(
                partition_id
            )
            # Estimate (heap) memory requirement to execute finalization task
            # Compose shuffling task resource bundle
            finalize_task_resource_bundle = {
                # TODO currently not possible to specify the resources for an actor
                # "memory": self._estimate_finalization_memory_req(partition_id),
            }
            # Request finalization of the partition
            block_gen = aggregator.finalize.options(
                **finalize_task_resource_bundle,
            ).remote(partition_id)
            data_task = DataOpTask(
                task_index=partition_id,
                streaming_gen=block_gen,
                output_ready_callback=functools.partial(_on_bundle_ready, partition_id),
                task_done_callback=functools.partial(
                    _on_aggregation_done, partition_id
                ),
                task_resource_bundle=(
                    ExecutionResources.from_resource_dict(finalize_task_resource_bundle)
                ),
            )
            self._finalizing_tasks[partition_id] = data_task
            # Pop partition id from remaining set
            self._pending_finalization_partition_ids.remove(partition_id)
            # Update Finalize Metrics on task submission
            # NOTE: This is empty because the input is directly forwarded from the
            # output of the shuffling stage, which we don't return.
            empty_bundle = RefBundle([], schema=None, owns_blocks=False)
            self._reduce_metrics.on_task_submitted(
                partition_id, empty_bundle, task_id=data_task.get_task_id()
            )
    def _do_shutdown(self, force: bool = False) -> None:
        """Tears down the operator: aggregator pool first, then base-class shutdown.

        Args:
            force: Propagated to the base class shutdown. NOTE(review): the
                aggregator pool itself is shut down with ``force=True``
                unconditionally, regardless of this argument — confirm this is
                intentional (actors killed even on graceful shutdown).
        """
        self._aggregator_pool.shutdown(force=True)
        # NOTE: It's critical for Actor Pool to release actors before calling into
        # the base method that will attempt to cancel and join pending.
        super()._do_shutdown(force)
        # Release any pending refs
        self._shuffling_tasks.clear()
        self._finalizing_tasks.clear()
def _extra_metrics(self):
shuffle_name = f"{self._name}_shuffle"
finalize_name = f"{self._name}_finalize"
self._shuffle_metrics.as_dict()
return {
shuffle_name: self._shuffle_metrics.as_dict(),
finalize_name: self._reduce_metrics.as_dict(),
}
def get_stats(self):
shuffle_name = f"{self._name}_shuffle"
reduce_name = f"{self._name}_finalize"
return {
shuffle_name: self._shuffled_blocks_stats,
reduce_name: self._output_blocks_stats,
}
def current_logical_usage(self) -> ExecutionResources:
# Current processors resource usage is comprised by
# - Base Aggregator actors resource utilization (captured by
# `base_resource_usage` method)
# - Active shuffling tasks
# - Active finalizing tasks (actor tasks)
base_usage = self.base_resource_usage
running_usage = self._shuffling_resource_usage
return base_usage.add(running_usage)
@property
def base_resource_usage(self) -> ExecutionResources:
return ExecutionResources(
cpu=(
self._aggregator_pool.num_aggregators
* self._aggregator_pool._aggregator_ray_remote_args["num_cpus"]
),
gpu=0,
memory=(
self._aggregator_pool.num_aggregators
* self._aggregator_pool._aggregator_ray_remote_args.get("memory", 0)
),
object_store_memory=0,
)
def incremental_resource_usage(self) -> ExecutionResources:
return ExecutionResources(
cpu=self._DEFAULT_SHUFFLE_BLOCK_NUM_CPUS,
# cpu=self._shuffle_block_ray_remote_args.get("num_cpus", 0),
# TODO estimate (twice avg block size)
object_store_memory=0,
gpu=0,
)
    def min_scheduling_resources(self) -> ExecutionResources:
        """Minimum resources needed to schedule any work on this operator;
        equal to one task's incremental usage."""
        return self.incremental_resource_usage()
def has_completed(self) -> bool:
# TODO separate marking as completed from the check
return self._is_finalized() and super().has_completed()
def _is_finalized(self):
return len(self._pending_finalization_partition_ids) == 0
def _handle_shuffled_block_metadata(
self,
input_seq_id: int,
input_block_metadata: BlockMetadata,
partition_shards_stats: Dict[int, _PartitionStats],
):
# Keep track of the progress of shuffling incoming blocks
self._shuffled_blocks_stats.append(input_block_metadata.to_stats())
# Update incremental input sequence partitions metadata
for partition_id, new_partition_shard_stats in partition_shards_stats.items():
current_partition_stats: Optional[_PartitionStats] = self._partitions_stats[
input_seq_id
].get(partition_id, None)
self._partitions_stats[input_seq_id][partition_id] = (
_PartitionStats.combine(
current_partition_stats,
new_partition_shard_stats,
)
if current_partition_stats
else new_partition_shard_stats
)
def _get_partition_stats(
self, partition_id: int
) -> Dict[int, Optional[_PartitionStats]]:
return {
# NOTE: Some partitions might be empty (and hence missing) in some sequences
input_seq_id: partition_stats_map.get(partition_id)
for input_seq_id, partition_stats_map in self._partitions_stats.items()
}
@classmethod
def _estimate_shuffling_memory_req(
cls,
block_metadata: BlockMetadata,
target_max_block_size: int,
):
estimated_block_bytes = (
block_metadata.size_bytes
if block_metadata.size_bytes is not None
else target_max_block_size
)
return estimated_block_bytes * 2
    def _get_default_aggregator_ray_remote_args(
        self,
        *,
        num_partitions: int,
        num_aggregators: int,
        total_available_cluster_resources: ExecutionResources,
        estimated_dataset_bytes: Optional[int],
    ):
        """Derives default ``.options(...)`` args for the aggregator actors.

        Args:
            num_partitions: Total number of hash partitions.
            num_aggregators: Number of aggregator actors (must be <= partitions).
            total_available_cluster_resources: Cluster-wide resource totals used
                to bound per-aggregator CPU/memory requests.
            estimated_dataset_bytes: Estimated total dataset size, or None when
                no estimate is available.

        Returns:
            Dict of Ray remote args (num_cpus, memory, scheduling_strategy, ...).
        """
        assert num_partitions >= num_aggregators
        if estimated_dataset_bytes is not None:
            estimated_aggregator_memory_required = self._estimate_aggregator_memory_allocation(
                num_aggregators=num_aggregators,
                num_partitions=num_partitions,
                # NOTE: If no partition size hint is provided we simply assume target
                # max block size specified as the best partition size estimate
                estimated_dataset_bytes=estimated_dataset_bytes,
            )
        else:
            # NOTE: In cases when we're unable to estimate dataset size,
            # we simply fallback to request the minimum of:
            # - conservative 50% of total available memory for a join operation.
            # - ``DEFAULT_HASH_SHUFFLE_AGGREGATOR_MEMORY_ALLOCATION`` worth of
            #   memory for every Aggregator.
            max_memory_per_aggregator = (
                total_available_cluster_resources.memory / num_aggregators
            )
            modest_memory_per_aggregator = max_memory_per_aggregator / 2
            estimated_aggregator_memory_required = min(
                modest_memory_per_aggregator,
                DEFAULT_HASH_SHUFFLE_AGGREGATOR_MEMORY_ALLOCATION,
            )
        remote_args = {
            "num_cpus": self._get_aggregator_num_cpus(
                total_available_cluster_resources,
                estimated_aggregator_memory_required,
                num_aggregators=num_aggregators,
            ),
            "memory": estimated_aggregator_memory_required,
            # NOTE: By default aggregating actors should be spread across available
            # nodes to prevent any single node being overloaded with a "thundering
            # herd"
            "scheduling_strategy": "SPREAD",
            # Allow actor tasks to execute out of order by default to prevent head-of-line
            # blocking scenario.
            "allow_out_of_order_execution": True,
        }
        return remote_args
    @abc.abstractmethod
    def _get_operator_num_cpus_override(self) -> Optional[float]:
        """Returns a user-configured per-aggregator CPU override, or None if unset.

        NOTE: Annotated ``Optional[float]`` (previously ``int``) because callers
        guard with ``is not None`` and the concrete subclass forwards a
        DataContext value that may be None or fractional.
        """
        pass
    def _get_aggregator_num_cpus(
        self,
        total_available_cluster_resources: ExecutionResources,
        estimated_aggregator_memory_required: int,
        num_aggregators: int,
    ) -> float:
        """Estimates number of CPU resources to be provisioned for individual
        Aggregators.

        Due to semantic of the Aggregator's role (outlined below), their CPU
        allocation is mostly playing a role of complimenting their memory allocation
        such that it serves as a protection mechanism from over-allocation of the
        tasks that do not specify their respective memory resources.

        Args:
            total_available_cluster_resources: Cluster-wide resource totals.
            estimated_aggregator_memory_required: Per-aggregator memory estimate
                in bytes.
            num_aggregators: Number of aggregator actors sharing the cluster.

        Returns:
            Fractional CPU count per aggregator (rounded to 2 decimal places,
            never below ``_DEFAULT_AGGREGATORS_MIN_CPUS``), unless a
            user-provided override is configured.
        """
        # First, check whether there is an override
        if self._get_operator_num_cpus_override() is not None:
            return self._get_operator_num_cpus_override()
        # Note that
        #
        # - Shuffle aggregators have modest computational footprint until
        # finalization stage
        # - Finalization stage actually always executes standalone, since it only
        # starts when all preceding operations complete
        #
        # Though we don't need to purposefully allocate any meaningful amount of
        # CPU resources to the shuffle aggregators, we're still allocating nominal
        # CPU resources to it such that to compliment its required memory allocation
        # and therefore protect from potential OOMs in case other tasks getting
        # scheduled onto the same node, but not specifying their respective memory
        # requirements.
        #
        # CPU allocation is determined like following
        #
        #   CPUs = Total memory required / 4 GiB (standard ratio in the conventional clouds)
        #
        # But no more than
        #   - 25% of total available CPUs but
        #   - No more than 4 CPUs per aggregator
        #
        cap = min(4.0, total_available_cluster_resources.cpu * 0.25 / num_aggregators)
        target_num_cpus = min(
            cap,
            estimated_aggregator_memory_required / (4 * GiB),
        )
        # Round resource to 2d decimal point (for readability)
        rounded_target_num_cpus = round(target_num_cpus, 2)
        # Lower bound to avoid scheduling on nodes with 0 CPUs (i.e. the head node).
        if rounded_target_num_cpus < self._DEFAULT_AGGREGATORS_MIN_CPUS:
            logger.debug(
                f"Total # of cpus in cluster is {total_available_cluster_resources.cpu}, "
                f"but the requested # of cpus is {target_num_cpus}. "
                f"To prevent rounding precision, we are setting {self._DEFAULT_AGGREGATORS_MIN_CPUS} cpus per aggregator. "
                f"This can happen for a very large # of aggregators {num_aggregators} "
                f"or a small dataset size {estimated_aggregator_memory_required}B"
            )
            return self._DEFAULT_AGGREGATORS_MIN_CPUS
        return rounded_target_num_cpus
    @classmethod
    def _estimate_aggregator_memory_allocation(
        cls,
        *,
        num_aggregators: int,
        num_partitions: int,
        estimated_dataset_bytes: int,
    ) -> int:
        """Estimates per-aggregator memory requirement in bytes.

        Abstract-by-convention: concrete shuffle operators must override this.
        """
        raise NotImplementedError()
    @classmethod
    def _gen_op_name(cls, num_partitions: int) -> str:
        """Generates the operator's display name; must be overridden by subclasses."""
        raise NotImplementedError()
    @classmethod
    def _get_min_max_partition_shards_compaction_thresholds(
        cls,
    ) -> Optional[Tuple[int, int]]:
        """Returns (min, max) shard-count thresholds for incremental compaction,
        or None to disable compaction (the default)."""
        return None
def get_sub_progress_bar_names(self) -> Optional[List[str]]:
return [self.shuffle_name, self.reduce_name]
def set_sub_progress_bar(self, name: str, pg: "BaseProgressBar"):
if self.shuffle_name == name:
self._shuffle_bar = pg
elif self.reduce_name == name:
self._reduce_bar = pg
class HashShuffleOperator(HashShufflingOperatorBase):
    """Physical operator implementing hash-based shuffling (repartitioning rows
    by the hash of their key columns), with optional per-partition sorting."""

    # Add 30% buffer to account for data skew
    SHUFFLE_AGGREGATOR_MEMORY_ESTIMATE_SKEW_FACTOR = 1.3

    def __init__(
        self,
        input_op: PhysicalOperator,
        data_context: DataContext,
        *,
        key_columns: Tuple[str],
        num_partitions: Optional[int] = None,
        should_sort: bool = False,
        aggregator_ray_remote_args_override: Optional[Dict[str, Any]] = None,
    ):
        """
        Args:
            input_op: Upstream operator producing blocks to shuffle.
            data_context: DataContext of the current execution.
            key_columns: Columns whose hash determines the target partition.
            num_partitions: Number of output partitions (None lets the base
                class pick a default).
            should_sort: Whether each finalized partition is sorted by the key
                columns.
            aggregator_ray_remote_args_override: Optional override of the
                aggregator actors' remote args.
        """
        # Use new stateless ConcatAggregation factory
        def _create_concat_aggregation() -> ConcatAggregation:
            return ConcatAggregation(
                should_sort=should_sort,
                key_columns=key_columns if key_columns else None,
            )

        super().__init__(
            name_factory=(
                lambda num_partitions: f"Shuffle(key_columns={key_columns}, num_partitions={num_partitions})"
            ),
            input_ops=[input_op],
            data_context=data_context,
            key_columns=[key_columns],
            num_input_seqs=1,
            num_partitions=num_partitions,
            aggregator_ray_remote_args_override=aggregator_ray_remote_args_override,
            partition_aggregation_factory=_create_concat_aggregation,
            shuffle_progress_bar_name="Shuffle",
            # NOTE: In cases like ``groupby`` blocks can't be split as this might violate an invariant that all rows
            # with the same key are in the same group (block)
            disallow_block_splitting=True,
        )

    def _get_operator_num_cpus_override(self) -> Optional[float]:
        """Returns the user-configured CPU override for aggregator actors
        (None when not configured; annotation widened accordingly)."""
        return self.data_context.hash_shuffle_operator_actor_num_cpus_override

    @classmethod
    def _estimate_aggregator_memory_allocation(
        cls,
        *,
        num_aggregators: int,
        num_partitions: int,
        estimated_dataset_bytes: int,
    ) -> int:
        """Estimates per-aggregator memory as (input + output) object-store
        footprint for its share of partitions, inflated by a skew factor."""
        max_partitions_for_aggregator = math.ceil(
            num_partitions / num_aggregators
        )  # Max number of partitions that a single aggregator might handle
        partition_byte_size_estimate = math.ceil(
            estimated_dataset_bytes / num_partitions
        )  # Estimated byte size of a single partition
        # Inputs (object store) - memory for receiving shuffled partitions
        aggregator_shuffle_object_store_memory_required = math.ceil(
            partition_byte_size_estimate * max_partitions_for_aggregator
        )
        # Output (object store) - memory for output partitions
        output_object_store_memory_required = math.ceil(
            partition_byte_size_estimate * max_partitions_for_aggregator
        )
        aggregator_total_memory_required: int = (
            # Inputs (object store)
            aggregator_shuffle_object_store_memory_required
            +
            # Output (object store)
            output_object_store_memory_required
        )
        total_with_skew = math.ceil(
            aggregator_total_memory_required
            * cls.SHUFFLE_AGGREGATOR_MEMORY_ESTIMATE_SKEW_FACTOR
        )
        logger.info(
            f"Estimated memory requirement for shuffling aggregator "
            f"(partitions={num_partitions}, "
            f"aggregators={num_aggregators}, "
            f"dataset (estimate)={estimated_dataset_bytes / GiB:.1f}GiB): "
            f"shuffle={aggregator_shuffle_object_store_memory_required / MiB:.1f}MiB, "
            f"output={output_object_store_memory_required / MiB:.1f}MiB, "
            f"total_base={aggregator_total_memory_required / MiB:.1f}MiB, "
            f"shuffle_aggregator_memory_estimate_skew_factor={cls.SHUFFLE_AGGREGATOR_MEMORY_ESTIMATE_SKEW_FACTOR}, "
            f"total_with_skew={total_with_skew / MiB:.1f}MiB"
        )
        return total_with_skew
@dataclass
class AggregatorHealthInfo:
    """Health information about aggregators for issue detection."""

    # Wall-clock time (time.time()) when health monitoring started
    started_at: float
    # Number of aggregator actors that have confirmed readiness
    ready_aggregators: int
    # Total number of aggregator actors in the pool
    total_aggregators: int
    # True while at least one aggregator has not yet become ready
    has_unready_aggregators: bool
    # Seconds elapsed since monitoring started
    wait_time: float
    # Aggregate CPU/memory requested for the whole aggregator fleet
    required_resources: ExecutionResources
class AggregatorPool:
    """Manages the fleet of ``HashShuffleAggregator`` actors backing a shuffle op.

    Responsibilities:
      - Statically maps partitions onto aggregators (partition_id % num_aggregators).
      - Starts the actors with derived remote args and warns on resource shortfall.
      - Routes requests for a given partition to the actor that owns it.
      - Tracks aggregator readiness for issue detection.
    """

    def __init__(
        self,
        num_input_seqs: int,
        num_partitions: int,
        num_aggregators: int,
        aggregation_factory: ShuffleAggregationFactory,
        aggregator_ray_remote_args: Dict[str, Any],
        target_max_block_size: Optional[int],
        min_max_shards_compaction_thresholds: Optional[Tuple[int, int]] = None,
    ):
        assert (
            num_partitions >= 1
        ), f"Number of partitions has to be >= 1 (got {num_partitions})"
        self._target_max_block_size = target_max_block_size
        self._num_input_seqs = num_input_seqs
        self._num_partitions = num_partitions
        self._num_aggregators: int = num_aggregators
        # Static partition -> aggregator assignment (round-robin by partition id)
        self._aggregator_partition_map: Dict[
            int, List[int]
        ] = self._allocate_partitions(
            num_partitions=num_partitions,
        )
        self._aggregators: List[ray.actor.ActorHandle] = []
        # Put the factory into the object store once so all actors share one copy
        self._aggregation_factory_ref: ObjectRef[ShuffleAggregationFactory] = ray.put(
            aggregation_factory
        )
        self._aggregator_ray_remote_args: Dict[
            str, Any
        ] = self._derive_final_shuffle_aggregator_ray_remote_args(
            aggregator_ray_remote_args,
            self._aggregator_partition_map,
        )
        self._min_max_shards_compaction_thresholds = (
            min_max_shards_compaction_thresholds
        )
        # NOTE(review): `_health_monitoring_started` is first assigned in
        # `start_health_monitoring()`; calling `check_aggregator_health()` before
        # `start()` would raise AttributeError — confirm callers only invoke it
        # after `start()`.

    def start(self):
        """Creates all aggregator actors and begins readiness monitoring."""
        # Check cluster resources before starting aggregators
        self._check_cluster_resources()
        logger.debug(
            f"Starting {self._num_aggregators} shuffle aggregators with remote "
            f"args: {self._aggregator_ray_remote_args}"
        )
        for aggregator_id in range(self._num_aggregators):
            target_partition_ids = self._aggregator_partition_map[aggregator_id]
            assert len(target_partition_ids) > 0
            aggregator = HashShuffleAggregator.options(
                **self._aggregator_ray_remote_args
            ).remote(
                aggregator_id,
                self._num_input_seqs,
                target_partition_ids,
                self._aggregation_factory_ref,
                self._target_max_block_size,
                self._min_max_shards_compaction_thresholds,
            )
            self._aggregators.append(aggregator)
        # Begin tracking aggregator readiness for issue detection (no separate
        # actor is spawned — monitoring is polled in-process).
        self.start_health_monitoring()

    def _check_cluster_resources(self) -> None:
        """Best-effort check that the cluster can schedule all aggregators.

        NOTE: This method never raises — it only logs warnings when the cluster
        looks under-provisioned (total or currently-available CPU/memory below
        what the aggregator fleet requests).
        """
        try:
            cluster_resources = ray.cluster_resources()
            available_resources = ray.available_resources()
        except Exception as e:
            logger.warning(f"Failed to get cluster resources: {e}")
            return
        # Calculate required resources for all aggregators
        required_cpus = (
            self._aggregator_ray_remote_args.get("num_cpus", 1) * self._num_aggregators
        )
        required_memory = (
            self._aggregator_ray_remote_args.get("memory", 0) * self._num_aggregators
        )
        # Check CPU resources
        total_cpus = cluster_resources.get("CPU", 0)
        available_cpus = available_resources.get("CPU", 0)
        if required_cpus > total_cpus:
            logger.warning(
                f"Insufficient CPU resources in cluster for hash shuffle operation. "
                f"Required: {required_cpus} CPUs for {self._num_aggregators} aggregators, "
                f"but cluster only has {total_cpus} total CPUs. "
                f"Consider either increasing the cluster size or reducing the number of aggregators via `DataContext.max_hash_shuffle_aggregators`."
            )
        if required_cpus > available_cpus:
            logger.warning(
                f"Limited available CPU resources for hash shuffle operation. "
                f"Required: {required_cpus} CPUs, available: {available_cpus} CPUs. "
                f"Aggregators may take longer to start due to contention for resources."
            )
        # Check memory resources if specified
        if required_memory > 0:
            total_memory = cluster_resources.get("memory", 0)
            available_memory = available_resources.get("memory", 0)
            if required_memory > total_memory:
                logger.warning(
                    f"Insufficient memory resources in cluster for hash shuffle operation. "
                    f"Required: {required_memory / GiB:.1f} GiB for {self._num_aggregators} aggregators, "
                    f"but cluster only has {total_memory / GiB:.1f} GiB total memory. "
                    f"Consider reducing the number of partitions or increasing cluster size."
                )
            if required_memory > available_memory:
                logger.warning(
                    f"Limited available memory resources for hash shuffle operation. "
                    f"Required: {required_memory / GiB:.1f} GiB, available: {available_memory / GiB:.1f} GiB. "
                    f"Aggregators may take longer to start due to resource contention."
                )
        # NOTE: This is logged unconditionally, even if warnings fired above.
        logger.debug(
            f"Resource check passed for hash shuffle operation: "
            f"required CPUs={required_cpus}, available CPUs={available_cpus}, "
            f"required memory={required_memory / GiB:.1f} GiB, available memory={available_memory / GiB:.1f} GiB"
        )

    @property
    def num_partitions(self):
        """Total number of hash partitions handled by this pool."""
        return self._num_partitions

    @property
    def num_aggregators(self):
        """Number of aggregator actors in this pool."""
        return self._num_aggregators

    def get_aggregator_for_partition(self, partition_id: int) -> ActorHandle:
        """Returns the actor statically assigned to the given partition."""
        return self._aggregators[self._get_aggregator_id_for_partition(partition_id)]

    def _allocate_partitions(self, *, num_partitions: int):
        """Builds the aggregator-id -> list-of-partition-ids assignment map."""
        assert num_partitions >= self._num_aggregators
        aggregator_to_partition_map: DefaultDict[int, List[int]] = defaultdict(list)
        for partition_id in range(num_partitions):
            aggregator_id = self._get_aggregator_id_for_partition(partition_id)
            aggregator_to_partition_map[aggregator_id].append(partition_id)
        return aggregator_to_partition_map

    def _get_aggregator_id_for_partition(self, partition_id: int) -> int:
        """Maps a partition to its owning aggregator (round-robin by modulo)."""
        assert partition_id < self._num_partitions
        return partition_id % self._num_aggregators

    @staticmethod
    def _derive_final_shuffle_aggregator_ray_remote_args(
        aggregator_ray_remote_args: Dict[str, Any],
        aggregator_partition_map: Dict[int, List[int]],
    ):
        """Adds a derived ``max_concurrency`` to the provided remote args.

        Caller-provided args take precedence (they are spread last).
        """
        max_partitions_per_aggregator = max(
            [len(ps) for ps in aggregator_partition_map.values()]
        )
        # Cap shuffle aggregator concurrency at the smaller of
        #   - Max number of partitions per aggregator
        #   - Threshold (8 by default)
        max_concurrency = min(
            max_partitions_per_aggregator,
            DEFAULT_HASH_SHUFFLE_AGGREGATOR_MAX_CONCURRENCY,
        )
        # NOTE(review): the assert message below references
        # DEFAULT_MAX_HASH_SHUFFLE_AGGREGATORS; it looks like it was meant to
        # reference DEFAULT_HASH_SHUFFLE_AGGREGATOR_MAX_CONCURRENCY — confirm.
        assert (
            max_concurrency >= 1
        ), f"{max_partitions_per_aggregator=}, {DEFAULT_MAX_HASH_SHUFFLE_AGGREGATORS}"
        # NOTE: ShuffleAggregator is configured as threaded actor to allow for
        # multiple requests to be handled "concurrently" (par GIL) --
        # while it's not a real concurrency in its fullest of senses, having
        # multiple Python threads allows as to parallelize all activities not
        # requiring the GIL (inside Ray Core) such that concurrent request
        # handling tasks are only blocked on GIL and are ready to execute as
        # soon as it's released.
        finalized_remote_args = {
            "max_concurrency": max_concurrency,
            **aggregator_ray_remote_args,
        }
        return finalized_remote_args

    def shutdown(self, force: bool):
        """Releases the aggregator actors.

        Args:
            force: When True, actors are ``ray.kill``-ed; otherwise handles are
                simply dropped and actors are reclaimed once unreferenced.
        """
        # Shutdown aggregators
        if force:
            for actor in self._aggregators:
                # NOTE: Actors can't be brought back after being ``ray.kill``-ed,
                # hence we're only doing that if this is a forced release
                ray.kill(actor)
        self._aggregators.clear()

    def check_aggregator_health(self) -> Optional[AggregatorHealthInfo]:
        """Get health information about aggregators for issue detection.

        Returns:
            AggregatorHealthInfo with health info or None if monitoring hasn't started.
        """
        # NOTE(review): assumes `start_health_monitoring()` ran first (it sets
        # `_health_monitoring_started`); otherwise this raises AttributeError.
        if not self._health_monitoring_started:
            return None
        if self._pending_aggregators_refs is None:
            # Initialize readiness refs
            self._pending_aggregators_refs = [
                aggregator.__ray_ready__.remote() for aggregator in self._aggregators
            ]
        # Use ray.wait to check readiness in non-blocking fashion
        _, unready_refs = ray.wait(
            self._pending_aggregators_refs,
            num_returns=len(self._pending_aggregators_refs),
            timeout=0,  # Non-blocking
        )
        # Update readiness refs to only track the unready ones
        self._pending_aggregators_refs = unready_refs
        current_time = time.time()
        ready_aggregators = self._num_aggregators - len(unready_refs)
        required_cpus = (
            self._aggregator_ray_remote_args.get("num_cpus", 1) * self._num_aggregators
        )
        required_memory = (
            self._aggregator_ray_remote_args.get("memory", 0) * self._num_aggregators
        )
        return AggregatorHealthInfo(
            started_at=self._health_monitoring_start_time,
            ready_aggregators=ready_aggregators,
            total_aggregators=self._num_aggregators,
            has_unready_aggregators=len(unready_refs) > 0,
            wait_time=current_time - self._health_monitoring_start_time,
            required_resources=ExecutionResources(
                cpu=required_cpus, memory=required_memory
            ),
        )

    def start_health_monitoring(self):
        """Start health monitoring (without separate actor)."""
        self._health_monitoring_started = True
        self._health_monitoring_start_time = time.time()
        self._pending_aggregators_refs = None
@ray.remote(
    # Make sure tasks are retried indefinitely
    max_task_retries=-1
)
class HashShuffleAggregator:
    """Actor handling of the assigned partitions during hash-shuffle operation.

    This actor uses per-(sequence, partition) locking to eliminate cross-partition
    contention during the submit (accept) path. Each (sequence, partition) pair has
    its own lock and block queue.

    The aggregation logic is delegated to a stateless `ShuffleAggregation` component
    that operates on batches of blocks without maintaining internal state.

    For multi-sequence operations (e.g., joins), blocks from different input sequences
    are stored separately and passed to finalize() as a dict keyed by sequence ID.

    NOTE: This actor might have ``max_concurrency`` > 1 (depending on the number of
    assigned partitions), and is thread-safe via per-(sequence, partition) locks.
    """

    # How often (seconds) the background thread dumps internal state to the log
    _DEBUG_DUMP_PERIOD_S = 10

    def __init__(
        self,
        aggregator_id: int,
        num_input_seqs: int,
        target_partition_ids: List[int],
        agg_factory: ShuffleAggregationFactory,
        target_max_block_size: Optional[int],
        min_max_shards_compaction_thresholds: Optional[Tuple[int, int]] = None,
    ):
        self._aggregator_id: int = aggregator_id
        self._target_max_block_size: int = target_max_block_size
        # NOTE(review): None when compaction thresholds are unset — the doubling
        # in `submit` assumes this is set whenever compaction is enabled; confirm.
        self._max_num_blocks_compaction_threshold = (
            min_max_shards_compaction_thresholds[1]
            if min_max_shards_compaction_thresholds is not None
            else None
        )
        # Create stateless aggregation component
        self._aggregation: ShuffleAggregation = agg_factory()
        min_num_blocks_compaction_threshold = (
            min_max_shards_compaction_thresholds[0]
            if min_max_shards_compaction_thresholds is not None
            else None
        )
        # Per-sequence mapping of partition-id to `PartitionState` with individual
        # locks for thread-safe block accumulation
        self._input_seq_partition_buckets: Dict[
            int, Dict[int, PartitionBucket]
        ] = self._allocate_partition_buckets(
            num_input_seqs,
            target_partition_ids,
            min_num_blocks_compaction_threshold,
        )
        # Daemon thread so it never blocks actor/process shutdown
        self._bg_thread = threading.Thread(
            target=self._debug_dump,
            name="hash_shuffle_aggregator_debug_dump",
            daemon=True,
        )
        self._bg_thread.start()

    def submit(self, input_seq_id: int, partition_id: int, partition_shard: Block):
        """Accepts a partition shard for accumulation.

        Uses per-(sequence, partition) locking to avoid cross-partition contention.
        Performs incremental compaction when the block count exceeds threshold.
        """
        bucket = self._input_seq_partition_buckets[input_seq_id][partition_id]
        # Add partition shard into the queue
        bucket.queue.put(partition_shard)
        # Check whether queue exceeded compaction threshold
        if (
            self._aggregation.is_compacting()
            and bucket.queue.qsize() >= bucket.compaction_threshold
        ):
            # We're taking a lock to drain the queue to make sure that there's
            # no concurrent compactions happening
            with bucket.lock:
                # Check queue size again to avoid running compaction after
                # another one just drained the queue
                if bucket.queue.qsize() < bucket.compaction_threshold:
                    return
                # Drain the queue to perform compaction
                to_compact = bucket.drain_queue()
                # We revise up compaction thresholds for partition after every
                # compaction so that for "non-reducing" aggregations (like
                # `Unique`, `AsList`) we amortize the cost of compaction processing
                # the same elements multiple times.
                bucket.compaction_threshold = min(
                    bucket.compaction_threshold * 2,
                    self._max_num_blocks_compaction_threshold,
                )
                # NOTE: Compaction runs while still holding the bucket lock,
                # serializing compactions for this bucket (an earlier comment
                # claiming the lock is released here was stale).
                compacted = self._aggregation.compact(to_compact)
                # Requeue compacted block back into the queue
                bucket.queue.put(compacted)

    def finalize(
        self, partition_id: int
    ) -> Generator[Union[Block, "BlockMetadataWithSchema"], None, None]:
        """Finalizes aggregation for a partition and yields output blocks.

        NOTE: Finalize is expected to be called
          - Only after all `submit` calls for the partition are complete
          - Only once per partition
        And therefore as such doesn't require explicit concurrency control
        """
        start_time_s = time.perf_counter()
        exec_stats_builder = BlockExecStats.builder()
        # Collect partition shards from all input sequences for this partition
        partition_shards_map: Dict[int, List[Block]] = {}
        # Find all sequences that have data for this partition
        for seq_id, partition_map in list(self._input_seq_partition_buckets.items()):
            if partition_id in partition_map:
                partition_shards_map[seq_id] = partition_map[partition_id].drain_queue()
        # Accumulated partition shard lists could be empty in case of
        # dataset being empty
        if partition_shards_map:
            # Finalization happens outside the lock (doesn't block other partitions).
            # The dict of per-sequence shard lists is passed directly to finalize().
            blocks = self._aggregation.finalize(partition_shards_map)
        else:
            blocks = iter([])
        if self._target_max_block_size is not None:
            blocks = _shape_blocks(blocks, self._target_max_block_size)
        for block in blocks:
            # Collect execution stats (and reset)
            exec_stats = exec_stats_builder.build()
            exec_stats_builder = BlockExecStats.builder()
            # The streaming generator protocol sends back per-yield stats
            stats: StreamingGeneratorStats = yield block
            # Update block serialization time
            if stats:
                exec_stats.block_ser_time_s = stats.object_creation_dur_s
            yield BlockMetadataWithSchema.from_block(
                block,
                block_exec_stats=exec_stats,
                task_exec_stats=TaskExecWorkerStats(
                    task_wall_time_s=time.perf_counter() - start_time_s,
                ),
            )

    def _debug_dump(self):
        """Periodically dumps the state of the HashShuffleAggregator for debugging."""
        while True:
            time.sleep(self._DEBUG_DUMP_PERIOD_S)
            result = defaultdict(defaultdict)
            for seq_id, partition_map in list(
                self._input_seq_partition_buckets.items()
            ):
                for partition_id, partition in list(partition_map.items()):
                    result[f"seq_{seq_id}"][f"partition_{partition_id}"] = {
                        # NOTE: qsize() is approximate but sufficient for debug logging
                        "num_blocks": partition.queue.qsize(),
                        "compaction_threshold": partition.compaction_threshold,
                    }
            logger.debug(
                f"Hash shuffle aggregator id={self._aggregator_id}, " f"state: {result}"
            )

    @staticmethod
    def _allocate_partition_buckets(
        num_input_seqs: int,
        target_partition_ids: List[int],
        compaction_threshold: Optional[int],
    ):
        """Creates one `PartitionBucket` per (input sequence, target partition)."""
        partition_buckets = defaultdict(defaultdict)
        for seq_id in range(num_input_seqs):
            for part_id in target_partition_ids:
                partition_buckets[seq_id][part_id] = PartitionBucket.create(
                    compaction_threshold
                )
        return partition_buckets
def _shape_blocks(
    blocks: Iterator[Block], target_max_block_size: int
) -> Iterator[Block]:
    """Re-slices a stream of blocks so each emitted block stays within
    ``target_max_block_size`` bytes."""
    output_buffer = BlockOutputBuffer(
        output_block_size_option=OutputBlockSizeOption(
            target_max_block_size=target_max_block_size
        )
    )

    def _drain():
        # Yield every block the buffer has accumulated so far
        while output_buffer.has_next():
            yield output_buffer.next()

    for incoming in blocks:
        output_buffer.add_block(incoming)
        yield from _drain()
    # Flush whatever remains once the input stream is exhausted
    output_buffer.finalize()
    yield from _drain()
def _get_total_cluster_resources() -> ExecutionResources:
    """Retrieves total available cluster resources.

    Prefers the max resources configured in the AutoscalerV2 ``ClusterConfig``;
    when that is unset (or empty/falsy), falls back to the live totals reported
    by ``ray.cluster_resources()``.
    """
    configured_max = ray._private.state.state.get_max_resources_from_cluster_config()
    # NOTE: `or` (not an `is None` check) intentionally treats an empty dict as
    # "no config" as well.
    resource_dict = configured_max or ray.cluster_resources()
    return ExecutionResources.from_resource_dict(resource_dict)
# TODO rebase on generic operator output estimation
def _try_estimate_output_bytes(
    input_logical_ops: List[LogicalOperator],
) -> Optional[int]:
    """Sums the inferred output byte sizes of the given logical ops.

    Returns:
        The total estimated bytes, or None unless every input op reports a
        well-defined size estimate. (All ops are queried regardless, matching
        the original evaluation behavior.)
    """
    estimates = [op.infer_metadata().size_bytes for op in input_logical_ops]
    if any(estimate is None for estimate in estimates):
        return None
    return sum(estimates)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/_internal/execution/operators/hash_shuffle.py",
"license": "Apache License 2.0",
"lines": 1575,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:python/ray/data/tests/test_task_pool_map_operator.py | from unittest.mock import MagicMock
import pytest
import ray
from ray.data._internal.execution.interfaces import ExecutionResources
from ray.data._internal.execution.operators.input_data_buffer import InputDataBuffer
from ray.data._internal.execution.operators.task_pool_map_operator import (
TaskPoolMapOperator,
)
def test_min_max_resource_requirements(ray_start_regular_shared, restore_data_context):
    """Checks TaskPoolMapOperator's min/max resource bounds for a CPU-only op
    requesting 1 CPU per task."""
    data_context = ray.data.DataContext.get_current()
    op = TaskPoolMapOperator(
        map_transformer=MagicMock(),
        input_op=InputDataBuffer(data_context, input_data=MagicMock()),
        data_context=data_context,
        ray_remote_args={"num_cpus": 1},
    )
    # Pretend each in-flight task may hold up to 3 bytes of pending output.
    op._metrics = MagicMock(obj_store_mem_max_pending_output_per_task=3)
    (
        min_resource_usage_bound,
        max_resource_usage_bound,
    ) = op.min_max_resource_requirements()
    # At a minimum, you need enough processors to run one task and enough object
    # store memory for a pending task.
    assert min_resource_usage_bound == ExecutionResources(
        cpu=1, gpu=0, object_store_memory=3
    )
    # For CPU-only operators, max GPU/memory is 0 (not inf) to prevent hoarding.
    assert max_resource_usage_bound == ExecutionResources.for_limits(gpu=0, memory=0)
# Allow running this test module directly (outside of a pytest invocation).
if __name__ == "__main__":
    import sys

    sys.exit(pytest.main(["-v", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/data/tests/test_task_pool_map_operator.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/dashboard/modules/metrics/dashboards/train_dashboard_panels.py | # flake8: noqa E501
from ray.dashboard.modules.metrics.dashboards.common import (
DashboardConfig,
Panel,
Row,
Target,
)
# Ray Train Metrics (Controller)
CONTROLLER_STATE_PANEL = Panel(
id=1,
title="Controller State",
description="Current state of the train controller.",
unit="",
targets=[
Target(
expr='sum(ray_train_controller_state{{ray_train_run_name=~"$TrainRunName", ray_train_run_id=~"$TrainRunId", {global_filters}}}) by (ray_train_run_name, ray_train_controller_state)',
legend="Run Name: {{ray_train_run_name}}, Controller State: {{ray_train_controller_state}}",
),
],
)
CONTROLLER_OPERATION_TIME_PANEL = Panel(
id=2,
title="Controller Operation Time",
description="Time taken by the controller for worker group operations.",
unit="seconds",
targets=[
Target(
expr='sum(ray_train_worker_group_start_total_time_s{{ray_train_run_name=~"$TrainRunName", ray_train_run_id=~"$TrainRunId", {global_filters}}}) by (ray_train_run_name)',
legend="Run Name: {{ray_train_run_name}}, Worker Group Start Time",
),
Target(
expr='sum(ray_train_worker_group_shutdown_total_time_s{{ray_train_run_name=~"$TrainRunName", ray_train_run_id=~"$TrainRunId", {global_filters}}}) by (ray_train_run_name)',
legend="Run Name: {{ray_train_run_name}}, Worker Group Shutdown Time",
),
],
fill=0,
stack=False,
)
# Ray Train Metrics (Worker)
WORKER_CHECKPOINT_REPORT_TIME_PANEL = Panel(
id=3,
title="Cumulative Checkpoint Report Time",
description="Cumulative time taken to report checkpoints to storage.",
unit="seconds",
targets=[
Target(
expr='sum(ray_train_report_total_blocked_time_s{{ray_train_run_name=~"$TrainRunName", ray_train_run_id=~"$TrainRunId", ray_train_worker_world_rank=~"$TrainWorkerWorldRank", ray_train_worker_actor_id=~"$TrainWorkerActorId", {global_filters}}}) by (ray_train_run_name, ray_train_worker_world_rank, ray_train_worker_actor_id)',
legend="Run Name: {{ray_train_run_name}}, World Rank: {{ray_train_worker_world_rank}}",
)
],
fill=0,
stack=False,
)
# Core System Resources
CPU_UTILIZATION_PANEL = Panel(
id=4,
title="CPU Usage",
description="CPU core utilization across all workers.",
unit="cores",
targets=[
Target(
expr='sum(ray_node_cpu_utilization{{instance=~"$Instance", RayNodeType=~"$RayNodeType", {global_filters}}} * ray_node_cpu_count{{instance=~"$Instance", RayNodeType=~"$RayNodeType", {global_filters}}} / 100) by (instance, RayNodeType)',
legend="CPU Usage: {{instance}} ({{RayNodeType}})",
),
Target(
expr='sum(ray_node_cpu_count{{instance=~"$Instance", RayNodeType=~"$RayNodeType", {global_filters}}})',
legend="MAX",
),
],
)
MEMORY_UTILIZATION_PANEL = Panel(
id=5,
title="Total Memory Usage",
description="Total physical memory used vs total available memory.",
unit="bytes",
targets=[
Target(
expr='sum(ray_node_mem_used{{instance=~"$Instance", RayNodeType=~"$RayNodeType", {global_filters}}}) by (instance, RayNodeType)',
legend="Memory Used: {{instance}} ({{RayNodeType}})",
),
Target(
expr='sum(ray_node_mem_total{{instance=~"$Instance", RayNodeType=~"$RayNodeType", {global_filters}}})',
legend="MAX",
),
],
)
MEMORY_DETAILED_PANEL = Panel(
id=6,
title="Memory Allocation Details",
description="Memory allocation details including available and shared memory.",
unit="bytes",
targets=[
Target(
expr='sum(ray_node_mem_available{{instance=~"$Instance", RayNodeType=~"$RayNodeType", {global_filters}}}) by (instance, RayNodeType)',
legend="Available Memory: {{instance}} ({{RayNodeType}})",
),
Target(
expr='sum(ray_node_mem_shared_bytes{{instance=~"$Instance", RayNodeType=~"$RayNodeType", {global_filters}}}) by (instance, RayNodeType)',
legend="Shared Memory: {{instance}} ({{RayNodeType}})",
),
],
)
# GPU Resources
# TODO: Add GPU Device/Index as a filter.
GPU_UTILIZATION_PANEL = Panel(
id=7,
title="GPU Usage",
description="GPU utilization across all workers.",
unit="GPUs",
targets=[
Target(
expr='sum(ray_node_gpus_utilization{{instance=~"$Instance", RayNodeType=~"$RayNodeType", GpuIndex=~"$GpuIndex", GpuDeviceName=~"$GpuDeviceName", {global_filters}}} / 100) by (instance, RayNodeType, GpuIndex, GpuDeviceName)',
legend="GPU Usage: {{instance}} ({{RayNodeType}}), gpu.{{GpuIndex}}, {{GpuDeviceName}}",
),
Target(
expr='sum(ray_node_gpus_available{{instance=~"$Instance", RayNodeType=~"$RayNodeType", GpuIndex=~"$GpuIndex", GpuDeviceName=~"$GpuDeviceName", {global_filters}}})',
legend="MAX",
),
],
)
GPU_MEMORY_UTILIZATION_PANEL = Panel(
id=8,
title="GPU Memory Usage",
description="GPU memory usage across all workers.",
unit="bytes",
targets=[
Target(
expr='sum(ray_node_gram_used{{instance=~"$Instance", RayNodeType=~"$RayNodeType", GpuIndex=~"$GpuIndex", GpuDeviceName=~"$GpuDeviceName", {global_filters}}} * 1024 * 1024) by (instance, RayNodeType, GpuIndex, GpuDeviceName)',
legend="Used GRAM: {{instance}} ({{RayNodeType}}), gpu.{{GpuIndex}}, {{GpuDeviceName}}",
),
Target(
expr='(sum(ray_node_gram_available{{instance=~"$Instance", RayNodeType=~"$RayNodeType", GpuIndex=~"$GpuIndex", GpuDeviceName=~"$GpuDeviceName", {global_filters}}}) + sum(ray_node_gram_used{{instance=~"$Instance", RayNodeType=~"$RayNodeType", GpuIndex=~"$GpuIndex", GpuDeviceName=~"$GpuDeviceName", {global_filters}}})) * 1024 * 1024',
legend="MAX",
),
],
)
# Storage Resources
DISK_UTILIZATION_PANEL = Panel(
id=9,
title="Disk Space Usage",
description="Disk space usage across all workers.",
unit="bytes",
targets=[
Target(
expr='sum(ray_node_disk_usage{{instance=~"$Instance", RayNodeType=~"$RayNodeType", {global_filters}}}) by (instance, RayNodeType)',
legend="Disk Used: {{instance}} ({{RayNodeType}})",
),
Target(
expr='sum(ray_node_disk_free{{instance=~"$Instance", RayNodeType=~"$RayNodeType", {global_filters}}}) + sum(ray_node_disk_usage{{instance=~"$Instance", RayNodeType=~"$RayNodeType", {global_filters}}})',
legend="MAX",
),
],
)
DISK_THROUGHPUT_PANEL = Panel(
id=10,
title="Disk Throughput",
description="Current disk read/write throughput.",
unit="Bps",
targets=[
Target(
expr='sum(ray_node_disk_io_read_speed{{instance=~"$Instance", RayNodeType=~"$RayNodeType", {global_filters}}}) by (instance, RayNodeType)',
legend="Read Speed: {{instance}} ({{RayNodeType}})",
),
Target(
expr='sum(ray_node_disk_io_write_speed{{instance=~"$Instance", RayNodeType=~"$RayNodeType", {global_filters}}}) by (instance, RayNodeType)',
legend="Write Speed: {{instance}} ({{RayNodeType}})",
),
],
)
DISK_OPERATIONS_PANEL = Panel(
id=11,
title="Disk Operations",
description="Current disk read/write operations per second.",
unit="ops/s",
targets=[
Target(
expr='sum(ray_node_disk_read_iops{{instance=~"$Instance", RayNodeType=~"$RayNodeType", {global_filters}}}) by (instance, RayNodeType)',
legend="Read IOPS: {{instance}} ({{RayNodeType}})",
),
Target(
expr='sum(ray_node_disk_write_iops{{instance=~"$Instance", RayNodeType=~"$RayNodeType", {global_filters}}}) by (instance, RayNodeType)',
legend="Write IOPS: {{instance}} ({{RayNodeType}})",
),
],
)
# Network Resources
NETWORK_THROUGHPUT_PANEL = Panel(
id=12,
title="Network Throughput",
description="Current network send/receive throughput.",
unit="Bps",
targets=[
Target(
expr='sum(ray_node_network_receive_speed{{instance=~"$Instance", RayNodeType=~"$RayNodeType", {global_filters}}}) by (instance, RayNodeType)',
legend="Receive Speed: {{instance}} ({{RayNodeType}})",
),
Target(
expr='sum(ray_node_network_send_speed{{instance=~"$Instance", RayNodeType=~"$RayNodeType", {global_filters}}}) by (instance, RayNodeType)',
legend="Send Speed: {{instance}} ({{RayNodeType}})",
),
],
)
NETWORK_TOTAL_PANEL = Panel(
id=13,
title="Network Total Traffic",
description="Total network traffic sent/received.",
unit="bytes",
targets=[
Target(
expr='sum(ray_node_network_sent{{instance=~"$Instance", RayNodeType=~"$RayNodeType", {global_filters}}}) by (instance, RayNodeType)',
legend="Total Sent: {{instance}} ({{RayNodeType}})",
),
Target(
expr='sum(ray_node_network_received{{instance=~"$Instance", RayNodeType=~"$RayNodeType", {global_filters}}}) by (instance, RayNodeType)',
legend="Total Received: {{instance}} ({{RayNodeType}})",
),
],
)
TRAIN_GRAFANA_PANELS = []
TRAIN_GRAFANA_ROWS = [
# Train Metrics Row
Row(
title="Train Metrics",
id=14,
panels=[
# Ray Train Metrics (Controller)
CONTROLLER_STATE_PANEL,
CONTROLLER_OPERATION_TIME_PANEL,
# Ray Train Metrics (Worker)
WORKER_CHECKPOINT_REPORT_TIME_PANEL,
],
collapsed=False,
),
# System Resources Row
Row(
title="Resource Utilization",
id=15,
panels=[
CPU_UTILIZATION_PANEL,
MEMORY_UTILIZATION_PANEL,
MEMORY_DETAILED_PANEL,
# GPU Resources
GPU_UTILIZATION_PANEL,
GPU_MEMORY_UTILIZATION_PANEL,
# Storage Resources
DISK_UTILIZATION_PANEL,
DISK_THROUGHPUT_PANEL,
DISK_OPERATIONS_PANEL,
# Network Resources
NETWORK_THROUGHPUT_PANEL,
NETWORK_TOTAL_PANEL,
],
collapsed=True,
),
]
TRAIN_RUN_PANELS = [
# Ray Train Metrics (Controller)
CONTROLLER_STATE_PANEL,
CONTROLLER_OPERATION_TIME_PANEL,
# Ray Train Metrics (Worker)
WORKER_CHECKPOINT_REPORT_TIME_PANEL,
]
TRAIN_WORKER_PANELS = [
# Ray Train Metrics (Worker)
WORKER_CHECKPOINT_REPORT_TIME_PANEL,
# Core System Resources
CPU_UTILIZATION_PANEL,
MEMORY_UTILIZATION_PANEL,
# GPU Resources
GPU_UTILIZATION_PANEL,
GPU_MEMORY_UTILIZATION_PANEL,
# Storage Resources
DISK_UTILIZATION_PANEL,
# Network Resources
NETWORK_THROUGHPUT_PANEL,
]
# Get all panel IDs from both top-level panels and panels within rows
all_panel_ids = [panel.id for panel in TRAIN_GRAFANA_PANELS]
for row in TRAIN_GRAFANA_ROWS:
    # Rows have their own ids, which share the namespace with panel ids.
    all_panel_ids.append(row.id)
    all_panel_ids.extend(panel.id for panel in row.panels)
all_panel_ids.sort()
# Import-time sanity check: Grafana requires unique ids within a dashboard.
assert len(all_panel_ids) == len(
    set(all_panel_ids)
), f"Duplicated id found. Use unique id for each panel. {all_panel_ids}"
# Top-level config consumed by the metrics dashboard generator; rows above are
# rendered into the base JSON file, filtered by the current Ray session.
train_dashboard_config = DashboardConfig(
    name="TRAIN",
    default_uid="rayTrainDashboard",
    rows=TRAIN_GRAFANA_ROWS,
    standard_global_filters=['SessionName=~"$SessionName"'],
    base_json_file_name="train_grafana_dashboard_base.json",
)
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/dashboard/modules/metrics/dashboards/train_dashboard_panels.py",
"license": "Apache License 2.0",
"lines": 292,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/serve/_private/controller_avatar.py | import ray
from ray.serve._private.constants import SERVE_CONTROLLER_NAME, SERVE_NAMESPACE
from ray.serve._private.default_impl import get_controller_impl
from ray.serve.config import HTTPOptions
from ray.serve.schema import LoggingConfig
@ray.remote(num_cpus=0)
class ServeControllerAvatar:
    """Plain Python actor that creates the Serve controller for Java callers.

    The Java API cannot create Python async actors directly: the async init
    method never runs and async calls fail with pickle errors. Creating the
    controller from this non-async proxy actor sidesteps the problem.

    To be removed after https://github.com/ray-project/ray/pull/26037
    """

    def __init__(
        self,
        http_proxy_port: int = 8000,
    ):
        # Reuse an existing controller in the Serve namespace if one is alive.
        existing = None
        try:
            existing = ray.get_actor(
                SERVE_CONTROLLER_NAME, namespace=SERVE_NAMESPACE
            )
        except ValueError:
            pass
        if existing is None:
            # No controller yet: start one serving HTTP on the requested port.
            existing = get_controller_impl().remote(
                http_options=HTTPOptions(port=http_proxy_port),
                global_logging_config=LoggingConfig(),
            )
        self._controller = existing

    def check_alive(self) -> None:
        """No-op health probe; succeeds whenever this actor is reachable."""
        return
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/serve/_private/controller_avatar.py",
"license": "Apache License 2.0",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:python/ray/train/v2/tests/test_serialization.py | import sys
import pytest
import ray
import ray.cloudpickle as ray_pickle
from ray.train.v2._internal.execution.callback import (
ControllerCallback,
WorkerGroupCallback,
)
from ray.train.v2._internal.execution.context import TrainRunContext
from ray.train.v2.api.data_parallel_trainer import DataParallelTrainer
from ray.train.v2.api.exceptions import ControllerError, WorkerGroupError
def block_import(import_name):
    """Install a meta-path hook that makes importing ``import_name`` fail.

    The hook also blocks any submodule of ``import_name``. Used to simulate a
    node where a package (e.g. torch) is not installed.
    """
    import sys

    class _ImportBlocker:
        def find_spec(self, fullname, path, target=None):
            blocked = fullname == import_name or fullname.startswith(
                import_name + "."
            )
            if blocked:
                raise ImportError(
                    f"Test error: {import_name} not installed on this node"
                )
            # Defer to the remaining finders for everything else.
            return None

    sys.meta_path.insert(0, _ImportBlocker())
def test_captured_imports(ray_start_4_cpus):
    """Modules captured in the train_fn closure are imported only on workers.

    The controller deserializes nothing from the train_fn, so torch must not
    appear in its ``sys.modules`` even though the driver imported it.
    """
    import torch

    def capture_torch_import_fn():
        # torch is captured in the closure of the train_fn
        # and should be re-imported on each worker.
        torch.ones(1)

    class AssertImportsCallback(ControllerCallback):
        def after_controller_start(self, train_run_context: TrainRunContext):
            # Check that torch is not imported in the controller process.
            # The train_fn should be deserialized directly on the workers.
            assert "torch" not in sys.modules

    trainer = DataParallelTrainer(
        capture_torch_import_fn,
        run_config=ray.train.RunConfig(callbacks=[AssertImportsCallback()]),
        scaling_config=ray.train.ScalingConfig(num_workers=2),
    )
    trainer.fit()
def test_deserialization_error(ray_start_4_cpus):
    """Test that train_fn deserialization errors are propagated properly.

    This test showcases a common deserialization error example, where
    the driver script successfully imports torch, but torch is not
    installed on the worker nodes.
    """
    import torch

    def capture_torch_import_fn():
        # Captures torch in the closure; deserializing this on a worker
        # triggers the (blocked) torch import.
        torch.ones(1)

    class BlockTorchImportCallback(WorkerGroupCallback):
        def after_worker_group_start(self, worker_group):
            # Make it so that the torch import that happens on
            # train_fn deserialization will fail on workers.
            worker_group.execute(block_import, "torch")

    trainer = DataParallelTrainer(
        capture_torch_import_fn,
        run_config=ray.train.RunConfig(callbacks=[BlockTorchImportCallback()]),
        scaling_config=ray.train.ScalingConfig(num_workers=2),
    )
    # The failure surfaces as a ControllerError wrapping the worker-side
    # ImportError raised by block_import's hook.
    with pytest.raises(ControllerError, match="torch not installed on this node"):
        trainer.fit()
@pytest.mark.parametrize(
    "error",
    [
        WorkerGroupError(
            "Training failed on multiple workers",
            {0: ValueError("worker 0 failed"), 1: RuntimeError("worker 1 failed")},
        ),
        ControllerError(Exception("Controller crashed")),
    ],
)
def test_exceptions_are_picklable(error):
    """Test that WorkerGroupError and ControllerError survive pickling."""
    # Round-trip the parametrized error through Ray's cloudpickle.
    pickled_error = ray_pickle.dumps(error)
    unpickled_error = ray_pickle.loads(pickled_error)
    # Verify the message and the concrete exception type are preserved
    assert str(unpickled_error) == str(error)
    assert type(unpickled_error) is type(error)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-x", __file__]))
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/tests/test_serialization.py",
"license": "Apache License 2.0",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:python/ray/train/v2/tests/util.py | import os
import time
import uuid
from pathlib import Path
from typing import List, Optional
from unittest.mock import MagicMock
from ray.train import Checkpoint
from ray.train.context import TrainContext
from ray.train.v2._internal.execution.context import (
DistributedContext,
TrainRunContext,
)
from ray.train.v2._internal.execution.failure_handling import (
FailureDecision,
FailurePolicy,
)
from ray.train.v2._internal.execution.scaling_policy import (
NoopDecision,
ScalingDecision,
ScalingPolicy,
)
from ray.train.v2._internal.execution.storage import StorageContext
from ray.train.v2._internal.execution.training_report import _TrainingReport
from ray.train.v2._internal.execution.worker_group import (
WorkerGroup,
WorkerGroupContext,
WorkerGroupPollStatus,
WorkerGroupState,
WorkerStatus,
)
from ray.train.v2._internal.state.schema import (
ActorStatus,
RunAttemptStatus,
RunStatus,
TrainResources,
TrainRun,
TrainRunAttempt,
TrainWorker,
)
from ray.train.v2._internal.util import ObjectRefWrapper, time_monotonic
from ray.train.v2.api.exceptions import TrainingFailedError
from ray.train.v2.api.validation_config import ValidationTaskConfig
class DummyWorkerGroup(WorkerGroup):
    """In-memory WorkerGroup double that never launches real Ray actors.

    Tests drive worker lifecycles through ``error_worker``/``finish_worker``
    and inject failures via the class-level ``set_start_failure`` /
    ``set_poll_failure`` hooks.
    """

    # Class-level fault injection: affects every instance created after the
    # corresponding classmethod is called.
    _start_failure = None
    _poll_failure = None

    # TODO: Clean this up and use Mocks instead.
    def __init__(
        self,
        train_run_context: TrainRunContext,
        worker_group_context: WorkerGroupContext,
        callbacks=None,
    ):
        # NOTE(review): deliberately does not call super().__init__ —
        # presumably to skip real worker-group setup; confirm the base class
        # keeps no state that poll_status/_start depend on.
        self._num_workers = worker_group_context.num_workers
        self._worker_group_state = None
        self._worker_statuses = {}

    def poll_status(self, *args, **kwargs) -> WorkerGroupPollStatus:
        """Return the current per-worker statuses, or raise the injected poll failure."""
        if self._poll_failure:
            raise self._poll_failure
        return WorkerGroupPollStatus(
            worker_statuses=self._worker_statuses,
        )

    def _start(self):
        # Simulates a successful start by fabricating state with MagicMocks,
        # or raises the injected start failure.
        num_workers = self._num_workers
        if self._start_failure:
            raise self._start_failure
        self._worker_group_state = WorkerGroupState(
            start_time=time_monotonic(),
            workers=[MagicMock() for i in range(num_workers)],
            placement_group_handle=MagicMock(),
            sync_actor=None,
        )
        self._worker_statuses = {
            i: WorkerStatus(running=True, error=None) for i in range(num_workers)
        }

    def shutdown(self):
        # Dropping the state marks the group as not running.
        self._worker_group_state = None

    def abort(self):
        pass

    # === Test methods ===
    def error_worker(self, worker_index):
        """Mark one worker as failed with a synthetic RuntimeError."""
        status = self._worker_statuses[worker_index]
        status.error = RuntimeError(f"Worker {worker_index} failed")

    def finish_worker(self, worker_index):
        """Mark one worker as finished (no longer running)."""
        status = self._worker_statuses[worker_index]
        status.running = False

    @classmethod
    def set_start_failure(cls, start_failure):
        cls._start_failure = start_failure

    @classmethod
    def set_poll_failure(cls, poll_failure):
        cls._poll_failure = poll_failure
class MockScalingPolicy(ScalingPolicy):
    """Scaling policy stub that replays decisions queued by the test.

    Each decision-making method pops the next queued decision for its
    situation, falling back to a no-op when its queue is empty.
    """

    def __init__(self, scaling_config):
        self._recovery_decision_queue = []
        self._monitor_decision_queue = []
        super().__init__(scaling_config)

    def make_decision_for_non_running_worker_group(self) -> ScalingDecision:
        """Next queued recovery decision, or NoopDecision if none remain."""
        if not self._recovery_decision_queue:
            return NoopDecision()
        return self._recovery_decision_queue.pop(0)

    def make_decision_for_running_worker_group(
        self,
        worker_group_state: WorkerGroupState,
        worker_group_status: WorkerGroupPollStatus,
    ) -> ScalingDecision:
        """Next queued monitor decision, or NoopDecision if none remain."""
        if not self._monitor_decision_queue:
            return NoopDecision()
        return self._monitor_decision_queue.pop(0)

    # === Test methods ===
    def queue_recovery_decision(self, decision):
        """Enqueue a decision to return while the worker group is down."""
        self._recovery_decision_queue.append(decision)

    def queue_monitor_decision(self, decision):
        """Enqueue a decision to return while the worker group is running."""
        self._monitor_decision_queue.append(decision)
class MockFailurePolicy(FailurePolicy):
    """Failure policy stub that replays decisions queued by the test."""

    def __init__(self, failure_config):
        self._decision_queue = []
        super().__init__(failure_config)

    def make_decision(
        self, training_failed_error: TrainingFailedError
    ) -> FailureDecision:
        """Next queued decision, or NOOP when nothing is queued."""
        if not self._decision_queue:
            return FailureDecision.NOOP
        return self._decision_queue.pop(0)

    # === Test methods ===
    def queue_decision(self, decision):
        """Enqueue the decision returned by the next make_decision call."""
        self._decision_queue.append(decision)
class DummyObjectRefWrapper(ObjectRefWrapper):
    """ObjectRefWrapper stand-in that skips ray.put entirely."""

    def __init__(self, obj):
        # Hold the raw object; no object-store round trip happens.
        self._obj = obj

    def get(self):
        """Return the wrapped object directly."""
        return self._obj
_RUN_ID = "mock_run_id"
def create_mock_train_run(
    status: RunStatus = RunStatus.RUNNING,
    controller_actor_id: Optional[str] = None,
    end_time_ns: Optional[int] = None,
    id: Optional[str] = None,
    status_detail: Optional[str] = None,
):
    """Build a TrainRun populated with placeholder values for state tests.

    Falsy ``id``/``controller_actor_id`` fall back to a fixed run id and a
    random actor id respectively.
    """
    run_id = id or _RUN_ID
    actor_id = controller_actor_id or uuid.uuid4().hex
    return TrainRun(
        schema_version=0,
        id=run_id,
        name="test_run",
        job_id=uuid.uuid4().hex,
        controller_actor_id=actor_id,
        status=status,
        status_detail=status_detail,
        start_time_ns=time.time_ns(),
        end_time_ns=end_time_ns,
        controller_log_file_path="/tmp/ray/session_xxx/logs/train/ray-train-app-controller.log",
    )
def create_mock_train_run_attempt(
    attempt_id: str = "mock_attempt_id",
    status: RunAttemptStatus = RunAttemptStatus.RUNNING,
    end_time_ns: Optional[int] = None,
    run_id: Optional[str] = None,
    worker_status: Optional[ActorStatus] = ActorStatus.ALIVE,
    status_detail: Optional[str] = None,
):
    """Build a TrainRunAttempt with a single placeholder worker.

    The attempt is linked to ``run_id`` (default: the module-level mock run
    id) and contains one CPU worker with random actor/node ids.
    """
    worker = TrainWorker(
        world_rank=0,
        local_rank=0,
        node_rank=0,
        actor_id=uuid.uuid4().hex,
        node_id=uuid.uuid4().hex,
        node_ip="127.0.0.1",
        pid=1234,
        gpu_ids=[0],
        status=worker_status,
        resources=TrainResources(resources={"CPU": 1}),
        log_file_path="/tmp/ray/session_xxx/logs/train/ray-train-app-worker.log",
    )
    return TrainRunAttempt(
        schema_version=0,
        attempt_id=attempt_id,
        run_id=run_id or _RUN_ID,
        status=status,
        status_detail=status_detail,
        start_time_ns=time.time_ns(),
        resources=[TrainResources(resources={"CPU": 1})],
        workers=[worker],
        end_time_ns=end_time_ns,
    )
def create_dummy_run_context(**kwargs: dict) -> TrainRunContext:
    """Create a standardized TrainRunContext for testing.

    Args:
        **kwargs: Optional overrides for the default configuration.

    Returns:
        TrainRunContext: Defaults merged with the caller's overrides.
    """
    # Imported lazily so importing this test-util module stays lightweight.
    from ray.train import BackendConfig, DataConfig
    from ray.train.v2._internal.execution.context import TrainRunContext
    from ray.train.v2.api.config import RunConfig, ScalingConfig

    defaults = {
        "run_config": RunConfig(name="test"),
        "train_loop_config": {},
        "scaling_config": ScalingConfig(num_workers=1),
        "backend_config": BackendConfig(),
        "datasets": {},
        "dataset_config": DataConfig(),
    }
    # Caller-supplied kwargs win over the defaults.
    return TrainRunContext(**{**defaults, **kwargs})
class DummyTrainContext(TrainContext):
    """A dummy TrainContext subclass for testing.

    Presents a single-worker (rank 0, world size 1) view; execution/storage
    contexts are MagicMocks since tests don't exercise them.
    """

    def __init__(self):
        # NOTE(review): does not call super().__init__ — the real setup is
        # intentionally bypassed; all fields are assigned directly here.
        self.train_run_context = create_dummy_run_context()
        self.distributed_context = DistributedContext(
            world_rank=0,
            world_size=1,
            local_rank=0,
            local_world_size=1,
            node_rank=0,
        )
        # Mock everything else since we don't need the actual functionality
        self.execution_context = MagicMock()
        self.storage_context = MagicMock()
        self.dataset_shards = {}

    def get_run_config(self):
        """Return the RunConfig carried by the dummy run context."""
        return self.train_run_context.run_config
def create_dummy_train_context() -> TrainContext:
    """Create a standardized TrainContext for testing.

    Returns:
        TrainContext: A standardized TrainContext instance for testing.
    """
    return DummyTrainContext()
def create_dummy_training_reports(
    num_results: int,
    storage_context: StorageContext,
    include_metrics: bool = True,
    include_validation: bool = False,
    starting_checkpoint_index: int = 0,
) -> List[_TrainingReport]:
    """Create ``num_results`` reports backed by real checkpoint directories.

    Side effect: creates ``checkpoint_<i>`` directories under the storage
    context's experiment path, numbered from ``starting_checkpoint_index``.

    Args:
        num_results: How many reports to create.
        storage_context: Supplies the filesystem and experiment path.
        include_metrics: If True, each report carries ``{"score": i}``.
        include_validation: If True, each report carries a ValidationTaskConfig.
        starting_checkpoint_index: Offset for the checkpoint dir numbering.
    """
    training_results = []
    for i in range(num_results):
        metrics = {"score": i} if include_metrics else {}
        # NOTE(review): disabled validation is represented as False (not
        # None) — looks intentional; confirm _TrainingReport expects this.
        validation = (
            ValidationTaskConfig(fn_kwargs={"arg": i}) if include_validation else False
        )
        checkpoint_path = os.path.join(
            storage_context.experiment_fs_path,
            f"checkpoint_{starting_checkpoint_index + i}",
        )
        os.makedirs(checkpoint_path, exist_ok=True)
        training_results.append(
            _TrainingReport(
                checkpoint=Checkpoint(
                    path=Path(checkpoint_path).as_posix(),
                    filesystem=storage_context.storage_filesystem,
                ),
                metrics=metrics,
                validation=validation,
            )
        )
    return training_results
| {
"repo_id": "ray-project/ray",
"file_path": "python/ray/train/v2/tests/util.py",
"license": "Apache License 2.0",
"lines": 256,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ray-project/ray:release/train_tests/benchmark/constants.py | """Constants shared across the benchmarks."""
class DatasetKey:
    """Canonical split names used to key dataset dicts across the benchmarks."""

    TRAIN = "train"
    VALID = "val"  # NOTE: the value is "val" even though the attribute is VALID.
    TEST = "test"
| {
"repo_id": "ray-project/ray",
"file_path": "release/train_tests/benchmark/constants.py",
"license": "Apache License 2.0",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:release/train_tests/benchmark/recsys/criteo.py | import logging
import os
from typing import TYPE_CHECKING, Dict, List, Tuple
import boto3
import json
import numpy as np
import pyarrow.csv
import ray.data
from constants import DatasetKey
if TYPE_CHECKING:
from torchrec.datasets.utils import Batch
logger = logging.getLogger(__name__)
S3_BUCKET = "ray-benchmark-data-internal-us-west-2"
CRITEO_S3_URI = f"s3://{S3_BUCKET}/criteo/tsv.gz"
CAT_FEATURE_VALUE_COUNT_JSON_PATH_PATTERN = (
"criteo/tsv.gz/categorical_feature_value_counts/{}-value_counts.json"
)
INT_FEATURE_COUNT = 13
CAT_FEATURE_COUNT = 26
DAYS = 24
DEFAULT_LABEL_NAME = "label"
DEFAULT_INT_NAMES: List[str] = [f"int_{idx}" for idx in range(INT_FEATURE_COUNT)]
DEFAULT_CAT_NAMES: List[str] = [f"cat_{idx}" for idx in range(CAT_FEATURE_COUNT)]
DEFAULT_COLUMN_NAMES: List[str] = [
DEFAULT_LABEL_NAME,
*DEFAULT_INT_NAMES,
*DEFAULT_CAT_NAMES,
]
CRITEO_NUM_EMBEDDINGS_PER_FEATURE: List[int] = [
45833188,
36746,
17245,
7413,
20243,
3,
7114,
1441,
62,
29275261,
1572176,
345138,
10,
2209,
11267,
128,
4,
974,
14,
48937457,
11316796,
40094537,
452104,
12606,
104,
35,
]
DATASET_PATHS = {
DatasetKey.TRAIN: f"{CRITEO_S3_URI}/train",
DatasetKey.VALID: f"{CRITEO_S3_URI}/valid",
DatasetKey.TEST: f"{CRITEO_S3_URI}/test",
}
def fill_missing(batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
    """Fill in missing feature values with defaults.

    Dense (int) columns get NaNs replaced by 0; categorical columns get
    ``None`` entries replaced by the empty string "". Mutates ``batch`` in
    place and returns it.
    """
    for name in DEFAULT_INT_NAMES:
        # nan_to_num leaves non-NaN entries untouched.
        batch[name] = np.nan_to_num(batch[name], nan=0)
    for name in DEFAULT_CAT_NAMES:
        column = batch[name]
        missing = np.equal(column, None)
        column[missing] = ""
    return batch
def concat_and_normalize_dense_features(
    batch: Dict[str, np.ndarray],
) -> Dict[str, np.ndarray]:
    """Stack per-column features into "dense"/"sparse" matrices.

    Dense features are shifted by +3 so the subsequent log never sees zero,
    then log-transformed to float32. Returns a new dict with keys "dense",
    "sparse", and "label".
    """
    dense = np.column_stack([batch[name] for name in DEFAULT_INT_NAMES])
    sparse = np.column_stack([batch[name] for name in DEFAULT_CAT_NAMES])
    dense += 3  # Prevent log(0)
    return {
        "dense": np.log(dense, dtype=np.float32),
        "sparse": sparse,
        "label": batch["label"],
    }
def mock_dataloader(num_batches: int, batch_size: int):
    """Yield the same synthetic torchrec Batch ``num_batches`` times.

    Builds one random batch (dense floats, sparse categorical ids drawn from
    the Criteo cardinalities, binary labels), pins its memory once, and
    re-yields it so per-iteration cost is negligible.

    Args:
        num_batches: Number of times to yield the batch.
        batch_size: Number of rows in the synthetic batch.

    Yields:
        The same pinned torchrec ``Batch`` on every iteration.
    """
    dense = np.random.randn(batch_size, INT_FEATURE_COUNT).astype(np.float32)
    sparse = np.random.randint(
        1,
        np.array(CRITEO_NUM_EMBEDDINGS_PER_FEATURE),
        (batch_size, CAT_FEATURE_COUNT),
    ).astype(np.int32)
    # BUG FIX: randint's upper bound is exclusive, so randint(0, 1) always
    # produced 0 and every mock label was the negative class. Use 2 so labels
    # are uniformly drawn from {0, 1}.
    labels = np.random.randint(0, 2, (batch_size,)).astype(np.int32)
    batch = convert_to_torchrec_batch_format(
        {"dense": dense, "sparse": sparse, "label": labels}
    )
    batch = batch.pin_memory()
    for _ in range(num_batches):
        yield batch
def convert_to_torchrec_batch_format(batch: Dict[str, np.ndarray]) -> "Batch":
    """Convert to a Batch, packaging sparse features as a KJT.

    Args:
        batch: Dict with "dense" (float matrix), "sparse" (int matrix with
            one column per categorical feature), and "label" arrays, all with
            the same number of rows.

    Returns:
        A torchrec ``Batch`` whose sparse features form a KeyedJaggedTensor
        with exactly one value per (feature, row) pair.
    """
    # Imported here so modules that only need the numpy helpers don't pull
    # in torch/torchrec.
    import torch
    from torchrec.datasets.utils import Batch
    from torchrec.sparse.jagged_tensor import KeyedJaggedTensor

    dense = batch["dense"]
    sparse = batch["sparse"]
    labels = batch["label"]
    batch_size = len(dense)
    # Every (feature, row) pair contributes exactly one value, so all lengths
    # are 1 and the offsets are simply 0..N.
    lengths = torch.ones((batch_size * CAT_FEATURE_COUNT,), dtype=torch.int32)
    offsets = torch.arange(0, batch_size * CAT_FEATURE_COUNT + 1, dtype=torch.int32)
    length_per_key: List[int] = [batch_size] * CAT_FEATURE_COUNT
    offset_per_key = [batch_size * i for i in range(CAT_FEATURE_COUNT + 1)]
    index_per_key = {key: i for i, key in enumerate(DEFAULT_CAT_NAMES)}
    # Handle partial batches (last batch).
    # if batch_size == self.batch_size:
    #     length_per_key = self.length_per_key
    #     offset_per_key = self.offset_per_key
    # else:
    #     # handle last batch in dataset when it's an incomplete batch.
    #     length_per_key = CAT_FEATURE_COUNT * [batch_size]
    #     offset_per_key = [batch_size * i for i in range(CAT_FEATURE_COUNT + 1)]
    return Batch(
        dense_features=torch.from_numpy(dense),
        sparse_features=KeyedJaggedTensor(
            keys=DEFAULT_CAT_NAMES,
            # transpose().reshape(-1) introduces a copy
            values=torch.from_numpy(sparse.transpose(1, 0).reshape(-1)),
            lengths=lengths,
            offsets=offsets,
            stride=batch_size,
            length_per_key=length_per_key,
            offset_per_key=offset_per_key,
            index_per_key=index_per_key,
        ),
        labels=torch.from_numpy(labels.reshape(-1)),
    )
def read_json_from_s3(bucket_name, key):
    """Fetch an S3 object and parse its UTF-8 body as JSON."""
    response = boto3.client("s3").get_object(Bucket=bucket_name, Key=key)
    payload = response["Body"].read().decode("utf-8")
    return json.loads(payload)
def _get_base_dataset(stage: DatasetKey = DatasetKey.TRAIN):
    """Read the raw Criteo TSV split for ``stage`` as a Ray Dataset.

    Training data gets a coarse file-level shuffle; validation/test are read
    in order. Column names are supplied explicitly since the TSVs are
    headerless.
    """
    ds_path = DATASET_PATHS[stage]
    ds = ray.data.read_csv(
        ds_path,
        read_options=pyarrow.csv.ReadOptions(column_names=DEFAULT_COLUMN_NAMES),
        parse_options=pyarrow.csv.ParseOptions(delimiter="\t"),
        shuffle=(
            "files" if stage == DatasetKey.TRAIN else None
        ),  # coarse file-level shuffle
    )
    return ds
def get_ray_dataset(stage: DatasetKey = DatasetKey.TRAIN):
    """Build the preprocessed Criteo dataset pipeline for ``stage``.

    Pipeline: read raw TSVs -> fill missing values -> map categorical values
    to integer indices (index 1 for rare values, 2+ for frequent ones) ->
    stack/log-normalize into "dense"/"sparse"/"label" columns.
    """
    ds = _get_base_dataset(stage)
    # Convert categorical features to integers.
    # Fetch cached value counts instead of "fitting" the preprocessor from scratch.
    COMPUTE_VALUE_COUNTS_FROM_SCRATCH: bool = False
    FREQUENCY_THRESHOLD = 3
    LOW_FREQUENCY_INDEX = 1  # map low frequency values -> 1
    categorical_value_to_index = {}
    for cat_feature in DEFAULT_CAT_NAMES:
        if COMPUTE_VALUE_COUNTS_FROM_SCRATCH:
            value_counts = _compute_value_counts(ds, cat_feature)
        else:
            json_filepath = CAT_FEATURE_VALUE_COUNT_JSON_PATH_PATTERN.format(
                cat_feature
            )
            logger.info(f"Downloading value counts file: {json_filepath}")
            value_counts = read_json_from_s3(bucket_name=S3_BUCKET, key=json_filepath)
        # Drop rare values; survivors get indices starting at 2 (0 and 1 are
        # reserved, with 1 used as the shared low-frequency index).
        value_counts = filter(lambda x: x[1] >= FREQUENCY_THRESHOLD, value_counts)
        categorical_value_to_index[cat_feature] = {
            val: i for i, (val, _) in enumerate(value_counts, start=2)
        }
    # TODO: This will not scale well for the full dataset, since this dict might be 10s of GBs,
    # which is expensive to copy to each map task.
    # This mapping is large, so put a shared copy in the object store for all the map tasks to use.
    categorical_value_to_index_ref = ray.put(categorical_value_to_index)
    # Clean data.
    ds = ds.map_batches(fill_missing)

    def categorical_values_to_indices(
        batch: Dict[str, np.ndarray], mapping_ref: ray.ObjectRef
    ):
        # Resolve the shared {feature -> {value -> index}} mapping once per task.
        mapping: Dict[str, Dict] = ray.get(mapping_ref)
        for cat_feature in DEFAULT_CAT_NAMES:
            # Unknown/rare values fall back to LOW_FREQUENCY_INDEX. The lambda
            # is applied immediately, so the loop variable is safe to capture.
            batch[cat_feature] = np.vectorize(
                lambda k: mapping.get(cat_feature, {}).get(k, LOW_FREQUENCY_INDEX)
            )(batch[cat_feature])
        return batch

    ds = ds.map_batches(
        categorical_values_to_indices, fn_args=(categorical_value_to_index_ref,)
    )
    # HACK: Dummy encoding for quicker testing.
    # def dummy_categorical_encoder(batch):
    #     for feature_name in DEFAULT_CAT_NAMES:
    #         batch[feature_name] = np.random.randint(0, 3, size=(len(batch[feature_name]),))
    #     return batch
    # ds = ds.map_batches(dummy_categorical_encoder)
    ds = ds.map_batches(concat_and_normalize_dense_features)
    # TODO: Need to shuffle the data.
    return ds
def _compute_value_counts(ds, feature_name) -> List[Tuple]:
    """Count occurrences of each distinct value of ``feature_name`` in ``ds``.

    Missing values (None) are reported under the empty string, matching the
    representation used by ``fill_missing``.
    """
    logger.info(f"Computing value counts for: {feature_name}")
    # TODO: This needs to be optimized in order to run on the full dataset.
    grouped = ds.select_columns(feature_name).groupby(key=feature_name).count()
    counts: List[Tuple] = []
    for row in grouped.take_all():
        value = row[feature_name]
        counts.append((value if value is not None else "", row["count()"]))
    return counts
if __name__ == "__main__":
    # One-off job: compute per-feature value counts from the raw training
    # split and dump them as JSON (these files are later uploaded to S3 and
    # consumed by get_ray_dataset via read_json_from_s3).
    logging.basicConfig(level=logging.INFO)
    ds = _get_base_dataset(stage="train")
    # Create a directory for the value counts files.
    save_dir = "/mnt/cluster_storage/criteo"
    os.makedirs(save_dir, exist_ok=True)
    for cat_feature in DEFAULT_CAT_NAMES:
        value_counts = _compute_value_counts(ds, cat_feature)
        json_filepath = os.path.join(save_dir, f"{cat_feature}-value_counts.json")
        logger.info(f"Writing value counts to: {json_filepath}")
        with open(json_filepath, "w") as f:
            json.dump(value_counts, f)
| {
"repo_id": "ray-project/ray",
"file_path": "release/train_tests/benchmark/recsys/criteo.py",
"license": "Apache License 2.0",
"lines": 232,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:release/train_tests/benchmark/recsys/recsys_factory.py | from typing import Dict, List, Optional
import logging
import numpy as np
from pydantic import BaseModel
import torch
import torch.distributed as torch_dist
from ray.data.collate_fn import CollateFn, NumpyBatchCollateFn
import ray.train
import ray.train.torch
from constants import DatasetKey
from config import DataloaderType, BenchmarkConfig
from benchmark_factory import BenchmarkFactory
from dataloader_factory import (
BaseDataLoaderFactory,
)
from logger_utils import ContextLoggerAdapter
from ray_dataloader_factory import RayDataLoaderFactory
from recsys.criteo import (
CRITEO_NUM_EMBEDDINGS_PER_FEATURE,
convert_to_torchrec_batch_format,
get_ray_dataset,
mock_dataloader,
)
logger = ContextLoggerAdapter(logging.getLogger(__name__))
class RecsysMockDataLoaderFactory(BaseDataLoaderFactory):
    """Dataloader factory that serves synthetic recsys batches via `mock_dataloader`."""

    def get_train_dataloader(self):
        """Return a mock training dataloader at the configured train batch size."""
        batch_size = self.benchmark_config.dataloader_config.train_batch_size
        return mock_dataloader(2048, batch_size)

    def get_val_dataloader(self):
        """Return a mock validation dataloader at the configured validation batch size."""
        batch_size = self.benchmark_config.dataloader_config.validation_batch_size
        return mock_dataloader(256, batch_size)
class RecsysRayDataLoaderFactory(RayDataLoaderFactory):
    """Ray Data-backed dataloader factory for the recsys benchmark."""

    def get_ray_datasets(self) -> Dict[str, ray.data.Dataset]:
        """Return the train/validation Ray datasets.

        Both splits currently point at the same underlying dataset.
        """
        # TODO: Use the train dataset for validation as well.
        shared_ds = get_ray_dataset(DatasetKey.VALID)
        return {key: shared_ds for key in (DatasetKey.TRAIN, DatasetKey.VALID)}

    def _get_collate_fn(self) -> Optional[CollateFn]:
        """Return a collate fn converting numpy batches to torchrec `Batch` objects."""
        # Imported lazily: torchrec imports can fail on CPU-only driver nodes.
        from torchrec.datasets.utils import Batch

        class _TorchRecCollateFn(NumpyBatchCollateFn):
            def __call__(self, batch: Dict[str, np.ndarray]) -> Batch:
                return convert_to_torchrec_batch_format(batch)

        return _TorchRecCollateFn()
class TorchRecConfig(BaseModel):
    """Hyperparameters for the torchrec DLRM benchmark model."""

    # Dimensionality of each categorical embedding vector.
    embedding_dim: int = 128
    # Number of rows in each Criteo categorical embedding table.
    num_embeddings_per_feature: List[int] = CRITEO_NUM_EMBEDDINGS_PER_FEATURE
    # MLP layer sizes for the over-arch; final size 1 yields a single logit.
    over_arch_layer_sizes: List[int] = [1024, 1024, 512, 256, 1]
    # MLP layer sizes for the dense-feature arch.
    dense_arch_layer_sizes: List[int] = [512, 256, 128]
    # One of "original", "dcn", or "projection" (see RecsysFactory.get_model).
    interaction_type: str = "dcn"
    # DCN-specific settings, used only when interaction_type == "dcn".
    dcn_num_layers: int = 3
    dcn_low_rank_dim: int = 512
class RecsysFactory(BenchmarkFactory):
    """Benchmark factory that builds a sharded torchrec DLRM model for Criteo."""

    def __init__(self, benchmark_config: BenchmarkConfig):
        super().__init__(benchmark_config)
        self.torchrec_config = TorchRecConfig()

    def get_dataloader_factory(self) -> BaseDataLoaderFactory:
        """Return the dataloader factory matching the configured dataloader type.

        Raises:
            KeyError: If the configured dataloader type is not supported here.
        """
        data_factory_cls = {
            DataloaderType.MOCK: RecsysMockDataLoaderFactory,
            DataloaderType.RAY_DATA: RecsysRayDataLoaderFactory,
        }[self.benchmark_config.dataloader_type]
        return data_factory_cls(self.benchmark_config)

    def get_model(self) -> torch.nn.Module:
        """Build a DLRM model, plan its embedding sharding, and wrap it in
        ``DistributedModelParallel``.

        Must run inside a Ray Train worker: it queries the train context for
        the device and the local/global world sizes.

        Returns:
            torch.nn.Module: A ``DistributedModelParallel``-wrapped
            ``DLRMTrain`` module (its forward returns the loss directly).

        Raises:
            NotImplementedError: If ``interaction_type`` is ``"projection"``.
            ValueError: If ``interaction_type`` is not recognized.
        """
        # NOTE: These imports error on a CPU-only driver node.
        # Delay the import to happen on the GPU train workers instead.
        from torchrec import EmbeddingBagCollection
        from torchrec.datasets.criteo import DEFAULT_CAT_NAMES, DEFAULT_INT_NAMES
        from torchrec.distributed.model_parallel import (
            DistributedModelParallel,
            get_default_sharders,
        )
        from torchrec.distributed.planner import EmbeddingShardingPlanner, Topology
        from torchrec.distributed.planner.storage_reservations import (
            HeuristicalStorageReservation,
        )
        from torchrec.models.dlrm import DLRM, DLRM_DCN, DLRMTrain
        from torchrec.modules.embedding_configs import EmbeddingBagConfig
        from torchrec.optim.apply_optimizer_in_backward import (
            apply_optimizer_in_backward,
        )

        args = self.torchrec_config
        device = ray.train.torch.get_device()
        local_world_size = ray.train.get_context().get_local_world_size()
        global_world_size = ray.train.get_context().get_world_size()

        # One embedding table per categorical Criteo feature.
        eb_configs = [
            EmbeddingBagConfig(
                name=f"t_{feature_name}",
                embedding_dim=args.embedding_dim,
                num_embeddings=args.num_embeddings_per_feature[feature_idx],
                feature_names=[feature_name],
            )
            for feature_idx, feature_name in enumerate(DEFAULT_CAT_NAMES)
        ]

        # Embedding tables are created on the "meta" device; real allocation
        # happens when DistributedModelParallel shards them onto the GPUs.
        if args.interaction_type == "original":
            dlrm_model = DLRM(
                embedding_bag_collection=EmbeddingBagCollection(
                    tables=eb_configs, device=torch.device("meta")
                ),
                dense_in_features=len(DEFAULT_INT_NAMES),
                dense_arch_layer_sizes=args.dense_arch_layer_sizes,
                over_arch_layer_sizes=args.over_arch_layer_sizes,
                dense_device=device,
            )
        elif args.interaction_type == "dcn":
            dlrm_model = DLRM_DCN(
                embedding_bag_collection=EmbeddingBagCollection(
                    tables=eb_configs, device=torch.device("meta")
                ),
                dense_in_features=len(DEFAULT_INT_NAMES),
                dense_arch_layer_sizes=args.dense_arch_layer_sizes,
                over_arch_layer_sizes=args.over_arch_layer_sizes,
                dcn_num_layers=args.dcn_num_layers,
                dcn_low_rank_dim=args.dcn_low_rank_dim,
                dense_device=device,
            )
        elif args.interaction_type == "projection":
            # DLRM_Projection additionally requires interaction-branch layer
            # sizes, which TorchRecConfig does not define yet. (The previous
            # version had an unreachable DLRM_Projection construction here
            # that referenced those undefined config attributes.)
            raise NotImplementedError(
                "The 'projection' interaction type is not supported."
            )
        else:
            raise ValueError(
                "Unknown interaction option set. Should be original, dcn, or projection."
            )

        train_model = DLRMTrain(dlrm_model)
        embedding_optimizer = torch.optim.Adagrad
        # This will apply the Adagrad optimizer in the backward pass for the embeddings (sparse_arch). This means that
        # the optimizer update will be applied in the backward pass, in this case through a fused op.
        # TorchRec will use the FBGEMM implementation of EXACT_ADAGRAD. For GPU devices, a fused CUDA kernel is invoked. For CPU, FBGEMM_GPU invokes CPU kernels
        # https://github.com/pytorch/FBGEMM/blob/2cb8b0dff3e67f9a009c4299defbd6b99cc12b8f/fbgemm_gpu/fbgemm_gpu/split_table_batched_embeddings_ops.py#L676-L678
        # Note that lr_decay, weight_decay and initial_accumulator_value for Adagrad optimizer in FBGEMM v0.3.2
        # cannot be specified below. This equivalently means that all these parameters are hardcoded to zero.
        optimizer_kwargs = {"lr": 15.0, "eps": 1e-8}
        apply_optimizer_in_backward(
            embedding_optimizer,
            train_model.model.sparse_arch.parameters(),
            optimizer_kwargs,
        )

        # Plan how to shard the embedding tables across all workers, given the
        # cluster topology and the per-worker train batch size.
        planner = EmbeddingShardingPlanner(
            topology=Topology(
                local_world_size=local_world_size,
                world_size=global_world_size,
                compute_device=device.type,
            ),
            batch_size=self.benchmark_config.dataloader_config.train_batch_size,
            # If experience OOM, increase the percentage. see
            # https://pytorch.org/torchrec/torchrec.distributed.planner.html#torchrec.distributed.planner.storage_reservations.HeuristicalStorageReservation
            storage_reservation=HeuristicalStorageReservation(percentage=0.05),
        )
        plan = planner.collective_plan(
            train_model, get_default_sharders(), torch_dist.GroupMember.WORLD
        )

        model = DistributedModelParallel(
            module=train_model,
            device=device,
            plan=plan,
        )

        # Log the sharding plan once (rank 0 only) for debugging.
        if ray.train.get_context().get_world_rank() == 0:
            for collectionkey, plans in model._plan.plan.items():
                logger.info(collectionkey)
                for table_name, plan in plans.items():
                    logger.info(table_name)
                    logger.info(plan)

        return model

    def get_loss_fn(self) -> torch.nn.Module:
        raise NotImplementedError(
            "torchrec model should return the loss directly in forward. "
            "See the `DLRMTrain` wrapper class."
        )
| {
"repo_id": "ray-project/ray",
"file_path": "release/train_tests/benchmark/recsys/recsys_factory.py",
"license": "Apache License 2.0",
"lines": 183,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ray-project/ray:release/train_tests/benchmark/recsys/torchrec_runner.py | import logging
import gc
import os
import torch
import torch.nn
from torchrec.distributed.train_pipeline import StagedTrainPipeline, SparseDataDistUtil
from torchrec.distributed.train_pipeline.utils import PipelineStage
from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizerWrapper
from torchrec.optim.optimizers import in_backward_optimizer_filter
import ray.train
import ray.train.torch
from runner import TrainLoopRunner
logger = logging.getLogger(__name__)
class TorchRecRunner(TrainLoopRunner):
    """TrainLoopRunner specialized for torchrec models.

    Inserts a torchrec ``StagedTrainPipeline`` in front of the dataloader so
    host-to-device copies and sparse data distribution run on dedicated CUDA
    streams, and checkpoints model/optimizer state per rank (the embedding
    tables are sharded across workers).
    """

    def _setup(self):
        # Requires real GPUs: CUDA streams are created below.
        if self.factory.benchmark_config.mock_gpu:
            raise ValueError("Mock GPU is not supported for running TorchRec.")

        self.model = self.factory.get_model()

        # TODO: This code depends on the model having a fused_optimizer,
        # which is hidden in the `get_model` method of the factory.
        # The dense (non-embedding) parameters get their own Adagrad optimizer;
        # the embedding optimizer is fused into the backward pass.
        dense_optimizer = KeyedOptimizerWrapper(
            dict(in_backward_optimizer_filter(self.model.named_parameters())),
            lambda params: torch.optim.Adagrad(params, lr=15.0, eps=1e-8),
        )
        self.optimizer = CombinedOptimizer(
            [self.model.fused_optimizer, dense_optimizer]
        )

        # Dedicated streams reused across epochs for data-dist and H2D copies.
        self._data_dist_stream = torch.cuda.Stream()
        self._h2d_stream = torch.cuda.Stream()

    def _wrap_dataloader(self, dataloader, train: bool = True):
        """Wrap the dataloader in a torchrec StagedTrainPipeline, then apply
        the base class's timing instrumentation on top."""
        dataloader_iter = iter(dataloader)
        device = ray.train.torch.get_device()

        sdd = SparseDataDistUtil(
            model=self.model,
            data_dist_stream=self._data_dist_stream,
            # prefetch_stream=torch.cuda.Stream(),
        )
        # Stage 1: copy the batch to the GPU on its own stream.
        # Stage 2: kick off sparse (embedding) data distribution.
        pipeline = [
            PipelineStage(
                name="data_copy",
                runnable=lambda batch: batch.to(device, non_blocking=True),
                stream=self._h2d_stream,
            ),
            PipelineStage(
                name="start_sparse_data_dist",
                runnable=sdd.start_sparse_data_dist,
                stream=sdd.data_dist_stream,
                fill_callback=sdd.wait_sparse_data_dist,
            ),
            # PipelineStage(
            #     name="prefetch",
            #     runnable=sdd.prefetch,
            #     stream=sdd.prefetch_stream,
            #     fill_callback=sdd.load_prefetch,
            # ),
        ]
        pipeline = StagedTrainPipeline(pipeline_stages=pipeline)

        def dataloader_with_torchrec_pipeline():
            # progress() returns a falsy value when the iterator is exhausted.
            while batch := pipeline.progress(dataloader_iter):
                yield batch
            pipeline.flush_end()

        return super()._wrap_dataloader(
            dataloader_with_torchrec_pipeline(), train=train
        )

    def _train_step(self, batch):
        # DLRMTrain's forward returns (loss, outputs).
        self.model.train()
        self.optimizer.zero_grad()
        loss, out = self.model(batch)
        loss.backward()
        self.optimizer.step()

    def _validate_step(self, batch):
        self.model.eval()
        with torch.no_grad():
            loss, out = self.model(batch)
        return loss

    def _get_model_and_optim_filenames(self):
        # Per-rank filenames, e.g. "model_shard_rank=0.pt" (f-string {rank=}).
        rank = ray.train.get_context().get_world_rank()
        return f"model_shard_{rank=}.pt", f"optimizer_shard_{rank=}.pt"

    def _save_training_state(self, local_dir: str):
        # NOTE: Embedding table shards are on different GPUs,
        # so we need to do distributed checkpointing.
        # This checkpoint format must be loaded on the same number
        # of workers and GPU types, since it was sharded with a compute-specific plan.
        model_filename, optimizer_filename = self._get_model_and_optim_filenames()
        torch.save(self.model.state_dict(), os.path.join(local_dir, model_filename))
        torch.save(
            self.optimizer.state_dict(), os.path.join(local_dir, optimizer_filename)
        )

    def _load_training_state(self, local_dir: str):
        # Each rank loads its own shard, mapped onto this model's device.
        model_filename, optimizer_filename = self._get_model_and_optim_filenames()
        self.model.load_state_dict(
            torch.load(
                os.path.join(local_dir, model_filename),
                map_location=self.model.device,
            )
        )
        self.optimizer.load_state_dict(
            torch.load(
                os.path.join(local_dir, optimizer_filename),
                map_location=self.model.device,
            )
        )

    def _cleanup(self):
        # NOTE: This cleanup is needed to avoid zombie Train worker processes
        # that hang on gc collect on python teardown.
        del self.model
        del self.optimizer
        del self._data_dist_stream
        del self._h2d_stream
        torch.cuda.synchronize()
        torch.cuda.empty_cache()
        gc.collect()
| {
"repo_id": "ray-project/ray",
"file_path": "release/train_tests/benchmark/recsys/torchrec_runner.py",
"license": "Apache License 2.0",
"lines": 112,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ray-project/ray:release/train_tests/benchmark/runner.py | import collections
import json
import logging
import os
import pprint
import time
import tempfile
from typing import Dict, Optional
import ray.train
from ray.data._internal.stats import Timer
import torch
from logger_utils import ContextLoggerAdapter
from benchmark_factory import BenchmarkFactory
logger = ContextLoggerAdapter(logging.getLogger(__name__))
class TrainLoopRunner:
    """Generic runner that sets up the training loop scaffolding.

    Collects perf metrics and handles periodic checkpointing and validation.
    Subclasses customize behavior via the `_setup`, `_train_step`,
    `_validate_step`, `_save_training_state`, and `_load_training_state` hooks.
    """

    def __init__(self, factory: BenchmarkFactory):
        self.factory = factory
        self.benchmark_config = factory.benchmark_config

        # Subclass hook: build model/optimizer state before any restore.
        self._setup()

        # Training progress state.
        self._train_batch_idx: int = 0
        self._train_epoch_idx: int = 0
        self._global_rows_processed_this_epoch: int = 0

        # Performance metrics, keyed like "train/step" -> Timer.
        self._metrics = collections.defaultdict(lambda: Timer())

        # Resume from a Ray Train checkpoint if one is provided
        # (e.g. after a worker failure or job restart).
        checkpoint = ray.train.get_checkpoint()
        if checkpoint:
            self._restore_from_checkpoint(checkpoint)

    # Methods for subclasses to implement.

    def _setup(self):
        """Subclasses should override this to setup the model, optimizer, etc.

        The attributes initialized in this method should only be used in the
        other overridden methods."""
        pass

    def _cleanup(self):
        """Subclasses can override this to cleanup any resources."""
        pass

    def _train_step(self, train_dataloader):
        """Subclasses should override this to implement the training step.

        A training step represents a single forward and backward pass on a batch of data.
        """
        raise NotImplementedError

    def _validate_step(self, val_dataloader):
        """Subclasses should override this to implement the validation step.

        A validation step represents a single forward pass on a batch of data."""
        raise NotImplementedError

    def _save_training_state(self, local_dir: str):
        """Subclasses should override this to save the training state.

        This should reference the model and optimizer state initialized
        in the `_setup` method."""
        pass

    def _load_training_state(self, local_dir: str):
        """Subclasses should override this to load the training state.

        This should reference the model and optimizer state initialized
        in the `_setup` method."""
        pass

    def _restore_from_checkpoint(self, checkpoint: ray.train.Checkpoint):
        """Download a checkpoint to local disk and restore run + training state."""
        logger.info(
            f"Restoring from checkpoint: {checkpoint} for worker "
            f"{ray.train.get_context().get_world_rank()}"
        )
        with tempfile.TemporaryDirectory(
            dir="/mnt/local_storage"
        ) as temp_checkpoint_dir:
            # Time download and load separately so restore overhead is
            # attributed correctly in the reported metrics.
            download_start = time.perf_counter()
            checkpoint.to_directory(temp_checkpoint_dir)
            download_time = time.perf_counter() - download_start

            load_start = time.perf_counter()
            self._load_checkpoint(temp_checkpoint_dir)
            load_time = time.perf_counter() - load_start

            self._metrics["checkpoint/download"].add(download_time)
            self._metrics["checkpoint/load"].add(load_time)

    def _wrap_dataloader(self, dataloader, train: bool = True):
        """Wrap a dataloader so per-batch fetch times are recorded.

        The first batch is timed under "<prefix>/iter_first_batch" (it usually
        includes pipeline warmup) and the rest under "<prefix>/iter_batch".
        Also advances `_train_batch_idx` when iterating training data.
        """
        dataloader_iter = iter(dataloader)
        prefix = "train" if train else "validation"

        def dataloader_with_timers():
            try:
                with self._metrics[f"{prefix}/iter_first_batch"].timer():
                    batch = next(dataloader_iter)
                    if train:
                        self._train_batch_idx += 1
            except StopIteration:
                return

            while True:
                yield batch

                try:
                    with self._metrics[f"{prefix}/iter_batch"].timer():
                        batch = next(dataloader_iter)
                        if train:
                            self._train_batch_idx += 1
                except StopIteration:
                    return

        return dataloader_with_timers()

    @property
    def _num_batches_to_skip(self) -> int:
        """Calculate the number of batches to skip based on the number of rows already processed in this epoch."""
        global_batch_size = (
            self.benchmark_config.dataloader_config.train_batch_size
            * ray.train.get_context().get_world_size()
        )
        return self._global_rows_processed_this_epoch // global_batch_size

    def _train_epoch(self):
        """Run one training epoch.

        Subclasses can override the entire `_train_epoch` method for more training
        logic customization."""
        if ray.train.get_context().get_world_rank() == 0:
            logger.info(f"Training starting @ epoch={self._train_epoch_idx}")

        train_dataloader = self.factory.get_train_dataloader()
        train_dataloader = self._wrap_dataloader(train_dataloader, train=True)

        # Skip through batches if we restored to a middle of the epoch.
        # TODO: Compare this baseline to the data checkpointing approach once we have it.
        if self._num_batches_to_skip:
            if ray.train.get_context().get_world_rank() == 0:
                logger.info(f"Skipping {self._num_batches_to_skip} batches...")
            for _ in range(self._num_batches_to_skip):
                with self._metrics["train/iter_skip_batch"].timer():
                    next(train_dataloader)

        for batch in train_dataloader:
            with self._metrics["train/step"].timer():
                if not self.benchmark_config.skip_train_step:
                    self._train_step(batch)

            # TODO: This is slightly off if the last batch is a partial batch (if drop_last=False)
            global_batch_size = (
                self.benchmark_config.dataloader_config.train_batch_size
                * ray.train.get_context().get_world_size()
            )
            self._metrics["train/rows_processed"].add(global_batch_size)
            self._global_rows_processed_this_epoch += global_batch_size

            if self._should_checkpoint_during_epoch():
                self._checkpoint()

            if self._should_validate_during_epoch():
                validation_metrics = self._validate()
                self._checkpoint(validation_metrics)

            if self._should_log_metrics():
                logger.info(pprint.pformat(self.get_metrics(), indent=2))

        # Epoch done: advance the epoch counter and reset intra-epoch state.
        self._train_epoch_idx += 1
        self._train_batch_idx = 0
        self._global_rows_processed_this_epoch = 0

    def _validate_epoch(self) -> Dict[str, float]:
        """Run one validation pass and return the average loss per row."""
        if ray.train.get_context().get_world_rank() == 0:
            logger.info(
                f"Validation starting @ epoch={self._train_epoch_idx}, "
                f"batch={self._train_batch_idx}"
            )

        val_dataloader = self.factory.get_val_dataloader()
        val_dataloader = self._wrap_dataloader(val_dataloader, train=False)

        total_loss = torch.tensor(0.0).to(ray.train.torch.get_device())
        num_rows = 0

        for batch in val_dataloader:
            with self._metrics["validation/step"].timer():
                if not self.benchmark_config.skip_validation_step:
                    total_loss += self._validate_step(batch)

            # NOTE: assumes full batches; a partial final batch is overcounted.
            num_rows += self.benchmark_config.dataloader_config.validation_batch_size
            self._metrics["validation/rows_processed"].add(
                self.benchmark_config.dataloader_config.validation_batch_size
            )

        assert num_rows > 0, "Validation dataset yielded no batches."
        return {"validation/loss": total_loss.item() / num_rows}

    def _should_checkpoint_during_epoch(self) -> bool:
        """Handles the checkpoint_every_n_steps logic."""
        return (
            self.benchmark_config.checkpoint_every_n_steps > 0
            and self._train_batch_idx % self.benchmark_config.checkpoint_every_n_steps
            == 0
        )

    def _should_validate_during_epoch(self) -> bool:
        """Handles the validate_every_n_steps logic."""
        return (
            self.benchmark_config.validate_every_n_steps > 0
            and self._train_batch_idx % self.benchmark_config.validate_every_n_steps
            == 0
        )

    def _should_log_metrics(self) -> bool:
        """Handles the log_metrics_every_n_steps logic."""
        return (
            self.benchmark_config.log_metrics_every_n_steps > 0
            and self._train_batch_idx % self.benchmark_config.log_metrics_every_n_steps
            == 0
        )

    def _validate(self) -> Dict[str, float]:
        """Time a full validation epoch and return its metrics."""
        with self._metrics["validation/epoch"].timer():
            validation_metrics = self._validate_epoch()
        return validation_metrics

    def _checkpoint(self, metrics: Optional[Dict[str, float]] = None):
        """Save training state to a temp dir and report it to Ray Train."""
        with tempfile.TemporaryDirectory(
            dir="/mnt/local_storage"
        ) as temp_checkpoint_dir:
            with self._metrics["checkpoint/save"].timer():
                self._save_checkpoint(temp_checkpoint_dir)
            with self._metrics["checkpoint/report"].timer():
                self._report_checkpoint(
                    metrics=metrics or {},
                    checkpoint=ray.train.Checkpoint.from_directory(temp_checkpoint_dir),
                )

    def _load_checkpoint(self, local_dir: str):
        """Restore subclass training state, run counters, and metric timers."""
        self._load_training_state(local_dir)

        run_state = torch.load(os.path.join(local_dir, "run_state.pt"))
        self._train_epoch_idx = run_state["epoch"]
        self._train_batch_idx = run_state["batch_idx"]
        self._global_rows_processed_this_epoch = run_state[
            "global_rows_processed_this_epoch"
        ]

        # Restore Timer internals saved as plain dicts in _save_checkpoint.
        with open(os.path.join(local_dir, "metrics.json"), "r") as f:
            metrics_json = json.load(f)
            for k, v in metrics_json.items():
                self._metrics[k].__dict__.update(v)

        if ray.train.get_context().get_world_rank() == 0:
            logger.info(
                f"Restored to epoch={self._train_epoch_idx}, "
                f"train_batch_idx={self._train_batch_idx} from checkpoint: "
                f"{ray.train.get_checkpoint()}"
            )

    def _save_checkpoint(self, local_dir: str):
        """Write subclass training state; rank 0 also writes run counters and metrics."""
        logger.info(
            f"Saving checkpoint @ epoch={self._train_epoch_idx}, "
            f"train_batch_idx={self._train_batch_idx}"
        )
        self._save_training_state(local_dir)

        if ray.train.get_context().get_world_rank() == 0:
            run_state = {
                "epoch": self._train_epoch_idx,
                "batch_idx": self._train_batch_idx,
                "global_rows_processed_this_epoch": self._global_rows_processed_this_epoch,
            }
            torch.save(run_state, os.path.join(local_dir, "run_state.pt"))

            metrics_json = {k: v.__dict__.copy() for k, v in self._metrics.items()}
            with open(os.path.join(local_dir, "metrics.json"), "w") as f:
                json.dump(metrics_json, f)

    def _report_checkpoint(self, metrics, checkpoint):
        """Report a checkpoint (and optional metrics) to Ray Train."""
        logger.info(
            f"Uploading checkpoint @ epoch={self._train_epoch_idx}, "
            f"train_batch_idx={self._train_batch_idx}"
        )
        checkpoint_dir_name = (
            f"checkpoint_epoch={self._train_epoch_idx}_batch={self._train_batch_idx}"
        )
        ray.train.report(
            metrics,
            checkpoint=checkpoint,
            checkpoint_dir_name=checkpoint_dir_name,
        )

    def run(self):
        """Run the remaining epochs (resuming mid-run if state was restored)."""
        starting_epoch = self._train_epoch_idx

        for _ in range(starting_epoch, self.benchmark_config.num_epochs):
            with self._metrics["train/epoch"].timer():
                self._train_epoch()

            if not self.benchmark_config.skip_validation_at_epoch_end:
                validation_metrics = self._validate()
                self._checkpoint(validation_metrics)

            if ray.train.get_context().get_world_rank() == 0:
                logger.info(pprint.pformat(self.get_metrics(), indent=2))

        self._cleanup()

    def get_metrics(self, dataset_creation_time: float = 0.0) -> Dict[str, float]:
        """Return a flat dict of avg/min/max/total for every timer, plus
        derived throughput and restore-overhead metrics."""
        # TODO: These metrics should be aggregated across training workers.
        metrics = {}
        for key, metric in self._metrics.items():
            metrics.update(
                {
                    f"{key}-avg": metric.avg(),
                    f"{key}-min": metric.min(),
                    f"{key}-max": metric.max(),
                    f"{key}-total": metric.get(),
                }
            )

        metrics["train/dataset_creation_time"] = dataset_creation_time
        metrics["validation/dataset_creation_time"] = dataset_creation_time

        # Throughput
        # TODO: Ray Data can provide these throughput metrics automatically.
        train_time = (
            metrics["train/dataset_creation_time"]
            + self._metrics["train/step"].get()
            # Include the time it takes to get the first batch.
            + self._metrics["train/iter_first_batch"].get()
            + self._metrics["train/iter_batch"].get()
        )
        if train_time > 0:
            metrics["train/global_throughput"] = (
                self._metrics["train/rows_processed"].get() / train_time
            )

        validation_time = (
            metrics["validation/dataset_creation_time"]
            + self._metrics["validation/step"].get()
            # Include the time it takes to get the first batch.
            + self._metrics["validation/iter_first_batch"].get()
            + self._metrics["validation/iter_batch"].get()
        )
        if validation_time > 0:
            metrics["validation/global_throughput"] = (
                self._metrics["validation/rows_processed"].get() / validation_time
            )

        # Extra time that each worker spends to restore from checkpoint,
        # which includes downloading the checkpoint, loading the checkpoint,
        # and skipping through batches that were already processed.
        restoration_time = (
            self._metrics["checkpoint/download"].get()
            + self._metrics["checkpoint/load"].get()
            + self._metrics["train/iter_skip_batch"].get()
        )
        if restoration_time > 0:
            metrics["checkpoint/restoration_time"] = restoration_time

        # Dataloader metrics (ex: Ray Data stats)
        metrics.update(self.factory.get_dataloader_metrics())

        return metrics
class VanillaTorchRunner(TrainLoopRunner):
    """A simple runner that uses a PyTorch model, optimizer, and loss function."""

    def _setup(self):
        model = self.factory.get_model()
        self.model = ray.train.torch.prepare_model(model)
        self.loss_fn = self.factory.get_loss_fn()
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=1e-3)

    def _train_step(self, batch):
        """Run one forward/backward pass and apply an optimizer update.

        Args:
            batch: A (inputs, labels) tuple.
        """
        # Fixed duplicate: the previous version called self.model.train() twice.
        self.model.train()
        input_batch, labels = batch

        self.optimizer.zero_grad()
        out = self.model(input_batch)
        loss = self.loss_fn(out, labels)
        loss.backward()
        self.optimizer.step()

    def _validate_step(self, batch):
        """Compute and return the loss for one batch without gradients."""
        self.model.eval()
        input_batch, labels = batch

        with torch.no_grad():
            out = self.model(input_batch)
            loss = self.loss_fn(out, labels)
        return loss

    def _save_training_state(self, local_dir: str):
        # Standard DDP checkpointing: every rank holds a full replica, so only
        # rank 0 writes the model and optimizer state.
        if ray.train.get_context().get_world_rank() == 0:
            torch.save(self.model.state_dict(), os.path.join(local_dir, "model.pt"))
            torch.save(
                self.optimizer.state_dict(), os.path.join(local_dir, "optimizer.pt")
            )

    def _load_training_state(self, local_dir: str):
        # map_location="cpu" keeps the restore off the GPU.
        self.model.load_state_dict(
            torch.load(os.path.join(local_dir, "model.pt"), map_location="cpu")
        )
        self.optimizer.load_state_dict(
            torch.load(os.path.join(local_dir, "optimizer.pt"), map_location="cpu")
        )
| {
"repo_id": "ray-project/ray",
"file_path": "release/train_tests/benchmark/runner.py",
"license": "Apache License 2.0",
"lines": 342,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
recommenders-team/recommenders:recommenders/models/cornac/bpr.py | # Copyright (c) Recommenders contributors.
# Licensed under the MIT License.
import numpy as np
import pandas as pd
from cornac.models import BPR as CBPR
from recommenders.utils.constants import (
DEFAULT_USER_COL,
DEFAULT_ITEM_COL,
DEFAULT_PREDICTION_COL
)
class BPR(CBPR):
    """Custom BPR class extending Cornac's BPR model with a recommend_k_items method."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def recommend_k_items(
        self,
        data,
        top_k=None,
        remove_seen=False,
        col_user=DEFAULT_USER_COL,
        col_item=DEFAULT_ITEM_COL,
        col_prediction=DEFAULT_PREDICTION_COL,
    ):
        """Computes top-k predictions of recommender model from Cornac on all users in data.

        It can be used for computing ranking metrics like NDCG.

        Args:
            data (pandas.DataFrame): The data from which to get the users and items.
            top_k (int): Number of items to recommend per user. If None, all items are scored.
            remove_seen (bool): Flag to remove (user, item) pairs seen in the training data.
            col_user (str): Name of the user column.
            col_item (str): Name of the item column.
            col_prediction (str): Name of the prediction column.

        Returns:
            pandas.DataFrame: Dataframe with col_user, col_item, col_prediction for top-k items per user.
        """
        # Get user and item mappings (raw IDs -> internal Cornac indices).
        items = np.array(list(self.train_set.iid_map.keys()))
        users = np.array(list(self.train_set.uid_map.keys()))
        n_users = len(users)
        n_items = len(items)
        # Clamp top_k to the catalog size; None means "all items".
        top_k = n_items if top_k is None else min(top_k, n_items)

        # Compute user and item indices
        user_indices = np.array([self.train_set.uid_map[u] for u in users])
        item_indices = np.array([self.train_set.iid_map[i] for i in items])

        # Get latent factors and biases.
        # Models without item biases fall back to zero biases.
        U = self.u_factors[user_indices]
        V = self.i_factors[item_indices]
        B = self.i_biases[item_indices] if hasattr(self, "i_biases") else np.zeros(n_items)

        # Compute score matrix for all user-item pairs
        preds_matrix = U @ V.T + B  # Shape: (n_users, n_items)

        # Select top-k items per user: argpartition isolates the top-k set in
        # O(n_items) per row, then argsort orders only those k columns by
        # descending score.
        top_k_indices = np.argpartition(preds_matrix, -top_k, axis=1)[:, -top_k:]  # Shape: (n_users, top_k)
        sorted_indices = np.argsort(-preds_matrix[np.arange(n_users)[:, None], top_k_indices], axis=1)
        top_k_indices = top_k_indices[np.arange(n_users)[:, None], sorted_indices]  # Shape: (n_users, top_k)

        # Extract items and scores
        user_array = np.repeat(users, top_k)  # Shape: (n_users * top_k,)
        item_array = items[top_k_indices].flatten()  # Shape: (n_users * top_k,)
        pred_array = np.take_along_axis(preds_matrix, top_k_indices, axis=1).flatten()  # Shape: (n_users * top_k,)

        # Create DataFrame
        all_predictions = pd.DataFrame(
            {col_user: user_array, col_item: item_array, col_prediction: pred_array}
        )

        if remove_seen:
            # Left-merge with an indicator column and keep only rows that do
            # not appear in `data` (i.e. unseen user-item pairs).
            seen = data[[col_user, col_item]].drop_duplicates()
            merged = all_predictions.merge(
                seen, on=[col_user, col_item], how="left", indicator=True
            )
            return merged[merged["_merge"] == "left_only"].drop(columns=["_merge"]).reset_index(drop=True)

        return all_predictions
"repo_id": "recommenders-team/recommenders",
"file_path": "recommenders/models/cornac/bpr.py",
"license": "MIT License",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
recommenders-team/recommenders:recommenders/models/embdotbias/data_loader.py | # Copyright (c) Recommenders contributors.
# Licensed under the MIT License.
import random
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
class RecoDataset(Dataset):
    """
    PyTorch Dataset for collaborative filtering tasks.

    Stores user, item, and rating data as tensors for efficient batching.
    """

    def __init__(self, users, items, ratings):
        """
        Args:
            users (array-like): User IDs or indices.
            items (array-like): Item IDs or indices.
            ratings (array-like): Ratings or interactions.
        """
        # Normalize dtypes via numpy first, then build the tensors.
        user_arr = np.array(users, dtype=np.int64)
        item_arr = np.array(items, dtype=np.int64)
        rating_arr = np.array(ratings, dtype=np.float32)

        self.users = torch.tensor(user_arr, dtype=torch.long)
        self.items = torch.tensor(item_arr, dtype=torch.long)
        self.ratings = torch.tensor(rating_arr, dtype=torch.float)

    def __len__(self):
        """Return the number of ratings in the dataset."""
        return len(self.ratings)

    def __getitem__(self, idx):
        """Return one sample.

        Args:
            idx (int): Index of the sample to retrieve.

        Returns:
            tuple: ((user, item) tensor of shape [2], rating tensor of shape [1])
        """
        pair = torch.stack((self.users[idx], self.items[idx]))
        rating = self.ratings[idx].unsqueeze(0)
        return pair, rating
class RecoDataLoader:
"""
Utility class for managing training and validation DataLoaders for collaborative filtering.
Stores metadata about users/items and provides helper methods for data preparation and inspection.
"""
def __init__(self, train_dl, valid_dl=None):
"""Initialize the dataloaders.
Args:
train_dl (DataLoader): Training dataloader
valid_dl (DataLoader, optional): Validation dataloader
"""
self.train = train_dl
self.valid = valid_dl
self.classes = {}
@classmethod
def from_df(
cls,
ratings,
valid_pct=0.2,
user_name=None,
item_name=None,
rating_name=None,
seed=42,
batch_size=64,
**kwargs,
):
"""
Create DataLoaders from a pandas DataFrame for collaborative filtering.
Args:
ratings (pd.DataFrame): DataFrame containing user, item, and rating columns.
valid_pct (float): Fraction of data to use for validation.
user_name (str): Name of the user column.
item_name (str): Name of the item column.
rating_name (str): Name of the rating column.
seed (int): Random seed for reproducibility.
batch_size (int): Batch size for DataLoaders.
**kwargs: Additional DataLoader arguments.
Returns:
RecoDataLoader: Instance with train/valid DataLoaders and metadata.
"""
# Validate input
if ratings is None or len(ratings) == 0:
raise ValueError("Input DataFrame is empty")
# Set random seed
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
# Get column names
user_name = user_name or ratings.columns[0]
item_name = item_name or ratings.columns[1]
rating_name = rating_name or ratings.columns[2]
# Validate columns exist
required_cols = [user_name, item_name, rating_name]
if not all(col in ratings.columns for col in required_cols):
raise ValueError(
f"Missing required columns: {[col for col in required_cols if col not in ratings.columns]}"
)
# Drop any rows with NaN values
ratings = ratings.dropna(subset=[user_name, item_name, rating_name])
if len(ratings) == 0:
raise ValueError("No valid data after dropping NaN values")
# Get unique users and items (as strings)
# Convert to string first to ensure consistent type for sorting
users = ratings[user_name].astype(str).unique()
items = ratings[item_name].astype(str).unique()
if len(users) == 0 or len(items) == 0:
raise ValueError("No unique users or items found in the data")
# Sort unique users and items using standard string sorting
# This matches the behavior observed in fastai's categorization for numeric strings
sorted_users = ["#na#"] + sorted(users.tolist())
sorted_items = ["#na#"] + sorted(items.tolist())
# Create mapping dictionaries using the string-sorted lists
user2idx = {u: i for i, u in enumerate(sorted_users)}
item2idx = {i: idx for idx, i in enumerate(sorted_items)}
# Convert original IDs in the DataFrame to indices using the mapping
# Use .loc[] for assignment to avoid SettingWithCopyWarning
ratings.loc[:, user_name] = (
ratings[user_name]
.astype(str)
.map(user2idx)
.fillna(user2idx["#na#"])
.astype(np.int64)
)
ratings.loc[:, item_name] = (
ratings[item_name]
.astype(str)
.map(item2idx)
.fillna(item2idx["#na#"])
.astype(np.int64)
)
ratings.loc[:, rating_name] = ratings[rating_name].astype(
np.float32
) # Ensure rating is float
# Split into train and validation
n = len(ratings)
n_valid = int(n * valid_pct)
if n_valid >= n:
if n == 0:
raise ValueError(
"Input DataFrame was empty or contained no valid rows after cleaning."
)
else:
raise ValueError(
f"Validation percentage {valid_pct} is too high. {n} total items, {n_valid} requested for validation leaves {n - n_valid} for training."
)
indices = list(range(n))
random.shuffle(indices)
train_idx = indices[n_valid:]
valid_idx = indices[:n_valid]
if len(train_idx) == 0:
raise ValueError("Training set is empty after split. Reduce valid_pct.")
# Create datasets using the index-mapped values
train_ds = RecoDataset(
ratings.iloc[train_idx][user_name].values,
ratings.iloc[train_idx][item_name].values,
ratings.iloc[train_idx][rating_name].values,
)
valid_ds = (
RecoDataset(
ratings.iloc[valid_idx][user_name].values,
ratings.iloc[valid_idx][item_name].values,
ratings.iloc[valid_idx][rating_name].values,
)
if n_valid > 0
else None
)
# Create dataloaders with safe batch sizes
train_dl = DataLoader(
train_ds,
batch_size=(
min(batch_size, len(train_ds)) if len(train_ds) > 0 else 1
), # Ensure batch_size isn't larger than dataset
shuffle=True,
**kwargs,
)
valid_batch_size = batch_size
valid_dl = (
DataLoader(
valid_ds,
batch_size=(
min(valid_batch_size, len(valid_ds))
if valid_ds and len(valid_ds) > 0
else (1 if valid_ds else None)
),
shuffle=False,
**kwargs,
)
if valid_ds is not None and len(valid_ds) > 0
else None
) # Ensure valid_dl is None if valid_ds is empty
# Create instance and store metadata
dl = cls(train_dl, valid_dl)
# Store the string-sorted lists in .classes
dl.classes = {user_name: sorted_users, item_name: sorted_items}
dl.user = user_name
dl.item = item_name
# n_users and n_items should be the size of the classes lists, including #na#
dl.n_users = len(sorted_users)
dl.n_items = len(sorted_items)
dl.user2idx = user2idx # Store mappings for potential later use
dl.item2idx = item2idx # Store mappings for potential later use
return dl
def show_batch(self, n=5):
    """Print the first `n` rows of a single training batch as a DataFrame.

    Args:
        n (int): Number of examples to display.

    Raises:
        ValueError: If ``n`` exceeds the size of the sampled batch.
    """
    # Pull exactly one batch from the training loader: `pairs` has shape
    # [bs, 2] holding (user index, item index); `targets` has shape [bs, 1].
    for pairs, targets in self.train:
        bs = pairs.shape[0]
        if n > bs:
            raise ValueError(
                f"n ({n}) rows cannot be greater than the batch size ({bs})"
            )
        user_idx = pairs[:n, 0].numpy()
        item_idx = pairs[:n, 1].numpy()
        # Drop the trailing singleton dimension from the ratings column.
        rating_vals = targets[:n].numpy().squeeze()
        # Map integer indices back to the original IDs via the classes lists.
        frame = pd.DataFrame(
            {
                self.user: [self.classes[self.user][u] for u in user_idx],
                self.item: [self.classes[self.item][i] for i in item_idx],
                "rating": rating_vals,
            }
        )
        print(f"Showing {n} examples from a batch:")
        print(frame)
        break
| {
"repo_id": "recommenders-team/recommenders",
"file_path": "recommenders/models/embdotbias/data_loader.py",
"license": "MIT License",
"lines": 237,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
recommenders-team/recommenders:recommenders/models/embdotbias/model.py | # Copyright (c) Recommenders contributors.
# Licensed under the MIT License.
import torch
from torch.nn import Embedding
from torch.nn import Module
import torch.nn.init as init
class EmbeddingDotBias(Module):
    """
    Base dot-product model for collaborative filtering.

    Learns an embedding vector and a scalar bias per user and per item, and
    predicts a rating as ``user . item + user_bias + item_bias``, optionally
    squashed into ``y_range`` via a sigmoid.
    """

    def __init__(self, n_factors, n_users, n_items, y_range=None):
        """
        Initialize the EmbeddingDotBias model.

        Args:
            n_factors (int): Number of latent factors.
            n_users (int): Number of users.
            n_items (int): Number of items.
            y_range (tuple): Range for output normalization (min, max).
        """
        super().__init__()
        # Populated by `from_classes`; None means the model only accepts
        # raw integer indices (ID lookup helpers are unavailable).
        self.classes = None
        self.y_range = y_range
        self.u_weight = Embedding(n_users, n_factors)
        self.i_weight = Embedding(n_items, n_factors)
        self.u_bias = Embedding(n_users, 1)
        self.i_bias = Embedding(n_items, 1)
        # Initialize with truncated normal so initial predictions stay near 0.
        for emb in [self.u_weight, self.i_weight, self.u_bias, self.i_bias]:
            init.trunc_normal_(emb.weight, std=0.01)

    def forward(self, x):
        """
        Forward pass for the model.

        Args:
            x (torch.Tensor): Tensor of shape (batch_size, 2) with user and item indices.

        Returns:
            torch.Tensor: Predicted ratings for each user-item pair, shape (batch_size,).
        """
        users, items = x[:, 0], x[:, 1]
        dot = self.u_weight(users) * self.i_weight(items)
        # squeeze(-1) (rather than squeeze()) so a batch of size 1 keeps its
        # batch dimension instead of collapsing to a 0-d tensor.
        res = (
            dot.sum(1)
            + self.u_bias(users).squeeze(-1)
            + self.i_bias(items).squeeze(-1)
        )
        if self.y_range is None:
            return res
        # Sigmoid maps to (0, 1); rescale into the configured rating range.
        return (
            torch.sigmoid(res) * (self.y_range[1] - self.y_range[0]) + self.y_range[0]
        )

    @classmethod
    def from_classes(cls, n_factors, classes, user=None, item=None, y_range=None):
        """
        Build a model with `n_factors` by inferring `n_users` and `n_items` from `classes`.

        Args:
            n_factors (int): Number of latent factors.
            classes (dict): Dictionary mapping entity names to lists of IDs.
            user (str): Key for user IDs in `classes`; defaults to the first key.
            item (str): Key for item IDs in `classes`; defaults to the second key.
            y_range (tuple): Range for output normalization.

        Returns:
            EmbeddingDotBias: Instantiated model.
        """
        if user is None:
            user = list(classes.keys())[0]
        if item is None:
            item = list(classes.keys())[1]
        res = cls(n_factors, len(classes[user]), len(classes[item]), y_range=y_range)
        res.classes, res.user, res.item = classes, user, item
        return res

    def _get_idx(self, entity_ids, is_item=True):
        """
        Fetch item or user indices for all in `entity_ids`.

        Args:
            entity_ids (list): List of user or item IDs.
            is_item (bool): If True, fetch item indices; else user indices.

        Returns:
            torch.Tensor: Tensor of indices for embedding lookup.

        Raises:
            RuntimeError: If the model was not built with `from_classes`.
            KeyError: If any ID in `entity_ids` is unknown.
        """
        # BUGFIX: `classes` is assigned None in __init__, so the previous
        # `hasattr(self, "classes")` check was always True and the intended
        # RuntimeError could never fire (a TypeError surfaced instead).
        if getattr(self, "classes", None) is None:
            raise RuntimeError(
                "Build your model with `EmbeddingDotBias.from_classes` to use this functionality."
            )
        classes = self.classes[self.item] if is_item else self.classes[self.user]
        # Map from entity ID (user or item) to its integer index in the embedding matrix.
        entity_id_to_index = {entity_id: idx for idx, entity_id in enumerate(classes)}
        try:
            return torch.tensor([entity_id_to_index[o] for o in entity_ids])
        except KeyError as e:
            message = f"You're trying to access {'item' if is_item else 'user'} {entity_ids} that isn't in the training data. If it was in your original data, it may have been split such that it's only in the validation set now."
            # Chain the original KeyError so the offending ID stays visible.
            raise KeyError(message) from e

    def bias(self, entity_ids, is_item=True):
        """
        Get bias values for items or users in `entity_ids`.

        Args:
            entity_ids (list): List of user or item IDs.
            is_item (bool): If True, fetch item bias; else user bias.

        Returns:
            torch.Tensor: Bias values for the given entities (detached, on CPU).
        """
        idx = self._get_idx(entity_ids, is_item)
        layer = (self.i_bias if is_item else self.u_bias).eval().cpu()
        return layer(idx).squeeze().detach()

    def weight(self, entity_ids, is_item=True):
        """
        Get embedding weights for items or users in `entity_ids`.

        Args:
            entity_ids (list): List of user or item IDs.
            is_item (bool): If True, fetch item weights; else user weights.

        Returns:
            torch.Tensor: Embedding weights for the given entities (detached, on CPU).
        """
        idx = self._get_idx(entity_ids, is_item)
        layer = (self.i_weight if is_item else self.u_weight).eval().cpu()
        return layer(idx).detach()
| {
"repo_id": "recommenders-team/recommenders",
"file_path": "recommenders/models/embdotbias/model.py",
"license": "MIT License",
"lines": 111,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
recommenders-team/recommenders:recommenders/models/embdotbias/training_utils.py | # Copyright (c) Recommenders contributors.
# Licensed under the MIT License.
import torch
import torch.optim as optim
from torch.nn import MSELoss
import logging
from recommenders.utils.constants import (
DEFAULT_USER_COL as USER,
DEFAULT_ITEM_COL as ITEM,
DEFAULT_RATING_COL as RATING,
DEFAULT_TIMESTAMP_COL as TIMESTAMP,
DEFAULT_PREDICTION_COL as PREDICTION,
)
# Set up logger
logger = logging.getLogger(__name__)
class Trainer:
    """Minimal training loop for rating-prediction models.

    Wraps an AdamW optimizer and MSE loss around a model that maps a
    ``[batch, 2]`` tensor of (user, item) pairs to predicted ratings.
    """

    def __init__(self, model, learning_rate=1e-3, weight_decay=0.01):
        """
        Initializes the RecommenderTrainer.

        Args:
            model: The PyTorch model to train.
            learning_rate (float): The learning rate for the optimizer.
            weight_decay (float): The weight decay for the optimizer.
        """
        # Train on GPU when available; every batch is moved to this device.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = model.to(self.device)
        self.optimizer = optim.AdamW(
            self.model.parameters(),
            lr=learning_rate,
            betas=(0.9, 0.99),
            eps=1e-5,
            weight_decay=weight_decay,
        )
        self.loss_fn = MSELoss()

    def train_epoch(self, train_dl):
        """
        Trains the model for one epoch.

        Args:
            train_dl: The training data loader (must be non-empty and sized).

        Returns:
            float: The average training loss for the epoch.
        """
        self.model.train()
        total_loss = 0.0
        for users_items, ratings in train_dl:
            users_items = users_items.to(self.device)
            ratings = ratings.to(self.device)
            self.optimizer.zero_grad()
            predictions = self.model(users_items)
            # Flatten both sides so (bs,) and (bs, 1) shapes compare cleanly.
            loss = self.loss_fn(predictions.view(-1), ratings.view(-1))
            loss.backward()
            self.optimizer.step()
            total_loss += loss.item()
        return total_loss / len(train_dl)

    def validate(self, valid_dl):
        """
        Validates the model on the validation set.

        Args:
            valid_dl: The validation data loader, or None if there is no
                validation split.

        Returns:
            float or None: The average validation loss, or None if there is
                no validation data.
        """
        # BUGFIX: explicitly handle a missing or empty loader. The previous
        # implementation relied on catching ZeroDivisionError for empty
        # loaders and crashed with an uncaught TypeError when valid_dl was
        # None (which the data loader legitimately produces for valid_pct=0).
        if valid_dl is None or len(valid_dl) == 0:
            return None
        self.model.eval()
        total_loss = 0.0
        with torch.no_grad():
            for users_items, ratings in valid_dl:
                users_items = users_items.to(self.device)
                ratings = ratings.to(self.device)
                predictions = self.model(users_items)
                loss = self.loss_fn(predictions.view(-1), ratings.view(-1))
                total_loss += loss.item()
        return total_loss / len(valid_dl)

    def fit(self, train_dl, valid_dl, n_epochs):
        """
        Trains the model for a specified number of epochs.

        Args:
            train_dl: The training data loader.
            valid_dl: The validation data loader (may be None).
            n_epochs (int): The number of epochs to train for.
        """
        for epoch in range(n_epochs):
            train_loss = self.train_epoch(train_dl)
            valid_loss = self.validate(valid_dl)
            logger.info(f"Epoch {epoch+1}/{n_epochs}:")
            logger.info(f"Train Loss: {train_loss}")
            logger.info(f"Valid Loss: {valid_loss}")
def predict_rating(model, user_id, item_id):
    """
    Predicts the rating for a given user and item.

    Args:
        model: Trained model exposing `_get_idx` for ID-to-index lookup.
        user_id (str): The ID of the user.
        item_id (str): The ID of the item.

    Returns:
        float or None: The predicted rating, or None if an error occurs.
    """
    model.eval()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Keep model and inputs on the same device.
    model = model.to(device)
    with torch.no_grad():
        try:
            # Build a single-row [1, 2] tensor of (user index, item index).
            row = torch.stack(
                [
                    model._get_idx([user_id], is_item=False),
                    model._get_idx([item_id], is_item=True),
                ],
                dim=1,
            ).to(device)
            return model(row).item()
        except Exception as e:
            logger.error(f"Error in prediction: {str(e)}")
            return None
| {
"repo_id": "recommenders-team/recommenders",
"file_path": "recommenders/models/embdotbias/training_utils.py",
"license": "MIT License",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
recommenders-team/recommenders:tests/unit/recommenders/models/test_embdotbias_model.py | # Copyright (c) Recommenders contributors.
# Licensed under the MIT License.
import pytest
import tempfile
import numpy as np
import pandas as pd
import torch
from recommenders.models.embdotbias.data_loader import RecoDataLoader, RecoDataset
from recommenders.models.embdotbias.model import EmbeddingDotBias
from recommenders.models.embdotbias.training_utils import Trainer, predict_rating
from recommenders.utils.constants import (
DEFAULT_ITEM_COL,
DEFAULT_RATING_COL,
DEFAULT_USER_COL,
)
@pytest.fixture(scope="module")
def sample_ratings_data():
    """Create fixed sample ratings data for testing.

    Returns a deterministic 7-row DataFrame with integer user/item IDs and
    float ratings; shared across the module (scope="module").
    """
    data = {
        DEFAULT_USER_COL: [1, 4, 8, 5, 7, 10, 3],
        DEFAULT_ITEM_COL: [1, 3, 14, 17, 4, 18, 8],
        DEFAULT_RATING_COL: [
            3.493193,
            2.323592,
            1.254233,
            2.243929,
            2.300733,
            3.918425,
            3.550230,
        ],
    }
    return pd.DataFrame(data)


@pytest.fixture(scope="module")
def sample_model_params():
    """Sample model parameters for testing.

    Keys match the keyword arguments of EmbeddingDotBias.__init__.
    """
    return {"n_factors": 50, "n_users": 6, "n_items": 11, "y_range": (1.0, 5.0)}


@pytest.fixture(scope="module")
def sample_classes():
    """Create sample classes mapping for testing.

    String IDs, mirroring the sorted string lists RecoDataLoader stores in
    its `classes` attribute: 5 users and 10 items.
    """
    return {
        DEFAULT_USER_COL: ["1", "2", "3", "4", "5"],
        DEFAULT_ITEM_COL: ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
    }
@pytest.mark.gpu
def test_embedding_dot_bias_weight_method(sample_classes):
    """Test EmbeddingDotBias weight method returns one row per looked-up ID."""
    model = EmbeddingDotBias.from_classes(
        n_factors=10, classes=sample_classes, y_range=(1.0, 5.0)
    )
    # Test user weight: 2 IDs -> (2, n_factors)
    user_weight = model.weight(["1", "2"], is_item=False)
    assert user_weight.shape == (2, 10)
    # Test item weight
    item_weight = model.weight(["1", "2"], is_item=True)
    assert item_weight.shape == (2, 10)


@pytest.mark.gpu
def test_embedding_dot_bias_from_classes(sample_classes):
    """Test EmbeddingDotBias.from_classes stores metadata and sizes embeddings."""
    model = EmbeddingDotBias.from_classes(
        n_factors=10, classes=sample_classes, y_range=(1.0, 5.0)
    )
    assert model.classes == sample_classes
    # user/item keys default to the first and second keys of `classes`.
    assert model.user == DEFAULT_USER_COL
    assert model.item == DEFAULT_ITEM_COL
    assert model.u_weight.num_embeddings == len(sample_classes[DEFAULT_USER_COL])
    assert model.i_weight.num_embeddings == len(sample_classes[DEFAULT_ITEM_COL])


@pytest.mark.gpu
def test_embedding_dot_bias_init(sample_model_params):
    """Test EmbeddingDotBias initialization sizes all four embedding tables."""
    model = EmbeddingDotBias(**sample_model_params)
    assert model.u_weight.num_embeddings == sample_model_params["n_users"]
    assert model.i_weight.num_embeddings == sample_model_params["n_items"]
    assert model.u_bias.num_embeddings == sample_model_params["n_users"]
    assert model.i_bias.num_embeddings == sample_model_params["n_items"]
    assert model.y_range == sample_model_params["y_range"]


@pytest.mark.gpu
def test_embedding_dot_bias_forward(sample_model_params):
    """Test EmbeddingDotBias forward pass output shape and dtype."""
    model = EmbeddingDotBias(**sample_model_params)
    # Create sample input: rows of (user index, item index)
    batch_size = 3
    x = torch.tensor([[0, 1], [1, 2], [2, 3]], dtype=torch.long)
    output = model(x)
    assert output.shape == (batch_size,)
    assert output.dtype == torch.float
@pytest.mark.gpu
def test_trainer_init(sample_model_params):
    """Test Trainer initialization wires up model, optimizer, loss, device."""
    model = EmbeddingDotBias(**sample_model_params)
    trainer = Trainer(model, learning_rate=0.001, weight_decay=0.01)
    assert trainer.model == model
    assert trainer.optimizer is not None
    assert trainer.loss_fn is not None
    assert trainer.device is not None


@pytest.mark.gpu
def test_trainer_train_epoch(sample_ratings_data):
    """Test Trainer train_epoch method returns a non-negative average loss."""
    # Create dataloader
    dl = RecoDataLoader.from_df(
        sample_ratings_data, valid_pct=0.2, batch_size=4, seed=42
    )
    # Create model sized from the loader's classes mapping
    model = EmbeddingDotBias.from_classes(
        n_factors=10, classes=dl.classes, y_range=(1.0, 5.0)
    )
    trainer = Trainer(model, learning_rate=0.001)
    # Train for one epoch
    loss = trainer.train_epoch(dl.train)
    assert isinstance(loss, float)
    assert loss >= 0


@pytest.mark.gpu
def test_trainer_fit(sample_ratings_data):
    """Test Trainer fit method runs end to end for multiple epochs."""
    # Create dataloader
    dl = RecoDataLoader.from_df(
        sample_ratings_data, valid_pct=0.2, batch_size=4, seed=42
    )
    # Create model
    model = EmbeddingDotBias.from_classes(
        n_factors=10, classes=dl.classes, y_range=(1.0, 5.0)
    )
    trainer = Trainer(model, learning_rate=0.001)
    # Fit for 2 epochs
    trainer.fit(dl.train, dl.valid, n_epochs=2)
    # Model should be trained; fit ends with validate(), which calls
    # model.eval(), so training mode is off afterward.
    assert model.training is False  # Should be in eval mode after training


@pytest.mark.gpu
def test_trainer_validate(sample_ratings_data):
    """Test Trainer validate method returns a non-negative average loss."""
    # Create dataloader
    dl = RecoDataLoader.from_df(
        sample_ratings_data, valid_pct=0.2, batch_size=4, seed=42
    )
    # Create model
    model = EmbeddingDotBias.from_classes(
        n_factors=10, classes=dl.classes, y_range=(1.0, 5.0)
    )
    trainer = Trainer(model, learning_rate=0.001)
    # Validate on the held-out split
    loss = trainer.validate(dl.valid)
    assert isinstance(loss, float)
    assert loss >= 0
@pytest.mark.gpu
def test_predict_rating(sample_classes):
    """Test predict_rating function returns a float within y_range."""
    model = EmbeddingDotBias.from_classes(
        n_factors=10, classes=sample_classes, y_range=(1.0, 5.0)
    )
    prediction = predict_rating(model, "1", "1")
    assert isinstance(prediction, float)
    assert 1.0 <= prediction <= 5.0  # Should be within y_range


@pytest.mark.gpu
def test_full_pipeline(sample_ratings_data):
    """Test the full pipeline from data loading to training and prediction."""
    # Create dataloader
    dl = RecoDataLoader.from_df(
        sample_ratings_data, valid_pct=0.2, batch_size=4, seed=42
    )
    # Create model
    model = EmbeddingDotBias.from_classes(
        n_factors=10, classes=dl.classes, y_range=(1.0, 5.0)
    )
    # Create trainer
    trainer = Trainer(model, learning_rate=0.001)
    # Train for one epoch and validate
    train_loss = trainer.train_epoch(dl.train)
    valid_loss = trainer.validate(dl.valid)
    # Make prediction for a known user/item pair
    prediction = predict_rating(model, "1", "1")
    assert isinstance(train_loss, float)
    assert isinstance(valid_loss, float)
    assert isinstance(prediction, float)
    assert train_loss >= 0
    assert valid_loss >= 0
    assert 1.0 <= prediction <= 5.0
@pytest.mark.gpu
@pytest.mark.parametrize(
    "entity_ids,is_item,expected_exception",
    [
        (["999"], True, KeyError),  # Non-existent item
        (["999"], False, KeyError),  # Non-existent user
        ([], True, None),  # Empty list
        ([], False, None),  # Empty list
    ],
)
def test_get_idx_edge_cases(sample_classes, entity_ids, is_item, expected_exception):
    """Unknown IDs raise KeyError; empty input yields an empty index tensor."""
    model = EmbeddingDotBias.from_classes(
        n_factors=10, classes=sample_classes, y_range=(1.0, 5.0)
    )
    if expected_exception:
        with pytest.raises(expected_exception):
            model._get_idx(entity_ids, is_item=is_item)
    else:
        result = model._get_idx(entity_ids, is_item=is_item)
        assert isinstance(result, torch.Tensor)
        assert result.shape[0] == 0
def test_reco_dataset_cpu(sample_ratings_data):
    """Test RecoDataset `__len__` and `__getitem__` without the gpu marker.

    NOTE(review): this function was previously also named
    ``test_reco_dataset``; an identically named gpu-marked test defined later
    in this module shadowed it, so it was never collected or run. Renamed so
    pytest collects both variants.
    """
    users = sample_ratings_data[DEFAULT_USER_COL].values
    items = sample_ratings_data[DEFAULT_ITEM_COL].values
    ratings = sample_ratings_data[DEFAULT_RATING_COL].values
    dataset = RecoDataset(users, items, ratings)
    assert len(dataset) == len(ratings)
    # A sample is a [user, item] pair tensor plus a 1-element rating tensor.
    user_item_tensor, rating_tensor = dataset[0]
    assert user_item_tensor.shape == (2,)
    assert rating_tensor.shape == (1,)
    assert user_item_tensor[0] == users[0]
    assert user_item_tensor[1] == items[0]
    assert rating_tensor[0] == ratings[0]
@pytest.mark.gpu
def test_reco_dataset(sample_ratings_data):
    """Test RecoDataset `__len__` and `__getitem__`.

    NOTE(review): this definition duplicates the name of an earlier,
    unmarked test in this module; being defined later, it shadows that one
    so only this gpu-marked variant is collected by pytest.
    """
    users = sample_ratings_data[DEFAULT_USER_COL].values
    items = sample_ratings_data[DEFAULT_ITEM_COL].values
    ratings = sample_ratings_data[DEFAULT_RATING_COL].values
    dataset = RecoDataset(users, items, ratings)
    assert len(dataset) == len(ratings)
    user_item_tensor, rating_tensor = dataset[0]
    assert user_item_tensor.shape == (2,)
    assert rating_tensor.shape == (1,)
    assert user_item_tensor[0] == users[0]
    assert user_item_tensor[1] == items[0]
    assert rating_tensor[0] == ratings[0]


@pytest.mark.gpu
def test_model_serialization(sample_model_params):
    """Test saving and loading of EmbeddingDotBias model via state_dict."""
    model = EmbeddingDotBias(**sample_model_params)
    with tempfile.TemporaryDirectory() as tmpdir:
        path = f"{tmpdir}/model.pt"
        torch.save(model.state_dict(), path)
        loaded_model = EmbeddingDotBias(**sample_model_params)
        loaded_model.load_state_dict(torch.load(path))
        # Every parameter tensor must round-trip through the checkpoint.
        for p1, p2 in zip(model.parameters(), loaded_model.parameters()):
            assert torch.allclose(p1, p2)
| {
"repo_id": "recommenders-team/recommenders",
"file_path": "tests/unit/recommenders/models/test_embdotbias_model.py",
"license": "MIT License",
"lines": 234,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
recommenders-team/recommenders:tests/unit/recommenders/models/test_embdotbias_utils.py | import numpy as np
import pandas as pd
import pytest
import torch
from recommenders.models.embdotbias.utils import cartesian_product, score
from recommenders.utils.constants import (
DEFAULT_USER_COL,
DEFAULT_ITEM_COL,
DEFAULT_RATING_COL,
)
@pytest.fixture(scope="module")
def sample_ratings_data():
    """Create fixed sample ratings data for testing.

    Deterministic 7-row DataFrame with integer user/item IDs and float
    ratings; shared across the module (scope="module").
    """
    data = {
        DEFAULT_USER_COL: [1, 4, 8, 5, 7, 10, 3],
        DEFAULT_ITEM_COL: [1, 3, 14, 17, 4, 18, 8],
        DEFAULT_RATING_COL: [
            3.493193,
            2.323592,
            1.254233,
            2.243929,
            2.300733,
            3.918425,
            3.550230,
        ],
    }
    return pd.DataFrame(data)
def test_cartesian_product_two_arrays():
    """Two arrays produce all ordered pairs, first array varying slowest."""
    a = np.array([1, 2])
    b = np.array([3, 4])
    result = cartesian_product(a, b)
    expected = np.array([[1, 3], [1, 4], [2, 3], [2, 4]])
    np.testing.assert_array_equal(result, expected)


def test_cartesian_product_three_arrays():
    """Three arrays produce all ordered triples (2*2*2 = 8 rows)."""
    a = np.array([1, 2])
    b = np.array([3, 4])
    c = np.array([5, 6])
    result = cartesian_product(a, b, c)
    expected = np.array(
        [
            [1, 3, 5],
            [1, 3, 6],
            [1, 4, 5],
            [1, 4, 6],
            [2, 3, 5],
            [2, 3, 6],
            [2, 4, 5],
            [2, 4, 6],
        ]
    )
    np.testing.assert_array_equal(result, expected)


def test_cartesian_product_single_array():
    """A single array yields a column of 1-element rows."""
    a = np.array([1, 2, 3])
    result = cartesian_product(a)
    expected = np.array([[1], [2], [3]])
    np.testing.assert_array_equal(result, expected)


def test_cartesian_product_empty_array():
    """An empty factor makes the whole product empty (shape (0, 2))."""
    a = np.array([])
    b = np.array([1, 2])
    result = cartesian_product(a, b)
    expected = np.empty((0, 2))
    np.testing.assert_array_equal(result, expected)
def test_score(sample_ratings_data):
    """Test score function with and without top_k, and with unknown IDs."""

    # Minimal stand-in implementing just the model surface score() touches:
    # a classes mapping, _get_idx lookup, a forward method, and device move.
    class DummyModel:
        def __init__(self, classes):
            self.classes = classes

        def _get_idx(self, entity_ids, is_item=True):
            entity_map = (
                self.classes[DEFAULT_USER_COL]
                if not is_item
                else self.classes[DEFAULT_ITEM_COL]
            )
            # Unknown IDs are silently dropped, so the returned tensor can be
            # shorter than the input list.
            return torch.tensor(
                [entity_map.index(x) for x in entity_ids if x in entity_map]
            )

        def forward(self, x):
            # Constant predictions: one 1.0 per input row.
            return torch.ones(x.shape[0])

        def to(self, device):
            return self

    classes = {
        DEFAULT_USER_COL: list(sample_ratings_data[DEFAULT_USER_COL].unique()),
        DEFAULT_ITEM_COL: list(sample_ratings_data[DEFAULT_ITEM_COL].unique()),
    }
    model = DummyModel(classes)
    # Test with top_k: at most k rows per user in the output
    result = score(model, sample_ratings_data, top_k=2)
    assert isinstance(result, pd.DataFrame)
    assert result.groupby(DEFAULT_USER_COL).size().max() <= 2
    # Test without top_k: one prediction per input row
    result = score(model, sample_ratings_data)
    assert isinstance(result, pd.DataFrame)
    assert len(result) == len(sample_ratings_data)
    # Test with unknown users/items: dropped IDs shorten the index tensor
    test_df_new = pd.DataFrame({DEFAULT_USER_COL: [999, 1], DEFAULT_ITEM_COL: [999, 1]})
    # Calling score with mismatched data lengths should raise ValueError
    with pytest.raises(ValueError):
        score(model, test_df_new)
| {
"repo_id": "recommenders-team/recommenders",
"file_path": "tests/unit/recommenders/models/test_embdotbias_utils.py",
"license": "MIT License",
"lines": 99,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
reflex-dev/reflex:tests/units/istate/test_proxy.py | """Tests for MutableProxy pickle behavior."""
import dataclasses
import pickle
import reflex as rx
from reflex.istate.proxy import MutableProxy
@dataclasses.dataclass
class Item:
    """Simple picklable object for testing."""

    # Plain integer payload; dataclass-generated __eq__ lets tests compare.
    id: int


class ProxyTestState(rx.State):
    """Test state with a list field."""

    # Reflex state var declaration (class-level default defines the var).
    items: list[Item] = []
def test_mutable_proxy_pickle_preserves_object_identity():
    """Test that same object referenced directly and via proxy maintains identity."""
    state = ProxyTestState()
    obj = Item(1)
    # The same Item is reachable both directly and through a MutableProxy.
    data = {
        "direct": [obj],
        "proxied": [MutableProxy(obj, state, "items")],
    }
    unpickled = pickle.loads(pickle.dumps(data))
    assert unpickled["direct"][0].id == 1
    assert unpickled["proxied"][0].id == 1
    # The proxy is expected to pickle as its wrapped object, so pickle's
    # memoization deserializes both references to a single shared instance.
    assert unpickled["direct"][0] is unpickled["proxied"][0]
| {
"repo_id": "reflex-dev/reflex",
"file_path": "tests/units/istate/test_proxy.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
reflex-dev/reflex:reflex/istate/shared.py | """Base classes for shared / linked states."""
import asyncio
import contextlib
from collections.abc import AsyncIterator
from typing import Self, TypeVar
from reflex.constants import ROUTER_DATA
from reflex.event import Event, get_hydrate_event
from reflex.state import BaseState, State, _override_base_method, _substate_key
from reflex.utils import console
from reflex.utils.exceptions import ReflexRuntimeError
# Strong references to in-flight client-update tasks so they are not
# garbage-collected before completing; entries are discarded in the
# done-callback (_log_update_client_errors).
UPDATE_OTHER_CLIENT_TASKS: set[asyncio.Task] = set()
# Type variable bound to the shared-state base class for generic helpers.
LINKED_STATE = TypeVar("LINKED_STATE", bound="SharedStateBaseInternal")
def _log_update_client_errors(task: asyncio.Task):
    """Log errors from updating other clients.

    Used as a done-callback on client-update tasks; never re-raises.

    Args:
        task: The asyncio task to check for errors.
    """
    try:
        # result() re-raises any exception captured by the task.
        task.result()
    except Exception as e:
        console.warn(f"Error updating linked client: {e}")
    finally:
        # The task is finished either way; drop the module-level strong ref.
        UPDATE_OTHER_CLIENT_TASKS.discard(task)
def _do_update_other_tokens(
    affected_tokens: set[str],
    previous_dirty_vars: dict[str, set[str]],
    state_type: type[BaseState],
) -> list[asyncio.Task]:
    """Update other clients after a shared state update.

    Submit the updates in separate asyncio tasks to avoid deadlocking.

    Args:
        affected_tokens: The tokens to update.
        previous_dirty_vars: The dirty vars to apply to other clients.
        state_type: The type of the shared state.

    Returns:
        The list of asyncio tasks created to perform the updates.
    """
    from reflex.utils.prerequisites import get_app

    app = get_app().app

    async def _update_client(token: str):
        # Entering modify_state with previous_dirty_vars applies the changes
        # to that client's state; presumably the context manager pushes the
        # resulting delta on exit, so the body is intentionally empty.
        async with app.modify_state(
            _substate_key(token, state_type),
            previous_dirty_vars=previous_dirty_vars,
        ):
            pass

    tasks = []
    for affected_token in affected_tokens:
        # Don't send updates for disconnected clients.
        if affected_token not in app.event_namespace._token_manager.token_to_socket:
            continue
        # TODO: remove disconnected clients after some time.
        t = asyncio.create_task(_update_client(affected_token))
        # Keep a strong reference until the done-callback discards it.
        UPDATE_OTHER_CLIENT_TASKS.add(t)
        t.add_done_callback(_log_update_client_errors)
        tasks.append(t)
    return tasks
@contextlib.asynccontextmanager
async def _patch_state(
    original_state: BaseState, linked_state: BaseState, full_delta: bool = False
):
    """Patch the linked state into the original state's tree, restoring it afterward.

    Args:
        original_state: The original shared state.
        linked_state: The linked shared state.
        full_delta: If True, mark all Vars in linked_state dirty and resolve
            the delta from the root. This option is used when linking or unlinking
            to ensure that other computed vars in the tree pick up the newly
            linked/unlinked values.

    Raises:
        ReflexRuntimeError: If ``original_state`` is the root state (no parent).
    """
    if (original_parent_state := original_state.parent_state) is None:
        msg = "Cannot patch root state as linked state."
        raise ReflexRuntimeError(msg)
    state_name = original_state.get_name()
    # Splice linked_state into the tree in place of original_state, pointing
    # its parent at the original tree's parent.
    original_parent_state.substates[state_name] = linked_state
    linked_parent_state = linked_state.parent_state
    linked_state.parent_state = original_parent_state
    try:
        if full_delta:
            # Mark every var dirty so computed vars that depend on the
            # swapped-in state get recomputed from the root.
            linked_state.dirty_vars.update(linked_state.base_vars)
            linked_state.dirty_vars.update(linked_state.backend_vars)
            linked_state.dirty_vars.update(linked_state.computed_vars)
            linked_state._mark_dirty()
            # Apply the updates into the existing state tree for rehydrate.
            root_state = original_state._get_root_state()
            root_state.dirty_vars.add("router")
            root_state.dirty_vars.add(ROUTER_DATA)
            root_state._mark_dirty()
            await root_state._get_resolved_delta()
        yield
    finally:
        # Always restore the original tree structure and parent pointer.
        original_parent_state.substates[state_name] = original_state
        linked_state.parent_state = linked_parent_state
class SharedStateBaseInternal(State):
"""The private base state for all shared states."""
_exit_stack: contextlib.AsyncExitStack | None = None
_held_locks: dict[str, dict[type[BaseState], BaseState]] | None = None
def __getstate__(self):
    """Override redis serialization to remove temporary fields.

    Returns:
        The state dictionary without temporary fields.
    """
    s = super().__getstate__()
    # These fields only exist while processing an event on this instance and
    # must not be persisted.
    s.pop("_previous_dirty_vars", None)
    s.pop("_exit_stack", None)
    s.pop("_held_locks", None)
    return s
@_override_base_method
def _clean(self):
    """Override BaseState._clean to track the last set of dirty vars.

    This is necessary for applying dirty vars from one event to other linked states.
    """
    if (
        previous_dirty_vars := getattr(self, "_previous_dirty_vars", None)
    ) is not None:
        # Snapshot the dirty vars being cleaned so they can later be replayed
        # onto other clients linked to the same token.
        previous_dirty_vars.clear()
        previous_dirty_vars.update(self.dirty_vars)
    super()._clean()
@_override_base_method
def _mark_dirty(self):
    """Override BaseState._mark_dirty to avoid marking certain vars as dirty.

    Since these internal fields are not persisted to redis (see __getstate__),
    they shouldn't cause the state to be considered dirty either.
    """
    self.dirty_vars.discard("_previous_dirty_vars")
    self.dirty_vars.discard("_exit_stack")
    self.dirty_vars.discard("_held_locks")
    # Only mark dirty if there are still dirty vars, or any substate is dirty
    if self.dirty_vars or any(
        substate.dirty_vars for substate in self.substates.values()
    ):
        super()._mark_dirty()
def _rehydrate(self):
    """Get the events to rehydrate the state.

    Returns:
        The events to rehydrate the state (these should be returned/yielded).
    """
    return [
        # Trigger a full hydrate of this client's root state tree.
        Event(
            token=self.router.session.client_token,
            name=get_hydrate_event(self._get_root_state()),
        ),
        State.set_is_hydrated(True),
    ]
async def _link_to(self, token: str) -> Self:
    """Link this shared state to a token.

    After linking, subsequent access to this shared state will affect the
    linked token's state, and cause changes to be propagated to all other
    clients linked to that token.

    Args:
        token: The token to link to (Cannot contain underscore characters).

    Returns:
        The newly linked state.

    Raises:
        ReflexRuntimeError: If linking fails or token is invalid.
    """
    if not token:
        msg = "Cannot link shared state to empty token."
        raise ReflexRuntimeError(msg)
    if not isinstance(self, SharedState):
        msg = "Can only link SharedState instances."
        # NOTE(review): plain RuntimeError here, unlike the ReflexRuntimeError
        # used everywhere else in this module — confirm whether intentional.
        raise RuntimeError(msg)
    if self._linked_to == token:
        return self  # already linked to this token
    if self._linked_to and self._linked_to != token:
        # Disassociate from previous linked token since unlink will not be called.
        self._linked_from.discard(self.router.session.client_token)
    # TODO: Change StateManager to accept token + class instead of combining them in a string.
    if "_" in token:
        # Underscore is the separator used by _substate_key, so it cannot
        # appear in the token itself.
        msg = f"Invalid token {token} for linking state {self.get_full_name()}, cannot use underscore (_) in the token name."
        raise ReflexRuntimeError(msg)
    # Associate substate with the given link token.
    state_name = self.get_full_name()
    if self._reflex_internal_links is None:
        self._reflex_internal_links = {}
    self._reflex_internal_links[state_name] = token
    # full_delta=True so computed vars pick up the newly linked values.
    return await self._internal_patch_linked_state(token, full_delta=True)
async def _unlink(self):
    """Unlink this shared state from its linked token.

    Returns:
        The events to rehydrate the state after unlinking (these should be
        returned/yielded).

    Raises:
        ReflexRuntimeError: If this is not a SharedState or it is not linked.
    """
    from reflex.istate.manager import get_state_manager

    if not isinstance(self, SharedState):
        msg = "Can only unlink SharedState instances."
        raise ReflexRuntimeError(msg)
    state_name = self.get_full_name()
    if (
        not self._reflex_internal_links
        or state_name not in self._reflex_internal_links
    ):
        msg = f"State {state_name} is not linked and cannot be unlinked."
        raise ReflexRuntimeError(msg)
    # Break the linkage for future events.
    self._reflex_internal_links.pop(state_name)
    self._linked_from.discard(self.router.session.client_token)
    # Patch in the original (client-private) state, apply updates, then
    # rehydrate the client against its own state tree.
    private_root_state = await get_state_manager().get_state(
        _substate_key(self.router.session.client_token, type(self))
    )
    private_state = await private_root_state.get_state(type(self))
    async with _patch_state(
        original_state=self,
        linked_state=private_state,
        full_delta=True,
    ):
        return self._rehydrate()
async def _internal_patch_linked_state(
    self, token: str, full_delta: bool = False
) -> Self:
    """Load and replace this state with the linked state for a given token.

    Must be called inside a `_modify_linked_states` context, to ensure locks are
    released after the event is done processing.

    Args:
        token: The token of the linked state.
        full_delta: If True, mark all Vars in linked_state dirty and resolve
            delta to update cached computed vars

    Returns:
        The state that was linked into the tree.

    Raises:
        ReflexRuntimeError: If called outside `_modify_linked_states`, or the
            linked state is not a SharedState.
    """
    from reflex.istate.manager import get_state_manager

    if self._exit_stack is None or self._held_locks is None:
        msg = "Cannot link shared state outside of _modify_linked_states context."
        raise ReflexRuntimeError(msg)
    # Get the newly linked state and update pointers/delta for subsequent events.
    if token not in self._held_locks:
        # First touch of this token during the event: acquire its lock via
        # the exit stack so it is released when _modify_linked_states exits.
        linked_root_state = await self._exit_stack.enter_async_context(
            get_state_manager().modify_state(_substate_key(token, type(self)))
        )
        self._held_locks.setdefault(token, {})
    else:
        # Lock already held for this token; a plain read suffices.
        linked_root_state = await get_state_manager().get_state(
            _substate_key(token, type(self))
        )
    linked_state = await linked_root_state.get_state(type(self))
    if not isinstance(linked_state, SharedState):
        msg = f"Linked state for token {token} is not a SharedState."
        raise ReflexRuntimeError(msg)
    # Avoid unnecessary dirtiness of shared state when there are no changes.
    if type(self) not in self._held_locks[token]:
        self._held_locks[token][type(self)] = linked_state
    if self.router.session.client_token not in linked_state._linked_from:
        linked_state._linked_from.add(self.router.session.client_token)
    if linked_state._linked_to != token:
        linked_state._linked_to = token
    # Patch the linked state into the tree; unwound by the exit stack.
    await self._exit_stack.enter_async_context(
        _patch_state(
            original_state=self,
            linked_state=linked_state,
            full_delta=full_delta,
        )
    )
    return linked_state
def _held_locks_linked_states(self) -> list["SharedState"]:
"""Get all linked states currently held by this state.
Returns:
The list of linked states currently held.
"""
if self._held_locks is None:
return []
return [
linked_state
for linked_state_cls_to_instance in self._held_locks.values()
for linked_state in linked_state_cls_to_instance.values()
if isinstance(linked_state, SharedState)
]
@contextlib.asynccontextmanager
async def _modify_linked_states(
    self, previous_dirty_vars: dict[str, set[str]] | None = None
) -> AsyncIterator[None]:
    """Take lock, fetch all linked states, and patch them into the current state tree.

    If previous_dirty_vars is NOT provided, then any dirty vars after
    exiting the context will be applied to all other clients linked to this
    state's linked token.

    Args:
        previous_dirty_vars: When apply linked state changes to other
            tokens, provide mapping of state full_name to set of dirty vars.

    Yields:
        None.

    Raises:
        ReflexRuntimeError: If contexts are nested or there are no linked states.
    """
    # Nesting would clobber self._exit_stack / self._held_locks, so forbid it.
    if self._exit_stack is not None:
        msg = "Cannot nest _modify_linked_states contexts."
        raise ReflexRuntimeError(msg)
    if self._reflex_internal_links is None:
        msg = "No linked states to modify."
        raise ReflexRuntimeError(msg)
    # The exit stack owns all locks/patches acquired while the context is open.
    self._exit_stack = contextlib.AsyncExitStack()
    self._held_locks = {}
    current_dirty_vars: dict[str, set[str]] = {}
    affected_tokens: set[str] = set()
    try:
        # Go through all linked states and patch them in if they are present in the tree
        for linked_state_name, linked_token in self._reflex_internal_links.items():
            linked_state_cls: type[SharedState] = (
                self.get_root_state().get_class_substate(  # pyright: ignore[reportAssignmentType]
                    linked_state_name
                )
            )
            try:
                original_state = self._get_state_from_cache(linked_state_cls)
            except ValueError:
                # This state wasn't required for processing the event.
                continue
            linked_state = await original_state._internal_patch_linked_state(
                linked_token
            )
            if (
                previous_dirty_vars
                and (dv := previous_dirty_vars.get(linked_state_name)) is not None
            ):
                # Replay dirty vars propagated from another client's event.
                linked_state.dirty_vars.update(dv)
                linked_state._mark_dirty()
        # Entering the stack here ensures all held locks/patches are released
        # when the caller's body finishes (or raises).
        async with self._exit_stack:
            yield None
        # Collect dirty vars and other affected clients that need to be updated.
        for linked_state in self._held_locks_linked_states():
            if linked_state._previous_dirty_vars is not None:
                current_dirty_vars[linked_state.get_full_name()] = set(
                    linked_state._previous_dirty_vars
                )
            if (
                linked_state._get_was_touched()
                or linked_state._previous_dirty_vars is not None
            ):
                # Notify every other client linked to this token (not ourselves).
                affected_tokens.update(
                    token
                    for token in linked_state._linked_from
                    if token != self.router.session.client_token
                )
    finally:
        self._exit_stack = None
        # Only propagate dirty vars when we are not already propagating from another state.
        if previous_dirty_vars is None:
            _do_update_other_tokens(
                affected_tokens=affected_tokens,
                previous_dirty_vars=current_dirty_vars,
                state_type=type(self),
            )
class SharedState(SharedStateBaseInternal, mixin=True):
    """Mixin for defining new shared states."""

    # Client tokens that are currently linked to this shared state instance.
    _linked_from: set[str] = set()
    # Token this state is linked to ("" when unlinked); base logic elsewhere
    # also treats None as "unset" — NOTE(review): confirm the effective default.
    _linked_to: str = ""
    # Dirty var names captured from the previous patch session (read by
    # _modify_linked_states to propagate deltas to other linked clients).
    _previous_dirty_vars: set[str] = set()

    @classmethod
    def __init_subclass__(cls, **kwargs):
        """Initialize subclass and set up shared state fields.

        Args:
            **kwargs: The kwargs to pass to the init_subclass method.
        """
        # Concrete subclasses of the mixin are real (non-mixin) states.
        kwargs["mixin"] = False
        cls._mixin = False
        super().__init_subclass__(**kwargs)
        root_state = cls.get_root_state()
        # Ensure the root state has a links mapping to record state-name -> token.
        if root_state.backend_vars["_reflex_internal_links"] is None:
            root_state.backend_vars["_reflex_internal_links"] = {}
        if root_state is State:
            # Always fetch SharedStateBaseInternal to access
            # `_modify_linked_states` without having to use `.get_state()` which
            # pulls in all linked states and substates which may not actually be
            # accessed for this event.
            root_state._always_dirty_substates.add(SharedStateBaseInternal.get_name())
| {
"repo_id": "reflex-dev/reflex",
"file_path": "reflex/istate/shared.py",
"license": "Apache License 2.0",
"lines": 360,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
reflex-dev/reflex:tests/integration/test_linked_state.py | """Test linked state."""
from __future__ import annotations
import uuid
from collections.abc import Callable, Generator
import pytest
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from reflex.testing import AppHarness, WebDriver
from . import utils
def LinkedStateApp():
    """Test that linked state works as expected."""
    import uuid
    from typing import Any

    import reflex as rx

    class SharedState(rx.SharedState):
        # _who is read via PrivateState.greeting; underscore prefix marks it
        # as backend-only per rx convention — TODO confirm.
        _who: str = "world"
        n_changes: int = 0
        counter: int = 0

        @rx.event
        def set_counter(self, value: int) -> None:
            self.counter = value

        @rx.event
        def set_who(self, who: str) -> None:
            self._who = who
            self.n_changes += 1

        @rx.event
        async def link_to(self, token: str):
            await self._link_to(token)

        @rx.event
        async def link_to_and_increment(self):
            # Link and mutate the freshly linked state in the same event.
            linked_state = await self._link_to(f"arbitrary-token-{uuid.uuid4()}")
            linked_state.counter += 1

        @rx.event
        async def unlink(self):
            return await self._unlink()

        @rx.event
        async def on_load_link_default(self):
            # Link to the dynamic [room] route arg, or "default" when absent.
            linked_state = await self._link_to(self.room or "default")  # pyright: ignore[reportAttributeAccessIssue]
            if self.room:  # pyright: ignore[reportAttributeAccessIssue]
                assert linked_state._linked_to == self.room  # pyright: ignore[reportAttributeAccessIssue]
            else:
                assert linked_state._linked_to == "default"

        @rx.event
        async def handle_submit(self, form_data: dict[str, Any]):
            # Shared handler for both forms; dispatch on which field was posted.
            if "who" in form_data:
                self.set_who(form_data["who"])
            if "token" in form_data:
                await self.link_to(form_data["token"])

    class PrivateState(rx.State):
        @rx.var
        async def greeting(self) -> str:
            ss = await self.get_state(SharedState)
            return f"Hello, {ss._who}!"

        @rx.var
        async def linked_to(self) -> str:
            ss = await self.get_state(SharedState)
            return ss._linked_to

        @rx.event(background=True)
        async def bump_counter_bg(self):
            # 5 increments re-fetching state each time, then 5 more through a
            # held reference: 10 increments total per invocation.
            for _ in range(5):
                async with self:
                    ss = await self.get_state(SharedState)
                    ss.counter += 1
            async with self:
                ss = await self.get_state(SharedState)
            for _ in range(5):
                async with ss:
                    ss.counter += 1

        @rx.event
        async def bump_counter_yield(self):
            # 5 increments per invocation, yielding a partial update each time.
            ss = await self.get_state(SharedState)
            for _ in range(5):
                ss.counter += 1
                yield

    def index() -> rx.Component:
        return rx.vstack(
            rx.text(
                SharedState.n_changes,
                id="n-changes",
            ),
            rx.text(
                PrivateState.greeting,
                id="greeting",
            ),
            rx.form(
                rx.input(name="who", id="who-input"),
                rx.button("Set Who"),
                on_submit=SharedState.handle_submit,
                reset_on_submit=True,
            ),
            rx.text(PrivateState.linked_to, id="linked-to"),
            rx.button("Unlink", id="unlink-button", on_click=SharedState.unlink),
            rx.form(
                rx.input(name="token", id="token-input"),
                rx.button("Link To Token"),
                on_submit=SharedState.handle_submit,
                reset_on_submit=True,
            ),
            # Left-click increments, right-click (context menu) decrements.
            rx.button(
                SharedState.counter,
                id="counter-button",
                on_click=SharedState.set_counter(SharedState.counter + 1),
                on_context_menu=SharedState.set_counter(
                    SharedState.counter - 1
                ).prevent_default,
            ),
            rx.button(
                "Bump Counter in Background",
                on_click=PrivateState.bump_counter_bg,
                id="bg-button",
            ),
            rx.button(
                "Bump Counter with Yield",
                on_click=PrivateState.bump_counter_yield,
                id="yield-button",
            ),
            rx.button(
                "Link to arbitrary token and Increment n_changes",
                on_click=SharedState.link_to_and_increment,
                id="link-increment-button",
            ),
        )

    app = rx.App()
    # Dynamic-route page that links on load; plain page links only via the form.
    app.add_page(index, route="/room/[room]", on_load=SharedState.on_load_link_default)
    app.add_page(index)
@pytest.fixture
def linked_state(
    tmp_path_factory,
) -> Generator[AppHarness, None, None]:
    """Start LinkedStateApp at tmp_path via AppHarness.

    Args:
        tmp_path_factory: pytest tmp_path_factory fixture

    Yields:
        running AppHarness instance
    """
    app_root = tmp_path_factory.mktemp("linked_state")
    with AppHarness.create(root=app_root, app_source=LinkedStateApp) as running_app:
        yield running_app
@pytest.fixture
def tab_factory(
    linked_state: AppHarness,
) -> Generator[Callable[[], WebDriver], None, None]:
    """Get an instance of the browser open to the linked_state app.

    Args:
        linked_state: harness for LinkedStateApp

    Yields:
        WebDriver instance.
    """
    assert linked_state.app_instance is not None, "app is not running"
    opened_tabs: list[WebDriver] = []

    def _open_tab() -> WebDriver:
        tab = linked_state.frontend()
        opened_tabs.append(tab)
        return tab

    try:
        yield _open_tab
    finally:
        # Close every browser window the test opened, in creation order.
        for tab in opened_tabs:
            tab.quit()
def test_linked_state(
    linked_state: AppHarness,
    tab_factory: Callable[[], WebDriver],
):
    """Test that multiple tabs can link to and share state.

    Args:
        linked_state: harness for LinkedStateApp.
        tab_factory: factory to create WebDriver instances.
    """
    assert linked_state.app_instance is not None
    tab1 = tab_factory()
    tab2 = tab_factory()
    ss = utils.SessionStorage(tab1)
    assert AppHarness._poll_for(lambda: ss.get("token") is not None), "token not found"
    n_changes_1 = tab1.find_element(By.ID, "n-changes")
    greeting_1 = tab1.find_element(By.ID, "greeting")
    ss = utils.SessionStorage(tab2)
    assert AppHarness._poll_for(lambda: ss.get("token") is not None), "token not found"
    n_changes_2 = tab2.find_element(By.ID, "n-changes")
    greeting_2 = tab2.find_element(By.ID, "greeting")
    # Initial state
    assert n_changes_1.text == "0"
    assert greeting_1.text == "Hello, world!"
    assert n_changes_2.text == "0"
    assert greeting_2.text == "Hello, world!"
    # Change state in tab 1
    tab1.find_element(By.ID, "who-input").send_keys("Alice", Keys.ENTER)
    assert linked_state.poll_for_content(n_changes_1, exp_not_equal="0") == "1"
    assert (
        linked_state.poll_for_content(greeting_1, exp_not_equal="Hello, world!")
        == "Hello, Alice!"
    )
    # Change state in tab 2
    tab2.find_element(By.ID, "who-input").send_keys("Bob", Keys.ENTER)
    assert linked_state.poll_for_content(n_changes_2, exp_not_equal="0") == "1"
    assert (
        linked_state.poll_for_content(greeting_2, exp_not_equal="Hello, world!")
        == "Hello, Bob!"
    )
    # Link both tabs to the same token, "shared-foo"
    shared_token = f"shared-foo-{uuid.uuid4()}"
    for tab in (tab1, tab2):
        tab.find_element(By.ID, "token-input").send_keys(shared_token, Keys.ENTER)
    # Linking resets the visible values to the fresh shared state defaults.
    assert linked_state.poll_for_content(n_changes_1, exp_not_equal="1") == "0"
    assert (
        linked_state.poll_for_content(greeting_1, exp_not_equal="Hello, Alice!")
        == "Hello, world!"
    )
    assert linked_state.poll_for_content(n_changes_2, exp_not_equal="1") == "0"
    assert (
        linked_state.poll_for_content(greeting_2, exp_not_equal="Hello, Bob!")
        == "Hello, world!"
    )
    # Set a new value in tab 1, should reflect in tab 2
    tab1.find_element(By.ID, "who-input").send_keys("Charlie", Keys.ENTER)
    assert linked_state.poll_for_content(n_changes_1, exp_not_equal="0") == "1"
    assert (
        linked_state.poll_for_content(greeting_1, exp_not_equal="Hello, world!")
        == "Hello, Charlie!"
    )
    assert linked_state.poll_for_content(n_changes_2, exp_not_equal="0") == "1"
    assert (
        linked_state.poll_for_content(greeting_2, exp_not_equal="Hello, world!")
        == "Hello, Charlie!"
    )
    # Bump the counter in tab 2, should reflect in tab 1
    counter_button_1 = tab1.find_element(By.ID, "counter-button")
    counter_button_2 = tab2.find_element(By.ID, "counter-button")
    assert counter_button_1.text == "0"
    assert counter_button_2.text == "0"
    counter_button_2.click()
    assert linked_state.poll_for_content(counter_button_1, exp_not_equal="0") == "1"
    assert linked_state.poll_for_content(counter_button_2, exp_not_equal="0") == "1"
    counter_button_1.click()
    assert linked_state.poll_for_content(counter_button_1, exp_not_equal="1") == "2"
    assert linked_state.poll_for_content(counter_button_2, exp_not_equal="1") == "2"
    counter_button_2.click()
    assert linked_state.poll_for_content(counter_button_1, exp_not_equal="2") == "3"
    assert linked_state.poll_for_content(counter_button_2, exp_not_equal="2") == "3"
    # Unlink tab 2, should revert to previous private values
    tab2.find_element(By.ID, "unlink-button").click()
    assert n_changes_2.text == "1"
    assert (
        linked_state.poll_for_content(greeting_2, exp_not_equal="Hello, Charlie!")
        == "Hello, Bob!"
    )
    assert linked_state.poll_for_content(counter_button_2, exp_not_equal="3") == "0"
    # Relink tab 2, should go back to shared values
    tab2.find_element(By.ID, "token-input").send_keys(shared_token, Keys.ENTER)
    assert n_changes_2.text == "1"
    assert (
        linked_state.poll_for_content(greeting_2, exp_not_equal="Hello, Bob!")
        == "Hello, Charlie!"
    )
    assert linked_state.poll_for_content(counter_button_2, exp_not_equal="0") == "3"
    # Unlink tab 1, change the shared value in tab 2, and relink tab 1
    tab1.find_element(By.ID, "unlink-button").click()
    assert n_changes_1.text == "1"
    assert (
        linked_state.poll_for_content(greeting_1, exp_not_equal="Hello, Charlie!")
        == "Hello, Alice!"
    )
    tab2.find_element(By.ID, "who-input").send_keys("Diana", Keys.ENTER)
    assert linked_state.poll_for_content(n_changes_2, exp_not_equal="1") == "2"
    assert (
        linked_state.poll_for_content(greeting_2, exp_not_equal="Hello, Charlie!")
        == "Hello, Diana!"
    )
    assert counter_button_2.text == "3"
    # Tab 1 is unlinked, so tab 2's change must not leak into it.
    assert n_changes_1.text == "1"
    assert greeting_1.text == "Hello, Alice!"
    tab1.find_element(By.ID, "token-input").send_keys(shared_token, Keys.ENTER)
    assert linked_state.poll_for_content(n_changes_1, exp_not_equal="1") == "2"
    assert (
        linked_state.poll_for_content(greeting_1, exp_not_equal="Hello, Alice!")
        == "Hello, Diana!"
    )
    assert linked_state.poll_for_content(counter_button_1, exp_not_equal="0") == "3"
    # Open a third tab linked to the shared token on_load
    tab3 = tab_factory()
    tab3.get(f"{linked_state.frontend_url}room/{shared_token}")
    ss = utils.SessionStorage(tab3)
    assert AppHarness._poll_for(lambda: ss.get("token") is not None), "token not found"
    n_changes_3 = AppHarness._poll_for(lambda: tab3.find_element(By.ID, "n-changes"))
    assert n_changes_3
    greeting_3 = tab3.find_element(By.ID, "greeting")
    counter_button_3 = tab3.find_element(By.ID, "counter-button")
    assert linked_state.poll_for_content(n_changes_3, exp_not_equal="0") == "2"
    assert (
        linked_state.poll_for_content(greeting_3, exp_not_equal="Hello, world!")
        == "Hello, Diana!"
    )
    assert linked_state.poll_for_content(counter_button_3, exp_not_equal="0") == "3"
    assert tab3.find_element(By.ID, "linked-to").text == shared_token
    # Trigger a background task in all shared states, assert on final value
    # (3 clicks x 10 increments each, on top of the current value of 3 = 33).
    tab1.find_element(By.ID, "bg-button").click()
    tab2.find_element(By.ID, "bg-button").click()
    tab3.find_element(By.ID, "bg-button").click()
    assert AppHarness._poll_for(lambda: counter_button_1.text == "33")
    assert AppHarness._poll_for(lambda: counter_button_2.text == "33")
    assert AppHarness._poll_for(lambda: counter_button_3.text == "33")
    # Trigger a yield-based task in all shared states, assert on final value
    # (3 clicks x 5 increments each: 33 + 15 = 48).
    tab1.find_element(By.ID, "yield-button").click()
    tab2.find_element(By.ID, "yield-button").click()
    tab3.find_element(By.ID, "yield-button").click()
    assert AppHarness._poll_for(lambda: counter_button_1.text == "48")
    assert AppHarness._poll_for(lambda: counter_button_2.text == "48")
    assert AppHarness._poll_for(lambda: counter_button_3.text == "48")
    # Link to a new token when we're already linked
    new_shared_token = f"shared-bar-{uuid.uuid4()}"
    tab1.find_element(By.ID, "token-input").send_keys(new_shared_token, Keys.ENTER)
    assert linked_state.poll_for_content(n_changes_1, exp_not_equal="2") == "0"
    assert (
        linked_state.poll_for_content(greeting_1, exp_not_equal="Hello, Diana!")
        == "Hello, world!"
    )
    assert linked_state.poll_for_content(counter_button_1, exp_not_equal="48") == "0"
    counter_button_1.click()
    assert linked_state.poll_for_content(counter_button_1, exp_not_equal="0") == "1"
    counter_button_1.click()
    assert linked_state.poll_for_content(counter_button_1, exp_not_equal="1") == "2"
    counter_button_1.click()
    assert linked_state.poll_for_content(counter_button_1, exp_not_equal="2") == "3"
    # Ensure other tabs are unaffected
    assert n_changes_2.text == "2"
    assert greeting_2.text == "Hello, Diana!"
    assert counter_button_2.text == "48"
    assert n_changes_3.text == "2"
    assert greeting_3.text == "Hello, Diana!"
    assert counter_button_3.text == "48"
    # Link to a new state and increment the counter in the same event
    tab1.find_element(By.ID, "link-increment-button").click()
    assert linked_state.poll_for_content(counter_button_1, exp_not_equal="3") == "1"
| {
"repo_id": "reflex-dev/reflex",
"file_path": "tests/integration/test_linked_state.py",
"license": "Apache License 2.0",
"lines": 334,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
reflex-dev/reflex:reflex/utils/tasks.py | """Helpers for managing asyncio tasks."""
import asyncio
import time
from collections.abc import Callable, Coroutine
from typing import Any
from reflex.utils import console
async def _run_forever(
coro_function: Callable[..., Coroutine],
*args: Any,
suppress_exceptions: list[type[BaseException]],
exception_delay: float,
exception_limit: int,
exception_limit_window: float,
**kwargs: Any,
):
"""Wrapper to continuously run a coroutine function, suppressing certain exceptions.
Args:
coro_function: The coroutine function to run.
*args: The arguments to pass to the coroutine function.
suppress_exceptions: The exceptions to suppress.
exception_delay: The delay between retries when an exception is suppressed.
exception_limit: The maximum number of suppressed exceptions within the limit window before raising.
exception_limit_window: The time window in seconds for counting suppressed exceptions.
**kwargs: The keyword arguments to pass to the coroutine function.
"""
last_regular_loop_start = 0
exception_count = 0
while True:
# Reset the exception count when the limit window has elapsed since the last non-exception loop started.
if last_regular_loop_start + exception_limit_window < time.monotonic():
exception_count = 0
if not exception_count:
last_regular_loop_start = time.monotonic()
try:
await coro_function(*args, **kwargs)
except (asyncio.CancelledError, RuntimeError):
raise
except Exception as e:
if any(isinstance(e, ex) for ex in suppress_exceptions):
exception_count += 1
if exception_count >= exception_limit:
console.error(
f"{coro_function.__name__}: task exceeded exception limit {exception_limit} within {exception_limit_window}s: {e}"
)
raise
console.error(f"{coro_function.__name__}: task error suppressed: {e}")
await asyncio.sleep(exception_delay)
continue
raise
def ensure_task(
owner: Any,
task_attribute: str,
coro_function: Callable[..., Coroutine],
*args: Any,
suppress_exceptions: list[type[BaseException]] | None = None,
exception_delay: float = 1.0,
exception_limit: int = 5,
exception_limit_window: float = 60.0,
**kwargs: Any,
) -> asyncio.Task:
"""Ensure that a task is running for the given coroutine function.
Note: if the task is already running, args and kwargs are ignored.
Args:
owner: The owner of the task.
task_attribute: The attribute name to store/retrieve the task from the owner object.
coro_function: The coroutine function to run as a task.
suppress_exceptions: The exceptions to log and continue when running the coroutine.
exception_delay: The delay between retries when an exception is suppressed.
exception_limit: The maximum number of suppressed exceptions within the limit window before raising.
exception_limit_window: The time window in seconds for counting suppressed exceptions.
*args: The arguments to pass to the coroutine function.
**kwargs: The keyword arguments to pass to the coroutine function.
Returns:
The asyncio task running the coroutine function.
"""
if suppress_exceptions is None:
suppress_exceptions = []
if RuntimeError in suppress_exceptions:
msg = "Cannot suppress RuntimeError exceptions which may be raised by asyncio machinery."
raise RuntimeError(msg)
task = getattr(owner, task_attribute, None)
if task is None or task.done():
asyncio.get_running_loop() # Ensure we're in an event loop.
task = asyncio.create_task(
_run_forever(
coro_function,
*args,
suppress_exceptions=suppress_exceptions,
exception_delay=exception_delay,
exception_limit=exception_limit,
exception_limit_window=exception_limit_window,
**kwargs,
),
name=f"reflex_ensure_task|{type(owner).__name__}.{task_attribute}={coro_function.__name__}|{time.time()}",
)
setattr(owner, task_attribute, task)
return task
| {
"repo_id": "reflex-dev/reflex",
"file_path": "reflex/utils/tasks.py",
"license": "Apache License 2.0",
"lines": 97,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
reflex-dev/reflex:tests/units/istate/manager/test_redis.py | """Tests specific to redis state manager."""
import asyncio
import os
import time
import uuid
from collections.abc import AsyncGenerator
from typing import Any
import pytest
import pytest_asyncio
from reflex.istate.manager.redis import StateManagerRedis
from reflex.state import BaseState, _substate_key
from tests.units.mock_redis import mock_redis, real_redis
class RedisTestState(BaseState):
    """A test state for redis state manager tests."""

    # String var used to verify round-trip persistence.
    foo: str = "bar"
    # Counter var mutated by the modify/oplock tests.
    count: int = 0
@pytest.fixture
def root_state() -> type[RedisTestState]:
    """Get the root state class used by these tests.

    Returns:
        The RedisTestState class.
    """
    return RedisTestState
@pytest_asyncio.fixture(loop_scope="function", scope="function")
async def state_manager_redis(
    root_state: type[RedisTestState],
) -> AsyncGenerator[StateManagerRedis]:
    """Get a StateManagerRedis with a real or mocked redis client.

    Args:
        root_state: The root state class.

    Yields:
        The StateManagerRedis.
    """
    async with real_redis() as redis:
        if redis is None:
            # Fall back to the in-memory mock when no real redis is reachable.
            redis = mock_redis()
        state_manager = StateManagerRedis(state=root_state, redis=redis)
        test_start = time.monotonic()
        yield state_manager
        # None of the tests should have triggered a lock expiration.
        assert (time.monotonic() - test_start) * 1000 < state_manager.lock_expiration
        await state_manager.close()
@pytest.fixture
def event_log(state_manager_redis: StateManagerRedis) -> list[dict[str, Any]]:
    """Get the redis event log from the state manager.

    Args:
        state_manager_redis: The StateManagerRedis.

    Returns:
        The redis event log.
    """
    redis_client = state_manager_redis.redis
    internals = redis_client._internals  # pyright: ignore[reportAttributeAccessIssue]
    return internals["event_log"]
@pytest.mark.asyncio
async def test_basic_get_set(
    state_manager_redis: StateManagerRedis,
    root_state: type[RedisTestState],
):
    """Test basic get/set round-trip of StateManagerRedis.

    Args:
        state_manager_redis: The StateManagerRedis to test.
        root_state: The root state class.
    """
    state_manager_redis._oplock_enabled = False
    token = str(uuid.uuid4())
    fresh_state = await state_manager_redis.get_state(_substate_key(token, root_state))
    fresh_state.foo = "baz"
    fresh_state.count = 42
    await state_manager_redis.set_state(_substate_key(token, root_state), fresh_state)
    # The written values must survive a round-trip through the state manager;
    # without this check the test would pass even if set_state were a no-op.
    stored_state = await state_manager_redis.get_state(
        _substate_key(token, root_state)
    )
    assert isinstance(stored_state, root_state)
    assert stored_state.foo == "baz"
    assert stored_state.count == 42
@pytest.mark.asyncio
async def test_modify(
    state_manager_redis: StateManagerRedis,
    root_state: type[RedisTestState],
):
    """Test modifying state with StateManagerRedis.

    Args:
        state_manager_redis: The StateManagerRedis to test.
        root_state: The root state class.
    """
    state_manager_redis._oplock_enabled = False
    token = str(uuid.uuid4())
    # Initial modify should set count to 1
    async with state_manager_redis.modify_state(
        _substate_key(token, root_state)
    ) as new_state:
        new_state.count = 1
    # Subsequent modify should see count == 1 and increment it by 2
    async with state_manager_redis.modify_state(
        _substate_key(token, root_state)
    ) as new_state:
        assert isinstance(new_state, root_state)
        assert new_state.count == 1
        new_state.count += 2
    final_state = await state_manager_redis.get_state(_substate_key(token, root_state))
    assert isinstance(final_state, root_state)
    assert final_state.count == 3
@pytest.mark.asyncio
async def test_modify_oplock(
    state_manager_redis: StateManagerRedis,
    root_state: type[RedisTestState],
    event_log: list[dict[str, Any]],
):
    """Test modifying state with StateManagerRedis with optimistic locking.

    Args:
        state_manager_redis: The StateManagerRedis to test.
        root_state: The root state class.
        event_log: The redis event log.
    """
    token = str(uuid.uuid4())
    state_manager_redis._debug_enabled = True
    state_manager_redis._oplock_enabled = True
    # A second manager sharing the same redis client simulates another worker.
    state_manager_2 = StateManagerRedis(
        state=root_state, redis=state_manager_redis.redis
    )
    state_manager_2._debug_enabled = True
    state_manager_2._oplock_enabled = True
    # Initial modify should set count to 1
    async with state_manager_redis.modify_state(
        _substate_key(token, root_state),
    ) as new_state:
        new_state.count = 1
    # Initial state manager should be holding a lease
    lease_task_1 = state_manager_redis._local_leases.get(token)
    assert lease_task_1 is not None
    assert not lease_task_1.done()
    # The state should not be locked
    state_lock_1 = state_manager_redis._cached_states_locks.get(token)
    assert state_lock_1 is not None
    assert not state_lock_1.locked()
    lock_events_before = len([
        ev
        for ev in event_log
        if ev["channel"].endswith(b"lock") and ev["data"] == b"set"
    ])
    assert lock_events_before == 1
    # The second modify should NOT trigger another redis lock
    async with state_manager_redis.modify_state(
        _substate_key(token, root_state),
    ) as new_state:
        new_state.count = 2
        assert state_lock_1.locked()
    lock_events_after = len([
        ev
        for ev in event_log
        if ev["channel"].endswith(b"lock") and ev["data"] == b"set"
    ])
    assert lock_events_before == lock_events_after
    # Contend the lock from another state manager
    async with state_manager_2.modify_state(
        _substate_key(token, root_state),
    ) as new_state:
        new_state.count = 3
        state_lock_2 = state_manager_2._cached_states_locks.get(token)
        assert state_lock_2 is not None
        assert state_lock_2.locked()
    # The second manager should be holding the lease now
    lease_task_2 = state_manager_2._local_leases.get(token)
    assert lease_task_2 is not None
    assert not lease_task_2.done()
    assert not state_lock_2.locked()
    # Lease task 1 should be cancelled by the time we have modified the state
    assert lease_task_1.done()
    assert lease_task_1.cancelled()
    assert token not in state_manager_redis._local_leases
    assert token not in state_manager_redis._cached_states
    # There should have been another redis lock taken.
    lock_events_after_2 = len([
        ev
        for ev in event_log
        if ev["channel"].endswith(b"lock") and ev["data"] == b"set"
    ])
    assert lock_events_after_2 == lock_events_after + 1
    # And there should have been a lock release.
    unlock_events = len([
        ev
        for ev in event_log
        if ev["channel"].endswith(b"lock") and ev["data"] == b"del"
    ])
    assert unlock_events == 1
    # And a single token set.
    token_set_events = len([
        ev
        for ev in event_log
        if ev["channel"].endswith(root_state.get_full_name().encode())
        and ev["data"] == b"set"
    ])
    assert token_set_events == 1
    # Now close the contender to release its lease.
    await state_manager_2.close()
    # Both locks should have been released.
    unlock_events = len([
        ev
        for ev in event_log
        if ev["channel"].endswith(b"lock") and ev["data"] == b"del"
    ])
    assert unlock_events == 2
    # And both tokens should have been set.
    token_set_events = len([
        ev
        for ev in event_log
        if ev["channel"].endswith(root_state.get_full_name().encode())
        and ev["data"] == b"set"
    ])
    assert token_set_events == 2
@pytest.mark.asyncio
async def test_oplock_contention_queue(
    state_manager_redis: StateManagerRedis,
    root_state: type[RedisTestState],
    event_log: list[dict[str, Any]],
):
    """Test the oplock contention queue.

    Args:
        state_manager_redis: The StateManagerRedis to test.
        root_state: The root state class.
        event_log: The redis event log.
    """
    token = str(uuid.uuid4())
    state_manager_redis._debug_enabled = True
    state_manager_redis._oplock_enabled = True
    state_manager_2 = StateManagerRedis(
        state=root_state, redis=state_manager_redis.redis
    )
    state_manager_2._debug_enabled = True
    state_manager_2._oplock_enabled = True

    modify_started = asyncio.Event()
    modify_2_started = asyncio.Event()
    modify_1_continue = asyncio.Event()
    modify_2_continue = asyncio.Event()

    async def modify_1():
        async with state_manager_redis.modify_state(
            _substate_key(token, root_state),
        ) as new_state:
            assert isinstance(new_state, root_state)
            new_state.count += 1
            modify_started.set()
            # Hold the state open so the other manager's tasks contend.
            await modify_1_continue.wait()

    async def modify_2():
        await modify_started.wait()
        modify_2_started.set()
        async with state_manager_2.modify_state(
            _substate_key(token, root_state),
        ) as new_state:
            assert isinstance(new_state, root_state)
            new_state.count += 1
            await modify_2_continue.wait()

    async def modify_3():
        await modify_started.wait()
        modify_2_started.set()
        async with state_manager_2.modify_state(
            _substate_key(token, root_state),
        ) as new_state:
            assert isinstance(new_state, root_state)
            new_state.count += 1
            await modify_2_continue.wait()

    task_1 = asyncio.create_task(modify_1())
    task_2 = asyncio.create_task(modify_2())
    task_3 = asyncio.create_task(modify_3())
    await modify_2_started.wait()
    # Let modify 1 complete
    modify_1_continue.set()
    # Let modify 2 complete
    modify_2_continue.set()
    await task_1
    await task_2
    await task_3
    # Manager 2 still holds its lease, so its increments are not yet visible
    # via manager 1 — only modify_1's write has been flushed.
    interim_state = await state_manager_redis.get_state(
        _substate_key(token, root_state)
    )
    assert isinstance(interim_state, root_state)
    assert interim_state.count == 1
    await state_manager_2.close()
    final_state = await state_manager_redis.get_state(_substate_key(token, root_state))
    assert isinstance(final_state, root_state)
    assert final_state.count == 3
    # There should only be two lock acquisitions
    lock_events = len([
        ev
        for ev in event_log
        if ev["channel"].endswith(b"lock") and ev["data"] == b"set"
    ])
    assert lock_events == 2
@pytest.mark.asyncio
async def test_oplock_contention_no_lease(
    state_manager_redis: StateManagerRedis,
    root_state: type[RedisTestState],
    event_log: list[dict[str, Any]],
):
    """Test the oplock contention queue, when no waiters can share.

    Args:
        state_manager_redis: The StateManagerRedis to test.
        root_state: The root state class.
        event_log: The redis event log.
    """
    token = str(uuid.uuid4())
    state_manager_redis._debug_enabled = True
    state_manager_redis._oplock_enabled = True
    # Unlike test_oplock_contention_queue, each waiter uses its OWN manager,
    # so they cannot share a lease.
    state_manager_2 = StateManagerRedis(
        state=root_state, redis=state_manager_redis.redis
    )
    state_manager_2._debug_enabled = True
    state_manager_2._oplock_enabled = True
    state_manager_3 = StateManagerRedis(
        state=root_state, redis=state_manager_redis.redis
    )
    state_manager_3._debug_enabled = True
    state_manager_3._oplock_enabled = True

    modify_started = asyncio.Event()
    modify_2_started = asyncio.Event()
    modify_1_continue = asyncio.Event()
    modify_2_continue = asyncio.Event()

    async def modify_1():
        async with state_manager_redis.modify_state(
            _substate_key(token, root_state),
        ) as new_state:
            assert isinstance(new_state, root_state)
            new_state.count += 1
            modify_started.set()
            await modify_1_continue.wait()

    async def modify_2():
        await modify_started.wait()
        modify_2_started.set()
        async with state_manager_2.modify_state(
            _substate_key(token, root_state),
        ) as new_state:
            assert isinstance(new_state, root_state)
            new_state.count += 1
            await modify_2_continue.wait()

    async def modify_3():
        await modify_started.wait()
        modify_2_started.set()
        async with state_manager_3.modify_state(
            _substate_key(token, root_state),
        ) as new_state:
            assert isinstance(new_state, root_state)
            new_state.count += 1
            await modify_2_continue.wait()

    task_1 = asyncio.create_task(modify_1())
    task_2 = asyncio.create_task(modify_2())
    task_3 = asyncio.create_task(modify_3())
    await modify_2_started.wait()
    # Let modify 1 complete
    modify_1_continue.set()
    # Let modify 2 complete
    modify_2_continue.set()
    await task_1
    await task_2
    await task_3
    # First task should have always gotten a lease
    assert token in state_manager_redis._cached_states_locks
    # The 2nd or 3rd modify should have _never_ got a lease due to contention
    if token not in state_manager_2._cached_states_locks:
        assert await state_manager_3._get_local_lease(token) is not None
    elif token not in state_manager_3._cached_states_locks:
        assert await state_manager_2._get_local_lease(token) is not None
    else:
        pytest.fail("One of the contending state managers should not have a lease.")
    await state_manager_2.close()
    await state_manager_3.close()
    final_state = await state_manager_2.get_state(_substate_key(token, root_state))
    assert isinstance(final_state, root_state)
    assert final_state.count == 3
    # There should be three lock acquisitions
    lock_events = len([
        ev
        for ev in event_log
        if ev["channel"].endswith(b"lock") and ev["data"] == b"set"
    ])
    assert lock_events == 3
@pytest.mark.parametrize("racer_delay", [None, 0, 0.1])
@pytest.mark.asyncio
async def test_oplock_contention_racers(
    state_manager_redis: StateManagerRedis,
    root_state: type[RedisTestState],
    racer_delay: float | None,
):
    """Test the oplock contention queue with racers.

    Args:
        state_manager_redis: The StateManagerRedis to test.
        root_state: The root state class.
        racer_delay: The delay before the second racer starts.
    """
    token = str(uuid.uuid4())
    state_manager_redis._debug_enabled = True
    state_manager_redis._oplock_enabled = True
    state_manager_2 = StateManagerRedis(
        state=root_state, redis=state_manager_redis.redis
    )
    state_manager_2._debug_enabled = True
    state_manager_2._oplock_enabled = True
    lease_1 = None
    lease_2 = None

    async def modify_1():
        nonlocal lease_1
        async with state_manager_redis.modify_state(
            _substate_key(token, root_state),
        ) as new_state:
            lease_1 = await state_manager_redis._get_local_lease(token)
            assert isinstance(new_state, root_state)
            new_state.count += 1

    async def modify_2():
        # Optionally stagger the second racer; None means no sleep at all.
        if racer_delay is not None:
            await asyncio.sleep(racer_delay)
        nonlocal lease_2
        async with state_manager_2.modify_state(
            _substate_key(token, root_state),
        ) as new_state:
            lease_2 = await state_manager_2._get_local_lease(token)
            assert isinstance(new_state, root_state)
            new_state.count += 1

    await asyncio.gather(
        modify_1(),
        modify_2(),
    )
    # Exactly one racer should end up holding a live lease; the loser's lease
    # is either never granted or cancelled.
    if lease_1 is None or lease_1.cancelled():
        assert lease_2 is not None
        assert not lease_2.cancelled()
    elif lease_2 is None or lease_2.cancelled():
        assert lease_1 is not None
        assert not lease_1.cancelled()
    else:
        pytest.fail(
            "One lease should have been cancelled, other should still be active."
        )
@pytest.mark.asyncio
async def test_oplock_immediate_cancel(
    state_manager_redis: StateManagerRedis,
    root_state: type[RedisTestState],
    event_log: list[dict[str, Any]],
):
    """Test that immediate cancellation of modify releases oplock.

    Args:
        state_manager_redis: The StateManagerRedis to test.
        root_state: The root state class.
        event_log: The redis event log.
    """
    token = str(uuid.uuid4())
    state_manager_redis._debug_enabled = True
    state_manager_redis._oplock_enabled = True

    async def canceller():
        # Spin (yielding to the event loop) until the lease task appears,
        # then cancel it immediately.
        while (lease_task := state_manager_redis._local_leases.get(token)) is None:  # noqa: ASYNC110
            await asyncio.sleep(0)
        lease_task.cancel()

    task = asyncio.create_task(canceller())
    async with state_manager_redis.modify_state(
        _substate_key(token, root_state),
    ) as new_state:
        # The lease was cancelled, so no local lease is visible, but the
        # modification itself still proceeds.
        assert await state_manager_redis._get_local_lease(token) is None
        assert isinstance(new_state, root_state)
        new_state.count += 1
    await task
@pytest.mark.asyncio
async def test_oplock_fetch_substate(
    state_manager_redis: StateManagerRedis,
    root_state: type[RedisTestState],
    event_log: list[dict[str, Any]],
):
    """Test fetching substate with oplock enabled and partial state is cached.

    Args:
        state_manager_redis: The StateManagerRedis to test.
        root_state: The root state class.
        event_log: The redis event log.
    """

    class SubState1(root_state):
        pass

    class SubState2(root_state):
        pass

    token = str(uuid.uuid4())
    state_manager_redis._debug_enabled = True
    state_manager_redis._oplock_enabled = True

    sub1_name = SubState1.get_name()
    sub2_name = SubState2.get_name()

    # First modification touches only SubState1.
    async with state_manager_redis.modify_state(
        _substate_key(token, SubState1),
    ) as state:
        assert sub1_name in state.substates
        assert sub2_name not in state.substates

    # Touching SubState2 causes both substates to be fetched and cached.
    async with state_manager_redis.modify_state(
        _substate_key(token, SubState2),
    ) as state:
        assert sub1_name in state.substates
        assert sub2_name in state.substates

    # Subsequent modifications see the fully cached substate tree.
    async with state_manager_redis.modify_state(
        _substate_key(token, SubState1),
    ) as state:
        assert sub1_name in state.substates
        assert sub2_name in state.substates

    # Despite three modifications, only one redis lock acquisition occurred.
    lock_acquisitions = sum(
        1
        for ev in event_log
        if ev["channel"].endswith(b"lock") and ev["data"] == b"set"
    )
    assert lock_acquisitions == 1
@pytest.fixture
def short_lock_expiration(
    state_manager_redis: StateManagerRedis,
):
    """Get a StateManagerRedis with a short lock expiration for testing.

    Args:
        state_manager_redis: The base StateManagerRedis.

    Yields:
        The lock expiration time in milliseconds.
    """
    # CI runners are slow, so the "short" expiration is much longer there.
    short_expiration_ms = 4000 if os.environ.get("CI") else 300
    previous_expiration = state_manager_redis.lock_expiration
    state_manager_redis.lock_expiration = short_expiration_ms
    yield short_expiration_ms
    # Restore the original expiration during fixture teardown.
    state_manager_redis.lock_expiration = previous_expiration
@pytest.mark.asyncio
async def test_oplock_hold_oplock_after_cancel(
    state_manager_redis: StateManagerRedis,
    root_state: type[RedisTestState],
    event_log: list[dict[str, Any]],
    short_lock_expiration: int,
):
    """Test that cancelling a modify does not release the oplock prematurely.

    Args:
        state_manager_redis: The StateManagerRedis to test.
        root_state: The root state class.
        event_log: The redis event log.
        short_lock_expiration: The lock expiration time in milliseconds.
    """
    token = str(uuid.uuid4())
    state_manager_redis._debug_enabled = True
    state_manager_redis._oplock_enabled = True
    # Events to coordinate the in-flight modification with the main task.
    modify_started = asyncio.Event()
    modify_continue = asyncio.Event()
    modify_ended = asyncio.Event()

    async def modify():
        async with state_manager_redis.modify_state(
            _substate_key(token, root_state),
        ) as new_state:
            modify_started.set()
            assert isinstance(new_state, root_state)
            new_state.count += 1
            # Hold the lock open until the main task signals continuation.
            await modify_continue.wait()
        modify_ended.set()

    task = asyncio.create_task(modify())
    await modify_started.wait()
    started = time.monotonic()
    # Sleep out half the expiration window while the modify is in flight.
    await asyncio.sleep(short_lock_expiration / 1000 * 0.5)
    state_lock = state_manager_redis._cached_states_locks.get(token)
    assert state_lock is not None
    assert state_lock.locked()
    lease_task = await state_manager_redis._get_local_lease(token)
    assert lease_task is not None
    assert not lease_task.done()
    lease_task.cancel()
    # post-cancel wait should get another full lock_expiration.
    await asyncio.sleep(short_lock_expiration / 1000 * 0.8)
    assert not lease_task.done()
    modify_continue.set()
    await modify_ended.wait()
    ended = time.monotonic()
    # We should have successfully held the lock for longer than the lock expiration
    assert (ended - started) * 1000 > short_lock_expiration
    await task
    with pytest.raises(asyncio.CancelledError):
        await lease_task
    # Modify the state again, this should get a new lock and lease
    async with state_manager_redis.modify_state(
        _substate_key(token, root_state),
    ) as new_state:
        assert isinstance(new_state, root_state)
        new_state.count += 1
    # There should have been two redis lock acquisitions.
    lock_events = len([
        ev
        for ev in event_log
        if ev["channel"].endswith(b"lock") and ev["data"] == b"set"
    ])
    assert lock_events == 2
    await state_manager_redis.close()
    # Both increments should be present.
    final_state = await state_manager_redis.get_state(_substate_key(token, root_state))
    assert isinstance(final_state, root_state)
    assert final_state.count == 2
| {
"repo_id": "reflex-dev/reflex",
"file_path": "tests/units/istate/manager/test_redis.py",
"license": "Apache License 2.0",
"lines": 567,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
reflex-dev/reflex:tests/units/mock_redis.py | """Mock implementation of redis for unit testing."""
import asyncio
import contextlib
import fnmatch
import time
from collections.abc import AsyncGenerator, Callable
from typing import Any
from unittest.mock import AsyncMock, Mock
from redis.asyncio import Redis
from redis.typing import EncodableT, KeyT
from reflex.utils import prerequisites
# Error message real redis raises when a command targets a key of the wrong type.
WRONGTYPE_MESSAGE = "WRONGTYPE Operation against a key holding the wrong kind of value"
def mock_redis() -> Redis:
    """Mock the redis client with pubsub support.

    Returns:
        The mocked redis client.
    """
    # Backing store for all keys (plain values, or python sets for set keys).
    keys: dict[bytes, EncodableT | set[EncodableT]] = {}
    # Absolute time.monotonic() deadlines for keys that have a TTL.
    expire_times: dict[bytes, float] = {}
    # Keyspace-notification log consumed by the pubsub mock and by tests.
    event_log: list[dict[str, bytes]] = []
    event_log_new_events = asyncio.Event()

    def _key_bytes(key: KeyT) -> bytes:
        # Normalize str/memoryview/bytes keys to bytes, like redis does.
        if isinstance(key, str):
            return key.encode()
        if isinstance(key, memoryview):
            return key.tobytes()
        return key

    def _keyspace_event(key: KeyT, data: str | bytes):
        # Emulate redis keyspace notifications: __keyspace@1__:<key> -> op.
        if isinstance(key, str):
            key = key.encode()
        if isinstance(data, str):
            data = data.encode()
        event_log.append({"channel": b"__keyspace@1__:" + key, "data": data})
        event_log_new_events.set()

    def _expire_keys():
        # Lazily evict keys whose deadline has passed, emitting "expired" events.
        to_delete = []
        for key, expire_time in expire_times.items():
            if expire_time <= time.monotonic():
                to_delete.append(key)
        for key in to_delete:
            del keys[key]
            del expire_times[key]
            _keyspace_event(key, "expired")

    async def mock_get(key: KeyT):  # noqa: RUF029
        _expire_keys()
        return keys.get(_key_bytes(key))

    async def mock_set(  # noqa: RUF029
        key: KeyT,
        value: EncodableT,
        ex: int | None = None,
        px: int | None = None,
        nx: bool = False,
    ) -> bool:
        _expire_keys()
        key = _key_bytes(key)
        # nx: only set when the key does not already exist.
        if nx and key in keys:
            return False
        keys[key] = value
        _keyspace_event(key, "set")
        if ex is not None:
            # ex is in seconds.
            expire_times[key] = time.monotonic() + ex
            _keyspace_event(key, "expire")
        elif px is not None:
            # px is in milliseconds.
            expire_times[key] = time.monotonic() + (px / 1000)
            _keyspace_event(key, "expire")
        return True

    async def mock_sadd(key: KeyT, value: EncodableT) -> int:  # noqa: RUF029
        _expire_keys()
        key = _key_bytes(key)
        keyset = keys.setdefault(key, set())
        if not isinstance(keyset, set):
            raise TypeError(WRONGTYPE_MESSAGE)
        # Return value mirrors redis SADD: number of members actually added.
        before = len(keyset)
        keyset.add(value)
        _keyspace_event(key, "sadd")
        return len(keyset) - before

    async def mock_srem(key: KeyT, value: EncodableT) -> int:
        _expire_keys()
        keyset = keys.get(_key_bytes(key))
        if keyset is None:
            return 0
        if not isinstance(keyset, set):
            raise TypeError(WRONGTYPE_MESSAGE)
        if value in keyset:
            keyset.remove(value)
            _keyspace_event(key, "srem")
            if not keyset:
                # Like redis, an emptied set key is deleted.
                await redis_mock.delete(key)
            return 1
        return 0

    async def mock_scard(key: KeyT) -> int:  # noqa: RUF029
        _expire_keys()
        keyset = keys.get(_key_bytes(key))
        if keyset is None:
            return 0
        if not isinstance(keyset, set):
            raise TypeError(WRONGTYPE_MESSAGE)
        return len(keyset)

    async def mock_delete(key: KeyT) -> int:  # noqa: RUF029
        _expire_keys()
        key = _key_bytes(key)
        # Sentinel to distinguish "key absent" from a stored falsy value.
        Unset = object()
        expire_times.pop(key, None)
        if keys.pop(key, Unset) is not Unset:
            _keyspace_event(key, "del")
            return 1
        return 0

    async def mock_getdel(key: KeyT) -> Any:
        value = await redis_mock.get(key)
        await redis_mock.delete(key)
        return value

    async def mock_pexpire(key: KeyT, px: int, xx: bool = False) -> bool:  # noqa: RUF029
        _expire_keys()
        key = _key_bytes(key)
        if key in keys:
            # xx: only update the TTL when the key already has one.
            if not xx or key in expire_times:
                expire_times[key] = time.monotonic() + (px / 1000)
                _keyspace_event(key, "expire")
            return True
        return False

    def pipeline():
        # Minimal pipeline: queue command coroutines and await them all on
        # execute(); no atomicity is simulated.
        pipeline_mock = Mock()
        results = []

        def get_pipeline(key: KeyT):
            results.append(redis_mock.get(key=key))

        def set_pipeline(
            key: KeyT,
            value: EncodableT,
            ex: int | None = None,
            px: int | None = None,
            nx: bool = False,
        ):
            results.append(redis_mock.set(key=key, value=value, ex=ex, px=px, nx=nx))

        def sadd_pipeline(key: KeyT, value: EncodableT):
            results.append(redis_mock.sadd(key=key, value=value))

        def pexpire_pipeline(key: KeyT, px: int, xx: bool = False):
            results.append(redis_mock.pexpire(key=key, px=px, xx=xx))

        async def execute():
            _expire_keys()
            return await asyncio.gather(*results)

        pipeline_mock.get = get_pipeline
        pipeline_mock.set = set_pipeline
        pipeline_mock.sadd = sadd_pipeline
        pipeline_mock.pexpire = pexpire_pipeline
        pipeline_mock.execute = execute
        return pipeline_mock

    async def pttl(key: KeyT) -> int:  # noqa: RUF029
        _expire_keys()
        # NOTE(review): int() truncates the remaining TTL to whole seconds
        # before converting to ms, so sub-second TTLs report as 0 -- confirm
        # this coarseness is acceptable for the tests relying on it.
        return (
            int(expire_times.get(_key_bytes(key), time.monotonic()) - time.monotonic())
            * 1000
        )

    @contextlib.asynccontextmanager
    async def pubsub():  # noqa: RUF029
        watch_patterns = {}
        event_log_pointer = 0

        async def psubscribe(  # noqa: RUF029
            *patterns: str,
            **handlers: Callable[[dict[str, bytes]], None],
        ):
            nonlocal event_log_pointer, watch_patterns
            # NOTE(review): starting at len(event_log) - 1 replays the most
            # recent pre-existing event to the new subscriber -- confirm this
            # off-by-one is intended.
            event_log_pointer = len(event_log) - 1
            for pattern in patterns:
                # Positional patterns have no handler; events are yielded raw.
                watch_patterns[pattern] = None
                event_log.append({"channel": b"psubscribe", "data": pattern.encode()})
                event_log_new_events.set()
            for pattern, handler in handlers.items():
                watch_patterns[pattern] = handler
                event_log.append({"channel": b"psubscribe", "data": pattern.encode()})
                event_log_new_events.set()

        async def listen() -> AsyncGenerator[dict[str, Any] | None, None]:
            nonlocal event_log_pointer
            while True:
                if event_log_pointer >= len(event_log):
                    # Caught up; block until a new event is appended.
                    await event_log_new_events.wait()
                    event_log_new_events.clear()
                    continue
                event = event_log[event_log_pointer]
                channel_str = event["channel"].decode()
                event_log_pointer += 1
                for pattern, handler in watch_patterns.items():
                    if fnmatch.fnmatch(channel_str, pattern):
                        if handler is not None:
                            res = handler(event)
                            if asyncio.iscoroutine(res):
                                res = await res
                            # Yields None to indicate handled
                            yield None
                        else:
                            yield event

        pubsub_mock = AsyncMock()
        pubsub_mock.psubscribe = psubscribe
        pubsub_mock.listen = listen
        yield pubsub_mock

    redis_mock = AsyncMock(spec=Redis)
    redis_mock.get = mock_get
    redis_mock.set = mock_set
    redis_mock.delete = mock_delete
    redis_mock.getdel = mock_getdel
    redis_mock.sadd = mock_sadd
    redis_mock.srem = mock_srem
    redis_mock.scard = mock_scard
    redis_mock.pexpire = mock_pexpire
    redis_mock.pipeline = pipeline
    redis_mock.pttl = pttl
    redis_mock.pubsub = pubsub
    redis_mock.config_set = AsyncMock()
    redis_mock.get_connection_kwargs = Mock(return_value={"db": 1})
    # Expose internals so tests can assert on raw storage and the event log.
    redis_mock._internals = {
        "keys": keys,
        "expire_times": expire_times,
        "event_log": event_log,
    }
    return redis_mock
@contextlib.asynccontextmanager
async def real_redis() -> AsyncGenerator[Redis | None]:
    """Get a real redis client for testing.

    Yields:
        The redis client.
    """
    redis = prerequisites.get_redis()
    if redis is None:
        # Redis is not configured/available; callers must handle None.
        yield None
        return
    # Capture keyspace notifications so tests can assert on the event stream,
    # mirroring the mock client's `_internals` attribute.
    captured_events = []
    object.__setattr__(
        redis,
        "_internals",
        {
            "event_log": captured_events,
        },
    )
    db_index = redis.get_connection_kwargs().get("db", 0)

    async def _watch_keyspace():
        async with redis.pubsub() as pubsub:
            await pubsub.psubscribe(f"__keyspace@{db_index}__:*")
            async for message in pubsub.listen():
                if message is not None:
                    captured_events.append(message)

    watcher = asyncio.create_task(_watch_keyspace())
    try:
        yield redis
    finally:
        # Stop the watcher and swallow its expected cancellation.
        watcher.cancel()
        with contextlib.suppress(asyncio.CancelledError):
            await watcher
| {
"repo_id": "reflex-dev/reflex",
"file_path": "tests/units/mock_redis.py",
"license": "Apache License 2.0",
"lines": 248,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
reflex-dev/reflex:tests/units/utils/test_tasks.py | import asyncio
import contextlib
import os
from unittest.mock import Mock
import pytest
from reflex.utils.tasks import ensure_task
# Whether the tests are running in a CI environment (used to relax timings).
CI = bool(os.environ.get("CI", False))
class NotSuppressedError(Exception):
    """Raised by test coroutines to verify that unlisted exceptions propagate."""
@pytest.mark.asyncio
async def test_ensure_task_suppresses_exceptions():
    """Test that ensure_task suppresses specified exceptions."""
    invocations = 0

    async def flaky():  # noqa: RUF029
        nonlocal invocations
        invocations += 1
        if invocations < 3:
            # Suppressed: ensure_task should retry after these.
            raise ValueError("Intentional error")  # noqa: EM101
        if invocations > 4:
            # Not in suppress_exceptions: must propagate to the awaiter.
            raise NotSuppressedError("Should not be suppressed")  # noqa: EM101
        return "Success"

    owner = Mock()
    ensure_task(
        owner=owner,
        task_attribute="task",
        coro_function=flaky,
        suppress_exceptions=[ValueError],
        exception_delay=0,
        exception_limit=5,
        exception_limit_window=1.0,
    )
    with contextlib.suppress(asyncio.CancelledError), pytest.raises(NotSuppressedError):
        await asyncio.wait_for(owner.task, timeout=1)
    # Two suppressed failures, two successes, then the fatal error.
    assert invocations == 5
@pytest.mark.asyncio
async def test_ensure_task_limit_window():
    """Test that ensure_task raises after exceeding exception limit within the limit window.

    The coroutine always raises a suppressed ValueError; once the number of
    suppressed exceptions reaches exception_limit within
    exception_limit_window, ensure_task re-raises instead of retrying.
    """
    call_count = 0

    async def faulty_coro():  # noqa: RUF029
        nonlocal call_count
        call_count += 1
        raise ValueError("Intentional error")  # noqa: EM101

    owner = Mock()
    ensure_task(
        owner=owner,
        task_attribute="task",
        coro_function=faulty_coro,
        suppress_exceptions=[ValueError],
        exception_delay=0,
        exception_limit=3,
        exception_limit_window=1.0,
    )
    with contextlib.suppress(asyncio.CancelledError), pytest.raises(ValueError):
        await asyncio.wait_for(owner.task, timeout=1)
    # Should have raised after exceeding the limit
    assert call_count == 3
@pytest.mark.asyncio
async def test_ensure_task_limit_window_passed():
    """Test that ensure_task resets exception limit past the limit window.

    Each iteration sleeps longer than exception_limit_window, so the
    suppressed-exception counter never reaches exception_limit and the task
    keeps retrying until the (unsuppressed) RuntimeError escapes.
    """
    call_count = 0

    async def faulty_coro():
        nonlocal call_count
        call_count += 1
        # Sleep longer than the limit window so prior failures age out.
        await asyncio.sleep(0.5 if CI else 0.05)
        if call_count > 3:
            raise RuntimeError("Test Passed")  # noqa: EM101
        raise ValueError("Should have been suppressed")  # noqa: EM101

    owner = Mock()
    ensure_task(
        owner=owner,
        task_attribute="task",
        coro_function=faulty_coro,
        suppress_exceptions=[ValueError],
        exception_delay=0,
        exception_limit=2,
        exception_limit_window=0.1 if CI else 0.01,
    )
    with contextlib.suppress(asyncio.CancelledError), pytest.raises(RuntimeError):
        await asyncio.wait_for(owner.task, timeout=3)
    # Three suppressed failures plus the final unsuppressed RuntimeError.
    assert call_count == 4
def test_ensure_task_no_runtime_error_suppression():
    """Test that ensure_task raises if RuntimeError is in suppress_exceptions."""
    with pytest.raises(RuntimeError, match="Cannot suppress RuntimeError"):
        ensure_task(
            owner=Mock(),
            task_attribute="task",
            coro_function=asyncio.sleep,
            suppress_exceptions=[RuntimeError],
            exception_delay=0,
            exception_limit=5,
            exception_limit_window=1.0,
        )
| {
"repo_id": "reflex-dev/reflex",
"file_path": "tests/units/utils/test_tasks.py",
"license": "Apache License 2.0",
"lines": 94,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
reflex-dev/reflex:reflex/istate/manager/disk.py | """A state manager that stores states on disk."""
import asyncio
import contextlib
import dataclasses
import functools
import time
from collections.abc import AsyncIterator
from hashlib import md5
from pathlib import Path
from typing_extensions import Unpack, override
from reflex.environment import environment
from reflex.istate.manager import (
StateManager,
StateModificationContext,
_default_token_expiration,
)
from reflex.state import BaseState, _split_substate_key, _substate_key
from reflex.utils import console, path_ops, prerequisites
from reflex.utils.misc import run_in_thread
@dataclasses.dataclass(frozen=True)
class QueueItem:
    """An item in the write queue.

    Represents one pending deferred write of a client's state to disk.
    """

    # The client token whose state should be written.
    token: str
    # The state object to serialize to disk.
    state: BaseState
    # time.time() when the write was enqueued; used for debounce ordering.
    timestamp: float
@dataclasses.dataclass
class StateManagerDisk(StateManager):
    """A state manager that stores states on disk.

    Writes are debounced: states are queued in memory and flushed to pickle
    files by a background task, unless the debounce interval is 0, in which
    case writes happen immediately.
    """

    # The mapping of client ids to states.
    states: dict[str, BaseState] = dataclasses.field(default_factory=dict)
    # The mutex ensures the dict of mutexes is updated exclusively
    _state_manager_lock: asyncio.Lock = dataclasses.field(default=asyncio.Lock())
    # The dict of mutexes for each client
    _states_locks: dict[str, asyncio.Lock] = dataclasses.field(
        default_factory=dict,
        init=False,
    )
    # The token expiration time (s).
    token_expiration: int = dataclasses.field(default_factory=_default_token_expiration)
    # Last time a token was touched.
    _token_last_touched: dict[str, float] = dataclasses.field(
        default_factory=dict,
        init=False,
    )
    # Pending writes
    _write_queue: dict[str, QueueItem] = dataclasses.field(
        default_factory=dict,
        init=False,
    )
    # Background task that flushes the write queue and purges expired tokens.
    _write_queue_task: asyncio.Task | None = None
    # How long to wait before flushing a queued write (0 disables debouncing).
    _write_debounce_seconds: float = dataclasses.field(
        default=environment.REFLEX_STATE_MANAGER_DISK_DEBOUNCE_SECONDS.get()
    )

    def __post_init__(self):
        """Create a new state manager."""
        path_ops.mkdir(self.states_directory)
        self._purge_expired_states()

    @functools.cached_property
    def states_directory(self) -> Path:
        """Get the states directory.

        Returns:
            The states directory.
        """
        return prerequisites.get_states_dir()

    def _purge_expired_states(self):
        """Purge expired states from the disk."""
        for path in path_ops.ls(self.states_directory):
            # check path is a pickle file
            if path.suffix != ".pkl":
                continue
            # load last edited field from file
            last_edited = path.stat().st_mtime
            # check if the file is older than the token expiration time
            if time.time() - last_edited > self.token_expiration:
                # remove the file
                path.unlink()

    def token_path(self, token: str) -> Path:
        """Get the path for a token.

        Args:
            token: The token to get the path for.

        Returns:
            The path for the token.
        """
        # Hash the token so the filename is filesystem-safe and fixed-length.
        return (
            self.states_directory / f"{md5(token.encode()).hexdigest()}.pkl"
        ).absolute()

    async def load_state(self, token: str) -> BaseState | None:
        """Load a state object based on the provided token.

        Args:
            token: The token used to identify the state object.

        Returns:
            The loaded state object or None.
        """
        token_path = self.token_path(token)
        if token_path.exists():
            try:
                with token_path.open(mode="rb") as file:
                    return BaseState._deserialize(fp=file)
            except Exception:
                # Best-effort: an unreadable/corrupted pickle is treated as
                # missing state rather than crashing the request.
                pass
        return None

    async def populate_substates(
        self, client_token: str, state: BaseState, root_state: BaseState
    ):
        """Populate the substates of a state object.

        Recursively loads each substate from disk where available, falling
        back to a fresh instance from the root state tree.

        Args:
            client_token: The client token.
            state: The state object to populate.
            root_state: The root state object.
        """
        for substate in state.get_substates():
            substate_token = _substate_key(client_token, substate)
            fresh_instance = await root_state.get_state(substate)
            instance = await self.load_state(substate_token)
            if instance is not None:
                # Ensure all substates exist, even if they weren't serialized previously.
                instance.substates = fresh_instance.substates
            else:
                instance = fresh_instance
            state.substates[substate.get_name()] = instance
            instance.parent_state = state
            await self.populate_substates(client_token, instance, root_state)

    @override
    async def get_state(
        self,
        token: str,
    ) -> BaseState:
        """Get the state for a token.

        Args:
            token: The token to get the state for.

        Returns:
            The state for the token.
        """
        client_token = _split_substate_key(token)[0]
        # Touch the token to keep it from expiring while it is in use.
        self._token_last_touched[client_token] = time.time()
        root_state = self.states.get(client_token)
        if root_state is not None:
            # Retrieved state from memory.
            return root_state
        # Deserialize root state from disk.
        root_state = await self.load_state(_substate_key(client_token, self.state))
        # Create a new root state tree with all substates instantiated.
        fresh_root_state = self.state(_reflex_internal_init=True)
        if root_state is None:
            root_state = fresh_root_state
        else:
            # Ensure all substates exist, even if they were not serialized previously.
            root_state.substates = fresh_root_state.substates
        self.states[client_token] = root_state
        await self.populate_substates(client_token, root_state, root_state)
        return root_state

    async def set_state_for_substate(self, client_token: str, substate: BaseState):
        """Set the state for a substate.

        Serializes the substate (and, recursively, its children) to disk if
        it was touched since the last write.

        Args:
            client_token: The client token.
            substate: The substate to set.
        """
        substate_token = _substate_key(client_token, substate)
        if substate._get_was_touched():
            substate._was_touched = False  # Reset the touched flag after serializing.
            pickle_state = substate._serialize()
            if pickle_state:
                if not self.states_directory.exists():
                    self.states_directory.mkdir(parents=True, exist_ok=True)
                # Offload the blocking file write to a thread.
                await run_in_thread(
                    lambda: self.token_path(substate_token).write_bytes(pickle_state),
                )
        for substate_substate in substate.substates.values():
            await self.set_state_for_substate(client_token, substate_substate)

    async def _process_write_queue_delay(self):
        """Wait for the debounce period before processing the write queue again."""
        now = time.time()
        if self._write_queue:
            # There are still items in the queue, schedule another run.
            next_write_in = max(
                0,
                min(
                    self._write_debounce_seconds - (now - item.timestamp)
                    for item in self._write_queue.values()
                ),
            )
            await asyncio.sleep(next_write_in)
        elif self._write_debounce_seconds > 0:
            # No items left, wait a bit before checking again.
            await asyncio.sleep(self._write_debounce_seconds)
        else:
            # Debounce is disabled, so sleep until the next token expiration.
            oldest_token_last_touch = min(
                self._token_last_touched.values(), default=now
            )
            next_expiration_in = self.token_expiration - (now - oldest_token_last_touch)
            await asyncio.sleep(next_expiration_in)

    async def _process_write_queue(self):
        """Long running task that checks for states to write to disk.

        Raises:
            asyncio.CancelledError: When the task is cancelled.
        """
        while True:
            try:
                now = time.time()
                # sort the _write_queue by oldest timestamp and exclude items younger than debounce time
                items_to_write = sorted(
                    (
                        item
                        for item in self._write_queue.values()
                        if now - item.timestamp >= self._write_debounce_seconds
                    ),
                    key=lambda item: item.timestamp,
                )
                for item in items_to_write:
                    token = item.token
                    client_token, _ = _split_substate_key(token)
                    # Pop from the queue before writing so a concurrent
                    # set_state can re-enqueue the token.
                    await self.set_state_for_substate(
                        client_token, self._write_queue.pop(token).state
                    )
                # Check for expired states to purge.
                for token, last_touched in list(self._token_last_touched.items()):
                    if now - last_touched > self.token_expiration:
                        self._token_last_touched.pop(token)
                        self.states.pop(token, None)
                        await run_in_thread(self._purge_expired_states)
                await self._process_write_queue_delay()
            except asyncio.CancelledError:  # noqa: PERF203
                # Flush remaining writes before allowing cancellation to
                # propagate, so no queued state is lost on shutdown.
                await self._flush_write_queue()
                raise
            except Exception as e:
                console.error(f"Error processing write queue: {e!r}")
                if e.args == ("cannot schedule new futures after shutdown",):
                    # Event loop is shutdown, nothing else we can really do...
                    return
                await self._process_write_queue_delay()

    async def _flush_write_queue(self):
        """Flush any remaining items in the write queue to disk."""
        outstanding_items = list(self._write_queue.values())
        n_outstanding_items = len(outstanding_items)
        self._write_queue.clear()
        # When the task is cancelled, write all remaining items to disk.
        console.debug(
            f"StateManagerDisk._flush_write_queue: writing {n_outstanding_items} remaining items to disk"
        )
        for item in outstanding_items:
            token = item.token
            client_token, _ = _split_substate_key(token)
            await self.set_state_for_substate(
                client_token,
                item.state,
            )
        console.debug(
            f"StateManagerDisk._flush_write_queue: Finished writing {n_outstanding_items} items"
        )

    async def _schedule_process_write_queue(self):
        """Schedule the write queue processing task if not already running."""
        # Double-checked creation under the manager lock to avoid spawning
        # multiple processor tasks.
        if self._write_queue_task is None or self._write_queue_task.done():
            async with self._state_manager_lock:
                if self._write_queue_task is None or self._write_queue_task.done():
                    self._write_queue_task = asyncio.create_task(
                        self._process_write_queue(),
                        name="StateManagerDisk|WriteQueueProcessor",
                    )
                    await asyncio.sleep(0)  # Yield to allow the task to start.

    @override
    async def set_state(
        self, token: str, state: BaseState, **context: Unpack[StateModificationContext]
    ):
        """Set the state for a token.

        Args:
            token: The token to set the state for.
            state: The state to set.
            context: The state modification context.
        """
        client_token, _ = _split_substate_key(token)
        if self._write_debounce_seconds > 0:
            # Deferred write to reduce disk IO overhead.
            if client_token not in self._write_queue:
                # NOTE(review): if an item is already queued, the new state
                # object is not enqueued -- assumes both refer to the same
                # in-place-mutated root state instance; confirm.
                self._write_queue[client_token] = QueueItem(
                    token=client_token,
                    state=state,
                    timestamp=time.time(),
                )
        else:
            # Immediate write to disk.
            await self.set_state_for_substate(client_token, state)
        # Ensure the processing task is scheduled to handle expirations and any deferred writes.
        await self._schedule_process_write_queue()

    @override
    @contextlib.asynccontextmanager
    async def modify_state(
        self, token: str, **context: Unpack[StateModificationContext]
    ) -> AsyncIterator[BaseState]:
        """Modify the state for a token while holding exclusive lock.

        Args:
            token: The token to modify the state for.
            context: The state modification context.

        Yields:
            The state for the token.
        """
        # Disk state manager ignores the substate suffix and always returns the top-level state.
        client_token, _ = _split_substate_key(token)
        # Double-checked creation of the per-client mutex.
        if client_token not in self._states_locks:
            async with self._state_manager_lock:
                if client_token not in self._states_locks:
                    self._states_locks[client_token] = asyncio.Lock()
        async with self._states_locks[client_token]:
            state = await self.get_state(token)
            yield state
            await self.set_state(token, state, **context)

    async def close(self):
        """Close the state manager, flushing any pending writes to disk."""
        async with self._state_manager_lock:
            if self._write_queue_task:
                self._write_queue_task.cancel()
                with contextlib.suppress(asyncio.CancelledError):
                    await self._write_queue_task
                self._write_queue_task = None
| {
"repo_id": "reflex-dev/reflex",
"file_path": "reflex/istate/manager/disk.py",
"license": "Apache License 2.0",
"lines": 314,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
reflex-dev/reflex:reflex/istate/manager/memory.py | """A state manager that stores states in memory."""
import asyncio
import contextlib
import dataclasses
from collections.abc import AsyncIterator
from typing_extensions import Unpack, override
from reflex.istate.manager import StateManager, StateModificationContext
from reflex.state import BaseState, _split_substate_key
@dataclasses.dataclass
class StateManagerMemory(StateManager):
    """A state manager that stores states in memory."""

    # The mapping of client ids to states.
    states: dict[str, BaseState] = dataclasses.field(default_factory=dict)

    # The mutex ensures the dict of mutexes is updated exclusively
    _state_manager_lock: asyncio.Lock = dataclasses.field(default=asyncio.Lock())

    # The dict of mutexes for each client
    _states_locks: dict[str, asyncio.Lock] = dataclasses.field(
        default_factory=dict, init=False
    )

    @override
    async def get_state(self, token: str) -> BaseState:
        """Get the state for a token.

        Args:
            token: The token to get the state for.

        Returns:
            The state for the token.
        """
        # Only the client portion of the token matters; any substate suffix
        # is ignored and the top-level state is returned.
        client_token = _split_substate_key(token)[0]
        state = self.states.get(client_token)
        if state is None:
            state = self.state(_reflex_internal_init=True)
            self.states[client_token] = state
        return state

    @override
    async def set_state(
        self,
        token: str,
        state: BaseState,
        **context: Unpack[StateModificationContext],
    ):
        """Set the state for a token.

        Args:
            token: The token to set the state for.
            state: The state to set.
            context: The state modification context.
        """
        client_token, _ = _split_substate_key(token)
        self.states[client_token] = state

    @override
    @contextlib.asynccontextmanager
    async def modify_state(
        self, token: str, **context: Unpack[StateModificationContext]
    ) -> AsyncIterator[BaseState]:
        """Modify the state for a token while holding exclusive lock.

        Args:
            token: The token to modify the state for.
            context: The state modification context.

        Yields:
            The state for the token.
        """
        client_token = _split_substate_key(token)[0]
        # Double-checked creation of the per-client mutex.
        if client_token not in self._states_locks:
            async with self._state_manager_lock:
                self._states_locks.setdefault(client_token, asyncio.Lock())
        async with self._states_locks[client_token]:
            yield await self.get_state(client_token)
| {
"repo_id": "reflex-dev/reflex",
"file_path": "reflex/istate/manager/memory.py",
"license": "Apache License 2.0",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
reflex-dev/reflex:reflex/components/radix/primitives/dialog.py | """Interactive components provided by @radix-ui/react-dialog."""
from typing import Any, ClassVar
from reflex.components.component import ComponentNamespace
from reflex.components.el import elements
from reflex.constants.compiler import MemoizationMode
from reflex.event import EventHandler, no_args_event_spec, passthrough_event_spec
from reflex.vars.base import Var
from .base import RadixPrimitiveComponent, RadixPrimitiveTriggerComponent
class DialogElement(RadixPrimitiveComponent):
    """Base class for all @radix-ui/react-dialog components."""

    # Pin the underlying npm package and version for all dialog parts.
    library = "@radix-ui/react-dialog@1.1.15"
class DialogRoot(DialogElement):
    """Root component for Dialog. Contains all parts of the dialog."""

    tag = "Root"

    alias = "RadixPrimitiveDialogRoot"

    # The controlled open state of the dialog.
    open: Var[bool]

    # Fired when the open state changes.
    on_open_change: EventHandler[passthrough_event_spec(bool)]

    # The open state of the dialog when it is initially rendered. Use when you do not need to control its open state.
    default_open: Var[bool]

    # The modality of the dialog. When set to true, interaction with outside elements will be disabled and only dialog content will be visible to screen readers.
    modal: Var[bool]

    # Valid child component names.
    _valid_children: ClassVar[list[str]] = [
        "DialogTrigger",
        "DialogPortal",
    ]
class DialogPortal(DialogElement):
    """Portal component for Dialog. Portals overlay and content into the body (or a custom container)."""

    tag = "Portal"

    alias = "RadixPrimitiveDialogPortal"

    # Used to force mounting when more control is needed. Useful when controlling animation with React animation libraries. If used on this part, it will be inherited by Dialog.Overlay and Dialog.Content.
    force_mount: Var[bool]

    # Specify a container element to portal the content into.
    container: Var[Any]

    # Valid parent component names.
    _valid_parents: ClassVar[list[str]] = ["DialogRoot"]
class DialogOverlay(DialogElement):
    """A layer that covers the inert portion of the view when the dialog is open."""
    tag = "Overlay"
    alias = "RadixPrimitiveDialogOverlay"
    # Change the default rendered element for the one passed as a child, merging their props and behavior.
    as_child: Var[bool]
    # Used to force mounting when more control is needed. Useful when controlling animation with React animation libraries. It inherits from Dialog.Portal.
    force_mount: Var[bool]
    # The overlay must be rendered inside the dialog portal.
    _valid_parents: ClassVar[list[str]] = ["DialogPortal"]
class DialogTrigger(DialogElement, RadixPrimitiveTriggerComponent):
    """Trigger an action or event, to open a Dialog modal."""
    tag = "Trigger"
    alias = "RadixPrimitiveDialogTrigger"
    # Change the default rendered element for the one passed as a child, merging their props and behavior.
    as_child: Var[bool]
    # Disable recursive memoization so the trigger's children re-render with it.
    _memoization_mode = MemoizationMode(recursive=False)
    # The trigger must be a direct child of the dialog root.
    _valid_parents: ClassVar[list[str]] = ["DialogRoot"]
class DialogContent(elements.Div, DialogElement):
    """Content component to display inside a Dialog modal."""
    # Inherits elements.Div, so standard div props/styles apply to the content.
    tag = "Content"
    alias = "RadixPrimitiveDialogContent"
    # Used to force mounting when more control is needed. Useful when controlling animation with React animation libraries. It inherits from Dialog.Portal.
    force_mount: Var[bool]
    # Change the default rendered element for the one passed as a child, merging their props and behavior.
    as_child: Var[bool]
    # Fired when the dialog is opened.
    on_open_auto_focus: EventHandler[no_args_event_spec]
    # Fired when the dialog is closed.
    on_close_auto_focus: EventHandler[no_args_event_spec]
    # Fired when the escape key is pressed.
    on_escape_key_down: EventHandler[no_args_event_spec]
    # Fired when the pointer is down outside the dialog.
    on_pointer_down_outside: EventHandler[no_args_event_spec]
    # Fired when the pointer interacts outside the dialog.
    on_interact_outside: EventHandler[no_args_event_spec]
    # The content must be rendered inside the dialog portal.
    _valid_parents: ClassVar[list[str]] = ["DialogPortal"]
class DialogTitle(DialogElement):
    """Title component to display inside a Dialog modal."""
    tag = "Title"
    alias = "RadixPrimitiveDialogTitle"
    # Change the default rendered element for the one passed as a child, merging their props and behavior.
    as_child: Var[bool]
class DialogDescription(DialogElement):
    """Description component to display inside a Dialog modal."""
    tag = "Description"
    alias = "RadixPrimitiveDialogDescription"
    # Change the default rendered element for the one passed as a child, merging their props and behavior.
    as_child: Var[bool]
class DialogClose(DialogElement, RadixPrimitiveTriggerComponent):
    """Close button component to close an open Dialog modal."""
    tag = "Close"
    alias = "RadixPrimitiveDialogClose"
    # Change the default rendered element for the one passed as a child, merging their props and behavior.
    as_child: Var[bool]
class Dialog(ComponentNamespace):
    """Dialog components namespace."""
    # Calling the namespace itself creates the root component.
    root = __call__ = staticmethod(DialogRoot.create)
    portal = staticmethod(DialogPortal.create)
    trigger = staticmethod(DialogTrigger.create)
    title = staticmethod(DialogTitle.create)
    overlay = staticmethod(DialogOverlay.create)
    content = staticmethod(DialogContent.create)
    description = staticmethod(DialogDescription.create)
    close = staticmethod(DialogClose.create)
# Public entry point for the dialog primitive namespace.
dialog = Dialog()
| {
"repo_id": "reflex-dev/reflex",
"file_path": "reflex/components/radix/primitives/dialog.py",
"license": "Apache License 2.0",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
reflex-dev/reflex:reflex/vars/color.py | """Vars for colors."""
import dataclasses
from reflex.constants.colors import Color
from reflex.vars.base import (
CachedVarOperation,
LiteralVar,
Var,
VarData,
cached_property_no_lock,
get_python_literal,
)
from reflex.vars.number import ternary_operation
from reflex.vars.sequence import ConcatVarOperation, LiteralStringVar, StringVar
# Registers Color as the python type this var class represents.
class ColorVar(StringVar[Color], python_types=Color):
    """Base class for immutable color vars."""
@dataclasses.dataclass(
    eq=False,
    frozen=True,
    slots=True,
)
class LiteralColorVar(CachedVarOperation, LiteralVar, ColorVar):
    """Base class for immutable literal color vars."""
    # The concrete Color value this literal wraps; its color/alpha/shade parts
    # may be plain Python literals or themselves be Vars.
    _var_value: Color = dataclasses.field(default_factory=lambda: Color(color="black"))
    @classmethod
    def _get_all_var_data_without_creating_var(
        cls,
        value: Color,
    ) -> VarData | None:
        # Merge var data from each Color part; parts that are plain literals
        # (str color, bool alpha, int shade) contribute no var data.
        return VarData.merge(
            LiteralStringVar._get_all_var_data_without_creating_var(value.color)
            if isinstance(value.color, str)
            else value.color._get_all_var_data(),
            value.alpha._get_all_var_data()
            if not isinstance(value.alpha, bool)
            else None,
            value.shade._get_all_var_data()
            if not isinstance(value.shade, int)
            else None,
        )
    @classmethod
    def create(
        cls,
        value: Color,
        _var_type: type[Color] | None = None,
        _var_data: VarData | None = None,
    ) -> ColorVar:
        """Create a var from a string value.
        Args:
            value: The value to create the var from.
            _var_type: The type of the var.
            _var_data: Additional hooks and imports associated with the Var.
        Returns:
            The var.
        """
        # _js_expr is intentionally empty: the rendered expression is computed
        # lazily by _cached_var_name.
        return cls(
            _js_expr="",
            _var_type=_var_type or Color,
            _var_data=_var_data,
            _var_value=value,
        )
    def __hash__(self) -> int:
        """Get the hash of the var.
        Returns:
            The hash of the var.
        """
        # Hash only the color parts (plus the class name) so equal literal
        # colors hash alike regardless of cached state.
        return hash((
            self.__class__.__name__,
            self._var_value.color,
            self._var_value.alpha,
            self._var_value.shade,
        ))
    @cached_property_no_lock
    def _cached_var_name(self) -> str:
        """The name of the var.
        Returns:
            The name of the var.
        """
        alpha = self._var_value.alpha
        # Alpha renders as an "a" infix; when alpha is itself a Var, emit a
        # ternary that resolves to "a" or "" at runtime.
        alpha = (
            ternary_operation(
                alpha,
                LiteralStringVar.create("a"),
                LiteralStringVar.create(""),
            )
            if isinstance(alpha, Var)
            else LiteralStringVar.create("a" if alpha else "")
        )
        shade = self._var_value.shade
        shade = (
            shade.to_string(use_json=False)
            if isinstance(shade, Var)
            else LiteralStringVar.create(str(shade))
        )
        # Builds a CSS custom-property reference: var(--<color>-<a?><shade>).
        return str(
            ConcatVarOperation.create(
                LiteralStringVar.create("var(--"),
                self._var_value.color,
                LiteralStringVar.create("-"),
                alpha,
                shade,
                LiteralStringVar.create(")"),
            )
        )
    @cached_property_no_lock
    def _cached_get_all_var_data(self) -> VarData | None:
        """Get all the var data.
        Returns:
            The var data.
        """
        # Same merge as _get_all_var_data_without_creating_var, plus this
        # instance's own _var_data.
        return VarData.merge(
            LiteralStringVar._get_all_var_data_without_creating_var(
                self._var_value.color
            )
            if isinstance(self._var_value.color, str)
            else self._var_value.color._get_all_var_data(),
            self._var_value.alpha._get_all_var_data()
            if not isinstance(self._var_value.alpha, bool)
            else None,
            self._var_value.shade._get_all_var_data()
            if not isinstance(self._var_value.shade, int)
            else None,
            self._var_data,
        )
    def json(self) -> str:
        """Get the JSON representation of the var.
        Returns:
            The JSON representation of the var.
        Raises:
            TypeError: If the color is not a valid color.
        """
        color, alpha, shade = map(
            get_python_literal,
            (self._var_value.color, self._var_value.alpha, self._var_value.shade),
        )
        # get_python_literal returns None for non-literal Vars, which cannot
        # be serialized to a static string.
        if color is None or alpha is None or shade is None:
            msg = "Cannot serialize color that contains non-literal vars."
            raise TypeError(msg)
        if (
            not isinstance(color, str)
            or not isinstance(alpha, bool)
            or not isinstance(shade, int)
        ):
            msg = "Color is not a valid color."
            raise TypeError(msg)
        return f"var(--{color}-{'a' if alpha else ''}{shade})"
| {
"repo_id": "reflex-dev/reflex",
"file_path": "reflex/vars/color.py",
"license": "Apache License 2.0",
"lines": 145,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
reflex-dev/reflex:reflex/utils/frontend_skeleton.py | """This module provides utility functions to initialize the frontend skeleton."""
import json
import random
import re
from pathlib import Path
from reflex import constants
from reflex.compiler import templates
from reflex.config import Config, get_config
from reflex.environment import environment
from reflex.utils import console, path_ops
from reflex.utils.prerequisites import get_project_hash, get_web_dir
from reflex.utils.registry import get_npm_registry
def initialize_gitignore(
    gitignore_file: Path = constants.GitIgnore.FILE,
    files_to_ignore: set[str] | list[str] = constants.GitIgnore.DEFAULTS,
):
    """Initialize the template .gitignore file.

    New entries are prepended to whatever the file already contains; existing
    entries are never removed or reordered.

    Args:
        gitignore_file: The .gitignore file to create.
        files_to_ignore: The files to add to the .gitignore file.
    """
    existing: list[str] = []
    if gitignore_file.exists():
        existing = [line.strip() for line in gitignore_file.read_text().splitlines()]
    # Nothing to do when the file already matches the requested entries.
    if files_to_ignore == existing:
        console.debug(f"{gitignore_file} already up to date.")
        return
    new_entries = [line for line in files_to_ignore if line not in existing]
    combined = new_entries + existing
    gitignore_file.touch(exist_ok=True)
    console.debug(f"Creating {gitignore_file}")
    gitignore_file.write_text("\n".join(combined) + "\n")
def initialize_requirements_txt() -> bool:
    """Initialize the requirements.txt file.
    If absent and no pyproject.toml file exists, generate one for the user.
    If the requirements.txt does not have reflex as dependency,
    generate a requirement pinning current version and append to
    the requirements.txt file.
    Returns:
        True if the user has to update the requirements.txt file.
    Raises:
        SystemExit: If the requirements.txt file cannot be read or written to.
    """
    requirements_file_path = Path(constants.RequirementsTxt.FILE)
    # A pyproject.toml without a requirements.txt means dependencies are
    # managed there; signal the caller instead of creating the file.
    if (
        not requirements_file_path.exists()
        and Path(constants.PyprojectToml.FILE).exists()
    ):
        return True
    requirements_file_path.touch(exist_ok=True)
    # Try the platform default encoding first, then utf-8.
    for encoding in [None, "utf-8"]:
        try:
            content = requirements_file_path.read_text(encoding)
            break
        except UnicodeDecodeError:
            continue
        except Exception as e:
            console.error(f"Failed to read {requirements_file_path} due to {e}.")
            raise SystemExit(1) from None
    else:
        # Neither encoding could decode the file: leave it to the user.
        return True
    for line in content.splitlines():
        # NOTE(review): this also matches names like "reflex-foo" ("-" is a
        # non-alphanumeric char) and misses a bare "reflex" line — confirm
        # whether stricter matching (e.g. version-specifier chars only) is wanted.
        if re.match(r"^reflex[^a-zA-Z0-9]", line):
            console.debug(f"{requirements_file_path} already has reflex as dependency.")
            return False
    console.debug(
        f"Appending {constants.RequirementsTxt.DEFAULTS_STUB} to {requirements_file_path}"
    )
    # `encoding` is whichever value successfully decoded the file above.
    with requirements_file_path.open("a", encoding=encoding) as f:
        f.write(
            "\n" + constants.RequirementsTxt.DEFAULTS_STUB + constants.Reflex.VERSION
        )
    return False
def initialize_web_directory():
    """Initialize the web directory on reflex init.

    Copies the web template into the .web dir, then (re)generates the
    package.json, bunfig, .npmrc, public dir, react-router and vite configs,
    and finally reflex.json.
    """
    console.log("Initializing the web directory.")
    # Reuse the hash if one is already created, so we don't over-write it when running reflex init
    project_hash = get_project_hash()
    console.debug(f"Copying {constants.Templates.Dirs.WEB_TEMPLATE} to {get_web_dir()}")
    path_ops.copy_tree(constants.Templates.Dirs.WEB_TEMPLATE, str(get_web_dir()))
    console.debug("Initializing the web directory.")
    initialize_package_json()
    console.debug("Initializing the bun config file.")
    initialize_bun_config()
    console.debug("Initializing the .npmrc file.")
    initialize_npmrc()
    console.debug("Initializing the public directory.")
    path_ops.mkdir(get_web_dir() / constants.Dirs.PUBLIC)
    console.debug("Initializing the react-router.config.js file.")
    update_react_router_config()
    console.debug("Initializing the vite.config.js file.")
    initialize_vite_config()
    console.debug("Initializing the reflex.json file.")
    # Initialize the reflex json file.
    init_reflex_json(project_hash=project_hash)
def update_react_router_config(prerender_routes: bool = False):
    """Update react-router.config.js config from Reflex config.
    Args:
        prerender_routes: Whether to enable prerendering of routes.
    """
    config_path = get_web_dir() / constants.ReactRouter.CONFIG_FILE
    rendered = _update_react_router_config(
        get_config(), prerender_routes=prerender_routes
    )
    # Rewriting this file triggers a full server reload, so only touch it
    # when the rendered content actually differs from what is on disk.
    existing = config_path.read_text() if config_path.exists() else ""
    if existing != rendered:
        config_path.write_text(rendered)
def _update_react_router_config(config: Config, prerender_routes: bool = False):
basename = "/" + (config.frontend_path or "").strip("/")
if not basename.endswith("/"):
basename += "/"
react_router_config = {
"basename": basename,
"future": {
"unstable_optimizeDeps": True,
},
"ssr": False,
}
if prerender_routes:
react_router_config["prerender"] = True
react_router_config["build"] = constants.Dirs.BUILD_DIR
return f"export default {json.dumps(react_router_config)};"
def _compile_package_json():
    """Render the package.json contents from the template and constants.

    Returns the rendered string; writing to disk is done by
    initialize_package_json.
    """
    config = get_config()
    return templates.package_json_template(
        scripts={
            "dev": constants.PackageJson.Commands.DEV,
            "export": constants.PackageJson.Commands.EXPORT,
            # The prod command depends on where the frontend is mounted.
            "prod": constants.PackageJson.Commands.get_prod_command(
                config.frontend_path
            ),
        },
        dependencies=constants.PackageJson.DEPENDENCIES,
        dev_dependencies=constants.PackageJson.DEV_DEPENDENCIES,
        overrides=constants.PackageJson.OVERRIDES,
    )
def initialize_package_json():
    """Render and write in .web the package.json file."""
    target = get_web_dir() / constants.PackageJson.PATH
    rendered = _compile_package_json()
    target.write_text(rendered)
def _compile_vite_config(config: Config):
    """Render the vite.config.js contents from config and environment flags."""
    # Vite's `base` must carry exactly one trailing slash.
    stripped_path = config.frontend_path.strip("/")
    base = f"/{stripped_path}/" if stripped_path else "/"
    return templates.vite_config_template(
        base=base,
        hmr=environment.VITE_HMR.get(),
        force_full_reload=environment.VITE_FORCE_FULL_RELOAD.get(),
        experimental_hmr=environment.VITE_EXPERIMENTAL_HMR.get(),
        sourcemap=environment.VITE_SOURCEMAP.get(),
        allowed_hosts=config.vite_allowed_hosts,
    )
def initialize_vite_config():
    """Render and write in .web the vite.config.js file using Reflex config."""
    target = get_web_dir() / constants.ReactRouter.VITE_CONFIG_FILE
    rendered = _compile_vite_config(get_config())
    target.write_text(rendered)
def initialize_bun_config():
    """Initialize the bun config file.

    A bunfig.toml in the project root takes precedence over the generated
    default (which only sets the npm registry).
    """
    target = get_web_dir() / constants.Bun.CONFIG_PATH
    custom_config = Path(constants.Bun.CONFIG_PATH)
    if custom_config.exists():
        content = custom_config.read_text()
        console.info(f"Copying custom bunfig.toml inside {get_web_dir()} folder")
    else:
        content = constants.Bun.DEFAULT_CONFIG.format(registry=get_npm_registry())
    target.write_text(content)
def initialize_npmrc():
    """Initialize the .npmrc file.

    A .npmrc in the project root takes precedence over the generated default
    (which only sets the npm registry).
    """
    target = get_web_dir() / constants.Node.CONFIG_PATH
    custom_config = Path(constants.Node.CONFIG_PATH)
    if custom_config.exists():
        content = custom_config.read_text()
        console.info(f"Copying custom .npmrc inside {get_web_dir()} folder")
    else:
        content = constants.Node.DEFAULT_CONFIG.format(registry=get_npm_registry())
    target.write_text(content)
def init_reflex_json(project_hash: int | None):
    """Write the hash of the Reflex project to a REFLEX_JSON.
    Reuse the hash if one is already created, therefore do not
    overwrite it every time we run the reflex init command
    .
    Args:
        project_hash: The app hash.
    """
    if project_hash is None:
        # No previous hash: generate a fresh random identifier.
        project_hash = random.getrandbits(128)
        console.debug(f"Setting project hash to {project_hash}.")
    else:
        console.debug(f"Project hash is already set to {project_hash}.")
    path_ops.update_json_file(
        get_web_dir() / constants.Reflex.JSON,
        {
            "version": constants.Reflex.VERSION,
            "project_hash": project_hash,
        },
    )
| {
"repo_id": "reflex-dev/reflex",
"file_path": "reflex/utils/frontend_skeleton.py",
"license": "Apache License 2.0",
"lines": 205,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
reflex-dev/reflex:reflex/utils/js_runtimes.py | """This module provides utilities for managing JavaScript runtimes like Node.js and Bun."""
import functools
import os
import tempfile
from collections.abc import Sequence
from pathlib import Path
from packaging import version
from reflex import constants
from reflex.config import Config, get_config
from reflex.environment import environment
from reflex.utils import console, net, path_ops, processes
from reflex.utils.decorator import cached_procedure, once
from reflex.utils.exceptions import SystemPackageMissingError
from reflex.utils.prerequisites import get_web_dir, windows_check_onedrive_in_path
def check_node_version() -> bool:
    """Check the version of Node.js.
    Returns:
        Whether the version of Node.js is valid.
    """
    detected = get_node_version()
    if detected is None:
        return False
    return detected >= version.parse(constants.Node.MIN_VERSION)
def _get_version_of_executable(
    executable_path: Path | None, version_arg: str = "--version"
) -> version.Version | None:
    """Get the version of an executable.
    Args:
        executable_path: The path to the executable.
        version_arg: The argument to pass to the executable to get its version.
    Returns:
        The version of the executable.
    """
    if executable_path is None:
        return None
    try:
        result = processes.new_process([executable_path, version_arg], run=True)
        if result.returncode != 0:
            console.error(
                f"Failed to run {executable_path} {version_arg} to get version. Return code: {result.returncode}. Standard error: {result.stderr!r}."
            )
            return None
        return version.parse(result.stdout.strip())
    # FileNotFoundError: the executable vanished; TypeError: stdout was None.
    except (FileNotFoundError, TypeError):
        return None
    except version.InvalidVersion as e:
        # The tool printed something that isn't a PEP 440 version string.
        console.warn(
            f"The detected version of {executable_path} ({e.args[0]}) is not valid. Defaulting to None."
        )
        return None
@once
def get_node_version() -> version.Version | None:
    """Get the version of node.
    Cached by @once: the node binary is probed at most one time per process.
    Returns:
        The version of node.
    """
    return _get_version_of_executable(path_ops.get_node_path())
def get_bun_version(bun_path: Path | None = None) -> version.Version | None:
    """Get the version of bun.
    Args:
        bun_path: The path to the bun executable. Defaults to the discovered
            bun path when not given.
    Returns:
        The version of bun.
    """
    return _get_version_of_executable(bun_path or path_ops.get_bun_path())
def npm_escape_hatch() -> bool:
    """If the user sets REFLEX_USE_NPM, prefer npm over bun.
    Returns:
        If the user has set REFLEX_USE_NPM.
    """
    return environment.REFLEX_USE_NPM.get()
def prefer_npm_over_bun() -> bool:
    """Check if npm should be preferred over bun.
    npm wins when the user opted in via REFLEX_USE_NPM, or on Windows when
    the project lives under a OneDrive path.
    Returns:
        If npm should be preferred over bun.
    """
    return npm_escape_hatch() or (
        constants.IS_WINDOWS and windows_check_onedrive_in_path()
    )
def get_nodejs_compatible_package_managers(
    raise_on_none: bool = True,
) -> Sequence[str]:
    """Get the package manager executable for installation. Typically, bun is used for installation.
    Args:
        raise_on_none: Whether to raise an error if the package manager is not found.
    Returns:
        The path to the package manager.
    Raises:
        FileNotFoundError: If the package manager is not found and raise_on_none is True.
    """
    bun_path = path_ops.get_bun_path()
    npm_path = path_ops.get_npm_path()
    bun_manager = str(bun_path) if bun_path else None
    npm_manager = str(npm_path) if npm_path else None
    # Preference order depends on platform / user opt-in.
    if prefer_npm_over_bun():
        ordered = [npm_manager, bun_manager]
    else:
        ordered = [bun_manager, npm_manager]
    available = [manager for manager in ordered if manager]
    if not available and raise_on_none:
        msg = "Bun or npm not found. You might need to rerun `reflex init` or install either."
        raise FileNotFoundError(msg)
    return available
def is_outdated_nodejs_installed():
    """Check if the installed Node.js version is outdated.
    A missing node installation is NOT considered outdated (returns False).
    Returns:
        If the installed Node.js version is outdated.
    """
    current_version = get_node_version()
    if current_version is not None and current_version < version.parse(
        constants.Node.MIN_VERSION
    ):
        console.warn(
            f"Your version ({current_version}) of Node.js is out of date. Upgrade to {constants.Node.MIN_VERSION} or higher."
        )
        return True
    return False
def get_js_package_executor(raise_on_none: bool = False) -> Sequence[Sequence[str]]:
    """Get the paths to package managers for running commands. Ordered by preference.
    This is currently identical to get_install_package_managers, but may change in the future.
    Args:
        raise_on_none: Whether to raise an error if no package managers is not found.
    Returns:
        The paths to the package managers as a list of lists, where each list is the command to run and its arguments.
    Raises:
        FileNotFoundError: If no package managers are found and raise_on_none is True.
    """
    bun_path = path_ops.get_bun_path()
    npm_path = path_ops.get_npm_path()
    bun_executor = None
    if bun_path:
        bun_executor = [str(bun_path)]
        # With an outdated system node, pass --bun so bun substitutes itself.
        if is_outdated_nodejs_installed():
            bun_executor.append("--bun")
    npm_executor = [str(npm_path)] if npm_path else None
    if prefer_npm_over_bun():
        ordered = [npm_executor, bun_executor]
    else:
        ordered = [bun_executor, npm_executor]
    executors = [executor for executor in ordered if executor]
    if not executors and raise_on_none:
        msg = "Bun or npm not found. You might need to rerun `reflex init` or install either."
        raise FileNotFoundError(msg)
    return executors
def download_and_run(url: str, *args, show_status: bool = False, **env):
    """Download and run a script.
    Args:
        url: The url of the script.
        args: The arguments to pass to the script.
        show_status: Whether to show the status of the script.
        env: The environment variables to use.
    Raises:
        SystemExit: If the script fails to download.
    """
    import httpx

    # Download the script
    console.debug(f"Downloading {url}")
    try:
        response = net.get(url)
        response.raise_for_status()
    except httpx.HTTPError as e:
        console.error(
            f"Failed to download bun install script. You can install or update bun manually from https://bun.com \n{e}"
        )
        raise SystemExit(1) from None
    # Save the script to a temporary file. Use delete=False and close the
    # handle before writing/executing: on Windows a NamedTemporaryFile cannot
    # be reopened by path while the creating handle is still open, and the
    # previous `with` block also kept the handle open while bash ran the
    # script. The file is removed explicitly in the finally block instead.
    tmp = tempfile.NamedTemporaryFile(delete=False)
    script = Path(tmp.name)
    try:
        tmp.close()
        script.write_text(response.text)
        # Run the script with the merged environment.
        env = {**os.environ, **env}
        process = processes.new_process(["bash", str(script), *args], env=env)
        show = processes.show_status if show_status else processes.show_logs
        show(f"Installing {url}", process)
    finally:
        # Best-effort cleanup of the downloaded script.
        script.unlink(missing_ok=True)
def install_bun():
    """Install bun onto the user's system.
    Raises:
        SystemPackageMissingError: If "unzip" is missing.
        SystemExit: If REFLEX_USE_NPM is set but Node.js is not installed.
    """
    # Honor the npm escape hatch: with REFLEX_USE_NPM set, bun is never
    # installed, but node must then exist.
    if npm_escape_hatch():
        if get_node_version() is not None:
            console.info(
                "Skipping bun installation as REFLEX_USE_NPM is set. Using npm instead."
            )
            return
        console.error(
            "REFLEX_USE_NPM is set, but Node.js is not installed. Please install Node.js to use npm."
        )
        raise SystemExit(1)
    bun_path = path_ops.get_bun_path()
    # Skip if bun is already installed.
    if (
        bun_path
        and (current_version := get_bun_version(bun_path=bun_path))
        and current_version >= version.parse(constants.Bun.MIN_VERSION)
    ):
        console.debug("Skipping bun installation as it is already installed.")
        return
    # A user-provided system bun is validated rather than replaced.
    if bun_path and path_ops.use_system_bun():
        validate_bun(bun_path=bun_path)
        return
    if constants.IS_WINDOWS:
        # Windows uses the official powershell installer script.
        processes.new_process(
            [
                "powershell",
                "-c",
                f"irm {constants.Bun.WINDOWS_INSTALL_URL}|iex",
            ],
            env={
                "BUN_INSTALL": str(constants.Bun.ROOT_PATH),
                "BUN_VERSION": constants.Bun.VERSION,
            },
            shell=True,
            run=True,
            show_logs=console.is_debug(),
        )
    else:
        # The POSIX install script requires unzip to unpack the archive.
        if path_ops.which("unzip") is None:
            msg = "unzip"
            raise SystemPackageMissingError(msg)
        # Run the bun install script.
        download_and_run(
            constants.Bun.INSTALL_URL,
            f"bun-v{constants.Bun.VERSION}",
            BUN_INSTALL=str(constants.Bun.ROOT_PATH),
            BUN_VERSION=str(constants.Bun.VERSION),
        )
def validate_bun(bun_path: Path | None = None):
    """Validate bun if a custom bun path is specified to ensure the bun version meets requirements.
    Args:
        bun_path: The path to the bun executable. If None, the default bun path is used.
    Raises:
        SystemExit: If custom specified bun does not exist or does not meet requirements.
    """
    bun_path = bun_path or path_ops.get_bun_path()
    if bun_path is None:
        return
    # Tell the user when a non-default bun binary is being used.
    if not path_ops.samefile(bun_path, constants.Bun.DEFAULT_PATH):
        console.info(f"Using custom Bun path: {bun_path}")
    detected = get_bun_version(bun_path=bun_path)
    if detected is None:
        console.error(
            "Failed to obtain bun version. Make sure the specified bun path in your config is correct."
        )
        raise SystemExit(1)
    minimum = version.parse(constants.Bun.MIN_VERSION)
    if detected < minimum:
        console.warn(
            f"Reflex requires bun version {constants.Bun.MIN_VERSION} or higher to run, but the detected version is "
            f"{detected}. If you have specified a custom bun path in your config, make sure to provide one "
            f"that satisfies the minimum version requirement. You can upgrade bun by running [bold]bun upgrade[/bold]."
        )
def validate_frontend_dependencies(init: bool = True):
    """Validate frontend dependencies to ensure they meet requirements.
    Args:
        init: whether running `reflex init`
    Raises:
        SystemExit: If the package manager is invalid.
    """
    if not init:
        # Outside `reflex init`, a usable package manager must already exist.
        try:
            get_js_package_executor(raise_on_none=True)
        except FileNotFoundError as e:
            console.error(f"Failed to find a valid package manager due to {e}.")
            raise SystemExit(1) from None
    # Node version only matters when npm is the preferred package manager.
    if not prefer_npm_over_bun():
        return
    if check_node_version():
        return
    node_version = get_node_version()
    console.error(
        f"Reflex requires node version {constants.Node.MIN_VERSION} or higher to run, but the detected version is {node_version}",
    )
    raise SystemExit(1)
def remove_existing_bun_installation():
    """Remove existing bun installation."""
    console.debug("Removing existing bun installation.")
    # NOTE(review): existence is checked against the configured bun_path, but
    # removal targets constants.Bun.ROOT_PATH — confirm these always refer to
    # the same installation.
    if Path(get_config().bun_path).exists():
        path_ops.rm(constants.Bun.ROOT_PATH)
# The whole install is skipped when neither the package set nor the config
# changed since the last run; the cache marker lives inside the .web dir.
@cached_procedure(
    cache_file_path=lambda: get_web_dir() / "reflex.install_frontend_packages.cached",
    payload_fn=lambda packages, config: f"{sorted(packages)!r},{config.json()}",
)
def install_frontend_packages(packages: set[str], config: Config):
    """Installs the base and custom frontend packages.
    Args:
        packages: A list of package names to be installed.
        config: The config object.
    Example:
        >>> install_frontend_packages(["react", "react-dom"], get_config())
    """
    install_package_managers = get_nodejs_compatible_package_managers(
        raise_on_none=True
    )
    # Optionally disable TLS verification for the package manager processes.
    env = (
        {
            "NODE_TLS_REJECT_UNAUTHORIZED": "0",
        }
        if environment.SSL_NO_VERIFY.get()
        else {}
    )
    primary_package_manager = install_package_managers[0]
    fallbacks = install_package_managers[1:]
    # All invocations share the same cwd/env and fall back to the secondary
    # package manager on failure.
    run_package_manager = functools.partial(
        processes.run_process_with_fallbacks,
        fallbacks=fallbacks,
        analytics_enabled=True,
        cwd=get_web_dir(),
        shell=constants.IS_WINDOWS,
        env=env,
    )
    # 1) Install whatever package.json already declares.
    run_package_manager(
        [primary_package_manager, "install", "--legacy-peer-deps"],
        show_status_message="Installing base frontend packages",
    )
    # 2) Collect plugin-contributed dependencies; dev deps are installed
    #    separately with the -d flag.
    development_deps: set[str] = set()
    for plugin in config.plugins:
        development_deps.update(plugin.get_frontend_development_dependencies())
        packages.update(plugin.get_frontend_dependencies())
    if development_deps:
        run_package_manager(
            [
                primary_package_manager,
                "add",
                "--legacy-peer-deps",
                "-d",
                *development_deps,
            ],
            show_status_message="Installing frontend development dependencies",
        )
    # Install custom packages defined in frontend_packages
    if packages:
        run_package_manager(
            [primary_package_manager, "add", "--legacy-peer-deps", *packages],
            show_status_message="Installing frontend packages from config and components",
        )
| {
"repo_id": "reflex-dev/reflex",
"file_path": "reflex/utils/js_runtimes.py",
"license": "Apache License 2.0",
"lines": 335,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
reflex-dev/reflex:reflex/utils/rename.py | """This module provides utilities for renaming directories and files in a Reflex app."""
import re
import sys
from pathlib import Path
from reflex import constants
from reflex.config import get_config
from reflex.utils import console
from reflex.utils.misc import get_module_path
def rename_path_up_tree(full_path: str | Path, old_name: str, new_name: str) -> Path:
    """Rename all instances of `old_name` in the path (file and directories) to `new_name`.
    The renaming stops when we reach the directory containing `rxconfig.py`.
    Args:
        full_path: The full path to start renaming from.
        old_name: The name to be replaced.
        new_name: The replacement name.
    Returns:
        The updated path after renaming.
    """
    current_path = Path(full_path)
    new_path = None
    while True:
        directory, base = current_path.parent, current_path.name
        # Stop renaming when we reach the root dir (which contains rxconfig.py)
        if current_path.is_dir() and (current_path / "rxconfig.py").exists():
            new_path = current_path
            break
        # Compare against the stem so `old_name.py` files are renamed too.
        if old_name == base.removesuffix(constants.Ext.PY):
            new_base = base.replace(old_name, new_name)
            new_path = directory / new_base
            # Rename on disk before continuing the walk upward.
            current_path.rename(new_path)
            console.debug(f"Renamed {current_path} -> {new_path}")
            current_path = new_path
        else:
            new_path = current_path
        # Move up the directory tree
        current_path = directory
    return new_path
def rename_app(new_app_name: str, loglevel: constants.LogLevel):
    """Rename the app directory.
    Args:
        new_app_name: The new name for the app.
        loglevel: The log level to use.
    Raises:
        SystemExit: If the command is not ran in the root dir or the app module cannot be imported.
    """
    # Set the log level.
    console.set_log_level(loglevel)
    if not constants.Config.FILE.exists():
        console.error(
            "No rxconfig.py found. Make sure you are in the root directory of your app."
        )
        raise SystemExit(1)
    # Make the project root importable so the app module can be resolved.
    sys.path.insert(0, str(Path.cwd()))
    config = get_config()
    module_path = get_module_path(config.module)
    if module_path is None:
        console.error(f"Could not find module {config.module}.")
        raise SystemExit(1)
    console.info(f"Renaming app directory to {new_app_name}.")
    # Rewrite imports/app_name in source files first, then rename the
    # module path itself up to the project root.
    process_directory(
        Path.cwd(),
        config.app_name,
        new_app_name,
        exclude_dirs=[constants.Dirs.WEB, constants.Dirs.APP_ASSETS],
    )
    rename_path_up_tree(module_path, config.app_name, new_app_name)
    console.success(f"App directory renamed to [bold]{new_app_name}[/bold].")
def rename_imports_and_app_name(file_path: str | Path, old_name: str, new_name: str):
"""Rename imports the file using string replacement as well as app_name in rxconfig.py.
Args:
file_path: The file to process.
old_name: The old name to replace.
new_name: The new name to use.
"""
file_path = Path(file_path)
content = file_path.read_text()
# Replace `from old_name.` or `from old_name` with `from new_name`
content = re.sub(
rf"\bfrom {re.escape(old_name)}(\b|\.|\s)",
lambda match: f"from {new_name}{match.group(1)}",
content,
)
# Replace `import old_name` with `import new_name`
content = re.sub(
rf"\bimport {re.escape(old_name)}\b",
f"import {new_name}",
content,
)
# Replace `app_name="old_name"` in rx.Config
content = re.sub(
rf'\bapp_name\s*=\s*["\']{re.escape(old_name)}["\']',
f'app_name="{new_name}"',
content,
)
# Replace positional argument `"old_name"` in rx.Config
content = re.sub(
rf'\brx\.Config\(\s*["\']{re.escape(old_name)}["\']',
f'rx.Config("{new_name}"',
content,
)
file_path.write_text(content)
def process_directory(
    directory: str | Path,
    old_name: str,
    new_name: str,
    exclude_dirs: list | None = None,
    extensions: list | None = None,
):
    """Process files with specified extensions in a directory, excluding specified directories.
    Args:
        directory: The root directory to process.
        old_name: The old name to replace.
        new_name: The new name to use.
        exclude_dirs: List of directory names to exclude. Defaults to None.
        extensions: List of file extensions to process.
    """
    # Default includes .md files, typically used in reflex-web.
    extensions = extensions or [constants.Ext.PY, constants.Ext.MD]
    allowed_suffixes = {ext.lstrip(".") for ext in extensions}
    root = Path(directory)
    excluded_roots = {root / name for name in (exclude_dirs or [])}
    for candidate in root.glob("**/*"):
        if not candidate.is_file():
            continue
        if candidate.suffix.lstrip(".") not in allowed_suffixes:
            continue
        resolved = candidate.resolve()
        # Skip anything under an excluded directory (e.g. .web, assets).
        if any(resolved.is_relative_to(excluded) for excluded in excluded_roots):
            continue
        rename_imports_and_app_name(resolved, old_name, new_name)
| {
"repo_id": "reflex-dev/reflex",
"file_path": "reflex/utils/rename.py",
"license": "Apache License 2.0",
"lines": 134,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
reflex-dev/reflex:reflex/utils/templates.py | """This module provides utilities for managing Reflex app templates."""
import dataclasses
import shutil
import tempfile
import zipfile
from pathlib import Path
from urllib.parse import urlparse
from reflex import constants
from reflex.config import get_config
from reflex.utils import console, net, path_ops, redir
@dataclasses.dataclass(frozen=True)
class Template:
    """A template for a Reflex app."""

    # Unique template identifier (e.g. "blank").
    name: str
    # Human-readable description shown in the template picker.
    description: str
    # URL of the zip archive with the template source ("" for built-in options).
    code_url: str
def create_config(app_name: str):
    """Write a fresh rxconfig file for the given app.

    Args:
        app_name: The name of the app.
    """
    # Import here to avoid circular imports.
    from reflex.compiler import templates

    console.debug(f"Creating {constants.Config.FILE}")
    rendered = templates.rxconfig_template(app_name=app_name)
    constants.Config.FILE.write_text(rendered)
def initialize_app_directory(
    app_name: str,
    template_name: str = constants.Templates.DEFAULT,
    template_code_dir_name: str | None = None,
    template_dir: Path | None = None,
):
    """Initialize the app directory on reflex init.

    Args:
        app_name: The name of the app.
        template_name: The name of the template to use.
        template_code_dir_name: The name of the code directory in the template.
        template_dir: The directory of the template source files.

    Raises:
        SystemExit: If template_name, template_code_dir_name, template_dir combination is not supported.
    """
    console.log("Initializing the app directory.")

    # By default, use the blank template from local assets.
    if template_name == constants.Templates.DEFAULT:
        # The default template derives its code dir and source dir internally;
        # passing either explicitly is treated as a caller error.
        if template_code_dir_name is not None or template_dir is not None:
            console.error(
                f"Only {template_name=} should be provided, got {template_code_dir_name=}, {template_dir=}."
            )
            raise SystemExit(1)
        template_code_dir_name = constants.Templates.Dirs.CODE
        template_dir = Path(constants.Templates.Dirs.BASE, "apps", template_name)
    else:
        # Non-default templates must supply both the code dir name and the
        # source directory.
        if template_code_dir_name is None or template_dir is None:
            console.error(
                f"For `{template_name}` template, `template_code_dir_name` and `template_dir` should both be provided."
            )
            raise SystemExit(1)

    console.debug(f"Using {template_name=} {template_dir=} {template_code_dir_name=}.")

    # Remove __pycache__ dirs in template directory and current directory.
    for pycache_dir in [
        *template_dir.glob("**/__pycache__"),
        *Path.cwd().glob("**/__pycache__"),
    ]:
        shutil.rmtree(pycache_dir, ignore_errors=True)

    for file in template_dir.iterdir():
        # Copy the file to current directory but keep the name the same.
        path_ops.cp(str(file), file.name)

    # Rename the template app to the app name.
    path_ops.mv(template_code_dir_name, app_name)
    # The main module file is named after the template; rename it after the app.
    path_ops.mv(
        Path(app_name) / (template_name + constants.Ext.PY),
        Path(app_name) / (app_name + constants.Ext.PY),
    )

    # Fix up the imports.
    path_ops.find_replace(
        app_name,
        f"from {template_name}",
        f"from {app_name}",
    )
def initialize_default_app(app_name: str):
    """Initialize the default app.

    Args:
        app_name: The name of the app.
    """
    # A default init is a fresh rxconfig plus the blank local template.
    create_config(app_name)
    initialize_app_directory(app_name)
def create_config_init_app_from_remote_template(app_name: str, template_url: str):
    """Create new rxconfig and initialize app using a remote template.

    Args:
        app_name: The name of the app.
        template_url: The path to the template source code as a zip file.

    Raises:
        SystemExit: If any download, file operations fail or unexpected zip file format.
    """
    import httpx

    # Create a temp directory for the zip download.
    try:
        temp_dir = tempfile.mkdtemp()
    except OSError as ose:
        console.error(f"Failed to create temp directory for download: {ose}")
        raise SystemExit(1) from None

    # Use httpx GET with redirects to download the zip file.
    zip_file_path: Path = Path(temp_dir) / "template.zip"
    try:
        # Note: following redirects can be risky. We only allow this for reflex built templates at the moment.
        response = net.get(template_url, follow_redirects=True)
        console.debug(f"Server responded download request: {response}")
        response.raise_for_status()
    except httpx.HTTPError as he:
        console.error(f"Failed to download the template: {he}")
        raise SystemExit(1) from None
    try:
        zip_file_path.write_bytes(response.content)
        console.debug(f"Downloaded the zip to {zip_file_path}")
    except OSError as ose:
        console.error(f"Unable to write the downloaded zip to disk {ose}")
        raise SystemExit(1) from None

    # Create a temp directory for the zip extraction.
    try:
        unzip_dir = Path(tempfile.mkdtemp())
    except OSError as ose:
        console.error(f"Failed to create temp directory for extracting zip: {ose}")
        raise SystemExit(1) from None

    try:
        # Use a context manager so the archive's file handle is always closed
        # (previously the open ZipFile was leaked on success).
        with zipfile.ZipFile(zip_file_path) as zf:
            zf.extractall(path=unzip_dir)
        # The zip file downloaded from github looks like:
        # repo-name-branch/**/*, so we need to remove the top level directory.
    except Exception as uze:
        console.error(f"Failed to unzip the template: {uze}")
        raise SystemExit(1) from None

    if len(subdirs := list(unzip_dir.iterdir())) != 1:
        console.error(f"Expected one directory in the zip, found {subdirs}")
        raise SystemExit(1)

    # iterdir() already yields full paths; joining onto unzip_dir again only
    # worked because mkdtemp() returns an absolute path. Use the entry directly.
    template_dir = subdirs[0]
    console.debug(f"Template folder is located at {template_dir}")

    # Move the rxconfig file here first.
    path_ops.mv(str(template_dir / constants.Config.FILE), constants.Config.FILE)
    new_config = get_config(reload=True)

    # Get the template app's name from rxconfig in case it is different than
    # the source code repo name on github.
    template_name = new_config.app_name

    create_config(app_name)
    initialize_app_directory(
        app_name,
        template_name=template_name,
        template_code_dir_name=template_name,
        template_dir=template_dir,
    )

    req_file = Path("requirements.txt")
    if req_file.exists() and len(req_file.read_text().splitlines()) > 1:
        console.info(
            "Run `pip install -r requirements.txt` to install the required python packages for this template."
        )

    # Clean up the temp directories.
    shutil.rmtree(temp_dir)
    shutil.rmtree(unzip_dir)
def validate_and_create_app_using_remote_template(
    app_name: str, template: str, templates: dict[str, Template]
):
    """Validate and create an app using a remote template.

    Args:
        app_name: The name of the app.
        template: The name of the template.
        templates: The available templates.

    Raises:
        SystemExit: If the template is not found.
    """
    template_url = None
    if template in templates:
        # Known template: requires an authenticated reflex user.
        from reflex_cli.v2.utils import hosting

        token = hosting.authenticated_token()
        if not token or not token[0]:
            console.print(
                f"Please use `reflex login` to access the '{template}' template."
            )
            raise SystemExit(3)
        template_url = templates[template].code_url
    else:
        parsed = urlparse(template)
        # Check if the template is a github repo.
        if parsed.hostname == "github.com":
            repo_path = parsed.path.strip("/").removesuffix(".git")
            template_url = f"https://github.com/{repo_path}/archive/main.zip"
        else:
            console.error(f"Template `{template}` not found or invalid.")
            raise SystemExit(1)

    if template_url is None:
        return

    create_config_init_app_from_remote_template(
        app_name=app_name, template_url=template_url
    )
def fetch_app_templates(version: str) -> dict[str, Template]:
    """Fetch a dict of templates from the templates repo using github API.

    Args:
        version: The version of the templates to fetch.

    Returns:
        The dict of templates.
    """

    def _release_for(tag: str) -> dict | None:
        # Look the release up by its version tag; absent releases return None.
        resp = net.get(f"{constants.Reflex.RELEASES_URL}/tags/v{tag}")
        if resp.status_code == 404:
            return None
        resp.raise_for_status()
        return resp.json()

    release = _release_for(version)
    if release is None:
        console.warn(f"No templates known for version {version}")
        return {}

    # Map each release asset name to its download URL.
    assets = {
        asset["name"]: asset["browser_download_url"]
        for asset in release.get("assets", [])
    }
    metadata_url = assets.get("templates.json")
    if not metadata_url:
        console.warn(f"Templates metadata not found for version {version}")
        return {}

    raw_templates = (
        net.get(metadata_url, follow_redirects=True).json().get("templates", [])
    )

    valid_keys = {field.name for field in dataclasses.fields(Template)}
    result: dict[str, Template] = {}
    for entry in raw_templates:
        if entry["name"] == "blank":
            code_url = ""
        else:
            code_url = assets.get(f"{entry['name']}.zip")
        # Skip hidden templates and templates whose zip asset is missing.
        if entry["hidden"] or code_url is None:
            continue
        result[entry["name"]] = Template(
            **{k: v for k, v in entry.items() if k in valid_keys},
            code_url=code_url,
        )
    return result
def fetch_remote_templates(
    template: str,
) -> tuple[str, dict[str, Template]]:
    """Fetch the available remote templates.

    Args:
        template: The name of the template.

    Returns:
        The selected template and the available templates.
    """
    templates: dict[str, Template] = {}
    try:
        templates = fetch_app_templates(constants.Reflex.VERSION)
    except Exception as e:
        # Network/parsing failure: degrade gracefully to the blank template.
        console.warn("Failed to fetch templates. Falling back to default template.")
        console.debug(f"Error while fetching templates: {e}")
        template = constants.Templates.DEFAULT
    return template, templates
def prompt_for_template_options(templates: list[Template]) -> str:
    """Prompt the user to specify a template.

    Args:
        templates: The templates to choose from.

    Returns:
        The template name the user selects.

    Raises:
        SystemExit: If the user does not select a template.
    """
    console.print("\nGet started with a template:")

    # Display each template next to its numeric choice.
    for option, entry in enumerate(templates):
        console.print(f"({option}) {entry.description}")

    answer = console.ask(
        "Which template would you like to use?",
        choices=[str(i) for i in range(len(templates))],
        show_choices=False,
        default="0",
    )
    if not answer:
        console.error("No template selected.")
        raise SystemExit(1)
    try:
        selected = int(answer)
    except ValueError:
        console.error("Invalid template selected.")
        raise SystemExit(1) from None
    # Defensive range check in case the answer bypassed the choices filter.
    if not (0 <= selected < len(templates)):
        console.error("Invalid template selected.")
        raise SystemExit(1)
    return templates[selected].name
def initialize_app(app_name: str, template: str | None = None) -> str | None:
    """Initialize the app either from a remote template or a blank app. If the config file exists, it is considered as reinit.

    Args:
        app_name: The name of the app.
        template: The name of the template to use.

    Returns:
        The name of the template.

    Raises:
        SystemExit: If the template is not valid or unspecified.
    """
    # Local imports to avoid circular imports.
    from reflex.utils import telemetry

    # Check if the app is already initialized.
    if constants.Config.FILE.exists():
        telemetry.send("reinit")
        return None

    templates: dict[str, Template] = {}

    # Don't fetch app templates if the user directly asked for DEFAULT.
    if template is not None and template != constants.Templates.DEFAULT:
        template, templates = fetch_remote_templates(template)

    if template is None:
        # No template given on the CLI: interactively prompt for one.
        template = prompt_for_template_options(get_init_cli_prompt_options())
        # The "choose templates" and "AI builder" options redirect the user
        # to the browser and exit without creating an app.
        if template == constants.Templates.CHOOSE_TEMPLATES:
            redir.reflex_templates()
            raise SystemExit(0)
        if template == constants.Templates.AI:
            redir.reflex_build_redirect()
            raise SystemExit(0)

    # If the blank template is selected, create a blank app.
    if template == constants.Templates.DEFAULT:
        # Default app creation behavior: a blank app.
        initialize_default_app(app_name)
    else:
        validate_and_create_app_using_remote_template(
            app_name=app_name, template=template, templates=templates
        )

    telemetry.send("init", template=template)
    return template
def get_init_cli_prompt_options() -> list[Template]:
    """Get the CLI options for initializing a Reflex app.

    Returns:
        The CLI options.
    """
    # Built-in options carry no remote code URL.
    blank = Template(
        name=constants.Templates.DEFAULT,
        description="A blank Reflex app.",
        code_url="",
    )
    premade = Template(
        name=constants.Templates.CHOOSE_TEMPLATES,
        description="Premade templates built by the Reflex team.",
        code_url="",
    )
    ai_builder = Template(
        name=constants.Templates.AI,
        description="[bold]Try our AI builder.",
        code_url="",
    )
    return [blank, premade, ai_builder]
| {
"repo_id": "reflex-dev/reflex",
"file_path": "reflex/utils/templates.py",
"license": "Apache License 2.0",
"lines": 344,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
reflex-dev/reflex:reflex/plugins/_screenshot.py | """Plugin to enable screenshot functionality."""
from typing import TYPE_CHECKING
from reflex.plugins.base import Plugin as BasePlugin
if TYPE_CHECKING:
from starlette.requests import Request
from starlette.responses import Response
from typing_extensions import Unpack
from reflex.app import App
from reflex.plugins.base import PostCompileContext
from reflex.state import BaseState
ACTIVE_CONNECTIONS = "/_active_connections"
CLONE_STATE = "/_clone_state"
def _deep_copy(state: "BaseState") -> "BaseState":
    """Create a deep copy of the state.

    Args:
        state: The state to copy.

    Returns:
        A deep copy of the state.
    """
    import copy

    # Deep-copy the whole state object (this already clones its substates).
    copy_of_state = copy.deepcopy(state)

    def copy_substate(substate: "BaseState") -> "BaseState":
        # Recursively rebuild the substate and re-point its parent at the new
        # root so the copied tree does not reference the original tree.
        # NOTE(review): each recursion level deep-copies its subtree again on
        # top of the deepcopy above, so nested substates are copied multiple
        # times — presumably acceptable for shallow state trees; verify.
        substate_copy = _deep_copy(substate)
        substate_copy.parent_state = copy_of_state
        return substate_copy

    # Replace the substates with freshly rebuilt copies keyed off the
    # ORIGINAL state's substate map.
    copy_of_state.substates = {
        substate_name: copy_substate(substate)
        for substate_name, substate in state.substates.items()
    }
    return copy_of_state
class ScreenshotPlugin(BasePlugin):
    """Plugin to handle screenshot functionality."""

    def post_compile(self, **context: "Unpack[PostCompileContext]") -> None:
        """Called after the compilation of the plugin.

        Args:
            context: The context for the plugin.
        """
        app = context["app"]
        # Register the two debug endpoints on the app's API.
        self._add_active_connections_endpoint(app)
        self._add_clone_state_endpoint(app)

    @staticmethod
    def _add_active_connections_endpoint(app: "App") -> None:
        """Add an endpoint to the app that returns the active connections.

        Args:
            app: The application instance to which the endpoint will be added.
        """
        if not app._api:
            # No API server configured; nothing to attach the route to.
            return

        def active_connections(_request: "Request") -> "Response":
            from starlette.responses import JSONResponse

            if not app.event_namespace:
                return JSONResponse({})
            # Report the token -> socket session id mapping as JSON.
            return JSONResponse(app.event_namespace.token_to_sid)

        app._api.add_route(
            ACTIVE_CONNECTIONS,
            active_connections,
            methods=["GET"],
        )

    @staticmethod
    def _add_clone_state_endpoint(app: "App") -> None:
        """Add an endpoint to the app that clones the current state.

        Args:
            app: The application instance to which the endpoint will be added.
        """
        if not app._api:
            return

        async def clone_state(request: "Request") -> "Response":
            import uuid

            from starlette.responses import JSONResponse

            from reflex.state import _substate_key

            if not app.event_namespace:
                return JSONResponse({})
            # The request body is the token of the state to clone.
            token_to_clone = await request.json()
            if not isinstance(token_to_clone, str):
                return JSONResponse(
                    {"error": "Token to clone must be a string."}, status_code=400
                )
            old_state = await app.state_manager.get_state(token_to_clone)
            new_state = _deep_copy(old_state)
            new_token = uuid.uuid4().hex
            # Walk the copied state tree to a fixed point, marking every
            # substate as touched so it is persisted under the new token.
            # NOTE(review): `substate not in all_states` relies on the
            # state's equality semantics — presumably identity here; verify.
            all_states = [new_state]
            found_new = True
            while found_new:
                found_new = False
                for state in list(all_states):
                    for substate in state.substates.values():
                        substate._was_touched = True
                        if substate not in all_states:
                            all_states.append(substate)
                            found_new = True
            await app.state_manager.set_state(
                _substate_key(new_token, new_state), new_state
            )
            # Respond with the token under which the clone was stored.
            return JSONResponse(new_token)

        app._api.add_route(
            CLONE_STATE,
            clone_state,
            methods=["POST"],
        )
| {
"repo_id": "reflex-dev/reflex",
"file_path": "reflex/plugins/_screenshot.py",
"license": "Apache License 2.0",
"lines": 99,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
reflex-dev/reflex:reflex/utils/monitoring.py | """PyLeak integration for monitoring event loop blocking and resource leaks in Reflex applications."""
import contextlib
import functools
import inspect
import threading
from collections.abc import AsyncGenerator, Awaitable, Callable, Generator
from typing import TypeVar, overload
from reflex.config import get_config
try:
from pyleak import no_event_loop_blocking, no_task_leaks, no_thread_leaks
from pyleak.base import LeakAction
PYLEAK_AVAILABLE = True
except ImportError:
PYLEAK_AVAILABLE = False
no_event_loop_blocking = no_task_leaks = no_thread_leaks = None # pyright: ignore[reportAssignmentType]
LeakAction = None # pyright: ignore[reportAssignmentType]
# Thread-local storage to track if monitoring is already active
_thread_local = threading.local()
def is_pyleak_enabled() -> bool:
    """Check if PyLeak monitoring is enabled and available.

    Returns:
        True if PyLeak monitoring is enabled in config and PyLeak is available.
    """
    # Monitoring requires both the optional dependency and the config flag.
    if not PYLEAK_AVAILABLE:
        return False
    return get_config().enable_pyleak_monitoring
@contextlib.contextmanager
def monitor_sync():
    """Sync context manager for PyLeak monitoring.

    Yields:
        None: Context for monitoring sync operations.
    """
    if not is_pyleak_enabled():
        yield
        return

    # A monitor already running in this thread means we become a no-op to
    # avoid nested double-reporting.
    if getattr(_thread_local, "monitoring_active", False):
        yield
        return

    config = get_config()
    action = config.pyleak_action or LeakAction.WARN  # pyright: ignore[reportOptionalMemberAccess]

    _thread_local.monitoring_active = True
    try:
        # Only event-loop-blocking detection is active here; thread leak
        # detection has issues with background tasks (no_thread_leaks).
        with no_event_loop_blocking(  # pyright: ignore[reportOptionalCall]
            action=action,
            threshold=config.pyleak_blocking_threshold,
        ):
            yield
    finally:
        _thread_local.monitoring_active = False
@contextlib.asynccontextmanager
async def monitor_async():
    """Async context manager for PyLeak monitoring.

    Yields:
        None: Context for monitoring async operations.
    """
    if not is_pyleak_enabled():
        yield
        return

    # A monitor already running in this thread means we become a no-op to
    # avoid nested double-reporting.
    if getattr(_thread_local, "monitoring_active", False):
        yield
        return

    config = get_config()
    action = config.pyleak_action or LeakAction.WARN  # pyright: ignore[reportOptionalMemberAccess]

    _thread_local.monitoring_active = True
    try:
        # Only event-loop-blocking detection is active: thread and task leak
        # detection (no_thread_leaks / no_task_leaks) have issues with
        # background tasks and will be re-added later.
        with no_event_loop_blocking(  # pyright: ignore[reportOptionalCall]
            action=action,
            threshold=config.pyleak_blocking_threshold,
        ):
            yield
    finally:
        _thread_local.monitoring_active = False
YieldType = TypeVar("YieldType")
SendType = TypeVar("SendType")
ReturnType = TypeVar("ReturnType")


@overload
def monitor_loopblocks(
    func: Callable[..., AsyncGenerator[YieldType, ReturnType]],
) -> Callable[..., AsyncGenerator[YieldType, ReturnType]]: ...


@overload
def monitor_loopblocks(
    func: Callable[..., Generator[YieldType, SendType, ReturnType]],
) -> Callable[..., Generator[YieldType, SendType, ReturnType]]: ...


@overload
def monitor_loopblocks(
    func: Callable[..., Awaitable[ReturnType]],
) -> Callable[..., Awaitable[ReturnType]]: ...


def monitor_loopblocks(func: Callable) -> Callable:
    """Framework decorator using the monitoring module's context manager.

    Args:
        func: The function to be monitored for leaks.

    Returns:
        Decorator function that applies PyLeak monitoring to sync/async functions.
    """
    # Dispatch on the callable flavor: async generator, coroutine,
    # sync generator, then plain function as the fallback.
    if inspect.isasyncgenfunction(func):

        @functools.wraps(func)
        async def _wrap_async_gen(*args, **kwargs):
            async with monitor_async():
                async for value in func(*args, **kwargs):
                    yield value

        return _wrap_async_gen

    if inspect.iscoroutinefunction(func):

        @functools.wraps(func)
        async def _wrap_coro(*args, **kwargs):
            async with monitor_async():
                return await func(*args, **kwargs)

        return _wrap_coro

    if inspect.isgeneratorfunction(func):

        @functools.wraps(func)
        def _wrap_gen(*args, **kwargs):
            with monitor_sync():
                yield from func(*args, **kwargs)

        return _wrap_gen

    @functools.wraps(func)
    def _wrap_sync(*args, **kwargs):
        with monitor_sync():
            return func(*args, **kwargs)

    return _wrap_sync  # pyright: ignore[reportReturnType]
| {
"repo_id": "reflex-dev/reflex",
"file_path": "reflex/utils/monitoring.py",
"license": "Apache License 2.0",
"lines": 134,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.