sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
PrefectHQ/prefect:src/integrations/prefect-kubernetes/prefect_kubernetes/_logging.py | import logging
from typing import Any, Optional
from pythonjsonlogger.core import RESERVED_ATTRS
from pythonjsonlogger.json import JsonFormatter
DEFAULT_JSON_REFKEY = "object"
class KopfObjectJsonFormatter(JsonFormatter):
    """
    Log formatter for kopf objects.

    This formatter will filter unserializable fields from the log record,
    which the `prefect` JSON formatter is unable to do.
    """

    # Attributes kopf attaches to log records; they are not JSON-serializable
    # and must be kept out of the rendered output.
    _KOPF_ONLY_ATTRS = ("k8s_skip", "k8s_ref", "settings")

    def __init__(
        self,
        *args: Any,
        refkey: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        # `refkey` and `reserved_attrs` are handled here rather than passed
        # through, since they are not part of the parent constructor's API.
        attrs = set(kwargs.pop("reserved_attrs", RESERVED_ATTRS))
        attrs.update(self._KOPF_ONLY_ATTRS)
        kwargs["reserved_attrs"] = attrs
        kwargs.setdefault("timestamp", True)
        super().__init__(*args, **kwargs)
        self._refkey: str = refkey or DEFAULT_JSON_REFKEY

    def add_fields(
        self,
        log_record: dict[str, object],
        record: logging.LogRecord,
        message_dict: dict[str, object],
    ) -> None:
        """Populate the JSON record, adding the k8s object ref and a severity."""
        super().add_fields(log_record, record, message_dict)
        # Surface the kubernetes object reference (if kopf provided one)
        # under the configured key instead of dropping it entirely.
        if self._refkey and hasattr(record, "k8s_ref"):
            log_record[self._refkey] = record.k8s_ref
        if "severity" not in log_record:
            log_record["severity"] = self._severity_for(record.levelno)

    @staticmethod
    def _severity_for(levelno: int) -> str:
        """Map a stdlib numeric log level onto a severity name."""
        for threshold, name in (
            (logging.DEBUG, "debug"),
            (logging.INFO, "info"),
            (logging.WARNING, "warn"),
            (logging.ERROR, "error"),
        ):
            if levelno <= threshold:
                return name
        return "fatal"
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-kubernetes/prefect_kubernetes/_logging.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/prefect:src/integrations/prefect-kubernetes/tests/test_logging.py | import json
import logging
from typing import Any
from prefect_kubernetes._logging import KopfObjectJsonFormatter
class TestKopfObjectJsonFormatter:
    """Tests for the KopfObjectJsonFormatter"""

    @staticmethod
    def _make_record(
        level: int = logging.INFO, msg: str = "Test message"
    ) -> logging.LogRecord:
        """Build a minimal log record of the kind kopf hands to the formatter."""
        return logging.LogRecord(
            name="kopf.test",
            level=level,
            pathname="test.py",
            lineno=1,
            msg=msg,
            args=(),
            exc_info=None,
        )

    @staticmethod
    def _render(
        formatter: KopfObjectJsonFormatter, record: logging.LogRecord
    ) -> dict:
        """Format the record and decode the resulting JSON document."""
        return json.loads(formatter.format(record))

    def test_filters_unserializable_kopf_fields(self):
        """
        Test that kopf-specific fields (k8s_skip, k8s_ref, settings) are
        filtered out from the log record.
        """
        record = self._make_record()
        # Attach the kopf-specific attributes that should be filtered
        record.k8s_skip = True  # type: ignore
        record.k8s_ref = {"kind": "Pod", "name": "test-pod"}  # type: ignore
        record.settings = {"some": "settings"}  # type: ignore

        log_dict = self._render(KopfObjectJsonFormatter(), record)

        assert "k8s_skip" not in log_dict, "k8s_skip should be filtered out"
        assert "settings" not in log_dict, "settings should be filtered out"

    def test_adds_severity_field(self):
        """Test that the formatter adds a severity field with correct values"""
        formatter = KopfObjectJsonFormatter()
        for level, expected_severity in (
            (logging.DEBUG, "debug"),
            (logging.INFO, "info"),
            (logging.WARNING, "warn"),
            (logging.ERROR, "error"),
            (logging.CRITICAL, "fatal"),
        ):
            record = self._make_record(
                level=level, msg=f"Test message at {logging.getLevelName(level)}"
            )
            log_dict = self._render(formatter, record)
            assert "severity" in log_dict, (
                f"severity field should be present for {logging.getLevelName(level)}"
            )
            assert log_dict["severity"] == expected_severity, (
                f"Expected severity '{expected_severity}' for {logging.getLevelName(level)}, "
                f"got '{log_dict['severity']}'"
            )

    def test_adds_kubernetes_object_reference(self):
        """Test that k8s_ref is added to the output under the 'object' key"""
        k8s_ref: dict[str, Any] = {
            "apiVersion": "v1",
            "kind": "Pod",
            "name": "test-pod",
            "uid": "12345",
            "namespace": "default",
        }
        record = self._make_record()
        record.k8s_ref = k8s_ref  # type: ignore

        log_dict = self._render(KopfObjectJsonFormatter(), record)

        assert "object" in log_dict, (
            "object field should be present when k8s_ref is set"
        )
        assert log_dict["object"] == k8s_ref, "object field should contain the k8s_ref"

    def test_custom_refkey(self):
        """Test that a custom refkey can be used instead of 'object'"""
        record = self._make_record()
        k8s_ref = {"kind": "Pod", "name": "test-pod"}
        record.k8s_ref = k8s_ref  # type: ignore

        log_dict = self._render(KopfObjectJsonFormatter(refkey="k8s_resource"), record)

        assert "k8s_resource" in log_dict, "Custom refkey should be present"
        assert log_dict["k8s_resource"] == k8s_ref

    def test_json_output_is_valid(self):
        """Test that the formatter produces valid JSON"""
        # json.loads inside _render would raise on malformed output
        log_dict = self._render(KopfObjectJsonFormatter(), self._make_record())
        assert "message" in log_dict, "message field should be present"
        assert "severity" in log_dict, "severity field should be present"
        assert log_dict["message"] == "Test message"

    def test_timestamp_is_included(self):
        """Test that timestamp is included in the output"""
        log_dict = self._render(KopfObjectJsonFormatter(), self._make_record())
        assert "timestamp" in log_dict, "timestamp field should be present"

    def test_log_with_extra_fields(self):
        """Test that extra fields are included in the output"""
        record = self._make_record()
        record.custom_field = "custom_value"  # type: ignore

        log_dict = self._render(KopfObjectJsonFormatter(), record)

        assert "custom_field" in log_dict
        assert log_dict["custom_field"] == "custom_value"

    def test_no_k8s_ref_attribute(self):
        """Test that formatter works correctly when k8s_ref is not present"""
        # Intentionally no k8s_ref attribute on the record
        log_dict = self._render(KopfObjectJsonFormatter(), self._make_record())
        assert "message" in log_dict
        assert "severity" in log_dict
        # object field should not be present if k8s_ref is not set
        assert "object" not in log_dict or log_dict["object"] is None
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-kubernetes/tests/test_logging.py",
"license": "Apache License 2.0",
"lines": 183,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/prefect/_internal/pydantic/validated_func.py | """
Pure Pydantic v2 implementation of function argument validation.
This module provides validation of function arguments without calling the function,
compatible with Python 3.14+ (no Pydantic v1 dependency).
"""
from __future__ import annotations
import inspect
from typing import Any, Callable, ClassVar, Optional
from pydantic import (
BaseModel,
ConfigDict,
Field,
ValidationError,
create_model,
field_validator,
)
# Special field names for validation
# These match pydantic.v1.decorator constants for compatibility
V_ARGS_NAME = "v__args"
V_KWARGS_NAME = "v__kwargs"
V_POSITIONAL_ONLY_NAME = "v__positional_only"
V_DUPLICATE_KWARGS = "v__duplicate_kwargs"
class ValidatedFunction:
    """
    Validates function arguments using Pydantic v2 without calling the function.

    This class inspects a function's signature and creates a Pydantic model
    that can validate arguments passed to the function, including handling
    of *args, **kwargs, positional-only parameters, and duplicate arguments.

    Example:
        ```python
        def greet(name: str, age: int = 0):
            return f"Hello {name}, you are {age} years old"

        vf = ValidatedFunction(greet)

        # Validate arguments
        values = vf.validate_call_args(("Alice",), {"age": 30})
        # Returns: {"name": "Alice", "age": 30}

        # Invalid arguments will raise ValidationError
        vf.validate_call_args(("Bob",), {"age": "not a number"})
        # Raises: ValidationError
        ```
    """

    def __init__(
        self,
        function: Callable[..., Any],
        config: ConfigDict | None = None,
    ):
        """
        Initialize the validated function.

        Args:
            function: The function to validate arguments for
            config: Optional Pydantic ConfigDict or dict configuration

        Raises:
            ValueError: If function parameters conflict with internal field names
        """
        self.raw_function = function
        self.signature = inspect.signature(function)
        # Maps positional index -> parameter name (filled by _build_fields)
        # for POSITIONAL_ONLY and POSITIONAL_OR_KEYWORD parameters.
        self.arg_mapping: dict[int, str] = {}
        # Names of positional-only parameters, used to reject keyword passing.
        self.positional_only_args: set[str] = set()
        self.v_args_name = V_ARGS_NAME
        self.v_kwargs_name = V_KWARGS_NAME
        # Set when forward references could not be resolved at init time;
        # triggers a deferred model_rebuild() in validate_call_args.
        self._needs_rebuild = False
        # Check for conflicts with internal field names
        reserved_names = {
            V_ARGS_NAME,
            V_KWARGS_NAME,
            V_POSITIONAL_ONLY_NAME,
            V_DUPLICATE_KWARGS,
        }
        param_names = set(self.signature.parameters.keys())
        conflicts = reserved_names & param_names
        if conflicts:
            raise ValueError(
                f"Function parameters conflict with internal field names: {conflicts}. "
                f"These names are reserved: {reserved_names}"
            )
        # Build the validation model
        fields, takes_args, takes_kwargs, has_forward_refs = self._build_fields()
        self._create_model(fields, takes_args, takes_kwargs, config, has_forward_refs)

    def _build_fields(self) -> tuple[dict[str, Any], bool, bool, bool]:
        """
        Build field definitions from function signature.

        Returns:
            Tuple of (fields_dict, takes_args, takes_kwargs, has_forward_refs)
        """
        fields: dict[str, Any] = {}
        takes_args = False
        takes_kwargs = False
        has_forward_refs = False
        position = 0
        for param_name, param in self.signature.parameters.items():
            # *args / **kwargs get no per-name field; they are modeled by the
            # synthetic v__args / v__kwargs fields added below.
            if param.kind == inspect.Parameter.VAR_POSITIONAL:
                takes_args = True
                continue
            if param.kind == inspect.Parameter.VAR_KEYWORD:
                takes_kwargs = True
                continue
            # Track positional-only parameters
            if param.kind == inspect.Parameter.POSITIONAL_ONLY:
                self.positional_only_args.add(param_name)
            # Map position to parameter name
            if param.kind in (
                inspect.Parameter.POSITIONAL_ONLY,
                inspect.Parameter.POSITIONAL_OR_KEYWORD,
            ):
                self.arg_mapping[position] = param_name
                position += 1
            # Determine type and default; un-annotated parameters accept Any
            annotation = (
                param.annotation if param.annotation != inspect.Parameter.empty else Any
            )
            # Check if annotation is a string (forward reference)
            if isinstance(annotation, str):
                has_forward_refs = True
            if param.default == inspect.Parameter.empty:
                # Required parameter
                fields[param_name] = (annotation, Field(...))
            else:
                # Optional parameter with default
                fields[param_name] = (annotation, Field(default=param.default))
        # Always add args/kwargs fields for validation, even if function doesn't accept them
        fields[self.v_args_name] = (Optional[list[Any]], Field(default=None))
        fields[self.v_kwargs_name] = (Optional[dict[str, Any]], Field(default=None))
        # Add special validation fields
        fields[V_POSITIONAL_ONLY_NAME] = (Optional[list[str]], Field(default=None))
        fields[V_DUPLICATE_KWARGS] = (Optional[list[str]], Field(default=None))
        return fields, takes_args, takes_kwargs, has_forward_refs

    def _create_model(
        self,
        fields: dict[str, Any],
        takes_args: bool,
        takes_kwargs: bool,
        config: ConfigDict | None,
        has_forward_refs: bool,
    ) -> None:
        """Create the Pydantic validation model."""
        pos_args = len(self.arg_mapping)
        # Process config
        # Note: ConfigDict is a TypedDict, so we can't use isinstance() in Python 3.14
        # Instead, check if it's a dict-like object and merge with defaults
        if config is None:
            config_dict = ConfigDict(extra="forbid")
        else:
            config_dict = config.copy()
            if "extra" not in config_dict:
                config_dict["extra"] = "forbid"

        # Create base model with validators. The validators close over
        # takes_args / takes_kwargs / pos_args captured from this call.
        class DecoratorBaseModel(BaseModel):
            model_config: ClassVar[ConfigDict] = config_dict

            @field_validator(V_ARGS_NAME, check_fields=False)
            @classmethod
            def check_args(cls, v: Optional[list[Any]]) -> Optional[list[Any]]:
                # v holds only the *extra* positional args beyond the named ones
                if takes_args or v is None:
                    return v
                raise TypeError(
                    f"{pos_args} positional argument{'s' if pos_args != 1 else ''} "
                    f"expected but {pos_args + len(v)} given"
                )

            @field_validator(V_KWARGS_NAME, check_fields=False)
            @classmethod
            def check_kwargs(
                cls, v: Optional[dict[str, Any]]
            ) -> Optional[dict[str, Any]]:
                if takes_kwargs or v is None:
                    return v
                plural = "" if len(v) == 1 else "s"
                keys = ", ".join(map(repr, v.keys()))
                raise TypeError(f"unexpected keyword argument{plural}: {keys}")

            @field_validator(V_POSITIONAL_ONLY_NAME, check_fields=False)
            @classmethod
            def check_positional_only(cls, v: Optional[list[str]]) -> None:
                if v is None:
                    return
                plural = "" if len(v) == 1 else "s"
                keys = ", ".join(map(repr, v))
                raise TypeError(
                    f"positional-only argument{plural} passed as keyword "
                    f"argument{plural}: {keys}"
                )

            @field_validator(V_DUPLICATE_KWARGS, check_fields=False)
            @classmethod
            def check_duplicate_kwargs(cls, v: Optional[list[str]]) -> None:
                if v is None:
                    return
                plural = "" if len(v) == 1 else "s"
                keys = ", ".join(map(repr, v))
                raise TypeError(f"multiple values for argument{plural}: {keys}")

        # Create the model dynamically
        model_name = f"{self.raw_function.__name__.title()}Model"
        self.model = create_model(
            model_name,
            __base__=DecoratorBaseModel,
            **fields,
        )
        # Rebuild the model with the original function's namespace to resolve forward references
        # This is necessary when using `from __future__ import annotations` or when
        # parameters reference types not in the current scope
        # Only rebuild if we detected forward references to avoid performance overhead
        # If rebuild fails (e.g., forward-referenced types not yet defined), defer to validation time
        if has_forward_refs:
            try:
                self.model.model_rebuild(_types_namespace=self.raw_function.__globals__)
            except (NameError, AttributeError):
                # Forward references can't be resolved yet (e.g., types defined after decorator)
                # Model will be rebuilt during validate_call_args when types are available
                self._needs_rebuild = True

    def validate_call_args(
        self, args: tuple[Any, ...], kwargs: dict[str, Any]
    ) -> dict[str, Any]:
        """
        Validate function arguments and return normalized parameters.

        Args:
            args: Positional arguments
            kwargs: Keyword arguments

        Returns:
            Dictionary mapping parameter names to values

        Raises:
            ValidationError: If arguments don't match the function signature
            TypeError: For too many positionals, unexpected/duplicate keywords,
                or positional-only parameters passed by keyword
        """
        # Build the values dict for validation
        values: dict[str, Any] = {}
        var_args: list[Any] = []
        var_kwargs: dict[str, Any] = {}
        positional_only_passed_as_kw: list[str] = []
        duplicate_kwargs: list[str] = []
        # Process positional arguments
        for i, arg in enumerate(args):
            if i in self.arg_mapping:
                param_name = self.arg_mapping[i]
                if param_name in kwargs:
                    # Duplicate: both positional and keyword
                    duplicate_kwargs.append(param_name)
                values[param_name] = arg
            else:
                # Extra positional args go into *args
                var_args.append(arg)
        # Process keyword arguments
        for key, value in kwargs.items():
            if key in values:
                # Already set by positional arg
                continue
            # Check if this is a positional-only param passed as keyword
            if key in self.positional_only_args:
                positional_only_passed_as_kw.append(key)
                continue
            # Check if this is a known parameter
            if key in self.signature.parameters:
                values[key] = value
            else:
                # Unknown parameter goes into **kwargs
                var_kwargs[key] = value
        # Add special fields; their validators raise the matching TypeErrors
        if var_args:
            values[self.v_args_name] = var_args
        if var_kwargs:
            values[self.v_kwargs_name] = var_kwargs
        if positional_only_passed_as_kw:
            values[V_POSITIONAL_ONLY_NAME] = positional_only_passed_as_kw
        if duplicate_kwargs:
            values[V_DUPLICATE_KWARGS] = duplicate_kwargs
        # Rebuild model if needed to resolve any forward references that weren't available
        # at initialization time (e.g., when using `from __future__ import annotations`)
        # Only rebuild if we previously failed to resolve forward refs at init time
        if self._needs_rebuild:
            # Try rebuilding with raise_errors=False to handle any remaining issues gracefully
            self.model.model_rebuild(
                _types_namespace=self.raw_function.__globals__, raise_errors=False
            )
            # Clear the flag - we only need to rebuild once
            self._needs_rebuild = False
        # Validate using the model
        try:
            validated = self.model.model_validate(values)
        except ValidationError as e:
            # Convert ValidationError to TypeError for certain cases to match Python's behavior
            # Check if the error is about extra kwargs
            # NOTE(review): unknown keys are routed into v__kwargs above, so a
            # top-level extra_forbidden with loc ("kwargs",) may never occur in
            # practice — confirm whether this branch is reachable.
            for error in e.errors():
                if error.get("type") == "extra_forbidden" and error.get("loc") == (
                    "kwargs",
                ):
                    # This is an extra keyword argument error
                    extra_keys = error.get("input", {})
                    if isinstance(extra_keys, dict):
                        plural = "" if len(extra_keys) == 1 else "s"
                        keys = ", ".join(map(repr, extra_keys.keys()))
                        raise TypeError(f"unexpected keyword argument{plural}: {keys}")
            # For other validation errors, re-raise as-is
            raise
        # Extract only the actual function parameters
        result: dict[str, Any] = {}
        for param_name in self.signature.parameters.keys():
            param = self.signature.parameters[param_name]
            if param.kind == inspect.Parameter.VAR_POSITIONAL:
                result[param_name] = getattr(validated, self.v_args_name) or []
            elif param.kind == inspect.Parameter.VAR_KEYWORD:
                result[param_name] = getattr(validated, self.v_kwargs_name) or {}
            else:
                # Regular parameter
                value = getattr(validated, param_name)
                result[param_name] = value
        return result

    def __call__(self, *args: Any, **kwargs: Any) -> Any:
        """
        Validate arguments and call the function.

        Args:
            *args: Positional arguments
            **kwargs: Keyword arguments

        Returns:
            The result of calling the function with validated arguments
        """
        validated_params = self.validate_call_args(args, kwargs)
        return self.raw_function(**validated_params)
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/_internal/pydantic/validated_func.py",
"license": "Apache License 2.0",
"lines": 311,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:tests/_internal/pydantic/test_validated_func.py | """Tests for the pure Pydantic v2 validated function implementation."""
from unittest.mock import patch
import pytest
from pydantic import BaseModel, ValidationError
from prefect._internal.pydantic.validated_func import ValidatedFunction
class TestBasicValidation:
    """Test basic function argument validation."""

    def test_simple_function(self):
        def greet(name: str, age: int = 0):
            return f"Hello {name}, you are {age} years old"

        outcome = ValidatedFunction(greet).validate_call_args(("Alice",), {"age": 30})
        assert outcome == {"name": "Alice", "age": 30}

    def test_simple_function_with_defaults(self):
        def greet(name: str, age: int = 25):
            return f"Hello {name}"

        outcome = ValidatedFunction(greet).validate_call_args(("Bob",), {})
        assert outcome == {"name": "Bob", "age": 25}

    def test_all_positional(self):
        def add(a: int, b: int):
            return a + b

        assert ValidatedFunction(add).validate_call_args((5, 10), {}) == {
            "a": 5,
            "b": 10,
        }

    def test_all_keyword(self):
        def add(a: int, b: int):
            return a + b

        assert ValidatedFunction(add).validate_call_args((), {"a": 5, "b": 10}) == {
            "a": 5,
            "b": 10,
        }

    def test_mixed_positional_and_keyword(self):
        def multiply(x: int, y: int, z: int = 1):
            return x * y * z

        outcome = ValidatedFunction(multiply).validate_call_args((2, 3), {"z": 4})
        assert outcome == {"x": 2, "y": 3, "z": 4}
class TestTypeValidation:
    """Test that types are validated correctly."""

    def test_type_coercion(self):
        def add(a: int, b: int):
            return a + b

        # Pydantic should coerce string to int
        assert ValidatedFunction(add).validate_call_args(("5", "10"), {}) == {
            "a": 5,
            "b": 10,
        }

    def test_type_validation_error(self):
        def add(a: int, b: int):
            return a + b

        with pytest.raises(ValidationError) as exc_info:
            ValidatedFunction(add).validate_call_args(("not a number",), {"b": 10})
        assert "a" in str(exc_info.value)

    def test_pydantic_model_validation(self):
        class Person(BaseModel):
            name: str
            age: int

        def process_person(person: Person):
            return person

        outcome = ValidatedFunction(process_person).validate_call_args(
            ({"name": "Alice", "age": 30},), {}
        )
        person = outcome["person"]
        assert isinstance(person, Person)
        assert person.name == "Alice"
        assert person.age == 30
class TestVariadicArguments:
    """Test *args and **kwargs handling."""

    def test_var_positional(self):
        def sum_all(*numbers: int):
            return sum(numbers)

        got = ValidatedFunction(sum_all).validate_call_args((1, 2, 3, 4, 5), {})
        assert got == {"numbers": [1, 2, 3, 4, 5]}

    def test_var_keyword(self):
        def print_kwargs(**kwargs):
            return kwargs

        got = ValidatedFunction(print_kwargs).validate_call_args(
            (), {"a": 1, "b": 2, "c": 3}
        )
        assert got == {"kwargs": {"a": 1, "b": 2, "c": 3}}

    def test_mixed_with_var_positional(self):
        def func(a: int, b: int, *rest):
            return (a, b, rest)

        got = ValidatedFunction(func).validate_call_args((1, 2, 3, 4, 5), {})
        assert got == {"a": 1, "b": 2, "rest": [3, 4, 5]}

    def test_mixed_with_var_keyword(self):
        def func(a: int, b: int = 0, **kwargs):
            return (a, b, kwargs)

        got = ValidatedFunction(func).validate_call_args((1,), {"b": 2, "c": 3, "d": 4})
        assert got == {"a": 1, "b": 2, "kwargs": {"c": 3, "d": 4}}

    def test_both_var_args_and_kwargs(self):
        def func(a: int, *args, **kwargs):
            return (a, args, kwargs)

        got = ValidatedFunction(func).validate_call_args((1, 2, 3), {"x": 10, "y": 20})
        assert got == {"a": 1, "args": [2, 3], "kwargs": {"x": 10, "y": 20}}
class TestPositionalOnly:
    """Test positional-only parameters (Python 3.8+)."""

    def test_positional_only_valid(self):
        def func(a, b, /, c):
            return a + b + c

        assert ValidatedFunction(func).validate_call_args((1, 2, 3), {}) == {
            "a": 1,
            "b": 2,
            "c": 3,
        }

    def test_positional_only_with_keyword_for_c(self):
        def func(a, b, /, c):
            return a + b + c

        assert ValidatedFunction(func).validate_call_args((1, 2), {"c": 3}) == {
            "a": 1,
            "b": 2,
            "c": 3,
        }

    def test_positional_only_error(self):
        def func(a, b, /, c):
            return a + b + c

        # Passing a positional-only parameter by name must be rejected
        with pytest.raises(
            TypeError, match="positional-only argument.*passed as keyword"
        ):
            ValidatedFunction(func).validate_call_args((1,), {"b": 2, "c": 3})
class TestKeywordOnly:
    """Test keyword-only parameters."""

    def test_keyword_only_valid(self):
        def func(a, *, b, c=3):
            return a + b + c

        outcome = ValidatedFunction(func).validate_call_args((1,), {"b": 2})
        assert outcome == {"a": 1, "b": 2, "c": 3}

    def test_keyword_only_all_provided(self):
        def func(a, *, b, c):
            return a + b + c

        outcome = ValidatedFunction(func).validate_call_args((1,), {"b": 2, "c": 3})
        assert outcome == {"a": 1, "b": 2, "c": 3}
class TestErrorHandling:
    """Test error handling and validation."""

    def test_missing_required_argument(self):
        def func(a: int, b: int):
            return a + b

        with pytest.raises(ValidationError) as exc_info:
            ValidatedFunction(func).validate_call_args((1,), {})
        assert "b" in str(exc_info.value)

    def test_too_many_positional_arguments(self):
        def func(a: int, b: int):
            return a + b

        with pytest.raises(
            TypeError, match="2 positional arguments expected but 3 given"
        ):
            ValidatedFunction(func).validate_call_args((1, 2, 3), {})

    def test_unexpected_keyword_argument(self):
        def func(a: int, b: int):
            return a + b

        with pytest.raises(TypeError, match="unexpected keyword argument.*'c'"):
            ValidatedFunction(func).validate_call_args((1,), {"b": 2, "c": 3})

    def test_duplicate_argument(self):
        def func(a: int, b: int):
            return a + b

        # 'a' supplied both positionally and by keyword
        with pytest.raises(TypeError, match="multiple values for argument.*'a'"):
            ValidatedFunction(func).validate_call_args((1,), {"a": 2, "b": 3})
class TestCallable:
    """Test using ValidatedFunction as a callable."""

    def test_call_with_validation(self):
        def add(a: int, b: int):
            return a + b

        assert ValidatedFunction(add)(5, 10) == 15

    def test_call_with_keyword_args(self):
        def greet(name: str, greeting: str = "Hello"):
            return f"{greeting}, {name}!"

        assert ValidatedFunction(greet)("Alice", greeting="Hi") == "Hi, Alice!"

    def test_call_with_validation_error(self):
        def add(a: int, b: int):
            return a + b

        with pytest.raises(ValidationError):
            ValidatedFunction(add)("not a number", 10)
class TestComplexScenarios:
    """Test complex real-world scenarios."""

    def test_function_with_all_parameter_types(self):
        def complex_func(a, b, /, c, d=4, *args, e, f=6, **kwargs):
            return {
                "a": a,
                "b": b,
                "c": c,
                "d": d,
                "args": args,
                "e": e,
                "f": f,
                "kwargs": kwargs,
            }

        positional = (1, 2, 3)  # a, b, c
        keyword = {"d": 5, "e": 7, "f": 8, "x": 9, "y": 10}
        outcome = ValidatedFunction(complex_func).validate_call_args(
            positional, keyword
        )
        assert outcome == {
            "a": 1,
            "b": 2,
            "c": 3,
            "d": 5,
            "args": [],
            "e": 7,
            "f": 8,
            "kwargs": {"x": 9, "y": 10},
        }

    def test_function_with_no_type_hints(self):
        def add(a, b):
            return a + b

        assert ValidatedFunction(add).validate_call_args((1, 2), {}) == {
            "a": 1,
            "b": 2,
        }

    def test_with_custom_config(self):
        def func(a: int):
            return a

        # Pass config as a dict (ConfigDict is a TypedDict and can't use isinstance in Python 3.14)
        strict_vf = ValidatedFunction(func, config={"strict": True})
        # With strict mode, string won't be coerced to int
        with pytest.raises(ValidationError):
            strict_vf.validate_call_args(("5",), {})
class TestEdgeCases:
    """Test edge cases and corner scenarios."""

    def _expect_reserved_name_error(self, fn):
        """Assert that constructing a ValidatedFunction for *fn* is rejected."""
        with pytest.raises(
            ValueError, match="Function parameters conflict with internal field names"
        ):
            ValidatedFunction(fn)

    def test_no_parameters(self):
        def func():
            return "no params"

        assert ValidatedFunction(func).validate_call_args((), {}) == {}

    def test_only_defaults(self):
        def func(a=1, b=2):
            return a + b

        assert ValidatedFunction(func).validate_call_args((), {}) == {"a": 1, "b": 2}

    def test_empty_var_args(self):
        def func(*args):
            return args

        assert ValidatedFunction(func).validate_call_args((), {}) == {"args": []}

    def test_empty_var_kwargs(self):
        def func(**kwargs):
            return kwargs

        assert ValidatedFunction(func).validate_call_args((), {}) == {"kwargs": {}}

    def test_reserved_parameter_name_v__args(self):
        """Test that using reserved parameter name v__args raises ValueError."""

        def func(v__args):
            return v__args

        self._expect_reserved_name_error(func)

    def test_reserved_parameter_name_v__kwargs(self):
        """Test that using reserved parameter name v__kwargs raises ValueError."""

        def func(v__kwargs):
            return v__kwargs

        self._expect_reserved_name_error(func)

    def test_reserved_parameter_name_v__positional_only(self):
        """Test that using reserved parameter name v__positional_only raises ValueError."""

        def func(v__positional_only):
            return v__positional_only

        self._expect_reserved_name_error(func)

    def test_reserved_parameter_name_v__duplicate_kwargs(self):
        """Test that using reserved parameter name v__duplicate_kwargs raises ValueError."""

        def func(v__duplicate_kwargs):
            return v__duplicate_kwargs

        self._expect_reserved_name_error(func)
class TestForwardReferences:
    """Test handling of forward references and `from __future__ import annotations`."""

    def test_pydantic_model_with_future_annotations(self):
        """Test that Pydantic models work with forward reference annotations.

        This is a regression test for issue #19288.
        When using `from __future__ import annotations`, type hints become strings
        and need to be resolved via model_rebuild() with the proper namespace.
        """
        # Define a test module namespace that simulates using future annotations
        namespace = {}
        # Create a model in that namespace
        exec(
            """
from pydantic import BaseModel, Field

class TestModel(BaseModel):
    name: str = Field(..., description="Test name")
    value: int = 42
""",
            namespace,
        )
        TestModel = namespace["TestModel"]

        # Define a function with the model as a parameter using string annotation
        # This simulates what happens with `from __future__ import annotations`
        def process_model(model: "TestModel") -> dict:  # noqa: F821
            return {"name": model.name, "value": model.value}

        # Update the function's globals to include the TestModel
        process_model.__globals__.update(namespace)
        # Create validated function
        vf = ValidatedFunction(process_model)
        # Create an instance of the model
        test_instance = TestModel(name="test")
        # This should work without raising PydanticUserError about undefined models
        result = vf.validate_call_args((test_instance,), {})
        assert isinstance(result["model"], TestModel)
        assert result["model"].name == "test"
        assert result["model"].value == 42

    def test_nested_pydantic_models_with_forward_refs(self):
        """Test nested Pydantic models with forward references work correctly."""

        class Inner(BaseModel):
            value: int

        class Outer(BaseModel):
            inner: Inner
            name: str

        # Simulate forward reference by using string annotation
        def process_nested(data: "Outer") -> str:  # noqa: F821
            return data.name

        # Add the types to the function's globals
        process_nested.__globals__["Outer"] = Outer
        process_nested.__globals__["Inner"] = Inner
        vf = ValidatedFunction(process_nested)
        # Create nested structure
        outer_instance = Outer(inner=Inner(value=42), name="test")
        result = vf.validate_call_args((outer_instance,), {})
        assert isinstance(result["data"], Outer)
        assert result["data"].name == "test"
        assert result["data"].inner.value == 42

    def test_no_rebuild_without_forward_refs(self):
        """Test that model_rebuild is not called when there are no forward references.

        This is a performance optimization test - we should avoid the overhead
        of model_rebuild() when it's not necessary.
        """

        class MyModel(BaseModel):
            name: str

        # Function with concrete type annotations (no forward refs)
        def process_data(model: MyModel, count: int = 0) -> dict:
            return {"name": model.name, "count": count}

        # Spy on model_rebuild to ensure it's NOT called during initialization
        with patch.object(BaseModel, "model_rebuild") as mock_rebuild:
            vf = ValidatedFunction(process_data)
            # model_rebuild should NOT have been called since there are no forward refs
            mock_rebuild.assert_not_called()
        # The model should work correctly without rebuild
        instance = MyModel(name="test")
        # Also verify model_rebuild is NOT called during validation
        with patch.object(vf.model, "model_rebuild") as mock_rebuild:
            result = vf.validate_call_args((instance,), {"count": 5})
            # model_rebuild should NOT be called during validation either
            mock_rebuild.assert_not_called()
        assert isinstance(result["model"], MyModel)
        assert result["model"].name == "test"
        assert result["count"] == 5

    def test_forward_ref_defined_after_decorator(self):
        """Test that forward references work when type is defined after the function.

        This is a regression test for issue #19447.
        When using `from __future__ import annotations`, the @flow decorator
        was failing if a forward-referenced Pydantic model was defined after
        the function using it.
        """
        # First, define A and the function WITHOUT B defined yet
        namespace = {}
        exec(
            """
from __future__ import annotations
from pydantic import BaseModel, Field

class A(BaseModel):
    a: B = Field()

def process_model(model: A):
    return model
""",
            namespace,
        )
        # At this point, B doesn't exist yet. Creating ValidatedFunction should not fail
        # (it should defer the model rebuild until validation time)
        vf = ValidatedFunction(namespace["process_model"])
        # Now define B in the namespace
        exec(
            """
class B(BaseModel):
    b: str = Field()
""",
            namespace,
        )
        # Update the function's globals to include B
        namespace["process_model"].__globals__.update(namespace)
        # Now test that validation actually works at runtime
        result = vf.validate_call_args((), {"model": {"a": {"b": "test"}}})
        assert result["model"].a.b == "test"
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/_internal/pydantic/test_validated_func.py",
"license": "Apache License 2.0",
"lines": 400,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/prefect/concurrency/_sync.py | from __future__ import annotations
from contextlib import contextmanager
from typing import Generator, Literal, Optional
from uuid import UUID
from prefect.client.schemas.objects import ConcurrencyLeaseHolder
from prefect.client.schemas.responses import (
ConcurrencyLimitWithLeaseResponse,
MinimalConcurrencyLimitResponse,
)
from prefect.concurrency._asyncio import (
aacquire_concurrency_slots,
aacquire_concurrency_slots_with_lease,
arelease_concurrency_slots_with_lease,
)
from prefect.concurrency._events import (
emit_concurrency_acquisition_events,
emit_concurrency_release_events,
)
from prefect.concurrency._leases import maintain_concurrency_lease
from prefect.utilities.asyncutils import run_coro_as_sync
def release_concurrency_slots_with_lease(lease_id: UUID) -> None:
    """Synchronously release the concurrency slots held under ``lease_id``.

    Blocking wrapper around the async release implementation.
    """
    pending = arelease_concurrency_slots_with_lease(lease_id)
    run_coro_as_sync(pending)
def acquire_concurrency_slots(
    names: list[str],
    slots: int,
    mode: Literal["concurrency", "rate_limit"] = "concurrency",
    timeout_seconds: Optional[float] = None,
    max_retries: Optional[int] = None,
    strict: bool = False,
) -> list[MinimalConcurrencyLimitResponse]:
    """Synchronously acquire ``slots`` slots from each named concurrency limit.

    Blocking wrapper around the async acquisition implementation.
    """
    pending = aacquire_concurrency_slots(
        names, slots, mode, timeout_seconds, max_retries, strict
    )
    return run_coro_as_sync(pending)
def acquire_concurrency_slots_with_lease(
    names: list[str],
    slots: int,
    mode: Literal["concurrency", "rate_limit"] = "concurrency",
    timeout_seconds: Optional[float] = None,
    max_retries: Optional[int] = None,
    lease_duration: float = 300,
    strict: bool = False,
    holder: "Optional[ConcurrencyLeaseHolder]" = None,
    suppress_warnings: bool = False,
) -> ConcurrencyLimitWithLeaseResponse:
    """Synchronously acquire slots plus a renewal lease from each named limit.

    Blocking wrapper around the async acquisition-with-lease implementation.
    """
    pending = aacquire_concurrency_slots_with_lease(
        names,
        slots,
        mode,
        timeout_seconds,
        max_retries,
        lease_duration,
        strict,
        holder,
        suppress_warnings,
    )
    return run_coro_as_sync(pending)
@contextmanager
def concurrency(
    names: str | list[str],
    occupy: int = 1,
    timeout_seconds: Optional[float] = None,
    max_retries: Optional[int] = None,
    lease_duration: float = 300,
    strict: bool = False,
    holder: "Optional[ConcurrencyLeaseHolder]" = None,
    suppress_warnings: bool = False,
) -> Generator[None, None, None]:
    """A context manager that acquires and releases concurrency slots from the
    given concurrency limits.

    Args:
        names: The names of the concurrency limits to acquire slots from.
        occupy: The number of slots to acquire and hold from each limit.
        timeout_seconds: The number of seconds to wait for the slots to be acquired before
            raising a `TimeoutError`. A timeout of `None` will wait indefinitely.
        max_retries: The maximum number of retries to acquire the concurrency slots.
        lease_duration: The duration of the lease for the acquired slots in seconds.
        strict: A boolean specifying whether to raise an error if the concurrency limit does not exist.
            Defaults to `False`.
        holder: A dictionary containing information about the holder of the concurrency slots.
            Typically includes 'type' and 'id' keys.

    Raises:
        TimeoutError: If the slots are not acquired within the given timeout.
        ConcurrencySlotAcquisitionError: If the concurrency limit does not exist and `strict` is `True`.

    Example:
        A simple example of using the sync `concurrency` context manager:
        ```python
        from prefect.concurrency.sync import concurrency

        def resource_heavy():
            with concurrency("test", occupy=1):
                print("Resource heavy task")

        def main():
            resource_heavy()
        ```
    """
    # No limit names given: run the body without touching the concurrency API.
    if not names:
        yield
        return

    # Normalize a single name to a one-element list.
    names = names if isinstance(names, list) else [names]

    acquisition_response = acquire_concurrency_slots_with_lease(
        names,
        occupy,
        timeout_seconds=timeout_seconds,
        strict=strict,
        lease_duration=lease_duration,
        max_retries=max_retries,
        holder=holder,
        suppress_warnings=suppress_warnings,
    )

    # The server matched no limits (possible when strict=False): nothing is
    # held, so there is nothing to maintain or release.
    if not acquisition_response.limits:
        yield
        return

    emitted_events = emit_concurrency_acquisition_events(
        acquisition_response.limits, occupy
    )

    try:
        # Keep the lease renewed in the background for as long as the body
        # runs; with strict=True a failed renewal raises into the body.
        with maintain_concurrency_lease(
            acquisition_response.lease_id,
            lease_duration,
            raise_on_lease_renewal_failure=strict,
            suppress_warnings=suppress_warnings,
        ):
            yield
    finally:
        # Always release the slots and emit release events paired with the
        # acquisition events above, even if the body raised.
        release_concurrency_slots_with_lease(acquisition_response.lease_id)
        emit_concurrency_release_events(
            acquisition_response.limits, occupy, emitted_events
        )
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/concurrency/_sync.py",
"license": "Apache License 2.0",
"lines": 132,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:examples/ai_data_analyst_with_pydantic_ai.py | # ---
# title: AI-Powered Data Analyst
# description: Build a resilient AI data analyst using Prefect and `pydantic-ai` to analyze datasets, detect anomalies, and generate insights.
# icon: robot
# dependencies: ["prefect", "pydantic-ai[prefect]", "pandas"]
# keywords: ["ai", "agents", "pydantic-ai", "data-analysis", "llm", "durable-execution"]
# order: 6
# ---
#
# This example shows how to build resilient AI workflows using Prefect and `pydantic-ai`.
# The integration provides automatic retries for LLM calls, full observability of agent decisions,
# and durable execution semantics that make workflows idempotent and rerunnable.
#
# ## The Scenario: AI Data Analyst
#
# You need to analyze datasets programmatically, but writing custom analysis code for each dataset is time-consuming.
# Instead, you'll build an AI agent that:
#
# 1. Understands your dataset structure
# 2. Decides which analyses are most valuable
# 3. Uses Python tools to calculate statistics and detect anomalies
# 4. Generates actionable insights
#
# All while being resilient to LLM failures, tool errors, and network issues.
#
# This example demonstrates:
# * [`PrefectAgent`](https://ai.pydantic.dev/durable_execution/prefect/) – Wraps `pydantic-ai` agents for durable execution
# * **Agent Tools** – Python functions the AI can call, automatically wrapped as Prefect tasks
# * [`TaskConfig`](https://ai.pydantic.dev/durable_execution/prefect/#task-configuration) – Custom retry policies and timeouts for AI operations
# * [**Durable Execution**](https://ai.pydantic.dev/durable_execution/prefect/#durable-execution) – Automatic idempotency and failure recovery
#
# ## Setup
#
# Install dependencies (if not already installed):
# ```bash
# uv add pydantic-ai[prefect] pandas
# # or with pip:
# pip install "pydantic-ai[prefect]" pandas
# ```
from __future__ import annotations
from typing import Any
import pandas as pd
from pydantic import BaseModel, Field
from pydantic_ai import Agent, RunContext
from pydantic_ai.durable_exec.prefect import PrefectAgent, TaskConfig
from prefect import flow, task
# ## Agent Tools
#
# These functions are "tools" that the AI agent can call to analyze data.
# Prefect automatically wraps each tool execution as a task for observability and retries.
def calculate_statistics(ctx: RunContext[pd.DataFrame], column: str) -> dict[str, Any]:
    """Calculate descriptive statistics for a column.

    The AI agent can call this tool to understand data distribution,
    and Prefect ensures it retries on failure.

    Args:
        ctx: Run context whose ``deps`` is the DataFrame under analysis.
        column: Name of the column to describe.

    Returns:
        A JSON-serializable dict of statistics (plus ``missing_count`` and
        ``unique_count``), or ``{"error": ...}`` if the column does not exist.
    """
    df = ctx.deps
    if column not in df.columns:
        return {"error": f"Column '{column}' not found. Available: {list(df.columns)}"}

    stats = df[column].describe().to_dict()
    stats["missing_count"] = int(df[column].isna().sum())
    stats["unique_count"] = int(df[column].nunique())
    # Coerce every numeric value to a plain float so the payload is
    # JSON-serializable. `describe()` on non-numeric columns yields numpy
    # int64 scalars ("count", "freq") which are NOT instances of Python
    # int/float, so an isinstance check would let them leak through;
    # pd.api.types.is_number recognizes numpy scalars as well.
    return {
        k: (float(v) if pd.api.types.is_number(v) else v) for k, v in stats.items()
    }
def detect_anomalies(
    ctx: RunContext[pd.DataFrame], column: str, threshold: float = 3.0
) -> list[dict[str, Any]]:
    """Detect anomalies using standard deviation method.

    Identifies values that are more than `threshold` standard deviations from
    the mean. Returns at most 10 anomalies, each with its index, value, and
    z-score, or a single error entry when the column is missing/non-numeric.
    """
    df = ctx.deps
    if column not in df.columns:
        return [{"error": f"Column '{column}' not found"}]
    if not pd.api.types.is_numeric_dtype(df[column]):
        return [{"error": f"Column '{column}' is not numeric"}]

    series = df[column]
    mean = series.mean()
    std = series.std()
    # A constant column has no spread; nothing can be anomalous.
    if std == 0:
        return []

    outliers = df[abs(df[column] - mean) > (threshold * std)]
    findings: list[dict[str, Any]] = []
    for idx, row in outliers.head(10).iterrows():
        findings.append(
            {
                "index": int(idx),
                "value": float(row[column]),
                "z_score": float((row[column] - mean) / std),
            }
        )
    return findings
def get_column_info(ctx: RunContext[pd.DataFrame]) -> dict[str, Any]:
    """Get overview of all columns in the dataset.

    Helps the AI agent understand the dataset structure before analysis.
    """
    frame = ctx.deps
    dtype_names: dict[str, str] = {}
    for name, dtype in frame.dtypes.items():
        dtype_names[name] = str(dtype)
    return {
        "columns": list(frame.columns),
        "shape": {"rows": len(frame), "columns": len(frame.columns)},
        "dtypes": dtype_names,
    }
# ## Analysis Results Model
#
# Structured output ensures the AI returns consistent, parseable results.
class DataAnalysis(BaseModel):
    """Structured analysis results from the AI agent."""

    # One-paragraph overview of the dataset.
    summary: str = Field(description="High-level summary of the dataset")
    # Between 3 and 5 findings, enforced by pydantic length constraints.
    key_findings: list[str] = Field(
        description="Key findings discovered from the data", min_length=3, max_length=5
    )
    # Between 3 and 5 actionable recommendations.
    recommendations: list[str] = Field(
        description="Actionable recommendations based on the findings",
        min_length=3,
        max_length=5,
    )
    # Column names the agent actually inspected.
    columns_analyzed: list[str] = Field(
        description="List of columns that were analyzed"
    )

    def __str__(self) -> str:
        """Format the analysis results for clean display."""
        # Number findings and recommendations starting from 1.
        findings = "\n".join(
            f" {i}. {finding}" for i, finding in enumerate(self.key_findings, 1)
        )
        recommendations = "\n".join(
            f" {i}. {rec}" for i, rec in enumerate(self.recommendations, 1)
        )
        return f"""
{"=" * 80}
ANALYSIS RESULTS
{"=" * 80}
📋 Summary:
{self.summary}
🔑 Key Findings:
{findings}
💡 Recommendations:
{recommendations}
📊 Columns Analyzed: {", ".join(self.columns_analyzed)}
{"=" * 80}
"""
# ## Creating the AI Agent
#
# We configure the agent with tools and wrap it with PrefectAgent for durability.
def create_data_analyst_agent() -> PrefectAgent[pd.DataFrame, DataAnalysis]:
    """Create an AI data analyst with Prefect durability.

    The PrefectAgent wrapper automatically:
    - Wraps agent.run as a Prefect flow
    - Wraps LLM calls as Prefect tasks with retries
    - Wraps tool calls as separate Prefect tasks

    Returns:
        A `PrefectAgent` that takes a DataFrame dependency and produces a
        validated `DataAnalysis`.
    """
    # Create the base pydantic-ai agent
    agent = Agent(
        "openai:gpt-4o",
        name="data-analyst-agent",
        output_type=DataAnalysis,  # structured, pydantic-validated output
        deps_type=pd.DataFrame,  # dependency injected into every tool call via ctx.deps
        # Register tools that the agent can use
        tools=[calculate_statistics, detect_anomalies, get_column_info],
        system_prompt=(
            "You are an expert data analyst. Analyze the provided dataset using "
            "the available tools. Focus on finding meaningful patterns, anomalies, "
            "and actionable insights. Always start by understanding the dataset "
            "structure with get_column_info."
        ),
    )

    # Wrap with PrefectAgent for durable execution with custom retry policy
    return PrefectAgent(
        agent,
        model_task_config=TaskConfig(
            retries=3,  # Retry LLM calls up to 3 times
            retry_delay_seconds=[1.0, 2.0, 4.0],  # Exponential backoff
            timeout_seconds=60.0,  # 60s timeout for LLM calls
        ),
        tool_task_config=TaskConfig(
            retries=2,  # Retry tool calls up to 2 times
            retry_delay_seconds=[0.5, 1.0],
        ),
    )
# ## Sample Dataset Generator
#
# Create a realistic sales dataset for demonstration.
@task
def create_sample_dataset() -> pd.DataFrame:
    """Generate a sample sales dataset with some anomalies.

    In production, you'd load real data from a file, database, or API.
    """
    normal_sales = [100, 150, 200, 110, 145]
    records = {
        "product": ["Widget", "Gadget", "Doohickey", "Widget", "Gadget"] * 20,
        # Repeat the normal pattern, then end with two clear outliers.
        "sales": normal_sales * 19 + [100, 150, 200, 1000, 2000],
        "region": ["North", "South", "East", "West", "Central"] * 20,
        "month": [1, 2, 3, 4, 5] * 20,
    }
    return pd.DataFrame(records)
# ## Main Analysis Flow
#
# Orchestrate the entire AI analysis workflow with Prefect.
@flow(name="ai-data-analyst", log_prints=True)
async def analyze_dataset_with_ai() -> DataAnalysis:
    """Run AI-powered data analysis with automatic retries and observability.

    This flow demonstrates how Prefect makes AI workflows production-ready:
    1. Dataset preparation is tracked as a task
    2. AI agent execution is wrapped for durability
    3. All LLM and tool calls are logged and retryable
    4. Results are structured and validated with Pydantic

    Returns:
        The validated `DataAnalysis` produced by the agent.
    """
    # Prepare the dataset
    print("📊 Preparing dataset...")
    df = create_sample_dataset()
    print(f"Dataset shape: {df.shape}\n")

    # Create the AI agent with Prefect durability
    print("🤖 Initializing AI data analyst...")
    agent = create_data_analyst_agent()

    # Run the analysis - all LLM and tool calls are automatically retried on failure
    print("🔍 Running AI analysis...\n")
    result = await agent.run(
        "Analyze this sales dataset. Identify patterns, anomalies, and provide recommendations.",
        deps=df,
    )

    # Display results
    print(result.output)
    return result.output
# ## Serve the Flow
#
# To get full durable execution with automatic idempotency, serve the flow to create a deployment.
# Deployed flows enable Prefect's transactional semantics for agent operations.
if __name__ == "__main__":
    import os
    import sys

    # Fail fast with a clear message if the LLM credential is missing,
    # rather than erroring deep inside the agent at run time.
    if not os.getenv("OPENAI_API_KEY"):
        print("❌ Error: OPENAI_API_KEY environment variable not set")
        print("Set it with: export OPENAI_API_KEY='your-key-here'")
        sys.exit(1)

    # Serve the flow - this creates a deployment and runs a worker process
    analyze_dataset_with_ai.serve(
        name="ai-data-analyst-deployment",
        tags=["ai", "pydantic-ai", "data-analysis"],
    )
# ## Triggering Flow Runs
#
# Once served, trigger runs via:
#
# **Prefect UI:**
# 1. Navigate to http://localhost:4200
# 2. Go to Deployments → "ai-data-analyst-deployment"
# 3. Click "Run" → "Quick Run"
#
# **CLI:**
# ```bash
# prefect deployment run ai-data-analyst/ai-data-analyst-deployment --watch
# ```
#
# ## Local Testing
#
# For quick local testing without deployment:
# ```python
# import asyncio
# asyncio.run(analyze_dataset_with_ai())
# ```
# ## What Just Happened?
#
# When you serve and trigger this flow, Prefect and `pydantic-ai` work together to create a resilient AI pipeline:
#
# 1. **Deployment Creation** – `serve()` creates a deployment and starts a worker to execute flow runs
# 2. **Durable AI Execution** – The `PrefectAgent` wrapper makes all AI operations retryable:
# - LLM calls retry up to 3 times with exponential backoff (1s, 2s, 4s)
# - Tool calls retry up to 2 times
# - All operations respect 60s timeout
# 3. **Tool Observability** – Each time the AI calls a tool (`get_column_info`, `calculate_statistics`, `detect_anomalies`),
# the call is run as a Prefect task
# 4. **Structured Results** – Pydantic validates the AI's output, ensuring it matches the expected schema
# 5. **Automatic Idempotency** – When a deployed flow run is retried, Prefect's transactional semantics ensure that
# completed tasks are skipped and only failed operations are re-executed. This prevents duplicate API calls and
# wasted compute.
#
# ## Key Takeaways
#
# * **Deploy for Durability** – Use `flow.serve()` or `flow.deploy()` to unlock automatic idempotency and transactional semantics
# * **Retry Intelligence** – Failed flow runs can be retried from the UI, skipping already-completed tasks
# * **Tool Observability** – Every AI decision and tool call is tracked, logged, and independently retryable
# * **Zero Boilerplate** – Just wrap your pydantic-ai agent with `PrefectAgent`
# * **Customizable Policies** – Fine-tune retries, timeouts, and error handling per operation type
#
# **Try it yourself:**
# 1. Set your OpenAI API key: `export OPENAI_API_KEY='your-key'`
# 2. Start the Prefect server: `prefect server start`
# 3. Serve the flow: `uv run -s examples/ai_data_analyst_with_pydantic_ai.py`
# 4. Trigger a run from the UI (http://localhost:4200) or CLI
# 5. Watch all AI operations tracked in real-time
#
# For more on AI orchestration with Prefect:
# - [pydantic-ai + Prefect documentation](https://ai.pydantic.dev/durable_execution/prefect/)
# - [Task configuration and retries](/v3/how-to-guides/workflows/write-and-run#task-configuration)
# - [Workflow deployments](/v3/how-to-guides/deployments/create-deployments)
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "examples/ai_data_analyst_with_pydantic_ai.py",
"license": "Apache License 2.0",
"lines": 288,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/integrations/prefect-azure/prefect_azure/repository.py | """Interact with files stored in Azure DevOps Git repositories.
The `AzureDevopsRepository` class in this module is a storage block that lets Prefect
agents pull Prefect flow code from Azure DevOps repositories.
The `AzureDevopsRepository` block is ideally configured via the Prefect UI, but can
also be used in Python as the following examples demonstrate.
Examples:
Load a configured Azure DevOps repository block:
```python
from prefect_azure.repository import AzureDevopsRepository
azuredevops_repository_block = AzureDevopsRepository.load("BLOCK_NAME")
```
Clone a public Azure DevOps repository:
```python
from prefect_azure.repository import AzureDevopsRepository
public_repo = AzureDevopsRepository(
repository="https://dev.azure.com/myorg/myproject/_git/myrepo"
)
public_repo.save(name="my-azuredevops-block")
```
Clone a specific branch or tag:
```python
from prefect_azure.repository import AzureDevopsRepository
branch_repo = AzureDevopsRepository(
repository="https://dev.azure.com/myorg/myproject/_git/myrepo",
reference="develop"
)
branch_repo.save(name="my-azuredevops-branch-block")
```
Clone a private Azure DevOps repository:
```python
from prefect_azure import AzureDevopsCredentials, AzureDevopsRepository
azuredevops_credentials_block = AzureDevopsCredentials.load("my-azuredevops-credentials")
private_repo = AzureDevopsRepository(
repository="https://dev.azure.com/myorg/myproject/_git/myrepo",
credentials=azuredevops_credentials_block
)
private_repo.save(name="my-private-azuredevops-block")
```
"""
import io
import shutil
import urllib.parse
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Optional, Tuple, Union
from pydantic import Field
from prefect._internal.compatibility.async_dispatch import async_dispatch
from prefect._internal.retries import retry_async_fn
from prefect.filesystems import ReadableDeploymentStorage
from prefect.utilities.processutils import run_process
from prefect_azure.credentials import AzureDevopsCredentials
# Retry policy for repository clones (consumed by @retry_async_fn on
# AzureDevopsRepository.aget_directory below).
MAX_CLONE_ATTEMPTS = 3
# Base delay in seconds before the first retry.
CLONE_RETRY_MIN_DELAY_SECONDS = 1
# Jitter bounds (seconds) added to retry delays to spread out concurrent clones.
CLONE_RETRY_MIN_DELAY_JITTER_SECONDS = 0
CLONE_RETRY_MAX_DELAY_JITTER_SECONDS = 3
class AzureDevopsRepository(ReadableDeploymentStorage):
    """
    Interact with files stored in Azure DevOps Git repositories.
    """

    _block_type_name = "Azure DevOps Repository"
    _logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/54e3fa7e00197a4fbd1d82ed62494cb58d08c96a-250x250.png"
    _description = "Interact with files stored in Azure DevOps repositories."
    _documentation_url = "https://docs.prefect.io/integrations/prefect-azure"

    # Full HTTPS clone URL of the repository; SSH URLs are rejected.
    repository: str = Field(
        default=...,
        description="The full HTTPS URL of the Azure DevOps repository.",
    )
    # Branch or tag to check out; the remote's default branch when None.
    reference: Optional[str] = Field(
        default=None,
        description="Optional branch or tag name.",
    )
    # Shallow-clone depth; None fetches the full history.
    git_depth: Optional[int] = Field(
        default=1,
        ge=1,
        description="Depth of git history to fetch.",
    )
    # Optional PAT credentials for private repositories.
    credentials: Optional[AzureDevopsCredentials] = Field(
        default=None,
        title="Credentials",
        description="Azure DevOps Credentials block to authenticate with azuredevops private repositories",
    )

    def _create_repo_url(self) -> str:
        """Build the clone URL, embedding the PAT in the netloc when credentials are set.

        Returns:
            The repository URL, with ``user:token@`` userinfo injected for
            authenticated clones (or the original URL when no token is configured).

        Raises:
            ValueError: If the configured repository is an SSH (``git@``) URL.
        """
        if self.repository.startswith("git@"):
            raise ValueError(
                "SSH URLs are not supported. Please provide an HTTPS URL for the repository."
            )
        url_components = urllib.parse.urlparse(self.repository)
        if self.credentials is not None and self.credentials.token is not None:
            token = self.credentials.token.get_secret_value()
            # Percent-encode the token so URL-special characters (":", "@", ...)
            # survive inside the netloc.
            token = urllib.parse.quote(token, safe="")
            if url_components.username:
                user_info = f"{url_components.username}:{token}"
            else:
                # Empty username with the PAT as password, which git/Azure
                # DevOps accept over HTTPS.
                user_info = f":{token}"
            # NOTE: urlparse().hostname lower-cases the host; fine for DNS names.
            netloc = user_info + "@" + url_components.hostname
            if url_components.port:
                netloc += f":{url_components.port}"
            updated_components = url_components._replace(netloc=netloc)
            full_url = urllib.parse.urlunparse(updated_components)
        else:
            full_url = self.repository
        return full_url

    @staticmethod
    def _get_paths(
        dst_dir: Union[str, None], src_dir: str, sub_directory: Optional[str]
    ) -> Tuple[str, str]:
        """Resolve the copy source and destination, optionally scoped to a subdirectory.

        Args:
            dst_dir: Destination directory; the current working directory when None.
            src_dir: Directory the repository was cloned into.
            sub_directory: Optional subpath applied to BOTH source and destination
                so the relative layout is preserved.

        Returns:
            A ``(source, destination)`` pair of path strings.
        """
        if dst_dir is None:
            content_destination = Path(".").absolute()
        else:
            content_destination = Path(dst_dir)
        content_source = Path(src_dir)
        if sub_directory:
            content_destination = content_destination.joinpath(sub_directory)
            content_source = content_source.joinpath(sub_directory)
        return str(content_source), str(content_destination)

    # Retries transient clone failures (network blips, remote hiccups) with
    # the module-level backoff constants.
    @retry_async_fn(
        max_attempts=MAX_CLONE_ATTEMPTS,
        base_delay=CLONE_RETRY_MIN_DELAY_SECONDS,
        max_delay=CLONE_RETRY_MIN_DELAY_SECONDS + CLONE_RETRY_MAX_DELAY_JITTER_SECONDS,
        operation_name="clone_repository",
    )
    async def aget_directory(
        self, from_path: Optional[str] = None, local_path: Optional[str] = None
    ) -> None:
        """Asynchronously clones an Azure DevOps repository.

        This defaults to cloning the repository reference configured on the
        Block to the present working directory.

        Args:
            from_path: If provided, interpreted as a subdirectory of the underlying
                repository that will be copied to the provided local path.
            local_path: A local path to clone to; defaults to present working directory.

        Raises:
            RuntimeError: If ``git clone`` exits non-zero; the error output is
                surfaced with any token redacted.
        """
        cmd = ["git", "clone", self._create_repo_url()]
        if self.reference:
            cmd += ["-b", self.reference]
        if self.git_depth is not None:
            cmd += ["--depth", f"{self.git_depth}"]
        # Clone into a temp dir first, then copy the requested contents out, so
        # a failed clone never leaves partial files at the destination.
        with TemporaryDirectory(suffix="prefect") as tmp_dir:
            cmd.append(tmp_dir)
            err_stream = io.StringIO()
            out_stream = io.StringIO()
            process = await run_process(cmd, stream_output=(out_stream, err_stream))
            if process.returncode != 0:
                err_stream.seek(0)
                error_output = err_stream.read()
                # Scrub the PAT (both raw and URL-embedded forms) from git's
                # stderr so credentials never leak into logs or exceptions.
                if self.credentials and self.credentials.token:
                    raw_token = self.credentials.token.get_secret_value()
                    if raw_token:
                        error_output = error_output.replace(raw_token, "[REDACTED]")
                    repo_url_with_token = self._create_repo_url()
                    error_output = error_output.replace(
                        repo_url_with_token, "[REDACTED]"
                    )
                raise RuntimeError(f"Failed to pull from remote:\n {error_output}")

            content_source, content_destination = self._get_paths(
                dst_dir=local_path, src_dir=tmp_dir, sub_directory=from_path
            )

            shutil.copytree(
                src=content_source, dst=content_destination, dirs_exist_ok=True
            )

    @async_dispatch(aget_directory)
    def get_directory(
        self, from_path: Optional[str] = None, local_path: Optional[str] = None
    ) -> None:
        """Clones an Azure DevOps project within `from_path` to the provided `local_path`.

        This defaults to cloning the repository reference configured on the
        Block to the present working directory.

        Args:
            from_path: If provided, interpreted as a subdirectory of the underlying
                repository that will be copied to the provided local path.
            local_path: A local path to clone to; defaults to present working directory.
        """
        # Imported locally; presumably to avoid an import cycle at module
        # load time — confirm before moving to module scope.
        from prefect.utilities.asyncutils import run_coro_as_sync

        run_coro_as_sync(
            self.aget_directory(from_path=from_path, local_path=local_path)
        )
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-azure/prefect_azure/repository.py",
"license": "Apache License 2.0",
"lines": 176,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/integrations/prefect-azure/tests/test_repository.py | import urllib.parse
from pathlib import Path
from tempfile import TemporaryDirectory
from unittest.mock import AsyncMock
import pytest
from prefect_azure.credentials import AzureDevopsCredentials
from prefect_azure.repository import AzureDevopsRepository
from pydantic import SecretStr
class TestAzureDevopsRepository:
    """Unit tests for AzureDevopsRepository URL construction and cloning."""

    def test_https_repo_without_credentials(self):
        # Without credentials the URL must pass through unchanged.
        repo_url = "https://example.com/org/project/_git/repo"
        repo = AzureDevopsRepository(repository=repo_url)
        assert repo._create_repo_url() == repo_url

    def test_https_repo_with_token_credentials(self):
        # The PAT is injected as the password with an empty username.
        repo_url = "https://example.com/org/project/_git/repo"
        token = "test-token"
        credentials = AzureDevopsCredentials(token=SecretStr(token))
        repo = AzureDevopsRepository(repository=repo_url, credentials=credentials)
        full_url = repo._create_repo_url()
        parsed = urllib.parse.urlparse(full_url)
        assert parsed.scheme == "https"
        assert parsed.hostname == "example.com"
        assert parsed.password == token
        assert parsed.username == ""

    def test_https_repo_with_username_and_token(self):
        # A username already present in the URL is preserved alongside the PAT.
        repo_url = "https://fake@example.com/org/project/_git/repo"
        token = "test-token"
        credentials = AzureDevopsCredentials(token=SecretStr(token))
        repo = AzureDevopsRepository(repository=repo_url, credentials=credentials)
        full_url = repo._create_repo_url()
        parsed = urllib.parse.urlparse(full_url)
        assert parsed.scheme == "https"
        assert parsed.hostname == "example.com"
        assert parsed.username == "fake"
        assert parsed.password == token

    def test_token_url_encoding(self):
        # Tokens containing URL-special characters must be percent-encoded.
        token = "my:we!rd@tok#en"
        encoded_token = urllib.parse.quote(token, safe="")
        repo_url = "https://fake@example.com/org/project/_git/repo"
        credentials = AzureDevopsCredentials(token=SecretStr(token))
        repo = AzureDevopsRepository(repository=repo_url, credentials=credentials)
        full_url = repo._create_repo_url()
        parsed = urllib.parse.urlparse(full_url)
        assert parsed.password == encoded_token
        assert encoded_token in full_url  # Allow this since it's encoded check

    def test_ssh_url_raises_value_error(self):
        # SSH-style remotes are explicitly unsupported.
        ssh_url = "git@example.com:org/project/_git/repo"
        with pytest.raises(ValueError, match="SSH URLs are not supported"):
            AzureDevopsRepository(repository=ssh_url)._create_repo_url()

    async def test_get_directory_executes_clone(self, monkeypatch):
        # run_process is mocked, so no real git subprocess is spawned.
        mock_process = AsyncMock()
        mock_process.return_value.returncode = 0
        monkeypatch.setattr("prefect_azure.repository.run_process", mock_process)
        repo = AzureDevopsRepository(repository="https://example.com/repo.git")
        await repo.get_directory()
        assert mock_process.await_count == 1
        assert "git" in mock_process.await_args[0][0]

    async def test_get_directory_raises_on_failed_clone(self, monkeypatch):
        # A non-zero git exit code must surface as RuntimeError.
        class FakeProcess:
            returncode = 1

        async def fail_process(*args, **kwargs):
            return FakeProcess()

        monkeypatch.setattr("prefect_azure.repository.run_process", fail_process)
        repo = AzureDevopsRepository(repository="https://example.com/repo.git")
        with pytest.raises(RuntimeError, match="Failed to pull from remote"):
            await repo.get_directory()

    async def test_get_directory_retries_on_failure(self, monkeypatch):
        # The retry decorator should attempt the clone MAX_CLONE_ATTEMPTS times.
        call_counter = {"count": 0}

        class FakeProcess:
            returncode = 1

        async def fail_process(*args, **kwargs):
            call_counter["count"] += 1
            return FakeProcess()

        monkeypatch.setattr("prefect_azure.repository.run_process", fail_process)
        repo = AzureDevopsRepository(repository="https://example.com/repo.git")
        with pytest.raises(RuntimeError):
            await repo.get_directory()
        assert call_counter["count"] == 3  # MAX_CLONE_ATTEMPTS

    async def test_directory_contents_are_copied(self, monkeypatch):
        # TemporaryDirectory is stubbed to point at a directory we pre-populate,
        # so we can verify the copy step without a real clone.
        class FakeProcess:
            returncode = 0

        monkeypatch.setattr(
            "prefect_azure.repository.run_process",
            AsyncMock(return_value=FakeProcess()),
        )
        repo = AzureDevopsRepository(repository="https://example.com/repo.git")
        with TemporaryDirectory() as tmp_src:
            test_file = Path(tmp_src) / "file.txt"
            test_file.write_text("hello world")
            with TemporaryDirectory() as tmp_dst:

                class MockTmpDir:
                    def __init__(self, *args, **kwargs):
                        pass

                    def __enter__(self):
                        return tmp_src

                    def __exit__(self, *args):
                        pass

                monkeypatch.setattr(
                    "prefect_azure.repository.TemporaryDirectory",
                    lambda *a, **kw: MockTmpDir(),
                )
                await repo.get_directory(local_path=tmp_dst)
                assert (Path(tmp_dst) / "file.txt").exists()

    def test_get_directory_sync(self, monkeypatch):
        """Test that get_directory works in sync context"""
        mock_process = AsyncMock()
        mock_process.return_value.returncode = 0
        monkeypatch.setattr("prefect_azure.repository.run_process", mock_process)
        repo = AzureDevopsRepository(repository="https://example.com/repo.git")
        with TemporaryDirectory() as tmp_src:
            test_file = Path(tmp_src) / "file.txt"
            test_file.write_text("sync test")
            with TemporaryDirectory() as tmp_dst:

                class MockTmpDir:
                    def __init__(self, *args, **kwargs):
                        pass

                    def __enter__(self):
                        return tmp_src

                    def __exit__(self, *args):
                        pass

                monkeypatch.setattr(
                    "prefect_azure.repository.TemporaryDirectory",
                    lambda *a, **kw: MockTmpDir(),
                )
                # Call without await - should use sync implementation via run_coro_as_sync
                repo.get_directory(local_path=tmp_dst)
                assert mock_process.await_count == 1
                assert "git" in mock_process.await_args[0][0]
                assert (Path(tmp_dst) / "file.txt").exists()

    async def test_get_directory_force_sync_from_async(self, monkeypatch):
        """Test that _sync=True forces sync execution from async context"""
        mock_process = AsyncMock()
        mock_process.return_value.returncode = 0
        monkeypatch.setattr("prefect_azure.repository.run_process", mock_process)
        repo = AzureDevopsRepository(repository="https://example.com/repo.git")
        with TemporaryDirectory() as tmp_src:
            test_file = Path(tmp_src) / "file.txt"
            test_file.write_text("force sync")
            with TemporaryDirectory() as tmp_dst:

                class MockTmpDir:
                    def __init__(self, *args, **kwargs):
                        pass

                    def __enter__(self):
                        return tmp_src

                    def __exit__(self, *args):
                        pass

                monkeypatch.setattr(
                    "prefect_azure.repository.TemporaryDirectory",
                    lambda *a, **kw: MockTmpDir(),
                )
                # Force sync execution with _sync=True
                repo.get_directory(local_path=tmp_dst, _sync=True)
                # run_process should be called once (via run_coro_as_sync)
                assert mock_process.await_count == 1
                assert (Path(tmp_dst) / "file.txt").exists()
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-azure/tests/test_repository.py",
"license": "Apache License 2.0",
"lines": 155,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/prefect/_experimental/plugins/apply.py | """
Safe application of plugin setup results with secret redaction.
"""
from __future__ import annotations
import logging
import os
from prefect._experimental.plugins.spec import SetupResult
# Key-name markers that indicate a value is sensitive and must be hidden.
REDACT_KEYS = ("SECRET", "TOKEN", "PASSWORD", "KEY")


def redact(key: str, value: str) -> str:
    """
    Redact sensitive values based on key name heuristics.

    Args:
        key: Environment variable name
        value: Environment variable value

    Returns:
        Redacted value if the key appears sensitive, otherwise truncated value
    """
    normalized = key.upper()
    for marker in REDACT_KEYS:
        if marker in normalized:
            return "••••••"
    if len(value) > 64:
        return value[:20] + "…"
    return value
def apply_setup_result(result: SetupResult, logger: logging.Logger) -> None:
    """
    Apply environment changes to the current process.

    This function never logs secrets - all values are redacted based on key name
    heuristics.

    Args:
        result: The SetupResult containing environment variables to set
        logger: Logger to use for informational messages
    """
    env = result.env or {}
    for name, value in env.items():
        os.environ[str(name)] = str(value)

    note = result.note or ""
    suffix = f" — {note}" if note else ""
    logger.info("plugin env applied%s", suffix)
def summarize_env(env: dict[str, str]) -> dict[str, str]:
    """
    Build a loggable copy of ``env`` with sensitive values masked.

    Args:
        env: Dictionary of environment variables

    Returns:
        A new dictionary with the same keys and redacted values
    """
    safe: dict[str, str] = {}
    for name, value in env.items():
        safe[name] = redact(name, value)
    return safe
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/_experimental/plugins/apply.py",
"license": "Apache License 2.0",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
PrefectHQ/prefect:src/prefect/_experimental/plugins/diagnostics.py | """
Diagnostic data structures for plugin system.
"""
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class SetupSummary:
    """
    Summary of a plugin's setup_environment execution.

    Attributes:
        plugin: Name of the plugin
        env_preview: Preview of environment variables set (with redacted values)
        note: Human-readable note from the plugin, if any
        error: Error message if the plugin failed, or None if successful
    """

    # Entry-point name the plugin was registered under.
    plugin: str
    # Mapping of env var name -> already-redacted value; safe to log verbatim.
    env_preview: dict[str, str]
    # Optional human-readable note supplied by the plugin.
    note: str | None
    # Failure message; None means the hook ran successfully.
    error: str | None
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/_experimental/plugins/diagnostics.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
PrefectHQ/prefect:src/prefect/_experimental/plugins/manager.py | """
Plugin manager using pluggy with async bridge.
"""
from __future__ import annotations
import importlib.metadata as md
import inspect
import logging
import sys
from typing import Any
import pluggy
from packaging.specifiers import InvalidSpecifier, SpecifierSet
from prefect._experimental.plugins.spec import PREFECT_PLUGIN_API_VERSION
PM_PROJECT_NAME = "prefect-experimental"
ENTRYPOINTS_GROUP = "prefect.plugins"
register_hook = pluggy.HookimplMarker(PM_PROJECT_NAME)
def build_manager(hookspecs: type) -> pluggy.PluginManager:
    """
    Build a pluggy ``PluginManager`` with the given hook specifications
    installed.

    Args:
        hookspecs: The hook specification class/protocol

    Returns:
        Configured PluginManager instance
    """
    manager = pluggy.PluginManager(PM_PROJECT_NAME)
    manager.add_hookspecs(hookspecs)
    return manager
def load_entry_point_plugins(
    pm: pluggy.PluginManager,
    *,
    allow: set[str] | None,
    deny: set[str] | None,
    logger: logging.Logger,
) -> None:
    """
    Discover and load plugins from entry points.

    Failures are contained per plugin: an import error is logged and skipped,
    never raised. A plugin whose declared API requirement does not admit the
    current ``PREFECT_PLUGIN_API_VERSION`` is skipped; an unparseable
    requirement string is tolerated (logged, version check ignored).

    Args:
        pm: The PluginManager to register plugins with
        allow: If set, only load plugins with names in this set
        deny: If set, skip plugins with names in this set
        logger: Logger for reporting load failures
    """
    # Python 3.10+ supports group parameter, 3.9 requires dict access
    if sys.version_info >= (3, 10):
        entry_points_list = md.entry_points(group=ENTRYPOINTS_GROUP)
    else:
        # Python 3.9 returns a dict-like object
        entry_points_list = md.entry_points().get(ENTRYPOINTS_GROUP, [])
    for ep in entry_points_list:
        # NOTE: an empty allow/deny set is falsy and therefore applies no filter.
        if allow and ep.name not in allow:
            logger.debug("Skipping plugin %s (not in allow list)", ep.name)
            continue
        if deny and ep.name in deny:
            logger.debug("Skipping plugin %s (in deny list)", ep.name)
            continue
        try:
            plugin = ep.load()
            # Version fence (best effort)
            requires = getattr(plugin, "PREFECT_PLUGIN_API_REQUIRES", ">=0.1,<1")
            # Validate plugin API version requirement
            try:
                spec = SpecifierSet(requires)
                if PREFECT_PLUGIN_API_VERSION not in spec:
                    logger.warning(
                        "Skipping plugin %s: requires API version %s, current version is %s",
                        ep.name,
                        requires,
                        PREFECT_PLUGIN_API_VERSION,
                    )
                    continue
            except InvalidSpecifier:
                # An unparseable specifier does not block loading; fall through
                # to registration below.
                logger.debug(
                    "Plugin %s has invalid version specifier %r, ignoring version check",
                    ep.name,
                    requires,
                )
            pm.register(plugin, name=ep.name)
            logger.debug(
                "Loaded plugin %s (requires API %s, current %s)",
                ep.name,
                requires,
                PREFECT_PLUGIN_API_VERSION,
            )
        except Exception:
            # Import/registration errors are contained per plugin.
            logger.exception("Failed to load plugin %s", ep.name)
async def call_async_hook(
    pm: pluggy.PluginManager, hook_name: str, **kwargs: Any
) -> list[tuple[str, Any, Exception | None]]:
    """
    Invoke a hook whose implementations may be sync or async.

    Each registered implementation is called in turn; coroutine results are
    awaited. Failures are captured per plugin rather than propagated.

    Args:
        pm: The PluginManager
        hook_name: Name of the hook to call
        **kwargs: Arguments to pass to the hook

    Returns:
        List of tuples: (plugin_name, result, exception)
        - If successful: (name, result, None)
        - If failed: (name, None, exception)
    """
    caller = getattr(pm.hook, hook_name)
    outcomes: list[tuple[str, Any, Exception | None]] = []
    for hookimpl in caller.get_hookimpls():
        plugin_fn = hookimpl.function
        try:
            outcome = plugin_fn(**kwargs)
            if inspect.iscoroutine(outcome):
                outcome = await outcome
        except Exception as exc:
            outcomes.append((hookimpl.plugin_name, None, exc))
        else:
            outcomes.append((hookimpl.plugin_name, outcome, None))
    return outcomes
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/_experimental/plugins/manager.py",
"license": "Apache License 2.0",
"lines": 112,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/prefect/_experimental/plugins/spec.py | """
Experimental plugin API specification.
This module defines the hook specification and data structures for Prefect's
experimental plugin system.
"""
from __future__ import annotations
import logging
from dataclasses import dataclass
from typing import Any, Callable, Mapping, Optional
import pluggy
# Bump this when breaking the hook contract
PREFECT_PLUGIN_API_VERSION = "0.1"
hookspec = pluggy.HookspecMarker("prefect-experimental")
@dataclass
class HookContext:
    """
    Context provided to plugin hooks at startup.

    Attributes:
        prefect_version: The version of Prefect running
        api_url: The configured Prefect API URL, if any
        logger_factory: Factory function to create a stdlib logger for the plugin
    """

    # Version string of the running Prefect installation.
    prefect_version: str
    # Configured API URL; None when no API is configured.
    api_url: str | None
    # Logger factory returns a stdlib logger; plugins should use this.
    logger_factory: Callable[[str], logging.Logger]
    # Future: async Prefect client getter, settings snapshot, etc.
@dataclass
class SetupResult:
    """
    Result returned by a plugin's setup_environment hook.

    Attributes:
        env: Environment variables to set (e.g., AWS_* variables)
        note: Short, non-secret human-readable hint about what was configured
        required: If True and hook fails, abort in strict mode
    """

    # Applied to os.environ by apply_setup_result (keys/values coerced to str).
    env: Mapping[str, str]  # e.g. AWS_* variables
    note: Optional[str] = None  # short, non-secret human hint
    required: bool = False  # if True and hook fails -> abort (in strict mode)
class HookSpec:
    """
    Plugin hook specification.

    Plugins should implement the methods defined here to provide startup hooks.
    """

    # Hookspec methods are pure declarations: the decorated bodies hold only
    # documentation, never an implementation.
    @hookspec
    def setup_environment(self, *, ctx: HookContext) -> Optional[SetupResult]:
        """
        Prepare process environment for Prefect and its children.

        This hook is called before Prefect CLI commands, workers, or agents
        start their main work. It allows plugins to configure environment
        variables, authenticate with external services, or perform other
        setup tasks.

        Args:
            ctx: Context object with Prefect version, API URL, and logger factory

        Returns:
            SetupResult with environment variables to set, or None to indicate
            no changes are needed.

        Important:
            - Must not print secrets or write sensitive data to disk by default
            - Should be idempotent
            - May be async or sync
            - Exceptions are caught and logged unless required=True in strict mode
        """

    @hookspec
    def set_database_connection_params(
        self, connection_url: str, settings: Any
    ) -> Mapping[str, Any]:
        """
        Set additional database connection parameters.

        This hook is called when creating a database engine. It allows plugins
        to provide additional connection parameters, such as authentication
        tokens or SSL configuration.

        Args:
            connection_url: The database connection URL
            settings: The current Prefect settings

        Returns:
            Dictionary of connection parameters to merge into connect_args
        """
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/_experimental/plugins/spec.py",
"license": "Apache License 2.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
PrefectHQ/prefect:src/prefect/cli/experimental.py | """
Experimental command — native cyclopts implementation.
Access experimental features (subject to change).
"""
import cyclopts
import prefect.cli._app as _cli
from prefect.cli._utilities import with_cli_exception_handling
experimental_app: cyclopts.App = cyclopts.App(
name="experimental",
help="Access experimental features (subject to change).",
version_flags=[],
help_flags=["--help"],
)
plugins_app: cyclopts.App = cyclopts.App(
name="plugins",
help="Plugin system diagnostics.",
version_flags=[],
help_flags=["--help"],
)
experimental_app.command(plugins_app)
@plugins_app.command(name="diagnose")
@with_cli_exception_handling
async def diagnose():
    """
    Diagnose the experimental plugin system.

    Prints the feature-flag state, the plugin-related settings, every
    discoverable entry point (with its filtering status), and — unless safe
    mode is enabled — executes the startup hooks and reports their results.
    """
    import importlib.metadata as md
    from prefect._experimental.plugins.manager import ENTRYPOINTS_GROUP
    from prefect.settings import get_current_settings

    _cli.console.print(
        "\n[bold]Prefect Experimental Plugin System Diagnostics[/bold]\n"
    )
    # Check if enabled
    settings = get_current_settings().experiments.plugins
    enabled = settings.enabled
    _cli.console.print(f"Enabled: [{'green' if enabled else 'red'}]{enabled}[/]")
    if not enabled:
        # Nothing else is meaningful while the feature flag is off.
        _cli.console.print("\n[yellow]Plugin system is disabled.[/yellow]")
        _cli.console.print(
            "Set [cyan]PREFECT_EXPERIMENTS_PLUGINS_ENABLED=1[/cyan] to enable.\n"
        )
        return
    # Show configuration
    timeout = settings.setup_timeout_seconds
    allow = settings.allow
    deny = settings.deny
    strict = settings.strict
    safe = settings.safe_mode
    _cli.console.print(f"Timeout: {timeout}s")
    _cli.console.print(f"Strict mode: {strict}")
    _cli.console.print(f"Safe mode: {safe}")
    _cli.console.print(f"Allow list: {allow or 'None'}")
    _cli.console.print(f"Deny list: {deny or 'None'}")
    # Discover entry points
    _cli.console.print(
        f"\n[bold]Discoverable Plugins (entry point group: {ENTRYPOINTS_GROUP})[/bold]\n"
    )
    entry_points = list(md.entry_points(group=ENTRYPOINTS_GROUP))
    if not entry_points:
        _cli.console.print("[yellow]No plugins found.[/yellow]\n")
        return
    for ep in entry_points:
        # Mirror the allow/deny filtering applied by the loader so the user
        # sees which plugins would actually run.
        filtered = False
        reason = None
        if allow and ep.name not in allow:
            filtered = True
            reason = "not in allow list"
        elif deny and ep.name in deny:
            filtered = True
            reason = "in deny list"
        status_color = "red" if filtered else "green"
        status = f"[{status_color}]{'filtered' if filtered else 'active'}[/]"
        _cli.console.print(f" • {ep.name}: {status}")
        _cli.console.print(f" Module: {ep.value}")
        if filtered and reason:
            _cli.console.print(f" Reason: {reason}")
        # Try to load and get version requirement
        try:
            plugin = ep.load()
            requires = getattr(plugin, "PREFECT_PLUGIN_API_REQUIRES", ">=0.1,<1")
            _cli.console.print(f" API requirement: {requires}")
        except Exception as e:
            _cli.console.print(f" [red]Failed to load: {e}[/]")
        _cli.console.print()
    # Run startup hooks to show what they do
    if not safe:
        _cli.console.print("[bold]Running Startup Hooks[/bold]\n")
        from prefect import __version__
        from prefect._experimental.plugins import run_startup_hooks
        from prefect._experimental.plugins.spec import HookContext
        from prefect.logging import get_logger

        ctx = HookContext(
            prefect_version=__version__,
            api_url=get_current_settings().api.url,
            logger_factory=get_logger,
        )
        summaries = await run_startup_hooks(ctx)
        if summaries:
            for summary in summaries:
                status = "[red]error[/]" if summary.error else "[green]success[/]"
                _cli.console.print(f" • {summary.plugin}: {status}")
                if summary.error:
                    _cli.console.print(f" Error: {summary.error}")
                elif summary.env_preview:
                    # env_preview values are already redacted; safe to print.
                    _cli.console.print(
                        f" Environment variables: {len(summary.env_preview)}"
                    )
                    for k, v in summary.env_preview.items():
                        _cli.console.print(f" {k}={v}")
                    if summary.note:
                        _cli.console.print(f" Note: {summary.note}")
                else:
                    _cli.console.print(" No changes")
                _cli.console.print()
        else:
            _cli.console.print("[yellow]No plugins executed.[/yellow]\n")
    else:
        _cli.console.print(
            "\n[yellow]Safe mode enabled - skipping hook execution.[/yellow]"
        )
        _cli.console.print(
            "Set [cyan]PREFECT_EXPERIMENTS_PLUGINS_SAFE_MODE=0[/cyan] to execute hooks.\n"
        )
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/cli/experimental.py",
"license": "Apache License 2.0",
"lines": 121,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:tests/_experimental/plugins/test_plugins.py | """
Tests for the experimental plugin system.
"""
from __future__ import annotations
import asyncio
import logging
import os
import sys
from unittest.mock import Mock, patch
import pytest
from prefect._experimental.plugins import run_startup_hooks
from prefect._experimental.plugins.apply import redact, summarize_env
from prefect._experimental.plugins.diagnostics import SetupSummary
from prefect._experimental.plugins.manager import (
ENTRYPOINTS_GROUP,
build_manager,
call_async_hook,
load_entry_point_plugins,
register_hook,
)
from prefect._experimental.plugins.spec import HookContext, HookSpec, SetupResult
from prefect.settings import (
PREFECT_EXPERIMENTS_PLUGINS_ALLOW,
PREFECT_EXPERIMENTS_PLUGINS_DENY,
PREFECT_EXPERIMENTS_PLUGINS_ENABLED,
PREFECT_EXPERIMENTS_PLUGINS_SAFE_MODE,
PREFECT_EXPERIMENTS_PLUGINS_SETUP_TIMEOUT_SECONDS,
PREFECT_EXPERIMENTS_PLUGINS_STRICT,
get_current_settings,
temporary_settings,
)
from prefect.settings.legacy import Settings, _get_settings_fields
def mock_entry_points(entry_points_list):
    """
    Build a stand-in for ``importlib.metadata.entry_points`` that matches the
    calling convention of the running interpreter.

    Args:
        entry_points_list: List of entry point mocks to return

    Returns:
        A mock object that behaves like entry_points() for the current Python version
    """
    if sys.version_info < (3, 10):
        # Python 3.9: entry_points() returns a dict-like object keyed by group
        return Mock(return_value={ENTRYPOINTS_GROUP: entry_points_list})
    # Python 3.10+: entry_points(group=...) returns the list directly
    return Mock(return_value=entry_points_list)
@pytest.fixture
def mock_ctx():
    """Provide a HookContext populated with test values."""
    return HookContext(
        prefect_version="3.0.0",
        api_url="http://localhost:4200/api",
        logger_factory=logging.getLogger,
    )
@pytest.fixture
def clean_env(monkeypatch: pytest.MonkeyPatch):
    """Strip all plugin-related environment variables before a test runs."""
    for variable in (
        "PREFECT_EXPERIMENTS_PLUGINS_ENABLED",
        "PREFECT_EXPERIMENTS_PLUGINS_ALLOW",
        "PREFECT_EXPERIMENTS_PLUGINS_DENY",
        "PREFECT_EXPERIMENTS_PLUGINS_SETUP_TIMEOUT_SECONDS",
        "PREFECT_EXPERIMENTS_PLUGINS_STRICT",
        "PREFECT_EXPERIMENTS_PLUGINS_SAFE_MODE",
    ):
        monkeypatch.delenv(variable, raising=False)
class TestPluginConfig:
    """Tests for plugin configuration."""

    # Every test uses the clean_env fixture so ambient PREFECT_* variables
    # cannot leak into the settings under test.
    @pytest.mark.usefixtures("clean_env")
    def test_feature_flag_off(self):
        """Test that plugins are disabled by default."""
        settings = get_current_settings().experiments.plugins
        assert settings.enabled is False

    @pytest.mark.usefixtures("clean_env")
    def test_feature_flag_on(self):
        """Test that plugins can be enabled."""
        with temporary_settings(updates={PREFECT_EXPERIMENTS_PLUGINS_ENABLED: True}):
            settings = get_current_settings().experiments.plugins
            assert settings.enabled is True

    @pytest.mark.usefixtures("clean_env")
    def test_timeout_default(self):
        """Test default timeout value."""
        settings = get_current_settings().experiments.plugins
        assert settings.setup_timeout_seconds == 20.0

    @pytest.mark.usefixtures("clean_env")
    def test_timeout_custom(self):
        """Test custom timeout value."""
        with temporary_settings(
            updates={PREFECT_EXPERIMENTS_PLUGINS_SETUP_TIMEOUT_SECONDS: 10.0}
        ):
            settings = get_current_settings().experiments.plugins
            assert settings.setup_timeout_seconds == 10.0

    @pytest.mark.usefixtures("clean_env")
    def test_parse_plugin_lists(self):
        """Test plugin allow and deny list parsing."""
        # Comma-separated strings are parsed into sets of plugin names.
        with temporary_settings(
            updates={
                PREFECT_EXPERIMENTS_PLUGINS_ALLOW: "plugin1,plugin2",
                PREFECT_EXPERIMENTS_PLUGINS_DENY: "plugin3",
            }
        ):
            settings = get_current_settings().experiments.plugins
            assert settings.allow == {"plugin1", "plugin2"}
            assert settings.deny == {"plugin3"}

    @pytest.mark.usefixtures("clean_env")
    def test_strict_mode(self):
        """Test strict mode flag."""
        with temporary_settings(updates={PREFECT_EXPERIMENTS_PLUGINS_STRICT: True}):
            settings = get_current_settings().experiments.plugins
            assert settings.strict is True

    @pytest.mark.usefixtures("clean_env")
    def test_safe_mode(self):
        """Test safe mode flag."""
        with temporary_settings(updates={PREFECT_EXPERIMENTS_PLUGINS_SAFE_MODE: True}):
            settings = get_current_settings().experiments.plugins
            assert settings.safe_mode is True
class TestRedaction:
    """Tests for secret redaction."""

    # Redaction is driven purely by the variable *name*, never by its value.
    def test_redact_secret_key(self):
        """Test that AWS_SECRET_ACCESS_KEY is redacted."""
        result = redact("AWS_SECRET_ACCESS_KEY", "supersecret123")
        assert result == "••••••"

    def test_redact_token(self):
        """Test that TOKEN is redacted."""
        result = redact("GITHUB_TOKEN", "ghp_abcdefghijklmnop")
        assert result == "••••••"

    def test_redact_password(self):
        """Test that PASSWORD is redacted."""
        result = redact("DATABASE_PASSWORD", "mypassword")
        assert result == "••••••"

    def test_no_redaction_normal_key(self):
        """Test that normal keys are not redacted (but truncated if long)."""
        result = redact("AWS_REGION", "us-east-1")
        assert result == "us-east-1"

    def test_truncate_long_value(self):
        """Test that long values are truncated."""
        long_value = "x" * 100
        result = redact("SOME_VALUE", long_value)
        assert len(result) < len(long_value)
        assert result.endswith("…")

    def test_summarize_env(self):
        """Test that environment summary redacts secrets."""
        env = {
            "AWS_SECRET_ACCESS_KEY": "supersecret",
            "AWS_REGION": "us-east-1",
        }
        summary = summarize_env(env)
        assert summary["AWS_SECRET_ACCESS_KEY"] == "••••••"
        assert summary["AWS_REGION"] == "us-east-1"
class TestPluginManager:
    """Tests for plugin discovery and management."""

    # Entry-point discovery is exercised by patching importlib.metadata, so no
    # real plugins are ever loaded from the environment.
    def test_build_manager(self):
        """Test that we can build a plugin manager."""
        pm = build_manager(HookSpec)
        assert pm is not None
        assert pm.project_name == "prefect-experimental"

    async def test_async_hook_call_sync(self, mock_ctx: HookContext):
        """Test calling a sync hook implementation."""

        class TestPlugin:
            @register_hook
            def setup_environment(self, *, ctx: HookContext):
                return SetupResult(env={"TEST": "value"})

        pm = build_manager(HookSpec)
        pm.register(TestPlugin(), name="test-plugin")
        results = await call_async_hook(pm, "setup_environment", ctx=mock_ctx)
        assert len(results) == 1
        name, result, error = results[0]
        assert name == "test-plugin"
        assert error is None
        assert result.env["TEST"] == "value"

    async def test_async_hook_call_async(self, mock_ctx: HookContext):
        """Test calling an async hook implementation."""

        class TestPlugin:
            @register_hook
            async def setup_environment(self, *, ctx: HookContext):
                await asyncio.sleep(0.001)
                return SetupResult(env={"TEST": "async_value"})

        pm = build_manager(HookSpec)
        pm.register(TestPlugin(), name="test-plugin")
        results = await call_async_hook(pm, "setup_environment", ctx=mock_ctx)
        assert len(results) == 1
        name, result, error = results[0]
        assert name == "test-plugin"
        assert error is None
        assert result.env["TEST"] == "async_value"

    async def test_hook_error_handling(self, mock_ctx: HookContext):
        """Test that hook errors are captured per plugin."""

        class GoodPlugin:
            @register_hook
            def setup_environment(self, *, ctx: HookContext):
                return SetupResult(env={"GOOD": "value"})

        class BadPlugin:
            @register_hook
            def setup_environment(self, *, ctx: HookContext):
                raise ValueError("Plugin failed!")

        pm = build_manager(HookSpec)
        pm.register(GoodPlugin(), name="good-plugin")
        pm.register(BadPlugin(), name="bad-plugin")
        results = await call_async_hook(pm, "setup_environment", ctx=mock_ctx)
        assert len(results) == 2
        # Check good plugin
        good_result = next(r for r in results if r[0] == "good-plugin")
        assert good_result[1].env["GOOD"] == "value"
        assert good_result[2] is None
        # Check bad plugin
        bad_result = next(r for r in results if r[0] == "bad-plugin")
        assert bad_result[1] is None
        assert isinstance(bad_result[2], ValueError)

    async def test_function_based_plugin_sync(self, mock_ctx: HookContext):
        """Test that function-based plugins work (sync)."""
        from types import ModuleType

        # Create a mock module with a function-based plugin
        plugin_module = ModuleType("test_plugin")

        @register_hook
        def setup_environment(*, ctx: HookContext):
            return SetupResult(env={"FUNC_PLUGIN": "sync_value"})

        # Add the function to the module
        plugin_module.setup_environment = setup_environment
        pm = build_manager(HookSpec)
        pm.register(plugin_module, name="func-plugin")
        results = await call_async_hook(pm, "setup_environment", ctx=mock_ctx)
        assert len(results) == 1
        name, result, error = results[0]
        assert name == "func-plugin"
        assert error is None
        assert result.env["FUNC_PLUGIN"] == "sync_value"

    async def test_function_based_plugin_async(self, mock_ctx: HookContext):
        """Test that function-based plugins work (async)."""
        from types import ModuleType

        # Create a mock module with an async function-based plugin
        plugin_module = ModuleType("test_plugin")

        @register_hook
        async def setup_environment(*, ctx: HookContext):
            await asyncio.sleep(0.001)
            return SetupResult(env={"FUNC_PLUGIN": "async_value"})

        # Add the function to the module
        plugin_module.setup_environment = setup_environment
        pm = build_manager(HookSpec)
        pm.register(plugin_module, name="func-plugin")
        results = await call_async_hook(pm, "setup_environment", ctx=mock_ctx)
        assert len(results) == 1
        name, result, error = results[0]
        assert name == "func-plugin"
        assert error is None
        assert result.env["FUNC_PLUGIN"] == "async_value"

    def test_load_entry_point_plugins_with_allow_list(self):
        """Test that allow list filters plugins."""
        pm = build_manager(HookSpec)
        logger = logging.getLogger("test")
        # Mock entry points
        mock_plugin1 = Mock()
        mock_plugin1.PREFECT_PLUGIN_API_REQUIRES = ">=0.1,<1"
        mock_ep1 = Mock()
        mock_ep1.name = "plugin1"
        mock_ep1.load.return_value = mock_plugin1
        mock_plugin2 = Mock()
        mock_plugin2.PREFECT_PLUGIN_API_REQUIRES = ">=0.1,<1"
        mock_ep2 = Mock()
        mock_ep2.name = "plugin2"
        mock_ep2.load.return_value = mock_plugin2
        with patch(
            "importlib.metadata.entry_points",
            mock_entry_points([mock_ep1, mock_ep2]),
        ):
            load_entry_point_plugins(pm, allow={"plugin1"}, deny=None, logger=logger)
        # Only plugin1 should be registered
        assert len(pm.get_plugins()) == 1

    def test_load_entry_point_plugins_with_deny_list(self):
        """Test that deny list filters plugins."""
        pm = build_manager(HookSpec)
        logger = logging.getLogger("test")
        # Mock entry points
        mock_plugin1 = Mock()
        mock_plugin1.PREFECT_PLUGIN_API_REQUIRES = ">=0.1,<1"
        mock_ep1 = Mock()
        mock_ep1.name = "plugin1"
        mock_ep1.load.return_value = mock_plugin1
        mock_plugin2 = Mock()
        mock_plugin2.PREFECT_PLUGIN_API_REQUIRES = ">=0.1,<1"
        mock_ep2 = Mock()
        mock_ep2.name = "plugin2"
        mock_ep2.load.return_value = mock_plugin2
        with patch(
            "importlib.metadata.entry_points",
            mock_entry_points([mock_ep1, mock_ep2]),
        ):
            load_entry_point_plugins(pm, allow=None, deny={"plugin2"}, logger=logger)
        # Only plugin1 should be registered
        assert len(pm.get_plugins()) == 1

    def test_load_entry_point_plugins_version_validation_compatible(self):
        """Test that plugins with compatible API versions are loaded."""
        pm = build_manager(HookSpec)
        logger = logging.getLogger("test")
        # Mock a plugin with compatible version requirement
        mock_plugin = Mock()
        mock_plugin.PREFECT_PLUGIN_API_REQUIRES = ">=0.1,<1"
        mock_ep = Mock()
        mock_ep.name = "compatible-plugin"
        mock_ep.load.return_value = mock_plugin
        with patch("importlib.metadata.entry_points", mock_entry_points([mock_ep])):
            load_entry_point_plugins(pm, allow=None, deny=None, logger=logger)
        # Plugin should be registered
        assert len(pm.get_plugins()) == 1

    def test_load_entry_point_plugins_version_validation_incompatible(self):
        """Test that plugins with incompatible API versions are skipped."""
        pm = build_manager(HookSpec)
        logger = logging.getLogger("test")
        # Mock a plugin with incompatible version requirement
        mock_plugin = Mock()
        mock_plugin.PREFECT_PLUGIN_API_REQUIRES = ">=1.0"
        mock_ep = Mock()
        mock_ep.name = "incompatible-plugin"
        mock_ep.load.return_value = mock_plugin
        with patch("importlib.metadata.entry_points", mock_entry_points([mock_ep])):
            load_entry_point_plugins(pm, allow=None, deny=None, logger=logger)
        # Plugin should NOT be registered due to version mismatch
        assert len(pm.get_plugins()) == 0

    def test_load_entry_point_plugins_invalid_version_specifier(self):
        """Test that plugins with invalid version specifiers are loaded with warning."""
        pm = build_manager(HookSpec)
        logger = logging.getLogger("test")
        # Mock a plugin with invalid version specifier
        mock_plugin = Mock()
        mock_plugin.PREFECT_PLUGIN_API_REQUIRES = "this-is-not-valid"
        mock_ep = Mock()
        mock_ep.name = "invalid-spec-plugin"
        mock_ep.load.return_value = mock_plugin
        with patch("importlib.metadata.entry_points", mock_entry_points([mock_ep])):
            load_entry_point_plugins(pm, allow=None, deny=None, logger=logger)
        # Plugin should still be registered (we log but don't block)
        assert len(pm.get_plugins()) == 1
class TestStartupHooks:
    """Tests for the full startup hook system."""

    # NOTE: patching build_manager and load_entry_point_plugins keeps real
    # entry-point plugins out of these tests; only the locally registered
    # plugin classes execute.
    @pytest.mark.usefixtures("clean_env")
    async def test_disabled_plugins_no_execution(self, mock_ctx: HookContext):
        """Test that plugins don't run when disabled."""
        summaries = await run_startup_hooks(mock_ctx)
        assert summaries == []

    @pytest.mark.usefixtures("clean_env")
    async def test_safe_mode_no_execution(self, mock_ctx: HookContext):
        """Test that safe mode loads plugins but doesn't execute hooks."""
        with temporary_settings(
            updates={
                PREFECT_EXPERIMENTS_PLUGINS_ENABLED: True,
                PREFECT_EXPERIMENTS_PLUGINS_SAFE_MODE: True,
            }
        ):
            summaries = await run_startup_hooks(mock_ctx)
            # Should return empty list in safe mode
            assert summaries == []

    async def test_timeout_handling(self, clean_env, mock_ctx):
        """Test that slow plugins time out gracefully."""
        fields = _get_settings_fields(Settings)
        timeout_setting = fields["PREFECT_EXPERIMENTS_PLUGINS_SETUP_TIMEOUT_SECONDS"]

        class SlowPlugin:
            async def setup_environment(self, *, ctx: HookContext):
                # Sleeps well past the 0.1s timeout configured below.
                await asyncio.sleep(1.0)
                return SetupResult(env={"SLOW": "value"})

        pm = build_manager(HookSpec)
        pm.register(SlowPlugin(), name="slow-plugin")
        with temporary_settings(
            updates={PREFECT_EXPERIMENTS_PLUGINS_ENABLED: True, timeout_setting: 0.1}
        ):
            with patch("prefect._experimental.plugins.build_manager", return_value=pm):
                with patch(
                    "prefect._experimental.plugins.manager.load_entry_point_plugins"
                ):
                    summaries = await run_startup_hooks(mock_ctx)
                    # Should complete without crashing
                    assert isinstance(summaries, list)

    async def test_strict_mode_required_failure(self, clean_env, mock_ctx):
        """Test that strict mode exits on required plugin failure."""
        fields = _get_settings_fields(Settings)
        strict_setting = fields["PREFECT_EXPERIMENTS_PLUGINS_STRICT"]

        class RequiredPlugin:
            @register_hook
            def setup_environment(self, *, ctx: HookContext):
                raise ValueError("Required plugin failed!")

        pm = build_manager(HookSpec)
        pm.register(RequiredPlugin(), name="required-plugin")
        with temporary_settings(
            updates={PREFECT_EXPERIMENTS_PLUGINS_ENABLED: True, strict_setting: True}
        ):
            with patch("prefect._experimental.plugins.build_manager", return_value=pm):
                with patch(
                    "prefect._experimental.plugins.manager.load_entry_point_plugins"
                ):
                    with pytest.raises(SystemExit):
                        await run_startup_hooks(mock_ctx)

    async def test_successful_plugin_execution(self, clean_env, mock_ctx):
        """Test that successful plugins apply environment variables."""

        class TestPlugin:
            @register_hook
            def setup_environment(self, *, ctx: HookContext):
                return SetupResult(
                    env={"TEST_VAR": "test_value"},
                    note="Test plugin ran",
                )

        pm = build_manager(HookSpec)
        pm.register(TestPlugin(), name="test-plugin")
        with temporary_settings(updates={PREFECT_EXPERIMENTS_PLUGINS_ENABLED: True}):
            with patch("prefect._experimental.plugins.build_manager", return_value=pm):
                with patch(
                    "prefect._experimental.plugins.manager.load_entry_point_plugins"
                ):
                    summaries = await run_startup_hooks(mock_ctx)
                    assert len(summaries) == 1
                    assert summaries[0].plugin == "test-plugin"
                    assert summaries[0].error is None
                    assert summaries[0].note == "Test plugin ran"
                    assert "TEST_VAR" in summaries[0].env_preview
                    # Check that env var was actually set
                    assert os.environ.get("TEST_VAR") == "test_value"

    async def test_plugin_returning_none(self, clean_env, mock_ctx):
        """Test that plugins can return None to indicate no changes."""

        class NoOpPlugin:
            @register_hook
            def setup_environment(self, *, ctx: HookContext):
                return None

        pm = build_manager(HookSpec)
        pm.register(NoOpPlugin(), name="noop-plugin")
        with temporary_settings(updates={PREFECT_EXPERIMENTS_PLUGINS_ENABLED: True}):
            with patch("prefect._experimental.plugins.build_manager", return_value=pm):
                with patch(
                    "prefect._experimental.plugins.manager.load_entry_point_plugins"
                ):
                    summaries = await run_startup_hooks(mock_ctx)
                    assert len(summaries) == 1
                    assert summaries[0].plugin == "noop-plugin"
                    assert summaries[0].error is None
                    assert summaries[0].env_preview == {}

    async def test_plugin_env_vars_reflected_in_settings(self, clean_env, mock_ctx):
        """Test that env vars set by plugins are reflected in get_current_settings()."""
        from prefect.context import refresh_global_settings_context

        # Save original env var value for cleanup
        original_api_url = os.environ.get("PREFECT_API_URL")

        class SettingsPlugin:
            @register_hook
            def setup_environment(self, *, ctx: HookContext):
                # Set an env var that maps to a Prefect setting
                return SetupResult(
                    env={"PREFECT_API_URL": "http://plugin-set-url:4200/api"}
                )

        pm = build_manager(HookSpec)
        pm.register(SettingsPlugin(), name="settings-plugin")
        try:
            with temporary_settings(
                updates={PREFECT_EXPERIMENTS_PLUGINS_ENABLED: True}
            ):
                with patch(
                    "prefect._experimental.plugins.build_manager", return_value=pm
                ):
                    with patch(
                        "prefect._experimental.plugins.manager.load_entry_point_plugins"
                    ):
                        await run_startup_hooks(mock_ctx)
                        refresh_global_settings_context()
                        # Verify the global settings context was updated
                        # We check GLOBAL_SETTINGS_CONTEXT directly because
                        # get_current_settings() respects the context stack
                        # (temporary_settings is still active)
                        from prefect.context import (
                            GLOBAL_SETTINGS_CONTEXT as refreshed_ctx,
                        )

                        assert (
                            str(refreshed_ctx.settings.api.url)
                            == "http://plugin-set-url:4200/api"
                        )
        finally:
            # Clean up: restore original PREFECT_API_URL and refresh settings
            if original_api_url is None:
                os.environ.pop("PREFECT_API_URL", None)
            else:
                os.environ["PREFECT_API_URL"] = original_api_url
            refresh_global_settings_context()
class TestSetupSummary:
    """Tests for SetupSummary data structure."""

    def test_setup_summary_creation(self):
        """SetupSummary stores each constructor argument unchanged."""
        summary = SetupSummary(
            plugin="test-plugin",
            env_preview={"KEY": "value"},
            note="Test note",
            error=None,
        )
        assert (summary.plugin, summary.note) == ("test-plugin", "Test note")
        assert summary.env_preview == {"KEY": "value"}
        assert summary.error is None
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/_experimental/plugins/test_plugins.py",
"license": "Apache License 2.0",
"lines": 489,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/prefect/cli/flow_runs_watching.py | """
Utilities for following flow runs with interleaved events and logs
"""
from __future__ import annotations
from datetime import datetime
from typing import TYPE_CHECKING
from uuid import UUID
import anyio
from rich.console import Console
from prefect.client.orchestration import get_client
from prefect.client.schemas.objects import Log, StateType
from prefect.events import Event
from prefect.events.subscribers import FlowRunSubscriber
from prefect.exceptions import FlowRunWaitTimeout
if TYPE_CHECKING:
from prefect.client.schemas.objects import FlowRun
# Color mapping for state types
# Consumed by FlowRunFormatter.format_event to colorize the event bullet;
# states missing from this map fall back to "bright_magenta" there.
STATE_TYPE_COLORS: dict[StateType, str] = {
    StateType.SCHEDULED: "yellow",
    StateType.PENDING: "bright_black",
    StateType.RUNNING: "blue",
    StateType.COMPLETED: "green",
    StateType.FAILED: "red",
    StateType.CANCELLED: "bright_black",
    StateType.CANCELLING: "bright_black",
    StateType.CRASHED: "orange1",
    StateType.PAUSED: "bright_black",
}
async def watch_flow_run(
    flow_run_id: UUID, console: Console, timeout: int | None = None
) -> FlowRun:
    """
    Watch a flow run, displaying interleaved events and logs until completion.

    Args:
        flow_run_id: The ID of the flow run to watch
        console: Rich console for output
        timeout: Maximum time to wait for flow run completion in seconds.
            If None, waits indefinitely.

    Returns:
        The finished flow run

    Raises:
        FlowRunWaitTimeout: If the flow run exceeds the timeout
    """
    formatter = FlowRunFormatter()
    # anyio.move_on_after accepts delay=None to disable the deadline entirely,
    # so the timeout and no-timeout cases share a single streaming loop instead
    # of duplicating the subscriber iteration in two branches.
    with anyio.move_on_after(timeout) as cancel_scope:
        async with FlowRunSubscriber(flow_run_id=flow_run_id) as subscriber:
            async for item in subscriber:
                console.print(formatter.format(item))
    if cancel_scope.cancelled_caught:
        raise FlowRunWaitTimeout(
            f"Flow run with ID {flow_run_id} exceeded watch timeout of {timeout} seconds"
        )
    # The stream ended normally: fetch and return the final flow run state.
    async with get_client() as client:
        return await client.read_flow_run(flow_run_id)
class FlowRunFormatter:
    """Handles formatting of logs and events for CLI display"""

    def __init__(self):
        # Last rendered timestamp split as [HH, MM, SS, mmm]; lets later lines
        # elide leading components that have not changed.
        self._last_timestamp_parts = ["", "", "", ""]
        # Datetime of the previously formatted item, for out-of-order detection.
        self._last_datetime: datetime | None = None

    def format_timestamp(self, dt: datetime) -> str:
        """Format timestamp with incremental display"""
        # Millisecond precision: first three digits of the microseconds field.
        ms = dt.strftime("%f")[:3]
        current_parts = [dt.strftime("%H"), dt.strftime("%M"), dt.strftime("%S"), ms]
        # Time moved backwards (e.g. out-of-order delivery): reset the cache
        # and render the full timestamp rather than an incremental suffix.
        if self._last_datetime and dt < self._last_datetime:
            self._last_timestamp_parts = current_parts[:]
            self._last_datetime = dt
            return f"{current_parts[0]}:{current_parts[1]}:{current_parts[2]}.{current_parts[3]}"
        display_parts = []
        # Show only from the first (most significant) component that differs
        # from the previous timestamp onward.
        for i, (last, current) in enumerate(
            zip(self._last_timestamp_parts, current_parts)
        ):
            if current != last:
                display_parts = current_parts[i:]
                break
        else:
            # Nothing changed at all: still show the milliseconds component.
            display_parts = [current_parts[3]]
        self._last_timestamp_parts = current_parts[:]
        self._last_datetime = dt
        # Rebuild the separators (:" and ".") appropriate to how many trailing
        # components are being displayed.
        if len(display_parts) == 4:
            timestamp_str = f"{display_parts[0]}:{display_parts[1]}:{display_parts[2]}.{display_parts[3]}"
        elif len(display_parts) == 3:
            timestamp_str = f":{display_parts[0]}:{display_parts[1]}.{display_parts[2]}"
        elif len(display_parts) == 2:
            timestamp_str = f":{display_parts[0]}.{display_parts[1]}"
        else:
            timestamp_str = f".{display_parts[0]}"
        # Right-justify to a fixed width so columns line up across lines.
        return f"{timestamp_str:>12}"

    def format_run_id(self, run_id_short: str) -> str:
        """Format run ID (right-justified to the fixed column width)"""
        return f"{run_id_short:>12}"

    def format(self, item: Log | Event) -> str:
        """Format a log or event for display"""
        if isinstance(item, Log):
            return self.format_log(item)
        else:
            return self.format_event(item)

    def format_log(self, log: Log) -> str:
        """Format a log entry, indenting continuation lines of multi-line messages"""
        timestamp = self.format_timestamp(log.timestamp)
        # Prefer the task run id when present; fall back to the flow run id.
        run_id = log.task_run_id or log.flow_run_id
        run_id_short = str(run_id)[-12:] if run_id else "............"
        run_id_display = self.format_run_id(run_id_short)
        icon = "▪"
        # NOTE(review): the continuation indent is computed from the *stripped*
        # timestamp/run-id, while the rendered prefix uses the padded versions —
        # confirm continuation lines align as intended for short timestamps.
        prefix_plain = f"{icon} {timestamp.strip()} {run_id_display.strip()} "
        lines = log.message.split("\n")
        if len(lines) == 1:
            return f"[dim]▪[/dim] {timestamp} [dim]{run_id_display}[/dim] {log.message}"
        first_line = f"[dim]▪[/dim] {timestamp} [dim]{run_id_display}[/dim] {lines[0]}"
        indent = " " * len(prefix_plain)
        continuation_lines = [f"{indent}{line}" for line in lines[1:]]
        return first_line + "\n" + "\n".join(continuation_lines)

    def format_event(self, event: Event) -> str:
        """Format an event with a state-colored bullet"""
        timestamp = self.format_timestamp(event.occurred)
        run_id = None
        # Resolve the run id from the primary resource first...
        if event.resource.id.startswith("prefect.task-run."):
            run_id = event.resource.id.split(".", 2)[2]
        elif event.resource.id.startswith("prefect.flow-run."):
            run_id = event.resource.id.split(".", 2)[2]
        # ...then fall back to the first matching related resource.
        if not run_id:
            for related in event.related:
                if related.id.startswith("prefect.task-run."):
                    run_id = related.id.split(".", 2)[2]
                    break
                elif related.id.startswith("prefect.flow-run."):
                    run_id = related.id.split(".", 2)[2]
                    break
        run_id_short = run_id[-12:] if run_id else "............"
        run_id_display = self.format_run_id(run_id_short)
        # Get state type from event resource or payload
        state_type_str = event.resource.get("prefect.state-type")
        if not state_type_str and "validated_state" in event.payload:
            state_type_str = event.payload["validated_state"].get("type")
        # Map state type to color
        color = "bright_magenta"  # default for unknown states
        if state_type_str:
            try:
                state_type = StateType(state_type_str)
                color = STATE_TYPE_COLORS.get(state_type, "bright_magenta")
            except ValueError:
                # Unrecognized state string: keep the default color.
                pass
        name = event.resource.get("prefect.resource.name") or event.resource.id
        return (
            f"[{color}]●[/{color}] {timestamp} [dim]{run_id_display}[/dim] "
            f"{event.event} * [bold cyan]{name}[/bold cyan]"
        )
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/cli/flow_runs_watching.py",
"license": "Apache License 2.0",
"lines": 151,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/prefect/events/subscribers.py | """
Flow run subscriber that interleaves events and logs from a flow run
"""
from __future__ import annotations
import asyncio
from types import TracebackType
from typing import TYPE_CHECKING, Any, Optional, Union
from uuid import UUID
from typing_extensions import Self
from prefect.client.schemas.filters import LogFilter, LogFilterFlowRunId
from prefect.client.schemas.objects import TERMINAL_STATES, Log, StateType
from prefect.events import Event
from prefect.events.clients import get_events_subscriber
from prefect.events.filters import EventAnyResourceFilter, EventFilter
from prefect.logging.clients import get_logs_subscriber
if TYPE_CHECKING:
from prefect.events.clients import PrefectEventSubscriber
from prefect.logging.clients import PrefectLogsSubscriber
class FlowRunSubscriber:
    """
    Subscribes to both events and logs for a specific flow run, yielding them
    in an interleaved stream.

    This subscriber combines the event stream and log stream for a flow run into
    a single async iterator. When a terminal event (Completed, Failed, or Crashed)
    is received, the event subscription stops but log subscription continues for a
    configurable timeout to catch any straggler logs.

    Example:
        ```python
        from prefect.events.subscribers import FlowRunSubscriber

        async with FlowRunSubscriber(flow_run_id=my_flow_run_id) as subscriber:
            async for item in subscriber:
                if isinstance(item, Event):
                    print(f"Event: {item.event}")
                else:  # isinstance(item, Log)
                    print(f"Log: {item.message}")
        ```
    """

    # The flow run being followed
    _flow_run_id: UUID
    # Shared queue of interleaved items; None is a per-consumer "done" sentinel
    _queue: asyncio.Queue[Union[Log, Event, None]]
    # Background consumer tasks (one for logs, one for events)
    _tasks: list[asyncio.Task[None]]
    # Set once a terminal event for this flow run has been observed
    _flow_completed: bool
    # Seconds to keep waiting for straggler logs after the terminal event
    _straggler_timeout: int
    # Websocket reconnection attempts passed to the underlying subscribers
    _reconnection_attempts: int
    _log_filter: LogFilter
    _event_filter: EventFilter
    _logs_subscriber: PrefectLogsSubscriber | Any
    _events_subscriber: PrefectEventSubscriber | Any
    # Count of None sentinels consumed; iteration ends when every task reported
    _sentinels_received: int

    def __init__(
        self,
        flow_run_id: UUID,
        straggler_timeout: int = 3,
        reconnection_attempts: int = 10,
    ):
        """
        Args:
            flow_run_id: The ID of the flow run to follow
            straggler_timeout: After a terminal event, how long (in seconds) to wait
                for additional logs before stopping
            reconnection_attempts: Number of times to attempt reconnection if
                the websocket connection is lost
        """
        self._flow_run_id = flow_run_id
        self._straggler_timeout = straggler_timeout
        self._reconnection_attempts = reconnection_attempts
        self._queue = asyncio.Queue()
        self._tasks = []
        self._flow_completed = False
        self._sentinels_received = 0
        # Restrict both streams to this flow run only.
        self._log_filter = LogFilter(flow_run_id=LogFilterFlowRunId(any_=[flow_run_id]))
        self._event_filter = EventFilter(
            any_resource=EventAnyResourceFilter(id=[f"prefect.flow-run.{flow_run_id}"])
        )
        # Created lazily in __aenter__ so construction has no side effects.
        self._logs_subscriber = None
        self._events_subscriber = None

    async def __aenter__(self) -> Self:
        """Enter the async context manager"""
        self._logs_subscriber = get_logs_subscriber(
            filter=self._log_filter, reconnection_attempts=self._reconnection_attempts
        )
        self._events_subscriber = get_events_subscriber(
            filter=self._event_filter, reconnection_attempts=self._reconnection_attempts
        )
        await self._logs_subscriber.__aenter__()
        await self._events_subscriber.__aenter__()
        # One producer task per stream; each pushes a None sentinel when done.
        self._tasks = [
            asyncio.create_task(self._consume_logs()),
            asyncio.create_task(self._consume_events()),
        ]
        return self

    async def __aexit__(
        self,
        exc_type: Optional[type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> None:
        """Exit the async context manager and clean up resources"""
        # Cancel producers first so the underlying subscribers can close cleanly.
        for task in self._tasks:
            task.cancel()
        await asyncio.gather(*self._tasks, return_exceptions=True)
        await self._logs_subscriber.__aexit__(exc_type, exc_val, exc_tb)
        await self._events_subscriber.__aexit__(exc_type, exc_val, exc_tb)

    def __aiter__(self) -> Self:
        """Return self as an async iterator"""
        return self

    async def __anext__(self) -> Union[Log, Event]:
        """Get the next log or event from the interleaved stream"""
        # Keep pulling until both producer tasks have pushed their sentinel.
        while self._sentinels_received < len(self._tasks):
            if self._flow_completed:
                # After the terminal event, only wait a bounded time for
                # straggler items before ending iteration.
                try:
                    item = await asyncio.wait_for(
                        self._queue.get(), timeout=self._straggler_timeout
                    )
                except asyncio.TimeoutError:
                    raise StopAsyncIteration
            else:
                item = await self._queue.get()
            if item is None:
                # A producer finished; keep looping for the other stream.
                self._sentinels_received += 1
                continue
            return item
        raise StopAsyncIteration

    async def _consume_logs(self) -> None:
        """Background task to consume logs and put them in the queue"""
        try:
            async for log in self._logs_subscriber:
                await self._queue.put(log)
        except asyncio.CancelledError:
            pass
        except Exception:
            # NOTE(review): stream errors are swallowed silently; the sentinel
            # below still unblocks __anext__ — consider logging the failure.
            pass
        finally:
            # Always signal completion so __anext__ can terminate.
            await self._queue.put(None)

    async def _consume_events(self) -> None:
        """Background task to consume events and put them in the queue"""
        try:
            async for event in self._events_subscriber:
                await self._queue.put(event)
                # Check if this is a terminal state event for our flow run
                if event.resource.id == f"prefect.flow-run.{self._flow_run_id}":
                    # Get state type from event resource or payload
                    state_type_str = event.resource.get("prefect.state-type")
                    if not state_type_str and "validated_state" in event.payload:
                        state_type_str = event.payload["validated_state"].get("type")
                    if state_type_str:
                        try:
                            state_type = StateType(state_type_str)
                            if state_type in TERMINAL_STATES:
                                # Stop consuming events; logs may still trail in.
                                self._flow_completed = True
                                break
                        except ValueError:
                            # Unrecognized state string: treat as non-terminal.
                            pass
        except Exception:
            # NOTE(review): same silent swallow as _consume_logs — verify this
            # best-effort behavior is intended.
            pass
        finally:
            await self._queue.put(None)
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/events/subscribers.py",
"license": "Apache License 2.0",
"lines": 156,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:tests/cli/test_flow_run_watching.py | """Tests for flow run watching functionality"""
from __future__ import annotations
import asyncio
import pytest
from rich.console import Console
from prefect import flow
from prefect.cli.flow_runs_watching import watch_flow_run
from prefect.client.orchestration import PrefectClient
from prefect.exceptions import FlowRunWaitTimeout
from prefect.flow_engine import run_flow_async
from prefect.states import Completed
pytestmark = pytest.mark.usefixtures("hosted_api_server")
@flow
async def successful_flow():
    """Simple flow that completes successfully"""
    # Constant result lets tests assert on a concrete completed state.
    return 42
@flow
async def failing_flow():
    """Simple flow that raises an error"""
    # Deliberate failure used to drive the run into a Failed state.
    raise ValueError("Intentional test failure")
@flow
async def slow_flow():
    """Simple flow that takes a long time"""
    # Sleeps long enough that watch timeouts trip before completion.
    await asyncio.sleep(10)
async def test_watch_flow_run_exits_after_successful_completion(
    prefect_client: PrefectClient,
):
    """watch_flow_run should return the completed run once the flow finishes."""
    flow_run = await prefect_client.create_flow_run(
        flow=successful_flow, state=Completed()
    )
    console = Console()
    # Execute the flow concurrently so there is something to watch.
    runner = asyncio.create_task(
        run_flow_async(
            flow=successful_flow,
            flow_run=flow_run,
            return_type="state",
        )
    )
    watched = await watch_flow_run(flow_run.id, console)
    # Let the background execution finish before asserting.
    await runner
    assert watched.id == flow_run.id
    assert watched.state.is_completed()
async def test_watch_flow_run_with_failed_state(prefect_client: PrefectClient):
    """watch_flow_run should surface a Failed final state for a failing flow."""
    flow_run = await prefect_client.create_flow_run(
        flow=failing_flow, state=Completed()
    )
    console = Console()
    # Execute the failing flow concurrently while we watch it.
    runner = asyncio.create_task(
        run_flow_async(
            flow=failing_flow,
            flow_run=flow_run,
            return_type="state",
        )
    )
    watched = await watch_flow_run(flow_run.id, console)
    await runner
    assert watched.id == flow_run.id
    assert watched.state.is_failed()
async def test_watch_flow_run_timeout(prefect_client: PrefectClient):
    """watch_flow_run should raise FlowRunWaitTimeout when the run outlives the timeout."""
    flow_run = await prefect_client.create_flow_run(flow=slow_flow, state=Completed())
    console = Console()
    runner = asyncio.create_task(
        run_flow_async(
            flow=slow_flow,
            flow_run=flow_run,
            return_type="state",
        )
    )
    # A one-second watch timeout trips well before the 10s flow finishes.
    with pytest.raises(FlowRunWaitTimeout, match="exceeded watch timeout"):
        await watch_flow_run(flow_run.id, console, timeout=1)
    # Tear down the still-running flow.
    runner.cancel()
    try:
        await runner
    except asyncio.CancelledError:
        pass
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/cli/test_flow_run_watching.py",
"license": "Apache License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:tests/events/client/test_flow_run_subscriber.py | from __future__ import annotations
import asyncio
from datetime import datetime, timedelta, timezone
from uuid import UUID, uuid4
import pytest
from typing_extensions import Self
import prefect.events.subscribers
from prefect.client.schemas.objects import Log
from prefect.events import Event, Resource
from prefect.events.subscribers import FlowRunSubscriber
# Event names marking a flow run's terminal states.
# NOTE(review): not referenced elsewhere in this module — confirm it is still
# needed (FlowRunSubscriber itself derives terminal states from TERMINAL_STATES).
TERMINAL_FLOW_RUN_EVENTS = {
    "prefect.flow-run.Completed",
    "prefect.flow-run.Failed",
    "prefect.flow-run.Crashed",
}
class MockEventSubscriber:
    """Async-iterable stand-in for the events subscriber, replaying a fixed list."""

    def __init__(self, events: list[Event]):
        self.events = events
        self._index = 0

    async def __aenter__(self) -> Self:
        return self

    async def __aexit__(self, *args) -> None:
        pass

    def __aiter__(self) -> Self:
        return self

    async def __anext__(self) -> Event:
        # Replay the canned events one at a time, then end the stream.
        try:
            upcoming = self.events[self._index]
        except IndexError:
            raise StopAsyncIteration from None
        self._index += 1
        return upcoming
class MockLogsSubscriber:
    """Async-iterable stand-in for the logs subscriber, replaying a fixed list."""

    def __init__(self, logs: list[Log]):
        self.logs = logs
        self._index = 0

    async def __aenter__(self) -> Self:
        return self

    async def __aexit__(self, *args) -> None:
        pass

    def __aiter__(self) -> Self:
        return self

    async def __anext__(self) -> Log:
        # Replay the canned logs one at a time, then end the stream.
        try:
            upcoming = self.logs[self._index]
        except IndexError:
            raise StopAsyncIteration from None
        self._index += 1
        return upcoming
@pytest.fixture
def flow_run_id() -> UUID:
    """A fresh flow run id shared by the log/event fixtures below."""
    return uuid4()
@pytest.fixture
def sample_log1(flow_run_id: UUID) -> Log:
    """A sample log at T+0s"""
    # INFO-level (20) log attached directly to the flow run under test.
    return Log(
        id=uuid4(),
        name="test.logger",
        level=20,
        message="Test log 1",
        timestamp=datetime.now(timezone.utc),
        flow_run_id=flow_run_id,
    )
@pytest.fixture
def sample_log2(flow_run_id: UUID) -> Log:
    """A sample log at T+2s"""
    # Offset two seconds after sample_log1 to give the stream an ordering.
    return Log(
        id=uuid4(),
        name="test.logger",
        level=20,
        message="Test log 2",
        timestamp=datetime.now(timezone.utc) + timedelta(seconds=2),
        flow_run_id=flow_run_id,
    )
@pytest.fixture
def sample_event1(flow_run_id: UUID) -> Event:
    """A sample event at T+1s"""
    # Non-terminal Running event on the flow run's own resource id.
    return Event(
        id=uuid4(),
        occurred=datetime.now(timezone.utc) + timedelta(seconds=1),
        event="prefect.flow-run.Running",
        resource=Resource(
            root={"prefect.resource.id": f"prefect.flow-run.{flow_run_id}"}
        ),
        payload={},
    )
@pytest.fixture
def terminal_event(flow_run_id: UUID) -> Event:
    """A terminal event at T+3s"""
    # Completed is terminal, so this event should stop event consumption.
    return Event(
        id=uuid4(),
        occurred=datetime.now(timezone.utc) + timedelta(seconds=3),
        event="prefect.flow-run.Completed",
        resource=Resource(
            root={"prefect.resource.id": f"prefect.flow-run.{flow_run_id}"}
        ),
        payload={},
    )
@pytest.fixture
def straggler_log(flow_run_id: UUID) -> Log:
    """A log that arrives after terminal event at T+4s"""
    # Exercises the straggler window that stays open past the terminal event.
    return Log(
        id=uuid4(),
        name="test.logger",
        level=20,
        message="Straggler log",
        timestamp=datetime.now(timezone.utc) + timedelta(seconds=4),
        flow_run_id=flow_run_id,
    )
@pytest.fixture
def setup_mocks(monkeypatch):
    """Setup mocks for get_events_subscriber and get_logs_subscriber"""

    def create_mocks(events: list[Event], logs: list[Log]):
        # Replace the real subscriber factories with in-memory replays so the
        # FlowRunSubscriber under test never opens a websocket.
        mock_events = MockEventSubscriber(events)
        mock_logs = MockLogsSubscriber(logs)

        def mock_get_events_subscriber(*args, **kwargs):
            return mock_events

        def mock_get_logs_subscriber(*args, **kwargs):
            return mock_logs

        monkeypatch.setattr(
            prefect.events.subscribers,
            "get_events_subscriber",
            mock_get_events_subscriber,
        )
        monkeypatch.setattr(
            prefect.events.subscribers, "get_logs_subscriber", mock_get_logs_subscriber
        )

    return create_mocks
async def test_flow_run_subscriber_basic_interleaving(
    flow_run_id: UUID,
    sample_log1: Log,
    sample_event1: Event,
    sample_log2: Log,
    setup_mocks,
):
    """Both logs and events should surface through the single iterator."""
    setup_mocks([sample_event1], [sample_log1, sample_log2])
    async with FlowRunSubscriber(flow_run_id=flow_run_id) as subscriber:
        received = [item async for item in subscriber]
    assert len(received) == 3
    seen_messages = {i.message for i in received if isinstance(i, Log)}
    assert seen_messages >= {"Test log 1", "Test log 2"}
    seen_events = {i.event for i in received if isinstance(i, Event)}
    assert "prefect.flow-run.Running" in seen_events
async def test_flow_run_subscriber_terminal_event_stops_events(
    flow_run_id: UUID,
    sample_log1: Log,
    sample_event1: Event,
    terminal_event: Event,
    straggler_log: Log,
    setup_mocks,
):
    """A terminal event ends event consumption while straggler logs still arrive."""
    setup_mocks([sample_event1, terminal_event], [sample_log1, straggler_log])
    async with FlowRunSubscriber(
        flow_run_id=flow_run_id, straggler_timeout=1
    ) as subscriber:
        received = [item async for item in subscriber]
    assert len(received) == 4
    seen_events = {i.event for i in received if isinstance(i, Event)}
    assert "prefect.flow-run.Running" in seen_events
    assert "prefect.flow-run.Completed" in seen_events
    seen_messages = {i.message for i in received if isinstance(i, Log)}
    assert "Test log 1" in seen_messages
    assert "Straggler log" in seen_messages
async def test_flow_run_subscriber_straggler_timeout(
    flow_run_id: UUID,
    terminal_event: Event,
    monkeypatch,
):
    """After a terminal event, iteration ends once the straggler window elapses."""

    class StallingLogsSubscriber:
        """Logs subscriber that hangs, so only the straggler timeout can end things."""

        def __init__(self):
            self._stalled = False

        async def __aenter__(self) -> Self:
            return self

        async def __aexit__(self, *args) -> None:
            pass

        def __aiter__(self) -> Self:
            return self

        async def __anext__(self) -> Log:
            if not self._stalled:
                self._stalled = True
                # Far longer than the straggler timeout under test.
                await asyncio.sleep(10)
            raise StopAsyncIteration

    event_source = MockEventSubscriber([terminal_event])
    log_source = StallingLogsSubscriber()
    monkeypatch.setattr(
        prefect.events.subscribers,
        "get_events_subscriber",
        lambda *args, **kwargs: event_source,
    )
    monkeypatch.setattr(
        prefect.events.subscribers,
        "get_logs_subscriber",
        lambda *args, **kwargs: log_source,
    )
    async with FlowRunSubscriber(
        flow_run_id=flow_run_id, straggler_timeout=0.5
    ) as subscriber:
        received = [item async for item in subscriber]
    assert len(received) == 1
    only = received[0]
    assert isinstance(only, Event)
    assert only.event == "prefect.flow-run.Completed"
async def test_flow_run_subscriber_empty_streams(flow_run_id: UUID, setup_mocks):
    """With no events or logs, iteration should end immediately, yielding nothing."""
    setup_mocks([], [])
    async with FlowRunSubscriber(flow_run_id=flow_run_id) as subscriber:
        received = [item async for item in subscriber]
    assert received == []
async def test_flow_run_subscriber_context_manager_cleanup(
    flow_run_id: UUID, monkeypatch
):
    """Entering/exiting the subscriber should enter/exit both inner subscribers."""
    lifecycle = {
        "events_entered": False,
        "events_exited": False,
        "logs_entered": False,
        "logs_exited": False,
    }

    class TrackingEventSubscriber:
        async def __aenter__(self) -> Self:
            lifecycle["events_entered"] = True
            return self

        async def __aexit__(self, *args) -> None:
            lifecycle["events_exited"] = True

        def __aiter__(self) -> Self:
            return self

        async def __anext__(self) -> Event:
            raise StopAsyncIteration

    class TrackingLogsSubscriber:
        async def __aenter__(self) -> Self:
            lifecycle["logs_entered"] = True
            return self

        async def __aexit__(self, *args) -> None:
            lifecycle["logs_exited"] = True

        def __aiter__(self) -> Self:
            return self

        async def __anext__(self) -> Log:
            raise StopAsyncIteration

    monkeypatch.setattr(
        prefect.events.subscribers,
        "get_events_subscriber",
        lambda *args, **kwargs: TrackingEventSubscriber(),
    )
    monkeypatch.setattr(
        prefect.events.subscribers,
        "get_logs_subscriber",
        lambda *args, **kwargs: TrackingLogsSubscriber(),
    )
    async with FlowRunSubscriber(flow_run_id=flow_run_id):
        # Inside the context both streams are open but not yet closed.
        assert lifecycle["events_entered"] and lifecycle["logs_entered"]
        assert not lifecycle["events_exited"] and not lifecycle["logs_exited"]
    assert lifecycle["events_exited"] and lifecycle["logs_exited"]
async def test_flow_run_subscriber_only_terminal_events_stop_consumption(
    flow_run_id: UUID, setup_mocks
):
    """Non-terminal state events must not end event consumption early."""

    def _event(name: str, offset: int) -> Event:
        # Helper keeps the two canned events structurally identical.
        return Event(
            id=uuid4(),
            occurred=datetime.now(timezone.utc) + timedelta(seconds=offset),
            event=name,
            resource=Resource(
                root={"prefect.resource.id": f"prefect.flow-run.{flow_run_id}"}
            ),
            payload={},
        )

    setup_mocks(
        [_event("prefect.flow-run.Running", 0), _event("prefect.flow-run.Pending", 1)],
        [],
    )
    async with FlowRunSubscriber(flow_run_id=flow_run_id) as subscriber:
        received = [item async for item in subscriber]
    assert len(received) == 2
    assert all(isinstance(item, Event) for item in received)
async def test_flow_run_subscriber_terminal_event_for_different_flow_run(
    flow_run_id: UUID, setup_mocks
):
    """Terminal events for other flow runs pass through without stopping the stream."""
    other_flow_run_id = uuid4()

    def _event(name: str, target: UUID, offset: int) -> Event:
        return Event(
            id=uuid4(),
            occurred=datetime.now(timezone.utc) + timedelta(seconds=offset),
            event=name,
            resource=Resource(
                root={"prefect.resource.id": f"prefect.flow-run.{target}"}
            ),
            payload={},
        )

    setup_mocks(
        [
            _event("prefect.flow-run.Running", flow_run_id, 0),
            # Completed — but for a different run, so it must not be terminal here.
            _event("prefect.flow-run.Completed", other_flow_run_id, 1),
            _event("prefect.flow-run.Pending", flow_run_id, 2),
        ],
        [],
    )
    async with FlowRunSubscriber(flow_run_id=flow_run_id) as subscriber:
        received = [item async for item in subscriber]
    assert len(received) == 3
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/events/client/test_flow_run_subscriber.py",
"license": "Apache License 2.0",
"lines": 343,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:tests/client/test_subscriptions.py | import ssl
import pytest
from prefect.client.schemas import TaskRun
from prefect.client.subscriptions import Subscription
from prefect.settings import (
PREFECT_API_TLS_INSECURE_SKIP_VERIFY,
PREFECT_CLIENT_CUSTOM_HEADERS,
temporary_settings,
)
def test_subscription_uses_websocket_connect_with_ssl_for_wss():
    """A wss:// subscription should carry a verifying SSL context."""
    subscription = Subscription(
        model=TaskRun,
        path="/api/task_runs/subscriptions/scheduled",
        keys=["test.task"],
        base_url="https://api.example.com",
    )
    assert subscription._connect is not None
    connection_kwargs = subscription._connect.connection_kwargs
    assert "ssl" in connection_kwargs
    context = connection_kwargs["ssl"]
    assert isinstance(context, ssl.SSLContext)
    # Default secure posture: hostname checking and certificate verification on.
    assert context.check_hostname is True
    assert context.verify_mode == ssl.CERT_REQUIRED
def test_subscription_uses_websocket_connect_with_insecure_ssl():
    """Insecure-skip-verify mode should disable hostname checks and verification."""
    with temporary_settings({PREFECT_API_TLS_INSECURE_SKIP_VERIFY: True}):
        subscription = Subscription(
            model=TaskRun,
            path="/api/task_runs/subscriptions/scheduled",
            keys=["test.task"],
            base_url="https://api.example.com",
        )
        context = subscription._connect.connection_kwargs.get("ssl")
        assert context is not None
        assert not context.check_hostname
        assert context.verify_mode == ssl.CERT_NONE
def test_subscription_no_ssl_for_http():
    """Plain http:// subscriptions should not configure SSL at all."""
    subscription = Subscription(
        model=TaskRun,
        path="/api/task_runs/subscriptions/scheduled",
        keys=["test.task"],
        base_url="http://localhost:4200",
    )
    assert "ssl" not in subscription._connect.connection_kwargs
def test_subscription_uses_custom_headers_from_settings():
    """Headers configured via settings should be attached to the connector."""
    configured = {"X-Custom-Header": "test-value", "Authorization": "Bearer token"}
    with temporary_settings({PREFECT_CLIENT_CUSTOM_HEADERS: configured}):
        subscription = Subscription(
            model=TaskRun,
            path="/api/task_runs/subscriptions/scheduled",
            keys=["test.task"],
            base_url="https://api.example.com",
        )
        attached = subscription._connect.additional_headers
        assert attached is not None
        # Every configured header must be present verbatim.
        for header_name, header_value in configured.items():
            assert attached[header_name] == header_value
def test_subscription_empty_custom_headers_doesnt_break():
    """An empty custom-headers mapping must not prevent construction."""
    with temporary_settings({PREFECT_CLIENT_CUSTOM_HEADERS: {}}):
        subscription = Subscription(
            model=TaskRun,
            path="/api/task_runs/subscriptions/scheduled",
            keys=["test.task"],
            base_url="https://api.example.com",
        )
        assert subscription._connect is not None
def test_subscription_url_conversion_http_to_ws():
    """http:// base URLs should yield ws:// subscription URLs."""
    subscription = Subscription(
        model=TaskRun,
        path="/api/task_runs/subscriptions/scheduled",
        keys=["test.task"],
        base_url="http://localhost:4200",
    )
    expected = "ws://localhost:4200/api/task_runs/subscriptions/scheduled"
    assert subscription.subscription_url == expected
def test_subscription_url_conversion_https_to_wss():
    """https:// base URLs should yield wss:// subscription URLs."""
    subscription = Subscription(
        model=TaskRun,
        path="/api/task_runs/subscriptions/scheduled",
        keys=["test.task"],
        base_url="https://api.example.com",
    )
    expected = "wss://api.example.com/api/task_runs/subscriptions/scheduled"
    assert subscription.subscription_url == expected
@pytest.mark.usefixtures("hosted_api_server")
async def test_subscription_integration_with_custom_headers(hosted_api_server: str):
    """Custom headers should survive subscription construction against a live server."""
    with temporary_settings(
        {PREFECT_CLIENT_CUSTOM_HEADERS: {"X-Test-Header": "integration-test"}}
    ):
        subscription = Subscription(
            model=TaskRun,
            path="/api/task_runs/subscriptions/scheduled",
            keys=["test.task"],
            base_url=hosted_api_server,
        )
        # No live connection is opened here (_ensure_connected is not exercised);
        # we only verify the connector carries the configured header.
        assert subscription._connect is not None
        assert (
            subscription._connect.additional_headers["X-Test-Header"]
            == "integration-test"
        )
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/client/test_subscriptions.py",
"license": "Apache License 2.0",
"lines": 121,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/prefect/cli/api.py | """
API command — native cyclopts implementation.
Make direct requests to the Prefect API.
"""
import json
import sys
from typing import Annotated, Any, Optional
import cyclopts
import prefect.cli._app as _cli
from prefect.cli._utilities import (
exit_with_error,
with_cli_exception_handling,
)
# Sub-application mounted under the main Prefect CLI: `prefect api ...`.
api_app: cyclopts.App = cyclopts.App(
    name="api",
    help="Interact with the Prefect API.",
    version_flags=[],
    help_flags=["--help"],
)
def _parse_headers(header_list: list[str]) -> dict[str, str]:
"""Parse header strings in format 'Key: Value' into a dict."""
headers = {}
for header in header_list:
if ":" not in header:
exit_with_error(
f"Invalid header format: {header!r}. Use 'Key: Value' format."
)
key, value = header.split(":", 1)
headers[key.strip()] = value.strip()
return headers
def _parse_data(data: str | None) -> dict[str, Any] | str | None:
"""Parse data input - can be JSON string, @filename, or None."""
from pathlib import Path
if data is None:
return None
if data.startswith("@"):
filepath = Path(data[1:])
if not filepath.exists():
exit_with_error(f"File not found: {filepath}")
try:
content = filepath.read_text()
return json.loads(content)
except json.JSONDecodeError as e:
exit_with_error(f"Invalid JSON in file {filepath}: {e}")
else:
try:
return json.loads(data)
except json.JSONDecodeError as e:
exit_with_error(f"Invalid JSON data: {e}")
def _read_stdin_data() -> dict[str, Any] | None:
    """Parse JSON piped via stdin; returns None for a tty or empty input."""
    if sys.stdin.isatty():
        # Interactive terminal: nothing is being piped in.
        return None
    raw = sys.stdin.read()
    if not raw.strip():
        return None
    try:
        return json.loads(raw)
    except json.JSONDecodeError as e:
        exit_with_error(f"Invalid JSON from stdin: {e}")
    return None
def _format_output(response: Any, verbose: bool) -> None:
    """Pretty-print an HTTP response; optionally echo request/response headers."""
    from rich.syntax import Syntax

    if verbose:
        request = response.request
        _cli.console.print(f"[dim]> {request.method} {request.url}[/dim]")
        for name, value in request.headers.items():
            # Never echo credentials back to the terminal.
            shown = "Bearer ***" if name.lower() == "authorization" else value
            _cli.console.print(f"[dim]> {name}: {shown}[/dim]")
        _cli.console.print()
        _cli.console.print(
            f"[dim]< {response.status_code} {response.reason_phrase}[/dim]"
        )
        for name, value in response.headers.items():
            _cli.console.print(f"[dim]< {name}: {value}[/dim]")
        _cli.console.print()
    if not response.text:
        return
    try:
        payload = response.json()
    except (json.JSONDecodeError, ValueError):
        # Not JSON: fall back to the raw body.
        _cli.console.print(response.text)
        return
    pretty = json.dumps(payload, indent=2)
    if sys.stdout.isatty():
        # Interactive terminal: syntax-highlight the JSON.
        _cli.console.print(Syntax(pretty, "json", theme="monokai", word_wrap=True))
    else:
        # Piped output: emit compact machine-readable JSON.
        print(json.dumps(payload))
def _get_exit_code(error: Exception) -> int:
    """Map an exception to the CLI exit code (3=auth, 4=client, 5=server, 7=network, 1=other)."""
    import httpx

    if isinstance(error, httpx.HTTPStatusError):
        status = error.response.status_code
        if status in (401, 403):
            return 3
        if 400 <= status < 500:
            return 4
        if 500 <= status < 600:
            return 5
    elif isinstance(
        error, (httpx.ConnectError, httpx.TimeoutException, httpx.NetworkError)
    ):
        return 7
    # Anything else (including unexpected status ranges) is a generic failure.
    return 1
@api_app.default
@with_cli_exception_handling
async def api_request(
    method: str,
    path: str,
    *,
    data: Annotated[
        Optional[str],
        cyclopts.Parameter("--data", help="Request body as JSON string or @filename"),
    ] = None,
    headers: Annotated[
        Optional[list[str]],
        cyclopts.Parameter(
            "-H", alias="--header", help="Custom header in 'Key: Value' format"
        ),
    ] = None,
    verbose: Annotated[
        bool,
        cyclopts.Parameter(
            "-v", alias="--verbose", help="Show request/response headers"
        ),
    ] = False,
    root: Annotated[
        bool,
        cyclopts.Parameter("--root", help="Access API root level (e.g., /api/me)"),
    ] = False,
    account: Annotated[
        bool,
        cyclopts.Parameter("--account", help="Access account level (Cloud only)"),
    ] = False,
):
    """Make a direct request to the Prefect API.

    The request body is taken from --data (inline JSON or @filename), falling
    back to piped stdin, and finally to an empty JSON object for POST/PUT/PATCH.
    Exit codes (see _get_exit_code): 0 success, 3 auth error (401/403),
    4 other 4xx, 5 server 5xx, 7 network failure, 1 anything else.
    """
    import httpx
    from rich.console import Console
    from rich.syntax import Syntax
    from prefect.client.cloud import get_cloud_client
    from prefect.client.orchestration import get_client
    from prefect.settings import get_current_settings
    if headers is None:
        headers = []
    # Errors are printed to stderr so stdout stays clean for piped JSON.
    console_err = Console(stderr=True)
    # Validate the verb before doing any configuration or network work.
    http_methods = {"GET", "POST", "PUT", "PATCH", "DELETE", "HEAD", "OPTIONS"}
    http_method = method.upper()
    if http_method not in http_methods:
        exit_with_error(
            f"Invalid HTTP method: {method!r}. "
            f"Must be one of: {', '.join(sorted(http_methods))}"
        )
    settings = get_current_settings()
    configured_api_url = settings.api.url
    if not configured_api_url:
        exit_with_error(
            "No API URL configured. Set PREFECT_API_URL or run 'prefect cloud login'."
        )
    # Cloud is detected when the configured API URL lives under the Cloud API root.
    cloud_api_url = settings.cloud.api_url
    is_cloud = cloud_api_url and configured_api_url.startswith(cloud_api_url)
    if (root or account) and not is_cloud:
        exit_with_error(
            "--root and --account flags are only valid for Prefect Cloud. "
            "For self-hosted servers, paths are relative to your configured API URL."
        )
    # Body resolution order: --data, then piped stdin, then {} for write methods.
    body_data = _parse_data(data)
    if body_data is None and data is None:
        stdin_data = _read_stdin_data()
        if stdin_data is not None:
            body_data = stdin_data
        elif http_method in ("POST", "PUT", "PATCH"):
            body_data = {}
    custom_headers = _parse_headers(headers)
    try:
        if root or account:
            # --root/--account go through the CloudClient so paths resolve
            # against the API root or the account base URL.
            async with get_cloud_client() as client:
                if account:
                    route = f"{client.account_base_url}{path}"
                else:
                    route = path
                response = await client.raw_request(
                    http_method,
                    route,
                    json=body_data if body_data is not None else None,
                    headers=custom_headers if custom_headers else None,
                )
                response.raise_for_status()
        else:
            # Normal requests are relative to the configured API URL.
            async with get_client() as client:
                response = await client.request(
                    http_method,
                    path,
                    json=body_data if body_data is not None else None,
                    headers=custom_headers if custom_headers else None,
                )
                response.raise_for_status()
        _format_output(response, verbose)
    except httpx.HTTPStatusError as e:
        if verbose:
            # Verbose mode shows the full request/response dump even on errors.
            _format_output(e.response, verbose)
        else:
            console_err.print(
                f"[red]Error: {e.response.status_code} {e.response.reason_phrase}[/red]"
            )
            if e.response.text:
                try:
                    error_data = e.response.json()
                    json_str = json.dumps(error_data, indent=2)
                    syntax = Syntax(json_str, "json", theme="monokai")
                    console_err.print(syntax)
                except (json.JSONDecodeError, ValueError):
                    console_err.print(e.response.text)
        if e.response.status_code == 401:
            console_err.print(
                "\n[yellow]Check your API key configuration with:[/yellow] prefect config view"
            )
        raise SystemExit(_get_exit_code(e))
    except (httpx.ConnectError, httpx.TimeoutException, httpx.NetworkError) as e:
        console_err.print(f"[red]Error: Network error - {e}[/red]")
        console_err.print(f"\nCould not connect to API at: {configured_api_url}")
        raise SystemExit(7)
    except Exception as e:
        console_err.print(f"[red]Error: {e}[/red]")
        raise SystemExit(1)
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/cli/api.py",
"license": "Apache License 2.0",
"lines": 223,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:tests/cli/test_api_command.py | import json
from uuid import UUID
import httpx
import pytest
from respx import MockRouter
from prefect.settings import (
PREFECT_API_URL,
PREFECT_CLIENT_CSRF_SUPPORT_ENABLED,
PREFECT_CLOUD_API_URL,
PREFECT_SERVER_ALLOW_EPHEMERAL_MODE,
temporary_settings,
)
from prefect.testing.cli import invoke_and_assert
@pytest.fixture
def account_id() -> UUID:
    """Test account ID."""
    # Fixed, arbitrary UUID so Cloud URLs built from it are deterministic.
    return UUID("aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa")
@pytest.fixture
def workspace_id() -> UUID:
    """Test workspace ID."""
    # Fixed, arbitrary UUID so Cloud URLs built from it are deterministic.
    return UUID("bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb")
@pytest.fixture(autouse=True)
def test_settings():
    """Configure settings for testing."""
    # NOTE(review): presumably ephemeral-server mode is disabled so requests
    # go to the mocked HTTP transport, and CSRF support is disabled so the
    # mocks don't need to serve a CSRF token exchange — confirm against the
    # client implementation.
    with temporary_settings(
        {
            PREFECT_SERVER_ALLOW_EPHEMERAL_MODE: False,
            PREFECT_CLIENT_CSRF_SUPPORT_ENABLED: False,
        }
    ):
        yield
class TestBasicRequests:
    """Test basic request functionality.

    Each test mocks the target route with respx, runs the CLI through
    invoke_and_assert, and (where relevant) inspects the recorded request.
    """
    def test_get_request_oss(self, respx_mock: MockRouter) -> None:
        """Test GET request to OSS server."""
        respx_mock.get("http://localhost:4200/api/flows").mock(
            return_value=httpx.Response(200, json={"result": "success"})
        )
        with temporary_settings({PREFECT_API_URL: "http://localhost:4200/api"}):
            invoke_and_assert(
                ["api", "GET", "/flows"],
                expected_code=0,
            )
    def test_post_request_with_data(self, respx_mock: MockRouter) -> None:
        """Test POST request with inline JSON data."""
        route = respx_mock.post("http://localhost:4200/api/flows/filter").mock(
            return_value=httpx.Response(200, json={"result": "success"})
        )
        with temporary_settings({PREFECT_API_URL: "http://localhost:4200/api"}):
            invoke_and_assert(
                ["api", "POST", "/flows/filter", "--data", '{"limit": 10}'],
                expected_code=0,
            )
        # route.calls.last exposes the request actually sent by the CLI.
        assert route.called
        assert json.loads(route.calls.last.request.content) == {"limit": 10}
    def test_post_request_default_empty_body(self, respx_mock: MockRouter) -> None:
        """Test POST request defaults to empty object when no data provided."""
        route = respx_mock.post("http://localhost:4200/api/flows/filter").mock(
            return_value=httpx.Response(200, json=[])
        )
        with temporary_settings({PREFECT_API_URL: "http://localhost:4200/api"}):
            invoke_and_assert(
                ["api", "POST", "/flows/filter"],
                expected_code=0,
            )
        # Write methods default to {} when neither --data nor stdin is given.
        assert route.called
        assert json.loads(route.calls.last.request.content) == {}
    def test_delete_request(self, respx_mock: MockRouter) -> None:
        """Test DELETE request."""
        route = respx_mock.delete("http://localhost:4200/api/flows/abc-123").mock(
            return_value=httpx.Response(204)
        )
        with temporary_settings({PREFECT_API_URL: "http://localhost:4200/api"}):
            invoke_and_assert(
                ["api", "DELETE", "/flows/abc-123"],
                expected_code=0,
            )
        assert route.called
    def test_patch_request_with_data(self, respx_mock: MockRouter) -> None:
        """Test PATCH request with data."""
        route = respx_mock.patch("http://localhost:4200/api/deployments/abc-123").mock(
            return_value=httpx.Response(200, json={"updated": True})
        )
        with temporary_settings({PREFECT_API_URL: "http://localhost:4200/api"}):
            invoke_and_assert(
                [
                    "api",
                    "PATCH",
                    "/deployments/abc-123",
                    "--data",
                    '{"is_schedule_active": false}',
                ],
                expected_code=0,
            )
        # JSON false must round-trip to Python False in the sent body.
        assert route.called
        assert json.loads(route.calls.last.request.content) == {
            "is_schedule_active": False
        }
    def test_put_request(self, respx_mock: MockRouter) -> None:
        """Test PUT request."""
        route = respx_mock.put("http://localhost:4200/api/flows/abc-123").mock(
            return_value=httpx.Response(200, json={"updated": True})
        )
        with temporary_settings({PREFECT_API_URL: "http://localhost:4200/api"}):
            invoke_and_assert(
                ["api", "PUT", "/flows/abc-123", "--data", '{"name": "updated"}'],
                expected_code=0,
            )
        assert route.called
class TestCustomHeaders:
    """Test custom headers functionality (-H 'Key: Value' flags)."""
    def test_custom_headers(self, respx_mock: MockRouter) -> None:
        """Test custom headers with -H flag."""
        route = respx_mock.post("http://localhost:4200/api/flows/filter").mock(
            return_value=httpx.Response(200, json={})
        )
        with temporary_settings({PREFECT_API_URL: "http://localhost:4200/api"}):
            invoke_and_assert(
                [
                    "api",
                    "POST",
                    "/flows/filter",
                    "-H",
                    "X-Custom: value",
                    "--data",
                    "{}",
                ],
                expected_code=0,
            )
        # The header must survive parsing and appear on the outgoing request.
        assert route.called
        assert route.calls.last.request.headers.get("X-Custom") == "value"
    def test_multiple_custom_headers(self, respx_mock: MockRouter) -> None:
        """Test multiple custom headers."""
        route = respx_mock.post("http://localhost:4200/api/flows/filter").mock(
            return_value=httpx.Response(200, json={})
        )
        with temporary_settings({PREFECT_API_URL: "http://localhost:4200/api"}):
            invoke_and_assert(
                [
                    "api",
                    "POST",
                    "/flows/filter",
                    "-H",
                    "X-Custom-1: value1",
                    "-H",
                    "X-Custom-2: value2",
                    "--data",
                    "{}",
                ],
                expected_code=0,
            )
        assert route.called
        assert route.calls.last.request.headers.get("X-Custom-1") == "value1"
        assert route.calls.last.request.headers.get("X-Custom-2") == "value2"
class TestPathHandling:
    """Test smart path handling for Cloud vs OSS.

    OSS paths resolve against the configured API URL; Cloud adds --root
    (API root, e.g. /api/me) and --account (account base URL) targeting.
    """
    def test_oss_uses_api_prefix(self, respx_mock: MockRouter) -> None:
        """Test OSS automatically uses /api prefix from configured URL."""
        route = respx_mock.get("http://localhost:4200/api/flows").mock(
            return_value=httpx.Response(200, json=[])
        )
        with temporary_settings({PREFECT_API_URL: "http://localhost:4200/api"}):
            invoke_and_assert(
                ["api", "GET", "/flows"],
                expected_code=0,
            )
        assert route.called
    def test_cloud_workspace_requests(
        self, respx_mock: MockRouter, account_id: UUID, workspace_id: UUID
    ) -> None:
        """Test Cloud workspace requests use workspace URL."""
        route = respx_mock.get(
            f"https://api.prefect.cloud/api/accounts/{account_id}/workspaces/{workspace_id}/flows"
        ).mock(return_value=httpx.Response(200, json=[]))
        with temporary_settings(
            {
                PREFECT_API_URL: f"https://api.prefect.cloud/api/accounts/{account_id}/workspaces/{workspace_id}",
                PREFECT_CLOUD_API_URL: "https://api.prefect.cloud/api",
            }
        ):
            invoke_and_assert(
                ["api", "GET", "/flows"],
                expected_code=0,
            )
        assert route.called
    def test_root_flag_uses_cloud_api_root(
        self, respx_mock: MockRouter, account_id: UUID, workspace_id: UUID
    ) -> None:
        """Test --root flag uses CloudClient for API root."""
        route = respx_mock.get("https://api.prefect.cloud/api/me").mock(
            return_value=httpx.Response(200, json={"user": "test"})
        )
        with temporary_settings(
            {
                PREFECT_API_URL: f"https://api.prefect.cloud/api/accounts/{account_id}/workspaces/{workspace_id}",
                PREFECT_CLOUD_API_URL: "https://api.prefect.cloud/api",
            }
        ):
            invoke_and_assert(
                ["api", "GET", "/me", "--root"],
                expected_code=0,
            )
        assert route.called
    def test_root_flag_requires_cloud(self, respx_mock: MockRouter) -> None:
        """Test --root flag requires Cloud."""
        with temporary_settings({PREFECT_API_URL: "http://localhost:4200/api"}):
            result = invoke_and_assert(
                ["api", "GET", "/me", "--root"],
                expected_code=1,
            )
            assert (
                "--root and --account flags are only valid for Prefect Cloud"
                in result.output
            )
    def test_account_flag_uses_cloud_client(
        self, respx_mock: MockRouter, account_id: UUID, workspace_id: UUID
    ) -> None:
        """Test --account flag uses CloudClient with account path."""
        route = respx_mock.get(
            f"https://api.prefect.cloud/api/accounts/{account_id}/workspaces"
        ).mock(return_value=httpx.Response(200, json=[]))
        with temporary_settings(
            {
                PREFECT_API_URL: f"https://api.prefect.cloud/api/accounts/{account_id}/workspaces/{workspace_id}",
                PREFECT_CLOUD_API_URL: "https://api.prefect.cloud/api",
            }
        ):
            invoke_and_assert(
                ["api", "GET", "/workspaces", "--account"],
                expected_code=0,
            )
        assert route.called
    def test_query_parameters(self, respx_mock: MockRouter) -> None:
        """Test query parameters in path."""
        # Query strings are passed through as part of the path argument.
        route = respx_mock.get(
            "http://localhost:4200/api/flows?limit=10&offset=20"
        ).mock(return_value=httpx.Response(200, json=[]))
        with temporary_settings({PREFECT_API_URL: "http://localhost:4200/api"}):
            invoke_and_assert(
                ["api", "GET", "/flows?limit=10&offset=20"],
                expected_code=0,
            )
        assert route.called
class TestInputSources:
    """Test different input sources for request body."""
    def test_data_from_file(self, respx_mock: MockRouter, tmp_path) -> None:
        """Test reading data from file with @filename syntax."""
        route = respx_mock.post("http://localhost:4200/api/flows/filter").mock(
            return_value=httpx.Response(200, json={})
        )
        # --data @<path> reads the request body from the named file.
        data_file = tmp_path / "data.json"
        data_file.write_text('{"test": "value"}')
        with temporary_settings({PREFECT_API_URL: "http://localhost:4200/api"}):
            invoke_and_assert(
                ["api", "POST", "/flows/filter", "--data", f"@{data_file}"],
                expected_code=0,
            )
        assert route.called
        assert json.loads(route.calls.last.request.content) == {"test": "value"}
class TestErrorHandling:
    """Test error handling and exit codes.

    Exit-code contract: 3 for auth errors (401/403), 4 for other 4xx,
    5 for 5xx (see the `api` command's _get_exit_code helper).
    """
    def test_404_error_exit_code(self, respx_mock: MockRouter) -> None:
        """Test 404 errors exit with code 4."""
        respx_mock.get("http://localhost:4200/api/flows/invalid-id").mock(
            return_value=httpx.Response(404, json={"detail": "Flow not found"})
        )
        with temporary_settings({PREFECT_API_URL: "http://localhost:4200/api"}):
            result = invoke_and_assert(
                ["api", "GET", "/flows/invalid-id"],
                expected_code=4,
            )
            assert "404" in result.output
    def test_401_error_exit_code(self, respx_mock: MockRouter) -> None:
        """Test 401 errors exit with code 3."""
        respx_mock.get("http://localhost:4200/api/flows").mock(
            return_value=httpx.Response(401, json={"detail": "Unauthorized"})
        )
        with temporary_settings({PREFECT_API_URL: "http://localhost:4200/api"}):
            result = invoke_and_assert(
                ["api", "GET", "/flows"],
                expected_code=3,
            )
            assert "401" in result.output
    def test_500_error_exit_code(self, respx_mock: MockRouter) -> None:
        """Test 500 errors exit with code 5."""
        respx_mock.get("http://localhost:4200/api/flows").mock(
            return_value=httpx.Response(500, json={"detail": "Internal server error"})
        )
        with temporary_settings({PREFECT_API_URL: "http://localhost:4200/api"}):
            result = invoke_and_assert(
                ["api", "GET", "/flows"],
                expected_code=5,
            )
            assert "500" in result.output
    def test_422_validation_error(self, respx_mock: MockRouter) -> None:
        """Test 422 validation errors show friendly message."""
        respx_mock.post("http://localhost:4200/api/flows").mock(
            return_value=httpx.Response(
                422,
                json={
                    "detail": [
                        {
                            "loc": ["body", "name"],
                            "msg": "field required",
                            "type": "value_error.missing",
                        }
                    ]
                },
            )
        )
        with temporary_settings({PREFECT_API_URL: "http://localhost:4200/api"}):
            result = invoke_and_assert(
                ["api", "POST", "/flows"],
                expected_code=4,
            )
            assert "422" in result.output
class TestOutputFormatting:
    """Test output formatting and verbosity."""
    def test_json_output(self, respx_mock: MockRouter) -> None:
        """Test JSON is output correctly."""
        respx_mock.get("http://localhost:4200/api/flows/123").mock(
            return_value=httpx.Response(200, json={"id": "123", "name": "test"})
        )
        with temporary_settings({PREFECT_API_URL: "http://localhost:4200/api"}):
            result = invoke_and_assert(
                ["api", "GET", "/flows/123"],
                expected_code=0,
            )
            assert "123" in result.output
            assert "test" in result.output
    def test_verbose_shows_request_info(self, respx_mock: MockRouter) -> None:
        """Test --verbose shows request details."""
        respx_mock.get("http://localhost:4200/api/flows").mock(
            return_value=httpx.Response(
                200, json={}, headers={"content-type": "application/json"}
            )
        )
        with temporary_settings({PREFECT_API_URL: "http://localhost:4200/api"}):
            result = invoke_and_assert(
                ["api", "GET", "/flows", "--verbose"],
                expected_code=0,
            )
            # Verbose output includes the request method and response status.
            assert "GET" in result.output
            assert "200" in result.output
    def test_verbose_on_error(self, respx_mock: MockRouter) -> None:
        """Test --verbose shows details on errors."""
        respx_mock.get("http://localhost:4200/api/flows/bad").mock(
            return_value=httpx.Response(404, json={"detail": "Not found"})
        )
        with temporary_settings({PREFECT_API_URL: "http://localhost:4200/api"}):
            result = invoke_and_assert(
                ["api", "GET", "/flows/bad", "--verbose"],
                expected_code=4,
            )
            assert "404" in result.output
class TestEdgeCases:
    """Test edge cases and special scenarios."""
    def test_invalid_http_method(self, respx_mock: MockRouter) -> None:
        """Test invalid HTTP method shows helpful error."""
        with temporary_settings({PREFECT_API_URL: "http://localhost:4200/api"}):
            result = invoke_and_assert(
                ["api", "INVALID", "/flows"],
                expected_code=1,
            )
            assert "Invalid HTTP method" in result.output
    def test_empty_response_body(self, respx_mock: MockRouter) -> None:
        """Test handling of empty response bodies."""
        # 204 No Content must not crash the JSON formatter.
        respx_mock.delete("http://localhost:4200/api/flows/123").mock(
            return_value=httpx.Response(204)
        )
        with temporary_settings({PREFECT_API_URL: "http://localhost:4200/api"}):
            invoke_and_assert(
                ["api", "DELETE", "/flows/123"],
                expected_code=0,
            )
    def test_non_json_response(self, respx_mock: MockRouter) -> None:
        """Test handling of non-JSON responses."""
        # Non-JSON bodies should be printed verbatim rather than rejected.
        respx_mock.get("http://localhost:4200/api/some-text-endpoint").mock(
            return_value=httpx.Response(
                200, text="Plain text response", headers={"content-type": "text/plain"}
            )
        )
        with temporary_settings({PREFECT_API_URL: "http://localhost:4200/api"}):
            result = invoke_and_assert(
                ["api", "GET", "/some-text-endpoint"],
                expected_code=0,
            )
            assert "Plain text response" in result.output
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/cli/test_api_command.py",
"license": "Apache License 2.0",
"lines": 388,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:tests/cli/deploy/test_schedule_config.py | """Tests for schedule configuration in deployment YAML."""
import pytest
from prefect.cli.deploy._models import RawScheduleConfig
from prefect.cli.deploy._schedules import _schedule_config_to_deployment_schedule
from prefect.client.schemas.schedules import CronSchedule, IntervalSchedule
def test_cron_schedule_with_day_or_and_active():
    """Test that cron schedule with both day_or and active fields validates.

    Regression test for issue #19117 where using day_or and active together
    would fail with "Extra inputs are not permitted" error.
    """
    # The raw YAML model must accept day_or and active side by side.
    raw_model = RawScheduleConfig(
        cron="0 3 * * *",
        timezone="UTC",
        active=False,
        day_or=True,
    )
    assert raw_model.cron == "0 3 * * *"
    assert raw_model.active is False
    assert raw_model.day_or is True
    # Conversion must carry day_or through to the resulting CronSchedule.
    converted = _schedule_config_to_deployment_schedule(
        {
            "cron": "0 3 * * *",
            "timezone": "UTC",
            "active": False,
            "day_or": True,
        }
    )
    assert converted["active"] is False
    built_schedule = converted["schedule"]
    assert isinstance(built_schedule, CronSchedule)
    assert built_schedule.day_or is True
class TestIntervalScheduleFormats:
    """Test various interval formats accepted in prefect.yaml."""
    @pytest.mark.parametrize(
        "interval_value,expected_seconds",
        [
            (600, 600), # integer seconds
            (600.5, 600.5), # float seconds
            ("PT10M", 600), # ISO 8601: 10 minutes
            ("PT1H30M", 5400), # ISO 8601: 1 hour 30 minutes
            ("PT1H", 3600), # ISO 8601: 1 hour
            ("P1D", 86400), # ISO 8601: 1 day
            ("1:30:00", 5400), # time format: HH:MM:SS
            ("00:10:00", 600), # time format: 10 minutes
        ],
    )
    def test_interval_schedule_accepts_various_formats(
        self, interval_value, expected_seconds
    ):
        """Test that interval schedules accept integers, floats, ISO 8601 durations,
        and time format strings in prefect.yaml.

        The underlying IntervalSchedule pydantic model accepts these formats via
        pydantic's timedelta parsing, so the CLI YAML model should too.
        """
        # Test RawScheduleConfig model accepts the format
        schedule = RawScheduleConfig(
            interval=interval_value,
            timezone="UTC",
            active=False,
        )
        assert schedule.interval is not None
        # Test conversion to IntervalSchedule works
        schedule_config = {
            "interval": interval_value,
            "timezone": "UTC",
            "active": False,
        }
        result = _schedule_config_to_deployment_schedule(schedule_config)
        assert result["active"] is False
        schedule = result["schedule"]
        assert isinstance(schedule, IntervalSchedule)
        # total_seconds() compares exactly because all fixtures are whole or
        # half seconds (no float rounding involved).
        assert schedule.interval.total_seconds() == expected_seconds
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/cli/deploy/test_schedule_config.py",
"license": "Apache License 2.0",
"lines": 73,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:examples/atproto_dashboard_with_prefect_assets.py | # ---
# title: Social Analytics Dashboard
# description: Build a social media analytics dashboard using Prefect Assets, ATProto/Bluesky APIs, dbt transformations, and Streamlit visualization.
# icon: chart-bar
# dependencies: ["prefect", "prefect-aws"]
# cmd: ["python", "atproto_dashboard_with_prefect_assets.py"]
# tags: [assets, social-media, analytics, dbt, streamlit, atproto, bluesky]
# github_url: https://github.com/zzstoatzz/atproto-dashboard
# draft: false
# order: 4
# ---
#
# **Build data pipelines with Prefect Assets – declarative, dependency-aware, and observable.**
#
# This example demonstrates how to use **Prefect Assets** to build a social media analytics pipeline.
# The full implementation with [ATProto](https://atproto.com) integration, [dbt](https://www.getdbt.com) transformations, and a [Streamlit](https://streamlit.io) dashboard
# dashboard is available at: https://github.com/zzstoatzz/atproto-dashboard
#
# ## Key Prefect Features
#
# * **`@materialize` decorator** – Transform functions into versioned, cacheable data assets
# * **Automatic dependency tracking** – Prefect infers dependencies from function parameters
# * **S3-backed assets** – Store assets directly in S3 with built-in versioning
# * **Artifact creation** – Generate rich UI artifacts for observability
# * **Flow orchestration** – Coordinate asset materialization with retries and scheduling
#
# ## The Pattern: Asset-Based Data Pipelines
#
# Instead of manually managing data dependencies and storage:
# 1. Define assets with `@materialize` and unique keys (e.g., S3 paths)
# 2. Declare dependencies via function parameters or `asset_deps`
# 3. Let Prefect handle execution order, caching, and storage
# 4. Get automatic lineage tracking and observability
#
# ## Running This Example
#
# This simplified example demonstrates the core patterns. For the complete implementation:
# ```bash
# git clone https://github.com/zzstoatzz/atproto-dashboard
# cd atproto-dashboard
# # Follow README for setup and configuration
# ```
#
# ## Core Pattern: Define Assets and Dependencies
#
# Assets represent data products in your pipeline. Each asset has:
# - A unique key (often an S3 path or other storage location)
# - A materialization function decorated with `@materialize`
# - Dependencies (automatically tracked via function parameters)
#
# Define assets with descriptive keys
import json
from datetime import datetime, timezone
from pathlib import Path
from textwrap import dedent
from typing import Any
from prefect import flow
from prefect.artifacts import create_markdown_artifact
from prefect.assets import Asset, materialize
# Each asset gets a stable URI-style key; in production these are typically
# S3 paths or other storage locations (see the full project linked above).
raw_data_asset = Asset(key="pipeline://raw_data")
processed_data_asset = Asset(key="pipeline://processed_data")
analytics_asset = Asset(key="pipeline://analytics")
# ### Step 1: Fetch Raw Data
#
# The first asset fetches data from an external source. In the [full implementation](https://github.com/zzstoatzz/atproto-dashboard),
# this connects to the ATProto/Bluesky API to fetch social media data.
@materialize(raw_data_asset)
def fetch_raw_data() -> dict[str, Any]:
    """Fetch raw data from an external source."""
    print("Fetching raw data...")
    # Stand-in payload; the full project fetches from the ATProto/Bluesky API.
    items = ["item1", "item2", "item3"]
    data = {
        "items": items,
        "fetched_at": datetime.now(timezone.utc).isoformat(),
        "count": len(items),
    }
    print(f"✓ Fetched {data['count']} items")
    return data
# ### Step 2: Process the Data
#
# This asset demonstrates **automatic dependency tracking**. By accepting `raw_data` as a parameter,
# Prefect knows this asset depends on `raw_data_asset` and ensures it's materialized first.
#
# In production, this would store data to S3 with partitioning. Here we use local storage for simplicity.
@materialize(processed_data_asset)
def process_data(raw_data: dict[str, Any]) -> dict[str, Any]:
    """Process raw data into a structured format with automatic dependency tracking."""
    print(f"Processing {raw_data['count']} items...")
    processed = {
        "items": [entry.upper() for entry in raw_data["items"]],
        "processed_at": datetime.now(timezone.utc).isoformat(),
        "source_count": raw_data["count"],
    }
    # Persist alongside the run; production would partition this into S3.
    out_dir = Path("./data")
    out_dir.mkdir(exist_ok=True)
    (out_dir / "processed.json").write_text(json.dumps(processed, indent=2))
    print(f"✓ Processed and stored {len(processed['items'])} items")
    return processed
# ### Step 3: Create Analytics
#
# This asset demonstrates **chained dependencies** (it depends on `processed_data`, which depends on `raw_data`)
# and **artifact creation** for rich observability in the Prefect UI.
#
# In the full implementation, this runs dbt transformations to create analytics models.
@materialize(analytics_asset)
def create_analytics(processed_data: dict[str, Any]) -> dict[str, Any]:
    """Generate analytics with chained dependencies and create UI artifacts."""
    print("Creating analytics...")
    analytics = {
        "total_items": len(processed_data["items"]),
        "source_timestamp": processed_data["processed_at"],
        "created_at": datetime.now(timezone.utc).isoformat(),
    }
    # Publish a markdown artifact so the run summary is visible in the UI.
    create_markdown_artifact(
        key="analytics-summary",
        markdown=dedent(
            f"""
            # Analytics Summary
            - **Total Items**: {analytics["total_items"]}
            - **Created**: {analytics["created_at"]}
            - **Source**: {analytics["source_timestamp"]}
            This artifact appears in the Prefect UI for observability.
            """
        ),
        description="Analytics summary for this pipeline run",
    )
    print(f"✓ Analytics created for {analytics['total_items']} items")
    return analytics
# ## Flow: Orchestrate Asset Materialization
#
# The flow calls each asset function, and Prefect handles:
# - Dependency resolution (ensuring correct execution order)
# - Automatic caching (skip re-computation if upstream assets haven't changed)
# - Observability (tracking lineage and execution in the UI)
@flow(name="asset-pipeline-demo", log_prints=True)
def run_asset_pipeline() -> dict[str, Any]:
    """Orchestrate the asset pipeline.

    Because each materialization function receives the previous one's result,
    Prefect automatically tracks asset dependencies, enforces execution order,
    surfaces lineage in the UI, and enables caching and versioning.
    """
    print("🚀 Starting asset pipeline")
    # Chained calls: each asset depends on the one before it.
    analytics = create_analytics(process_data(fetch_raw_data()))
    print(f"✅ Pipeline complete! Processed {analytics['total_items']} items")
    return analytics
if __name__ == "__main__":
    # Run the demo pipeline when executed as a script (see the `cmd` header).
    run_asset_pipeline()
# ## What Makes Assets Powerful?
#
# 1. **Automatic Dependency Tracking**
# - Prefect infers dependencies from function parameters
# - Ensures correct execution order without manual DAG definition
# - Tracks asset lineage for observability
#
# 2. **Caching and Versioning**
# - Assets are versioned based on their inputs
# - Skip re-computation when upstream data hasn't changed
# - Efficient incremental processing
#
# 3. **Storage Integration**
# - Asset keys can be S3 paths, database URIs, or any identifier
# - Built-in support for `prefect-aws`, `prefect-gcp`, etc.
# - Automatic data persistence and retrieval
#
# 4. **Observability**
# - Every materialization tracked in the Prefect UI
# - Artifacts provide rich context (tables, markdown, links)
# - Full lineage and execution history
#
# 5. **Production Ready**
# - Built-in retry logic and error handling
# - Scheduling and automation via Prefect deployments
# - Scales from local development to cloud production
#
# ## Full Implementation
#
# This example demonstrates the core patterns. The complete implementation includes:
# - Real ATProto API integration
# - S3-backed asset storage with partitioning
# - dbt transformations with DuckDB
# - Streamlit dashboard for visualization
# - Production-ready error handling and logging
#
# See the full project at: https://github.com/zzstoatzz/atproto-dashboard
#
# ## Learn More
#
# - [Prefect Assets Documentation](https://docs.prefect.io/v3/concepts/assets)
# - [prefect-aws Integration](https://docs.prefect.io/integrations/prefect-aws)
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "examples/atproto_dashboard_with_prefect_assets.py",
"license": "Apache License 2.0",
"lines": 196,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/prefect/types/_schema.py | """Types for OpenAPI schemas."""
from typing import Annotated, Any
from pydantic import BeforeValidator
def _normalize_parameter_schema(value: Any) -> dict[str, Any]:
"""
Normalize empty or None parameter schemas to valid OpenAPI format.
Ensures parameter_openapi_schema is always a valid OpenAPI object schema,
converting None or {} to {"type": "object", "properties": {}}.
"""
if value is None or value == {}:
return {"type": "object", "properties": {}}
return value
# Annotated alias: pydantic runs _normalize_parameter_schema on the raw value
# before standard dict validation, so fields typed ParameterSchema always end
# up holding a well-formed OpenAPI object schema.
ParameterSchema = Annotated[
    dict[str, Any],
    BeforeValidator(_normalize_parameter_schema),
]
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/types/_schema.py",
"license": "Apache License 2.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/prefect:tests/cli/deploy/test_format_validation_error.py | """Tests for the _format_validation_error function."""
import uuid
import pytest
from pydantic import ValidationError
from prefect.cli.deploy._config import _format_validation_error
from prefect.cli.deploy._models import PrefectYamlModel
def test_format_validation_error_with_invalid_schedule():
    """Test error formatting when schedule field is invalid."""
    # A uuid suffix keeps the asserted substring unique to this test run.
    deployment_name = f"my-deployment-{uuid.uuid4()}"
    raw_data = {
        "deployments": [
            {
                "name": deployment_name,
                "entrypoint": "flow.py:my_flow",
                "schedule": "not a dict", # Should be a dict
            }
        ]
    }
    with pytest.raises(ValidationError) as exc_info:
        PrefectYamlModel.model_validate(raw_data)
    result = _format_validation_error(exc_info.value, raw_data)
    assert "Invalid fields in deployments:" in result
    assert f"{deployment_name}: schedule" in result
    # The formatted message links to the deployment schema docs.
    assert "https://docs.prefect.io/v3/concepts/deployments#deployment-schema" in result
def test_format_validation_error_with_multiple_invalid_fields():
    """Test error formatting with multiple invalid fields in one deployment."""
    deployment_name = f"test-deploy-{uuid.uuid4()}"
    raw_data = {
        "deployments": [
            {
                "name": deployment_name,
                "entrypoint": 123, # Should be string
                "schedule": False, # Should be dict
                "tags": {}, # Should be list or string
            }
        ]
    }
    with pytest.raises(ValidationError) as exc_info:
        PrefectYamlModel.model_validate(raw_data)
    result = _format_validation_error(exc_info.value, raw_data)
    assert "Invalid fields in deployments:" in result
    assert f"{deployment_name}:" in result
    # Fields should be sorted alphabetically; the fallback accepts any order.
    assert "entrypoint, schedule, tags" in result or (
        "entrypoint" in result and "schedule" in result and "tags" in result
    )
def test_format_validation_error_with_multiple_deployments():
    """Test error formatting with errors in multiple deployments."""
    first_deployment_name = f"first-deployment-{uuid.uuid4()}"
    second_deployment_name = f"second-deployment-{uuid.uuid4()}"
    raw_data = {
        "deployments": [
            {
                "name": first_deployment_name,
                "entrypoint": "flow.py:my_flow",
                "schedule": [], # Invalid
            },
            {
                "name": second_deployment_name,
                "entrypoint": "flow.py:my_flow",
                "concurrency_limit": "not a number", # Invalid
            },
        ]
    }
    with pytest.raises(ValidationError) as exc_info:
        PrefectYamlModel.model_validate(raw_data)
    result = _format_validation_error(exc_info.value, raw_data)
    # Each deployment's errors are reported under its own name.
    assert "Invalid fields in deployments:" in result
    assert f"{first_deployment_name}: schedule" in result
    assert f"{second_deployment_name}: concurrency_limit" in result
def test_format_validation_error_with_unnamed_deployment():
    """Test error formatting when deployment has no name."""
    raw_data = {
        "deployments": [
            {
                "entrypoint": "flow.py:my_flow",
                "tags": 123, # Invalid
            }
        ]
    }
    with pytest.raises(ValidationError) as exc_info:
        PrefectYamlModel.model_validate(raw_data)
    result = _format_validation_error(exc_info.value, raw_data)
    assert "Invalid fields in deployments:" in result
    assert "#0: tags" in result # Should use index when no name
def test_format_validation_error_with_invalid_tags_type():
    """A wrongly-typed 'tags' field is attributed to the named deployment."""
    deployment_name = f"my-deployment-{uuid.uuid4()}"
    config = {
        "deployments": [
            {
                "name": deployment_name,
                "entrypoint": "flow.py:my_flow",
                "tags": 123,  # Should be list or string
            }
        ]
    }
    with pytest.raises(ValidationError) as excinfo:
        PrefectYamlModel.model_validate(config)
    message = _format_validation_error(excinfo.value, config)
    assert "Invalid fields in deployments:" in message
    assert f"{deployment_name}: tags" in message
def test_format_validation_error_with_invalid_dict_keys():
    """A dict field with an unexpected key still points at the bad field."""
    deployment_name = f"bad-schedule-{uuid.uuid4()}"
    config = {
        "deployments": [
            {
                "name": deployment_name,
                "entrypoint": "flow.py:my_flow",
                "schedule": {"foo": "0 4 * * *"},  # Invalid key
            }
        ]
    }
    with pytest.raises(ValidationError) as excinfo:
        PrefectYamlModel.model_validate(config)
    message = _format_validation_error(excinfo.value, config)
    assert "Invalid fields in deployments:" in message
    assert f"{deployment_name}:" in message
    # Depending on the error shape, either the field or the bad key may surface.
    assert "schedule" in message or "foo" in message
def test_format_validation_error_empty_raw_data():
    """An empty config dict is valid and yields no deployments."""
    model = PrefectYamlModel.model_validate({})
    assert model.deployments == []
def test_format_validation_error_no_deployment_errors():
    """Unknown top-level keys are ignored rather than rejected."""
    model = PrefectYamlModel.model_validate(
        {"invalid_top_level_field": "value", "deployments": []}
    )
    assert model.deployments == []
def test_format_validation_error_with_top_level_field_error():
    """Top-level field errors get their own section and docs link (issue #19467)."""
    deployment_name = f"my-deployment-{uuid.uuid4()}"
    config = {
        "prefect-version": 3,  # Should be a string, not an int
        "deployments": [
            {
                "name": deployment_name,
                "entrypoint": "flow.py:my_flow",
            }
        ],
    }
    with pytest.raises(ValidationError) as excinfo:
        PrefectYamlModel.model_validate(config)
    message = _format_validation_error(excinfo.value, config)
    assert "Invalid top-level fields in config file:" in message
    assert "prefect-version: Input should be a valid string" in message
    assert "https://docs.prefect.io/v3/how-to-guides/deployments/prefect-yaml" in message
    # The deployments themselves were valid, so no deployment section appears.
    assert "Invalid fields in deployments:" not in message
def test_format_validation_error_with_both_top_level_and_deployment_errors():
    """Top-level and deployment errors are reported together, each with a link."""
    deployment_name = f"my-deployment-{uuid.uuid4()}"
    config = {
        "name": 123,  # Should be a string
        "deployments": [
            {
                "name": deployment_name,
                "entrypoint": "flow.py:my_flow",
                "tags": {},  # Should be list or string
            }
        ],
    }
    with pytest.raises(ValidationError) as excinfo:
        PrefectYamlModel.model_validate(config)
    message = _format_validation_error(excinfo.value, config)
    # Top-level section
    assert "Invalid top-level fields in config file:" in message
    assert "name: Input should be a valid string" in message
    # Deployment section
    assert "Invalid fields in deployments:" in message
    assert f"{deployment_name}: tags" in message
    # One documentation link per section
    assert "https://docs.prefect.io/v3/how-to-guides/deployments/prefect-yaml" in message
    assert "https://docs.prefect.io/v3/concepts/deployments#deployment-schema" in message
def test_trigger_with_templated_enabled_field_should_not_raise():
    """
    Regression test for issue #19501: triggers with templated 'enabled' field
    should be accepted during YAML loading, allowing template resolution later.

    The 'enabled' field may contain a Jinja template like
    '{{ prefect.variables.deployment_trigger_is_active }}' which is resolved
    to a boolean only after variable resolution, so YAML validation must
    accept the raw string and defer strict type validation.
    """
    deployment_name = f"my-deployment-{uuid.uuid4()}"
    config = {
        "deployments": [
            {
                "name": deployment_name,
                "entrypoint": "flow.py:my_flow",
                "triggers": [
                    {
                        "type": "event",
                        "enabled": "{{ prefect.variables.deployment_trigger_is_active }}",
                        "match": {"prefect.resource.id": "hello.world"},
                        "expect": ["external.resource.pinged"],
                    }
                ],
            }
        ]
    }
    # Validation must succeed; the trigger stays a raw dict until templates resolve.
    model = PrefectYamlModel.model_validate(config)
    assert len(model.deployments) == 1
    triggers = model.deployments[0].triggers
    assert triggers is not None
    assert len(triggers) == 1
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/cli/deploy/test_format_validation_error.py",
"license": "Apache License 2.0",
"lines": 210,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:tests/events/test_events_text_search.py | """Tests for text search functionality across events storage backends"""
from datetime import datetime, timedelta, timezone
from typing import Awaitable, Callable, Optional, Union
from uuid import uuid4
import pytest
from sqlalchemy.ext.asyncio import AsyncSession
from prefect.events.schemas.events import Event, Resource
from prefect.server.events.filters import (
EventFilter,
EventOccurredFilter,
EventTextFilter,
)
from prefect.server.events.schemas.events import ReceivedEvent
from prefect.server.events.storage.database import query_events, write_events
# Define function types for our test variations
# Shared signature of query_events (database) and query_events_memory (below):
# returns (page of events, total matching count, optional next-page token).
QueryEventsFn = Callable[..., Awaitable[tuple[list[Event], int, Optional[str]]]]
# In-memory event search that intentionally matches query_events() signature
async def query_events_memory(
    session: list[Event],  # For memory tests, this will be the list of events
    filter: EventFilter,
    page_size: int = 50,
) -> tuple[list[Event], int, Optional[str]]:
    """In-memory event filtering using EventFilter.includes() method.

    This function intentionally shares the signature of the database counterpart
    (query_events) and is used for filtering on WebSockets and other testing use cases
    where we need to filter events in memory rather than in the database.
    """
    candidates = session  # alias: the "session" here is just a list of events

    def keep(event: Event) -> bool:
        # Apply each sub-filter in the same order as the database path.
        if not filter.occurred.includes(event):
            return False
        if filter.text and not filter.text.includes(event):
            return False
        if filter.event and not filter.event.includes(event):
            return False
        if filter.resource and not filter.resource.includes(event):
            return False
        if filter.id and not filter.id.includes(event):
            return False
        return True

    selected = [event for event in candidates if keep(event)]
    # Oldest-first only when ascending order was requested; otherwise newest-first.
    selected.sort(
        key=lambda event: event.occurred, reverse=filter.order.value != "ASC"
    )
    # The page is a simple prefix; memory tests never use pagination tokens.
    return selected[:page_size], len(selected), None
# (backend id, query function) pairs that every test below is parametrized over.
VARIATIONS: list[tuple[str, QueryEventsFn]] = [
    ("memory", query_events_memory),
    ("database", query_events),
]
def pytest_generate_tests(metafunc: pytest.Metafunc):
    """Parametrize tests over the storage backends listed in VARIATIONS."""
    fixtures = set(metafunc.fixturenames)
    # If the test itself includes a marker saying that it is for only one variation,
    # then honor that marker and filter the test generation down to just that
    variation_names = {v[0] for v in VARIATIONS}
    marked_variations = {
        mark.name
        for mark in metafunc.definition.own_markers
        if mark.name in variation_names
    }
    if marked_variations:
        variation_names = variation_names.intersection(marked_variations)
    # Hook for attaching per-backend marks (xfail/skip); currently none are used.
    def marks(variation: str) -> list[pytest.MarkDecorator]:
        return []
    if fixtures.issuperset({"events_query_session", "query_events"}):
        metafunc.parametrize(
            "events_query_session, query_events",
            [
                pytest.param(
                    *values[:2],
                    id=values[0],
                    marks=marks(values[0]),
                )
                for values in VARIATIONS
                if values[0] in variation_names
            ],
            # Only the session fixture is "indirect": its param is the backend
            # name, resolved to a real session by events_query_session below.
            indirect=["events_query_session"],
        )
@pytest.fixture
def test_events() -> list[Event]:
    """Create test events with various text content for searching"""
    # Each entry seeds one event; names and payloads are chosen so individual
    # tests can target specific terms, exact phrases, and non-ASCII text.
    test_data = [
        # Basic error events
        {
            "event": "prefect.flow-run.Failed",
            "resource": {
                "prefect.resource.id": "prefect.flow-run.abc123",
                "prefect.resource.name": "error-flow",
            },
            "payload": {"error": "connection timeout"},
        },
        {
            "event": "prefect.task-run.Failed",
            "resource": {
                "prefect.resource.id": "prefect.task-run.def456",
                "prefect.resource.name": "debug-task",
            },
            "payload": {"message": "failed to process data"},
        },
        # Success events
        {
            "event": "prefect.flow-run.Completed",
            "resource": {
                "prefect.resource.id": "prefect.flow-run.xyz789",
                "prefect.resource.name": "success-flow",
            },
            "payload": {"result": "processed successfully"},
        },
        # Events with quoted phrases in names/payload
        {
            "event": "prefect.deployment.triggered",
            "resource": {
                "prefect.resource.id": "prefect.deployment.staging-env",
                "prefect.resource.name": "connection timeout handler",
            },
            "payload": {"message": "Unable to connect to database"},
        },
        # International characters - Japanese
        {
            "event": "prefect.flow-run.エラー",
            "resource": {
                "prefect.resource.id": "prefect.flow-run.japanese123",
                "prefect.resource.name": "フローテスト",
            },
            "payload": {"message": "データベース接続エラー", "environment": "本番"},
        },
        # International characters - Chinese
        {
            "event": "prefect.task-run.失败",
            "resource": {
                "prefect.resource.id": "prefect.task-run.chinese456",
                "prefect.resource.name": "数据处理任务",
            },
            "payload": {"error": "连接超时", "level": "错误"},
        },
        # Test environment events
        {
            "event": "prefect.flow-run.Running",
            "resource": {
                "prefect.resource.id": "prefect.flow-run.test123",
                "prefect.resource.name": "test-flow",
            },
            "payload": {"environment": "test", "debug": True},
        },
        # Production events
        {
            "event": "prefect.flow-run.Scheduled",
            "resource": {
                "prefect.resource.id": "prefect.flow-run.prod456",
                "prefect.resource.name": "prod-flow",
            },
            "payload": {"environment": "production", "warning": "high memory usage"},
        },
    ]
    events: list[Event] = []
    base_time = datetime.now(timezone.utc)
    for i, data in enumerate(test_data):
        # Space events one hour apart so occurred-range filtering is meaningful.
        occurred = base_time - timedelta(hours=i)
        events.append(
            Event(
                occurred=occurred,
                event=data["event"],
                resource=Resource(root=data["resource"]),
                related=[],
                payload=data["payload"],
                id=uuid4(),
            )
        )
    return events
@pytest.fixture
async def events_query_session(
    request: pytest.FixtureRequest,
    test_events: list[Event],
    session: AsyncSession,
):
    """Opens an appropriate session for the given backend, seeds it with the
    test events, and returns it for use in tests"""
    # request.param is the backend name, supplied indirectly by
    # pytest_generate_tests above.
    backend: str = request.param
    if backend == "memory":
        # The in-memory backend filters the raw list of events directly.
        yield test_events
    elif backend == "database":
        # Convert Events to ReceivedEvents and write to database
        received_events = []
        for event in test_events:
            received_events.append(
                ReceivedEvent(
                    occurred=event.occurred,
                    event=event.event,
                    resource=event.resource.root,
                    related=[r.root for r in event.related],
                    payload=event.payload,
                    id=event.id,
                    received=event.occurred,
                )
            )
        # Write events to database using OSS write_events
        await write_events(session=session, events=received_events)
        await session.commit()
        yield session
    else:
        raise NotImplementedError(f"Unknown backend: {backend}")
@pytest.fixture
def full_occurred_range(test_events: list[Event]) -> EventOccurredFilter:
    """An occurred-time window wide enough to cover every seeded event."""
    timestamps = [event.occurred for event in test_events]
    return EventOccurredFilter(since=min(timestamps), until=max(timestamps))
# Test cases for basic text search functionality
async def test_single_term_search(
    events_query_session: Union[list[Event], AsyncSession],
    query_events: QueryEventsFn,
    full_occurred_range: EventOccurredFilter,
):
    """A single bare term matches events containing it in any searched field."""
    text_filter = EventTextFilter(query="error")
    found, total, _ = await query_events(
        session=events_query_session,
        filter=EventFilter(occurred=full_occurred_range, text=text_filter),
    )
    assert len(found) >= 1
    # Every hit must carry the term somewhere in its searchable text.
    for hit in found:
        assert "error" in text_filter._build_searchable_text(hit).lower()
async def test_multiple_terms_or_logic(
    events_query_session: list[Event],
    query_events: QueryEventsFn,
    full_occurred_range: EventOccurredFilter,
):
    """Space-separated terms combine with OR semantics."""
    text_filter = EventTextFilter(query="error success")
    found, total, _ = await query_events(
        session=events_query_session,
        filter=EventFilter(occurred=full_occurred_range, text=text_filter),
    )
    # Both error events and the success event should match.
    assert len(found) >= 2
    for hit in found:
        searchable = text_filter._build_searchable_text(hit).lower()
        assert "error" in searchable or "success" in searchable
async def test_negative_terms_with_minus(
    events_query_session: list[Event],
    query_events: QueryEventsFn,
    full_occurred_range: EventOccurredFilter,
):
    """A '-term' excludes events containing that term."""
    text_filter = EventTextFilter(query="flow -test")
    found, total, _ = await query_events(
        session=events_query_session,
        filter=EventFilter(occurred=full_occurred_range, text=text_filter),
    )
    assert len(found) >= 1
    for hit in found:
        searchable = text_filter._build_searchable_text(hit).lower()
        assert "flow" in searchable
        assert "test" not in searchable
async def test_negative_terms_with_exclamation(
    events_query_session: list[Event],
    query_events: QueryEventsFn,
    full_occurred_range: EventOccurredFilter,
):
    """A '!term' excludes events just like the minus prefix."""
    text_filter = EventTextFilter(query="flow !debug")
    found, total, _ = await query_events(
        session=events_query_session,
        filter=EventFilter(occurred=full_occurred_range, text=text_filter),
    )
    assert len(found) >= 1
    for hit in found:
        searchable = text_filter._build_searchable_text(hit).lower()
        assert "flow" in searchable
        assert "debug" not in searchable
async def test_quoted_phrase_search(
    events_query_session: list[Event],
    query_events: QueryEventsFn,
    full_occurred_range: EventOccurredFilter,
):
    """Double quotes require the exact phrase to appear."""
    text_filter = EventTextFilter(query='"connection timeout"')
    found, total, _ = await query_events(
        session=events_query_session,
        filter=EventFilter(occurred=full_occurred_range, text=text_filter),
    )
    assert len(found) >= 1
    for hit in found:
        assert "connection timeout" in text_filter._build_searchable_text(hit).lower()
async def test_quoted_phrase_exclusion(
    events_query_session: list[Event],
    query_events: QueryEventsFn,
    full_occurred_range: EventOccurredFilter,
):
    """A '-"phrase"' excludes events containing that exact phrase."""
    text_filter = EventTextFilter(query='flow -"connection timeout"')
    found, total, _ = await query_events(
        session=events_query_session,
        filter=EventFilter(occurred=full_occurred_range, text=text_filter),
    )
    for hit in found:
        searchable = text_filter._build_searchable_text(hit).lower()
        assert "flow" in searchable
        assert "connection timeout" not in searchable
async def test_complex_combined_search(
    events_query_session: list[Event],
    query_events: QueryEventsFn,
    full_occurred_range: EventOccurredFilter,
):
    """OR terms, negated terms, and negated phrases compose in one query."""
    text_filter = EventTextFilter(query='flow error -debug -"connection timeout"')
    found, total, _ = await query_events(
        session=events_query_session,
        filter=EventFilter(occurred=full_occurred_range, text=text_filter),
    )
    # Hits need "flow" OR "error", and neither "debug" nor the excluded phrase.
    for hit in found:
        searchable = text_filter._build_searchable_text(hit).lower()
        assert "flow" in searchable or "error" in searchable
        assert "debug" not in searchable
        assert "connection timeout" not in searchable
async def test_case_insensitive_search(
    events_query_session: list[Event],
    query_events: QueryEventsFn,
    full_occurred_range: EventOccurredFilter,
):
    """An upper-case query still matches lower-case text."""
    text_filter = EventTextFilter(query="FAILED")
    found, total, _ = await query_events(
        session=events_query_session,
        filter=EventFilter(occurred=full_occurred_range, text=text_filter),
    )
    assert len(found) >= 1
    for hit in found:
        assert "failed" in text_filter._build_searchable_text(hit).lower()
async def test_empty_query_returns_all(
    events_query_session: list[Event],
    query_events: QueryEventsFn,
    full_occurred_range: EventOccurredFilter,
    test_events: list[Event],
):
    """An empty text query behaves exactly like omitting the text filter."""
    with_empty_query, _, _ = await query_events(
        session=events_query_session,
        filter=EventFilter(
            occurred=full_occurred_range, text=EventTextFilter(query="")
        ),
    )
    without_text_filter, _, _ = await query_events(
        session=events_query_session,
        filter=EventFilter(occurred=full_occurred_range),
    )
    assert len(with_empty_query) == len(without_text_filter)
async def test_searches_event_field(
    events_query_session: list[Event],
    query_events: QueryEventsFn,
    full_occurred_range: EventOccurredFilter,
):
    """The event type/name itself is part of the searched text."""
    found, total, _ = await query_events(
        session=events_query_session,
        filter=EventFilter(
            occurred=full_occurred_range, text=EventTextFilter(query="deployment")
        ),
    )
    assert len(found) >= 1
    assert any("deployment" in hit.event.lower() for hit in found)
async def test_searches_resource_labels(
    events_query_session: list[Event],
    query_events: QueryEventsFn,
    full_occurred_range: EventOccurredFilter,
):
    """Resource label VALUES (not keys) are part of the searched text."""
    found, total, _ = await query_events(
        session=events_query_session,
        filter=EventFilter(
            occurred=full_occurred_range, text=EventTextFilter(query="abc123")
        ),
    )
    assert len(found) >= 1
    assert any("abc123" in str(hit.resource.root.values()) for hit in found)
async def test_searches_payload_content(
    events_query_session: list[Event],
    query_events: QueryEventsFn,
    full_occurred_range: EventOccurredFilter,
):
    """Payload contents are part of the searched text."""
    found, total, _ = await query_events(
        session=events_query_session,
        filter=EventFilter(
            occurred=full_occurred_range, text=EventTextFilter(query="processed")
        ),
    )
    assert len(found) >= 1
    assert any("processed" in str(hit.payload).lower() for hit in found)
async def test_no_matches_returns_empty(
    events_query_session: list[Event],
    query_events: QueryEventsFn,
    full_occurred_range: EventOccurredFilter,
):
    """A term that appears nowhere yields an empty result set."""
    found, total, _ = await query_events(
        session=events_query_session,
        filter=EventFilter(
            occurred=full_occurred_range,
            text=EventTextFilter(query="nonexistentterm12345"),
        ),
    )
    assert len(found) == 0
async def test_does_not_search_resource_label_keys(
    events_query_session: list[Event],
    query_events: QueryEventsFn,
    full_occurred_range: EventOccurredFilter,
):
    """Resource label KEYS are not searched; only values (and payload) are."""
    # "prefect.resource.id" is a label key on every event, never a label value.
    text_filter = EventTextFilter(query="prefect.resource.id")
    found, total, _ = await query_events(
        session=events_query_session,
        filter=EventFilter(occurred=full_occurred_range, text=text_filter),
    )
    # Any hit can only have come from the payload, not from a label key;
    # this validates the principle that keys are not automatically searched.
    for hit in found:
        searchable = text_filter._build_searchable_text(hit)
        if "prefect.resource.id" in searchable:
            assert "prefect.resource.id" in str(hit.payload)
async def test_multilingual_character_search(
    events_query_session: list[Event],
    query_events: QueryEventsFn,
    full_occurred_range: EventOccurredFilter,
):
    """Non-ASCII (Japanese/Chinese) queries work through the full stack."""
    # Japanese characters appearing in the event name
    found, total, _ = await query_events(
        session=events_query_session,
        filter=EventFilter(
            occurred=full_occurred_range, text=EventTextFilter(query="エラー")
        ),
    )
    assert len(found) >= 1
    assert any("エラー" in hit.event for hit in found)
    # Chinese characters appearing in the payload.
    # SQLite mishandles case-insensitive LIKE over these characters, so the
    # database variation is expected to fail there.
    if isinstance(events_query_session, AsyncSession):
        from prefect.server.database import provide_database_interface

        db = provide_database_interface()
        if db.dialect.name == "sqlite":
            pytest.xfail(
                "SQLite Unicode handling issue with Chinese characters in lower() + LIKE queries"
            )
    found, total, _ = await query_events(
        session=events_query_session,
        filter=EventFilter(
            occurred=full_occurred_range, text=EventTextFilter(query="连接超时")
        ),
    )
    assert len(found) >= 1
    assert any("连接超时" in str(hit.payload) for hit in found)
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/events/test_events_text_search.py",
"license": "Apache License 2.0",
"lines": 508,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:tests/server/api/test_logs_text_search.py | """Tests for text search functionality across logs storage backends"""
from datetime import datetime, timedelta, timezone
from typing import Awaitable, Callable, Union
from uuid import uuid4
import pytest
from sqlalchemy.ext.asyncio import AsyncSession
from prefect.server import models
from prefect.server.models.logs import read_logs
from prefect.server.schemas.actions import LogCreate
from prefect.server.schemas.core import Log
from prefect.server.schemas.filters import (
LogFilter,
LogFilterLevel,
LogFilterTextSearch,
)
from prefect.server.schemas.sorting import LogSort
# Define function types for our test variations
# Shared signature of read_logs (database) and query_logs_memory (below):
# returns the requested page of matching logs.
QueryLogsFn = Callable[..., Awaitable[list[Log]]]
# In-memory log search that intentionally matches read_logs() signature
async def query_logs_memory(
    session: list[Log],  # For memory tests, this will be the list of logs
    log_filter: LogFilter,
    limit: int,
    offset: int,
    sort: LogSort = LogSort.TIMESTAMP_ASC,
) -> list[Log]:
    """In-memory log filtering using LogFilter.includes() method.

    This function intentionally shares the signature of the database counterpart
    (read_logs) and is used for filtering on WebSockets and other testing use cases
    where we need to filter logs in memory rather than in the database.
    """

    def keep(log: Log) -> bool:
        # Mirror the database filter semantics, one sub-filter at a time.
        if log_filter.level and not _log_level_matches(log, log_filter.level):
            return False
        if log_filter.timestamp and not _log_timestamp_matches(
            log, log_filter.timestamp
        ):
            return False
        if log_filter.flow_run_id and not _log_flow_run_id_matches(
            log, log_filter.flow_run_id
        ):
            return False
        if log_filter.task_run_id and not _log_task_run_id_matches(
            log, log_filter.task_run_id
        ):
            return False
        if log_filter.text and not log_filter.text.includes(log):
            return False
        return True

    selected = [log for log in session if keep(log)]
    # Ascending timestamp order unless descending was requested.
    selected.sort(
        key=lambda log: log.timestamp, reverse=sort != LogSort.TIMESTAMP_ASC
    )
    # Window the sorted results to the requested page.
    return selected[offset : offset + limit]
def _log_level_matches(log: Log, level_filter: LogFilterLevel) -> bool:
    """Check if log matches level filter"""
    # ge_/le_ are inclusive bounds; None means that side is unbounded.
    below_floor = level_filter.ge_ is not None and log.level < level_filter.ge_
    above_ceiling = level_filter.le_ is not None and log.level > level_filter.le_
    return not (below_floor or above_ceiling)
def _log_timestamp_matches(log: Log, timestamp_filter) -> bool:
    """Check if log matches timestamp filter"""
    # before_/after_ are inclusive bounds; None means that side is unbounded.
    before = timestamp_filter.before_
    after = timestamp_filter.after_
    if before is not None and log.timestamp > before:
        return False
    if after is not None and log.timestamp < after:
        return False
    return True
def _log_flow_run_id_matches(log: Log, flow_run_id_filter) -> bool:
    """Check if log matches flow run id filter"""
    allowed = flow_run_id_filter.any_
    # No any_ constraint means every log passes.
    return True if allowed is None else log.flow_run_id in allowed
def _log_task_run_id_matches(log: Log, task_run_id_filter) -> bool:
    """Check if log matches task run id filter"""
    if task_run_id_filter.any_ is not None:
        return log.task_run_id in task_run_id_filter.any_
    if task_run_id_filter.is_null_ is not None:
        # is_null_=True keeps flow-level logs; False keeps task-level logs.
        return (log.task_run_id is None) == task_run_id_filter.is_null_
    return True
# (backend id, query function) pairs that every test below is parametrized over.
VARIATIONS: list[tuple[str, QueryLogsFn]] = [
    ("memory", query_logs_memory),
    ("database", read_logs),
]
def pytest_generate_tests(metafunc: pytest.Metafunc):
    """Parametrize tests over the storage backends listed in VARIATIONS."""
    fixtures = set(metafunc.fixturenames)
    # If the test itself includes a marker saying that it is for only one variation,
    # then honor that marker and filter the test generation down to just that
    variation_names = {v[0] for v in VARIATIONS}
    marked_variations = {
        mark.name
        for mark in metafunc.definition.own_markers
        if mark.name in variation_names
    }
    if marked_variations:
        variation_names = variation_names.intersection(marked_variations)
    # Hook for attaching per-backend marks (xfail/skip); currently none are used.
    def marks(variation: str) -> list[pytest.MarkDecorator]:
        return []
    if fixtures.issuperset({"logs_query_session", "query_logs"}):
        metafunc.parametrize(
            "logs_query_session, query_logs",
            [
                pytest.param(
                    *values[:2],
                    id=values[0],
                    marks=marks(values[0]),
                )
                for values in VARIATIONS
                if values[0] in variation_names
            ],
            # Only the session fixture is "indirect": its param is the backend
            # name, resolved to a real session by logs_query_session below.
            indirect=["logs_query_session"],
        )
@pytest.fixture
def test_logs() -> list[Log]:
    """Create test logs with various text content for searching"""
    # Each entry seeds one log; messages, names, and levels are chosen so
    # individual tests can target specific terms, phrases, and non-ASCII text.
    test_data = [
        # Error logs
        {
            "name": "prefect.flow_runs",
            "message": "Flow run failed with connection timeout",
            "level": 40,  # ERROR
        },
        {
            "name": "prefect.task_runs",
            "message": "Task failed to process data due to network error",
            "level": 40,  # ERROR
        },
        # Debug logs
        {
            "name": "prefect.flow_runs",
            "message": "Debug: Starting flow execution in test environment",
            "level": 10,  # DEBUG
        },
        # Info logs
        {
            "name": "prefect.deployments",
            "message": "Successfully deployed to production environment",
            "level": 20,  # INFO
        },
        # Warning logs
        {
            "name": "prefect.workers",
            "message": "Warning: high memory usage detected",
            "level": 30,  # WARNING
        },
        # Logs with quoted phrases
        {
            "name": "prefect.flow_runs",
            "message": "Unable to connect to database server",
            "level": 40,  # ERROR
        },
        # International characters - Japanese
        {
            "name": "prefect.フロー実行",
            "message": "データベース接続エラーが発生しました",
            "level": 40,  # ERROR
        },
        # International characters - French with accents
        {
            "name": "prefect.déploiements",
            "message": "Erreur de connexión à la base de données",
            "level": 40,  # ERROR
        },
    ]
    logs: list[Log] = []
    base_time = datetime.now(timezone.utc)
    for i, data in enumerate(test_data):
        logs.append(
            Log(
                id=uuid4(),
                created=base_time - timedelta(hours=i),
                updated=base_time - timedelta(hours=i),
                name=data["name"],
                level=data["level"],
                flow_run_id=uuid4(),
                # Alternate between flow-level (None) and task-level logs.
                task_run_id=None if i % 2 == 0 else uuid4(),
                message=data["message"],
                timestamp=base_time - timedelta(hours=i),
            )
        )
    return logs
@pytest.fixture
async def logs_query_session(
    request: pytest.FixtureRequest,
    test_logs: list[Log],
    session: AsyncSession,
):
    """Opens an appropriate session for the given backend, seeds it with the
    test logs, and returns it for use in tests"""
    # request.param is the backend name, supplied indirectly by
    # pytest_generate_tests above.
    backend: str = request.param
    if backend == "memory":
        # The in-memory backend filters the raw list of logs directly.
        yield test_logs
    elif backend == "database":
        # Write test logs to database
        log_creates = [
            LogCreate(
                name=log.name,
                level=log.level,
                flow_run_id=log.flow_run_id,
                task_run_id=log.task_run_id,
                message=log.message,
                timestamp=log.timestamp,
            )
            for log in test_logs
        ]
        await models.logs.create_logs(session=session, logs=log_creates)
        await session.commit()
        yield session
    else:
        raise NotImplementedError(f"Unknown backend: {backend}")
# Test cases for basic text search functionality
async def test_single_term_search(
    logs_query_session: Union[list[Log], AsyncSession],
    query_logs: QueryLogsFn,
):
    """A single bare term matches logs containing it in message or logger name."""
    found = await query_logs(
        session=logs_query_session,
        log_filter=LogFilter(text=LogFilterTextSearch(query="error")),
        limit=100,
        offset=0,
    )
    assert len(found) >= 1
    for hit in found:
        haystack = f"{hit.message} {hit.name}".lower()
        assert "error" in haystack
async def test_multiple_terms_or_logic(
    logs_query_session: list[Log],
    query_logs: QueryLogsFn,
):
    """Space-separated terms combine with OR semantics."""
    found = await query_logs(
        session=logs_query_session,
        log_filter=LogFilter(text=LogFilterTextSearch(query="error success")),
        limit=100,
        offset=0,
    )
    # Both the error logs and the success log should match.
    assert len(found) >= 2
    for hit in found:
        haystack = f"{hit.message} {hit.name}".lower()
        assert "error" in haystack or "success" in haystack
async def test_negative_terms_with_minus(
    logs_query_session: list[Log],
    query_logs: QueryLogsFn,
):
    """A '-term' excludes logs containing that term."""
    found = await query_logs(
        session=logs_query_session,
        log_filter=LogFilter(text=LogFilterTextSearch(query="flow -debug")),
        limit=100,
        offset=0,
    )
    assert len(found) >= 1
    for hit in found:
        haystack = f"{hit.message} {hit.name}".lower()
        assert "flow" in haystack
        assert "debug" not in haystack
async def test_negative_terms_with_exclamation(
    logs_query_session: list[Log],
    query_logs: QueryLogsFn,
):
    """A '!term' excludes logs just like the minus prefix."""
    found = await query_logs(
        session=logs_query_session,
        log_filter=LogFilter(text=LogFilterTextSearch(query="flow !test")),
        limit=100,
        offset=0,
    )
    assert len(found) >= 1
    for hit in found:
        haystack = f"{hit.message} {hit.name}".lower()
        assert "flow" in haystack
        assert "test" not in haystack
async def test_quoted_phrase_search(
    logs_query_session: list[Log],
    query_logs: QueryLogsFn,
):
    """Double quotes require the exact phrase to appear."""
    found = await query_logs(
        session=logs_query_session,
        log_filter=LogFilter(text=LogFilterTextSearch(query='"connection timeout"')),
        limit=100,
        offset=0,
    )
    assert len(found) >= 1
    for hit in found:
        haystack = f"{hit.message} {hit.name}".lower()
        assert "connection timeout" in haystack
async def test_quoted_phrase_exclusion(
    logs_query_session: list[Log],
    query_logs: QueryLogsFn,
):
    """A '-"phrase"' excludes logs containing that exact phrase."""
    found = await query_logs(
        session=logs_query_session,
        log_filter=LogFilter(
            text=LogFilterTextSearch(query='flow -"connection timeout"')
        ),
        limit=100,
        offset=0,
    )
    for hit in found:
        haystack = f"{hit.message} {hit.name}".lower()
        assert "flow" in haystack
        assert "connection timeout" not in haystack
async def test_case_insensitive_search(
    logs_query_session: list[Log],
    query_logs: QueryLogsFn,
):
    """Test that searches are case insensitive"""
    # Query term is upper case on purpose; fixture messages use lower case.
    logs = await query_logs(
        session=logs_query_session,
        log_filter=LogFilter(text=LogFilterTextSearch(query="FAILED")),
        limit=100,
        offset=0,
    )
    # Should find logs with "failed" (lowercase in message)
    assert len(logs) >= 1
    for log in logs:
        searchable_text = f"{log.message} {log.name}".lower()
        assert "failed" in searchable_text


async def test_searches_message_content(
    logs_query_session: list[Log],
    query_logs: QueryLogsFn,
):
    """Test that search covers log message content"""
    logs = await query_logs(
        session=logs_query_session,
        log_filter=LogFilter(text=LogFilterTextSearch(query="database")),
        limit=100,
        offset=0,
    )
    assert len(logs) >= 1
    # Should find logs with "database" in message
    assert any("database" in log.message.lower() for log in logs)


async def test_searches_logger_name(
    logs_query_session: list[Log],
    query_logs: QueryLogsFn,
):
    """Test that search covers logger names"""
    logs = await query_logs(
        session=logs_query_session,
        log_filter=LogFilter(text=LogFilterTextSearch(query="deployments")),
        limit=100,
        offset=0,
    )
    assert len(logs) >= 1
    # Should find logs with "deployments" in logger name
    assert any("deployments" in log.name.lower() for log in logs)
async def test_complex_combined_search(
    logs_query_session: list[Log],
    query_logs: QueryLogsFn,
):
    """Test complex query combining multiple features"""
    logs = await query_logs(
        session=logs_query_session,
        log_filter=LogFilter(
            text=LogFilterTextSearch(query='flow error -debug -"connection timeout"')
        ),
        limit=100,
        offset=0,
    )
    # Guard against a vacuous pass: an empty result set would silently
    # satisfy the per-log assertions below. Sibling tests assert a non-empty
    # result for the simpler sub-queries, so at least one match is expected
    # here as well — confirm against the fixture data if this flakes.
    assert len(logs) >= 1
    # Should find logs with "flow" OR "error" but NOT "debug" or "connection timeout"
    for log in logs:
        searchable_text = f"{log.message} {log.name}".lower()
        assert "flow" in searchable_text or "error" in searchable_text
        assert "debug" not in searchable_text
        assert "connection timeout" not in searchable_text
async def test_empty_query_returns_all(
    logs_query_session: list[Log],
    query_logs: QueryLogsFn,
    test_logs: list[Log],
):
    """Test that empty query returns all results like no text filter"""
    # Query with empty text
    logs_with_empty_text = await query_logs(
        session=logs_query_session,
        log_filter=LogFilter(text=LogFilterTextSearch(query="")),
        limit=100,
        offset=0,
    )
    # Query without text filter
    logs_no_text = await query_logs(
        session=logs_query_session,
        log_filter=LogFilter(),
        limit=100,
        offset=0,
    )
    # Should return same results (compared by count only; ordering is assumed
    # stable between the two queries)
    assert len(logs_with_empty_text) == len(logs_no_text)


async def test_no_matches_returns_empty(
    logs_query_session: list[Log],
    query_logs: QueryLogsFn,
):
    """Test that searches with no matches return empty results"""
    logs = await query_logs(
        session=logs_query_session,
        log_filter=LogFilter(text=LogFilterTextSearch(query="nonexistentterm12345")),
        limit=100,
        offset=0,
    )
    assert len(logs) == 0


async def test_text_filter_composable_with_other_filters(
    logs_query_session: list[Log],
    query_logs: QueryLogsFn,
):
    """Test that text filter works with other existing filters"""
    logs = await query_logs(
        session=logs_query_session,
        log_filter=LogFilter(
            text=LogFilterTextSearch(query="flow"),
            level=LogFilterLevel(ge_=30),  # WARNING level and above
        ),
        limit=100,
        offset=0,
    )
    # Should find logs with "flow" AND level >= 30 (WARNING/ERROR)
    # NOTE(review): no minimum-count assertion here — an empty result passes
    # vacuously; consider asserting len(logs) >= 1 if the fixture guarantees a match.
    for log in logs:
        searchable_text = f"{log.message} {log.name}".lower()
        assert "flow" in searchable_text
        assert log.level >= 30


async def test_multilingual_character_search(
    logs_query_session: list[Log],
    query_logs: QueryLogsFn,
):
    """Test that international characters work through the full stack"""
    # Test Japanese characters in logger name
    logs = await query_logs(
        session=logs_query_session,
        log_filter=LogFilter(text=LogFilterTextSearch(query="フロー実行")),
        limit=100,
        offset=0,
    )
    assert len(logs) >= 1
    # Should find logs with Japanese characters in logger name
    assert any("フロー実行" in log.name for log in logs)
    # Test French accents in message content
    logs = await query_logs(
        session=logs_query_session,
        log_filter=LogFilter(text=LogFilterTextSearch(query="connexión")),
        limit=100,
        offset=0,
    )
    assert len(logs) >= 1
    # Should find logs with French accented characters
    assert any("connexión" in log.message for log in logs)
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/server/api/test_logs_text_search.py",
"license": "Apache License 2.0",
"lines": 467,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:tests/server/utilities/test_text_search_parser.py | """Tests for text search query parser
Tests the parsing of text search queries according to the following syntax:
- Space-separated terms → OR logic (include)
- Prefix with `-` or `!` → Exclude term
- Prefix with `+` → Required term (AND logic, future)
- Quote phrases → Match exact phrase
- Case-insensitive, substring matching
"""
import pytest
from prefect.server.utilities.text_search_parser import (
TextSearchQuery,
parse_text_search_query,
)
class TestBasicParsing:
    """Test basic query parsing functionality"""

    def test_empty_string(self):
        result = parse_text_search_query("")
        assert result == TextSearchQuery(include=[], exclude=[], required=[])

    def test_whitespace_only(self):
        result = parse_text_search_query(" \t\n ")
        assert result == TextSearchQuery(include=[], exclude=[], required=[])

    def test_single_term(self):
        result = parse_text_search_query("error")
        assert result == TextSearchQuery(include=["error"], exclude=[], required=[])

    def test_multiple_terms_or_logic(self):
        # Bare space-separated terms combine with OR semantics.
        result = parse_text_search_query("error warning timeout")
        assert result == TextSearchQuery(
            include=["error", "warning", "timeout"], exclude=[], required=[]
        )

    def test_multiple_spaces_between_terms(self):
        result = parse_text_search_query("error warning\t\ttimeout")
        assert result == TextSearchQuery(
            include=["error", "warning", "timeout"], exclude=[], required=[]
        )

    def test_leading_trailing_whitespace(self):
        result = parse_text_search_query(" error warning ")
        assert result == TextSearchQuery(
            include=["error", "warning"], exclude=[], required=[]
        )

    def test_whitespace_preserved_in_quotes_only(self):
        # Multiple spaces between terms should be collapsed, but preserved in quotes
        result = parse_text_search_query('hello "world again "')
        assert result == TextSearchQuery(
            include=["hello", "world again "], exclude=[], required=[]
        )


class TestNegativeTerms:
    """Test exclusion syntax with - and ! prefixes"""

    def test_minus_prefix_single_term(self):
        result = parse_text_search_query("-debug")
        assert result == TextSearchQuery(include=[], exclude=["debug"], required=[])

    def test_exclamation_prefix_single_term(self):
        result = parse_text_search_query("!debug")
        assert result == TextSearchQuery(include=[], exclude=["debug"], required=[])

    def test_mixed_negative_prefixes(self):
        result = parse_text_search_query("error -debug !test")
        assert result == TextSearchQuery(
            include=["error"], exclude=["debug", "test"], required=[]
        )

    def test_minus_only(self):
        # A bare prefix with no term is dropped entirely.
        result = parse_text_search_query("-")
        assert result == TextSearchQuery(include=[], exclude=[], required=[])

    def test_exclamation_only(self):
        result = parse_text_search_query("!")
        assert result == TextSearchQuery(include=[], exclude=[], required=[])

    def test_multiple_prefixes(self):
        # Double prefixes should be treated as part of the term
        result = parse_text_search_query("--term")
        assert result == TextSearchQuery(include=[], exclude=["-term"], required=[])

    def test_prefix_mixed_with_content(self):
        result = parse_text_search_query("!-term")
        assert result == TextSearchQuery(include=[], exclude=["-term"], required=[])

    def test_dash_in_middle_of_word(self):
        # Dashes in middle should be preserved as part of term
        result = parse_text_search_query("task-run flow-name")
        assert result == TextSearchQuery(
            include=["task-run", "flow-name"], exclude=[], required=[]
        )

    def test_dash_at_end_of_word(self):
        # Dash at end should be preserved
        result = parse_text_search_query("prefix-")
        assert result == TextSearchQuery(include=["prefix-"], exclude=[], required=[])


class TestRequiredTerms:
    """Test required/AND syntax with + prefix (future feature)"""

    def test_plus_prefix_single_term(self):
        result = parse_text_search_query("+required")
        assert result == TextSearchQuery(include=[], exclude=[], required=["required"])

    def test_plus_only(self):
        result = parse_text_search_query("+")
        assert result == TextSearchQuery(include=[], exclude=[], required=[])

    def test_mixed_required_with_other_terms(self):
        result = parse_text_search_query("include +required -excluded")
        assert result == TextSearchQuery(
            include=["include"], exclude=["excluded"], required=["required"]
        )

    def test_multiple_required_terms(self):
        result = parse_text_search_query("+error +connection")
        assert result == TextSearchQuery(
            include=[], exclude=[], required=["error", "connection"]
        )
class TestQuotedPhrases:
    """Test quoted phrase handling"""
    # Only double quotes delimit phrases; backslash escapes a following
    # double quote (or another backslash) inside a phrase.

    def test_simple_quoted_phrase(self):
        result = parse_text_search_query('"connection timeout"')
        assert result == TextSearchQuery(
            include=["connection timeout"], exclude=[], required=[]
        )

    def test_multiple_quoted_phrases(self):
        result = parse_text_search_query('"phrase one" "phrase two"')
        assert result == TextSearchQuery(
            include=["phrase one", "phrase two"], exclude=[], required=[]
        )

    def test_quoted_phrase_with_regular_terms(self):
        result = parse_text_search_query('error "connection timeout" warning')
        assert result == TextSearchQuery(
            include=["error", "connection timeout", "warning"], exclude=[], required=[]
        )

    def test_excluded_quoted_phrase(self):
        result = parse_text_search_query('-"debug mode"')
        assert result == TextSearchQuery(
            include=[], exclude=["debug mode"], required=[]
        )

    def test_excluded_quoted_phrase_with_exclamation(self):
        result = parse_text_search_query('!"test environment"')
        assert result == TextSearchQuery(
            include=[], exclude=["test environment"], required=[]
        )

    def test_required_quoted_phrase(self):
        result = parse_text_search_query('+"connection established"')
        assert result == TextSearchQuery(
            include=[], exclude=[], required=["connection established"]
        )

    def test_empty_quoted_string(self):
        result = parse_text_search_query('""')
        assert result == TextSearchQuery(include=[], exclude=[], required=[])

    def test_whitespace_only_quoted_string(self):
        result = parse_text_search_query('" "')
        assert result == TextSearchQuery(include=[], exclude=[], required=[])

    def test_unclosed_quote_at_end(self):
        # Unclosed quote should include everything to end as literal term
        result = parse_text_search_query('error "unclosed quote')
        assert result == TextSearchQuery(
            include=["error", "unclosed quote"], exclude=[], required=[]
        )

    def test_unclosed_quote_with_following_terms(self):
        # Unclosed quote should consume rest of string
        result = parse_text_search_query('error "unclosed and more terms')
        assert result == TextSearchQuery(
            include=["error", "unclosed and more terms"], exclude=[], required=[]
        )

    def test_quotes_with_special_characters(self):
        result = parse_text_search_query('"error: connection failed!"')
        assert result == TextSearchQuery(
            include=["error: connection failed!"], exclude=[], required=[]
        )

    def test_escaped_quotes_within_phrases(self):
        # Backslash escapes allow quotes within phrases
        result = parse_text_search_query(r'"phrase with \"inner\" quotes"')
        assert result == TextSearchQuery(
            include=['phrase with "inner" quotes'], exclude=[], required=[]
        )

    def test_escaped_backslashes(self):
        # Escaped backslashes should be literal
        result = parse_text_search_query(r'"path\\to\\file"')
        assert result == TextSearchQuery(
            include=[r"path\to\file"], exclude=[], required=[]
        )

    def test_escaped_quote_at_end(self):
        # Escaped quote at end of phrase
        result = parse_text_search_query(r'"error message\""')
        assert result == TextSearchQuery(
            include=['error message"'], exclude=[], required=[]
        )

    def test_escaped_quote_at_start(self):
        # Escaped quote at start of phrase
        result = parse_text_search_query(r'"\"quoted\" message"')
        assert result == TextSearchQuery(
            include=['"quoted" message'], exclude=[], required=[]
        )

    def test_multiple_escaped_quotes(self):
        # Multiple escaped quotes in one phrase
        result = parse_text_search_query(
            r'"He said \"Hello\" and she said \"Goodbye\""'
        )
        assert result == TextSearchQuery(
            include=['He said "Hello" and she said "Goodbye"'], exclude=[], required=[]
        )

    def test_backslash_without_quote_is_literal(self):
        # Backslash not followed by quote should be literal
        result = parse_text_search_query(r'"path\folder\file"')
        assert result == TextSearchQuery(
            include=[r"path\folder\file"], exclude=[], required=[]
        )

    def test_nested_quotes_without_escaping(self):
        # Without escaping, inner quotes end the phrase early
        result = parse_text_search_query('"phrase with "inner" quotes"')
        assert result == TextSearchQuery(
            include=["phrase with ", "inner", " quotes"], exclude=[], required=[]
        )
class TestComplexScenarios:
    """Test complex real-world query scenarios"""

    def test_debugging_failed_flow_run(self):
        # Scenario: User knows the flow failed with a connection error
        result = parse_text_search_query('"connection" error -debug')
        assert result == TextSearchQuery(
            include=["connection", "error"], exclude=["debug"], required=[]
        )

    def test_production_issues_only(self):
        # Scenario: User wants errors from production only
        result = parse_text_search_query("error exception -test -staging -dev")
        assert result == TextSearchQuery(
            include=["error", "exception"],
            exclude=["test", "staging", "dev"],
            required=[],
        )

    def test_specific_error_message(self):
        # Scenario: User remembers exact error message
        result = parse_text_search_query('"Unable to connect to database"')
        assert result == TextSearchQuery(
            include=["Unable to connect to database"], exclude=[], required=[]
        )

    def test_complex_mixed_syntax(self):
        # Ultimate complexity test with all syntax features
        result = parse_text_search_query(
            '"connection timeout" error -debug !test +required'
        )
        assert result == TextSearchQuery(
            include=["connection timeout", "error"],
            exclude=["debug", "test"],
            required=["required"],
        )

    def test_flow_run_id_search(self):
        # Scenario: User has a flow run ID (internal dashes must survive parsing)
        result = parse_text_search_query("abc123-def456-789")
        assert result == TextSearchQuery(
            include=["abc123-def456-789"], exclude=[], required=[]
        )

    def test_environment_filtering(self):
        result = parse_text_search_query("deployment -test -staging")
        assert result == TextSearchQuery(
            include=["deployment"], exclude=["test", "staging"], required=[]
        )


class TestEdgeCasesAndLimitations:
    """Test edge cases and documented limitations"""

    def test_literal_dash_at_start_not_supported(self):
        # Searching for literal `-` at start is not supported
        # This should be treated as exclusion, not literal dash
        result = parse_text_search_query("-")
        assert result == TextSearchQuery(include=[], exclude=[], required=[])

    def test_special_characters_preserved_in_terms(self):
        result = parse_text_search_query("error@domain.com task#123 flow$var")
        assert result == TextSearchQuery(
            include=["error@domain.com", "task#123", "flow$var"],
            exclude=[],
            required=[],
        )

    def test_unicode_characters(self):
        result = parse_text_search_query("errör ñame 中文")
        assert result == TextSearchQuery(
            include=["errör", "ñame", "中文"], exclude=[], required=[]
        )

    def test_very_long_terms(self):
        long_term = "a" * 100
        result = parse_text_search_query(f"error {long_term}")
        assert result == TextSearchQuery(
            include=["error", long_term], exclude=[], required=[]
        )

    def test_many_terms(self):
        # Test parsing many terms efficiently (keep under 200 char limit)
        terms = [f"t{i}" for i in range(30)]  # t0 t1 t2... fits in 200 chars
        query = " ".join(terms)
        result = parse_text_search_query(query)
        assert result == TextSearchQuery(include=terms, exclude=[], required=[])

    def test_alternating_prefixes(self):
        result = parse_text_search_query(
            "include -exclude +required -exclude2 include2"
        )
        assert result == TextSearchQuery(
            include=["include", "include2"],
            exclude=["exclude", "exclude2"],
            required=["required"],
        )

    def test_quoted_phrases_with_prefixes_inside(self):
        # Prefixes inside quotes should be literal
        result = parse_text_search_query('"error -debug +required"')
        assert result == TextSearchQuery(
            include=["error -debug +required"], exclude=[], required=[]
        )

    def test_mixed_quote_styles_not_supported(self):
        # Only double quotes have special meaning
        result = parse_text_search_query("'single quotes' error")
        assert result == TextSearchQuery(
            include=["'single", "quotes'", "error"], exclude=[], required=[]
        )

    def test_backslash_escape_only_for_quotes(self):
        # Backslashes only escape quotes, other backslashes are literal
        result = parse_text_search_query(r'error\test "quote\"inside"')
        assert result == TextSearchQuery(
            include=[r"error\test", 'quote"inside'], exclude=[], required=[]
        )

    def test_prefix_with_quotes_complex(self):
        result = parse_text_search_query(
            '+"required phrase" -"excluded phrase" "normal phrase"'
        )
        assert result == TextSearchQuery(
            include=["normal phrase"],
            exclude=["excluded phrase"],
            required=["required phrase"],
        )
class TestQueryValidation:
    """Test query validation (character limits enforced at API layer)"""

    def test_handles_very_long_queries(self):
        # Parser should handle long queries (limits enforced in EventFilter/LogFilter)
        long_query = "a" * 500
        result = parse_text_search_query(long_query)
        assert result == TextSearchQuery(include=[long_query], exclude=[], required=[])

    def test_handles_long_quoted_phrases(self):
        # Parser should handle long quoted content
        long_phrase = "a" * 300
        query_with_quotes = f'"{long_phrase}"'
        result = parse_text_search_query(query_with_quotes)
        assert result == TextSearchQuery(include=[long_phrase], exclude=[], required=[])


class TestParserRobustness:
    """Test parser handles malformed input gracefully"""

    def test_multiple_consecutive_quotes(self):
        result = parse_text_search_query('""error""')
        # Should treat as empty quote, then "error", then empty quote
        assert result == TextSearchQuery(include=["error"], exclude=[], required=[])

    def test_quote_at_start_and_end(self):
        result = parse_text_search_query('"start error end"')
        assert result == TextSearchQuery(
            include=["start error end"], exclude=[], required=[]
        )

    def test_only_prefixes(self):
        result = parse_text_search_query("- ! +")
        assert result == TextSearchQuery(include=[], exclude=[], required=[])

    def test_prefix_followed_by_space(self):
        result = parse_text_search_query("error - debug")
        # Dash followed by space should be ignored completely, both error and debug included
        assert result == TextSearchQuery(
            include=["error", "debug"], exclude=[], required=[]
        )

    def test_prefix_at_word_boundary_only(self):
        # Prefixes should only work at word boundaries (start of terms)
        result = parse_text_search_query("word-dash task!exclaim value+plus")
        assert result == TextSearchQuery(
            include=["word-dash", "task!exclaim", "value+plus"], exclude=[], required=[]
        )

    def test_combining_all_features(self):
        # Kitchen sink test with every feature
        result = parse_text_search_query(
            'regular "exact phrase" -excluded +"required phrase" !also_excluded more_regular'
        )
        assert result == TextSearchQuery(
            include=["regular", "exact phrase", "more_regular"],
            exclude=["excluded", "also_excluded"],
            required=["required phrase"],
        )


class TestCasePreservation:
    """Test that original case is preserved in parsed terms"""
    # The parser preserves original casing; case-insensitive matching is
    # presumably applied downstream at query time.

    def test_preserves_case_in_include_terms(self):
        result = parse_text_search_query("Error WARNING Timeout")
        assert result == TextSearchQuery(
            include=["Error", "WARNING", "Timeout"], exclude=[], required=[]
        )

    def test_preserves_case_in_exclude_terms(self):
        result = parse_text_search_query("-DEBUG !TestMode")
        assert result == TextSearchQuery(
            include=[], exclude=["DEBUG", "TestMode"], required=[]
        )

    def test_preserves_case_in_quoted_phrases(self):
        result = parse_text_search_query('"Connection Timeout Error"')
        assert result == TextSearchQuery(
            include=["Connection Timeout Error"], exclude=[], required=[]
        )


class TestDataclassStructure:
    """Test the TextSearchQuery dataclass itself"""

    def test_dataclass_creation(self):
        query = TextSearchQuery(
            include=["term1", "term2"], exclude=["excluded"], required=["required"]
        )
        assert query.include == ["term1", "term2"]
        assert query.exclude == ["excluded"]
        assert query.required == ["required"]

    def test_dataclass_defaults(self):
        # All three fields default to independent empty lists.
        query = TextSearchQuery()
        assert query.include == []
        assert query.exclude == []
        assert query.required == []

    def test_dataclass_equality(self):
        query1 = TextSearchQuery(include=["test"], exclude=[], required=[])
        query2 = TextSearchQuery(include=["test"], exclude=[], required=[])
        assert query1 == query2

    def test_dataclass_repr(self):
        query = TextSearchQuery(include=["test"], exclude=["debug"], required=["error"])
        repr_str = repr(query)
        assert "include=['test']" in repr_str
        assert "exclude=['debug']" in repr_str
        assert "required=['error']" in repr_str
# Integration-style tests that verify the complete parsing flow
class TestIntegrationScenarios:
    """Test realistic query parsing scenarios end-to-end"""

    @pytest.mark.parametrize(
        "query, expected",
        [
            # Simple cases
            ("error", TextSearchQuery(include=["error"], exclude=[], required=[])),
            ("-debug", TextSearchQuery(include=[], exclude=["debug"], required=[])),
            (
                "+required",
                TextSearchQuery(include=[], exclude=[], required=["required"]),
            ),
            # Query examples
            (
                "error warning timeout",
                TextSearchQuery(
                    include=["error", "warning", "timeout"], exclude=[], required=[]
                ),
            ),
            (
                "error -debug -test",
                TextSearchQuery(
                    include=["error"], exclude=["debug", "test"], required=[]
                ),
            ),
            (
                "error !debug !test",
                TextSearchQuery(
                    include=["error"], exclude=["debug", "test"], required=[]
                ),
            ),
            (
                '"connection timeout"',
                TextSearchQuery(
                    include=["connection timeout"], exclude=[], required=[]
                ),
            ),
            (
                '"connection timeout" error -debug !test',
                TextSearchQuery(
                    include=["connection timeout", "error"],
                    exclude=["debug", "test"],
                    required=[],
                ),
            ),
            # Future AND syntax
            (
                "+error +connection -debug",
                TextSearchQuery(
                    include=[], exclude=["debug"], required=["error", "connection"]
                ),
            ),
        ],
    )
    def test_example_queries(self, query: str, expected: TextSearchQuery) -> None:
        """Test all query examples parse correctly"""
        result = parse_text_search_query(query)
        assert result == expected
class TestMultilingualSupport:
    """Test parsing with international characters and languages"""
    # Prefix and quote syntax must work unchanged regardless of script
    # (CJK, Cyrillic, RTL Arabic, accented Latin, emoji).

    def test_japanese_terms(self):
        # Japanese: error, flow, test
        result = parse_text_search_query("エラー フロー -テスト")
        assert result == TextSearchQuery(
            include=["エラー", "フロー"], exclude=["テスト"], required=[]
        )

    def test_chinese_simplified_terms(self):
        # Chinese: error, connection, debug
        result = parse_text_search_query("错误 连接 -调试")
        assert result == TextSearchQuery(
            include=["错误", "连接"], exclude=["调试"], required=[]
        )

    def test_chinese_traditional_terms(self):
        # Traditional Chinese: database, timeout
        result = parse_text_search_query("資料庫 +超時")
        assert result == TextSearchQuery(
            include=["資料庫"], exclude=[], required=["超時"]
        )

    def test_cyrillic_terms(self):
        # Russian: error, flow, test
        result = parse_text_search_query("ошибка поток -тест")
        assert result == TextSearchQuery(
            include=["ошибка", "поток"], exclude=["тест"], required=[]
        )

    def test_french_with_accents(self):
        # French: error, connection, test environment
        result = parse_text_search_query('erreur connexión -"environment de tést"')
        assert result == TextSearchQuery(
            include=["erreur", "connexión"],
            exclude=["environment de tést"],
            required=[],
        )

    def test_german_compound_words(self):
        # German compound words
        result = parse_text_search_query(
            "Verbindungsfehler Datenbankzugriff -Testumgebung"
        )
        assert result == TextSearchQuery(
            include=["Verbindungsfehler", "Datenbankzugriff"],
            exclude=["Testumgebung"],
            required=[],
        )

    def test_arabic_terms(self):
        # Arabic: error, connection (right-to-left text)
        result = parse_text_search_query("خطأ اتصال")
        assert result == TextSearchQuery(
            include=["خطأ", "اتصال"], exclude=[], required=[]
        )

    def test_mixed_languages_in_query(self):
        # Mixed language query
        result = parse_text_search_query(
            'error エラー -debug -調試 +"connection établie"'
        )
        assert result == TextSearchQuery(
            include=["error", "エラー"],
            exclude=["debug", "調試"],
            required=["connection établie"],
        )

    def test_emoji_in_search_terms(self):
        # Modern usage might include emoji
        result = parse_text_search_query("🚫 error ✅ success -🐛 -bug")
        assert result == TextSearchQuery(
            include=["🚫", "error", "✅", "success"], exclude=["🐛", "bug"], required=[]
        )
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/server/utilities/test_text_search_parser.py",
"license": "Apache License 2.0",
"lines": 523,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:tests/server/orchestration/test_task_concurrency_v2_integration.py | """
Tests for V2 Global Concurrency Limits integration in task orchestration rules.
These tests cover the integration between tag-based task concurrency (V1) and
Global Concurrency Limits V2 in SecureTaskConcurrencySlots and ReleaseTaskConcurrencySlots.
"""
import contextlib
from typing import Any, Callable
from sqlalchemy.ext.asyncio import AsyncSession
from prefect.server.concurrency.lease_storage import get_concurrency_lease_storage
from prefect.server.database.orm_models import ConcurrencyLimitV2
from prefect.server.models import concurrency_limits, concurrency_limits_v2
from prefect.server.orchestration.core_policy import (
ReleaseTaskConcurrencySlots,
SecureTaskConcurrencySlots,
)
from prefect.server.schemas import actions, core, states
from prefect.server.schemas.responses import SetStateStatus
class TestSecureTaskConcurrencySlotsV2Integration:
"""Test SecureTaskConcurrencySlots with V2 Global Concurrency Limits."""
    async def create_v1_concurrency_limit(
        self, session: AsyncSession, tag: str, limit: int
    ) -> None:
        """Helper to create a V1 concurrency limit."""
        # Round-trip through the action schema's JSON dump before building the
        # core model, mirroring how limits arrive via the API layer.
        cl_create = actions.ConcurrencyLimitCreate(
            tag=tag,
            concurrency_limit=limit,
        ).model_dump(mode="json")
        cl_model = core.ConcurrencyLimit(**cl_create)
        await concurrency_limits.create_concurrency_limit(
            session=session, concurrency_limit=cl_model
        )

    async def create_v2_concurrency_limit(
        self, session: AsyncSession, tag: str, limit: int
    ) -> ConcurrencyLimitV2:
        """Helper to create a V2 concurrency limit."""
        # The "tag:<name>" naming convention is what presumably lets the
        # orchestration rules associate this V2 limit with task tags —
        # confirm against SecureTaskConcurrencySlots if this changes.
        gcl = await concurrency_limits_v2.create_concurrency_limit(
            session=session,
            concurrency_limit=actions.ConcurrencyLimitV2Create(
                name=f"tag:{tag}",
                limit=limit,
                active=True,
            ),
        )
        return gcl
    async def test_v2_limits_take_priority_over_v1(
        self,
        session: AsyncSession,
        initialize_orchestration: Callable[..., Any],
    ) -> None:
        """Test that V2 limits are processed before V1 limits for the same tag."""
        # Create both V1 and V2 limits for the same tag
        await self.create_v1_concurrency_limit(session, "shared-tag", 2)
        v2_limit = await self.create_v2_concurrency_limit(session, "shared-tag", 1)
        concurrency_policy = [SecureTaskConcurrencySlots]
        running_transition = (states.StateType.PENDING, states.StateType.RUNNING)
        # First task should use V2 limit (limit 1) not V1 limit (limit 2)
        ctx1 = await initialize_orchestration(
            session, "task", *running_transition, run_tags=["shared-tag"]
        )
        async with contextlib.AsyncExitStack() as stack:
            for rule in concurrency_policy:
                ctx1 = await stack.enter_async_context(rule(ctx1, *running_transition))
            await ctx1.validate_proposed_state()
        assert ctx1.response_status == SetStateStatus.ACCEPT
        # Check that V2 limit has 1 active slot
        await session.refresh(v2_limit)
        assert v2_limit.active_slots == 1
        # Second task should be delayed because V2 limit is reached (limit 1).
        # If the V1 limit (2) had been applied instead, this would have been
        # accepted — so a WAIT proves V2 took priority.
        ctx2 = await initialize_orchestration(
            session, "task", *running_transition, run_tags=["shared-tag"]
        )
        async with contextlib.AsyncExitStack() as stack:
            for rule in concurrency_policy:
                ctx2 = await stack.enter_async_context(rule(ctx2, *running_transition))
            await ctx2.validate_proposed_state()
        assert ctx2.response_status == SetStateStatus.WAIT

    async def test_v2_zero_limit_aborts_transition(
        self,
        session: AsyncSession,
        initialize_orchestration: Callable[..., Any],
    ) -> None:
        """Test that V2 limits with zero limit abort transitions."""
        # A limit of zero can never be satisfied, so waiting would deadlock;
        # the rule must ABORT rather than WAIT.
        await self.create_v2_concurrency_limit(session, "zero-tag", 0)
        concurrency_policy = [SecureTaskConcurrencySlots]
        running_transition = (states.StateType.PENDING, states.StateType.RUNNING)
        ctx = await initialize_orchestration(
            session, "task", *running_transition, run_tags=["zero-tag"]
        )
        async with contextlib.AsyncExitStack() as stack:
            for rule in concurrency_policy:
                ctx = await stack.enter_async_context(rule(ctx, *running_transition))
            await ctx.validate_proposed_state()
        assert ctx.response_status == SetStateStatus.ABORT
        assert "is 0 and will deadlock" in ctx.response_details.reason
async def test_v2_lease_creation_and_metadata(
self,
session: AsyncSession,
initialize_orchestration: Callable[..., Any],
) -> None:
"""Test that V2 limits create proper leases with metadata."""
v2_limit = await self.create_v2_concurrency_limit(session, "lease-tag", 2)
running_transition = (states.StateType.PENDING, states.StateType.RUNNING)
ctx = await initialize_orchestration(
session, "task", *running_transition, run_tags=["lease-tag"]
)
# Use the rule with try/finally to ensure cleanup happens
rule = SecureTaskConcurrencySlots(ctx, *running_transition)
try:
async with rule as rule_ctx:
await rule_ctx.validate_proposed_state()
assert ctx.response_status == SetStateStatus.ACCEPT
# Verify V2 limit active slots were incremented
await session.refresh(v2_limit)
assert v2_limit.active_slots == 1
# Verify lease was created - check the rule's internal tracking
assert len(rule._acquired_v2_lease_ids) == 1
lease_id = rule._acquired_v2_lease_ids[0]
# Verify lease exists and has proper metadata
lease_storage = get_concurrency_lease_storage()
lease = await lease_storage.read_lease(lease_id=lease_id)
assert lease is not None
assert lease.metadata is not None
assert lease.metadata.slots == 1
assert lease.metadata.holder.type == "task_run"
assert lease.metadata.holder.id == ctx.run.id
finally:
# Cleanup happens in rule's cleanup method
pass
async def test_mixed_v1_v2_tags_on_same_task(
self,
session: AsyncSession,
initialize_orchestration: Callable[..., Any],
) -> None:
"""Test task with both V1 and V2 tags processes V2 first, then V1."""
# Create V2 limit for one tag, V1 for another
v2_limit = await self.create_v2_concurrency_limit(session, "v2-tag", 1)
await self.create_v1_concurrency_limit(session, "v1-tag", 1)
concurrency_policy = [SecureTaskConcurrencySlots]
running_transition = (states.StateType.PENDING, states.StateType.RUNNING)
ctx = await initialize_orchestration(
session, "task", *running_transition, run_tags=["v2-tag", "v1-tag"]
)
async with contextlib.AsyncExitStack() as stack:
for rule in concurrency_policy:
ctx = await stack.enter_async_context(rule(ctx, *running_transition))
await ctx.validate_proposed_state()
assert ctx.response_status == SetStateStatus.ACCEPT
# Verify V2 limit was used
await session.refresh(v2_limit)
assert v2_limit.active_slots == 1
# Verify V1 limit was also used (should have the task run ID in active_slots)
v1_limit = await concurrency_limits.read_concurrency_limit_by_tag(
session, "v1-tag"
)
assert str(ctx.run.id) in v1_limit.active_slots
async def test_v2_lease_cleanup_on_abort(
self,
session: AsyncSession,
initialize_orchestration: Callable[..., Any],
) -> None:
"""Test that V2 leases are properly cleaned up when transition is aborted."""
# Create a zero limit which will trigger abort immediately
zero_limit = await self.create_v2_concurrency_limit(session, "zero-tag", 0)
concurrency_policy = [SecureTaskConcurrencySlots]
running_transition = (states.StateType.PENDING, states.StateType.RUNNING)
ctx = await initialize_orchestration(
session, "task", *running_transition, run_tags=["zero-tag"]
)
async with contextlib.AsyncExitStack() as stack:
for rule in concurrency_policy:
ctx = await stack.enter_async_context(rule(ctx, *running_transition))
await ctx.validate_proposed_state()
assert ctx.response_status == SetStateStatus.ABORT
# Verify zero limit is still zero - no slots should have been acquired
await session.refresh(zero_limit)
assert zero_limit.active_slots == 0
    async def test_v1_limits_processed_when_no_v2_overlap(
        self,
        session: AsyncSession,
        initialize_orchestration: Callable[..., Any],
    ) -> None:
        """Test that V1 limits are still processed for tags without V2 limits."""
        # Create V2 limit for one tag, V1 for different tags
        await self.create_v2_concurrency_limit(session, "v2-only", 2)
        await self.create_v1_concurrency_limit(session, "v1-only", 1)

        concurrency_policy = [SecureTaskConcurrencySlots]
        running_transition = (states.StateType.PENDING, states.StateType.RUNNING)

        # Test with only V1 tag
        ctx1 = await initialize_orchestration(
            session, "task", *running_transition, run_tags=["v1-only"]
        )

        async with contextlib.AsyncExitStack() as stack:
            for rule in concurrency_policy:
                ctx1 = await stack.enter_async_context(rule(ctx1, *running_transition))
            await ctx1.validate_proposed_state()

        assert ctx1.response_status == SetStateStatus.ACCEPT

        # Verify V1 limit was used
        # V1 limits track the task run IDs occupying the limit in active_slots.
        v1_limit = await concurrency_limits.read_concurrency_limit_by_tag(
            session, "v1-only"
        )
        assert str(ctx1.run.id) in v1_limit.active_slots

        # Test second task hits V1 limit
        ctx2 = await initialize_orchestration(
            session, "task", *running_transition, run_tags=["v1-only"]
        )

        async with contextlib.AsyncExitStack() as stack:
            for rule in concurrency_policy:
                ctx2 = await stack.enter_async_context(rule(ctx2, *running_transition))
            await ctx2.validate_proposed_state()

        # The V1 limit has capacity 1 and is full, so the second run must wait.
        assert ctx2.response_status == SetStateStatus.WAIT
    async def test_v2_inactive_limits_ignored(
        self,
        session: AsyncSession,
        initialize_orchestration: Callable[..., Any],
    ) -> None:
        """Test that inactive V2 limits are ignored."""
        # Create inactive V2 limit and active V1 limit for same tag
        v2_limit = await concurrency_limits_v2.create_concurrency_limit(
            session=session,
            concurrency_limit=actions.ConcurrencyLimitV2Create(
                name="tag:inactive-tag",
                limit=1,
                active=False,  # Inactive
            ),
        )
        await self.create_v1_concurrency_limit(session, "inactive-tag", 2)

        concurrency_policy = [SecureTaskConcurrencySlots]
        running_transition = (states.StateType.PENDING, states.StateType.RUNNING)

        ctx = await initialize_orchestration(
            session, "task", *running_transition, run_tags=["inactive-tag"]
        )

        async with contextlib.AsyncExitStack() as stack:
            for rule in concurrency_policy:
                ctx = await stack.enter_async_context(rule(ctx, *running_transition))
            await ctx.validate_proposed_state()

        assert ctx.response_status == SetStateStatus.ACCEPT

        # Verify V2 limit was not used (should be 0 active slots)
        await session.refresh(v2_limit)
        assert v2_limit.active_slots == 0

        # Verify V1 limit was used instead
        v1_limit = await concurrency_limits.read_concurrency_limit_by_tag(
            session, "inactive-tag"
        )
        assert str(ctx.run.id) in v1_limit.active_slots
    async def test_v2_tags_excluded_from_v1_processing(
        self,
        session: AsyncSession,
        initialize_orchestration: Callable[..., Any],
    ) -> None:
        """Test that tags with V2 limits are excluded from V1 processing."""
        # Create both V2 and V1 limits for the same tag
        v2_limit = await self.create_v2_concurrency_limit(session, "shared-tag", 5)
        await self.create_v1_concurrency_limit(session, "shared-tag", 2)

        concurrency_policy = [SecureTaskConcurrencySlots]
        running_transition = (states.StateType.PENDING, states.StateType.RUNNING)

        ctx = await initialize_orchestration(
            session, "task", *running_transition, run_tags=["shared-tag"]
        )

        async with contextlib.AsyncExitStack() as stack:
            for rule in concurrency_policy:
                ctx = await stack.enter_async_context(rule(ctx, *running_transition))
            await ctx.validate_proposed_state()

        assert ctx.response_status == SetStateStatus.ACCEPT

        # V2 limit should be used
        await session.refresh(v2_limit)
        assert v2_limit.active_slots == 1

        # V1 limit should NOT be used (active_slots should be empty)
        # When a V2 limit exists for a tag it takes precedence entirely.
        v1_limit = await concurrency_limits.read_concurrency_limit_by_tag(
            session, "shared-tag"
        )
        assert str(ctx.run.id) not in v1_limit.active_slots
        assert len(v1_limit.active_slots) == 0
class TestReleaseTaskConcurrencySlotsV2Integration:
    """Test ReleaseTaskConcurrencySlots with V2 Global Concurrency Limits.

    Note: Some of these tests may fail due to a bug in the current implementation
    where holder.id (task run ID) is used as lease_id in the release logic.
    The correct behavior would require finding the lease_id associated with a holder.
    """

    async def create_v2_concurrency_limit(
        self, session: AsyncSession, tag: str, limit: int
    ) -> ConcurrencyLimitV2:
        """Helper to create a V2 concurrency limit."""
        # V2 limits applied to task tags are identified by a "tag:" name prefix.
        return await concurrency_limits_v2.create_concurrency_limit(
            session=session,
            concurrency_limit=actions.ConcurrencyLimitV2Create(
                name=f"tag:{tag}",
                limit=limit,
                active=True,
            ),
        )

    async def test_v2_and_v1_integration_full_cycle(
        self,
        session: AsyncSession,
        initialize_orchestration: Callable[..., Any],
    ) -> None:
        """Test full cycle: secure V2+V1 limits, then release both."""
        # Set up both V2 and V1 limits
        v2_limit = await self.create_v2_concurrency_limit(session, "cycle-v2", 2)

        cl_create = actions.ConcurrencyLimitCreate(
            tag="cycle-v1",
            concurrency_limit=2,
        ).model_dump(mode="json")
        cl_model = core.ConcurrencyLimit(**cl_create)
        await concurrency_limits.create_concurrency_limit(
            session=session, concurrency_limit=cl_model
        )

        # Test acquiring slots
        secure_policy = [SecureTaskConcurrencySlots]
        release_policy = [ReleaseTaskConcurrencySlots]
        running_transition = (states.StateType.PENDING, states.StateType.RUNNING)
        completed_transition = (states.StateType.RUNNING, states.StateType.COMPLETED)

        # Task gets both V2 and V1 tags
        ctx1 = await initialize_orchestration(
            session, "task", *running_transition, run_tags=["cycle-v2", "cycle-v1"]
        )

        # Secure slots
        async with contextlib.AsyncExitStack() as stack:
            for rule in secure_policy:
                ctx1 = await stack.enter_async_context(rule(ctx1, *running_transition))
            await ctx1.validate_proposed_state()

        assert ctx1.response_status == SetStateStatus.ACCEPT

        # Verify both limits were used
        await session.refresh(v2_limit)
        assert v2_limit.active_slots == 1

        v1_limit = await concurrency_limits.read_concurrency_limit_by_tag(
            session, "cycle-v1"
        )
        assert str(ctx1.run.id) in v1_limit.active_slots

        # Now complete the task to release slots
        ctx2 = await initialize_orchestration(
            session,
            "task",
            *completed_transition,
            run_override=ctx1.run,  # Same task run
            run_tags=["cycle-v2", "cycle-v1"],
        )

        # Set validated state to completed (normally done by orchestration)
        ctx2.validated_state = states.State(type=states.StateType.COMPLETED)

        async with contextlib.AsyncExitStack() as stack:
            for rule in release_policy:
                ctx2 = await stack.enter_async_context(
                    rule(ctx2, *completed_transition)
                )

        # Verify slots were released
        # (both the V2 slot count and the V1 active_slots membership)
        await session.refresh(v2_limit)
        assert v2_limit.active_slots == 0

        await session.refresh(v1_limit)
        assert str(ctx1.run.id) not in v1_limit.active_slots

    async def test_release_only_on_terminal_transitions(
        self,
        session: AsyncSession,
        initialize_orchestration: Callable[..., Any],
    ) -> None:
        """Test that slots are only released on terminal transitions."""
        v2_limit = await self.create_v2_concurrency_limit(session, "terminal-test", 2)

        # First acquire a slot
        secure_policy = [SecureTaskConcurrencySlots]
        release_policy = [ReleaseTaskConcurrencySlots]
        running_transition = (states.StateType.PENDING, states.StateType.RUNNING)

        ctx1 = await initialize_orchestration(
            session, "task", *running_transition, run_tags=["terminal-test"]
        )

        async with contextlib.AsyncExitStack() as stack:
            for rule in secure_policy:
                ctx1 = await stack.enter_async_context(rule(ctx1, *running_transition))
            await ctx1.validate_proposed_state()

        assert ctx1.response_status == SetStateStatus.ACCEPT

        await session.refresh(v2_limit)
        assert v2_limit.active_slots == 1

        # Do a terminal transition (running to completed - should release)
        terminal_transition = (states.StateType.RUNNING, states.StateType.COMPLETED)
        ctx2 = await initialize_orchestration(
            session,
            "task",
            *terminal_transition,
            run_override=ctx1.run,
            run_tags=["terminal-test"],
        )

        # Set validated state to completed (normally done by orchestration)
        ctx2.validated_state = states.State(type=states.StateType.COMPLETED)

        async with contextlib.AsyncExitStack() as stack:
            for rule in release_policy:
                ctx2 = await stack.enter_async_context(rule(ctx2, *terminal_transition))

        # Verify slots were released on terminal transition
        await session.refresh(v2_limit)
        assert v2_limit.active_slots == 0

    async def test_v2_release_with_no_matching_holders(
        self,
        session: AsyncSession,
        initialize_orchestration: Callable[..., Any],
    ) -> None:
        """Test that release handles case where no holders match the task run."""
        v2_limit = await self.create_v2_concurrency_limit(session, "no-match", 2)

        release_policy = [ReleaseTaskConcurrencySlots]
        completed_transition = (states.StateType.RUNNING, states.StateType.COMPLETED)

        # Task that doesn't have any leases
        ctx = await initialize_orchestration(
            session, "task", *completed_transition, run_tags=["no-match"]
        )

        # This should not raise any errors
        async with contextlib.AsyncExitStack() as stack:
            for rule in release_policy:
                ctx = await stack.enter_async_context(rule(ctx, *completed_transition))

        # No slots should be affected since no leases existed
        await session.refresh(v2_limit)
        assert v2_limit.active_slots == 0

    async def test_v2_limits_with_multiple_tags(
        self,
        session: AsyncSession,
        initialize_orchestration: Callable[..., Any],
    ) -> None:
        """Test that a task with multiple V2 tags processes all limits."""
        v2_limit1 = await self.create_v2_concurrency_limit(session, "multi-1", 2)
        v2_limit2 = await self.create_v2_concurrency_limit(session, "multi-2", 3)

        secure_policy = [SecureTaskConcurrencySlots]
        running_transition = (states.StateType.PENDING, states.StateType.RUNNING)

        ctx = await initialize_orchestration(
            session, "task", *running_transition, run_tags=["multi-1", "multi-2"]
        )

        async with contextlib.AsyncExitStack() as stack:
            for rule in secure_policy:
                ctx = await stack.enter_async_context(rule(ctx, *running_transition))
            await ctx.validate_proposed_state()

        assert ctx.response_status == SetStateStatus.ACCEPT

        # Both limits should have active slots
        await session.refresh(v2_limit1)
        await session.refresh(v2_limit2)
        assert v2_limit1.active_slots == 1
        assert v2_limit2.active_slots == 1

    async def test_v2_slot_increment_lease_creation_atomicity(
        self,
        session: AsyncSession,
        initialize_orchestration: Callable[..., Any],
    ) -> None:
        """
        Test that slot increments and lease creation are atomic in orchestration policy.

        This test verifies the fix for the zombie slot bug where slots could be
        incremented but leases not created due to session/transaction boundary issues.
        The fix ensures both operations happen in a single transaction context.
        """
        # Create two limits with different capacities
        v2_limit_5 = await self.create_v2_concurrency_limit(session, "limit-5", 5)
        v2_limit_10 = await self.create_v2_concurrency_limit(session, "limit-10", 10)

        secure_policy = [SecureTaskConcurrencySlots]
        release_policy = [ReleaseTaskConcurrencySlots]
        running_transition = (states.StateType.PENDING, states.StateType.RUNNING)
        completed_transition = (states.StateType.RUNNING, states.StateType.COMPLETED)

        # Run 5 tasks to fill the smaller limit and use slots from both limits
        task_contexts = []
        for _ in range(5):
            task_ctx = await initialize_orchestration(
                session,
                "task",
                *running_transition,
                run_tags=["limit-5", "limit-10"],
            )

            async with contextlib.AsyncExitStack() as stack:
                for rule in secure_policy:
                    task_ctx = await stack.enter_async_context(
                        rule(task_ctx, *running_transition)
                    )
                await task_ctx.validate_proposed_state()

            # Each task should be accepted and increment both limits
            assert task_ctx.response_status == SetStateStatus.ACCEPT
            task_contexts.append(task_ctx)

        # Verify both limits have the expected slots and leases
        await session.refresh(v2_limit_5)
        await session.refresh(v2_limit_10)
        assert v2_limit_5.active_slots == 5
        assert v2_limit_10.active_slots == 5

        # Count leases via lease storage
        # Every incremented slot must have a matching lease holder.
        lease_storage = get_concurrency_lease_storage()
        limit_5_holders = await lease_storage.list_holders_for_limit(v2_limit_5.id)
        limit_10_holders = await lease_storage.list_holders_for_limit(v2_limit_10.id)
        assert len(limit_5_holders) == 5
        assert len(limit_10_holders) == 5

        # 6th task should be blocked because limit-5 is full
        blocked_task_ctx = await initialize_orchestration(
            session, "task", *running_transition, run_tags=["limit-5", "limit-10"]
        )

        async with contextlib.AsyncExitStack() as stack:
            for rule in secure_policy:
                blocked_task_ctx = await stack.enter_async_context(
                    rule(blocked_task_ctx, *running_transition)
                )
            await blocked_task_ctx.validate_proposed_state()

        # Should be blocked - no partial increments should occur
        assert blocked_task_ctx.response_status == SetStateStatus.WAIT

        # Slots should remain the same (no zombie slots created)
        # i.e. limit-10 must not have been incremented while limit-5 blocked.
        await session.refresh(v2_limit_5)
        await session.refresh(v2_limit_10)
        assert v2_limit_5.active_slots == 5
        assert v2_limit_10.active_slots == 5

        # Complete all tasks to verify proper cleanup atomicity
        for task_ctx in task_contexts:
            completed_ctx = await initialize_orchestration(
                session,
                "task",
                *completed_transition,
                run_override=task_ctx.run,
                run_tags=["limit-5", "limit-10"],
            )

            # Set validated state to completed (normally done by orchestration)
            completed_ctx.validated_state = states.State(
                type=states.StateType.COMPLETED
            )

            async with contextlib.AsyncExitStack() as stack:
                for rule in release_policy:
                    completed_ctx = await stack.enter_async_context(
                        rule(completed_ctx, *completed_transition)
                    )

        # Both limits should be completely cleaned up
        await session.refresh(v2_limit_5)
        await session.refresh(v2_limit_10)
        assert v2_limit_5.active_slots == 0
        assert v2_limit_10.active_slots == 0

        # Verify leases were cleaned up
        limit_5_holders = await lease_storage.list_holders_for_limit(v2_limit_5.id)
        limit_10_holders = await lease_storage.list_holders_for_limit(v2_limit_10.id)
        assert len(limit_5_holders) == 0
        assert len(limit_10_holders) == 0

        # Now the previously blocked task should be able to run
        retry_task_ctx = await initialize_orchestration(
            session,
            "task",
            *running_transition,
            run_override=blocked_task_ctx.run,
            run_tags=["limit-5", "limit-10"],
        )

        async with contextlib.AsyncExitStack() as stack:
            for rule in secure_policy:
                retry_task_ctx = await stack.enter_async_context(
                    rule(retry_task_ctx, *running_transition)
                )
            await retry_task_ctx.validate_proposed_state()

        assert retry_task_ctx.response_status == SetStateStatus.ACCEPT

        await session.refresh(v2_limit_5)
        await session.refresh(v2_limit_10)
        assert v2_limit_5.active_slots == 1
        assert v2_limit_10.active_slots == 1

    async def test_v2_limit_prevents_exceeding_capacity(
        self,
        session: AsyncSession,
        initialize_orchestration: Callable[..., Any],
    ) -> None:
        """Test that V2 limits prevent tasks from exceeding capacity."""
        v2_limit = await self.create_v2_concurrency_limit(session, "capacity-test", 1)

        # First, manually fill the limit to capacity
        await concurrency_limits_v2.bulk_increment_active_slots(
            session=session,
            concurrency_limit_ids=[v2_limit.id],
            slots=1,
        )

        secure_policy = [SecureTaskConcurrencySlots]
        running_transition = (states.StateType.PENDING, states.StateType.RUNNING)

        ctx = await initialize_orchestration(
            session, "task", *running_transition, run_tags=["capacity-test"]
        )

        async with contextlib.AsyncExitStack() as stack:
            for rule in secure_policy:
                ctx = await stack.enter_async_context(rule(ctx, *running_transition))
            await ctx.validate_proposed_state()

        # Should be told to wait since capacity is already reached
        assert ctx.response_status == SetStateStatus.WAIT
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/server/orchestration/test_task_concurrency_v2_integration.py",
"license": "Apache License 2.0",
"lines": 564,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/prefect/_internal/compatibility/starlette.py | """
Compatibility wrapper for starlette status codes.
Starlette 0.48.0 renamed several status codes per RFC 9110.
This module provides backwards-compatible access to these codes.
"""
from starlette import status as _starlette_status
class _StatusCompatibility:
    """
    Compatibility wrapper that maintains old status code names while using new ones where available.

    Maps these renamed codes from RFC 9110:

    - HTTP_422_UNPROCESSABLE_ENTITY -> HTTP_422_UNPROCESSABLE_CONTENT
    - HTTP_413_REQUEST_ENTITY_TOO_LARGE -> HTTP_413_CONTENT_TOO_LARGE
    - HTTP_414_REQUEST_URI_TOO_LONG -> HTTP_414_URI_TOO_LONG
    - HTTP_416_REQUESTED_RANGE_NOT_SATISFIABLE -> HTTP_416_RANGE_NOT_SATISFIABLE
    """

    # legacy name -> (RFC 9110 name used by starlette >= 0.48, numeric fallback)
    _RENAMES: dict = {
        "HTTP_422_UNPROCESSABLE_ENTITY": ("HTTP_422_UNPROCESSABLE_CONTENT", 422),
        "HTTP_413_REQUEST_ENTITY_TOO_LARGE": ("HTTP_413_CONTENT_TOO_LARGE", 413),
        "HTTP_414_REQUEST_URI_TOO_LONG": ("HTTP_414_URI_TOO_LONG", 414),
        "HTTP_416_REQUESTED_RANGE_NOT_SATISFIABLE": (
            "HTTP_416_RANGE_NOT_SATISFIABLE",
            416,
        ),
    }

    def __getattr__(self, name: str) -> int:
        """Resolve a status-code attribute, translating renamed codes.

        Lookup order for a renamed code: the new RFC 9110 name on starlette,
        then the legacy name, then a hard-coded numeric fallback. All other
        names are delegated directly to ``starlette.status``.
        """
        renamed = self._RENAMES.get(name)
        if renamed is None:
            return getattr(_starlette_status, name)
        new_name, fallback_value = renamed
        if hasattr(_starlette_status, new_name):
            return getattr(_starlette_status, new_name)
        if hasattr(_starlette_status, name):
            return getattr(_starlette_status, name)
        return fallback_value


status = _StatusCompatibility()
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/_internal/compatibility/starlette.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/prefect:src/prefect/cli/deploy/_models.py | from __future__ import annotations
from datetime import timedelta
from typing import Any, Dict, List, Optional, Union
from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator
from prefect._experimental.sla.objects import SlaTypes
from prefect.client.schemas.actions import DeploymentScheduleCreate
from prefect.client.schemas.schedules import SCHEDULE_TYPES
class WorkPoolConfig(BaseModel):
    """Schema for the `work_pool:` section of a deployment in prefect.yaml."""

    # Unknown keys in the YAML are silently dropped.
    model_config = ConfigDict(extra="ignore")

    # Name of the work pool to deploy to.
    name: Optional[str] = None
    # Optional work queue within the pool.
    work_queue_name: Optional[str] = None
    # Overrides for the pool's base job template variables.
    job_variables: Dict[str, Any] = Field(default_factory=dict)
class DeploymentConfig(BaseModel):
    """Schema for a single entry under `deployments:` in prefect.yaml.

    Fields default to None/empty so partially-specified YAML entries validate;
    unknown keys are ignored.
    """

    model_config = ConfigDict(extra="ignore")

    # base metadata
    name: Optional[str] = None
    version: Optional[str] = None
    version_type: Optional[str] = None
    tags: Optional[Union[str, list[Any]]] = (
        None  # allow raw templated string or list; templating will normalize
    )
    description: Optional[str] = None

    # schedule metadata
    schedule: Optional["ScheduleItem"] = None
    schedules: Optional[List["ScheduleItem"]] = None
    paused: Optional[bool] = None
    # Either a plain integer limit or a structured spec with extra options.
    concurrency_limit: Optional[Union[int, "ConcurrencyLimitSpec"]] = None

    # flow-specific
    flow_name: Optional[str] = None
    entrypoint: Optional[str] = None
    parameters: Dict[str, Any] = Field(default_factory=dict)
    enforce_parameter_schema: Optional[bool] = None

    # per-deployment actions (optional overrides)
    # Accept list, mapping (empty or step), or null for flexibility
    build: Optional[Union[List[Dict[str, Any]], Dict[str, Any]]] = None
    push: Optional[Union[List[Dict[str, Any]], Dict[str, Any]]] = None
    pull: Optional[Union[List[Dict[str, Any]], Dict[str, Any]]] = None

    # infra-specific
    work_pool: Optional[WorkPoolConfig] = None

    # automations metadata
    # Triggers are stored as raw dicts to allow Jinja templating (e.g., enabled: "{{ prefect.variables.is_prod }}")
    # Strict validation happens later in _initialize_deployment_triggers after template resolution
    triggers: Optional[List[Dict[str, Any]]] = None
    sla: Optional[List[SlaTypes]] = None
class PrefectYamlModel(BaseModel):
    """Top-level schema for a prefect.yaml file.

    Unknown keys are ignored. `prefect-version` may be spelled with its YAML
    (hyphenated) alias or the pythonic `prefect_version` name.
    """

    model_config = ConfigDict(populate_by_name=True, extra="ignore")

    # generic metadata (currently unused by CLI but allowed)
    prefect_version: Optional[str] = Field(default=None, alias="prefect-version")
    name: Optional[str] = None

    # global actions
    build: Optional[Union[List[Dict[str, Any]], Dict[str, Any]]] = None
    push: Optional[Union[List[Dict[str, Any]], Dict[str, Any]]] = None
    pull: Optional[Union[List[Dict[str, Any]], Dict[str, Any]]] = None

    # deployments
    deployments: List[DeploymentConfig] = Field(default_factory=list)

    @staticmethod
    def _validate_action_steps(
        steps: Optional[Union[List[Dict[str, Any]], Dict[str, Any]]],
    ) -> None:
        """Lightly validate a build/push/pull action.

        Empty values pass through. The field type also permits a mapping form
        ("empty or step"); iterating a dict here would yield its string keys and
        wrongly reject any non-empty step mapping, so mappings are accepted
        unchanged. For the list form, each element must itself be a mapping;
        empty or multi-key steps are passed through unchanged.

        Raises:
            TypeError: If a list element is not a mapping.
        """
        if not steps:
            return
        # Fix: a mapping-form action is valid per the field type; do not iterate
        # it (that would visit its keys and raise TypeError).
        if isinstance(steps, dict):
            return
        for step in steps:
            if not isinstance(step, dict):
                raise TypeError("Each action step must be a mapping")
            # empty or multi-key steps will be passed through unchanged

    @field_validator("build", "push", "pull")
    @classmethod
    def _validate_actions(
        cls, v: Optional[Union[List[Dict[str, Any]], Dict[str, Any]]]
    ):
        """Run light structural validation on the global actions."""
        cls._validate_action_steps(v)
        return v

    @field_validator("deployments")
    @classmethod
    def _validate_deployments(cls, v: List[DeploymentConfig]):
        # Normalize a null `deployments:` key to an empty list
        return v or []
class ConcurrencyLimitSpec(BaseModel):
    """Structured form of a deployment `concurrency_limit:` entry."""

    model_config = ConfigDict(extra="ignore")

    # Maximum number of concurrent runs.
    limit: Optional[int] = None
    # What to do when the limit is hit (e.g. enqueue/cancel); passed through as-is.
    collision_strategy: Optional[str] = None
    grace_period_seconds: Optional[int] = None
class RawScheduleConfig(BaseModel):
    """
    Strongly-typed schedule config that mirrors the CLI's accepted YAML shape.

    Exactly one of cron, interval, or rrule must be provided.
    """

    # Unknown keys are rejected here (unlike the other models in this module).
    model_config = ConfigDict(extra="forbid")

    # One-of schedule selectors
    cron: Optional[str] = None
    interval: Optional[timedelta] = (
        None  # accepts int/float (seconds), ISO 8601, HH:MM:SS
    )
    rrule: Optional[str] = None

    # Common extras
    timezone: Optional[str] = None
    anchor_date: Optional[str] = None
    active: Optional[Union[bool, str]] = None  # Allow string for template values
    parameters: Dict[str, Any] = Field(default_factory=dict)
    slug: Optional[str] = None
    replaces: Optional[str] = None  # The slug of an existing schedule to replace

    # Cron-specific
    day_or: Optional[Union[bool, str]] = None  # Allow string for template values

    @model_validator(mode="after")
    def _one_of_schedule(self):
        """Enforce that exactly one of cron/interval/rrule is set."""
        provided = [v is not None for v in (self.cron, self.interval, self.rrule)]
        if sum(provided) != 1:
            raise ValueError(
                "Exactly one of 'cron', 'interval', or 'rrule' must be provided"
            )
        return self
# A schedule entry may arrive in several shapes: the YAML-native form validated
# above, an already-structured API object, or a schedule schema type.
# NOTE(review): Dict[None, None] presumably admits only an empty mapping ({}),
# i.e. "no schedule" — confirm against the prefect.yaml parsing code.
ScheduleItem = Union[
    RawScheduleConfig, DeploymentScheduleCreate, SCHEDULE_TYPES, Dict[None, None]
]
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/cli/deploy/_models.py",
"license": "Apache License 2.0",
"lines": 111,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/prefect:src/prefect/cli/deploy/_actions.py | from __future__ import annotations
import os
from getpass import GetPassWarning
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional
from prefect.blocks.system import Secret
from prefect.cli._prompts import (
confirm,
prompt,
prompt_select_blob_storage_credentials,
prompt_select_remote_flow_storage,
)
from prefect.utilities._git import get_git_branch, get_git_remote_origin_url
from prefect.utilities.slugify import slugify
if TYPE_CHECKING:
from rich.console import Console
async def _generate_git_clone_pull_step(
    console: "Console",
    deploy_config: dict[str, Any],
    remote_url: str,
) -> list[dict[str, Any]]:
    """Interactively build a `git_clone` pull step for a deployment.

    Confirms (or prompts for) the repository URL and branch, and, for private
    repositories, saves an access token as a Secret block and references it
    from the step via a Jinja template.

    Args:
        console: Rich console used for prompts and confirmations.
        deploy_config: Deployment config; `name` and `flow_name` are used to
            derive the token Secret block name.
        remote_url: Detected git remote URL, or an empty value to force a prompt.

    Returns:
        A single-element list containing the `git_clone` pull step.
    """
    # confirm/prompt imported from concrete module
    branch = get_git_branch() or "main"

    if not remote_url:
        remote_url = prompt(
            "Please enter the URL to pull your flow code from", console=console
        )
    elif not confirm(
        f"Is [green]{remote_url}[/] the correct URL to pull your flow code from?",
        default=True,
        console=console,
    ):
        remote_url = prompt(
            "Please enter the URL to pull your flow code from", console=console
        )

    if not confirm(
        f"Is [green]{branch}[/] the correct branch to pull your flow code from?",
        default=True,
        console=console,
    ):
        branch = prompt(
            "Please enter the branch to pull your flow code from",
            default="main",
            console=console,
        )

    token_secret_block_name = None
    if confirm("Is this a private repository?", console=console):
        # Deterministic block name so re-deploys find the previously saved token.
        token_secret_block_name = f"deployment-{slugify(deploy_config['name'])}-{slugify(deploy_config['flow_name'])}-repo-token"
        create_new_block = False
        prompt_message = (
            "Please enter a token that can be used to access your private"
            " repository. This token will be saved as a secret via the Prefect API"
        )

        try:
            # Probe for an existing token; Secret.aload raises ValueError when
            # the block does not exist.
            await Secret.aload(token_secret_block_name)
            if not confirm(
                (
                    "We found an existing token saved for this deployment. Would"
                    " you like use the existing token?"
                ),
                default=True,
                console=console,
            ):
                prompt_message = (
                    "Please enter a token that can be used to access your private"
                    " repository (this will overwrite the existing token saved via"
                    " the Prefect API)."
                )
                create_new_block = True
        except ValueError:
            create_new_block = True

        if create_new_block:
            try:
                repo_token = prompt(
                    prompt_message,
                    console=console,
                    password=True,
                )
            except GetPassWarning:
                # Handling for when password masking is not supported
                repo_token = prompt(
                    prompt_message,
                    console=console,
                )
            await Secret(
                value=repo_token,
            ).save(name=token_secret_block_name, overwrite=True)

    git_clone_step = {
        "prefect.deployments.steps.git_clone": {
            "repository": remote_url,
            "branch": branch,
        }
    }

    if token_secret_block_name:
        # Reference the saved Secret via a Jinja template rather than inlining
        # the token value into prefect.yaml.
        git_clone_step["prefect.deployments.steps.git_clone"]["access_token"] = (
            "{{ prefect.blocks.secret." + token_secret_block_name + " }}"
        )

    return [git_clone_step]
async def _generate_pull_step_for_build_docker_image(
    console: "Console", deploy_config: Dict[str, Any], auto: bool = True
) -> list[dict[str, Any]]:
    """Build the `set_working_directory` pull step for a Docker-image deployment.

    Args:
        console: Rich console used when prompting interactively.
        deploy_config: The deployment configuration (unused here; kept for
            signature parity with the other pull-step generators).
        auto: When True, assume the conventional `/opt/prefect/<cwd name>` path
            inside the image instead of prompting the user.

    Returns:
        A single-step pull action pointing at the flow code directory in the image.
    """
    default_directory = f"/opt/prefect/{os.path.basename(os.getcwd())}"
    if auto:
        target_directory = default_directory
    else:
        # Let the user override the conventional in-image location.
        target_directory = prompt(
            "What is the path to your flow code in your Dockerfile?",
            default=default_directory,
            console=console,
        )
    return [
        {
            "prefect.deployments.steps.set_working_directory": {
                "directory": target_directory
            }
        }
    ]
async def _check_for_build_docker_image_step(
    build_action: List[Dict],
) -> Optional[Dict[str, Any]]:
    """Return the config of the first Docker-image build step found, if any.

    Scans the given build action for a known `build_docker_image` step and
    returns that step's configuration mapping. Returns None when the action is
    empty/None or contains no non-empty Docker build step.
    """
    if not build_action:
        return None
    known_step_names = ("prefect_docker.deployments.steps.build_docker_image",)
    for step_name in known_step_names:
        for step in build_action:
            config = step.get(step_name)
            # Mirror the original truthiness check: an empty config ({}) is skipped.
            if config:
                return config
    return None
async def _generate_actions_for_remote_flow_storage(
    console: "Console",
    deploy_config: dict[str, Any],
    actions: dict[str, list[dict[str, Any]]],
) -> dict[str, list[dict[str, Any]]]:
    """Populate push/pull actions for the user's chosen remote flow storage.

    Prompts the user to select a storage provider. For git, only a pull step is
    generated; for blob storage providers (S3/GCS/Azure), matching push and
    pull steps are generated, optionally referencing a credentials block.

    Args:
        console: Rich console used for interactive prompting.
        deploy_config: Deployment config, forwarded to the git pull-step
            generator (used there for naming the token secret).
        actions: Mapping of action name ("build"/"push"/"pull") to step lists;
            mutated in place and returned.
            (Fix: previously annotated as `list[...]`, but the body assigns
            `actions["push"]`/`actions["pull"]`, so it is a mapping.)

    Returns:
        The same ``actions`` mapping with "push"/"pull" entries filled in.
    """
    # prompt + selection helpers imported from concrete module
    storage_provider_to_collection: dict[str, str] = {
        "s3": "prefect_aws",
        "gcs": "prefect_gcp",
        "azure_blob_storage": "prefect_azure",
    }

    selected_storage_provider = await prompt_select_remote_flow_storage(console=console)

    if selected_storage_provider == "git":
        actions["pull"] = await _generate_git_clone_pull_step(
            console=console,
            deploy_config=deploy_config,
            remote_url=get_git_remote_origin_url(),
        )
    elif selected_storage_provider in storage_provider_to_collection:
        collection = storage_provider_to_collection[selected_storage_provider]

        bucket, folder = prompt("Bucket name"), prompt("Folder name")

        # Prompt user to select or create credentials for the chosen provider
        credentials_block = await prompt_select_blob_storage_credentials(
            console=console, storage_provider=selected_storage_provider
        )
        # This prompt returns a Jinja template string referencing a credentials block
        credentials = credentials_block if credentials_block else None

        step_fields = {
            # Azure names its top-level storage unit "container"; S3/GCS use "bucket".
            (
                "container"
                if selected_storage_provider == "azure_blob_storage"
                else "bucket"
            ): bucket,
            "folder": folder,
            "credentials": credentials,
        }

        # push and pull steps intentionally share the same field mapping.
        actions["push"] = [
            {
                f"{collection}.deployments.steps.push_to_{selected_storage_provider}": (
                    step_fields
                )
            }
        ]
        actions["pull"] = [
            {
                f"{collection}.deployments.steps.pull_from_{selected_storage_provider}": (
                    step_fields
                )
            }
        ]

    return actions
async def _generate_default_pull_action(
    console: "Console",
    deploy_config: dict[str, Any],
    actions: dict[str, list[dict[str, Any]]],
    *,
    is_interactive: Callable[[], bool],
) -> list[dict[str, Any]]:
    """Generate a sensible default pull action for a deployment.

    If the build action contains a Docker image build step, generates a
    `set_working_directory` step pointing into the image (prompting when
    interactive). Otherwise, defaults to the current working directory on the
    worker.

    Args:
        console: Rich console used for output and confirmation prompts.
        deploy_config: Deployment config; `build` (if set) takes precedence
            over the global build action, and `entrypoint` is used for the
            informational message in the non-Docker path.
        actions: Mapping of action name to step lists; only "build" is read.
            (Annotation corrected: the body indexes with a string key.)
        is_interactive: Callable reporting whether the CLI may prompt the user.

    Returns:
        A single-element pull step list.
        NOTE(review): when a Docker build step exists with a non-"auto"
        Dockerfile and the session is non-interactive, this falls through and
        implicitly returns None — confirm callers handle that case.
    """
    from prefect.cli._utilities import exit_with_error

    # Per-deployment build overrides the global build action.
    build_docker_image_step = await _check_for_build_docker_image_step(
        deploy_config.get("build") or actions["build"]
    )
    if build_docker_image_step:
        dockerfile = build_docker_image_step.get("dockerfile")
        if dockerfile == "auto":
            # Auto-generated Dockerfiles copy the cwd in, so no prompting needed.
            return await _generate_pull_step_for_build_docker_image(
                console, deploy_config
            )
        if is_interactive():
            if not confirm(
                "Does your Dockerfile have a line that copies the current working"
                " directory into your image?"
            ):
                exit_with_error(
                    "Your flow code must be copied into your Docker image to run"
                    " your deployment.\nTo do so, you can copy this line into your"
                    " Dockerfile: [yellow]COPY . /opt/prefect/[/yellow]"
                )
            return await _generate_pull_step_for_build_docker_image(
                console, deploy_config, auto=False
            )
    else:
        entrypoint_path, _ = deploy_config["entrypoint"].split(":")
        console.print(
            "Your Prefect workers will attempt to load your flow from:"
            f" [green]{(Path.cwd() / Path(entrypoint_path)).absolute().resolve()}[/]. To"
            " see more options for managing your flow's code, run:\n\n\t[blue]$"
            " prefect init[/]\n"
        )
        return [
            {
                "prefect.deployments.steps.set_working_directory": {
                    "directory": str(Path.cwd().absolute().resolve())
                }
            }
        ]
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/cli/deploy/_actions.py",
"license": "Apache License 2.0",
"lines": 220,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/prefect/cli/deploy/_commands.py | """
Deploy command — native cyclopts implementation.
Reuses all business logic from prefect.cli.deploy.* modules, threading
`console` and `is_interactive` as parameters.
"""
from __future__ import annotations
from pathlib import Path
from typing import Annotated, Any
import cyclopts
from rich.prompt import Prompt
from rich.table import Table
import prefect
import prefect.cli._app as _cli
from prefect.cli._utilities import (
exit_with_error,
with_cli_exception_handling,
)
from prefect.client.schemas.objects import ConcurrencyLimitConfig
# Top-level cyclopts application for the `prefect deploy` command group.
# Version flags are disabled (empty list); only --help is registered.
deploy_app: cyclopts.App = cyclopts.App(
    name="deploy",
    help="Create and manage deployments.",
    version_flags=[],
    help_flags=["--help"],
)
@deploy_app.command(name="init")
@with_cli_exception_handling
async def init(
    *,
    name: Annotated[
        str | None,
        cyclopts.Parameter("--name", help="The name to give the project."),
    ] = None,
    recipe: Annotated[
        str | None,
        cyclopts.Parameter("--recipe", help="The recipe to use for the project."),
    ] = None,
    fields: Annotated[
        list[str] | None,
        cyclopts.Parameter(
            "--field",
            alias="-f",
            help=(
                "One or more fields to pass to the recipe (e.g., image_name) in the"
                " format of key=value."
            ),
        ),
    ] = None,
):
    """
    Initialize a Prefect project.

    Prompts for a bundled recipe when none was given (interactive mode only),
    collects any required recipe inputs, then scaffolds project files via
    `initialize_project` and reports what was created.
    """
    import yaml

    from prefect.deployments import initialize_project

    inputs: dict[str, Any] = {}
    fields = fields or []
    recipe_paths = prefect.__module_path__ / "deployments" / "recipes"

    # Parse `--field key=value` pairs. Split on the first "=" only so values
    # containing "=" (e.g. base64 strings or URLs) are preserved intact.
    for field in fields:
        try:
            key, value = field.split("=", 1)
        except ValueError:
            exit_with_error(
                f"Invalid field {field!r}; fields must be in the format key=value."
            )
        inputs[key] = value

    from prefect.cli._prompts import prompt_select_from_table

    if not recipe and _cli.is_interactive():
        # No recipe given: offer a table of the bundled recipes to choose from.
        recipes: list[dict[str, Any]] = []

        for r in recipe_paths.iterdir():
            if r.is_dir() and (r / "prefect.yaml").exists():
                with open(r / "prefect.yaml") as f:
                    recipe_data = yaml.safe_load(f)
                    recipe_name = r.name
                    recipe_description = recipe_data.get(
                        "description", "(no description available)"
                    )
                    recipes.append(
                        {"name": recipe_name, "description": recipe_description}
                    )

        selected_recipe = prompt_select_from_table(
            _cli.console,
            "Would you like to initialize your deployment configuration with a recipe?",
            columns=[
                {"header": "Name", "key": "name"},
                {"header": "Description", "key": "description"},
            ],
            data=recipes,
            opt_out_message="No, I'll use the default deployment configuration.",
            opt_out_response={},
        )
        if selected_recipe != {}:
            recipe = selected_recipe["name"]

    if recipe and (recipe_paths / recipe / "prefect.yaml").exists():
        with open(recipe_paths / recipe / "prefect.yaml") as f:
            recipe_inputs = yaml.safe_load(f).get("required_inputs") or {}

        if recipe_inputs:
            if set(recipe_inputs.keys()) < set(inputs.keys()):
                # More fields were provided than the recipe requires: warn
                # about the extras but continue.
                _cli.console.print(
                    (
                        f"Warning: extra fields provided for {recipe!r} recipe:"
                        f" '{', '.join(set(inputs.keys()) - set(recipe_inputs.keys()))}'"
                    ),
                    style="red",
                )
            elif set(recipe_inputs.keys()) > set(inputs.keys()):
                # Some required inputs are missing: show them, then prompt.
                table = Table(
                    title=f"[red]Required inputs for {recipe!r} recipe[/red]",
                )
                table.add_column("Field Name", style="green", no_wrap=True)
                table.add_column(
                    "Description", justify="left", style="white", no_wrap=False
                )
                for field_name, description in recipe_inputs.items():
                    if field_name not in inputs:
                        table.add_row(field_name, description)

                _cli.console.print(table)

                for key, description in recipe_inputs.items():
                    if key not in inputs:
                        inputs[key] = Prompt.ask(key, console=_cli.console)

            _cli.console.print("-" * 15)

    try:
        files = [
            f"[green]{fname}[/green]"
            for fname in initialize_project(name=name, recipe=recipe, inputs=inputs)
        ]
    except ValueError as exc:
        if "Unknown recipe" in str(exc):
            exit_with_error(
                f"Unknown recipe {recipe!r} provided - run"
                " [yellow]`prefect init`[/yellow] to see all available recipes."
            )
        else:
            raise

    files_str = "\n".join(files)
    empty_msg = (
        f"Created project in [green]{Path('.').resolve()}[/green];"
        " no new files created."
    )
    file_msg = (
        f"Created project in [green]{Path('.').resolve()}[/green]"
        f" with the following new files:\n{files_str}"
    )
    _cli.console.print(file_msg if files_str else empty_msg)
@deploy_app.default
@with_cli_exception_handling
async def deploy(
    entrypoint: Annotated[
        str | None,
        cyclopts.Parameter(
            show=False,
            help=(
                "The path to a flow entrypoint within a project, in the form of"
                " `./path/to/file.py:flow_func_name`"
            ),
        ),
    ] = None,
    *,
    names: Annotated[
        list[str] | None,
        cyclopts.Parameter(
            "--name",
            alias="-n",
            help=(
                "The name to give the deployment. Can be a pattern. Examples:"
                " 'my-deployment', 'my-flow/my-deployment', 'my-deployment-*',"
                " '*-flow-name/deployment*'"
            ),
        ),
    ] = None,
    description: Annotated[
        str | None,
        cyclopts.Parameter(
            "--description",
            alias="-d",
            help=(
                "The description to give the deployment. If not provided, the"
                " description will be populated from the flow's description."
            ),
        ),
    ] = None,
    version_type: Annotated[
        str | None,
        cyclopts.Parameter(
            "--version-type",
            help="The type of version to use for this deployment.",
        ),
    ] = None,
    version: Annotated[
        str | None,
        cyclopts.Parameter("--version", help="A version to give the deployment."),
    ] = None,
    tags: Annotated[
        list[str] | None,
        cyclopts.Parameter(
            "--tag",
            alias="-t",
            help=(
                "One or more optional tags to apply to the deployment. Note: tags are"
                " used only for organizational purposes. For delegating work to"
                " workers, use the --work-queue flag."
            ),
        ),
    ] = None,
    concurrency_limit: Annotated[
        int | None,
        cyclopts.Parameter(
            "--concurrency-limit",
            help="The maximum number of concurrent runs for this deployment.",
        ),
    ] = None,
    concurrency_limit_collision_strategy: Annotated[
        str | None,
        cyclopts.Parameter(
            "--collision-strategy",
            help=(
                "Configure the behavior for runs once the concurrency limit is"
                " reached. Falls back to `ENQUEUE` if unset."
            ),
        ),
    ] = None,
    work_pool_name: Annotated[
        str | None,
        cyclopts.Parameter(
            "--pool",
            alias="-p",
            help="The work pool that will handle this deployment's runs.",
        ),
    ] = None,
    work_queue_name: Annotated[
        str | None,
        cyclopts.Parameter(
            "--work-queue",
            alias="-q",
            help=(
                "The work queue that will handle this deployment's runs. It will be"
                " created if it doesn't already exist. Defaults to `None`."
            ),
        ),
    ] = None,
    job_variables: Annotated[
        list[str] | None,
        cyclopts.Parameter(
            "--job-variable",
            json_list=False,
            help=(
                "One or more job variable overrides for the work pool provided in the"
                " format of key=value string or a JSON object"
            ),
        ),
    ] = None,
    cron: Annotated[
        list[str] | None,
        cyclopts.Parameter(
            "--cron",
            help=(
                "A cron string that will be used to set a CronSchedule on the"
                " deployment."
            ),
        ),
    ] = None,
    interval: Annotated[
        list[int] | None,
        cyclopts.Parameter(
            "--interval",
            help=(
                "An integer specifying an interval (in seconds) that will be used to"
                " set an IntervalSchedule on the deployment."
            ),
        ),
    ] = None,
    interval_anchor: Annotated[
        str | None,
        cyclopts.Parameter(
            "--anchor-date",
            help="The anchor date for all interval schedules",
        ),
    ] = None,
    rrule: Annotated[
        list[str] | None,
        cyclopts.Parameter(
            "--rrule",
            help=(
                "An RRule that will be used to set an RRuleSchedule on the deployment."
            ),
        ),
    ] = None,
    timezone: Annotated[
        str | None,
        cyclopts.Parameter(
            "--timezone",
            help="Deployment schedule timezone string e.g. 'America/New_York'",
        ),
    ] = None,
    trigger: Annotated[
        list[str] | None,
        cyclopts.Parameter(
            "--trigger",
            json_list=False,
            help=(
                "Specifies a trigger for the deployment. The value can be a json"
                " string or path to `.yaml`/`.json` file. This flag can be used"
                " multiple times."
            ),
        ),
    ] = None,
    param: Annotated[
        list[str] | None,
        cyclopts.Parameter(
            "--param",
            help=(
                "An optional parameter override, values are parsed as JSON strings"
                " e.g. --param question=ultimate --param answer=42"
            ),
        ),
    ] = None,
    params: Annotated[
        str | None,
        cyclopts.Parameter(
            "--params",
            help=(
                "An optional parameter override in a JSON string format e.g."
                ' --params=\'{"question": "ultimate", "answer": 42}\''
            ),
        ),
    ] = None,
    enforce_parameter_schema: Annotated[
        bool,
        cyclopts.Parameter(
            "--enforce-parameter-schema",
            help=(
                "Whether to enforce the parameter schema on this deployment. If set to"
                " True, any parameters passed to this deployment must match the"
                " signature of the flow."
            ),
        ),
    ] = True,
    deploy_all: Annotated[
        bool,
        cyclopts.Parameter(
            "--all",
            help=(
                "Deploy all flows in the project. If a flow name or entrypoint is also"
                " provided, this flag will be ignored."
            ),
        ),
    ] = False,
    prefect_file: Annotated[
        Path,
        cyclopts.Parameter(
            "--prefect-file",
            help="Specify a custom path to a prefect.yaml file",
        ),
    ] = Path("prefect.yaml"),
    sla: Annotated[
        list[str] | None,
        cyclopts.Parameter(
            "--sla",
            json_list=False,
            help=(
                "Experimental: One or more SLA configurations for the deployment. May"
                " be removed or modified at any time. Currently only supported on"
                " Prefect Cloud."
            ),
        ),
    ] = True if False else None,
):
    """Create and update deployments.

    Loads deployment configurations (and build/push/pull actions) from the
    prefect.yaml file, selects which configurations to act on based on
    `--name` patterns / `--all`, and runs either a single- or multi-deploy.
    CLI options override values from the file for single deploys; for multi
    deploys they are ignored (a warning is printed).
    """
    from prefect.cli.deploy._config import (
        _load_deploy_configs_and_actions,
        _parse_name_from_pattern,
        _pick_deploy_configs,
    )
    from prefect.cli.deploy._core import _run_multi_deploy, _run_single_deploy
    from prefect.settings import get_current_settings

    # Resolve default work pool from settings (typer uses a lambda callback).
    if work_pool_name is None:
        work_pool_name = get_current_settings().deployments.default_work_pool_name
    if job_variables is None:
        job_variables = list()

    # Combine the limit and collision strategy into a single config dict when
    # a strategy was given; otherwise pass the bare limit (or None) through.
    concurrency_limit_config = (
        None
        if concurrency_limit is None
        else (
            concurrency_limit
            if concurrency_limit_collision_strategy is None
            else ConcurrencyLimitConfig(
                limit=concurrency_limit,
                collision_strategy=concurrency_limit_collision_strategy,
            ).model_dump()
        )
    )

    # All CLI overrides gathered in one dict, consumed downstream by
    # _apply_cli_options_to_deploy_config via _run_single_deploy.
    options: dict[str, Any] = {
        "entrypoint": entrypoint,
        "description": description,
        "version_type": version_type,
        "version": version,
        "tags": tags,
        "concurrency_limit": concurrency_limit_config,
        "work_pool_name": work_pool_name,
        "work_queue_name": work_queue_name,
        "variables": job_variables,
        "cron": cron,
        "interval": interval,
        "anchor_date": interval_anchor,
        "rrule": rrule,
        "timezone": timezone,
        "triggers": trigger,
        "param": param,
        "params": params,
        "sla": sla,
    }

    try:
        all_deploy_configs, actions = _load_deploy_configs_and_actions(
            prefect_file=prefect_file, console=_cli.console
        )
        # Expand any glob-style names ("*" patterns) into concrete names.
        parsed_names: list[str] = []
        for name in names or []:
            if "*" in name:
                parsed_names.extend(_parse_name_from_pattern(all_deploy_configs, name))
            else:
                parsed_names.append(name)
        deploy_configs = _pick_deploy_configs(
            all_deploy_configs,
            parsed_names,
            deploy_all,
            console=_cli.console,
            is_interactive=_cli.is_interactive,
        )

        if len(deploy_configs) > 1:
            # Multi-deploy: per-deployment CLI overrides are not supported.
            if any(options.values()):
                _cli.console.print(
                    (
                        "You have passed options to the deploy command, but you are"
                        " creating or updating multiple deployments. These options"
                        " will be ignored."
                    ),
                    style="yellow",
                )
            await _run_multi_deploy(
                deploy_configs=deploy_configs,
                actions=actions,
                deploy_all=deploy_all,
                prefect_file=prefect_file,
                console=_cli.console,
                is_interactive=_cli.is_interactive,
            )
        else:
            deploy_config = deploy_configs[0] if deploy_configs else {}
            # Strip any "flow-name/" qualifier; only the deployment name is
            # passed downstream.
            options["names"] = [
                name.split("/", 1)[-1] if "/" in name else name for name in parsed_names
            ]
            if not enforce_parameter_schema:
                options["enforce_parameter_schema"] = False
            await _run_single_deploy(
                deploy_config=deploy_config,
                actions=actions,
                options=options,
                prefect_file=prefect_file,
                console=_cli.console,
                is_interactive=_cli.is_interactive,
            )
    except ValueError as exc:
        exit_with_error(str(exc))
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/cli/deploy/_commands.py",
"license": "Apache License 2.0",
"lines": 455,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/prefect/cli/deploy/_config.py | from __future__ import annotations
import json
import re
from copy import deepcopy
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Optional
import yaml
from pydantic import ValidationError
from yaml.error import YAMLError
from prefect.utilities.annotations import NotSet
if TYPE_CHECKING:
from rich.console import Console
from ._models import PrefectYamlModel
def _format_validation_error(exc: ValidationError, raw_data: dict[str, Any]) -> str:
"""Format Pydantic validation errors into user-friendly messages."""
deployment_errors: dict[str, set[str]] = {}
top_level_errors: list[tuple[str, str]] = []
for error in exc.errors():
loc = error.get("loc", ())
msg = error.get("msg", "Invalid value")
# Handle deployment-level errors
if len(loc) >= 2 and loc[0] == "deployments" and isinstance(loc[1], int):
idx = loc[1]
deployments = raw_data.get("deployments", [])
name = (
deployments[idx].get("name", f"#{idx}")
if idx < len(deployments)
else f"#{idx}"
)
# Get field path (only include string field names, not indices or type names)
field_parts = []
for part in loc[2:]:
if isinstance(part, str) and not part.startswith("function-"):
# Assume lowercase names are field names, not type names
if part[0].islower():
field_parts.append(part)
if field_parts:
field = field_parts[0] # Just use the top-level field
if name not in deployment_errors:
deployment_errors[name] = set()
deployment_errors[name].add(field)
# Handle top-level field errors (prefect-version, name, build, push, pull, etc.)
elif len(loc) >= 1 and isinstance(loc[0], str):
field_name = loc[0]
top_level_errors.append((field_name, msg))
if not deployment_errors and not top_level_errors:
return "Validation error in config file"
lines = []
# Format top-level errors
if top_level_errors:
lines.append("Invalid top-level fields in config file:\n")
for field_name, msg in top_level_errors:
lines.append(f" • {field_name}: {msg}")
lines.append(
"\nFor valid prefect.yaml fields, see: https://docs.prefect.io/v3/how-to-guides/deployments/prefect-yaml"
)
# Format deployment-level errors
if deployment_errors:
if top_level_errors:
lines.append("") # blank line separator
lines.append("Invalid fields in deployments:\n")
for name, fields in sorted(deployment_errors.items()):
lines.append(f" • {name}: {', '.join(sorted(fields))}")
lines.append(
"\nFor valid deployment fields and examples, go to: https://docs.prefect.io/v3/concepts/deployments#deployment-schema"
)
return "\n".join(lines)
def _merge_with_default_deploy_config(deploy_config: dict[str, Any]) -> dict[str, Any]:
deploy_config = deepcopy(deploy_config)
DEFAULT_DEPLOY_CONFIG: dict[str, Any] = {
"name": None,
"version": None,
"tags": [],
"concurrency_limit": None,
"description": None,
"flow_name": None,
"entrypoint": None,
"parameters": {},
"work_pool": {
"name": None,
"work_queue_name": None,
"job_variables": {},
},
}
for key, value in DEFAULT_DEPLOY_CONFIG.items():
if key not in deploy_config:
deploy_config[key] = value
if isinstance(value, dict):
for k, v in value.items():
if k not in deploy_config[key]:
deploy_config[key][k] = v
return deploy_config
def _load_deploy_configs_and_actions(
    prefect_file: Path,
    *,
    console: "Console",
) -> tuple[list[dict[str, Any]], dict[str, Any]]:
    """
    Load and validate a prefect.yaml using Pydantic models.

    Returns a tuple of (deployment_configs, actions_dict) where deployments are
    dictionaries compatible with existing CLI code and actions contains
    top-level build/push/pull lists from the file.
    """
    try:
        with prefect_file.open("r") as f:
            parsed = yaml.safe_load(f)
    except (FileNotFoundError, IsADirectoryError, YAMLError) as exc:
        console.print(
            f"Unable to read the specified config file. Reason: {exc}. Skipping.",
            style="yellow",
        )
        parsed = {}

    # Anything that is not a mapping (e.g. a bare list, or None from an empty
    # file) is treated as an empty config.
    raw: dict[str, Any] = {}
    if isinstance(parsed, dict):
        raw = parsed
    else:
        console.print(
            "Unable to parse the specified config file. Skipping.",
            style="yellow",
        )

    try:
        model = PrefectYamlModel.model_validate(raw)
    except ValidationError as exc:
        # Surface friendly validation errors, then continue with an empty
        # model so the rest of the deploy flow can proceed.
        console.print(_format_validation_error(exc, raw), style="yellow")
        console.print(
            "\nSkipping deployment configuration due to validation errors.",
            style="yellow",
        )
        model = PrefectYamlModel()

    actions: dict[str, Any] = {
        "build": model.build or [],
        "push": model.push or [],
        "pull": model.pull or [],
    }

    # Dump to plain dicts, omitting unset keys to preserve legacy semantics.
    deploy_configs: list[dict[str, Any]] = [
        deployment.model_dump(exclude_unset=True, mode="json")
        for deployment in model.deployments
    ]
    return deploy_configs, actions
def _extract_variable(variable: str) -> dict[str, Any]:
"""
Extracts a variable from a string. Variables can be in the format
key=value or a JSON object.
"""
try:
key, value = variable.split("=", 1)
except ValueError:
pass
else:
return {key: value}
try:
# Only key=value strings and JSON objexcts are valid inputs for
# variables, not arrays or strings, so we attempt to convert the parsed
# object to a dict.
return dict(json.loads(variable))
except (ValueError, TypeError) as e:
raise ValueError(
f'Could not parse variable: "{variable}". Please ensure variables are'
" either in the format `key=value` or are strings containing a valid JSON"
" object."
) from e
def _apply_cli_options_to_deploy_config(
deploy_config: dict[str, Any],
cli_options: dict[str, Any],
*,
console: "Console",
) -> dict[str, Any]:
"""
Applies CLI options to a deploy config. CLI options take
precedence over values in the deploy config.
Args:
deploy_config: A deploy config
cli_options: A dictionary of CLI options
Returns:
Dict: a deploy config with CLI options applied
"""
deploy_config = deepcopy(deploy_config)
# verification
if cli_options.get("param") and (cli_options.get("params") is not None):
raise ValueError("Can only pass one of `param` or `params` options")
# If there's more than one name, we can't set the name of the deploy config.
# The user will be prompted if running in interactive mode.
if len(cli_options.get("names", [])) == 1:
deploy_config["name"] = cli_options["names"][0]
variable_overrides: dict[str, Any] = {}
for cli_option, cli_value in cli_options.items():
if (
cli_option
in [
"description",
"entrypoint",
"version",
"tags",
"concurrency_limit",
"flow_name",
"enforce_parameter_schema",
]
and cli_value is not None
):
deploy_config[cli_option] = cli_value
elif (
cli_option in ["work_pool_name", "work_queue_name", "variables"]
and cli_value
):
if not isinstance(deploy_config.get("work_pool"), dict):
deploy_config["work_pool"] = {}
if cli_option == "work_pool_name":
deploy_config["work_pool"]["name"] = cli_value
elif cli_option == "variables":
for variable in cli_value or []:
variable_overrides.update(**_extract_variable(variable))
if not isinstance(deploy_config["work_pool"].get("variables"), dict):
deploy_config["work_pool"]["job_variables"] = {}
deploy_config["work_pool"]["job_variables"].update(variable_overrides)
else:
deploy_config["work_pool"][cli_option] = cli_value
elif cli_option in ["cron", "interval", "rrule"] and cli_value:
if not isinstance(deploy_config.get("schedules"), list):
deploy_config["schedules"] = []
for value in cli_value:
deploy_config["schedules"].append({cli_option: value})
elif cli_option in ["param", "params"] and cli_value:
parameters: dict[str, Any] = {}
if cli_option == "param":
for p in cli_value or []:
k, unparsed_value = p.split("=", 1)
try:
v = json.loads(unparsed_value)
console.print(
f"The parameter value {unparsed_value} is parsed as a JSON"
" string"
)
except json.JSONDecodeError:
v = unparsed_value
parameters[k] = v
if cli_option == "params" and cli_value is not None:
parameters = json.loads(cli_value)
if not isinstance(deploy_config.get("parameters"), dict):
deploy_config["parameters"] = {}
deploy_config["parameters"].update(parameters)
anchor_date = cli_options.get("anchor_date")
timezone = cli_options.get("timezone")
# Apply anchor_date and timezone to new and existing schedules
for schedule_config in deploy_config.get("schedules") or []:
if anchor_date and schedule_config.get("interval"):
schedule_config["anchor_date"] = anchor_date
if timezone:
schedule_config["timezone"] = timezone
return deploy_config, variable_overrides
def _handle_pick_deploy_without_name(
    deploy_configs: list[dict[str, Any]],
    *,
    console: "Console",
) -> list[dict[str, Any]]:
    """Prompt the user to pick one of the named deploy configs, if any exist.

    Returns a single-element list with the chosen config, or an empty list
    when there is nothing to choose from or the user opts out.
    """
    from prefect.cli._prompts import prompt_select_from_table

    # Only configs that carry a name can be presented in the selection table.
    named_configs = [config for config in deploy_configs if config.get("name")]
    if not named_configs:
        return []

    choice = prompt_select_from_table(
        console,
        "Would you like to use an existing deployment configuration?",
        [
            {"header": "Name", "key": "name"},
            {"header": "Entrypoint", "key": "entrypoint"},
            {"header": "Description", "key": "description"},
        ],
        named_configs,
        opt_out_message="No, configure a new deployment",
        opt_out_response=None,
    )
    return [choice] if choice else []
def _log_missing_deployment_names(
missing_names,
matched_deploy_configs,
names,
*,
console: "Console",
):
if missing_names:
console.print(
(
"The following deployment(s) could not be found and will not be"
f" deployed: {', '.join(list(sorted(missing_names)))}"
),
style="yellow",
)
if not matched_deploy_configs:
console.print(
(
"Could not find any deployment configurations with the given"
f" name(s): {', '.join(names)}. Your flow will be deployed with a"
" new deployment configuration."
),
style="yellow",
)
def _filter_matching_deploy_config(
name: str, deploy_configs: list[dict[str, Any]]
) -> list[dict[str, Any]]:
matching_deployments: list[dict[str, Any]] = []
if "/" in name:
flow_name, deployment_name = name.split("/")
flow_name = flow_name.replace("-", "_")
matching_deployments = [
deploy_config
for deploy_config in deploy_configs
if deploy_config.get("name") == deployment_name
and deploy_config.get("entrypoint", "").split(":")[-1] == flow_name
]
else:
matching_deployments = [
deploy_config
for deploy_config in deploy_configs
if deploy_config.get("name") == name
]
return matching_deployments
def _parse_name_from_pattern(
deploy_configs: list[dict[str, Any]], name_pattern: str
) -> list[str]:
parsed_names: list[str] = []
name_pattern = re.escape(name_pattern).replace(r"\*", ".*")
if "/" in name_pattern:
flow_name, deploy_name = name_pattern.split("/", 1)
flow_name = (
re.compile(flow_name.replace("*", ".*"))
if "*" in flow_name
else re.compile(flow_name)
)
deploy_name = (
re.compile(deploy_name.replace("*", ".*"))
if "*" in deploy_name
else re.compile(deploy_name)
)
else:
flow_name = None
deploy_name = re.compile(name_pattern.replace("*", ".*"))
for deploy_config in deploy_configs:
if not deploy_config.get("entrypoint"):
continue
entrypoint = deploy_config.get("entrypoint").split(":")[-1].replace("_", "-")
deployment_name = deploy_config.get("name")
flow_match = flow_name.fullmatch(entrypoint) if flow_name else True
deploy_match = deploy_name.fullmatch(deployment_name)
if flow_match and deploy_match:
parsed_names.append(deployment_name)
return parsed_names
def _handle_pick_deploy_with_name(
    deploy_configs: list[dict[str, Any]],
    names: list[str],
    *,
    console: "Console",
    is_interactive: Callable[[], bool],
) -> list[dict[str, Any]]:
    """Resolve the requested deployment names against the available configs.

    Ambiguous names (multiple configs sharing a name) are disambiguated via an
    interactive prompt when possible; unmatched names are reported through the
    console.
    """
    from prefect.cli._prompts import prompt_select_from_table

    matched_deploy_configs: list[dict[str, Any]] = []
    deployment_names: list[str] = []

    for name in names:
        candidates = _filter_matching_deploy_config(name, deploy_configs)

        if len(candidates) > 1 and is_interactive():
            # Several configs share this name; ask the user to pick one.
            chosen = prompt_select_from_table(
                console,
                (
                    "Found multiple deployment configurations with the name"
                    f" [yellow]{name}[/yellow]. Please select the one you would"
                    " like to deploy:"
                ),
                [
                    {"header": "Name", "key": "name"},
                    {"header": "Entrypoint", "key": "entrypoint"},
                    {"header": "Description", "key": "description"},
                ],
                candidates,
            )
            matched_deploy_configs.append(chosen)
        elif candidates:
            matched_deploy_configs.extend(candidates)

        deployment_names.append(name.split("/")[-1])

    unfound_names = set(deployment_names) - {
        config.get("name") for config in matched_deploy_configs
    }
    _log_missing_deployment_names(
        unfound_names, matched_deploy_configs, names, console=console
    )

    return matched_deploy_configs
def _pick_deploy_configs(
    deploy_configs: list[dict[str, Any]],
    names: Optional[list[str]] = None,
    deploy_all: bool = False,
    *,
    console: "Console",
    is_interactive: Callable[[], bool],
) -> list[dict[str, Any]]:
    """Select which deployment configurations to act on.

    Selection depends on the provided names, the `--all` flag, and whether the
    CLI is running interactively:

    * no configs at all: return a single default config, optionally prompting
      the user in interactive mode;
    * exactly one config, non-interactive, at most one name: use that config
      (legacy behavior — the CLI name may override the config name);
    * no names and no `--all`: use the single config, prompt interactively,
      or raise if several configs exist non-interactively;
    * names given: match them against the configs;
    * `--all`: use every config.

    Args:
        deploy_configs: Deployment configurations loaded from prefect.yaml.
        names: Deployment names (already pattern-expanded) to select.
        deploy_all: Whether every configuration should be selected.
        console: Console used for interactive prompts.
        is_interactive: Callable reporting whether prompting is allowed.

    Returns:
        The selected deployment configurations (possibly default-merged).

    Raises:
        ValueError: If `--all` is combined with names, if multiple configs
            exist but no name was given non-interactively, or if no selection
            rule applies.
    """
    names = names or []
    if deploy_all and names:
        raise ValueError(
            "Cannot use both `--all` and `--name` at the same time. Use only one."
        )

    # No configs discovered: fall back to a default config, letting an
    # interactive user opt into any named config that may exist.
    if not deploy_configs:
        if not is_interactive():
            return [
                _merge_with_default_deploy_config({}),
            ]
        selected_deploy_config = _handle_pick_deploy_without_name(
            deploy_configs, console=console
        )
        if not selected_deploy_config:
            return [
                _merge_with_default_deploy_config({}),
            ]
        return selected_deploy_config

    # Original behavior (pre-refactor): in non-interactive mode, if there is
    # exactly one deploy config and at most one name provided, proceed with the
    # single deploy config even if the provided name does not match. This allows
    # users/tests to override the name via CLI while still inheriting templated
    # fields (e.g., version, tags, description) from the config.
    if (not is_interactive()) and len(deploy_configs) == 1 and len(names) <= 1:
        return [
            _merge_with_default_deploy_config(deploy_configs[0]),
        ]

    if not names and not deploy_all:
        if not is_interactive():
            if len(deploy_configs) == 1:
                return [
                    _merge_with_default_deploy_config(deploy_configs[0]),
                ]
            # Mirror original behavior: error when multiple configs present and no
            # explicit name provided in non-interactive mode.
            raise ValueError(
                "Discovered one or more deployment configurations, but no name was"
                " given. Please specify the name of at least one deployment to"
                " create or update."
            )
        selected_deploy_config = _handle_pick_deploy_without_name(
            deploy_configs, console=console
        )
        if not selected_deploy_config:
            return [
                _merge_with_default_deploy_config({}),
            ]
        return selected_deploy_config

    if names:
        matched_deploy_configs = _handle_pick_deploy_with_name(
            deploy_configs, names, console=console, is_interactive=is_interactive
        )
        return matched_deploy_configs

    if deploy_all:
        return [
            _merge_with_default_deploy_config(deploy_config)
            for deploy_config in deploy_configs
        ]

    # Defensive: the branches above are exhaustive, so this should not happen.
    raise ValueError("Invalid selection. Please try again.")
def _handle_deprecated_schedule_fields(deploy_config: dict[str, Any]):
    """Normalize the deprecated singular `schedule` key to the `schedules` list.

    Args:
        deploy_config: A deployment configuration that may use the legacy
            `schedule` key.

    Returns:
        A copy of the config where a legacy `schedule` dict has been wrapped
        into a one-element `schedules` list. The input is not mutated.

    Raises:
        ValueError: If both `schedule` and `schedules` keys are present.
    """
    deploy_config = deepcopy(deploy_config)

    legacy_schedule = deploy_config.get("schedule", NotSet)
    schedule_configs = deploy_config.get("schedules", NotSet)

    if (
        legacy_schedule
        and legacy_schedule is not NotSet
        and schedule_configs is not NotSet
    ):
        # Fixed message wording (was: "Please use only use `schedules`.").
        raise ValueError(
            "Both 'schedule' and 'schedules' keys are present in the deployment"
            " configuration. Please only use `schedules`."
        )

    if legacy_schedule and isinstance(legacy_schedule, dict):
        deploy_config["schedules"] = [deploy_config["schedule"]]

    return deploy_config
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/cli/deploy/_config.py",
"license": "Apache License 2.0",
"lines": 471,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/prefect/cli/deploy/_core.py | from __future__ import annotations
import inspect
import os
from copy import deepcopy
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Optional
from rich.markup import escape
from rich.panel import Panel
from prefect.cli._prompts import (
confirm,
prompt,
prompt_build_custom_docker_image,
prompt_entrypoint,
prompt_push_custom_docker_image,
prompt_select_work_pool,
)
from prefect.client.orchestration import get_client
from prefect.client.schemas.filters import WorkerFilter
from prefect.deployments.base import _save_deployment_to_prefect_file
from prefect.deployments.runner import RunnerDeployment
from prefect.deployments.steps.core import run_steps
from prefect.exceptions import ObjectNotFound
from prefect.flows import load_flow_from_entrypoint
from prefect.settings import get_current_settings
from prefect.utilities.callables import parameter_schema
from prefect.utilities.collections import get_from_dict
from prefect.utilities.templating import (
apply_values,
resolve_block_document_references,
resolve_variables,
)
from ._actions import (
_generate_actions_for_remote_flow_storage,
_generate_default_pull_action,
)
from ._config import (
_apply_cli_options_to_deploy_config,
_handle_deprecated_schedule_fields,
_merge_with_default_deploy_config,
)
from ._schedules import _construct_schedules
from ._sla import (
_create_slas,
_gather_deployment_sla_definitions,
_initialize_deployment_slas,
)
from ._storage import _PullStepStorage
from ._triggers import (
_create_deployment_triggers,
_gather_deployment_trigger_definitions,
_initialize_deployment_triggers,
)
if TYPE_CHECKING:
from rich.console import Console
from prefect.client.orchestration import PrefectClient
async def _run_single_deploy(
deploy_config: dict[str, Any],
actions: dict[str, Any],
options: dict[str, Any] | None = None,
client: Optional["PrefectClient"] = None,
prefect_file: Path = Path("prefect.yaml"),
*,
console: "Console",
is_interactive: Callable[[], bool],
):
client = client or get_client()
deploy_config = deepcopy(deploy_config) if deploy_config else {}
actions = deepcopy(actions) if actions else {}
options = deepcopy(options) if options else {}
deploy_config = _merge_with_default_deploy_config(deploy_config)
deploy_config = _handle_deprecated_schedule_fields(deploy_config)
(
deploy_config,
variable_overrides,
) = _apply_cli_options_to_deploy_config(deploy_config, options, console=console)
build_steps = deploy_config.get("build", actions.get("build")) or []
push_steps = deploy_config.get("push", actions.get("push")) or []
pull_steps = deploy_config.get("pull", actions.get("pull")) or []
deploy_config = await resolve_block_document_references(deploy_config)
deploy_config = await resolve_variables(deploy_config)
# check for env var placeholders early so users can pass work pool names, etc.
deploy_config = apply_values(deploy_config, os.environ, remove_notset=False)
if not deploy_config.get("entrypoint"):
if not is_interactive():
raise ValueError(
"An entrypoint must be provided:\n\n"
" \t[yellow]prefect deploy path/to/file.py:flow_function\n\n"
"You can also provide an entrypoint in a prefect.yaml file."
)
deploy_config["entrypoint"] = await prompt_entrypoint(console)
flow = load_flow_from_entrypoint(deploy_config["entrypoint"])
deploy_config["flow_name"] = flow.name
deployment_name = deploy_config.get("name")
if not deployment_name:
if not is_interactive():
raise ValueError("A deployment name must be provided.")
deploy_config["name"] = prompt("Deployment name", default="default")
deploy_config["parameter_openapi_schema"] = parameter_schema(flow)
work_pool_name = get_from_dict(deploy_config, "work_pool.name")
# determine work pool
if work_pool_name:
try:
work_pool = await client.read_work_pool(deploy_config["work_pool"]["name"])
# dont allow submitting to prefect-agent typed work pools
if work_pool.type == "prefect-agent":
if not is_interactive():
raise ValueError(
"Cannot create a project-style deployment with work pool of"
" type 'prefect-agent'. If you wish to use an agent with"
" your deployment, please use the `prefect deployment"
" build` command."
)
console.print(
"You've chosen a work pool with type 'prefect-agent' which"
" cannot be used for project-style deployments. Let's pick"
" another work pool to deploy to."
)
deploy_config["work_pool"]["name"] = await prompt_select_work_pool(
console
)
except ObjectNotFound:
raise ValueError(
"This deployment configuration references work pool"
f" {deploy_config['work_pool']['name']!r} which does not exist. This"
" means no worker will be able to pick up its runs. You can create a"
" work pool in the Prefect UI."
)
else:
if not is_interactive():
raise ValueError(
"A work pool is required to deploy this flow. Please specify a work"
" pool name via the '--pool' flag or in your prefect.yaml file."
)
if not isinstance(deploy_config.get("work_pool"), dict):
deploy_config["work_pool"] = {}
deploy_config["work_pool"]["name"] = await prompt_select_work_pool(
console=console
)
docker_build_steps = [
"prefect_docker.deployments.steps.build_docker_image",
]
docker_push_steps = [
"prefect_docker.deployments.steps.push_docker_image",
]
docker_build_step_exists = any(
any(step in action for step in docker_build_steps)
for action in deploy_config.get("build", actions.get("build")) or []
)
update_work_pool_image = False
build_step_set_to_null = "build" in deploy_config and (
deploy_config["build"] is None
or deploy_config["build"] == {}
or deploy_config["build"] == []
)
work_pool = await client.read_work_pool(deploy_config["work_pool"]["name"])
image_properties = (
work_pool.base_job_template.get("variables", {})
.get("properties", {})
.get("image", {})
)
image_is_configurable = (
"image"
in work_pool.base_job_template.get("variables", {}).get("properties", {})
and image_properties.get("type") == "string"
and not image_properties.get("enum")
)
if (
is_interactive()
and not docker_build_step_exists
and not build_step_set_to_null
and image_is_configurable
):
build_docker_image_step = await prompt_build_custom_docker_image(
console, deploy_config
)
if build_docker_image_step is not None:
if not get_from_dict(deploy_config, "work_pool.job_variables.image"):
update_work_pool_image = True
(
push_docker_image_step,
updated_build_docker_image_step,
) = await prompt_push_custom_docker_image(
console, deploy_config, build_docker_image_step
)
if actions.get("build"):
actions["build"].append(updated_build_docker_image_step)
else:
actions["build"] = [updated_build_docker_image_step]
if push_docker_image_step is not None:
if actions.get("push"):
actions["push"].append(push_docker_image_step)
else:
actions["push"] = [push_docker_image_step]
build_steps = deploy_config.get("build", actions.get("build")) or []
push_steps = deploy_config.get("push", actions.get("push")) or []
docker_push_step_exists = any(
any(step in action for step in docker_push_steps)
for action in deploy_config.get("push", actions.get("push")) or []
)
## CONFIGURE PUSH and/or PULL STEPS FOR REMOTE FLOW STORAGE
if (
is_interactive()
and not (deploy_config.get("pull") or actions.get("pull"))
and not docker_push_step_exists
and confirm(
(
"Your Prefect workers will need access to this flow's code in order to"
" run it. Would you like your workers to pull your flow code from a"
" remote storage location when running this flow?"
),
default=True,
console=console,
)
):
actions = await _generate_actions_for_remote_flow_storage(
console=console, deploy_config=deploy_config, actions=actions
)
# Prefer the originally captured pull_steps (taken before resolution) to
# preserve unresolved block placeholders in the deployment spec. Only fall
# back to the config/actions/default if no pull steps were provided.
pull_steps = (
pull_steps
or deploy_config.get("pull")
or actions.get("pull")
or await _generate_default_pull_action(
console,
deploy_config=deploy_config,
actions=actions,
is_interactive=is_interactive,
)
)
## RUN BUILD AND PUSH STEPS
step_outputs: dict[str, Any] = {}
if build_steps:
console.print("Running deployment build steps...")
step_outputs.update(
await run_steps(build_steps, step_outputs, print_function=console.print)
)
if push_steps := push_steps or actions.get("push"):
console.print("Running deployment push steps...")
step_outputs.update(
await run_steps(push_steps, step_outputs, print_function=console.print)
)
step_outputs.update(variable_overrides)
if update_work_pool_image:
if "build-image" not in step_outputs:
console.print(
"Warning: no build-image step found in the deployment build steps."
" The work pool image will not be updated."
)
deploy_config["work_pool"]["job_variables"]["image"] = "{{ build-image.image }}"
if not deploy_config.get("description"):
deploy_config["description"] = flow.description
deploy_config["schedules"] = _construct_schedules(
deploy_config, step_outputs, console=console, is_interactive=is_interactive
)
# save deploy_config before templating
deploy_config_before_templating = deepcopy(deploy_config)
## apply templating from build and push steps to the final deployment spec
_parameter_schema = deploy_config.pop("parameter_openapi_schema")
_schedules = deploy_config.pop("schedules")
# Save triggers before templating to preserve event template parameters
_triggers = deploy_config.pop("triggers", None)
# Preserve {{ ctx.* }} placeholders during deploy-time templating.
# These are runtime templates resolved by the worker's
# prepare_for_flow_run() and must not be stripped here.
deploy_config = apply_values(
deploy_config,
step_outputs,
warn_on_notset=True,
skip_prefixes=["ctx."],
)
deploy_config["parameter_openapi_schema"] = _parameter_schema
deploy_config["schedules"] = _schedules
# This initialises triggers after templating to ensure that jinja variables are resolved
# Use the pre-templated trigger specs to preserve event template parameters like {{ event.name }}
# while still applying templating to trigger-level fields like enabled
if trigger_specs := _gather_deployment_trigger_definitions(
options.get("triggers"), _triggers
):
# Apply templating only to non-parameter trigger fields to preserve event templates
templated_trigger_specs = []
for spec in trigger_specs:
# Save parameters before templating
parameters = spec.pop("parameters", None)
# Apply templating to trigger fields (e.g., enabled)
templated_spec = apply_values(spec, step_outputs, warn_on_notset=False)
# Restore parameters without templating
if parameters is not None:
templated_spec["parameters"] = parameters
templated_trigger_specs.append(templated_spec)
triggers = _initialize_deployment_triggers(
deployment_name, templated_trigger_specs
)
else:
triggers = []
if isinstance(deploy_config.get("concurrency_limit"), dict):
concurrency_options = {
"collision_strategy": get_from_dict(
deploy_config, "concurrency_limit.collision_strategy"
)
}
grace_period_seconds = get_from_dict(
deploy_config, "concurrency_limit.grace_period_seconds"
)
if grace_period_seconds is not None:
concurrency_options["grace_period_seconds"] = grace_period_seconds
deploy_config["concurrency_options"] = concurrency_options
deploy_config["concurrency_limit"] = get_from_dict(
deploy_config, "concurrency_limit.limit"
)
pull_steps = apply_values(pull_steps, step_outputs, remove_notset=False)
deployment = RunnerDeployment(
name=deploy_config["name"],
flow_name=deploy_config.get("flow_name"),
entrypoint=deploy_config.get("entrypoint"),
work_pool_name=get_from_dict(deploy_config, "work_pool.name"),
work_queue_name=get_from_dict(deploy_config, "work_pool.work_queue_name"),
parameters=deploy_config.get("parameters"),
description=deploy_config.get("description"),
version=deploy_config.get("version") or options.get("version"),
version_type=deploy_config.get("version_type") or options.get("version_type"),
tags=deploy_config.get("tags"),
concurrency_limit=deploy_config.get("concurrency_limit"),
concurrency_options=deploy_config.get("concurrency_options"),
paused=deploy_config.get("paused"),
storage=_PullStepStorage(pull_steps),
job_variables=get_from_dict(deploy_config, "work_pool.job_variables"),
)
deployment._set_defaults_from_flow(flow)
deployment._parameter_openapi_schema = deploy_config["parameter_openapi_schema"]
if deploy_config.get("enforce_parameter_schema") is not None:
deployment.enforce_parameter_schema = deploy_config.get(
"enforce_parameter_schema"
)
apply_coro = deployment.apply(schedules=deploy_config.get("schedules"))
if TYPE_CHECKING:
assert inspect.isawaitable(apply_coro)
deployment_id = await apply_coro
await _create_deployment_triggers(client, deployment_id, triggers)
# # We want to ensure that if a user passes an empty list of SLAs, we call the
# # apply endpoint to remove existing SLAs for the deployment.
# # If the argument is not provided, we will not call the endpoint.
# Import SLA helpers from the package namespace to honor test monkeypatches
sla_specs = _gather_deployment_sla_definitions(
options.get("sla"), deploy_config.get("sla")
)
if sla_specs is not None:
slas = _initialize_deployment_slas(deployment_id, sla_specs)
await _create_slas(client, deployment_id, slas)
console.print(
Panel(
f"Deployment '{deploy_config['flow_name']}/{deploy_config['name']}'"
f" successfully created with id '{deployment_id}'."
),
style="green",
)
if ui_url := get_current_settings().ui_url:
message = (
"\nView Deployment in UI:"
f" {ui_url}/deployments/deployment/{deployment_id}\n"
)
console.print(message, soft_wrap=True)
if is_interactive() and not prefect_file.exists():
if confirm(
(
"Would you like to save configuration for this deployment for faster"
" deployments in the future?"
),
console=console,
):
deploy_config_before_templating.update({"schedules": _schedules})
_save_deployment_to_prefect_file(
deploy_config_before_templating,
build_steps=build_steps or None,
push_steps=push_steps or None,
pull_steps=pull_steps or None,
triggers=trigger_specs or None,
sla=sla_specs or None,
prefect_file=prefect_file,
)
console.print(
(
f"\n[green]Deployment configuration saved to {prefect_file}![/]"
" You can now deploy using this deployment configuration"
" with:\n\n\t[blue]$ prefect deploy -n"
f" {deploy_config['name']}[/]\n\nYou can also make changes to"
" this deployment configuration by making changes to the"
" YAML file."
),
)
active_workers = []
if work_pool_name:
active_workers = await client.read_workers_for_work_pool(
work_pool_name, worker_filter=WorkerFilter(status={"any_": ["ONLINE"]})
)
if (
not work_pool.is_push_pool
and not work_pool.is_managed_pool
and not active_workers
):
console.print(
"\nTo execute flow runs from these deployments, start a worker in a"
" separate terminal that pulls work from the"
f" {work_pool_name!r} work pool:"
)
console.print(
f"\n\t$ prefect worker start --pool {work_pool_name!r}",
style="blue",
)
console.print("\nTo schedule a run for this deployment, use the following command:")
console.print(
(
"\n\t$ prefect deployment run"
f" '{deploy_config['flow_name']}/{deploy_config['name']}'\n"
),
style="blue",
)
async def _run_multi_deploy(
    deploy_configs: list[dict[str, Any]],
    actions: dict[str, Any],
    names: Optional[list[str]] = None,
    deploy_all: bool = False,
    prefect_file: Path = Path("prefect.yaml"),
    *,
    console: "Console",
    is_interactive: Callable[[], bool],
):
    """Deploy each selected deployment configuration in sequence.

    Unnamed configurations are skipped in non-interactive sessions;
    otherwise the user is offered a chance to name and deploy them.
    """
    # Work on copies so the caller's dicts/lists are never mutated.
    deploy_configs = deepcopy(deploy_configs) if deploy_configs else []
    actions = deepcopy(actions) if actions else {}
    names = names or []

    banner = (
        "Deploying all flows with an existing deployment configuration..."
        if deploy_all
        else "Deploying flows with selected deployment configurations..."
    )
    console.print(banner)

    for config in deploy_configs:
        if config.get("name") is None:
            if not is_interactive():
                console.print(
                    "Discovered unnamed deployment. Skipping...", style="yellow"
                )
                continue
            console.print("Discovered unnamed deployment.", style="yellow")
            console.print_json(data=config)
            if not confirm(
                "Would you like to give this deployment a name and deploy it?",
                default=True,
                console=console,
            ):
                console.print("Skipping unnamed deployment.", style="yellow")
                continue
            config["name"] = prompt("Deployment name", default="default")

        # Resolve env var templates in name for display purposes only
        resolved = apply_values(
            {"name": config["name"]}, os.environ, remove_notset=False
        )["name"]
        # Escape Rich markup so brackets are not interpreted as style tags
        console.print(Panel(f"Deploying {escape(str(resolved))}", style="blue"))
        await _run_single_deploy(
            config,
            actions,
            prefect_file=prefect_file,
            console=console,
            is_interactive=is_interactive,
        )
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/cli/deploy/_core.py",
"license": "Apache License 2.0",
"lines": 469,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/prefect/cli/deploy/_schedules.py | from __future__ import annotations
import json
from datetime import timedelta
from typing import TYPE_CHECKING, Any, Callable
from prefect.cli._prompts import prompt_schedules
from prefect.client.schemas.schedules import (
CronSchedule,
IntervalSchedule,
RRuleSchedule,
)
from prefect.types._datetime import parse_datetime
from prefect.utilities.annotations import NotSet
from prefect.utilities.templating import apply_values
if TYPE_CHECKING:
from rich.console import Console
def _construct_schedules(
    deploy_config: dict[str, Any],
    step_outputs: dict[str, Any],
    *,
    console: "Console",
    is_interactive: Callable[[], bool],
) -> list[dict[str, Any]]:
    """Build deployment schedule specs from the deploy config.

    If the ``schedules`` key is entirely absent (the NotSet sentinel),
    an interactive session prompts the user; otherwise the configured
    entries are templated and normalized.
    """
    schedule_configs = deploy_config.get("schedules", NotSet) or []
    if schedule_configs is NotSet:
        # Key absent: offer to build schedules interactively.
        return prompt_schedules(console) if is_interactive() else []
    # Key present (possibly None/empty): template then normalize each entry.
    return [
        _schedule_config_to_deployment_schedule(cfg)
        for cfg in apply_values(schedule_configs, step_outputs)
    ]
def _schedule_config_to_deployment_schedule(
    schedule_config: dict[str, Any],
) -> dict[str, Any]:
    """Convert one raw schedule mapping into a deployment schedule dict.

    Exactly one of ``cron``, ``interval``, or ``rrule`` must be present;
    the result carries a concrete client schedule object under
    ``"schedule"`` plus any optional fields the user set.

    Raises:
        ValueError: if none of the recognized schedule keys are present.
    """
    anchor_date = schedule_config.get("anchor_date")
    timezone = schedule_config.get("timezone")
    schedule_active = schedule_config.get("active")
    parameters = schedule_config.get("parameters", {})
    slug = schedule_config.get("slug")
    replaces = schedule_config.get("replaces")
    if cron := schedule_config.get("cron"):
        day_or = schedule_config.get("day_or")
        # Drop None values so CronSchedule's own defaults apply.
        cron_kwargs = {"cron": cron, "timezone": timezone, "day_or": day_or}
        schedule = CronSchedule(
            **{k: v for k, v in cron_kwargs.items() if v is not None}
        )
    elif interval := schedule_config.get("interval"):
        # interval can be int/float (seconds), timedelta, or ISO 8601 string
        # IntervalSchedule's pydantic validation handles all these formats
        if isinstance(interval, (int, float)):
            interval = timedelta(seconds=interval)
        interval_kwargs = {
            "interval": interval,
            "anchor_date": parse_datetime(anchor_date) if anchor_date else None,
            "timezone": timezone,
        }
        schedule = IntervalSchedule(
            **{k: v for k, v in interval_kwargs.items() if v is not None}
        )
    elif rrule := schedule_config.get("rrule"):
        # An rrule may arrive as a JSON object string (full RRuleSchedule
        # fields) or as a plain RRULE string; try JSON first.
        try:
            schedule = RRuleSchedule(**json.loads(rrule))
            if timezone:
                schedule.timezone = timezone
        except json.JSONDecodeError:
            schedule = RRuleSchedule(rrule=rrule, timezone=timezone)
    else:
        raise ValueError(
            f"Unknown schedule type. Please provide a valid schedule. schedule={schedule_config}"
        )
    schedule_obj: dict[str, Any] = {"schedule": schedule}
    # Only include optional fields the user actually set.
    if schedule_active is not None:
        schedule_obj["active"] = schedule_active
    if parameters:
        schedule_obj["parameters"] = parameters
    if slug:
        schedule_obj["slug"] = slug
    if replaces:
        schedule_obj["replaces"] = replaces
    return schedule_obj
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/cli/deploy/_schedules.py",
"license": "Apache License 2.0",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/prefect/cli/deploy/_sla.py | from __future__ import annotations
import json
from typing import Any, List, Union
from uuid import UUID
from pydantic import TypeAdapter
from prefect._experimental.sla.objects import SlaTypes
from prefect.client.base import ServerType
from prefect.client.orchestration import PrefectClient
SlaAdapter: TypeAdapter[SlaTypes] = TypeAdapter(SlaTypes)
def _gather_deployment_sla_definitions(
sla_flags: Union[list[str], None], existing_slas: Union[list[dict[str, Any]], None]
) -> Union[list[dict[str, Any]], None]:
if sla_flags is not None:
sla_specs: list[dict[str, Any]] = []
for s in sla_flags:
try:
if s.endswith(".yaml"):
import yaml
with open(s, "r") as f:
sla_specs.extend(yaml.safe_load(f).get("sla", []))
elif s.endswith(".json"):
with open(s, "r") as f:
sla_specs.extend(json.load(f).get("sla", []))
else:
sla_specs.append(json.loads(s))
except Exception as e:
raise ValueError(f"Failed to parse SLA: {s}. Error: {str(e)}")
return sla_specs
return existing_slas
def _initialize_deployment_slas(
    deployment_id: UUID, sla_specs: list[dict[str, Any]]
) -> list[SlaTypes]:
    """Validate raw SLA specs and bind each one to the deployment."""
    # An explicit empty spec list (or a single empty nested list) means
    # "remove all SLAs" and yields no objects to create.
    if sla_specs in ([], [[]]):
        return []
    validated = [SlaAdapter.validate_python(spec) for spec in sla_specs]
    for sla in validated:
        sla.set_deployment_id(deployment_id)
    return validated
async def _create_slas(
    client: PrefectClient,
    deployment_id: UUID,
    slas: List[SlaTypes],
):
    """Apply SLAs for a deployment; only supported against Prefect Cloud."""
    if client.server_type != ServerType.CLOUD:
        raise ValueError(
            "SLA configuration is currently only supported on Prefect Cloud."
        )
    await client.apply_slas_for_deployment(deployment_id, slas)
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/cli/deploy/_sla.py",
"license": "Apache License 2.0",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/prefect/cli/deploy/_storage.py | from __future__ import annotations
from pathlib import Path
from typing import Any
class _PullStepStorage:
"""
A shim storage class that allows passing pull steps to a `RunnerDeployment`.
"""
def __init__(self, pull_steps: list[dict[str, Any]]):
self._base_path = Path.cwd()
self.pull_steps = pull_steps
def set_base_path(self, path: Path):
self._base_path = path
@property
def destination(self):
return self._base_path
@property
def pull_interval(self):
return 60
async def pull_code(self):
pass
def to_pull_step(self):
return self.pull_steps
def __eq__(self, other: Any) -> bool:
return self.pull_steps == getattr(other, "pull_steps", None)
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/cli/deploy/_storage.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/prefect:src/prefect/cli/deploy/_triggers.py | from __future__ import annotations
import json
from typing import Any
from uuid import UUID
from pydantic import TypeAdapter
from prefect.client.orchestration import PrefectClient
from prefect.events import DeploymentTriggerTypes, TriggerTypes
from prefect.exceptions import PrefectHTTPStatusError
DeploymentTriggerAdapter: TypeAdapter[DeploymentTriggerTypes] = TypeAdapter(
DeploymentTriggerTypes
)
def _initialize_deployment_triggers(
    deployment_name: str, triggers_spec: list[dict[str, Any]]
) -> list[DeploymentTriggerTypes]:
    """Validate trigger specs, auto-naming any that lack a ``name``."""
    validated: list[DeploymentTriggerTypes] = []
    for index, spec in enumerate(triggers_spec, start=1):
        # NOTE: mutates the spec in place when no name was provided.
        spec.setdefault("name", f"{deployment_name}__automation_{index}")
        validated.append(DeploymentTriggerAdapter.validate_python(spec))
    return validated
async def _create_deployment_triggers(
    client: PrefectClient,
    deployment_id: UUID,
    triggers: list[DeploymentTriggerTypes | TriggerTypes],
):
    """Replace a deployment's automations with the given triggers.

    Existing automations owned by the deployment are deleted first. A 404
    from that delete is treated as "automations unsupported on this
    server" and trigger creation is skipped entirely.
    """
    try:
        await client.delete_resource_owned_automations(
            f"prefect.deployment.{deployment_id}"
        )
    except PrefectHTTPStatusError as e:
        if e.response.status_code == 404:
            # No automations support; nothing to create either.
            return
        raise e
    for trigger in triggers:
        trigger.set_deployment_id(deployment_id)
        await client.create_automation(trigger.as_automation())
def _gather_deployment_trigger_definitions(
trigger_flags: list[str], existing_triggers: list[dict[str, Any]]
) -> list[dict[str, Any]]:
if trigger_flags:
trigger_specs: list[dict[str, Any]] = []
for t in trigger_flags:
try:
if t.endswith(".yaml"):
import yaml
with open(t, "r") as f:
trigger_specs.extend(yaml.safe_load(f).get("triggers", []))
elif t.endswith(".json"):
with open(t, "r") as f:
trigger_specs.extend(json.load(f).get("triggers", []))
else:
trigger_specs.append(json.loads(t))
except Exception as e:
raise ValueError(f"Failed to parse trigger: {t}. Error: {str(e)}")
return trigger_specs
return existing_triggers
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/cli/deploy/_triggers.py",
"license": "Apache License 2.0",
"lines": 55,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/prefect/types/_concurrency.py | from typing import ClassVar, Literal
from uuid import UUID
from pydantic import BaseModel, ConfigDict
class ConcurrencyLeaseHolder(BaseModel):
    """Model for validating concurrency lease holder information."""

    # Forbid unknown keys so malformed holder payloads fail validation loudly.
    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")
    # Kind of entity that holds the concurrency lease.
    type: Literal["flow_run", "task_run", "deployment"]
    # Identifier of the holder named by ``type``.
    id: UUID
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/types/_concurrency.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/prefect:tests/events/client/test_automations_schema.py | """Tests for client-side automation schema validation and deserialization."""
import uuid
from prefect.events.schemas.automations import (
Automation,
MetricTrigger,
MetricTriggerQuery,
Posture,
)
def test_metric_trigger_deserialization():
    """Test that metric triggers can be deserialized from API-like data (issue #18747)."""
    metric_spec = {
        "name": "duration",
        "threshold": 100.0,
        "operator": ">",
        "range": 300.0,
        "firing_for": 300.0,
    }
    payload = {
        "id": str(uuid.uuid4()),
        "name": "Test Metric Automation",
        "description": "Test metric trigger validation",
        "enabled": True,
        "trigger": {
            "type": "metric",
            "posture": "Metric",
            "metric": metric_spec,
            "match": {"prefect.resource.id": "prefect.flow-run.*"},
        },
        "actions": [],
    }

    automation = Automation.model_validate(payload)

    trigger = automation.trigger
    assert isinstance(trigger, MetricTrigger)
    assert trigger.type == "metric"
    assert trigger.posture == Posture.Metric
    assert trigger.metric.name.value == "duration"
    assert trigger.metric.threshold == 100.0
    assert trigger.metric.operator.value == ">"
def test_metric_trigger_round_trip():
    """Test that metric triggers can be serialized and deserialized."""
    original = Automation(
        id=uuid.uuid4(),
        name="Test Metric Automation",
        description="Test",
        enabled=True,
        trigger=MetricTrigger(
            posture=Posture.Metric,
            metric=MetricTriggerQuery(
                name="successes",
                threshold=10,
                operator="<",
                range=600.0,
                firing_for=300.0,
            ),
            match={"prefect.resource.id": "prefect.flow-run.*"},
        ),
        actions=[],
    )

    # Round-trip through a plain dict, as the API would.
    restored = Automation.model_validate(original.model_dump())

    trigger = restored.trigger
    assert isinstance(trigger, MetricTrigger)
    assert trigger.type == "metric"
    assert trigger.metric.name.value == "successes"
    assert trigger.metric.threshold == 10
def test_trigger_type_discrimination_with_metric():
    """Test that the discriminator correctly identifies metric triggers."""
    payload = {
        "id": str(uuid.uuid4()),
        "name": "Test",
        "trigger": {
            "type": "metric",
            "posture": "Metric",
            "metric": {
                "name": "lateness",
                "threshold": 5,
                "operator": ">",
                "range": 300.0,
                "firing_for": 300.0,
            },
        },
        "actions": [],
    }

    automation = Automation.model_validate(payload)

    assert isinstance(automation.trigger, MetricTrigger)
    assert automation.trigger.type == "metric"
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/events/client/test_automations_schema.py",
"license": "Apache License 2.0",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:tests/events/client/test_deployment_trigger_schedule_after.py | from datetime import timedelta
import pytest
from pydantic import ValidationError
from prefect.events.schemas.deployment_triggers import DeploymentEventTrigger
async def test_deployment_trigger_can_omit_schedule_after_field():
    """Test that the schedule_after field defaults to timedelta(0)"""
    trigger = DeploymentEventTrigger.model_validate({"expect": ["foo"]})
    # No delay unless explicitly configured.
    assert trigger.schedule_after == timedelta(0)
async def test_deployment_trigger_accepts_schedule_after_field():
    """Test that the schedule_after field accepts various timedelta values"""
    cases = [
        (3600, timedelta(hours=1)),  # plain seconds
        ("PT2H", timedelta(hours=2)),  # ISO 8601 duration
    ]
    for raw, expected in cases:
        trigger = DeploymentEventTrigger.model_validate(
            {"expect": ["foo"], "schedule_after": raw}
        )
        assert trigger.schedule_after == expected
async def test_deployment_trigger_rejects_negative_schedule_after():
    """Test that negative schedule_after values are rejected"""
    payload = {"expect": ["foo"], "schedule_after": -3600}
    with pytest.raises(ValidationError, match="schedule_after must be non-negative"):
        DeploymentEventTrigger.model_validate(payload)
async def test_deployment_trigger_passes_schedule_after_to_action():
    """Test that deployment trigger passes schedule_after to RunDeployment action"""
    from uuid import uuid4

    trigger = DeploymentEventTrigger.model_validate(
        {"expect": ["foo"], "schedule_after": timedelta(hours=1)}
    )
    trigger.set_deployment_id(uuid4())

    actions = trigger.actions()
    assert len(actions) == 1
    # The configured delay must be forwarded to the generated action.
    assert actions[0].schedule_after == timedelta(hours=1)
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/events/client/test_deployment_trigger_schedule_after.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:scripts/prepare_integration_release_notes.py | #!/usr/bin/env python3
"""
Prepare release notes for Prefect integration packages.
This script generates release notes from commits since the last release.
USAGE:
======
Run this script to generate release notes for any integration package:
`just prepare-integration-release PACKAGE`
or
`python scripts/prepare_integration_release_notes.py PACKAGE`
Examples:
`python scripts/prepare_integration_release_notes.py prefect-aws`
`python scripts/prepare_integration_release_notes.py prefect-docker`
`python scripts/prepare_integration_release_notes.py --list`
The script will:
- Find the latest release tag for the specified integration package
- Generate release notes from commits that affect the package since that tag
- Apply formatting transformations (GitHub users to links, PR URLs, etc.)
- Save the formatted release notes to a markdown file
- Display the release notes for review
WORKFLOW:
=========
1. Run this script with the integration package name
2. Review the generated release notes
3. Use the output for GitHub releases or documentation
FORMATTING:
===========
- ### and #### headers are converted to bold text to reduce nav clutter
- GitHub usernames are converted to links (@username -> [@username](https://github.com/username))
- PR URLs are converted to short format with links
- Version constraints like <0.14.0,>=0.12.0 are wrapped in backticks
- Dates are formatted as "Month Day, Year" on a separate line
"""
from __future__ import annotations
import argparse
import json
import os
import re
import subprocess
import sys
from datetime import datetime
from typing import Any
def run_command(cmd: list[str]) -> str:
    """Run *cmd*, returning stripped stdout; exit the process on failure."""
    try:
        proc = subprocess.run(cmd, capture_output=True, text=True, check=True)
    except subprocess.CalledProcessError as e:
        print(f"Error running command {' '.join(cmd)}: {e}", file=sys.stderr)
        print(f"stderr: {e.stderr}", file=sys.stderr)
        sys.exit(1)
    return proc.stdout.strip()
def get_available_packages() -> list[str]:
    """Get list of available integration packages."""
    integrations_dir = "src/integrations"
    if not os.path.exists(integrations_dir):
        print(f"Integration directory {integrations_dir} not found", file=sys.stderr)
        sys.exit(1)
    # Integration packages are the prefect-* subdirectories.
    return sorted(
        entry
        for entry in os.listdir(integrations_dir)
        if entry.startswith("prefect-")
        and os.path.isdir(os.path.join(integrations_dir, entry))
    )
def get_latest_package_tag(package_name: str) -> str:
    """Get the latest release tag for the given package."""
    output = run_command(["gh", "api", "repos/PrefectHQ/prefect/tags", "--paginate"])
    prefix = f"{package_name}-"
    matching = [tag["name"] for tag in json.loads(output) if tag["name"].startswith(prefix)]
    if not matching:
        print(f"No {package_name} tags found", file=sys.stderr)
        sys.exit(1)
    # Tags are already sorted by creation date (newest first)
    return matching[0]
def extract_pr_info(commit_subject: str) -> dict[str, str] | None:
    """Extract PR number and title from commit subject."""
    # e.g. "Fix something (#1234)" -> title "Fix something", number "1234"
    match = re.match(r"^(.+?)\s*\(#(\d+)\)$", commit_subject.strip())
    if not match:
        return None
    return {"title": match.group(1).strip(), "pr_number": match.group(2)}
def get_pr_data(pr_number: str) -> dict[str, Any]:
    """Get PR data including author and labels using GitHub CLI."""
    try:
        raw = run_command(["gh", "pr", "view", pr_number, "--json", "author,labels"])
        data = json.loads(raw)
        return {
            "author": data.get("author", {}).get("login", "unknown"),
            "labels": [label.get("name", "") for label in data.get("labels", [])],
        }
    except Exception:
        # Best effort: any failure falls back to anonymous, unlabeled data.
        return {"author": "unknown", "labels": []}
def get_commits_since_tag(tag: str, package_name: str) -> list[dict[str, Any]]:
    """Get all commits that affect the given package since the given tag."""
    # Only commits touching the package directory are considered.
    log_output = run_command(
        [
            "git",
            "log",
            f"{tag}..HEAD",
            "--oneline",
            "--pretty=format:%s",
            "--",
            f"src/integrations/{package_name}/",
        ]
    )
    if not log_output:
        return []

    prs: list[dict[str, Any]] = []
    seen: set[str] = set()
    for raw_line in log_output.split("\n"):
        subject = raw_line.strip()
        if not subject:
            continue
        info = extract_pr_info(subject)
        if info is None or info["pr_number"] in seen:
            continue
        seen.add(info["pr_number"])
        # Enrich with author and labels from GitHub.
        extra = get_pr_data(info["pr_number"])
        prs.append(
            {
                "title": info["title"],
                "pr_number": info["pr_number"],
                "author": extra["author"],
                "labels": extra["labels"],
            }
        )
    return prs
def categorize_prs(
    prs: list[dict[str, Any]],
) -> dict[str, list[dict[str, Any]]]:
    """Categorize PRs by type based on their GitHub labels."""
    categories: dict[str, list[dict[str, Any]]] = {
        "Features": [],
        "Bug Fixes": [],
        "Documentation": [],
        "Maintenance": [],
        "Other": [],
    }
    # Lowercased label names that map a PR into a category.
    label_mappings = {
        "Features": ["enhancement", "feature", "new feature"],
        "Bug Fixes": ["bug", "bugfix", "fix"],
        "Documentation": ["documentation", "docs"],
        "Maintenance": ["maintenance", "chore", "dependencies", "refactor", "cleanup"],
    }
    # Title keywords used only when no label matched.
    title_mappings = [
        ("Features", ["feat", "add", "implement", "support", "new"]),
        ("Bug Fixes", ["fix", "bug", "resolve", "correct"]),
        ("Documentation", ["doc", "readme", "comment"]),
        ("Maintenance", ["refactor", "clean", "update", "bump", "chore", "test"]),
    ]

    for pr in prs:
        labels = {label.lower() for label in pr.get("labels", [])}
        target = next(
            (
                category
                for category, category_labels in label_mappings.items()
                if labels.intersection(category_labels)
            ),
            None,
        )
        if target is None:
            # Fallback to title-based categorization.
            title = pr["title"].lower()
            target = next(
                (
                    category
                    for category, words in title_mappings
                    if any(word in title for word in words)
                ),
                "Other",
            )
        categories[target].append(pr)
    return categories
def parse_version(version_str: str) -> tuple[int, int, int]:
    """Parse version string into major, minor, patch."""
    parts = version_str.split(".")
    # A missing patch component defaults to 0.
    patch = parts[2] if len(parts) > 2 else 0
    return int(parts[0]), int(parts[1]), int(patch)
def bump_version(version_str: str) -> str:
    """Bump the micro (patch) version by 1."""
    major, minor, patch = parse_version(version_str)
    next_patch = patch + 1
    return f"{major}.{minor}.{next_patch}"
def format_release_notes(
    tag: str, package_name: str, categorized_prs: dict[str, list[dict[str, Any]]]
) -> str:
    """Format the PRs into release notes."""
    # Extract version from tag (e.g., "prefect-aws-0.5.13" -> "0.5.13"),
    # then bump to the next patch version for the new notes.
    current_version = tag.split("-")[-1] if "-" in tag else tag
    next_version = bump_version(current_version)
    release_date = datetime.now().strftime("%B %d, %Y")

    total_prs = sum(len(prs) for prs in categorized_prs.values())
    if total_prs == 0:
        return (
            f"## {next_version}\n\n_Released on {release_date}_\n\n"
            f"No changes to {package_name} found since the last release.\n"
        )

    lines = [f"## {next_version}", f"\n_Released on {release_date}_\n"]
    for category, prs in categorized_prs.items():
        if not prs:
            continue
        # Bold headers (not ###) match the main release-notes style.
        lines.append(f"**{category}**")
        lines.append("")
        for pr in prs:
            title = format_pr_title(pr["title"])
            pr_number = pr["pr_number"]
            author = pr["author"]
            pr_link = (
                f"[#{pr_number}](https://github.com/PrefectHQ/prefect/pull/{pr_number})"
            )
            author_link = f"[@{author}](https://github.com/{author})"
            lines.append(f"- {title} {pr_link} by {author_link}")
        lines.append("")
    return "\n".join(lines).strip() + "\n"
def format_pr_title(title: str) -> str:
    """Apply formatting transformations to PR titles."""
    # Wrap bare version constraints (e.g. >=0.12.0,<0.14.0) in backticks.
    title = re.sub(
        r"(?<!`)([<>]=?[\d\.]+(?:,\s*[<>]=?[\d\.]+)*)",
        lambda m: f"`{m.group(1)}`",
        title,
    )

    # Linkify @mentions, but leave infrastructure decorator names alone.
    infra_decorators = {
        "docker",
        "kubernetes",
        "ecs",
        "process",
        "cloudrun",
        "modal",
        "azure_container",
    }

    def linkify(match):
        username = match.group(1)
        if username.lower() in infra_decorators:
            return match.group(0)  # keep decorator references verbatim
        return f"[@{username}](https://github.com/{username})"

    return re.sub(
        r"(?<!\w)@([a-zA-Z0-9](?:[a-zA-Z0-9]|-(?=[a-zA-Z0-9])){0,38})(?![a-zA-Z0-9/-])",
        linkify,
        title,
    )
def parse_args():
    """Parse command line arguments.

    Handles ``--list`` by printing the discovered integration packages and
    exiting; otherwise validates the requested package name against them.
    """
    available_packages = get_available_packages()
    parser = argparse.ArgumentParser(
        description="Generate release notes for Prefect integration packages",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=f"""
Available packages:
{chr(10).join(f"  - {pkg}" for pkg in available_packages)}
Examples:
  python {sys.argv[0]} prefect-aws
  python {sys.argv[0]} prefect-gcp
  python {sys.argv[0]} --list
""",
    )
    parser.add_argument(
        "package",
        nargs="?",
        help="Name of the integration package (e.g., prefect-aws, prefect-gcp)",
    )
    parser.add_argument(
        "--list", action="store_true", help="List all available integration packages"
    )
    args = parser.parse_args()
    if args.list:
        # --list short-circuits: print packages and exit successfully.
        print("Available integration packages:")
        for pkg in available_packages:
            print(f"  - {pkg}")
        sys.exit(0)
    if not args.package:
        parser.error("Package name is required. Use --list to see available packages.")
    if args.package not in available_packages:
        print(f"Error: Package '{args.package}' not found.", file=sys.stderr)
        print(f"Available packages: {', '.join(available_packages)}", file=sys.stderr)
        sys.exit(1)
    return args
def main():
    """Main function to generate release notes.

    Finds the latest tag for the requested package, gathers PRs since that
    tag, formats release notes, prints them, and prepends them to the
    package's release-notes .mdx file (creating it with frontmatter when
    missing).
    """
    args = parse_args()
    package_name = args.package
    print(f"🔍 Finding latest {package_name} release...")
    latest_tag = get_latest_package_tag(package_name)
    print(f"📍 Latest release: {latest_tag}")
    print("📝 Getting PRs since last release...")
    prs = get_commits_since_tag(latest_tag, package_name)
    print(f"📊 Found {len(prs)} PR(s) affecting {package_name}")
    if not prs:
        # Nothing to publish; bail out before touching any files.
        print("✅ No changes found since last release")
        return
    print("🏷️ Categorizing PRs...")
    categorized = categorize_prs(prs)
    print("📋 Generating release notes...")
    release_notes = format_release_notes(latest_tag, package_name, categorized)
    # Echo the notes to the terminal for review.
    print("\n" + "=" * 60)
    print("RELEASE NOTES")
    print("=" * 60)
    print(release_notes)
    print("=" * 60)
    # Save to the integration documentation directory
    docs_dir = "docs/v3/release-notes/integrations"
    os.makedirs(docs_dir, exist_ok=True)
    output_file = f"{docs_dir}/{package_name}.mdx"
    # Check if file exists to determine if we need frontmatter
    if os.path.exists(output_file):
        # Read existing content and prepend new release
        with open(output_file, "r") as f:
            existing_content = f.read()
        # Find where to insert (after frontmatter)
        lines = existing_content.split("\n")
        insert_index = 0
        # Skip frontmatter (delimited by a pair of '---' lines)
        in_frontmatter = False
        for i, line in enumerate(lines):
            if line.strip() == "---":
                if not in_frontmatter:
                    in_frontmatter = True
                else:
                    insert_index = i + 2  # After frontmatter and blank line
                    break
        # Insert new release at the top (most recent first)
        lines.insert(insert_index, release_notes.rstrip())
        lines.insert(insert_index + 1, "\n---\n")
        content = "\n".join(lines)
    else:
        # Create new file with frontmatter
        content = f"""---
title: {package_name}
---
{release_notes}
"""
    with open(output_file, "w") as f:
        f.write(content)
    print(f"\n💾 Release notes saved to: {output_file}")
# Script entry point: generate and save release notes for one package.
if __name__ == "__main__":
    main()
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "scripts/prepare_integration_release_notes.py",
"license": "Apache License 2.0",
"lines": 339,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/integrations/prefect-aws/prefect_aws/_cli/ecs_worker.py | """ECS worker deployment commands."""
import json
import pathlib
from typing import Optional
import click
import typer
from rich.table import Table
from typing_extensions import Annotated
from .utils import (
add_cli_tags,
console,
delete_stack,
deploy_stack,
display_stacks_table,
get_aws_client,
get_stack_status,
list_cli_deployed_stacks,
load_template,
update_work_pool_defaults,
validate_aws_credentials,
validate_ecs_cluster,
validate_stack_is_cli_managed,
validate_vpc_and_subnets,
)
ecs_worker_app = typer.Typer(
name="ecs-worker",
help="Deploy and manage ECS worker infrastructure",
no_args_is_help=True,
)
@ecs_worker_app.command("deploy-service")
def deploy_service(
    work_pool_name: Annotated[
        str, typer.Option(help="Name of the Prefect work pool", prompt=True)
    ],
    stack_name: Annotated[
        str, typer.Option(help="CloudFormation stack name", prompt=True)
    ],
    # ECS Configuration
    existing_cluster_identifier: Annotated[
        str,
        typer.Option(
            help="ECS cluster name or ARN",
            # Fixed prompt typo: "clust" -> "cluster"
            prompt="ECS cluster identifier (name or ARN)",
        ),
    ],
    existing_vpc_id: Annotated[str, typer.Option(help="VPC ID", prompt="VPC ID")],
    existing_subnet_ids: Annotated[
        str,
        typer.Option(
            help="Comma-separated subnet IDs", prompt="Subnet IDs (comma-separated)"
        ),
    ],
    # Prefect Configuration
    prefect_api_url: Annotated[
        str, typer.Option(help="Prefect API URL", prompt="Prefect API URL")
    ],
    prefect_api_key_secret_arn: Annotated[
        str, typer.Option(help="ARN of existing Prefect API key secret")
    ] = "",
    prefect_api_key: Annotated[
        str,
        typer.Option(
            help="Prefect API key (if not using existing secret)", hide_input=True
        ),
    ] = "",
    prefect_auth_string_secret_arn: Annotated[
        str,
        typer.Option(
            help="ARN of existing Prefect auth string secret for self-hosted servers"
        ),
    ] = "",
    prefect_auth_string: Annotated[
        str,
        typer.Option(
            help="Prefect auth string for self-hosted servers (if not using existing secret)",
            hide_input=True,
        ),
    ] = "",
    # Worker Configuration
    docker_image: Annotated[
        str, typer.Option(help="Docker image for worker")
    ] = "prefecthq/prefect-aws:latest",
    work_queues: Annotated[str, typer.Option(help="Comma-separated work queues")] = "",
    desired_count: Annotated[
        int, typer.Option(help="Desired number of worker tasks")
    ] = 1,
    min_capacity: Annotated[
        int, typer.Option(help="Minimum capacity for auto scaling")
    ] = 1,
    max_capacity: Annotated[
        int, typer.Option(help="Maximum capacity for auto scaling")
    ] = 10,
    # Task Configuration
    task_cpu: Annotated[int, typer.Option(help="CPU units (1024 = 1 vCPU)")] = 1024,
    task_memory: Annotated[int, typer.Option(help="Memory in MB")] = 2048,
    # Logging Configuration
    log_retention_days: Annotated[
        int, typer.Option(help="CloudWatch log retention days")
    ] = 30,
    existing_log_group_name: Annotated[
        str, typer.Option(help="Existing log group name")
    ] = "",
    # AWS Configuration
    region: Annotated[Optional[str], typer.Option(help="AWS region")] = None,
    profile: Annotated[Optional[str], typer.Option(help="AWS profile")] = None,
    # Other options
    dry_run: Annotated[
        bool,
        typer.Option("--dry-run", help="Show what would be deployed without deploying"),
    ] = False,
    wait: Annotated[
        bool,
        typer.Option("--wait/--no-wait", help="Wait for stack deployment to complete"),
    ] = True,
):
    """Deploy an ECS service stack with worker and event infrastructure.

    Validates AWS credentials and the referenced cluster/VPC/subnets, then
    creates or updates a CloudFormation stack from the bundled
    ``service.json`` template. When ``wait`` is true and the stack completes,
    the work pool's base job template defaults are updated from the stack
    outputs on a best-effort basis.

    Raises:
        typer.Exit: If credentials are invalid or AWS resource validation
            fails.
    """
    # Validate AWS credentials
    if not validate_aws_credentials(region, profile):
        typer.echo("Error: Invalid AWS credentials", err=True)
        raise typer.Exit(1)
    # Check if this is Prefect Cloud (requires API key)
    is_prefect_cloud = "api.prefect.cloud" in prefect_api_url.lower()
    if is_prefect_cloud:
        # Prefect Cloud requires API key
        if not prefect_api_key_secret_arn and not prefect_api_key:
            prefect_api_key = typer.prompt(
                "Prefect API key (required for Prefect Cloud)", hide_input=True
            )
    else:
        # Self-hosted Prefect server - auth is optional
        if not prefect_auth_string_secret_arn and not prefect_auth_string:
            auth_needed = typer.confirm(
                "Does your Prefect server require authentication?", default=False
            )
            if auth_needed:
                prefect_auth_string = typer.prompt(
                    "Prefect auth string (username:password format)", hide_input=True
                )
    # Parse subnet IDs
    subnet_id_list = [s.strip() for s in existing_subnet_ids.split(",")]
    # Get AWS clients
    cf_client = get_aws_client("cloudformation", region, profile)
    ecs_client = get_aws_client("ecs", region, profile)
    ec2_client = get_aws_client("ec2", region, profile)
    # Validate AWS resources before touching CloudFormation
    console.print("[cyan]Validating AWS resources...")
    if not validate_ecs_cluster(ecs_client, existing_cluster_identifier):
        typer.echo(
            f"Error: ECS cluster '{existing_cluster_identifier}' not found or not active",
            err=True,
        )
        raise typer.Exit(1)
    valid_vpc, vpc_error = validate_vpc_and_subnets(
        ec2_client, existing_vpc_id, subnet_id_list
    )
    if not valid_vpc:
        typer.echo(f"Error: {vpc_error}", err=True)
        raise typer.Exit(1)
    # Load template
    template = load_template("service.json")
    # Prepare parameters (CloudFormation wants all values as strings)
    parameters = [
        {"ParameterKey": "WorkPoolName", "ParameterValue": work_pool_name},
        {"ParameterKey": "PrefectApiUrl", "ParameterValue": prefect_api_url},
        {
            "ParameterKey": "PrefectApiKeySecretArn",
            "ParameterValue": prefect_api_key_secret_arn,
        },
        {"ParameterKey": "PrefectApiKey", "ParameterValue": prefect_api_key},
        {
            "ParameterKey": "PrefectAuthStringSecretArn",
            "ParameterValue": prefect_auth_string_secret_arn,
        },
        {"ParameterKey": "PrefectAuthString", "ParameterValue": prefect_auth_string},
        {
            "ParameterKey": "ExistingClusterIdentifier",
            "ParameterValue": existing_cluster_identifier,
        },
        {"ParameterKey": "ExistingVpcId", "ParameterValue": existing_vpc_id},
        {"ParameterKey": "ExistingSubnetIds", "ParameterValue": existing_subnet_ids},
        {"ParameterKey": "DockerImage", "ParameterValue": docker_image},
        {"ParameterKey": "WorkQueues", "ParameterValue": work_queues},
        {"ParameterKey": "DesiredCount", "ParameterValue": str(desired_count)},
        {"ParameterKey": "MinCapacity", "ParameterValue": str(min_capacity)},
        {"ParameterKey": "MaxCapacity", "ParameterValue": str(max_capacity)},
        {"ParameterKey": "TaskCpu", "ParameterValue": str(task_cpu)},
        {"ParameterKey": "TaskMemory", "ParameterValue": str(task_memory)},
        {"ParameterKey": "LogRetentionDays", "ParameterValue": str(log_retention_days)},
        {
            "ParameterKey": "ExistingLogGroupName",
            "ParameterValue": existing_log_group_name,
        },
    ]
    # Prepare tags that mark the stack as CLI-managed (used by list/status/delete)
    tags = add_cli_tags(work_pool_name, "service")
    if dry_run:
        console.print("[yellow]DRY RUN - Would deploy the following:")
        console.print(f"Stack Name: {stack_name}")
        console.print(f"Work Pool: {work_pool_name}")
        console.print(f"Cluster: {existing_cluster_identifier}")
        console.print(f"VPC: {existing_vpc_id}")
        console.print(f"Subnets: {existing_subnet_ids}")
        console.print(f"Docker Image: {docker_image}")
        console.print(f"Desired Count: {desired_count}")
        return
    # Deploy stack
    deploy_stack(
        cf_client=cf_client,
        stack_name=stack_name,
        template_body=json.dumps(template),
        parameters=parameters,
        tags=tags,
        wait=wait,
    )
    # Update work pool defaults with deployed infrastructure values if deployment completed
    if wait:
        try:
            stack_info = get_stack_status(cf_client, stack_name)
            if stack_info and "Outputs" in stack_info:
                outputs = {
                    output["OutputKey"]: output["OutputValue"]
                    for output in stack_info["Outputs"]
                }
                update_work_pool_defaults(work_pool_name, outputs)
        except Exception:
            # Best-effort: a failed work-pool update must not fail the deployment.
            pass
    console.print(f"[green]Successfully deployed ECS service stack: {stack_name}")
@ecs_worker_app.command("deploy-events")
def deploy_events(
    work_pool_name: Annotated[
        str, typer.Option(help="Name of the Prefect work pool", prompt=True)
    ],
    stack_name: Annotated[
        str, typer.Option(help="CloudFormation stack name", prompt=True)
    ],
    existing_cluster_arn: Annotated[
        str, typer.Option(help="ECS cluster ARN", prompt="ECS cluster ARN")
    ],
    # AWS Configuration
    region: Annotated[Optional[str], typer.Option(help="AWS region")] = None,
    profile: Annotated[Optional[str], typer.Option(help="AWS profile")] = None,
    # Other options
    dry_run: Annotated[
        bool,
        typer.Option("--dry-run", help="Show what would be deployed without deploying"),
    ] = False,
    wait: Annotated[
        bool,
        typer.Option("--wait/--no-wait", help="Wait for stack deployment to complete"),
    ] = True,
):
    """Deploy an events-only stack for monitoring existing ECS infrastructure.

    Unlike ``deploy-service``, this deploys the bundled ``events-only.json``
    template against an existing ECS cluster and takes only the work pool
    name and cluster ARN as template parameters.

    Raises:
        typer.Exit: If AWS credentials are invalid or the cluster is not
            found/active.
    """
    # Validate AWS credentials
    if not validate_aws_credentials(region, profile):
        typer.echo("Error: Invalid AWS credentials", err=True)
        raise typer.Exit(1)
    # Get AWS clients
    cf_client = get_aws_client("cloudformation", region, profile)
    ecs_client = get_aws_client("ecs", region, profile)
    # Validate ECS cluster
    console.print("[cyan]Validating AWS resources...")
    if not validate_ecs_cluster(ecs_client, existing_cluster_arn):
        typer.echo(
            f"Error: ECS cluster '{existing_cluster_arn}' not found or not active",
            err=True,
        )
        raise typer.Exit(1)
    # Load template
    template = load_template("events-only.json")
    # Prepare parameters
    parameters = [
        {"ParameterKey": "WorkPoolName", "ParameterValue": work_pool_name},
        {"ParameterKey": "ExistingClusterArn", "ParameterValue": existing_cluster_arn},
    ]
    # Prepare tags that mark the stack as CLI-managed (used by list/status/delete)
    tags = add_cli_tags(work_pool_name, "events")
    if dry_run:
        console.print("[yellow]DRY RUN - Would deploy the following:")
        console.print(f"Stack Name: {stack_name}")
        console.print(f"Work Pool: {work_pool_name}")
        console.print(f"Cluster ARN: {existing_cluster_arn}")
        return
    # Deploy stack
    deploy_stack(
        cf_client=cf_client,
        stack_name=stack_name,
        template_body=json.dumps(template),
        parameters=parameters,
        tags=tags,
        wait=wait,
    )
    console.print(f"[green]Successfully deployed ECS events stack: {stack_name}")
@ecs_worker_app.command("list")
def list_stacks(
    region: Annotated[Optional[str], typer.Option(help="AWS region")] = None,
    profile: Annotated[Optional[str], typer.Option(help="AWS profile")] = None,
    output_format: Annotated[
        str, typer.Option("--format", help="Output format: table, json")
    ] = "table",
):
    """List all stacks deployed by prefect-aws CLI.

    Stacks are discovered via the identifying tags applied at deploy time.
    Output is a rich table by default, or raw JSON with ``--format json``.

    Raises:
        typer.Exit: If AWS credentials are invalid.
    """
    # Validate AWS credentials
    if not validate_aws_credentials(region, profile):
        typer.echo("Error: Invalid AWS credentials", err=True)
        raise typer.Exit(1)
    cf_client = get_aws_client("cloudformation", region, profile)
    stacks = list_cli_deployed_stacks(cf_client)
    if output_format == "json":
        # Convert datetime objects to strings for JSON serialization
        import json
        from datetime import datetime
        def serialize_datetime(obj):
            # json.dumps calls this only for values it cannot serialize natively.
            if isinstance(obj, datetime):
                return obj.isoformat()
            raise TypeError(f"Object of type {type(obj)} is not JSON serializable")
        print(json.dumps(stacks, default=serialize_datetime, indent=2))
    else:
        # Any value other than "json" falls through to the table view.
        display_stacks_table(stacks)
@ecs_worker_app.command("status")
def stack_status(
    stack_name: Annotated[str, typer.Argument(help="Name of the stack")],
    region: Annotated[Optional[str], typer.Option(help="AWS region")] = None,
    profile: Annotated[Optional[str], typer.Option(help="AWS profile")] = None,
):
    """Get the status of a specific stack.

    Prints the stack's status, creation/update times in a human-friendly
    format, and a table of its outputs when present. Only stacks carrying
    this CLI's identifying tags can be inspected.

    Raises:
        typer.Exit: If credentials are invalid, the stack is not
            CLI-managed, or the stack does not exist.
    """
    # Validate AWS credentials
    if not validate_aws_credentials(region, profile):
        typer.echo("Error: Invalid AWS credentials", err=True)
        raise typer.Exit(1)
    cf_client = get_aws_client("cloudformation", region, profile)
    # Validate stack is CLI-managed
    if not validate_stack_is_cli_managed(cf_client, stack_name):
        typer.echo(
            f"Error: Stack '{stack_name}' was not deployed by prefect-aws CLI", err=True
        )
        raise typer.Exit(1)
    stack_info = get_stack_status(cf_client, stack_name)
    if not stack_info:
        typer.echo(f"Stack '{stack_name}' not found", err=True)
        raise typer.Exit(1)
    # Display stack information
    console.print(f"[bold cyan]Stack: {stack_info['StackName']}[/bold cyan]")
    console.print(f"Status: {stack_info['StackStatus']}")
    # Format creation time in a human-friendly way
    created_time = stack_info["CreationTime"]
    if hasattr(created_time, "strftime"):
        created_formatted = created_time.strftime("%B %d, %Y at %I:%M %p UTC")
    else:
        # Fall back to the raw value when it is not a datetime-like object.
        created_formatted = str(created_time)
    console.print(f"Created: {created_formatted}", highlight=False)
    # Format update time if available
    if "LastUpdatedTime" in stack_info:
        updated_time = stack_info["LastUpdatedTime"]
        if hasattr(updated_time, "strftime"):
            updated_formatted = updated_time.strftime("%B %d, %Y at %I:%M %p UTC")
        else:
            updated_formatted = str(updated_time)
        console.print(f"Updated: {updated_formatted}", highlight=False)
    # Show outputs if available
    if "Outputs" in stack_info:
        console.print("\n[bold]Stack Outputs:[/bold]")
        outputs_table = Table(show_header=True, header_style="bold magenta")
        outputs_table.add_column("Output Key", style="cyan", overflow="ellipsis")
        outputs_table.add_column("Value", style="green", overflow="fold")
        outputs_table.add_column("Description", style="yellow", overflow="fold")
        for output in stack_info["Outputs"]:
            description = output.get("Description", "")
            outputs_table.add_row(
                output["OutputKey"], output["OutputValue"], description
            )
        console.print(outputs_table)
@ecs_worker_app.command("delete")
def delete_stack_cmd(
    stack_name: Annotated[str, typer.Argument(help="Name of the stack to delete")],
    region: Annotated[Optional[str], typer.Option(help="AWS region")] = None,
    profile: Annotated[Optional[str], typer.Option(help="AWS profile")] = None,
    force: Annotated[
        bool, typer.Option("--force", help="Skip confirmation prompt")
    ] = False,
    wait: Annotated[
        bool,
        typer.Option("--wait/--no-wait", help="Wait for stack deletion to complete"),
    ] = True,
):
    """Delete a stack deployed by prefect-aws CLI.

    Prompts for confirmation unless ``--force`` is given; the underlying
    helper refuses to delete stacks that do not carry this CLI's tags.

    Raises:
        typer.Exit: If AWS credentials are invalid (further exits may be
            raised by the deletion helper).
    """
    # Validate AWS credentials
    if not validate_aws_credentials(region, profile):
        typer.echo("Error: Invalid AWS credentials", err=True)
        raise typer.Exit(1)
    if not force:
        confirm = typer.confirm(
            f"Are you sure you want to delete stack '{stack_name}'?"
        )
        if not confirm:
            typer.echo("Deletion cancelled")
            return
    cf_client = get_aws_client("cloudformation", region, profile)
    delete_stack(cf_client, stack_name, wait=wait)
@ecs_worker_app.command("export-template")
def export_template(
    template_type: Annotated[
        str,
        typer.Option(
            help="Template type: 'service' or 'events-only'",
            click_type=click.Choice(["service", "events-only"]),
            prompt=True,
        ),
    ],
    output_path: Annotated[
        pathlib.Path,
        typer.Option(help="Output file path for the template", prompt=True),
    ],
    format: Annotated[
        str,
        typer.Option(
            help="Output format: 'json' or 'yaml'",
            click_type=click.Choice(["json", "yaml"]),
        ),
    ] = "json",
):
    """Export CloudFormation templates to files for direct use or customization.

    Writes the bundled template to ``output_path`` as JSON or YAML. If YAML
    is requested but PyYAML is unavailable, falls back to JSON and rewrites
    the output file's extension to ``.json``.

    Raises:
        typer.Exit: If the template cannot be loaded or written.
    """
    try:
        # Load the template
        console.print(f"[cyan]Loading template '{template_type}'...")
        template = load_template(f"{template_type}.json")
        content = ""
        # Prepare content based on format
        if format == "json":
            content = json.dumps(template, indent=2)
        elif format == "yaml":
            try:
                import yaml
                content = yaml.dump(template, default_flow_style=False, sort_keys=False)
            except ImportError:
                console.print(
                    "[yellow]Warning: PyYAML not available, falling back to JSON format"
                )
                content = json.dumps(template, indent=2)
                # Replace the requested extension (if any) with .json so the
                # file suffix matches the actual content.
                output_path = (
                    pathlib.Path(str(output_path).rsplit(".", 1)[0] + ".json")
                    if "." in str(output_path)
                    else pathlib.Path(str(output_path) + ".json")
                )
        # Write to file
        output_path.parent.mkdir(parents=True, exist_ok=True)
        with open(output_path, "w") as f:
            f.write(content)
        console.print(f"[green]✓ Template exported to: {output_path}")
        console.print("\n[bold]Next steps:[/bold]")
        console.print("1. Review and customize the template as needed")
        console.print("2. Deploy using AWS CLI:")
        console.print(
            f"   [cyan]aws cloudformation deploy --template-file {output_path} --stack-name YOUR_STACK_NAME --parameter-overrides ParameterKey=Value[/cyan]"
        )
        console.print("3. Or use the template with other infrastructure tools")
    except Exception as e:
        typer.echo(f"Error exporting template: {e}", err=True)
        raise typer.Exit(1)
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-aws/prefect_aws/_cli/ecs_worker.py",
"license": "Apache License 2.0",
"lines": 457,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/integrations/prefect-aws/prefect_aws/_cli/main.py | """Main CLI entry point for prefect-aws."""
import typer
from .ecs_worker import ecs_worker_app
# Top-level Typer application; prints help when invoked with no arguments.
app = typer.Typer(
    name="prefect-aws",
    help="CLI tool for deploying Prefect AWS infrastructure",
    no_args_is_help=True,
)
# Add the ecs-worker subcommand group
app.add_typer(ecs_worker_app, name="ecs-worker")
@app.command()
def version():
    """Show the version of prefect-aws."""
    try:
        from prefect_aws._version import __version__
    except ImportError:
        # Version module is generated at build time; it may be absent in
        # source checkouts.
        typer.echo("prefect-aws version unknown")
    else:
        typer.echo(f"prefect-aws {__version__}")
# Allow running this module directly as a script.
if __name__ == "__main__":
    app()
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-aws/prefect_aws/_cli/main.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/prefect:src/integrations/prefect-aws/prefect_aws/_cli/utils.py | """Utility functions for AWS operations and CLI helpers."""
from __future__ import annotations
import json
from datetime import datetime, timezone
from typing import TYPE_CHECKING, Any, Literal
import boto3
import typer
from botocore.exceptions import ClientError, NoCredentialsError
from rich.console import Console
from rich.progress import Progress, SpinnerColumn, TextColumn
from rich.table import Table
from typing_extensions import overload
from prefect.client.orchestration import get_client
from prefect.client.schemas.actions import WorkPoolUpdate
try:
from importlib.resources import files
except ImportError:
# Python < 3.9 fallback
from importlib_resources import files
if TYPE_CHECKING:
from mypy_boto3_cloudformation.client import CloudFormationClient
from mypy_boto3_ec2.client import EC2Client
from mypy_boto3_ecs.client import ECSClient
from mypy_boto3_sts.client import STSClient
# Shared rich console used for all CLI output in this module.
console = Console()
# Tags that identify stacks deployed by this CLI
CLI_TAGS = {
    "ManagedBy": "prefect-aws-cli",
    "DeploymentType": "ecs-worker",
}
def get_template_path(template_name: str) -> str:
    """Return the filesystem path of a bundled CloudFormation template.

    Args:
        template_name: Name of the template file (e.g., 'service.json')

    Returns:
        Path to the template file

    Raises:
        typer.Exit: If the template resource cannot be located.
    """
    try:
        resource = files("prefect_aws.templates.ecs") / template_name
        return str(resource)
    except (ImportError, FileNotFoundError) as e:
        typer.echo(f"Error: Could not find template {template_name}: {e}", err=True)
        raise typer.Exit(1)
def load_template(template_name: str) -> dict[str, Any]:
    """Load and parse a bundled CloudFormation template.

    Args:
        template_name: Name of the template file

    Returns:
        Template content as a dictionary

    Raises:
        typer.Exit: If the template is missing or is not valid JSON.
    """
    try:
        raw = (files("prefect_aws.templates.ecs") / template_name).read_text()
        return json.loads(raw)
    except (ImportError, FileNotFoundError, json.JSONDecodeError) as e:
        typer.echo(f"Error loading template {template_name}: {e}", err=True)
        raise typer.Exit(1)
# Typing overloads: narrow the return type per service name so callers get
# the specific mypy_boto3 client type for each supported service.
@overload
def get_aws_client(
    service: Literal["sts"],
    region: str | None = None,
    profile: str | None = None,
) -> STSClient:
    pass
@overload
def get_aws_client(
    service: Literal["ecs"],
    region: str | None = None,
    profile: str | None = None,
) -> ECSClient:
    pass
@overload
def get_aws_client(
    service: Literal["cloudformation"],
    region: str | None = None,
    profile: str | None = None,
) -> CloudFormationClient:
    pass
@overload
def get_aws_client(
    service: Literal["ec2"],
    region: str | None = None,
    profile: str | None = None,
) -> EC2Client:
    pass
def get_aws_client(
    service: Literal["sts", "ecs", "cloudformation", "ec2"],
    region: str | None = None,
    profile: str | None = None,
):
    """Get an AWS client with error handling.

    Args:
        service: AWS service name (e.g., 'cloudformation', 'ecs')
        region: AWS region
        profile: AWS profile name

    Returns:
        Boto3 client

    Raises:
        typer.Exit: If credentials are missing or client creation fails.
    """
    try:
        session = boto3.Session(profile_name=profile, region_name=region)
        return session.client(service)
    except NoCredentialsError:
        typer.echo(
            "Error: AWS credentials not found. Please configure your credentials.",
            err=True,
        )
        raise typer.Exit(1)
    except Exception as e:
        typer.echo(f"Error creating AWS client: {e}", err=True)
        raise typer.Exit(1)
def validate_aws_credentials(
    region: str | None = None, profile: str | None = None
) -> bool:
    """Check that AWS credentials resolve and can call STS.

    Args:
        region: AWS region
        profile: AWS profile name

    Returns:
        True if a caller identity could be retrieved, False otherwise.
    """
    try:
        get_aws_client("sts", region, profile).get_caller_identity()
    except Exception:
        return False
    return True
def add_cli_tags(work_pool_name: str, stack_type: str) -> list[dict[str, str]]:
    """Build the CloudFormation tag list identifying a CLI-managed stack.

    Args:
        work_pool_name: Name of the work pool
        stack_type: Type of stack ('service' or 'events')

    Returns:
        List of ``{"Key": ..., "Value": ...}`` tag dictionaries.
    """
    tags = [{"Key": key, "Value": value} for key, value in CLI_TAGS.items()]
    tags.append({"Key": "StackType", "Value": stack_type})
    tags.append({"Key": "WorkPoolName", "Value": work_pool_name})
    tags.append({"Key": "CreatedAt", "Value": datetime.now(timezone.utc).isoformat()})
    return tags
def list_cli_deployed_stacks(cf_client) -> list[dict[str, Any]]:
    """List all stacks deployed by this CLI.

    Pages through every CloudFormation stack, skips deleted ones, and keeps
    only stacks whose tags match this CLI's identifying tags.

    Args:
        cf_client: CloudFormation client

    Returns:
        List of stack information dictionaries (name, status, creation
        time, work pool, stack type, description, and last-updated time
        when available).

    Raises:
        typer.Exit: If the CloudFormation API call fails.
    """
    try:
        paginator = cf_client.get_paginator("describe_stacks")
        cli_stacks = []
        for page in paginator.paginate():
            for stack in page["Stacks"]:
                # Deleted stacks can still appear in results; ignore them.
                if stack["StackStatus"] in ["DELETE_COMPLETE"]:
                    continue
                tags = {tag["Key"]: tag["Value"] for tag in stack.get("Tags", [])}
                # Check if stack was deployed by CLI
                if (
                    tags.get("ManagedBy") == CLI_TAGS["ManagedBy"]
                    and tags.get("DeploymentType") == CLI_TAGS["DeploymentType"]
                ):
                    stack_info = {
                        "StackName": stack["StackName"],
                        "StackStatus": stack["StackStatus"],
                        "CreationTime": stack["CreationTime"],
                        "WorkPoolName": tags.get("WorkPoolName", "Unknown"),
                        "StackType": tags.get("StackType", "Unknown"),
                        "Description": stack.get("Description", ""),
                    }
                    # Only present after a stack has been updated at least once.
                    if "LastUpdatedTime" in stack:
                        stack_info["LastUpdatedTime"] = stack["LastUpdatedTime"]
                    cli_stacks.append(stack_info)
        return cli_stacks
    except ClientError as e:
        typer.echo(f"Error listing stacks: {e}", err=True)
        raise typer.Exit(1)
def validate_stack_is_cli_managed(cf_client, stack_name: str) -> bool:
    """Check whether a stack carries the tags this CLI applies on deploy.

    Args:
        cf_client: CloudFormation client
        stack_name: Name of the stack

    Returns:
        True if the stack exists and its tags match the CLI's identifying
        tags; False if the describe call fails.
    """
    try:
        stack = cf_client.describe_stacks(StackName=stack_name)["Stacks"][0]
    except ClientError:
        return False
    tag_map = {tag["Key"]: tag["Value"] for tag in stack.get("Tags", [])}
    return (
        tag_map.get("ManagedBy") == CLI_TAGS["ManagedBy"]
        and tag_map.get("DeploymentType") == CLI_TAGS["DeploymentType"]
    )
def deploy_stack(
    cf_client,
    stack_name: str,
    template_body: str,
    parameters: list[dict[str, str]],
    tags: list[dict[str, str]],
    capabilities: list[str] | None = None,
    wait: bool = True,
) -> None:
    """Deploy or update a CloudFormation stack.

    Creates the stack if it does not exist, otherwise issues an update.
    An update that reports "No updates are to be performed" is treated as
    success. Progress is rendered with a spinner on the shared console.

    Args:
        cf_client: CloudFormation client
        stack_name: Name of the stack
        template_body: CloudFormation template as JSON string
        parameters: List of parameter dictionaries
        tags: List of tag dictionaries
        capabilities: IAM capabilities if required (defaults to
            CAPABILITY_NAMED_IAM)
        wait: Whether to wait for the stack operation to complete

    Raises:
        typer.Exit: If the create/update call fails.
    """
    if capabilities is None:
        capabilities = ["CAPABILITY_NAMED_IAM"]
    operation = "create"
    try:
        # Check if stack exists
        try:
            cf_client.describe_stacks(StackName=stack_name)
            stack_exists = True
        except ClientError as e:
            # "ValidationError" is how CloudFormation reports a missing stack.
            if e.response.get("Error", {}).get("Code") == "ValidationError":
                stack_exists = False
            else:
                raise
        operation = "update" if stack_exists else operation
        with Progress(
            SpinnerColumn(),
            TextColumn("[progress.description]{task.description}"),
            console=console,
        ) as progress:
            task = progress.add_task(
                f"[cyan]{operation.capitalize()}ing stack {stack_name}..."
            )
            if stack_exists:
                try:
                    cf_client.update_stack(
                        StackName=stack_name,
                        TemplateBody=template_body,
                        Parameters=parameters,
                        Tags=tags,
                        Capabilities=capabilities,
                    )
                except ClientError as e:
                    # An identical template/parameters set is not an error.
                    if "No updates are to be performed" in str(e):
                        progress.update(
                            task,
                            description=f"[green]Stack {stack_name} is already up to date",
                        )
                        return
                    raise
            else:
                cf_client.create_stack(
                    StackName=stack_name,
                    TemplateBody=template_body,
                    Parameters=parameters,
                    Tags=tags,
                    Capabilities=capabilities,
                )
            if wait:
                # Wait for operation to complete using the matching boto3
                # waiter ("stack_create_complete" / "stack_update_complete").
                waiter_name = f"stack_{operation}_complete"
                waiter = cf_client.get_waiter(waiter_name)
                progress.update(
                    task, description=f"[yellow]Waiting for {operation} to complete..."
                )
                # Poll every 10s for up to 20 minutes.
                waiter.wait(
                    StackName=stack_name, WaiterConfig={"Delay": 10, "MaxAttempts": 120}
                )
                progress.update(
                    task,
                    description=f"[green]Stack {stack_name} {operation}d successfully!",
                )
            else:
                progress.update(
                    task,
                    description=f"[green]Stack {stack_name} {operation} initiated. Check status with: prefect-aws ecs-worker status {stack_name}",
                )
    except ClientError as e:
        error_msg = e.response.get("Error", {}).get("Message")
        typer.echo(
            f"Error performing {operation} action for stack: {error_msg}", err=True
        )
        raise typer.Exit(1)
def delete_stack(cf_client, stack_name: str, wait: bool = True) -> None:
    """Delete a CloudFormation stack.

    Refuses to delete stacks that do not carry this CLI's identifying tags.

    Args:
        cf_client: CloudFormation client
        stack_name: Name of the stack
        wait: Whether to wait for the stack deletion to complete

    Raises:
        typer.Exit: If the stack is not CLI-managed or deletion fails.
    """
    try:
        # Validate stack is CLI-managed
        if not validate_stack_is_cli_managed(cf_client, stack_name):
            typer.echo(
                f"Error: Stack '{stack_name}' was not deployed by prefect-aws CLI",
                err=True,
            )
            raise typer.Exit(1)
        with Progress(
            SpinnerColumn(),
            TextColumn("[progress.description]{task.description}"),
            console=console,
        ) as progress:
            task = progress.add_task(f"[red]Deleting stack {stack_name}...")
            cf_client.delete_stack(StackName=stack_name)
            if wait:
                # Wait for deletion to complete (poll every 10s, up to 20 min)
                waiter = cf_client.get_waiter("stack_delete_complete")
                progress.update(
                    task, description="[yellow]Waiting for deletion to complete..."
                )
                waiter.wait(
                    StackName=stack_name, WaiterConfig={"Delay": 10, "MaxAttempts": 120}
                )
                progress.update(
                    task, description=f"[green]Stack {stack_name} deleted successfully!"
                )
            else:
                progress.update(
                    task,
                    description=f"[green]Stack {stack_name} deletion initiated. Check status with: prefect-aws ecs-worker status {stack_name}",
                )
    except ClientError as e:
        if (code := e.response.get("Error", {}).get("Code")) == "ValidationError":
            typer.echo(f"Error: Stack '{stack_name}' not found", err=True)
        else:
            # NOTE(review): this prints the error *code* while deploy_stack
            # prints the error *message* — consider aligning the two.
            typer.echo(f"Error deleting stack: {code}", err=True)
        raise typer.Exit(1)
def get_stack_status(cf_client, stack_name: str) -> dict[str, Any] | None:
    """Fetch the description of a CloudFormation stack.

    Args:
        cf_client: CloudFormation client
        stack_name: Name of the stack

    Returns:
        The stack description dict, or None when the stack does not exist.
    """
    try:
        stacks = cf_client.describe_stacks(StackName=stack_name)["Stacks"]
    except ClientError as e:
        # "ValidationError" is how CloudFormation reports a missing stack.
        if e.response.get("Error", {}).get("Code") == "ValidationError":
            return None
        raise
    return stacks[0]
def display_stacks_table(stacks: list[dict[str, Any]]) -> None:
    """Render CLI-managed stacks as a rich table on the shared console.

    Args:
        stacks: List of stack information dictionaries
    """
    if not stacks:
        typer.echo("No stacks found deployed by prefect-aws CLI")
        return
    table = Table(title="ECS Worker Stacks")
    # Column order matches the row values appended below.
    for header, style in (
        ("Stack Name", "cyan"),
        ("Work Pool", "green"),
        ("Type", "yellow"),
        ("Status", "magenta"),
        ("Created", "blue"),
    ):
        table.add_column(header, style=style)
    for stack in stacks:
        formatted = stack["CreationTime"].strftime("%B %d, %Y at %I:%M %p UTC")
        table.add_row(
            stack["StackName"],
            stack["WorkPoolName"],
            stack["StackType"],
            stack["StackStatus"],
            formatted,
        )
    console.print(table)
def validate_ecs_cluster(ecs_client, cluster_identifier: str) -> bool:
    """Check that an ECS cluster exists and is ACTIVE.

    Args:
        ecs_client: ECS client
        cluster_identifier: Cluster name or ARN

    Returns:
        True if the cluster was found and its status is ACTIVE.
    """
    try:
        found = ecs_client.describe_clusters(clusters=[cluster_identifier])["clusters"]
    except ClientError:
        return False
    if not found:
        return False
    return found[0]["status"] == "ACTIVE"
def validate_vpc_and_subnets(
    ec2_client, vpc_id: str, subnet_ids: list[str]
) -> tuple[bool, str]:
    """Verify that a VPC exists and that every subnet belongs to it.

    Args:
        ec2_client: EC2 client
        vpc_id: VPC ID
        subnet_ids: List of subnet IDs

    Returns:
        Tuple of (is_valid, error_message); the message is "" on success.
    """
    try:
        if not ec2_client.describe_vpcs(VpcIds=[vpc_id])["Vpcs"]:
            return False, f"VPC {vpc_id} not found"
        described = ec2_client.describe_subnets(SubnetIds=subnet_ids)["Subnets"]
        for subnet in described:
            if subnet["VpcId"] != vpc_id:
                return False, f"Subnet {subnet['SubnetId']} is not in VPC {vpc_id}"
    except ClientError as e:
        return False, str(e)
    return True, ""
def update_work_pool_defaults(
    work_pool_name: str,
    stack_outputs: dict[str, str],
) -> None:
    """Update work pool base job template defaults with stack deployment values.

    Only updates fields that are currently empty/null to preserve user
    customizations. All failures (missing work pool, API errors) are
    swallowed so a failed update never fails the deployment.

    Args:
        work_pool_name: Name of the work pool to update
        stack_outputs: CloudFormation stack outputs containing infrastructure values
    """
    try:
        with get_client(sync_client=True) as client:
            # Get current work pool
            try:
                work_pool = client.read_work_pool(work_pool_name)
            except Exception:
                # Work pool does not exist / cannot be read — nothing to do.
                return
            # Get current base job template
            base_template = work_pool.base_job_template
            if not base_template or "variables" not in base_template:
                return
            variables = base_template["variables"]
            properties = variables.get("properties", {})
            # Update VPC ID default if not set (treat None as empty)
            vpc_id_prop = properties.get("vpc_id", {})
            current_default = vpc_id_prop.get("default")
            if (
                current_default is None or not current_default
            ) and "VpcId" in stack_outputs:
                vpc_id_prop["default"] = stack_outputs["VpcId"]
                properties["vpc_id"] = vpc_id_prop
            # Update cluster default if not set (treat None as empty)
            cluster_prop = properties.get("cluster", {})
            current_default = cluster_prop.get("default")
            if (
                current_default is None or not current_default
            ) and "ClusterArn" in stack_outputs:
                cluster_prop["default"] = stack_outputs["ClusterArn"]
                properties["cluster"] = cluster_prop
            # Update Prefect API key secret ARN default if not set and we created one (treat None as empty)
            api_key_secret_prop = properties.get("prefect_api_key_secret_arn", {})
            current_default = api_key_secret_prop.get("default")
            if (
                current_default is None or not current_default
            ) and "PrefectApiKeySecretArnOutput" in stack_outputs:
                api_key_secret_prop["default"] = stack_outputs[
                    "PrefectApiKeySecretArnOutput"
                ]
                properties["prefect_api_key_secret_arn"] = api_key_secret_prop
            # Update execution role ARN default if not set (treat None as empty)
            execution_role_prop = properties.get("execution_role_arn", {})
            current_default = execution_role_prop.get("default")
            if (
                current_default is None or not current_default
            ) and "TaskExecutionRoleArn" in stack_outputs:
                execution_role_prop["default"] = stack_outputs["TaskExecutionRoleArn"]
                properties["execution_role_arn"] = execution_role_prop
            # Update network configuration subnets default if not set.
            # Walk down the nested JSON-schema path:
            # network_configuration -> properties -> awsvpcConfiguration
            # -> properties -> subnets
            network_config_prop = properties.get("network_configuration", {})
            network_config_props = network_config_prop.get("properties", {})
            awsvpc_config_prop = network_config_props.get("awsvpcConfiguration", {})
            awsvpc_config_props = awsvpc_config_prop.get("properties", {})
            subnets_prop = awsvpc_config_props.get("subnets", {})
            current_subnets_default = subnets_prop.get("default")
            if (
                current_subnets_default is None or not current_subnets_default
            ) and "SubnetIds" in stack_outputs:
                # Stack output is a comma-separated string; the schema
                # default is a list.
                subnet_list = stack_outputs["SubnetIds"].split(",")
                subnets_prop["default"] = subnet_list
                # Reassemble the nested structure back up the same path.
                awsvpc_config_props["subnets"] = subnets_prop
                awsvpc_config_prop["properties"] = awsvpc_config_props
                network_config_props["awsvpcConfiguration"] = awsvpc_config_prop
                network_config_prop["properties"] = network_config_props
                properties["network_configuration"] = network_config_prop
            variables["properties"] = properties
            # Update the work pool
            update_data = WorkPoolUpdate(base_job_template=base_template)
            client.update_work_pool(work_pool_name, update_data)
    except Exception:
        # Don't fail the deployment if work pool update fails
        pass
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-aws/prefect_aws/_cli/utils.py",
"license": "Apache License 2.0",
"lines": 491,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/integrations/prefect-aws/tests/cli/test_ecs_worker.py | """Tests for ECS worker CLI commands."""
import json
import uuid
from unittest.mock import Mock, patch
import boto3
import pytest
from moto import mock_aws
from prefect_aws._cli.main import app
from prefect_aws.workers import ECSWorker
from typer.testing import CliRunner
import prefect
from prefect.client.schemas.actions import WorkPoolCreate
@pytest.fixture
def aws_credentials(monkeypatch):
    """Set up AWS credentials for moto testing."""
    # Dummy values are fine for moto; only the region needs to be a real one.
    fake_env = {
        "AWS_ACCESS_KEY_ID": "test",
        "AWS_SECRET_ACCESS_KEY": "test",
        "AWS_SESSION_TOKEN": "test",
        "AWS_DEFAULT_REGION": "us-east-1",
    }
    for name, value in fake_env.items():
        monkeypatch.setenv(name, value)
@pytest.fixture
def mock_aws_resources():
    """Create common AWS resources for testing."""
    with mock_aws():
        # Networking: one VPC with two subnets for awsvpc-mode tasks.
        ec2_client = boto3.client("ec2", region_name="us-east-1")
        vpc_id = ec2_client.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]["VpcId"]
        subnet_ids = [
            ec2_client.create_subnet(VpcId=vpc_id, CidrBlock=cidr)["Subnet"][
                "SubnetId"
            ]
            for cidr in ("10.0.1.0/24", "10.0.2.0/24")
        ]
        # An empty ECS cluster for the worker service to target.
        ecs_client = boto3.client("ecs", region_name="us-east-1")
        cluster = ecs_client.create_cluster(clusterName="test-cluster")["cluster"]
        yield {
            "vpc_id": vpc_id,
            "subnet_ids": subnet_ids,
            "cluster_name": "test-cluster",
            "cluster_arn": cluster["clusterArn"],
        }
class TestECSWorkerCLI:
    """Exercise the `ecs-worker` Typer sub-commands end to end via CliRunner."""

    def setup_method(self):
        # Fresh runner per test so captured output never leaks between tests.
        self.runner = CliRunner()

    @patch("prefect_aws._cli.ecs_worker.load_template")
    def test_deploy_service_dry_run(
        self, mock_load_template, aws_credentials, mock_aws_resources
    ):
        """Test deploy-service command with dry-run."""
        mock_load_template.return_value = {"AWSTemplateFormatVersion": "2010-09-09"}
        result = self.runner.invoke(
            app,
            [
                "ecs-worker",
                "deploy-service",
                "--work-pool-name",
                "test-pool",
                "--stack-name",
                "test-stack",
                "--prefect-api-url",
                "https://api.prefect.cloud/api",
                "--existing-cluster-identifier",
                mock_aws_resources["cluster_name"],
                "--existing-vpc-id",
                mock_aws_resources["vpc_id"],
                "--existing-subnet-ids",
                ",".join(mock_aws_resources["subnet_ids"]),
                "--prefect-api-key",
                "test-key",
                "--dry-run",
            ],
        )
        assert result.exit_code == 0
        assert "DRY RUN" in result.stdout
        assert "test-stack" in result.stdout

    @patch("prefect_aws._cli.ecs_worker.load_template")
    def test_deploy_service_no_wait(
        self, mock_load_template, aws_credentials, mock_aws_resources
    ):
        """Test deploy-service command with --no-wait flag."""
        mock_load_template.return_value = {
            "AWSTemplateFormatVersion": "2010-09-09",
            "Resources": {},
        }
        result = self.runner.invoke(
            app,
            [
                "ecs-worker",
                "deploy-service",
                "--work-pool-name",
                "test-pool",
                "--stack-name",
                "test-stack",
                "--prefect-api-url",
                "https://api.prefect.cloud/api",
                "--existing-cluster-identifier",
                mock_aws_resources["cluster_name"],
                "--existing-vpc-id",
                mock_aws_resources["vpc_id"],
                "--existing-subnet-ids",
                ",".join(mock_aws_resources["subnet_ids"]),
                "--prefect-api-key",
                "test-key",
                "--no-wait",
            ],
        )
        assert result.exit_code == 0
        # --no-wait returns immediately and points the user at the status command.
        assert "Check status with:" in result.stdout

    @patch("prefect_aws._cli.ecs_worker.validate_aws_credentials")
    def test_deploy_service_invalid_credentials(self, mock_validate_creds):
        """Test deploy-service command with invalid credentials."""
        mock_validate_creds.return_value = False
        result = self.runner.invoke(
            app,
            [
                "ecs-worker",
                "deploy-service",
                "--work-pool-name",
                "test-pool",
                "--stack-name",
                "test-stack",
                "--prefect-api-url",
                "https://api.prefect.cloud/api",
                "--prefect-api-key",
                "test-key",
                "--existing-cluster-identifier",
                "test-cluster",
                "--existing-vpc-id",
                "vpc-12345",
                "--existing-subnet-ids",
                "subnet-1,subnet-2",
            ],
        )
        assert result.exit_code == 1
        # Check both stdout and stderr as behavior varies across Typer versions
        output = result.stdout
        try:
            output += result.stderr
        except (ValueError, AttributeError):
            pass  # stderr not separately captured
        assert "Invalid AWS credentials" in output

    @patch("prefect_aws._cli.ecs_worker.load_template")
    @patch("typer.confirm")
    def test_deploy_service_self_hosted_server(
        self, mock_confirm, mock_load_template, aws_credentials, mock_aws_resources
    ):
        """Test deploy-service command with self-hosted Prefect server."""
        mock_load_template.return_value = {"AWSTemplateFormatVersion": "2010-09-09"}
        mock_confirm.return_value = False  # No authentication needed
        result = self.runner.invoke(
            app,
            [
                "ecs-worker",
                "deploy-service",
                "--work-pool-name",
                "test-pool",
                "--stack-name",
                "test-stack",
                "--prefect-api-url",
                "http://localhost:4200/api",
                "--existing-cluster-identifier",
                mock_aws_resources["cluster_name"],
                "--existing-vpc-id",
                mock_aws_resources["vpc_id"],
                "--existing-subnet-ids",
                ",".join(mock_aws_resources["subnet_ids"]),
                "--dry-run",
            ],
        )
        assert result.exit_code == 0
        assert "DRY RUN" in result.stdout
        assert "test-stack" in result.stdout

    @patch("prefect_aws._cli.ecs_worker.load_template")
    def test_deploy_service_with_auth_string_parameters(
        self, mock_load_template, aws_credentials, mock_aws_resources
    ):
        """Test deploy-service command with auth string parameters."""
        mock_load_template.return_value = {"AWSTemplateFormatVersion": "2010-09-09"}
        result = self.runner.invoke(
            app,
            [
                "ecs-worker",
                "deploy-service",
                "--work-pool-name",
                "test-pool",
                "--stack-name",
                "test-stack",
                "--prefect-api-url",
                "http://localhost:4200/api",
                "--existing-cluster-identifier",
                mock_aws_resources["cluster_name"],
                "--existing-vpc-id",
                mock_aws_resources["vpc_id"],
                "--existing-subnet-ids",
                ",".join(mock_aws_resources["subnet_ids"]),
                "--prefect-auth-string",
                "user:pass",
                "--dry-run",
            ],
        )
        assert result.exit_code == 0
        assert "DRY RUN" in result.stdout
        assert "test-stack" in result.stdout

    @patch("prefect_aws._cli.ecs_worker.load_template")
    def test_deploy_events_dry_run(
        self, mock_load_template, aws_credentials, mock_aws_resources
    ):
        """Test deploy-events command with dry-run."""
        mock_load_template.return_value = {"AWSTemplateFormatVersion": "2010-09-09"}
        result = self.runner.invoke(
            app,
            [
                "ecs-worker",
                "deploy-events",
                "--work-pool-name",
                "test-pool",
                "--stack-name",
                "test-events",
                "--existing-cluster-arn",
                mock_aws_resources["cluster_arn"],
                "--dry-run",
            ],
        )
        assert result.exit_code == 0
        assert "DRY RUN" in result.stdout
        assert "test-events" in result.stdout

    def test_list_stacks(self, aws_credentials):
        """Test list stacks command."""
        with mock_aws():
            # Create a test stack with CLI tags
            cf = boto3.client("cloudformation", region_name="us-east-1")
            cf.create_stack(
                StackName="test-stack",
                TemplateBody='{"AWSTemplateFormatVersion": "2010-09-09", "Resources": {}}',
                Tags=[
                    {"Key": "ManagedBy", "Value": "prefect-aws-cli"},
                    {"Key": "DeploymentType", "Value": "ecs-worker"},
                    {"Key": "StackType", "Value": "service"},
                    {"Key": "WorkPoolName", "Value": "test-pool"},
                    {"Key": "CreatedAt", "Value": "2023-01-01T00:00:00Z"},
                ],
            )
            result = self.runner.invoke(app, ["ecs-worker", "list"])
            assert result.exit_code == 0
            # Output should contain stack information
            assert "test-stack" in result.stdout

    def test_list_stacks_json_format(self, aws_credentials):
        """Test list stacks command with JSON output."""
        with mock_aws():
            # Create a test stack with CLI tags
            cf = boto3.client("cloudformation", region_name="us-east-1")
            cf.create_stack(
                StackName="test-stack",
                TemplateBody='{"AWSTemplateFormatVersion": "2010-09-09", "Resources": {}}',
                Tags=[
                    {"Key": "ManagedBy", "Value": "prefect-aws-cli"},
                    {"Key": "DeploymentType", "Value": "ecs-worker"},
                    {"Key": "StackType", "Value": "service"},
                    {"Key": "WorkPoolName", "Value": "test-pool"},
                    {"Key": "CreatedAt", "Value": "2023-01-01T00:00:00Z"},
                ],
            )
            result = self.runner.invoke(app, ["ecs-worker", "list", "--format", "json"])
            assert result.exit_code == 0
            # Should be valid JSON
            json.loads(result.stdout)

    def test_stack_status(self, aws_credentials):
        """Test stack status command."""
        with mock_aws():
            # Create a test stack with CLI tags
            cf = boto3.client("cloudformation", region_name="us-east-1")
            cf.create_stack(
                StackName="test-stack",
                TemplateBody='{"AWSTemplateFormatVersion": "2010-09-09", "Resources": {}}',
                Tags=[
                    {"Key": "ManagedBy", "Value": "prefect-aws-cli"},
                    {"Key": "DeploymentType", "Value": "ecs-worker"},
                    {"Key": "StackType", "Value": "service"},
                    {"Key": "WorkPoolName", "Value": "test-pool"},
                ],
            )
            result = self.runner.invoke(app, ["ecs-worker", "status", "test-stack"])
            assert result.exit_code == 0
            assert "test-stack" in result.stdout
            assert "CREATE_COMPLETE" in result.stdout

    def test_stack_status_not_cli_managed(self, aws_credentials):
        """Test stack status command for non-CLI managed stack."""
        with mock_aws():
            # Create a test stack without CLI tags
            cf = boto3.client("cloudformation", region_name="us-east-1")
            cf.create_stack(
                StackName="test-stack",
                TemplateBody='{"AWSTemplateFormatVersion": "2010-09-09", "Resources": {}}',
                Tags=[
                    {"Key": "Owner", "Value": "someone-else"},
                ],
            )
            result = self.runner.invoke(app, ["ecs-worker", "status", "test-stack"])
            assert result.exit_code == 1
            # Check both stdout and stderr as behavior varies across Typer versions
            output = result.stdout
            try:
                output += result.stderr
            except (ValueError, AttributeError):
                pass  # stderr not separately captured
            assert "not deployed by prefect-aws CLI" in output

    def test_delete_stack_force(self, aws_credentials):
        """Test delete stack command with force flag."""
        with mock_aws():
            # Create a test stack with CLI tags
            cf = boto3.client("cloudformation", region_name="us-east-1")
            cf.create_stack(
                StackName="test-stack",
                TemplateBody='{"AWSTemplateFormatVersion": "2010-09-09", "Resources": {}}',
                Tags=[
                    {"Key": "ManagedBy", "Value": "prefect-aws-cli"},
                    {"Key": "DeploymentType", "Value": "ecs-worker"},
                ],
            )
            result = self.runner.invoke(
                app, ["ecs-worker", "delete", "test-stack", "--force"]
            )
            assert result.exit_code == 0
            # Verify the stack was deleted
            try:
                cf.describe_stacks(StackName="test-stack")
                # If we get here, the stack wasn't deleted
                assert False, "Stack should have been deleted"
            except cf.exceptions.ClientError as e:
                # Stack should not exist anymore
                assert "does not exist" in str(e) or "DELETE_COMPLETE" in str(e)

    def test_delete_stack_no_wait(self, aws_credentials):
        """Test delete stack command with --no-wait flag."""
        with mock_aws():
            # Create a test stack with CLI tags
            cf = boto3.client("cloudformation", region_name="us-east-1")
            cf.create_stack(
                StackName="test-stack",
                TemplateBody='{"AWSTemplateFormatVersion": "2010-09-09", "Resources": {}}',
                Tags=[
                    {"Key": "ManagedBy", "Value": "prefect-aws-cli"},
                    {"Key": "DeploymentType", "Value": "ecs-worker"},
                ],
            )
            result = self.runner.invoke(
                app, ["ecs-worker", "delete", "test-stack", "--force", "--no-wait"]
            )
            assert result.exit_code == 0
            assert "Check status with:" in result.stdout
class TestECSWorkerUtils:
    """Test utility functions used by ECS worker commands."""

    def test_cli_tags_generation(self):
        """Test that CLI tags are generated correctly."""
        from prefect_aws._cli.utils import add_cli_tags

        generated = add_cli_tags("test-pool", "service")
        # Flatten the Key/Value tag list into a dict for easy assertions.
        by_key = {entry["Key"]: entry["Value"] for entry in generated}
        assert by_key["ManagedBy"] == "prefect-aws-cli"
        assert by_key["DeploymentType"] == "ecs-worker"
        assert by_key["StackType"] == "service"
        assert by_key["WorkPoolName"] == "test-pool"
        assert "CreatedAt" in by_key

    @patch("prefect_aws._cli.utils.boto3.Session")
    def test_get_aws_client(self, mock_session):
        """Test AWS client creation."""
        from prefect_aws._cli.utils import get_aws_client

        fake_client = Mock()
        fake_session = Mock()
        fake_session.client.return_value = fake_client
        mock_session.return_value = fake_session

        returned = get_aws_client("cloudformation", "us-east-1", "test-profile")

        # The session must be built from the given profile/region, then asked
        # for the requested service client.
        mock_session.assert_called_once_with(
            profile_name="test-profile", region_name="us-east-1"
        )
        fake_session.client.assert_called_once_with("cloudformation")
        assert returned == fake_client

    def test_load_template_success(self):
        """Test successful template loading."""
        from prefect_aws._cli.utils import load_template

        # This test requires the actual template files to exist
        # In a real test environment, you might want to mock this
        with patch("prefect_aws._cli.utils.files") as mock_files:
            template_dir = Mock()
            template_file = Mock()
            template_file.read_text.return_value = '{"test": "template"}'
            # Mock the / operator for pathlib-like behavior
            template_dir.__truediv__ = Mock(return_value=template_file)
            mock_files.return_value = template_dir

            assert load_template("service.json") == {"test": "template"}
class TestWorkPoolDefaults:
    """Test work pool defaults update functionality."""

    @pytest.fixture
    async def test_work_pool(self):
        """Create a test work pool with ECS base template."""
        async with prefect.get_client() as client:
            # Unique name so concurrent test runs never collide.
            work_pool = await client.create_work_pool(
                WorkPoolCreate(
                    name=f"test-ecs-pool-{uuid.uuid4()}",
                    base_job_template=ECSWorker.get_default_base_job_template(),
                )
            )
            try:
                yield work_pool
            finally:
                # Always clean up, even if the test body raised.
                await client.delete_work_pool(work_pool.name)

    async def test_update_work_pool_defaults_success(self, test_work_pool):
        """Test successful work pool defaults update."""
        from prefect_aws._cli.utils import update_work_pool_defaults

        stack_outputs = {
            "VpcId": "vpc-123456",
            "ClusterArn": "arn:aws:ecs:us-east-1:123456789012:cluster/test-cluster",
            "PrefectApiKeySecretArnOutput": "arn:aws:secretsmanager:us-east-1:123456789012:secret:test-key",
            "TaskExecutionRoleArn": "arn:aws:iam::123456789012:role/test-execution-role",
            "SubnetIds": "subnet-1,subnet-2",
        }
        # Update the work pool defaults
        update_work_pool_defaults(test_work_pool.name, stack_outputs)
        # Verify the updates were applied
        async with prefect.get_client() as client:
            updated_work_pool = await client.read_work_pool(test_work_pool.name)
            properties = updated_work_pool.base_job_template["variables"]["properties"]
            assert properties["vpc_id"]["default"] == "vpc-123456"
            assert (
                properties["cluster"]["default"]
                == "arn:aws:ecs:us-east-1:123456789012:cluster/test-cluster"
            )
            assert (
                properties["prefect_api_key_secret_arn"]["default"]
                == "arn:aws:secretsmanager:us-east-1:123456789012:secret:test-key"
            )
            assert (
                properties["execution_role_arn"]["default"]
                == "arn:aws:iam::123456789012:role/test-execution-role"
            )
            # Check network configuration subnets: the comma-separated stack
            # output should land as a list default in the nested schema.
            network_config = properties["network_configuration"]["properties"]
            subnets = network_config["awsvpcConfiguration"]["properties"]["subnets"]
            assert subnets["default"] == ["subnet-1", "subnet-2"]

    async def test_update_work_pool_defaults_preserves_existing(self, test_work_pool):
        """Test that existing defaults are preserved."""
        from prefect_aws._cli.utils import update_work_pool_defaults

        # First, set some existing defaults
        async with prefect.get_client() as client:
            work_pool = await client.read_work_pool(test_work_pool.name)
            base_template = work_pool.base_job_template
            base_template["variables"]["properties"]["vpc_id"]["default"] = (
                "existing-vpc"
            )
            base_template["variables"]["properties"]["execution_role_arn"][
                "default"
            ] = "existing-role"
            from prefect.client.schemas.actions import WorkPoolUpdate

            await client.update_work_pool(
                test_work_pool.name, WorkPoolUpdate(base_job_template=base_template)
            )
        stack_outputs = {
            "VpcId": "vpc-123456",
            "ClusterArn": "arn:aws:ecs:us-east-1:123456789012:cluster/test-cluster",
            "TaskExecutionRoleArn": "arn:aws:iam::123456789012:role/test-execution-role",
        }
        # Update with new stack outputs
        update_work_pool_defaults(test_work_pool.name, stack_outputs)
        # Verify existing defaults were preserved and only empty ones updated
        async with prefect.get_client() as client:
            updated_work_pool = await client.read_work_pool(test_work_pool.name)
            properties = updated_work_pool.base_job_template["variables"]["properties"]
            # Existing defaults should be preserved
            assert properties["vpc_id"]["default"] == "existing-vpc"
            assert properties["execution_role_arn"]["default"] == "existing-role"
            # Empty defaults should be updated
            assert (
                properties["cluster"]["default"]
                == "arn:aws:ecs:us-east-1:123456789012:cluster/test-cluster"
            )

    async def test_update_work_pool_defaults_handles_missing_outputs(
        self, test_work_pool
    ):
        """Test handling of missing stack outputs."""
        from prefect_aws._cli.utils import update_work_pool_defaults

        # Only provide some outputs
        stack_outputs = {
            "VpcId": "vpc-123456",
            # Missing ClusterArn, TaskExecutionRoleArn, etc.
        }
        update_work_pool_defaults(test_work_pool.name, stack_outputs)
        # Should still update available outputs
        async with prefect.get_client() as client:
            updated_work_pool = await client.read_work_pool(test_work_pool.name)
            properties = updated_work_pool.base_job_template["variables"]["properties"]
            assert properties["vpc_id"]["default"] == "vpc-123456"
            # cluster should still have its original default since ClusterArn wasn't provided
            assert properties["cluster"]["default"] is None

    async def test_update_work_pool_defaults_handles_nonexistent_pool(self):
        """Test that errors for nonexistent work pools are handled gracefully."""
        from prefect_aws._cli.utils import update_work_pool_defaults

        stack_outputs = {"VpcId": "vpc-123456"}
        # Should not raise an exception for nonexistent work pool
        update_work_pool_defaults("nonexistent-pool", stack_outputs)

    async def test_deploy_service_updates_work_pool_when_wait_true(
        self, test_work_pool
    ):
        """Test that deploy-service updates work pool defaults when --wait is True."""

        def mock_deploy_and_get_status(cf_client, stack_name, **kwargs):
            """Mock deploy_stack that simulates successful deployment"""
            pass

        def mock_get_status(cf_client, stack_name):
            """Mock get_stack_status that returns outputs"""
            return {
                "StackName": stack_name,
                "StackStatus": "CREATE_COMPLETE",
                "Outputs": [
                    {"OutputKey": "VpcId", "OutputValue": "vpc-deployed"},
                    {
                        "OutputKey": "ClusterArn",
                        "OutputValue": "arn:aws:ecs:us-east-1:123456789012:cluster/deployed-cluster",
                    },
                    {
                        "OutputKey": "TaskExecutionRoleArn",
                        "OutputValue": "arn:aws:iam::123456789012:role/deployed-role",
                    },
                    {
                        "OutputKey": "SubnetIds",
                        "OutputValue": "subnet-deployed-1,subnet-deployed-2",
                    },
                ],
            }

        # Patch every AWS touchpoint so only the work-pool update is real.
        with (
            patch(
                "prefect_aws._cli.ecs_worker.deploy_stack",
                side_effect=mock_deploy_and_get_status,
            ),
            patch(
                "prefect_aws._cli.ecs_worker.get_stack_status",
                side_effect=mock_get_status,
            ),
            patch("prefect_aws._cli.ecs_worker.load_template") as mock_load_template,
            patch(
                "prefect_aws._cli.ecs_worker.validate_aws_credentials",
                return_value=True,
            ),
            patch(
                "prefect_aws._cli.ecs_worker.validate_ecs_cluster", return_value=True
            ),
            patch(
                "prefect_aws._cli.ecs_worker.validate_vpc_and_subnets",
                return_value=(True, ""),
            ),
        ):
            mock_load_template.return_value = {"AWSTemplateFormatVersion": "2010-09-09"}
            runner = CliRunner()
            result = runner.invoke(
                app,
                [
                    "ecs-worker",
                    "deploy-service",
                    "--work-pool-name",
                    test_work_pool.name,
                    "--stack-name",
                    "test-stack",
                    "--prefect-api-url",
                    "http://localhost:4200/api",
                    "--existing-cluster-identifier",
                    "test-cluster",
                    "--existing-vpc-id",
                    "vpc-12345",
                    "--existing-subnet-ids",
                    "subnet-1,subnet-2",
                    "--region",
                    "us-east-1",
                    "--prefect-auth-string",
                    "this-is-a-test-auth-string",
                    "--wait",  # This should trigger work pool update
                ],
            )
            assert result.exit_code == 0, result.stdout
        # Verify the work pool was updated with deployment values
        async with prefect.get_client() as client:
            updated_work_pool = await client.read_work_pool(test_work_pool.name)
            properties = updated_work_pool.base_job_template["variables"]["properties"]
            assert properties["vpc_id"]["default"] == "vpc-deployed"
            assert (
                properties["cluster"]["default"]
                == "arn:aws:ecs:us-east-1:123456789012:cluster/deployed-cluster"
            )
            assert (
                properties["execution_role_arn"]["default"]
                == "arn:aws:iam::123456789012:role/deployed-role"
            )
            # Check subnets
            network_config = properties["network_configuration"]["properties"]
            subnets = network_config["awsvpcConfiguration"]["properties"]["subnets"]
            assert subnets["default"] == ["subnet-deployed-1", "subnet-deployed-2"]

    async def test_deploy_service_skips_work_pool_update_when_no_wait(
        self, test_work_pool
    ):
        """Test that deploy-service skips work pool update when --no-wait is used."""

        def mock_deploy_no_wait(cf_client, stack_name, **kwargs):
            # Should not call get_stack_status when wait=False
            pass

        with (
            patch(
                "prefect_aws._cli.ecs_worker.deploy_stack",
                side_effect=mock_deploy_no_wait,
            ),
            patch("prefect_aws._cli.ecs_worker.load_template") as mock_load_template,
            patch(
                "prefect_aws._cli.ecs_worker.validate_aws_credentials",
                return_value=True,
            ),
            patch(
                "prefect_aws._cli.ecs_worker.validate_ecs_cluster", return_value=True
            ),
            patch(
                "prefect_aws._cli.ecs_worker.validate_vpc_and_subnets",
                return_value=(True, ""),
            ),
        ):
            mock_load_template.return_value = {"AWSTemplateFormatVersion": "2010-09-09"}
            runner = CliRunner()
            result = runner.invoke(
                app,
                [
                    "ecs-worker",
                    "deploy-service",
                    "--work-pool-name",
                    test_work_pool.name,
                    "--stack-name",
                    "test-stack",
                    "--prefect-api-url",
                    "http://localhost:4200/api",
                    "--existing-cluster-identifier",
                    "test-cluster",
                    "--existing-vpc-id",
                    "vpc-12345",
                    "--existing-subnet-ids",
                    "subnet-1,subnet-2",
                    "--region",
                    "us-east-1",
                    "--prefect-auth-string",
                    "this-is-a-test-auth-string",
                    "--no-wait",  # This should skip work pool update
                ],
            )
            assert result.exit_code == 0, result.stdout
        # Verify the work pool was NOT updated (should still have original defaults)
        async with prefect.get_client() as client:
            work_pool = await client.read_work_pool(test_work_pool.name)
            properties = work_pool.base_job_template["variables"]["properties"]
            # Should still have original None defaults, not updated values
            assert properties["vpc_id"]["default"] is None
            assert properties["cluster"]["default"] is None
            assert properties["execution_role_arn"]["default"] is None
class TestExportTemplate:
    """Test export-template command functionality."""

    @patch("prefect_aws._cli.ecs_worker.load_template")
    def test_export_template_service_json(self, mock_load_template, tmp_path):
        """Test exporting service template as JSON."""
        mock_template = {
            "AWSTemplateFormatVersion": "2010-09-09",
            "Resources": {"TestResource": {"Type": "AWS::S3::Bucket"}},
        }
        mock_load_template.return_value = mock_template
        output_file = tmp_path / "test-service.json"
        runner = CliRunner()
        result = runner.invoke(
            app,
            [
                "ecs-worker",
                "export-template",
                "--template-type",
                "service",
                "--output-path",
                str(output_file),
                "--format",
                "json",
            ],
        )
        assert result.exit_code == 0, result.stdout
        assert "Template exported to" in result.stdout
        assert output_file.exists()
        # Verify file content round-trips to the exact template dict.
        with open(output_file) as f:
            exported_content = json.loads(f.read())
        assert exported_content == mock_template
        mock_load_template.assert_called_once_with("service.json")

    @patch("prefect_aws._cli.ecs_worker.load_template")
    def test_export_template_events_json(self, mock_load_template, tmp_path):
        """Test exporting events-only template as JSON."""
        mock_template = {
            "AWSTemplateFormatVersion": "2010-09-09",
            "Resources": {"TestQueue": {"Type": "AWS::SQS::Queue"}},
        }
        mock_load_template.return_value = mock_template
        output_file = tmp_path / "test-events.json"
        runner = CliRunner()
        result = runner.invoke(
            app,
            [
                "ecs-worker",
                "export-template",
                "--template-type",
                "events-only",
                "--output-path",
                str(output_file),
            ],
        )
        assert result.exit_code == 0
        assert output_file.exists()
        # Verify file content
        with open(output_file) as f:
            exported_content = json.loads(f.read())
        assert exported_content == mock_template
        mock_load_template.assert_called_once_with("events-only.json")

    @patch("prefect_aws._cli.ecs_worker.load_template")
    def test_export_template_yaml_format(self, mock_load_template, tmp_path):
        """Test exporting template as YAML format."""
        mock_template = {
            "AWSTemplateFormatVersion": "2010-09-09",
            "Resources": {"TestResource": {"Type": "AWS::S3::Bucket"}},
        }
        mock_load_template.return_value = mock_template
        output_file = tmp_path / "test-service.yaml"
        runner = CliRunner()
        result = runner.invoke(
            app,
            [
                "ecs-worker",
                "export-template",
                "--template-type",
                "service",
                "--output-path",
                str(output_file),
                "--format",
                "yaml",
            ],
        )
        assert result.exit_code == 0
        assert output_file.exists()
        # Verify file content (either YAML or JSON fallback)
        with open(output_file, "r") as f:
            content = f.read()
        # Content should be YAML if pyyaml is available, otherwise JSON
        if "AWSTemplateFormatVersion:" in content:
            # YAML format
            import yaml

            yaml_content = yaml.safe_load(content)
            assert yaml_content == mock_template
        else:
            # JSON fallback
            json_content = json.loads(content)
            assert json_content == mock_template

    @patch("prefect_aws._cli.ecs_worker.load_template")
    def test_export_template_yaml_fallback_to_json(
        self, mock_load_template, tmp_path, monkeypatch
    ):
        """Test YAML format falls back to JSON when PyYAML not available."""
        # Mock yaml import in the specific function context
        import sys

        if "yaml" in sys.modules:
            monkeypatch.setitem(sys.modules, "yaml", None)
        mock_template = {"AWSTemplateFormatVersion": "2010-09-09"}
        mock_load_template.return_value = mock_template
        output_file = tmp_path / "test-service.yaml"
        runner = CliRunner()
        result = runner.invoke(
            app,
            [
                "ecs-worker",
                "export-template",
                "--template-type",
                "service",
                "--output-path",
                str(output_file),
                "--format",
                "yaml",
            ],
        )
        assert result.exit_code == 0, result.stdout
        assert "Warning: PyYAML not available" in result.stdout
        # Should create .json file due to fallback
        json_file = tmp_path / "test-service.json"
        assert json_file.exists()

    @patch("prefect_aws._cli.ecs_worker.load_template")
    def test_export_template_interactive_prompts(self, mock_load_template, tmp_path):
        """Test interactive prompts when required parameters not provided."""
        mock_template = {"AWSTemplateFormatVersion": "2010-09-09"}
        mock_load_template.return_value = mock_template
        output_file = tmp_path / "interactive-test.json"
        runner = CliRunner()
        # Answers to the two prompts (template type, output path) via stdin.
        result = runner.invoke(
            app, ["ecs-worker", "export-template"], input=f"service\n{output_file}\n"
        )
        assert result.exit_code == 0
        assert output_file.exists()

    @patch("prefect_aws._cli.ecs_worker.load_template")
    def test_export_template_creates_output_directory(
        self, mock_load_template, tmp_path
    ):
        """Test that output directories are created if they don't exist."""
        mock_template = {"AWSTemplateFormatVersion": "2010-09-09"}
        mock_load_template.return_value = mock_template
        output_dir = tmp_path / "nested" / "directory"
        output_file = output_dir / "template.json"
        runner = CliRunner()
        result = runner.invoke(
            app,
            [
                "ecs-worker",
                "export-template",
                "--template-type",
                "service",
                "--output-path",
                str(output_file),
            ],
        )
        assert result.exit_code == 0
        assert output_file.exists()
        assert output_dir.exists()

    @patch("prefect_aws._cli.ecs_worker.load_template")
    def test_export_template_load_error(self, mock_load_template):
        """Test handling of template loading errors."""
        mock_load_template.side_effect = Exception("Template not found")
        runner = CliRunner()
        result = runner.invoke(
            app,
            [
                "ecs-worker",
                "export-template",
                "--template-type",
                "service",
                "--output-path",
                "/tmp/test.json",
            ],
        )
        assert result.exit_code == 1
        # Check both stdout and stderr as behavior varies across Typer versions
        output = result.stdout
        try:
            output += result.stderr
        except (ValueError, AttributeError):
            pass  # stderr not separately captured
        assert "Error exporting template" in output

    @patch("prefect_aws._cli.ecs_worker.load_template")
    def test_export_template_includes_usage_instructions(
        self, mock_load_template, tmp_path
    ):
        """Test that successful export includes helpful usage instructions."""
        mock_template = {"AWSTemplateFormatVersion": "2010-09-09"}
        mock_load_template.return_value = mock_template
        output_file = tmp_path / "template.json"
        runner = CliRunner()
        result = runner.invoke(
            app,
            [
                "ecs-worker",
                "export-template",
                "--template-type",
                "events-only",
                "--output-path",
                str(output_file),
            ],
        )
        assert result.exit_code == 0
        assert "Next steps:" in result.stdout
        assert "aws cloudformation deploy" in result.stdout
        assert "Review and customize" in result.stdout
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-aws/tests/cli/test_ecs_worker.py",
"license": "Apache License 2.0",
"lines": 867,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/integrations/prefect-aws/infra/app.py | #!/usr/bin/env python3
import aws_cdk as cdk
from worker.events_stack import EcsEventsStack
from worker.service_stack import EcsServiceStack

app = cdk.App()

# Instantiate each stack variant for the different deployment scenarios.
# Each stack gets its own synthesizer so no CDK bootstrap-version rule is
# baked into the emitted templates.
for stack_cls, stack_id in (
    (EcsServiceStack, "PrefectEcsServiceStack"),
    (EcsEventsStack, "PrefectEcsEventsStack"),
):
    stack_cls(
        app,
        stack_id,
        synthesizer=cdk.DefaultStackSynthesizer(generate_bootstrap_version_rule=False),
    )

app.synth()
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-aws/infra/app.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/prefect:src/integrations/prefect-aws/infra/tests/test_worker_stack.py | import aws_cdk as core
import aws_cdk.assertions as assertions
from ..worker.events_stack import EcsEventsStack
from ..worker.service_stack import EcsServiceStack
def test_service_stack():
    """Test that service stack doesn't create cluster."""
    template = assertions.Template.from_stack(
        EcsServiceStack(core.App(), "TestServiceStack")
    )
    # No cluster, one service, SQS queue + its DLQ, one EventBridge rule.
    expected_counts = {
        "AWS::ECS::Cluster": 0,
        "AWS::ECS::Service": 1,
        "AWS::SQS::Queue": 2,
        "AWS::Events::Rule": 1,
    }
    for resource_type, count in expected_counts.items():
        template.resource_count_is(resource_type, count)
def test_events_stack():
    """Test that events stack only creates event infrastructure."""
    template = assertions.Template.from_stack(
        EcsEventsStack(core.App(), "TestEventsStack")
    )
    # No compute (cluster/service); only queue + DLQ and the EventBridge rule.
    expected_counts = {
        "AWS::ECS::Cluster": 0,
        "AWS::ECS::Service": 0,
        "AWS::SQS::Queue": 2,
        "AWS::Events::Rule": 1,
    }
    for resource_type, count in expected_counts.items():
        template.resource_count_is(resource_type, count)
def test_all_stacks_have_outputs():
    """Test that all stacks provide necessary outputs."""
    app = core.App()
    service_template = assertions.Template.from_stack(
        EcsServiceStack(app, "TestServiceStack")
    )
    events_template = assertions.Template.from_stack(
        EcsEventsStack(app, "TestEventsStack")
    )
    # Every stack must expose the SQS queue coordinates.
    for tmpl in (service_template, events_template):
        tmpl.has_output("EcsEventsQueueUrl", {})
        tmpl.has_output("EcsEventsQueueArn", {})
    # Only the service stack deploys (and therefore outputs) a service ARN.
    service_template.has_output("ServiceArn", {})
def test_conditional_secret_creation():
    """Test that secrets are created conditionally based on auth method."""
    template = assertions.Template.from_stack(
        EcsServiceStack(core.App(), "TestServiceStack")
    )

    def _create_secret_condition(gate, arn_param):
        # A new secret is created only when the auth method is active AND no
        # pre-existing secret ARN was supplied.
        return {
            "Fn::And": [
                {"Condition": gate},
                {"Fn::Equals": [{"Ref": arn_param}, ""]},
            ]
        }

    template.has_condition(
        "CreateNewApiKeySecret",
        _create_secret_condition("UseApiKey", "PrefectApiKeySecretArn"),
    )
    template.has_condition(
        "CreateNewAuthStringSecret",
        _create_secret_condition("UseAuthString", "PrefectAuthStringSecretArn"),
    )
    # Both secrets must carry their respective creation conditions.
    for condition_name in ("CreateNewApiKeySecret", "CreateNewAuthStringSecret"):
        template.has_resource(
            "AWS::SecretsManager::Secret", {"Condition": condition_name}
        )
def test_work_queues_parameter():
    """Test that work queues parameter is properly configured."""
    template = assertions.Template.from_stack(
        EcsServiceStack(core.App(), "TestServiceStack")
    )
    # The parameter arrives as a comma-delimited list.
    template.has_parameter("WorkQueues", {"Type": "CommaDelimitedList"})
    # The "has work queues" condition checks that the first entry is non-empty.
    first_entry = {"Fn::Select": [0, {"Ref": "WorkQueues"}]}
    template.has_condition(
        "HasWorkQueues",
        {"Fn::Not": [{"Fn::Equals": [first_entry, ""]}]},
    )
def test_auth_conditions():
    """Test CloudFormation conditions for auth method selection.

    UseApiKey / UseAuthString hold when either a secret ARN or a raw value
    was provided; CreateNew* hold only when the method is in use and no
    existing secret ARN was given.
    """
    app = core.App()
    stack = EcsServiceStack(app, "TestServiceStack")
    template = assertions.Template.from_stack(stack)
    # Should have conditions for auth method selection
    template.has_condition(
        "UseApiKey",
        {
            "Fn::Or": [
                {"Fn::Not": [{"Fn::Equals": [{"Ref": "PrefectApiKeySecretArn"}, ""]}]},
                {"Fn::Not": [{"Fn::Equals": [{"Ref": "PrefectApiKey"}, ""]}]},
            ]
        },
    )
    template.has_condition(
        "UseAuthString",
        {
            "Fn::Or": [
                {
                    "Fn::Not": [
                        {"Fn::Equals": [{"Ref": "PrefectAuthStringSecretArn"}, ""]}
                    ]
                },
                {"Fn::Not": [{"Fn::Equals": [{"Ref": "PrefectAuthString"}, ""]}]},
            ]
        },
    )
    # Should have conditions for secret creation (auth method chosen AND
    # no pre-existing secret ARN supplied)
    template.has_condition(
        "CreateNewApiKeySecret",
        {
            "Fn::And": [
                {"Condition": "UseApiKey"},
                {"Fn::Equals": [{"Ref": "PrefectApiKeySecretArn"}, ""]},
            ]
        },
    )
    template.has_condition(
        "CreateNewAuthStringSecret",
        {
            "Fn::And": [
                {"Condition": "UseAuthString"},
                {"Fn::Equals": [{"Ref": "PrefectAuthStringSecretArn"}, ""]},
            ]
        },
    )
def test_iam_permissions():
    """Test that IAM roles have correct permissions.

    Uses Match.array_with / Match.object_like, so the assertions are
    partial: extra statements or actions beyond those listed are allowed.
    """
    app = core.App()
    stack = EcsServiceStack(app, "TestServiceStack")
    template = assertions.Template.from_stack(stack)
    # Task role should have ECS permissions including the ones we specifically need
    template.has_resource_properties(
        "AWS::IAM::Policy",
        {
            "PolicyDocument": {
                "Statement": assertions.Match.array_with(
                    [
                        assertions.Match.object_like(
                            {
                                "Effect": "Allow",
                                "Action": assertions.Match.array_with(
                                    [
                                        "ecs:RegisterTaskDefinition",
                                        "ecs:DeregisterTaskDefinition",
                                        "ec2:DescribeVpcs",
                                        "logs:GetLogEvents",
                                    ]
                                ),
                                "Resource": "*",
                            }
                        ),
                        # PassRole is required so the worker can hand roles to
                        # the task definitions it registers dynamically.
                        assertions.Match.object_like(
                            {
                                "Effect": "Allow",
                                "Action": "iam:PassRole",
                                "Resource": "arn:aws:iam::*:role/*",
                            }
                        ),
                    ]
                )
            }
        },
    )
    # Task execution role should have secrets access (to read the Prefect
    # API key / auth string at container start)
    template.has_resource_properties(
        "AWS::IAM::Policy",
        {
            "PolicyDocument": {
                "Statement": assertions.Match.array_with(
                    [
                        assertions.Match.object_like(
                            {
                                "Effect": "Allow",
                                "Action": "secretsmanager:GetSecretValue",
                            }
                        )
                    ]
                )
            }
        },
    )
def test_eventbridge_rule_patterns():
    """Test EventBridge rule event patterns.

    Both stacks must match "ECS Task State Change" events from aws.ecs; the
    events stack must additionally pin the clusterArn filter to the
    ExistingClusterArn parameter.
    """
    # Test service stack EventBridge rule (dynamic cluster filtering)
    service_app = core.App()
    service_stack = EcsServiceStack(service_app, "TestServiceStack")
    service_template = assertions.Template.from_stack(service_stack)
    service_template.has_resource_properties(
        "AWS::Events::Rule",
        {
            "EventPattern": {
                "source": ["aws.ecs"],
                "detail-type": ["ECS Task State Change"],
                "detail": {
                    "clusterArn": assertions.Match.any_value(),  # Dynamic based on cluster identifier
                },
            }
        },
    )
    # Test events stack EventBridge rule (should filter by cluster) - separate app
    # (separate App avoids construct-id collisions between the two stacks)
    events_app = core.App()
    events_stack = EcsEventsStack(events_app, "TestEventsStack")
    events_template = assertions.Template.from_stack(events_stack)
    events_template.has_resource_properties(
        "AWS::Events::Rule",
        {
            "EventPattern": {
                "source": ["aws.ecs"],
                "detail-type": ["ECS Task State Change"],
                "detail": {
                    "clusterArn": [{"Ref": "ExistingClusterArn"}],
                },
            }
        },
    )
def test_sqs_configuration():
    """Test SQS queue configuration.

    Checks retention/visibility settings on the main queue and DLQ, the
    redrive policy, and that EventBridge is allowed to send to the queue.
    """
    app = core.App()
    stack = EcsServiceStack(app, "TestServiceStack")
    template = assertions.Template.from_stack(stack)
    # Main queue configuration
    template.has_resource_properties(
        "AWS::SQS::Queue",
        {
            "MessageRetentionPeriod": 604800,  # 7 days
            "VisibilityTimeout": 300,  # 5 minutes
            "RedrivePolicy": {"maxReceiveCount": 3},
        },
    )
    # DLQ configuration (longer retention so failed events can be inspected)
    template.has_resource_properties(
        "AWS::SQS::Queue",
        {
            "MessageRetentionPeriod": 1209600,  # 14 days
            "VisibilityTimeout": 60,  # 1 minute
        },
    )
    # Queue policy for EventBridge - just check that required actions are present
    template.has_resource_properties(
        "AWS::SQS::QueuePolicy",
        {
            "PolicyDocument": {
                "Statement": assertions.Match.array_with(
                    [
                        assertions.Match.object_like(
                            {
                                "Effect": "Allow",
                                "Principal": {"Service": "events.amazonaws.com"},
                                "Action": assertions.Match.array_with(
                                    [
                                        "sqs:SendMessage"  # Key permission for EventBridge
                                    ]
                                ),
                            }
                        )
                    ]
                )
            }
        },
    )
def test_parameter_validation():
    """Test CloudFormation parameters have correct types and constraints.

    Covers the service stack's full parameter set, the events stack's
    minimal set, and asserts that the events stack does NOT declare the
    service-only parameters (PrefectApiUrl, DockerImage, WorkQueues).
    """
    app = core.App()
    service_stack = EcsServiceStack(app, "TestServiceStack")
    events_stack = EcsEventsStack(app, "TestEventsStack")
    service_template = assertions.Template.from_stack(service_stack)
    events_template = assertions.Template.from_stack(events_stack)
    # Service stack parameters
    service_template.has_parameter("WorkPoolName", {"Type": "String"})
    service_template.has_parameter("PrefectApiUrl", {"Type": "String"})
    service_template.has_parameter("DockerImage", {"Type": "String"})
    service_template.has_parameter("WorkQueues", {"Type": "CommaDelimitedList"})
    service_template.has_parameter("LogRetentionDays", {"Type": "Number"})
    # Auth parameters (both API key and auth string); raw values are NoEcho
    service_template.has_parameter("PrefectApiKeySecretArn", {"Type": "String"})
    service_template.has_parameter("PrefectApiKey", {"Type": "String", "NoEcho": True})
    service_template.has_parameter("PrefectAuthStringSecretArn", {"Type": "String"})
    service_template.has_parameter(
        "PrefectAuthString", {"Type": "String", "NoEcho": True}
    )
    # Service-specific parameters
    service_template.has_parameter(
        "ExistingClusterIdentifier",
        {
            "Type": "String",
            "AllowedPattern": "^(arn:aws:ecs:[a-z0-9-]+:\\d{12}:cluster/.+|[a-zA-Z][a-zA-Z0-9_-]{0,254})$",
        },
    )
    service_template.has_parameter(
        "ExistingVpcId", {"Type": "String", "AllowedPattern": "^vpc-[0-9a-f]{8,17}$"}
    )
    service_template.has_parameter("ExistingSubnetIds", {"Type": "CommaDelimitedList"})
    # Events stack parameters (minimal set)
    events_template.has_parameter("WorkPoolName", {"Type": "String"})
    events_template.has_parameter("ExistingClusterArn", {"Type": "String"})
    # Events stack must NOT define service-only parameters.
    # NOTE: the previous implementation asserted `"AssertionError" in str(e)`
    # after raising `assert False, "Expected AssertionError ..."` inside the
    # same try block -- the failure message itself contained the substring
    # "AssertionError", so these negative checks could never fail.
    # try/except/else ensures only has_parameter's own failure is accepted.
    for name, props in [
        ("PrefectApiUrl", {"Type": "String"}),
        ("DockerImage", {"Type": "String"}),
        ("WorkQueues", {"Type": "CommaDelimitedList"}),
    ]:
        try:
            events_template.has_parameter(name, props)
        except Exception:
            pass  # expected: the parameter is absent from the events stack
        else:
            raise AssertionError(
                f"Events stack unexpectedly defines parameter {name}"
            )
def test_resource_naming():
    """Resource names must be derived from the WorkPoolName parameter."""
    template = assertions.Template.from_stack(
        EcsServiceStack(core.App(), "TestServiceStack")
    )

    def pool_suffixed(suffix):
        # CloudFormation join of the work-pool name with a fixed suffix.
        return {"Fn::Join": ["", [{"Ref": "WorkPoolName"}, suffix]]}

    # SQS queue names
    template.has_resource_properties(
        "AWS::SQS::Queue", {"QueueName": pool_suffixed("-ecs-events")}
    )
    template.has_resource_properties(
        "AWS::SQS::Queue", {"QueueName": pool_suffixed("-ecs-events-dlq")}
    )
    # EventBridge rule name
    template.has_resource_properties(
        "AWS::Events::Rule", {"Name": pool_suffixed("-ecs-task-events")}
    )
    # IAM role names
    template.has_resource_properties(
        "AWS::IAM::Role", {"RoleName": pool_suffixed("-task-role")}
    )
    template.has_resource_properties(
        "AWS::IAM::Role", {"RoleName": pool_suffixed("-task-execution-role")}
    )
def test_conditional_log_group():
    """Test conditional log group creation.

    A new CloudWatch log group must only be created when no existing log
    group name was supplied (CreateNewLogGroup == NOT UseExistingLogGroup).
    """
    app = core.App()
    stack = EcsServiceStack(app, "TestServiceStack")
    template = assertions.Template.from_stack(stack)
    # Should have condition for existing log group
    template.has_condition(
        "UseExistingLogGroup",
        {"Fn::Not": [{"Fn::Equals": [{"Ref": "ExistingLogGroupName"}, ""]}]},
    )
    # Log group should only be created when NOT using existing log group
    template.has_resource(
        "AWS::Logs::LogGroup",
        {
            "Condition": "CreateNewLogGroup",
            "Properties": {
                "LogGroupName": {
                    "Fn::Join": [
                        "/",
                        ["/ecs", {"Ref": "WorkPoolName"}, assertions.Match.any_value()],
                    ]
                },
                "RetentionInDays": {"Ref": "LogRetentionDays"},
            },
        },
    )
    # Should have condition for creating new log group
    template.has_condition(
        "CreateNewLogGroup",
        {"Fn::Not": [{"Condition": "UseExistingLogGroup"}]},
    )
def test_cluster_arn_handling():
    """Test cluster ARN vs name handling in service stack.

    ExistingClusterIdentifier may be either a bare cluster name or a full
    ARN; an ARN is detected by its "arn" prefix before the first colon, and
    the service must reference just the cluster name in either case.
    """
    app = core.App()
    stack = EcsServiceStack(app, "TestServiceStack")
    template = assertions.Template.from_stack(stack)
    # Should have condition to detect cluster ARN (first ":"-separated
    # segment equals "arn")
    template.has_condition(
        "IsClusterArn",
        {
            "Fn::Equals": [
                {
                    "Fn::Select": [
                        0,
                        {"Fn::Split": [":", {"Ref": "ExistingClusterIdentifier"}]},
                    ]
                },
                "arn",
            ]
        },
    )
    # Service cluster reference should handle both cases: extract the name
    # after "/" from an ARN, or use the identifier verbatim
    template.has_resource_properties(
        "AWS::ECS::Service",
        {
            "Cluster": {
                "Fn::If": [
                    "IsClusterArn",
                    {
                        "Fn::Select": [
                            1,
                            {"Fn::Split": ["/", {"Ref": "ExistingClusterIdentifier"}]},
                        ]
                    },
                    {"Ref": "ExistingClusterIdentifier"},
                ]
            }
        },
    )
def test_task_definition_configuration():
    """Test ECS task definition configuration.

    The worker task must be a Fargate/awsvpc task definition whose family
    is derived from the work pool name, with a urllib-based container
    health check.
    """
    app = core.App()
    stack = EcsServiceStack(app, "TestServiceStack")
    template = assertions.Template.from_stack(stack)
    # Task definition should have correct configuration
    template.has_resource_properties(
        "AWS::ECS::TaskDefinition",
        {
            "Family": {"Fn::Join": ["", [{"Ref": "WorkPoolName"}, "-worker"]]},
            "RequiresCompatibilities": ["FARGATE"],
            "NetworkMode": "awsvpc",
        },
    )
    # Container should have health check (exact command is matched only by
    # the "urllib" substring, so implementation details may vary)
    template.has_resource_properties(
        "AWS::ECS::TaskDefinition",
        {
            "ContainerDefinitions": [
                assertions.Match.object_like(
                    {
                        "Name": "prefect-worker",
                        "HealthCheck": {
                            "Command": [
                                "CMD-SHELL",
                                assertions.Match.string_like_regexp(r".*urllib.*"),
                            ],
                            "Interval": 30,
                            "Timeout": 5,
                            "Retries": 3,
                            "StartPeriod": 60,
                        },
                    }
                )
            ]
        },
    )
def test_service_networking():
    """Test ECS service networking configuration.

    The worker service must not get a public IP, and its security group
    must open only the health-check port (8080/tcp).
    """
    app = core.App()
    stack = EcsServiceStack(app, "TestServiceStack")
    template = assertions.Template.from_stack(stack)
    # Service should disable public IP
    template.has_resource_properties(
        "AWS::ECS::Service",
        {
            "NetworkConfiguration": {
                "AwsvpcConfiguration": {"AssignPublicIp": "DISABLED"}
            }
        },
    )
    # Security group should allow health check port
    template.has_resource_properties(
        "AWS::EC2::SecurityGroup",
        {
            "SecurityGroupIngress": [
                {
                    "CidrIp": "0.0.0.0/0",
                    "Description": "Health check endpoint",
                    "FromPort": 8080,
                    "ToPort": 8080,
                    "IpProtocol": "tcp",
                }
            ]
        },
    )
def test_auto_scaling_configuration():
    """The worker service scales on CPU (70%) and memory (80%) utilization."""
    template = assertions.Template.from_stack(
        EcsServiceStack(core.App(), "TestServiceStack")
    )
    # One scalable target, two target-tracking policies.
    template.resource_count_is("AWS::ApplicationAutoScaling::ScalableTarget", 1)
    template.resource_count_is("AWS::ApplicationAutoScaling::ScalingPolicy", 2)
    expected_policies = (
        ("ECSServiceAverageCPUUtilization", 70),
        ("ECSServiceAverageMemoryUtilization", 80),
    )
    for metric_type, target_value in expected_policies:
        template.has_resource_properties(
            "AWS::ApplicationAutoScaling::ScalingPolicy",
            {
                "PolicyType": "TargetTrackingScaling",
                "TargetTrackingScalingPolicyConfiguration": {
                    "PredefinedMetricSpecification": {
                        "PredefinedMetricType": metric_type
                    },
                    "TargetValue": target_value,
                },
            },
        )
def test_required_outputs():
    """Test that all stacks provide required outputs.

    Queue and rule outputs are common to both stacks; the events stack also
    exports the queue name and the service stack exports its service/task
    definition ARNs.
    """
    app = core.App()
    service_stack = EcsServiceStack(app, "TestServiceStack")
    events_stack = EcsEventsStack(app, "TestEventsStack")
    service_template = assertions.Template.from_stack(service_stack)
    events_template = assertions.Template.from_stack(events_stack)
    # Both should have SQS outputs
    for template in [service_template, events_template]:
        template.has_output(
            "EcsEventsQueueUrl",
            {"Description": "URL of the SQS queue receiving ECS events"},
        )
        template.has_output(
            "EcsEventsQueueArn",
            {"Description": "ARN of the SQS queue receiving ECS events"},
        )
        template.has_output(
            "EventBridgeRuleArn",
            {"Description": "ARN of the EventBridge rule for ECS events"},
        )
    # Events stack should have queue name output
    events_template.has_output(
        "EcsEventsQueueName",
        {"Description": "Name of the SQS queue receiving ECS events"},
    )
    # Service stack should have service outputs
    service_template.has_output("ServiceArn", {"Description": "ARN of the ECS service"})
    service_template.has_output(
        "TaskDefinitionArn",
        {"Description": "ARN of the ECS task definition for the worker"},
    )
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-aws/infra/tests/test_worker_stack.py",
"license": "Apache License 2.0",
"lines": 553,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/integrations/prefect-aws/infra/worker/events_stack.py | """Events-only stack - sets up EventBridge and SQS for ECS task state changes."""
from aws_cdk import (
CfnOutput,
CfnParameter,
Duration,
Stack,
)
from aws_cdk import (
aws_events as events,
)
from aws_cdk import (
aws_events_targets as events_targets,
)
from aws_cdk import (
aws_iam as iam,
)
from aws_cdk import (
aws_sqs as sqs,
)
from constructs import Construct
class EcsEventsStack(Stack):
    """EventBridge and SQS infrastructure for ECS task state monitoring.

    Standalone stack for operators who only need the event pipeline for an
    existing ECS cluster: an EventBridge rule forwarding "ECS Task State
    Change" events into an SQS queue (with a dead-letter queue), without
    deploying a worker service.
    """

    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        """Define the stack parameters, the queue/DLQ pair, and the rule."""
        super().__init__(scope, construct_id, **kwargs)
        # Parameters
        self.work_pool_name = CfnParameter(
            self,
            "WorkPoolName",
            type="String",
            description="Name of the Prefect work pool",
            default="ecs-work-pool",
        )
        self.existing_cluster_arn = CfnParameter(
            self,
            "ExistingClusterArn",
            type="String",
            description="ARN of existing ECS cluster to monitor",
        )
        # Create only the event infrastructure
        self.queue, self.dlq = self.create_sqs_infrastructure()
        self.eventbridge_rule = self.create_eventbridge_rule(
            self.queue, cluster_arn=self.existing_cluster_arn.value_as_string
        )
        # Output the queue configuration for workers to consume
        CfnOutput(
            self,
            "EcsEventsQueueName",
            value=self.queue.queue_name,
            description="Name of the SQS queue receiving ECS events",
        )

    def create_sqs_infrastructure(self) -> tuple[sqs.Queue, sqs.Queue]:
        """Create SQS queue and DLQ for ECS events.

        Returns:
            (queue, dlq): the main event queue and its dead-letter queue.
            Messages that fail delivery 3 times are moved to the DLQ.
        """
        # Dead Letter Queue: longer retention (14d) so failures can be inspected
        dlq = sqs.Queue(
            self,
            "EcsEventsDLQ",
            queue_name=f"{self.work_pool_name.value_as_string}-ecs-events-dlq",
            visibility_timeout=Duration.seconds(60),
            retention_period=Duration.days(14),
        )
        # Main Queue: 5-minute visibility timeout for consumers, 7-day retention
        queue = sqs.Queue(
            self,
            "EcsEventsQueue",
            queue_name=f"{self.work_pool_name.value_as_string}-ecs-events",
            visibility_timeout=Duration.seconds(300),
            dead_letter_queue=sqs.DeadLetterQueue(
                max_receive_count=3,
                queue=dlq,
            ),
            retention_period=Duration.days(7),
        )
        CfnOutput(
            self,
            "EcsEventsQueueUrl",
            value=queue.queue_url,
            description="URL of the SQS queue receiving ECS events",
        )
        CfnOutput(
            self,
            "EcsEventsQueueArn",
            value=queue.queue_arn,
            description="ARN of the SQS queue receiving ECS events",
        )
        return queue, dlq

    def create_eventbridge_rule(
        self, queue: sqs.Queue, cluster_arn: "str | None" = None
    ) -> events.Rule:
        """Create EventBridge rule for ECS task state changes.

        Args:
            queue: SQS queue targeted by matched events.
            cluster_arn: if given, only events from that cluster match;
                otherwise all "ECS Task State Change" events match.
                NOTE(review): a CDK token string is always truthy, so when
                called from __init__ the filtered branch is always taken.
        """
        # Use CDK's EventPattern class instead of raw dict
        if cluster_arn:
            event_pattern = events.EventPattern(
                source=["aws.ecs"],
                detail_type=["ECS Task State Change"],
                detail={
                    "clusterArn": [cluster_arn],
                },
            )
        else:
            event_pattern = events.EventPattern(
                source=["aws.ecs"],
                detail_type=["ECS Task State Change"],
                detail={},
            )
        rule = events.Rule(
            self,
            "EcsTaskStateChangeRule",
            rule_name=f"{self.work_pool_name.value_as_string}-ecs-task-events",
            description="Capture ECS task state changes for Prefect workers",
            event_pattern=event_pattern,
            targets=[events_targets.SqsQueue(queue)],
        )
        # Grant EventBridge permission to send messages to SQS
        queue.grant_send_messages(iam.ServicePrincipal("events.amazonaws.com"))
        CfnOutput(
            self,
            "EventBridgeRuleArn",
            value=rule.rule_arn,
            description="ARN of the EventBridge rule for ECS events",
        )
        return rule
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-aws/infra/worker/events_stack.py",
"license": "Apache License 2.0",
"lines": 121,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/prefect:src/integrations/prefect-aws/infra/worker/service_stack.py | """ECS worker service stack - deploys to existing cluster with event infrastructure."""
from aws_cdk import (
CfnCondition,
CfnOutput,
CfnParameter,
Duration,
Fn,
RemovalPolicy,
SecretValue,
Stack,
)
from aws_cdk import (
aws_ec2 as ec2,
)
from aws_cdk import (
aws_ecs as ecs,
)
from aws_cdk import (
aws_events as events,
)
from aws_cdk import (
aws_events_targets as events_targets,
)
from aws_cdk import (
aws_iam as iam,
)
from aws_cdk import (
aws_logs as logs,
)
from aws_cdk import (
aws_secretsmanager as secretsmanager,
)
from aws_cdk import (
aws_sqs as sqs,
)
from constructs import Construct
class EcsServiceStack(Stack):
"""ECS worker service for existing cluster with event infrastructure."""
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        """Declare parameters, conditions, secrets, and all worker resources.

        Order matters: conditions must exist before the secrets that carry
        them, and the cluster ARN must be resolved before importing the
        cluster or building the EventBridge rule.
        """
        super().__init__(scope, construct_id, **kwargs)
        # Common Parameters
        self.work_pool_name = CfnParameter(
            self,
            "WorkPoolName",
            type="String",
            description="Name of the Prefect work pool",
            default="ecs-work-pool",
        )
        self.prefect_api_url = CfnParameter(
            self,
            "PrefectApiUrl",
            type="String",
            description="Prefect API URL",
            default="https://api.prefect.cloud/api",
        )
        # Either provide an existing secret ARN OR a raw API key to create a secret
        self.prefect_api_key_secret_arn = CfnParameter(
            self,
            "PrefectApiKeySecretArn",
            type="String",
            description="ARN of existing AWS Secrets Manager secret containing Prefect API key (leave empty to create new)",
            default="",
        )
        self.prefect_api_key = CfnParameter(
            self,
            "PrefectApiKey",
            type="String",
            description="Prefect API key (only used if PrefectApiKeySecretArn is empty)",
            default="",
            no_echo=True,
        )
        # Auth string parameters (alternative to API key for self-hosted servers)
        self.prefect_auth_string_secret_arn = CfnParameter(
            self,
            "PrefectAuthStringSecretArn",
            type="String",
            description="ARN of existing AWS Secrets Manager secret containing Prefect auth string for self-hosted servers (leave empty to create new)",
            default="",
        )
        self.prefect_auth_string = CfnParameter(
            self,
            "PrefectAuthString",
            type="String",
            description="Prefect auth string for self-hosted servers in format 'username:password' (only used if PrefectAuthStringSecretArn is empty)",
            default="",
            no_echo=True,
        )
        self.work_queues = CfnParameter(
            self,
            "WorkQueues",
            type="CommaDelimitedList",
            description="Comma-separated list of work queues to pull from (leave empty to pull from all queues in the work pool)",
            default="",
        )
        self.docker_image = CfnParameter(
            self,
            "DockerImage",
            type="String",
            description="Docker image for the worker",
            default="prefecthq/prefect-aws:latest",
        )
        self.log_retention_days = CfnParameter(
            self,
            "LogRetentionDays",
            type="Number",
            description="CloudWatch log retention in days",
            default=30,
        )
        self.existing_log_group_name = CfnParameter(
            self,
            "ExistingLogGroupName",
            type="String",
            description="Name of existing CloudWatch log group to use (leave empty to create new). Format: /ecs/your-work-pool-name",
            default="",
        )
        # Additional parameters for service-only deployment
        self.existing_cluster_identifier = CfnParameter(
            self,
            "ExistingClusterIdentifier",
            type="String",
            description="ECS cluster name or ARN. You can provide either the cluster name (e.g., 'my-cluster') or the full ARN (e.g., 'arn:aws:ecs:us-east-1:123456789012:cluster/my-cluster'). Find available clusters in the ECS console.",
            allowed_pattern=r"^(arn:aws:ecs:[a-z0-9-]+:\d{12}:cluster/.+|[a-zA-Z][a-zA-Z0-9_-]{0,254})$",
            constraint_description="Must be either a valid cluster name (1-255 characters) or a complete ECS cluster ARN",
        )
        self.existing_vpc_id = CfnParameter(
            self,
            "ExistingVpcId",
            type="String",
            description="VPC ID where the existing cluster is located (e.g., vpc-12345678). Find this in the EC2 console or run 'aws ec2 describe-vpcs'.",
            allowed_pattern=r"^vpc-[0-9a-f]{8,17}$",
            constraint_description="Must be a valid VPC ID in the format: vpc-xxxxxxxx",
        )
        self.existing_subnet_ids = CfnParameter(
            self,
            "ExistingSubnetIds",
            type="CommaDelimitedList",
            description="Comma-separated list of subnet IDs for the service (e.g., subnet-12345678,subnet-87654321). Use private subnets for better security. Find these in the VPC console or run 'aws ec2 describe-subnets --filters Name=vpc-id,Values=YOUR_VPC_ID'.",
        )
        self.desired_count = CfnParameter(
            self,
            "DesiredCount",
            type="Number",
            description="Desired number of worker tasks to run",
            default=1,
        )
        self.min_capacity = CfnParameter(
            self,
            "MinCapacity",
            type="Number",
            description="Minimum number of worker tasks for auto scaling",
            default=1,
        )
        self.max_capacity = CfnParameter(
            self,
            "MaxCapacity",
            type="Number",
            description="Maximum number of worker tasks for auto scaling",
            default=10,
        )
        # Conditions for auth method selection: "in use" means either a
        # secret ARN or a raw value was provided for that method
        self.use_api_key_condition = CfnCondition(
            self,
            "UseApiKey",
            expression=Fn.condition_or(
                Fn.condition_not(
                    Fn.condition_equals(
                        self.prefect_api_key_secret_arn.value_as_string, ""
                    )
                ),
                Fn.condition_not(
                    Fn.condition_equals(self.prefect_api_key.value_as_string, "")
                ),
            ),
        )
        self.use_auth_string_condition = CfnCondition(
            self,
            "UseAuthString",
            expression=Fn.condition_or(
                Fn.condition_not(
                    Fn.condition_equals(
                        self.prefect_auth_string_secret_arn.value_as_string, ""
                    )
                ),
                Fn.condition_not(
                    Fn.condition_equals(self.prefect_auth_string.value_as_string, "")
                ),
            ),
        )
        # Conditions to check if we should create new secrets (auth method
        # chosen AND no pre-existing secret ARN supplied)
        self.create_new_api_key_secret_condition = CfnCondition(
            self,
            "CreateNewApiKeySecret",
            expression=Fn.condition_and(
                self.use_api_key_condition,
                Fn.condition_equals(
                    self.prefect_api_key_secret_arn.value_as_string, ""
                ),
            ),
        )
        self.create_new_auth_string_secret_condition = CfnCondition(
            self,
            "CreateNewAuthStringSecret",
            expression=Fn.condition_and(
                self.use_auth_string_condition,
                Fn.condition_equals(
                    self.prefect_auth_string_secret_arn.value_as_string, ""
                ),
            ),
        )
        # Condition to check if work queues are specified (an empty
        # CommaDelimitedList resolves to a single empty first element)
        self.has_work_queues_condition = CfnCondition(
            self,
            "HasWorkQueues",
            expression=Fn.condition_not(
                Fn.condition_equals(Fn.select(0, self.work_queues.value_as_list), "")
            ),
        )
        # Condition to check if we should use existing log group
        self.use_existing_log_group_condition = CfnCondition(
            self,
            "UseExistingLogGroup",
            expression=Fn.condition_not(
                Fn.condition_equals(self.existing_log_group_name.value_as_string, "")
            ),
        )
        # Condition to check if cluster identifier is an ARN (first
        # ":"-separated segment equals "arn")
        self.is_cluster_arn_condition = CfnCondition(
            self,
            "IsClusterArn",
            expression=Fn.condition_equals(
                Fn.select(
                    0, Fn.split(":", self.existing_cluster_identifier.value_as_string)
                ),
                "arn",
            ),
        )
        # Create the secrets if needed
        self.api_key_secret = self._create_api_key_secret()
        self.auth_string_secret = self._create_auth_string_secret()
        # Get existing resources
        self.vpc = self._get_existing_vpc()
        # Convert cluster identifier to ARN if needed
        self.cluster_arn = self._get_cluster_arn()
        self.cluster = self._get_existing_cluster()
        # Create infrastructure
        self.queue, self.dlq = self.create_sqs_infrastructure()
        self.eventbridge_rule = self.create_eventbridge_rule(
            self.queue, cluster_arn=self.cluster_arn
        )
        self.execution_role = self.create_task_execution_role()
        self.task_role = self.create_task_role(self.queue)
        self.log_group = self.create_log_group()
        self.task_definition = self.create_task_definition(
            self.execution_role, self.task_role, self.log_group, self.queue
        )
        self.service = self._create_service()
        self._setup_autoscaling()
def _get_existing_vpc(self) -> ec2.IVpc:
"""Import existing VPC."""
# For synthesis, we provide explicit AZs to avoid the list token warning
# In actual deployment, these will be resolved correctly
return ec2.Vpc.from_vpc_attributes(
self,
"ExistingVpc",
vpc_id=self.existing_vpc_id.value_as_string,
availability_zones=[
"us-east-1a",
"us-east-1b",
], # Will be overridden at deployment
)
def _get_cluster_arn(self) -> str:
"""Convert cluster identifier to ARN if it's just a name."""
cluster_identifier = self.existing_cluster_identifier.value_as_string
# If it already looks like an ARN, use it as-is
# Otherwise, construct ARN from cluster name
return Fn.condition_if(
self.is_cluster_arn_condition.logical_id,
cluster_identifier,
Fn.sub(
"arn:aws:ecs:${AWS::Region}:${AWS::AccountId}:cluster/${ClusterName}",
{"ClusterName": cluster_identifier},
),
).to_string()
def _get_existing_cluster(self) -> ecs.ICluster:
"""Import existing ECS cluster."""
# Extract cluster name from ARN or use the identifier if it's already a name
cluster_name = Fn.condition_if(
self.is_cluster_arn_condition.logical_id,
Fn.select(
1, Fn.split("/", self.existing_cluster_identifier.value_as_string)
),
self.existing_cluster_identifier.value_as_string,
).to_string()
return ecs.Cluster.from_cluster_attributes(
self,
"ExistingCluster",
cluster_name=cluster_name,
cluster_arn=self.cluster_arn,
vpc=self.vpc,
security_groups=[],
)
    def _create_service(self) -> ecs.FargateService:
        """Create ECS service in existing cluster.

        Builds the worker security group, imports the operator-supplied
        subnets, and runs the worker task definition as a Fargate service
        with no public IP. Outputs the service ARN and name.
        """
        # Create security group for the service
        security_group = ec2.SecurityGroup(
            self,
            "WorkerSecurityGroup",
            vpc=self.vpc,
            description="Security group for Prefect ECS workers",
            allow_all_outbound=True,
        )
        # Allow health check traffic
        # NOTE(review): open to 0.0.0.0/0 on 8080 -- intended only for the
        # container health endpoint; confirm this matches security policy.
        security_group.add_ingress_rule(
            peer=ec2.Peer.any_ipv4(),
            connection=ec2.Port.tcp(8080),
            description="Health check endpoint",
        )
        # Import existing subnets with proper attributes
        subnets = [
            ec2.Subnet.from_subnet_attributes(
                self,
                f"ExistingSubnet{i}",
                subnet_id=subnet_id,
                availability_zone=f"us-east-1{chr(97 + i)}",  # Will be resolved at deployment
                route_table_id=f"rtb-{subnet_id[-8:]}",  # Placeholder route table ID
            )
            for i, subnet_id in enumerate(self.existing_subnet_ids.value_as_list)
        ]
        # For CloudFormation synthesis, we can't use conditions directly in CDK
        # We'll set assign_public_ip based on the parameter value
        # This is a limitation when synthesizing without deployment context
        service = ecs.FargateService(
            self,
            "WorkerService",
            cluster=self.cluster,
            task_definition=self.task_definition,
            service_name=f"{self.work_pool_name.value_as_string}-workers",
            desired_count=self.desired_count.value_as_number,
            security_groups=[security_group],
            vpc_subnets=ec2.SubnetSelection(subnets=subnets),
            assign_public_ip=False,
            capacity_provider_strategies=[
                ecs.CapacityProviderStrategy(
                    capacity_provider="FARGATE",
                    weight=1,
                )
            ],
            enable_execute_command=True,  # Enable ECS Exec for debugging
            min_healthy_percent=100,  # Prevent tasks from stopping during deployments
        )
        CfnOutput(
            self,
            "ServiceArn",
            value=service.service_arn,
            description="ARN of the ECS service",
        )
        CfnOutput(
            self,
            "ServiceName",
            value=service.service_name,
            description="Name of the ECS service",
        )
        return service
def _setup_autoscaling(self) -> None:
"""Set up auto scaling for the service."""
scaling_target = self.service.auto_scale_task_count(
min_capacity=self.min_capacity.value_as_number,
max_capacity=self.max_capacity.value_as_number,
)
# Scale based on CPU utilization
scaling_target.scale_on_cpu_utilization(
"CpuScaling",
target_utilization_percent=70,
scale_in_cooldown=Duration.minutes(5),
scale_out_cooldown=Duration.minutes(2),
)
# Scale based on memory utilization
scaling_target.scale_on_memory_utilization(
"MemoryScaling",
target_utilization_percent=80,
scale_in_cooldown=Duration.minutes(5),
scale_out_cooldown=Duration.minutes(2),
)
def _create_api_key_secret(self) -> secretsmanager.Secret:
"""Create Secrets Manager secret for Prefect API key if needed."""
secret = secretsmanager.Secret(
self,
"PrefectApiKeySecret",
secret_name=f"{self.work_pool_name.value_as_string}-prefect-api-key",
description="Prefect API key for ECS worker",
secret_string_value=SecretValue.cfn_parameter(self.prefect_api_key),
)
# Apply condition so it's only created when needed
secret.node.default_child.cfn_options.condition = (
self.create_new_api_key_secret_condition
)
return secret
def _create_auth_string_secret(self) -> secretsmanager.Secret:
"""Create Secrets Manager secret for Prefect auth string if needed."""
secret = secretsmanager.Secret(
self,
"PrefectAuthStringSecret",
secret_name=f"{self.work_pool_name.value_as_string}-prefect-auth-string",
description="Prefect auth string for ECS worker (self-hosted servers)",
secret_string_value=SecretValue.cfn_parameter(self.prefect_auth_string),
)
# Apply condition so it's only created when needed
secret.node.default_child.cfn_options.condition = (
self.create_new_auth_string_secret_condition
)
return secret
def get_api_key_secret_arn(self) -> str:
"""Get the ARN of the API key secret (either existing or newly created)."""
return Fn.condition_if(
self.create_new_api_key_secret_condition.logical_id,
self.api_key_secret.secret_arn,
self.prefect_api_key_secret_arn.value_as_string,
).to_string()
def get_auth_string_secret_arn(self) -> str:
"""Get the ARN of the auth string secret (either existing or newly created)."""
return Fn.condition_if(
self.create_new_auth_string_secret_condition.logical_id,
self.auth_string_secret.secret_arn,
self.prefect_auth_string_secret_arn.value_as_string,
).to_string()
def get_active_secret_arn(self) -> str:
"""Get the ARN of whichever auth method is being used."""
return Fn.condition_if(
self.use_api_key_condition.logical_id,
self.get_api_key_secret_arn(),
self.get_auth_string_secret_arn(),
).to_string()
    def create_sqs_infrastructure(self) -> tuple[sqs.Queue, sqs.Queue]:
        """Create SQS queue and DLQ for ECS events.

        Returns:
            (queue, dlq): the main event queue and its dead-letter queue.
            Messages that fail delivery 3 times are moved to the DLQ.
        """
        # Dead Letter Queue: longer retention (14d) so failures can be inspected
        dlq = sqs.Queue(
            self,
            "EcsEventsDLQ",
            queue_name=f"{self.work_pool_name.value_as_string}-ecs-events-dlq",
            visibility_timeout=Duration.seconds(60),
            retention_period=Duration.days(14),
        )
        # Main Queue: 5-minute visibility timeout for consumers, 7-day retention
        queue = sqs.Queue(
            self,
            "EcsEventsQueue",
            queue_name=f"{self.work_pool_name.value_as_string}-ecs-events",
            visibility_timeout=Duration.seconds(300),
            dead_letter_queue=sqs.DeadLetterQueue(
                max_receive_count=3,
                queue=dlq,
            ),
            retention_period=Duration.days(7),
        )
        CfnOutput(
            self,
            "EcsEventsQueueUrl",
            value=queue.queue_url,
            description="URL of the SQS queue receiving ECS events",
        )
        CfnOutput(
            self,
            "EcsEventsQueueArn",
            value=queue.queue_arn,
            description="ARN of the SQS queue receiving ECS events",
        )
        return queue, dlq
    def create_eventbridge_rule(
        self, queue: sqs.Queue, cluster_arn: "str | None" = None
    ) -> events.Rule:
        """Create EventBridge rule for ECS task state changes.

        Args:
            queue: SQS queue targeted by matched events.
            cluster_arn: if given, only events from that cluster match;
                otherwise all "ECS Task State Change" events match.
                NOTE(review): a CDK token string is always truthy, so when
                called from __init__ the filtered branch is always taken.
        """
        # Use CDK's EventPattern class instead of raw dict
        if cluster_arn:
            event_pattern = events.EventPattern(
                source=["aws.ecs"],
                detail_type=["ECS Task State Change"],
                detail={
                    "clusterArn": [cluster_arn],
                },
            )
        else:
            event_pattern = events.EventPattern(
                source=["aws.ecs"],
                detail_type=["ECS Task State Change"],
                detail={},
            )
        rule = events.Rule(
            self,
            "EcsTaskStateChangeRule",
            rule_name=f"{self.work_pool_name.value_as_string}-ecs-task-events",
            description="Capture ECS task state changes for Prefect workers",
            event_pattern=event_pattern,
            targets=[events_targets.SqsQueue(queue)],
        )
        # Grant EventBridge permission to send messages to SQS
        queue.grant_send_messages(iam.ServicePrincipal("events.amazonaws.com"))
        CfnOutput(
            self,
            "EventBridgeRuleArn",
            value=rule.rule_arn,
            description="ARN of the EventBridge rule for ECS events",
        )
        return rule
    def create_task_execution_role(self) -> iam.Role:
        """Create IAM role for ECS task execution.

        The execution role is assumed by the ECS agent (not the container) to
        pull images and inject secrets at task start.

        Returns:
            Role with the AWS-managed task-execution policy plus read access
            to the active Prefect auth secret.
        """
        role = iam.Role(
            self,
            "EcsTaskExecutionRole",
            role_name=f"{self.work_pool_name.value_as_string}-task-execution-role",
            assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
            managed_policies=[
                iam.ManagedPolicy.from_managed_policy_arn(
                    self,
                    "AmazonECSTaskExecutionRolePolicy",
                    # Fn.sub resolves the partition (aws, aws-cn, aws-us-gov)
                    # so the template works in any partition.
                    Fn.sub(
                        "arn:${AWS::Partition}:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy"
                    ),
                )
            ],
        )
        # Grant access to the appropriate Prefect auth secret
        role.add_to_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "secretsmanager:GetSecretValue",
                ],
                resources=[self.get_active_secret_arn()],
            )
        )
        return role
    def create_task_role(self, queue: sqs.Queue) -> iam.Role:
        """Create IAM role for ECS tasks.

        This is the role the worker container itself runs under, as opposed to
        the execution role used by the ECS agent.

        Args:
            queue: SQS queue the worker consumes ECS events from.

        Returns:
            Role granting SQS consumption plus the ECS/EC2/Logs permissions
            the worker needs to launch and observe flow-run tasks.
        """
        role = iam.Role(
            self,
            "EcsTaskRole",
            role_name=f"{self.work_pool_name.value_as_string}-task-role",
            assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
        )
        # Grant access to SQS queue for consuming events
        queue.grant_consume_messages(role)
        # Grant basic ECS permissions for worker functionality
        # NOTE(review): resources=["*"] is broad; several of these ECS/EC2
        # Describe/List actions do not support resource-level scoping, but the
        # mutating ones (RunTask, StopTask, RegisterTaskDefinition) could be
        # narrowed — confirm before tightening.
        role.add_to_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "ecs:DescribeTasks",
                    "ecs:DescribeTaskDefinition",
                    "ecs:ListTasks",
                    "ecs:RunTask",
                    "ecs:StopTask",
                    "ecs:DescribeClusters",
                    "ecs:ListClusters",
                    "ecs:RegisterTaskDefinition",
                    "ecs:DeregisterTaskDefinition",
                    "ecs:ListTaskDefinitions",
                    "ecs:TagResource",
                    "ec2:DescribeNetworkInterfaces",
                    "ec2:DescribeSubnets",
                    "ec2:DescribeVpcs",
                    "ec2:DescribeSecurityGroups",
                    "logs:CreateLogGroup",
                    "logs:CreateLogStream",
                    "logs:PutLogEvents",
                    "logs:GetLogEvents",
                    "logs:DescribeLogStreams",
                ],
                resources=["*"],
            )
        )
        # Grant IAM PassRole permission for any role (required for dynamic task definitions)
        role.add_to_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=["iam:PassRole"],
                resources=["arn:aws:iam::*:role/*"],
            )
        )
        return role
    def create_log_group(self) -> logs.ILogGroup:
        """Create or reference CloudWatch log group for worker tasks.

        A new log group ``/ecs/<pool>/<suffix>`` is always declared, but its
        CloudFormation resource only materializes when the "use existing log
        group" condition is false; otherwise the user-supplied name wins.

        Returns:
            The (conditionally created) CDK log group construct.
        """
        # Generate unique suffix for log group name to prevent collisions
        # Extract first 8 chars of UUID from stack ID for true uniqueness across deployments
        unique_suffix = Fn.select(
            0, Fn.split("-", Fn.select(2, Fn.split("/", Stack.of(self).stack_id)))
        )
        # Create log group with unique name (will only be created when condition is true)
        # The leading "" in the join produces a leading "/" in the name.
        new_log_group = logs.LogGroup(
            self,
            "WorkerLogGroup",
            log_group_name=Fn.join(
                "/", ["", "ecs", self.work_pool_name.value_as_string, unique_suffix]
            ),
            retention=logs.RetentionDays.ONE_MONTH,  # Default, will be overridden by parameter
            removal_policy=RemovalPolicy.RETAIN,  # Preserve logs when stack is deleted
        )
        # Store the generated log group name for use in task definition
        self._generated_log_group_name = Fn.join(
            "/", ["", "ecs", self.work_pool_name.value_as_string, unique_suffix]
        )
        # Configure the underlying CloudFormation resource
        # Raw override because CDK's `retention` only accepts enum values,
        # not a CloudFormation parameter.
        cfn_log_group = new_log_group.node.default_child
        cfn_log_group.add_property_override(
            "RetentionInDays", self.log_retention_days.value_as_number
        )
        # Only create this log group resource when NOT using existing log group
        create_new_log_group_condition = CfnCondition(
            self,
            "CreateNewLogGroup",
            expression=Fn.condition_not(self.use_existing_log_group_condition),
        )
        cfn_log_group.cfn_options.condition = create_new_log_group_condition
        # Output the log group name conditionally
        CfnOutput(
            self,
            "LogGroupName",
            value=Fn.condition_if(
                self.use_existing_log_group_condition.logical_id,
                self.existing_log_group_name.value_as_string,
                self._generated_log_group_name,
            ).to_string(),
            description="CloudWatch log group for worker tasks",
        )
        # Output the created secret ARNs if we created them
        CfnOutput(
            self,
            "PrefectApiKeySecretArnOutput",
            value=self.get_api_key_secret_arn(),
            description="ARN of the Prefect API key secret",
            condition=self.create_new_api_key_secret_condition,
        )
        CfnOutput(
            self,
            "PrefectAuthStringSecretArnOutput",
            value=self.get_auth_string_secret_arn(),
            description="ARN of the Prefect auth string secret",
            condition=self.create_new_auth_string_secret_condition,
        )
        # Return the new log group (CDK will handle the conditional logic in CloudFormation)
        return new_log_group
def get_log_group_for_task_definition(self) -> str:
"""Get the appropriate log group name for the task definition."""
return Fn.condition_if(
self.use_existing_log_group_condition.logical_id,
self.existing_log_group_name.value_as_string,
self._generated_log_group_name,
).to_string()
    def create_task_definition(
        self,
        execution_role: iam.Role,
        task_role: iam.Role,
        # NOTE(review): `log_group` is not referenced in this body; logging is
        # wired via get_log_group_for_task_definition() below. Kept for
        # interface compatibility with callers.
        log_group: logs.LogGroup,
        queue: "sqs.Queue | None" = None,
    ) -> ecs.TaskDefinition:
        """Create ECS task definition for the worker.

        Args:
            execution_role: Role assumed by the ECS agent at task start.
            task_role: Role the worker container runs under.
            log_group: Log group construct (see NOTE above).
            queue: Optional SQS events queue; when given, its name is exposed
                to the container via an environment variable.

        Returns:
            The Fargate task definition for the Prefect worker container.
        """
        cpu_param = CfnParameter(
            self,
            "TaskCpu",
            type="Number",
            description="CPU units for the task (1024 = 1 vCPU)",
            default=1024,
        )
        memory_param = CfnParameter(
            self,
            "TaskMemory",
            type="Number",
            description="Memory for the task in MB",
            default=2048,
        )
        task_definition = ecs.TaskDefinition(
            self,
            "WorkerTaskDefinition",
            family=f"{self.work_pool_name.value_as_string}-worker",
            compatibility=ecs.Compatibility.FARGATE,
            cpu=cpu_param.value_as_string,
            memory_mib=memory_param.value_as_string,
            execution_role=execution_role,
            task_role=task_role,
        )
        # Build worker start command using shell to handle multiple --work-queue flags
        # Create base command template for substitution
        base_cmd_template = (
            "prefect worker start --type ecs --pool ${WorkPoolName} --with-healthcheck"
        )
        # Build the command with conditional work queues
        command_with_queues = Fn.condition_if(
            self.has_work_queues_condition.logical_id,
            # If work queues specified, add multiple --work-queue flags
            Fn.sub(
                base_cmd_template + " --work-queue ${WorkQueueList}",
                {
                    "WorkPoolName": self.work_pool_name.value_as_string,
                    "WorkQueueList": Fn.join(
                        " --work-queue ", self.work_queues.value_as_list
                    ),
                },
            ),
            # If no work queues, just the base command
            Fn.sub(
                base_cmd_template,
                {
                    "WorkPoolName": self.work_pool_name.value_as_string,
                },
            ),
        ).to_string()
        command_args = ["sh", "-c", command_with_queues]
        environment_vars = {
            "PREFECT_API_URL": self.prefect_api_url.value_as_string,
            "PREFECT_INTEGRATIONS_AWS_ECS_OBSERVER_ENABLED": "true",
        }
        # Add SQS queue name if provided
        if queue:
            environment_vars["PREFECT_INTEGRATIONS_AWS_ECS_OBSERVER_SQS_QUEUE_NAME"] = (
                queue.queue_name
            )
        # Add container (we'll handle auth method selection by post-processing the CloudFormation)
        task_definition.add_container(
            "worker",
            container_name="prefect-worker",
            image=ecs.ContainerImage.from_registry(self.docker_image.value_as_string),
            command=command_args,
            environment=environment_vars,
            # Logging will be configured via CloudFormation override below
            # Health check hits the worker's built-in healthcheck server.
            health_check=ecs.HealthCheck(
                command=[
                    "CMD-SHELL",
                    "python -c \"import urllib.request; urllib.request.urlopen('http://localhost:8080/health', timeout=5)\"",
                ],
                interval=Duration.seconds(30),
                timeout=Duration.seconds(5),
                retries=3,
                start_period=Duration.seconds(60),
            ),
            port_mappings=[
                ecs.PortMapping(
                    container_port=8080,
                    protocol=ecs.Protocol.TCP,
                    name="health",
                )
            ],
        )
        # Manually configure the secrets in the CloudFormation template since CDK doesn't support conditional secrets
        cfn_task_definition = task_definition.node.default_child
        # Get the container definition and add conditional secrets
        # API key takes precedence - if provided, use it exclusively
        # Auth string is only used if NO API key is provided
        cfn_task_definition.add_property_override(
            "ContainerDefinitions.0.Secrets",
            Fn.condition_if(
                self.use_api_key_condition.logical_id,
                # If using API key, only include API key secret
                [
                    {
                        "Name": "PREFECT_API_KEY",
                        "ValueFrom": self.get_api_key_secret_arn(),
                    }
                ],
                Fn.condition_if(
                    self.use_auth_string_condition.logical_id,
                    # If not using API key but using auth string, only include auth string
                    [
                        {
                            "Name": "PREFECT_AUTH_STRING",
                            "ValueFrom": self.get_auth_string_secret_arn(),
                        }
                    ],
                    # If using neither, omit the Secrets property entirely
                    Fn.ref("AWS::NoValue"),
                ),
            ),
        )
        # Configure logging to use the appropriate log group (existing or new)
        cfn_task_definition.add_property_override(
            "ContainerDefinitions.0.LogConfiguration",
            {
                "LogDriver": "awslogs",
                "Options": {
                    "awslogs-group": self.get_log_group_for_task_definition(),
                    "awslogs-region": {"Ref": "AWS::Region"},
                    "awslogs-stream-prefix": "ecs",
                },
            },
        )
        CfnOutput(
            self,
            "TaskDefinitionArn",
            value=task_definition.task_definition_arn,
            description="ARN of the ECS task definition for the worker",
        )
        # Add outputs for work pool configuration
        CfnOutput(
            self,
            "TaskExecutionRoleArn",
            value=execution_role.role_arn,
            description="ARN of the ECS task execution role",
        )
        CfnOutput(
            self,
            "ClusterArn",
            value=self.cluster_arn,
            description="ARN of the ECS cluster",
        )
        CfnOutput(
            self,
            "VpcId",
            value=self.existing_vpc_id.value_as_string,
            description="VPC ID where the worker is deployed",
        )
        CfnOutput(
            self,
            "SubnetIds",
            value=Fn.join(",", self.existing_subnet_ids.value_as_list),
            description="Comma-separated list of subnet IDs",
        )
        return task_definition
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-aws/infra/worker/service_stack.py",
"license": "Apache License 2.0",
"lines": 815,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/prefect/cli/transfer/_dag.py | """
Execution DAG for managing resource transfer dependencies.
This module provides a pure execution engine that:
- Stores nodes by UUID for deduplication
- Implements Kahn's algorithm for topological sorting
- Manages concurrent execution with worker pools
- Handles failure propagation (skip descendants)
"""
from __future__ import annotations
import asyncio
import uuid
from collections import defaultdict, deque
from dataclasses import dataclass, field
from enum import Enum
from typing import Any, Awaitable, Callable, Coroutine, Sequence
import anyio
from anyio import create_task_group
from anyio.abc import TaskGroup
from prefect.cli.transfer._exceptions import TransferSkipped
from prefect.cli.transfer._migratable_resources import MigratableProtocol
from prefect.logging import get_logger
logger = get_logger(__name__)
class NodeState(Enum):
    """State of a node during traversal."""

    # Waiting on one or more dependencies to complete.
    PENDING = "pending"
    # All dependencies completed; eligible to start.
    READY = "ready"
    # Currently being processed by a worker.
    IN_PROGRESS = "in_progress"
    # Processed successfully.
    COMPLETED = "completed"
    # Processing raised an unexpected exception.
    FAILED = "failed"
    # Deliberately not processed (TransferSkipped, or upstream failure).
    SKIPPED = "skipped"
@dataclass
class NodeStatus:
    """Tracks the status of a node during traversal."""

    # The resource this status describes.
    node: MigratableProtocol
    # Current lifecycle state (see NodeState).
    state: NodeState = NodeState.PENDING
    # IDs of nodes that depend on this node (outgoing edges).
    dependents: set[uuid.UUID] = field(default_factory=set)
    # IDs of nodes this node depends on (incoming edges).
    dependencies: set[uuid.UUID] = field(default_factory=set)
    # Exception captured when the node fails or is skipped.
    error: Exception | None = None
class TransferDAG:
    """
    Execution DAG for managing resource transfer dependencies.
    Uses Kahn's algorithm for topological sorting and concurrent execution.
    See: https://en.wikipedia.org/wiki/Topological_sorting#Kahn%27s_algorithm
    The DAG ensures resources are transferred in dependency order while
    maximizing parallelism for independent resources.
    """

    def __init__(self):
        # All nodes keyed by their source UUID (deduplicated in add_node).
        self.nodes: dict[uuid.UUID, MigratableProtocol] = {}
        # node id -> ids of nodes it depends on (incoming edges).
        self._dependencies: dict[uuid.UUID, set[uuid.UUID]] = defaultdict(set)
        # node id -> ids of nodes that depend on it (outgoing edges).
        self._dependents: dict[uuid.UUID, set[uuid.UUID]] = defaultdict(set)
        # Per-node execution state used by execute_concurrent.
        self._status: dict[uuid.UUID, NodeStatus] = {}
        # Guards READY-state transitions during concurrent execution.
        self._lock = asyncio.Lock()

    def add_node(self, node: MigratableProtocol) -> uuid.UUID:
        """
        Add a node to the graph, deduplicating by source ID.
        Args:
            node: Resource to add to the graph
        Returns:
            The node's source UUID
        """
        # Idempotent: re-adding an existing source_id keeps the first node.
        if node.source_id not in self.nodes:
            self.nodes[node.source_id] = node
            self._status[node.source_id] = NodeStatus(node)
        return node.source_id

    def add_edge(self, dependent_id: uuid.UUID, dependency_id: uuid.UUID) -> None:
        """
        Add a dependency edge where dependent depends on dependency.

        Both nodes must already have been registered via add_node; otherwise
        the ``self._status[...]`` lookups below raise KeyError.

        Args:
            dependent_id: ID of the resource that has a dependency
            dependency_id: ID of the resource being depended upon
        """
        # Idempotent: a duplicate edge is a no-op.
        if dependency_id in self._dependencies[dependent_id]:
            return
        self._dependencies[dependent_id].add(dependency_id)
        self._dependents[dependency_id].add(dependent_id)
        self._status[dependent_id].dependencies.add(dependency_id)
        self._status[dependency_id].dependents.add(dependent_id)

    async def build_from_roots(self, roots: Sequence[MigratableProtocol]) -> None:
        """
        Build the graph from root resources by recursively discovering dependencies.
        Args:
            roots: Collection of root resources to start discovery from
        """
        visited: set[uuid.UUID] = set()

        async def visit(resource: MigratableProtocol):
            # The membership check and add happen before any await, so under
            # asyncio's single-threaded scheduling each resource is visited
            # exactly once even though children are gathered concurrently.
            if resource.source_id in visited:
                return
            visited.add(resource.source_id)
            rid = self.add_node(resource)
            visit_coroutines: list[Coroutine[Any, Any, None]] = []
            for dep in await resource.get_dependencies():
                did = self.add_node(dep)
                self.add_edge(rid, did)
                visit_coroutines.append(visit(dep))
            await asyncio.gather(*visit_coroutines)

        visit_coroutines = [visit(r) for r in roots]
        await asyncio.gather(*visit_coroutines)

    def has_cycles(self) -> bool:
        """
        Check if the graph has cycles using three-color DFS.
        Uses the classic three-color algorithm where:
        - WHITE (0): Unvisited node
        - GRAY (1): Currently being explored (in DFS stack)
        - BLACK (2): Fully explored
        A cycle exists if we encounter a GRAY node during traversal (back edge).
        See: https://en.wikipedia.org/wiki/Depth-first_search#Vertex_orderings

        Note: the DFS is recursive, so Python's recursion limit bounds the
        longest dependency chain this can handle.

        Returns:
            True if the graph contains cycles, False otherwise
        """
        WHITE, GRAY, BLACK = 0, 1, 2
        color = {node_id: WHITE for node_id in self.nodes}

        def visit(node_id: uuid.UUID) -> bool:
            if color[node_id] == GRAY:
                return True  # Back edge found - cycle detected
            if color[node_id] == BLACK:
                return False  # Already fully explored
            color[node_id] = GRAY
            for dep_id in self._dependencies[node_id]:
                if visit(dep_id):
                    return True
            color[node_id] = BLACK
            return False

        for node_id in self.nodes:
            if color[node_id] == WHITE:
                if visit(node_id):
                    return True
        return False

    def get_execution_layers(
        self, *, _assume_acyclic: bool = False
    ) -> list[list[MigratableProtocol]]:
        """
        Get execution layers using Kahn's algorithm.
        Each layer contains nodes that can be executed in parallel.
        Kahn's algorithm repeatedly removes nodes with no dependencies,
        forming layers of concurrent work.
        See: https://en.wikipedia.org/wiki/Topological_sorting#Kahn%27s_algorithm
        Args:
            _assume_acyclic: Skip cycle check if caller already verified
        Returns:
            List of layers, each containing nodes that can run in parallel
        Raises:
            ValueError: If the graph contains cycles
        """
        if not _assume_acyclic and self.has_cycles():
            raise ValueError("Cannot sort DAG with cycles")
        in_degree = {n: len(self._dependencies[n]) for n in self.nodes}
        layers: list[list[MigratableProtocol]] = []
        # Start with all nodes that have no dependencies.
        cur = [n for n in self.nodes if in_degree[n] == 0]
        while cur:
            layers.append([self.nodes[n] for n in cur])
            nxt: list[uuid.UUID] = []
            for n in cur:
                # "Remove" this node: decrement in-degree of its dependents;
                # any that reach zero form the next layer.
                for d in self._dependents[n]:
                    in_degree[d] -= 1
                    if in_degree[d] == 0:
                        nxt.append(d)
            cur = nxt
        return layers

    async def execute_concurrent(
        self,
        process_node: Callable[[MigratableProtocol], Awaitable[Any]],
        max_workers: int = 10,
        skip_on_failure: bool = True,
    ) -> dict[uuid.UUID, Any]:
        """
        Execute the DAG concurrently using Kahn's algorithm.
        Processes nodes in topological order while maximizing parallelism.
        When a node completes, its dependents are checked to see if they're
        ready to execute (all dependencies satisfied).

        Note: dependents of a node that raises TransferSkipped are never
        started (a SKIPPED dependency never becomes COMPLETED, so the READY
        check below never passes) and are absent from the result dict.

        Args:
            process_node: Async function to process each node
            max_workers: Maximum number of concurrent workers
            skip_on_failure: Whether to skip descendants when a node fails
        Returns:
            Dictionary mapping node IDs to their results (or exceptions)
        Raises:
            ValueError: If the graph contains cycles
        """
        if self.has_cycles():
            raise ValueError("Cannot execute DAG with cycles")
        # Layers are computed only for debug logging of the execution plan;
        # actual scheduling below is event-driven, not layer-by-layer.
        layers = self.get_execution_layers(_assume_acyclic=True)
        logger.debug(f"Execution plan has {len(layers)} layers")
        for i, layer in enumerate(layers):
            # Count each type in the layer
            type_counts: dict[str, int] = {}
            for node in layer:
                node_type = type(node).__name__
                type_counts[node_type] = type_counts.get(node_type, 0) + 1
            type_summary = ", ".join(
                [f"{count} {type_name}" for type_name, count in type_counts.items()]
            )
            logger.debug(f"Layer {i}: ({type_summary})")
        # Initialize with nodes that have no dependencies
        ready_queue: list[uuid.UUID] = []
        for nid in self.nodes:
            if not self._dependencies[nid]:
                ready_queue.append(nid)
                self._status[nid].state = NodeState.READY
        results: dict[uuid.UUID, Any] = {}
        # Bounds the number of nodes processed at once.
        limiter = anyio.CapacityLimiter(max_workers)
        # IDs currently scheduled, to avoid double-starting a dependent.
        processing: set[uuid.UUID] = set()

        async def worker(nid: uuid.UUID, tg: TaskGroup):
            """Process a single node."""
            node = self.nodes[nid]
            # Check if node was skipped after being queued
            if self._status[nid].state != NodeState.READY:
                logger.debug(f"Node {node} was skipped before execution")
                return
            async with limiter:
                try:
                    self._status[nid].state = NodeState.IN_PROGRESS
                    logger.debug(f"Processing {node}")
                    res = await process_node(node)
                    results[nid] = res
                    self._status[nid].state = NodeState.COMPLETED
                    logger.debug(f"Completed {node}")
                    # Mark dependents as ready if all their dependencies are satisfied
                    async with self._lock:
                        for did in self._status[nid].dependents:
                            dst = self._status[did]
                            if dst.state == NodeState.PENDING:
                                if all(
                                    self._status[d].state == NodeState.COMPLETED
                                    for d in dst.dependencies
                                ):
                                    dst.state = NodeState.READY
                                    # Start the newly ready task immediately
                                    if did not in processing:
                                        processing.add(did)
                                        tg.start_soon(worker, did, tg)
                except TransferSkipped as e:
                    # Deliberate skip signalled by process_node; the exception
                    # instance itself is stored as the result.
                    results[nid] = e
                    self._status[nid].state = NodeState.SKIPPED
                    self._status[nid].error = e
                    logger.debug(f"Skipped {node}: {e}")
                except Exception as e:
                    results[nid] = e
                    self._status[nid].state = NodeState.FAILED
                    self._status[nid].error = e
                    logger.debug(f"Failed to process {node}: {e}")
                    if skip_on_failure:
                        # Skip all descendants of the failed node
                        # (BFS through the dependents edges).
                        to_skip = deque([nid])
                        seen_failed: set[uuid.UUID] = set()
                        while to_skip:
                            cur = to_skip.popleft()
                            if cur in seen_failed:
                                continue
                            seen_failed.add(cur)
                            for did in self._status[cur].dependents:
                                st = self._status[did]
                                # Skip nodes that haven't started yet
                                if st.state in {NodeState.PENDING, NodeState.READY}:
                                    st.state = NodeState.SKIPPED
                                    results[did] = TransferSkipped(
                                        "Skipped due to upstream resource failure"
                                    )
                                    logger.debug(
                                        f"Skipped {self.nodes[did]} due to upstream failure"
                                    )
                                    to_skip.append(did)
                finally:
                    processing.discard(nid)

        async with create_task_group() as tg:
            # Start processing all initially ready nodes
            for nid in ready_queue:
                if self._status[nid].state == NodeState.READY:
                    processing.add(nid)
                    tg.start_soon(worker, nid, tg)
        return results

    def get_statistics(self) -> dict[str, Any]:
        """
        Get statistics about the DAG structure.
        Returns:
            Dictionary with node counts, edge counts, and cycle detection
        """
        deps = self._dependencies
        return {
            "total_nodes": len(self.nodes),
            "total_edges": sum(len(v) for v in deps.values()),
            "max_in_degree": max((len(deps[n]) for n in self.nodes), default=0),
            "max_out_degree": max(
                (len(self._dependents[n]) for n in self.nodes), default=0
            ),
            "has_cycles": self.has_cycles(),
        }
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/cli/transfer/_dag.py",
"license": "Apache License 2.0",
"lines": 286,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/prefect/cli/transfer/_exceptions.py | class TransferSkipped(Exception):
"""
Exception raised when a resource is skipped during transfer.
"""
def __init__(self, reason: str):
self.reason = reason
def __str__(self) -> str:
return self.reason
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/cli/transfer/_exceptions.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/prefect:src/prefect/cli/transfer/_migratable_resources/automations.py | from __future__ import annotations
import uuid
from typing_extensions import Self
from prefect.cli.transfer._exceptions import TransferSkipped
from prefect.cli.transfer._migratable_resources import construct_migratable_resource
from prefect.cli.transfer._migratable_resources.base import (
MigratableProtocol,
MigratableResource,
)
from prefect.cli.transfer._migratable_resources.blocks import MigratableBlockDocument
from prefect.cli.transfer._migratable_resources.deployments import MigratableDeployment
from prefect.cli.transfer._migratable_resources.work_pools import MigratableWorkPool
from prefect.cli.transfer._migratable_resources.work_queues import MigratableWorkQueue
from prefect.client.orchestration import get_client
from prefect.client.schemas.filters import (
WorkPoolFilter,
WorkPoolFilterId,
)
from prefect.events.actions import (
AutomationAction,
CallWebhook,
DeploymentAction,
SendNotification,
WorkPoolAction,
WorkQueueAction,
)
from prefect.events.schemas.automations import Automation, AutomationCore
from prefect.exceptions import ObjectNotFound
from prefect.logging import get_logger
logger = get_logger(__name__)
class MigratableAutomation(MigratableResource[Automation]):
    """Wraps an automation for transfer, resolving the resources its actions
    reference (deployments, work pools/queues, other automations, and block
    documents) as migration dependencies."""

    # Cache of wrappers keyed by source automation ID (one wrapper per source).
    _instances: dict[uuid.UUID, Self] = {}

    def __init__(self, automation: Automation):
        self.source_automation = automation
        self.destination_automation: Automation | None = None
        # Source resource ID -> wrapper; populated lazily by get_dependencies
        # and consumed by migrate for ID remapping.
        self._dependencies: dict[uuid.UUID, MigratableProtocol] = {}

    @property
    def source_id(self) -> uuid.UUID:
        return self.source_automation.id

    @property
    def destination_id(self) -> uuid.UUID | None:
        return self.destination_automation.id if self.destination_automation else None

    @classmethod
    async def construct(cls, obj: Automation) -> Self:
        # Deduplicate: at most one wrapper per source automation ID.
        if obj.id in cls._instances:
            return cls._instances[obj.id]
        instance = cls(obj)
        cls._instances[obj.id] = instance
        return instance

    @classmethod
    async def get_instance(
        cls, id: uuid.UUID
    ) -> "MigratableResource[Automation] | None":
        if id in cls._instances:
            return cls._instances[id]
        return None

    async def get_dependencies(self) -> "list[MigratableProtocol]":
        """Resolve every resource referenced by this automation's actions.

        Each branch follows the same pattern: reuse a cached migratable
        wrapper when one exists, otherwise read the referenced object from the
        source API and wrap it. Objects that no longer exist on the source are
        logged and silently dropped from the dependency map.
        """
        # Cached after the first call.
        if self._dependencies:
            return list(self._dependencies.values())
        async with get_client() as client:
            for action in self.source_automation.actions:
                # Actions targeting a specific deployment.
                if (
                    isinstance(action, DeploymentAction)
                    and action.deployment_id is not None
                ):
                    if dependency := await MigratableDeployment.get_instance(
                        id=action.deployment_id
                    ):
                        self._dependencies[action.deployment_id] = dependency
                    else:
                        try:
                            deployment = await client.read_deployment(
                                action.deployment_id
                            )
                        except ObjectNotFound:
                            logger.warning(
                                "Deployment %s referenced by automation %r no longer exists, skipping dependency",
                                action.deployment_id,
                                self.source_automation.name,
                            )
                        else:
                            self._dependencies[
                                deployment.id
                            ] = await construct_migratable_resource(deployment)
                # Actions targeting a specific work pool.
                elif (
                    isinstance(action, WorkPoolAction)
                    and action.work_pool_id is not None
                ):
                    # TODO: Find a better way to get a work pool by id
                    if dependency := await MigratableWorkPool.get_instance(
                        id=action.work_pool_id
                    ):
                        self._dependencies[action.work_pool_id] = dependency
                    else:
                        work_pool = await client.read_work_pools(
                            work_pool_filter=WorkPoolFilter(
                                id=WorkPoolFilterId(any_=[action.work_pool_id])
                            )
                        )
                        if work_pool:
                            self._dependencies[
                                work_pool[0].id
                            ] = await construct_migratable_resource(work_pool[0])
                # Actions targeting a specific work queue.
                elif (
                    isinstance(action, WorkQueueAction)
                    and action.work_queue_id is not None
                ):
                    if dependency := await MigratableWorkQueue.get_instance(
                        id=action.work_queue_id
                    ):
                        self._dependencies[action.work_queue_id] = dependency
                    else:
                        try:
                            work_queue = await client.read_work_queue(
                                action.work_queue_id
                            )
                        except ObjectNotFound:
                            logger.warning(
                                "Work queue %s referenced by automation %r no longer exists, skipping dependency",
                                action.work_queue_id,
                                self.source_automation.name,
                            )
                        else:
                            self._dependencies[
                                work_queue.id
                            ] = await construct_migratable_resource(work_queue)
                # Actions targeting another automation.
                elif (
                    isinstance(action, AutomationAction)
                    and action.automation_id is not None
                ):
                    if dependency := await MigratableAutomation.get_instance(
                        id=action.automation_id
                    ):
                        self._dependencies[action.automation_id] = dependency
                    else:
                        automation = await client.find_automation(action.automation_id)
                        if automation:
                            self._dependencies[
                                automation.id
                            ] = await construct_migratable_resource(automation)
                # Webhook calls reference a block document holding the URL.
                elif isinstance(action, CallWebhook):
                    if dependency := await MigratableBlockDocument.get_instance(
                        id=action.block_document_id
                    ):
                        self._dependencies[action.block_document_id] = dependency
                    else:
                        try:
                            block_document = await client.read_block_document(
                                action.block_document_id
                            )
                        except ObjectNotFound:
                            logger.warning(
                                "Block document %s referenced by automation %r no longer exists, skipping dependency",
                                action.block_document_id,
                                self.source_automation.name,
                            )
                        else:
                            self._dependencies[
                                block_document.id
                            ] = await construct_migratable_resource(block_document)
                # Notifications reference a block document for the notifier.
                elif isinstance(action, SendNotification):
                    if dependency := await MigratableBlockDocument.get_instance(
                        id=action.block_document_id
                    ):
                        self._dependencies[action.block_document_id] = dependency
                    else:
                        try:
                            block_document = await client.read_block_document(
                                action.block_document_id
                            )
                        except ObjectNotFound:
                            logger.warning(
                                "Block document %s referenced by automation %r no longer exists, skipping dependency",
                                action.block_document_id,
                                self.source_automation.name,
                            )
                        else:
                            self._dependencies[
                                block_document.id
                            ] = await construct_migratable_resource(block_document)
        return list(self._dependencies.values())

    async def migrate(self) -> None:
        """Create this automation on the destination, remapping every
        action's referenced IDs to the already-migrated destination IDs.

        Raises:
            TransferSkipped: If an automation with the same name already
                exists on the destination (it is recorded as the destination
                automation before raising).
        """
        async with get_client() as client:
            automations = await client.read_automations_by_name(
                name=self.source_automation.name
            )
            if automations:
                self.destination_automation = automations[0]
                raise TransferSkipped("Already exists")
            else:
                # Deep copy via serialization so ID rewrites below don't
                # mutate the source automation's actions.
                automation_copy = AutomationCore.model_validate(
                    self.source_automation.model_dump(mode="json")
                )
                # NOTE(review): the first four branches index _dependencies
                # directly and would raise KeyError if get_dependencies
                # dropped the referenced object (ObjectNotFound path); the
                # block-document branches use .get() instead — confirm whether
                # the asymmetry is intentional.
                for action in automation_copy.actions:
                    if (
                        isinstance(action, DeploymentAction)
                        and action.deployment_id is not None
                    ):
                        action.deployment_id = self._dependencies[
                            action.deployment_id
                        ].destination_id
                    elif (
                        isinstance(action, WorkPoolAction)
                        and action.work_pool_id is not None
                    ):
                        action.work_pool_id = self._dependencies[
                            action.work_pool_id
                        ].destination_id
                    elif (
                        isinstance(action, WorkQueueAction)
                        and action.work_queue_id is not None
                    ):
                        action.work_queue_id = self._dependencies[
                            action.work_queue_id
                        ].destination_id
                    elif (
                        isinstance(action, AutomationAction)
                        and action.automation_id is not None
                    ):
                        action.automation_id = self._dependencies[
                            action.automation_id
                        ].destination_id
                    elif isinstance(action, CallWebhook):
                        if destination_block_document_id := getattr(
                            self._dependencies.get(action.block_document_id),
                            "destination_id",
                            None,
                        ):
                            action.block_document_id = destination_block_document_id
                    elif isinstance(action, SendNotification):
                        if destination_block_document_id := getattr(
                            self._dependencies.get(action.block_document_id),
                            "destination_id",
                            None,
                        ):
                            action.block_document_id = destination_block_document_id
                automation_id = await client.create_automation(
                    automation=automation_copy
                )
                self.destination_automation = await client.read_automation(
                    automation_id=automation_id
                )
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/cli/transfer/_migratable_resources/automations.py",
"license": "Apache License 2.0",
"lines": 242,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/prefect/cli/transfer/_migratable_resources/base.py | from __future__ import annotations
import abc
import uuid
from typing import Generic, Protocol, TypeVar, Union
from prefect.client.schemas.objects import (
BlockDocument,
BlockSchema,
BlockType,
Flow,
Variable,
WorkPool,
WorkQueue,
)
from prefect.client.schemas.responses import (
DeploymentResponse,
GlobalConcurrencyLimitResponse,
)
from prefect.events.schemas.automations import Automation
# Union of all client-side schema types the transfer machinery knows how to
# migrate between workspaces.
MigratableType = Union[
    WorkPool,
    WorkQueue,
    DeploymentResponse,
    Flow,
    BlockType,
    BlockSchema,
    BlockDocument,
    Automation,
    GlobalConcurrencyLimitResponse,
    Variable,
]
# Type variable bound to the migratable schema types above.
T = TypeVar("T", bound=MigratableType)
class MigratableProtocol(Protocol):
    """Structural interface required of every transferable resource.

    The transfer DAG depends only on this protocol: a stable source ID, a
    destination ID (set after migration), dependency discovery, and the
    migrate operation itself.
    """

    @property
    def source_id(self) -> uuid.UUID: ...

    @property
    def destination_id(self) -> uuid.UUID | None: ...

    async def get_dependencies(self) -> list["MigratableProtocol"]: ...

    async def migrate(self) -> None: ...
class MigratableResource(Generic[T], abc.ABC):
    """Abstract base class for a resource of schema type ``T`` that can be
    transferred between workspaces.

    Concrete subclasses wrap one source object, resolve the other resources
    it depends on, and perform the actual migration.
    """

    @property
    @abc.abstractmethod
    def source_id(self) -> uuid.UUID: ...

    @property
    @abc.abstractmethod
    def destination_id(self) -> uuid.UUID | None: ...

    # Using this construct method because we may want to persist a serialized version of the object
    # to disk and reload it later to avoid using too much memory.
    @classmethod
    @abc.abstractmethod
    async def construct(cls, obj: T) -> "MigratableResource[T]": ...

    @abc.abstractmethod
    async def get_dependencies(self) -> "list[MigratableProtocol]": ...

    @classmethod
    @abc.abstractmethod
    async def get_instance(cls, id: uuid.UUID) -> "MigratableResource[T] | None": ...

    @abc.abstractmethod
    async def migrate(self) -> None: ...

    def __str__(self) -> str:
        return f"{type(self).__name__}(source_id={self.source_id})"
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/cli/transfer/_migratable_resources/base.py",
"license": "Apache License 2.0",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/prefect:src/prefect/cli/transfer/_migratable_resources/blocks.py | from __future__ import annotations
import uuid
from typing import Any, cast
from typing_extensions import Self
from prefect.cli.transfer._exceptions import TransferSkipped
from prefect.cli.transfer._migratable_resources import construct_migratable_resource
from prefect.cli.transfer._migratable_resources.base import (
MigratableProtocol,
MigratableResource,
)
from prefect.client.orchestration import get_client
from prefect.client.schemas.actions import (
BlockDocumentCreate,
BlockSchemaCreate,
BlockTypeCreate,
)
from prefect.client.schemas.objects import (
BlockDocument,
BlockSchema,
BlockType,
)
from prefect.exceptions import (
ObjectAlreadyExists,
)
class MigratableBlockType(MigratableResource[BlockType]):
    """Wraps a block type so it can be transferred between workspaces."""

    # Cache of wrappers keyed by source block type ID (one wrapper per source).
    _instances: dict[uuid.UUID, Self] = {}

    def __init__(self, block_type: BlockType):
        self.source_block_type = block_type
        self.destination_block_type: BlockType | None = None

    @property
    def source_id(self) -> uuid.UUID:
        return self.source_block_type.id

    @property
    def destination_id(self) -> uuid.UUID | None:
        destination = self.destination_block_type
        return destination.id if destination else None

    @classmethod
    async def construct(cls, obj: BlockType) -> Self:
        # Deduplicate: at most one wrapper per source block type ID.
        cached = cls._instances.get(obj.id)
        if cached is not None:
            return cached
        wrapper = cls(obj)
        cls._instances[obj.id] = wrapper
        return wrapper

    @classmethod
    async def get_instance(
        cls, id: uuid.UUID
    ) -> "MigratableResource[BlockType] | None":
        return cls._instances.get(id)

    async def get_dependencies(self) -> "list[MigratableProtocol]":
        # Block types are self-contained; nothing must migrate before them.
        return []

    async def migrate(self) -> None:
        """Create the block type on the destination workspace.

        Raises:
            TransferSkipped: If a block type with the same slug already exists
                on the destination (it is recorded as the destination block
                type before raising).
        """
        async with get_client() as client:
            try:
                created = await client.create_block_type(
                    block_type=BlockTypeCreate(
                        name=self.source_block_type.name,
                        slug=self.source_block_type.slug,
                    ),
                )
            except ObjectAlreadyExists:
                # Record the pre-existing destination block type, then report
                # the skip to the transfer machinery.
                self.destination_block_type = await client.read_block_type_by_slug(
                    self.source_block_type.slug
                )
                raise TransferSkipped("Already exists")
            else:
                self.destination_block_type = created
class MigratableBlockSchema(MigratableResource[BlockSchema]):
    """Wraps a ``BlockSchema`` for transfer from a source to a destination
    Prefect instance, resolving its block type and any referenced block
    schemas as dependencies.
    """

    # Cache of wrappers keyed by source block schema id so each schema is
    # wrapped (and migrated) only once per process.
    _instances: dict[uuid.UUID, Self] = {}
    def __init__(self, block_schema: BlockSchema):
        self.source_block_schema = block_schema
        self.destination_block_schema: BlockSchema | None = None
        # Resolved dependencies keyed by their source-side id; populated
        # lazily by `get_dependencies`.
        self._dependencies: dict[uuid.UUID, MigratableProtocol] = {}
    @property
    def source_id(self) -> uuid.UUID:
        """Id of the block schema on the source instance."""
        return self.source_block_schema.id
    @property
    def destination_id(self) -> uuid.UUID | None:
        """Id of the migrated schema, or ``None`` before migration."""
        return (
            self.destination_block_schema.id if self.destination_block_schema else None
        )
    @classmethod
    async def construct(cls, obj: BlockSchema) -> Self:
        """Return the cached wrapper for ``obj``, creating one if needed."""
        if obj.id in cls._instances:
            return cls._instances[obj.id]
        instance = cls(obj)
        cls._instances[obj.id] = instance
        return instance
    @classmethod
    async def get_instance(
        cls, id: uuid.UUID
    ) -> "MigratableResource[BlockSchema] | None":
        """Return the cached wrapper for ``id``, or ``None`` if not seen."""
        if id in cls._instances:
            return cls._instances[id]
        return None
    async def get_dependencies(self) -> "list[MigratableProtocol]":
        """Resolve and cache this schema's dependencies.

        Dependencies are the schema's block type plus every block schema
        referenced from ``fields["block_schema_references"]``.

        Raises:
            ValueError: if the schema carries neither a block type object
                nor a block type id.
        """
        if self._dependencies:
            return list(self._dependencies.values())
        async with get_client() as client:
            # Prefer the embedded block type object; otherwise fall back to
            # fetching the type by id.
            if self.source_block_schema.block_type is not None:
                if dependency := await MigratableBlockType.get_instance(
                    id=self.source_block_schema.block_type.id
                ):
                    self._dependencies[self.source_block_schema.block_type.id] = (
                        dependency
                    )
                else:
                    self._dependencies[
                        self.source_block_schema.block_type.id
                    ] = await construct_migratable_resource(
                        self.source_block_schema.block_type
                    )
            elif self.source_block_schema.block_type_id is not None:
                if dependency := await MigratableBlockType.get_instance(
                    id=self.source_block_schema.block_type_id
                ):
                    self._dependencies[self.source_block_schema.block_type_id] = (
                        dependency
                    )
                else:
                    response = await client.request(
                        "GET",
                        "/block_types/{id}",
                        params={"id": self.source_block_schema.block_type_id},
                    )
                    block_type = BlockType.model_validate(response.json())
                    self._dependencies[
                        block_type.id
                    ] = await construct_migratable_resource(block_type)
            else:
                raise ValueError("Block schema has no associated block type")
            # Each reference value may be a single reference dict or a list
            # of them; handle both shapes.
            block_schema_references: dict[str, dict[str, Any]] = (
                self.source_block_schema.fields.get("block_schema_references", {})
            )
            for block_schema_reference in block_schema_references.values():
                if isinstance(block_schema_reference, list):
                    for nested_block_schema_reference in block_schema_reference:
                        if block_schema_checksum := cast(
                            dict[str, str], nested_block_schema_reference
                        ).get("block_schema_checksum"):
                            block_schema = await client.read_block_schema_by_checksum(
                                block_schema_checksum
                            )
                            if dependency := await MigratableBlockSchema.get_instance(
                                id=block_schema.id
                            ):
                                self._dependencies[block_schema.id] = dependency
                            else:
                                self._dependencies[
                                    block_schema.id
                                ] = await construct_migratable_resource(block_schema)
                else:
                    if block_schema_checksum := block_schema_reference.get(
                        "block_schema_checksum"
                    ):
                        block_schema = await client.read_block_schema_by_checksum(
                            block_schema_checksum
                        )
                        if dependency := await MigratableBlockSchema.get_instance(
                            id=block_schema.id
                        ):
                            self._dependencies[block_schema.id] = dependency
                        else:
                            self._dependencies[
                                block_schema.id
                            ] = await construct_migratable_resource(block_schema)
        return list(self._dependencies.values())
    async def migrate(self) -> None:
        """Create the block schema on the destination instance.

        Raises:
            ValueError: if the block type dependency was not resolved.
            TransferSkipped: if the schema already exists (the existing
                schema is recorded as the destination first).
        """
        if self.source_block_schema.block_type_id is None:
            raise ValueError("Block schema has no associated block type")
        if (
            destination_block_type := self._dependencies.get(
                self.source_block_schema.block_type_id
            )
        ) is None:
            raise ValueError("Unable to find destination block type")
        async with get_client() as client:
            try:
                self.destination_block_schema = await client.create_block_schema(
                    block_schema=BlockSchemaCreate(
                        fields=self.source_block_schema.fields,
                        block_type_id=destination_block_type.destination_id,
                        capabilities=self.source_block_schema.capabilities,
                        version=self.source_block_schema.version,
                    ),
                )
            except ObjectAlreadyExists:
                # Resolve the existing destination schema by checksum before
                # signaling the skip to the caller.
                self.destination_block_schema = (
                    await client.read_block_schema_by_checksum(
                        self.source_block_schema.checksum
                    )
                )
                raise TransferSkipped("Already exists")
class MigratableBlockDocument(MigratableResource[BlockDocument]):
    """Wraps a ``BlockDocument`` for transfer from a source to a destination
    Prefect instance, resolving its block type, block schema, and any
    referenced block documents as dependencies.
    """

    # Cache of wrappers keyed by source block document id so each document
    # is wrapped (and migrated) only once per process.
    _instances: dict[uuid.UUID, Self] = {}
    def __init__(self, block_document: BlockDocument):
        self.source_block_document = block_document
        self.destination_block_document: BlockDocument | None = None
        # Resolved dependencies keyed by their source-side id; populated
        # lazily by `get_dependencies`.
        self._dependencies: dict[uuid.UUID, MigratableProtocol] = {}
    @property
    def source_id(self) -> uuid.UUID:
        """Id of the block document on the source instance."""
        return self.source_block_document.id
    @property
    def destination_id(self) -> uuid.UUID | None:
        """Id of the migrated document, or ``None`` before migration."""
        return (
            self.destination_block_document.id
            if self.destination_block_document
            else None
        )
    @classmethod
    async def construct(cls, obj: BlockDocument) -> Self:
        """Return the cached wrapper for ``obj``, creating one if needed."""
        if obj.id in cls._instances:
            return cls._instances[obj.id]
        instance = cls(obj)
        cls._instances[obj.id] = instance
        return instance
    @classmethod
    async def get_instance(
        cls, id: uuid.UUID
    ) -> "MigratableResource[BlockDocument] | None":
        """Return the cached wrapper for ``id``, or ``None`` if not seen."""
        if id in cls._instances:
            return cls._instances[id]
        return None
    async def get_dependencies(self) -> "list[MigratableProtocol]":
        """Resolve and cache this document's dependencies: its block type,
        its block schema, and any block documents it references.
        """
        if self._dependencies:
            return list(self._dependencies.values())
        # TODO: When we write serialized versions of the objects to disk, we should have a way to
        # use a client, but read from disk if the object has already been fetched.
        async with get_client() as client:
            # Prefer the embedded block type object; otherwise fall back to
            # fetching the type by id.
            if self.source_block_document.block_type is not None:
                if dependency := await MigratableBlockType.get_instance(
                    id=self.source_block_document.block_type.id
                ):
                    self._dependencies[self.source_block_document.block_type.id] = (
                        dependency
                    )
                else:
                    self._dependencies[
                        self.source_block_document.block_type.id
                    ] = await construct_migratable_resource(
                        self.source_block_document.block_type
                    )
            else:
                if dependency := await MigratableBlockType.get_instance(
                    id=self.source_block_document.block_type_id
                ):
                    self._dependencies[self.source_block_document.block_type_id] = (
                        dependency
                    )
                else:
                    response = await client.request(
                        "GET",
                        "/block_types/{id}",
                        params={"id": self.source_block_document.block_type_id},
                    )
                    block_type = BlockType.model_validate(response.json())
                    self._dependencies[
                        block_type.id
                    ] = await construct_migratable_resource(block_type)
            # Same pattern for the block schema: use the embedded object or
            # fetch by id.
            if self.source_block_document.block_schema is not None:
                if dependency := await MigratableBlockSchema.get_instance(
                    id=self.source_block_document.block_schema.id
                ):
                    self._dependencies[self.source_block_document.block_schema.id] = (
                        dependency
                    )
                else:
                    self._dependencies[
                        self.source_block_document.block_schema.id
                    ] = await construct_migratable_resource(
                        self.source_block_document.block_schema
                    )
            else:
                if dependency := await MigratableBlockSchema.get_instance(
                    id=self.source_block_document.block_schema_id
                ):
                    self._dependencies[self.source_block_document.block_schema_id] = (
                        dependency
                    )
                else:
                    response = await client.request(
                        "GET",
                        "/block_schemas/{id}",
                        params={"id": self.source_block_document.block_schema_id},
                    )
                    block_schema = BlockSchema.model_validate(response.json())
                    self._dependencies[
                        block_schema.id
                    ] = await construct_migratable_resource(block_schema)
            # Nested block documents referenced by this document must also
            # be transferred first.
            if self.source_block_document.block_document_references:
                for (
                    block_document_reference
                ) in self.source_block_document.block_document_references.values():
                    if block_document_id := block_document_reference.get(
                        "block_document_id"
                    ):
                        if dependency := await MigratableBlockDocument.get_instance(
                            id=block_document_id
                        ):
                            self._dependencies[block_document_id] = dependency
                        else:
                            block_document = await client.read_block_document(
                                block_document_id
                            )
                            self._dependencies[
                                block_document.id
                            ] = await construct_migratable_resource(block_document)
        return list(self._dependencies.values())
    async def migrate(self) -> None:
        """Create the block document on the destination instance.

        Raises:
            ValueError: if the block type or block schema dependency was
                not resolved/migrated.
            TransferSkipped: if the document already exists (the existing
                document is recorded as the destination first).
        """
        if (
            destination_block_type := self._dependencies.get(
                self.source_block_document.block_type_id
            )
        ) is None or not destination_block_type.destination_id:
            raise ValueError("Unable to find destination block type")
        if (
            destination_block_schema := self._dependencies.get(
                self.source_block_document.block_schema_id
            )
        ) is None or not destination_block_schema.destination_id:
            raise ValueError("Unable to find destination block schema")
        async with get_client() as client:
            try:
                # TODO: Check if data needs to be written differently to maintain composition
                self.destination_block_document = await client.create_block_document(
                    block_document=BlockDocumentCreate(
                        name=self.source_block_document.name,
                        block_type_id=destination_block_type.destination_id,
                        block_schema_id=destination_block_schema.destination_id,
                        data=self.source_block_document.data,
                    ),
                )
            except ObjectAlreadyExists:
                if self.source_block_document.name is None:
                    # This is technically impossible, but our typing thinks it's possible
                    raise ValueError(
                        "Block document has no name, which should be impossible. "
                        "Please report this as a bug."
                    )
                # Look up the existing document by (block type slug, name);
                # fetch the slug if we only hold the type id.
                if self.source_block_document.block_type is not None:
                    block_type_slug = self.source_block_document.block_type.slug
                else:
                    # TODO: Add real client methods for places where we use `client.request`
                    response = await client.request(
                        "GET",
                        "/block_types/{id}",
                        params={"id": self.source_block_document.block_type_id},
                    )
                    block_type = BlockType.model_validate(response.json())
                    block_type_slug = block_type.slug
                self.destination_block_document = (
                    await client.read_block_document_by_name(
                        block_type_slug=block_type_slug,
                        name=self.source_block_document.name,
                    )
                )
                raise TransferSkipped("Already exists")
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/cli/transfer/_migratable_resources/blocks.py",
"license": "Apache License 2.0",
"lines": 356,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/prefect/cli/transfer/_migratable_resources/concurrency_limits.py | from __future__ import annotations
import uuid
from typing_extensions import Self
from prefect.cli.transfer._exceptions import TransferSkipped
from prefect.cli.transfer._migratable_resources.base import (
MigratableProtocol,
MigratableResource,
)
from prefect.client.orchestration import get_client
from prefect.client.schemas.actions import (
GlobalConcurrencyLimitCreate,
)
from prefect.client.schemas.responses import (
GlobalConcurrencyLimitResponse,
)
from prefect.exceptions import (
ObjectAlreadyExists,
)
class MigratableGlobalConcurrencyLimit(
    MigratableResource[GlobalConcurrencyLimitResponse]
):
    """Wraps a global concurrency limit for transfer from a source to a
    destination Prefect instance."""

    # Cache of wrappers keyed by source limit id so each limit is wrapped
    # (and migrated) only once per process.
    _instances: dict[uuid.UUID, Self] = {}

    def __init__(self, global_concurrency_limit: GlobalConcurrencyLimitResponse):
        self.source_global_concurrency_limit = global_concurrency_limit
        self.destination_global_concurrency_limit: (
            GlobalConcurrencyLimitResponse | None
        ) = None

    @property
    def source_id(self) -> uuid.UUID:
        """Id of the limit on the source instance."""
        return self.source_global_concurrency_limit.id

    @property
    def destination_id(self) -> uuid.UUID | None:
        """Id of the migrated limit, or ``None`` before migration."""
        if self.destination_global_concurrency_limit is None:
            return None
        return self.destination_global_concurrency_limit.id

    @classmethod
    async def construct(cls, obj: GlobalConcurrencyLimitResponse) -> Self:
        """Return the cached wrapper for ``obj``, creating one if needed."""
        cached = cls._instances.get(obj.id)
        if cached is not None:
            return cached
        wrapper = cls(obj)
        cls._instances[obj.id] = wrapper
        return wrapper

    @classmethod
    async def get_instance(
        cls, id: uuid.UUID
    ) -> "MigratableResource[GlobalConcurrencyLimitResponse] | None":
        """Return the cached wrapper for ``id``, or ``None`` if not seen."""
        return cls._instances.get(id)

    async def get_dependencies(self) -> "list[MigratableProtocol]":
        # Global concurrency limits have no upstream resources.
        return []

    async def migrate(self) -> None:
        """Create the limit on the destination instance.

        Raises:
            TransferSkipped: if the limit already exists (the existing
                limit is recorded as the destination first).
        """
        source = self.source_global_concurrency_limit
        async with get_client() as client:
            try:
                await client.create_global_concurrency_limit(
                    concurrency_limit=GlobalConcurrencyLimitCreate(
                        name=source.name,
                        limit=source.limit,
                        active=source.active,
                        active_slots=source.active_slots,
                    ),
                )
                # The create call does not return the created object, so
                # read it back by name.
                self.destination_global_concurrency_limit = (
                    await client.read_global_concurrency_limit_by_name(source.name)
                )
            except ObjectAlreadyExists:
                self.destination_global_concurrency_limit = (
                    await client.read_global_concurrency_limit_by_name(source.name)
                )
                raise TransferSkipped("Already exists")
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/cli/transfer/_migratable_resources/concurrency_limits.py",
"license": "Apache License 2.0",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/prefect:src/prefect/cli/transfer/_migratable_resources/deployments.py | from __future__ import annotations
import uuid
from typing_extensions import Self
from prefect.cli.transfer._exceptions import TransferSkipped
from prefect.cli.transfer._migratable_resources import construct_migratable_resource
from prefect.cli.transfer._migratable_resources.base import (
MigratableProtocol,
MigratableResource,
)
from prefect.cli.transfer._migratable_resources.blocks import MigratableBlockDocument
from prefect.cli.transfer._migratable_resources.flows import MigratableFlow
from prefect.cli.transfer._migratable_resources.work_pools import MigratableWorkPool
from prefect.cli.transfer._migratable_resources.work_queues import MigratableWorkQueue
from prefect.client.orchestration import get_client
from prefect.client.schemas.actions import DeploymentScheduleCreate
from prefect.client.schemas.responses import DeploymentResponse
from prefect.exceptions import (
ObjectAlreadyExists,
ObjectLimitReached,
)
class MigratableDeployment(MigratableResource[DeploymentResponse]):
    """Wraps a deployment for transfer from a source to a destination
    Prefect instance.

    Dependencies (flow, work queue, work pool, and storage/infrastructure
    block documents) are resolved lazily by `get_dependencies` and must be
    migrated before `migrate` is called.
    """

    # Cache of wrappers keyed by source deployment id so each deployment is
    # wrapped (and migrated) only once per process.
    _instances: dict[uuid.UUID, Self] = {}

    def __init__(self, deployment: DeploymentResponse):
        self.source_deployment = deployment
        self.destination_deployment: DeploymentResponse | None = None
        # Resolved dependencies keyed by their source-side id.
        self._dependencies: dict[uuid.UUID, MigratableProtocol] = {}

    @property
    def source_id(self) -> uuid.UUID:
        """Id of the deployment on the source instance."""
        return self.source_deployment.id

    @property
    def destination_id(self) -> uuid.UUID | None:
        """Id of the migrated deployment, or ``None`` before migration."""
        return self.destination_deployment.id if self.destination_deployment else None

    @classmethod
    async def construct(cls, obj: DeploymentResponse) -> Self:
        """Return the cached wrapper for ``obj``, creating one if needed."""
        if obj.id in cls._instances:
            return cls._instances[obj.id]
        instance = cls(obj)
        cls._instances[obj.id] = instance
        return instance

    @classmethod
    async def get_instance(
        cls, id: uuid.UUID
    ) -> "MigratableResource[DeploymentResponse] | None":
        """Return the cached wrapper for ``id``, or ``None`` if not seen."""
        if id in cls._instances:
            return cls._instances[id]
        return None

    def _destination_id_for(self, source_id: uuid.UUID) -> uuid.UUID | None:
        """Destination id of a resolved dependency, or ``None`` if the
        dependency is unknown or not yet migrated."""
        return getattr(self._dependencies.get(source_id), "destination_id", None)

    async def get_dependencies(self) -> "list[MigratableProtocol]":
        """Resolve and cache the deployment's dependencies: its flow, work
        queue, work pool, and storage/infrastructure block documents.
        """
        if self._dependencies:
            return list(self._dependencies.values())
        async with get_client() as client:
            if dependency := await MigratableFlow.get_instance(
                id=self.source_deployment.flow_id
            ):
                self._dependencies[self.source_deployment.flow_id] = dependency
            else:
                flow = await client.read_flow(self.source_deployment.flow_id)
                self._dependencies[
                    self.source_deployment.flow_id
                ] = await construct_migratable_resource(flow)
            if self.source_deployment.work_queue_id is not None:
                if dependency := await MigratableWorkQueue.get_instance(
                    id=self.source_deployment.work_queue_id
                ):
                    self._dependencies[self.source_deployment.work_queue_id] = (
                        dependency
                    )
                else:
                    work_queue = await client.read_work_queue(
                        self.source_deployment.work_queue_id
                    )
                    self._dependencies[
                        work_queue.id
                    ] = await construct_migratable_resource(work_queue)
            if self.source_deployment.work_pool_name is not None:
                # Work pools are looked up by name; key the dependency by
                # the pool's source id for uniform lookup.
                if dependency := await MigratableWorkPool.get_instance_by_name(
                    name=self.source_deployment.work_pool_name
                ):
                    self._dependencies[dependency.source_id] = dependency
                else:
                    work_pool = await client.read_work_pool(
                        self.source_deployment.work_pool_name
                    )
                    self._dependencies[
                        work_pool.id
                    ] = await construct_migratable_resource(work_pool)
            if self.source_deployment.storage_document_id is not None:
                if dependency := await MigratableBlockDocument.get_instance(
                    id=self.source_deployment.storage_document_id
                ):
                    self._dependencies[self.source_deployment.storage_document_id] = (
                        dependency
                    )
                else:
                    storage_document = await client.read_block_document(
                        self.source_deployment.storage_document_id
                    )
                    self._dependencies[
                        storage_document.id
                    ] = await construct_migratable_resource(storage_document)
            if self.source_deployment.infrastructure_document_id is not None:
                if dependency := await MigratableBlockDocument.get_instance(
                    id=self.source_deployment.infrastructure_document_id
                ):
                    self._dependencies[
                        self.source_deployment.infrastructure_document_id
                    ] = dependency
                else:
                    infrastructure_document = await client.read_block_document(
                        self.source_deployment.infrastructure_document_id
                    )
                    self._dependencies[
                        infrastructure_document.id
                    ] = await construct_migratable_resource(infrastructure_document)
            if self.source_deployment.pull_steps:
                # TODO: Figure out how to find block document references in pull steps
                pass
        return list(self._dependencies.values())

    async def migrate(self) -> None:
        """Create the deployment on the destination instance.

        Raises:
            ValueError: if a required dependency has not been migrated.
            TransferSkipped: when the destination's deployment limit is
                reached or the deployment already exists.
        """
        async with get_client() as client:
            try:
                destination_flow_id = self._destination_id_for(
                    self.source_deployment.flow_id
                )
                if destination_flow_id is None:
                    raise ValueError("Unable to find destination flow")
                # BUGFIX: the previous walrus-in-condition form
                # (`if src_id and (dest := ...) is None: raise / else: dest = None`)
                # fell into the `else` whenever the dependency WAS resolved,
                # overwriting the resolved id with None. Resolve explicitly.
                if self.source_deployment.storage_document_id is not None:
                    destination_storage_document_id = self._destination_id_for(
                        self.source_deployment.storage_document_id
                    )
                    if destination_storage_document_id is None:
                        raise ValueError("Unable to find destination storage document")
                else:
                    destination_storage_document_id = None
                if self.source_deployment.infrastructure_document_id is not None:
                    destination_infrastructure_document_id = self._destination_id_for(
                        self.source_deployment.infrastructure_document_id
                    )
                    if destination_infrastructure_document_id is None:
                        raise ValueError(
                            "Unable to find destination infrastructure document"
                        )
                else:
                    destination_infrastructure_document_id = None
                destination_deployment_id = await client.create_deployment(
                    flow_id=destination_flow_id,
                    name=self.source_deployment.name,
                    version=self.source_deployment.version,
                    version_info=self.source_deployment.version_info,
                    schedules=[
                        DeploymentScheduleCreate(
                            schedule=schedule.schedule,
                            active=schedule.active,
                            max_scheduled_runs=schedule.max_scheduled_runs,
                            parameters=schedule.parameters,
                            slug=schedule.slug,
                        )
                        for schedule in self.source_deployment.schedules
                    ],
                    concurrency_limit=self.source_deployment.concurrency_limit,
                    concurrency_options=self.source_deployment.concurrency_options,
                    parameters=self.source_deployment.parameters,
                    description=self.source_deployment.description,
                    work_queue_name=self.source_deployment.work_queue_name,
                    work_pool_name=self.source_deployment.work_pool_name,
                    tags=self.source_deployment.tags,
                    storage_document_id=destination_storage_document_id,
                    path=self.source_deployment.path,
                    entrypoint=self.source_deployment.entrypoint,
                    infrastructure_document_id=destination_infrastructure_document_id,
                    parameter_openapi_schema=self.source_deployment.parameter_openapi_schema,
                    paused=self.source_deployment.paused,
                    pull_steps=self.source_deployment.pull_steps,
                    enforce_parameter_schema=self.source_deployment.enforce_parameter_schema,
                    job_variables=self.source_deployment.job_variables,
                    branch=self.source_deployment.branch,
                    base=self.source_deployment.base,
                    root=self.source_deployment.root,
                )
                self.destination_deployment = await client.read_deployment(
                    destination_deployment_id
                )
            except ObjectLimitReached:
                raise TransferSkipped("Deployment limit reached (upgrade tier)")
            except ObjectAlreadyExists:
                # NOTE(review): looks up the existing deployment by the
                # *source* id — confirm ids are preserved across transfer.
                self.destination_deployment = await client.read_deployment(
                    self.source_deployment.id
                )
                raise TransferSkipped("Already exists")
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/cli/transfer/_migratable_resources/deployments.py",
"license": "Apache License 2.0",
"lines": 207,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/prefect/cli/transfer/_migratable_resources/flows.py | from __future__ import annotations
import uuid
from typing_extensions import Self
from prefect.cli.transfer._exceptions import TransferSkipped
from prefect.cli.transfer._migratable_resources.base import (
MigratableProtocol,
MigratableResource,
)
from prefect.client.orchestration import get_client
from prefect.client.schemas.actions import FlowCreate
from prefect.client.schemas.filters import FlowFilter, FlowFilterName
from prefect.client.schemas.objects import Flow
from prefect.exceptions import (
ObjectAlreadyExists,
)
class MigratableFlow(MigratableResource[Flow]):
    """Wraps a ``Flow`` for transfer from a source to a destination Prefect
    instance."""

    # Cache of wrappers keyed by source flow id so each flow is wrapped
    # (and migrated) only once per process.
    _instances: dict[uuid.UUID, Self] = {}

    def __init__(self, flow: Flow):
        self.source_flow = flow
        self.destination_flow: Flow | None = None

    @property
    def source_id(self) -> uuid.UUID:
        """Id of the flow on the source instance."""
        return self.source_flow.id

    @property
    def destination_id(self) -> uuid.UUID | None:
        """Id of the migrated flow, or ``None`` before migration."""
        if self.destination_flow is None:
            return None
        return self.destination_flow.id

    @classmethod
    async def construct(cls, obj: Flow) -> Self:
        """Return the cached wrapper for ``obj``, creating one if needed."""
        cached = cls._instances.get(obj.id)
        if cached is not None:
            return cached
        wrapper = cls(obj)
        cls._instances[obj.id] = wrapper
        return wrapper

    @classmethod
    async def get_instance(cls, id: uuid.UUID) -> "MigratableResource[Flow] | None":
        """Return the cached wrapper for ``id``, or ``None`` if not seen."""
        return cls._instances.get(id)

    async def get_dependencies(self) -> "list[MigratableProtocol]":
        # Flows have no upstream resources.
        return []

    async def migrate(self) -> None:
        """Create the flow on the destination instance.

        Raises:
            TransferSkipped: if the flow already exists (the existing flow
                is recorded as the destination first).
            RuntimeError: if creation conflicts but the existing flow
                cannot be resolved by name.
        """
        payload = FlowCreate(
            name=self.source_flow.name,
            tags=self.source_flow.tags,
            labels=self.source_flow.labels,
        )
        async with get_client() as client:
            try:
                # We don't have a pre-built client method that accepts tags
                # and labels, so hit the endpoint directly.
                response = await client.request(
                    "POST", "/flows/", json=payload.model_dump(mode="json")
                )
                self.destination_flow = Flow.model_validate(response.json())
            except ObjectAlreadyExists:
                # A flow with this name exists already; resolve it by name.
                matches = await client.read_flows(
                    flow_filter=FlowFilter(
                        name=FlowFilterName(any_=[self.source_flow.name])
                    )
                )
                if matches and len(matches) == 1:
                    self.destination_flow = matches[0]
                else:
                    raise RuntimeError("Unable to find destination flow")
                raise TransferSkipped("Already exists")
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/cli/transfer/_migratable_resources/flows.py",
"license": "Apache License 2.0",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/prefect:src/prefect/cli/transfer/_migratable_resources/variables.py | from __future__ import annotations
import uuid
from typing_extensions import Self
from prefect.cli.transfer._exceptions import TransferSkipped
from prefect.cli.transfer._migratable_resources.base import (
MigratableProtocol,
MigratableResource,
)
from prefect.client.orchestration import get_client
from prefect.client.schemas.actions import (
VariableCreate,
)
from prefect.client.schemas.objects import (
Variable,
)
from prefect.exceptions import (
ObjectAlreadyExists,
)
class MigratableVariable(MigratableResource[Variable]):
    """Wraps a ``Variable`` for transfer from a source to a destination
    Prefect instance."""

    # Cache of wrappers keyed by source variable id so each variable is
    # wrapped (and migrated) only once per process.
    _instances: dict[uuid.UUID, Self] = {}

    def __init__(self, variable: Variable):
        self.source_variable = variable
        self.destination_variable: Variable | None = None

    @property
    def source_id(self) -> uuid.UUID:
        """Id of the variable on the source instance."""
        return self.source_variable.id

    @property
    def destination_id(self) -> uuid.UUID | None:
        """Id of the migrated variable, or ``None`` before migration."""
        if self.destination_variable is None:
            return None
        return self.destination_variable.id

    @classmethod
    async def construct(cls, obj: Variable) -> Self:
        """Return the cached wrapper for ``obj``, creating one if needed."""
        cached = cls._instances.get(obj.id)
        if cached is not None:
            return cached
        wrapper = cls(obj)
        cls._instances[obj.id] = wrapper
        return wrapper

    @classmethod
    async def get_instance(cls, id: uuid.UUID) -> "MigratableResource[Variable] | None":
        """Return the cached wrapper for ``id``, or ``None`` if not seen."""
        return cls._instances.get(id)

    async def get_dependencies(self) -> "list[MigratableProtocol]":
        # Variables have no upstream resources.
        return []

    async def migrate(self) -> None:
        """Create the variable on the destination instance.

        Raises:
            TransferSkipped: if the variable already exists (the existing
                variable is recorded as the destination first).
        """
        source = self.source_variable
        async with get_client() as client:
            try:
                self.destination_variable = await client.create_variable(
                    variable=VariableCreate(
                        name=source.name,
                        value=source.value,
                        tags=source.tags,
                    ),
                )
            except ObjectAlreadyExists:
                self.destination_variable = await client.read_variable_by_name(
                    source.name
                )
                raise TransferSkipped("Already exists")
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/cli/transfer/_migratable_resources/variables.py",
"license": "Apache License 2.0",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/prefect:src/prefect/cli/transfer/_migratable_resources/work_pools.py | from __future__ import annotations
import uuid
from typing_extensions import Self
from prefect.cli.transfer._exceptions import TransferSkipped
from prefect.cli.transfer._migratable_resources import construct_migratable_resource
from prefect.cli.transfer._migratable_resources.base import (
MigratableProtocol,
MigratableResource,
)
from prefect.cli.transfer._migratable_resources.blocks import MigratableBlockDocument
from prefect.client.orchestration import get_client
from prefect.client.schemas.actions import (
WorkPoolCreate,
)
from prefect.client.schemas.objects import (
WorkPool,
WorkQueue,
)
from prefect.exceptions import (
ObjectAlreadyExists,
ObjectUnsupported,
)
class MigratableWorkPool(MigratableResource[WorkPool]):
    """Wraps a ``WorkPool`` (together with its default work queue) for
    transfer from a source to a destination Prefect instance.
    """

    # Cache of wrappers keyed by source work pool id so each pool is
    # wrapped (and migrated) only once per process.
    _instances: dict[uuid.UUID, Self] = {}
    def __init__(self, work_pool: WorkPool, default_queue: WorkQueue):
        self.source_work_pool = work_pool
        self.source_default_queue = default_queue
        self.destination_work_pool: WorkPool | None = None
        # Resolved dependencies; populated lazily by `get_dependencies`.
        self._dependencies: list[MigratableProtocol] = []
    @property
    def source_id(self) -> uuid.UUID:
        """Id of the work pool on the source instance."""
        return self.source_work_pool.id
    @property
    def destination_id(self) -> uuid.UUID | None:
        """Id of the migrated work pool, or ``None`` before migration."""
        return self.destination_work_pool.id if self.destination_work_pool else None
    @classmethod
    async def construct(cls, obj: WorkPool) -> Self:
        """Return the cached wrapper for ``obj``, creating one if needed.

        Also fetches the pool's default work queue so its settings can be
        applied to the destination pool after creation.
        """
        if obj.id in cls._instances:
            return cls._instances[obj.id]
        async with get_client() as client:
            default_queue = await client.read_work_queue(obj.default_queue_id)
        instance = cls(obj, default_queue)
        cls._instances[obj.id] = instance
        return instance
    @classmethod
    async def get_instance(cls, id: uuid.UUID) -> "MigratableResource[WorkPool] | None":
        """Return the cached wrapper for ``id``, or ``None`` if not seen."""
        if id in cls._instances:
            return cls._instances[id]
        return None
    @classmethod
    async def get_instance_by_name(
        cls, name: str
    ) -> "MigratableResource[WorkPool] | None":
        """Return the cached wrapper whose source pool has ``name``, or
        ``None`` if not seen (linear scan of the cache)."""
        for instance in cls._instances.values():
            if instance.source_work_pool.name == name:
                return instance
        return None
    async def get_dependencies(self) -> "list[MigratableProtocol]":
        """Resolve and cache the pool's dependencies: currently only the
        default result storage block document, when one is configured.
        """
        if self._dependencies:
            return self._dependencies
        async with get_client() as client:
            if (
                self.source_work_pool.storage_configuration.default_result_storage_block_id
                is not None
            ):
                if dependency := await MigratableBlockDocument.get_instance(
                    id=self.source_work_pool.storage_configuration.default_result_storage_block_id
                ):
                    self._dependencies.append(dependency)
                else:
                    result_storage_block = await client.read_block_document(
                        self.source_work_pool.storage_configuration.default_result_storage_block_id
                    )
                    self._dependencies.append(
                        await construct_migratable_resource(result_storage_block)
                    )
            if (
                self.source_work_pool.storage_configuration.bundle_upload_step
                is not None
            ):
                # TODO: Figure out how to find block document references in bundle upload step
                pass
            if (
                self.source_work_pool.storage_configuration.bundle_execution_step
                is not None
            ):
                # TODO: Figure out how to find block document references in bundle download step
                pass
        return self._dependencies
    async def migrate(self) -> None:
        """Create the work pool on the destination and apply the source
        default queue's settings to the destination's default queue.

        Raises:
            TransferSkipped: for managed pools, push pools targeting a
                non-Cloud destination, tier limitations, or pools that
                already exist.
        """
        async with get_client() as client:
            # Skip managed pools always - they're cloud-specific infrastructure
            if self.source_work_pool.is_managed_pool:
                raise TransferSkipped("Skipped managed pool (cloud-specific)")
            # Allow push pools only if destination is Cloud
            if self.source_work_pool.is_push_pool:
                from prefect.client.base import ServerType
                if client.server_type != ServerType.CLOUD:
                    raise TransferSkipped("Skipped push pool (requires Prefect Cloud)")
            try:
                self.destination_work_pool = await client.create_work_pool(
                    work_pool=WorkPoolCreate(
                        name=self.source_work_pool.name,
                        type=self.source_work_pool.type,
                        base_job_template=self.source_work_pool.base_job_template,
                        is_paused=self.source_work_pool.is_paused,
                        concurrency_limit=self.source_work_pool.concurrency_limit,
                        storage_configuration=self.source_work_pool.storage_configuration,
                    ),
                )
            except ObjectUnsupported:
                raise TransferSkipped("Destination requires Standard/Pro tier")
            except ObjectAlreadyExists:
                self.destination_work_pool = await client.read_work_pool(
                    self.source_work_pool.name
                )
                raise TransferSkipped("Already exists")
            # Update the default queue after successful creation
            await client.update_work_queue(
                id=self.destination_work_pool.default_queue_id,
                description=self.source_default_queue.description,
                priority=self.source_default_queue.priority,
                concurrency_limit=self.source_default_queue.concurrency_limit,
            )
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/cli/transfer/_migratable_resources/work_pools.py",
"license": "Apache License 2.0",
"lines": 124,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/prefect/cli/transfer/_migratable_resources/work_queues.py | from __future__ import annotations
import uuid
from typing_extensions import Self
from prefect.cli.transfer._exceptions import TransferSkipped
from prefect.cli.transfer._migratable_resources import construct_migratable_resource
from prefect.cli.transfer._migratable_resources.base import (
MigratableProtocol,
MigratableResource,
)
from prefect.cli.transfer._migratable_resources.work_pools import MigratableWorkPool
from prefect.client.orchestration import get_client
from prefect.client.schemas.filters import WorkQueueFilter, WorkQueueFilterName
from prefect.client.schemas.objects import (
WorkQueue,
)
from prefect.exceptions import (
ObjectAlreadyExists,
)
class MigratableWorkQueue(MigratableResource[WorkQueue]):
    """Wraps a ``WorkQueue`` for transfer from a source to a destination
    Prefect instance.

    The queue's owning work pool (when it has one) is its only dependency
    and must be transferred first.
    """

    # Cache of wrappers keyed by source work queue id so each queue is
    # wrapped (and migrated) only once per process.
    _instances: dict[uuid.UUID, Self] = {}

    def __init__(self, work_queue: WorkQueue):
        self.source_work_queue = work_queue
        self.destination_work_queue: WorkQueue | None = None
        # Resolved dependencies; populated lazily by `get_dependencies`.
        self._dependencies: list[MigratableProtocol] = []

    @property
    def source_id(self) -> uuid.UUID:
        """Id of the work queue on the source instance."""
        return self.source_work_queue.id

    @property
    def destination_id(self) -> uuid.UUID | None:
        """Id of the migrated queue, or ``None`` before migration."""
        return self.destination_work_queue.id if self.destination_work_queue else None

    @classmethod
    async def construct(cls, obj: WorkQueue) -> Self:
        """Return the cached wrapper for ``obj``, creating one if needed."""
        if obj.id in cls._instances:
            return cls._instances[obj.id]
        instance = cls(obj)
        cls._instances[obj.id] = instance
        return instance

    @classmethod
    async def get_instance(
        cls, id: uuid.UUID
    ) -> "MigratableResource[WorkQueue] | None":
        """Return the cached wrapper for ``id``, or ``None`` if not seen."""
        if id in cls._instances:
            return cls._instances[id]
        return None

    async def get_dependencies(self) -> "list[MigratableProtocol]":
        """Resolve and cache the owning work pool as a dependency."""
        if self._dependencies:
            return self._dependencies
        async with get_client() as client:
            if self.source_work_queue.work_pool_name is not None:
                if dependency := await MigratableWorkPool.get_instance_by_name(
                    name=self.source_work_queue.work_pool_name
                ):
                    # Always include the pool as a dependency
                    # If it's push/managed, it will be skipped and this queue will be skipped too
                    self._dependencies.append(dependency)
                else:
                    work_pool = await client.read_work_pool(
                        self.source_work_queue.work_pool_name
                    )
                    self._dependencies.append(
                        await construct_migratable_resource(work_pool)
                    )
        return self._dependencies

    async def migrate(self) -> None:
        """Create the work queue on the destination instance.

        Raises:
            TransferSkipped: for default queues (created implicitly with
                their work pool) or when the queue already exists (the
                existing queue is recorded as the destination first).
            RuntimeError: if creation conflicts but no existing queue can
                be found on the destination.
        """
        async with get_client() as client:
            # Skip default work queues as they are created when work pools
            # are transferred. (A previous version also fetched the default
            # queue here, but the result was discarded — a dead round-trip.)
            if self.source_work_queue.name == "default":
                raise TransferSkipped("Default work queues are created with work pools")
            try:
                self.destination_work_queue = await client.create_work_queue(
                    name=self.source_work_queue.name,
                    description=self.source_work_queue.description,
                    priority=self.source_work_queue.priority,
                    concurrency_limit=self.source_work_queue.concurrency_limit,
                    work_pool_name=self.source_work_queue.work_pool_name,
                )
            except ObjectAlreadyExists:
                # Work queue already exists; resolve it by work pool and name.
                work_queues = await client.read_work_queues(
                    work_pool_name=self.source_work_queue.work_pool_name,
                    work_queue_filter=WorkQueueFilter(
                        name=WorkQueueFilterName(any_=[self.source_work_queue.name]),
                    ),
                )
                if work_queues:
                    self.destination_work_queue = work_queues[0]
                    raise TransferSkipped("Already exists")
                raise RuntimeError(
                    "Transfer failed due to conflict, but no existing queue found."
                )
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/cli/transfer/_migratable_resources/work_queues.py",
"license": "Apache License 2.0",
"lines": 97,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:tests/cli/test_transfer.py | """Tests for the prefect transfer CLI command."""
import uuid
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from prefect.cli._transfer_utils import (
find_root_resources as _find_root_resources,
)
from prefect.cli._transfer_utils import (
get_resource_display_name as _get_resource_display_name,
)
from prefect.cli.transfer._exceptions import TransferSkipped
from prefect.cli.transfer._migratable_resources.base import MigratableProtocol
from prefect.settings import Profile, ProfilesCollection
from prefect.testing.cli import invoke_and_assert
from prefect.utilities.asyncutils import run_sync_in_worker_thread
_PATCH_LOAD_PROFILES = "prefect.settings.load_profiles"
_PATCH_USE_PROFILE = "prefect.context.use_profile"
_PATCH_GET_CLIENT = "prefect.client.orchestration.get_client"
class MockMigratableResource:
    """Mock migratable resource for CLI testing."""

    # resource_type -> (``source_*`` attribute name, field set on the mock).
    # ``block_type`` exposes ``slug`` and ``block_schema`` exposes ``id``;
    # every other type exposes ``name``.
    _SOURCE_SPECS = {
        "work_pool": ("source_work_pool", "name"),
        "work_queue": ("source_work_queue", "name"),
        "deployment": ("source_deployment", "name"),
        "flow": ("source_flow", "name"),
        "block_document": ("source_block_document", "name"),
        "block_type": ("source_block_type", "slug"),
        "block_schema": ("source_block_schema", "id"),
        "variable": ("source_variable", "name"),
        "automation": ("source_automation", "name"),
        "concurrency_limit": ("source_global_concurrency_limit", "name"),
    }

    def __init__(
        self,
        resource_id: uuid.UUID,
        name: str,
        resource_type: str = "test",
        migrate_success: bool = True,
        skip: bool = False,
    ):
        self.source_id = resource_id
        self.destination_id = None
        self.name = name
        self.resource_type = resource_type
        self._migrate_success = migrate_success
        self._skip = skip
        # Attach the source_* mock used by display-name mapping, if this
        # resource type has one.
        spec = self._SOURCE_SPECS.get(resource_type)
        if spec is not None:
            attr, field = spec
            source_mock = MagicMock()
            # ``name`` is special in the MagicMock constructor, so fields are
            # always assigned after construction.
            setattr(source_mock, field, resource_id if field == "id" else name)
            setattr(self, attr, source_mock)

    async def migrate(self) -> None:
        """Mock migrate method: raise per the configured outcome."""
        if self._skip:
            raise TransferSkipped(f"Skipped {self.name}")
        elif not self._migrate_success:
            raise ValueError(f"Migration failed for {self.name}")

    async def get_dependencies(self) -> list[MigratableProtocol]:
        """Mock get_dependencies method: no dependencies by default."""
        return []

    def __str__(self) -> str:
        return f"Mock{self.resource_type.title()}({self.name})"
@pytest.fixture
def mock_profiles():
    """Mock profile collection with test profiles."""
    profiles = [Profile(name=n, settings={}) for n in ("source", "target")]
    return ProfilesCollection(profiles, active="source")
@pytest.fixture
def mock_resources():
    """Mock migratable resources of various types."""
    specs = [
        ("test-pool", "work_pool"),
        ("test-queue", "work_queue"),
        ("test-deployment", "deployment"),
        ("test-flow", "flow"),
        ("test-block", "block_document"),
        ("test-var", "variable"),
    ]
    return [
        MockMigratableResource(uuid.uuid4(), name, kind) for name, kind in specs
    ]
@pytest.fixture
def mock_client():
    """Mock PrefectClient with resource collections."""
    client = AsyncMock()
    # Empty collections by default; each reader is an AsyncMock so the
    # production code can await it.
    for reader in (
        "read_work_pools",
        "read_work_queues",
        "read_deployments",
        "read_block_documents",
        "read_variables",
        "read_global_concurrency_limits",
        "read_automations",
    ):
        setattr(client, reader, AsyncMock(return_value=[]))
    return client
@pytest.fixture
def mock_dag():
    """Mock TransferDAG for execution testing."""
    dag = MagicMock()
    dag.nodes = {}
    empty_stats = {"total_nodes": 0, "total_edges": 0, "has_cycles": False}
    dag.get_statistics.return_value = empty_stats
    # execute_concurrent is awaited by the CLI, so it must be an AsyncMock.
    dag.execute_concurrent = AsyncMock(return_value={})
    return dag
class TestTransferArguments:
    """Test command line argument validation."""

    @patch(_PATCH_LOAD_PROFILES)
    def test_transfer_source_profile_not_found(self, mock_load_profiles: MagicMock):
        """Test transfer command fails when source profile doesn't exist."""
        # No profiles exist at all, so any source name is unknown.
        mock_load_profiles.return_value = ProfilesCollection([])

        invoke_and_assert(
            command=["transfer", "--from", "nonexistent", "--to", "target"],
            expected_code=1,
            expected_output_contains="Source profile 'nonexistent' not found",
        )

    @patch(_PATCH_LOAD_PROFILES)
    def test_transfer_target_profile_not_found(self, mock_load_profiles: MagicMock):
        """Test transfer command fails when target profile doesn't exist."""
        # Only the source profile exists; the requested target is unknown.
        # (The previously-declared ``mock_profiles`` fixture argument was
        # unused and has been removed.)
        source_profile = Profile(name="source", settings={})
        mock_load_profiles.return_value = ProfilesCollection([source_profile])

        invoke_and_assert(
            command=["transfer", "--from", "source", "--to", "nonexistent"],
            expected_code=1,
            expected_output_contains="Target profile 'nonexistent' not found",
        )

    @patch(_PATCH_LOAD_PROFILES)
    def test_transfer_same_source_and_target_profiles(
        self, mock_load_profiles: MagicMock
    ):
        """Test transfer command fails when source and target are the same."""
        profile = Profile(name="same", settings={})
        mock_load_profiles.return_value = ProfilesCollection([profile])

        invoke_and_assert(
            command=["transfer", "--from", "same", "--to", "same"],
            expected_code=1,
            expected_output_contains="Source and target profiles must be different",
        )
class TestResourceCollection:
    """Test resource collection from source profile."""

    @patch(_PATCH_LOAD_PROFILES)
    @patch(_PATCH_USE_PROFILE)
    @patch(_PATCH_GET_CLIENT)
    async def test_transfer_no_resources_found(
        self,
        mock_get_client: MagicMock,
        mock_use_profile: MagicMock,
        mock_load_profiles: MagicMock,
        mock_profiles: ProfilesCollection,
    ):
        """Test transfer when no resources are found."""
        mock_load_profiles.return_value = mock_profiles

        # use_profile is a context manager; make enter/exit no-ops.
        mock_use_profile.return_value.__enter__.return_value = None
        mock_use_profile.return_value.__exit__.return_value = None

        # Client whose collection reads all return empty lists; each reader
        # must be an AsyncMock so it can be awaited.
        client = AsyncMock()
        for reader in (
            "read_work_pools",
            "read_work_queues",
            "read_deployments",
            "read_block_documents",
            "read_variables",
            "read_global_concurrency_limits",
            "read_automations",
        ):
            setattr(client, reader, AsyncMock(return_value=[]))
        mock_get_client.return_value.__aenter__.return_value = client
        mock_get_client.return_value.__aexit__.return_value = None

        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=["transfer", "--from", "source", "--to", "target"],
            expected_code=0,
            expected_output_contains="No resources found to transfer",
        )
class TestIntegrationScenarios:
    """Integration-style tests for common scenarios."""

    @patch(_PATCH_LOAD_PROFILES)
    def test_transfer_basic_help_and_validation(self, mock_load_profiles: MagicMock):
        """Test that basic command structure and validation works."""
        # --help short-circuits before any profile handling; load_profiles is
        # patched defensively so no real profiles are touched. (The unused
        # ``mock_profiles`` fixture argument has been removed.)
        invoke_and_assert(
            command=["transfer", "--help"],
            expected_code=0,
            expected_output_contains=[
                "Transfer resources from one Prefect profile to another",
            ],
        )
class TestHelperFunctions:
    """Test helper functions used by the CLI."""

    def test_get_resource_display_name_work_pool(self):
        """Test display name generation for work pool."""
        resource = MockMigratableResource(uuid.uuid4(), "test-pool", "work_pool")
        assert _get_resource_display_name(resource) == "work-pool/test-pool"

    def test_get_resource_display_name_work_queue(self):
        """Test display name generation for work queue."""
        resource = MockMigratableResource(uuid.uuid4(), "test-queue", "work_queue")
        assert _get_resource_display_name(resource) == "work-queue/test-queue"

    def test_get_resource_display_name_deployment(self):
        """Test display name generation for deployment."""
        resource = MockMigratableResource(uuid.uuid4(), "test-deploy", "deployment")
        assert _get_resource_display_name(resource) == "deployment/test-deploy"

    def test_get_resource_display_name_block_type(self):
        """Test display name generation for block type."""
        resource = MockMigratableResource(uuid.uuid4(), "test-block-type", "block_type")
        assert _get_resource_display_name(resource) == "block-type/test-block-type"

    def test_get_resource_display_name_block_schema(self):
        """Test display name generation for block schema."""
        # Renamed from ``id`` to avoid shadowing the builtin.
        schema_id = uuid.uuid4()
        resource = MockMigratableResource(schema_id, "test-block-schema", "block_schema")
        # Block schemas have no name; the first 8 hex chars of the id are used.
        assert _get_resource_display_name(resource) == f"block-schema/{str(schema_id)[:8]}"

    def test_get_resource_display_name_variable(self):
        """Test display name generation for variable."""
        resource = MockMigratableResource(uuid.uuid4(), "test-var", "variable")
        assert _get_resource_display_name(resource) == "variable/test-var"

    def test_get_resource_display_name_automation(self):
        """Test display name generation for automation."""
        resource = MockMigratableResource(uuid.uuid4(), "test-automation", "automation")
        assert _get_resource_display_name(resource) == "automation/test-automation"

    def test_get_resource_display_name_concurrency_limit(self):
        """Test display name generation for concurrency limit."""
        resource = MockMigratableResource(
            uuid.uuid4(), "test-concurrency-limit", "concurrency_limit"
        )
        assert (
            _get_resource_display_name(resource)
            == "concurrency-limit/test-concurrency-limit"
        )

    def test_get_resource_display_name_block_document(self):
        """Test display name generation for block document."""
        resource = MockMigratableResource(
            uuid.uuid4(), "test-block-document", "block_document"
        )
        assert _get_resource_display_name(resource) == "block-document/test-block-document"

    def test_get_resource_display_name_flow(self):
        """Test display name generation for flow."""
        resource = MockMigratableResource(uuid.uuid4(), "test-flow", "flow")
        assert _get_resource_display_name(resource) == "flow/test-flow"

    def test_get_resource_display_name_unknown(self):
        """Test display name generation for unknown resource type."""
        # Unknown types fall back to str(resource).
        resource = MockMigratableResource(uuid.uuid4(), "test", "unknown")
        assert _get_resource_display_name(resource) == "MockUnknown(test)"

    async def test_find_root_resources_with_dependencies(self):
        """Test _find_root_resources identifies correct root resources."""
        # (The unused ``mock_resources`` fixture argument has been removed.)
        root1 = MockMigratableResource(uuid.uuid4(), "root1")
        root2 = MockMigratableResource(uuid.uuid4(), "root2")
        dep1 = MockMigratableResource(uuid.uuid4(), "dep1")
        dep2 = MockMigratableResource(uuid.uuid4(), "dep2")
        all_resources = [root1, root2, dep1, dep2]

        # Dependency graph: root1 -> dep1, root2 -> dep2.
        async def root1_deps():
            return [dep1]

        async def root2_deps():
            return [dep2]

        async def no_deps():
            return []

        root1.get_dependencies = root1_deps
        root2.get_dependencies = root2_deps
        dep1.get_dependencies = no_deps
        dep2.get_dependencies = no_deps

        roots = await _find_root_resources(all_resources)

        # Only resources that are not dependencies of others are roots.
        root_ids = {r.source_id for r in roots}
        assert root1.source_id in root_ids
        assert root2.source_id in root_ids
        assert dep1.source_id not in root_ids
        assert dep2.source_id not in root_ids

    async def test_find_root_resources_no_dependencies(
        self, mock_resources: list[MockMigratableResource]
    ):
        """Test _find_root_resources when no resources have dependencies."""
        # Uses the module-level import of _find_root_resources; the previous
        # redundant in-function re-import was removed.

        async def no_deps():
            return []

        for resource in mock_resources:
            resource.get_dependencies = no_deps

        roots = await _find_root_resources(mock_resources)

        # With no dependency edges, every resource is a root.
        assert len(roots) == len(mock_resources)
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/cli/test_transfer.py",
"license": "Apache License 2.0",
"lines": 308,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:tests/cli/transfer/test_automations.py | import uuid
from datetime import timedelta
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from prefect.cli.transfer._exceptions import TransferSkipped
from prefect.cli.transfer._migratable_resources.automations import MigratableAutomation
from prefect.events.actions import (
CallWebhook,
DoNothing,
PauseAutomation,
PauseWorkPool,
PauseWorkQueue,
RunDeployment,
SendNotification,
)
from prefect.events.schemas.automations import Automation, EventTrigger, Posture
from prefect.events.schemas.events import ResourceSpecification
from prefect.exceptions import ObjectNotFound
_http_exc = Exception("404 Not Found")
def create_test_trigger() -> EventTrigger:
    """Helper to create a proper EventTrigger for testing."""
    # A minimal reactive trigger: fire once on a single failed-flow-run event.
    trigger_fields = dict(
        expect={"prefect.flow-run.Failed"},
        match=ResourceSpecification(root={}),
        match_related=[],
        posture=Posture.Reactive,
        threshold=1,
        within=timedelta(seconds=30),
    )
    return EventTrigger(**trigger_fields)
class TestMigratableAutomation:
async def test_construct_creates_new_instance(
self, transfer_automation: Automation
):
"""Test that construct creates a new MigratableAutomation instance."""
migratable = await MigratableAutomation.construct(transfer_automation)
assert isinstance(migratable, MigratableAutomation)
assert migratable.source_automation == transfer_automation
assert migratable.source_id == transfer_automation.id
assert migratable.destination_automation is None
assert migratable.destination_id is None
assert migratable._dependencies == {}
async def test_construct_returns_cached_instance(
self, transfer_automation: Automation
):
"""Test that construct returns cached instance for same ID."""
# Clear any existing instances
MigratableAutomation._instances.clear()
# Create first instance
migratable1 = await MigratableAutomation.construct(transfer_automation)
# Create second instance with same automation
migratable2 = await MigratableAutomation.construct(transfer_automation)
# Should be the same instance
assert migratable1 is migratable2
assert len(MigratableAutomation._instances) == 1
async def test_get_instance_returns_cached_instance(
self, transfer_automation: Automation
):
"""Test that get_instance returns cached instance."""
# Clear any existing instances
MigratableAutomation._instances.clear()
# Create instance
migratable = await MigratableAutomation.construct(transfer_automation)
# Retrieve instance
retrieved = await MigratableAutomation.get_instance(transfer_automation.id)
assert retrieved is migratable
async def test_get_instance_returns_none_for_unknown_id(self):
"""Test that get_instance returns None for unknown ID."""
# Clear any existing instances
MigratableAutomation._instances.clear()
unknown_id = uuid.uuid4()
retrieved = await MigratableAutomation.get_instance(unknown_id)
assert retrieved is None
async def test_get_dependencies_no_dependencies(
self, transfer_automation: Automation
):
"""Test automation with actions that have no dependencies."""
migratable = await MigratableAutomation.construct(transfer_automation)
dependencies = await migratable.get_dependencies()
assert dependencies == []
assert migratable._dependencies == {}
@patch("prefect.cli.transfer._migratable_resources.automations.get_client")
@patch(
"prefect.cli.transfer._migratable_resources.automations.construct_migratable_resource"
)
async def test_get_dependencies_deployment_action(
self, mock_construct_resource: AsyncMock, mock_get_client: MagicMock
):
"""Test automation with DeploymentAction dependencies."""
deployment_id = uuid.uuid4()
automation = Automation(
id=uuid.uuid4(),
name=f"test-automation-{uuid.uuid4()}",
description="Test automation",
enabled=True,
tags=[],
trigger=create_test_trigger(),
actions=[
RunDeployment(
deployment_id=deployment_id,
source="selected",
parameters=None,
job_variables=None,
),
],
actions_on_trigger=[],
actions_on_resolve=[],
)
# Mock the client and deployment
mock_client = AsyncMock()
mock_get_client.return_value.__aenter__.return_value = mock_client
mock_deployment = MagicMock()
mock_deployment.id = deployment_id
mock_client.read_deployment.return_value = mock_deployment
mock_migratable_deployment = MagicMock()
mock_construct_resource.return_value = mock_migratable_deployment
migratable = await MigratableAutomation.construct(automation)
dependencies = await migratable.get_dependencies()
assert len(dependencies) == 1
assert dependencies[0] == mock_migratable_deployment
assert deployment_id in migratable._dependencies
mock_client.read_deployment.assert_called_once_with(deployment_id)
mock_construct_resource.assert_called_once_with(mock_deployment)
@patch("prefect.cli.transfer._migratable_resources.automations.get_client")
@patch(
"prefect.cli.transfer._migratable_resources.automations.construct_migratable_resource"
)
async def test_get_dependencies_work_pool_action(
self, mock_construct_resource: AsyncMock, mock_get_client: MagicMock
):
"""Test automation with WorkPoolAction dependencies."""
work_pool_id = uuid.uuid4()
automation = Automation(
id=uuid.uuid4(),
name=f"test-automation-{uuid.uuid4()}",
description="Test automation",
enabled=True,
tags=[],
trigger=create_test_trigger(),
actions=[
PauseWorkPool(work_pool_id=work_pool_id, source="selected"),
],
actions_on_trigger=[],
actions_on_resolve=[],
)
# Mock the client and work pool
mock_client = AsyncMock()
mock_get_client.return_value.__aenter__.return_value = mock_client
mock_work_pool = MagicMock()
mock_work_pool.id = work_pool_id
mock_client.read_work_pools.return_value = [mock_work_pool]
mock_migratable_work_pool = MagicMock()
mock_construct_resource.return_value = mock_migratable_work_pool
migratable = await MigratableAutomation.construct(automation)
dependencies = await migratable.get_dependencies()
assert len(dependencies) == 1
assert dependencies[0] == mock_migratable_work_pool
assert work_pool_id in migratable._dependencies
mock_construct_resource.assert_called_once_with(mock_work_pool)
@patch("prefect.cli.transfer._migratable_resources.automations.get_client")
@patch(
"prefect.cli.transfer._migratable_resources.automations.construct_migratable_resource"
)
async def test_get_dependencies_work_queue_action(
self, mock_construct_resource: AsyncMock, mock_get_client: MagicMock
):
"""Test automation with WorkQueueAction dependencies."""
work_queue_id = uuid.uuid4()
automation = Automation(
id=uuid.uuid4(),
name=f"test-automation-{uuid.uuid4()}",
description="Test automation",
enabled=True,
tags=[],
trigger=create_test_trigger(),
actions=[
PauseWorkQueue(work_queue_id=work_queue_id, source="selected"),
],
actions_on_trigger=[],
actions_on_resolve=[],
)
# Mock the client and work queue
mock_client = AsyncMock()
mock_get_client.return_value.__aenter__.return_value = mock_client
mock_work_queue = MagicMock()
mock_work_queue.id = work_queue_id
mock_client.read_work_queue.return_value = mock_work_queue
mock_migratable_work_queue = MagicMock()
mock_construct_resource.return_value = mock_migratable_work_queue
migratable = await MigratableAutomation.construct(automation)
dependencies = await migratable.get_dependencies()
assert len(dependencies) == 1
assert dependencies[0] == mock_migratable_work_queue
assert work_queue_id in migratable._dependencies
mock_client.read_work_queue.assert_called_once_with(work_queue_id)
mock_construct_resource.assert_called_once_with(mock_work_queue)
@patch("prefect.cli.transfer._migratable_resources.automations.get_client")
@patch(
"prefect.cli.transfer._migratable_resources.automations.construct_migratable_resource"
)
async def test_get_dependencies_automation_action(
self, mock_construct_resource: AsyncMock, mock_get_client: MagicMock
):
"""Test automation with AutomationAction dependencies."""
automation_id = uuid.uuid4()
automation = Automation(
id=uuid.uuid4(),
name=f"test-automation-{uuid.uuid4()}",
description="Test automation",
enabled=True,
tags=[],
trigger=create_test_trigger(),
actions=[
PauseAutomation(automation_id=automation_id, source="selected"),
],
actions_on_trigger=[],
actions_on_resolve=[],
)
# Mock the client and automation
mock_client = AsyncMock()
mock_get_client.return_value.__aenter__.return_value = mock_client
mock_automation = MagicMock()
mock_automation.id = automation_id
mock_client.find_automation.return_value = mock_automation
mock_migratable_automation = MagicMock()
mock_construct_resource.return_value = mock_migratable_automation
migratable = await MigratableAutomation.construct(automation)
dependencies = await migratable.get_dependencies()
assert len(dependencies) == 1
assert dependencies[0] == mock_migratable_automation
assert automation_id in migratable._dependencies
mock_client.find_automation.assert_called_once_with(automation_id)
mock_construct_resource.assert_called_once_with(mock_automation)
@patch("prefect.cli.transfer._migratable_resources.automations.get_client")
@patch(
"prefect.cli.transfer._migratable_resources.automations.construct_migratable_resource"
)
async def test_get_dependencies_call_webhook(
self, mock_construct_resource: AsyncMock, mock_get_client: MagicMock
):
"""Test automation with CallWebhook block document dependencies."""
block_document_id = uuid.uuid4()
automation = Automation(
id=uuid.uuid4(),
name=f"test-automation-{uuid.uuid4()}",
description="Test automation",
enabled=True,
tags=[],
trigger=create_test_trigger(),
actions=[
CallWebhook(block_document_id=block_document_id),
],
actions_on_trigger=[],
actions_on_resolve=[],
)
# Mock the client and block document
mock_client = AsyncMock()
mock_get_client.return_value.__aenter__.return_value = mock_client
mock_block_document = MagicMock()
mock_block_document.id = block_document_id
mock_client.read_block_document.return_value = mock_block_document
mock_migratable_block = MagicMock()
mock_construct_resource.return_value = mock_migratable_block
migratable = await MigratableAutomation.construct(automation)
dependencies = await migratable.get_dependencies()
assert len(dependencies) == 1
assert dependencies[0] == mock_migratable_block
assert block_document_id in migratable._dependencies
mock_client.read_block_document.assert_called_once_with(block_document_id)
mock_construct_resource.assert_called_once_with(mock_block_document)
@patch("prefect.cli.transfer._migratable_resources.automations.get_client")
@patch(
"prefect.cli.transfer._migratable_resources.automations.construct_migratable_resource"
)
async def test_get_dependencies_send_notification(
self, mock_construct_resource: AsyncMock, mock_get_client: MagicMock
):
"""Test automation with SendNotification block document dependencies."""
block_document_id = uuid.uuid4()
automation = Automation(
id=uuid.uuid4(),
name=f"test-automation-{uuid.uuid4()}",
description="Test automation",
enabled=True,
tags=[],
trigger=create_test_trigger(),
actions=[
SendNotification(
block_document_id=block_document_id,
subject="Test notification",
body="Test body",
),
],
actions_on_trigger=[],
actions_on_resolve=[],
)
# Mock the client and block document
mock_client = AsyncMock()
mock_get_client.return_value.__aenter__.return_value = mock_client
mock_block_document = MagicMock()
mock_block_document.id = block_document_id
mock_client.read_block_document.return_value = mock_block_document
mock_migratable_block = MagicMock()
mock_construct_resource.return_value = mock_migratable_block
migratable = await MigratableAutomation.construct(automation)
dependencies = await migratable.get_dependencies()
assert len(dependencies) == 1
assert dependencies[0] == mock_migratable_block
assert block_document_id in migratable._dependencies
mock_client.read_block_document.assert_called_once_with(block_document_id)
mock_construct_resource.assert_called_once_with(mock_block_document)
@patch("prefect.cli.transfer._migratable_resources.automations.get_client")
@patch(
"prefect.cli.transfer._migratable_resources.automations.construct_migratable_resource"
)
async def test_get_dependencies_multiple_actions(
self, mock_construct_resource: AsyncMock, mock_get_client: MagicMock
):
"""Test automation with multiple actions that have dependencies."""
deployment_id = uuid.uuid4()
block_document_id = uuid.uuid4()
automation = Automation(
id=uuid.uuid4(),
name=f"test-automation-{uuid.uuid4()}",
description="Test automation",
enabled=True,
tags=[],
trigger=create_test_trigger(),
actions=[
RunDeployment(
deployment_id=deployment_id,
source="selected",
parameters=None,
job_variables=None,
),
CallWebhook(block_document_id=block_document_id),
DoNothing(), # No dependencies
],
actions_on_trigger=[],
actions_on_resolve=[],
)
# Mock the client
mock_client = AsyncMock()
mock_get_client.return_value.__aenter__.return_value = mock_client
# Mock deployment
mock_deployment = MagicMock()
mock_deployment.id = deployment_id
mock_client.read_deployment.return_value = mock_deployment
# Mock block document
mock_block_document = MagicMock()
mock_block_document.id = block_document_id
mock_client.read_block_document.return_value = mock_block_document
# Mock migratable resources
mock_migratable_deployment = MagicMock()
mock_migratable_block = MagicMock()
mock_construct_resource.side_effect = [
mock_migratable_deployment,
mock_migratable_block,
]
migratable = await MigratableAutomation.construct(automation)
dependencies = await migratable.get_dependencies()
assert len(dependencies) == 2
assert mock_migratable_deployment in dependencies
assert mock_migratable_block in dependencies
assert deployment_id in migratable._dependencies
assert block_document_id in migratable._dependencies
async def test_get_dependencies_cached(self, transfer_automation: Automation):
"""Test that dependencies are cached after first call."""
migratable = await MigratableAutomation.construct(transfer_automation)
# Set up some mock dependencies
mock_dependency = MagicMock()
migratable._dependencies[uuid.uuid4()] = mock_dependency
dependencies1 = await migratable.get_dependencies()
dependencies2 = await migratable.get_dependencies()
# Should return the same cached result
assert dependencies1 == dependencies2
assert dependencies1 == [mock_dependency]
@patch("prefect.cli.transfer._migratable_resources.automations.get_client")
async def test_migrate_already_exists(
self, mock_get_client: MagicMock, transfer_automation: Automation
):
"""Test migration when automation already exists."""
# Mock the client
mock_client = AsyncMock()
mock_get_client.return_value.__aenter__.return_value = mock_client
# Mock existing automation
existing_automation = Automation(
id=uuid.uuid4(),
name=transfer_automation.name,
description="Existing automation",
enabled=False,
tags=["existing"],
trigger=transfer_automation.trigger,
actions=[DoNothing()],
actions_on_trigger=[],
actions_on_resolve=[],
)
mock_client.read_automations_by_name.return_value = [existing_automation]
migratable = await MigratableAutomation.construct(transfer_automation)
# Should raise TransferSkipped
with pytest.raises(TransferSkipped, match="Already exists"):
await migratable.migrate()
# Verify calls
mock_client.read_automations_by_name.assert_called_once_with(
name=transfer_automation.name
)
# Verify destination_automation is set to existing
assert migratable.destination_automation == existing_automation
assert migratable.destination_id == existing_automation.id
@patch("prefect.cli.transfer._migratable_resources.automations.get_client")
async def test_migrate_success_no_dependencies(
self, mock_get_client: MagicMock, transfer_automation: Automation
):
"""Test successful migration of automation with no dependencies."""
# Mock the client
mock_client = AsyncMock()
mock_get_client.return_value.__aenter__.return_value = mock_client
# Mock no existing automations
mock_client.read_automations_by_name.return_value = []
# Mock successful creation
created_automation_id = uuid.uuid4()
mock_client.create_automation.return_value = created_automation_id
destination_automation = Automation(
id=created_automation_id,
name=transfer_automation.name,
description=transfer_automation.description,
enabled=transfer_automation.enabled,
tags=transfer_automation.tags,
trigger=transfer_automation.trigger,
actions=transfer_automation.actions,
actions_on_trigger=transfer_automation.actions_on_trigger,
actions_on_resolve=transfer_automation.actions_on_resolve,
)
mock_client.read_automation.return_value = destination_automation
migratable = await MigratableAutomation.construct(transfer_automation)
await migratable.migrate()
# Verify calls
mock_client.read_automations_by_name.assert_called_once_with(
name=transfer_automation.name
)
mock_client.create_automation.assert_called_once()
mock_client.read_automation.assert_called_once_with(
automation_id=created_automation_id
)
# Verify destination_automation is set
assert migratable.destination_automation == destination_automation
assert migratable.destination_id == created_automation_id
@patch("prefect.cli.transfer._migratable_resources.automations.get_client")
async def test_migrate_success_with_dependencies(self, mock_get_client: MagicMock):
"""Test successful migration of automation with dependencies."""
deployment_id = uuid.uuid4()
automation = Automation(
id=uuid.uuid4(),
name=f"test-automation-with-deps-{uuid.uuid4()}",
description="Test automation with dependencies",
enabled=True,
tags=["test"],
trigger=create_test_trigger(),
actions=[
RunDeployment(
deployment_id=deployment_id,
source="selected",
parameters=None,
job_variables=None,
),
],
actions_on_trigger=[],
actions_on_resolve=[],
)
# Mock the client
mock_client = AsyncMock()
mock_get_client.return_value.__aenter__.return_value = mock_client
# Mock no existing automations
mock_client.read_automations_by_name.return_value = []
# Mock successful creation
created_automation_id = uuid.uuid4()
mock_client.create_automation.return_value = created_automation_id
# Mock dependency with destination_id
mock_dependency = MagicMock()
destination_deployment_id = uuid.uuid4()
mock_dependency.destination_id = destination_deployment_id
migratable = await MigratableAutomation.construct(automation)
# Set up the dependency manually
migratable._dependencies[deployment_id] = mock_dependency
# Mock the read automation response
expected_automation = Automation(
id=created_automation_id,
name=automation.name,
description=automation.description,
enabled=automation.enabled,
tags=automation.tags,
trigger=automation.trigger,
actions=[
RunDeployment(
deployment_id=destination_deployment_id,
source="selected",
parameters=None,
job_variables=None,
),
],
actions_on_trigger=[],
actions_on_resolve=[],
)
mock_client.read_automation.return_value = expected_automation
await migratable.migrate()
# Verify the automation was created with updated IDs
create_call = mock_client.create_automation.call_args[1]["automation"]
assert create_call.actions[0].deployment_id == destination_deployment_id
# Verify destination_automation is set
assert migratable.destination_automation == expected_automation
assert migratable.destination_id == created_automation_id
@patch("prefect.cli.transfer._migratable_resources.automations.get_client")
async def test_get_dependencies_missing_deployment_skipped(
self, mock_get_client: MagicMock
):
"""Test that a missing deployment is skipped gracefully."""
deployment_id = uuid.uuid4()
automation = Automation(
id=uuid.uuid4(),
name=f"test-automation-{uuid.uuid4()}",
description="Test automation",
enabled=True,
tags=[],
trigger=create_test_trigger(),
actions=[
RunDeployment(
deployment_id=deployment_id,
source="selected",
parameters=None,
job_variables=None,
),
],
actions_on_trigger=[],
actions_on_resolve=[],
)
mock_client = AsyncMock()
mock_get_client.return_value.__aenter__.return_value = mock_client
mock_client.read_deployment.side_effect = ObjectNotFound(_http_exc)
migratable = await MigratableAutomation.construct(automation)
dependencies = await migratable.get_dependencies()
assert len(dependencies) == 0
assert deployment_id not in migratable._dependencies
mock_client.read_deployment.assert_called_once_with(deployment_id)
@patch("prefect.cli.transfer._migratable_resources.automations.get_client")
async def test_get_dependencies_missing_work_queue_skipped(
self, mock_get_client: MagicMock
):
"""Test that a missing work queue is skipped gracefully."""
work_queue_id = uuid.uuid4()
automation = Automation(
id=uuid.uuid4(),
name=f"test-automation-{uuid.uuid4()}",
description="Test automation",
enabled=True,
tags=[],
trigger=create_test_trigger(),
actions=[
PauseWorkQueue(work_queue_id=work_queue_id, source="selected"),
],
actions_on_trigger=[],
actions_on_resolve=[],
)
mock_client = AsyncMock()
mock_get_client.return_value.__aenter__.return_value = mock_client
mock_client.read_work_queue.side_effect = ObjectNotFound(_http_exc)
migratable = await MigratableAutomation.construct(automation)
dependencies = await migratable.get_dependencies()
assert len(dependencies) == 0
assert work_queue_id not in migratable._dependencies
mock_client.read_work_queue.assert_called_once_with(work_queue_id)
@patch("prefect.cli.transfer._migratable_resources.automations.get_client")
async def test_get_dependencies_missing_block_document_call_webhook_skipped(
self, mock_get_client: MagicMock
):
"""Test that a missing block document for CallWebhook is skipped gracefully."""
block_document_id = uuid.uuid4()
automation = Automation(
id=uuid.uuid4(),
name=f"test-automation-{uuid.uuid4()}",
description="Test automation",
enabled=True,
tags=[],
trigger=create_test_trigger(),
actions=[
CallWebhook(block_document_id=block_document_id),
],
actions_on_trigger=[],
actions_on_resolve=[],
)
mock_client = AsyncMock()
mock_get_client.return_value.__aenter__.return_value = mock_client
mock_client.read_block_document.side_effect = ObjectNotFound(_http_exc)
migratable = await MigratableAutomation.construct(automation)
dependencies = await migratable.get_dependencies()
assert len(dependencies) == 0
assert block_document_id not in migratable._dependencies
mock_client.read_block_document.assert_called_once_with(block_document_id)
@patch("prefect.cli.transfer._migratable_resources.automations.get_client")
async def test_get_dependencies_missing_block_document_send_notification_skipped(
self, mock_get_client: MagicMock
):
"""Test that a missing block document for SendNotification is skipped gracefully."""
block_document_id = uuid.uuid4()
automation = Automation(
id=uuid.uuid4(),
name=f"test-automation-{uuid.uuid4()}",
description="Test automation",
enabled=True,
tags=[],
trigger=create_test_trigger(),
actions=[
SendNotification(
block_document_id=block_document_id,
subject="Test notification",
body="Test body",
),
],
actions_on_trigger=[],
actions_on_resolve=[],
)
mock_client = AsyncMock()
mock_get_client.return_value.__aenter__.return_value = mock_client
mock_client.read_block_document.side_effect = ObjectNotFound(_http_exc)
migratable = await MigratableAutomation.construct(automation)
dependencies = await migratable.get_dependencies()
assert len(dependencies) == 0
assert block_document_id not in migratable._dependencies
mock_client.read_block_document.assert_called_once_with(block_document_id)
@patch("prefect.cli.transfer._migratable_resources.automations.get_client")
@patch(
"prefect.cli.transfer._migratable_resources.automations.construct_migratable_resource"
)
async def test_get_dependencies_partial_missing_resources(
self, mock_construct_resource: AsyncMock, mock_get_client: MagicMock
):
"""Test that existing deps are kept when some resources are missing."""
deployment_id = uuid.uuid4()
missing_block_document_id = uuid.uuid4()
automation = Automation(
id=uuid.uuid4(),
name=f"test-automation-{uuid.uuid4()}",
description="Test automation",
enabled=True,
tags=[],
trigger=create_test_trigger(),
actions=[
RunDeployment(
deployment_id=deployment_id,
source="selected",
parameters=None,
job_variables=None,
),
CallWebhook(block_document_id=missing_block_document_id),
],
actions_on_trigger=[],
actions_on_resolve=[],
)
mock_client = AsyncMock()
mock_get_client.return_value.__aenter__.return_value = mock_client
mock_deployment = MagicMock()
mock_deployment.id = deployment_id
mock_client.read_deployment.return_value = mock_deployment
mock_client.read_block_document.side_effect = ObjectNotFound(_http_exc)
mock_migratable_deployment = MagicMock()
mock_construct_resource.return_value = mock_migratable_deployment
migratable = await MigratableAutomation.construct(automation)
dependencies = await migratable.get_dependencies()
assert len(dependencies) == 1
assert dependencies[0] == mock_migratable_deployment
assert deployment_id in migratable._dependencies
assert missing_block_document_id not in migratable._dependencies
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/cli/transfer/test_automations.py",
"license": "Apache License 2.0",
"lines": 670,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:tests/cli/transfer/test_blocks.py | import uuid
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from prefect.cli.transfer._exceptions import TransferSkipped
from prefect.cli.transfer._migratable_resources.blocks import (
MigratableBlockDocument,
MigratableBlockSchema,
MigratableBlockType,
)
from prefect.client.schemas.actions import (
BlockDocumentCreate,
BlockSchemaCreate,
BlockTypeCreate,
)
from prefect.client.schemas.objects import BlockDocument, BlockSchema, BlockType
from prefect.exceptions import ObjectAlreadyExists
class TestMigratableBlockType:
    """Tests for MigratableBlockType: per-ID instance caching, dependency
    resolution (block types have none), and migration including the
    already-exists skip path."""
    async def test_construct_creates_new_instance(
        self, transfer_block_type_x: BlockType
    ):
        """Test that construct creates a new MigratableBlockType instance."""
        migratable = await MigratableBlockType.construct(transfer_block_type_x)
        assert isinstance(migratable, MigratableBlockType)
        assert migratable.source_block_type == transfer_block_type_x
        assert migratable.source_id == transfer_block_type_x.id
        assert migratable.destination_block_type is None
        assert migratable.destination_id is None
    async def test_construct_returns_cached_instance(
        self, transfer_block_type_x: BlockType
    ):
        """Test that construct returns cached instance for same ID."""
        # Clear any existing instances
        MigratableBlockType._instances.clear()
        # Create first instance
        migratable1 = await MigratableBlockType.construct(transfer_block_type_x)
        # Create second instance with same block type
        migratable2 = await MigratableBlockType.construct(transfer_block_type_x)
        # Should be the same instance
        assert migratable1 is migratable2
        assert len(MigratableBlockType._instances) == 1
    async def test_get_instance_returns_cached_instance(
        self, transfer_block_type_x: BlockType
    ):
        """Test that get_instance returns cached instance."""
        # Clear any existing instances
        MigratableBlockType._instances.clear()
        # Create instance
        migratable = await MigratableBlockType.construct(transfer_block_type_x)
        # Retrieve instance
        retrieved = await MigratableBlockType.get_instance(transfer_block_type_x.id)
        assert retrieved is migratable
    async def test_get_instance_returns_none_for_unknown_id(self):
        """Test that get_instance returns None for unknown ID."""
        # Clear any existing instances
        MigratableBlockType._instances.clear()
        unknown_id = uuid.uuid4()
        retrieved = await MigratableBlockType.get_instance(unknown_id)
        assert retrieved is None
    async def test_get_dependencies_returns_empty_list(
        self, transfer_block_type_x: BlockType
    ):
        """Test that get_dependencies returns empty list (block types have no dependencies)."""
        migratable = await MigratableBlockType.construct(transfer_block_type_x)
        dependencies = await migratable.get_dependencies()
        assert dependencies == []
    @patch("prefect.cli.transfer._migratable_resources.blocks.get_client")
    async def test_migrate_success(
        self, mock_get_client: MagicMock, transfer_block_type_x: BlockType
    ):
        """Test successful block type migration."""
        # Mock the client
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client
        # Mock successful creation
        destination_block_type = BlockType(
            id=uuid.uuid4(),
            name=transfer_block_type_x.name,
            slug=transfer_block_type_x.slug,
            logo_url=transfer_block_type_x.logo_url,
            documentation_url=transfer_block_type_x.documentation_url,
            description=transfer_block_type_x.description,
            code_example=transfer_block_type_x.code_example,
            is_protected=transfer_block_type_x.is_protected,
            created=transfer_block_type_x.created,
            updated=transfer_block_type_x.updated,
        )
        mock_client.create_block_type.return_value = destination_block_type
        migratable = await MigratableBlockType.construct(transfer_block_type_x)
        await migratable.migrate()
        # Verify client was called correctly
        mock_client.create_block_type.assert_called_once_with(
            block_type=BlockTypeCreate(
                name=transfer_block_type_x.name,
                slug=transfer_block_type_x.slug,
            )
        )
        # Verify destination_block_type is set
        assert migratable.destination_block_type == destination_block_type
        assert migratable.destination_id == destination_block_type.id
    @patch("prefect.cli.transfer._migratable_resources.blocks.get_client")
    async def test_migrate_already_exists(
        self, mock_get_client: MagicMock, transfer_block_type_x: BlockType
    ):
        """Test migration when block type already exists."""
        # Mock the client
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client
        # Mock ObjectAlreadyExists exception on create
        mock_http_exc = Exception("Conflict")
        mock_client.create_block_type.side_effect = ObjectAlreadyExists(mock_http_exc)
        # Mock successful read of existing block type
        existing_block_type = BlockType(
            id=uuid.uuid4(),
            name=transfer_block_type_x.name,
            slug=transfer_block_type_x.slug,
            logo_url="https://example.com/existing-logo.png",  # Different to show it reads existing
            documentation_url=transfer_block_type_x.documentation_url,
            description="existing description",
            code_example=transfer_block_type_x.code_example,
            is_protected=transfer_block_type_x.is_protected,
            created=transfer_block_type_x.created,
            updated=transfer_block_type_x.updated,
        )
        mock_client.read_block_type_by_slug.return_value = existing_block_type
        migratable = await MigratableBlockType.construct(transfer_block_type_x)
        # Should raise TransferSkipped
        with pytest.raises(TransferSkipped, match="Already exists"):
            await migratable.migrate()
        # Verify client calls
        mock_client.create_block_type.assert_called_once()
        mock_client.read_block_type_by_slug.assert_called_once_with(
            transfer_block_type_x.slug
        )
        # Verify destination_block_type is set to existing
        assert migratable.destination_block_type == existing_block_type
        assert migratable.destination_id == existing_block_type.id
class TestMigratableBlockSchema:
    """Tests for MigratableBlockSchema: per-ID instance caching, block-type
    dependency discovery (from an attached object or via a client lookup by
    block_type_id), and migration including already-exists and missing-type
    error paths."""
    async def test_construct_creates_new_instance(
        self, transfer_block_schema: BlockSchema
    ):
        """Test that construct creates a new MigratableBlockSchema instance."""
        migratable = await MigratableBlockSchema.construct(transfer_block_schema)
        assert isinstance(migratable, MigratableBlockSchema)
        assert migratable.source_block_schema == transfer_block_schema
        assert migratable.source_id == transfer_block_schema.id
        assert migratable.destination_block_schema is None
        assert migratable.destination_id is None
        assert migratable._dependencies == {}
    async def test_construct_returns_cached_instance(
        self, transfer_block_schema: BlockSchema
    ):
        """Test that construct returns cached instance for same ID."""
        # Clear any existing instances
        MigratableBlockSchema._instances.clear()
        # Create first instance
        migratable1 = await MigratableBlockSchema.construct(transfer_block_schema)
        # Create second instance with same block schema
        migratable2 = await MigratableBlockSchema.construct(transfer_block_schema)
        # Should be the same instance
        assert migratable1 is migratable2
        assert len(MigratableBlockSchema._instances) == 1
    async def test_get_instance_returns_cached_instance(
        self, transfer_block_schema: BlockSchema
    ):
        """Test that get_instance returns cached instance."""
        # Clear any existing instances
        MigratableBlockSchema._instances.clear()
        # Create instance
        migratable = await MigratableBlockSchema.construct(transfer_block_schema)
        # Retrieve instance
        retrieved = await MigratableBlockSchema.get_instance(transfer_block_schema.id)
        assert retrieved is migratable
    async def test_get_instance_returns_none_for_unknown_id(self):
        """Test that get_instance returns None for unknown ID."""
        # Clear any existing instances
        MigratableBlockSchema._instances.clear()
        unknown_id = uuid.uuid4()
        retrieved = await MigratableBlockSchema.get_instance(unknown_id)
        assert retrieved is None
    @patch(
        "prefect.cli.transfer._migratable_resources.blocks.construct_migratable_resource"
    )
    async def test_get_dependencies_with_block_type(
        self, mock_construct_resource: AsyncMock, transfer_block_schema: BlockSchema
    ):
        """Test get_dependencies with block_type present."""
        mock_migratable_block_type = MagicMock()
        mock_construct_resource.return_value = mock_migratable_block_type
        migratable = await MigratableBlockSchema.construct(transfer_block_schema)
        dependencies = await migratable.get_dependencies()
        assert len(dependencies) == 1
        assert dependencies[0] == mock_migratable_block_type
        assert transfer_block_schema.block_type.id in migratable._dependencies
        mock_construct_resource.assert_called_once_with(
            transfer_block_schema.block_type
        )
    @patch("prefect.cli.transfer._migratable_resources.blocks.get_client")
    @patch(
        "prefect.cli.transfer._migratable_resources.blocks.construct_migratable_resource"
    )
    async def test_get_dependencies_with_block_type_id_only(
        self, mock_construct_resource: AsyncMock, mock_get_client: MagicMock
    ):
        """Test get_dependencies with only block_type_id."""
        block_type_id = uuid.uuid4()
        block_schema = BlockSchema(
            id=uuid.uuid4(),
            checksum="test-checksum",
            fields={"type": "object"},
            block_type_id=block_type_id,
            block_type=None,  # No block_type object
            capabilities=[],
            version="1.0.0",
        )
        # Mock the client and response
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client
        mock_response = MagicMock()
        block_type = BlockType(
            id=block_type_id,
            name=f"test-block-type-{uuid.uuid4()}",
            slug=f"test-block-type-{uuid.uuid4()}",
            logo_url=None,
            documentation_url=None,
            description=None,
            code_example=None,
            is_protected=False,
        )
        mock_response.json.return_value = block_type.model_dump()
        mock_client.request.return_value = mock_response
        mock_migratable_block_type = MagicMock()
        mock_construct_resource.return_value = mock_migratable_block_type
        migratable = await MigratableBlockSchema.construct(block_schema)
        dependencies = await migratable.get_dependencies()
        assert len(dependencies) == 1
        assert dependencies[0] == mock_migratable_block_type
        assert block_type_id in migratable._dependencies
        # Without a block_type object, the type must be fetched by ID.
        mock_client.request.assert_called_once_with(
            "GET", "/block_types/{id}", params={"id": block_type_id}
        )
    async def test_get_dependencies_no_block_type_raises_error(self):
        """Test get_dependencies raises error when no block type."""
        block_schema = BlockSchema(
            id=uuid.uuid4(),
            checksum="test-checksum",
            fields={"type": "object"},
            block_type_id=None,
            block_type=None,
            capabilities=[],
            version="1.0.0",
        )
        migratable = await MigratableBlockSchema.construct(block_schema)
        with pytest.raises(
            ValueError, match="Block schema has no associated block type"
        ):
            await migratable.get_dependencies()
    @patch("prefect.cli.transfer._migratable_resources.blocks.get_client")
    async def test_migrate_success(
        self, mock_get_client: MagicMock, transfer_block_schema: BlockSchema
    ):
        """Test successful block schema migration."""
        # Mock the client
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client
        # Mock dependency
        mock_dependency = MagicMock()
        destination_block_type_id = uuid.uuid4()
        mock_dependency.destination_id = destination_block_type_id
        migratable = await MigratableBlockSchema.construct(transfer_block_schema)
        # Set up the dependency manually
        migratable._dependencies[transfer_block_schema.block_type_id] = mock_dependency
        # Mock successful creation
        destination_block_schema = BlockSchema(
            id=uuid.uuid4(),
            checksum=transfer_block_schema.checksum,
            fields=transfer_block_schema.fields,
            block_type_id=destination_block_type_id,
            block_type=None,
            capabilities=transfer_block_schema.capabilities,
            version=transfer_block_schema.version,
            created=transfer_block_schema.created,
            updated=transfer_block_schema.updated,
        )
        mock_client.create_block_schema.return_value = destination_block_schema
        await migratable.migrate()
        # Verify client was called correctly
        mock_client.create_block_schema.assert_called_once_with(
            block_schema=BlockSchemaCreate(
                fields=transfer_block_schema.fields,
                block_type_id=destination_block_type_id,
                capabilities=transfer_block_schema.capabilities,
                version=transfer_block_schema.version,
            )
        )
        # Verify destination_block_schema is set
        assert migratable.destination_block_schema == destination_block_schema
        assert migratable.destination_id == destination_block_schema.id
    @patch("prefect.cli.transfer._migratable_resources.blocks.get_client")
    async def test_migrate_already_exists(
        self, mock_get_client: MagicMock, transfer_block_schema: BlockSchema
    ):
        """Test migration when block schema already exists."""
        # Mock the client
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client
        # Mock dependency
        mock_dependency = MagicMock()
        destination_block_type_id = uuid.uuid4()
        mock_dependency.destination_id = destination_block_type_id
        migratable = await MigratableBlockSchema.construct(transfer_block_schema)
        migratable._dependencies[transfer_block_schema.block_type_id] = mock_dependency
        # Mock ObjectAlreadyExists exception on create
        mock_http_exc = Exception("Conflict")
        mock_client.create_block_schema.side_effect = ObjectAlreadyExists(mock_http_exc)
        # Mock successful read of existing block schema
        existing_block_schema = BlockSchema(
            id=uuid.uuid4(),
            checksum=transfer_block_schema.checksum,
            fields={"different": "fields"},  # Different to show it reads existing
            block_type_id=destination_block_type_id,
            block_type=None,
            capabilities=transfer_block_schema.capabilities,
            version=transfer_block_schema.version,
            created=transfer_block_schema.created,
            updated=transfer_block_schema.updated,
        )
        mock_client.read_block_schema_by_checksum.return_value = existing_block_schema
        # Should raise TransferSkipped
        with pytest.raises(TransferSkipped, match="Already exists"):
            await migratable.migrate()
        # Verify client calls
        mock_client.create_block_schema.assert_called_once()
        mock_client.read_block_schema_by_checksum.assert_called_once_with(
            transfer_block_schema.checksum
        )
        # Verify destination_block_schema is set to existing
        assert migratable.destination_block_schema == existing_block_schema
        assert migratable.destination_id == existing_block_schema.id
    async def test_migrate_no_block_type_raises_error(
        self, transfer_block_schema: BlockSchema
    ):
        """Test migrate raises error when no block type ID."""
        transfer_block_schema.block_type_id = None
        migratable = await MigratableBlockSchema.construct(transfer_block_schema)
        with pytest.raises(
            ValueError, match="Block schema has no associated block type"
        ):
            await migratable.migrate()
class TestMigratableBlockDocument:
    """Tests for MigratableBlockDocument: per-ID instance caching, dependency
    discovery (block type, block schema, and referenced block documents), and
    migration including already-exists and missing-dependency error paths."""
    async def test_construct_creates_new_instance(
        self, transfer_block_document: BlockDocument
    ):
        """Test that construct creates a new MigratableBlockDocument instance."""
        migratable = await MigratableBlockDocument.construct(transfer_block_document)
        assert isinstance(migratable, MigratableBlockDocument)
        assert migratable.source_block_document == transfer_block_document
        assert migratable.source_id == transfer_block_document.id
        assert migratable.destination_block_document is None
        assert migratable.destination_id is None
        assert migratable._dependencies == {}
    async def test_construct_returns_cached_instance(
        self, transfer_block_document: BlockDocument
    ):
        """Test that construct returns cached instance for same ID."""
        # Clear any existing instances
        MigratableBlockDocument._instances.clear()
        # Create first instance
        migratable1 = await MigratableBlockDocument.construct(transfer_block_document)
        # Create second instance with same block document
        migratable2 = await MigratableBlockDocument.construct(transfer_block_document)
        # Should be the same instance
        assert migratable1 is migratable2
        assert len(MigratableBlockDocument._instances) == 1
    async def test_get_instance_returns_cached_instance(
        self, transfer_block_document: BlockDocument
    ):
        """Test that get_instance returns cached instance."""
        # Clear any existing instances
        MigratableBlockDocument._instances.clear()
        # Create instance
        migratable = await MigratableBlockDocument.construct(transfer_block_document)
        # Retrieve instance
        retrieved = await MigratableBlockDocument.get_instance(
            transfer_block_document.id
        )
        assert retrieved is migratable
    async def test_get_instance_returns_none_for_unknown_id(self):
        """Test that get_instance returns None for unknown ID."""
        # Clear any existing instances
        MigratableBlockDocument._instances.clear()
        unknown_id = uuid.uuid4()
        retrieved = await MigratableBlockDocument.get_instance(unknown_id)
        assert retrieved is None
    @patch(
        "prefect.cli.transfer._migratable_resources.blocks.construct_migratable_resource"
    )
    async def test_get_dependencies_with_block_type_and_schema(
        self, mock_construct_resource: AsyncMock, transfer_block_document: BlockDocument
    ):
        """Test get_dependencies with block_type and block_schema present."""
        mock_migratable_block_type = MagicMock()
        mock_migratable_block_schema = MagicMock()
        # side_effect order: block type is resolved first, then the schema.
        mock_construct_resource.side_effect = [
            mock_migratable_block_type,
            mock_migratable_block_schema,
        ]
        migratable = await MigratableBlockDocument.construct(transfer_block_document)
        dependencies = await migratable.get_dependencies()
        assert len(dependencies) == 2
        assert mock_migratable_block_type in dependencies
        assert mock_migratable_block_schema in dependencies
        assert transfer_block_document.block_type.id in migratable._dependencies
        assert transfer_block_document.block_schema.id in migratable._dependencies
    @patch("prefect.cli.transfer._migratable_resources.blocks.get_client")
    @patch(
        "prefect.cli.transfer._migratable_resources.blocks.construct_migratable_resource"
    )
    async def test_get_dependencies_with_document_references(
        self, mock_construct_resource: AsyncMock, mock_get_client: MagicMock
    ):
        """Test get_dependencies with block document references."""
        referenced_doc_id = uuid.uuid4()
        block_document = BlockDocument(
            id=uuid.uuid4(),
            name=f"test-block-{uuid.uuid4()}",
            data={"foo": "bar"},
            block_schema_id=uuid.uuid4(),
            block_schema=None,
            block_type_id=uuid.uuid4(),
            block_type=None,
            block_document_references={
                "ref1": {"block_document_id": str(referenced_doc_id)}
            },
            is_anonymous=False,
        )
        # Mock the client
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client
        # Mock referenced block document
        mock_referenced_doc = MagicMock()
        mock_referenced_doc.id = referenced_doc_id
        mock_client.read_block_document.return_value = mock_referenced_doc
        # Mock migratable resources
        mock_migratable_block_type = MagicMock()
        mock_migratable_block_schema = MagicMock()
        mock_migratable_referenced_doc = MagicMock()
        # Mock responses for block type and schema
        mock_type_response = MagicMock()
        mock_type_response.json.return_value = {
            "id": str(block_document.block_type_id),
            "name": f"test-block-type-{uuid.uuid4()}",
            "slug": f"test-block-type-{uuid.uuid4()}",
            "logo_url": None,
            "documentation_url": None,
            "description": None,
            "code_example": None,
            "is_protected": False,
            "created": "2023-01-01T00:00:00Z",
            "updated": "2023-01-01T00:00:00Z",
        }
        mock_schema_response = MagicMock()
        mock_schema_response.json.return_value = {
            "id": str(block_document.block_schema_id),
            "checksum": "test-checksum",
            "fields": {"type": "object"},
            "block_type_id": str(block_document.block_type_id),
            "capabilities": [],
            "version": "1.0.0",
            "created": "2023-01-01T00:00:00Z",
            "updated": "2023-01-01T00:00:00Z",
        }
        # Type lookup happens before schema lookup; the constructed resources
        # follow the same order, with the referenced document resolved last.
        mock_client.request.side_effect = [mock_type_response, mock_schema_response]
        mock_construct_resource.side_effect = [
            mock_migratable_block_type,
            mock_migratable_block_schema,
            mock_migratable_referenced_doc,
        ]
        migratable = await MigratableBlockDocument.construct(block_document)
        dependencies = await migratable.get_dependencies()
        assert len(dependencies) == 3
        assert mock_migratable_block_type in dependencies
        assert mock_migratable_block_schema in dependencies
        assert mock_migratable_referenced_doc in dependencies
        assert referenced_doc_id in migratable._dependencies
    @patch("prefect.cli.transfer._migratable_resources.blocks.get_client")
    async def test_migrate_success(
        self, mock_get_client: MagicMock, transfer_block_document: BlockDocument
    ):
        """Test successful block document migration."""
        # Mock the client
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client
        # Mock dependencies
        mock_block_type_dependency = MagicMock()
        destination_block_type_id = uuid.uuid4()
        mock_block_type_dependency.destination_id = destination_block_type_id
        mock_block_schema_dependency = MagicMock()
        destination_block_schema_id = uuid.uuid4()
        mock_block_schema_dependency.destination_id = destination_block_schema_id
        migratable = await MigratableBlockDocument.construct(transfer_block_document)
        # Set up dependencies manually
        migratable._dependencies[transfer_block_document.block_type_id] = (
            mock_block_type_dependency
        )
        migratable._dependencies[transfer_block_document.block_schema_id] = (
            mock_block_schema_dependency
        )
        # Mock successful creation
        destination_block_document = BlockDocument(
            id=uuid.uuid4(),
            name=transfer_block_document.name,
            data=transfer_block_document.data,
            block_schema_id=destination_block_schema_id,
            block_schema=None,
            block_type_id=destination_block_type_id,
            block_type=None,
            block_document_references={},
            is_anonymous=transfer_block_document.is_anonymous,
            created=transfer_block_document.created,
            updated=transfer_block_document.updated,
        )
        mock_client.create_block_document.return_value = destination_block_document
        await migratable.migrate()
        # Verify client was called correctly
        mock_client.create_block_document.assert_called_once_with(
            block_document=BlockDocumentCreate(
                name=transfer_block_document.name,
                block_type_id=destination_block_type_id,
                block_schema_id=destination_block_schema_id,
                data=transfer_block_document.data,
            )
        )
        # Verify destination_block_document is set
        assert migratable.destination_block_document == destination_block_document
        assert migratable.destination_id == destination_block_document.id
    @patch("prefect.cli.transfer._migratable_resources.blocks.get_client")
    async def test_migrate_already_exists(
        self, mock_get_client: MagicMock, transfer_block_document: BlockDocument
    ):
        """Test migration when block document already exists."""
        # Mock the client
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client
        # Mock dependencies
        mock_block_type_dependency = MagicMock()
        destination_block_type_id = uuid.uuid4()
        mock_block_type_dependency.destination_id = destination_block_type_id
        mock_block_schema_dependency = MagicMock()
        destination_block_schema_id = uuid.uuid4()
        mock_block_schema_dependency.destination_id = destination_block_schema_id
        migratable = await MigratableBlockDocument.construct(transfer_block_document)
        migratable._dependencies[transfer_block_document.block_type_id] = (
            mock_block_type_dependency
        )
        migratable._dependencies[transfer_block_document.block_schema_id] = (
            mock_block_schema_dependency
        )
        # Mock ObjectAlreadyExists exception on create
        mock_http_exc = Exception("Conflict")
        mock_client.create_block_document.side_effect = ObjectAlreadyExists(
            mock_http_exc
        )
        # Mock successful read of existing block document
        existing_block_document = BlockDocument(
            id=uuid.uuid4(),
            name=transfer_block_document.name,
            data={"different": "data"},  # Different to show it reads existing
            block_schema_id=destination_block_schema_id,
            block_schema=None,
            block_type_id=destination_block_type_id,
            block_type=None,
            block_document_references={},
            is_anonymous=transfer_block_document.is_anonymous,
            created=transfer_block_document.created,
            updated=transfer_block_document.updated,
        )
        mock_client.read_block_document_by_name.return_value = existing_block_document
        # Should raise TransferSkipped
        with pytest.raises(TransferSkipped, match="Already exists"):
            await migratable.migrate()
        # Verify client calls
        mock_client.create_block_document.assert_called_once()
        mock_client.read_block_document_by_name.assert_called_once_with(
            block_type_slug=transfer_block_document.block_type.slug,
            name=transfer_block_document.name,
        )
        # Verify destination_block_document is set to existing
        assert migratable.destination_block_document == existing_block_document
        assert migratable.destination_id == existing_block_document.id
    async def test_migrate_missing_block_type_dependency_raises_error(
        self, transfer_block_document: BlockDocument
    ):
        """Test migrate raises error when block type dependency is missing."""
        migratable = await MigratableBlockDocument.construct(transfer_block_document)
        with pytest.raises(ValueError, match="Unable to find destination block type"):
            await migratable.migrate()
    async def test_migrate_missing_block_schema_dependency_raises_error(
        self, transfer_block_document: BlockDocument
    ):
        """Test migrate raises error when block schema dependency is missing."""
        # Mock block type dependency but not block schema
        mock_block_type_dependency = MagicMock()
        mock_block_type_dependency.destination_id = uuid.uuid4()
        migratable = await MigratableBlockDocument.construct(transfer_block_document)
        migratable._dependencies[transfer_block_document.block_type_id] = (
            mock_block_type_dependency
        )
        with pytest.raises(ValueError, match="Unable to find destination block schema"):
            await migratable.migrate()
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/cli/transfer/test_blocks.py",
"license": "Apache License 2.0",
"lines": 609,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:tests/cli/transfer/test_concurrency_limits.py | import uuid
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from sqlalchemy.ext.asyncio import AsyncSession
from prefect.cli.transfer._exceptions import TransferSkipped
from prefect.cli.transfer._migratable_resources.concurrency_limits import (
MigratableGlobalConcurrencyLimit,
)
from prefect.client.schemas.actions import GlobalConcurrencyLimitCreate
from prefect.client.schemas.responses import GlobalConcurrencyLimitResponse
from prefect.exceptions import ObjectAlreadyExists
class TestMigratableGlobalConcurrencyLimit:
    """Tests for ``MigratableGlobalConcurrencyLimit``: instance construction and
    per-source-id caching, dependency resolution (always empty for this resource),
    and migration against a mocked Prefect client."""

    async def test_construct_creates_new_instance(
        self, transfer_global_concurrency_limit: GlobalConcurrencyLimitResponse
    ):
        """Test that construct creates a new MigratableGlobalConcurrencyLimit instance."""
        migratable = await MigratableGlobalConcurrencyLimit.construct(
            transfer_global_concurrency_limit
        )

        # A fresh wrapper mirrors the source object and has no destination state yet.
        assert isinstance(migratable, MigratableGlobalConcurrencyLimit)
        assert (
            migratable.source_global_concurrency_limit
            == transfer_global_concurrency_limit
        )
        assert migratable.source_id == transfer_global_concurrency_limit.id
        assert migratable.destination_global_concurrency_limit is None
        assert migratable.destination_id is None

    async def test_construct_returns_cached_instance(
        self, transfer_global_concurrency_limit: GlobalConcurrencyLimitResponse
    ):
        """Test that construct returns cached instance for same ID."""
        # Clear any existing instances
        MigratableGlobalConcurrencyLimit._instances.clear()

        # Create first instance
        migratable1 = await MigratableGlobalConcurrencyLimit.construct(
            transfer_global_concurrency_limit
        )

        # Create second instance with same limit
        migratable2 = await MigratableGlobalConcurrencyLimit.construct(
            transfer_global_concurrency_limit
        )

        # Should be the same instance
        assert migratable1 is migratable2
        assert len(MigratableGlobalConcurrencyLimit._instances) == 1

    async def test_construct_different_limits_create_different_instances(
        self, session: AsyncSession
    ):
        """Test that different concurrency limits create different instances."""
        # Server-side imports are local so the module stays importable without them.
        from prefect.client.schemas.responses import GlobalConcurrencyLimitResponse
        from prefect.server import models, schemas

        # Create two different concurrency limits
        orm_limit1 = await models.concurrency_limits_v2.create_concurrency_limit(
            session=session,
            concurrency_limit=schemas.core.ConcurrencyLimitV2(
                name=f"test-limit-1-{uuid.uuid4()}",
                limit=3,
                active=True,
                active_slots=0,
            ),
        )
        orm_limit2 = await models.concurrency_limits_v2.create_concurrency_limit(
            session=session,
            concurrency_limit=schemas.core.ConcurrencyLimitV2(
                name=f"test-limit-2-{uuid.uuid4()}",
                limit=10,
                active=False,
                active_slots=5,
            ),
        )
        await session.commit()

        # Convert to client schema objects
        limit1 = GlobalConcurrencyLimitResponse(
            id=orm_limit1.id,
            name=orm_limit1.name,
            limit=orm_limit1.limit,
            active=orm_limit1.active,
            active_slots=orm_limit1.active_slots,
            slot_decay_per_second=orm_limit1.slot_decay_per_second,
            created=orm_limit1.created,
            updated=orm_limit1.updated,
        )
        limit2 = GlobalConcurrencyLimitResponse(
            id=orm_limit2.id,
            name=orm_limit2.name,
            limit=orm_limit2.limit,
            active=orm_limit2.active,
            active_slots=orm_limit2.active_slots,
            slot_decay_per_second=orm_limit2.slot_decay_per_second,
            created=orm_limit2.created,
            updated=orm_limit2.updated,
        )

        # Clear any existing instances
        MigratableGlobalConcurrencyLimit._instances.clear()

        migratable1 = await MigratableGlobalConcurrencyLimit.construct(limit1)
        migratable2 = await MigratableGlobalConcurrencyLimit.construct(limit2)

        # Distinct source IDs must yield distinct cached wrappers.
        assert migratable1 is not migratable2
        assert len(MigratableGlobalConcurrencyLimit._instances) == 2
        assert migratable1.source_id != migratable2.source_id

    async def test_get_instance_returns_cached_instance(
        self, transfer_global_concurrency_limit: GlobalConcurrencyLimitResponse
    ):
        """Test that get_instance returns cached instance."""
        # Clear any existing instances
        MigratableGlobalConcurrencyLimit._instances.clear()

        # Create instance
        migratable = await MigratableGlobalConcurrencyLimit.construct(
            transfer_global_concurrency_limit
        )

        # Retrieve instance
        retrieved = await MigratableGlobalConcurrencyLimit.get_instance(
            transfer_global_concurrency_limit.id
        )

        assert retrieved is migratable

    async def test_get_instance_returns_none_for_unknown_id(self):
        """Test that get_instance returns None for unknown ID."""
        # Clear any existing instances
        MigratableGlobalConcurrencyLimit._instances.clear()

        unknown_id = uuid.uuid4()
        retrieved = await MigratableGlobalConcurrencyLimit.get_instance(unknown_id)

        assert retrieved is None

    async def test_get_dependencies_returns_empty_list(
        self, transfer_global_concurrency_limit: GlobalConcurrencyLimitResponse
    ):
        """Test that get_dependencies returns empty list (concurrency limits have no dependencies)."""
        migratable = await MigratableGlobalConcurrencyLimit.construct(
            transfer_global_concurrency_limit
        )
        dependencies = await migratable.get_dependencies()
        assert dependencies == []

    @patch("prefect.cli.transfer._migratable_resources.concurrency_limits.get_client")
    async def test_migrate_success(
        self,
        mock_get_client: MagicMock,
        transfer_global_concurrency_limit: GlobalConcurrencyLimitResponse,
    ):
        """Test successful concurrency limit migration."""
        # Mock the client
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client

        # Mock successful creation and read
        destination_limit = GlobalConcurrencyLimitResponse(
            id=uuid.uuid4(),
            name=transfer_global_concurrency_limit.name,
            limit=transfer_global_concurrency_limit.limit,
            active=transfer_global_concurrency_limit.active,
            active_slots=transfer_global_concurrency_limit.active_slots,
            slot_decay_per_second=transfer_global_concurrency_limit.slot_decay_per_second,
            created=transfer_global_concurrency_limit.created,
            updated=transfer_global_concurrency_limit.updated,
        )
        mock_client.create_global_concurrency_limit.return_value = (
            None  # This method doesn't return the object
        )
        # migrate() is expected to read the limit back by name after creating it.
        mock_client.read_global_concurrency_limit_by_name.return_value = (
            destination_limit
        )

        migratable = await MigratableGlobalConcurrencyLimit.construct(
            transfer_global_concurrency_limit
        )
        await migratable.migrate()

        # Verify client was called correctly
        mock_client.create_global_concurrency_limit.assert_called_once_with(
            concurrency_limit=GlobalConcurrencyLimitCreate(
                name=transfer_global_concurrency_limit.name,
                limit=transfer_global_concurrency_limit.limit,
                active=transfer_global_concurrency_limit.active,
                active_slots=transfer_global_concurrency_limit.active_slots,
            )
        )
        mock_client.read_global_concurrency_limit_by_name.assert_called_once_with(
            transfer_global_concurrency_limit.name
        )

        # Verify destination_global_concurrency_limit is set
        assert migratable.destination_global_concurrency_limit == destination_limit
        assert migratable.destination_id == destination_limit.id

    @patch("prefect.cli.transfer._migratable_resources.concurrency_limits.get_client")
    async def test_migrate_already_exists_raises_transfer_skipped(
        self,
        mock_get_client: MagicMock,
        transfer_global_concurrency_limit: GlobalConcurrencyLimitResponse,
    ):
        """Test migration when concurrency limit already exists raises TransferSkipped."""
        # Mock the client
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client

        # Mock ObjectAlreadyExists exception on create
        mock_http_exc = Exception("Conflict")
        mock_client.create_global_concurrency_limit.side_effect = ObjectAlreadyExists(
            mock_http_exc
        )

        # Mock successful read of existing limit
        existing_limit = GlobalConcurrencyLimitResponse(
            id=uuid.uuid4(),
            name=transfer_global_concurrency_limit.name,
            limit=10,  # Different limit to show it reads existing
            active=False,  # Different active state
            active_slots=2,
            slot_decay_per_second=1.5,
            created=transfer_global_concurrency_limit.created,
            updated=transfer_global_concurrency_limit.updated,
        )
        mock_client.read_global_concurrency_limit_by_name.return_value = existing_limit

        migratable = await MigratableGlobalConcurrencyLimit.construct(
            transfer_global_concurrency_limit
        )

        # Should raise TransferSkipped
        with pytest.raises(TransferSkipped, match="Already exists"):
            await migratable.migrate()

        # Verify client calls
        mock_client.create_global_concurrency_limit.assert_called_once()
        mock_client.read_global_concurrency_limit_by_name.assert_called_once_with(
            transfer_global_concurrency_limit.name
        )

        # Verify destination_global_concurrency_limit is still set to the existing limit
        assert migratable.destination_global_concurrency_limit == existing_limit
        assert migratable.destination_id == existing_limit.id

    @pytest.mark.parametrize(
        "active,active_slots,limit",
        [
            (True, 0, 5),
            (False, 2, 10),
            (True, 8, 8),  # At capacity
        ],
    )
    async def test_concurrency_limit_with_different_states(
        self, session: AsyncSession, active: bool, active_slots: int, limit: int
    ):
        """Test concurrency limits with different active states."""
        from prefect.client.schemas.responses import GlobalConcurrencyLimitResponse
        from prefect.server import models, schemas

        # Clear instances before test
        MigratableGlobalConcurrencyLimit._instances.clear()

        orm_limit = await models.concurrency_limits_v2.create_concurrency_limit(
            session=session,
            concurrency_limit=schemas.core.ConcurrencyLimitV2(
                name=f"test-limit-{uuid.uuid4()}",
                limit=limit,
                active=active,
                active_slots=active_slots,
            ),
        )
        await session.commit()

        # Convert to client schema object
        client_limit = GlobalConcurrencyLimitResponse(
            id=orm_limit.id,
            name=orm_limit.name,
            limit=orm_limit.limit,
            active=orm_limit.active,
            active_slots=orm_limit.active_slots,
            slot_decay_per_second=orm_limit.slot_decay_per_second,
            created=orm_limit.created,
            updated=orm_limit.updated,
        )

        # Test construction works with different states
        migratable = await MigratableGlobalConcurrencyLimit.construct(client_limit)
        assert migratable.source_global_concurrency_limit.active == active
        assert migratable.source_global_concurrency_limit.active_slots == active_slots
        assert migratable.source_global_concurrency_limit.limit == limit

    @pytest.mark.parametrize(
        "name_prefix,limit,active_slots",
        [
            ("zero-limit", 0, 0),
            ("large-limit", 1000000, 0),
            ("single-limit", 1, 1),
        ],
    )
    async def test_concurrency_limit_with_edge_case_values(
        self, session: AsyncSession, name_prefix: str, limit: int, active_slots: int
    ):
        """Test concurrency limits with edge case values."""
        from prefect.client.schemas.responses import GlobalConcurrencyLimitResponse
        from prefect.server import models, schemas

        # Clear instances before test
        MigratableGlobalConcurrencyLimit._instances.clear()

        orm_limit = await models.concurrency_limits_v2.create_concurrency_limit(
            session=session,
            concurrency_limit=schemas.core.ConcurrencyLimitV2(
                name=f"{name_prefix}-{uuid.uuid4()}",
                limit=limit,
                active=True,
                active_slots=active_slots,
            ),
        )
        await session.commit()

        # Convert to client schema object
        client_limit = GlobalConcurrencyLimitResponse(
            id=orm_limit.id,
            name=orm_limit.name,
            limit=orm_limit.limit,
            active=orm_limit.active,
            active_slots=orm_limit.active_slots,
            slot_decay_per_second=orm_limit.slot_decay_per_second,
            created=orm_limit.created,
            updated=orm_limit.updated,
        )

        # Test construction works with edge case values
        migratable = await MigratableGlobalConcurrencyLimit.construct(client_limit)
        assert migratable.source_global_concurrency_limit.limit == limit
        assert migratable.source_global_concurrency_limit.active_slots == active_slots
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/cli/transfer/test_concurrency_limits.py",
"license": "Apache License 2.0",
"lines": 299,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:tests/cli/transfer/test_dag.py | import asyncio
import uuid
from collections import defaultdict
from typing import Optional
from unittest.mock import patch
from prefect.cli.transfer._dag import NodeState, TransferDAG
from prefect.cli.transfer._exceptions import TransferSkipped
from prefect.cli.transfer._migratable_resources.base import MigratableProtocol
class MockMigratableResource:
    """In-memory stand-in for a migratable resource, used to exercise DAG tests.

    Tracks whether ``migrate``/``get_dependencies`` were invoked and can be
    configured to fail (or skip, when the name ends in ``_skip``) on migration.
    """

    def __init__(
        self,
        resource_id: uuid.UUID,
        name: str,
        migrate_success: bool = True,
        dependencies: Optional[list[uuid.UUID]] = None,
    ):
        # Mirror the MigratableProtocol surface: ids, name, and call tracking.
        self.id = resource_id
        self.source_id = resource_id
        self.destination_id = None
        self.name = name
        self._migrate_success = migrate_success
        self._dependencies = dependencies or []
        self.migrate_called = False
        self.get_dependencies_called = False

    async def migrate(self) -> None:
        """Record the call, then succeed or raise based on configuration."""
        self.migrate_called = True
        if self._migrate_success:
            return
        # Names ending in "_skip" simulate a skipped transfer; anything else
        # simulates a hard failure.
        if self.name.endswith("_skip"):
            raise TransferSkipped("Test skip")
        raise ValueError(f"Mock migration error for {self.name}")

    async def get_dependencies(self) -> list[MigratableProtocol]:
        """Record the call and fabricate one mock resource per dependency id."""
        self.get_dependencies_called = True
        deps = []
        for dep_id in self._dependencies:
            deps.append(MockMigratableResource(dep_id, f"dep_{dep_id}"))
        return deps

    def __str__(self) -> str:
        return f"MockResource({self.name})"

    def __repr__(self) -> str:
        return f"MockResource(id={self.id}, name='{self.name}')"
class TestTransferDAG:
    """Tests for ``TransferDAG``: node/edge bookkeeping, building from root
    resources, cycle detection, topological execution layers, concurrent
    execution with failure/skip propagation, and statistics."""

    def test_init_creates_empty_dag(self):
        """Test DAG initialization creates empty structures."""
        dag = TransferDAG()
        assert dag.nodes == {}
        assert dag._dependencies == defaultdict(set)
        assert dag._dependents == defaultdict(set)
        assert dag._status == {}

    def test_add_node_creates_new_node(self):
        """Test adding a new node to the DAG."""
        dag = TransferDAG()
        resource = MockMigratableResource(uuid.uuid4(), "test-resource")

        node_id = dag.add_node(resource)

        # Nodes are keyed by source_id and start in the PENDING state.
        assert node_id == resource.source_id
        assert resource.source_id in dag.nodes
        assert dag.nodes[resource.source_id] == resource
        assert dag._status[resource.source_id].state == NodeState.PENDING

    def test_add_node_returns_existing_node(self):
        """Test adding an existing node returns the cached node."""
        dag = TransferDAG()
        resource = MockMigratableResource(uuid.uuid4(), "test-resource")

        node_id1 = dag.add_node(resource)
        node_id2 = dag.add_node(resource)

        assert node_id1 == node_id2
        assert len(dag.nodes) == 1

    def test_add_edge_creates_dependency(self):
        """Test adding an edge creates proper dependency relationship."""
        dag = TransferDAG()
        parent = MockMigratableResource(uuid.uuid4(), "parent")
        child = MockMigratableResource(uuid.uuid4(), "child")

        dag.add_node(parent)
        dag.add_node(child)
        dag.add_edge(parent.source_id, child.source_id)

        # An edge parent -> child is recorded in both directions.
        assert child.source_id in dag._dependencies[parent.source_id]
        assert parent.source_id in dag._dependents[child.source_id]

    async def test_build_from_roots_single_resource(self):
        """Test building DAG from a single resource with no dependencies."""
        dag = TransferDAG()
        resource = MockMigratableResource(uuid.uuid4(), "single-resource")

        await dag.build_from_roots([resource])

        assert len(dag.nodes) == 1
        assert resource.source_id in dag.nodes
        assert len(dag._dependencies) == 0
        assert resource.get_dependencies_called

    async def test_build_from_roots_with_dependencies(self):
        """Test building DAG from resources with dependencies."""
        dag = TransferDAG()

        dep_id = uuid.uuid4()
        root_resource = MockMigratableResource(
            uuid.uuid4(), "root-resource", dependencies=[dep_id]
        )

        with patch.object(root_resource, "get_dependencies") as mock_get_deps:
            dependency = MockMigratableResource(dep_id, "dependency")
            mock_get_deps.return_value = [dependency]

            await dag.build_from_roots([root_resource])

        assert len(dag.nodes) == 2
        assert root_resource.source_id in dag.nodes
        assert dependency.source_id in dag.nodes
        assert dependency.source_id in dag._dependencies[root_resource.source_id]
        assert root_resource.source_id in dag._dependents[dependency.source_id]
        mock_get_deps.assert_called_once()

    async def test_build_from_roots_complex_dependency_chain(self):
        """Test building DAG with complex dependency chains."""
        dag = TransferDAG()

        # Create a chain: root -> mid -> leaf
        leaf_id = uuid.uuid4()
        mid_id = uuid.uuid4()
        root_id = uuid.uuid4()

        leaf = MockMigratableResource(leaf_id, "leaf")
        mid = MockMigratableResource(mid_id, "mid", dependencies=[leaf_id])
        root = MockMigratableResource(root_id, "root", dependencies=[mid_id])

        with (
            patch.object(mid, "get_dependencies", return_value=[leaf]),
            patch.object(root, "get_dependencies", return_value=[mid]),
            patch.object(leaf, "get_dependencies", return_value=[]),
        ):
            await dag.build_from_roots([root])

        assert len(dag.nodes) == 3
        assert mid_id in dag._dependencies[root_id]
        assert leaf_id in dag._dependencies[mid_id]
        assert len(dag._dependencies[leaf_id]) == 0
        assert len(dag._dependencies[mid_id]) == 1
        assert len(dag._dependencies[root_id]) == 1

    def test_detect_cycles_no_cycle(self):
        """Test cycle detection on acyclic graph."""
        dag = TransferDAG()
        node1 = MockMigratableResource(uuid.uuid4(), "node1")
        node2 = MockMigratableResource(uuid.uuid4(), "node2")

        dag.add_node(node1)
        dag.add_node(node2)
        dag.add_edge(node1.source_id, node2.source_id)

        has_cycles = dag.has_cycles()
        assert has_cycles is False

    def test_detect_cycles_with_cycle(self):
        """Test cycle detection finds cycles."""
        dag = TransferDAG()
        node1 = MockMigratableResource(uuid.uuid4(), "node1")
        node2 = MockMigratableResource(uuid.uuid4(), "node2")
        node3 = MockMigratableResource(uuid.uuid4(), "node3")

        dag.add_node(node1)
        dag.add_node(node2)
        dag.add_node(node3)

        # Create cycle: node1 -> node2 -> node3 -> node1
        dag.add_edge(node1.source_id, node2.source_id)
        dag.add_edge(node2.source_id, node3.source_id)
        dag.add_edge(node3.source_id, node1.source_id)

        has_cycles = dag.has_cycles()
        assert has_cycles is True

    def test_detect_cycles_self_loop(self):
        """Test cycle detection finds self loops."""
        dag = TransferDAG()
        node = MockMigratableResource(uuid.uuid4(), "self-loop")

        dag.add_node(node)
        dag.add_edge(node.source_id, node.source_id)

        has_cycles = dag.has_cycles()
        assert has_cycles is True

    def test_get_execution_layers_empty_dag(self):
        """Test getting execution layers from empty DAG."""
        dag = TransferDAG()
        layers = dag.get_execution_layers()
        assert layers == []

    def test_get_execution_layers_single_node(self):
        """Test getting execution layers with single node."""
        dag = TransferDAG()
        resource = MockMigratableResource(uuid.uuid4(), "single")
        dag.add_node(resource)

        layers = dag.get_execution_layers()

        assert len(layers) == 1
        assert len(layers[0]) == 1
        assert resource in layers[0]

    def test_get_execution_layers_dependency_chain(self):
        """Test getting execution layers with dependency chain."""
        dag = TransferDAG()

        # Create chain: dep -> resource
        dep = MockMigratableResource(uuid.uuid4(), "dep")
        resource = MockMigratableResource(uuid.uuid4(), "resource")

        dag.add_node(dep)
        dag.add_node(resource)
        dag.add_edge(resource.source_id, dep.source_id)  # resource depends on dep

        layers = dag.get_execution_layers()

        assert len(layers) == 2
        assert dep in layers[0]  # Dependencies first
        assert resource in layers[1]  # Dependents second

    def test_get_execution_layers_complex_graph(self):
        """Test getting execution layers with complex dependency graph."""
        dag = TransferDAG()

        # Create diamond dependency: root -> [dep1, dep2] -> leaf
        root = MockMigratableResource(uuid.uuid4(), "root")
        dep1 = MockMigratableResource(uuid.uuid4(), "dep1")
        dep2 = MockMigratableResource(uuid.uuid4(), "dep2")
        leaf = MockMigratableResource(uuid.uuid4(), "leaf")

        for resource in [root, dep1, dep2, leaf]:
            dag.add_node(resource)

        # Root depends on dep1 and dep2
        dag.add_edge(root.source_id, dep1.source_id)
        dag.add_edge(root.source_id, dep2.source_id)
        # dep1 and dep2 depend on leaf
        dag.add_edge(dep1.source_id, leaf.source_id)
        dag.add_edge(dep2.source_id, leaf.source_id)

        layers = dag.get_execution_layers()

        assert len(layers) == 3
        assert leaf in layers[0]  # Leaf first (no dependencies)
        assert {dep1, dep2} == set(layers[1])  # dep1 and dep2 parallel
        assert root in layers[2]  # Root last

    async def test_execute_success_single_resource(self):
        """Test successful execution of single resource."""
        dag = TransferDAG()
        resource = MockMigratableResource(uuid.uuid4(), "test-resource")
        await dag.build_from_roots([resource])

        async def process_node(node: MigratableProtocol):
            await node.migrate()
            return "success"

        results = await dag.execute_concurrent(process_node)

        assert len([r for r in results.values() if r == "success"]) == 1
        assert resource.migrate_called

    async def test_execute_success_multiple_resources(self):
        """Test successful execution of multiple independent resources."""
        dag = TransferDAG()
        resource1 = MockMigratableResource(uuid.uuid4(), "resource1")
        resource2 = MockMigratableResource(uuid.uuid4(), "resource2")
        await dag.build_from_roots([resource1, resource2])

        async def process_node(node: MigratableProtocol):
            await node.migrate()
            return "success"

        results = await dag.execute_concurrent(process_node)

        assert len([r for r in results.values() if r == "success"]) == 2
        assert resource1.migrate_called
        assert resource2.migrate_called

    async def test_execute_with_skipped_resource(self):
        """Test execution with skipped resource."""
        dag = TransferDAG()
        # "_skip" suffix makes the mock raise TransferSkipped (see MockMigratableResource).
        resource = MockMigratableResource(
            uuid.uuid4(), "test_skip", migrate_success=False
        )
        await dag.build_from_roots([resource])

        async def process_node(node: MigratableProtocol):
            await node.migrate()
            return "success"

        results = await dag.execute_concurrent(process_node)

        # Should have TransferSkipped exception
        assert len([r for r in results.values() if isinstance(r, TransferSkipped)]) == 1

    async def test_execute_with_failed_resource(self):
        """Test execution with failed resource."""
        dag = TransferDAG()
        resource = MockMigratableResource(
            uuid.uuid4(), "test_fail", migrate_success=False
        )
        await dag.build_from_roots([resource])

        async def process_node(node: MigratableProtocol):
            await node.migrate()
            return "success"

        results = await dag.execute_concurrent(process_node)

        # Should have ValueError exception
        assert len([r for r in results.values() if isinstance(r, ValueError)]) == 1

    async def test_execute_failure_propagation(self):
        """Test that failure propagates to skip dependent resources."""
        dag = TransferDAG()

        # Create failing dependency and successful root
        dep = MockMigratableResource(uuid.uuid4(), "failing_dep", migrate_success=False)
        root = MockMigratableResource(uuid.uuid4(), "root")

        dag.add_node(dep)
        dag.add_node(root)
        dag.add_edge(root.source_id, dep.source_id)  # root depends on dep

        async def process_node(node: MigratableProtocol):
            await node.migrate()
            return "success"

        results = await dag.execute_concurrent(process_node)

        # Should have one failure and one skip
        assert len([r for r in results.values() if isinstance(r, ValueError)]) == 1
        assert len([r for r in results.values() if isinstance(r, TransferSkipped)]) == 1

        # Root should not be migrated due to failed dependency
        assert not root.migrate_called
        assert dep.migrate_called

    async def test_execute_with_dependency_chain(self):
        """Test execution with successful dependency chain."""
        dag = TransferDAG()

        # Create chain: leaf <- mid <- root (execution order: leaf, mid, root)
        leaf = MockMigratableResource(uuid.uuid4(), "leaf")
        mid = MockMigratableResource(uuid.uuid4(), "mid")
        root = MockMigratableResource(uuid.uuid4(), "root")

        dag.add_node(leaf)
        dag.add_node(mid)
        dag.add_node(root)
        dag.add_edge(mid.source_id, leaf.source_id)  # mid depends on leaf
        dag.add_edge(root.source_id, mid.source_id)  # root depends on mid

        async def process_node(node: MigratableProtocol):
            await node.migrate()
            return "success"

        results = await dag.execute_concurrent(process_node)

        assert len([r for r in results.values() if r == "success"]) == 3
        assert leaf.migrate_called
        assert mid.migrate_called
        assert root.migrate_called

    async def test_execute_concurrent_worker_pool(self):
        """Test execution uses concurrent worker pool."""
        dag = TransferDAG()

        # Create multiple independent resources
        resources = [
            MockMigratableResource(uuid.uuid4(), f"resource_{i}") for i in range(5)
        ]
        for resource in resources:
            dag.add_node(resource)

        # Mock sleep to verify concurrency
        async def mock_migrate(self: MockMigratableResource):
            await asyncio.sleep(0.1)  # Simulate work
            self.migrate_called = True

        # __get__ binds the plain function as a method on each instance.
        for resource in resources:
            resource.migrate = mock_migrate.__get__(resource, MockMigratableResource)

        async def process_node(node: MigratableProtocol):
            await node.migrate()
            return "success"

        # Execute with worker pool
        start_time = asyncio.get_event_loop().time()
        results = await dag.execute_concurrent(process_node, max_workers=3)
        end_time = asyncio.get_event_loop().time()

        # Should complete faster than sequential execution due to concurrency
        assert end_time - start_time < 0.5  # Would be 0.5s if sequential
        assert len([r for r in results.values() if r == "success"]) == 5
        for resource in resources:
            assert resource.migrate_called

    async def test_execute_respects_max_workers_limit(self):
        """Test execution respects max_workers limiter."""
        dag = TransferDAG()

        # Track active workers
        active_workers: list[str] = []
        max_concurrent = 0

        async def mock_migrate(self: MockMigratableResource):
            nonlocal max_concurrent
            active_workers.append(self.name)
            max_concurrent = max(max_concurrent, len(active_workers))
            await asyncio.sleep(0.1)
            active_workers.remove(self.name)
            self.migrate_called = True

        # Create resources
        resources = [
            MockMigratableResource(uuid.uuid4(), f"resource_{i}") for i in range(5)
        ]
        for resource in resources:
            dag.add_node(resource)
            resource.migrate = mock_migrate.__get__(resource, MockMigratableResource)

        async def process_node(node: MigratableProtocol):
            await node.migrate()
            return "success"

        await dag.execute_concurrent(process_node, max_workers=2)

        # Should never exceed max_workers limit
        assert max_concurrent <= 2
        for resource in resources:
            assert resource.migrate_called

    def test_statistics_empty_dag(self):
        """Test statistics for empty DAG."""
        dag = TransferDAG()
        stats = dag.get_statistics()
        assert stats["total_nodes"] == 0
        assert stats["total_edges"] == 0

    def test_statistics_with_nodes_and_edges(self):
        """Test statistics calculation with nodes and edges."""
        dag = TransferDAG()

        resource1 = MockMigratableResource(uuid.uuid4(), "resource1")
        resource2 = MockMigratableResource(uuid.uuid4(), "resource2")
        resource3 = MockMigratableResource(uuid.uuid4(), "resource3")

        dag.add_node(resource1)
        dag.add_node(resource2)
        dag.add_node(resource3)
        dag.add_edge(resource1.source_id, resource2.source_id)
        dag.add_edge(resource2.source_id, resource3.source_id)

        stats = dag.get_statistics()
        assert stats["total_nodes"] == 3
        assert stats["total_edges"] == 2

    async def test_node_status_tracking(self):
        """Test that node status is properly tracked during execution."""
        dag = TransferDAG()

        success_resource = MockMigratableResource(uuid.uuid4(), "success")
        skip_resource = MockMigratableResource(
            uuid.uuid4(), "test_skip", migrate_success=False
        )
        fail_resource = MockMigratableResource(
            uuid.uuid4(), "fail", migrate_success=False
        )

        dag.add_node(success_resource)
        dag.add_node(skip_resource)
        dag.add_node(fail_resource)

        async def process_node(node: MigratableProtocol):
            await node.migrate()
            return "success"

        await dag.execute_concurrent(process_node)

        # Check final node states
        assert dag._status[success_resource.source_id].state == NodeState.COMPLETED
        assert (
            dag._status[skip_resource.source_id].state == NodeState.SKIPPED
        )  # TransferSkipped is treated as failure
        assert dag._status[fail_resource.source_id].state == NodeState.FAILED

    async def test_build_from_roots_handles_duplicate_dependencies(self):
        """Test building DAG handles duplicate dependency references correctly."""
        dag = TransferDAG()

        shared_dep_id = uuid.uuid4()
        shared_dep = MockMigratableResource(shared_dep_id, "shared-dep")

        root1 = MockMigratableResource(
            uuid.uuid4(), "root1", dependencies=[shared_dep_id]
        )
        root2 = MockMigratableResource(
            uuid.uuid4(), "root2", dependencies=[shared_dep_id]
        )

        with (
            patch.object(root1, "get_dependencies", return_value=[shared_dep]),
            patch.object(root2, "get_dependencies", return_value=[shared_dep]),
            patch.object(shared_dep, "get_dependencies", return_value=[]),
        ):
            await dag.build_from_roots([root1, root2])

        # Should have 3 nodes (root1, root2, shared_dep)
        assert len(dag.nodes) == 3

        # shared_dep should be depended on by both roots
        assert len(dag._dependents[shared_dep_id]) == 2

        # Both roots should depend on shared_dep
        assert shared_dep_id in dag._dependencies[root1.source_id]
        assert shared_dep_id in dag._dependencies[root2.source_id]

    async def test_execute_empty_dag_returns_zero_stats(self):
        """Test executing empty DAG returns empty results."""
        dag = TransferDAG()

        async def process_node(node: MigratableProtocol):
            await node.migrate()
            return "success"

        results = await dag.execute_concurrent(process_node)

        assert results == {}
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/cli/transfer/test_dag.py",
"license": "Apache License 2.0",
"lines": 427,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:tests/cli/transfer/test_deployments.py | import uuid
import warnings
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from prefect.cli.transfer._exceptions import TransferSkipped
from prefect.cli.transfer._migratable_resources.deployments import MigratableDeployment
from prefect.client.schemas.actions import DeploymentScheduleCreate
from prefect.client.schemas.responses import DeploymentResponse
from prefect.exceptions import ObjectAlreadyExists, ObjectLimitReached
# Suppress deprecation warnings from accessing deprecated fields in DeploymentResponse
# (these surface from pydantic internals, hence the "pydantic.*" module filter).
warnings.filterwarnings("ignore", category=DeprecationWarning, module="pydantic.*")
class TestMigratableDeployment:
    async def test_construct_creates_new_instance(
        self, transfer_deployment: DeploymentResponse
    ):
        """Test that construct creates a new MigratableDeployment instance."""
        migratable = await MigratableDeployment.construct(transfer_deployment)

        # A fresh wrapper mirrors the source and has no destination/dependency state.
        assert isinstance(migratable, MigratableDeployment)
        assert migratable.source_deployment == transfer_deployment
        assert migratable.source_id == transfer_deployment.id
        assert migratable.destination_deployment is None
        assert migratable.destination_id is None
        assert migratable._dependencies == {}
    async def test_construct_returns_cached_instance(
        self, transfer_deployment: DeploymentResponse
    ):
        """Test that construct returns cached instance for same ID."""
        # Clear any existing instances so the cache starts empty for this test
        MigratableDeployment._instances.clear()

        # Create first instance
        migratable1 = await MigratableDeployment.construct(transfer_deployment)

        # Create second instance with same deployment
        migratable2 = await MigratableDeployment.construct(transfer_deployment)

        # Should be the same instance
        assert migratable1 is migratable2
        assert len(MigratableDeployment._instances) == 1
    async def test_get_instance_returns_cached_instance(
        self, transfer_deployment: DeploymentResponse
    ):
        """Test that get_instance returns cached instance."""
        # Clear any existing instances
        MigratableDeployment._instances.clear()

        # Create instance
        migratable = await MigratableDeployment.construct(transfer_deployment)

        # Retrieve instance by the source deployment's id
        retrieved = await MigratableDeployment.get_instance(transfer_deployment.id)

        assert retrieved is migratable
    async def test_get_instance_returns_none_for_unknown_id(self):
        """Test that get_instance returns None for unknown ID."""
        # Clear any existing instances
        MigratableDeployment._instances.clear()

        # A random UUID was never constructed, so no cached instance exists.
        unknown_id = uuid.uuid4()
        retrieved = await MigratableDeployment.get_instance(unknown_id)

        assert retrieved is None
    @patch(
        "prefect.cli.transfer._migratable_resources.deployments.construct_migratable_resource"
    )
    @patch("prefect.cli.transfer._migratable_resources.deployments.get_client")
    async def test_get_dependencies_with_flow_only(
        self,
        mock_get_client: MagicMock,
        mock_construct_resource: AsyncMock,
        transfer_deployment: DeploymentResponse,
    ):
        """Test get_dependencies with only flow dependency."""
        # Mock the client (get_client is used as an async context manager)
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client

        # Mock flow read
        mock_flow = MagicMock()
        mock_flow.id = transfer_deployment.flow_id
        mock_client.read_flow.return_value = mock_flow

        mock_migratable_flow = MagicMock()
        mock_construct_resource.return_value = mock_migratable_flow

        # Create deployment with no work queue, storage, or infrastructure
        deployment = DeploymentResponse(
            id=uuid.uuid4(),
            name=f"test-deployment-{uuid.uuid4()}",
            flow_id=transfer_deployment.flow_id,
            schedules=[],
            tags=[],
            parameters={},
            work_queue_id=None,
            storage_document_id=None,
            infrastructure_document_id=None,
            pull_steps=None,
        )

        migratable = await MigratableDeployment.construct(deployment)
        dependencies = await migratable.get_dependencies()

        # Only the flow should be discovered as a dependency.
        assert len(dependencies) == 1
        assert dependencies[0] == mock_migratable_flow
        assert deployment.flow_id in migratable._dependencies
        mock_client.read_flow.assert_called_once_with(deployment.flow_id)
        mock_construct_resource.assert_called_once_with(mock_flow)
    @patch(
        "prefect.cli.transfer._migratable_resources.deployments.construct_migratable_resource"
    )
    @patch("prefect.cli.transfer._migratable_resources.deployments.get_client")
    async def test_get_dependencies_with_work_queue(
        self,
        mock_get_client: MagicMock,
        mock_construct_resource: AsyncMock,
        transfer_deployment: DeploymentResponse,
    ):
        """Test get_dependencies with work queue dependency."""
        # Mock the client (get_client is used as an async context manager)
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client

        # Mock flow read
        mock_flow = MagicMock()
        mock_flow.id = transfer_deployment.flow_id
        mock_client.read_flow.return_value = mock_flow

        # Mock work queue read
        work_queue_id = uuid.uuid4()
        mock_work_queue = MagicMock()
        mock_work_queue.id = work_queue_id
        mock_client.read_work_queue.return_value = mock_work_queue

        # construct_migratable_resource is called once per dependency, in order:
        # flow first, then work queue.
        mock_migratable_flow = MagicMock()
        mock_migratable_work_queue = MagicMock()
        mock_construct_resource.side_effect = [
            mock_migratable_flow,
            mock_migratable_work_queue,
        ]

        # Create deployment with work queue
        deployment = DeploymentResponse(
            id=uuid.uuid4(),
            name=f"test-deployment-{uuid.uuid4()}",
            flow_id=transfer_deployment.flow_id,
            schedules=[],
            tags=[],
            parameters={},
            work_queue_id=work_queue_id,
            storage_document_id=None,
            infrastructure_document_id=None,
            pull_steps=None,
        )

        migratable = await MigratableDeployment.construct(deployment)
        dependencies = await migratable.get_dependencies()

        assert len(dependencies) == 2
        assert mock_migratable_flow in dependencies
        assert mock_migratable_work_queue in dependencies
        assert deployment.flow_id in migratable._dependencies
        assert work_queue_id in migratable._dependencies
        mock_client.read_work_queue.assert_called_once_with(work_queue_id)
    @patch(
        "prefect.cli.transfer._migratable_resources.deployments.construct_migratable_resource"
    )
    @patch("prefect.cli.transfer._migratable_resources.deployments.get_client")
    async def test_get_dependencies_with_storage_document(
        self,
        mock_get_client: MagicMock,
        mock_construct_resource: AsyncMock,
        transfer_deployment: DeploymentResponse,
    ):
        """Test get_dependencies with storage document dependency."""
        # NOTE(review): the side_effect list assumes the flow wrapper is built
        # before the storage-document wrapper -- confirm against
        # get_dependencies before reordering.
        # Mock the client
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client
        # Mock flow read
        mock_flow = MagicMock()
        mock_flow.id = transfer_deployment.flow_id
        mock_client.read_flow.return_value = mock_flow
        # Mock storage document read
        storage_document_id = uuid.uuid4()
        mock_storage_document = MagicMock()
        mock_storage_document.id = storage_document_id
        mock_client.read_block_document.return_value = mock_storage_document
        mock_migratable_flow = MagicMock()
        mock_migratable_storage = MagicMock()
        mock_construct_resource.side_effect = [
            mock_migratable_flow,
            mock_migratable_storage,
        ]
        # Create deployment with storage document
        deployment = DeploymentResponse(
            id=uuid.uuid4(),
            name=f"test-deployment-{uuid.uuid4()}",
            flow_id=transfer_deployment.flow_id,
            schedules=[],
            tags=[],
            parameters={},
            work_queue_id=None,
            storage_document_id=storage_document_id,
            infrastructure_document_id=None,
            pull_steps=None,
        )
        migratable = await MigratableDeployment.construct(deployment)
        dependencies = await migratable.get_dependencies()
        assert len(dependencies) == 2
        assert mock_migratable_flow in dependencies
        assert mock_migratable_storage in dependencies
        assert deployment.flow_id in migratable._dependencies
        assert storage_document_id in migratable._dependencies
        # read_block_document serves both storage and infrastructure lookups;
        # with no infrastructure document it must run exactly once.
        assert mock_client.read_block_document.call_count == 1
        mock_client.read_block_document.assert_any_call(storage_document_id)
    @patch(
        "prefect.cli.transfer._migratable_resources.deployments.construct_migratable_resource"
    )
    @patch("prefect.cli.transfer._migratable_resources.deployments.get_client")
    async def test_get_dependencies_with_infrastructure_document(
        self,
        mock_get_client: MagicMock,
        mock_construct_resource: AsyncMock,
        transfer_deployment: DeploymentResponse,
    ):
        """Test get_dependencies with infrastructure document dependency."""
        # NOTE(review): the side_effect list assumes the flow wrapper is built
        # before the infrastructure-document wrapper -- confirm against
        # get_dependencies before reordering.
        # Mock the client
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client
        # Mock flow read
        mock_flow = MagicMock()
        mock_flow.id = transfer_deployment.flow_id
        mock_client.read_flow.return_value = mock_flow
        # Mock infrastructure document read
        infrastructure_document_id = uuid.uuid4()
        mock_infrastructure_document = MagicMock()
        mock_infrastructure_document.id = infrastructure_document_id
        mock_client.read_block_document.return_value = mock_infrastructure_document
        mock_migratable_flow = MagicMock()
        mock_migratable_infrastructure = MagicMock()
        mock_construct_resource.side_effect = [
            mock_migratable_flow,
            mock_migratable_infrastructure,
        ]
        # Create deployment with infrastructure document
        deployment = DeploymentResponse(
            id=uuid.uuid4(),
            name=f"test-deployment-{uuid.uuid4()}",
            flow_id=transfer_deployment.flow_id,
            schedules=[],
            tags=[],
            parameters={},
            work_queue_id=None,
            storage_document_id=None,
            infrastructure_document_id=infrastructure_document_id,
            pull_steps=None,
        )
        migratable = await MigratableDeployment.construct(deployment)
        dependencies = await migratable.get_dependencies()
        assert len(dependencies) == 2
        assert mock_migratable_flow in dependencies
        assert mock_migratable_infrastructure in dependencies
        assert deployment.flow_id in migratable._dependencies
        assert infrastructure_document_id in migratable._dependencies
        # With no storage document configured, the single block-document read
        # must be the infrastructure lookup.
        assert mock_client.read_block_document.call_count == 1
        mock_client.read_block_document.assert_any_call(infrastructure_document_id)
    @patch(
        "prefect.cli.transfer._migratable_resources.deployments.construct_migratable_resource"
    )
    @patch("prefect.cli.transfer._migratable_resources.deployments.get_client")
    async def test_get_dependencies_with_all_dependencies(
        self,
        mock_get_client: MagicMock,
        mock_construct_resource: AsyncMock,
        transfer_deployment: DeploymentResponse,
    ):
        """Test get_dependencies with all possible dependencies."""
        # NOTE(review): both side_effect lists assume resolution order
        # flow -> work queue -> storage -> infrastructure -- confirm against
        # get_dependencies before changing either sequence.
        # Mock the client
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client
        # Mock flow read
        mock_flow = MagicMock()
        mock_flow.id = transfer_deployment.flow_id
        mock_client.read_flow.return_value = mock_flow
        # Mock work queue read
        work_queue_id = uuid.uuid4()
        mock_work_queue = MagicMock()
        mock_work_queue.id = work_queue_id
        mock_client.read_work_queue.return_value = mock_work_queue
        # Mock storage and infrastructure document reads
        storage_document_id = uuid.uuid4()
        infrastructure_document_id = uuid.uuid4()
        mock_storage_document = MagicMock()
        mock_storage_document.id = storage_document_id
        mock_infrastructure_document = MagicMock()
        mock_infrastructure_document.id = infrastructure_document_id
        mock_client.read_block_document.side_effect = [
            mock_storage_document,
            mock_infrastructure_document,
        ]
        mock_migratable_flow = MagicMock()
        mock_migratable_work_queue = MagicMock()
        mock_migratable_storage = MagicMock()
        mock_migratable_infrastructure = MagicMock()
        mock_construct_resource.side_effect = [
            mock_migratable_flow,
            mock_migratable_work_queue,
            mock_migratable_storage,
            mock_migratable_infrastructure,
        ]
        # Create deployment with all dependencies
        deployment = DeploymentResponse(
            id=uuid.uuid4(),
            name=f"test-deployment-{uuid.uuid4()}",
            flow_id=transfer_deployment.flow_id,
            schedules=[],
            tags=[],
            parameters={},
            work_queue_id=work_queue_id,
            storage_document_id=storage_document_id,
            infrastructure_document_id=infrastructure_document_id,
            pull_steps=None,
        )
        migratable = await MigratableDeployment.construct(deployment)
        dependencies = await migratable.get_dependencies()
        # All four dependency kinds must appear, each keyed by its source ID.
        assert len(dependencies) == 4
        assert mock_migratable_flow in dependencies
        assert mock_migratable_work_queue in dependencies
        assert mock_migratable_storage in dependencies
        assert mock_migratable_infrastructure in dependencies
        assert deployment.flow_id in migratable._dependencies
        assert work_queue_id in migratable._dependencies
        assert storage_document_id in migratable._dependencies
        assert infrastructure_document_id in migratable._dependencies
async def test_get_dependencies_cached(
self, transfer_deployment: DeploymentResponse
):
"""Test that dependencies are cached after first call."""
migratable = await MigratableDeployment.construct(transfer_deployment)
# Set up some mock dependencies
mock_dependency = MagicMock()
migratable._dependencies[uuid.uuid4()] = mock_dependency
dependencies1 = await migratable.get_dependencies()
dependencies2 = await migratable.get_dependencies()
# Should return the same cached result
assert dependencies1 == dependencies2
assert dependencies1 == [mock_dependency]
    @patch("prefect.cli.transfer._migratable_resources.deployments.get_client")
    @pytest.mark.filterwarnings("ignore::DeprecationWarning")
    async def test_migrate_success(
        self, mock_get_client: MagicMock, transfer_deployment: DeploymentResponse
    ):
        """Test successful deployment migration."""
        # _dependencies is keyed by the source-side IDs; the destination_id on
        # each mock stands in for an already-migrated dependency.
        # Mock the client
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client
        # Mock flow dependency with destination_id
        mock_flow_dependency = MagicMock()
        destination_flow_id = uuid.uuid4()
        mock_flow_dependency.destination_id = destination_flow_id
        # Mock storage dependency with destination_id
        mock_storage_dependency = MagicMock()
        destination_storage_id = uuid.uuid4()
        mock_storage_dependency.destination_id = destination_storage_id
        # Mock successful creation
        created_deployment_id = uuid.uuid4()
        mock_client.create_deployment.return_value = created_deployment_id
        destination_deployment = DeploymentResponse(
            id=created_deployment_id,
            name=transfer_deployment.name,
            flow_id=destination_flow_id,
            schedules=transfer_deployment.schedules,
            tags=transfer_deployment.tags,
            parameters=transfer_deployment.parameters,
        )
        mock_client.read_deployment.return_value = destination_deployment
        migratable = await MigratableDeployment.construct(transfer_deployment)
        # Set up the dependencies manually
        migratable._dependencies[transfer_deployment.flow_id] = mock_flow_dependency
        migratable._dependencies[transfer_deployment.storage_document_id] = (
            mock_storage_dependency
        )
        await migratable.migrate()
        # Verify client calls: create, then read back the created deployment.
        mock_client.create_deployment.assert_called_once()
        mock_client.read_deployment.assert_called_once_with(created_deployment_id)
        # Verify destination_deployment is set
        assert migratable.destination_deployment == destination_deployment
        assert migratable.destination_id == created_deployment_id
    @patch("prefect.cli.transfer._migratable_resources.deployments.get_client")
    @pytest.mark.filterwarnings("ignore::DeprecationWarning")
    async def test_migrate_with_schedules(
        self, mock_get_client: MagicMock, transfer_deployment: DeploymentResponse
    ):
        """Test migration with deployment schedules."""
        # Same dependency wiring as test_migrate_success; this test only
        # inspects the schedule payload passed to create_deployment.
        # Mock the client
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client
        # Mock flow dependency
        mock_flow_dependency = MagicMock()
        destination_flow_id = uuid.uuid4()
        mock_flow_dependency.destination_id = destination_flow_id
        # Mock storage dependency
        mock_storage_dependency = MagicMock()
        destination_storage_id = uuid.uuid4()
        mock_storage_dependency.destination_id = destination_storage_id
        # Mock successful creation
        created_deployment_id = uuid.uuid4()
        mock_client.create_deployment.return_value = created_deployment_id
        mock_client.read_deployment.return_value = MagicMock(id=created_deployment_id)
        migratable = await MigratableDeployment.construct(transfer_deployment)
        migratable._dependencies[transfer_deployment.flow_id] = mock_flow_dependency
        migratable._dependencies[transfer_deployment.storage_document_id] = (
            mock_storage_dependency
        )
        await migratable.migrate()
        # Verify schedules are properly converted: each source schedule must
        # be re-emitted as a DeploymentScheduleCreate in the keyword args.
        create_call = mock_client.create_deployment.call_args[1]
        assert "schedules" in create_call
        schedules = create_call["schedules"]
        assert len(schedules) == len(transfer_deployment.schedules)
        for schedule in schedules:
            assert isinstance(schedule, DeploymentScheduleCreate)
    @patch("prefect.cli.transfer._migratable_resources.deployments.get_client")
    @pytest.mark.filterwarnings("ignore::DeprecationWarning")
    async def test_migrate_already_exists(
        self, mock_get_client: MagicMock, transfer_deployment: DeploymentResponse
    ):
        """Test migration when deployment already exists."""
        # On a name conflict the migrator re-reads the deployment, records it
        # as the destination, and then raises TransferSkipped.
        # Mock the client
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client
        # Mock flow dependency
        mock_flow_dependency = MagicMock()
        destination_flow_id = uuid.uuid4()
        mock_flow_dependency.destination_id = destination_flow_id
        # Mock storage dependency
        mock_storage_dependency = MagicMock()
        destination_storage_id = uuid.uuid4()
        mock_storage_dependency.destination_id = destination_storage_id
        # Mock ObjectAlreadyExists exception on create
        mock_http_exc = Exception("Conflict")
        mock_client.create_deployment.side_effect = ObjectAlreadyExists(mock_http_exc)
        # Mock successful read of existing deployment
        existing_deployment = DeploymentResponse(
            id=uuid.uuid4(),
            name=transfer_deployment.name,
            flow_id=destination_flow_id,
            schedules=[],
            tags=["existing"],
            parameters=transfer_deployment.parameters,
        )
        mock_client.read_deployment.return_value = existing_deployment
        migratable = await MigratableDeployment.construct(transfer_deployment)
        migratable._dependencies[transfer_deployment.flow_id] = mock_flow_dependency
        migratable._dependencies[transfer_deployment.storage_document_id] = (
            mock_storage_dependency
        )
        # Should raise TransferSkipped
        with pytest.raises(TransferSkipped, match="Already exists"):
            await migratable.migrate()
        # Verify calls. NOTE(review): the fallback read uses the *source*
        # deployment ID -- confirm this matches the implementation's intent.
        mock_client.create_deployment.assert_called_once()
        mock_client.read_deployment.assert_called_once_with(transfer_deployment.id)
        # Verify destination_deployment is set to existing
        assert migratable.destination_deployment == existing_deployment
        assert migratable.destination_id == existing_deployment.id
    @patch("prefect.cli.transfer._migratable_resources.deployments.get_client")
    @pytest.mark.filterwarnings("ignore::DeprecationWarning")
    async def test_migrate_limit_reached(
        self, mock_get_client: MagicMock, transfer_deployment: DeploymentResponse
    ):
        """Test migration when deployment limit is reached."""
        # ObjectLimitReached must surface to the user as a skip (with an
        # upgrade hint), not as a hard failure.
        # Mock the client
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client
        # Mock flow dependency
        mock_flow_dependency = MagicMock()
        destination_flow_id = uuid.uuid4()
        mock_flow_dependency.destination_id = destination_flow_id
        # Mock storage dependency
        mock_storage_dependency = MagicMock()
        destination_storage_id = uuid.uuid4()
        mock_storage_dependency.destination_id = destination_storage_id
        # Mock ObjectLimitReached exception on create
        mock_client.create_deployment.side_effect = ObjectLimitReached("Limit reached")
        migratable = await MigratableDeployment.construct(transfer_deployment)
        migratable._dependencies[transfer_deployment.flow_id] = mock_flow_dependency
        migratable._dependencies[transfer_deployment.storage_document_id] = (
            mock_storage_dependency
        )
        # Should raise TransferSkipped
        with pytest.raises(
            TransferSkipped, match=r"Deployment limit reached \(upgrade tier\)"
        ):
            await migratable.migrate()
        # Verify calls
        mock_client.create_deployment.assert_called_once()
async def test_migrate_missing_flow_dependency_raises_error(
self, transfer_deployment: DeploymentResponse
):
"""Test migrate raises error when flow dependency is missing."""
migratable = await MigratableDeployment.construct(transfer_deployment)
with pytest.raises(ValueError, match="Unable to find destination flow"):
await migratable.migrate()
async def test_migrate_missing_storage_dependency_raises_error(
self, transfer_deployment_with_infra: DeploymentResponse
):
"""Test migrate raises error when storage dependency is missing."""
# Mock flow dependency but not storage
mock_flow_dependency = MagicMock()
mock_flow_dependency.destination_id = uuid.uuid4()
# Create deployment with storage document but missing dependency
deployment = DeploymentResponse(
id=uuid.uuid4(),
name=f"test-deployment-{uuid.uuid4()}",
flow_id=transfer_deployment_with_infra.flow_id,
schedules=[],
tags=[],
parameters={},
storage_document_id=uuid.uuid4(), # Has storage but no dependency
infrastructure_document_id=None,
)
migratable = await MigratableDeployment.construct(deployment)
migratable._dependencies[deployment.flow_id] = mock_flow_dependency
with pytest.raises(
ValueError, match="Unable to find destination storage document"
):
await migratable.migrate()
async def test_migrate_missing_infrastructure_dependency_raises_error(
self, transfer_deployment_with_infra: DeploymentResponse
):
"""Test migrate raises error when infrastructure dependency is missing."""
# Mock flow dependency but not infrastructure
mock_flow_dependency = MagicMock()
mock_flow_dependency.destination_id = uuid.uuid4()
# Create deployment with infrastructure document but missing dependency
deployment = DeploymentResponse(
id=uuid.uuid4(),
name=f"test-deployment-{uuid.uuid4()}",
flow_id=transfer_deployment_with_infra.flow_id,
schedules=[],
tags=[],
parameters={},
storage_document_id=None,
infrastructure_document_id=uuid.uuid4(), # Has infrastructure but no dependency
)
migratable = await MigratableDeployment.construct(deployment)
migratable._dependencies[deployment.flow_id] = mock_flow_dependency
with pytest.raises(
ValueError, match="Unable to find destination infrastructure document"
):
await migratable.migrate()
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/cli/transfer/test_deployments.py",
"license": "Apache License 2.0",
"lines": 533,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:tests/cli/transfer/test_flows.py | import uuid
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from sqlalchemy.ext.asyncio import AsyncSession
from prefect.cli.transfer._exceptions import TransferSkipped
from prefect.cli.transfer._migratable_resources.flows import MigratableFlow
from prefect.client.schemas.actions import FlowCreate
from prefect.client.schemas.objects import Flow
from prefect.exceptions import ObjectAlreadyExists
class TestMigratableFlow:
    """Tests for MigratableFlow: construction caching, dependency
    resolution (always empty), and migration through the transfer client."""
    async def test_construct_creates_new_instance(self, transfer_flow: Flow):
        """Test that construct creates a new MigratableFlow instance."""
        migratable = await MigratableFlow.construct(transfer_flow)
        assert isinstance(migratable, MigratableFlow)
        assert migratable.source_flow == transfer_flow
        assert migratable.source_id == transfer_flow.id
        # Destination fields stay unset until migrate() runs.
        assert migratable.destination_flow is None
        assert migratable.destination_id is None
    async def test_construct_returns_cached_instance(self, transfer_flow: Flow):
        """Test that construct returns cached instance for same ID."""
        # Clear any existing instances
        MigratableFlow._instances.clear()
        # Create first instance
        migratable1 = await MigratableFlow.construct(transfer_flow)
        # Create second instance with same flow
        migratable2 = await MigratableFlow.construct(transfer_flow)
        # Should be the same instance
        assert migratable1 is migratable2
        assert len(MigratableFlow._instances) == 1
    async def test_construct_different_flows_create_different_instances(
        self, session: AsyncSession
    ):
        """Test that different flows create different instances."""
        from prefect.client.schemas.objects import Flow
        from prefect.server import models, schemas
        # Create two different flows
        orm_flow1 = await models.flows.create_flow(
            session=session,
            flow=schemas.core.Flow(name=f"test-flow-1-{uuid.uuid4()}"),
        )
        orm_flow2 = await models.flows.create_flow(
            session=session,
            flow=schemas.core.Flow(name=f"test-flow-2-{uuid.uuid4()}"),
        )
        await session.commit()
        # Convert to client schema objects
        flow1 = Flow(
            id=orm_flow1.id,
            name=orm_flow1.name,
            tags=orm_flow1.tags or [],
            labels=orm_flow1.labels or {},
            created=orm_flow1.created,
            updated=orm_flow1.updated,
        )
        flow2 = Flow(
            id=orm_flow2.id,
            name=orm_flow2.name,
            tags=orm_flow2.tags or [],
            labels=orm_flow2.labels or {},
            created=orm_flow2.created,
            updated=orm_flow2.updated,
        )
        # Clear any existing instances
        MigratableFlow._instances.clear()
        migratable1 = await MigratableFlow.construct(flow1)
        migratable2 = await MigratableFlow.construct(flow2)
        assert migratable1 is not migratable2
        assert len(MigratableFlow._instances) == 2
        assert migratable1.source_id != migratable2.source_id
    async def test_get_instance_returns_cached_instance(self, transfer_flow: Flow):
        """Test that get_instance returns cached instance."""
        # Clear any existing instances
        MigratableFlow._instances.clear()
        # Create instance
        migratable = await MigratableFlow.construct(transfer_flow)
        # Retrieve instance
        retrieved = await MigratableFlow.get_instance(transfer_flow.id)
        assert retrieved is migratable
    async def test_get_instance_returns_none_for_unknown_id(self):
        """Test that get_instance returns None for unknown ID."""
        # Clear any existing instances
        MigratableFlow._instances.clear()
        unknown_id = uuid.uuid4()
        retrieved = await MigratableFlow.get_instance(unknown_id)
        assert retrieved is None
    async def test_get_dependencies_returns_empty_list(self, transfer_flow: Flow):
        """Test that get_dependencies returns empty list (flows have no dependencies)."""
        migratable = await MigratableFlow.construct(transfer_flow)
        dependencies = await migratable.get_dependencies()
        assert dependencies == []
    @patch("prefect.cli.transfer._migratable_resources.flows.get_client")
    async def test_migrate_success(
        self, mock_get_client: MagicMock, transfer_flow: Flow
    ):
        """Test successful flow migration."""
        # Mock the client
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client
        # Mock successful creation
        destination_flow = Flow(
            id=uuid.uuid4(),
            name=transfer_flow.name,
            tags=transfer_flow.tags,
            labels=transfer_flow.labels,
            created=transfer_flow.created,
            updated=transfer_flow.updated,
        )
        mock_response = MagicMock()
        mock_response.json.return_value = destination_flow.model_dump()
        mock_client.request.return_value = mock_response
        migratable = await MigratableFlow.construct(transfer_flow)
        await migratable.migrate()
        # The migrator posts FlowCreate directly to /flows/ rather than using
        # a high-level client helper, so the raw request is asserted here.
        mock_client.request.assert_called_once_with(
            "POST",
            "/flows/",
            json=FlowCreate(
                name=transfer_flow.name,
                tags=transfer_flow.tags,
                labels=transfer_flow.labels,
            ).model_dump(mode="json"),
        )
        # Verify destination_flow is set
        assert migratable.destination_flow is not None
        assert migratable.destination_flow.name == destination_flow.name
        assert migratable.destination_flow.tags == destination_flow.tags
        assert migratable.destination_flow.labels == destination_flow.labels
    @patch("prefect.cli.transfer._migratable_resources.flows.get_client")
    async def test_migrate_already_exists_raises_transfer_skipped(
        self, mock_get_client: MagicMock, transfer_flow: Flow
    ):
        """Test migration when flow already exists raises TransferSkipped."""
        # Mock the client
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client
        # Mock ObjectAlreadyExists exception on create
        mock_http_exc = Exception("Conflict")
        mock_client.request.side_effect = ObjectAlreadyExists(mock_http_exc)
        # Mock successful read of existing flow
        from typing import cast
        from prefect.types import KeyValueLabelsField
        existing_flow = Flow(
            id=uuid.uuid4(),
            name=transfer_flow.name,
            tags=["existing-tag"],  # Different tags to show it reads existing
            labels=cast(
                KeyValueLabelsField, {"environment": "existing"}
            ),  # Different labels
            created=transfer_flow.created,
            updated=transfer_flow.updated,
        )
        mock_client.read_flows.return_value = [existing_flow]
        migratable = await MigratableFlow.construct(transfer_flow)
        # Should raise TransferSkipped
        with pytest.raises(TransferSkipped, match="Already exists"):
            await migratable.migrate()
        # NOTE(review): migrate appears to locate the existing flow via
        # read_flows; the filter arguments are not asserted here.
        mock_client.request.assert_called_once()
        mock_client.read_flows.assert_called_once()
        # Verify destination_flow is still set to the existing flow
        assert migratable.destination_flow == existing_flow
        assert migratable.destination_id == existing_flow.id
    @pytest.mark.parametrize(
        "tags,labels",
        [
            ([], {}),
            (["tag1", "tag2"], {}),
            ([], {"environment": "prod", "team": "data"}),
            (["tag1", "tag2"], {"environment": "prod", "team": "data"}),
        ],
        ids=["no-tags-no-labels", "tags-only", "labels-only", "tags-and-labels"],
    )
    async def test_flow_with_different_tags_and_labels(
        self, session: AsyncSession, tags: list[str], labels: dict[str, str]
    ):
        """Test flows with different combinations of tags and labels."""
        from prefect.client.schemas.objects import Flow
        from prefect.server import models, schemas
        # Clear instances before test
        MigratableFlow._instances.clear()
        from typing import cast
        from prefect.types import KeyValueLabelsField
        # Create flow with specific tags and labels
        orm_flow = await models.flows.create_flow(
            session=session,
            flow=schemas.core.Flow(
                name=f"test-flow-{uuid.uuid4()}",
                tags=tags,
                labels=cast(KeyValueLabelsField, labels),
            ),
        )
        await session.commit()
        # Convert to client schema object
        flow = Flow(
            id=orm_flow.id,
            name=orm_flow.name,
            tags=orm_flow.tags or [],
            labels=orm_flow.labels or {},
            created=orm_flow.created,
            updated=orm_flow.updated,
        )
        # Test construction works with different tags/labels combinations
        migratable = await MigratableFlow.construct(flow)
        assert migratable.source_flow.tags == tags
        assert migratable.source_flow.labels == labels
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/cli/transfer/test_flows.py",
"license": "Apache License 2.0",
"lines": 205,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:tests/cli/transfer/test_variables.py | import uuid
from typing import Any
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from sqlalchemy.ext.asyncio import AsyncSession
from prefect.cli.transfer._exceptions import TransferSkipped
from prefect.cli.transfer._migratable_resources.variables import MigratableVariable
from prefect.client.schemas.actions import VariableCreate
from prefect.client.schemas.objects import Variable
from prefect.exceptions import ObjectAlreadyExists
class TestMigratableVariable:
async def test_construct_creates_new_instance(self, transfer_variable: Variable):
"""Test that construct creates a new MigratableVariable instance."""
migratable = await MigratableVariable.construct(transfer_variable)
assert isinstance(migratable, MigratableVariable)
assert migratable.source_variable == transfer_variable
assert migratable.source_id == transfer_variable.id
assert migratable.destination_variable is None
assert migratable.destination_id is None
async def test_construct_returns_cached_instance(self, transfer_variable: Variable):
"""Test that construct returns cached instance for same ID."""
# Clear any existing instances
MigratableVariable._instances.clear()
# Create first instance
migratable1 = await MigratableVariable.construct(transfer_variable)
# Create second instance with same variable
migratable2 = await MigratableVariable.construct(transfer_variable)
# Should be the same instance
assert migratable1 is migratable2
assert len(MigratableVariable._instances) == 1
async def test_get_instance_returns_cached_instance(
self, transfer_variable: Variable
):
"""Test that get_instance returns cached instance."""
# Clear any existing instances
MigratableVariable._instances.clear()
# Create instance
migratable = await MigratableVariable.construct(transfer_variable)
# Retrieve instance
retrieved = await MigratableVariable.get_instance(transfer_variable.id)
assert retrieved is migratable
async def test_get_instance_returns_none_for_unknown_id(self):
"""Test that get_instance returns None for unknown ID."""
# Clear any existing instances
MigratableVariable._instances.clear()
unknown_id = uuid.uuid4()
retrieved = await MigratableVariable.get_instance(unknown_id)
assert retrieved is None
async def test_get_dependencies_returns_empty_list(
self, transfer_variable: Variable
):
"""Test that get_dependencies returns empty list (variables have no dependencies)."""
migratable = await MigratableVariable.construct(transfer_variable)
dependencies = await migratable.get_dependencies()
assert dependencies == []
@patch("prefect.cli.transfer._migratable_resources.variables.get_client")
async def test_migrate_success(
self, mock_get_client: MagicMock, transfer_variable: Variable
):
"""Test successful variable migration."""
# Mock the client
mock_client = AsyncMock()
mock_get_client.return_value.__aenter__.return_value = mock_client
# Mock successful creation
destination_variable = Variable(
id=uuid.uuid4(),
name=transfer_variable.name,
value=transfer_variable.value,
tags=transfer_variable.tags,
created=transfer_variable.created,
updated=transfer_variable.updated,
)
mock_client.create_variable.return_value = destination_variable
migratable = await MigratableVariable.construct(transfer_variable)
await migratable.migrate()
# Verify client was called correctly
mock_client.create_variable.assert_called_once_with(
variable=VariableCreate(
name=transfer_variable.name,
value=transfer_variable.value,
tags=transfer_variable.tags,
)
)
# Verify destination_variable is set
assert migratable.destination_variable == destination_variable
assert migratable.destination_id == destination_variable.id
@patch("prefect.cli.transfer._migratable_resources.variables.get_client")
async def test_migrate_already_exists_raises_transfer_skipped(
self, mock_get_client: MagicMock, transfer_variable: Variable
):
"""Test migration when variable already exists raises TransferSkipped."""
# Mock the client
mock_client = AsyncMock()
mock_get_client.return_value.__aenter__.return_value = mock_client
# Mock ObjectAlreadyExists exception on create
mock_http_exc = Exception("Conflict")
mock_client.create_variable.side_effect = ObjectAlreadyExists(mock_http_exc)
# Mock successful read of existing variable
existing_variable = Variable(
id=uuid.uuid4(),
name=transfer_variable.name,
value="existing-value", # Different value to show it reads existing
tags=["existing-tag"],
created=transfer_variable.created,
updated=transfer_variable.updated,
)
mock_client.read_variable_by_name.return_value = existing_variable
migratable = await MigratableVariable.construct(transfer_variable)
# Should raise TransferSkipped
with pytest.raises(TransferSkipped, match="Already exists"):
await migratable.migrate()
# Verify client calls
mock_client.create_variable.assert_called_once()
mock_client.read_variable_by_name.assert_called_once_with(
transfer_variable.name
)
# Verify destination_variable is still set to the existing variable
assert migratable.destination_variable == existing_variable
assert migratable.destination_id == existing_variable.id
@pytest.mark.parametrize(
"test_value",
[
"string_value",
123,
12.34,
True,
{"key": "value", "nested": {"inner": "data"}},
["item1", "item2", "item3"],
None,
],
ids=[
"string",
"integer",
"float",
"boolean",
"dict",
"list",
"none",
],
)
async def test_migration_with_different_value_types(
self, session: AsyncSession, test_value: Any
):
"""Test migration with variables containing different value types."""
from prefect.server import models, schemas
# Clear instances before test
MigratableVariable._instances.clear()
# Create variable with specific value type
orm_variable = await models.variables.create_variable(
session=session,
variable=schemas.actions.VariableCreate(
name=f"test-var-{uuid.uuid4()}",
value=test_value,
tags=["test"],
),
)
await session.commit()
# Convert to client schema object
variable = Variable(
id=orm_variable.id,
name=orm_variable.name,
value=orm_variable.value,
tags=orm_variable.tags,
created=orm_variable.created,
updated=orm_variable.updated,
)
# Test construction works with different value types
migratable = await MigratableVariable.construct(variable)
assert migratable.source_variable.value == test_value
async def test_variable_with_tags(self, session: AsyncSession):
"""Test variable with tags."""
from prefect.server import models, schemas
tags = ["tag1", "tag2", "environment:prod", "team:data"]
orm_variable = await models.variables.create_variable(
session=session,
variable=schemas.actions.VariableCreate(
name=f"tagged-var-{uuid.uuid4()}",
value="tagged_value",
tags=tags,
),
)
await session.commit()
# Convert to client schema object
variable = Variable(
id=orm_variable.id,
name=orm_variable.name,
value=orm_variable.value,
tags=orm_variable.tags,
created=orm_variable.created,
updated=orm_variable.updated,
)
migratable = await MigratableVariable.construct(variable)
assert migratable.source_variable.tags == tags
    async def test_variable_without_tags(self, session: AsyncSession):
        """Test variable without tags.

        Confirms an empty tag list round-trips as an empty list (not None).
        """
        from prefect.server import models, schemas
        orm_variable = await models.variables.create_variable(
            session=session,
            variable=schemas.actions.VariableCreate(
                name=f"untagged-var-{uuid.uuid4()}",
                value="untagged_value",
                tags=[],  # Empty tags
            ),
        )
        await session.commit()
        # Convert to client schema object
        variable = Variable(
            id=orm_variable.id,
            name=orm_variable.name,
            value=orm_variable.value,
            tags=orm_variable.tags,
            created=orm_variable.created,
            updated=orm_variable.updated,
        )
        migratable = await MigratableVariable.construct(variable)
        assert migratable.source_variable.tags == []
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/cli/transfer/test_variables.py",
"license": "Apache License 2.0",
"lines": 216,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:tests/cli/transfer/test_work_pools.py | import uuid
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from prefect.cli.transfer._exceptions import TransferSkipped
from prefect.cli.transfer._migratable_resources.work_pools import MigratableWorkPool
from prefect.client.base import ServerType
from prefect.client.schemas.actions import WorkPoolCreate
from prefect.client.schemas.objects import WorkPool, WorkQueue
from prefect.exceptions import ObjectAlreadyExists, ObjectUnsupported
class TestMigratableWorkPool:
    """Tests for MigratableWorkPool: instance caching via construct/get_instance,
    dependency resolution (result-storage blocks), and migration behavior for
    regular, managed, push, unsupported, and already-existing work pools.

    All server interaction is mocked through the patched ``get_client``.
    """
    async def test_construct_creates_new_instance_and_reads_default_queue(
        self, transfer_work_pool: WorkPool
    ):
        """Test that construct creates a new MigratableWorkPool instance and reads default queue."""
        # Clear any existing instances
        MigratableWorkPool._instances.clear()
        # Mock the client to return a default queue
        with patch(
            "prefect.cli.transfer._migratable_resources.work_pools.get_client"
        ) as mock_get_client:
            mock_client = AsyncMock()
            mock_get_client.return_value.__aenter__.return_value = mock_client
            # Mock default queue
            default_queue = WorkQueue(
                id=transfer_work_pool.default_queue_id,
                name="default",
                description="Default queue",
                priority=1,
                concurrency_limit=None,
                filter=None,
                is_paused=False,
                last_polled=None,
                status=None,
                work_pool_id=transfer_work_pool.id,
            )
            mock_client.read_work_queue.return_value = default_queue
            migratable = await MigratableWorkPool.construct(transfer_work_pool)
            assert isinstance(migratable, MigratableWorkPool)
            assert migratable.source_work_pool == transfer_work_pool
            assert migratable.source_default_queue == default_queue
            assert migratable.source_id == transfer_work_pool.id
            assert migratable.destination_work_pool is None
            assert migratable.destination_id is None
            assert migratable._dependencies == []
            # construct should have fetched the pool's default queue exactly once
            mock_client.read_work_queue.assert_called_once_with(
                transfer_work_pool.default_queue_id
            )
    async def test_construct_returns_cached_instance(
        self, transfer_work_pool: WorkPool
    ):
        """Test that construct returns cached instance for same ID."""
        # Clear any existing instances
        MigratableWorkPool._instances.clear()
        # Mock the client for both calls
        with patch(
            "prefect.cli.transfer._migratable_resources.work_pools.get_client"
        ) as mock_get_client:
            mock_client = AsyncMock()
            mock_get_client.return_value.__aenter__.return_value = mock_client
            # Mock default queue
            default_queue = WorkQueue(
                id=transfer_work_pool.default_queue_id,
                name="default",
                description="Default queue",
                priority=1,
                concurrency_limit=None,
                filter=None,
                is_paused=False,
                last_polled=None,
                status=None,
                work_pool_id=transfer_work_pool.id,
            )
            mock_client.read_work_queue.return_value = default_queue
            # Create first instance
            migratable1 = await MigratableWorkPool.construct(transfer_work_pool)
            # Create second instance with same work pool
            migratable2 = await MigratableWorkPool.construct(transfer_work_pool)
            # Should be the same instance
            assert migratable1 is migratable2
            assert len(MigratableWorkPool._instances) == 1
            # Client should only be called once due to caching
            mock_client.read_work_queue.assert_called_once()
    async def test_get_instance_returns_cached_instance(
        self, transfer_work_pool: WorkPool
    ):
        """Test that get_instance returns cached instance."""
        # Clear any existing instances
        MigratableWorkPool._instances.clear()
        # Mock the client
        with patch(
            "prefect.cli.transfer._migratable_resources.work_pools.get_client"
        ) as mock_get_client:
            mock_client = AsyncMock()
            mock_get_client.return_value.__aenter__.return_value = mock_client
            default_queue = WorkQueue(
                id=transfer_work_pool.default_queue_id,
                name="default",
                description="Default queue",
                priority=1,
                concurrency_limit=None,
                filter=None,
                is_paused=False,
                last_polled=None,
                status=None,
                work_pool_id=transfer_work_pool.id,
            )
            mock_client.read_work_queue.return_value = default_queue
            # Create instance
            migratable = await MigratableWorkPool.construct(transfer_work_pool)
            # Retrieve instance
            retrieved = await MigratableWorkPool.get_instance(transfer_work_pool.id)
            assert retrieved is migratable
    async def test_get_instance_returns_none_for_unknown_id(self):
        """Test that get_instance returns None for unknown ID."""
        # Clear any existing instances
        MigratableWorkPool._instances.clear()
        unknown_id = uuid.uuid4()
        retrieved = await MigratableWorkPool.get_instance(unknown_id)
        assert retrieved is None
    async def test_get_instance_by_name_returns_instance(
        self, transfer_work_pool: WorkPool
    ):
        """Test that get_instance_by_name returns cached instance."""
        # Clear any existing instances
        MigratableWorkPool._instances.clear()
        # Mock the client
        with patch(
            "prefect.cli.transfer._migratable_resources.work_pools.get_client"
        ) as mock_get_client:
            mock_client = AsyncMock()
            mock_get_client.return_value.__aenter__.return_value = mock_client
            default_queue = WorkQueue(
                id=transfer_work_pool.default_queue_id,
                name="default",
                description="Default queue",
                priority=1,
                concurrency_limit=None,
                filter=None,
                is_paused=False,
                last_polled=None,
                status=None,
                work_pool_id=transfer_work_pool.id,
            )
            mock_client.read_work_queue.return_value = default_queue
            # Create instance
            migratable = await MigratableWorkPool.construct(transfer_work_pool)
            # Retrieve instance by name
            retrieved = await MigratableWorkPool.get_instance_by_name(
                transfer_work_pool.name
            )
            assert retrieved is migratable
    async def test_get_instance_by_name_returns_none_for_unknown_name(self):
        """Test that get_instance_by_name returns None for unknown name."""
        # Clear any existing instances
        MigratableWorkPool._instances.clear()
        retrieved = await MigratableWorkPool.get_instance_by_name("unknown-work-pool")
        assert retrieved is None
    @patch(
        "prefect.cli.transfer._migratable_resources.work_pools.construct_migratable_resource"
    )
    @patch("prefect.cli.transfer._migratable_resources.work_pools.get_client")
    async def test_get_dependencies_with_result_storage_block(
        self, mock_get_client: MagicMock, mock_construct_resource: AsyncMock
    ):
        """Test get_dependencies with result storage block dependency."""
        from prefect.client.schemas.objects import WorkPoolStorageConfiguration
        # Create work pool with result storage block
        storage_block_id = uuid.uuid4()
        work_pool = WorkPool(
            id=uuid.uuid4(),
            name=f"test-work-pool-{uuid.uuid4()}",
            type="test-type",
            base_job_template={},
            is_paused=False,
            concurrency_limit=None,
            storage_configuration=WorkPoolStorageConfiguration(
                default_result_storage_block_id=storage_block_id
            ),
            default_queue_id=uuid.uuid4(),
        )
        # Mock the client for construct
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client
        default_queue = WorkQueue(
            id=work_pool.default_queue_id,
            name="default",
            description="Default queue",
            priority=1,
            concurrency_limit=None,
            filter=None,
            is_paused=False,
            last_polled=None,
            status=None,
            work_pool_id=work_pool.id,
        )
        # Mock for construct call
        mock_client.read_work_queue.return_value = default_queue
        # Mock for get_dependencies call
        mock_block_document = MagicMock()
        mock_block_document.id = storage_block_id
        mock_client.read_block_document.return_value = mock_block_document
        mock_migratable_block = MagicMock()
        mock_construct_resource.return_value = mock_migratable_block
        # Clear instances
        MigratableWorkPool._instances.clear()
        migratable = await MigratableWorkPool.construct(work_pool)
        dependencies = await migratable.get_dependencies()
        # The storage block should surface as the pool's only dependency
        assert len(dependencies) == 1
        assert dependencies[0] == mock_migratable_block
        mock_client.read_block_document.assert_called_once_with(storage_block_id)
        mock_construct_resource.assert_called_once_with(mock_block_document)
    async def test_get_dependencies_with_no_storage_configuration(
        self, transfer_work_pool: WorkPool
    ):
        """Test get_dependencies with no storage configuration dependencies."""
        # Mock the client
        with patch(
            "prefect.cli.transfer._migratable_resources.work_pools.get_client"
        ) as mock_get_client:
            mock_client = AsyncMock()
            mock_get_client.return_value.__aenter__.return_value = mock_client
            default_queue = WorkQueue(
                id=transfer_work_pool.default_queue_id,
                name="default",
                description="Default queue",
                priority=1,
                concurrency_limit=None,
                filter=None,
                is_paused=False,
                last_polled=None,
                status=None,
                work_pool_id=transfer_work_pool.id,
            )
            mock_client.read_work_queue.return_value = default_queue
            # Clear instances
            MigratableWorkPool._instances.clear()
            migratable = await MigratableWorkPool.construct(transfer_work_pool)
            dependencies = await migratable.get_dependencies()
            assert dependencies == []
    async def test_get_dependencies_cached(self, transfer_work_pool: WorkPool):
        """Test that dependencies are cached after first call."""
        # Mock the client
        with patch(
            "prefect.cli.transfer._migratable_resources.work_pools.get_client"
        ) as mock_get_client:
            mock_client = AsyncMock()
            mock_get_client.return_value.__aenter__.return_value = mock_client
            default_queue = WorkQueue(
                id=transfer_work_pool.default_queue_id,
                name="default",
                description="Default queue",
                priority=1,
                concurrency_limit=None,
                filter=None,
                is_paused=False,
                last_polled=None,
                status=None,
                work_pool_id=transfer_work_pool.id,
            )
            mock_client.read_work_queue.return_value = default_queue
            # Clear instances
            MigratableWorkPool._instances.clear()
            migratable = await MigratableWorkPool.construct(transfer_work_pool)
            # Set up some mock dependencies (pre-seed the cache directly)
            mock_dependency = MagicMock()
            migratable._dependencies = [mock_dependency]
            dependencies1 = await migratable.get_dependencies()
            dependencies2 = await migratable.get_dependencies()
            # Should return the same cached result
            assert dependencies1 == dependencies2
            assert dependencies1 == [mock_dependency]
    @patch("prefect.cli.transfer._migratable_resources.work_pools.get_client")
    async def test_migrate_success_regular_pool(
        self, mock_get_client: MagicMock, transfer_work_pool: WorkPool
    ):
        """Test successful migration of regular work pool."""
        # Mock the client for construct
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client
        default_queue = WorkQueue(
            id=transfer_work_pool.default_queue_id,
            name="default",
            description="Default queue",
            priority=1,
            concurrency_limit=None,
            is_paused=False,
            last_polled=None,
            status=None,
            work_pool_id=transfer_work_pool.id,
        )
        mock_client.read_work_queue.return_value = default_queue
        # Mock successful work pool creation
        destination_work_pool = WorkPool(
            id=uuid.uuid4(),
            name=transfer_work_pool.name,
            type=transfer_work_pool.type,
            base_job_template=transfer_work_pool.base_job_template,
            is_paused=transfer_work_pool.is_paused,
            concurrency_limit=transfer_work_pool.concurrency_limit,
            storage_configuration=transfer_work_pool.storage_configuration,
            default_queue_id=uuid.uuid4(),
        )
        mock_client.create_work_pool.return_value = destination_work_pool
        # Clear instances
        MigratableWorkPool._instances.clear()
        migratable = await MigratableWorkPool.construct(transfer_work_pool)
        await migratable.migrate()
        # Verify client calls
        mock_client.create_work_pool.assert_called_once_with(
            work_pool=WorkPoolCreate(
                name=transfer_work_pool.name,
                type=transfer_work_pool.type,
                base_job_template=transfer_work_pool.base_job_template,
                is_paused=transfer_work_pool.is_paused,
                concurrency_limit=transfer_work_pool.concurrency_limit,
                storage_configuration=transfer_work_pool.storage_configuration,
            )
        )
        # The source default queue's settings must be copied onto the new pool's default queue
        mock_client.update_work_queue.assert_called_once_with(
            id=destination_work_pool.default_queue_id,
            description=default_queue.description,
            priority=default_queue.priority,
            concurrency_limit=default_queue.concurrency_limit,
        )
        # Verify destination_work_pool is set
        assert migratable.destination_work_pool == destination_work_pool
        assert migratable.destination_id == destination_work_pool.id
    @patch("prefect.cli.transfer._migratable_resources.work_pools.get_client")
    async def test_migrate_managed_pool_skipped(
        self, mock_get_client: MagicMock, transfer_managed_work_pool: WorkPool
    ):
        """Test that managed pools are skipped."""
        # Mock the client for construct
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client
        default_queue = WorkQueue(
            id=transfer_managed_work_pool.default_queue_id,
            name="default",
            description="Default queue",
            priority=1,
            concurrency_limit=None,
            filter=None,
            is_paused=False,
            last_polled=None,
            status=None,
            work_pool_id=transfer_managed_work_pool.id,
        )
        mock_client.read_work_queue.return_value = default_queue
        # Clear instances
        MigratableWorkPool._instances.clear()
        migratable = await MigratableWorkPool.construct(transfer_managed_work_pool)
        # Should raise TransferSkipped
        with pytest.raises(TransferSkipped, match="Skipped managed pool"):
            await migratable.migrate()
        # Should not try to create work pool
        mock_client.create_work_pool.assert_not_called()
    @patch("prefect.cli.transfer._migratable_resources.work_pools.get_client")
    async def test_migrate_push_pool_with_cloud_server(
        self, mock_get_client: MagicMock, transfer_push_work_pool: WorkPool
    ):
        """Test that push pools work with cloud server."""
        # Mock the client for construct
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client
        mock_client.server_type = ServerType.CLOUD  # Set server type to cloud
        default_queue = WorkQueue(
            id=transfer_push_work_pool.default_queue_id,
            name="default",
            description="Default queue",
            priority=1,
            concurrency_limit=None,
            is_paused=False,
            last_polled=None,
            status=None,
            work_pool_id=transfer_push_work_pool.id,
        )
        mock_client.read_work_queue.return_value = default_queue
        # Mock successful work pool creation
        destination_work_pool = WorkPool(
            id=uuid.uuid4(),
            name=transfer_push_work_pool.name,
            type=transfer_push_work_pool.type,
            base_job_template=transfer_push_work_pool.base_job_template,
            is_paused=transfer_push_work_pool.is_paused,
            concurrency_limit=transfer_push_work_pool.concurrency_limit,
            storage_configuration=transfer_push_work_pool.storage_configuration,
            default_queue_id=uuid.uuid4(),
        )
        mock_client.create_work_pool.return_value = destination_work_pool
        # Clear instances
        MigratableWorkPool._instances.clear()
        migratable = await MigratableWorkPool.construct(transfer_push_work_pool)
        await migratable.migrate()
        # Should successfully create work pool
        mock_client.create_work_pool.assert_called_once()
        assert migratable.destination_work_pool == destination_work_pool
    @patch("prefect.cli.transfer._migratable_resources.work_pools.get_client")
    async def test_migrate_push_pool_without_cloud_server_skipped(
        self, mock_get_client: MagicMock, transfer_push_work_pool: WorkPool
    ):
        """Test that push pools are skipped without cloud server."""
        # Mock the client for construct
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client
        mock_client.server_type = ServerType.EPHEMERAL  # Set server type to non-cloud
        default_queue = WorkQueue(
            id=transfer_push_work_pool.default_queue_id,
            name="default",
            description="Default queue",
            priority=1,
            concurrency_limit=None,
            filter=None,
            is_paused=False,
            last_polled=None,
            status=None,
            work_pool_id=transfer_push_work_pool.id,
        )
        mock_client.read_work_queue.return_value = default_queue
        # Clear instances
        MigratableWorkPool._instances.clear()
        migratable = await MigratableWorkPool.construct(transfer_push_work_pool)
        # Should raise TransferSkipped
        with pytest.raises(TransferSkipped, match="Skipped push pool"):
            await migratable.migrate()
        # Should not try to create work pool
        mock_client.create_work_pool.assert_not_called()
    @patch("prefect.cli.transfer._migratable_resources.work_pools.get_client")
    async def test_migrate_unsupported_work_pool_skipped(
        self, mock_get_client: MagicMock, transfer_work_pool: WorkPool
    ):
        """Test that unsupported work pools are skipped."""
        # Mock the client for construct
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client
        default_queue = WorkQueue(
            id=transfer_work_pool.default_queue_id,
            name="default",
            description="Default queue",
            priority=1,
            concurrency_limit=None,
            filter=None,
            is_paused=False,
            last_polled=None,
            status=None,
            work_pool_id=transfer_work_pool.id,
        )
        mock_client.read_work_queue.return_value = default_queue
        # Mock ObjectUnsupported exception on create
        mock_client.create_work_pool.side_effect = ObjectUnsupported("Unsupported")
        # Clear instances
        MigratableWorkPool._instances.clear()
        migratable = await MigratableWorkPool.construct(transfer_work_pool)
        # Should raise TransferSkipped
        with pytest.raises(
            TransferSkipped, match="Destination requires Standard/Pro tier"
        ):
            await migratable.migrate()
        # Verify client calls
        mock_client.create_work_pool.assert_called_once()
    @patch("prefect.cli.transfer._migratable_resources.work_pools.get_client")
    async def test_migrate_already_exists(
        self, mock_get_client: MagicMock, transfer_work_pool: WorkPool
    ):
        """Test migration when work pool already exists."""
        # Mock the client for construct
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client
        default_queue = WorkQueue(
            id=transfer_work_pool.default_queue_id,
            name="default",
            description="Default queue",
            priority=1,
            concurrency_limit=None,
            filter=None,
            is_paused=False,
            last_polled=None,
            status=None,
            work_pool_id=transfer_work_pool.id,
        )
        mock_client.read_work_queue.return_value = default_queue
        # Mock ObjectAlreadyExists exception on create
        mock_http_exc = Exception("Conflict")
        mock_client.create_work_pool.side_effect = ObjectAlreadyExists(mock_http_exc)
        # Mock successful read of existing work pool
        existing_work_pool = WorkPool(
            id=uuid.uuid4(),
            name=transfer_work_pool.name,
            type="existing-type",  # Different to show it reads existing
            base_job_template={"existing": "template"},
            is_paused=True,
            concurrency_limit=10,
            storage_configuration=transfer_work_pool.storage_configuration,
            default_queue_id=uuid.uuid4(),
        )
        mock_client.read_work_pool.return_value = existing_work_pool
        # Clear instances
        MigratableWorkPool._instances.clear()
        migratable = await MigratableWorkPool.construct(transfer_work_pool)
        # Should raise TransferSkipped
        with pytest.raises(TransferSkipped, match="Already exists"):
            await migratable.migrate()
        # Verify client calls
        mock_client.create_work_pool.assert_called_once()
        mock_client.read_work_pool.assert_called_once_with(transfer_work_pool.name)
        # Verify destination_work_pool is set to existing
        assert migratable.destination_work_pool == existing_work_pool
        assert migratable.destination_id == existing_work_pool.id
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/cli/transfer/test_work_pools.py",
"license": "Apache License 2.0",
"lines": 507,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:tests/cli/transfer/test_work_queues.py | import uuid
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from prefect.cli.transfer._exceptions import TransferSkipped
from prefect.cli.transfer._migratable_resources.work_queues import MigratableWorkQueue
from prefect.client.schemas.filters import WorkQueueFilter, WorkQueueFilterName
from prefect.client.schemas.objects import WorkQueue
from prefect.exceptions import ObjectAlreadyExists
class TestMigratableWorkQueue:
    """Tests for MigratableWorkQueue: instance caching, work-pool dependency
    resolution, and migration behavior — including the already-exists and
    default-queue skip paths. Server calls are mocked via ``get_client``.
    """
    async def test_construct_creates_new_instance(self, transfer_work_queue: WorkQueue):
        """Test that construct creates a new MigratableWorkQueue instance."""
        # Clear any existing instances
        MigratableWorkQueue._instances.clear()
        migratable = await MigratableWorkQueue.construct(transfer_work_queue)
        assert isinstance(migratable, MigratableWorkQueue)
        assert migratable.source_work_queue == transfer_work_queue
        assert migratable.source_id == transfer_work_queue.id
        assert migratable.destination_work_queue is None
        assert migratable.destination_id is None
        assert migratable._dependencies == []
    async def test_construct_returns_cached_instance(
        self, transfer_work_queue: WorkQueue
    ):
        """Test that construct returns cached instance for same ID."""
        # Clear any existing instances
        MigratableWorkQueue._instances.clear()
        # Create first instance
        migratable1 = await MigratableWorkQueue.construct(transfer_work_queue)
        # Create second instance with same work queue
        migratable2 = await MigratableWorkQueue.construct(transfer_work_queue)
        # Should be the same instance
        assert migratable1 is migratable2
        assert len(MigratableWorkQueue._instances) == 1
    async def test_get_instance_returns_cached_instance(
        self, transfer_work_queue: WorkQueue
    ):
        """Test that get_instance returns cached instance."""
        # Clear any existing instances
        MigratableWorkQueue._instances.clear()
        # Create instance
        migratable = await MigratableWorkQueue.construct(transfer_work_queue)
        # Retrieve instance
        retrieved = await MigratableWorkQueue.get_instance(transfer_work_queue.id)
        assert retrieved is migratable
    async def test_get_instance_returns_none_for_unknown_id(self):
        """Test that get_instance returns None for unknown ID."""
        # Clear any existing instances
        MigratableWorkQueue._instances.clear()
        unknown_id = uuid.uuid4()
        retrieved = await MigratableWorkQueue.get_instance(unknown_id)
        assert retrieved is None
    @patch(
        "prefect.cli.transfer._migratable_resources.work_queues.construct_migratable_resource"
    )
    @patch("prefect.cli.transfer._migratable_resources.work_queues.get_client")
    async def test_get_dependencies_with_work_pool_name(
        self, mock_get_client: MagicMock, mock_construct_resource: AsyncMock
    ):
        """Test get_dependencies with work pool name dependency."""
        # Create work queue with work pool name
        work_pool_name = f"test-work-pool-{uuid.uuid4()}"
        work_queue = WorkQueue(
            id=uuid.uuid4(),
            name=f"test-queue-{uuid.uuid4()}",
            description="Test queue",
            priority=1,
            concurrency_limit=None,
            is_paused=False,
            last_polled=None,
            status=None,
            work_pool_id=uuid.uuid4(),
            work_pool_name=work_pool_name,
        )
        # Mock the client
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client
        # Mock work pool read
        mock_work_pool = MagicMock()
        mock_work_pool.name = work_pool_name
        mock_client.read_work_pool.return_value = mock_work_pool
        mock_migratable_work_pool = MagicMock()
        mock_construct_resource.return_value = mock_migratable_work_pool
        # Clear instances
        MigratableWorkQueue._instances.clear()
        migratable = await MigratableWorkQueue.construct(work_queue)
        dependencies = await migratable.get_dependencies()
        # The owning work pool should be the queue's only dependency
        assert len(dependencies) == 1
        assert dependencies[0] == mock_migratable_work_pool
        mock_client.read_work_pool.assert_called_once_with(work_pool_name)
        mock_construct_resource.assert_called_once_with(mock_work_pool)
    @patch(
        "prefect.cli.transfer._migratable_resources.work_pools.MigratableWorkPool.get_instance_by_name"
    )
    async def test_get_dependencies_with_cached_work_pool_dependency(
        self, mock_get_instance_by_name: AsyncMock
    ):
        """Test get_dependencies with cached work pool dependency."""
        # Create work queue with work pool name
        work_pool_name = f"test-work-pool-{uuid.uuid4()}"
        work_queue = WorkQueue(
            id=uuid.uuid4(),
            name=f"test-queue-{uuid.uuid4()}",
            description="Test queue",
            priority=1,
            concurrency_limit=None,
            is_paused=False,
            last_polled=None,
            status=None,
            work_pool_id=uuid.uuid4(),
            work_pool_name=work_pool_name,
        )
        # Mock cached work pool dependency
        mock_migratable_work_pool = MagicMock()
        mock_get_instance_by_name.return_value = mock_migratable_work_pool
        # Clear instances
        MigratableWorkQueue._instances.clear()
        migratable = await MigratableWorkQueue.construct(work_queue)
        dependencies = await migratable.get_dependencies()
        # Cached pool instance is reused; no client read should be needed
        assert len(dependencies) == 1
        assert dependencies[0] == mock_migratable_work_pool
        mock_get_instance_by_name.assert_called_once_with(name=work_pool_name)
    async def test_get_dependencies_with_no_work_pool_name(
        self, transfer_work_queue: WorkQueue
    ):
        """Test get_dependencies with no work pool name (standalone queue)."""
        # Clear instances
        MigratableWorkQueue._instances.clear()
        migratable = await MigratableWorkQueue.construct(transfer_work_queue)
        dependencies = await migratable.get_dependencies()
        assert dependencies == []
    async def test_get_dependencies_cached(self):
        """Test that dependencies are cached after first call."""
        # Create work queue with work pool name
        work_queue = WorkQueue(
            id=uuid.uuid4(),
            name=f"test-queue-{uuid.uuid4()}",
            description="Test queue",
            priority=1,
            concurrency_limit=None,
            is_paused=False,
            last_polled=None,
            status=None,
            work_pool_id=uuid.uuid4(),
            work_pool_name=f"test-work-pool-{uuid.uuid4()}",
        )
        # Clear instances
        MigratableWorkQueue._instances.clear()
        migratable = await MigratableWorkQueue.construct(work_queue)
        # Set up some mock dependencies (pre-seed the cache directly)
        mock_dependency = MagicMock()
        migratable._dependencies = [mock_dependency]
        dependencies1 = await migratable.get_dependencies()
        dependencies2 = await migratable.get_dependencies()
        # Should return the same cached result
        assert dependencies1 == dependencies2
        assert dependencies1 == [mock_dependency]
    @patch("prefect.cli.transfer._migratable_resources.work_queues.get_client")
    async def test_migrate_success_standalone_queue(
        self, mock_get_client: MagicMock, transfer_work_queue: WorkQueue
    ):
        """Test successful migration of standalone work queue."""
        # Mock the client
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client
        # Mock successful work queue creation
        destination_work_queue = WorkQueue(
            id=uuid.uuid4(),
            name=transfer_work_queue.name,
            description=transfer_work_queue.description,
            priority=transfer_work_queue.priority,
            concurrency_limit=transfer_work_queue.concurrency_limit,
            is_paused=False,
            last_polled=None,
            status=None,
            work_pool_id=None,
            work_pool_name=None,
        )
        mock_client.create_work_queue.return_value = destination_work_queue
        # Clear instances
        MigratableWorkQueue._instances.clear()
        migratable = await MigratableWorkQueue.construct(transfer_work_queue)
        await migratable.migrate()
        # Verify client calls
        mock_client.create_work_queue.assert_called_once_with(
            name=transfer_work_queue.name,
            description=transfer_work_queue.description,
            priority=transfer_work_queue.priority,
            concurrency_limit=transfer_work_queue.concurrency_limit,
            work_pool_name=transfer_work_queue.work_pool_name,
        )
        # Verify destination_work_queue is set
        assert migratable.destination_work_queue == destination_work_queue
        assert migratable.destination_id == destination_work_queue.id
    @patch("prefect.cli.transfer._migratable_resources.work_queues.get_client")
    async def test_migrate_success_with_work_pool(
        self, mock_get_client: MagicMock, transfer_work_queue_with_pool: WorkQueue
    ):
        """Test successful migration of work queue with work pool."""
        # Mock the client
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client
        # Mock successful work queue creation
        destination_work_queue = WorkQueue(
            id=uuid.uuid4(),
            name=transfer_work_queue_with_pool.name,
            description=transfer_work_queue_with_pool.description,
            priority=transfer_work_queue_with_pool.priority,
            concurrency_limit=transfer_work_queue_with_pool.concurrency_limit,
            is_paused=False,
            last_polled=None,
            status=None,
            work_pool_id=uuid.uuid4(),
            work_pool_name=f"test-pool-{uuid.uuid4()}",
        )
        mock_client.create_work_queue.return_value = destination_work_queue
        # Clear instances
        MigratableWorkQueue._instances.clear()
        migratable = await MigratableWorkQueue.construct(transfer_work_queue_with_pool)
        await migratable.migrate()
        # Verify client calls
        mock_client.create_work_queue.assert_called_once_with(
            name=transfer_work_queue_with_pool.name,
            description=transfer_work_queue_with_pool.description,
            priority=transfer_work_queue_with_pool.priority,
            concurrency_limit=transfer_work_queue_with_pool.concurrency_limit,
            work_pool_name=transfer_work_queue_with_pool.work_pool_name,
        )
        # Verify destination_work_queue is set
        assert migratable.destination_work_queue == destination_work_queue
        assert migratable.destination_id == destination_work_queue.id
    @patch("prefect.cli.transfer._migratable_resources.work_queues.get_client")
    async def test_migrate_already_exists(
        self, mock_get_client: MagicMock, transfer_work_queue_with_pool: WorkQueue
    ):
        """Test migration when work queue already exists."""
        # Mock the client
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client
        # Mock ObjectAlreadyExists exception on create
        mock_http_exc = Exception("Conflict")
        mock_client.create_work_queue.side_effect = ObjectAlreadyExists(mock_http_exc)
        # Mock existing work queue in read_work_queues response
        existing_work_queue = WorkQueue(
            id=uuid.uuid4(),
            name=transfer_work_queue_with_pool.name,
            description="existing description",  # Different to show it reads existing
            priority=2,
            concurrency_limit=10,
            is_paused=True,
            last_polled=None,
            status=None,
            work_pool_id=uuid.uuid4(),
            work_pool_name=f"test-pool-{uuid.uuid4()}",
        )
        mock_client.read_work_queues.return_value = [existing_work_queue]
        # Clear instances
        MigratableWorkQueue._instances.clear()
        migratable = await MigratableWorkQueue.construct(transfer_work_queue_with_pool)
        # Should raise TransferSkipped
        with pytest.raises(TransferSkipped, match="Already exists"):
            await migratable.migrate()
        # Verify client calls: the existing queue is looked up by name filter
        mock_client.create_work_queue.assert_called_once()
        mock_client.read_work_queues.assert_called_once_with(
            work_pool_name=transfer_work_queue_with_pool.work_pool_name,
            work_queue_filter=WorkQueueFilter(
                name=WorkQueueFilterName(any_=[transfer_work_queue_with_pool.name]),
            ),
        )
        # Verify destination_work_queue is set to existing
        assert migratable.destination_work_queue == existing_work_queue
        assert migratable.destination_id == existing_work_queue.id
    @patch("prefect.cli.transfer._migratable_resources.work_queues.get_client")
    async def test_migrate_already_exists_queue_not_found_in_list(
        self, mock_get_client: MagicMock, transfer_work_queue_with_pool: WorkQueue
    ):
        """Test migration when work queue already exists but is not found in list."""
        # Mock the client
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client
        # Mock ObjectAlreadyExists exception on create
        mock_http_exc = Exception("Conflict")
        mock_client.create_work_queue.side_effect = ObjectAlreadyExists(mock_http_exc)
        # Mock empty work queues list (queue not found)
        mock_client.read_work_queues.return_value = []
        # Clear instances
        MigratableWorkQueue._instances.clear()
        migratable = await MigratableWorkQueue.construct(transfer_work_queue_with_pool)
        with pytest.raises(RuntimeError):
            await migratable.migrate()
        # Verify calls
        mock_client.create_work_queue.assert_called_once()
        mock_client.read_work_queues.assert_called_once()
        # destination_work_queue should remain None since we couldn't find it
        assert migratable.destination_work_queue is None
    @patch("prefect.cli.transfer._migratable_resources.work_queues.get_client")
    async def test_migrate_skips_default_work_queue(self, mock_get_client: MagicMock):
        """Test that migration skips work queues named 'default'."""
        # Create a work queue with name 'default'
        default_work_queue = WorkQueue(
            id=uuid.uuid4(),
            name="default",
            description="Default work queue",
            priority=1,
            concurrency_limit=None,
            is_paused=False,
            last_polled=None,
            status=None,
            work_pool_id=uuid.uuid4(),
            work_pool_name=f"test-pool-{uuid.uuid4()}",
        )
        # Mock the client
        mock_client = AsyncMock()
        mock_get_client.return_value.__aenter__.return_value = mock_client
        # Mock empty work queues list (queue not found)
        mock_client.read_work_queues.return_value = [default_work_queue]
        # Clear instances
        MigratableWorkQueue._instances.clear()
        migratable = await MigratableWorkQueue.construct(default_work_queue)
        # Should raise TransferSkipped for default work queue
        with pytest.raises(
            TransferSkipped,
            match="Default work queues are created with work pools",
        ):
            await migratable.migrate()
        # Verify no client calls were made since it's skipped early
        assert migratable.destination_work_queue is None
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/cli/transfer/test_work_queues.py",
"license": "Apache License 2.0",
"lines": 327,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:scripts/generate_api_ref.py | """
Script to generate API reference documentation using mdxify.
"""
import subprocess
import sys
def main() -> None:
    """Generate API reference documentation."""
    # Run mdxify through uvx with the repository installed in editable mode so
    # the docs are generated against the working tree.
    command = ["uvx", "--with-editable", ".", "mdxify@latest"]
    command += [
        "--all",
        "--root-module", "prefect",
        "--output-dir", "docs/v3/api-ref/python",
        "--anchor-name", "Python SDK Reference",
        "--exclude", "prefect.agent",
        "--exclude", "prefect.analytics",
        "--include-inheritance",
        "--repo-url", "https://github.com/PrefectHQ/prefect",
    ]
    completed = subprocess.run(command, capture_output=True, text=True)
    if completed.returncode != 0:
        # Surface mdxify's stderr and propagate its exit code.
        print(f"Error generating API reference: {completed.stderr}", file=sys.stderr)
        sys.exit(completed.returncode)
    print(completed.stdout)
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "scripts/generate_api_ref.py",
"license": "Apache License 2.0",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/prefect:src/integrations/prefect-aws/prefect_aws/observers/ecs.py | from __future__ import annotations
import asyncio
import datetime
import enum
import json
import logging
import uuid
from collections import deque
from contextlib import AsyncExitStack
from datetime import timedelta
from functools import partial
from types import TracebackType
from typing import (
TYPE_CHECKING,
Any,
AsyncGenerator,
Literal,
NamedTuple,
Protocol,
TypedDict,
Union,
)
import aiobotocore.session
import anyio
from botocore.exceptions import ClientError
from cachetools import LRUCache
from prefect_aws.settings import EcsObserverSettings
from slugify import slugify
import prefect
from prefect.events.clients import get_events_client
from prefect.events.schemas.events import Event, RelatedResource, Resource
from prefect.exceptions import ObjectNotFound
from prefect.states import Crashed
from prefect.utilities.engine import propose_state
if TYPE_CHECKING:
from mypy_boto3_sqs.type_defs import MessageTypeDef
from types_aiobotocore_ecs import ECSClient
# Module-level logger for the ECS observer; INFO by default.
logger: logging.Logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Recently emitted Prefect events keyed by ECS event id; bounded LRU so
# memory stays constant under sustained load.
_last_event_cache: LRUCache[uuid.UUID, Event] = LRUCache(maxsize=1000)
# Container name used for the Prefect orchestration container in ECS tasks.
_ECS_DEFAULT_CONTAINER_NAME = "prefect"
# Maps EventBridge "detail-type" strings onto the observer's event categories.
_ECS_EVENT_DETAIL_MAP: dict[
    str, Literal["task", "container-instance", "deployment"]
] = {
    "ECS Task State Change": "task",
    "ECS Container Instance State Change": "container-instance",
    "ECS Deployment": "deployment",
}
# Lifecycle statuses an ECS task can report as its `lastStatus`.
EcsTaskLastStatus = Literal[
    "PROVISIONING",
    "PENDING",
    "ACTIVATING",
    "RUNNING",
    "DEACTIVATING",
    "STOPPING",
    "DEPROVISIONING",
    "STOPPED",
    "DELETED",
]
class FilterCase(enum.Enum):
    """Match a tag by presence or absence instead of by an exact value."""
    PRESENT = enum.auto()
    ABSENT = enum.auto()
class EcsEventHandler(Protocol):
    """Synchronous callback invoked with a raw ECS event body and the task's tags."""
    __name__: str
    def __call__(
        self,
        event: dict[str, Any],
        tags: dict[str, str],
    ) -> None: ...
class AsyncEcsEventHandler(Protocol):
    """Asynchronous callback invoked with a raw ECS event body and the task's tags."""
    __name__: str
    async def __call__(
        self,
        event: dict[str, Any],
        tags: dict[str, str],
    ) -> None: ...
class EventHandlerFilters(TypedDict):
    """Filters attached to a registered event handler; both must match to dispatch."""
    tags: TagsFilter
    last_status: LastStatusFilter
class TagsFilter:
    """Matches a task's tags against per-tag expectations.

    Each expectation is either a literal value, or a ``FilterCase`` sentinel
    requiring the tag's presence/absence. An empty filter matches everything.
    """

    def __init__(self, **tags: str | FilterCase):
        self.tags = tags

    def is_match(self, tags: dict[str, str]) -> bool:
        """Return True when every configured expectation holds for ``tags``."""
        for tag_name, expected in self.tags.items():
            if expected == FilterCase.PRESENT:
                satisfied = tag_name in tags
            elif expected == FilterCase.ABSENT:
                satisfied = tag_name not in tags
            else:
                satisfied = tags.get(tag_name) == expected
            if not satisfied:
                return False
        return True
class LastStatusFilter:
    """Matches an ECS task's ``lastStatus`` against an allow-list.

    An empty filter matches every status.
    """

    def __init__(self, *statuses: EcsTaskLastStatus):
        self.statuses = statuses

    def is_match(self, last_status: EcsTaskLastStatus) -> bool:
        """Return True when no statuses were configured or the status is listed."""
        if not self.statuses:
            return True
        return last_status in self.statuses
# Pairs a registered handler with the filters that gate its invocation.
HandlerWithFilters = NamedTuple(
    "HandlerWithFilters",
    [
        ("handler", Union[EcsEventHandler, AsyncEcsEventHandler]),
        ("filters", EventHandlerFilters),
    ],
)
class EcsTaskTagsReader:
    """Reads (and caches) the tags attached to ECS tasks.

    Use as an async context manager so the underlying ECS client is opened
    and closed alongside the reader.
    """

    def __init__(self):
        # Populated by __aenter__; read_tags raises until then.
        self.ecs_client: "ECSClient | None" = None
        # task ARN -> tags; bounded so long-running observers don't grow unboundedly.
        self._cache: LRUCache[str, dict[str, str]] = LRUCache(maxsize=100)

    async def read_tags(self, cluster_arn: str, task_arn: str) -> dict[str, str]:
        """Return the tags for ``task_arn``, or ``{}`` on any failure.

        Results are cached per task ARN. AWS errors are swallowed so callers
        can treat a failed lookup the same as "no tags".

        Raises:
            RuntimeError: if called before the reader's client is opened.
        """
        if not self.ecs_client:
            raise RuntimeError("ECS client not initialized for EcsTaskTagsReader")
        if task_arn in self._cache:
            return self._cache[task_arn]
        try:
            response = await self.ecs_client.describe_tasks(
                cluster=cluster_arn,
                tasks=[task_arn],
                include=["TAGS"],
            )
        except Exception as e:
            # NOTE: intentionally writes to stdout — the test suite asserts on
            # captured stdout for this message.
            print(f"Error reading tags for task {task_arn}: {e}")
            return {}
        # `not tasks` also covers the empty list, so no separate length check
        # is needed (a redundant `len(tasks) == 0` guard was removed here).
        if not (tasks := response.get("tasks", [])):
            return {}
        tags = {
            tag["key"]: tag["value"]
            for tag in tasks[0].get("tags", [])
            if "key" in tag and "value" in tag
        }
        self._cache[task_arn] = tags
        return tags

    async def __aenter__(self):
        self.ecs_client = (
            await aiobotocore.session.get_session().create_client("ecs").__aenter__()
        )
        return self

    async def __aexit__(self, *args: Any) -> None:
        if self.ecs_client:
            await self.ecs_client.__aexit__(*args)
# Number of recent SQS failures retained for the final error report.
SQS_MEMORY = 10
# Consecutive receive failures before backoff kicks in.
SQS_CONSECUTIVE_FAILURES = 3
# Base backoff interval (seconds); doubled each backoff round.
SQS_BACKOFF = 1
# Backoff rounds allowed before SQS polling gives up.
SQS_MAX_BACKOFF_ATTEMPTS = 5
# Base delay (seconds) before restarting a crashed observer task.
OBSERVER_RESTART_BASE_DELAY = 30
# Crash/restart cycles allowed before automatic restarts stop.
OBSERVER_MAX_RESTART_ATTEMPTS = 5
class SqsSubscriber:
    """Streams messages from an SQS queue, deleting each one after it is yielded.

    Polling failures are tolerated with exponential backoff; after
    ``SQS_MAX_BACKOFF_ATTEMPTS`` consecutive backoff rounds the stream raises.
    """

    def __init__(self, queue_name: str, queue_region: str | None = None):
        self.queue_name = queue_name
        self.queue_region = queue_region

    async def stream_messages(
        self,
    ) -> AsyncGenerator["MessageTypeDef", None]:
        """Yield messages from the queue indefinitely via SQS long polling."""
        session = aiobotocore.session.get_session()
        async with session.create_client(
            "sqs", region_name=self.queue_region
        ) as sqs_client:
            try:
                queue_url = (await sqs_client.get_queue_url(QueueName=self.queue_name))[
                    "QueueUrl"
                ]
            except ClientError as e:
                if (
                    e.response.get("Error", {}).get("Code")
                    == "AWS.SimpleQueueService.NonExistentQueue"
                ):
                    # A missing queue is not fatal: warn once and end the
                    # stream so the worker keeps running without observation.
                    logger.warning(
                        (
                            "SQS queue '%s' does not exist in region '%s'. "
                            "This worker will continue to submit ECS tasks, but event replication "
                            "and crash detection will not work. To enable ECS event replication and "
                            "crash detection, deploy an SQS queue using "
                            "`prefect-aws ecs-worker deploy-events` and configure the "
                            "PREFECT_INTEGRATIONS_AWS_ECS_OBSERVER_SQS_QUEUE_NAME environment "
                            "variable on your worker to point to the deployed queue."
                        ),
                        self.queue_name,
                        self.queue_region or "default",
                    )
                    return
                raise
            # Rolling window of recent poll outcomes; an all-False window
            # (SQS_CONSECUTIVE_FAILURES in a row) triggers a backoff round.
            track_record: deque[bool] = deque(
                [True] * SQS_CONSECUTIVE_FAILURES, maxlen=SQS_CONSECUTIVE_FAILURES
            )
            # Most recent exceptions, kept only for the final error report.
            failures: deque[tuple[Exception, TracebackType | None]] = deque(
                maxlen=SQS_MEMORY
            )
            backoff_count = 0
            while True:
                try:
                    messages = await sqs_client.receive_message(
                        QueueUrl=queue_url,
                        MaxNumberOfMessages=10,
                        WaitTimeSeconds=20,
                    )
                    for message in messages.get("Messages", []):
                        if not (receipt_handle := message.get("ReceiptHandle")):
                            continue
                        yield message
                        # Delete only after the consumer has handled the yield,
                        # so an interrupted consumer leaves the message visible.
                        await sqs_client.delete_message(
                            QueueUrl=queue_url,
                            ReceiptHandle=receipt_handle,
                        )
                    backoff_count = 0
                except Exception as e:
                    track_record.append(False)
                    failures.append((e, e.__traceback__))
                    logger.debug("Failed to receive messages from SQS", exc_info=e)
                    if not any(track_record):
                        backoff_count += 1
                        if backoff_count > SQS_MAX_BACKOFF_ATTEMPTS:
                            logger.error(
                                "SQS polling exceeded maximum backoff attempts (%s). "
                                "Last %s errors: %s",
                                SQS_MAX_BACKOFF_ATTEMPTS,
                                len(failures),
                                [str(e) for e, _ in failures],
                            )
                            raise RuntimeError(
                                f"SQS polling failed after {SQS_MAX_BACKOFF_ATTEMPTS} backoff attempts"
                            )
                        # Refill the window so the next backoff round only
                        # fires after another run of consecutive failures.
                        track_record.extend([True] * SQS_CONSECUTIVE_FAILURES)
                        failures.clear()
                        backoff_seconds = SQS_BACKOFF * 2**backoff_count
                        logger.debug(
                            "Backing off due to consecutive errors, using increased interval of %s seconds.",
                            backoff_seconds,
                        )
                        await asyncio.sleep(backoff_seconds)
class EcsObserver:
    """Consumes ECS events from SQS and dispatches them to registered handlers."""

    def __init__(
        self,
        settings: EcsObserverSettings | None = None,
        sqs_subscriber: SqsSubscriber | None = None,
        ecs_tags_reader: EcsTaskTagsReader | None = None,
    ):
        self.settings = settings or EcsObserverSettings()
        self.sqs_subscriber = sqs_subscriber or SqsSubscriber(
            queue_name=self.settings.sqs.queue_name,
            queue_region=self.settings.sqs.queue_region,
        )
        self.ecs_tags_reader = ecs_tags_reader or EcsTaskTagsReader()
        # Handlers registered via on_event, grouped by event category.
        self.event_handlers: dict[
            Literal["task", "container-instance", "deployment"],
            list[HandlerWithFilters],
        ] = {
            "task": [],
            "container-instance": [],
            "deployment": [],
        }

    async def run(self):
        """Stream SQS messages forever, fanning each out to matching handlers."""
        async with AsyncExitStack() as stack:
            task_group = await stack.enter_async_context(anyio.create_task_group())
            await stack.enter_async_context(self.ecs_tags_reader)
            async for message in self.sqs_subscriber.stream_messages():
                if not (body := message.get("Body")):
                    logger.debug(
                        "No body in message. Skipping.",
                        extra={"sqs_message": message},
                    )
                    continue
                body = json.loads(body)
                # Tags are only resolvable when the event names both the task
                # and its cluster.
                if (task_arn := body.get("detail", {}).get("taskArn")) and (
                    cluster_arn := body.get("detail", {}).get("clusterArn")
                ):
                    tags = await self.ecs_tags_reader.read_tags(
                        cluster_arn=cluster_arn,
                        task_arn=task_arn,
                    )
                else:
                    tags = {}
                if not (detail_type := body.get("detail-type")):
                    logger.debug(
                        "No event type in message. Skipping.",
                        extra={"sqs_message": message},
                    )
                    continue
                if detail_type not in _ECS_EVENT_DETAIL_MAP:
                    logger.debug("Unknown event type: %s. Skipping.", detail_type)
                    continue
                last_status = body.get("detail", {}).get("lastStatus")
                event_type = _ECS_EVENT_DETAIL_MAP[detail_type]
                for handler, filters in self.event_handlers[event_type]:
                    if filters["tags"].is_match(tags) and filters[
                        "last_status"
                    ].is_match(last_status):
                        logger.debug(
                            "Running handler %s for message",
                            handler.__name__,
                            extra={"sqs_message": message},
                        )
                        # Async handlers run on the task group directly; sync
                        # handlers are pushed to a thread so they can't block
                        # the event loop.
                        if asyncio.iscoroutinefunction(handler):
                            task_group.start_soon(handler, body, tags)
                        else:
                            task_group.start_soon(
                                asyncio.to_thread, partial(handler, body, tags)
                            )

    def on_event(
        self,
        event_type: Literal["task", "container-instance", "deployment"],
        /,
        tags: dict[str, str | FilterCase] | None = None,
        statuses: list[EcsTaskLastStatus] | None = None,
    ):
        """Decorator factory: register a handler for ``event_type``, optionally
        gated by tag and last-status filters. Returns the handler unchanged."""
        def decorator(fn: EcsEventHandler | AsyncEcsEventHandler):
            self.event_handlers[event_type].append(
                HandlerWithFilters(
                    handler=fn,
                    filters={
                        "tags": TagsFilter(**(tags or {})),
                        "last_status": LastStatusFilter(*(statuses or [])),
                    },
                )
            )
            return fn

        return decorator
def _related_resources_from_tags(tags: dict[str, str]) -> list[RelatedResource]:
    """Convert labels to related resources"""
    related: list[RelatedResource] = []
    # (id tag, resource role, name tag) triples for the standard Prefect
    # resources; the resource id is always "prefect.<role>.<id>".
    standard_roles = (
        ("prefect.io/flow-run-id", "flow-run", "prefect.io/flow-run-name"),
        ("prefect.io/deployment-id", "deployment", "prefect.io/deployment-name"),
        ("prefect.io/flow-id", "flow", "prefect.io/flow-name"),
        ("prefect.io/work-pool-id", "work-pool", "prefect.io/work-pool-name"),
    )
    for id_tag, role, name_tag in standard_roles:
        resource_id = tags.get(id_tag)
        if not resource_id:
            continue
        related.append(
            RelatedResource.model_validate(
                {
                    "prefect.resource.id": f"prefect.{role}.{resource_id}",
                    "prefect.resource.role": role,
                    "prefect.resource.name": tags.get(name_tag),
                }
            )
        )
    # Workers carry extra metadata and a slugified id, so they are handled
    # separately from the table above.
    if worker_name := tags.get("prefect.io/worker-name"):
        related.append(
            RelatedResource.model_validate(
                {
                    "prefect.resource.id": f"prefect.worker.ecs.{slugify(worker_name)}",
                    "prefect.resource.role": "worker",
                    "prefect.resource.name": worker_name,
                    "prefect.worker-type": "ecs",
                    "prefect.version": prefect.__version__,
                }
            )
        )
    return related
# Module-level singleton; the handlers below register themselves against it.
ecs_observer = EcsObserver()
@ecs_observer.on_event("task", tags={"prefect.io/flow-run-id": FilterCase.PRESENT})
async def replicate_ecs_event(event: dict[str, Any], tags: dict[str, str]):
    """Re-emit an ECS task state-change event as a Prefect event.

    Only runs for tasks tagged with a flow run id (see the decorator filter).
    """
    handler_logger = logger.getChild("replicate_ecs_event")
    # Guard clauses: the event must carry an id, a task ARN, and a status.
    event_id = event.get("id")
    if not event_id:
        handler_logger.debug("No event ID in event. Skipping.")
        return
    task_arn = event.get("detail", {}).get("taskArn")
    if not task_arn:
        handler_logger.debug("No task ARN in event. Skipping.")
        return
    last_status = event.get("detail", {}).get("lastStatus")
    if not last_status:
        handler_logger.debug("No last status in event. Skipping.")
        return
    handler_logger.debug(
        "Replicating ECS task %s event %s",
        last_status,
        event_id,
        extra={"event": event},
    )
    async with get_events_client() as events_client:
        event_id = uuid.UUID(event_id)
        task_id = task_arn.split("/")[-1]
        resource = {
            "prefect.resource.id": f"prefect.ecs.task.{task_id}",
            "ecs.taskArn": task_arn,
        }
        if cluster_arn := event.get("detail", {}).get("clusterArn"):
            resource["ecs.clusterArn"] = cluster_arn
        if task_definition_arn := event.get("detail", {}).get("taskDefinitionArn"):
            resource["ecs.taskDefinitionArn"] = task_definition_arn
        prefect_event = Event(
            event=f"prefect.ecs.task.{last_status.lower()}",
            resource=Resource.model_validate(resource),
            id=event_id,
            related=_related_resources_from_tags(tags),
        )
        # EventBridge timestamps end in "Z"; replace it with an explicit
        # offset so fromisoformat accepts it on all supported Pythons.
        if ecs_event_time := event.get("time"):
            prefect_event.occurred = datetime.datetime.fromisoformat(
                ecs_event_time.replace("Z", "+00:00")
            )
        # NOTE(review): the cache is keyed by the ECS *event* id, so a hit can
        # only occur for a redelivered event, which then "follows" itself. If
        # the intent was to chain a task's successive status events, the key
        # would need to be the task ARN — confirm the intended semantics.
        if (prev_event := _last_event_cache.get(event_id)) is not None:
            if (
                -timedelta(minutes=5)
                < (prefect_event.occurred - prev_event.occurred)
                < timedelta(minutes=5)
            ):
                prefect_event.follows = prev_event.id
        try:
            await events_client.emit(event=prefect_event)
            handler_logger.debug(
                "Replicated ECS task %s event %s",
                last_status,
                event_id,
                extra={"event": prefect_event},
            )
            # Cache only after a successful emit, so a failed emit can be
            # retried on redelivery without a stale follows link.
            _last_event_cache[event_id] = prefect_event
        except Exception:
            handler_logger.exception("Error emitting event %s", event_id)
@ecs_observer.on_event(
    "task", tags={"prefect.io/flow-run-id": FilterCase.PRESENT}, statuses=["STOPPED"]
)
async def mark_runs_as_crashed(event: dict[str, Any], tags: dict[str, str]):
    """Propose a Crashed state for flow runs whose ECS task stopped abnormally.

    Runs only for STOPPED tasks tagged with a flow run id.
    """
    handler_logger = logger.getChild("mark_runs_as_crashed")
    task_arn = event.get("detail", {}).get("taskArn")
    if not task_arn:
        handler_logger.debug("No task ARN in event. Skipping.")
        return
    flow_run_id = tags.get("prefect.io/flow-run-id")
    async with prefect.get_client() as orchestration_client:
        try:
            flow_run = await orchestration_client.read_flow_run(
                flow_run_id=uuid.UUID(flow_run_id)
            )
        except ObjectNotFound:
            logger.debug(f"Flow run {flow_run_id} not found, skipping")
            return
        assert flow_run.state is not None, "Expected flow run state to be set"
        # Exit early for final, scheduled, or paused states
        if (
            flow_run.state.is_final()
            or flow_run.state.is_scheduled()
            or flow_run.state.is_paused()
        ):
            logger.debug(
                f"Flow run {flow_run_id} is in final, scheduled, or paused state, skipping"
            )
            return
        containers = event.get("detail", {}).get("containers", [])
        # Prefer the Prefect orchestration container's exit status when it is
        # present; otherwise consider every container in the task.
        orchestration_container = next(
            (
                container
                for container in containers
                if container.get("name") == _ECS_DEFAULT_CONTAINER_NAME
            ),
            None,
        )
        if orchestration_container is not None:
            containers_to_check = [orchestration_container]
        else:
            containers_to_check = containers
        # A missing exit code is treated the same as a non-zero one.
        containers_with_non_zero_exit_codes = [
            container
            for container in containers_to_check
            if container.get("exitCode") is None or container.get("exitCode") != 0
        ]
        if any(containers_with_non_zero_exit_codes):
            container_identifiers = [
                c.get("name") or c.get("containerArn")
                for c in containers_with_non_zero_exit_codes
            ]
            handler_logger.info(
                "The following containers stopped with a non-zero exit code: %s. Marking flow run %s as crashed",
                container_identifiers,
                flow_run_id,
            )
            await propose_state(
                client=orchestration_client,
                state=Crashed(
                    message=f"The following containers stopped with a non-zero exit code: {container_identifiers}"
                ),
                flow_run_id=uuid.UUID(flow_run_id),
            )
@ecs_observer.on_event(
    "task",
    # NOTE(review): "degregister" looks like a typo, but this string must match
    # the tag the worker actually sets — confirm before renaming either side.
    tags={"prefect.io/degregister-task-definition": "true"},
    statuses=["STOPPED"],
)
async def deregister_task_definition(event: dict[str, Any], tags: dict[str, str]):
    """Deregister a stopped task's task definition when the opt-in tag is set."""
    handler_logger = logger.getChild("deregister_task_definition")
    if not (task_definition_arn := event.get("detail", {}).get("taskDefinitionArn")):
        handler_logger.debug("No task definition ARN in event. Skipping.")
        return
    async with aiobotocore.session.get_session().create_client("ecs") as ecs_client:
        await ecs_client.deregister_task_definition(taskDefinition=task_definition_arn)
        handler_logger.info(
            "Task definition %s successfully deregistered", task_definition_arn
        )
# Handle to the running observer task, if any.
_observer_task: asyncio.Task[None] | None = None
# Consecutive crash count, used to compute restart backoff.
_observer_restart_count: int = 0
# Pending delayed-restart task, if one is scheduled.
_observer_restart_task: asyncio.Task[None] | None = None
async def _restart_observer_after_delay(delay: int):
    """Restart the observer after a delay."""
    global _observer_task, _observer_restart_count, _observer_restart_task
    logger.info(
        "ECS observer will restart in %s seconds (attempt %s of %s)",
        delay,
        _observer_restart_count,
        OBSERVER_MAX_RESTART_ATTEMPTS,
    )
    await asyncio.sleep(delay)
    # Start the observer again
    _observer_task = asyncio.create_task(ecs_observer.run())
    # Re-attach the crash handler so the new task is also auto-restarted.
    _observer_task.add_done_callback(_observer_task_done)
    _observer_restart_task = None
    logger.info("ECS observer restarted")
def _observer_task_done(task: asyncio.Task[None]):
    """Done-callback for the observer task: schedule a backoff restart on crash.

    Cancellation and normal completion reset the crash counter; a crash
    increments it and schedules a restart until the attempt limit is reached.
    """
    global _observer_restart_count, _observer_restart_task
    if task.cancelled():
        logger.debug("ECS observer task cancelled")
        _observer_restart_count = 0
    elif task.exception():
        logger.error("ECS observer task crashed", exc_info=task.exception())
        _observer_restart_count += 1
        if _observer_restart_count <= OBSERVER_MAX_RESTART_ATTEMPTS:
            # Schedule a restart with exponential backoff
            delay = OBSERVER_RESTART_BASE_DELAY * (2 ** (_observer_restart_count - 1))
            try:
                loop = asyncio.get_event_loop()
                _observer_restart_task = loop.create_task(
                    _restart_observer_after_delay(delay)
                )
            except RuntimeError:
                # Done-callbacks can fire during interpreter/loop shutdown.
                logger.error(
                    "Cannot schedule observer restart: no event loop available"
                )
        else:
            logger.error(
                "ECS observer has crashed %s times, giving up on automatic restarts",
                _observer_restart_count,
            )
    else:
        logger.debug("ECS observer task completed")
        _observer_restart_count = 0
async def start_observer():
    """Start the ECS observer as a background task (no-op if already running)."""
    global _observer_task, _observer_restart_count, _observer_restart_task
    if _observer_task:
        return
    # Cancel any pending restart task
    if _observer_restart_task and not _observer_restart_task.done():
        _observer_restart_task.cancel()
        try:
            await _observer_restart_task
        except asyncio.CancelledError:
            pass
    _observer_restart_task = None
    _observer_restart_count = 0
    _observer_task = asyncio.create_task(ecs_observer.run())
    # Crash handler restarts the observer with backoff if it dies.
    _observer_task.add_done_callback(_observer_task_done)
    logger.debug("ECS observer started")
async def stop_observer():
    """Stop the ECS observer and any pending restart (no-op if not running)."""
    global _observer_task, _observer_restart_count, _observer_restart_task
    # Cancel any pending restart task
    if _observer_restart_task and not _observer_restart_task.done():
        _observer_restart_task.cancel()
        try:
            await _observer_restart_task
        except asyncio.CancelledError:
            pass
    _observer_restart_task = None
    if not _observer_task:
        return
    # Clear the global before cancelling so the done-callback won't restart it.
    task = _observer_task
    _observer_task = None
    _observer_restart_count = 0
    task.cancel()
    try:
        # shield keeps the await itself from being cancelled while we wait
        # for the observer task to finish unwinding.
        await asyncio.shield(task)
    except asyncio.CancelledError:
        pass
    logger.debug("ECS observer stopped")
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-aws/prefect_aws/observers/ecs.py",
"license": "Apache License 2.0",
"lines": 595,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/integrations/prefect-aws/prefect_aws/settings.py | from __future__ import annotations
from typing import Optional
from pydantic import Field
from prefect.settings.base import PrefectBaseSettings, build_settings_config
# Settings for the SQS queue the ECS observer polls. Environment prefix:
# PREFECT_INTEGRATIONS_AWS_ECS_OBSERVER_SQS_*.
class EcsObserverSqsSettings(PrefectBaseSettings):
    model_config = build_settings_config(
        ("integrations", "aws", "ecs", "observer", "sqs")
    )
    queue_name: str = Field(
        default="prefect-ecs-tasks-events",
        description="The name of the SQS queue to watch for Prefect-submitted ECS tasks.",
    )
    queue_region: Optional[str] = Field(
        default=None,
        description="The region of the SQS queue to watch for Prefect-submitted ECS tasks.",
    )
# Top-level settings for the ECS observer (enable flag + SQS configuration).
class EcsObserverSettings(PrefectBaseSettings):
    model_config = build_settings_config(("integrations", "aws", "ecs", "observer"))
    enabled: bool = Field(
        default=True,
        description="Whether to enable the ECS observer.",
    )
    sqs: EcsObserverSqsSettings = Field(
        description="Settings for controlling ECS observer SQS behavior.",
        default_factory=EcsObserverSqsSettings,
    )
class EcsWorkerSettings(PrefectBaseSettings):
    """Settings for controlling ECS worker behavior."""
    model_config = build_settings_config(("integrations", "aws", "ecs", "worker"))
    # Retry knobs below shape the worker's run_task retry loop: a fixed
    # minimum delay plus a random jitter drawn between the two jitter bounds.
    create_task_run_max_attempts: int = Field(
        default=3,
        description=(
            "The maximum number of attempts to create an ECS task run. "
            "Increase this value to allow more retries when task creation fails "
            "due to transient issues like resource constraints during cluster "
            "scaling."
        ),
        ge=1,
    )
    create_task_run_min_delay_seconds: int = Field(
        default=1,
        description=(
            "The minimum fixed delay in seconds between retries when creating "
            "an ECS task run."
        ),
        ge=0,
    )
    create_task_run_min_delay_jitter_seconds: int = Field(
        default=0,
        description=(
            "The minimum jitter in seconds to add to the delay between retries "
            "when creating an ECS task run."
        ),
        ge=0,
    )
    create_task_run_max_delay_jitter_seconds: int = Field(
        default=3,
        description=(
            "The maximum jitter in seconds to add to the delay between retries "
            "when creating an ECS task run."
        ),
        ge=0,
    )
# Groups ECS observer and worker settings under the "ecs" namespace.
class EcsSettings(PrefectBaseSettings):
    model_config = build_settings_config(("integrations", "aws", "ecs"))
    observer: EcsObserverSettings = Field(
        description="Settings for controlling ECS observer behavior.",
        default_factory=EcsObserverSettings,
    )
    worker: EcsWorkerSettings = Field(
        description="Settings for controlling ECS worker behavior.",
        default_factory=EcsWorkerSettings,
    )
class RdsIAMSettings(PrefectBaseSettings):
    """Settings for controlling RDS IAM authentication."""
    model_config = build_settings_config(("integrations", "aws", "rds", "iam"))
    enabled: bool = Field(
        default=False,
        description="Controls whether to use IAM authentication for RDS PostgreSQL connections.",
    )
    region_name: Optional[str] = Field(
        default=None,
        description="The AWS region for IAM authentication. If not provided, it will be inferred from the environment.",
    )
class RdsSettings(PrefectBaseSettings):
    """Settings for AWS RDS integration."""
    model_config = build_settings_config(("integrations", "aws", "rds"))
    iam: RdsIAMSettings = Field(
        description="Settings for controlling RDS IAM authentication.",
        default_factory=RdsIAMSettings,
    )
# Root of the prefect-aws settings tree ("integrations.aws" namespace).
class AwsSettings(PrefectBaseSettings):
    model_config = build_settings_config(("integrations", "aws"))
    ecs: EcsSettings = Field(
        description="Settings for controlling ECS behavior.",
        default_factory=EcsSettings,
    )
    rds: RdsSettings = Field(
        description="Settings for controlling RDS behavior.",
        default_factory=RdsSettings,
    )
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-aws/prefect_aws/settings.py",
"license": "Apache License 2.0",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/integrations/prefect-aws/tests/observers/test_ecs_observer.py | from __future__ import annotations
import asyncio
import json
import uuid
from datetime import datetime
from typing import Any, AsyncGenerator
from unittest.mock import AsyncMock, Mock, patch
import pytest
from cachetools import LRUCache
from prefect_aws.observers.ecs import (
EcsObserver,
EcsTaskTagsReader,
FilterCase,
LastStatusFilter,
SqsSubscriber,
TagsFilter,
_related_resources_from_tags,
deregister_task_definition,
mark_runs_as_crashed,
replicate_ecs_event,
start_observer,
stop_observer,
)
from prefect_aws.settings import EcsObserverSettings
from prefect.client.schemas import FlowRun, State
from prefect.client.schemas.objects import StateType
from prefect.events.schemas.events import Event, Resource
from prefect.exceptions import ObjectNotFound
class TestTagsFilter:
    """Covers the presence/absence/exact-value matching of TagsFilter."""

    def test_is_match_with_no_filter_tags(self):
        # An empty filter matches anything, including no tags at all.
        filter = TagsFilter()
        assert filter.is_match({"any": "tags"})
        assert filter.is_match({})

    def test_is_match_with_present_filter(self):
        filter = TagsFilter(required_key=FilterCase.PRESENT)
        assert filter.is_match({"required_key": "any_value"})
        assert not filter.is_match({"other_key": "value"})
        assert not filter.is_match({})

    def test_is_match_with_absent_filter(self):
        filter = TagsFilter(forbidden_key=FilterCase.ABSENT)
        assert filter.is_match({"other_key": "value"})
        assert filter.is_match({})
        assert not filter.is_match({"forbidden_key": "any_value"})

    def test_is_match_with_specific_value(self):
        filter = TagsFilter(key1="expected_value")
        assert filter.is_match({"key1": "expected_value"})
        assert not filter.is_match({"key1": "wrong_value"})
        assert not filter.is_match({})

    def test_is_match_with_combined_filters(self):
        # All configured conditions must hold simultaneously.
        filter = TagsFilter(
            present_key=FilterCase.PRESENT,
            absent_key=FilterCase.ABSENT,
            specific_key="specific_value",
        )
        assert filter.is_match(
            {"present_key": "any", "specific_key": "specific_value", "other": "data"}
        )
        assert not filter.is_match(
            {"present_key": "any", "specific_key": "wrong_value"}
        )
        assert not filter.is_match(
            {"present_key": "any", "absent_key": "should_not_be_here"}
        )
        assert not filter.is_match({"specific_key": "specific_value"})
class TestLastStatusFilter:
    """Covers allow-list matching of LastStatusFilter against task statuses."""

    def test_is_match_with_no_filter_statuses(self):
        # An empty filter matches every status.
        filter = LastStatusFilter()
        assert filter.is_match("RUNNING")
        assert filter.is_match("STOPPED")
        assert filter.is_match("PENDING")

    def test_is_match_with_single_status(self):
        filter = LastStatusFilter("RUNNING")
        assert filter.is_match("RUNNING")
        assert not filter.is_match("STOPPED")
        assert not filter.is_match("PENDING")

    def test_is_match_with_multiple_statuses(self):
        filter = LastStatusFilter("RUNNING", "STOPPED")
        assert filter.is_match("RUNNING")
        assert filter.is_match("STOPPED")
        assert not filter.is_match("PENDING")
        assert not filter.is_match("PROVISIONING")

    def test_is_match_with_all_valid_statuses(self):
        # Sanity check across the full ECS lastStatus vocabulary.
        filter = LastStatusFilter(
            "PROVISIONING",
            "PENDING",
            "ACTIVATING",
            "RUNNING",
            "DEACTIVATING",
            "STOPPING",
            "DEPROVISIONING",
            "STOPPED",
            "DELETED",
        )
        assert filter.is_match("PROVISIONING")
        assert filter.is_match("PENDING")
        assert filter.is_match("ACTIVATING")
        assert filter.is_match("RUNNING")
        assert filter.is_match("DEACTIVATING")
        assert filter.is_match("STOPPING")
        assert filter.is_match("DEPROVISIONING")
        assert filter.is_match("STOPPED")
        assert filter.is_match("DELETED")

    def test_is_match_with_final_states(self):
        filter = LastStatusFilter("STOPPED", "DELETED")
        assert filter.is_match("STOPPED")
        assert filter.is_match("DELETED")
        assert not filter.is_match("RUNNING")
        assert not filter.is_match("PENDING")

    def test_is_match_with_intermediate_states(self):
        filter = LastStatusFilter("PROVISIONING", "PENDING", "ACTIVATING")
        assert filter.is_match("PROVISIONING")
        assert filter.is_match("PENDING")
        assert filter.is_match("ACTIVATING")
        assert not filter.is_match("RUNNING")
        assert not filter.is_match("STOPPED")
class TestEcsTaskTagsReader:
    """Covers caching, error handling, and AWS response parsing in EcsTaskTagsReader."""

    @pytest.fixture
    def tags_reader(self):
        return EcsTaskTagsReader()

    @pytest.fixture
    def mock_ecs_client(self):
        client = AsyncMock()
        return client

    async def test_init(self, tags_reader):
        assert tags_reader.ecs_client is None
        assert isinstance(tags_reader._cache, LRUCache)
        assert tags_reader._cache.maxsize == 100

    async def test_read_tags_without_client(self, tags_reader):
        # read_tags must refuse to run before __aenter__ opened a client.
        with pytest.raises(RuntimeError, match="ECS client not initialized"):
            await tags_reader.read_tags("cluster-arn", "task-arn")

    async def test_read_tags_from_cache(self, tags_reader, mock_ecs_client):
        tags_reader.ecs_client = mock_ecs_client
        cached_tags = {"key": "value"}
        tags_reader._cache["task-arn"] = cached_tags
        result = await tags_reader.read_tags("cluster-arn", "task-arn")
        assert result == cached_tags
        # A cache hit must not touch AWS.
        mock_ecs_client.describe_tasks.assert_not_called()

    async def test_read_tags_from_ecs(self, tags_reader, mock_ecs_client):
        tags_reader.ecs_client = mock_ecs_client
        mock_ecs_client.describe_tasks.return_value = {
            "tasks": [
                {
                    "tags": [
                        {"key": "tag1", "value": "value1"},
                        {"key": "tag2", "value": "value2"},
                    ]
                }
            ]
        }
        result = await tags_reader.read_tags("cluster-arn", "task-arn")
        assert result == {"tag1": "value1", "tag2": "value2"}
        # The freshly fetched tags are cached for subsequent lookups.
        assert tags_reader._cache["task-arn"] == result
        mock_ecs_client.describe_tasks.assert_called_once_with(
            cluster="cluster-arn",
            tasks=["task-arn"],
            include=["TAGS"],
        )

    async def test_read_tags_handles_missing_keys(self, tags_reader, mock_ecs_client):
        tags_reader.ecs_client = mock_ecs_client
        # Malformed tag entries (missing key or value) must be skipped.
        mock_ecs_client.describe_tasks.return_value = {
            "tasks": [
                {
                    "tags": [
                        {"key": "tag1", "value": "value1"},
                        {"value": "missing_key"},
                        {"key": "missing_value"},
                        {},
                    ]
                }
            ]
        }
        result = await tags_reader.read_tags("cluster-arn", "task-arn")
        assert result == {"tag1": "value1"}

    async def test_read_tags_handles_empty_response(self, tags_reader, mock_ecs_client):
        tags_reader.ecs_client = mock_ecs_client
        mock_ecs_client.describe_tasks.return_value = {}
        result = await tags_reader.read_tags("cluster-arn", "task-arn")
        assert result == {}

    async def test_read_tags_handles_exception(
        self, tags_reader, mock_ecs_client, capfd
    ):
        tags_reader.ecs_client = mock_ecs_client
        mock_ecs_client.describe_tasks.side_effect = Exception("AWS error")
        # Errors are swallowed and reported on stdout (capfd captures it).
        result = await tags_reader.read_tags("cluster-arn", "task-arn")
        assert result == {}
        captured = capfd.readouterr()
        assert "Error reading tags for task task-arn: AWS error" in captured.out
class TestSqsSubscriber:
    @pytest.fixture
    def subscriber(self):
        # A subscriber bound to a fixed queue/region pair for the tests below.
        return SqsSubscriber("test-queue", "us-east-1")
    def test_init(self):
        # Constructor stores both the queue name and the explicit region.
        subscriber = SqsSubscriber("queue-name", "us-west-2")
        assert subscriber.queue_name == "queue-name"
        assert subscriber.queue_region == "us-west-2"
    def test_init_without_region(self):
        # Region is optional and defaults to None (client picks the default).
        subscriber = SqsSubscriber("queue-name")
        assert subscriber.queue_name == "queue-name"
        assert subscriber.queue_region is None
    @patch("prefect_aws.observers.ecs.aiobotocore.session.get_session")
    async def test_stream_messages(self, mock_get_session, subscriber):
        """Messages are yielded in order and deleted after consumption."""
        mock_session = Mock()
        mock_sqs_client = AsyncMock()
        mock_client_context = AsyncMock()
        mock_client_context.__aenter__.return_value = mock_sqs_client
        mock_session.create_client.return_value = mock_client_context
        mock_get_session.return_value = mock_session
        mock_sqs_client.get_queue_url.return_value = {
            "QueueUrl": "https://sqs.us-east-1.amazonaws.com/123456789/test-queue"
        }
        messages_batch_1 = {
            "Messages": [
                {"Body": "message1", "ReceiptHandle": "handle1"},
                {"Body": "message2", "ReceiptHandle": "handle2"},
            ]
        }
        messages_batch_2 = {
            "Messages": [
                {"Body": "message3", "ReceiptHandle": "handle3"},
            ]
        }
        empty_batch = {"Messages": []}
        mock_sqs_client.receive_message.side_effect = [
            messages_batch_1,
            messages_batch_2,
            empty_batch,
        ]
        messages = []
        message_generator = subscriber.stream_messages()
        async for message in message_generator:
            messages.append(message)
            if len(messages) >= 3:
                # Close the generator properly to avoid pending task warning
                await message_generator.aclose()
                break
        assert len(messages) == 3
        assert messages[0]["Body"] == "message1"
        assert messages[1]["Body"] == "message2"
        assert messages[2]["Body"] == "message3"
        # Note: Only 2 deletes will be called because we break after the 3rd yield
        # but before its delete can execute
        assert mock_sqs_client.delete_message.call_count == 2
        delete_calls = mock_sqs_client.delete_message.call_args_list
        # Extract the arguments from each call (only 2 will complete)
        for i, handle in enumerate(["handle1", "handle2"]):
            call_kwargs = delete_calls[i].kwargs
            assert (
                call_kwargs["QueueUrl"]
                == "https://sqs.us-east-1.amazonaws.com/123456789/test-queue"
            )
            assert call_kwargs["ReceiptHandle"] == handle
@patch("prefect_aws.observers.ecs.aiobotocore.session.get_session")
async def test_stream_messages_skips_without_receipt_handle(
self, mock_get_session, subscriber
):
mock_session = Mock()
mock_sqs_client = AsyncMock()
mock_client_context = AsyncMock()
mock_client_context.__aenter__.return_value = mock_sqs_client
mock_session.create_client.return_value = mock_client_context
mock_get_session.return_value = mock_session
mock_sqs_client.get_queue_url.return_value = {
"QueueUrl": "https://sqs.us-east-1.amazonaws.com/123456789/test-queue"
}
messages_batch = {
"Messages": [
{"Body": "message1"}, # No ReceiptHandle, should be skipped
{"Body": "message2", "ReceiptHandle": "handle2"},
]
}
# Second batch to ensure we can break out
empty_batch = {"Messages": []}
mock_sqs_client.receive_message.side_effect = [
messages_batch,
empty_batch,
]
messages = []
message_generator = subscriber.stream_messages()
async for message in message_generator:
messages.append(message)
# Since message1 is skipped (no receipt handle), we only get message2
await message_generator.aclose()
break
assert len(messages) == 1
assert messages[0]["Body"] == "message2"
assert messages[0]["ReceiptHandle"] == "handle2"
# Note: delete may not be called if we break immediately after yield
# The generator is interrupted before the delete after yield can execute
@patch("prefect_aws.observers.ecs.aiobotocore.session.get_session")
@patch("prefect_aws.observers.ecs.asyncio.sleep")
async def test_stream_messages_backoff_after_consecutive_failures(
self, mock_sleep, mock_get_session, subscriber
):
"""Test that backoff is triggered after 3 consecutive failures."""
mock_session = Mock()
mock_sqs_client = AsyncMock()
mock_client_context = AsyncMock()
mock_client_context.__aenter__.return_value = mock_sqs_client
mock_session.create_client.return_value = mock_client_context
mock_get_session.return_value = mock_session
mock_sqs_client.get_queue_url.return_value = {
"QueueUrl": "https://sqs.us-east-1.amazonaws.com/123456789/test-queue"
}
# Simulate 3 consecutive failures, then success
failure_exception = Exception("Temporary AWS error")
mock_sqs_client.receive_message.side_effect = [
failure_exception,
failure_exception,
failure_exception,
{"Messages": [{"Body": "message1", "ReceiptHandle": "handle1"}]},
]
messages = []
message_generator = subscriber.stream_messages()
async for message in message_generator:
messages.append(message)
await message_generator.aclose()
break
# Should have triggered one backoff with delay of 2 seconds (2^1 * 1)
mock_sleep.assert_called_once_with(2)
assert len(messages) == 1
@patch("prefect_aws.observers.ecs.aiobotocore.session.get_session")
@patch("prefect_aws.observers.ecs.asyncio.sleep")
async def test_stream_messages_exponential_backoff(
self, mock_sleep, mock_get_session, subscriber
):
"""Test that backoff uses exponential delays."""
mock_session = Mock()
mock_sqs_client = AsyncMock()
mock_client_context = AsyncMock()
mock_client_context.__aenter__.return_value = mock_sqs_client
mock_session.create_client.return_value = mock_client_context
mock_get_session.return_value = mock_session
mock_sqs_client.get_queue_url.return_value = {
"QueueUrl": "https://sqs.us-east-1.amazonaws.com/123456789/test-queue"
}
# Simulate multiple rounds of 3 consecutive failures
failure_exception = Exception("Temporary AWS error")
mock_sqs_client.receive_message.side_effect = [
# First round: 3 failures -> backoff with 2s
failure_exception,
failure_exception,
failure_exception,
# Second round: 3 failures -> backoff with 4s
failure_exception,
failure_exception,
failure_exception,
# Third round: 3 failures -> backoff with 8s
failure_exception,
failure_exception,
failure_exception,
# Success
{"Messages": [{"Body": "message1", "ReceiptHandle": "handle1"}]},
]
messages = []
message_generator = subscriber.stream_messages()
async for message in message_generator:
messages.append(message)
await message_generator.aclose()
break
# Should have triggered 3 backoffs with exponential delays: 2s, 4s, 8s
assert mock_sleep.call_count == 3
sleep_calls = [call.args[0] for call in mock_sleep.call_args_list]
assert sleep_calls == [2, 4, 8]
@patch("prefect_aws.observers.ecs.aiobotocore.session.get_session")
@patch("prefect_aws.observers.ecs.asyncio.sleep")
async def test_stream_messages_raises_after_max_backoff_attempts(
self, mock_sleep, mock_get_session, subscriber
):
"""Test that RuntimeError is raised after exceeding max backoff attempts."""
mock_session = Mock()
mock_sqs_client = AsyncMock()
mock_client_context = AsyncMock()
mock_client_context.__aenter__.return_value = mock_sqs_client
mock_session.create_client.return_value = mock_client_context
mock_get_session.return_value = mock_session
mock_sqs_client.get_queue_url.return_value = {
"QueueUrl": "https://sqs.us-east-1.amazonaws.com/123456789/test-queue"
}
# Simulate continuous failures (3 failures * 6 rounds = 18 failures)
failure_exception = Exception("Persistent AWS error")
mock_sqs_client.receive_message.side_effect = [failure_exception] * 18
message_generator = subscriber.stream_messages()
with pytest.raises(
RuntimeError, match="SQS polling failed after 5 backoff attempts"
):
async for _ in message_generator:
pass
# Should have attempted 5 backoffs before giving up
assert mock_sleep.call_count == 5
sleep_calls = [call.args[0] for call in mock_sleep.call_args_list]
assert sleep_calls == [2, 4, 8, 16, 32]
@patch("prefect_aws.observers.ecs.aiobotocore.session.get_session")
@patch("prefect_aws.observers.ecs.asyncio.sleep")
async def test_stream_messages_resets_backoff_on_success(
self, mock_sleep, mock_get_session, subscriber
):
"""Test that successful message reception resets the backoff counter."""
mock_session = Mock()
mock_sqs_client = AsyncMock()
mock_client_context = AsyncMock()
mock_client_context.__aenter__.return_value = mock_sqs_client
mock_session.create_client.return_value = mock_client_context
mock_get_session.return_value = mock_session
mock_sqs_client.get_queue_url.return_value = {
"QueueUrl": "https://sqs.us-east-1.amazonaws.com/123456789/test-queue"
}
failure_exception = Exception("Temporary AWS error")
mock_sqs_client.receive_message.side_effect = [
# First round: 3 failures -> backoff with 2s
failure_exception,
failure_exception,
failure_exception,
# Success (resets backoff counter)
{"Messages": [{"Body": "message1", "ReceiptHandle": "handle1"}]},
# Second round: 3 failures -> should restart at 2s, not 4s
failure_exception,
failure_exception,
failure_exception,
# Success
{"Messages": [{"Body": "message2", "ReceiptHandle": "handle2"}]},
]
messages = []
message_generator = subscriber.stream_messages()
async for message in message_generator:
messages.append(message)
if len(messages) >= 2:
await message_generator.aclose()
break
# Should have triggered 2 backoffs, both with 2s delay (counter reset after first success)
assert mock_sleep.call_count == 2
sleep_calls = [call.args[0] for call in mock_sleep.call_args_list]
assert sleep_calls == [2, 2]
assert len(messages) == 2
class TestEcsObserver:
    """Tests for ``EcsObserver``: construction, ``on_event`` handler
    registration, and the SQS-driven message processing loop."""
    @pytest.fixture
    def settings(self):
        return EcsObserverSettings()
    @pytest.fixture
    def mock_sqs_subscriber(self):
        return AsyncMock(spec=SqsSubscriber)
    @pytest.fixture
    def mock_tags_reader(self):
        # Async context manager mock that returns itself on entry.
        reader = AsyncMock(spec=EcsTaskTagsReader)
        reader.__aenter__.return_value = reader
        return reader
    @pytest.fixture
    def observer(self, settings, mock_sqs_subscriber, mock_tags_reader):
        # Observer wired to fully-mocked collaborators.
        return EcsObserver(
            settings=settings,
            sqs_subscriber=mock_sqs_subscriber,
            ecs_tags_reader=mock_tags_reader,
        )
    def test_init_with_defaults(self):
        """Default construction builds real collaborators and empty
        per-kind handler lists."""
        observer = EcsObserver()
        assert isinstance(observer.settings, EcsObserverSettings)
        assert isinstance(observer.sqs_subscriber, SqsSubscriber)
        assert isinstance(observer.ecs_tags_reader, EcsTaskTagsReader)
        assert observer.event_handlers == {
            "task": [],
            "container-instance": [],
            "deployment": [],
        }
    def test_init_with_custom_components(
        self, settings, mock_sqs_subscriber, mock_tags_reader
    ):
        """Injected collaborators are stored unchanged."""
        observer = EcsObserver(
            settings=settings,
            sqs_subscriber=mock_sqs_subscriber,
            ecs_tags_reader=mock_tags_reader,
        )
        assert observer.settings == settings
        assert observer.sqs_subscriber == mock_sqs_subscriber
        assert observer.ecs_tags_reader == mock_tags_reader
    def test_on_event_decorator(self, observer):
        """``on_event`` registers the handler with its tag filter and
        returns the handler unchanged."""
        handler = Mock()
        decorated = observer.on_event("task", tags={"key": "value"})(handler)
        assert decorated == handler
        assert len(observer.event_handlers["task"]) == 1
        handler_with_filters = observer.event_handlers["task"][0]
        assert handler_with_filters.handler == handler
        assert isinstance(handler_with_filters.filters["tags"], TagsFilter)
    def test_on_event_decorator_multiple_handlers(self, observer):
        """Multiple handlers may be registered for the same event kind."""
        handler1 = Mock()
        handler2 = Mock()
        observer.on_event("task")(handler1)
        observer.on_event("task", tags={"key": FilterCase.PRESENT})(handler2)
        assert len(observer.event_handlers["task"]) == 2
    async def test_run_processes_messages(
        self, observer, mock_sqs_subscriber, mock_tags_reader
    ):
        """``run`` dispatches a matching message to its handler with the
        parsed event body and the task's tags."""
        handler = AsyncMock()
        handler.__name__ = "test_handler" # Mock needs __name__ attribute
        observer.on_event("task", tags={"prefect": "test"})(handler)
        message = {
            "Body": json.dumps(
                {
                    "detail-type": "ECS Task State Change",
                    "detail": {
                        "taskArn": "arn:aws:ecs:us-east-1:123456789:task/task-id",
                        "clusterArn": "arn:aws:ecs:us-east-1:123456789:cluster/cluster",
                    },
                }
            )
        }
        mock_sqs_subscriber.stream_messages.return_value = async_generator_from_list(
            [message]
        )
        mock_tags_reader.read_tags.return_value = {"prefect": "test"}
        # Run the observer in the background and give it time to consume
        # the single queued message.
        task = asyncio.create_task(observer.run())
        await asyncio.sleep(0.1)
        handler.assert_called_once()
        call_args = handler.call_args[0]
        assert call_args[0]["detail-type"] == "ECS Task State Change"
        assert call_args[1] == {"prefect": "test"}
        task.cancel()
        try:
            await task
        except asyncio.CancelledError:
            pass
    async def test_run_skips_message_without_body(
        self, observer, mock_sqs_subscriber, mock_tags_reader
    ):
        """Messages without a ``Body`` never reach any handler."""
        handler = Mock()
        handler.__name__ = "test_handler" # Mock needs __name__ attribute
        observer.on_event("task")(handler)
        message = {"MessageId": "123"}
        mock_sqs_subscriber.stream_messages.return_value = async_generator_from_list(
            [message]
        )
        task = asyncio.create_task(observer.run())
        await asyncio.sleep(0.1)
        handler.assert_not_called()
        task.cancel()
        try:
            await task
        except asyncio.CancelledError:
            pass
    async def test_run_handles_sync_handler(
        self, observer, mock_sqs_subscriber, mock_tags_reader
    ):
        """A synchronous (non-async) handler is accepted by ``run``."""
        handler = Mock()
        handler.__name__ = "test_handler" # Mock needs __name__ attribute
        observer.on_event("task")(handler)
        message = {
            "Body": json.dumps(
                {
                    "detail-type": "ECS Task State Change",
                    "detail": {},
                }
            )
        }
        mock_sqs_subscriber.stream_messages.return_value = async_generator_from_list(
            [message]
        )
        task = asyncio.create_task(observer.run())
        await asyncio.sleep(0.2)
        task.cancel()
        try:
            await task
        except asyncio.CancelledError:
            pass
        except BaseException:
            # Handle any ExceptionGroup raised by the task group
            pass
    async def test_run_filters_handlers_by_tags(
        self, observer, mock_sqs_subscriber, mock_tags_reader
    ):
        """Only handlers whose tag filter matches the task's tags fire."""
        matching_handler = AsyncMock()
        matching_handler.__name__ = "matching_handler" # Mock needs __name__ attribute
        non_matching_handler = AsyncMock()
        non_matching_handler.__name__ = (
            "non_matching_handler" # Mock needs __name__ attribute
        )
        observer.on_event("task", tags={"env": "prod"})(matching_handler)
        observer.on_event("task", tags={"env": "dev"})(non_matching_handler)
        message = {
            "Body": json.dumps(
                {
                    "detail-type": "ECS Task State Change",
                    "detail": {
                        "taskArn": "arn:aws:ecs:us-east-1:123456789:task/task-id",
                        "clusterArn": "arn:aws:ecs:us-east-1:123456789:cluster/cluster",
                    },
                }
            )
        }
        mock_sqs_subscriber.stream_messages.return_value = async_generator_from_list(
            [message]
        )
        mock_tags_reader.read_tags.return_value = {"env": "prod"}
        task = asyncio.create_task(observer.run())
        await asyncio.sleep(0.2)
        matching_handler.assert_called_once()
        non_matching_handler.assert_not_called()
        task.cancel()
        try:
            await task
        except asyncio.CancelledError:
            pass
class TestRelatedResourcesFromTags:
    """Tests for mapping Prefect ECS task tags to related-resource
    descriptors via ``_related_resources_from_tags``."""
    def test_empty_tags(self):
        """An empty tag mapping yields no related resources."""
        assert _related_resources_from_tags({}) == []
    def test_flow_run_tags(self):
        """Flow-run id/name tags become a single flow-run resource."""
        resources = _related_resources_from_tags(
            {
                "prefect.io/flow-run-id": "flow-run-123",
                "prefect.io/flow-run-name": "my-flow-run",
            }
        )
        assert len(resources) == 1
        dump = resources[0].model_dump()
        assert dump["prefect.resource.id"] == "prefect.flow-run.flow-run-123"
        assert dump["prefect.resource.role"] == "flow-run"
        assert dump["prefect.resource.name"] == "my-flow-run"
    def test_deployment_tags(self):
        """Deployment id/name tags become a single deployment resource."""
        resources = _related_resources_from_tags(
            {
                "prefect.io/deployment-id": "deployment-456",
                "prefect.io/deployment-name": "my-deployment",
            }
        )
        assert len(resources) == 1
        dump = resources[0].model_dump()
        assert dump["prefect.resource.id"] == "prefect.deployment.deployment-456"
        assert dump["prefect.resource.role"] == "deployment"
        assert dump["prefect.resource.name"] == "my-deployment"
    def test_flow_tags(self):
        """Flow id/name tags become a single flow resource."""
        resources = _related_resources_from_tags(
            {
                "prefect.io/flow-id": "flow-789",
                "prefect.io/flow-name": "my-flow",
            }
        )
        assert len(resources) == 1
        dump = resources[0].model_dump()
        assert dump["prefect.resource.id"] == "prefect.flow.flow-789"
        assert dump["prefect.resource.role"] == "flow"
        assert dump["prefect.resource.name"] == "my-flow"
    def test_work_pool_tags(self):
        """Work-pool id/name tags become a single work-pool resource."""
        resources = _related_resources_from_tags(
            {
                "prefect.io/work-pool-id": "pool-abc",
                "prefect.io/work-pool-name": "my-pool",
            }
        )
        assert len(resources) == 1
        dump = resources[0].model_dump()
        assert dump["prefect.resource.id"] == "prefect.work-pool.pool-abc"
        assert dump["prefect.resource.role"] == "work-pool"
        assert dump["prefect.resource.name"] == "my-pool"
    def test_worker_tags(self):
        """A worker-name tag becomes a worker resource; the resource id is
        lowercased/hyphenated while the display name keeps the raw value."""
        resources = _related_resources_from_tags(
            {
                "prefect.io/worker-name": "My Worker",
            }
        )
        assert len(resources) == 1
        dump = resources[0].model_dump()
        assert dump["prefect.resource.id"] == "prefect.worker.ecs.my-worker"
        assert dump["prefect.resource.role"] == "worker"
        assert dump["prefect.resource.name"] == "My Worker"
    def test_all_tags_combined(self):
        """All supported tag families together yield one resource each."""
        resources = _related_resources_from_tags(
            {
                "prefect.io/flow-run-id": "flow-run-123",
                "prefect.io/flow-run-name": "my-flow-run",
                "prefect.io/deployment-id": "deployment-456",
                "prefect.io/deployment-name": "my-deployment",
                "prefect.io/flow-id": "flow-789",
                "prefect.io/flow-name": "my-flow",
                "prefect.io/work-pool-id": "pool-abc",
                "prefect.io/work-pool-name": "my-pool",
                "prefect.io/worker-name": "my-worker",
            }
        )
        assert len(resources) == 5
        resource_ids = {r.model_dump()["prefect.resource.id"] for r in resources}
        assert resource_ids == {
            "prefect.flow-run.flow-run-123",
            "prefect.deployment.deployment-456",
            "prefect.flow.flow-789",
            "prefect.work-pool.pool-abc",
            "prefect.worker.ecs.my-worker",
        }
class TestReplicateEcsEvent:
    """Tests for ``replicate_ecs_event``, which mirrors ECS task state
    changes as Prefect events (skipping malformed events and chaining
    follow-up events to the previous one for the same task)."""
    @pytest.fixture
    def sample_event(self):
        # A well-formed ECS task state-change event.
        return {
            "id": str(uuid.uuid4()),
            "time": "2024-01-01T12:00:00Z",
            "detail": {
                "taskArn": "arn:aws:ecs:us-east-1:123456789:task/cluster/task-id",
                "clusterArn": "arn:aws:ecs:us-east-1:123456789:cluster/cluster",
                "taskDefinitionArn": "arn:aws:ecs:us-east-1:123456789:task-definition/task-def:1",
                "lastStatus": "RUNNING",
            },
        }
    @pytest.fixture
    def sample_tags(self):
        # Prefect tags attached to the ECS task.
        return {
            "prefect.io/flow-run-id": "flow-run-123",
            "prefect.io/flow-run-name": "my-flow-run",
        }
    @patch("prefect_aws.observers.ecs.get_events_client")
    async def test_replicate_ecs_event(
        self, mock_get_events_client, sample_event, sample_tags
    ):
        """A valid event is emitted with the mapped name, id, resource,
        and related resources derived from the tags."""
        mock_events_client = AsyncMock()
        mock_context = AsyncMock()
        mock_context.__aenter__.return_value = mock_events_client
        mock_get_events_client.return_value = mock_context
        await replicate_ecs_event(sample_event, sample_tags)
        mock_events_client.emit.assert_called_once()
        emitted_event = mock_events_client.emit.call_args[1]["event"]
        assert emitted_event.event == "prefect.ecs.task.running"
        assert emitted_event.id == uuid.UUID(sample_event["id"])
        assert "prefect.ecs.task.task-id" in str(
            emitted_event.resource.model_dump()["prefect.resource.id"]
        )
        assert len(emitted_event.related) > 0
    @patch("prefect_aws.observers.ecs.get_events_client")
    async def test_replicate_ecs_event_missing_id(
        self, mock_get_events_client, sample_tags
    ):
        """Events without an ``id`` are dropped before any client is made."""
        event = {"detail": {"taskArn": "arn", "lastStatus": "RUNNING"}}
        await replicate_ecs_event(event, sample_tags)
        mock_get_events_client.assert_not_called()
    @patch("prefect_aws.observers.ecs.get_events_client")
    async def test_replicate_ecs_event_missing_task_arn(
        self, mock_get_events_client, sample_tags
    ):
        """Events without a ``taskArn`` are dropped."""
        event = {"id": str(uuid.uuid4()), "detail": {"lastStatus": "RUNNING"}}
        await replicate_ecs_event(event, sample_tags)
        mock_get_events_client.assert_not_called()
    @patch("prefect_aws.observers.ecs.get_events_client")
    async def test_replicate_ecs_event_missing_last_status(
        self, mock_get_events_client, sample_tags
    ):
        """Events without a ``lastStatus`` are dropped."""
        event = {
            "id": str(uuid.uuid4()),
            "detail": {
                "taskArn": "arn:aws:ecs:us-east-1:123456789:task/cluster/task-id"
            },
        }
        await replicate_ecs_event(event, sample_tags)
        mock_get_events_client.assert_not_called()
    @patch("prefect_aws.observers.ecs.get_events_client")
    @patch("prefect_aws.observers.ecs._last_event_cache")
    async def test_replicate_ecs_event_with_follows(
        self, mock_cache, mock_get_events_client, sample_event, sample_tags
    ):
        """When a prior event for the task is cached, the emitted event's
        ``follows`` links to the prior event's id."""
        mock_events_client = AsyncMock()
        mock_context = AsyncMock()
        mock_context.__aenter__.return_value = mock_events_client
        mock_get_events_client.return_value = mock_context
        previous_event = Event(
            event="prefect.ecs.task.pending",
            resource=Resource.model_validate({"prefect.resource.id": "test"}),
            occurred=datetime.fromisoformat("2024-01-01T11:59:00+00:00"),
        )
        mock_cache.get.return_value = previous_event
        await replicate_ecs_event(sample_event, sample_tags)
        emitted_event = mock_events_client.emit.call_args[1]["event"]
        assert emitted_event.follows == previous_event.id
    @patch("prefect_aws.observers.ecs.get_events_client")
    async def test_replicate_ecs_event_handles_exception(
        self, mock_get_events_client, sample_event, sample_tags
    ):
        """Emit failures are swallowed rather than propagated."""
        mock_events_client = AsyncMock()
        mock_context = AsyncMock()
        mock_context.__aenter__.return_value = mock_events_client
        mock_get_events_client.return_value = mock_context
        mock_events_client.emit.side_effect = Exception("Emit failed")
        await replicate_ecs_event(sample_event, sample_tags)
class TestMarkRunsAsCrashed:
    """Tests for ``mark_runs_as_crashed``: a flow run is proposed a
    Crashed state only when its ECS task's orchestration container exited
    abnormally and the run is not already in a final/scheduled/paused
    state."""
    @pytest.fixture
    def sample_event(self):
        # Task event where the orchestration ("prefect") container failed.
        return {
            "detail": {
                "taskArn": "arn:aws:ecs:us-east-1:123456789:task/cluster/task-id",
                "containers": [
                    {"name": "prefect", "exitCode": 1},
                    {"name": "sidecar", "exitCode": 0},
                ],
            }
        }
    @pytest.fixture
    def sample_tags(self):
        # Tags linking the task to a Prefect flow run.
        return {"prefect.io/flow-run-id": str(uuid.uuid4())}
    @patch("prefect_aws.observers.ecs.prefect.get_client")
    @patch("prefect_aws.observers.ecs.propose_state")
    async def test_mark_runs_as_crashed_with_non_zero_exit_codes(
        self, mock_propose_state, mock_get_client, sample_event, sample_tags
    ):
        """A non-zero orchestration exit code on a running flow run
        proposes a Crashed state."""
        flow_run_id = uuid.UUID(sample_tags["prefect.io/flow-run-id"])
        mock_client = AsyncMock()
        mock_context = AsyncMock()
        mock_context.__aenter__.return_value = mock_client
        mock_get_client.return_value = mock_context
        # Mock a running flow run
        flow_run = FlowRun(
            id=flow_run_id,
            name="test-flow-run",
            flow_id=uuid.uuid4(),
            state=State(type="RUNNING", name="Running"),
        )
        mock_client.read_flow_run.return_value = flow_run
        await mark_runs_as_crashed(sample_event, sample_tags)
        mock_client.read_flow_run.assert_called_once_with(flow_run_id=flow_run_id)
        mock_propose_state.assert_called_once()
        # Verify the proposed state is a Crashed state
        call_args = mock_propose_state.call_args[1]
        proposed_state = call_args["state"]
        assert proposed_state.type == StateType.CRASHED
        assert proposed_state.name == "Crashed"
        assert call_args["flow_run_id"] == flow_run_id
        assert call_args["client"] == mock_client
    @patch("prefect_aws.observers.ecs.prefect.get_client")
    @patch("prefect_aws.observers.ecs.propose_state")
    async def test_mark_runs_as_crashed_with_all_zero_exit_codes(
        self, mock_propose_state, mock_get_client, sample_tags
    ):
        """Clean (zero) exits never propose a Crashed state."""
        event = {
            "detail": {
                "taskArn": "arn:aws:ecs:us-east-1:123456789:task/cluster/task-id",
                "containers": [
                    {"name": "prefect", "exitCode": 0},
                    {"name": "sidecar", "exitCode": 0},
                ],
            }
        }
        flow_run_id = uuid.UUID(sample_tags["prefect.io/flow-run-id"])
        mock_client = AsyncMock()
        mock_context = AsyncMock()
        mock_context.__aenter__.return_value = mock_client
        mock_get_client.return_value = mock_context
        # Mock a running flow run
        flow_run = FlowRun(
            id=flow_run_id,
            name="test-flow-run",
            flow_id=uuid.uuid4(),
            state=State(type="RUNNING", name="Running"),
        )
        mock_client.read_flow_run.return_value = flow_run
        await mark_runs_as_crashed(event, sample_tags)
        mock_client.read_flow_run.assert_called_once_with(flow_run_id=flow_run_id)
        # Should not propose crashed state when all containers have exit code 0
        mock_propose_state.assert_not_called()
    @patch("prefect_aws.observers.ecs.prefect.get_client")
    @patch("prefect_aws.observers.ecs.propose_state")
    async def test_mark_runs_as_crashed_with_none_exit_codes(
        self, mock_propose_state, mock_get_client, sample_tags
    ):
        """A missing (None) exit code counts as an abnormal exit."""
        event = {
            "detail": {
                "taskArn": "arn:aws:ecs:us-east-1:123456789:task/cluster/task-id",
                "containers": [
                    {"name": "prefect", "exitCode": None},
                    {"name": "sidecar", "exitCode": 0},
                ],
            }
        }
        flow_run_id = uuid.UUID(sample_tags["prefect.io/flow-run-id"])
        mock_client = AsyncMock()
        mock_context = AsyncMock()
        mock_context.__aenter__.return_value = mock_client
        mock_get_client.return_value = mock_context
        # Mock a running flow run
        flow_run = FlowRun(
            id=flow_run_id,
            name="test-flow-run",
            flow_id=uuid.uuid4(),
            state=State(type="RUNNING", name="Running"),
        )
        mock_client.read_flow_run.return_value = flow_run
        await mark_runs_as_crashed(event, sample_tags)
        mock_client.read_flow_run.assert_called_once_with(flow_run_id=flow_run_id)
        # Should propose crashed state when exit code is None (undefined exit)
        mock_propose_state.assert_called_once()
    @patch("prefect_aws.observers.ecs.prefect.get_client")
    async def test_mark_runs_as_crashed_missing_task_arn(
        self, mock_get_client, sample_tags
    ):
        """Events without a ``taskArn`` exit before any API client is made."""
        event = {"detail": {}}
        await mark_runs_as_crashed(event, sample_tags)
        # Should exit early without creating client
        mock_get_client.assert_not_called()
    @patch("prefect_aws.observers.ecs.prefect.get_client")
    async def test_mark_runs_as_crashed_flow_run_not_found(
        self, mock_get_client, sample_event, sample_tags
    ):
        """An unknown flow run id is handled gracefully (no exception)."""
        mock_client = AsyncMock()
        mock_context = AsyncMock()
        mock_context.__aenter__.return_value = mock_client
        mock_get_client.return_value = mock_context
        mock_client.read_flow_run.side_effect = ObjectNotFound("Flow run not found")
        await mark_runs_as_crashed(sample_event, sample_tags)
        # Should handle the exception gracefully
        mock_client.read_flow_run.assert_called_once()
    @patch("prefect_aws.observers.ecs.prefect.get_client")
    @patch("prefect_aws.observers.ecs.propose_state")
    async def test_mark_runs_as_crashed_skips_final_states(
        self, mock_propose_state, mock_get_client, sample_event, sample_tags
    ):
        """Flow runs already in a final state are left alone."""
        flow_run_id = uuid.UUID(sample_tags["prefect.io/flow-run-id"])
        mock_client = AsyncMock()
        mock_context = AsyncMock()
        mock_context.__aenter__.return_value = mock_client
        mock_get_client.return_value = mock_context
        # Mock a completed flow run (final state)
        flow_run = FlowRun(
            id=flow_run_id,
            name="test-flow-run",
            flow_id=uuid.uuid4(),
            state=State(type="COMPLETED", name="Completed"),
        )
        mock_client.read_flow_run.return_value = flow_run
        await mark_runs_as_crashed(sample_event, sample_tags)
        mock_client.read_flow_run.assert_called_once_with(flow_run_id=flow_run_id)
        # Should not propose state for final states
        mock_propose_state.assert_not_called()
    @patch("prefect_aws.observers.ecs.prefect.get_client")
    @patch("prefect_aws.observers.ecs.propose_state")
    async def test_mark_runs_as_crashed_skips_scheduled_states(
        self, mock_propose_state, mock_get_client, sample_event, sample_tags
    ):
        """Scheduled flow runs are left alone."""
        flow_run_id = uuid.UUID(sample_tags["prefect.io/flow-run-id"])
        mock_client = AsyncMock()
        mock_context = AsyncMock()
        mock_context.__aenter__.return_value = mock_client
        mock_get_client.return_value = mock_context
        # Mock a scheduled flow run
        flow_run = FlowRun(
            id=flow_run_id,
            name="test-flow-run",
            flow_id=uuid.uuid4(),
            state=State(type="SCHEDULED", name="Scheduled"),
        )
        mock_client.read_flow_run.return_value = flow_run
        await mark_runs_as_crashed(sample_event, sample_tags)
        mock_client.read_flow_run.assert_called_once_with(flow_run_id=flow_run_id)
        # Should not propose state for scheduled states
        mock_propose_state.assert_not_called()
    @patch("prefect_aws.observers.ecs.prefect.get_client")
    @patch("prefect_aws.observers.ecs.propose_state")
    async def test_mark_runs_as_crashed_skips_paused_states(
        self, mock_propose_state, mock_get_client, sample_event, sample_tags
    ):
        """Paused/suspended flow runs are left alone."""
        flow_run_id = uuid.UUID(sample_tags["prefect.io/flow-run-id"])
        mock_client = AsyncMock()
        mock_context = AsyncMock()
        mock_context.__aenter__.return_value = mock_client
        mock_get_client.return_value = mock_context
        flow_run = FlowRun(
            id=flow_run_id,
            name="test-flow-run",
            flow_id=uuid.uuid4(),
            state=State(type="PAUSED", name="Suspended"),
        )
        mock_client.read_flow_run.return_value = flow_run
        await mark_runs_as_crashed(sample_event, sample_tags)
        mock_client.read_flow_run.assert_called_once_with(flow_run_id=flow_run_id)
        mock_propose_state.assert_not_called()
    @patch("prefect_aws.observers.ecs.prefect.get_client")
    @patch("prefect_aws.observers.ecs.propose_state")
    async def test_mark_runs_as_crashed_ignores_sidecar_exit_codes(
        self, mock_propose_state, mock_get_client, sample_tags
    ):
        """A failing sidecar does not crash the run when the
        orchestration container exited cleanly."""
        event = {
            "detail": {
                "taskArn": "arn:aws:ecs:us-east-1:123456789:task/cluster/task-id",
                "containers": [
                    {"name": "prefect", "exitCode": 0},
                    {"name": "vmagent", "exitCode": 0},
                    {"name": "vector", "exitCode": 0},
                    {"name": "ecs-exporter", "exitCode": 2},
                ],
            }
        }
        flow_run_id = uuid.UUID(sample_tags["prefect.io/flow-run-id"])
        mock_client = AsyncMock()
        mock_context = AsyncMock()
        mock_context.__aenter__.return_value = mock_client
        mock_get_client.return_value = mock_context
        flow_run = FlowRun(
            id=flow_run_id,
            name="test-flow-run",
            flow_id=uuid.uuid4(),
            state=State(type="RUNNING", name="Running"),
        )
        mock_client.read_flow_run.return_value = flow_run
        await mark_runs_as_crashed(event, sample_tags)
        mock_propose_state.assert_not_called()
    @patch("prefect_aws.observers.ecs.prefect.get_client")
    @patch("prefect_aws.observers.ecs.propose_state")
    async def test_mark_runs_as_crashed_checks_orchestration_container(
        self, mock_propose_state, mock_get_client, sample_tags
    ):
        """A failing orchestration container crashes the run and the
        container name appears in the state message."""
        event = {
            "detail": {
                "taskArn": "arn:aws:ecs:us-east-1:123456789:task/cluster/task-id",
                "containers": [
                    {"name": "prefect", "exitCode": 1},
                    {"name": "sidecar", "exitCode": 0},
                ],
            }
        }
        flow_run_id = uuid.UUID(sample_tags["prefect.io/flow-run-id"])
        mock_client = AsyncMock()
        mock_context = AsyncMock()
        mock_context.__aenter__.return_value = mock_client
        mock_get_client.return_value = mock_context
        flow_run = FlowRun(
            id=flow_run_id,
            name="test-flow-run",
            flow_id=uuid.uuid4(),
            state=State(type="RUNNING", name="Running"),
        )
        mock_client.read_flow_run.return_value = flow_run
        await mark_runs_as_crashed(event, sample_tags)
        mock_propose_state.assert_called_once()
        call_args = mock_propose_state.call_args[1]
        proposed_state = call_args["state"]
        assert proposed_state.type == StateType.CRASHED
        assert "prefect" in proposed_state.message
    @patch("prefect_aws.observers.ecs.prefect.get_client")
    @patch("prefect_aws.observers.ecs.propose_state")
    async def test_mark_runs_as_crashed_falls_back_to_all_containers(
        self, mock_propose_state, mock_get_client, sample_tags
    ):
        """Without a recognized orchestration container, any non-zero
        exit code crashes the run."""
        event = {
            "detail": {
                "taskArn": "arn:aws:ecs:us-east-1:123456789:task/cluster/task-id",
                "containers": [
                    {"name": "custom-container", "exitCode": 1},
                    {"name": "sidecar", "exitCode": 0},
                ],
            }
        }
        flow_run_id = uuid.UUID(sample_tags["prefect.io/flow-run-id"])
        mock_client = AsyncMock()
        mock_context = AsyncMock()
        mock_context.__aenter__.return_value = mock_client
        mock_get_client.return_value = mock_context
        flow_run = FlowRun(
            id=flow_run_id,
            name="test-flow-run",
            flow_id=uuid.uuid4(),
            state=State(type="RUNNING", name="Running"),
        )
        mock_client.read_flow_run.return_value = flow_run
        await mark_runs_as_crashed(event, sample_tags)
        mock_propose_state.assert_called_once()
        call_args = mock_propose_state.call_args[1]
        proposed_state = call_args["state"]
        assert proposed_state.type == StateType.CRASHED
class TestDeregisterTaskDefinition:
    """Tests for ``deregister_task_definition``."""
    @pytest.fixture
    def sample_event(self):
        """An ECS event carrying a task definition ARN."""
        return {
            "detail": {
                "taskDefinitionArn": "arn:aws:ecs:us-east-1:123456789:task-definition/my-task:1"
            }
        }
    @pytest.fixture
    def sample_tags(self):
        """Tags opting the task into deregistration."""
        return {"prefect.io/degregister-task-definition": "true"}
    @patch("prefect_aws.observers.ecs.aiobotocore.session.get_session")
    async def test_deregister_task_definition_success(
        self, mock_get_session, sample_event, sample_tags
    ):
        """The ARN from the event is deregistered via an ECS client."""
        session = Mock()
        ecs_client = AsyncMock()
        client_cm = AsyncMock()
        client_cm.__aenter__.return_value = ecs_client
        session.create_client.return_value = client_cm
        mock_get_session.return_value = session
        await deregister_task_definition(sample_event, sample_tags)
        session.create_client.assert_called_once_with("ecs")
        ecs_client.deregister_task_definition.assert_called_once_with(
            taskDefinition="arn:aws:ecs:us-east-1:123456789:task-definition/my-task:1"
        )
    @patch("prefect_aws.observers.ecs.aiobotocore.session.get_session")
    async def test_deregister_task_definition_missing_arn(
        self, mock_get_session, sample_tags
    ):
        """Events whose detail lacks an ARN never open an AWS session."""
        await deregister_task_definition({"detail": {}}, sample_tags)
        mock_get_session.assert_not_called()
    @patch("prefect_aws.observers.ecs.aiobotocore.session.get_session")
    async def test_deregister_task_definition_empty_detail(
        self, mock_get_session, sample_tags
    ):
        """Events with no detail at all never open an AWS session."""
        await deregister_task_definition({}, sample_tags)
        mock_get_session.assert_not_called()
class TestObserverManagement:
    """Tests for the module-level observer start/stop lifecycle helpers."""
    @patch("prefect_aws.observers.ecs.ecs_observer")
    async def test_start_and_stop_observer(self, mock_observer):
        """Starting twice launches the observer only once; stop cleans up."""
        def _signal_started(started_event):
            started_event.set()
        mock_observer.run = AsyncMock(side_effect=_signal_started)
        await start_observer()
        mock_observer.run.assert_called_once()
        # A second start is a no-op while the observer is already running.
        await start_observer()
        mock_observer.run.assert_called_once()
        await stop_observer()
    async def test_stop_observer_not_running(self):
        """Stopping when nothing is running is a harmless no-op."""
        await stop_observer()
async def async_generator_from_list(items: list) -> AsyncGenerator[Any, None]:
    """Yield each element of *items* as an async generator, preserving order."""
    for element in list(items):
        yield element
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-aws/tests/observers/test_ecs_observer.py",
"license": "Apache License 2.0",
"lines": 1116,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/integrations/prefect-redis/prefect_redis/lease_storage.py | from __future__ import annotations
import json
import logging
from datetime import datetime, timedelta, timezone
from typing import Any
from uuid import UUID
from redis.asyncio import Redis
from redis.exceptions import RedisError
from prefect.server.concurrency.lease_storage import (
ConcurrencyLeaseHolder,
ConcurrencyLimitLeaseMetadata,
)
from prefect.server.concurrency.lease_storage import (
ConcurrencyLeaseStorage as _ConcurrencyLeaseStorage,
)
from prefect.server.utilities.leasing import ResourceLease
from prefect_redis.client import get_async_redis_client
logger = logging.getLogger(__name__)
class ConcurrencyLeaseStorage(_ConcurrencyLeaseStorage):
    """
    A Redis-based concurrency lease storage implementation.

    Key layout (all under the ``prefect:concurrency:`` prefix):

    - ``lease:<lease_id>``: JSON blob holding one serialized lease
    - ``expirations``: sorted set of lease ids scored by expiration timestamp
    - ``limit:<limit_id>:holders``: hash of ``lease_id -> holder entry JSON``

    Multi-key writes (create/renew/revoke) are performed via server-side Lua
    scripts so the lease blob, the expiration index, and the holder index are
    always updated atomically.
    """

    def __init__(self, redis_client: Redis | None = None):
        # Fall back to the globally configured async client when one is not
        # injected (tests inject their own).
        self.redis_client = redis_client or get_async_redis_client()
        self.base_prefix = "prefect:concurrency:"
        self.lease_prefix = f"{self.base_prefix}lease:"
        self.expirations_key = f"{self.base_prefix}expirations"
        self.expiration_prefix = f"{self.base_prefix}expiration:"
        # Lua scripts registered on the server
        # (lazily registered on first use by _ensure_scripts)
        self._create_script: Any | None = None
        self._revoke_script: Any | None = None

    def _lease_key(self, lease_id: UUID) -> str:
        """Generate Redis key for a lease."""
        return f"{self.lease_prefix}{lease_id}"

    def _expiration_key(self, lease_id: UUID) -> str:
        """Generate Redis key for lease expiration."""
        return f"{self.expiration_prefix}{lease_id}"

    @staticmethod
    def _limit_holders_key(limit_id: UUID) -> str:
        # NOTE: this literal must stay in sync with the key built inside the
        # revoke Lua script below, which reconstructs it from resource ids.
        return f"prefect:concurrency:limit:{limit_id}:holders"

    async def _ensure_scripts(self) -> None:
        """Register the create/revoke Lua scripts on the server (idempotent)."""
        if self._create_script is None:
            create_script = """
            -- KEYS[1] = lease_key
            -- KEYS[2] = expirations_key
            -- KEYS[3..n] = limit_holders_key for each resource id
            -- ARGV[1] = lease_json
            -- ARGV[2] = expiration_ts (number)
            -- ARGV[3] = lease_id
            -- ARGV[4] = holder_entry_json (or empty string)
            redis.call('SET', KEYS[1], ARGV[1])
            redis.call('ZADD', KEYS[2], ARGV[2], ARGV[3])
            if ARGV[4] ~= '' then
                local i = 3
                while i <= #KEYS do
                    redis.call('HSET', KEYS[i], ARGV[3], ARGV[4])
                    i = i + 1
                end
            end
            return 1
            """
            self._create_script = self.redis_client.register_script(create_script)
        if self._revoke_script is None:
            revoke_script = """
            -- KEYS[1] = lease_key
            -- KEYS[2] = expirations_key
            -- ARGV[1] = lease_id
            -- Read the lease in-script to avoid races and compute index keys
            local lease_json = redis.call('GET', KEYS[1])
            if lease_json then
                local ok, lease = pcall(cjson.decode, lease_json)
                if ok and lease then
                    if lease['resource_ids'] then
                        for _, rid in ipairs(lease['resource_ids']) do
                            local holder_index_key = 'prefect:concurrency:limit:' .. tostring(rid) .. ':holders'
                            redis.call('HDEL', holder_index_key, ARGV[1])
                        end
                    end
                end
            end
            -- Proceed with idempotent deletes regardless
            redis.call('DEL', KEYS[1])
            redis.call('ZREM', KEYS[2], ARGV[1])
            return 1
            """
            self._revoke_script = self.redis_client.register_script(revoke_script)

    def _serialize_lease(
        self, lease: ResourceLease[ConcurrencyLimitLeaseMetadata]
    ) -> str:
        """Serialize a lease to JSON."""
        metadata_dict: dict[str, Any] | None = None
        if lease.metadata:
            metadata_dict = {"slots": lease.metadata.slots}
            # The holder may be a pydantic model or a plain dict; normalize
            # models to JSON-compatible dicts before dumping.
            if getattr(lease.metadata, "holder", None) is not None:
                holder = lease.metadata.holder
                if hasattr(holder, "model_dump"):
                    holder = holder.model_dump(mode="json")  # type: ignore[attr-defined]
                metadata_dict["holder"] = holder
        data = {
            "id": str(lease.id),
            "resource_ids": [str(rid) for rid in lease.resource_ids],
            "expiration": lease.expiration.isoformat(),
            "created_at": lease.created_at.isoformat(),
            "metadata": metadata_dict,
        }
        return json.dumps(data)

    def _deserialize_lease(
        self, data: str
    ) -> ResourceLease[ConcurrencyLimitLeaseMetadata]:
        """Deserialize a lease from JSON."""
        lease_data = json.loads(data)
        metadata = None
        if lease_data["metadata"]:
            holder = lease_data["metadata"].get("holder")
            metadata = ConcurrencyLimitLeaseMetadata(
                slots=lease_data["metadata"]["slots"]
            )
            if holder is not None:
                # Best-effort: older metadata models may not define a
                # 'holder' field, so a failure to set it is only logged.
                try:
                    setattr(metadata, "holder", holder)
                except (AttributeError, TypeError):
                    logger.debug(
                        "Could not set holder on metadata type %s for lease %s",
                        type(metadata).__name__,
                        lease_data.get("id"),
                    )
        return ResourceLease(
            id=UUID(lease_data["id"]),
            resource_ids=[UUID(rid) for rid in lease_data["resource_ids"]],
            expiration=datetime.fromisoformat(lease_data["expiration"]),
            created_at=datetime.fromisoformat(lease_data["created_at"]),
            metadata=metadata,
        )

    async def create_lease(
        self,
        resource_ids: list[UUID],
        ttl: timedelta,
        metadata: ConcurrencyLimitLeaseMetadata | None = None,
    ) -> ResourceLease[ConcurrencyLimitLeaseMetadata]:
        """Create and persist a new lease covering *resource_ids* for *ttl*.

        Atomically writes the lease blob, the expiration index entry, and
        (when a holder is present on *metadata*) one holder-index entry per
        resource id.

        Raises:
            RedisError: if the write fails; the error is logged and re-raised.
        """
        expiration = datetime.now(timezone.utc) + ttl
        lease = ResourceLease(
            resource_ids=resource_ids, metadata=metadata, expiration=expiration
        )
        try:
            lease_key = self._lease_key(lease.id)
            serialized_lease = self._serialize_lease(lease)
            # Use a Lua script for atomic multi-key updates
            await self._ensure_scripts()
            holder_entry_json = ""
            if metadata is not None and getattr(metadata, "holder", None) is not None:
                holder = getattr(metadata, "holder")
                if hasattr(holder, "model_dump"):
                    holder = holder.model_dump(mode="json")  # type: ignore[attr-defined]
                holder_entry_json = json.dumps(
                    {
                        "holder": holder,
                        "slots": metadata.slots,
                        "lease_id": str(lease.id),
                    }
                )
            keys: list[str] = [
                lease_key,
                self.expirations_key,
            ]
            # Holder-index keys are only passed (KEYS[3..n]) when there is a
            # holder entry for the script to write.
            if holder_entry_json:
                for rid in resource_ids:
                    keys.append(self._limit_holders_key(rid))
            args: list[str] = [
                serialized_lease,
                str(expiration.timestamp()),
                str(lease.id),
                holder_entry_json,
            ]
            await self._create_script(keys=keys, args=args)  # type: ignore[misc]
            return lease
        except RedisError as e:
            logger.error(f"Failed to create lease {lease.id}: {e}")
            raise

    async def read_lease(
        self, lease_id: UUID
    ) -> ResourceLease[ConcurrencyLimitLeaseMetadata] | None:
        """Return the lease with *lease_id*, or None if it does not exist.

        Raises:
            RedisError: if the read fails; the error is logged and re-raised.
        """
        try:
            lease_key = self._lease_key(lease_id)
            data = await self.redis_client.get(lease_key)
            if data is None:
                return None
            return self._deserialize_lease(data)
        except RedisError as e:
            logger.error(f"Failed to read lease {lease_id}: {e}")
            raise

    async def renew_lease(self, lease_id: UUID, ttl: timedelta) -> bool:
        """
        Atomically renew a concurrency lease by updating its expiration.

        Uses a Lua script to atomically check if the lease exists, update its expiration
        in the lease data, and update the index - all in a single atomic operation,
        preventing race conditions from creating orphaned index entries.

        Args:
            lease_id: The ID of the lease to renew
            ttl: The new time-to-live duration

        Returns:
            True if the lease was renewed, False if it didn't exist
        """
        try:
            lease_key = self._lease_key(lease_id)
            new_expiration = datetime.now(timezone.utc) + ttl
            new_expiration_iso = new_expiration.isoformat()
            new_expiration_timestamp = new_expiration.timestamp()
            # Lua script to atomically get, update, and store lease + index
            # All operations are atomic - no TOCTOU race condition possible
            renew_lease_script = """
            local lease_key = KEYS[1]
            local expirations_key = KEYS[2]
            local lease_id = ARGV[1]
            local new_expiration_timestamp = tonumber(ARGV[2])
            local new_expiration_iso = ARGV[3]
            -- Get existing lease data
            local serialized_lease = redis.call('get', lease_key)
            if not serialized_lease then
                -- Lease doesn't exist - clean up any orphaned index entry
                redis.call('zrem', expirations_key, lease_id)
                return 0
            end
            -- Parse lease data, update expiration, and save back
            local lease_data = cjson.decode(serialized_lease)
            lease_data['expiration'] = new_expiration_iso
            redis.call('set', lease_key, cjson.encode(lease_data))
            -- Update the expiration index
            redis.call('zadd', expirations_key, new_expiration_timestamp, lease_id)
            return 1
            """
            # Execute the atomic Lua script
            result = await self.redis_client.eval(
                renew_lease_script,
                2,  # number of keys
                lease_key,
                self.expirations_key,
                str(lease_id),
                new_expiration_timestamp,
                new_expiration_iso,
            )
            return bool(result)
        except RedisError as e:
            logger.error(f"Failed to renew lease {lease_id}: {e}")
            raise

    async def revoke_lease(self, lease_id: UUID) -> None:
        """Delete a lease and all of its index entries (idempotent).

        The Lua script re-reads the lease server-side to discover which
        holder-index hashes need cleanup, then deletes unconditionally, so
        revoking a nonexistent lease is a no-op rather than an error.

        Raises:
            RedisError: if the delete fails; the error is logged and re-raised.
        """
        try:
            lease_key = self._lease_key(lease_id)
            # Use a Lua script for atomic multi-key updates with in-script read/cleanup
            await self._ensure_scripts()
            keys: list[str] = [
                lease_key,
                self.expirations_key,
            ]
            args: list[str] = [
                str(lease_id),
            ]
            await self._revoke_script(keys=keys, args=args)  # type: ignore[misc]
        except RedisError as e:
            logger.error(f"Failed to revoke lease {lease_id}: {e}")
            raise

    async def read_active_lease_ids(
        self, limit: int = 100, offset: int = 0
    ) -> list[UUID]:
        """Return ids of leases expiring in the future, paginated.

        Raises:
            RedisError: if the query fails; the error is logged and re-raised.
        """
        try:
            now = datetime.now(timezone.utc).timestamp()
            # Get lease IDs that expire after now (active leases)
            # Redis zrangebyscore uses 'start' as the offset and 'num' as the limit
            active_lease_ids = await self.redis_client.zrangebyscore(
                self.expirations_key, now, "+inf", start=offset, num=limit
            )
            return [UUID(lease_id) for lease_id in active_lease_ids]
        except RedisError as e:
            logger.error(f"Failed to read active lease IDs: {e}")
            raise

    async def read_expired_lease_ids(self, limit: int = 100) -> list[UUID]:
        """Return up to *limit* ids of leases whose expiration has passed.

        Raises:
            RedisError: if the query fails; the error is logged and re-raised.
        """
        try:
            now = datetime.now(timezone.utc).timestamp()
            # Get lease IDs that expire before now (expired leases)
            expired_lease_ids = await self.redis_client.zrangebyscore(
                self.expirations_key, "-inf", now, start=0, num=limit
            )
            return [UUID(lease_id) for lease_id in expired_lease_ids]
        except RedisError as e:
            logger.error(f"Failed to read expired lease IDs: {e}")
            raise

    async def list_holders_for_limit(
        self, limit_id: UUID
    ) -> list[tuple[UUID, ConcurrencyLeaseHolder]]:
        """Return (lease_id, holder) pairs currently holding *limit_id*.

        Malformed holder-index entries are skipped rather than raised, so a
        single corrupt entry cannot break limit introspection.

        Raises:
            RedisError: if the hash read fails; logged and re-raised.
        """
        try:
            # Get all holder entries for this limit
            values = await self.redis_client.hvals(self._limit_holders_key(limit_id))
            holders_with_leases: list[tuple[UUID, ConcurrencyLeaseHolder]] = []
            for v in values:
                # The client may return bytes depending on decode_responses.
                if isinstance(v, (bytes, bytearray)):
                    v = v.decode()
                try:
                    data = json.loads(v)
                    if isinstance(data, dict) and "holder" in data:
                        holder_data: dict[str, Any] = data["holder"]
                        if isinstance(holder_data, dict):
                            # Create ConcurrencyLeaseHolder from the data
                            holder = ConcurrencyLeaseHolder(**holder_data)
                            holders_with_leases.append((UUID(data["lease_id"]), holder))
                except Exception:
                    # Skip malformed entries
                    continue
            return holders_with_leases
        except RedisError as e:
            logger.error(f"Failed to list holders for limit {limit_id}: {e}")
            raise
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-redis/prefect_redis/lease_storage.py",
"license": "Apache License 2.0",
"lines": 310,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/integrations/prefect-redis/tests/test_lease_storage.py | import asyncio
from datetime import datetime, timedelta, timezone
from uuid import UUID, uuid4
import pytest
from prefect_redis.lease_storage import ConcurrencyLeaseStorage
from redis.asyncio import Redis
from prefect.server.concurrency.lease_storage import ConcurrencyLimitLeaseMetadata
from prefect.server.utilities.leasing import ResourceLease
from prefect.types._concurrency import ConcurrencyLeaseHolder
class TestConcurrencyLeaseStorage:
    """Test suite for Redis-based ConcurrencyLeaseStorage implementation."""

    @pytest.fixture
    async def storage(self, redis: Redis) -> ConcurrencyLeaseStorage:
        """Create a ConcurrencyLeaseStorage instance with the test Redis client."""
        return ConcurrencyLeaseStorage(redis_client=redis)

    async def test_create_lease(self, storage: ConcurrencyLeaseStorage):
        """Test creating a new lease."""
        resource_ids = [uuid4(), uuid4()]
        ttl = timedelta(seconds=300)
        metadata = ConcurrencyLimitLeaseMetadata(slots=5)

        lease = await storage.create_lease(resource_ids, ttl, metadata)

        assert isinstance(lease, ResourceLease)
        assert lease.resource_ids == resource_ids
        assert lease.metadata == metadata
        assert lease.expiration > datetime.now(timezone.utc)
        assert isinstance(lease.id, UUID)

    async def test_create_lease_without_metadata(
        self, storage: ConcurrencyLeaseStorage
    ):
        """Test creating a lease without metadata."""
        resource_ids = [uuid4()]
        ttl = timedelta(seconds=300)

        lease = await storage.create_lease(resource_ids, ttl)

        assert lease.resource_ids == resource_ids
        assert lease.metadata is None

    async def test_read_lease(self, storage: ConcurrencyLeaseStorage):
        """Test reading an existing lease."""
        resource_ids = [uuid4()]
        ttl = timedelta(seconds=300)
        metadata = ConcurrencyLimitLeaseMetadata(slots=3)

        # Create a lease first
        created_lease = await storage.create_lease(resource_ids, ttl, metadata)

        # Read it back
        read_lease = await storage.read_lease(created_lease.id)

        assert read_lease is not None
        assert read_lease.id == created_lease.id
        assert read_lease.resource_ids == resource_ids
        assert read_lease.metadata is not None
        assert read_lease.metadata.slots == metadata.slots
        assert read_lease.expiration == created_lease.expiration

    async def test_read_nonexistent_lease(self, storage: ConcurrencyLeaseStorage):
        """Test reading a lease that doesn't exist."""
        nonexistent_id = uuid4()
        lease = await storage.read_lease(nonexistent_id)
        assert lease is None

    async def test_renew_lease(self, storage: ConcurrencyLeaseStorage):
        """Test renewing an existing lease."""
        resource_ids = [uuid4()]
        initial_ttl = timedelta(seconds=300)
        metadata = ConcurrencyLimitLeaseMetadata(slots=2)

        # Create a lease
        lease = await storage.create_lease(resource_ids, initial_ttl, metadata)
        original_expiration = lease.expiration

        # Wait a small amount to ensure time difference
        await asyncio.sleep(0.1)

        # Renew the lease
        new_ttl = timedelta(seconds=600)
        renewed = await storage.renew_lease(lease.id, new_ttl)

        # renew_lease reports whether the lease existed
        assert renewed is True

        # Read the renewed lease
        renewed_lease = await storage.read_lease(lease.id)

        assert renewed_lease is not None
        assert renewed_lease.expiration > original_expiration

    async def test_renew_nonexistent_lease(self, storage: ConcurrencyLeaseStorage):
        """Test renewing a lease that doesn't exist (should not raise error)."""
        nonexistent_id = uuid4()
        ttl = timedelta(seconds=300)

        # Should not raise an error, and should report that nothing was renewed
        renewed = await storage.renew_lease(nonexistent_id, ttl)
        assert renewed is False

    async def test_revoke_lease(self, storage: ConcurrencyLeaseStorage):
        """Test revoking an existing lease."""
        resource_ids = [uuid4()]
        ttl = timedelta(seconds=300)

        # Create a lease
        lease = await storage.create_lease(resource_ids, ttl)

        # Verify it exists
        assert await storage.read_lease(lease.id) is not None

        # Revoke it
        await storage.revoke_lease(lease.id)

        # Verify it's gone
        assert await storage.read_lease(lease.id) is None

    async def test_revoke_nonexistent_lease(self, storage: ConcurrencyLeaseStorage):
        """Test revoking a lease that doesn't exist (should not raise error)."""
        nonexistent_id = uuid4()
        # Should not raise an error
        await storage.revoke_lease(nonexistent_id)

    async def test_read_active_lease_ids(self, storage: ConcurrencyLeaseStorage):
        """Test reading active lease IDs."""
        resource_ids = [uuid4()]

        # Create an active lease (expires in future)
        active_ttl = timedelta(seconds=300)
        active_lease = await storage.create_lease(resource_ids, active_ttl)

        # Create an expired lease (expires in past)
        expired_ttl = timedelta(seconds=-300)  # Negative TTL for expired lease
        expired_lease = await storage.create_lease(resource_ids, expired_ttl)

        # Read active lease IDs
        active_ids = await storage.read_active_lease_ids()

        assert active_lease.id in active_ids
        assert expired_lease.id not in active_ids

    async def test_read_expired_lease_ids(self, storage: ConcurrencyLeaseStorage):
        """Test reading expired lease IDs."""
        resource_ids = [uuid4()]

        # Create an active lease (expires in future)
        active_ttl = timedelta(seconds=300)
        active_lease = await storage.create_lease(resource_ids, active_ttl)

        # Create an expired lease (expires in past)
        expired_ttl = timedelta(seconds=-300)  # Negative TTL for expired lease
        expired_lease = await storage.create_lease(resource_ids, expired_ttl)

        # Read expired lease IDs
        expired_ids = await storage.read_expired_lease_ids()

        assert expired_lease.id in expired_ids
        assert active_lease.id not in expired_ids

    async def test_read_active_lease_ids_with_limit(
        self, storage: ConcurrencyLeaseStorage
    ):
        """Test reading active lease IDs with a limit."""
        resource_ids = [uuid4()]
        ttl = timedelta(seconds=300)

        # Create multiple active leases
        leases: list[ResourceLease[ConcurrencyLimitLeaseMetadata]] = []
        for _ in range(5):
            lease = await storage.create_lease(resource_ids, ttl)
            leases.append(lease)

        # Read with limit
        active_ids = await storage.read_active_lease_ids(limit=3)

        assert len(active_ids) == 3
        # All returned IDs should be from our created leases
        assert all(
            lease_id in [lease.id for lease in leases] for lease_id in active_ids
        )

    async def test_read_expired_lease_ids_with_limit(
        self, storage: ConcurrencyLeaseStorage
    ):
        """Test reading expired lease IDs with a limit."""
        resource_ids = [uuid4()]
        expired_ttl = timedelta(seconds=-300)

        # Create multiple expired leases
        leases: list[ResourceLease[ConcurrencyLimitLeaseMetadata]] = []
        for _ in range(5):
            lease = await storage.create_lease(resource_ids, expired_ttl)
            leases.append(lease)

        # Read with limit
        expired_ids = await storage.read_expired_lease_ids(limit=3)

        assert len(expired_ids) == 3
        # All returned IDs should be from our created leases
        assert all(
            lease_id in [lease.id for lease in leases] for lease_id in expired_ids
        )

    async def test_lease_serialization_deserialization(
        self, storage: ConcurrencyLeaseStorage
    ):
        """Test that lease serialization and deserialization work correctly."""
        resource_ids = [uuid4(), uuid4()]
        ttl = timedelta(seconds=300)
        metadata = ConcurrencyLimitLeaseMetadata(slots=10)

        # Create lease
        original_lease = await storage.create_lease(resource_ids, ttl, metadata)

        # Read it back
        deserialized_lease = await storage.read_lease(original_lease.id)

        assert deserialized_lease is not None
        assert deserialized_lease.id == original_lease.id
        assert deserialized_lease.resource_ids == original_lease.resource_ids
        assert deserialized_lease.expiration == original_lease.expiration
        assert deserialized_lease.created_at == original_lease.created_at
        assert deserialized_lease.metadata is not None
        assert deserialized_lease.metadata.slots == original_lease.metadata.slots

    async def test_concurrent_operations(self, storage: ConcurrencyLeaseStorage):
        """Test concurrent lease operations."""
        resource_ids = [uuid4()]
        ttl = timedelta(seconds=300)

        # Create multiple leases concurrently
        tasks = [
            storage.create_lease(
                resource_ids, ttl, ConcurrencyLimitLeaseMetadata(slots=i)
            )
            for i in range(10)
        ]
        leases = await asyncio.gather(*tasks)

        # Verify all leases were created successfully
        assert len(leases) == 10
        assert len(set(lease.id for lease in leases)) == 10  # All unique IDs

        # Read all leases back concurrently
        read_tasks = [storage.read_lease(lease.id) for lease in leases]
        read_leases = await asyncio.gather(*read_tasks)

        # Verify all reads succeeded
        assert all(read_lease is not None for read_lease in read_leases)
        assert len(read_leases) == 10

    async def test_redis_key_management(
        self, storage: ConcurrencyLeaseStorage, redis: Redis
    ):
        """Test that Redis keys are managed correctly."""
        resource_ids = [uuid4()]
        ttl = timedelta(seconds=300)

        # Create a lease
        lease = await storage.create_lease(resource_ids, ttl)

        # Verify keys exist in Redis
        lease_key = storage._lease_key(lease.id)
        assert await redis.exists(lease_key) == 1
        assert (
            await redis.zrank("prefect:concurrency:expirations", str(lease.id))
            is not None
        )

        # Revoke the lease
        await storage.revoke_lease(lease.id)

        # Verify keys are removed
        assert await redis.exists(lease_key) == 0
        assert (
            await redis.zrank("prefect:concurrency:expirations", str(lease.id)) is None
        )

    async def test_storage_isolation(self, redis: Redis):
        """Test that different storage instances are properly isolated."""
        storage1 = ConcurrencyLeaseStorage(redis_client=redis)
        storage2 = ConcurrencyLeaseStorage(redis_client=redis)

        resource_ids = [uuid4()]
        ttl = timedelta(seconds=300)

        # Create lease with first storage
        lease = await storage1.create_lease(resource_ids, ttl)

        # Read with second storage (should work since they share Redis)
        read_lease = await storage2.read_lease(lease.id)

        assert read_lease is not None
        assert read_lease.id == lease.id

    async def test_holder_round_trip(self, storage: ConcurrencyLeaseStorage):
        """Holder data is preserved through serialize/deserialize."""
        resource_ids = [uuid4()]
        ttl = timedelta(seconds=60)
        holder = {"type": "task_run", "id": str(uuid4())}
        metadata = ConcurrencyLimitLeaseMetadata(slots=2)
        # Support both models that define 'holder' and legacy ones
        setattr(metadata, "holder", holder)

        lease = await storage.create_lease(resource_ids, ttl, metadata)
        read_back = await storage.read_lease(lease.id)

        assert read_back is not None
        assert getattr(read_back.metadata, "holder", None) == holder

    async def test_holder_indexes_and_lookup(self, storage: ConcurrencyLeaseStorage):
        """Holder index entries are created on lease creation and removed on revoke."""
        rid = uuid4()
        ttl = timedelta(seconds=120)
        holder_id = uuid4()
        holder = {"type": "task_run", "id": str(holder_id)}
        meta = ConcurrencyLimitLeaseMetadata(slots=1)
        setattr(meta, "holder", holder)

        lease = await storage.create_lease([rid], ttl, meta)

        holders = await storage.list_holders_for_limit(rid)
        assert len(holders) == 1
        lease_id, holder = holders[0]
        assert holder.type == "task_run"
        assert holder.id == holder_id
        assert lease_id == lease.id

        await storage.revoke_lease(lease.id)
        holders_after = await storage.list_holders_for_limit(rid)
        assert len(holders_after) == 0
        # Reverse lookup removed; ensure holder entry is gone via list

    async def test_create_without_holder_does_not_index(
        self, storage: ConcurrencyLeaseStorage
    ):
        """Leases created without a holder leave the holder index untouched."""
        rid = uuid4()
        ttl = timedelta(seconds=60)

        # No holder
        lease = await storage.create_lease([rid], ttl)
        assert lease is not None

        holders = await storage.list_holders_for_limit(rid)
        assert holders == []
        # Reverse lookup removed; nothing to check here beyond empty list

    async def test_multiple_resource_ids_index_all_and_cleanup(
        self, storage: ConcurrencyLeaseStorage
    ):
        """A multi-limit lease indexes its holder under every limit and cleans all up."""
        rid1, rid2 = uuid4(), uuid4()
        ttl = timedelta(seconds=60)
        holder_id = uuid4()
        holder = {"type": "task_run", "id": str(holder_id)}
        meta = ConcurrencyLimitLeaseMetadata(slots=1)
        setattr(meta, "holder", holder)

        lease = await storage.create_lease([rid1, rid2], ttl, meta)

        holders_rid1 = await storage.list_holders_for_limit(rid1)
        assert len(holders_rid1) == 1
        lease_id, holder = holders_rid1[0]
        assert holder.type == "task_run"
        assert holder.id == holder_id
        assert lease_id == lease.id

        holders_rid2 = await storage.list_holders_for_limit(rid2)
        assert len(holders_rid2) == 1
        lease_id, holder = holders_rid2[0]
        assert holder.type == "task_run"
        assert holder.id == holder_id
        assert lease_id == lease.id

        await storage.revoke_lease(lease.id)

        holders_rid1_after = await storage.list_holders_for_limit(rid1)
        assert len(holders_rid1_after) == 0
        holders_rid2_after = await storage.list_holders_for_limit(rid2)
        assert len(holders_rid2_after) == 0

    async def test_list_holders_for_limit_returns_typed_holders(
        self, storage: ConcurrencyLeaseStorage
    ):
        """Test that list_holders_for_limit returns properly typed ConcurrencyLeaseHolder objects."""
        limit_id = uuid4()
        ttl = timedelta(seconds=60)

        # Create multiple leases with different holders
        holder1_id = uuid4()
        holder2_id = uuid4()
        holder1_data = {"type": "task_run", "id": str(holder1_id)}
        holder2_data = {"type": "flow_run", "id": str(holder2_id)}

        meta1 = ConcurrencyLimitLeaseMetadata(slots=2)
        setattr(meta1, "holder", holder1_data)
        meta2 = ConcurrencyLimitLeaseMetadata(slots=1)
        setattr(meta2, "holder", holder2_data)

        # Create leases
        lease1 = await storage.create_lease([limit_id], ttl, meta1)
        lease2 = await storage.create_lease([limit_id], ttl, meta2)

        # Get holders - should return ConcurrencyLeaseHolder objects
        holders = await storage.list_holders_for_limit(limit_id)

        assert len(holders) == 2
        # Check that they are ConcurrencyLeaseHolder instances
        for _, holder in holders:
            assert isinstance(holder, ConcurrencyLeaseHolder)

        # Check that the data matches (IDs are UUIDs in the returned objects)
        holder_types = {holder.type for _, holder in holders}
        holder_ids = {holder.id for _, holder in holders}
        assert "task_run" in holder_types
        assert "flow_run" in holder_types
        assert holder1_id in holder_ids
        assert holder2_id in holder_ids

        # Clean up
        await storage.revoke_lease(lease1.id)
        await storage.revoke_lease(lease2.id)

    async def test_list_holders_for_limit_empty_when_no_holders(
        self, storage: ConcurrencyLeaseStorage
    ):
        """Test that list_holders_for_limit returns empty list when no holders exist."""
        limit_id = uuid4()
        holders = await storage.list_holders_for_limit(limit_id)
        assert holders == []
        assert isinstance(holders, list)

    async def test_read_active_lease_ids_with_pagination(
        self, storage: ConcurrencyLeaseStorage
    ):
        """Test pagination of active lease IDs."""
        # Create 10 active leases
        active_ttl = timedelta(minutes=5)
        lease_ids: list[UUID] = []
        for _ in range(10):
            lease = await storage.create_lease([uuid4()], active_ttl)
            lease_ids.append(lease.id)

        # Test getting first page
        first_page = await storage.read_active_lease_ids(limit=3, offset=0)
        assert len(first_page) == 3
        assert all(lid in lease_ids for lid in first_page)

        # Test getting second page
        second_page = await storage.read_active_lease_ids(limit=3, offset=3)
        assert len(second_page) == 3
        assert all(lid in lease_ids for lid in second_page)

        # Ensure no overlap between pages
        assert set(first_page).isdisjoint(set(second_page))

        # Test getting third page
        third_page = await storage.read_active_lease_ids(limit=3, offset=6)
        assert len(third_page) == 3
        assert all(lid in lease_ids for lid in third_page)

        # Test getting partial last page
        fourth_page = await storage.read_active_lease_ids(limit=3, offset=9)
        assert len(fourth_page) == 1
        assert all(lid in lease_ids for lid in fourth_page)

        # Test offset beyond available items
        empty_page = await storage.read_active_lease_ids(limit=3, offset=100)
        assert empty_page == []

        # Clean up
        for lease_id in lease_ids:
            await storage.revoke_lease(lease_id)

    async def test_read_active_lease_ids_default_pagination(
        self, storage: ConcurrencyLeaseStorage
    ):
        """Test default pagination behavior."""
        # Create 150 active leases (more than default limit)
        active_ttl = timedelta(minutes=5)
        lease_ids: list[UUID] = []
        for _ in range(150):
            lease = await storage.create_lease([uuid4()], active_ttl)
            lease_ids.append(lease.id)

        # Test default limit of 100
        default_page = await storage.read_active_lease_ids()
        assert len(default_page) == 100
        assert all(lid in lease_ids for lid in default_page)

        # Test with offset
        offset_page = await storage.read_active_lease_ids(offset=100)
        assert len(offset_page) == 50  # remaining leases
        assert all(lid in lease_ids for lid in offset_page)

        # Ensure no overlap with first page
        assert set(default_page).isdisjoint(set(offset_page))

        # Clean up
        for lease_id in lease_ids:
            await storage.revoke_lease(lease_id)
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-redis/tests/test_lease_storage.py",
"license": "Apache License 2.0",
"lines": 397,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:scripts/backfill_release_notes.py | #!/usr/bin/env python3
"""
Backfill release notes documentation from GitHub releases.
This script generates release notes pages for all 3.x releases.
PURPOSE:
========
This script is used to regenerate ALL historical release notes from GitHub.
It should only be needed in these cases:
1. Initial setup of release notes documentation
2. Fixing formatting issues across all releases
3. Recovering from data loss
4. Major restructuring of release notes format
USAGE:
======
Run from the repository root:
python scripts/backfill_release_notes.py
This will:
1. Fetch all stable 3.x release tags from git
2. Query GitHub API for each release's information
3. Group releases by minor version (3.0, 3.1, 3.2, etc.)
4. Generate one MDX page per minor version with all patches
5. Create the index page and directory structure
DIRECTORY STRUCTURE:
====================
docs/v3/release-notes/
├── index.mdx # Main release notes landing page
├── oss/
│ ├── version-3-0.mdx # All 3.0.x releases
│ ├── version-3-1.mdx # All 3.1.x releases
│ └── ...
└── cloud/
└── index.mdx # Placeholder for cloud releases
FORMATTING:
===========
The script applies several transformations to improve readability:
- Removes "New Contributors" sections (reduce clutter)
- Converts ### headers to **bold text** (simplify right-nav)
- Converts #### headers to **bold text** (for integration subsections)
- Wraps version constraints in backticks (e.g., `<2.0,>=1.5`)
- Removes duplicate headers and "What's Changed" sections
- Formats dates consistently as "Month Day, Year"
REQUIREMENTS:
=============
- Must be run from the Prefect repository root
- Requires 'gh' CLI tool to be installed and authenticated
- Needs git repository with all tags fetched
NOTE: For preparing release notes for a NEW release, use prepare_release_notes.py instead.
"""
import json
import re
import subprocess
from collections import defaultdict
from pathlib import Path
def run_command(cmd: list[str]) -> str:
    """Execute *cmd* and return its stdout with surrounding whitespace removed.

    Raises:
        subprocess.CalledProcessError: if the command exits non-zero
            (``check=True``).
    """
    completed = subprocess.run(cmd, capture_output=True, text=True, check=True)
    return completed.stdout.strip()
def get_release_tags() -> list[str]:
    """Return all stable 3.x release tags from git, sorted by version.

    Pre-release tags (containing "rc", "dev", "alpha", or "beta") are
    excluded.
    """
    raw = run_command(["git", "tag", "-l", "3.*"])
    candidates = raw.split("\n") if raw else []

    # Keep only tags with no pre-release marker anywhere in the name.
    prerelease_markers = ("rc", "dev", "alpha", "beta")
    stable_tags = [
        tag
        for tag in candidates
        if not any(marker in tag for marker in prerelease_markers)
    ]

    # Numeric sort by version components so e.g. 3.10 sorts after 3.9.
    return sorted(stable_tags, key=lambda tag: [int(part) for part in tag.split(".")])
def get_release_info(tag: str) -> dict | None:
    """Fetch release metadata for *tag* from GitHub via the ``gh`` CLI.

    Returns:
        A dict with ``body``, ``name``, ``tagName``, and ``publishedAt``
        keys, or ``None`` (after printing a warning) when the release
        cannot be fetched.
    """
    try:
        raw = run_command(
            ["gh", "release", "view", tag, "--json", "body,name,tagName,publishedAt"]
        )
    except subprocess.CalledProcessError:
        print(f"Warning: Could not fetch release info for {tag}")
        return None
    return json.loads(raw)
def parse_version(tag: str) -> tuple[int, int, int]:
    """Split a dotted version tag into ``(major, minor, patch)``.

    A missing patch component defaults to 0, so ``"3.1"`` parses as
    ``(3, 1, 0)``.
    """
    pieces = tag.split(".")
    major = int(pieces[0])
    minor = int(pieces[1])
    patch = int(pieces[2]) if len(pieces) > 2 else 0
    return major, minor, patch
def format_release_notes(release_info: dict) -> str:
    """Render a GitHub release payload as one markdown release section.

    Takes the dict produced by ``gh release view --json
    body,name,tagName,publishedAt`` and returns a ``## <tag> - <subtitle>``
    section: title, a human-readable release date line, then the cleaned-up
    release body.

    Cleanups applied (each documented inline below):
    - strip the auto-generated release-notes HTML comment and CRLF endings
    - drop "New Contributors" sections (keeping any Full Changelog line)
    - drop headers that duplicate the title we add ourselves
    - convert ###/#### headers to bold text (reduces right-nav clutter)
    - wrap bare version constraints in backticks (MDX safety)
    - linkify @usernames and full PR URLs
    """
    body = release_info.get("body", "")
    name = release_info.get("name", "")
    tag = release_info.get("tagName", "")
    date = release_info.get("publishedAt", "")[:10]  # Just the date part
    # Clean up the body
    body = body.replace(
        "<!-- Release notes generated using configuration in .github/release.yml at main -->",
        "",
    ).strip()
    body = body.replace("\r\n", "\n")
    # Remove duplicate headers that might be in the body
    lines = body.split("\n")
    filtered_lines = []
    # Extract just the subtitle part from the name if it exists
    subtitle = ""
    if name and name != tag:
        # Handle various formats of the name
        subtitle = name.replace(tag, "").strip()
        if subtitle.startswith("-"):
            subtitle = subtitle[1:].strip()
        if subtitle.startswith(":"):
            subtitle = subtitle[1:].strip()
    # If no subtitle from name, try to extract from first line of body
    # This handles cases like "## 3.1.5: Like Leftovers, But Async (No More Pi)"
    if not subtitle and lines:
        first_line = lines[0].strip()
        if first_line.startswith(f"## {tag}:") or first_line.startswith(f"# {tag}:"):
            subtitle = first_line.split(":", 1)[1].strip() if ":" in first_line else ""
    # Two small state machines drive the line filter below:
    # skip_next_empty swallows the blank line after a removed header,
    # skip_section swallows an entire "New Contributors" section.
    skip_next_empty = False
    skip_section = False
    for i, line in enumerate(lines):
        line_stripped = line.strip()
        # Skip New Contributors sections entirely
        if line_stripped == "## New Contributors":
            skip_section = True
            continue
        elif skip_section and line_stripped.startswith("##"):
            # We've hit the next section, stop skipping
            skip_section = False
        elif skip_section and line_stripped.startswith("**Full Changelog**"):
            # Keep the Full Changelog line and stop skipping
            skip_section = False
            filtered_lines.append(line)
            continue
        elif skip_section:
            # Skip lines in the New Contributors section
            continue
        # Skip various duplicate header formats
        should_skip = False
        # Check for exact version match headers
        if line_stripped in [f"## {tag}", f"# {tag}", f"## {tag}:", f"# {tag}:"]:
            should_skip = True
        # Check for headers that have version: subtitle format (even when name == tag)
        # This handles cases like "## 3.1.5: Like Leftovers, But Async (No More Pi)"
        if line_stripped.startswith(f"## {tag}:") or line_stripped.startswith(
            f"# {tag}:"
        ):
            should_skip = True
        # Check for subtitle-only headers
        if subtitle:
            # Various subtitle formats
            subtitle_patterns = [
                f"## {subtitle}",
                f"# {subtitle}",
                f"## : {subtitle}",
                f"# : {subtitle}",
                f"##{subtitle}",  # No space
                f"#{subtitle}",  # No space
            ]
            if line_stripped in subtitle_patterns:
                should_skip = True
            # Check for the full duplicate with version and subtitle
            full_patterns = [
                f"## {tag}: {subtitle}",
                f"# {tag}: {subtitle}",
                f"## {tag} - {subtitle}",
                f"# {tag} - {subtitle}",
            ]
            if line_stripped in full_patterns:
                should_skip = True
            # Also check if the line matches our own header format exactly
            # to avoid duplicating what we're already adding
            if (
                line_stripped == f"## {tag} - {subtitle}"
                or line_stripped == f"## {tag} - : {subtitle}"
            ):
                should_skip = True
        # Skip "What's Changed" headers at the beginning (we'll use the content after it)
        if line_stripped in ["## What's Changed", "# What's Changed"]:
            should_skip = True
        if should_skip:
            skip_next_empty = True
            continue
        # Skip empty lines immediately after removed headers
        if skip_next_empty and line_stripped == "":
            skip_next_empty = False
            continue
        skip_next_empty = False
        # Transform ### and #### headers to bold text to reduce nav clutter
        if line.startswith("### "):
            header_text = line[4:].strip()
            # Ensure there's a blank line before the header
            # Check if we need to add spacing
            if filtered_lines:
                # If the last line is not empty, add a blank line
                if filtered_lines[-1].strip():
                    filtered_lines.append("")
            filtered_lines.append(f"**{header_text}**")
            # Always add a blank line after headers for consistent formatting
            filtered_lines.append("")
        elif line.startswith("#### "):
            # Integration subsections - also convert to bold
            header_text = line[5:].strip()
            # Ensure there's a blank line before the header
            # Check if we need to add spacing
            if filtered_lines:
                # If the last line is not empty, add a blank line
                if filtered_lines[-1].strip():
                    filtered_lines.append("")
            filtered_lines.append(f"**{header_text}**")
            # Always add a blank line after headers for consistent formatting
            filtered_lines.append("")
        else:
            # Skip duplicate empty lines that might be in the source after headers
            # This prevents having too many blank lines if source already had spacing
            if line.strip() == "" and filtered_lines and filtered_lines[-1] == "":
                continue
            filtered_lines.append(line)
    body = "\n".join(filtered_lines).strip()
    # Fix problematic patterns for MDX
    # Find version ranges like <0.14.0,>=0.12.0 that aren't already in backticks
    # Pattern to find version constraints not already in backticks
    # Matches patterns like <24.3,>=21.3 or >=0.7.3,<0.9.0
    version_pattern = r"(?<!`)([<>]=?[\d\.]+(?:,\s*[<>]=?[\d\.]+)*)"

    def wrap_version(match):
        # re.sub replacement callback: backtick-wrap the matched constraint.
        version = match.group(1)
        return f"`{version}`"

    # Wrap version constraints in backticks
    body = re.sub(version_pattern, wrap_version, body)
    # Convert GitHub usernames to hyperlinks (e.g., @username -> [@username](https://github.com/username))
    # But avoid matching npm packages like @prefecthq/prefect-ui-library
    # And avoid matching infrastructure decorators like @docker, @kubernetes, @ecs, @process
    # List of known infrastructure decorators to exclude
    infra_decorators = [
        "docker",
        "kubernetes",
        "ecs",
        "process",
        "cloudrun",
        "modal",
        "azure_container",
    ]

    def replace_github_user(match):
        # re.sub replacement callback: linkify unless it's an infra decorator.
        username = match.group(1)
        # Don't convert if it's an infrastructure decorator
        if username.lower() in infra_decorators:
            return match.group(0)  # Return the original text
        return f"[@{username}](https://github.com/{username})"

    # Pattern mirrors GitHub's username rules (max 39 chars, no leading/trailing
    # hyphen); trailing "/" exclusion avoids scoped-package paths.
    github_user_pattern = (
        r"(?<!\w)@([a-zA-Z0-9](?:[a-zA-Z0-9]|-(?=[a-zA-Z0-9])){0,38})(?![a-zA-Z0-9/-])"
    )
    body = re.sub(github_user_pattern, replace_github_user, body)
    # Convert full PR URLs to short format with links
    # e.g., https://github.com/PrefectHQ/prefect/pull/1234 -> [#1234](https://github.com/PrefectHQ/prefect/pull/1234)
    pr_url_pattern = r"https://github\.com/PrefectHQ/prefect/pull/(\d+)"
    body = re.sub(
        pr_url_pattern, r"[#\1](https://github.com/PrefectHQ/prefect/pull/\1)", body
    )
    # Format the patch release section
    patch_title = f"## {tag}"
    # If we have a subtitle, add it to the title
    if subtitle:
        patch_title = f"## {tag} - {subtitle}"
    elif name and name != tag:
        # Fallback to extracting from name if we didn't get subtitle earlier
        name_parts = name.replace(tag, "").strip()
        if name_parts.startswith("-"):
            name_parts = name_parts[1:].strip()
        if name_parts.startswith(":"):
            name_parts = name_parts[1:].strip()
        if name_parts:
            patch_title = f"## {tag} - {name_parts}"
    # Format the date nicely on a separate line
    from datetime import datetime

    try:
        date_obj = datetime.strptime(date, "%Y-%m-%d")
        formatted_date = date_obj.strftime("%B %d, %Y")  # e.g., "July 31, 2025"
    except Exception:
        formatted_date = date  # Fallback to original if parsing fails
    return f"{patch_title}\n\n*Released on {formatted_date}*\n\n{body}\n"
def create_release_notes_structure():
    """Create the release notes directory structure and files.

    Builds ``docs/v3/release-notes`` with an index page, one OSS page per
    3.x minor version (newest patch first), and a Cloud placeholder page.

    Side effects: creates directories, writes .mdx files, and calls the
    ``gh`` CLI (via get_release_info) once per release tag.
    """
    base_dir = Path("docs/v3/release-notes")
    base_dir.mkdir(parents=True, exist_ok=True)
    # Create subdirectories
    oss_dir = base_dir / "oss"
    cloud_dir = base_dir / "cloud"
    oss_dir.mkdir(exist_ok=True)
    cloud_dir.mkdir(exist_ok=True)
    # Get all release tags
    tags = get_release_tags()
    # Group releases by minor version
    releases_by_minor = defaultdict(list)
    for tag in tags:
        try:
            major, minor, _patch = parse_version(tag)
            if major == 3:  # Only process 3.x releases
                releases_by_minor[f"3.{minor}"].append(tag)
        except (ValueError, IndexError):
            print(f"Skipping invalid tag: {tag}")
            continue
    # Create index page
    index_content = """---
title: Release Notes
---

Browse release notes for Prefect OSS and Prefect Cloud.

## Prefect OSS

Release notes for the open-source Prefect orchestration engine.

### Available Versions

"""
    for minor_version in sorted(
        releases_by_minor.keys(),
        key=lambda x: list(map(int, x.split("."))),
        reverse=True,
    ):
        index_content += f"- [Version {minor_version}](/v3/release-notes/oss/version-{minor_version.replace('.', '-')})\n"
    index_content += """
## Prefect Cloud

Release notes for Prefect Cloud features and updates.

*Coming soon*
"""
    with open(base_dir / "index.mdx", "w") as f:
        f.write(index_content)
    # Create release notes for each minor version
    # (renamed loop variable: the original shadowed the outer `tags` list)
    for minor_version, minor_tags in releases_by_minor.items():
        print(f"Processing {minor_version} with {len(minor_tags)} releases...")
        # Sort tags by patch version (descending) so newest appears first
        minor_tags = sorted(
            minor_tags, key=lambda x: list(map(int, x.split("."))), reverse=True
        )
        # Create the minor version page
        page_content = f"""---
title: {minor_version}
---

"""
        for tag in minor_tags:
            release_info = get_release_info(tag)
            if release_info:
                page_content += format_release_notes(release_info)
                page_content += "\n---\n\n"
        # Write the page
        filename = f"version-{minor_version.replace('.', '-')}.mdx"
        with open(oss_dir / filename, "w") as f:
            f.write(page_content)
        # BUGFIX: the message previously contained no placeholder, so it never
        # said which file was written; report the actual filename.
        print(f"  Created {filename}")
    # Create placeholder for Cloud release notes
    cloud_index = """---
title: Prefect Cloud Release Notes
---

# Prefect Cloud Release Notes

Prefect Cloud release notes will be available here soon.
"""
    with open(cloud_dir / "index.mdx", "w") as f:
        f.write(cloud_index)
    print(f"\nRelease notes structure created in {base_dir}")
if __name__ == "__main__":
    # Script entry point: backfill docs/v3/release-notes from GitHub releases.
    create_release_notes_structure()
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "scripts/backfill_release_notes.py",
"license": "Apache License 2.0",
"lines": 348,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:scripts/prepare_release_notes.py | #!/usr/bin/env python3
"""
Prepare release notes documentation for an upcoming release.
This script generates release notes from merged PRs and adds them to the docs.
USAGE:
======
Run this script before creating a new release to prepare the documentation:
`just prepare-release VERSION [SUBTITLE]`
or
`python scripts/prepare_release_notes.py 3.5.0 "Optional Subtitle"`
The script will:
- Generate release notes from merged PRs since the last release
- Apply formatting transformations (remove New Contributors, convert headers to bold)
- Add the release notes to the appropriate minor version page (e.g., version-3-5.mdx)
- Update docs.json to include the new page if needed
- Open the file in your $EDITOR for review
WORKFLOW:
=========
1. Run this script with the upcoming version number
2. Review and edit the generated markdown
3. Commit the changes and create a PR
4. Merge to main before creating the actual GitHub release
FORMATTING:
===========
- New Contributors sections are automatically removed
- ### and #### headers are converted to bold text to reduce nav clutter
- Version constraints like <0.14.0,>=0.12.0 are wrapped in backticks
- Dates are formatted as "Month Day, Year" on a separate line
"""
import json
import re
import subprocess
import sys
from pathlib import Path
def run_command(cmd: list[str]) -> str:
    """Run *cmd* as a subprocess and return its stripped stdout.

    Raises:
        subprocess.CalledProcessError: on a non-zero exit status.
    """
    proc = subprocess.run(cmd, capture_output=True, text=True, check=True)
    stdout = proc.stdout
    return stdout.strip()
def generate_release_notes(version: str, subtitle: str = "") -> dict:
    """Generate release notes from merged PRs since the last release.

    Uses the GitHub "generate-notes" API (via the ``gh`` CLI), diffing
    against the newest stable 3.x tag. On any CLI failure a placeholder
    payload is returned so the caller can proceed.
    """
    display_name = f"{version} - {subtitle}" if subtitle else version
    try:
        # Newest stable 3.x tag is the baseline for the generated notes.
        tag_output = run_command(["git", "tag", "-l", "3.*", "--sort=-version:refname"])
        all_tags = tag_output.split("\n") if tag_output else []
        stable_tags = [
            t
            for t in all_tags
            if not any(marker in t for marker in ("rc", "dev", "alpha", "beta"))
        ]
        previous_tag = stable_tags[0] if stable_tags else None
        # Ask GitHub to draft the notes for the upcoming tag.
        cmd = [
            "gh",
            "api",
            "/repos/PrefectHQ/prefect/releases/generate-notes",
            "-X",
            "POST",
            "-f",
            f"tag_name={version}",
        ]
        if previous_tag:
            cmd += ["-f", f"previous_tag_name={previous_tag}"]
        generated = json.loads(run_command(cmd))
        return {
            "body": generated.get("body", ""),
            "name": display_name,
            "tagName": version,
        }
    except subprocess.CalledProcessError as e:
        print(f"Error generating release notes: {e}")
        # Return a basic structure if generation fails
        return {
            "body": "Release notes will be added manually.",
            "name": display_name,
            "tagName": version,
        }
def parse_version(tag: str) -> tuple[int, int, int]:
    """Parse ``major.minor[.patch]`` into an int 3-tuple (patch defaults to 0)."""
    components = tag.split(".")
    patch_component = components[2] if len(components) > 2 else 0
    return int(components[0]), int(components[1]), int(patch_component)
def format_release_notes(release_info: dict, version: str) -> str:
    """Render a draft-release payload as one markdown release section.

    Variant of the backfill formatter for NOT-yet-published releases: the
    tag comes from *version* (not the payload) and the date is today's,
    since a draft has no publishedAt. Additionally filters out
    dependabot-style "chore(deps" lines and github-actions doc-update PRs.

    Returns a ``## <version> - <subtitle>`` section with a release date
    line followed by the cleaned-up body.
    """
    body = release_info.get("body", "")
    name = release_info.get("name", version)
    tag = version
    # For draft releases, we need to generate a date
    from datetime import datetime

    date = datetime.now().strftime("%Y-%m-%d")
    # Clean up the body
    body = body.replace(
        "<!-- Release notes generated using configuration in .github/release.yml at main -->",
        "",
    ).strip()
    body = body.replace("\r\n", "\n")
    # Remove duplicate headers that might be in the body
    import re

    lines = body.split("\n")
    filtered_lines = []
    # Extract just the subtitle part from the name if it exists
    subtitle = ""
    if name and name != tag:
        # Handle various formats of the name
        subtitle = name.replace(tag, "").strip()
        if subtitle.startswith("-"):
            subtitle = subtitle[1:].strip()
        if subtitle.startswith(":"):
            subtitle = subtitle[1:].strip()
    # If no subtitle from name, try to extract from first line of body
    # This handles cases like "## 3.1.5: Like Leftovers, But Async (No More Pi)"
    if not subtitle and lines:
        first_line = lines[0].strip()
        if first_line.startswith(f"## {tag}:") or first_line.startswith(f"# {tag}:"):
            subtitle = first_line.split(":", 1)[1].strip() if ":" in first_line else ""
    # Two small state machines drive the line filter below:
    # skip_next_empty swallows the blank line after a removed header,
    # skip_section swallows an entire "New Contributors" section.
    skip_next_empty = False
    skip_section = False
    for i, line in enumerate(lines):
        line_stripped = line.strip()
        # Skip New Contributors sections entirely
        if line_stripped == "## New Contributors":
            skip_section = True
            continue
        elif skip_section and line_stripped.startswith("##"):
            # We've hit the next section, stop skipping
            skip_section = False
        elif skip_section and line_stripped.startswith("**Full Changelog**"):
            # Keep the Full Changelog line and stop skipping
            skip_section = False
            filtered_lines.append(line)
            continue
        elif skip_section:
            # Skip lines in the New Contributors section
            continue
        # Skip various duplicate header formats
        should_skip = False
        # Check for exact version match headers
        if line_stripped in [f"## {tag}", f"# {tag}", f"## {tag}:", f"# {tag}:"]:
            should_skip = True
        # Check for headers that have version: subtitle format (even when name == tag)
        # This handles cases like "## 3.1.5: Like Leftovers, But Async (No More Pi)"
        if line_stripped.startswith(f"## {tag}:") or line_stripped.startswith(
            f"# {tag}:"
        ):
            should_skip = True
        # Check for subtitle-only headers
        if subtitle:
            # Various subtitle formats
            subtitle_patterns = [
                f"## {subtitle}",
                f"# {subtitle}",
                f"## : {subtitle}",
                f"# : {subtitle}",
                f"##{subtitle}",  # No space
                f"#{subtitle}",  # No space
            ]
            if line_stripped in subtitle_patterns:
                should_skip = True
            # Check for the full duplicate with version and subtitle
            full_patterns = [
                f"## {tag}: {subtitle}",
                f"# {tag}: {subtitle}",
                f"## {tag} - {subtitle}",
                f"# {tag} - {subtitle}",
            ]
            if line_stripped in full_patterns:
                should_skip = True
            # Also check if the line matches our own header format exactly
            # to avoid duplicating what we're already adding
            if (
                line_stripped == f"## {tag} - {subtitle}"
                or line_stripped == f"## {tag} - : {subtitle}"
            ):
                should_skip = True
        # Skip "What's Changed" headers at the beginning (we'll use the content after it)
        if line_stripped in ["## What's Changed", "# What's Changed"]:
            should_skip = True
        if should_skip:
            skip_next_empty = True
            continue
        # Skip empty lines immediately after removed headers
        if skip_next_empty and line_stripped == "":
            skip_next_empty = False
            continue
        skip_next_empty = False
        # Transform ### and #### headers to bold text to reduce nav clutter
        if line.startswith("### "):
            header_text = line[4:].strip()
            # Ensure there's a blank line before the header
            # Check if we need to add spacing
            if filtered_lines:
                # If the last line is not empty, add a blank line
                if filtered_lines[-1].strip():
                    filtered_lines.append("")
            filtered_lines.append(f"**{header_text}**")
            # Always add a blank line after headers for consistent formatting
            filtered_lines.append("")
        elif line.startswith("#### "):
            # Integration subsections - also convert to bold
            header_text = line[5:].strip()
            # Ensure there's a blank line before the header
            # Check if we need to add spacing
            if filtered_lines:
                # If the last line is not empty, add a blank line
                if filtered_lines[-1].strip():
                    filtered_lines.append("")
            filtered_lines.append(f"**{header_text}**")
            # Always add a blank line after headers for consistent formatting
            filtered_lines.append("")
        else:
            # Omit dependabot-style chores by default (e.g., "chore(deps): ...")
            # This targets conventional commit prefixes for dependency bumps.
            if re.search(r"(?i)chore\(deps", line_stripped):
                continue
            # Omit auto-update documentation PRs (typically from github-actions bot)
            if (
                "auto-update documentation" in line_stripped.lower()
                and "github-actions" in line_stripped.lower()
            ):
                continue
            # Skip duplicate empty lines that might be in the source after headers
            # This prevents having too many blank lines if source already had spacing
            if line.strip() == "" and filtered_lines and filtered_lines[-1] == "":
                continue
            filtered_lines.append(line)
    body = "\n".join(filtered_lines).strip()
    # Fix problematic patterns for MDX
    # Find version ranges like <0.14.0,>=0.12.0 that aren't already in backticks
    # Pattern to find version constraints not already in backticks
    # Matches patterns like <24.3,>=21.3 or >=0.7.3,<0.9.0
    version_pattern = r"(?<!`)([<>]=?[\d\.]+(?:,\s*[<>]=?[\d\.]+)*)"

    def wrap_version(match):
        # re.sub replacement callback: backtick-wrap the matched constraint.
        version = match.group(1)
        return f"`{version}`"

    # Wrap version constraints in backticks
    body = re.sub(version_pattern, wrap_version, body)
    # Convert GitHub usernames to hyperlinks (e.g., @username -> [@username](https://github.com/username))
    # But avoid matching npm packages like @prefecthq/prefect-ui-library
    # And avoid matching infrastructure decorators like @docker, @kubernetes, @ecs, @process
    # List of known infrastructure decorators to exclude
    infra_decorators = [
        "docker",
        "kubernetes",
        "ecs",
        "process",
        "cloudrun",
        "modal",
        "azure_container",
    ]

    def replace_github_user(match):
        # re.sub replacement callback: linkify unless it's an infra decorator.
        username = match.group(1)
        # Don't convert if it's an infrastructure decorator
        if username.lower() in infra_decorators:
            return match.group(0)  # Return the original text
        return f"[@{username}](https://github.com/{username})"

    # Pattern mirrors GitHub's username rules (max 39 chars, no leading/trailing
    # hyphen); trailing "/" exclusion avoids scoped-package paths.
    github_user_pattern = (
        r"(?<!\w)@([a-zA-Z0-9](?:[a-zA-Z0-9]|-(?=[a-zA-Z0-9])){0,38})(?![a-zA-Z0-9/-])"
    )
    body = re.sub(github_user_pattern, replace_github_user, body)
    # Convert full PR URLs to short format with links
    # e.g., https://github.com/PrefectHQ/prefect/pull/1234 -> [#1234](https://github.com/PrefectHQ/prefect/pull/1234)
    pr_url_pattern = r"https://github\.com/PrefectHQ/prefect/pull/(\d+)"
    body = re.sub(
        pr_url_pattern, r"[#\1](https://github.com/PrefectHQ/prefect/pull/\1)", body
    )
    # Format the patch release section
    patch_title = f"## {tag}"
    # If we have a subtitle, add it to the title
    if subtitle:
        patch_title = f"## {tag} - {subtitle}"
    elif name and name != tag:
        # Fallback to extracting from name if we didn't get subtitle earlier
        name_parts = name.replace(tag, "").strip()
        if name_parts.startswith("-"):
            name_parts = name_parts[1:].strip()
        if name_parts.startswith(":"):
            name_parts = name_parts[1:].strip()
        if name_parts:
            patch_title = f"## {tag} - {name_parts}"
    # Format the date nicely on a separate line
    from datetime import datetime

    try:
        date_obj = datetime.strptime(date, "%Y-%m-%d")
        formatted_date = date_obj.strftime("%B %d, %Y")  # e.g., "July 31, 2025"
    except Exception:
        formatted_date = date  # Fallback to original if parsing fails
    return f"{patch_title}\n\n*Released on {formatted_date}*\n\n{body}\n"
def update_minor_version_page(minor_version: str, new_release: str, release_info: dict):
    """Update or create the minor version release notes page.

    Writes the formatted notes for *new_release* into
    ``docs/v3/release-notes/oss/version-<minor>.mdx``. If the page exists,
    the new section is inserted just after the frontmatter so the newest
    release appears first; otherwise a fresh page is created.

    Always returns True (the caller branches on this).
    """
    base_dir = Path("docs/v3/release-notes/oss")
    base_dir.mkdir(parents=True, exist_ok=True)
    filename = f"version-{minor_version.replace('.', '-')}.mdx"
    filepath = base_dir / filename
    new_content = format_release_notes(release_info, new_release)
    if filepath.exists():
        # Read existing content
        with open(filepath, "r") as f:
            existing_content = f.read()
        # Find where to insert the new release (after the frontmatter)
        lines = existing_content.split("\n")
        insert_index = 0
        # Skip front matter
        in_frontmatter = False
        for i, line in enumerate(lines):
            if line.strip() == "---":
                if not in_frontmatter:
                    in_frontmatter = True
                else:
                    # NOTE(review): i + 2 assumes exactly one blank line after
                    # the closing "---" delimiter — verify against pages this
                    # script itself generates.
                    insert_index = i + 2  # After frontmatter and blank line
                    break
        # Insert the new release at the top (most recent first)
        lines.insert(insert_index, new_content)
        lines.insert(insert_index + 1, "\n---\n")
        updated_content = "\n".join(lines)
    else:
        # Create new page
        updated_content = f"""---
title: {minor_version}
---

{new_content}

---
"""
    # Write the updated content
    with open(filepath, "w") as f:
        f.write(updated_content)
    print(f"Updated {filepath}")
    return True
def update_docs_json(minor_version: str):
    """Regenerate the OSS release-notes page list in docs/docs.json.

    Scans ``docs/v3/release-notes/oss`` for ``version-*.mdx`` files and
    rewrites the "OSS" group under the "Release Notes" tab with the full,
    version-descending page list. Writes docs.json and returns after the
    first matching group; does nothing if no such tab/group exists.

    Note: *minor_version* is accepted for interface compatibility but the
    page list is rebuilt from the directory contents, not from it.
    """
    docs_json_path = Path("docs/docs.json")
    with open(docs_json_path, "r") as f:
        docs_config = json.load(f)
    for tab in docs_config.get("navigation", {}).get("tabs", []):
        if tab.get("tab") != "Release Notes":
            continue
        for group in tab.get("pages", []):
            if not (isinstance(group, dict) and group.get("group") == "OSS"):
                continue
            # Rebuild the page list from whatever .mdx files exist on disk.
            oss_dir = Path("docs/v3/release-notes/oss")
            oss_pages = []
            if oss_dir.exists():
                oss_pages = [
                    f"v3/release-notes/oss/{file.stem}"
                    for file in sorted(oss_dir.glob("version-*.mdx"))
                ]
            # Newest minor versions first.
            group["pages"] = sorted(oss_pages, reverse=True)
            # Persist the updated config and stop at the first OSS group.
            with open(docs_json_path, "w") as f:
                json.dump(docs_config, f, indent=2)
                f.write("\n")  # Add trailing newline
            print(f"Updated {docs_json_path}")
            return
def main():
    """CLI entry point: validate args, then generate and write release notes."""
    argv = sys.argv
    # Guard clause: exactly one or two positional arguments.
    if len(argv) not in (2, 3):
        print("Usage: python scripts/prepare_release_notes.py <version> [subtitle]")
        print("Example: python scripts/prepare_release_notes.py 3.5.0")
        print(
            "Example: python scripts/prepare_release_notes.py 3.5.0 'Performance Improvements'"
        )
        print(
            "\nThis will generate release notes from merged PRs since the last release."
        )
        sys.exit(1)
    version = argv[1]
    subtitle = argv[2] if len(argv) == 3 else ""
    # Guard clause: version must look like X.Y.Z.
    if not re.match(r"^\d+\.\d+\.\d+$", version):
        print(f"Error: Invalid version format '{version}'. Expected format: X.Y.Z")
        sys.exit(1)
    try:
        major, minor, patch = parse_version(version)
    except (ValueError, IndexError):
        print(f"Error: Could not parse version '{version}'")
        sys.exit(1)
    if major != 3:
        print(f"Warning: This script is designed for 3.x releases. Got {version}")
    minor_version = f"{major}.{minor}"
    print(f"Preparing release notes for {version}...")
    print("Generating release notes from merged PRs...")
    # Draft the notes from merged PRs since the last stable tag.
    release_info = generate_release_notes(version, subtitle)
    print(f"Generated release notes for: {release_info.get('name', version)}")
    # Write the docs page; on success, also refresh the docs.json nav.
    if not update_minor_version_page(minor_version, version, release_info):
        print(f"\n❌ Failed to prepare release notes for {version}")
        sys.exit(1)
    update_docs_json(minor_version)
    print(f"\n✅ Successfully prepared release notes for {version}")
    print("\nNext steps:")
    print(" 1. Review the generated release notes")
    print(" 2. Make any necessary edits")
    print(" 3. Commit the changes")
    print(" 4. Create PR and merge to main")
if __name__ == "__main__":
    # Script entry point; see `main` for argument parsing and workflow.
    main()
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "scripts/prepare_release_notes.py",
"license": "Apache License 2.0",
"lines": 402,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:examples/hello_world.py | # ---
# title: Hello, world!
# description: Your first steps with Prefect – learn how to create a basic flow and understand core concepts.
# icon: play
# dependencies: ["prefect"]
# keywords: ["getting_started", "basics"]
# draft: false
# order: 1
# ---
#
# Welcome to your first Prefect flow. In under a minute you will:
# 1. Ship production-ready orchestration code with **zero boilerplate**.
# 2. See live, structured logs without writing any logging boilerplate.
# 3. Understand how the very same Python stays portable from a laptop to Kubernetes (or Prefect Cloud).
#
# *Pro tip*: change anything in this file and re-run it. Prefect hot-loads your new logic in seconds, no image builds, ever.
#
# ## Importing Prefect and setting up
# We start by importing the essential `flow` decorator from Prefect.
from prefect import flow, tags
# ## Defining a flow
# Prefect takes your Python functions and transforms them into flows with enhanced capabilities.
#
# Let's write a simple function that takes a name parameter and prints a greeting.
#
# To make this function work with Prefect, we just wrap it in the `@flow` decorator.
@flow(log_prints=True)
def hello(name: str = "Marvin") -> None:
    """Log a friendly greeting.

    Args:
        name: Who to greet; defaults to "Marvin".
    """
    # log_prints=True on the decorator routes this print() into Prefect's logs.
    print(f"Hello, {name}!")
# ## Running our flow locally and with parameters
# Now let's see different ways we can call that flow:
#
# 1. As a regular call with default parameters
# 2. With custom parameters
# 3. Multiple times to greet different people
if __name__ == "__main__":
    # run the flow with default parameters
    with tags(
        "test"
    ):  # This is a tag that we can use to filter the flow runs in the UI
        hello()  # Logs: "Hello, Marvin!"
    # run the flow with a different input (here the same as the default)
    hello("Marvin")  # Logs: "Hello, Marvin!"
    # run the flow multiple times for different people; each call is its own
    # tracked flow run
    crew = ["Zaphod", "Trillian", "Ford"]
    for name in crew:
        hello(name)
# ## What just happened?
# When we decorated our function with `@flow`, the function was transformed into a Prefect flow. Each time we called it:
#
# 1. Prefect registered the execution as a flow run
# 2. It tracked all inputs, outputs, and logs
# 3. It maintained detailed state information about the execution
# 4. Added tags to the flow run to make it easier to find when observing the flow runs in the UI
#
# In short, we took a regular function and enhanced it with observability and tracking capabilities.
# ## But why does this matter?
# This simple example demonstrates Prefect's core value proposition: taking regular Python code and enhancing it with production-grade orchestration capabilities. Let's explore why this matters for real-world data workflows.
# ### You can change the code and run it again
# For instance, change the greeting message in the `hello` function to a different message and run the flow again.
# You'll see your changes immediately reflected in the logs.
# ### You can process more data
# Add more names to the `crew` list or create a larger data set to process. Prefect will handle each execution and track every input and output.
# ### You can run a more complex flow
# The `hello` function is a simple example, but in its place imagine something that matters to you, like:
#
# * ETL processes that extract, transform, and load data
# * Machine learning training and inference pipelines
# * API integrations and data synchronization jobs
#
# Prefect lets you orchestrate these operations effortlessly with automatic observability, error handling, and retries.
# ### Key Takeaways
# Remember that Prefect makes it easy to:
#
# * Transform regular Python functions into production-ready workflows with just a [decorator](https://docs.prefect.io/v3/develop/write-flows#write-and-run-flows)
# * Get automatic logging, [retries](https://docs.prefect.io/v3/develop/write-tasks#retries), and observability without extra code
# * Run the same code anywhere - from your laptop to production
# * Build complex data pipelines while maintaining simplicity
# * Track every execution with [detailed logs](https://docs.prefect.io/v3/develop/logging#configure-logging) and state information
# The `@flow` decorator is your gateway to enterprise-grade orchestration - no complex configuration needed!
#
#
# For more information about the orchestration concepts demonstrated in this example, see the [Prefect documentation](https://docs.prefect.io/).
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "examples/hello_world.py",
"license": "Apache License 2.0",
"lines": 84,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:examples/run_api_sourced_etl.py | # ---
# title: API-sourced ETL
# description: Build a small ETL pipeline that fetches JSON from a public API, transforms it with pandas, and writes a CSV – all orchestrated by Prefect.
# icon: database
# dependencies: ["prefect", "httpx", "pandas"]
# keywords: ["getting_started", "etl", "pandas"]
# draft: false
# order: 2
# ---
#
# Prefect turns everyday Python into production-grade workflows with **zero boilerplate**.
#
# When you pair Prefect with pandas you get a **versatile ETL toolkit**:
#
# * **Python** supplies a rich ecosystem of connectors and libraries for virtually every data source and destination.
# * **pandas** gives you lightning-fast, expressive transforms that turn raw bits into tidy DataFrames.
# * **Prefect** wraps the whole thing in battle-tested orchestration: automatic [retries](https://docs.prefect.io/v3/develop/write-tasks#retries), [scheduling](https://docs.prefect.io/v3/deploy/index#workflow-scheduling-and-parametrization), and [observability](https://docs.prefect.io/v3/develop/logging#prefect-loggers) , so you don't have to write reams of defensive code.
#
# The result? You spend your time thinking about *what* you want to build, not *how* to keep it alive. Point this trio at any API, database, or file system and it will move the data where you need it while handling the messy details for you.
#
# In this article you will:
# 1. **Extract** JSON from the public [Dev.to REST API](https://dev.to/api).
# 2. **Transform** it into an analytics-friendly pandas `DataFrame`.
# 3. **Load** the result to a CSV – ready for your BI tool of choice.
#
# This example demonstrates these Prefect features:
# * [`@task`](https://docs.prefect.io/v3/develop/write-tasks#write-and-run-tasks) – wrap any function in retries & observability.
# * [`log_prints`](https://docs.prefect.io/v3/develop/logging#configure-logging) – surface `print()` logs automatically.
# * Automatic [**retries**](https://docs.prefect.io/v3/develop/write-tasks#retries) with back-off, no extra code.
#
# ### Rapid analytics from a public API
# Your data team wants engagement metrics from Dev.to articles, daily. You need a quick,
# reliable pipeline that anyone can run locally and later schedule in Prefect Cloud.
#
# ### The Solution
# Write three small Python functions (extract, transform, load), add two decorators, and
# let Prefect handle [retries](https://docs.prefect.io/v3/develop/write-tasks#retries), [concurrency](https://docs.prefect.io/v3/develop/task-runners#configure-a-task-runner), and [logging](https://docs.prefect.io/v3/develop/logging#prefect-loggers). No framework-specific hoops, just
# Python the way you already write it.
#
# *For more background on Prefect's design philosophy, check out our blog post: [Built to Fail: Design Patterns for Resilient Data Pipelines](https://www.prefect.io/blog/built-to-fail-design-patterns-for-resilient-data-pipelines)*
#
# Watch as Prefect orchestrates the ETL pipeline with automatic retries and logging. The flow fetches multiple pages of articles, transforms them into a structured DataFrame, and saves the results to CSV. This pattern is highly adaptable - use it to build pipelines that move data between any sources and destinations:
# * APIs → Databases (Postgres, MySQL, etc.)
# * APIs → Cloud Storage (S3, GCS, Azure)
# * APIs → Data Warehouses (Snowflake, BigQuery, Redshift, etc.)
# * And many more combinations
#
# ## Code walkthrough
# 1. **Imports** – Standard libraries for HTTP + pandas.
# 2. **`fetch_page` task** – Downloads a single page with retries.
# 3. **`to_dataframe` task** – Normalises JSON to a pandas DataFrame.
# 4. **`save_csv` task** – Persists the DataFrame and logs a peek.
# 5. **`etl` flow** – Orchestrates the tasks sequentially for clarity.
# 6. **Execution** – A friendly `if __name__ == "__main__"` with some basic configurations kicks things off.
#
from __future__ import annotations
from pathlib import Path
from typing import Any
import httpx
import pandas as pd
from prefect import flow, task
# ---------------------------------------------------------------------------
# Extract – fetch a single page of articles
# ---------------------------------------------------------------------------
@task(retries=3, retry_delay_seconds=[2, 5, 15])
def fetch_page(page: int, api_base: str, per_page: int) -> list[dict[str, Any]]:
    """Download one page of articles and return the decoded JSON list.

    A plain ``httpx`` GET wrapped in a Prefect task, so transient network
    failures are retried automatically (three attempts with growing delays).
    """
    print(f"Fetching page {page} …")
    resp = httpx.get(
        f"{api_base}/articles",
        params={"page": page, "per_page": per_page},
        timeout=30,
    )
    resp.raise_for_status()
    return resp.json()
# ---------------------------------------------------------------------------
# Transform – convert list[dict] ➜ pandas DataFrame
# ---------------------------------------------------------------------------
@task
def to_dataframe(raw_articles: list[list[dict[str, Any]]]) -> pd.DataFrame:
    """Flatten pages of raw article dicts into a tidy DataFrame.

    Args:
        raw_articles: One JSON list of article dicts per fetched page.

    Returns:
        A DataFrame with one row per article, restricted to the columns we
        care about. When no articles were fetched, an empty DataFrame with
        the same columns is returned instead of raising ``KeyError``
        (``pd.json_normalize([])`` produces a frame with no columns, so the
        selection below would otherwise fail).
    """
    columns = [
        "id",
        "title",
        "published_at",
        "url",
        "comments_count",
        "positive_reactions_count",
        "tag_list",
        "user.username",
    ]
    # Combine all pages into one flat record list.
    records = [article for page in raw_articles for article in page]
    if not records:
        # Preserve the expected schema even when there is nothing to show.
        return pd.DataFrame(columns=columns)
    return pd.json_normalize(records)[columns]
# ---------------------------------------------------------------------------
# Load – save DataFrame to CSV (or print preview)
# ---------------------------------------------------------------------------
@task
def save_csv(df: pd.DataFrame, path: Path) -> None:
    """Persist the DataFrame as CSV (no index column) and print a preview."""
    df.to_csv(path, index=False)
    summary = f"Saved {len(df)} rows ➜ {path}\n\nPreview:\n{df.head()}\n"
    print(summary)
# ---------------------------------------------------------------------------
# Flow – orchestrate the ETL with optional concurrency
# ---------------------------------------------------------------------------
@flow(name="devto_etl", log_prints=True)
def etl(api_base: str, pages: int, per_page: int, output_file: Path) -> None:
    """Fetch, transform, and persist *pages* worth of Dev.to articles.

    Extraction runs as one task per page; the combined payload is then
    handed to the transform and load tasks in sequence.
    """
    # Extract: one fetch task per page, collected in page order.
    raw_pages = [fetch_page(n, api_base, per_page) for n in range(1, pages + 1)]
    # Transform, then load.
    save_csv(to_dataframe(raw_pages), output_file)
# ## Run it!
#
# ```bash
# python 01_getting_started/03_run_api_sourced_etl.py
# ```
# Script entry point: run the ETL with a small, fast default configuration.
if __name__ == "__main__":
    # Configuration – tweak to taste
    api_base = "https://dev.to/api"
    pages = 3  # Number of pages to fetch
    per_page = 30  # Articles per page (max 30 per API docs)
    output_file = Path("devto_articles.csv")
    etl(api_base=api_base, pages=pages, per_page=per_page, output_file=output_file)
# ## What just happened?
#
# 1. Prefect registered a *flow run* and three *task runs* (`fetch_page`, `to_dataframe`, `save_csv`).
# 2. Each `fetch_page` call downloaded a page and, if it failed, would automatically retry.
# 3. The raw JSON pages were combined into a single pandas DataFrame.
# 4. The CSV was written to disk and a preview printed locally (the flow's `log_prints=True` flag logs messages inside the flow body; prints inside tasks are displayed in the console).
# 5. You can view run details, timings, and logs in the Prefect UI.
#
# ## Key Takeaways
#
# * **Pure Python, powered-up** – Decorators add retries and logging without changing your logic.
# * **Observability first** – Each task run (including every page fetch) is logged and can be viewed in the UI if you have a Prefect Cloud account or a local Prefect server running.
# * **Composable** – Swap `save_csv` for a database loader or S3 upload with one small change.
# * **Reusable** – Import the `etl` flow and run it with different parameters from another flow.
#
# Prefect lets you focus on *data*, not orchestration plumbing – happy ETL-ing! 🎉
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "examples/run_api_sourced_etl.py",
"license": "Apache License 2.0",
"lines": 144,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:examples/run_dbt_with_prefect.py | # ---
# title: dbt Model Orchestration
# description: Orchestrate any dbt project with bullet-proof retries, observability, and a single Python file – no YAML or shell scripts required.
# icon: database
# dependencies: ["prefect", "prefect-dbt>=0.7.0rc1", "dbt-core", "dbt-duckdb"]
# keywords: ["dbt", "materialization", "tasks", "analytics"]
# draft: false
# order: 3
# ---
#
# **Transform unreliable dbt scripts into production-grade data pipelines with enterprise observability, automatic failure recovery, and zero-downtime deployments.**
#
# When you combine Prefect with dbt, you get the **perfect marriage of best-in-class analytics tools**:
#
# * **Python** gives you the flexibility to integrate with any data source, API, or system your analytics need.
# * **dbt Core** handles the heavy lifting of SQL transformations, testing, and documentation.
# * **Prefect** wraps the entire workflow in battle-tested orchestration: automatic [retries](https://docs.prefect.io/v3/develop/write-tasks#retries), [scheduling](https://docs.prefect.io/v3/deploy/index#workflow-scheduling-and-parametrization), and [observability](https://docs.prefect.io/v3/develop/logging#prefect-loggers).
#
# The result? Your analytics team gets reliable, observable data pipelines that leverage the strengths of both platforms. Point this combo at any warehouse and it will transform your data while providing enterprise-grade workflow management.
#
# > **Note**: This example uses **dbt Core** (the open-source CLI). For dbt Cloud integration, see the [dbt Cloud examples](https://docs.prefect.io/integrations/prefect-dbt#dbt-cloud) in the Prefect documentation.
#
# This example demonstrates these Prefect features:
# * [`@task`](https://docs.prefect.io/v3/develop/write-tasks#write-and-run-tasks) – wrap dbt commands in retries & observability.
# * [`log_prints`](https://docs.prefect.io/v3/develop/logging#configure-logging) – surface dbt output automatically in Prefect logs.
# * Automatic [**retries**](https://docs.prefect.io/v3/develop/write-tasks#retries) with exponential back-off for flaky network connections.
# * [**prefect-dbt integration**](https://docs.prefect.io/integrations/prefect-dbt) – native dbt execution with enhanced logging and failure handling.
#
# ### The Scenario: Reliable Analytics Workflows
# Your analytics team uses dbt to model data in DuckDB for rapid local development and testing, but deploys to Snowflake in production. You need a workflow that:
# - Anyone can run locally without complex setup (DuckDB)
# - Automatically retries on network failures or temporary dbt errors
# - Provides clear logs and observability for debugging
# - Can be easily scheduled and deployed to production
#
# ### Our Solution
# Write three focused Python functions (download project, run dbt commands, orchestrate workflow), add Prefect decorators, and let Prefect handle [retries](https://docs.prefect.io/v3/develop/write-tasks#retries), [logging](https://docs.prefect.io/v3/develop/logging#prefect-loggers), and [scheduling](https://docs.prefect.io/v3/deploy/index#workflow-scheduling-and-parametrization). The entire example is self-contained – no git client or global dbt configuration required.
#
# *For more on integrating Prefect with dbt, see the [Prefect documentation](https://docs.prefect.io/integrations/dbt).*
#
# ### Running the example locally
# ```bash
# python 02_flows/prefect_and_dbt.py
# ```
# Watch as Prefect orchestrates the complete dbt lifecycle: downloading the project, running models, executing tests, and materializing results. The flow creates a local DuckDB file you can explore with any SQL tool.
#
# ## Code walkthrough
# 1. **Project Setup** – Download and cache a demo dbt project from GitHub
# 2. **dbt CLI Wrapper** – Execute dbt commands with automatic retries and logging using prefect-dbt
# 3. **Orchestration Flow** – Run the complete dbt lifecycle in sequence
# 4. **Execution** – Self-contained example that works out of the box
import io
import shutil
import urllib.request
import zipfile
from pathlib import Path
from prefect_dbt import PrefectDbtRunner, PrefectDbtSettings
from prefect import flow, task
DEFAULT_REPO_ZIP = (
"https://github.com/PrefectHQ/examples/archive/refs/heads/examples-markdown.zip"
)
# ---------------------------------------------------------------------------
# Project Setup – download and cache dbt project
# ---------------------------------------------------------------------------
# To keep this example fully self-contained, we download a demo dbt project
# directly from GitHub as a ZIP file. This means users don't need git installed.
# [Learn more about tasks in the Prefect documentation](https://docs.prefect.io/v3/develop/write-tasks)
@task(retries=2, retry_delay_seconds=5, log_prints=True)
def build_dbt_project(repo_zip_url: str = DEFAULT_REPO_ZIP) -> Path:
    """Download and extract the demo dbt project, returning its local path.

    To keep the example fully self-contained we grab the GitHub archive as a ZIP
    so users do **not** need `git` installed. The project is extracted from the
    PrefectHQ/examples repository into a sibling directory next to this script
    (`prefect_dbt_project`). If that directory already exists we skip the download
    to speed up subsequent runs.

    Raises:
        ValueError: if the archive does not contain the expected
            ``resources/prefect_dbt_project/dbt_project.yml`` layout.
    """
    project_dir = Path(__file__).parent / "prefect_dbt_project"
    if project_dir.exists():
        print(f"Using cached dbt project at {project_dir}\n")
        return project_dir
    # Start from a clean scratch directory in case a prior run was interrupted.
    tmp_extract_base = project_dir.parent / "_tmp_dbt_extract"
    if tmp_extract_base.exists():
        shutil.rmtree(tmp_extract_base)
    print(f"Downloading dbt project archive → {repo_zip_url}\n")
    # Guard against a hung connection: the task's retries only help once the
    # call actually fails, so give urlopen an explicit timeout.
    with urllib.request.urlopen(repo_zip_url, timeout=60) as resp:
        data = resp.read()
    with zipfile.ZipFile(io.BytesIO(data)) as zf:
        zf.extractall(tmp_extract_base)
    # Find the folder containing dbt_project.yml (in resources/prefect_dbt_project)
    candidates = list(
        tmp_extract_base.rglob("**/resources/prefect_dbt_project/dbt_project.yml")
    )
    if not candidates:
        raise ValueError(
            "dbt_project.yml not found in resources/prefect_dbt_project – structure unexpected"
        )
    project_root = candidates[0].parent
    shutil.move(str(project_root), str(project_dir))
    shutil.rmtree(tmp_extract_base)
    print(f"Extracted dbt project to {project_dir}\n")
    return project_dir
# ---------------------------------------------------------------------------
# Create profiles.yml for DuckDB – needed for dbt to work
# ---------------------------------------------------------------------------
# This task creates a simple profiles.yml file for DuckDB so dbt can connect
# to the database. This keeps the example self-contained.
@task(retries=2, retry_delay_seconds=5, log_prints=True)
def create_dbt_profiles(project_dir: Path) -> None:
    """Write (or overwrite) a DuckDB ``profiles.yml`` inside *project_dir*.

    dbt needs a connection profile to run; a local DuckDB file keeps the
    example self-contained with no external database configuration. Any
    existing profiles.yml is overwritten to guarantee correct formatting.
    """
    profiles_content = f"""demo:
  outputs:
    dev:
      type: duckdb
      path: {project_dir}/demo.duckdb
      threads: 1
  target: dev"""
    profiles_path = project_dir / "profiles.yml"
    profiles_path.write_text(profiles_content)
    print(f"Created/updated profiles.yml at {profiles_path}")
# ---------------------------------------------------------------------------
# dbt CLI Wrapper – execute commands with retries and logging using prefect-dbt
# ---------------------------------------------------------------------------
# This task uses the modern PrefectDbtRunner from prefect-dbt integration which
# provides native dbt execution with enhanced logging, failure handling, and
# automatic event emission.
# [Learn more about retries in the Prefect documentation](https://docs.prefect.io/v3/develop/write-tasks#retries)
@task(retries=2, retry_delay_seconds=5, log_prints=True)
def run_dbt_commands(commands: list[str], project_dir: Path) -> None:
    """Execute each dbt command via the prefect-dbt ``PrefectDbtRunner``.

    The runner streams dbt output into Prefect's logs and emits events for
    node status changes. ``raise_on_failure=False`` lets the flow handle dbt
    failures gracefully instead of aborting on the first error.
    """
    print(f"Running dbt commands: {commands}\n")
    # Point dbt at the downloaded project; profiles.yml lives there too.
    dbt_settings = PrefectDbtSettings(
        project_dir=str(project_dir),
        profiles_dir=str(project_dir),
    )
    dbt_runner = PrefectDbtRunner(settings=dbt_settings, raise_on_failure=False)
    for cmd in commands:
        print(f"Executing: dbt {cmd}")
        dbt_runner.invoke(cmd.split())
        print(f"Completed: dbt {cmd}\n")
# ---------------------------------------------------------------------------
# Orchestration Flow – run the complete dbt lifecycle
# ---------------------------------------------------------------------------
# This flow orchestrates the standard dbt workflow: debug → deps → seed → run → test.
# Each step is a separate task run in Prefect, providing granular observability
# and automatic retry handling for any step that fails. Now using the flexible
# prefect-dbt integration for enhanced dbt execution.
# [Learn more about flows in the Prefect documentation](https://docs.prefect.io/v3/develop/write-flows)
@flow(name="dbt_flow", log_prints=True)
def dbt_flow(repo_zip_url: str = DEFAULT_REPO_ZIP) -> None:
    """Run the demo dbt project end-to-end with Prefect.

    Steps executed:
      1. Download and set up the dbt project (cached after the first run).
      2. Write a DuckDB profiles.yml so dbt can connect.
      3. Run the standard dbt lifecycle – deps → seed → run → test – each
         command as its own retryable, observable Prefect task.

    Uses the prefect-dbt integration for native dbt execution, enhanced
    logging, and automatic event emission.
    """
    project_dir = build_dbt_project(repo_zip_url)
    create_dbt_profiles(project_dir)
    # One task run per dbt command, executed sequentially.
    for step in ("deps", "seed", "run", "test"):
        run_dbt_commands([step], project_dir)
    # Let users know where the DuckDB file was written for exploration
    duckdb_path = project_dir / "demo.duckdb"
    print(f"\nDone! DuckDB file located at: {duckdb_path.resolve()}")
# ### What Just Happened?
#
# Here's the sequence of events when you run this flow:
# 1. **Project Download** – Prefect registered a task run to download and extract the dbt project from GitHub (with automatic caching for subsequent runs).
# 2. **dbt Lifecycle** – Four separate task runs executed the standard dbt workflow: `deps`, `seed`, `run`, and `test`.
# 3. **Native dbt Integration** – Each dbt command used the `PrefectDbtRunner` for enhanced logging, failure handling, and automatic event emission.
# 4. **Automatic Retries** – Each dbt command would automatically retry on failure (network issues, temporary dbt errors, etc.).
# 5. **Centralized Logging** – All dbt output streamed directly to Prefect logs with proper log level mapping.
# 6. **Event Emission** – Prefect automatically emitted events for each dbt node execution, enabling advanced monitoring and alerting.
# 7. **Local Results** – A DuckDB file appeared at `prefect_dbt_project/demo.duckdb` ready for analysis.
#
# **Prefect + prefect-dbt transformed a series of shell commands into a resilient, observable workflow** – no YAML files, no cron jobs, just Python with enterprise-grade dbt integration.
#
# ### Why This Matters
#
# Traditional dbt orchestration often involves brittle shell scripts, complex YAML configurations, or heavyweight workflow tools. Prefect with the prefect-dbt integration gives you **enterprise-grade orchestration with zero operational overhead**:
#
# - **Reliability**: Automatic retries with exponential backoff handle transient failures
# - **Native Integration**: PrefectDbtRunner provides enhanced logging, failure handling, and event emission
# - **Observability**: Every dbt command and node is logged, timed, and searchable in the Prefect UI with proper log level mapping
# - **Event-Driven**: Automatic Prefect events for dbt node status changes enable advanced monitoring and alerting
# - **Portability**: The same Python file runs locally, in CI/CD, and in production
# - **Composability**: Easily extend this flow with data quality checks, Slack alerts, or downstream dependencies
#
# This pattern scales from prototype analytics to production data platforms. Whether you're running dbt against DuckDB for rapid local iteration or Snowflake for enterprise analytics, Prefect ensures your workflows are reliable, observable, and maintainable.
#
# To learn more about orchestrating analytics workflows with Prefect, check out:
# - [prefect-dbt integration guide](https://docs.prefect.io/integrations/prefect-dbt)
# - [Task configuration and retries](https://docs.prefect.io/v3/develop/write-tasks#retries)
# - [Workflow scheduling and deployment](https://docs.prefect.io/v3/deploy/index#workflow-scheduling-and-parametrization)
# Script entry point: run the full dbt workflow locally.
if __name__ == "__main__":
    dbt_flow()
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "examples/run_dbt_with_prefect.py",
"license": "Apache License 2.0",
"lines": 216,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:examples/simple_web_scraper.py | # ---
# title: Simple web scraper
# description: Learn how to scrape article content from web pages with Prefect tasks, retries, and automatic logging.
# icon: globe
# dependencies: ["prefect", "requests", "beautifulsoup4"]
# keywords: ["getting_started", "webscraping", "tasks", "retries"]
# draft: false
# order: 5
# ---
#
# This example shows how Prefect enhances regular Python code without getting in its way.
# You'll write code exactly as you normally would, and Prefect's decorators add production-ready
# features with zero boilerplate.
#
# In this example you will:
# 1. Write regular Python functions for web scraping
# 2. Add production features ([retries](https://docs.prefect.io/v3/develop/write-tasks#retries), [logging](https://docs.prefect.io/v3/develop/logging#configure-logging)) with just two decorators:
# - `@task` - Turn any function into a [retryable, observable unit](https://docs.prefect.io/v3/develop/write-tasks#write-and-run-tasks)
# - `@flow` - Compose tasks into a [reliable pipeline](https://docs.prefect.io/v3/develop/write-flows#write-and-run-flows)
# 3. Keep your code clean and Pythonic - no framework-specific patterns needed
#
# ## The Power of Regular Python
# Notice how the code below is just standard Python with two decorators. You could remove
# the decorators and the code would still work - Prefect just makes it more resilient.
#
# - Regular Python functions? ✓
# - Standard libraries (requests, BeautifulSoup)? ✓
# - Normal control flow (if/else, loops)? ✓
# - Prefect's magic? Just two decorators! ✓
from __future__ import annotations
import requests
from bs4 import BeautifulSoup
from prefect import flow, task
# ## Defining tasks
#
# We separate network IO from parsing so both pieces can be retried or cached independently.
@task(retries=3, retry_delay_seconds=2)
def fetch_html(url: str) -> str:
    """Fetch a page and return its HTML body.

    A standard ``requests`` GET; the @task decorator layers retry logic on
    top without changing how the call is written.
    """
    print(f"Fetching {url} …")
    resp = requests.get(url, timeout=10)
    resp.raise_for_status()
    return resp.text
@task
def parse_article(html: str) -> str:
    """Extract readable article text from *html*, skipping code blocks.

    Returns an empty string when no ``<article>`` or ``<main>`` element is
    found.

    Fix: the previous selector matched both list containers ("ul"/"ol") and
    their "li" children, so every list item's text was emitted twice – once
    inside the container's ``get_text()`` and once on its own. Matching only
    "li" keeps each item exactly once.
    """
    soup = BeautifulSoup(html, "html.parser")
    # Prefer the semantic <article> element, falling back to <main>.
    article = soup.find("article") or soup.find("main")
    if not article:
        return ""
    # Drop code samples entirely – we only want prose.
    for code in article.find_all(["pre", "code"]):
        code.decompose()
    content: list[str] = []
    for elem in article.find_all(["h1", "h2", "h3", "p", "li"]):
        text = elem.get_text().strip()
        if not text:
            continue
        if elem.name.startswith("h"):
            # Render headings as banner lines so the plain-text output keeps
            # its visual structure.
            content.extend(["\n" + "=" * 80, text.upper(), "=" * 80 + "\n"])
        else:
            content.extend([text, ""])
    return "\n".join(content)
# ## Defining a flow
#
# `@flow` elevates a function to a *flow* – the orchestration nucleus that can call
# tasks, other flows, and any Python you need. We enable `log_prints=True` so each
# `print()` surfaces in Prefect Cloud or the local API.
@flow(log_prints=True)
def scrape(urls: list[str] | None = None) -> None:
    """Fetch each URL, extract its article text, and print the result.

    Does nothing when *urls* is ``None`` or empty. Prefect adds logging and
    dependency tracking around the two task calls automatically.
    """
    for url in urls or []:
        article_text = parse_article(fetch_html(url))
        print(article_text if article_text else "No article content found.")
# ## Run it!
#
# Feel free to tweak the URL list or the regex and re-run. Prefect hot-reloads your
# code instantly – no container builds required.
# Script entry point: scrape a single demo URL. Add more entries to the list
# to scrape several pages per run.
if __name__ == "__main__":
    urls = [
        "https://www.prefect.io/blog/airflow-to-prefect-why-modern-teams-choose-prefect"
    ]
    scrape(urls=urls)
# ## What just happened?
#
# When you ran this script, Prefect did a few things behind the scenes:
# 1. Turned each decorated function into a *task run* or *flow run* with structured state.
# 2. Applied retry logic to the network call – a flaky connection would auto-retry up to 3 times.
# 3. Captured all `print()` statements so you can view them in the Prefect UI or logs.
# 4. Passed the HTML between tasks **in memory** – no external storage required.
#
# Yet the code itself is standard Python. You could copy-paste the body of `fetch_html` or
# `parse_article` into a notebook and they'd work exactly the same.
#
# ## Key Takeaways
#
# * **Less boilerplate, more Python** – You focus on the scraping logic, Prefect adds production features.
# * **Observability out of the box** – Every run is tracked, making debugging and monitoring trivial.
# * **Portability** – The same script runs on your laptop today and on Kubernetes tomorrow.
# * **Reliability** – Retries, timeouts, and state management are just one decorator away.
#
# Happy scraping – and happy orchestrating! 🎉
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "examples/simple_web_scraper.py",
"license": "Apache License 2.0",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/prefect/server/events/ordering/memory.py | from __future__ import annotations
import asyncio
import logging
from contextlib import asynccontextmanager
from datetime import datetime, timedelta
from typing import Any, AsyncGenerator
from uuid import UUID
import anyio
from cachetools import TTLCache
import prefect.types._datetime
from prefect.logging import get_logger
from prefect.server.events.ordering import (
MAX_DEPTH_OF_PRECEDING_EVENT,
PRECEDING_EVENT_LOOKBACK,
SEEN_EXPIRATION,
EventArrivedEarly,
MaxDepthExceeded,
event_handler,
)
from prefect.server.events.ordering import CausalOrdering as _CausalOrdering
from prefect.server.events.schemas.events import Event, ReceivedEvent
logger: logging.Logger = get_logger(__name__)
# How long we'll wait for an in-flight event to be processed for follower handling
IN_FLIGHT_EVENT_TIMEOUT = timedelta(seconds=8)
class EventBeingProcessed(Exception):
    """Raised when an event is already mid-processing elsewhere.

    Concurrent consumers may pick up the same event; the later one should
    back off and not process it until the in-flight processing finishes.
    """

    def __init__(self, event: ReceivedEvent):
        # Keep a handle on the offending event so callers can retry it later.
        self.event = event
class CausalOrdering(_CausalOrdering):
    """In-memory implementation of causal event ordering.

    Exactly one instance exists per *scope* (enforced via ``__new__``), so all
    consumers within a scope share the same bookkeeping. All mutable state is
    guarded by a per-scope ``asyncio.Lock``.
    """

    # Class-level storage for different scopes
    _instances: dict[str, "CausalOrdering"] = {}
    _locks: dict[str, asyncio.Lock] = {}

    def __new__(cls, scope: str) -> "CausalOrdering":
        # Per-scope singleton: constructing with a known scope returns the
        # existing instance rather than a fresh one.
        if scope not in cls._instances:
            cls._instances[scope] = super().__new__(cls)
        return cls._instances[scope]

    def __init__(self, scope: str):
        # Only initialize once per scope
        # (__init__ runs on every construction, even when __new__ returned a
        # cached instance, so guard against re-initialization.)
        if hasattr(self, "_initialized") and self._initialized:
            return
        self.scope: str = scope
        # Events currently being handled by some consumer.
        self._processing_events: set[UUID] = set()
        # Events fully processed; entries expire after SEEN_EXPIRATION.
        self._seen_events: TTLCache[UUID, bool] = TTLCache(
            maxsize=10000, ttl=SEEN_EXPIRATION.total_seconds()
        )
        self._followers: dict[UUID, set[UUID]] = {}  # leader_id -> set of follower_ids
        self._events: dict[UUID, ReceivedEvent] = {}  # event_id -> event
        self._waitlist: dict[UUID, datetime] = {}  # event_id -> received_time
        # Each scope gets its own lock
        if scope not in self.__class__._locks:
            self.__class__._locks[scope] = asyncio.Lock()
        self._lock = self.__class__._locks[scope]
        self._initialized = True

    def clear(self) -> None:
        """Clear all data for this scope."""
        self._processing_events.clear()
        self._seen_events.clear()
        self._followers.clear()
        self._events.clear()
        self._waitlist.clear()

    @classmethod
    def clear_all_scopes(cls) -> None:
        """Clear all data for all scopes - useful for testing."""
        for instance in cls._instances.values():
            instance.clear()
        cls._instances.clear()
        cls._locks.clear()

    async def record_event_as_processing(self, event: ReceivedEvent) -> bool:
        """Record that an event is being processed, returning False if already processing."""
        async with self._lock:
            if event.id in self._processing_events:
                return False
            self._processing_events.add(event.id)
            return True

    async def event_has_started_processing(self, event: UUID | Event) -> bool:
        # Accepts either a bare UUID or an Event for caller convenience.
        event_id = event.id if isinstance(event, Event) else event
        async with self._lock:
            return event_id in self._processing_events

    async def forget_event_is_processing(self, event: ReceivedEvent) -> None:
        # discard (not remove) so forgetting an unknown event is a no-op.
        async with self._lock:
            self._processing_events.discard(event.id)

    async def event_has_been_seen(self, event: UUID | Event) -> bool:
        event_id = event.id if isinstance(event, Event) else event
        async with self._lock:
            return event_id in self._seen_events

    async def record_event_as_seen(self, event: ReceivedEvent) -> None:
        async with self._lock:
            self._seen_events[event.id] = True

    async def record_follower(self, event: ReceivedEvent) -> None:
        """Remember that this event is waiting on another event to arrive."""
        assert event.follows
        async with self._lock:
            self._events[event.id] = event
            if event.follows not in self._followers:
                self._followers[event.follows] = set()
            self._followers[event.follows].add(event.id)
            # The waitlist timestamp is used later to detect "lost" followers.
            self._waitlist[event.id] = event.received

    async def forget_follower(self, follower: ReceivedEvent) -> None:
        """Forget that this event is waiting on another event to arrive."""
        assert follower.follows
        async with self._lock:
            self._waitlist.pop(follower.id, None)
            if follower.follows in self._followers:
                self._followers[follower.follows].discard(follower.id)
                # Drop the leader's entry entirely once its last follower is gone.
                if not self._followers[follower.follows]:
                    del self._followers[follower.follows]
            self._events.pop(follower.id, None)

    async def get_followers(self, leader: ReceivedEvent) -> list[ReceivedEvent]:
        """Returns events that were waiting on this leader event to arrive."""
        async with self._lock:
            # Copy the id set so we don't iterate shared state outside the lock.
            follower_ids = self._followers.get(leader.id, set()).copy()
            follower_events: list[ReceivedEvent] = []
            for follower_id in follower_ids:
                if follower_id in self._events:
                    follower_events.append(self._events[follower_id])
            # Sort by occurred time to maintain causal order
            return sorted(follower_events, key=lambda f: f.occurred)

    async def followers_by_id(self, follower_ids: list[UUID]) -> list[ReceivedEvent]:
        """Returns the events with the given IDs, in the order they occurred."""
        async with self._lock:
            follower_events = [
                self._events[fid] for fid in follower_ids if fid in self._events
            ]
            return sorted(follower_events, key=lambda f: f.occurred)

    async def get_lost_followers(self) -> list[ReceivedEvent]:
        """Returns events that were waiting on a leader event that never arrived."""
        # Followers older than the lookback window are assumed lost: their
        # leader is never going to show up.
        cutoff_time = prefect.types._datetime.now("UTC") - PRECEDING_EVENT_LOOKBACK
        async with self._lock:
            lost_ids = [
                event_id
                for event_id, received_time in self._waitlist.items()
                if received_time <= cutoff_time
            ]
            # Remove lost followers from our tracking
            lost_events: list[ReceivedEvent] = []
            for event_id in lost_ids:
                if event_id in self._events:
                    event = self._events[event_id]
                    lost_events.append(event)
                    # Clean up tracking for this lost event
                    if event.follows and event.follows in self._followers:
                        self._followers[event.follows].discard(event_id)
                        if not self._followers[event.follows]:
                            del self._followers[event.follows]
                    del self._events[event_id]
                del self._waitlist[event_id]
            return sorted(lost_events, key=lambda f: f.occurred)

    @asynccontextmanager
    async def event_is_processing(
        self, event: ReceivedEvent
    ) -> AsyncGenerator[None, None]:
        """Mark an event as being processed for the duration of its lifespan through
        the ordering system."""
        if not await self.record_event_as_processing(event):
            self._log(event, "is already being processed")
            raise EventBeingProcessed(event)
        try:
            # Only mark the event "seen" if the caller's body completed without
            # raising; the processing flag is always cleared either way.
            yield
            await self.record_event_as_seen(event)
        finally:
            await self.forget_event_is_processing(event)

    async def wait_for_leader(self, event: ReceivedEvent) -> None:
        """Given an event, wait for its leader to be processed before proceeding, or
        raise EventArrivedEarly if we would wait too long in this attempt."""
        # If this event doesn't follow anything (meaningfully), it's ready to go now
        if not event.follows or event.follows == event.id:
            return
        # If this is an old event, we won't have accurate bookkeeping for its leader
        # so we're just going to send it
        age = prefect.types._datetime.now("UTC") - event.received
        if age >= PRECEDING_EVENT_LOOKBACK:
            return
        # If the leader has already been seen, keep on trucking
        if await self.event_has_been_seen(event.follows):
            return
        # Check if the leader is currently being processed, and if so, poll until it's
        # done being processed as a quicker alternative to sitting on the waitlist
        if await self.event_has_started_processing(event.follows):
            try:
                with anyio.fail_after(IN_FLIGHT_EVENT_TIMEOUT.total_seconds()):
                    while not await self.event_has_been_seen(event.follows):
                        await asyncio.sleep(0.25)
                return
            except asyncio.TimeoutError:
                # NOTE(review): anyio.fail_after raises the builtin TimeoutError;
                # asyncio.TimeoutError is only an alias of it on Python 3.11+ —
                # confirm this clause catches on the minimum supported Python.
                self._log(
                    event,
                    "timed out waiting for its in-flight leader %s, will treat as lost",
                    event.follows,
                )
        # Otherwise, we'll stop processing now and sit on the waitlist until the leader
        # eventually comes through the system
        self._log(event, "arrived before the event it follows %s", event.follows)
        await self.record_follower(event)
        raise EventArrivedEarly(event)

    @asynccontextmanager
    async def preceding_event_confirmed(
        self,
        handler: event_handler,
        event: ReceivedEvent,
        depth: int = 0,
    ) -> AsyncGenerator[None, None]:
        """
        Events may optionally declare that they logically follow another event, so that
        we can preserve important event orderings in the face of unreliable delivery and
        ordering of messages from the queues.

        This function keeps track of the ID of each event that this shard has
        successfully processed going back to the PRECEDING_EVENT_LOOKBACK period. If an
        event arrives that must follow another one, confirm that we have recently seen
        and processed that event before proceeding.

        Args:
            handler: The function to call when an out-of-order event is
                ready to be processed
            event: The event to be processed. This object should include
                metadata indicating if and what event it follows.
            depth: The current recursion depth, used to prevent infinite
                recursion due to cyclic dependencies between events. Defaults to 0.

        Raises EventArrivedEarly if the current event shouldn't be processed yet.
        """
        if depth > MAX_DEPTH_OF_PRECEDING_EVENT:
            # There is either a cyclic set of events or a chain
            # of events that is too long
            self._log(
                event,
                "has exceeded the maximum recursion depth %s",
                MAX_DEPTH_OF_PRECEDING_EVENT,
            )
            raise MaxDepthExceeded(event)
        async with self.event_is_processing(event):
            await self.wait_for_leader(event)
            yield
        # We have just processed an event that other events may have been waiting
        # on, so let's react to them now in the order they occurred
        # (this runs after the context above exits, i.e. after the event has
        # been recorded as seen, so followers' own wait_for_leader succeeds).
        try:
            for waiter in await self.get_followers(event):
                await handler(waiter, depth=depth + 1)
        except MaxDepthExceeded:
            # We'll only process the first MAX_DEPTH_OF_PRECEDING_EVENT followers.
            # If we hit this limit, we'll just log and move on.
            self._log(
                event,
                "reached its max depth of %s followers processed.",
                MAX_DEPTH_OF_PRECEDING_EVENT,
            )
        # If this event was itself waiting on a leader, let's consider it as
        # resolved now that it has been processed
        if event.follows and event.follows != event.id:
            await self.forget_follower(event)

    def _log(self, event: ReceivedEvent, message: str, *args: Any) -> None:
        # Structured logging helper: prefixes every message with the event's
        # identity and attaches the key fields as `extra` for log aggregation.
        logger.info(
            "Event %r (%s) for %r " + message,
            event.event,
            event.id,
            event.resource.id,
            *args,
            extra={
                "event_id": event.id,
                "follows": event.follows,
                "resource_id": event.resource.id,
            },
        )
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/server/events/ordering/memory.py",
"license": "Apache License 2.0",
"lines": 261,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:tests/events/server/test_in_memory_ordering.py | import asyncio
from datetime import timedelta
from typing import Sequence
from unittest.mock import patch
from uuid import uuid4
import pytest
from cachetools import TTLCache
from prefect.server.events.ordering import (
MAX_DEPTH_OF_PRECEDING_EVENT,
PRECEDING_EVENT_LOOKBACK,
SEEN_EXPIRATION,
EventArrivedEarly,
)
from prefect.server.events.ordering.memory import CausalOrdering, EventBeingProcessed
from prefect.server.events.schemas.events import ReceivedEvent, Resource
from prefect.types._datetime import DateTime
@pytest.fixture
def resource() -> Resource:
    """A minimal resource shared by all of the example events."""
    spec = {"prefect.resource.id": "any.thing"}
    return Resource(spec)
@pytest.fixture
def event_one(
    start_of_test: DateTime,
    resource: Resource,
) -> ReceivedEvent:
    """The root of the example chain: follows nothing."""
    moment = start_of_test + timedelta(seconds=1)
    return ReceivedEvent(
        event="event.one",
        resource=resource,
        occurred=moment,
        received=moment,
        id=uuid4(),
        follows=None,
    )
@pytest.fixture
def event_two(event_one: ReceivedEvent) -> ReceivedEvent:
    """Follows event_one, occurring one second later."""
    return ReceivedEvent(
        resource=event_one.resource,
        event="event.two",
        follows=event_one.id,
        id=uuid4(),
        occurred=event_one.occurred + timedelta(seconds=1),
        received=event_one.received + timedelta(seconds=1, milliseconds=1),
    )
@pytest.fixture
def event_three_a(event_two: ReceivedEvent) -> ReceivedEvent:
    """The first of two siblings that both follow event_two."""
    return ReceivedEvent(
        resource=event_two.resource,
        event="event.three.a",
        follows=event_two.id,
        id=uuid4(),
        occurred=event_two.occurred + timedelta(seconds=1),
        received=event_two.received + timedelta(seconds=1, milliseconds=1),
    )
@pytest.fixture
def event_three_b(event_two: ReceivedEvent) -> ReceivedEvent:
    """The second sibling of event_two, occurring a second after event_three_a."""
    return ReceivedEvent(
        resource=event_two.resource,
        event="event.three.b",
        follows=event_two.id,
        id=uuid4(),
        occurred=event_two.occurred + timedelta(seconds=2),
        received=event_two.received + timedelta(seconds=2, milliseconds=1),
    )
@pytest.fixture
def in_proper_order(
    event_one: ReceivedEvent,
    event_two: ReceivedEvent,
    event_three_a: ReceivedEvent,
    event_three_b: ReceivedEvent,
) -> Sequence[ReceivedEvent]:
    """The four example events in their causal (occurred) order."""
    ordered = [event_one, event_two, event_three_a, event_three_b]
    return ordered
@pytest.fixture
def in_jumbled_order(
    event_one: ReceivedEvent,
    event_two: ReceivedEvent,
    event_three_a: ReceivedEvent,
    event_three_b: ReceivedEvent,
) -> Sequence[ReceivedEvent]:
    """The example events shuffled out of causal order."""
    shuffled = [event_two, event_three_a, event_one, event_three_b]
    return shuffled
@pytest.fixture
def backwards(
    event_one: ReceivedEvent,
    event_two: ReceivedEvent,
    event_three_a: ReceivedEvent,
    event_three_b: ReceivedEvent,
) -> Sequence[ReceivedEvent]:
    """The example events fully reversed from their causal order."""
    reversed_order = [event_three_b, event_three_a, event_two, event_one]
    return reversed_order
@pytest.fixture(params=["in_proper_order", "in_jumbled_order", "backwards"])
def example(request: pytest.FixtureRequest) -> Sequence[ReceivedEvent]:
    # Parametrized over the three delivery orders so each ordering test runs
    # against well-ordered, jumbled, and fully reversed event streams.
    return request.getfixturevalue(request.param)
@pytest.fixture
def causal_ordering() -> CausalOrdering:
    """A fresh CausalOrdering in an isolated scope for each test."""
    # Wipe any scoped singletons left over from previous tests before
    # handing out the shared "unit-tests" scope.
    CausalOrdering.clear_all_scopes()
    return CausalOrdering(scope="unit-tests")
def test_causal_ordering_uses_scope_correctly():
    """CausalOrdering acts as a per-scope singleton."""
    CausalOrdering.clear_all_scopes()

    first = CausalOrdering(scope="one")
    second = CausalOrdering(scope="two")

    # Re-requesting an existing scope hands back the identical instance,
    # while distinct scopes get distinct instances.
    assert CausalOrdering(scope="one") is first
    assert first is not second
    assert first.scope == "one"
    assert second.scope == "two"
async def test_attributes_persist_within_same_scope(event_one: ReceivedEvent):
    """State recorded through one handle is visible through any handle of the same scope."""
    CausalOrdering.clear_all_scopes()

    writer = CausalOrdering(scope="test-scope")
    await writer.record_event_as_seen(event_one)

    reader = CausalOrdering(scope="test-scope")
    assert reader is writer
    assert await reader.event_has_been_seen(event_one)
class TestEventProcessingState:
    """The in-flight ("processing") marker on events."""

    async def test_record_and_check_processing(
        self, causal_ordering: CausalOrdering, event_one: ReceivedEvent
    ):
        # Nothing is in flight to begin with
        assert not await causal_ordering.event_has_started_processing(event_one)

        # The first claim succeeds; a second claim while in flight is refused
        assert await causal_ordering.record_event_as_processing(event_one)
        assert await causal_ordering.event_has_started_processing(event_one)
        assert not await causal_ordering.record_event_as_processing(event_one)

        # Releasing the claim makes the event claimable again
        await causal_ordering.forget_event_is_processing(event_one)
        assert not await causal_ordering.event_has_started_processing(event_one)
        assert await causal_ordering.record_event_as_processing(event_one)

    async def test_event_is_processing_context_manager(
        self, causal_ordering: CausalOrdering, event_one: ReceivedEvent
    ):
        assert not await causal_ordering.event_has_been_seen(event_one)

        # While inside the context the event is marked in flight...
        async with causal_ordering.event_is_processing(event_one):
            assert await causal_ordering.event_has_started_processing(event_one)

        # ...and a clean exit records it as seen and releases the claim
        assert await causal_ordering.event_has_been_seen(event_one)
        assert not await causal_ordering.event_has_started_processing(event_one)

    async def test_event_being_processed_exception(
        self, causal_ordering: CausalOrdering, event_one: ReceivedEvent
    ):
        await causal_ordering.record_event_as_processing(event_one)

        # Entering the context for an already in-flight event raises
        with pytest.raises(EventBeingProcessed) as exc_info:
            async with causal_ordering.event_is_processing(event_one):
                pass
        assert exc_info.value.event == event_one
class TestEventSeenTracking:
    """Tracking which events this scope has fully processed ("seen")."""

    async def test_event_seen_tracking(
        self, causal_ordering: CausalOrdering, event_one: ReceivedEvent
    ):
        # Initially not seen; lookup works by event object or by bare UUID
        assert not await causal_ordering.event_has_been_seen(event_one)
        assert not await causal_ordering.event_has_been_seen(event_one.id)

        # Record as seen
        await causal_ordering.record_event_as_seen(event_one)
        assert await causal_ordering.event_has_been_seen(event_one)
        assert await causal_ordering.event_has_been_seen(event_one.id)

    async def test_seen_events_cleanup(
        self, causal_ordering: CausalOrdering, event_one: ReceivedEvent
    ):
        # Record event as seen
        await causal_ordering.record_event_as_seen(event_one)
        assert await causal_ordering.event_has_been_seen(event_one)

        # The seen-set is a TTLCache expiring after SEEN_EXPIRATION
        assert isinstance(causal_ordering._seen_events, TTLCache)
        assert causal_ordering._seen_events.ttl == SEEN_EXPIRATION.total_seconds()

        # Verify maxsize is reasonable (prevents unbounded growth)
        assert causal_ordering._seen_events.maxsize == 10000

        # Replace the cache temporarily with one that has a very short TTL
        original_cache = causal_ordering._seen_events
        try:
            # Create a TTLCache with 0.1 second TTL for testing
            causal_ordering._seen_events = TTLCache(maxsize=10000, ttl=0.1)

            # Add event to the short-lived cache
            await causal_ordering.record_event_as_seen(event_one)
            assert await causal_ordering.event_has_been_seen(event_one)

            # Wait for expiration
            await asyncio.sleep(0.15)

            # Should not be seen anymore due to expiration
            assert not await causal_ordering.event_has_been_seen(event_one)
        finally:
            # Restore original cache
            causal_ordering._seen_events = original_cache
class TestFollowerLeaderTracking:
    """Recording followers (events that arrived before the event they follow)."""

    async def test_record_and_forget_follower(
        self,
        causal_ordering: CausalOrdering,
        event_one: ReceivedEvent,
        event_two: ReceivedEvent,
    ):
        # Initially no followers
        assert await causal_ordering.get_followers(event_one) == []

        # Record follower
        await causal_ordering.record_follower(event_two)
        followers = await causal_ordering.get_followers(event_one)
        assert followers == [event_two]

        # Forget follower
        await causal_ordering.forget_follower(event_two)
        assert await causal_ordering.get_followers(event_one) == []

    async def test_multiple_followers_sorted_by_occurred(
        self,
        causal_ordering: CausalOrdering,
        event_one: ReceivedEvent,
        event_three_a: ReceivedEvent,
        event_three_b: ReceivedEvent,
    ):
        # Record followers out of order (event_three_b occurs after event_three_a)
        await causal_ordering.record_follower(event_three_b)
        await causal_ordering.record_follower(event_three_a)

        assert event_three_a.follows is not None
        # Should return in occurred order (event_three_a.follows is event_two.id,
        # so synthesize a leader with that id)
        leader_event = ReceivedEvent(
            resource=event_three_a.resource,
            event="leader",
            occurred=event_three_a.occurred,
            received=event_three_a.received,
            id=event_three_a.follows,
            follows=None,
        )
        followers = await causal_ordering.get_followers(leader_event)
        assert followers == [event_three_a, event_three_b]

    async def test_followers_by_id(
        self,
        causal_ordering: CausalOrdering,
        event_two: ReceivedEvent,
        event_three_a: ReceivedEvent,
        event_three_b: ReceivedEvent,
    ):
        # Record followers
        await causal_ordering.record_follower(event_two)
        await causal_ordering.record_follower(event_three_a)
        await causal_ordering.record_follower(event_three_b)

        # Get specific followers by ID
        follower_ids = [event_three_b.id, event_two.id]  # Out of order
        followers = await causal_ordering.followers_by_id(follower_ids)

        # Should return in occurred order
        assert followers == [event_two, event_three_b]
class TestLostFollowers:
    """Followers whose leader never arrived within the lookback window."""

    async def test_get_lost_followers(
        self,
        causal_ordering: CausalOrdering,
        event_two: ReceivedEvent,
        event_three_a: ReceivedEvent,
        event_three_b: ReceivedEvent,
    ):
        # Record followers - only event_two follows event_one, but the three_x events follow event_two
        await causal_ordering.record_follower(event_two)
        await causal_ordering.record_follower(event_three_a)
        await causal_ordering.record_follower(event_three_b)

        # Mock time to simulate events being old
        with patch("prefect.types._datetime.now") as mock_now:
            # We need to make the earliest received time old enough
            earliest_received = min(
                event_two.received, event_three_a.received, event_three_b.received
            )
            future_time = (
                earliest_received + PRECEDING_EVENT_LOOKBACK + timedelta(seconds=1)
            )
            mock_now.return_value = future_time

            lost_followers = await causal_ordering.get_lost_followers()

            # Only event_two should be returned as lost because:
            # - event_two is waiting for event_one (which never arrived and is now past cutoff)
            # - event_three_a and event_three_b are waiting for event_two, but event_two is still in the system
            # However, since event_two gets cleaned up, event_three_a and event_three_b become orphaned
            assert lost_followers == [event_two]

            # Lost followers should be cleaned up - check that waitlist is smaller
            assert len(getattr(causal_ordering, "_waitlist", [])) < 3
class TestCausalOrderingFlow:
    """End-to-end behavior of preceding_event_confirmed and wait_for_leader."""

    async def test_ordering_is_correct(
        self,
        causal_ordering: CausalOrdering,
        in_proper_order: Sequence[ReceivedEvent],
        example: Sequence[ReceivedEvent],
    ):
        processed: list[ReceivedEvent] = []

        async def evaluate(event: ReceivedEvent, depth: int = 0) -> None:
            async with causal_ordering.preceding_event_confirmed(
                evaluate, event, depth=depth
            ):
                processed.append(event)

        # Feed the events in whatever order the parametrized example delivers;
        # early arrivals are parked and replayed via the follower mechanism.
        example = list(example)
        while example:
            try:
                await evaluate(example.pop(0))
            except EventArrivedEarly:
                continue

        assert processed == list(in_proper_order)

    async def test_wait_for_leader_no_follows(
        self, causal_ordering: CausalOrdering, event_one: ReceivedEvent
    ):
        # Event without follows should not wait
        await causal_ordering.wait_for_leader(event_one)  # Should not raise

    async def test_wait_for_leader_self_follows(
        self, causal_ordering: CausalOrdering, event_one: ReceivedEvent
    ):
        # Event that follows itself should not wait
        event_one.follows = event_one.id
        await causal_ordering.wait_for_leader(event_one)  # Should not raise

    async def test_wait_for_leader_old_event(
        self,
        causal_ordering: CausalOrdering,
        event_one: ReceivedEvent,
        event_two: ReceivedEvent,
    ):
        # Old events should not wait - patch datetime.now to make event appear old
        with patch("prefect.types._datetime.now") as mock_now:
            future_time = (
                event_two.received + PRECEDING_EVENT_LOOKBACK + timedelta(seconds=1)
            )
            mock_now.return_value = future_time

            await causal_ordering.wait_for_leader(event_two)  # Should not raise

    async def test_wait_for_leader_seen(
        self,
        causal_ordering: CausalOrdering,
        event_one: ReceivedEvent,
        event_two: ReceivedEvent,
    ):
        # Mark leader as seen
        await causal_ordering.record_event_as_seen(event_one)

        # Should not wait
        await causal_ordering.wait_for_leader(event_two)  # Should not raise

    async def test_wait_for_leader_in_flight(
        self,
        causal_ordering: CausalOrdering,
        event_one: ReceivedEvent,
        event_two: ReceivedEvent,
    ):
        # Mark leader as processing
        await causal_ordering.record_event_as_processing(event_one)

        # Start a task that will mark the leader as seen after a short delay
        async def mark_seen_later():
            await asyncio.sleep(0.1)
            await causal_ordering.record_event_as_seen(event_one)

        asyncio.create_task(mark_seen_later())

        # Should wait (polling the in-flight leader) and then proceed
        await causal_ordering.wait_for_leader(event_two)  # Should not raise

    async def test_wait_for_leader_arrives_early(
        self, causal_ordering: CausalOrdering, event_two: ReceivedEvent
    ):
        # Leader not seen or processing - should raise EventArrivedEarly
        with pytest.raises(EventArrivedEarly) as exc_info:
            await causal_ordering.wait_for_leader(event_two)

        assert exc_info.value.event == event_two
class TestErrorConditions:
    """Depth limits, the lookback horizon, and lost-follower recovery."""

    async def test_max_depth_exceeded(
        self,
        causal_ordering: CausalOrdering,
        event_one: ReceivedEvent,
    ):
        # Create a chain longer than MAX_DEPTH_OF_PRECEDING_EVENT
        worst_case: list[ReceivedEvent] = []
        previous = event_one

        for i in range(MAX_DEPTH_OF_PRECEDING_EVENT + 1):
            this_one = ReceivedEvent(
                event=f"event.{i}",
                resource=previous.resource,
                occurred=previous.occurred + timedelta(seconds=1),
                received=previous.received + timedelta(seconds=1),
                id=uuid4(),
                follows=previous.id,
            )
            worst_case.append(this_one)
            previous = this_one

        async def evaluate(event: ReceivedEvent, depth: int = 0) -> None:
            async with causal_ordering.preceding_event_confirmed(
                evaluate, event, depth=depth
            ):
                pass

        # Process events in reverse order so every one arrives early
        worst_case.reverse()
        while worst_case:
            try:
                await evaluate(worst_case.pop(0))
            except EventArrivedEarly:
                continue

        # In our implementation, the max depth is reached during follower processing
        # which causes the recursion to stop but doesn't raise an exception at the root level
        # This is different from the expected behavior, so let's verify it processes without exception
        # but logs the max depth message
        await evaluate(event_one)  # Should complete without raising MaxDepthExceeded

    async def test_only_looks_to_a_certain_horizon(
        self,
        causal_ordering: CausalOrdering,
        event_one: ReceivedEvent,
        event_two: ReceivedEvent,
    ):
        # Backdate the events so they happened before the lookback period
        event_one.received -= timedelta(days=1)
        event_two.received -= timedelta(days=1)

        processed: list[ReceivedEvent] = []

        async def evaluate(event: ReceivedEvent, depth: int = 0) -> None:
            async with causal_ordering.preceding_event_confirmed(
                evaluate, event, depth=depth
            ):
                processed.append(event)

        # Will not raise EventArrivedEarly because we're outside the range we can look back
        await evaluate(event_two)
        await evaluate(event_one)

        assert processed == [event_two, event_one]

    async def test_returns_lost_followers_in_occurred_order(
        self,
        causal_ordering: CausalOrdering,
        event_two: ReceivedEvent,
        event_three_a: ReceivedEvent,
        event_three_b: ReceivedEvent,
    ):
        processed: list[ReceivedEvent] = []

        async def evaluate(event: ReceivedEvent, depth: int = 0) -> None:
            async with causal_ordering.preceding_event_confirmed(
                evaluate, event, depth=depth
            ):
                processed.append(event)

        # event_one never arrives, so everything here parks on the waitlist
        example = [event_three_a, event_three_b, event_two]
        while example:
            try:
                await evaluate(example.pop(0))
            except EventArrivedEarly:
                continue

        assert processed == []

        # Mock time to simulate events being lost
        with patch("prefect.types._datetime.now") as mock_now:
            future_time = (
                event_two.received + PRECEDING_EVENT_LOOKBACK + timedelta(seconds=1)
            )
            mock_now.return_value = future_time

            # Because event_one never arrived, only event_two should be lost
            # (event_three_a and event_three_b are waiting for event_two, not event_one)
            lost_followers = await causal_ordering.get_lost_followers()
            assert lost_followers == [event_two]
class TestScopeIsolation:
    """State in one CausalOrdering scope must never leak into another."""

    async def test_two_scopes_do_not_interfere(
        self,
        event_one: ReceivedEvent,
        event_two: ReceivedEvent,
    ):
        # Clear all scopes to start fresh
        CausalOrdering.clear_all_scopes()

        # A test that two instances of the same class with different scopes do not interfere with each other
        ordering_one = CausalOrdering(scope="scope-one")
        ordering_two = CausalOrdering(scope="scope-two")

        # Verify they are different instances
        assert ordering_one is not ordering_two
        assert ordering_one.scope == "scope-one"
        assert ordering_two.scope == "scope-two"

        # Test seen events don't cross scopes
        await ordering_one.record_event_as_seen(event_one)
        assert await ordering_one.event_has_been_seen(event_one)
        assert not await ordering_two.event_has_been_seen(event_one)

        await ordering_two.record_event_as_seen(event_one)
        assert await ordering_one.event_has_been_seen(event_one)
        assert await ordering_two.event_has_been_seen(event_one)

        # Test followers don't cross scopes
        await ordering_one.record_follower(event_two)
        assert await ordering_one.get_followers(event_one) == [event_two]
        assert await ordering_two.get_followers(event_one) == []

        await ordering_two.record_follower(event_two)
        assert await ordering_one.get_followers(event_one) == [event_two]
        assert await ordering_two.get_followers(event_one) == [event_two]

        await ordering_one.forget_follower(event_two)
        assert await ordering_one.get_followers(event_one) == []
        assert await ordering_two.get_followers(event_one) == [event_two]

        await ordering_two.forget_follower(event_two)
        assert await ordering_one.get_followers(event_one) == []
        assert await ordering_two.get_followers(event_one) == []

    async def test_processing_events_isolated_by_scope(
        self,
        event_one: ReceivedEvent,
    ):
        CausalOrdering.clear_all_scopes()

        ordering_a = CausalOrdering(scope="scope-a")
        ordering_b = CausalOrdering(scope="scope-b")

        # Start processing in scope A
        assert await ordering_a.record_event_as_processing(event_one)
        assert await ordering_a.event_has_started_processing(event_one)

        # Should not be processing in scope B
        assert not await ordering_b.event_has_started_processing(event_one)

        # Should be able to start processing same event in scope B
        assert await ordering_b.record_event_as_processing(event_one)
        assert await ordering_b.event_has_started_processing(event_one)

        # Stop processing in scope A
        await ordering_a.forget_event_is_processing(event_one)
        assert not await ordering_a.event_has_started_processing(event_one)

        # Should still be processing in scope B
        assert await ordering_b.event_has_started_processing(event_one)
class TestFactoryFunction:
    def test_get_task_run_recorder_causal_ordering(self):
        """The factory returns the singleton for the task-run-recorder scope."""
        from prefect.server.events.ordering import get_task_run_recorder_causal_ordering

        CausalOrdering.clear_all_scopes()

        first = get_task_run_recorder_causal_ordering()
        assert isinstance(first, CausalOrdering)
        assert first.scope == "task-run-recorder"

        # Repeated factory calls and direct construction with the same scope
        # all resolve to the very same instance.
        assert get_task_run_recorder_causal_ordering() is first
        assert CausalOrdering(scope="task-run-recorder") is first
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/events/server/test_in_memory_ordering.py",
"license": "Apache License 2.0",
"lines": 501,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/integrations/prefect-gcp/tests/test_cloud_run_worker_v2_filtering.py | import uuid
import pytest
from prefect_gcp.credentials import GcpCredentials
from prefect_gcp.models.cloud_run_v2 import SecretKeySelector
from prefect_gcp.workers.cloud_run_v2 import CloudRunWorkerJobV2Configuration
from prefect.client.schemas.objects import FlowRun
@pytest.fixture
def job_body():
    """A minimal Cloud Run v2 job body with a single container and an empty env."""
    container = {
        "env": [],
        "command": None,
        "args": "-m prefect.engine",
        "resources": {
            "limits": {
                "cpu": None,
                "memory": None,
            },
        },
    }
    inner_template = {
        "maxRetries": None,
        "timeout": None,
        "vpcAccess": {
            "connector": None,
        },
        "containers": [container],
    }
    return {
        "client": "prefect",
        "launchStage": None,
        "template": {"template": inner_template},
    }
@pytest.fixture
def flow_run():
    # A minimal FlowRun to pass to prepare_for_flow_run in the warning tests.
    return FlowRun(flow_id=uuid.uuid4(), name="my-flow-run-name")
@pytest.fixture
def cloud_run_worker_v2_job_config(service_account_info, job_body):
    """A baseline job configuration with two plain environment variables."""
    creds = GcpCredentials(service_account_info=service_account_info)
    return CloudRunWorkerJobV2Configuration(
        name="my-job-name",
        job_body=job_body,
        credentials=creds,
        region="us-central1",
        timeout=86400,
        env={"ENV1": "VALUE1", "ENV2": "VALUE2"},
    )
class TestCloudRunWorkerJobV2ConfigurationFiltering:
    """Plaintext Prefect credentials in `env` must be dropped whenever the same
    variable is also supplied through a secret reference (either via a dedicated
    `prefect_*_secret` field or via `env_from_secrets`).

    The original tests repeated the same env-entry dict literals and
    `_populate_env()` plumbing in every method; that boilerplate now lives in
    underscore-prefixed helpers (pytest ignores them during collection).
    """

    @staticmethod
    def _plain(name, value):
        """The env-list entry for a plaintext environment variable."""
        return {"name": name, "value": value}

    @staticmethod
    def _secret_ref(name, secret, version="latest"):
        """The env-list entry for a secret-backed environment variable."""
        return {
            "name": name,
            "valueSource": {"secretKeyRef": {"secret": secret, "version": version}},
        }

    @staticmethod
    def _populated_env(config):
        """Run _populate_env() and return the rendered container env list."""
        config._populate_env()
        return config.job_body["template"]["template"]["containers"][0]["env"]

    def _assert_base_env_present(self, env_vars):
        # Variables unrelated to credentials must always survive filtering
        assert self._plain("ENV1", "VALUE1") in env_vars
        assert self._plain("ENV2", "VALUE2") in env_vars

    def test_populate_env_filters_plaintext_api_key_when_secret_configured(
        self, cloud_run_worker_v2_job_config
    ):
        config = cloud_run_worker_v2_job_config
        config.env["PREFECT_API_KEY"] = "plaintext-api-key"
        config.prefect_api_key_secret = SecretKeySelector(
            secret="prefect-api-key", version="latest"
        )

        env_vars = self._populated_env(config)

        # Plaintext entry is filtered out; the secret reference replaces it
        assert self._plain("PREFECT_API_KEY", "plaintext-api-key") not in env_vars
        assert self._secret_ref("PREFECT_API_KEY", "prefect-api-key") in env_vars
        self._assert_base_env_present(env_vars)

    def test_populate_env_filters_plaintext_auth_string_when_secret_configured(
        self, cloud_run_worker_v2_job_config
    ):
        config = cloud_run_worker_v2_job_config
        config.env["PREFECT_API_AUTH_STRING"] = "plaintext-auth-string"
        config.prefect_api_auth_string_secret = SecretKeySelector(
            secret="prefect-auth-string", version="latest"
        )

        env_vars = self._populated_env(config)

        assert (
            self._plain("PREFECT_API_AUTH_STRING", "plaintext-auth-string")
            not in env_vars
        )
        assert (
            self._secret_ref("PREFECT_API_AUTH_STRING", "prefect-auth-string")
            in env_vars
        )
        self._assert_base_env_present(env_vars)

    def test_populate_env_filters_both_plaintext_when_secrets_configured(
        self, cloud_run_worker_v2_job_config
    ):
        config = cloud_run_worker_v2_job_config
        config.env["PREFECT_API_KEY"] = "plaintext-api-key"
        config.env["PREFECT_API_AUTH_STRING"] = "plaintext-auth-string"
        config.prefect_api_key_secret = SecretKeySelector(
            secret="prefect-api-key", version="latest"
        )
        config.prefect_api_auth_string_secret = SecretKeySelector(
            secret="prefect-auth-string", version="latest"
        )

        env_vars = self._populated_env(config)

        assert self._plain("PREFECT_API_KEY", "plaintext-api-key") not in env_vars
        assert (
            self._plain("PREFECT_API_AUTH_STRING", "plaintext-auth-string")
            not in env_vars
        )
        assert self._secret_ref("PREFECT_API_KEY", "prefect-api-key") in env_vars
        assert (
            self._secret_ref("PREFECT_API_AUTH_STRING", "prefect-auth-string")
            in env_vars
        )
        self._assert_base_env_present(env_vars)

    def test_populate_env_keeps_plaintext_when_no_secrets_configured(
        self, cloud_run_worker_v2_job_config
    ):
        config = cloud_run_worker_v2_job_config
        config.env["PREFECT_API_KEY"] = "plaintext-api-key"
        config.env["PREFECT_API_AUTH_STRING"] = "plaintext-auth-string"

        env_vars = self._populated_env(config)

        # With no secrets configured there is nothing to filter
        assert self._plain("PREFECT_API_KEY", "plaintext-api-key") in env_vars
        assert (
            self._plain("PREFECT_API_AUTH_STRING", "plaintext-auth-string") in env_vars
        )
        self._assert_base_env_present(env_vars)

    def test_populate_env_filters_api_key_when_configured_via_env_from_secrets(
        self, cloud_run_worker_v2_job_config
    ):
        config = cloud_run_worker_v2_job_config
        config.env["PREFECT_API_KEY"] = "plaintext-api-key"
        # Configure API key via env_from_secrets instead of the dedicated field
        config.env_from_secrets = {
            "PREFECT_API_KEY": SecretKeySelector(
                secret="prefect-api-key", version="latest"
            )
        }

        env_vars = self._populated_env(config)

        assert self._plain("PREFECT_API_KEY", "plaintext-api-key") not in env_vars
        assert self._secret_ref("PREFECT_API_KEY", "prefect-api-key") in env_vars
        self._assert_base_env_present(env_vars)

    def test_populate_env_filters_auth_string_when_configured_via_env_from_secrets(
        self, cloud_run_worker_v2_job_config
    ):
        config = cloud_run_worker_v2_job_config
        config.env["PREFECT_API_AUTH_STRING"] = "plaintext-auth-string"
        # Configure auth string via env_from_secrets instead of the dedicated field
        config.env_from_secrets = {
            "PREFECT_API_AUTH_STRING": SecretKeySelector(
                secret="prefect-auth-string", version="latest"
            )
        }

        env_vars = self._populated_env(config)

        assert (
            self._plain("PREFECT_API_AUTH_STRING", "plaintext-auth-string")
            not in env_vars
        )
        assert (
            self._secret_ref("PREFECT_API_AUTH_STRING", "prefect-auth-string")
            in env_vars
        )
        self._assert_base_env_present(env_vars)

    def test_populate_env_filters_both_when_configured_via_env_from_secrets(
        self, cloud_run_worker_v2_job_config
    ):
        config = cloud_run_worker_v2_job_config
        config.env["PREFECT_API_KEY"] = "plaintext-api-key"
        config.env["PREFECT_API_AUTH_STRING"] = "plaintext-auth-string"
        config.env_from_secrets = {
            "PREFECT_API_KEY": SecretKeySelector(
                secret="prefect-api-key", version="latest"
            ),
            "PREFECT_API_AUTH_STRING": SecretKeySelector(
                secret="prefect-auth-string", version="latest"
            ),
        }

        env_vars = self._populated_env(config)

        assert self._plain("PREFECT_API_KEY", "plaintext-api-key") not in env_vars
        assert (
            self._plain("PREFECT_API_AUTH_STRING", "plaintext-auth-string")
            not in env_vars
        )
        assert self._secret_ref("PREFECT_API_KEY", "prefect-api-key") in env_vars
        assert (
            self._secret_ref("PREFECT_API_AUTH_STRING", "prefect-auth-string")
            in env_vars
        )
        self._assert_base_env_present(env_vars)

    def test_populate_env_prioritizes_dedicated_secret_fields_over_env_from_secrets(
        self, cloud_run_worker_v2_job_config
    ):
        config = cloud_run_worker_v2_job_config
        config.env["PREFECT_API_KEY"] = "plaintext-api-key"
        config.env["PREFECT_API_AUTH_STRING"] = "plaintext-auth-string"
        # Configure via both dedicated fields and env_from_secrets
        config.prefect_api_key_secret = SecretKeySelector(
            secret="dedicated-api-key", version="latest"
        )
        config.prefect_api_auth_string_secret = SecretKeySelector(
            secret="dedicated-auth-string", version="latest"
        )
        config.env_from_secrets = {
            "PREFECT_API_KEY": SecretKeySelector(
                secret="env-from-secrets-api-key", version="latest"
            ),
            "PREFECT_API_AUTH_STRING": SecretKeySelector(
                secret="env-from-secrets-auth-string", version="latest"
            ),
        }

        env_vars = self._populated_env(config)

        assert self._plain("PREFECT_API_KEY", "plaintext-api-key") not in env_vars
        assert (
            self._plain("PREFECT_API_AUTH_STRING", "plaintext-auth-string")
            not in env_vars
        )
        # Both sources end up in the env list; the dedicated-field secrets are
        # added after env_from_secrets
        assert self._secret_ref("PREFECT_API_KEY", "dedicated-api-key") in env_vars
        assert (
            self._secret_ref("PREFECT_API_AUTH_STRING", "dedicated-auth-string")
            in env_vars
        )
        assert (
            self._secret_ref("PREFECT_API_KEY", "env-from-secrets-api-key") in env_vars
        )
        assert (
            self._secret_ref(
                "PREFECT_API_AUTH_STRING", "env-from-secrets-auth-string"
            )
            in env_vars
        )
        self._assert_base_env_present(env_vars)
class TestCloudRunWorkerJobV2ConfigurationWarnings:
    """Warnings emitted by ``prepare_for_flow_run`` when Prefect credentials
    appear as plaintext environment variables.

    A warning is expected only when the plaintext value is NOT also covered
    by a dedicated secret field or an ``env_from_secrets`` entry.
    """
    def test_warn_about_plaintext_api_key(
        self, cloud_run_worker_v2_job_config, flow_run, caplog
    ):
        """Plaintext PREFECT_API_KEY with no secret configured -> warn."""
        # Add plaintext API key to env
        cloud_run_worker_v2_job_config.env["PREFECT_API_KEY"] = "plaintext-api-key"
        cloud_run_worker_v2_job_config.prepare_for_flow_run(
            flow_run=flow_run, deployment=None, flow=None
        )
        assert (
            "PREFECT_API_KEY is provided as a plaintext environment variable"
            in caplog.text
        )
        assert "consider providing it as a secret using" in caplog.text
        assert "'prefect_api_key_secret' or 'env_from_secrets'" in caplog.text
    def test_warn_about_plaintext_auth_string(
        self, cloud_run_worker_v2_job_config, flow_run, caplog
    ):
        """Plaintext PREFECT_API_AUTH_STRING with no secret configured -> warn."""
        # Add plaintext auth string to env
        cloud_run_worker_v2_job_config.env["PREFECT_API_AUTH_STRING"] = (
            "plaintext-auth-string"
        )
        cloud_run_worker_v2_job_config.prepare_for_flow_run(
            flow_run=flow_run, deployment=None, flow=None
        )
        assert (
            "PREFECT_API_AUTH_STRING is provided as a plaintext environment variable"
            in caplog.text
        )
        assert "consider providing it as a secret using" in caplog.text
        assert "'prefect_api_auth_string_secret' or 'env_from_secrets'" in caplog.text
    def test_warn_about_both_plaintext_credentials(
        self, cloud_run_worker_v2_job_config, flow_run, caplog
    ):
        """Both credentials in plaintext -> a warning for each."""
        # Add both plaintext credentials to env
        cloud_run_worker_v2_job_config.env["PREFECT_API_KEY"] = "plaintext-api-key"
        cloud_run_worker_v2_job_config.env["PREFECT_API_AUTH_STRING"] = (
            "plaintext-auth-string"
        )
        cloud_run_worker_v2_job_config.prepare_for_flow_run(
            flow_run=flow_run, deployment=None, flow=None
        )
        # Should warn about both
        assert "PREFECT_API_KEY is provided as a plaintext" in caplog.text
        assert "PREFECT_API_AUTH_STRING is provided as a plaintext" in caplog.text
    def test_no_warning_when_api_key_secret_configured(
        self, cloud_run_worker_v2_job_config, flow_run, caplog
    ):
        """A dedicated secret field suppresses the plaintext API key warning."""
        # Add plaintext API key but configure secret
        cloud_run_worker_v2_job_config.env["PREFECT_API_KEY"] = "plaintext-api-key"
        cloud_run_worker_v2_job_config.prefect_api_key_secret = SecretKeySelector(
            secret="prefect-api-key", version="latest"
        )
        cloud_run_worker_v2_job_config.prepare_for_flow_run(
            flow_run=flow_run, deployment=None, flow=None
        )
        # Should not warn since secret is configured
        assert "PREFECT_API_KEY is provided as a plaintext" not in caplog.text
    def test_no_warning_when_auth_string_in_env_from_secrets(
        self, cloud_run_worker_v2_job_config, flow_run, caplog
    ):
        """An env_from_secrets entry suppresses the plaintext auth string warning."""
        # Add plaintext auth string but configure it in env_from_secrets
        cloud_run_worker_v2_job_config.env["PREFECT_API_AUTH_STRING"] = (
            "plaintext-auth-string"
        )
        cloud_run_worker_v2_job_config.env_from_secrets = {
            "PREFECT_API_AUTH_STRING": SecretKeySelector(
                secret="prefect-auth-string", version="latest"
            )
        }
        cloud_run_worker_v2_job_config.prepare_for_flow_run(
            flow_run=flow_run, deployment=None, flow=None
        )
        # Should not warn since secret is configured via env_from_secrets
        assert "PREFECT_API_AUTH_STRING is provided as a plaintext" not in caplog.text
    def test_no_warning_when_no_plaintext_credentials(
        self, cloud_run_worker_v2_job_config, flow_run, caplog
    ):
        """No plaintext credentials at all -> no warning of any kind."""
        # Don't add any plaintext credentials
        cloud_run_worker_v2_job_config.prepare_for_flow_run(
            flow_run=flow_run, deployment=None, flow=None
        )
        # Should not warn since no plaintext credentials are present
        assert "is provided as a plaintext environment variable" not in caplog.text
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-gcp/tests/test_cloud_run_worker_v2_filtering.py",
"license": "Apache License 2.0",
"lines": 416,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/prefect/_internal/installation.py | import importlib
import subprocess
import sys
import prefect.utilities.processutils
def install_packages(
    packages: list[str], stream_output: bool = False, upgrade: bool = False
):
    """
    Install packages using uv if available, otherwise fall back to pip.

    Args:
        packages: Package specifiers to install, as accepted by `pip install`.
        stream_output: If True, stream installer output to this process's
            stdout/stderr; otherwise discard it.
        upgrade: If True, pass `--upgrade` to the installer.

    Raises:
        subprocess.CalledProcessError: If the chosen installer exits non-zero.
    """
    base_command = ["pip", "install", *packages]
    if upgrade:
        base_command.append("--upgrade")

    # Either mirror installer output to our stdio or silence it entirely.
    stdout = sys.stdout if stream_output else subprocess.DEVNULL
    stderr = sys.stderr if stream_output else subprocess.DEVNULL

    try:
        # Prefer uv when importable. `ModuleNotFoundError` is a subclass of
        # `ImportError`, so catching `ImportError` covers both; a
        # `FileNotFoundError` (e.g. from a missing uv binary) also falls back
        # to pip.
        uv = importlib.import_module("uv")
        subprocess.check_call(
            [uv.find_uv_bin(), *base_command],
            stdout=stdout,
            stderr=stderr,
        )
    except (ImportError, FileNotFoundError):
        subprocess.check_call(
            [sys.executable, "-m", *base_command],
            stdout=stdout,
            stderr=stderr,
        )
async def ainstall_packages(
    packages: list[str], stream_output: bool = False, upgrade: bool = False
):
    """
    Install packages using uv if available, otherwise fall back to pip.

    Async counterpart of `install_packages`; the installer is run via
    `prefect.utilities.processutils.run_process` so the event loop is not
    blocked while the subprocess runs.

    Args:
        packages: Package specifiers to install, as accepted by `pip install`.
        stream_output: If True, stream installer output; otherwise discard it.
        upgrade: If True, pass `--upgrade` to the installer.
    """
    base_command = ["pip", "install", *packages]
    if upgrade:
        base_command.append("--upgrade")
    try:
        # `ModuleNotFoundError` is a subclass of `ImportError`, so catching
        # `ImportError` covers both; `FileNotFoundError` (missing uv binary)
        # also falls back to pip.
        uv = importlib.import_module("uv")
        await prefect.utilities.processutils.run_process(
            [uv.find_uv_bin(), *base_command], stream_output=stream_output
        )
    except (ImportError, FileNotFoundError):
        await prefect.utilities.processutils.run_process(
            [sys.executable, "-m", *base_command],
            stream_output=stream_output,
        )
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/_internal/installation.py",
"license": "Apache License 2.0",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/prefect:tests/_internal/test_installation.py | import subprocess
import sys
from unittest.mock import AsyncMock, MagicMock, Mock, patch
from prefect._internal.installation import ainstall_packages, install_packages
class TestInstallPackages:
    """Unit tests for `install_packages`.

    `importlib.import_module` and `subprocess.check_call` are patched out, so
    nothing is actually installed. `@patch` decorators apply bottom-up: the
    innermost patch binds to the first mock parameter.
    """
    @patch("prefect._internal.installation.importlib.import_module")
    @patch("subprocess.check_call")
    def test_install_packages_with_uv_available(
        self, mock_check_call: MagicMock, mock_import_module: MagicMock
    ):
        """uv importable -> install goes through the uv binary, output discarded."""
        packages = ["pytest", "requests"]
        mock_uv = Mock()
        mock_uv.find_uv_bin.return_value = "/path/to/uv"
        mock_import_module.return_value = mock_uv
        install_packages(packages)
        mock_import_module.assert_called_once_with("uv")
        mock_check_call.assert_called_once_with(
            ["/path/to/uv", "pip", "install", "pytest", "requests"],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
    @patch(
        "prefect._internal.installation.importlib.import_module",
        side_effect=ImportError("No module named 'uv'"),
    )
    @patch("subprocess.check_call")
    def test_install_packages_with_uv_unavailable_import_error(
        self, mock_check_call: MagicMock, mock_import_module: MagicMock
    ):
        """ImportError when importing uv -> falls back to `python -m pip`."""
        packages = ["pytest", "requests"]
        install_packages(packages)
        mock_import_module.assert_called_once_with("uv")
        mock_check_call.assert_called_once_with(
            [sys.executable, "-m", "pip", "install", "pytest", "requests"],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
    @patch(
        "prefect._internal.installation.importlib.import_module",
        side_effect=ModuleNotFoundError("No module named 'uv'"),
    )
    @patch("subprocess.check_call")
    def test_install_packages_with_uv_unavailable_module_not_found_error(
        self, mock_check_call: MagicMock, mock_import_module: MagicMock
    ):
        """ModuleNotFoundError when importing uv -> falls back to pip."""
        packages = ["pytest", "requests"]
        install_packages(packages)
        mock_import_module.assert_called_once_with("uv")
        mock_check_call.assert_called_once_with(
            [sys.executable, "-m", "pip", "install", "pytest", "requests"],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
    @patch("prefect._internal.installation.importlib.import_module")
    @patch("subprocess.check_call")
    def test_install_packages_with_uv_file_not_found_error(
        self, mock_check_call: MagicMock, mock_import_module: MagicMock
    ):
        """uv imports but its binary is missing -> falls back to pip."""
        packages = ["pytest", "requests"]
        mock_uv = Mock()
        mock_uv.find_uv_bin.side_effect = FileNotFoundError
        mock_import_module.return_value = mock_uv
        install_packages(packages)
        mock_import_module.assert_called_once_with("uv")
        mock_check_call.assert_called_once_with(
            [sys.executable, "-m", "pip", "install", "pytest", "requests"],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
    @patch("prefect._internal.installation.importlib.import_module")
    @patch("subprocess.check_call")
    def test_install_packages_with_upgrade_flag(
        self, mock_check_call: MagicMock, mock_import_module: MagicMock
    ):
        """upgrade=True appends `--upgrade` to the uv command."""
        packages = ["pytest", "requests"]
        mock_uv = Mock()
        mock_uv.find_uv_bin.return_value = "/path/to/uv"
        mock_import_module.return_value = mock_uv
        install_packages(packages, upgrade=True)
        mock_import_module.assert_called_once_with("uv")
        mock_check_call.assert_called_once_with(
            ["/path/to/uv", "pip", "install", "pytest", "requests", "--upgrade"],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
    @patch("prefect._internal.installation.importlib.import_module")
    @patch("subprocess.check_call")
    def test_install_packages_with_stream_output(
        self, mock_check_call: MagicMock, mock_import_module: MagicMock
    ):
        """stream_output=True routes installer output to this process's stdio."""
        packages = ["pytest", "requests"]
        mock_uv = Mock()
        mock_uv.find_uv_bin.return_value = "/path/to/uv"
        mock_import_module.return_value = mock_uv
        install_packages(packages, stream_output=True)
        mock_import_module.assert_called_once_with("uv")
        mock_check_call.assert_called_once_with(
            ["/path/to/uv", "pip", "install", "pytest", "requests"],
            stdout=sys.stdout,
            stderr=sys.stderr,
        )
    @patch("prefect._internal.installation.importlib.import_module")
    @patch("subprocess.check_call")
    def test_install_packages_with_upgrade_and_stream_output(
        self, mock_check_call: MagicMock, mock_import_module: MagicMock
    ):
        """Both flags together: `--upgrade` plus streamed output (uv path)."""
        packages = ["pytest", "requests"]
        mock_uv = Mock()
        mock_uv.find_uv_bin.return_value = "/path/to/uv"
        mock_import_module.return_value = mock_uv
        install_packages(packages, stream_output=True, upgrade=True)
        mock_import_module.assert_called_once_with("uv")
        mock_check_call.assert_called_once_with(
            ["/path/to/uv", "pip", "install", "pytest", "requests", "--upgrade"],
            stdout=sys.stdout,
            stderr=sys.stderr,
        )
    @patch(
        "prefect._internal.installation.importlib.import_module",
        side_effect=ImportError("No module named 'uv'"),
    )
    @patch("subprocess.check_call")
    def test_install_packages_fallback_with_upgrade_and_stream_output(
        self, mock_check_call: MagicMock, mock_import_module: MagicMock
    ):
        """Both flags are honored on the pip fallback path as well."""
        packages = ["pytest", "requests"]
        install_packages(packages, stream_output=True, upgrade=True)
        mock_import_module.assert_called_once_with("uv")
        mock_check_call.assert_called_once_with(
            [sys.executable, "-m", "pip", "install", "pytest", "requests", "--upgrade"],
            stdout=sys.stdout,
            stderr=sys.stderr,
        )
class TestAinstallPackages:
    """Unit tests for `ainstall_packages` (async variant).

    `run_process` is replaced with an AsyncMock, so no subprocess is spawned.
    `@patch` decorators apply bottom-up: the innermost patch binds to the
    first mock parameter.
    """
    @patch("prefect._internal.installation.importlib.import_module")
    @patch("prefect.utilities.processutils.run_process", new_callable=AsyncMock)
    async def test_ainstall_packages_with_uv_available(
        self, mock_run_process: AsyncMock, mock_import_module: MagicMock
    ):
        """uv importable -> install goes through the uv binary."""
        packages = ["pytest", "requests"]
        mock_uv = Mock()
        mock_uv.find_uv_bin.return_value = "/path/to/uv"
        mock_import_module.return_value = mock_uv
        await ainstall_packages(packages)
        mock_import_module.assert_called_once_with("uv")
        mock_run_process.assert_called_once_with(
            ["/path/to/uv", "pip", "install", "pytest", "requests"],
            stream_output=False,
        )
    @patch(
        "prefect._internal.installation.importlib.import_module",
        side_effect=ImportError("No module named 'uv'"),
    )
    @patch("prefect.utilities.processutils.run_process", new_callable=AsyncMock)
    async def test_ainstall_packages_with_uv_unavailable_import_error(
        self, mock_run_process: AsyncMock, mock_import_module: MagicMock
    ):
        """ImportError when importing uv -> falls back to `python -m pip`."""
        packages = ["pytest", "requests"]
        await ainstall_packages(packages)
        mock_import_module.assert_called_once_with("uv")
        mock_run_process.assert_called_once_with(
            [sys.executable, "-m", "pip", "install", "pytest", "requests"],
            stream_output=False,
        )
    @patch(
        "prefect._internal.installation.importlib.import_module",
        side_effect=ModuleNotFoundError("No module named 'uv'"),
    )
    @patch("prefect.utilities.processutils.run_process", new_callable=AsyncMock)
    async def test_ainstall_packages_with_uv_unavailable_module_not_found_error(
        self, mock_run_process: AsyncMock, mock_import_module: MagicMock
    ):
        """ModuleNotFoundError when importing uv -> falls back to pip."""
        packages = ["pytest", "requests"]
        await ainstall_packages(packages)
        mock_import_module.assert_called_once_with("uv")
        mock_run_process.assert_called_once_with(
            [sys.executable, "-m", "pip", "install", "pytest", "requests"],
            stream_output=False,
        )
    @patch("prefect._internal.installation.importlib.import_module")
    @patch("prefect.utilities.processutils.run_process", new_callable=AsyncMock)
    async def test_ainstall_packages_with_uv_file_not_found_error(
        self, mock_run_process: AsyncMock, mock_import_module: MagicMock
    ):
        """uv imports but its binary is missing -> falls back to pip."""
        packages = ["pytest", "requests"]
        mock_uv = Mock()
        mock_uv.find_uv_bin.side_effect = FileNotFoundError
        mock_import_module.return_value = mock_uv
        await ainstall_packages(packages)
        mock_import_module.assert_called_once_with("uv")
        mock_run_process.assert_called_once_with(
            [sys.executable, "-m", "pip", "install", "pytest", "requests"],
            stream_output=False,
        )
    @patch("prefect._internal.installation.importlib.import_module")
    @patch("prefect.utilities.processutils.run_process", new_callable=AsyncMock)
    async def test_ainstall_packages_with_upgrade_flag(
        self, mock_run_process: AsyncMock, mock_import_module: MagicMock
    ):
        """upgrade=True appends `--upgrade` to the uv command."""
        packages = ["pytest", "requests"]
        mock_uv = Mock()
        mock_uv.find_uv_bin.return_value = "/path/to/uv"
        mock_import_module.return_value = mock_uv
        await ainstall_packages(packages, upgrade=True)
        mock_import_module.assert_called_once_with("uv")
        mock_run_process.assert_called_once_with(
            ["/path/to/uv", "pip", "install", "pytest", "requests", "--upgrade"],
            stream_output=False,
        )
    @patch("prefect._internal.installation.importlib.import_module")
    @patch("prefect.utilities.processutils.run_process", new_callable=AsyncMock)
    async def test_ainstall_packages_with_stream_output(
        self, mock_run_process: AsyncMock, mock_import_module: MagicMock
    ):
        """stream_output=True is forwarded to run_process."""
        packages = ["pytest", "requests"]
        mock_uv = Mock()
        mock_uv.find_uv_bin.return_value = "/path/to/uv"
        mock_import_module.return_value = mock_uv
        await ainstall_packages(packages, stream_output=True)
        mock_import_module.assert_called_once_with("uv")
        mock_run_process.assert_called_once_with(
            ["/path/to/uv", "pip", "install", "pytest", "requests"],
            stream_output=True,
        )
    @patch("prefect._internal.installation.importlib.import_module")
    @patch("prefect.utilities.processutils.run_process", new_callable=AsyncMock)
    async def test_ainstall_packages_with_upgrade_and_stream_output(
        self, mock_run_process: AsyncMock, mock_import_module: MagicMock
    ):
        """Both flags together: `--upgrade` plus streamed output (uv path)."""
        packages = ["pytest", "requests"]
        mock_uv = Mock()
        mock_uv.find_uv_bin.return_value = "/path/to/uv"
        mock_import_module.return_value = mock_uv
        await ainstall_packages(packages, stream_output=True, upgrade=True)
        mock_import_module.assert_called_once_with("uv")
        mock_run_process.assert_called_once_with(
            ["/path/to/uv", "pip", "install", "pytest", "requests", "--upgrade"],
            stream_output=True,
        )
    @patch(
        "prefect._internal.installation.importlib.import_module",
        side_effect=ImportError("No module named 'uv'"),
    )
    @patch("prefect.utilities.processutils.run_process", new_callable=AsyncMock)
    async def test_ainstall_packages_fallback_with_upgrade_and_stream_output(
        self, mock_run_process: AsyncMock, mock_import_module: MagicMock
    ):
        """Both flags are honored on the pip fallback path as well."""
        packages = ["pytest", "requests"]
        await ainstall_packages(packages, stream_output=True, upgrade=True)
        mock_import_module.assert_called_once_with("uv")
        mock_run_process.assert_called_once_with(
            [sys.executable, "-m", "pip", "install", "pytest", "requests", "--upgrade"],
            stream_output=True,
        )
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/_internal/test_installation.py",
"license": "Apache License 2.0",
"lines": 254,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/prefect/concurrency/_leases.py | import asyncio
import concurrent.futures
from contextlib import asynccontextmanager, contextmanager
from typing import AsyncGenerator, Generator
from uuid import UUID
from prefect._internal.concurrency.api import create_call
from prefect._internal.concurrency.cancellation import (
AsyncCancelScope,
WatcherThreadCancelScope,
)
from prefect._internal.concurrency.threads import get_global_loop
from prefect.client.orchestration import get_client
from prefect.logging.loggers import get_logger, get_run_logger
async def _lease_renewal_loop(
    lease_id: UUID,
    lease_duration: float,
) -> None:
    """
    Keep a concurrency lease alive by renewing it indefinitely.

    Each renewal is followed by a sleep of 3/4 of the lease duration so the
    next renewal always lands before the lease can expire. Runs until the
    surrounding task/call is cancelled.

    Args:
        lease_id: The ID of the lease to keep renewing.
        lease_duration: The duration of the lease in seconds.
    """
    # The renewal cadence is a loop invariant; compute it once up front.
    renewal_interval = lease_duration * 0.75
    async with get_client() as client:
        while True:
            await client.renew_concurrency_lease(
                lease_id=lease_id, lease_duration=lease_duration
            )
            await asyncio.sleep(renewal_interval)
@contextmanager
def maintain_concurrency_lease(
    lease_id: UUID,
    lease_duration: float,
    raise_on_lease_renewal_failure: bool = False,
    suppress_warnings: bool = False,
) -> Generator[None, None, None]:
    """
    Maintain a concurrency lease for the given lease ID.

    Args:
        lease_id: The ID of the lease to maintain.
        lease_duration: The duration of the lease in seconds.
        raise_on_lease_renewal_failure: A boolean specifying whether to cancel
            execution of the guarded block if the lease renewal fails.
        suppress_warnings: If True, log renewal failures at DEBUG instead of
            WARNING (only relevant when raise_on_lease_renewal_failure is
            False).
    """
    # Start a loop to renew the lease on the global event loop to avoid blocking the main thread
    global_loop = get_global_loop()
    lease_renewal_call = create_call(
        _lease_renewal_loop,
        lease_id,
        lease_duration,
    )
    global_loop.submit(lease_renewal_call)
    with WatcherThreadCancelScope() as cancel_scope:

        def handle_lease_renewal_failure(future: concurrent.futures.Future[None]):
            # Invoked when the renewal loop finishes for any reason.
            if future.cancelled():
                # Cancellation is the expected way for the loop to stop
                return
            exc = future.exception()
            if exc:
                try:
                    # Use a run logger if available
                    logger = get_run_logger()
                except Exception:
                    logger = get_logger("concurrency")
                if raise_on_lease_renewal_failure:
                    logger.error(
                        "Concurrency lease renewal failed - slots are no longer reserved. Terminating execution to prevent over-allocation."
                    )
                    # BUGFIX: this was `assert cancel_scope.cancel()`, which is
                    # stripped under `python -O`, silently skipping the
                    # cancellation. Call cancel() directly, matching the async
                    # variant below.
                    cancel_scope.cancel()
                else:
                    if suppress_warnings:
                        logger.debug(
                            "Concurrency lease renewal failed - slots are no longer reserved. Execution will continue, but concurrency limits may be exceeded."
                        )
                    else:
                        logger.warning(
                            "Concurrency lease renewal failed - slots are no longer reserved. Execution will continue, but concurrency limits may be exceeded."
                        )

        lease_renewal_call.future.add_done_callback(handle_lease_renewal_failure)
        try:
            yield
        finally:
            # Cancel the lease renewal loop
            lease_renewal_call.cancel()
@asynccontextmanager
async def amaintain_concurrency_lease(
    lease_id: UUID,
    lease_duration: float,
    raise_on_lease_renewal_failure: bool = False,
    suppress_warnings: bool = False,
) -> AsyncGenerator[None, None]:
    """
    Maintain a concurrency lease for the given lease ID.

    Async counterpart of `maintain_concurrency_lease`; the renewal loop runs
    as a task on the current event loop.

    Args:
        lease_id: The ID of the lease to maintain.
        lease_duration: The duration of the lease in seconds.
        raise_on_lease_renewal_failure: A boolean specifying whether to cancel
            execution of the guarded block if the lease renewal fails.
        suppress_warnings: If True, log renewal failures at DEBUG instead of
            WARNING (only relevant when raise_on_lease_renewal_failure is
            False).
    """
    lease_renewal_task = asyncio.create_task(
        _lease_renewal_loop(lease_id, lease_duration)
    )
    with AsyncCancelScope() as cancel_scope:

        def handle_lease_renewal_failure(task: asyncio.Task[None]):
            # Invoked when the renewal task finishes for any reason.
            if task.cancelled():
                # Cancellation is the expected way for this loop to stop
                return
            exc = task.exception()
            if exc:
                try:
                    # Use a run logger if available
                    logger = get_run_logger()
                except Exception:
                    logger = get_logger("concurrency")
                if raise_on_lease_renewal_failure:
                    logger.error(
                        "Concurrency lease renewal failed - slots are no longer reserved. Terminating execution to prevent over-allocation."
                    )
                    cancel_scope.cancel()
                else:
                    if suppress_warnings:
                        logger.debug(
                            "Concurrency lease renewal failed - slots are no longer reserved. Execution will continue, but concurrency limits may be exceeded."
                        )
                    else:
                        logger.warning(
                            "Concurrency lease renewal failed - slots are no longer reserved. Execution will continue, but concurrency limits may be exceeded."
                        )

        # Add a callback to stop execution if the lease renewal fails and strict is True
        lease_renewal_task.add_done_callback(handle_lease_renewal_failure)
        try:
            yield
        finally:
            lease_renewal_task.cancel()
            try:
                await lease_renewal_task
            # CancelledError derives from BaseException (not Exception) on
            # modern Python, so both must be listed here.
            except (asyncio.CancelledError, Exception):
                # Handling for errors will be done in the callback
                pass
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/concurrency/_leases.py",
"license": "Apache License 2.0",
"lines": 138,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:integration-tests/test_concurrency_leases.py | """
Test concurrency leases with filesystem lease storage.
This test is designed to be run in a GitHub Actions workflow.
If you want to run these tests locally, be sure to set these environment variables in the test process and the server process:
PREFECT_SERVER_CONCURRENCY_LEASE_STORAGE=prefect.server.concurrency.lease_storage.filesystem
PREFECT_SERVER_SERVICES_REPOSSESSOR_LOOP_SECONDS=1
"""
import asyncio
import time
import uuid
from datetime import timedelta
from multiprocessing import Process
from typing import Any
from unittest import mock
import pytest
import prefect
from prefect.client.schemas.actions import GlobalConcurrencyLimitCreate
from prefect.client.schemas.objects import GlobalConcurrencyLimit
from prefect.concurrency.asyncio import concurrency
from prefect.concurrency.sync import concurrency as sync_concurrency
from prefect.server.concurrency.lease_storage import get_concurrency_lease_storage
from prefect.server.concurrency.lease_storage.filesystem import (
ConcurrencyLeaseStorage as FileSystemConcurrencyLeaseStorage,
)
@pytest.fixture
async def concurrency_limit():
    """Create a uniquely-named global concurrency limit with limit=1 and
    return the server's view of it."""
    async with prefect.get_client() as client:
        name = f"test-{uuid.uuid4()}"
        await client.create_global_concurrency_limit(
            concurrency_limit=GlobalConcurrencyLimitCreate(name=name, limit=1)
        )
        limit = await client.read_global_concurrency_limit_by_name(name=name)
        return limit
async def function_that_uses_async_concurrency_and_goes_belly_up(
    concurrency_limit_name: str,
):
    """Occupy a slot under a strict 60s lease, then linger far past it.

    `asyncio.sleep` is patched so any sleep inside the concurrency machinery
    (e.g. the lease renewal loop) returns after 0.1s; the final lingering
    sleep deliberately uses the unpatched function.
    """
    original_sleep = asyncio.sleep

    # Mock sleep so that the lease is renewed more quickly
    async def mock_sleep(*args: Any, **kwargs: Any):
        await original_sleep(0.1)

    with mock.patch("asyncio.sleep", mock_sleep):
        async with concurrency(
            concurrency_limit_name, occupy=1, lease_duration=60, strict=True
        ):
            await original_sleep(120)
def function_that_uses_sync_concurrency_and_goes_belly_up(
    concurrency_limit_name: str,
):
    """Sync variant: occupy a slot under a strict 60s lease, then linger.

    `asyncio.sleep` is patched so the background renewal machinery ticks
    every 0.1s; the lingering itself uses `time.sleep`.
    """
    original_sleep = asyncio.sleep

    # Mock sleep so that the lease is renewed more quickly
    async def mock_sleep(*args: Any, **kwargs: Any):
        await original_sleep(0.1)

    with mock.patch("asyncio.sleep", mock_sleep):
        with sync_concurrency(
            concurrency_limit_name, occupy=1, lease_duration=60, strict=True
        ):
            # Use a bunch a little sleeps to make this easier to interrupt
            for _ in range(120):
                time.sleep(1)
def wrapper_func(concurrency_limit_name: str):
    """Process entry point: drive the doomed async helper to completion."""
    doomed = function_that_uses_async_concurrency_and_goes_belly_up(
        concurrency_limit_name
    )
    asyncio.run(doomed)
async def test_async_concurrency_with_leases(concurrency_limit: GlobalConcurrencyLimit):
    """End-to-end: async `concurrency` creates a lease that is renewed
    periodically, and an expired lease is repossessed, freeing its slots."""
    lease_storage = get_concurrency_lease_storage()
    assert isinstance(lease_storage, FileSystemConcurrencyLeaseStorage), (
        "Set PREFECT_SERVER_CONCURRENCY_LEASE_STORAGE=prefect.server.concurrency.lease_storage.filesystem to run these tests"
    )
    # Start a process doomed to fail
    process = Process(
        target=wrapper_func,
        args=(concurrency_limit.name,),
    )
    process.start()
    # Wait for lease to be created
    active_lease = None
    while not active_lease:
        await asyncio.sleep(1)
        active_lease_ids = await lease_storage.read_active_lease_ids()
        for lease_id in active_lease_ids:
            lease = await lease_storage.read_lease(lease_id)
            if lease and lease.resource_ids == [concurrency_limit.id]:
                active_lease = lease
                # Stop scanning once the matching lease is found (previously
                # missing; the sync variant of this test breaks here)
                break
    assert active_lease
    updated_lease = await lease_storage.read_lease(active_lease.id)
    assert updated_lease
    # Wait for lease to be renewed
    while updated_lease.expiration == active_lease.expiration:
        await asyncio.sleep(1)
        updated_lease = await lease_storage.read_lease(active_lease.id)
        assert updated_lease
    # Verify that the lease is renewed periodically
    assert updated_lease.expiration > active_lease.expiration
    # Nothing personal
    process.kill()
    # Check that the concurrency limit still has a slot taken
    async with prefect.get_client() as client:
        limit = await client.read_global_concurrency_limit_by_name(
            name=concurrency_limit.name
        )
        assert limit.active_slots == 1
    # Force lease to expire immediately
    await lease_storage.renew_lease(active_lease.id, timedelta(seconds=0))
    # Wait for the lease to be revoked
    while (await lease_storage.read_expired_lease_ids()) != []:
        await asyncio.sleep(1)
    # Check that the concurrency limit has no slots taken after the lease is revoked
    async with prefect.get_client() as client:
        limit = await client.read_global_concurrency_limit_by_name(
            name=concurrency_limit.name
        )
        assert limit.limit == 1
        assert limit.active_slots == 0
async def test_async_concurrency_with_lease_renewal_failure(
    concurrency_limit: GlobalConcurrencyLimit,
):
    """When a strict lease is revoked server-side, the async holder process
    terminates with exit code 1 instead of sleeping out its full duration."""
    lease_storage = get_concurrency_lease_storage()
    assert isinstance(lease_storage, FileSystemConcurrencyLeaseStorage), (
        "Set PREFECT_SERVER_CONCURRENCY_LEASE_STORAGE=prefect.server.concurrency.lease_storage.filesystem to run these tests"
    )
    # Start a process with some bad luck
    process = Process(
        target=wrapper_func,
        args=(concurrency_limit.name,),
    )
    process.start()
    # Wait for lease to be created
    active_lease = None
    while not active_lease:
        await asyncio.sleep(1)
        active_lease_ids = await lease_storage.read_active_lease_ids()
        for lease_id in active_lease_ids:
            lease = await lease_storage.read_lease(lease_id)
            if lease and lease.resource_ids == [concurrency_limit.id]:
                active_lease = lease
                break
    assert active_lease
    # Revoke the lease through the API to avoid a cross-process filesystem race
    # where a concurrent renew_lease can recreate the file deleted by revoke_lease
    async with prefect.get_client() as client:
        await client.release_concurrency_slots_with_lease(lease_id=active_lease.id)
    # Wait for the process to exit cleanly before the configured sleep time
    process.join(timeout=10)
    assert process.exitcode == 1
async def test_sync_concurrency_with_leases(concurrency_limit: GlobalConcurrencyLimit):
    """End-to-end: sync `concurrency` creates a lease that is renewed
    periodically, and an expired lease is repossessed, freeing its slots."""
    lease_storage = get_concurrency_lease_storage()
    assert isinstance(lease_storage, FileSystemConcurrencyLeaseStorage), (
        "Set PREFECT_SERVER_CONCURRENCY_LEASE_STORAGE=prefect.server.concurrency.lease_storage.filesystem to run these tests"
    )
    # Start a process doomed to fail
    process = Process(
        target=function_that_uses_sync_concurrency_and_goes_belly_up,
        args=(concurrency_limit.name,),
    )
    process.start()
    # Wait for lease to be created
    active_lease = None
    while not active_lease:
        await asyncio.sleep(1)
        active_lease_ids = await lease_storage.read_active_lease_ids()
        for lease_id in active_lease_ids:
            lease = await lease_storage.read_lease(lease_id)
            if lease and lease.resource_ids == [concurrency_limit.id]:
                active_lease = lease
                break
    assert active_lease
    updated_lease = active_lease
    # Wait for lease to be renewed
    while updated_lease.expiration == active_lease.expiration:
        updated_lease = await lease_storage.read_lease(active_lease.id)
        assert updated_lease
        await asyncio.sleep(1)
    # Verify that the lease is renewed periodically
    assert updated_lease.expiration > active_lease.expiration
    # Good night, sweet prince
    process.kill()
    # Check that the concurrency limit still has a slot taken
    async with prefect.get_client() as client:
        limit = await client.read_global_concurrency_limit_by_name(
            name=concurrency_limit.name
        )
        assert limit.active_slots == 1
    # Force lease to expire immediately
    await lease_storage.renew_lease(active_lease.id, timedelta(seconds=0))
    # Wait for the lease to be revoked
    while (await lease_storage.read_expired_lease_ids()) != []:
        await asyncio.sleep(1)
    # Check that the concurrency limit has no slots taken after the lease is revoked
    async with prefect.get_client() as client:
        limit = await client.read_global_concurrency_limit_by_name(
            name=concurrency_limit.name
        )
        assert limit.limit == 1
        assert limit.active_slots == 0
async def test_sync_concurrency_with_lease_renewal_failure(
    concurrency_limit: GlobalConcurrencyLimit,
):
    """When a strict lease is revoked server-side, the sync holder process
    terminates with exit code 1 instead of sleeping out its full duration."""
    lease_storage = get_concurrency_lease_storage()
    assert isinstance(lease_storage, FileSystemConcurrencyLeaseStorage), (
        "Set PREFECT_SERVER_CONCURRENCY_LEASE_STORAGE=prefect.server.concurrency.lease_storage.filesystem to run these tests"
    )
    # Start a process with some bad luck
    process = Process(
        target=function_that_uses_sync_concurrency_and_goes_belly_up,
        args=(concurrency_limit.name,),
    )
    process.start()
    # Wait for lease to be created
    active_lease = None
    while not active_lease:
        await asyncio.sleep(1)
        active_lease_ids = await lease_storage.read_active_lease_ids()
        for lease_id in active_lease_ids:
            lease = await lease_storage.read_lease(lease_id)
            if lease and lease.resource_ids == [concurrency_limit.id]:
                active_lease = lease
                break
    assert active_lease
    # Revoke the lease through the API to avoid a cross-process filesystem race
    # where a concurrent renew_lease can recreate the file deleted by revoke_lease
    async with prefect.get_client() as client:
        await client.release_concurrency_slots_with_lease(lease_id=active_lease.id)
    # Wait for the process to exit cleanly before the configured sleep time
    process.join(timeout=10)
    assert process.exitcode == 1
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "integration-tests/test_concurrency_leases.py",
"license": "Apache License 2.0",
"lines": 225,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/prefect/server/concurrency/lease_storage/filesystem.py | from __future__ import annotations
import json
import os
import tempfile
from datetime import datetime, timedelta, timezone
from pathlib import Path
from typing import Any, TypedDict
from uuid import UUID
import anyio
from prefect.server.concurrency.lease_storage import (
ConcurrencyLeaseHolder,
ConcurrencyLimitLeaseMetadata,
)
from prefect.server.concurrency.lease_storage import (
ConcurrencyLeaseStorage as _ConcurrencyLeaseStorage,
)
from prefect.server.utilities.leasing import ResourceLease
from prefect.settings.context import get_current_settings
class _LeaseFile(TypedDict):
    """On-disk JSON representation of a concurrency lease."""

    # Lease UUID as a string
    id: str
    # UUIDs (as strings) of the concurrency limits held by this lease
    resource_ids: list[str]
    # Serialized lease metadata (slots + optional holder), or None
    metadata: dict[str, Any] | None
    # ISO-8601 timestamps
    expiration: str
    created_at: str
class ConcurrencyLeaseStorage(_ConcurrencyLeaseStorage):
"""
A file-based concurrency lease storage implementation that stores leases on disk.
"""
    def __init__(self, storage_path: Path | None = None):
        """Initialize the storage, defaulting to `<PREFECT_HOME>/concurrency_leases`."""
        prefect_home = get_current_settings().home
        self.storage_path: Path = Path(
            storage_path or prefect_home / "concurrency_leases"
        )
    def _ensure_storage_path(self) -> None:
        """Ensure the storage path exists, creating it if necessary."""
        # exist_ok=True makes this idempotent when the directory already exists
        self.storage_path.mkdir(parents=True, exist_ok=True)
def _lease_file_path(self, lease_id: UUID) -> Path:
return self.storage_path / f"{lease_id}.json"
    def _expiration_index_path(self) -> anyio.Path:
        # Single index file mapping lease ID -> ISO expiration, stored
        # alongside the per-lease files; returned as an anyio.Path so callers
        # can do async file I/O on it.
        return anyio.Path(self.storage_path / "expirations.json")
def _atomic_write_json(self, file_path: Path, data: Any) -> None:
"""
Atomically write JSON data to a file.
Uses write-to-temp-then-rename pattern to ensure readers never see
partial/corrupted data. This prevents race conditions when multiple
processes read and write the same file.
"""
self._ensure_storage_path()
# Create temp file in same directory to ensure atomic rename works
# (rename across filesystems is not atomic)
fd, temp_path = tempfile.mkstemp(
dir=self.storage_path, suffix=".tmp", prefix=".lease_"
)
try:
with os.fdopen(fd, "w") as f:
json.dump(data, f)
# Atomic rename - readers will either see old file or new file,
# never partial content
os.replace(temp_path, file_path)
except Exception:
# Clean up temp file on error
try:
os.unlink(temp_path)
except OSError:
pass
raise
    async def _load_expiration_index(self) -> dict[str, str]:
        """Load the expiration index from disk.

        Returns an empty mapping when the index file is missing or cannot be
        parsed — a corrupt index is treated as empty rather than fatal.
        """
        expiration_file = self._expiration_index_path()
        if not await expiration_file.exists():
            return {}
        try:
            return json.loads(await expiration_file.read_text())
        except (json.JSONDecodeError, KeyError, ValueError):
            # Discard a partially written or corrupted index file.
            return {}
def _save_expiration_index(self, index: dict[str, str]) -> None:
"""Save the expiration index to disk atomically."""
expiration_file = Path(self._expiration_index_path())
self._atomic_write_json(expiration_file, index)
async def _update_expiration_index(
self, lease_id: UUID, expiration: datetime
) -> None:
"""Update a single lease's expiration in the index."""
index = await self._load_expiration_index()
index[str(lease_id)] = expiration.isoformat()
self._save_expiration_index(index)
async def _remove_from_expiration_index(self, lease_id: UUID) -> None:
"""Remove a lease from the expiration index."""
index = await self._load_expiration_index()
index.pop(str(lease_id), None)
self._save_expiration_index(index)
def _serialize_lease(
self, lease: ResourceLease[ConcurrencyLimitLeaseMetadata]
) -> _LeaseFile:
metadata_dict: dict[str, Any] | None = None
if lease.metadata:
metadata_dict = {"slots": lease.metadata.slots}
if lease.metadata.holder is not None:
metadata_dict["holder"] = lease.metadata.holder.model_dump(mode="json")
return {
"id": str(lease.id),
"resource_ids": [str(rid) for rid in lease.resource_ids],
"metadata": metadata_dict,
"expiration": lease.expiration.isoformat(),
"created_at": lease.created_at.isoformat(),
}
def _deserialize_lease(
self, data: _LeaseFile
) -> ResourceLease[ConcurrencyLimitLeaseMetadata]:
lease_id = UUID(data["id"])
resource_ids = [UUID(rid) for rid in data["resource_ids"]]
metadata = None
if data["metadata"]:
metadata = ConcurrencyLimitLeaseMetadata(
slots=data["metadata"]["slots"], holder=data["metadata"].get("holder")
)
expiration = datetime.fromisoformat(data["expiration"])
created_at = datetime.fromisoformat(data["created_at"])
lease = ResourceLease(
id=lease_id,
resource_ids=resource_ids,
metadata=metadata,
expiration=expiration,
created_at=created_at,
)
return lease
async def create_lease(
self,
resource_ids: list[UUID],
ttl: timedelta,
metadata: ConcurrencyLimitLeaseMetadata | None = None,
) -> ResourceLease[ConcurrencyLimitLeaseMetadata]:
expiration = datetime.now(timezone.utc) + ttl
lease = ResourceLease(
resource_ids=resource_ids, metadata=metadata, expiration=expiration
)
lease_file = self._lease_file_path(lease.id)
lease_data = self._serialize_lease(lease)
# Use atomic write to prevent race conditions with concurrent readers
self._atomic_write_json(lease_file, lease_data)
# Update expiration index
await self._update_expiration_index(lease.id, expiration)
return lease
async def read_lease(
self, lease_id: UUID
) -> ResourceLease[ConcurrencyLimitLeaseMetadata] | None:
lease_file = self._lease_file_path(lease_id)
if not lease_file.exists():
return None
try:
with open(lease_file, "r") as f:
lease_data = json.load(f)
lease = self._deserialize_lease(lease_data)
return lease
except (json.JSONDecodeError, KeyError, ValueError):
# Clean up corrupted lease file. With atomic writes in place,
# corruption indicates a real issue (not a race condition),
# so it's safe to clean up.
lease_file.unlink(missing_ok=True)
await self._remove_from_expiration_index(lease_id)
return None
async def renew_lease(self, lease_id: UUID, ttl: timedelta) -> bool:
"""
Atomically renew a concurrency lease by updating its expiration.
Checks if the lease exists and updates both the lease file and index,
preventing race conditions from creating orphaned index entries.
Args:
lease_id: The ID of the lease to renew
ttl: The new time-to-live duration
Returns:
True if the lease was renewed, False if it didn't exist
"""
lease_file = self._lease_file_path(lease_id)
if not lease_file.exists():
# Clean up any orphaned index entry
await self._remove_from_expiration_index(lease_id)
return False
try:
with open(lease_file, "r") as f:
lease_data = json.load(f)
# Update expiration time
new_expiration = datetime.now(timezone.utc) + ttl
lease_data["expiration"] = new_expiration.isoformat()
# Use atomic write to prevent race conditions with concurrent readers
self._atomic_write_json(lease_file, lease_data)
# Verify file still exists after write (could have been deleted)
if not lease_file.exists():
# Lease was deleted during update - clean up index
await self._remove_from_expiration_index(lease_id)
return False
# Update expiration index
await self._update_expiration_index(lease_id, new_expiration)
return True
except (json.JSONDecodeError, KeyError, ValueError):
# Clean up corrupted lease file
lease_file.unlink(missing_ok=True)
await self._remove_from_expiration_index(lease_id)
return False
async def revoke_lease(self, lease_id: UUID) -> None:
lease_file = self._lease_file_path(lease_id)
lease_file.unlink(missing_ok=True)
# Remove from expiration index
await self._remove_from_expiration_index(lease_id)
async def read_active_lease_ids(
self, limit: int = 100, offset: int = 0
) -> list[UUID]:
now = datetime.now(timezone.utc)
expiration_index = await self._load_expiration_index()
# Collect all active leases first
all_active: list[UUID] = []
for lease_id_str, expiration_str in expiration_index.items():
try:
lease_id = UUID(lease_id_str)
expiration = datetime.fromisoformat(expiration_str)
if expiration > now:
all_active.append(lease_id)
except (ValueError, TypeError):
continue
# Apply offset and limit
return all_active[offset : offset + limit]
async def read_expired_lease_ids(self, limit: int = 100) -> list[UUID]:
expired_leases: list[UUID] = []
now = datetime.now(timezone.utc)
expiration_index = await self._load_expiration_index()
for lease_id_str, expiration_str in expiration_index.items():
if len(expired_leases) >= limit:
break
try:
lease_id = UUID(lease_id_str)
expiration = datetime.fromisoformat(expiration_str)
if expiration < now:
expired_leases.append(lease_id)
except (ValueError, TypeError):
continue
return expired_leases
async def list_holders_for_limit(
self, limit_id: UUID
) -> list[tuple[UUID, ConcurrencyLeaseHolder]]:
"""List all holders for a given concurrency limit."""
now = datetime.now(timezone.utc)
holders_with_leases: list[tuple[UUID, ConcurrencyLeaseHolder]] = []
# Get all active lease IDs - need to paginate through all
all_active_lease_ids: list[UUID] = []
offset = 0
batch_size = 100
while True:
batch = await self.read_active_lease_ids(limit=batch_size, offset=offset)
if not batch:
break
all_active_lease_ids.extend(batch)
if len(batch) < batch_size:
break
offset += batch_size
active_lease_ids = all_active_lease_ids
for lease_id in active_lease_ids:
lease = await self.read_lease(lease_id)
if (
lease
and limit_id in lease.resource_ids
and lease.expiration > now
and lease.metadata
and lease.metadata.holder
):
holders_with_leases.append((lease.id, lease.metadata.holder))
return holders_with_leases
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/server/concurrency/lease_storage/filesystem.py",
"license": "Apache License 2.0",
"lines": 265,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/prefect/settings/models/server/concurrency.py | from typing import ClassVar
from pydantic import Field
from pydantic_settings import SettingsConfigDict
from prefect.settings.base import PrefectBaseSettings, build_settings_config
class ServerConcurrencySettings(PrefectBaseSettings):
    """Settings controlling the Prefect server's concurrency limit behavior
    (settings path ``server.concurrency``)."""
    model_config: ClassVar[SettingsConfigDict] = build_settings_config(
        ("server", "concurrency")
    )
    # Dotted module path of the lease storage backend; defaults to the
    # in-memory implementation.
    lease_storage: str = Field(
        default="prefect.server.concurrency.lease_storage.memory",
        description="The module to use for storing concurrency limit leases.",
    )
    initial_deployment_lease_duration: float = Field(
        default=300.0,
        ge=30.0,  # Minimum 30 seconds
        le=3600.0,  # Maximum 1 hour
        description="Initial duration for deployment concurrency lease in seconds.",
    )
    maximum_concurrency_slot_wait_seconds: float = Field(
        default=30,
        ge=0,
        description="The maximum number of seconds to wait before retrying when a concurrency slot cannot be acquired.",
    )
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/settings/models/server/concurrency.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/prefect:tests/server/concurrency/test_filesystem_lease_storage.py | import json
import tempfile
from datetime import datetime, timedelta, timezone
from pathlib import Path
from uuid import UUID, uuid4
import pytest
from prefect.server.concurrency.lease_storage import ConcurrencyLimitLeaseMetadata
from prefect.server.concurrency.lease_storage.filesystem import (
ConcurrencyLeaseStorage,
)
from prefect.types._concurrency import ConcurrencyLeaseHolder
class TestFilesystemConcurrencyLeaseStorage:
    """Tests for the file-based concurrency lease storage backend."""

    # -- helpers -------------------------------------------------------------

    def _lease_files(self, storage: ConcurrencyLeaseStorage) -> list[Path]:
        """Return lease files on disk, excluding the shared expiration index.

        Extracted because nearly every test needs this filter; the index file
        lives alongside the per-lease JSON files.
        """
        return [
            f
            for f in storage.storage_path.glob("*.json")
            if f.name != "expirations.json"
        ]

    # -- fixtures ------------------------------------------------------------

    @pytest.fixture
    def temp_dir(self):
        with tempfile.TemporaryDirectory() as temp_dir:
            yield Path(temp_dir)

    @pytest.fixture
    def storage(self, temp_dir: Path) -> ConcurrencyLeaseStorage:
        return ConcurrencyLeaseStorage(storage_path=temp_dir)

    @pytest.fixture
    def sample_resource_ids(self) -> list[UUID]:
        return [uuid4(), uuid4()]

    @pytest.fixture
    def sample_metadata(self) -> ConcurrencyLimitLeaseMetadata:
        return ConcurrencyLimitLeaseMetadata(slots=5)

    @pytest.fixture
    def sample_metadata_with_holder(self) -> ConcurrencyLimitLeaseMetadata:
        return ConcurrencyLimitLeaseMetadata(
            slots=3,
            holder=ConcurrencyLeaseHolder(type="task_run", id=uuid4()),
        )

    # -- create / read -------------------------------------------------------

    async def test_create_lease_without_metadata(
        self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
    ):
        ttl = timedelta(minutes=5)
        lease = await storage.create_lease(sample_resource_ids, ttl)
        assert lease.resource_ids == sample_resource_ids
        assert lease.metadata is None
        # Verify a lease file was created
        assert len(self._lease_files(storage)) == 1

    async def test_create_lease_with_metadata(
        self,
        storage: ConcurrencyLeaseStorage,
        sample_resource_ids: list[UUID],
        sample_metadata: ConcurrencyLimitLeaseMetadata,
    ):
        ttl = timedelta(minutes=5)
        lease = await storage.create_lease(sample_resource_ids, ttl, sample_metadata)
        assert lease.resource_ids == sample_resource_ids
        assert lease.metadata == sample_metadata
        # Verify lease file was created with correct data
        lease_files = self._lease_files(storage)
        assert len(lease_files) == 1
        with open(lease_files[0], "r") as f:
            data = json.load(f)
            assert data["metadata"]["slots"] == 5
            assert len(data["resource_ids"]) == 2

    async def test_create_lease_with_holder(
        self,
        storage: ConcurrencyLeaseStorage,
        sample_resource_ids: list[UUID],
        sample_metadata_with_holder: ConcurrencyLimitLeaseMetadata,
    ):
        ttl = timedelta(minutes=5)
        lease = await storage.create_lease(
            sample_resource_ids, ttl, sample_metadata_with_holder
        )
        assert lease.resource_ids == sample_resource_ids
        assert lease.metadata is not None
        assert lease.metadata == sample_metadata_with_holder
        assert lease.metadata.holder is not None
        assert lease.metadata.holder.model_dump() == {
            "type": "task_run",
            "id": lease.metadata.holder.id,
        }
        # Verify the serialized holder round-trips to disk as strings
        lease_files = self._lease_files(storage)
        assert len(lease_files) == 1
        with open(lease_files[0], "r") as f:
            data = json.load(f)
            assert data["metadata"]["slots"] == 3
            assert data["metadata"]["holder"] == {
                "type": "task_run",
                "id": str(lease.metadata.holder.id),
            }

    async def test_read_lease_existing(
        self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
    ):
        ttl = timedelta(minutes=5)
        await storage.create_lease(sample_resource_ids, ttl)
        # Recover the lease ID from the on-disk filename
        lease_id = UUID(self._lease_files(storage)[0].stem)
        read_lease = await storage.read_lease(lease_id)
        assert read_lease is not None
        assert read_lease.resource_ids == sample_resource_ids
        assert read_lease.metadata is None

    async def test_read_lease_with_holder(
        self,
        storage: ConcurrencyLeaseStorage,
        sample_resource_ids: list[UUID],
        sample_metadata_with_holder: ConcurrencyLimitLeaseMetadata,
    ):
        ttl = timedelta(minutes=5)
        created_lease = await storage.create_lease(
            sample_resource_ids, ttl, sample_metadata_with_holder
        )
        read_lease = await storage.read_lease(created_lease.id)
        assert read_lease is not None
        assert read_lease.resource_ids == sample_resource_ids
        assert read_lease.metadata is not None
        assert read_lease.metadata.slots == 3
        assert read_lease.metadata.holder is not None
        assert read_lease.metadata.holder.model_dump() == {
            "type": "task_run",
            "id": read_lease.metadata.holder.id,
        }

    async def test_read_lease_non_existing(self, storage: ConcurrencyLeaseStorage):
        non_existing_id = uuid4()
        lease = await storage.read_lease(non_existing_id)
        assert lease is None

    async def test_read_lease_expired(
        self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
    ):
        # Create an already-expired lease
        expired_ttl = timedelta(seconds=-1)
        await storage.create_lease(sample_resource_ids, expired_ttl)
        lease_id = UUID(self._lease_files(storage)[0].stem)
        # Reading should still return the lease; expiry is not enforced on read
        read_lease = await storage.read_lease(lease_id)
        assert read_lease is not None
        assert read_lease.expiration < datetime.now(timezone.utc)

    async def test_read_lease_corrupted_file(
        self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
    ):
        # Create a valid lease first
        ttl = timedelta(minutes=5)
        await storage.create_lease(sample_resource_ids, ttl)
        # Get the lease ID and corrupt the file
        lease_files = self._lease_files(storage)
        lease_id = UUID(lease_files[0].stem)
        with open(lease_files[0], "w") as f:
            f.write("invalid json content")
        # Reading should return None and clean up the corrupted file
        read_lease = await storage.read_lease(lease_id)
        assert read_lease is None
        # File should be cleaned up
        assert len(self._lease_files(storage)) == 0

    # -- renew / revoke ------------------------------------------------------

    async def test_renew_lease(
        self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
    ):
        ttl = timedelta(minutes=5)
        await storage.create_lease(sample_resource_ids, ttl)
        # Get the lease ID and original expiration
        lease_file = self._lease_files(storage)[0]
        lease_id = UUID(lease_file.stem)
        with open(lease_file, "r") as f:
            original_data = json.load(f)
            original_expiration = datetime.fromisoformat(original_data["expiration"])
        # Renew the lease
        new_ttl = timedelta(minutes=10)
        renewed = await storage.renew_lease(lease_id, new_ttl)
        assert renewed is True
        # Check that expiration was updated
        with open(lease_file, "r") as f:
            updated_data = json.load(f)
            new_expiration = datetime.fromisoformat(updated_data["expiration"])
            assert new_expiration > original_expiration

    async def test_renew_lease_non_existing(self, storage: ConcurrencyLeaseStorage):
        non_existing_id = uuid4()
        renewed = await storage.renew_lease(non_existing_id, timedelta(minutes=5))
        assert renewed is False

    async def test_renew_lease_corrupted_file(
        self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
    ):
        # Create a valid lease first
        ttl = timedelta(minutes=5)
        await storage.create_lease(sample_resource_ids, ttl)
        # Get the lease ID and corrupt the file
        lease_files = self._lease_files(storage)
        lease_id = UUID(lease_files[0].stem)
        with open(lease_files[0], "w") as f:
            f.write("invalid json content")
        # Renewing should clean up the corrupted file and return False
        renewed = await storage.renew_lease(lease_id, timedelta(minutes=10))
        assert renewed is False
        # File should be cleaned up
        assert len(self._lease_files(storage)) == 0

    async def test_revoke_lease(
        self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
    ):
        ttl = timedelta(minutes=5)
        await storage.create_lease(sample_resource_ids, ttl)
        # Verify file exists
        lease_files = self._lease_files(storage)
        assert len(lease_files) == 1
        lease_id = UUID(lease_files[0].stem)
        # Release the lease
        await storage.revoke_lease(lease_id)
        # File should be deleted
        assert len(self._lease_files(storage)) == 0

    async def test_revoke_lease_non_existing(self, storage: ConcurrencyLeaseStorage):
        non_existing_id = uuid4()
        # Should not raise an exception
        await storage.revoke_lease(non_existing_id)

    # -- expired lease scans -------------------------------------------------

    async def test_read_expired_lease_ids_no_expired(
        self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
    ):
        ttl = timedelta(minutes=5)
        await storage.create_lease(sample_resource_ids, ttl)
        expired_ids = await storage.read_expired_lease_ids()
        assert expired_ids == []

    async def test_read_expired_lease_ids_with_expired(
        self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
    ):
        expired_ttl = timedelta(seconds=-1)
        await storage.create_lease(sample_resource_ids, expired_ttl)
        expired_ids = await storage.read_expired_lease_ids()
        assert len(expired_ids) == 1
        # Verify the lease ID matches the on-disk filename
        lease_files = self._lease_files(storage)
        assert len(lease_files) == 1
        assert expired_ids[0] == UUID(lease_files[0].stem)

    async def test_read_expired_lease_ids_with_limit(
        self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
    ):
        expired_ttl = timedelta(seconds=-1)
        await storage.create_lease(sample_resource_ids, expired_ttl)
        await storage.create_lease(sample_resource_ids, expired_ttl)
        await storage.create_lease(sample_resource_ids, expired_ttl)
        expired_ids = await storage.read_expired_lease_ids(limit=2)
        assert len(expired_ids) == 2

    async def test_read_expired_lease_ids_mixed_expiration(
        self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
    ):
        expired_ttl = timedelta(seconds=-1)
        valid_ttl = timedelta(minutes=5)
        await storage.create_lease(sample_resource_ids, expired_ttl)
        await storage.create_lease(sample_resource_ids, valid_ttl)
        await storage.create_lease(sample_resource_ids, expired_ttl)
        expired_ids = await storage.read_expired_lease_ids()
        assert len(expired_ids) == 2

    async def test_read_expired_lease_ids_corrupted_files(
        self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
    ):
        # Create a valid lease and a corrupted file
        ttl = timedelta(minutes=5)
        await storage.create_lease(sample_resource_ids, ttl)
        # Create a corrupted file
        corrupted_file = storage.storage_path / f"{uuid4()}.json"
        with open(corrupted_file, "w") as f:
            f.write("invalid json content")
        # Should return no expired leases (corrupted files are ignored)
        expired_ids = await storage.read_expired_lease_ids()
        assert expired_ids == []
        # Corrupted file still exists (only cleaned up when accessed)
        assert corrupted_file.exists()

    # -- storage lifecycle ---------------------------------------------------

    async def test_storage_path_creation(self, temp_dir: Path):
        # Test that storage path is created only when needed
        storage_path = temp_dir / "nested" / "path"
        assert not storage_path.exists()
        # Creating the storage instance should not create the directory
        storage = ConcurrencyLeaseStorage(storage_path=storage_path)
        assert not storage_path.exists()
        # Creating a lease should create the directory
        sample_resource_ids = [uuid4(), uuid4()]
        ttl = timedelta(minutes=5)
        await storage.create_lease(sample_resource_ids, ttl)
        assert storage_path.exists()
        assert storage_path.is_dir()

    async def test_multiple_leases_persistence(
        self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
    ):
        # Create multiple leases
        ttl = timedelta(minutes=5)
        await storage.create_lease(sample_resource_ids, ttl)
        await storage.create_lease(sample_resource_ids, ttl)
        await storage.create_lease(sample_resource_ids, ttl)
        # Verify all files exist
        lease_files = self._lease_files(storage)
        assert len(lease_files) == 3
        # Verify we can read all leases
        lease_ids = [UUID(f.stem) for f in lease_files]
        for lease_id in lease_ids:
            read_lease = await storage.read_lease(lease_id)
            assert read_lease is not None
            assert read_lease.resource_ids == sample_resource_ids

    # -- holders -------------------------------------------------------------

    async def test_list_holders_for_limit_empty(self, storage: ConcurrencyLeaseStorage):
        limit_id = uuid4()
        holders = await storage.list_holders_for_limit(limit_id)
        assert holders == []

    async def test_list_holders_for_limit_no_holders(
        self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
    ):
        # Create a lease without a holder
        ttl = timedelta(minutes=5)
        metadata = ConcurrencyLimitLeaseMetadata(slots=2)
        await storage.create_lease(sample_resource_ids, ttl, metadata)
        holders = await storage.list_holders_for_limit(sample_resource_ids[0])
        assert holders == []

    async def test_list_holders_for_limit_with_holders(
        self, storage: ConcurrencyLeaseStorage
    ):
        limit_id = uuid4()
        # Create leases with different holders
        holder1 = ConcurrencyLeaseHolder(type="task_run", id=uuid4())
        holder2 = ConcurrencyLeaseHolder(type="flow_run", id=uuid4())
        metadata1 = ConcurrencyLimitLeaseMetadata(slots=2, holder=holder1)
        metadata2 = ConcurrencyLimitLeaseMetadata(slots=1, holder=holder2)
        ttl = timedelta(minutes=5)
        await storage.create_lease([limit_id], ttl, metadata1)
        await storage.create_lease([limit_id], ttl, metadata2)
        # Create a lease for a different limit to ensure it's not included
        other_limit_id = uuid4()
        metadata3 = ConcurrencyLimitLeaseMetadata(
            slots=1, holder=ConcurrencyLeaseHolder(type="task_run", id=uuid4())
        )
        await storage.create_lease([other_limit_id], ttl, metadata3)
        holders_with_leases = await storage.list_holders_for_limit(limit_id)
        assert len(holders_with_leases) == 2
        holders = [holder for _, holder in holders_with_leases]
        assert holder1 in holders
        assert holder2 in holders

    async def test_list_holders_for_limit_expired_leases(
        self, storage: ConcurrencyLeaseStorage
    ):
        limit_id = uuid4()
        # Create an expired lease with a holder
        expired_ttl = timedelta(seconds=-1)
        holder = ConcurrencyLeaseHolder(type="task_run", id=uuid4())
        metadata = ConcurrencyLimitLeaseMetadata(slots=1, holder=holder)
        await storage.create_lease([limit_id], expired_ttl, metadata)
        # Create an active lease with a holder
        active_ttl = timedelta(minutes=5)
        active_holder = ConcurrencyLeaseHolder(type="flow_run", id=uuid4())
        active_metadata = ConcurrencyLimitLeaseMetadata(slots=1, holder=active_holder)
        active_lease = await storage.create_lease(
            [limit_id], active_ttl, active_metadata
        )
        # Only the active lease's holder should be reported
        holders = await storage.list_holders_for_limit(limit_id)
        assert len(holders) == 1
        lease_id, holder = holders[0]
        assert lease_id == active_lease.id
        assert holder == active_holder

    # -- pagination ----------------------------------------------------------

    async def test_read_active_lease_ids_with_pagination(
        self, storage: ConcurrencyLeaseStorage
    ):
        # Create 10 active leases
        active_ttl = timedelta(minutes=5)
        lease_ids: list[UUID] = []
        for _ in range(10):
            lease = await storage.create_lease([uuid4()], active_ttl)
            lease_ids.append(lease.id)
        # Test getting first page
        first_page = await storage.read_active_lease_ids(limit=3, offset=0)
        assert len(first_page) == 3
        assert all(lid in lease_ids for lid in first_page)
        # Test getting second page
        second_page = await storage.read_active_lease_ids(limit=3, offset=3)
        assert len(second_page) == 3
        assert all(lid in lease_ids for lid in second_page)
        # Ensure no overlap between pages
        assert set(first_page).isdisjoint(set(second_page))
        # Test getting third page
        third_page = await storage.read_active_lease_ids(limit=3, offset=6)
        assert len(third_page) == 3
        assert all(lid in lease_ids for lid in third_page)
        # Test getting partial last page
        fourth_page = await storage.read_active_lease_ids(limit=3, offset=9)
        assert len(fourth_page) == 1
        assert all(lid in lease_ids for lid in fourth_page)
        # Test offset beyond available items
        empty_page = await storage.read_active_lease_ids(limit=3, offset=100)
        assert empty_page == []

    async def test_read_active_lease_ids_default_pagination(
        self, storage: ConcurrencyLeaseStorage
    ):
        # Create 150 active leases (more than default limit)
        active_ttl = timedelta(minutes=5)
        lease_ids: list[UUID] = []
        for _ in range(150):
            lease = await storage.create_lease([uuid4()], active_ttl)
            lease_ids.append(lease.id)
        # Test default limit of 100
        default_page = await storage.read_active_lease_ids()
        assert len(default_page) == 100
        assert all(lid in lease_ids for lid in default_page)
        # Test with offset
        offset_page = await storage.read_active_lease_ids(offset=100)
        assert len(offset_page) == 50  # remaining leases
        assert all(lid in lease_ids for lid in offset_page)
        # Ensure no overlap with first page
        assert set(default_page).isdisjoint(set(offset_page))

    # -- atomic write mechanics ----------------------------------------------

    async def test_atomic_write_produces_valid_json(
        self, storage: ConcurrencyLeaseStorage, temp_dir: Path
    ):
        """Test that _atomic_write_json produces valid, readable JSON files."""
        test_file = temp_dir / "test_atomic.json"
        test_data = {"key": "value", "nested": {"a": 1, "b": [1, 2, 3]}}
        storage._atomic_write_json(test_file, test_data)
        # Verify file exists and contains valid JSON
        assert test_file.exists()
        with open(test_file, "r") as f:
            loaded_data = json.load(f)
        assert loaded_data == test_data

    async def test_atomic_write_no_temp_files_left_behind(
        self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
    ):
        """Test that no temporary files are left behind after lease operations."""
        ttl = timedelta(minutes=5)
        # Create multiple leases
        for _ in range(5):
            await storage.create_lease(sample_resource_ids, ttl)
        # Check for any temp files (they start with .lease_ and end with .tmp)
        temp_files = list(storage.storage_path.glob(".lease_*.tmp"))
        assert len(temp_files) == 0, f"Found leftover temp files: {temp_files}"

    async def test_atomic_write_overwrites_existing_file(
        self, storage: ConcurrencyLeaseStorage, temp_dir: Path
    ):
        """Test that _atomic_write_json correctly overwrites existing files."""
        test_file = temp_dir / "test_overwrite.json"
        # Write initial data
        initial_data = {"version": 1}
        storage._atomic_write_json(test_file, initial_data)
        # Overwrite with new data
        new_data = {"version": 2, "extra": "field"}
        storage._atomic_write_json(test_file, new_data)
        # Verify file contains new data
        with open(test_file, "r") as f:
            loaded_data = json.load(f)
        assert loaded_data == new_data

    async def test_atomic_write_cleans_up_temp_on_error(
        self, storage: ConcurrencyLeaseStorage, temp_dir: Path
    ):
        """Test that temp files are cleaned up when an error occurs during write."""
        test_file = temp_dir / "test_error.json"

        # Create an object that will fail JSON serialization
        class NonSerializable:
            pass

        non_serializable_data = {"bad": NonSerializable()}
        # Attempt to write non-serializable data
        with pytest.raises(TypeError):
            storage._atomic_write_json(test_file, non_serializable_data)
        # Verify no temp files are left behind
        temp_files = list(temp_dir.glob(".lease_*.tmp"))
        assert len(temp_files) == 0, f"Found leftover temp files: {temp_files}"
        # Verify target file was not created
        assert not test_file.exists()

    async def test_renew_lease_no_temp_files_left_behind(
        self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
    ):
        """Test that renewing leases doesn't leave temp files behind."""
        ttl = timedelta(minutes=5)
        lease = await storage.create_lease(sample_resource_ids, ttl)
        # Renew the lease multiple times
        for _ in range(5):
            await storage.renew_lease(lease.id, ttl)
        # Check for any temp files
        temp_files = list(storage.storage_path.glob(".lease_*.tmp"))
        assert len(temp_files) == 0, f"Found leftover temp files: {temp_files}"

    async def test_expiration_index_atomic_write(
        self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
    ):
        """Test that expiration index updates use atomic writes."""
        ttl = timedelta(minutes=5)
        # Create multiple leases to trigger multiple index updates
        lease_ids = []
        for _ in range(5):
            lease = await storage.create_lease(sample_resource_ids, ttl)
            lease_ids.append(lease.id)
        # Verify expiration index exists and is valid JSON
        expiration_file = storage.storage_path / "expirations.json"
        assert expiration_file.exists()
        with open(expiration_file, "r") as f:
            index_data = json.load(f)
        # Verify all lease IDs are in the index
        for lease_id in lease_ids:
            assert str(lease_id) in index_data
        # Verify no temp files left behind
        temp_files = list(storage.storage_path.glob(".lease_*.tmp"))
        assert len(temp_files) == 0
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/server/concurrency/test_filesystem_lease_storage.py",
"license": "Apache License 2.0",
"lines": 544,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/prefect/server/services/repossessor.py | """
The Repossessor service. Handles reconciliation of expired concurrency leases.
"""
from __future__ import annotations
import logging
from datetime import datetime, timedelta, timezone
from typing import Annotated
from uuid import UUID
from docket import CurrentDocket, Depends, Docket, Logged, Perpetual
from prefect.logging import get_logger
from prefect.server.concurrency.lease_storage import (
ConcurrencyLeaseStorage,
get_concurrency_lease_storage,
)
from prefect.server.database import PrefectDBInterface, provide_database_interface
from prefect.server.models.concurrency_limits_v2 import bulk_decrement_active_slots
from prefect.server.services.perpetual_services import perpetual_service
from prefect.settings.context import get_current_settings
# Module-level logger shared by the monitor loop and its docket tasks.
logger: logging.Logger = get_logger(__name__)
async def revoke_expired_lease(
    lease_id: Annotated[UUID, Logged],
    *,
    db: PrefectDBInterface = Depends(provide_database_interface),
    lease_storage: ConcurrencyLeaseStorage = Depends(get_concurrency_lease_storage),
) -> None:
    """Docket task: release the slots held by one expired lease and delete it."""
    lease = await lease_storage.read_lease(lease_id)
    # Nothing to do if the lease vanished or carries no slot metadata.
    if lease is None or lease.metadata is None:
        logger.warning(
            f"Lease {lease_id} should be revoked but was not found or has no metadata"
        )
        return
    # How long the slots were held, used for occupancy accounting.
    held_for = datetime.now(timezone.utc) - lease.created_at
    occupancy_seconds = held_for.total_seconds()
    logger.info(
        f"Revoking lease {lease_id} for {len(lease.resource_ids)} "
        f"concurrency limits with {lease.metadata.slots} slots"
    )
    # Release the slots transactionally before removing the lease itself.
    async with db.session_context(begin_transaction=True) as session:
        await bulk_decrement_active_slots(
            session=session,
            concurrency_limit_ids=lease.resource_ids,
            slots=lease.metadata.slots,
            occupancy_seconds=occupancy_seconds,
        )
    await lease_storage.revoke_lease(lease_id)
@perpetual_service(
    enabled_getter=lambda: get_current_settings().server.services.repossessor.enabled,
)
async def monitor_expired_leases(
    docket: Docket = CurrentDocket(),
    lease_storage: ConcurrencyLeaseStorage = Depends(get_concurrency_lease_storage),
    perpetual: Perpetual = Perpetual(
        automatic=False,
        every=timedelta(
            seconds=get_current_settings().server.services.repossessor.loop_seconds
        ),
    ),
) -> None:
    """Scan lease storage for expired leases and enqueue a revocation task for each."""
    expired = await lease_storage.read_expired_lease_ids()
    if not expired:
        return
    logger.info(f"Scheduling revocation of {len(expired)} expired leases")
    for expired_id in expired:
        await docket.add(revoke_expired_lease)(expired_id)
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/server/services/repossessor.py",
"license": "Apache License 2.0",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/prefect:tests/server/services/test_repossessor.py | from datetime import timedelta
from uuid import uuid4
import pytest
from docket import Docket
from sqlalchemy.ext.asyncio import AsyncSession
from prefect.server.concurrency.lease_storage import ConcurrencyLimitLeaseMetadata
from prefect.server.concurrency.lease_storage.memory import (
ConcurrencyLeaseStorage,
)
from prefect.server.database.dependencies import provide_database_interface
from prefect.server.models.concurrency_limits_v2 import (
bulk_increment_active_slots,
bulk_read_concurrency_limits,
create_concurrency_limit,
)
from prefect.server.schemas.core import ConcurrencyLimitV2
from prefect.server.services.repossessor import (
monitor_expired_leases,
revoke_expired_lease,
)
class TestRevokeExpiredLease:
    """Tests for the `revoke_expired_lease` docket task."""

    @pytest.fixture
    def lease_storage(self):
        """Create a fresh concurrency lease storage for testing"""
        # The storage is a process-wide singleton, so clear its dicts to
        # isolate each test from state left behind by earlier ones.
        storage = ConcurrencyLeaseStorage()
        storage.leases.clear()
        storage.expirations.clear()
        return storage

    @pytest.fixture
    async def concurrency_limit(self, session: AsyncSession):
        """Create a concurrency limit in the database"""
        limit = await create_concurrency_limit(
            session=session,
            concurrency_limit=ConcurrencyLimitV2(
                name="test_limit",
                limit=10,
                avg_slot_occupancy_seconds=0.5,
            ),
        )
        await session.commit()
        return limit

    async def test_revoke_expired_lease(
        self, lease_storage, concurrency_limit, session: AsyncSession
    ):
        """Test revoking an expired lease successfully"""
        # Take a couple of slots
        await bulk_increment_active_slots(session, [concurrency_limit.id], 2)
        await session.commit()

        # Create a lease with metadata
        resource_ids = [concurrency_limit.id]
        metadata = ConcurrencyLimitLeaseMetadata(slots=2)
        lease = await lease_storage.create_lease(
            resource_ids=resource_ids,
            ttl=timedelta(seconds=-1),  # Already expired
            metadata=metadata,
        )

        # Verify initial state - lease should exist
        assert len(lease_storage.leases) == 1

        # Revoke the lease
        db = provide_database_interface()
        await revoke_expired_lease(
            lease.id,
            db=db,
            lease_storage=lease_storage,
        )

        # Verify the lease was processed and removed
        assert len(lease_storage.leases) == 0

        # Verify the slots were decremented
        limits = await bulk_read_concurrency_limits(session, [concurrency_limit.name])
        assert len(limits) == 1
        assert limits[0].active_slots == 0

    async def test_revoke_expired_lease_missing_lease(
        self, lease_storage, concurrency_limit
    ):
        """Test handling of missing lease (lease returns None)"""
        # Create a lease but manually remove it to simulate missing lease
        resource_ids = [concurrency_limit.id]
        metadata = ConcurrencyLimitLeaseMetadata(slots=1)
        lease = await lease_storage.create_lease(
            resource_ids=resource_ids,
            ttl=timedelta(seconds=-1),  # Already expired
            metadata=metadata,
        )

        # Manually remove the lease to simulate it being missing
        lease_storage.leases.clear()

        # Revoke the lease - should handle gracefully
        db = provide_database_interface()
        await revoke_expired_lease(
            lease.id,
            db=db,
            lease_storage=lease_storage,
        )

        # Verify no error was raised and no leases remain
        assert len(lease_storage.leases) == 0

    async def test_revoke_expired_lease_missing_metadata(
        self, lease_storage, concurrency_limit
    ):
        """Test handling of lease with missing metadata"""
        # Create a lease without metadata
        resource_ids = [concurrency_limit.id]
        lease = await lease_storage.create_lease(
            resource_ids=resource_ids,
            ttl=timedelta(seconds=-1),  # Already expired
            metadata=None,  # No metadata
        )

        # Verify initial state - lease should exist
        assert len(lease_storage.leases) == 1

        # Revoke the lease - should handle gracefully and not revoke
        db = provide_database_interface()
        await revoke_expired_lease(
            lease.id,
            db=db,
            lease_storage=lease_storage,
        )

        # Verify lease was not processed due to missing metadata
        assert len(lease_storage.leases) == 1  # Lease should still exist
class TestMonitorExpiredLeases:
    """Tests for the `monitor_expired_leases` perpetual service task."""

    @pytest.fixture
    def lease_storage(self):
        """Create a fresh concurrency lease storage for testing"""
        # The storage is a process-wide singleton, so clear its dicts to
        # isolate each test from state left behind by earlier ones.
        storage = ConcurrencyLeaseStorage()
        storage.leases.clear()
        storage.expirations.clear()
        return storage

    @pytest.fixture
    async def concurrency_limit(self, session: AsyncSession):
        """Create a concurrency limit in the database"""
        limit = await create_concurrency_limit(
            session=session,
            concurrency_limit=ConcurrencyLimitV2(
                name="test_limit",
                limit=10,
                avg_slot_occupancy_seconds=0.5,
            ),
        )
        await session.commit()
        return limit

    async def test_monitor_finds_expired_leases(self, lease_storage, concurrency_limit):
        """Test that monitor_expired_leases finds expired leases"""
        # Create an expired lease
        resource_ids = [concurrency_limit.id]
        metadata = ConcurrencyLimitLeaseMetadata(slots=2)
        await lease_storage.create_lease(
            resource_ids=resource_ids,
            ttl=timedelta(seconds=-1),  # Already expired
            metadata=metadata,
        )

        # Verify initial state
        assert len(lease_storage.leases) == 1

        # Use a real Docket instance with unique name for isolation
        async with Docket(name=f"test-{uuid4()}", url="memory://") as docket:
            docket.register(revoke_expired_lease)
            await monitor_expired_leases(
                docket=docket,
                lease_storage=lease_storage,
            )

            # Verify a task was scheduled via snapshot
            snapshot = await docket.snapshot()
            assert snapshot.total_tasks == 1

    async def test_monitor_ignores_non_expired_leases(
        self, lease_storage, concurrency_limit
    ):
        """Test that monitor_expired_leases ignores non-expired leases"""
        # Create a non-expired lease
        resource_ids = [concurrency_limit.id]
        metadata = ConcurrencyLimitLeaseMetadata(slots=2)
        await lease_storage.create_lease(
            resource_ids=resource_ids,
            ttl=timedelta(minutes=5),  # Not expired
            metadata=metadata,
        )

        # Verify initial state
        assert len(lease_storage.leases) == 1

        # Use a real Docket instance with unique name for isolation
        async with Docket(name=f"test-{uuid4()}", url="memory://") as docket:
            docket.register(revoke_expired_lease)
            await monitor_expired_leases(
                docket=docket,
                lease_storage=lease_storage,
            )

            # Verify no tasks were scheduled
            snapshot = await docket.snapshot()
            assert snapshot.total_tasks == 0

    async def test_monitor_schedules_multiple_tasks(
        self, lease_storage, session: AsyncSession
    ):
        """Test that monitor schedules tasks for multiple expired leases"""
        # Create multiple concurrency limits
        limits = []
        for i in range(3):
            limit = await create_concurrency_limit(
                session=session,
                concurrency_limit=ConcurrencyLimitV2(
                    name=f"test_limit_{i}",
                    limit=10,
                    slot_decay_per_second=0.0,
                    avg_slot_occupancy_seconds=2.0,
                ),
            )
            limits.append(limit)
        await session.commit()

        # Create multiple expired leases
        for i, limit in enumerate(limits):
            slots = i + 1
            metadata = ConcurrencyLimitLeaseMetadata(slots=slots)
            await lease_storage.create_lease(
                resource_ids=[limit.id],
                ttl=timedelta(seconds=-1),  # Already expired
                metadata=metadata,
            )

        # Verify initial state
        assert len(lease_storage.leases) == 3

        # Use a real Docket instance with unique name for isolation
        async with Docket(name=f"test-{uuid4()}", url="memory://") as docket:
            docket.register(revoke_expired_lease)
            await monitor_expired_leases(
                docket=docket,
                lease_storage=lease_storage,
            )

            # Verify all tasks were scheduled
            snapshot = await docket.snapshot()
            assert snapshot.total_tasks == 3
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/server/services/test_repossessor.py",
"license": "Apache License 2.0",
"lines": 222,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/prefect/server/concurrency/lease_storage/memory.py | from __future__ import annotations
from datetime import datetime, timedelta, timezone
from uuid import UUID
from prefect.server.concurrency.lease_storage import (
ConcurrencyLeaseHolder,
ConcurrencyLimitLeaseMetadata,
)
from prefect.server.concurrency.lease_storage import (
ConcurrencyLeaseStorage as _ConcurrencyLeaseStorage,
)
from prefect.server.utilities.leasing import ResourceLease
class ConcurrencyLeaseStorage(_ConcurrencyLeaseStorage):
    """
    A singleton concurrency lease storage implementation that stores leases in memory.

    Leases live in ``self.leases`` keyed by lease ID; their expiration
    timestamps are mirrored in ``self.expirations`` for fast scanning.
    """

    _instance: "ConcurrencyLeaseStorage | None" = None
    _initialized: bool = False

    def __new__(cls) -> "ConcurrencyLeaseStorage":
        # Lazily create the single shared instance on first construction.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self):
        # Guard so repeated construction never wipes existing lease state.
        if self.__class__._initialized:
            return
        self.leases: dict[UUID, ResourceLease[ConcurrencyLimitLeaseMetadata]] = {}
        self.expirations: dict[UUID, datetime] = {}
        self.__class__._initialized = True

    async def create_lease(
        self,
        resource_ids: list[UUID],
        ttl: timedelta,
        metadata: ConcurrencyLimitLeaseMetadata | None = None,
    ) -> ResourceLease[ConcurrencyLimitLeaseMetadata]:
        """Create and index a new lease that expires ``ttl`` from now."""
        expires_at = datetime.now(timezone.utc) + ttl
        new_lease = ResourceLease(
            resource_ids=resource_ids, metadata=metadata, expiration=expires_at
        )
        self.leases[new_lease.id] = new_lease
        self.expirations[new_lease.id] = expires_at
        return new_lease

    async def read_lease(
        self, lease_id: UUID
    ) -> ResourceLease[ConcurrencyLimitLeaseMetadata] | None:
        """Return the lease with the given ID, or None if unknown."""
        return self.leases.get(lease_id)

    async def renew_lease(self, lease_id: UUID, ttl: timedelta) -> bool:
        """
        Atomically renew a concurrency lease by updating its expiration.

        Checks if the lease exists before updating the expiration index,
        preventing orphaned index entries.

        Args:
            lease_id: The ID of the lease to renew
            ttl: The new time-to-live duration

        Returns:
            True if the lease was renewed, False if it didn't exist
        """
        if lease_id in self.leases:
            self.expirations[lease_id] = datetime.now(timezone.utc) + ttl
            return True
        # Drop any orphaned expiration entry for a lease that no longer exists.
        self.expirations.pop(lease_id, None)
        return False

    async def revoke_lease(self, lease_id: UUID) -> None:
        """Remove the lease and its expiration entry; unknown IDs are ignored."""
        for index in (self.leases, self.expirations):
            index.pop(lease_id, None)

    async def read_active_lease_ids(
        self, limit: int = 100, offset: int = 0
    ) -> list[UUID]:
        """Return a page of IDs for leases whose expiration is still in the future."""
        cutoff = datetime.now(timezone.utc)
        active = [lid for lid, expires_at in self.expirations.items() if expires_at > cutoff]
        return active[offset : offset + limit]

    async def read_expired_lease_ids(self, limit: int = 100) -> list[UUID]:
        """Return up to ``limit`` IDs of leases whose expiration has passed."""
        cutoff = datetime.now(timezone.utc)
        expired = [lid for lid, expires_at in self.expirations.items() if expires_at < cutoff]
        return expired[:limit]

    async def list_holders_for_limit(
        self, limit_id: UUID
    ) -> list[tuple[UUID, ConcurrencyLeaseHolder]]:
        """List all holders for a given concurrency limit."""
        now = datetime.now(timezone.utc)
        results: list[tuple[UUID, ConcurrencyLeaseHolder]] = []
        for lease in self.leases.values():
            if limit_id not in lease.resource_ids:
                continue
            # A missing expiration entry is treated as not active.
            if not self.expirations.get(lease.id, now) > now:
                continue
            # Only leases that carry metadata with a holder are reported.
            if not lease.metadata or not lease.metadata.holder:
                continue
            results.append((lease.id, lease.metadata.holder))
        return results
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/server/concurrency/lease_storage/memory.py",
"license": "Apache License 2.0",
"lines": 98,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/prefect/server/utilities/leasing.py | from __future__ import annotations
from dataclasses import dataclass, field
from datetime import datetime, timedelta, timezone
from functools import partial
from typing import Generic, Protocol, TypeVar
from uuid import UUID, uuid4
T = TypeVar("T")
@dataclass
class ResourceLease(Generic[T]):
    """A time-bounded claim on one or more resources.

    ``T`` is the backend-specific metadata type carried by the lease.
    """

    # IDs of the resources this lease is associated with.
    resource_ids: list[UUID]
    # When the lease stops being valid.
    expiration: datetime
    # Creation timestamp; defaults to "now" in UTC.
    created_at: datetime = field(default_factory=partial(datetime.now, timezone.utc))
    # Unique identifier for this lease.
    id: UUID = field(default_factory=uuid4)
    # Optional implementation-specific payload attached to the lease.
    metadata: T | None = None
class LeaseStorage(Protocol[T]):
    """Structural interface for lease-storage backends.

    Implementations persist ``ResourceLease`` objects and track their
    expirations; ``T`` is the backend-specific lease metadata type.
    """

    async def create_lease(
        self, resource_ids: list[UUID], ttl: timedelta, metadata: T | None = None
    ) -> ResourceLease[T]:
        """
        Create a new resource lease.

        Args:
            resource_ids: The IDs of the resources that the lease is associated with.
            ttl: How long the lease should initially be held for.
            metadata: Additional metadata associated with the lease.

        Returns:
            A ResourceLease object representing the lease.
        """
        ...

    async def read_lease(self, lease_id: UUID) -> ResourceLease[T] | None:
        """
        Read a resource lease.

        Args:
            lease_id: The ID of the lease to read.

        Returns:
            A ResourceLease object representing the lease, or None if not found.
        """
        ...

    async def renew_lease(self, lease_id: UUID, ttl: timedelta) -> bool | None:
        """
        Renew a resource lease.

        Args:
            lease_id: The ID of the lease to renew.
            ttl: The new amount of time the lease should be held for.

        Returns:
            True if the lease was successfully renewed, False if the lease
            does not exist or has already expired. None may be returned by
            legacy implementations for backwards compatibility (treated as success).
        """
        ...

    async def revoke_lease(self, lease_id: UUID) -> None:
        """
        Release a resource lease by removing it from list of active leases.

        Args:
            lease_id: The ID of the lease to release.
        """
        ...

    async def read_expired_lease_ids(self, limit: int = 100) -> list[UUID]:
        """
        Read the IDs of expired leases.

        Args:
            limit: The maximum number of expired leases to read.

        Returns:
            A list of UUIDs representing the expired leases.
        """
        ...
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/server/utilities/leasing.py",
"license": "Apache License 2.0",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
PrefectHQ/prefect:tests/server/concurrency/test_memory_lease_storage.py | from datetime import datetime, timedelta, timezone
from uuid import UUID, uuid4
import pytest
from prefect.server.concurrency.lease_storage import ConcurrencyLimitLeaseMetadata
from prefect.server.concurrency.lease_storage.memory import (
ConcurrencyLeaseStorage,
)
from prefect.server.utilities.leasing import ResourceLease
from prefect.types._concurrency import ConcurrencyLeaseHolder
class TestMemoryConcurrencyLeaseStorage:
    """Unit tests for the in-memory singleton `ConcurrencyLeaseStorage`."""

    def test_singleton_pattern(self):
        # Two constructions must yield the same shared instance with shared state.
        instance1 = ConcurrencyLeaseStorage()
        instance2 = ConcurrencyLeaseStorage()
        assert instance1 is instance2
        instance1.leases = {
            uuid4(): ResourceLease(
                resource_ids=[uuid4()],
                metadata=None,
                expiration=datetime.now(timezone.utc),
            )
        }
        assert instance1.leases == instance2.leases

    @pytest.fixture
    def storage(self) -> ConcurrencyLeaseStorage:
        # Clear the singleton's dicts so each test starts from empty state.
        storage = ConcurrencyLeaseStorage()
        storage.leases.clear()
        storage.expirations.clear()
        return storage

    @pytest.fixture
    def sample_resource_ids(self) -> list[UUID]:
        return [uuid4(), uuid4()]

    @pytest.fixture
    def sample_metadata(self) -> ConcurrencyLimitLeaseMetadata:
        return ConcurrencyLimitLeaseMetadata(slots=5)

    @pytest.fixture
    def sample_metadata_with_holder(self) -> ConcurrencyLimitLeaseMetadata:
        return ConcurrencyLimitLeaseMetadata(
            slots=3,
            holder=ConcurrencyLeaseHolder(type="flow_run", id=uuid4()),
        )

    async def test_create_lease_without_metadata(
        self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
    ):
        ttl = timedelta(minutes=5)
        lease = await storage.create_lease(sample_resource_ids, ttl)

        assert lease.resource_ids == sample_resource_ids
        assert lease.metadata is None
        assert len(storage.leases) == 1
        assert len(storage.expirations) == 1

    async def test_create_lease_with_metadata(
        self,
        storage: ConcurrencyLeaseStorage,
        sample_resource_ids: list[UUID],
        sample_metadata: ConcurrencyLimitLeaseMetadata,
    ):
        ttl = timedelta(minutes=5)
        lease = await storage.create_lease(sample_resource_ids, ttl, sample_metadata)

        assert lease.resource_ids == sample_resource_ids
        assert lease.metadata == sample_metadata
        assert len(storage.leases) == 1
        assert len(storage.expirations) == 1

    async def test_create_lease_with_holder(
        self,
        storage: ConcurrencyLeaseStorage,
        sample_resource_ids: list[UUID],
        sample_metadata_with_holder: ConcurrencyLimitLeaseMetadata,
    ):
        ttl = timedelta(minutes=5)
        lease = await storage.create_lease(
            sample_resource_ids, ttl, sample_metadata_with_holder
        )

        assert lease.resource_ids == sample_resource_ids
        assert lease.metadata is not None
        assert lease.metadata == sample_metadata_with_holder
        assert lease.metadata.holder is not None
        assert lease.metadata.holder.model_dump() == {
            "type": "flow_run",
            "id": lease.metadata.holder.id,
        }
        assert len(storage.leases) == 1
        assert len(storage.expirations) == 1

    async def test_read_lease_existing(
        self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
    ):
        ttl = timedelta(minutes=5)
        await storage.create_lease(sample_resource_ids, ttl)
        lease_id = list(storage.leases.keys())[0]

        read_lease = await storage.read_lease(lease_id)

        assert read_lease is not None
        assert read_lease.resource_ids == sample_resource_ids
        assert read_lease.metadata is None

    async def test_read_lease_with_holder(
        self,
        storage: ConcurrencyLeaseStorage,
        sample_resource_ids: list[UUID],
        sample_metadata_with_holder: ConcurrencyLimitLeaseMetadata,
    ):
        ttl = timedelta(minutes=5)
        created_lease = await storage.create_lease(
            sample_resource_ids, ttl, sample_metadata_with_holder
        )

        read_lease = await storage.read_lease(created_lease.id)

        assert read_lease is not None
        assert read_lease.resource_ids == sample_resource_ids
        assert read_lease.metadata is not None
        assert read_lease.metadata.slots == 3
        assert read_lease.metadata.holder is not None
        assert read_lease.metadata.holder.model_dump() == {
            "type": "flow_run",
            "id": read_lease.metadata.holder.id,
        }

    async def test_read_lease_non_existing(self, storage: ConcurrencyLeaseStorage):
        non_existing_id = uuid4()
        lease = await storage.read_lease(non_existing_id)
        assert lease is None

    async def test_renew_lease(
        self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
    ):
        ttl = timedelta(minutes=5)
        await storage.create_lease(sample_resource_ids, ttl)
        lease_id = list(storage.leases.keys())[0]
        original_expiration = storage.expirations[lease_id]

        new_ttl = timedelta(minutes=10)
        renewed = await storage.renew_lease(lease_id, new_ttl)
        assert renewed is True

        # Renewal must push the expiration further into the future.
        new_expiration = storage.expirations[lease_id]
        assert new_expiration > original_expiration

    async def test_renew_lease_non_existing(self, storage: ConcurrencyLeaseStorage):
        non_existing_id = uuid4()
        renewed = await storage.renew_lease(non_existing_id, timedelta(minutes=5))
        assert renewed is False

    async def test_revoke_lease(
        self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
    ):
        ttl = timedelta(minutes=5)
        await storage.create_lease(sample_resource_ids, ttl)
        lease_id = list(storage.leases.keys())[0]

        assert lease_id in storage.leases
        assert lease_id in storage.expirations

        await storage.revoke_lease(lease_id)

        # Both the lease and its expiration index entry must be removed.
        assert lease_id not in storage.leases
        assert lease_id not in storage.expirations

    async def test_revoke_lease_non_existing(self, storage: ConcurrencyLeaseStorage):
        non_existing_id = uuid4()
        # should not raise an exception
        await storage.revoke_lease(non_existing_id)

    async def test_read_expired_lease_ids_no_expired(
        self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
    ):
        ttl = timedelta(minutes=5)
        await storage.create_lease(sample_resource_ids, ttl)

        expired_ids = await storage.read_expired_lease_ids()
        assert expired_ids == []

    async def test_read_expired_lease_ids_with_expired(
        self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
    ):
        # A negative TTL produces a lease that is already expired on creation.
        expired_ttl = timedelta(seconds=-1)
        await storage.create_lease(sample_resource_ids, expired_ttl)

        expired_ids = await storage.read_expired_lease_ids()
        assert len(expired_ids) == 1
        assert expired_ids[0] in storage.leases

    async def test_read_expired_lease_ids_with_limit(
        self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
    ):
        expired_ttl = timedelta(seconds=-1)
        await storage.create_lease(sample_resource_ids, expired_ttl)
        await storage.create_lease(sample_resource_ids, expired_ttl)
        await storage.create_lease(sample_resource_ids, expired_ttl)

        expired_ids = await storage.read_expired_lease_ids(limit=2)
        assert len(expired_ids) == 2

    async def test_read_expired_lease_ids_mixed_expiration(
        self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
    ):
        expired_ttl = timedelta(seconds=-1)
        valid_ttl = timedelta(minutes=5)
        await storage.create_lease(sample_resource_ids, expired_ttl)
        await storage.create_lease(sample_resource_ids, valid_ttl)
        await storage.create_lease(sample_resource_ids, expired_ttl)

        expired_ids = await storage.read_expired_lease_ids()
        assert len(expired_ids) == 2

    async def test_list_holders_for_limit_empty(self, storage: ConcurrencyLeaseStorage):
        limit_id = uuid4()
        holders = await storage.list_holders_for_limit(limit_id)
        assert holders == []

    async def test_list_holders_for_limit_no_holders(
        self, storage: ConcurrencyLeaseStorage, sample_resource_ids: list[UUID]
    ):
        # Create a lease without a holder
        ttl = timedelta(minutes=5)
        metadata = ConcurrencyLimitLeaseMetadata(slots=2)
        await storage.create_lease(sample_resource_ids, ttl, metadata)

        holders = await storage.list_holders_for_limit(sample_resource_ids[0])
        assert holders == []

    async def test_list_holders_for_limit_with_holders(
        self, storage: ConcurrencyLeaseStorage
    ):
        limit_id = uuid4()

        # Create leases with different holders
        holder1 = ConcurrencyLeaseHolder(type="task_run", id=uuid4())
        holder2 = ConcurrencyLeaseHolder(type="flow_run", id=uuid4())
        metadata1 = ConcurrencyLimitLeaseMetadata(slots=2, holder=holder1)
        metadata2 = ConcurrencyLimitLeaseMetadata(slots=1, holder=holder2)

        ttl = timedelta(minutes=5)
        await storage.create_lease([limit_id], ttl, metadata1)
        await storage.create_lease([limit_id], ttl, metadata2)

        # Create a lease for a different limit to ensure it's not included
        other_limit_id = uuid4()
        metadata3 = ConcurrencyLimitLeaseMetadata(
            slots=1, holder=ConcurrencyLeaseHolder(type="task_run", id=uuid4())
        )
        await storage.create_lease([other_limit_id], ttl, metadata3)

        holders_with_leases = await storage.list_holders_for_limit(limit_id)
        assert len(holders_with_leases) == 2
        holders = [holder for _, holder in holders_with_leases]
        assert holder1 in holders
        assert holder2 in holders

    async def test_list_holders_for_limit_expired_leases(
        self, storage: ConcurrencyLeaseStorage
    ):
        limit_id = uuid4()

        # Create an expired lease with a holder
        expired_ttl = timedelta(seconds=-1)
        holder = ConcurrencyLeaseHolder(type="task_run", id=uuid4())
        metadata = ConcurrencyLimitLeaseMetadata(slots=1, holder=holder)
        await storage.create_lease([limit_id], expired_ttl, metadata)

        # Create an active lease with a holder
        active_ttl = timedelta(minutes=5)
        active_holder = ConcurrencyLeaseHolder(type="flow_run", id=uuid4())
        active_metadata = ConcurrencyLimitLeaseMetadata(slots=1, holder=active_holder)
        active_lease = await storage.create_lease(
            [limit_id], active_ttl, active_metadata
        )

        # Only the active lease's holder should be reported.
        holders = await storage.list_holders_for_limit(limit_id)
        assert len(holders) == 1
        lease_id, holder = holders[0]
        assert lease_id == active_lease.id
        assert holder == active_holder

    async def test_read_active_lease_ids_with_pagination(
        self, storage: ConcurrencyLeaseStorage
    ):
        # Create 10 active leases
        active_ttl = timedelta(minutes=5)
        lease_ids: list[UUID] = []
        for _ in range(10):
            lease = await storage.create_lease([uuid4()], active_ttl)
            lease_ids.append(lease.id)

        # Test getting first page
        first_page = await storage.read_active_lease_ids(limit=3, offset=0)
        assert len(first_page) == 3
        assert all(lid in lease_ids for lid in first_page)

        # Test getting second page
        second_page = await storage.read_active_lease_ids(limit=3, offset=3)
        assert len(second_page) == 3
        assert all(lid in lease_ids for lid in second_page)

        # Ensure no overlap between pages
        assert set(first_page).isdisjoint(set(second_page))

        # Test getting third page
        third_page = await storage.read_active_lease_ids(limit=3, offset=6)
        assert len(third_page) == 3
        assert all(lid in lease_ids for lid in third_page)

        # Test getting partial last page
        fourth_page = await storage.read_active_lease_ids(limit=3, offset=9)
        assert len(fourth_page) == 1
        assert all(lid in lease_ids for lid in fourth_page)

        # Test offset beyond available items
        empty_page = await storage.read_active_lease_ids(limit=3, offset=100)
        assert empty_page == []

    async def test_read_active_lease_ids_default_pagination(
        self, storage: ConcurrencyLeaseStorage
    ):
        # Create 150 active leases (more than default limit)
        active_ttl = timedelta(minutes=5)
        lease_ids: list[UUID] = []
        for _ in range(150):
            lease = await storage.create_lease([uuid4()], active_ttl)
            lease_ids.append(lease.id)

        # Test default limit of 100
        default_page = await storage.read_active_lease_ids()
        assert len(default_page) == 100
        assert all(lid in lease_ids for lid in default_page)

        # Test with offset
        offset_page = await storage.read_active_lease_ids(offset=100)
        assert len(offset_page) == 50  # remaining leases
        assert all(lid in lease_ids for lid in offset_page)

        # Ensure no overlap with first page
        assert set(default_page).isdisjoint(set(offset_page))
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/server/concurrency/test_memory_lease_storage.py",
"license": "Apache License 2.0",
"lines": 286,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:integration-tests/test_serve_a_flow.py | import signal
from datetime import timedelta
from pathlib import Path
from types import FrameType
from prefect import flow
from prefect.settings import PREFECT_RUNNER_POLL_FREQUENCY, temporary_settings
@flow
def may_i_take_your_hat_sir(item: str, counter_dir: Path):
    """Toy flow served by the integration test below.

    Asserts that ``item`` is "hat", records the run by touching a marker file
    in ``counter_dir``, and returns a greeting string.
    """
    assert item == "hat", "I don't know how to do everything"
    # NOTE(review): the marker filename uses id(...) of the flow object, which
    # is only unique per process — counting distinct files relies on each run
    # executing in its own process; confirm against the runner's behavior.
    (counter_dir / f"{id(may_i_take_your_hat_sir)}.txt").touch()
    return f"May I take your {item}?"
def _handler(signum: int, frame: FrameType | None):
    """Signal handler that converts the alarm into a KeyboardInterrupt so the
    blocking ``serve()`` call unwinds as if the user pressed Ctrl-C."""
    raise KeyboardInterrupt("Simulating user interruption")
def count_runs(counter_dir: Path):
    """Count the ``.txt`` marker files present in ``counter_dir``."""
    return sum(1 for _ in counter_dir.glob("*.txt"))
def test_serve_a_flow(tmp_path: Path):
    """Serve a flow on a short interval and audit that it actually ran.

    A SIGALRM alarm interrupts the otherwise-blocking ``serve()`` call after
    TIMEOUT seconds; completed runs are then counted from the marker files
    the flow wrote into ``counter_dir``.
    """
    TIMEOUT: int = 20
    INTERVAL_SECONDS: int = 3
    MINIMUM_EXPECTED_N_FLOW_RUNS: int = 3

    # Arrange for serve() to be interrupted after TIMEOUT seconds.
    signal.signal(signal.SIGALRM, _handler)
    signal.alarm(TIMEOUT)

    counter_dir = tmp_path / "flow_run_counter"
    counter_dir.mkdir(exist_ok=True)

    # Poll frequently so several interval-scheduled runs fit in the window.
    with temporary_settings({PREFECT_RUNNER_POLL_FREQUENCY: 1}):
        try:
            may_i_take_your_hat_sir.serve(
                interval=timedelta(seconds=INTERVAL_SECONDS),
                parameters={"item": "hat", "counter_dir": counter_dir},
            )
        except KeyboardInterrupt as e:
            # Expected: raised by _handler when the alarm fires.
            print(str(e))
        finally:
            # Always cancel any pending alarm so it cannot fire later.
            signal.alarm(0)

    actual_run_count = count_runs(counter_dir)
    assert actual_run_count >= MINIMUM_EXPECTED_N_FLOW_RUNS, (
        f"Expected at least {MINIMUM_EXPECTED_N_FLOW_RUNS} flow runs, got {actual_run_count}"
    )
    print(f"Successfully completed and audited {actual_run_count} flow runs")
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "integration-tests/test_serve_a_flow.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.