sample_id
stringlengths
21
196
text
stringlengths
105
936k
metadata
dict
category
stringclasses
6 values
PrefectHQ/prefect:src/prefect/client/attribution.py
""" Attribution context for API requests. This module provides functions to gather attribution headers that identify the source of API requests (flow runs, deployments, workers) for usage tracking and rate limit debugging. """ from __future__ import annotations import os from typing import TYPE_CHECKING if TYPE_CHECKING: pass def get_attribution_headers() -> dict[str, str]: """ Gather attribution headers from the current execution context. These headers help Cloud track which flow runs, deployments, and workers are generating API requests for usage attribution and rate limit debugging. Headers are only included when values are available. All headers are optional. Returns: A dictionary of attribution headers to include in API requests. """ headers: dict[str, str] = {} # Worker context (passed via environment variables from worker to flow run process) if worker_id := os.environ.get("PREFECT__WORKER_ID"): headers["X-Prefect-Worker-Id"] = worker_id if worker_name := os.environ.get("PREFECT__WORKER_NAME"): headers["X-Prefect-Worker-Name"] = worker_name # Flow and deployment context - try to get from context first, fall back to env vars # Import here to avoid circular imports from prefect.context import FlowRunContext flow_run_ctx = FlowRunContext.get() if flow_run_ctx and flow_run_ctx.flow_run: flow_run = flow_run_ctx.flow_run # Flow info (use getattr for safety with mock/minimal FlowRun objects) if flow_id := getattr(flow_run, "flow_id", None): headers["X-Prefect-Flow-Id"] = str(flow_id) if flow_run_ctx.flow and getattr(flow_run_ctx.flow, "name", None): headers["X-Prefect-Flow-Name"] = flow_run_ctx.flow.name # Deployment info from flow run if deployment_id := getattr(flow_run, "deployment_id", None): headers["X-Prefect-Deployment-Id"] = str(deployment_id) # Deployment name is not on FlowRun, fall back to env var if deployment_name := os.environ.get("PREFECT__DEPLOYMENT_NAME"): headers["X-Prefect-Deployment-Name"] = deployment_name else: # Fall back to environment 
variables if flow_id := os.environ.get("PREFECT__FLOW_ID"): headers["X-Prefect-Flow-Id"] = flow_id if flow_name := os.environ.get("PREFECT__FLOW_NAME"): headers["X-Prefect-Flow-Name"] = flow_name if deployment_id := os.environ.get("PREFECT__DEPLOYMENT_ID"): headers["X-Prefect-Deployment-Id"] = deployment_id if deployment_name := os.environ.get("PREFECT__DEPLOYMENT_NAME"): headers["X-Prefect-Deployment-Name"] = deployment_name return headers
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/client/attribution.py", "license": "Apache License 2.0", "lines": 54, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
PrefectHQ/prefect:tests/client/test_attribution.py
""" Tests for attribution headers functionality. """ import os from unittest import mock from uuid import uuid4 import httpx from httpx import Request, Response from prefect._internal.compatibility.starlette import status from prefect.client.attribution import get_attribution_headers from prefect.client.base import PrefectHttpxAsyncClient, PrefectHttpxSyncClient RESPONSE_200 = Response( status.HTTP_200_OK, request=Request("a test request", "fake.url/fake/route"), ) class TestGetAttributionHeaders: """Tests for the get_attribution_headers function.""" def test_returns_empty_dict_with_no_context(self): """When no context is available, should return empty headers.""" env_vars = [ "PREFECT__WORKER_ID", "PREFECT__WORKER_NAME", "PREFECT__FLOW_ID", "PREFECT__FLOW_NAME", "PREFECT__DEPLOYMENT_ID", "PREFECT__DEPLOYMENT_NAME", ] with mock.patch.dict(os.environ, {}, clear=True): for var in env_vars: os.environ.pop(var, None) headers = get_attribution_headers() assert "X-Prefect-Worker-Id" not in headers assert "X-Prefect-Worker-Name" not in headers assert "X-Prefect-Flow-Id" not in headers assert "X-Prefect-Flow-Name" not in headers def test_includes_worker_id_from_env(self): """Worker ID should be read from environment variable.""" worker_id = str(uuid4()) with mock.patch.dict(os.environ, {"PREFECT__WORKER_ID": worker_id}): headers = get_attribution_headers() assert headers["X-Prefect-Worker-Id"] == worker_id def test_includes_worker_name_from_env(self): """Worker name should be read from environment variable.""" with mock.patch.dict(os.environ, {"PREFECT__WORKER_NAME": "test-worker"}): headers = get_attribution_headers() assert headers["X-Prefect-Worker-Name"] == "test-worker" def test_includes_flow_id_from_env(self): """Flow ID should be read from environment variable when no context.""" flow_id = str(uuid4()) with mock.patch.dict(os.environ, {"PREFECT__FLOW_ID": flow_id}): headers = get_attribution_headers() assert headers["X-Prefect-Flow-Id"] == flow_id def 
test_includes_flow_name_from_env(self): """Flow name should be read from environment variable when no context.""" with mock.patch.dict(os.environ, {"PREFECT__FLOW_NAME": "my-flow"}): headers = get_attribution_headers() assert headers["X-Prefect-Flow-Name"] == "my-flow" def test_includes_deployment_id_from_env(self): """Deployment ID should be read from environment variable when no context.""" deployment_id = str(uuid4()) with mock.patch.dict(os.environ, {"PREFECT__DEPLOYMENT_ID": deployment_id}): headers = get_attribution_headers() assert headers["X-Prefect-Deployment-Id"] == deployment_id def test_includes_deployment_name_from_env(self): """Deployment name should be read from environment variable when no context.""" with mock.patch.dict(os.environ, {"PREFECT__DEPLOYMENT_NAME": "my-deployment"}): headers = get_attribution_headers() assert headers["X-Prefect-Deployment-Name"] == "my-deployment" def test_includes_flow_info_from_context(self): """Flow info should be read from FlowRunContext when available.""" from prefect.client.schemas import FlowRun from prefect.context import FlowRunContext flow_id = uuid4() deployment_id = uuid4() flow_run = FlowRun( id=uuid4(), name="test-flow-run", flow_id=flow_id, deployment_id=deployment_id, ) mock_context = mock.MagicMock(spec=FlowRunContext) mock_context.flow_run = flow_run mock_context.flow = mock.MagicMock() mock_context.flow.name = "test-flow" with mock.patch.object(FlowRunContext, "get", return_value=mock_context): headers = get_attribution_headers() assert headers["X-Prefect-Flow-Id"] == str(flow_id) assert headers["X-Prefect-Flow-Name"] == "test-flow" assert headers["X-Prefect-Deployment-Id"] == str(deployment_id) def test_context_takes_precedence_over_env(self): """Context should take precedence over environment variables for flow info.""" from prefect.client.schemas import FlowRun from prefect.context import FlowRunContext context_flow_id = uuid4() env_flow_id = str(uuid4()) flow_run = FlowRun( id=uuid4(), 
name="context-flow-run", flow_id=context_flow_id, ) mock_context = mock.MagicMock(spec=FlowRunContext) mock_context.flow_run = flow_run mock_context.flow = mock.MagicMock() mock_context.flow.name = "context-flow" with mock.patch.dict(os.environ, {"PREFECT__FLOW_ID": env_flow_id}): with mock.patch.object(FlowRunContext, "get", return_value=mock_context): headers = get_attribution_headers() assert headers["X-Prefect-Flow-Id"] == str(context_flow_id) assert headers["X-Prefect-Flow-Name"] == "context-flow" def test_all_headers_present_with_full_context(self): """All headers should be present when full context is available.""" from prefect.client.schemas import FlowRun from prefect.context import FlowRunContext worker_id = str(uuid4()) worker_name = "full-context-worker" flow_id = uuid4() deployment_id = uuid4() flow_run = FlowRun( id=uuid4(), name="full-context-flow-run", flow_id=flow_id, deployment_id=deployment_id, ) mock_context = mock.MagicMock(spec=FlowRunContext) mock_context.flow_run = flow_run mock_context.flow = mock.MagicMock() mock_context.flow.name = "full-context-flow" with mock.patch.dict( os.environ, { "PREFECT__WORKER_ID": worker_id, "PREFECT__WORKER_NAME": worker_name, "PREFECT__DEPLOYMENT_NAME": "full-context-deployment", }, ): with mock.patch.object(FlowRunContext, "get", return_value=mock_context): headers = get_attribution_headers() assert headers["X-Prefect-Worker-Id"] == worker_id assert headers["X-Prefect-Worker-Name"] == worker_name assert headers["X-Prefect-Flow-Id"] == str(flow_id) assert headers["X-Prefect-Flow-Name"] == "full-context-flow" assert headers["X-Prefect-Deployment-Id"] == str(deployment_id) assert headers["X-Prefect-Deployment-Name"] == "full-context-deployment" def test_deployment_name_from_env_when_in_context(self): """Deployment name should come from env var even when in context.""" from prefect.client.schemas import FlowRun from prefect.context import FlowRunContext flow_run = FlowRun( id=uuid4(), name="test-flow-run", 
flow_id=uuid4(), deployment_id=uuid4(), ) mock_context = mock.MagicMock(spec=FlowRunContext) mock_context.flow_run = flow_run mock_context.flow = mock.MagicMock() mock_context.flow.name = "test-flow" with mock.patch.dict(os.environ, {"PREFECT__DEPLOYMENT_NAME": "my-deployment"}): with mock.patch.object(FlowRunContext, "get", return_value=mock_context): headers = get_attribution_headers() assert headers["X-Prefect-Deployment-Name"] == "my-deployment" class TestAsyncClientAttributionHeaders: """Tests that PrefectHttpxAsyncClient adds attribution headers.""" async def test_attribution_headers_added_to_requests(self): """Attribution headers should be added to all requests.""" worker_id = str(uuid4()) worker_name = "test-worker" flow_id = str(uuid4()) flow_name = "test-flow" with mock.patch.dict( os.environ, { "PREFECT__WORKER_ID": worker_id, "PREFECT__WORKER_NAME": worker_name, "PREFECT__FLOW_ID": flow_id, "PREFECT__FLOW_NAME": flow_name, }, ): with mock.patch("httpx.AsyncClient.send", autospec=True) as send: send.return_value = RESPONSE_200 async with PrefectHttpxAsyncClient() as client: await client.get(url="fake.url/fake/route") request = send.call_args[0][1] assert isinstance(request, httpx.Request) assert request.headers["X-Prefect-Worker-Id"] == worker_id assert request.headers["X-Prefect-Worker-Name"] == worker_name assert request.headers["X-Prefect-Flow-Id"] == flow_id assert request.headers["X-Prefect-Flow-Name"] == flow_name async def test_missing_attribution_values_not_in_headers(self): """Headers should not be present when values are not available.""" from prefect.context import FlowRunContext env = os.environ.copy() for var in [ "PREFECT__WORKER_ID", "PREFECT__WORKER_NAME", "PREFECT__FLOW_ID", "PREFECT__FLOW_NAME", "PREFECT__DEPLOYMENT_ID", "PREFECT__DEPLOYMENT_NAME", ]: env.pop(var, None) with mock.patch.dict(os.environ, env, clear=True): with mock.patch.object(FlowRunContext, "get", return_value=None): with mock.patch("httpx.AsyncClient.send", 
autospec=True) as send: send.return_value = RESPONSE_200 async with PrefectHttpxAsyncClient() as client: await client.get(url="fake.url/fake/route") request = send.call_args[0][1] assert isinstance(request, httpx.Request) assert "X-Prefect-Worker-Id" not in request.headers assert "X-Prefect-Worker-Name" not in request.headers assert "X-Prefect-Flow-Id" not in request.headers assert "X-Prefect-Flow-Name" not in request.headers class TestSyncClientAttributionHeaders: """Tests that PrefectHttpxSyncClient adds attribution headers.""" def test_attribution_headers_added_to_requests(self): """Attribution headers should be added to all requests.""" worker_id = str(uuid4()) worker_name = "test-sync-worker" flow_id = str(uuid4()) flow_name = "test-sync-flow" with mock.patch.dict( os.environ, { "PREFECT__WORKER_ID": worker_id, "PREFECT__WORKER_NAME": worker_name, "PREFECT__FLOW_ID": flow_id, "PREFECT__FLOW_NAME": flow_name, }, ): with mock.patch("httpx.Client.send", autospec=True) as send: send.return_value = RESPONSE_200 with PrefectHttpxSyncClient() as client: client.get(url="fake.url/fake/route") request = send.call_args[0][1] assert isinstance(request, httpx.Request) assert request.headers["X-Prefect-Worker-Id"] == worker_id assert request.headers["X-Prefect-Worker-Name"] == worker_name assert request.headers["X-Prefect-Flow-Id"] == flow_id assert request.headers["X-Prefect-Flow-Name"] == flow_name
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/client/test_attribution.py", "license": "Apache License 2.0", "lines": 240, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:tests/workers/test_worker_attribution.py
""" Tests for worker attribution environment variables. """ from unittest import mock from uuid import uuid4 from prefect.workers.base import BaseJobConfiguration class TestBaseAttributionEnvironment: """Tests for the _base_attribution_environment method.""" def test_returns_empty_dict_when_no_info(self): """When no info is provided, should return empty dict.""" env = BaseJobConfiguration._base_attribution_environment() assert env == {} def test_includes_worker_id_when_provided(self): """Worker ID should be included in environment.""" worker_id = uuid4() env = BaseJobConfiguration._base_attribution_environment(worker_id=worker_id) assert env["PREFECT__WORKER_ID"] == str(worker_id) def test_includes_worker_name_when_provided(self): """Worker name should be included in environment.""" env = BaseJobConfiguration._base_attribution_environment( worker_name="test-worker" ) assert env["PREFECT__WORKER_NAME"] == "test-worker" def test_includes_flow_id_from_flow_run(self): """Flow ID should be included from flow_run.""" from prefect.client.schemas import FlowRun flow_id = uuid4() flow_run = FlowRun(id=uuid4(), name="test-run", flow_id=flow_id) env = BaseJobConfiguration._base_attribution_environment(flow_run=flow_run) assert env["PREFECT__FLOW_ID"] == str(flow_id) def test_includes_flow_name_from_flow(self): """Flow name should be included from flow object.""" mock_flow = mock.MagicMock() mock_flow.name = "my-flow" env = BaseJobConfiguration._base_attribution_environment(flow=mock_flow) assert env["PREFECT__FLOW_NAME"] == "my-flow" def test_includes_deployment_id_from_flow_run(self): """Deployment ID should be included from flow_run.""" from prefect.client.schemas import FlowRun deployment_id = uuid4() flow_run = FlowRun( id=uuid4(), name="test-run", flow_id=uuid4(), deployment_id=deployment_id, ) env = BaseJobConfiguration._base_attribution_environment(flow_run=flow_run) assert env["PREFECT__DEPLOYMENT_ID"] == str(deployment_id) def 
test_includes_deployment_name_from_deployment(self): """Deployment name should be included from deployment object.""" mock_deployment = mock.MagicMock() mock_deployment.name = "my-deployment" env = BaseJobConfiguration._base_attribution_environment( deployment=mock_deployment ) assert env["PREFECT__DEPLOYMENT_NAME"] == "my-deployment" def test_includes_all_when_provided(self): """All attribution env vars should be included when all info is provided.""" from prefect.client.schemas import FlowRun worker_id = uuid4() flow_id = uuid4() deployment_id = uuid4() flow_run = FlowRun( id=uuid4(), name="test-run", flow_id=flow_id, deployment_id=deployment_id, ) mock_flow = mock.MagicMock() mock_flow.name = "my-flow" mock_deployment = mock.MagicMock() mock_deployment.name = "my-deployment" env = BaseJobConfiguration._base_attribution_environment( flow_run=flow_run, deployment=mock_deployment, flow=mock_flow, worker_id=worker_id, worker_name="test-worker", ) assert env["PREFECT__WORKER_ID"] == str(worker_id) assert env["PREFECT__WORKER_NAME"] == "test-worker" assert env["PREFECT__FLOW_ID"] == str(flow_id) assert env["PREFECT__FLOW_NAME"] == "my-flow" assert env["PREFECT__DEPLOYMENT_ID"] == str(deployment_id) assert env["PREFECT__DEPLOYMENT_NAME"] == "my-deployment" def test_none_worker_id_not_included(self): """When worker_id is None, it should not be in the environment.""" env = BaseJobConfiguration._base_attribution_environment( worker_id=None, worker_name="test-worker" ) assert "PREFECT__WORKER_ID" not in env assert env["PREFECT__WORKER_NAME"] == "test-worker" class TestPrepareForFlowRun: """Tests for prepare_for_flow_run including attribution.""" def test_includes_attribution_environment_variables(self): """Attribution environment variables should be included in the job configuration.""" from prefect.client.schemas import FlowRun config = BaseJobConfiguration(env={}) worker_id = uuid4() worker_name = "test-worker" flow_id = uuid4() flow_run = FlowRun( id=uuid4(), 
name="test-run", flow_id=flow_id, ) mock_flow = mock.MagicMock() mock_flow.name = "test-flow" config.prepare_for_flow_run( flow_run=flow_run, flow=mock_flow, worker_id=worker_id, worker_name=worker_name, ) assert config.env["PREFECT__WORKER_ID"] == str(worker_id) assert config.env["PREFECT__WORKER_NAME"] == worker_name assert config.env["PREFECT__FLOW_RUN_ID"] == str(flow_run.id) assert config.env["PREFECT__FLOW_ID"] == str(flow_id) assert config.env["PREFECT__FLOW_NAME"] == "test-flow" def test_includes_deployment_info(self): """Deployment info should be included in the job configuration.""" from prefect.client.schemas import FlowRun config = BaseJobConfiguration(env={}) deployment_id = uuid4() flow_run = FlowRun( id=uuid4(), name="test-run", flow_id=uuid4(), deployment_id=deployment_id, ) mock_deployment = mock.MagicMock() mock_deployment.name = "test-deployment" config.prepare_for_flow_run( flow_run=flow_run, deployment=mock_deployment, ) assert config.env["PREFECT__DEPLOYMENT_ID"] == str(deployment_id) assert config.env["PREFECT__DEPLOYMENT_NAME"] == "test-deployment" def test_backward_compatible_without_worker_id(self): """Should work without worker_id for backward compatibility.""" from prefect.client.schemas import FlowRun config = BaseJobConfiguration(env={}) flow_run = FlowRun( id=uuid4(), name="test-run", flow_id=uuid4(), ) config.prepare_for_flow_run( flow_run=flow_run, worker_name="test-worker", ) assert "PREFECT__WORKER_ID" not in config.env assert config.env["PREFECT__WORKER_NAME"] == "test-worker" assert config.env["PREFECT__FLOW_RUN_ID"] == str(flow_run.id)
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/workers/test_worker_attribution.py", "license": "Apache License 2.0", "lines": 154, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:src/prefect/_experimental/bundles/_file_collector.py
""" File collection utilities for bundles. This module provides the FileCollector class for collecting files matching user-provided patterns from a base directory. Handles single files, directory patterns, glob patterns, and negation patterns with gitignore-style matching. """ from __future__ import annotations import logging from dataclasses import dataclass, field from enum import Enum, auto from pathlib import Path import pathspec import pathspec.util from prefect._experimental.bundles._ignore_filter import ( IgnoreFilter, check_sensitive_files, emit_excluded_warning, ) from prefect._experimental.bundles._path_resolver import ( PathValidationError, normalize_path_separator, resolve_with_symlink_check, validate_path_input, ) logger = logging.getLogger(__name__) # Large file threshold - files exceeding this size trigger a warning LARGE_FILE_THRESHOLD = 10 * 1024 * 1024 # 10 MB # Default exclusion patterns - common generated/cached directories and files # that users typically don't want bundled. Uses gitignore-style patterns. DEFAULT_EXCLUSIONS = [ # Python "__pycache__/", "*.pyc", "*.pyo", "*.egg-info/", # Version control ".git/", ".hg/", ".svn/", # Package managers "node_modules/", ".venv/", "venv/", # IDEs ".idea/", ".vscode/", # OS files ".DS_Store", "Thumbs.db", ] class PatternType(Enum): """Type of file pattern.""" SINGLE_FILE = auto() DIRECTORY = auto() GLOB = auto() NEGATION = auto() # Characters that indicate a glob pattern GLOB_WILDCARDS = {"*", "?", "["} def _is_glob_pattern(pattern: str) -> bool: """ Check if a pattern contains glob wildcards. Patterns starting with '!' are negation patterns (handled separately), not glob patterns. Args: pattern: The pattern string to check. Returns: True if pattern contains glob wildcards, False otherwise. 
""" # Negation patterns are not globs if pattern.startswith("!"): return False # Check for any glob wildcard character return any(char in pattern for char in GLOB_WILDCARDS) @dataclass class CollectionResult: """ Result of file collection operation. Attributes: files: List of collected file paths (resolved, absolute). warnings: List of warning messages (e.g., zero-match patterns). total_size: Sum of all collected file sizes in bytes. patterns_matched: Mapping of pattern -> list of files that matched. """ files: list[Path] = field(default_factory=list) warnings: list[str] = field(default_factory=list) total_size: int = 0 patterns_matched: dict[str, list[Path]] = field(default_factory=dict) class FileCollector: """ Collects files matching user patterns from a base directory. Usage: collector = FileCollector(Path("/project/flows")) result = collector.collect(["config.yaml", "data/", "data/input.csv"]) for file in result.files: print(file) Security: - All paths are validated against directory traversal attacks - Symlinks are followed but must resolve within base directory - Absolute paths are rejected """ def __init__(self, base_dir: Path) -> None: """ Initialize FileCollector with a base directory. Args: base_dir: Base directory for file collection. All patterns are resolved relative to this directory. """ self.base_dir = base_dir.resolve() self._collected_files: set[Path] = set() # Track for deduplication def collect(self, patterns: list[str]) -> CollectionResult: """ Collect files matching the given patterns. Supports single file patterns, directory patterns, glob patterns, and negation patterns. Patterns are processed in order (gitignore-style): - Inclusion patterns add files to the collection - Negation patterns (starting with !) remove files from the current collection Pattern order matters: negation at position N only removes files that were already collected by patterns 1 to N-1. 
For example: - ["*.json", "!fixtures/*.json"] = all JSON except fixtures - ["!fixtures/*.json", "*.json"] = all JSON (negation had nothing to negate) Directory patterns collect all files recursively, excluding hidden files and common generated directories (see DEFAULT_EXCLUSIONS). Args: patterns: List of file patterns (e.g., ["config.yaml", "data/", "!*.test.py"]) Returns: CollectionResult with collected files, warnings, and metadata. Raises: PathValidationError: If any pattern attempts directory traversal, uses absolute paths, or contains invalid characters. """ result = CollectionResult() self._collected_files = set() # Reset deduplication tracking # Process patterns in order - order matters for negation for pattern in patterns: pattern_type = self._classify_pattern(pattern) if pattern_type == PatternType.NEGATION: # Negation: remove files from current collection excluded_files = self._process_negation(pattern) result.patterns_matched[pattern] = excluded_files # Remove excluded files from collection for file in excluded_files: self._collected_files.discard(file) # No warning for negation patterns that exclude nothing else: # Inclusion pattern if pattern_type == PatternType.GLOB: matched_files = self._collect_glob(pattern) elif pattern_type == PatternType.DIRECTORY: matched_files = self._collect_directory(pattern) else: matched_files = self._collect_single_file(pattern) result.patterns_matched[pattern] = matched_files if matched_files: for file in matched_files: # Deduplicate: only add if not already collected if file not in self._collected_files: self._collected_files.add(file) else: # Pattern matched no files - add warning and log warning_msg = f"Pattern '{pattern}' matched no files" result.warnings.append(warning_msg) logger.warning(warning_msg) # Build final file list with sizes for file in self._collected_files: result.files.append(file) file_size = file.stat().st_size result.total_size += file_size # Check for large files and emit warning if file_size > 
LARGE_FILE_THRESHOLD: size_mb = file_size / (1024 * 1024) logger.warning( f"Large file detected: {file.name} ({size_mb:.1f} MB) exceeds " f"{LARGE_FILE_THRESHOLD / (1024 * 1024):.0f} MB threshold" ) return result def _process_negation(self, pattern: str) -> list[Path]: """ Process a negation pattern and return files to exclude. Args: pattern: Negation pattern starting with '!' (e.g., "!*.test.py") Returns: List of files that match the negation pattern and should be excluded. """ # Extract the inner pattern (without the leading !) inner_pattern = pattern[1:] # Compile the pattern using gitignore-style matching spec = pathspec.PathSpec.from_lines("gitwildmatch", [inner_pattern]) # Find files in current collection that match the negation excluded: list[Path] = [] for file in self._collected_files: rel_path = str(file.relative_to(self.base_dir)) if spec.match_file(rel_path): excluded.append(file) return excluded def _classify_pattern(self, pattern: str) -> PatternType: """ Classify a pattern to determine how to process it. Classification order: 1. Negation patterns (starts with !) 2. Glob patterns (contains *, ?, [ but not starting with !) 3. Directory patterns (existing directory) 4. Single file patterns (default) Args: pattern: The pattern string to classify. Returns: PatternType indicating how to process the pattern. """ # Check for negation first if pattern.startswith("!"): return PatternType.NEGATION # Check for glob wildcards first (before checking filesystem) if _is_glob_pattern(pattern): return PatternType.GLOB # Normalize and check if it's a directory normalized = normalize_path_separator(pattern.rstrip("/")) target = self.base_dir / normalized if target.is_dir(): return PatternType.DIRECTORY return PatternType.SINGLE_FILE def _collect_directory(self, pattern: str) -> list[Path]: """ Collect all files from a directory recursively. Excludes hidden files (dotfiles), hidden directories, and files matching DEFAULT_EXCLUSIONS patterns. 
Args: pattern: Directory path pattern (e.g., "data/", "data") Returns: List of collected file paths within the directory. Raises: PathValidationError: If pattern fails validation. """ # Validate input first normalized = normalize_path_separator(pattern.rstrip("/")) validate_path_input(normalized) dir_path = self.base_dir / normalized # Check if directory exists if not dir_path.is_dir(): return [] # Use pathspec.util.iter_tree_files for recursive file iteration collected: list[Path] = [] try: for rel_path in pathspec.util.iter_tree_files(str(dir_path)): # rel_path is relative to dir_path file_path = dir_path / rel_path # Skip hidden files/directories (any path component starts with '.') path_parts = Path(rel_path).parts if any(part.startswith(".") for part in path_parts): continue # Check against DEFAULT_EXCLUSIONS if self._matches_exclusion(rel_path): continue # Validate the file (security check for symlinks) resolved = self._validate_file_path(file_path) if resolved is not None: collected.append(resolved) except OSError: # Directory iteration failed - treat as empty return [] return collected def _collect_glob(self, pattern: str) -> list[Path]: """ Collect files matching a glob pattern. Uses pathspec library with gitwildmatch syntax for gitignore-compatible pattern matching. Excludes hidden files and DEFAULT_EXCLUSIONS. Args: pattern: Glob pattern (e.g., "*.json", "**/*.csv", "data/*.txt") Returns: List of collected file paths matching the pattern. 
""" # Compile the pattern using gitignore-style matching spec = pathspec.PathSpec.from_lines("gitwildmatch", [pattern]) collected: list[Path] = [] try: # Iterate all files in base directory for rel_path in pathspec.util.iter_tree_files(str(self.base_dir)): # Skip hidden files/directories path_parts = Path(rel_path).parts if any(part.startswith(".") for part in path_parts): continue # Check against DEFAULT_EXCLUSIONS if self._matches_exclusion(rel_path): continue # Check if file matches the glob pattern if spec.match_file(rel_path): file_path = self.base_dir / rel_path # Validate the file (security check for symlinks) resolved = self._validate_file_path(file_path) if resolved is not None: collected.append(resolved) except OSError: # Iteration failed - treat as no matches return [] return collected def _matches_exclusion(self, rel_path: str) -> bool: """ Check if a relative path matches any DEFAULT_EXCLUSIONS pattern. Args: rel_path: Path relative to the collected directory. Returns: True if the path should be excluded, False otherwise. """ path_parts = Path(rel_path).parts filename = Path(rel_path).name for exclusion in DEFAULT_EXCLUSIONS: # Directory exclusion with glob (e.g., "*.egg-info/") if exclusion.endswith("/") and exclusion.startswith("*"): suffix = exclusion[1:-1] # e.g., ".egg-info" for part in path_parts: if part.endswith(suffix): return True # Directory exclusion (e.g., "__pycache__/") elif exclusion.endswith("/"): dir_name = exclusion.rstrip("/") if dir_name in path_parts: return True # Glob pattern for files (e.g., "*.pyc") elif exclusion.startswith("*"): suffix = exclusion[1:] # e.g., ".pyc" if filename.endswith(suffix): return True # Exact file match (e.g., ".DS_Store") elif filename == exclusion: return True return False def _validate_file_path(self, file_path: Path) -> Path | None: """ Validate a file path, checking for symlinks and containment. Args: file_path: Absolute file path to validate. 
Returns: Resolved file path if valid, None if file doesn't exist or is broken symlink. Raises: PathValidationError: If path escapes base directory. """ # Check for symlinks has_symlink = file_path.is_symlink() if not has_symlink: for parent in file_path.parents: if parent == self.base_dir: break if parent.is_symlink(): has_symlink = True break if has_symlink: try: return resolve_with_symlink_check(file_path, self.base_dir) except PathValidationError as e: if e.error_type in ("broken_symlink", "not_found"): return None raise # Non-symlink: just resolve and verify containment resolved = file_path.resolve(strict=False) if not resolved.is_relative_to(self.base_dir): raise PathValidationError( input_path=str(file_path), resolved_path=str(resolved), error_type="traversal", message=f"Path escapes base directory: {file_path}", suggestion="Use paths within the flow file's directory", ) if not resolved.exists(): return None return resolved def _collect_single_file(self, pattern: str) -> list[Path]: """ Collect a single file by its path pattern. Args: pattern: File path pattern (e.g., "config.yaml", "data/input.csv") Returns: List containing the resolved file path if it exists, empty list otherwise. Raises: PathValidationError: If pattern fails validation (traversal, absolute, etc.) 
""" # Validate input (raises on empty, whitespace, null bytes, absolute paths) validate_path_input(pattern) # Normalize path separators for cross-platform compatibility normalized = normalize_path_separator(pattern) # Construct target path target = self.base_dir / normalized # Check if path is a symlink and use symlink-aware resolution # Also check parent paths for symlinks has_symlink = target.is_symlink() if not has_symlink: # Check if any parent is a symlink for parent in target.parents: if parent == self.base_dir: break if parent.is_symlink(): has_symlink = True break if has_symlink: # Use symlink-aware resolution which validates traversal try: resolved = resolve_with_symlink_check(target, self.base_dir) return [resolved] except PathValidationError as e: if e.error_type in ("broken_symlink", "not_found"): # Treat broken symlink as missing file return [] raise # Non-symlink path: manually validate traversal resolved = target.resolve(strict=False) # Security check: resolved path must be within base directory if not resolved.is_relative_to(self.base_dir): raise PathValidationError( input_path=pattern, resolved_path=str(resolved), error_type="traversal", message=f"Path traversal detected: {pattern!r} resolves outside base directory", suggestion="Use paths within the flow file's directory", ) # Check if file exists if not resolved.exists(): return [] # File exists and is within bounds return [resolved] def format_collection_summary(result: CollectionResult) -> str: """ Format a human-readable summary of collection results. Args: result: CollectionResult from file collection. 
Returns: Human-readable summary string like "Collected 12 files (2.3 MB)" """ size_mb = result.total_size / (1024 * 1024) if size_mb >= 1: size_str = f"{size_mb:.1f} MB" else: size_kb = result.total_size / 1024 size_str = f"{size_kb:.1f} KB" file_word = "file" if len(result.files) == 1 else "files" return f"Collected {len(result.files)} {file_word} ({size_str})" def preview_collection( base_dir: Path, patterns: list[str], ) -> dict: """ Preview file collection without bundling. This function performs file collection but returns a preview dict instead of modifying any state. Useful for CLI preview commands and debugging pattern matching. Integrates .prefectignore filtering and sensitive file detection. Args: base_dir: Base directory for file collection. patterns: List of file patterns to match. Returns: Dictionary containing: - files: List of relative file paths (strings) - file_count: Number of files matched - total_size: Total size in bytes - total_size_human: Human-readable size string - warnings: List of warning messages - patterns_matched: Dict of pattern -> match count - excluded_by_ignore: List of files excluded by .prefectignore - sensitive_warnings: List of warnings for sensitive files """ collector = FileCollector(base_dir) result = collector.collect(patterns) # Apply ignore filtering ignore_filter = IgnoreFilter(base_dir) filter_result = ignore_filter.filter(result.files, explicit_patterns=patterns) # Check for sensitive files in included files sensitive = check_sensitive_files(filter_result.included_files, base_dir.resolve()) # Emit batched warning for excluded files emit_excluded_warning(filter_result.excluded_by_ignore, base_dir.resolve()) # Calculate total size from included files only total_size = sum(f.stat().st_size for f in filter_result.included_files) # Format human-readable size size_mb = total_size / (1024 * 1024) if size_mb >= 1: size_human = f"{size_mb:.1f} MB" else: size_kb = total_size / 1024 size_human = f"{size_kb:.1f} KB" return { 
"files": [ str(f.relative_to(base_dir.resolve())) for f in filter_result.included_files ], "file_count": len(filter_result.included_files), "total_size": total_size, "total_size_human": size_human, "warnings": result.warnings + filter_result.explicitly_excluded, "patterns_matched": {p: len(fs) for p, fs in result.patterns_matched.items()}, "excluded_by_ignore": [ str(f.relative_to(base_dir.resolve())) for f in filter_result.excluded_by_ignore ], "sensitive_warnings": sensitive, } __all__ = [ "CollectionResult", "DEFAULT_EXCLUSIONS", "FileCollector", "LARGE_FILE_THRESHOLD", "PatternType", "format_collection_summary", "preview_collection", ]
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/_experimental/bundles/_file_collector.py", "license": "Apache License 2.0", "lines": 498, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
PrefectHQ/prefect:src/prefect/_experimental/bundles/_ignore_filter.py
""" Ignore filtering utilities for bundles. This module provides the IgnoreFilter class for filtering collected files through cascading .prefectignore patterns. It supports gitignore-style pattern syntax and cascades patterns from project root and flow directory. Key features: - Cascading .prefectignore loading (project root + flow directory) - gitignore-compatible pattern matching via pathspec.GitIgnoreSpec - Auto-exclusion of .prefectignore file itself - Warnings for explicit includes that are excluded by ignore patterns """ from __future__ import annotations import logging from dataclasses import dataclass, field from pathlib import Path import pathspec logger = logging.getLogger(__name__) # Hardcoded patterns for sensitive files that should trigger warnings. # These files are still collected (warning only, not blocked), but users # are advised to add them to .prefectignore if they don't want them bundled. SENSITIVE_PATTERNS = [ ".env*", # Environment files "*.pem", # SSL certificates "*.key", # Private keys "credentials.*", # Credentials files "*_rsa", # RSA keys "*.p12", # PKCS12 certificates "secrets.*", # Secrets files ] @dataclass class FilterResult: """ Result of filtering files through .prefectignore patterns. Attributes: included_files: List of files that passed filtering. excluded_by_ignore: List of files excluded by .prefectignore patterns. explicitly_excluded: List of warning messages for user-explicit patterns that were excluded by .prefectignore. """ included_files: list[Path] = field(default_factory=list) excluded_by_ignore: list[Path] = field(default_factory=list) explicitly_excluded: list[str] = field(default_factory=list) def find_project_root(start_dir: Path) -> Path | None: """ Find the nearest parent directory containing pyproject.toml. Walks up the directory tree from start_dir looking for a directory containing pyproject.toml, which indicates a Python project root. Args: start_dir: Directory to start searching from. 
Returns: Path to project root directory, or None if not found. """ # Check start_dir and all parents for parent in [start_dir, *start_dir.parents]: if (parent / "pyproject.toml").exists(): return parent return None def _read_ignore_file(path: Path) -> list[str]: """ Read an ignore file, stripping comments and blank lines. Args: path: Path to the ignore file. Returns: List of pattern strings (no comments, no blanks). """ lines = path.read_text().splitlines() return [line for line in lines if line.strip() and not line.startswith("#")] def load_ignore_patterns(flow_dir: Path) -> list[str]: """ Load patterns from cascading .prefectignore files. Load order (patterns combined via union): 1. Project root .prefectignore (if found via pyproject.toml detection) 2. Flow directory .prefectignore (if different from project root) Missing .prefectignore files emit debug log, not warning. Args: flow_dir: Flow file's directory (base for relative paths). Returns: Combined list of pattern strings from all .prefectignore files. """ patterns: list[str] = [] # 1. Find and load project root .prefectignore project_root = find_project_root(flow_dir) if project_root is not None: project_ignore = project_root / ".prefectignore" if project_ignore.exists(): logger.debug(f"Loading .prefectignore from project root: {project_ignore}") patterns.extend(_read_ignore_file(project_ignore)) else: logger.debug(f"No .prefectignore found at project root: {project_root}") else: logger.debug(f"No project root found for: {flow_dir}") # 2. 
Load flow directory .prefectignore (if different from project root) flow_ignore = flow_dir / ".prefectignore" if flow_ignore.exists(): # Only load if different from project root's .prefectignore if project_root is None or flow_dir.resolve() != project_root.resolve(): logger.debug(f"Loading .prefectignore from flow directory: {flow_ignore}") patterns.extend(_read_ignore_file(flow_ignore)) else: logger.debug(f"No .prefectignore found in flow directory: {flow_dir}") return patterns class IgnoreFilter: """ Filters collected files through .prefectignore patterns. Loads patterns from cascading .prefectignore files (project root and flow directory) and provides filtering via gitignore-style pattern matching. Usage: filter = IgnoreFilter(Path("/project/flows")) result = filter.filter(collected_files) for file in result.included_files: print(file) The .prefectignore file itself is always auto-excluded from results. """ def __init__(self, flow_dir: Path) -> None: """ Initialize IgnoreFilter with flow directory. Args: flow_dir: Base directory for file collection (typically flow file's parent). """ self.flow_dir = flow_dir.resolve() self._spec: pathspec.GitIgnoreSpec | None = None self._load_patterns() def _load_patterns(self) -> None: """Load patterns and compile into pathspec.""" patterns = load_ignore_patterns(self.flow_dir) if patterns: self._spec = pathspec.GitIgnoreSpec.from_lines(patterns) def filter( self, files: list[Path], explicit_patterns: list[str] | None = None, ) -> FilterResult: """ Filter files through .prefectignore patterns. Args: files: List of file paths to filter. explicit_patterns: Optional list of patterns that the user explicitly included. If a file matches both an explicit pattern AND is excluded by .prefectignore, a warning is added to explicitly_excluded. Returns: FilterResult with included_files, excluded_by_ignore, and explicitly_excluded warnings. 
""" result = FilterResult() explicit_patterns = explicit_patterns or [] for file in files: # Make path relative to flow_dir for pattern matching try: rel_path = str(file.relative_to(self.flow_dir)) except ValueError: # File not under flow_dir - try with resolved paths try: rel_path = str(file.resolve().relative_to(self.flow_dir)) except ValueError: # Can't make relative - just use name rel_path = file.name # Auto-exclude .prefectignore files (hardcoded behavior) if rel_path.endswith(".prefectignore") or file.name == ".prefectignore": result.excluded_by_ignore.append(file) continue # Check against ignore spec excluded = False if self._spec is not None: if self._spec.match_file(rel_path): excluded = True if excluded: result.excluded_by_ignore.append(file) # Check if this file was explicitly included by user for pattern in explicit_patterns: # Check if the explicit pattern matches this file if self._matches_explicit_pattern(rel_path, pattern): result.explicitly_excluded.append( f"File '{rel_path}' was explicitly included but is " f"excluded by .prefectignore. Edit .prefectignore to include it." ) break else: result.included_files.append(file) return result def _matches_explicit_pattern(self, rel_path: str, pattern: str) -> bool: """ Check if a relative path matches an explicit pattern. Simple matching - checks if the pattern is the same as the relative path or if the path ends with the pattern. Args: rel_path: Relative file path (e.g., "data/input.csv") pattern: User-provided pattern (e.g., "input.csv" or "data/input.csv") Returns: True if the path matches the pattern. """ # Exact match if rel_path == pattern: return True # Pattern is just the filename if rel_path.endswith(f"/{pattern}") or rel_path == pattern: return True # Pattern matches filename if Path(rel_path).name == pattern: return True return False def check_sensitive_files(files: list[Path], base_dir: Path) -> list[str]: """ Check if any files match sensitive patterns and return warnings. 
Sensitive files are still collected (warning only, not blocked), but users are advised to add them to .prefectignore if they don't want them bundled. Args: files: List of file paths to check. base_dir: Base directory for relative path calculation. Returns: List of warning strings for files matching sensitive patterns. """ spec = pathspec.GitIgnoreSpec.from_lines(SENSITIVE_PATTERNS) warnings: list[str] = [] for file in files: rel_path = str(file.relative_to(base_dir)) if spec.match_file(rel_path): # Find which pattern matched for pattern in SENSITIVE_PATTERNS: single_spec = pathspec.GitIgnoreSpec.from_lines([pattern]) if single_spec.match_file(rel_path): warnings.append( f"{rel_path} matches sensitive pattern {pattern}. " "Consider adding to .prefectignore" ) break return warnings def emit_excluded_warning( excluded: list[Path], base_dir: Path, limit: int = 10 ) -> None: """ Emit a batched warning for files excluded by .prefectignore. The warning lists up to `limit` files, then shows "and N more" for any additional files. Args: excluded: List of excluded file paths. base_dir: Base directory for relative path calculation. limit: Maximum number of file names to show (default 10). """ if not excluded: return count = len(excluded) names = [str(f.relative_to(base_dir)) for f in excluded[:limit]] if count <= limit: logger.warning(f"{count} files excluded by .prefectignore: {', '.join(names)}") else: logger.warning( f"{count} files excluded by .prefectignore: " f"{', '.join(names)}...and {count - limit} more" ) __all__ = [ "FilterResult", "IgnoreFilter", "SENSITIVE_PATTERNS", "check_sensitive_files", "emit_excluded_warning", "find_project_root", "load_ignore_patterns", ]
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/_experimental/bundles/_ignore_filter.py", "license": "Apache License 2.0", "lines": 260, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
PrefectHQ/prefect:src/prefect/_experimental/bundles/_path_resolver.py
""" Path resolution and validation utilities for bundles. This module provides functions for validating user-provided paths before resolution and collection. Includes input validation (no filesystem access) and symlink resolution with security checks. """ from __future__ import annotations import errno from dataclasses import dataclass, field from pathlib import Path # Maximum depth for symlink chain traversal. # Provides defense-in-depth alongside OS-level ELOOP protection. MAX_SYMLINK_DEPTH = 10 class PathValidationError(Exception): """ Represents a single path validation error. This exception is raised when a path fails validation. It can also be collected in a PathValidationResult for batch validation. Attributes: input_path: The original user-provided path string. resolved_path: The resolved path, if resolution was attempted. error_type: Type of error (empty, whitespace, null_byte, absolute, duplicate, traversal, symlink_loop, broken_symlink, not_found). message: Human-readable error message. suggestion: Optional suggestion for fixing the error. """ def __init__( self, input_path: str, resolved_path: str | None, error_type: str, message: str, suggestion: str | None = None, ) -> None: self.input_path = input_path self.resolved_path = resolved_path self.error_type = error_type self.message = message self.suggestion = suggestion super().__init__(message) @dataclass class PathValidationResult: """ Result of batch path validation. Collects all validation errors before failing, allowing users to see all problems at once rather than fixing one at a time. Attributes: valid_paths: List of successfully validated paths. errors: List of validation errors encountered. 
""" valid_paths: list[Path] = field(default_factory=list) errors: list[PathValidationError] = field(default_factory=list) @property def has_errors(self) -> bool: """Return True if any validation errors were collected.""" return len(self.errors) > 0 def raise_if_errors(self) -> None: """ Raise PathResolutionError if any errors were collected. Raises: PathResolutionError: If self.errors is non-empty. """ if self.errors: raise PathResolutionError(self.errors) class PathResolutionError(Exception): """ Exception raised when path validation fails. Contains all collected validation errors, formatted for clear error reporting. Attributes: errors: List of PathValidationError instances. """ def __init__(self, errors: list[PathValidationError]) -> None: self.errors = errors message = self._format_message() super().__init__(message) def _format_message(self) -> str: """Format all errors into a single exception message.""" count = len(self.errors) plural = "" if count == 1 else "s" lines = [f"{count} path validation error{plural}:"] for error in self.errors: lines.append(f" - {error.input_path!r}: {error.message}") return "\n".join(lines) def validate_path_input(path: str) -> None: """ Validate a user-provided path string before resolution. Performs basic input validation without filesystem access: - Rejects empty strings - Rejects whitespace-only strings - Rejects strings containing null bytes - Rejects absolute paths Args: path: User-provided path string to validate. Raises: PathValidationError: If the path fails validation. 
""" # Check for empty string if not path: raise PathValidationError( input_path=path, resolved_path=None, error_type="empty", message="Path cannot be empty", ) # Check for whitespace-only if path.isspace(): raise PathValidationError( input_path=path, resolved_path=None, error_type="whitespace", message="Path cannot be whitespace only", ) # Check for null bytes if "\x00" in path: raise PathValidationError( input_path=path, resolved_path=None, error_type="null_byte", message="Path cannot contain null bytes", ) # Check for absolute paths # Handle both Unix and Windows absolute paths path_obj = Path(path) # On Unix, Path("C:\\path").is_absolute() returns False # We need to also check Windows-style paths explicitly is_absolute = path_obj.is_absolute() # Also check for Windows drive letters on any platform # A path like "C:\path" or "C:/path" is absolute if not is_absolute and len(path) >= 2: # Check for drive letter pattern (e.g., "C:" or "D:") if path[1] == ":" and path[0].isalpha(): is_absolute = True if is_absolute: raise PathValidationError( input_path=path, resolved_path=None, error_type="absolute", message=f"Absolute paths not allowed: {path!r}", suggestion="Use relative path from flow file directory", ) def check_for_duplicates(paths: list[str]) -> list[str]: """ Check for duplicate paths in a list. Normalizes paths for comparison by: - Converting backslashes to forward slashes - Stripping trailing slashes Does NOT normalize path components (like ../), so paths that would resolve to the same file but have different string representations are NOT detected as duplicates. Args: paths: List of path strings to check. Returns: List of duplicate path strings (second occurrence and beyond). Returns the original path strings, not normalized versions. 
""" seen: set[str] = set() duplicates: list[str] = [] for path in paths: # Normalize for comparison only normalized = path.replace("\\", "/").rstrip("/") if normalized in seen: duplicates.append(path) else: seen.add(normalized) return duplicates def normalize_path_separator(path: str) -> str: """ Normalize path separators to forward slashes. Converts Windows-style backslashes to POSIX-style forward slashes for cross-platform storage and comparison. Paths are stored in POSIX format for portability. Args: path: Path string to normalize. Returns: Path string with all backslashes converted to forward slashes. """ return path.replace("\\", "/") def resolve_secure_path(user_path: str, base_dir: Path) -> Path: """ Resolve a user-provided path securely relative to base directory. Validates the input path, normalizes path separators, resolves the path, and verifies that the resolved path is within the base directory. This prevents directory traversal attacks (e.g., ../../../etc/passwd). Args: user_path: User-provided path string (must be relative) base_dir: Base directory (e.g., flow file's parent directory) Returns: Resolved, validated Path Raises: PathValidationError: If path fails validation. Error types: - empty: Empty path string - whitespace: Whitespace-only path - null_byte: Path contains null bytes - absolute: Absolute path provided - not_found: Path does not exist - traversal: Path resolves outside base directory - os_error: OS-level error accessing path """ # 1. Run input validation first (rejects empty, whitespace, null bytes, absolute) validate_path_input(user_path) # 2. Normalize separators for cross-platform (\ -> /) normalized = normalize_path_separator(user_path) # 3. Construct target path relative to base directory target = base_dir / normalized # 4. Resolve the base directory (must be resolved for comparison) resolved_base = base_dir.resolve() # 5. Resolve target path WITHOUT existence check first (to detect traversal) # This normalizes .. and . 
components without requiring the path to exist resolved = target.resolve(strict=False) # 6. Security check BEFORE existence check: resolved path must be within base directory # This ensures traversal attempts are caught even if the target doesn't exist if not resolved.is_relative_to(resolved_base): raise PathValidationError( input_path=user_path, resolved_path=str(resolved), error_type="traversal", message=f"Path traversal detected: {user_path!r} resolves outside base directory", suggestion="Use paths within the flow file's directory", ) # 7. Now check existence if not resolved.exists(): raise PathValidationError( input_path=user_path, resolved_path=str(resolved), error_type="not_found", message=f"Path does not exist: {user_path!r}", suggestion="Check that the file or directory exists", ) return resolved def resolve_with_symlink_check( path: Path, base_dir: Path, max_depth: int = MAX_SYMLINK_DEPTH, ) -> Path: """ Resolve path with explicit symlink chain depth limit. This provides defense-in-depth alongside OS-level ELOOP protection. Symlinks are followed, but the final resolved path must be within base_dir. 
Args: path: Path to resolve (may contain symlinks) base_dir: Base directory for containment check max_depth: Maximum symlink chain depth (default: 10) Returns: Resolved path (symlinks followed) Raises: PathValidationError: If symlink chain too deep, broken, or escapes base dir """ resolved_base = base_dir.resolve() current = path depth = 0 seen_paths: set[Path] = set() # Manual symlink chain traversal for depth limiting while current.is_symlink(): # Circular reference check if current in seen_paths: raise PathValidationError( input_path=str(path), resolved_path=str(current), error_type="symlink_loop", message="Circular symlink detected", suggestion="Check for circular symlinks in your project", ) seen_paths.add(current) # Depth check if depth >= max_depth: raise PathValidationError( input_path=str(path), resolved_path=str(current), error_type="symlink_loop", message=f"Symlink chain exceeded {max_depth} levels", suggestion="Check for circular symlinks in your project", ) # Read symlink target try: target = current.readlink() except OSError as e: raise PathValidationError( input_path=str(path), resolved_path=str(current), error_type="broken_symlink", message=f"Cannot read symlink: {e}", suggestion="Check that the symlink target exists", ) # Resolve relative symlinks relative to symlink's parent if not target.is_absolute(): target = current.parent / target current = target depth += 1 # Final resolution with existence check try: resolved = current.resolve(strict=True) except FileNotFoundError: raise PathValidationError( input_path=str(path), resolved_path=str(current), error_type="broken_symlink", message="Symlink target does not exist", suggestion="Check that the symlink points to an existing file", ) except OSError as e: if e.errno == errno.ELOOP: raise PathValidationError( input_path=str(path), resolved_path=None, error_type="symlink_loop", message="Circular symlink detected by OS", suggestion="Check for circular symlinks in your project", ) raise # Containment 
check on final resolved path if not resolved.is_relative_to(resolved_base): raise PathValidationError( input_path=str(path), resolved_path=str(resolved), error_type="traversal", message="Symlink resolves outside base directory", suggestion="Symlinks must point to files within the flow file's directory", ) return resolved class PathResolver: """ Path resolver with caching for efficient repeated resolution. Usage: resolver = PathResolver(base_dir=Path("/project/flows")) result = resolver.resolve_all(["config.yaml", "data/input.csv"]) if result.has_errors: # Handle errors else: # Use result.valid_paths """ def __init__(self, base_dir: Path): """ Initialize resolver with base directory. Args: base_dir: Base directory for path resolution (typically flow file's parent) """ self.base_dir = base_dir.resolve() self._cache: dict[str, Path] = {} self._error_cache: dict[str, PathValidationError] = {} def resolve(self, user_path: str) -> Path: """ Resolve a single path with caching. Args: user_path: User-provided relative path Returns: Resolved Path Raises: PathValidationError: If validation fails """ # Check error cache first if user_path in self._error_cache: raise self._error_cache[user_path] # Check success cache if user_path in self._cache: return self._cache[user_path] # Perform resolution try: resolved = self._do_resolve(user_path) self._cache[user_path] = resolved return resolved except PathValidationError as e: self._error_cache[user_path] = e raise def _do_resolve(self, user_path: str) -> Path: """Internal resolution logic.""" # 1. Input validation validate_path_input(user_path) # 2. Normalize separators normalized = normalize_path_separator(user_path) # 3. Construct and resolve path target = self.base_dir / normalized # 4. 
Check if path involves symlinks and use appropriate resolver if target.is_symlink() or any( p.is_symlink() for p in target.parents if p != self.base_dir ): return resolve_with_symlink_check(target, self.base_dir) else: return resolve_secure_path(user_path, self.base_dir) def resolve_all(self, paths: list[str]) -> PathValidationResult: """ Resolve multiple paths, collecting all errors. Per user requirements: Collect all errors before failing. Does NOT stop on first error. Args: paths: List of user-provided paths Returns: PathValidationResult with valid_paths and errors """ result = PathValidationResult() # Check for duplicates first duplicates = check_for_duplicates(paths) for dup in duplicates: result.errors.append( PathValidationError( input_path=dup, resolved_path=None, error_type="duplicate", message=f"Duplicate path in input: {dup!r}", suggestion="Remove duplicate paths from include_files", ) ) # Resolve each unique path seen: set[str] = set() for path in paths: normalized = normalize_path_separator(path) if normalized in seen: continue # Skip duplicates (already reported) seen.add(normalized) try: resolved = self.resolve(path) result.valid_paths.append(resolved) except PathValidationError as e: result.errors.append(e) return result def clear_cache(self) -> None: """Clear resolution caches.""" self._cache.clear() self._error_cache.clear() def resolve_paths( paths: list[str], base_dir: Path, raise_on_errors: bool = True, ) -> PathValidationResult: """ Resolve multiple paths relative to a base directory. This is the main entry point for path resolution. It: 1. Validates all paths for security issues 2. Checks for duplicates 3. Resolves paths relative to base_dir 4. Collects ALL errors before optionally raising Args: paths: List of user-provided paths (files, directories, patterns for Phase 2) base_dir: Base directory for resolution (flow file's parent directory) raise_on_errors: If True, raise PathResolutionError when any validation fails. 
If False, return result with errors for inspection. Returns: PathValidationResult containing valid_paths and errors Raises: PathResolutionError: If raise_on_errors=True and any validation fails Example: # With raising (default) result = resolve_paths(["config.yaml", "data/"], flow_dir) # Without raising (for inspection) result = resolve_paths(["config.yaml", "missing.txt"], flow_dir, raise_on_errors=False) if result.has_errors: for error in result.errors: print(f"Warning: {error.message}") """ resolver = PathResolver(base_dir) result = resolver.resolve_all(paths) if raise_on_errors and result.has_errors: result.raise_if_errors() return result __all__ = [ "PathValidationError", "PathValidationResult", "PathResolutionError", "PathResolver", "resolve_paths", "resolve_secure_path", "resolve_with_symlink_check", "validate_path_input", "check_for_duplicates", "normalize_path_separator", "MAX_SYMLINK_DEPTH", ]
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/_experimental/bundles/_path_resolver.py", "license": "Apache License 2.0", "lines": 467, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
PrefectHQ/prefect:src/prefect/_experimental/bundles/_zip_builder.py
""" Zip archive builder for file bundles. This module provides the ZipBuilder class for packaging collected files into a sidecar zip archive with content-addressed storage key derivation using SHA256 hashes. """ from __future__ import annotations import hashlib import logging import shutil import tempfile import zipfile from dataclasses import dataclass from pathlib import Path logger = logging.getLogger(__name__) # Size of chunks for reading files when computing hash (64KB) HASH_CHUNK_SIZE = 65536 # Warning threshold for zip file size (50MB) ZIP_SIZE_WARNING_THRESHOLD = 50 * 1024 * 1024 @dataclass class ZipResult: """ Result of building a zip archive. Attributes: zip_path: Path to the temporary zip file. sha256_hash: Lowercase hex digest of the zip content. storage_key: Content-addressed storage key in format "files/{hash}.zip". size_bytes: Size of the zip file in bytes. """ zip_path: Path sha256_hash: str storage_key: str size_bytes: int class ZipBuilder: """ Builds zip archives from collected files with content-addressed naming. Files are added to the zip with their relative paths preserved, using forward slashes regardless of platform. The resulting zip uses SHA256 content hashing for deduplication across deployments. Usage: builder = ZipBuilder(Path("/project")) result = builder.build([ Path("/project/data/input.csv"), Path("/project/config.yaml"), ]) # Upload result.zip_path using result.storage_key builder.cleanup() The caller is responsible for calling cleanup() when done with the zip file. """ def __init__(self, base_dir: Path) -> None: """ Initialize ZipBuilder with a base directory. Args: base_dir: Base directory for computing relative paths. All files must be within this directory. """ self.base_dir = base_dir.resolve() self._temp_dir: str | None = None def build(self, files: list[Path]) -> ZipResult: """ Build a zip archive from the given files. 
Files are sorted by relative path before adding to ensure deterministic hash computation regardless of input order. Args: files: List of absolute file paths to include in the zip. All files must be within base_dir. Returns: ZipResult containing path to zip, hash, storage key, and size. """ # Sort files by relative path for deterministic hash sorted_files = sorted(files, key=lambda f: str(f.relative_to(self.base_dir))) # Create temp directory for zip file self._temp_dir = tempfile.mkdtemp(prefix="prefect-zip-") zip_path = Path(self._temp_dir) / "files.zip" # Build the zip with DEFLATED compression with zipfile.ZipFile(zip_path, "w", compression=zipfile.ZIP_DEFLATED) as zf: for file_path in sorted_files: # Compute relative path with forward slashes rel_path = file_path.relative_to(self.base_dir) arcname = str(rel_path).replace("\\", "/") zf.write(file_path, arcname) # Compute SHA256 hash using chunked reading sha256_hash = self._compute_hash(zip_path) # Get file size size_bytes = zip_path.stat().st_size # Emit warning if zip exceeds threshold if size_bytes >= ZIP_SIZE_WARNING_THRESHOLD: self._emit_size_warning(zip_path, sorted_files, size_bytes) # Build storage key storage_key = f"files/{sha256_hash}.zip" return ZipResult( zip_path=zip_path, sha256_hash=sha256_hash, storage_key=storage_key, size_bytes=size_bytes, ) def _compute_hash(self, zip_path: Path) -> str: """ Compute SHA256 hash of a file using chunked reading. Args: zip_path: Path to the file to hash. Returns: Lowercase hex digest of the SHA256 hash. """ hasher = hashlib.sha256() with open(zip_path, "rb") as f: while True: chunk = f.read(HASH_CHUNK_SIZE) if not chunk: break hasher.update(chunk) return hasher.hexdigest() def _emit_size_warning( self, zip_path: Path, files: list[Path], size_bytes: int ) -> None: """ Emit a warning about large zip file size. Args: zip_path: Path to the zip file. files: List of files included in the zip. size_bytes: Size of the zip file in bytes. 
""" size_mb = size_bytes / (1024 * 1024) # Get file sizes and sort by size descending file_sizes = [(f, f.stat().st_size) for f in files] file_sizes.sort(key=lambda x: x[1], reverse=True) # Format largest files (up to 5) largest = file_sizes[:5] file_info = ", ".join( f"{f.name} ({s / (1024 * 1024):.1f} MB)" for f, s in largest ) logger.warning( f"Zip file is large: {size_mb:.1f} MB exceeds " f"{ZIP_SIZE_WARNING_THRESHOLD / (1024 * 1024):.0f} MB threshold. " f"Largest files: {file_info}" ) def cleanup(self) -> None: """ Remove the temporary directory and zip file. Safe to call multiple times or before build(). """ if self._temp_dir is not None: shutil.rmtree(self._temp_dir, ignore_errors=True) self._temp_dir = None __all__ = [ "HASH_CHUNK_SIZE", "ZIP_SIZE_WARNING_THRESHOLD", "ZipBuilder", "ZipResult", ]
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/_experimental/bundles/_zip_builder.py", "license": "Apache License 2.0", "lines": 150, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
PrefectHQ/prefect:src/prefect/_experimental/bundles/_zip_extractor.py
""" Zip archive extractor for file bundles. This module provides the ZipExtractor class for extracting files from a sidecar zip archive to the working directory before flow execution. """ from __future__ import annotations __all__ = [ "ZipExtractor", ] import logging import zipfile from pathlib import Path logger = logging.getLogger(__name__) class ZipExtractor: """ Extracts files from a zip archive to the working directory. Handles: - Relative path preservation - File overwrite with warning - File/directory type mismatch errors - Parent directory creation Usage: extractor = ZipExtractor(Path("/path/to/files.zip")) extractor.extract() # Extracts to cwd extractor.cleanup() # Deletes the zip file """ def __init__(self, zip_path: Path | str) -> None: """ Initialize extractor with path to zip file. Args: zip_path: Path to the zip file to extract. """ self.zip_path = Path(zip_path) self._extracted = False def _validate_members(self, members: list[str], target_dir: Path) -> None: """Reject zip members with absolute paths or '..' traversal.""" resolved_target = target_dir.resolve() for member in members: member_path = Path(member) if member_path.is_absolute(): raise ValueError(f"Zip member has absolute path: {member!r}") if ".." 
in member_path.parts: raise ValueError(f"Zip member contains path traversal: {member!r}") resolved = (target_dir / member).resolve() if not resolved.is_relative_to(resolved_target): raise ValueError( f"Zip member resolves outside target directory: {member!r}" ) def _check_type_mismatch(self, member: str, target_dir: Path) -> None: """Check if extraction would cause file/dir type mismatch.""" dest_path = target_dir / member if not dest_path.exists(): return is_member_dir = member.endswith("/") is_dest_dir = dest_path.is_dir() if is_dest_dir and not is_member_dir: raise RuntimeError( f"Cannot extract file '{member}': destination exists as directory" ) if not is_dest_dir and is_member_dir: raise RuntimeError( f"Cannot extract directory '{member}': destination exists as file" ) def extract(self, target_dir: Path | None = None) -> list[Path]: """ Extract all files to target directory. Args: target_dir: Directory to extract to (defaults to cwd). Returns: List of extracted file paths. Raises: ValueError: If zip contains path traversal or absolute paths. RuntimeError: If file/directory type mismatch. zipfile.BadZipFile: If zip is corrupted. FileNotFoundError: If zip_path doesn't exist. """ target_dir = target_dir or Path.cwd() extracted_paths: list[Path] = [] with zipfile.ZipFile(self.zip_path, "r") as zf: members = zf.namelist() self._validate_members(members, target_dir) # Pre-check for type mismatches for member in members: self._check_type_mismatch(member, target_dir) # Log overwrites before extraction for member in members: dest_path = target_dir / member if dest_path.exists() and dest_path.is_file(): logger.warning(f"Overwriting existing file: {member}") # Extract all zf.extractall(target_dir) # Build extracted paths list for member in members: if not member.endswith("/"): # Skip directories extracted_paths.append(target_dir / member) self._extracted = True return extracted_paths def cleanup(self) -> None: """ Delete the zip file after successful extraction. 
Only deletes if extraction was successful. Logs warning if deletion fails. """ if not self._extracted: logger.warning( f"Skipping zip cleanup - extraction not completed: {self.zip_path}" ) return try: self.zip_path.unlink(missing_ok=True) logger.debug(f"Deleted zip file: {self.zip_path}") except OSError as e: logger.warning(f"Failed to delete zip file {self.zip_path}: {e}")
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/_experimental/bundles/_zip_extractor.py", "license": "Apache License 2.0", "lines": 113, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
PrefectHQ/prefect:tests/_experimental/bundles/test_bundles.py
"""Tests for SerializedBundle TypedDict with files_key field.""" from pathlib import Path from unittest.mock import MagicMock, patch import pytest class TestSerializedBundleFilesKey: """Tests for SerializedBundle files_key field.""" def test_serialized_bundle_accepts_files_key_none(self): """SerializedBundle should accept files_key=None for bundles without files.""" from prefect._experimental.bundles import SerializedBundle bundle: SerializedBundle = { "function": "serialized_function_data", "context": "serialized_context_data", "flow_run": {"id": "test-flow-run-id"}, "dependencies": "prefect>=3.0.0", "files_key": None, } assert bundle["files_key"] is None assert bundle["function"] == "serialized_function_data" def test_serialized_bundle_accepts_files_key_string(self): """SerializedBundle should accept files_key with a storage key path.""" from prefect._experimental.bundles import SerializedBundle bundle: SerializedBundle = { "function": "serialized_function_data", "context": "serialized_context_data", "flow_run": {"id": "test-flow-run-id"}, "dependencies": "prefect>=3.0.0", "files_key": "files/a1b2c3d4e5f6.zip", } assert bundle["files_key"] == "files/a1b2c3d4e5f6.zip" def test_serialized_bundle_without_files_key_is_valid(self): """Existing bundles without files_key field should remain valid (backward compat).""" from prefect._experimental.bundles import SerializedBundle # This should be valid - no files_key field at all bundle: SerializedBundle = { "function": "serialized_function_data", "context": "serialized_context_data", "flow_run": {"id": "test-flow-run-id"}, "dependencies": "prefect>=3.0.0", } assert bundle["function"] == "serialized_function_data" # files_key is not present assert "files_key" not in bundle def test_serialized_bundle_files_key_full_storage_path(self): """files_key should store full storage key path like 'files/abc123.zip'.""" from prefect._experimental.bundles import SerializedBundle # Full SHA256-based storage key full_key = ( 
"files/a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2.zip" ) bundle: SerializedBundle = { "function": "serialized_function_data", "context": "serialized_context_data", "flow_run": {"id": "test-flow-run-id"}, "dependencies": "prefect>=3.0.0", "files_key": full_key, } assert bundle["files_key"] == full_key assert bundle["files_key"].startswith("files/") assert bundle["files_key"].endswith(".zip") class TestCreateBundleForFlowRunFilesKey: """Tests for create_bundle_for_flow_run with files_key field.""" def test_create_bundle_returns_bundle_with_files_key(self, monkeypatch): """create_bundle_for_flow_run should return BundleCreationResult with bundle containing files_key.""" import prefect._experimental.bundles as bundles_module from prefect._experimental.bundles import create_bundle_for_flow_run from prefect.flows import flow # Mock subprocess to avoid actual uv pip freeze monkeypatch.setattr( bundles_module.subprocess, "check_output", lambda *args, **kwargs: b"prefect>=3.0.0\n", ) @flow def my_flow(): return "hello" # Create a mock flow run mock_flow_run = MagicMock() mock_flow_run.model_dump.return_value = {"id": "test-id"} result = create_bundle_for_flow_run(my_flow, mock_flow_run) # Result should have bundle and zip_path keys assert "bundle" in result assert "zip_path" in result # Bundle should have files_key field assert "files_key" in result["bundle"] # Default should be None (no files included yet) assert result["bundle"]["files_key"] is None assert result["zip_path"] is None def test_create_bundle_files_key_defaults_to_none(self, monkeypatch): """create_bundle_for_flow_run should default files_key to None.""" import prefect._experimental.bundles as bundles_module from prefect._experimental.bundles import create_bundle_for_flow_run from prefect.flows import flow monkeypatch.setattr( bundles_module.subprocess, "check_output", lambda *args, **kwargs: b"", ) @flow def simple_flow(): pass mock_flow_run = MagicMock() 
mock_flow_run.model_dump.return_value = {"id": "run-123"} result = create_bundle_for_flow_run(simple_flow, mock_flow_run) assert result["bundle"].get("files_key") is None assert result["zip_path"] is None class TestCreateBundleForFlowRunIncludeFiles: """Tests for include_files integration in create_bundle_for_flow_run.""" @pytest.fixture def project_with_files(self, tmp_path: Path) -> Path: """Create a project directory with files and a flow.""" # Create files to include (tmp_path / "config.yaml").write_text("key: value") data_dir = tmp_path / "data" data_dir.mkdir() (data_dir / "input.csv").write_text("a,b\n1,2") # Create flow file flow_file = tmp_path / "my_flow.py" flow_file.write_text( """ from prefect import flow @flow def my_flow(): pass """ ) return tmp_path def test_files_key_populated_when_include_files_set( self, project_with_files: Path, monkeypatch ) -> None: """files_key is populated when flow has include_files.""" import prefect._experimental.bundles as bundles_module from prefect._experimental.bundles import create_bundle_for_flow_run from prefect.flows import Flow # Mock subprocess to avoid actual uv pip freeze monkeypatch.setattr( bundles_module.subprocess, "check_output", lambda *args, **kwargs: b"prefect>=3.0.0\n", ) # Create a flow with include_files @Flow def test_flow(): pass # Set include_files attribute (as @ecs decorator would) test_flow.include_files = ["config.yaml", "data/"] # Mock inspect.getfile to return our flow file path flow_file = project_with_files / "my_flow.py" with patch( "prefect._experimental.bundles.inspect.getfile", return_value=str(flow_file) ): flow_run = MagicMock() flow_run.model_dump.return_value = {"id": "test-123"} result = create_bundle_for_flow_run( flow=test_flow, flow_run=flow_run, ) # Verify files_key is populated assert result["bundle"]["files_key"] is not None assert result["bundle"]["files_key"].startswith("files/") assert result["bundle"]["files_key"].endswith(".zip") # Verify zip_path is returned assert 
result["zip_path"] is not None assert result["zip_path"].exists() # Cleanup if result["zip_path"]: result["zip_path"].unlink(missing_ok=True) result["zip_path"].parent.rmdir() def test_files_key_none_when_no_include_files(self, monkeypatch) -> None: """files_key is None when flow has no include_files.""" import prefect._experimental.bundles as bundles_module from prefect._experimental.bundles import create_bundle_for_flow_run from prefect.flows import Flow monkeypatch.setattr( bundles_module.subprocess, "check_output", lambda *args, **kwargs: b"", ) @Flow def test_flow(): pass # No include_files attribute flow_run = MagicMock() flow_run.model_dump.return_value = {"id": "test-123"} result = create_bundle_for_flow_run( flow=test_flow, flow_run=flow_run, ) assert result["bundle"]["files_key"] is None assert result["zip_path"] is None def test_files_key_none_when_include_files_empty(self, monkeypatch) -> None: """files_key is None when include_files is empty list.""" import prefect._experimental.bundles as bundles_module from prefect._experimental.bundles import create_bundle_for_flow_run from prefect.flows import Flow monkeypatch.setattr( bundles_module.subprocess, "check_output", lambda *args, **kwargs: b"", ) @Flow def test_flow(): pass test_flow.include_files = [] flow_run = MagicMock() flow_run.model_dump.return_value = {"id": "test-123"} result = create_bundle_for_flow_run( flow=test_flow, flow_run=flow_run, ) assert result["bundle"]["files_key"] is None assert result["zip_path"] is None
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/_experimental/bundles/test_bundles.py", "license": "Apache License 2.0", "lines": 211, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:tests/_experimental/bundles/test_file_collector.py
""" Tests for FileCollector single file collection. This module tests the FileCollector class's ability to collect single files by pattern. Tests cover: - Collecting existing single files - Handling non-existent files (warning, no error) - Directory traversal protection - CollectionResult dataclass behavior """ from __future__ import annotations import platform import pytest from prefect._experimental.bundles._file_collector import ( CollectionResult, FileCollector, ) from prefect._experimental.bundles._path_resolver import PathValidationError class TestCollectionResult: """Tests for CollectionResult dataclass.""" def test_create_empty_result(self): """Test creating an empty CollectionResult.""" result = CollectionResult() assert result.files == [] assert result.warnings == [] assert result.total_size == 0 assert result.patterns_matched == {} def test_create_with_files(self, tmp_path): """Test creating a CollectionResult with files.""" file1 = tmp_path / "a.txt" file2 = tmp_path / "b.txt" file1.touch() file2.touch() result = CollectionResult( files=[file1, file2], warnings=[], total_size=0, patterns_matched={"a.txt": [file1], "b.txt": [file2]}, ) assert len(result.files) == 2 assert file1 in result.files assert file2 in result.files def test_create_with_warnings(self): """Test creating a CollectionResult with warnings.""" result = CollectionResult( files=[], warnings=["Pattern 'missing.txt' matched no files"], total_size=0, patterns_matched={"missing.txt": []}, ) assert len(result.warnings) == 1 assert "missing.txt" in result.warnings[0] def test_total_size_tracking(self, tmp_path): """Test that total_size is tracked correctly.""" file = tmp_path / "data.txt" file.write_text("12345") # 5 bytes result = CollectionResult( files=[file], warnings=[], total_size=5, patterns_matched={"data.txt": [file]}, ) assert result.total_size == 5 def test_patterns_matched_tracking(self, tmp_path): """Test patterns_matched tracks which files each pattern matched.""" file = tmp_path 
/ "config.yaml" file.touch() result = CollectionResult( files=[file], warnings=[], total_size=0, patterns_matched={"config.yaml": [file]}, ) assert "config.yaml" in result.patterns_matched assert result.patterns_matched["config.yaml"] == [file] class TestFileCollector: """Tests for FileCollector class.""" def test_init_with_base_dir(self, tmp_path): """Test FileCollector initializes with base directory.""" collector = FileCollector(tmp_path) assert collector.base_dir == tmp_path.resolve() def test_collect_single_existing_file(self, tmp_path): """Test collecting a single existing file.""" # Setup: create a config file config = tmp_path / "config.yaml" config.write_text("key: value") # Collect the file collector = FileCollector(tmp_path) result = collector.collect(["config.yaml"]) # Verify assert len(result.files) == 1 assert result.files[0] == config.resolve() assert result.warnings == [] assert "config.yaml" in result.patterns_matched assert result.patterns_matched["config.yaml"] == [config.resolve()] def test_collect_single_file_tracks_size(self, tmp_path): """Test that file size is tracked in total_size.""" # Setup: create file with known content data_file = tmp_path / "data.txt" data_file.write_text("hello world") # 11 bytes collector = FileCollector(tmp_path) result = collector.collect(["data.txt"]) assert result.total_size == 11 def test_collect_missing_file_adds_warning(self, tmp_path): """Test that missing file adds warning instead of raising.""" collector = FileCollector(tmp_path) result = collector.collect(["nonexistent.txt"]) # Should have warning, not raise assert len(result.files) == 0 assert len(result.warnings) == 1 assert "nonexistent.txt" in result.warnings[0] assert "matched no files" in result.warnings[0].lower() # Pattern should be tracked with empty match list assert result.patterns_matched["nonexistent.txt"] == [] def test_collect_multiple_files(self, tmp_path): """Test collecting multiple single files.""" # Setup: create multiple files file1 = 
tmp_path / "a.txt" file2 = tmp_path / "b.txt" file1.write_text("aaa") file2.write_text("bbbbb") collector = FileCollector(tmp_path) result = collector.collect(["a.txt", "b.txt"]) assert len(result.files) == 2 assert file1.resolve() in result.files assert file2.resolve() in result.files assert result.total_size == 8 # 3 + 5 def test_collect_mix_of_existing_and_missing(self, tmp_path): """Test collecting mix of existing and missing files.""" # Setup: only create one file existing = tmp_path / "exists.txt" existing.write_text("content") collector = FileCollector(tmp_path) result = collector.collect(["exists.txt", "missing.txt"]) # Should have one file and one warning assert len(result.files) == 1 assert existing.resolve() in result.files assert len(result.warnings) == 1 assert "missing.txt" in result.warnings[0] def test_collect_file_in_subdirectory(self, tmp_path): """Test collecting a file in a subdirectory.""" # Setup: create nested file data_dir = tmp_path / "data" data_dir.mkdir() nested_file = data_dir / "input.csv" nested_file.write_text("col1,col2") collector = FileCollector(tmp_path) result = collector.collect(["data/input.csv"]) assert len(result.files) == 1 assert result.files[0] == nested_file.resolve() def test_collect_traversal_raises_error(self, tmp_path): """Test that directory traversal raises PathValidationError.""" collector = FileCollector(tmp_path) with pytest.raises(PathValidationError) as exc_info: collector.collect(["../escape.txt"]) assert exc_info.value.error_type == "traversal" def test_collect_multiple_traversals_raises_on_first(self, tmp_path): """Test that traversal is caught even with valid files.""" # Setup: create a valid file valid = tmp_path / "valid.txt" valid.touch() collector = FileCollector(tmp_path) # Traversal pattern first should raise immediately with pytest.raises(PathValidationError) as exc_info: collector.collect(["../escape.txt", "valid.txt"]) assert exc_info.value.error_type == "traversal" def 
test_collect_absolute_path_raises_error(self, tmp_path): """Test that absolute paths raise PathValidationError.""" collector = FileCollector(tmp_path) with pytest.raises(PathValidationError) as exc_info: collector.collect(["/etc/passwd"]) assert exc_info.value.error_type == "absolute" def test_collect_empty_pattern_raises_error(self, tmp_path): """Test that empty pattern raises PathValidationError.""" collector = FileCollector(tmp_path) with pytest.raises(PathValidationError) as exc_info: collector.collect([""]) assert exc_info.value.error_type == "empty" def test_collect_dotfile(self, tmp_path): """Test collecting a dotfile.""" dotfile = tmp_path / ".gitignore" dotfile.write_text("*.pyc") collector = FileCollector(tmp_path) result = collector.collect([".gitignore"]) assert len(result.files) == 1 assert result.files[0] == dotfile.resolve() def test_collect_file_with_backslash_path(self, tmp_path): """Test collecting file with Windows-style backslash path.""" # Setup: create nested file data_dir = tmp_path / "data" data_dir.mkdir() nested_file = data_dir / "file.txt" nested_file.write_text("content") collector = FileCollector(tmp_path) result = collector.collect(["data\\file.txt"]) assert len(result.files) == 1 assert result.files[0] == nested_file.resolve() def test_collect_empty_list(self, tmp_path): """Test collecting with empty pattern list.""" collector = FileCollector(tmp_path) result = collector.collect([]) assert result.files == [] assert result.warnings == [] assert result.total_size == 0 assert result.patterns_matched == {} # Skip symlink tests on Windows since symlink creation requires admin privileges symlink_skip = pytest.mark.skipif( platform.system() == "Windows", reason="Symlink tests require admin privileges on Windows", ) @symlink_skip class TestFileCollectorSymlinks: """Tests for FileCollector symlink handling.""" def test_collect_symlink_within_base_dir(self, tmp_path): """Test collecting a symlink that points within base dir.""" # Setup: create 
target and symlink target = tmp_path / "actual.txt" target.write_text("content") link = tmp_path / "link.txt" link.symlink_to(target) collector = FileCollector(tmp_path) result = collector.collect(["link.txt"]) # Should resolve to the target file assert len(result.files) == 1 assert result.files[0] == target.resolve() def test_collect_symlink_escaping_base_dir_raises(self, tmp_path): """Test that symlink escaping base dir raises PathValidationError.""" # Setup: create target outside base and symlink inside base_dir = tmp_path / "project" base_dir.mkdir() outside = tmp_path / "outside.txt" outside.write_text("secret") link = base_dir / "sneaky.txt" link.symlink_to(outside) collector = FileCollector(base_dir) with pytest.raises(PathValidationError) as exc_info: collector.collect(["sneaky.txt"]) assert exc_info.value.error_type == "traversal" def test_collect_broken_symlink_adds_warning(self, tmp_path): """Test that broken symlink adds warning instead of raising.""" # Setup: create a symlink to nonexistent target link = tmp_path / "broken.txt" link.symlink_to(tmp_path / "nonexistent.txt") collector = FileCollector(tmp_path) result = collector.collect(["broken.txt"]) # Should be treated as missing file assert len(result.files) == 0 assert len(result.warnings) == 1 assert "broken.txt" in result.warnings[0] class TestFileCollectorDirectory: """Tests for FileCollector directory collection.""" def test_collect_directory_gets_all_files(self, tmp_path): """Test collecting directory gets all files recursively.""" # Setup: create directory with files data_dir = tmp_path / "data" data_dir.mkdir() (data_dir / "a.txt").write_text("aaa") (data_dir / "b.txt").write_text("bbb") subdir = data_dir / "sub" subdir.mkdir() (subdir / "c.txt").write_text("ccc") collector = FileCollector(tmp_path) result = collector.collect(["data/"]) # Should collect all 3 files assert len(result.files) == 3 assert result.total_size == 9 # 3 + 3 + 3 def test_collect_directory_without_trailing_slash(self, 
tmp_path): """Test that directory pattern works without trailing slash.""" data_dir = tmp_path / "data" data_dir.mkdir() (data_dir / "file.txt").write_text("content") collector = FileCollector(tmp_path) result = collector.collect(["data"]) assert len(result.files) == 1 def test_collect_directory_excludes_hidden_files(self, tmp_path): """Test that hidden files (dotfiles) are excluded by default.""" data_dir = tmp_path / "data" data_dir.mkdir() (data_dir / "visible.txt").write_text("visible") (data_dir / ".hidden").write_text("hidden") collector = FileCollector(tmp_path) result = collector.collect(["data/"]) # Only visible file should be collected assert len(result.files) == 1 assert result.files[0].name == "visible.txt" def test_collect_directory_excludes_hidden_directories(self, tmp_path): """Test that files in hidden directories are excluded.""" data_dir = tmp_path / "data" data_dir.mkdir() (data_dir / "visible.txt").write_text("visible") hidden_dir = data_dir / ".hidden" hidden_dir.mkdir() (hidden_dir / "secret.txt").write_text("secret") collector = FileCollector(tmp_path) result = collector.collect(["data/"]) # Only visible file should be collected assert len(result.files) == 1 assert result.files[0].name == "visible.txt" def test_collect_directory_excludes_pycache(self, tmp_path): """Test that __pycache__ directories are excluded.""" data_dir = tmp_path / "data" data_dir.mkdir() (data_dir / "module.py").write_text("# code") pycache = data_dir / "__pycache__" pycache.mkdir() (pycache / "module.cpython-311.pyc").write_bytes(b"\x00\x00") collector = FileCollector(tmp_path) result = collector.collect(["data/"]) # Only module.py should be collected assert len(result.files) == 1 assert result.files[0].name == "module.py" def test_collect_directory_excludes_pyc_files(self, tmp_path): """Test that .pyc files are excluded even outside __pycache__.""" data_dir = tmp_path / "data" data_dir.mkdir() (data_dir / "module.py").write_text("# code") (data_dir / 
"old.pyc").write_bytes(b"\x00\x00") collector = FileCollector(tmp_path) result = collector.collect(["data/"]) # Only module.py should be collected assert len(result.files) == 1 assert result.files[0].name == "module.py" def test_collect_directory_excludes_node_modules(self, tmp_path): """Test that node_modules directories are excluded.""" project_dir = tmp_path / "project" project_dir.mkdir() (project_dir / "index.js").write_text("// code") node_modules = project_dir / "node_modules" node_modules.mkdir() dep = node_modules / "some-dep" dep.mkdir() (dep / "index.js").write_text("// dep") collector = FileCollector(tmp_path) result = collector.collect(["project/"]) # Only project index.js should be collected assert len(result.files) == 1 assert result.files[0].name == "index.js" def test_collect_directory_excludes_venv(self, tmp_path): """Test that venv and .venv directories are excluded.""" project_dir = tmp_path / "project" project_dir.mkdir() (project_dir / "main.py").write_text("# code") venv = project_dir / "venv" venv.mkdir() (venv / "pyvenv.cfg").write_text("home = /usr") dotvenv = project_dir / ".venv" dotvenv.mkdir() (dotvenv / "pyvenv.cfg").write_text("home = /usr") collector = FileCollector(tmp_path) result = collector.collect(["project/"]) # Only main.py should be collected assert len(result.files) == 1 assert result.files[0].name == "main.py" def test_collect_directory_excludes_ide_directories(self, tmp_path): """Test that .idea and .vscode directories are excluded.""" project_dir = tmp_path / "project" project_dir.mkdir() (project_dir / "main.py").write_text("# code") idea = project_dir / ".idea" idea.mkdir() (idea / "workspace.xml").write_text("<xml>") vscode = project_dir / ".vscode" vscode.mkdir() (vscode / "settings.json").write_text("{}") collector = FileCollector(tmp_path) result = collector.collect(["project/"]) # Only main.py should be collected (IDE dirs are hidden anyway) assert len(result.files) == 1 assert result.files[0].name == "main.py" 
def test_collect_empty_directory_adds_warning(self, tmp_path): """Test that empty directory produces warning, not error.""" empty_dir = tmp_path / "empty" empty_dir.mkdir() collector = FileCollector(tmp_path) result = collector.collect(["empty/"]) assert len(result.files) == 0 assert len(result.warnings) == 1 assert "empty" in result.warnings[0].lower() def test_collect_directory_with_only_hidden_files_adds_warning(self, tmp_path): """Test directory with only hidden files produces warning.""" data_dir = tmp_path / "data" data_dir.mkdir() (data_dir / ".hidden1").write_text("hidden") (data_dir / ".hidden2").write_text("hidden") collector = FileCollector(tmp_path) result = collector.collect(["data/"]) # All files excluded, should warn assert len(result.files) == 0 assert len(result.warnings) == 1 def test_collect_directory_tracks_pattern(self, tmp_path): """Test that directory pattern is tracked in patterns_matched.""" data_dir = tmp_path / "data" data_dir.mkdir() (data_dir / "a.txt").write_text("a") (data_dir / "b.txt").write_text("b") collector = FileCollector(tmp_path) result = collector.collect(["data/"]) assert "data/" in result.patterns_matched assert len(result.patterns_matched["data/"]) == 2 def test_collect_directory_excludes_egg_info(self, tmp_path): """Test that *.egg-info directories are excluded.""" project_dir = tmp_path / "project" project_dir.mkdir() (project_dir / "setup.py").write_text("# setup") egg_info = project_dir / "mypackage.egg-info" egg_info.mkdir() (egg_info / "PKG-INFO").write_text("Name: mypackage") collector = FileCollector(tmp_path) result = collector.collect(["project/"]) # Only setup.py should be collected assert len(result.files) == 1 assert result.files[0].name == "setup.py" def test_collect_directory_excludes_git_directory(self, tmp_path): """Test that .git directories are excluded.""" project_dir = tmp_path / "project" project_dir.mkdir() (project_dir / "main.py").write_text("# code") git_dir = project_dir / ".git" git_dir.mkdir() 
(git_dir / "config").write_text("[core]") collector = FileCollector(tmp_path) result = collector.collect(["project/"]) # Only main.py should be collected (.git is hidden anyway) assert len(result.files) == 1 assert result.files[0].name == "main.py" def test_collect_directory_nested_hidden_component(self, tmp_path): """Test that files under any hidden path component are excluded.""" data_dir = tmp_path / "data" data_dir.mkdir() visible = data_dir / "visible" visible.mkdir() (visible / "file.txt").write_text("ok") hidden = data_dir / ".hidden" hidden.mkdir() nested = hidden / "nested" nested.mkdir() (nested / "secret.txt").write_text("secret") collector = FileCollector(tmp_path) result = collector.collect(["data/"]) # Only data/visible/file.txt should be collected assert len(result.files) == 1 assert "visible" in str(result.files[0]) def test_collect_directory_excludes_ds_store(self, tmp_path): """Test that .DS_Store files are excluded.""" data_dir = tmp_path / "data" data_dir.mkdir() (data_dir / "file.txt").write_text("content") (data_dir / ".DS_Store").write_bytes(b"\x00\x00\x00\x01") collector = FileCollector(tmp_path) result = collector.collect(["data/"]) # Only file.txt should be collected assert len(result.files) == 1 assert result.files[0].name == "file.txt" def test_collect_directory_deduplicates_files(self, tmp_path): """Test that same file from overlapping patterns is deduplicated.""" data_dir = tmp_path / "data" data_dir.mkdir() file = data_dir / "file.txt" file.write_text("content") collector = FileCollector(tmp_path) # Collect the same file via directory and direct path result = collector.collect(["data/", "data/file.txt"]) # File should only appear once assert len(result.files) == 1 def test_collect_nonexistent_directory_adds_warning(self, tmp_path): """Test that non-existent directory treated as missing pattern.""" collector = FileCollector(tmp_path) result = collector.collect(["nonexistent/"]) assert len(result.files) == 0 assert len(result.warnings) 
== 1 assert "nonexistent" in result.warnings[0] class TestFileCollectorGlob: """Tests for FileCollector glob pattern matching.""" def test_collect_glob_star_matches_files_in_base_dir(self, tmp_path): """Test that *.json matches .json files in base directory.""" # Setup: create json files (tmp_path / "a.json").write_text("{}") (tmp_path / "b.json").write_text("{}") (tmp_path / "c.txt").write_text("not json") collector = FileCollector(tmp_path) result = collector.collect(["*.json"]) # Should match both json files assert len(result.files) == 2 file_names = {f.name for f in result.files} assert file_names == {"a.json", "b.json"} assert result.warnings == [] def test_collect_glob_recursive_matches_nested_files(self, tmp_path): """Test that **/*.csv matches .csv files in any subdirectory.""" # Setup: create nested csv files data_dir = tmp_path / "data" data_dir.mkdir() (data_dir / "x.csv").write_text("col1,col2") nested_dir = tmp_path / "nested" / "deep" nested_dir.mkdir(parents=True) (nested_dir / "y.csv").write_text("col1,col2") # Also create csv at root (should not match **/*.csv per gitwildmatch) (tmp_path / "root.csv").write_text("col1,col2") collector = FileCollector(tmp_path) result = collector.collect(["**/*.csv"]) # Should match nested csv files (gitwildmatch **/ matches any directory) file_names = {f.name for f in result.files} assert "x.csv" in file_names assert "y.csv" in file_names def test_collect_glob_subdir_pattern(self, tmp_path): """Test that data/*.txt matches .txt files directly in data/.""" # Setup: create txt files data_dir = tmp_path / "data" data_dir.mkdir() (data_dir / "a.txt").write_text("a") (data_dir / "b.txt").write_text("b") # Nested file should NOT match data/*.txt nested = data_dir / "sub" nested.mkdir() (nested / "c.txt").write_text("c") collector = FileCollector(tmp_path) result = collector.collect(["data/*.txt"]) # Should only match direct children assert len(result.files) == 2 file_names = {f.name for f in result.files} assert 
file_names == {"a.txt", "b.txt"} def test_collect_glob_question_mark_wildcard(self, tmp_path): """Test that ?? matches exactly two characters.""" # Setup: create files with varying name lengths data_dir = tmp_path / "data" data_dir.mkdir() (data_dir / "ab.txt").write_text("ab") # matches (data_dir / "cd.txt").write_text("cd") # matches (data_dir / "a.txt").write_text("a") # doesn't match (1 char) (data_dir / "abc.txt").write_text("abc") # doesn't match (3 chars) collector = FileCollector(tmp_path) result = collector.collect(["data/??.txt"]) assert len(result.files) == 2 file_names = {f.name for f in result.files} assert file_names == {"ab.txt", "cd.txt"} def test_collect_glob_zero_matches_produces_warning(self, tmp_path): """Test that glob matching no files adds warning.""" # Setup: create files that don't match pattern (tmp_path / "file.txt").write_text("content") collector = FileCollector(tmp_path) result = collector.collect(["*.missing"]) # Should have warning, no files assert len(result.files) == 0 assert len(result.warnings) == 1 assert "*.missing" in result.warnings[0] assert "matched no files" in result.warnings[0].lower() def test_collect_glob_excludes_hidden_files(self, tmp_path): """Test that glob results exclude hidden files.""" # Setup: create visible and hidden files (tmp_path / "visible.txt").write_text("visible") (tmp_path / ".hidden.txt").write_text("hidden") collector = FileCollector(tmp_path) result = collector.collect(["*.txt"]) # Should only match visible file assert len(result.files) == 1 assert result.files[0].name == "visible.txt" def test_collect_glob_excludes_files_in_hidden_dirs(self, tmp_path): """Test that glob results exclude files in hidden directories.""" # Setup: create files in visible and hidden directories visible = tmp_path / "visible" visible.mkdir() (visible / "file.txt").write_text("visible") hidden = tmp_path / ".hidden" hidden.mkdir() (hidden / "file.txt").write_text("hidden") collector = FileCollector(tmp_path) result = 
collector.collect(["**/*.txt"]) # Should only match file in visible directory assert len(result.files) == 1 assert "visible" in str(result.files[0]) def test_collect_glob_excludes_pycache(self, tmp_path): """Test that glob results exclude __pycache__ directories.""" # Setup: create files including in __pycache__ (tmp_path / "module.py").write_text("# module") pycache = tmp_path / "__pycache__" pycache.mkdir() (pycache / "module.cpython-312.pyc").write_text("bytecode") collector = FileCollector(tmp_path) result = collector.collect(["**/*"]) # Should not include pycache contents file_paths = [str(f) for f in result.files] assert not any("__pycache__" in p for p in file_paths) assert any("module.py" in p for p in file_paths) def test_collect_glob_excludes_node_modules(self, tmp_path): """Test that glob results exclude node_modules directory.""" # Setup: create files including in node_modules (tmp_path / "index.js").write_text("// index") node_modules = tmp_path / "node_modules" node_modules.mkdir() (node_modules / "dep.js").write_text("// dep") collector = FileCollector(tmp_path) result = collector.collect(["**/*.js"]) # Should not include node_modules contents assert len(result.files) == 1 assert result.files[0].name == "index.js" def test_collect_glob_excludes_venv(self, tmp_path): """Test that glob results exclude .venv and venv directories.""" # Setup: create files including in virtual environments (tmp_path / "app.py").write_text("# app") venv = tmp_path / ".venv" venv.mkdir() (venv / "pyvenv.cfg").write_text("home = /usr/bin") venv2 = tmp_path / "venv" venv2.mkdir() (venv2 / "pyvenv.cfg").write_text("home = /usr/bin") collector = FileCollector(tmp_path) result = collector.collect(["**/*"]) # Should not include venv contents file_paths = [str(f) for f in result.files] assert not any(".venv" in p for p in file_paths) assert not any("/venv/" in p or p.endswith("/venv") for p in file_paths) assert any("app.py" in p for p in file_paths) def 
test_collect_glob_excludes_git_directory(self, tmp_path): """Test that glob results exclude .git directory.""" # Setup: create files including in .git (tmp_path / "file.txt").write_text("content") git_dir = tmp_path / ".git" git_dir.mkdir() (git_dir / "config").write_text("[core]") collector = FileCollector(tmp_path) result = collector.collect(["**/*"]) # Should not include .git contents (also hidden, but specifically excluded) file_paths = [str(f) for f in result.files] assert not any(".git" in p for p in file_paths) def test_collect_multiple_glob_patterns(self, tmp_path): """Test collecting with multiple glob patterns.""" # Setup: create various files (tmp_path / "a.json").write_text("{}") (tmp_path / "b.yaml").write_text("key: value") (tmp_path / "c.txt").write_text("text") collector = FileCollector(tmp_path) result = collector.collect(["*.json", "*.yaml"]) assert len(result.files) == 2 file_names = {f.name for f in result.files} assert file_names == {"a.json", "b.yaml"} def test_collect_glob_with_bracket_character_class(self, tmp_path): """Test that [abc] character class works in glob patterns.""" # Setup: create files (tmp_path / "file_a.txt").write_text("a") (tmp_path / "file_b.txt").write_text("b") (tmp_path / "file_c.txt").write_text("c") (tmp_path / "file_d.txt").write_text("d") collector = FileCollector(tmp_path) result = collector.collect(["file_[ab].txt"]) assert len(result.files) == 2 file_names = {f.name for f in result.files} assert file_names == {"file_a.txt", "file_b.txt"} def test_collect_glob_not_negation_pattern(self, tmp_path): """Test that patterns starting with ! 
are not treated as globs.""" # A pattern like !*.txt is negation, not glob # This test verifies glob detection skips negation patterns (tmp_path / "file.txt").write_text("content") collector = FileCollector(tmp_path) # Negation pattern alone has nothing to negate (no prior inclusions) result = collector.collect(["!file.txt"]) # Should have no files (nothing was included) # No warning because negation patterns don't warn about excluding nothing assert len(result.files) == 0 assert len(result.warnings) == 0 def test_collect_glob_tracks_pattern(self, tmp_path): """Test that glob pattern is tracked in patterns_matched.""" (tmp_path / "a.json").write_text("{}") (tmp_path / "b.json").write_text("{}") collector = FileCollector(tmp_path) result = collector.collect(["*.json"]) assert "*.json" in result.patterns_matched assert len(result.patterns_matched["*.json"]) == 2 def test_collect_glob_deduplicates_with_explicit_file(self, tmp_path): """Test that glob and explicit file don't duplicate.""" (tmp_path / "data.json").write_text("{}") collector = FileCollector(tmp_path) result = collector.collect(["data.json", "*.json"]) # File should only appear once assert len(result.files) == 1 assert result.files[0].name == "data.json" class TestFileCollectorNegation: """Tests for FileCollector negation pattern support.""" def test_negation_pattern_excludes_matching_files(self, tmp_path): """Test that !*.test.py excludes test files from collection.""" # Setup: create regular and test files (tmp_path / "main.py").write_text("# main") (tmp_path / "utils.py").write_text("# utils") (tmp_path / "main_test.py").write_text("# test") (tmp_path / "utils_test.py").write_text("# test") collector = FileCollector(tmp_path) result = collector.collect(["*.py", "!*_test.py"]) # Should exclude test files assert len(result.files) == 2 file_names = {f.name for f in result.files} assert file_names == {"main.py", "utils.py"} def test_negation_pattern_order_matters(self, tmp_path): """Test that negation 
before inclusion has no effect.""" # Setup: create json files fixtures_dir = tmp_path / "fixtures" fixtures_dir.mkdir() (tmp_path / "config.json").write_text("{}") (fixtures_dir / "test_data.json").write_text("{}") collector = FileCollector(tmp_path) # Negation first - nothing to negate yet result = collector.collect(["!fixtures/*.json", "**/*.json"]) # All json files should be included (negation had nothing to negate) file_names = {f.name for f in result.files} assert "config.json" in file_names assert "test_data.json" in file_names def test_negation_pattern_after_inclusion_excludes(self, tmp_path): """Test that negation after inclusion excludes matching files.""" # Setup: create json files in different directories fixtures_dir = tmp_path / "fixtures" fixtures_dir.mkdir() (tmp_path / "config.json").write_text("{}") (fixtures_dir / "test_data.json").write_text("{}") collector = FileCollector(tmp_path) # Include all json, then exclude fixtures result = collector.collect(["**/*.json", "!fixtures/*.json"]) # Only non-fixture json should be included assert len(result.files) == 1 assert result.files[0].name == "config.json" def test_negation_all_files_results_in_empty(self, tmp_path): """Test that negating all matched files results in empty collection.""" # Setup: create json files (tmp_path / "a.json").write_text("{}") (tmp_path / "b.json").write_text("{}") collector = FileCollector(tmp_path) result = collector.collect(["*.json", "!*.json"]) # All files negated assert len(result.files) == 0 def test_negation_directory_pattern(self, tmp_path): """Test that !fixtures/ excludes files in fixtures directory.""" # Setup: create directory structure data_dir = tmp_path / "data" data_dir.mkdir() fixtures_dir = data_dir / "fixtures" fixtures_dir.mkdir() (data_dir / "config.txt").write_text("config") (data_dir / "data.txt").write_text("data") (fixtures_dir / "fixture1.txt").write_text("fixture") (fixtures_dir / "fixture2.txt").write_text("fixture") collector = 
FileCollector(tmp_path) result = collector.collect(["data/", "!data/fixtures/"]) # Should exclude fixtures directory contents assert len(result.files) == 2 file_names = {f.name for f in result.files} assert file_names == {"config.txt", "data.txt"} def test_negation_with_deduplication(self, tmp_path): """Test that negation works correctly with deduplicated files.""" # Setup: create files (tmp_path / "a.txt").write_text("a") (tmp_path / "b.txt").write_text("b") (tmp_path / "c.txt").write_text("c") collector = FileCollector(tmp_path) # Multiple patterns matching same files, then negation result = collector.collect(["a.txt", "*.txt", "!b.txt"]) # a.txt and c.txt should be in result (b.txt negated) assert len(result.files) == 2 file_names = {f.name for f in result.files} assert file_names == {"a.txt", "c.txt"} def test_negation_pattern_tracked_in_patterns_matched(self, tmp_path): """Test that negation patterns are tracked in patterns_matched.""" # Setup: create files (tmp_path / "a.py").write_text("# a") (tmp_path / "a_test.py").write_text("# test") collector = FileCollector(tmp_path) result = collector.collect(["*.py", "!*_test.py"]) # Negation pattern should be tracked assert "!*_test.py" in result.patterns_matched # And should show which files it excluded excluded_names = [f.name for f in result.patterns_matched["!*_test.py"]] assert "a_test.py" in excluded_names def test_negation_multiple_patterns(self, tmp_path): """Test multiple negation patterns work together.""" # Setup: create files (tmp_path / "main.py").write_text("# main") (tmp_path / "main_test.py").write_text("# test") (tmp_path / "__init__.py").write_text("") (tmp_path / "conftest.py").write_text("# pytest") collector = FileCollector(tmp_path) result = collector.collect(["*.py", "!*_test.py", "!conftest.py"]) # Should exclude both test and conftest files assert len(result.files) == 2 file_names = {f.name for f in result.files} assert file_names == {"main.py", "__init__.py"} def 
test_negation_does_not_add_files(self, tmp_path): """Test that negation only removes, never adds files.""" # Setup: create files (tmp_path / "a.txt").write_text("a") (tmp_path / "b.json").write_text("{}") collector = FileCollector(tmp_path) # Only include .txt, negation of .json should have no effect result = collector.collect(["*.txt", "!*.json"]) # b.json was never in the set, so negation doesn't affect anything assert len(result.files) == 1 assert result.files[0].name == "a.txt" def test_negation_no_warning_for_zero_matches(self, tmp_path): """Test that negation pattern matching nothing doesn't warn.""" # Setup: create files (tmp_path / "a.txt").write_text("a") collector = FileCollector(tmp_path) # Negation that matches nothing shouldn't produce warning result = collector.collect(["*.txt", "!*.nonexistent"]) assert len(result.files) == 1 # No warning for negation pattern that excluded nothing assert len(result.warnings) == 0 def test_overlapping_patterns_deduplicate(self, tmp_path): """Test that overlapping inclusion patterns deduplicate silently.""" # Setup: create files data_dir = tmp_path / "data" data_dir.mkdir() config = data_dir / "config.json" config.write_text("{}") collector = FileCollector(tmp_path) # Same file matched by multiple patterns result = collector.collect(["data/", "data/config.json", "**/*.json"]) # File should only appear once assert len(result.files) == 1 assert result.files[0] == config.resolve() def test_negation_with_nested_directory(self, tmp_path): """Test negation works with deeply nested directories.""" # Setup: create nested structure src = tmp_path / "src" src.mkdir() tests = src / "tests" tests.mkdir() unit = tests / "unit" unit.mkdir() (src / "main.py").write_text("# main") (tests / "conftest.py").write_text("# conftest") (unit / "test_main.py").write_text("# test") collector = FileCollector(tmp_path) result = collector.collect(["src/", "!src/tests/"]) # Should only include main.py (tests/ excluded) assert len(result.files) == 
1 assert result.files[0].name == "main.py" class TestZeroMatchWarning: """Tests for zero-match pattern warning emission via logger.""" def test_zero_match_warning_logged(self, tmp_path, caplog): """Test that zero-match pattern emits warning via logger.""" import logging caplog.set_level(logging.WARNING) collector = FileCollector(tmp_path) result = collector.collect(["*.missing"]) # Should log warning assert any("*.missing" in record.message for record in caplog.records) assert any(record.levelno == logging.WARNING for record in caplog.records) # Should also store in result.warnings assert "*.missing" in result.warnings[0] def test_zero_match_warning_includes_pattern_text(self, tmp_path, caplog): """Test that warning includes the pattern for debugging.""" import logging caplog.set_level(logging.WARNING) collector = FileCollector(tmp_path) collector.collect(["specific_missing_file.xyz"]) # Warning should include the pattern text warning_messages = [ r.message for r in caplog.records if r.levelno == logging.WARNING ] assert any("specific_missing_file.xyz" in msg for msg in warning_messages) def test_zero_match_warning_collection_continues(self, tmp_path, caplog): """Test that collection continues after zero-match warning.""" import logging caplog.set_level(logging.WARNING) # Create one file that exists (tmp_path / "exists.txt").write_text("content") collector = FileCollector(tmp_path) result = collector.collect(["missing.txt", "exists.txt"]) # Should have warning for missing but still collect existing assert len(result.files) == 1 assert result.files[0].name == "exists.txt" assert len(result.warnings) == 1 def test_negation_pattern_no_zero_match_warning(self, tmp_path, caplog): """Test that negation patterns don't trigger zero-match warning.""" import logging caplog.set_level(logging.WARNING) (tmp_path / "file.txt").write_text("content") collector = FileCollector(tmp_path) # Negation that doesn't match anything result = collector.collect(["*.txt", "!*.nonexistent"]) # 
No warning for negation pattern warning_messages = [ r.message for r in caplog.records if r.levelno == logging.WARNING ] assert not any("nonexistent" in msg for msg in warning_messages) assert len(result.warnings) == 0 class TestLargeFileWarning: """Tests for large file (>10MB) warning emission.""" def test_large_file_warning_emitted(self, tmp_path, caplog): """Test that files >10MB emit warning via logger.""" import logging from prefect._experimental.bundles._file_collector import LARGE_FILE_THRESHOLD caplog.set_level(logging.WARNING) # Create a file slightly over threshold large_file = tmp_path / "huge.bin" large_file.write_bytes(b"x" * (LARGE_FILE_THRESHOLD + 1)) collector = FileCollector(tmp_path) result = collector.collect(["huge.bin"]) # Should emit warning warning_messages = [ r.message for r in caplog.records if r.levelno == logging.WARNING ] assert any("huge.bin" in msg for msg in warning_messages) # File should still be collected assert len(result.files) == 1 def test_large_file_warning_includes_size(self, tmp_path, caplog): """Test that large file warning includes file size.""" import logging from prefect._experimental.bundles._file_collector import LARGE_FILE_THRESHOLD caplog.set_level(logging.WARNING) large_file = tmp_path / "big.bin" size = LARGE_FILE_THRESHOLD + 1024 * 1024 # threshold + 1MB large_file.write_bytes(b"x" * size) collector = FileCollector(tmp_path) collector.collect(["big.bin"]) # Warning should include size info warning_messages = [ r.message for r in caplog.records if r.levelno == logging.WARNING ] assert len(warning_messages) >= 1 # Check warning mentions size (in MB or bytes) assert any("MB" in msg or str(size) in msg for msg in warning_messages) def test_large_file_still_collected(self, tmp_path): """Test that large files are collected despite warning.""" from prefect._experimental.bundles._file_collector import LARGE_FILE_THRESHOLD large_file = tmp_path / "collected.bin" large_file.write_bytes(b"x" * (LARGE_FILE_THRESHOLD + 100)) 
collector = FileCollector(tmp_path) result = collector.collect(["collected.bin"]) # File must be collected assert len(result.files) == 1 assert result.files[0].name == "collected.bin" assert result.total_size > LARGE_FILE_THRESHOLD def test_large_file_threshold_constant(self): """Test LARGE_FILE_THRESHOLD is exported and equals 10MB.""" from prefect._experimental.bundles._file_collector import LARGE_FILE_THRESHOLD assert LARGE_FILE_THRESHOLD == 10 * 1024 * 1024 # 10 MB class TestCollectionSummary: """Tests for format_collection_summary function.""" def test_format_summary_file_count_and_size(self, tmp_path): """Test that summary shows file count and total size.""" from prefect._experimental.bundles._file_collector import ( format_collection_summary, ) # Create files with known sizes (tmp_path / "a.txt").write_text("hello") # 5 bytes (tmp_path / "b.txt").write_text("world!") # 6 bytes collector = FileCollector(tmp_path) result = collector.collect(["a.txt", "b.txt"]) summary = format_collection_summary(result) # Should include count assert "2 files" in summary # Should include size (11 bytes = ~0.0 KB) assert "KB" in summary or "MB" in summary def test_format_summary_kb_format(self, tmp_path): """Test that small sizes show KB format.""" from prefect._experimental.bundles._file_collector import ( format_collection_summary, ) # Create file with ~500 bytes (tmp_path / "small.txt").write_text("x" * 500) collector = FileCollector(tmp_path) result = collector.collect(["small.txt"]) summary = format_collection_summary(result) # Should show KB for small files assert "KB" in summary assert "1 file" in summary def test_format_summary_mb_format(self, tmp_path): """Test that large sizes show MB format.""" from prefect._experimental.bundles._file_collector import ( format_collection_summary, ) # Create file with ~2MB (tmp_path / "large.txt").write_bytes(b"x" * (2 * 1024 * 1024)) collector = FileCollector(tmp_path) result = collector.collect(["large.txt"]) summary = 
format_collection_summary(result) # Should show MB for large files assert "MB" in summary assert "2.0 MB" in summary or "2 MB" in summary def test_format_summary_human_readable(self, tmp_path): """Test that summary is human-readable format.""" from prefect._experimental.bundles._file_collector import ( format_collection_summary, ) (tmp_path / "file.txt").write_text("content") collector = FileCollector(tmp_path) result = collector.collect(["file.txt"]) summary = format_collection_summary(result) # Format should be "Collected N files (X.Y KB/MB)" assert summary.startswith("Collected") assert "(" in summary and ")" in summary class TestPreviewCollection: """Tests for preview_collection function.""" def test_preview_returns_files_list(self, tmp_path): """Test preview_collection returns list of files.""" from prefect._experimental.bundles._file_collector import preview_collection (tmp_path / "a.json").write_text("{}") (tmp_path / "b.json").write_text("{}") result = preview_collection(tmp_path, ["*.json"]) assert "files" in result assert "a.json" in result["files"] assert "b.json" in result["files"] def test_preview_returns_file_count(self, tmp_path): """Test preview_collection returns file count.""" from prefect._experimental.bundles._file_collector import preview_collection (tmp_path / "a.txt").write_text("a") (tmp_path / "b.txt").write_text("b") (tmp_path / "c.txt").write_text("c") result = preview_collection(tmp_path, ["*.txt"]) assert result["file_count"] == 3 def test_preview_returns_total_size(self, tmp_path): """Test preview_collection returns total size.""" from prefect._experimental.bundles._file_collector import preview_collection (tmp_path / "data.txt").write_text("hello") # 5 bytes result = preview_collection(tmp_path, ["*.txt"]) assert "total_size" in result assert result["total_size"] == 5 def test_preview_returns_human_readable_size(self, tmp_path): """Test preview_collection returns human-readable size.""" from 
prefect._experimental.bundles._file_collector import preview_collection (tmp_path / "file.txt").write_text("content") result = preview_collection(tmp_path, ["*.txt"]) assert "total_size_human" in result assert "KB" in result["total_size_human"] or "MB" in result["total_size_human"] def test_preview_returns_warnings(self, tmp_path): """Test preview_collection returns warnings.""" from prefect._experimental.bundles._file_collector import preview_collection result = preview_collection(tmp_path, ["*.missing"]) assert "warnings" in result assert len(result["warnings"]) == 1 assert "*.missing" in result["warnings"][0] def test_preview_returns_patterns_matched(self, tmp_path): """Test preview_collection returns pattern match counts.""" from prefect._experimental.bundles._file_collector import preview_collection (tmp_path / "a.json").write_text("{}") (tmp_path / "b.json").write_text("{}") result = preview_collection(tmp_path, ["*.json"]) assert "patterns_matched" in result assert result["patterns_matched"]["*.json"] == 2 def test_preview_does_not_modify_state(self, tmp_path): """Test preview_collection doesn't modify any state.""" from prefect._experimental.bundles._file_collector import preview_collection (tmp_path / "file.txt").write_text("content") # Call preview multiple times result1 = preview_collection(tmp_path, ["*.txt"]) result2 = preview_collection(tmp_path, ["*.txt"]) # Results should be identical assert result1 == result2 def test_preview_with_path_object(self, tmp_path): """Test preview_collection accepts Path objects.""" from pathlib import Path from prefect._experimental.bundles._file_collector import preview_collection (tmp_path / "file.txt").write_text("content") result = preview_collection(Path(tmp_path), ["*.txt"]) assert result["file_count"] == 1 def test_preview_exported_in_all(self): """Test preview_collection is in __all__.""" from prefect._experimental.bundles import _file_collector assert "preview_collection" in _file_collector.__all__ def 
test_format_collection_summary_exported_in_all(self): """Test format_collection_summary is in __all__.""" from prefect._experimental.bundles import _file_collector assert "format_collection_summary" in _file_collector.__all__ def test_preview_collection_returns_excluded_by_ignore(self, tmp_path): """Test preview_collection returns excluded_by_ignore list.""" from prefect._experimental.bundles._file_collector import preview_collection # Setup: create .prefectignore and files (tmp_path / ".prefectignore").write_text("*.log\n") (tmp_path / "app.py").write_text("# app") (tmp_path / "debug.log").write_text("log content") result = preview_collection(tmp_path, ["*.py", "*.log"]) assert "excluded_by_ignore" in result assert "debug.log" in result["excluded_by_ignore"] # app.py should be included, not excluded assert "app.py" not in result["excluded_by_ignore"] def test_preview_collection_returns_sensitive_warnings(self, tmp_path): """Test preview_collection returns sensitive_warnings list.""" from prefect._experimental.bundles._file_collector import preview_collection # Setup: create a sensitive file (tmp_path / ".env").write_text("SECRET=value") (tmp_path / "app.py").write_text("# app") result = preview_collection(tmp_path, [".env", "app.py"]) assert "sensitive_warnings" in result assert len(result["sensitive_warnings"]) == 1 assert ".env" in result["sensitive_warnings"][0] def test_preview_collection_sensitive_files_still_collected(self, tmp_path): """Test that sensitive files are still included (warning only).""" from prefect._experimental.bundles._file_collector import preview_collection # Setup: create sensitive file (tmp_path / ".env").write_text("SECRET=value") result = preview_collection(tmp_path, [".env"]) # File should be in the files list despite warning assert ".env" in result["files"] # And warning should exist assert len(result["sensitive_warnings"]) == 1 def test_preview_collection_excludes_by_prefectignore(self, tmp_path): """Test preview_collection applies 
.prefectignore filtering.""" from prefect._experimental.bundles._file_collector import preview_collection # Setup: create .prefectignore and files (tmp_path / ".prefectignore").write_text("*.tmp\n") (tmp_path / "main.py").write_text("# main") (tmp_path / "cache.tmp").write_text("cached data") result = preview_collection(tmp_path, ["*.py", "*.tmp"]) # main.py should be in files assert "main.py" in result["files"] # cache.tmp should NOT be in files (excluded by .prefectignore) assert "cache.tmp" not in result["files"] # cache.tmp should be in excluded_by_ignore assert "cache.tmp" in result["excluded_by_ignore"] def test_preview_collection_file_count_excludes_ignored(self, tmp_path): """Test preview_collection file_count reflects filtering.""" from prefect._experimental.bundles._file_collector import preview_collection # Setup: create files where some will be ignored (tmp_path / ".prefectignore").write_text("*.log\n") (tmp_path / "a.py").write_text("# a") (tmp_path / "b.py").write_text("# b") (tmp_path / "debug.log").write_text("log") result = preview_collection(tmp_path, ["*.py", "*.log"]) # Only 2 py files should be counted (log excluded) assert result["file_count"] == 2 def test_preview_collection_total_size_excludes_ignored(self, tmp_path): """Test preview_collection total_size reflects filtering.""" from prefect._experimental.bundles._file_collector import preview_collection # Setup: create files with known sizes (tmp_path / ".prefectignore").write_text("*.log\n") (tmp_path / "small.py").write_text("small") # 5 bytes (tmp_path / "huge.log").write_text("x" * 10000) # 10000 bytes result = preview_collection(tmp_path, ["*.py", "*.log"]) # Total size should only include small.py assert result["total_size"] == 5 def test_preview_collection_warnings_includes_explicit_excludes(self, tmp_path): """Test preview_collection warnings includes explicit exclude warnings.""" from prefect._experimental.bundles._file_collector import preview_collection # Setup: user explicitly 
includes a file that's ignored (tmp_path / ".prefectignore").write_text("important.log\n") (tmp_path / "important.log").write_text("important data") result = preview_collection(tmp_path, ["important.log"]) # Should have warning about explicit include being ignored assert any("important.log" in w for w in result["warnings"]) def test_preview_collection_multiple_sensitive_files(self, tmp_path): """Test preview_collection handles multiple sensitive files.""" from prefect._experimental.bundles._file_collector import preview_collection # Setup: create multiple sensitive files (tmp_path / ".env").write_text("SECRET=1") (tmp_path / "credentials.json").write_text("{}") (tmp_path / "server.key").write_text("key") (tmp_path / "app.py").write_text("# app") result = preview_collection( tmp_path, [".env", "credentials.json", "server.key", "app.py"] ) # Should have 3 sensitive warnings assert len(result["sensitive_warnings"]) == 3 # All 4 files should be collected (warning only) assert result["file_count"] == 4 def test_preview_collection_excluded_by_ignore_empty_when_no_ignores( self, tmp_path ): """Test excluded_by_ignore is empty when no .prefectignore.""" from prefect._experimental.bundles._file_collector import preview_collection # Setup: no .prefectignore (tmp_path / "file.txt").write_text("content") result = preview_collection(tmp_path, ["*.txt"]) assert result["excluded_by_ignore"] == [] def test_preview_collection_sensitive_warnings_empty_for_safe_files(self, tmp_path): """Test sensitive_warnings is empty for non-sensitive files.""" from prefect._experimental.bundles._file_collector import preview_collection # Setup: only safe files (tmp_path / "main.py").write_text("# main") (tmp_path / "config.yaml").write_text("key: value") result = preview_collection(tmp_path, ["*.py", "*.yaml"]) assert result["sensitive_warnings"] == []
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/_experimental/bundles/test_file_collector.py", "license": "Apache License 2.0", "lines": 1166, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:tests/_experimental/bundles/test_ignore_filter.py
""" Tests for IgnoreFilter class with .prefectignore support. This module tests the IgnoreFilter class's ability to filter collected files through cascading .prefectignore patterns. Tests cover: - Basic pattern filtering (exclude matching files, preserve non-matching) - Cascading .prefectignore from project root and flow directory - Missing .prefectignore handling (debug log, not warning) - Auto-exclusion of .prefectignore file itself - Warning when user explicitly includes an ignored file - Project root detection via pyproject.toml - Pattern loading with comment/blank line stripping - Gitignore syntax support (negation, directories, globs) """ from __future__ import annotations import logging from prefect._experimental.bundles._ignore_filter import ( FilterResult, IgnoreFilter, find_project_root, load_ignore_patterns, ) class TestFilterResult: """Tests for FilterResult dataclass.""" def test_create_empty_filter_result(self): """Test creating an empty FilterResult.""" result = FilterResult( included_files=[], excluded_by_ignore=[], explicitly_excluded=[], ) assert result.included_files == [] assert result.excluded_by_ignore == [] assert result.explicitly_excluded == [] def test_create_filter_result_with_data(self, tmp_path): """Test creating FilterResult with all fields populated.""" file1 = tmp_path / "included.txt" file2 = tmp_path / "excluded.txt" file1.touch() file2.touch() result = FilterResult( included_files=[file1], excluded_by_ignore=[file2], explicitly_excluded=["excluded.txt was explicitly included but ignored"], ) assert len(result.included_files) == 1 assert len(result.excluded_by_ignore) == 1 assert len(result.explicitly_excluded) == 1 class TestFindProjectRoot: """Tests for find_project_root function.""" def test_find_project_root_finds_pyproject_toml(self, tmp_path): """Test find_project_root finds directory containing pyproject.toml.""" # Create project structure project_root = tmp_path / "project" project_root.mkdir() (project_root / 
"pyproject.toml").write_text("[project]") flow_dir = project_root / "src" / "flows" flow_dir.mkdir(parents=True) # Should find project_root when starting from flow_dir result = find_project_root(flow_dir) assert result == project_root def test_find_project_root_returns_none_when_not_found(self, tmp_path): """Test find_project_root returns None when no pyproject.toml exists.""" # Create directory without pyproject.toml flow_dir = tmp_path / "orphan" / "flows" flow_dir.mkdir(parents=True) result = find_project_root(flow_dir) assert result is None def test_find_project_root_returns_start_dir_if_contains_pyproject(self, tmp_path): """Test find_project_root returns start_dir if it contains pyproject.toml.""" # pyproject.toml in the start directory itself (tmp_path / "pyproject.toml").write_text("[project]") result = find_project_root(tmp_path) assert result == tmp_path def test_find_project_root_traverses_parents(self, tmp_path): """Test find_project_root correctly traverses parent directories.""" # Create nested structure with pyproject.toml at root (tmp_path / "pyproject.toml").write_text("[project]") deep_dir = tmp_path / "a" / "b" / "c" / "d" deep_dir.mkdir(parents=True) result = find_project_root(deep_dir) assert result == tmp_path class TestLoadIgnorePatterns: """Tests for load_ignore_patterns function.""" def test_load_ignore_patterns_from_flow_dir(self, tmp_path): """Test loading patterns from .prefectignore in flow directory.""" (tmp_path / ".prefectignore").write_text("*.log\n*.tmp\n") patterns = load_ignore_patterns(tmp_path) assert "*.log" in patterns assert "*.tmp" in patterns def test_load_ignore_patterns_strips_comments(self, tmp_path): """Test that comment lines (starting with #) are stripped.""" (tmp_path / ".prefectignore").write_text( "# This is a comment\n*.log\n# Another comment\n*.tmp\n" ) patterns = load_ignore_patterns(tmp_path) # Comments should be stripped assert "# This is a comment" not in patterns assert "# Another comment" not in patterns # 
Actual patterns should remain assert "*.log" in patterns assert "*.tmp" in patterns def test_load_ignore_patterns_strips_blank_lines(self, tmp_path): """Test that blank lines are stripped.""" (tmp_path / ".prefectignore").write_text("*.log\n\n\n*.tmp\n\n") patterns = load_ignore_patterns(tmp_path) # Should only have actual patterns assert len(patterns) == 2 assert "" not in patterns def test_load_ignore_patterns_cascade_project_root(self, tmp_path): """Test that patterns cascade from project root.""" # Create project structure project_root = tmp_path / "project" project_root.mkdir() (project_root / "pyproject.toml").write_text("[project]") (project_root / ".prefectignore").write_text("*.log\n") flow_dir = project_root / "src" / "flows" flow_dir.mkdir(parents=True) patterns = load_ignore_patterns(flow_dir) # Should include project root patterns assert "*.log" in patterns def test_load_ignore_patterns_cascade_union_both_files(self, tmp_path): """Test that patterns from both project root and flow dir are combined.""" # Create project structure project_root = tmp_path / "project" project_root.mkdir() (project_root / "pyproject.toml").write_text("[project]") (project_root / ".prefectignore").write_text("*.log\n") flow_dir = project_root / "src" / "flows" flow_dir.mkdir(parents=True) (flow_dir / ".prefectignore").write_text("*.tmp\n") patterns = load_ignore_patterns(flow_dir) # Should include both patterns (union) assert "*.log" in patterns assert "*.tmp" in patterns def test_load_ignore_patterns_missing_prefectignore_debug_log( self, tmp_path, caplog ): """Test that missing .prefectignore emits debug log, not warning.""" caplog.set_level(logging.DEBUG) # No .prefectignore in tmp_path patterns = load_ignore_patterns(tmp_path) # Should return empty list assert patterns == [] # Should NOT have warning logs, only debug warning_records = [r for r in caplog.records if r.levelno >= logging.WARNING] assert len(warning_records) == 0 class TestIgnoreFilter: """Tests for 
IgnoreFilter class.""" def test_filter_excludes_matching_files(self, tmp_path): """Test that files matching .prefectignore patterns are excluded.""" # Setup: create .prefectignore and files (tmp_path / ".prefectignore").write_text("*.log\n") keep_file = tmp_path / "app.py" keep_file.touch() log_file = tmp_path / "debug.log" log_file.touch() # Create filter and filter files ignore_filter = IgnoreFilter(tmp_path) result = ignore_filter.filter([keep_file, log_file]) # log file should be excluded assert keep_file in result.included_files assert log_file in result.excluded_by_ignore assert log_file not in result.included_files def test_filter_preserves_non_matching_files(self, tmp_path): """Test that files not matching patterns are preserved.""" # Setup: create .prefectignore that doesn't match test files (tmp_path / ".prefectignore").write_text("*.log\n") file1 = tmp_path / "main.py" file2 = tmp_path / "config.yaml" file1.touch() file2.touch() ignore_filter = IgnoreFilter(tmp_path) result = ignore_filter.filter([file1, file2]) # Both files should be included assert file1 in result.included_files assert file2 in result.included_files assert len(result.excluded_by_ignore) == 0 def test_prefectignore_auto_excluded(self, tmp_path): """Test that .prefectignore file itself is auto-excluded.""" # Setup (tmp_path / ".prefectignore").write_text("*.log\n") prefectignore = tmp_path / ".prefectignore" other_file = tmp_path / "main.py" other_file.touch() ignore_filter = IgnoreFilter(tmp_path) result = ignore_filter.filter([prefectignore, other_file]) # .prefectignore should be excluded assert prefectignore in result.excluded_by_ignore assert prefectignore not in result.included_files # Other file should be included assert other_file in result.included_files def test_explicit_include_excluded_warns(self, tmp_path): """Test warning when user explicitly includes a file that's ignored.""" # Setup: ignore *.secret and have user explicitly include one (tmp_path / 
".prefectignore").write_text("*.secret\n") secret_file = tmp_path / "api.secret" secret_file.touch() ignore_filter = IgnoreFilter(tmp_path) # Pass explicit_patterns to indicate user's intent result = ignore_filter.filter([secret_file], explicit_patterns=["api.secret"]) # File should be excluded assert secret_file in result.excluded_by_ignore # Should have explicit exclusion warning assert len(result.explicitly_excluded) >= 1 # Warning should mention the file assert any("api.secret" in warning for warning in result.explicitly_excluded) def test_cascade_loads_project_root_patterns(self, tmp_path): """Test that IgnoreFilter loads patterns from project root.""" # Create project structure project_root = tmp_path / "project" project_root.mkdir() (project_root / "pyproject.toml").write_text("[project]") (project_root / ".prefectignore").write_text("*.log\n") flow_dir = project_root / "src" / "flows" flow_dir.mkdir(parents=True) log_file = flow_dir / "debug.log" log_file.touch() py_file = flow_dir / "flow.py" py_file.touch() ignore_filter = IgnoreFilter(flow_dir) result = ignore_filter.filter([log_file, py_file]) # Log file should be excluded by project root pattern assert log_file in result.excluded_by_ignore assert py_file in result.included_files def test_cascade_loads_flow_dir_patterns(self, tmp_path): """Test that IgnoreFilter loads patterns from flow directory.""" # Setup: .prefectignore in flow_dir only (tmp_path / ".prefectignore").write_text("*.tmp\n") tmp_file = tmp_path / "cache.tmp" tmp_file.touch() py_file = tmp_path / "main.py" py_file.touch() ignore_filter = IgnoreFilter(tmp_path) result = ignore_filter.filter([tmp_file, py_file]) assert tmp_file in result.excluded_by_ignore assert py_file in result.included_files def test_cascade_union_both_files(self, tmp_path): """Test that patterns from both project root and flow dir apply.""" # Create project structure with both .prefectignore files project_root = tmp_path / "project" project_root.mkdir() (project_root 
/ "pyproject.toml").write_text("[project]") (project_root / ".prefectignore").write_text("*.log\n") flow_dir = project_root / "src" / "flows" flow_dir.mkdir(parents=True) (flow_dir / ".prefectignore").write_text("*.tmp\n") # Create test files log_file = flow_dir / "app.log" log_file.touch() tmp_file = flow_dir / "cache.tmp" tmp_file.touch() py_file = flow_dir / "flow.py" py_file.touch() ignore_filter = IgnoreFilter(flow_dir) result = ignore_filter.filter([log_file, tmp_file, py_file]) # Both log and tmp should be excluded (union of patterns) assert log_file in result.excluded_by_ignore assert tmp_file in result.excluded_by_ignore assert py_file in result.included_files def test_gitignore_syntax_negation(self, tmp_path): """Test that gitignore negation syntax works.""" # Exclude all logs except important.log (tmp_path / ".prefectignore").write_text("*.log\n!important.log\n") important_log = tmp_path / "important.log" important_log.touch() debug_log = tmp_path / "debug.log" debug_log.touch() ignore_filter = IgnoreFilter(tmp_path) result = ignore_filter.filter([important_log, debug_log]) # important.log should be included (negated) assert important_log in result.included_files # debug.log should still be excluded assert debug_log in result.excluded_by_ignore def test_gitignore_syntax_directories(self, tmp_path): """Test that directory patterns (ending with /) work.""" # Exclude __pycache__/ directory (tmp_path / ".prefectignore").write_text("__pycache__/\n") pycache = tmp_path / "__pycache__" pycache.mkdir() pyc_file = pycache / "module.cpython-311.pyc" pyc_file.touch() main_py = tmp_path / "main.py" main_py.touch() ignore_filter = IgnoreFilter(tmp_path) result = ignore_filter.filter([pyc_file, main_py]) # File in __pycache__ should be excluded assert pyc_file in result.excluded_by_ignore assert main_py in result.included_files def test_gitignore_syntax_globs(self, tmp_path): """Test that glob patterns work (**, *, ?).""" # Exclude all .pyc files anywhere (tmp_path / 
".prefectignore").write_text("**/*.pyc\n") deep_dir = tmp_path / "src" / "utils" deep_dir.mkdir(parents=True) pyc_file = deep_dir / "helper.pyc" pyc_file.touch() py_file = deep_dir / "helper.py" py_file.touch() ignore_filter = IgnoreFilter(tmp_path) result = ignore_filter.filter([pyc_file, py_file]) assert pyc_file in result.excluded_by_ignore assert py_file in result.included_files def test_no_prefectignore_allows_all_files(self, tmp_path): """Test that missing .prefectignore allows all files through.""" # No .prefectignore file file1 = tmp_path / "a.py" file2 = tmp_path / "b.log" file1.touch() file2.touch() ignore_filter = IgnoreFilter(tmp_path) result = ignore_filter.filter([file1, file2]) # All files should be included (except .prefectignore if present) assert file1 in result.included_files assert file2 in result.included_files assert len(result.excluded_by_ignore) == 0 def test_filter_handles_files_relative_to_flow_dir(self, tmp_path): """Test filter handles files in subdirectories correctly.""" (tmp_path / ".prefectignore").write_text("data/*.csv\n") data_dir = tmp_path / "data" data_dir.mkdir() csv_file = data_dir / "input.csv" csv_file.touch() json_file = data_dir / "config.json" json_file.touch() ignore_filter = IgnoreFilter(tmp_path) result = ignore_filter.filter([csv_file, json_file]) # CSV should be excluded by pattern assert csv_file in result.excluded_by_ignore assert json_file in result.included_files def test_filter_empty_file_list(self, tmp_path): """Test filtering empty file list.""" (tmp_path / ".prefectignore").write_text("*.log\n") ignore_filter = IgnoreFilter(tmp_path) result = ignore_filter.filter([]) assert result.included_files == [] assert result.excluded_by_ignore == [] assert result.explicitly_excluded == [] class TestIgnoreFilterEdgeCases: """Edge case tests for IgnoreFilter.""" def test_empty_prefectignore_file(self, tmp_path): """Test handling of empty .prefectignore file.""" (tmp_path / ".prefectignore").write_text("") file = 
tmp_path / "test.py" file.touch() ignore_filter = IgnoreFilter(tmp_path) result = ignore_filter.filter([file]) # File should be included (no patterns to exclude) assert file in result.included_files def test_prefectignore_only_comments(self, tmp_path): """Test .prefectignore with only comments.""" (tmp_path / ".prefectignore").write_text("# Comment 1\n# Comment 2\n") file = tmp_path / "test.py" file.touch() ignore_filter = IgnoreFilter(tmp_path) result = ignore_filter.filter([file]) # File should be included (no actual patterns) assert file in result.included_files def test_prefectignore_whitespace_lines(self, tmp_path): """Test that whitespace-only lines are handled.""" (tmp_path / ".prefectignore").write_text("*.log\n \n\t\n*.tmp\n") log_file = tmp_path / "debug.log" log_file.touch() tmp_file = tmp_path / "cache.tmp" tmp_file.touch() py_file = tmp_path / "main.py" py_file.touch() ignore_filter = IgnoreFilter(tmp_path) result = ignore_filter.filter([log_file, tmp_file, py_file]) # Both log and tmp should be excluded assert log_file in result.excluded_by_ignore assert tmp_file in result.excluded_by_ignore assert py_file in result.included_files def test_nested_prefectignore_in_path(self, tmp_path): """Test .prefectignore file in nested directory is auto-excluded.""" subdir = tmp_path / "sub" subdir.mkdir() nested_ignore = subdir / ".prefectignore" nested_ignore.write_text("*.tmp") ignore_filter = IgnoreFilter(tmp_path) result = ignore_filter.filter([nested_ignore]) # Any .prefectignore file should be auto-excluded assert nested_ignore in result.excluded_by_ignore def test_multiple_explicit_patterns_matched(self, tmp_path): """Test warning for multiple explicitly included files being ignored.""" (tmp_path / ".prefectignore").write_text("*.secret\n*.key\n") secret1 = tmp_path / "api.secret" secret2 = tmp_path / "private.key" secret1.touch() secret2.touch() ignore_filter = IgnoreFilter(tmp_path) result = ignore_filter.filter( [secret1, secret2], 
explicit_patterns=["api.secret", "private.key"], ) # Both should be excluded assert secret1 in result.excluded_by_ignore assert secret2 in result.excluded_by_ignore # Should have warnings for both assert len(result.explicitly_excluded) >= 2 class TestSensitivePatterns: """Tests for sensitive file pattern detection.""" def test_sensitive_patterns_constant_defined(self): """Test SENSITIVE_PATTERNS constant is defined and exported.""" from prefect._experimental.bundles._ignore_filter import SENSITIVE_PATTERNS assert isinstance(SENSITIVE_PATTERNS, list) assert len(SENSITIVE_PATTERNS) == 7 def test_sensitive_patterns_contains_env_files(self): """Test SENSITIVE_PATTERNS contains .env* pattern.""" from prefect._experimental.bundles._ignore_filter import SENSITIVE_PATTERNS assert ".env*" in SENSITIVE_PATTERNS def test_sensitive_patterns_contains_pem_files(self): """Test SENSITIVE_PATTERNS contains *.pem pattern.""" from prefect._experimental.bundles._ignore_filter import SENSITIVE_PATTERNS assert "*.pem" in SENSITIVE_PATTERNS def test_sensitive_patterns_contains_key_files(self): """Test SENSITIVE_PATTERNS contains *.key pattern.""" from prefect._experimental.bundles._ignore_filter import SENSITIVE_PATTERNS assert "*.key" in SENSITIVE_PATTERNS def test_sensitive_patterns_contains_credentials_files(self): """Test SENSITIVE_PATTERNS contains credentials.* pattern.""" from prefect._experimental.bundles._ignore_filter import SENSITIVE_PATTERNS assert "credentials.*" in SENSITIVE_PATTERNS def test_sensitive_patterns_contains_rsa_keys(self): """Test SENSITIVE_PATTERNS contains *_rsa pattern.""" from prefect._experimental.bundles._ignore_filter import SENSITIVE_PATTERNS assert "*_rsa" in SENSITIVE_PATTERNS def test_sensitive_patterns_contains_p12_files(self): """Test SENSITIVE_PATTERNS contains *.p12 pattern.""" from prefect._experimental.bundles._ignore_filter import SENSITIVE_PATTERNS assert "*.p12" in SENSITIVE_PATTERNS def test_sensitive_patterns_contains_secrets_files(self): 
"""Test SENSITIVE_PATTERNS contains secrets.* pattern.""" from prefect._experimental.bundles._ignore_filter import SENSITIVE_PATTERNS assert "secrets.*" in SENSITIVE_PATTERNS class TestCheckSensitiveFiles: """Tests for check_sensitive_files function.""" def test_check_sensitive_detects_env_files(self, tmp_path): """Test check_sensitive_files detects .env files.""" from prefect._experimental.bundles._ignore_filter import check_sensitive_files env_file = tmp_path / ".env" env_file.touch() warnings = check_sensitive_files([env_file], tmp_path) assert len(warnings) == 1 assert ".env" in warnings[0] def test_check_sensitive_detects_env_local(self, tmp_path): """Test check_sensitive_files detects .env.local files.""" from prefect._experimental.bundles._ignore_filter import check_sensitive_files env_file = tmp_path / ".env.local" env_file.touch() warnings = check_sensitive_files([env_file], tmp_path) assert len(warnings) == 1 assert ".env.local" in warnings[0] def test_check_sensitive_detects_pem_files(self, tmp_path): """Test check_sensitive_files detects .pem files.""" from prefect._experimental.bundles._ignore_filter import check_sensitive_files pem_file = tmp_path / "server.pem" pem_file.touch() warnings = check_sensitive_files([pem_file], tmp_path) assert len(warnings) == 1 assert "server.pem" in warnings[0] def test_check_sensitive_detects_key_files(self, tmp_path): """Test check_sensitive_files detects .key files.""" from prefect._experimental.bundles._ignore_filter import check_sensitive_files key_file = tmp_path / "private.key" key_file.touch() warnings = check_sensitive_files([key_file], tmp_path) assert len(warnings) == 1 assert "private.key" in warnings[0] def test_check_sensitive_detects_credentials_files(self, tmp_path): """Test check_sensitive_files detects credentials.* files.""" from prefect._experimental.bundles._ignore_filter import check_sensitive_files creds_file = tmp_path / "credentials.json" creds_file.touch() warnings = 
check_sensitive_files([creds_file], tmp_path) assert len(warnings) == 1 assert "credentials.json" in warnings[0] def test_check_sensitive_detects_rsa_keys(self, tmp_path): """Test check_sensitive_files detects *_rsa files.""" from prefect._experimental.bundles._ignore_filter import check_sensitive_files rsa_file = tmp_path / "id_rsa" rsa_file.touch() warnings = check_sensitive_files([rsa_file], tmp_path) assert len(warnings) == 1 assert "id_rsa" in warnings[0] def test_check_sensitive_detects_p12_files(self, tmp_path): """Test check_sensitive_files detects .p12 files.""" from prefect._experimental.bundles._ignore_filter import check_sensitive_files p12_file = tmp_path / "certificate.p12" p12_file.touch() warnings = check_sensitive_files([p12_file], tmp_path) assert len(warnings) == 1 assert "certificate.p12" in warnings[0] def test_check_sensitive_detects_secrets_files(self, tmp_path): """Test check_sensitive_files detects secrets.* files.""" from prefect._experimental.bundles._ignore_filter import check_sensitive_files secrets_file = tmp_path / "secrets.yaml" secrets_file.touch() warnings = check_sensitive_files([secrets_file], tmp_path) assert len(warnings) == 1 assert "secrets.yaml" in warnings[0] def test_check_sensitive_warning_format(self, tmp_path): """Test warning format includes pattern that matched.""" from prefect._experimental.bundles._ignore_filter import check_sensitive_files env_file = tmp_path / ".env" env_file.touch() warnings = check_sensitive_files([env_file], tmp_path) # Warning should include: "matches sensitive pattern {pattern}" assert "matches sensitive pattern" in warnings[0] assert ".env*" in warnings[0] def test_check_sensitive_suggests_prefectignore(self, tmp_path): """Test warning suggests adding to .prefectignore.""" from prefect._experimental.bundles._ignore_filter import check_sensitive_files key_file = tmp_path / "server.key" key_file.touch() warnings = check_sensitive_files([key_file], tmp_path) # Warning should suggest adding to 
.prefectignore assert ".prefectignore" in warnings[0] assert "Consider adding" in warnings[0] def test_check_sensitive_returns_empty_for_safe_files(self, tmp_path): """Test check_sensitive_files returns empty for non-sensitive files.""" from prefect._experimental.bundles._ignore_filter import check_sensitive_files safe_file = tmp_path / "main.py" safe_file.touch() warnings = check_sensitive_files([safe_file], tmp_path) assert warnings == [] def test_check_sensitive_multiple_files(self, tmp_path): """Test check_sensitive_files handles multiple files.""" from prefect._experimental.bundles._ignore_filter import check_sensitive_files env_file = tmp_path / ".env" key_file = tmp_path / "server.key" safe_file = tmp_path / "main.py" env_file.touch() key_file.touch() safe_file.touch() warnings = check_sensitive_files([env_file, key_file, safe_file], tmp_path) # Should have warnings for sensitive files only assert len(warnings) == 2 def test_check_sensitive_nested_file(self, tmp_path): """Test check_sensitive_files handles nested sensitive files.""" from prefect._experimental.bundles._ignore_filter import check_sensitive_files config_dir = tmp_path / "config" config_dir.mkdir() env_file = config_dir / ".env.production" env_file.touch() warnings = check_sensitive_files([env_file], tmp_path) assert len(warnings) == 1 # Should show relative path assert "config/.env.production" in warnings[0] class TestEmitExcludedWarning: """Tests for emit_excluded_warning function.""" def test_emit_excluded_warning_batched(self, tmp_path, caplog): """Test emit_excluded_warning batches files into single warning.""" from prefect._experimental.bundles._ignore_filter import emit_excluded_warning caplog.set_level(logging.WARNING) # Create 3 excluded files files = [] for i in range(3): f = tmp_path / f"file{i}.log" f.touch() files.append(f) emit_excluded_warning(files, tmp_path) # Should emit single warning warning_records = [r for r in caplog.records if r.levelno == logging.WARNING] assert 
len(warning_records) == 1 # Warning should mention count assert "3 files excluded" in warning_records[0].message def test_emit_excluded_warning_includes_file_names(self, tmp_path, caplog): """Test emit_excluded_warning includes file names.""" from prefect._experimental.bundles._ignore_filter import emit_excluded_warning caplog.set_level(logging.WARNING) file1 = tmp_path / "debug.log" file2 = tmp_path / "error.log" file1.touch() file2.touch() emit_excluded_warning([file1, file2], tmp_path) warning_msg = caplog.records[-1].message assert "debug.log" in warning_msg assert "error.log" in warning_msg def test_emit_excluded_warning_truncates_after_10(self, tmp_path, caplog): """Test emit_excluded_warning truncates list after 10 files.""" from prefect._experimental.bundles._ignore_filter import emit_excluded_warning caplog.set_level(logging.WARNING) # Create 15 excluded files files = [] for i in range(15): f = tmp_path / f"file{i:02d}.log" f.touch() files.append(f) emit_excluded_warning(files, tmp_path) warning_msg = caplog.records[-1].message # Should show first 10 files assert "file00.log" in warning_msg assert "file09.log" in warning_msg # Should NOT show files after 10 assert "file10.log" not in warning_msg # Should indicate more files assert "and 5 more" in warning_msg def test_emit_excluded_warning_mentions_prefectignore(self, tmp_path, caplog): """Test emit_excluded_warning mentions .prefectignore.""" from prefect._experimental.bundles._ignore_filter import emit_excluded_warning caplog.set_level(logging.WARNING) file = tmp_path / "excluded.log" file.touch() emit_excluded_warning([file], tmp_path) warning_msg = caplog.records[-1].message assert ".prefectignore" in warning_msg def test_emit_excluded_warning_empty_list_no_warning(self, tmp_path, caplog): """Test emit_excluded_warning emits nothing for empty list.""" from prefect._experimental.bundles._ignore_filter import emit_excluded_warning caplog.set_level(logging.WARNING) emit_excluded_warning([], tmp_path) 
warning_records = [r for r in caplog.records if r.levelno == logging.WARNING] assert len(warning_records) == 0 def test_emit_excluded_warning_exactly_10_no_truncation(self, tmp_path, caplog): """Test emit_excluded_warning shows all 10 files without truncation.""" from prefect._experimental.bundles._ignore_filter import emit_excluded_warning caplog.set_level(logging.WARNING) # Create exactly 10 files files = [] for i in range(10): f = tmp_path / f"file{i}.log" f.touch() files.append(f) emit_excluded_warning(files, tmp_path) warning_msg = caplog.records[-1].message # Should NOT show "and X more" assert "and" not in warning_msg or "more" not in warning_msg # Should show 10 files assert "10 files excluded" in warning_msg
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/_experimental/bundles/test_ignore_filter.py", "license": "Apache License 2.0", "lines": 629, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:tests/_experimental/bundles/test_include_files_integration.py
""" End-to-end integration tests for include_files feature. These tests verify the complete flow from @ecs(include_files=[...]) decorator through bundle creation, upload, download, extraction, and flow execution. """ from __future__ import annotations import json import zipfile from pathlib import Path import pytest class TestIncludeFilesIntegration: """End-to-end tests for include_files feature.""" @pytest.fixture def project_dir(self, tmp_path: Path) -> Path: """Create a project directory with sample files.""" # Create config file (tmp_path / "config.yaml").write_text("database: localhost\nport: 5432") # Create data directory with files data_dir = tmp_path / "data" data_dir.mkdir() (data_dir / "input.csv").write_text("id,name\n1,Alice\n2,Bob") (data_dir / "lookup.json").write_text('{"key": "value"}') # Create nested structure templates_dir = tmp_path / "templates" / "emails" templates_dir.mkdir(parents=True) (templates_dir / "welcome.html").write_text("<h1>Welcome!</h1>") return tmp_path @pytest.fixture def mock_flow_file(self, project_dir: Path) -> Path: """Create a mock flow file in the project directory.""" flow_file = project_dir / "flows" / "my_flow.py" flow_file.parent.mkdir(exist_ok=True) flow_file.write_text("# Flow definition here") return flow_file def test_file_collector_and_zip_builder_integration( self, project_dir: Path ) -> None: """FileCollector output feeds correctly into ZipBuilder.""" from prefect._experimental.bundles._file_collector import FileCollector from prefect._experimental.bundles._zip_builder import ZipBuilder # Collect files collector = FileCollector(project_dir) result = collector.collect(["config.yaml", "data/"]) # Build zip builder = ZipBuilder(project_dir) zip_result = builder.build(result.files) try: # Verify zip contains expected files with zipfile.ZipFile(zip_result.zip_path) as zf: names = set(zf.namelist()) assert "config.yaml" in names assert "data/input.csv" in names assert "data/lookup.json" in names # Verify content 
assert ( zf.read("config.yaml").decode() == "database: localhost\nport: 5432" ) assert "Alice" in zf.read("data/input.csv").decode() finally: builder.cleanup() def test_zip_builder_extractor_roundtrip( self, project_dir: Path, tmp_path: Path ) -> None: """Files packaged by ZipBuilder extract correctly via ZipExtractor.""" from prefect._experimental.bundles._file_collector import FileCollector from prefect._experimental.bundles._zip_builder import ZipBuilder from prefect._experimental.bundles._zip_extractor import ZipExtractor # Collect and build collector = FileCollector(project_dir) result = collector.collect(["config.yaml", "data/", "templates/"]) builder = ZipBuilder(project_dir) zip_result = builder.build(result.files) # Extract to different directory (simulating remote execution) work_dir = tmp_path / "work" work_dir.mkdir() extractor = ZipExtractor(zip_result.zip_path) extracted = extractor.extract(work_dir) try: # Verify extracted files assert (work_dir / "config.yaml").exists() assert (work_dir / "data" / "input.csv").exists() assert (work_dir / "data" / "lookup.json").exists() assert (work_dir / "templates" / "emails" / "welcome.html").exists() # Verify content matches assert ( work_dir / "config.yaml" ).read_text() == "database: localhost\nport: 5432" assert "Alice" in (work_dir / "data" / "input.csv").read_text() assert ( work_dir / "templates" / "emails" / "welcome.html" ).read_text() == "<h1>Welcome!</h1>" # Verify returned paths assert len(extracted) == 4 assert all(p.exists() for p in extracted) finally: builder.cleanup() def test_glob_pattern_collection_and_extraction( self, project_dir: Path, tmp_path: Path ) -> None: """Glob patterns collect and extract correctly.""" from prefect._experimental.bundles._file_collector import FileCollector from prefect._experimental.bundles._zip_builder import ZipBuilder from prefect._experimental.bundles._zip_extractor import ZipExtractor # Add more files for glob testing (project_dir / 
"schema.json").write_text('{"type": "object"}') (project_dir / "data" / "extra.json").write_text('{"extra": true}') # Collect using globs collector = FileCollector(project_dir) result = collector.collect(["**/*.json", "config.yaml"]) builder = ZipBuilder(project_dir) zip_result = builder.build(result.files) # Extract work_dir = tmp_path / "work" work_dir.mkdir() extractor = ZipExtractor(zip_result.zip_path) extractor.extract(work_dir) try: # All JSON files should be present assert (work_dir / "schema.json").exists() assert (work_dir / "data" / "lookup.json").exists() assert (work_dir / "data" / "extra.json").exists() assert (work_dir / "config.yaml").exists() # CSV should NOT be present (not matching pattern) assert not (work_dir / "data" / "input.csv").exists() finally: builder.cleanup() def test_ignore_filter_integration(self, project_dir: Path, tmp_path: Path) -> None: """Files matching .prefectignore are excluded from extraction.""" from prefect._experimental.bundles._file_collector import FileCollector from prefect._experimental.bundles._ignore_filter import IgnoreFilter from prefect._experimental.bundles._zip_builder import ZipBuilder from prefect._experimental.bundles._zip_extractor import ZipExtractor # Create .prefectignore (project_dir / ".prefectignore").write_text("*.json\n") # Create sensitive file (project_dir / ".env").write_text("SECRET=value") # Collect files collector = FileCollector(project_dir) result = collector.collect(["config.yaml", "data/", ".env"]) # Apply ignore filter ignore_filter = IgnoreFilter(project_dir) filtered = ignore_filter.filter( result.files, explicit_patterns=["config.yaml", "data/", ".env"] ) # Build zip with filtered files builder = ZipBuilder(project_dir) zip_result = builder.build(filtered.included_files) # Extract work_dir = tmp_path / "work" work_dir.mkdir() extractor = ZipExtractor(zip_result.zip_path) extractor.extract(work_dir) try: # YAML should be present assert (work_dir / "config.yaml").exists() # CSV should be 
present (not ignored) assert (work_dir / "data" / "input.csv").exists() # JSON files should be excluded by .prefectignore assert not (work_dir / "data" / "lookup.json").exists() finally: builder.cleanup() def test_simulated_bundle_execution_flow( self, project_dir: Path, tmp_path: Path ) -> None: """Simulate complete flow: collect -> zip -> 'upload' -> 'download' -> extract.""" from prefect._experimental.bundles._file_collector import FileCollector from prefect._experimental.bundles._zip_builder import ZipBuilder from prefect._experimental.bundles._zip_extractor import ZipExtractor # === Development Environment === # Collect files collector = FileCollector(project_dir) result = collector.collect(["config.yaml", "data/"]) # Build zip builder = ZipBuilder(project_dir) zip_result = builder.build(result.files) # Create bundle metadata (simulating create_bundle_for_flow_run) bundle = { "function": "serialized_flow", "context": "serialized_context", "flow_run": {"id": "test-run-123"}, "dependencies": "prefect>=2.0", "files_key": zip_result.storage_key, } # Simulate "upload" - just keep references uploaded_bundle = json.dumps(bundle).encode() uploaded_zip = zip_result.zip_path.read_bytes() builder.cleanup() # === Execution Environment === execution_dir = tmp_path / "execution" execution_dir.mkdir() # Simulate "download" bundle bundle_path = execution_dir / "bundle.json" bundle_path.write_bytes(uploaded_bundle) # Parse bundle downloaded_bundle = json.loads(bundle_path.read_bytes()) # Check for files_key and "download" zip files_key = downloaded_bundle.get("files_key") assert files_key is not None assert files_key.startswith("files/") # Simulate "download" zip zip_path = execution_dir / "files.zip" zip_path.write_bytes(uploaded_zip) # Extract to working directory work_dir = execution_dir / "work" work_dir.mkdir() extractor = ZipExtractor(zip_path) extractor.extract(work_dir) extractor.cleanup() # === Verification === # Files should be available at same relative paths assert 
(work_dir / "config.yaml").exists() assert (work_dir / "data" / "input.csv").exists() assert (work_dir / "data" / "lookup.json").exists() # Content should match original assert (work_dir / "config.yaml").read_text() == ( project_dir / "config.yaml" ).read_text() assert (work_dir / "data" / "input.csv").read_text() == ( project_dir / "data" / "input.csv" ).read_text() # Zip should be cleaned up assert not zip_path.exists() def test_decorator_include_files_propagation(self) -> None: """include_files flows from @ecs decorator to InfrastructureBoundFlow.""" pytest.importorskip("prefect_aws") from prefect_aws.experimental.decorators import ecs from prefect.flows import flow @ecs(work_pool="my-pool", include_files=["config.yaml", "data/"]) @flow def my_flow(): pass # Check the bound flow has include_files assert hasattr(my_flow, "include_files") assert my_flow.include_files == ["config.yaml", "data/"] def test_with_options_include_files_override(self) -> None: """with_options can override include_files.""" pytest.importorskip("prefect_aws") from prefect_aws.experimental.decorators import ecs from prefect.flows import flow @ecs(work_pool="my-pool", include_files=["config.yaml"]) @flow def my_flow(): pass # Override with with_options updated_flow = my_flow.with_options(include_files=["data/", "models/"]) assert updated_flow.include_files == ["data/", "models/"] # Original unchanged assert my_flow.include_files == ["config.yaml"] def test_empty_include_files_no_files_key(self) -> None: """Empty include_files results in files_key being None.""" pytest.importorskip("prefect_aws") from prefect_aws.experimental.decorators import ecs from prefect.flows import flow @ecs(work_pool="my-pool", include_files=[]) @flow def my_flow(): pass # Empty list should be treated as no files assert my_flow.include_files == [] def test_none_include_files_no_files_key(self) -> None: """None include_files (default) results in no files_key.""" pytest.importorskip("prefect_aws") from 
prefect_aws.experimental.decorators import ecs from prefect.flows import flow @ecs(work_pool="my-pool") @flow def my_flow(): pass # Default should be None assert my_flow.include_files is None def test_paths_match_between_dev_and_execution( self, project_dir: Path, tmp_path: Path ) -> None: """ EXEC-02: Extracted files are accessible at same relative paths as in development. This test verifies that if a file exists at `data/input.csv` relative to the flow file in development, it will be available at `./data/input.csv` relative to the working directory during execution. """ from prefect._experimental.bundles._file_collector import FileCollector from prefect._experimental.bundles._zip_builder import ZipBuilder from prefect._experimental.bundles._zip_extractor import ZipExtractor # Development: files at specific relative paths dev_paths = { "config.yaml": project_dir / "config.yaml", "data/input.csv": project_dir / "data" / "input.csv", "templates/emails/welcome.html": project_dir / "templates" / "emails" / "welcome.html", } # Collect all collector = FileCollector(project_dir) result = collector.collect(["config.yaml", "data/", "templates/"]) # Package builder = ZipBuilder(project_dir) zip_result = builder.build(result.files) # Extract to execution directory exec_dir = tmp_path / "execution" exec_dir.mkdir() extractor = ZipExtractor(zip_result.zip_path) extractor.extract(exec_dir) builder.cleanup() # Verify: execution paths match development relative paths for rel_path, dev_path in dev_paths.items(): exec_path = exec_dir / rel_path assert exec_path.exists(), f"Missing: {rel_path}" assert exec_path.read_text() == dev_path.read_text(), ( f"Content mismatch: {rel_path}" ) class TestBundleUploadWithSidecar: """Tests for sidecar zip upload alongside bundle.""" @pytest.fixture def project_with_files(self, tmp_path: Path) -> Path: """Create a project directory with files.""" (tmp_path / "config.yaml").write_text("key: value") data_dir = tmp_path / "data" data_dir.mkdir() 
(data_dir / "input.csv").write_text("a,b\n1,2") return tmp_path def test_upload_bundle_with_sidecar( self, project_with_files: Path, tmp_path: Path ) -> None: """upload_bundle_to_storage uploads both bundle and sidecar.""" import sys from unittest.mock import patch from prefect._experimental.bundles import upload_bundle_to_storage from prefect._experimental.bundles._file_collector import FileCollector from prefect._experimental.bundles._zip_builder import ZipBuilder # Create a sidecar zip collector = FileCollector(project_with_files) result = collector.collect(["config.yaml", "data/"]) builder = ZipBuilder(project_with_files) zip_result = builder.build(result.files) # Create bundle with files_key bundle = { "function": "serialized_flow", "context": "serialized_context", "flow_run": {"id": "test-123"}, "dependencies": "", "files_key": zip_result.storage_key, } # Create a "storage" directory to simulate upload destination storage_dir = tmp_path / "storage" storage_dir.mkdir() # Use a script that copies files to storage (simulating cloud upload) # The upload command copies each file to the storage directory # Create a helper script that copies files preserving directory structure helper_script = tmp_path / "upload_helper.py" helper_script.write_text(f''' import sys import shutil from pathlib import Path # Get the key (relative path) as argument key = sys.argv[1] storage_dir = Path("{storage_dir}") # Copy the file to storage src = Path(key) dest = storage_dir / key dest.parent.mkdir(parents=True, exist_ok=True) shutil.copy2(src, dest) ''') upload_command = [sys.executable, str(helper_script)] # The upload_step is used by convert_step_to_command to build the sidecar # upload command. We mock convert_step_to_command to return our helper # script command so we don't need `uv run` in tests. 
upload_step = {"test_module.upload": {}} def mock_convert_step(step, key, quiet=False): return [sys.executable, str(helper_script)] # Upload with patch( "prefect._experimental.bundles.convert_step_to_command", side_effect=mock_convert_step, ): upload_bundle_to_storage( bundle=bundle, # type: ignore[arg-type] key="bundle.json", upload_command=upload_command, zip_path=zip_result.zip_path, upload_step=upload_step, ) # Verify bundle was uploaded assert (storage_dir / "bundle.json").exists() bundle_content = json.loads((storage_dir / "bundle.json").read_text()) assert bundle_content["files_key"] == zip_result.storage_key # Verify sidecar zip was uploaded sidecar_path = storage_dir / zip_result.storage_key assert sidecar_path.exists() # Verify sidecar contains expected files with zipfile.ZipFile(sidecar_path) as zf: names = set(zf.namelist()) assert "config.yaml" in names assert "data/input.csv" in names builder.cleanup() def test_upload_bundle_without_sidecar(self, tmp_path: Path) -> None: """upload_bundle_to_storage works normally without sidecar (backward compat).""" import sys from prefect._experimental.bundles import upload_bundle_to_storage bundle = { "function": "serialized_flow", "context": "serialized_context", "flow_run": {"id": "test-123"}, "dependencies": "", "files_key": None, } storage_dir = tmp_path / "storage" storage_dir.mkdir() # Create helper script helper_script = tmp_path / "upload_helper.py" helper_script.write_text(f''' import sys import shutil from pathlib import Path key = sys.argv[1] storage_dir = Path("{storage_dir}") src = Path(key) dest = storage_dir / key dest.parent.mkdir(parents=True, exist_ok=True) shutil.copy2(src, dest) ''') upload_command = [sys.executable, str(helper_script)] # Should complete without error - no sidecar to upload upload_bundle_to_storage( bundle=bundle, # type: ignore[arg-type] key="bundle.json", upload_command=upload_command, zip_path=None, ) # Verify bundle was uploaded assert (storage_dir / "bundle.json").exists() 
# Verify no sidecar files were created assert not (storage_dir / "files").exists() def test_upload_bundle_with_sidecar_no_files_key(self, tmp_path: Path) -> None: """If zip_path provided but no files_key, sidecar is not uploaded.""" from prefect._experimental.bundles import upload_bundle_to_storage from prefect._experimental.bundles._file_collector import FileCollector from prefect._experimental.bundles._zip_builder import ZipBuilder # Create files and zip (tmp_path / "data.txt").write_text("test") collector = FileCollector(tmp_path) result = collector.collect(["data.txt"]) builder = ZipBuilder(tmp_path) zip_result = builder.build(result.files) # Bundle WITHOUT files_key bundle = { "function": "serialized_flow", "context": "serialized_context", "flow_run": {"id": "test-123"}, "dependencies": "", "files_key": None, # No files_key } storage_dir = tmp_path / "storage" storage_dir.mkdir() import sys helper_script = tmp_path / "upload_helper.py" helper_script.write_text(f''' import sys import shutil from pathlib import Path key = sys.argv[1] storage_dir = Path("{storage_dir}") src = Path(key) dest = storage_dir / key dest.parent.mkdir(parents=True, exist_ok=True) shutil.copy2(src, dest) ''') upload_command = [sys.executable, str(helper_script)] # Even with zip_path, sidecar should not be uploaded when no files_key upload_bundle_to_storage( bundle=bundle, # type: ignore[arg-type] key="bundle.json", upload_command=upload_command, zip_path=zip_result.zip_path, ) # Bundle uploaded assert (storage_dir / "bundle.json").exists() # But no sidecar (no files directory) assert not (storage_dir / "files").exists() builder.cleanup() class TestCreateBundleForFlowRunE2E: """ End-to-end tests for create_bundle_for_flow_run with include_files. These tests call the actual function (not simulated) to verify the complete integration from decorator attributes through bundle creation to extraction. 
""" @pytest.fixture def project_with_flow(self, tmp_path: Path) -> tuple[Path, Path]: """ Create a project directory with files and an actual flow module. Returns (project_dir, flow_file_path) """ # Create files to include (tmp_path / "config.yaml").write_text("database: localhost\nport: 5432") data_dir = tmp_path / "data" data_dir.mkdir() (data_dir / "input.csv").write_text("id,name\n1,Alice\n2,Bob") (data_dir / "settings.json").write_text('{"debug": true}') # Create nested directory templates_dir = tmp_path / "templates" templates_dir.mkdir() (templates_dir / "email.html").write_text("<h1>Hello</h1>") # Create a real flow file flow_file = tmp_path / "my_flow.py" flow_file.write_text(''' from prefect import flow @flow def my_flow(): """A test flow.""" return "hello" ''') return tmp_path, flow_file def test_create_bundle_populates_files_key( self, project_with_flow: tuple[Path, Path] ) -> None: """ EXEC-01/EXEC-02: create_bundle_for_flow_run populates files_key when flow has include_files attribute. This test calls the ACTUAL function, not a simulation. 
""" import shutil from unittest.mock import MagicMock, patch from prefect._experimental.bundles import create_bundle_for_flow_run from prefect.client.schemas.objects import FlowRun from prefect.flows import Flow project_dir, flow_file = project_with_flow # Create a flow and set include_files (as @ecs decorator would) @Flow def test_flow(): return "result" test_flow.include_files = ["config.yaml", "data/", "templates/"] # Mock inspect.getfile to return our flow file path with patch( "prefect._experimental.bundles.inspect.getfile", return_value=str(flow_file) ): flow_run = MagicMock(spec=FlowRun) flow_run.model_dump.return_value = {"id": "test-run-123"} # Call the ACTUAL function result = create_bundle_for_flow_run( flow=test_flow, flow_run=flow_run, ) bundle = result["bundle"] zip_path = result["zip_path"] try: # CRITICAL VERIFICATION: files_key is NOT None assert bundle["files_key"] is not None, ( "files_key should be populated when include_files is set" ) assert bundle["files_key"].startswith("files/"), ( f"files_key should start with 'files/', got: {bundle['files_key']}" ) assert bundle["files_key"].endswith(".zip"), ( f"files_key should end with '.zip', got: {bundle['files_key']}" ) # zip_path should exist assert zip_path is not None assert zip_path.exists(), f"Zip file should exist at {zip_path}" # Verify zip content with zipfile.ZipFile(zip_path) as zf: names = set(zf.namelist()) assert "config.yaml" in names assert "data/input.csv" in names assert "data/settings.json" in names assert "templates/email.html" in names finally: # Cleanup if zip_path and zip_path.exists(): zip_path.unlink() if zip_path.parent.exists(): shutil.rmtree(zip_path.parent, ignore_errors=True) def test_create_bundle_to_extraction_roundtrip( self, project_with_flow: tuple[Path, Path], tmp_path: Path ) -> None: """ Full roundtrip: create_bundle_for_flow_run -> extraction -> file access. Proves EXEC-02: Files are accessible at same relative paths. 
""" import shutil from unittest.mock import MagicMock, patch from prefect._experimental.bundles import create_bundle_for_flow_run from prefect._experimental.bundles._zip_extractor import ZipExtractor from prefect.client.schemas.objects import FlowRun from prefect.flows import Flow project_dir, flow_file = project_with_flow @Flow def test_flow(): return "result" test_flow.include_files = ["config.yaml", "data/"] with patch( "prefect._experimental.bundles.inspect.getfile", return_value=str(flow_file) ): flow_run = MagicMock(spec=FlowRun) flow_run.model_dump.return_value = {"id": "test-run-456"} result = create_bundle_for_flow_run( flow=test_flow, flow_run=flow_run, ) zip_path = result["zip_path"] try: # Simulate execution environment exec_dir = tmp_path / "execution" exec_dir.mkdir() # Extract files (as cloud execute functions would) extractor = ZipExtractor(zip_path) extractor.extract(exec_dir) # CRITICAL VERIFICATION: Paths match development assert (exec_dir / "config.yaml").exists() assert (exec_dir / "data" / "input.csv").exists() assert (exec_dir / "data" / "settings.json").exists() # Content matches assert (exec_dir / "config.yaml").read_text() == ( project_dir / "config.yaml" ).read_text() assert (exec_dir / "data" / "input.csv").read_text() == ( project_dir / "data" / "input.csv" ).read_text() finally: # Cleanup if zip_path and zip_path.exists(): zip_path.unlink() if zip_path.parent.exists(): shutil.rmtree(zip_path.parent, ignore_errors=True) def test_create_bundle_no_files_key_without_include_files(self) -> None: """files_key remains None when flow has no include_files.""" from unittest.mock import MagicMock from prefect._experimental.bundles import create_bundle_for_flow_run from prefect.client.schemas.objects import FlowRun from prefect.flows import Flow @Flow def test_flow(): return "result" # No include_files attribute set flow_run = MagicMock(spec=FlowRun) flow_run.model_dump.return_value = {"id": "test-run-789"} result = create_bundle_for_flow_run( 
flow=test_flow, flow_run=flow_run, ) assert result["bundle"]["files_key"] is None assert result["zip_path"] is None def test_create_bundle_respects_prefectignore( self, project_with_flow: tuple[Path, Path], tmp_path: Path ) -> None: """Files in .prefectignore are excluded from bundle.""" import shutil from unittest.mock import MagicMock, patch from prefect._experimental.bundles import create_bundle_for_flow_run from prefect._experimental.bundles._zip_extractor import ZipExtractor from prefect.client.schemas.objects import FlowRun from prefect.flows import Flow project_dir, flow_file = project_with_flow # Add .prefectignore (project_dir / ".prefectignore").write_text("*.json\n") @Flow def test_flow(): return "result" test_flow.include_files = ["config.yaml", "data/"] with patch( "prefect._experimental.bundles.inspect.getfile", return_value=str(flow_file) ): flow_run = MagicMock(spec=FlowRun) flow_run.model_dump.return_value = {"id": "test-ignore"} result = create_bundle_for_flow_run( flow=test_flow, flow_run=flow_run, ) zip_path = result["zip_path"] try: # Extract and verify exec_dir = tmp_path / "execution" exec_dir.mkdir() extractor = ZipExtractor(zip_path) extractor.extract(exec_dir) # config.yaml should be present (not ignored) assert (exec_dir / "config.yaml").exists() # input.csv should be present (not ignored) assert (exec_dir / "data" / "input.csv").exists() # settings.json should be EXCLUDED (matches *.json) assert not (exec_dir / "data" / "settings.json").exists() finally: if zip_path and zip_path.exists(): zip_path.unlink() if zip_path.parent.exists(): shutil.rmtree(zip_path.parent, ignore_errors=True)
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/_experimental/bundles/test_include_files_integration.py", "license": "Apache License 2.0", "lines": 684, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:tests/_experimental/bundles/test_path_resolver.py
""" Tests for path resolution input validation and symlink handling. This module tests the path validation functions in path_resolver.py. Includes tests for input validation (no filesystem access) and symlink resolution tests (uses tmp_path for filesystem operations). """ from __future__ import annotations import platform import pytest from prefect._experimental.bundles._path_resolver import ( MAX_SYMLINK_DEPTH, PathResolutionError, PathResolver, PathValidationError, PathValidationResult, check_for_duplicates, normalize_path_separator, resolve_paths, resolve_with_symlink_check, validate_path_input, ) class TestPathValidationError: """Tests for PathValidationError dataclass.""" def test_create_with_required_fields(self): """Test creating a PathValidationError with required fields.""" error = PathValidationError( input_path="test.txt", resolved_path=None, error_type="empty", message="Path cannot be empty", ) assert error.input_path == "test.txt" assert error.resolved_path is None assert error.error_type == "empty" assert error.message == "Path cannot be empty" assert error.suggestion is None def test_create_with_suggestion(self): """Test creating a PathValidationError with optional suggestion.""" error = PathValidationError( input_path="/absolute/path", resolved_path="/absolute/path", error_type="absolute", message="Absolute paths not allowed", suggestion="Use relative path from flow file directory", ) assert error.suggestion == "Use relative path from flow file directory" def test_all_error_types_are_valid(self): """Test that all documented error types can be used.""" valid_error_types = [ "empty", "whitespace", "null_byte", "absolute", "duplicate", "traversal", "symlink_loop", "broken_symlink", "not_found", ] for error_type in valid_error_types: error = PathValidationError( input_path="test", resolved_path=None, error_type=error_type, message=f"Error: {error_type}", ) assert error.error_type == error_type class TestPathValidationResult: """Tests for 
PathValidationResult dataclass.""" def test_create_empty_result(self): """Test creating an empty validation result.""" result = PathValidationResult() assert result.valid_paths == [] assert result.errors == [] assert result.has_errors is False def test_has_errors_with_errors(self): """Test has_errors property returns True when errors exist.""" error = PathValidationError( input_path="", resolved_path=None, error_type="empty", message="Path cannot be empty", ) result = PathValidationResult(errors=[error]) assert result.has_errors is True def test_has_errors_without_errors(self): """Test has_errors property returns False when no errors.""" result = PathValidationResult(valid_paths=[]) assert result.has_errors is False def test_raise_if_errors_with_no_errors(self): """Test raise_if_errors does nothing when no errors.""" result = PathValidationResult() result.raise_if_errors() # Should not raise def test_raise_if_errors_with_errors(self): """Test raise_if_errors raises PathResolutionError when errors exist.""" error = PathValidationError( input_path="", resolved_path=None, error_type="empty", message="Path cannot be empty", ) result = PathValidationResult(errors=[error]) with pytest.raises(PathResolutionError): result.raise_if_errors() class TestPathResolutionError: """Tests for PathResolutionError exception.""" def test_format_single_error(self): """Test exception message format with single error.""" error = PathValidationError( input_path="", resolved_path=None, error_type="empty", message="Path cannot be empty", ) exc = PathResolutionError([error]) assert "1 path validation error" in str(exc) assert "Path cannot be empty" in str(exc) def test_format_multiple_errors(self): """Test exception message format with multiple errors.""" errors = [ PathValidationError( input_path="", resolved_path=None, error_type="empty", message="Path cannot be empty", ), PathValidationError( input_path="/abs/path", resolved_path=None, error_type="absolute", message="Absolute paths not 
allowed", ), ] exc = PathResolutionError(errors) assert "2 path validation error" in str(exc) assert "Path cannot be empty" in str(exc) assert "Absolute paths not allowed" in str(exc) def test_errors_accessible_on_exception(self): """Test that errors list is accessible on the exception.""" error = PathValidationError( input_path="test", resolved_path=None, error_type="empty", message="Test error", ) exc = PathResolutionError([error]) assert exc.errors == [error] class TestValidatePathInput: """Tests for validate_path_input function.""" def test_empty_string_rejected(self): """Test that empty string raises PathValidationError.""" with pytest.raises(PathValidationError) as exc_info: validate_path_input("") assert exc_info.value.error_type == "empty" assert "empty" in exc_info.value.message.lower() def test_whitespace_only_rejected(self): """Test that whitespace-only string raises PathValidationError.""" with pytest.raises(PathValidationError) as exc_info: validate_path_input(" ") assert exc_info.value.error_type == "whitespace" assert "whitespace" in exc_info.value.message.lower() def test_tabs_only_rejected(self): """Test that tabs-only string raises PathValidationError.""" with pytest.raises(PathValidationError) as exc_info: validate_path_input("\t\t") assert exc_info.value.error_type == "whitespace" def test_newlines_only_rejected(self): """Test that newlines-only string raises PathValidationError.""" with pytest.raises(PathValidationError) as exc_info: validate_path_input("\n\n") assert exc_info.value.error_type == "whitespace" def test_null_byte_rejected(self): """Test that path with null byte raises PathValidationError.""" with pytest.raises(PathValidationError) as exc_info: validate_path_input("path\x00.txt") assert exc_info.value.error_type == "null_byte" assert "null" in exc_info.value.message.lower() def test_null_byte_at_start(self): """Test that null byte at start is rejected.""" with pytest.raises(PathValidationError) as exc_info: 
validate_path_input("\x00path.txt") assert exc_info.value.error_type == "null_byte" def test_null_byte_at_end(self): """Test that null byte at end is rejected.""" with pytest.raises(PathValidationError) as exc_info: validate_path_input("path.txt\x00") assert exc_info.value.error_type == "null_byte" def test_absolute_path_unix_rejected(self): """Test that absolute Unix path raises PathValidationError.""" with pytest.raises(PathValidationError) as exc_info: validate_path_input("/absolute/path") assert exc_info.value.error_type == "absolute" assert "absolute" in exc_info.value.message.lower() assert exc_info.value.suggestion is not None assert "relative" in exc_info.value.suggestion.lower() @pytest.mark.skipif(platform.system() != "Windows", reason="Windows-only test") def test_absolute_path_windows_drive_rejected(self): """Test that Windows drive path raises PathValidationError.""" with pytest.raises(PathValidationError) as exc_info: validate_path_input("C:\\Users\\path") assert exc_info.value.error_type == "absolute" def test_absolute_path_windows_drive_on_any_platform(self): """Test that Windows drive path is detected on any platform.""" # Windows paths like C:\\ are considered absolute even on Unix with pytest.raises(PathValidationError) as exc_info: validate_path_input("C:\\absolute\\path") assert exc_info.value.error_type == "absolute" def test_valid_relative_path_accepted(self): """Test that valid relative path does not raise.""" validate_path_input("config.yaml") # Should not raise def test_valid_relative_path_with_dot(self): """Test that path starting with ./ does not raise.""" validate_path_input("./config.yaml") # Should not raise def test_valid_relative_path_with_subdirectory(self): """Test that path with subdirectory does not raise.""" validate_path_input("data/file.txt") # Should not raise def test_valid_path_with_backslashes(self): """Test that backslashes in path are allowed as input.""" validate_path_input("data\\file.txt") # Should not raise def 
test_valid_path_with_parent_reference(self):
        """Test that parent reference in relative path is allowed at input validation stage."""
        # Note: traversal detection happens during resolution, not input validation
        validate_path_input("../sibling/file.txt")  # Should not raise at input stage

    def test_valid_dotfile(self):
        """Test that dotfile paths are valid."""
        validate_path_input(".gitignore")  # Should not raise

    def test_valid_hidden_directory(self):
        """Test that hidden directory paths are valid."""
        validate_path_input(".config/settings.yaml")  # Should not raise


class TestCheckForDuplicates:
    """Tests for check_for_duplicates function.

    check_for_duplicates compares path strings after separator/trailing-slash
    normalization only — it does NOT resolve `..` segments (see
    test_different_paths_not_normalized below).
    """

    def test_no_duplicates(self):
        """Test that unique paths return empty list."""
        paths = ["a.txt", "b.txt", "c.txt"]
        duplicates = check_for_duplicates(paths)
        assert duplicates == []

    def test_exact_duplicate(self):
        """Test that exact duplicate is detected."""
        paths = ["a.txt", "b.txt", "a.txt"]
        duplicates = check_for_duplicates(paths)
        assert duplicates == ["a.txt"]

    def test_multiple_duplicates(self):
        """Test that multiple duplicates are detected."""
        paths = ["a.txt", "b.txt", "a.txt", "b.txt", "c.txt"]
        duplicates = check_for_duplicates(paths)
        assert "a.txt" in duplicates
        assert "b.txt" in duplicates
        assert len(duplicates) == 2

    def test_triple_occurrence(self):
        """Test that third occurrence is also reported."""
        paths = ["a.txt", "a.txt", "a.txt"]
        duplicates = check_for_duplicates(paths)
        # Both second and third occurrences should be reported
        assert duplicates == ["a.txt", "a.txt"]

    def test_different_paths_not_normalized(self):
        """Test that paths with .. are NOT normalized (string comparison only)."""
        # data/../a.txt and a.txt are different strings
        paths = ["a.txt", "data/../a.txt"]
        duplicates = check_for_duplicates(paths)
        assert duplicates == []

    def test_backslash_normalized_for_comparison(self):
        """Test that backslashes are normalized to forward slashes for comparison."""
        paths = ["data/file.txt", "data\\file.txt"]
        duplicates = check_for_duplicates(paths)
        # These should be detected as duplicates
        assert "data\\file.txt" in duplicates

    def test_trailing_slash_normalized(self):
        """Test that trailing slashes are normalized for comparison."""
        paths = ["data/", "data"]
        duplicates = check_for_duplicates(paths)
        # These should be detected as duplicates
        assert len(duplicates) == 1

    def test_preserves_original_path_strings(self):
        """Test that original path strings are returned, not normalized versions."""
        paths = ["data/file.txt", "data\\file.txt"]
        duplicates = check_for_duplicates(paths)
        # Should return the original backslash version
        assert duplicates == ["data\\file.txt"]

    def test_empty_list(self):
        """Test that empty list returns empty list."""
        duplicates = check_for_duplicates([])
        assert duplicates == []

    def test_single_item(self):
        """Test that single item list returns empty list."""
        duplicates = check_for_duplicates(["a.txt"])
        assert duplicates == []

    def test_case_sensitive(self):
        """Test that comparison is case-sensitive."""
        paths = ["File.txt", "file.txt"]
        duplicates = check_for_duplicates(paths)
        # These are different paths (case matters on most filesystems)
        assert duplicates == []


class TestNormalizePathSeparator:
    """Tests for normalize_path_separator function."""

    def test_backslash_to_forward_slash(self):
        """Test that backslashes are converted to forward slashes."""
        assert normalize_path_separator("data\\file.txt") == "data/file.txt"

    def test_multiple_backslashes(self):
        """Test that multiple backslashes are converted."""
        assert (
            normalize_path_separator("path\\to\\deeply\\nested\\file.txt")
            == "path/to/deeply/nested/file.txt"
        )

    def test_forward_slashes_unchanged(self):
        """Test that forward slashes remain unchanged."""
        assert normalize_path_separator("data/file.txt") == "data/file.txt"

    def test_mixed_slashes(self):
        """Test that mixed slashes are normalized."""
        assert (
            normalize_path_separator("path\\to/mixed/file.txt")
            == "path/to/mixed/file.txt"
        )

    def test_no_slashes(self):
        """Test that path without slashes is unchanged."""
        assert normalize_path_separator("file.txt") == "file.txt"

    def test_empty_string(self):
        """Test that empty string returns empty string."""
        assert normalize_path_separator("") == ""

    def test_dot_path(self):
        """Test that dot path is handled."""
        assert normalize_path_separator(".\\config") == "./config"

    def test_double_dot_path(self):
        """Test that parent reference path is handled."""
        assert normalize_path_separator("..\\sibling") == "../sibling"

    def test_windows_style_path(self):
        """Test full Windows-style path conversion."""
        assert (
            normalize_path_separator("src\\prefect\\bundles\\__init__.py")
            == "src/prefect/bundles/__init__.py"
        )


# Skip symlink tests on Windows since symlink creation requires admin privileges
symlink_skip = pytest.mark.skipif(
    platform.system() == "Windows",
    reason="Symlink tests require admin privileges on Windows",
)


class TestMaxSymlinkDepth:
    """Tests for MAX_SYMLINK_DEPTH constant."""

    def test_max_symlink_depth_is_10(self):
        """Test that MAX_SYMLINK_DEPTH is set to 10."""
        assert MAX_SYMLINK_DEPTH == 10


@symlink_skip
class TestResolveWithSymlinkCheck:
    """Tests for resolve_with_symlink_check function.

    Covers resolution of regular files and symlinks, traversal protection
    (links escaping base_dir), broken links, and loop/depth-limit detection.
    """

    def test_regular_file_no_symlink(self, tmp_path):
        """Test that regular file without symlink is resolved correctly."""
        # Setup: create a regular file
        file = tmp_path / "data" / "config.yaml"
        file.parent.mkdir(parents=True)
        file.write_text("config content")

        result = resolve_with_symlink_check(file, tmp_path)

        assert result == file.resolve()

    def test_symlink_within_base_dir(self, tmp_path):
        """Test symlink within base dir pointing to file within base dir is resolved."""
        # Setup: create target file and symlink
        target = tmp_path / "data" / "actual.txt"
        target.parent.mkdir(parents=True)
        target.write_text("actual content")
        link = tmp_path / "link.txt"
        link.symlink_to(target)

        result = resolve_with_symlink_check(link, tmp_path)

        assert result == target.resolve()

    def test_symlink_relative_target(self, tmp_path):
        """Test symlink with relative target path is resolved correctly."""
        # Setup: create target and symlink with relative path
        target = tmp_path / "data" / "file.txt"
        target.parent.mkdir(parents=True)
        target.write_text("content")
        link = tmp_path / "data" / "link.txt"
        # Use relative path for symlink target
        link.symlink_to("file.txt")

        result = resolve_with_symlink_check(link, tmp_path)

        assert result == target.resolve()

    def test_symlink_pointing_outside_base_dir_raises_traversal(self, tmp_path):
        """Test symlink pointing outside base dir raises PathValidationError."""
        # Setup: create base dir and outside target
        base_dir = tmp_path / "project"
        base_dir.mkdir()
        outside_target = tmp_path / "outside" / "secret.txt"
        outside_target.parent.mkdir(parents=True)
        outside_target.write_text("secret content")
        link = base_dir / "sneaky_link.txt"
        link.symlink_to(outside_target)

        with pytest.raises(PathValidationError) as exc_info:
            resolve_with_symlink_check(link, base_dir)

        assert exc_info.value.error_type == "traversal"
        assert "outside" in exc_info.value.message.lower()

    def test_symlink_to_absolute_path_outside_raises_traversal(self, tmp_path):
        """Test symlink to absolute path outside base dir raises error."""
        # Setup: create link pointing to /etc/passwd equivalent
        base_dir = tmp_path / "project"
        base_dir.mkdir()
        outside_target = tmp_path / "outside.txt"
        outside_target.write_text("outside")
        link = base_dir / "link.txt"
        link.symlink_to(outside_target.resolve())  # Absolute path

        with pytest.raises(PathValidationError) as exc_info:
            resolve_with_symlink_check(link, base_dir)

        assert exc_info.value.error_type == "traversal"

    def test_broken_symlink_raises_error(self, tmp_path):
        """Test broken symlink (target doesn't exist) raises PathValidationError."""
        link = tmp_path / "broken_link.txt"
        link.symlink_to(tmp_path / "nonexistent.txt")

        with pytest.raises(PathValidationError) as exc_info:
            resolve_with_symlink_check(link, tmp_path)

        assert exc_info.value.error_type == "broken_symlink"
        assert "exist" in exc_info.value.message.lower()

    def test_symlink_chain_within_limit(self, tmp_path):
        """Test chain of symlinks within limit is resolved."""
        # Setup: create chain of 3 symlinks
        target = tmp_path / "actual.txt"
        target.write_text("content")
        link1 = tmp_path / "link1.txt"
        link1.symlink_to(target)
        link2 = tmp_path / "link2.txt"
        link2.symlink_to(link1)
        link3 = tmp_path / "link3.txt"
        link3.symlink_to(link2)

        result = resolve_with_symlink_check(link3, tmp_path)

        assert result == target.resolve()

    def test_symlink_chain_at_limit(self, tmp_path):
        """Test chain of exactly MAX_SYMLINK_DEPTH symlinks is resolved."""
        # Build a chain exactly MAX_SYMLINK_DEPTH links long, ending at a real file
        target = tmp_path / "actual.txt"
        target.write_text("content")
        prev = target
        for i in range(MAX_SYMLINK_DEPTH):
            link = tmp_path / f"link{i}.txt"
            link.symlink_to(prev)
            prev = link

        result = resolve_with_symlink_check(prev, tmp_path)

        assert result == target.resolve()

    def test_symlink_chain_over_limit_raises_error(self, tmp_path):
        """Test chain exceeding MAX_SYMLINK_DEPTH raises PathValidationError."""
        target = tmp_path / "actual.txt"
        target.write_text("content")
        prev = target
        # Create MAX_SYMLINK_DEPTH + 1 symlinks
        for i in range(MAX_SYMLINK_DEPTH + 1):
            link = tmp_path / f"link{i}.txt"
            link.symlink_to(prev)
            prev = link

        with pytest.raises(PathValidationError) as exc_info:
            resolve_with_symlink_check(prev, tmp_path)

        assert exc_info.value.error_type == "symlink_loop"
        assert str(MAX_SYMLINK_DEPTH) in exc_info.value.message

    def test_circular_symlink_detected(self, tmp_path):
        """Test circular symlink (a -> b -> a) raises PathValidationError."""
        link_a = tmp_path / "a.txt"
        link_b = tmp_path / "b.txt"
        # Create circular reference: a -> b -> a
        link_b.symlink_to(link_a)
        link_a.symlink_to(link_b)

        with pytest.raises(PathValidationError) as exc_info:
            resolve_with_symlink_check(link_a, tmp_path)

        assert exc_info.value.error_type == "symlink_loop"
        assert "circular" in exc_info.value.message.lower()

    def test_self_referential_symlink(self, tmp_path):
        """Test self-referential symlink (a -> a) raises PathValidationError."""
        # Note: Can't create self-ref directly, so we test via a 3-link chain
        link_a = tmp_path / "a.txt"
        link_b = tmp_path / "b.txt"
        link_c = tmp_path / "c.txt"
        # Create chain that loops: a -> b -> c -> a
        link_c.symlink_to(link_a)
        link_b.symlink_to(link_c)
        link_a.symlink_to(link_b)

        with pytest.raises(PathValidationError) as exc_info:
            resolve_with_symlink_check(link_a, tmp_path)

        assert exc_info.value.error_type == "symlink_loop"

    def test_custom_max_depth(self, tmp_path):
        """Test custom max_depth parameter is respected."""
        target = tmp_path / "actual.txt"
        target.write_text("content")
        prev = target
        for i in range(5):
            link = tmp_path / f"link{i}.txt"
            link.symlink_to(prev)
            prev = link

        # With max_depth=3, chain of 5 should fail
        with pytest.raises(PathValidationError) as exc_info:
            resolve_with_symlink_check(prev, tmp_path, max_depth=3)

        assert exc_info.value.error_type == "symlink_loop"

    def test_symlink_to_directory_within_base(self, tmp_path):
        """Test symlink to directory within base dir is resolved."""
        target_dir = tmp_path / "data"
        target_dir.mkdir()
        link = tmp_path / "data_link"
        link.symlink_to(target_dir)

        result = resolve_with_symlink_check(link, tmp_path)

        assert result == target_dir.resolve()

    def test_symlink_escape_via_parent_traversal(self, tmp_path):
        """Test symlink using .. to escape base dir raises error."""
        base_dir = tmp_path / "project" / "flows"
        base_dir.mkdir(parents=True)
        outside = tmp_path / "outside.txt"
        outside.write_text("outside content")
        # Create symlink using relative path with .. to escape
        link = base_dir / "sneaky.txt"
        link.symlink_to("../../outside.txt")

        with pytest.raises(PathValidationError) as exc_info:
            resolve_with_symlink_check(link, base_dir)

        assert exc_info.value.error_type == "traversal"

    def test_suggestion_provided_for_traversal(self, tmp_path):
        """Test that traversal error includes suggestion."""
        base_dir = tmp_path / "project"
        base_dir.mkdir()
        outside = tmp_path / "outside.txt"
        outside.write_text("content")
        link = base_dir / "link.txt"
        link.symlink_to(outside)

        with pytest.raises(PathValidationError) as exc_info:
            resolve_with_symlink_check(link, base_dir)

        assert exc_info.value.suggestion is not None
        assert "within" in exc_info.value.suggestion.lower()

    def test_suggestion_provided_for_broken_symlink(self, tmp_path):
        """Test that broken symlink error includes suggestion."""
        link = tmp_path / "broken.txt"
        link.symlink_to(tmp_path / "nonexistent.txt")

        with pytest.raises(PathValidationError) as exc_info:
            resolve_with_symlink_check(link, tmp_path)

        assert exc_info.value.suggestion is not None
        assert "exist" in exc_info.value.suggestion.lower()

    def test_suggestion_provided_for_symlink_loop(self, tmp_path):
        """Test that symlink loop error includes suggestion."""
        link_a = tmp_path / "a.txt"
        link_b = tmp_path / "b.txt"
        link_b.symlink_to(link_a)
        link_a.symlink_to(link_b)

        with pytest.raises(PathValidationError) as exc_info:
            resolve_with_symlink_check(link_a, tmp_path)

        assert exc_info.value.suggestion is not None
        assert "circular" in exc_info.value.suggestion.lower()

    def test_input_path_preserved_in_error(self, tmp_path):
        """Test that original input path is preserved in error."""
        base_dir = tmp_path / "project"
        base_dir.mkdir()
        outside = tmp_path / "outside.txt"
        outside.write_text("content")
        link = base_dir / "my_link.txt"
        link.symlink_to(outside)

        with pytest.raises(PathValidationError) as exc_info:
            resolve_with_symlink_check(link, base_dir)

        assert str(link) in exc_info.value.input_path


class TestResolveSecurePath:
    """Tests for
resolve_secure_path function.

    These tests use real filesystem (tmp_path fixture) to verify path
    resolution and traversal protection.
    """

    def test_simple_relative_path(self, tmp_path):
        """Test resolving a simple relative path to an existing file."""
        # NOTE(review): resolve_secure_path is imported inside each test (pattern
        # repeated throughout this class) — presumably to defer importing the
        # experimental module until the test actually runs; confirm.
        from prefect._experimental.bundles._path_resolver import resolve_secure_path

        # Create a test file
        test_file = tmp_path / "config.yaml"
        test_file.write_text("test content")

        # Resolve the path
        result = resolve_secure_path("config.yaml", tmp_path)

        # Should return the resolved path
        assert result == test_file.resolve()
        assert result.exists()

    def test_nested_directory_path(self, tmp_path):
        """Test resolving a path in a nested directory."""
        from prefect._experimental.bundles._path_resolver import resolve_secure_path

        # Create nested structure
        data_dir = tmp_path / "data"
        data_dir.mkdir()
        input_file = data_dir / "input.csv"
        input_file.write_text("col1,col2")

        # Resolve the path
        result = resolve_secure_path("data/input.csv", tmp_path)

        assert result == input_file.resolve()

    def test_dot_relative_path(self, tmp_path):
        """Test resolving a path starting with ./"""
        from prefect._experimental.bundles._path_resolver import resolve_secure_path

        # Create a test file
        test_file = tmp_path / "data" / "file.txt"
        test_file.parent.mkdir()
        test_file.write_text("content")

        # Resolve with ./ prefix
        result = resolve_secure_path("./data/file.txt", tmp_path)

        assert result == test_file.resolve()

    def test_traversal_single_level_rejected(self, tmp_path):
        """Test that ../config.yaml is rejected as traversal."""
        from prefect._experimental.bundles._path_resolver import resolve_secure_path

        # Create a file outside the base directory
        sibling_dir = tmp_path.parent / "sibling"
        sibling_dir.mkdir(exist_ok=True)
        outside_file = sibling_dir / "config.yaml"
        outside_file.write_text("outside")

        with pytest.raises(PathValidationError) as exc_info:
            resolve_secure_path("../sibling/config.yaml", tmp_path)

        assert exc_info.value.error_type == "traversal"
        assert "traversal" in exc_info.value.message.lower()

    def test_traversal_nested_rejected(self, tmp_path):
        """Test that data/../../etc/passwd style traversal is rejected."""
        from prefect._experimental.bundles._path_resolver import resolve_secure_path

        # Create directory structure
        data_dir = tmp_path / "data"
        data_dir.mkdir()

        with pytest.raises(PathValidationError) as exc_info:
            # This attempts to go outside via nested traversal
            resolve_secure_path("data/../../etc/passwd", tmp_path)

        assert exc_info.value.error_type == "traversal"

    def test_traversal_with_existing_file_outside(self, tmp_path):
        """Test traversal is rejected even when target file exists."""
        from prefect._experimental.bundles._path_resolver import resolve_secure_path

        # Create file outside base directory
        outside_dir = tmp_path.parent / "outside_test"
        outside_dir.mkdir(exist_ok=True)
        outside_file = outside_dir / "secret.txt"
        outside_file.write_text("secret data")

        # Try to access it via traversal
        with pytest.raises(PathValidationError) as exc_info:
            resolve_secure_path("../outside_test/secret.txt", tmp_path)

        assert exc_info.value.error_type == "traversal"

    def test_nonexistent_file_raises_not_found(self, tmp_path):
        """Test that non-existent file raises PathValidationError with not_found."""
        from prefect._experimental.bundles._path_resolver import resolve_secure_path

        with pytest.raises(PathValidationError) as exc_info:
            resolve_secure_path("nonexistent.txt", tmp_path)

        assert exc_info.value.error_type == "not_found"
        assert "exist" in exc_info.value.message.lower()

    def test_nonexistent_nested_path(self, tmp_path):
        """Test that path with non-existent parent directory raises not_found."""
        from prefect._experimental.bundles._path_resolver import resolve_secure_path

        with pytest.raises(PathValidationError) as exc_info:
            resolve_secure_path("missing/dir/file.txt", tmp_path)

        assert exc_info.value.error_type == "not_found"

    def test_cross_platform_backslash_path(self, tmp_path):
        """Test that Windows-style backslash paths are resolved correctly."""
        from prefect._experimental.bundles._path_resolver import resolve_secure_path

        # Create nested structure
        data_dir = tmp_path / "data"
        data_dir.mkdir()
        test_file = data_dir / "input.csv"
        test_file.write_text("content")

        # Use backslash path (Windows style)
        result = resolve_secure_path("data\\input.csv", tmp_path)

        assert result == test_file.resolve()

    def test_directory_resolution(self, tmp_path):
        """Test resolving a directory path."""
        from prefect._experimental.bundles._path_resolver import resolve_secure_path

        # Create a subdirectory
        sub_dir = tmp_path / "subdir"
        sub_dir.mkdir()

        result = resolve_secure_path("subdir", tmp_path)

        assert result == sub_dir.resolve()
        assert result.is_dir()

    def test_dotfile_resolution(self, tmp_path):
        """Test resolving a dotfile path."""
        from prefect._experimental.bundles._path_resolver import resolve_secure_path

        # Create a dotfile
        dotfile = tmp_path / ".gitignore"
        dotfile.write_text("*.pyc")

        result = resolve_secure_path(".gitignore", tmp_path)

        assert result == dotfile.resolve()

    def test_hidden_directory_resolution(self, tmp_path):
        """Test resolving a path in a hidden directory."""
        from prefect._experimental.bundles._path_resolver import resolve_secure_path

        # Create hidden directory structure
        hidden_dir = tmp_path / ".config"
        hidden_dir.mkdir()
        config_file = hidden_dir / "settings.yaml"
        config_file.write_text("setting: value")

        result = resolve_secure_path(".config/settings.yaml", tmp_path)

        assert result == config_file.resolve()

    def test_traversal_that_returns_to_base(self, tmp_path):
        """Test path that goes up and back down stays in base."""
        from prefect._experimental.bundles._path_resolver import resolve_secure_path

        # Create structure
        data_dir = tmp_path / "data"
        data_dir.mkdir()
        test_file = tmp_path / "file.txt"
        test_file.write_text("content")

        # data/../file.txt goes up one then stays in base - this SHOULD work
        result = resolve_secure_path("data/../file.txt", tmp_path)

        assert result == test_file.resolve()

    def test_multiple_traversal_within_base(self, tmp_path):
        """Test multiple parent refs that stay within base directory."""
        from prefect._experimental.bundles._path_resolver import resolve_secure_path

        # Create deep structure
        deep_dir = tmp_path / "a" / "b" / "c"
        deep_dir.mkdir(parents=True)
        target = tmp_path / "a" / "target.txt"
        target.write_text("content")

        # Go into c, then back up to a
        result = resolve_secure_path("a/b/c/../../target.txt", tmp_path)

        assert result == target.resolve()

    def test_input_validation_still_applied(self, tmp_path):
        """Test that input validation from Plan 01 is still applied."""
        from prefect._experimental.bundles._path_resolver import resolve_secure_path

        # Empty string should fail input validation
        with pytest.raises(PathValidationError) as exc_info:
            resolve_secure_path("", tmp_path)
        assert exc_info.value.error_type == "empty"

        # Null byte should fail input validation
        with pytest.raises(PathValidationError) as exc_info:
            resolve_secure_path("file\x00.txt", tmp_path)
        assert exc_info.value.error_type == "null_byte"

        # Absolute path should fail input validation
        with pytest.raises(PathValidationError) as exc_info:
            resolve_secure_path("/absolute/path", tmp_path)
        assert exc_info.value.error_type == "absolute"

    def test_resolved_path_contains_suggestion(self, tmp_path):
        """Test that traversal errors include helpful suggestion."""
        from prefect._experimental.bundles._path_resolver import resolve_secure_path

        # Create the parent structure to ensure file exists
        outside_dir = tmp_path.parent / "suggestions_test"
        outside_dir.mkdir(exist_ok=True)
        outside_file = outside_dir / "file.txt"
        outside_file.write_text("content")

        with pytest.raises(PathValidationError) as exc_info:
            resolve_secure_path("../suggestions_test/file.txt", tmp_path)

        assert exc_info.value.suggestion is not None
        assert (
            "flow" in exc_info.value.suggestion.lower()
            or "within" in exc_info.value.suggestion.lower()
        )


class TestPathResolver:
    """Tests for PathResolver class."""

    def test_resolve_caches_successful_resolution(self, tmp_path):
        """Successful resolutions are cached."""
        (tmp_path / "config.yaml").touch()
        resolver = PathResolver(tmp_path)

        # First resolution
        result1 = resolver.resolve("config.yaml")
        # Second resolution should use cache
        result2 = resolver.resolve("config.yaml")

        assert result1 == result2
        # _cache is private; inspected here to verify the caching contract
        assert "config.yaml" in resolver._cache

    def test_resolve_caches_errors(self, tmp_path):
        """Failed resolutions are cached to avoid repeated filesystem access."""
        resolver = PathResolver(tmp_path)

        # First attempt fails
        with pytest.raises(PathValidationError) as exc1:
            resolver.resolve("nonexistent.txt")

        # Second attempt should raise same cached error
        with pytest.raises(PathValidationError) as exc2:
            resolver.resolve("nonexistent.txt")

        assert exc1.value.error_type == exc2.value.error_type

    def test_resolve_all_collects_all_errors(self, tmp_path):
        """resolve_all collects ALL errors, doesn't stop on first."""
        (tmp_path / "valid.txt").touch()
        resolver = PathResolver(tmp_path)

        result = resolver.resolve_all(
            [
                "valid.txt",  # Valid
                "",  # Error: empty
                "missing.txt",  # Error: not found
                "/absolute/path",  # Error: absolute
            ]
        )

        # Should have 1 valid path and 3 errors
        assert len(result.valid_paths) == 1
        assert len(result.errors) == 3
        assert result.has_errors

        # Check error types
        error_types = {e.error_type for e in result.errors}
        assert "empty" in error_types
        assert "not_found" in error_types
        assert "absolute" in error_types

    def test_resolve_all_detects_duplicates(self, tmp_path):
        """Duplicate paths are detected and reported."""
        (tmp_path / "config.yaml").touch()
        resolver = PathResolver(tmp_path)

        result = resolver.resolve_all(
            [
                "config.yaml",
                "config.yaml",  # Duplicate
            ]
        )

        assert len(result.errors) == 1
        assert result.errors[0].error_type == "duplicate"

    def test_clear_cache(self, tmp_path):
        """Cache can be cleared."""
        (tmp_path / "file.txt").touch()
        resolver = PathResolver(tmp_path)
        resolver.resolve("file.txt")
        assert len(resolver._cache) == 1

        resolver.clear_cache()

        assert len(resolver._cache) == 0


class TestResolvePaths:
    """Tests for resolve_paths convenience function."""

    def test_resolve_paths_raises_by_default(self, tmp_path):
        """resolve_paths raises PathResolutionError by default."""
        with pytest.raises(PathResolutionError) as exc_info:
            resolve_paths(["nonexistent.txt"], tmp_path)

        assert "path validation error" in str(exc_info.value).lower()

    def test_resolve_paths_no_raise_returns_errors(self, tmp_path):
        """resolve_paths with raise_on_errors=False returns errors."""
        result = resolve_paths(["nonexistent.txt"], tmp_path, raise_on_errors=False)

        assert result.has_errors
        assert len(result.errors) == 1

    def test_resolve_paths_success(self, tmp_path):
        """resolve_paths succeeds with valid paths."""
        (tmp_path / "a.txt").touch()
        (tmp_path / "b.txt").touch()

        result = resolve_paths(["a.txt", "b.txt"], tmp_path)

        assert not result.has_errors
        assert len(result.valid_paths) == 2

    def test_resolve_paths_error_message_includes_all_errors(self, tmp_path):
        """PathResolutionError message includes all failed paths."""
        try:
            resolve_paths(["missing1.txt", "missing2.txt", ""], tmp_path)
            pytest.fail("Should have raised PathResolutionError")
        except PathResolutionError as e:
            error_str = str(e)
            assert "3" in error_str  # 3 errors
            assert "missing1.txt" in error_str or "not_found" in error_str
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/_experimental/bundles/test_path_resolver.py", "license": "Apache License 2.0", "lines": 807, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:tests/_experimental/bundles/test_zip_builder.py
"""
Tests for ZipBuilder class.

ZipBuilder packages collected files into a sidecar zip archive with
content-addressed storage key derivation using SHA256 hashes.
"""

from __future__ import annotations

import hashlib
import zipfile
from pathlib import Path
from unittest.mock import patch

import pytest


class TestZipResult:
    """Tests for ZipResult dataclass."""

    def test_dataclass_fields(self) -> None:
        """ZipResult has required fields: zip_path, sha256_hash, storage_key, size_bytes."""
        from prefect._experimental.bundles._zip_builder import ZipResult

        result = ZipResult(
            zip_path=Path("/tmp/test.zip"),
            sha256_hash="abc123",
            storage_key="files/abc123.zip",
            size_bytes=1024,
        )

        assert result.zip_path == Path("/tmp/test.zip")
        assert result.sha256_hash == "abc123"
        assert result.storage_key == "files/abc123.zip"
        assert result.size_bytes == 1024


class TestZipBuilderConstants:
    """Tests for ZipBuilder module constants."""

    def test_hash_chunk_size(self) -> None:
        """HASH_CHUNK_SIZE is 64KB for chunked reading."""
        from prefect._experimental.bundles._zip_builder import HASH_CHUNK_SIZE

        assert HASH_CHUNK_SIZE == 65536  # 64KB

    def test_zip_size_warning_threshold(self) -> None:
        """ZIP_SIZE_WARNING_THRESHOLD is 50MB."""
        from prefect._experimental.bundles._zip_builder import (
            ZIP_SIZE_WARNING_THRESHOLD,
        )

        assert ZIP_SIZE_WARNING_THRESHOLD == 50 * 1024 * 1024  # 50MB


class TestZipBuilderInit:
    """Tests for ZipBuilder initialization."""

    def test_stores_resolved_base_dir(self, tmp_path: Path) -> None:
        """ZipBuilder stores resolved base_dir."""
        from prefect._experimental.bundles._zip_builder import ZipBuilder

        builder = ZipBuilder(tmp_path)

        assert builder.base_dir == tmp_path.resolve()

    def test_init_temp_dir_is_none(self, tmp_path: Path) -> None:
        """ZipBuilder initializes _temp_dir to None."""
        from prefect._experimental.bundles._zip_builder import ZipBuilder

        builder = ZipBuilder(tmp_path)

        assert builder._temp_dir is None


class TestZipBuilderBuild:
    """Tests for ZipBuilder.build() method."""

    def test_build_empty_file_list(self, tmp_path: Path) -> None:
        """Empty file list produces valid empty zip with hash."""
        from prefect._experimental.bundles._zip_builder import ZipBuilder

        builder = ZipBuilder(tmp_path)
        result = builder.build([])

        # Result should be valid
        assert result.zip_path.exists()
        assert result.zip_path.suffix == ".zip"
        assert result.sha256_hash  # Should have a hash
        assert result.storage_key.startswith("files/")
        assert result.storage_key.endswith(".zip")
        assert result.size_bytes > 0  # Even empty zip has some bytes

        # Should be a valid (empty) zip
        with zipfile.ZipFile(result.zip_path) as zf:
            assert len(zf.namelist()) == 0

        builder.cleanup()

    def test_build_single_file(self, tmp_path: Path) -> None:
        """Single file is added with relative path as arcname."""
        from prefect._experimental.bundles._zip_builder import ZipBuilder

        # Create test file
        data_dir = tmp_path / "data"
        data_dir.mkdir()
        test_file = data_dir / "input.csv"
        test_file.write_text("col1,col2\n1,2\n")

        builder = ZipBuilder(tmp_path)
        result = builder.build([test_file])

        # Verify zip contents
        with zipfile.ZipFile(result.zip_path) as zf:
            names = zf.namelist()
            assert len(names) == 1
            # Should use forward slashes for path
            assert "data/input.csv" in names
            # Content should match
            assert zf.read("data/input.csv").decode() == "col1,col2\n1,2\n"

        builder.cleanup()

    def test_build_multiple_files(self, tmp_path: Path) -> None:
        """Multiple files preserve relative structure."""
        from prefect._experimental.bundles._zip_builder import ZipBuilder

        # Create test files
        (tmp_path / "config.yaml").write_text("key: value")
        (tmp_path / "data").mkdir()
        (tmp_path / "data" / "file1.txt").write_text("content1")
        (tmp_path / "data" / "file2.txt").write_text("content2")

        builder = ZipBuilder(tmp_path)
        result = builder.build(
            [
                tmp_path / "config.yaml",
                tmp_path / "data" / "file1.txt",
                tmp_path / "data" / "file2.txt",
            ]
        )

        # Verify zip contents
        with zipfile.ZipFile(result.zip_path) as zf:
            names = zf.namelist()
            assert len(names) == 3
            assert "config.yaml" in names
            assert "data/file1.txt" in names
            assert "data/file2.txt" in names

        builder.cleanup()

    def test_build_uses_deflate_compression(self, tmp_path: Path) -> None:
        """Zip uses DEFLATED compression."""
        from prefect._experimental.bundles._zip_builder import ZipBuilder

        # Create test file with compressible content
        test_file = tmp_path / "data.txt"
        test_file.write_text("a" * 1000)

        builder = ZipBuilder(tmp_path)
        result = builder.build([test_file])

        with zipfile.ZipFile(result.zip_path) as zf:
            info = zf.getinfo("data.txt")
            assert info.compress_type == zipfile.ZIP_DEFLATED

        builder.cleanup()

    def test_build_deterministic_hash(self, tmp_path: Path) -> None:
        """Same files produce same hash (deterministic)."""
        from prefect._experimental.bundles._zip_builder import ZipBuilder

        # Create test files
        (tmp_path / "a.txt").write_text("content a")
        (tmp_path / "b.txt").write_text("content b")
        files = [tmp_path / "a.txt", tmp_path / "b.txt"]

        # Build twice
        builder1 = ZipBuilder(tmp_path)
        result1 = builder1.build(files)
        hash1 = result1.sha256_hash
        builder1.cleanup()

        builder2 = ZipBuilder(tmp_path)
        result2 = builder2.build(files)
        hash2 = result2.sha256_hash
        builder2.cleanup()

        # Hashes should match
        assert hash1 == hash2

    def test_build_sorts_files_for_determinism(self, tmp_path: Path) -> None:
        """Files are sorted by relative path for deterministic hash."""
        from prefect._experimental.bundles._zip_builder import ZipBuilder

        # Create test files
        (tmp_path / "z.txt").write_text("z content")
        (tmp_path / "a.txt").write_text("a content")

        # Build with different order
        builder1 = ZipBuilder(tmp_path)
        result1 = builder1.build([tmp_path / "z.txt", tmp_path / "a.txt"])
        hash1 = result1.sha256_hash
        builder1.cleanup()

        builder2 = ZipBuilder(tmp_path)
        result2 = builder2.build([tmp_path / "a.txt", tmp_path / "z.txt"])
        hash2 = result2.sha256_hash
        builder2.cleanup()

        # Hashes should match regardless of input order
        assert hash1 == hash2

    def test_build_storage_key_format(self, tmp_path: Path) -> None:
        """Storage key follows format files/{sha256hash}.zip."""
        from prefect._experimental.bundles._zip_builder import ZipBuilder

        (tmp_path / "test.txt").write_text("test")

        builder = ZipBuilder(tmp_path)
        result = builder.build([tmp_path / "test.txt"])

        assert result.storage_key == f"files/{result.sha256_hash}.zip"
        assert result.storage_key.startswith("files/")
        assert result.storage_key.endswith(".zip")
        # Hash should be 64 hex characters (SHA256)
        hash_part = result.storage_key[6:-4]  # Remove "files/" and ".zip"
        assert len(hash_part) == 64
        assert all(c in "0123456789abcdef" for c in hash_part)

        builder.cleanup()

    def test_build_size_bytes_accurate(self, tmp_path: Path) -> None:
        """size_bytes matches actual zip file size."""
        from prefect._experimental.bundles._zip_builder import ZipBuilder

        (tmp_path / "test.txt").write_text("some content here")

        builder = ZipBuilder(tmp_path)
        result = builder.build([tmp_path / "test.txt"])

        # size_bytes should match actual file size
        actual_size = result.zip_path.stat().st_size
        assert result.size_bytes == actual_size

        builder.cleanup()

    def test_build_creates_temp_directory(self, tmp_path: Path) -> None:
        """Build creates temp directory with prefect-zip- prefix."""
        from prefect._experimental.bundles._zip_builder import ZipBuilder

        (tmp_path / "test.txt").write_text("test")

        builder = ZipBuilder(tmp_path)
        result = builder.build([tmp_path / "test.txt"])

        # Temp dir should exist and have correct prefix
        assert builder._temp_dir is not None
        assert Path(builder._temp_dir).exists()
        assert "prefect-zip-" in builder._temp_dir
        # Zip should be inside temp dir
        assert str(result.zip_path).startswith(builder._temp_dir)

        builder.cleanup()

    def test_build_windows_path_normalization(self, tmp_path: Path) -> None:
        """Windows-style paths are normalized to forward slashes in zip."""
        from prefect._experimental.bundles._zip_builder import ZipBuilder

        # Create nested file
        nested = tmp_path / "sub" / "dir"
        nested.mkdir(parents=True)
        test_file = nested / "file.txt"
        test_file.write_text("content")

        builder = ZipBuilder(tmp_path)
        result = builder.build([test_file])

        with zipfile.ZipFile(result.zip_path) as zf:
            names = zf.namelist()
            # Should use forward slashes, not backslashes
            assert len(names) == 1
            assert "\\" not in names[0]
            assert "sub/dir/file.txt" in names

        builder.cleanup()


class TestZipBuilderSizeWarning:
    """Tests for size warning when zip exceeds 50MB threshold."""

    def test_no_warning_under_threshold(
        self, tmp_path: Path, caplog: pytest.LogCaptureFixture
    ) -> None:
        """No warning when zip is under 50MB."""
        from prefect._experimental.bundles._zip_builder import ZipBuilder

        (tmp_path / "small.txt").write_text("small content")

        builder = ZipBuilder(tmp_path)
        with caplog.at_level("WARNING"):
            result = builder.build([tmp_path / "small.txt"])

        assert result.size_bytes < 50 * 1024 * 1024
        assert "exceeds" not in caplog.text.lower()
        assert "warning" not in caplog.text.lower() or "50" not in caplog.text

        builder.cleanup()

    def test_warning_at_threshold(
        self, tmp_path: Path, caplog: pytest.LogCaptureFixture
    ) -> None:
        """Warning is emitted when zip reaches 50MB threshold."""
        from prefect._experimental.bundles._zip_builder import (
            ZIP_SIZE_WARNING_THRESHOLD,
            ZipBuilder,
        )

        # Create a file that will produce a zip >= 50MB
        # We mock the file size check since creating actual large files is slow
        (tmp_path / "test.txt").write_text("test")

        builder = ZipBuilder(tmp_path)

        # Mock stat to return size >= threshold
        # (patches Path.stat class-wide, so the builder's size check sees it)
        with patch.object(Path, "stat") as mock_stat:
            mock_stat.return_value.st_size = ZIP_SIZE_WARNING_THRESHOLD
            with caplog.at_level("WARNING"):
                builder.build([tmp_path / "test.txt"])

        # Should have warning about size
        assert any(
            "50" in record.message or "MB" in record.message
            for record in caplog.records
            if record.levelname == "WARNING"
        )

        builder.cleanup()

    def test_warning_shows_total_size(
        self, tmp_path: Path, caplog: pytest.LogCaptureFixture
    ) -> None:
        """Warning message includes total zip size."""
        from prefect._experimental.bundles._zip_builder import (
            ZipBuilder,
        )

        (tmp_path / "file1.txt").write_text("content1")
        (tmp_path / "file2.txt").write_text("content2")

        builder = ZipBuilder(tmp_path)

        # Mock to simulate large zip
        original_stat = Path.stat

        def mock_stat(self: Path) -> object:
            # Only fake the size for the built .zip; other stat calls pass through
            result = original_stat(self)
            if self.suffix == ".zip":
                # Return mock with large size
                class MockStat:
                    st_size = 60 * 1024 * 1024  # 60MB

                return MockStat()
            return result

        with patch.object(Path, "stat", mock_stat):
            with caplog.at_level("WARNING"):
                builder.build([tmp_path / "file1.txt", tmp_path / "file2.txt"])

        # Warning should mention size (60MB or similar)
        warning_records = [r for r in caplog.records if r.levelname == "WARNING"]
        assert len(warning_records) > 0

        builder.cleanup()

    def test_warning_lists_largest_files(
        self, tmp_path: Path, caplog: pytest.LogCaptureFixture
    ) -> None:
        """Warning message lists largest files contributing to size."""
        from prefect._experimental.bundles._zip_builder import (
            ZIP_SIZE_WARNING_THRESHOLD,
            ZipBuilder,
        )

        # Create files with different sizes
        (tmp_path / "large.txt").write_text("x" * 1000)
        (tmp_path / "small.txt").write_text("y")

        builder = ZipBuilder(tmp_path)

        # Mock to simulate large zip
        original_stat = Path.stat

        def mock_stat(self: Path) -> object:
            result = original_stat(self)
            if self.suffix == ".zip":
                class MockStat:
                    st_size = ZIP_SIZE_WARNING_THRESHOLD + 1

                return MockStat()
            return result

        with patch.object(Path, "stat", mock_stat):
            with caplog.at_level("WARNING"):
                builder.build([tmp_path / "large.txt", tmp_path / "small.txt"])

        # Warning should mention file names
        warning_text = " ".join(
            r.message for r in caplog.records if r.levelname == "WARNING"
        )
        # Should mention at least one file
        assert (
            "large.txt" in warning_text
            or "small.txt" in warning_text
            or "files" in warning_text.lower()
        )

        builder.cleanup()


class TestZipBuilderCleanup:
    """Tests for ZipBuilder.cleanup() method."""

    def test_cleanup_removes_temp_directory(self, tmp_path: Path) -> None:
        """cleanup() removes the temp directory."""
        from prefect._experimental.bundles._zip_builder import ZipBuilder

        (tmp_path / "test.txt").write_text("test")

        builder = ZipBuilder(tmp_path)
        builder.build([tmp_path / "test.txt"])
        temp_dir = builder._temp_dir
        assert temp_dir is not None
        assert Path(temp_dir).exists()

        builder.cleanup()

        # Temp dir should be gone
        assert not Path(temp_dir).exists()

    def test_cleanup_before_build_is_safe(self, tmp_path: Path) -> None:
        """cleanup() is safe to call before build()."""
        from prefect._experimental.bundles._zip_builder import ZipBuilder

        builder = ZipBuilder(tmp_path)

        # Should not raise
        builder.cleanup()

    def test_cleanup_multiple_times_is_safe(self, tmp_path: Path) -> None:
        """cleanup() can be called multiple times safely."""
        from prefect._experimental.bundles._zip_builder import ZipBuilder

        (tmp_path / "test.txt").write_text("test")

        builder = ZipBuilder(tmp_path)
        builder.build([tmp_path / "test.txt"])

        builder.cleanup()
        builder.cleanup()  # Second call should not raise

    def test_cleanup_sets_temp_dir_to_none(self, tmp_path: Path) -> None:
        """cleanup() sets _temp_dir to None after removal."""
        from prefect._experimental.bundles._zip_builder import ZipBuilder

        (tmp_path / "test.txt").write_text("test")

        builder = ZipBuilder(tmp_path)
        builder.build([tmp_path / "test.txt"])
        assert builder._temp_dir is not None

        builder.cleanup()

        assert builder._temp_dir is None


class TestZipBuilderHashComputation:
    """Tests for SHA256 hash computation."""

    def test_hash_is_sha256(self, tmp_path: Path) -> None:
        """Hash is computed using SHA256."""
        from prefect._experimental.bundles._zip_builder import ZipBuilder

        (tmp_path / "test.txt").write_text("test content")

        builder = ZipBuilder(tmp_path)
        result = builder.build([tmp_path / "test.txt"])

        # Verify by computing hash ourselves
        with open(result.zip_path, "rb") as f:
            expected_hash = hashlib.sha256(f.read()).hexdigest()

        assert result.sha256_hash == expected_hash

        builder.cleanup()

    def test_hash_uses_chunked_reading(self,
tmp_path: Path) -> None: """Hash computation uses chunked reading (64KB chunks).""" from prefect._experimental.bundles._zip_builder import ( HASH_CHUNK_SIZE, ZipBuilder, ) # Create a file larger than one chunk large_content = "x" * (HASH_CHUNK_SIZE * 2 + 100) (tmp_path / "large.txt").write_text(large_content) builder = ZipBuilder(tmp_path) result = builder.build([tmp_path / "large.txt"]) # Verify hash is still correct for large file with open(result.zip_path, "rb") as f: expected_hash = hashlib.sha256(f.read()).hexdigest() assert result.sha256_hash == expected_hash builder.cleanup() def test_hash_is_lowercase_hex(self, tmp_path: Path) -> None: """Hash is returned as lowercase hexadecimal.""" from prefect._experimental.bundles._zip_builder import ZipBuilder (tmp_path / "test.txt").write_text("test") builder = ZipBuilder(tmp_path) result = builder.build([tmp_path / "test.txt"]) # All characters should be lowercase hex assert result.sha256_hash == result.sha256_hash.lower() assert all(c in "0123456789abcdef" for c in result.sha256_hash) assert len(result.sha256_hash) == 64 builder.cleanup() class TestZipBuilderExports: """Tests for module exports.""" def test_module_exports(self) -> None: """Module exports required symbols.""" from prefect._experimental.bundles import _zip_builder assert hasattr(_zip_builder, "ZipBuilder") assert hasattr(_zip_builder, "ZipResult") assert hasattr(_zip_builder, "HASH_CHUNK_SIZE") assert hasattr(_zip_builder, "ZIP_SIZE_WARNING_THRESHOLD") def test_all_exports(self) -> None: """__all__ includes required exports.""" from prefect._experimental.bundles._zip_builder import __all__ assert "ZipBuilder" in __all__ assert "ZipResult" in __all__ assert "HASH_CHUNK_SIZE" in __all__ assert "ZIP_SIZE_WARNING_THRESHOLD" in __all__
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/_experimental/bundles/test_zip_builder.py", "license": "Apache License 2.0", "lines": 406, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:tests/_experimental/bundles/test_zip_extractor.py
""" Tests for ZipExtractor class. ZipExtractor extracts files from a zip archive to the working directory, preserving relative paths and handling overwrites with warnings. """ from __future__ import annotations import zipfile from pathlib import Path import pytest class TestZipExtractorImports: """Tests for module imports.""" def test_zip_extractor_importable(self) -> None: """ZipExtractor can be imported from module.""" from prefect._experimental.bundles._zip_extractor import ZipExtractor assert ZipExtractor is not None class TestZipExtractorInit: """Tests for ZipExtractor initialization.""" def test_init_stores_zip_path(self, tmp_path: Path) -> None: """Constructor stores zip_path attribute.""" from prefect._experimental.bundles._zip_extractor import ZipExtractor zip_path = tmp_path / "test.zip" extractor = ZipExtractor(zip_path) assert extractor.zip_path == zip_path def test_init_accepts_path_object(self, tmp_path: Path) -> None: """Constructor accepts Path object.""" from prefect._experimental.bundles._zip_extractor import ZipExtractor zip_path = tmp_path / "test.zip" extractor = ZipExtractor(zip_path) assert isinstance(extractor.zip_path, Path) assert extractor.zip_path == zip_path def test_init_accepts_string_path(self, tmp_path: Path) -> None: """Constructor accepts string path and converts to Path.""" from prefect._experimental.bundles._zip_extractor import ZipExtractor zip_path = str(tmp_path / "test.zip") extractor = ZipExtractor(zip_path) assert isinstance(extractor.zip_path, Path) assert extractor.zip_path == Path(zip_path) def test_init_extracted_flag_false(self, tmp_path: Path) -> None: """_extracted flag is False initially.""" from prefect._experimental.bundles._zip_extractor import ZipExtractor zip_path = tmp_path / "test.zip" extractor = ZipExtractor(zip_path) assert extractor._extracted is False class TestZipExtractorExtract: """Tests for ZipExtractor.extract() method.""" @pytest.fixture def create_test_zip(self, tmp_path: Path): """Factory 
fixture to create test zip files.""" def _create(files: dict[str, str]) -> Path: """Create a zip with the given files. Keys are paths, values are content.""" zip_path = tmp_path / "test.zip" with zipfile.ZipFile(zip_path, "w") as zf: for path, content in files.items(): zf.writestr(path, content) return zip_path return _create def test_extract_single_file( self, tmp_path: Path, create_test_zip: callable ) -> None: """Single file is extracted to target directory.""" from prefect._experimental.bundles._zip_extractor import ZipExtractor zip_path = create_test_zip({"config.yaml": "key: value"}) target_dir = tmp_path / "output" target_dir.mkdir() extractor = ZipExtractor(zip_path) extractor.extract(target_dir) assert (target_dir / "config.yaml").exists() assert (target_dir / "config.yaml").read_text() == "key: value" def test_extract_preserves_relative_paths( self, tmp_path: Path, create_test_zip: callable ) -> None: """Nested directory structure is preserved (data/input.csv -> ./data/input.csv).""" from prefect._experimental.bundles._zip_extractor import ZipExtractor zip_path = create_test_zip({"data/input.csv": "a,b,c"}) target_dir = tmp_path / "output" target_dir.mkdir() extractor = ZipExtractor(zip_path) extractor.extract(target_dir) assert (target_dir / "data" / "input.csv").exists() assert (target_dir / "data" / "input.csv").read_text() == "a,b,c" def test_extract_creates_parent_directories( self, tmp_path: Path, create_test_zip: callable ) -> None: """Creates parent directories as needed.""" from prefect._experimental.bundles._zip_extractor import ZipExtractor zip_path = create_test_zip({"deeply/nested/file.txt": "content"}) target_dir = tmp_path / "output" target_dir.mkdir() extractor = ZipExtractor(zip_path) extractor.extract(target_dir) assert (target_dir / "deeply" / "nested" / "file.txt").exists() def test_extract_to_cwd_by_default( self, tmp_path: Path, create_test_zip: callable, monkeypatch: pytest.MonkeyPatch ) -> None: """Uses cwd when no target 
specified.""" from prefect._experimental.bundles._zip_extractor import ZipExtractor zip_path = create_test_zip({"default.txt": "default content"}) work_dir = tmp_path / "workdir" work_dir.mkdir() monkeypatch.chdir(work_dir) extractor = ZipExtractor(zip_path) extractor.extract() assert (work_dir / "default.txt").exists() assert (work_dir / "default.txt").read_text() == "default content" def test_extract_to_custom_target_dir( self, tmp_path: Path, create_test_zip: callable ) -> None: """Accepts custom target directory.""" from prefect._experimental.bundles._zip_extractor import ZipExtractor zip_path = create_test_zip({"custom.txt": "custom content"}) custom_dir = tmp_path / "custom_output" custom_dir.mkdir() extractor = ZipExtractor(zip_path) extractor.extract(custom_dir) assert (custom_dir / "custom.txt").exists() def test_extract_returns_list_of_paths( self, tmp_path: Path, create_test_zip: callable ) -> None: """Returns list of extracted file paths.""" from prefect._experimental.bundles._zip_extractor import ZipExtractor zip_path = create_test_zip( {"file1.txt": "content1", "dir/file2.txt": "content2"} ) target_dir = tmp_path / "output" target_dir.mkdir() extractor = ZipExtractor(zip_path) result = extractor.extract(target_dir) assert isinstance(result, list) assert len(result) == 2 assert target_dir / "file1.txt" in result assert target_dir / "dir" / "file2.txt" in result def test_extract_sets_extracted_flag( self, tmp_path: Path, create_test_zip: callable ) -> None: """Sets _extracted to True after successful extraction.""" from prefect._experimental.bundles._zip_extractor import ZipExtractor zip_path = create_test_zip({"file.txt": "content"}) target_dir = tmp_path / "output" target_dir.mkdir() extractor = ZipExtractor(zip_path) assert extractor._extracted is False extractor.extract(target_dir) assert extractor._extracted is True def test_extract_multiple_files( self, tmp_path: Path, create_test_zip: callable ) -> None: """Multiple files are extracted 
correctly.""" from prefect._experimental.bundles._zip_extractor import ZipExtractor zip_path = create_test_zip( { "root.txt": "root content", "dir1/file1.txt": "file1 content", "dir1/file2.txt": "file2 content", "dir2/nested/deep.txt": "deep content", } ) target_dir = tmp_path / "output" target_dir.mkdir() extractor = ZipExtractor(zip_path) result = extractor.extract(target_dir) assert len(result) == 4 assert (target_dir / "root.txt").read_text() == "root content" assert (target_dir / "dir1" / "file1.txt").read_text() == "file1 content" assert (target_dir / "dir1" / "file2.txt").read_text() == "file2 content" assert ( target_dir / "dir2" / "nested" / "deep.txt" ).read_text() == "deep content" class TestZipExtractorOverwrite: """Tests for overwrite behavior with warnings.""" @pytest.fixture def create_test_zip(self, tmp_path: Path): """Factory fixture to create test zip files.""" def _create(files: dict[str, str]) -> Path: """Create a zip with the given files. Keys are paths, values are content.""" zip_path = tmp_path / "test.zip" with zipfile.ZipFile(zip_path, "w") as zf: for path, content in files.items(): zf.writestr(path, content) return zip_path return _create def test_extract_overwrites_existing_file( self, tmp_path: Path, create_test_zip: callable ) -> None: """Existing file is overwritten by extraction.""" from prefect._experimental.bundles._zip_extractor import ZipExtractor zip_path = create_test_zip({"config.yaml": "new content"}) target_dir = tmp_path / "output" target_dir.mkdir() # Create existing file with different content existing_file = target_dir / "config.yaml" existing_file.write_text("old content") extractor = ZipExtractor(zip_path) extractor.extract(target_dir) assert existing_file.read_text() == "new content" def test_extract_logs_warning_on_overwrite( self, tmp_path: Path, create_test_zip: callable, caplog: pytest.LogCaptureFixture, ) -> None: """Warning is logged when overwriting existing file.""" import logging from 
prefect._experimental.bundles._zip_extractor import ZipExtractor zip_path = create_test_zip({"config.yaml": "new content"}) target_dir = tmp_path / "output" target_dir.mkdir() # Create existing file existing_file = target_dir / "config.yaml" existing_file.write_text("old content") extractor = ZipExtractor(zip_path) with caplog.at_level(logging.WARNING): extractor.extract(target_dir) assert "Overwriting existing file: config.yaml" in caplog.text def test_extract_overwrites_multiple_files( self, tmp_path: Path, create_test_zip: callable, caplog: pytest.LogCaptureFixture, ) -> None: """Multiple existing files are overwritten with warnings.""" import logging from prefect._experimental.bundles._zip_extractor import ZipExtractor zip_path = create_test_zip({"file1.txt": "new1", "dir/file2.txt": "new2"}) target_dir = tmp_path / "output" target_dir.mkdir() (target_dir / "dir").mkdir() # Create existing files (target_dir / "file1.txt").write_text("old1") (target_dir / "dir" / "file2.txt").write_text("old2") extractor = ZipExtractor(zip_path) with caplog.at_level(logging.WARNING): extractor.extract(target_dir) # Verify files overwritten assert (target_dir / "file1.txt").read_text() == "new1" assert (target_dir / "dir" / "file2.txt").read_text() == "new2" # Verify warnings logged assert "Overwriting existing file: file1.txt" in caplog.text assert "Overwriting existing file: dir/file2.txt" in caplog.text def test_extract_no_warning_for_new_files( self, tmp_path: Path, create_test_zip: callable, caplog: pytest.LogCaptureFixture, ) -> None: """No warning when file doesn't exist.""" import logging from prefect._experimental.bundles._zip_extractor import ZipExtractor zip_path = create_test_zip({"newfile.txt": "content"}) target_dir = tmp_path / "output" target_dir.mkdir() extractor = ZipExtractor(zip_path) with caplog.at_level(logging.WARNING): extractor.extract(target_dir) assert "Overwriting" not in caplog.text class TestZipExtractorTypeMismatch: """Tests for file/directory type 
mismatch detection.""" @pytest.fixture def create_test_zip(self, tmp_path: Path): """Factory fixture to create test zip files.""" def _create(files: dict[str, str]) -> Path: """Create a zip with the given files. Keys are paths, values are content.""" zip_path = tmp_path / "test.zip" with zipfile.ZipFile(zip_path, "w") as zf: for path, content in files.items(): zf.writestr(path, content) return zip_path return _create def test_extract_fails_file_over_directory( self, tmp_path: Path, create_test_zip: callable ) -> None: """Error when file in zip would overwrite existing directory.""" from prefect._experimental.bundles._zip_extractor import ZipExtractor # Create zip with a file named "data" zip_path = create_test_zip({"data": "file content"}) target_dir = tmp_path / "output" target_dir.mkdir() # Create existing directory with same name (target_dir / "data").mkdir() extractor = ZipExtractor(zip_path) with pytest.raises(RuntimeError) as exc_info: extractor.extract(target_dir) assert "data" in str(exc_info.value) assert "directory" in str(exc_info.value).lower() def test_extract_fails_directory_over_file( self, tmp_path: Path, create_test_zip: callable ) -> None: """Error when directory in zip would overwrite existing file.""" from prefect._experimental.bundles._zip_extractor import ZipExtractor # Create zip with a directory entry and file inside zip_path = tmp_path / "test.zip" with zipfile.ZipFile(zip_path, "w") as zf: # Explicitly add directory entry zf.writestr("config/", "") zf.writestr("config/settings.yaml", "key: value") target_dir = tmp_path / "output" target_dir.mkdir() # Create existing file with same name as directory (target_dir / "config").write_text("i am a file") extractor = ZipExtractor(zip_path) with pytest.raises(RuntimeError) as exc_info: extractor.extract(target_dir) assert "config" in str(exc_info.value) assert "file" in str(exc_info.value).lower() def test_extract_error_message_includes_path( self, tmp_path: Path, create_test_zip: callable ) -> 
None: """Error message includes the problematic path.""" from prefect._experimental.bundles._zip_extractor import ZipExtractor # Create zip with a file named "conflicting_name" zip_path = create_test_zip({"conflicting_name": "content"}) target_dir = tmp_path / "output" target_dir.mkdir() # Create existing directory with same name (target_dir / "conflicting_name").mkdir() extractor = ZipExtractor(zip_path) with pytest.raises(RuntimeError) as exc_info: extractor.extract(target_dir) assert "conflicting_name" in str(exc_info.value) def test_extract_type_mismatch_before_any_extraction( self, tmp_path: Path, create_test_zip: callable ) -> None: """No files are extracted when type mismatch is detected.""" from prefect._experimental.bundles._zip_extractor import ZipExtractor # Create zip with multiple files, one will conflict zip_path = create_test_zip( {"goodfile.txt": "good content", "badname": "bad content"} ) target_dir = tmp_path / "output" target_dir.mkdir() # Create existing directory that conflicts with "badname" file (target_dir / "badname").mkdir() extractor = ZipExtractor(zip_path) with pytest.raises(RuntimeError): extractor.extract(target_dir) # Verify no files were extracted assert not (target_dir / "goodfile.txt").exists() assert extractor._extracted is False class TestZipExtractorCleanup: """Tests for cleanup() method behavior.""" @pytest.fixture def create_test_zip(self, tmp_path: Path): """Factory fixture to create test zip files.""" def _create(files: dict[str, str]) -> Path: """Create a zip with the given files. 
Keys are paths, values are content.""" zip_path = tmp_path / "test.zip" with zipfile.ZipFile(zip_path, "w") as zf: for path, content in files.items(): zf.writestr(path, content) return zip_path return _create def test_cleanup_deletes_zip_file( self, tmp_path: Path, create_test_zip: callable ) -> None: """Zip file is deleted after cleanup.""" from prefect._experimental.bundles._zip_extractor import ZipExtractor zip_path = create_test_zip({"file.txt": "content"}) target_dir = tmp_path / "output" target_dir.mkdir() extractor = ZipExtractor(zip_path) extractor.extract(target_dir) assert zip_path.exists() extractor.cleanup() assert not zip_path.exists() def test_cleanup_only_after_successful_extract( self, tmp_path: Path, create_test_zip: callable ) -> None: """Cleanup skips deletion if extraction not completed.""" from prefect._experimental.bundles._zip_extractor import ZipExtractor zip_path = create_test_zip({"file.txt": "content"}) extractor = ZipExtractor(zip_path) # Don't call extract extractor.cleanup() # Zip should still exist assert zip_path.exists() def test_cleanup_logs_warning_if_not_extracted( self, tmp_path: Path, create_test_zip: callable, caplog: pytest.LogCaptureFixture, ) -> None: """Warning is logged when cleanup called before extract.""" import logging from prefect._experimental.bundles._zip_extractor import ZipExtractor zip_path = create_test_zip({"file.txt": "content"}) extractor = ZipExtractor(zip_path) with caplog.at_level(logging.WARNING): extractor.cleanup() assert "extraction not completed" in caplog.text.lower() def test_cleanup_logs_warning_on_delete_failure( self, tmp_path: Path, create_test_zip: callable, caplog: pytest.LogCaptureFixture, ) -> None: """Warning is logged if deletion fails (doesn't raise).""" import logging from unittest.mock import patch from prefect._experimental.bundles._zip_extractor import ZipExtractor zip_path = create_test_zip({"file.txt": "content"}) target_dir = tmp_path / "output" target_dir.mkdir() extractor = 
ZipExtractor(zip_path) extractor.extract(target_dir) # Mock Path.unlink to raise OSError original_unlink = Path.unlink def mock_unlink(self, missing_ok=False): if self == zip_path: raise OSError("Permission denied") return original_unlink(self, missing_ok=missing_ok) with ( patch.object(Path, "unlink", mock_unlink), caplog.at_level(logging.WARNING), ): # Should not raise extractor.cleanup() assert "Failed to delete" in caplog.text def test_cleanup_safe_to_call_multiple_times( self, tmp_path: Path, create_test_zip: callable ) -> None: """Multiple cleanup calls don't raise.""" from prefect._experimental.bundles._zip_extractor import ZipExtractor zip_path = create_test_zip({"file.txt": "content"}) target_dir = tmp_path / "output" target_dir.mkdir() extractor = ZipExtractor(zip_path) extractor.extract(target_dir) # Call cleanup multiple times - should not raise extractor.cleanup() extractor.cleanup() extractor.cleanup() def test_cleanup_handles_missing_file( self, tmp_path: Path, create_test_zip: callable ) -> None: """No error if zip file was already deleted.""" from prefect._experimental.bundles._zip_extractor import ZipExtractor zip_path = create_test_zip({"file.txt": "content"}) target_dir = tmp_path / "output" target_dir.mkdir() extractor = ZipExtractor(zip_path) extractor.extract(target_dir) # Delete file manually before cleanup zip_path.unlink() # Should not raise extractor.cleanup() class TestZipExtractorPathTraversal: """Tests for path traversal rejection.""" def _create_zip_with_member(self, zip_path: Path, member_name: str) -> Path: """Create a zip file with a single member using a crafted name.""" import io import struct # Build a minimal zip archive manually so we can use arbitrary member names # that zipfile.ZipFile.writestr would otherwise reject or normalise. 
buf = io.BytesIO() encoded = member_name.encode("utf-8") content = b"malicious" # Local file header local_header = ( b"PK\x03\x04" # signature + struct.pack("<H", 20) # version needed + struct.pack("<H", 0) # flags + struct.pack("<H", 0) # compression + struct.pack("<H", 0) # mod time + struct.pack("<H", 0) # mod date + struct.pack("<I", 0) # crc32 (filled later) + struct.pack("<I", len(content)) # compressed size + struct.pack("<I", len(content)) # uncompressed size + struct.pack("<H", len(encoded)) # filename length + struct.pack("<H", 0) # extra length + encoded + content ) # Fix CRC import binascii crc = binascii.crc32(content) & 0xFFFFFFFF local_header = local_header[:14] + struct.pack("<I", crc) + local_header[18:] buf.write(local_header) central_offset = 0 central_start = buf.tell() # Central directory header central = ( b"PK\x01\x02" + struct.pack("<H", 20) # version made by + struct.pack("<H", 20) # version needed + struct.pack("<H", 0) # flags + struct.pack("<H", 0) # compression + struct.pack("<H", 0) # mod time + struct.pack("<H", 0) # mod date + struct.pack("<I", crc) + struct.pack("<I", len(content)) + struct.pack("<I", len(content)) + struct.pack("<H", len(encoded)) + struct.pack("<H", 0) # extra length + struct.pack("<H", 0) # comment length + struct.pack("<H", 0) # disk number + struct.pack("<H", 0) # internal attrs + struct.pack("<I", 0) # external attrs + struct.pack("<I", central_offset) # local header offset + encoded ) buf.write(central) central_end = buf.tell() # End of central directory eocd = ( b"PK\x05\x06" + struct.pack("<H", 0) # disk number + struct.pack("<H", 0) # disk with central dir + struct.pack("<H", 1) # entries on disk + struct.pack("<H", 1) # total entries + struct.pack("<I", central_end - central_start) + struct.pack("<I", central_start) + struct.pack("<H", 0) # comment length ) buf.write(eocd) zip_path.write_bytes(buf.getvalue()) return zip_path def test_extract_rejects_absolute_path(self, tmp_path: Path) -> None: """Zip with 
absolute path member raises ValueError.""" from prefect._experimental.bundles._zip_extractor import ZipExtractor zip_path = self._create_zip_with_member(tmp_path / "evil.zip", "/etc/passwd") target_dir = tmp_path / "output" target_dir.mkdir() extractor = ZipExtractor(zip_path) with pytest.raises(ValueError, match="absolute path"): extractor.extract(target_dir) def test_extract_rejects_dot_dot_traversal(self, tmp_path: Path) -> None: """Zip with ../../ traversal raises ValueError.""" from prefect._experimental.bundles._zip_extractor import ZipExtractor zip_path = self._create_zip_with_member( tmp_path / "evil.zip", "../../etc/passwd" ) target_dir = tmp_path / "output" target_dir.mkdir() extractor = ZipExtractor(zip_path) with pytest.raises(ValueError, match="path traversal"): extractor.extract(target_dir) def test_extract_rejects_nested_dot_dot(self, tmp_path: Path) -> None: """Zip with data/../../escape.txt raises ValueError.""" from prefect._experimental.bundles._zip_extractor import ZipExtractor zip_path = self._create_zip_with_member( tmp_path / "evil.zip", "data/../../escape.txt" ) target_dir = tmp_path / "output" target_dir.mkdir() extractor = ZipExtractor(zip_path) with pytest.raises(ValueError, match="path traversal"): extractor.extract(target_dir) def test_extract_no_files_extracted_on_traversal(self, tmp_path: Path) -> None: """No files are written when traversal is detected.""" from prefect._experimental.bundles._zip_extractor import ZipExtractor zip_path = self._create_zip_with_member( tmp_path / "evil.zip", "../../escape.txt" ) target_dir = tmp_path / "output" target_dir.mkdir() extractor = ZipExtractor(zip_path) with pytest.raises(ValueError): extractor.extract(target_dir) # Verify nothing was extracted assert list(target_dir.iterdir()) == [] assert extractor._extracted is False class TestZipExtractorErrors: """Tests for error handling scenarios.""" def test_extract_raises_on_missing_zip(self, tmp_path: Path) -> None: """FileNotFoundError for 
non-existent zip file.""" from prefect._experimental.bundles._zip_extractor import ZipExtractor zip_path = tmp_path / "nonexistent.zip" target_dir = tmp_path / "output" target_dir.mkdir() extractor = ZipExtractor(zip_path) with pytest.raises(FileNotFoundError): extractor.extract(target_dir) def test_extract_raises_on_corrupted_zip(self, tmp_path: Path) -> None: """BadZipFile for invalid/corrupted zip file.""" from prefect._experimental.bundles._zip_extractor import ZipExtractor # Create corrupted zip by writing invalid bytes zip_path = tmp_path / "corrupted.zip" zip_path.write_bytes(b"not a valid zip file content") target_dir = tmp_path / "output" target_dir.mkdir() extractor = ZipExtractor(zip_path) with pytest.raises(zipfile.BadZipFile): extractor.extract(target_dir) def test_extract_raises_on_permission_error(self, tmp_path: Path) -> None: """Clear error on permission issues when creating directory.""" from unittest.mock import patch from prefect._experimental.bundles._zip_extractor import ZipExtractor # Create a valid zip zip_path = tmp_path / "test.zip" with zipfile.ZipFile(zip_path, "w") as zf: zf.writestr("file.txt", "content") target_dir = tmp_path / "output" target_dir.mkdir() # Mock extractall to raise permission error def mock_extractall(path=None, members=None, pwd=None): raise PermissionError("Permission denied") extractor = ZipExtractor(zip_path) with patch.object(zipfile.ZipFile, "extractall", mock_extractall): with pytest.raises(PermissionError): extractor.extract(target_dir)
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/_experimental/bundles/test_zip_extractor.py", "license": "Apache License 2.0", "lines": 599, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:src/integrations/prefect-dbt/prefect_dbt/core/_manifest.py
""" Data structures and parser for dbt manifest.json files. This module provides: - DbtNode: Immutable representation of a dbt node - ExecutionWave: A group of nodes that can execute in parallel - ManifestParser: Parser for dbt manifest.json with dependency resolution - resolve_selection: Resolve dbt selectors to node IDs via `dbt ls` - DbtLsError: Exception raised when `dbt ls` fails """ import json from dataclasses import dataclass, field from pathlib import Path from typing import Any from dbt.artifacts.resources.types import NodeType from dbt.cli.main import dbtRunner # Resource types that are test-like (schema/data tests and unit tests). # NodeType.Unit was added in dbt-core 1.8; guard for older versions. _TEST_TYPES = frozenset( t for t in (NodeType.Test, getattr(NodeType, "Unit", None)) if t is not None ) @dataclass(frozen=True) class DbtNode: """Immutable representation of a dbt node from manifest.json. Attributes: unique_id: Full dbt identifier (e.g., "model.analytics.stg_users") name: Short name (e.g., "stg_users") resource_type: Node type from dbt (Model, Source, Test, etc.) depends_on: Tuple of unique_ids this node depends on (tuple for hashability) depends_on_macros: Tuple of macro unique_ids this node depends on fqn: Fully-qualified name as a tuple of path segments materialization: How the node is materialized ("view", "table", "ephemeral", etc.) relation_name: Database relation name original_file_path: Path to the source SQL/YAML file config: Node configuration dictionary description: Optional node description from the dbt project compiled_code: Compiled SQL code (populated by `dbt compile`) """ unique_id: str name: str resource_type: NodeType depends_on: tuple[str, ...] = field(default_factory=tuple) depends_on_macros: tuple[str, ...] = field(default_factory=tuple) fqn: tuple[str, ...] 
= field(default_factory=tuple) materialization: str | None = None relation_name: str | None = None original_file_path: str | None = None config: dict[str, Any] = field(default_factory=dict) description: str | None = None compiled_code: str | None = None # Resource types that produce database objects via `dbt run`/`dbt seed`/`dbt snapshot`. # Tests are excluded because they use `dbt test` and have their own scheduling strategy. _RUNNABLE_TYPES = frozenset({NodeType.Model, NodeType.Seed, NodeType.Snapshot}) @property def is_executable(self) -> bool: """Return True only for runnable, non-ephemeral nodes. Returns False for: - Sources (external tables, not executed) - Tests (executed separately via `dbt test`) - Ephemeral models (compiled inline, no database object) - Exposures, analyses, and other non-run resource types """ if self.resource_type not in self._RUNNABLE_TYPES: return False if self.materialization == "ephemeral": return False return True @property def dbt_selector(self) -> str: """Return a precise dbt selector string for this node. For runnable resource types (models, seeds, snapshots) each node has a dedicated file, so `path:<original_file_path>` is both globally unique and selects exactly one node. Tests are excluded from `path:` selection because multiple test nodes can be defined in a single YAML schema file — using `path:` would over-select. Falls back to dot-joined FQN, then bare node name. """ if self.original_file_path and self.resource_type in self._RUNNABLE_TYPES: return f"path:{self.original_file_path}" if self.fqn: return ".".join(self.fqn) return self.name def __hash__(self) -> int: return hash(self.unique_id) @dataclass class ExecutionWave: """A group of nodes that can be executed in parallel. All nodes in a wave have their dependencies satisfied by previous waves. 
Attributes: wave_number: Zero-indexed wave number (0 = first wave, no dependencies) nodes: List of DbtNode objects that can execute concurrently """ wave_number: int nodes: list[DbtNode] = field(default_factory=list) class ManifestParser: """Parser for dbt manifest.json files with dependency resolution. This parser: - Reads manifest.json directly (not using dbt's Manifest class) - Excludes ephemeral models and sources from executable nodes - Resolves transitive dependencies through ephemeral models - Computes execution waves using Kahn's algorithm Example: parser = ManifestParser(Path("target/manifest.json")) waves = parser.compute_execution_waves() for wave in waves: print(f"Wave {wave.wave_number}: {[n.name for n in wave.nodes]}") """ def __init__(self, manifest_path: Path): """Initialize the parser with a path to manifest.json. Args: manifest_path: Path to the dbt manifest.json file Raises: FileNotFoundError: If the manifest file doesn't exist """ if not manifest_path.exists(): raise FileNotFoundError(f"Manifest file not found: {manifest_path}") self._manifest_path = manifest_path self._manifest_data: dict[str, Any] = {} self._nodes: dict[str, DbtNode] = {} self._all_nodes: dict[str, DbtNode] = {} # includes ephemeral/sources self._load_manifest() @property def all_nodes(self) -> dict[str, DbtNode]: """All parsed nodes including sources and ephemeral models.""" return self._all_nodes @property def adapter_type(self) -> str | None: """Database adapter type from manifest metadata (e.g. 
``"postgres"``)."""
        metadata = self._manifest_data.get("metadata", {})
        return metadata.get("adapter_type")

    @property
    def project_name(self) -> str | None:
        """dbt project name from manifest metadata."""
        metadata = self._manifest_data.get("metadata", {})
        return metadata.get("project_name")

    def _load_manifest(self) -> None:
        """Load and parse the manifest.json file."""
        with open(self._manifest_path) as f:
            self._manifest_data = json.load(f)
        self._parse_nodes()

    def _parse_nodes(self) -> None:
        """Parse all nodes from the manifest data."""
        # Parse regular nodes (models, tests, snapshots, seeds, etc.)
        nodes_data = self._manifest_data.get("nodes", {})
        for unique_id, node_data in nodes_data.items():
            dbt_node = self._create_node(unique_id, node_data)
            self._all_nodes[unique_id] = dbt_node

        # Parse sources
        sources_data = self._manifest_data.get("sources", {})
        for unique_id, source_data in sources_data.items():
            dbt_node = self._create_source_node(unique_id, source_data)
            self._all_nodes[unique_id] = dbt_node

    def _create_node(self, unique_id: str, node_data: dict[str, Any]) -> DbtNode:
        """Create a DbtNode from manifest node data."""
        resource_type_str = node_data.get("resource_type", "model")
        try:
            resource_type = NodeType(resource_type_str)
        except ValueError:
            # Fall back to model if unknown type
            resource_type = NodeType.Model

        # Get depends_on nodes and macros
        depends_on_data = node_data.get("depends_on", {})
        depends_on_nodes = depends_on_data.get("nodes", [])
        depends_on_macros = depends_on_data.get("macros", [])

        # Get materialization from config
        config = node_data.get("config", {})
        materialization = config.get("materialized")

        return DbtNode(
            unique_id=unique_id,
            name=node_data.get("name", ""),
            resource_type=resource_type,
            depends_on=tuple(depends_on_nodes),
            depends_on_macros=tuple(depends_on_macros),
            fqn=tuple(node_data.get("fqn", [])),
            materialization=materialization,
            relation_name=node_data.get("relation_name"),
            original_file_path=node_data.get("original_file_path"),
            config=config,
            description=node_data.get("description"),
            compiled_code=node_data.get("compiled_code"),
        )

    def _create_source_node(
        self, unique_id: str, source_data: dict[str, Any]
    ) -> DbtNode:
        """Create a DbtNode from manifest source data."""
        return DbtNode(
            unique_id=unique_id,
            name=source_data.get("name", ""),
            resource_type=NodeType.Source,
            depends_on=tuple(),  # Sources have no dependencies
            fqn=tuple(source_data.get("fqn", [])),
            materialization=None,
            relation_name=source_data.get("relation_name"),
            original_file_path=source_data.get("original_file_path"),
            config=source_data.get("config", {}),
            description=source_data.get("description"),
        )

    def _resolve_dependencies_through_ephemeral(self, node: DbtNode) -> tuple[str, ...]:
        """Resolve dependencies, tracing through ephemeral models.

        Ephemeral models are compiled inline, so we need to find the actual
        executable dependencies by traversing through them.

        Args:
            node: The node to resolve dependencies for

        Returns:
            Tuple of unique_ids of executable dependencies
        """
        resolved: list[str] = []
        visited: set[str] = set()

        def collect(dep_id: str) -> None:
            # `visited` both deduplicates shared ancestors and guards the
            # recursion against cyclic ephemeral chains.
            if dep_id in visited:
                return
            visited.add(dep_id)

            dep_node = self._all_nodes.get(dep_id)
            if dep_node is None:
                return

            # Skip sources without relation_name
            if dep_node.resource_type == NodeType.Source and not dep_node.relation_name:
                return

            # For ephemeral nodes, trace through to their dependencies
            if dep_node.materialization == "ephemeral":
                for nested_dep in dep_node.depends_on:
                    collect(nested_dep)
                return

            # This is an executable dependency
            if dep_node.is_executable:
                resolved.append(dep_id)

        for dep_id in node.depends_on:
            collect(dep_id)

        return tuple(resolved)

    def get_executable_nodes(self) -> dict[str, DbtNode]:
        """Get all executable nodes (excluding ephemeral models and sources).

        Returns:
            Dictionary mapping unique_id to DbtNode for executable nodes.
            Dependencies are resolved through ephemeral models.
        """
        # Cached after the first call; DbtNode is immutable, so rebuilding
        # with resolved deps (rather than mutating) is required.
        if self._nodes:
            return self._nodes

        for unique_id, node in self._all_nodes.items():
            if not node.is_executable:
                continue

            # Resolve dependencies through ephemeral models
            resolved_deps = self._resolve_dependencies_through_ephemeral(node)

            # Create new node with resolved dependencies
            resolved_node = DbtNode(
                unique_id=node.unique_id,
                name=node.name,
                resource_type=node.resource_type,
                depends_on=resolved_deps,
                depends_on_macros=node.depends_on_macros,
                fqn=node.fqn,
                materialization=node.materialization,
                relation_name=node.relation_name,
                original_file_path=node.original_file_path,
                config=node.config,
                description=node.description,
                compiled_code=node.compiled_code,
            )
            self._nodes[unique_id] = resolved_node

        return self._nodes

    def get_node_dependencies(self, node_id: str) -> list[str]:
        """Get the resolved dependencies for a specific node.

        Args:
            node_id: The unique_id of the node

        Returns:
            List of unique_ids that this node depends on (resolved through ephemeral)

        Raises:
            KeyError: If the node_id is not found
        """
        nodes = self.get_executable_nodes()
        if node_id not in nodes:
            raise KeyError(f"Node not found: {node_id}")
        return list(nodes[node_id].depends_on)

    def compute_execution_waves(
        self,
        nodes: dict[str, DbtNode] | None = None,
    ) -> list[ExecutionWave]:
        """Compute execution waves using Kahn's algorithm.

        Each wave contains nodes that can be executed in parallel.
        Wave 0 contains nodes with no dependencies.
        Wave N contains nodes whose dependencies are all in waves 0 through N-1.

        Args:
            nodes: Pre-filtered node dict to compute waves for.
                If None, all executable nodes are used.
Returns:
            List of ExecutionWave objects in execution order

        Raises:
            ValueError: If the dependency graph contains cycles
        """
        if nodes is None:
            nodes = self.get_executable_nodes()

        if not nodes:
            return []

        # Build in-degree map (count of unresolved dependencies)
        in_degree: dict[str, int] = {}
        for node_id, node in nodes.items():
            # Only count dependencies that are in our executable nodes
            deps_in_graph = [d for d in node.depends_on if d in nodes]
            in_degree[node_id] = len(deps_in_graph)

        # Build dependents map (who depends on each node)
        dependents: dict[str, list[str]] = {node_id: [] for node_id in nodes}
        for node_id, node in nodes.items():
            for dep_id in node.depends_on:
                if dep_id in dependents:
                    dependents[dep_id].append(node_id)

        # Kahn's algorithm
        waves: list[ExecutionWave] = []
        current_wave = [node_id for node_id, degree in in_degree.items() if degree == 0]
        processed_count = 0
        wave_number = 0

        while current_wave:
            # Create wave with current nodes
            wave_nodes = [nodes[node_id] for node_id in current_wave]
            waves.append(ExecutionWave(wave_number=wave_number, nodes=wave_nodes))
            processed_count += len(current_wave)

            # Find next wave
            next_wave: list[str] = []
            for node_id in current_wave:
                for dependent_id in dependents[node_id]:
                    in_degree[dependent_id] -= 1
                    if in_degree[dependent_id] == 0:
                        next_wave.append(dependent_id)

            current_wave = next_wave
            wave_number += 1

        # Check for cycles
        # Kahn's algorithm only emits nodes whose in-degree reaches zero, so
        # any shortfall here means at least one cycle remained unprocessed.
        if processed_count != len(nodes):
            raise ValueError(
                "Dependency graph contains cycles. "
                f"Processed {processed_count} of {len(nodes)} nodes."
            )

        return waves

    def get_macro_paths(self) -> dict[str, str | None]:
        """Get a mapping of macro unique_id to original_file_path.

        Reads the top-level `macros` section of the manifest.

        Returns:
            Dict mapping macro unique_id to its `original_file_path`
            (`None` when the macro has no path, e.g. builtins).
        """
        macros_data = self._manifest_data.get("macros", {})
        return {
            macro_id: macro_data.get("original_file_path")
            for macro_id, macro_data in macros_data.items()
        }

    def get_test_nodes(self) -> dict[str, DbtNode]:
        """Get all test nodes with dependencies resolved through ephemeral models.

        Returns:
            Dictionary mapping unique_id to DbtNode for test nodes.
            Dependencies are resolved through ephemeral models to reach
            executable ancestors.
        """
        # NOTE: `_test_nodes` is created lazily on first call rather than in
        # __init__, hence the hasattr guard before reading the cache.
        if hasattr(self, "_test_nodes") and self._test_nodes:
            return self._test_nodes

        self._test_nodes: dict[str, DbtNode] = {}
        for unique_id, node in self._all_nodes.items():
            # _TEST_TYPES is a module-level constant defined earlier in the
            # file; presumably the set of test resource types — not visible here.
            if node.resource_type not in _TEST_TYPES:
                continue

            resolved_deps = self._resolve_dependencies_through_ephemeral(node)
            resolved_node = DbtNode(
                unique_id=node.unique_id,
                name=node.name,
                resource_type=node.resource_type,
                depends_on=resolved_deps,
                depends_on_macros=node.depends_on_macros,
                fqn=node.fqn,
                materialization=node.materialization,
                relation_name=node.relation_name,
                original_file_path=node.original_file_path,
                config=node.config,
                description=node.description,
                compiled_code=node.compiled_code,
            )
            self._test_nodes[unique_id] = resolved_node

        return self._test_nodes

    def filter_test_nodes(
        self,
        selected_node_ids: set[str] | None = None,
        executable_node_ids: set[str] | None = None,
    ) -> dict[str, DbtNode]:
        """Filter test nodes by selection and executable parent availability.

        Args:
            selected_node_ids: If not None, only keep tests whose unique_id is
                in this set. Pass None to keep all test nodes.
            executable_node_ids: Only keep tests whose **all** resolved
                dependencies are in this set. This ensures a multi-model
                relationship test is excluded if one of its parent models was
                filtered out by selectors or stale-source filtering.

        Returns:
            Dictionary of filtered test nodes.
        """
        tests = self.get_test_nodes()

        if selected_node_ids is not None:
            tests = {uid: n for uid, n in tests.items() if uid in selected_node_ids}

        if executable_node_ids is not None:
            # A test with zero resolved dependencies passes vacuously here
            # (all() over an empty iterable is True).
            tests = {
                uid: n
                for uid, n in tests.items()
                if all(dep in executable_node_ids for dep in n.depends_on)
            }

        return tests

    def filter_nodes(
        self,
        selected_node_ids: set[str] | None = None,
    ) -> dict[str, DbtNode]:
        """Filter executable nodes by a set of unique IDs.

        Args:
            selected_node_ids: Set of unique_ids to keep. If None, returns all
                executable nodes.

        Returns:
            Dictionary of filtered executable nodes with resolved dependencies.
        """
        nodes = self.get_executable_nodes()
        if selected_node_ids is None:
            return nodes
        return {uid: node for uid, node in nodes.items() if uid in selected_node_ids}


class DbtLsError(Exception):
    """Raised when `dbt ls` fails during selector resolution."""


def resolve_selection(
    project_dir: Path,
    profiles_dir: Path,
    select: str | None = None,
    exclude: str | None = None,
    target_path: Path | None = None,
    target: str | None = None,
) -> set[str]:
    """Resolve dbt selectors to a set of node unique_ids.

    Uses `dbt ls` under the hood, so all of dbt's selector syntax is
    supported: graph operators (`+model`, `model+`), tags (`tag:daily`),
    paths, wildcards, and indirect selection.

    Args:
        project_dir: Path to dbt project directory
        profiles_dir: Path to dbt profiles directory
        select: dbt selector expression (e.g., `"marts"`, `"tag:daily"`, `"+stg_users"`)
        exclude: dbt exclude expression
        target_path: Optional override for dbt target directory
        target: dbt target name (`--target` / `-t`)

    Returns:
        Set of unique_ids matching the selection criteria

    Raises:
        DbtLsError: If `dbt ls` fails
    """
    # Build the `dbt ls` CLI invocation; `--output json` makes each result
    # row a JSON document that carries the node's `unique_id`.
    args: list[str] = [
        "ls",
        "--resource-type",
        "all",
        "--output",
        "json",
        "--project-dir",
        str(project_dir),
        "--profiles-dir",
        str(profiles_dir),
    ]
    if select is not None:
        args.extend(["--select", select])
    if exclude is not None:
        args.extend(["--exclude", exclude])
    if target_path is not None:
        args.extend(["--target-path", str(target_path)])
    if target is not None:
        args.extend(["--target", target])

    result = dbtRunner().invoke(args)
    if not result.success:
        raise DbtLsError(f"dbt ls failed: {result.exception or 'unknown error'}")

    # With --output json, result.result is a list of JSON strings (or dicts
    # depending on the dbt version / runner implementation).
    if not result.result:
        return set()

    unique_ids: set[str] = set()
    for row in result.result:
        parsed = json.loads(row) if isinstance(row, str) else row
        unique_ids.add(parsed["unique_id"])
    return unique_ids
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/integrations/prefect-dbt/prefect_dbt/core/_manifest.py", "license": "Apache License 2.0", "lines": 471, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
PrefectHQ/prefect:src/integrations/prefect-dbt/tests/core/test_manifest.py
""" Tests for DbtNode, ExecutionWave, and ManifestParser. """ import json from pathlib import Path from typing import Any from unittest.mock import MagicMock, patch import pytest from dbt.artifacts.resources.types import NodeType from prefect_dbt.core._manifest import ( DbtLsError, DbtNode, ExecutionWave, ManifestParser, resolve_selection, ) # ============================================================================= # Test Fixtures # ============================================================================= @pytest.fixture def minimal_manifest_data() -> dict[str, Any]: """Create minimal manifest data with a single model.""" return { "nodes": { "model.test_project.my_model": { "name": "my_model", "resource_type": "model", "depends_on": {"nodes": []}, "config": {"materialized": "table"}, "relation_name": '"db"."schema"."my_model"', "original_file_path": "models/my_model.sql", } }, "sources": {}, } @pytest.fixture def manifest_with_ephemeral() -> dict[str, Any]: """Create manifest with ephemeral model chain: final -> ephemeral -> source_model.""" return { "nodes": { "model.test_project.source_model": { "name": "source_model", "resource_type": "model", "depends_on": {"nodes": []}, "config": {"materialized": "table"}, "relation_name": '"db"."schema"."source_model"', "original_file_path": "models/source_model.sql", }, "model.test_project.ephemeral_model": { "name": "ephemeral_model", "resource_type": "model", "depends_on": {"nodes": ["model.test_project.source_model"]}, "config": {"materialized": "ephemeral"}, "relation_name": None, "original_file_path": "models/ephemeral_model.sql", }, "model.test_project.final_model": { "name": "final_model", "resource_type": "model", "depends_on": {"nodes": ["model.test_project.ephemeral_model"]}, "config": {"materialized": "view"}, "relation_name": '"db"."schema"."final_model"', "original_file_path": "models/final_model.sql", }, }, "sources": {}, } @pytest.fixture def diamond_dependency_manifest() -> dict[str, Any]: """Create 
diamond dependency pattern: root -> left/right -> leaf. Execution waves should be: - Wave 0: root - Wave 1: left, right - Wave 2: leaf """ return { "nodes": { "model.test_project.root": { "name": "root", "resource_type": "model", "depends_on": {"nodes": []}, "config": {"materialized": "table"}, "relation_name": '"db"."schema"."root"', "original_file_path": "models/root.sql", }, "model.test_project.left": { "name": "left", "resource_type": "model", "depends_on": {"nodes": ["model.test_project.root"]}, "config": {"materialized": "table"}, "relation_name": '"db"."schema"."left"', "original_file_path": "models/left.sql", }, "model.test_project.right": { "name": "right", "resource_type": "model", "depends_on": {"nodes": ["model.test_project.root"]}, "config": {"materialized": "table"}, "relation_name": '"db"."schema"."right"', "original_file_path": "models/right.sql", }, "model.test_project.leaf": { "name": "leaf", "resource_type": "model", "depends_on": { "nodes": [ "model.test_project.left", "model.test_project.right", ] }, "config": {"materialized": "table"}, "relation_name": '"db"."schema"."leaf"', "original_file_path": "models/leaf.sql", }, }, "sources": {}, } @pytest.fixture def manifest_with_sources() -> dict[str, Any]: """Create manifest with source dependencies.""" return { "nodes": { "model.test_project.staging_model": { "name": "staging_model", "resource_type": "model", "depends_on": {"nodes": ["source.test_project.raw.users"]}, "config": {"materialized": "view"}, "relation_name": '"db"."schema"."staging_model"', "original_file_path": "models/staging_model.sql", }, }, "sources": { "source.test_project.raw.users": { "name": "users", "resource_type": "source", "relation_name": '"raw"."users"', "original_file_path": "models/sources.yml", "config": {}, }, }, } @pytest.fixture def cyclic_manifest() -> dict[str, Any]: """Create manifest with cyclic dependencies: a -> b -> c -> a.""" return { "nodes": { "model.test_project.model_a": { "name": "model_a", 
"resource_type": "model", "depends_on": {"nodes": ["model.test_project.model_c"]}, "config": {"materialized": "table"}, "relation_name": '"db"."schema"."model_a"', "original_file_path": "models/model_a.sql", }, "model.test_project.model_b": { "name": "model_b", "resource_type": "model", "depends_on": {"nodes": ["model.test_project.model_a"]}, "config": {"materialized": "table"}, "relation_name": '"db"."schema"."model_b"', "original_file_path": "models/model_b.sql", }, "model.test_project.model_c": { "name": "model_c", "resource_type": "model", "depends_on": {"nodes": ["model.test_project.model_b"]}, "config": {"materialized": "table"}, "relation_name": '"db"."schema"."model_c"', "original_file_path": "models/model_c.sql", }, }, "sources": {}, } def write_manifest(tmp_path: Path, data: dict[str, Any]) -> Path: """Helper to write manifest data to a file.""" manifest_path = tmp_path / "manifest.json" manifest_path.write_text(json.dumps(data)) return manifest_path # ============================================================================= # DbtNode Tests # ============================================================================= class TestDbtNode: """Tests for the DbtNode dataclass.""" def test_is_executable_table(self): """Table materialization should be executable.""" node = DbtNode( unique_id="model.test.my_model", name="my_model", resource_type=NodeType.Model, materialization="table", ) assert node.is_executable is True def test_is_executable_view(self): """View materialization should be executable.""" node = DbtNode( unique_id="model.test.my_model", name="my_model", resource_type=NodeType.Model, materialization="view", ) assert node.is_executable is True def test_is_executable_incremental(self): """Incremental materialization should be executable.""" node = DbtNode( unique_id="model.test.my_model", name="my_model", resource_type=NodeType.Model, materialization="incremental", ) assert node.is_executable is True def test_is_executable_ephemeral(self): 
"""Ephemeral materialization should NOT be executable.""" node = DbtNode( unique_id="model.test.my_model", name="my_model", resource_type=NodeType.Model, materialization="ephemeral", ) assert node.is_executable is False def test_is_executable_source(self): """Source nodes should NOT be executable.""" node = DbtNode( unique_id="source.test.raw.users", name="users", resource_type=NodeType.Source, ) assert node.is_executable is False def test_is_executable_seed(self): """Seed nodes should be executable.""" node = DbtNode( unique_id="seed.test.my_seed", name="my_seed", resource_type=NodeType.Seed, ) assert node.is_executable is True def test_is_executable_snapshot(self): """Snapshot nodes should be executable.""" node = DbtNode( unique_id="snapshot.test.my_snapshot", name="my_snapshot", resource_type=NodeType.Snapshot, ) assert node.is_executable is True def test_is_executable_test(self): """Test nodes should NOT be executable (they use `dbt test`).""" node = DbtNode( unique_id="test.test.my_test", name="my_test", resource_type=NodeType.Test, ) assert node.is_executable is False def test_is_executable_exposure(self): """Exposure nodes should NOT be executable.""" node = DbtNode( unique_id="exposure.test.my_exposure", name="my_exposure", resource_type=NodeType.Exposure, ) assert node.is_executable is False def test_dbt_selector_uses_path(self): """dbt_selector should return path:<file> when original_file_path is set.""" node = DbtNode( unique_id="model.analytics.stg_users", name="stg_users", resource_type=NodeType.Model, fqn=("analytics", "staging", "stg_users"), original_file_path="models/staging/stg_users.sql", ) assert node.dbt_selector == "path:models/staging/stg_users.sql" def test_dbt_selector_falls_back_to_fqn(self): """dbt_selector should fall back to FQN when file path is absent.""" node = DbtNode( unique_id="model.analytics.stg_users", name="stg_users", resource_type=NodeType.Model, fqn=("analytics", "staging", "stg_users"), ) assert node.dbt_selector == 
"analytics.staging.stg_users" def test_dbt_selector_falls_back_to_name(self): """dbt_selector should fall back to name when FQN and path are empty.""" node = DbtNode( unique_id="model.analytics.stg_users", name="stg_users", resource_type=NodeType.Model, ) assert node.dbt_selector == "stg_users" def test_dbt_selector_skips_path_for_non_runnable_types(self): """Test nodes share YAML files, so path: would over-select; fall back to FQN.""" node = DbtNode( unique_id="test.analytics.not_null_stg_users_id", name="not_null_stg_users_id", resource_type=NodeType.Test, fqn=("analytics", "not_null_stg_users_id"), original_file_path="models/staging/schema.yml", ) assert node.dbt_selector == "analytics.not_null_stg_users_id" def test_hashability(self): """DbtNode should be hashable (usable in sets/dicts).""" node1 = DbtNode( unique_id="model.test.my_model", name="my_model", resource_type=NodeType.Model, depends_on=("model.test.other",), ) node2 = DbtNode( unique_id="model.test.my_model", name="my_model", resource_type=NodeType.Model, depends_on=("model.test.other",), ) # Should be hashable node_set = {node1, node2} assert len(node_set) == 1 # Should work as dict key node_dict = {node1: "value"} assert node_dict[node2] == "value" def test_immutability(self): """DbtNode should be immutable (frozen=True).""" node = DbtNode( unique_id="model.test.my_model", name="my_model", resource_type=NodeType.Model, ) with pytest.raises(AttributeError): node.name = "new_name" # type: ignore[misc] def test_equality(self): """DbtNodes with same attributes should be equal.""" node1 = DbtNode( unique_id="model.test.my_model", name="my_model", resource_type=NodeType.Model, depends_on=("dep1", "dep2"), ) node2 = DbtNode( unique_id="model.test.my_model", name="my_model", resource_type=NodeType.Model, depends_on=("dep1", "dep2"), ) assert node1 == node2 def test_depends_on_as_tuple(self): """depends_on should be a tuple for hashability.""" node = DbtNode( unique_id="model.test.my_model", 
name="my_model", resource_type=NodeType.Model, depends_on=("dep1", "dep2"), ) assert isinstance(node.depends_on, tuple) assert node.depends_on == ("dep1", "dep2") # ============================================================================= # ExecutionWave Tests # ============================================================================= class TestExecutionWave: """Tests for the ExecutionWave dataclass.""" def test_creation(self): """ExecutionWave should store wave number and nodes.""" node = DbtNode( unique_id="model.test.my_model", name="my_model", resource_type=NodeType.Model, ) wave = ExecutionWave(wave_number=0, nodes=[node]) assert wave.wave_number == 0 assert len(wave.nodes) == 1 assert wave.nodes[0] == node def test_empty_wave(self): """ExecutionWave can be created with no nodes.""" wave = ExecutionWave(wave_number=0, nodes=[]) assert wave.wave_number == 0 assert wave.nodes == [] def test_multiple_nodes(self): """ExecutionWave can contain multiple nodes.""" nodes = [ DbtNode( unique_id=f"model.test.model_{i}", name=f"model_{i}", resource_type=NodeType.Model, ) for i in range(3) ] wave = ExecutionWave(wave_number=1, nodes=nodes) assert wave.wave_number == 1 assert len(wave.nodes) == 3 # ============================================================================= # ManifestParser Tests # ============================================================================= class TestManifestParser: """Tests for the ManifestParser class.""" def test_file_not_found(self, tmp_path: Path): """Should raise FileNotFoundError for missing manifest.""" with pytest.raises(FileNotFoundError, match="Manifest file not found"): ManifestParser(tmp_path / "nonexistent.json") def test_parse_minimal_manifest( self, tmp_path: Path, minimal_manifest_data: dict[str, Any] ): """Should parse a minimal manifest with one model.""" manifest_path = write_manifest(tmp_path, minimal_manifest_data) parser = ManifestParser(manifest_path) nodes = parser.get_executable_nodes() assert len(nodes) 
== 1 assert "model.test_project.my_model" in nodes node = nodes["model.test_project.my_model"] assert node.name == "my_model" assert node.resource_type == NodeType.Model assert node.materialization == "table" assert node.depends_on == () def test_exclude_ephemeral( self, tmp_path: Path, manifest_with_ephemeral: dict[str, Any] ): """Should exclude ephemeral models from executable nodes.""" manifest_path = write_manifest(tmp_path, manifest_with_ephemeral) parser = ManifestParser(manifest_path) nodes = parser.get_executable_nodes() node_names = {n.name for n in nodes.values()} assert "source_model" in node_names assert "final_model" in node_names assert "ephemeral_model" not in node_names def test_resolve_dependencies_through_ephemeral( self, tmp_path: Path, manifest_with_ephemeral: dict[str, Any] ): """Should resolve dependencies through ephemeral models.""" manifest_path = write_manifest(tmp_path, manifest_with_ephemeral) parser = ManifestParser(manifest_path) nodes = parser.get_executable_nodes() final_model = nodes["model.test_project.final_model"] # final_model depends on ephemeral_model which depends on source_model # Resolved dependency should be directly to source_model assert "model.test_project.source_model" in final_model.depends_on assert "model.test_project.ephemeral_model" not in final_model.depends_on def test_exclude_sources( self, tmp_path: Path, manifest_with_sources: dict[str, Any] ): """Should exclude source nodes from executable nodes.""" manifest_path = write_manifest(tmp_path, manifest_with_sources) parser = ManifestParser(manifest_path) nodes = parser.get_executable_nodes() # Source should not be in executable nodes assert "source.test_project.raw.users" not in nodes # Model should be present assert "model.test_project.staging_model" in nodes def test_get_node_dependencies( self, tmp_path: Path, diamond_dependency_manifest: dict[str, Any] ): """Should return dependencies for a specific node.""" manifest_path = write_manifest(tmp_path, 
diamond_dependency_manifest) parser = ManifestParser(manifest_path) deps = parser.get_node_dependencies("model.test_project.leaf") assert set(deps) == { "model.test_project.left", "model.test_project.right", } def test_get_node_dependencies_not_found( self, tmp_path: Path, minimal_manifest_data: dict[str, Any] ): """Should raise KeyError for unknown node.""" manifest_path = write_manifest(tmp_path, minimal_manifest_data) parser = ManifestParser(manifest_path) with pytest.raises(KeyError, match="Node not found"): parser.get_node_dependencies("model.test_project.nonexistent") def test_filter_nodes_none_returns_all( self, tmp_path: Path, minimal_manifest_data: dict[str, Any] ): """filter_nodes(None) returns all executable nodes.""" manifest_path = write_manifest(tmp_path, minimal_manifest_data) parser = ManifestParser(manifest_path) filtered = parser.filter_nodes() assert filtered == parser.get_executable_nodes() filtered = parser.filter_nodes(selected_node_ids=None) assert filtered == parser.get_executable_nodes() # ============================================================================= # ExecutionWave Computation Tests # ============================================================================= class TestComputeExecutionWaves: """Tests for ManifestParser.compute_execution_waves().""" def test_empty_manifest(self, tmp_path: Path): """Empty manifest should return no waves.""" manifest_path = write_manifest(tmp_path, {"nodes": {}, "sources": {}}) parser = ManifestParser(manifest_path) waves = parser.compute_execution_waves() assert waves == [] def test_single_node(self, tmp_path: Path, minimal_manifest_data: dict[str, Any]): """Single node should produce one wave.""" manifest_path = write_manifest(tmp_path, minimal_manifest_data) parser = ManifestParser(manifest_path) waves = parser.compute_execution_waves() assert len(waves) == 1 assert waves[0].wave_number == 0 assert len(waves[0].nodes) == 1 assert waves[0].nodes[0].name == "my_model" def 
test_diamond_pattern( self, tmp_path: Path, diamond_dependency_manifest: dict[str, Any] ): """Diamond pattern should produce 3 waves.""" manifest_path = write_manifest(tmp_path, diamond_dependency_manifest) parser = ManifestParser(manifest_path) waves = parser.compute_execution_waves() assert len(waves) == 3 # Wave 0: root (no dependencies) assert waves[0].wave_number == 0 wave0_names = {n.name for n in waves[0].nodes} assert wave0_names == {"root"} # Wave 1: left and right (depend on root) assert waves[1].wave_number == 1 wave1_names = {n.name for n in waves[1].nodes} assert wave1_names == {"left", "right"} # Wave 2: leaf (depends on left and right) assert waves[2].wave_number == 2 wave2_names = {n.name for n in waves[2].nodes} assert wave2_names == {"leaf"} def test_linear_chain(self, tmp_path: Path): """Linear chain a -> b -> c should produce 3 waves.""" manifest_data = { "nodes": { "model.test_project.model_a": { "name": "model_a", "resource_type": "model", "depends_on": {"nodes": []}, "config": {"materialized": "table"}, "relation_name": '"db"."schema"."model_a"', "original_file_path": "models/model_a.sql", }, "model.test_project.model_b": { "name": "model_b", "resource_type": "model", "depends_on": {"nodes": ["model.test_project.model_a"]}, "config": {"materialized": "table"}, "relation_name": '"db"."schema"."model_b"', "original_file_path": "models/model_b.sql", }, "model.test_project.model_c": { "name": "model_c", "resource_type": "model", "depends_on": {"nodes": ["model.test_project.model_b"]}, "config": {"materialized": "table"}, "relation_name": '"db"."schema"."model_c"', "original_file_path": "models/model_c.sql", }, }, "sources": {}, } manifest_path = write_manifest(tmp_path, manifest_data) parser = ManifestParser(manifest_path) waves = parser.compute_execution_waves() assert len(waves) == 3 assert waves[0].nodes[0].name == "model_a" assert waves[1].nodes[0].name == "model_b" assert waves[2].nodes[0].name == "model_c" def 
test_parallel_independent_nodes(self, tmp_path: Path): """Independent nodes should be in wave 0.""" manifest_data = { "nodes": { f"model.test_project.model_{i}": { "name": f"model_{i}", "resource_type": "model", "depends_on": {"nodes": []}, "config": {"materialized": "table"}, "relation_name": f'"db"."schema"."model_{i}"', "original_file_path": f"models/model_{i}.sql", } for i in range(5) }, "sources": {}, } manifest_path = write_manifest(tmp_path, manifest_data) parser = ManifestParser(manifest_path) waves = parser.compute_execution_waves() assert len(waves) == 1 assert waves[0].wave_number == 0 assert len(waves[0].nodes) == 5 def test_cycle_detection(self, tmp_path: Path, cyclic_manifest: dict[str, Any]): """Should raise ValueError when cycles are detected.""" manifest_path = write_manifest(tmp_path, cyclic_manifest) parser = ManifestParser(manifest_path) with pytest.raises(ValueError, match="contains cycles"): parser.compute_execution_waves() def test_waves_with_ephemeral_resolution( self, tmp_path: Path, manifest_with_ephemeral: dict[str, Any] ): """Waves should respect resolved dependencies through ephemeral.""" manifest_path = write_manifest(tmp_path, manifest_with_ephemeral) parser = ManifestParser(manifest_path) waves = parser.compute_execution_waves() # Should have 2 waves: source_model, then final_model # (ephemeral_model is excluded) assert len(waves) == 2 wave0_names = {n.name for n in waves[0].nodes} wave1_names = {n.name for n in waves[1].nodes} assert wave0_names == {"source_model"} assert wave1_names == {"final_model"} def test_source_dependencies_ignored_in_waves( self, tmp_path: Path, manifest_with_sources: dict[str, Any] ): """Source dependencies should not affect wave computation.""" manifest_path = write_manifest(tmp_path, manifest_with_sources) parser = ManifestParser(manifest_path) waves = parser.compute_execution_waves() # Only executable model should be in wave 0 assert len(waves) == 1 assert waves[0].nodes[0].name == "staging_model" # 
============================================================================= # Edge Cases and Integration Tests # ============================================================================= class TestManifestParserEdgeCases: """Edge cases and integration tests for ManifestParser.""" def test_nested_ephemeral_chain(self, tmp_path: Path): """Should resolve through multiple ephemeral models.""" manifest_data = { "nodes": { "model.test_project.base": { "name": "base", "resource_type": "model", "depends_on": {"nodes": []}, "config": {"materialized": "table"}, "relation_name": '"db"."schema"."base"', "original_file_path": "models/base.sql", }, "model.test_project.eph1": { "name": "eph1", "resource_type": "model", "depends_on": {"nodes": ["model.test_project.base"]}, "config": {"materialized": "ephemeral"}, "relation_name": None, "original_file_path": "models/eph1.sql", }, "model.test_project.eph2": { "name": "eph2", "resource_type": "model", "depends_on": {"nodes": ["model.test_project.eph1"]}, "config": {"materialized": "ephemeral"}, "relation_name": None, "original_file_path": "models/eph2.sql", }, "model.test_project.final": { "name": "final", "resource_type": "model", "depends_on": {"nodes": ["model.test_project.eph2"]}, "config": {"materialized": "table"}, "relation_name": '"db"."schema"."final"', "original_file_path": "models/final.sql", }, }, "sources": {}, } manifest_path = write_manifest(tmp_path, manifest_data) parser = ManifestParser(manifest_path) nodes = parser.get_executable_nodes() final_node = nodes["model.test_project.final"] # Should resolve all the way through to base assert final_node.depends_on == ("model.test_project.base",) def test_multiple_dependency_paths(self, tmp_path: Path): """Should handle nodes with multiple dependency paths to same node.""" manifest_data = { "nodes": { "model.test_project.base": { "name": "base", "resource_type": "model", "depends_on": {"nodes": []}, "config": {"materialized": "table"}, "relation_name": 
'"db"."schema"."base"', "original_file_path": "models/base.sql", }, "model.test_project.eph_a": { "name": "eph_a", "resource_type": "model", "depends_on": {"nodes": ["model.test_project.base"]}, "config": {"materialized": "ephemeral"}, "relation_name": None, "original_file_path": "models/eph_a.sql", }, "model.test_project.eph_b": { "name": "eph_b", "resource_type": "model", "depends_on": {"nodes": ["model.test_project.base"]}, "config": {"materialized": "ephemeral"}, "relation_name": None, "original_file_path": "models/eph_b.sql", }, "model.test_project.final": { "name": "final", "resource_type": "model", "depends_on": { "nodes": [ "model.test_project.eph_a", "model.test_project.eph_b", ] }, "config": {"materialized": "table"}, "relation_name": '"db"."schema"."final"', "original_file_path": "models/final.sql", }, }, "sources": {}, } manifest_path = write_manifest(tmp_path, manifest_data) parser = ManifestParser(manifest_path) nodes = parser.get_executable_nodes() final_node = nodes["model.test_project.final"] # Should only include base once (deduplication through visited set) assert final_node.depends_on == ("model.test_project.base",) def test_source_without_relation_name(self, tmp_path: Path): """Sources without relation_name should be skipped in dependency resolution.""" manifest_data = { "nodes": { "model.test_project.model": { "name": "model", "resource_type": "model", "depends_on": {"nodes": ["source.test_project.raw.users"]}, "config": {"materialized": "table"}, "relation_name": '"db"."schema"."model"', "original_file_path": "models/model.sql", }, }, "sources": { "source.test_project.raw.users": { "name": "users", "resource_type": "source", "relation_name": None, # No relation_name "original_file_path": "models/sources.yml", "config": {}, }, }, } manifest_path = write_manifest(tmp_path, manifest_data) parser = ManifestParser(manifest_path) nodes = parser.get_executable_nodes() model_node = nodes["model.test_project.model"] # Source without relation_name 
should be skipped assert model_node.depends_on == () def test_unknown_resource_type(self, tmp_path: Path): """Should handle unknown resource types gracefully.""" manifest_data = { "nodes": { "unknown.test_project.something": { "name": "something", "resource_type": "unknown_type", "depends_on": {"nodes": []}, "config": {"materialized": "table"}, "relation_name": '"db"."schema"."something"', "original_file_path": "models/something.sql", }, }, "sources": {}, } manifest_path = write_manifest(tmp_path, manifest_data) parser = ManifestParser(manifest_path) nodes = parser.get_executable_nodes() # Should fall back to Model type assert len(nodes) == 1 def test_caching_of_executable_nodes( self, tmp_path: Path, minimal_manifest_data: dict[str, Any] ): """get_executable_nodes should cache results.""" manifest_path = write_manifest(tmp_path, minimal_manifest_data) parser = ManifestParser(manifest_path) nodes1 = parser.get_executable_nodes() nodes2 = parser.get_executable_nodes() assert nodes1 is nodes2 # Same object (cached) # ============================================================================= # Filter Nodes Tests # ============================================================================= class TestFilterNodes: """Tests for ManifestParser.filter_nodes().""" def test_filter_none_returns_all( self, tmp_path: Path, diamond_dependency_manifest: dict[str, Any] ): """filter_nodes(None) returns all executable nodes.""" manifest_path = write_manifest(tmp_path, diamond_dependency_manifest) parser = ManifestParser(manifest_path) filtered = parser.filter_nodes(selected_node_ids=None) assert filtered == parser.get_executable_nodes() def test_filter_subset( self, tmp_path: Path, diamond_dependency_manifest: dict[str, Any] ): """filter_nodes with a subset of IDs returns only those nodes.""" manifest_path = write_manifest(tmp_path, diamond_dependency_manifest) parser = ManifestParser(manifest_path) subset = {"model.test_project.root", "model.test_project.left"} filtered = 
parser.filter_nodes(selected_node_ids=subset) assert set(filtered.keys()) == subset assert "model.test_project.right" not in filtered assert "model.test_project.leaf" not in filtered def test_filter_empty_set( self, tmp_path: Path, diamond_dependency_manifest: dict[str, Any] ): """filter_nodes with empty set returns empty dict.""" manifest_path = write_manifest(tmp_path, diamond_dependency_manifest) parser = ManifestParser(manifest_path) filtered = parser.filter_nodes(selected_node_ids=set()) assert filtered == {} def test_filter_nonexistent_ids_ignored( self, tmp_path: Path, minimal_manifest_data: dict[str, Any] ): """IDs not in manifest are silently ignored.""" manifest_path = write_manifest(tmp_path, minimal_manifest_data) parser = ManifestParser(manifest_path) filtered = parser.filter_nodes( selected_node_ids={"model.test_project.my_model", "model.fake.not_real"} ) assert set(filtered.keys()) == {"model.test_project.my_model"} def test_filter_preserves_resolved_dependencies( self, tmp_path: Path, manifest_with_ephemeral: dict[str, Any] ): """Filtered nodes retain their resolved (through-ephemeral) deps.""" manifest_path = write_manifest(tmp_path, manifest_with_ephemeral) parser = ManifestParser(manifest_path) # final_model depends on source_model (through ephemeral) filtered = parser.filter_nodes( selected_node_ids={ "model.test_project.source_model", "model.test_project.final_model", } ) final = filtered["model.test_project.final_model"] # Resolved dependency through ephemeral should be preserved assert "model.test_project.source_model" in final.depends_on def test_filter_with_diamond_pattern( self, tmp_path: Path, diamond_dependency_manifest: dict[str, Any] ): """Filter a subset of the diamond graph.""" manifest_path = write_manifest(tmp_path, diamond_dependency_manifest) parser = ManifestParser(manifest_path) # Select only right and leaf — root and left are excluded filtered = parser.filter_nodes( selected_node_ids={ "model.test_project.right", 
"model.test_project.leaf", } ) assert set(filtered.keys()) == { "model.test_project.right", "model.test_project.leaf", } # leaf still has its original deps (root, left, right) in depends_on # but only right is in the filtered set leaf = filtered["model.test_project.leaf"] assert "model.test_project.left" in leaf.depends_on assert "model.test_project.right" in leaf.depends_on # ============================================================================= # Filtered Execution Waves Tests # ============================================================================= class TestComputeExecutionWavesFiltered: """Tests for ManifestParser.compute_execution_waves() with filtered nodes.""" def test_waves_from_filtered_nodes( self, tmp_path: Path, diamond_dependency_manifest: dict[str, Any] ): """Compute waves from a filtered subset.""" manifest_path = write_manifest(tmp_path, diamond_dependency_manifest) parser = ManifestParser(manifest_path) # Only root and left filtered = parser.filter_nodes( selected_node_ids={ "model.test_project.root", "model.test_project.left", } ) waves = parser.compute_execution_waves(nodes=filtered) assert len(waves) == 2 assert {n.name for n in waves[0].nodes} == {"root"} assert {n.name for n in waves[1].nodes} == {"left"} def test_waves_filtered_recomputes_dependencies( self, tmp_path: Path, diamond_dependency_manifest: dict[str, Any] ): """Wave computation only considers in-set dependencies.""" manifest_path = write_manifest(tmp_path, diamond_dependency_manifest) parser = ManifestParser(manifest_path) # Select leaf and right, but NOT root (which right depends on). # Since root is not in the set, right has 0 in-set deps → wave 0. # leaf depends on left (not in set) and right (in set) → wave 1. 
filtered = parser.filter_nodes( selected_node_ids={ "model.test_project.right", "model.test_project.leaf", } ) waves = parser.compute_execution_waves(nodes=filtered) assert len(waves) == 2 wave0_names = {n.name for n in waves[0].nodes} wave1_names = {n.name for n in waves[1].nodes} assert wave0_names == {"right"} assert wave1_names == {"leaf"} def test_waves_with_none_uses_all_nodes( self, tmp_path: Path, diamond_dependency_manifest: dict[str, Any] ): """Passing nodes=None uses get_executable_nodes.""" manifest_path = write_manifest(tmp_path, diamond_dependency_manifest) parser = ManifestParser(manifest_path) waves_default = parser.compute_execution_waves() waves_none = parser.compute_execution_waves(nodes=None) assert len(waves_default) == len(waves_none) for w1, w2 in zip(waves_default, waves_none): assert {n.unique_id for n in w1.nodes} == {n.unique_id for n in w2.nodes} # ============================================================================= # resolve_selection Tests # ============================================================================= class TestResolveSelection: """Tests for the resolve_selection() standalone function.""" @patch("prefect_dbt.core._manifest.dbtRunner") def test_resolve_with_select(self, mock_runner_cls: MagicMock, tmp_path: Path): """Invokes dbt ls with --select flag.""" mock_result = MagicMock() mock_result.success = True mock_result.result = [ json.dumps({"unique_id": "model.proj.stg_users"}), json.dumps({"unique_id": "model.proj.stg_orders"}), ] mock_runner_cls.return_value.invoke.return_value = mock_result result = resolve_selection( project_dir=tmp_path / "project", profiles_dir=tmp_path / "profiles", select="marts", ) assert result == {"model.proj.stg_users", "model.proj.stg_orders"} args = mock_runner_cls.return_value.invoke.call_args[0][0] assert "--select" in args assert "marts" in args @patch("prefect_dbt.core._manifest.dbtRunner") def test_resolve_with_exclude(self, mock_runner_cls: MagicMock, tmp_path: Path): 
"""Invokes dbt ls with --exclude flag.""" mock_result = MagicMock() mock_result.success = True mock_result.result = [ json.dumps({"unique_id": "model.proj.stg_users"}), ] mock_runner_cls.return_value.invoke.return_value = mock_result result = resolve_selection( project_dir=tmp_path / "project", profiles_dir=tmp_path / "profiles", exclude="stg_legacy_*", ) assert result == {"model.proj.stg_users"} args = mock_runner_cls.return_value.invoke.call_args[0][0] assert "--exclude" in args assert "stg_legacy_*" in args assert "--select" not in args @patch("prefect_dbt.core._manifest.dbtRunner") def test_resolve_with_both(self, mock_runner_cls: MagicMock, tmp_path: Path): """Invokes dbt ls with both --select and --exclude.""" mock_result = MagicMock() mock_result.success = True mock_result.result = [ json.dumps({"unique_id": "model.proj.dim_users"}), ] mock_runner_cls.return_value.invoke.return_value = mock_result result = resolve_selection( project_dir=tmp_path / "project", profiles_dir=tmp_path / "profiles", select="marts", exclude="dim_legacy_*", ) assert result == {"model.proj.dim_users"} args = mock_runner_cls.return_value.invoke.call_args[0][0] assert "--select" in args assert "--exclude" in args @patch("prefect_dbt.core._manifest.dbtRunner") def test_resolve_no_selectors_returns_all( self, mock_runner_cls: MagicMock, tmp_path: Path ): """No select/exclude still invokes dbt ls for all nodes.""" mock_result = MagicMock() mock_result.success = True mock_result.result = [ json.dumps({"unique_id": "model.proj.a"}), json.dumps({"unique_id": "model.proj.b"}), json.dumps({"unique_id": "seed.proj.c"}), ] mock_runner_cls.return_value.invoke.return_value = mock_result result = resolve_selection( project_dir=tmp_path / "project", profiles_dir=tmp_path / "profiles", ) assert result == {"model.proj.a", "model.proj.b", "seed.proj.c"} args = mock_runner_cls.return_value.invoke.call_args[0][0] assert "--select" not in args assert "--exclude" not in args 
@patch("prefect_dbt.core._manifest.dbtRunner") def test_resolve_failure_raises_error( self, mock_runner_cls: MagicMock, tmp_path: Path ): """Failed dbt ls raises DbtLsError.""" mock_result = MagicMock() mock_result.success = False mock_result.exception = RuntimeError("compilation error") mock_runner_cls.return_value.invoke.return_value = mock_result with pytest.raises(DbtLsError, match="dbt ls failed"): resolve_selection( project_dir=tmp_path / "project", profiles_dir=tmp_path / "profiles", select="nonexistent_model", ) @patch("prefect_dbt.core._manifest.dbtRunner") def test_resolve_passes_project_and_profiles_dir( self, mock_runner_cls: MagicMock, tmp_path: Path ): """Verifies correct CLI args are passed to dbtRunner.""" mock_result = MagicMock() mock_result.success = True mock_result.result = [] mock_runner_cls.return_value.invoke.return_value = mock_result project = tmp_path / "my_project" profiles = tmp_path / "my_profiles" resolve_selection(project_dir=project, profiles_dir=profiles) args = mock_runner_cls.return_value.invoke.call_args[0][0] assert "ls" in args assert "--resource-type" in args assert "all" in args assert "--project-dir" in args assert str(project) in args assert "--profiles-dir" in args assert str(profiles) in args @patch("prefect_dbt.core._manifest.dbtRunner") def test_resolve_with_target_path(self, mock_runner_cls: MagicMock, tmp_path: Path): """target_path is passed when provided.""" mock_result = MagicMock() mock_result.success = True mock_result.result = [json.dumps({"unique_id": "model.proj.a"})] mock_runner_cls.return_value.invoke.return_value = mock_result target = tmp_path / "custom_target" resolve_selection( project_dir=tmp_path / "project", profiles_dir=tmp_path / "profiles", target_path=target, ) args = mock_runner_cls.return_value.invoke.call_args[0][0] assert "--target-path" in args assert str(target) in args
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/integrations/prefect-dbt/tests/core/test_manifest.py", "license": "Apache License 2.0", "lines": 1026, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:src/prefect/cli/cloud/asset.py
"""Manage Prefect Cloud assets.""" from __future__ import annotations from typing import Annotated import cyclopts from rich.table import Table import prefect.cli._app as _cli from prefect.cli._cloud_utils import confirm_logged_in from prefect.cli._utilities import ( exit_with_error, exit_with_success, with_cli_exception_handling, ) asset_app: cyclopts.App = cyclopts.App( name="asset", alias="assets", help="Manage Prefect Cloud assets.", version_flags=[], help_flags=["--help"], ) @asset_app.command(name="ls") @with_cli_exception_handling async def asset_ls( *, prefix: Annotated[ str | None, cyclopts.Parameter("--prefix", alias="-p", help="Filter assets by key prefix"), ] = None, search: Annotated[ str | None, cyclopts.Parameter( "--search", alias="-s", help="Filter assets by key substring" ), ] = None, limit: Annotated[ int, cyclopts.Parameter( "--limit", alias="-l", help="Maximum number of assets to return (default 50, max 200)", ), ] = 50, output: Annotated[ str | None, cyclopts.Parameter( "--output", alias="-o", help="Output format. 
Supports: json" ), ] = None, ): """List assets in the current workspace.""" import orjson from prefect.client.cloud import get_cloud_client from prefect.settings import get_current_settings confirm_logged_in(console=_cli.console) if output and output.lower() != "json": exit_with_error("Only 'json' output format is supported.") if limit < 1 or limit > 200: exit_with_error("Limit must be between 1 and 200.") key_filter: dict[str, list[str]] = {} if prefix: key_filter["prefix"] = [prefix] if search: key_filter["search"] = [search] body: dict[str, object] = {"limit": limit} if key_filter: body["filter"] = {"key": key_filter} async with get_cloud_client(host=get_current_settings().api.url) as client: response = await client.request("POST", "/assets/filter", json=body) assets = response.get("assets", []) total = response.get("total", len(assets)) if output and output.lower() == "json": json_output = orjson.dumps(assets, option=orjson.OPT_INDENT_2).decode() _cli.console.print(json_output, soft_wrap=True) else: if not assets: _cli.console.print("No assets found in this workspace.") return table = Table( title="Assets", show_header=True, ) table.add_column("Key", style="blue", no_wrap=False) table.add_column("Last Seen", style="cyan", no_wrap=True) for asset in sorted(assets, key=lambda x: x.get("key", "")): table.add_row( asset.get("key", ""), asset.get("last_seen", ""), ) _cli.console.print(table) _cli.console.print(f"\nShowing {len(assets)} of {total} asset(s)") @asset_app.command(name="delete") @with_cli_exception_handling async def asset_delete( key: Annotated[str, cyclopts.Parameter(help="The key of the asset to delete")], *, force: Annotated[ bool, cyclopts.Parameter( "--force", alias="-f", negative="", help="Skip confirmation prompt" ), ] = False, ): """Delete an asset by its key. The key should be the full asset URI (e.g., 's3://bucket/data.csv'). 
""" from prefect.cli._prompts import confirm from prefect.client.cloud import get_cloud_client from prefect.exceptions import ObjectNotFound from prefect.settings import get_current_settings confirm_logged_in(console=_cli.console) if _cli.is_interactive() and not force: if not confirm( f"Are you sure you want to delete asset {key!r}?", default=False, console=_cli.console, ): exit_with_error("Deletion aborted.") async with get_cloud_client(host=get_current_settings().api.url) as client: try: await client.request("DELETE", "/assets/key", params={"key": key}) except ObjectNotFound: exit_with_error(f"Asset {key!r} not found.") exit_with_success(f"Deleted asset {key!r}.")
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/cli/cloud/asset.py", "license": "Apache License 2.0", "lines": 122, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
PrefectHQ/prefect:tests/cli/cloud/test_asset.py
"""Tests for the `prefect cloud asset` CLI commands (`ls` and `delete`)."""

import uuid

import httpx
import pytest
import readchar
import respx
from starlette import status
from tests.cli.cloud.test_cloud import gen_test_workspace

from prefect.client.schemas import Workspace
from prefect.context import use_profile
from prefect.settings import (
    PREFECT_API_KEY,
    PREFECT_API_URL,
    Profile,
    ProfilesCollection,
    save_profiles,
)
from prefect.testing.cli import invoke_and_assert


@pytest.fixture
def cloud_workspace() -> tuple[Workspace, str]:
    """Create a logged-in Cloud profile and return (workspace, profile_name)."""
    workspace = gen_test_workspace(account_handle="test", workspace_handle="foo")
    # Unique name so parallel/repeated runs never collide on saved profiles.
    profile_name = f"logged-in-profile-{uuid.uuid4()}"
    save_profiles(
        ProfilesCollection(
            [
                Profile(
                    name=profile_name,
                    settings={
                        PREFECT_API_URL: workspace.api_url(),
                        PREFECT_API_KEY: "foo",
                    },
                )
            ],
            active=None,
        )
    )
    return workspace, profile_name


class TestAssetList:
    """Tests for `prefect cloud asset ls`."""

    def test_cannot_list_assets_if_not_logged_in(self) -> None:
        """Listing without credentials prompts the user to log in."""
        cloud_profile = f"cloud-foo-{uuid.uuid4()}"
        save_profiles(
            ProfilesCollection([Profile(name=cloud_profile, settings={})], active=None)
        )
        with use_profile(cloud_profile):
            invoke_and_assert(
                ["cloud", "asset", "ls"],
                expected_code=1,
                expected_output_contains="Please log in with `prefect cloud login`",
            )

    def test_list_assets_empty(
        self, respx_mock: respx.MockRouter, cloud_workspace: tuple[Workspace, str]
    ) -> None:
        """An empty workspace reports that no assets were found."""
        workspace, profile_name = cloud_workspace
        respx_mock.post(f"{workspace.api_url()}/assets/filter").mock(
            return_value=httpx.Response(
                status.HTTP_200_OK, json={"assets": [], "total": 0}
            )
        )
        with use_profile(profile_name):
            invoke_and_assert(
                ["cloud", "asset", "ls"],
                expected_code=0,
                expected_output_contains="No assets found in this workspace",
            )

    def test_list_assets(
        self, respx_mock: respx.MockRouter, cloud_workspace: tuple[Workspace, str]
    ) -> None:
        """Assets are rendered in the table along with the total count."""
        workspace, profile_name = cloud_workspace
        assets = [
            {"key": "s3://my-bucket/data.csv", "last_seen": "2026-01-20T18:52:16Z"},
            {"key": "postgres://db/users", "last_seen": "2026-01-21T10:30:00Z"},
        ]
        respx_mock.post(f"{workspace.api_url()}/assets/filter").mock(
            return_value=httpx.Response(
                status.HTTP_200_OK, json={"assets": assets, "total": 2}
            )
        )
        with use_profile(profile_name):
            invoke_and_assert(
                ["cloud", "asset", "ls"],
                expected_code=0,
                expected_output_contains=[
                    "s3://my-bucket/data.csv",
                    "postgres://db/users",
                    "Showing 2 of 2 asset(s)",
                ],
            )

    @pytest.mark.parametrize(
        "flag,value", [("--prefix", "s3://"), ("--search", "bucket")]
    )
    def test_list_assets_with_filters(
        self,
        respx_mock: respx.MockRouter,
        cloud_workspace: tuple[Workspace, str],
        flag: str,
        value: str,
    ) -> None:
        """--prefix and --search both succeed and show matching assets."""
        workspace, profile_name = cloud_workspace
        asset = {"key": "s3://my-bucket/data.csv", "last_seen": "2026-01-20T18:52:16Z"}
        respx_mock.post(f"{workspace.api_url()}/assets/filter").mock(
            return_value=httpx.Response(
                status.HTTP_200_OK, json={"assets": [asset], "total": 1}
            )
        )
        with use_profile(profile_name):
            invoke_and_assert(
                ["cloud", "asset", "ls", flag, value],
                expected_code=0,
                expected_output_contains=asset["key"],
            )

    def test_list_assets_with_limit(
        self, respx_mock: respx.MockRouter, cloud_workspace: tuple[Workspace, str]
    ) -> None:
        """--limit caps the page while the footer reports the real total."""
        workspace, profile_name = cloud_workspace
        assets = [
            {"key": "s3://my-bucket/data.csv", "last_seen": "2026-01-20T18:52:16Z"},
        ]
        respx_mock.post(f"{workspace.api_url()}/assets/filter").mock(
            return_value=httpx.Response(
                status.HTTP_200_OK, json={"assets": assets, "total": 100}
            )
        )
        with use_profile(profile_name):
            invoke_and_assert(
                ["cloud", "asset", "ls", "--limit", "1"],
                expected_code=0,
                expected_output_contains="Showing 1 of 100 asset(s)",
            )

    def test_list_assets_invalid_limit(
        self, cloud_workspace: tuple[Workspace, str]
    ) -> None:
        """A limit outside 1-200 is rejected before hitting the API."""
        _, profile_name = cloud_workspace
        with use_profile(profile_name):
            invoke_and_assert(
                ["cloud", "asset", "ls", "--limit", "500"],
                expected_code=1,
                expected_output_contains="Limit must be between 1 and 200",
            )

    def test_list_assets_json_output(
        self, respx_mock: respx.MockRouter, cloud_workspace: tuple[Workspace, str]
    ) -> None:
        """-o json emits the raw asset fields."""
        workspace, profile_name = cloud_workspace
        asset = {"key": "s3://my-bucket/data.csv", "last_seen": "2026-01-20T18:52:16Z"}
        respx_mock.post(f"{workspace.api_url()}/assets/filter").mock(
            return_value=httpx.Response(
                status.HTTP_200_OK, json={"assets": [asset], "total": 1}
            )
        )
        with use_profile(profile_name):
            invoke_and_assert(
                ["cloud", "asset", "ls", "-o", "json"],
                expected_code=0,
                expected_output_contains=[asset["key"], asset["last_seen"]],
            )

    def test_list_assets_invalid_output_format(
        self, cloud_workspace: tuple[Workspace, str]
    ) -> None:
        """Formats other than json are rejected."""
        _, profile_name = cloud_workspace
        with use_profile(profile_name):
            invoke_and_assert(
                ["cloud", "asset", "ls", "-o", "xml"],
                expected_code=1,
                expected_output_contains="Only 'json' output format is supported",
            )

    def test_assets_alias(
        self, respx_mock: respx.MockRouter, cloud_workspace: tuple[Workspace, str]
    ) -> None:
        """The plural `assets` alias routes to the same command."""
        workspace, profile_name = cloud_workspace
        respx_mock.post(f"{workspace.api_url()}/assets/filter").mock(
            return_value=httpx.Response(
                status.HTTP_200_OK, json={"assets": [], "total": 0}
            )
        )
        with use_profile(profile_name):
            invoke_and_assert(
                ["cloud", "assets", "ls"],
                expected_code=0,
                expected_output_contains="No assets found",
            )


class TestAssetDelete:
    """Tests for `prefect cloud asset delete`."""

    def test_cannot_delete_asset_if_not_logged_in(self) -> None:
        """Deleting without credentials prompts the user to log in."""
        cloud_profile = f"cloud-foo-{uuid.uuid4()}"
        save_profiles(
            ProfilesCollection([Profile(name=cloud_profile, settings={})], active=None)
        )
        with use_profile(cloud_profile):
            invoke_and_assert(
                ["cloud", "asset", "delete", "s3://bucket/data.csv"],
                expected_code=1,
                expected_output_contains="Please log in with `prefect cloud login`",
            )

    def test_delete_asset_with_confirmation(
        self,
        respx_mock: respx.MockRouter,
        cloud_workspace: tuple[Workspace, str],
        monkeypatch: pytest.MonkeyPatch,
    ) -> None:
        """Confirming the interactive prompt deletes the asset."""
        workspace, profile_name = cloud_workspace
        # FIX: was patched twice (string path + a redundant inline
        # `import prefect.cli._app` and second setattr of the same attribute).
        # One string-path patch targets the identical attribute.
        monkeypatch.setattr("prefect.cli._app.is_interactive", lambda: True)
        respx_mock.delete(f"{workspace.api_url()}/assets/key").mock(
            return_value=httpx.Response(status.HTTP_204_NO_CONTENT)
        )
        with use_profile(profile_name):
            invoke_and_assert(
                ["cloud", "asset", "delete", "s3://my-bucket/data.csv"],
                expected_code=0,
                user_input="y" + readchar.key.ENTER,
                expected_output_contains="Deleted asset 's3://my-bucket/data.csv'",
            )

    def test_delete_asset_with_force_flag(
        self, respx_mock: respx.MockRouter, cloud_workspace: tuple[Workspace, str]
    ) -> None:
        """--force skips the confirmation prompt entirely."""
        workspace, profile_name = cloud_workspace
        respx_mock.delete(f"{workspace.api_url()}/assets/key").mock(
            return_value=httpx.Response(status.HTTP_204_NO_CONTENT)
        )
        with use_profile(profile_name):
            invoke_and_assert(
                ["cloud", "asset", "delete", "s3://my-bucket/data.csv", "--force"],
                expected_code=0,
                expected_output_contains="Deleted asset 's3://my-bucket/data.csv'",
            )

    def test_delete_asset_not_found(
        self, respx_mock: respx.MockRouter, cloud_workspace: tuple[Workspace, str]
    ) -> None:
        """A 404 from the API becomes a friendly not-found error."""
        workspace, profile_name = cloud_workspace
        respx_mock.delete(f"{workspace.api_url()}/assets/key").mock(
            return_value=httpx.Response(
                status.HTTP_404_NOT_FOUND, json={"detail": "Asset not found"}
            )
        )
        with use_profile(profile_name):
            invoke_and_assert(
                ["cloud", "asset", "delete", "s3://nonexistent/data.csv", "--force"],
                expected_code=1,
                expected_output_contains="Asset 's3://nonexistent/data.csv' not found",
            )

    def test_delete_asset_abort_confirmation(
        self, cloud_workspace: tuple[Workspace, str], monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """Declining the prompt aborts without calling the API."""
        _, profile_name = cloud_workspace
        # FIX: same de-duplication of the is_interactive patch as above.
        monkeypatch.setattr("prefect.cli._app.is_interactive", lambda: True)
        with use_profile(profile_name):
            invoke_and_assert(
                ["cloud", "asset", "delete", "s3://bucket/data.csv"],
                expected_code=1,
                user_input="n" + readchar.key.ENTER,
                expected_output_contains="Deletion aborted",
            )
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/cli/cloud/test_asset.py", "license": "Apache License 2.0", "lines": 252, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:src/integrations/prefect-gcp/tests/test_cloud_storage_async_dispatch.py
"""Tests for async_dispatch migration in prefect-gcp cloud_storage. These tests verify the critical behavior from issue #15008 where @sync_compatible would incorrectly return coroutines in sync context. """ from io import BytesIO from typing import Coroutine import pytest from prefect_gcp.cloud_storage import ( GcsBucket, acloud_storage_create_bucket, acloud_storage_download_blob_as_bytes, acloud_storage_download_blob_to_file, acloud_storage_upload_blob_from_file, acloud_storage_upload_blob_from_string, cloud_storage_create_bucket, cloud_storage_download_blob_as_bytes, cloud_storage_download_blob_to_file, cloud_storage_upload_blob_from_file, cloud_storage_upload_blob_from_string, ) from prefect import flow class TestCloudStorageCreateBucketAsyncDispatch: """Tests for cloud_storage_create_bucket migrated from @sync_compatible to @async_dispatch.""" def test_cloud_storage_create_bucket_sync_context_returns_value_not_coroutine( self, gcp_credentials ): """cloud_storage_create_bucket must return result (not coroutine) in sync context. This is a critical regression test for issues #14712 and #14625. 
""" @flow def test_flow(): result = cloud_storage_create_bucket("test-bucket", gcp_credentials) assert not isinstance(result, Coroutine), "sync context returned coroutine" return result result = test_flow() assert result is not None async def test_cloud_storage_create_bucket_async_context_works( self, gcp_credentials ): """cloud_storage_create_bucket should work correctly in async context.""" @flow async def test_flow(): result = await acloud_storage_create_bucket("test-bucket", gcp_credentials) return result result = await test_flow() assert result is not None def test_acloud_storage_create_bucket_is_available(self): """acloud_storage_create_bucket should be available for direct async usage.""" assert callable(acloud_storage_create_bucket) class TestCloudStorageDownloadBlobAsBytesAsyncDispatch: """Tests for cloud_storage_download_blob_as_bytes migrated from @sync_compatible to @async_dispatch.""" def test_cloud_storage_download_blob_as_bytes_sync_context_returns_value_not_coroutine( self, gcp_credentials ): """cloud_storage_download_blob_as_bytes must return bytes (not coroutine) in sync context.""" @flow def test_flow(): result = cloud_storage_download_blob_as_bytes( "test-bucket", "test-blob", gcp_credentials ) assert not isinstance(result, Coroutine), "sync context returned coroutine" return result result = test_flow() assert isinstance(result, bytes) async def test_cloud_storage_download_blob_as_bytes_async_context_works( self, gcp_credentials ): """cloud_storage_download_blob_as_bytes should work correctly in async context.""" @flow async def test_flow(): result = await acloud_storage_download_blob_as_bytes( "test-bucket", "test-blob", gcp_credentials ) return result result = await test_flow() assert isinstance(result, bytes) def test_acloud_storage_download_blob_as_bytes_is_available(self): """acloud_storage_download_blob_as_bytes should be available for direct async usage.""" assert callable(acloud_storage_download_blob_as_bytes) class 
TestCloudStorageDownloadBlobToFileAsyncDispatch: """Tests for cloud_storage_download_blob_to_file migrated from @sync_compatible to @async_dispatch.""" def test_cloud_storage_download_blob_to_file_sync_context_returns_value_not_coroutine( self, gcp_credentials, tmp_path ): """cloud_storage_download_blob_to_file must return path (not coroutine) in sync context.""" output_file = tmp_path / "output.txt" @flow def test_flow(): result = cloud_storage_download_blob_to_file( "test-bucket", "test-blob", str(output_file), gcp_credentials ) assert not isinstance(result, Coroutine), "sync context returned coroutine" return result result = test_flow() assert result is not None async def test_cloud_storage_download_blob_to_file_async_context_works( self, gcp_credentials, tmp_path ): """cloud_storage_download_blob_to_file should work correctly in async context.""" output_file = tmp_path / "output.txt" @flow async def test_flow(): result = await acloud_storage_download_blob_to_file( "test-bucket", "test-blob", str(output_file), gcp_credentials ) return result result = await test_flow() assert result is not None def test_acloud_storage_download_blob_to_file_is_available(self): """acloud_storage_download_blob_to_file should be available for direct async usage.""" assert callable(acloud_storage_download_blob_to_file) class TestCloudStorageUploadBlobFromStringAsyncDispatch: """Tests for cloud_storage_upload_blob_from_string migrated from @sync_compatible to @async_dispatch.""" def test_cloud_storage_upload_blob_from_string_sync_context_returns_value_not_coroutine( self, gcp_credentials ): """cloud_storage_upload_blob_from_string must return result (not coroutine) in sync context.""" @flow def test_flow(): result = cloud_storage_upload_blob_from_string( "test data", "test-bucket", "test-blob", gcp_credentials ) assert not isinstance(result, Coroutine), "sync context returned coroutine" return result result = test_flow() assert result is not None async def 
test_cloud_storage_upload_blob_from_string_async_context_works( self, gcp_credentials ): """cloud_storage_upload_blob_from_string should work correctly in async context.""" @flow async def test_flow(): result = await acloud_storage_upload_blob_from_string( "test data", "test-bucket", "test-blob", gcp_credentials ) return result result = await test_flow() assert result is not None def test_acloud_storage_upload_blob_from_string_is_available(self): """acloud_storage_upload_blob_from_string should be available for direct async usage.""" assert callable(acloud_storage_upload_blob_from_string) class TestCloudStorageUploadBlobFromFileAsyncDispatch: """Tests for cloud_storage_upload_blob_from_file migrated from @sync_compatible to @async_dispatch.""" def test_cloud_storage_upload_blob_from_file_sync_context_returns_value_not_coroutine( self, gcp_credentials, tmp_path ): """cloud_storage_upload_blob_from_file must return result (not coroutine) in sync context.""" test_file = tmp_path / "test.txt" test_file.write_text("test data") @flow def test_flow(): result = cloud_storage_upload_blob_from_file( str(test_file), "test-bucket", "test-blob", gcp_credentials ) assert not isinstance(result, Coroutine), "sync context returned coroutine" return result result = test_flow() assert result is not None async def test_cloud_storage_upload_blob_from_file_async_context_works( self, gcp_credentials, tmp_path ): """cloud_storage_upload_blob_from_file should work correctly in async context.""" test_file = tmp_path / "test.txt" test_file.write_text("test data") @flow async def test_flow(): result = await acloud_storage_upload_blob_from_file( str(test_file), "test-bucket", "test-blob", gcp_credentials ) return result result = await test_flow() assert result is not None def test_acloud_storage_upload_blob_from_file_is_available(self): """acloud_storage_upload_blob_from_file should be available for direct async usage.""" assert callable(acloud_storage_upload_blob_from_file) class 
TestGcsBucketReadPathAsyncDispatch: """Tests for GcsBucket.read_path migrated from @sync_compatible to @async_dispatch.""" @pytest.fixture def gcs_bucket(self, gcp_credentials): return GcsBucket(bucket="test-bucket", gcp_credentials=gcp_credentials) def test_read_path_sync_context_returns_value_not_coroutine(self, gcs_bucket): """read_path must return bytes (not coroutine) in sync context.""" result = gcs_bucket.read_path("test-path") assert not isinstance(result, Coroutine), "sync context returned coroutine" assert isinstance(result, bytes) async def test_read_path_async_context_works(self, gcs_bucket): """read_path should work correctly in async context.""" result = await gcs_bucket.aread_path("test-path") assert isinstance(result, bytes) def test_aread_path_is_available(self, gcs_bucket): """aread_path should be available for direct async usage.""" assert hasattr(gcs_bucket, "aread_path") assert callable(gcs_bucket.aread_path) class TestGcsBucketWritePathAsyncDispatch: """Tests for GcsBucket.write_path migrated from @sync_compatible to @async_dispatch.""" @pytest.fixture def gcs_bucket(self, gcp_credentials): return GcsBucket(bucket="test-bucket", gcp_credentials=gcp_credentials) def test_write_path_sync_context_returns_value_not_coroutine(self, gcs_bucket): """write_path must return path (not coroutine) in sync context.""" result = gcs_bucket.write_path("test-path", b"test content") assert not isinstance(result, Coroutine), "sync context returned coroutine" async def test_write_path_async_context_works(self, gcs_bucket): """write_path should work correctly in async context.""" result = await gcs_bucket.awrite_path("test-path", b"test content") assert result is not None def test_awrite_path_is_available(self, gcs_bucket): """awrite_path should be available for direct async usage.""" assert hasattr(gcs_bucket, "awrite_path") assert callable(gcs_bucket.awrite_path) class TestGcsBucketGetBucketAsyncDispatch: """Tests for GcsBucket.get_bucket migrated from 
@sync_compatible to @async_dispatch.""" @pytest.fixture def gcs_bucket(self, gcp_credentials): return GcsBucket(bucket="test-bucket", gcp_credentials=gcp_credentials) def test_get_bucket_sync_context_returns_value_not_coroutine(self, gcs_bucket): """get_bucket must return bucket (not coroutine) in sync context.""" result = gcs_bucket.get_bucket() assert not isinstance(result, Coroutine), "sync context returned coroutine" async def test_get_bucket_async_context_works(self, gcs_bucket): """get_bucket should work correctly in async context.""" result = await gcs_bucket.aget_bucket() assert result is not None def test_aget_bucket_is_available(self, gcs_bucket): """aget_bucket should be available for direct async usage.""" assert hasattr(gcs_bucket, "aget_bucket") assert callable(gcs_bucket.aget_bucket) class TestGcsBucketListBlobsAsyncDispatch: """Tests for GcsBucket.list_blobs migrated from @sync_compatible to @async_dispatch.""" @pytest.fixture def gcs_bucket(self, gcp_credentials): return GcsBucket(bucket="test-bucket", gcp_credentials=gcp_credentials) def test_list_blobs_sync_context_returns_value_not_coroutine(self, gcs_bucket): """list_blobs must return list (not coroutine) in sync context.""" result = gcs_bucket.list_blobs() assert not isinstance(result, Coroutine), "sync context returned coroutine" assert isinstance(result, list) async def test_list_blobs_async_context_works(self, gcs_bucket): """list_blobs should work correctly in async context.""" result = await gcs_bucket.alist_blobs() assert isinstance(result, list) def test_alist_blobs_is_available(self, gcs_bucket): """alist_blobs should be available for direct async usage.""" assert hasattr(gcs_bucket, "alist_blobs") assert callable(gcs_bucket.alist_blobs) class TestGcsBucketListFoldersAsyncDispatch: """Tests for GcsBucket.list_folders migrated from @sync_compatible to @async_dispatch.""" @pytest.fixture def gcs_bucket(self, gcp_credentials): return GcsBucket(bucket="test-bucket", 
gcp_credentials=gcp_credentials) def test_list_folders_sync_context_returns_value_not_coroutine(self, gcs_bucket): """list_folders must return list (not coroutine) in sync context.""" result = gcs_bucket.list_folders() assert not isinstance(result, Coroutine), "sync context returned coroutine" assert isinstance(result, list) async def test_list_folders_async_context_works(self, gcs_bucket): """list_folders should work correctly in async context.""" result = await gcs_bucket.alist_folders() assert isinstance(result, list) def test_alist_folders_is_available(self, gcs_bucket): """alist_folders should be available for direct async usage.""" assert hasattr(gcs_bucket, "alist_folders") assert callable(gcs_bucket.alist_folders) class TestGcsBucketDownloadObjectToPathAsyncDispatch: """Tests for GcsBucket.download_object_to_path migrated from @sync_compatible to @async_dispatch.""" @pytest.fixture def gcs_bucket(self, gcp_credentials): return GcsBucket( bucket="test-bucket", gcp_credentials=gcp_credentials, bucket_folder="base_folder", ) def test_download_object_to_path_sync_context_returns_value_not_coroutine( self, gcs_bucket, tmp_path ): """download_object_to_path must return path (not coroutine) in sync context.""" to_path = tmp_path / "downloaded.txt" result = gcs_bucket.download_object_to_path("nested_blob.txt", to_path) assert not isinstance(result, Coroutine), "sync context returned coroutine" async def test_download_object_to_path_async_context_works( self, gcs_bucket, tmp_path ): """download_object_to_path should work correctly in async context.""" to_path = tmp_path / "downloaded.txt" result = await gcs_bucket.adownload_object_to_path("nested_blob.txt", to_path) assert result is not None def test_adownload_object_to_path_is_available(self, gcs_bucket): """adownload_object_to_path should be available for direct async usage.""" assert hasattr(gcs_bucket, "adownload_object_to_path") assert callable(gcs_bucket.adownload_object_to_path) class 
TestGcsBucketDownloadObjectToFileObjectAsyncDispatch: """Tests for GcsBucket.download_object_to_file_object migrated from @sync_compatible to @async_dispatch.""" @pytest.fixture def gcs_bucket(self, gcp_credentials): return GcsBucket( bucket="test-bucket", gcp_credentials=gcp_credentials, bucket_folder="base_folder", ) def test_download_object_to_file_object_sync_context_returns_value_not_coroutine( self, gcs_bucket ): """download_object_to_file_object must return file object (not coroutine) in sync context.""" file_obj = BytesIO() result = gcs_bucket.download_object_to_file_object("nested_blob.txt", file_obj) assert not isinstance(result, Coroutine), "sync context returned coroutine" # Returns the file object with content written to it assert result is file_obj async def test_download_object_to_file_object_async_context_works(self, gcs_bucket): """download_object_to_file_object should work correctly in async context.""" file_obj = BytesIO() result = await gcs_bucket.adownload_object_to_file_object( "nested_blob.txt", file_obj ) # Returns the file object with content written to it assert result is file_obj def test_adownload_object_to_file_object_is_available(self, gcs_bucket): """adownload_object_to_file_object should be available for direct async usage.""" assert hasattr(gcs_bucket, "adownload_object_to_file_object") assert callable(gcs_bucket.adownload_object_to_file_object) class TestGcsBucketDownloadFolderToPathAsyncDispatch: """Tests for GcsBucket.download_folder_to_path migrated from @sync_compatible to @async_dispatch.""" @pytest.fixture def gcs_bucket(self, gcp_credentials): return GcsBucket(bucket="test-bucket", gcp_credentials=gcp_credentials) def test_download_folder_to_path_sync_context_returns_value_not_coroutine( self, gcs_bucket, tmp_path ): """download_folder_to_path must return path (not coroutine) in sync context.""" result = gcs_bucket.download_folder_to_path("base_folder", tmp_path) assert not isinstance(result, Coroutine), "sync context 
returned coroutine" async def test_download_folder_to_path_async_context_works( self, gcs_bucket, tmp_path ): """download_folder_to_path should work correctly in async context.""" result = await gcs_bucket.adownload_folder_to_path("base_folder", tmp_path) assert result is not None def test_adownload_folder_to_path_is_available(self, gcs_bucket): """adownload_folder_to_path should be available for direct async usage.""" assert hasattr(gcs_bucket, "adownload_folder_to_path") assert callable(gcs_bucket.adownload_folder_to_path) class TestGcsBucketUploadFromPathAsyncDispatch: """Tests for GcsBucket.upload_from_path migrated from @sync_compatible to @async_dispatch.""" @pytest.fixture def gcs_bucket(self, gcp_credentials): return GcsBucket( bucket="test-bucket", gcp_credentials=gcp_credentials, bucket_folder="base_folder", ) def test_upload_from_path_sync_context_returns_value_not_coroutine( self, gcs_bucket, tmp_path ): """upload_from_path must return path (not coroutine) in sync context.""" test_file = tmp_path / "test.txt" test_file.write_text("test content") result = gcs_bucket.upload_from_path(test_file) assert not isinstance(result, Coroutine), "sync context returned coroutine" async def test_upload_from_path_async_context_works(self, gcs_bucket, tmp_path): """upload_from_path should work correctly in async context.""" test_file = tmp_path / "test.txt" test_file.write_text("test content") result = await gcs_bucket.aupload_from_path(test_file) assert result is not None def test_aupload_from_path_is_available(self, gcs_bucket): """aupload_from_path should be available for direct async usage.""" assert hasattr(gcs_bucket, "aupload_from_path") assert callable(gcs_bucket.aupload_from_path) class TestGcsBucketUploadFromFileObjectAsyncDispatch: """Tests for GcsBucket.upload_from_file_object migrated from @sync_compatible to @async_dispatch.""" @pytest.fixture def gcs_bucket(self, gcp_credentials): return GcsBucket( bucket="test-bucket", gcp_credentials=gcp_credentials, 
bucket_folder="base_folder", ) def test_upload_from_file_object_sync_context_returns_value_not_coroutine( self, gcs_bucket ): """upload_from_file_object must return path (not coroutine) in sync context.""" file_obj = BytesIO(b"test content") result = gcs_bucket.upload_from_file_object(file_obj, "test-blob") assert not isinstance(result, Coroutine), "sync context returned coroutine" async def test_upload_from_file_object_async_context_works(self, gcs_bucket): """upload_from_file_object should work correctly in async context.""" file_obj = BytesIO(b"test content") result = await gcs_bucket.aupload_from_file_object(file_obj, "test-blob") assert result is not None def test_aupload_from_file_object_is_available(self, gcs_bucket): """aupload_from_file_object should be available for direct async usage.""" assert hasattr(gcs_bucket, "aupload_from_file_object") assert callable(gcs_bucket.aupload_from_file_object) class TestGcsBucketUploadFromFolderAsyncDispatch: """Tests for GcsBucket.upload_from_folder migrated from @sync_compatible to @async_dispatch.""" @pytest.fixture def gcs_bucket(self, gcp_credentials): return GcsBucket(bucket="test-bucket", gcp_credentials=gcp_credentials) def test_upload_from_folder_sync_context_returns_value_not_coroutine( self, gcs_bucket, tmp_path ): """upload_from_folder must return path (not coroutine) in sync context.""" test_file = tmp_path / "test.txt" test_file.write_text("test content") result = gcs_bucket.upload_from_folder(tmp_path) assert not isinstance(result, Coroutine), "sync context returned coroutine" async def test_upload_from_folder_async_context_works(self, gcs_bucket, tmp_path): """upload_from_folder should work correctly in async context.""" test_file = tmp_path / "test.txt" test_file.write_text("test content") result = await gcs_bucket.aupload_from_folder(tmp_path) assert result is not None def test_aupload_from_folder_is_available(self, gcs_bucket): """aupload_from_folder should be available for direct async usage.""" 
assert hasattr(gcs_bucket, "aupload_from_folder") assert callable(gcs_bucket.aupload_from_folder) class TestGcsBucketGetDirectoryAsyncDispatch: """Tests for GcsBucket.get_directory migrated from @sync_compatible to @async_dispatch.""" @pytest.fixture def gcs_bucket(self, gcp_credentials): return GcsBucket(bucket="test-bucket", gcp_credentials=gcp_credentials) def test_get_directory_sync_context_returns_value_not_coroutine( self, gcs_bucket, tmp_path ): """get_directory must return path (not coroutine) in sync context.""" result = gcs_bucket.get_directory(local_path=str(tmp_path)) assert not isinstance(result, Coroutine), "sync context returned coroutine" async def test_get_directory_async_context_works(self, gcs_bucket, tmp_path): """get_directory should work correctly in async context.""" result = await gcs_bucket.aget_directory(local_path=str(tmp_path)) assert result is not None def test_aget_directory_is_available(self, gcs_bucket): """aget_directory should be available for direct async usage.""" assert hasattr(gcs_bucket, "aget_directory") assert callable(gcs_bucket.aget_directory) class TestGcsBucketPutDirectoryAsyncDispatch: """Tests for GcsBucket.put_directory migrated from @sync_compatible to @async_dispatch.""" @pytest.fixture def gcs_bucket(self, gcp_credentials): return GcsBucket(bucket="test-bucket", gcp_credentials=gcp_credentials) def test_put_directory_sync_context_returns_value_not_coroutine( self, gcs_bucket, tmp_path ): """put_directory must return count (not coroutine) in sync context.""" test_file = tmp_path / "test.txt" test_file.write_text("test content") result = gcs_bucket.put_directory(local_path=str(tmp_path)) assert not isinstance(result, Coroutine), "sync context returned coroutine" async def test_put_directory_async_context_works(self, gcs_bucket, tmp_path): """put_directory should work correctly in async context.""" test_file = tmp_path / "test.txt" test_file.write_text("test content") result = await 
gcs_bucket.aput_directory(local_path=str(tmp_path)) assert result is not None def test_aput_directory_is_available(self, gcs_bucket): """aput_directory should be available for direct async usage.""" assert hasattr(gcs_bucket, "aput_directory") assert callable(gcs_bucket.aput_directory)
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/integrations/prefect-gcp/tests/test_cloud_storage_async_dispatch.py", "license": "Apache License 2.0", "lines": 458, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:src/integrations/prefect-gcp/tests/test_bigquery_async_dispatch.py
"""Tests for async_dispatch migration in prefect-gcp bigquery. These tests verify the critical behavior from issue #15008 where @sync_compatible would incorrectly return coroutines in sync context. """ from typing import Coroutine from unittest.mock import MagicMock import pytest from prefect_gcp.bigquery import ( BigQueryWarehouse, abigquery_create_table, abigquery_insert_stream, abigquery_load_cloud_storage, abigquery_load_file, abigquery_query, bigquery_create_table, bigquery_insert_stream, bigquery_load_cloud_storage, bigquery_load_file, bigquery_query, ) from prefect import flow @pytest.fixture def mock_connection(): """Mock connection for BigQueryWarehouse tests.""" mock_cursor = MagicMock() results = iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) mock_cursor.fetchone.side_effect = lambda: (next(results),) mock_cursor.fetchmany.side_effect = lambda size: list( (next(results),) for i in range(size) ) mock_cursor.fetchall.side_effect = lambda: [(result,) for result in results] mock_connection = MagicMock() mock_connection.cursor.return_value = mock_cursor return mock_connection class TestBigQueryQueryAsyncDispatch: """Tests for bigquery_query migrated from @sync_compatible to @async_dispatch.""" def test_bigquery_query_sync_context_returns_value_not_coroutine( self, gcp_credentials ): """bigquery_query must return result (not coroutine) in sync context. This is a critical regression test for issues #14712 and #14625. 
""" @flow def test_flow(): result = bigquery_query("SELECT 1", gcp_credentials) assert not isinstance(result, Coroutine), "sync context returned coroutine" return result result = test_flow() assert result is not None async def test_bigquery_query_async_context_works(self, gcp_credentials): """bigquery_query should work correctly in async context.""" @flow async def test_flow(): result = await abigquery_query("SELECT 1", gcp_credentials) return result result = await test_flow() assert result is not None def test_abigquery_query_is_available(self): """abigquery_query should be available for direct async usage.""" assert callable(abigquery_query) class TestBigQueryCreateTableAsyncDispatch: """Tests for bigquery_create_table migrated from @sync_compatible to @async_dispatch.""" def test_bigquery_create_table_sync_context_returns_value_not_coroutine( self, gcp_credentials ): """bigquery_create_table must return result (not coroutine) in sync context.""" @flow def test_flow(): result = bigquery_create_table( "test_dataset", "test_table", gcp_credentials, schema=[{"name": "col1", "type": "STRING"}], ) assert not isinstance(result, Coroutine), "sync context returned coroutine" return result result = test_flow() assert result is not None async def test_bigquery_create_table_async_context_works(self, gcp_credentials): """bigquery_create_table should work correctly in async context.""" @flow async def test_flow(): result = await abigquery_create_table( "test_dataset", "test_table", gcp_credentials, schema=[{"name": "col1", "type": "STRING"}], ) return result result = await test_flow() assert result is not None def test_abigquery_create_table_is_available(self): """abigquery_create_table should be available for direct async usage.""" assert callable(abigquery_create_table) class TestBigQueryInsertStreamAsyncDispatch: """Tests for bigquery_insert_stream migrated from @sync_compatible to @async_dispatch.""" def 
test_bigquery_insert_stream_sync_context_returns_value_not_coroutine( self, gcp_credentials ): """bigquery_insert_stream must return result (not coroutine) in sync context.""" @flow def test_flow(): result = bigquery_insert_stream( "test_dataset", "test_table", [{"col1": "value1"}], gcp_credentials, ) assert not isinstance(result, Coroutine), "sync context returned coroutine" return result result = test_flow() assert result is not None async def test_bigquery_insert_stream_async_context_works(self, gcp_credentials): """bigquery_insert_stream should work correctly in async context.""" @flow async def test_flow(): result = await abigquery_insert_stream( "test_dataset", "test_table", [{"col1": "value1"}], gcp_credentials, ) return result result = await test_flow() assert result is not None def test_abigquery_insert_stream_is_available(self): """abigquery_insert_stream should be available for direct async usage.""" assert callable(abigquery_insert_stream) class TestBigQueryLoadCloudStorageAsyncDispatch: """Tests for bigquery_load_cloud_storage migrated from @sync_compatible to @async_dispatch.""" def test_bigquery_load_cloud_storage_sync_context_returns_value_not_coroutine( self, gcp_credentials ): """bigquery_load_cloud_storage must return result (not coroutine) in sync context.""" @flow def test_flow(): result = bigquery_load_cloud_storage( "test_dataset", "test_table", "gs://bucket/file.csv", gcp_credentials, ) assert not isinstance(result, Coroutine), "sync context returned coroutine" return result result = test_flow() assert result is not None async def test_bigquery_load_cloud_storage_async_context_works( self, gcp_credentials ): """bigquery_load_cloud_storage should work correctly in async context.""" @flow async def test_flow(): result = await abigquery_load_cloud_storage( "test_dataset", "test_table", "gs://bucket/file.csv", gcp_credentials, ) return result result = await test_flow() assert result is not None def 
test_abigquery_load_cloud_storage_is_available(self): """abigquery_load_cloud_storage should be available for direct async usage.""" assert callable(abigquery_load_cloud_storage) class TestBigQueryLoadFileAsyncDispatch: """Tests for bigquery_load_file migrated from @sync_compatible to @async_dispatch.""" def test_bigquery_load_file_sync_context_returns_value_not_coroutine( self, gcp_credentials, tmp_path ): """bigquery_load_file must return result (not coroutine) in sync context.""" test_file = tmp_path / "test.csv" test_file.write_text("col1\nvalue1") @flow def test_flow(): result = bigquery_load_file( "test_dataset", "test_table", str(test_file), gcp_credentials, ) assert not isinstance(result, Coroutine), "sync context returned coroutine" return result result = test_flow() assert result is not None async def test_bigquery_load_file_async_context_works( self, gcp_credentials, tmp_path ): """bigquery_load_file should work correctly in async context.""" test_file = tmp_path / "test.csv" test_file.write_text("col1\nvalue1") @flow async def test_flow(): result = await abigquery_load_file( "test_dataset", "test_table", str(test_file), gcp_credentials, ) return result result = await test_flow() assert result is not None def test_abigquery_load_file_is_available(self): """abigquery_load_file should be available for direct async usage.""" assert callable(abigquery_load_file) class TestBigQueryWarehouseFetchOneAsyncDispatch: """Tests for BigQueryWarehouse.fetch_one migrated from @sync_compatible to @async_dispatch.""" @pytest.fixture def warehouse(self, gcp_credentials, mock_connection): warehouse = BigQueryWarehouse(gcp_credentials=gcp_credentials) warehouse._connection = mock_connection return warehouse def test_fetch_one_sync_context_returns_value_not_coroutine(self, warehouse): """fetch_one must return result (not coroutine) in sync context.""" result = warehouse.fetch_one("SELECT 1") assert not isinstance(result, Coroutine), "sync context returned coroutine" async 
def test_fetch_one_async_context_works(self, warehouse): """fetch_one should work correctly in async context.""" result = await warehouse.afetch_one("SELECT 1") assert result is not None def test_afetch_one_is_available(self, warehouse): """afetch_one should be available for direct async usage.""" assert hasattr(warehouse, "afetch_one") assert callable(warehouse.afetch_one) class TestBigQueryWarehouseFetchManyAsyncDispatch: """Tests for BigQueryWarehouse.fetch_many migrated from @sync_compatible to @async_dispatch.""" @pytest.fixture def warehouse(self, gcp_credentials, mock_connection): warehouse = BigQueryWarehouse(gcp_credentials=gcp_credentials) warehouse._connection = mock_connection return warehouse def test_fetch_many_sync_context_returns_value_not_coroutine(self, warehouse): """fetch_many must return result (not coroutine) in sync context.""" result = warehouse.fetch_many("SELECT 1", size=5) assert not isinstance(result, Coroutine), "sync context returned coroutine" async def test_fetch_many_async_context_works(self, warehouse): """fetch_many should work correctly in async context.""" result = await warehouse.afetch_many("SELECT 1", size=5) assert result is not None def test_afetch_many_is_available(self, warehouse): """afetch_many should be available for direct async usage.""" assert hasattr(warehouse, "afetch_many") assert callable(warehouse.afetch_many) class TestBigQueryWarehouseFetchAllAsyncDispatch: """Tests for BigQueryWarehouse.fetch_all migrated from @sync_compatible to @async_dispatch.""" @pytest.fixture def warehouse(self, gcp_credentials, mock_connection): warehouse = BigQueryWarehouse(gcp_credentials=gcp_credentials) warehouse._connection = mock_connection return warehouse def test_fetch_all_sync_context_returns_value_not_coroutine(self, warehouse): """fetch_all must return result (not coroutine) in sync context.""" result = warehouse.fetch_all("SELECT 1") assert not isinstance(result, Coroutine), "sync context returned coroutine" async def 
test_fetch_all_async_context_works(self, warehouse): """fetch_all should work correctly in async context.""" result = await warehouse.afetch_all("SELECT 1") assert result is not None def test_afetch_all_is_available(self, warehouse): """afetch_all should be available for direct async usage.""" assert hasattr(warehouse, "afetch_all") assert callable(warehouse.afetch_all) class TestBigQueryWarehouseExecuteAsyncDispatch: """Tests for BigQueryWarehouse.execute migrated from @sync_compatible to @async_dispatch.""" @pytest.fixture def warehouse(self, gcp_credentials, mock_connection): warehouse = BigQueryWarehouse(gcp_credentials=gcp_credentials) warehouse._connection = mock_connection return warehouse def test_execute_sync_context_returns_value_not_coroutine(self, warehouse): """execute must return result (not coroutine) in sync context.""" result = warehouse.execute("SELECT 1") assert not isinstance(result, Coroutine), "sync context returned coroutine" async def test_execute_async_context_works(self, warehouse): """execute should work correctly in async context.""" # execute returns None - just verify it doesn't raise await warehouse.aexecute("SELECT 1") def test_aexecute_is_available(self, warehouse): """aexecute should be available for direct async usage.""" assert hasattr(warehouse, "aexecute") assert callable(warehouse.aexecute) class TestBigQueryWarehouseExecuteManyAsyncDispatch: """Tests for BigQueryWarehouse.execute_many migrated from @sync_compatible to @async_dispatch.""" @pytest.fixture def warehouse(self, gcp_credentials, mock_connection): warehouse = BigQueryWarehouse(gcp_credentials=gcp_credentials) warehouse._connection = mock_connection return warehouse def test_execute_many_sync_context_returns_value_not_coroutine(self, warehouse): """execute_many must return result (not coroutine) in sync context.""" result = warehouse.execute_many("SELECT 1", seq_of_parameters=[{}]) assert not isinstance(result, Coroutine), "sync context returned coroutine" async def 
test_execute_many_async_context_works(self, warehouse): """execute_many should work correctly in async context.""" # execute_many returns None - just verify it doesn't raise await warehouse.aexecute_many("SELECT 1", seq_of_parameters=[{}]) def test_aexecute_many_is_available(self, warehouse): """aexecute_many should be available for direct async usage.""" assert hasattr(warehouse, "aexecute_many") assert callable(warehouse.aexecute_many)
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/integrations/prefect-gcp/tests/test_bigquery_async_dispatch.py", "license": "Apache License 2.0", "lines": 299, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:src/integrations/prefect-sqlalchemy/tests/test_async_dispatch.py
"""Tests for the SqlAlchemyConnector class split. These tests verify that SqlAlchemyConnector (sync) and AsyncSqlAlchemyConnector (async) work correctly with their respective driver types. """ from typing import Coroutine import pytest from prefect_sqlalchemy.credentials import ( AsyncDriver, ConnectionComponents, SyncDriver, ) from prefect_sqlalchemy.database import AsyncSqlAlchemyConnector, SqlAlchemyConnector from sqlalchemy.engine.cursor import CursorResult from prefect import flow class TestSqlAlchemyConnectorSync: """Tests for SqlAlchemyConnector with sync drivers.""" def test_fetch_one_sync_context_returns_value_not_coroutine(self, tmp_path): """fetch_one must return tuple (not coroutine) in sync context. This is a critical regression test for issues #14712 and #14625. """ @flow def test_flow(): with SqlAlchemyConnector( connection_info=ConnectionComponents( driver=SyncDriver.SQLITE_PYSQLITE, database=str(tmp_path / "test.db"), ) ) as connector: connector.execute( "CREATE TABLE IF NOT EXISTS customers (name varchar, address varchar);" ) connector.execute( "INSERT INTO customers (name, address) VALUES (:name, :address);", parameters={"name": "Marvin", "address": "Highway 42"}, ) result = connector.fetch_one("SELECT * FROM customers") assert not isinstance(result, Coroutine), ( "sync context returned coroutine" ) return result result = test_flow() assert result == ("Marvin", "Highway 42") def test_fetch_many_sync_context_returns_value_not_coroutine(self, tmp_path): """fetch_many must return list (not coroutine) in sync context. This is a critical regression test for issues #14712 and #14625. 
""" @flow def test_flow(): with SqlAlchemyConnector( connection_info=ConnectionComponents( driver=SyncDriver.SQLITE_PYSQLITE, database=str(tmp_path / "test.db"), ) ) as connector: connector.execute( "CREATE TABLE IF NOT EXISTS customers (name varchar, address varchar);" ) connector.execute_many( "INSERT INTO customers (name, address) VALUES (:name, :address);", seq_of_parameters=[ {"name": "Ford", "address": "Highway 42"}, {"name": "Unknown", "address": "Space"}, ], ) result = connector.fetch_many("SELECT * FROM customers", size=2) assert not isinstance(result, Coroutine), ( "sync context returned coroutine" ) return result result = test_flow() assert isinstance(result, list) assert len(result) == 2 def test_fetch_all_sync_context_returns_value_not_coroutine(self, tmp_path): """fetch_all must return list (not coroutine) in sync context. This is a critical regression test for issues #14712 and #14625. """ @flow def test_flow(): with SqlAlchemyConnector( connection_info=ConnectionComponents( driver=SyncDriver.SQLITE_PYSQLITE, database=str(tmp_path / "test.db"), ) ) as connector: connector.execute( "CREATE TABLE IF NOT EXISTS customers (name varchar, address varchar);" ) connector.execute_many( "INSERT INTO customers (name, address) VALUES (:name, :address);", seq_of_parameters=[ {"name": "Ford", "address": "Highway 42"}, {"name": "Unknown", "address": "Space"}, ], ) result = connector.fetch_all("SELECT * FROM customers") assert not isinstance(result, Coroutine), ( "sync context returned coroutine" ) return result result = test_flow() assert isinstance(result, list) assert len(result) == 2 def test_execute_sync_context_returns_value_not_coroutine(self, tmp_path): """execute must return CursorResult (not coroutine) in sync context. This is a critical regression test for issues #14712 and #14625. 
""" @flow def test_flow(): with SqlAlchemyConnector( connection_info=ConnectionComponents( driver=SyncDriver.SQLITE_PYSQLITE, database=str(tmp_path / "test.db"), ) ) as connector: result = connector.execute( "CREATE TABLE IF NOT EXISTS customers (name varchar, address varchar);" ) assert not isinstance(result, Coroutine), ( "sync context returned coroutine" ) return result result = test_flow() assert isinstance(result, CursorResult) def test_execute_many_sync_context_returns_value_not_coroutine(self, tmp_path): """execute_many must return CursorResult (not coroutine) in sync context. This is a critical regression test for issues #14712 and #14625. """ @flow def test_flow(): with SqlAlchemyConnector( connection_info=ConnectionComponents( driver=SyncDriver.SQLITE_PYSQLITE, database=str(tmp_path / "test.db"), ) ) as connector: connector.execute( "CREATE TABLE IF NOT EXISTS customers (name varchar, address varchar);" ) result = connector.execute_many( "INSERT INTO customers (name, address) VALUES (:name, :address);", seq_of_parameters=[ {"name": "Ford", "address": "Highway 42"}, {"name": "Unknown", "address": "Space"}, ], ) assert not isinstance(result, Coroutine), ( "sync context returned coroutine" ) return result result = test_flow() assert isinstance(result, CursorResult) def test_reset_connections_sync_context_returns_none_not_coroutine(self, tmp_path): """reset_connections must not return coroutine in sync context. This is a critical regression test for issues #14712 and #14625. 
""" @flow def test_flow(): with SqlAlchemyConnector( connection_info=ConnectionComponents( driver=SyncDriver.SQLITE_PYSQLITE, database=str(tmp_path / "test.db"), ) ) as connector: connector.execute( "CREATE TABLE IF NOT EXISTS customers (name varchar, address varchar);" ) connector.fetch_one("SELECT 1") result = connector.reset_connections() assert not isinstance(result, Coroutine), ( "sync context returned coroutine" ) return result result = test_flow() assert result is None class TestAsyncSqlAlchemyConnector: """Tests for AsyncSqlAlchemyConnector with async drivers.""" async def test_fetch_one_async_context_works(self, tmp_path): """fetch_one should work correctly in async context.""" @flow async def test_flow(): async with AsyncSqlAlchemyConnector( connection_info=ConnectionComponents( driver=AsyncDriver.SQLITE_AIOSQLITE, database=str(tmp_path / "test.db"), ) ) as connector: await connector.execute( "CREATE TABLE IF NOT EXISTS customers (name varchar, address varchar);" ) await connector.execute( "INSERT INTO customers (name, address) VALUES (:name, :address);", parameters={"name": "Marvin", "address": "Highway 42"}, ) result = await connector.fetch_one("SELECT * FROM customers") return result result = await test_flow() assert result == ("Marvin", "Highway 42") async def test_fetch_many_async_context_works(self, tmp_path): """fetch_many should work correctly in async context.""" @flow async def test_flow(): async with AsyncSqlAlchemyConnector( connection_info=ConnectionComponents( driver=AsyncDriver.SQLITE_AIOSQLITE, database=str(tmp_path / "test.db"), ) ) as connector: await connector.execute( "CREATE TABLE IF NOT EXISTS customers (name varchar, address varchar);" ) await connector.execute_many( "INSERT INTO customers (name, address) VALUES (:name, :address);", seq_of_parameters=[ {"name": "Ford", "address": "Highway 42"}, {"name": "Unknown", "address": "Space"}, ], ) result = await connector.fetch_many("SELECT * FROM customers", size=2) return result result = 
await test_flow() assert isinstance(result, list) assert len(result) == 2 async def test_fetch_all_async_context_works(self, tmp_path): """fetch_all should work correctly in async context.""" @flow async def test_flow(): async with AsyncSqlAlchemyConnector( connection_info=ConnectionComponents( driver=AsyncDriver.SQLITE_AIOSQLITE, database=str(tmp_path / "test.db"), ) ) as connector: await connector.execute( "CREATE TABLE IF NOT EXISTS customers (name varchar, address varchar);" ) await connector.execute_many( "INSERT INTO customers (name, address) VALUES (:name, :address);", seq_of_parameters=[ {"name": "Ford", "address": "Highway 42"}, {"name": "Unknown", "address": "Space"}, ], ) result = await connector.fetch_all("SELECT * FROM customers") return result result = await test_flow() assert isinstance(result, list) assert len(result) == 2 async def test_execute_async_context_works(self, tmp_path): """execute should work correctly in async context.""" @flow async def test_flow(): async with AsyncSqlAlchemyConnector( connection_info=ConnectionComponents( driver=AsyncDriver.SQLITE_AIOSQLITE, database=str(tmp_path / "test.db"), ) ) as connector: result = await connector.execute( "CREATE TABLE IF NOT EXISTS customers (name varchar, address varchar);" ) return result result = await test_flow() assert isinstance(result, CursorResult) async def test_execute_many_async_context_works(self, tmp_path): """execute_many should work correctly in async context.""" @flow async def test_flow(): async with AsyncSqlAlchemyConnector( connection_info=ConnectionComponents( driver=AsyncDriver.SQLITE_AIOSQLITE, database=str(tmp_path / "test.db"), ) ) as connector: await connector.execute( "CREATE TABLE IF NOT EXISTS customers (name varchar, address varchar);" ) result = await connector.execute_many( "INSERT INTO customers (name, address) VALUES (:name, :address);", seq_of_parameters=[ {"name": "Ford", "address": "Highway 42"}, {"name": "Unknown", "address": "Space"}, ], ) return result 
result = await test_flow() assert isinstance(result, CursorResult) async def test_reset_connections_async_context_works(self, tmp_path): """reset_connections should work correctly in async context.""" @flow async def test_flow(): async with AsyncSqlAlchemyConnector( connection_info=ConnectionComponents( driver=AsyncDriver.SQLITE_AIOSQLITE, database=str(tmp_path / "test.db"), ) ) as connector: await connector.execute( "CREATE TABLE IF NOT EXISTS customers (name varchar, address varchar);" ) await connector.fetch_one("SELECT 1") result = await connector.reset_connections() return result result = await test_flow() assert result is None class TestDriverTypeEnforcement: """Tests that verify sync connector rejects async drivers and vice versa.""" def test_sync_connector_raises_for_async_driver(self, tmp_path): """SqlAlchemyConnector should raise ValueError when used with async drivers.""" with pytest.raises(ValueError, match="async driver"): SqlAlchemyConnector( connection_info=ConnectionComponents( driver=AsyncDriver.SQLITE_AIOSQLITE, database=str(tmp_path / "test.db"), ) ) def test_async_connector_raises_for_sync_driver(self, tmp_path): """AsyncSqlAlchemyConnector should raise ValueError when used with sync drivers.""" with pytest.raises(ValueError, match="sync driver"): AsyncSqlAlchemyConnector( connection_info=ConnectionComponents( driver=SyncDriver.SQLITE_PYSQLITE, database=str(tmp_path / "test.db"), ) )
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/integrations/prefect-sqlalchemy/tests/test_async_dispatch.py", "license": "Apache License 2.0", "lines": 324, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:src/integrations/prefect-gcp/tests/test_secret_manager_async_dispatch.py
"""Tests for async_dispatch migration in prefect-gcp secret_manager. These tests verify the critical behavior from issue #15008 where @sync_compatible would incorrectly return coroutines in sync context. """ from typing import Coroutine import pytest from prefect_gcp.secret_manager import ( GcpSecret, acreate_secret, adelete_secret, adelete_secret_version, aread_secret, aupdate_secret, create_secret, delete_secret, delete_secret_version, read_secret, update_secret, ) from prefect import flow class TestCreateSecretAsyncDispatch: """Tests for create_secret migrated from @sync_compatible to @async_dispatch.""" def test_create_secret_sync_context_returns_value_not_coroutine( self, gcp_credentials ): """create_secret must return str (not coroutine) in sync context. This is a critical regression test for issues #14712 and #14625. """ @flow def test_flow(): result = create_secret("test_secret", gcp_credentials) assert not isinstance(result, Coroutine), "sync context returned coroutine" return result result = test_flow() assert isinstance(result, str) assert "test_secret" in result async def test_create_secret_async_context_works(self, gcp_credentials): """create_secret should work correctly in async context.""" @flow async def test_flow(): result = await acreate_secret("test_secret", gcp_credentials) return result result = await test_flow() assert isinstance(result, str) assert "test_secret" in result def test_acreate_secret_is_available(self): """acreate_secret should be available for direct async usage.""" assert callable(acreate_secret) class TestUpdateSecretAsyncDispatch: """Tests for update_secret migrated from @sync_compatible to @async_dispatch.""" def test_update_secret_sync_context_returns_value_not_coroutine( self, gcp_credentials ): """update_secret must return str (not coroutine) in sync context.""" @flow def test_flow(): create_secret("test_secret", gcp_credentials) result = update_secret("test_secret", "secret_value", gcp_credentials) assert not 
isinstance(result, Coroutine), "sync context returned coroutine" return result result = test_flow() assert isinstance(result, str) async def test_update_secret_async_context_works(self, gcp_credentials): """update_secret should work correctly in async context.""" @flow async def test_flow(): await acreate_secret("test_secret", gcp_credentials) result = await aupdate_secret( "test_secret", "secret_value", gcp_credentials ) return result result = await test_flow() assert isinstance(result, str) def test_aupdate_secret_is_available(self): """aupdate_secret should be available for direct async usage.""" assert callable(aupdate_secret) class TestReadSecretAsyncDispatch: """Tests for read_secret migrated from @sync_compatible to @async_dispatch.""" def test_read_secret_sync_context_returns_value_not_coroutine( self, gcp_credentials ): """read_secret must return str (not coroutine) in sync context.""" @flow def test_flow(): result = read_secret("test_secret", gcp_credentials) assert not isinstance(result, Coroutine), "sync context returned coroutine" return result result = test_flow() assert isinstance(result, str) async def test_read_secret_async_context_works(self, gcp_credentials): """read_secret should work correctly in async context.""" @flow async def test_flow(): result = await aread_secret("test_secret", gcp_credentials) return result result = await test_flow() assert isinstance(result, str) def test_aread_secret_is_available(self): """aread_secret should be available for direct async usage.""" assert callable(aread_secret) class TestDeleteSecretAsyncDispatch: """Tests for delete_secret migrated from @sync_compatible to @async_dispatch.""" def test_delete_secret_sync_context_returns_value_not_coroutine( self, gcp_credentials ): """delete_secret must return str (not coroutine) in sync context.""" @flow def test_flow(): result = delete_secret("test_secret", gcp_credentials) assert not isinstance(result, Coroutine), "sync context returned coroutine" return result 
result = test_flow() assert isinstance(result, str) async def test_delete_secret_async_context_works(self, gcp_credentials): """delete_secret should work correctly in async context.""" @flow async def test_flow(): result = await adelete_secret("test_secret", gcp_credentials) return result result = await test_flow() assert isinstance(result, str) def test_adelete_secret_is_available(self): """adelete_secret should be available for direct async usage.""" assert callable(adelete_secret) class TestDeleteSecretVersionAsyncDispatch: """Tests for delete_secret_version migrated from @sync_compatible to @async_dispatch.""" def test_delete_secret_version_sync_context_returns_value_not_coroutine( self, gcp_credentials ): """delete_secret_version must return str (not coroutine) in sync context.""" @flow def test_flow(): result = delete_secret_version("test_secret", 1, gcp_credentials) assert not isinstance(result, Coroutine), "sync context returned coroutine" return result result = test_flow() assert isinstance(result, str) async def test_delete_secret_version_async_context_works(self, gcp_credentials): """delete_secret_version should work correctly in async context.""" @flow async def test_flow(): result = await adelete_secret_version("test_secret", 1, gcp_credentials) return result result = await test_flow() assert isinstance(result, str) def test_adelete_secret_version_is_available(self): """adelete_secret_version should be available for direct async usage.""" assert callable(adelete_secret_version) class TestGcpSecretReadSecretAsyncDispatch: """Tests for GcpSecret.read_secret migrated from @sync_compatible to @async_dispatch.""" @pytest.fixture def gcp_secret(self, gcp_credentials): return GcpSecret( gcp_credentials=gcp_credentials, secret_name="test_secret_name" ) def test_read_secret_sync_context_returns_value_not_coroutine(self, gcp_secret): """read_secret must return bytes (not coroutine) in sync context.""" result = gcp_secret.read_secret() assert not 
isinstance(result, Coroutine), "sync context returned coroutine" assert isinstance(result, bytes) async def test_read_secret_async_context_works(self, gcp_secret): """read_secret should work correctly in async context.""" result = await gcp_secret.aread_secret() assert isinstance(result, bytes) def test_aread_secret_is_available(self, gcp_secret): """aread_secret should be available for direct async usage.""" assert hasattr(gcp_secret, "aread_secret") assert callable(gcp_secret.aread_secret) class TestGcpSecretWriteSecretAsyncDispatch: """Tests for GcpSecret.write_secret migrated from @sync_compatible to @async_dispatch.""" @pytest.fixture def gcp_secret(self, gcp_credentials): return GcpSecret( gcp_credentials=gcp_credentials, secret_name="test_secret_name" ) def test_write_secret_sync_context_returns_value_not_coroutine(self, gcp_secret): """write_secret must return str (not coroutine) in sync context.""" result = gcp_secret.write_secret(b"test_data") assert not isinstance(result, Coroutine), "sync context returned coroutine" assert isinstance(result, str) async def test_write_secret_async_context_works(self, gcp_secret): """write_secret should work correctly in async context.""" result = await gcp_secret.awrite_secret(b"test_data") assert isinstance(result, str) def test_awrite_secret_is_available(self, gcp_secret): """awrite_secret should be available for direct async usage.""" assert hasattr(gcp_secret, "awrite_secret") assert callable(gcp_secret.awrite_secret) class TestGcpSecretDeleteSecretAsyncDispatch: """Tests for GcpSecret.delete_secret migrated from @sync_compatible to @async_dispatch.""" @pytest.fixture def gcp_secret(self, gcp_credentials): return GcpSecret( gcp_credentials=gcp_credentials, secret_name="test_secret_name" ) def test_delete_secret_sync_context_returns_value_not_coroutine(self, gcp_secret): """delete_secret must return str (not coroutine) in sync context.""" result = gcp_secret.delete_secret() assert not isinstance(result, Coroutine), 
"sync context returned coroutine" assert isinstance(result, str) async def test_delete_secret_async_context_works(self, gcp_secret): """delete_secret should work correctly in async context.""" result = await gcp_secret.adelete_secret() assert isinstance(result, str) def test_adelete_secret_is_available(self, gcp_secret): """adelete_secret should be available for direct async usage.""" assert hasattr(gcp_secret, "adelete_secret") assert callable(gcp_secret.adelete_secret)
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/integrations/prefect-gcp/tests/test_secret_manager_async_dispatch.py", "license": "Apache License 2.0", "lines": 208, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:tests/server/orchestration/api/test_bulk_operations.py
""" Comprehensive tests for bulk operation endpoints. These tests cover: - Basic bulk operations - Validation errors (limit bounds) - Filter edge cases - Verification of actual deletion - Cascading deletes - Mixed results for state changes - Parameter validation """ from datetime import datetime, timedelta, timezone from uuid import uuid4 from starlette import status from prefect.server import models, schemas class TestFlowRunBulkDelete: """Tests for POST /flow_runs/bulk_delete endpoint.""" async def test_bulk_delete_flow_runs( self, session, flow, hosted_api_client, ): """Test basic bulk deletion of flow runs.""" # Create flow runs flow_runs = [] for _ in range(3): flow_run = await models.flow_runs.create_flow_run( session=session, flow_run=schemas.core.FlowRun( flow_id=flow.id, state=schemas.states.Pending(), ), ) flow_runs.append(flow_run) await session.commit() # Bulk delete response = await hosted_api_client.post( "/flow_runs/bulk_delete", json={ "flow_runs": {"id": {"any_": [str(fr.id) for fr in flow_runs[:2]]}}, }, ) assert response.status_code == status.HTTP_200_OK data = response.json() assert len(data["deleted"]) == 2 assert set(data["deleted"]) == {str(flow_runs[0].id), str(flow_runs[1].id)} async def test_bulk_delete_verifies_actual_deletion( self, session, flow, hosted_api_client, client, ): """Test that deleted flow runs are actually gone from the database.""" flow_run = await models.flow_runs.create_flow_run( session=session, flow_run=schemas.core.FlowRun( flow_id=flow.id, state=schemas.states.Pending(), ), ) await session.commit() # Delete the flow run response = await hosted_api_client.post( "/flow_runs/bulk_delete", json={"flow_runs": {"id": {"any_": [str(flow_run.id)]}}}, ) assert response.status_code == status.HTTP_200_OK assert str(flow_run.id) in response.json()["deleted"] # Verify it's actually deleted response = await client.get(f"/flow_runs/{flow_run.id}") assert response.status_code == status.HTTP_404_NOT_FOUND async def 
test_bulk_delete_with_invalid_ids( self, session, flow, hosted_api_client, ): """Test that only valid flow runs are deleted, invalid IDs are ignored.""" # Create 1 valid flow run flow_run = await models.flow_runs.create_flow_run( session=session, flow_run=schemas.core.FlowRun( flow_id=flow.id, state=schemas.states.Pending(), ), ) await session.commit() invalid_id = uuid4() response = await hosted_api_client.post( "/flow_runs/bulk_delete", json={ "flow_runs": {"id": {"any_": [str(flow_run.id), str(invalid_id)]}}, }, ) assert response.status_code == status.HTTP_200_OK body = response.json() # Only the valid run should be deleted assert len(body["deleted"]) == 1 assert body["deleted"][0] == str(flow_run.id) async def test_bulk_delete_flow_runs_empty_filter( self, hosted_api_client, ): """Test bulk deletion with empty filter returns empty list.""" response = await hosted_api_client.post( "/flow_runs/bulk_delete", json={ "flow_runs": {"id": {"any_": [str(uuid4())]}}, }, ) assert response.status_code == status.HTTP_200_OK data = response.json() assert data["deleted"] == [] async def test_bulk_delete_flow_runs_respects_limit( self, session, flow, hosted_api_client, ): """Test bulk deletion respects limit.""" # Create flow runs flow_runs = [] for _ in range(5): flow_run = await models.flow_runs.create_flow_run( session=session, flow_run=schemas.core.FlowRun( flow_id=flow.id, state=schemas.states.Pending(), ), ) flow_runs.append(flow_run) await session.commit() # Bulk delete with limit response = await hosted_api_client.post( "/flow_runs/bulk_delete", json={ "flow_runs": {"id": {"any_": [str(fr.id) for fr in flow_runs]}}, "limit": 2, }, ) assert response.status_code == status.HTTP_200_OK data = response.json() assert len(data["deleted"]) == 2 async def test_bulk_delete_flow_runs_no_filter( self, session, flow, hosted_api_client, ): """Test bulk deletion with no filter.""" # Create flow runs for _ in range(3): await models.flow_runs.create_flow_run( session=session, 
flow_run=schemas.core.FlowRun( flow_id=flow.id, state=schemas.states.Pending(), ), ) await session.commit() # Bulk delete with no filter - should delete up to limit response = await hosted_api_client.post( "/flow_runs/bulk_delete", json={"limit": 2}, ) assert response.status_code == status.HTTP_200_OK data = response.json() assert len(data["deleted"]) == 2 async def test_bulk_delete_validation_limit_zero( self, hosted_api_client, ): """Test that limit=0 returns 422.""" response = await hosted_api_client.post( "/flow_runs/bulk_delete", json={ "flow_runs": {}, "limit": 0, }, ) assert response.status_code == 422 async def test_bulk_delete_validation_limit_too_high( self, hosted_api_client, ): """Test that limit > max returns 422.""" response = await hosted_api_client.post( "/flow_runs/bulk_delete", json={ "flow_runs": {}, "limit": 201, }, ) assert response.status_code == 422 async def test_bulk_delete_by_state_name( self, session, flow, hosted_api_client, ): """Test deleting flow runs filtered by state name (e.g., late runs).""" now = datetime.now(timezone.utc) # Create late runs (Scheduled state with name "Late") late_runs = [] for i in range(3): flow_run = await models.flow_runs.create_flow_run( session=session, flow_run=schemas.core.FlowRun( flow_id=flow.id, name=f"late-run-{i}", state=schemas.states.Scheduled( name="Late", scheduled_time=now - timedelta(hours=2) ), expected_start_time=now - timedelta(hours=2), ), ) late_runs.append(flow_run) # Create on-time scheduled runs that should NOT be deleted future_run = await models.flow_runs.create_flow_run( session=session, flow_run=schemas.core.FlowRun( flow_id=flow.id, name="future-run", state=schemas.states.Scheduled(scheduled_time=now + timedelta(hours=1)), ), ) # Create a running run running_run = await models.flow_runs.create_flow_run( session=session, flow_run=schemas.core.FlowRun( flow_id=flow.id, name="running-run", state=schemas.states.Running(), ), ) await session.commit() # Delete all late flow runs (by 
state name "Late") response = await hosted_api_client.post( "/flow_runs/bulk_delete", json={ "flow_runs": {"state": {"name": {"any_": ["Late"]}}}, }, ) assert response.status_code == status.HTTP_200_OK result = response.json() assert len(result["deleted"]) == 3 # Verify late runs deleted, others remain for run in late_runs: assert str(run.id) in result["deleted"] assert str(future_run.id) not in result["deleted"] assert str(running_run.id) not in result["deleted"] async def test_bulk_delete_by_state_type( self, session, flow, hosted_api_client, ): """Test deleting flow runs filtered by state type.""" # Create runs in different states pending_run = await models.flow_runs.create_flow_run( session=session, flow_run=schemas.core.FlowRun( flow_id=flow.id, state=schemas.states.Pending(), ), ) running_run = await models.flow_runs.create_flow_run( session=session, flow_run=schemas.core.FlowRun( flow_id=flow.id, state=schemas.states.Running(), ), ) completed_run = await models.flow_runs.create_flow_run( session=session, flow_run=schemas.core.FlowRun( flow_id=flow.id, state=schemas.states.Completed(), ), ) await session.commit() # Delete only PENDING runs response = await hosted_api_client.post( "/flow_runs/bulk_delete", json={ "flow_runs": {"state": {"type": {"any_": ["PENDING"]}}}, }, ) assert response.status_code == status.HTTP_200_OK result = response.json() assert len(result["deleted"]) == 1 assert str(pending_run.id) in result["deleted"] assert str(running_run.id) not in result["deleted"] assert str(completed_run.id) not in result["deleted"] class TestFlowRunBulkSetState: """Tests for POST /flow_runs/bulk_set_state endpoint.""" async def test_bulk_set_state( self, session, flow, client, ): """Test bulk state setting for flow runs.""" # Create flow runs flow_runs = [] for _ in range(3): flow_run = await models.flow_runs.create_flow_run( session=session, flow_run=schemas.core.FlowRun( flow_id=flow.id, state=schemas.states.Pending(), ), ) flow_runs.append(flow_run) await 
session.commit() # Bulk set state response = await client.post( "/flow_runs/bulk_set_state", json={ "flow_runs": {"id": {"any_": [str(fr.id) for fr in flow_runs[:2]]}}, "state": {"type": "CANCELLED", "name": "Cancelled"}, "force": True, }, ) assert response.status_code == status.HTTP_200_OK data = response.json() assert len(data["results"]) == 2 for result in data["results"]: assert result["status"] == "ACCEPT" async def test_bulk_set_state_verifies_state_change( self, session, flow, client, ): """Test that the state is actually changed in the database.""" flow_run = await models.flow_runs.create_flow_run( session=session, flow_run=schemas.core.FlowRun( flow_id=flow.id, state=schemas.states.Pending(), ), ) await session.commit() # Set state to cancelled response = await client.post( "/flow_runs/bulk_set_state", json={ "flow_runs": {"id": {"any_": [str(flow_run.id)]}}, "state": {"type": "CANCELLED"}, "force": True, }, ) assert response.status_code == status.HTTP_200_OK # Verify the state changed response = await client.get(f"/flow_runs/{flow_run.id}") assert response.status_code == status.HTTP_200_OK assert response.json()["state"]["type"] == "CANCELLED" async def test_bulk_set_state_with_force( self, session, flow, client, ): """Test force=True bypasses orchestration rules.""" # Create a completed flow run flow_run = await models.flow_runs.create_flow_run( session=session, flow_run=schemas.core.FlowRun( flow_id=flow.id, state=schemas.states.Completed(), ), ) await session.commit() # Normally COMPLETED -> RUNNING would be rejected, but force=True bypasses response = await client.post( "/flow_runs/bulk_set_state", json={ "flow_runs": {"id": {"any_": [str(flow_run.id)]}}, "state": {"type": "RUNNING"}, "force": True, }, ) assert response.status_code == status.HTTP_200_OK result = response.json() assert len(result["results"]) == 1 assert result["results"][0]["status"] == "ACCEPT" assert result["results"][0]["state"]["type"] == "RUNNING" async def 
test_bulk_set_state_multiple_flow_runs_different_states( self, session, flow, client, ): """Test bulk state change with flow runs in different states.""" pending_run = await models.flow_runs.create_flow_run( session=session, flow_run=schemas.core.FlowRun( flow_id=flow.id, name="pending-run", state=schemas.states.Pending(), ), ) running_run = await models.flow_runs.create_flow_run( session=session, flow_run=schemas.core.FlowRun( flow_id=flow.id, name="running-run", state=schemas.states.Running(), ), ) await session.commit() response = await client.post( "/flow_runs/bulk_set_state", json={ "flow_runs": { "id": {"any_": [str(pending_run.id), str(running_run.id)]} }, "state": {"type": "CANCELLED"}, }, ) assert response.status_code == status.HTTP_200_OK result = response.json() assert len(result["results"]) == 2 # Both should be processed (accepted since CANCELLED is valid target) for item in result["results"]: assert item["status"] == "ACCEPT" assert item["state"]["type"] == "CANCELLED" async def test_bulk_set_state_empty_filter( self, client, ): """Test bulk set state with empty filter returns empty list.""" response = await client.post( "/flow_runs/bulk_set_state", json={ "flow_runs": {"id": {"any_": [str(uuid4())]}}, "state": {"type": "CANCELLED", "name": "Cancelled"}, }, ) assert response.status_code == status.HTTP_200_OK data = response.json() assert data["results"] == [] async def test_bulk_set_state_respects_limit( self, session, flow, client, ): """Test bulk set state respects limit.""" # Create flow runs flow_runs = [] for _ in range(5): flow_run = await models.flow_runs.create_flow_run( session=session, flow_run=schemas.core.FlowRun( flow_id=flow.id, state=schemas.states.Pending(), ), ) flow_runs.append(flow_run) await session.commit() # Bulk set state with limit response = await client.post( "/flow_runs/bulk_set_state", json={ "flow_runs": {"id": {"any_": [str(fr.id) for fr in flow_runs]}}, "state": {"type": "CANCELLED", "name": "Cancelled"}, "force": True, 
"limit": 2, }, ) assert response.status_code == status.HTTP_200_OK data = response.json() assert len(data["results"]) == 2 async def test_bulk_set_state_validation_limit_zero( self, client, ): """Test that limit=0 returns 422.""" response = await client.post( "/flow_runs/bulk_set_state", json={ "flow_runs": {}, "state": {"type": "CANCELLED"}, "limit": 0, }, ) assert response.status_code == 422 async def test_bulk_set_state_validation_limit_too_high( self, client, ): """Test that limit > max returns 422.""" response = await client.post( "/flow_runs/bulk_set_state", json={ "flow_runs": {}, "state": {"type": "CANCELLED"}, "limit": 51, }, ) assert response.status_code == 422 async def test_bulk_set_state_by_state_filter( self, session, flow, client, ): """Test bulk state change filtered by current state type.""" # Create runs in different states pending_runs = [] for i in range(2): run = await models.flow_runs.create_flow_run( session=session, flow_run=schemas.core.FlowRun( flow_id=flow.id, state=schemas.states.Pending(), ), ) pending_runs.append(run) running_run = await models.flow_runs.create_flow_run( session=session, flow_run=schemas.core.FlowRun( flow_id=flow.id, state=schemas.states.Running(), ), ) await session.commit() # Cancel only PENDING runs response = await client.post( "/flow_runs/bulk_set_state", json={ "flow_runs": {"state": {"type": {"any_": ["PENDING"]}}}, "state": {"type": "CANCELLED"}, }, ) assert response.status_code == status.HTTP_200_OK result = response.json() assert len(result["results"]) == 2 # Verify running run was not affected response = await client.get(f"/flow_runs/{running_run.id}") assert response.json()["state"]["type"] == "RUNNING" class TestDeploymentBulkDelete: """Tests for POST /deployments/bulk_delete endpoint.""" async def test_bulk_delete_deployments( self, session, flow, client, ): """Test bulk deletion of deployments.""" # Create deployments deployments = [] for i in range(3): deployment = await 
models.deployments.create_deployment( session=session, deployment=schemas.core.Deployment( name=f"test-deployment-{uuid4()}", flow_id=flow.id, ), ) deployments.append(deployment) await session.commit() # Bulk delete response = await client.post( "/deployments/bulk_delete", json={ "deployments": {"id": {"any_": [str(d.id) for d in deployments[:2]]}}, }, ) assert response.status_code == status.HTTP_200_OK data = response.json() assert len(data["deleted"]) == 2 async def test_bulk_delete_verifies_actual_deletion( self, session, flow, client, ): """Test that deleted deployments are actually gone from the database.""" deployment = await models.deployments.create_deployment( session=session, deployment=schemas.core.Deployment( name=f"test-deployment-{uuid4()}", flow_id=flow.id, ), ) await session.commit() # Delete the deployment response = await client.post( "/deployments/bulk_delete", json={"deployments": {"id": {"any_": [str(deployment.id)]}}}, ) assert response.status_code == status.HTTP_200_OK assert str(deployment.id) in response.json()["deleted"] # Verify it's actually deleted response = await client.get(f"/deployments/{deployment.id}") assert response.status_code == status.HTTP_404_NOT_FOUND async def test_bulk_delete_deployments_empty_filter( self, client, ): """Test bulk deletion with empty filter returns empty list.""" response = await client.post( "/deployments/bulk_delete", json={ "deployments": {"id": {"any_": [str(uuid4())]}}, }, ) assert response.status_code == status.HTTP_200_OK data = response.json() assert data["deleted"] == [] async def test_bulk_delete_deployments_respects_limit( self, session, flow, client, ): """Test bulk delete respects the limit parameter.""" # Create deployments deployments = [] for i in range(4): deployment = await models.deployments.create_deployment( session=session, deployment=schemas.core.Deployment( name=f"test-limit-deployment-{uuid4()}", flow_id=flow.id, ), ) deployments.append(deployment) await session.commit() # Try to 
delete all with a limit of 2 response = await client.post( "/deployments/bulk_delete", json={ "deployments": {"id": {"any_": [str(d.id) for d in deployments]}}, "limit": 2, }, ) assert response.status_code == status.HTTP_200_OK result = response.json() assert len(result["deleted"]) == 2 async def test_bulk_delete_deployments_by_name_filter( self, session, flow, client, ): """Test bulk delete with name filter.""" unique_prefix = str(uuid4())[:8] deployments = [] for i in range(3): deployment = await models.deployments.create_deployment( session=session, deployment=schemas.core.Deployment( name=f"{unique_prefix}-deployment-{i}", flow_id=flow.id, ), ) deployments.append(deployment) # Create one with different name other_deployment = await models.deployments.create_deployment( session=session, deployment=schemas.core.Deployment( name="other-deployment", flow_id=flow.id, ), ) await session.commit() # Delete by name filter response = await client.post( "/deployments/bulk_delete", json={ "deployments": {"name": {"like_": f"{unique_prefix}%"}}, }, ) assert response.status_code == status.HTTP_200_OK result = response.json() assert len(result["deleted"]) == 3 assert str(other_deployment.id) not in result["deleted"] class TestDeploymentBulkCreateFlowRun: """Tests for POST /deployments/{id}/create_flow_run/bulk endpoint.""" async def test_bulk_create_flow_runs( self, deployment, hosted_api_client, ): """Test bulk creation of flow runs from a deployment.""" response = await hosted_api_client.post( f"/deployments/{deployment.id}/create_flow_run/bulk", json=[ {"name": "run-1"}, {"name": "run-2"}, {"name": "run-3"}, ], ) assert response.status_code == status.HTTP_200_OK data = response.json() assert len(data["results"]) == 3 for result in data["results"]: assert result["status"] == "CREATED" assert result["flow_run_id"] is not None async def test_bulk_create_with_defaults( self, deployment, hosted_api_client, ): """Test bulk create with default parameters (empty objects).""" 
response = await hosted_api_client.post( f"/deployments/{deployment.id}/create_flow_run/bulk", json=[{}, {}, {}], ) assert response.status_code == status.HTTP_200_OK data = response.json() assert len(data["results"]) == 3 for result in data["results"]: assert result["status"] == "CREATED" assert result["flow_run_id"] is not None async def test_bulk_create_preserves_order( self, deployment, hosted_api_client, client, ): """Results should be returned in the same order as the input.""" response = await hosted_api_client.post( f"/deployments/{deployment.id}/create_flow_run/bulk", json=[ {"name": "first-run"}, {"name": "second-run"}, {"name": "third-run"}, ], ) assert response.status_code == status.HTTP_200_OK results = response.json()["results"] # Verify order by fetching each flow run for i, expected_name in enumerate(["first-run", "second-run", "third-run"]): flow_run_id = results[i]["flow_run_id"] fr_response = await client.get(f"/flow_runs/{flow_run_id}") assert fr_response.json()["name"] == expected_name async def test_bulk_create_flow_runs_empty_list( self, deployment, hosted_api_client, ): """Test bulk creation with empty list returns empty list.""" response = await hosted_api_client.post( f"/deployments/{deployment.id}/create_flow_run/bulk", json=[], ) assert response.status_code == status.HTTP_200_OK data = response.json() assert data["results"] == [] async def test_bulk_create_flow_runs_deployment_not_found( self, hosted_api_client, ): """Test bulk creation with non-existent deployment returns 404.""" response = await hosted_api_client.post( f"/deployments/{uuid4()}/create_flow_run/bulk", json=[{"name": "run-1"}], ) assert response.status_code == status.HTTP_404_NOT_FOUND async def test_bulk_create_empty_list_nonexistent_deployment( self, hosted_api_client, ): """Empty list with nonexistent deployment should return 404, not 200. This verifies that authorization checks run even for empty lists. 
""" response = await hosted_api_client.post( f"/deployments/{uuid4()}/create_flow_run/bulk", json=[], ) assert response.status_code == status.HTTP_404_NOT_FOUND async def test_bulk_create_flow_runs_exceeds_limit( self, deployment, hosted_api_client, ): """Test bulk creation with too many items returns 400.""" response = await hosted_api_client.post( f"/deployments/{deployment.id}/create_flow_run/bulk", json=[{"name": f"run-{i}"} for i in range(101)], ) assert response.status_code == status.HTTP_400_BAD_REQUEST async def test_bulk_create_flow_runs_with_parameters( self, deployment, hosted_api_client, ): """Test bulk creation of flow runs with custom parameters.""" response = await hosted_api_client.post( f"/deployments/{deployment.id}/create_flow_run/bulk", json=[ {"name": "run-1", "parameters": {"x": 1}}, {"name": "run-2", "parameters": {"x": 2}}, ], ) assert response.status_code == status.HTTP_200_OK data = response.json() assert len(data["results"]) == 2 for result in data["results"]: assert result["status"] == "CREATED" async def test_bulk_create_flow_runs_with_varying_tags( self, deployment, hosted_api_client, client, ): """Test bulk create with different tags for each flow run.""" response = await hosted_api_client.post( f"/deployments/{deployment.id}/create_flow_run/bulk", json=[ {"tags": ["batch-1"]}, {"tags": ["batch-2"]}, {"tags": ["batch-3"]}, ], ) assert response.status_code == status.HTTP_200_OK results = response.json()["results"] assert len(results) == 3 # Verify each flow run has the correct tag for i, result in enumerate(results): assert result["status"] == "CREATED" fr_response = await client.get(f"/flow_runs/{result['flow_run_id']}") assert f"batch-{i + 1}" in fr_response.json()["tags"] async def test_bulk_create_inherits_deployment_tags( self, session, flow, hosted_api_client, client, ): """Test that flow runs inherit tags from deployment.""" deployment = await models.deployments.create_deployment( session=session, 
deployment=schemas.core.Deployment( name=f"tagged-deployment-{uuid4()}", flow_id=flow.id, tags=["deployment-tag-1", "deployment-tag-2"], ), ) await session.commit() response = await hosted_api_client.post( f"/deployments/{deployment.id}/create_flow_run/bulk", json=[{"tags": ["run-tag"]}], ) assert response.status_code == status.HTTP_200_OK flow_run_id = response.json()["results"][0]["flow_run_id"] fr_response = await client.get(f"/flow_runs/{flow_run_id}") tags = fr_response.json()["tags"] assert "deployment-tag-1" in tags assert "deployment-tag-2" in tags assert "run-tag" in tags class TestFlowBulkDelete: """Tests for POST /flows/bulk_delete endpoint.""" async def test_bulk_delete_flows( self, session, client, ): """Test bulk deletion of flows.""" # Create flows flows = [] for i in range(3): flow = await models.flows.create_flow( session=session, flow=schemas.core.Flow(name=f"test-flow-{uuid4()}"), ) flows.append(flow) await session.commit() # Bulk delete response = await client.post( "/flows/bulk_delete", json={ "flows": {"id": {"any_": [str(f.id) for f in flows[:2]]}}, }, ) assert response.status_code == status.HTTP_200_OK data = response.json() assert len(data["deleted"]) == 2 async def test_bulk_delete_verifies_actual_deletion( self, session, client, ): """Test that deleted flows are actually gone from the database.""" flow = await models.flows.create_flow( session=session, flow=schemas.core.Flow(name=f"test-flow-{uuid4()}"), ) await session.commit() # Delete the flow response = await client.post( "/flows/bulk_delete", json={"flows": {"id": {"any_": [str(flow.id)]}}}, ) assert response.status_code == status.HTTP_200_OK assert str(flow.id) in response.json()["deleted"] # Verify it's actually deleted response = await client.get(f"/flows/{flow.id}") assert response.status_code == status.HTTP_404_NOT_FOUND async def test_bulk_delete_flows_empty_filter( self, client, ): """Test bulk deletion with empty filter returns empty list.""" response = await client.post( 
"/flows/bulk_delete", json={ "flows": {"id": {"any_": [str(uuid4())]}}, }, ) assert response.status_code == status.HTTP_200_OK data = response.json() assert data["deleted"] == [] async def test_bulk_delete_flows_deletes_deployments( self, session, client, ): """Test bulk deletion of flows also deletes their deployments.""" # Create a flow and deployment flow = await models.flows.create_flow( session=session, flow=schemas.core.Flow(name=f"test-flow-{uuid4()}"), ) deployment = await models.deployments.create_deployment( session=session, deployment=schemas.core.Deployment( name="test-deployment", flow_id=flow.id, ), ) await session.commit() # Bulk delete flow response = await client.post( "/flows/bulk_delete", json={ "flows": {"id": {"any_": [str(flow.id)]}}, }, ) assert response.status_code == status.HTTP_200_OK assert str(flow.id) in response.json()["deleted"] # Verify deployment is also deleted response = await client.get(f"/deployments/{deployment.id}") assert response.status_code == status.HTTP_404_NOT_FOUND async def test_bulk_delete_flows_deletes_multiple_deployments( self, session, client, ): """Test bulk deletion of flow deletes all its associated deployments.""" flow = await models.flows.create_flow( session=session, flow=schemas.core.Flow(name=f"test-flow-{uuid4()}"), ) deployment_ids = [] for i in range(3): deployment = await models.deployments.create_deployment( session=session, deployment=schemas.core.Deployment( name=f"test-deployment-{i}", flow_id=flow.id, ), ) deployment_ids.append(deployment.id) await session.commit() # Delete the flow response = await client.post( "/flows/bulk_delete", json={"flows": {"id": {"any_": [str(flow.id)]}}}, ) assert response.status_code == status.HTTP_200_OK # Verify all deployments are deleted for deployment_id in deployment_ids: response = await client.get(f"/deployments/{deployment_id}") assert response.status_code == status.HTTP_404_NOT_FOUND async def test_bulk_delete_flows_respects_limit( self, session, client, ): 
"""Test bulk delete respects the limit parameter.""" flows = [] for i in range(4): flow = await models.flows.create_flow( session=session, flow=schemas.core.Flow(name=f"test-limit-flow-{uuid4()}"), ) flows.append(flow) await session.commit() # Try to delete all with a limit of 2 response = await client.post( "/flows/bulk_delete", json={ "flows": {"id": {"any_": [str(f.id) for f in flows]}}, "limit": 2, }, ) assert response.status_code == status.HTTP_200_OK result = response.json() assert len(result["deleted"]) == 2 # Verify only 2 were deleted deleted_count = 0 remaining_count = 0 for flow in flows: response = await client.get(f"/flows/{flow.id}") if response.status_code == status.HTTP_404_NOT_FOUND: deleted_count += 1 else: remaining_count += 1 assert deleted_count == 2 assert remaining_count == 2 async def test_bulk_delete_nonexistent_flows_returns_empty( self, client, ): """Test that bulk delete returns empty list when requested flows don't exist.""" nonexistent_ids = [str(uuid4()) for _ in range(3)] response = await client.post( "/flows/bulk_delete", json={ "flows": {"id": {"any_": nonexistent_ids}}, }, ) assert response.status_code == status.HTTP_200_OK result = response.json() assert result["deleted"] == [] class TestFlowFilterNotAny: """Tests for FlowFilter with not_any_ exclusion filter.""" async def test_flow_filter_not_any( self, session, client, ): """Test FlowFilter with not_any_ filter.""" # Create flows with unique names to avoid conflicts unique_prefix = str(uuid4())[:8] flows = [] for i in range(3): flow = await models.flows.create_flow( session=session, flow=schemas.core.Flow(name=f"test-filter-flow-{unique_prefix}-{i}"), ) flows.append(flow) await session.commit() # Query flows excluding one response = await client.post( "/flows/filter", json={ "flows": { "id": {"not_any_": [str(flows[0].id)]}, "name": {"like_": f"%{unique_prefix}%"}, }, }, ) assert response.status_code == status.HTTP_200_OK data = response.json() # Should not include the excluded 
flow flow_ids = [f["id"] for f in data] assert str(flows[0].id) not in flow_ids # Should include the other flows assert str(flows[1].id) in flow_ids assert str(flows[2].id) in flow_ids async def test_flow_filter_not_any_with_any( self, session, client, ): """Test using both any_ and not_any_ together.""" unique_prefix = str(uuid4())[:8] flows = [] for i in range(4): flow = await models.flows.create_flow( session=session, flow=schemas.core.Flow(name=f"test-combo-flow-{unique_prefix}-{i}"), ) flows.append(flow) await session.commit() # Include flows 0, 1, 2 but exclude flow 1 response = await client.post( "/flows/filter", json={ "flows": { "id": { "any_": [str(flows[0].id), str(flows[1].id), str(flows[2].id)], "not_any_": [str(flows[1].id)], }, }, }, ) assert response.status_code == status.HTTP_200_OK data = response.json() flow_ids = [f["id"] for f in data] # Should include 0 and 2, but not 1 or 3 assert str(flows[0].id) in flow_ids assert str(flows[1].id) not in flow_ids assert str(flows[2].id) in flow_ids assert str(flows[3].id) not in flow_ids
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/server/orchestration/api/test_bulk_operations.py", "license": "Apache License 2.0", "lines": 1104, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:src/prefect/_internal/analytics/ci_detection.py
""" CI environment detection for SDK telemetry. Telemetry is automatically disabled in CI environments. """ import os # Common CI environment variables CI_ENV_VARS = frozenset( { "CI", "GITHUB_ACTIONS", "GITLAB_CI", "JENKINS_URL", "TRAVIS", "CIRCLECI", "BUILDKITE", "TF_BUILD", # Azure DevOps "CODEBUILD_BUILD_ID", # AWS CodeBuild "BITBUCKET_COMMIT", "TEAMCITY_VERSION", "DRONE", "SEMAPHORE", "APPVEYOR", "BUDDY", "CI_NAME", # Generic CI indicator } ) def is_ci_environment() -> bool: """ Check if the current environment is a CI environment. Returns: True if any known CI environment variable is set """ return any(os.environ.get(var) for var in CI_ENV_VARS)
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/_internal/analytics/ci_detection.py", "license": "Apache License 2.0", "lines": 33, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
PrefectHQ/prefect:src/prefect/_internal/analytics/client.py
""" Amplitude client wrapper for SDK telemetry. Provides fire-and-forget event tracking with silent failure handling. """ import atexit import logging import platform from typing import Any from amplitude import Amplitude, BaseEvent, Config import prefect # Amplitude API key for SDK telemetry # This is a write-only key that can only send events, not read data try: from prefect._internal.analytics._config import AMPLITUDE_API_KEY except ImportError: AMPLITUDE_API_KEY = "YOUR_AMPLITUDE_API_KEY_HERE" # Module-level client instance _amplitude_client: Amplitude | None = None _initialized = False def _get_event_properties() -> dict[str, str]: """Get common event properties included with all events.""" return { "prefect_version": prefect.__version__, "python_version": platform.python_version(), "platform": platform.system(), "architecture": platform.machine(), } def _initialize_client() -> bool: """ Initialize the Amplitude client. Returns: True if initialization succeeded, False otherwise """ global _amplitude_client, _initialized if _initialized: return _amplitude_client is not None _initialized = True if AMPLITUDE_API_KEY == "YOUR_AMPLITUDE_API_KEY_HERE": # API key not configured - telemetry disabled return False try: # Create a silent logger for Amplitude to prevent SDK errors from reaching users amplitude_logger = logging.getLogger("amplitude") amplitude_logger.setLevel(logging.CRITICAL) config = Config( # Flush events after a short delay to avoid blocking flush_interval_millis=10000, # 10 seconds flush_queue_size=10, # Minimize network overhead min_id_length=1, ) _amplitude_client = Amplitude(AMPLITUDE_API_KEY, config) # Register shutdown handler to flush remaining events atexit.register(_shutdown_client) return True except Exception: return False def _shutdown_client() -> None: """Shutdown the Amplitude client, flushing any remaining events.""" global _amplitude_client if _amplitude_client is not None: try: _amplitude_client.shutdown() except Exception: pass 
_amplitude_client = None def track_event( event_name: str, device_id: str, extra_properties: dict[str, Any] | None = None, ) -> bool: """ Track an event with Amplitude. Args: event_name: The name of the event to track device_id: The anonymous device identifier extra_properties: Additional event properties Returns: True if the event was tracked, False otherwise """ properties = _get_event_properties() if extra_properties: properties.update(extra_properties) if not _initialize_client(): return False try: event = BaseEvent( event_type=event_name, device_id=device_id, event_properties=properties, ) _amplitude_client.track(event) return True except Exception: return False def flush() -> None: """Flush any pending events immediately.""" if _amplitude_client is not None: try: _amplitude_client.flush() except Exception: pass
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/_internal/analytics/client.py", "license": "Apache License 2.0", "lines": 102, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
PrefectHQ/prefect:src/prefect/_internal/analytics/device_id.py
""" Device ID generation and persistence for SDK telemetry. The device ID is an anonymous identifier used to correlate events from the same installation without identifying the user. """ import os from pathlib import Path from uuid import uuid4 from prefect.settings import get_current_settings def _get_device_id_path() -> Path: """Get the path to the device ID file.""" settings = get_current_settings() return settings.home / ".sdk_telemetry" / "device_id" def get_or_create_device_id() -> str: """ Get the existing device ID or create a new one. The device ID is stored in $PREFECT_HOME/.sdk_telemetry/device_id Returns: A UUID string identifying this installation """ device_id_path = _get_device_id_path() # Try to read existing device ID if device_id_path.exists(): try: device_id = device_id_path.read_text().strip() if device_id: return device_id except Exception: pass # Generate new device ID device_id = str(uuid4()) # Persist device ID try: device_id_path.parent.mkdir(parents=True, exist_ok=True) device_id_path.write_text(device_id) # Set restrictive permissions (owner read/write only) os.chmod(device_id_path, 0o600) except Exception: # If we can't persist, still return the generated ID # (it will be regenerated next time) pass return device_id
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/_internal/analytics/device_id.py", "license": "Apache License 2.0", "lines": 42, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
PrefectHQ/prefect:src/prefect/_internal/analytics/emit.py
""" Event emission for SDK analytics. """ import sys from typing import Any from prefect._internal.analytics.device_id import get_or_create_device_id from prefect._internal.analytics.events import SDKEvent from prefect._internal.analytics.service import AnalyticsEvent, AnalyticsService def _is_interactive_terminal() -> bool: """Check if we're running in an interactive terminal.""" try: return sys.stdout.isatty() or sys.stderr.isatty() except Exception: return False def emit_sdk_event( event_name: SDKEvent, extra_properties: dict[str, Any] | None = None, ) -> bool: """ Emit an SDK telemetry event. This is an internal function for tracking SDK events. Events are queued for processing in a background thread (non-blocking). Args: event_name: The name of the event to track extra_properties: Additional event properties Returns: True if the event was queued, False otherwise """ try: device_id = get_or_create_device_id() event = AnalyticsEvent( event_name=event_name, device_id=device_id, extra_properties=extra_properties, ) AnalyticsService.instance().enqueue(event) return True except Exception: return False def emit_integration_event( integration: str, event_name: str, extra_properties: dict[str, Any] | None = None, ) -> bool: """ Emit a telemetry event from an integration library. This is exposed via the public API in prefect.analytics for integration libraries (e.g., prefect-aws, prefect-gcp) to emit telemetry events. Events are automatically namespaced with the integration name. 
Args: integration: The integration name (e.g., "prefect-aws", "prefect-gcp") event_name: The event name (e.g., "s3_block_created") extra_properties: Additional event properties Returns: True if the event was queued, False otherwise """ try: # Namespace the event with the integration name namespaced_event = f"{integration}:{event_name}" device_id = get_or_create_device_id() # Add integration name to properties properties = {"integration": integration} if extra_properties: properties.update(extra_properties) event = AnalyticsEvent( event_name=namespaced_event, device_id=device_id, extra_properties=properties, ) AnalyticsService.instance().enqueue(event) return True except Exception: return False
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/_internal/analytics/emit.py", "license": "Apache License 2.0", "lines": 73, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
PrefectHQ/prefect:src/prefect/_internal/analytics/enabled.py
""" Telemetry enabled check for SDK analytics. """ import os from prefect._internal.analytics.ci_detection import is_ci_environment def is_telemetry_enabled() -> bool: """ Quick non-blocking check of local telemetry settings. Telemetry is disabled if: - DO_NOT_TRACK environment variable is set (client-side) - Running in a CI environment Note: Server-side analytics check is performed in the background service to avoid blocking the main thread. Returns: True if local telemetry checks pass, False otherwise """ # Check DO_NOT_TRACK standard (client-side setting) do_not_track = os.environ.get("DO_NOT_TRACK", "").lower() if do_not_track in ("1", "true", "yes"): return False # Check CI environment if is_ci_environment(): return False return True
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/_internal/analytics/enabled.py", "license": "Apache License 2.0", "lines": 24, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
PrefectHQ/prefect:src/prefect/_internal/analytics/events.py
""" Event type definitions for SDK telemetry. """ from typing import Literal # Quick Start Funnel events SDKEvent = Literal[ "first_sdk_import", "first_flow_defined", "first_flow_run", "first_deployment_created", "first_schedule_created", ]
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/_internal/analytics/events.py", "license": "Apache License 2.0", "lines": 12, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
PrefectHQ/prefect:src/prefect/_internal/analytics/milestones.py
""" Milestone tracking for SDK telemetry. Tracks first-* events to avoid duplicate telemetry. Milestones are stored in $PREFECT_HOME/.sdk_telemetry/milestones.json For existing users (detected by presence of Prefect artifacts), all milestones are pre-marked as reached to avoid emitting onboarding events on upgrade. """ import json from pathlib import Path from typing import Literal from prefect._internal.analytics.emit import _is_interactive_terminal, emit_sdk_event from prefect.settings import get_current_settings MilestoneName = Literal[ "first_sdk_import", "first_flow_defined", "first_flow_run", "first_deployment_created", "first_schedule_created", ] # All milestone names for pre-marking existing users ALL_MILESTONES: list[MilestoneName] = [ "first_sdk_import", "first_flow_defined", "first_flow_run", "first_deployment_created", "first_schedule_created", ] # Files/directories that indicate an existing Prefect user _EXISTING_USER_INDICATORS = [ "profiles.toml", # User has configured profiles ".prefect.db", # Local server database "prefect.db", # Alternative database location "storage", # Result storage directory ] def _get_telemetry_dir() -> Path: """Get the path to the telemetry directory.""" settings = get_current_settings() return settings.home / ".sdk_telemetry" def _get_milestones_path() -> Path: """Get the path to the milestones file.""" return _get_telemetry_dir() / "milestones.json" def _read_milestones() -> dict[str, bool]: """Read milestones from disk.""" milestones_path = _get_milestones_path() if milestones_path.exists(): try: return json.loads(milestones_path.read_text()) except Exception: pass return {} def _write_milestones(milestones: dict[str, bool]) -> None: """Write milestones to disk.""" milestones_path = _get_milestones_path() try: milestones_path.parent.mkdir(parents=True, exist_ok=True) milestones_path.write_text(json.dumps(milestones, indent=2)) except Exception: pass def _is_existing_user() -> bool: """ Check if this is an existing Prefect 
user. An existing user is detected by the presence of common Prefect artifacts in PREFECT_HOME that would have been created before telemetry was added. Returns: True if existing user indicators are found """ settings = get_current_settings() prefect_home = settings.home if not prefect_home.exists(): return False for indicator in _EXISTING_USER_INDICATORS: if (prefect_home / indicator).exists(): return True return False def _mark_existing_user_milestones() -> bool: """ Pre-mark all milestones for existing users. This prevents existing users from emitting onboarding events when they upgrade to a version with telemetry. Returns: True if milestones were pre-marked (existing user detected) """ telemetry_dir = _get_telemetry_dir() # If telemetry directory already exists, we've already handled this if telemetry_dir.exists(): return False # Check if this is an existing user if not _is_existing_user(): return False # Pre-mark all milestones for existing users milestones = {milestone: True for milestone in ALL_MILESTONES} _write_milestones(milestones) return True def has_reached_milestone(milestone: MilestoneName) -> bool: """ Check if a milestone has been reached. Args: milestone: The milestone name to check Returns: True if the milestone has been recorded """ milestones = _read_milestones() return milestones.get(milestone, False) def mark_milestone(milestone: MilestoneName) -> None: """ Mark a milestone as reached. Args: milestone: The milestone name to mark """ milestones = _read_milestones() milestones[milestone] = True _write_milestones(milestones) def try_mark_milestone(milestone: MilestoneName) -> bool: """ Try to mark a milestone and emit an event if it's new. This is the primary entry point for milestone tracking. It checks if the milestone is new, marks it, and emits the event. Events are only emitted in interactive terminals to avoid tracking deployed flow runs (e.g., Kubernetes jobs) as new users. 
Args: milestone: The milestone name to mark Returns: True if this was the first time reaching the milestone """ # Only emit events in interactive terminals # This prevents deployed flow runs from being counted as new users if not _is_interactive_terminal(): return False if has_reached_milestone(milestone): return False mark_milestone(milestone) emit_sdk_event(milestone) return True
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/_internal/analytics/milestones.py", "license": "Apache License 2.0", "lines": 133, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
PrefectHQ/prefect:src/prefect/_internal/analytics/notice.py
""" First-run telemetry notice for SDK telemetry. Displays a notice to users the first time telemetry is enabled, but only in interactive terminal sessions. """ import sys from pathlib import Path from prefect._internal.analytics.emit import _is_interactive_terminal from prefect.settings import get_current_settings NOTICE_TEXT = """ Prefect collects anonymous usage data to improve the product. To opt out: set PREFECT_SERVER_ANALYTICS_ENABLED=false on the server, or DO_NOT_TRACK=1 in the client. Learn more: https://docs.prefect.io/concepts/telemetry """ def _get_notice_marker_path() -> Path: """Get the path to the notice marker file.""" settings = get_current_settings() return settings.home / ".sdk_telemetry" / "notice_shown" def _has_shown_notice() -> bool: """Check if the notice has been shown before.""" return _get_notice_marker_path().exists() def _mark_notice_shown() -> None: """Mark that the notice has been shown.""" marker_path = _get_notice_marker_path() try: marker_path.parent.mkdir(parents=True, exist_ok=True) marker_path.touch() except Exception: pass def maybe_show_telemetry_notice() -> None: """ Show the telemetry notice if appropriate. The notice is shown only: - The first time telemetry is enabled - In an interactive terminal (TTY) """ # Only show in interactive terminals if not _is_interactive_terminal(): return # Only show once if _has_shown_notice(): return # Show notice and mark as shown print(NOTICE_TEXT, file=sys.stderr) _mark_notice_shown()
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/_internal/analytics/notice.py", "license": "Apache License 2.0", "lines": 45, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
PrefectHQ/prefect:src/prefect/_internal/analytics/service.py
""" Background analytics service for non-blocking event processing. This module provides a singleton service that receives analytics events via a queue and processes them in a background thread. The server analytics check is performed once per process, off the main thread. """ import atexit import os import queue import threading from dataclasses import dataclass from typing import Any, ClassVar from prefect._internal.analytics.ci_detection import is_ci_environment from prefect._internal.analytics.client import track_event @dataclass class AnalyticsEvent: """An analytics event to be processed by the background service.""" event_name: str device_id: str extra_properties: dict[str, Any] | None = None class AnalyticsService: """ Background service for processing analytics events. This service: - Receives events via enqueue() which returns immediately (non-blocking) - Processes events in a background daemon thread - Performs the server analytics check once per process (with 5s timeout) - Caches the server check result in memory The service is lazily started when the first event is queued. """ _instance: ClassVar["AnalyticsService | None"] = None _lock: ClassVar[threading.Lock] = threading.Lock() def __init__(self) -> None: self._queue: queue.Queue[AnalyticsEvent | None] = queue.Queue() self._thread: threading.Thread | None = None self._started = False self._analytics_enabled: bool | None = None self._analytics_checked = threading.Event() self._shutdown_requested = False self._instance_lock = threading.Lock() @classmethod def instance(cls) -> "AnalyticsService": """Get or create the singleton instance.""" if cls._instance is None: with cls._lock: if cls._instance is None: cls._instance = cls() atexit.register(cls._instance.shutdown) return cls._instance @classmethod def reset(cls) -> None: """Reset the singleton instance. 
Used for testing and fork safety.""" with cls._lock: if cls._instance is not None: cls._instance._shutdown_requested = True # Put None to wake up the thread if it's waiting try: cls._instance._queue.put_nowait(None) except queue.Full: pass cls._instance = None def enqueue(self, event: AnalyticsEvent) -> None: """ Queue an event for processing. Returns immediately (non-blocking). Quick local checks (DO_NOT_TRACK, CI) are performed here to avoid queuing events that will never be sent. """ # Quick local checks - these don't require network calls if not self._quick_enabled_check(): return # Start the background thread on first event self._ensure_started() # Queue the event (non-blocking) try: self._queue.put_nowait(event) except queue.Full: pass # Drop event silently if queue is full def _quick_enabled_check(self) -> bool: """ Quick non-blocking check of local telemetry settings. Returns False if DO_NOT_TRACK is set or we're in a CI environment. """ do_not_track = os.environ.get("DO_NOT_TRACK", "").lower() if do_not_track in ("1", "true", "yes"): return False if is_ci_environment(): return False return True def _ensure_started(self) -> None: """Start the background thread if not already running.""" with self._instance_lock: if not self._started: self._started = True self._thread = threading.Thread( target=self._run, name="prefect-analytics", daemon=True, ) self._thread.start() def _run(self) -> None: """Background thread main loop.""" try: # Check server analytics setting (5s timeout, off main thread) self._analytics_enabled = self._check_server_analytics() self._analytics_checked.set() if not self._analytics_enabled: # Drain the queue and exit - analytics are disabled self._drain_queue() return # Process events in a loop while not self._shutdown_requested: try: event = self._queue.get(timeout=1.0) if event is None: # Shutdown signal or spurious wakeup if self._shutdown_requested: break continue self._process_event(event) self._queue.task_done() except queue.Empty: 
continue except Exception: pass # Silently ignore processing errors except Exception: self._analytics_checked.set() def _check_server_analytics(self) -> bool: """ Check if the server has analytics enabled. When no API URL is configured, reads the local setting directly. When an API URL is set, queries the remote server using the Prefect client. """ from prefect.settings.context import get_current_settings settings = get_current_settings() api_url = settings.api.url if not api_url: return settings.server.analytics_enabled try: from prefect.client.orchestration import get_client with get_client(sync_client=True) as client: response = client.request("GET", "/admin/settings") response.raise_for_status() server_settings = response.json() return server_settings.get("server", {}).get("analytics_enabled", False) except Exception: return False def _drain_queue(self) -> None: """Drain all events from the queue without processing them.""" while True: try: self._queue.get_nowait() self._queue.task_done() except queue.Empty: break def _process_event(self, event: AnalyticsEvent) -> None: """Process a single analytics event.""" try: track_event( event_name=event.event_name, device_id=event.device_id, extra_properties=event.extra_properties, ) except Exception: pass # Silently ignore tracking errors def shutdown(self, timeout: float = 2.0) -> None: """ Shutdown the service, flushing pending events. Args: timeout: Maximum time to wait for pending events to be processed. """ self._shutdown_requested = True # Put None to wake up the thread if it's waiting try: self._queue.put_nowait(None) except queue.Full: pass if self._thread is not None and self._thread.is_alive(): self._thread.join(timeout=timeout) def wait_for_analytics_check(self, timeout: float | None = None) -> bool | None: """ Wait for the server analytics check to complete. Used primarily for testing. Returns the analytics enabled state, or None if the check hasn't completed within the timeout. 
""" if self._analytics_checked.wait(timeout=timeout): return self._analytics_enabled return None def _reset_after_fork() -> None: """Reset the service after a fork to avoid sharing state with parent.""" AnalyticsService.reset() # Register fork handler if available (Unix systems) if hasattr(os, "register_at_fork"): os.register_at_fork(after_in_child=_reset_after_fork)
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/_internal/analytics/service.py", "license": "Apache License 2.0", "lines": 193, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
PrefectHQ/prefect:tests/_internal/analytics/test_ci_detection.py
""" Tests for CI environment detection. """ import pytest class TestCIDetection: """Test CI environment detection.""" def test_not_ci_by_default(self, monkeypatch: pytest.MonkeyPatch): """Should not detect CI when no CI variables are set.""" # Clear all known CI variables from prefect._internal.analytics.ci_detection import CI_ENV_VARS for var in CI_ENV_VARS: monkeypatch.delenv(var, raising=False) from prefect._internal.analytics.ci_detection import is_ci_environment assert is_ci_environment() is False @pytest.mark.parametrize( "env_var", [ "CI", "GITHUB_ACTIONS", "GITLAB_CI", "JENKINS_URL", "TRAVIS", "CIRCLECI", "BUILDKITE", "TF_BUILD", "CODEBUILD_BUILD_ID", "BITBUCKET_COMMIT", "TEAMCITY_VERSION", "DRONE", "SEMAPHORE", "APPVEYOR", "BUDDY", "CI_NAME", ], ) def test_detects_ci_environments( self, env_var: str, monkeypatch: pytest.MonkeyPatch ): """Should detect various CI environments.""" # Clear all CI variables first from prefect._internal.analytics.ci_detection import CI_ENV_VARS for var in CI_ENV_VARS: monkeypatch.delenv(var, raising=False) # Set the specific CI variable monkeypatch.setenv(env_var, "true") from prefect._internal.analytics.ci_detection import is_ci_environment assert is_ci_environment() is True def test_ci_variable_with_any_value(self, monkeypatch: pytest.MonkeyPatch): """CI should be detected regardless of variable value.""" # Clear all CI variables first from prefect._internal.analytics.ci_detection import CI_ENV_VARS for var in CI_ENV_VARS: monkeypatch.delenv(var, raising=False) monkeypatch.setenv("CI", "1") from prefect._internal.analytics.ci_detection import is_ci_environment assert is_ci_environment() is True
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/_internal/analytics/test_ci_detection.py", "license": "Apache License 2.0", "lines": 56, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:tests/_internal/analytics/test_client.py
""" Tests for Amplitude client wrapper. """ from pathlib import Path from unittest.mock import MagicMock, patch class TestAmplitudeClient: """Test Amplitude client behavior.""" def test_event_properties_include_version(self, clean_telemetry_state: Path): """Event properties should include Prefect version.""" import prefect from prefect._internal.analytics.client import _get_event_properties props = _get_event_properties() assert props["prefect_version"] == prefect.__version__ def test_event_properties_include_python_version(self, clean_telemetry_state: Path): """Event properties should include Python version.""" import platform from prefect._internal.analytics.client import _get_event_properties props = _get_event_properties() assert props["python_version"] == platform.python_version() def test_event_properties_include_platform(self, clean_telemetry_state: Path): """Event properties should include platform.""" import platform from prefect._internal.analytics.client import _get_event_properties props = _get_event_properties() assert props["platform"] == platform.system() def test_event_properties_include_architecture(self, clean_telemetry_state: Path): """Event properties should include architecture.""" import platform from prefect._internal.analytics.client import _get_event_properties props = _get_event_properties() assert props["architecture"] == platform.machine() def test_track_event_no_api_key(self, clean_telemetry_state: Path): """track_event should return False when API key is not configured.""" from prefect._internal.analytics.client import track_event # The default API key is a placeholder, so initialization should fail result = track_event( event_name="first_sdk_import", device_id="test-device-id", ) assert result is False def test_track_event_with_mock_amplitude(self, clean_telemetry_state: Path): """track_event should send events when Amplitude is configured.""" import prefect._internal.analytics.client as client_module mock_client = MagicMock() with 
patch.object(client_module, "AMPLITUDE_API_KEY", "test-api-key"): # Reset initialization state client_module._initialized = False client_module._amplitude_client = None # Mock the Amplitude class to return our mock client with patch.object( client_module, "Amplitude", return_value=mock_client ) as mock_amplitude_class: result = client_module.track_event( event_name="first_sdk_import", device_id="test-device-id", ) assert result is True mock_amplitude_class.assert_called_once() mock_client.track.assert_called_once() def test_track_event_includes_extra_properties(self, clean_telemetry_state: Path): """track_event should include extra properties.""" import prefect._internal.analytics.client as client_module mock_client = MagicMock() captured_event = None def capture_track(event): nonlocal captured_event captured_event = event mock_client.track = capture_track with patch.object(client_module, "AMPLITUDE_API_KEY", "test-api-key"): # Reset initialization state client_module._initialized = False client_module._amplitude_client = None with patch.object(client_module, "Amplitude", return_value=mock_client): client_module.track_event( event_name="first_sdk_import", device_id="test-device-id", extra_properties={"custom_key": "custom_value"}, ) # Verify the event was created with custom property assert captured_event is not None assert captured_event.event_properties["custom_key"] == "custom_value" def test_track_event_handles_exceptions_silently(self, clean_telemetry_state: Path): """track_event should handle exceptions without raising.""" import prefect._internal.analytics.client as client_module with patch.object(client_module, "AMPLITUDE_API_KEY", "test-api-key"): # Reset initialization state client_module._initialized = False client_module._amplitude_client = None with patch.object( client_module, "Amplitude", side_effect=Exception("Test error") ): # Should not raise result = client_module.track_event( event_name="first_sdk_import", device_id="test-device-id", ) assert 
result is False
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/_internal/analytics/test_client.py", "license": "Apache License 2.0", "lines": 97, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:tests/_internal/analytics/test_device_id.py
""" Tests for device ID generation and persistence. """ import uuid from pathlib import Path import pytest class TestDeviceID: """Test device ID generation and persistence.""" def test_generates_uuid(self, clean_telemetry_state: Path): """Should generate a valid UUID.""" from prefect._internal.analytics.device_id import get_or_create_device_id device_id = get_or_create_device_id() # Should be a valid UUID uuid.UUID(device_id) def test_persists_device_id(self, clean_telemetry_state: Path): """Should persist device ID across calls.""" from prefect._internal.analytics.device_id import get_or_create_device_id device_id_1 = get_or_create_device_id() device_id_2 = get_or_create_device_id() assert device_id_1 == device_id_2 def test_stores_in_expected_location(self, clean_telemetry_state: Path): """Should store device ID in .sdk_telemetry directory.""" from prefect._internal.analytics.device_id import get_or_create_device_id device_id = get_or_create_device_id() device_id_file = clean_telemetry_state / "device_id" assert device_id_file.exists() assert device_id_file.read_text().strip() == device_id def test_creates_directory_if_missing(self, clean_telemetry_state: Path): """Should create the .sdk_telemetry directory if it doesn't exist.""" from prefect._internal.analytics.device_id import get_or_create_device_id # Ensure directory doesn't exist assert not clean_telemetry_state.exists() get_or_create_device_id() assert clean_telemetry_state.exists() assert (clean_telemetry_state / "device_id").exists() def test_regenerates_if_file_empty( self, clean_telemetry_state: Path, monkeypatch: pytest.MonkeyPatch ): """Should regenerate device ID if file is empty.""" from prefect._internal.analytics.device_id import get_or_create_device_id # Create empty device ID file clean_telemetry_state.mkdir(parents=True, exist_ok=True) device_id_file = clean_telemetry_state / "device_id" device_id_file.write_text("") device_id = get_or_create_device_id() # Should have generated a new valid 
UUID uuid.UUID(device_id) assert device_id_file.read_text().strip() == device_id
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/_internal/analytics/test_device_id.py", "license": "Apache License 2.0", "lines": 48, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:tests/_internal/analytics/test_events.py
""" Tests for SDK analytics event emission. """ from pathlib import Path from unittest.mock import MagicMock, patch import pytest from prefect._internal.analytics import emit_integration_event from prefect._internal.analytics.service import AnalyticsEvent class TestEventEmission: """Test event emission via emit_sdk_event.""" def test_emit_sdk_event_queues_event( self, clean_telemetry_state: Path, telemetry_enabled ): """emit_sdk_event should queue an event to the service.""" mock_service = MagicMock() with patch( "prefect._internal.analytics.emit.AnalyticsService.instance", return_value=mock_service, ): from prefect._internal.analytics import emit_sdk_event result = emit_sdk_event("first_sdk_import") assert result is True mock_service.enqueue.assert_called_once() event = mock_service.enqueue.call_args[0][0] assert isinstance(event, AnalyticsEvent) assert event.event_name == "first_sdk_import" def test_emit_sdk_event_with_extra_properties( self, clean_telemetry_state: Path, telemetry_enabled ): """emit_sdk_event should pass extra properties in the event.""" mock_service = MagicMock() with patch( "prefect._internal.analytics.emit.AnalyticsService.instance", return_value=mock_service, ): from prefect._internal.analytics import emit_sdk_event emit_sdk_event("first_sdk_import", extra_properties={"key": "value"}) event = mock_service.enqueue.call_args[0][0] assert event.extra_properties == {"key": "value"} def test_emit_sdk_event_handles_exceptions( self, clean_telemetry_state: Path, telemetry_enabled ): """emit_sdk_event should handle exceptions without raising.""" with patch( "prefect._internal.analytics.emit.get_or_create_device_id", side_effect=Exception("Test error"), ): from prefect._internal.analytics import emit_sdk_event # Should not raise result = emit_sdk_event("first_sdk_import") assert result is False def test_emit_sdk_event_includes_device_id( self, clean_telemetry_state: Path, telemetry_enabled ): """emit_sdk_event should include the device ID in the 
event.""" mock_service = MagicMock() with ( patch( "prefect._internal.analytics.emit.AnalyticsService.instance", return_value=mock_service, ), patch( "prefect._internal.analytics.emit.get_or_create_device_id", return_value="test-device-id", ), ): from prefect._internal.analytics import emit_sdk_event emit_sdk_event("first_sdk_import") event = mock_service.enqueue.call_args[0][0] assert event.device_id == "test-device-id" class TestAnalyticsInitialization: """Test analytics initialization.""" def test_initialize_analytics_skipped_in_non_tty( self, clean_telemetry_state: Path, telemetry_enabled ): """initialize_analytics should skip onboarding events in non-interactive terminals.""" # Patch where functions are imported in __init__.py with ( patch( "prefect._internal.analytics._is_interactive_terminal", return_value=False, ), patch("prefect._internal.analytics.emit_sdk_event") as mock_emit, patch( "prefect._internal.analytics.maybe_show_telemetry_notice" ) as mock_notice, ): import prefect._internal.analytics prefect._internal.analytics._telemetry_initialized = False from prefect._internal.analytics import initialize_analytics initialize_analytics() # Should not emit events or show notice in non-TTY mock_emit.assert_not_called() mock_notice.assert_not_called() def test_initialize_analytics_disabled_by_do_not_track( self, clean_telemetry_state: Path, monkeypatch: pytest.MonkeyPatch ): """initialize_analytics should not emit events when DO_NOT_TRACK is set.""" monkeypatch.setenv("DO_NOT_TRACK", "1") with patch("prefect._internal.analytics.emit_sdk_event") as mock_emit: import prefect._internal.analytics prefect._internal.analytics._telemetry_initialized = False from prefect._internal.analytics import initialize_analytics initialize_analytics() mock_emit.assert_not_called() def test_initialize_analytics_enabled( self, clean_telemetry_state: Path, telemetry_enabled ): """initialize_analytics should mark sdk_imported milestone when enabled.""" with ( patch( 
"prefect._internal.analytics._is_interactive_terminal", return_value=True, ), patch("prefect._internal.analytics.try_mark_milestone") as mock_milestone, patch("prefect._internal.analytics.maybe_show_telemetry_notice"), ): import prefect._internal.analytics prefect._internal.analytics._telemetry_initialized = False from prefect._internal.analytics import initialize_analytics initialize_analytics() mock_milestone.assert_called_once_with("first_sdk_import") def test_initialize_analytics_only_once( self, clean_telemetry_state: Path, telemetry_enabled ): """initialize_analytics should only run once.""" with ( patch( "prefect._internal.analytics._is_interactive_terminal", return_value=True, ), patch("prefect._internal.analytics.try_mark_milestone") as mock_milestone, patch("prefect._internal.analytics.maybe_show_telemetry_notice"), ): import prefect._internal.analytics prefect._internal.analytics._telemetry_initialized = False from prefect._internal.analytics import initialize_analytics initialize_analytics() initialize_analytics() # Second call # Should only be called once mock_milestone.assert_called_once() def test_initialize_analytics_shows_notice( self, clean_telemetry_state: Path, telemetry_enabled ): """initialize_analytics should show telemetry notice.""" with ( patch( "prefect._internal.analytics._is_interactive_terminal", return_value=True, ), patch("prefect._internal.analytics.try_mark_milestone"), patch( "prefect._internal.analytics.maybe_show_telemetry_notice" ) as mock_notice, ): import prefect._internal.analytics prefect._internal.analytics._telemetry_initialized = False from prefect._internal.analytics import initialize_analytics initialize_analytics() mock_notice.assert_called_once() def test_initialize_analytics_skips_events_for_existing_user( self, clean_telemetry_state: Path, telemetry_enabled ): """initialize_analytics should not emit events for existing users.""" # Create indicator of existing user prefect_home = clean_telemetry_state.parent 
(prefect_home / "profiles.toml").touch() with ( patch( "prefect._internal.analytics._is_interactive_terminal", return_value=True, ), patch("prefect._internal.analytics.try_mark_milestone") as mock_milestone, patch( "prefect._internal.analytics.maybe_show_telemetry_notice" ) as mock_notice, ): import prefect._internal.analytics prefect._internal.analytics._telemetry_initialized = False from prefect._internal.analytics import initialize_analytics initialize_analytics() # Should not try to mark milestone for existing users mock_milestone.assert_not_called() # Should not show notice for existing users mock_notice.assert_not_called() def test_initialize_analytics_emits_for_new_user( self, clean_telemetry_state: Path, telemetry_enabled ): """initialize_analytics should mark sdk_imported milestone for new users.""" # No existing user indicators - this is a new user with ( patch( "prefect._internal.analytics._is_interactive_terminal", return_value=True, ), patch("prefect._internal.analytics.try_mark_milestone") as mock_milestone, patch( "prefect._internal.analytics.maybe_show_telemetry_notice" ) as mock_notice, ): import prefect._internal.analytics prefect._internal.analytics._telemetry_initialized = False from prefect._internal.analytics import initialize_analytics initialize_analytics() # Should mark sdk_imported milestone for new users mock_milestone.assert_called_once_with("first_sdk_import") # Should show notice for new users mock_notice.assert_called_once() class TestIntegrationEventEmission: """Test emit_integration_event public API.""" def test_event_namespacing(self, clean_telemetry_state: Path, telemetry_enabled): """Event name should be namespaced with 'integration:event_name'.""" mock_service = MagicMock() with patch( "prefect._internal.analytics.emit.AnalyticsService.instance", return_value=mock_service, ): emit_integration_event("prefect-aws", "s3_block_created") mock_service.enqueue.assert_called_once() event = mock_service.enqueue.call_args[0][0] assert 
event.event_name == "prefect-aws:s3_block_created" def test_integration_property_included( self, clean_telemetry_state: Path, telemetry_enabled ): """Integration name should be included in extra_properties.""" mock_service = MagicMock() with patch( "prefect._internal.analytics.emit.AnalyticsService.instance", return_value=mock_service, ): emit_integration_event("prefect-gcp", "bigquery_task_run") event = mock_service.enqueue.call_args[0][0] assert event.extra_properties["integration"] == "prefect-gcp" def test_extra_properties_merged( self, clean_telemetry_state: Path, telemetry_enabled ): """Extra properties should be merged with integration property.""" mock_service = MagicMock() with patch( "prefect._internal.analytics.emit.AnalyticsService.instance", return_value=mock_service, ): emit_integration_event( "prefect-aws", "s3_block_created", extra_properties={"bucket": "my-bucket", "region": "us-east-1"}, ) event = mock_service.enqueue.call_args[0][0] assert event.extra_properties["integration"] == "prefect-aws" assert event.extra_properties["bucket"] == "my-bucket" assert event.extra_properties["region"] == "us-east-1" def test_returns_true_when_event_queued( self, clean_telemetry_state: Path, telemetry_enabled ): """Should return True when event is successfully queued.""" mock_service = MagicMock() with patch( "prefect._internal.analytics.emit.AnalyticsService.instance", return_value=mock_service, ): result = emit_integration_event("prefect-aws", "s3_block_created") assert result is True def test_handles_exceptions(self, clean_telemetry_state: Path, telemetry_enabled): """Should handle exceptions without raising.""" with patch( "prefect._internal.analytics.emit.get_or_create_device_id", side_effect=Exception("Test error"), ): result = emit_integration_event("prefect-aws", "s3_block_created") assert result is False
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/_internal/analytics/test_events.py", "license": "Apache License 2.0", "lines": 270, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:tests/_internal/analytics/test_milestones.py
""" Tests for milestone tracking. """ import json from pathlib import Path from unittest.mock import patch class TestMilestones: """Test milestone tracking.""" def test_milestone_not_reached_initially(self, clean_telemetry_state: Path): """Milestones should not be reached initially.""" from prefect._internal.analytics.milestones import has_reached_milestone assert has_reached_milestone("first_flow_defined") is False assert has_reached_milestone("first_flow_run") is False assert has_reached_milestone("first_deployment_created") is False assert has_reached_milestone("first_schedule_created") is False def test_mark_milestone(self, clean_telemetry_state: Path): """Should mark milestone as reached.""" from prefect._internal.analytics.milestones import ( has_reached_milestone, mark_milestone, ) assert has_reached_milestone("first_flow_defined") is False mark_milestone("first_flow_defined") assert has_reached_milestone("first_flow_defined") is True def test_milestones_persist(self, clean_telemetry_state: Path): """Milestones should persist to disk.""" from prefect._internal.analytics.milestones import mark_milestone mark_milestone("first_flow_defined") # Verify file exists milestones_file = clean_telemetry_state / "milestones.json" assert milestones_file.exists() # Verify contents milestones = json.loads(milestones_file.read_text()) assert milestones["first_flow_defined"] is True def test_try_mark_milestone_returns_true_first_time( self, clean_telemetry_state: Path, telemetry_disabled ): """try_mark_milestone should return True the first time.""" # Patch where the function is imported (in milestones.py), not where it's defined with patch( "prefect._internal.analytics.milestones._is_interactive_terminal", return_value=True, ): from prefect._internal.analytics.milestones import try_mark_milestone result = try_mark_milestone("first_flow_defined") assert result is True def test_try_mark_milestone_returns_false_second_time( self, clean_telemetry_state: Path, telemetry_disabled 
): """try_mark_milestone should return False if already marked.""" with patch( "prefect._internal.analytics.milestones._is_interactive_terminal", return_value=True, ): from prefect._internal.analytics.milestones import try_mark_milestone try_mark_milestone("first_flow_defined") result = try_mark_milestone("first_flow_defined") assert result is False def test_try_mark_milestone_emits_event( self, clean_telemetry_state: Path, telemetry_enabled ): """try_mark_milestone should emit an event.""" # Patch where the functions are imported (in milestones.py) with ( patch( "prefect._internal.analytics.milestones._is_interactive_terminal", return_value=True, ), patch("prefect._internal.analytics.milestones.emit_sdk_event") as mock_emit, ): from prefect._internal.analytics.milestones import try_mark_milestone try_mark_milestone("first_flow_defined") mock_emit.assert_called_once_with("first_flow_defined") def test_multiple_milestones_independent(self, clean_telemetry_state: Path): """Different milestones should be tracked independently.""" from prefect._internal.analytics.milestones import ( has_reached_milestone, mark_milestone, ) mark_milestone("first_flow_defined") assert has_reached_milestone("first_flow_defined") is True assert has_reached_milestone("first_flow_run") is False def test_try_mark_milestone_skipped_in_non_tty( self, clean_telemetry_state: Path, telemetry_enabled ): """try_mark_milestone should skip in non-interactive terminals.""" with ( patch( "prefect._internal.analytics.milestones._is_interactive_terminal", return_value=False, ), patch("prefect._internal.analytics.milestones.emit_sdk_event") as mock_emit, ): from prefect._internal.analytics.milestones import try_mark_milestone result = try_mark_milestone("first_flow_defined") # Should return False and not emit event in non-TTY assert result is False mock_emit.assert_not_called() class TestExistingUserDetection: """Test existing user detection and milestone pre-marking.""" def 
test_new_user_not_detected_as_existing(self, clean_telemetry_state: Path): """A fresh PREFECT_HOME should not be detected as an existing user.""" from prefect._internal.analytics.milestones import _is_existing_user assert _is_existing_user() is False def test_existing_user_detected_by_profiles(self, clean_telemetry_state: Path): """User with profiles.toml should be detected as existing.""" from prefect._internal.analytics.milestones import _is_existing_user # Create profiles.toml in PREFECT_HOME (parent of clean_telemetry_state) prefect_home = clean_telemetry_state.parent (prefect_home / "profiles.toml").touch() assert _is_existing_user() is True def test_existing_user_detected_by_database(self, clean_telemetry_state: Path): """User with .prefect.db should be detected as existing.""" from prefect._internal.analytics.milestones import _is_existing_user prefect_home = clean_telemetry_state.parent (prefect_home / ".prefect.db").touch() assert _is_existing_user() is True def test_existing_user_detected_by_storage(self, clean_telemetry_state: Path): """User with storage directory should be detected as existing.""" from prefect._internal.analytics.milestones import _is_existing_user prefect_home = clean_telemetry_state.parent (prefect_home / "storage").mkdir() assert _is_existing_user() is True def test_milestones_pre_marked_for_existing_user(self, clean_telemetry_state: Path): """All milestones should be pre-marked for existing users.""" from prefect._internal.analytics.milestones import ( ALL_MILESTONES, _mark_existing_user_milestones, has_reached_milestone, ) # Create indicator of existing user prefect_home = clean_telemetry_state.parent (prefect_home / "profiles.toml").touch() # Pre-mark milestones result = _mark_existing_user_milestones() assert result is True for milestone in ALL_MILESTONES: assert has_reached_milestone(milestone) is True def test_milestones_not_pre_marked_for_new_user(self, clean_telemetry_state: Path): """Milestones should not be pre-marked for 
new users.""" from prefect._internal.analytics.milestones import ( _mark_existing_user_milestones, has_reached_milestone, ) # No existing user indicators result = _mark_existing_user_milestones() assert result is False assert has_reached_milestone("first_flow_defined") is False def test_milestones_only_pre_marked_once(self, clean_telemetry_state: Path): """Pre-marking should only happen once (when telemetry dir doesn't exist).""" from prefect._internal.analytics.milestones import ( _mark_existing_user_milestones, ) # Create indicator of existing user prefect_home = clean_telemetry_state.parent (prefect_home / "profiles.toml").touch() # First call should pre-mark result1 = _mark_existing_user_milestones() assert result1 is True # Second call should skip (telemetry dir exists now) result2 = _mark_existing_user_milestones() assert result2 is False
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/_internal/analytics/test_milestones.py", "license": "Apache License 2.0", "lines": 162, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:tests/_internal/analytics/test_notice.py
""" Tests for telemetry notice display. """ from pathlib import Path from unittest.mock import patch class TestTelemetryNotice: """Test telemetry notice display.""" def test_notice_shown_in_tty(self, clean_telemetry_state: Path, capsys): """Notice should be shown in TTY.""" with patch("sys.stdout.isatty", return_value=True): from prefect._internal.analytics.notice import maybe_show_telemetry_notice maybe_show_telemetry_notice() captured = capsys.readouterr() assert "Prefect collects anonymous usage data" in captured.err def test_notice_not_shown_in_non_tty(self, clean_telemetry_state: Path, capsys): """Notice should not be shown in non-TTY.""" with patch("sys.stdout.isatty", return_value=False): from prefect._internal.analytics.notice import maybe_show_telemetry_notice maybe_show_telemetry_notice() captured = capsys.readouterr() assert captured.err == "" def test_notice_shown_only_once(self, clean_telemetry_state: Path, capsys): """Notice should only be shown once.""" with patch("sys.stdout.isatty", return_value=True): from prefect._internal.analytics.notice import maybe_show_telemetry_notice maybe_show_telemetry_notice() capsys.readouterr() # Clear first output maybe_show_telemetry_notice() # Second call captured = capsys.readouterr() assert captured.err == "" def test_notice_marker_created(self, clean_telemetry_state: Path): """Notice marker file should be created.""" with patch("sys.stdout.isatty", return_value=True): from prefect._internal.analytics.notice import maybe_show_telemetry_notice maybe_show_telemetry_notice() marker_file = clean_telemetry_state / "notice_shown" assert marker_file.exists() def test_notice_contains_opt_out_info(self, clean_telemetry_state: Path, capsys): """Notice should contain opt-out information.""" with patch("sys.stdout.isatty", return_value=True): from prefect._internal.analytics.notice import maybe_show_telemetry_notice maybe_show_telemetry_notice() captured = capsys.readouterr() assert "PREFECT_SERVER_ANALYTICS_ENABLED=false" 
in captured.err assert "DO_NOT_TRACK=1" in captured.err def test_notice_contains_docs_link(self, clean_telemetry_state: Path, capsys): """Notice should contain link to documentation.""" with patch("sys.stdout.isatty", return_value=True): from prefect._internal.analytics.notice import maybe_show_telemetry_notice maybe_show_telemetry_notice() captured = capsys.readouterr() assert "https://docs.prefect.io/concepts/telemetry" in captured.err
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/_internal/analytics/test_notice.py", "license": "Apache License 2.0", "lines": 52, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:tests/_internal/analytics/test_service.py
""" Tests for the background analytics service. """ import time from pathlib import Path from unittest.mock import patch import pytest from prefect._internal.analytics.service import AnalyticsEvent, AnalyticsService class TestAnalyticsService: """Test AnalyticsService behavior.""" def test_singleton_instance(self, clean_telemetry_state: Path): """Service should be a singleton.""" service1 = AnalyticsService.instance() service2 = AnalyticsService.instance() assert service1 is service2 def test_reset_clears_instance(self, clean_telemetry_state: Path): """Reset should clear the singleton instance.""" service1 = AnalyticsService.instance() AnalyticsService.reset() service2 = AnalyticsService.instance() assert service1 is not service2 def test_enqueue_returns_immediately(self, clean_telemetry_state: Path): """enqueue() should return immediately (non-blocking).""" # Mock server check to be slow with patch.object( AnalyticsService, "_check_server_analytics", side_effect=lambda self: time.sleep(10), ): service = AnalyticsService.instance() start = time.monotonic() event = AnalyticsEvent( event_name="test_event", device_id="test-device", ) service.enqueue(event) elapsed = time.monotonic() - start # Should return almost immediately (< 100ms) assert elapsed < 0.1 # Cleanup service.shutdown(timeout=0.1) def test_quick_enabled_check_do_not_track( self, clean_telemetry_state: Path, monkeypatch: pytest.MonkeyPatch ): """Quick check should return False when DO_NOT_TRACK is set.""" monkeypatch.setenv("DO_NOT_TRACK", "1") service = AnalyticsService.instance() assert service._quick_enabled_check() is False def test_quick_enabled_check_ci_environment( self, clean_telemetry_state: Path, monkeypatch: pytest.MonkeyPatch ): """Quick check should return False in CI environment.""" monkeypatch.setenv("CI", "true") service = AnalyticsService.instance() assert service._quick_enabled_check() is False def test_quick_enabled_check_enabled( self, clean_telemetry_state: Path, monkeypatch: 
pytest.MonkeyPatch ): """Quick check should return True when no local disables are set.""" monkeypatch.delenv("DO_NOT_TRACK", raising=False) monkeypatch.delenv("CI", raising=False) # Clear all CI environment variables from prefect._internal.analytics.ci_detection import CI_ENV_VARS for var in CI_ENV_VARS: monkeypatch.delenv(var, raising=False) service = AnalyticsService.instance() assert service._quick_enabled_check() is True def test_events_discarded_when_analytics_disabled( self, clean_telemetry_state: Path, monkeypatch: pytest.MonkeyPatch ): """Events should be discarded when server analytics is disabled.""" monkeypatch.delenv("DO_NOT_TRACK", raising=False) monkeypatch.delenv("CI", raising=False) from prefect._internal.analytics.ci_detection import CI_ENV_VARS for var in CI_ENV_VARS: monkeypatch.delenv(var, raising=False) with patch( "prefect._internal.analytics.service.AnalyticsService._check_server_analytics", return_value=False, ): with patch("prefect._internal.analytics.service.track_event") as mock_track: service = AnalyticsService.instance() event = AnalyticsEvent( event_name="test_event", device_id="test-device", ) service.enqueue(event) # Wait for analytics check to complete result = service.wait_for_analytics_check(timeout=2.0) assert result is False # Give the service time to drain the queue time.sleep(0.1) # track_event should not have been called mock_track.assert_not_called() service.shutdown(timeout=0.5) def test_events_forwarded_when_analytics_enabled( self, clean_telemetry_state: Path, monkeypatch: pytest.MonkeyPatch ): """Events should be forwarded to track_event when analytics is enabled.""" monkeypatch.delenv("DO_NOT_TRACK", raising=False) monkeypatch.delenv("CI", raising=False) from prefect._internal.analytics.ci_detection import CI_ENV_VARS for var in CI_ENV_VARS: monkeypatch.delenv(var, raising=False) with patch( "prefect._internal.analytics.service.AnalyticsService._check_server_analytics", return_value=True, ): with 
patch("prefect._internal.analytics.service.track_event") as mock_track: service = AnalyticsService.instance() event = AnalyticsEvent( event_name="test_event", device_id="test-device", extra_properties={"key": "value"}, ) service.enqueue(event) # Wait for analytics check to complete service.wait_for_analytics_check(timeout=2.0) # Give the service time to process the event time.sleep(0.2) mock_track.assert_called_once_with( event_name="test_event", device_id="test-device", extra_properties={"key": "value"}, ) service.shutdown(timeout=0.5) def test_server_check_reads_local_setting_when_no_api_url( self, clean_telemetry_state: Path ): """When no API URL is configured, should read the local analytics setting.""" service = AnalyticsService() mock_settings = type( "Settings", (), { "api": type("API", (), {"url": None})(), "server": type("Server", (), {"analytics_enabled": True})(), }, )() with patch( "prefect.settings.context.get_current_settings", return_value=mock_settings, ): result = service._check_server_analytics() assert result is True def test_server_check_reads_local_setting_disabled_when_no_api_url( self, clean_telemetry_state: Path ): """When no API URL is configured and analytics is disabled locally, should return False.""" service = AnalyticsService() mock_settings = type( "Settings", (), { "api": type("API", (), {"url": None})(), "server": type("Server", (), {"analytics_enabled": False})(), }, )() with patch( "prefect.settings.context.get_current_settings", return_value=mock_settings, ): result = service._check_server_analytics() assert result is False def test_server_check_returns_true_when_analytics_enabled( self, clean_telemetry_state: Path ): """Server analytics check should return True when server has analytics enabled.""" service = AnalyticsService() mock_settings = type( "Settings", (), { "api": type("API", (), {"url": "http://localhost:4200/api"})(), }, )() mock_response = type( "Response", (), { "raise_for_status": lambda self: None, "json": lambda 
self: {"server": {"analytics_enabled": True}}, }, )() mock_client = type( "Client", (), { "request": lambda self, method, path: mock_response, "__enter__": lambda self: self, "__exit__": lambda self, *args: None, }, )() with ( patch( "prefect.settings.context.get_current_settings", return_value=mock_settings, ), patch( "prefect.client.orchestration.get_client", return_value=mock_client, ), ): result = service._check_server_analytics() assert result is True def test_server_check_returns_false_on_error(self, clean_telemetry_state: Path): """Server analytics check should return False on error.""" service = AnalyticsService() mock_settings = type( "Settings", (), { "api": type("API", (), {"url": "http://localhost:4200/api"})(), }, )() with ( patch( "prefect.settings.context.get_current_settings", return_value=mock_settings, ), patch( "prefect.client.orchestration.get_client", side_effect=Exception("Connection error"), ), ): result = service._check_server_analytics() assert result is False def test_server_check_returns_false_when_analytics_disabled( self, clean_telemetry_state: Path ): """Server analytics check should return False when server has analytics disabled.""" service = AnalyticsService() mock_settings = type( "Settings", (), { "api": type("API", (), {"url": "http://localhost:4200/api"})(), }, )() mock_response = type( "Response", (), { "raise_for_status": lambda self: None, "json": lambda self: {"server": {"analytics_enabled": False}}, }, )() mock_client = type( "Client", (), { "request": lambda self, method, path: mock_response, "__enter__": lambda self: self, "__exit__": lambda self, *args: None, }, )() with ( patch( "prefect.settings.context.get_current_settings", return_value=mock_settings, ), patch( "prefect.client.orchestration.get_client", return_value=mock_client, ), ): result = service._check_server_analytics() assert result is False def test_shutdown_flushes_pending_events( self, clean_telemetry_state: Path, monkeypatch: pytest.MonkeyPatch ): 
"""Shutdown should process pending events.""" monkeypatch.delenv("DO_NOT_TRACK", raising=False) monkeypatch.delenv("CI", raising=False) from prefect._internal.analytics.ci_detection import CI_ENV_VARS for var in CI_ENV_VARS: monkeypatch.delenv(var, raising=False) processed_events = [] def capture_track(**kwargs): processed_events.append(kwargs) with patch( "prefect._internal.analytics.service.AnalyticsService._check_server_analytics", return_value=True, ): with patch( "prefect._internal.analytics.service.track_event", side_effect=capture_track, ): service = AnalyticsService.instance() # Queue multiple events for i in range(3): event = AnalyticsEvent( event_name=f"test_event_{i}", device_id="test-device", ) service.enqueue(event) # Wait for processing service.wait_for_analytics_check(timeout=2.0) time.sleep(0.3) # Shutdown should complete service.shutdown(timeout=2.0) # All events should have been processed assert len(processed_events) == 3 def test_thread_starts_lazily(self, clean_telemetry_state: Path): """Background thread should only start when first event is queued.""" service = AnalyticsService.instance() # Thread should not be started yet assert service._thread is None assert service._started is False def test_wait_for_analytics_check_timeout(self, clean_telemetry_state: Path): """wait_for_analytics_check should return None on timeout.""" service = AnalyticsService() # Don't start the thread result = service.wait_for_analytics_check(timeout=0.01) assert result is None class TestAnalyticsEvent: """Test AnalyticsEvent dataclass.""" def test_event_creation(self): """Event should store all fields.""" event = AnalyticsEvent( event_name="test_event", device_id="device-123", extra_properties={"key": "value"}, ) assert event.event_name == "test_event" assert event.device_id == "device-123" assert event.extra_properties == {"key": "value"} def test_event_optional_properties(self): """extra_properties should be optional.""" event = AnalyticsEvent( 
event_name="test_event", device_id="device-123", ) assert event.extra_properties is None class TestForkSafety: """Test fork safety behavior.""" def test_reset_registered_for_fork(self): """_reset_after_fork should be registered as a fork handler.""" import os if hasattr(os, "register_at_fork"): # We can't easily test that it's registered, but we can verify # the function exists and can be called from prefect._internal.analytics.service import _reset_after_fork # Should not raise _reset_after_fork()
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/_internal/analytics/test_service.py", "license": "Apache License 2.0", "lines": 345, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:src/integrations/prefect-kubernetes/tests/test_async_dispatch.py
"""Tests for async_dispatch migration in prefect-kubernetes. These tests verify the critical behavior from issue #15008 where @sync_compatible would incorrectly return coroutines in sync context. """ from typing import Coroutine import pytest from prefect_kubernetes.jobs import KubernetesJob, KubernetesJobRun from prefect import flow @pytest.fixture def mock_kubernetes_job( kubernetes_credentials, mock_create_namespaced_job, mock_read_namespaced_job, mock_list_namespaced_pod, read_pod_logs, mock_delete_namespaced_job, ): """Create a KubernetesJob with mocked kubernetes API calls.""" return KubernetesJob( credentials=kubernetes_credentials, v1_job={ "apiVersion": "batch/v1", "kind": "Job", "metadata": {"name": "test-job"}, "spec": { "template": { "spec": { "containers": [{"name": "test", "image": "test:latest"}], "restartPolicy": "Never", } } }, }, ) class TestKubernetesJobTriggerAsyncDispatch: """Tests for KubernetesJob.trigger migrated from @sync_compatible to @async_dispatch.""" def test_trigger_sync_context_returns_job_run_not_coroutine( self, mock_kubernetes_job ): """trigger must return KubernetesJobRun (not coroutine) in sync context. This is a critical regression test for issues #14712 and #14625. 
""" @flow def test_flow(): result = mock_kubernetes_job.trigger() # The result inside the flow should be the actual value, not a coroutine assert not isinstance(result, Coroutine), "sync context returned coroutine" return result job_run = test_flow() assert isinstance(job_run, KubernetesJobRun) async def test_trigger_async_context_works(self, mock_kubernetes_job): """trigger should work correctly in async context.""" @flow async def test_flow(): # In async context, @async_dispatch dispatches to async version result = await mock_kubernetes_job.atrigger() return result job_run = await test_flow() assert isinstance(job_run, KubernetesJobRun) def test_atrigger_is_available(self, mock_kubernetes_job): """atrigger should be available for direct async usage.""" assert hasattr(mock_kubernetes_job, "atrigger") assert callable(mock_kubernetes_job.atrigger) class TestKubernetesJobRunWaitForCompletionAsyncDispatch: """Tests for KubernetesJobRun.wait_for_completion async_dispatch migration.""" def test_wait_for_completion_sync_context_returns_none_not_coroutine( self, mock_kubernetes_job ): """wait_for_completion must not return coroutine in sync context. This is a critical regression test for issues #14712 and #14625. 
""" @flow def test_flow(): job_run = mock_kubernetes_job.trigger() result = job_run.wait_for_completion() # The result should not be a coroutine assert not isinstance(result, Coroutine), "sync context returned coroutine" return job_run job_run = test_flow() assert isinstance(job_run, KubernetesJobRun) async def test_wait_for_completion_async_context_works(self, mock_kubernetes_job): """wait_for_completion should work correctly in async context.""" @flow async def test_flow(): job_run = await mock_kubernetes_job.atrigger() # In async context, @async_dispatch dispatches to async version await job_run.await_for_completion() return job_run job_run = await test_flow() assert isinstance(job_run, KubernetesJobRun) def test_await_for_completion_is_available(self, mock_kubernetes_job): """await_for_completion should be available for direct async usage.""" @flow def test_flow(): job_run = mock_kubernetes_job.trigger() assert hasattr(job_run, "await_for_completion") assert callable(job_run.await_for_completion) job_run.wait_for_completion() test_flow() class TestKubernetesJobRunFetchResultAsyncDispatch: """Tests for KubernetesJobRun.fetch_result async_dispatch migration.""" def test_fetch_result_sync_context_returns_value_not_coroutine( self, mock_kubernetes_job ): """fetch_result must return dict (not coroutine) in sync context. This is a critical regression test for issues #14712 and #14625. 
""" @flow def test_flow(): job_run = mock_kubernetes_job.trigger() job_run.wait_for_completion() result = job_run.fetch_result() # The result should not be a coroutine assert not isinstance(result, Coroutine), "sync context returned coroutine" return result result = test_flow() assert isinstance(result, dict) async def test_fetch_result_async_context_works(self, mock_kubernetes_job): """fetch_result should work correctly in async context.""" @flow async def test_flow(): job_run = await mock_kubernetes_job.atrigger() await job_run.await_for_completion() # In async context, @async_dispatch dispatches to async version result = await job_run.afetch_result() return result result = await test_flow() assert isinstance(result, dict) def test_afetch_result_is_available(self, mock_kubernetes_job): """afetch_result should be available for direct async usage.""" @flow def test_flow(): job_run = mock_kubernetes_job.trigger() job_run.wait_for_completion() assert hasattr(job_run, "afetch_result") assert callable(job_run.afetch_result) test_flow()
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/integrations/prefect-kubernetes/tests/test_async_dispatch.py", "license": "Apache License 2.0", "lines": 137, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:src/integrations/prefect-shell/tests/test_async_dispatch.py
"""Tests for async_dispatch migration in prefect-shell. These tests verify the critical behavior from issue #15008 where @sync_compatible would incorrectly return coroutines in sync context. """ import sys from typing import Coroutine import pytest from prefect_shell.commands import ShellOperation, ShellProcess from prefect import flow if sys.platform == "win32": pytest.skip(reason="see test_commands_windows.py", allow_module_level=True) class TestShellOperationTriggerAsyncDispatch: """Tests for ShellOperation.trigger migrated from @sync_compatible to @async_dispatch.""" def test_trigger_sync_context_returns_process_not_coroutine(self): """trigger must return ShellProcess (not coroutine) in sync context. This is a critical regression test for issues #14712 and #14625. """ op = ShellOperation(commands=["echo 'test'"]) @flow def test_flow(): result = op.trigger() # The result inside the flow should be the actual value, not a coroutine assert not isinstance(result, Coroutine), "sync context returned coroutine" return result with op: process = test_flow() assert isinstance(process, ShellProcess) process.wait_for_completion() async def test_trigger_async_context_works(self): """trigger should work correctly in async context.""" @flow async def test_flow(): op = ShellOperation(commands=["echo 'test'"]) async with op: # In async context, @async_dispatch dispatches to async version result = await op.atrigger() await result.await_for_completion() return result process = await test_flow() assert isinstance(process, ShellProcess) def test_atrigger_is_available(self): """atrigger should be available for direct async usage.""" op = ShellOperation(commands=["echo 'test'"]) assert hasattr(op, "atrigger") assert callable(op.atrigger) class TestShellOperationRunAsyncDispatch: """Tests for ShellOperation.run migrated from @sync_compatible to @async_dispatch.""" def test_run_sync_context_returns_value_not_coroutine(self): """run must return list (not coroutine) in sync context. 
This is a critical regression test for issues #14712 and #14625. """ op = ShellOperation(commands=["echo 'test output'"]) @flow def test_flow(): result = op.run() # The result should not be a coroutine assert not isinstance(result, Coroutine), "sync context returned coroutine" return result result = test_flow() assert isinstance(result, list) assert "test output" in result async def test_run_async_context_works(self): """run should work correctly in async context.""" @flow async def test_flow(): op = ShellOperation(commands=["echo 'async test'"]) # In async context, @async_dispatch dispatches to async version result = await op.arun() return result result = await test_flow() assert isinstance(result, list) assert "async test" in result def test_arun_is_available(self): """arun should be available for direct async usage.""" op = ShellOperation(commands=["echo 'test'"]) assert hasattr(op, "arun") assert callable(op.arun) class TestShellProcessWaitForCompletionAsyncDispatch: """Tests for ShellProcess.wait_for_completion async_dispatch migration.""" def test_wait_for_completion_sync_context_returns_none_not_coroutine(self): """wait_for_completion must not return coroutine in sync context. This is a critical regression test for issues #14712 and #14625. 
""" op = ShellOperation(commands=["echo 'test'"]) @flow def test_flow(): with op: process = op.trigger() result = process.wait_for_completion() # The result should not be a coroutine assert not isinstance(result, Coroutine), ( "sync context returned coroutine" ) return process process = test_flow() assert isinstance(process, ShellProcess) def test_await_for_completion_is_available(self): """await_for_completion should be available for direct async usage.""" op = ShellOperation(commands=["echo 'test'"]) @flow def test_flow(): with op: process = op.trigger() assert hasattr(process, "await_for_completion") assert callable(process.await_for_completion) process.wait_for_completion() test_flow() class TestShellProcessFetchResultAsyncDispatch: """Tests for ShellProcess.fetch_result async_dispatch migration.""" def test_fetch_result_sync_context_returns_value_not_coroutine(self): """fetch_result must return list (not coroutine) in sync context. This is a critical regression test for issues #14712 and #14625. """ op = ShellOperation(commands=["echo 'result test'"]) @flow def test_flow(): with op: process = op.trigger() process.wait_for_completion() result = process.fetch_result() # The result should not be a coroutine assert not isinstance(result, Coroutine), ( "sync context returned coroutine" ) return result result = test_flow() assert isinstance(result, list) assert "result test" in result def test_afetch_result_is_available(self): """afetch_result should be available for direct async usage.""" op = ShellOperation(commands=["echo 'test'"]) @flow def test_flow(): with op: process = op.trigger() process.wait_for_completion() assert hasattr(process, "afetch_result") assert callable(process.afetch_result) test_flow() class TestShellOperationCloseAsyncDispatch: """Tests for ShellOperation.close async_dispatch migration.""" def test_close_sync_context_returns_none_not_coroutine(self): """close must not return coroutine in sync context. 
This is a critical regression test for issues #14712 and #14625. """ op = ShellOperation(commands=["echo 'test'"]) @flow def test_flow(): result = op.close() # The result should not be a coroutine assert not isinstance(result, Coroutine), "sync context returned coroutine" return result test_flow() def test_aclose_is_available(self): """aclose should be available for direct async usage.""" op = ShellOperation(commands=["echo 'test'"]) assert hasattr(op, "aclose") assert callable(op.aclose)
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/integrations/prefect-shell/tests/test_async_dispatch.py", "license": "Apache License 2.0", "lines": 158, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:src/prefect/_internal/compatibility/backports.py
"""Functionality we want to use from later Python versions.""" try: import tomllib # 3.11+ except ImportError: import toml as tomllib # fallback on Python <3.11 tomllib.TOMLDecodeError = tomllib.TomlDecodeError __all__ = ["tomllib"]
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/_internal/compatibility/backports.py", "license": "Apache License 2.0", "lines": 7, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
PrefectHQ/prefect:tests/_internal/compatibility/test_backports.py
import sys import pytest from prefect._internal.compatibility.backports import tomllib min_python_version = (3, 11) @pytest.fixture(scope="session") def toml1_0_content() -> str: return """\ [dependency-groups] dev = [ "dummy-dependency~=1.2.0", {include-group = "dummy-group-name"}, ] """ @pytest.mark.skipif( sys.version_info < min_python_version, reason=f"Test requires Python version {min_python_version[0]}.{min_python_version[1]} or higher", ) def test_can_parse_toml1_0_on_python311_plus(toml1_0_content): assert tomllib.loads(toml1_0_content) @pytest.mark.skipif( sys.version_info >= min_python_version, reason=f"Test requires Python version less than {min_python_version[0]}.{min_python_version[1]}", ) def test_error_parsing_toml1_0_before_python311(toml1_0_content): with pytest.raises(IndexError): tomllib.loads(toml1_0_content)
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/_internal/compatibility/test_backports.py", "license": "Apache License 2.0", "lines": 26, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:src/prefect/_internal/urls.py
from urllib.parse import urlparse, urlunparse def strip_auth_from_url(url: str) -> str: """ Remove authentication credentials (username/password) from a URL. Useful for sanitizing URLs before including them in error messages or logs to avoid leaking secrets. """ parsed = urlparse(url) if not parsed.hostname: return url netloc = parsed.hostname if parsed.port: netloc = f"{netloc}:{parsed.port}" return urlunparse( ( parsed.scheme, netloc, parsed.path, parsed.params, parsed.query, parsed.fragment, ) )
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/_internal/urls.py", "license": "Apache License 2.0", "lines": 23, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
PrefectHQ/prefect:tests/_internal/test_urls.py
import pytest from prefect._internal.urls import strip_auth_from_url @pytest.mark.parametrize( "url,expected", [ # URL with username and password ( "https://user:pass@github.com/org/repo.git", "https://github.com/org/repo.git", ), # URL with only token (as username) ( "https://token@github.com/org/repo.git", "https://github.com/org/repo.git", ), # URL without auth - unchanged ( "https://github.com/org/repo.git", "https://github.com/org/repo.git", ), # URL with port ( "https://user:pass@gitlab.example.com:8443/org/repo.git", "https://gitlab.example.com:8443/org/repo.git", ), # HTTP URL ( "http://token@example.com/repo", "http://example.com/repo", ), # URL with query string ( "https://user:pass@example.com/path?query=1", "https://example.com/path?query=1", ), # URL with fragment ( "https://user:pass@example.com/path#section", "https://example.com/path#section", ), # Empty/malformed URL - returned as-is ("", ""), ("not-a-url", "not-a-url"), ], ) def test_strip_auth_from_url(url: str, expected: str): assert strip_auth_from_url(url) == expected
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/_internal/test_urls.py", "license": "Apache License 2.0", "lines": 47, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:tests/testing/test_fixtures.py
""" Tests for prefect.testing.fixtures module. """ import subprocess from contextlib import asynccontextmanager from unittest import mock import pytest from prefect.testing import fixtures class TestHostedApiServerWindowsProcessHandling: """Tests for Windows-specific process handling in hosted_api_server fixture.""" @pytest.mark.unix async def test_unix_hosted_api_server_does_not_set_creation_flag( self, monkeypatch: pytest.MonkeyPatch ): """ On Unix-like systems, the hosted_api_server fixture should NOT set creationflags since it's a Windows-only parameter. """ captured_kwargs: dict = {} mock_process = mock.MagicMock() mock_process.returncode = 0 mock_process.pid = 12345 mock_process.terminate = mock.MagicMock() @asynccontextmanager async def mock_open_process(*args, **kwargs): captured_kwargs.update(kwargs) yield mock_process monkeypatch.setattr(fixtures, "open_process", mock_open_process) # Mock the HTTP client to simulate server ready mock_response = mock.MagicMock() mock_response.status_code = 200 mock_response.raise_for_status = mock.MagicMock() mock_client = mock.MagicMock() mock_client.get = mock.AsyncMock(return_value=mock_response) mock_client.__aenter__ = mock.AsyncMock(return_value=mock_client) mock_client.__aexit__ = mock.AsyncMock(return_value=None) monkeypatch.setattr("httpx.AsyncClient", lambda: mock_client) # Call the underlying generator function directly (not as a fixture) gen = fixtures.hosted_api_server.__wrapped__( unused_tcp_port_factory=lambda: 8000, test_database_connection_url=None, ) await gen.__anext__() # Start the fixture # Verify open_process was called without creationflags assert "creationflags" not in captured_kwargs @pytest.mark.windows async def test_windows_hosted_api_server_sets_process_group_creation_flag( self, monkeypatch: pytest.MonkeyPatch ): """ On Windows, the hosted_api_server fixture should create the uvicorn process with CREATE_NEW_PROCESS_GROUP flag so that the entire process tree can be terminated properly during 
teardown. """ captured_kwargs: dict = {} mock_process = mock.MagicMock() mock_process.returncode = 0 mock_process.pid = 12345 @asynccontextmanager async def mock_open_process(*args, **kwargs): captured_kwargs.update(kwargs) yield mock_process monkeypatch.setattr(fixtures, "open_process", mock_open_process) # Mock the HTTP client to simulate server ready mock_response = mock.MagicMock() mock_response.status_code = 200 mock_response.raise_for_status = mock.MagicMock() mock_client = mock.MagicMock() mock_client.get = mock.AsyncMock(return_value=mock_response) mock_client.__aenter__ = mock.AsyncMock(return_value=mock_client) mock_client.__aexit__ = mock.AsyncMock(return_value=None) monkeypatch.setattr("httpx.AsyncClient", lambda: mock_client) # Call the underlying generator function directly (not as a fixture) gen = fixtures.hosted_api_server.__wrapped__( unused_tcp_port_factory=lambda: 8000, test_database_connection_url=None, ) await gen.__anext__() # Start the fixture # Verify open_process was called with creationflags assert "creationflags" in captured_kwargs assert captured_kwargs["creationflags"] == subprocess.CREATE_NEW_PROCESS_GROUP @pytest.mark.unix async def test_unix_hosted_api_server_uses_terminate_for_shutdown( self, monkeypatch: pytest.MonkeyPatch ): """ On Unix-like systems, the hosted_api_server fixture should use process.terminate() for graceful shutdown. 
""" mock_process = mock.MagicMock() mock_process.returncode = None # Process still running initially mock_process.pid = 12345 mock_process.terminate = mock.MagicMock() mock_process.kill = mock.MagicMock() # Make terminate set returncode to simulate process exit def set_returncode(): mock_process.returncode = 0 mock_process.terminate.side_effect = set_returncode @asynccontextmanager async def mock_open_process(*args, **kwargs): yield mock_process monkeypatch.setattr(fixtures, "open_process", mock_open_process) # Mock the HTTP client to simulate server ready mock_response = mock.MagicMock() mock_response.status_code = 200 mock_response.raise_for_status = mock.MagicMock() mock_client = mock.MagicMock() mock_client.get = mock.AsyncMock(return_value=mock_response) mock_client.__aenter__ = mock.AsyncMock(return_value=mock_client) mock_client.__aexit__ = mock.AsyncMock(return_value=None) monkeypatch.setattr("httpx.AsyncClient", lambda: mock_client) # Call the underlying generator function directly (not as a fixture) gen = fixtures.hosted_api_server.__wrapped__( unused_tcp_port_factory=lambda: 8000, test_database_connection_url=None, ) await gen.__anext__() # Start the fixture try: await gen.__anext__() # Trigger teardown except StopAsyncIteration: pass # Verify process.terminate() was called mock_process.terminate.assert_called_once()
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/testing/test_fixtures.py", "license": "Apache License 2.0", "lines": 123, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:tests/client/schemas/test_deployments.py
from __future__ import annotations from prefect.client.schemas.objects import DeploymentStatus from prefect.client.schemas.responses import DeploymentResponse def test_deployment_response_accepts_disabled_status(): """DeploymentResponse accepts DISABLED status for Cloud compatibility.""" response = DeploymentResponse.model_validate( { "id": "00000000-0000-0000-0000-000000000000", "name": "test", "flow_id": "00000000-0000-0000-0000-000000000001", "status": "DISABLED", } ) assert response.status == DeploymentStatus.DISABLED
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/client/schemas/test_deployments.py", "license": "Apache License 2.0", "lines": 14, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:src/prefect/cli/sdk.py
""" SDK command — native cyclopts implementation. Generate typed Python SDK from workspace deployments. """ from pathlib import Path from typing import Annotated, Optional import cyclopts import prefect.cli._app as _cli from prefect.cli._utilities import ( exit_with_error, exit_with_success, with_cli_exception_handling, ) from prefect.client.orchestration import get_client sdk_app: cyclopts.App = cyclopts.App( name="sdk", help="Manage Prefect SDKs. (beta)", version_flags=[], help_flags=["--help"], ) def _is_valid_identifier(name: str) -> bool: """Check if a string is a valid Python identifier.""" return name.isidentifier() @sdk_app.command(name="generate") @with_cli_exception_handling async def generate( *, output: Annotated[ Path, cyclopts.Parameter( "--output", alias="-o", help="Output file path for the generated SDK.", ), ], flow: Annotated[ Optional[list[str]], cyclopts.Parameter( "--flow", alias="-f", help="Filter to specific flow(s). Can be specified multiple times.", ), ] = None, deployment: Annotated[ Optional[list[str]], cyclopts.Parameter( "--deployment", alias="-d", help=( "Filter to specific deployment(s). Can be specified multiple times. " "Use 'flow-name/deployment-name' format for exact matching." ), ), ] = None, ) -> None: """(beta) Generate a typed Python SDK from workspace deployments.""" from prefect._sdk.generator import ( APIConnectionError, AuthenticationError, GenerationResult, NoDeploymentsError, SDKGeneratorError, generate_sdk, ) _cli.console.print( "[yellow]Note:[/yellow] This command is in beta. " "APIs may change in future releases." ) _cli.console.print() # Pre-validate output path if output.exists() and output.is_dir(): exit_with_error( f"Output path '{output}' is a directory. Please provide a file path." 
) _cli.console.print("Fetching deployments...") try: async with get_client() as client: result: GenerationResult = await generate_sdk( client=client, output_path=output, flow_names=flow, deployment_names=deployment, ) except AuthenticationError as e: exit_with_error( f"Not authenticated. Run `prefect cloud login` or configure PREFECT_API_URL.\n" f"Details: {e}" ) except APIConnectionError as e: exit_with_error(f"Could not connect to Prefect API.\nDetails: {e}") except NoDeploymentsError as e: exit_with_error( f"No deployments found.\n\n" f"Details: {e}\n\n" f"Make sure you have deployed at least one flow:\n" f" prefect deploy" ) except SDKGeneratorError as e: exit_with_error(f"SDK generation failed.\nDetails: {e}") # Display warnings for warning in result.warnings: _cli.console.print(f"[yellow]Warning:[/yellow] {warning}") # Display errors (non-fatal) for error in result.errors: _cli.console.print(f"[red]Error:[/red] {error}") # Display success message with statistics _cli.console.print() _cli.console.print("[green]SDK generated successfully![/green]") _cli.console.print() _cli.console.print(f" Flows: {result.flow_count}") _cli.console.print(f" Deployments: {result.deployment_count}") _cli.console.print(f" Work pools: {result.work_pool_count}") _cli.console.print() _cli.console.print(f" Output: {result.output_path.absolute()}") _cli.console.print() # Display usage hint module_name = result.output_path.stem _cli.console.print("Usage:") if _is_valid_identifier(module_name): _cli.console.print(f" from {module_name} import deployments") else: suggested_name = module_name.replace("-", "_") _cli.console.print( f" # Note: '{module_name}' is not a valid Python module name." ) _cli.console.print( f" # Rename the file to '{suggested_name}.py' to enable imports:" ) _cli.console.print(f" # from {suggested_name} import deployments") exit_with_success("")
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/cli/sdk.py", "license": "Apache License 2.0", "lines": 130, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
PrefectHQ/prefect:tests/cli/test_sdk.py
"""Tests for the SDK CLI command.""" from __future__ import annotations import ast from pathlib import Path from typing import Any from unittest.mock import AsyncMock, MagicMock, patch from uuid import uuid4 # Eagerly import modules that are mock.patch targets. Without this, the # autouse reset_sys_modules fixture can delete these lazily-imported modules # from sys.modules between tests. mock.patch then resolves its target to a # stale module object (reachable through package attributes) while the CLI # command re-imports a fresh module — so the mock never takes effect. import prefect._sdk.generator # noqa: F401 import prefect.cli.sdk # noqa: F401 from prefect.testing.cli import invoke_and_assert _GET_CLIENT_PATCH_TARGET = "prefect.cli.sdk.get_client" def make_deployment_response( name: str, flow_id: Any = None, work_pool_name: str | None = None, parameter_schema: dict[str, Any] | None = None, description: str | None = None, ) -> MagicMock: """Create a mock DeploymentResponse.""" dep = MagicMock() dep.name = name dep.flow_id = flow_id or uuid4() dep.work_pool_name = work_pool_name dep.parameter_openapi_schema = parameter_schema dep.description = description return dep def make_flow_response(name: str, flow_id: Any = None) -> MagicMock: """Create a mock Flow response.""" flow = MagicMock() flow.name = name flow.id = flow_id or uuid4() return flow def make_work_pool_response( name: str, pool_type: str = "kubernetes", base_job_template: dict[str, Any] | None = None, ) -> MagicMock: """Create a mock WorkPool response.""" wp = MagicMock() wp.name = name wp.type = pool_type wp.base_job_template = base_job_template or {} return wp class TestSDKGenerate: """Tests for the prefect sdk generate command.""" def test_sdk_generate_help(self) -> None: """Shows help text with beta indicator.""" invoke_and_assert( ["sdk", "generate", "--help"], expected_output_contains=[ "beta", "Generate a typed Python SDK", "--output", "--flow", "--deployment", ], expected_code=0, ) def 
test_sdk_generate_requires_output(self) -> None: """Requires --output option.""" invoke_and_assert( ["sdk", "generate"], expected_output_contains="--output", expected_code=1, ) def test_sdk_generate_basic(self, tmp_path: Path) -> None: """Generates SDK file successfully.""" output_path = tmp_path / "my_sdk.py" flow_id = uuid4() dep = make_deployment_response( "production", flow_id=flow_id, parameter_schema={ "type": "object", "properties": { "source": {"type": "string"}, }, "required": ["source"], }, ) flow = make_flow_response("my-flow", flow_id) mock_client = AsyncMock() mock_client.api_healthcheck = AsyncMock(return_value=None) mock_client.api_url = "https://api.prefect.cloud" mock_client.read_deployments = AsyncMock(return_value=[dep]) mock_client.read_flows = AsyncMock(return_value=[flow]) with patch( _GET_CLIENT_PATCH_TARGET, return_value=AsyncMock(__aenter__=AsyncMock(return_value=mock_client)), ): invoke_and_assert( ["sdk", "generate", "--output", str(output_path)], expected_output_contains=[ "beta", "SDK generated successfully", "Flows: 1", "Deployments: 1", "from my_sdk import deployments", ], expected_code=0, ) assert output_path.exists() code = output_path.read_text() ast.parse(code) # Verify valid Python def test_sdk_generate_with_flow_filter(self, tmp_path: Path) -> None: """Accepts --flow filter option.""" output_path = tmp_path / "sdk.py" flow_id = uuid4() dep = make_deployment_response("production", flow_id=flow_id) flow = make_flow_response("my-flow", flow_id) mock_client = AsyncMock() mock_client.api_healthcheck = AsyncMock(return_value=None) mock_client.api_url = "https://api.prefect.cloud" mock_client.read_deployments = AsyncMock(return_value=[dep]) mock_client.read_flows = AsyncMock(return_value=[flow]) with patch( _GET_CLIENT_PATCH_TARGET, return_value=AsyncMock(__aenter__=AsyncMock(return_value=mock_client)), ): invoke_and_assert( [ "sdk", "generate", "--output", str(output_path), "--flow", "my-flow", ], expected_output_contains="SDK 
generated successfully", expected_code=0, ) def test_sdk_generate_with_deployment_filter(self, tmp_path: Path) -> None: """Accepts --deployment filter option.""" output_path = tmp_path / "sdk.py" flow_id = uuid4() dep = make_deployment_response("production", flow_id=flow_id) flow = make_flow_response("my-flow", flow_id) mock_client = AsyncMock() mock_client.api_healthcheck = AsyncMock(return_value=None) mock_client.api_url = "https://api.prefect.cloud" mock_client.read_deployments = AsyncMock(return_value=[dep]) mock_client.read_flows = AsyncMock(return_value=[flow]) with patch( _GET_CLIENT_PATCH_TARGET, return_value=AsyncMock(__aenter__=AsyncMock(return_value=mock_client)), ): invoke_and_assert( [ "sdk", "generate", "--output", str(output_path), "--deployment", "my-flow/production", ], expected_output_contains="SDK generated successfully", expected_code=0, ) def test_sdk_generate_with_multiple_filters(self, tmp_path: Path) -> None: """Accepts multiple --flow and --deployment options.""" output_path = tmp_path / "sdk.py" flow_id1 = uuid4() flow_id2 = uuid4() deps = [ make_deployment_response("prod", flow_id=flow_id1), make_deployment_response("staging", flow_id=flow_id2), ] flows = [ make_flow_response("flow-a", flow_id1), make_flow_response("flow-b", flow_id2), ] mock_client = AsyncMock() mock_client.api_healthcheck = AsyncMock(return_value=None) mock_client.api_url = "https://api.prefect.cloud" mock_client.read_deployments = AsyncMock(return_value=deps) mock_client.read_flows = AsyncMock(return_value=flows) with patch( _GET_CLIENT_PATCH_TARGET, return_value=AsyncMock(__aenter__=AsyncMock(return_value=mock_client)), ): invoke_and_assert( [ "sdk", "generate", "--output", str(output_path), "--flow", "flow-a", "--flow", "flow-b", ], expected_output_contains=[ "SDK generated successfully", "Flows: 2", ], expected_code=0, ) def test_sdk_generate_creates_parent_directories(self, tmp_path: Path) -> None: """Creates parent directories if they don't exist.""" output_path = 
tmp_path / "subdir" / "nested" / "sdk.py" flow_id = uuid4() dep = make_deployment_response("production", flow_id=flow_id) flow = make_flow_response("my-flow", flow_id) mock_client = AsyncMock() mock_client.api_healthcheck = AsyncMock(return_value=None) mock_client.api_url = "https://api.prefect.cloud" mock_client.read_deployments = AsyncMock(return_value=[dep]) mock_client.read_flows = AsyncMock(return_value=[flow]) with patch( _GET_CLIENT_PATCH_TARGET, return_value=AsyncMock(__aenter__=AsyncMock(return_value=mock_client)), ): invoke_and_assert( ["sdk", "generate", "--output", str(output_path)], expected_output_contains="SDK generated successfully", expected_code=0, ) assert output_path.exists() def test_sdk_generate_overwrites_existing_file(self, tmp_path: Path) -> None: """Overwrites existing file without prompting.""" output_path = tmp_path / "sdk.py" output_path.write_text("# old content") flow_id = uuid4() dep = make_deployment_response("production", flow_id=flow_id) flow = make_flow_response("my-flow", flow_id) mock_client = AsyncMock() mock_client.api_healthcheck = AsyncMock(return_value=None) mock_client.api_url = "https://api.prefect.cloud" mock_client.read_deployments = AsyncMock(return_value=[dep]) mock_client.read_flows = AsyncMock(return_value=[flow]) with patch( _GET_CLIENT_PATCH_TARGET, return_value=AsyncMock(__aenter__=AsyncMock(return_value=mock_client)), ): invoke_and_assert( ["sdk", "generate", "--output", str(output_path)], expected_output_contains="SDK generated successfully", expected_code=0, ) code = output_path.read_text() assert "# old content" not in code assert "Prefect SDK" in code def test_sdk_generate_displays_warnings(self, tmp_path: Path) -> None: """Displays warnings from generation.""" output_path = tmp_path / "sdk.py" flow_id = uuid4() dep = make_deployment_response( "production", flow_id=flow_id, work_pool_name="missing-pool", ) flow = make_flow_response("my-flow", flow_id) mock_client = AsyncMock() mock_client.api_healthcheck = 
AsyncMock(return_value=None) mock_client.api_url = "https://api.prefect.cloud" mock_client.read_deployments = AsyncMock(return_value=[dep]) mock_client.read_flows = AsyncMock(return_value=[flow]) from prefect.exceptions import ObjectNotFound mock_client.read_work_pool = AsyncMock( side_effect=ObjectNotFound(Exception("Not found")) ) with patch( _GET_CLIENT_PATCH_TARGET, return_value=AsyncMock(__aenter__=AsyncMock(return_value=mock_client)), ): invoke_and_assert( ["sdk", "generate", "--output", str(output_path)], expected_output_contains=[ "Warning:", "missing-pool", "SDK generated successfully", ], expected_code=0, ) def test_sdk_generate_auth_error(self, tmp_path: Path) -> None: """Displays error message on authentication failure.""" output_path = tmp_path / "sdk.py" mock_client = AsyncMock() mock_client.api_healthcheck = AsyncMock( return_value=Exception("401 Unauthorized") ) mock_client.api_url = "https://api.prefect.cloud" with patch( _GET_CLIENT_PATCH_TARGET, return_value=AsyncMock(__aenter__=AsyncMock(return_value=mock_client)), ): invoke_and_assert( ["sdk", "generate", "--output", str(output_path)], expected_output_contains="Not authenticated", expected_code=1, ) def test_sdk_generate_connection_error(self, tmp_path: Path) -> None: """Displays error message on connection failure.""" output_path = tmp_path / "sdk.py" mock_client = AsyncMock() mock_client.api_healthcheck = AsyncMock( return_value=Exception("Connection refused") ) mock_client.api_url = "https://api.prefect.cloud" with patch( _GET_CLIENT_PATCH_TARGET, return_value=AsyncMock(__aenter__=AsyncMock(return_value=mock_client)), ): invoke_and_assert( ["sdk", "generate", "--output", str(output_path)], expected_output_contains="Could not connect to Prefect API", expected_code=1, ) def test_sdk_generate_no_deployments(self, tmp_path: Path) -> None: """Displays error message when no deployments found.""" output_path = tmp_path / "sdk.py" mock_client = AsyncMock() mock_client.api_healthcheck = 
AsyncMock(return_value=None) mock_client.api_url = "https://api.prefect.cloud" mock_client.read_deployments = AsyncMock(return_value=[]) with patch( _GET_CLIENT_PATCH_TARGET, return_value=AsyncMock(__aenter__=AsyncMock(return_value=mock_client)), ): invoke_and_assert( ["sdk", "generate", "--output", str(output_path)], expected_output_contains=[ "No deployments found", "prefect deploy", ], expected_code=1, ) def test_sdk_generate_shows_fetching_message(self, tmp_path: Path) -> None: """Shows 'Fetching deployments...' progress message.""" output_path = tmp_path / "sdk.py" flow_id = uuid4() dep = make_deployment_response("production", flow_id=flow_id) flow = make_flow_response("my-flow", flow_id) mock_client = AsyncMock() mock_client.api_healthcheck = AsyncMock(return_value=None) mock_client.api_url = "https://api.prefect.cloud" mock_client.read_deployments = AsyncMock(return_value=[dep]) mock_client.read_flows = AsyncMock(return_value=[flow]) with patch( _GET_CLIENT_PATCH_TARGET, return_value=AsyncMock(__aenter__=AsyncMock(return_value=mock_client)), ): invoke_and_assert( ["sdk", "generate", "--output", str(output_path)], expected_output_contains="Fetching deployments", expected_code=0, ) def test_sdk_generate_shows_output_path(self, tmp_path: Path) -> None: """Shows the output path in the success message.""" output_path = tmp_path / "my_custom_sdk.py" flow_id = uuid4() dep = make_deployment_response("production", flow_id=flow_id) flow = make_flow_response("my-flow", flow_id) mock_client = AsyncMock() mock_client.api_healthcheck = AsyncMock(return_value=None) mock_client.api_url = "https://api.prefect.cloud" mock_client.read_deployments = AsyncMock(return_value=[dep]) mock_client.read_flows = AsyncMock(return_value=[flow]) with patch( _GET_CLIENT_PATCH_TARGET, return_value=AsyncMock(__aenter__=AsyncMock(return_value=mock_client)), ): invoke_and_assert( ["sdk", "generate", "--output", str(output_path)], expected_output_contains=[ "Output:", "my_custom_sdk.py", ], 
expected_code=0, ) def test_sdk_generate_with_work_pools(self, tmp_path: Path) -> None: """Shows work pool count in statistics.""" output_path = tmp_path / "sdk.py" flow_id = uuid4() dep = make_deployment_response( "production", flow_id=flow_id, work_pool_name="k8s-pool", ) flow = make_flow_response("my-flow", flow_id) wp = make_work_pool_response( "k8s-pool", base_job_template={ "variables": { "type": "object", "properties": { "image": {"type": "string"}, }, } }, ) mock_client = AsyncMock() mock_client.api_healthcheck = AsyncMock(return_value=None) mock_client.api_url = "https://api.prefect.cloud" mock_client.read_deployments = AsyncMock(return_value=[dep]) mock_client.read_flows = AsyncMock(return_value=[flow]) mock_client.read_work_pool = AsyncMock(return_value=wp) with patch( _GET_CLIENT_PATCH_TARGET, return_value=AsyncMock(__aenter__=AsyncMock(return_value=mock_client)), ): invoke_and_assert( ["sdk", "generate", "--output", str(output_path)], expected_output_contains=[ "Work pools: 1", ], expected_code=0, ) def test_sdk_generate_render_failure(self, tmp_path: Path) -> None: """Displays error message on render failure.""" output_path = tmp_path / "sdk.py" flow_id = uuid4() dep = make_deployment_response("production", flow_id=flow_id) flow = make_flow_response("my-flow", flow_id) mock_client = AsyncMock() mock_client.api_healthcheck = AsyncMock(return_value=None) mock_client.api_url = "https://api.prefect.cloud" mock_client.read_deployments = AsyncMock(return_value=[dep]) mock_client.read_flows = AsyncMock(return_value=[flow]) with ( patch( _GET_CLIENT_PATCH_TARGET, return_value=AsyncMock(__aenter__=AsyncMock(return_value=mock_client)), ), patch( "prefect._sdk.generator.render_sdk", side_effect=Exception("Template rendering failed"), ), ): invoke_and_assert( ["sdk", "generate", "--output", str(output_path)], expected_output_contains="SDK generation failed", expected_code=1, ) def test_sdk_generate_invalid_module_name(self, tmp_path: Path) -> None: """Shows 
rename hint when filename is not a valid Python identifier.""" output_path = tmp_path / "my-sdk.py" # Dash makes it invalid flow_id = uuid4() dep = make_deployment_response("production", flow_id=flow_id) flow = make_flow_response("my-flow", flow_id) mock_client = AsyncMock() mock_client.api_healthcheck = AsyncMock(return_value=None) mock_client.api_url = "https://api.prefect.cloud" mock_client.read_deployments = AsyncMock(return_value=[dep]) mock_client.read_flows = AsyncMock(return_value=[flow]) with patch( _GET_CLIENT_PATCH_TARGET, return_value=AsyncMock(__aenter__=AsyncMock(return_value=mock_client)), ): invoke_and_assert( ["sdk", "generate", "--output", str(output_path)], expected_output_contains=[ "SDK generated successfully", "my-sdk", "not a valid Python module name", "Rename the file to", "my_sdk.py", ], expected_output_does_not_contain="from my-sdk import", expected_code=0, ) def test_sdk_generate_directory_as_output(self, tmp_path: Path) -> None: """Shows clear error when output is a directory.""" output_path = tmp_path / "subdir" output_path.mkdir() # Create directory # No mocking needed - error is caught before API calls invoke_and_assert( ["sdk", "generate", "--output", str(output_path)], expected_output_contains=[ "is a directory", "Please provide a file path", ], expected_code=1, ) class TestSDKHelp: """Tests for the sdk command group help.""" def test_sdk_help(self) -> None: """Shows help for sdk command group.""" invoke_and_assert( ["sdk", "--help"], expected_output_contains=[ "sdk", "generate", ], expected_code=0, )
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/cli/test_sdk.py", "license": "Apache License 2.0", "lines": 491, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:tests/server/api/test_docket_task_keys.py
""" Tests for docket task keys in API handlers. These tests verify that API handlers use consistent, deterministic task keys when scheduling background docket tasks. Task keys ensure at-most-once execution semantics, preventing duplicate task execution when multiple API servers process the same request. See: https://github.com/PrefectHQ/prefect/pull/19936#issuecomment-3744457809 """ from contextlib import asynccontextmanager from datetime import timedelta from typing import Any, AsyncGenerator from uuid import UUID, uuid4 import httpx import pytest from docket import Docket from fastapi import FastAPI from httpx import ASGITransport, AsyncClient from sqlalchemy.ext.asyncio import AsyncSession from prefect._internal.compatibility.starlette import status from prefect.server import models, schemas from prefect.server.schemas.statuses import DeploymentStatus from prefect.settings import get_current_settings @asynccontextmanager async def docket_without_worker_lifespan( app: FastAPI, ) -> AsyncGenerator[None, None]: """ Custom lifespan context that starts Docket and registers task functions, but does NOT start the background worker. This allows tasks to accumulate in the queue for proper deduplication testing. Uses a unique Docket name per test to avoid Redis key collisions when using the shared fakeredis server. """ settings = get_current_settings() unique_name = f"test-docket-{uuid4().hex[:8]}" async with Docket( name=unique_name, url=settings.server.docket.url, execution_ttl=timedelta(0), ) as docket: docket.register_collection( "prefect.server.api.background_workers:task_functions" ) app.api_app.state.docket = docket yield @pytest.fixture async def client_with_real_docket( app: FastAPI, ) -> AsyncGenerator[AsyncClient, Any]: """ Yield a test client with a real Docket instance but NO background worker. This ensures tasks stay in the queue and are not processed, allowing us to properly verify that duplicate task keys are deduplicated. 
Note: We intentionally do NOT use LifespanManager(app) here because that would trigger the app's lifespan which starts the background worker. The database session fixture handles DB setup separately. """ async with docket_without_worker_lifespan(app): async with httpx.AsyncClient( transport=ASGITransport(app=app), base_url="https://test/api" ) as async_client: yield async_client @pytest.fixture async def real_docket(app: FastAPI, client_with_real_docket: AsyncClient) -> Docket: """Get the real Docket instance from the app. Depends on client_with_real_docket to ensure lifespan context is active. """ return app.api_app.state.docket class TestDocketAtMostOnceExecution: """ Integration tests verifying at-most-once execution semantics using real Docket. These tests make duplicate API requests and verify that only one task is queued in Docket, demonstrating that the task keys prevent duplicate execution. """ @pytest.fixture async def work_queue(self, session: AsyncSession): work_queue = await models.work_queues.create_work_queue( session=session, work_queue=schemas.actions.WorkQueueCreate( name=f"test-wq-{uuid4().hex[:8]}" ), ) await session.commit() return work_queue @pytest.fixture async def work_pool(self, session: AsyncSession): work_pool = await models.workers.create_work_pool( session=session, work_pool=schemas.actions.WorkPoolCreate( name=f"test-pool-{uuid4().hex[:8]}", type="test", ), ) await session.commit() return work_pool @pytest.fixture async def flow(self, session: AsyncSession): flow = await models.flows.create_flow( session=session, flow=schemas.core.Flow(name=f"test-flow-{uuid4().hex[:8]}"), ) await session.commit() return flow @pytest.fixture async def deployments(self, session: AsyncSession, flow): deployments = [] for i in range(3): deployment = await models.deployments.create_deployment( session=session, deployment=schemas.core.Deployment( name=f"test-deployment-{i}", flow_id=flow.id, status=DeploymentStatus.READY, ), ) deployments.append(deployment) 
await session.commit() return deployments @pytest.fixture async def flow_run(self, session: AsyncSession, flow): flow_run = await models.flow_runs.create_flow_run( session=session, flow_run=schemas.core.FlowRun( flow_id=flow.id, flow_version="1.0", state=schemas.states.Pending(), ), ) await session.commit() return flow_run @pytest.fixture async def task_run(self, session: AsyncSession, flow_run): task_run = await models.task_runs.create_task_run( session=session, task_run=schemas.core.TaskRun( flow_run_id=flow_run.id, task_key="test-task", dynamic_key="0", state=schemas.states.Pending(), ), ) await session.commit() return task_run async def test_work_queue_duplicate_requests_queue_single_task( self, work_queue, real_docket: Docket, client_with_real_docket: AsyncClient, ): """ Verify that duplicate requests to read work queue runs only queue one task per unique key, demonstrating at-most-once execution. """ # Get initial snapshot initial_snapshot = await real_docket.snapshot() initial_task_count = initial_snapshot.total_tasks # Make the same request multiple times for _ in range(3): response = await client_with_real_docket.post( f"/work_queues/{work_queue.id}/get_runs", ) assert response.status_code == status.HTTP_200_OK # Get snapshot after requests final_snapshot = await real_docket.snapshot() # Should only have 2 new tasks (mark_work_queues_ready and mark_deployments_ready) # despite making 3 requests, because duplicate keys are ignored new_tasks = final_snapshot.total_tasks - initial_task_count assert new_tasks == 2, ( f"Expected 2 tasks (one per unique key), but got {new_tasks}. " "Duplicate requests should not create additional tasks." 
) # Verify the tasks have the expected keys # Include both future (queued) and running tasks since the worker may have # already started processing some tasks by the time we take the snapshot task_keys = {task.key for task in final_snapshot.future} task_keys.update(task.key for task in final_snapshot.running) expected_keys = { f"mark_work_queues_ready:{work_queue.id}", f"mark_deployments_ready:work_queue:{work_queue.id}", } assert expected_keys.issubset(task_keys), ( f"Expected keys {expected_keys} not found in {task_keys}" ) async def test_workers_duplicate_requests_queue_single_task( self, work_pool, real_docket: Docket, client_with_real_docket: AsyncClient, ): """ Verify that duplicate requests to get scheduled flow runs only queue one task per unique key. """ initial_snapshot = await real_docket.snapshot() initial_task_count = initial_snapshot.total_tasks # Make the same request multiple times for _ in range(3): response = await client_with_real_docket.post( f"/work_pools/{work_pool.name}/get_scheduled_flow_runs", json={}, ) assert response.status_code == status.HTTP_200_OK final_snapshot = await real_docket.snapshot() new_tasks = final_snapshot.total_tasks - initial_task_count # Should only have 2 new tasks despite 3 requests assert new_tasks == 2, ( f"Expected 2 tasks, but got {new_tasks}. " "Duplicate requests should not create additional tasks." 
) # Include both future (queued) and running tasks since the worker may have # already started processing some tasks by the time we take the snapshot task_keys = {task.key for task in final_snapshot.future} task_keys.update(task.key for task in final_snapshot.running) expected_keys = { f"mark_work_queues_ready:work_pool:{work_pool.id}", f"mark_deployments_ready:work_pool:{work_pool.id}", } assert expected_keys.issubset(task_keys) async def test_deployments_duplicate_requests_queue_single_task( self, deployments, real_docket: Docket, client_with_real_docket: AsyncClient, ): """ Verify that duplicate requests for deployment scheduled runs only queue one task per unique key. """ deployment_ids = [str(d.id) for d in deployments] initial_snapshot = await real_docket.snapshot() initial_task_count = initial_snapshot.total_tasks # Make the same request multiple times for _ in range(3): response = await client_with_real_docket.post( "/deployments/get_scheduled_flow_runs", json={"deployment_ids": deployment_ids}, ) assert response.status_code == status.HTTP_200_OK final_snapshot = await real_docket.snapshot() new_tasks = final_snapshot.total_tasks - initial_task_count # Should only have 1 new task despite 3 requests assert new_tasks == 1, ( f"Expected 1 task, but got {new_tasks}. " "Duplicate requests should not create additional tasks." 
) sorted_ids = ",".join(str(d) for d in sorted(UUID(id) for id in deployment_ids)) expected_key = f"mark_deployments_ready:deployments:{sorted_ids}" # Include both future (queued) and running tasks since the worker may have # already started processing some tasks by the time we take the snapshot task_keys = {task.key for task in final_snapshot.future} task_keys.update(task.key for task in final_snapshot.running) assert expected_key in task_keys async def test_deployments_different_order_same_task( self, deployments, real_docket: Docket, client_with_real_docket: AsyncClient, ): """ Verify that requests with deployment IDs in different orders result in the same task key, preventing duplicate execution. """ deployment_ids = [str(d.id) for d in deployments] reversed_ids = list(reversed(deployment_ids)) initial_snapshot = await real_docket.snapshot() initial_task_count = initial_snapshot.total_tasks # Request with original order response1 = await client_with_real_docket.post( "/deployments/get_scheduled_flow_runs", json={"deployment_ids": deployment_ids}, ) assert response1.status_code == status.HTTP_200_OK # Request with reversed order - should not create a new task response2 = await client_with_real_docket.post( "/deployments/get_scheduled_flow_runs", json={"deployment_ids": reversed_ids}, ) assert response2.status_code == status.HTTP_200_OK final_snapshot = await real_docket.snapshot() new_tasks = final_snapshot.total_tasks - initial_task_count # Should only have 1 task because both requests generate the same key assert new_tasks == 1, ( f"Expected 1 task (order-independent keys), but got {new_tasks}. " "Requests with same IDs in different order should produce same key." ) async def test_flow_run_delete_queues_single_task( self, flow_run, real_docket: Docket, client_with_real_docket: AsyncClient, ): """ Verify that deleting a flow run queues exactly one log deletion task. 
""" initial_snapshot = await real_docket.snapshot() initial_task_count = initial_snapshot.total_tasks response = await client_with_real_docket.delete(f"/flow_runs/{flow_run.id}") assert response.status_code == status.HTTP_204_NO_CONTENT final_snapshot = await real_docket.snapshot() new_tasks = final_snapshot.total_tasks - initial_task_count assert new_tasks == 1, f"Expected 1 task, but got {new_tasks}" expected_key = f"delete_flow_run_logs:{flow_run.id}" # Include both future (queued) and running tasks since the worker may have # already started processing some tasks by the time we take the snapshot task_keys = {task.key for task in final_snapshot.future} task_keys.update(task.key for task in final_snapshot.running) assert expected_key in task_keys async def test_task_run_delete_queues_single_task( self, task_run, real_docket: Docket, client_with_real_docket: AsyncClient, ): """ Verify that deleting a task run queues exactly one log deletion task. """ initial_snapshot = await real_docket.snapshot() initial_task_count = initial_snapshot.total_tasks response = await client_with_real_docket.delete(f"/task_runs/{task_run.id}") assert response.status_code == status.HTTP_204_NO_CONTENT final_snapshot = await real_docket.snapshot() new_tasks = final_snapshot.total_tasks - initial_task_count assert new_tasks == 1, f"Expected 1 task, but got {new_tasks}" expected_key = f"delete_task_run_logs:{task_run.id}" # Include both future (queued) and running tasks since the worker may have # already started processing some tasks by the time we take the snapshot task_keys = {task.key for task in final_snapshot.future} task_keys.update(task.key for task in final_snapshot.running) assert expected_key in task_keys
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/server/api/test_docket_task_keys.py", "license": "Apache License 2.0", "lines": 328, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:src/prefect/_sdk/fetcher.py
""" SDK data fetcher module. This module is responsible for fetching deployment and work pool data from the Prefect API and converting it to the internal data models used by the SDK generator. """ from __future__ import annotations import asyncio from dataclasses import dataclass, field from datetime import datetime, timezone from typing import TYPE_CHECKING, Any from uuid import UUID import prefect from prefect._sdk.models import ( DeploymentInfo, FlowInfo, SDKData, SDKGenerationMetadata, WorkPoolInfo, ) from prefect.client.schemas.filters import ( DeploymentFilter, DeploymentFilterName, FlowFilter, FlowFilterId, FlowFilterName, ) from prefect.exceptions import ObjectNotFound from prefect.settings.context import get_current_settings if TYPE_CHECKING: from prefect.client.orchestration import PrefectClient from prefect.client.schemas.responses import DeploymentResponse @dataclass class FetchResult: """Result of fetching SDK data from the API. Attributes: data: The SDK data if fetching was successful. warnings: List of warnings encountered during fetching. errors: List of errors encountered during fetching (non-fatal). """ data: SDKData warnings: list[str] = field(default_factory=list) errors: list[str] = field(default_factory=list) class SDKFetcherError(Exception): """Base exception for SDK fetcher errors.""" pass class AuthenticationError(SDKFetcherError): """Raised when authentication with the Prefect API fails.""" pass class APIConnectionError(SDKFetcherError): """Raised when the Prefect API cannot be reached.""" pass class NoDeploymentsError(SDKFetcherError): """Raised when no deployments are found.""" pass async def _check_authentication(client: "PrefectClient") -> None: """Check if the client is authenticated. Args: client: The Prefect client to check. Raises: AuthenticationError: If not authenticated. APIConnectionError: If the API cannot be reached. 
""" try: exc = await client.api_healthcheck() if exc is not None: # Check if it's an authentication error exc_str = str(exc).lower() if ( "unauthorized" in exc_str or "forbidden" in exc_str or "401" in exc_str or "403" in exc_str ): raise AuthenticationError( "Not authenticated. Run `prefect cloud login` or configure " "PREFECT_API_URL." ) raise APIConnectionError( f"Could not connect to Prefect API at {client.api_url}. " f"Check your configuration. Error: {exc}" ) except Exception as e: if isinstance(e, (AuthenticationError, APIConnectionError)): raise raise APIConnectionError( f"Could not connect to Prefect API at {client.api_url}. " f"Check your configuration. Error: {e}" ) from e async def _fetch_deployments( client: "PrefectClient", flow_filter: FlowFilter | None = None, deployment_filter: DeploymentFilter | None = None, ) -> list["DeploymentResponse"]: """Fetch all deployments with pagination. Args: client: The Prefect client to use. flow_filter: Optional filter for flows. deployment_filter: Optional filter for deployments. Returns: List of deployment responses. """ page_size = 200 offset = 0 all_deployments: list[DeploymentResponse] = [] while True: deployments = await client.read_deployments( flow_filter=flow_filter, deployment_filter=deployment_filter, limit=page_size, offset=offset, ) if not deployments: break all_deployments.extend(deployments) if len(deployments) < page_size: break offset += page_size return all_deployments async def _fetch_work_pool( client: "PrefectClient", work_pool_name: str, ) -> WorkPoolInfo | None: """Fetch a single work pool by name. Args: client: The Prefect client to use. work_pool_name: The name of the work pool to fetch. Returns: WorkPoolInfo if found, None if not found. Raises: Exception: For non-ObjectNotFound errors (to be caught by gather). 
""" try: work_pool = await client.read_work_pool(work_pool_name) # Extract job variables schema from base_job_template job_vars_schema: dict[str, Any] = {} base_job_template = work_pool.base_job_template if base_job_template and "variables" in base_job_template: variables = base_job_template["variables"] if isinstance(variables, dict): job_vars_schema = variables return WorkPoolInfo( name=work_pool.name, pool_type=work_pool.type, job_variables_schema=job_vars_schema, ) except ObjectNotFound: return None # Let other exceptions propagate to be captured by gather(return_exceptions=True) async def _fetch_work_pools_parallel( client: "PrefectClient", work_pool_names: set[str], ) -> tuple[dict[str, WorkPoolInfo], list[str]]: """Fetch multiple work pools in parallel. Args: client: The Prefect client to use. work_pool_names: Set of work pool names to fetch. Returns: Tuple of (work_pools dict, warnings list). """ if not work_pool_names: return {}, [] warnings: list[str] = [] # Convert to sorted list for deterministic iteration order pool_names_list = sorted(work_pool_names) # Fetch work pools in parallel tasks = [_fetch_work_pool(client, name) for name in pool_names_list] results = await asyncio.gather(*tasks, return_exceptions=True) work_pools: dict[str, WorkPoolInfo] = {} for name, result in zip(pool_names_list, results, strict=True): if isinstance(result, BaseException): warnings.append( f"Could not fetch work pool '{name}' - `with_infra()` will not be " f"generated for affected deployments: {result}" ) elif result is None: warnings.append( f"Work pool '{name}' not found - `with_infra()` will not be " f"generated for affected deployments" ) else: # At this point, result is WorkPoolInfo work_pools[name] = result return work_pools, warnings async def _fetch_flows_for_deployments( client: "PrefectClient", deployment_flow_ids: set[str], ) -> tuple[dict[str, str], list[str]]: """Fetch flow names for the given flow IDs. Args: client: The Prefect client to use. 
deployment_flow_ids: Set of flow IDs (as strings) to look up. Returns: Tuple of (dict mapping flow_id to flow_name, list of warnings). """ if not deployment_flow_ids: return {}, [] warnings: list[str] = [] flow_uuids: list[UUID] = [] # Convert string IDs to UUIDs with defensive handling for fid in deployment_flow_ids: try: flow_uuids.append(UUID(fid)) except (ValueError, TypeError) as e: warnings.append(f"Invalid flow ID '{fid}' - skipping: {e}") if not flow_uuids: return {}, warnings # Fetch flows by ID with pagination page_size = 200 offset = 0 flow_id_to_name: dict[str, str] = {} flow_filter = FlowFilter(id=FlowFilterId(any_=flow_uuids)) while True: flows = await client.read_flows( flow_filter=flow_filter, limit=page_size, offset=offset, ) if not flows: break for flow in flows: flow_id_to_name[str(flow.id)] = flow.name if len(flows) < page_size: break offset += page_size return flow_id_to_name, warnings async def fetch_sdk_data( client: "PrefectClient", flow_names: list[str] | None = None, deployment_names: list[str] | None = None, ) -> FetchResult: """Fetch all data needed for SDK generation. Args: client: An active Prefect client. flow_names: Optional list of flow names to filter to. deployment_names: Optional list of deployment names to filter to. These should be in "flow-name/deployment-name" format for exact matching. Short names (without "/") will match any deployment with that name across all flows. Returns: FetchResult containing SDK data and any warnings/errors. Raises: AuthenticationError: If not authenticated. APIConnectionError: If the API cannot be reached. NoDeploymentsError: If no deployments match the filters. 
""" warnings: list[str] = [] errors: list[str] = [] # Check authentication first await _check_authentication(client) # Build filters flow_filter: FlowFilter | None = None deployment_filter: DeploymentFilter | None = None if flow_names: flow_filter = FlowFilter(name=FlowFilterName(any_=flow_names)) if deployment_names: # Extract just the deployment name parts (after the /) deploy_name_parts = [] for full_name in deployment_names: if "/" in full_name: _, deploy_name = full_name.split("/", 1) deploy_name_parts.append(deploy_name) else: deploy_name_parts.append(full_name) deployment_filter = DeploymentFilter( name=DeploymentFilterName(any_=deploy_name_parts) ) # Fetch deployments deployment_responses = await _fetch_deployments( client, flow_filter=flow_filter, deployment_filter=deployment_filter, ) if not deployment_responses: if flow_names or deployment_names: raise NoDeploymentsError( f"No deployments matched filters. " f"Filters: flow_names={flow_names}, deployment_names={deployment_names}" ) raise NoDeploymentsError("No deployments found in workspace.") # Get unique flow IDs and work pool names flow_ids: set[str] = set() work_pool_names: set[str] = set() for dep in deployment_responses: flow_ids.add(str(dep.flow_id)) if dep.work_pool_name: work_pool_names.add(dep.work_pool_name) # Fetch flow names and work pools in parallel flow_names_task = _fetch_flows_for_deployments(client, flow_ids) work_pools_task = _fetch_work_pools_parallel(client, work_pool_names) (flow_id_to_name, flow_warnings), (work_pools, wp_warnings) = await asyncio.gather( flow_names_task, work_pools_task ) warnings.extend(flow_warnings) warnings.extend(wp_warnings) # Group deployments by flow flows: dict[str, FlowInfo] = {} # Track short name -> full names mapping to detect ambiguity short_name_matches: dict[str, list[str]] = {} # short_name -> list of full_names for dep in deployment_responses: flow_id = str(dep.flow_id) flow_name = flow_id_to_name.get(flow_id) if not flow_name: errors.append( 
f"Could not find flow name for deployment '{dep.name}' " f"(flow_id={flow_id}) - skipping" ) continue # If filtering by deployment name, check the full name matches full_name = f"{flow_name}/{dep.name}" if deployment_names and full_name not in deployment_names: # Only include if the full name matches (filter was by name parts) # Skip if user specified full names and this doesn't match found_match = False matched_short_name: str | None = None for dn in deployment_names: if "/" not in dn: # User gave just deployment name, check against dep.name if dep.name == dn: found_match = True matched_short_name = dn break else: # User gave full name, must match exactly if full_name == dn: found_match = True break if not found_match: continue # Track short name matches for ambiguity warning if matched_short_name: if matched_short_name not in short_name_matches: short_name_matches[matched_short_name] = [] short_name_matches[matched_short_name].append(full_name) # Create DeploymentInfo deployment_info = DeploymentInfo( name=dep.name, flow_name=flow_name, full_name=full_name, parameter_schema=dep.parameter_openapi_schema, work_pool_name=dep.work_pool_name, description=dep.description, ) # Add to flow if flow_name not in flows: flows[flow_name] = FlowInfo(name=flow_name, deployments=[]) flows[flow_name].deployments.append(deployment_info) if not flows: if flow_names or deployment_names: raise NoDeploymentsError( f"No deployments matched filters after processing. " f"Filters: flow_names={flow_names}, deployment_names={deployment_names}" ) raise NoDeploymentsError("No deployments could be processed.") # Warn about ambiguous short names that matched multiple flows for short_name, full_names in short_name_matches.items(): if len(full_names) > 1: warnings.append( f"Short deployment name '{short_name}' matched {len(full_names)} " f"deployments across different flows: {', '.join(sorted(full_names))}. " f"Consider using full names (flow/deployment) for precise filtering." 
) # Build metadata - prefer client.api_url, fall back to setting for provenance client_api_url = getattr(client, "api_url", None) api_url = str(client_api_url) if client_api_url else get_current_settings().api.url metadata = SDKGenerationMetadata( generation_time=datetime.now(timezone.utc).isoformat(), prefect_version=prefect.__version__, workspace_name=None, # Could be extracted from Cloud API URL if needed api_url=api_url, ) # Build final SDK data sdk_data = SDKData( metadata=metadata, flows=flows, work_pools=work_pools, ) return FetchResult( data=sdk_data, warnings=warnings, errors=errors, )
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/_sdk/fetcher.py", "license": "Apache License 2.0", "lines": 381, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
PrefectHQ/prefect:src/prefect/_sdk/generator.py
""" SDK generator module. This module orchestrates the SDK generation process, combining data fetching from the Prefect API with template rendering to produce the final SDK file. """ from __future__ import annotations from dataclasses import dataclass, field from pathlib import Path from typing import TYPE_CHECKING from prefect._sdk.fetcher import ( APIConnectionError, AuthenticationError, FetchResult, NoDeploymentsError, SDKFetcherError, fetch_sdk_data, ) from prefect._sdk.renderer import RenderResult, render_sdk if TYPE_CHECKING: from prefect.client.orchestration import PrefectClient @dataclass class GenerationResult: """Result of SDK generation. Attributes: output_path: Path to the generated SDK file. flow_count: Number of flows included in the SDK. deployment_count: Number of deployments included in the SDK. work_pool_count: Number of work pools included in the SDK. warnings: List of warnings encountered during generation. errors: List of non-fatal errors encountered during generation. """ output_path: Path flow_count: int deployment_count: int work_pool_count: int warnings: list[str] = field(default_factory=list) errors: list[str] = field(default_factory=list) class SDKGeneratorError(Exception): """Base exception for SDK generator errors.""" pass # Re-export fetcher exceptions for convenience __all__ = [ "GenerationResult", "SDKGeneratorError", "AuthenticationError", "APIConnectionError", "NoDeploymentsError", "SDKFetcherError", "generate_sdk", ] async def generate_sdk( client: "PrefectClient", output_path: Path, flow_names: list[str] | None = None, deployment_names: list[str] | None = None, ) -> GenerationResult: """Generate a typed SDK from workspace deployments. This is the main entry point for SDK generation. It fetches deployment and work pool data from the Prefect API, then renders a typed Python SDK file. Args: client: An active Prefect client (must be entered as context manager). output_path: Path where the SDK file should be written. 
flow_names: Optional list of flow names to filter to. deployment_names: Optional list of deployment names to filter to. These should be in "flow-name/deployment-name" format. Returns: GenerationResult with statistics and any warnings/errors. Raises: AuthenticationError: If not authenticated with Prefect. APIConnectionError: If the Prefect API cannot be reached. NoDeploymentsError: If no deployments match the filters. SDKGeneratorError: For other generation errors. Example: ```python from prefect.client.orchestration import get_client from prefect._sdk.generator import generate_sdk from pathlib import Path async with get_client() as client: result = await generate_sdk( client=client, output_path=Path("my_sdk.py"), flow_names=["my-etl-flow"], ) print(f"Generated SDK with {result.deployment_count} deployments") ``` """ warnings: list[str] = [] errors: list[str] = [] # Fetch data from API fetch_result: FetchResult = await fetch_sdk_data( client=client, flow_names=flow_names, deployment_names=deployment_names, ) warnings.extend(fetch_result.warnings) errors.extend(fetch_result.errors) # Render SDK to file try: render_result: RenderResult = render_sdk( data=fetch_result.data, output_path=output_path, ) warnings.extend(render_result.warnings) except Exception as e: raise SDKGeneratorError(f"Failed to render SDK: {e}") from e # Collect statistics sdk_data = fetch_result.data return GenerationResult( output_path=output_path, flow_count=sdk_data.flow_count, deployment_count=sdk_data.deployment_count, work_pool_count=sdk_data.work_pool_count, warnings=warnings, errors=errors, )
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/_sdk/generator.py", "license": "Apache License 2.0", "lines": 116, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
PrefectHQ/prefect:tests/_sdk/test_fetcher.py
"""Tests for the SDK fetcher module.""" from __future__ import annotations from typing import Any from unittest.mock import AsyncMock, MagicMock from uuid import UUID, uuid4 import pytest from prefect._sdk.fetcher import ( APIConnectionError, AuthenticationError, FetchResult, NoDeploymentsError, _check_authentication, _fetch_deployments, _fetch_flows_for_deployments, _fetch_work_pool, _fetch_work_pools_parallel, fetch_sdk_data, ) from prefect.client.schemas.filters import FlowFilter, FlowFilterName from prefect.exceptions import ObjectNotFound def make_deployment_response( name: str, flow_id: UUID | None = None, work_pool_name: str | None = None, parameter_schema: dict[str, Any] | None = None, description: str | None = None, ) -> MagicMock: """Create a mock DeploymentResponse.""" dep = MagicMock() dep.name = name dep.flow_id = flow_id or uuid4() dep.work_pool_name = work_pool_name dep.parameter_openapi_schema = parameter_schema dep.description = description return dep def make_flow_response(name: str, flow_id: UUID | None = None) -> MagicMock: """Create a mock Flow response.""" flow = MagicMock() flow.name = name flow.id = flow_id or uuid4() return flow def make_work_pool_response( name: str, pool_type: str = "kubernetes", base_job_template: dict[str, Any] | None = None, ) -> MagicMock: """Create a mock WorkPool response.""" wp = MagicMock() wp.name = name wp.type = pool_type wp.base_job_template = base_job_template or {} return wp class TestCheckAuthentication: """Tests for _check_authentication.""" async def test_authentication_success(self) -> None: """Successful authentication check passes silently.""" client = AsyncMock() client.api_healthcheck = AsyncMock(return_value=None) await _check_authentication(client) # Should not raise async def test_authentication_failure_unauthorized(self) -> None: """Unauthorized error raises AuthenticationError.""" client = AsyncMock() client.api_healthcheck = AsyncMock( return_value=Exception("401 Unauthorized: Invalid API key") 
) client.api_url = "https://api.prefect.cloud" with pytest.raises(AuthenticationError) as exc_info: await _check_authentication(client) assert "Not authenticated" in str(exc_info.value) async def test_authentication_failure_connection_error(self) -> None: """Connection error raises APIConnectionError.""" client = AsyncMock() client.api_healthcheck = AsyncMock(return_value=Exception("Connection refused")) client.api_url = "https://localhost:4200" with pytest.raises(APIConnectionError) as exc_info: await _check_authentication(client) assert "Could not connect" in str(exc_info.value) async def test_authentication_failure_exception(self) -> None: """Exception during healthcheck raises APIConnectionError.""" client = AsyncMock() client.api_healthcheck = AsyncMock(side_effect=RuntimeError("Network error")) client.api_url = "https://api.prefect.cloud" with pytest.raises(APIConnectionError) as exc_info: await _check_authentication(client) assert "Could not connect" in str(exc_info.value) class TestFetchDeployments: """Tests for _fetch_deployments.""" async def test_fetch_single_page(self) -> None: """Fetches single page of deployments.""" client = AsyncMock() deps = [make_deployment_response(f"dep-{i}") for i in range(5)] client.read_deployments = AsyncMock(return_value=deps) result = await _fetch_deployments(client) assert len(result) == 5 client.read_deployments.assert_called_once() async def test_fetch_multiple_pages(self) -> None: """Fetches multiple pages of deployments.""" client = AsyncMock() # First call returns 200 items (full page), second returns 50 (partial page) # Pagination stops when page is not full page1 = [make_deployment_response(f"dep-{i}") for i in range(200)] page2 = [make_deployment_response(f"dep-{i}") for i in range(200, 250)] client.read_deployments = AsyncMock(side_effect=[page1, page2]) result = await _fetch_deployments(client) assert len(result) == 250 # Two calls: first returns full page (200), second returns partial (50) assert 
client.read_deployments.call_count == 2 async def test_fetch_with_filters(self) -> None: """Passes filters to the API.""" client = AsyncMock() client.read_deployments = AsyncMock(return_value=[]) flow_filter = FlowFilter(name=FlowFilterName(any_=["my-flow"])) await _fetch_deployments(client, flow_filter=flow_filter) call_kwargs = client.read_deployments.call_args.kwargs assert call_kwargs["flow_filter"] == flow_filter async def test_fetch_empty(self) -> None: """Returns empty list when no deployments.""" client = AsyncMock() client.read_deployments = AsyncMock(return_value=[]) result = await _fetch_deployments(client) assert result == [] class TestFetchWorkPool: """Tests for _fetch_work_pool.""" async def test_fetch_work_pool_success(self) -> None: """Successfully fetches a work pool.""" client = AsyncMock() wp = make_work_pool_response( "my-pool", pool_type="kubernetes", base_job_template={ "variables": { "type": "object", "properties": { "image": {"type": "string"}, "cpu": {"type": "string"}, }, } }, ) client.read_work_pool = AsyncMock(return_value=wp) result = await _fetch_work_pool(client, "my-pool") assert result is not None assert result.name == "my-pool" assert result.pool_type == "kubernetes" assert "properties" in result.job_variables_schema async def test_fetch_work_pool_not_found(self) -> None: """Returns None when work pool not found.""" client = AsyncMock() client.read_work_pool = AsyncMock(side_effect=ObjectNotFound("Not found")) result = await _fetch_work_pool(client, "missing-pool") assert result is None async def test_fetch_work_pool_error_propagates(self) -> None: """Non-ObjectNotFound errors propagate for gather to capture.""" client = AsyncMock() client.read_work_pool = AsyncMock(side_effect=RuntimeError("API error")) # Error should propagate (not return None) with pytest.raises(RuntimeError, match="API error"): await _fetch_work_pool(client, "error-pool") async def test_fetch_work_pool_empty_template(self) -> None: """Handles work pool with 
empty base_job_template.""" client = AsyncMock() wp = make_work_pool_response("my-pool", base_job_template={}) client.read_work_pool = AsyncMock(return_value=wp) result = await _fetch_work_pool(client, "my-pool") assert result is not None assert result.job_variables_schema == {} class TestFetchWorkPoolsParallel: """Tests for _fetch_work_pools_parallel.""" async def test_fetch_multiple_pools_parallel(self) -> None: """Fetches multiple work pools in parallel.""" client = AsyncMock() async def mock_read_pool(name: str) -> MagicMock: return make_work_pool_response(name) client.read_work_pool = mock_read_pool pools, warnings = await _fetch_work_pools_parallel( client, {"pool-1", "pool-2", "pool-3"} ) assert len(pools) == 3 assert "pool-1" in pools assert "pool-2" in pools assert "pool-3" in pools assert warnings == [] async def test_fetch_pools_with_missing(self) -> None: """Handles missing work pools with warnings.""" client = AsyncMock() async def mock_read_pool(name: str) -> MagicMock: if name == "missing": raise ObjectNotFound("Not found") return make_work_pool_response(name) client.read_work_pool = mock_read_pool pools, warnings = await _fetch_work_pools_parallel( client, {"good-pool", "missing"} ) assert len(pools) == 1 assert "good-pool" in pools assert len(warnings) == 1 assert "missing" in warnings[0] async def test_fetch_pools_empty_set(self) -> None: """Returns empty results for empty input.""" client = AsyncMock() pools, warnings = await _fetch_work_pools_parallel(client, set()) assert pools == {} assert warnings == [] class TestFetchFlowsForDeployments: """Tests for _fetch_flows_for_deployments.""" async def test_fetch_flows_success(self) -> None: """Successfully fetches flow names.""" client = AsyncMock() flow_id = uuid4() flow = make_flow_response("my-flow", flow_id) client.read_flows = AsyncMock(return_value=[flow]) result, warnings = await _fetch_flows_for_deployments(client, {str(flow_id)}) assert str(flow_id) in result assert result[str(flow_id)] == 
"my-flow" assert warnings == [] async def test_fetch_flows_empty(self) -> None: """Returns empty dict for empty input.""" client = AsyncMock() result, warnings = await _fetch_flows_for_deployments(client, set()) assert result == {} assert warnings == [] async def test_fetch_flows_invalid_uuid(self) -> None: """Handles invalid UUID strings with warning.""" client = AsyncMock() valid_id = uuid4() flow = make_flow_response("my-flow", valid_id) client.read_flows = AsyncMock(return_value=[flow]) result, warnings = await _fetch_flows_for_deployments( client, {str(valid_id), "not-a-uuid", "also-invalid"} ) # Valid UUID should still be fetched assert str(valid_id) in result # Invalid UUIDs should generate warnings assert len(warnings) == 2 assert any("not-a-uuid" in w for w in warnings) assert any("also-invalid" in w for w in warnings) async def test_fetch_flows_multiple_pages(self) -> None: """Fetches multiple pages of flows.""" client = AsyncMock() # Create 250 flows - first page of 200, second page of 50 all_flow_ids = [uuid4() for _ in range(250)] page1 = [make_flow_response(f"flow-{i}", all_flow_ids[i]) for i in range(200)] page2 = [ make_flow_response(f"flow-{i}", all_flow_ids[i]) for i in range(200, 250) ] client.read_flows = AsyncMock(side_effect=[page1, page2]) result, warnings = await _fetch_flows_for_deployments( client, {str(fid) for fid in all_flow_ids} ) assert len(result) == 250 assert client.read_flows.call_count == 2 assert warnings == [] class TestFetchSDKData: """Tests for fetch_sdk_data main function.""" @pytest.fixture def mock_client(self) -> AsyncMock: """Create a mock client with basic setup.""" client = AsyncMock() client.api_healthcheck = AsyncMock(return_value=None) client.api_url = "https://api.prefect.cloud" return client async def test_fetch_sdk_data_success(self, mock_client: AsyncMock) -> None: """Successfully fetches SDK data.""" flow_id = uuid4() dep = make_deployment_response( "production", flow_id=flow_id, work_pool_name="k8s-pool", 
parameter_schema={"type": "object", "properties": {}}, ) flow = make_flow_response("my-flow", flow_id) wp = make_work_pool_response( "k8s-pool", base_job_template={"variables": {"type": "object", "properties": {}}}, ) mock_client.read_deployments = AsyncMock(return_value=[dep]) mock_client.read_flows = AsyncMock(return_value=[flow]) mock_client.read_work_pool = AsyncMock(return_value=wp) result = await fetch_sdk_data(mock_client) assert isinstance(result, FetchResult) assert result.data.flow_count == 1 assert result.data.deployment_count == 1 assert result.data.work_pool_count == 1 assert "my-flow" in result.data.flows assert len(result.data.flows["my-flow"].deployments) == 1 assert result.data.flows["my-flow"].deployments[0].name == "production" async def test_fetch_sdk_data_no_deployments(self, mock_client: AsyncMock) -> None: """Raises NoDeploymentsError when no deployments found.""" mock_client.read_deployments = AsyncMock(return_value=[]) with pytest.raises(NoDeploymentsError) as exc_info: await fetch_sdk_data(mock_client) assert "No deployments found" in str(exc_info.value) async def test_fetch_sdk_data_filter_no_match(self, mock_client: AsyncMock) -> None: """Raises NoDeploymentsError when filters don't match.""" mock_client.read_deployments = AsyncMock(return_value=[]) with pytest.raises(NoDeploymentsError) as exc_info: await fetch_sdk_data(mock_client, flow_names=["nonexistent-flow"]) assert "No deployments matched filters" in str(exc_info.value) async def test_fetch_sdk_data_with_flow_filter( self, mock_client: AsyncMock ) -> None: """Applies flow name filter.""" flow_id = uuid4() dep = make_deployment_response("production", flow_id=flow_id) flow = make_flow_response("my-flow", flow_id) mock_client.read_deployments = AsyncMock(return_value=[dep]) mock_client.read_flows = AsyncMock(return_value=[flow]) await fetch_sdk_data(mock_client, flow_names=["my-flow"]) call_kwargs = mock_client.read_deployments.call_args.kwargs assert call_kwargs["flow_filter"] is 
not None async def test_fetch_sdk_data_with_deployment_filter( self, mock_client: AsyncMock ) -> None: """Applies deployment name filter.""" flow_id = uuid4() dep = make_deployment_response("production", flow_id=flow_id) flow = make_flow_response("my-flow", flow_id) mock_client.read_deployments = AsyncMock(return_value=[dep]) mock_client.read_flows = AsyncMock(return_value=[flow]) await fetch_sdk_data(mock_client, deployment_names=["my-flow/production"]) call_kwargs = mock_client.read_deployments.call_args.kwargs assert call_kwargs["deployment_filter"] is not None async def test_fetch_sdk_data_missing_work_pool( self, mock_client: AsyncMock ) -> None: """Handles missing work pool with warning.""" flow_id = uuid4() dep = make_deployment_response( "production", flow_id=flow_id, work_pool_name="missing-pool", ) flow = make_flow_response("my-flow", flow_id) mock_client.read_deployments = AsyncMock(return_value=[dep]) mock_client.read_flows = AsyncMock(return_value=[flow]) mock_client.read_work_pool = AsyncMock(side_effect=ObjectNotFound("Not found")) result = await fetch_sdk_data(mock_client) assert len(result.warnings) >= 1 assert any("missing-pool" in w for w in result.warnings) async def test_fetch_sdk_data_multiple_deployments_same_flow( self, mock_client: AsyncMock ) -> None: """Groups multiple deployments under same flow.""" flow_id = uuid4() dep1 = make_deployment_response("production", flow_id=flow_id) dep2 = make_deployment_response("staging", flow_id=flow_id) flow = make_flow_response("my-flow", flow_id) mock_client.read_deployments = AsyncMock(return_value=[dep1, dep2]) mock_client.read_flows = AsyncMock(return_value=[flow]) result = await fetch_sdk_data(mock_client) assert result.data.flow_count == 1 assert result.data.deployment_count == 2 assert len(result.data.flows["my-flow"].deployments) == 2 async def test_fetch_sdk_data_multiple_flows(self, mock_client: AsyncMock) -> None: """Handles deployments from multiple flows.""" flow_id1 = uuid4() flow_id2 = 
uuid4() dep1 = make_deployment_response("production", flow_id=flow_id1) dep2 = make_deployment_response("daily", flow_id=flow_id2) flow1 = make_flow_response("etl-flow", flow_id1) flow2 = make_flow_response("sync-flow", flow_id2) mock_client.read_deployments = AsyncMock(return_value=[dep1, dep2]) mock_client.read_flows = AsyncMock(return_value=[flow1, flow2]) result = await fetch_sdk_data(mock_client) assert result.data.flow_count == 2 assert result.data.deployment_count == 2 assert "etl-flow" in result.data.flows assert "sync-flow" in result.data.flows async def test_fetch_sdk_data_auth_error(self, mock_client: AsyncMock) -> None: """Raises AuthenticationError on auth failure.""" mock_client.api_healthcheck = AsyncMock( return_value=Exception("401 Unauthorized") ) with pytest.raises(AuthenticationError): await fetch_sdk_data(mock_client) async def test_fetch_sdk_data_connection_error( self, mock_client: AsyncMock ) -> None: """Raises APIConnectionError on connection failure.""" mock_client.api_healthcheck = AsyncMock( return_value=Exception("Connection refused") ) with pytest.raises(APIConnectionError): await fetch_sdk_data(mock_client) async def test_fetch_sdk_data_metadata(self, mock_client: AsyncMock) -> None: """Generates correct metadata.""" flow_id = uuid4() dep = make_deployment_response("production", flow_id=flow_id) flow = make_flow_response("my-flow", flow_id) mock_client.read_deployments = AsyncMock(return_value=[dep]) mock_client.read_flows = AsyncMock(return_value=[flow]) result = await fetch_sdk_data(mock_client) assert result.data.metadata.generation_time is not None assert result.data.metadata.prefect_version is not None async def test_fetch_sdk_data_metadata_uses_client_api_url( self, mock_client: AsyncMock ) -> None: """Metadata api_url comes from client.api_url.""" flow_id = uuid4() dep = make_deployment_response("production", flow_id=flow_id) flow = make_flow_response("my-flow", flow_id) mock_client.read_deployments = 
AsyncMock(return_value=[dep]) mock_client.read_flows = AsyncMock(return_value=[flow]) mock_client.api_url = "https://custom.prefect.cloud/api" result = await fetch_sdk_data(mock_client) assert result.data.metadata.api_url == "https://custom.prefect.cloud/api" async def test_fetch_sdk_data_work_pool_warning_mentions_with_infra( self, mock_client: AsyncMock ) -> None: """Work pool warning mentions with_infra() will not be generated.""" flow_id = uuid4() dep = make_deployment_response( "production", flow_id=flow_id, work_pool_name="missing-pool", ) flow = make_flow_response("my-flow", flow_id) mock_client.read_deployments = AsyncMock(return_value=[dep]) mock_client.read_flows = AsyncMock(return_value=[flow]) mock_client.read_work_pool = AsyncMock(side_effect=ObjectNotFound("Not found")) result = await fetch_sdk_data(mock_client) assert any("with_infra()" in w for w in result.warnings) async def test_fetch_sdk_data_work_pool_error_includes_exception( self, mock_client: AsyncMock ) -> None: """Work pool fetch error includes exception details in warning.""" flow_id = uuid4() dep = make_deployment_response( "production", flow_id=flow_id, work_pool_name="error-pool", ) flow = make_flow_response("my-flow", flow_id) mock_client.read_deployments = AsyncMock(return_value=[dep]) mock_client.read_flows = AsyncMock(return_value=[flow]) mock_client.read_work_pool = AsyncMock( side_effect=RuntimeError("Connection timeout") ) result = await fetch_sdk_data(mock_client) # Warning should include the exception text assert any("Connection timeout" in w for w in result.warnings) assert any("error-pool" in w for w in result.warnings) async def test_fetch_sdk_data_ambiguous_short_name_warning( self, mock_client: AsyncMock ) -> None: """Warns when short deployment name matches multiple flows.""" flow_id1 = uuid4() flow_id2 = uuid4() # "production" deployment in two different flows dep1 = make_deployment_response("production", flow_id=flow_id1) dep2 = make_deployment_response("production", 
flow_id=flow_id2) flow1 = make_flow_response("etl-flow", flow_id1) flow2 = make_flow_response("sync-flow", flow_id2) mock_client.read_deployments = AsyncMock(return_value=[dep1, dep2]) mock_client.read_flows = AsyncMock(return_value=[flow1, flow2]) # Filter with just short name "production" result = await fetch_sdk_data(mock_client, deployment_names=["production"]) # Should warn about ambiguous short name assert any("Short deployment name 'production'" in w for w in result.warnings) assert any("Consider using full names" in w for w in result.warnings) # Both deployments should still be included assert result.data.deployment_count == 2
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/_sdk/test_fetcher.py", "license": "Apache License 2.0", "lines": 462, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:tests/_sdk/test_generator.py
"""Tests for the SDK generator module.""" from __future__ import annotations import ast from pathlib import Path from typing import Any from unittest.mock import AsyncMock, MagicMock from uuid import uuid4 import pytest from prefect._sdk.generator import ( APIConnectionError, AuthenticationError, GenerationResult, NoDeploymentsError, generate_sdk, ) from prefect.exceptions import ObjectNotFound def make_deployment_response( name: str, flow_id: Any = None, work_pool_name: str | None = None, parameter_schema: dict[str, Any] | None = None, description: str | None = None, ) -> MagicMock: """Create a mock DeploymentResponse.""" dep = MagicMock() dep.name = name dep.flow_id = flow_id or uuid4() dep.work_pool_name = work_pool_name dep.parameter_openapi_schema = parameter_schema dep.description = description return dep def make_flow_response(name: str, flow_id: Any = None) -> MagicMock: """Create a mock Flow response.""" flow = MagicMock() flow.name = name flow.id = flow_id or uuid4() return flow def make_work_pool_response( name: str, pool_type: str = "kubernetes", base_job_template: dict[str, Any] | None = None, ) -> MagicMock: """Create a mock WorkPool response.""" wp = MagicMock() wp.name = name wp.type = pool_type wp.base_job_template = base_job_template or {} return wp class TestGenerateSDK: """Tests for the generate_sdk function.""" @pytest.fixture def mock_client(self) -> AsyncMock: """Create a mock Prefect client.""" client = AsyncMock() client.api_healthcheck = AsyncMock(return_value=None) client.api_url = "https://api.prefect.cloud" return client @pytest.fixture def output_path(self, tmp_path: Path) -> Path: """Create a temporary output path.""" return tmp_path / "my_sdk.py" async def test_generate_sdk_basic( self, mock_client: AsyncMock, output_path: Path ) -> None: """Generates a valid SDK file.""" flow_id = uuid4() dep = make_deployment_response( "production", flow_id=flow_id, parameter_schema={ "type": "object", "properties": { "source": {"type": "string"}, 
"count": {"type": "integer", "default": 10}, }, "required": ["source"], }, ) flow = make_flow_response("my-etl-flow", flow_id) mock_client.read_deployments = AsyncMock(return_value=[dep]) mock_client.read_flows = AsyncMock(return_value=[flow]) result = await generate_sdk(mock_client, output_path) assert isinstance(result, GenerationResult) assert result.output_path == output_path assert result.flow_count == 1 assert result.deployment_count == 1 assert result.work_pool_count == 0 assert output_path.exists() # Verify generated code is valid Python code = output_path.read_text() ast.parse(code) async def test_generate_sdk_with_work_pool( self, mock_client: AsyncMock, output_path: Path ) -> None: """Generates SDK with work pool job variables.""" flow_id = uuid4() dep = make_deployment_response( "production", flow_id=flow_id, work_pool_name="k8s-pool", ) flow = make_flow_response("my-flow", flow_id) wp = make_work_pool_response( "k8s-pool", base_job_template={ "variables": { "type": "object", "properties": { "image": {"type": "string"}, "cpu": {"type": "string"}, }, } }, ) mock_client.read_deployments = AsyncMock(return_value=[dep]) mock_client.read_flows = AsyncMock(return_value=[flow]) mock_client.read_work_pool = AsyncMock(return_value=wp) result = await generate_sdk(mock_client, output_path) assert result.work_pool_count == 1 code = output_path.read_text() assert "K8SPoolJobVariables" in code or "K8sPoolJobVariables" in code async def test_generate_sdk_with_flow_filter( self, mock_client: AsyncMock, output_path: Path ) -> None: """Passes flow filter to fetcher.""" flow_id = uuid4() dep = make_deployment_response("production", flow_id=flow_id) flow = make_flow_response("my-flow", flow_id) mock_client.read_deployments = AsyncMock(return_value=[dep]) mock_client.read_flows = AsyncMock(return_value=[flow]) result = await generate_sdk(mock_client, output_path, flow_names=["my-flow"]) assert result.flow_count == 1 async def test_generate_sdk_with_deployment_filter( self, 
mock_client: AsyncMock, output_path: Path ) -> None: """Passes deployment filter to fetcher.""" flow_id = uuid4() dep = make_deployment_response("production", flow_id=flow_id) flow = make_flow_response("my-flow", flow_id) mock_client.read_deployments = AsyncMock(return_value=[dep]) mock_client.read_flows = AsyncMock(return_value=[flow]) result = await generate_sdk( mock_client, output_path, deployment_names=["my-flow/production"] ) assert result.deployment_count == 1 async def test_generate_sdk_creates_parent_directories( self, mock_client: AsyncMock, tmp_path: Path ) -> None: """Creates parent directories if they don't exist.""" output_path = tmp_path / "subdir" / "nested" / "my_sdk.py" flow_id = uuid4() dep = make_deployment_response("production", flow_id=flow_id) flow = make_flow_response("my-flow", flow_id) mock_client.read_deployments = AsyncMock(return_value=[dep]) mock_client.read_flows = AsyncMock(return_value=[flow]) result = await generate_sdk(mock_client, output_path) assert output_path.exists() assert result.output_path == output_path async def test_generate_sdk_overwrites_existing( self, mock_client: AsyncMock, output_path: Path ) -> None: """Overwrites existing file.""" output_path.write_text("# old content") flow_id = uuid4() dep = make_deployment_response("production", flow_id=flow_id) flow = make_flow_response("my-flow", flow_id) mock_client.read_deployments = AsyncMock(return_value=[dep]) mock_client.read_flows = AsyncMock(return_value=[flow]) await generate_sdk(mock_client, output_path) code = output_path.read_text() assert "# old content" not in code assert "Prefect SDK" in code async def test_generate_sdk_collects_warnings( self, mock_client: AsyncMock, output_path: Path ) -> None: """Collects warnings from fetcher.""" flow_id = uuid4() dep = make_deployment_response( "production", flow_id=flow_id, work_pool_name="missing-pool", ) flow = make_flow_response("my-flow", flow_id) mock_client.read_deployments = AsyncMock(return_value=[dep]) 
mock_client.read_flows = AsyncMock(return_value=[flow]) # Simulate work pool not found mock_client.read_work_pool = AsyncMock(side_effect=ObjectNotFound("Not found")) result = await generate_sdk(mock_client, output_path) assert len(result.warnings) >= 1 assert any("missing-pool" in w for w in result.warnings) async def test_generate_sdk_auth_error( self, mock_client: AsyncMock, output_path: Path ) -> None: """Raises AuthenticationError on auth failure.""" mock_client.api_healthcheck = AsyncMock( return_value=Exception("401 Unauthorized") ) with pytest.raises(AuthenticationError): await generate_sdk(mock_client, output_path) async def test_generate_sdk_connection_error( self, mock_client: AsyncMock, output_path: Path ) -> None: """Raises APIConnectionError on connection failure.""" mock_client.api_healthcheck = AsyncMock( return_value=Exception("Connection refused") ) with pytest.raises(APIConnectionError): await generate_sdk(mock_client, output_path) async def test_generate_sdk_no_deployments( self, mock_client: AsyncMock, output_path: Path ) -> None: """Raises NoDeploymentsError when no deployments found.""" mock_client.read_deployments = AsyncMock(return_value=[]) with pytest.raises(NoDeploymentsError): await generate_sdk(mock_client, output_path) async def test_generate_sdk_multiple_deployments( self, mock_client: AsyncMock, output_path: Path ) -> None: """Handles multiple deployments across flows.""" flow_id1 = uuid4() flow_id2 = uuid4() deps = [ make_deployment_response("production", flow_id=flow_id1), make_deployment_response("staging", flow_id=flow_id1), make_deployment_response("daily", flow_id=flow_id2), ] flows = [ make_flow_response("etl-flow", flow_id1), make_flow_response("sync-flow", flow_id2), ] mock_client.read_deployments = AsyncMock(return_value=deps) mock_client.read_flows = AsyncMock(return_value=flows) result = await generate_sdk(mock_client, output_path) assert result.flow_count == 2 assert result.deployment_count == 3 code = 
output_path.read_text() assert "etl-flow/production" in code assert "etl-flow/staging" in code assert "sync-flow/daily" in code async def test_generate_sdk_valid_python( self, mock_client: AsyncMock, output_path: Path ) -> None: """Generated code is valid Python.""" flow_id = uuid4() dep = make_deployment_response( "production", flow_id=flow_id, work_pool_name="k8s-pool", parameter_schema={ "type": "object", "properties": { "source": {"type": "string"}, "batch_size": {"type": "integer", "default": 100}, "full_refresh": {"type": "boolean", "default": False}, }, "required": ["source"], }, description="Production ETL deployment", ) flow = make_flow_response("my-etl-flow", flow_id) wp = make_work_pool_response( "k8s-pool", base_job_template={ "variables": { "type": "object", "properties": { "image": {"type": "string"}, "cpu": {"type": "string"}, "memory": {"type": "string"}, }, } }, ) mock_client.read_deployments = AsyncMock(return_value=[dep]) mock_client.read_flows = AsyncMock(return_value=[flow]) mock_client.read_work_pool = AsyncMock(return_value=wp) await generate_sdk(mock_client, output_path) code = output_path.read_text() # Verify valid Python ast.parse(code) # Verify expected content assert "deployments" in code assert "from_name" in code assert "my-etl-flow/production" in code assert "DeploymentName" in code async def test_generate_sdk_result_statistics( self, mock_client: AsyncMock, output_path: Path ) -> None: """Returns correct statistics in result.""" flow_id1 = uuid4() flow_id2 = uuid4() deps = [ make_deployment_response("prod", flow_id=flow_id1, work_pool_name="pool-1"), make_deployment_response( "stage", flow_id=flow_id1, work_pool_name="pool-2" ), make_deployment_response( "daily", flow_id=flow_id2, work_pool_name="pool-1" ), ] flows = [ make_flow_response("flow-a", flow_id1), make_flow_response("flow-b", flow_id2), ] mock_client.read_deployments = AsyncMock(return_value=deps) mock_client.read_flows = AsyncMock(return_value=flows) 
mock_client.read_work_pool = AsyncMock( side_effect=lambda name: make_work_pool_response( name, base_job_template={ "variables": { "type": "object", "properties": {"x": {"type": "string"}}, } }, ) ) result = await generate_sdk(mock_client, output_path) assert result.flow_count == 2 assert result.deployment_count == 3 assert result.work_pool_count == 2 # pool-1 and pool-2
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/_sdk/test_generator.py", "license": "Apache License 2.0", "lines": 321, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:src/prefect/_sdk/renderer.py
""" SDK renderer module. This module is responsible for converting SDKData into template-friendly context and rendering the Jinja2 template to produce the final SDK file. """ import importlib.resources from dataclasses import dataclass, field from pathlib import Path from typing import Any import jinja2 from prefect._sdk.models import DeploymentInfo, SDKData, WorkPoolInfo from prefect._sdk.naming import safe_class_name, safe_identifier from prefect._sdk.schema_converter import extract_fields_from_schema from prefect._sdk.types import FieldInfo @dataclass class ParameterContext: """Processed parameter information for template rendering.""" name: str """Safe Python identifier for the parameter.""" original_name: str """Original parameter name from schema.""" python_type: str """Python type annotation string.""" required: bool """Whether the parameter is required.""" has_default: bool """Whether the parameter has a default value.""" default: Any """The default value, if any.""" default_repr: str """Python repr of the default value for code generation.""" description: str | None = None """Description from schema.""" @dataclass class JobVariableContext: """Processed job variable information for template rendering.""" name: str """Safe Python identifier for the job variable.""" original_name: str """Original job variable name from schema.""" python_type: str """Python type annotation string.""" description: str | None = None """Description from schema.""" @dataclass class WorkPoolContext: """Processed work pool information for template rendering.""" class_name: str """Generated TypedDict class name (e.g., 'KubernetesPoolJobVariables').""" original_name: str """Original work pool name.""" fields: list[JobVariableContext] """List of job variable fields.""" @dataclass class DeploymentContext: """Processed deployment information for template rendering.""" class_name: str """Generated class name (e.g., '_MyEtlFlowProduction').""" full_name: str """Full deployment name (e.g., 
'my-etl-flow/production').""" work_pool_name: str | None """Name of the work pool, if any.""" description: str | None """Deployment description for docstring.""" required_params: list[ParameterContext] """Required flow parameters.""" optional_params: list[ParameterContext] """Optional flow parameters.""" has_job_variables: bool """Whether this deployment has job variables to configure.""" job_variable_fields: list[JobVariableContext] """List of job variable fields for with_infra() method.""" @dataclass class TemplateContext: """Complete context for template rendering.""" metadata: dict[str, Any] """Generation metadata.""" module_name: str """Output module name (for docstring examples).""" deployment_names: list[str] """List of all deployment full names.""" work_pool_typeddicts: list[WorkPoolContext] """Work pool TypedDict definitions.""" deployments: list[DeploymentContext] """Deployment class definitions.""" @dataclass class RenderResult: """Result of SDK rendering.""" code: str """The generated Python code.""" warnings: list[str] = field(default_factory=list) """Warnings generated during rendering.""" def _safe_repr(value: Any) -> str: """ Generate a safe repr for a value that can be used in generated code. Handles common types that appear in JSON Schema defaults. """ if value is None: return "None" if isinstance(value, bool): return str(value) if isinstance(value, (int, float, str)): return repr(value) if isinstance(value, list): if not value: return "[]" # For non-empty lists, we need to recurse items = ", ".join(_safe_repr(item) for item in value) return f"[{items}]" if isinstance(value, dict): if not value: return "{}" # For non-empty dicts, we need to recurse items = ", ".join(f"{_safe_repr(k)}: {_safe_repr(v)}" for k, v in value.items()) return "{" + items + "}" # Fallback to repr return repr(value) def _sanitize_docstring(text: str | None) -> str | None: """ Sanitize text for use in docstrings. 
Handles edge cases like triple quotes that would break docstrings. """ if text is None: return None # Replace triple quotes with single quotes text = text.replace('"""', "'''") # Strip leading/trailing whitespace return text.strip() def _make_optional(python_type: str) -> str: """ Make a type optional (add | None) if it isn't already nullable. Avoids redundant `str | None | None` patterns when the schema type is already nullable. """ # Check if type already includes None # Handle both "T | None" and "None | T" patterns # Also handle "T | None | U" embedded patterns type_parts = [p.strip() for p in python_type.split("|")] if "None" in type_parts: return python_type return f"{python_type} | None" def _process_fields_to_params( fields: list[FieldInfo], existing_identifiers: set[str], ) -> tuple[list[ParameterContext], list[ParameterContext]]: """ Convert FieldInfo objects to ParameterContext objects. Returns (required_params, optional_params) tuple. """ required: list[ParameterContext] = [] optional: list[ParameterContext] = [] for field_info in fields: # Generate safe identifier using deployment context to avoid 'self' collision safe_name = safe_identifier(field_info.name, existing_identifiers, "deployment") existing_identifiers.add(safe_name) param = ParameterContext( name=safe_name, original_name=field_info.name, python_type=field_info.python_type, required=field_info.required, has_default=field_info.has_default, default=field_info.default, default_repr=_safe_repr(field_info.default), description=field_info.description, ) if field_info.required: required.append(param) else: optional.append(param) return required, optional def _process_fields_to_job_variables( fields: list[FieldInfo], existing_identifiers: set[str], ) -> list[JobVariableContext]: """Convert FieldInfo objects to JobVariableContext objects.""" result: list[JobVariableContext] = [] for field_info in fields: # Generate safe identifier using deployment context to avoid 'self' collision safe_name = 
safe_identifier(field_info.name, existing_identifiers, "deployment") existing_identifiers.add(safe_name) result.append( JobVariableContext( name=safe_name, original_name=field_info.name, python_type=field_info.python_type, description=field_info.description, ) ) return result def _process_work_pool( work_pool: WorkPoolInfo, existing_class_names: set[str], ) -> tuple[WorkPoolContext | None, list[str]]: """ Process a work pool into template context. Returns (context, warnings) tuple. Returns None context if no fields. """ warnings: list[str] = [] # Extract fields from job variables schema schema = work_pool.job_variables_schema if not schema or not schema.get("properties"): return None, warnings fields, field_warnings = extract_fields_from_schema(schema) warnings.extend(field_warnings) if not fields: return None, warnings # Generate class name base_class_name = f"{work_pool.name}JobVariables" class_name = safe_class_name(base_class_name, existing_class_names) existing_class_names.add(class_name) # Process fields to job variables existing_identifiers: set[str] = set() job_vars = _process_fields_to_job_variables(fields, existing_identifiers) return ( WorkPoolContext( class_name=class_name, original_name=work_pool.name, fields=job_vars, ), warnings, ) def _process_deployment( deployment: DeploymentInfo, work_pools: dict[str, WorkPoolInfo], processed_work_pools: dict[str, WorkPoolContext], existing_class_names: set[str], ) -> tuple[DeploymentContext, list[str]]: """ Process a deployment into template context. Returns (context, warnings) tuple. 
""" warnings: list[str] = [] # Generate class name (underscore prefix for private class) # Combine flow name and deployment name for uniqueness base_name = f"{deployment.flow_name}_{deployment.name}" class_name = f"_{safe_class_name(base_name, existing_class_names)}" existing_class_names.add(class_name) # Process parameter schema required_params: list[ParameterContext] = [] optional_params: list[ParameterContext] = [] if deployment.parameter_schema: schema = deployment.parameter_schema fields, field_warnings = extract_fields_from_schema(schema) warnings.extend(field_warnings) existing_identifiers: set[str] = set() required_params, optional_params = _process_fields_to_params( fields, existing_identifiers ) # Process job variables from work pool job_variable_fields: list[JobVariableContext] = [] has_job_variables = False if deployment.work_pool_name: # Check if we have processed this work pool if deployment.work_pool_name in processed_work_pools: wp_context = processed_work_pools[deployment.work_pool_name] job_variable_fields = wp_context.fields has_job_variables = len(job_variable_fields) > 0 elif deployment.work_pool_name in work_pools: # Work pool exists but has no job variables pass else: warnings.append( f"Work pool '{deployment.work_pool_name}' not found for deployment " f"'{deployment.full_name}'" ) return ( DeploymentContext( class_name=class_name, full_name=deployment.full_name, work_pool_name=deployment.work_pool_name, description=_sanitize_docstring(deployment.description), required_params=required_params, optional_params=optional_params, has_job_variables=has_job_variables, job_variable_fields=job_variable_fields, ), warnings, ) def build_template_context( data: SDKData, module_name: str, ) -> tuple[TemplateContext, list[str]]: """ Convert SDKData to template-friendly context. Args: data: The SDK data from the API. module_name: The output module name (for docstring examples). Returns: A tuple of (TemplateContext, warnings). 
""" warnings: list[str] = [] existing_class_names: set[str] = set() # Process work pools first (deployments reference them) # Sort by name for deterministic output work_pool_typeddicts: list[WorkPoolContext] = [] processed_work_pools: dict[str, WorkPoolContext] = {} for work_pool in sorted(data.work_pools.values(), key=lambda wp: wp.name): wp_context, wp_warnings = _process_work_pool(work_pool, existing_class_names) warnings.extend(wp_warnings) if wp_context: work_pool_typeddicts.append(wp_context) processed_work_pools[work_pool.name] = wp_context # Process deployments deployment_contexts: list[DeploymentContext] = [] for deployment in data.all_deployments(): dep_context, dep_warnings = _process_deployment( deployment, data.work_pools, processed_work_pools, existing_class_names, ) warnings.extend(dep_warnings) deployment_contexts.append(dep_context) # Build metadata dict metadata = { "generation_time": data.metadata.generation_time, "prefect_version": data.metadata.prefect_version, "workspace_name": data.metadata.workspace_name, "api_url": data.metadata.api_url, } return ( TemplateContext( metadata=metadata, module_name=module_name, deployment_names=data.deployment_names, work_pool_typeddicts=work_pool_typeddicts, deployments=deployment_contexts, ), warnings, ) def _get_template() -> jinja2.Template: """Load the SDK template from package resources. Uses importlib.resources for robust access in different installation layouts (editable installs, wheels, etc.). 
""" # Use importlib.resources.files() for robust package resource access template_files = importlib.resources.files("prefect._sdk.templates") template_content = template_files.joinpath("sdk.py.jinja").read_text() # Create Jinja2 environment with safe defaults env = jinja2.Environment( autoescape=False, # We're generating Python code, not HTML undefined=jinja2.StrictUndefined, # Raise error on undefined variables keep_trailing_newline=True, # Preserve trailing newline ) # Add custom filters env.filters["repr"] = repr env.filters["make_optional"] = _make_optional return env.from_string(template_content) def render_sdk(data: SDKData, output_path: Path) -> RenderResult: """ Render the SDK template and write to file. Args: data: The SDK data from the API. output_path: Path to write the generated SDK file. Returns: RenderResult with the generated code and any warnings. """ # Extract module name from output path module_name = output_path.stem # Build template context context, warnings = build_template_context(data, module_name) # Load and render template template = _get_template() code = template.render( metadata=context.metadata, module_name=context.module_name, deployment_names=context.deployment_names, work_pool_typeddicts=context.work_pool_typeddicts, deployments=context.deployments, ) # Create parent directories if needed output_path.parent.mkdir(parents=True, exist_ok=True) # Write output file output_path.write_text(code) return RenderResult(code=code, warnings=warnings) def render_sdk_to_string(data: SDKData, module_name: str = "sdk") -> RenderResult: """ Render the SDK template to a string without writing to file. Args: data: The SDK data from the API. module_name: The module name for docstring examples. Returns: RenderResult with the generated code and any warnings. 
""" # Build template context context, warnings = build_template_context(data, module_name) # Load and render template template = _get_template() code = template.render( metadata=context.metadata, module_name=context.module_name, deployment_names=context.deployment_names, work_pool_typeddicts=context.work_pool_typeddicts, deployments=context.deployments, ) return RenderResult(code=code, warnings=warnings)
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/_sdk/renderer.py", "license": "Apache License 2.0", "lines": 396, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
PrefectHQ/prefect:tests/_sdk/test_renderer.py
"""Tests for the SDK renderer module.""" import ast import tempfile from pathlib import Path from prefect._sdk.models import ( DeploymentInfo, FlowInfo, SDKData, SDKGenerationMetadata, WorkPoolInfo, ) from prefect._sdk.renderer import ( JobVariableContext, RenderResult, WorkPoolContext, _make_optional, _process_deployment, _process_fields_to_job_variables, _process_fields_to_params, _process_work_pool, _safe_repr, _sanitize_docstring, build_template_context, render_sdk, render_sdk_to_string, ) from prefect._sdk.types import FieldInfo class TestSafeRepr: """Tests for _safe_repr function.""" def test_none(self): assert _safe_repr(None) == "None" def test_bool_true(self): assert _safe_repr(True) == "True" def test_bool_false(self): assert _safe_repr(False) == "False" def test_int(self): assert _safe_repr(42) == "42" assert _safe_repr(-1) == "-1" assert _safe_repr(0) == "0" def test_float(self): assert _safe_repr(3.14) == "3.14" assert _safe_repr(-0.5) == "-0.5" def test_string(self): assert _safe_repr("hello") == "'hello'" assert _safe_repr("it's") == '"it\'s"' assert _safe_repr('say "hi"') == "'say \"hi\"'" def test_empty_list(self): assert _safe_repr([]) == "[]" def test_list_with_items(self): assert _safe_repr([1, 2, 3]) == "[1, 2, 3]" assert _safe_repr(["a", "b"]) == "['a', 'b']" def test_empty_dict(self): assert _safe_repr({}) == "{}" def test_dict_with_items(self): result = _safe_repr({"a": 1}) assert result == "{'a': 1}" class TestSanitizeDocstring: """Tests for _sanitize_docstring function.""" def test_none(self): assert _sanitize_docstring(None) is None def test_normal_text(self): assert _sanitize_docstring("Hello world") == "Hello world" def test_triple_quotes(self): assert _sanitize_docstring('Say """hello"""') == "Say '''hello'''" def test_whitespace_stripped(self): assert _sanitize_docstring(" hello ") == "hello" class TestMakeOptional: """Tests for _make_optional function.""" def test_simple_type(self): """Adds | None to simple types.""" assert 
_make_optional("str") == "str | None" assert _make_optional("int") == "int | None" def test_already_nullable(self): """Does not add | None if type already includes None.""" assert _make_optional("str | None") == "str | None" assert _make_optional("None | str") == "None | str" def test_nullable_in_union(self): """Does not add | None if None is part of a larger union.""" assert _make_optional("str | int | None") == "str | int | None" assert _make_optional("str | None | int") == "str | None | int" def test_complex_type(self): """Handles complex types correctly.""" assert _make_optional("list[str]") == "list[str] | None" assert _make_optional("dict[str, Any]") == "dict[str, Any] | None" def test_union_without_none(self): """Adds | None to unions that don't have None.""" assert _make_optional("str | int") == "str | int | None" class TestProcessFieldsToParams: """Tests for _process_fields_to_params function.""" def test_empty_fields(self): required, optional = _process_fields_to_params([], set()) assert required == [] assert optional == [] def test_required_field(self): fields = [ FieldInfo( name="source", python_type="str", required=True, has_default=False, ) ] required, optional = _process_fields_to_params(fields, set()) assert len(required) == 1 assert len(optional) == 0 assert required[0].name == "source" assert required[0].python_type == "str" assert required[0].required is True def test_optional_field_with_default(self): fields = [ FieldInfo( name="batch_size", python_type="int", required=False, has_default=True, default=100, ) ] required, optional = _process_fields_to_params(fields, set()) assert len(required) == 0 assert len(optional) == 1 assert optional[0].name == "batch_size" assert optional[0].python_type == "int" assert optional[0].has_default is True assert optional[0].default_repr == "100" def test_name_collision_handling(self): fields = [ FieldInfo(name="source", python_type="str", required=True), FieldInfo(name="source", python_type="int", 
required=True), ] required, _ = _process_fields_to_params(fields, set()) # Second field should get a unique name assert required[0].name == "source" assert required[1].name == "source_2" def test_field_with_description(self): fields = [ FieldInfo( name="source", python_type="str", required=True, description="The data source URL to fetch from", ) ] required, _ = _process_fields_to_params(fields, set()) assert required[0].description == "The data source URL to fetch from" def test_field_without_description(self): fields = [ FieldInfo( name="source", python_type="str", required=True, ) ] required, _ = _process_fields_to_params(fields, set()) assert required[0].description is None class TestProcessFieldsToJobVariables: """Tests for _process_fields_to_job_variables function.""" def test_empty_fields(self): result = _process_fields_to_job_variables([], set()) assert result == [] def test_single_field(self): fields = [FieldInfo(name="image", python_type="str", required=False)] result = _process_fields_to_job_variables(fields, set()) assert len(result) == 1 assert result[0].name == "image" assert result[0].original_name == "image" assert result[0].python_type == "str" def test_field_with_description(self): fields = [ FieldInfo( name="image", python_type="str", required=False, description="The Docker image to use", ) ] result = _process_fields_to_job_variables(fields, set()) assert result[0].description == "The Docker image to use" def test_field_without_description(self): fields = [FieldInfo(name="image", python_type="str", required=False)] result = _process_fields_to_job_variables(fields, set()) assert result[0].description is None class TestProcessWorkPool: """Tests for _process_work_pool function.""" def test_empty_schema(self): wp = WorkPoolInfo(name="test-pool", pool_type="docker", job_variables_schema={}) context, warnings = _process_work_pool(wp, set()) assert context is None assert warnings == [] def test_schema_without_properties(self): wp = WorkPoolInfo( 
name="test-pool", pool_type="docker", job_variables_schema={"type": "object"}, ) context, warnings = _process_work_pool(wp, set()) assert context is None assert warnings == [] def test_schema_with_properties(self): wp = WorkPoolInfo( name="kubernetes-pool", pool_type="kubernetes", job_variables_schema={ "type": "object", "properties": { "image": {"type": "string"}, "namespace": {"type": "string"}, }, }, ) context, warnings = _process_work_pool(wp, set()) assert context is not None assert context.class_name == "KubernetesPoolJobVariables" assert context.original_name == "kubernetes-pool" assert len(context.fields) == 2 assert warnings == [] class TestProcessDeployment: """Tests for _process_deployment function.""" def test_deployment_without_params(self): deployment = DeploymentInfo( name="production", flow_name="my-flow", full_name="my-flow/production", ) context, warnings = _process_deployment(deployment, {}, {}, set()) assert context.class_name == "_MyFlowProduction" assert context.full_name == "my-flow/production" assert context.required_params == [] assert context.optional_params == [] assert context.has_job_variables is False def test_deployment_with_params(self): deployment = DeploymentInfo( name="production", flow_name="my-flow", full_name="my-flow/production", parameter_schema={ "type": "object", "properties": { "source": {"type": "string"}, "batch_size": {"type": "integer", "default": 100}, }, "required": ["source"], }, ) context, warnings = _process_deployment(deployment, {}, {}, set()) assert len(context.required_params) == 1 assert len(context.optional_params) == 1 assert context.required_params[0].name == "source" assert context.optional_params[0].name == "batch_size" assert context.optional_params[0].default_repr == "100" def test_deployment_with_work_pool(self): deployment = DeploymentInfo( name="production", flow_name="my-flow", full_name="my-flow/production", work_pool_name="k8s-pool", ) processed_work_pools = { "k8s-pool": WorkPoolContext( 
class_name="K8SPoolJobVariables", original_name="k8s-pool", fields=[ JobVariableContext( name="image", original_name="image", python_type="str", ) ], ) } context, warnings = _process_deployment( deployment, {}, processed_work_pools, set() ) assert context.has_job_variables is True assert len(context.job_variable_fields) == 1 assert context.job_variable_fields[0].name == "image" def test_deployment_with_missing_work_pool(self): deployment = DeploymentInfo( name="production", flow_name="my-flow", full_name="my-flow/production", work_pool_name="missing-pool", ) context, warnings = _process_deployment(deployment, {}, {}, set()) assert context.has_job_variables is False assert len(warnings) == 1 assert "missing-pool" in warnings[0] class TestBuildTemplateContext: """Tests for build_template_context function.""" def test_empty_data(self): data = SDKData( metadata=SDKGenerationMetadata( generation_time="2026-01-06T14:30:00Z", prefect_version="3.2.0", ) ) context, warnings = build_template_context(data, "my_sdk") assert context.module_name == "my_sdk" assert context.deployment_names == [] assert context.work_pool_typeddicts == [] assert context.deployments == [] def test_full_data(self): data = SDKData( metadata=SDKGenerationMetadata( generation_time="2026-01-06T14:30:00Z", prefect_version="3.2.0", workspace_name="my-workspace", ), flows={ "my-flow": FlowInfo( name="my-flow", deployments=[ DeploymentInfo( name="production", flow_name="my-flow", full_name="my-flow/production", parameter_schema={ "type": "object", "properties": {"source": {"type": "string"}}, "required": ["source"], }, work_pool_name="k8s-pool", ) ], ) }, work_pools={ "k8s-pool": WorkPoolInfo( name="k8s-pool", pool_type="kubernetes", job_variables_schema={ "type": "object", "properties": {"image": {"type": "string"}}, }, ) }, ) context, warnings = build_template_context(data, "my_sdk") assert context.module_name == "my_sdk" assert context.deployment_names == ["my-flow/production"] assert 
len(context.work_pool_typeddicts) == 1 assert len(context.deployments) == 1 assert context.deployments[0].has_job_variables is True class TestRenderSdkToString: """Tests for render_sdk_to_string function.""" def test_empty_sdk(self): data = SDKData( metadata=SDKGenerationMetadata( generation_time="2026-01-06T14:30:00Z", prefect_version="3.2.0", ) ) result = render_sdk_to_string(data) assert isinstance(result, RenderResult) assert "Prefect SDK" in result.code assert "2026-01-06T14:30:00Z" in result.code assert "3.2.0" in result.code def test_generated_code_is_valid_python(self): data = SDKData( metadata=SDKGenerationMetadata( generation_time="2026-01-06T14:30:00Z", prefect_version="3.2.0", ), flows={ "my-flow": FlowInfo( name="my-flow", deployments=[ DeploymentInfo( name="production", flow_name="my-flow", full_name="my-flow/production", parameter_schema={ "type": "object", "properties": { "source": {"type": "string"}, "batch_size": {"type": "integer", "default": 100}, }, "required": ["source"], }, ) ], ) }, ) result = render_sdk_to_string(data) # Should be valid Python ast.parse(result.code) def test_deployment_class_structure(self): data = SDKData( metadata=SDKGenerationMetadata( generation_time="2026-01-06T14:30:00Z", prefect_version="3.2.0", ), flows={ "my-flow": FlowInfo( name="my-flow", deployments=[ DeploymentInfo( name="production", flow_name="my-flow", full_name="my-flow/production", ) ], ) }, ) result = render_sdk_to_string(data) # Check for key components assert "class _MyFlowProduction:" in result.code assert "def with_options(" in result.code assert "def run(" in result.code assert "def run_async(" in result.code assert "from prefect.deployments import run_deployment" in result.code assert "from prefect.deployments import arun_deployment" in result.code def test_deployment_with_job_variables(self): data = SDKData( metadata=SDKGenerationMetadata( generation_time="2026-01-06T14:30:00Z", prefect_version="3.2.0", ), flows={ "my-flow": FlowInfo( 
name="my-flow", deployments=[ DeploymentInfo( name="production", flow_name="my-flow", full_name="my-flow/production", work_pool_name="k8s-pool", ) ], ) }, work_pools={ "k8s-pool": WorkPoolInfo( name="k8s-pool", pool_type="kubernetes", job_variables_schema={ "type": "object", "properties": {"image": {"type": "string"}}, }, ) }, ) result = render_sdk_to_string(data) assert "def with_infra(" in result.code assert "image:" in result.code def test_deployments_namespace_single(self): """Test deployments namespace with a single deployment (no @overload needed).""" data = SDKData( metadata=SDKGenerationMetadata( generation_time="2026-01-06T14:30:00Z", prefect_version="3.2.0", ), flows={ "my-flow": FlowInfo( name="my-flow", deployments=[ DeploymentInfo( name="production", flow_name="my-flow", full_name="my-flow/production", ) ], ) }, ) result = render_sdk_to_string(data) assert "class deployments:" in result.code assert "def from_name(" in result.code # Python repr uses single quotes for simple strings assert "'my-flow/production'" in result.code # Single deployment shouldn't use @overload decorator # (because overload requires at least 2 variants) def test_deployments_namespace_multiple(self): """Test deployments namespace with multiple deployments (uses @overload).""" data = SDKData( metadata=SDKGenerationMetadata( generation_time="2026-01-06T14:30:00Z", prefect_version="3.2.0", ), flows={ "my-flow": FlowInfo( name="my-flow", deployments=[ DeploymentInfo( name="production", flow_name="my-flow", full_name="my-flow/production", ), DeploymentInfo( name="staging", flow_name="my-flow", full_name="my-flow/staging", ), ], ) }, ) result = render_sdk_to_string(data) assert "class deployments:" in result.code assert "@overload" in result.code assert "def from_name(" in result.code # Both deployments should be in the code assert "'my-flow/production'" in result.code assert "'my-flow/staging'" in result.code class TestRenderSdk: """Tests for render_sdk function.""" def 
test_writes_file(self): data = SDKData( metadata=SDKGenerationMetadata( generation_time="2026-01-06T14:30:00Z", prefect_version="3.2.0", ) ) with tempfile.TemporaryDirectory() as tmpdir: output_path = Path(tmpdir) / "my_sdk.py" result = render_sdk(data, output_path) assert output_path.exists() assert output_path.read_text() == result.code def test_creates_parent_directories(self): data = SDKData( metadata=SDKGenerationMetadata( generation_time="2026-01-06T14:30:00Z", prefect_version="3.2.0", ) ) with tempfile.TemporaryDirectory() as tmpdir: output_path = Path(tmpdir) / "nested" / "dir" / "my_sdk.py" result = render_sdk(data, output_path) assert output_path.exists() assert output_path.read_text() == result.code class TestEdgeCases: """Tests for edge cases in SDK generation.""" def test_deployment_name_with_special_chars(self): """Test deployment names with special characters are properly escaped.""" data = SDKData( metadata=SDKGenerationMetadata( generation_time="2026-01-06T14:30:00Z", prefect_version="3.2.0", ), flows={ "my-flow": FlowInfo( name="my-flow", deployments=[ DeploymentInfo( name="prod's-test", flow_name="my-flow", full_name="my-flow/prod's-test", ) ], ) }, ) result = render_sdk_to_string(data) # Should be valid Python even with special chars ast.parse(result.code) def test_parameter_name_collision_with_python_keyword(self): """Test parameter names that are Python keywords.""" data = SDKData( metadata=SDKGenerationMetadata( generation_time="2026-01-06T14:30:00Z", prefect_version="3.2.0", ), flows={ "my-flow": FlowInfo( name="my-flow", deployments=[ DeploymentInfo( name="production", flow_name="my-flow", full_name="my-flow/production", parameter_schema={ "type": "object", "properties": { "class": {"type": "string"}, "import": {"type": "string"}, }, }, ) ], ) }, ) result = render_sdk_to_string(data) # Should be valid Python with keyword-safe names ast.parse(result.code) # Keywords should have underscore suffix assert "class_:" in result.code assert 
"import_:" in result.code def test_work_pool_name_collision(self): """Test work pool names that would create class name collisions.""" data = SDKData( metadata=SDKGenerationMetadata( generation_time="2026-01-06T14:30:00Z", prefect_version="3.2.0", ), work_pools={ "my-pool": WorkPoolInfo( name="my-pool", pool_type="docker", job_variables_schema={ "type": "object", "properties": {"image": {"type": "string"}}, }, ), "my_pool": WorkPoolInfo( name="my_pool", pool_type="docker", job_variables_schema={ "type": "object", "properties": {"namespace": {"type": "string"}}, }, ), }, ) result = render_sdk_to_string(data) # Should be valid Python ast.parse(result.code) # Both should exist with unique names assert "MyPoolJobVariables" in result.code def test_multiple_deployments_same_flow(self): """Test multiple deployments for the same flow.""" data = SDKData( metadata=SDKGenerationMetadata( generation_time="2026-01-06T14:30:00Z", prefect_version="3.2.0", ), flows={ "my-flow": FlowInfo( name="my-flow", deployments=[ DeploymentInfo( name="production", flow_name="my-flow", full_name="my-flow/production", ), DeploymentInfo( name="staging", flow_name="my-flow", full_name="my-flow/staging", ), ], ) }, ) result = render_sdk_to_string(data) # Should be valid Python ast.parse(result.code) assert "class _MyFlowProduction:" in result.code assert "class _MyFlowStaging:" in result.code def test_deployment_with_complex_default_values(self): """Test parameters with complex default values.""" data = SDKData( metadata=SDKGenerationMetadata( generation_time="2026-01-06T14:30:00Z", prefect_version="3.2.0", ), flows={ "my-flow": FlowInfo( name="my-flow", deployments=[ DeploymentInfo( name="production", flow_name="my-flow", full_name="my-flow/production", parameter_schema={ "type": "object", "properties": { "tags": { "type": "array", "items": {"type": "string"}, "default": ["prod", "v1"], }, "config": { "type": "object", "default": {"key": "value"}, }, }, }, ) ], ) }, ) result = 
render_sdk_to_string(data) # Should be valid Python ast.parse(result.code) assert "['prod', 'v1']" in result.code assert "{'key': 'value'}" in result.code def test_empty_deployment_names_literal(self): """Test that empty deployments still generate valid code.""" data = SDKData( metadata=SDKGenerationMetadata( generation_time="2026-01-06T14:30:00Z", prefect_version="3.2.0", ) ) result = render_sdk_to_string(data) # Should be valid Python ast.parse(result.code) # Should have placeholder Literal assert 'DeploymentName = Literal[""]' in result.code
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/_sdk/test_renderer.py", "license": "Apache License 2.0", "lines": 703, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:src/prefect/_sdk/models.py
""" Data models for SDK generation. This module contains the internal data models used to represent workspace data fetched from the Prefect API, which are then passed to the template renderer. Note: These models are internal to SDK generation and not part of the public API. """ from dataclasses import dataclass, field from typing import Any @dataclass class WorkPoolInfo: """Information about a work pool needed for SDK generation. Attributes: name: The work pool name as it appears in Prefect. pool_type: The work pool type (e.g., "kubernetes", "docker", "process"). job_variables_schema: JSON Schema dict for the work pool's job variables. This is the full schema object (e.g., {"type": "object", "properties": {...}}) from base_job_template["variables"]. Can be empty dict if no job variables are defined. """ name: str pool_type: str job_variables_schema: dict[str, Any] = field(default_factory=dict) @dataclass class DeploymentInfo: """Information about a deployment needed for SDK generation. Attributes: name: The deployment name (just the deployment part, not flow/deployment). flow_name: The name of the flow this deployment belongs to. full_name: The full deployment name in "flow-name/deployment-name" format. parameter_schema: JSON Schema dict for the flow's parameters. This comes from the deployment's parameter_openapi_schema field. Can be empty dict or None if the flow has no parameters. work_pool_name: Name of the work pool this deployment uses. Can be None if the deployment doesn't use a work pool. description: Optional deployment description for docstrings. """ name: str flow_name: str full_name: str parameter_schema: dict[str, Any] | None = None work_pool_name: str | None = None description: str | None = None @dataclass class FlowInfo: """Information about a flow and its deployments. Groups deployments by their parent flow for organized SDK generation. Attributes: name: The flow name. deployments: List of deployments belonging to this flow. 
""" name: str deployments: list[DeploymentInfo] = field(default_factory=list) @dataclass class SDKGenerationMetadata: """Metadata about the SDK generation process. Attributes: generation_time: ISO 8601 timestamp of when the SDK was generated. prefect_version: Version of Prefect used for generation. workspace_name: Name of the workspace (if applicable). api_url: The Prefect API URL used. """ generation_time: str prefect_version: str workspace_name: str | None = None api_url: str | None = None @dataclass class SDKData: """Complete data needed for SDK generation. This is the top-level container passed to the template renderer. Attributes: metadata: Generation metadata (time, version, workspace). flows: Dictionary mapping flow names to FlowInfo objects. work_pools: Dictionary mapping work pool names to WorkPoolInfo objects. """ metadata: SDKGenerationMetadata flows: dict[str, FlowInfo] = field(default_factory=dict) work_pools: dict[str, WorkPoolInfo] = field(default_factory=dict) @property def deployment_count(self) -> int: """Total number of deployments across all flows.""" return sum(len(flow.deployments) for flow in self.flows.values()) @property def flow_count(self) -> int: """Number of flows.""" return len(self.flows) @property def work_pool_count(self) -> int: """Number of work pools.""" return len(self.work_pools) @property def deployment_names(self) -> list[str]: """List of all deployment full names (derived from flows). Returns names sorted alphabetically for deterministic code generation. """ names: list[str] = [] for flow in self.flows.values(): for deployment in flow.deployments: names.append(deployment.full_name) return sorted(names) def all_deployments(self) -> list[DeploymentInfo]: """Return a flat list of all deployments. Returns deployments sorted by full_name for deterministic code generation. 
""" deployments: list[DeploymentInfo] = [] for flow in self.flows.values(): deployments.extend(flow.deployments) return sorted(deployments, key=lambda d: d.full_name)
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/_sdk/models.py", "license": "Apache License 2.0", "lines": 107, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
PrefectHQ/prefect:src/prefect/_sdk/naming.py
""" Naming utilities for SDK generation. This module converts arbitrary names (deployment names, flow names, work pool names) to valid Python identifiers and class names. Conversion rules for identifiers: 1. Normalize Unicode to ASCII where possible (é -> e via NFKD decomposition) 2. Treat Unicode separators/punctuation (em-dash, non-breaking space, etc.) as word boundaries 3. Replace ASCII hyphens, spaces, and punctuation with underscores 4. Drop remaining non-ASCII characters 5. Collapse consecutive underscores 6. Strip leading/trailing underscores 7. Prefix with underscore if starts with digit 8. Append underscore if Python keyword (class -> class_) 9. Return "_unnamed" if result is empty Conversion rules for class names: 1. Same Unicode handling as identifiers 2. Split on separators/punctuation to get word parts 3. Capitalize first letter of each part (PascalCase) 4. Prefix with underscore if starts with digit 5. Return "_Unnamed" if result is empty """ import keyword import re import unicodedata from typing import Literal # Python keywords that need underscore suffix PYTHON_KEYWORDS = frozenset(keyword.kwlist) # Names reserved in specific contexts - these conflict with generated SDK surface # IMPORTANT: These must be in NORMALIZED form (as returned by to_identifier) # because safe_identifier() normalizes the input before checking reserved names. # Flow identifiers that would shadow the root namespace RESERVED_FLOW_IDENTIFIERS = frozenset({"flows", "deployments", "DeploymentName"}) # Deployment identifiers that would break method signatures # Note: Method names (run, run_async, etc.) don't need to be reserved because # parameters are kwargs to run(), not attributes on the class. # Template locals now use _sdk_ prefix to avoid collisions. 
RESERVED_DEPLOYMENT_IDENTIFIERS = frozenset( { # Python built-in that would break method signatures "self", } ) # Module-level names that should be avoided # Note: "__all__" normalizes to "all" (underscores stripped) RESERVED_MODULE_IDENTIFIERS = frozenset({"all"}) def _is_separator(char: str) -> bool: """ Check if a character should be treated as a word separator. Handles both ASCII separators and Unicode separators/punctuation. """ # Common ASCII separators if char in "-_ \t\n\r": return True # Check Unicode category for separators and punctuation # Z* = separators (space, line, paragraph) # P* = punctuation (connector, dash, open, close, etc.) # S* = symbols (math, currency, modifier, other) category = unicodedata.category(char) if category.startswith(("Z", "P", "S")): return True return False def to_identifier(name: str) -> str: """ Convert an arbitrary name to a valid Python identifier. Args: name: Any string (deployment name, flow name, etc.) Returns: A valid Python identifier. Examples: >>> to_identifier("my-flow") 'my_flow' >>> to_identifier("123-start") '_123_start' >>> to_identifier("class") 'class_' >>> to_identifier("café-data") 'cafe_data' >>> to_identifier("🚀-deploy") 'deploy' >>> to_identifier("a]b") 'a_b' """ if not name: return "_unnamed" # Normalize unicode to ASCII where possible (é -> e, etc.) 
normalized = unicodedata.normalize("NFKD", name) # Build result keeping only ASCII alphanumeric and converting separators result: list[str] = [] for char in normalized: if char.isascii() and char.isalnum(): result.append(char) elif _is_separator(char): # Separators (including Unicode dashes, spaces, punctuation) become underscore result.append("_") elif char.isascii(): # Other ASCII becomes underscore (shouldn't hit often after separator check) result.append("_") # Non-ASCII non-separator characters are dropped identifier = "".join(result) # Collapse consecutive underscores identifier = re.sub(r"_+", "_", identifier) # Strip leading/trailing underscores (we'll add prefix if needed) identifier = identifier.strip("_") # Handle empty result if not identifier: return "_unnamed" # Prefix with underscore if starts with digit if identifier[0].isdigit(): identifier = f"_{identifier}" # Append underscore if Python keyword if identifier in PYTHON_KEYWORDS: identifier = f"{identifier}_" return identifier def to_class_name(name: str) -> str: """ Convert an arbitrary name to a valid PascalCase class name. Args: name: Any string (deployment name, flow name, etc.) Returns: A valid Python class name in PascalCase. 
Examples: >>> to_class_name("my-flow") 'MyFlow' >>> to_class_name("123-start") '_123Start' >>> to_class_name("class") 'Class' >>> to_class_name("my_flow") 'MyFlow' >>> to_class_name("café-data") 'CafeData' >>> to_class_name("🚀-deploy") 'Deploy' >>> to_class_name("a]b") 'AB' """ if not name: return "_Unnamed" # Normalize unicode to ASCII where possible normalized = unicodedata.normalize("NFKD", name) # Build parts for PascalCase by splitting on separators parts: list[str] = [] current_part: list[str] = [] for char in normalized: if char.isascii() and char.isalnum(): current_part.append(char) elif _is_separator(char): # Separator - save current part and start new one if current_part: parts.append("".join(current_part)) current_part = [] elif char.isascii(): # Other ASCII - treat as separator if current_part: parts.append("".join(current_part)) current_part = [] # Non-ASCII non-separator characters are dropped # Don't forget the last part if current_part: parts.append("".join(current_part)) # Handle empty result if not parts: return "_Unnamed" # Convert to PascalCase result_parts: list[str] = [] for part in parts: if part: # Capitalize first letter, preserve rest result_parts.append(part[0].upper() + part[1:]) class_name = "".join(result_parts) # Handle empty result after joining (shouldn't happen, but be safe) if not class_name: return "_Unnamed" # Prefix with underscore if starts with digit if class_name[0].isdigit(): class_name = f"_{class_name}" # Note: We don't append underscore for keywords here because: # 1. Python is case-sensitive, so "Class" is valid (not a keyword) # 2. PascalCase naturally avoids most keywords (e.g., "class" -> "Class") # 3. Lowercase keywords as input would become "Class", "For", etc. which are valid return class_name def make_unique_identifier( base: str, existing: set[str], reserved: frozenset[str] | None = None, ) -> str: """ Make an identifier unique by appending numeric suffix if needed. 
Args: base: The base identifier (already converted via to_identifier). existing: Set of already-used identifiers. reserved: Optional set of reserved names to avoid. Returns: A unique identifier, possibly with _2, _3, etc. suffix. Examples: >>> make_unique_identifier("my_flow", {"my_flow"}) 'my_flow_2' >>> make_unique_identifier("my_flow", {"my_flow", "my_flow_2"}) 'my_flow_3' >>> make_unique_identifier("run", set(), RESERVED_DEPLOYMENT_IDENTIFIERS) 'run_2' """ reserved = reserved or frozenset() # Check if base is available if base not in existing and base not in reserved: return base # Find the next available suffix suffix = 2 while True: candidate = f"{base}_{suffix}" if candidate not in existing and candidate not in reserved: return candidate suffix += 1 def make_unique_class_name( base: str, existing: set[str], ) -> str: """ Make a class name unique by appending numeric suffix if needed. Args: base: The base class name (already converted via to_class_name). existing: Set of already-used class names. Returns: A unique class name, possibly with 2, 3, etc. suffix. Examples: >>> make_unique_class_name("MyFlow", {"MyFlow"}) 'MyFlow2' >>> make_unique_class_name("MyFlow", {"MyFlow", "MyFlow2"}) 'MyFlow3' """ if base not in existing: return base # Find the next available suffix suffix = 2 while True: candidate = f"{base}{suffix}" if candidate not in existing: return candidate suffix += 1 NameContext = Literal["flow", "deployment", "work_pool", "module", "general"] def get_reserved_names(context: NameContext) -> frozenset[str]: """ Get the reserved names for a given context. These names would conflict with the generated SDK surface and must be avoided. Args: context: The naming context. Returns: A frozenset of reserved names. 
""" if context == "flow": return RESERVED_FLOW_IDENTIFIERS elif context == "deployment": return RESERVED_DEPLOYMENT_IDENTIFIERS elif context == "module": return RESERVED_MODULE_IDENTIFIERS else: return frozenset() def safe_identifier( name: str, existing: set[str], context: NameContext = "general", ) -> str: """ Convert a name to a unique, safe Python identifier. This is the main entry point for identifier generation. Args: name: The original name. existing: Set of already-used identifiers. context: The naming context (affects reserved names). Returns: A unique, valid Python identifier. """ base = to_identifier(name) reserved = get_reserved_names(context) return make_unique_identifier(base, existing, reserved) def safe_class_name( name: str, existing: set[str], ) -> str: """ Convert a name to a unique, safe Python class name. This is the main entry point for class name generation. Args: name: The original name. existing: Set of already-used class names. Returns: A unique, valid PascalCase class name. """ base = to_class_name(name) return make_unique_class_name(base, existing)
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/_sdk/naming.py", "license": "Apache License 2.0", "lines": 290, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
PrefectHQ/prefect:tests/_sdk/test_models.py
"""Tests for SDK generation data models.""" from prefect._sdk.models import ( DeploymentInfo, FlowInfo, SDKData, SDKGenerationMetadata, WorkPoolInfo, ) class TestWorkPoolInfo: """Test WorkPoolInfo data model.""" def test_basic_creation(self): wp = WorkPoolInfo( name="my-pool", pool_type="kubernetes", ) assert wp.name == "my-pool" assert wp.pool_type == "kubernetes" assert wp.job_variables_schema == {} def test_with_schema(self): schema = { "type": "object", "properties": { "image": {"type": "string"}, "cpu": {"type": "string"}, }, } wp = WorkPoolInfo( name="k8s-pool", pool_type="kubernetes", job_variables_schema=schema, ) assert wp.job_variables_schema == schema def test_different_types(self): docker = WorkPoolInfo(name="docker-pool", pool_type="docker") process = WorkPoolInfo(name="local-pool", pool_type="process") ecs = WorkPoolInfo(name="ecs-pool", pool_type="ecs") assert docker.pool_type == "docker" assert process.pool_type == "process" assert ecs.pool_type == "ecs" class TestDeploymentInfo: """Test DeploymentInfo data model.""" def test_basic_creation(self): dep = DeploymentInfo( name="production", flow_name="my-etl-flow", full_name="my-etl-flow/production", ) assert dep.name == "production" assert dep.flow_name == "my-etl-flow" assert dep.full_name == "my-etl-flow/production" assert dep.parameter_schema is None assert dep.work_pool_name is None assert dep.description is None def test_with_all_fields(self): schema = { "type": "object", "properties": { "source": {"type": "string"}, "batch_size": {"type": "integer", "default": 100}, }, "required": ["source"], } dep = DeploymentInfo( name="production", flow_name="my-etl-flow", full_name="my-etl-flow/production", parameter_schema=schema, work_pool_name="k8s-pool", description="Production ETL deployment", ) assert dep.parameter_schema == schema assert dep.work_pool_name == "k8s-pool" assert dep.description == "Production ETL deployment" def test_empty_parameter_schema(self): dep = DeploymentInfo( name="simple", 
flow_name="simple-flow", full_name="simple-flow/simple", parameter_schema={}, ) assert dep.parameter_schema == {} class TestFlowInfo: """Test FlowInfo data model.""" def test_basic_creation(self): flow = FlowInfo(name="my-flow") assert flow.name == "my-flow" assert flow.deployments == [] def test_with_deployments(self): dep1 = DeploymentInfo( name="production", flow_name="my-flow", full_name="my-flow/production", ) dep2 = DeploymentInfo( name="staging", flow_name="my-flow", full_name="my-flow/staging", ) flow = FlowInfo(name="my-flow", deployments=[dep1, dep2]) assert len(flow.deployments) == 2 assert flow.deployments[0].name == "production" assert flow.deployments[1].name == "staging" class TestSDKGenerationMetadata: """Test SDKGenerationMetadata data model.""" def test_basic_creation(self): meta = SDKGenerationMetadata( generation_time="2026-01-06T14:30:00Z", prefect_version="3.2.0", ) assert meta.generation_time == "2026-01-06T14:30:00Z" assert meta.prefect_version == "3.2.0" assert meta.workspace_name is None assert meta.api_url is None def test_with_all_fields(self): meta = SDKGenerationMetadata( generation_time="2026-01-06T14:30:00Z", prefect_version="3.2.0", workspace_name="my-workspace", api_url="https://api.prefect.cloud/api/accounts/xxx/workspaces/yyy", ) assert meta.workspace_name == "my-workspace" assert ( meta.api_url == "https://api.prefect.cloud/api/accounts/xxx/workspaces/yyy" ) class TestSDKData: """Test SDKData data model.""" def test_basic_creation(self): meta = SDKGenerationMetadata( generation_time="2026-01-06T14:30:00Z", prefect_version="3.2.0", ) data = SDKData(metadata=meta) assert data.metadata == meta assert data.flows == {} assert data.work_pools == {} assert data.deployment_names == [] def test_counts_with_empty_data(self): meta = SDKGenerationMetadata( generation_time="2026-01-06T14:30:00Z", prefect_version="3.2.0", ) data = SDKData(metadata=meta) assert data.flow_count == 0 assert data.deployment_count == 0 assert data.work_pool_count 
== 0 def test_counts_with_data(self): meta = SDKGenerationMetadata( generation_time="2026-01-06T14:30:00Z", prefect_version="3.2.0", ) dep1 = DeploymentInfo( name="production", flow_name="flow-a", full_name="flow-a/production", ) dep2 = DeploymentInfo( name="staging", flow_name="flow-a", full_name="flow-a/staging", ) dep3 = DeploymentInfo( name="daily", flow_name="flow-b", full_name="flow-b/daily", ) flow_a = FlowInfo(name="flow-a", deployments=[dep1, dep2]) flow_b = FlowInfo(name="flow-b", deployments=[dep3]) wp = WorkPoolInfo(name="k8s-pool", pool_type="kubernetes") data = SDKData( metadata=meta, flows={"flow-a": flow_a, "flow-b": flow_b}, work_pools={"k8s-pool": wp}, ) assert data.flow_count == 2 assert data.deployment_count == 3 assert data.work_pool_count == 1 def test_deployment_names_derived(self): """Test that deployment_names is derived from flows.""" meta = SDKGenerationMetadata( generation_time="2026-01-06T14:30:00Z", prefect_version="3.2.0", ) dep1 = DeploymentInfo( name="production", flow_name="flow-a", full_name="flow-a/production", ) dep2 = DeploymentInfo( name="staging", flow_name="flow-a", full_name="flow-a/staging", ) dep3 = DeploymentInfo( name="daily", flow_name="flow-b", full_name="flow-b/daily", ) flow_a = FlowInfo(name="flow-a", deployments=[dep1, dep2]) flow_b = FlowInfo(name="flow-b", deployments=[dep3]) data = SDKData( metadata=meta, flows={"flow-a": flow_a, "flow-b": flow_b}, ) # deployment_names should be derived from flows names = data.deployment_names assert len(names) == 3 assert "flow-a/production" in names assert "flow-a/staging" in names assert "flow-b/daily" in names def test_all_deployments(self): meta = SDKGenerationMetadata( generation_time="2026-01-06T14:30:00Z", prefect_version="3.2.0", ) dep1 = DeploymentInfo( name="production", flow_name="flow-a", full_name="flow-a/production", ) dep2 = DeploymentInfo( name="staging", flow_name="flow-a", full_name="flow-a/staging", ) dep3 = DeploymentInfo( name="daily", flow_name="flow-b", 
full_name="flow-b/daily", ) flow_a = FlowInfo(name="flow-a", deployments=[dep1, dep2]) flow_b = FlowInfo(name="flow-b", deployments=[dep3]) data = SDKData( metadata=meta, flows={"flow-a": flow_a, "flow-b": flow_b}, ) all_deps = data.all_deployments() assert len(all_deps) == 3 full_names = [d.full_name for d in all_deps] assert "flow-a/production" in full_names assert "flow-a/staging" in full_names assert "flow-b/daily" in full_names def test_all_deployments_empty(self): meta = SDKGenerationMetadata( generation_time="2026-01-06T14:30:00Z", prefect_version="3.2.0", ) data = SDKData(metadata=meta) assert data.all_deployments() == [] def test_deployment_names_sorted(self): """Test that deployment_names returns sorted names for deterministic output.""" meta = SDKGenerationMetadata( generation_time="2026-01-06T14:30:00Z", prefect_version="3.2.0", ) # Create deployments in non-alphabetical order dep_z = DeploymentInfo(name="z", flow_name="flow", full_name="flow/z") dep_a = DeploymentInfo(name="a", flow_name="flow", full_name="flow/a") dep_m = DeploymentInfo(name="m", flow_name="flow", full_name="flow/m") flow = FlowInfo(name="flow", deployments=[dep_z, dep_a, dep_m]) data = SDKData(metadata=meta, flows={"flow": flow}) # Should be sorted alphabetically assert data.deployment_names == ["flow/a", "flow/m", "flow/z"] def test_all_deployments_sorted(self): """Test that all_deployments returns sorted deployments for deterministic output.""" meta = SDKGenerationMetadata( generation_time="2026-01-06T14:30:00Z", prefect_version="3.2.0", ) # Create deployments in non-alphabetical order dep_z = DeploymentInfo(name="z", flow_name="flow", full_name="flow/z") dep_a = DeploymentInfo(name="a", flow_name="flow", full_name="flow/a") dep_m = DeploymentInfo(name="m", flow_name="flow", full_name="flow/m") flow = FlowInfo(name="flow", deployments=[dep_z, dep_a, dep_m]) data = SDKData(metadata=meta, flows={"flow": flow}) # Should be sorted by full_name all_deps = data.all_deployments() assert 
[d.full_name for d in all_deps] == ["flow/a", "flow/m", "flow/z"] class TestDataModelIntegration: """Integration tests for data models working together.""" def test_complete_sdk_data_structure(self): """Test building a complete SDKData structure like the generator would.""" # Create metadata meta = SDKGenerationMetadata( generation_time="2026-01-06T14:30:00Z", prefect_version="3.2.0", workspace_name="my-workspace", ) # Create work pool k8s_pool = WorkPoolInfo( name="kubernetes-pool", pool_type="kubernetes", job_variables_schema={ "type": "object", "properties": { "image": {"type": "string"}, "namespace": {"type": "string", "default": "default"}, }, }, ) # Create deployments etl_prod = DeploymentInfo( name="production", flow_name="my-etl-flow", full_name="my-etl-flow/production", parameter_schema={ "type": "object", "properties": { "source": {"type": "string"}, "batch_size": {"type": "integer", "default": 100}, }, "required": ["source"], }, work_pool_name="kubernetes-pool", description="Production ETL pipeline", ) etl_staging = DeploymentInfo( name="staging", flow_name="my-etl-flow", full_name="my-etl-flow/staging", parameter_schema={ "type": "object", "properties": { "source": {"type": "string"}, "batch_size": {"type": "integer", "default": 100}, }, "required": ["source"], }, work_pool_name="kubernetes-pool", ) # Create flow etl_flow = FlowInfo( name="my-etl-flow", deployments=[etl_prod, etl_staging], ) # Build complete SDKData sdk_data = SDKData( metadata=meta, flows={"my-etl-flow": etl_flow}, work_pools={"kubernetes-pool": k8s_pool}, ) # Verify structure assert sdk_data.flow_count == 1 assert sdk_data.deployment_count == 2 assert sdk_data.work_pool_count == 1 assert len(sdk_data.deployment_names) == 2 # Verify we can access nested data flow = sdk_data.flows["my-etl-flow"] assert len(flow.deployments) == 2 deployment = flow.deployments[0] assert deployment.work_pool_name == "kubernetes-pool" assert deployment.parameter_schema is not None assert "source" in 
deployment.parameter_schema["properties"] work_pool = sdk_data.work_pools["kubernetes-pool"] assert "image" in work_pool.job_variables_schema["properties"]
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/_sdk/test_models.py", "license": "Apache License 2.0", "lines": 354, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:tests/_sdk/test_naming.py
"""Tests for naming utilities.""" from prefect._sdk.naming import ( PYTHON_KEYWORDS, RESERVED_DEPLOYMENT_IDENTIFIERS, RESERVED_FLOW_IDENTIFIERS, get_reserved_names, make_unique_class_name, make_unique_identifier, safe_class_name, safe_identifier, to_class_name, to_identifier, ) class TestToIdentifier: """Test conversion of names to valid Python identifiers.""" def test_simple_name(self): assert to_identifier("my_flow") == "my_flow" def test_hyphenated_name(self): assert to_identifier("my-flow") == "my_flow" def test_spaces(self): assert to_identifier("my flow") == "my_flow" def test_mixed_separators(self): assert to_identifier("my-flow_name here") == "my_flow_name_here" def test_leading_digit(self): assert to_identifier("123-start") == "_123_start" def test_all_digits(self): assert to_identifier("123") == "_123" def test_python_keyword_class(self): assert to_identifier("class") == "class_" def test_python_keyword_import(self): assert to_identifier("import") == "import_" def test_python_keyword_def(self): assert to_identifier("def") == "def_" def test_python_keyword_return(self): assert to_identifier("return") == "return_" def test_python_keyword_if(self): assert to_identifier("if") == "if_" def test_python_keyword_for(self): assert to_identifier("for") == "for_" def test_python_keyword_while(self): assert to_identifier("while") == "while_" def test_python_keyword_try(self): assert to_identifier("try") == "try_" def test_python_keyword_except(self): assert to_identifier("except") == "except_" def test_python_keyword_with(self): assert to_identifier("with") == "with_" def test_python_keyword_as(self): assert to_identifier("as") == "as_" def test_python_keyword_from(self): assert to_identifier("from") == "from_" def test_python_keyword_lambda(self): assert to_identifier("lambda") == "lambda_" def test_python_keyword_async(self): assert to_identifier("async") == "async_" def test_python_keyword_await(self): assert to_identifier("await") == "await_" def 
test_unicode_normalized(self): """Unicode characters that can be normalized to ASCII are converted.""" assert to_identifier("café-data") == "cafe_data" def test_unicode_emoji_stripped(self): """Emoji and non-normalizable unicode are stripped.""" # Emoji is a symbol (category So), treated as separator, then stripped assert to_identifier("🚀-deploy") == "deploy" def test_unicode_only_emoji(self): """All emoji becomes _unnamed.""" assert to_identifier("🚀🎉✨") == "_unnamed" def test_empty_string(self): assert to_identifier("") == "_unnamed" def test_only_special_chars(self): assert to_identifier("---") == "_unnamed" def test_consecutive_underscores_collapsed(self): assert to_identifier("my--flow") == "my_flow" assert to_identifier("my___flow") == "my_flow" assert to_identifier("my - flow") == "my_flow" def test_leading_underscore_stripped_then_reapplied_if_digit(self): assert to_identifier("_123flow") == "_123flow" def test_punctuation_becomes_underscore(self): assert to_identifier("my.flow") == "my_flow" assert to_identifier("my/flow") == "my_flow" assert to_identifier("my@flow") == "my_flow" def test_mixed_case_preserved(self): assert to_identifier("MyFlow") == "MyFlow" assert to_identifier("myFlow") == "myFlow" def test_accented_characters(self): assert to_identifier("naïve") == "naive" assert to_identifier("résumé") == "resume" def test_german_umlaut(self): assert to_identifier("über") == "uber" def test_spanish_tilde(self): assert to_identifier("señor") == "senor" def test_all_keywords_handled(self): """Verify all Python keywords get underscore suffix.""" for kw in PYTHON_KEYWORDS: result = to_identifier(kw) assert result == f"{kw}_", f"Keyword {kw} not handled correctly" # Non-ASCII separator tests def test_em_dash_as_separator(self): """Em-dash (U+2014) should be treated as word separator.""" assert to_identifier("my—flow") == "my_flow" def test_en_dash_as_separator(self): """En-dash (U+2013) should be treated as word separator.""" assert to_identifier("my–flow") 
== "my_flow" def test_non_breaking_space_as_separator(self): """Non-breaking space (U+00A0) should be treated as word separator.""" assert to_identifier("my\u00a0flow") == "my_flow" def test_figure_dash_as_separator(self): """Figure dash (U+2012) should be treated as word separator.""" assert to_identifier("my\u2012flow") == "my_flow" def test_bracket_as_separator(self): """Brackets should be treated as separators.""" assert to_identifier("a[b]c") == "a_b_c" assert to_identifier("a]b") == "a_b" def test_german_eszett(self): """German ß - NFKD doesn't decompose it, so it's dropped.""" # ß does not decompose to ss via NFKD, it stays as ß and gets dropped assert to_identifier("straße") == "strae" def test_unicode_digits_dropped(self): """Non-ASCII digits are dropped.""" # Arabic-Indic digits (٠١٢٣) assert to_identifier("test٠١٢٣") == "test" # Only non-ASCII digits assert to_identifier("٠١٢٣") == "_unnamed" class TestToClassName: """Test conversion of names to valid PascalCase class names.""" def test_simple_name(self): assert to_class_name("my_flow") == "MyFlow" def test_hyphenated_name(self): assert to_class_name("my-flow") == "MyFlow" def test_already_pascal_case(self): assert to_class_name("MyFlow") == "MyFlow" def test_spaces(self): assert to_class_name("my flow") == "MyFlow" def test_leading_digit(self): assert to_class_name("123-start") == "_123Start" def test_all_digits(self): assert to_class_name("123") == "_123" def test_python_keyword_becomes_pascal(self): """Keywords become valid PascalCase without underscore suffix.""" # Python is case-sensitive, so "Class" is valid (not a keyword) assert to_class_name("class") == "Class" assert to_class_name("for") == "For" assert to_class_name("if") == "If" def test_capitalized_keyword_is_valid(self): """Already capitalized 'Class' should remain 'Class' (not 'Class_').""" assert to_class_name("Class") == "Class" def test_unicode_normalized(self): assert to_class_name("café-data") == "CafeData" def 
test_unicode_emoji_stripped(self): assert to_class_name("🚀-deploy") == "Deploy" def test_unicode_only_emoji(self): assert to_class_name("🚀🎉✨") == "_Unnamed" def test_empty_string(self): assert to_class_name("") == "_Unnamed" def test_only_special_chars(self): assert to_class_name("---") == "_Unnamed" def test_multiple_words(self): assert to_class_name("my-etl-flow") == "MyEtlFlow" def test_mixed_separators(self): assert to_class_name("my-flow_name here") == "MyFlowNameHere" def test_single_char_parts(self): assert to_class_name("a-b-c") == "ABC" def test_lowercase_preserved_after_first(self): """Only first char of each part is uppercased.""" assert to_class_name("myFLOW") == "MyFLOW" assert to_class_name("my-FLOW") == "MyFLOW" def test_accented_characters(self): assert to_class_name("café") == "Cafe" def test_punctuation_as_separator(self): assert to_class_name("my.flow") == "MyFlow" assert to_class_name("my/flow") == "MyFlow" # Non-ASCII separator tests def test_em_dash_as_separator(self): """Em-dash should split words for PascalCase.""" assert to_class_name("my—flow") == "MyFlow" def test_non_breaking_space_as_separator(self): """Non-breaking space should split words for PascalCase.""" assert to_class_name("my\u00a0flow") == "MyFlow" def test_bracket_as_separator(self): """Brackets should split words for PascalCase.""" assert to_class_name("a[b]c") == "ABC" assert to_class_name("a]b") == "AB" class TestMakeUniqueIdentifier: """Test unique identifier generation with collision handling.""" def test_no_collision(self): result = make_unique_identifier("my_flow", set()) assert result == "my_flow" def test_single_collision(self): result = make_unique_identifier("my_flow", {"my_flow"}) assert result == "my_flow_2" def test_multiple_collisions(self): result = make_unique_identifier("my_flow", {"my_flow", "my_flow_2"}) assert result == "my_flow_3" def test_many_collisions(self): existing = {"my_flow", "my_flow_2", "my_flow_3", "my_flow_4"} result = 
make_unique_identifier("my_flow", existing) assert result == "my_flow_5" def test_reserved_name_avoided(self): # 'self' is reserved for deployment context result = make_unique_identifier("self", set(), RESERVED_DEPLOYMENT_IDENTIFIERS) assert result == "self_2" def test_reserved_name_with_existing(self): existing = {"self_2"} result = make_unique_identifier( "self", existing, RESERVED_DEPLOYMENT_IDENTIFIERS ) assert result == "self_3" def test_run_not_reserved_in_deployment_context(self): # 'run' is a method name but doesn't conflict with parameter names result = make_unique_identifier("run", set(), RESERVED_DEPLOYMENT_IDENTIFIERS) assert result == "run" def test_flows_reserved(self): result = make_unique_identifier("flows", set(), RESERVED_FLOW_IDENTIFIERS) assert result == "flows_2" def test_with_options_not_reserved(self): """with_options is not reserved - no functional collision with method.""" result = make_unique_identifier( "with_options", set(), RESERVED_DEPLOYMENT_IDENTIFIERS ) assert result == "with_options" def test_with_infra_not_reserved(self): """with_infra is not reserved - no functional collision with method.""" result = make_unique_identifier( "with_infra", set(), RESERVED_DEPLOYMENT_IDENTIFIERS ) assert result == "with_infra" def test_all_reserved_in_module_context(self): """Test that 'all' is reserved (normalized form of '__all__').""" from prefect._sdk.naming import RESERVED_MODULE_IDENTIFIERS result = make_unique_identifier("all", set(), RESERVED_MODULE_IDENTIFIERS) assert result == "all_2" class TestMakeUniqueClassName: """Test unique class name generation with collision handling.""" def test_no_collision(self): result = make_unique_class_name("MyFlow", set()) assert result == "MyFlow" def test_single_collision(self): result = make_unique_class_name("MyFlow", {"MyFlow"}) assert result == "MyFlow2" def test_multiple_collisions(self): result = make_unique_class_name("MyFlow", {"MyFlow", "MyFlow2"}) assert result == "MyFlow3" def 
test_many_collisions(self): existing = {"MyFlow", "MyFlow2", "MyFlow3", "MyFlow4"} result = make_unique_class_name("MyFlow", existing) assert result == "MyFlow5" class TestGetReservedNames: """Test reserved name lookup by context.""" def test_flow_context(self): reserved = get_reserved_names("flow") assert "flows" in reserved assert "deployments" in reserved assert "DeploymentName" in reserved def test_deployment_context(self): reserved = get_reserved_names("deployment") # Only 'self' is reserved - would break method signatures assert "self" in reserved # Method names are NOT reserved - no functional collision with parameters assert "run" not in reserved assert "run_async" not in reserved assert "with_options" not in reserved assert "with_infra" not in reserved def test_work_pool_context(self): reserved = get_reserved_names("work_pool") assert len(reserved) == 0 def test_module_context(self): reserved = get_reserved_names("module") # Reserved names are in normalized form assert "all" in reserved def test_general_context(self): reserved = get_reserved_names("general") assert len(reserved) == 0 class TestSafeIdentifier: """Test the main safe_identifier entry point.""" def test_simple_conversion_and_uniqueness(self): existing: set[str] = set() result1 = safe_identifier("my-flow", existing) existing.add(result1) result2 = safe_identifier("my_flow", existing) assert result1 == "my_flow" assert result2 == "my_flow_2" def test_with_reserved_names(self): # 'self' is reserved in deployment context result = safe_identifier("self", set(), "deployment") assert result == "self_2" def test_keyword_handling(self): result = safe_identifier("class", set()) assert result == "class_" def test_unicode_handling(self): result = safe_identifier("🚀-café-deploy", set()) # Emoji (symbol) becomes separator, stripped; café -> cafe assert result == "cafe_deploy" def test_dunder_all_reserved_in_module_context(self): """Test that __all__ is avoided in module context (normalizes to 'all').""" 
result = safe_identifier("__all__", set(), "module") # __all__ normalizes to "all" which is reserved, so becomes "all_2" assert result == "all_2" class TestSafeClassName: """Test the main safe_class_name entry point.""" def test_simple_conversion_and_uniqueness(self): existing: set[str] = set() result1 = safe_class_name("my-flow", existing) existing.add(result1) result2 = safe_class_name("my_flow", existing) assert result1 == "MyFlow" assert result2 == "MyFlow2" def test_keyword_handling(self): # "class" becomes "Class" which is valid (not a keyword) result = safe_class_name("class", set()) assert result == "Class" def test_unicode_handling(self): result = safe_class_name("🚀-café-deploy", set()) assert result == "CafeDeploy" class TestEdgeCases: """Test edge cases and unusual inputs.""" def test_very_long_name(self): """Long names should be handled without truncation.""" long_name = "a" * 1000 result = to_identifier(long_name) assert len(result) == 1000 assert result == long_name def test_mixed_unicode_and_ascii(self): result = to_identifier("test-日本語-flow") # Japanese characters are dropped, dashes are separators assert result == "test_flow" def test_chinese_characters(self): """Chinese characters are dropped.""" result = to_identifier("测试流程") assert result == "_unnamed" def test_japanese_hiragana(self): """Japanese hiragana are dropped.""" result = to_identifier("てすと") assert result == "_unnamed" def test_korean_characters(self): """Korean characters are dropped.""" result = to_identifier("테스트") assert result == "_unnamed" def test_arabic_characters(self): """Arabic characters are dropped.""" result = to_identifier("اختبار") assert result == "_unnamed" def test_cyrillic_characters(self): """Cyrillic characters are dropped.""" result = to_identifier("тест") assert result == "_unnamed" def test_numeric_suffix_doesnt_conflict_with_uniqueness(self): """Ensure manual numeric suffixes don't cause issues.""" existing = {"my_flow", "my_flow2", "my_flow_2"} result = 
make_unique_identifier("my_flow", existing) # Should find _3 since _2 exists assert result == "my_flow_3" def test_trailing_underscores_stripped(self): """Trailing underscores from input are stripped.""" result = to_identifier("flow___") assert result == "flow" def test_leading_underscores_stripped(self): """Leading underscores from input are stripped (unless for digit prefix).""" result = to_identifier("___flow") assert result == "flow" def test_single_underscore_input(self): """Single underscore becomes _unnamed.""" result = to_identifier("_") assert result == "_unnamed" def test_double_underscore_preserved_in_middle(self): """Double underscores become single underscore.""" result = to_identifier("my__flow") assert result == "my_flow" def test_tabs_and_newlines_as_separators(self): """Tabs and newlines should be treated as separators.""" assert to_identifier("my\tflow") == "my_flow" assert to_identifier("my\nflow") == "my_flow" assert to_identifier("my\rflow") == "my_flow"
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/_sdk/test_naming.py", "license": "Apache License 2.0", "lines": 372, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:src/prefect/_sdk/schema_converter.py
""" JSON Schema to Python type conversion for SDK generation. This module converts JSON Schema definitions (like those from Pydantic models) to Python type annotation strings suitable for TypedDict field definitions. Limitations: - allOf: Returns the first $ref or non-Any type. Intersection semantics (combining constraints from multiple schemas) are not modeled. - Nested objects: Objects with properties return "dict[str, Any]". The template renderer in Phase 3 handles nested TypedDict generation. - External $ref: Only internal references (#/definitions/... and #/$defs/...) are supported. External URLs and other formats return Any with a warning. - Tuple constraints: prefixItems and items-as-list are converted to tuple[...], but minItems, maxItems, additionalItems, and unevaluatedItems are ignored. - patternProperties and propertyNames are not supported. Note: To capture warnings during conversion, pass a ConversionContext and check its `conversion_warnings` list after calling json_schema_to_python_type(). """ from typing import Any from prefect._sdk.types import ( CircularReferenceError, ConversionContext, FieldInfo, ) from prefect._sdk.unions import flatten_union # Re-export public types __all__ = [ "CircularReferenceError", "ConversionContext", "FieldInfo", "json_schema_to_python_type", "extract_fields_from_schema", ] def json_schema_to_python_type( schema: dict[str, Any] | bool, context: ConversionContext | None = None, ) -> str: """ Convert a JSON Schema to a Python type annotation string. Args: schema: A JSON Schema dict or boolean schema. context: Conversion context for tracking definitions and visited refs. Pass a context to capture warnings during conversion. Returns: A Python type annotation string (e.g., "str", "list[int]", "dict[str, Any]"). Raises: CircularReferenceError: If a circular $ref is detected. 
Examples: >>> json_schema_to_python_type({"type": "string"}) 'str' >>> json_schema_to_python_type({"type": "array", "items": {"type": "integer"}}) 'list[int]' >>> json_schema_to_python_type({"anyOf": [{"type": "string"}, {"type": "null"}]}) 'str | None' """ if context is None: context = ConversionContext() # Handle boolean schemas if isinstance(schema, bool): # true = accepts anything, false = accepts nothing # Using "Never" for false would be more accurate, but "Any" is more # practical since Never is rarely used and may cause type checker issues. # In practice, boolean false schemas are extremely rare. return "Any" # Handle empty schema (accepts anything) if not schema: return "Any" # Handle $ref first (before type checks) if "$ref" in schema: return _resolve_ref(schema["$ref"], context) # Handle anyOf / oneOf (union types) if "anyOf" in schema: return _convert_any_of(schema["anyOf"], context) if "oneOf" in schema: return _convert_any_of(schema["oneOf"], context) # Handle allOf (intersection - we just use the first one with useful info) if "allOf" in schema: return _convert_all_of(schema["allOf"], context) # Handle enum types if "enum" in schema: return _convert_enum(schema["enum"]) # Handle const (single-value enum) if "const" in schema: return _convert_const(schema["const"]) # Get the type field # Note: prefixItems (tuples) are handled by _convert_array when type is "array" schema_type = schema.get("type") # No type field - could be a complex schema or missing type if schema_type is None: # Check if it has properties (object without explicit type) if "properties" in schema: return "dict[str, Any]" # Check if it has items (array without explicit type) if "items" in schema: items_schema: dict[str, Any] = schema["items"] items_type = json_schema_to_python_type(items_schema, context) return f"list[{items_type}]" # Unknown schema structure return "Any" # Handle type arrays (e.g., ["string", "null"]) if isinstance(schema_type, list): type_list: list[str] = 
schema_type return _convert_type_array(type_list, schema, context) # Handle single type return _convert_single_type(schema_type, schema, context) def _resolve_ref(ref: str, context: ConversionContext) -> str: """Resolve a $ref pointer to its type.""" # Check for circular reference if ref in context.visited_refs: raise CircularReferenceError(f"Circular reference detected: {ref}") # Extract definition name from ref # Handles both "#/definitions/Foo" and "#/$defs/Foo" if ref.startswith("#/definitions/"): def_name = ref[len("#/definitions/") :] elif ref.startswith("#/$defs/"): def_name = ref[len("#/$defs/") :] else: # Unknown ref format (external URLs, OpenAPI-style, etc.) context.conversion_warnings.append(f"Unknown $ref format: {ref}") return "Any" # Look up in definitions definition = context.definitions.get(def_name) if definition is None: context.conversion_warnings.append(f"Definition not found: {def_name}") return "Any" # Mark as visited and recurse context.visited_refs.add(ref) try: return json_schema_to_python_type(definition, context) finally: context.visited_refs.discard(ref) def _convert_any_of(variants: list[dict[str, Any]], context: ConversionContext) -> str: """Convert anyOf/oneOf to a union type with proper flattening.""" types: list[str] = [] for variant in variants: variant_type = json_schema_to_python_type(variant, context) types.append(variant_type) return flatten_union(types) def _convert_all_of(variants: list[dict[str, Any]], context: ConversionContext) -> str: """ Convert allOf to a type. Note: This does not model true intersection semantics. It returns the first $ref found, or the first non-Any variant. This is typically sufficient for Pydantic schemas where allOf is used to wrap a $ref with additional metadata. 
""" # allOf is often used to wrap a $ref # Try to find the most specific type for variant in variants: if "$ref" in variant: return _resolve_ref(variant["$ref"], context) # Otherwise convert the first variant with useful type info for variant in variants: result = json_schema_to_python_type(variant, context) if result != "Any": return result return "Any" def _format_literal_value(value: Any) -> str | None: """ Format a value for use in a Literal type annotation. Args: value: The value to format. Returns: A string representation suitable for Literal[], or None if unsupported. """ if isinstance(value, str): # Use repr() for proper escaping of all special characters # (newlines, tabs, quotes, backslashes, etc.) return repr(value) elif isinstance(value, bool): # Must check bool before int since bool is subclass of int return str(value) elif isinstance(value, int): return str(value) elif isinstance(value, float): # Support float literals return repr(value) elif value is None: return "None" else: # Unsupported type (dict, list, etc.) return None def _convert_enum(values: list[Any]) -> str: """Convert enum to Literal type.""" if not values: return "Any" literals: list[str] = [] for value in values: formatted = _format_literal_value(value) if formatted is None: # Unsupported enum value type - fall back to Any return "Any" literals.append(formatted) return f"Literal[{', '.join(literals)}]" def _convert_const(value: Any) -> str: """Convert const to Literal type.""" return _convert_enum([value]) def _convert_tuple( prefix_items: list[dict[str, Any]], context: ConversionContext ) -> str: """ Convert prefixItems to tuple type. Note: Constraints like minItems, maxItems, additionalItems are not validated here - we generate the tuple type based solely on prefixItems. 
""" if not prefix_items: return "tuple[()]" item_types = [json_schema_to_python_type(item, context) for item in prefix_items] return f"tuple[{', '.join(item_types)}]" def _convert_type_array( types: list[str], schema: dict[str, Any], context: ConversionContext ) -> str: """Convert a type array (e.g., ["string", "null"]) to union type.""" # Deduplicate types first unique_types: list[str] = [] for t in types: if t not in unique_types: unique_types.append(t) # Filter out null and convert rest non_null_types = [t for t in unique_types if t != "null"] has_null = "null" in unique_types if not non_null_types: return "None" # Convert each type converted: list[str] = [] for t in non_null_types: # Create a schema copy with single type for conversion type_schema = {k: v for k, v in schema.items() if k != "type"} type_schema["type"] = t converted.append(_convert_single_type(t, type_schema, context)) # Use flatten_union to handle deduplication if has_null: converted.append("None") return flatten_union(converted) def _convert_single_type( schema_type: str, schema: dict[str, Any], context: ConversionContext ) -> str: """Convert a single JSON Schema type to Python type.""" if schema_type == "string": return "str" elif schema_type == "integer": return "int" elif schema_type == "number": return "float" elif schema_type == "boolean": return "bool" elif schema_type == "null": return "None" elif schema_type == "array": return _convert_array(schema, context) elif schema_type == "object": return _convert_object(schema, context) else: context.conversion_warnings.append(f"Unknown type: {schema_type}") return "Any" def _convert_array(schema: dict[str, Any], context: ConversionContext) -> str: """Convert array schema to list or tuple type.""" # Check for prefixItems (tuple) if "prefixItems" in schema: prefix_items: list[dict[str, Any]] = schema["prefixItems"] return _convert_tuple(prefix_items, context) # Check for items (Pydantic v1 tuple format with list of items) items = 
schema.get("items") if isinstance(items, list): # This is a tuple pattern (items as array) items_list: list[dict[str, Any]] = items return _convert_tuple(items_list, context) # Regular array with items schema if items is not None: items_dict: dict[str, Any] = items item_type = json_schema_to_python_type(items_dict, context) return f"list[{item_type}]" # No items schema - generic list return "list[Any]" def _convert_object(schema: dict[str, Any], context: ConversionContext) -> str: """ Convert object schema to dict type. Note: This always returns a dict type, not a TypedDict. Objects with defined properties are handled at a higher level (the template renderer) which generates named TypedDict classes. This converter only handles the type annotation for inline use. """ # Check for additionalProperties additional = schema.get("additionalProperties") if additional is True or additional is None: # Accept any values return "dict[str, Any]" if additional is False: # Only defined properties allowed, but we still return dict return "dict[str, Any]" if isinstance(additional, dict): # Typed additional properties additional_dict: dict[str, Any] = additional value_type = json_schema_to_python_type(additional_dict, context) return f"dict[str, {value_type}]" return "dict[str, Any]" def extract_fields_from_schema( schema: dict[str, Any], required_fields: list[str] | None = None, ) -> tuple[list[FieldInfo], list[str]]: """ Extract TypedDict field information from a JSON Schema. Args: schema: A JSON Schema dict with properties. required_fields: List of required field names. If None, uses schema's "required". Returns: A tuple of (fields, warnings) where fields is a list of FieldInfo objects. Note: A field is considered required (no NotRequired wrapper) if: - It is listed in the schema's "required" array AND - It does NOT have a "default" key in its property definition This matches Prefect's server-side parameter validation. 
""" # Build context with definitions context = ConversionContext( definitions={ **schema.get("definitions", {}), **schema.get("$defs", {}), } ) properties: dict[str, Any] = schema.get("properties", {}) required_list: list[str] = ( required_fields if required_fields is not None else schema.get("required", []) ) fields: list[FieldInfo] = [] for prop_name, prop_schema in properties.items(): # Determine if field is required # Required means: in required list AND no default value is_required = prop_name in required_list and "default" not in prop_schema # Get type try: python_type = json_schema_to_python_type(prop_schema, context) except CircularReferenceError: context.conversion_warnings.append( f"Circular reference in field '{prop_name}', using Any" ) python_type = "Any" # Extract other info default_value = prop_schema.get("default") has_default = "default" in prop_schema description = prop_schema.get("description") or prop_schema.get("title") fields.append( FieldInfo( name=prop_name, python_type=python_type, required=is_required, default=default_value, has_default=has_default, description=description, ) ) return fields, context.conversion_warnings
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/_sdk/schema_converter.py", "license": "Apache License 2.0", "lines": 351, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
PrefectHQ/prefect:src/prefect/_sdk/types.py
""" Core types for SDK generation. This module contains the data classes and exceptions used across the SDK generation modules. """ from dataclasses import dataclass, field from typing import Any @dataclass class ConversionContext: """Context for schema conversion, tracking definitions and visited refs.""" definitions: dict[str, Any] = field(default_factory=dict) """Schema definitions (from 'definitions' or '$defs' key).""" visited_refs: set[str] = field(default_factory=set) """Set of $ref paths currently being resolved (for circular detection).""" conversion_warnings: list[str] = field(default_factory=list) """Warnings accumulated during conversion.""" class CircularReferenceError(Exception): """Raised when a circular $ref is detected in the schema.""" pass @dataclass class FieldInfo: """Information about a TypedDict field.""" name: str """The field name.""" python_type: str """The Python type annotation string.""" required: bool """Whether the field is required.""" default: Any | None = None """The default value, if any.""" has_default: bool = False """Whether a default value is present.""" description: str | None = None """Field description from schema."""
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/_sdk/types.py", "license": "Apache License 2.0", "lines": 34, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
PrefectHQ/prefect:src/prefect/_sdk/unions.py
""" Union type utilities for SDK generation. This module provides bracket and quote-aware union type handling. """ def split_union_top_level(type_str: str) -> list[str]: """ Split a union type string on " | " only at the top level. This is bracket and quote-aware, so it won't split inside: - Brackets: list[str | int] stays intact - Quotes: Literal['a | b'] stays intact Args: type_str: A type annotation string, possibly containing unions. Returns: List of individual type parts. """ parts: list[str] = [] current: list[str] = [] bracket_depth = 0 in_single_quote = False in_double_quote = False i = 0 while i < len(type_str): char = type_str[i] # Handle escape sequences inside quotes if (in_single_quote or in_double_quote) and char == "\\": current.append(char) if i + 1 < len(type_str): current.append(type_str[i + 1]) i += 2 continue i += 1 continue # Track quote state if char == "'" and not in_double_quote: in_single_quote = not in_single_quote current.append(char) elif char == '"' and not in_single_quote: in_double_quote = not in_double_quote current.append(char) # Track bracket depth (only when not in quotes) elif char == "[" and not in_single_quote and not in_double_quote: bracket_depth += 1 current.append(char) elif char == "]" and not in_single_quote and not in_double_quote: bracket_depth -= 1 current.append(char) # Check for " | " at top level elif ( char == " " and bracket_depth == 0 and not in_single_quote and not in_double_quote and type_str[i : i + 3] == " | " ): # Found a top-level union separator part = "".join(current).strip() if part: parts.append(part) current = [] i += 3 # Skip " | " continue else: current.append(char) i += 1 # Add the last part part = "".join(current).strip() if part: parts.append(part) return parts def flatten_union(types: list[str]) -> str: """ Flatten and deduplicate union type parts. 
Handles nested unions (e.g., "str | int" combined with "str | None") by splitting on " | " at the top level only (bracket- and quote-aware), deduplicating, and placing None at the end. Args: types: List of type strings, possibly containing unions. Returns: A single union type string with duplicates removed and None at end. """ # Split any nested unions and collect all parts all_parts: list[str] = [] has_none = False for t in types: # Split on " | " only at top level (respecting brackets and quotes) parts = split_union_top_level(t) for part in parts: if part == "None": has_none = True elif part and part not in all_parts: all_parts.append(part) # Handle edge case: only None if not all_parts: return "None" # Build result with None at the end if present if len(all_parts) == 1: result = all_parts[0] else: result = " | ".join(all_parts) if has_none: result = f"{result} | None" return result
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/_sdk/unions.py", "license": "Apache License 2.0", "lines": 102, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
PrefectHQ/prefect:tests/_sdk/test_schema_converter_complex.py
"""Tests for complex schemas, unknown types, and edge cases.""" from prefect._sdk.schema_converter import ( ConversionContext, json_schema_to_python_type, ) class TestComplexSchemas: """Test complex real-world schema patterns.""" def test_pydantic_v2_optional_pattern(self): """Pydantic v2 optional fields use anyOf.""" schema = { "anyOf": [{"type": "integer"}, {"type": "null"}], "default": None, } assert json_schema_to_python_type(schema) == "int | None" def test_nested_array_of_objects(self): schema = { "type": "array", "items": { "type": "object", "additionalProperties": {"type": "string"}, }, } assert json_schema_to_python_type(schema) == "list[dict[str, str]]" def test_deeply_nested(self): schema = { "type": "array", "items": { "type": "array", "items": { "type": "array", "items": {"type": "string"}, }, }, } assert json_schema_to_python_type(schema) == "list[list[list[str]]]" def test_union_of_arrays(self): schema = { "anyOf": [ {"type": "array", "items": {"type": "string"}}, {"type": "array", "items": {"type": "integer"}}, ] } assert json_schema_to_python_type(schema) == "list[str] | list[int]" class TestUnknownTypes: """Test handling of unknown or unsupported types.""" def test_unknown_type(self): """Unknown type values should return Any.""" context = ConversionContext() schema = {"type": "unknowntype"} result = json_schema_to_python_type(schema, context) assert result == "Any" assert len(context.conversion_warnings) == 1 assert "Unknown type" in context.conversion_warnings[0] def test_unsupported_enum_value(self): """Unsupported enum value types should return Any.""" schema = {"enum": [{"complex": "object"}]} assert json_schema_to_python_type(schema) == "Any" class TestEdgeCases: """Test edge cases for improved coverage.""" def test_allof_all_any_types(self): """allOf where all variants return Any should return Any.""" schema = {"allOf": [{}, {}]} # Empty schemas return Any assert json_schema_to_python_type(schema) == "Any" def 
test_array_with_prefixitems_converted_to_tuple(self): """Array with prefixItems should be converted to tuple.""" schema = {"type": "array", "prefixItems": [{"type": "string"}]} assert json_schema_to_python_type(schema) == "tuple[str]" def test_object_with_non_standard_additional_properties(self): """Object with unusual additionalProperties value.""" # additionalProperties as a non-dict, non-bool value # In practice this shouldn't happen, but test the fallback schema = {"type": "object", "additionalProperties": "invalid"} assert json_schema_to_python_type(schema) == "dict[str, Any]"
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/_sdk/test_schema_converter_complex.py", "license": "Apache License 2.0", "lines": 73, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:tests/_sdk/test_schema_converter_enums.py
"""Tests for enum and tuple type conversion in JSON Schema.""" from prefect._sdk.schema_converter import json_schema_to_python_type class TestEnumTypes: """Test conversion of enum types to Literal.""" def test_string_enum(self): schema = {"enum": ["A", "B", "C"], "type": "string"} assert json_schema_to_python_type(schema) == "Literal['A', 'B', 'C']" def test_integer_enum(self): schema = {"enum": [1, 2, 3], "type": "integer"} assert json_schema_to_python_type(schema) == "Literal[1, 2, 3]" def test_boolean_enum(self): schema = {"enum": [True, False]} assert json_schema_to_python_type(schema) == "Literal[True, False]" def test_mixed_enum(self): schema = {"enum": ["a", 1, True]} assert json_schema_to_python_type(schema) == "Literal['a', 1, True]" def test_enum_with_null(self): schema = {"enum": ["A", "B", None]} assert json_schema_to_python_type(schema) == "Literal['A', 'B', None]" def test_empty_enum(self): schema = {"enum": []} assert json_schema_to_python_type(schema) == "Any" def test_string_with_quotes(self): """Strings containing quotes should be properly escaped using repr().""" schema = {"enum": ['say "hello"', "it's"]} # repr() uses single quotes by default for strings result = json_schema_to_python_type(schema) assert result == """Literal['say "hello"', "it's"]""" def test_const(self): """const is treated as a single-value enum.""" schema = {"const": "fixed_value"} assert json_schema_to_python_type(schema) == "Literal['fixed_value']" def test_float_enum(self): """Float enum values should be supported.""" schema = {"enum": [1.5, 2.5, 3.0]} assert json_schema_to_python_type(schema) == "Literal[1.5, 2.5, 3.0]" def test_mixed_numeric_enum(self): """Mixed int and float enums should work.""" schema = {"enum": [1, 2.5, 3]} assert json_schema_to_python_type(schema) == "Literal[1, 2.5, 3]" def test_string_with_control_characters(self): """Strings with control characters should be properly escaped.""" schema = {"enum": ["line1\nline2", "tab\there", "back\\slash"]} 
result = json_schema_to_python_type(schema) # repr() properly escapes these assert result == r"Literal['line1\nline2', 'tab\there', 'back\\slash']" def test_string_with_unicode(self): """Unicode strings should be preserved.""" schema = {"enum": ["café", "日本語", "emoji: 🎉"]} result = json_schema_to_python_type(schema) assert result == "Literal['café', '日本語', 'emoji: 🎉']" class TestTupleTypes: """Test conversion of tuple types (prefixItems).""" def test_single_element_tuple(self): schema = {"type": "array", "prefixItems": [{"type": "string"}]} assert json_schema_to_python_type(schema) == "tuple[str]" def test_two_element_tuple(self): schema = { "type": "array", "prefixItems": [{"type": "string"}, {"type": "integer"}], } assert json_schema_to_python_type(schema) == "tuple[str, int]" def test_multi_element_tuple(self): schema = { "type": "array", "prefixItems": [ {"type": "string"}, {"type": "integer"}, {"type": "boolean"}, ], } assert json_schema_to_python_type(schema) == "tuple[str, int, bool]" def test_empty_tuple(self): schema = {"type": "array", "prefixItems": []} assert json_schema_to_python_type(schema) == "tuple[()]" def test_pydantic_v1_tuple_format(self): """Pydantic v1 uses 'items' as array for tuples.""" schema = { "type": "array", "items": [{"type": "string"}, {"type": "integer"}], "minItems": 2, "maxItems": 2, } assert json_schema_to_python_type(schema) == "tuple[str, int]"
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/_sdk/test_schema_converter_enums.py", "license": "Apache License 2.0", "lines": 84, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:tests/_sdk/test_schema_converter_fields.py
"""Tests for extracting field information from JSON Schema.""" from prefect._sdk.schema_converter import extract_fields_from_schema class TestExtractFieldsFromSchema: """Test extracting field information from schemas.""" def test_simple_fields(self): schema = { "type": "object", "properties": { "name": {"type": "string"}, "age": {"type": "integer"}, }, "required": ["name"], } fields, warnings = extract_fields_from_schema(schema) assert len(fields) == 2 assert len(warnings) == 0 name_field = next(f for f in fields if f.name == "name") assert name_field.python_type == "str" assert name_field.required is True age_field = next(f for f in fields if f.name == "age") assert age_field.python_type == "int" assert age_field.required is False def test_field_with_default_not_required(self): """Field in required list but with default should be optional.""" schema = { "type": "object", "properties": { "count": {"type": "integer", "default": 0}, }, "required": ["count"], } fields, _ = extract_fields_from_schema(schema) assert len(fields) == 1 assert fields[0].name == "count" assert fields[0].required is False # Has default, so not required assert fields[0].has_default is True assert fields[0].default == 0 def test_field_descriptions(self): schema = { "type": "object", "properties": { "name": {"type": "string", "description": "The user's name"}, "age": {"type": "integer", "title": "User Age"}, }, } fields, _ = extract_fields_from_schema(schema) name_field = next(f for f in fields if f.name == "name") assert name_field.description == "The user's name" age_field = next(f for f in fields if f.name == "age") assert age_field.description == "User Age" # Falls back to title def test_with_definitions(self): """Fields can reference definitions.""" schema = { "type": "object", "properties": { "user": {"$ref": "#/definitions/User"}, }, "definitions": { "User": {"type": "object", "additionalProperties": False}, }, } fields, _ = extract_fields_from_schema(schema) assert len(fields) == 1 
assert fields[0].python_type == "dict[str, Any]" def test_with_defs(self): """Fields can reference $defs (Pydantic v2 format).""" schema = { "type": "object", "properties": { "user": {"$ref": "#/$defs/User"}, }, "$defs": { "User": {"type": "object"}, }, } fields, _ = extract_fields_from_schema(schema) assert len(fields) == 1 assert fields[0].python_type == "dict[str, Any]" def test_circular_reference_in_field(self): """Circular references in fields should be handled gracefully.""" schema = { "type": "object", "properties": { "node": {"$ref": "#/definitions/Node"}, }, "definitions": { "Node": {"$ref": "#/definitions/Node"}, }, } fields, warnings = extract_fields_from_schema(schema) assert len(fields) == 1 assert fields[0].python_type == "Any" # Circular ref fallback assert any("Circular" in w for w in warnings) def test_empty_properties(self): schema = {"type": "object", "properties": {}} fields, warnings = extract_fields_from_schema(schema) assert len(fields) == 0 assert len(warnings) == 0 def test_no_properties(self): schema = {"type": "object"} fields, warnings = extract_fields_from_schema(schema) assert len(fields) == 0 assert len(warnings) == 0
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/_sdk/test_schema_converter_fields.py", "license": "Apache License 2.0", "lines": 103, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:tests/_sdk/test_schema_converter_primitives.py
"""Tests for primitive and basic JSON Schema type conversion.""" from prefect._sdk.schema_converter import json_schema_to_python_type class TestPrimitiveTypes: """Test conversion of primitive JSON Schema types.""" def test_string(self): assert json_schema_to_python_type({"type": "string"}) == "str" def test_integer(self): assert json_schema_to_python_type({"type": "integer"}) == "int" def test_number(self): assert json_schema_to_python_type({"type": "number"}) == "float" def test_boolean(self): assert json_schema_to_python_type({"type": "boolean"}) == "bool" def test_null(self): assert json_schema_to_python_type({"type": "null"}) == "None" class TestEmptyAndBooleanSchemas: """Test handling of empty and boolean schemas.""" def test_empty_schema(self): assert json_schema_to_python_type({}) == "Any" def test_boolean_true_schema(self): """Boolean true schema accepts anything.""" assert json_schema_to_python_type(True) == "Any" def test_boolean_false_schema(self): """Boolean false schema accepts nothing. Note: We return "Any" rather than "Never" because false schemas are extremely rare in practice, and "Any" is more practical for type checking. 
""" assert json_schema_to_python_type(False) == "Any" def test_no_type_key(self): """Schema with no 'type' key should return Any.""" assert json_schema_to_python_type({"title": "Unknown"}) == "Any" class TestArrayTypes: """Test conversion of array types.""" def test_array_without_items(self): assert json_schema_to_python_type({"type": "array"}) == "list[Any]" def test_array_with_string_items(self): schema = {"type": "array", "items": {"type": "string"}} assert json_schema_to_python_type(schema) == "list[str]" def test_array_with_integer_items(self): schema = {"type": "array", "items": {"type": "integer"}} assert json_schema_to_python_type(schema) == "list[int]" def test_array_with_nested_array_items(self): schema = { "type": "array", "items": {"type": "array", "items": {"type": "string"}}, } assert json_schema_to_python_type(schema) == "list[list[str]]" def test_array_with_object_items(self): schema = {"type": "array", "items": {"type": "object"}} assert json_schema_to_python_type(schema) == "list[dict[str, Any]]" class TestObjectTypes: """Test conversion of object types.""" def test_object_without_additional_properties(self): schema = {"type": "object"} assert json_schema_to_python_type(schema) == "dict[str, Any]" def test_object_with_additional_properties_true(self): schema = {"type": "object", "additionalProperties": True} assert json_schema_to_python_type(schema) == "dict[str, Any]" def test_object_with_additional_properties_false(self): schema = {"type": "object", "additionalProperties": False} assert json_schema_to_python_type(schema) == "dict[str, Any]" def test_object_with_typed_additional_properties(self): schema = {"type": "object", "additionalProperties": {"type": "string"}} assert json_schema_to_python_type(schema) == "dict[str, str]" def test_object_with_typed_additional_properties_integer(self): schema = {"type": "object", "additionalProperties": {"type": "integer"}} assert json_schema_to_python_type(schema) == "dict[str, int]" def 
test_object_with_properties(self): """Object with properties but no additionalProperties still returns dict. Note: Nested TypedDict generation is handled by the template renderer, not this converter. """ schema = { "type": "object", "properties": {"name": {"type": "string"}, "age": {"type": "integer"}}, } assert json_schema_to_python_type(schema) == "dict[str, Any]" class TestImplicitTypes: """Test schemas that imply types without explicit type field.""" def test_properties_implies_object(self): """Schema with properties but no type implies object.""" schema = {"properties": {"name": {"type": "string"}}} assert json_schema_to_python_type(schema) == "dict[str, Any]" def test_items_implies_array(self): """Schema with items but no type implies array.""" schema = {"items": {"type": "string"}} assert json_schema_to_python_type(schema) == "list[str]"
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/_sdk/test_schema_converter_primitives.py", "license": "Apache License 2.0", "lines": 86, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:tests/_sdk/test_schema_converter_refs.py
"""Tests for $ref resolution and allOf handling in JSON Schema.""" import pytest from prefect._sdk.schema_converter import ( CircularReferenceError, ConversionContext, json_schema_to_python_type, ) class TestReferenceResolution: """Test $ref resolution.""" def test_simple_ref(self): context = ConversionContext( definitions={"MyType": {"type": "string"}}, ) schema = {"$ref": "#/definitions/MyType"} assert json_schema_to_python_type(schema, context) == "str" def test_ref_to_object(self): context = ConversionContext( definitions={"Person": {"type": "object", "additionalProperties": False}}, ) schema = {"$ref": "#/definitions/Person"} assert json_schema_to_python_type(schema, context) == "dict[str, Any]" def test_defs_format(self): """Pydantic v2 uses $defs instead of definitions.""" context = ConversionContext( definitions={"MyType": {"type": "integer"}}, ) schema = {"$ref": "#/$defs/MyType"} assert json_schema_to_python_type(schema, context) == "int" def test_nested_ref(self): """Test ref that points to another ref.""" context = ConversionContext( definitions={ "TypeA": {"$ref": "#/definitions/TypeB"}, "TypeB": {"type": "string"}, }, ) schema = {"$ref": "#/definitions/TypeA"} assert json_schema_to_python_type(schema, context) == "str" def test_circular_ref_detection(self): """Circular references should raise an error.""" context = ConversionContext( definitions={ "Node": { "type": "object", "properties": {"child": {"$ref": "#/definitions/Node"}}, } }, ) schema = {"$ref": "#/definitions/Node"} # The outer ref should work, but inner ref in properties causes dict[str, Any] # Actually, since we're converting the whole object, it returns dict[str, Any] assert json_schema_to_python_type(schema, context) == "dict[str, Any]" def test_self_referencing_directly(self): """Direct self-reference should raise CircularReferenceError.""" context = ConversionContext( definitions={"Loop": {"$ref": "#/definitions/Loop"}}, ) schema = {"$ref": "#/definitions/Loop"} with 
pytest.raises(CircularReferenceError): json_schema_to_python_type(schema, context) def test_unknown_ref_format(self): """Unknown ref format should return Any with warning.""" context = ConversionContext() schema = {"$ref": "http://example.com/schema"} result = json_schema_to_python_type(schema, context) assert result == "Any" assert len(context.conversion_warnings) == 1 assert "Unknown $ref format" in context.conversion_warnings[0] def test_missing_definition(self): """Missing definition should return Any with warning.""" context = ConversionContext(definitions={}) schema = {"$ref": "#/definitions/Missing"} result = json_schema_to_python_type(schema, context) assert result == "Any" assert len(context.conversion_warnings) == 1 assert "Definition not found" in context.conversion_warnings[0] def test_external_url_ref(self): """External URL refs should return Any with warning.""" context = ConversionContext() schema = {"$ref": "https://json-schema.org/draft/2020-12/schema"} result = json_schema_to_python_type(schema, context) assert result == "Any" assert any("Unknown $ref format" in w for w in context.conversion_warnings) def test_openapi_style_ref(self): """OpenAPI-style refs (#/components/schemas/...) are not supported.""" context = ConversionContext() schema = {"$ref": "#/components/schemas/User"} result = json_schema_to_python_type(schema, context) assert result == "Any" assert any("Unknown $ref format" in w for w in context.conversion_warnings) class TestAllOf: """Test allOf handling.""" def test_allof_with_ref(self): """allOf with $ref should resolve the ref.""" context = ConversionContext( definitions={"MyType": {"type": "string"}}, ) schema = {"allOf": [{"$ref": "#/definitions/MyType"}]} assert json_schema_to_python_type(schema, context) == "str" def test_allof_multiple_refs(self): """allOf with multiple refs should use the first one. Note: True intersection semantics are not modeled. 
""" context = ConversionContext( definitions={ "TypeA": {"type": "string"}, "TypeB": {"type": "integer"}, }, ) schema = { "allOf": [ {"$ref": "#/definitions/TypeA"}, {"$ref": "#/definitions/TypeB"}, ] } assert json_schema_to_python_type(schema, context) == "str" def test_allof_without_ref(self): """allOf without ref should convert the first useful variant.""" schema = {"allOf": [{"type": "string"}]} assert json_schema_to_python_type(schema) == "str"
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/_sdk/test_schema_converter_refs.py", "license": "Apache License 2.0", "lines": 120, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:tests/_sdk/test_schema_converter_unions.py
"""Tests for union type conversion in JSON Schema.""" from prefect._sdk.schema_converter import json_schema_to_python_type class TestNullableTypes: """Test conversion of nullable (anyOf with null) types.""" def test_nullable_string(self): schema = {"anyOf": [{"type": "string"}, {"type": "null"}]} assert json_schema_to_python_type(schema) == "str | None" def test_nullable_integer(self): schema = {"anyOf": [{"type": "integer"}, {"type": "null"}]} assert json_schema_to_python_type(schema) == "int | None" def test_null_first(self): """Null can be in any position.""" schema = {"anyOf": [{"type": "null"}, {"type": "string"}]} assert json_schema_to_python_type(schema) == "str | None" def test_only_null(self): schema = {"anyOf": [{"type": "null"}]} assert json_schema_to_python_type(schema) == "None" class TestUnionTypes: """Test conversion of union types (anyOf/oneOf).""" def test_string_or_integer(self): schema = {"anyOf": [{"type": "string"}, {"type": "integer"}]} assert json_schema_to_python_type(schema) == "str | int" def test_multi_type_union(self): schema = { "anyOf": [{"type": "string"}, {"type": "integer"}, {"type": "boolean"}] } assert json_schema_to_python_type(schema) == "str | int | bool" def test_nullable_union(self): schema = {"anyOf": [{"type": "string"}, {"type": "integer"}, {"type": "null"}]} assert json_schema_to_python_type(schema) == "str | int | None" def test_one_of(self): """oneOf is treated the same as anyOf.""" schema = {"oneOf": [{"type": "string"}, {"type": "integer"}]} assert json_schema_to_python_type(schema) == "str | int" def test_duplicate_types_in_union(self): """Duplicate types should be deduplicated.""" schema = {"anyOf": [{"type": "string"}, {"type": "string"}]} assert json_schema_to_python_type(schema) == "str" def test_nested_union_flattening(self): """Unions containing unions should be flattened and deduplicated.""" # This simulates anyOf containing a variant that itself is a union # e.g., anyOf with str and (str | None) schema = { 
"anyOf": [ {"type": "string"}, {"anyOf": [{"type": "string"}, {"type": "null"}]}, ] } # Should flatten to just "str | None", not "str | str | None" assert json_schema_to_python_type(schema) == "str | None" def test_nested_union_with_different_types(self): """Nested unions with different types should be properly flattened.""" schema = { "anyOf": [ {"anyOf": [{"type": "string"}, {"type": "integer"}]}, {"anyOf": [{"type": "boolean"}, {"type": "null"}]}, ] } assert json_schema_to_python_type(schema) == "str | int | bool | None" def test_deeply_nested_unions(self): """Deeply nested unions should all flatten correctly.""" schema = { "anyOf": [ {"type": "string"}, { "anyOf": [ {"type": "integer"}, {"anyOf": [{"type": "boolean"}, {"type": "null"}]}, ] }, ] } assert json_schema_to_python_type(schema) == "str | int | bool | None" class TestTypeArrays: """Test type specified as array (e.g., ["string", "null"]).""" def test_string_or_null(self): schema = {"type": ["string", "null"]} assert json_schema_to_python_type(schema) == "str | None" def test_integer_or_null(self): schema = {"type": ["integer", "null"]} assert json_schema_to_python_type(schema) == "int | None" def test_multi_type(self): schema = {"type": ["string", "integer"]} assert json_schema_to_python_type(schema) == "str | int" def test_only_null(self): schema = {"type": ["null"]} assert json_schema_to_python_type(schema) == "None" def test_duplicate_types_deduplicated(self): """Duplicate types in type array should be deduplicated.""" schema = {"type": ["string", "string", "null"]} assert json_schema_to_python_type(schema) == "str | None" def test_all_duplicates(self): """All duplicate types should result in single type.""" schema = {"type": ["integer", "integer", "integer"]} assert json_schema_to_python_type(schema) == "int" class TestFlattenUnion: """Test the union flattening helper behavior.""" def test_simple_types(self): """Simple types should be joined with |.""" schema = {"anyOf": [{"type": "string"}, 
{"type": "integer"}]} assert json_schema_to_python_type(schema) == "str | int" def test_none_at_end(self): """None should always be at the end.""" # Multiple orderings should all result in None at end schema1 = {"anyOf": [{"type": "null"}, {"type": "string"}, {"type": "integer"}]} schema2 = {"anyOf": [{"type": "string"}, {"type": "null"}, {"type": "integer"}]} schema3 = {"anyOf": [{"type": "string"}, {"type": "integer"}, {"type": "null"}]} expected = "str | int | None" assert json_schema_to_python_type(schema1) == expected assert json_schema_to_python_type(schema2) == expected assert json_schema_to_python_type(schema3) == expected def test_preserves_order_of_non_null_types(self): """Non-null types should preserve their order of first appearance.""" schema = { "anyOf": [{"type": "boolean"}, {"type": "string"}, {"type": "integer"}] } assert json_schema_to_python_type(schema) == "bool | str | int" def test_array_with_union_items_in_union(self): """Union containing array-of-union should not split inside brackets. 
e.g., anyOf: [ {"type":"array","items":{"anyOf":[str, int]}}, null ] should produce: list[str | int] | None (not corrupted) """ schema = { "anyOf": [ { "type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "integer"}]}, }, {"type": "null"}, ] } result = json_schema_to_python_type(schema) assert result == "list[str | int] | None" def test_literal_with_pipe_in_string(self): """Union containing Literal with ' | ' in string should not split inside quotes.""" schema = { "anyOf": [ {"enum": ["a | b", "c | d"]}, {"type": "integer"}, ] } result = json_schema_to_python_type(schema) assert result == "Literal['a | b', 'c | d'] | int" def test_complex_nested_union_with_brackets(self): """Complex nested types with unions inside brackets should be preserved.""" schema = { "anyOf": [ { "type": "object", "additionalProperties": { "anyOf": [{"type": "string"}, {"type": "integer"}] }, }, {"type": "array", "items": {"type": "boolean"}}, {"type": "null"}, ] } result = json_schema_to_python_type(schema) assert result == "dict[str, str | int] | list[bool] | None"
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/_sdk/test_schema_converter_unions.py", "license": "Apache License 2.0", "lines": 159, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:tests/_sdk/test_union_utils.py
"""Tests for union utility functions.""" from prefect._sdk.unions import split_union_top_level class TestSplitUnionTopLevel: """Test the split_union_top_level helper function directly.""" def test_simple_union(self): """Simple union should split correctly.""" assert split_union_top_level("str | int") == ["str", "int"] def test_no_union(self): """Non-union should return single element.""" assert split_union_top_level("str") == ["str"] def test_union_inside_brackets_not_split(self): """Union inside brackets should not be split.""" assert split_union_top_level("list[str | int]") == ["list[str | int]"] def test_union_inside_nested_brackets_not_split(self): """Union inside nested brackets should not be split.""" result = split_union_top_level("dict[str, list[int | float]]") assert result == ["dict[str, list[int | float]]"] def test_top_level_with_nested_brackets(self): """Top-level union with nested brackets should split correctly.""" result = split_union_top_level("list[str | int] | None") assert result == ["list[str | int]", "None"] def test_union_inside_single_quotes_not_split(self): """Union inside single quotes should not be split.""" result = split_union_top_level("Literal['a | b']") assert result == ["Literal['a | b']"] def test_union_inside_double_quotes_not_split(self): """Union inside double quotes should not be split.""" result = split_union_top_level('Literal["a | b"]') assert result == ['Literal["a | b"]'] def test_escaped_quotes_handled(self): """Escaped quotes inside strings should be handled.""" result = split_union_top_level(r"Literal['it\'s | ok'] | int") assert result == [r"Literal['it\'s | ok']", "int"] def test_multiple_literals_in_union(self): """Multiple Literals in a union should work.""" result = split_union_top_level("Literal['a | b', 'c'] | int | None") assert result == ["Literal['a | b', 'c']", "int", "None"] def test_empty_string(self): """Empty string should return empty list.""" assert split_union_top_level("") == [] def 
test_whitespace_handling(self): """Whitespace should be trimmed from parts.""" result = split_union_top_level(" str | int ") assert result == ["str", "int"] def test_trailing_backslash(self): """Trailing backslash should be handled gracefully.""" # Edge case: backslash at end of string result = split_union_top_level("Literal['test\\") assert result == ["Literal['test\\"]
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/_sdk/test_union_utils.py", "license": "Apache License 2.0", "lines": 49, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:examples/resume_flow_run_on_pr_merge.py
# --- # title: Resume Flow Run on PR Merge # description: Automatically resume failed flow runs when a hotfix PR is merged in GitHub. # icon: github # keywords: ["github", "webhooks", "automations", "ci-cd", "retry", "resume"] # --- # # <Note> # This example uses [webhooks](/v3/automate/events/webhooks), which are only available in Prefect Cloud. # </Note> # # When a flow run fails due to a bug in your code, you typically need to: # 1. Fix the bug in a pull request # 2. Merge the PR # 3. Manually retry the failed flow run # # This example shows how to automate step 3 by creating a webhook and automation that # automatically resumes a failed flow run when a PR containing the flow run URL is merged. # # ## Prerequisites # # - A Prefect Cloud workspace # - A GitHub repository with webhook access # - A failed flow run you want to resume # # ## Step 1: Create a Prefect webhook # # Create a webhook in Prefect Cloud that transforms GitHub PR events into Prefect events. # The webhook template extracts the flow run ID from the PR body when present. # # Navigate to your workspace's **Webhooks** page and create a new webhook with this template: # # ```jinja # { # "event": "github.{{ headers.get('x-github-event', 'unknown') }}.{{ body.action|default('no-action') }}", # "resource": { # "prefect.resource.id": "{% set frid = body.pull_request.body|flow_run_id %}{% if frid %}prefect.flow-run.{{ frid }}{% else %}github.pr.{{ body.pull_request.number|default(0) }}{% endif %}", # "pr.number": "{{ body.pull_request.number|default(0) }}", # "pr.merged": "{{ body.pull_request.merged|default(false) }}", # "pr.title": "{{ body.pull_request.title|default('')|truncate(100) }}" # } # } # ``` # # This template uses the `flow_run_id` filter to extract a flow run UUID from any Prefect Cloud URL # in the PR body. If no flow run URL is found, it falls back to `github.pr.<number>`. # The `pr.merged` label enables filtering for merged PRs only. # # Copy the webhook URL for the next step. 
# # ## Step 2: Configure GitHub webhook # # In your GitHub repository: # # 1. Go to **Settings** → **Webhooks** → **Add webhook** # 2. Set the **Payload URL** to your Prefect webhook URL # 3. Set **Content type** to `application/json` # 4. Under **Which events would you like to trigger this webhook?**, select **Let me select individual events** and check **Pull requests** # 5. Click **Add webhook** # # ## Step 3: Create an automation # # Create an automation that triggers when a PR is merged and the event contains a flow run ID. # # Navigate to your workspace's **Automations** page and create a new automation: # # **Trigger configuration:** # - **Trigger type**: Event # - **Event name**: `github.pull_request.closed` # - **Resource**: Match `prefect.resource.id` starting with `prefect.flow-run.` # - **Resource labels**: Match `pr.merged` equals `True` # # **Action configuration:** # - **Action type**: Change flow run state # - **New state**: Scheduled # - **Force**: Yes (required to transition from Failed state) # # The automation extracts the flow run ID from the event's `prefect.resource.id` and changes # its state to resume execution. # # ## Example flow # # Here's a simple flow that reads configuration and can fail based on its contents: import json from pathlib import Path from prefect import flow @flow(log_prints=True) def my_flow(): config = json.loads(Path("config.json").read_text()) if error := config.get("error"): raise ValueError(f"Flow failed: {error}") print("Flow completed successfully!") if __name__ == "__main__": my_flow() # ## Using the workflow # # When this flow fails: # # 1. Create a PR to fix the issue (e.g., fix `config.json`) # 2. Include the flow run URL in the PR body: # ``` # This PR fixes the data validation issue. # # Fixes: https://app.prefect.cloud/account/.../workspace/.../runs/flow-run/abc123-... # ``` # 3. Merge the PR # 4. The automation triggers and resumes the flow run # # ## How it works # # 1. 
**GitHub sends webhook**: When a PR is closed, GitHub sends a POST request to your Prefect webhook # 2. **Webhook transforms event**: The Jinja template extracts the flow run ID from the PR body and creates a Prefect event with `prefect.resource.id` set to `prefect.flow-run.<uuid>` # 3. **Automation matches**: The automation triggers on `github.pull_request.closed` events where `pr.merged` is `True` and the resource ID matches a flow run # 4. **State change**: The automation changes the flow run state to `Scheduled`, which resumes execution
{ "repo_id": "PrefectHQ/prefect", "file_path": "examples/resume_flow_run_on_pr_merge.py", "license": "Apache License 2.0", "lines": 114, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
PrefectHQ/prefect:tests/events/jinja_filters/test_flow_run_id.py
"""Tests for the ``flow_run_id`` Jinja filter used by webhook templates."""

from prefect.server.events.jinja_filters import flow_run_id


def test_flow_run_id_extraction():
    """A Prefect Cloud flow-run URL embedded in text yields its UUID."""
    url = "https://app.prefect.cloud/account/abc/workspace/xyz/runs/flow-run/12345678-1234-5678-1234-567812345678"
    assert flow_run_id(f"Fixes {url}") == "12345678-1234-5678-1234-567812345678"


def test_flow_run_id_extraction_no_match():
    """Text without a flow-run URL yields None."""
    assert flow_run_id("Fixes something else") is None


def test_flow_run_id_extraction_none():
    """A None body is tolerated and yields None."""
    assert flow_run_id(None) is None
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/events/jinja_filters/test_flow_run_id.py", "license": "Apache License 2.0", "lines": 10, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:tests/client/test_state_serialization.py
""" Tests for state serialization, specifically around the defer_build=True behavior in PrefectBaseModel that can cause MockValSer errors. Regression test for: TypeError: 'MockValSer' object cannot be converted to 'SchemaSerializer' Root cause: PrefectBaseModel has defer_build=True, and when FlowRun.model_validate() is called, pydantic rebuilds FlowRun but NOT nested models like StateDetails. StateDetails stays incomplete with MockValSer as its serializer, causing model_dump(serialize_as_any=True) to fail. """ import subprocess import sys from uuid import uuid4 import pytest from pydantic._internal._mock_val_ser import MockValSer from prefect.client.schemas.objects import FlowRun, StateDetails, StateType from prefect.states import to_state_create class TestStateSerializationWithDeferBuild: """ Tests that verify state serialization works correctly despite defer_build=True. """ def test_state_create_serialization_in_fresh_process(self): """ Regression test: StateCreate.model_dump(serialize_as_any=True) should work even when StateDetails hasn't been explicitly rebuilt. This test runs in a subprocess to ensure a fresh Python interpreter state, which is required to reproduce the defer_build issue. """ # NOTE: This test must use subprocess with inline code because the bug only # manifests in a fresh Python process before any other imports trigger model # rebuilding. Module-level imports in THIS file would mask the bug. 
code = """ import sys from uuid import uuid4 from prefect.client.schemas.objects import FlowRun, StateType from prefect.states import to_state_create # Simulate reading a FlowRun from the API (exactly what workers do) flow_run_data = { "id": str(uuid4()), "name": "test-run", "flow_id": str(uuid4()), "state": { "type": "PENDING", "name": "Pending", "timestamp": "2024-01-01T00:00:00Z", "state_details": {"flow_run_id": str(uuid4())}, }, } flow_run = FlowRun.model_validate(flow_run_data) # This is the code path from _mark_flow_run_as_cancelled state_updates = {"name": "Cancelled", "type": StateType.CANCELLED} state = flow_run.state.model_copy(update=state_updates) state_create = to_state_create(state) # This should NOT raise: # TypeError: 'MockValSer' object cannot be converted to 'SchemaSerializer' try: result = state_create.model_dump(mode="json", serialize_as_any=True) print("SUCCESS") sys.exit(0) except TypeError as e: if "MockValSer" in str(e): print(f"MOCKVALSER_ERROR: {e}") sys.exit(1) raise """ result = subprocess.run( [sys.executable, "-c", code], capture_output=True, text=True, ) # Check for the specific MockValSer error if "MOCKVALSER_ERROR" in result.stdout: pytest.fail( f"StateCreate serialization failed with MockValSer error. " f"This indicates StateDetails was not properly rebuilt. " f"Output: {result.stdout}{result.stderr}" ) # Check for general failure if result.returncode != 0: pytest.fail( f"StateCreate serialization failed unexpectedly. 
" f"stdout: {result.stdout}, stderr: {result.stderr}" ) assert "SUCCESS" in result.stdout def test_state_details_model_rebuild_works(self): """Verify that explicitly calling model_rebuild() on StateDetails works.""" # Force rebuild StateDetails.model_rebuild(force=True) # Should be complete after rebuild assert StateDetails.__pydantic_complete__ is True # Create and serialize sd = StateDetails(flow_run_id=uuid4()) result = sd.model_dump(mode="json", serialize_as_any=True) assert "flow_run_id" in result def test_state_details_serializer_is_not_mock(self): """ Verify that StateDetails has a proper serializer, not MockValSer. After any model operation that should trigger building, the serializer should be a SchemaSerializer, not MockValSer. """ # Force rebuild to ensure it's built StateDetails.model_rebuild(force=True) assert not isinstance(StateDetails.__pydantic_serializer__, MockValSer), ( "StateDetails.__pydantic_serializer__ should not be MockValSer after rebuild" ) def test_flow_run_state_to_state_create_serialization(self): """ Test the full flow: FlowRun -> State -> StateCreate -> model_dump. This is the exact code path used in worker cancellation. """ flow_run_data = { "id": str(uuid4()), "name": "test-run", "flow_id": str(uuid4()), "state": { "type": "PENDING", "name": "Pending", "timestamp": "2024-01-01T00:00:00Z", "state_details": {"flow_run_id": str(uuid4())}, }, } flow_run = FlowRun.model_validate(flow_run_data) state_updates = {"name": "Cancelled", "type": StateType.CANCELLED} state = flow_run.state.model_copy(update=state_updates) state_create = to_state_create(state) # This should not raise result = state_create.model_dump(mode="json", serialize_as_any=True) assert result["type"] == "CANCELLED" assert result["name"] == "Cancelled" assert "state_details" in result
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/client/test_state_serialization.py", "license": "Apache License 2.0", "lines": 129, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:src/prefect/_internal/send_entrypoint_logs.py
""" Internal utility for sending error logs to Prefect from the entrypoint. Usage: python -m prefect._internal.send_entrypoint_logs < /tmp/output.log python -m prefect._internal.send_entrypoint_logs /tmp/output.log Reads PREFECT__FLOW_RUN_ID from environment. Exits silently on failure. """ import logging import os import sys from uuid import UUID from prefect.client.orchestration import get_client from prefect.client.schemas.actions import LogCreate from prefect.types._datetime import now def _send(content: str, flow_run_id: UUID | None) -> None: logs = [ LogCreate( name="prefect.entrypoint", level=logging.ERROR, message=content, timestamp=now("UTC"), flow_run_id=flow_run_id, ) ] with get_client(sync_client=True) as client: client.create_logs(logs) def main() -> None: if len(sys.argv) > 1: content = open(sys.argv[1]).read() elif not sys.stdin.isatty(): content = sys.stdin.read() else: return if not content.strip(): return flow_run_id = None if env_val := os.environ.get("PREFECT__FLOW_RUN_ID"): try: flow_run_id = UUID(env_val) except ValueError: pass try: _send(content, flow_run_id) except Exception: pass if __name__ == "__main__": main()
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/_internal/send_entrypoint_logs.py", "license": "Apache License 2.0", "lines": 47, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
PrefectHQ/prefect:tests/_internal/test_send_entrypoint_logs.py
import logging import subprocess import sys from unittest.mock import MagicMock, patch from uuid import UUID from prefect._internal.send_entrypoint_logs import _send, main class TestSend: def test_creates_error_log_with_flow_run_id(self): mock_client = MagicMock() flow_run_id = UUID("12345678-1234-5678-1234-567812345678") with patch( "prefect._internal.send_entrypoint_logs.get_client", return_value=MagicMock(__enter__=MagicMock(return_value=mock_client)), ): _send("test message", flow_run_id) mock_client.create_logs.assert_called_once() logs = mock_client.create_logs.call_args[0][0] assert len(logs) == 1 assert logs[0].message == "test message" assert logs[0].level == logging.ERROR assert logs[0].flow_run_id == flow_run_id assert logs[0].name == "prefect.entrypoint" def test_works_without_flow_run_id(self): mock_client = MagicMock() with patch( "prefect._internal.send_entrypoint_logs.get_client", return_value=MagicMock(__enter__=MagicMock(return_value=mock_client)), ): _send("test message", None) logs = mock_client.create_logs.call_args[0][0] assert logs[0].flow_run_id is None class TestMain: def test_reads_from_file(self, tmp_path): log_file = tmp_path / "test.log" log_file.write_text("file content") with ( patch("sys.argv", ["send_entrypoint_logs", str(log_file)]), patch("prefect._internal.send_entrypoint_logs._send") as mock_send, ): main() assert mock_send.called def test_reads_from_stdin(self): mock_stdin = MagicMock() mock_stdin.isatty.return_value = False mock_stdin.read.return_value = "stdin content" with ( patch("sys.argv", ["send_entrypoint_logs"]), patch("sys.stdin", mock_stdin), patch("prefect._internal.send_entrypoint_logs._send") as mock_send, ): main() assert mock_send.called def test_skips_empty_content(self, tmp_path): log_file = tmp_path / "empty.log" log_file.write_text(" \n ") with ( patch("sys.argv", ["send_entrypoint_logs", str(log_file)]), patch("prefect._internal.send_entrypoint_logs._send") as mock_send, ): main() 
mock_send.assert_not_called() def test_reads_flow_run_id_from_env(self, tmp_path, monkeypatch): log_file = tmp_path / "test.log" log_file.write_text("content") monkeypatch.setenv( "PREFECT__FLOW_RUN_ID", "12345678-1234-5678-1234-567812345678" ) with ( patch("sys.argv", ["send_entrypoint_logs", str(log_file)]), patch("prefect._internal.send_entrypoint_logs._send") as mock_send, ): main() assert mock_send.called def test_ignores_invalid_flow_run_id(self, tmp_path, monkeypatch): log_file = tmp_path / "test.log" log_file.write_text("content") monkeypatch.setenv("PREFECT__FLOW_RUN_ID", "not-a-uuid") with ( patch("sys.argv", ["send_entrypoint_logs", str(log_file)]), patch("prefect._internal.send_entrypoint_logs._send") as mock_send, ): main() assert mock_send.called def test_silently_swallows_exceptions(self, tmp_path): log_file = tmp_path / "test.log" log_file.write_text("content") with ( patch("sys.argv", ["send_entrypoint_logs", str(log_file)]), patch( "prefect._internal.send_entrypoint_logs._send", side_effect=Exception("connection failed"), ), ): main() # should not raise class TestModuleInvocation: def test_invokable_as_module(self): result = subprocess.run( [sys.executable, "-m", "prefect._internal.send_entrypoint_logs"], capture_output=True, text=True, input="", ) assert result.returncode == 0
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/_internal/test_send_entrypoint_logs.py", "license": "Apache License 2.0", "lines": 103, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:scripts/generate_prefect_yaml_schema.py
import json from pydantic.json_schema import GenerateJsonSchema from prefect import __development_base_path__ from prefect.cli.deploy._models import PrefectYamlModel class PrefectYamlGenerateJsonSchema(GenerateJsonSchema): def generate(self, schema, mode="validation"): json_schema = super().generate(schema, mode=mode) json_schema["title"] = "Prefect YAML" json_schema["$schema"] = self.schema_dialect json_schema["$id"] = ( "https://github.com/PrefectHQ/prefect/schemas/prefect.yaml.schema.json" ) return json_schema def main(): with open( __development_base_path__ / "schemas" / "prefect.yaml.schema.json", "w" ) as f: json.dump( PrefectYamlModel.model_json_schema( schema_generator=PrefectYamlGenerateJsonSchema ), f, indent=4, ) if __name__ == "__main__": main()
{ "repo_id": "PrefectHQ/prefect", "file_path": "scripts/generate_prefect_yaml_schema.py", "license": "Apache License 2.0", "lines": 26, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
PrefectHQ/prefect:tests/cli/deploy/test_prefect_yaml_schema.py
"""Tests for the prefect.yaml JSON schema generation.""" import json import uuid import pytest from jsonschema import Draft202012Validator, ValidationError, validate from prefect import __development_base_path__ from prefect.cli.deploy._models import PrefectYamlModel @pytest.fixture def schema(): """Load the generated JSON schema.""" schema_path = __development_base_path__ / "schemas" / "prefect.yaml.schema.json" if not schema_path.exists(): pytest.skip( "Schema file not generated yet - run generate_prefect_yaml_schema.py" ) with open(schema_path) as f: return json.load(f) class TestPrefectYamlSchema: def test_schema_file_exists(self): """Verify the schema file exists in the expected location.""" schema_path = __development_base_path__ / "schemas" / "prefect.yaml.schema.json" assert schema_path.exists(), ( "Schema file should exist at schemas/prefect.yaml.schema.json" ) def test_schema_is_valid_json_schema(self, schema): """Verify the generated schema is a valid JSON schema.""" # This will raise if the schema itself is invalid Draft202012Validator.check_schema(schema) def test_schema_has_required_metadata(self, schema): """Verify the schema has the expected metadata fields.""" assert schema.get("title") == "Prefect YAML" assert "$schema" in schema assert "$id" in schema assert "prefect.yaml.schema.json" in schema["$id"] def test_schema_validates_empty_config(self, schema): """An empty config should be valid.""" validate({}, schema) def test_schema_validates_minimal_deployment(self, schema): """A minimal deployment config should be valid.""" deployment_name = f"my-deployment-{uuid.uuid4()}" config = { "deployments": [ { "name": deployment_name, "entrypoint": "flows.py:my_flow", } ] } validate(config, schema) def test_schema_validates_full_deployment(self, schema): """A fully-specified deployment config should be valid.""" project_name = f"my-project-{uuid.uuid4()}" deployment_name = f"my-deployment-{uuid.uuid4()}" pool_name = f"my-pool-{uuid.uuid4()}" config = { 
"name": project_name, "prefect-version": "3.0.0", "build": [ {"prefect.deployments.steps.run_shell_script": {"script": "echo hello"}} ], "push": [], "pull": [ { "prefect.deployments.steps.git_clone": { "repository": "https://github.com/org/repo" } } ], "deployments": [ { "name": deployment_name, "version": "1.0.0", "tags": ["prod", "critical"], "description": "A test deployment", "entrypoint": "flows.py:my_flow", "parameters": {"param1": "value1"}, "work_pool": { "name": pool_name, "work_queue_name": "default", "job_variables": {"cpu": 2}, }, "schedules": [ {"cron": "0 0 * * *", "timezone": "America/New_York"}, ], } ], } validate(config, schema) def test_schema_rejects_invalid_deployments_type(self, schema): """deployments must be a list, not a string.""" config = {"deployments": "not a list"} with pytest.raises(ValidationError) as exc_info: validate(config, schema) assert "not of type 'array'" in str(exc_info.value) def test_schema_matches_pydantic_model(self, schema): """The generated schema should match what PrefectYamlModel produces.""" model_schema = PrefectYamlModel.model_json_schema() # Check that key definitions exist in both assert "DeploymentConfig" in schema.get("$defs", {}) assert "DeploymentConfig" in model_schema.get("$defs", {})
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/cli/deploy/test_prefect_yaml_schema.py", "license": "Apache License 2.0", "lines": 100, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:src/integrations/prefect-aws/prefect_aws/assume_role_parameters.py
"""Module handling Assume Role parameters""" from typing import Any, Dict, List, Optional from pydantic import BaseModel, Field from prefect_aws.utilities import hash_collection class AssumeRoleParameters(BaseModel): """ Model used to manage parameters for the AWS STS assume_role call. Refer to the [boto3 STS assume_role docs](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sts/client/assume_role.html) for more information about the possible assume role configurations. Attributes: RoleSessionName: An identifier for the assumed role session. This value is used to uniquely identify a session when the same role is assumed by different principals or for different reasons. If not provided, a default will be generated. DurationSeconds: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 43,200 seconds (12 hours). Policy: An IAM policy in JSON format that you want to use as an inline session policy. PolicyArns: The ARNs of the IAM managed policies to use as managed session policies. Each item should be a dict with an 'arn' key. Tags: A list of session tags. Each tag should be a dict with 'Key' and 'Value' keys. TransitiveTagKeys: A list of keys for session tags that you want to set as transitive. Transitive tags persist during role chaining. ExternalId: A unique identifier that is used by third parties to assume a role in their customers' accounts. SerialNumber: The identification number of the MFA device that is associated with the user who is making the AssumeRole call. TokenCode: The value provided by the MFA device, if MFA authentication is required. SourceIdentity: The source identity specified by the principal that is calling the AssumeRole operation. ProvidedContexts: A list of context information. Each context should be a dict with 'ProviderArn' and 'ContextAssertion' keys. 
""" # noqa E501 RoleSessionName: Optional[str] = Field( default=None, description=( "An identifier for the assumed role session. " "If not provided, a default will be generated." ), title="Role Session Name", ) DurationSeconds: Optional[int] = Field( default=None, description=( "The duration, in seconds, of the role session. " "The value can range from 900 seconds (15 minutes) to 43,200 seconds (12 hours)." ), title="Duration Seconds", ) Policy: Optional[str] = Field( default=None, description="An IAM policy in JSON format that you want to use as an inline session policy.", title="Policy", ) PolicyArns: Optional[List[Dict[str, str]]] = Field( default=None, description=( "The ARNs of the IAM managed policies to use as managed session policies. " "Each item should be a dict with an 'arn' key." ), title="Policy ARNs", ) Tags: Optional[List[Dict[str, str]]] = Field( default=None, description=( "A list of session tags. Each tag should be a dict with 'Key' and 'Value' keys." ), title="Tags", ) TransitiveTagKeys: Optional[List[str]] = Field( default=None, description=( "A list of keys for session tags that you want to set as transitive. " "Transitive tags persist during role chaining." ), title="Transitive Tag Keys", ) ExternalId: Optional[str] = Field( default=None, description=( "A unique identifier that is used by third parties to assume a role " "in their customers' accounts." ), title="External ID", ) SerialNumber: Optional[str] = Field( default=None, description=( "The identification number of the MFA device that is associated " "with the user who is making the AssumeRole call." ), title="Serial Number", ) TokenCode: Optional[str] = Field( default=None, description="The value provided by the MFA device, if MFA authentication is required.", title="Token Code", ) SourceIdentity: Optional[str] = Field( default=None, description=( "The source identity specified by the principal that is calling " "the AssumeRole operation." 
), title="Source Identity", ) ProvidedContexts: Optional[List[Dict[str, str]]] = Field( default=None, description=( "A list of context information. Each context should be a dict " "with 'ProviderArn' and 'ContextAssertion' keys." ), title="Provided Contexts", ) def __hash__(self): """Compute hash of the assume role parameters.""" return hash( ( self.RoleSessionName, self.DurationSeconds, self.Policy, hash_collection(self.PolicyArns), hash_collection(self.Tags), hash_collection(self.TransitiveTagKeys), self.ExternalId, self.SerialNumber, self.TokenCode, self.SourceIdentity, hash_collection(self.ProvidedContexts), ) ) def get_params_override(self) -> Dict[str, Any]: """ Return the dictionary of the parameters to override. The parameters to override are the ones which are not None. """ params = self.model_dump() params_override = {} for key, value in params.items(): if value is not None: params_override[key] = value return params_override
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/integrations/prefect-aws/prefect_aws/assume_role_parameters.py", "license": "Apache License 2.0", "lines": 142, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
PrefectHQ/prefect:src/integrations/prefect-aws/tests/test_assume_role_parameters.py
from typing import Any, Dict import pytest from prefect_aws.assume_role_parameters import AssumeRoleParameters class TestAssumeRoleParameters: @pytest.mark.parametrize( "params,result", [ (AssumeRoleParameters(), {}), ( AssumeRoleParameters( RoleSessionName="my-session", DurationSeconds=3600, ), { "RoleSessionName": "my-session", "DurationSeconds": 3600, }, ), ( AssumeRoleParameters( RoleSessionName="test-session", ExternalId="unique-external-id", ), { "RoleSessionName": "test-session", "ExternalId": "unique-external-id", }, ), ( AssumeRoleParameters( DurationSeconds=7200, SourceIdentity="test-identity", ), { "DurationSeconds": 7200, "SourceIdentity": "test-identity", }, ), ], ) def test_get_params_override_expected_output( self, params: AssumeRoleParameters, result: Dict[str, Any] ): assert result == params.get_params_override() @pytest.mark.parametrize( "params,result", [ ( AssumeRoleParameters( Tags=[ {"Key": "Project", "Value": "MyProject"}, {"Key": "Environment", "Value": "Production"}, ], ), { "Tags": [ {"Key": "Project", "Value": "MyProject"}, {"Key": "Environment", "Value": "Production"}, ], }, ), ( AssumeRoleParameters( PolicyArns=[ {"arn": "arn:aws:iam::aws:policy/ReadOnlyAccess"}, {"arn": "arn:aws:iam::aws:policy/PowerUserAccess"}, ], ), { "PolicyArns": [ {"arn": "arn:aws:iam::aws:policy/ReadOnlyAccess"}, {"arn": "arn:aws:iam::aws:policy/PowerUserAccess"}, ], }, ), ( AssumeRoleParameters( TransitiveTagKeys=["Project", "Environment"], ), { "TransitiveTagKeys": ["Project", "Environment"], }, ), ( AssumeRoleParameters( ProvidedContexts=[ { "ProviderArn": "arn:aws:iam::123456789012:oidc-provider/example", "ContextAssertion": "example-assertion", } ], ), { "ProvidedContexts": [ { "ProviderArn": "arn:aws:iam::123456789012:oidc-provider/example", "ContextAssertion": "example-assertion", } ], }, ), ], ) def test_get_params_override_with_list_parameters( self, params: AssumeRoleParameters, result: Dict[str, Any] ): override_params = params.get_params_override() 
assert override_params == result def test_get_params_override_with_policy(self): policy = '{"Version": "2012-10-17", "Statement": [{"Effect": "Allow", "Action": "s3:GetObject", "Resource": "*"}]}' params = AssumeRoleParameters(Policy=policy) override_params = params.get_params_override() assert override_params["Policy"] == policy def test_get_params_override_with_mfa(self): params = AssumeRoleParameters( SerialNumber="arn:aws:iam::123456789012:mfa/user", TokenCode="123456", ) override_params = params.get_params_override() assert override_params["SerialNumber"] == "arn:aws:iam::123456789012:mfa/user" assert override_params["TokenCode"] == "123456" def test_get_params_override_with_all_parameters(self): params = AssumeRoleParameters( RoleSessionName="comprehensive-session", DurationSeconds=3600, Policy='{"Version": "2012-10-17", "Statement": []}', PolicyArns=[{"arn": "arn:aws:iam::aws:policy/ReadOnlyAccess"}], Tags=[{"Key": "Project", "Value": "Test"}], TransitiveTagKeys=["Project"], ExternalId="external-123", SerialNumber="arn:aws:iam::123456789012:mfa/user", TokenCode="123456", SourceIdentity="source-identity", ProvidedContexts=[ { "ProviderArn": "arn:aws:iam::123456789012:oidc-provider/example", "ContextAssertion": "assertion", } ], ) override_params = params.get_params_override() assert len(override_params) == 11 assert override_params["RoleSessionName"] == "comprehensive-session" assert override_params["DurationSeconds"] == 3600 assert override_params["Policy"] == '{"Version": "2012-10-17", "Statement": []}' assert len(override_params["PolicyArns"]) == 1 assert len(override_params["Tags"]) == 1 assert len(override_params["TransitiveTagKeys"]) == 1 assert override_params["ExternalId"] == "external-123" assert override_params["SerialNumber"] == "arn:aws:iam::123456789012:mfa/user" assert override_params["TokenCode"] == "123456" assert override_params["SourceIdentity"] == "source-identity" assert len(override_params["ProvidedContexts"]) == 1 def 
test_get_params_override_with_default_values(self): params = AssumeRoleParameters() override_params = params.get_params_override() assert override_params == {}, ( "get_params_override should return empty dict when all values are None" ) def test_get_params_override_excludes_none_values(self): params = AssumeRoleParameters( RoleSessionName="test-session", DurationSeconds=None, ExternalId=None, ) override_params = params.get_params_override() assert "RoleSessionName" in override_params assert "DurationSeconds" not in override_params assert "ExternalId" not in override_params assert override_params["RoleSessionName"] == "test-session" def test_hash_with_nested_structures(self): params1 = AssumeRoleParameters( Tags=[ {"Key": "Project", "Value": "MyProject"}, {"Key": "Environment", "Value": "Production"}, ], PolicyArns=[{"arn": "arn:aws:iam::aws:policy/ReadOnlyAccess"}], ) params2 = AssumeRoleParameters( Tags=[ {"Key": "Project", "Value": "MyProject"}, {"Key": "Environment", "Value": "Production"}, ], PolicyArns=[{"arn": "arn:aws:iam::aws:policy/ReadOnlyAccess"}], ) params3 = AssumeRoleParameters( Tags=[ {"Key": "Project", "Value": "MyProject"}, {"Key": "Environment", "Value": "Development"}, ], PolicyArns=[{"arn": "arn:aws:iam::aws:policy/ReadOnlyAccess"}], ) # Same parameters should have same hash assert hash(params1) == hash(params2) # Different parameters should have different hash assert hash(params1) != hash(params3) def test_hash_with_different_parameters(self): params1 = AssumeRoleParameters(RoleSessionName="session1") params2 = AssumeRoleParameters(RoleSessionName="session2") params3 = AssumeRoleParameters(DurationSeconds=3600) params4 = AssumeRoleParameters(DurationSeconds=7200) # Different RoleSessionName should have different hash assert hash(params1) != hash(params2) # Different DurationSeconds should have different hash assert hash(params3) != hash(params4) # Different field types should have different hash assert hash(params1) != hash(params3) def 
test_hash_with_empty_parameters(self): params1 = AssumeRoleParameters() params2 = AssumeRoleParameters() # Empty parameters should have same hash assert hash(params1) == hash(params2) def test_hash_with_none_and_empty_lists(self): params1 = AssumeRoleParameters(Tags=None, TransitiveTagKeys=None) params2 = AssumeRoleParameters(Tags=None, TransitiveTagKeys=None) params3 = AssumeRoleParameters(Tags=[], TransitiveTagKeys=[]) # None values should have same hash assert hash(params1) == hash(params2) # Empty list should have different hash than None assert hash(params1) != hash(params3)
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/integrations/prefect-aws/tests/test_assume_role_parameters.py", "license": "Apache License 2.0", "lines": 220, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:tests/server/orchestration/test_validate_deployment_concurrency_at_running.py
""" Tests for the ValidateDeploymentConcurrencyAtRunning orchestration rule. """ import contextlib import datetime from uuid import UUID, uuid4 from sqlalchemy.ext.asyncio import AsyncSession import prefect.server.models as models import prefect.server.schemas as schemas from prefect.server.concurrency.lease_storage import ( ConcurrencyLeaseHolder, ConcurrencyLimitLeaseMetadata, get_concurrency_lease_storage, ) from prefect.server.database import orm_models from prefect.server.orchestration.core_policy import ( CoreFlowPolicy, ValidateDeploymentConcurrencyAtRunning, ) from prefect.server.schemas import states from prefect.server.schemas.core import ConcurrencyLimitStrategy from prefect.server.schemas.responses import SetStateStatus class TestValidateDeploymentConcurrencyAtRunning: """Tests for ValidateDeploymentConcurrencyAtRunning orchestration rule.""" async def create_deployment_with_concurrency_limit( self, session: AsyncSession, limit: int, flow: orm_models.Flow, grace_period: int = 600, collision_strategy: ConcurrencyLimitStrategy = ConcurrencyLimitStrategy.CANCEL_NEW, ) -> orm_models.Deployment: """Helper to create a deployment with a concurrency limit.""" deployment_schema = schemas.core.Deployment( name=f"test-deployment-{uuid4()}", flow_id=flow.id, concurrency_limit=limit, concurrency_options={ "collision_strategy": collision_strategy.value, "grace_period_seconds": grace_period, }, ) deployment = await models.deployments.create_deployment( session=session, deployment=deployment_schema, ) await session.flush() return deployment async def test_copies_lease_id_when_no_validation_needed( self, session: AsyncSession, initialize_orchestration, flow: orm_models.Flow, ): """Test that lease ID is copied when there's no lease to validate.""" deployment = await self.create_deployment_with_concurrency_limit( session, 1, flow ) running_transition = (states.StateType.PENDING, states.StateType.RUNNING) # Create a flow run context without a lease ctx = await 
initialize_orchestration( session, "flow", *running_transition, deployment_id=deployment.id, initial_details={}, # No lease ID ) async with contextlib.AsyncExitStack() as stack: ctx = await stack.enter_async_context( ValidateDeploymentConcurrencyAtRunning(ctx, *running_transition) ) await ctx.validate_proposed_state() assert ctx.response_status == SetStateStatus.ACCEPT assert ctx.proposed_state is not None assert ctx.proposed_state.state_details.deployment_concurrency_lease_id is None async def test_renews_lease_when_valid( self, session: AsyncSession, initialize_orchestration, flow: orm_models.Flow, ): """Test that a valid lease is renewed successfully.""" deployment = await self.create_deployment_with_concurrency_limit( session, 1, flow, grace_period=60 ) running_transition = (states.StateType.PENDING, states.StateType.RUNNING) # Create a lease assert deployment.concurrency_limit_id is not None lease_storage = get_concurrency_lease_storage() lease = await lease_storage.create_lease( resource_ids=[deployment.concurrency_limit_id], ttl=datetime.timedelta(seconds=60), metadata=ConcurrencyLimitLeaseMetadata( slots=1, holder=ConcurrencyLeaseHolder(type="flow_run", id=str(uuid4())), ), ) ctx = await initialize_orchestration( session, "flow", *running_transition, deployment_id=deployment.id, initial_details={"deployment_concurrency_lease_id": str(lease.id)}, client_version="3.5.0", ) async with contextlib.AsyncExitStack() as stack: ctx = await stack.enter_async_context( ValidateDeploymentConcurrencyAtRunning(ctx, *running_transition) ) await ctx.validate_proposed_state() assert ctx.response_status == SetStateStatus.ACCEPT assert ctx.proposed_state is not None assert ( ctx.proposed_state.state_details.deployment_concurrency_lease_id == lease.id ) async def test_reacquires_slot_after_lease_expiry( self, session: AsyncSession, initialize_orchestration, flow: orm_models.Flow, ): """Test that a slot is re-acquired when lease has expired.""" deployment = await 
self.create_deployment_with_concurrency_limit( session, 1, flow ) running_transition = (states.StateType.PENDING, states.StateType.RUNNING) # Create a lease and immediately expire it by revoking assert deployment.concurrency_limit_id is not None lease_storage = get_concurrency_lease_storage() lease = await lease_storage.create_lease( resource_ids=[deployment.concurrency_limit_id], ttl=datetime.timedelta(seconds=60), metadata=ConcurrencyLimitLeaseMetadata( slots=1, holder=ConcurrencyLeaseHolder(type="flow_run", id=str(uuid4())), ), ) # Revoke the lease to simulate expiry await lease_storage.revoke_lease(lease.id) # Decrement the active slots to simulate the lease reaper cleanup await models.concurrency_limits_v2.bulk_decrement_active_slots( session=session, concurrency_limit_ids=[deployment.concurrency_limit_id], slots=1, ) ctx = await initialize_orchestration( session, "flow", *running_transition, deployment_id=deployment.id, initial_details={"deployment_concurrency_lease_id": str(lease.id)}, client_version="3.5.0", ) async with contextlib.AsyncExitStack() as stack: ctx = await stack.enter_async_context( ValidateDeploymentConcurrencyAtRunning(ctx, *running_transition) ) await ctx.validate_proposed_state() assert ctx.response_status == SetStateStatus.ACCEPT assert ctx.proposed_state is not None # Should have a new lease ID after re-acquisition new_lease_id = ctx.proposed_state.state_details.deployment_concurrency_lease_id assert new_lease_id is not None assert new_lease_id != lease.id async def test_cancels_when_no_slots_available( self, session: AsyncSession, initialize_orchestration, flow: orm_models.Flow, ): """Test that flow is cancelled when no slots available after lease expiry.""" deployment = await self.create_deployment_with_concurrency_limit( session, 1, flow ) running_transition = (states.StateType.PENDING, states.StateType.RUNNING) # Create first lease and let flow1 acquire a slot assert deployment.concurrency_limit_id is not None # Flow1 initially had a 
slot await models.concurrency_limits_v2.bulk_increment_active_slots( session=session, concurrency_limit_ids=[deployment.concurrency_limit_id], slots=1, ) lease_storage = get_concurrency_lease_storage() lease1 = await lease_storage.create_lease( resource_ids=[deployment.concurrency_limit_id], ttl=datetime.timedelta(seconds=60), metadata=ConcurrencyLimitLeaseMetadata( slots=1, holder=ConcurrencyLeaseHolder(type="flow_run", id=str(uuid4())), ), ) # Simulate lease expiry by revoking it (but slot remains occupied by another flow) await lease_storage.revoke_lease(lease1.id) # Keep the slot occupied to simulate another flow taking it ctx = await initialize_orchestration( session, "flow", *running_transition, deployment_id=deployment.id, initial_details={"deployment_concurrency_lease_id": str(lease1.id)}, client_version="3.5.0", ) async with contextlib.AsyncExitStack() as stack: ctx = await stack.enter_async_context( ValidateDeploymentConcurrencyAtRunning(ctx, *running_transition) ) await ctx.validate_proposed_state() assert ctx.response_status == SetStateStatus.REJECT assert ctx.validated_state is not None assert ctx.validated_state.type == states.StateType.CANCELLED assert ctx.validated_state.message is not None assert "concurrency slot lost" in ctx.validated_state.message.lower() async def test_regression_concurrent_execution_prevention( self, session: AsyncSession, initialize_orchestration, flow: orm_models.Flow, ): """ Regression test for the core bug: Ensure no concurrency violation occurs when a lease expires and another flow takes the slot. Scenario: 1. Flow1 goes PENDING with lease 2. Lease expires (simulated by revocation) 3. Flow2 acquires the freed slot 4. Flow1 tries to go RUNNING 5. 
Flow1 should be cancelled (not allowed to run) """ deployment = await self.create_deployment_with_concurrency_limit( session, 1, flow ) running_transition = (states.StateType.PENDING, states.StateType.RUNNING) # Flow1 gets a lease and slot assert deployment.concurrency_limit_id is not None # First increment slots for flow1 await models.concurrency_limits_v2.bulk_increment_active_slots( session=session, concurrency_limit_ids=[deployment.concurrency_limit_id], slots=1, ) lease_storage = get_concurrency_lease_storage() flow1_lease = await lease_storage.create_lease( resource_ids=[deployment.concurrency_limit_id], ttl=datetime.timedelta(seconds=60), metadata=ConcurrencyLimitLeaseMetadata( slots=1, holder=ConcurrencyLeaseHolder(type="flow_run", id=str(uuid4())), ), ) # Simulate lease expiry by revoking (reaper would do this) # This also decrements the active slots await lease_storage.revoke_lease(flow1_lease.id) await models.concurrency_limits_v2.bulk_decrement_active_slots( session=session, concurrency_limit_ids=[deployment.concurrency_limit_id], slots=1, ) # Flow2 now acquires the freed slot flow2_acquired = await models.concurrency_limits_v2.bulk_increment_active_slots( session=session, concurrency_limit_ids=[deployment.concurrency_limit_id], slots=1, ) assert flow2_acquired, "Flow2 should be able to acquire the slot" # Flow2 gets its own lease await lease_storage.create_lease( resource_ids=[deployment.concurrency_limit_id], ttl=datetime.timedelta(seconds=60), metadata=ConcurrencyLimitLeaseMetadata( slots=1, holder=ConcurrencyLeaseHolder(type="flow_run", id=str(uuid4())), ), ) # Now Flow1 tries to transition to RUNNING ctx = await initialize_orchestration( session, "flow", *running_transition, deployment_id=deployment.id, initial_details={"deployment_concurrency_lease_id": str(flow1_lease.id)}, client_version="3.5.0", ) async with contextlib.AsyncExitStack() as stack: ctx = await stack.enter_async_context( ValidateDeploymentConcurrencyAtRunning(ctx, 
*running_transition) ) await ctx.validate_proposed_state() # Flow1 should be cancelled assert ctx.response_status == SetStateStatus.REJECT assert ctx.validated_state is not None assert ctx.validated_state.type == states.StateType.CANCELLED assert ctx.validated_state.message is not None assert "concurrency slot lost" in ctx.validated_state.message.lower() # Verify that only 1 slot is active (Flow2's slot) limit = await models.concurrency_limits_v2.read_concurrency_limit( session=session, concurrency_limit_id=deployment.concurrency_limit_id, ) assert limit is not None assert limit.active_slots == 1, "Only Flow2 should have an active slot" async def test_skips_validation_for_old_client_versions( self, session: AsyncSession, initialize_orchestration, flow: orm_models.Flow, ): """Test that validation is skipped for client versions < 3.4.11.""" deployment = await self.create_deployment_with_concurrency_limit( session, 1, flow ) running_transition = (states.StateType.PENDING, states.StateType.RUNNING) # Create and expire a lease assert deployment.concurrency_limit_id is not None lease_storage = get_concurrency_lease_storage() lease = await lease_storage.create_lease( resource_ids=[deployment.concurrency_limit_id], ttl=datetime.timedelta(seconds=60), metadata=ConcurrencyLimitLeaseMetadata( slots=1, holder=ConcurrencyLeaseHolder(type="flow_run", id=str(uuid4())), ), ) # Expire the lease by revoking it await lease_storage.revoke_lease(lease.id) # Old client version should skip validation and succeed ctx = await initialize_orchestration( session, "flow", *running_transition, deployment_id=deployment.id, initial_details={"deployment_concurrency_lease_id": str(lease.id)}, client_version="3.4.10", # Old version ) async with contextlib.AsyncExitStack() as stack: for rule in CoreFlowPolicy.compile_transition_rules( ctx.initial_state_type, ctx.proposed_state_type ): await stack.enter_async_context(rule(ctx, *running_transition)) await ctx.validate_proposed_state() # Should succeed 
despite expired lease because validation was skipped assert ctx.response_status == SetStateStatus.ACCEPT # The lease ID should be copied during the transition since validation was skipped assert ctx.proposed_state is not None assert ( ctx.proposed_state.state_details.deployment_concurrency_lease_id == lease.id ) # Note: RemoveDeploymentConcurrencyLeaseForOldClientVersions will remove the lease # in its after_transition hook, but that's not reflected in proposed_state async def test_default_client_version_to_2_0_0( self, session: AsyncSession, initialize_orchestration, flow: orm_models.Flow, ): """Test that missing client version defaults to 2.0.0 and skips validation.""" deployment = await self.create_deployment_with_concurrency_limit( session, 1, flow ) running_transition = (states.StateType.PENDING, states.StateType.RUNNING) # Create and expire a lease assert deployment.concurrency_limit_id is not None lease_storage = get_concurrency_lease_storage() lease = await lease_storage.create_lease( resource_ids=[deployment.concurrency_limit_id], ttl=datetime.timedelta(seconds=60), metadata=ConcurrencyLimitLeaseMetadata( slots=1, holder=ConcurrencyLeaseHolder(type="flow_run", id=str(uuid4())), ), ) # Expire the lease by revoking it await lease_storage.revoke_lease(lease.id) # No client version provided - should default to 2.0.0 ctx = await initialize_orchestration( session, "flow", *running_transition, deployment_id=deployment.id, initial_details={"deployment_concurrency_lease_id": str(lease.id)}, # No client_version in parameters ) async with contextlib.AsyncExitStack() as stack: ctx = await stack.enter_async_context( ValidateDeploymentConcurrencyAtRunning(ctx, *running_transition) ) await ctx.validate_proposed_state() # Should succeed and just copy the lease ID (no validation for old version) assert ctx.response_status == SetStateStatus.ACCEPT assert ctx.proposed_state is not None # Lease ID should be copied even though it's expired (no validation occurred) assert ( 
ctx.proposed_state.state_details.deployment_concurrency_lease_id == lease.id ) async def test_concurrent_reacquisition_only_one_succeeds( self, session: AsyncSession, initialize_orchestration, flow: orm_models.Flow, ): """Test that when multiple flows try to re-acquire after expiry, only one succeeds.""" deployment = await self.create_deployment_with_concurrency_limit( session, 1, flow ) running_transition = (states.StateType.PENDING, states.StateType.RUNNING) # Create two flows with leases assert deployment.concurrency_limit_id is not None lease_storage = get_concurrency_lease_storage() # Create leases for both flows lease1 = await lease_storage.create_lease( resource_ids=[deployment.concurrency_limit_id], ttl=datetime.timedelta(seconds=60), metadata=ConcurrencyLimitLeaseMetadata( slots=1, holder=ConcurrencyLeaseHolder(type="flow_run", id=str(uuid4())), ), ) lease2 = await lease_storage.create_lease( resource_ids=[deployment.concurrency_limit_id], ttl=datetime.timedelta(seconds=60), metadata=ConcurrencyLimitLeaseMetadata( slots=1, holder=ConcurrencyLeaseHolder(type="flow_run", id=str(uuid4())), ), ) # Expire both leases await lease_storage.revoke_lease(lease1.id) await lease_storage.revoke_lease(lease2.id) # Set up two orchestration contexts ctx1 = await initialize_orchestration( session, "flow", *running_transition, deployment_id=deployment.id, initial_details={"deployment_concurrency_lease_id": str(lease1.id)}, client_version="3.5.0", ) ctx2 = await initialize_orchestration( session, "flow", *running_transition, deployment_id=deployment.id, initial_details={"deployment_concurrency_lease_id": str(lease2.id)}, client_version="3.5.0", ) # Try to transition both to RUNNING # First one should succeed in re-acquiring async with contextlib.AsyncExitStack() as stack: ctx1 = await stack.enter_async_context( ValidateDeploymentConcurrencyAtRunning(ctx1, *running_transition) ) await ctx1.validate_proposed_state() assert ctx1.response_status == SetStateStatus.ACCEPT assert 
ctx1.proposed_state is not None new_lease_id1 = ( ctx1.proposed_state.state_details.deployment_concurrency_lease_id ) assert new_lease_id1 is not None assert new_lease_id1 != lease1.id # Got a new lease # Second one should be cancelled (no slots available) async with contextlib.AsyncExitStack() as stack: ctx2 = await stack.enter_async_context( ValidateDeploymentConcurrencyAtRunning(ctx2, *running_transition) ) await ctx2.validate_proposed_state() assert ctx2.response_status == SetStateStatus.REJECT assert ctx2.validated_state is not None assert ctx2.validated_state.type == states.StateType.CANCELLED assert ctx2.validated_state.message is not None assert "concurrency slot lost" in ctx2.validated_state.message.lower() async def test_handles_no_lease_id( self, session: AsyncSession, initialize_orchestration, flow: orm_models.Flow, ): """Test that rule handles case when there's no lease ID.""" deployment = await self.create_deployment_with_concurrency_limit( session, 1, flow ) running_transition = (states.StateType.PENDING, states.StateType.RUNNING) ctx = await initialize_orchestration( session, "flow", *running_transition, deployment_id=deployment.id, initial_details={}, # No lease ID client_version="3.5.0", ) async with contextlib.AsyncExitStack() as stack: ctx = await stack.enter_async_context( ValidateDeploymentConcurrencyAtRunning(ctx, *running_transition) ) await ctx.validate_proposed_state() assert ctx.response_status == SetStateStatus.ACCEPT assert ctx.proposed_state is not None assert ctx.proposed_state.state_details.deployment_concurrency_lease_id is None async def test_handles_no_deployment_concurrency_limit( self, session: AsyncSession, initialize_orchestration, flow: orm_models.Flow, ): """Test that rule handles deployments without concurrency limits.""" # Create deployment without concurrency limit deployment_schema = schemas.core.Deployment( name=f"test-deployment-{uuid4()}", flow_id=flow.id, ) deployment = await models.deployments.create_deployment( 
session=session, deployment=deployment_schema, ) await session.flush() running_transition = (states.StateType.PENDING, states.StateType.RUNNING) # Create a dummy lease ID (shouldn't be validated since no limit exists) fake_lease_id = str(uuid4()) ctx = await initialize_orchestration( session, "flow", *running_transition, deployment_id=deployment.id, initial_details={"deployment_concurrency_lease_id": fake_lease_id}, client_version="3.5.0", ) async with contextlib.AsyncExitStack() as stack: ctx = await stack.enter_async_context( ValidateDeploymentConcurrencyAtRunning(ctx, *running_transition) ) await ctx.validate_proposed_state() assert ctx.response_status == SetStateStatus.ACCEPT assert ctx.proposed_state is not None # Lease ID should be copied even though deployment has no limit assert ctx.proposed_state.state_details.deployment_concurrency_lease_id == UUID( fake_lease_id )
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/server/orchestration/test_validate_deployment_concurrency_at_running.py", "license": "Apache License 2.0", "lines": 536, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:src/integrations/prefect-aws/prefect_aws/plugins.py
from __future__ import annotations import ssl from typing import Any, Mapping import boto3 import sqlalchemy as sa from prefect._experimental.plugins import register_hook from prefect_aws.settings import AwsSettings @register_hook def set_database_connection_params( connection_url: str, settings: Any ) -> Mapping[str, Any]: iam_settings = AwsSettings().rds.iam if not iam_settings.enabled: return {} url = sa.engine.make_url(connection_url) connect_args = {} def get_iam_token() -> str: session = boto3.Session() region = iam_settings.region_name or session.region_name client = session.client("rds", region_name=region) token = client.generate_db_auth_token( DBHostname=url.host, Port=url.port or 5432, DBUsername=url.username, Region=region, ) return token # IAM authentication requires SSL # Use create_default_context() for secure defaults (cert verification enabled) ctx = ssl.create_default_context() connect_args["ssl"] = ctx connect_args["password"] = get_iam_token return connect_args
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/integrations/prefect-aws/prefect_aws/plugins.py", "license": "Apache License 2.0", "lines": 33, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
PrefectHQ/prefect:src/integrations/prefect-aws/tests/test_plugins.py
import ssl from unittest.mock import MagicMock import pytest from prefect_aws.plugins import set_database_connection_params @pytest.fixture def mock_boto3_session(monkeypatch): mock_session = MagicMock() monkeypatch.setattr("boto3.Session", MagicMock(return_value=mock_session)) return mock_session @pytest.fixture def mock_iam_disabled(monkeypatch): """Mock AwsSettings with IAM disabled.""" mock_settings = MagicMock() mock_settings.rds.iam.enabled = False monkeypatch.setattr("prefect_aws.plugins.AwsSettings", lambda: mock_settings) return mock_settings @pytest.fixture def mock_iam_enabled(monkeypatch): """Mock AwsSettings with IAM enabled.""" mock_settings = MagicMock() mock_settings.rds.iam.enabled = True mock_settings.rds.iam.region_name = "us-west-2" monkeypatch.setattr("prefect_aws.plugins.AwsSettings", lambda: mock_settings) return mock_settings def test_set_database_connection_params_disabled(mock_iam_disabled): params = set_database_connection_params( connection_url="postgresql+asyncpg://user:pass@localhost/db", settings=None, # Not used anymore ) assert params == {} def test_set_database_connection_params_enabled(mock_boto3_session, mock_iam_enabled): params = set_database_connection_params( connection_url="postgresql+asyncpg://user:pass@localhost:5432/db", settings=None, # Not used anymore ) assert "ssl" in params assert "password" in params assert callable(params["password"]) # Verify SSL context has secure defaults ssl_ctx = params["ssl"] assert ssl_ctx.check_hostname is True assert ssl_ctx.verify_mode == ssl.CERT_REQUIRED def test_set_database_connection_params_token_generation( mock_boto3_session, monkeypatch ): mock_client = MagicMock() mock_boto3_session.client.return_value = mock_client mock_client.generate_db_auth_token.return_value = "fake-token" mock_settings = MagicMock() mock_settings.rds.iam.enabled = True mock_settings.rds.iam.region_name = "us-east-1" monkeypatch.setattr("prefect_aws.plugins.AwsSettings", lambda: mock_settings) params = 
set_database_connection_params( connection_url="postgresql+asyncpg://myuser:pass@myhost:5432/mydb", settings=None, ) token = params["password"]() assert token == "fake-token" mock_boto3_session.client.assert_called_with("rds", region_name="us-east-1") mock_client.generate_db_auth_token.assert_called_with( DBHostname="myhost", Port=5432, DBUsername="myuser", Region="us-east-1", ) def test_set_database_connection_params_defaults_region( mock_boto3_session, monkeypatch ): mock_boto3_session.region_name = "eu-central-1" mock_settings = MagicMock() mock_settings.rds.iam.enabled = True mock_settings.rds.iam.region_name = None monkeypatch.setattr("prefect_aws.plugins.AwsSettings", lambda: mock_settings) params = set_database_connection_params( connection_url="postgresql+asyncpg://user:pass@host/db", settings=None, ) params["password"]() # Should use session region mock_boto3_session.client.assert_called_with("rds", region_name="eu-central-1")
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/integrations/prefect-aws/tests/test_plugins.py", "license": "Apache License 2.0", "lines": 80, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:src/prefect/server/services/perpetual_services.py
""" Perpetual services are background services that run on a periodic schedule using docket. This module provides the registry and scheduling logic for perpetual services, using docket's Perpetual dependency for distributed, HA-aware task scheduling. """ from __future__ import annotations import logging from dataclasses import dataclass from typing import Callable, TypeVar from docket import Docket, Perpetual from docket.dependencies import get_single_dependency_parameter_of_type from docket.execution import TaskFunction from prefect.logging import get_logger logger: logging.Logger = get_logger(__name__) EnabledGetter = Callable[[], bool] """A callable that returns whether a service is enabled.""" F = TypeVar("F", bound=TaskFunction) @dataclass class PerpetualServiceConfig: """Configuration for a perpetual service function.""" function: TaskFunction enabled_getter: EnabledGetter run_in_ephemeral: bool = False run_in_webserver: bool = False # Registry of all perpetual service functions _PERPETUAL_SERVICES: list[PerpetualServiceConfig] = [] def perpetual_service( enabled_getter: EnabledGetter, run_in_ephemeral: bool = False, run_in_webserver: bool = False, ) -> Callable[[F], F]: """ Decorator to register a perpetual service function. Args: enabled_getter: A callable that returns whether the service is enabled. run_in_ephemeral: If True, this service runs in ephemeral server mode. run_in_webserver: If True, this service runs in webserver-only mode. Example: @perpetual_service( enabled_getter=lambda: get_current_settings().server.services.scheduler.enabled, ) async def schedule_deployments(...) -> None: ... 
""" def decorator(func: F) -> F: _PERPETUAL_SERVICES.append( PerpetualServiceConfig( function=func, enabled_getter=enabled_getter, run_in_ephemeral=run_in_ephemeral, run_in_webserver=run_in_webserver, ) ) return func return decorator def get_perpetual_services( ephemeral: bool = False, webserver_only: bool = False, ) -> list[PerpetualServiceConfig]: """ Get perpetual services that should run in the current mode. Args: ephemeral: If True, only return services marked with run_in_ephemeral. webserver_only: If True, only return services marked with run_in_webserver. Returns: List of perpetual service configurations to run. """ services = [] for config in _PERPETUAL_SERVICES: if webserver_only: if not config.run_in_webserver: continue elif ephemeral: if not config.run_in_ephemeral: continue services.append(config) return services def get_enabled_perpetual_services( ephemeral: bool = False, webserver_only: bool = False, ) -> list[PerpetualServiceConfig]: """ Get perpetual services that are enabled and should run in the current mode. Args: ephemeral: If True, only return services marked with run_in_ephemeral. webserver_only: If True, only return services marked with run_in_webserver. Returns: List of enabled perpetual service configurations. """ services = [] for config in get_perpetual_services(ephemeral, webserver_only): if config.enabled_getter(): services.append(config) else: logger.debug( f"Skipping disabled perpetual service: {config.function.__name__}" ) return services async def register_and_schedule_perpetual_services( docket: Docket, ephemeral: bool = False, webserver_only: bool = False, ) -> None: """ Register enabled perpetual service functions with docket and schedule them. Disabled services are not registered at all, so they never run. Args: docket: The docket instance to register functions with. ephemeral: If True, only register services for ephemeral mode. webserver_only: If True, only register services for webserver mode. 
""" all_services = get_perpetual_services(ephemeral, webserver_only) enabled_services = get_enabled_perpetual_services(ephemeral, webserver_only) for config in enabled_services: docket.register(config.function) logger.debug(f"Registered perpetual service: {config.function.__name__}") for config in enabled_services: perpetual = get_single_dependency_parameter_of_type(config.function, Perpetual) if perpetual is None: logger.warning( f"Perpetual service {config.function.__name__} has no Perpetual " "dependency - skipping scheduling" ) continue logger.info(f"Scheduling perpetual service: {config.function.__name__}") await docket.add(config.function, key=config.function.__name__)() total = len(all_services) enabled = len(enabled_services) disabled = total - enabled logger.info( f"Perpetual services: {enabled} enabled, {disabled} disabled, {total} total" )
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/server/services/perpetual_services.py", "license": "Apache License 2.0", "lines": 132, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
PrefectHQ/prefect:tests/server/services/test_perpetual_services.py
"""Tests for the perpetual services registry and scheduling.""" from prefect.server.services.perpetual_services import ( _PERPETUAL_SERVICES, get_enabled_perpetual_services, get_perpetual_services, ) def test_db_vacuum_service_registered(): """Test that db vacuum perpetual service is registered.""" service_names = [config.function.__name__ for config in _PERPETUAL_SERVICES] assert "schedule_vacuum_tasks" in service_names def test_db_vacuum_disabled_by_default(): """Test that flow_runs vacuum is disabled by default (not in the enabled set).""" config = next( c for c in _PERPETUAL_SERVICES if c.function.__name__ == "schedule_vacuum_tasks" ) assert config.enabled_getter() is False def test_event_vacuum_service_registered(): """Test that event vacuum perpetual service is registered.""" service_names = [config.function.__name__ for config in _PERPETUAL_SERVICES] assert "schedule_event_vacuum_tasks" in service_names def test_event_vacuum_enabled_by_default(monkeypatch): """Test that event vacuum is enabled by default when event persister is also enabled.""" from prefect.settings.context import get_current_settings settings = get_current_settings() # The test suite disables event_persister globally; restore the production # default so we can verify that event vacuum is enabled when both settings # are at their defaults. 
monkeypatch.setattr(settings.server.services.event_persister, "enabled", True) config = next( c for c in _PERPETUAL_SERVICES if c.function.__name__ == "schedule_event_vacuum_tasks" ) assert config.enabled_getter() is True def test_event_vacuum_disabled_when_not_in_enabled_set(monkeypatch): """Test that event vacuum is disabled when 'events' is not in the enabled set.""" from prefect.settings.context import get_current_settings settings = get_current_settings() monkeypatch.setattr(settings.server.services.event_persister, "enabled", True) monkeypatch.setattr(settings.server.services.db_vacuum, "enabled", set()) config = next( c for c in _PERPETUAL_SERVICES if c.function.__name__ == "schedule_event_vacuum_tasks" ) assert config.enabled_getter() is False def test_event_vacuum_disabled_when_event_persister_disabled(monkeypatch): """Test that event vacuum is disabled when event persister is disabled. Operators who opted out of event processing via PREFECT_SERVER_SERVICES_EVENT_PERSISTER_ENABLED=false should not see unexpected trimming on upgrade. 
""" from prefect.settings.context import get_current_settings settings = get_current_settings() monkeypatch.setattr(settings.server.services.event_persister, "enabled", False) config = next( c for c in _PERPETUAL_SERVICES if c.function.__name__ == "schedule_event_vacuum_tasks" ) assert config.enabled_getter() is False def test_flow_runs_vacuum_enabled_when_in_enabled_set(monkeypatch): """Test that flow_runs vacuum is enabled when 'flow_runs' is in the enabled set.""" from prefect.settings.context import get_current_settings settings = get_current_settings() monkeypatch.setattr( settings.server.services.db_vacuum, "enabled", {"events", "flow_runs"} ) config = next( c for c in _PERPETUAL_SERVICES if c.function.__name__ == "schedule_vacuum_tasks" ) assert config.enabled_getter() is True def test_event_vacuum_runs_in_ephemeral_mode(): """Test that event vacuum runs in ephemeral mode (replacing EventPersister.trim()).""" config = next( c for c in _PERPETUAL_SERVICES if c.function.__name__ == "schedule_event_vacuum_tasks" ) assert config.run_in_ephemeral is True def test_cancellation_cleanup_services_registered(): """Test that cancellation cleanup perpetual services are registered.""" service_names = [config.function.__name__ for config in _PERPETUAL_SERVICES] assert "monitor_cancelled_flow_runs" in service_names assert "monitor_subflow_runs" in service_names def test_pause_expirations_service_registered(): """Test that pause expirations perpetual service is registered.""" service_names = [config.function.__name__ for config in _PERPETUAL_SERVICES] assert "monitor_expired_pauses" in service_names def test_late_runs_service_registered(): """Test that late runs perpetual service is registered.""" service_names = [config.function.__name__ for config in _PERPETUAL_SERVICES] assert "monitor_late_runs" in service_names def test_repossessor_service_registered(): """Test that repossessor perpetual service is registered.""" service_names = [config.function.__name__ for config in 
_PERPETUAL_SERVICES] assert "monitor_expired_leases" in service_names def test_foreman_service_registered(): """Test that foreman perpetual service is registered.""" service_names = [config.function.__name__ for config in _PERPETUAL_SERVICES] assert "monitor_worker_health" in service_names def test_telemetry_service_registered(): """Test that telemetry perpetual service is registered.""" service_names = [config.function.__name__ for config in _PERPETUAL_SERVICES] assert "send_telemetry_heartbeat" in service_names def test_telemetry_runs_in_all_modes(): """Test that telemetry is configured to run in ephemeral and webserver modes.""" config = next( c for c in _PERPETUAL_SERVICES if c.function.__name__ == "send_telemetry_heartbeat" ) assert config.run_in_ephemeral is True assert config.run_in_webserver is True def test_scheduler_services_registered(): """Test that scheduler perpetual services are registered.""" service_names = [config.function.__name__ for config in _PERPETUAL_SERVICES] assert "schedule_deployments" in service_names assert "schedule_recent_deployments" in service_names def test_proactive_triggers_service_registered(): """Test that proactive triggers perpetual service is registered.""" service_names = [config.function.__name__ for config in _PERPETUAL_SERVICES] assert "evaluate_proactive_triggers_periodic" in service_names def test_get_perpetual_services_returns_all_in_default_mode(): """Test that get_perpetual_services returns all services in default mode.""" services = get_perpetual_services(ephemeral=False, webserver_only=False) service_names = [config.function.__name__ for config in services] assert "monitor_cancelled_flow_runs" in service_names assert "monitor_subflow_runs" in service_names def test_get_perpetual_services_filters_ephemeral_mode(): """Test that ephemeral mode filters services correctly.""" services = get_perpetual_services(ephemeral=True, webserver_only=False) # Cancellation cleanup services are not marked for ephemeral mode 
service_names = [config.function.__name__ for config in services] assert "monitor_cancelled_flow_runs" not in service_names assert "monitor_subflow_runs" not in service_names # Proactive triggers and event vacuum ARE marked for ephemeral mode assert "evaluate_proactive_triggers_periodic" in service_names assert "schedule_event_vacuum_tasks" in service_names def test_get_perpetual_services_filters_webserver_mode(): """Test that webserver mode filters services correctly.""" services = get_perpetual_services(ephemeral=False, webserver_only=True) # Cancellation cleanup services are not marked for webserver mode service_names = [config.function.__name__ for config in services] assert "monitor_cancelled_flow_runs" not in service_names assert "monitor_subflow_runs" not in service_names def test_get_enabled_perpetual_services_respects_settings(monkeypatch): """Test that get_enabled_perpetual_services respects the enabled setting.""" from prefect.settings.context import get_current_settings settings = get_current_settings() # Enable cancellation cleanup monkeypatch.setattr(settings.server.services.cancellation_cleanup, "enabled", True) services = get_enabled_perpetual_services(ephemeral=False, webserver_only=False) service_names = [config.function.__name__ for config in services] assert "monitor_cancelled_flow_runs" in service_names assert "monitor_subflow_runs" in service_names # Disable cancellation cleanup monkeypatch.setattr(settings.server.services.cancellation_cleanup, "enabled", False) services = get_enabled_perpetual_services(ephemeral=False, webserver_only=False) service_names = [config.function.__name__ for config in services] assert "monitor_cancelled_flow_runs" not in service_names assert "monitor_subflow_runs" not in service_names
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/server/services/test_perpetual_services.py", "license": "Apache License 2.0", "lines": 162, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:tests/client/schemas/test_concurrency.py
"""Tests for concurrency schema validation.""" import pytest from pydantic import ValidationError from prefect.client.schemas.objects import ConcurrencyLimitConfig, ConcurrencyOptions class TestConcurrencyOptionsValidation: """Tests for ConcurrencyOptions grace_period_seconds validation.""" def test_grace_period_seconds_default(self): """Test that grace_period_seconds defaults to None (falls back to server setting).""" options = ConcurrencyOptions(collision_strategy="ENQUEUE") assert options.grace_period_seconds is None def test_grace_period_seconds_minimum_boundary_valid(self): """Test that grace_period_seconds=60 is valid (minimum boundary).""" options = ConcurrencyOptions( collision_strategy="ENQUEUE", grace_period_seconds=60 ) assert options.grace_period_seconds == 60 def test_grace_period_seconds_maximum_boundary_valid(self): """Test that grace_period_seconds=86400 is valid (maximum boundary).""" options = ConcurrencyOptions( collision_strategy="ENQUEUE", grace_period_seconds=86400 ) assert options.grace_period_seconds == 86400 def test_grace_period_seconds_below_minimum_invalid(self): """Test that grace_period_seconds=59 raises validation error.""" with pytest.raises(ValidationError) as exc_info: ConcurrencyOptions(collision_strategy="ENQUEUE", grace_period_seconds=59) assert "greater than or equal to 60" in str(exc_info.value) def test_grace_period_seconds_above_maximum_invalid(self): """Test that grace_period_seconds=86401 raises validation error.""" with pytest.raises(ValidationError) as exc_info: ConcurrencyOptions(collision_strategy="ENQUEUE", grace_period_seconds=86401) assert "less than or equal to 86400" in str(exc_info.value) def test_grace_period_seconds_mid_range_valid(self): """Test that a mid-range value is valid.""" options = ConcurrencyOptions( collision_strategy="ENQUEUE", grace_period_seconds=3600 ) assert options.grace_period_seconds == 3600 class TestConcurrencyLimitConfigValidation: """Tests for ConcurrencyLimitConfig grace_period_seconds 
validation.""" def test_grace_period_seconds_default_none(self): """Test that grace_period_seconds defaults to None.""" config = ConcurrencyLimitConfig(limit=1) assert config.grace_period_seconds is None def test_grace_period_seconds_minimum_boundary_valid(self): """Test that grace_period_seconds=60 is valid (minimum boundary).""" config = ConcurrencyLimitConfig(limit=1, grace_period_seconds=60) assert config.grace_period_seconds == 60 def test_grace_period_seconds_maximum_boundary_valid(self): """Test that grace_period_seconds=86400 is valid (maximum boundary).""" config = ConcurrencyLimitConfig(limit=1, grace_period_seconds=86400) assert config.grace_period_seconds == 86400 def test_grace_period_seconds_below_minimum_invalid(self): """Test that grace_period_seconds=59 raises validation error.""" with pytest.raises(ValidationError) as exc_info: ConcurrencyLimitConfig(limit=1, grace_period_seconds=59) assert "greater than or equal to 60" in str(exc_info.value) def test_grace_period_seconds_above_maximum_invalid(self): """Test that grace_period_seconds=86401 raises validation error.""" with pytest.raises(ValidationError) as exc_info: ConcurrencyLimitConfig(limit=1, grace_period_seconds=86401) assert "less than or equal to 86400" in str(exc_info.value) def test_grace_period_seconds_mid_range_valid(self): """Test that a mid-range value is valid.""" config = ConcurrencyLimitConfig(limit=1, grace_period_seconds=3600) assert config.grace_period_seconds == 3600 def test_collision_strategy_default(self): """Test that collision_strategy defaults to ENQUEUE.""" config = ConcurrencyLimitConfig(limit=1) assert config.collision_strategy == "ENQUEUE" def test_collision_strategy_cancel_new(self): """Test that collision_strategy can be set to CANCEL_NEW.""" config = ConcurrencyLimitConfig(limit=1, collision_strategy="CANCEL_NEW") assert config.collision_strategy == "CANCEL_NEW" class TestConcurrencyOptionsSerialization: """Tests for ConcurrencyOptions serialization behavior. 
Regression tests for https://github.com/PrefectHQ/prefect/issues/19778 """ def test_grace_period_seconds_excluded_when_unset(self): """Test that grace_period_seconds is excluded when not explicitly set. When grace_period_seconds is not provided, model_dump(exclude_unset=True) should not include it in the output. This prevents API 422 errors. """ options = ConcurrencyOptions(collision_strategy="ENQUEUE") payload = options.model_dump(mode="json", exclude_unset=True) assert "grace_period_seconds" not in payload def test_grace_period_seconds_included_when_set(self): """Test that grace_period_seconds IS included when explicitly set.""" options = ConcurrencyOptions( collision_strategy="ENQUEUE", grace_period_seconds=120 ) payload = options.model_dump(mode="json", exclude_unset=True) assert "grace_period_seconds" in payload assert payload["grace_period_seconds"] == 120
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/client/schemas/test_concurrency.py", "license": "Apache License 2.0", "lines": 94, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:src/prefect/_states.py
""" Private module containing sync versions of state functions. These functions are used internally by the sync task engine to avoid run_coro_as_sync overhead on Windows. """ from __future__ import annotations import datetime import sys import uuid from types import GeneratorType from typing import TYPE_CHECKING, Any, Optional import anyio import httpx import sniffio from prefect.client.schemas.objects import State, StateType from prefect.exceptions import MissingContextError, TerminationSignal from prefect.logging.loggers import get_logger, get_run_logger from prefect.states import ( Completed, Crashed, Failed, StateGroup, format_exception, is_state_iterable, ) from prefect.utilities.collections import ensure_iterable if TYPE_CHECKING: import logging from prefect.results import ( R, ResultStore, ) logger: "logging.Logger" = get_logger("states") def exception_to_crashed_state_sync( exc: BaseException, result_store: Optional["ResultStore"] = None, ) -> State: """ Sync version of exception_to_crashed_state. Takes an exception that occurs _outside_ of user code and converts it to a 'Crash' exception with a 'Crashed' state. """ state_message = None # Check for anyio cancellation - but only if we're in an async context. # anyio.get_cancelled_exc_class() requires an active async backend; # calling it from sync-only code raises an error. Since anyio cancellation # exceptions can only occur in async contexts anyway, we can safely skip # this check when no async backend is running. # anyio 4.12+ raises anyio.NoEventLoopError, older versions raise # sniffio.AsyncLibraryNotFoundError. Catch both for compatibility. 
# TODO: remove sniffio handling once anyio lower bound is >=4.12.1 try: cancelled_exc_class = anyio.get_cancelled_exc_class() is_anyio_cancelled = isinstance(exc, cancelled_exc_class) except (sniffio.AsyncLibraryNotFoundError, anyio.NoEventLoopError): is_anyio_cancelled = False if is_anyio_cancelled: state_message = "Execution was cancelled by the runtime environment." elif isinstance(exc, KeyboardInterrupt): state_message = "Execution was aborted by an interrupt signal." elif isinstance(exc, TerminationSignal): state_message = "Execution was aborted by a termination signal." elif isinstance(exc, SystemExit): state_message = "Execution was aborted by Python system exit call." elif isinstance(exc, (httpx.TimeoutException, httpx.ConnectError)): try: request: httpx.Request = exc.request except RuntimeError: # The request property is not set state_message = ( "Request failed while attempting to contact the server:" f" {format_exception(exc)}" ) else: # TODO: We can check if this is actually our API url state_message = f"Request to {request.url} failed: {format_exception(exc)}." else: state_message = ( "Execution was interrupted by an unexpected exception:" f" {format_exception(exc)}" ) if result_store: key = uuid.uuid4().hex data = result_store.create_result_record(exc, key=key) else: # Attach the exception for local usage, will not be available when retrieved # from the API data = exc return Crashed(message=state_message, data=data) def exception_to_failed_state_sync( exc: Optional[BaseException] = None, result_store: Optional["ResultStore"] = None, write_result: bool = False, **kwargs: Any, ) -> State[BaseException]: """ Sync version of exception_to_failed_state. Convenience function for creating `Failed` states from exceptions """ try: local_logger = get_run_logger() except MissingContextError: local_logger = logger if not exc: _, exc, _ = sys.exc_info() if exc is None: raise ValueError( "Exception was not passed and no active exception could be found." 
) else: pass if result_store: key = uuid.uuid4().hex data = result_store.create_result_record(exc, key=key) if write_result: try: result_store.persist_result_record(data) except Exception as nested_exc: local_logger.warning( "Failed to write result: %s Execution will continue, but the result has not been written", nested_exc, ) else: # Attach the exception for local usage, will not be available when retrieved # from the API data = exc existing_message = kwargs.pop("message", "") if existing_message and not existing_message.endswith(" "): existing_message += " " # TODO: Consider if we want to include traceback information, it is intentionally # excluded from messages for now message = existing_message + format_exception(exc) state = Failed(data=data, message=message, **kwargs) state.state_details.retriable = False return state def return_value_to_state_sync( retval: "R", result_store: "ResultStore", key: Optional[str] = None, expiration: Optional[datetime.datetime] = None, write_result: bool = False, ) -> "State[R]": """ Sync version of return_value_to_state. Given a return value from a user's function, create a `State` the run should be placed in. - If data is returned, we create a 'COMPLETED' state with the data - If a single, manually created state is returned, we use that state as given (manual creation is determined by the lack of ids) - If an upstream state or iterable of upstream states is returned, we apply the aggregate rule The aggregate rule says that given multiple states we will determine the final state such that: - If any states are not COMPLETED the final state is FAILED - If all of the states are COMPLETED the final state is COMPLETED - The states will be placed in the final state `data` attribute Callers should resolve all futures into states before passing return values to this function. 
""" from prefect.results import ( ResultRecord, ResultRecordMetadata, ) try: local_logger = get_run_logger() except MissingContextError: local_logger = logger if ( isinstance(retval, State) # Check for manual creation and not retval.state_details.flow_run_id and not retval.state_details.task_run_id ): state = retval # Unless the user has already constructed a result explicitly, use the store # to update the data to the correct type if not isinstance(state.data, (ResultRecord, ResultRecordMetadata)): result_record = result_store.create_result_record( state.data, key=key, expiration=expiration, ) if write_result: try: result_store.persist_result_record(result_record) except Exception as exc: local_logger.warning( "Encountered an error while persisting result: %s Execution will continue, but the result has not been persisted", exc, ) state.data = result_record return state # Determine a new state from the aggregate of contained states if isinstance(retval, State) or is_state_iterable(retval): states = StateGroup(ensure_iterable(retval)) # Determine the new state type if states.all_completed(): new_state_type = StateType.COMPLETED elif states.any_cancelled(): new_state_type = StateType.CANCELLED elif states.any_paused(): new_state_type = StateType.PAUSED else: new_state_type = StateType.FAILED # Generate a nice message for the aggregate if states.all_completed(): message = "All states completed." elif states.any_cancelled(): message = f"{states.cancelled_count}/{states.total_count} states cancelled." elif states.any_paused(): message = f"{states.paused_count}/{states.total_count} states paused." elif states.any_failed(): message = f"{states.fail_count}/{states.total_count} states failed." elif not states.all_final(): message = ( f"{states.not_final_count}/{states.total_count} states are not final." 
) else: message = "Given states: " + states.counts_message() # TODO: We may actually want to set the data to a `StateGroup` object and just # allow it to be unpacked into a tuple and such so users can interact with # it result_record = result_store.create_result_record( retval, key=key, expiration=expiration, ) if write_result: try: result_store.persist_result_record(result_record) except Exception as exc: local_logger.warning( "Encountered an error while persisting result: %s Execution will continue, but the result has not been persisted", exc, ) return State( type=new_state_type, message=message, data=result_record, ) # Generators aren't portable, implicitly convert them to a list. if isinstance(retval, GeneratorType): data = list(retval) else: data = retval # Otherwise, they just gave data and this is a completed retval if isinstance(data, ResultRecord): return Completed(data=data) else: result_record = result_store.create_result_record( data, key=key, expiration=expiration, ) if write_result: try: result_store.persist_result_record(result_record) except Exception as exc: local_logger.warning( "Encountered an error while persisting result: %s Execution will continue, but the result has not been persisted", exc, ) return Completed(data=result_record)
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/_states.py", "license": "Apache License 2.0", "lines": 263, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
PrefectHQ/prefect:examples/per_worker_task_concurrency.py
# --- # title: Per-worker task concurrency # description: Use Global Concurrency Limits to control how many tasks can use a worker's local resources simultaneously. # icon: layer-group # dependencies: ["prefect"] # keywords: ["concurrency", "workers", "advanced"] # draft: false # --- # # When a worker runs multiple flow runs concurrently, those flow runs share the # worker machine's resources—CPU, memory, GPU, local software. Some tasks may # need to limit how many can run at once to avoid overloading these resources. # # **The problem:** Using `--limit 1` on the worker forces entire flow runs to be # sequential. But often only specific tasks need limits—other tasks could overlap. # # **The solution:** Use [Global Concurrency Limits](https://docs.prefect.io/v3/develop/global-concurrency-limits) # with worker-specific names. GCLs are coordinated by the Prefect server, so they # work across the separate subprocesses that each flow run executes in. # # ## Example: Image processing with ML inference # # Consider a pipeline that processes images through an ML model: # # 1. **Download image** — network-bound, can run many in parallel # 2. **Run ML model** — uses GPU memory, need to limit concurrent runs # 3. **Save results** — disk I/O, can run many in parallel # # Without limits, if 5 flow runs hit the ML step simultaneously, they'd all try # to load the model into GPU memory and crash. With per-worker limits, only 1-2 # run at a time while others wait. # # ## Setup import os import time from prefect import flow, get_run_logger, task from prefect.concurrency.sync import concurrency def get_worker_id() -> str: """ Get worker identity from environment. Set this when starting the worker: WORKER_ID=gpu-1 prefect worker start --pool ml-pool """ return os.getenv("WORKER_ID", "default") # ## Tasks without limits # # These tasks don't contend for limited resources, so they run freely. @task def download_image(image_id: int) -> dict: """Download an image from storage. 
Network-bound, no local resource contention.""" logger = get_run_logger() logger.info(f"Image {image_id}: downloading...") time.sleep(1) # simulate download return {"image_id": image_id, "path": f"/tmp/image_{image_id}.jpg"} @task def save_results(data: dict) -> str: """Save processed results. Fast operation, no limits needed.""" logger = get_run_logger() logger.info(f"Image {data['image_id']}: saving results...") time.sleep(0.5) return f"processed-{data['image_id']}" # ## Task with per-worker limit # # This task uses a local resource (GPU) that can only handle limited concurrent # usage. The limit is scoped to this worker so each machine has independent limits. @task def run_ml_model(data: dict) -> dict: """ Run image through ML model. GPU memory is limited—only 1-2 can run at once per worker machine. Uses a Global Concurrency Limit scoped to this worker's identity. """ logger = get_run_logger() worker_id = get_worker_id() image_id = data["image_id"] # Limit key includes worker ID: each worker has its own limit with concurrency(f"gpu:{worker_id}", occupy=1): logger.info(f"Image {image_id}: running ML inference (GPU)...") time.sleep(3) # simulate model inference return {**data, "predictions": [0.9, 0.1]} # ## The flow @flow(log_prints=True) def process_image(image_id: int = 1) -> str: """ Process an image through the ML pipeline. When multiple instances run concurrently on the same worker: - download and save tasks overlap freely - run_ml_model tasks are limited by the per-worker GPU concurrency limit """ logger = get_run_logger() logger.info(f"Processing image {image_id} on worker '{get_worker_id()}'") image = download_image(image_id) predictions = run_ml_model(image) result = save_results(predictions) return result # ## Running the example # # ### 1. Create a Global Concurrency Limit for each worker # # Each worker machine needs its own limit. The limit value controls how many # ML tasks can run simultaneously on that machine. 
# # ```bash # # GPU machine 1: allow 2 concurrent ML tasks # prefect gcl create gpu:gpu-1 --limit 2 # # # GPU machine 2: allow 2 concurrent ML tasks # prefect gcl create gpu:gpu-2 --limit 2 # ``` # # ### 2. Create work pool and deploy # # ```bash # prefect work-pool create ml-pool --type process # prefect deploy --all # ``` # # ### 3. Start workers with unique IDs # # Each worker needs a unique ID that matches its GCL name: # # ```bash # # Machine 1 # WORKER_ID=gpu-1 prefect worker start --pool ml-pool --limit 10 # # # Machine 2 # WORKER_ID=gpu-2 prefect worker start --pool ml-pool --limit 10 # ``` # # The `--limit 10` allows up to 10 concurrent flow runs, but the GCL ensures # only 2 are in the ML step at any time. # # ### 4. Submit jobs # # ```bash # for i in {1..20}; do # prefect deployment run process-image/process-image --param image_id=$i --timeout 0 # done # ``` # # ## What you'll see # # With 10 concurrent flow runs on a worker: # # - **Download tasks** from all 10 start immediately and overlap # - **ML tasks** queue up—only 2 run at a time (per the GCL limit) # - **Save tasks** run as soon as their ML task completes # # Flow runs aren't blocked entirely—just the resource-intensive step is limited. # This maximizes throughput while protecting the GPU from overload. # # ## Why this works # # 1. **GCLs are server-coordinated** — The Prefect server tracks who holds what # limit. It doesn't matter that flow runs are separate processes. # # 2. **Worker-specific names** — By including `worker_id` in the limit name, # each worker machine has independent limits. GPU-1's limit doesn't affect GPU-2. # # 3. **Selective application** — Only the tasks that need limits acquire them. # Everything else runs at full concurrency. 
# # ## Adapting this pattern # # The same pattern works for any local resource constraint: # # - **Software licenses**: A tool that only allows N concurrent instances # - **Memory-intensive processing**: Limit concurrent jobs to avoid OOM # - **Disk I/O**: Limit concurrent writes to a local SSD # - **Local services**: A sidecar database with connection limits # # Just change the limit name and value to match your constraint. # # ## Related docs # # - [Global Concurrency Limits](https://docs.prefect.io/v3/develop/global-concurrency-limits) # - [Workers](https://docs.prefect.io/v3/deploy/infrastructure-concepts/workers) if __name__ == "__main__": process_image(image_id=1)
{ "repo_id": "PrefectHQ/prefect", "file_path": "examples/per_worker_task_concurrency.py", "license": "Apache License 2.0", "lines": 179, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
PrefectHQ/prefect:examples/ai_database_cleanup_with_approval.py
# --- # title: Database Cleanup with Human or AI Approval # description: Build database cleanup workflows that evolve from human oversight to AI autonomy. # icon: database # keywords: ["ai", "agents", "pydantic-ai", "database-maintenance", "cleanup", "automation", "approval-workflow", "mcp"] # github_url: https://github.com/zzstoatzz/prefect-mcp-server-demo # order: 7 # --- # # Database cleanup is critical for self-hosted Prefect deployments (see [database maintenance guide](/v3/advanced/database-maintenance)), # but it's risky: too automated and you might delete important data, too manual and it becomes a constant burden. # # This example shows how to build a cleanup workflow that evolves with your confidence: # # - **Start with human approval**: Preview what will be deleted, pause the flow, and manually approve/reject via a UI form # - **Graduate to AI autonomy**: Switch to an AI agent that investigates system health using Prefect MCP tools and returns structured decisions with confidence scores # # Build trust incrementally by monitoring decisions in lower-risk environments before enabling AI autonomy in production. # # For a full deployment example with scheduling and environment configuration, see: # [github.com/zzstoatzz/prefect-mcp-server-demo](https://github.com/zzstoatzz/prefect-mcp-server-demo) # # ## Setup # # ```bash # # For human approval only # uv add prefect # # # For AI approval, add pydantic-ai # uv add 'pydantic-ai[prefect]' # export ANTHROPIC_API_KEY='your-key' # ``` # # The Prefect MCP server provides AI agents with read-only tools for investigating your Prefect instance. # See [How to use the Prefect MCP server](/v3/how-to-guides/ai/use-prefect-mcp-server) for setup. 
from __future__ import annotations import asyncio from datetime import datetime, timedelta, timezone from typing import Literal from pydantic import BaseModel, Field from pydantic_ai import Agent from pydantic_ai.durable_exec.prefect import PrefectAgent, TaskConfig from pydantic_ai.models.anthropic import AnthropicModel from pydantic_ai_mcp import MCPServerStdio from prefect import flow, get_client from prefect.client.schemas.filters import ( FlowRunFilter, FlowRunFilterStartTime, FlowRunFilterStateName, ) from prefect.exceptions import ObjectNotFound from prefect.flow_runs import pause_flow_run from prefect.input import RunInput # ## Configuration: Make Cleanup Policy Explicit # # Instead of scattering configuration across your code, define it as a structured Pydantic model. # This becomes a UI form automatically - see [form building guide](/v3/advanced/form-building). class RetentionConfig(BaseModel): """Define what to clean and how.""" retention_period: timedelta = Field( default=timedelta(days=30), description="How far back to keep flow runs" ) states_to_clean: list[str] = Field( default=["Completed", "Failed", "Cancelled"], description="Which states to clean", ) batch_size: int = Field(default=100, ge=10, le=1000) dry_run: bool = Field(default=False, description="Preview without deleting") approval_type: Literal["human", "ai"] = Field( default="human", description="Human form or AI agent approval" ) # <AccordionGroup> # # <Accordion title="Human Approval: Pause and Review"> # # When using `approval_type="human"`, the flow pauses and shows a form in the UI. 
class CleanupApproval(RunInput): """Human approval form for cleanup operations.""" approve: bool = Field(default=False) notes: str = Field(default="", description="Why approve/reject?") @flow(name="human-approval") def get_human_approval(preview: str, count: int) -> tuple[bool, str]: """Pause and wait for human decision via UI form.""" print(f"⏸️ pausing for human review of {count} flow runs...") approval = pause_flow_run( wait_for_input=CleanupApproval.with_initial_data( description=f"**Preview ({count} runs):**\n{preview}" ), timeout=3600, ) return approval.approve, approval.notes # </Accordion> # # <Accordion title="AI Approval: Autonomous Investigation"> # # When using `approval_type="ai"`, a pydantic-ai agent investigates using Prefect MCP tools to decide if cleanup is safe. AGENT_PROMPT = """you are a prefect infrastructure operations agent reviewing a proposed database cleanup. use the prefect mcp tools to investigate system health: - query recent flow run patterns - check deployment schedules - review system status return your decision with confidence (0-1), reasoning, and any concerns. 
approve routine cleanup unless you detect risks like ongoing incidents or critical deployments needing history.""" class CleanupDecision(BaseModel): """Structured AI decision.""" approved: bool confidence: float = Field(ge=0.0, le=1.0) reasoning: str concerns: list[str] | None = None def create_cleanup_agent() -> PrefectAgent[None, CleanupDecision]: """Create AI agent with Prefect MCP tools for autonomous approval.""" # Connect to Prefect MCP server - provides read-only Prefect tools mcp_server = MCPServerStdio( "prefect", "uvx", args=["--from", "prefect-mcp", "prefect-mcp-server"] ) agent = Agent( model=AnthropicModel("claude-sonnet-4-5-20250929"), output_type=CleanupDecision, system_prompt=AGENT_PROMPT, mcp_servers=[mcp_server], ) # Wrap with PrefectAgent for retry/timeout handling return PrefectAgent( agent, model_task_config=TaskConfig(retries=2, timeout_seconds=120.0), ) @flow(name="ai-approval", log_prints=True) async def get_ai_approval( preview: str, count: int, config: RetentionConfig ) -> tuple[bool, str]: """Use AI agent to autonomously decide approval.""" print("🤖 requesting ai agent decision...") agent = create_cleanup_agent() context = f""" proposed cleanup: - retention: {config.retention_period} - states: {", ".join(config.states_to_clean)} - count: {count} flow runs preview: {preview} investigate using your prefect mcp tools and decide if safe to proceed. 
""" result = await agent.run(context) decision = result.output print(f"decision: {'✅ approved' if decision.approved else '❌ rejected'}") print(f"confidence: {decision.confidence:.0%}") print(f"reasoning: {decision.reasoning}") return decision.approved, decision.reasoning # </Accordion> # # <Accordion title="Main Cleanup Flow"> @flow(name="database-cleanup", log_prints=True) async def database_cleanup_flow(config: RetentionConfig | None = None) -> dict: """Database cleanup with configurable approval workflow.""" if config is None: config = RetentionConfig() print(f"🚀 starting cleanup (approval: {config.approval_type})") # Fetch flow runs matching retention policy async with get_client() as client: cutoff = datetime.now(timezone.utc) - config.retention_period flow_runs = await client.read_flow_runs( flow_run_filter=FlowRunFilter( start_time=FlowRunFilterStartTime(before_=cutoff), state=FlowRunFilterStateName(any_=config.states_to_clean), ), limit=config.batch_size * 5, ) if not flow_runs: print("✨ nothing to clean") return {"status": "no_action", "deleted": 0} # Preview what will be deleted preview = "\n".join( f"- {r.name} ({r.state.type.value}) - {r.start_time}" for r in flow_runs[:5] ) if len(flow_runs) > 5: preview += f"\n... 
and {len(flow_runs) - 5} more" print(f"\n📋 preview:\n{preview}\n") # Get approval (human or AI based on config) if config.approval_type == "human": approved, notes = get_human_approval(preview, len(flow_runs)) else: approved, notes = await get_ai_approval(preview, len(flow_runs), config) if not approved: print(f"❌ cleanup rejected: {notes}") return {"status": "rejected", "reason": notes} print(f"✅ cleanup approved: {notes}") if config.dry_run: print("🔍 dry run - no deletions") return {"status": "dry_run", "would_delete": len(flow_runs)} # Perform deletion with batching and rate limiting deleted = 0 async with get_client() as client: for i in range(0, len(flow_runs), config.batch_size): batch = flow_runs[i : i + config.batch_size] for run in batch: try: await client.delete_flow_run(run.id) deleted += 1 except ObjectNotFound: # Already deleted (e.g., by concurrent cleanup) - treat as success deleted += 1 except Exception as e: print(f"failed to delete {run.id}: {e}") await asyncio.sleep(0.1) # rate limiting print(f"✅ deleted {deleted}/{len(flow_runs)} flow runs") return {"status": "completed", "deleted": deleted} # </Accordion> # # </AccordionGroup> # # ## Deployment Examples if __name__ == "__main__": # Start with human approval in production prod_config = RetentionConfig( retention_period=timedelta(days=30), dry_run=False, approval_type="human", ) # Graduate to AI approval in dev/staging dev_config = RetentionConfig( retention_period=timedelta(minutes=5), dry_run=False, approval_type="ai", # requires ANTHROPIC_API_KEY ) database_cleanup_flow.serve( name="database-cleanup-deployment", tags=["database-maintenance", "cleanup"], ) # ## Related Documentation # # - [Database Maintenance Guide](/v3/advanced/database-maintenance) - SQL queries, retention strategies, VACUUM # - [Form Building](/v3/advanced/form-building) - Create validated UI forms from Pydantic models # - [Interactive Workflows](/v3/advanced/interactive) - Pause flows and wait for human input # - [Prefect 
MCP Server](/v3/how-to-guides/ai/use-prefect-mcp-server) - Connect AI agents to Prefect # - [pydantic-ai + Prefect](https://ai.pydantic.dev/durable_execution/prefect/) - Durable AI agents with retries
{ "repo_id": "PrefectHQ/prefect", "file_path": "examples/ai_database_cleanup_with_approval.py", "license": "Apache License 2.0", "lines": 235, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
PrefectHQ/prefect:tests/concurrency/test_concurrency_slot_acquisition_with_lease_service.py
"""Tests for ConcurrencySlotAcquisitionWithLeaseService.""" import asyncio from concurrent.futures import Future from typing import Any from unittest import mock from uuid import uuid4 import pytest from httpx import HTTPStatusError, Request, Response from prefect.client.orchestration import PrefectClient, get_client from prefect.client.schemas.objects import ConcurrencyLeaseHolder from prefect.concurrency.services import ( ConcurrencySlotAcquisitionWithLeaseService, _create_empty_limits_response, _no_limits_cache, _should_use_cache, ) class ClientWrapper: """Wrapper to make mocked client work with async context manager.""" def __init__(self, client: PrefectClient): self.client = client async def __aenter__(self) -> PrefectClient: return self.client async def __aexit__(self, *args: Any) -> None: pass @pytest.fixture async def mocked_client(test_database_connection_url: str) -> Any: """Fixture providing a mocked client with increment_concurrency_slots_with_lease patched.""" async with get_client() as client: with mock.patch.object( client, "increment_concurrency_slots_with_lease", autospec=True ): wrapped_client = ClientWrapper(client) with mock.patch( "prefect.concurrency.services.get_client", lambda: wrapped_client ): yield wrapped_client async def test_returns_successful_response(mocked_client: Any) -> None: """Test that the service returns a successful response with lease information.""" lease_id = uuid4() response_data = { "lease_id": str(lease_id), "limits": [{"id": str(uuid4()), "name": "test-limit", "limit": 10}], } response = Response(200, json=response_data) mocked_method = mocked_client.client.increment_concurrency_slots_with_lease mocked_method.return_value = response expected_names = sorted(["tag:test"]) expected_slots = 1 expected_mode = "concurrency" expected_lease_duration = 60.0 expected_holder = ConcurrencyLeaseHolder(type="task_run", id=uuid4()) service = ConcurrencySlotAcquisitionWithLeaseService.instance( frozenset(expected_names) ) future: 
Future[Response] = service.send( ( expected_slots, expected_mode, None, # timeout_seconds None, # max_retries expected_lease_duration, False, # strict expected_holder, ) ) await service.drain() returned_response = await asyncio.wrap_future(future) assert returned_response == response mocked_method.assert_called_once_with( names=expected_names, slots=expected_slots, mode=expected_mode, lease_duration=expected_lease_duration, holder=expected_holder, ) async def test_retries_failed_call_respects_retry_after_header( mocked_client: Any, ) -> None: """Test that the service respects Retry-After headers on 423 responses.""" lease_id = uuid4() responses = [ HTTPStatusError( "Limit is locked", request=Request("post", "/v2/concurrency_limits/increment-with-lease"), response=Response(423, headers={"Retry-After": "10"}), ), Response( 200, json={ "lease_id": str(lease_id), "limits": [{"id": str(uuid4()), "name": "tag:test", "limit": 10}], }, ), ] mocked_client.client.increment_concurrency_slots_with_lease.side_effect = responses limit_names = sorted(["tag:test"]) service = ConcurrencySlotAcquisitionWithLeaseService.instance( frozenset(limit_names) ) with mock.patch("asyncio.sleep") as sleep: future: Future[Response] = service.send( ( 1, # slots "concurrency", # mode None, # timeout_seconds None, # max_retries 60.0, # lease_duration False, # strict None, # holder ) ) await service.drain() returned_response = await asyncio.wrap_future(future) assert returned_response == responses[1] # Verify sleep was called with the Retry-After value sleep.assert_called_once_with( float(responses[0].response.headers["Retry-After"]) ) assert ( mocked_client.client.increment_concurrency_slots_with_lease.call_count == 2 ) async def test_failed_call_status_code_not_retryable_returns_exception( mocked_client: Any, ) -> None: """Test that non-423 errors are not retried and are returned as exceptions.""" response = HTTPStatusError( "Internal server error", request=Request("post", 
"/v2/concurrency_limits/increment-with-lease"), response=Response(500, headers={"Retry-After": "2"}), ) mocked_client.client.increment_concurrency_slots_with_lease.side_effect = response limit_names = sorted(["tag:test"]) service = ConcurrencySlotAcquisitionWithLeaseService.instance( frozenset(limit_names) ) future: Future[Response] = service.send( (1, "concurrency", None, None, 60.0, False, None) ) await service.drain() with pytest.raises(HTTPStatusError) as exc_info: await asyncio.wrap_future(future) assert exc_info.value == response async def test_max_retries_honored(mocked_client: Any) -> None: """Test that max_retries limit is respected and acquisition stops after exhausting retries.""" responses = [ HTTPStatusError( "Limit is locked", request=Request("post", "/v2/concurrency_limits/increment-with-lease"), response=Response(423, headers={"Retry-After": "1"}), ) ] * 5 # More 423s than max_retries mocked_client.client.increment_concurrency_slots_with_lease.side_effect = responses limit_names = sorted(["tag:test"]) service = ConcurrencySlotAcquisitionWithLeaseService.instance( frozenset(limit_names) ) with mock.patch("asyncio.sleep"): future: Future[Response] = service.send( ( 1, # slots "concurrency", # mode None, # timeout_seconds 2, # max_retries - only allow 2 retries 60.0, # lease_duration False, # strict None, # holder ) ) await service.drain() # Should get an exception after max_retries is exhausted with pytest.raises(HTTPStatusError): await asyncio.wrap_future(future) # Should have called increment 3 times (initial + 2 retries) assert ( mocked_client.client.increment_concurrency_slots_with_lease.call_count == 3 ) async def test_basic_exception_returns_exception(mocked_client: Any) -> None: """Test that basic exceptions are propagated correctly.""" exc = Exception("Something went wrong") mocked_client.client.increment_concurrency_slots_with_lease.side_effect = exc limit_names = sorted(["tag:test"]) service = 
ConcurrencySlotAcquisitionWithLeaseService.instance( frozenset(limit_names) ) future: Future[Response] = service.send( (1, "concurrency", None, None, 60.0, False, None) ) await service.drain() with pytest.raises(Exception) as exc_info: await asyncio.wrap_future(future) assert exc_info.value == exc async def test_singleton_per_limit_names(mocked_client: Any) -> None: """Test that the service is a singleton per unique set of limit names.""" names_a = frozenset(["tag:test-a"]) names_b = frozenset(["tag:test-b"]) names_a_duplicate = frozenset(["tag:test-a"]) service_a1 = ConcurrencySlotAcquisitionWithLeaseService.instance(names_a) service_a2 = ConcurrencySlotAcquisitionWithLeaseService.instance(names_a_duplicate) service_b = ConcurrencySlotAcquisitionWithLeaseService.instance(names_b) # Same limit names should return the same instance assert service_a1 is service_a2 # Different limit names should return different instances assert service_a1 is not service_b assert service_a2 is not service_b async def test_serialization_behavior(mocked_client: Any) -> None: """Test that multiple concurrent acquisitions are serialized through the service. This is the key test that validates the fix for the thundering herd issue. When multiple tasks try to acquire slots simultaneously, the service ensures they are processed one at a time rather than all hitting the server at once. 
""" call_order: list[dict[str, int | None]] = [] async def mock_increment(*args: Any, **kwargs: Any) -> Response: # Record when this call starts call_index = len(call_order) call_order.append({"start": call_index, "end": None}) # Simulate some processing time await asyncio.sleep(0.01) # Record when this call ends call_order[-1]["end"] = len([c for c in call_order if c["end"] is not None]) return Response( 200, json={ "lease_id": str(uuid4()), "limits": [{"id": str(uuid4()), "name": "tag:test", "limit": 10}], }, ) mocked_client.client.increment_concurrency_slots_with_lease.side_effect = ( mock_increment ) limit_names = frozenset(["tag:test"]) service = ConcurrencySlotAcquisitionWithLeaseService.instance(limit_names) # Send 10 concurrent acquisition requests futures: list[Future[Response]] = [] for i in range(10): future = service.send((1, "concurrency", None, None, 60.0, False, None)) futures.append(future) # Wait for all acquisitions to complete await service.drain() responses = await asyncio.gather(*[asyncio.wrap_future(f) for f in futures]) # Verify all succeeded assert len(responses) == 10 assert all(r.status_code == 200 for r in responses) # Verify they were processed serially (no overlapping execution) # Each call should complete before the next one starts for i in range(len(call_order) - 1): # Current call's end index should be <= next call's start index # This proves serialization assert call_order[i]["end"] is not None assert call_order[i]["end"] <= len( [c for c in call_order[: i + 2] if c["end"] is not None] ) class TestShouldUseCache: """Tests for the _should_use_cache helper function.""" def test_returns_false_when_holder_is_none(self) -> None: """Cache should not be used when holder is None.""" assert _should_use_cache(["tag:test"], None) is False def test_returns_false_when_holder_type_is_not_task_run(self) -> None: """Cache should not be used when holder type is not 'task_run'.""" holder = ConcurrencyLeaseHolder(type="flow_run", id=uuid4()) assert 
_should_use_cache(["tag:test"], holder) is False def test_returns_false_when_names_is_empty(self) -> None: """Cache should not be used when names list is empty.""" holder = ConcurrencyLeaseHolder(type="task_run", id=uuid4()) assert _should_use_cache([], holder) is False def test_returns_false_when_names_do_not_start_with_tag(self) -> None: """Cache should not be used when names don't start with 'tag:' prefix.""" holder = ConcurrencyLeaseHolder(type="task_run", id=uuid4()) assert _should_use_cache(["test-limit"], holder) is False assert _should_use_cache(["concurrency-limit"], holder) is False def test_returns_false_when_some_names_do_not_start_with_tag(self) -> None: """Cache should not be used when any name doesn't start with 'tag:' prefix.""" holder = ConcurrencyLeaseHolder(type="task_run", id=uuid4()) assert _should_use_cache(["tag:test", "other-limit"], holder) is False def test_returns_true_for_task_run_with_tag_names(self) -> None: """Cache should be used for task_run holder with tag-prefixed names.""" holder = ConcurrencyLeaseHolder(type="task_run", id=uuid4()) assert _should_use_cache(["tag:test"], holder) is True assert _should_use_cache(["tag:a", "tag:b", "tag:c"], holder) is True class TestCreateEmptyLimitsResponse: """Tests for the _create_empty_limits_response helper function.""" def test_returns_200_response(self) -> None: """Response should have 200 status code.""" response = _create_empty_limits_response() assert response.status_code == 200 def test_returns_empty_limits_list(self) -> None: """Response should contain empty limits list.""" response = _create_empty_limits_response() data = response.json() assert data["limits"] == [] def test_returns_valid_lease_id(self) -> None: """Response should contain a valid UUID lease_id.""" response = _create_empty_limits_response() data = response.json() assert "lease_id" in data from uuid import UUID UUID(data["lease_id"]) def test_returns_json_content_type(self) -> None: """Response should have JSON content 
type header.""" response = _create_empty_limits_response() assert response.headers["content-type"] == "application/json" class TestCachingBehavior: """Tests for the caching behavior in ConcurrencySlotAcquisitionWithLeaseService.""" @pytest.fixture(autouse=True) def clear_cache(self) -> None: """Clear the cache before each test.""" _no_limits_cache.clear() async def test_caches_empty_limits_response_for_task_run_tags( self, mocked_client: Any ) -> None: """Test that empty limits responses are cached for task_run with tag names.""" response = Response(200, json={"lease_id": str(uuid4()), "limits": []}) mocked_client.client.increment_concurrency_slots_with_lease.return_value = ( response ) limit_names = frozenset(["tag:test-cache"]) holder = ConcurrencyLeaseHolder(type="task_run", id=uuid4()) service = ConcurrencySlotAcquisitionWithLeaseService.instance(limit_names) future1: Future[Response] = service.send( (1, "concurrency", None, None, 60.0, False, holder) ) future2: Future[Response] = service.send( (1, "concurrency", None, None, 60.0, False, holder) ) await service.drain() resp1 = await asyncio.wrap_future(future1) resp2 = await asyncio.wrap_future(future2) assert ( mocked_client.client.increment_concurrency_slots_with_lease.call_count == 1 ) assert resp1.status_code == 200 assert resp2.status_code == 200 assert resp1.json()["limits"] == [] assert resp2.json()["limits"] == [] async def test_does_not_cache_when_limits_exist(self, mocked_client: Any) -> None: """Test that responses with limits are not cached.""" response = Response( 200, json={ "lease_id": str(uuid4()), "limits": [{"id": str(uuid4()), "name": "tag:test", "limit": 10}], }, ) mocked_client.client.increment_concurrency_slots_with_lease.return_value = ( response ) limit_names = frozenset(["tag:test-with-limits"]) holder = ConcurrencyLeaseHolder(type="task_run", id=uuid4()) service = ConcurrencySlotAcquisitionWithLeaseService.instance(limit_names) future1: Future[Response] = service.send( (1, 
"concurrency", None, None, 60.0, False, holder) ) future2: Future[Response] = service.send( (1, "concurrency", None, None, 60.0, False, holder) ) await service.drain() await asyncio.wrap_future(future1) await asyncio.wrap_future(future2) assert ( mocked_client.client.increment_concurrency_slots_with_lease.call_count == 2 ) async def test_does_not_cache_when_holder_is_none(self, mocked_client: Any) -> None: """Test that caching is disabled when holder is None.""" response = Response(200, json={"lease_id": str(uuid4()), "limits": []}) mocked_client.client.increment_concurrency_slots_with_lease.return_value = ( response ) limit_names = frozenset(["tag:test-no-holder"]) service = ConcurrencySlotAcquisitionWithLeaseService.instance(limit_names) future1: Future[Response] = service.send( (1, "concurrency", None, None, 60.0, False, None) ) future2: Future[Response] = service.send( (1, "concurrency", None, None, 60.0, False, None) ) await service.drain() await asyncio.wrap_future(future1) await asyncio.wrap_future(future2) assert ( mocked_client.client.increment_concurrency_slots_with_lease.call_count == 2 ) async def test_does_not_cache_for_non_tag_names(self, mocked_client: Any) -> None: """Test that caching is disabled for non-tag limit names.""" response = Response(200, json={"lease_id": str(uuid4()), "limits": []}) mocked_client.client.increment_concurrency_slots_with_lease.return_value = ( response ) limit_names = frozenset(["regular-limit"]) holder = ConcurrencyLeaseHolder(type="task_run", id=uuid4()) service = ConcurrencySlotAcquisitionWithLeaseService.instance(limit_names) future1: Future[Response] = service.send( (1, "concurrency", None, None, 60.0, False, holder) ) future2: Future[Response] = service.send( (1, "concurrency", None, None, 60.0, False, holder) ) await service.drain() await asyncio.wrap_future(future1) await asyncio.wrap_future(future2) assert ( mocked_client.client.increment_concurrency_slots_with_lease.call_count == 2 )
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/concurrency/test_concurrency_slot_acquisition_with_lease_service.py", "license": "Apache License 2.0", "lines": 401, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:src/prefect/client/orchestration/_events/client.py
from typing import TYPE_CHECKING from prefect.client.orchestration.base import BaseAsyncClient, BaseClient from prefect.client.schemas.events import EventPage if TYPE_CHECKING: from prefect.events.filters import EventFilter class EventClient(BaseClient): def read_events( self, filter: "EventFilter | None" = None, limit: int = 100, ) -> EventPage: """ query historical events from the API. args: filter: optional filter criteria to narrow down events limit: maximum number of events to return per page (default 100) returns: EventPage containing events, total count, and next page link """ response = self.request( "POST", "/events/filter", json={ "filter": filter.model_dump(mode="json") if filter else None, "limit": limit, }, ) return EventPage.model_validate(response.json()) def read_events_page(self, next_page_url: str) -> EventPage: """ retrieve the next page of events using a next_page URL. args: next_page_url: the next_page URL from a previous EventPage response returns: EventPage containing the next page of events """ response = self._client.get(str(next_page_url)) response.raise_for_status() return EventPage.model_validate(response.json()) class EventAsyncClient(BaseAsyncClient): async def read_events( self, filter: "EventFilter | None" = None, limit: int = 100, ) -> EventPage: """ query historical events from the API. args: filter: optional filter criteria to narrow down events limit: maximum number of events to return per page (default 100) returns: EventPage containing events, total count, and next page link """ response = await self.request( "POST", "/events/filter", json={ "filter": filter.model_dump(mode="json") if filter else None, "limit": limit, }, ) return EventPage.model_validate(response.json()) async def read_events_page(self, next_page_url: str) -> EventPage: """ retrieve the next page of events using a next_page URL. 
args: next_page_url: the next_page URL from a previous EventPage response returns: EventPage containing the next page of events """ response = await self._client.get(str(next_page_url)) response.raise_for_status() return EventPage.model_validate(response.json())
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/client/orchestration/_events/client.py", "license": "Apache License 2.0", "lines": 73, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
PrefectHQ/prefect:src/prefect/client/schemas/events.py
from typing import TYPE_CHECKING from pydantic import AnyHttpUrl, Field from prefect._internal.schemas.bases import PrefectBaseModel from prefect.events.schemas.events import ReceivedEvent if TYPE_CHECKING: from prefect.client.orchestration import PrefectClient, SyncPrefectClient class EventPage(PrefectBaseModel): """a single page of events returned from the API""" events: list[ReceivedEvent] = Field(description="the events matching the query") total: int = Field(description="the total number of matching events") next_page: AnyHttpUrl | None = Field( description="the URL for the next page of results, if there are more" ) async def get_next_page(self, client: "PrefectClient") -> "EventPage | None": """ fetch the next page of events. args: client: the PrefectClient instance to use for fetching returns: the next EventPage, or None if there are no more pages """ if not self.next_page: return None return await client.read_events_page(self.next_page) def get_next_page_sync(self, client: "SyncPrefectClient") -> "EventPage | None": """ fetch the next page of events (sync version). args: client: the SyncPrefectClient instance to use for fetching returns: the next EventPage, or None if there are no more pages """ if not self.next_page: return None return client.read_events_page(self.next_page)
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/client/schemas/events.py", "license": "Apache License 2.0", "lines": 35, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
PrefectHQ/prefect:tests/client/test_events_client.py
from datetime import timedelta from typing import TYPE_CHECKING import prefect.types._datetime from prefect.client.orchestration import get_client from prefect.events.filters import EventFilter, EventOccurredFilter if TYPE_CHECKING: from prefect.testing.utilities import PrefectTestHarness class TestReadEventsAsync: async def test_read_events_with_filter( self, prefect_client: "PrefectTestHarness" ) -> None: """test querying events with a filter""" async with get_client() as client: # create a filter for recent events now = prefect.types._datetime.now("UTC") event_filter = EventFilter( occurred=EventOccurredFilter( since=now - timedelta(days=1), until=now, ) ) # query events result = await client.read_events(filter=event_filter, limit=10) # verify response structure assert result.events is not None assert isinstance(result.events, list) assert result.total is not None assert isinstance(result.total, int) async def test_read_events_without_filter( self, prefect_client: "PrefectTestHarness" ) -> None: """test querying events without a filter""" async with get_client() as client: # query events without filter result = await client.read_events(limit=5) # verify response structure assert result.events is not None assert isinstance(result.events, list) assert result.total is not None assert isinstance(result.total, int) async def test_read_events_respects_limit( self, prefect_client: "PrefectTestHarness" ) -> None: """test that limit parameter is respected""" async with get_client() as client: limit = 3 result = await client.read_events(limit=limit) # verify we don't get more than limit assert len(result.events) <= limit async def test_read_events_page(self, prefect_client: "PrefectTestHarness") -> None: """test paginating through events""" async with get_client() as client: # get first page with small limit first_page = await client.read_events(limit=5) # if there's a next page, fetch it if first_page.next_page: second_page = await 
client.read_events_page(first_page.next_page) # verify we got events assert second_page.events is not None assert isinstance(second_page.events, list) assert second_page.total is not None assert isinstance(second_page.total, int) async def test_get_next_page(self, prefect_client: "PrefectTestHarness") -> None: """test using EventPage.get_next_page() method""" async with get_client() as client: # get first page with small limit first_page = await client.read_events(limit=5) # fetch next page using the method on EventPage second_page = await first_page.get_next_page(client) if second_page: # verify we got events assert second_page.events is not None assert isinstance(second_page.events, list) assert second_page.total is not None assert isinstance(second_page.total, int) class TestReadEventsSync: def test_read_events_with_filter( self, sync_prefect_client: "PrefectTestHarness" ) -> None: """test querying events with a filter using sync client""" with get_client(sync_client=True) as client: # create a filter for recent events now = prefect.types._datetime.now("UTC") event_filter = EventFilter( occurred=EventOccurredFilter( since=now - timedelta(days=1), until=now, ) ) # query events result = client.read_events(filter=event_filter, limit=10) # verify response structure assert result.events is not None assert isinstance(result.events, list) assert result.total is not None assert isinstance(result.total, int) def test_read_events_without_filter( self, sync_prefect_client: "PrefectTestHarness" ) -> None: """test querying events without a filter using sync client""" with get_client(sync_client=True) as client: # query events without filter result = client.read_events(limit=5) # verify response structure assert result.events is not None assert isinstance(result.events, list) assert result.total is not None assert isinstance(result.total, int) def test_read_events_respects_limit( self, sync_prefect_client: "PrefectTestHarness" ) -> None: """test that limit parameter is 
respected using sync client""" with get_client(sync_client=True) as client: limit = 3 result = client.read_events(limit=limit) # verify we don't get more than limit assert len(result.events) <= limit def test_read_events_page(self, sync_prefect_client: "PrefectTestHarness") -> None: """test paginating through events using sync client""" with get_client(sync_client=True) as client: # get first page with small limit first_page = client.read_events(limit=5) # if there's a next page, fetch it if first_page.next_page: second_page = client.read_events_page(first_page.next_page) # verify we got events assert second_page.events is not None assert isinstance(second_page.events, list) assert second_page.total is not None assert isinstance(second_page.total, int) def test_get_next_page_sync( self, sync_prefect_client: "PrefectTestHarness" ) -> None: """test using EventPage.get_next_page_sync() method""" with get_client(sync_client=True) as client: # get first page with small limit first_page = client.read_events(limit=5) # fetch next page using the method on EventPage second_page = first_page.get_next_page_sync(client) if second_page: # verify we got events assert second_page.events is not None assert isinstance(second_page.events, list) assert second_page.total is not None assert isinstance(second_page.total, int)
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/client/test_events_client.py", "license": "Apache License 2.0", "lines": 145, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:src/prefect/_internal/observability.py
""" internal module for configuring observability tooling (logfire, etc.) """ from typing import Any from pydantic import Field from pydantic_settings import BaseSettings, SettingsConfigDict class LogfireSettings(BaseSettings): """ configuration for logfire observability integration. """ model_config = SettingsConfigDict( env_prefix="PREFECT_LOGFIRE_", extra="ignore", ) enabled: bool = Field( default=False, description="whether to enable logfire observability", ) write_token: str | None = Field( default=None, description="API token for writing to logfire. required when enabled=true.", ) sampling_head_rate: float = Field( default=0.1, ge=0.0, le=1.0, description="fraction of traces to sample upfront (0.0-1.0). reduces total volume.", ) sampling_level_threshold: str = Field( default="warn", description="minimum log level to always include (debug, info, warn, error). keeps all warnings/errors.", ) sampling_duration_threshold: float = Field( default=5.0, ge=0.0, description="minimum duration in seconds to always include. catches slow operations.", ) sampling_background_rate: float = Field( default=0.01, ge=0.0, le=1.0, description="fraction of non-notable traces to keep anyway (0.0-1.0). maintains baseline visibility.", ) def configure_logfire() -> Any | None: """ configure and return logfire instance with sampling, or None if disabled. this function: 1. checks if logfire is enabled via PREFECT_LOGFIRE_ENABLED 2. validates PREFECT_LOGFIRE_WRITE_TOKEN is set 3. loads sampling configuration from env vars 4. configures logfire with sampling options 5. returns configured logfire module (or None if disabled) can be called multiple times safely - logfire.configure is idempotent. 
""" # load logfire settings from env vars settings = LogfireSettings() if not settings.enabled: return None if settings.write_token is None: raise ValueError( "PREFECT_LOGFIRE_WRITE_TOKEN must be set when PREFECT_LOGFIRE_ENABLED is true" ) try: import logfire # pyright: ignore except ImportError as exc: raise ImportError( "logfire is not installed. install it with: uv add logfire" ) from exc # build sampling options sampling_options = logfire.SamplingOptions.level_or_duration( head=settings.sampling_head_rate, level_threshold=settings.sampling_level_threshold, duration_threshold=settings.sampling_duration_threshold, background_rate=settings.sampling_background_rate, ) logfire.configure(token=settings.write_token, sampling=sampling_options) # pyright: ignore return logfire
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/_internal/observability.py", "license": "Apache License 2.0", "lines": 77, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
PrefectHQ/prefect:src/prefect/_internal/testing.py
"""Testing utilities for internal use.""" import asyncio from typing import Any, AsyncIterator from typing_extensions import Self class AssertionRetryAttempt: """Context manager for capturing exceptions during retry attempts.""" def __init__(self, attempt_number: int): self.attempt_number = attempt_number self.exception: Exception | None = None def __enter__(self) -> Self: return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: Any, ) -> bool: if exc_val is not None: self.exception = exc_val # type: ignore return exc_type is AssertionError async def retry_asserts( max_attempts: int = 3, delay: float = 1.0, ) -> AsyncIterator[AssertionRetryAttempt]: """ Async generator that retries a block of assertions until it succeeds or max attempts is reached. Useful for testing eventual consistency scenarios where changes may not propagate immediately. Args: max_attempts: Maximum number of attempts before raising the exception. delay: Time in seconds to wait between retry attempts. Yields: A context manager that captures exceptions during each attempt. Raises: The last exception raised within the block if all attempts fail. Example: ```python async for attempt in retry_asserts(max_attempts=3): with attempt: for deployment in deployments: await session.refresh(deployment) assert deployment.status == DeploymentStatus.READY ``` """ for attempt_number in range(1, max_attempts + 1): attempt = AssertionRetryAttempt(attempt_number) yield attempt if attempt.exception is None: return # Success, exit early if attempt_number == max_attempts: raise attempt.exception await asyncio.sleep(delay)
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/_internal/testing.py", "license": "Apache License 2.0", "lines": 52, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
PrefectHQ/prefect:src/prefect/server/api/background_workers.py
import asyncio from contextlib import asynccontextmanager from logging import Logger from typing import Any, AsyncGenerator, Callable from docket import Docket, Worker from prefect.logging import get_logger from prefect.server.api.flow_runs import delete_flow_run_logs from prefect.server.api.task_runs import delete_task_run_logs from prefect.server.events.services import triggers as _triggers_module # noqa: F401 from prefect.server.models.deployments import mark_deployments_ready from prefect.server.models.work_queues import mark_work_queues_ready from prefect.server.services.cancellation_cleanup import ( cancel_child_task_runs, cancel_subflow_run, ) from prefect.server.services.db_vacuum import ( vacuum_events_with_retention_overrides, vacuum_old_events, vacuum_old_flow_runs, vacuum_orphaned_artifacts, vacuum_orphaned_logs, vacuum_stale_artifact_collections, ) from prefect.server.services.late_runs import mark_flow_run_late from prefect.server.services.pause_expirations import fail_expired_pause from prefect.server.services.perpetual_services import ( register_and_schedule_perpetual_services, ) from prefect.server.services.repossessor import revoke_expired_lease logger: Logger = get_logger(__name__) # Task functions to register with docket for background processing task_functions: list[Callable[..., Any]] = [ # Simple background tasks (from Alex's PR #19377) mark_work_queues_ready, mark_deployments_ready, delete_task_run_logs, delete_flow_run_logs, # Find-and-flood pattern tasks used by perpetual services cancel_child_task_runs, cancel_subflow_run, fail_expired_pause, mark_flow_run_late, revoke_expired_lease, vacuum_orphaned_logs, vacuum_orphaned_artifacts, vacuum_stale_artifact_collections, vacuum_old_flow_runs, vacuum_events_with_retention_overrides, vacuum_old_events, ] @asynccontextmanager async def background_worker( docket: Docket, ephemeral: bool = False, webserver_only: bool = False, ) -> AsyncGenerator[None, None]: worker_task: asyncio.Task[None] | None = 
None async with Worker(docket) as worker: # Register background task functions docket.register_collection( "prefect.server.api.background_workers:task_functions" ) # Register and schedule enabled perpetual services await register_and_schedule_perpetual_services( docket, ephemeral=ephemeral, webserver_only=webserver_only ) try: worker_task = asyncio.create_task(worker.run_forever()) yield finally: if worker_task: worker_task.cancel() try: await worker_task except asyncio.CancelledError: pass
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/server/api/background_workers.py", "license": "Apache License 2.0", "lines": 76, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
PrefectHQ/prefect:src/prefect/settings/models/server/docket.py
from typing import ClassVar from pydantic import Field from pydantic_settings import SettingsConfigDict from prefect.settings.base import PrefectBaseSettings, build_settings_config class ServerDocketSettings(PrefectBaseSettings): """ Settings for controlling Docket behavior """ model_config: ClassVar[SettingsConfigDict] = build_settings_config( ("server", "docket") ) name: str = Field( default="prefect-server", description="The name of the Docket instance.", ) url: str = Field( default="memory://", description="The URL of the Redis server to use for Docket.", )
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/settings/models/server/docket.py", "license": "Apache License 2.0", "lines": 19, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
PrefectHQ/prefect:src/prefect/_internal/lazy.py
"""Utilities for lazy-loading module-level objects.""" from typing import Callable, Generic, TypeVar K = TypeVar("K") V = TypeVar("V") class LazyDict(Generic[K, V]): """ A dictionary-like object that defers loading its contents until first access. Useful for module-level registries that import heavy dependencies. The loader function is called once on first access, and the result is cached. Example: >>> def load_plugins() -> dict[str, type]: ... from heavy_module import PluginA, PluginB ... return {"a": PluginA, "b": PluginB} >>> >>> plugins: LazyDict[str, type] = LazyDict(load_plugins) >>> # Nothing loaded yet >>> plugins.get("a") # Triggers load, returns PluginA >>> plugins.get("b") # Uses cached result, returns PluginB """ def __init__(self, loader_func: Callable[[], dict[K, V]]) -> None: self._loader = loader_func self._cache: dict[K, V] | None = None def _ensure_loaded(self) -> dict[K, V]: """Load the dictionary contents if not already loaded.""" if self._cache is None: self._cache = self._loader() return self._cache def get(self, key: K, default: V | None = None) -> V | None: """Get an item from the lazy dict, loading if necessary.""" return self._ensure_loaded().get(key, default) def __contains__(self, key: K) -> bool: """Check if key is in the lazy dict, loading if necessary.""" return key in self._ensure_loaded() def __getitem__(self, key: K) -> V: """Get an item from the lazy dict, loading if necessary.""" return self._ensure_loaded()[key]
{ "repo_id": "PrefectHQ/prefect", "file_path": "src/prefect/_internal/lazy.py", "license": "Apache License 2.0", "lines": 36, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
PrefectHQ/prefect:tests/_internal/test_lazy.py
import pytest from prefect._internal.lazy import LazyDict def test_lazy_dict_defers_loading(): """LazyDict should not call loader function on initialization.""" call_count = 0 def loader() -> dict[str, int]: nonlocal call_count call_count += 1 return {"a": 1, "b": 2} _lazy = LazyDict(loader) assert call_count == 0 def test_lazy_dict_loads_once_and_caches(): """LazyDict should call loader once on first access and cache result.""" call_count = 0 def loader() -> dict[str, int]: nonlocal call_count call_count += 1 return {"a": 1, "b": 2} lazy = LazyDict(loader) # Multiple accesses using different methods assert lazy.get("a") == 1 assert "b" in lazy assert lazy["b"] == 2 # Loader should only be called once assert call_count == 1 @pytest.mark.parametrize( "key,default,expected", [ ("a", None, 1), ("missing", None, None), ("missing", 42, 42), ], ) def test_lazy_dict_get_with_default(key, default, expected): """LazyDict.get() should support default values.""" lazy = LazyDict(lambda: {"a": 1}) assert lazy.get(key, default) == expected def test_lazy_dict_getitem_raises_keyerror(): """LazyDict should raise KeyError for missing keys with bracket notation.""" lazy = LazyDict(lambda: {"a": 1}) with pytest.raises(KeyError): _ = lazy["missing"]
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/_internal/test_lazy.py", "license": "Apache License 2.0", "lines": 42, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
PrefectHQ/prefect:tests/_internal/schemas/test_v2_schema.py
import inspect from pydantic import Field from pydantic_core import PydanticUndefined from prefect._internal.pydantic.v2_schema import process_v2_params class TestProcessV2Params: """Test the process_v2_params functions with and without FieldInfo.""" def test_process_v2_params_with_existing_fieldinfo(self): """Test parameter processing with an existing FieldInfo object as default.""" existing_field = Field( default="default_name", description="Existing field description", json_schema_extra={"position": 99}, ) param = inspect.Parameter( "name", inspect.Parameter.POSITIONAL_OR_KEYWORD, default=existing_field, annotation=str, ) name, type_, field = process_v2_params( param, position=0, docstrings={"name": "Docstring description"}, aliases={} ) assert name == "name" assert type_ is str assert field.default == "default_name" assert field.title == "name" assert field.description == "Existing field description" assert field.json_schema_extra == {"position": 99} def test_process_v2_params_with_existing_fieldinfo_no_description(self): """Test parameter processing with existing FieldInfo that has no description.""" existing_field = Field( default="default_name", description=None, json_schema_extra={"position": 99} ) param = inspect.Parameter( "name", inspect.Parameter.POSITIONAL_OR_KEYWORD, default=existing_field, annotation=str, ) name, type_, field = process_v2_params( param, position=0, docstrings={"name": "Docstring description"}, aliases={} ) assert name == "name" assert field.description == "Docstring description" def test_process_v2_params_with_existing_fieldinfo_empty_description(self): """Test parameter processing with existing FieldInfo that has empty description.""" existing_field = Field( default="default_name", description="", json_schema_extra={"position": 99} ) param = inspect.Parameter( "name", inspect.Parameter.POSITIONAL_OR_KEYWORD, default=existing_field, annotation=str, ) name, type_, field = process_v2_params( param, position=0, docstrings={"name": 
"Docstring description"}, aliases={} ) assert name == "name" assert field.description == "Docstring description" def test_process_v2_params_with_existing_fieldinfo_required(self): """Test parameter processing with existing FieldInfo for a required field.""" existing_field = Field(description="Required field with existing info") param = inspect.Parameter( "name", inspect.Parameter.POSITIONAL_OR_KEYWORD, default=existing_field, annotation=str, ) name, type_, field = process_v2_params( param, position=0, docstrings={}, aliases={} ) assert name == "name" assert field.default is PydanticUndefined # Required field assert field.description == "Required field with existing info" def test_process_v2_params_with_existing_fieldinfo_no_json_schema_extra(self): """Test parameter processing with existing FieldInfo that has no json_schema_extra.""" existing_field = Field( default="default_value", description="Field without json_schema_extra" ) param = inspect.Parameter( "name", inspect.Parameter.POSITIONAL_OR_KEYWORD, default=existing_field, annotation=str, ) name, type_, field = process_v2_params( param, position=5, docstrings={}, aliases={} ) assert name == "name" assert field.json_schema_extra == {"position": 5}
{ "repo_id": "PrefectHQ/prefect", "file_path": "tests/_internal/schemas/test_v2_schema.py", "license": "Apache License 2.0", "lines": 91, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test