sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
PrefectHQ/prefect:src/prefect/settings/models/_defaults.py | from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING, Any
from pydantic import SecretStr, ValidationInfo
if TYPE_CHECKING:
from prefect.settings.models.root import Settings
def default_profiles_path(values: dict[str, Any]) -> Path:
    """Default profiles_path based on home directory.

    Args:
        values: Partially-validated settings values; the ``home`` entry is
            consulted when present.

    Returns:
        ``<home>/profiles.toml``, falling back to ``~/.prefect`` when ``home``
        is missing or is not a ``Path``.
    """
    # Match the sibling default_* helpers (local_storage_path, memo_store_path,
    # logging_config_path): read the raw value and validate its type, instead
    # of eagerly building and expanding a fallback Path that is thrown away
    # whenever "home" is present.
    home = values.get("home")
    if not isinstance(home, Path):
        home = Path("~/.prefect").expanduser()
    return home / "profiles.toml"
def substitute_home_template(v: Any, info: ValidationInfo) -> Any:
    """Validator that substitutes $PREFECT_HOME in a path string if present.

    Already-resolved ``Path`` values, ``None``, and non-string inputs pass
    through untouched; strings without the template marker are returned as-is.
    """
    home_path = info.data.get("home")

    # Only plain strings are candidates for template substitution.
    if not isinstance(v, str):
        return v

    if "$PREFECT_HOME" not in v:
        return v

    if not (home_path and isinstance(home_path, Path)):
        raise ValueError(
            f'Cannot resolve $PREFECT_HOME in "{v}" because '
            f"PREFECT_HOME setting ({home_path!r}) is not a valid resolved path."
        )

    substituted = v.replace("$PREFECT_HOME", str(home_path))
    try:
        return Path(substituted)
    except Exception as e:
        raise ValueError(
            f"Error creating path after substituting $PREFECT_HOME: {e}"
        ) from e
def default_local_storage_path(values: dict[str, Any]) -> Path:
    """Default local_storage_path based on home directory."""
    configured_home = values.get("home")
    # Fall back to ~/.prefect whenever home is missing or not yet a Path.
    if isinstance(configured_home, Path):
        base = configured_home
    else:
        base = Path("~/.prefect").expanduser()
    return base / "storage"
def default_memo_store_path(values: dict[str, Any]) -> Path:
    """Default memo_store_path based on home directory."""
    configured_home = values.get("home")
    # Fall back to ~/.prefect whenever home is missing or not yet a Path.
    base = (
        configured_home
        if isinstance(configured_home, Path)
        else Path("~/.prefect").expanduser()
    )
    return base / "memo_store.toml"
def default_logging_config_path(values: dict[str, Any]) -> Path:
    """Default logging_config_path based on home directory."""
    configured_home = values.get("home")
    # Fall back to ~/.prefect whenever home is missing or not yet a Path.
    base = (
        configured_home
        if isinstance(configured_home, Path)
        else Path("~/.prefect").expanduser()
    )
    return base / "logging.yml"
def default_database_connection_url(settings: "Settings") -> SecretStr:
    """Build the default database connection URL from the server settings.

    Returns a SQLite URL under PREFECT_HOME unless a Postgres or SQLite
    driver is explicitly configured; raises ``ValueError`` for unknown
    drivers or for Postgres configs missing required fields.
    """
    db = settings.server.database
    sqlite_default = f"sqlite+aiosqlite:///{settings.home}/prefect.db"
    value: str = sqlite_default

    if db.driver == "postgresql+asyncpg":
        # A Postgres URL cannot be assembled without these settings.
        missing = [
            attr for attr in ("host", "user", "name") if getattr(db, attr) is None
        ]
        if missing:
            raise ValueError(
                f"Missing required database connection settings: {', '.join(missing)}"
            )

        # Imported lazily so SQLAlchemy is only required when Postgres is used.
        from sqlalchemy import URL

        value = URL(
            drivername=db.driver,
            host=db.host,
            port=db.port or 5432,
            username=db.user,
            password=(
                db.password.get_secret_value() if db.password else None
            ),
            database=db.name,
            query=[],  # type: ignore
        ).render_as_string(hide_password=False)
    elif db.driver == "sqlite+aiosqlite":
        if db.name:
            value = f"{db.driver}:///{db.name}"
        else:
            value = sqlite_default
    elif db.driver:
        raise ValueError(f"Unsupported database driver: {db.driver}")

    return SecretStr(value)
def default_ui_url(settings: "Settings") -> str | None:
    """Return the configured UI URL, or infer one from the API URL.

    Returns ``None`` when neither a UI URL nor an API URL is available.
    """
    if settings.ui_url is not None:
        return settings.ui_url

    api_url = settings.api.url
    if not api_url:
        # Nothing to infer from.
        return None

    inferred = api_url
    cloud_api_url = settings.cloud.api_url
    cloud_ui_url = settings.cloud.ui_url
    if inferred.startswith(cloud_api_url) and cloud_ui_url:
        inferred = inferred.replace(cloud_api_url, cloud_ui_url)

    if inferred.endswith("/api"):
        # Handles open-source APIs
        inferred = inferred[:-4]
    # Handles Cloud APIs with content after `/api`
    inferred = inferred.replace("/api/", "/")
    # Update routing
    inferred = inferred.replace("/accounts/", "/account/")
    inferred = inferred.replace("/workspaces/", "/workspace/")
    return inferred
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/settings/models/_defaults.py",
"license": "Apache License 2.0",
"lines": 120,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:tests/assets/test_materializing_tasks.py | from prefect.assets import Asset, materialize
from prefect.cache_policies import DEFAULT
class TestMaterializingTask:
    """Behavior of `with_options` on tasks created via `@materialize`."""

    def test_with_options_assets_parameter_keeps_existing(self):
        @materialize("storage://original/asset.csv", persist_result=True)
        def initial_task():
            pass

        # Omitting `assets` must keep the assets from the decorator.
        updated = initial_task.with_options(persist_result=False)

        assert updated.assets == [Asset(key="storage://original/asset.csv")]
        assert not updated.persist_result

    def test_with_options_assets_takes_precedence_over_existing(self):
        @materialize("storage://foo/bar/asset.csv", persist_result=False)
        def initial_task():
            pass

        # Passing `assets` explicitly must replace the decorator's assets.
        updated = initial_task.with_options(assets=["storage://foo/baz/asset.csv"])

        assert updated.assets == [Asset(key="storage://foo/baz/asset.csv")]
        assert not updated.persist_result

    def test_with_options_assets_allows_both(self):
        @materialize("storage://foo/bar/asset.csv", persist_result=False)
        def initial_task():
            pass

        # Both overrides may be supplied in the same call.
        updated = initial_task.with_options(
            assets=["storage://foo/baz/asset.csv"], persist_result=True
        )

        assert updated.assets == [Asset(key="storage://foo/baz/asset.csv")]
        assert updated.persist_result

    def test_with_options_preserves_user_provided_persist_result_and_cache_policy(self):
        @materialize("storage://original/asset.csv")
        def initial_task():
            pass

        assert initial_task.cache_policy == DEFAULT
        assert initial_task.persist_result is None
        assert initial_task._user_persist_result is None

        # An unrelated option change must not disturb persistence settings.
        updated = initial_task.with_options(name="something")

        assert updated.cache_policy == DEFAULT
        assert updated.persist_result is None
        assert updated._user_persist_result is None
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/assets/test_materializing_tasks.py",
"license": "Apache License 2.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/integrations/prefect-dbt/prefect_dbt/core/_tracker.py | """
State for managing tasks across callbacks.
"""
import threading
from typing import Any, Optional, Union
from uuid import UUID
from prefect.client.schemas.objects import Flow, State
from prefect.context import hydrated_context
from prefect.logging.loggers import PrefectLogAdapter, get_logger
from prefect.task_engine import run_task_sync
from prefect.tasks import Task
class NodeTaskTracker:
    """
    Maintains the state, upstream dependencies, logger, and results
    for all the tasks called by the execution of each node in the dbt project.

    All mappings are keyed by the dbt node's unique id. Completion is
    signalled across threads with per-node ``threading.Event`` objects.
    """

    def __init__(self):
        # Prefect Task registered for each dbt node id.
        self._tasks: dict[str, Task[Any, Any]] = {}
        # Final result (a Prefect State, or None) produced by each node's run.
        self._task_results: dict[str, Any] = {}
        # Raw dbt event data/message captured when a node finishes.
        self._node_status: dict[str, dict[str, Any]] = {}
        # Completion flags, flipped by set_node_status().
        self._node_complete: dict[str, bool] = {}
        # Upstream node ids each node waits on.
        self._node_dependencies: dict[str, list[str]] = {}
        # Events used to block waiters until a node completes.
        self._node_events: dict[str, threading.Event] = {}
        self._task_run_ids: dict[str, UUID] = {}
        self._task_run_names: dict[str, str] = {}

    def start_task(self, node_id: str, task: Task[Any, Any]) -> None:
        """Start a task for a node.

        Registers the task, marks the node incomplete, and creates the
        completion event waiters will block on.
        """
        self._tasks[node_id] = task
        self._node_complete[node_id] = False
        self._node_events[node_id] = threading.Event()

    def get_task_logger(
        self,
        node_id: str,
        flow_run: Optional[dict[str, Any]] = None,
        flow: Optional[Flow] = None,
        **kwargs: Any,
    ) -> PrefectLogAdapter:
        """Get the logger for a task.

        Builds a ``prefect.task_runs`` adapter whose extra fields carry the
        node's task-run metadata; missing flow context is rendered as
        "<unknown>". Caller-supplied kwargs can override the defaults.
        """
        logger = PrefectLogAdapter(
            get_logger("prefect.task_runs"),
            extra={
                **{
                    "task_run_id": self.get_task_run_id(node_id),
                    "flow_run_id": str(flow_run.get("id")) if flow_run else "<unknown>",
                    "task_run_name": self.get_task_run_name(node_id),
                    "task_name": "execute_dbt_node",
                    "flow_run_name": flow_run.get("name") if flow_run else "<unknown>",
                    "flow_name": flow.name if flow else "<unknown>",
                },
                **kwargs,
            },
        )
        return logger

    def set_node_status(
        self, node_id: str, event_data: dict[str, Any], event_message: str
    ) -> None:
        """Set the status for a node."""
        self._node_status[node_id] = {
            "event_data": event_data,
            "event_message": event_message,
        }
        # Mark node as complete when status is set
        self._node_complete[node_id] = True
        # Signal the event to wake up any waiting threads
        if node_id in self._node_events:
            self._node_events[node_id].set()

    def get_node_status(self, node_id: str) -> Union[dict[str, Any], None]:
        """Get the status for a node."""
        return self._node_status.get(node_id)

    def is_node_complete(self, node_id: str) -> bool:
        """Check if a node is complete."""
        return self._node_complete.get(node_id, False)

    def wait_for_node_completion(
        self, node_id: str, timeout: Union[float, None] = None
    ) -> bool:
        """Wait for a node to complete using threading.Event.

        Args:
            node_id: The ID of the node to wait for
            timeout: Maximum time to wait in seconds. None means wait indefinitely.

        Returns:
            True if the node completed, False if timeout occurred
        """
        if node_id not in self._node_events:
            # If no event exists, the node might already be complete
            # (status can be set before start_task registers an event).
            return self.is_node_complete(node_id)
        return self._node_events[node_id].wait(timeout=timeout)

    def set_task_result(self, node_id: str, result: Any) -> None:
        """Set the result for a task."""
        self._task_results[node_id] = result

    def get_task_result(self, node_id: str) -> Union[Any, None]:
        """Get the result for a task."""
        return self._task_results.get(node_id)

    def set_node_dependencies(self, node_id: str, dependencies: list[str]) -> None:
        """Set the dependencies for a node."""
        self._node_dependencies[node_id] = dependencies

    def get_node_dependencies(self, node_id: str) -> list[str]:
        """Get the dependencies for a node (empty list when none recorded)."""
        return self._node_dependencies.get(node_id, [])

    def set_task_run_id(self, node_id: str, task_run_id: UUID) -> None:
        """Set the task run ID for a node."""
        self._task_run_ids[node_id] = task_run_id

    def get_task_run_id(self, node_id: str) -> Union[UUID, None]:
        """Get the task run ID for a node."""
        return self._task_run_ids.get(node_id)

    def set_task_run_name(self, node_id: str, task_run_name: str) -> None:
        """Set the task run name for a node."""
        self._task_run_names[node_id] = task_run_name

    def get_task_run_name(self, node_id: str) -> Union[str, None]:
        """Get the task run name for a node."""
        return self._task_run_names.get(node_id)

    def run_task_in_thread(
        self,
        node_id: str,
        task: Task[Any, Any],
        task_run_id: UUID,
        parameters: dict[str, Any],
        context: dict[str, Any],
    ) -> None:
        """Run a task in a separate thread.

        The daemon thread rehydrates the Prefect context, gathers the states
        of upstream dependencies as ``wait_for``, runs the task synchronously,
        and stores the resulting state via ``set_task_result``.
        """

        def run_task():
            with hydrated_context(context):
                states: list[State] = []
                dependencies = self.get_node_dependencies(node_id)
                for dep_id in dependencies:
                    # Only dependencies that already produced a result are
                    # passed along; falsy results are skipped.
                    state = self.get_task_result(dep_id)
                    if state:
                        states.append(state)
                state = run_task_sync(
                    task,
                    task_run_id=task_run_id,
                    parameters=parameters,
                    wait_for=states,
                    context=context,
                    return_type="state",
                )
                # Wait for the task to complete
                if state:
                    self.set_task_result(node_id, state)
                else:
                    self.set_task_result(node_id, None)

        thread = threading.Thread(target=run_task)
        # Daemonized so a stuck task cannot block interpreter shutdown.
        thread.daemon = True
        thread.start()
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-dbt/prefect_dbt/core/_tracker.py",
"license": "Apache License 2.0",
"lines": 143,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/integrations/prefect-dbt/tests/core/test_tracker.py | """
Tests for NodeTaskTracker class and related functionality.
"""
import threading
import time
from typing import Any, Dict
from unittest.mock import Mock
from uuid import UUID
import pytest
from prefect_dbt.core._tracker import NodeTaskTracker
from prefect.client.schemas.objects import Flow, State
from prefect.logging.loggers import PrefectLogAdapter
from prefect.tasks import Task
@pytest.fixture
def mock_task():
    """Provide a Task mock whose name is preset for assertions."""
    mocked = Mock(spec=Task)
    mocked.name = "test_task"
    return mocked
@pytest.fixture
def mock_flow():
    """Provide a Flow mock whose name is preset for assertions."""
    mocked = Mock(spec=Flow)
    mocked.name = "test_flow"
    return mocked
@pytest.fixture
def mock_state():
    """Provide a State mock with a fixed id."""
    mocked = Mock(spec=State)
    mocked.id = "test-state-id"
    return mocked
@pytest.fixture
def sample_node_id():
    """A representative dbt node unique id."""
    node_id = "model.test_project.test_model"
    return node_id
@pytest.fixture
def sample_task_run_id():
    """A fixed task run UUID."""
    run_id = UUID("12345678-1234-5678-9abc-123456789abc")
    return run_id
@pytest.fixture
def sample_event_data():
    """A minimal dbt event payload for a successful node."""
    node_info = {
        "unique_id": "model.test_project.test_model",
        "node_status": "success",
    }
    return {"node_info": node_info, "status": "success"}
@pytest.fixture
def sample_flow_run_context():
    """A minimal flow-run dict as passed to get_task_logger."""
    return {"id": "test-flow-run-id", "name": "test_flow_run"}
class TestNodeTaskTrackerInitialization:
    """Test NodeTaskTracker initialization and basic functionality."""

    def test_initializes_with_empty_state(self):
        """Test that tracker initializes with empty internal state."""
        tracker = NodeTaskTracker()
        # Verify all internal collections are empty by testing public methods
        assert tracker.get_task_result("unknown") is None
        assert tracker.get_node_status("unknown") is None
        assert tracker.is_node_complete("unknown") is False
        assert tracker.get_node_dependencies("unknown") == []
        assert tracker.get_task_run_id("unknown") is None
        assert tracker.get_task_run_name("unknown") is None

    def test_start_task_registers_task_and_creates_event(
        self, mock_task: Mock, sample_node_id: str
    ):
        """Test that start_task properly registers a task and creates completion event."""
        tracker = NodeTaskTracker()
        tracker.start_task(sample_node_id, mock_task)
        # Verify node is marked as incomplete initially
        assert tracker.is_node_complete(sample_node_id) is False

        # Verify we can wait for completion (indicates event was created)
        # Start a thread to complete the node
        def complete_node():
            # Small delay so the main thread is actually blocked in wait().
            time.sleep(0.01)
            tracker.set_node_status(sample_node_id, {}, "completed")

        thread = threading.Thread(target=complete_node)
        thread.daemon = True
        thread.start()
        # Wait for completion
        result = tracker.wait_for_node_completion(sample_node_id, timeout=0.1)
        assert result is True
class TestNodeTaskTrackerStatusManagement:
    """Test node status management functionality."""

    def test_set_node_status_stores_status_and_marks_complete(
        self, sample_node_id: str, sample_event_data: Dict[str, Any]
    ):
        """Test that set_node_status stores status and marks node as complete."""
        tracker = NodeTaskTracker()
        message = "Node completed successfully"

        tracker.set_node_status(sample_node_id, sample_event_data, message)

        # Stored status carries both the event payload and the message.
        status = tracker.get_node_status(sample_node_id)
        assert status is not None
        assert status["event_data"] == sample_event_data
        assert status["event_message"] == message
        # Setting status also flips the completion flag.
        assert tracker.is_node_complete(sample_node_id) is True

    def test_set_node_status_signals_completion_event(
        self, sample_node_id, sample_event_data
    ):
        """Test that set_node_status signals the completion event."""
        tracker = NodeTaskTracker()
        tracker.start_task(sample_node_id, Mock(spec=Task))

        completion_event = tracker._node_events[sample_node_id]
        assert not completion_event.is_set()

        tracker.set_node_status(sample_node_id, sample_event_data, "completed")

        assert completion_event.is_set()
class TestNodeTaskTrackerCompletionWaiting:
    """Test node completion waiting functionality."""

    @pytest.mark.parametrize(
        "timeout,expected_result",
        [
            (1.0, True),
            (None, True),
        ],
    )
    def test_wait_for_node_completion_when_complete(
        self, sample_node_id, sample_event_data, timeout, expected_result
    ):
        """Test that wait_for_node_completion returns True when node is already complete."""
        tracker = NodeTaskTracker()
        # Completing the node without start_task exercises the fast path where
        # no threading.Event was ever registered for the node.
        tracker.set_node_status(sample_node_id, sample_event_data, "completed")
        result = tracker.wait_for_node_completion(sample_node_id, timeout=timeout)
        assert result == expected_result

    def test_wait_for_node_completion_waits_for_completion(
        self, sample_node_id, sample_event_data
    ):
        """Test that wait_for_node_completion waits for node to complete."""
        tracker = NodeTaskTracker()
        tracker.start_task(sample_node_id, Mock(spec=Task))

        # Start a thread that will complete the node after a delay
        def complete_node():
            time.sleep(0.05)
            tracker.set_node_status(sample_node_id, sample_event_data, "completed")

        thread = threading.Thread(target=complete_node)
        thread.daemon = True
        thread.start()
        # Wait for completion
        result = tracker.wait_for_node_completion(sample_node_id, timeout=1.0)
        assert result is True
        assert tracker.is_node_complete(sample_node_id) is True
class TestNodeTaskTrackerTaskResults:
    """Test task result management functionality."""

    @pytest.mark.parametrize(
        "result_value",
        [
            Mock(spec=State),
            None,
        ],
    )
    def test_task_result_set_and_get(self, sample_node_id, result_value):
        """Test that set_task_result stores and get_task_result retrieves the result."""
        tracker = NodeTaskTracker()

        tracker.set_task_result(sample_node_id, result_value)

        # Round-trip through the public getter and the backing dict.
        assert tracker.get_task_result(sample_node_id) == result_value
        assert tracker._task_results[sample_node_id] == result_value
class TestNodeTaskTrackerDependencies:
    """Test dependency management functionality."""

    @pytest.mark.parametrize(
        "dependencies",
        [
            ["dep1", "dep2", "dep3"],
            [],
        ],
    )
    def test_node_dependencies_set_and_get(self, sample_node_id, dependencies):
        """Test that set_node_dependencies stores and get_node_dependencies retrieves dependencies."""
        tracker = NodeTaskTracker()

        tracker.set_node_dependencies(sample_node_id, dependencies)

        # Round-trip through the public getter and the backing dict.
        assert tracker.get_node_dependencies(sample_node_id) == dependencies
        assert tracker._node_dependencies[sample_node_id] == dependencies
class TestNodeTaskTrackerTaskRunIds:
    """Test task run ID management functionality."""

    def test_task_run_id_set_and_get(self, sample_node_id, sample_task_run_id):
        """Test that set_task_run_id stores and get_task_run_id retrieves the task run ID."""
        tracker = NodeTaskTracker()

        tracker.set_task_run_id(sample_node_id, sample_task_run_id)

        # Round-trip through the public getter and the backing dict.
        assert tracker.get_task_run_id(sample_node_id) == sample_task_run_id
        assert tracker._task_run_ids[sample_node_id] == sample_task_run_id
class TestNodeTaskTrackerTaskRunNames:
    """Test task run name management functionality."""

    def test_task_run_name_set_and_get(self, sample_node_id):
        """Test that set_task_run_name stores and get_task_run_name retrieves the task run name."""
        tracker = NodeTaskTracker()
        expected_name = "test_task_run"

        tracker.set_task_run_name(sample_node_id, expected_name)

        # Round-trip through the public getter and the backing dict.
        assert tracker.get_task_run_name(sample_node_id) == expected_name
        assert tracker._task_run_names[sample_node_id] == expected_name
class TestNodeTaskTrackerLogging:
    """Test logging functionality."""

    @pytest.mark.parametrize(
        "flow_context",
        [
            (None, None),
            ({"id": "test-id", "name": "test_name"}, None),
            (None, Mock(spec=Flow)),
            ({"id": "test-flow-run-id", "name": "test_flow_run"}, Mock(spec=Flow)),
        ],
    )
    def test_get_task_logger_with_various_contexts(
        self, sample_node_id, sample_task_run_id, flow_context
    ):
        """Test that get_task_logger works with various flow context combinations."""
        tracker = NodeTaskTracker()
        tracker.set_task_run_id(sample_node_id, sample_task_run_id)
        tracker.set_task_run_name(sample_node_id, "test_task_run")
        # Each parametrized case is an (optional flow_run dict, optional Flow) pair.
        flow_run, flow = flow_context
        # Configure mock flow if present
        if flow is not None:
            flow.name = "test_flow"
        logger = tracker.get_task_logger(
            sample_node_id,
            flow_run=flow_run,
            flow=flow,
        )
        assert isinstance(logger, PrefectLogAdapter)
        assert logger.extra["task_run_id"] == sample_task_run_id
        assert logger.extra["task_run_name"] == "test_task_run"
        assert logger.extra["task_name"] == "execute_dbt_node"
        # Verify flow context: missing pieces are rendered as "<unknown>"
        if flow_run:
            assert logger.extra["flow_run_id"] == flow_run["id"]
            assert logger.extra["flow_run_name"] == flow_run["name"]
        else:
            assert logger.extra["flow_run_id"] == "<unknown>"
            assert logger.extra["flow_run_name"] == "<unknown>"
        if flow:
            assert logger.extra["flow_name"] == flow.name
        else:
            assert logger.extra["flow_name"] == "<unknown>"

    def test_get_task_logger_with_additional_kwargs(
        self, sample_node_id, sample_task_run_id
    ):
        """Test that get_task_logger includes additional kwargs in extra data."""
        tracker = NodeTaskTracker()
        tracker.set_task_run_id(sample_node_id, sample_task_run_id)
        logger = tracker.get_task_logger(
            sample_node_id,
            custom_key="custom_value",
            another_key=123,
        )
        assert logger.extra["custom_key"] == "custom_value"
        assert logger.extra["another_key"] == 123

    def test_get_task_logger_without_task_run_id(self, sample_node_id):
        """Test that get_task_logger works without task run ID."""
        tracker = NodeTaskTracker()
        logger = tracker.get_task_logger(sample_node_id)
        # No run id/name registered -> the getters return None.
        assert logger.extra["task_run_id"] is None
        assert logger.extra["task_run_name"] is None
class TestNodeTaskTrackerThreadExecution:
    """Test thread execution functionality."""

    @pytest.fixture
    def mock_thread_execution_setup(self, monkeypatch):
        """Set up common mocking for thread execution tests.

        Patches run_task_sync and hydrated_context inside the tracker module
        so run_task_in_thread never touches a real Prefect engine.
        """
        # Mock run_task_sync
        mock_run_task = Mock(return_value=Mock(spec=State))
        monkeypatch.setattr("prefect_dbt.core._tracker.run_task_sync", mock_run_task)
        # Mock hydrated_context
        mock_context_manager = Mock()
        mock_context_manager.__enter__ = Mock()
        mock_context_manager.__exit__ = Mock()
        monkeypatch.setattr(
            "prefect_dbt.core._tracker.hydrated_context",
            Mock(return_value=mock_context_manager),
        )
        return mock_run_task

    @pytest.mark.parametrize(
        "return_value,expected_result",
        [
            (Mock(spec=State), Mock(spec=State)),
            (None, None),
        ],
    )
    def test_run_task_in_thread_stores_result(
        self,
        sample_node_id,
        mock_task,
        sample_task_run_id,
        mock_thread_execution_setup,
        return_value,
        expected_result,
    ):
        """Test that run_task_in_thread stores the correct result."""
        tracker = NodeTaskTracker()
        parameters = {"param": "value"}
        context = {"context": "data"}
        # Configure mock to return specified value
        mock_thread_execution_setup.return_value = return_value
        tracker.run_task_in_thread(
            sample_node_id, mock_task, sample_task_run_id, parameters, context
        )
        # Wait for thread to complete
        time.sleep(0.2)
        # Verify result was stored
        result = tracker.get_task_result(sample_node_id)
        if return_value is not None:
            # For mock objects, just verify it's a mock with the same spec
            assert isinstance(result, Mock)
            assert result._spec_class == return_value._spec_class
        else:
            assert result == expected_result

    @pytest.mark.parametrize(
        "dependencies_setup,expected_wait_count",
        [
            ({"dep1": Mock(spec=State), "dep2": Mock(spec=State)}, 2),
            ({"dep1": Mock(spec=State)}, 1),
            ({}, 0),
        ],
    )
    def test_run_task_in_thread_with_dependencies(
        self,
        sample_node_id,
        mock_task,
        sample_task_run_id,
        mock_state,
        mock_thread_execution_setup,
        dependencies_setup,
        expected_wait_count,
    ):
        """Test that run_task_in_thread handles dependencies correctly."""
        tracker = NodeTaskTracker()
        parameters = {"param": "value"}
        context = {"context": "data"}
        # Set up dependencies
        dependencies = list(dependencies_setup.keys())
        tracker.set_node_dependencies(sample_node_id, dependencies)
        # Set up dependency results
        for dep_id, result in dependencies_setup.items():
            tracker.set_task_result(dep_id, result)
        # Configure mock
        mock_thread_execution_setup.return_value = mock_state
        tracker.run_task_in_thread(
            sample_node_id, mock_task, sample_task_run_id, parameters, context
        )
        # Wait for thread to complete
        time.sleep(0.2)
        # Verify run_task_sync was called with correct dependencies
        mock_thread_execution_setup.assert_called_once()
        call_args = mock_thread_execution_setup.call_args
        assert len(call_args.kwargs["wait_for"]) == expected_wait_count

    def test_run_task_in_thread_starts_daemon_thread(
        self, sample_node_id, mock_task, sample_task_run_id, mock_thread_execution_setup
    ):
        """Test that run_task_in_thread starts a daemon thread."""
        tracker = NodeTaskTracker()
        parameters = {"param": "value"}
        context = {"context": "data"}
        tracker.run_task_in_thread(
            sample_node_id, mock_task, sample_task_run_id, parameters, context
        )
        # Wait for thread to start and potentially complete
        time.sleep(0.1)
        # Verify run_task_sync was called
        mock_thread_execution_setup.assert_called_once()
class TestNodeTaskTrackerIntegration:
    """Test integration scenarios and complex workflows."""

    def test_comprehensive_workflow_with_multiple_nodes(self):
        """Test a comprehensive workflow with multiple nodes and dependencies."""
        tracker = NodeTaskTracker()
        # Set up multiple nodes
        nodes = ["model_1", "model_2", "model_3"]
        mock_tasks = {node: Mock(spec=Task) for node in nodes}
        # Start all nodes
        for node in nodes:
            tracker.start_task(node, mock_tasks[node])
        # Set up dependencies: model_3 depends on model_1 and model_2
        tracker.set_node_dependencies("model_3", ["model_1", "model_2"])
        # Complete model_1 and model_2
        tracker.set_node_status("model_1", {"status": "success"}, "Model 1 complete")
        tracker.set_node_status("model_2", {"status": "success"}, "Model 2 complete")
        # Set task results for dependencies
        tracker.set_task_result("model_1", Mock(spec=State))
        tracker.set_task_result("model_2", Mock(spec=State))
        # Verify all nodes are in expected states
        assert tracker.is_node_complete("model_1") is True
        assert tracker.is_node_complete("model_2") is True
        assert tracker.is_node_complete("model_3") is False
        # Verify dependencies are correctly stored
        deps = tracker.get_node_dependencies("model_3")
        assert deps == ["model_1", "model_2"]
        # Verify loggers are available
        assert tracker.get_task_logger("model_1") is not None
        assert tracker.get_task_logger("model_2") is not None
        assert tracker.get_task_logger("model_3") is not None

    def test_concurrent_node_completion(self):
        """Test that multiple nodes can complete concurrently."""
        tracker = NodeTaskTracker()
        nodes = ["node_1", "node_2", "node_3"]
        # Start all nodes
        for node in nodes:
            tracker.start_task(node, Mock(spec=Task))

        # Complete nodes concurrently, staggered by a small per-node delay
        def complete_node(node_id: str, delay: float):
            time.sleep(delay)
            tracker.set_node_status(
                node_id, {"status": "success"}, f"{node_id} complete"
            )

        threads = []
        for i, node in enumerate(nodes):
            thread = threading.Thread(target=complete_node, args=(node, i * 0.05))
            thread.daemon = True
            threads.append(thread)
            thread.start()
        # Wait for all nodes to complete
        for node in nodes:
            assert tracker.wait_for_node_completion(node, timeout=1.0) is True
            assert tracker.is_node_complete(node) is True
        # Wait for all threads to finish
        for thread in threads:
            thread.join(timeout=1.0)

    def test_node_lifecycle_with_task_execution(
        self, sample_node_id, mock_task, sample_task_run_id, mock_state, monkeypatch
    ):
        """Test complete node lifecycle including task execution."""
        tracker = NodeTaskTracker()
        # Mock run_task_sync
        monkeypatch.setattr(
            "prefect_dbt.core._tracker.run_task_sync", Mock(return_value=mock_state)
        )
        # Mock hydrated_context
        mock_context_manager = Mock()
        mock_context_manager.__enter__ = Mock()
        mock_context_manager.__exit__ = Mock()
        monkeypatch.setattr(
            "prefect_dbt.core._tracker.hydrated_context",
            Mock(return_value=mock_context_manager),
        )
        # Start the node
        tracker.start_task(sample_node_id, mock_task)
        assert not tracker.is_node_complete(sample_node_id)
        # Set up task run ID and name
        tracker.set_task_run_id(sample_node_id, sample_task_run_id)
        tracker.set_task_run_name(sample_node_id, "test_task_run")
        # Run task in thread
        parameters = {"param": "value"}
        context = {"context": "data"}
        tracker.run_task_in_thread(
            sample_node_id, mock_task, sample_task_run_id, parameters, context
        )
        # Wait for task to complete
        time.sleep(0.2)
        # Verify task result was stored
        result = tracker.get_task_result(sample_node_id)
        assert result == mock_state
        # Complete the node
        tracker.set_node_status(sample_node_id, {"status": "success"}, "completed")
        assert tracker.is_node_complete(sample_node_id) is True
        # Verify logger has correct information
        logger = tracker.get_task_logger(sample_node_id)
        assert logger.extra["task_run_id"] == sample_task_run_id
        assert logger.extra["task_run_name"] == "test_task_run"
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-dbt/tests/core/test_tracker.py",
"license": "Apache License 2.0",
"lines": 475,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/prefect/_internal/websockets.py | """
Internal WebSocket proxy utilities for Prefect client connections.
This module provides shared WebSocket proxy connection logic and SSL configuration
to avoid duplication between events and logs clients.
"""
import ssl
import warnings
from functools import wraps
from typing import Any, Optional
from urllib.parse import urlparse
import certifi
from websockets.asyncio.client import connect
from prefect.settings import get_current_settings
def create_ssl_context_for_websocket(uri: str) -> Optional[ssl.SSLContext]:
    """Create SSL context for WebSocket connections based on URI scheme.

    Returns ``None`` for non-TLS (``ws://``) URIs; otherwise an SSL context
    that either skips verification or trusts the configured CA bundle.
    """
    if urlparse(uri).scheme != "wss":
        # Plain ws:// connections need no TLS configuration.
        return None

    settings = get_current_settings()
    if settings.api.tls_insecure_skip_verify:
        # Create an unverified context for insecure connections
        insecure_ctx = ssl.create_default_context()
        insecure_ctx.check_hostname = False
        insecure_ctx.verify_mode = ssl.CERT_NONE
        return insecure_ctx

    # Create a verified context, trusting the configured certificate file
    # or falling back to certifi's CA bundle.
    cafile = settings.api.ssl_cert_file or certifi.where()
    return ssl.create_default_context(cafile=cafile)
@wraps(connect)
def websocket_connect(uri: str, **kwargs: Any) -> connect:
    """
    Create a WebSocket connection with proxy and SSL support.

    Proxy support is automatic via HTTP_PROXY/HTTPS_PROXY environment variables.
    The websockets library handles proxy detection and connection automatically.
    """
    # Configure SSL for wss:// connections unless the caller supplied one.
    ssl_context = create_ssl_context_for_websocket(uri)
    if ssl_context:
        kwargs.setdefault("ssl", ssl_context)

    # Merge custom headers from settings into the handshake headers.
    custom_headers = get_current_settings().client.custom_headers
    if custom_headers:
        # Headers owned by the WebSocket handshake; user values are ignored.
        protected_headers = {
            "user-agent",
            "sec-websocket-key",
            "sec-websocket-version",
            "sec-websocket-extensions",
            "sec-websocket-protocol",
            "connection",
            "upgrade",
            "host",
        }
        additional_headers = kwargs.get("additional_headers", {})
        if not isinstance(additional_headers, dict):
            additional_headers = {}
        for header_name, header_value in custom_headers.items():
            if header_name.lower() in protected_headers:
                warnings.warn(
                    f"Custom header '{header_name}' is ignored because it conflicts with "
                    f"a protected WebSocket header. Protected headers include: "
                    f"User-Agent, Sec-WebSocket-Key, Sec-WebSocket-Version, "
                    f"Sec-WebSocket-Extensions, Sec-WebSocket-Protocol, Connection, "
                    f"Upgrade, Host",
                    UserWarning,
                    stacklevel=2,
                )
            else:
                additional_headers[header_name] = header_value
        kwargs["additional_headers"] = additional_headers

    return connect(uri, **kwargs)
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/_internal/websockets.py",
"license": "Apache License 2.0",
"lines": 73,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/prefect/logging/clients.py | import asyncio
from datetime import timedelta
from types import TracebackType
from typing import (
TYPE_CHECKING,
Any,
MutableMapping,
Optional,
Tuple,
Type,
cast,
)
from uuid import UUID
import orjson
from cachetools import TTLCache
from prometheus_client import Counter
from typing_extensions import Self
from websockets import Subprotocol
from websockets.asyncio.client import ClientConnection
from websockets.exceptions import (
ConnectionClosed,
ConnectionClosedError,
ConnectionClosedOK,
)
from prefect._internal.websockets import (
create_ssl_context_for_websocket,
websocket_connect,
)
from prefect.client.schemas.objects import Log
from prefect.logging import get_logger
from prefect.settings import (
PREFECT_API_AUTH_STRING,
PREFECT_API_KEY,
PREFECT_API_URL,
PREFECT_CLOUD_API_URL,
PREFECT_SERVER_ALLOW_EPHEMERAL_MODE,
)
from prefect.types._datetime import now
if TYPE_CHECKING:
import logging
from prefect.client.schemas.filters import LogFilter
logger: "logging.Logger" = get_logger(__name__)
# Prometheus counter: total logs received by subscribers, labeled by client class.
LOGS_OBSERVED = Counter(
    "prefect_logs_observed",
    "The number of logs observed by Prefect log subscribers",
    labelnames=["client"],
)
# Prometheus counter: websocket (re)connections, labeled by client class,
# direction (in/out), and whether it was the initial connect or a reconnect.
LOG_WEBSOCKET_CONNECTIONS = Counter(
    "prefect_log_websocket_connections",
    (
        "The number of times Prefect log clients have connected to a log stream, "
        "broken down by direction (in/out) and connection (initial/reconnect)"
    ),
    labelnames=["client", "direction", "connection"],
)
# Bounds for the TTL cache used to deduplicate logs across reconnects:
# at most SEEN_LOGS_SIZE entries, each remembered for SEEN_LOGS_TTL seconds.
SEEN_LOGS_SIZE = 500_000
SEEN_LOGS_TTL = 120
def http_to_ws(url: str) -> str:
    """Convert an HTTP(S) URL to the matching WebSocket URL, dropping any trailing slash."""
    scheme_map = {"https://": "wss://", "http://": "ws://"}
    converted = url
    for http_scheme, ws_scheme in scheme_map.items():
        converted = converted.replace(http_scheme, ws_scheme)
    return converted.rstrip("/")
def logs_out_socket_from_api_url(url: str) -> str:
    """Build the outgoing-logs WebSocket URL for a Prefect API URL."""
    # Equivalent to http_to_ws(url) + "/logs/out", inlined here
    ws_base = (
        url.replace("https://", "wss://").replace("http://", "ws://").rstrip("/")
    )
    return f"{ws_base}/logs/out"
def _get_api_url_and_key(
api_url: Optional[str], api_key: Optional[str]
) -> Tuple[str, str]:
api_url = api_url or PREFECT_API_URL.value()
api_key = api_key or PREFECT_API_KEY.value()
if not api_url or not api_key:
raise ValueError(
"api_url and api_key must be provided or set in the Prefect configuration"
)
return api_url, api_key
def get_logs_subscriber(
    filter: Optional["LogFilter"] = None,
    reconnection_attempts: int = 10,
) -> "PrefectLogsSubscriber":
    """
    Get a logs subscriber based on the current Prefect configuration.

    Similar to get_events_subscriber, this automatically detects whether
    you're using Prefect Cloud or OSS and returns the appropriate subscriber.
    """
    configured_url = PREFECT_API_URL.value()
    # A URL under the Cloud API root means we're talking to Prefect Cloud
    targets_cloud = isinstance(configured_url, str) and configured_url.startswith(
        PREFECT_CLOUD_API_URL.value()
    )
    if targets_cloud:
        return PrefectCloudLogsSubscriber(
            filter=filter, reconnection_attempts=reconnection_attempts
        )
    if configured_url:
        return PrefectLogsSubscriber(
            api_url=configured_url,
            filter=filter,
            reconnection_attempts=reconnection_attempts,
        )
    if PREFECT_SERVER_ALLOW_EPHEMERAL_MODE:
        # No API URL configured, but ephemeral mode is allowed: boot a local
        # subprocess server and subscribe to it.
        from prefect.server.api.server import SubprocessASGIServer

        server = SubprocessASGIServer()
        server.start()
        return PrefectLogsSubscriber(
            api_url=server.api_url,
            filter=filter,
            reconnection_attempts=reconnection_attempts,
        )
    raise ValueError(
        "No Prefect API URL provided. Please set PREFECT_API_URL to the address of a running Prefect server."
    )
class PrefectLogsSubscriber:
    """
    Subscribes to a Prefect logs stream, yielding logs as they occur.

    Example:

        from prefect.logging.clients import PrefectLogsSubscriber
        from prefect.client.schemas.filters import LogFilter, LogFilterLevel
        import logging

        filter = LogFilter(level=LogFilterLevel(ge_=logging.INFO))

        async with PrefectLogsSubscriber(filter=filter) as subscriber:
            async for log in subscriber:
                print(log.timestamp, log.level, log.message)
    """
    # Active websocket connection; None until __aenter__/_reconnect succeeds
    _websocket: Optional[ClientConnection]
    _filter: "LogFilter"
    # TTL cache of log ids already yielded, used to deduplicate after reconnects
    _seen_logs: MutableMapping[UUID, bool]
    _api_key: Optional[str]
    _auth_token: Optional[str]
    def __init__(
        self,
        api_url: Optional[str] = None,
        filter: Optional["LogFilter"] = None,
        reconnection_attempts: int = 10,
    ):
        """
        Args:
            api_url: The base URL for a Prefect workspace
            filter: Log filter to apply
            reconnection_attempts: When the client is disconnected, how many times
                the client should attempt to reconnect

        Raises:
            ValueError: if ``reconnection_attempts`` is negative.
        """
        self._api_key = None
        self._auth_token = PREFECT_API_AUTH_STRING.value()
        if not api_url:
            api_url = cast(str, PREFECT_API_URL.value())
        # Imported lazily to avoid a circular import at module load time
        from prefect.client.schemas.filters import LogFilter
        self._filter = filter or LogFilter()  # type: ignore[call-arg]
        self._seen_logs = TTLCache(maxsize=SEEN_LOGS_SIZE, ttl=SEEN_LOGS_TTL)
        socket_url = logs_out_socket_from_api_url(api_url)
        logger.debug("Connecting to %s", socket_url)
        # Configure SSL context for the connection (None for plain ws://)
        ssl_context = create_ssl_context_for_websocket(socket_url)
        connect_kwargs: dict[str, Any] = {"subprotocols": [Subprotocol("prefect")]}
        if ssl_context:
            connect_kwargs["ssl"] = ssl_context
        # The connector is created once and re-entered on every (re)connect
        self._connect = websocket_connect(socket_url, **connect_kwargs)
        self._websocket = None
        self._reconnection_attempts = reconnection_attempts
        if self._reconnection_attempts < 0:
            raise ValueError("reconnection_attempts must be a non-negative integer")
    @property
    def client_name(self) -> str:
        """Short client identifier, used as the metrics label."""
        return self.__class__.__name__
    async def __aenter__(self) -> Self:
        """Open the websocket connection and complete the handshake."""
        # Don't handle any errors in the initial connection, because these are most
        # likely a permission or configuration issue that should propagate
        try:
            await self._reconnect()
        finally:
            # Count the connection attempt even when it fails
            LOG_WEBSOCKET_CONNECTIONS.labels(self.client_name, "out", "initial").inc()
        return self
    async def _reconnect(self) -> None:
        """(Re)establish the connection, authenticate, and send the log filter."""
        logger.debug("Reconnecting...")
        if self._websocket:
            self._websocket = None
            # Close out the previous connection before dialing a new one
            await self._connect.__aexit__(None, None, None)
        self._websocket = await self._connect.__aenter__()
        # make sure we have actually connected
        logger.debug(" pinging...")
        pong = await self._websocket.ping()
        await pong
        # Send authentication message - logs WebSocket requires auth handshake
        # (the API key, when set, takes precedence over the server auth string)
        auth_token = self._api_key or self._auth_token
        auth_message = {"type": "auth", "token": auth_token}
        logger.debug(" authenticating...")
        await self._websocket.send(orjson.dumps(auth_message).decode())
        # Wait for auth response
        try:
            message = orjson.loads(await self._websocket.recv())
            logger.debug(" auth result %s", message)
            assert message["type"] == "auth_success", message.get("reason", "")
        except AssertionError as e:
            # Soft denial: the server replied with an auth_failure message
            # NOTE(review): consider `raise ... from e` to preserve the chain
            raise Exception(
                "Unable to authenticate to the log stream. Please ensure the "
                "provided api_key or auth_token you are using is valid for this environment. "
                f"Reason: {e.args[0]}"
            )
        except ConnectionClosedError as e:
            # Hard denial: the server closed the socket outright
            reason = getattr(e.rcvd, "reason", None)
            msg = "Unable to authenticate to the log stream. Please ensure the "
            msg += "provided api_key or auth_token you are using is valid for this environment. "
            msg += f"Reason: {reason}" if reason else ""
            raise Exception(msg) from e
        from prefect.client.schemas.filters import LogFilterTimestamp
        current_time = now("UTC")
        # Restrict the filter to a window opening one minute before (re)connect
        self._filter.timestamp = LogFilterTimestamp(
            after_=current_time - timedelta(minutes=1),  # type: ignore[arg-type]
            before_=current_time + timedelta(days=365),  # type: ignore[arg-type]
        )
        logger.debug(" filtering logs since %s...", self._filter.timestamp.after_)
        filter_message = {
            "type": "filter",
            "filter": self._filter.model_dump(mode="json"),
        }
        await self._websocket.send(orjson.dumps(filter_message).decode())
    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> None:
        """Close the underlying websocket connection."""
        self._websocket = None
        await self._connect.__aexit__(exc_type, exc_val, exc_tb)
    def __aiter__(self) -> Self:
        return self
    async def __anext__(self) -> Log:
        """Return the next unseen log, reconnecting as needed."""
        assert self._reconnection_attempts >= 0
        for i in range(self._reconnection_attempts + 1):  # pragma: no branch
            try:
                # If we're here and the websocket is None, then we've had a failure in a
                # previous reconnection attempt.
                #
                # Otherwise, after the first time through this loop, we're recovering
                # from a ConnectionClosed, so reconnect now.
                if not self._websocket or i > 0:
                    try:
                        await self._reconnect()
                    finally:
                        LOG_WEBSOCKET_CONNECTIONS.labels(
                            self.client_name, "out", "reconnect"
                        ).inc()
                assert self._websocket
                while True:
                    message = orjson.loads(await self._websocket.recv())
                    log: Log = Log.model_validate(message["log"])
                    # Skip logs already yielded (possible after a reconnect)
                    if log.id in self._seen_logs:
                        continue
                    self._seen_logs[log.id] = True
                    try:
                        return log
                    finally:
                        # Count the log even though we return it
                        LOGS_OBSERVED.labels(self.client_name).inc()
            except ConnectionClosedOK:
                logger.debug('Connection closed with "OK" status')
                raise StopAsyncIteration
            except ConnectionClosed:
                logger.debug(
                    "Connection closed with %s/%s attempts",
                    i + 1,
                    self._reconnection_attempts,
                )
                if i == self._reconnection_attempts:
                    # this was our final chance, raise the most recent error
                    raise
                if i > 2:
                    # let the first two attempts happen quickly in case this is just
                    # a standard load balancer timeout, but after that, just take a
                    # beat to let things come back around.
                    await asyncio.sleep(1)
        raise StopAsyncIteration
class PrefectCloudLogsSubscriber(PrefectLogsSubscriber):
    """Logs subscriber for Prefect Cloud"""
    def __init__(
        self,
        api_url: Optional[str] = None,
        api_key: Optional[str] = None,
        filter: Optional["LogFilter"] = None,
        reconnection_attempts: int = 10,
    ):
        """
        Args:
            api_url: The base URL for a Prefect Cloud workspace
            api_key: The API key of an actor with the see_flows scope
            filter: Log filter to apply
            reconnection_attempts: When the client is disconnected, how many times
                the client should attempt to reconnect
        """
        resolved_url, resolved_key = _get_api_url_and_key(api_url, api_key)
        super().__init__(
            api_url=resolved_url,
            filter=filter,
            reconnection_attempts=reconnection_attempts,
        )
        # For Cloud, the API key doubles as the websocket auth token
        self._api_key = resolved_key
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/logging/clients.py",
"license": "Apache License 2.0",
"lines": 287,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:tests/_internal/test_websockets.py | import json
import ssl
import warnings
from unittest.mock import patch
from websockets.asyncio.client import connect
from websockets.protocol import Subprotocol
from prefect._internal.websockets import (
create_ssl_context_for_websocket,
websocket_connect,
)
from prefect.events.clients import events_in_socket_from_api_url
from prefect.settings import (
PREFECT_API_TLS_INSECURE_SKIP_VERIFY,
PREFECT_CLIENT_CUSTOM_HEADERS,
temporary_settings,
)
def test_websocket_connect_factory():
    """Test that websocket_connect returns a connect instance"""
    result = websocket_connect("wss://example.com")
    assert isinstance(result, connect)
def test_create_ssl_context_for_websocket_ws_scheme():
    """Test SSL context creation returns None for ws:// URLs"""
    # Plaintext websocket URLs need no TLS configuration
    assert create_ssl_context_for_websocket("ws://example.com") is None
def test_create_ssl_context_for_websocket_wss_insecure():
    """Test SSL context creation for insecure connections"""
    with temporary_settings({PREFECT_API_TLS_INSECURE_SKIP_VERIFY: True}):
        ssl_context = create_ssl_context_for_websocket("wss://example.com")
        assert ssl_context is not None
        # Skipping verification disables both hostname checks and cert validation
        assert not ssl_context.check_hostname
        assert ssl_context.verify_mode == ssl.CERT_NONE
def test_create_ssl_context_for_websocket_wss_secure():
    """Test SSL context creation for secure connections"""
    with temporary_settings({PREFECT_API_TLS_INSECURE_SKIP_VERIFY: False}):
        ssl_context = create_ssl_context_for_websocket("wss://example.com")
        assert ssl_context is not None
        # A secure context verifies both the hostname and the peer certificate
        assert ssl_context.check_hostname is True
        assert ssl_context.verify_mode == ssl.CERT_REQUIRED
def test_websocket_connect_ssl_integration():
    """Test that websocket_connect integrates with SSL context creation"""
    with temporary_settings({PREFECT_API_TLS_INSECURE_SKIP_VERIFY: True}):
        connector = websocket_connect("wss://example.com")
        assert isinstance(connector, connect)
        # Verify SSL context is configured and reflects the insecure setting
        assert "ssl" in connector.connection_kwargs
        ssl_context = connector.connection_kwargs["ssl"]
        assert not ssl_context.check_hostname
        assert ssl_context.verify_mode == ssl.CERT_NONE
def test_websocket_connect_no_ssl_for_ws():
    """Test that websocket_connect doesn't add SSL for ws:// URLs"""
    connector = websocket_connect("ws://example.com")
    assert isinstance(connector, connect)
    # Verify SSL is not configured for ws:// URLs (plaintext transport)
    assert "ssl" not in connector.connection_kwargs
def test_websocket_connect_kwargs_preservation():
    """Test that additional kwargs are preserved"""
    additional_headers = {"Authorization": "Bearer token"}
    connector = websocket_connect(
        "wss://example.com", additional_headers=additional_headers
    )
    assert isinstance(connector, connect)
    # Verify caller-supplied headers survive the wrapper unchanged
    assert connector.additional_headers == additional_headers
def test_create_ssl_context_with_custom_cert_file():
    """Test SSL context creation with custom certificate file"""
    from prefect.settings import PREFECT_API_SSL_CERT_FILE
    with temporary_settings(
        {
            PREFECT_API_TLS_INSECURE_SKIP_VERIFY: False,
            PREFECT_API_SSL_CERT_FILE: "/custom/cert.pem",
        }
    ):
        # Patch so the cert file doesn't have to exist on disk
        with patch("ssl.create_default_context") as mock_ssl_context:
            create_ssl_context_for_websocket("wss://example.com")
            mock_ssl_context.assert_called_once_with(cafile="/custom/cert.pem")
def test_websocket_custom_headers():
    """Test that custom headers from settings are added to additional_headers"""
    custom_headers = {"X-Custom-Header": "test-value", "Authorization": "Bearer token"}
    with temporary_settings({PREFECT_CLIENT_CUSTOM_HEADERS: custom_headers}):
        connector = websocket_connect("wss://example.com")
        assert isinstance(connector, connect)
        # Verify custom headers configured via settings reach the connector
        assert connector.additional_headers["X-Custom-Header"] == "test-value"
        assert connector.additional_headers["Authorization"] == "Bearer token"
def test_websocket_custom_headers_merge_with_existing():
    """Test that custom headers merge with existing additional_headers"""
    custom_headers = {"X-Custom-Header": "test-value"}
    existing_headers = {"X-Existing-Header": "existing-value"}
    with temporary_settings({PREFECT_CLIENT_CUSTOM_HEADERS: custom_headers}):
        connector = websocket_connect(
            "wss://example.com", additional_headers=existing_headers
        )
        assert isinstance(connector, connect)
        # Verify both custom (settings) and existing (kwarg) headers are present
        assert connector.additional_headers["X-Custom-Header"] == "test-value"
        assert connector.additional_headers["X-Existing-Header"] == "existing-value"
def test_websocket_custom_headers_protected_headers_warning():
    """Test that protected headers generate warnings and are ignored"""
    custom_headers = {
        "User-Agent": "custom-agent",
        "Sec-WebSocket-Key": "custom-key",
        "X-Custom-Header": "test-value",
    }
    with temporary_settings({PREFECT_CLIENT_CUSTOM_HEADERS: custom_headers}):
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            connector = websocket_connect("wss://example.com")
            # Should have warnings for protected headers, in iteration order
            assert len(w) == 2
            assert "User-Agent" in str(w[0].message)
            assert "Sec-WebSocket-Key" in str(w[1].message)
            assert "protected WebSocket header" in str(w[0].message)
            # Verify only non-protected headers are in additional_headers
            assert connector.additional_headers["X-Custom-Header"] == "test-value"
            assert "User-Agent" not in connector.additional_headers
            assert "Sec-WebSocket-Key" not in connector.additional_headers
def test_websocket_custom_headers_empty_settings():
    """Test that empty custom headers don't cause issues"""
    with temporary_settings({PREFECT_CLIENT_CUSTOM_HEADERS: {}}):
        connector = websocket_connect("wss://example.com")
        assert isinstance(connector, connect)
        # Verify no additional headers when settings are empty (should be None or empty dict)
        assert (
            connector.additional_headers is None or connector.additional_headers == {}
        )
async def test_websocket_custom_headers_with_websocket_connect(hosted_api_server: str):
    """Test that custom headers work with the websocket_connect utility function"""
    custom_headers = {"X-Custom-Header": "test-value"}
    with temporary_settings({PREFECT_CLIENT_CUSTOM_HEADERS: custom_headers}):
        connector = websocket_connect(
            events_in_socket_from_api_url(hosted_api_server),
            subprotocols=[Subprotocol("prefect")],
        )
        # Make sure we can connect to the websocket successfully with the custom headers
        # The /events/in endpoint requires the "prefect" subprotocol and auth handshake
        async with connector as websocket:
            pong = await websocket.ping()
            await pong
            # Send auth message (required by the server)
            await websocket.send(json.dumps({"type": "auth", "token": None}))
            auth_response = json.loads(await websocket.recv())
            assert auth_response["type"] == "auth_success"
            # If we get here, the connection worked with custom headers
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/_internal/test_websockets.py",
"license": "Apache License 2.0",
"lines": 143,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:tests/logging/test_logs_subscriber.py | import json
import logging
from typing import Optional, Type
from unittest.mock import AsyncMock
from uuid import uuid4
import orjson
import pytest
from websockets.exceptions import ConnectionClosedError
from prefect.client.schemas.filters import LogFilter, LogFilterLevel
from prefect.client.schemas.objects import Log
from prefect.logging.clients import (
PrefectCloudLogsSubscriber,
PrefectLogsSubscriber,
get_logs_subscriber,
)
from prefect.settings import (
PREFECT_API_AUTH_STRING,
PREFECT_API_KEY,
PREFECT_API_URL,
PREFECT_CLOUD_API_URL,
PREFECT_SERVER_ALLOW_EPHEMERAL_MODE,
temporary_settings,
)
from prefect.types._datetime import now
@pytest.fixture
def example_log_1() -> Log:
    """An INFO-level flow-run log with no associated task run."""
    return Log(
        id=uuid4(),
        name="test.logger.flow",
        level=logging.INFO,
        message="Flow started successfully",
        timestamp=now("UTC"),
        flow_run_id=uuid4(),
        task_run_id=None,
    )
@pytest.fixture
def example_log_2() -> Log:
    """A WARNING-level log attached to both a flow run and a task run."""
    return Log(
        id=uuid4(),
        name="test.logger.task",
        level=logging.WARNING,
        message="Task execution warning",
        timestamp=now("UTC"),
        flow_run_id=uuid4(),
        task_run_id=uuid4(),
    )
@pytest.fixture
def ephemeral_settings():
    """Settings with no API URL/key but ephemeral server mode allowed."""
    with temporary_settings(
        {
            PREFECT_API_URL: None,
            PREFECT_API_KEY: None,
            PREFECT_CLOUD_API_URL: "https://cloudy/api",
            PREFECT_SERVER_ALLOW_EPHEMERAL_MODE: True,
        }
    ):
        yield
@pytest.fixture
def server_settings():
    """Settings pointing at an OSS server URL distinct from the Cloud API URL."""
    with temporary_settings(
        {
            PREFECT_API_URL: "https://locally/api",
            PREFECT_CLOUD_API_URL: "https://cloudy/api",
        }
    ):
        yield
@pytest.fixture
def cloud_settings():
    """Settings whose API URL lives under the Cloud API root (triggers Cloud detection)."""
    with temporary_settings(
        {
            PREFECT_API_URL: "https://cloudy/api/accounts/1/workspaces/2",
            PREFECT_CLOUD_API_URL: "https://cloudy/api",
            PREFECT_API_KEY: "howdy-doody",
        }
    ):
        yield
async def test_constructs_server_client(server_settings):
    # A non-Cloud API URL yields the base (OSS) subscriber
    assert isinstance(get_logs_subscriber(), PrefectLogsSubscriber)
async def test_constructs_client_when_ephemeral_enabled(ephemeral_settings):
    # With no API URL, ephemeral mode boots a subprocess server to subscribe to
    assert isinstance(get_logs_subscriber(), PrefectLogsSubscriber)
def test_errors_when_missing_api_url_and_ephemeral_disabled():
    # Without an API URL and with ephemeral mode off, there is nothing to connect to
    with temporary_settings(
        {
            PREFECT_API_URL: None,
            PREFECT_API_KEY: None,
            PREFECT_CLOUD_API_URL: "https://cloudy/api",
            PREFECT_SERVER_ALLOW_EPHEMERAL_MODE: False,
        }
    ):
        with pytest.raises(ValueError, match="PREFECT_API_URL"):
            get_logs_subscriber()
async def test_constructs_cloud_client(cloud_settings):
    # An API URL under the Cloud root yields the Cloud subscriber
    assert isinstance(get_logs_subscriber(), PrefectCloudLogsSubscriber)
def pytest_generate_tests(metafunc: pytest.Metafunc):
    """Parametrize tests over subscriber implementations.

    Tests requesting a `Subscriber` fixture run against both the base and
    Cloud subscribers; tests requesting `CloudSubscriber` run only against
    the Cloud subscriber.
    """
    fixtures = set(metafunc.fixturenames)
    cloud_subscribers = [
        (
            PrefectCloudLogsSubscriber,
            "/accounts/A/workspaces/W/logs/out",
            "my-token",
        ),
    ]
    subscribers = [
        # The base subscriber for OSS will just use the API URL, which is set to a
        # Cloud URL here, but it would usually be just /logs/out
        (PrefectLogsSubscriber, "/accounts/A/workspaces/W/logs/out", None),
    ] + cloud_subscribers
    if "Subscriber" in fixtures:
        metafunc.parametrize("Subscriber,socket_path,token", subscribers)
    elif "CloudSubscriber" in fixtures:
        metafunc.parametrize("CloudSubscriber,socket_path,token", cloud_subscribers)
# Create a modified Recorder and Puppeteer for logs instead of events
class LogRecorder:
    """Records what the fake logs server observed from the client under test."""
    connections: int
    path: Optional[str]
    logs: list[Log]
    token: Optional[str]
    filter: Optional[LogFilter]
    def __init__(self):
        self.connections = 0
        self.path = None
        self.logs = []
class LogPuppeteer:
    """Controls how the fake logs server behaves toward the client under test."""
    token: Optional[str]
    hard_auth_failure: bool
    refuse_any_further_connections: bool
    hard_disconnect_after: Optional[str]  # log id
    outgoing_logs: list[Log]
    def __init__(self):
        self.hard_auth_failure = False
        self.refuse_any_further_connections = False
        self.hard_disconnect_after = None
        self.outgoing_logs = []
@pytest.fixture
def log_recorder() -> LogRecorder:
    """A fresh recorder per test."""
    return LogRecorder()
@pytest.fixture
def log_puppeteer() -> LogPuppeteer:
    """A fresh puppeteer per test."""
    return LogPuppeteer()
@pytest.fixture
async def logs_server(
    unused_tcp_port: int, log_recorder: LogRecorder, log_puppeteer: LogPuppeteer
):
    """In-process websocket server that speaks the logs/out protocol."""
    from starlette.status import WS_1008_POLICY_VIOLATION
    from websockets.asyncio.server import Server, ServerConnection, serve
    server: Server
    async def handler(socket: ServerConnection) -> None:
        assert socket.request
        path = socket.request.path
        log_recorder.connections += 1
        if log_puppeteer.refuse_any_further_connections:
            raise ValueError("nope")
        log_recorder.path = path
        if path.endswith("/logs/out"):
            await outgoing_logs(socket)
    async def outgoing_logs(socket: ServerConnection):
        # 1. authentication
        auth_message = json.loads(await socket.recv())
        assert auth_message["type"] == "auth"
        log_recorder.token = auth_message["token"]
        if log_puppeteer.token != log_recorder.token:
            # soft failure explains itself before closing; hard failure just closes
            if not log_puppeteer.hard_auth_failure:
                await socket.send(
                    json.dumps({"type": "auth_failure", "reason": "nope"})
                )
            await socket.close(WS_1008_POLICY_VIOLATION)
            return
        await socket.send(json.dumps({"type": "auth_success"}))
        # 2. filter
        filter_message = json.loads(await socket.recv())
        assert filter_message["type"] == "filter"
        log_recorder.filter = LogFilter.model_validate(filter_message["filter"])
        # 3. send logs
        for log in log_puppeteer.outgoing_logs:
            await socket.send(
                json.dumps(
                    {
                        "type": "log",
                        "log": log.model_dump(mode="json"),
                    }
                )
            )
            if log_puppeteer.hard_disconnect_after == str(log.id):
                # simulate an abrupt server-side failure mid-stream (once)
                log_puppeteer.hard_disconnect_after = None
                raise ValueError("zonk")
    async with serve(handler, host="localhost", port=unused_tcp_port) as server:
        yield server
@pytest.fixture
def logs_api_url(logs_server, unused_tcp_port: int) -> str:
    """Base API URL for the in-process logs server."""
    return f"http://localhost:{unused_tcp_port}"
@pytest.fixture
def logs_cloud_api_url(logs_server, unused_tcp_port: int) -> str:
    """Cloud-shaped (account/workspace) API URL for the in-process logs server."""
    return f"http://localhost:{unused_tcp_port}/accounts/A/workspaces/W"
@pytest.fixture(autouse=True)
def api_setup(logs_cloud_api_url: str):
    """Point every test at the fake server with matching credentials."""
    with temporary_settings(
        updates={
            PREFECT_API_URL: logs_cloud_api_url,
            PREFECT_API_KEY: "my-token",
            PREFECT_API_AUTH_STRING: "my-token",  # For base subscriber
        }
    ):
        yield
async def test_subscriber_can_connect_with_defaults(
    Subscriber: Type[PrefectLogsSubscriber],
    socket_path: str,
    token: Optional[str],
    example_log_1: Log,
    example_log_2: Log,
    log_recorder: LogRecorder,
    log_puppeteer: LogPuppeteer,
):
    # For base subscriber (token=None), it will use auth string "my-token"
    # For cloud subscriber (token="my-token"), it will use the provided token
    expected_token = token or "my-token"
    log_puppeteer.token = expected_token
    log_puppeteer.outgoing_logs = [example_log_1, example_log_2]
    async with Subscriber() as subscriber:
        async for log in subscriber:
            log_recorder.logs.append(log)
    # One connection, correct path/token, all logs received in order
    assert log_recorder.connections == 1
    assert log_recorder.path == socket_path
    assert log_recorder.logs == [example_log_1, example_log_2]
    assert log_recorder.token == expected_token
    assert subscriber._filter
    assert log_recorder.filter == subscriber._filter
async def test_cloud_subscriber_complains_without_api_url_and_key(
    CloudSubscriber: Type[PrefectCloudLogsSubscriber],
    socket_path: str,
    token: Optional[str],
    example_log_1: Log,
    example_log_2: Log,
    log_recorder: LogRecorder,
    log_puppeteer: LogPuppeteer,
):
    # The Cloud subscriber requires both an API URL and key at construction time
    with temporary_settings(updates={PREFECT_API_KEY: "", PREFECT_API_URL: ""}):
        with pytest.raises(ValueError, match="must be provided or set"):
            CloudSubscriber()
async def test_subscriber_can_connect_and_receive_one_log(
    Subscriber: Type[PrefectLogsSubscriber],
    socket_path: str,
    token: Optional[str],
    example_log_1: Log,
    example_log_2: Log,
    log_recorder: LogRecorder,
    log_puppeteer: LogPuppeteer,
):
    # Explicit filter with zero reconnection attempts: exactly one connection
    expected_token = token or "my-token"
    log_puppeteer.token = expected_token
    log_puppeteer.outgoing_logs = [example_log_1, example_log_2]
    filter = LogFilter(level=LogFilterLevel(ge_=logging.INFO))
    async with Subscriber(
        filter=filter,
        reconnection_attempts=0,
    ) as subscriber:
        async for log in subscriber:
            log_recorder.logs.append(log)
    assert log_recorder.connections == 1
    assert log_recorder.path == socket_path
    assert log_recorder.logs == [example_log_1, example_log_2]
    assert log_recorder.token == expected_token
    assert log_recorder.filter == filter
async def test_subscriber_specifying_negative_reconnects_gets_error(
    Subscriber: Type[PrefectLogsSubscriber],
    socket_path: str,
    token: Optional[str],
    example_log_1: Log,
    example_log_2: Log,
    log_recorder: LogRecorder,
    log_puppeteer: LogPuppeteer,
):
    # Validation happens in __init__, so no connection is ever attempted
    expected_token = token or "my-token"
    log_puppeteer.token = expected_token
    log_puppeteer.outgoing_logs = [example_log_1, example_log_2]
    filter = LogFilter(level=LogFilterLevel(ge_=logging.INFO))
    with pytest.raises(ValueError, match="non-negative"):
        Subscriber(
            filter=filter,
            reconnection_attempts=-1,
        )
    assert log_recorder.connections == 0
async def test_subscriber_raises_on_invalid_auth_with_soft_denial(
    CloudSubscriber: Type[PrefectCloudLogsSubscriber],
    socket_path: str,
    token: Optional[str],
    logs_cloud_api_url: str,
    example_log_1: Log,
    example_log_2: Log,
    log_recorder: LogRecorder,
    log_puppeteer: LogPuppeteer,
):
    # Soft denial: the server sends an auth_failure message before closing
    log_puppeteer.token = "my-token"
    log_puppeteer.outgoing_logs = [example_log_1, example_log_2]
    filter = LogFilter(level=LogFilterLevel(ge_=logging.INFO))
    with pytest.raises(Exception, match="Unable to authenticate"):
        subscriber = CloudSubscriber(
            logs_cloud_api_url,
            "bogus",
            filter=filter,
            reconnection_attempts=0,
        )
        await subscriber.__aenter__()
    assert log_recorder.connections == 1
    assert log_recorder.path == socket_path
    assert log_recorder.token == "bogus"
    assert log_recorder.logs == []
async def test_cloud_subscriber_raises_on_invalid_auth_with_hard_denial(
    CloudSubscriber: Type[PrefectCloudLogsSubscriber],
    socket_path: str,
    token: Optional[str],
    logs_cloud_api_url: str,
    example_log_1: Log,
    example_log_2: Log,
    log_recorder: LogRecorder,
    log_puppeteer: LogPuppeteer,
):
    # Hard denial: the server closes without sending an auth_failure message
    log_puppeteer.hard_auth_failure = True
    log_puppeteer.token = "my-token"
    log_puppeteer.outgoing_logs = [example_log_1, example_log_2]
    filter = LogFilter(level=LogFilterLevel(ge_=logging.INFO))
    with pytest.raises(Exception, match="Unable to authenticate"):
        subscriber = CloudSubscriber(
            logs_cloud_api_url,
            "bogus",
            filter=filter,
            reconnection_attempts=0,
        )
        await subscriber.__aenter__()
    assert log_recorder.connections == 1
    assert log_recorder.path == socket_path
    assert log_recorder.token == "bogus"
    assert log_recorder.logs == []
async def test_subscriber_reconnects_on_hard_disconnects(
    Subscriber: Type[PrefectLogsSubscriber],
    socket_path: str,
    token: Optional[str],
    example_log_1: Log,
    example_log_2: Log,
    log_recorder: LogRecorder,
    log_puppeteer: LogPuppeteer,
):
    # Server drops mid-stream after the first log; the client reconnects,
    # resumes, and deduplicates anything already seen
    expected_token = token or "my-token"
    log_puppeteer.token = expected_token
    log_puppeteer.outgoing_logs = [example_log_1, example_log_2]
    log_puppeteer.hard_disconnect_after = str(example_log_1.id)
    filter = LogFilter(level=LogFilterLevel(ge_=logging.INFO))
    async with Subscriber(
        filter=filter,
        reconnection_attempts=2,
    ) as subscriber:
        async for log in subscriber:
            log_recorder.logs.append(log)
    assert log_recorder.connections == 2
    assert log_recorder.logs == [example_log_1, example_log_2]
async def test_subscriber_gives_up_after_so_many_attempts(
    Subscriber: Type[PrefectLogsSubscriber],
    socket_path: str,
    token: Optional[str],
    example_log_1: Log,
    example_log_2: Log,
    log_recorder: LogRecorder,
    log_puppeteer: LogPuppeteer,
):
    # After the first disconnect all further connections are refused, so the
    # client exhausts its retries and the final error propagates
    expected_token = token or "my-token"
    log_puppeteer.token = expected_token
    log_puppeteer.outgoing_logs = [example_log_1, example_log_2]
    log_puppeteer.hard_disconnect_after = str(example_log_1.id)
    filter = LogFilter(level=LogFilterLevel(ge_=logging.INFO))
    with pytest.raises(ConnectionClosedError):
        async with Subscriber(
            filter=filter,
            reconnection_attempts=4,
        ) as subscriber:
            async for log in subscriber:
                log_puppeteer.refuse_any_further_connections = True
                log_recorder.logs.append(log)
    # one initial connection plus four refused reconnection attempts
    assert log_recorder.connections == 1 + 4
async def test_subscriber_skips_duplicate_logs(
    Subscriber: Type[PrefectLogsSubscriber],
    socket_path: str,
    token: Optional[str],
    example_log_1: Log,
    example_log_2: Log,
    log_recorder: LogRecorder,
    log_puppeteer: LogPuppeteer,
):
    # The same log id sent twice should only be yielded once (TTL cache dedup)
    expected_token = token or "my-token"
    log_puppeteer.token = expected_token
    log_puppeteer.outgoing_logs = [example_log_1, example_log_1, example_log_2]
    filter = LogFilter(level=LogFilterLevel(ge_=logging.INFO))
    async with Subscriber(filter=filter) as subscriber:
        async for log in subscriber:
            log_recorder.logs.append(log)
    assert log_recorder.logs == [example_log_1, example_log_2]
def test_http_to_ws_conversion():
    """Test HTTP to WebSocket URL conversion utility"""
    from prefect.logging.clients import http_to_ws
    # Scheme is swapped and any trailing slash is removed
    assert http_to_ws("http://example.com/api") == "ws://example.com/api"
    assert http_to_ws("https://example.com/api/") == "wss://example.com/api"
    assert http_to_ws("https://example.com/api/v1/") == "wss://example.com/api/v1"
def test_logs_out_socket_from_api_url():
    """Test log WebSocket URL construction"""
    from prefect.logging.clients import logs_out_socket_from_api_url
    # The /logs/out path is appended after the scheme swap
    assert (
        logs_out_socket_from_api_url("http://example.com/api")
        == "ws://example.com/api/logs/out"
    )
    assert (
        logs_out_socket_from_api_url("https://example.com/api/")
        == "wss://example.com/api/logs/out"
    )
def test_get_api_url_and_key_missing_values():
    """Test _get_api_url_and_key error handling"""
    from prefect.logging.clients import _get_api_url_and_key
    # Either value missing (and unavailable from settings) is an error
    with temporary_settings({PREFECT_API_URL: None, PREFECT_API_KEY: None}):
        with pytest.raises(ValueError, match="must be provided or set"):
            _get_api_url_and_key(None, None)
        with pytest.raises(ValueError, match="must be provided or set"):
            _get_api_url_and_key("http://example.com", None)
        with pytest.raises(ValueError, match="must be provided or set"):
            _get_api_url_and_key(None, "my-key")
def test_get_api_url_and_key_success():
    """Test _get_api_url_and_key with valid values"""
    from prefect.logging.clients import _get_api_url_and_key
    # Explicit arguments are returned unchanged (settings not consulted)
    url, key = _get_api_url_and_key("http://example.com", "my-key")
    assert url == "http://example.com"
    assert key == "my-key"
def test_subscriber_auth_token_missing_error():
    """Test authentication error when no token is available"""
    from prefect.logging.clients import PrefectLogsSubscriber
    # Neither an API key nor a server auth string is configured
    with temporary_settings({PREFECT_API_AUTH_STRING: None}):
        subscriber = PrefectLogsSubscriber("http://example.com")
        subscriber._api_key = None
        subscriber._auth_token = None
        # The auth check logic should fail when there's no token
        auth_token = subscriber._api_key or subscriber._auth_token
        assert auth_token is None  # Verify that no token is available
        # This test validates that the subscriber correctly identifies missing tokens
        # The actual connection would fail with ValueError during _reconnect()
async def test_subscriber_connection_closed_gracefully_stops_iteration():
    """Test that ConnectionClosedOK gracefully stops iteration"""
    from unittest.mock import AsyncMock
    from websockets.exceptions import ConnectionClosedOK
    from prefect.logging.clients import PrefectLogsSubscriber
    subscriber = PrefectLogsSubscriber("http://example.com")
    subscriber._websocket = AsyncMock()
    # A normal server-side close translates to the end of async iteration
    subscriber._websocket.recv.side_effect = ConnectionClosedOK(None, None, None)
    with pytest.raises(StopAsyncIteration):
        await subscriber.__anext__()
def test_subscriber_sleep_logic():
    """Test that sleep logic is correct (without actually sleeping)"""
    # The reconnection loop only sleeps on later attempts: `if i > 2: sleep(1)`
    # Attempts 3 and 4 would trigger the sleep; attempts 1 and 2 would not.
    sleeping_attempts = [attempt for attempt in (3, 4) if attempt > 2]
    quick_attempts = [attempt for attempt in (1, 2) if attempt > 2]
    assert sleeping_attempts == [3, 4]
    assert quick_attempts == []
async def test_subscriber_auth_with_none_token():
    """Test that authentication works when auth token is None (Prefect server)"""
    from prefect.logging.clients import PrefectLogsSubscriber

    with temporary_settings({PREFECT_API_AUTH_STRING: None}):
        subscriber = PrefectLogsSubscriber("http://example.com")
        subscriber._api_key = None
        subscriber._auth_token = None
        # Mock the websocket connection to succeed
        mock_connect = AsyncMock()
        mock_websocket = AsyncMock()
        # Create a mock pong that can be awaited
        class MockPong:
            def __await__(self):
                return iter([None])
        mock_websocket.ping.return_value = MockPong()
        # Mock auth success response
        mock_websocket.recv.return_value = orjson.dumps(
            {"type": "auth_success"}
        ).decode()
        # _connect is used as an async context manager by the subscriber
        mock_connect.__aenter__.return_value = mock_websocket
        mock_connect.__aexit__ = AsyncMock()
        subscriber._connect = mock_connect
        # Should not raise ValueError - None tokens are valid for Prefect server
        await subscriber._reconnect()
        # Verify auth message was sent with None token
        # _reconnect sends two messages: auth first, then filter
        assert mock_websocket.send.call_count == 2
        auth_call = mock_websocket.send.call_args_list[0]
        # auth_call[0] is the positional-args tuple; [0][0] is the payload
        auth_message = orjson.loads(auth_call[0][0])
        assert auth_message["type"] == "auth"
        assert auth_message["token"] is None
async def test_subscriber_auth_with_empty_token():
    """Test that authentication works when auth token is empty string"""
    from prefect.logging.clients import PrefectLogsSubscriber

    with temporary_settings({PREFECT_API_AUTH_STRING: ""}):
        subscriber = PrefectLogsSubscriber("http://example.com")
        subscriber._api_key = None
        subscriber._auth_token = ""
        # Mock the websocket connection to succeed
        mock_connect = AsyncMock()
        mock_websocket = AsyncMock()
        # Create a mock pong that can be awaited
        class MockPong:
            def __await__(self):
                return iter([None])
        mock_websocket.ping.return_value = MockPong()
        # Mock auth success response
        mock_websocket.recv.return_value = orjson.dumps(
            {"type": "auth_success"}
        ).decode()
        # _connect is used as an async context manager by the subscriber
        mock_connect.__aenter__.return_value = mock_websocket
        mock_connect.__aexit__ = AsyncMock()
        subscriber._connect = mock_connect
        # Should not raise ValueError - empty tokens are valid
        await subscriber._reconnect()
        # Verify auth message was sent with empty token
        # (send is called twice: the auth message, then the filter message)
        assert mock_websocket.send.call_count == 2
        auth_call = mock_websocket.send.call_args_list[0]
        auth_message = orjson.loads(auth_call[0][0])
        assert auth_message["type"] == "auth"
        assert auth_message["token"] == ""
async def test_subscriber_auth_with_falsy_tokens():
    """Test authentication with various falsy token values"""
    from prefect.logging.clients import PrefectLogsSubscriber

    falsy_values = [None, ""]  # Only test string-compatible falsy values
    # Each iteration builds a fresh subscriber and mock connection so the
    # send/recv call counts start from zero for every token value.
    for falsy_token in falsy_values:
        with temporary_settings({PREFECT_API_AUTH_STRING: falsy_token}):
            subscriber = PrefectLogsSubscriber("http://example.com")
            subscriber._api_key = None
            subscriber._auth_token = falsy_token
            # Mock the websocket connection to succeed
            mock_connect = AsyncMock()
            mock_websocket = AsyncMock()
            # Create a mock pong that can be awaited
            class MockPong:
                def __await__(self):
                    return iter([None])
            mock_websocket.ping.return_value = MockPong()
            # Mock auth success response
            mock_websocket.recv.return_value = orjson.dumps(
                {"type": "auth_success"}
            ).decode()
            mock_connect.__aenter__.return_value = mock_websocket
            mock_connect.__aexit__ = AsyncMock()
            subscriber._connect = mock_connect
            # Should not raise ValueError - all falsy tokens should be sent
            await subscriber._reconnect()
            # Verify auth message was sent with the falsy token
            # (send is called twice: the auth message, then the filter message)
            assert mock_websocket.send.call_count == 2
            auth_call = mock_websocket.send.call_args_list[0]
            auth_message = orjson.loads(auth_call[0][0])
            assert auth_message["type"] == "auth"
            assert auth_message["token"] == falsy_token
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/logging/test_logs_subscriber.py",
"license": "Apache License 2.0",
"lines": 552,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/prefect/server/logs/messaging.py | """
Log messaging for streaming logs through the messaging system.
"""
from __future__ import annotations
from contextlib import asynccontextmanager
from typing import TYPE_CHECKING, AsyncGenerator
from prefect.logging import get_logger
from prefect.server.schemas.core import Log
from prefect.server.utilities import messaging
from prefect.settings.context import get_current_settings
if TYPE_CHECKING:
import logging
logger: "logging.Logger" = get_logger(__name__)
@asynccontextmanager
async def create_log_publisher() -> AsyncGenerator[messaging.Publisher, None]:
    """
    Open a publisher bound to the "logs" messaging topic.

    Yields:
        A messaging publisher configured for the "logs" topic
    """
    publisher_context = messaging.create_publisher(topic="logs")
    async with publisher_context as publisher:
        yield publisher
async def publish_logs(logs: list[Log]) -> None:
    """
    Publish a batch of logs to the messaging system.

    No-op when stream publishing is disabled or when the batch is empty.

    Args:
        logs: The logs to publish
    """
    settings = get_current_settings()
    if not (settings.server.logs.stream_publishing_enabled and logs):
        return

    async with create_log_publisher() as publisher:
        for entry in logs:
            # Attach the log's id as a message attribute when it has one
            attributes = {"log_id": str(entry.id)} if entry.id else {}
            await publisher.publish_data(
                data=entry.model_dump_json().encode(),
                attributes=attributes,
            )
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/server/logs/messaging.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/prefect:src/prefect/server/logs/stream.py | """
Log streaming for live log distribution via websockets.
"""
from __future__ import annotations
import asyncio
from asyncio import Queue
from contextlib import asynccontextmanager
from typing import (
TYPE_CHECKING,
AsyncGenerator,
AsyncIterable,
NoReturn,
)
from prefect.logging import get_logger
from prefect.server.schemas.core import Log
from prefect.server.schemas.filters import LogFilter
from prefect.server.services.base import RunInEphemeralServers, RunInWebservers, Service
from prefect.server.utilities import messaging
from prefect.settings.context import get_current_settings
from prefect.settings.models.server.services import ServicesBaseSetting
if TYPE_CHECKING:
import logging
logger: "logging.Logger" = get_logger(__name__)
subscribers: set["Queue[Log]"] = set()
filters: dict["Queue[Log]", LogFilter] = {}
# The maximum number of messages that can be waiting for one subscriber, after which
# new messages will be dropped
SUBSCRIPTION_BACKLOG = 256
@asynccontextmanager
async def subscribed(
    filter: LogFilter,
) -> AsyncGenerator["Queue[Log]", None]:
    """
    Register a bounded subscription queue for logs matching the given filter.

    Args:
        filter: The log filter to apply

    Yields:
        A queue that will receive matching logs
    """
    subscription: "Queue[Log]" = Queue(maxsize=SUBSCRIPTION_BACKLOG)

    subscribers.add(subscription)
    filters[subscription] = filter
    try:
        yield subscription
    finally:
        # Always unregister, even if the consumer raised
        subscribers.remove(subscription)
        filters.pop(subscription)
@asynccontextmanager
async def logs(
    filter: LogFilter,
) -> AsyncGenerator[AsyncIterable[Log | None], None]:
    """
    Create a stream of logs matching the given filter.

    Args:
        filter: The log filter to apply

    Yields:
        An async iterable of logs (or None for timeouts)
    """
    async with subscribed(filter) as queue:
        async def consume() -> AsyncGenerator[Log | None, None]:
            while True:
                # Use a brief timeout to allow for cancellation, especially when a
                # client disconnects. Without a timeout here, a consumer may block
                # forever waiting for a message to be put on the queue, and never notice
                # that their client (like a websocket) has actually disconnected.
                try:
                    log = await asyncio.wait_for(queue.get(), timeout=1)
                except asyncio.TimeoutError:
                    # If the queue is empty, we'll yield to the caller with a None in
                    # order to give it control over what happens next. This helps with
                    # the outbound websocket, where we want to check if the client is
                    # still connected periodically.
                    yield None
                    continue
                yield log
        # Hand the generator object itself to the caller; the subscription is
        # torn down when the caller exits this context manager.
        yield consume()
def log_matches_filter(log: Log, filter: LogFilter) -> bool:
    """
    Check if a log matches the given filter criteria.

    Args:
        log: The log to check
        filter: The filter to apply

    Returns:
        True if the log matches the filter, False otherwise
    """
    level = filter.level
    if level:
        # Level must fall within the [ge_, le_] bounds when present
        if level.ge_ is not None and log.level < level.ge_:
            return False
        if level.le_ is not None and log.level > level.le_:
            return False

    window = filter.timestamp
    if window:
        # Timestamp must fall within the (after_, before_) window when present
        if window.before_ is not None and log.timestamp > window.before_:
            return False
        if window.after_ is not None and log.timestamp < window.after_:
            return False

    flow_run = filter.flow_run_id
    if flow_run and flow_run.any_ is not None:
        if log.flow_run_id not in flow_run.any_:
            return False

    task_run = filter.task_run_id
    if task_run:
        if task_run.any_ is not None and log.task_run_id not in task_run.any_:
            return False
        if task_run.is_null_ is not None:
            # is_null_ asserts whether the log is detached from any task run
            if task_run.is_null_ != (log.task_run_id is None):
                return False

    return True
@asynccontextmanager
async def distributor() -> AsyncGenerator[messaging.MessageHandler, None]:
    """
    Create a message handler that distributes logs to subscribed clients.

    Messages without a payload or attributes are ignored, logs that fail to
    parse are logged and dropped, and subscribers with a full backlog lose
    messages rather than blocking the consumer.

    Yields:
        A message handler function
    """

    async def message_handler(message: messaging.Message):
        # Explicit guards instead of `assert`: asserts are stripped under
        # `python -O`, and malformed messages should be dropped rather than
        # crash the consumer.
        if not message.data or not message.attributes:
            return

        # Skip parsing entirely when nobody is subscribed
        if not subscribers:
            return

        try:
            log = Log.model_validate_json(message.data)
        except Exception as e:
            # Lazy %-style args defer string formatting to the logging
            # framework instead of building an f-string unconditionally
            logger.warning("Failed to parse log message: %s", e)
            return

        for queue in subscribers:
            if not log_matches_filter(log, filters[queue]):
                continue
            try:
                queue.put_nowait(log)
            except asyncio.QueueFull:
                # This subscriber's backlog is full; drop the log for them
                continue

    yield message_handler
# Handle to the background task created by start_distributor(); None while the
# distributor is not running.
_distributor_task: asyncio.Task[None] | None = None
# Event set by run_distributor() once its messaging subscription is live; used
# by start_distributor() to wait for readiness.
_distributor_started: asyncio.Event | None = None
async def start_distributor() -> None:
    """Starts the distributor consumer as a global background task"""
    global _distributor_task
    global _distributor_started

    # Idempotent: a second call while the task is running is a no-op
    if _distributor_task:
        return

    started = asyncio.Event()
    _distributor_started = started
    _distributor_task = asyncio.create_task(run_distributor(started))
    # Block until the consumer's subscription is established
    await started.wait()
async def stop_distributor() -> None:
    """Stops the distributor consumer global background task"""
    global _distributor_task
    global _distributor_started

    task = _distributor_task
    if task is None:
        return

    # Clear the module state before awaiting so concurrent callers see the
    # distributor as stopped immediately
    _distributor_task = None
    _distributor_started = None

    task.cancel()
    try:
        await asyncio.shield(task)
    except asyncio.CancelledError:
        pass
class LogDistributor(RunInEphemeralServers, RunInWebservers, Service):
    """Service for distributing logs to websocket subscribers"""

    # Human-readable service name
    name: str = "LogDistributor"

    @classmethod
    def service_settings(cls) -> ServicesBaseSetting:
        # This service has no dedicated settings model; enablement is driven
        # by enabled() / environment_variable_name() below
        raise NotImplementedError("LogDistributor does not have settings")

    @classmethod
    def environment_variable_name(cls) -> str:
        # Environment override flag for this service
        return "PREFECT_SERVER_LOGS_STREAM_OUT_ENABLED"

    @classmethod
    def enabled(cls) -> bool:
        return get_current_settings().server.logs.stream_out_enabled

    async def start(self) -> NoReturn:
        # Start (or reuse) the global distributor task, then block on it for
        # the lifetime of the service
        await start_distributor()
        try:
            # start_distributor should have set _distributor_task
            assert _distributor_task is not None
            await _distributor_task
        except asyncio.CancelledError:
            pass
        # This should never be reached due to the infinite loop above
        raise RuntimeError("LogDistributor service unexpectedly terminated")

    async def stop(self) -> None:
        await stop_distributor()
async def run_distributor(started: asyncio.Event) -> NoReturn:
    """
    Runs the distributor consumer forever until it is cancelled.

    Args:
        started: Event set once the messaging subscription is established,
            allowing start_distributor() to unblock.
    """
    # NOTE: the previous version declared `global _distributor_started` here,
    # but never assigned that module global -- the declaration was dead code
    # and has been removed.
    async with messaging.ephemeral_subscription(
        topic="logs",
    ) as create_consumer_kwargs:
        # Signal readiness before entering the (normally endless) consume loop
        started.set()
        async with distributor() as handler:
            consumer = messaging.create_consumer(**create_consumer_kwargs)
            await consumer.run(
                handler=handler,
            )

    # This should never be reached due to the infinite nature of consumer.run()
    raise RuntimeError("Log distributor consumer unexpectedly terminated")
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/server/logs/stream.py",
"license": "Apache License 2.0",
"lines": 209,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/prefect/settings/models/server/logs.py | from __future__ import annotations
from typing import ClassVar
from pydantic import Field
from pydantic_settings import SettingsConfigDict
from prefect.settings.base import PrefectBaseSettings, build_settings_config
class ServerLogsSettings(PrefectBaseSettings):
    """
    Settings for controlling behavior of the logs subsystem
    """

    # Settings path ("server", "logs") -- presumably maps to the
    # PREFECT_SERVER_LOGS_* environment prefix; verify in build_settings_config
    model_config: ClassVar[SettingsConfigDict] = build_settings_config(
        ("server", "logs")
    )

    # Off by default: websocket out-streaming is opt-in
    stream_out_enabled: bool = Field(
        default=False,
        description="Whether or not to stream logs out to the API via websockets.",
    )

    # Off by default: publishing into the streaming system is opt-in
    stream_publishing_enabled: bool = Field(
        default=False,
        description="Whether or not to publish logs to the streaming system.",
    )
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/settings/models/server/logs.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/prefect:tests/server/api/test_logs_websocket.py | import datetime
from contextlib import asynccontextmanager
from typing import AsyncGenerator, AsyncIterable
from uuid import uuid4
import pytest
from starlette.status import (
WS_1002_PROTOCOL_ERROR,
WS_1008_POLICY_VIOLATION,
)
from starlette.testclient import TestClient
from starlette.websockets import WebSocketDisconnect
from prefect.server.schemas.core import Log
from prefect.server.schemas.filters import LogFilter, LogFilterLevel
from prefect.settings import PREFECT_SERVER_API_AUTH_STRING, temporary_settings
from prefect.types._datetime import now
@pytest.fixture
def stream_mock(
    monkeypatch: pytest.MonkeyPatch,
):
    """Mock the logs stream to return test logs"""
    # Create sample logs -- distinct levels/messages so tests can tell the
    # two apart in received order
    sample_log1 = Log(
        id=uuid4(),
        name="test.logger",
        level=20,
        message="Test message 1",
        timestamp=now("UTC"),
        flow_run_id=uuid4(),
        task_run_id=uuid4(),
    )
    sample_log2 = Log(
        id=uuid4(),
        name="test.logger2",
        level=40,
        message="Test message 2",
        timestamp=now("UTC") + datetime.timedelta(seconds=1),
        flow_run_id=uuid4(),
        task_run_id=None,
    )
    # Mirrors the signature of prefect.server.logs.stream.logs so it can be
    # swapped in transparently
    @asynccontextmanager
    async def mock_stream(
        filter: LogFilter,
    ) -> AsyncGenerator[AsyncIterable[Log], None]:
        assert isinstance(filter, LogFilter)
        async def _fake_stream() -> AsyncGenerator[Log, None]:
            yield sample_log1
            yield sample_log2
        yield _fake_stream()
    # Replace the real stream with the canned two-log stream above
    monkeypatch.setattr("prefect.server.logs.stream.logs", mock_stream)
@pytest.fixture
def default_liberal_filter() -> LogFilter:
    """A permissive filter that matches logs at any level >= 1."""
    low_threshold = LogFilterLevel(ge_=1)
    return LogFilter(level=low_threshold)
def test_streaming_requires_prefect_subprotocol_when_auth_configured(
    test_client: TestClient,
    default_liberal_filter: LogFilter,
):
    """Test that websocket requires prefect subprotocol when auth is configured"""
    with temporary_settings(updates={PREFECT_SERVER_API_AUTH_STRING: "valid-token"}):
        with pytest.raises(WebSocketDisconnect) as exception:
            # Connecting with no subprotocols while auth is configured should
            # be rejected during the handshake
            with test_client.websocket_connect("api/logs/out", subprotocols=[]):
                pass
    assert exception.value.code == WS_1002_PROTOCOL_ERROR
def test_streaming_accepts_legacy_clients_without_auth(
    test_client: TestClient,
    default_liberal_filter: LogFilter,
):
    """When auth is not configured, old clients without prefect subprotocol are accepted."""
    # Legacy mode: connection is accepted but needs to send filter
    # The test passes implicitly if no WebSocketDisconnect is raised here
    with test_client.websocket_connect("api/logs/out", subprotocols=[]) as websocket:
        # Legacy clients still need to send a filter to subscribe
        websocket.send_json(
            {"type": "filter", "filter": default_liberal_filter.model_dump(mode="json")}
        )
def test_streaming_requires_authentication(
    test_client: TestClient,
    default_liberal_filter: LogFilter,
):
    """Test that websocket requires authentication"""
    with pytest.raises(WebSocketDisconnect) as exception:
        with test_client.websocket_connect(
            "api/logs/out", subprotocols=["prefect"]
        ) as websocket:
            # The first message must be an auth message, otherwise the server
            # will disconnect the websocket.
            message = {
                "type": "filter",
                "filter": default_liberal_filter.model_dump(mode="json"),
            }
            websocket.send_json(message)
            # Receiving surfaces the server-side close as WebSocketDisconnect
            websocket.receive_json()
    assert exception.value.code == WS_1008_POLICY_VIOLATION
    assert exception.value.reason == "Expected 'auth' message"
async def test_streaming_requires_a_filter(
    monkeypatch: pytest.MonkeyPatch,
    test_client: TestClient,
    default_liberal_filter: LogFilter,
    stream_mock: None,
):
    """Test that websocket requires a filter message after auth"""
    with pytest.raises(WebSocketDisconnect) as exception:
        with test_client.websocket_connect(
            "api/logs/out",
            subprotocols=["prefect"],
        ) as websocket:
            auth_message = {
                "type": "auth",
                "token": "my-token",
            }
            websocket.send_json(auth_message)
            message = websocket.receive_json()  # Auth success response
            assert message["type"] == "auth_success"
            # The second message must have type "filter"; anything else is a
            # protocol error
            filter_message = {
                "type": "what?",  # Wrong type
                "filter": default_liberal_filter.model_dump(mode="json"),
            }
            websocket.send_json(filter_message)
            websocket.receive_json()  # will prompt the server-side disconnection
    assert exception.value.code == WS_1002_PROTOCOL_ERROR
    assert exception.value.reason == "Expected 'filter' message"
async def test_streaming_requires_a_valid_filter(
    monkeypatch: pytest.MonkeyPatch,
    test_client: TestClient,
    default_liberal_filter: LogFilter,
    stream_mock: None,
):
    """Test that websocket requires a valid filter"""
    with pytest.raises(WebSocketDisconnect) as exception:
        with test_client.websocket_connect(
            "api/logs/out",
            subprotocols=["prefect"],
        ) as websocket:
            auth_message = {
                "type": "auth",
                "token": "my-token",
            }
            websocket.send_json(auth_message)
            message = websocket.receive_json()  # Auth success response
            assert message["type"] == "auth_success"
            # A filter payload that cannot be parsed as a LogFilter should
            # close the connection with a protocol error
            filter_message = {"type": "filter", "filter": "invalid_filter"}
            websocket.send_json(filter_message)
            websocket.receive_json()  # will prompt the server-side disconnection
    assert exception.value.code == WS_1002_PROTOCOL_ERROR
    assert exception.value.reason.startswith("Invalid filter:")
async def test_successful_log_streaming(
    monkeypatch: pytest.MonkeyPatch,
    test_client: TestClient,
    default_liberal_filter: LogFilter,
    stream_mock: None,
):
    """Test successful log streaming"""
    # stream_mock patches the server-side stream to emit exactly two logs
    with test_client.websocket_connect(
        "api/logs/out",
        subprotocols=["prefect"],
    ) as websocket:
        auth_message = {
            "type": "auth",
            "token": "my-token",
        }
        websocket.send_json(auth_message)
        message = websocket.receive_json()  # Auth success response
        assert message["type"] == "auth_success"
        filter_message = {
            "type": "filter",
            "filter": default_liberal_filter.model_dump(mode="json"),
        }
        websocket.send_json(filter_message)
        # Should receive the first log
        log_message = websocket.receive_json()
        assert log_message["type"] == "log"
        assert "log" in log_message
        received_log = Log.model_validate(log_message["log"])
        assert received_log.message == "Test message 1"
        # Should receive the second log
        log_message = websocket.receive_json()
        assert log_message["type"] == "log"
        assert "log" in log_message
        received_log = Log.model_validate(log_message["log"])
        assert received_log.message == "Test message 2"
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/server/api/test_logs_websocket.py",
"license": "Apache License 2.0",
"lines": 181,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:tests/server/logs/test_messaging.py | from unittest.mock import AsyncMock, patch
from uuid import uuid4
import pytest
from prefect.server.logs.messaging import (
create_log_publisher,
publish_logs,
)
from prefect.server.schemas.core import Log
from prefect.settings import (
PREFECT_SERVER_LOGS_STREAM_PUBLISHING_ENABLED,
temporary_settings,
)
from prefect.types._datetime import now
@pytest.fixture
def sample_log():
    """An INFO-level log attached to both a flow run and a task run."""
    created_at = now("UTC")
    return Log(
        id=uuid4(),
        name="test.logger",
        level=20,
        message="Test message",
        timestamp=created_at,
        flow_run_id=uuid4(),
        task_run_id=uuid4(),
    )
@pytest.fixture
def sample_logs(sample_log):
    """The sample log plus an ERROR-level log with no task run."""
    error_log = Log(
        id=uuid4(),
        name="test.logger2",
        level=40,
        message="Test message 2",
        timestamp=now("UTC"),
        flow_run_id=uuid4(),
        task_run_id=None,
    )
    return [sample_log, error_log]
async def test_create_log_publisher():
    """Test creating a log publisher"""
    with patch(
        "prefect.server.logs.messaging.messaging.create_publisher"
    ) as mock_create:
        mock_publisher = AsyncMock()
        mock_create.return_value.__aenter__.return_value = mock_publisher
        # create_log_publisher should yield whatever the messaging layer's
        # context manager produces, bound to the "logs" topic
        async with create_log_publisher() as publisher:
            assert publisher == mock_publisher
        mock_create.assert_called_once_with(topic="logs")
async def test_publish_logs_when_disabled_single_log(sample_log):
    """Test that publish_logs does nothing when streaming is disabled"""
    with temporary_settings({PREFECT_SERVER_LOGS_STREAM_PUBLISHING_ENABLED: False}):
        # Should return early without doing anything
        await publish_logs([sample_log])
        # No publisher should be created
        with patch("prefect.server.logs.messaging.create_log_publisher") as mock_create:
            await publish_logs([sample_log])
            mock_create.assert_not_called()
async def test_publish_logs_when_enabled_single_log(sample_log):
    """Test that publish_logs works when streaming is enabled"""
    with temporary_settings({PREFECT_SERVER_LOGS_STREAM_PUBLISHING_ENABLED: True}):
        with patch("prefect.server.logs.messaging.create_log_publisher") as mock_create:
            mock_publisher = AsyncMock()
            mock_create.return_value.__aenter__.return_value = mock_publisher
            mock_create.return_value.__aexit__ = AsyncMock(return_value=None)
            await publish_logs([sample_log])
            mock_create.assert_called_once()
            mock_publisher.publish_data.assert_called_once()
            # Check the published data
            # call_args[1] is the kwargs dict of the publish_data call
            call_args = mock_publisher.publish_data.call_args
            assert call_args[1]["data"] == sample_log.model_dump_json().encode()
            assert call_args[1]["attributes"]["log_id"] == str(sample_log.id)
async def test_publish_logs_with_id_none_in_message():
    """Test the case where log ID gets set to None in the message attributes"""
    log = Log(
        name="test.logger",
        level=20,
        message="Test message",
        timestamp=now("UTC"),
        flow_run_id=uuid4(),
        task_run_id=None,
    )
    with temporary_settings({PREFECT_SERVER_LOGS_STREAM_PUBLISHING_ENABLED: True}):
        with patch("prefect.server.logs.messaging.create_log_publisher") as mock_create:
            mock_publisher = AsyncMock()
            mock_create.return_value.__aenter__.return_value = mock_publisher
            mock_create.return_value.__aexit__ = AsyncMock(return_value=None)
            # Mock the log ID to be None for testing the attributes logic
            with patch.object(log, "id", None):
                await publish_logs([log])
                # Check that attributes are empty when ID is None
                call_args = mock_publisher.publish_data.call_args
                assert call_args[1]["attributes"] == {}
async def test_publish_logs_when_disabled(sample_logs):
    """Test that publish_logs does nothing when streaming is disabled"""
    # NOTE(review): largely duplicates test_publish_logs_when_disabled_single_log
    # with a multi-log batch; consider merging via parametrization
    with temporary_settings({PREFECT_SERVER_LOGS_STREAM_PUBLISHING_ENABLED: False}):
        await publish_logs(sample_logs)
        # No publisher should be created
        with patch("prefect.server.logs.messaging.create_log_publisher") as mock_create:
            await publish_logs(sample_logs)
            mock_create.assert_not_called()
async def test_publish_logs_empty_list():
    """Test that publish_logs handles empty list"""
    with temporary_settings({PREFECT_SERVER_LOGS_STREAM_PUBLISHING_ENABLED: True}):
        with patch("prefect.server.logs.messaging.create_log_publisher") as mock_create:
            # Even with publishing enabled, an empty batch must not open a
            # publisher connection
            await publish_logs([])
            # Should return early without creating publisher
            mock_create.assert_not_called()
async def test_publish_logs_when_enabled(sample_logs):
    """Test that publish_logs works when streaming is enabled"""
    with temporary_settings({PREFECT_SERVER_LOGS_STREAM_PUBLISHING_ENABLED: True}):
        with patch("prefect.server.logs.messaging.create_log_publisher") as mock_create:
            mock_publisher = AsyncMock()
            mock_create.return_value.__aenter__.return_value = mock_publisher
            mock_create.return_value.__aexit__ = AsyncMock(return_value=None)
            await publish_logs(sample_logs)
            # One publisher for the batch, one publish_data call per log
            mock_create.assert_called_once()
            # Should be called once for each log
            assert mock_publisher.publish_data.call_count == len(sample_logs)
class TestLogSchemaTypeValidation:
    """Tests for schema type validation in the messaging system"""

    async def test_publish_logs_uses_log_id_in_attributes(self):
        """Test that publish_logs uses the Log object's id field in message attributes"""
        # No explicit id: the Log schema supplies one (asserted non-None below)
        log_full = Log(
            name="test.logger",
            level=20,
            message="Test message",
            timestamp=now("UTC"),
            flow_run_id=uuid4(),
        )
        with temporary_settings({PREFECT_SERVER_LOGS_STREAM_PUBLISHING_ENABLED: True}):
            with patch(
                "prefect.server.logs.messaging.create_log_publisher"
            ) as mock_create:
                mock_publisher = AsyncMock()
                mock_create.return_value.__aenter__.return_value = mock_publisher
                mock_create.return_value.__aexit__ = AsyncMock(return_value=None)
                await publish_logs([log_full])
                mock_publisher.publish_data.assert_called_once()
                call_args = mock_publisher.publish_data.call_args
                # This was the key issue: messaging needs the log's ID (only available on Log, not LogCreate)
                assert call_args[1]["attributes"]["log_id"] == str(log_full.id)
                assert log_full.id is not None
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/server/logs/test_messaging.py",
"license": "Apache License 2.0",
"lines": 142,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:tests/server/logs/test_settings.py | from prefect.settings.models.server.logs import ServerLogsSettings
def test_logs_settings_defaults():
    """Both streaming toggles default to off for safety."""
    defaults = ServerLogsSettings()

    assert defaults.stream_out_enabled is False
    assert defaults.stream_publishing_enabled is False
def test_logs_settings_can_be_enabled():
    """Both streaming toggles can be switched on via constructor kwargs."""
    enabled = ServerLogsSettings(
        stream_out_enabled=True, stream_publishing_enabled=True
    )

    assert enabled.stream_out_enabled is True
    assert enabled.stream_publishing_enabled is True
def test_logs_settings_environment_variable_names():
    """Test that environment variable aliases work.

    Environment mutations are wrapped in try/finally so that a failing
    assertion cannot leak PREFECT_* variables into other tests (the previous
    version only cleaned up on success).
    """
    import os

    # Test stream_out_enabled alias
    os.environ["PREFECT_SERVER_LOGS_STREAM_OUT_ENABLED"] = "true"
    try:
        settings = ServerLogsSettings()
        assert settings.stream_out_enabled is True
    finally:
        del os.environ["PREFECT_SERVER_LOGS_STREAM_OUT_ENABLED"]

    # Test stream_publishing_enabled alias
    os.environ["PREFECT_SERVER_LOGS_STREAM_PUBLISHING_ENABLED"] = "true"
    try:
        settings = ServerLogsSettings()
        assert settings.stream_publishing_enabled is True
    finally:
        del os.environ["PREFECT_SERVER_LOGS_STREAM_PUBLISHING_ENABLED"]
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/server/logs/test_settings.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:tests/server/logs/test_stream.py | import asyncio
import datetime
from unittest.mock import AsyncMock, Mock, patch
from uuid import uuid4
import pytest
from prefect.server.logs.stream import (
LogDistributor,
distributor,
filters,
log_matches_filter,
logs,
start_distributor,
stop_distributor,
subscribed,
subscribers,
)
from prefect.server.schemas.core import Log
from prefect.server.schemas.filters import (
LogFilter,
LogFilterFlowRunId,
LogFilterLevel,
LogFilterTaskRunId,
LogFilterTimestamp,
)
from prefect.settings import PREFECT_SERVER_LOGS_STREAM_OUT_ENABLED, temporary_settings
from prefect.types._datetime import now
@pytest.fixture
def sample_log1():
    """An INFO-level log attached to both a flow run and a task run."""
    created_at = now("UTC")
    return Log(
        id=uuid4(),
        name="test.logger",
        level=20,  # INFO
        message="Test message 1",
        timestamp=created_at,
        flow_run_id=uuid4(),
        task_run_id=uuid4(),
    )
@pytest.fixture
def sample_log2():
    """An ERROR-level log one second later, with no associated task run."""
    created_at = now("UTC") + datetime.timedelta(seconds=1)
    return Log(
        id=uuid4(),
        name="test.logger2",
        level=40,  # ERROR
        message="Test message 2",
        timestamp=created_at,
        flow_run_id=uuid4(),
        task_run_id=None,
    )
def test_log_distributor_service_properties():
    """The LogDistributor service exposes the expected name and env var."""
    expected_env = "PREFECT_SERVER_LOGS_STREAM_OUT_ENABLED"

    assert LogDistributor.name == "LogDistributor"
    assert LogDistributor.environment_variable_name() == expected_env
def test_log_matches_filter_level(sample_log1):
    """Level bounds (ge_/le_) are applied correctly (sample log is INFO=20)."""
    cases = [
        (LogFilterLevel(ge_=20), True),   # 20 >= 20
        (LogFilterLevel(ge_=30), False),  # 20 < 30
        (LogFilterLevel(le_=30), True),   # 20 <= 30
        (LogFilterLevel(le_=10), False),  # 20 > 10
    ]
    for level_filter, expected in cases:
        result = log_matches_filter(sample_log1, LogFilter(level=level_filter))
        assert result is expected
def test_log_matches_filter_timestamp(sample_log1):
    """Timestamp bounds (before_/after_) are applied correctly."""
    one_second = datetime.timedelta(seconds=1)
    later = sample_log1.timestamp + one_second
    earlier = sample_log1.timestamp - one_second

    # Log falls before the upper bound
    assert log_matches_filter(
        sample_log1, LogFilter(timestamp=LogFilterTimestamp(before_=later))
    )
    # Log falls after the upper bound
    assert not log_matches_filter(
        sample_log1, LogFilter(timestamp=LogFilterTimestamp(before_=earlier))
    )
    # Log falls after the lower bound
    assert log_matches_filter(
        sample_log1, LogFilter(timestamp=LogFilterTimestamp(after_=earlier))
    )
def test_log_matches_filter_flow_run_id(sample_log1):
    """flow_run_id any_ membership is applied correctly."""
    matching = LogFilter(flow_run_id=LogFilterFlowRunId(any_=[sample_log1.flow_run_id]))
    assert log_matches_filter(sample_log1, matching)

    non_matching = LogFilter(flow_run_id=LogFilterFlowRunId(any_=[uuid4()]))
    assert not log_matches_filter(sample_log1, non_matching)
def test_log_matches_filter_task_run_id(sample_log1, sample_log2):
    """task_run_id any_/is_null_ filtering (sample_log2 has no task run)."""
    matching = LogFilter(task_run_id=LogFilterTaskRunId(any_=[sample_log1.task_run_id]))
    assert log_matches_filter(sample_log1, matching)

    non_matching = LogFilter(task_run_id=LogFilterTaskRunId(any_=[uuid4()]))
    assert not log_matches_filter(sample_log1, non_matching)

    # is_null_=True selects only logs without a task run
    null_only = LogFilter(task_run_id=LogFilterTaskRunId(is_null_=True))
    assert log_matches_filter(sample_log2, null_only)
    assert not log_matches_filter(sample_log1, null_only)

    # is_null_=False selects only logs with a task run
    non_null_only = LogFilter(task_run_id=LogFilterTaskRunId(is_null_=False))
    assert not log_matches_filter(sample_log2, non_null_only)
    assert log_matches_filter(sample_log1, non_null_only)
def test_log_matches_filter_empty():
    """A filter with no criteria matches any log."""
    bare_log = Log(
        id=uuid4(),
        name="test",
        level=20,
        message="test",
        timestamp=now("UTC"),
        flow_run_id=None,
        task_run_id=None,
    )
    assert log_matches_filter(bare_log, LogFilter())
@pytest.mark.asyncio
async def test_subscribed_context_manager(sample_log1):
    """Test the subscribed context manager.

    Checks registration against the module-level ``subscribers``/``filters``
    imported at the top of this file -- they are the same objects the original
    reached through the fragile ``subscribed.__wrapped__.__globals__[...]``
    indirection, and this matches how the ``mock_subscriber`` fixture uses them.
    """
    filter = LogFilter()

    async with subscribed(filter) as queue:
        assert queue is not None
        # Registration is visible while the context is open
        assert queue in subscribers
        assert filters[queue] == filter

    # Should be cleaned up after context exit
    assert queue not in subscribers
    assert queue not in filters
@pytest.mark.asyncio
async def test_logs_context_manager():
    """Test the logs context manager returns an async iterable"""
    async with logs(LogFilter()) as log_stream:
        # Full end-to-end streaming would need the messaging system, so
        # only verify the stream supports `async for`.
        assert hasattr(log_stream, "__aiter__")
@pytest.mark.asyncio
async def test_logs_consume_timeout():
    """Test that the logs consumer yields None on timeout"""
    async with logs(LogFilter()) as log_stream:
        # Force every queue read to time out
        with patch("asyncio.wait_for", side_effect=asyncio.TimeoutError):
            async for entry in log_stream:
                assert entry is None
                break  # One None heartbeat is enough
@pytest.fixture
async def mock_subscriber():
    """Fixture that provides a mock subscriber and cleans up automatically"""
    queue = asyncio.Queue()
    log_filter = LogFilter()
    subscribers.add(queue)
    filters[queue] = log_filter
    yield queue, log_filter
    # Teardown: drop the subscription if the test left it registered
    subscribers.discard(queue)
    filters.pop(queue, None)
@pytest.mark.asyncio
async def test_distributor_message_handler(sample_log1, mock_subscriber):
    """Test the distributor message handler"""
    queue, _ = mock_subscriber
    message = Mock()
    message.data = sample_log1.model_dump_json().encode()
    message.attributes = {"log_id": "test"}
    async with distributor() as handler:
        await handler(message)
        # The parsed log must have been delivered to the subscriber queue
        assert not queue.empty()
        delivered = await queue.get()
        assert delivered.message == "Test message 1"
@pytest.mark.asyncio
async def test_distributor_message_handler_no_attributes():
    """Test distributor handles messages without attributes"""
    attribute_free = Mock()
    attribute_free.data = b"test data"
    attribute_free.attributes = None
    async with distributor() as handler:
        # Must be tolerated silently rather than raising
        await handler(attribute_free)
@pytest.mark.asyncio
async def test_distributor_message_handler_invalid_json():
    """Test distributor handles invalid JSON gracefully"""
    bad_message = Mock()
    bad_message.data = b"invalid json"
    bad_message.attributes = {"test": "value"}
    # Parsing is only attempted when at least one subscriber is registered
    queue = asyncio.Queue()
    subscribers.add(queue)
    try:
        with patch("prefect.server.logs.stream.logger.warning") as mock_warning:
            async with distributor() as handler:
                await handler(bad_message)
                # A warning must be emitted for the parse failure
                mock_warning.assert_called_once()
    finally:
        subscribers.discard(queue)
@pytest.fixture
async def mock_subscriber_with_filter():
    """Fixture that provides a mock subscriber with a restrictive filter"""
    queue = asyncio.Queue()
    error_filter = LogFilter(level=LogFilterLevel(ge_=50))  # ERROR level or higher
    subscribers.add(queue)
    filters[queue] = error_filter
    yield queue, error_filter
    # Teardown: unregister the subscription
    subscribers.discard(queue)
    filters.pop(queue, None)
@pytest.mark.asyncio
async def test_distributor_message_handler_filtered_out(
    sample_log1, mock_subscriber_with_filter
):
    """Test distributor filters out logs that don't match"""
    queue, _ = mock_subscriber_with_filter
    message = Mock()
    message.data = sample_log1.model_dump_json().encode()
    message.attributes = {"log_id": "test"}
    async with distributor() as handler:
        await handler(message)
        # Nothing delivered: the log does not satisfy the subscriber's filter
        assert queue.empty()
@pytest.fixture
async def mock_full_subscriber():
    """Fixture that provides a mock subscriber with a full queue"""
    queue = asyncio.Queue(maxsize=1)
    await queue.put("dummy")  # Occupy the only slot so the queue is full
    log_filter = LogFilter()
    subscribers.add(queue)
    filters[queue] = log_filter
    yield queue, log_filter
    # Teardown: unregister the subscription
    subscribers.discard(queue)
    filters.pop(queue, None)
@pytest.mark.asyncio
async def test_distributor_message_handler_queue_full(
    sample_log1, mock_full_subscriber
):
    """Test distributor handles full queues gracefully"""
    message = Mock()
    message.data = sample_log1.model_dump_json().encode()
    message.attributes = {"log_id": "test"}
    async with distributor() as handler:
        # Delivery to the saturated queue must not raise
        await handler(message)
@pytest.mark.asyncio
async def test_start_stop_distributor():
    """Test starting and stopping the distributor"""
    from prefect.server.logs import stream

    # Begin from a known-stopped state
    await stop_distributor()
    try:
        assert stream._distributor_task is None
        await start_distributor()
        running_task = stream._distributor_task
        assert running_task is not None
        assert not running_task.done()
    finally:
        # Stopping must clear the task reference
        await stop_distributor()
        assert stream._distributor_task is None
@pytest.mark.asyncio
async def test_log_distributor_service_lifecycle():
    """Test LogDistributor service lifecycle"""
    # Disabled when the setting is off
    with temporary_settings({PREFECT_SERVER_LOGS_STREAM_OUT_ENABLED: False}):
        assert not LogDistributor.enabled()
    # Enabled when the setting is on
    with temporary_settings({PREFECT_SERVER_LOGS_STREAM_OUT_ENABLED: True}):
        assert LogDistributor.enabled()
def test_log_distributor_service_class_methods():
    """Test LogDistributor service class methods"""
    assert LogDistributor.name == "LogDistributor"
    expected_env_var = "PREFECT_SERVER_LOGS_STREAM_OUT_ENABLED"
    assert LogDistributor.environment_variable_name() == expected_env_var
    # service_settings is intentionally unimplemented for this service
    with pytest.raises(NotImplementedError):
        LogDistributor.service_settings()
@pytest.mark.asyncio
async def test_start_distributor_already_started():
    """Test starting distributor when already started"""
    from prefect.server.logs import stream

    await stop_distributor()  # ensure a known-clean state
    try:
        await start_distributor()
        original_task = stream._distributor_task
        assert original_task is not None
        # A second start must be a no-op that keeps the same task
        await start_distributor()
        assert stream._distributor_task is original_task
    finally:
        await stop_distributor()
@pytest.mark.asyncio
async def test_stop_distributor_not_started():
    """Test stopping distributor when not started"""
    await stop_distributor()  # reach the stopped state
    # Stopping a second time must be harmless
    await stop_distributor()
def test_log_matches_filter_complex_flow_run_id_case():
    """Test flow_run_id filtering edge case"""
    orphan_log = Log(
        id=uuid4(),
        name="test",
        level=20,
        message="test",
        timestamp=now("UTC"),
        flow_run_id=None,  # no associated flow run
        task_run_id=None,
    )
    # A filter demanding a specific flow_run_id cannot match a None one
    id_filter = LogFilter(flow_run_id=LogFilterFlowRunId(any_=[uuid4()]))
    assert not log_matches_filter(orphan_log, id_filter)
def test_log_matches_filter_timestamp_after(sample_log1):
    """Test timestamp filtering with after condition"""
    # A cutoff one second past the log's timestamp must exclude it
    cutoff = sample_log1.timestamp + datetime.timedelta(seconds=1)
    time_filter = LogFilter(timestamp=LogFilterTimestamp(after_=cutoff))
    assert not log_matches_filter(sample_log1, time_filter)
@pytest.mark.asyncio
async def test_logs_consumer_continue_after_timeout():
    """Test that logs consumer continues after timeout"""
    async with logs(LogFilter()) as log_stream:
        iterator = log_stream.__aiter__()
        attempts = {"count": 0}

        async def flaky_wait_for(*args, **kwargs):
            # Time out on the first call, then hand back a dummy log
            attempts["count"] += 1
            if attempts["count"] == 1:
                raise asyncio.TimeoutError()
            return Log(
                id=uuid4(),
                name="test",
                level=20,
                message="test",
                timestamp=now("UTC"),
                flow_run_id=None,
                task_run_id=None,
            )

        with patch("asyncio.wait_for", side_effect=flaky_wait_for):
            # The timeout surfaces as a None heartbeat
            assert await iterator.__anext__() is None
            # The consumer keeps going and yields the real log next
            second = await iterator.__anext__()
            assert second is not None
            assert second.message == "test"
def test_log_matches_filter_flow_run_id_none_edge_case():
    """Test flow_run_id filtering when log has None flow_run_id"""
    detached_log = Log(
        id=uuid4(),
        name="test",
        level=20,
        message="test",
        timestamp=now("UTC"),
        flow_run_id=None,  # the edge case under test
        task_run_id=None,
    )
    # Requiring specific IDs must exclude a log without a flow_run_id
    required = LogFilter(flow_run_id=LogFilterFlowRunId(any_=[uuid4()]))
    assert not log_matches_filter(detached_log, required)
@pytest.mark.asyncio
async def test_distributor_message_handler_no_subscribers(sample_log1):
    """Test distributor early exit when no subscribers"""
    subscribers.clear()  # guarantee nobody is listening
    message = Mock()
    message.data = sample_log1.model_dump_json().encode()
    message.attributes = {"log_id": "test"}
    async with distributor() as handler:
        # With no subscribers the handler should return early without error
        await handler(message)
@pytest.mark.asyncio
async def test_log_distributor_service_stop():
    """Test LogDistributor service stop method"""
    service = LogDistributor()
    with patch(
        "prefect.server.logs.stream.stop_distributor", new_callable=AsyncMock
    ) as stop_mock:
        await service.stop()
    # stop() must delegate to the module-level stop_distributor
    stop_mock.assert_called_once()
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/server/logs/test_stream.py",
"license": "Apache License 2.0",
"lines": 413,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/prefect/server/utilities/postgres_listener.py | from __future__ import annotations
import asyncio
import ssl
from typing import TYPE_CHECKING, Any, AsyncGenerator
from urllib.parse import urlsplit
import asyncpg # type: ignore
from pydantic import SecretStr
from sqlalchemy.engine.url import make_url
if TYPE_CHECKING:
from asyncpg import Connection
from prefect.logging import get_logger
from prefect.settings import get_current_settings
_logger = get_logger(__name__)
async def get_pg_notify_connection() -> Connection | None:
    """
    Establishes and returns a raw asyncpg connection for LISTEN/NOTIFY.
    Returns None if not a PostgreSQL connection URL.

    The connection is opened outside SQLAlchemy's pool, so the caller owns
    it and must close it when finished. All failure modes visible here
    (missing/invalid URL, non-PostgreSQL driver, TLS misconfiguration,
    connection errors) are logged and reported by returning None rather
    than raising.
    """
    db_url_str = get_current_settings().server.database.connection_url
    # The setting may be stored as a pydantic SecretStr; unwrap it first.
    if isinstance(db_url_str, SecretStr):
        db_url_str = db_url_str.get_secret_value()
    if not db_url_str:
        _logger.debug(
            "Cannot create Postgres LISTEN connection: PREFECT_API_DATABASE_CONNECTION_URL is not set."
        )
        return None
    try:
        db_url = make_url(db_url_str)
    except Exception as e:
        _logger.error(f"Invalid PREFECT_API_DATABASE_CONNECTION_URL: {e}")
        return None
    # LISTEN/NOTIFY only exists on PostgreSQL; bail out for other drivers.
    if db_url.drivername.split("+")[0] not in ("postgresql", "postgres"):
        _logger.debug(
            "Cannot create Postgres LISTEN connection: PREFECT_API_DATABASE_CONNECTION_URL "
            f"is not a PostgreSQL connection URL (driver: {db_url.drivername})."
        )
        return None
    # Construct a DSN for asyncpg by stripping the SQLAlchemy dialect suffix
    # (e.g. +asyncpg) via simple string replacement on the scheme portion. This
    # preserves the original URL structure exactly, including:
    #   - multihost connection strings (?host=A:5432&host=B:5432)
    #   - Kerberos/GSSAPI params (krbsrvname, gsslib)
    #   - UNIX domain socket paths (triple-slash URLs like postgresql:///db)
    # We intentionally avoid SQLAlchemy's render_as_string() here because it
    # URL-encodes query param values (e.g. ':' -> '%3A'), which breaks asyncpg's
    # parsing of host:port pairs in multihost configurations.
    original_scheme = urlsplit(db_url_str).scheme  # e.g. "postgresql+asyncpg"
    base_scheme = original_scheme.split("+")[0]  # e.g. "postgresql"
    dsn_string = base_scheme + db_url_str[len(original_scheme) :]
    connect_args: dict[str, Any] = {}
    # Include server_settings if configured, mirroring the connect args used by
    # the main SQLAlchemy engine (application_name, search_path).
    settings = get_current_settings()
    server_settings: dict[str, str] = {}
    app_name = settings.server.database.sqlalchemy.connect_args.application_name
    if app_name:
        server_settings["application_name"] = app_name
    search_path = settings.server.database.sqlalchemy.connect_args.search_path
    if search_path:
        server_settings["search_path"] = search_path
    if server_settings:
        connect_args["server_settings"] = server_settings
    try:
        # Include TLS/SSL configuration if enabled, mirroring the main engine setup
        # in AsyncPostgresConfiguration.engine(). This is inside the try block so
        # that TLS misconfigurations (e.g. invalid cert paths) are caught and result
        # in returning None, consistent with this function's fault-tolerant contract.
        tls_config = settings.server.database.sqlalchemy.connect_args.tls
        if tls_config.enabled:
            if tls_config.ca_file:
                pg_ctx = ssl.create_default_context(
                    purpose=ssl.Purpose.SERVER_AUTH, cafile=tls_config.ca_file
                )
            else:
                pg_ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH)
            pg_ctx.minimum_version = ssl.TLSVersion.TLSv1_2
            # Client certificate auth is only configured when both halves of the
            # key pair are present.
            if tls_config.cert_file and tls_config.key_file:
                pg_ctx.load_cert_chain(
                    certfile=tls_config.cert_file, keyfile=tls_config.key_file
                )
            pg_ctx.check_hostname = tls_config.check_hostname
            pg_ctx.verify_mode = ssl.CERT_REQUIRED
            connect_args["ssl"] = pg_ctx
        # Pass the full DSN to asyncpg so it can parse all connection parameters
        # natively, including authentication-related query params (e.g. krbsrvname
        # for Kerberos/GSSAPI) and UNIX domain socket paths.
        # This connection is outside SQLAlchemy's pool and needs its own lifecycle
        # management.
        conn = await asyncpg.connect(dsn_string, **connect_args)
        _logger.info(
            f"Successfully established raw asyncpg connection for LISTEN/NOTIFY to "
            f"{db_url.host or db_url.query.get('host', 'localhost')}/"
            f"{db_url.database}"
        )
        return conn
    except Exception as e:
        _logger.error(
            f"Failed to establish raw asyncpg connection for LISTEN/NOTIFY: {e}",
            exc_info=True,
        )
        return None
async def pg_listen(
    connection: Connection, channel_name: str, heartbeat_interval: float = 5.0
) -> AsyncGenerator[str, None]:
    """
    Listens to a specific Postgres channel and yields payloads.
    Manages adding and removing the listener on the given connection.

    Args:
        connection: An open asyncpg connection. The caller retains ownership;
            on exit only the channel listener is removed — the connection
            itself is not closed here.
        channel_name: The Postgres NOTIFY channel to LISTEN on.
        heartbeat_interval: Seconds to wait for a notification before
            checking whether the connection is still open.

    Yields:
        The payload string of each received notification.

    Raises:
        asyncpg.exceptions.PostgresConnectionError / OSError: if listener
            setup fails.
        GeneratorExit / asyncio.CancelledError: re-raised to honor generator
            close and task cancellation.
    """
    listen_queue: asyncio.Queue[str] = asyncio.Queue()
    # asyncpg expects a regular function for the callback, not an async one directly.
    # This callback will be run in asyncpg's event loop / thread context.
    def queue_notifications_callback(
        conn_unused: Connection, pid: int, chan: str, payload: str
    ):
        try:
            listen_queue.put_nowait(payload)
        except asyncio.QueueFull:
            # NOTE: asyncio.Queue() above is unbounded, so this path is not
            # expected to trigger; kept as defensive logging.
            _logger.warning(
                f"Postgres listener queue full for channel {channel_name}. Notification may be lost."
            )
    try:
        # Add the listener that uses the queue
        await connection.add_listener(channel_name, queue_notifications_callback)
        _logger.info(f"Listening on Postgres channel: {channel_name}")
        while True:
            try:
                # Wait for a notification with a timeout to allow checking if connection is still alive
                payload: str = await asyncio.wait_for(
                    listen_queue.get(), timeout=heartbeat_interval
                )
                yield payload
                listen_queue.task_done()  # Acknowledge processing if using Queue for tracking
            except asyncio.TimeoutError:
                # Heartbeat: no notification within the interval — verify the
                # connection is still usable before waiting again.
                if connection.is_closed():
                    _logger.info(
                        f"Postgres connection closed while listening on {channel_name}."
                    )
                    break
                continue  # Continue listening
            except (
                Exception
            ) as e:  # Catch broader exceptions during listen_queue.get() or yield
                _logger.error(
                    f"Error during notification processing on {channel_name}: {e}",
                    exc_info=True,
                )
                # Depending on the error, you might want to break or continue
                if isinstance(
                    e, (GeneratorExit, asyncio.CancelledError)
                ):  # Graceful shutdown
                    raise
                if isinstance(
                    e, (asyncpg.exceptions.PostgresConnectionError, OSError)
                ):  # Connection critical
                    _logger.error(
                        f"Connection error on {channel_name}. Listener stopping."
                    )
                    break
                await asyncio.sleep(1)  # Prevent tight loop on other continuous errors
    except (
        asyncpg.exceptions.PostgresConnectionError,
        OSError,
    ) as e:  # Errors during setup
        _logger.error(
            f"Connection error setting up listener for {channel_name}: {e}",
            exc_info=True,
        )
        raise
    except (GeneratorExit, asyncio.CancelledError):  # Handle task cancellation
        _logger.info(f"Listener for {channel_name} cancelled.")
        raise
    except Exception as e:  # Catch-all for unexpected errors during setup
        _logger.error(
            f"Unexpected error setting up or during listen on {channel_name}: {e}",
            exc_info=True,
        )
        raise
    finally:
        # Best-effort removal of the listener; errors here must not mask the
        # original exception (if any), so they are logged and swallowed.
        if not connection.is_closed():
            try:
                await connection.remove_listener(
                    channel_name, queue_notifications_callback
                )
                _logger.info(f"Removed listener from Postgres channel: {channel_name}")
            except Exception as e:
                _logger.error(
                    f"Error removing listener for {channel_name}: {e}", exc_info=True
                )
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/server/utilities/postgres_listener.py",
"license": "Apache License 2.0",
"lines": 187,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:tests/events/server/models/test_automation_notifications.py | """Test that automation changes trigger notifications."""
import asyncio
from unittest.mock import patch
import pytest
from sqlalchemy.ext.asyncio import AsyncSession
from prefect.server.events.actions import DoNothing
from prefect.server.events.models.automations import (
create_automation,
delete_automation,
update_automation,
)
from prefect.server.events.schemas.automations import (
Automation,
AutomationUpdate,
EventTrigger,
Posture,
)
from prefect.server.events.triggers import listen_for_automation_changes
from prefect.server.utilities.database import get_dialect
from prefect.settings import PREFECT_API_SERVICES_TRIGGERS_ENABLED, temporary_settings
@pytest.fixture
def sample_automation() -> Automation:
    """Build a minimal enabled automation with one reactive trigger."""
    reactive_trigger = EventTrigger(
        expect={"test.event"},
        posture=Posture.Reactive,
        threshold=1,
    )
    return Automation(
        name="Test Automation",
        description="Test",
        enabled=True,
        trigger=reactive_trigger,
        actions=[DoNothing()],
    )
async def test_automation_crud_operations_complete_successfully(
    automations_session: AsyncSession, sample_automation: Automation
):
    """Test that automation CRUD operations work with NOTIFY enabled
    and verify cache updates happen correctly.

    Exercises create, update, and delete; after each step checks that the
    in-memory trigger cache (triggers.automations_by_id / triggers.triggers)
    reflects the change. On PostgreSQL the cache update is invoked manually
    because the LISTEN task is not running in tests; on SQLite the
    after_commit handler is given a short grace period via sleep.
    """
    from prefect.server.events import triggers
    from prefect.server.utilities.database import get_dialect
    with temporary_settings({PREFECT_API_SERVICES_TRIGGERS_ENABLED: True}):
        # Clear any existing automations for a clean test
        triggers.automations_by_id.clear()
        triggers.triggers.clear()
        # Check if we're using PostgreSQL
        dialect_name = get_dialect(automations_session.sync_session).name
        is_postgres = dialect_name == "postgresql"
        # Create automation
        created = await create_automation(automations_session, sample_automation)
        await automations_session.commit()
        assert created.id is not None
        # For PostgreSQL, manually trigger cache update since listener isn't running in tests
        if is_postgres:
            from prefect.server.events.triggers import automation_changed
            await automation_changed(created.id, "automation__created")
        else:
            # Allow time for SQLite after_commit handler
            # NOTE(review): 0.1s is a timing assumption — could flake on slow CI
            await asyncio.sleep(0.1)
        # Verify automation was loaded into cache
        assert created.id in triggers.automations_by_id
        assert len(triggers.triggers) > 0  # Should have at least one trigger
        # Update automation
        update = AutomationUpdate(
            name="Updated Name",
            description="Updated",
            enabled=True,
            trigger=created.trigger,
            actions=created.actions,
        )
        result = await update_automation(automations_session, update, created.id)
        await automations_session.commit()
        assert result is True
        # For PostgreSQL, manually trigger cache update
        if is_postgres:
            await automation_changed(created.id, "automation__updated")
        else:
            # Allow time for SQLite after_commit handler
            await asyncio.sleep(0.1)
        # Verify automation was updated in cache
        assert created.id in triggers.automations_by_id
        cached_automation = triggers.automations_by_id[created.id]
        assert cached_automation.name == "Updated Name"
        # Delete automation
        result = await delete_automation(automations_session, created.id)
        await automations_session.commit()
        assert result is True
        # For PostgreSQL, manually trigger cache update
        if is_postgres:
            await automation_changed(created.id, "automation__deleted")
        else:
            # Allow time for SQLite after_commit handler
            await asyncio.sleep(0.1)
        # Verify automation was removed from cache
        assert created.id not in triggers.automations_by_id
async def test_automation_listener_receives_notifications_and_processes_them(
    automations_session: AsyncSession, sample_automation: Automation
):
    """Test that the listener receives notifications and processes them correctly.
    This test validates:
    1. NOTIFY is sent when automations are created/updated/deleted
    2. The listener receives these notifications
    3. The automation_changed function is called with correct parameters

    The listener runs as a background task; each CRUD step is followed by a
    short sleep so its notification can be observed before the next step.
    """
    # NOTIFY/LISTEN is PostgreSQL-only, so skip on any other dialect.
    if get_dialect(automations_session.sync_session).name != "postgresql":
        pytest.skip("This test requires PostgreSQL for NOTIFY/LISTEN")
    with temporary_settings({PREFECT_API_SERVICES_TRIGGERS_ENABLED: True}):
        # Track all calls to automation_changed
        automation_changed_calls = []
        async def mock_automation_changed(automation_id, event):
            automation_changed_calls.append((automation_id, event))
        with patch(
            "prefect.server.events.triggers.automation_changed", mock_automation_changed
        ):
            # Start the listener
            listener_task = asyncio.create_task(listen_for_automation_changes())
            try:
                # Give the listener a moment to set up its LISTEN connection.
                # NOTE(review): the 0.1s sleeps here are timing assumptions and
                # could flake on slow CI.
                await asyncio.sleep(0.1)
                # Create automation - should trigger "created" notification
                created = await create_automation(
                    automations_session, sample_automation
                )
                await automations_session.commit()
                await asyncio.sleep(0.1)
                # Update automation - should trigger "updated" notification
                update = AutomationUpdate(
                    name="Updated Name",
                    description="Updated",
                    enabled=True,
                    trigger=created.trigger,
                    actions=created.actions,
                )
                await update_automation(automations_session, update, created.id)
                await automations_session.commit()
                await asyncio.sleep(0.1)
                # Delete automation - should trigger "deleted" notification
                await delete_automation(automations_session, created.id)
                await automations_session.commit()
                await asyncio.sleep(0.1)
                # Verify all three notifications were received and processed
                assert len(automation_changed_calls) == 3
                # Check each notification
                assert automation_changed_calls[0] == (
                    created.id,
                    "automation__created",
                )
                assert automation_changed_calls[1] == (
                    created.id,
                    "automation__updated",
                )
                assert automation_changed_calls[2] == (
                    created.id,
                    "automation__deleted",
                )
            finally:
                # Always tear down the background listener task
                listener_task.cancel()
                try:
                    await listener_task
                except asyncio.CancelledError:
                    pass
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/events/server/models/test_automation_notifications.py",
"license": "Apache License 2.0",
"lines": 161,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:tests/server/utilities/test_postgres_listener.py | """Tests for PostgreSQL NOTIFY/LISTEN utilities."""
import ssl
from unittest import mock
from unittest.mock import AsyncMock, MagicMock
import pytest
from prefect.server.utilities.postgres_listener import (
get_pg_notify_connection,
)
from prefect.settings import PREFECT_API_DATABASE_CONNECTION_URL, temporary_settings
class TestGetPgNotifyConnection:
"""Tests for get_pg_notify_connection function."""
async def test_returns_none_for_non_postgres_url(self):
"""Test that non-PostgreSQL URLs return None."""
with temporary_settings(
{PREFECT_API_DATABASE_CONNECTION_URL: "sqlite:///test.db"}
):
conn = await get_pg_notify_connection()
assert conn is None
async def test_attempts_connection_for_postgres_urls(self):
"""Test that PostgreSQL URLs attempt to connect."""
with temporary_settings(
{PREFECT_API_DATABASE_CONNECTION_URL: "postgresql://user:pass@localhost/db"}
):
with mock.patch("asyncpg.connect", new_callable=AsyncMock) as mock_connect:
mock_conn = MagicMock()
mock_connect.return_value = mock_conn
conn = await get_pg_notify_connection()
assert conn == mock_conn
mock_connect.assert_called_once()
async def test_handles_connection_failure(self):
"""Test that connection failures are handled gracefully."""
with temporary_settings(
{PREFECT_API_DATABASE_CONNECTION_URL: "postgresql://localhost/test"}
):
with mock.patch(
"asyncpg.connect", side_effect=Exception("Connection failed")
):
conn = await get_pg_notify_connection()
assert conn is None
async def test_includes_application_name_when_configured(
self, monkeypatch: pytest.MonkeyPatch
):
"""Test that application_name is passed to asyncpg when configured."""
monkeypatch.setenv(
"PREFECT_SERVER_DATABASE_SQLALCHEMY_CONNECT_ARGS_APPLICATION_NAME",
"test-app-name",
)
with temporary_settings(
{PREFECT_API_DATABASE_CONNECTION_URL: "postgresql://user:pass@localhost/db"}
):
with mock.patch("asyncpg.connect", new_callable=AsyncMock) as mock_connect:
mock_conn = MagicMock()
mock_connect.return_value = mock_conn
conn = await get_pg_notify_connection()
assert conn == mock_conn
mock_connect.assert_called_once()
call_kwargs = mock_connect.call_args.kwargs
assert "server_settings" in call_kwargs
assert (
call_kwargs["server_settings"]["application_name"]
== "test-app-name"
)
async def test_excludes_application_name_when_not_configured(self):
"""Test that server_settings is not added when application_name is not configured."""
with temporary_settings(
{PREFECT_API_DATABASE_CONNECTION_URL: "postgresql://user:pass@localhost/db"}
):
with mock.patch("asyncpg.connect", new_callable=AsyncMock) as mock_connect:
mock_conn = MagicMock()
mock_connect.return_value = mock_conn
conn = await get_pg_notify_connection()
assert conn == mock_conn
mock_connect.assert_called_once()
call_kwargs = mock_connect.call_args.kwargs
assert "server_settings" not in call_kwargs
async def test_includes_search_path_when_configured(
self, monkeypatch: pytest.MonkeyPatch
):
"""Test that search_path is passed to asyncpg when configured."""
monkeypatch.setenv(
"PREFECT_SERVER_DATABASE_SQLALCHEMY_CONNECT_ARGS_SEARCH_PATH",
"myschema",
)
with temporary_settings(
{PREFECT_API_DATABASE_CONNECTION_URL: "postgresql://user:pass@localhost/db"}
):
with mock.patch("asyncpg.connect", new_callable=AsyncMock) as mock_connect:
mock_conn = MagicMock()
mock_connect.return_value = mock_conn
conn = await get_pg_notify_connection()
assert conn == mock_conn
mock_connect.assert_called_once()
call_kwargs = mock_connect.call_args.kwargs
assert "server_settings" in call_kwargs
assert call_kwargs["server_settings"]["search_path"] == "myschema"
async def test_includes_both_application_name_and_search_path_when_configured(
self, monkeypatch: pytest.MonkeyPatch
):
"""Test that both application_name and search_path are passed when configured."""
monkeypatch.setenv(
"PREFECT_SERVER_DATABASE_SQLALCHEMY_CONNECT_ARGS_APPLICATION_NAME",
"test-app-name",
)
monkeypatch.setenv(
"PREFECT_SERVER_DATABASE_SQLALCHEMY_CONNECT_ARGS_SEARCH_PATH",
"myschema",
)
with temporary_settings(
{PREFECT_API_DATABASE_CONNECTION_URL: "postgresql://user:pass@localhost/db"}
):
with mock.patch("asyncpg.connect", new_callable=AsyncMock) as mock_connect:
mock_conn = MagicMock()
mock_connect.return_value = mock_conn
conn = await get_pg_notify_connection()
assert conn == mock_conn
mock_connect.assert_called_once()
call_kwargs = mock_connect.call_args.kwargs
assert "server_settings" in call_kwargs
assert (
call_kwargs["server_settings"]["application_name"]
== "test-app-name"
)
assert call_kwargs["server_settings"]["search_path"] == "myschema"
async def test_passes_full_dsn_to_asyncpg(self):
"""Test that the full DSN is passed as the first positional arg to asyncpg."""
with temporary_settings(
{PREFECT_API_DATABASE_CONNECTION_URL: "postgresql://user:pass@localhost/db"}
):
with mock.patch("asyncpg.connect", new_callable=AsyncMock) as mock_connect:
mock_conn = MagicMock()
mock_connect.return_value = mock_conn
conn = await get_pg_notify_connection()
assert conn == mock_conn
mock_connect.assert_called_once()
# The DSN should be passed as the first positional arg
dsn = mock_connect.call_args.args[0]
assert dsn == "postgresql://user:pass@localhost/db"
async def test_strips_asyncpg_dialect_from_dsn(self):
"""Test that +asyncpg dialect is stripped from the DSN."""
with temporary_settings(
{
PREFECT_API_DATABASE_CONNECTION_URL: "postgresql+asyncpg://user:pass@localhost/db"
}
):
with mock.patch("asyncpg.connect", new_callable=AsyncMock) as mock_connect:
mock_conn = MagicMock()
mock_connect.return_value = mock_conn
conn = await get_pg_notify_connection()
assert conn == mock_conn
dsn = mock_connect.call_args.args[0]
assert dsn.startswith("postgresql://")
assert "+asyncpg" not in dsn
async def test_unix_domain_socket_url_preserves_query_params(self):
"""Test that UNIX domain socket URLs with host/port in query params are
preserved in the DSN passed to asyncpg."""
with temporary_settings(
{
PREFECT_API_DATABASE_CONNECTION_URL: "postgresql+asyncpg:///prefect?host=/tmp/.SOSHUB&port=25432"
}
):
with mock.patch("asyncpg.connect", new_callable=AsyncMock) as mock_connect:
mock_conn = MagicMock()
mock_connect.return_value = mock_conn
conn = await get_pg_notify_connection()
assert conn == mock_conn
mock_connect.assert_called_once()
dsn = mock_connect.call_args.args[0]
# The DSN should contain the query params for asyncpg to parse,
# with the original values preserved (no URL-encoding of slashes).
assert "host=/tmp/.SOSHUB" in dsn
assert "port=25432" in dsn
assert dsn.startswith("postgresql:///")
async def test_unix_domain_socket_url_without_port(self):
"""Test that UNIX domain socket URLs without port still work."""
with temporary_settings(
{
PREFECT_API_DATABASE_CONNECTION_URL: "postgresql+asyncpg:///mydb?host=/var/run/postgresql"
}
):
with mock.patch("asyncpg.connect", new_callable=AsyncMock) as mock_connect:
mock_conn = MagicMock()
mock_connect.return_value = mock_conn
conn = await get_pg_notify_connection()
assert conn == mock_conn
mock_connect.assert_called_once()
dsn = mock_connect.call_args.args[0]
# Original path is preserved without URL-encoding
assert "host=/var/run/postgresql" in dsn
assert "port" not in dsn
async def test_standard_tcp_url_still_works(self):
"""Test that standard TCP URLs with host in authority section still work."""
with temporary_settings(
{
PREFECT_API_DATABASE_CONNECTION_URL: "postgresql+asyncpg://user:pass@myhost:5433/mydb"
}
):
with mock.patch("asyncpg.connect", new_callable=AsyncMock) as mock_connect:
mock_conn = MagicMock()
mock_connect.return_value = mock_conn
conn = await get_pg_notify_connection()
assert conn == mock_conn
mock_connect.assert_called_once()
dsn = mock_connect.call_args.args[0]
assert "user:pass@myhost:5433/mydb" in dsn
assert dsn.startswith("postgresql://")
async def test_preserves_kerberos_query_params(self):
"""Test that Kerberos-related query params (e.g. krbsrvname) are preserved
in the DSN passed to asyncpg."""
with temporary_settings(
{
PREFECT_API_DATABASE_CONNECTION_URL: "postgresql+asyncpg://user@myhost/mydb?krbsrvname=postgres"
}
):
with mock.patch("asyncpg.connect", new_callable=AsyncMock) as mock_connect:
mock_conn = MagicMock()
mock_connect.return_value = mock_conn
conn = await get_pg_notify_connection()
assert conn == mock_conn
mock_connect.assert_called_once()
dsn = mock_connect.call_args.args[0]
assert "krbsrvname=postgres" in dsn
assert "+asyncpg" not in dsn
async def test_includes_tls_ssl_when_configured(
self, monkeypatch: pytest.MonkeyPatch
):
"""Test that TLS/SSL context is passed to asyncpg when TLS is enabled."""
monkeypatch.setenv(
"PREFECT_SERVER_DATABASE_SQLALCHEMY_CONNECT_ARGS_TLS_ENABLED",
"true",
)
with temporary_settings(
{PREFECT_API_DATABASE_CONNECTION_URL: "postgresql://user:pass@localhost/db"}
):
with mock.patch("asyncpg.connect", new_callable=AsyncMock) as mock_connect:
mock_conn = MagicMock()
mock_connect.return_value = mock_conn
conn = await get_pg_notify_connection()
assert conn == mock_conn
mock_connect.assert_called_once()
call_kwargs = mock_connect.call_args.kwargs
assert "ssl" in call_kwargs
ssl_ctx = call_kwargs["ssl"]
assert isinstance(ssl_ctx, ssl.SSLContext)
assert ssl_ctx.verify_mode == ssl.CERT_REQUIRED
    async def test_excludes_tls_ssl_when_not_configured(self):
        """Test that ssl is not added when TLS is not enabled."""
        with temporary_settings(
            {PREFECT_API_DATABASE_CONNECTION_URL: "postgresql://user:pass@localhost/db"}
        ):
            with mock.patch("asyncpg.connect", new_callable=AsyncMock) as mock_connect:
                mock_conn = MagicMock()
                mock_connect.return_value = mock_conn
                conn = await get_pg_notify_connection()
                assert conn == mock_conn
                mock_connect.assert_called_once()
                # Without TLS configured, no `ssl` kwarg may be passed at all
                # (asyncpg would otherwise attempt an SSL negotiation).
                call_kwargs = mock_connect.call_args.kwargs
                assert "ssl" not in call_kwargs
    async def test_multihost_connection_string_preserved(self):
        """Test that multihost connection strings with multiple host query params
        are preserved exactly in the DSN passed to asyncpg, without URL-encoding
        the colon in host:port pairs."""
        multihost_url = (
            "postgresql+asyncpg://user@/dbname"
            "?host=HostA:5432&host=HostB:5432&host=HostC:5432"
        )
        with temporary_settings({PREFECT_API_DATABASE_CONNECTION_URL: multihost_url}):
            with mock.patch("asyncpg.connect", new_callable=AsyncMock) as mock_connect:
                mock_conn = MagicMock()
                mock_connect.return_value = mock_conn
                conn = await get_pg_notify_connection()
                assert conn == mock_conn
                mock_connect.assert_called_once()
                dsn = mock_connect.call_args.args[0]
                # The dialect should be stripped
                assert dsn.startswith("postgresql://")
                assert "+asyncpg" not in dsn
                # All host:port pairs must be preserved without URL-encoding
                # (colons must NOT become %3A)
                assert "host=HostA:5432" in dsn
                assert "host=HostB:5432" in dsn
                assert "host=HostC:5432" in dsn
                assert "%3A" not in dsn
    async def test_multihost_with_extra_params_preserved(self):
        """Test that multihost connection strings with additional params like
        target_session_attrs, gsslib, krbsrvname, and sslmode are preserved."""
        multihost_url = (
            "postgresql+asyncpg://user@/dbname"
            "?host=HostA:5432&host=HostB:5432&host=HostC:5432"
            "&gsslib=gssapi&krbsrvname=postgresql&ssl=require"
            "&target_session_attrs=primary"
        )
        with temporary_settings({PREFECT_API_DATABASE_CONNECTION_URL: multihost_url}):
            with mock.patch("asyncpg.connect", new_callable=AsyncMock) as mock_connect:
                mock_conn = MagicMock()
                mock_connect.return_value = mock_conn
                conn = await get_pg_notify_connection()
                assert conn == mock_conn
                mock_connect.assert_called_once()
                dsn = mock_connect.call_args.args[0]
                assert dsn.startswith("postgresql://")
                # All multihost params preserved
                assert "host=HostA:5432" in dsn
                assert "host=HostB:5432" in dsn
                assert "host=HostC:5432" in dsn
                # Additional connection params preserved
                assert "gsslib=gssapi" in dsn
                assert "krbsrvname=postgresql" in dsn
                assert "ssl=require" in dsn
                assert "target_session_attrs=primary" in dsn
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/server/utilities/test_postgres_listener.py",
"license": "Apache License 2.0",
"lines": 309,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/prefect/assets/core.py | from __future__ import annotations
from typing import Any, ClassVar, Optional
from pydantic import ConfigDict, Field
from prefect._internal.schemas.bases import PrefectBaseModel
from prefect.types import ValidAssetKey
MAX_ASSET_DESCRIPTION_LENGTH = 2500
class AssetProperties(PrefectBaseModel):
    """
    Metadata properties to configure on an Asset.

    All fields default to None; only explicitly-set fields are considered
    meaningful by consumers of these properties.
    """

    # Frozen config makes instances immutable (and hashable by pydantic).
    model_config: ClassVar[ConfigDict] = ConfigDict(frozen=True)

    # Display name shown in the UI; distinct from the asset's key.
    name: Optional[str] = Field(
        default=None, description="Human readable name of the Asset."
    )
    # External link (e.g. a dashboard) associated with the asset.
    url: Optional[str] = Field(
        default=None, description="Visitable url to view the Asset."
    )
    # Free-text description, length-capped at MAX_ASSET_DESCRIPTION_LENGTH.
    description: Optional[str] = Field(
        default=None,
        description="Description of the Asset.",
        max_length=MAX_ASSET_DESCRIPTION_LENGTH,
    )
    owners: Optional[list[str]] = Field(
        default=None, description="Owners of the Asset."
    )
class Asset(PrefectBaseModel):
    """
    Assets are objects that represent materialized data,
    providing a way to track lineage and dependencies.
    """

    # Frozen config makes instances immutable after construction.
    model_config: ClassVar[ConfigDict] = ConfigDict(frozen=True)

    # Unique URI-style identifier for the asset (validated by ValidAssetKey).
    key: ValidAssetKey
    properties: Optional[AssetProperties] = Field(
        default=None,
        description="Properties of the asset. "
        "Setting this will overwrite properties of a known asset.",
    )

    def __repr__(self) -> str:
        return f"Asset(key={self.key!r})"

    def __hash__(self) -> int:
        # Hash solely on the key so assets with the same key collapse in
        # sets/dicts regardless of their properties.
        return hash(self.key)

    def add_metadata(self, metadata: dict[str, Any]) -> None:
        """Attach metadata to this asset within the active AssetContext.

        Args:
            metadata: Arbitrary metadata to record for this asset's key.

        Raises:
            RuntimeError: If called outside of an AssetContext.
        """
        from prefect.context import AssetContext

        asset_ctx = AssetContext.get()
        if not asset_ctx:
            # Fixed grammar ("Unable add" -> "Unable to add") to match the
            # phrasing used by the module-level add_asset_metadata helper.
            raise RuntimeError(
                "Unable to add Asset metadata when not inside of an AssetContext"
            )
        asset_ctx.add_asset_metadata(self.key, metadata)
def add_asset_metadata(asset: str | Asset, metadata: dict[str, Any]) -> None:
    """Record metadata for an asset in the currently active AssetContext.

    Args:
        asset: Either an asset key string or an Asset instance.
        metadata: Arbitrary metadata to associate with the asset.

    Raises:
        RuntimeError: If no AssetContext is active.
    """
    from prefect.context import AssetContext

    context = AssetContext.get()
    if not context:
        raise RuntimeError(
            "Unable to call `add_asset_metadata` when not inside of an AssetContext"
        )

    # Normalize to a plain key before delegating to the context.
    key = asset if isinstance(asset, str) else asset.key
    context.add_asset_metadata(key, metadata)
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/assets/core.py",
"license": "Apache License 2.0",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/prefect:src/prefect/assets/materialize.py | from __future__ import annotations
from typing import TYPE_CHECKING, Callable, TypeVar, Union
from typing_extensions import ParamSpec, Unpack
from .core import Asset
T = TypeVar("T")
P = ParamSpec("P")
R = TypeVar("R")
if TYPE_CHECKING:
from prefect.tasks import MaterializingTask, TaskOptions
def materialize(
    *assets: Union[str, Asset],
    by: str | None = None,
    **task_kwargs: Unpack[TaskOptions],
) -> Callable[[Callable[P, R]], MaterializingTask[P, R]]:
    """
    Decorator for materializing assets.

    Args:
        *assets: Assets to materialize
        by: An optional tool that is ultimately responsible for materializing the asset e.g. "dbt" or "spark"
        **task_kwargs: Additional task configuration
    """
    # Validate eagerly so a bare `@materialize` (no assets) fails at
    # decoration time, before any heavier imports run.
    if not assets:
        raise TypeError(
            "materialize requires at least one asset argument, e.g. `@materialize(asset)`"
        )

    # Deferred import avoids a circular dependency with prefect.tasks.
    from prefect.tasks import MaterializingTask

    def wrap(func: Callable[P, R]) -> MaterializingTask[P, R]:
        return MaterializingTask(
            fn=func, assets=assets, materialized_by=by, **task_kwargs
        )

    return wrap
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/assets/materialize.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/prefect:tests/test_assets.py | import pytest
from prefect.assets import Asset, AssetProperties, materialize
from prefect.assets.core import MAX_ASSET_DESCRIPTION_LENGTH
from prefect.context import AssetContext
from prefect.events.worker import EventsWorker
from prefect.flows import flow
from prefect.tasks import task
from prefect.types.names import MAX_ASSET_KEY_LENGTH
def _asset_events(worker: EventsWorker):
    """Return every captured event whose name is in the asset namespace."""
    captured = worker._client.events
    return [evt for evt in captured if evt.event.startswith("prefect.asset.")]
def _first_event(worker: EventsWorker):
    """Return the earliest captured asset event, failing if there are none."""
    captured = _asset_events(worker)
    assert captured, "No asset events were captured by the worker"
    return captured[0]
def _materialization_events(events):
"""Filter events to only include asset materialization events."""
return [e for e in events if e.event.startswith("prefect.asset.materialization")]
def _reference_events(events):
"""Filter events to only include asset reference events."""
return [e for e in events if e.event.startswith("prefect.asset.referenced")]
def _event_with_resource_id(events, resource_id: str):
for e in events:
if e.resource.id == resource_id:
return e
else:
raise ValueError(f"No events with resource_id: {resource_id}")
def _has_upstream_asset(event, upstream_asset_key: str) -> bool:
return any(
r["prefect.resource.id"] == upstream_asset_key
and r["prefect.resource.role"] == "asset"
for r in event.related
)
def _has_related_of_role(event, role):
return any(r["prefect.resource.role"] == role for r in event.related)
# =============================================================================
# Basic Asset Validation and Utilities
# =============================================================================
@pytest.mark.parametrize(
    "invalid_key",
    [
        "invalid-key",
        "assets/my-asset",
        "/path/to/file",
        "no-protocol-prefix",
        "UPPERCASE://resource",
        "://missing-protocol",
    ],
)
def test_asset_invalid_uri(invalid_key):
    # Keys must be URIs of the form "<lowercase-scheme>://<resource>";
    # each parametrized value violates that shape in a different way.
    with pytest.raises(ValueError, match="Key must be a valid URI"):
        Asset(key=invalid_key)


@pytest.mark.parametrize(
    "invalid_key",
    [
        "s3://bucket/file with space.csv",
        "s3://bucket/file\nwith\nnewlines.csv",
        "s3://bucket/file\twith\ttabs.csv",
        "s3://bucket/file#fragment.csv",
        "s3://bucket/file?query=param.csv",
        # NOTE(review): the "¶m" below looks like a mangled "&param"
        # (HTML-entity corruption) — confirm against upstream history.
        "s3://bucket/file¶m=value.csv",
        "s3://bucket/file%encoded.csv",
        's3://bucket/file"quoted".csv',
        "s3://bucket/file'quoted'.csv",
        "s3://bucket/file<bracket>.csv",
        "s3://bucket/file[bracket].csv",
        "s3://bucket/file{brace}.csv",
        "s3://bucket/file|pipe.csv",
        "s3://bucket/file\\backslash.csv",
        "s3://bucket/file^caret.csv",
        "s3://bucket/file`backtick`.csv",
        "s3://bucket/file\r\ncarriage.csv",
        "s3://bucket/file\0null.csv",
    ],
)
def test_asset_restricted_characters(invalid_key):
    # Whitespace, URL-reserved punctuation, and control characters are
    # all rejected by key validation.
    with pytest.raises(ValueError):
        Asset(key=invalid_key)


def test_asset_max_length():
    """A key at exactly MAX_ASSET_KEY_LENGTH passes; one char over fails."""
    valid_key = "s3://bucket/" + "a" * (MAX_ASSET_KEY_LENGTH - len("s3://bucket/"))
    asset = Asset(key=valid_key)
    assert asset.key == valid_key

    invalid_key = "s3://bucket/" + "a" * (
        MAX_ASSET_KEY_LENGTH + 1 - len("s3://bucket/")
    )
    with pytest.raises(
        ValueError, match=f"Asset key cannot exceed {MAX_ASSET_KEY_LENGTH} characters"
    ):
        Asset(key=invalid_key)


def test_asset_length_edge_cases():
    """Exercise lengths just under, far over, and at the minimum viable URI."""
    # Test a few characters under the limit
    under_limit_key = "s3://bucket/" + "x" * (
        MAX_ASSET_KEY_LENGTH - 2 - len("s3://bucket/")
    )
    asset = Asset(key=under_limit_key)
    assert asset.key == under_limit_key

    # Test way over the limit
    way_over_key = "s3://bucket/" + "z" * 1000
    with pytest.raises(
        ValueError, match=f"Asset key cannot exceed {MAX_ASSET_KEY_LENGTH} characters"
    ):
        Asset(key=way_over_key)

    # Test minimum viable URI
    min_key = "s3://a"
    asset = Asset(key=min_key)
    assert asset.key == min_key


def test_asset_valid_characters():
    """Test that common valid characters work fine."""
    valid_keys = [
        "s3://bucket/folder/file.csv",
        "postgres://database/table",
        "file://local/path.txt",
        "custom://resource-with_underscores.data",
        "protocol://host:port/path",
        "scheme://user@host/resource",
        "s3://bucket/folder/file-name_123.parquet",
    ]
    for key in valid_keys:
        asset = Asset(key=key)
        assert asset.key == key
def test_asset_as_resource():
    """An asset converts to an event resource keyed by its asset key."""
    asset = Asset(key="s3://bucket/data")
    resource = AssetContext.asset_as_resource(asset)
    assert resource["prefect.resource.id"] == "s3://bucket/data"


def test_asset_as_related():
    """A related-resource entry carries the key plus the 'asset' role."""
    asset = Asset(key="postgres://prod/users")
    related = AssetContext.asset_as_related(asset)
    assert related["prefect.resource.id"] == "postgres://prod/users"
    assert related["prefect.resource.role"] == "asset"


def test_asset_as_resource_with_no_properties():
    """Without properties, the resource is only the id — no extra labels."""
    asset = Asset(key="s3://bucket/data")
    resource = AssetContext.asset_as_resource(asset)
    assert resource == {"prefect.resource.id": "s3://bucket/data"}
    assert "prefect.resource.name" not in resource
    assert "prefect.asset.description" not in resource
    assert "prefect.asset.url" not in resource
    assert "prefect.asset.owners" not in resource


def test_asset_as_resource_with_partial_properties():
    """Only the explicitly-set properties appear on the resource."""
    asset = Asset(
        key="postgres://prod/users",
        properties=AssetProperties(name="Users Table", description="Main users table"),
    )
    resource = AssetContext.asset_as_resource(asset)
    expected = {
        "prefect.resource.id": "postgres://prod/users",
        "prefect.resource.name": "Users Table",
        "prefect.asset.description": "Main users table",
    }
    assert resource == expected
    assert "prefect.asset.url" not in resource
    assert "prefect.asset.owners" not in resource


def test_asset_as_resource_with_all_properties():
    """All four properties map onto resource labels (owners JSON-encoded)."""
    asset = Asset(
        key="s3://data-lake/enriched/customers.parquet",
        properties=AssetProperties(
            name="Customer Data",
            description="Enriched customer dataset",
            url="https://dashboard.company.com/datasets/customers",
            owners=["data-team", "analytics"],
        ),
    )
    resource = AssetContext.asset_as_resource(asset)
    expected = {
        "prefect.resource.id": "s3://data-lake/enriched/customers.parquet",
        "prefect.resource.name": "Customer Data",
        "prefect.asset.description": "Enriched customer dataset",
        "prefect.asset.url": "https://dashboard.company.com/datasets/customers",
        # owners is serialized as a JSON array string
        "prefect.asset.owners": '["data-team", "analytics"]',
    }
    assert resource == expected


def test_asset_as_resource_excludes_unset_properties():
    """Test that asset_as_resource excludes properties that were not explicitly set."""
    asset = Asset(
        key="postgres://prod/transactions",
        properties=AssetProperties(
            name="Transactions",
            # description is not set (will be None)
            # url is not set (will be None)
            owners=["finance-team"],
        ),
    )
    resource = AssetContext.asset_as_resource(asset)

    # Should only include the fields that were explicitly set
    expected = {
        "prefect.resource.id": "postgres://prod/transactions",
        "prefect.resource.name": "Transactions",
        "prefect.asset.owners": '["finance-team"]',
    }
    assert resource == expected

    # Ensure unset fields are not included
    assert "prefect.asset.description" not in resource
    assert "prefect.asset.url" not in resource


def test_asset_as_resource_excludes_explicit_none_properties():
    """Fields explicitly set to None are still omitted from the resource."""
    asset = Asset(
        key="postgres://prod/users",
        properties=AssetProperties(
            name="Users",
            description=None,
            url=None,
            owners=None,
        ),
    )
    resource = AssetContext.asset_as_resource(asset)
    assert resource == {
        "prefect.resource.id": "postgres://prod/users",
        "prefect.resource.name": "Users",
    }


def test_asset_description_max_length():
    """Descriptions are capped at MAX_ASSET_DESCRIPTION_LENGTH characters."""
    # Test with description exactly at the limit
    exact_limit_description = "X" * MAX_ASSET_DESCRIPTION_LENGTH
    properties_exact = AssetProperties(description=exact_limit_description)
    assert len(properties_exact.description) == MAX_ASSET_DESCRIPTION_LENGTH
    assert properties_exact.description == exact_limit_description

    # Test with description under the limit
    short_description = "Short description"
    properties_short = AssetProperties(description=short_description)
    assert properties_short.description == short_description

    # Test with None description
    properties_none = AssetProperties(description=None)
    assert properties_none.description is None

    # Test that description longer than the limit raises ValidationError
    long_description = "A" * (MAX_ASSET_DESCRIPTION_LENGTH + 1)
    with pytest.raises(
        ValueError,
        match=f"String should have at most {MAX_ASSET_DESCRIPTION_LENGTH} characters",
    ):
        AssetProperties(description=long_description)
# =============================================================================
# Single Asset Operations
# =============================================================================
@pytest.mark.usefixtures("reset_worker_events")
def test_single_asset_materialization_success(asserting_events_worker: EventsWorker):
    """Test single asset materialization success.

    Expected graph: [M: postgres://prod/users]
    """
    users = Asset(key="postgres://prod/users")

    @materialize(users)
    def make_users():
        return {"rows": 100}

    @flow
    def pipeline():
        make_users()

    pipeline()
    asserting_events_worker.drain()

    evt = _first_event(asserting_events_worker)
    assert evt.event == "prefect.asset.materialization.succeeded"
    assert evt.resource.id == users.key
    # The emitting flow run is attached as a related resource.
    assert any(r.id.startswith("prefect.flow-run.") for r in evt.related)


@pytest.mark.usefixtures("reset_worker_events")
def test_single_asset_materialization_failure(asserting_events_worker: EventsWorker):
    """Test single asset materialization failure.

    Expected graph: [M: s3://data/broken] (failed)
    """
    asset = Asset(key="s3://data/broken")

    @materialize(asset)
    def always_broken():
        raise RuntimeError("boom")

    @flow
    def pipeline():
        # Swallow the task failure so the flow completes and events drain.
        try:
            always_broken()
        except RuntimeError:
            pass

    pipeline()
    asserting_events_worker.drain()

    evt = _first_event(asserting_events_worker)
    assert evt.event == "prefect.asset.materialization.failed"
    assert evt.resource.id == asset.key


@pytest.mark.usefixtures("reset_worker_events")
def test_single_asset_reference(asserting_events_worker: EventsWorker):
    """Test single asset reference.

    Expected graph: [], without a materialization no reference is emitted
    """

    @task(asset_deps=["s3://bucket/raw_data.csv"])
    def read_data():
        return {"rows": 100}

    @flow
    def pipeline():
        read_data()

    pipeline()
    asserting_events_worker.drain()

    # A bare reference with no downstream materialization emits nothing.
    events = _asset_events(asserting_events_worker)
    assert not events


@pytest.mark.usefixtures("reset_worker_events")
def test_multiple_asset_materializations(asserting_events_worker: EventsWorker):
    """Test multiple assets materialized by single function.

    Expected graph: [M: postgres://prod/users_raw], [M: postgres://prod/orders_raw]
    """
    user_asset = Asset(key="postgres://prod/users_raw")
    orders_asset = Asset(key="postgres://prod/orders_raw")

    @materialize(user_asset, orders_asset)
    def ingest():
        return ({"rows": 1}, {"rows": 1})

    @flow
    def pipeline():
        ingest()

    pipeline()
    asserting_events_worker.drain()

    # One materialization event per declared asset.
    ids = {e.resource.id for e in _asset_events(asserting_events_worker)}
    assert ids == {user_asset.key, orders_asset.key}
# =============================================================================
# String Key Conversion
# =============================================================================
@pytest.mark.usefixtures("reset_worker_events")
def test_mixed_asset_objects_and_string_keys(asserting_events_worker: EventsWorker):
    """Test that mixed Asset objects and string keys work together.

    This comprehensively tests string key conversion in both @materialize
    and @task(asset_deps).

    Expected graph:
        [R: postgres://db/users] --> [M: s3://bucket/final.parquet]
        [R: s3://bucket/raw.csv] --> [M: s3://bucket/summary.json]
    """
    # Mix Asset object and string in asset_deps
    asset_obj = Asset(key="postgres://db/users")

    @task(asset_deps=[asset_obj, "s3://bucket/raw.csv"])
    def read_mixed_deps():
        return {"data": "mixed"}

    # Mix Asset object and string in materialize
    output_asset = Asset(key="s3://bucket/final.parquet")

    @materialize(output_asset, "s3://bucket/summary.json")
    def write_mixed_outputs(data):
        return ({"final": True}, {"summary": True})

    @flow
    def pipeline():
        data = read_mixed_deps()
        write_mixed_outputs(data)

    pipeline()
    asserting_events_worker.drain()

    events = _asset_events(asserting_events_worker)
    assert len(events) == 4  # 2 references + 2 materializations

    ref_events = _reference_events(events)
    mat_events = _materialization_events(events)
    assert len(ref_events) == 2
    assert len(mat_events) == 2

    # Check reference events include both Asset object and string key
    ref_keys = {evt.resource.id for evt in ref_events}
    assert ref_keys == {"postgres://db/users", "s3://bucket/raw.csv"}

    # Check materialization events include both Asset object and string key
    mat_keys = {evt.resource.id for evt in mat_events}
    assert mat_keys == {"s3://bucket/final.parquet", "s3://bucket/summary.json"}

    # Check that materialization events have the references as related assets
    for mat_evt in mat_events:
        related_asset_ids = {r.id for r in mat_evt.related if r.role == "asset"}
        assert "postgres://db/users" in related_asset_ids
        assert "s3://bucket/raw.csv" in related_asset_ids
# =============================================================================
# Linear Dependencies
# =============================================================================
@pytest.mark.usefixtures("reset_worker_events")
def test_materialization_to_materialization_dependency(
    asserting_events_worker: EventsWorker,
):
    """Test linear asset dependency between two materializations.

    Expected graph: [M: postgres://prod/users] --> [M: postgres://prod/users_clean]
    """
    upstream = Asset(key="postgres://prod/users")
    downstream = Asset(key="postgres://prod/users_clean")

    @materialize(upstream)
    def extract():
        return {"rows": 10}

    @materialize(downstream)
    def load(data):
        return {"rows": 10}

    @flow
    def pipeline():
        df = extract()
        load(df)

    pipeline()
    asserting_events_worker.drain()

    events = _asset_events(asserting_events_worker)
    # 2 materializations + 1 reference for the consumed upstream asset.
    assert len(events) == 3

    ref_events = _reference_events(events)
    mat_events = _materialization_events(events)
    assert len(ref_events) == 1
    assert len(mat_events) == 2

    assert _event_with_resource_id(ref_events, upstream.key)
    assert {mat.resource.id for mat in mat_events} == {upstream.key, downstream.key}

    # The downstream materialization records the upstream as a related asset.
    downstream_mat = _event_with_resource_id(mat_events, downstream.key)
    assert _has_upstream_asset(downstream_mat, upstream.key)


@pytest.mark.usefixtures("reset_worker_events")
def test_reference_to_materialization_dependency(
    asserting_events_worker: EventsWorker,
):
    """Test linear dependency from reference to materialization.

    Expected graph: [R: postgres://prod/users] --> [M: postgres://prod/users_clean]
    """
    upstream = Asset(key="postgres://prod/users")
    downstream = Asset(key="postgres://prod/users_clean")

    @task(asset_deps=[upstream])
    def read():
        return {"rows": 1}

    @materialize(downstream)
    def load(data):
        return {"rows": 1}

    @flow
    def pipeline():
        data = read()
        load(data)

    pipeline()
    asserting_events_worker.drain()

    events = _asset_events(asserting_events_worker)
    mat_events = _materialization_events(events)
    ref_events = _reference_events(events)

    assert len(mat_events) == 1
    assert len(ref_events) == 1

    mat_evt = _event_with_resource_id(mat_events, downstream.key)
    assert _has_upstream_asset(mat_evt, upstream.key)


@pytest.mark.usefixtures("reset_worker_events")
def test_linear_dependency_with_intermediate_task(
    asserting_events_worker: EventsWorker,
):
    """Test linear dependency with intermediate non-asset task.

    Expected graph: [M: s3://data/raw_data] --> [M: s3://data/processed_data]
    """
    upstream = Asset(key="s3://data/raw_data")
    downstream = Asset(key="s3://data/processed_data")

    @materialize(upstream)
    def extract():
        return {"rows": 100}

    # A plain task between the two materializations must not break lineage.
    @task
    def transform(data):
        return {"rows": data["rows"], "processed": True}

    @materialize(downstream)
    def load(transformed_data):
        return {"rows": transformed_data["rows"]}

    @flow
    def pipeline():
        raw_data = extract()
        transformed_data = transform(raw_data)
        load(transformed_data)

    pipeline()
    asserting_events_worker.drain()

    events = _asset_events(asserting_events_worker)
    assert len(events) == 3

    ref_events = _reference_events(events)
    mat_events = _materialization_events(events)

    assert _event_with_resource_id(ref_events, upstream.key)
    assert {mat.resource.id for mat in mat_events} == {upstream.key, downstream.key}

    downstream_mat = _event_with_resource_id(mat_events, downstream.key)
    assert _has_upstream_asset(downstream_mat, upstream.key)


@pytest.mark.usefixtures("reset_worker_events")
def test_materialize_with_explicit_asset_deps(asserting_events_worker: EventsWorker):
    """Test @materialize with explicit asset_deps parameter.

    Expected graph: [R: s3://bucket/raw_data.csv] --> [M: s3://bucket/data.csv]
    """

    @materialize("s3://bucket/data.csv", asset_deps=["s3://bucket/raw_data.csv"])
    def write_data():
        return {"rows": 100}

    @flow
    def pipeline():
        write_data()

    pipeline()
    asserting_events_worker.drain()

    events = _asset_events(asserting_events_worker)
    assert len(events) == 2

    # Find reference and materialization events
    ref_events = _reference_events(events)
    mat_events = _materialization_events(events)
    assert len(ref_events) == 1
    assert len(mat_events) == 1

    # Check reference
    assert ref_events[0].resource.id == "s3://bucket/raw_data.csv"

    # Check materialization
    mat_evt = _event_with_resource_id(mat_events, "s3://bucket/data.csv")
    assert _has_upstream_asset(mat_evt, "s3://bucket/raw_data.csv")
@pytest.mark.usefixtures("reset_worker_events")
def test_three_stage_linear_pipeline(asserting_events_worker: EventsWorker):
    """Test three-stage linear pipeline with direct dependencies only.

    Expected graph:
        [M: s3://lake/bronze/users] --> [M: s3://lake/silver/users]
            --> [M: s3://lake/gold/users]
    """
    bronze = Asset(key="s3://lake/bronze/users")
    silver = Asset(key="s3://lake/silver/users")
    gold = Asset(key="s3://lake/gold/users")

    @materialize(bronze)
    def stage_bronze():
        return {"rows": 100}

    @materialize(silver)
    def stage_silver(df):
        return {"rows": df["rows"]}

    @materialize(gold)
    def stage_gold(df):
        return {"rows": df["rows"]}

    @flow
    def pipeline():
        bronze_df = stage_bronze()
        silver_df = stage_silver(bronze_df)
        stage_gold(silver_df)

    pipeline()
    asserting_events_worker.drain()

    events = _asset_events(asserting_events_worker)
    assert len(events) == 5  # 3 materializations + 2 reference events

    # Get materialization and reference events using helper functions
    mat_events = _materialization_events(events)
    ref_events = _reference_events(events)
    assert len(mat_events) == 3
    assert len(ref_events) == 2

    # Get specific materialization events using helper function
    evt_bronze = _event_with_resource_id(mat_events, bronze.key)
    evt_silver = _event_with_resource_id(mat_events, silver.key)
    evt_gold = _event_with_resource_id(mat_events, gold.key)

    # Bronze has no upstream dependencies
    assert not _has_related_of_role(evt_bronze, "asset")

    # Silver has bronze as upstream dependency
    assert _has_upstream_asset(evt_silver, bronze.key)

    # Gold has silver as upstream dependency (direct only — not bronze)
    assert _has_upstream_asset(evt_gold, silver.key)

    # Check that reference events are emitted for upstream assets
    ref_asset_ids = {e.resource.id for e in ref_events}
    assert ref_asset_ids == {bronze.key, silver.key}

    for e in (evt_bronze, evt_silver, evt_gold):
        assert any(r.id.startswith("prefect.flow-run.") for r in e.related)
# =============================================================================
# Complex Dependency Patterns
# =============================================================================
@pytest.mark.usefixtures("reset_worker_events")
def test_fan_in_dependency(asserting_events_worker: EventsWorker):
    """Test fan-in dependency pattern.

    Expected graph:
        [M: postgres://prod/users]  \\
                                     --> [M: postgres://prod/user_orders_enriched]
        [M: postgres://prod/orders] /
    """
    raw_users = Asset(key="postgres://prod/users")
    raw_orders = Asset(key="postgres://prod/orders")
    user_orders = Asset(key="postgres://prod/user_orders_enriched")

    @materialize(raw_users)
    def extract_users():
        return {"rows": 10}

    @materialize(raw_orders)
    def extract_orders():
        return {"rows": 20}

    @task
    def enrich(users_df, orders_df):
        return {"rows": 15}

    @materialize(user_orders)
    def load_user_orders(enriched_df):
        return {"rows": 15}

    @flow
    def user_orders_pipeline():
        users_df = extract_users()
        orders_df = extract_orders()
        enriched = enrich(users_df, orders_df)
        load_user_orders(enriched)

    user_orders_pipeline()
    asserting_events_worker.drain()

    events = _asset_events(asserting_events_worker)
    assert len(events) == 5  # 3 materializations + 2 reference events

    mat_events = _materialization_events(events)
    ref_events = _reference_events(events)
    assert len(mat_events) == 3
    assert len(ref_events) == 2

    # Check reference events are emitted for upstream assets
    ref_asset_ids = {e.resource.id for e in ref_events}
    assert ref_asset_ids == {raw_users.key, raw_orders.key}

    # Check the downstream materialization event: both branches fan in.
    downstream_evt = _event_with_resource_id(mat_events, user_orders.key)
    assert _has_upstream_asset(downstream_evt, raw_users.key)
    assert _has_upstream_asset(downstream_evt, raw_orders.key)
    assert any(r.id.startswith("prefect.flow-run.") for r in downstream_evt.related)


@pytest.mark.usefixtures("reset_worker_events")
def test_fan_out_dependency(asserting_events_worker: EventsWorker):
    """Test fan-out dependency pattern.

    Expected graph:
                                  --> [M: s3://data/events_daily]
        [M: s3://data/events_raw]
                                  --> [M: s3://data/events_hourly]

    IMPORTANT: Sibling assets (events_daily and events_hourly) should NOT
    have each other in their related arrays - they only share the same
    upstream parent, they are not dependencies of each other.
    """
    events_raw = Asset(key="s3://data/events_raw")
    events_daily = Asset(key="s3://data/events_daily")
    events_hourly = Asset(key="s3://data/events_hourly")

    @materialize(events_raw)
    def ingest_events():
        return {"rows": 100}

    @task
    def aggregate_daily(df):
        return {"daily_rows": 30}

    @task
    def aggregate_hourly(df):
        return {"hourly_rows": 24}

    @materialize(events_daily)
    def load_daily(df):
        return {"rows": 30}

    @materialize(events_hourly)
    def load_hourly(df):
        return {"rows": 24}

    @flow
    def events_pipeline():
        raw = ingest_events()
        daily_df = aggregate_daily(raw)
        hourly_df = aggregate_hourly(raw)
        load_daily(daily_df)
        load_hourly(hourly_df)

    events_pipeline()
    asserting_events_worker.drain()

    events = _asset_events(asserting_events_worker)
    assert len(events) == 5  # 3 materializations + 2 reference events

    mat_events = _materialization_events(events)
    ref_events = _reference_events(events)
    assert len(mat_events) == 3
    assert len(ref_events) == 2

    # Check reference events are emitted for upstream assets (2 events for same asset)
    ref_asset_ids = {e.resource.id for e in ref_events}
    assert ref_asset_ids == {events_raw.key}
    assert len(ref_events) == 2  # Two reference events for the same upstream asset

    # Check the downstream materialization events
    daily_evt = _event_with_resource_id(mat_events, events_daily.key)
    hourly_evt = _event_with_resource_id(mat_events, events_hourly.key)

    assert _has_upstream_asset(daily_evt, events_raw.key)
    assert _has_upstream_asset(hourly_evt, events_raw.key)

    # Verify sibling assets do NOT reference each other.
    # In a fan-out pattern, the daily and hourly assets both depend on raw,
    # but they should NOT have each other as dependencies.
    assert not _has_upstream_asset(daily_evt, events_hourly.key), (
        "daily should NOT have hourly as upstream"
    )
    assert not _has_upstream_asset(hourly_evt, events_daily.key), (
        "hourly should NOT have daily as upstream"
    )

    # Also check for flow-run context
    assert any(r.id.startswith("prefect.flow-run.") for r in daily_evt.related)
    assert any(r.id.startswith("prefect.flow-run.") for r in hourly_evt.related)


@pytest.mark.usefixtures("reset_worker_events")
def test_fan_in_to_fan_out_dependency(asserting_events_worker: EventsWorker):
    """Test fan-in to fan-out dependency pattern.

    Expected graph:
        [M: postgres://prod/users_raw] ---> [M: postgres://prod/orders_per_user]
            |
        [M: postgres://prod/orders_raw] ---> [M: postgres://prod/orders_summary]
    """
    users_raw = Asset(key="postgres://prod/users_raw")
    orders_raw = Asset(key="postgres://prod/orders_raw")
    per_user = Asset(key="postgres://prod/orders_per_user")
    summary = Asset(key="postgres://prod/orders_summary")

    @materialize(users_raw, orders_raw)
    def ingest():
        return ({"users": 50}, {"orders": 200})

    @materialize(per_user, summary)
    def build(u_df, o_df):
        return ({"per_user_rows": 50}, {"summary_rows": 10})

    @flow
    def pipeline():
        u_df, o_df = ingest()
        build(u_df, o_df)

    pipeline()
    asserting_events_worker.drain()

    events = _asset_events(asserting_events_worker)
    assert len(events) == 6  # 4 materializations + 2 reference events

    mat_events = _materialization_events(events)
    ref_events = _reference_events(events)
    assert len(mat_events) == 4
    assert len(ref_events) == 2

    # Check reference events are emitted for upstream assets
    ref_asset_ids = {e.resource.id for e in ref_events}
    assert ref_asset_ids == {users_raw.key, orders_raw.key}

    # Check each materialization event
    users_evt = _event_with_resource_id(mat_events, users_raw.key)
    orders_evt = _event_with_resource_id(mat_events, orders_raw.key)
    per_user_evt = _event_with_resource_id(mat_events, per_user.key)
    summary_evt = _event_with_resource_id(mat_events, summary.key)

    # Raw assets have no upstream dependencies
    assert not _has_related_of_role(users_evt, "asset")
    assert not _has_related_of_role(orders_evt, "asset")

    # Downstream assets have both raw assets as upstream dependencies
    assert _has_upstream_asset(per_user_evt, users_raw.key)
    assert _has_upstream_asset(per_user_evt, orders_raw.key)
    assert _has_upstream_asset(summary_evt, users_raw.key)
    assert _has_upstream_asset(summary_evt, orders_raw.key)

    # Also check for flow-run context
    for evt in [users_evt, orders_evt, per_user_evt, summary_evt]:
        assert any(r.id.startswith("prefect.flow-run.") for r in evt.related)
@pytest.mark.usefixtures("reset_worker_events")
def test_forward_propagation_asset_lineage(asserting_events_worker: EventsWorker):
    """Test that asset lineage flows forward through task graph without backward traversal.

    Expected graph:
        [R: s3://bucket/raw.csv]   \\
                                    --> [M: s3://bucket/final.csv]
        [R: postgres://prod/users] /
    """

    @task(asset_deps=["s3://bucket/raw.csv"])
    def extract():
        return {"data": "raw"}

    @task(asset_deps=["postgres://prod/users"])
    def transform(data):
        return {"data": "transformed"}

    @materialize("s3://bucket/final.csv")
    def load(data):
        return {"data": "final"}

    @flow
    def etl_pipeline():
        raw = extract()
        transformed = transform(raw)
        load(transformed)

    etl_pipeline()
    asserting_events_worker.drain()

    events = _asset_events(asserting_events_worker)
    assert len(events) == 3

    # Find all event types
    ref_events = _reference_events(events)
    mat_events = _materialization_events(events)

    assert len(ref_events) == 2  # Two reference events
    assert len(mat_events) == 1  # One materialization event

    # Check references
    refs_resources = {e.resource.id for e in ref_events}
    assert "s3://bucket/raw.csv" in refs_resources
    assert "postgres://prod/users" in refs_resources

    # Check materialization - should include both upstream assets as related
    mat_event = mat_events[0]
    assert mat_event.resource.id == "s3://bucket/final.csv"

    # The materialization should have both upstream assets as related
    related_asset_ids = {r.id for r in mat_event.related if r.role == "asset"}
    assert "s3://bucket/raw.csv" in related_asset_ids
    assert "postgres://prod/users" in related_asset_ids
@pytest.mark.usefixtures("reset_worker_events")
def test_complex_snowflake_aggregation(asserting_events_worker: EventsWorker):
    """Test complex Snowflake aggregation pattern with multiple references and materializations.

    Expected graph:
    [R: .../table-1-raw] --> [M: .../table-1-cleaned] \
    [R: .../table-2-raw] --> [M: .../table-2-cleaned] --> [M: .../aggregated-table]
    [R: .../table-3-raw] --> [M: .../table-3-cleaned] /
    """
    SNOWFLAKE_SCHEMA = "snowflake://my-database/my-schema"

    @task(asset_deps=[Asset(key=f"{SNOWFLAKE_SCHEMA}/table-1-raw")])
    def table_1_raw():
        return "fake data 1"

    @task(asset_deps=[Asset(key=f"{SNOWFLAKE_SCHEMA}/table-2-raw")])
    def table_2_raw():
        return "fake data 2"

    @task(asset_deps=[Asset(key=f"{SNOWFLAKE_SCHEMA}/table-3-raw")])
    def table_3_raw():
        return "fake data 3"

    table_1_cleaned_asset = Asset(key=f"{SNOWFLAKE_SCHEMA}/table-1-cleaned")
    table_2_cleaned_asset = Asset(key=f"{SNOWFLAKE_SCHEMA}/table-2-cleaned")
    table_3_cleaned_asset = Asset(key=f"{SNOWFLAKE_SCHEMA}/table-3-cleaned")

    @materialize(table_1_cleaned_asset)
    def table_1_cleaned(raw_table_1):
        return f"cleaned {raw_table_1}"

    @materialize(table_2_cleaned_asset)
    def table_2_cleaned(raw_table_2):
        return f"cleaned {raw_table_2}"

    @materialize(table_3_cleaned_asset)
    def table_3_cleaned(raw_table_3):
        return f"cleaned {raw_table_3}"

    aggregated_asset = Asset(key=f"{SNOWFLAKE_SCHEMA}/aggregated-table")

    @materialize(aggregated_asset)
    def aggregated_table(cleaned_table_1, cleaned_table_2, cleaned_table_3):
        return None

    @flow
    def my_flow():
        r1 = table_1_raw()
        r2 = table_2_raw()
        r3 = table_3_raw()
        c1 = table_1_cleaned(r1)
        c2 = table_2_cleaned(r2)
        c3 = table_3_cleaned(r3)
        aggregated_table(c1, c2, c3)

    my_flow()
    asserting_events_worker.drain()
    events = _asset_events(asserting_events_worker)
    assert len(events) == 10  # 4 materializations + 6 reference events
    mat_events = _materialization_events(events)
    ref_events = _reference_events(events)
    assert len(mat_events) == 4
    assert len(ref_events) == 6
    # Index by resource id for direct lookup of individual events below.
    by_id = {e.resource.id: e for e in events}
    # Check reference events for raw assets (direct asset dependencies)
    for raw_key in (
        f"{SNOWFLAKE_SCHEMA}/table-1-raw",
        f"{SNOWFLAKE_SCHEMA}/table-2-raw",
        f"{SNOWFLAKE_SCHEMA}/table-3-raw",
    ):
        evt = by_id[raw_key]
        assert evt.event == "prefect.asset.referenced"
        # Raw assets are graph roots and must carry no upstream asset relations.
        assert not _has_related_of_role(evt, "asset")
    # Check materialization events for cleaned assets
    for cleaned_key, raw_key in [
        (table_1_cleaned_asset.key, f"{SNOWFLAKE_SCHEMA}/table-1-raw"),
        (table_2_cleaned_asset.key, f"{SNOWFLAKE_SCHEMA}/table-2-raw"),
        (table_3_cleaned_asset.key, f"{SNOWFLAKE_SCHEMA}/table-3-raw"),
    ]:
        evt = _event_with_resource_id(mat_events, cleaned_key)
        assert _has_upstream_asset(evt, raw_key)
    # Check aggregated materialization event
    agg_evt = _event_with_resource_id(mat_events, aggregated_asset.key)
    assert _has_upstream_asset(agg_evt, table_1_cleaned_asset.key)
    assert _has_upstream_asset(agg_evt, table_2_cleaned_asset.key)
    assert _has_upstream_asset(agg_evt, table_3_cleaned_asset.key)
    # Every asset event should carry the emitting flow run as a related resource.
    for e in events:
        assert any(r.id.startswith("prefect.flow-run.") for r in e.related)
# =============================================================================
# Advanced Execution Patterns
# =============================================================================
@pytest.mark.usefixtures("reset_worker_events")
async def test_async_materialization(asserting_events_worker: EventsWorker):
    """Test async asset materialization.

    Expected graph: [M: postgres://prod/async]
    """
    async_asset = Asset(key="postgres://prod/async")

    @materialize(async_asset)
    async def produce():
        return {"rows": 5}

    @flow
    async def async_flow():
        await produce()

    await async_flow()
    await asserting_events_worker.drain()

    # A single successful materialization event for the async asset.
    emitted = _first_event(asserting_events_worker)
    assert emitted.event == "prefect.asset.materialization.succeeded"
    assert emitted.resource.id == async_asset.key
@pytest.mark.usefixtures("reset_worker_events")
def test_cached_asset_does_not_emit_duplicate_events(
    asserting_events_worker: EventsWorker,
):
    """Test that cached assets don't emit duplicate events.

    Expected graph: [M: s3://bucket/cached-data] (only first execution, second is cached)
    """
    asset = Asset(key="s3://bucket/cached-data")

    @materialize(asset, persist_result=True)
    def make_data():
        return {"rows": 100}

    @flow
    def pipeline():
        # First run - should emit materialization event
        make_data()
        # Second run - should use cache and NOT emit event
        make_data()

    pipeline()
    asserting_events_worker.drain()
    events = _asset_events(asserting_events_worker)
    # Exactly one event despite two task calls: the cached call emits nothing.
    assert len(events) == 1
    assert events[0].event == "prefect.asset.materialization.succeeded"
    assert events[0].resource.id == asset.key
@pytest.mark.usefixtures("reset_worker_events")
def test_linear_dependency_with_submit(asserting_events_worker: EventsWorker):
    """Test linear dependency using task.submit().

    Expected graph: [R: postgres://prod/users_submit] --> [M: postgres://prod/users_clean_submit]
    """
    upstream = Asset(key="postgres://prod/users_submit")
    downstream = Asset(key="postgres://prod/users_clean_submit")

    @task(asset_deps=[upstream])
    def extract():
        return {"rows": 10}

    @materialize(downstream)
    def load(data):
        return {"rows": 10}

    @flow
    def pipeline():
        # Passing the upstream future into load() establishes the dependency.
        fut_up = extract.submit()
        fut_down = load.submit(fut_up)
        # explicitly wait
        fut_down.wait()

    pipeline()
    asserting_events_worker.drain()
    events = _asset_events(asserting_events_worker)
    assert len(events) == 2
    upstream_events = [e for e in events if e.resource.id == upstream.key]
    downstream_events = [e for e in events if e.resource.id == downstream.key]
    assert len(upstream_events) == 1
    assert len(downstream_events) == 1
    # Dependency tracking must survive the submit()-based invocation path.
    downstream_evt = _event_with_resource_id(events, downstream.key)
    assert _has_upstream_asset(downstream_evt, upstream.key)
    assert _has_related_of_role(downstream_evt, "flow-run")
@pytest.mark.usefixtures("reset_worker_events")
def test_map_with_asset_dependency(asserting_events_worker: EventsWorker):
    """Test map operation with asset dependency.

    Expected graph:
    [R: s3://data/source_data] --> [M: s3://data/processed] (latest of task 1, 2, 3)
    """
    source_asset = Asset(key="s3://data/source_data")
    destination_asset = Asset(key="s3://data/processed")

    @task(asset_deps=[source_asset])
    def extract_source():
        return ["item1", "item2", "item3"]

    @materialize(destination_asset)
    def process_item(item):
        return {"processed": item}

    @flow
    def pipeline():
        source_data = extract_source()
        # map() fans out one task run per list element.
        futures = process_item.map(source_data)
        for future in futures:
            future.wait()

    pipeline()
    asserting_events_worker.drain()
    events = _asset_events(asserting_events_worker)
    # Each of the 3 mapped runs emits a reference AND a materialization.
    assert len(events) == 6
    source_events = [e for e in events if e.resource.id == source_asset.key]
    assert len(source_events) == 3
    destination_events = [e for e in events if e.resource.id == destination_asset.key]
    assert len(destination_events) == 3
    for evt in destination_events:
        assert evt.event == "prefect.asset.materialization.succeeded"
        assert _has_upstream_asset(evt, source_asset.key)
        assert _has_related_of_role(evt, "flow-run")
@pytest.mark.usefixtures("reset_worker_events")
def test_asset_dependency_with_wait_for(asserting_events_worker: EventsWorker):
    """Test asset dependency using wait_for parameter.

    Expected graph: [R: s3://data/dependencies/source] --> [M: s3://data/dependencies/dependent]
    """
    source_asset = Asset(key="s3://data/dependencies/source")
    dependent_asset = Asset(key="s3://data/dependencies/dependent")

    @task(asset_deps=[source_asset])
    def create_source():
        return {"source_data": "value"}

    @materialize(dependent_asset)
    def create_dependent():
        return {"dependent_data": "processed"}

    @flow
    def pipeline():
        source_future = create_source.submit()
        # wait_for is the only link here: no data is passed between tasks,
        # so lineage must be inferred from the explicit wait_for edge.
        dependent_future = create_dependent.submit(wait_for=source_future)
        dependent_future.wait()

    pipeline()
    asserting_events_worker.drain()
    events = _asset_events(asserting_events_worker)
    assert len(events) == 2
    source_events = [e for e in events if e.resource.id == source_asset.key]
    dependent_events = [e for e in events if e.resource.id == dependent_asset.key]
    assert len(source_events) == 1
    assert len(dependent_events) == 1
    dependent_evt = _event_with_resource_id(events, dependent_asset.key)
    assert _has_upstream_asset(dependent_evt, source_asset.key)
    assert _has_related_of_role(dependent_evt, "flow-run")
# =============================================================================
# @materialize(... by=...)
# =============================================================================
@pytest.mark.usefixtures("reset_worker_events")
def test_materialization_with_by_parameter(asserting_events_worker: EventsWorker):
    """Test that @materialize with by parameter includes materialized-by tool as related resource.

    Expected graph: [M: s3://bucket/dbt_table] (materialized by dbt)
    """
    dbt_asset = Asset(key="s3://bucket/dbt_table")

    @materialize(dbt_asset, by="dbt")
    def create_dbt_table():
        return {"rows": 100}

    @flow
    def pipeline():
        create_dbt_table()

    pipeline()
    asserting_events_worker.drain()

    emitted = _first_event(asserting_events_worker)
    assert emitted.event == "prefect.asset.materialization.succeeded"
    assert emitted.resource.id == dbt_asset.key
    assert _has_related_of_role(emitted, "asset-materialized-by")
    # Exactly one tool resource should be attached, naming the tool from `by=`.
    by_resources = [r for r in emitted.related if r.role == "asset-materialized-by"]
    assert len(by_resources) == 1
    assert by_resources[0].id == "dbt"
@pytest.mark.usefixtures("reset_worker_events")
def test_materialization_with_by_parameter_and_dependencies(
    asserting_events_worker: EventsWorker,
):
    """Test materialization with by parameter includes tool alongside asset dependencies.

    Expected graph: [R: postgres://prod/raw_users] --> [M: s3://warehouse/users] (materialized by spark)
    """
    source_asset = Asset(key="postgres://prod/raw_users")
    target_asset = Asset(key="s3://warehouse/users")

    @task(asset_deps=[source_asset])
    def extract_users():
        return {"users": 500}

    @materialize(target_asset, by="spark")
    def transform_users(raw_data):
        return {"processed_users": raw_data["users"]}

    @flow
    def pipeline():
        raw_data = extract_users()
        transform_users(raw_data)

    pipeline()
    asserting_events_worker.drain()
    events = _asset_events(asserting_events_worker)
    assert len(events) == 2
    # Find the materialization event
    mat_events = _materialization_events(events)
    assert len(mat_events) == 1
    mat_evt = mat_events[0]
    assert mat_evt.resource.id == target_asset.key
    # Both the upstream asset and the `by=` tool must appear as related
    # resources on the same event, under distinct roles.
    related_by_role = {r.role: r.id for r in mat_evt.related}
    assert "asset" in related_by_role
    assert related_by_role["asset"] == source_asset.key
    assert "asset-materialized-by" in related_by_role
    assert related_by_role["asset-materialized-by"] == "spark"
# =============================================================================
# Duplicate Asset Prevention
# =============================================================================
@pytest.mark.usefixtures("reset_worker_events")
def test_materialize_prevents_duplicate_assets(asserting_events_worker: EventsWorker):
    """Test that @materialize prevents duplicate assets in args and asset_deps."""
    asset1 = Asset(key="s3://bucket/data1.csv")
    asset2 = Asset(key="s3://bucket/data2.csv")

    # Test duplicate assets as positional arguments
    @materialize(asset1, asset1, asset2)  # asset1 appears twice
    def make_data_with_duplicate_args():
        return ({"rows": 100}, {"rows": 100}, {"rows": 200})

    # Test duplicate assets in asset_deps
    @materialize(
        "s3://bucket/output.csv",
        asset_deps=[asset1, asset1, asset2],  # asset1 appears twice
    )
    def make_data_with_duplicate_deps():
        return {"rows": 300}

    @flow
    def pipeline():
        make_data_with_duplicate_args()
        make_data_with_duplicate_deps()

    pipeline()
    asserting_events_worker.drain()
    events = _asset_events(asserting_events_worker)
    mat_events = _materialization_events(events)
    ref_events = _reference_events(events)
    # Should only have unique materialization events (no duplicates)
    mat_keys = {evt.resource.id for evt in mat_events}
    assert mat_keys == {asset1.key, asset2.key, "s3://bucket/output.csv"}
    # Should only have unique reference events (no duplicates)
    ref_keys = {evt.resource.id for evt in ref_events}
    assert ref_keys == {asset1.key, asset2.key}
    # Verify exact count - duplicates should be eliminated
    assert len(mat_events) == 3  # asset1, asset2, output.csv (no duplicate asset1)
    assert len(ref_events) == 2  # asset1, asset2 (no duplicate asset1)
@pytest.mark.usefixtures("reset_worker_events")
def test_task_asset_deps_prevents_duplicates(asserting_events_worker: EventsWorker):
    """Test that @task asset_deps prevents duplicate assets."""
    asset1 = Asset(key="postgres://db/table1")
    asset2 = Asset(key="postgres://db/table2")

    # Test duplicate assets in asset_deps using mix of Asset objects and strings
    @task(
        asset_deps=[
            asset1,
            "postgres://db/table1",  # Same as asset1 but as string
            asset2,
            asset2,  # Direct duplicate
        ]
    )
    def read_data():
        return {"data": "processed"}

    @materialize("s3://output/result.csv")
    def save_data(data):
        return {"rows": 100}

    @flow
    def pipeline():
        data = read_data()
        save_data(data)

    pipeline()
    asserting_events_worker.drain()
    events = _asset_events(asserting_events_worker)
    ref_events = _reference_events(events)
    mat_events = _materialization_events(events)
    # Should only have unique reference events (duplicates eliminated)
    ref_keys = {evt.resource.id for evt in ref_events}
    assert ref_keys == {asset1.key, asset2.key}
    # Should have exactly 2 reference events (no duplicates)
    assert len(ref_events) == 2
    # Should have 1 materialization event
    assert len(mat_events) == 1
    assert mat_events[0].resource.id == "s3://output/result.csv"
    # The materialization should have both unique upstream assets
    # (deduplication must not drop a distinct asset, only repeats).
    mat_evt = mat_events[0]
    related_asset_ids = {r.id for r in mat_evt.related if r.role == "asset"}
    assert related_asset_ids == {asset1.key, asset2.key}
# =============================================================================
# Metadata
# =============================================================================
@pytest.mark.usefixtures("reset_worker_events")
def test_linear_dependency_with_asset_properties(asserting_events_worker: EventsWorker):
    """Test linear dependency from reference to materialization where both assets have properties.

    Expected graph: [R: s3://lake/raw/customer_data.parquet] --> [M: postgres://warehouse/customers]
    """
    source_asset = Asset(
        key="s3://lake/raw/customer_data.parquet",
        properties=AssetProperties(
            name="Raw Customer Data",
            description="Raw customer data from external source",
            url="https://dashboard.company.com/datasets/raw-customers",
            owners=["data-ingestion-team"],
        ),
    )
    target_asset = Asset(
        key="postgres://warehouse/customers",
        properties=AssetProperties(
            name="Customer Table",
            description="Processed customer data in warehouse",
            url="https://dashboard.company.com/tables/customers",
            owners=["data-team", "analytics-team"],
        ),
    )

    @task(asset_deps=[source_asset])
    def extract_customers():
        return {"rows": 1000, "extracted": True}

    @materialize(target_asset)
    def load_customers(data):
        return {"rows": data["rows"], "processed": True}

    @flow
    def customer_pipeline():
        raw_data = extract_customers()
        load_customers(raw_data)

    customer_pipeline()
    asserting_events_worker.drain()
    events = _asset_events(asserting_events_worker)
    # Properties must not change event emission: still one reference,
    # one materialization, with the usual lineage and flow-run relations.
    assert len(events) == 2
    ref_events = _reference_events(events)
    mat_events = _materialization_events(events)
    assert len(ref_events) == 1
    assert len(mat_events) == 1
    ref_evt = ref_events[0]
    assert ref_evt.resource.id == source_asset.key
    assert ref_evt.event == "prefect.asset.referenced"
    mat_evt = mat_events[0]
    assert mat_evt.resource.id == target_asset.key
    assert mat_evt.event == "prefect.asset.materialization.succeeded"
    assert _has_upstream_asset(mat_evt, source_asset.key)
    assert _has_related_of_role(ref_evt, "flow-run")
    assert _has_related_of_role(mat_evt, "flow-run")
@pytest.mark.usefixtures("reset_worker_events")
def test_materialization_metadata(asserting_events_worker: EventsWorker):
    """Test that metadata is still captured when a materializing task succeeds."""
    data_asset = Asset(key="s3://bucket/data.csv")

    @materialize(data_asset)
    def my_task():
        data_asset.add_metadata({"wrote_rows": 1000})

    @flow
    def pipeline():
        my_task()

    pipeline()
    asserting_events_worker.drain()

    # Metadata recorded via add_metadata lands in the event payload.
    emitted = _first_event(asserting_events_worker)
    assert emitted.event == "prefect.asset.materialization.succeeded"
    assert emitted.resource.id == "s3://bucket/data.csv"
    assert emitted.payload == {"wrote_rows": 1000}
@pytest.mark.usefixtures("reset_worker_events")
def test_materialization_metadata_str_utility(asserting_events_worker: EventsWorker):
    """Test that metadata is still captured when a materializing task succeeds."""
    from prefect.assets import add_asset_metadata

    @materialize("s3://bucket/data.csv")
    def my_task():
        # The string-key utility is an alternative to Asset.add_metadata.
        add_asset_metadata("s3://bucket/data.csv", {"wrote_rows": 1000})

    @flow
    def pipeline():
        my_task()

    pipeline()
    asserting_events_worker.drain()

    emitted = _first_event(asserting_events_worker)
    assert emitted.event == "prefect.asset.materialization.succeeded"
    assert emitted.resource.id == "s3://bucket/data.csv"
    assert emitted.payload == {"wrote_rows": 1000}
@pytest.mark.usefixtures("reset_worker_events")
def test_stacking_materialization_metadata(asserting_events_worker: EventsWorker):
    """Test that metadata is still captured when a materializing task succeeds."""
    asset = Asset(key="s3://bucket/data.csv")

    @materialize(asset)
    def my_task():
        # Successive add_metadata calls should merge, not overwrite.
        asset.add_metadata({"wrote_rows": 1000})
        asset.add_metadata({"wrote_columns": 5})

    @flow
    def pipeline():
        my_task()

    pipeline()
    asserting_events_worker.drain()
    event = _first_event(asserting_events_worker)
    assert event.event == "prefect.asset.materialization.succeeded"
    assert event.resource.id == "s3://bucket/data.csv"
    # Both metadata dicts appear merged in the single event payload.
    assert event.payload == {"wrote_rows": 1000, "wrote_columns": 5}
@pytest.mark.usefixtures("reset_worker_events")
def test_materialization_metadata_multiple_assets(
    asserting_events_worker: EventsWorker,
):
    """Test that per-asset metadata is routed to the correct event when one
    materializing task produces multiple assets."""
    asset1 = Asset(key="s3://bucket/data1.csv")
    asset2 = Asset(key="s3://bucket/data2.csv")

    @materialize(asset1, asset2)
    def my_task():
        # Each asset receives its own metadata; they must not bleed together.
        asset1.add_metadata({"wrote_rows": 1000})
        asset2.add_metadata({"wrote_columns": 5})

    @flow
    def pipeline():
        my_task()

    pipeline()
    asserting_events_worker.drain()
    events = _asset_events(asserting_events_worker)
    # Use the module's shared helpers (consistent with the other tests in this
    # file) instead of hand-rolled next(...) scans.
    mat_events = _materialization_events(events)

    event1 = _event_with_resource_id(mat_events, "s3://bucket/data1.csv")
    assert event1.event == "prefect.asset.materialization.succeeded"
    assert event1.payload == {"wrote_rows": 1000}

    event2 = _event_with_resource_id(mat_events, "s3://bucket/data2.csv")
    assert event2.event == "prefect.asset.materialization.succeeded"
    assert event2.payload == {"wrote_columns": 5}
@pytest.mark.usefixtures("reset_worker_events")
def test_materialization_metadata_with_task_failure(
    asserting_events_worker: EventsWorker,
):
    """Test that metadata is still captured when a task fails."""
    asset = Asset(key="s3://bucket/failed_output.csv")

    @materialize(asset)
    def failing_task():
        # Metadata added before the failure must survive into the event.
        asset.add_metadata({"attempted_rows": 1000})
        raise RuntimeError("Processing failed")

    @flow
    def pipeline():
        try:
            failing_task()
        except RuntimeError:
            pass

    pipeline()
    asserting_events_worker.drain()
    event = _first_event(asserting_events_worker)
    # A failing materializing task emits a *.failed event, still carrying
    # the metadata captured before the exception.
    assert event.event == "prefect.asset.materialization.failed"
    assert event.resource.id == "s3://bucket/failed_output.csv"
    assert event.payload == {"attempted_rows": 1000}
def test_add_asset_metadata_throws_error_for_invalid_asset_key():
    """Test that add_asset_metadata throws ValueError for asset keys not in downstream_assets."""
    from prefect.assets import add_asset_metadata

    # Test case 1: Valid asset key should work
    valid_asset = Asset(key="s3://bucket/valid_data.csv")

    @materialize(valid_asset)
    def valid_task():
        # This should work - asset is in downstream_assets
        add_asset_metadata("s3://bucket/valid_data.csv", {"rows": 100})
        return {"success": True}

    @flow
    def valid_pipeline():
        valid_task()

    # This should not raise an error
    valid_pipeline()

    # Test case 2: Invalid asset key should throw error
    materialized_asset = Asset(key="s3://bucket/materialized.csv")

    @materialize(materialized_asset)
    def invalid_task():
        # This should fail - different asset key not in downstream_assets
        add_asset_metadata("s3://bucket/different_asset.csv", {"rows": 200})
        return {"success": False}

    @flow
    def invalid_pipeline():
        invalid_task()

    # This should raise a ValueError
    with pytest.raises(
        ValueError,
        match="Can only add metadata to assets that are arguments to @materialize",
    ):
        invalid_pipeline()

    # Test case 3: Non-materializing task should throw error
    @task
    def non_materializing_task():
        # This should fail - no downstream_assets in a regular task
        add_asset_metadata("s3://bucket/any_asset.csv", {"rows": 300})
        return {"success": False}

    @flow
    def non_materializing_pipeline():
        non_materializing_task()

    # This should raise a ValueError
    with pytest.raises(
        ValueError,
        match="Can only add metadata to assets that are arguments to @materialize",
    ):
        non_materializing_pipeline()
@pytest.mark.usefixtures("reset_worker_events")
def test_nested_materialization(asserting_events_worker: EventsWorker):
    """Test nested materialization - a materialize task called inside another materialize task.

    Expected behavior: Both materializations should emit events, but there should be
    no relationship between the two assets.

    Expected graph: [M: s3://bucket/outer.csv], [M: s3://bucket/inner.csv] (no connection)
    """
    outer_asset = Asset(key="s3://bucket/outer.csv")
    inner_asset = Asset(key="s3://bucket/inner.csv")

    @materialize(inner_asset)
    def inner_task():
        return {"inner_data": "processed"}

    @materialize(outer_asset)
    def outer_task():
        # Calling another materializing task from inside a materialization.
        inner_result = inner_task()
        return {"outer_data": "wrapped", "inner_result": inner_result}

    @flow
    def pipeline():
        outer_task()

    pipeline()
    asserting_events_worker.drain()
    events = _asset_events(asserting_events_worker)
    mat_events = _materialization_events(events)
    # Should have exactly 2 materialization events
    assert len(mat_events) == 2
    # Get the specific events
    outer_evt = _event_with_resource_id(mat_events, outer_asset.key)
    inner_evt = _event_with_resource_id(mat_events, inner_asset.key)
    # Both should be successful materializations
    assert outer_evt.event == "prefect.asset.materialization.succeeded"
    assert inner_evt.event == "prefect.asset.materialization.succeeded"
    # Check that neither asset has the other as a related asset
    outer_related_assets = {r.id for r in outer_evt.related if r.role == "asset"}
    inner_related_assets = {r.id for r in inner_evt.related if r.role == "asset"}
    # Inner asset should not be in outer's related assets
    assert inner_asset.key not in outer_related_assets
    # Outer asset should not be in inner's related assets
    assert outer_asset.key not in inner_related_assets
    # Both should have flow-run context
    assert any(r.id.startswith("prefect.flow-run.") for r in outer_evt.related)
    assert any(r.id.startswith("prefect.flow-run.") for r in inner_evt.related)
@pytest.mark.usefixtures("reset_worker_events")
def test_materialization_from_regular_task(asserting_events_worker: EventsWorker):
    """Test that a @materialize task called from inside a regular @task works correctly.

    Expected behavior: The materialization should emit an event, but no reference event
    should be emitted since the asset dependency is on the regular task, not the materialization.

    Expected graph: [M: s3://bucket/output.csv] (no reference event)
    """
    source_asset = Asset(key="postgres://db/source")
    output_asset = Asset(key="s3://bucket/output.csv")

    @materialize(output_asset)
    def materialize_data(data):
        return {"rows": data["transformed_rows"]}

    @task(asset_deps=[source_asset])
    def transform_data():
        transformed = {"transformed_rows": 100}
        # Nested call: materialization happens inside a plain task run.
        result = materialize_data(transformed)
        return result

    @flow
    def pipeline():
        transform_data()

    pipeline()
    asserting_events_worker.drain()
    events = _asset_events(asserting_events_worker)
    ref_events = _reference_events(events)
    mat_events = _materialization_events(events)
    # Should have no reference events and 1 materialization event
    assert len(ref_events) == 0
    assert len(mat_events) == 1
    # Check materialization event
    mat_evt = mat_events[0]
    assert mat_evt.resource.id == output_asset.key
    assert mat_evt.event == "prefect.asset.materialization.succeeded"
    # The materialization should NOT have the source asset as an upstream dependency
    # since the dependency was on the regular task, not the materialization
    assert not _has_upstream_asset(mat_evt, source_asset.key)
    # Should have flow-run context
    assert any(r.id.startswith("prefect.flow-run.") for r in mat_evt.related)
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/test_assets.py",
"license": "Apache License 2.0",
"lines": 1346,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/prefect/server/utilities/messaging/_consumer_names.py | import os
import socket
import uuid
def generate_unique_consumer_name(group_name: str) -> str:
    """
    Build a consumer name that is unique across machines and processes.

    The name joins the group name, the hostname, the process ID, and a
    short random UUID fragment with dashes, so concurrent consumers on
    different hosts or in different processes never collide.

    Args:
        group_name: The logical group name for the consumer.

    Returns:
        A unique string to be used as the consumer name.
    """
    parts = (
        group_name,
        socket.gethostname(),
        str(os.getpid()),
        uuid.uuid4().hex[:8],  # 8 hex chars of randomness
    )
    return "-".join(parts)
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/server/utilities/messaging/_consumer_names.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
PrefectHQ/prefect:tests/_internal/test_uuid.py | import time
from prefect._internal.uuid7 import format_byte_array_as_uuid, uuid7, uuidfromvalues
def test_uuid7():
    """
    Two UUID7s generated for the same millisecond share their timestamp prefix.
    """
    # The first 13 characters of the canonical form ("xxxxxxxx-xxxx") encode
    # the millisecond timestamp, so they must match for a fixed `ms`.
    ms = time.time_ns() // 1_000_000
    out1 = str(uuid7(ms))
    out2 = str(uuid7(ms))
    assert out1[:13] == out2[:13]
def test_monotonicity():
    """Consecutively generated UUID7 strings must be strictly increasing."""
    last = ""
    for n in range(100_000):
        i = str(uuid7())
        if n > 0 and i <= last:
            raise RuntimeError(f"UUIDs are not monotonic: {last} versus {i}")
        # BUG FIX: the previous version never updated `last`, so every UUID
        # was compared against "" and the check could never fire.
        last = i
def test_vector():
    # test vectors from
    # https://www.ietf.org/archive/id/draft-peabody-dispatch-new-uuid-format-04.html#name-example-of-a-uuidv7-value
    unix_ts_ms = 0x17F22E279B0
    rand_a = 0xCC3
    rand_b = 0x18C4DC0C0C07398F
    # Hex digest (no dashes) of the assembled 128-bit value.
    expected = "017f22e279b07cc398c4dc0c0c07398f"
    found = uuidfromvalues(unix_ts_ms, rand_a, rand_b).hex()
    assert expected == found
def test_formatting():
    """A 16-byte array renders in canonical 8-4-4-4-12 UUID form."""
    raw = b'\x01\x7f"\xe2y\xb0|\xc3\x98\xc4\xdc\x0c\x0c\x079\x8f'
    assert format_byte_array_as_uuid(raw) == "017f22e2-79b0-7cc3-98c4-dc0c0c07398f"
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "tests/_internal/test_uuid.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:scripts/all_links_should_be_ok.py | #!/usr/bin/env -S uv run --script --quiet
# /// script
# requires-python = ">=3.12"
# dependencies = ["httpx"]
# ///
import argparse
import glob
import re
import sys
from dataclasses import dataclass
from pathlib import Path
from typing import Iterable
import anyio
import anyio.to_thread
import httpx
# ANSI escape codes for colored terminal output.
GREY = "\033[90m"
GREEN = "\033[92m"
RED = "\033[91m"
_END = "\033[0m"  # reset attributes
# Matches an http(s) URL up to the first whitespace or common delimiter.
_URL_RE = re.compile(r'https?://[^\s<>"\')\]]+', re.IGNORECASE)
@dataclass(slots=True)
class LinkResult:
    """Outcome of probing a single URL."""

    url: str
    status: int | None  # HTTP status code, or None when the request itself failed
    ok: bool  # True for responses with status in [200, 400)
    sources: frozenset[Path]  # files in which this URL appears
    error: str | None = None  # exception text when the request failed
async def extract_links(path: Path) -> set[str]:
    """Return every http(s) URL found in *path*, or an empty set on read error."""
    try:
        text = await anyio.to_thread.run_sync(path.read_text, "utf-8", "ignore")
    except Exception:
        # Unreadable files are silently skipped; a missing file is not a broken link.
        return set()
    found: set[str] = set()
    for match in _URL_RE.finditer(text):
        # Strip trailing punctuation that the regex may have swallowed.
        found.add(match.group(0).rstrip(".,)"))
    return found
async def _probe(client: httpx.AsyncClient, url: str) -> LinkResult:
    """HEAD *url* (falling back to GET) and wrap the outcome in a LinkResult.

    The returned result carries an empty ``sources`` set; the caller fills it in.
    """
    try:
        r = await client.head(url, follow_redirects=True)
        if r.status_code in {405, 403}:
            # Some servers reject HEAD; retry with a full GET before judging.
            r = await client.get(url, follow_redirects=True)
        return LinkResult(url, r.status_code, 200 <= r.status_code < 400, frozenset())
    except Exception as exc:
        # Network/DNS/timeout failures: no status code, record the message.
        return LinkResult(url, None, False, frozenset(), str(exc))
async def check_links(urls: Iterable[str], concurrency: int) -> list[LinkResult]:
    """Probe every URL with at most *concurrency* requests in flight.

    Results are appended in completion order, not input order.
    """
    sem = anyio.Semaphore(concurrency)
    results: list[LinkResult] = []
    async with httpx.AsyncClient(timeout=10) as client:

        async def bound(u: str) -> None:
            # The semaphore caps the number of simultaneous requests.
            async with sem:
                results.append(await _probe(client, u))

        async with anyio.create_task_group() as tg:
            for url in urls:
                tg.start_soon(bound, url)
    return results
async def audit(
    paths: set[Path],
    ignored_prefixes: tuple[str, ...],
    concurrency: int,
) -> list[LinkResult]:
    """Extract links from *paths*, probe each unique URL once, and return results
    annotated with the set of files each URL appears in.
    """
    link_to_files: dict[str, set[Path]] = {}

    async def process_file(p: Path) -> None:
        # Map each URL to every file that mentions it, skipping ignored
        # prefixes and templated URLs.
        for url in await extract_links(p):
            if any(url.startswith(pref) for pref in ignored_prefixes):
                continue
            if re.search(r"{[^}]+}", url):  # skip template tokens like {var}
                continue
            link_to_files.setdefault(url, set()).add(p)

    # Materialize the set exactly once. The previous version rebuilt
    # list(paths) on every chunk iteration, which is O(n^2) over the file
    # count and needlessly re-walks the set each time.
    ordered_paths = list(paths)
    chunk_size = 100
    for i in range(0, len(ordered_paths), chunk_size):
        async with anyio.create_task_group() as tg:
            for path in ordered_paths[i : i + chunk_size]:
                tg.start_soon(process_file, path)

    return [
        LinkResult(
            url=r.url,
            status=r.status,
            ok=r.ok,
            sources=frozenset(link_to_files[r.url]),
            error=r.error,
        )
        for r in await check_links(link_to_files, concurrency)
    ]
async def main() -> None:
    """CLI entry point: scan globbed files, probe their links, report, and exit.

    Exit codes: 0 when all links are reachable, 1 when any link is broken,
    2 when no files matched the include/exclude patterns.
    """
    parser = argparse.ArgumentParser(
        description="Fail the build if any HTTP link is unreachable."
    )
    parser.add_argument("include", nargs="+", help="Glob pattern(s) to scan.")
    parser.add_argument(
        "--exclude", nargs="*", default=[], help="Glob pattern(s) to skip."
    )
    parser.add_argument(
        "--ignore-url",
        nargs="*",
        default=("http://localhost", "https://localhost"),
        metavar="PREFIX",
        help="URL prefixes to ignore.",
    )
    parser.add_argument("-c", "--concurrency", type=int, default=50)
    ns = parser.parse_args()
    include = {Path(p) for pat in ns.include for p in glob.glob(pat, recursive=True)}
    exclude = {Path(p) for pat in ns.exclude for p in glob.glob(pat, recursive=True)}
    if not (files := include - exclude):
        print("No files to scan.", file=sys.stderr)
        sys.exit(2)
    links = await audit(files, tuple(ns.ignore_url), concurrency=ns.concurrency)
    broken_links: list[LinkResult] = []
    # One report line per URL, sorted by the first source file's path.
    for r in sorted(links, key=lambda x: sorted(x.sources)[0].as_posix()):
        status = r.status or "ERR"
        icon = f"{GREEN}✓{_END}" if r.ok else f"{RED}✗{_END}"
        url_repr = r.url if r.ok else f"{RED}{r.url}{_END}"
        srcs = ", ".join(s.as_posix() for s in sorted(r.sources))
        print(f"{GREY}{srcs}:{_END} {status:>4} {icon} {url_repr}")
        if not r.ok:
            broken_links.append(r)
    if broken_links:
        print(f"\n{len(broken_links)} broken link(s) detected.", file=sys.stderr)
        sys.exit(1)
if __name__ == "__main__":
    # anyio.run drives the async entry point when invoked as a script.
    anyio.run(main)
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "scripts/all_links_should_be_ok.py",
"license": "Apache License 2.0",
"lines": 118,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/prefect/_internal/compatibility/blocks.py | import inspect
from typing import Any, Union
from prefect.filesystems import NullFileSystem, WritableFileSystem
def call_explicitly_sync_block_method(
    block: Union[WritableFileSystem, NullFileSystem],
    method: str,
    args: tuple[Any, ...],
    kwargs: dict[str, Any],
) -> Any:
    """
    Call a block method synchronously.

    TODO: remove this once we have explicit sync/async methods on all storage blocks
    see https://github.com/PrefectHQ/prefect/issues/15008
    """
    bound_method = getattr(block, method)
    # `_sync=True` forces synchronous execution even when invoked from an
    # async context (e.g., within a sync flow running in an async test).
    return bound_method(*args, _sync=True, **kwargs)
async def call_explicitly_async_block_method(
    block: Union[WritableFileSystem, NullFileSystem],
    method: str,
    args: tuple[Any, ...],
    kwargs: dict[str, Any],
) -> Any:
    """
    Call a block method, preferring an explicitly-async implementation.

    TODO: remove this once we have explicit async methods on all storage blocks
    see https://github.com/PrefectHQ/prefect/issues/15008
    """
    async_name = f"a{method}"
    if hasattr(block, async_name):
        # Explicit async variant exists (e.g. `aread_path` next to `read_path`).
        return await getattr(block, async_name)(*args, **kwargs)

    sync_method = getattr(block, method, None)
    if hasattr(sync_method, "aio"):
        # `sync_compatible`-wrapped method: call its async flavor directly.
        return await sync_method.aio(block, *args, **kwargs)

    # Should not happen in prefect, but users can override implementations:
    # the result may or may not be awaitable, so handle both cases.
    result = getattr(block, method)(*args, **kwargs)
    if inspect.isawaitable(result):
        return await result
    return result
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/_internal/compatibility/blocks.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PrefectHQ/prefect:src/integrations/prefect-kubernetes/integration_tests/src/prefect_kubernetes_integration_tests/test_crash_detection.py | import asyncio
import os
import subprocess
from typing import Any
import anyio
import pytest
from rich.console import Console
from prefect import get_client
from prefect.states import StateType
from prefect_kubernetes_integration_tests.utils import display, k8s, prefect_core
console = Console()
# Job variables applied to every flow run created by these tests.
DEFAULT_JOB_VARIABLES: dict[str, Any] = {
    "image": "prefecthq/prefect:3.2.11-python3.12",
}
if os.environ.get("CI", False):
    # In CI the API server runs on the host; the docker bridge address makes
    # it reachable from pods inside the kind cluster.
    DEFAULT_JOB_VARIABLES["env"] = {"PREFECT_API_URL": "http://172.17.0.1:4200/api"}
DEFAULT_PARAMETERS = {"n": 5}
# Default source is a simple flow that sleeps
DEFAULT_FLOW_SOURCE = "https://gist.github.com/772d095672484b76da40a4e6158187f0.git"
DEFAULT_FLOW_ENTRYPOINT = "sleeping.py:sleepy"
@pytest.mark.usefixtures("kind_cluster")
async def test_failed_pod_start(
    work_pool_name: str,
):
    """Test flow runs with pods that fail to start are marked as crashed.

    Uses a nonexistent image so the pod can never start, runs the worker once,
    and asserts the flow run never transitioned to RUNNING.
    """
    flow_run = await prefect_core.create_flow_run(
        source=DEFAULT_FLOW_SOURCE,
        entrypoint=DEFAULT_FLOW_ENTRYPOINT,
        name="failed-pod-start",
        work_pool_name=work_pool_name,
        # Use an invalid image to ensure the pod will fail to start
        job_variables=DEFAULT_JOB_VARIABLES
        | {"image": "ceci-nest-pas-une-image:latest"},
        parameters=DEFAULT_PARAMETERS,
        flow_run_name="failed-pod-start-test",
    )
    display.print_flow_run_created(flow_run)
    # Use run_once mode for the crash detection test since we just need one attempt
    # Unlike the subprocess approach, this will block until the worker completes
    print("Starting worker in run_once mode to detect crash...")
    prefect_core.start_worker(work_pool_name, run_once=True)
    # After worker completes, give the observer a moment to process events
    await asyncio.sleep(5)
    # Check the final state
    print("Worker completed, checking final state...")
    state_type, message = prefect_core.get_flow_run_state(flow_run.id)
    print(f"Final state after worker run: {state_type} - {message}")
    # Allow both CRASHED and PENDING states - the important thing is
    # that the flow run didn't transition to RUNNING since the pod couldn't start
    acceptable_states = (StateType.CRASHED, StateType.PENDING)
    assert state_type in acceptable_states, (
        f"Expected flow run to be in one of {acceptable_states}, got {state_type}"
    )
    # Collect any events that were generated
    # Poll for up to 10 seconds; the observer may still be replicating events.
    events = []
    with anyio.move_on_after(10):
        while len(events) < 1:
            events = await prefect_core.read_pod_events_for_flow_run(flow_run.id)
            await asyncio.sleep(1)
    async with get_client() as client:
        updated_flow_run = await client.read_flow_run(flow_run.id)
        display.print_flow_run_result(updated_flow_run)
    # Check if we got at least the pending event - but only if we have events
    # It's possible we don't get any events if the pod never started
    if events:
        event_types = {event.event for event in events}
        print(f"Found events: {event_types}")
        assert "prefect.kubernetes.pod.pending" in event_types, (
            f"Expected at least the 'pending' event, got: {event_types}"
        )
@pytest.mark.usefixtures("kind_cluster")
async def test_backoff_limit_exhausted(
    work_pool_name: str,
):
    """Test flow runs with pods that exhaust their backoff limit are marked as crashed.

    Repeatedly evicts the run's pods until the job's low backoff limit is
    exhausted, then asserts the flow run is CRASHED and that the observer
    replicated the pod lifecycle (pending/running/evicted) events.
    """
    flow_run = await prefect_core.create_flow_run(
        source=DEFAULT_FLOW_SOURCE,
        entrypoint=DEFAULT_FLOW_ENTRYPOINT,
        name="backoff-limit-exhausted",
        work_pool_name=work_pool_name,
        # Low backoff limit so the repeated evictions below exhaust retries quickly
        job_variables=DEFAULT_JOB_VARIABLES | {"backoff_limit": 1},
    )
    display.print_flow_run_created(flow_run)
    with subprocess.Popen(
        ["prefect", "worker", "start", "--pool", work_pool_name],
    ) as worker_process:
        try:
            job = k8s.get_job_for_flow_run(flow_run.name, timeout=120)
            # this loop is a bully: keep evicting pods until the job stops retrying
            while job.status and job.status.completion_time is None:
                try:
                    pod_name = k8s.wait_for_pod(job.metadata.name, timeout=15)
                except TimeoutError:
                    # No new pod appeared — the job has given up retrying.
                    break
                # Should hit the backoff limit after final eviction
                k8s.evict_pod(pod_name)
                await asyncio.sleep(1)
            prefect_core.wait_for_flow_run_state(
                flow_run.id, StateType.CRASHED, timeout=60
            )
        finally:
            worker_process.terminate()
    async with get_client() as client:
        updated_flow_run = await client.read_flow_run(flow_run.id)
        assert updated_flow_run.state is not None
        assert updated_flow_run.state.type == StateType.CRASHED
        display.print_flow_run_result(updated_flow_run)
    # Collect events with a more generous timeout
    events = []
    max_events = 0
    with anyio.move_on_after(15):
        while True:
            current_events = await prefect_core.read_pod_events_for_flow_run(
                flow_run.id
            )
            # Keep the largest snapshot seen; events may arrive incrementally.
            if len(current_events) > max_events:
                max_events = len(current_events)
                events = current_events
                print(
                    f"Found {len(events)} events: {[event.event for event in events]}"
                )
            # If we got at least 5 events, that's enough
            if len(events) >= 5:
                break
            await asyncio.sleep(1)
    # Instead of expecting exactly 6 events, check for at least 5
    assert len(events) >= 5, (
        f"Expected at least 5 events, got {len(events)}: {[event.event for event in events]}"
    )
    # Instead of checking exact order, check the event types
    event_types = {event.event for event in events}
    assert "prefect.kubernetes.pod.pending" in event_types, "Missing pending event"
    assert "prefect.kubernetes.pod.running" in event_types, "Missing running event"
    assert "prefect.kubernetes.pod.evicted" in event_types, "Missing evicted event"
    # Verify we have events from both pod attempts
    event_list = [event.event for event in events]
    # Count occurrences to verify retries
    pending_count = event_list.count("prefect.kubernetes.pod.pending")
    assert pending_count >= 1, "Expected at least one pending event"
    running_count = event_list.count("prefect.kubernetes.pod.running")
    assert running_count >= 1, "Expected at least one running event"
    evicted_count = event_list.count("prefect.kubernetes.pod.evicted")
    assert evicted_count >= 1, "Expected at least one evicted event"
    # Verify the backoff retry happened
    total_events = pending_count + running_count + evicted_count
    assert total_events >= 4, (
        f"Expected at least 4 events for retry, got {total_events}"
    )
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-kubernetes/integration_tests/src/prefect_kubernetes_integration_tests/test_crash_detection.py",
"license": "Apache License 2.0",
"lines": 149,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/integrations/prefect-kubernetes/integration_tests/src/prefect_kubernetes_integration_tests/test_events.py | import asyncio
import os
import subprocess
from typing import Any
import anyio
import pytest
from prefect import get_client
from prefect.states import StateType
from prefect_kubernetes_integration_tests.utils import display, prefect_core
# Job variables applied to every flow run created by these tests.
DEFAULT_JOB_VARIABLES: dict[str, Any] = {
    "image": "prefecthq/prefect:3.2.11-python3.12",
}
if os.environ.get("CI", False):
    # In CI the API server runs on the host; the docker bridge address makes
    # it reachable from pods inside the kind cluster.
    DEFAULT_JOB_VARIABLES["env"] = {"PREFECT_API_URL": "http://172.17.0.1:4200/api"}
DEFAULT_PARAMETERS = {"n": 5}
# Default source is a simple flow that sleeps
DEFAULT_FLOW_SOURCE = "https://gist.github.com/772d095672484b76da40a4e6158187f0.git"
DEFAULT_FLOW_ENTRYPOINT = "sleeping.py:sleepy"
@pytest.mark.usefixtures("kind_cluster")
async def test_happy_path_events(
    work_pool_name: str,
):
    """Test that we get the expected events when a flow run is successful.

    Expects exactly the three pod lifecycle events: pending, running, succeeded.
    """
    flow_run = await prefect_core.create_flow_run(
        source=DEFAULT_FLOW_SOURCE,
        entrypoint=DEFAULT_FLOW_ENTRYPOINT,
        name="happy-path-pod-events",
        work_pool_name=work_pool_name,
        job_variables=DEFAULT_JOB_VARIABLES,
        parameters=DEFAULT_PARAMETERS,
        flow_run_name="happy-path-events",
    )
    display.print_flow_run_created(flow_run)
    with subprocess.Popen(
        ["prefect", "worker", "start", "--pool", work_pool_name],
    ) as worker_process:
        try:
            prefect_core.wait_for_flow_run_state(
                flow_run.id, StateType.COMPLETED, timeout=30
            )
            async with get_client() as client:
                updated_flow_run = await client.read_flow_run(flow_run.id)
                display.print_flow_run_result(updated_flow_run)
            # Collect events while worker is still running
            # Poll until all three lifecycle events arrive or 30s elapse.
            events = []
            with anyio.move_on_after(30):
                while len(events) < 3:
                    events = await prefect_core.read_pod_events_for_flow_run(
                        flow_run.id
                    )
                    await asyncio.sleep(1)
        finally:
            worker_process.terminate()
    assert len(events) == 3, (
        f"Expected 3 events, got {len(events)}: {[event.event for event in events]}"
    )
    assert {event.event for event in events} == {
        "prefect.kubernetes.pod.pending",
        "prefect.kubernetes.pod.running",
        "prefect.kubernetes.pod.succeeded",
    }, (
        f"Expected events to be Pending, Running, and Succeeded, got: {[event.event for event in events]}"
    )
@pytest.mark.usefixtures("kind_cluster")
async def test_disable_pod_event_replication(
    work_pool_name: str,
):
    """Test that pod events are not replicated when disabled via settings.

    The replication toggle is passed to the worker subprocess via its
    environment; the run should complete with zero replicated pod events.
    """
    flow_run = await prefect_core.create_flow_run(
        source=DEFAULT_FLOW_SOURCE,
        entrypoint=DEFAULT_FLOW_ENTRYPOINT,
        name="disabled-events",
        work_pool_name=work_pool_name,
        job_variables=DEFAULT_JOB_VARIABLES,
        parameters=DEFAULT_PARAMETERS,
        flow_run_name="disabled-pod-events",
    )
    display.print_flow_run_created(flow_run)
    # Start worker with pod event replication disabled
    env = os.environ.copy()
    env["PREFECT_INTEGRATIONS_KUBERNETES_OBSERVER_REPLICATE_POD_EVENTS"] = "false"
    with subprocess.Popen(
        ["prefect", "worker", "start", "--pool", work_pool_name],
        env=env,
    ) as worker_process:
        try:
            prefect_core.wait_for_flow_run_state(
                flow_run.id, StateType.COMPLETED, timeout=30
            )
            async with get_client() as client:
                updated_flow_run = await client.read_flow_run(flow_run.id)
                display.print_flow_run_result(updated_flow_run)
            # Wait for any potential events to be sent (if they were going to be)
            await asyncio.sleep(15)
            events = await prefect_core.read_pod_events_for_flow_run(flow_run.id)
        finally:
            worker_process.terminate()
    assert len(events) == 0, (
        f"Expected 0 events, got {len(events)}: {[event.event for event in events]}"
    )
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-kubernetes/integration_tests/src/prefect_kubernetes_integration_tests/test_events.py",
"license": "Apache License 2.0",
"lines": 100,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/integrations/prefect-kubernetes/integration_tests/src/prefect_kubernetes_integration_tests/test_filtering.py | import asyncio
import os
import subprocess
from typing import Any
import anyio
import pytest
from prefect import get_client
from prefect.states import StateType
from prefect_kubernetes_integration_tests.utils import display, prefect_core
# Job variables applied to every flow run created by these tests.
DEFAULT_JOB_VARIABLES: dict[str, Any] = {
    "image": "prefecthq/prefect:3.2.11-python3.12",
}
if os.environ.get("CI", False):
    # In CI the API server runs on the host; the docker bridge address makes
    # it reachable from pods inside the kind cluster.
    DEFAULT_JOB_VARIABLES["env"] = {"PREFECT_API_URL": "http://172.17.0.1:4200/api"}
DEFAULT_PARAMETERS = {"n": 1}
# Default source is a simple flow that sleeps
DEFAULT_FLOW_SOURCE = "https://gist.github.com/772d095672484b76da40a4e6158187f0.git"
DEFAULT_FLOW_ENTRYPOINT = "sleeping.py:sleepy"
@pytest.mark.usefixtures("kind_cluster")
async def test_filter_by_namespace(
    work_pool_name: str, monkeypatch: pytest.MonkeyPatch
):
    """Test that the observer doesn't emit events for flow runs in namespaces that are not watched."""
    monkeypatch.setenv(
        "PREFECT_INTEGRATIONS_KUBERNETES_OBSERVER_NAMESPACES",
        "other-namespace,yet-another-namespace",
    )
    flow_run = await prefect_core.create_flow_run(
        source=DEFAULT_FLOW_SOURCE,
        entrypoint=DEFAULT_FLOW_ENTRYPOINT,
        name="filter-by-namespace",
        work_pool_name=work_pool_name,
        job_variables=DEFAULT_JOB_VARIABLES,
        parameters=DEFAULT_PARAMETERS,
    )
    display.print_flow_run_created(flow_run)
    with subprocess.Popen(
        ["prefect", "worker", "start", "--pool", work_pool_name],
    ) as worker_process:
        try:
            prefect_core.wait_for_flow_run_state(
                flow_run.id, StateType.COMPLETED, timeout=30
            )
            async with get_client() as client:
                updated_flow_run = await client.read_flow_run(flow_run.id)
                display.print_flow_run_result(updated_flow_run)
            # Collect events while worker is still running
            # NOTE: since no events should be emitted here, this poll is
            # expected to run out its 30s budget — it serves as a grace
            # period before the zero-events assertion below.
            events = []
            with anyio.move_on_after(30):
                while len(events) < 3:
                    events = await prefect_core.read_pod_events_for_flow_run(
                        flow_run.id
                    )
                    await asyncio.sleep(1)
        finally:
            worker_process.terminate()
    events = await prefect_core.read_pod_events_for_flow_run(flow_run.id)
    assert len(events) == 0, (
        f"Expected 0 events, got {len(events)}: {[event.event for event in events]}"
    )
@pytest.mark.usefixtures("kind_cluster")
async def test_filter_by_label(work_pool_name: str, monkeypatch: pytest.MonkeyPatch):
    """Test that the observer doesn't emit events for flow runs that don't match the label filter.

    The additional label filter references a deployment id that cannot exist,
    so no pod event should ever be replicated for this run.
    """
    monkeypatch.setenv(
        "PREFECT_INTEGRATIONS_KUBERNETES_OBSERVER_ADDITIONAL_LABEL_FILTERS",
        "prefect.io/deployment-id=not-real-deployment-id",
    )
    flow_run = await prefect_core.create_flow_run(
        source=DEFAULT_FLOW_SOURCE,
        entrypoint=DEFAULT_FLOW_ENTRYPOINT,
        name="filter-by-label",
        work_pool_name=work_pool_name,
        job_variables=DEFAULT_JOB_VARIABLES,
        parameters=DEFAULT_PARAMETERS,
    )
    display.print_flow_run_created(flow_run)
    with subprocess.Popen(
        ["prefect", "worker", "start", "--pool", work_pool_name],
    ) as worker_process:
        try:
            prefect_core.wait_for_flow_run_state(
                flow_run.id, StateType.COMPLETED, timeout=30
            )
            async with get_client() as client:
                updated_flow_run = await client.read_flow_run(flow_run.id)
                display.print_flow_run_result(updated_flow_run)
        finally:
            worker_process.terminate()
    events = await prefect_core.read_pod_events_for_flow_run(flow_run.id)
    assert len(events) == 0, (
        f"Expected 0 events, got {len(events)}: {[event.event for event in events]}"
    )
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-kubernetes/integration_tests/src/prefect_kubernetes_integration_tests/test_filtering.py",
"license": "Apache License 2.0",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/integrations/prefect-kubernetes/integration_tests/src/prefect_kubernetes_integration_tests/test_jobs.py | from __future__ import annotations
import os
import subprocess
from typing import Any
import pytest
from prefect import get_client
from prefect.states import StateType
from prefect_kubernetes_integration_tests.utils import display, prefect_core
# Job variables applied to every flow run created by these tests.
DEFAULT_JOB_VARIABLES: dict[str, Any] = {"image": "prefecthq/prefect:3.2.11-python3.12"}
if os.environ.get("CI", False):
    # In CI the API server runs on the host; the docker bridge address makes
    # it reachable from pods inside the kind cluster.
    DEFAULT_JOB_VARIABLES["env"] = {"PREFECT_API_URL": "http://172.17.0.1:4200/api"}
DEFAULT_PARAMETERS = {"n": 5}  # Short sleep time for faster tests
# Default source is a simple flow that sleeps
DEFAULT_FLOW_SOURCE = "https://gist.github.com/772d095672484b76da40a4e6158187f0.git"
DEFAULT_FLOW_ENTRYPOINT = "sleeping.py:sleepy"
DEFAULT_FLOW_NAME = "job-state-test"
@pytest.mark.usefixtures("kind_cluster")
async def test_successful_job_completion(
    work_pool_name: str,
):
    """Test that jobs complete successfully and don't trigger state changes."""
    flow_run = await prefect_core.create_flow_run(
        source=DEFAULT_FLOW_SOURCE,
        entrypoint=DEFAULT_FLOW_ENTRYPOINT,
        name=DEFAULT_FLOW_NAME,
        work_pool_name=work_pool_name,
        job_variables=DEFAULT_JOB_VARIABLES,
        parameters=DEFAULT_PARAMETERS,
        flow_run_name="successful-job-completion",
    )
    display.print_flow_run_created(flow_run)
    # Start worker and wait for completion
    with subprocess.Popen(
        ["prefect", "worker", "start", "--pool", work_pool_name],
    ) as worker_process:
        try:
            # Wait for the flow run to complete
            prefect_core.wait_for_flow_run_state(
                flow_run.id, StateType.COMPLETED, timeout=30
            )
            async with get_client() as client:
                updated_flow_run = await client.read_flow_run(flow_run.id)
                assert updated_flow_run.state is not None, (
                    "Flow run state should not be None"
                )
                assert updated_flow_run.state.type == StateType.COMPLETED, (
                    "Expected flow run to be COMPLETED. Got "
                    f"{updated_flow_run.state.type} instead."
                )
                display.print_flow_run_result(updated_flow_run)
        finally:
            worker_process.terminate()
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-kubernetes/integration_tests/src/prefect_kubernetes_integration_tests/test_jobs.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/integrations/prefect-kubernetes/prefect_kubernetes/observer.py | from __future__ import annotations
import asyncio
import json
import logging
import sys
import threading
import uuid
from datetime import datetime, timedelta, timezone
from typing import Any
import anyio
import kopf
from cachetools import TTLCache
from kubernetes_asyncio import config
from kubernetes_asyncio.client import ApiClient, BatchV1Api, V1Job
from prefect import __version__, get_client
from prefect.client.orchestration import PrefectClient
from prefect.events import Event, RelatedResource
from prefect.events.clients import EventsClient, get_events_client
from prefect.events.filters import (
EventFilter,
EventNameFilter,
EventOccurredFilter,
EventResourceFilter,
)
from prefect.events.schemas.events import Resource
from prefect.exceptions import ObjectNotFound
from prefect.states import Crashed
from prefect.types import DateTime
from prefect.utilities.engine import propose_state
from prefect.utilities.slugify import slugify
from prefect_kubernetes.settings import KubernetesSettings
# Cache used to keep track of the last event for a pod. This is used populate the `follows` field
# on events to get correct event ordering. We only hold each pod's last event for 5 minutes to avoid
# holding onto too much memory and 5 minutes is the same as the `TIGHT_TIMING` in `prefect.events.utilities`.
_last_event_cache: TTLCache[str, Event] = TTLCache(
    maxsize=1000, ttl=60 * 5
)  # 5 minutes
settings = KubernetesSettings()
# Module-level clients shared by all kopf handlers. They are opened in the
# `initialize_clients` startup handler and closed in `cleanup_fn`.
events_client: EventsClient | None = None
orchestration_client: PrefectClient | None = None
# Bounds concurrent "does this event exist?" API queries during startup replay.
_startup_event_semaphore: asyncio.Semaphore | None = None
@kopf.on.startup()
def configure(settings: kopf.OperatorSettings, **_):
    # Disable kopf's automatic resource scanning; handlers are registered
    # explicitly in this module.
    settings.scanning.disabled = True
@kopf.on.startup()
async def initialize_clients(logger: kopf.Logger, **kwargs: Any):
    """Open the module-level Prefect clients used by the event handlers."""
    logger.info("Initializing clients")
    global events_client
    global orchestration_client
    global _startup_event_semaphore
    _startup_event_semaphore = asyncio.Semaphore(
        settings.observer.startup_event_concurrency
    )
    # Enter the async context managers manually; the matching `__aexit__`
    # calls happen in `cleanup_fn` when the operator shuts down.
    orchestration_client = await get_client().__aenter__()
    events_client = await get_events_client().__aenter__()
    logger.info("Clients successfully initialized")
@kopf.on.cleanup()
async def cleanup_fn(logger: kopf.Logger, **kwargs: Any):
    """Close the module-level Prefect clients opened in `initialize_clients`.

    Tolerates a partially-failed startup: previously this unconditionally
    called `__aexit__` on the globals, raising `AttributeError` on `None`
    when `initialize_clients` had not completed. Each client is now closed
    only if it was actually created, and the globals are reset so a
    subsequent startup begins from a clean state.
    """
    global events_client
    global orchestration_client
    logger.info("Cleaning up clients")
    if events_client is not None:
        await events_client.__aexit__(None, None, None)
        events_client = None
    if orchestration_client is not None:
        await orchestration_client.__aexit__(None, None, None)
        orchestration_client = None
    logger.info("Clients successfully cleaned up")
async def _replicate_pod_event(  # pyright: ignore[reportUnusedFunction]
    event: kopf.RawEvent,
    uid: str,
    name: str,
    namespace: str,
    labels: kopf.Labels,
    status: kopf.Status,
    logger: logging.Logger,
    **kwargs: Any,
):
    """
    Replicates a pod event to the Prefect event system.

    This handler is resilient to restarts of the observer and allows
    multiple instances of the observer to coexist without duplicate events.

    Args:
        event: Raw watch event from kopf; its "type" is `None` for objects
            seen during the initial listing (see dedup logic below).
        uid: Kubernetes UID of the pod.
        name: Pod name.
        namespace: Pod namespace.
        labels: Pod labels, used to derive related Prefect resources.
        status: Pod status (phase, container statuses).
        logger: Handler-scoped logger supplied by kopf.
    """
    global events_client
    global orchestration_client
    event_type = event["type"]
    phase = status["phase"]
    logger.debug(f"Pod event received - type: {event_type}, phase: {phase}, uid: {uid}")
    # Extract the creation timestamp from the Kubernetes event
    k8s_created_time = None
    if isinstance(event, dict) and "object" in event:
        obj = event["object"]
        if isinstance(obj, dict) and "metadata" in obj:
            metadata = obj["metadata"]
            if "creationTimestamp" in metadata:
                # Kubernetes emits a trailing "Z"; normalize for fromisoformat.
                k8s_created_time = DateTime.fromisoformat(
                    metadata["creationTimestamp"].replace("Z", "+00:00")
                )
    # Create a deterministic event ID based on the pod's ID, phase, and restart count.
    # This ensures that the event ID is the same for the same pod in the same phase and restart count
    # and Prefect's event system will be able to deduplicate events.
    event_id = uuid.uuid5(
        uuid.NAMESPACE_URL,
        json.dumps(
            {
                "uid": uid,
                "phase": phase,
                "restart_count": sum(
                    cs.get("restartCount", 0)
                    for cs in status.get("containerStatuses", [])
                ),
            },
            sort_keys=True,
        ),
    )
    # Check if a corresponding event already exists. If so, we don't need to emit a new one.
    # This handles the case where the observer is restarted and we don't want to emit duplicate events
    # and the case where you're moving from an older version of the worker without the observer to a newer version with the observer.
    if event_type is None:
        if orchestration_client is None:
            raise RuntimeError("Orchestration client not initialized")
        if _startup_event_semaphore is None:
            raise RuntimeError("Startup event semaphore not initialized")
        # Use semaphore to limit concurrent API calls during startup to prevent
        # overwhelming the API server when there are many existing pods/jobs
        async with _startup_event_semaphore:
            # Use the Kubernetes event timestamp for the filter to avoid "Query time range is too large" error
            event_filter = EventFilter(
                event=EventNameFilter(name=[f"prefect.kubernetes.pod.{phase.lower()}"]),
                resource=EventResourceFilter(
                    id=[f"prefect.kubernetes.pod.{uid}"],
                ),
                occurred=EventOccurredFilter(
                    since=(
                        k8s_created_time
                        if k8s_created_time
                        else (datetime.now(timezone.utc) - timedelta(hours=1))
                    )
                ),
            )
            response = await orchestration_client.request(
                "POST",
                "/events/filter",
                json=dict(
                    filter=event_filter.model_dump(exclude_unset=True, mode="json")
                ),
            )
            # If the event already exists, we don't need to emit a new one.
            if response.json()["events"]:
                return
    resource = {
        "prefect.resource.id": f"prefect.kubernetes.pod.{uid}",
        "prefect.resource.name": name,
        "kubernetes.namespace": namespace,
    }
    # Add eviction reason if the pod was evicted for debugging purposes
    if event_type == "MODIFIED" and phase == "Failed":
        for container_status in status.get("containerStatuses", []):
            if (
                terminated := container_status.get("state", {}).get("terminated", {})
            ) and (reason := terminated.get("reason")):
                # Re-label the failure as "evicted" and carry the k8s reason.
                phase = "evicted"
                resource["kubernetes.reason"] = reason
                break
    # Create the Prefect event, using the K8s event timestamp as the occurred time if available
    # NOTE(review): `occurred` is not actually set from `k8s_created_time`
    # here — the Event default is used. Confirm whether that is intended.
    prefect_event = Event(
        event=f"prefect.kubernetes.pod.{phase.lower()}",
        resource=Resource.model_validate(resource),
        id=event_id,
        related=_related_resources_from_labels(labels),
    )
    if (prev_event := _last_event_cache.get(uid)) is not None:
        # This check replicates a similar check in `emit_event` in `prefect.events.utilities`
        if (
            -timedelta(minutes=5)
            < (prefect_event.occurred - prev_event.occurred)
            < timedelta(minutes=5)
        ):
            # Chain to the previous pod event for correct ordering downstream.
            prefect_event.follows = prev_event.id
    if events_client is None:
        raise RuntimeError("Events client not initialized")
    await events_client.emit(event=prefect_event)
    _last_event_cache[uid] = prefect_event
# Register the pod handler only when replication is enabled. The label filter
# restricts the watch to pods created for Prefect flow runs, plus any
# user-configured additional filters.
if settings.observer.replicate_pod_events:
    kopf.on.event(
        "pods",
        labels={
            "prefect.io/flow-run-id": kopf.PRESENT,
            **settings.observer.additional_label_filters,
        },
    )(_replicate_pod_event)  # type: ignore
async def _get_kubernetes_client() -> ApiClient:
    """Get a configured Kubernetes client.

    Returns:
        ApiClient: A configured Kubernetes API client
    """
    # Prefer the in-cluster service-account configuration; fall back to the
    # local kubeconfig when the code is not running inside a cluster.
    try:
        config.load_incluster_config()  # type: ignore
    except config.ConfigException:
        return await config.new_client_from_config()  # type: ignore
    return ApiClient()
async def _get_k8s_jobs(
    flow_run_id: str, namespace: str, logger: kopf.Logger
) -> list[V1Job]:
    """Get all jobs from the k8s API with the given flow run id as a label.

    Uses kubernetes-asyncio to list jobs.

    Args:
        flow_run_id: Flow run id used in the `prefect.io/flow-run-id` label selector.
        namespace: Namespace to list jobs in.
        logger: Handler-scoped logger for error reporting.

    Returns:
        The matching jobs, or an empty list (with the error logged) on failure.
    """
    # Initialize before the try so the finally block is safe even when
    # `_get_kubernetes_client()` itself raises. Previously `client` was
    # unbound in that case and `finally` raised a NameError.
    client: ApiClient | None = None
    try:
        client = await _get_kubernetes_client()
        batch_client = BatchV1Api(client)
        jobs = await batch_client.list_namespaced_job(  # type: ignore
            namespace=namespace, label_selector=f"prefect.io/flow-run-id={flow_run_id}"
        )
        return jobs.items  # type: ignore
    except Exception as e:
        logger.error(f"Failed to get jobs for flow run {flow_run_id}: {e}")
        return []
    finally:
        # Only close a client that was actually created.
        if client is not None:
            await client.close()  # type: ignore
@kopf.on.event(
    "jobs",
    labels={
        "prefect.io/flow-run-id": kopf.PRESENT,
        **settings.observer.additional_label_filters,
    },
)  # type: ignore
async def _mark_flow_run_as_crashed(  # pyright: ignore[reportUnusedFunction]
    event: kopf.RawEvent,
    name: str,
    labels: kopf.Labels,
    status: kopf.Status,
    logger: logging.Logger,
    spec: kopf.Spec,
    **kwargs: Any,
):
    """
    Marks a flow run as crashed if the corresponding job has failed and no other active jobs exist.

    Args:
        event: Raw job watch event from kopf.
        name: Name of the Kubernetes job.
        labels: Job labels; the flow run id is read from `prefect.io/flow-run-id`.
        status: Job status (used for the failed-pod count).
        logger: Handler-scoped logger supplied by kopf.
        spec: Job spec (used for `backoffLimit`).
        **kwargs: Additional kopf kwargs; `namespace` is read from here.
    """
    global orchestration_client
    if not (flow_run_id := labels.get("prefect.io/flow-run-id")):
        return
    logger.debug(
        f"Job event received - name: {name}, flow_run_id: {flow_run_id}, status: {status}"
    )
    # 6 is the Kubernetes default backoffLimit for jobs.
    backoff_limit = spec.get("backoffLimit", 6)
    # Check current job status from the event
    current_job_failed = status.get("failed", 0) > backoff_limit
    # If the job is still active or has succeeded, don't mark as crashed
    if not current_job_failed:
        logger.debug(f"Job {name} is still active or has succeeded, skipping")
        return
    # Get the flow run to check its state
    try:
        if orchestration_client is None:
            raise RuntimeError("Orchestration client not initialized")
        flow_run = await orchestration_client.read_flow_run(
            flow_run_id=uuid.UUID(flow_run_id)
        )
    except ObjectNotFound:
        logger.debug(f"Flow run {flow_run_id} not found, skipping")
        return
    assert flow_run.state is not None, "Expected flow run state to be set"
    # Exit early for terminal/final/scheduled/paused states
    if (
        flow_run.state.is_final()
        or flow_run.state.is_scheduled()
        or flow_run.state.is_paused()
    ):
        logger.debug(
            f"Flow run {flow_run_id} is in final, scheduled, or paused state, skipping"
        )
        return
    # In the case where a flow run is rescheduled due to a SIGTERM, it will show up as another active job if the
    # rescheduling was successful. If this is the case, we want to find the other active job so that we don't mark
    # the flow run as crashed.
    #
    # If the flow run is PENDING, it's possible that the job hasn't been created yet, so we'll wait and query new state
    # to make a determination.
    has_other_active_job = False
    # Bounded polling: give replacement jobs up to 30 seconds to appear.
    with anyio.move_on_after(30):
        while True:
            # Check if there are any other jobs with this flow run label
            k8s_jobs = await _get_k8s_jobs(
                flow_run_id, namespace=kwargs["namespace"], logger=logger
            )
            # Filter out the current job from the list
            other_jobs = [job for job in k8s_jobs if job.metadata.name != name]  # type: ignore
            # Check if any other job is completed or running
            has_other_active_job = any(
                (job.status and job.status.succeeded)  # type: ignore
                or (job.status and job.status.active and job.status.active > 0)  # type: ignore
                for job in other_jobs
            )
            logger.debug(
                f"Other jobs status - count: {len(other_jobs)}, has_active: {has_other_active_job}"
            )
            flow_run = await orchestration_client.read_flow_run(
                flow_run_id=uuid.UUID(flow_run_id)
            )
            assert flow_run.state is not None, "Expected flow run state to be set"
            if not flow_run.state.is_pending() or has_other_active_job:
                break
            logger.info(
                f"Flow run {flow_run_id} in state {flow_run.state!r} with no other active jobs, waiting for 5 seconds before checking again"
            )
            await anyio.sleep(5)
    if not has_other_active_job:
        logger.warning(
            f"Job {name} has failed and no other active jobs found for flow run {flow_run_id}, marking as crashed"
        )
        await propose_state(
            client=orchestration_client,
            state=Crashed(message="No active or succeeded pods found for any job"),
            flow_run_id=uuid.UUID(flow_run_id),
        )
def _related_resources_from_labels(labels: kopf.Labels) -> list[RelatedResource]:
    """Convert labels to related resources"""
    related: list[RelatedResource] = []
    # (id label, resource id prefix, role, name label) for the uniform cases,
    # in the same order the resources should appear in the result.
    label_specs = (
        ("prefect.io/flow-run-id", "prefect.flow-run", "flow-run", "prefect.io/flow-run-name"),
        ("prefect.io/deployment-id", "prefect.deployment", "deployment", "prefect.io/deployment-name"),
        ("prefect.io/flow-id", "prefect.flow", "flow", "prefect.io/flow-name"),
        ("prefect.io/work-pool-id", "prefect.work-pool", "work-pool", "prefect.io/work-pool-name"),
    )
    for id_label, id_prefix, role, name_label in label_specs:
        if identifier := labels.get(id_label):
            related.append(
                RelatedResource.model_validate(
                    {
                        "prefect.resource.id": f"{id_prefix}.{identifier}",
                        "prefect.resource.role": role,
                        "prefect.resource.name": labels.get(name_label),
                    }
                )
            )
    # The worker resource is special-cased: its id is a slugified name rather
    # than an id label, and it carries worker-type/version metadata.
    if worker_name := labels.get("prefect.io/worker-name"):
        related.append(
            RelatedResource.model_validate(
                {
                    "prefect.resource.id": f"prefect.worker.kubernetes.{slugify(worker_name)}",
                    "prefect.resource.role": "worker",
                    "prefect.resource.name": worker_name,
                    "prefect.worker-type": "kubernetes",
                    "prefect.version": __version__,
                }
            )
        )
    return related
# Handle to the background observer thread (None when not running).
_observer_thread: threading.Thread | None = None
# Set to ask the kopf operator loop to stop; created inside the thread.
_stop_flag: threading.Event | None = None
# Set by the kopf operator once it is ready; created inside the thread.
_ready_flag: threading.Event | None = None
def _observer_thread_entry():
    """Thread target: run a standalone kopf operator until `_stop_flag` is set."""
    global _stop_flag
    global _ready_flag
    _stop_flag = threading.Event()
    _ready_flag = threading.Event()
    # Watch only the configured namespaces, or the whole cluster when none
    # are configured.
    if settings.observer.namespaces:
        scope_kwargs = {"namespaces": settings.observer.namespaces}
    else:
        scope_kwargs = {"clusterwide": True}
    asyncio.run(
        kopf.operator(
            stop_flag=_stop_flag,
            ready_flag=_ready_flag,
            standalone=True,
            identity=uuid.uuid4().hex,
            **scope_kwargs,
        )
    )
def start_observer():
    """
    Start the observer in a separate thread.

    Idempotent: returns immediately if the observer thread is already running.
    """
    global _observer_thread
    global _ready_flag
    # Suppress the warning about running the observer in a non-main thread. The starter of the observer
    # will handle the OS signals.
    class ThreadWarningFilter(logging.Filter):
        def filter(self, record: logging.LogRecord) -> bool:
            return (
                "OS signals are ignored: running not in the main thread"
                not in record.getMessage()
            )

    logging.getLogger("kopf._core.reactor.running").addFilter(ThreadWarningFilter())
    # Configure kopf logging to match Prefect's logging format
    from prefect.logging.configuration import PROCESS_LOGGING_CONFIG

    if PROCESS_LOGGING_CONFIG:
        console_formatter = (
            PROCESS_LOGGING_CONFIG.get("handlers", {})
            .get("console", {})
            .get("formatter")
        )
        if console_formatter == "json":
            # Configure kopf to use its own JSON formatter instead of Prefect's
            # which cannot serialize kopf internal objects
            from prefect_kubernetes._logging import KopfObjectJsonFormatter

            kopf_logger = logging.getLogger("kopf")
            kopf_handler = logging.StreamHandler(sys.stderr)
            kopf_handler.setFormatter(KopfObjectJsonFormatter())
            kopf_logger.addHandler(kopf_handler)
            # Turn off propagation to prevent kopf logs from being propagated to Prefect's JSON formatter
            # which cannot serialize kopf internal objects
            kopf_logger.propagate = False
    if _observer_thread is not None:
        return
    _observer_thread = threading.Thread(
        target=_observer_thread_entry, name="prefect-kubernetes-observer", daemon=True
    )
    _observer_thread.start()
    # NOTE(review): `_ready_flag` is created inside the observer thread, so
    # this read can race with thread startup and observe None, skipping the
    # readiness wait — confirm whether that ordering is acceptable.
    if _ready_flag:
        _ready_flag.wait()
    _ready_flag = None
def stop_observer():
    """
    Signal the observer thread to stop and wait for it to exit.

    Safe to call when no observer is running; the module-level state is
    cleared either way.
    """
    global _stop_flag
    global _observer_thread
    if _stop_flag is not None:
        _stop_flag.set()
    thread = _observer_thread
    if thread is not None:
        thread.join()
        _observer_thread = None
    _stop_flag = None
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-kubernetes/prefect_kubernetes/observer.py",
"license": "Apache License 2.0",
"lines": 452,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PrefectHQ/prefect:src/integrations/prefect-kubernetes/tests/test_observer.py | import asyncio
import logging
import uuid
from contextlib import asynccontextmanager
from io import StringIO
from time import sleep
from unittest.mock import AsyncMock, MagicMock
import pytest
from prefect_kubernetes._logging import KopfObjectJsonFormatter
from prefect_kubernetes.observer import (
_mark_flow_run_as_crashed,
_replicate_pod_event,
start_observer,
stop_observer,
)
from prefect.client.schemas.objects import FlowRun, State
from prefect.events.schemas.events import RelatedResource, Resource
@pytest.fixture
def mock_events_client(monkeypatch: pytest.MonkeyPatch):
    """Patch the observer's events client factory with an AsyncMock and return it."""
    events_client = AsyncMock()
    # Async context manager that always yields the shared mock client.
    @asynccontextmanager
    async def mock_get_events_client():
        try:
            yield events_client
        finally:
            pass
    monkeypatch.setattr(
        "prefect_kubernetes.observer.get_events_client", mock_get_events_client
    )
    monkeypatch.setattr("prefect_kubernetes.observer.events_client", events_client)
    return events_client
@pytest.fixture
def mock_orchestration_client(monkeypatch: pytest.MonkeyPatch):
    """Patch the observer's orchestration client with an AsyncMock.

    By default the mocked ``request`` returns one pre-existing event so
    deduplication code paths are exercised.
    """
    orchestration_client = AsyncMock()
    json_response = MagicMock()
    json_response.json.return_value = {"events": [{"id": "existing-event"}]}
    orchestration_client.request.return_value = json_response
    # Async context manager that always yields the shared mock client.
    @asynccontextmanager
    async def mock_get_orchestration_client():
        try:
            yield orchestration_client
        finally:
            pass
    monkeypatch.setattr(
        "prefect_kubernetes.observer.get_client",
        mock_get_orchestration_client,
    )
    monkeypatch.setattr(
        "prefect_kubernetes.observer.orchestration_client", orchestration_client
    )
    # Initialize the startup event semaphore for tests
    monkeypatch.setattr(
        "prefect_kubernetes.observer._startup_event_semaphore",
        asyncio.Semaphore(5),
    )
    return orchestration_client
class TestReplicatePodEvent:
    """Tests for _replicate_pod_event: event shape, dedup, and related resources."""
    async def test_minimal(self, mock_events_client: AsyncMock):
        """A pod event with only flow-run labels emits a single pod event."""
        flow_run_id = uuid.uuid4()
        pod_id = uuid.uuid4()
        await _replicate_pod_event(
            event={"type": "ADDED", "status": {"phase": "Running"}},
            uid=str(pod_id),
            name="test",
            namespace="test",
            labels={
                "prefect.io/flow-run-id": str(flow_run_id),
                "prefect.io/flow-run-name": "test",
            },
            status={"phase": "Running"},
            logger=MagicMock(),
        )
        emitted_event = mock_events_client.emit.call_args[1]["event"]
        assert emitted_event.event == "prefect.kubernetes.pod.running"
        assert emitted_event.resource == Resource(
            {
                "prefect.resource.id": f"prefect.kubernetes.pod.{pod_id}",
                "prefect.resource.name": "test",
                "kubernetes.namespace": "test",
            }
        )
        assert emitted_event.related == [
            RelatedResource.model_validate(
                {
                    "prefect.resource.id": f"prefect.flow-run.{flow_run_id}",
                    "prefect.resource.role": "flow-run",
                    "prefect.resource.name": "test",
                }
            )
        ]
    async def test_deterministic_event_id(self, mock_events_client: AsyncMock):
        """Test that the event ID is deterministic"""
        pod_id = uuid.uuid4()
        await _replicate_pod_event(
            event={"type": "ADDED", "status": {"phase": "Running"}},
            uid=str(pod_id),
            name="test",
            namespace="test",
            labels={
                "prefect.io/flow-run-id": str(uuid.uuid4()),
                "prefect.io/flow-run-name": "test-run",
            },
            status={"phase": "Running"},
            logger=MagicMock(),
        )
        first_event_id = mock_events_client.emit.call_args[1]["event"].id
        mock_events_client.emit.reset_mock()
        # Call the function again
        await _replicate_pod_event(
            event={"type": "ADDED", "status": {"phase": "Running"}},
            uid=str(pod_id),
            name="test",
            namespace="test",
            labels={
                "prefect.io/flow-run-id": str(uuid.uuid4()),
                "prefect.io/flow-run-name": "test-run",
            },
            status={"phase": "Running"},
            logger=MagicMock(),
        )
        second_event_id = mock_events_client.emit.call_args[1]["event"].id
        assert first_event_id == second_event_id
    async def test_evicted_pod(self, mock_events_client: AsyncMock):
        """Test handling of evicted pods"""
        pod_id = uuid.uuid4()
        await _replicate_pod_event(
            event={"type": "MODIFIED"},
            uid=str(pod_id),
            name="test",
            namespace="test",
            labels={
                "prefect.io/flow-run-id": str(uuid.uuid4()),
                "prefect.io/flow-run-name": "test-run",
            },
            status={
                "phase": "Failed",
                "containerStatuses": [
                    {"state": {"terminated": {"reason": "OOMKilled"}}}
                ],
            },
            logger=MagicMock(),
        )
        emitted_event = mock_events_client.emit.call_args[1]["event"]
        assert emitted_event.event == "prefect.kubernetes.pod.evicted"
        assert emitted_event.resource == Resource(
            {
                "prefect.resource.id": f"prefect.kubernetes.pod.{pod_id}",
                "prefect.resource.name": "test",
                "kubernetes.namespace": "test",
                "kubernetes.reason": "OOMKilled",
            },
        )
    async def test_all_related_resources(self, mock_events_client: AsyncMock):
        """Test that all possible related resources are included"""
        flow_run_id = uuid.uuid4()
        deployment_id = uuid.uuid4()
        flow_id = uuid.uuid4()
        work_pool_id = uuid.uuid4()
        pod_id = uuid.uuid4()
        await _replicate_pod_event(
            event={"type": "ADDED"},
            uid=str(pod_id),
            name="test",
            namespace="test",
            labels={
                "prefect.io/flow-run-id": str(flow_run_id),
                "prefect.io/flow-run-name": "test-run",
                "prefect.io/deployment-id": str(deployment_id),
                "prefect.io/deployment-name": "test-deployment",
                "prefect.io/flow-id": str(flow_id),
                "prefect.io/flow-name": "test-flow",
                "prefect.io/work-pool-id": str(work_pool_id),
                "prefect.io/work-pool-name": "test-pool",
                "prefect.io/worker-name": "test-worker",
            },
            status={"phase": "Running"},
            logger=MagicMock(),
        )
        mock_events_client.emit.assert_called_once()
        emitted_event = mock_events_client.emit.call_args[1]["event"]
        related_resources = emitted_event.related
        # Verify all related resources are present
        resource_ids = {
            r.model_dump()["prefect.resource.id"] for r in related_resources
        }
        assert resource_ids == {
            f"prefect.flow-run.{flow_run_id}",
            f"prefect.deployment.{deployment_id}",
            f"prefect.flow.{flow_id}",
            f"prefect.work-pool.{work_pool_id}",
            "prefect.worker.kubernetes.test-worker",
        }
        resource_names = {
            r.model_dump()["prefect.resource.name"] for r in related_resources
        }
        assert resource_names == {
            "test-run",
            "test-deployment",
            "test-flow",
            "test-pool",
            "test-worker",
        }
    async def test_event_deduplication(
        self, mock_events_client: AsyncMock, mock_orchestration_client: AsyncMock
    ):
        """Test that checks from existing events when receiving events on startup"""
        pod_id = uuid.uuid4()
        await _replicate_pod_event(
            # Event types with None are received when reading current cluster state
            event={"type": None},
            uid=str(pod_id),
            name="test",
            namespace="test",
            labels={"prefect.io/flow-run-id": str(uuid.uuid4())},
            status={"phase": "Running"},
            logger=MagicMock(),
        )
        # Verify the request was made with correct payload structure
        mock_orchestration_client.request.assert_called_once()
        call_args = mock_orchestration_client.request.call_args
        assert call_args[0] == ("POST", "/events/filter")
        # Verify the json payload has the correct structure: {"filter": {...}}
        json_payload = call_args[1]["json"]
        assert "filter" in json_payload, "Expected 'filter' key in json payload"
        # Verify the nested filter contains expected fields
        event_filter = json_payload["filter"]
        assert "event" in event_filter, "Expected 'event' field in filter"
        assert "resource" in event_filter, "Expected 'resource' field in filter"
        assert "occurred" in event_filter, "Expected 'occurred' field in filter"
        # Verify no event was emitted since one already existed
        mock_events_client.emit.assert_not_called()
    @pytest.mark.parametrize("phase", ["Pending", "Running", "Succeeded", "Failed"])
    async def test_different_phases(self, mock_events_client: AsyncMock, phase: str):
        """Test handling of different pod phases"""
        pod_id = uuid.uuid4()
        flow_run_id = uuid.uuid4()
        mock_events_client.emit.reset_mock()
        await _replicate_pod_event(
            event={"type": "ADDED"},
            uid=str(pod_id),
            name="test",
            namespace="test",
            labels={
                "prefect.io/flow-run-id": str(flow_run_id),
                "prefect.io/flow-run-name": "test-run",
            },
            status={"phase": phase},
            logger=MagicMock(),
        )
        mock_events_client.emit.assert_called_once()
        emitted_event = mock_events_client.emit.call_args[1]["event"]
        assert emitted_event.event == f"prefect.kubernetes.pod.{phase.lower()}"
    async def test_startup_event_semaphore_limits_concurrency(
        self,
        mock_events_client: AsyncMock,
        mock_orchestration_client: AsyncMock,
        monkeypatch: pytest.MonkeyPatch,
    ):
        """Test that startup event deduplication respects semaphore concurrency limit"""
        # Track concurrent requests
        concurrent_count = 0
        max_concurrent = 0
        semaphore_limit = 2
        # Set up a semaphore with a small limit for testing
        monkeypatch.setattr(
            "prefect_kubernetes.observer._startup_event_semaphore",
            asyncio.Semaphore(semaphore_limit),
        )
        # Configure mock to return no existing events so we can track the full request
        json_response = MagicMock()
        json_response.json.return_value = {"events": []}
        mock_orchestration_client.request.return_value = json_response
        async def slow_request(*args, **kwargs):
            nonlocal concurrent_count, max_concurrent
            concurrent_count += 1
            max_concurrent = max(max_concurrent, concurrent_count)
            await asyncio.sleep(0.1)  # Simulate network delay
            concurrent_count -= 1
            return json_response
        mock_orchestration_client.request.side_effect = slow_request
        # Launch multiple startup events concurrently
        tasks = []
        for i in range(5):
            tasks.append(
                asyncio.create_task(
                    _replicate_pod_event(
                        event={"type": None},
                        uid=str(uuid.uuid4()),
                        name=f"test-{i}",
                        namespace="test",
                        labels={
                            "prefect.io/flow-run-id": str(uuid.uuid4()),
                            "prefect.io/flow-run-name": f"test-run-{i}",
                        },
                        status={"phase": "Running"},
                        logger=MagicMock(),
                    )
                )
            )
        await asyncio.gather(*tasks)
        # Verify the semaphore limited concurrency
        assert max_concurrent <= semaphore_limit, (
            f"Expected max {semaphore_limit} concurrent requests, but got {max_concurrent}"
        )
        # Verify all requests were eventually made
        assert mock_orchestration_client.request.call_count == 5
class TestMarkFlowRunAsCrashed:
    """Tests that _mark_flow_run_as_crashed skips non-crashable flow run states."""
    @pytest.fixture
    def flow_run_id(self):
        return uuid.uuid4()
    @pytest.fixture
    def base_kwargs(self, flow_run_id):
        # Common keyword arguments mimicking a failed Kubernetes job event.
        return {
            "event": {"type": "MODIFIED"},
            "name": "test-job",
            "labels": {"prefect.io/flow-run-id": str(flow_run_id)},
            "status": {"failed": 7},
            "logger": MagicMock(),
            "spec": {"backoffLimit": 6},
            "namespace": "default",
        }
    async def test_skips_paused_states(
        self, mock_orchestration_client: AsyncMock, flow_run_id, base_kwargs
    ):
        """A paused/suspended flow run is never proposed a crashed state."""
        flow_run = FlowRun(
            id=flow_run_id,
            name="test-flow-run",
            flow_id=uuid.uuid4(),
            state=State(type="PAUSED", name="Suspended"),
        )
        mock_orchestration_client.read_flow_run.return_value = flow_run
        with pytest.MonkeyPatch.context() as m:
            mock_propose = AsyncMock()
            m.setattr("prefect_kubernetes.observer.propose_state", mock_propose)
            await _mark_flow_run_as_crashed(**base_kwargs)
            mock_propose.assert_not_called()
    async def test_skips_final_states(
        self, mock_orchestration_client: AsyncMock, flow_run_id, base_kwargs
    ):
        """A flow run already in a terminal state is never proposed a crashed state."""
        flow_run = FlowRun(
            id=flow_run_id,
            name="test-flow-run",
            flow_id=uuid.uuid4(),
            state=State(type="COMPLETED", name="Completed"),
        )
        mock_orchestration_client.read_flow_run.return_value = flow_run
        with pytest.MonkeyPatch.context() as m:
            mock_propose = AsyncMock()
            m.setattr("prefect_kubernetes.observer.propose_state", mock_propose)
            await _mark_flow_run_as_crashed(**base_kwargs)
            mock_propose.assert_not_called()
    async def test_skips_scheduled_states(
        self, mock_orchestration_client: AsyncMock, flow_run_id, base_kwargs
    ):
        """A scheduled flow run is never proposed a crashed state."""
        flow_run = FlowRun(
            id=flow_run_id,
            name="test-flow-run",
            flow_id=uuid.uuid4(),
            state=State(type="SCHEDULED", name="Scheduled"),
        )
        mock_orchestration_client.read_flow_run.return_value = flow_run
        with pytest.MonkeyPatch.context() as m:
            mock_propose = AsyncMock()
            m.setattr("prefect_kubernetes.observer.propose_state", mock_propose)
            await _mark_flow_run_as_crashed(**base_kwargs)
            mock_propose.assert_not_called()
class TestStartAndStopObserver:
    """Smoke test for the observer thread lifecycle."""
    @pytest.mark.timeout(10)
    @pytest.mark.usefixtures("mock_events_client", "mock_orchestration_client")
    def test_start_and_stop(self, monkeypatch: pytest.MonkeyPatch):
        """
        Test that the observer can be started and stopped without errors
        and without hanging.
        """
        start_observer()
        sleep(1)
        stop_observer()
class TestLoggingConfiguration:
    """Tests for the logging configuration logic in start_observer()"""
    @pytest.mark.usefixtures("mock_events_client", "mock_orchestration_client")
    def test_json_formatter_configures_kopf_logger(
        self, monkeypatch: pytest.MonkeyPatch
    ):
        """
        Test that when Prefect uses JSON formatting, kopf logger gets its own
        handler with KopfObjectJsonFormatter and propagation is disabled.
        """
        # Stop any existing observer first
        stop_observer()
        # Set up Prefect to use JSON formatting
        monkeypatch.setenv("PREFECT_LOGGING_HANDLERS_CONSOLE_FORMATTER", "json")
        # Import and setup logging fresh to pick up env var
        from prefect.logging.configuration import PROCESS_LOGGING_CONFIG, setup_logging
        PROCESS_LOGGING_CONFIG.clear()
        setup_logging(incremental=False)
        # Clear any existing kopf logger configuration
        kopf_logger = logging.getLogger("kopf")
        kopf_logger.handlers.clear()
        kopf_logger.propagate = True
        # Start the observer which should configure kopf logging
        try:
            start_observer()
            sleep(0.5)  # Give it time to configure
            # Verify kopf logger has its own handler
            assert len(kopf_logger.handlers) > 0, "kopf logger should have a handler"
            # Verify the handler has the correct formatter
            handler = kopf_logger.handlers[0]
            assert isinstance(handler.formatter, KopfObjectJsonFormatter), (
                f"Expected KopfObjectJsonFormatter, got {type(handler.formatter)}"
            )
            # Verify propagation is disabled
            assert kopf_logger.propagate is False, (
                "kopf logger propagation should be disabled"
            )
        finally:
            # Always tear down the observer and env var so later tests start clean.
            stop_observer()
            monkeypatch.delenv("PREFECT_LOGGING_HANDLERS_CONSOLE_FORMATTER")
    @pytest.mark.usefixtures("mock_events_client", "mock_orchestration_client")
    def test_standard_formatter_uses_default_behavior(
        self, monkeypatch: pytest.MonkeyPatch
    ):
        """
        Test that when Prefect uses standard formatting (default),
        kopf logger uses default propagation behavior.
        """
        # Stop any existing observer first
        stop_observer()
        # Use default logging configuration (standard formatter)
        from prefect.logging.configuration import PROCESS_LOGGING_CONFIG, setup_logging
        PROCESS_LOGGING_CONFIG.clear()
        setup_logging(incremental=False)
        # Clear any existing kopf logger configuration
        kopf_logger = logging.getLogger("kopf")
        kopf_logger.handlers.clear()
        kopf_logger.propagate = True
        # Start the observer
        try:
            start_observer()
            sleep(0.5)
            # Verify kopf logger doesn't have a dedicated handler added by start_observer
            # (it should propagate to root logger since we're using standard formatting)
            assert len(kopf_logger.handlers) == 0, (
                "kopf logger should not have handlers with standard formatting"
            )
            # Verify propagation is still enabled (default behavior)
            assert kopf_logger.propagate is True, (
                "kopf logger propagation should remain enabled with standard formatting"
            )
        finally:
            stop_observer()
    @pytest.mark.usefixtures("mock_events_client", "mock_orchestration_client")
    def test_no_duplicate_logs_with_json_formatting(
        self, monkeypatch: pytest.MonkeyPatch
    ):
        """
        Test that kopf logs don't appear duplicated when JSON formatting is enabled.
        """
        # Stop any existing observer first
        stop_observer()
        # Set up JSON formatting
        monkeypatch.setenv("PREFECT_LOGGING_HANDLERS_CONSOLE_FORMATTER", "json")
        from prefect.logging.configuration import PROCESS_LOGGING_CONFIG, setup_logging
        PROCESS_LOGGING_CONFIG.clear()
        setup_logging(incremental=False)
        # Clear kopf logger
        kopf_logger = logging.getLogger("kopf.test")
        kopf_logger.handlers.clear()
        kopf_logger.propagate = True
        try:
            start_observer()
            sleep(0.5)
            # Create a custom handler to capture logs
            # (caplog won't work since propagation is disabled)
            captured_logs: list[logging.LogRecord] = []
            class CaptureHandler(logging.Handler):
                def emit(self, record: logging.LogRecord):
                    captured_logs.append(record)
            capture_handler = CaptureHandler()
            kopf_logger.addHandler(capture_handler)
            # Emit a test message
            kopf_logger.warning("Test message for duplicate check")
            # Count how many times the message appears
            matching_records = [
                r
                for r in captured_logs
                if "Test message for duplicate check" in r.message
            ]
            assert len(matching_records) == 1, (
                f"Expected 1 log message, got {len(matching_records)}"
            )
        finally:
            stop_observer()
            monkeypatch.delenv("PREFECT_LOGGING_HANDLERS_CONSOLE_FORMATTER")
    @pytest.mark.usefixtures("mock_events_client", "mock_orchestration_client")
    def test_kopf_logs_visible_with_json_formatting(
        self, monkeypatch: pytest.MonkeyPatch
    ):
        """
        Test that kopf logs are actually emitted and visible when JSON formatting is enabled.
        """
        # Stop any existing observer first
        stop_observer()
        # Set up JSON formatting
        monkeypatch.setenv("PREFECT_LOGGING_HANDLERS_CONSOLE_FORMATTER", "json")
        from prefect.logging.configuration import PROCESS_LOGGING_CONFIG, setup_logging
        PROCESS_LOGGING_CONFIG.clear()
        setup_logging(incremental=False)
        # Clear kopf logger
        kopf_logger = logging.getLogger("kopf.test")
        kopf_logger.handlers.clear()
        kopf_logger.propagate = True
        try:
            start_observer()
            sleep(0.5)
            # Create a string buffer to capture output
            log_capture = StringIO()
            test_handler = logging.StreamHandler(log_capture)
            test_handler.setFormatter(KopfObjectJsonFormatter())
            kopf_logger.addHandler(test_handler)
            # Emit a test log message
            kopf_logger.warning("Test message for visibility check")
            # Get the captured output
            log_output = log_capture.getvalue()
            # Verify the message was emitted
            assert "Test message for visibility check" in log_output, (
                "kopf log message should be visible in output"
            )
            # Verify it's JSON formatted
            assert '"message"' in log_output or '"msg"' in log_output, (
                "Log output should be JSON formatted"
            )
        finally:
            stop_observer()
            monkeypatch.delenv("PREFECT_LOGGING_HANDLERS_CONSOLE_FORMATTER")
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/integrations/prefect-kubernetes/tests/test_observer.py",
"license": "Apache License 2.0",
"lines": 534,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PrefectHQ/prefect:src/prefect/_internal/uuid7.py | from __future__ import annotations
import os
import time
import uuid
from typing import Callable, Optional
def _time_ms() -> int:
return time.time_ns() // 1_000_000
def uuid7(
    ms: Optional[int] = None,
    time_func: Callable[[], int] = _time_ms,
) -> uuid.UUID:
    """
    UUID v7, following the proposed extension to RFC4122 described in
    https://www.ietf.org/id/draft-peabody-dispatch-new-uuid-format-02.html.
    All representations (string, byte array, int) sort chronologically,
    with millisecond time resolution.

    Parameters
    ----------
    ms - Optional integer with the whole number of milliseconds
         since Unix epoch, to set the "as of" timestamp. Defaults to
         the current time from ``time_func``.
    time_func - Set the time function, which must return integer
         milliseconds since the Unix epoch, midnight on 1-Jan-1970.
         Defaults to time.time_ns()//1e6. This is exposed because
         time.time_ns() may have a low resolution on Windows.

    Returns
    -------
    A uuid.UUID object (version 7, variant 0b10).

    Implementation notes
    --------------------
    The 128 bits in the UUID are allocated as follows:
    - 48 bits of whole milliseconds since the Unix epoch (unix_ts_ms)
    - 12 bits of randomness (rand_a)
    - 62 bits of randomness (rand_b)
    plus, at locations defined by RFC4122, 4 bits for the
    uuid version (0b0111) and 2 bits for the uuid variant (0b10).
     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                           unix_ts_ms                          |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |          unix_ts_ms           |  ver  |       rand_a          |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |var|                        rand_b                             |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                            rand_b                             |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    Examples
    --------
    ```python
    uuid7()
    # UUID('061cb26a-54b8-7a52-8000-2124e7041024')
    ```
    """
    if ms is None:
        ms = time_func()
    else:
        ms = int(ms)  # Fail fast if not an int
    rand_a = int.from_bytes(bytes=os.urandom(2), byteorder="big")
    rand_b = int.from_bytes(bytes=os.urandom(8), byteorder="big")
    uuid_bytes = uuidfromvalues(ms, rand_a, rand_b)
    uuid_int = int.from_bytes(bytes=uuid_bytes, byteorder="big")
    return uuid.UUID(int=uuid_int)
def uuidfromvalues(unix_ts_ms: int, rand_a: int, rand_b: int):
    """Assemble the 16 raw bytes of a UUIDv7 from its three fields.

    The timestamp fills the first 48 bits; the version nibble (0b0111) is
    packed ahead of the low 12 bits of rand_a; the variant bits (0b10) are
    packed ahead of the low 62 bits of rand_b.
    """
    VERSION = 0x07  # RFC4122 version field for UUIDv7
    VARIANT = 2  # RFC4122 variant 0b10
    time_part = unix_ts_ms.to_bytes(length=6, byteorder="big")
    mid_part = ((VERSION << 12) | (rand_a & 0xFFF)).to_bytes(length=2, byteorder="big")
    tail_part = ((VARIANT << 62) | (rand_b & 0x3FFFFFFFFFFFFFFF)).to_bytes(
        length=8, byteorder="big"
    )
    return time_part + mid_part + tail_part
def format_byte_array_as_uuid(arr: bytes):
    """Render 16 raw UUID bytes in canonical 8-4-4-4-12 hex form."""
    groups = (arr[:4], arr[4:6], arr[6:8], arr[8:10], arr[10:])
    return "-".join(group.hex() for group in groups)
__all__ = ("uuid7",)
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/_internal/uuid7.py",
"license": "Apache License 2.0",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
PrefectHQ/prefect:src/prefect/types/names.py | from __future__ import annotations
import re
from functools import partial
from typing import Annotated, overload
from pydantic import AfterValidator, BeforeValidator, Field
# Character-set patterns permitted in various Prefect object names.
LOWERCASE_LETTERS_NUMBERS_AND_DASHES_ONLY_REGEX = "^[a-z0-9-]*$"
LOWERCASE_LETTERS_NUMBERS_AND_UNDERSCORES_REGEX = "^[a-z0-9_]*$"
LOWERCASE_LETTERS_NUMBERS_AND_DASHES_OR_UNDERSCORES_REGEX = "^[a-z0-9-_]*$"
@overload
def raise_on_name_alphanumeric_dashes_only(
    value: str, field_name: str = ...
) -> str: ...
@overload
def raise_on_name_alphanumeric_dashes_only(
    value: None, field_name: str = ...
) -> None: ...
def raise_on_name_alphanumeric_dashes_only(
    value: str | None, field_name: str = "value"
) -> str | None:
    """Validate that *value* contains only lowercase letters, numbers, and dashes.

    ``None`` passes through unchanged; any other invalid value raises ``ValueError``.
    """
    if value is None:
        return None
    if re.match(LOWERCASE_LETTERS_NUMBERS_AND_DASHES_ONLY_REGEX, value) is None:
        raise ValueError(
            f"{field_name} must only contain lowercase letters, numbers, and dashes."
        )
    return value
@overload
def raise_on_name_alphanumeric_underscores_only(
    value: str, field_name: str = ...
) -> str: ...
@overload
def raise_on_name_alphanumeric_underscores_only(
    value: None, field_name: str = ...
) -> None: ...
def raise_on_name_alphanumeric_underscores_only(
    value: str | None, field_name: str = "value"
) -> str | None:
    """Validate that *value* contains only lowercase letters, numbers, and
    underscores; ``None`` passes through unchanged."""
    if value is None:
        return None
    if re.match(LOWERCASE_LETTERS_NUMBERS_AND_UNDERSCORES_REGEX, value) is None:
        raise ValueError(
            f"{field_name} must only contain lowercase letters, numbers, and"
            " underscores."
        )
    return value
def raise_on_name_alphanumeric_dashes_underscores_only(
    value: str, field_name: str = "value"
) -> str:
    """Validate that *value* contains only lowercase letters, numbers,
    dashes, and underscores; raises ``ValueError`` otherwise."""
    matched = re.match(
        LOWERCASE_LETTERS_NUMBERS_AND_DASHES_OR_UNDERSCORES_REGEX, value
    )
    if matched is None:
        raise ValueError(
            f"{field_name} must only contain lowercase letters, numbers, and"
            " dashes or underscores."
        )
    return value
# Characters disallowed in user-supplied names (they conflict with URL routing
# and templating when names appear in API paths).
BANNED_CHARACTERS = ["/", "%", "&", ">", "<"]
# Non-empty string containing none of the banned characters.
WITHOUT_BANNED_CHARACTERS = r"^[^" + "".join(BANNED_CHARACTERS) + "]+$"
Name = Annotated[str, Field(pattern=WITHOUT_BANNED_CHARACTERS)]
# Same constraint, but the empty string is also accepted.
WITHOUT_BANNED_CHARACTERS_EMPTY_OK = r"^[^" + "".join(BANNED_CHARACTERS) + "]*$"
NameOrEmpty = Annotated[str, Field(pattern=WITHOUT_BANNED_CHARACTERS_EMPTY_OK)]
def non_emptyish(value: str) -> str:
    """Reject strings that are empty after stripping quotes and whitespace."""
    stripped = value.strip("' \"")
    if not stripped:
        raise ValueError("name cannot be an empty string")
    return value
# Name that additionally must not be "emptyish" (empty after stripping quotes
# and whitespace).
NonEmptyishName = Annotated[
    str,
    Field(pattern=WITHOUT_BANNED_CHARACTERS),
    BeforeValidator(non_emptyish),
]
### specific names
# Block document names allow only lowercase letters, numbers, and dashes.
BlockDocumentName = Annotated[
    Name,
    AfterValidator(
        partial(
            raise_on_name_alphanumeric_dashes_only, field_name="Block document name"
        )
    ),
]
BlockTypeSlug = Annotated[
    str,
    AfterValidator(
        partial(raise_on_name_alphanumeric_dashes_only, field_name="Block type slug")
    ),
]
ArtifactKey = Annotated[
    str,
    AfterValidator(
        partial(raise_on_name_alphanumeric_dashes_only, field_name="Artifact key")
    ),
]
MAX_VARIABLE_NAME_LENGTH = 255
# Variable names also permit underscores and are length-limited.
VariableName = Annotated[
    str,
    AfterValidator(
        partial(
            raise_on_name_alphanumeric_dashes_underscores_only,
            field_name="Variable name",
        )
    ),
    Field(
        max_length=MAX_VARIABLE_NAME_LENGTH,
        description="The name of the variable",
        examples=["my_variable"],
    ),
]
# URI validation: a lowercase alphanumeric scheme followed by "://".
URI_REGEX = re.compile(r"^[a-z0-9]+://")
def validate_uri(value: str) -> str:
    """Validate that a string is a valid URI with lowercase protocol.

    Returns the value unchanged on success; raises ``ValueError`` otherwise.
    """
    if URI_REGEX.match(value) is None:
        raise ValueError(
            "Key must be a valid URI, e.g. storage://bucket/folder/asset.csv"
        )
    return value
URILike = Annotated[
    str,
    AfterValidator(validate_uri),
    Field(
        description="A URI-like string with a lowercase protocol",
        examples=["s3://bucket/folder/data.csv", "postgres://dbtable"],
    ),
]
MAX_ASSET_KEY_LENGTH = 512
# Characters rejected in asset keys: whitespace/control characters plus
# URL- and markup-significant punctuation.
RESTRICTED_ASSET_CHARACTERS = [
    "\n",
    "\r",
    "\t",
    "\0",
    " ",
    "#",
    "?",
    "&",
    "%",
    '"',
    "'",
    "<",
    ">",
    "[",
    "]",
    "{",
    "}",
    "|",
    "\\",
    "^",
    "`",
]
def validate_valid_asset_key(value: str) -> str:
    """Validate asset key with character restrictions and length limit.

    Raises ``ValueError`` for restricted characters or overlong keys, then
    delegates to ``validate_uri`` for the URI-shape check.
    """
    bad_char = next((c for c in RESTRICTED_ASSET_CHARACTERS if c in value), None)
    if bad_char is not None:
        raise ValueError(f"Asset key cannot contain '{bad_char}'")
    if len(value) > MAX_ASSET_KEY_LENGTH:
        raise ValueError(f"Asset key cannot exceed {MAX_ASSET_KEY_LENGTH} characters")
    return validate_uri(value)
# Asset key: a URI-like string with restricted characters and a length cap.
ValidAssetKey = Annotated[
    str,
    AfterValidator(validate_valid_asset_key),
    Field(
        max_length=MAX_ASSET_KEY_LENGTH,
        description=f"A URI-like string with a lowercase protocol, restricted characters, and max {MAX_ASSET_KEY_LENGTH} characters",
        examples=["s3://bucket/folder/data.csv", "postgres://dbtable"],
    ),
]
| {
"repo_id": "PrefectHQ/prefect",
"file_path": "src/prefect/types/names.py",
"license": "Apache License 2.0",
"lines": 162,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PromtEngineer/localGPT:rag_system/utils/watsonx_client.py | import json
from typing import List, Dict, Any, Optional
import base64
from io import BytesIO
from PIL import Image
class WatsonXClient:
    """
    A client for IBM Watson X AI that provides similar interface to OllamaClient
    for seamless integration with the RAG system.
    """
    # Ollama-style kwarg name -> Watson X TextGenParameters field name.
    _PARAM_NAME_MAP = {
        'max_tokens': 'max_new_tokens',
        'temperature': 'temperature',
        'top_p': 'top_p',
        'top_k': 'top_k',
    }
    def __init__(
        self,
        api_key: str,
        project_id: str,
        url: str = "https://us-south.ml.cloud.ibm.com",
    ):
        """
        Initialize the Watson X client.
        Args:
            api_key: IBM Cloud API key for authentication
            project_id: Watson X project ID
            url: Watson X service URL (default: us-south region)
        Raises:
            ImportError: if the ibm-watsonx-ai package is not installed.
        """
        self.api_key = api_key
        self.project_id = project_id
        self.url = url
        try:
            from ibm_watsonx_ai import APIClient
            from ibm_watsonx_ai import Credentials
            from ibm_watsonx_ai.foundation_models import ModelInference
            from ibm_watsonx_ai.foundation_models.schema import TextGenParameters
        except ImportError:
            raise ImportError(
                "ibm-watsonx-ai package is required. "
                "Install it with: pip install ibm-watsonx-ai"
            )
        # Keep SDK classes on the instance so methods don't re-import them.
        self._APIClient = APIClient
        self._Credentials = Credentials
        self._ModelInference = ModelInference
        self._TextGenParameters = TextGenParameters
        self.credentials = self._Credentials(
            api_key=self.api_key,
            url=self.url
        )
        self.client = self._APIClient(self.credentials)
        self.client.set.default_project(self.project_id)
    def _image_to_base64(self, image: Image.Image) -> str:
        """Converts a Pillow Image to a base64 string."""
        buffered = BytesIO()
        image.save(buffered, format="PNG")
        return base64.b64encode(buffered.getvalue()).decode('utf-8')
    @staticmethod
    def _extract_generated_text(result) -> str:
        """Pull the generated text out of a Watson X ``generate()`` result.

        Tolerates a missing or empty ``results`` list (the previous
        implementation raised IndexError on an empty list).
        """
        if isinstance(result, dict):
            results = result.get('results') or [{}]
            return results[0].get('generated_text', '')
        return str(result)
    def _build_parameters(self, kwargs, keys=('max_tokens', 'temperature', 'top_p', 'top_k')):
        """Translate Ollama-style generation kwargs into TextGenParameters.

        Uses ``is not None`` checks so legitimate zero values (e.g.
        ``temperature=0``) are not silently dropped. Returns None when no
        recognized parameters are supplied.
        """
        gen_params = {}
        for key in keys:
            value = kwargs.get(key)
            if value is not None:
                gen_params[self._PARAM_NAME_MAP[key]] = value
        return self._TextGenParameters(**gen_params) if gen_params else None
    def generate_embedding(self, model: str, text: str) -> List[float]:
        """
        Generate embeddings using Watson X embedding models.
        Note: This requires using Watson X embedding models through the embeddings API.
        Returns an empty list on failure (best-effort, matching the other methods).
        """
        try:
            from ibm_watsonx_ai.foundation_models import Embeddings
            embedding_model = Embeddings(
                model_id=model,
                credentials=self.credentials,
                project_id=self.project_id
            )
            result = embedding_model.embed_query(text)
            return result if isinstance(result, list) else []
        except Exception as e:
            print(f"Error generating embedding: {e}")
            return []
    def generate_completion(
        self,
        model: str,
        prompt: str,
        *,
        format: str = "",
        images: Optional[List[Image.Image]] = None,
        enable_thinking: Optional[bool] = None,
        **kwargs
    ) -> Dict[str, Any]:
        """
        Generates a completion using Watson X foundation models.
        Args:
            model: The name/ID of the Watson X model (e.g., 'ibm/granite-13b-chat-v2')
            prompt: The text prompt for the model
            format: The format for the response (e.g., "json")
            images: List of Pillow Image objects (for multimodal models)
            enable_thinking: Optional flag (not used in Watson X, kept for compatibility)
            **kwargs: Additional parameters for text generation
                (max_tokens, temperature, top_p, top_k)
        Returns:
            Dictionary with response in Ollama-compatible format; on failure
            the dict contains an empty 'response' and an 'error' message.
        """
        try:
            parameters = self._build_parameters(kwargs)
            model_inference = self._ModelInference(
                model_id=model,
                credentials=self.credentials,
                project_id=self.project_id,
                params=parameters
            )
            if images:
                # Images are not forwarded to the model; the request is
                # text-only either way (both former branches made the same call).
                print("Warning: Image support in Watson X may vary by model")
            result = model_inference.generate(prompt=prompt)
            generated_text = self._extract_generated_text(result)
            return {
                'response': generated_text,
                'model': model,
                'done': True
            }
        except Exception as e:
            print(f"Error generating completion: {e}")
            return {'response': '', 'error': str(e)}
    async def generate_completion_async(
        self,
        model: str,
        prompt: str,
        *,
        format: str = "",
        images: Optional[List[Image.Image]] = None,
        enable_thinking: Optional[bool] = None,
        timeout: int = 60,
        **kwargs
    ) -> Dict[str, Any]:
        """
        Asynchronous version of generate_completion.
        Note: IBM Watson X SDK may not have native async support,
        so this is a wrapper around the sync version run in an executor.
        """
        import asyncio
        # get_event_loop() is deprecated inside coroutines; we are always
        # running in a loop here, so get_running_loop() is correct.
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(
            None,
            lambda: self.generate_completion(
                model, prompt, format=format, images=images,
                enable_thinking=enable_thinking, **kwargs
            )
        )
    def stream_completion(
        self,
        model: str,
        prompt: str,
        *,
        images: Optional[List[Image.Image]] = None,
        enable_thinking: Optional[bool] = None,
        **kwargs
    ):
        """
        Generator that yields partial response strings as they arrive.
        Note: Watson X streaming support depends on the SDK version and model;
        falls back to a single blocking call when streaming is unavailable.
        Yields an empty string on error.
        """
        try:
            # Streaming historically honored only max_tokens and temperature.
            parameters = self._build_parameters(
                kwargs, keys=('max_tokens', 'temperature')
            )
            model_inference = self._ModelInference(
                model_id=model,
                credentials=self.credentials,
                project_id=self.project_id,
                params=parameters
            )
            try:
                for chunk in model_inference.generate_text_stream(prompt=prompt):
                    if chunk:
                        yield chunk
            except AttributeError:
                # SDK without generate_text_stream: one blocking call instead.
                result = model_inference.generate(prompt=prompt)
                yield self._extract_generated_text(result)
        except Exception as e:
            print(f"Error in stream_completion: {e}")
            yield ""
if __name__ == '__main__':
    # Informational banner shown when the module is executed directly.
    banner = (
        "Watson X Client for IBM watsonx.ai integration",
        "This client provides Ollama-compatible interface for Watson X granite models",
        "\nTo use this client, you need:",
        "1. IBM Cloud API key",
        "2. Watson X project ID",
        "3. ibm-watsonx-ai package installed",
        "\nExample usage:",
    )
    for line in banner:
        print(line)
    print("""
    from rag_system.utils.watsonx_client import WatsonXClient
    client = WatsonXClient(
        api_key="your-api-key",
        project_id="your-project-id"
    )
    response = client.generate_completion(
        model="ibm/granite-13b-chat-v2",
        prompt="What is AI?"
    )
    print(response['response'])
    """)
| {
"repo_id": "PromtEngineer/localGPT",
"file_path": "rag_system/utils/watsonx_client.py",
"license": "MIT License",
"lines": 211,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PromtEngineer/localGPT:rag_system/ingestion/document_converter.py | from typing import List, Tuple, Dict, Any
from docling.document_converter import DocumentConverter as DoclingConverter, PdfFormatOption
from docling.datamodel.pipeline_options import PdfPipelineOptions, OcrMacOptions
from docling.datamodel.base_models import InputFormat
import fitz # PyMuPDF for quick text inspection
import os
class DocumentConverter:
    """
    Converts various document formats to structured Markdown using the docling
    library. Supports PDF (with automatic OCR fallback), DOCX, HTML, MD and TXT.
    """

    # Mapping of file extensions to docling InputFormat values.
    # '.txt' maps to a local sentinel because docling has no plain-text format.
    SUPPORTED_FORMATS = {
        '.pdf': InputFormat.PDF,
        '.docx': InputFormat.DOCX,
        '.html': InputFormat.HTML,
        '.htm': InputFormat.HTML,
        '.md': InputFormat.MD,
        '.txt': 'TXT',  # Special handling for plain text files
    }

    def __init__(self):
        """Initializes the docling document converter with forced OCR enabled for macOS."""
        try:
            # --- Converter WITHOUT OCR (fast path) ---
            pipeline_no_ocr = PdfPipelineOptions()
            pipeline_no_ocr.do_ocr = False
            format_no_ocr = {
                InputFormat.PDF: PdfFormatOption(pipeline_options=pipeline_no_ocr)
            }
            self.converter_no_ocr = DoclingConverter(format_options=format_no_ocr)
            # --- Converter WITH OCR (fallback, macOS OCR backend) ---
            pipeline_ocr = PdfPipelineOptions()
            pipeline_ocr.do_ocr = True
            ocr_options = OcrMacOptions(force_full_page_ocr=True)
            pipeline_ocr.ocr_options = ocr_options
            format_ocr = {
                InputFormat.PDF: PdfFormatOption(pipeline_options=pipeline_ocr)
            }
            self.converter_ocr = DoclingConverter(format_options=format_ocr)
            # Default converter for every non-PDF format.
            self.converter_general = DoclingConverter()
            print("docling DocumentConverter(s) initialized (OCR + no-OCR + general).")
        except Exception as e:
            # Leave all converters unset so convert_to_markdown degrades gracefully.
            print(f"Error initializing docling DocumentConverter(s): {e}")
            self.converter_no_ocr = None
            self.converter_ocr = None
            self.converter_general = None

    def convert_to_markdown(self, file_path: str) -> List[Tuple[str, Dict[str, Any]]]:
        """
        Converts a document to a single Markdown string, preserving layout and tables.
        Supports PDF, DOCX, HTML, and other formats.

        Returns:
            A list with one tuple per converted document. Docling-backed paths
            yield 3-tuples (markdown, metadata, DoclingDocument); the TXT path
            yields 2-tuples (markdown, metadata). Empty list on failure.
        """
        if not (self.converter_no_ocr and self.converter_ocr and self.converter_general):
            print("docling converters not available. Skipping conversion.")
            return []
        file_ext = os.path.splitext(file_path)[1].lower()
        if file_ext not in self.SUPPORTED_FORMATS:
            print(f"Unsupported file format: {file_ext}")
            return []
        input_format = self.SUPPORTED_FORMATS[file_ext]
        if input_format == InputFormat.PDF:
            return self._convert_pdf_to_markdown(file_path)
        elif input_format == 'TXT':
            return self._convert_txt_to_markdown(file_path)
        else:
            return self._convert_general_to_markdown(file_path, input_format)

    def _convert_pdf_to_markdown(self, pdf_path: str) -> List[Tuple[str, Dict[str, Any]]]:
        """Convert a PDF, enabling OCR only when no embedded text layer is found."""
        def _pdf_has_text(path: str) -> bool:
            # Quick heuristic: any non-empty text layer means OCR can be skipped.
            doc = None
            try:
                doc = fitz.open(path)
                return any(page.get_text("text").strip() for page in doc)
            except Exception:
                return False
            finally:
                # fitz documents hold OS resources; close explicitly.
                # (The previous version leaked the handle on every call.)
                if doc is not None:
                    doc.close()
        use_ocr = not _pdf_has_text(pdf_path)
        converter = self.converter_ocr if use_ocr else self.converter_no_ocr
        ocr_msg = "(OCR enabled)" if use_ocr else "(no OCR)"
        print(f"Converting {pdf_path} to Markdown using docling {ocr_msg}...")
        return self._perform_conversion(pdf_path, converter, ocr_msg)

    def _convert_txt_to_markdown(self, file_path: str) -> List[Tuple[str, Dict[str, Any]]]:
        """Convert plain text files to markdown by reading content directly."""
        print(f"Converting {file_path} (TXT) to Markdown...")
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                content = f.read()
            # Wrap in a fenced block so the raw text is preserved verbatim.
            markdown_content = f"```\n{content}\n```"
            metadata = {"source": file_path}
            print(f"Successfully converted {file_path} (TXT) to Markdown.")
            return [(markdown_content, metadata)]
        except Exception as e:
            print(f"Error processing TXT file {file_path}: {e}")
            return []

    def _convert_general_to_markdown(self, file_path: str, input_format: InputFormat) -> List[Tuple[str, Dict[str, Any]]]:
        """Convert non-PDF formats using the general docling converter."""
        print(f"Converting {file_path} ({input_format.name}) to Markdown using docling...")
        return self._perform_conversion(file_path, self.converter_general, f"({input_format.name})")

    def _perform_conversion(self, file_path: str, converter, format_msg: str) -> List[Tuple[str, Dict[str, Any]]]:
        """Perform the actual conversion using the specified converter."""
        pages_data = []
        try:
            result = converter.convert(file_path)
            markdown_content = result.document.export_to_markdown()
            metadata = {"source": file_path}
            # Return the *DoclingDocument* object as third tuple element so downstream
            # chunkers that understand the element tree can use it. Legacy callers that
            # expect only (markdown, metadata) can simply ignore the extra value.
            pages_data.append((markdown_content, metadata, result.document))
            print(f"Successfully converted {file_path} with docling {format_msg}.")
            return pages_data
        except Exception as e:
            print(f"Error processing {file_path} with docling: {e}")
            return []
| {
"repo_id": "PromtEngineer/localGPT",
"file_path": "rag_system/ingestion/document_converter.py",
"license": "MIT License",
"lines": 115,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PromtEngineer/localGPT:backend/database.py | import sqlite3
import uuid
import json
from datetime import datetime
from typing import List, Dict, Optional, Tuple
class ChatDatabase:
    """SQLite-backed store for chat sessions, messages, per-session documents
    and vector-index bookkeeping.

    Every method opens a short-lived sqlite3 connection and closes it before
    returning, so a single instance can be shared across request handlers.
    """

    def __init__(self, db_path: Optional[str] = None):
        """Pick a database path (Docker vs. local auto-detection) and create tables."""
        if db_path is None:
            # Auto-detect environment and set appropriate path
            import os
            if os.path.exists("/app"):  # Docker environment
                self.db_path = "/app/backend/chat_data.db"
            else:  # Local development environment
                self.db_path = "backend/chat_data.db"
        else:
            self.db_path = db_path
        self.init_database()

    def init_database(self) -> None:
        """Initialize the SQLite database with required tables"""
        conn = sqlite3.connect(self.db_path)
        # NOTE(review): this cursor is only used for the newer index tables
        # below; the older tables are created via conn.execute directly.
        cursor = conn.cursor()
        # Enable foreign keys (per-connection pragma in SQLite)
        conn.execute("PRAGMA foreign_keys = ON")
        # Sessions table
        conn.execute('''
            CREATE TABLE IF NOT EXISTS sessions (
                id TEXT PRIMARY KEY,
                title TEXT NOT NULL,
                created_at TEXT NOT NULL,
                updated_at TEXT NOT NULL,
                model_used TEXT NOT NULL,
                message_count INTEGER DEFAULT 0
            )
        ''')
        # Messages table
        conn.execute('''
            CREATE TABLE IF NOT EXISTS messages (
                id TEXT PRIMARY KEY,
                session_id TEXT NOT NULL,
                content TEXT NOT NULL,
                sender TEXT NOT NULL CHECK (sender IN ('user', 'assistant')),
                timestamp TEXT NOT NULL,
                metadata TEXT DEFAULT '{}',
                FOREIGN KEY (session_id) REFERENCES sessions (id) ON DELETE CASCADE
            )
        ''')
        # Create indexes for better performance
        conn.execute('CREATE INDEX IF NOT EXISTS idx_messages_session_id ON messages(session_id)')
        conn.execute('CREATE INDEX IF NOT EXISTS idx_messages_timestamp ON messages(timestamp)')
        conn.execute('CREATE INDEX IF NOT EXISTS idx_sessions_updated_at ON sessions(updated_at)')
        # Documents table
        conn.execute('''
            CREATE TABLE IF NOT EXISTS session_documents (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                session_id TEXT NOT NULL,
                file_path TEXT NOT NULL,
                indexed INTEGER DEFAULT 0,
                FOREIGN KEY (session_id) REFERENCES sessions (id) ON DELETE CASCADE
            )
        ''')
        conn.execute('CREATE INDEX IF NOT EXISTS idx_session_documents_session_id ON session_documents(session_id)')
        # --- NEW: Index persistence tables ---
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS indexes (
                id TEXT PRIMARY KEY,
                name TEXT UNIQUE,
                description TEXT,
                created_at TEXT,
                updated_at TEXT,
                vector_table_name TEXT,
                metadata TEXT
            )
        ''')
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS index_documents (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                index_id TEXT,
                original_filename TEXT,
                stored_path TEXT,
                FOREIGN KEY(index_id) REFERENCES indexes(id)
            )
        ''')
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS session_indexes (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                session_id TEXT,
                index_id TEXT,
                linked_at TEXT,
                FOREIGN KEY(session_id) REFERENCES sessions(id),
                FOREIGN KEY(index_id) REFERENCES indexes(id)
            )
        ''')
        conn.commit()
        conn.close()
        print("✅ Database initialized successfully")

    def create_session(self, title: str, model: str) -> str:
        """Create a new chat session and return its generated UUID."""
        session_id = str(uuid.uuid4())
        now = datetime.now().isoformat()
        conn = sqlite3.connect(self.db_path)
        conn.execute('''
            INSERT INTO sessions (id, title, created_at, updated_at, model_used)
            VALUES (?, ?, ?, ?, ?)
        ''', (session_id, title, now, now, model))
        conn.commit()
        conn.close()
        print(f"📝 Created new session: {session_id[:8]}... - {title}")
        return session_id

    def get_sessions(self, limit: int = 50) -> List[Dict]:
        """Get all chat sessions, ordered by most recent"""
        conn = sqlite3.connect(self.db_path)
        conn.row_factory = sqlite3.Row
        cursor = conn.execute('''
            SELECT id, title, created_at, updated_at, model_used, message_count
            FROM sessions
            ORDER BY updated_at DESC
            LIMIT ?
        ''', (limit,))
        sessions = [dict(row) for row in cursor.fetchall()]
        conn.close()
        return sessions

    def get_session(self, session_id: str) -> Optional[Dict]:
        """Get a specific session, or None if the id is unknown."""
        conn = sqlite3.connect(self.db_path)
        conn.row_factory = sqlite3.Row
        cursor = conn.execute('''
            SELECT id, title, created_at, updated_at, model_used, message_count
            FROM sessions
            WHERE id = ?
        ''', (session_id,))
        row = cursor.fetchone()
        conn.close()
        return dict(row) if row else None

    def add_message(self, session_id: str, content: str, sender: str, metadata: Optional[Dict] = None) -> str:
        """Add a message to a session and bump the session's counters.

        sender must be 'user' or 'assistant' (enforced by a CHECK constraint).
        Returns the generated message UUID.
        """
        message_id = str(uuid.uuid4())
        now = datetime.now().isoformat()
        metadata_json = json.dumps(metadata or {})
        conn = sqlite3.connect(self.db_path)
        # Add the message
        conn.execute('''
            INSERT INTO messages (id, session_id, content, sender, timestamp, metadata)
            VALUES (?, ?, ?, ?, ?, ?)
        ''', (message_id, session_id, content, sender, now, metadata_json))
        # Update session timestamp and message count
        conn.execute('''
            UPDATE sessions
            SET updated_at = ?,
                message_count = message_count + 1
            WHERE id = ?
        ''', (now, session_id))
        conn.commit()
        conn.close()
        return message_id

    def get_messages(self, session_id: str, limit: int = 100) -> List[Dict]:
        """Get all messages for a session, oldest first, metadata JSON-decoded."""
        conn = sqlite3.connect(self.db_path)
        conn.row_factory = sqlite3.Row
        cursor = conn.execute('''
            SELECT id, content, sender, timestamp, metadata
            FROM messages
            WHERE session_id = ?
            ORDER BY timestamp ASC
            LIMIT ?
        ''', (session_id, limit))
        messages = []
        for row in cursor.fetchall():
            message = dict(row)
            message['metadata'] = json.loads(message['metadata'])
            messages.append(message)
        conn.close()
        return messages

    def get_conversation_history(self, session_id: str) -> List[Dict]:
        """Get conversation history in the format expected by Ollama"""
        messages = self.get_messages(session_id)
        history = []
        for msg in messages:
            history.append({
                "role": msg["sender"],
                "content": msg["content"]
            })
        return history

    def update_session_title(self, session_id: str, title: str) -> None:
        """Update session title"""
        conn = sqlite3.connect(self.db_path)
        conn.execute('''
            UPDATE sessions
            SET title = ?, updated_at = ?
            WHERE id = ?
        ''', (title, datetime.now().isoformat(), session_id))
        conn.commit()
        conn.close()

    def delete_session(self, session_id: str) -> bool:
        """Delete a session and all its messages (via ON DELETE CASCADE).

        NOTE(review): cascading relies on PRAGMA foreign_keys, which is
        per-connection and not re-enabled here — confirm orphan cleanup.
        """
        conn = sqlite3.connect(self.db_path)
        cursor = conn.execute('DELETE FROM sessions WHERE id = ?', (session_id,))
        deleted = cursor.rowcount > 0
        conn.commit()
        conn.close()
        if deleted:
            print(f"🗑️ Deleted session: {session_id[:8]}...")
        return deleted

    def cleanup_empty_sessions(self) -> int:
        """Remove sessions with no messages; returns how many were deleted."""
        conn = sqlite3.connect(self.db_path)
        # Find sessions with no messages
        cursor = conn.execute('''
            SELECT s.id FROM sessions s
            LEFT JOIN messages m ON s.id = m.session_id
            WHERE m.id IS NULL
        ''')
        empty_sessions = [row[0] for row in cursor.fetchall()]
        # Delete empty sessions
        deleted_count = 0
        for session_id in empty_sessions:
            cursor = conn.execute('DELETE FROM sessions WHERE id = ?', (session_id,))
            if cursor.rowcount > 0:
                deleted_count += 1
                print(f"🗑️ Cleaned up empty session: {session_id[:8]}...")
        conn.commit()
        conn.close()
        if deleted_count > 0:
            print(f"✨ Cleaned up {deleted_count} empty sessions")
        return deleted_count

    def get_stats(self) -> Dict:
        """Get database statistics (session/message totals and top model)."""
        conn = sqlite3.connect(self.db_path)
        # Get session count
        cursor = conn.execute('SELECT COUNT(*) FROM sessions')
        session_count = cursor.fetchone()[0]
        # Get message count
        cursor = conn.execute('SELECT COUNT(*) FROM messages')
        message_count = cursor.fetchone()[0]
        # Get most used model
        cursor = conn.execute('''
            SELECT model_used, COUNT(*) as count
            FROM sessions
            GROUP BY model_used
            ORDER BY count DESC
            LIMIT 1
        ''')
        most_used_model = cursor.fetchone()
        conn.close()
        return {
            "total_sessions": session_count,
            "total_messages": message_count,
            "most_used_model": most_used_model[0] if most_used_model else None
        }

    def add_document_to_session(self, session_id: str, file_path: str) -> int:
        """Adds a document file path to a session. Returns the new row id."""
        conn = sqlite3.connect(self.db_path)
        cursor = conn.execute(
            "INSERT INTO session_documents (session_id, file_path) VALUES (?, ?)",
            (session_id, file_path)
        )
        doc_id = cursor.lastrowid
        conn.commit()
        conn.close()
        print(f"📄 Added document '{file_path}' to session {session_id[:8]}...")
        return doc_id

    def get_documents_for_session(self, session_id: str) -> List[str]:
        """Retrieves all document file paths for a given session."""
        conn = sqlite3.connect(self.db_path)
        cursor = conn.execute(
            "SELECT file_path FROM session_documents WHERE session_id = ?",
            (session_id,)
        )
        paths = [row[0] for row in cursor.fetchall()]
        conn.close()
        return paths

    # -------- Index helpers ---------
    def create_index(self, name: str, description: str | None = None, metadata: dict | None = None) -> str:
        """Create a named vector index record; returns its UUID.

        The per-index LanceDB table name is derived as text_pages_<uuid>.
        """
        idx_id = str(uuid.uuid4())
        created = datetime.now().isoformat()
        vector_table = f"text_pages_{idx_id}"
        conn = sqlite3.connect(self.db_path)
        conn.execute('''
            INSERT INTO indexes (id, name, description, created_at, updated_at, vector_table_name, metadata)
            VALUES (?,?,?,?,?,?,?)
        ''', (idx_id, name, description, created, created, vector_table, json.dumps(metadata or {})))
        conn.commit()
        conn.close()
        print(f"📂 Created new index '{name}' ({idx_id[:8]})")
        return idx_id

    def get_index(self, index_id: str) -> dict | None:
        """Fetch one index row (metadata decoded, documents attached) or None."""
        conn = sqlite3.connect(self.db_path)
        conn.row_factory = sqlite3.Row
        cur = conn.execute('SELECT * FROM indexes WHERE id=?', (index_id,))
        row = cur.fetchone()
        if not row:
            conn.close()
            return None
        idx = dict(row)
        idx['metadata'] = json.loads(idx['metadata'] or '{}')
        cur = conn.execute('SELECT original_filename, stored_path FROM index_documents WHERE index_id=?', (index_id,))
        docs = [{'filename': r[0], 'stored_path': r[1]} for r in cur.fetchall()]
        idx['documents'] = docs
        conn.close()
        return idx

    def list_indexes(self) -> list[dict]:
        """List every index with decoded metadata and its document list."""
        conn = sqlite3.connect(self.db_path)
        conn.row_factory = sqlite3.Row
        rows = conn.execute('SELECT * FROM indexes').fetchall()
        res = []
        for r in rows:
            item = dict(r)
            item['metadata'] = json.loads(item['metadata'] or '{}')
            # attach documents list for convenience
            docs_cur = conn.execute('SELECT original_filename, stored_path FROM index_documents WHERE index_id=?', (item['id'],))
            docs = [{'filename': d[0], 'stored_path': d[1]} for d in docs_cur.fetchall()]
            item['documents'] = docs
            res.append(item)
        conn.close()
        return res

    def add_document_to_index(self, index_id: str, filename: str, stored_path: str) -> None:
        """Record an uploaded document (original name + stored path) under an index."""
        conn = sqlite3.connect(self.db_path)
        conn.execute('INSERT INTO index_documents (index_id, original_filename, stored_path) VALUES (?,?,?)', (index_id, filename, stored_path))
        conn.commit()
        conn.close()

    def link_index_to_session(self, session_id: str, index_id: str) -> None:
        """Attach an existing index to a chat session (timestamped link row)."""
        conn = sqlite3.connect(self.db_path)
        conn.execute('INSERT INTO session_indexes (session_id, index_id, linked_at) VALUES (?,?,?)', (session_id, index_id, datetime.now().isoformat()))
        conn.commit()
        conn.close()

    def get_indexes_for_session(self, session_id: str) -> list[str]:
        """Return index ids linked to a session, in link order."""
        conn = sqlite3.connect(self.db_path)
        cursor = conn.execute('SELECT index_id FROM session_indexes WHERE session_id=? ORDER BY linked_at', (session_id,))
        ids = [r[0] for r in cursor.fetchall()]
        conn.close()
        return ids

    def delete_index(self, index_id: str) -> bool:
        """Delete an index and its related records (documents, session links). Returns True if deleted."""
        conn = sqlite3.connect(self.db_path)
        try:
            # Get vector table name before deletion (optional, for LanceDB cleanup)
            cur = conn.execute('SELECT vector_table_name FROM indexes WHERE id = ?', (index_id,))
            row = cur.fetchone()
            vector_table_name = row[0] if row else None
            # Remove child rows first due to foreign-key constraints
            conn.execute('DELETE FROM index_documents WHERE index_id = ?', (index_id,))
            conn.execute('DELETE FROM session_indexes WHERE index_id = ?', (index_id,))
            cursor = conn.execute('DELETE FROM indexes WHERE id = ?', (index_id,))
            deleted = cursor.rowcount > 0
            conn.commit()
        finally:
            conn.close()
        if deleted:
            print(f"🗑️ Deleted index {index_id[:8]}... and related records")
            # Optional: attempt to drop LanceDB table if available
            if vector_table_name:
                try:
                    from rag_system.indexing.embedders import LanceDBManager
                    import os
                    db_path = os.getenv('LANCEDB_PATH') or './rag_system/index_store/lancedb'
                    ldb = LanceDBManager(db_path)
                    db = ldb.db
                    if hasattr(db, 'table_names') and vector_table_name in db.table_names():
                        db.drop_table(vector_table_name)
                        print(f"🚮 Dropped LanceDB table '{vector_table_name}'")
                except Exception as e:
                    # Best-effort cleanup: the SQL deletion above already succeeded.
                    print(f"⚠️ Could not drop LanceDB table '{vector_table_name}': {e}")
        return deleted

    def update_index_metadata(self, index_id: str, updates: dict) -> None:
        """Merge new key/values into an index's metadata JSON column."""
        conn = sqlite3.connect(self.db_path)
        conn.row_factory = sqlite3.Row
        cur = conn.execute('SELECT metadata FROM indexes WHERE id=?', (index_id,))
        row = cur.fetchone()
        if row is None:
            conn.close()
            raise ValueError("Index not found")
        existing = json.loads(row['metadata'] or '{}')
        existing.update(updates)
        conn.execute('UPDATE indexes SET metadata=?, updated_at=? WHERE id=?', (json.dumps(existing), datetime.now().isoformat(), index_id))
        conn.commit()
        conn.close()

    def inspect_and_populate_index_metadata(self, index_id: str) -> dict:
        """
        Inspect LanceDB table to extract metadata for older indexes.
        Returns the inferred metadata or empty dict if inspection fails.
        """
        try:
            # Get index info
            index_info = self.get_index(index_id)
            if not index_info:
                return {}
            # Check if metadata is already populated
            if index_info.get('metadata') and len(index_info['metadata']) > 0:
                return index_info['metadata']
            # Try to inspect the LanceDB table
            vector_table_name = index_info.get('vector_table_name')
            if not vector_table_name:
                return {}
            try:
                # Try to import the RAG system modules
                try:
                    from rag_system.indexing.embedders import LanceDBManager
                    import os
                    # Use the same path as the system
                    db_path = os.getenv('LANCEDB_PATH') or './rag_system/index_store/lancedb'
                    ldb = LanceDBManager(db_path)
                    # Check if table exists
                    if not hasattr(ldb.db, 'table_names') or vector_table_name not in ldb.db.table_names():
                        # Table doesn't exist - this means the index was never properly built
                        inferred_metadata = {
                            'status': 'incomplete',
                            'issue': 'Vector table not found - index may not have been built properly',
                            'vector_table_expected': vector_table_name,
                            'available_tables': list(ldb.db.table_names()) if hasattr(ldb.db, 'table_names') else [],
                            'metadata_inferred_at': datetime.now().isoformat(),
                            'metadata_source': 'lancedb_inspection'
                        }
                        self.update_index_metadata(index_id, inferred_metadata)
                        print(f"⚠️ Index {index_id[:8]}... appears incomplete - vector table missing")
                        return inferred_metadata
                    # Get table and inspect schema/data
                    table = ldb.db.open_table(vector_table_name)
                    # Get a sample record to inspect - use correct LanceDB API
                    try:
                        # Try to get sample data using proper LanceDB methods
                        # NOTE(review): to_pandas() materializes the entire table;
                        # could be expensive for large indexes.
                        sample_df = table.to_pandas()
                        if len(sample_df) == 0:
                            inferred_metadata = {
                                'status': 'empty',
                                'issue': 'Vector table exists but contains no data',
                                'metadata_inferred_at': datetime.now().isoformat(),
                                'metadata_source': 'lancedb_inspection'
                            }
                            self.update_index_metadata(index_id, inferred_metadata)
                            return inferred_metadata
                        # Take only first row for inspection
                        sample_df = sample_df.head(1)
                    except Exception as e:
                        print(f"⚠️ Could not read data from table {vector_table_name}: {e}")
                        return {}
                    # Infer metadata from table structure
                    inferred_metadata = {
                        'status': 'functional',
                        'total_chunks': len(table.to_pandas()),  # Get total count
                    }
                    # Check vector dimensions
                    if 'vector' in sample_df.columns:
                        vector_data = sample_df['vector'].iloc[0]
                        if isinstance(vector_data, list):
                            inferred_metadata['vector_dimensions'] = len(vector_data)
                            # Try to infer embedding model from vector dimensions
                            dim_to_model = {
                                384: 'BAAI/bge-small-en-v1.5 (or similar)',
                                512: 'sentence-transformers/all-MiniLM-L6-v2 (or similar)',
                                768: 'BAAI/bge-base-en-v1.5 (or similar)',
                                1024: 'Qwen/Qwen3-Embedding-0.6B (or similar)',
                                1536: 'text-embedding-ada-002 (or similar)'
                            }
                            if len(vector_data) in dim_to_model:
                                inferred_metadata['embedding_model_inferred'] = dim_to_model[len(vector_data)]
                    # Try to parse metadata from sample record
                    if 'metadata' in sample_df.columns:
                        try:
                            sample_metadata = json.loads(sample_df['metadata'].iloc[0])
                            # Look for common metadata fields that might give us clues
                            if 'document_id' in sample_metadata:
                                inferred_metadata['has_document_structure'] = True
                            if 'chunk_index' in sample_metadata:
                                inferred_metadata['has_chunk_indexing'] = True
                            if 'original_text' in sample_metadata:
                                inferred_metadata['has_contextual_enrichment'] = True
                                inferred_metadata['retrieval_mode_inferred'] = 'hybrid (contextual enrichment detected)'
                            # Check for chunk size patterns
                            if 'text' in sample_df.columns:
                                text_length = len(sample_df['text'].iloc[0])
                                if text_length > 0:
                                    inferred_metadata['sample_chunk_length'] = text_length
                                    # Rough chunk size estimation
                                    estimated_tokens = text_length // 4  # rough estimate: 4 chars per token
                                    if estimated_tokens < 300:
                                        inferred_metadata['chunk_size_inferred'] = '256 tokens (estimated)'
                                    elif estimated_tokens < 600:
                                        inferred_metadata['chunk_size_inferred'] = '512 tokens (estimated)'
                                    else:
                                        inferred_metadata['chunk_size_inferred'] = '1024+ tokens (estimated)'
                        except (json.JSONDecodeError, KeyError):
                            pass
                    # Check if FTS index exists
                    try:
                        indices = table.list_indices()
                        fts_exists = any('fts' in idx.name.lower() for idx in indices)
                        if fts_exists:
                            inferred_metadata['has_fts_index'] = True
                            inferred_metadata['retrieval_mode_inferred'] = 'hybrid (FTS + vector)'
                        else:
                            inferred_metadata['retrieval_mode_inferred'] = 'vector-only'
                    except:
                        pass
                    # Add inspection timestamp
                    inferred_metadata['metadata_inferred_at'] = datetime.now().isoformat()
                    inferred_metadata['metadata_source'] = 'lancedb_inspection'
                    # Update the database with inferred metadata
                    if inferred_metadata:
                        self.update_index_metadata(index_id, inferred_metadata)
                        print(f"🔍 Inferred metadata for index {index_id[:8]}...: {len(inferred_metadata)} fields")
                    return inferred_metadata
                except ImportError as import_error:
                    # RAG system modules not available - provide basic fallback metadata
                    print(f"⚠️ RAG system modules not available for inspection: {import_error}")
                    # Check if this is actually a legacy index by looking at creation date
                    created_at = index_info.get('created_at', '')
                    is_recent = False
                    if created_at:
                        try:
                            from datetime import datetime, timedelta
                            created_date = datetime.fromisoformat(created_at.replace('Z', '+00:00'))
                            # Consider indexes created in the last 30 days as "recent"
                            is_recent = created_date > datetime.now().replace(tzinfo=created_date.tzinfo) - timedelta(days=30)
                        except:
                            pass
                    # Provide basic fallback metadata with better status detection
                    if is_recent:
                        status = 'functional'
                        issue = 'Detailed configuration inspection requires RAG system modules, but index appears functional'
                    else:
                        status = 'legacy'
                        issue = 'This index was created before metadata tracking was implemented. Configuration details are not available.'
                    fallback_metadata = {
                        'status': status,
                        'issue': issue,
                        'metadata_inferred_at': datetime.now().isoformat(),
                        'metadata_source': 'fallback_inspection',
                        'documents_count': len(index_info.get('documents', [])),
                        'created_at': index_info.get('created_at', 'unknown'),
                        'inspection_limitation': 'Backend server cannot access full RAG system modules for detailed inspection'
                    }
                    # Try to infer some basic info from the vector table name
                    if vector_table_name:
                        fallback_metadata['vector_table_name'] = vector_table_name
                        fallback_metadata['note'] = 'Vector table exists but detailed inspection requires RAG system modules'
                    self.update_index_metadata(index_id, fallback_metadata)
                    status_msg = "recent but limited inspection" if is_recent else "legacy"
                    print(f"📝 Added fallback metadata for {status_msg} index {index_id[:8]}...")
                    return fallback_metadata
            except Exception as e:
                print(f"⚠️ Could not inspect LanceDB table for index {index_id[:8]}...: {e}")
                return {}
        except Exception as e:
            print(f"⚠️ Failed to inspect index metadata for {index_id[:8]}...: {e}")
            return {}
def generate_session_title(first_message: str, max_length: int = 50) -> str:
    """
    Generate a short session title from the first chat message.

    A leading conversational prefix ("hey", "can you", ...) is stripped only
    when it appears as a whole word, the result is capitalized and truncated
    to max_length characters (with an ellipsis), and "New Chat" is returned
    when nothing usable remains.

    Args:
        first_message: The first user message of the session.
        max_length: Maximum title length before truncation.

    Returns:
        A human-readable session title.
    """
    # Clean up the message
    title = first_message.strip()
    # Remove common prefixes, but only on a word boundary: the previous
    # plain startswith() check matched mid-word, so e.g. "history of Rome"
    # lost its "hi" and became "Story of Rome".
    prefixes = ["hey", "hi", "hello", "can you", "please", "i want", "i need"]
    title_lower = title.lower()
    for prefix in prefixes:
        if title_lower == prefix or title_lower.startswith(prefix + " "):
            title = title[len(prefix):].strip()
            break
    # Capitalize first letter
    if title:
        title = title[0].upper() + title[1:]
    # Truncate if too long
    if len(title) > max_length:
        title = title[:max_length].strip() + "..."
    # Fallback for empty or too-short results
    if not title or len(title) < 3:
        title = "New Chat"
    return title
# Global database instance shared by the backend.
# NOTE(review): constructed at import time, so importing this module touches
# the filesystem (creates/opens the SQLite file) — confirm that is intended.
db = ChatDatabase()
if __name__ == "__main__":
    # Manual smoke test: exercises the database end-to-end.
    # Side effect: writes to the real chat_data.db file.
    print("🧪 Testing database...")
    # Create a test session
    session_id = db.create_session("Test Chat", "llama3.2:latest")
    # Add some messages
    db.add_message(session_id, "Hello!", "user")
    db.add_message(session_id, "Hi there! How can I help you?", "assistant")
    # Get messages
    messages = db.get_messages(session_id)
    print(f"📨 Messages: {len(messages)}")
    # Get sessions
    sessions = db.get_sessions()
    print(f"📋 Sessions: {len(sessions)}")
    # Get stats
    stats = db.get_stats()
    print(f"📊 Stats: {stats}")
print("✅ Database test completed!") | {
"repo_id": "PromtEngineer/localGPT",
"file_path": "backend/database.py",
"license": "MIT License",
"lines": 586,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PromtEngineer/localGPT:backend/ollama_client.py | import requests
import json
import os
from typing import List, Dict, Optional
class OllamaClient:
def __init__(self, base_url: Optional[str] = None):
if base_url is None:
base_url = os.getenv("OLLAMA_HOST", "http://localhost:11434")
self.base_url = base_url
self.api_url = f"{base_url}/api"
def is_ollama_running(self) -> bool:
"""Check if Ollama server is running"""
try:
response = requests.get(f"{self.base_url}/api/tags", timeout=5)
return response.status_code == 200
except requests.exceptions.RequestException:
return False
def list_models(self) -> List[str]:
"""Get list of available models"""
try:
response = requests.get(f"{self.api_url}/tags")
if response.status_code == 200:
models = response.json().get("models", [])
return [model["name"] for model in models]
return []
except requests.exceptions.RequestException as e:
print(f"Error fetching models: {e}")
return []
def pull_model(self, model_name: str) -> bool:
"""Pull a model if not available"""
try:
response = requests.post(
f"{self.api_url}/pull",
json={"name": model_name},
stream=True
)
if response.status_code == 200:
print(f"Pulling model {model_name}...")
for line in response.iter_lines():
if line:
data = json.loads(line)
if "status" in data:
print(f"Status: {data['status']}")
if data.get("status") == "success":
return True
return True
return False
except requests.exceptions.RequestException as e:
print(f"Error pulling model: {e}")
return False
def chat(self, message: str, model: str = "llama3.2", conversation_history: List[Dict] = None, enable_thinking: bool = True) -> str:
"""Send a chat message to Ollama"""
if conversation_history is None:
conversation_history = []
# Add user message to conversation
messages = conversation_history + [{"role": "user", "content": message}]
try:
payload = {
"model": model,
"messages": messages,
"stream": False,
}
# Multiple approaches to disable thinking tokens
if not enable_thinking:
payload.update({
"think": False, # Native Ollama parameter
"options": {
"think": False,
"thinking": False,
"temperature": 0.7,
"top_p": 0.9
}
})
else:
payload["think"] = True
response = requests.post(
f"{self.api_url}/chat",
json=payload,
timeout=60
)
if response.status_code == 200:
result = response.json()
response_text = result["message"]["content"]
# Additional cleanup: remove any thinking tokens that might slip through
if not enable_thinking:
# Remove common thinking token patterns
import re
response_text = re.sub(r'<think>.*?</think>', '', response_text, flags=re.DOTALL | re.IGNORECASE)
response_text = re.sub(r'<thinking>.*?</thinking>', '', response_text, flags=re.DOTALL | re.IGNORECASE)
response_text = response_text.strip()
return response_text
else:
return f"Error: {response.status_code} - {response.text}"
except requests.exceptions.RequestException as e:
return f"Connection error: {e}"
def chat_stream(self, message: str, model: str = "llama3.2", conversation_history: List[Dict] = None, enable_thinking: bool = True):
    """Stream a chat response from Ollama.

    Generator: yields reply text chunks as they arrive. On an HTTP error
    status or a network failure it yields a single error string instead of
    raising, so callers can just iterate.
    """
    if conversation_history is None:
        conversation_history = []
    # Prior turns plus the new user message.
    messages = conversation_history + [{"role": "user", "content": message}]
    try:
        payload = {
            "model": model,
            "messages": messages,
            "stream": True,
        }
        # Multiple approaches to disable thinking tokens
        if not enable_thinking:
            payload.update({
                "think": False,  # Native Ollama parameter
                "options": {
                    "think": False,
                    "thinking": False,
                    "temperature": 0.7,
                    "top_p": 0.9
                }
            })
        else:
            payload["think"] = True
        response = requests.post(
            f"{self.api_url}/chat",
            json=payload,
            stream=True,
            timeout=60
        )
        if response.status_code == 200:
            # Each line is an independent JSON object carrying one delta.
            for line in response.iter_lines():
                if line:
                    try:
                        data = json.loads(line)
                        if "message" in data and "content" in data["message"]:
                            content = data["message"]["content"]
                            # Filter out thinking tokens in streaming mode
                            if not enable_thinking:
                                # Skip content that looks like thinking tokens
                                # (coarse: drops the whole chunk containing the tag)
                                if '<think>' in content.lower() or '<thinking>' in content.lower():
                                    continue
                            yield content
                    except json.JSONDecodeError:
                        continue
        else:
            yield f"Error: {response.status_code} - {response.text}"
    except requests.exceptions.RequestException as e:
        yield f"Connection error: {e}"
def main():
    """Smoke-test the Ollama client: health check, model listing, pull, chat."""
    client = OllamaClient()
    # Bail out early when the daemon is unreachable.
    if not client.is_ollama_running():
        print("❌ Ollama is not running. Please start Ollama first.")
        print("Install: https://ollama.ai")
        print("Run: ollama serve")
        return
    print("✅ Ollama is running!")
    models = client.list_models()
    print(f"Available models: {models}")
    # Ensure llama3.2 is present, pulling it when missing.
    model_name = "llama3.2"
    base_names = [m.split(":")[0] for m in models]
    if model_name not in base_names:
        print(f"Model {model_name} not found. Pulling...")
        if not client.pull_model(model_name):
            print(f"❌ Failed to pull model {model_name}")
            return
        print(f"✅ Model {model_name} pulled successfully!")
    # Exercise a round-trip chat call.
    print("\n🤖 Testing chat...")
    response = client.chat("Hello! Can you tell me a short joke?", model_name)
    print(f"AI: {response}")
if __name__ == "__main__":
main() | {
"repo_id": "PromtEngineer/localGPT",
"file_path": "backend/ollama_client.py",
"license": "MIT License",
"lines": 172,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PromtEngineer/localGPT:backend/server.py | import json
import http.server
import socketserver
import cgi
import os
import uuid
from urllib.parse import urlparse, parse_qs
import requests # 🆕 Import requests for making HTTP calls
import sys
from datetime import datetime
# Add parent directory to path so we can import rag_system modules
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Import RAG system modules for complete metadata
try:
from rag_system.main import PIPELINE_CONFIGS
RAG_SYSTEM_AVAILABLE = True
print("✅ RAG system modules accessible from backend")
except ImportError as e:
PIPELINE_CONFIGS = {}
RAG_SYSTEM_AVAILABLE = False
print(f"⚠️ RAG system modules not available: {e}")
from ollama_client import OllamaClient
from database import db, generate_session_title
import simple_pdf_processor as pdf_module
from simple_pdf_processor import initialize_simple_pdf_processor
from typing import List, Dict, Any
import re
# 🆕 Reusable TCPServer with address reuse enabled
class ReusableTCPServer(socketserver.TCPServer):
    """TCPServer with SO_REUSEADDR set, so a restarted server can rebind
    its port immediately instead of waiting out TIME_WAIT."""
    allow_reuse_address = True
class ChatHandler(http.server.BaseHTTPRequestHandler):
def __init__(self, *args, **kwargs):
    # Create the Ollama client BEFORE super().__init__: the stdlib
    # BaseHTTPRequestHandler constructor handles the request immediately,
    # so the attribute must already exist when do_GET/do_POST run.
    self.ollama_client = OllamaClient()
    super().__init__(*args, **kwargs)
def do_OPTIONS(self):
    """Handle CORS preflight requests"""
    self.send_response(200)
    cors_headers = (
        ('Access-Control-Allow-Origin', '*'),
        ('Access-Control-Allow-Methods', 'GET, POST, DELETE, OPTIONS'),
        ('Access-Control-Allow-Headers', 'Content-Type'),
    )
    for header_name, header_value in cors_headers:
        self.send_header(header_name, header_value)
    self.end_headers()
def do_GET(self):
    """Handle GET requests.

    Dispatches on the URL path. NOTE: branch order matters — the fixed
    path /sessions/cleanup and the suffix routes (/documents, /indexes)
    must be checked before the generic /sessions/<id> pattern.
    """
    parsed_path = urlparse(self.path)
    if parsed_path.path == '/health':
        # Liveness summary: Ollama status, model list, and DB stats.
        self.send_json_response({
            "status": "ok",
            "ollama_running": self.ollama_client.is_ollama_running(),
            "available_models": self.ollama_client.list_models(),
            "database_stats": db.get_stats()
        })
    elif parsed_path.path == '/sessions':
        self.handle_get_sessions()
    elif parsed_path.path == '/sessions/cleanup':
        self.handle_cleanup_sessions()
    elif parsed_path.path == '/models':
        self.handle_get_models()
    elif parsed_path.path == '/indexes':
        self.handle_get_indexes()
    elif parsed_path.path.startswith('/indexes/') and parsed_path.path.count('/') == 2:
        # /indexes/<index_id>
        index_id = parsed_path.path.split('/')[-1]
        self.handle_get_index(index_id)
    elif parsed_path.path.startswith('/sessions/') and parsed_path.path.endswith('/documents'):
        # /sessions/<session_id>/documents
        session_id = parsed_path.path.split('/')[-2]
        self.handle_get_session_documents(session_id)
    elif parsed_path.path.startswith('/sessions/') and parsed_path.path.endswith('/indexes'):
        # /sessions/<session_id>/indexes
        session_id = parsed_path.path.split('/')[-2]
        self.handle_get_session_indexes(session_id)
    elif parsed_path.path.startswith('/sessions/') and parsed_path.path.count('/') == 2:
        # /sessions/<session_id>
        session_id = parsed_path.path.split('/')[-1]
        self.handle_get_session(session_id)
    else:
        self.send_response(404)
        self.end_headers()
def do_POST(self):
    """Handle POST requests.

    Dispatches on the URL path. NOTE: branch order matters — the nested
    /sessions/<id>/indexes/<idx> route and the suffix routes (/upload,
    /build, /messages, /index, /rename) are matched before anything more
    generic.
    """
    parsed_path = urlparse(self.path)
    if parsed_path.path == '/chat':
        self.handle_chat()
    elif parsed_path.path == '/sessions':
        self.handle_create_session()
    elif parsed_path.path == '/indexes':
        self.handle_create_index()
    elif parsed_path.path.startswith('/indexes/') and parsed_path.path.endswith('/upload'):
        # /indexes/<index_id>/upload
        index_id = parsed_path.path.split('/')[-2]
        self.handle_index_file_upload(index_id)
    elif parsed_path.path.startswith('/indexes/') and parsed_path.path.endswith('/build'):
        # /indexes/<index_id>/build
        index_id = parsed_path.path.split('/')[-2]
        self.handle_build_index(index_id)
    elif parsed_path.path.startswith('/sessions/') and '/indexes/' in parsed_path.path:
        # /sessions/<session_id>/indexes/<index_id>
        parts = parsed_path.path.split('/')
        session_id = parts[2]
        index_id = parts[4]
        self.handle_link_index_to_session(session_id, index_id)
    elif parsed_path.path.startswith('/sessions/') and parsed_path.path.endswith('/messages'):
        # /sessions/<session_id>/messages
        session_id = parsed_path.path.split('/')[-2]
        self.handle_session_chat(session_id)
    elif parsed_path.path.startswith('/sessions/') and parsed_path.path.endswith('/upload'):
        # /sessions/<session_id>/upload
        session_id = parsed_path.path.split('/')[-2]
        self.handle_file_upload(session_id)
    elif parsed_path.path.startswith('/sessions/') and parsed_path.path.endswith('/index'):
        # /sessions/<session_id>/index
        session_id = parsed_path.path.split('/')[-2]
        self.handle_index_documents(session_id)
    elif parsed_path.path.startswith('/sessions/') and parsed_path.path.endswith('/rename'):
        # /sessions/<session_id>/rename
        session_id = parsed_path.path.split('/')[-2]
        self.handle_rename_session(session_id)
    else:
        self.send_response(404)
        self.end_headers()
def do_DELETE(self):
    """Handle DELETE requests"""
    path = urlparse(self.path).path
    resource_id = path.rsplit('/', 1)[-1]
    if path.startswith('/sessions/') and path.count('/') == 2:
        # /sessions/<session_id>
        self.handle_delete_session(resource_id)
    elif path.startswith('/indexes/') and path.count('/') == 2:
        # /indexes/<index_id>
        self.handle_delete_index(resource_id)
    else:
        self.send_response(404)
        self.end_headers()
def handle_chat(self):
    """Handle legacy chat requests (without sessions)."""
    try:
        body_len = int(self.headers['Content-Length'])
        request = json.loads(self.rfile.read(body_len).decode('utf-8'))
        message = request.get('message', '')
        model = request.get('model', 'llama3.2:latest')
        history = request.get('conversation_history', [])
        # Validate input before touching Ollama.
        if not message:
            self.send_json_response({"error": "Message is required"}, status_code=400)
            return
        if not self.ollama_client.is_ollama_running():
            self.send_json_response(
                {"error": "Ollama is not running. Please start Ollama first."},
                status_code=503
            )
            return
        reply = self.ollama_client.chat(message, model, history)
        self.send_json_response({
            "response": reply,
            "model": model,
            "message_count": len(history) + 1
        })
    except json.JSONDecodeError:
        self.send_json_response({"error": "Invalid JSON"}, status_code=400)
    except Exception as e:
        self.send_json_response({"error": f"Server error: {str(e)}"}, status_code=500)
def handle_get_sessions(self):
    """Return every chat session plus a total count."""
    try:
        all_sessions = db.get_sessions()
        self.send_json_response({
            "sessions": all_sessions,
            "total": len(all_sessions),
        })
    except Exception as e:
        self.send_json_response(
            {"error": f"Failed to get sessions: {str(e)}"}, status_code=500
        )
def handle_cleanup_sessions(self):
    """Delete sessions with no messages and report how many were removed."""
    try:
        removed = db.cleanup_empty_sessions()
        self.send_json_response({
            "message": f"Cleaned up {removed} empty sessions",
            "cleanup_count": removed,
        })
    except Exception as e:
        self.send_json_response(
            {"error": f"Failed to cleanup sessions: {str(e)}"}, status_code=500
        )
def handle_get_session(self, session_id: str):
    """Fetch one session together with its full message history."""
    try:
        session = db.get_session(session_id)
        if not session:
            self.send_json_response({"error": "Session not found"}, status_code=404)
            return
        self.send_json_response({
            "session": session,
            "messages": db.get_messages(session_id),
        })
    except Exception as e:
        self.send_json_response(
            {"error": f"Failed to get session: {str(e)}"}, status_code=500
        )
def handle_get_session_documents(self, session_id: str):
    """Return documents and basic info for a session."""
    try:
        session = db.get_session(session_id)
        if not session:
            self.send_json_response({"error": "Session not found"}, status_code=404)
            return
        stored_paths = db.get_documents_for_session(session_id)
        # Stored names look like "<uuid>_<original name>"; show the original part.
        display_names = []
        for stored in stored_paths:
            base = os.path.basename(stored)
            display_names.append(base.split('_', 1)[-1] if '_' in base else base)
        self.send_json_response({
            "session": session,
            "files": display_names,
            "file_count": len(stored_paths),
        })
    except Exception as e:
        self.send_json_response({"error": f"Failed to get documents: {str(e)}"}, status_code=500)
def handle_create_session(self):
    """Create a new chat session from a JSON body ({title?, model?})."""
    try:
        body_len = int(self.headers['Content-Length'])
        request = json.loads(self.rfile.read(body_len).decode('utf-8'))
        new_id = db.create_session(
            request.get('title', 'New Chat'),
            request.get('model', 'llama3.2:latest'),
        )
        # Echo the freshly stored session back to the caller.
        self.send_json_response({
            "session": db.get_session(new_id),
            "session_id": new_id,
        }, status_code=201)
    except json.JSONDecodeError:
        self.send_json_response({"error": "Invalid JSON"}, status_code=400)
    except Exception as e:
        self.send_json_response(
            {"error": f"Failed to create session: {str(e)}"}, status_code=500
        )
def handle_session_chat(self, session_id: str):
    """
    Handle chat within a specific session.

    Intelligently routes between direct LLM (fast) and the RAG pipeline
    (document-aware), persists both sides of the exchange, and returns the
    assistant reply plus any source documents and the routing decision.
    """
    # FIX: define up-front so the BrokenPipeError handler below can reference
    # it safely even when the client disconnects before the body is parsed
    # (previously this raised NameError inside the except block).
    message = ''
    try:
        session = db.get_session(session_id)
        if not session:
            self.send_json_response({"error": "Session not found"}, status_code=404)
            return
        content_length = int(self.headers['Content-Length'])
        post_data = self.rfile.read(content_length)
        data = json.loads(post_data.decode('utf-8'))
        message = data.get('message', '')
        if not message:
            self.send_json_response({"error": "Message is required"}, status_code=400)
            return
        # First message also titles the session.
        if session['message_count'] == 0:
            title = generate_session_title(message)
            db.update_session_title(session_id, title)
        # Add user message to database first
        user_message_id = db.add_message(session_id, message, "user")
        # 🎯 SMART ROUTING: Decide between direct LLM vs RAG
        idx_ids = db.get_indexes_for_session(session_id)
        force_rag = bool(data.get("force_rag", False))
        use_rag = True if force_rag else self._should_use_rag(message, idx_ids)
        if use_rag:
            # 🔍 --- Use RAG Pipeline for Document-Related Queries ---
            print(f"🔍 Using RAG pipeline for document query: '{message[:50]}...'")
            response_text, source_docs = self._handle_rag_query(session_id, message, data, idx_ids)
        else:
            # ⚡ --- Use Direct LLM for General Queries (FAST) ---
            print(f"⚡ Using direct LLM for general query: '{message[:50]}...'")
            response_text, source_docs = self._handle_direct_llm_query(session_id, message, session)
        # Add AI response to database
        ai_message_id = db.add_message(session_id, response_text, "assistant")
        updated_session = db.get_session(session_id)
        self.send_json_response({
            "response": response_text,
            "session": updated_session,
            "source_documents": source_docs,
            "used_rag": use_rag
        })
    except BrokenPipeError:
        # Client disconnected - this is normal for long queries, just log it
        print(f"⚠️ Client disconnected during RAG processing for query: '{message[:30]}...'")
    except json.JSONDecodeError:
        self.send_json_response({
            "error": "Invalid JSON"
        }, status_code=400)
    except Exception as e:
        print(f"❌ Server error in session chat: {str(e)}")
        try:
            self.send_json_response({
                "error": f"Server error: {str(e)}"
            }, status_code=500)
        except BrokenPipeError:
            print(f"⚠️ Client disconnected during error response")
def _should_use_rag(self, message: str, idx_ids: List[str]) -> bool:
"""
🧠 ENHANCED: Determine if a query should use RAG pipeline using document overviews.
Args:
message: The user's query
idx_ids: List of index IDs associated with the session
Returns:
bool: True if should use RAG, False for direct LLM
"""
# No indexes = definitely no RAG needed
if not idx_ids:
return False
# Load document overviews for intelligent routing
try:
doc_overviews = self._load_document_overviews(idx_ids)
if doc_overviews:
return self._route_using_overviews(message, doc_overviews)
except Exception as e:
print(f"⚠️ Overview-based routing failed, falling back to simple routing: {e}")
# Fallback to simple pattern matching if overviews unavailable
return self._simple_pattern_routing(message, idx_ids)
def _load_document_overviews(self, idx_ids: List[str]) -> List[str]:
    """Load and aggregate overviews for the given index IDs.

    Strategy:
        1. Attempt to load each index's dedicated overview file.
        2. Aggregate all overviews found across available files (deduplicated).
        3. If none of the index files exist, fall back to the legacy global overview file.

    Returns at most 40 overview strings (router-prompt size limit).
    """
    import os, json
    aggregated: list[str] = []
    # 1️⃣ Collect overviews from per-index files
    for idx in idx_ids:
        # Relative paths are tried in order because the working directory
        # differs depending on how the server was launched.
        candidate_paths = [
            f"../index_store/overviews/{idx}.jsonl",
            f"index_store/overviews/{idx}.jsonl",
            f"./index_store/overviews/{idx}.jsonl",
        ]
        for p in candidate_paths:
            if os.path.exists(p):
                print(f"📖 Loading overviews from: {p}")
                try:
                    with open(p, "r", encoding="utf-8") as f:
                        # One JSON object per line; only the "overview" field matters.
                        for line in f:
                            if not line.strip():
                                continue
                            try:
                                record = json.loads(line)
                                overview = record.get("overview", "").strip()
                                if overview:
                                    aggregated.append(overview)
                            except json.JSONDecodeError:
                                continue  # skip malformed lines
                    break  # Stop after the first existing path for this idx
                except Exception as e:
                    print(f"⚠️ Error reading {p}: {e}")
                    break  # Don't keep trying other paths for this idx if read failed
    # 2️⃣ Fall back to legacy global file if no per-index overviews found
    if not aggregated:
        legacy_paths = [
            "../index_store/overviews/overviews.jsonl",
            "index_store/overviews/overviews.jsonl",
            "./index_store/overviews/overviews.jsonl",
        ]
        for p in legacy_paths:
            if os.path.exists(p):
                print(f"⚠️ Falling back to legacy overviews file: {p}")
                try:
                    with open(p, "r", encoding="utf-8") as f:
                        for line in f:
                            if not line.strip():
                                continue
                            try:
                                record = json.loads(line)
                                overview = record.get("overview", "").strip()
                                if overview:
                                    aggregated.append(overview)
                            except json.JSONDecodeError:
                                continue
                except Exception as e:
                    print(f"⚠️ Error reading legacy overviews file {p}: {e}")
                # Stop after the first existing legacy path, success or not.
                break
    # Limit for performance
    if aggregated:
        print(f"✅ Loaded {len(aggregated)} document overviews from {len(idx_ids)} index(es)")
    else:
        print(f"⚠️ No overviews found for indices {idx_ids}")
    return aggregated[:40]
def _route_using_overviews(self, query: str, overviews: List[str]) -> bool:
    """
    🎯 Use document overviews and LLM to make intelligent routing decisions.

    Builds a one-shot routing prompt from the overviews, asks a small local
    model for a single-word verdict, and parses it leniently.
    Returns True if RAG should be used, False for direct LLM.
    """
    if not overviews:
        return False
    # Format overviews for the routing prompt
    overviews_block = "\n".join(f"[{i+1}] {ov}" for i, ov in enumerate(overviews))
    # NOTE: prompt size grows with the number of overviews (caller caps at 40).
    router_prompt = f"""You are an AI router deciding whether a user question should be answered via:
• "USE_RAG" – search the user's private documents (described below)
• "DIRECT_LLM" – reply from general knowledge (greetings, public facts, unrelated topics)
CRITICAL PRINCIPLE: When documents exist in the KB, strongly prefer USE_RAG unless the query is purely conversational or completely unrelated to any possible document content.
RULES:
1. If ANY overview clearly relates to the question (entities, numbers, addresses, dates, amounts, companies, technical terms) → USE_RAG
2. For document operations (summarize, analyze, explain, extract, find) → USE_RAG
3. For greetings only ("Hi", "Hello", "Thanks") → DIRECT_LLM
4. For pure math/world knowledge clearly unrelated to documents → DIRECT_LLM
5. When in doubt → USE_RAG
DOCUMENT OVERVIEWS:
{overviews_block}
DECISION EXAMPLES:
• "What invoice amounts are mentioned?" → USE_RAG (document-specific)
• "Who is PromptX AI LLC?" → USE_RAG (entity in documents)
• "What is the DeepSeek model?" → USE_RAG (mentioned in documents)
• "Summarize the research paper" → USE_RAG (document operation)
• "What is 2+2?" → DIRECT_LLM (pure math)
• "Hi there" → DIRECT_LLM (greeting only)
USER QUERY: "{query}"
Respond with exactly one word: USE_RAG or DIRECT_LLM"""
    try:
        # Use Ollama to make the routing decision
        response = self.ollama_client.chat(
            message=router_prompt,
            model="qwen3:0.6b", # Fast model for routing
            enable_thinking=False # Fast routing
        )
        # The response is directly the text, not a dict
        decision = response.strip().upper()
        # Parse decision — substring match tolerates extra words in the reply.
        if "USE_RAG" in decision:
            print(f"🎯 Overview-based routing: USE_RAG for query: '{query[:50]}...'")
            return True
        elif "DIRECT_LLM" in decision:
            print(f"⚡ Overview-based routing: DIRECT_LLM for query: '{query[:50]}...'")
            return False
        else:
            print(f"⚠️ Unclear routing decision '{decision}', defaulting to RAG")
            return True # Default to RAG when uncertain
    except Exception as e:
        print(f"❌ LLM routing failed: {e}, falling back to pattern matching")
        return self._simple_pattern_routing(query, [])
def _simple_pattern_routing(self, message: str, idx_ids: List[str]) -> bool:
"""
📝 FALLBACK: Simple pattern-based routing (original logic).
"""
message_lower = message.lower()
# Always use Direct LLM for greetings and casual conversation
greeting_patterns = [
'hello', 'hi', 'hey', 'greetings', 'good morning', 'good afternoon', 'good evening',
'how are you', 'how do you do', 'nice to meet', 'pleasure to meet',
'thanks', 'thank you', 'bye', 'goodbye', 'see you', 'talk to you later',
'test', 'testing', 'check', 'ping', 'just saying', 'nevermind',
'ok', 'okay', 'alright', 'got it', 'understood', 'i see'
]
# Check for greeting patterns
for pattern in greeting_patterns:
if pattern in message_lower:
return False # Use Direct LLM for greetings
# Keywords that strongly suggest document-related queries
rag_indicators = [
'document', 'doc', 'file', 'pdf', 'text', 'content', 'page',
'according to', 'based on', 'mentioned', 'states', 'says',
'what does', 'summarize', 'summary', 'analyze', 'analysis',
'quote', 'citation', 'reference', 'source', 'evidence',
'explain from', 'extract', 'find in', 'search for'
]
# Check for strong RAG indicators
for indicator in rag_indicators:
if indicator in message_lower:
return True
# Question words + substantial length might benefit from RAG
question_words = ['what', 'how', 'when', 'where', 'why', 'who', 'which']
starts_with_question = any(message_lower.startswith(word) for word in question_words)
if starts_with_question and len(message) > 40:
return True
# Very short messages - use direct LLM
if len(message.strip()) < 20:
return False
# Default to Direct LLM unless there's clear indication of document query
return False
def _handle_direct_llm_query(self, session_id: str, message: str, session: dict):
    """
    Handle a query via the direct Ollama client, with thinking disabled
    for speed.

    Returns:
        tuple: (response_text, empty_source_docs)
    """
    try:
        history = db.get_conversation_history(session_id)
        chat_model = session.get('model', 'qwen3:8b')  # default to a fast model
        reply = self.ollama_client.chat(
            message=message,
            model=chat_model,
            conversation_history=history,
            enable_thinking=False,  # ⚡ disable thinking for speed
        )
        # Direct answers carry no source documents.
        return reply, []
    except Exception as e:
        print(f"❌ Direct LLM error: {e}")
        return f"Error processing query: {str(e)}", []
def _handle_rag_query(self, session_id: str, message: str, data: dict, idx_ids: List[str]):
    """
    Handle query using the full RAG pipeline (delegates to the advanced RAG
    API running on port 8001).

    Args:
        session_id: Chat session identifier, forwarded to the RAG API.
        message: The user's query.
        data: Raw request body; recognized tuning parameters are forwarded.
        idx_ids: Index IDs linked to the session; the most recently linked
            one selects the LanceDB text table to search.

    Returns:
        tuple[str, List[dict]]: (response_text, source_documents)
    """
    # Defaults
    response_text = ""
    source_docs: List[dict] = []
    # Build payload for RAG API
    rag_api_url = "http://localhost:8001/chat"
    table_name = f"text_pages_{idx_ids[-1]}" if idx_ids else None
    payload: Dict[str, Any] = {
        "query": message,
        "session_id": session_id,
    }
    if table_name:
        payload["table_name"] = table_name
    # Copy optional parameters from the incoming request, casting when possible.
    optional_params: Dict[str, tuple[type, str]] = {
        "compose_sub_answers": (bool, "compose_sub_answers"),
        "query_decompose": (bool, "query_decompose"),
        "ai_rerank": (bool, "ai_rerank"),
        "context_expand": (bool, "context_expand"),
        "verify": (bool, "verify"),
        "retrieval_k": (int, "retrieval_k"),
        "context_window_size": (int, "context_window_size"),
        "reranker_top_k": (int, "reranker_top_k"),
        "search_type": (str, "search_type"),
        "dense_weight": (float, "dense_weight"),
        "provence_prune": (bool, "provence_prune"),
        "provence_threshold": (float, "provence_threshold"),
    }
    for key, (caster, payload_key) in optional_params.items():
        val = data.get(key)
        if val is not None:
            try:
                payload[payload_key] = caster(val)  # type: ignore[arg-type]
            except Exception:
                # Forward the raw value rather than dropping the parameter.
                payload[payload_key] = val
    try:
        rag_response = requests.post(rag_api_url, json=payload)
        if rag_response.status_code == 200:
            rag_data = rag_response.json()
            response_text = rag_data.get("answer", "No answer found.")
            source_docs = rag_data.get("source_documents", [])
        else:
            response_text = f"Error from RAG API ({rag_response.status_code}): {rag_response.text}"
            print(f"❌ RAG API error: {response_text}")
    except requests.exceptions.ConnectionError:
        response_text = "Could not connect to the RAG API server. Please ensure it is running."
        print("❌ Connection to RAG API failed (port 8001).")
    except Exception as e:
        response_text = f"Error processing RAG query: {str(e)}"
        print(f"❌ RAG processing error: {e}")
    # Strip any <think>/<thinking> tags that might slip through.
    # FIX: pattern previously ended with r'</\\1>' — inside a raw string that
    # is a literal backslash followed by '1', not a backreference, so the
    # closing tag never matched and think-blocks were left in the output.
    response_text = re.sub(r'<(think|thinking)>.*?</\1>', '', response_text, flags=re.DOTALL | re.IGNORECASE).strip()
    return response_text, source_docs
def handle_delete_session(self, session_id: str):
    """Delete a session and its messages"""
    try:
        was_deleted = db.delete_session(session_id)
        if not was_deleted:
            self.send_json_response({'error': 'Session not found'}, status_code=404)
            return
        self.send_json_response({'deleted': was_deleted})
    except Exception as e:
        self.send_json_response({'error': str(e)}, status_code=500)
def handle_file_upload(self, session_id: str):
    """Handle file uploads, save them, and associate with the session.

    Expects a multipart/form-data body with one or more parts named 'files'.
    NOTE(review): relies on the stdlib `cgi` module — deprecated and slated
    for removal (PEP 594); confirm the deployment Python still ships it.
    """
    form = cgi.FieldStorage(
        fp=self.rfile,
        headers=self.headers,
        environ={'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': self.headers['Content-Type']}
    )
    uploaded_files = []
    if 'files' in form:
        files = form['files']
        # A single uploaded file arrives as one FieldStorage, not a list.
        if not isinstance(files, list):
            files = [files]
        upload_dir = "shared_uploads"
        os.makedirs(upload_dir, exist_ok=True)
        for file_item in files:
            if file_item.filename:
                # Create a unique filename to avoid overwrites
                unique_filename = f"{uuid.uuid4()}_{file_item.filename}"
                file_path = os.path.join(upload_dir, unique_filename)
                with open(file_path, 'wb') as f:
                    f.write(file_item.file.read())
                # Store the absolute path for the indexing service
                absolute_file_path = os.path.abspath(file_path)
                db.add_document_to_session(session_id, absolute_file_path)
                uploaded_files.append({"filename": file_item.filename, "stored_path": absolute_file_path})
    if not uploaded_files:
        self.send_json_response({"error": "No files were uploaded"}, status_code=400)
        return
    self.send_json_response({
        "message": f"Successfully uploaded {len(uploaded_files)} files.",
        "uploaded_files": uploaded_files
    })
def handle_index_documents(self, session_id: str):
    """Triggers indexing for all documents in a session."""
    print(f"🔥 Received request to index documents for session {session_id[:8]}...")
    try:
        file_paths = db.get_documents_for_session(session_id)
        if not file_paths:
            self.send_json_response({"message": "No documents to index for this session."}, status_code=200)
            return
        print(f"Found {len(file_paths)} documents to index. Sending to RAG API...")
        rag_response = requests.post(
            "http://localhost:8001/index",
            json={"file_paths": file_paths, "session_id": session_id},
        )
        if rag_response.status_code != 200:
            error_info = rag_response.text
            print(f"❌ RAG API indexing failed ({rag_response.status_code}): {error_info}")
            self.send_json_response({"error": f"Indexing failed: {error_info}"}, status_code=500)
            return
        print("✅ RAG API successfully indexed documents.")
        # Record key config values on the index metadata; the session_id
        # doubles as the index_id in text table naming.
        try:
            db.update_index_metadata(session_id, {
                "session_linked": True,
                "retrieval_mode": "hybrid",
            })
        except Exception as e:
            print(f"⚠️ Failed to update index metadata for session index: {e}")
        self.send_json_response(rag_response.json())
    except Exception as e:
        print(f"❌ Exception during indexing: {str(e)}")
        self.send_json_response({"error": f"An unexpected error occurred: {str(e)}"}, status_code=500)
def handle_pdf_upload(self, session_id: str):
    """
    Processes PDF files: extracts text and stores it in the database.

    DEPRECATED: This is the old method. Use handle_file_upload instead.
    Kept only for potential legacy/compatibility reasons; always answers
    410 Gone without doing any work.
    """
    self.send_json_response(
        {
            "warning": "This upload method is deprecated. Use the new file upload and indexing flow.",
            "message": "No action taken.",
        },
        status_code=410,  # 410 Gone
    )
def handle_get_models(self):
    """Get available models from both Ollama and HuggingFace, grouped by capability"""
    try:
        generation_models = []
        embedding_models = []
        # Include Ollama-hosted models only when the daemon is reachable.
        if self.ollama_client.is_ollama_running():
            ollama_models = self.ollama_client.list_models()
            # Very naive classification - same logic as RAG API server
            embed_keywords = ('embed', 'bge', 'embedding', 'text')
            ollama_embedding = [m for m in ollama_models if any(k in m for k in embed_keywords)]
            generation_models.extend(m for m in ollama_models if m not in ollama_embedding)
            embedding_models.extend(ollama_embedding)
        # Add supported HuggingFace embedding models
        embedding_models.extend([
            "Qwen/Qwen3-Embedding-0.6B",
            "Qwen/Qwen3-Embedding-4B",
            "Qwen/Qwen3-Embedding-8B",
        ])
        # Sort models for consistent ordering
        generation_models.sort()
        embedding_models.sort()
        self.send_json_response({
            "generation_models": generation_models,
            "embedding_models": embedding_models
        })
    except Exception as e:
        self.send_json_response({
            "error": f"Could not list models: {str(e)}"
        }, status_code=500)
def handle_get_indexes(self):
    """List every index plus a total count."""
    try:
        indexes = db.list_indexes()
        self.send_json_response({'indexes': indexes, 'total': len(indexes)})
    except Exception as e:
        self.send_json_response({'error': str(e)}, status_code=500)
def handle_get_index(self, index_id: str):
    """Return a single index record, or 404 when it does not exist."""
    try:
        record = db.get_index(index_id)
        if not record:
            self.send_json_response({'error': 'Index not found'}, status_code=404)
            return
        self.send_json_response(record)
    except Exception as e:
        self.send_json_response({'error': str(e)}, status_code=500)
def handle_create_index(self):
    """Create a new index.

    Expects a JSON body with 'name' (required) and optional 'description'
    and 'metadata'. When the RAG system config is importable, the default
    pipeline settings are recorded in the index metadata; caller-provided
    metadata keys win on conflict.
    """
    try:
        content_length = int(self.headers['Content-Length'])
        post_data = self.rfile.read(content_length)
        data = json.loads(post_data.decode('utf-8'))
        name = data.get('name')
        description = data.get('description')
        metadata = data.get('metadata', {})
        if not name:
            self.send_json_response({'error': 'Name required'}, status_code=400)
            return
        # Add complete metadata from RAG system configuration if available
        if RAG_SYSTEM_AVAILABLE and PIPELINE_CONFIGS.get('default'):
            complete_metadata = {
                'status': 'created',
                'metadata_source': 'rag_system_config',
                # FIX: was json.loads(json.dumps(...)) — a no-op round-trip
                # on a plain string; the ISO timestamp is used directly.
                'created_at': datetime.now().isoformat(),
                'chunk_size': 512,  # From default config
                'chunk_overlap': 64,  # From default config
                'retrieval_mode': 'hybrid',  # From default config
                'window_size': 5,  # From default config
                'embedding_model': 'Qwen/Qwen3-Embedding-0.6B',  # From default config
                'enrich_model': 'qwen3:0.6b',  # From default config
                'overview_model': 'qwen3:0.6b',  # From default config
                'enable_enrich': True,  # From default config
                'latechunk': True,  # From default config
                'docling_chunk': True,  # From default config
                'note': 'Default configuration from RAG system'
            }
            # Merge with any provided metadata (caller values override defaults)
            complete_metadata.update(metadata)
            metadata = complete_metadata
        idx_id = db.create_index(name, description, metadata)
        self.send_json_response({'index_id': idx_id}, status_code=201)
    except Exception as e:
        self.send_json_response({'error': str(e)}, status_code=500)
def handle_index_file_upload(self, index_id: str):
    """Reuse file upload logic but store docs under index."""
    form = cgi.FieldStorage(
        fp=self.rfile,
        headers=self.headers,
        environ={'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': self.headers['Content-Type']},
    )
    uploaded_files = []
    if 'files' in form:
        file_items = form['files']
        # A single uploaded file arrives as one FieldStorage, not a list.
        if not isinstance(file_items, list):
            file_items = [file_items]
        upload_dir = 'shared_uploads'
        os.makedirs(upload_dir, exist_ok=True)
        for item in file_items:
            if not item.filename:
                continue
            # Prefix with a UUID to avoid collisions between uploads.
            stored_name = f"{uuid.uuid4()}_{item.filename}"
            stored_path = os.path.join(upload_dir, stored_name)
            with open(stored_path, 'wb') as out:
                out.write(item.file.read())
            abs_path = os.path.abspath(stored_path)
            db.add_document_to_index(index_id, item.filename, abs_path)
            uploaded_files.append({'filename': item.filename, 'stored_path': abs_path})
    if not uploaded_files:
        self.send_json_response({'error': 'No files uploaded'}, status_code=400)
        return
    self.send_json_response({'message': f"Uploaded {len(uploaded_files)} files", "uploaded_files": uploaded_files})
def handle_build_index(self, index_id: str):
    """Build (or rebuild) the vector index for *index_id* via the RAG API.

    Optional JSON body fields (all defaulted): latechunk, doclingChunk,
    chunkSize, chunkOverlap, retrievalMode, windowSize, enableEnrich,
    embeddingModel, enrichModel, batchSizeEmbed, batchSizeEnrich,
    overviewModel.  On success the effective settings are persisted as
    index metadata and echoed to the client.  A "table already exists"
    failure from the RAG service is treated as an idempotent no-op.
    """
    try:
        index = db.get_index(index_id)
        if not index:
            self.send_json_response({'error': 'Index not found'}, status_code=404)
            return
        file_paths = [d['stored_path'] for d in index.get('documents', [])]
        if not file_paths:
            self.send_json_response({'error': 'No documents to index'}, status_code=400)
            return
        # Build-option defaults; overridden by the optional JSON body below.
        latechunk = False
        docling_chunk = False
        chunk_size = 512
        chunk_overlap = 64
        retrieval_mode = 'hybrid'
        window_size = 2
        enable_enrich = True
        embedding_model = None
        enrich_model = None
        batch_size_embed = 50
        batch_size_enrich = 25
        overview_model = None
        if 'Content-Length' in self.headers:
            try:
                # Parse Content-Length exactly once (it was previously
                # converted twice, and the first int() sat outside the try).
                length = int(self.headers['Content-Length'])
                if length > 0:
                    opts = json.loads(self.rfile.read(length).decode('utf-8'))
                    latechunk = bool(opts.get('latechunk', False))
                    docling_chunk = bool(opts.get('doclingChunk', False))
                    chunk_size = int(opts.get('chunkSize', 512))
                    chunk_overlap = int(opts.get('chunkOverlap', 64))
                    retrieval_mode = str(opts.get('retrievalMode', 'hybrid'))
                    window_size = int(opts.get('windowSize', 2))
                    enable_enrich = bool(opts.get('enableEnrich', True))
                    embedding_model = opts.get('embeddingModel')
                    enrich_model = opts.get('enrichModel')
                    batch_size_embed = int(opts.get('batchSizeEmbed', 50))
                    batch_size_enrich = int(opts.get('batchSizeEnrich', 25))
                    overview_model = opts.get('overviewModel')
            except Exception:
                # Keep defaults on any parse error (malformed header or body).
                pass
        # NOTE: a per-index overview path used to be computed here but was
        # never forwarded to the RAG API; that dead code (and an unused
        # `json as _json` import alias) has been removed.
        # Delegate to the advanced RAG API, same as session indexing.
        rag_api_url = "http://localhost:8001/index"
        import requests
        payload = {
            "file_paths": file_paths,
            "session_id": index_id,  # reuse index_id for progress tracking
            # Use the index's dedicated LanceDB table so retrieval matches.
            "table_name": index.get("vector_table_name"),
            "chunk_size": chunk_size,
            "chunk_overlap": chunk_overlap,
            "retrieval_mode": retrieval_mode,
            "window_size": window_size,
            "enable_enrich": enable_enrich,
            "batch_size_embed": batch_size_embed,
            "batch_size_enrich": batch_size_enrich
        }
        if latechunk:
            payload["enable_latechunk"] = True
        if docling_chunk:
            payload["enable_docling_chunk"] = True
        if embedding_model:
            payload["embedding_model"] = embedding_model
        if enrich_model:
            payload["enrich_model"] = enrich_model
        if overview_model:
            payload["overview_model_name"] = overview_model
        rag_resp = requests.post(rag_api_url, json=payload)
        if rag_resp.status_code == 200:
            meta_updates = {
                "chunk_size": chunk_size,
                "chunk_overlap": chunk_overlap,
                "retrieval_mode": retrieval_mode,
                "window_size": window_size,
                "enable_enrich": enable_enrich,
                "latechunk": latechunk,
                "docling_chunk": docling_chunk,
            }
            if embedding_model:
                meta_updates["embedding_model"] = embedding_model
            if enrich_model:
                meta_updates["enrich_model"] = enrich_model
            if overview_model:
                meta_updates["overview_model"] = overview_model
            try:
                db.update_index_metadata(index_id, meta_updates)
            except Exception as e:
                # Metadata persistence is best-effort; the build succeeded.
                print(f"⚠️ Failed to update index metadata: {e}")
            self.send_json_response({
                "response": rag_resp.json(),
                **meta_updates
            })
        else:
            # Gracefully handle scenario where table already exists (idempotent build)
            try:
                err_json = rag_resp.json()
            except Exception:
                err_json = {}
            err_text = err_json.get('error') if isinstance(err_json, dict) else rag_resp.text
            if err_text and 'already exists' in err_text:
                # Treat as non-fatal; return message indicating index previously built
                self.send_json_response({
                    "message": "Index already built – skipping rebuild.",
                    "note": err_text
                })
            else:
                self.send_json_response({"error": f"RAG indexing failed: {rag_resp.text}"}, status_code=500)
    except Exception as e:
        self.send_json_response({'error': str(e)}, status_code=500)
def handle_link_index_to_session(self, session_id: str, index_id: str):
    """Attach an existing index to a chat session."""
    try:
        db.link_index_to_session(session_id, index_id)
    except Exception as exc:
        self.send_json_response({'error': str(exc)}, status_code=500)
    else:
        self.send_json_response({'message': 'Index linked to session'})
def handle_get_session_indexes(self, session_id: str):
    """Return every index linked to *session_id*, backfilling legacy metadata."""
    try:
        resolved = []
        for idx_id in db.get_indexes_for_session(session_id):
            record = db.get_index(idx_id)
            if not record:
                continue
            metadata = record.get('metadata')
            if not metadata or len(metadata) == 0:
                # Older indexes may predate metadata capture; try to infer it
                # from stored artifacts, then re-read the refreshed row.
                print(f"🔍 Attempting to infer metadata for index {idx_id[:8]}...")
                if db.inspect_and_populate_index_metadata(idx_id):
                    record = db.get_index(idx_id)
            resolved.append(record)
        self.send_json_response({'indexes': resolved, 'total': len(resolved)})
    except Exception as e:
        self.send_json_response({'error': str(e)}, status_code=500)
def handle_delete_index(self, index_id: str):
    """Remove an index, its documents, links, and the underlying LanceDB table."""
    try:
        if db.delete_index(index_id):
            self.send_json_response({'message': 'Index deleted successfully', 'index_id': index_id})
        else:
            self.send_json_response({'error': 'Index not found'}, status_code=404)
    except Exception as exc:
        self.send_json_response({'error': str(exc)}, status_code=500)
def handle_rename_session(self, session_id: str):
    """Rename an existing session title.

    Expects a JSON body ``{"title": "<new title>"}``.  Responds 404 when the
    session does not exist, 400 on a missing/empty/non-string title or
    invalid JSON, and 500 on unexpected failures.
    """
    try:
        session = db.get_session(session_id)
        if not session:
            self.send_json_response({"error": "Session not found"}, status_code=404)
            return
        content_length = int(self.headers.get('Content-Length', 0))
        if content_length == 0:
            self.send_json_response({"error": "Request body required"}, status_code=400)
            return
        post_data = self.rfile.read(content_length)
        data = json.loads(post_data.decode('utf-8'))
        raw_title = data.get('title', '')
        # Guard against non-string payloads (e.g. a JSON number or null),
        # which previously raised AttributeError on .strip() and surfaced
        # to the client as a 500 instead of a 400.
        new_title: str = raw_title.strip() if isinstance(raw_title, str) else ''
        if not new_title:
            self.send_json_response({"error": "Title cannot be empty"}, status_code=400)
            return
        db.update_session_title(session_id, new_title)
        updated_session = db.get_session(session_id)
        self.send_json_response({
            "message": "Session renamed successfully",
            "session": updated_session
        })
    except json.JSONDecodeError:
        self.send_json_response({"error": "Invalid JSON"}, status_code=400)
    except Exception as e:
        self.send_json_response({"error": f"Failed to rename session: {str(e)}"}, status_code=500)
def send_json_response(self, data, status_code: int = 200):
    """Send a JSON (UTF-8) response with CORS headers. Safe against client disconnects."""
    response_headers = (
        ('Content-Type', 'application/json'),
        ('Access-Control-Allow-Origin', '*'),
        ('Access-Control-Allow-Methods', 'GET, POST, PUT, DELETE, OPTIONS'),
        ('Access-Control-Allow-Headers', 'Content-Type, Authorization'),
        ('Access-Control-Allow-Credentials', 'true'),
    )
    try:
        self.send_response(status_code)
        for header_name, header_value in response_headers:
            self.send_header(header_name, header_value)
        self.end_headers()
        self.wfile.write(json.dumps(data, indent=2).encode('utf-8'))
    except BrokenPipeError:
        # Client went away mid-response; nothing useful left to do.
        print("⚠️ Client disconnected during response – ignoring.")
    except Exception as e:
        print(f"❌ Error sending response: {e}")
def log_message(self, format, *args):
    """Emit one request-log line with a timestamp prefix (custom format)."""
    timestamp = self.date_time_string()
    print(f"[{timestamp}] {format % args}")
def main():
    """Main function to initialize and start the server.

    Boot order: PDF processor -> global processor handle -> empty-session
    cleanup -> blocking HTTP server.  Ollama and PDF-processor failures are
    non-fatal; the server runs with degraded functionality.
    """
    PORT = 8000  # Port the backend HTTP server binds to
    try:
        # Initialize the database
        # NOTE(review): no explicit db-init call is visible here — this
        # message presumably reflects module-level setup; confirm.
        print("✅ Database initialized successfully")
        # Initialize the PDF processor
        try:
            pdf_module.initialize_simple_pdf_processor()
            # NOTE(review): this "Initializing..." line prints *after* the
            # call above — the two statements look swapped; confirm intent.
            print("📄 Initializing simple PDF processing...")
            if pdf_module.simple_pdf_processor:
                print("✅ Simple PDF processor initialized")
            else:
                print("⚠️ PDF processing could not be initialized.")
        except Exception as e:
            print(f"❌ Error initializing PDF processor: {e}")
            print("⚠️ PDF processing disabled - server will run without RAG functionality")
        # Set a global reference to the initialized processor if needed elsewhere
        global pdf_processor
        pdf_processor = pdf_module.simple_pdf_processor
        if pdf_processor:
            print("✅ Global PDF processor initialized")
        else:
            print("⚠️ PDF processing disabled - server will run without RAG functionality")
        # Cleanup empty sessions on startup
        print("🧹 Cleaning up empty sessions...")
        cleanup_count = db.cleanup_empty_sessions()
        if cleanup_count > 0:
            print(f"✨ Cleaned up {cleanup_count} empty sessions")
        else:
            print("✨ No empty sessions to clean up")
        # Start the server
        with ReusableTCPServer(("", PORT), ChatHandler) as httpd:
            print(f"🚀 Starting localGPT backend server on port {PORT}")
            print(f"📍 Chat endpoint: http://localhost:{PORT}/chat")
            print(f"🔍 Health check: http://localhost:{PORT}/health")
            # Test Ollama connection (informational only; failure is non-fatal)
            client = OllamaClient()
            if client.is_ollama_running():
                models = client.list_models()
                print(f"✅ Ollama is running with {len(models)} models")
                print(f"📋 Available models: {', '.join(models[:3])}{'...' if len(models) > 3 else ''}")
            else:
                print("⚠️ Ollama is not running. Please start Ollama:")
                print(" Install: https://ollama.ai")
                print(" Run: ollama serve")
            print(f"\n🌐 Frontend should connect to: http://localhost:{PORT}")
            print("💬 Ready to chat!\n")
            httpd.serve_forever()  # blocks until interrupted
    except KeyboardInterrupt:
        print("\n🛑 Server stopped")
if __name__ == "__main__":
main() | {
"repo_id": "PromtEngineer/localGPT",
"file_path": "backend/server.py",
"license": "MIT License",
"lines": 991,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PromtEngineer/localGPT:backend/simple_pdf_processor.py | """
Simple PDF Processing Service
Handles PDF upload and text extraction for RAG functionality
"""
import uuid
from typing import List, Dict, Any
import PyPDF2
from io import BytesIO
import sqlite3
from datetime import datetime
class SimplePDFProcessor:
    """Extracts text from uploaded PDFs and stores it per-session in SQLite.

    Lightweight fallback RAG store: each document becomes a plain-text row
    keyed by session id, and whole-session content can be pulled back as a
    single blob for LLM context.

    Fix: several log/format f-strings contained redacted "(unknown)"
    placeholders and interpolated nothing; the intended filename /
    document-id interpolations are restored below.
    """

    def __init__(self, db_path: str = "chat_data.db"):
        """Initialize simple PDF processor with SQLite storage."""
        self.db_path = db_path
        self.init_database()
        print("✅ Simple PDF processor initialized")

    def init_database(self):
        """Create the pdf_documents table if it does not exist yet."""
        conn = sqlite3.connect(self.db_path)
        conn.execute('''
            CREATE TABLE IF NOT EXISTS pdf_documents (
                id TEXT PRIMARY KEY,
                session_id TEXT NOT NULL,
                filename TEXT NOT NULL,
                content TEXT NOT NULL,
                created_at TEXT NOT NULL
            )
        ''')
        conn.commit()
        conn.close()

    def extract_text_from_pdf(self, pdf_bytes: bytes) -> str:
        """Extract text from PDF bytes; returns '' when nothing is readable."""
        try:
            print(f"📄 Starting PDF text extraction ({len(pdf_bytes)} bytes)")
            pdf_file = BytesIO(pdf_bytes)
            pdf_reader = PyPDF2.PdfReader(pdf_file)
            print(f"📖 PDF has {len(pdf_reader.pages)} pages")
            text = ""
            for page_num, page in enumerate(pdf_reader.pages):
                print(f"📄 Processing page {page_num + 1}")
                try:
                    page_text = page.extract_text()
                    if page_text.strip():
                        text += f"\n--- Page {page_num + 1} ---\n"
                        text += page_text + "\n"
                        print(f"✅ Page {page_num + 1}: extracted {len(page_text)} characters")
                except Exception as page_error:
                    # One unreadable page must not abort the whole document.
                    print(f"❌ Error on page {page_num + 1}: {str(page_error)}")
                    continue
            print(f"📄 Total extracted text: {len(text)} characters")
            return text.strip()
        except Exception as e:
            print(f"❌ Error extracting text from PDF: {str(e)}")
            print(f"❌ Error type: {type(e).__name__}")
            return ""

    def process_pdf(self, pdf_bytes: bytes, filename: str, session_id: str) -> Dict[str, Any]:
        """Process a PDF file and store it in the database.

        Returns a result dict with ``success`` plus either
        ``file_id``/``text_length`` or an ``error`` message.
        """
        print(f"📄 Processing PDF: {filename}")
        # Extract text
        text = self.extract_text_from_pdf(pdf_bytes)
        if not text:
            return {
                "success": False,
                "error": "Could not extract text from PDF",
                "filename": filename
            }
        print(f"📝 Extracted {len(text)} characters from {filename}")
        # Store in database
        document_id = str(uuid.uuid4())
        now = datetime.now().isoformat()
        try:
            conn = sqlite3.connect(self.db_path)
            conn.execute('''
                INSERT INTO pdf_documents (id, session_id, filename, content, created_at)
                VALUES (?, ?, ?, ?, ?)
            ''', (document_id, session_id, filename, text, now))
            conn.commit()
            conn.close()
            print(f"💾 Stored document {document_id} in database")
            return {
                "success": True,
                "filename": filename,
                "file_id": document_id,
                "text_length": len(text)
            }
        except Exception as e:
            print(f"❌ Error storing in database: {str(e)}")
            return {
                "success": False,
                "error": f"Database storage failed: {str(e)}",
                "filename": filename
            }

    def get_session_documents(self, session_id: str) -> List[Dict[str, Any]]:
        """Get all documents for a session (id, filename, created_at)."""
        try:
            conn = sqlite3.connect(self.db_path)
            conn.row_factory = sqlite3.Row
            cursor = conn.execute('''
                SELECT id, filename, created_at
                FROM pdf_documents
                WHERE session_id = ?
                ORDER BY created_at DESC
            ''', (session_id,))
            documents = [dict(row) for row in cursor.fetchall()]
            conn.close()
            return documents
        except Exception as e:
            print(f"❌ Error getting session documents: {str(e)}")
            return []

    def get_document_content(self, session_id: str) -> str:
        """Get all document content for a session (for LLM context)."""
        try:
            conn = sqlite3.connect(self.db_path)
            cursor = conn.execute('''
                SELECT filename, content
                FROM pdf_documents
                WHERE session_id = ?
                ORDER BY created_at ASC
            ''', (session_id,))
            rows = cursor.fetchall()
            conn.close()
            if not rows:
                return ""
            # Combine all document content with per-document separators.
            combined_content = ""
            for filename, content in rows:
                combined_content += f"\n\n=== Document: {filename} ===\n\n"
                combined_content += content
            return combined_content.strip()
        except Exception as e:
            print(f"❌ Error getting document content: {str(e)}")
            return ""

    def delete_session_documents(self, session_id: str) -> bool:
        """Delete all documents for a session; True when any row was removed."""
        try:
            conn = sqlite3.connect(self.db_path)
            cursor = conn.execute('''
                DELETE FROM pdf_documents
                WHERE session_id = ?
            ''', (session_id,))
            deleted_count = cursor.rowcount
            conn.commit()
            conn.close()
            if deleted_count > 0:
                print(f"🗑️ Deleted {deleted_count} documents for session {session_id[:8]}...")
            return deleted_count > 0
        except Exception as e:
            print(f"❌ Error deleting session documents: {str(e)}")
            return False
# Global instance
simple_pdf_processor = None
def initialize_simple_pdf_processor():
    """Initialize the global PDF processor"""
    global simple_pdf_processor
    try:
        simple_pdf_processor = SimplePDFProcessor()
    except Exception as exc:
        # Leave the global cleared so callers can detect the failure.
        print(f"❌ Failed to initialize PDF processor: {str(exc)}")
        simple_pdf_processor = None
    else:
        print("✅ Global PDF processor initialized")
def get_simple_pdf_processor():
    """Return the shared PDF processor, creating it lazily on first use."""
    global simple_pdf_processor
    if simple_pdf_processor is not None:
        return simple_pdf_processor
    initialize_simple_pdf_processor()
    return simple_pdf_processor
if __name__ == "__main__":
# Test the simple PDF processor
print("🧪 Testing simple PDF processor...")
processor = SimplePDFProcessor()
print("✅ Simple PDF processor test completed!") | {
"repo_id": "PromtEngineer/localGPT",
"file_path": "backend/simple_pdf_processor.py",
"license": "MIT License",
"lines": 172,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PromtEngineer/localGPT:backend/test_backend.py | #!/usr/bin/env python3
"""
Simple test script for the localGPT backend
"""
import requests
def test_health_endpoint():
    """Probe the backend /health route; True when it responds 200."""
    print("🔍 Testing health endpoint...")
    try:
        resp = requests.get("http://localhost:8000/health", timeout=5)
        if resp.status_code != 200:
            print(f"❌ Health check failed: {resp.status_code}")
            return False
        data = resp.json()
        print("✅ Health check passed")
        print(f" Ollama running: {data['ollama_running']}")
        print(f" Models available: {len(data['available_models'])}")
        return True
    except requests.exceptions.RequestException as e:
        print(f"❌ Health check failed: {e}")
        return False
def test_chat_endpoint():
    """Send one deterministic prompt to /chat; True on a 200 response."""
    print("\n💬 Testing chat endpoint...")
    payload = {
        "message": "Say 'Hello World' and nothing else.",
        "model": "llama3.2:latest"
    }
    try:
        resp = requests.post(
            "http://localhost:8000/chat",
            headers={"Content-Type": "application/json"},
            json=payload,
            timeout=30
        )
        if resp.status_code != 200:
            print(f"❌ Chat test failed: {resp.status_code}")
            print(f" Response: {resp.text}")
            return False
        data = resp.json()
        print("✅ Chat test passed")
        print(f" Model: {data['model']}")
        print(f" Response: {data['response']}")
        print(f" Message count: {data['message_count']}")
        return True
    except requests.exceptions.RequestException as e:
        print(f"❌ Chat test failed: {e}")
        return False
def test_conversation_history():
    """Test conversation with history.

    Sends a fact ("My name is Alice"), replays the exchange as
    conversation_history, then asks the model to recall it.  Returns True
    when both HTTP round-trips succeed; the recall itself is only reported,
    not asserted (LLM output is non-deterministic).
    """
    print("\n🗨️ Testing conversation history...")
    # First message
    conversation = []
    message1 = {
        "message": "My name is Alice. Remember this.",
        "model": "llama3.2:latest",
        "conversation_history": conversation
    }
    try:
        response1 = requests.post(
            "http://localhost:8000/chat",
            headers={"Content-Type": "application/json"},
            json=message1,
            timeout=30
        )
        if response1.status_code == 200:
            data1 = response1.json()
            # Add both turns to conversation history for the follow-up.
            conversation.append({"role": "user", "content": "My name is Alice. Remember this."})
            conversation.append({"role": "assistant", "content": data1["response"]})
            # Second message asking about the name
            message2 = {
                "message": "What is my name?",
                "model": "llama3.2:latest",
                "conversation_history": conversation
            }
            response2 = requests.post(
                "http://localhost:8000/chat",
                headers={"Content-Type": "application/json"},
                json=message2,
                timeout=30
            )
            if response2.status_code == 200:
                data2 = response2.json()
                print(f"✅ Conversation history test passed")
                print(f" First response: {data1['response']}")
                print(f" Second response: {data2['response']}")
                # Check if the AI remembered the name (informational only).
                if "alice" in data2['response'].lower():
                    print(f"✅ AI correctly remembered the name!")
                else:
                    print(f"⚠️ AI might not have remembered the name")
                return True
            else:
                print(f"❌ Second message failed: {response2.status_code}")
                return False
        else:
            print(f"❌ First message failed: {response1.status_code}")
            return False
    except requests.exceptions.RequestException as e:
        print(f"❌ Conversation test failed: {e}")
        return False
def main():
    """Run the backend smoke tests in order, stopping early on fatal failures."""
    print("🧪 Testing localGPT Backend")
    print("=" * 40)
    # Health must pass before anything else is worth trying.
    if not test_health_endpoint():
        print("\n❌ Backend server is not running or not healthy")
        print(" Make sure to run: python server.py")
        return
    if not test_chat_endpoint():
        print("\n❌ Chat functionality is not working")
        return
    conversation_ok = test_conversation_history()
    print("\n" + "=" * 40)
    # Health and chat are already known good at this point.
    if conversation_ok:
        print("🎉 All tests passed! Backend is ready for frontend integration.")
    else:
        print("⚠️ Some tests failed. Check the issues above.")
    print("\n🔗 Ready to connect to frontend at http://localhost:3000")
if __name__ == "__main__":
main() | {
"repo_id": "PromtEngineer/localGPT",
"file_path": "backend/test_backend.py",
"license": "MIT License",
"lines": 128,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PromtEngineer/localGPT:backend/test_ollama_connectivity.py | #!/usr/bin/env python3
import os
import sys
def test_ollama_connectivity():
    """Test Ollama connectivity from within Docker container"""
    print("🧪 Testing Ollama Connectivity")
    print("=" * 40)
    print(f"OLLAMA_HOST environment variable: {os.getenv('OLLAMA_HOST', 'Not set')}")
    try:
        from ollama_client import OllamaClient
        client = OllamaClient()
        print(f"OllamaClient base_url: {client.base_url}")
        is_up = client.is_ollama_running()
        print(f"Ollama running: {is_up}")
        if not is_up:
            print("❌ Ollama connectivity test failed!")
            return False
        print(f"Available models: {client.list_models()}")
        print("✅ Ollama connectivity test passed!")
        return True
    except Exception as exc:
        # Import failures and network errors are both reported here.
        print(f"❌ Error testing Ollama connectivity: {exc}")
        return False
if __name__ == "__main__":
    # Propagate the result as a process exit code so CI / Docker health
    # checks can consume it (0 = Ollama reachable, 1 = not).
    success = test_ollama_connectivity()
    sys.exit(0 if success else 1)
| {
"repo_id": "PromtEngineer/localGPT",
"file_path": "backend/test_ollama_connectivity.py",
"license": "MIT License",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PromtEngineer/localGPT:create_index_script.py | #!/usr/bin/env python3
"""
Interactive Index Creation Script for LocalGPT RAG System
This script provides a user-friendly interface for creating document indexes
using the LocalGPT RAG system. It supports both single documents and batch
processing of multiple documents.
Usage:
python create_index_script.py
python create_index_script.py --batch
python create_index_script.py --config custom_config.json
"""
import os
import sys
import json
import argparse
from typing import List, Optional
from pathlib import Path
# Add the project root to the path so we can import rag_system modules
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
try:
from rag_system.main import PIPELINE_CONFIGS, get_agent
from rag_system.pipelines.indexing_pipeline import IndexingPipeline
from rag_system.utils.ollama_client import OllamaClient
from backend.database import ChatDatabase
except ImportError as e:
print(f"❌ Error importing required modules: {e}")
print("Please ensure you're running this script from the project root directory.")
sys.exit(1)
class IndexCreator:
    """Interactive index creation utility.

    Wraps the IndexingPipeline with a console UI: the user selects documents,
    tunes processing options, and the resulting index is registered in the
    chat database and processed through the pipeline.
    """

    def __init__(self, config_path: Optional[str] = None):
        """Initialize the index creator with optional custom configuration."""
        self.db = ChatDatabase()
        self.config = self._load_config(config_path)
        # Initialize Ollama client
        self.ollama_client = OllamaClient()
        # NOTE(review): the same small model is configured for both
        # generation and embeddings — confirm this matches the deployment.
        self.ollama_config = {
            "generation_model": "qwen3:0.6b",
            "embedding_model": "qwen3:0.6b"
        }
        # Initialize indexing pipeline
        self.pipeline = IndexingPipeline(
            self.config,
            self.ollama_client,
            self.ollama_config
        )

    def _load_config(self, config_path: Optional[str] = None) -> dict:
        """Load configuration from file or use default."""
        if config_path and os.path.exists(config_path):
            try:
                with open(config_path, 'r') as f:
                    return json.load(f)
            except Exception as e:
                # Fall back to defaults rather than aborting on a bad file.
                print(f"⚠️ Error loading config from {config_path}: {e}")
                print("Using default configuration...")
        return PIPELINE_CONFIGS.get("default", {})

    def get_user_input(self, prompt: str, default: str = "") -> str:
        """Get user input with optional default value."""
        if default:
            user_input = input(f"{prompt} [{default}]: ").strip()
            return user_input if user_input else default
        return input(f"{prompt}: ").strip()

    def select_documents(self) -> List[str]:
        """Interactive document selection.

        Loops until the user confirms a non-empty selection; returns
        absolute paths.
        """
        print("\n📁 Document Selection")
        print("=" * 50)
        documents = []
        while True:
            print("\nOptions:")
            print("1. Add a single document")
            print("2. Add all documents from a directory")
            print("3. Finish and proceed with selected documents")
            print("4. Show selected documents")
            choice = self.get_user_input("Select an option (1-4)", "1")
            if choice == "1":
                doc_path = self.get_user_input("Enter document path")
                if os.path.exists(doc_path):
                    documents.append(os.path.abspath(doc_path))
                    print(f"✅ Added: {doc_path}")
                else:
                    print(f"❌ File not found: {doc_path}")
            elif choice == "2":
                dir_path = self.get_user_input("Enter directory path")
                if os.path.isdir(dir_path):
                    supported_extensions = ['.pdf', '.txt', '.docx', '.md', '.html', '.htm']
                    found_docs = []
                    # NOTE(review): "*ext" and "**/*ext" both match top-level
                    # files with pathlib.glob, so duplicates are possible here
                    # — confirm whether deduplication is wanted.
                    for ext in supported_extensions:
                        found_docs.extend(Path(dir_path).glob(f"*{ext}"))
                        found_docs.extend(Path(dir_path).glob(f"**/*{ext}"))
                    if found_docs:
                        print(f"Found {len(found_docs)} documents:")
                        for doc in found_docs:
                            print(f" - {doc}")
                        if self.get_user_input("Add all these documents? (y/n)", "y").lower() == 'y':
                            documents.extend([str(doc.absolute()) for doc in found_docs])
                            print(f"✅ Added {len(found_docs)} documents")
                    else:
                        print("❌ No supported documents found in directory")
                else:
                    print(f"❌ Directory not found: {dir_path}")
            elif choice == "3":
                if documents:
                    break
                else:
                    print("❌ No documents selected. Please add at least one document.")
            elif choice == "4":
                if documents:
                    print(f"\n📄 Selected documents ({len(documents)}):")
                    for i, doc in enumerate(documents, 1):
                        print(f" {i}. {doc}")
                else:
                    print("No documents selected yet.")
            else:
                print("Invalid choice. Please select 1-4.")
        return documents

    def configure_processing(self) -> dict:
        """Interactive processing configuration.

        Returns a metadata dict of chunking/model options later stored on
        the index record.  Numeric answers are converted with int() and will
        raise ValueError on non-numeric input.
        """
        print("\n⚙️ Processing Configuration")
        print("=" * 50)
        print("Configure how documents will be processed:")
        # Basic settings
        chunk_size = int(self.get_user_input("Chunk size", "512"))
        chunk_overlap = int(self.get_user_input("Chunk overlap", "64"))
        # Advanced settings
        print("\nAdvanced options:")
        enable_enrich = self.get_user_input("Enable contextual enrichment? (y/n)", "y").lower() == 'y'
        enable_latechunk = self.get_user_input("Enable late chunking? (y/n)", "y").lower() == 'y'
        enable_docling = self.get_user_input("Enable Docling chunking? (y/n)", "y").lower() == 'y'
        # Model selection
        print("\nModel Configuration:")
        embedding_model = self.get_user_input("Embedding model", "Qwen/Qwen3-Embedding-0.6B")
        generation_model = self.get_user_input("Generation model", "qwen3:0.6b")
        return {
            "chunk_size": chunk_size,
            "chunk_overlap": chunk_overlap,
            "enable_enrich": enable_enrich,
            "enable_latechunk": enable_latechunk,
            "enable_docling": enable_docling,
            "embedding_model": embedding_model,
            "generation_model": generation_model,
            "retrieval_mode": "hybrid",
            "window_size": 2
        }

    def create_index_interactive(self) -> None:
        """Run the interactive index creation process end to end."""
        print("🚀 LocalGPT Index Creation Tool")
        print("=" * 50)
        # Get index details
        index_name = self.get_user_input("Enter index name")
        index_description = self.get_user_input("Enter index description (optional)")
        # Select documents
        documents = self.select_documents()
        # Configure processing
        processing_config = self.configure_processing()
        # Confirm creation
        print("\n📋 Index Summary")
        print("=" * 50)
        print(f"Name: {index_name}")
        print(f"Description: {index_description or 'None'}")
        print(f"Documents: {len(documents)}")
        print(f"Chunk size: {processing_config['chunk_size']}")
        print(f"Enrichment: {'Enabled' if processing_config['enable_enrich'] else 'Disabled'}")
        print(f"Embedding model: {processing_config['embedding_model']}")
        if self.get_user_input("\nProceed with index creation? (y/n)", "y").lower() != 'y':
            print("❌ Index creation cancelled.")
            return
        # Create the index
        try:
            print("\n🔥 Creating index...")
            # Create index record in database
            index_id = self.db.create_index(
                name=index_name,
                description=index_description,
                metadata=processing_config
            )
            # Add documents to index
            for doc_path in documents:
                filename = os.path.basename(doc_path)
                self.db.add_document_to_index(index_id, filename, doc_path)
            # Process documents through pipeline
            print("📚 Processing documents...")
            self.pipeline.process_documents(documents)
            print(f"\n✅ Index '{index_name}' created successfully!")
            print(f"Index ID: {index_id}")
            print(f"Processed {len(documents)} documents")
            # Test the index
            if self.get_user_input("\nTest the index with a sample query? (y/n)", "y").lower() == 'y':
                self.test_index(index_id)
        except Exception as e:
            print(f"❌ Error creating index: {e}")
            import traceback
            traceback.print_exc()

    def test_index(self, index_id: str) -> None:
        """Test the created index with a sample query."""
        try:
            print("\n🧪 Testing Index")
            print("=" * 50)
            # Get agent for testing
            agent = get_agent("default")
            # Test query
            test_query = self.get_user_input("Enter a test query", "What is this document about?")
            print(f"\nProcessing query: {test_query}")
            # Table name must match the convention used by the indexing side.
            response = agent.run(test_query, table_name=f"text_pages_{index_id}")
            print(f"\n🤖 Response:")
            print(response)
        except Exception as e:
            print(f"❌ Error testing index: {e}")

    def batch_create_from_config(self, config_file: str) -> None:
        """Create index from batch configuration file.

        Expected keys: index_name, index_description, documents (list of
        paths) and processing (metadata dict).  Missing files are skipped
        with a warning.
        """
        try:
            with open(config_file, 'r') as f:
                batch_config = json.load(f)
            index_name = batch_config.get("index_name", "Batch Index")
            index_description = batch_config.get("index_description", "")
            documents = batch_config.get("documents", [])
            processing_config = batch_config.get("processing", {})
            if not documents:
                print("❌ No documents specified in batch configuration")
                return
            # Validate documents exist
            valid_documents = []
            for doc_path in documents:
                if os.path.exists(doc_path):
                    valid_documents.append(doc_path)
                else:
                    print(f"⚠️ Document not found: {doc_path}")
            if not valid_documents:
                print("❌ No valid documents found")
                return
            print(f"🚀 Creating batch index: {index_name}")
            print(f"📄 Processing {len(valid_documents)} documents...")
            # Create index
            index_id = self.db.create_index(
                name=index_name,
                description=index_description,
                metadata=processing_config
            )
            # Add documents
            for doc_path in valid_documents:
                filename = os.path.basename(doc_path)
                self.db.add_document_to_index(index_id, filename, doc_path)
            # Process documents
            self.pipeline.process_documents(valid_documents)
            print(f"✅ Batch index '{index_name}' created successfully!")
            print(f"Index ID: {index_id}")
        except Exception as e:
            print(f"❌ Error creating batch index: {e}")
            import traceback
            traceback.print_exc()
def create_sample_batch_config():
    """Write a ready-to-edit batch configuration to batch_indexing_config.json.

    The file is written to the current working directory.  Fix: the file is
    now written with explicit UTF-8 encoding instead of the platform default.
    """
    sample_config = {
        "index_name": "Sample Batch Index",
        "index_description": "Example batch index configuration",
        "documents": [
            "./rag_system/documents/invoice_1039.pdf",
            "./rag_system/documents/invoice_1041.pdf"
        ],
        "processing": {
            "chunk_size": 512,
            "chunk_overlap": 64,
            "enable_enrich": True,
            "enable_latechunk": True,
            "enable_docling": True,
            "embedding_model": "Qwen/Qwen3-Embedding-0.6B",
            "generation_model": "qwen3:0.6b",
            "retrieval_mode": "hybrid",
            "window_size": 2
        }
    }
    with open("batch_indexing_config.json", "w", encoding="utf-8") as f:
        json.dump(sample_config, f, indent=2)
    print("📄 Sample batch configuration created: batch_indexing_config.json")
def main():
    """Command-line entry point: batch mode, sample-config mode, or interactive."""
    import traceback
    parser = argparse.ArgumentParser(description="LocalGPT Index Creation Tool")
    parser.add_argument("--batch", help="Batch configuration file", type=str)
    parser.add_argument("--config", help="Custom pipeline configuration file", type=str)
    parser.add_argument("--create-sample", action="store_true", help="Create sample batch config")
    args = parser.parse_args()

    # Sample-config generation needs no pipeline setup at all.
    if args.create_sample:
        create_sample_batch_config()
        return

    try:
        tool = IndexCreator(config_path=args.config)
        if args.batch:
            tool.batch_create_from_config(args.batch)
        else:
            tool.create_index_interactive()
    except KeyboardInterrupt:
        print("\n\n❌ Operation cancelled by user.")
    except Exception as e:
        print(f"❌ Unexpected error: {e}")
        traceback.print_exc()
if __name__ == "__main__":
main() | {
"repo_id": "PromtEngineer/localGPT",
"file_path": "create_index_script.py",
"license": "MIT License",
"lines": 297,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PromtEngineer/localGPT:demo_batch_indexing.py | #!/usr/bin/env python3
"""
Demo Batch Indexing Script for LocalGPT RAG System
This script demonstrates how to perform batch indexing of multiple documents
using configuration files. It's designed to showcase the full capabilities
of the indexing pipeline with various configuration options.
Usage:
python demo_batch_indexing.py --config batch_indexing_config.json
python demo_batch_indexing.py --create-sample-config
python demo_batch_indexing.py --help
"""
import os
import sys
import json
import argparse
import time
import logging
from typing import List, Dict, Any, Optional
from pathlib import Path
from datetime import datetime
# Add the project root to the path so we can import rag_system modules
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
try:
from rag_system.main import PIPELINE_CONFIGS
from rag_system.pipelines.indexing_pipeline import IndexingPipeline
from rag_system.utils.ollama_client import OllamaClient
from backend.database import ChatDatabase
except ImportError as e:
print(f"❌ Error importing required modules: {e}")
print("Please ensure you're running this script from the project root directory.")
sys.exit(1)
# Configure logging
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s | %(levelname)-7s | %(name)s | %(message)s",
)
class BatchIndexingDemo:
"""Demonstration of batch indexing capabilities."""
def __init__(self, config_path: str):
    """Initialize the batch indexing demo.

    Loads the batch config (exits the process on a missing/invalid file),
    merges it over the default pipeline config, and wires up the database,
    Ollama client and indexing pipeline.
    """
    self.config_path = config_path
    self.config = self._load_config()  # sys.exit(1) on failure
    self.db = ChatDatabase()
    # Initialize Ollama client
    self.ollama_client = OllamaClient()
    # Initialize pipeline with merged configuration
    self.pipeline_config = self._merge_configurations()
    self.pipeline = IndexingPipeline(
        self.pipeline_config,
        self.ollama_client,
        # Per-batch model overrides, with small defaults when absent.
        self.config.get("ollama_config", {
            "generation_model": "qwen3:0.6b",
            "embedding_model": "qwen3:0.6b"
        })
    )
def _load_config(self) -> Dict[str, Any]:
"""Load batch indexing configuration from file."""
try:
with open(self.config_path, 'r') as f:
config = json.load(f)
print(f"✅ Loaded configuration from {self.config_path}")
return config
except FileNotFoundError:
print(f"❌ Configuration file not found: {self.config_path}")
sys.exit(1)
except json.JSONDecodeError as e:
print(f"❌ Invalid JSON in configuration file: {e}")
sys.exit(1)
def _merge_configurations(self) -> Dict[str, Any]:
    """Overlay batch-specific pipeline settings on the default pipeline config."""

    def _overlay(base: dict, override: dict) -> dict:
        # Recursive merge: nested dicts combine, everything else is replaced.
        merged = base.copy()
        for key, value in override.items():
            if isinstance(value, dict) and isinstance(merged.get(key), dict):
                merged[key] = _overlay(merged[key], value)
            else:
                merged[key] = value
        return merged

    defaults = PIPELINE_CONFIGS.get("default", {}).copy()
    return _overlay(defaults, self.config.get("pipeline_settings", {}))
def validate_documents(self, documents: List[str]) -> List[str]:
"""Validate and filter document paths."""
valid_documents = []
print(f"📋 Validating {len(documents)} documents...")
for doc_path in documents:
# Handle relative paths
if not os.path.isabs(doc_path):
doc_path = os.path.abspath(doc_path)
if os.path.exists(doc_path):
# Check file extension
ext = Path(doc_path).suffix.lower()
if ext in ['.pdf', '.txt', '.docx', '.md', '.html', '.htm']:
valid_documents.append(doc_path)
print(f" ✅ {doc_path}")
else:
print(f" ⚠️ Unsupported file type: {doc_path}")
else:
print(f" ❌ File not found: {doc_path}")
print(f"📊 {len(valid_documents)} valid documents found")
return valid_documents
def create_indexes(self) -> List[str]:
"""Create multiple indexes based on configuration."""
indexes = self.config.get("indexes", [])
created_indexes = []
for index_config in indexes:
index_id = self.create_single_index(index_config)
if index_id:
created_indexes.append(index_id)
return created_indexes
def create_single_index(self, index_config: Dict[str, Any]) -> Optional[str]:
"""Create a single index from configuration."""
try:
# Extract index metadata
index_name = index_config.get("name", "Unnamed Index")
index_description = index_config.get("description", "")
documents = index_config.get("documents", [])
if not documents:
print(f"⚠️ No documents specified for index '{index_name}', skipping...")
return None
# Validate documents
valid_documents = self.validate_documents(documents)
if not valid_documents:
print(f"❌ No valid documents found for index '{index_name}'")
return None
print(f"\n🚀 Creating index: {index_name}")
print(f"📄 Processing {len(valid_documents)} documents")
# Create index record in database
index_metadata = {
"created_by": "demo_batch_indexing.py",
"created_at": datetime.now().isoformat(),
"document_count": len(valid_documents),
"config_used": index_config.get("processing_options", {})
}
index_id = self.db.create_index(
name=index_name,
description=index_description,
metadata=index_metadata
)
# Add documents to index
for doc_path in valid_documents:
filename = os.path.basename(doc_path)
self.db.add_document_to_index(index_id, filename, doc_path)
# Process documents through pipeline
start_time = time.time()
self.pipeline.process_documents(valid_documents)
processing_time = time.time() - start_time
print(f"✅ Index '{index_name}' created successfully!")
print(f" Index ID: {index_id}")
print(f" Processing time: {processing_time:.2f} seconds")
print(f" Documents processed: {len(valid_documents)}")
return index_id
except Exception as e:
print(f"❌ Error creating index '{index_name}': {e}")
import traceback
traceback.print_exc()
return None
def demonstrate_features(self):
"""Demonstrate various indexing features."""
print("\n🎯 Batch Indexing Demo Features:")
print("=" * 50)
# Show configuration
print(f"📋 Configuration file: {self.config_path}")
print(f"📊 Number of indexes to create: {len(self.config.get('indexes', []))}")
# Show pipeline settings
pipeline_settings = self.config.get("pipeline_settings", {})
if pipeline_settings:
print("\n⚙️ Pipeline Settings:")
for key, value in pipeline_settings.items():
print(f" {key}: {value}")
# Show model configuration
ollama_config = self.config.get("ollama_config", {})
if ollama_config:
print("\n🤖 Model Configuration:")
for key, value in ollama_config.items():
print(f" {key}: {value}")
def run_demo(self):
"""Run the complete batch indexing demo."""
print("🚀 LocalGPT Batch Indexing Demo")
print("=" * 50)
# Show demo features
self.demonstrate_features()
# Create indexes
print(f"\n📚 Starting batch indexing process...")
start_time = time.time()
created_indexes = self.create_indexes()
total_time = time.time() - start_time
# Summary
print(f"\n📊 Batch Indexing Summary")
print("=" * 50)
print(f"✅ Successfully created {len(created_indexes)} indexes")
print(f"⏱️ Total processing time: {total_time:.2f} seconds")
if created_indexes:
print(f"\n📋 Created Indexes:")
for i, index_id in enumerate(created_indexes, 1):
index_info = self.db.get_index(index_id)
if index_info:
print(f" {i}. {index_info['name']} ({index_id[:8]}...)")
print(f" Documents: {len(index_info.get('documents', []))}")
print(f"\n🎉 Demo completed successfully!")
print(f"💡 You can now use these indexes in the LocalGPT interface.")
def create_sample_config():
    """Create a comprehensive sample configuration file.

    Writes ``batch_indexing_config.json`` to the current working directory and
    prints follow-up instructions for running the demo against it.
    """
    sample_config = {
        "description": "Demo batch indexing configuration showcasing various features",
        "pipeline_settings": {
            "embedding_model_name": "Qwen/Qwen3-Embedding-0.6B",
            "indexing": {
                "embedding_batch_size": 50,
                "enrichment_batch_size": 25,
                "enable_progress_tracking": True
            },
            "contextual_enricher": {
                "enabled": True,
                "window_size": 2,
                "model_name": "qwen3:0.6b"
            },
            "chunking": {
                "chunk_size": 512,
                "chunk_overlap": 64,
                "enable_latechunk": True,
                "enable_docling": True
            },
            "retrievers": {
                "dense": {
                    "enabled": True,
                    "lancedb_table_name": "demo_text_pages"
                },
                "bm25": {
                    "enabled": True,
                    "index_name": "demo_bm25_index"
                }
            },
            "storage": {
                "lancedb_uri": "./index_store/lancedb",
                "bm25_path": "./index_store/bm25"
            }
        },
        "ollama_config": {
            "generation_model": "qwen3:0.6b",
            "embedding_model": "qwen3:0.6b"
        },
        "indexes": [
            {
                "name": "Sample Invoice Collection",
                "description": "Demo index containing sample invoice documents",
                "documents": [
                    "./rag_system/documents/invoice_1039.pdf",
                    "./rag_system/documents/invoice_1041.pdf"
                ],
                "processing_options": {
                    "chunk_size": 512,
                    "enable_enrichment": True,
                    "retrieval_mode": "hybrid"
                }
            },
            {
                "name": "Research Papers Demo",
                "description": "Demo index for research papers and whitepapers",
                "documents": [
                    "./rag_system/documents/Newwhitepaper_Agents2.pdf"
                ],
                "processing_options": {
                    "chunk_size": 1024,
                    "enable_enrichment": True,
                    "retrieval_mode": "dense"
                }
            }
        ]
    }
    config_filename = "batch_indexing_config.json"
    # Explicit encoding keeps the file UTF-8 regardless of platform defaults.
    with open(config_filename, "w", encoding="utf-8") as f:
        json.dump(sample_config, f, indent=2)
    print(f"✅ Sample configuration created: {config_filename}")
    print("📝 Edit this file to customize your batch indexing setup")
    print(f"🚀 Run: python demo_batch_indexing.py --config {config_filename}")
def main():
    """Main entry point for the demo script."""
    arg_parser = argparse.ArgumentParser(
        description="LocalGPT Batch Indexing Demo",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
python demo_batch_indexing.py --config batch_indexing_config.json
python demo_batch_indexing.py --create-sample-config
This demo showcases the advanced batch indexing capabilities of LocalGPT,
including multi-index creation, advanced configuration options, and
comprehensive processing pipelines.
"""
    )
    arg_parser.add_argument(
        "--config",
        type=str,
        default="batch_indexing_config.json",
        help="Path to batch indexing configuration file"
    )
    arg_parser.add_argument(
        "--create-sample-config",
        action="store_true",
        help="Create a sample configuration file"
    )
    opts = arg_parser.parse_args()
    # Sample-config generation is a standalone action: do it and stop.
    if opts.create_sample_config:
        create_sample_config()
        return
    # Guard clause: refuse to continue without a readable configuration file.
    if not os.path.exists(opts.config):
        print(f"❌ Configuration file not found: {opts.config}")
        print(f"💡 Create a sample config with: python {sys.argv[0]} --create-sample-config")
        sys.exit(1)
    try:
        BatchIndexingDemo(opts.config).run_demo()
    except KeyboardInterrupt:
        print("\n\n❌ Demo cancelled by user.")
    except Exception as e:
        print(f"❌ Demo failed: {e}")
        import traceback
        traceback.print_exc()
if __name__ == "__main__":
main() | {
"repo_id": "PromtEngineer/localGPT",
"file_path": "demo_batch_indexing.py",
"license": "MIT License",
"lines": 323,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PromtEngineer/localGPT:rag_system/agent/loop.py | from typing import Dict, Any, Optional
import json
import time, asyncio, os
import numpy as np
import concurrent.futures
from cachetools import TTLCache, LRUCache
from rag_system.utils.ollama_client import OllamaClient
from rag_system.pipelines.retrieval_pipeline import RetrievalPipeline
from rag_system.agent.verifier import Verifier
from rag_system.retrieval.query_transformer import QueryDecomposer, GraphQueryTranslator
from rag_system.retrieval.retrievers import GraphRetriever
class Agent:
    """
    The main agent, now fully wired to use a live Ollama client.

    Responsibilities visible in this module:
      * triage incoming queries (overview-based routing first, LLM fallback second)
      * optional query decomposition with parallel sub-query retrieval
      * semantic result caching (embedding cosine similarity over a TTL cache)
      * per-session conversation history (LRU-bounded)
      * optional answer verification via the Verifier
    """
    def __init__(self, pipeline_configs: Dict[str, Dict], llm_client: OllamaClient, ollama_config: Dict[str, str]):
        self.pipeline_configs = pipeline_configs
        self.llm_client = llm_client
        self.ollama_config = ollama_config
        gen_model = self.ollama_config["generation_model"]
        # Initialize the single, persistent retrieval pipeline for this agent
        self.retrieval_pipeline = RetrievalPipeline(pipeline_configs, self.llm_client, self.ollama_config)
        self.verifier = Verifier(llm_client, gen_model)
        self.query_decomposer = QueryDecomposer(llm_client, gen_model)
        # 🚀 OPTIMIZED: TTL cache now stores embeddings for semantic matching
        self._cache_max_size = 100 # fallback size limit for manual eviction helper
        self._query_cache: TTLCache = TTLCache(maxsize=self._cache_max_size, ttl=300)
        self.semantic_cache_threshold = self.pipeline_configs.get("semantic_cache_threshold", 0.98)
        # If set to "session", semantic-cache hits will be restricted to the same chat session.
        # Otherwise (default "global") answers can be reused across sessions.
        self.cache_scope = self.pipeline_configs.get("cache_scope", "global") # 'global' or 'session'
        # 🚀 NEW: In-memory store for conversational history per session
        self.chat_histories: LRUCache = LRUCache(maxsize=100) # Stores history for 100 recent sessions
        graph_config = self.pipeline_configs.get("graph_strategy", {})
        if graph_config.get("enabled"):
            self.graph_query_translator = GraphQueryTranslator(llm_client, gen_model)
            self.graph_retriever = GraphRetriever(graph_config["graph_path"])
            print("Agent initialized with live GraphRAG capabilities.")
        else:
            print("Agent initialized (GraphRAG disabled).")
        # ---- Load document overviews for fast routing ----
        self._global_overview_path = os.path.join("index_store", "overviews", "overviews.jsonl")
        self.doc_overviews: list[str] = []
        self._current_overview_session: str | None = None # cache key to avoid rereading on every query
        self._load_overviews(self._global_overview_path)
    def _load_overviews(self, path: str):
        """Helper to load overviews from a .jsonl file into self.doc_overviews.

        Missing files are silently ignored; malformed lines are skipped.
        """
        import json, os
        self.doc_overviews.clear()
        if not os.path.exists(path):
            return
        try:
            with open(path, encoding="utf-8") as fh:
                for line in fh:
                    try:
                        rec = json.loads(line)
                        if isinstance(rec, dict) and rec.get("overview"):
                            self.doc_overviews.append(rec["overview"].strip())
                    except Exception:
                        continue
            print(f"📖 Loaded {len(self.doc_overviews)} overviews from {path}")
        except Exception as e:
            print(f"⚠️ Failed to load document overviews from {path}: {e}")
    def load_overviews_for_indexes(self, idx_ids: list[str]):
        """Aggregate overviews for the given indexes or fall back to global file."""
        import os, json
        aggregated: list[str] = []
        for idx in idx_ids:
            path = os.path.join("index_store", "overviews", f"{idx}.jsonl")
            if os.path.exists(path):
                try:
                    with open(path, encoding="utf-8") as fh:
                        for line in fh:
                            if not line.strip():
                                continue
                            try:
                                rec = json.loads(line)
                                ov = rec.get("overview", "").strip()
                                if ov:
                                    aggregated.append(ov)
                            except json.JSONDecodeError:
                                continue
                except Exception as e:
                    print(f"⚠️ Error reading {path}: {e}")
        if aggregated:
            self.doc_overviews = aggregated
            self._current_overview_session = "|".join(idx_ids) # cache composite key so no overwrite
            print(f"📖 Loaded {len(aggregated)} overviews for indexes {[i[:8] for i in idx_ids]}")
        else:
            print(f"⚠️ No per-index overviews found for {idx_ids}. Using global overview file.")
            self._load_overviews(self._global_overview_path)
            self._current_overview_session = "GLOBAL"
    def _cosine_similarity(self, v1: np.ndarray, v2: np.ndarray) -> float:
        """Computes cosine similarity between two vectors.

        Raises ValueError on shape mismatch; returns 0.0 for zero vectors.
        """
        if not isinstance(v1, np.ndarray): v1 = np.array(v1)
        if not isinstance(v2, np.ndarray): v2 = np.array(v2)
        if v1.shape != v2.shape:
            raise ValueError("Vectors must have the same shape for cosine similarity.")
        if np.all(v1 == 0) or np.all(v2 == 0):
            return 0.0
        dot_product = np.dot(v1, v2)
        norm_v1 = np.linalg.norm(v1)
        norm_v2 = np.linalg.norm(v2)
        # Avoid division by zero
        if norm_v1 == 0 or norm_v2 == 0:
            return 0.0
        return dot_product / (norm_v1 * norm_v2)
    def _find_in_semantic_cache(self, query_embedding: np.ndarray, session_id: Optional[str] = None) -> Optional[Dict[str, Any]]:
        """Finds a semantically similar query in the cache.

        Returns the cached result dict when a cached embedding's cosine
        similarity meets self.semantic_cache_threshold, else None.
        """
        if not self._query_cache or query_embedding is None:
            return None
        # Linear scan over cached entries; cache is bounded (maxsize=100, ttl=300s).
        for key, cached_item in self._query_cache.items():
            cached_embedding = cached_item.get('embedding')
            if cached_embedding is None:
                continue
            # Respect cache scoping: if scope is session-level, skip results from other sessions
            if self.cache_scope == "session" and session_id is not None:
                if cached_item.get("session_id") != session_id:
                    continue
            try:
                similarity = self._cosine_similarity(query_embedding, cached_embedding)
                if similarity >= self.semantic_cache_threshold:
                    print(f"🚀 Semantic cache hit! Similarity: {similarity:.3f} with cached query '{key}'")
                    return cached_item.get('result')
            except ValueError:
                # In case of shape mismatch, just skip
                continue
        return None
    def _format_query_with_history(self, query: str, history: list) -> str:
        """Formats the user query with conversation history for context."""
        if not history:
            return query
        formatted_history = "\n".join([f"User: {turn['query']}\nAssistant: {turn['answer']}" for turn in history])
        prompt = f"""
Given the following conversation history, answer the user's latest query. The history provides context for resolving pronouns or follow-up questions.
--- Conversation History ---
{formatted_history}
---
Latest User Query: "{query}"
"""
        return prompt
    # ---------------- Asynchronous triage using Ollama ----------------
    async def _triage_query_async(self, query: str, history: list) -> str:
        """Classify *query* as 'rag_query', 'direct_answer', or 'graph_query'."""
        print(f"🔍 ROUTING DEBUG: Starting triage for query: '{query[:100]}...'")
        # 1️⃣ Fast routing using precomputed overviews (if available)
        print(f"📖 ROUTING DEBUG: Attempting overview-based routing...")
        routed = self._route_via_overviews(query)
        if routed:
            print(f"✅ ROUTING DEBUG: Overview routing decided: '{routed}'")
            return routed
        else:
            print(f"❌ ROUTING DEBUG: Overview routing returned None, falling back to LLM triage")
        if history:
            # If there's history, the query is likely a follow-up, so we default to RAG.
            # A more advanced implementation could use an LLM to see if the new query
            # changes the topic entirely.
            print(f"📜 ROUTING DEBUG: History exists, defaulting to 'rag_query'")
            return "rag_query"
        print(f"🤖 ROUTING DEBUG: No history, using LLM fallback triage...")
        prompt = f"""
You are a query routing expert. Analyze the user's question and decide which backend should handle it.
Choose **exactly one** category:
1. "rag_query" – Questions about the user's uploaded documents or specific document content that should be searched. Examples: "What is the invoice amount?", "Summarize the research paper", "What companies are mentioned?"
2. "direct_answer" – General knowledge questions, greetings, or queries unrelated to uploaded documents. Examples: "Who are the CEOs of Tesla and Amazon?", "What is the capital of France?", "Hello", "Explain quantum physics"
3. "graph_query" – Specific factual relations for knowledge-graph lookup (currently limited use)
IMPORTANT: For general world knowledge about well-known companies, people, or facts NOT related to uploaded documents, choose "direct_answer".
User query: "{query}"
Respond with JSON: {{"category": "<your_choice>"}}
"""
        resp = self.llm_client.generate_completion(
            model=self.ollama_config["generation_model"], prompt=prompt, format="json"
        )
        try:
            data = json.loads(resp.get("response", "{}"))
            decision = data.get("category", "rag_query")
            print(f"🤖 ROUTING DEBUG: LLM fallback triage decided: '{decision}'")
            return decision
        except json.JSONDecodeError:
            print(f"❌ ROUTING DEBUG: LLM fallback triage JSON parsing failed, defaulting to 'rag_query'")
            return "rag_query"
    def _run_graph_query(self, query: str, history: list) -> Dict[str, Any]:
        """Answer via the knowledge graph, falling back to standard retrieval
        when translation fails or the graph returns no results."""
        contextual_query = self._format_query_with_history(query, history)
        structured_query = self.graph_query_translator.translate(contextual_query)
        if not structured_query.get("start_node"):
            return self.retrieval_pipeline.run(contextual_query, window_size_override=0)
        results = self.graph_retriever.retrieve(structured_query)
        if not results:
            return self.retrieval_pipeline.run(contextual_query, window_size_override=0)
        answer = ", ".join([res['details']['node_id'] for res in results])
        return {"answer": f"From the knowledge graph: {answer}", "source_documents": results}
    # NOTE(review): appears unused — _run_async keys the cache on the raw query
    # string directly (see the insert near the end of _run_async); confirm
    # before removing.
    def _get_cache_key(self, query: str, query_type: str) -> str:
        """Generate a cache key for the query"""
        # Simple cache key based on query and type
        return f"{query_type}:{query.strip().lower()}"
    # NOTE(review): appears unused — _run_async inserts into _query_cache
    # directly, and TTLCache already enforces maxsize/ttl on its own, making
    # this manual FIFO eviction redundant; confirm before removing.
    def _cache_result(self, cache_key: str, result: Dict[str, Any], session_id: Optional[str] = None):
        """Cache a result with size limit"""
        if len(self._query_cache) >= self._cache_max_size:
            # Remove oldest entry (simple FIFO eviction)
            oldest_key = next(iter(self._query_cache))
            del self._query_cache[oldest_key]
        self._query_cache[cache_key] = {
            'result': result,
            'timestamp': time.time(),
            'session_id': session_id
        }
    # ---------------- Public sync API (kept for backwards compatibility) --------------
    def run(self, query: str, table_name: str = None, session_id: str = None, compose_sub_answers: Optional[bool] = None, query_decompose: Optional[bool] = None, ai_rerank: Optional[bool] = None, context_expand: Optional[bool] = None, verify: Optional[bool] = None, retrieval_k: Optional[int] = None, context_window_size: Optional[int] = None, reranker_top_k: Optional[int] = None, search_type: Optional[str] = None, dense_weight: Optional[float] = None, max_retries: int = 1, event_callback: Optional[callable] = None) -> Dict[str, Any]:
        """Synchronous helper. If *event_callback* is supplied, important
        milestones will be forwarded to that callable as
        event_callback(phase:str, payload:Any)

        NOTE(review): ``max_retries`` is accepted but never referenced in
        _run_async — confirm whether retry support was dropped intentionally.
        """
        return asyncio.run(self._run_async(query, table_name, session_id, compose_sub_answers, query_decompose, ai_rerank, context_expand, verify, retrieval_k, context_window_size, reranker_top_k, search_type, dense_weight, max_retries, event_callback))
    # ---------------- Main async implementation --------------------------------------
    async def _run_async(self, query: str, table_name: str = None, session_id: str = None, compose_sub_answers: Optional[bool] = None, query_decompose: Optional[bool] = None, ai_rerank: Optional[bool] = None, context_expand: Optional[bool] = None, verify: Optional[bool] = None, retrieval_k: Optional[int] = None, context_window_size: Optional[int] = None, reranker_top_k: Optional[int] = None, search_type: Optional[str] = None, dense_weight: Optional[float] = None, max_retries: int = 1, event_callback: Optional[callable] = None) -> Dict[str, Any]:
        """Core async implementation behind run(); see run() for the parameter
        contract. Flow: triage -> runtime config overrides -> semantic cache
        check -> (direct answer | graph | RAG with optional decomposition) ->
        optional verification -> history + cache update."""
        start_time = time.time()
        # Emit analyze event at the start
        if event_callback:
            event_callback("analyze", {"query": query})
        # 🚀 NEW: Get conversation history
        history = self.chat_histories.get(session_id, []) if session_id else []
        # 🔄 Refresh overviews for this session if available
        # if session_id and session_id != getattr(self, "_current_overview_session", None):
        #     candidate_path = os.path.join("index_store", "overviews", f"{session_id}.jsonl")
        #     if os.path.exists(candidate_path):
        #         self._load_overviews(candidate_path)
        #         self._current_overview_session = session_id
        #     else:
        #         # Fall back to global overviews if per-session file not found
        #         if self._current_overview_session != "GLOBAL":
        #             self._load_overviews(self._global_overview_path)
        #             self._current_overview_session = "GLOBAL"
        query_type = await self._triage_query_async(query, history)
        print(f"🎯 ROUTING DEBUG: Final triage decision: '{query_type}'")
        print(f"Agent Triage Decision: '{query_type}'")
        # Create a contextual query that includes history for most operations
        contextual_query = self._format_query_with_history(query, history)
        raw_query = query.strip()
        # --- Apply runtime AI reranker override (must happen before any retrieval calls) ---
        if ai_rerank is not None:
            rr_cfg = self.retrieval_pipeline.config.setdefault("reranker", {})
            rr_cfg["enabled"] = bool(ai_rerank)
            if ai_rerank:
                # Ensure the pipeline knows to use the external ColBERT reranker
                rr_cfg.setdefault("type", "ai")
                rr_cfg.setdefault("strategy", "rerankers-lib")
                rr_cfg.setdefault(
                    "model_name",
                    # Falls back to ColBERT-small if the caller did not supply one
                    self.ollama_config.get("rerank_model", "answerai-colbert-small-v1"),
                )
        # --- Apply runtime retrieval configuration overrides ---
        # NOTE(review): these mutate the shared retrieval_pipeline.config, so
        # overrides persist for subsequent calls on this agent instance.
        if retrieval_k is not None:
            self.retrieval_pipeline.config["retrieval_k"] = retrieval_k
            print(f"🔍 Retrieval K set to: {retrieval_k}")
        if context_window_size is not None:
            self.retrieval_pipeline.config["context_window_size"] = context_window_size
            print(f"🔍 Context window size set to: {context_window_size}")
        if reranker_top_k is not None:
            rr_cfg = self.retrieval_pipeline.config.setdefault("reranker", {})
            rr_cfg["top_k"] = reranker_top_k
            print(f"🔍 Reranker top K set to: {reranker_top_k}")
        if search_type is not None:
            retrieval_cfg = self.retrieval_pipeline.config.setdefault("retrieval", {})
            retrieval_cfg["search_type"] = search_type
            print(f"🔍 Search type set to: {search_type}")
        if dense_weight is not None:
            dense_cfg = self.retrieval_pipeline.config.setdefault("retrieval", {}).setdefault("dense", {})
            dense_cfg["weight"] = dense_weight
            print(f"🔍 Dense search weight set to: {dense_weight}")
        query_embedding = None
        # 🚀 OPTIMIZED: Semantic Cache Check
        if query_type != "direct_answer":
            text_embedder = self.retrieval_pipeline._get_text_embedder()
            if text_embedder:
                # The embedder expects a list, so we wrap the *raw* query only.
                query_embedding_list = text_embedder.create_embeddings([raw_query])
                if isinstance(query_embedding_list, np.ndarray):
                    query_embedding = query_embedding_list[0]
                else:
                    # Some embedders return a list – convert if necessary
                    query_embedding = np.array(query_embedding_list[0])
                cached_result = self._find_in_semantic_cache(query_embedding, session_id)
                if cached_result:
                    # Update history even on cache hit
                    if session_id:
                        history.append({"query": query, "answer": cached_result.get('answer', 'Cached answer not found.')})
                        self.chat_histories[session_id] = history
                    return cached_result
        if query_type == "direct_answer":
            print(f"✅ ROUTING DEBUG: Executing DIRECT_ANSWER path")
            if event_callback:
                event_callback("direct_answer", {})
            prompt = (
                "You are a helpful assistant. Read the conversation history below. "
                "If the answer to the user's latest question is already present in the history, quote it concisely. "
                "Otherwise answer from your general world knowledge. Provide a short, factual reply (1‒2 sentences).\n\n"
                f"Conversation + Latest Question:\n{contextual_query}\n\nAssistant:"
            )
            async def _run_stream():
                # Streams tokens from the LLM, forwarding each to event_callback.
                answer_parts: list[str] = []
                def _blocking_stream():
                    for tok in self.llm_client.stream_completion(
                        model=self.ollama_config["generation_model"], prompt=prompt
                    ):
                        answer_parts.append(tok)
                        if event_callback:
                            event_callback("token", {"text": tok})
                # Run the blocking generator in a thread so the event loop stays responsive
                await asyncio.to_thread(_blocking_stream)
                return "".join(answer_parts)
            final_answer = await _run_stream()
            result = {"answer": final_answer, "source_documents": []}
        elif query_type == "graph_query" and hasattr(self, 'graph_retriever'):
            print(f"✅ ROUTING DEBUG: Executing GRAPH_QUERY path")
            result = self._run_graph_query(query, history)
        # --- RAG Query Processing with Optional Query Decomposition ---
        else: # Default to rag_query
            print(f"✅ ROUTING DEBUG: Executing RAG_QUERY path (query_type='{query_type}')")
            query_decomp_config = self.pipeline_configs.get("query_decomposition", {})
            decomp_enabled = query_decomp_config.get("enabled", False)
            if query_decompose is not None:
                decomp_enabled = query_decompose
            if decomp_enabled:
                print(f"\n--- Query Decomposition Enabled ---")
                # Use the raw user query (without conversation history) for decomposition to avoid leakage of prior context
                # Pass the last 5 conversation turns for context resolution within the decomposer
                recent_history = history[-5:] if history else []
                sub_queries = self.query_decomposer.decompose(raw_query, recent_history)
                if event_callback:
                    event_callback("decomposition", {"sub_queries": sub_queries})
                print(f"Original query: '{query}' (Contextual: '{contextual_query}')")
                print(f"Decomposed into {len(sub_queries)} sub-queries: {sub_queries}")
                # Emit retrieval_started event before any retrievals
                if event_callback:
                    event_callback("retrieval_started", {"count": len(sub_queries)})
                # If decomposition produced only a single sub-query, skip the
                # parallel/composition machinery for efficiency.
                if len(sub_queries) == 1:
                    print("--- Only one sub-query after decomposition; using direct retrieval path ---")
                    result = self.retrieval_pipeline.run(
                        sub_queries[0],
                        table_name,
                        0 if context_expand is False else None,
                        event_callback=event_callback
                    )
                    if event_callback:
                        event_callback("single_query_result", result)
                    # Emit retrieval_done and rerank_done for single sub-query
                    if event_callback:
                        event_callback("retrieval_done", {"count": 1})
                        event_callback("rerank_started", {"count": 1})
                        event_callback("rerank_done", {"count": 1})
                else:
                    compose_from_sub_answers = query_decomp_config.get("compose_from_sub_answers", True)
                    if compose_sub_answers is not None:
                        compose_from_sub_answers = compose_sub_answers
                    print(f"\n--- Processing {len(sub_queries)} sub-queries in parallel ---")
                    start_time_inner = time.time()
                    # Shared containers
                    sub_answers = [] # For two-stage composition
                    all_source_docs = [] # For single-stage aggregation
                    citations_seen = set()
                    # Emit rerank_started event before parallel retrievals (since each sub-query will rerank)
                    if event_callback:
                        event_callback("rerank_started", {"count": len(sub_queries)})
                    # Emit token chunks as soon as we receive them. The UI
                    # keeps answers separated by `index`, so interleaving is
                    # harmless and gives continuous feedback.
                    def make_cb(idx: int):
                        # Wraps event_callback so per-sub-query tokens carry their index.
                        def _cb(ev_type: str, payload):
                            if event_callback is None:
                                return
                            if ev_type == "token":
                                event_callback("sub_query_token", {"index": idx, "text": payload.get("text", ""), "question": sub_queries[idx]})
                            else:
                                event_callback(ev_type, payload)
                        return _cb
                    with concurrent.futures.ThreadPoolExecutor(max_workers=min(3, len(sub_queries))) as executor:
                        future_to_query = {
                            executor.submit(
                                self.retrieval_pipeline.run,
                                sub_query,
                                table_name,
                                0 if context_expand is False else None,
                                make_cb(i),
                            ): (i, sub_query)
                            for i, sub_query in enumerate(sub_queries)
                        }
                        for future in concurrent.futures.as_completed(future_to_query):
                            i, sub_query = future_to_query[future]
                            try:
                                sub_result = future.result()
                                print(f"✅ Sub-Query {i+1} completed: '{sub_query}'")
                                if event_callback:
                                    event_callback("sub_query_result", {
                                        "index": i,
                                        "query": sub_query,
                                        "answer": sub_result.get("answer", ""),
                                        "source_documents": sub_result.get("source_documents", []),
                                    })
                                if compose_from_sub_answers:
                                    sub_answers.append({
                                        "question": sub_query,
                                        "answer": sub_result.get("answer", "")
                                    })
                                    # Keep up to 5 citations per sub-query for traceability
                                    for doc in sub_result.get("source_documents", [])[:5]:
                                        if doc['chunk_id'] not in citations_seen:
                                            all_source_docs.append(doc)
                                            citations_seen.add(doc['chunk_id'])
                                else:
                                    # Aggregate unique docs (single-stage path)
                                    for doc in sub_result.get('source_documents', []):
                                        if doc['chunk_id'] not in citations_seen:
                                            all_source_docs.append(doc)
                                            citations_seen.add(doc['chunk_id'])
                            except Exception as e:
                                print(f"❌ Sub-Query {i+1} failed: '{sub_query}' - {e}")
                    parallel_time = time.time() - start_time_inner
                    print(f"🚀 Parallel processing completed in {parallel_time:.2f}s")
                    # Emit retrieval_done and rerank_done after all sub-queries are processed
                    if event_callback:
                        event_callback("retrieval_done", {"count": len(sub_queries)})
                        event_callback("rerank_done", {"count": len(sub_queries)})
                    if compose_from_sub_answers:
                        print("\n--- Composing final answer from sub-answers ---")
                        compose_prompt = f"""
You are an expert answer composer for a Retrieval-Augmented Generation (RAG) system.
Context:
• The ORIGINAL QUESTION from the user is shown below.
• That question was automatically decomposed into simpler SUB-QUESTIONS.
• Each sub-question has already been answered by an earlier step and the resulting Question→Answer pairs are provided to you in JSON.
Your task:
1. Read every sub-answer carefully.
2. Write a single, final answer to the ORIGINAL QUESTION **using only the information contained in the sub-answers**. Do NOT invent facts that are not present.
3. If the original question includes a comparison (e.g., "Which, A or B, …") clearly state the outcome (e.g., "A > B"). Quote concrete numbers when available.
4. If any aspect of the original question cannot be answered with the given sub-answers, explicitly say so (e.g., "The provided context does not mention …").
5. Keep the answer concise (≤ 5 sentences) and use a factual, third-person tone.
Input
------
ORIGINAL QUESTION:
"{contextual_query}"
SUB-ANSWERS (JSON):
{json.dumps(sub_answers, indent=2)}
------
FINAL ANSWER:
"""
                        # --- Stream composition answer token-by-token ---
                        answer_parts: list[str] = []
                        for tok in self.llm_client.stream_completion(
                            model=self.ollama_config["generation_model"],
                            prompt=compose_prompt,
                        ):
                            answer_parts.append(tok)
                            if event_callback:
                                event_callback("token", {"text": tok})
                        final_answer = "".join(answer_parts) or "Unable to generate an answer."
                        result = {
                            "answer": final_answer,
                            "source_documents": all_source_docs
                        }
                        if event_callback:
                            event_callback("final_answer", result)
                    else:
                        print(f"\n--- Aggregated {len(all_source_docs)} unique documents from all sub-queries ---")
                        if all_source_docs:
                            aggregated_context = "\n\n".join([doc['text'] for doc in all_source_docs])
                            final_answer = self.retrieval_pipeline._synthesize_final_answer(contextual_query, aggregated_context)
                            result = {
                                "answer": final_answer,
                                "source_documents": all_source_docs
                            }
                            if event_callback:
                                event_callback("final_answer", result)
                        else:
                            result = {
                                "answer": "I could not find relevant information to answer your question.",
                                "source_documents": []
                            }
                            if event_callback:
                                event_callback("final_answer", result)
            else:
                # Standard retrieval (single-query)
                # NOTE(review): this extra retrieve() only feeds the debug
                # printout below — retrieval_pipeline.run() performs its own
                # retrieval; consider removing once ordering is verified.
                retrieved_docs = (self.retrieval_pipeline.retriever.retrieve(
                    text_query=contextual_query,
                    table_name=table_name or self.retrieval_pipeline.storage_config["text_table_name"],
                    k=self.retrieval_pipeline.config.get("retrieval_k", 10),
                ) if hasattr(self.retrieval_pipeline, "retriever") and self.retrieval_pipeline.retriever else [])
                print("\n=== DEBUG: Original retrieval order ===")
                for i, d in enumerate(retrieved_docs[:10]):
                    snippet = (d.get('text','') or '')[:200].replace('\n',' ')
                    print(f"Orig[{i}] id={d.get('chunk_id')} dist={d.get('_distance','') or d.get('score','')} {snippet}")
                result = self.retrieval_pipeline.run(contextual_query, table_name, 0 if context_expand is False else None, event_callback=event_callback)
                # After run, result['source_documents'] is reranked list
                reranked_docs = result.get('source_documents', [])
                print("\n=== DEBUG: Reranked docs order ===")
                for i, d in enumerate(reranked_docs[:10]):
                    snippet = (d.get('text','') or '')[:200].replace('\n',' ')
                    print(f"ReRank[{i}] id={d.get('chunk_id')} score={d.get('rerank_score','')} {snippet}")
        # Verification step (simplified for now) - Skip in fast mode
        verification_enabled = self.pipeline_configs.get("verification", {}).get("enabled", True)
        if verify is not None:
            verification_enabled = verify
        if verification_enabled and result.get("source_documents"):
            context_str = "\n".join([doc['text'] for doc in result['source_documents']])
            verification = await self.verifier.verify_async(contextual_query, context_str, result['answer'])
            score = verification.confidence_score
            # Only include confidence details if we received a non-zero score (0 usually means JSON parse failure)
            if score > 0:
                result['answer'] += f" [Confidence: {score}%]"
                # Add warning only when the verifier explicitly reported low confidence / not grounded
                if (not verification.is_grounded) or score < 50:
                    result['answer'] += f" [Warning: Low confidence. Groundedness: {verification.is_grounded}]"
            else:
                # Skip appending any verifier note – 0 likely indicates a parser error
                print("⚠️ Verifier returned 0 confidence – likely JSON parse error; omitting tags.")
        else:
            print("🚀 Skipping verification for speed or lack of sources")
        # 🚀 NEW: Update history
        if session_id:
            history.append({"query": query, "answer": result['answer']})
            self.chat_histories[session_id] = history
        # 🚀 OPTIMIZED: Cache the result for future queries
        # NOTE(review): inserted directly (keyed by raw query) rather than via
        # _cache_result; TTLCache enforces maxsize/ttl itself.
        if query_type != "direct_answer" and query_embedding is not None:
            cache_key = raw_query # Key is for logging/debugging
            self._query_cache[cache_key] = {
                "embedding": query_embedding,
                "result": result,
                "session_id": session_id,
            }
        total_time = time.time() - start_time
        print(f"🚀 Total query processing time: {total_time:.2f}s")
        return result
    # ------------------------------------------------------------------
    def _route_via_overviews(self, query: str) -> str | None:
        """Use document overviews and a small model to decide routing.
        Returns 'rag_query', 'direct_answer', or None if unsure/disabled."""
        if not self.doc_overviews:
            print(f"📖 ROUTING DEBUG: No document overviews available, returning None")
            return None
        print(f"📖 ROUTING DEBUG: Found {len(self.doc_overviews)} document overviews, using LLM routing...")
        # Keep prompt concise: if more than 40 overviews, take first 40
        overviews_snip = self.doc_overviews[:40]
        # NOTE(review): overviews_block is built but never interpolated into
        # router_prompt below — the prompt hard-codes "Invoices, DeepSeek-V3
        # research papers"; confirm whether this is intentional.
        overviews_block = "\n".join(f"[{i+1}] {ov}" for i, ov in enumerate(overviews_snip))
        router_prompt = f"""Task: Route query to correct system.
Documents available: Invoices, DeepSeek-V3 research papers
Query: "{query}"
Is this query asking about:
A) Greetings/social: "Hi", "Hello", "Thanks", "What's up", "How are you"
B) General knowledge: "CEO of Tesla", "capital of France", "what is 2+2"
C) Document content: invoice amounts, DeepSeek-V3 details, companies mentioned
If A or B → {{"category": "direct_answer"}}
If C → {{"category": "rag_query"}}
Response:"""
        resp = self.llm_client.generate_completion(
            model=self.ollama_config["generation_model"], prompt=router_prompt, format="json"
        )
        try:
            raw_response = resp.get("response", "{}")
            print(f"📖 ROUTING DEBUG: Overview LLM raw response: '{raw_response[:200]}...'")
            data = json.loads(raw_response)
            decision = data.get("category", "rag_query")
            print(f"📖 ROUTING DEBUG: Overview routing final decision: '{decision}'")
            return decision
        except json.JSONDecodeError as e:
            print(f"❌ ROUTING DEBUG: Overview routing JSON parsing failed: {e}, defaulting to 'rag_query'")
            return "rag_query"
| {
"repo_id": "PromtEngineer/localGPT",
"file_path": "rag_system/agent/loop.py",
"license": "MIT License",
"lines": 567,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PromtEngineer/localGPT:rag_system/agent/verifier.py | import json
from rag_system.utils.ollama_client import OllamaClient
class VerificationResult:
    """Outcome of a groundedness check for a generated answer.

    Attributes:
        is_grounded: True when the answer is fully supported by the context.
        reasoning: Free-text explanation produced by the verifier model.
        verdict: Verifier label such as "SUPPORTED", "NOT_SUPPORTED" or
            "NEEDS_CLARIFICATION".
        confidence_score: Verifier confidence in percent (0-100).
    """

    def __init__(self, is_grounded: bool, reasoning: str, verdict: str, confidence_score: int):
        # Plain value object: each field is stored verbatim.
        self.confidence_score = confidence_score
        self.verdict = verdict
        self.reasoning = reasoning
        self.is_grounded = is_grounded
class Verifier:
    """
    Verifies if a generated answer is grounded in the provided context using Ollama.

    The verifier sends a few-shot fact-checking prompt to the configured model
    and parses a single-line JSON verdict into a VerificationResult.
    """
    def __init__(self, llm_client: OllamaClient, llm_model: str):
        # Keep the client and model name for use by verify_async.
        self.llm_client = llm_client
        self.llm_model = llm_model
        print(f"Initialized Verifier with Ollama model '{self.llm_model}'.")
    # Synchronous verify() method removed – async version is used everywhere.
    # --- Async wrapper ------------------------------------------------
    async def verify_async(self, query: str, context: str, answer: str) -> VerificationResult:
        """Async variant that calls the Ollama client asynchronously.

        Builds a few-shot prompt (three worked examples, then the task) and
        asks the model for a JSON verdict with is_grounded / verdict /
        reasoning / confidence_score fields.

        Args:
            query: The user question being verified.
            context: The retrieved source text; truncated to 4000 characters.
            answer: The generated answer to fact-check against the context.

        Returns:
            A VerificationResult. On any parse failure a pessimistic
            NOT_SUPPORTED result with confidence 0 is returned.
        """
        prompt = f"""
You are an automated fact-checker. Determine whether the ANSWER is fully supported by the CONTEXT and output a single line of JSON.
# EXAMPLES
<QUERY>
What color is the sky?
</QUERY>
<CONTEXT>
During the day, the sky appears blue due to Rayleigh scattering.
</CONTEXT>
<ANSWER>
The sky is blue during the day.
</ANSWER>
<OUTPUT>
{{"verdict": "SUPPORTED", "is_grounded": true, "reasoning": "The context explicitly supports that the sky is blue during the day.", "confidence_score": 100}}
</OUTPUT>
<QUERY>
Where are apples and oranges grown?
</QUERY>
<CONTEXT>
Apples are grown in orchards.
</CONTEXT>
<ANSWER>
Apples are grown in orchards and oranges are grown in groves.
</ANSWER>
<OUTPUT>
{{"verdict": "NOT_SUPPORTED", "is_grounded": false, "reasoning": "The context mentions orchards, but not oranges or groves.", "confidence_score": 80}}
</OUTPUT>
<QUERY>
How long is the process?
</QUERY>
<CONTEXT>
The first step takes 3 days. The second step takes 5 days.
</CONTEXT>
<ANSWER>
The process takes 3 days.
</ANSWER>
<OUTPUT>
{{"verdict": "NEEDS_CLARIFICATION", "is_grounded": false, "reasoning": "The answer omits the 5 days required for the second step.", "confidence_score": 70}}
</OUTPUT>
# TASK
<QUERY>
"{query}"
</QUERY>
<CONTEXT>
"""
        prompt += context[:4000]  # Clamp to avoid huge prompts
        prompt += """
</CONTEXT>
<ANSWER>
"""
        prompt += answer
        prompt += """
</ANSWER>
<OUTPUT>
"""
        # format="json" asks Ollama to constrain the output to valid JSON.
        resp = await self.llm_client.generate_completion_async(self.llm_model, prompt, format="json")
        try:
            data = json.loads(resp.get("response", "{}"))
            # Missing fields default pessimistically (not grounded, score 0).
            return VerificationResult(
                is_grounded=data.get("is_grounded", False),
                reasoning=data.get("reasoning", "async parse error"),
                verdict=data.get("verdict", "NOT_SUPPORTED"),
                confidence_score=data.get('confidence_score', 0)
            )
        except (json.JSONDecodeError, AttributeError):
            # AttributeError covers resp not being a dict-like response.
            return VerificationResult(False, "Failed async parse", "NOT_SUPPORTED", 0)
| {
"repo_id": "PromtEngineer/localGPT",
"file_path": "rag_system/agent/verifier.py",
"license": "MIT License",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
PromtEngineer/localGPT:rag_system/api_server.py | import json
import http.server
import socketserver
from urllib.parse import urlparse, parse_qs
import os
import requests
import sys
import logging
# Add backend directory to path for database imports
backend_dir = os.path.join(os.path.dirname(__file__), '..', 'backend')
if backend_dir not in sys.path:
    sys.path.append(backend_dir)
from backend.database import ChatDatabase, generate_session_title
from rag_system.main import get_agent
from rag_system.factory import get_indexing_pipeline
# Initialize database connection once at module level
# Use auto-detection for environment-appropriate path
db = ChatDatabase()
# Get the desired agent mode from environment variables, defaulting to 'default'
# This allows us to easily switch between 'default', 'fast', 'react', etc.
AGENT_MODE = os.getenv("RAG_CONFIG_MODE", "default")
RAG_AGENT = get_agent(AGENT_MODE)
INDEXING_PIPELINE = get_indexing_pipeline(AGENT_MODE)
# --- Global Singleton for the RAG Agent ---
# The agent is initialized once when the server starts.
# This avoids reloading all the models on every request.
print("🧠 Initializing RAG Agent with MAXIMUM ACCURACY... (This may take a moment)")
if RAG_AGENT is None:
    # Without an agent no endpoint can be served, so abort startup entirely.
    print("❌ Critical error: RAG Agent could not be initialized. Exiting.")
    exit(1)
print("✅ RAG Agent initialized successfully with MAXIMUM ACCURACY.")
# ---
# Add helper near top after db & agent init
# -------------- Helper ----------------
def _apply_index_embedding_model(idx_ids):
"""Ensure retrieval pipeline uses the embedding model stored with the first index."""
debug_info = f"🔧 _apply_index_embedding_model called with idx_ids: {idx_ids}\n"
if not idx_ids:
debug_info += "⚠️ No index IDs provided\n"
with open("logs/embedding_debug.log", "a") as f:
f.write(debug_info)
return
try:
idx = db.get_index(idx_ids[0])
debug_info += f"🔧 Retrieved index: {idx.get('id')} with metadata: {idx.get('metadata', {})}\n"
model = (idx.get("metadata") or {}).get("embedding_model")
debug_info += f"🔧 Embedding model from metadata: {model}\n"
if model:
rp = RAG_AGENT.retrieval_pipeline
current_model = rp.config.get("embedding_model_name")
debug_info += f"🔧 Current embedding model: {current_model}\n"
rp.update_embedding_model(model)
debug_info += f"🔧 Updated embedding model to: {model}\n"
else:
debug_info += "⚠️ No embedding model found in metadata\n"
except Exception as e:
debug_info += f"⚠️ Could not apply index embedding model: {e}\n"
# Write debug info to file
with open("logs/embedding_debug.log", "a") as f:
f.write(debug_info)
def _get_table_name_for_session(session_id):
"""Get the correct vector table name for a session by looking up its linked indexes."""
logger = logging.getLogger(__name__)
if not session_id:
logger.info("❌ No session_id provided")
return None
try:
# Get indexes linked to this session
idx_ids = db.get_indexes_for_session(session_id)
logger.info(f"🔍 Session {session_id[:8]}... has {len(idx_ids)} indexes: {idx_ids}")
if not idx_ids:
logger.warning(f"⚠️ No indexes found for session {session_id}")
# Use the default table name from config instead of session-specific name
from rag_system.main import PIPELINE_CONFIGS
default_table = PIPELINE_CONFIGS["default"]["storage"]["text_table_name"]
logger.info(f"📊 Using default table '{default_table}' for session {session_id[:8]}...")
return default_table
# Use the first index's vector table name
idx = db.get_index(idx_ids[0])
if idx and idx.get('vector_table_name'):
table_name = idx['vector_table_name']
logger.info(f"📊 Using table '{table_name}' for session {session_id[:8]}...")
print(f"📊 RAG API: Using table '{table_name}' for session {session_id[:8]}...")
return table_name
else:
logger.warning(f"⚠️ Index found but no vector table name for session {session_id}")
# Use the default table name from config instead of session-specific name
from rag_system.main import PIPELINE_CONFIGS
default_table = PIPELINE_CONFIGS["default"]["storage"]["text_table_name"]
logger.info(f"📊 Using default table '{default_table}' for session {session_id[:8]}...")
return default_table
except Exception as e:
logger.error(f"❌ Error getting table name for session {session_id}: {e}")
# Use the default table name from config instead of session-specific name
from rag_system.main import PIPELINE_CONFIGS
default_table = PIPELINE_CONFIGS["default"]["storage"]["text_table_name"]
logger.info(f"📊 Using default table '{default_table}' for session {session_id[:8]}...")
return default_table
class AdvancedRagApiHandler(http.server.BaseHTTPRequestHandler):
    """HTTP handler exposing the RAG API.

    Endpoints:
        POST /chat         - run a query through the agent, return JSON
        POST /chat/stream  - same, but streams phases via Server-Sent Events
        POST /index        - index a list of files with per-request config
        GET  /models       - list available generation/embedding models

    Uses the module-level singletons RAG_AGENT, INDEXING_PIPELINE and db.
    """
    def do_OPTIONS(self):
        """Handle CORS preflight requests for frontend integration."""
        self.send_response(200)
        self.send_header('Access-Control-Allow-Origin', '*')
        self.send_header('Access-Control-Allow-Methods', 'POST, OPTIONS')
        self.send_header('Access-Control-Allow-Headers', 'Content-Type')
        self.end_headers()
    def do_POST(self):
        """Handle POST requests for chat and indexing."""
        parsed_path = urlparse(self.path)
        if parsed_path.path == '/chat':
            self.handle_chat()
        elif parsed_path.path == '/chat/stream':
            self.handle_chat_stream()
        elif parsed_path.path == '/index':
            self.handle_index()
        else:
            self.send_json_response({"error": "Not Found"}, status_code=404)
    def do_GET(self):
        """Handle GET requests; only /models is served."""
        parsed_path = urlparse(self.path)
        if parsed_path.path == '/models':
            self.handle_models()
        else:
            self.send_json_response({"error": "Not Found"}, status_code=404)
    def handle_chat(self):
        """Handles a chat query by calling the agentic RAG pipeline.

        Parses the JSON body for the query, per-request retrieval overrides
        and feature flags, stores the user message, runs either the raw
        retrieval pipeline (force_rag) or the full agent, then returns the
        result as JSON and stores the assistant reply.
        """
        try:
            content_length = int(self.headers['Content-Length'])
            post_data = self.rfile.read(content_length)
            data = json.loads(post_data.decode('utf-8'))
            query = data.get('query')
            session_id = data.get('session_id')
            compose_flag = data.get('compose_sub_answers')
            decomp_flag = data.get('query_decompose')
            ai_rerank_flag = data.get('ai_rerank')
            ctx_expand_flag = data.get('context_expand')
            verify_flag = data.get('verify')
            # ✨ NEW RETRIEVAL PARAMETERS
            retrieval_k = data.get('retrieval_k', 20)
            context_window_size = data.get('context_window_size', 1)
            reranker_top_k = data.get('reranker_top_k', 10)
            search_type = data.get('search_type', 'hybrid')
            dense_weight = data.get('dense_weight', 0.7)
            # 🚩 NEW: Force RAG override from frontend
            force_rag = bool(data.get('force_rag', False))
            # 🌿 Provence sentence pruning
            provence_prune = data.get('provence_prune')
            provence_threshold = data.get('provence_threshold')
            # User-selected generation model
            # NOTE(review): this mutates the shared agent config, so one
            # request's model choice persists for all later requests.
            requested_model = data.get('model')
            if isinstance(requested_model,str) and requested_model:
                RAG_AGENT.ollama_config['generation_model']=requested_model
            if not query:
                self.send_json_response({"error": "Query is required"}, status_code=400)
                return
            # 🔄 UPDATE SESSION TITLE: If this is the first message in the session, update the title
            if session_id:
                try:
                    # Check if this is the first message by calling the backend server
                    backend_url = f"http://localhost:8000/sessions/{session_id}"
                    session_resp = requests.get(backend_url)
                    if session_resp.status_code == 200:
                        session_data = session_resp.json()
                        session = session_data.get('session', {})
                        # If message_count is 0, this is the first message
                        if session.get('message_count', 0) == 0:
                            # Generate a title from the first message
                            title = generate_session_title(query)
                            # Update the session title via backend API
                            # We'll need to add this endpoint to the backend, for now let's make a direct database call
                            # This is a temporary solution until we add a proper API endpoint
                            db.update_session_title(session_id, title)
                            print(f"📝 Updated session title to: {title}")
                            # 💾 STORE USER MESSAGE: Add the user message to the database
                            user_message_id = db.add_message(session_id, query, "user")
                            print(f"💾 Stored user message: {user_message_id}")
                        else:
                            # Not the first message, but still store the user message
                            user_message_id = db.add_message(session_id, query, "user")
                            print(f"💾 Stored user message: {user_message_id}")
                except Exception as e:
                    print(f"⚠️ Failed to update session title or store user message: {e}")
                    # Continue with the request even if title update fails
            # Allow explicit table_name override
            table_name = data.get('table_name')
            if not table_name and session_id:
                table_name = _get_table_name_for_session(session_id)
            # Decide execution path
            print(f"🔧 Force RAG flag: {force_rag}")
            if force_rag:
                # --- Apply runtime overrides manually because we skip Agent.run()
                rp_cfg = RAG_AGENT.retrieval_pipeline.config
                if retrieval_k is not None:
                    rp_cfg["retrieval_k"] = retrieval_k
                if reranker_top_k is not None:
                    rp_cfg.setdefault("reranker", {})["top_k"] = reranker_top_k
                if search_type is not None:
                    rp_cfg.setdefault("retrieval", {})["search_type"] = search_type
                if dense_weight is not None:
                    rp_cfg.setdefault("retrieval", {}).setdefault("dense", {})["weight"] = dense_weight
                # Provence overrides
                if provence_prune is not None:
                    rp_cfg.setdefault("provence", {})["enabled"] = bool(provence_prune)
                if provence_threshold is not None:
                    rp_cfg.setdefault("provence", {})["threshold"] = float(provence_threshold)
                # 🔄 Apply embedding model for this session (same as in agent path)
                if session_id:
                    idx_ids = db.get_indexes_for_session(session_id)
                    _apply_index_embedding_model(idx_ids)
                # Directly invoke retrieval pipeline to bypass triage
                result = RAG_AGENT.retrieval_pipeline.run(
                    query,
                    table_name=table_name,
                    window_size_override=context_window_size,
                )
            else:
                # Use full agent with smart routing
                # Apply Provence overrides even in agent path
                rp_cfg = RAG_AGENT.retrieval_pipeline.config
                if provence_prune is not None:
                    rp_cfg.setdefault("provence", {})["enabled"] = bool(provence_prune)
                if provence_threshold is not None:
                    rp_cfg.setdefault("provence", {})["threshold"] = float(provence_threshold)
                # 🔄 Refresh document overviews for this session
                if session_id:
                    idx_ids = db.get_indexes_for_session(session_id)
                    _apply_index_embedding_model(idx_ids)
                    RAG_AGENT.load_overviews_for_indexes(idx_ids)
                # 🔧 Set index-specific overview path
                if session_id:
                    rp_cfg["overview_path"] = f"index_store/overviews/{session_id}.jsonl"
                # 🔧 Configure late chunking
                rp_cfg.setdefault("retrievers", {}).setdefault("latechunk", {})["enabled"] = True
                result = RAG_AGENT.run(
                    query,
                    table_name=table_name,
                    session_id=session_id,
                    compose_sub_answers=compose_flag,
                    query_decompose=decomp_flag,
                    ai_rerank=ai_rerank_flag,
                    context_expand=ctx_expand_flag,
                    verify=verify_flag,
                    retrieval_k=retrieval_k,
                    context_window_size=context_window_size,
                    reranker_top_k=reranker_top_k,
                    search_type=search_type,
                    dense_weight=dense_weight,
                )
            # The result is a dict, so we need to dump it to a JSON string
            self.send_json_response(result)
            # 💾 STORE AI RESPONSE: Add the AI response to the database
            if session_id and result and result.get("answer"):
                try:
                    ai_message_id = db.add_message(session_id, result["answer"], "assistant")
                    print(f"💾 Stored AI response: {ai_message_id}")
                except Exception as e:
                    print(f"⚠️ Failed to store AI response: {e}")
                    # Continue even if storage fails
        except json.JSONDecodeError:
            self.send_json_response({"error": "Invalid JSON"}, status_code=400)
        except Exception as e:
            self.send_json_response({"error": f"Server error: {str(e)}"}, status_code=500)
    def handle_chat_stream(self):
        """Stream internal phases and final answer using SSE (text/event-stream).

        Mirrors handle_chat, but after the SSE headers are sent every phase
        is pushed to the client through the local emit() callback, ending
        with a "complete" event carrying the final result.
        """
        try:
            content_length = int(self.headers['Content-Length'])
            post_data = self.rfile.read(content_length)
            data = json.loads(post_data.decode('utf-8'))
            query = data.get('query')
            session_id = data.get('session_id')
            compose_flag = data.get('compose_sub_answers')
            decomp_flag = data.get('query_decompose')
            ai_rerank_flag = data.get('ai_rerank')
            ctx_expand_flag = data.get('context_expand')
            verify_flag = data.get('verify')
            # ✨ NEW RETRIEVAL PARAMETERS
            retrieval_k = data.get('retrieval_k', 20)
            context_window_size = data.get('context_window_size', 1)
            reranker_top_k = data.get('reranker_top_k', 10)
            search_type = data.get('search_type', 'hybrid')
            dense_weight = data.get('dense_weight', 0.7)
            # 🚩 NEW: Force RAG override from frontend
            force_rag = bool(data.get('force_rag', False))
            # 🌿 Provence sentence pruning
            provence_prune = data.get('provence_prune')
            provence_threshold = data.get('provence_threshold')
            # User-selected generation model
            requested_model = data.get('model')
            if isinstance(requested_model,str) and requested_model:
                RAG_AGENT.ollama_config['generation_model']=requested_model
            if not query:
                self.send_json_response({"error": "Query is required"}, status_code=400)
                return
            # 🔄 UPDATE SESSION TITLE: If this is the first message in the session, update the title
            if session_id:
                try:
                    # Check if this is the first message by calling the backend server
                    backend_url = f"http://localhost:8000/sessions/{session_id}"
                    session_resp = requests.get(backend_url)
                    if session_resp.status_code == 200:
                        session_data = session_resp.json()
                        session = session_data.get('session', {})
                        # If message_count is 0, this is the first message
                        if session.get('message_count', 0) == 0:
                            # Generate a title from the first message
                            title = generate_session_title(query)
                            # Update the session title via backend API
                            # We'll need to add this endpoint to the backend, for now let's make a direct database call
                            # This is a temporary solution until we add a proper API endpoint
                            db.update_session_title(session_id, title)
                            print(f"📝 Updated session title to: {title}")
                            # 💾 STORE USER MESSAGE: Add the user message to the database
                            user_message_id = db.add_message(session_id, query, "user")
                            print(f"💾 Stored user message: {user_message_id}")
                        else:
                            # Not the first message, but still store the user message
                            user_message_id = db.add_message(session_id, query, "user")
                            print(f"💾 Stored user message: {user_message_id}")
                except Exception as e:
                    print(f"⚠️ Failed to update session title or store user message: {e}")
                    # Continue with the request even if title update fails
            # Allow explicit table_name override
            table_name = data.get('table_name')
            if not table_name and session_id:
                table_name = _get_table_name_for_session(session_id)
            # Prepare response headers for SSE
            self.send_response(200)
            self.send_header('Content-Type', 'text/event-stream')
            self.send_header('Cache-Control', 'no-cache')
            # Keep connection alive for SSE; no manual chunked encoding (Python http.server
            # does not add chunk sizes automatically, so declaring it breaks clients).
            self.send_header('Connection', 'keep-alive')
            self.send_header('Access-Control-Allow-Origin', '*')
            self.end_headers()
            def emit(event_type: str, payload):
                """Send a single SSE event."""
                try:
                    data_str = json.dumps({"type": event_type, "data": payload})
                    self.wfile.write(f"data: {data_str}\n\n".encode('utf-8'))
                    self.wfile.flush()
                except BrokenPipeError:
                    # Client disconnected
                    raise
            # Run the agent synchronously, emitting checkpoints
            try:
                if force_rag:
                    # Apply overrides same as above since we bypass Agent.run
                    rp_cfg = RAG_AGENT.retrieval_pipeline.config
                    if retrieval_k is not None:
                        rp_cfg["retrieval_k"] = retrieval_k
                    if reranker_top_k is not None:
                        rp_cfg.setdefault("reranker", {})["top_k"] = reranker_top_k
                    if search_type is not None:
                        rp_cfg.setdefault("retrieval", {})["search_type"] = search_type
                    if dense_weight is not None:
                        rp_cfg.setdefault("retrieval", {}).setdefault("dense", {})["weight"] = dense_weight
                    # Provence overrides
                    if provence_prune is not None:
                        rp_cfg.setdefault("provence", {})["enabled"] = bool(provence_prune)
                    if provence_threshold is not None:
                        rp_cfg.setdefault("provence", {})["threshold"] = float(provence_threshold)
                    # 🔄 Apply embedding model for this session (same as in agent path)
                    if session_id:
                        idx_ids = db.get_indexes_for_session(session_id)
                        _apply_index_embedding_model(idx_ids)
                    # 🔧 Set index-specific overview path so each index writes separate file
                    if session_id:
                        rp_cfg["overview_path"] = f"index_store/overviews/{session_id}.jsonl"
                    # 🔧 Configure late chunking
                    rp_cfg.setdefault("retrievers", {}).setdefault("latechunk", {})["enabled"] = True
                    # Straight retrieval pipeline with streaming events
                    final_result = RAG_AGENT.retrieval_pipeline.run(
                        query,
                        table_name=table_name,
                        window_size_override=context_window_size,
                        event_callback=emit,
                    )
                else:
                    # Provence overrides
                    rp_cfg = RAG_AGENT.retrieval_pipeline.config
                    if provence_prune is not None:
                        rp_cfg.setdefault("provence", {})["enabled"] = bool(provence_prune)
                    if provence_threshold is not None:
                        rp_cfg.setdefault("provence", {})["threshold"] = float(provence_threshold)
                    # 🔄 Refresh overviews for this session
                    if session_id:
                        idx_ids = db.get_indexes_for_session(session_id)
                        _apply_index_embedding_model(idx_ids)
                        RAG_AGENT.load_overviews_for_indexes(idx_ids)
                    # 🔧 Set index-specific overview path
                    if session_id:
                        rp_cfg["overview_path"] = f"index_store/overviews/{session_id}.jsonl"
                    # 🔧 Configure late chunking
                    rp_cfg.setdefault("retrievers", {}).setdefault("latechunk", {})["enabled"] = True
                    final_result = RAG_AGENT.run(
                        query,
                        table_name=table_name,
                        session_id=session_id,
                        compose_sub_answers=compose_flag,
                        query_decompose=decomp_flag,
                        ai_rerank=ai_rerank_flag,
                        context_expand=ctx_expand_flag,
                        verify=verify_flag,
                        # ✨ NEW RETRIEVAL PARAMETERS
                        retrieval_k=retrieval_k,
                        context_window_size=context_window_size,
                        reranker_top_k=reranker_top_k,
                        search_type=search_type,
                        dense_weight=dense_weight,
                        event_callback=emit,
                    )
                # Ensure the final answer is sent (in case callback missed it)
                emit("complete", final_result)
                # 💾 STORE AI RESPONSE: Add the AI response to the database
                if session_id and final_result and final_result.get("answer"):
                    try:
                        ai_message_id = db.add_message(session_id, final_result["answer"], "assistant")
                        print(f"💾 Stored AI response: {ai_message_id}")
                    except Exception as e:
                        print(f"⚠️ Failed to store AI response: {e}")
                        # Continue even if storage fails
            except BrokenPipeError:
                print("🔌 Client disconnected from SSE stream.")
            except Exception as e:
                # Send error event then close
                error_payload = {"error": str(e)}
                try:
                    emit("error", error_payload)
                finally:
                    print(f"❌ Stream error: {e}")
        except json.JSONDecodeError:
            self.send_json_response({"error": "Invalid JSON"}, status_code=400)
        except Exception as e:
            # NOTE(review): if this fires after the SSE headers were already
            # sent, send_json_response will emit a second response on the same
            # connection — confirm whether this path is reachable post-headers.
            self.send_json_response({"error": f"Server error: {str(e)}"}, status_code=500)
    def handle_index(self):
        """Triggers the document indexing pipeline for specific files.

        Builds a deep-copied config override from the request body (chunking,
        enrichment, models, batch sizes), instantiates a temporary pipeline
        with it, runs indexing synchronously, then reports the applied
        configuration back to the client.
        """
        try:
            content_length = int(self.headers['Content-Length'])
            post_data = self.rfile.read(content_length)
            data = json.loads(post_data.decode('utf-8'))
            file_paths = data.get('file_paths')
            session_id = data.get('session_id')
            # NOTE(review): the four flags below are parsed but never used in
            # this handler — presumably leftovers from handle_chat.
            compose_flag = data.get('compose_sub_answers')
            decomp_flag = data.get('query_decompose')
            ai_rerank_flag = data.get('ai_rerank')
            ctx_expand_flag = data.get('context_expand')
            enable_latechunk = bool(data.get("enable_latechunk", False))
            enable_docling_chunk = bool(data.get("enable_docling_chunk", False))
            # 🆕 NEW CONFIGURATION OPTIONS:
            chunk_size = int(data.get("chunk_size", 512))
            chunk_overlap = int(data.get("chunk_overlap", 64))
            retrieval_mode = data.get("retrieval_mode", "hybrid")
            window_size = int(data.get("window_size", 2))
            enable_enrich = bool(data.get("enable_enrich", True))
            embedding_model = data.get('embeddingModel')
            enrich_model = data.get('enrichModel')
            overview_model = data.get('overviewModel') or data.get('overview_model_name')
            batch_size_embed = int(data.get("batch_size_embed", 50))
            batch_size_enrich = int(data.get("batch_size_enrich", 25))
            if not file_paths or not isinstance(file_paths, list):
                self.send_json_response({
                    "error": "A 'file_paths' list is required."
                }, status_code=400)
                return
            # Allow explicit table_name override
            table_name = data.get('table_name')
            if not table_name and session_id:
                table_name = _get_table_name_for_session(session_id)
            # The INDEXING_PIPELINE is already initialized. We just need to use it.
            # If a session-specific table is needed, we can override the config for this run.
            if table_name:
                import copy
                config_override = copy.deepcopy(INDEXING_PIPELINE.config)
                config_override["storage"]["text_table_name"] = table_name
                config_override.setdefault("retrievers", {}).setdefault("dense", {})["lancedb_table_name"] = table_name
                # 🔧 Configure late chunking
                if enable_latechunk:
                    config_override["retrievers"].setdefault("latechunk", {})["enabled"] = True
                else:
                    # ensure disabled if not requested
                    config_override["retrievers"].setdefault("latechunk", {})["enabled"] = False
                # 🔧 Configure docling chunking
                if enable_docling_chunk:
                    config_override["chunker_mode"] = "docling"
                # 🔧 Configure contextual enrichment (THIS WAS MISSING!)
                config_override.setdefault("contextual_enricher", {})
                config_override["contextual_enricher"]["enabled"] = enable_enrich
                config_override["contextual_enricher"]["window_size"] = window_size
                # 🔧 Configure indexing batch sizes
                config_override.setdefault("indexing", {})
                config_override["indexing"]["embedding_batch_size"] = batch_size_embed
                config_override["indexing"]["enrichment_batch_size"] = batch_size_enrich
                # 🔧 Configure chunking parameters
                config_override.setdefault("chunking", {})
                config_override["chunking"]["chunk_size"] = chunk_size
                config_override["chunking"]["chunk_overlap"] = chunk_overlap
                # 🔧 Configure embedding model if specified
                if embedding_model:
                    config_override["embedding_model_name"] = embedding_model
                # 🔧 Configure enrichment model if specified
                if enrich_model:
                    config_override["enrich_model"] = enrich_model
                # 🔧 Overview model (can differ from enrichment)
                if overview_model:
                    config_override["overview_model_name"] = overview_model
                print(f"🔧 INDEXING CONFIG: Contextual Enrichment: {enable_enrich}, Window Size: {window_size}")
                print(f"🔧 CHUNKING CONFIG: Size: {chunk_size}, Overlap: {chunk_overlap}")
                print(f"🔧 MODEL CONFIG: Embedding: {embedding_model or 'default'}, Enrichment: {enrich_model or 'default'}")
                # 🔧 Set index-specific overview path so each index writes separate file
                if session_id:
                    config_override["overview_path"] = f"index_store/overviews/{session_id}.jsonl"
                # 🔧 Configure late chunking
                # NOTE(review): this unconditionally re-enables latechunk,
                # overriding the enable_latechunk flag handled above — confirm
                # whether this force-enable is intentional.
                config_override.setdefault("retrievers", {}).setdefault("latechunk", {})["enabled"] = True
                # Create a temporary pipeline instance with the overridden config
                temp_pipeline = INDEXING_PIPELINE.__class__(
                    config_override,
                    INDEXING_PIPELINE.llm_client,
                    INDEXING_PIPELINE.ollama_config
                )
                temp_pipeline.run(file_paths)
            else:
                # Use the default pipeline with overrides
                import copy
                config_override = copy.deepcopy(INDEXING_PIPELINE.config)
                # 🔧 Configure late chunking
                if enable_latechunk:
                    config_override.setdefault("retrievers", {}).setdefault("latechunk", {})["enabled"] = True
                # 🔧 Configure docling chunking
                if enable_docling_chunk:
                    config_override["chunker_mode"] = "docling"
                # 🔧 Configure contextual enrichment (THIS WAS MISSING!)
                config_override.setdefault("contextual_enricher", {})
                config_override["contextual_enricher"]["enabled"] = enable_enrich
                config_override["contextual_enricher"]["window_size"] = window_size
                # 🔧 Configure indexing batch sizes
                config_override.setdefault("indexing", {})
                config_override["indexing"]["embedding_batch_size"] = batch_size_embed
                config_override["indexing"]["enrichment_batch_size"] = batch_size_enrich
                # 🔧 Configure chunking parameters
                config_override.setdefault("chunking", {})
                config_override["chunking"]["chunk_size"] = chunk_size
                config_override["chunking"]["chunk_overlap"] = chunk_overlap
                # 🔧 Configure embedding model if specified
                if embedding_model:
                    config_override["embedding_model_name"] = embedding_model
                # 🔧 Configure enrichment model if specified
                if enrich_model:
                    config_override["enrich_model"] = enrich_model
                # 🔧 Overview model (can differ from enrichment)
                if overview_model:
                    config_override["overview_model_name"] = overview_model
                print(f"🔧 INDEXING CONFIG: Contextual Enrichment: {enable_enrich}, Window Size: {window_size}")
                print(f"🔧 CHUNKING CONFIG: Size: {chunk_size}, Overlap: {chunk_overlap}")
                print(f"🔧 MODEL CONFIG: Embedding: {embedding_model or 'default'}, Enrichment: {enrich_model or 'default'}")
                # 🔧 Set index-specific overview path so each index writes separate file
                if session_id:
                    config_override["overview_path"] = f"index_store/overviews/{session_id}.jsonl"
                # 🔧 Configure late chunking
                # NOTE(review): same unconditional force-enable as in the
                # table_name branch above.
                config_override.setdefault("retrievers", {}).setdefault("latechunk", {})["enabled"] = True
                # Create temporary pipeline with overridden config
                temp_pipeline = INDEXING_PIPELINE.__class__(
                    config_override,
                    INDEXING_PIPELINE.llm_client,
                    INDEXING_PIPELINE.ollama_config
                )
                temp_pipeline.run(file_paths)
            self.send_json_response({
                "message": f"Indexing process for {len(file_paths)} file(s) completed successfully.",
                "table_name": table_name or "default_text_table",
                "latechunk": enable_latechunk,
                "docling_chunk": enable_docling_chunk,
                "indexing_config": {
                    "chunk_size": chunk_size,
                    "chunk_overlap": chunk_overlap,
                    "retrieval_mode": retrieval_mode,
                    "window_size": window_size,
                    "enable_enrich": enable_enrich,
                    "embedding_model": embedding_model,
                    "enrich_model": enrich_model,
                    "batch_size_embed": batch_size_embed,
                    "batch_size_enrich": batch_size_enrich
                }
            })
            if embedding_model:
                try:
                    # NOTE(review): passes session_id where an index id seems
                    # expected by the name update_index_metadata — verify
                    # against ChatDatabase.
                    db.update_index_metadata(session_id, {"embedding_model": embedding_model})
                except Exception as e:
                    print(f"⚠️ Could not update embedding_model metadata: {e}")
        except json.JSONDecodeError:
            self.send_json_response({"error": "Invalid JSON"}, status_code=400)
        except Exception as e:
            self.send_json_response({"error": f"Failed to start indexing: {str(e)}"}, status_code=500)
    def handle_models(self):
        """Return a list of locally installed Ollama models and supported HuggingFace models, grouped by capability."""
        try:
            generation_models = []
            embedding_models = []
            # Get Ollama models if available
            try:
                resp = requests.get(f"{RAG_AGENT.ollama_config['host']}/api/tags", timeout=5)
                resp.raise_for_status()
                data = resp.json()
                all_ollama_models = [m.get('name') for m in data.get('models', [])]
                # Very naive classification
                ollama_embedding_models = [m for m in all_ollama_models if any(k in m for k in ['embed','bge','embedding','text'])]
                ollama_generation_models = [m for m in all_ollama_models if m not in ollama_embedding_models]
                generation_models.extend(ollama_generation_models)
                embedding_models.extend(ollama_embedding_models)
            except Exception as e:
                # Ollama being down is non-fatal; HF models are still returned.
                print(f"⚠️ Could not get Ollama models: {e}")
            # Add supported HuggingFace embedding models
            huggingface_embedding_models = [
                "Qwen/Qwen3-Embedding-0.6B",
                "Qwen/Qwen3-Embedding-4B",
                "Qwen/Qwen3-Embedding-8B"
            ]
            embedding_models.extend(huggingface_embedding_models)
            # Sort models for consistent ordering
            generation_models.sort()
            embedding_models.sort()
            self.send_json_response({
                "generation_models": generation_models,
                "embedding_models": embedding_models
            })
        except Exception as e:
            self.send_json_response({"error": f"Could not list models: {e}"}, status_code=500)
    def send_json_response(self, data, status_code=200):
        """Utility to send a JSON response with CORS headers."""
        self.send_response(status_code)
        self.send_header('Content-Type', 'application/json')
        self.send_header('Access-Control-Allow-Origin', '*')
        self.end_headers()
        response = json.dumps(data, indent=2)
        self.wfile.write(response.encode('utf-8'))
def start_server(port=8001):
    """Start the Advanced RAG API server and block serving requests forever.

    Args:
        port: TCP port to bind on all interfaces (default 8001).
    """
    class _ReuseAddrTCPServer(socketserver.TCPServer):
        # Reusing the address avoids "address in use" errors on quick restarts.
        allow_reuse_address = True

    with _ReuseAddrTCPServer(("", port), AdvancedRagApiHandler) as httpd:
        print(f"🚀 Starting Advanced RAG API server on port {port}")
        print(f"💬 Chat endpoint: http://localhost:{port}/chat")
        print(f"✨ Indexing endpoint: http://localhost:{port}/index")
        httpd.serve_forever()
if __name__ == "__main__":
# To run this server: python -m rag_system.api_server
start_server() | {
"repo_id": "PromtEngineer/localGPT",
"file_path": "rag_system/api_server.py",
"license": "MIT License",
"lines": 646,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PromtEngineer/localGPT:rag_system/api_server_with_progress.py | import json
import threading
import time
from typing import Dict, List, Any
import logging
from urllib.parse import urlparse, parse_qs
import http.server
import socketserver
# Import the core logic and batch processing utilities
from rag_system.main import get_agent
from rag_system.utils.batch_processor import ProgressTracker, timer
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Global progress tracking storage
# Maps session_id -> mutable progress dict (written by RealtimeProgressTracker,
# read by the /progress endpoint).
ACTIVE_PROGRESS_SESSIONS: Dict[str, Dict[str, Any]] = {}

# --- Global Singleton for the RAG Agent ---
# Created once at import time; all request handlers share this instance.
print("🧠 Initializing RAG Agent... (This may take a moment)")
RAG_AGENT = get_agent()
if RAG_AGENT is None:
    print("❌ Critical error: RAG Agent could not be initialized. Exiting.")
    exit(1)
print("✅ RAG Agent initialized successfully.")
class ServerSentEventsHandler:
    """Handler for Server-Sent Events (SSE) for real-time progress updates."""

    # session_id -> HTTP handler whose wfile is an open SSE stream
    active_connections: Dict[str, Any] = {}

    @classmethod
    def add_connection(cls, session_id: str, response_handler):
        """Register an open SSE connection for *session_id*."""
        cls.active_connections[session_id] = response_handler
        logger.info(f"SSE connection added for session: {session_id}")

    @classmethod
    def remove_connection(cls, session_id: str):
        """Drop the SSE connection for *session_id*, if one is registered."""
        if session_id in cls.active_connections:
            cls.active_connections.pop(session_id)
            logger.info(f"SSE connection removed for session: {session_id}")

    @classmethod
    def send_event(cls, session_id: str, event_type: str, data: Dict[str, Any]):
        """Write one SSE event to the stream registered for *session_id*.

        Silently does nothing when no stream is registered; a failed write
        (client gone) unregisters the connection.
        """
        handler = cls.active_connections.get(session_id)
        if handler is None:
            return
        try:
            message = f"event: {event_type}\ndata: {json.dumps(data)}\n\n"
            handler.wfile.write(message.encode('utf-8'))
            handler.wfile.flush()
        except Exception as e:
            logger.error(f"Failed to send SSE event: {e}")
            cls.remove_connection(session_id)
class RealtimeProgressTracker(ProgressTracker):
    """Enhanced ProgressTracker that sends updates via Server-Sent Events.

    State is mirrored into the module-level ACTIVE_PROGRESS_SESSIONS dict
    (keyed by session_id) so the /progress polling endpoint works, and
    throttled "progress" events are pushed through ServerSentEventsHandler.
    """

    def __init__(self, total_items: int, operation_name: str, session_id: str):
        super().__init__(total_items, operation_name)
        self.session_id = session_id
        self.last_update = 0  # wall-clock time of the last SSE push
        self.update_interval = 1  # seconds between SSE pushes (throttle)
        # Initialize session progress so polling works immediately.
        ACTIVE_PROGRESS_SESSIONS[session_id] = {
            "operation_name": operation_name,
            "total_items": total_items,
            "processed_items": 0,
            "errors_encountered": 0,
            "start_time": self.start_time,
            "status": "running",
            "current_step": "",
            "eta_seconds": 0,
            "throughput": 0,
            "progress_percentage": 0
        }
        # Send initial progress update
        self._send_progress_update()

    def update(self, items_processed: int, errors: int = 0, current_step: str = ""):
        """Update progress counters and (rate-limited) notify SSE listeners.

        Args:
            items_processed: Count passed through to the base tracker.
            errors: New errors encountered, passed through as well.
            current_step: Human-readable description of the current phase.
        """
        super().update(items_processed, errors)
        # Update session data
        session_data = ACTIVE_PROGRESS_SESSIONS.get(self.session_id)
        if session_data:
            # BUGFIX: guard against total_items == 0, which previously raised
            # ZeroDivisionError; an empty job is reported as 100% done.
            if self.total_items:
                pct = (self.processed_items / self.total_items) * 100
            else:
                pct = 100.0
            session_data.update({
                "processed_items": self.processed_items,
                "errors_encountered": self.errors_encountered,
                "current_step": current_step,
                "progress_percentage": pct,
            })
            # Calculate throughput and ETA
            elapsed = time.time() - self.start_time
            if elapsed > 0:
                session_data["throughput"] = self.processed_items / elapsed
                remaining = self.total_items - self.processed_items
                session_data["eta_seconds"] = remaining / session_data["throughput"] if session_data["throughput"] > 0 else 0
        # Send update if enough time has passed since the last push.
        current_time = time.time()
        if current_time - self.last_update >= self.update_interval:
            self._send_progress_update()
            self.last_update = current_time

    def finish(self):
        """Mark progress as finished and send a final (unthrottled) update."""
        super().finish()
        # Update session status
        session_data = ACTIVE_PROGRESS_SESSIONS.get(self.session_id)
        if session_data:
            session_data.update({
                "status": "completed",
                "progress_percentage": 100,
                "eta_seconds": 0
            })
        # Send final update
        self._send_progress_update(final=True)

    def _send_progress_update(self, final: bool = False):
        """Send a snapshot of the session state via Server-Sent Events."""
        session_data = ACTIVE_PROGRESS_SESSIONS.get(self.session_id, {})
        event_data = {
            "session_id": self.session_id,
            "progress": session_data.copy(),
            "final": final,
            "timestamp": time.time()
        }
        ServerSentEventsHandler.send_event(self.session_id, "progress", event_data)
def run_indexing_with_progress(file_paths: List[str], session_id: str):
    """Enhanced indexing function with real-time progress tracking.

    Runs the full indexing pipeline over *file_paths*, emitting "status",
    "progress", "completion" and "error" SSE events for *session_id*.
    Intended to run on a background thread; exceptions are reported over
    SSE and then re-raised.
    """
    from rag_system.pipelines.indexing_pipeline import IndexingPipeline
    from rag_system.utils.ollama_client import OllamaClient
    import json
    try:
        # Send initial status
        ServerSentEventsHandler.send_event(session_id, "status", {
            "message": "Initializing indexing pipeline...",
            "session_id": session_id
        })
        # Load configuration from the working directory, if present.
        config_file = "batch_indexing_config.json"
        try:
            with open(config_file, 'r') as f:
                config = json.load(f)
        except FileNotFoundError:
            # Fallback to default config
            config = {
                "embedding_model_name": "Qwen/Qwen3-Embedding-0.6B",
                "indexing": {
                    "embedding_batch_size": 50,
                    "enrichment_batch_size": 10,
                    "enable_progress_tracking": True
                },
                "contextual_enricher": {"enabled": True, "window_size": 1},
                "retrievers": {
                    "dense": {"enabled": True, "lancedb_table_name": "default_text_table"},
                    "bm25": {"enabled": True, "index_name": "default_bm25_index"}
                },
                "storage": {
                    "chunk_store_path": "./index_store/chunks/chunks.pkl",
                    "lancedb_uri": "./index_store/lancedb",
                    "bm25_path": "./index_store/bm25"
                }
            }
        # Initialize components
        ollama_client = OllamaClient()
        ollama_config = {
            "generation_model": "llama3.2:1b",
            "embedding_model": "mxbai-embed-large"
        }
        # Create enhanced pipeline
        pipeline = IndexingPipeline(config, ollama_client, ollama_config)
        # Create progress tracker for the overall process
        total_steps = 6  # Rough estimate of pipeline steps
        step_tracker = RealtimeProgressTracker(total_steps, "Document Indexing", session_id)
        with timer("Complete Indexing Pipeline"):
            try:
                # Step 1: Document Processing
                step_tracker.update(1, current_step="Processing documents...")
                # Run the indexing pipeline
                pipeline.run(file_paths)
                # NOTE(review): pipeline.run() is synchronous, so the step
                # updates below all fire after it completes — the reported
                # progress is coarse, not truly step-by-step.
                step_tracker.update(1, current_step="Chunking completed...")
                step_tracker.update(1, current_step="BM25 indexing completed...")
                step_tracker.update(1, current_step="Contextual enrichment completed...")
                step_tracker.update(1, current_step="Vector embeddings completed...")
                step_tracker.update(1, current_step="Indexing finalized...")
                step_tracker.finish()
                # Send completion notification
                ServerSentEventsHandler.send_event(session_id, "completion", {
                    "message": f"Successfully indexed {len(file_paths)} file(s)",
                    "file_count": len(file_paths),
                    "session_id": session_id
                })
            except Exception as e:
                # Send error notification
                # NOTE(review): the outer handler below sends a second
                # "error" event for the same exception.
                ServerSentEventsHandler.send_event(session_id, "error", {
                    "message": str(e),
                    "session_id": session_id
                })
                raise
    except Exception as e:
        logger.error(f"Indexing failed for session {session_id}: {e}")
        ServerSentEventsHandler.send_event(session_id, "error", {
            "message": str(e),
            "session_id": session_id
        })
        raise
class EnhancedRagApiHandler(http.server.BaseHTTPRequestHandler):
    """Enhanced API handler with progress tracking support.

    Routes:
        POST /chat      -> handle_chat
        POST /index     -> handle_index_with_progress
        GET  /progress  -> handle_progress_status (polling)
        GET  /stream    -> handle_progress_stream (Server-Sent Events)
    """

    def do_OPTIONS(self):
        """Handle CORS preflight requests for frontend integration."""
        self.send_response(200)
        self.send_header('Access-Control-Allow-Origin', '*')
        self.send_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS')
        self.send_header('Access-Control-Allow-Headers', 'Content-Type')
        self.end_headers()

    def do_GET(self):
        """Handle GET requests for progress status and SSE streams."""
        parsed_path = urlparse(self.path)
        if parsed_path.path == '/progress':
            self.handle_progress_status()
        elif parsed_path.path == '/stream':
            self.handle_progress_stream()
        else:
            self.send_json_response({"error": "Not Found"}, status_code=404)

    def do_POST(self):
        """Handle POST requests for chat and indexing."""
        parsed_path = urlparse(self.path)
        if parsed_path.path == '/chat':
            self.handle_chat()
        elif parsed_path.path == '/index':
            self.handle_index_with_progress()
        else:
            self.send_json_response({"error": "Not Found"}, status_code=404)

    def handle_chat(self):
        """Handles a chat query by calling the agentic RAG pipeline."""
        try:
            content_length = int(self.headers['Content-Length'])
            post_data = self.rfile.read(content_length)
            data = json.loads(post_data.decode('utf-8'))
            query = data.get('query')
            if not query:
                self.send_json_response({"error": "Query is required"}, status_code=400)
                return
            # Use the single, persistent agent instance to run the query
            result = RAG_AGENT.run(query)
            # The result is a dict, so we need to dump it to a JSON string
            self.send_json_response(result)
        except json.JSONDecodeError:
            self.send_json_response({"error": "Invalid JSON"}, status_code=400)
        except Exception as e:
            self.send_json_response({"error": f"Server error: {str(e)}"}, status_code=500)

    def handle_index_with_progress(self):
        """Triggers the document indexing pipeline with real-time progress tracking.

        Responds immediately with a stream URL; the actual indexing runs on a
        daemon thread and reports progress over SSE.
        """
        try:
            content_length = int(self.headers['Content-Length'])
            post_data = self.rfile.read(content_length)
            data = json.loads(post_data.decode('utf-8'))
            file_paths = data.get('file_paths')
            session_id = data.get('session_id')
            if not file_paths or not isinstance(file_paths, list):
                self.send_json_response({
                    "error": "A 'file_paths' list is required."
                }, status_code=400)
                return
            if not session_id:
                self.send_json_response({
                    "error": "A 'session_id' is required for progress tracking."
                }, status_code=400)
                return

            # Start indexing in a separate thread to avoid blocking
            def run_indexing_thread():
                try:
                    run_indexing_with_progress(file_paths, session_id)
                except Exception as e:
                    logger.error(f"Indexing thread failed: {e}")

            thread = threading.Thread(target=run_indexing_thread)
            thread.daemon = True
            thread.start()

            # BUGFIX: the stream URL used to hardcode port 8001, but
            # start_enhanced_server() binds to 8000 by default. Use the port
            # this server is actually listening on.
            actual_port = self.server.server_address[1]
            # Return immediate response
            self.send_json_response({
                "message": f"Indexing started for {len(file_paths)} file(s)",
                "session_id": session_id,
                "status": "started",
                "progress_stream_url": f"http://localhost:{actual_port}/stream?session_id={session_id}"
            })
        except json.JSONDecodeError:
            self.send_json_response({"error": "Invalid JSON"}, status_code=400)
        except Exception as e:
            self.send_json_response({"error": f"Failed to start indexing: {str(e)}"}, status_code=500)

    def handle_progress_status(self):
        """Handle GET requests for current progress status."""
        parsed_url = urlparse(self.path)
        params = parse_qs(parsed_url.query)
        session_id = params.get('session_id', [None])[0]
        if not session_id:
            self.send_json_response({"error": "session_id is required"}, status_code=400)
            return
        progress_data = ACTIVE_PROGRESS_SESSIONS.get(session_id)
        if not progress_data:
            self.send_json_response({"error": "No active progress for this session"}, status_code=404)
            return
        self.send_json_response({
            "session_id": session_id,
            "progress": progress_data
        })

    def handle_progress_stream(self):
        """Handle Server-Sent Events stream for real-time progress."""
        parsed_url = urlparse(self.path)
        params = parse_qs(parsed_url.query)
        session_id = params.get('session_id', [None])[0]
        if not session_id:
            self.send_response(400)
            self.end_headers()
            return
        # Set up SSE headers
        self.send_response(200)
        self.send_header('Content-Type', 'text/event-stream')
        self.send_header('Cache-Control', 'no-cache')
        self.send_header('Connection', 'keep-alive')
        self.send_header('Access-Control-Allow-Origin', '*')
        self.end_headers()
        # Register so RealtimeProgressTracker can push events to this socket.
        ServerSentEventsHandler.add_connection(session_id, self)
        # Send initial connection message
        initial_message = json.dumps({
            "session_id": session_id,
            "message": "Progress stream connected",
            "timestamp": time.time()
        })
        self.wfile.write(f"event: connected\ndata: {initial_message}\n\n".encode('utf-8'))
        self.wfile.flush()
        # Keep connection alive; the heartbeat write also detects client
        # disconnects (the write raises, ending the loop).
        try:
            while session_id in ServerSentEventsHandler.active_connections:
                time.sleep(1)
                # Send heartbeat
                heartbeat = json.dumps({"type": "heartbeat", "timestamp": time.time()})
                self.wfile.write(f"event: heartbeat\ndata: {heartbeat}\n\n".encode('utf-8'))
                self.wfile.flush()
        except Exception as e:
            logger.info(f"SSE connection closed for session {session_id}: {e}")
        finally:
            ServerSentEventsHandler.remove_connection(session_id)

    def send_json_response(self, data, status_code=200):
        """Utility to send a JSON response with CORS headers."""
        self.send_response(status_code)
        self.send_header('Content-Type', 'application/json')
        self.send_header('Access-Control-Allow-Origin', '*')
        self.end_headers()
        response = json.dumps(data, indent=2)
        self.wfile.write(response.encode('utf-8'))
def start_enhanced_server(port=8000):
    """Start the enhanced API server with a reusable TCP socket."""

    # SO_REUSEADDR avoids "address in use" on quick restarts.
    class ReusableTCPServer(socketserver.TCPServer):
        allow_reuse_address = True

    with ReusableTCPServer(("", port), EnhancedRagApiHandler) as server:
        banner = (
            f"🚀 Starting Enhanced RAG API server on port {port}",
            f"💬 Chat endpoint: http://localhost:{port}/chat",
            f"✨ Indexing endpoint: http://localhost:{port}/index",
            f"📊 Progress endpoint: http://localhost:{port}/progress",
            f"🌊 Progress stream: http://localhost:{port}/stream",
            "📈 Real-time progress tracking enabled via Server-Sent Events!",
        )
        for line in banner:
            print(line)
        server.serve_forever()
if __name__ == '__main__':
# Start the server on a dedicated thread
server_thread = threading.Thread(target=start_enhanced_server)
server_thread.daemon = True
server_thread.start()
print("🚀 Enhanced RAG API server with progress tracking is running.")
print("Press Ctrl+C to stop.")
# Keep the main thread alive
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
print("\nStopping server...") | {
"repo_id": "PromtEngineer/localGPT",
"file_path": "rag_system/api_server_with_progress.py",
"license": "MIT License",
"lines": 370,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PromtEngineer/localGPT:rag_system/factory.py | from dotenv import load_dotenv
def get_agent(mode: str = "default"):
    """
    Factory function to get an instance of the RAG agent based on the specified mode.
    This uses local imports to prevent circular dependencies.
    """
    from rag_system.agent.loop import Agent
    from rag_system.utils.ollama_client import OllamaClient
    from rag_system.main import PIPELINE_CONFIGS, OLLAMA_CONFIG, LLM_BACKEND, WATSONX_CONFIG

    load_dotenv()

    # Pick the LLM backend: Watson X when configured, Ollama otherwise.
    if LLM_BACKEND.lower() == "watsonx":
        from rag_system.utils.watsonx_client import WatsonXClient
        if not WATSONX_CONFIG["api_key"] or not WATSONX_CONFIG["project_id"]:
            raise ValueError(
                "Watson X configuration incomplete. Please set WATSONX_API_KEY and WATSONX_PROJECT_ID "
                "environment variables."
            )
        llm_client = WatsonXClient(
            api_key=WATSONX_CONFIG["api_key"],
            project_id=WATSONX_CONFIG["project_id"],
            url=WATSONX_CONFIG["url"],
        )
        llm_config = WATSONX_CONFIG
    else:
        llm_client = OllamaClient(host=OLLAMA_CONFIG["host"])
        llm_config = OLLAMA_CONFIG

    selected_config = PIPELINE_CONFIGS.get(mode, PIPELINE_CONFIGS['default'])
    # Older configs may lack a storage section; fill in sensible defaults.
    selected_config.setdefault('storage', {
        'db_path': 'lancedb',
        'text_table_name': 'text_pages_default',
        'image_table_name': 'image_pages'
    })
    return Agent(
        pipeline_configs=selected_config,
        llm_client=llm_client,
        ollama_config=llm_config,
    )
def get_indexing_pipeline(mode: str = "default"):
"""
Factory function to get an instance of the Indexing Pipeline.
"""
from rag_system.pipelines.indexing_pipeline import IndexingPipeline
from rag_system.main import PIPELINE_CONFIGS, OLLAMA_CONFIG, LLM_BACKEND, WATSONX_CONFIG
from rag_system.utils.ollama_client import OllamaClient
load_dotenv()
# Initialize the appropriate LLM client based on backend configuration
if LLM_BACKEND.lower() == "watsonx":
from rag_system.utils.watsonx_client import WatsonXClient
if not WATSONX_CONFIG["api_key"] or not WATSONX_CONFIG["project_id"]:
raise ValueError(
"Watson X configuration incomplete. Please set WATSONX_API_KEY and WATSONX_PROJECT_ID "
"environment variables."
)
llm_client = WatsonXClient(
api_key=WATSONX_CONFIG["api_key"],
project_id=WATSONX_CONFIG["project_id"],
url=WATSONX_CONFIG["url"]
)
llm_config = WATSONX_CONFIG
else:
llm_client = OllamaClient(host=OLLAMA_CONFIG["host"])
llm_config = OLLAMA_CONFIG
config = PIPELINE_CONFIGS.get(mode, PIPELINE_CONFIGS['default'])
return IndexingPipeline(config, llm_client, llm_config) | {
"repo_id": "PromtEngineer/localGPT",
"file_path": "rag_system/factory.py",
"license": "MIT License",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PromtEngineer/localGPT:rag_system/indexing/contextualizer.py | from typing import List, Dict, Any
from rag_system.utils.ollama_client import OllamaClient
from rag_system.ingestion.chunking import create_contextual_window
import logging
import re
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Define the structured prompt templates, adapted from the example
# System/user prompt parts for the contextual-summary call. The templates use
# str.format placeholders ({local_context_text}, {chunk_content}).
SYSTEM_PROMPT = "You are an expert at summarizing and providing context for document sections based on their local surroundings."

LOCAL_CONTEXT_PROMPT_TEMPLATE = """<local_context>
{local_context_text}
</local_context>"""

# FIX: corrected the "Focus on the the overall theme" typo in the prompt text.
CHUNK_PROMPT_TEMPLATE = """Here is the specific chunk we want to situate within the local context provided:
<chunk>
{chunk_content}
</chunk>
Based *only* on the local context provided, give a very short (2-5 sentence) context summary to situate this specific chunk.
Focus on the chunk's topic and its relation to the immediately surrounding text shown in the local context.
Focus on the overall theme of the context, make sure to include topics, concepts, and other relevant information.
Answer *only* with the succinct context and nothing else."""
class ContextualEnricher:
"""
Enriches chunks with a prepended summary of their surrounding context using Ollama,
while preserving the original text.
"""
def __init__(self, llm_client: OllamaClient, llm_model: str, batch_size: int = 10):
    """Create an enricher.

    Args:
        llm_client: Client used to call the summarization model.
        llm_model: Name of the Ollama model used for summaries.
        batch_size: Chunks per batch in enrich_chunks().
    """
    self.llm_client = llm_client
    self.llm_model = llm_model
    self.batch_size = batch_size
    logger.info(f"Initialized ContextualEnricher with Ollama model '{self.llm_model}' (batch_size={batch_size}).")
def _generate_summary(self, local_context_text: str, chunk_text: str) -> str:
    """Generates a contextual summary using a structured, multi-part prompt.

    Returns the cleaned summary string, or "" when the LLM call fails or the
    output is unusably short — callers treat "" as "skip enrichment".
    """
    # Combine the templates to form the final content for the HumanMessage equivalent
    human_prompt_content = (
        f"{LOCAL_CONTEXT_PROMPT_TEMPLATE.format(local_context_text=local_context_text)}\n\n"
        f"{CHUNK_PROMPT_TEMPLATE.format(chunk_content=chunk_text)}"
    )
    try:
        # Although we don't use LangChain's message objects, we can simulate the
        # System + Human message structure in the single prompt for the Ollama client.
        # A common way is to provide the system prompt and then the user's request.
        full_prompt = f"{SYSTEM_PROMPT}\n\n{human_prompt_content}"
        response = self.llm_client.generate_completion(self.llm_model, full_prompt, enable_thinking=False)
        summary_raw = response.get('response', '').strip()
        # --- Sanitize the summary to remove chain-of-thought markers ---
        # Many Qwen models wrap reasoning in <think>...</think> or similar tags.
        cleaned = re.sub(r'<think[^>]*>.*?</think>', '', summary_raw, flags=re.IGNORECASE | re.DOTALL)
        # Remove any assistant role tags that may appear
        cleaned = re.sub(r'<assistant[^>]*>|</assistant>', '', cleaned, flags=re.IGNORECASE)
        # If the model used an explicit "Answer:" delimiter keep only the part after it
        if 'Answer:' in cleaned:
            cleaned = cleaned.split('Answer:', 1)[1]
        # Take the first non-empty line to avoid leftover blank lines
        summary = next((ln.strip() for ln in cleaned.splitlines() if ln.strip()), '')
        # Fallback to raw if cleaning removed everything
        if not summary:
            summary = summary_raw
        # Reject degenerate outputs (fewer than 5 characters) as unusable.
        if not summary or len(summary) < 5:
            logger.warning("Generated context summary is too short or empty. Skipping enrichment for this chunk.")
            return ""
        return summary
    except Exception as e:
        logger.error(f"LLM invocation failed during contextualization: {e}", exc_info=True)
        return ""  # Gracefully fail by returning no summary
def enrich_chunks(self, chunks: List[Dict[str, Any]], window_size: int = 1) -> List[Dict[str, Any]]:
    """Enrich each chunk with a prepended context summary, in batches.

    Args:
        chunks: Chunk dicts with at least a 'text' key.
        window_size: Number of neighbouring chunks on each side used to
            build the local context window.

    Returns:
        A new list of chunk dicts; each carries the original text in
        metadata['original_text'] and the summary (or "N/A") in
        metadata['contextual_summary'].
    """
    if not chunks:
        return []
    logger.info(f"Enriching {len(chunks)} chunks with contextual summaries (window_size={window_size}) using Ollama...")
    # Import batch processor
    from rag_system.utils.batch_processor import BatchProcessor, estimate_memory_usage
    # Estimate memory usage
    memory_mb = estimate_memory_usage(chunks)
    logger.info(f"Estimated memory usage for contextual enrichment: {memory_mb:.1f}MB")
    # Use batch processing for better performance and progress tracking
    batch_processor = BatchProcessor(batch_size=self.batch_size)

    def process_chunk_batch(chunk_indices):
        """Process a batch of chunk indices for contextual enrichment"""
        batch_results = []
        for i in chunk_indices:
            chunk = chunks[i]
            try:
                local_context_text = create_contextual_window(chunks, chunk_index=i, window_size=window_size)
                # The summary is generated based on the original, unmodified text
                original_text = chunk['text']
                summary = self._generate_summary(local_context_text, original_text)
                # NOTE(review): .copy() is shallow — when 'metadata' already
                # exists as a dict, the source chunk's metadata is mutated too.
                new_chunk = chunk.copy()
                # Ensure metadata is a dictionary
                if 'metadata' not in new_chunk or not isinstance(new_chunk['metadata'], dict):
                    new_chunk['metadata'] = {}
                # Store original text and summary in metadata
                new_chunk['metadata']['original_text'] = original_text
                new_chunk['metadata']['contextual_summary'] = "N/A"
                # Prepend the context summary ONLY if it was successfully generated
                if summary:
                    new_chunk['text'] = f"Context: {summary}\n\n---\n\n{original_text}"
                    new_chunk['metadata']['contextual_summary'] = summary
                batch_results.append(new_chunk)
            except Exception as e:
                logger.error(f"Error enriching chunk {i}: {e}")
                # Return original chunk if enrichment fails
                batch_results.append(chunk)
        return batch_results

    # Create list of chunk indices for batch processing
    chunk_indices = list(range(len(chunks)))
    # Process chunks in batches
    enriched_chunks = batch_processor.process_in_batches(
        chunk_indices,
        process_chunk_batch,
        "Contextual Enrichment"
    )
    return enriched_chunks
def enrich_chunks_sequential(self, chunks: List[Dict[str, Any]], window_size: int = 1) -> List[Dict[str, Any]]:
"""Sequential enrichment method (legacy) - kept for comparison"""
if not chunks:
return []
logger.info(f"Enriching {len(chunks)} chunks sequentially (window_size={window_size})...")
enriched_chunks = []
for i, chunk in enumerate(chunks):
local_context_text = create_contextual_window(chunks, chunk_index=i, window_size=window_size)
# The summary is generated based on the original, unmodified text
original_text = chunk['text']
summary = self._generate_summary(local_context_text, original_text)
new_chunk = chunk.copy()
# Ensure metadata is a dictionary
if 'metadata' not in new_chunk or not isinstance(new_chunk['metadata'], dict):
new_chunk['metadata'] = {}
# Store original text and summary in metadata
new_chunk['metadata']['original_text'] = original_text
new_chunk['metadata']['contextual_summary'] = "N/A"
# Prepend the context summary ONLY if it was successfully generated
if summary:
new_chunk['text'] = f"Context: {summary}\n\n---\n\n{original_text}"
new_chunk['metadata']['contextual_summary'] = summary
enriched_chunks.append(new_chunk)
if (i + 1) % 10 == 0 or i == len(chunks) - 1:
logger.info(f" ...processed {i+1}/{len(chunks)} chunks.")
return enriched_chunks | {
"repo_id": "PromtEngineer/localGPT",
"file_path": "rag_system/indexing/contextualizer.py",
"license": "MIT License",
"lines": 138,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PromtEngineer/localGPT:rag_system/indexing/embedders.py | # from rag_system.indexing.representations import BM25Generator
import lancedb
import pyarrow as pa
from typing import List, Dict, Any
import numpy as np
import json
class LanceDBManager:
    """Thin wrapper owning a LanceDB connection rooted at *db_path*."""

    def __init__(self, db_path: str):
        self.db_path = db_path
        self.db = lancedb.connect(db_path)
        print(f"LanceDB connection established at: {db_path}")

    def get_table(self, table_name: str):
        """Open and return an existing table by name."""
        return self.db.open_table(table_name)

    def create_table(self, table_name: str, schema: pa.Schema, mode: str = "overwrite"):
        """Create a table with the given Arrow schema (overwrites by default)."""
        print(f"Creating table '{table_name}' with mode '{mode}'...")
        return self.db.create_table(table_name, schema=schema, mode=mode)
class VectorIndexer:
    """
    Handles the indexing of vector embeddings and rich metadata into LanceDB.
    The 'text' field is the content that gets embedded (which can be enriched).
    The original, clean text is stored in the metadata.
    """

    def __init__(self, db_manager: LanceDBManager):
        # Connection wrapper used for table lookup / creation.
        self.db_manager = db_manager

    def index(self, table_name: str, chunks: List[Dict[str, Any]], embeddings: np.ndarray):
        """Write chunk vectors into *table_name*, creating the table if needed.

        Args:
            table_name: LanceDB table to write into.
            chunks: Chunk dicts; each needs 'chunk_id', 'text' and 'metadata'.
                NOTE: chunk['metadata'] is mutated in place (original_text).
            embeddings: One vector per chunk, same order as *chunks*.

        Raises:
            ValueError: If chunks and embeddings differ in length.
        """
        if len(chunks) != len(embeddings):
            raise ValueError("The number of chunks and embeddings must be the same.")
        if not chunks:
            print("No chunks to index.")
            return
        vector_dim = embeddings[0].shape[0]
        # The schema stores the text that was used for the embedding (potentially enriched)
        # and the full metadata object as a JSON string.
        schema = pa.schema([
            pa.field("vector", pa.list_(pa.float32(), vector_dim)),
            pa.field("text", pa.string(), nullable=False),
            pa.field("chunk_id", pa.string()),
            pa.field("document_id", pa.string()),
            pa.field("chunk_index", pa.int32()),
            pa.field("metadata", pa.string())
        ])
        data = []
        skipped_count = 0
        for chunk, vector in zip(chunks, embeddings):
            # Check for NaN values in the vector
            if np.isnan(vector).any():
                print(f"⚠️ Skipping chunk '{chunk.get('chunk_id', 'unknown')}' due to NaN values in embedding")
                skipped_count += 1
                continue
            # Check for infinite values in the vector
            if np.isinf(vector).any():
                print(f"⚠️ Skipping chunk '{chunk.get('chunk_id', 'unknown')}' due to infinite values in embedding")
                skipped_count += 1
                continue
            # Ensure original_text is in metadata if not already present
            if 'original_text' not in chunk['metadata']:
                chunk['metadata']['original_text'] = chunk['text']
            # Extract document_id and chunk_index for top-level storage
            doc_id = chunk.get("metadata", {}).get("document_id", "unknown")
            chunk_idx = chunk.get("metadata", {}).get("chunk_index", -1)
            # Defensive check for text content to ensure it's a non-empty string
            text_content = chunk.get('text', '')
            if not text_content or not isinstance(text_content, str):
                text_content = ""
            data.append({
                "vector": vector.tolist(),
                "text": text_content,
                "chunk_id": chunk['chunk_id'],
                "document_id": doc_id,
                "chunk_index": chunk_idx,
                # Entire chunk (text + metadata) serialized for retrieval.
                "metadata": json.dumps(chunk)
            })
        if skipped_count > 0:
            print(f"⚠️ Skipped {skipped_count} chunks due to invalid embeddings (NaN or infinite values)")
        if not data:
            print("❌ No valid embeddings to index after filtering out NaN/infinite values")
            return
        # Incremental indexing: append to existing table if present, otherwise create it
        db = self.db_manager.db  # underlying LanceDB connection
        if hasattr(db, "table_names") and table_name in db.table_names():
            tbl = self.db_manager.get_table(table_name)
            print(f"Appending {len(data)} vectors to existing table '{table_name}'.")
        else:
            print(f"Creating table '{table_name}' (new) and adding {len(data)} vectors...")
            tbl = self.db_manager.create_table(table_name, schema=schema, mode="create")
        # Add data with NaN handling configuration
        try:
            tbl.add(data, on_bad_vectors='drop')
            print(f"✅ Indexed {len(data)} vectors into table '{table_name}'.")
        except Exception as e:
            print(f"❌ Failed to add data to table: {e}")
            # Fallback: try with fill strategy
            try:
                print("🔄 Retrying with NaN fill strategy...")
                tbl.add(data, on_bad_vectors='fill', fill_value=0.0)
                print(f"✅ Indexed {len(data)} vectors into table '{table_name}' (with NaN fill).")
            except Exception as e2:
                print(f"❌ Failed to add data even with NaN fill: {e2}")
                raise
# BM25Indexer is no longer needed as we are moving to LanceDB's native FTS.
# class BM25Indexer:
# ...
if __name__ == '__main__':
    # Manual smoke test: index one pre-enriched chunk, then read it back.
    print("embedders.py updated for contextual enrichment.")
    # This chunk has been "enriched". The 'text' field contains the context.
    enriched_chunk = {
        'chunk_id': 'doc1_0',
        'text': 'Context: Discusses animals.\n\n---\n\nOriginal: The cat sat on the mat.',
        'metadata': {
            'original_text': 'The cat sat on the mat.',
            'contextual_summary': 'Discusses animals.',
            'document_id': 'doc1',
            'title': 'Pet Stories'
        }
    }
    # Random 128-dim vector standing in for a real embedding model output.
    sample_embeddings = np.random.rand(1, 128).astype('float32')
    DB_PATH = "./rag_system/index_store/lancedb"
    db_manager = LanceDBManager(db_path=DB_PATH)
    vector_indexer = VectorIndexer(db_manager=db_manager)
    vector_indexer.index(
        table_name="enriched_text_embeddings",
        chunks=[enriched_chunk],
        embeddings=sample_embeddings
    )
    # Read the row back to confirm both the embedded text and the preserved
    # original text survived the round trip.
    try:
        tbl = db_manager.get_table("enriched_text_embeddings")
        df = tbl.limit(1).to_pandas()
        df['metadata'] = df['metadata'].apply(json.loads)
        print("\n--- Verification ---")
        print("Embedded Text:", df['text'].iloc[0])
        print("Original Text from Metadata:", df['metadata'].iloc[0]['original_text'])
    except Exception as e:
        print(f"Could not verify LanceDB table. Error: {e}")
| {
"repo_id": "PromtEngineer/localGPT",
"file_path": "rag_system/indexing/embedders.py",
"license": "MIT License",
"lines": 133,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PromtEngineer/localGPT:rag_system/indexing/graph_extractor.py | from typing import List, Dict, Any
import json
from rag_system.utils.ollama_client import OllamaClient
class GraphExtractor:
    """
    Extracts entities and relationships from text chunks using a live Ollama model.
    """

    def __init__(self, llm_client: OllamaClient, llm_model: str):
        # Client + model name used for both extraction passes.
        self.llm_client = llm_client
        self.llm_model = llm_model
        print(f"Initialized GraphExtractor with Ollama model '{self.llm_model}'.")

    def extract(self, chunks: List[Dict[str, Any]]) -> Dict[str, List[Dict]]:
        """Two-pass LLM graph extraction over *chunks*.

        Pass 1 asks the model for entities in each chunk; pass 2 asks for
        relationships among the cleaned entities. A JSON decode failure in
        either pass skips the whole chunk.

        Returns:
            Dict with 'entities' (list of {'id', 'type'} dicts; type is a
            placeholder) and deduplicated 'relationships'
            ({'source', 'target', 'label'} dicts).
        """
        all_entities = {}
        all_relationships = set()  # (source, target, label) tuples for dedup
        print(f"Extracting graph from {len(chunks)} chunks with Ollama...")
        for i, chunk in enumerate(chunks):
            # Step 1: Extract Entities
            entity_prompt = f"""
            From the following text, extract key entities (people, companies, locations).
            Return the answer as a JSON object with a single key 'entities', which is a list of strings.
            Each entity should be a short, specific name, not a long string of text.
            Text: "{chunk['text']}"
            """
            entity_response = self.llm_client.generate_completion(
                self.llm_model,
                entity_prompt,
                format="json"
            )
            entity_response_text = entity_response.get('response', '{}')
            try:
                entity_data = json.loads(entity_response_text)
                entities = entity_data.get('entities', [])
                if not entities:
                    continue
                # Clean up entities: drop over-long names and bracketed junk.
                cleaned_entities = []
                for entity in entities:
                    if len(entity) < 50 and not any(c in entity for c in "[]{}()"):
                        cleaned_entities.append(entity)
                if not cleaned_entities:
                    continue
                # Step 2: Extract Relationships
                relationship_prompt = f"""
                Given the following entities: {cleaned_entities}
                And the following text: "{chunk['text']}"
                Extract the relationships between the entities.
                Return the answer as a JSON object with a single key 'relationships', which is a list of objects, each with 'source', 'target', and 'label'.
                """
                relationship_response = self.llm_client.generate_completion(
                    self.llm_model,
                    relationship_prompt,
                    format="json"
                )
                relationship_response_text = relationship_response.get('response', '{}')
                relationship_data = json.loads(relationship_response_text)
                for entity_name in cleaned_entities:
                    all_entities[entity_name] = {"id": entity_name, "type": "Unknown"}  # Placeholder type
                for rel in relationship_data.get("relationships", []):
                    # Only keep fully-specified relationship objects.
                    if 'source' in rel and 'target' in rel and 'label' in rel:
                        all_relationships.add(
                            (rel['source'], rel['target'], rel['label'])
                        )
            except json.JSONDecodeError:
                # NOTE(review): a bad relationship JSON also discards the
                # already-parsed entities for this chunk.
                print(f"Warning: Could not decode JSON from LLM for chunk {i+1}.")
                continue
        return {
            "entities": list(all_entities.values()),
            "relationships": [{"source": s, "target": t, "label": l} for s, t, l in all_relationships]
        }
| {
"repo_id": "PromtEngineer/localGPT",
"file_path": "rag_system/indexing/graph_extractor.py",
"license": "MIT License",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PromtEngineer/localGPT:rag_system/indexing/latechunk.py | from __future__ import annotations
"""Late Chunking encoder.
This helper feeds the *entire* document to the embedding model, collects
per-token hidden-states and then mean-pools those vectors inside pre-defined
chunk spans. The end result is one vector per chunk – but each vector has
been produced with knowledge of the *whole* document, alleviating context-loss
issues of vanilla chunking.
We purposefully keep this class lightweight and free of LanceDB/Chunking
logic so it can be re-used elsewhere (e.g. notebook experiments).
"""
from typing import List, Tuple
import torch
from transformers import AutoModel, AutoTokenizer
import numpy as np
class LateChunkEncoder:
"""Generate late-chunked embeddings given character-offset spans."""
def __init__(self, model_name: str = "Qwen/Qwen3-Embedding-0.6B", *, max_tokens: int = 8192) -> None:
self.model_name = model_name
self.max_len = max_tokens
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Back-compat: allow short alias without repo namespace
repo_id = model_name
if "/" not in model_name and not model_name.startswith("Qwen/"):
# map common alias to official repo
alias_map = {
"qwen3-embedding-0.6b": "Qwen/Qwen3-Embedding-0.6B",
}
repo_id = alias_map.get(model_name.lower(), model_name)
self.tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)
self.model = AutoModel.from_pretrained(repo_id, trust_remote_code=True)
self.model.to(self.device)
self.model.eval()
    @torch.inference_mode()
    def encode(self, text: str, chunk_spans: List[Tuple[int, int]]) -> List[np.ndarray]:
        """Return one vector *per* span.

        Args:
            text: Full document text.
            chunk_spans: List of (char_start, char_end) offsets for each chunk.

        Returns:
            List of numpy float32 arrays – one per chunk.
        """
        if not chunk_spans:
            return []
        # Tokenise the WHOLE document once and keep per-token hidden states.
        # Anything beyond ``max_len`` tokens is truncated, so spans that fall
        # entirely past that point will hit the [0]-index fallback below.
        inputs = self.tokenizer(
            text,
            return_tensors="pt",
            return_offsets_mapping=True,
            truncation=True,
            max_length=self.max_len,
        )
        inputs = {k: v.to(self.device) for k, v in inputs.items()}
        # Per-token character offsets; popped because the model forward does not accept it.
        offsets = inputs.pop("offset_mapping").squeeze(0).cpu().tolist()  # (seq_len, 2)
        out = self.model(**inputs)
        last_hidden = out.last_hidden_state.squeeze(0)  # (seq_len, dim)
        last_hidden = last_hidden.cpu()
        # For each chunk span, mean-pool the tokens that lie fully inside it.
        vectors: List[np.ndarray] = []
        for start_char, end_char in chunk_spans:
            token_indices = [i for i, (s, e) in enumerate(offsets) if s >= start_char and e <= end_char]
            if not token_indices:
                # Fallback: if tokenizer lost the span (e.g. due to trimming) just average CLS + SEP
                token_indices = [0]
            chunk_vec = last_hidden[token_indices].mean(dim=0).numpy().astype("float32")
            # Check for NaN or infinite values
            if np.isnan(chunk_vec).any() or np.isinf(chunk_vec).any():
                print(f"⚠️ Warning: Invalid values detected in late chunk embedding for span ({start_char}, {end_char})")
                # Replace invalid values with zeros
                chunk_vec = np.nan_to_num(chunk_vec, nan=0.0, posinf=0.0, neginf=0.0)
                print(f"🔄 Replaced invalid values with zeros")
            vectors.append(chunk_vec)
return vectors | {
"repo_id": "PromtEngineer/localGPT",
"file_path": "rag_system/indexing/latechunk.py",
"license": "MIT License",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PromtEngineer/localGPT:rag_system/indexing/multimodal.py | import fitz # PyMuPDF
from PIL import Image
import torch
import os
from typing import List, Dict, Any
from rag_system.indexing.embedders import LanceDBManager, VectorIndexer
from rag_system.indexing.representations import QwenEmbedder
from transformers import ColPaliForRetrieval, ColPaliProcessor, Qwen2TokenizerFast
class LocalVisionModel:
    """
    A wrapper for a local vision model (ColPali) from the transformers library.
    """
    def __init__(self, model_name: str = "vidore/colqwen2-v1.0", device: str = "cpu"):
        print(f"Initializing local vision model '{model_name}' on device '{device}'.")
        self.device = device
        # Load the retrieval model in eval mode on the requested device, then
        # assemble the processor from its tokenizer and image-processor halves.
        self.model = ColPaliForRetrieval.from_pretrained(model_name).to(self.device).eval()
        self.tokenizer = Qwen2TokenizerFast.from_pretrained(model_name)
        self.image_processor = ColPaliProcessor.from_pretrained(model_name).image_processor
        self.processor = ColPaliProcessor(tokenizer=self.tokenizer, image_processor=self.image_processor)
        print("Local vision model loaded successfully.")
    def embed_image(self, image: Image.Image) -> torch.Tensor:
        """
        Generates a multi-vector embedding for a single image.
        """
        batch = self.processor(text="", images=image, return_tensors="pt").to(self.device)
        with torch.no_grad():
            return self.model.get_image_features(**batch)
class MultimodalProcessor:
    """
    Processes PDFs into separate text and image embeddings using local models.

    Each PDF page yields one text chunk (its raw text, or a placeholder when a
    page has no extractable text) and one rendered page image; both are
    embedded and indexed into their own tables.
    """
    def __init__(self, vision_model: LocalVisionModel, text_embedder: QwenEmbedder, db_manager: LanceDBManager):
        self.vision_model = vision_model
        self.text_embedder = text_embedder
        self.text_vector_indexer = VectorIndexer(db_manager)
        self.image_vector_indexer = VectorIndexer(db_manager)
    def process_and_index(
        self,
        pdf_path: str,
        text_table_name: str,
        image_table_name: str
    ):
        """Extract per-page text and images from *pdf_path* and index both.

        Args:
            pdf_path: Path to the PDF file on disk.
            text_table_name: Table receiving the per-page text vectors.
            image_table_name: Table receiving the per-page image vectors.
        """
        print(f"\n--- Processing PDF for multimodal indexing: {os.path.basename(pdf_path)} ---")
        doc = fitz.open(pdf_path)
        document_id = os.path.basename(pdf_path)
        all_pages_text_chunks = []
        all_pages_images = []
        for page_num in range(len(doc)):
            page = doc.load_page(page_num)
            # 1. Extract text; insert a placeholder so page numbering stays aligned
            #    with the rendered images below.
            text = page.get_text("text")
            if not text.strip():
                text = f"Page {page_num + 1} contains no extractable text."
            all_pages_text_chunks.append({
                "chunk_id": f"{document_id}_page_{page_num+1}",
                "text": text,
                "metadata": {"document_id": document_id, "page_number": page_num + 1}
            })
            # 2. Render the page to an RGB image for the vision model.
            pix = page.get_pixmap()
            img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
            all_pages_images.append(img)
        # --- Batch Indexing ---
        # Index all text chunks
        if all_pages_text_chunks:
            text_embeddings = self.text_embedder.create_embeddings([c['text'] for c in all_pages_text_chunks])
            self.text_vector_indexer.index(text_table_name, all_pages_text_chunks, text_embeddings)
            print(f"Indexed {len(all_pages_text_chunks)} text pages into '{text_table_name}'.")
        # Index all images
        if all_pages_images:
            # BUG FIX: LocalVisionModel exposes embed_image (one image at a
            # time), not create_image_embeddings — the old call raised
            # AttributeError as soon as a PDF had any pages.
            image_embeddings = [self.vision_model.embed_image(img) for img in all_pages_images]
            # We use the text chunks as placeholders for metadata
            self.image_vector_indexer.index(image_table_name, all_pages_text_chunks, image_embeddings)
            print(f"Indexed {len(all_pages_images)} image pages into '{image_table_name}'.")
if __name__ == '__main__':
    # Smoke test for the multimodal pipeline.
    # This test requires an internet connection to download the models.
    # NOTE(review): the dummy PDF is left on disk afterwards — confirm whether
    # cleanup is intended.
    try:
        # 1. Setup models and dependencies
        text_embedder = QwenEmbedder()
        vision_model = LocalVisionModel()
        db_manager = LanceDBManager(db_path="./rag_system/index_store/lancedb")
        # 2. Create a dummy PDF (one page of plain text)
        dummy_pdf_path = "multimodal_test.pdf"
        doc = fitz.open()
        page = doc.new_page()
        page.insert_text((50, 72), "This is a test page with text and an image.")
        doc.save(dummy_pdf_path)
        # 3. Run the processor
        processor = MultimodalProcessor(vision_model, text_embedder, db_manager)
        processor.process_and_index(
            pdf_path=dummy_pdf_path,
            text_table_name="test_text_pages",
            image_table_name="test_image_pages"
        )
        # 4. Verify by counting rows in both freshly written tables
        print("\n--- Verification ---")
        text_tbl = db_manager.get_table("test_text_pages")
        img_tbl = db_manager.get_table("test_image_pages")
        print(f"Text table has {len(text_tbl)} rows.")
        print(f"Image table has {len(img_tbl)} rows.")
    except Exception as e:
        # Best-effort smoke test: model downloads or missing deps land here.
        print(f"\nAn error occurred during the multimodal test: {e}")
print("Please ensure you have an internet connection for model downloads.") | {
"repo_id": "PromtEngineer/localGPT",
"file_path": "rag_system/indexing/multimodal.py",
"license": "MIT License",
"lines": 104,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PromtEngineer/localGPT:rag_system/indexing/overview_builder.py | from __future__ import annotations
import os, json, logging, re
from typing import List, Dict, Any
logger = logging.getLogger(__name__)
class OverviewBuilder:
    """Generates and stores a one-paragraph overview for each document.

    The overview is derived from the first *n* chunks of the document and
    appended as one JSON line per document to ``self.out_path``.
    """
    # Prompt template; ``{text}`` is filled with the document head
    # (capped at 5000 characters in build_and_store).
    DEFAULT_PROMPT = (
        "You will receive the beginning of a document. "
        "In no more than 120 tokens, describe what the document is about, "
        "state its type (e.g. invoice, slide deck, policy, research paper, receipt) "
        "and mention 3-5 important entities, numbers or dates it contains.\n\n"
        "DOCUMENT_START:\n{text}\n\nOVERVIEW:"
    )
    def __init__(self, llm_client, model: str = "qwen3:0.6b", first_n_chunks: int = 5,
                 out_path: str | None = None):
        """
        Args:
            llm_client: Client exposing ``generate_completion(model, prompt, ...)``.
            model: Model tag used for summarisation.
            first_n_chunks: How many leading chunks feed the prompt.
            out_path: JSONL output file; defaults to
                "index_store/overviews/overviews.jsonl".
        """
        if out_path is None:
            out_path = "index_store/overviews/overviews.jsonl"
        self.llm_client = llm_client
        self.model = model
        self.first_n = first_n_chunks
        self.out_path = out_path
        os.makedirs(os.path.dirname(out_path), exist_ok=True)
    def build_and_store(self, doc_id: str, chunks: List[Dict[str, Any]]):
        """Summarise the head of *doc_id* and append the record to the JSONL file.

        A failure in the LLM call is recorded as the overview text rather than
        raised, so indexing never aborts on a summarisation error.
        """
        if not chunks:
            return
        head_text = "\n".join(c["text"] for c in chunks[: self.first_n] if c.get("text"))
        prompt = self.DEFAULT_PROMPT.format(text=head_text[:5000])  # safety cap
        try:
            resp = self.llm_client.generate_completion(model=self.model, prompt=prompt, enable_thinking=False)
            summary_raw = resp.get("response", "")
            # Remove any lingering <think>...</think> blocks just in case
            summary = re.sub(r'<think[^>]*>.*?</think>', '', summary_raw, flags=re.IGNORECASE | re.DOTALL).strip()
        except Exception as e:
            summary = f"Failed to generate overview: {e}"
        record = {"doc_id": doc_id, "overview": summary.strip()}
        # Append-only JSONL: one record per processed document.
        with open(self.out_path, "a", encoding="utf-8") as f:
            f.write(json.dumps(record, ensure_ascii=False) + "\n")
logger.info(f"📄 Overview generated for {doc_id} (stored in {self.out_path})") | {
"repo_id": "PromtEngineer/localGPT",
"file_path": "rag_system/indexing/overview_builder.py",
"license": "MIT License",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PromtEngineer/localGPT:rag_system/indexing/representations.py | from typing import List, Dict, Any, Protocol
import numpy as np
from transformers import AutoModel, AutoTokenizer
import torch
import os
# We keep the protocol to ensure a consistent interface
class EmbeddingModel(Protocol):
def create_embeddings(self, texts: List[str]) -> np.ndarray: ...
# Global cache for models - use dict to cache by model name
_MODEL_CACHE = {}
# --- New Ollama Embedder ---
class QwenEmbedder(EmbeddingModel):
    """
    An embedding model that uses a local Hugging Face transformer model.

    Weights are cached process-wide in ``_MODEL_CACHE`` keyed by model name,
    so repeated instantiations of the same model are cheap.
    """
    def __init__(self, model_name: str = "Qwen/Qwen3-Embedding-0.6B"):
        self.model_name = model_name
        # Auto-select the best available device: CUDA > MPS > CPU
        if torch.cuda.is_available():
            self.device = "cuda"
        elif getattr(torch.backends, "mps", None) and torch.backends.mps.is_available():
            self.device = "mps"
        else:
            self.device = "cpu"
        # Use model-specific cache
        if model_name not in _MODEL_CACHE:
            print(f"Initializing HF Embedder with model '{model_name}' on device '{self.device}'. (first load)")
            tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True, padding_side="left")
            model = AutoModel.from_pretrained(
                model_name,
                trust_remote_code=True,
                torch_dtype=torch.float16 if self.device != "cpu" else None,
            ).to(self.device).eval()
            _MODEL_CACHE[model_name] = (tokenizer, model)
            print(f"QwenEmbedder weights loaded and cached for {model_name}.")
        else:
            print(f"Reusing cached QwenEmbedder weights for {model_name}.")
        self.tokenizer, self.model = _MODEL_CACHE[model_name]
    def create_embeddings(self, texts: List[str]) -> np.ndarray:
        """Embed *texts* via last-token pooling.

        Returns:
            Array of shape (len(texts), hidden_dim) with NaN/Inf sanitised to 0.
        """
        print(f"Generating {len(texts)} embeddings with {self.model_name} model...")
        inputs = self.tokenizer(texts, padding=True, truncation=True, return_tensors="pt").to(self.device)
        with torch.no_grad():
            outputs = self.model(**inputs)
            last_hidden = outputs.last_hidden_state  # [B, seq, dim]
            attention_mask = inputs["attention_mask"]
            # BUG FIX: the tokenizer is loaded with padding_side="left", so the
            # last *valid* token of every sequence sits at the final position.
            # The old code used attention_mask.sum()-1, which is only correct
            # for RIGHT padding and pooled a mid-sequence token for any input
            # shorter than the batch maximum. Pooling recipe follows the
            # official Qwen3-Embedding usage example (handles both paddings).
            if attention_mask[:, -1].all():
                embeddings = last_hidden[:, -1]  # left padding (or no padding)
            else:
                seq_len = attention_mask.sum(dim=1) - 1  # index of last valid token
                batch_indices = torch.arange(last_hidden.size(0), device=last_hidden.device)
                embeddings = last_hidden[batch_indices, seq_len]
        # Convert to numpy and validate
        embeddings_np = embeddings.cpu().numpy()
        # Check for NaN or infinite values
        if np.isnan(embeddings_np).any():
            print(f"⚠️ Warning: NaN values detected in embeddings from {self.model_name}")
            # Replace NaN values with zeros
            embeddings_np = np.nan_to_num(embeddings_np, nan=0.0, posinf=0.0, neginf=0.0)
            print(f"🔄 Replaced NaN values with zeros")
        if np.isinf(embeddings_np).any():
            print(f"⚠️ Warning: Infinite values detected in embeddings from {self.model_name}")
            # Replace infinite values with zeros
            embeddings_np = np.nan_to_num(embeddings_np, nan=0.0, posinf=0.0, neginf=0.0)
            print(f"🔄 Replaced infinite values with zeros")
        return embeddings_np
class EmbeddingGenerator:
    """Drives batched embedding generation over chunk dictionaries."""
    def __init__(self, embedding_model: EmbeddingModel, batch_size: int = 50):
        self.model = embedding_model
        self.batch_size = batch_size
    def generate(self, chunks: List[Dict[str, Any]]) -> List[np.ndarray]:
        """Generate embeddings for all chunks using batch processing"""
        texts = [entry['text'] for entry in chunks]
        if not texts:
            return []
        from rag_system.utils.batch_processor import BatchProcessor, estimate_memory_usage
        memory_mb = estimate_memory_usage(chunks)
        print(f"Estimated memory usage for {len(chunks)} chunks: {memory_mb:.1f}MB")
        processor = BatchProcessor(batch_size=self.batch_size)
        def _embed_batch(text_batch):
            # Guard: the batch processor may hand us an empty trailing batch.
            if not text_batch:
                return []
            return list(self.model.create_embeddings(text_batch))
        return processor.process_in_batches(
            texts,
            _embed_batch,
            "Embedding Generation"
        )
class OllamaEmbedder(EmbeddingModel):
    """Call Ollama's /api/embeddings endpoint for each text."""
    def __init__(self, model_name: str, host: str | None = None, timeout: int = 60):
        self.model_name = model_name
        self.host = (host or os.getenv("OLLAMA_HOST") or "http://localhost:11434").rstrip("/")
        self.timeout = timeout
    def _embed_single(self, text: str):
        """POST one prompt to /api/embeddings and return a float32 vector."""
        import requests, numpy as np, json
        payload = {"model": self.model_name, "prompt": text}
        resp = requests.post(f"{self.host}/api/embeddings", json=payload, timeout=self.timeout)
        resp.raise_for_status()
        body = resp.json()
        # Ollama may return {"embedding": [...]} or {"data": [...]} depending on version
        vector = body.get("embedding") or body.get("data")
        if vector is None:
            raise ValueError("Unexpected Ollama embeddings response format")
        return np.array(vector, dtype="float32")
    def create_embeddings(self, texts: List[str]):
        """Embed every text sequentially and stack the rows into one array."""
        import numpy as np
        stacked = np.vstack([self._embed_single(t) for t in texts])
        # Sanitise any non-finite values the endpoint might have produced.
        if np.isnan(stacked).any():
            print(f"⚠️ Warning: NaN values detected in Ollama embeddings from {self.model_name}")
            stacked = np.nan_to_num(stacked, nan=0.0, posinf=0.0, neginf=0.0)
            print(f"🔄 Replaced NaN values with zeros")
        if np.isinf(stacked).any():
            print(f"⚠️ Warning: Infinite values detected in Ollama embeddings from {self.model_name}")
            stacked = np.nan_to_num(stacked, nan=0.0, posinf=0.0, neginf=0.0)
            print(f"🔄 Replaced infinite values with zeros")
        return stacked
def select_embedder(model_name: str, ollama_host: str | None = None):
    """Return appropriate EmbeddingModel implementation for the given name.

    Names containing "/" (HF repo ids) or starting with "http" are loaded
    locally via QwenEmbedder; anything else is treated as an Ollama model tag.
    """
    looks_like_hf_path = "/" in model_name or model_name.startswith("http")
    if looks_like_hf_path:
        return QwenEmbedder(model_name=model_name)
    return OllamaEmbedder(model_name=model_name, host=ollama_host)
if __name__ == '__main__':
    # Smoke test: generate embeddings for two tiny chunks via the batch path.
    print("representations.py cleaned up.")
    try:
        qwen_embedder = QwenEmbedder()
        emb_gen = EmbeddingGenerator(embedding_model=qwen_embedder)
        sample_chunks = [{'text': 'Hello world'}, {'text': 'This is a test'}]
        embeddings = emb_gen.generate(sample_chunks)
        print(f"\nSuccessfully generated {len(embeddings)} embeddings.")
        print(f"Shape of first embedding: {embeddings[0].shape}")
    except Exception as e:
        # Best-effort: model downloads or missing dependencies land here.
        print(f"\nAn error occurred during the QwenEmbedder test: {e}")
print("Please ensure you have an internet connection for model downloads.") | {
"repo_id": "PromtEngineer/localGPT",
"file_path": "rag_system/indexing/representations.py",
"license": "MIT License",
"lines": 138,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PromtEngineer/localGPT:rag_system/ingestion/chunking.py | from typing import List, Dict, Any, Optional
import re
from transformers import AutoTokenizer
class MarkdownRecursiveChunker:
    """
    A recursive chunker that splits Markdown text based on its semantic structure
    and embeds document-level metadata into each chunk.

    Splitting is attempted on progressively weaker separators (## / ### / ####
    headings, code fences, blank lines); any piece still over
    ``max_chunk_size`` tokens is finally split on word boundaries.
    """
    def __init__(self, max_chunk_size: int = 1500, min_chunk_size: int = 200, tokenizer_model: str = "Qwen/Qwen3-Embedding-0.6B"):
        """
        Args:
            max_chunk_size: Upper bound (in tokens) for a chunk.
            min_chunk_size: Chunks below this size are merged with the next piece.
            tokenizer_model: HF tokenizer used for exact token counts; a short
                alias without a repo namespace is resolved to its official id.
        """
        self.max_chunk_size = max_chunk_size
        self.min_chunk_size = min_chunk_size
        self.split_priority = ["\n## ", "\n### ", "\n#### ", "```", "\n\n"]
        repo_id = tokenizer_model
        if "/" not in tokenizer_model and not tokenizer_model.startswith("Qwen/"):
            repo_id = {
                "qwen3-embedding-0.6b": "Qwen/Qwen3-Embedding-0.6B",
            }.get(tokenizer_model.lower(), tokenizer_model)
        try:
            self.tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)
        except Exception as e:
            print(f"Warning: Failed to load tokenizer {repo_id}: {e}")
            print("Falling back to character-based approximation (4 chars ≈ 1 token)")
            self.tokenizer = None
    def _token_len(self, text: str) -> int:
        """Get token count for text (≈ len/4 when no tokenizer is loaded)."""
        if self.tokenizer is not None:
            return len(self.tokenizer.tokenize(text))
        return max(1, len(text) // 4)
    def _split_text(self, text: str, separators: List[str]) -> List[str]:
        """Recursively split *text* on each separator until pieces fit.

        Each separator match is kept and re-attached to the text that follows
        it, so e.g. a heading stays with its section content.
        """
        chunks_to_process = [text]
        for sep in separators:
            new_chunks: List[str] = []
            for chunk in chunks_to_process:
                if self._token_len(chunk) <= self.max_chunk_size:
                    new_chunks.append(chunk)
                    continue
                # Capture the separator so it can be glued onto the following text.
                parts = re.split(f'({re.escape(sep)})', chunk)
                # BUG FIX: the previous recombination dropped the text that
                # preceded the first separator occurrence (sub_chunks[0] was
                # skipped). Keep it, then pair every separator with the text
                # immediately after it.
                if parts[0]:
                    new_chunks.append(parts[0])
                for j in range(1, len(parts) - 1, 2):
                    piece = parts[j] + parts[j + 1]
                    if piece:
                        new_chunks.append(piece)
            chunks_to_process = new_chunks
        final_chunks = []
        for chunk in chunks_to_process:
            if self._token_len(chunk) > self.max_chunk_size:
                # Last resort: greedily pack words up to the token budget.
                words = chunk.split()
                current_chunk = ""
                for word in words:
                    test_chunk = current_chunk + " " + word if current_chunk else word
                    if self._token_len(test_chunk) <= self.max_chunk_size:
                        current_chunk = test_chunk
                    else:
                        if current_chunk:
                            final_chunks.append(current_chunk)
                        current_chunk = word
                if current_chunk:
                    final_chunks.append(current_chunk)
            else:
                final_chunks.append(chunk)
        return final_chunks
    def chunk(self, text: str, document_id: str, document_metadata: Optional[Dict[str, Any]] = None) -> List[Dict[str, Any]]:
        """
        Chunks the Markdown text and injects metadata.

        Args:
            text: The Markdown text to chunk.
            document_id: The identifier for the source document.
            document_metadata: A dictionary of metadata for the source document.

        Returns:
            A list of dictionaries, where each dictionary is a chunk with metadata.
        """
        if not text:
            return []
        raw_chunks = self._split_text(text, self.split_priority)
        merged_chunks_text = []
        current_chunk = ""
        for chunk_text in raw_chunks:
            test_chunk = current_chunk + chunk_text if current_chunk else chunk_text
            if not current_chunk or self._token_len(test_chunk) <= self.max_chunk_size:
                current_chunk = test_chunk
            elif self._token_len(current_chunk) < self.min_chunk_size:
                # Undersized chunk: merge forward even if it overshoots the cap.
                current_chunk = test_chunk
            else:
                merged_chunks_text.append(current_chunk)
                current_chunk = chunk_text
        if current_chunk:
            merged_chunks_text.append(current_chunk)
        final_chunks = []
        for i, chunk_text in enumerate(merged_chunks_text):
            # Combine document-level metadata with chunk-specific metadata
            combined_metadata = (document_metadata or {}).copy()
            combined_metadata.update({
                "document_id": document_id,
                "chunk_number": i,
            })
            final_chunks.append({
                "chunk_id": f"{document_id}_{i}",  # unique within the document
                "text": chunk_text.strip(),
                "metadata": combined_metadata
            })
        return final_chunks
def create_contextual_window(all_chunks: List[Dict[str, Any]], chunk_index: int, window_size: int = 1) -> str:
    """Join the texts of *chunk_index* and its neighbours into one string.

    Args:
        all_chunks: Ordered chunk dicts, each carrying a 'text' key.
        chunk_index: Index of the centre chunk.
        window_size: Neighbours to include on each side (clamped at the ends).

    Raises:
        ValueError: If *chunk_index* is outside ``all_chunks``.
    """
    if chunk_index < 0 or chunk_index >= len(all_chunks):
        raise ValueError("chunk_index is out of bounds.")
    lo = max(0, chunk_index - window_size)
    hi = min(len(all_chunks), chunk_index + window_size + 1)
    return " ".join(entry['text'] for entry in all_chunks[lo:hi])
if __name__ == '__main__':
    # Demo: chunk a tiny Markdown document and show the injected metadata.
    print("chunking.py updated to include document metadata in each chunk.")
    sample_markdown = "# Doc Title\n\nContent paragraph."
    doc_meta = {"title": "My Awesome Document", "author": "Jane Doe", "year": 2024}
    chunker = MarkdownRecursiveChunker()
    chunks = chunker.chunk(
        text=sample_markdown,
        document_id="doc456",
        document_metadata=doc_meta
    )
    print(f"\n--- Created {len(chunks)} chunk(s) ---")
    for chunk in chunks:
        print(f"Chunk ID: {chunk['chunk_id']}")
        print(f"Text: '{chunk['text']}'")
        print(f"Metadata: {chunk['metadata']}")
        print("-" * 20)
| {
"repo_id": "PromtEngineer/localGPT",
"file_path": "rag_system/ingestion/chunking.py",
"license": "MIT License",
"lines": 132,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PromtEngineer/localGPT:rag_system/ingestion/docling_chunker.py | from __future__ import annotations
"""Docling-aware chunker (simplified).
For now we proxy the old MarkdownRecursiveChunker but add:
• sentence-aware packing to max_tokens with overlap
• breadcrumb metadata stubs so downstream code already handles them
In a follow-up we can replace the internals with true Docling element-tree
walking once the PDFConverter returns structured nodes.
"""
from typing import List, Dict, Any, Tuple
import math
import re
from itertools import islice
from rag_system.ingestion.chunking import MarkdownRecursiveChunker
from transformers import AutoTokenizer
class DoclingChunker:
def __init__(self, *, max_tokens: int = 512, overlap: int = 1, tokenizer_model: str = "Qwen/Qwen3-Embedding-0.6B"):
self.max_tokens = max_tokens
self.overlap = overlap # sentences of overlap
repo_id = tokenizer_model
if "/" not in tokenizer_model and not tokenizer_model.startswith("Qwen/"):
repo_id = {
"qwen3-embedding-0.6b": "Qwen/Qwen3-Embedding-0.6B",
}.get(tokenizer_model.lower(), tokenizer_model)
try:
self.tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)
except Exception as e:
print(f"Warning: Failed to load tokenizer {repo_id}: {e}")
print("Falling back to character-based approximation (4 chars ≈ 1 token)")
self.tokenizer = None
# Fallback simple sentence splitter (period, question, exclamation, newline)
self._sent_re = re.compile(r"(?<=[\.\!\?])\s+|\n+")
self.legacy = MarkdownRecursiveChunker(max_chunk_size=10_000, min_chunk_size=100)
# ------------------------------------------------------------------
def _token_len(self, text: str) -> int:
if self.tokenizer is not None:
return len(self.tokenizer.tokenize(text))
else:
# Fallback: approximate 4 characters per token
return max(1, len(text) // 4)
def split_markdown(self, markdown: str, *, document_id: str, metadata: Dict[str, Any]) -> List[Dict[str, Any]]:
"""Split one Markdown doc into chunks with max_tokens limit."""
base_chunks = self.legacy.chunk(markdown, document_id, metadata)
new_chunks: List[Dict[str, Any]] = []
global_idx = 0
for ch in base_chunks:
sentences = [s.strip() for s in self._sent_re.split(ch["text"]) if s.strip()]
if not sentences:
continue
window: List[str] = []
while sentences:
# Add until over limit
while sentences and self._token_len(" ".join(window + [sentences[0]])) <= self.max_tokens:
window.append(sentences.pop(0))
if not window: # single sentence > limit → hard cut
window.append(sentences.pop(0))
chunk_text = " ".join(window)
new_chunk = {
"chunk_id": f"{document_id}_{global_idx}",
"text": chunk_text,
"metadata": {
**metadata,
"document_id": document_id,
"chunk_index": global_idx,
"heading_path": metadata.get("heading_path", []),
"heading_level": len(metadata.get("heading_path", [])),
"block_type": metadata.get("block_type", "paragraph"),
},
}
new_chunks.append(new_chunk)
global_idx += 1
# Overlap: prepend last `overlap` sentences of the current window to the remaining queue
if self.overlap and sentences:
back = window[-self.overlap:] if self.overlap <= len(window) else window[:]
sentences = back + sentences
window = []
return new_chunks
# ------------------------------------------------------------------
# Element-tree based chunking (true Docling path)
# ------------------------------------------------------------------
    def chunk_document(self, doc, *, document_id: str, metadata: Dict[str, Any] | None = None) -> List[Dict[str, Any]]:
        """Walk a DoclingDocument and emit chunks.

        Tables / Code / Figures are emitted as atomic chunks.
        Paragraph-like nodes are sentence-packed to <= max_tokens.

        Args:
            doc: Docling document object; only ``.texts``, ``.tables`` and
                ``.export_to_markdown`` are accessed (all via getattr, so a
                duck-typed object works).
            document_id: Id stamped into each chunk id and metadata.
            metadata: Optional document-level metadata merged into every chunk.

        Returns:
            List of chunk dicts. If the tree walk raises for any reason, the
            method falls back to :meth:`split_markdown` on the exported Markdown.
        """
        metadata = metadata or {}
        def _token_len(txt: str) -> int:
            if self.tokenizer is not None:
                return len(self.tokenizer.tokenize(txt))
            else:
                # Fallback: approximate 4 characters per token
                return max(1, len(txt) // 4)
        chunks: List[Dict[str, Any]] = []
        global_idx = 0
        # Helper to create a chunk and append to list
        def _add_chunk(text: str, block_type: str, heading_path: List[str], page_no: int | None = None):
            nonlocal global_idx
            if not text.strip():
                return
            chunk_meta = {
                **metadata,
                "document_id": document_id,
                "chunk_index": global_idx,
                "heading_path": heading_path,
                "heading_level": len(heading_path),
                "block_type": block_type,
            }
            if page_no is not None:
                chunk_meta["page"] = page_no
            chunks.append({
                "chunk_id": f"{document_id}_{global_idx}",
                "text": text,
                "metadata": chunk_meta,
            })
            global_idx += 1
        # The Docling API exposes .body which is a tree of nodes; we fall back to .texts/.tables lists if available
        try:
            # We walk doc.texts (reading order). We'll buffer consecutive paragraph items
            current_heading_path: List[str] = []
            buffer: List[str] = []
            buffer_tokens = 0
            buffer_page = None
            # Emit the buffered paragraphs as one chunk and reset the buffer.
            def flush_buffer():
                nonlocal buffer, buffer_tokens, buffer_page
                if buffer:
                    _add_chunk(" ".join(buffer), "paragraph", heading_path=current_heading_path[:], page_no=buffer_page)
                buffer, buffer_tokens, buffer_page = [], 0, None
            # Create quick lookup for table items by id to preserve later insertion order if needed
            tables_by_anchor = {
                getattr(t, "anchor_text_id", None): t
                for t in getattr(doc, "tables", [])
                if getattr(t, "anchor_text_id", None) is not None
            }
            for txt_item in getattr(doc, "texts", []):
                # If this text item is a placeholder for a table anchor, emit table first
                anchor_id = getattr(txt_item, "id", None)
                if anchor_id in tables_by_anchor:
                    flush_buffer()
                    tbl = tables_by_anchor[anchor_id]
                    try:
                        tbl_md = tbl.export_to_markdown(doc)  # pass doc for deprecation compliance
                    except Exception:
                        tbl_md = tbl.export_to_markdown() if hasattr(tbl, "export_to_markdown") else str(tbl)
                    _add_chunk(tbl_md, "table", heading_path=current_heading_path[:], page_no=getattr(tbl, "page_no", None))
                role = getattr(txt_item, "role", None)
                if role == "heading":
                    flush_buffer()
                    level = getattr(txt_item, "level", 1)
                    # Truncate the breadcrumb to the parent level, then append this heading.
                    current_heading_path = current_heading_path[: max(0, level - 1)]
                    current_heading_path.append(txt_item.text.strip())
                    continue  # skip heading as content
                text_piece = txt_item.text if hasattr(txt_item, "text") else str(txt_item)
                piece_tokens = _token_len(text_piece)
                if piece_tokens > self.max_tokens:  # very long paragraph
                    flush_buffer()
                    _add_chunk(text_piece, "paragraph", heading_path=current_heading_path[:], page_no=getattr(txt_item, "page_no", None))
                    continue
                if buffer_tokens + piece_tokens > self.max_tokens:
                    flush_buffer()
                buffer.append(text_piece)
                buffer_tokens += piece_tokens
                if buffer_page is None:
                    buffer_page = getattr(txt_item, "page_no", None)
            flush_buffer()
            # Emit any remaining tables that were not anchored
            for tbl in getattr(doc, "tables", []):
                if tbl in tables_by_anchor.values():
                    continue  # already emitted
                try:
                    tbl_md = tbl.export_to_markdown(doc)
                except Exception:
                    tbl_md = tbl.export_to_markdown() if hasattr(tbl, "export_to_markdown") else str(tbl)
                _add_chunk(tbl_md, "table", heading_path=current_heading_path[:], page_no=getattr(tbl, "page_no", None))
        except Exception as e:
            print(f"⚠️ Docling tree walk failed: {e}. Falling back to markdown splitter.")
            return self.split_markdown(doc.export_to_markdown(), document_id=document_id, metadata=metadata)
        # --------------------------------------------------------------
        # Second-pass consolidation: merge small consecutive paragraph
        # chunks that share heading & page into up-to-max_tokens blobs.
        # --------------------------------------------------------------
        consolidated: List[Dict[str, Any]] = []
        buf_txt: List[str] = []
        buf_meta: Dict[str, Any] | None = None
        def flush_paragraph_buffer():
            nonlocal buf_txt, buf_meta
            if not buf_txt:
                return
            merged_text = " ".join(buf_txt)
            # Re-use meta from first piece but update chunk_id later
            # NOTE(review): chunk_id/chunk_index are NOT actually renumbered
            # here, so consolidated ids may be non-contiguous — confirm whether
            # downstream consumers rely on contiguous indices.
            new_chunk = {
                "chunk_id": buf_meta["chunk_id"],
                "text": merged_text,
                "metadata": buf_meta["metadata"],
            }
            consolidated.append(new_chunk)
            buf_txt = []
            buf_meta = None
        for ch in chunks:
            if ch["metadata"].get("block_type") != "paragraph":
                flush_paragraph_buffer()
                consolidated.append(ch)
                continue
            if not buf_txt:
                buf_txt.append(ch["text"])
                buf_meta = ch
                continue
            same_page = ch["metadata"].get("page") == buf_meta["metadata"].get("page")
            same_heading = ch["metadata"].get("heading_path") == buf_meta["metadata"].get("heading_path")
            prospective_len = self._token_len(" ".join(buf_txt + [ch["text"]]))
            if same_page and same_heading and prospective_len <= self.max_tokens:
                buf_txt.append(ch["text"])
            else:
                flush_paragraph_buffer()
                buf_txt.append(ch["text"])
                buf_meta = ch
        flush_paragraph_buffer()
        return consolidated
# Public API expected by IndexingPipeline --------------------------------
def chunk(self, text: str, document_id: str, document_metadata: Dict[str, Any] | None = None) -> List[Dict[str, Any]]:
return self.split_markdown(text, document_id=document_id, metadata=document_metadata or {}) | {
"repo_id": "PromtEngineer/localGPT",
"file_path": "rag_system/ingestion/docling_chunker.py",
"license": "MIT License",
"lines": 219,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PromtEngineer/localGPT:rag_system/main.py | import os
import json
import sys
import argparse
from dotenv import load_dotenv
# Load environment variables from .env file
load_dotenv()
# The sys.path manipulation has been removed to prevent import conflicts.
# This script should be run as a module from the project root, e.g.:
# python -m rag_system.main api
from rag_system.agent.loop import Agent
from rag_system.utils.ollama_client import OllamaClient
# Configuration is now defined in this file - no import needed
# Advanced RAG System Configuration
# ==================================
# This file contains the MASTER configuration for all models used in the RAG system.
# All components should reference these configurations to ensure consistency.
# ============================================================================
# 🎯 MASTER MODEL CONFIGURATION
# ============================================================================
# All model configurations are centralized here to prevent conflicts
# LLM Backend Configuration
# Selects which chat backend get_agent() wires up: "ollama" (default, local)
# or "watsonx" (IBM cloud).
LLM_BACKEND = os.getenv("LLM_BACKEND", "ollama")

# Ollama Models Configuration (for inference via Ollama)
OLLAMA_CONFIG = {
    "host": os.getenv("OLLAMA_HOST", "http://localhost:11434"),
    "generation_model": "qwen3:8b",  # Main text generation model
    "enrichment_model": "qwen3:0.6b",  # Lightweight model for routing/enrichment
}

# Watson X credentials/models; api_key and project_id are mandatory when
# LLM_BACKEND == "watsonx" (get_agent raises ValueError otherwise).
WATSONX_CONFIG = {
    "api_key": os.getenv("WATSONX_API_KEY", ""),
    "project_id": os.getenv("WATSONX_PROJECT_ID", ""),
    "url": os.getenv("WATSONX_URL", "https://us-south.ml.cloud.ibm.com"),
    "generation_model": os.getenv("WATSONX_GENERATION_MODEL", "ibm/granite-13b-chat-v2"),
    "enrichment_model": os.getenv("WATSONX_ENRICHMENT_MODEL", "ibm/granite-8b-japanese"), # Lightweight model
}

# External Model Configuration (HuggingFace models used directly)
EXTERNAL_MODELS = {
    "embedding_model": "Qwen/Qwen3-Embedding-0.6B",  # HuggingFace embedding model (1024 dims - fresh start)
    "reranker_model": "answerdotai/answerai-colbert-small-v1",  # ColBERT reranker
    "vision_model": "Qwen/Qwen-VL-Chat",  # Vision model for multimodal
    "fallback_reranker": "BAAI/bge-reranker-base",  # Backup reranker
}
# ============================================================================
# 🔧 PIPELINE CONFIGURATIONS
# ============================================================================
# Master pipeline profiles, keyed by mode name and consumed by
# get_agent()/run_indexing() via PIPELINE_CONFIGS.get(mode, ...).
PIPELINE_CONFIGS = {
    "default": {
        "description": "Production-ready pipeline with hybrid search, AI reranking, and verification",
        "storage": {
            "lancedb_uri": "./lancedb",
            "text_table_name": "text_pages_v3",
            "image_table_name": "image_pages_v3",
            "bm25_path": "./index_store/bm25",
            "graph_path": "./index_store/graph/knowledge_graph.gml"
        },
        "retrieval": {
            "retriever": "multivector",
            "search_type": "hybrid",
            # NOTE(review): IndexingPipeline reads the key "latechunk", not
            # "late_chunking" — confirm which spelling is actually honoured.
            "late_chunking": {
                "enabled": True,
                "table_suffix": "_lc_v3"
            },
            "dense": {
                "enabled": True,
                "weight": 0.7
            },
            "bm25": {
                "enabled": True,
                "index_name": "rag_bm25_index"
            },
            "graph": {
                "enabled": False,
                "graph_path": "./index_store/graph/knowledge_graph.gml"
            }
        },
        # 🎯 EMBEDDING MODEL: Uses HuggingFace Qwen model directly
        "embedding_model_name": EXTERNAL_MODELS["embedding_model"],
        # 🎯 VISION MODEL: For multimodal capabilities
        "vision_model_name": EXTERNAL_MODELS["vision_model"],
        # 🎯 RERANKER: AI-powered reranking with ColBERT
        "reranker": {
            "enabled": True,
            "type": "ai",
            "strategy": "rerankers-lib",
            "model_name": EXTERNAL_MODELS["reranker_model"],
            "top_k": 10
        },
        "query_decomposition": {
            "enabled": True,
            "max_sub_queries": 3,
            "compose_from_sub_answers": True
        },
        "verification": {"enabled": True},
        "retrieval_k": 20,
        "context_window_size": 0,
        "semantic_cache_threshold": 0.98,
        "cache_scope": "global",
        # 🔧 Contextual enrichment configuration
        "contextual_enricher": {
            "enabled": True,
            "window_size": 1
        },
        # 🔧 Indexing configuration
        "indexing": {
            "embedding_batch_size": 50,
            "enrichment_batch_size": 10,
            "enable_progress_tracking": True
        }
    },
    "fast": {
        "description": "Speed-optimized pipeline with minimal overhead",
        "storage": {
            "lancedb_uri": "./lancedb",
            "text_table_name": "text_pages_v3",
            "image_table_name": "image_pages_v3",
            "bm25_path": "./index_store/bm25"
        },
        "retrieval": {
            "retriever": "multivector",
            "search_type": "vector_only",
            "late_chunking": {"enabled": False},
            "dense": {"enabled": True}
        },
        "embedding_model_name": EXTERNAL_MODELS["embedding_model"],
        "reranker": {"enabled": False},
        "query_decomposition": {"enabled": False},
        "verification": {"enabled": False},
        "retrieval_k": 10,
        "context_window_size": 0,
        # 🔧 Contextual enrichment (disabled for speed)
        "contextual_enricher": {
            "enabled": False,
            "window_size": 1
        },
        # 🔧 Indexing configuration
        "indexing": {
            "embedding_batch_size": 100,
            "enrichment_batch_size": 50,
            "enable_progress_tracking": False
        }
    },
    # NOTE(review): "bm25" and "graph_rag" are NOT pipeline modes like
    # "default"/"fast"; requesting them as a mode would yield a config with
    # no "storage" key. They look misplaced — confirm intent.
    "bm25": {
        "enabled": True,
        "index_name": "rag_bm25_index"
    },
    "graph_rag": {
        "enabled": False,  # Keep disabled for now unless specified
    }
}
# ============================================================================
# 🏭 FACTORY FUNCTIONS
# ============================================================================
def get_agent(mode: str = "default") -> Agent:
    """
    Factory function to get an instance of the RAG agent based on the specified mode.

    Args:
        mode: Configuration mode ("default", "fast"). Unknown modes fall back
            to the "default" profile.

    Returns:
        Configured Agent instance wired to the selected LLM backend.

    Raises:
        ValueError: If the Watson X backend is selected but WATSONX_API_KEY or
            WATSONX_PROJECT_ID is missing.
    """
    # Idempotent; keeps env vars available even when callers skipped it.
    load_dotenv()

    # Initialize the appropriate LLM client based on backend configuration.
    if LLM_BACKEND.lower() == "watsonx":
        # Local import keeps the IBM SDK optional for Ollama-only installs.
        from rag_system.utils.watsonx_client import WatsonXClient

        if not WATSONX_CONFIG["api_key"] or not WATSONX_CONFIG["project_id"]:
            raise ValueError(
                "Watson X configuration incomplete. Please set WATSONX_API_KEY and WATSONX_PROJECT_ID "
                "environment variables."
            )

        llm_client = WatsonXClient(
            api_key=WATSONX_CONFIG["api_key"],
            project_id=WATSONX_CONFIG["project_id"],
            url=WATSONX_CONFIG["url"]
        )
        llm_config = WATSONX_CONFIG
        # FIX: these were f-strings without placeholders (flake8 F541).
        print("🔧 Using Watson X backend with granite models")
    else:
        llm_client = OllamaClient(host=OLLAMA_CONFIG["host"])
        llm_config = OLLAMA_CONFIG
        print("🔧 Using Ollama backend")

    # Unknown modes fall back to the default profile rather than raising.
    config = PIPELINE_CONFIGS.get(mode, PIPELINE_CONFIGS['default'])

    agent = Agent(
        pipeline_configs=config,
        llm_client=llm_client,
        ollama_config=llm_config
    )
    return agent
def validate_model_config():
    """
    Validates the model configuration for consistency and availability.

    Raises:
        ValueError: If configuration conflicts are detected

    Returns:
        True when every check passes.
    """
    print("🔍 Validating model configuration...")

    default_cfg = PIPELINE_CONFIGS["default"]

    # The default profile must agree with the master model registry.
    default_embedding = default_cfg["embedding_model_name"]
    external_embedding = EXTERNAL_MODELS["embedding_model"]
    if default_embedding != external_embedding:
        raise ValueError(f"Embedding model mismatch: {default_embedding} != {external_embedding}")

    default_reranker = default_cfg["reranker"]["model_name"]
    external_reranker = EXTERNAL_MODELS["reranker_model"]
    if default_reranker != external_reranker:
        raise ValueError(f"Reranker model mismatch: {default_reranker} != {external_reranker}")

    print("✅ Model configuration validation passed!")
    return True
# ============================================================================
# 🚀 UTILITY FUNCTIONS
# ============================================================================
def run_indexing(docs_path, config_mode: str = "default"):
    """Runs the indexing pipeline for the specified documents.

    Args:
        docs_path: Either a directory containing PDFs, or a list of PDF file
            paths (the form main() passes). None falls back to the current
            working directory (matching the old os.listdir(None) behaviour).
        config_mode: Pipeline profile key from PIPELINE_CONFIGS; unknown keys
            fall back to "default".
    """
    print(f"📚 Starting indexing for documents in: {docs_path}")
    validate_model_config()

    # Local import to avoid circular dependencies
    from rag_system.pipelines.indexing_pipeline import IndexingPipeline

    config = PIPELINE_CONFIGS.get(config_mode, PIPELINE_CONFIGS["default"])
    # BUG FIX: IndexingPipeline.__init__ requires an LLM client and its config;
    # the previous call passed only the pipeline config and raised TypeError.
    ollama_client = OllamaClient(host=OLLAMA_CONFIG["host"])
    indexing_pipeline = IndexingPipeline(config, ollama_client, OLLAMA_CONFIG)

    # Accept both a directory path and an explicit list of files.
    if isinstance(docs_path, (list, tuple)):
        pdf_files = [f for f in docs_path if f.endswith(".pdf")]
    else:
        directory = docs_path or "."
        pdf_files = [os.path.join(directory, f) for f in os.listdir(directory) if f.endswith(".pdf")]

    if not pdf_files:
        print("No PDF files found to index.")
        return

    # BUG FIX: the pipeline's entry point is run(); process_documents() does
    # not exist on IndexingPipeline.
    indexing_pipeline.run(pdf_files)
    print("✅ Indexing complete.")
def run_chat(query: str):
    """Run the agentic RAG pipeline for *query* and return the result as a
    JSON string (errors are also returned as JSON)."""
    try:
        validate_model_config()
        # NOTE(review): always builds an Ollama client, even when
        # LLM_BACKEND == "watsonx" — confirm whether chat should honour the
        # backend switch like get_agent() does.
        client = OllamaClient(OLLAMA_CONFIG["host"])
    except ConnectionError as e:
        print(e)
        return json.dumps({"error": str(e)}, indent=2)
    except ValueError as e:
        print(f"Configuration Error: {e}")
        return json.dumps({"error": f"Configuration Error: {e}"}, indent=2)

    answer = Agent(PIPELINE_CONFIGS['default'], client, OLLAMA_CONFIG).run(query)
    return json.dumps(answer, indent=2, ensure_ascii=False)
def show_graph():
    """
    Loads and displays the knowledge graph, printing its nodes/edges and
    attempting an optional matplotlib visualisation.
    """
    # Imported lazily so the CLI works without plotting dependencies installed.
    import networkx as nx
    import matplotlib.pyplot as plt

    # BUG FIX: PIPELINE_CONFIGS has no top-level "indexing" entry (KeyError);
    # the graph location lives under the default profile's storage config.
    graph_path = PIPELINE_CONFIGS["default"]["storage"]["graph_path"]
    if not os.path.exists(graph_path):
        print("Knowledge graph not found. Please run the 'index' command first.")
        return

    G = nx.read_gml(graph_path)

    print("--- Knowledge Graph ---")
    print("Nodes:", G.nodes(data=True))
    print("Edges:", G.edges(data=True))
    print("---------------------")

    # Optional: Visualize the graph
    try:
        pos = nx.spring_layout(G)
        nx.draw(G, pos, with_labels=True, node_size=2000, node_color="skyblue", font_size=10, font_weight="bold")
        edge_labels = nx.get_edge_attributes(G, 'label')
        nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)
        plt.title("Knowledge Graph Visualization")
        plt.show()
    except Exception as e:
        print(f"\nCould not visualize the graph. Matplotlib might not be installed or configured for your environment.")
        print(f"Error: {e}")
def run_api_server():
    """Starts the advanced RAG API server."""
    # Imported lazily so the heavy API stack loads only when actually serving.
    from rag_system.api_server import start_server

    start_server()
def main():
    """CLI dispatcher for the positional interface: index | chat | show_graph | api."""
    if len(sys.argv) < 2:
        print("Usage: python main.py [index|chat|show_graph|api] [query]")
        return

    command = sys.argv[1]

    if command == "index":
        # BUG FIX: run_indexing() expects a docs *directory* (it calls
        # os.listdir on it), but this used to pass a list of argv entries,
        # which crashed. Pass the first argument as the directory; default to
        # the current directory, matching the old os.listdir(None) behaviour.
        docs_dir = sys.argv[2] if len(sys.argv) > 2 else "."
        run_indexing(docs_dir)
    elif command == "chat":
        if len(sys.argv) < 3:
            print("Usage: python main.py chat <query>")
            return
        query = " ".join(sys.argv[2:])
        # 🆕 Print the result for command-line usage
        print(run_chat(query))
    elif command == "show_graph":
        show_graph()
    elif command == "api":
        run_api_server()
    else:
        print(f"Unknown command: {command}")
if __name__ == "__main__":
    # This allows running the script from the command line to index documents.
    # NOTE(review): this argparse entry point never calls main(), so the
    # positional "index|chat|show_graph|api" interface defined above is
    # unreachable when the file is executed directly — confirm which CLI is
    # intended.
    parser = argparse.ArgumentParser(description="Main entry point for the RAG system.")
    parser.add_argument(
        '--index',
        type=str,
        help='Path to the directory containing documents to index.'
    )
    parser.add_argument(
        '--config',
        type=str,
        default='default',
        help='The configuration profile to use (e.g., "default", "fast").'
    )
    args = parser.parse_args()

    # Load environment variables
    load_dotenv()

    if args.index:
        run_indexing(args.index, args.config)
    else:
        # This is where you might start a server or interactive session
        print("No action specified. Use --index to process documents.")
        # Example of how to get an agent instance
        # agent = get_agent(args.config)
        # print(f"Agent loaded with '{args.config}' config.")
| {
"repo_id": "PromtEngineer/localGPT",
"file_path": "rag_system/main.py",
"license": "MIT License",
"lines": 321,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PromtEngineer/localGPT:rag_system/pipelines/indexing_pipeline.py | from typing import List, Dict, Any
import os
import networkx as nx
from rag_system.ingestion.document_converter import DocumentConverter
from rag_system.ingestion.chunking import MarkdownRecursiveChunker
from rag_system.indexing.representations import EmbeddingGenerator, select_embedder
from rag_system.indexing.embedders import LanceDBManager, VectorIndexer
from rag_system.indexing.graph_extractor import GraphExtractor
from rag_system.utils.ollama_client import OllamaClient
from rag_system.indexing.contextualizer import ContextualEnricher
from rag_system.indexing.overview_builder import OverviewBuilder
class IndexingPipeline:
    """End-to-end document indexing: convert → chunk → enrich → embed/index,
    with optional late-chunk embeddings and knowledge-graph extraction.

    Components are constructed eagerly in __init__ based on the config; run()
    then drives the per-file processing.
    """

    def __init__(self, config: Dict[str, Any], ollama_client: OllamaClient, ollama_config: Dict[str, str]):
        """Build all pipeline components declared enabled in *config*.

        Args:
            config: Pipeline profile (storage paths, retrieval toggles,
                chunking/indexing parameters).
            ollama_client: LLM client used for enrichment/graph/overview steps.
            ollama_config: Backend model names and host.

        Raises:
            KeyError: If dense retrieval is enabled but no LanceDB path key is
                present in the storage config.
        """
        self.config = config
        self.llm_client = ollama_client
        self.ollama_config = ollama_config
        self.document_converter = DocumentConverter()

        # Chunker selection: docling (token-based) or legacy (character-based)
        chunker_mode = config.get("chunker_mode", "docling")

        # 🔧 Get chunking configuration from frontend parameters
        chunking_config = config.get("chunking", {})
        chunk_size = chunking_config.get("chunk_size", config.get("chunk_size", 1500))
        chunk_overlap = chunking_config.get("chunk_overlap", config.get("chunk_overlap", 200))

        print(f"🔧 CHUNKING CONFIG: Size: {chunk_size}, Overlap: {chunk_overlap}, Mode: {chunker_mode}")

        if chunker_mode == "docling":
            try:
                from rag_system.ingestion.docling_chunker import DoclingChunker
                self.chunker = DoclingChunker(
                    max_tokens=config.get("max_tokens", chunk_size),
                    overlap=config.get("overlap_sentences", 1),
                    tokenizer_model=config.get("embedding_model_name", "qwen3-embedding-0.6b"),
                )
                print("🪄 Using DoclingChunker for high-recall sentence packing.")
            except Exception as e:
                # Docling is optional; degrade to the character-based chunker.
                print(f"⚠️ Failed to initialise DoclingChunker: {e}. Falling back to legacy chunker.")
                self.chunker = MarkdownRecursiveChunker(
                    max_chunk_size=chunk_size,
                    min_chunk_size=min(chunk_overlap, chunk_size // 4),  # Sensible minimum
                    tokenizer_model=config.get("embedding_model_name", "Qwen/Qwen3-Embedding-0.6B")
                )
        else:
            self.chunker = MarkdownRecursiveChunker(
                max_chunk_size=chunk_size,
                min_chunk_size=min(chunk_overlap, chunk_size // 4),  # Sensible minimum
                tokenizer_model=config.get("embedding_model_name", "Qwen/Qwen3-Embedding-0.6B")
            )

        # Support both legacy "retrievers" and newer "retrieval" config keys.
        retriever_configs = self.config.get("retrievers") or self.config.get("retrieval", {})
        storage_config = self.config["storage"]

        # Get batch processing configuration
        indexing_config = self.config.get("indexing", {})
        self.embedding_batch_size = indexing_config.get("embedding_batch_size", 50)
        self.enrichment_batch_size = indexing_config.get("enrichment_batch_size", 10)
        self.enable_progress_tracking = indexing_config.get("enable_progress_tracking", True)

        # Treat dense retrieval as enabled by default unless explicitly disabled
        dense_cfg = retriever_configs.setdefault("dense", {})
        dense_cfg.setdefault("enabled", True)
        if dense_cfg.get("enabled"):
            # Accept modern keys: db_path or lancedb_path; fall back to legacy lancedb_uri
            db_path = (
                storage_config.get("db_path")
                or storage_config.get("lancedb_path")
                or storage_config.get("lancedb_uri")
            )
            if not db_path:
                raise KeyError(
                    "Storage config must include 'db_path', 'lancedb_path', or 'lancedb_uri' for LanceDB."
                )
            self.lancedb_manager = LanceDBManager(db_path=db_path)
            self.vector_indexer = VectorIndexer(self.lancedb_manager)
            embedding_model = select_embedder(
                self.config.get("embedding_model_name", "BAAI/bge-small-en-v1.5"),
                self.ollama_config.get("host") if isinstance(self.ollama_config, dict) else None,
            )
            self.embedding_generator = EmbeddingGenerator(
                embedding_model=embedding_model,
                batch_size=self.embedding_batch_size
            )

        # Graph extractor only exists when graph retrieval is enabled; run()
        # checks hasattr() before using it.
        if retriever_configs.get("graph", {}).get("enabled"):
            self.graph_extractor = GraphExtractor(
                llm_client=self.llm_client,
                llm_model=self.ollama_config["generation_model"]
            )

        if self.config.get("contextual_enricher", {}).get("enabled"):
            # 🔧 Use frontend enrich_model parameter if provided
            enrichment_model = (
                self.config.get("enrich_model") or  # Frontend parameter
                self.config.get("enrichment_model_name") or  # Alternative config key
                self.ollama_config.get("enrichment_model") or  # Default from ollama config
                self.ollama_config["generation_model"]  # Final fallback
            )
            print(f"🔧 ENRICHMENT MODEL: Using '{enrichment_model}' for contextual enrichment")
            self.contextual_enricher = ContextualEnricher(
                llm_client=self.llm_client,
                llm_model=enrichment_model,
                batch_size=self.enrichment_batch_size
            )

        # Overview builder always enabled for triage routing
        ov_path = self.config.get("overview_path")
        self.overview_builder = OverviewBuilder(
            llm_client=self.llm_client,
            model=self.config.get("overview_model_name", self.ollama_config.get("enrichment_model", "qwen3:0.6b")),
            first_n_chunks=self.config.get("overview_first_n_chunks", 5),
            out_path=ov_path if ov_path else None,
        )

        # ------------------------------------------------------------------
        # Late-Chunk encoder initialisation (optional)
        # ------------------------------------------------------------------
        # NOTE(review): this reads the "latechunk" key, but the pipeline
        # configs in rag_system/main.py use "late_chunking" — with those
        # configs this feature never activates. Confirm intended spelling.
        self.latechunk_enabled = retriever_configs.get("latechunk", {}).get("enabled", False)
        if self.latechunk_enabled:
            try:
                from rag_system.indexing.latechunk import LateChunkEncoder
                self.latechunk_cfg = retriever_configs["latechunk"]
                self.latechunk_encoder = LateChunkEncoder(model_name=self.config.get("embedding_model_name", "qwen3-embedding-0.6b"))
            except Exception as e:
                print(f"⚠️ Failed to initialise LateChunkEncoder: {e}. Disabling latechunk retrieval.")
                self.latechunk_enabled = False

    def run(self, file_paths: List[str] | None = None, *, documents: List[str] | None = None):
        """
        Processes and indexes documents based on the pipeline's configuration.

        Accepts legacy keyword *documents* as an alias for *file_paths* so that
        older callers (backend/index builder) keep working.

        Raises:
            TypeError: If neither *file_paths* nor *documents* is supplied.
        """
        # Back-compat shim ---------------------------------------------------
        if file_paths is None and documents is not None:
            file_paths = documents
        if file_paths is None:
            raise TypeError("IndexingPipeline.run() expects 'file_paths' (or alias 'documents') argument")

        print(f"--- Starting indexing process for {len(file_paths)} files. ---")

        # Import progress tracking utilities
        from rag_system.utils.batch_processor import timer, ProgressTracker, estimate_memory_usage

        with timer("Complete Indexing Pipeline"):
            # Step 1: Document Processing and Chunking
            all_chunks = []
            doc_chunks_map = {}

            with timer("Document Processing & Chunking"):
                file_tracker = ProgressTracker(len(file_paths), "Document Processing")

                for file_path in file_paths:
                    try:
                        document_id = os.path.basename(file_path)
                        print(f"Processing: {document_id}")
                        pages_data = self.document_converter.convert_to_markdown(file_path)

                        file_chunks = []
                        for tpl in pages_data:
                            # Converters may yield (markdown, meta) pairs or
                            # (markdown, meta, doc_obj) triples.
                            if len(tpl) == 3:
                                markdown_text, metadata, doc_obj = tpl
                                if hasattr(self.chunker, "chunk_document"):
                                    chunks = self.chunker.chunk_document(doc_obj, document_id=document_id, metadata=metadata)
                                else:
                                    chunks = self.chunker.chunk(markdown_text, document_id, metadata)
                            else:
                                markdown_text, metadata = tpl
                                chunks = self.chunker.chunk(markdown_text, document_id, metadata)
                            file_chunks.extend(chunks)

                        # Add a sequential chunk_index to each chunk within the document
                        for i, chunk in enumerate(file_chunks):
                            if 'metadata' not in chunk:
                                chunk['metadata'] = {}
                            chunk['metadata']['chunk_index'] = i

                        # Build and persist document overview (non-blocking errors)
                        try:
                            self.overview_builder.build_and_store(document_id, file_chunks)
                        except Exception as e:
                            print(f" ⚠️ Failed to create overview for {document_id}: {e}")

                        all_chunks.extend(file_chunks)
                        doc_chunks_map[document_id] = file_chunks  # save for late-chunk step
                        print(f" Generated {len(file_chunks)} chunks from {document_id}")
                        file_tracker.update(1)

                    except Exception as e:
                        # One bad file must not abort the whole batch.
                        print(f" ❌ Error processing {file_path}: {e}")
                        file_tracker.update(1, errors=1)
                        continue

                file_tracker.finish()

            if not all_chunks:
                print("No text chunks were generated. Skipping indexing.")
                return

            print(f"\n✅ Generated {len(all_chunks)} text chunks total.")
            memory_mb = estimate_memory_usage(all_chunks)
            print(f"📊 Estimated memory usage: {memory_mb:.1f}MB")

            retriever_configs = self.config.get("retrievers") or self.config.get("retrieval", {})

            # Step 3: Optional Contextual Enrichment (before indexing for consistency)
            enricher_config = self.config.get("contextual_enricher", {})
            enricher_enabled = enricher_config.get("enabled", False)

            print(f"\n🔍 CONTEXTUAL ENRICHMENT DEBUG:")
            print(f" Config present: {bool(enricher_config)}")
            print(f" Enabled: {enricher_enabled}")
            print(f" Has enricher object: {hasattr(self, 'contextual_enricher')}")

            if hasattr(self, 'contextual_enricher') and enricher_enabled:
                with timer("Contextual Enrichment"):
                    window_size = enricher_config.get("window_size", 1)
                    print(f"\n🚀 CONTEXTUAL ENRICHMENT ACTIVE!")
                    print(f" Window size: {window_size}")
                    print(f" Model: {self.contextual_enricher.llm_model}")
                    print(f" Batch size: {self.contextual_enricher.batch_size}")
                    print(f" Processing {len(all_chunks)} chunks...")

                    # Show before/after example
                    if all_chunks:
                        print(f" Example BEFORE: '{all_chunks[0]['text'][:100]}...'")

                    # This modifies the 'text' field in each chunk dictionary
                    all_chunks = self.contextual_enricher.enrich_chunks(all_chunks, window_size=window_size)

                    if all_chunks:
                        print(f" Example AFTER: '{all_chunks[0]['text'][:100]}...'")

                    print(f"✅ Enriched {len(all_chunks)} chunks with context for indexing.")
            else:
                print(f"⚠️ CONTEXTUAL ENRICHMENT SKIPPED:")
                if not hasattr(self, 'contextual_enricher'):
                    print(f" Reason: No enricher object (config enabled={enricher_enabled})")
                elif not enricher_enabled:
                    print(f" Reason: Disabled in config")
                print(f" Chunks will be indexed without contextual enrichment.")

            # Step 4: Create BM25 Index from enriched chunks (for consistency with vector index)
            if hasattr(self, 'vector_indexer') and hasattr(self, 'embedding_generator'):
                with timer("Vector Embedding & Indexing"):
                    table_name = self.config["storage"].get("text_table_name") or retriever_configs.get("dense", {}).get("lancedb_table_name", "default_text_table")
                    print(f"\n--- Generating embeddings with {self.config.get('embedding_model_name')} ---")
                    embeddings = self.embedding_generator.generate(all_chunks)
                    print(f"\n--- Indexing {len(embeddings)} vectors into LanceDB table: {table_name} ---")
                    self.vector_indexer.index(table_name, all_chunks, embeddings)
                    print("✅ Vector embeddings indexed successfully")

                    # Create FTS index on the 'text' field after adding data
                    print(f"\n--- Ensuring Full-Text Search (FTS) index on table '{table_name}' ---")
                    try:
                        tbl = self.lancedb_manager.get_table(table_name)
                        # LanceDB's default index name is "text_idx" while older
                        # revisions of this pipeline used our own name "fts_text".
                        # Guard against both so we don't attempt to create a
                        # duplicate index and trigger a LanceError.
                        existing_indices = [idx.name for idx in tbl.list_indices()]
                        if not any(name in existing_indices for name in ("text_idx", "fts_text")):
                            # Use LanceDB default index naming ("text_idx")
                            tbl.create_fts_index(
                                "text",
                                use_tantivy=False,
                                replace=False,
                            )
                            print("✅ FTS index created successfully (using Lance native FTS).")
                        else:
                            print("ℹ️ FTS index already exists – skipped creation.")
                    except Exception as e:
                        print(f"❌ Failed to create/verify FTS index: {e}")

                # ---------------------------------------------------
                # Late-Chunk Embedding + Indexing (optional)
                # ---------------------------------------------------
                if self.latechunk_enabled:
                    with timer("Late-Chunk Embedding & Indexing"):
                        lc_table_name = self.latechunk_cfg.get("lancedb_table_name", f"{table_name}_lc")
                        print(f"\n--- Generating late-chunk embeddings (table={lc_table_name}) ---")
                        total_lc_vecs = 0
                        for doc_id, doc_chunks in doc_chunks_map.items():
                            # Build full text and span list
                            full_text_parts = []
                            spans = []
                            current_pos = 0
                            for ch in doc_chunks:
                                ch_text = ch["text"]
                                full_text_parts.append(ch_text)
                                start = current_pos
                                end = start + len(ch_text)
                                spans.append((start, end))
                                current_pos = end + 1  # +1 for newline to join later
                            full_doc = "\n".join(full_text_parts)
                            try:
                                lc_vecs = self.latechunk_encoder.encode(full_doc, spans)
                            except Exception as e:
                                print(f"⚠️ LateChunk encode failed for {doc_id}: {e}")
                                continue
                            if len(doc_chunks) == 0 or len(lc_vecs) == 0:
                                # Nothing to index for this document
                                continue
                            if len(lc_vecs) != len(doc_chunks):
                                print(f"⚠️ Mismatch LC vecs ({len(lc_vecs)}) vs chunks ({len(doc_chunks)}) for {doc_id}. Skipping.")
                                continue
                            self.vector_indexer.index(lc_table_name, doc_chunks, lc_vecs)
                            total_lc_vecs += len(lc_vecs)
                        print(f"✅ Late-chunk vectors indexed: {total_lc_vecs}")

            # Step 6: Knowledge Graph Extraction (Optional)
            if hasattr(self, 'graph_extractor'):
                with timer("Knowledge Graph Extraction"):
                    graph_path = retriever_configs.get("graph", {}).get("graph_path", "./index_store/graph/default_graph.gml")
                    print(f"\n--- Building and saving knowledge graph to: {graph_path} ---")
                    graph_data = self.graph_extractor.extract(all_chunks)
                    G = nx.DiGraph()
                    for entity in graph_data['entities']:
                        G.add_node(entity['id'], type=entity.get('type', 'Unknown'), properties=entity.get('properties', {}))
                    for rel in graph_data['relationships']:
                        G.add_edge(rel['source'], rel['target'], label=rel['label'])
                    os.makedirs(os.path.dirname(graph_path), exist_ok=True)
                    nx.write_gml(G, graph_path)
                    print(f"✅ Knowledge graph saved successfully.")

        print("\n--- ✅ Indexing Complete ---")
        self._print_final_statistics(len(file_paths), len(all_chunks))

    def _print_final_statistics(self, num_files: int, num_chunks: int):
        """Print final indexing statistics"""
        # Only called after at least one chunk was produced, so num_files >= 1
        # and the average below cannot divide by zero.
        print(f"\n📈 Final Statistics:")
        print(f" Files processed: {num_files}")
        print(f" Chunks generated: {num_chunks}")
        print(f" Average chunks per file: {num_chunks/num_files:.1f}")

        # Component status
        components = []
        if hasattr(self, 'contextual_enricher'):
            components.append("✅ Contextual Enrichment")
        if hasattr(self, 'vector_indexer'):
            components.append("✅ Vector & FTS Index")
        if hasattr(self, 'graph_extractor'):
            components.append("✅ Knowledge Graph")

        print(f" Components: {', '.join(components)}")
        print(f" Batch sizes: Embeddings={self.embedding_batch_size}, Enrichment={self.enrichment_batch_size}")
| {
"repo_id": "PromtEngineer/localGPT",
"file_path": "rag_system/pipelines/indexing_pipeline.py",
"license": "MIT License",
"lines": 306,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PromtEngineer/localGPT:rag_system/pipelines/retrieval_pipeline.py | import pymupdf
from typing import List, Dict, Any, Tuple, Optional
from PIL import Image
import concurrent.futures
import time
import json
import lancedb
import logging
import math
import numpy as np
from threading import Lock
from rag_system.utils.ollama_client import OllamaClient
from rag_system.retrieval.retrievers import MultiVectorRetriever, GraphRetriever
from rag_system.indexing.multimodal import LocalVisionModel
from rag_system.indexing.representations import select_embedder
from rag_system.indexing.embedders import LanceDBManager
from rag_system.rerankers.reranker import QwenReranker
from rag_system.rerankers.sentence_pruner import SentencePruner
# from rag_system.indexing.chunk_store import ChunkStore
import os
from PIL import Image
# ---------------------------------------------------------------------------
# Thread-safety helpers
# ---------------------------------------------------------------------------
# 1. ColBERT (via `rerankers` lib) is not thread-safe. We protect the actual
# `.rank()` call with `_rerank_lock`.
_rerank_lock: Lock = Lock()

# 2. Loading a large cross-encoder or ColBERT model can easily take >1 GB of
#    RAM. When multiple sub-queries are processed in parallel they may try to
#    instantiate the reranker simultaneously, which results in PyTorch meta
#    tensor errors. We therefore guard the *initialisation* with its own
#    lock so only one thread carries out the heavy `from_pretrained()` call.
_ai_reranker_init_lock: Lock = Lock()

# Lock to serialise first-time Provence model load (see _get_sentence_pruner)
_sentence_pruner_lock: Lock = Lock()
class RetrievalPipeline:
    """
    Orchestrates the state-of-the-art multimodal RAG pipeline.
    """
    def __init__(self, config: Dict[str, Any], ollama_client: OllamaClient, ollama_config: Dict[str, Any]):
        """Store configuration and defer heavy component construction.

        All retrievers/rerankers are created lazily by the _get_* accessors so
        constructing the pipeline stays cheap.

        Args:
            config: Pipeline profile; must contain a "storage" mapping.
            ollama_client: Shared LLM client for synthesis/verification steps.
            ollama_config: Backend model names and host.
        """
        self.config = config
        self.ollama_config = ollama_config
        self.ollama_client = ollama_client
        # Support both legacy "retrievers" key and newer "retrieval" key
        self.retriever_configs = self.config.get("retrievers") or self.config.get("retrieval", {})
        self.storage_config = self.config["storage"]

        # Defer initialization to just-in-time methods
        self.db_manager = None        # LanceDB manager (lazy)
        self.text_embedder = None     # dense embedding model (lazy)
        self.dense_retriever = None   # MultiVectorRetriever (lazy)
        self.bm25_retriever = None    # keyword retriever (lazy)
        # Use a private attribute to avoid clashing with the public property
        self._graph_retriever = None
        self.reranker = None          # LanceDB score-fusion reranker (lazy)
        self.ai_reranker = None       # cross-encoder/ColBERT reranker (lazy)
def _get_db_manager(self):
if self.db_manager is None:
# Accept either "db_path" (preferred) or legacy "lancedb_uri"
db_path = self.storage_config.get("db_path") or self.storage_config.get("lancedb_uri")
if not db_path:
raise ValueError("Storage config must contain 'db_path' or 'lancedb_uri'.")
self.db_manager = LanceDBManager(db_path=db_path)
return self.db_manager
def _get_text_embedder(self):
if self.text_embedder is None:
from rag_system.indexing.representations import select_embedder
self.text_embedder = select_embedder(
self.config.get("embedding_model_name", "BAAI/bge-small-en-v1.5"),
self.ollama_config.get("host") if isinstance(self.ollama_config, dict) else None,
)
return self.text_embedder
def _get_dense_retriever(self):
"""Ensure a dense MultiVectorRetriever is always available unless explicitly disabled."""
if self.dense_retriever is None:
# If the config explicitly sets dense.enabled to False, respect it
if self.retriever_configs.get("dense", {}).get("enabled", True) is False:
return None
try:
db_manager = self._get_db_manager()
text_embedder = self._get_text_embedder()
fusion_cfg = self.config.get("fusion", {})
self.dense_retriever = MultiVectorRetriever(
db_manager,
text_embedder,
vision_model=None,
fusion_config=fusion_cfg,
)
except Exception as e:
print(f"❌ Failed to initialise dense retriever: {e}")
self.dense_retriever = None
return self.dense_retriever
def _get_bm25_retriever(self):
if self.bm25_retriever is None and self.retriever_configs.get("bm25", {}).get("enabled"):
try:
print(f"🔧 Lazily initializing BM25 retriever...")
self.bm25_retriever = BM25Retriever(
index_path=self.storage_config["bm25_path"],
index_name=self.retriever_configs["bm25"]["index_name"]
)
print("✅ BM25 retriever initialized successfully")
except Exception as e:
print(f"❌ Failed to initialize BM25 retriever on demand: {e}")
# Keep it None so we don't try again
return self.bm25_retriever
def _get_graph_retriever(self):
if self._graph_retriever is None and self.retriever_configs.get("graph", {}).get("enabled"):
self._graph_retriever = GraphRetriever(graph_path=self.storage_config["graph_path"])
return self._graph_retriever
def _get_reranker(self):
"""Initializes the reranker for hybrid search score fusion."""
reranker_config = self.config.get("reranker", {})
# This is for the LanceDB internal reranker, not the AI one.
if self.reranker is None and reranker_config.get("type") == "linear_combination":
rerank_weight = reranker_config.get("weight", 0.5)
self.reranker = lancedb.rerankers.LinearCombinationReranker(weight=rerank_weight)
print(f"✅ Initialized LinearCombinationReranker with weight {rerank_weight}")
return self.reranker
    def _get_ai_reranker(self):
        """Initializes a dedicated AI-based reranker.

        Thread-safe double-checked initialisation: the lock ensures only one
        thread performs the heavy model load; on failure the attribute stays
        None so the pipeline proceeds without reranking.
        """
        reranker_config = self.config.get("reranker", {})
        if self.ai_reranker is None and reranker_config.get("enabled"):
            # Serialise first-time initialisation so only one thread attempts
            # to load the (very large) model. Other threads will wait and use
            # the instance once ready, preventing the meta-tensor crash.
            with _ai_reranker_init_lock:
                # Another thread may have completed init while we waited
                if self.ai_reranker is None:
                    try:
                        model_name = reranker_config.get("model_name")
                        strategy = reranker_config.get("strategy", "qwen")
                        if strategy == "rerankers-lib":
                            print(f"🔧 Initialising Answer.AI ColBERT reranker ({model_name}) via rerankers lib…")
                            from rerankers import Reranker
                            self.ai_reranker = Reranker(model_name, model_type="colbert")
                        else:
                            print(f"🔧 Lazily initializing Qwen reranker ({model_name})…")
                            self.ai_reranker = QwenReranker(model_name=model_name)
                        print("✅ AI reranker initialized successfully.")
                    except Exception as e:
                        # Leave as None so the pipeline can proceed without reranking
                        print(f"❌ Failed to initialize AI reranker: {e}")
        return self.ai_reranker
def _get_sentence_pruner(self):
if getattr(self, "_sentence_pruner", None) is None:
with _sentence_pruner_lock:
if getattr(self, "_sentence_pruner", None) is None:
self._sentence_pruner = SentencePruner()
return self._sentence_pruner
def _get_surrounding_chunks_lancedb(self, chunk: Dict[str, Any], window_size: int) -> List[Dict[str, Any]]:
    """
    Retrieves a window of chunks around a central chunk using LanceDB.

    Args:
        chunk: The central chunk; must carry ``document_id`` and a valid
            ``chunk_index`` for neighbours to be resolvable.
        window_size: Number of chunks to fetch on each side.

    Returns:
        Neighbouring chunks (central one included) ordered by
        ``chunk_index``, or ``[chunk]`` on any failure so callers can
        degrade gracefully.
    """
    db_manager = self._get_db_manager()
    if not db_manager:
        return [chunk]
    # Extract identifiers needed for the query
    document_id = chunk.get("document_id")
    chunk_index = chunk.get("chunk_index")
    # If essential identifiers are missing, return the chunk itself
    if document_id is None or chunk_index is None or chunk_index == -1:
        return [chunk]
    table_name = self.config["storage"]["text_table_name"]
    try:
        tbl = db_manager.get_table(table_name)
    except Exception:
        # If the table can't be opened, we can't get surrounding chunks
        return [chunk]
    # Define the window for the search
    start_index = max(0, chunk_index - window_size)
    end_index = chunk_index + window_size
    # Escape single quotes so a quote inside document_id cannot break
    # (or inject into) the SQL filter string.
    safe_doc_id = str(document_id).replace("'", "''")
    # Construct the SQL filter for an efficient metadata-based search
    sql_filter = f"document_id = '{safe_doc_id}' AND chunk_index >= {start_index} AND chunk_index <= {end_index}"
    try:
        # Execute a filter-only search, which is very fast on indexed metadata
        results = tbl.search().where(sql_filter).to_list()
        # The results must be sorted by chunk_index to maintain logical order;
        # use .get() so one malformed row cannot abort the whole window.
        results.sort(key=lambda c: c.get('chunk_index', 0))
        # The 'metadata' field is a JSON string and needs to be parsed
        for res in results:
            if isinstance(res.get('metadata'), str):
                try:
                    res['metadata'] = json.loads(res['metadata'])
                except json.JSONDecodeError:
                    res['metadata'] = {}  # Handle corrupted metadata gracefully
        return results
    except Exception:
        # If the query fails for any reason, fall back to the single chunk
        return [chunk]
def _synthesize_final_answer(self, query: str, facts: str, *, event_callback=None) -> str:
    """Uses a text LLM to synthesize a final answer from extracted facts.

    Streams tokens from the configured generation model; when
    *event_callback* is provided, each token is also forwarded as a
    ``("token", {"text": ...})`` event so callers can relay them (e.g. SSE).
    Returns the fully concatenated answer string.
    """
    prompt = f"""
You are an AI assistant specialised in answering questions from retrieved context.
Context you receive
• VERIFIED FACTS – text snippets retrieved from the user's documents. Some may be irrelevant noise.
• ORIGINAL QUESTION – the user's actual query.
Instructions
1. Evaluate each snippet for relevance to the ORIGINAL QUESTION; ignore those that do not help answer it.
2. Synthesise an answer **using only information from the relevant snippets**.
3. If snippets contradict one another, mention the contradiction explicitly.
4. If the snippets do not contain the needed information, reply exactly with:
"I could not find that information in the provided documents."
5. Provide a thorough, well-structured answer. Use paragraphs or bullet points where helpful, and include any relevant numbers/names exactly as they appear. There is **no strict sentence limit**, but aim for clarity over brevity.
6. Do **not** introduce external knowledge unless step 4 applies; in that case you may add a clearly-labelled "General knowledge" sentence after the required statement.
Output format
Answer:
<your answer here>
––––– Retrieved Snippets –––––
{facts}
––––––––––––––––––––––––––––––
ORIGINAL QUESTION: "{query}"
"""
    # Stream the answer token-by-token so the caller can forward them as SSE
    answer_parts: list[str] = []
    for tok in self.ollama_client.stream_completion(
        model=self.ollama_config["generation_model"],
        prompt=prompt,
    ):
        answer_parts.append(tok)
        if event_callback:
            event_callback("token", {"text": tok})
    return "".join(answer_parts)
def run(self, query: str, table_name: Optional[str] = None, window_size_override: Optional[int] = None, event_callback=None) -> Dict[str, Any]:
    """Execute the full hybrid-retrieval pipeline for *query*.

    Stages: dense/hybrid retrieval (plus optional late-chunk retrieval and
    merging), optional AI reranking, context-window expansion, optional
    sentence-level (Provence) pruning, and final LLM synthesis.

    Args:
        query: The user's question.
        table_name: Optional LanceDB table override; when given it is
            written back into ``storage_config`` so helpers use it too.
        window_size_override: Overrides the configured context window size.
        event_callback: Optional ``callback(event_name, payload)`` hook;
            receives progress events (``retrieval_started``,
            ``rerank_done``, …) and streamed answer tokens.

    Returns:
        Dict with ``answer`` (str) and ``source_documents`` (list of
        JSON-serialisable chunk dicts).
    """
    start_time = time.time()
    retrieval_k = self.config.get("retrieval_k", 10)
    logger = logging.getLogger(__name__)
    logger.debug("--- Running Hybrid Search for query '%s' (table=%s) ---", query, table_name or self.storage_config.get("text_table_name"))
    # If a custom table_name is provided, propagate it to storage config so helper methods use it
    if table_name:
        self.storage_config["text_table_name"] = table_name
    if event_callback:
        event_callback("retrieval_started", {})
    # Unified retrieval using the refactored MultiVectorRetriever
    dense_retriever = self._get_dense_retriever()
    # Get the LanceDB reranker for initial score fusion
    lancedb_reranker = self._get_reranker()
    retrieved_docs = []
    if dense_retriever:
        retrieved_docs = dense_retriever.retrieve(
            text_query=query,
            table_name=table_name or self.storage_config["text_table_name"],
            k=retrieval_k,
            reranker=lancedb_reranker  # Pass the reranker to enable hybrid search
        )
    # ---------------------------------------------------------------
    # Late-Chunk retrieval (optional)
    # ---------------------------------------------------------------
    if self.retriever_configs.get("latechunk", {}).get("enabled"):
        lc_table = self.retriever_configs["latechunk"].get("lancedb_table_name")
        if lc_table:
            try:
                lc_docs = dense_retriever.retrieve(
                    text_query=query,
                    table_name=lc_table,
                    k=retrieval_k,
                    reranker=lancedb_reranker,
                )
                retrieved_docs.extend(lc_docs)
            except Exception as e:
                print(f"⚠️ Late-chunk retrieval failed: {e}")
    if event_callback:
        event_callback("retrieval_done", {"count": len(retrieved_docs)})
    retrieval_time = time.time() - start_time
    logger.debug("Retrieved %s chunks in %.2fs", len(retrieved_docs), retrieval_time)
    # -----------------------------------------------------------
    # LATE-CHUNK MERGING (merge ±1 sub-vector into central hit)
    # -----------------------------------------------------------
    if self.retriever_configs.get("latechunk", {}).get("enabled") and retrieved_docs:
        merged_count = 0
        for doc in retrieved_docs:
            try:
                cid = doc.get("chunk_id")
                meta = doc.get("metadata", {})
                if meta.get("latechunk_merged"):
                    continue  # already processed
                doc_id = doc.get("document_id")
                cidx = doc.get("chunk_index")
                if doc_id is None or cidx is None or cidx == -1:
                    continue
                # Fetch neighbouring late-chunks inside same document (±1)
                siblings = self._get_surrounding_chunks_lancedb(doc, window_size=1)
                # Keep only same document_id and ordered by chunk_index
                siblings = [s for s in siblings if s.get("document_id") == doc_id]
                siblings.sort(key=lambda s: s.get("chunk_index", 0))
                merged_text = " \n".join(s.get("text", "") for s in siblings)
                if merged_text:
                    doc["text"] = merged_text
                    meta["latechunk_merged"] = True
                    merged_count += 1
            except Exception as e:
                print(f"⚠️ Late-chunk merge failed for chunk {doc.get('chunk_id')}: {e}")
        if merged_count:
            print(f"🪄 Late-chunk merging applied to {merged_count} retrieved chunks.")
    # --- AI Reranking Step ---
    ai_reranker = self._get_ai_reranker()
    if ai_reranker and retrieved_docs:
        if event_callback:
            event_callback("rerank_started", {"count": len(retrieved_docs)})
        print(f"\n--- Reranking top {len(retrieved_docs)} docs with AI model... ---")
        start_rerank_time = time.time()
        rerank_cfg = self.config.get("reranker", {})
        top_k_cfg = rerank_cfg.get("top_k")
        top_percent = rerank_cfg.get("top_percent")  # value in range 0–1
        # top_percent, when valid, takes precedence over a fixed top_k.
        if top_percent is not None:
            try:
                pct = float(top_percent)
                assert 0 < pct <= 1
                top_k = max(1, int(len(retrieved_docs) * pct))
            except Exception:
                print("⚠️ Invalid top_percent value; falling back to top_k")
                top_k = top_k_cfg or len(retrieved_docs)
        else:
            top_k = top_k_cfg or len(retrieved_docs)
        strategy = self.config.get("reranker", {}).get("strategy", "qwen")
        if strategy == "rerankers-lib":
            texts = [d['text'] for d in retrieved_docs]
            # ColBERT's Rust backend isn't Sync; serialise calls.
            with _rerank_lock:
                ranked = ai_reranker.rank(query=query, docs=texts)
            # ranked is RankedResults; convert to list of (score, idx)
            try:
                pairs = [(r.score, r.document.doc_id) for r in ranked.results]
                if any(p[1] is None for p in pairs):
                    pairs = [(r.score, i) for i, r in enumerate(ranked.results)]
            except Exception:
                pairs = ranked
            # Keep only top_k results if requested
            if top_k is not None and len(pairs) > top_k:
                pairs = pairs[:top_k]
            reranked_docs = [retrieved_docs[idx] | {"rerank_score": score} for score, idx in pairs]
        else:
            try:
                reranked_docs = ai_reranker.rerank(query, retrieved_docs, top_k=top_k)
            except TypeError:
                # Older/alternative reranker API: rank(query, texts) -> (score, idx) pairs
                texts = [d['text'] for d in retrieved_docs]
                pairs = ai_reranker.rank(query, texts, top_k=top_k)
                reranked_docs = [retrieved_docs[idx] | {"rerank_score": score} for score, idx in pairs]
        rerank_time = time.time() - start_rerank_time
        print(f"✅ Reranking completed in {rerank_time:.2f}s. Refined to {len(reranked_docs)} docs.")
        if event_callback:
            event_callback("rerank_done", {"count": len(reranked_docs)})
    else:
        # If no AI reranker, proceed with the initially retrieved docs
        reranked_docs = retrieved_docs
    window_size = self.config.get("context_window_size", 1)
    if window_size_override is not None:
        window_size = window_size_override
    if window_size > 0 and reranked_docs:
        if event_callback:
            event_callback("context_expand_started", {"count": len(reranked_docs)})
        print(f"\n--- Expanding context for {len(reranked_docs)} top documents (window size: {window_size})... ---")
        expanded_chunks = {}
        # Fetch each hit's neighbour window concurrently; dedupe by chunk_id.
        with concurrent.futures.ThreadPoolExecutor() as executor:
            future_to_chunk = {executor.submit(self._get_surrounding_chunks_lancedb, chunk, window_size): chunk for chunk in reranked_docs}
            for future in concurrent.futures.as_completed(future_to_chunk):
                try:
                    seed_chunk = future_to_chunk[future]
                    surrounding_chunks = future.result()
                    for surrounding_chunk in surrounding_chunks:
                        cid = surrounding_chunk['chunk_id']
                        if cid not in expanded_chunks:
                            # If this is the *central* chunk we already reranked, carry over its score
                            if cid == seed_chunk.get('chunk_id') and 'rerank_score' in seed_chunk:
                                surrounding_chunk['rerank_score'] = seed_chunk['rerank_score']
                            expanded_chunks[cid] = surrounding_chunk
                except Exception as e:
                    print(f"Error expanding context for a chunk: {e}")
        final_docs = list(expanded_chunks.values())
        # Sort by reranker score if present, otherwise by raw score/distance
        if any('rerank_score' in d for d in final_docs):
            final_docs.sort(key=lambda c: c.get('rerank_score', -1), reverse=True)
        elif any('_distance' in d for d in final_docs):
            # For vector search smaller distance is better
            final_docs.sort(key=lambda c: c.get('_distance', 1e9))
        elif any('score' in d for d in final_docs):
            final_docs.sort(key=lambda c: c.get('score', 0), reverse=True)
        else:
            # Fallback to document order
            final_docs.sort(key=lambda c: (c.get('document_id', ''), c.get('chunk_index', 0)))
        print(f"Expanded to {len(final_docs)} unique chunks for synthesis.")
        if event_callback:
            event_callback("context_expand_done", {"count": len(final_docs)})
    else:
        final_docs = reranked_docs
    # Optionally hide non-reranked chunks: if any chunk carries a
    # `rerank_score`, we assume the caller wants to focus on those.
    if any('rerank_score' in d for d in final_docs):
        final_docs = [d for d in final_docs if 'rerank_score' in d]
    # ------------------------------------------------------------------
    # Sentence-level pruning (Provence)
    # ------------------------------------------------------------------
    prov_cfg = self.config.get("provence", {})
    if prov_cfg.get("enabled"):
        if event_callback:
            event_callback("prune_started", {"count": len(final_docs)})
        thresh = float(prov_cfg.get("threshold", 0.1))
        print(f"\n--- Provence pruning enabled (threshold={thresh}) ---")
        pruner = self._get_sentence_pruner()
        final_docs = pruner.prune_documents(query, final_docs, threshold=thresh)
        # Remove any chunks that were fully pruned (empty text)
        final_docs = [d for d in final_docs if d.get('text', '').strip()]
        if event_callback:
            event_callback("prune_done", {"count": len(final_docs)})
    print("\n--- Final Documents for Synthesis ---")
    if not final_docs:
        print("No documents to synthesize.")
    else:
        for i, doc in enumerate(final_docs):
            print(f" [{i+1}] Chunk ID: {doc.get('chunk_id')}")
            print(f" Score: {doc.get('score', 'N/A')}")
            if 'rerank_score' in doc:
                print(f" Rerank Score: {doc.get('rerank_score'):.4f}")
            print(f" Text: \"{doc.get('text', '').strip()}\"")
    print("------------------------------------")
    if not final_docs:
        return {"answer": "I could not find an answer in the documents.", "source_documents": []}
    # --- Sanitize docs for JSON serialization (no NaN/Inf types) ---
    def _clean_val(v):
        # Map NaN/Inf floats (including numpy scalars) to None so the
        # result can be JSON-serialised safely.
        if isinstance(v, float) and (math.isnan(v) or math.isinf(v)):
            return None
        if isinstance(v, (np.floating,)):
            try:
                f = float(v)
                if math.isnan(f) or math.isinf(f):
                    return None
                return f
            except Exception:
                return None
        return v
    for doc in final_docs:
        # Remove heavy or internal-only fields before serialising
        doc.pop("vector", None)
        doc.pop("_distance", None)
        # Clean numeric fields
        for key in ['score', '_distance', 'rerank_score']:
            if key in doc:
                doc[key] = _clean_val(doc[key])
    context = "\n\n".join([doc['text'] for doc in final_docs])
    # 👀 DEBUG: Show the exact context passed to the LLM after pruning
    print("\n=== Context passed to LLM (post-pruning) ===")
    if len(context) > 2000:
        print(context[:2000] + "…\n[truncated] (total {} chars)".format(len(context)))
    else:
        print(context)
    print("=== End of context ===\n")
    final_answer = self._synthesize_final_answer(query, context, event_callback=event_callback)
    return {"answer": final_answer, "source_documents": final_docs}
# ------------------------------------------------------------------
# Public utility
# ------------------------------------------------------------------
def list_document_titles(self, max_items: int = 25) -> List[str]:
    """Return up to *max_items* distinct document titles (or IDs).

    Used only for prompt-routing, so robustness beats perfect recall:
    any failure yields an empty list and the caller degrades gracefully.
    """
    try:
        tbl_name = self.storage_config.get("text_table_name")
        if not tbl_name:
            return []
        tbl = self._get_db_manager().get_table(tbl_name)
        has_title = "document_title" in tbl.schema.names
        field_name = "document_title" if has_title else "document_id"
        try:
            # Cheap DISTINCT query first; not every driver supports it.
            sql = f"SELECT DISTINCT {field_name} FROM tbl LIMIT {max_items}"
            rows = tbl.search().where("true").sql(sql).to_list()  # type: ignore
            titles = [r[field_name] for r in rows if r.get(field_name)]
        except Exception:
            # Fallback: scan a bounded prefix and dedupe manually.
            rows = tbl.search().select(field_name).limit(max_items * 4).to_list()
            seen = set()
            titles = []
            for row in rows:
                value = row.get(field_name)
                if not value or value in seen:
                    continue
                seen.add(value)
                titles.append(value)
                if len(titles) >= max_items:
                    break
        # Ensure we don't exceed max_items
        return titles[:max_items]
    except Exception:
        # Missing table, bad schema, etc. –> just return []
        return []
# -------------------- Public helper properties --------------------
@property
def retriever(self):
    """The main (dense) retriever, created lazily on first access.

    Exposed so external components (e.g. ReAct agent tools) can call
    ``.retrieve()`` directly instead of reaching into private helpers;
    construction is delegated to ``_get_dense_retriever``.
    """
    dense = self._get_dense_retriever()
    return dense
def update_embedding_model(self, model_name: str):
"""Switch embedding model at runtime and clear cached objects so they re-initialize."""
if self.config.get("embedding_model_name") == model_name:
return # nothing to do
print(f"🔧 RetrievalPipeline switching embedding model to '{model_name}' (was '{self.config.get('embedding_model_name')}')")
self.config["embedding_model_name"] = model_name
# Reset caches so new instances are built on demand
self.text_embedder = None
self.dense_retriever = None | {
"repo_id": "PromtEngineer/localGPT",
"file_path": "rag_system/pipelines/retrieval_pipeline.py",
"license": "MIT License",
"lines": 501,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PromtEngineer/localGPT:rag_system/rerankers/reranker.py | from transformers import AutoModelForSequenceClassification, AutoTokenizer
import torch
from typing import List, Dict, Any
class QwenReranker:
    """
    A reranker that uses a local Hugging Face transformer model.

    Wraps a sequence-classification cross-encoder (default: BGE) that
    scores (query, passage) pairs for relevance.
    """
    def __init__(self, model_name: str = "BAAI/bge-reranker-base"):
        # Auto-select the best available device: CUDA > MPS > CPU
        if torch.cuda.is_available():
            device = "cuda"
        else:
            mps_backend = getattr(torch.backends, "mps", None)
            device = "mps" if (mps_backend and torch.backends.mps.is_available()) else "cpu"
        self.device = device
        print(f"Initializing BGE Reranker with model '{model_name}' on device '{self.device}'.")
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        # Half precision on accelerators; full precision on CPU.
        self.model = AutoModelForSequenceClassification.from_pretrained(
            model_name,
            torch_dtype=torch.float16 if self.device != "cpu" else None,
        ).to(self.device).eval()
        print("BGE Reranker loaded successfully.")
    def _format_instruction(self, query: str, doc: str):
        # Prompt template used by instruction-tuned rerankers.
        instruction = 'Given a web search query, retrieve relevant passages that answer the query'
        return f"<Instruct>: {instruction}\n<Query>: {query}\n<Document>: {doc}"
    def rerank(self, query: str, documents: List[Dict[str, Any]], top_k: int = 5, *, early_exit: bool = True, margin: float = 0.4, min_scored: int = 8, batch_size: int = 8) -> List[Dict[str, Any]]:
        """Rerank *documents* by cross-encoder relevance to *query*.

        Candidates are scored in mini-batches, strongest upstream (hybrid)
        scores first. With *early_exit*, scoring stops once the best-so-far
        beats the worst-so-far by *margin* after at least *min_scored*
        documents — a cheap shortcut for "easy" queries.

        Returns copies of the top *top_k* documents, each annotated with a
        ``rerank_score`` field, sorted by descending score.
        """
        if not documents:
            return []
        # Evaluate the strongest upstream candidates first.
        candidates = sorted(documents, key=lambda d: d.get('score', 0.0), reverse=True)
        scored: List[tuple[float, Dict[str, Any]]] = []
        with torch.no_grad():
            for offset in range(0, len(candidates), batch_size):
                batch = candidates[offset : offset + batch_size]
                pairs = [[query, d['text']] for d in batch]
                encoded = self.tokenizer(
                    pairs,
                    padding=True,
                    truncation=True,
                    return_tensors="pt",
                    max_length=512,
                ).to(self.device)
                logits = self.model(**encoded).logits.view(-1)
                scored.extend(zip(logits.float().cpu().tolist(), batch))
                # --- Early-exit check: stop once the spread is decisive ---
                if early_exit and len(scored) >= min_scored:
                    values = [s for s, _ in scored]
                    if max(values) - min(values) >= margin:
                        break
        # Sort final set and attach scores
        ranked = sorted(scored, key=lambda item: item[0], reverse=True)
        results: List[Dict[str, Any]] = []
        for value, doc in ranked[:top_k]:
            annotated = doc.copy()
            annotated['rerank_score'] = value
            results.append(annotated)
        return results
if __name__ == '__main__':
    # Manual smoke test. This requires an internet connection to download the models.
    try:
        demo = QwenReranker(model_name="BAAI/bge-reranker-base")
        question = "What is the capital of France?"
        candidate_docs = [
            {'text': "Paris is the capital of France.", 'metadata': {'doc_id': 'a'}},
            {'text': "The Eiffel Tower is in Paris.", 'metadata': {'doc_id': 'b'}},
            {'text': "France is a country in Europe.", 'metadata': {'doc_id': 'c'}},
        ]
        ranked = demo.rerank(question, candidate_docs)
        print("\n--- Verification ---")
        print(f"Query: {question}")
        print("Reranked documents:")
        for entry in ranked:
            print(f" - Score: {entry['rerank_score']:.4f}, Text: {entry['text']}")
    except Exception as e:
        print(f"\nAn error occurred during the QwenReranker test: {e}")
        print("Please ensure you have an internet connection for model downloads.")
| {
"repo_id": "PromtEngineer/localGPT",
"file_path": "rag_system/rerankers/reranker.py",
"license": "MIT License",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PromtEngineer/localGPT:rag_system/rerankers/sentence_pruner.py | from __future__ import annotations
"""Sentence-level context pruning using the Provence model (ICLR 2025).
This lightweight helper wraps the HuggingFace model hosted at
`naver/provence-reranker-debertav3-v1` and exposes a thread-safe
`prune_documents()` method that converts a list of RAG chunks into their
pruned variants.
The module fails gracefully – if the model weights cannot be downloaded
(or the `transformers` / `nltk` deps are missing) we simply return the
original documents unchanged so the upstream pipeline continues
unaffected.
"""
from threading import Lock
from typing import List, Dict, Any
class SentencePruner:
"""Lightweight singleton wrapper around the Provence model."""
_model = None # shared across all instances
_init_lock: Lock = Lock()
def __init__(self, model_name: str = "naver/provence-reranker-debertav3-v1") -> None:
    """Record the model id and eagerly trigger the shared-model load."""
    self.model_name = model_name
    self._ensure_model()
# ---------------------------------------------------------------------
# Internal helpers
# ---------------------------------------------------------------------
def _ensure_model(self) -> None:
    """Lazily download and load the Provence model exactly once.

    Uses double-checked locking on a class-level singleton so concurrent
    constructors trigger at most one download. On failure the singleton
    stays ``None`` and callers skip pruning.
    """
    if SentencePruner._model is not None:
        return
    with SentencePruner._init_lock:
        if SentencePruner._model is not None:
            return  # another thread beat us
        try:
            from transformers import AutoModel  # local import to keep base deps light
            print("🔧 Loading Provence sentence-pruning model …")
            SentencePruner._model = AutoModel.from_pretrained(
                self.model_name,
                trust_remote_code=True,
            )
            print("✅ Provence model loaded successfully.")
        except Exception as e:
            # Any failure leaves the singleton as None so callers can skip pruning.
            print(f"❌ Failed to load Provence model: {e}. Context pruning will be skipped.")
            SentencePruner._model = None
# ------------------------------------------------------------------
# Public API
# ------------------------------------------------------------------
def prune_documents(
self,
question: str,
docs: List[Dict[str, Any]],
*,
threshold: float = 0.1,
) -> List[Dict[str, Any]]:
"""Return *docs* with their `text` field pruned sentence-wise.
If the model could not be initialised we simply echo the input.
"""
if SentencePruner._model is None:
return docs # model unavailable – no-op
# Batch texts for efficiency when >1 doc
texts = [d.get("text", "") for d in docs]
try:
if len(texts) == 1:
# returns dict
outputs = [SentencePruner._model.process(question, texts[0], threshold=threshold)]
else:
# Batch call expects list[list[str]] with same outer length as questions list (1)
batched_out = SentencePruner._model.process(question, [texts], threshold=threshold)
# HF returns List[Dict] per question
outputs = batched_out[0] if isinstance(batched_out, list) else batched_out
if isinstance(outputs, dict):
outputs = [outputs]
if len(outputs) != len(texts):
print("⚠️ Provence batch size mismatch; falling back to per-doc loop")
raise ValueError
pruned: List[Dict[str, Any]] = []
for doc, out in zip(docs, outputs):
raw = out.get("pruned_context", doc.get("text", "")) if isinstance(out, dict) else doc.get("text", "")
new_text = raw if isinstance(raw, str) else " ".join(raw) # HF model may return a list of sentences
pruned.append({**doc, "text": new_text})
except Exception as e:
print(f"⚠️ Provence batch pruning failed ({e}); falling back to individual calls")
pruned = []
for doc in docs:
text = doc.get("text", "")
if not text:
pruned.append(doc)
continue
try:
res = SentencePruner._model.process(question, text, threshold=threshold)
raw = res.get("pruned_context", text) if isinstance(res, dict) else text
new_text = raw if isinstance(raw, str) else " ".join(raw)
pruned.append({**doc, "text": new_text})
except Exception as err:
print(f"⚠️ Provence pruning failed for chunk {doc.get('chunk_id')}: {err}")
pruned.append(doc)
return pruned | {
"repo_id": "PromtEngineer/localGPT",
"file_path": "rag_system/rerankers/sentence_pruner.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PromtEngineer/localGPT:rag_system/retrieval/query_transformer.py | from typing import List, Any, Dict
import json
from rag_system.utils.ollama_client import OllamaClient
class QueryDecomposer:
    """Uses an LLM to resolve conversational context and split complex
    queries into standalone sub-queries for retrieval."""

    def __init__(self, llm_client: OllamaClient, llm_model: str):
        # Client and model name used for the JSON decomposition call.
        self.llm_client = llm_client
        self.llm_model = llm_model

    def decompose(self, query: str, chat_history: List[Dict[str, Any]] | None = None) -> List[str]:
        """Decompose *query* into standalone sub-queries.
        Parameters
        ----------
        query : str
            The latest user message.
        chat_history : list[dict] | None
            Recent conversation turns (each item should contain at least the original
            user query under the key ``"query"``). Only the **last 5** turns are
            included to keep the prompt short.

        Returns
        -------
        list[str]
            Deduplicated sub-queries (at most 10); falls back to
            ``[query]`` when the LLM response cannot be parsed.
        """
        # ---- Limit history to last 5 user turns and extract the queries ----
        history_snippets: List[str] = []
        if chat_history:
            # Keep only the last 5 turns
            recent_turns = chat_history[-5:]
            # Extract user queries (fallback: full dict as string if key missing)
            for turn in recent_turns:
                history_snippets.append(str(turn.get("query", turn)))
        # Serialize chat_history for the prompt (single string)
        chat_history_text = " | ".join(history_snippets)
        # ---- Build the new SYSTEM prompt with added legacy examples ----
        system_prompt = """
You are an expert at query decomposition for a Retrieval-Augmented Generation (RAG) system.
Return one RFC-8259-compliant JSON object and nothing else.
Schema:
{
“requires_decomposition”: <bool>,
“reasoning”: <string>, // ≤ 50 words
“resolved_query”: <string>, // query after context resolution
“sub_queries”: <string[]> // 1–10 standalone items
}
Think step-by-step internally, but reveal only the concise reasoning.
⸻
Context Resolution (perform FIRST)
You will receive:
• query – the current user message
• chat_history – the most recent user turns (may be empty)
If query contains pronouns, ellipsis, or shorthand that can be unambiguously linked to something in chat_history, rewrite it to a fully self-contained question and place the result in resolved_query.
Otherwise, copy query into resolved_query unchanged.
⸻
When is decomposition REQUIRED?
• MULTI-PART questions joined by “and”, “or”, “also”, list commas, etc.
• COMPARATIVE / SUPERLATIVE questions (two or more entities, e.g. “bigger, better, fastest”).
• TEMPORAL / SEQUENTIAL questions (changes over time, event timelines).
• ENUMERATIONS (pros, cons, impacts).
• ENTITY-SET COMPARISONS (A, B, C revenue…).
When is decomposition NOT REQUIRED?
• A single, factual information need.
• Ambiguous queries needing clarification rather than splitting.
⸻
Output rules
1. Use resolved_query—not the raw query—to decide on decomposition.
2. If requires_decomposition is false, sub_queries must contain exactly resolved_query.
3. Otherwise, produce 2–10 self-contained questions; avoid pronouns and shared context.
⸻
"""
        # ---- Append NEW examples provided by the user ----
        new_examples = """
Normalise pronouns and references: turn “this paper” into the explicit title if it can be inferred, otherwise leave as-is.
chat_history: “What is the email address of the computer vision consultants?”
query: “What is their revenue?”
{
"requires_decomposition": false,
"reasoning": "Pronoun resolved; single information need.",
"resolved_query": "What is the revenue of the computer vision consultants?",
"sub_queries": [
"What is the revenue of the computer vision consultants?"
]
}
Context resolution (single info need)
chat_history: “What is the email address of the computer vision consultants?”
query: “What is the address?”
{
"requires_decomposition": false,
"reasoning": "Pronoun resolved; single information need.",
"resolved_query": "What is the physical address of the computer vision consultants?",
"sub_queries": [
"What is the physical address of the computer vision consultants?"
]
}
Context resolution (single info need)
chat_history: “ComputeX has a revenue of 100M?”
query: “Who is the CEO?”
{
"requires_decomposition": false,
"reasoning": "entities normalization.",
"resolved_query": "who is the CEO of ComputeX",
"sub_queries": [
"who is the CEO of ComputeX"
]
}
No unique antecedent → leave unresolved
chat_history: “Tell me about the paper.”
query: “What is the address?”
{
"requires_decomposition": false,
"reasoning": "Ambiguous reference; cannot resolve safely.",
"resolved_query": "What is the address?",
"sub_queries": ["What is the address?"]
}
Temporal + Comparative
chat_history: ""
query: “How did Nvidia’s 2024 revenue compare with 2023?”
{
"requires_decomposition": true,
"reasoning": "Needs revenue for two separate years before comparison.",
"resolved_query": "How did Nvidia’s 2024 revenue compare with 2023?",
"sub_queries": [
"What was Nvidia’s revenue in 2024?",
"What was Nvidia’s revenue in 2023?"
]
}
Enumeration (pros / cons / cost)
chat_history: ""
query: “List the pros, cons, and estimated implementation cost of adopting a vector database.”
{
"requires_decomposition": true,
"reasoning": "Three distinct information needs: pros, cons, cost.",
"resolved_query": "List the pros, cons, and estimated implementation cost of adopting a vector database.",
"sub_queries": [
"What are the pros of adopting a vector database?",
"What are the cons of adopting a vector database?",
"What is the estimated implementation cost of adopting a vector database?"
]
}
Entity-set comparison (multiple companies)
chat_history: ""
query: “How did Nvidia, AMD, and Intel perform in Q2 2025 in terms of revenue?”
{
"requires_decomposition": true,
"reasoning": "Need revenue for each of three entities before comparison.",
"resolved_query": "How did Nvidia, AMD, and Intel perform in Q2 2025 in terms of revenue?",
"sub_queries": [
"What was Nvidia's revenue in Q2 2025?",
"What was AMD's revenue in Q2 2025?",
"What was Intel's revenue in Q2 2025?"
]
}
Multi-part question (limitations + mitigations)
chat_history: ""
query: “What are the limitations of GPT-4o and what are the recommended mitigations?”
{
"requires_decomposition": true,
"reasoning": "Two distinct pieces of information: limitations and mitigations.",
"resolved_query": "What are the limitations of GPT-4o and what are the recommended mitigations?",
"sub_queries": [
"What are the known limitations of GPT-4o?",
"What are the recommended mitigations for the limitations of GPT-4o?"
]
}
"""
        # ---- Append legacy examples that already existed in the old prompt ----
        # NOTE: currently excluded from full_prompt (see commented lines below).
        legacy_examples_header = """
⸻
Additional legacy examples
"""
        legacy_examples_body = """
**Example 1: Multi-Part Query**
Query: "What were the main findings of the aiconfig report and how do they compare to the results from the RAG paper?"
JSON Output:
{
"reasoning": "The query asks for two distinct pieces of information: the findings from one report and a comparison to another. This requires two separate retrieval steps.",
"sub_queries": [
"What were the main findings of the aiconfig report?",
"How do the findings of the aiconfig report compare to the results from the RAG paper?"
]
}
**Example 2: Simple Query**
Query: "Summarize the contributions of the DeepSeek-V3 paper."
JSON Output:
{
"reasoning": "This is a direct request for a summary of a single document and does not contain multiple parts.",
"sub_queries": [
"Summarize the contributions of the DeepSeek-V3 paper."
]
}
**Example 3: Comparative Query**
Query: "Did Microsoft or Google make more money last year?"
JSON Output:
{
"reasoning": "This is a comparative query that requires fetching the profit for each company before a comparison can be made.",
"sub_queries": [
"How much profit did Microsoft make last year?",
"How much profit did Google make last year?"
]
}
**Example 4: Comparative Query with different phrasing**
Query: "Who has more siblings, Jamie or Sansa?"
JSON Output:
{
"reasoning": "This comparative query needs the sibling count for both individuals to be answered.",
"sub_queries": [
"How many siblings does Jamie have?",
"How many siblings does Sansa have?"
]
}
"""
        full_prompt = (
            system_prompt
            + new_examples
            # + legacy_examples_header
            # + legacy_examples_body
            + """
⸻
Now process
Input payload:
""" + json.dumps({"query": query, "chat_history": chat_history_text}, indent=2) + """
"""
        )
        # ---- Call the LLM ----
        response = self.llm_client.generate_completion(self.llm_model, full_prompt, format="json")
        response_text = response.get('response', '{}')
        try:
            # Handle potential markdown code blocks in the response
            if response_text.strip().startswith("```json"):
                response_text = response_text.strip()[7:-3].strip()
            data = json.loads(response_text)
            sub_queries = data.get('sub_queries') or [query]
            reasoning = data.get('reasoning', 'No reasoning provided.')
            print(f"Query Decomposition Reasoning: {reasoning}")
            # Fallback: ensure at least the resolved_query if sub_queries empty
            if not sub_queries:
                sub_queries = [data.get('resolved_query', query)]
            # Deduplicate while preserving order
            sub_queries = list(dict.fromkeys(sub_queries))
            # Enforce 10 sub-query limit per new requirements
            return sub_queries[:10]
        except json.JSONDecodeError:
            print(f"Failed to decode JSON from query decomposer: {response_text}")
            return [query]
class HyDEGenerator:
    """Generates a hypothetical answer document (HyDE) for a query.

    The hypothetical document is meant to be embedded in place of the raw
    query so that retrieval matches answer-shaped text.
    """

    def __init__(self, llm_client: OllamaClient, llm_model: str):
        # Completion client and the model name it should target.
        self.llm_client = llm_client
        self.llm_model = llm_model

    def generate(self, query: str) -> str:
        """Return a short, keyword-dense hypothetical document answering *query*.

        Returns an empty string when the LLM response carries no 'response' key.
        """
        prompt = (
            "Generate a short, hypothetical document that answers the following "
            "question. The document should be dense with keywords and concepts "
            f"related to the query.\n\nQuery: {query}\n\nHypothetical Document:"
        )
        completion = self.llm_client.generate_completion(self.llm_model, prompt)
        return completion.get('response', '')
class GraphQueryTranslator:
    def __init__(self, llm_client: OllamaClient, llm_model: str):
        # Store the completion client and the model name used for translation.
        self.llm_client = llm_client
        self.llm_model = llm_model
def _generate_translation_prompt(self, query: str) -> str:
return f"""
You are an expert query planner. Convert the user's question into a structured JSON query for a knowledge graph.
The JSON should contain a 'start_node' (the known entity in the query) and an 'edge_label' (the relationship being asked about).
The graph has nodes (entities) and directed edges (relationships). For example, (Tim Cook) -[IS_CEO_OF]-> (Apple).
Return ONLY the JSON object.
User Question: "{query}"
JSON Output:
"""
def translate(self, query: str) -> Dict[str, Any]:
prompt = self._generate_translation_prompt(query)
response = self.llm_client.generate_completion(self.llm_model, prompt, format="json")
try:
return json.loads(response.get('response', '{}'))
except json.JSONDecodeError:
return {} | {
"repo_id": "PromtEngineer/localGPT",
"file_path": "rag_system/retrieval/query_transformer.py",
"license": "MIT License",
"lines": 266,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
PromtEngineer/localGPT:rag_system/retrieval/retrievers.py | import lancedb
import pickle
import json
from typing import List, Dict, Any
import numpy as np
import networkx as nx
import os
from PIL import Image
from transformers import CLIPProcessor, CLIPModel
import torch
import logging
import pandas as pd
import math
import concurrent.futures
from functools import lru_cache
from rag_system.indexing.embedders import LanceDBManager
from rag_system.indexing.representations import QwenEmbedder
from rag_system.indexing.multimodal import LocalVisionModel
from rag_system.utils.logging_utils import log_retrieval_results
# BM25Retriever is no longer needed.
# class BM25Retriever: ...
from fuzzywuzzy import process
class GraphRetriever:
    """Entity-centric retriever backed by a GML-serialized knowledge graph."""

    def __init__(self, graph_path: str):
        # Load the persisted knowledge graph from disk once, up front.
        self.graph = nx.read_gml(graph_path)

    def retrieve(self, query: str, k: int = 5, score_cutoff: int = 80) -> List[Dict[str, Any]]:
        """Fuzzy-match query tokens to graph nodes and return neighbor facts.

        Each whitespace token of *query* is fuzzy-matched against node labels
        (minimum similarity *score_cutoff*); for every matched entity, one
        pseudo-document per graph neighbor is emitted, capped at *k* results.
        """
        print(f"\n--- Performing Graph Retrieval for query: '{query}' ---")
        matched_entities = []
        for token in query.split():
            hit = process.extractOne(token, self.graph.nodes(), score_cutoff=score_cutoff)
            if hit and isinstance(hit[0], str):
                matched_entities.append(hit[0])
        docs = [
            {
                'chunk_id': f"graph_{entity}_{neighbor}",
                'text': f"Entity: {entity}, Neighbor: {neighbor}",
                'score': 1.0,
                'metadata': {'source': 'graph'}
            }
            for entity in set(matched_entities)
            for neighbor in self.graph.neighbors(entity)
        ]
        print(f"Retrieved {len(docs)} documents from the graph.")
        return docs[:k]
# region === MultiVectorRetriever ===
class MultiVectorRetriever:
    """
    Performs hybrid (vector + FTS) or vector-only retrieval.

    Every query runs a full-text-search leg and a vector leg against one
    LanceDB table in parallel, merges the two frames, de-duplicates, and
    optionally fuses BM25 and vector scores with linear weights.
    """
    def __init__(self, db_manager: LanceDBManager, text_embedder: QwenEmbedder, vision_model: LocalVisionModel = None, *, fusion_config: Dict[str, Any] | None = None):
        # db_manager resolves table names to LanceDB table handles.
        self.db_manager = db_manager
        self.text_embedder = text_embedder
        # NOTE(review): vision_model is stored but never used in this class's
        # visible code — presumably reserved for image queries; confirm.
        self.vision_model = vision_model
        # Default fusion: equal linear weighting of BM25 and vector similarity.
        self.fusion_config = fusion_config or {"method": "linear", "bm25_weight": 0.5, "vec_weight": 0.5}
        # Lightweight in-memory LRU cache for single-query embeddings (256 entries)
        @lru_cache(maxsize=256)
        def _embed_single(q: str):
            return self.text_embedder.create_embeddings([q])[0]
        self._embed_single = _embed_single
    def retrieve(self, text_query: str, table_name: str, k: int, reranker=None) -> List[Dict[str, Any]]:
        """
        Performs a search on a single LanceDB table.
        If a reranker is provided, it performs a hybrid search.
        Otherwise, it performs a standard vector search.

        NOTE(review): as implemented, BOTH legs always run and the reranker
        argument is only logged, never applied — the docstring above
        describes intended rather than current behavior.
        Returns a list of result dicts; [] on any error (errors are printed,
        not raised).
        """
        print(f"\n--- Performing Retrieval for query: '{text_query}' on table '{table_name}' ---")
        try:
            if table_name is None:
                table_name = "default_text_table"
            tbl = self.db_manager.get_table(table_name)
            # Create / fetch cached text embedding for the query
            text_query_embedding = self._embed_single(text_query)
            logger = logging.getLogger(__name__)
            # Always perform hybrid lexical + vector search
            logger.debug(
                "Running hybrid search on table '%s' (k=%s, have_reranker=%s)",
                table_name,
                k,
                bool(reranker),
            )
            if reranker:
                logger.debug("Hybrid + reranker path not yet implemented with manual fusion; proceeding without extra reranker.")
            # Manual two-leg hybrid: take half from each modality
            fts_k = k // 2
            vec_k = k - fts_k
            # Run FTS and vector search in parallel to cut latency
            def _run_fts():
                # Very short queries often underperform → add fuzzy wildcard
                fts_query = text_query
                if len(text_query.split()) == 1:
                    fts_query = f"{text_query}* OR {text_query}~"
                return (
                    tbl.search(query=fts_query, query_type="fts")
                    .limit(fts_k)
                    .to_df()
                )
            def _run_vec():
                if vec_k == 0:
                    return None
                return (
                    tbl.search(text_query_embedding)
                    .limit(vec_k * 2)  # fetch extra to allow for dedup
                    .to_df()
                )
            with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
                fts_future = executor.submit(_run_fts)
                vec_future = executor.submit(_run_vec)
                fts_df = fts_future.result()
                vec_df = vec_future.result()
            # vec_df is None when vec_k == 0 (i.e. k == 1 keeps only the FTS leg).
            if vec_df is not None:
                combined = pd.concat([fts_df, vec_df])
            else:
                combined = fts_df
            # Remove duplicates preserving first occurrence, then trim to k
            dedup_subset = ["_rowid"] if "_rowid" in combined.columns else (["chunk_id"] if "chunk_id" in combined.columns else None)
            if dedup_subset:
                combined = combined.drop_duplicates(subset=dedup_subset, keep="first")
            combined = combined.head(k)
            results_df = combined
            logger.debug(
                "Hybrid (fts=%s, vec=%s) → %s unique chunks",
                len(fts_df),
                0 if vec_df is None else len(vec_df),
                len(results_df),
            )
            retrieved_docs = []
            for _, row in results_df.iterrows():
                metadata = json.loads(row.get('metadata', '{}'))
                # Add top-level fields back into metadata for consistency if they don't exist
                metadata.setdefault('document_id', row.get('document_id'))
                metadata.setdefault('chunk_index', row.get('chunk_index'))
                # Determine score (vector distance or FTS). Replace NaN with 0.0
                raw_score = row.get('_distance') if '_distance' in row else row.get('score')
                try:
                    if raw_score is None or (isinstance(raw_score, float) and math.isnan(raw_score)):
                        raw_score = 0.0
                except Exception:
                    raw_score = 0.0
                combined_score = raw_score
                # Optional linear-weight fusion if both FTS & vector scores exist
                if '_distance' in row and 'score' in row:
                    try:
                        bm25 = row.get('score', 0.0)
                        vec_sim = 1.0 / (1.0 + row.get('_distance', 1.0))  # convert distance to similarity
                        w_bm25 = float(self.fusion_config.get('bm25_weight', 0.5))
                        w_vec = float(self.fusion_config.get('vec_weight', 0.5))
                        combined_score = w_bm25 * bm25 + w_vec * vec_sim
                    except Exception:
                        # Best-effort fusion: fall back to the raw score on any failure.
                        pass
                retrieved_docs.append({
                    'chunk_id': row.get('chunk_id'),
                    'text': metadata.get('original_text', row.get('text')),
                    'score': combined_score,
                    'bm25': row.get('score'),
                    '_distance': row.get('_distance'),
                    'document_id': row.get('document_id'),
                    'chunk_index': row.get('chunk_index'),
                    'metadata': metadata
                })
            logger.debug("Hybrid search returned %s results", len(retrieved_docs))
            log_retrieval_results(retrieved_docs, k)
            print(f"Retrieved {len(retrieved_docs)} documents.")
            return retrieved_docs
        except Exception as e:
            # Broad catch: retrieval failures degrade to an empty result set.
            print(f"Could not search table '{table_name}': {e}")
            return []
# endregion
if __name__ == '__main__':
print("retrievers.py updated for LanceDB FTS Hybrid Search.")
| {
"repo_id": "PromtEngineer/localGPT",
"file_path": "rag_system/retrieval/retrievers.py",
"license": "MIT License",
"lines": 169,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PromtEngineer/localGPT:rag_system/utils/batch_processor.py | import time
import logging
from typing import List, Dict, Any, Callable, Optional, Iterator
from contextlib import contextmanager
import gc
# Set up logging
# basicConfig only has an effect if the root logger has no handlers yet.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
@contextmanager
def timer(operation_name: str):
    """Context manager that logs how long the wrapped operation took."""
    started_at = time.time()
    try:
        yield
    finally:
        # Log even if the body raised, so failed operations are timed too.
        elapsed = time.time() - started_at
        logger.info(f"{operation_name} completed in {elapsed:.2f}s")
class ProgressTracker:
    """Tracks progress and performance metrics for batch operations."""

    def __init__(self, total_items: int, operation_name: str = "Processing"):
        self.total_items = total_items
        self.operation_name = operation_name
        self.processed_items = 0
        self.errors_encountered = 0
        self.start_time = time.time()
        self.last_report_time = time.time()
        self.report_interval = 10  # seconds between periodic reports

    def update(self, items_processed: int, errors: int = 0):
        """Record newly processed items and emit a throttled progress report."""
        self.processed_items += items_processed
        self.errors_encountered += errors
        now = time.time()
        # Throttle: only report once per report_interval seconds.
        if now - self.last_report_time < self.report_interval:
            return
        self._report_progress()
        self.last_report_time = now

    def _report_progress(self):
        """Log rate, percentage, ETA and error count for the run so far."""
        elapsed = time.time() - self.start_time
        if elapsed <= 0:
            return
        rate = self.processed_items / elapsed
        remaining = self.total_items - self.processed_items
        eta = remaining / rate if rate > 0 else 0
        progress_pct = (self.processed_items / self.total_items) * 100
        logger.info(
            f"{self.operation_name}: {self.processed_items}/{self.total_items} "
            f"({progress_pct:.1f}%) - {rate:.2f} items/sec - "
            f"ETA: {eta/60:.1f}min - Errors: {self.errors_encountered}"
        )

    def finish(self):
        """Log final throughput and error statistics."""
        elapsed = time.time() - self.start_time
        rate = self.processed_items / elapsed if elapsed > 0 else 0
        logger.info(
            f"{self.operation_name} completed: {self.processed_items}/{self.total_items} items "
            f"in {elapsed:.2f}s ({rate:.2f} items/sec) - {self.errors_encountered} errors"
        )
class BatchProcessor:
    """Generic batch processor with progress tracking and error handling."""

    def __init__(self, batch_size: int = 50, enable_gc: bool = True):
        self.batch_size = batch_size
        self.enable_gc = enable_gc

    def process_in_batches(
        self,
        items: List[Any],
        process_func: Callable,
        operation_name: str = "Processing",
        **kwargs
    ) -> List[Any]:
        """
        Process items in batches with progress tracking.

        Args:
            items: List of items to process.
            process_func: Function applied to each batch; must return a list.
            operation_name: Name for progress reporting.
            **kwargs: Additional arguments forwarded to process_func.

        Returns:
            Concatenated results from all successful batches; failed batches
            are logged, counted as errors, and skipped.
        """
        if not items:
            logger.info(f"{operation_name}: No items to process")
            return []
        tracker = ProgressTracker(len(items), operation_name)
        collected: List[Any] = []
        total_batches = (len(items) + self.batch_size - 1) // self.batch_size
        logger.info(f"Starting {operation_name} for {len(items)} items in batches of {self.batch_size}")
        with timer(f"{operation_name} (total)"):
            for batch_num, batch in enumerate(self.batch_iterator(items), start=1):
                try:
                    with timer(f"Batch {batch_num}/{total_batches}"):
                        collected.extend(process_func(batch, **kwargs))
                        tracker.update(len(batch))
                except Exception as e:
                    # A failing batch is dropped; remaining batches still run.
                    logger.error(f"Error in batch {batch_num}: {e}")
                    tracker.update(len(batch), errors=len(batch))
                    continue
                # Periodic garbage collection keeps memory bounded.
                if self.enable_gc and batch_num % 5 == 0:
                    gc.collect()
        tracker.finish()
        return collected

    def batch_iterator(self, items: List[Any]) -> Iterator[List[Any]]:
        """Yield successive slices of *items* of length batch_size."""
        step = self.batch_size
        for start in range(0, len(items), step):
            yield items[start:start + step]
class StreamingProcessor:
    """Process items one at a time with minimal memory usage."""

    def __init__(self, enable_gc_interval: int = 100):
        # Run gc.collect() every N items; 0/None disables collection.
        self.enable_gc_interval = enable_gc_interval

    def process_streaming(
        self,
        items: List[Any],
        process_func: Callable,
        operation_name: str = "Streaming Processing",
        **kwargs
    ) -> List[Any]:
        """
        Process items one at a time with minimal memory footprint.

        Args:
            items: List of items to process.
            process_func: Function applied to each single item.
            operation_name: Name for progress reporting.
            **kwargs: Additional arguments forwarded to process_func.

        Returns:
            Results of the successful calls; failing items are logged,
            counted as errors, and skipped.
        """
        if not items:
            logger.info(f"{operation_name}: No items to process")
            return []
        tracker = ProgressTracker(len(items), operation_name)
        collected: List[Any] = []
        logger.info(f"Starting {operation_name} for {len(items)} items (streaming)")
        with timer(f"{operation_name} (streaming)"):
            for i, item in enumerate(items):
                try:
                    collected.append(process_func(item, **kwargs))
                    tracker.update(1)
                except Exception as e:
                    logger.error(f"Error processing item {i}: {e}")
                    tracker.update(1, errors=1)
                    continue
                # Periodic garbage collection
                if self.enable_gc_interval and (i + 1) % self.enable_gc_interval == 0:
                    gc.collect()
        tracker.finish()
        return collected
# Utility functions for common batch operations
def batch_chunks_by_document(chunks: List[Dict[str, Any]]) -> Dict[str, List[Dict[str, Any]]]:
    """Group chunks by document_id for document-level batch processing.

    Chunks without a metadata document_id are grouped under 'unknown'.
    """
    grouped: Dict[str, List[Dict[str, Any]]] = {}
    for chunk in chunks:
        doc_id = chunk.get('metadata', {}).get('document_id', 'unknown')
        grouped.setdefault(doc_id, []).append(chunk)
    return grouped
def estimate_memory_usage(chunks: List[Dict[str, Any]]) -> float:
    """Estimate memory usage of chunks in MB.

    Rough model: mean text length of (up to) the first 10 chunks, times the
    chunk count, times 2 for object overhead, converted to megabytes.
    """
    if not chunks:
        return 0.0
    sample = chunks[:min(10, len(chunks))]
    avg_text_length = sum(len(c.get('text', '')) for c in sample) / len(sample)
    estimated_bytes = avg_text_length * len(chunks) * 2
    return estimated_bytes / (1024 * 1024)
if __name__ == '__main__':
    # Test the batch processor with a trivial workload.
    def dummy_process_func(batch):
        # Simulate per-batch latency, then tag each item.
        time.sleep(0.1)  # Simulate processing time
        return [f"processed_{item}" for item in batch]
    test_items = list(range(100))
    processor = BatchProcessor(batch_size=10)
    results = processor.process_in_batches(
        test_items,
        dummy_process_func,
        "Test Processing"
    )
print(f"Processed {len(results)} items") | {
"repo_id": "PromtEngineer/localGPT",
"file_path": "rag_system/utils/batch_processor.py",
"license": "MIT License",
"lines": 181,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PromtEngineer/localGPT:rag_system/utils/logging_utils.py | import logging
from typing import List, Dict
from textwrap import shorten
logger = logging.getLogger("rag-system")
# Global log format – only set if user has not configured logging
if not logger.handlers:
    # Install a default configuration only when no handler is attached yet,
    # so an application-level logging setup is left untouched.
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s | %(levelname)-7s | %(name)s | %(message)s",
    )
def log_query(query: str, sub_queries: List[str] | None = None) -> None:
    """Emit a nicely-formatted block describing the incoming query and any
    decomposition into sub-queries."""
    divider = "=" * 60
    logger.info("\n%s\nUSER QUERY: %s", divider, query)
    for idx, sub in enumerate(sub_queries or [], 1):
        logger.info(" sub-%d → %s", idx, sub)
    logger.info("%s", divider)
def log_retrieval_results(results: List[Dict], k: int) -> None:
    """Show chunk_id, truncated text preview and score for the first *k* rows."""
    if not results:
        logger.info("Retrieval returned 0 documents.")
        return
    logger.info("Top %d results:", min(k, len(results)))
    header = f"{'chunk_id':<14} {'score':<7} preview"
    logger.info(header)
    logger.info("-" * len(header))
    for entry in results[:k]:
        snippet = shorten(entry.get("text", ""), width=60, placeholder="…")
        logger.info("%s %-7.3f %s", str(entry.get("chunk_id"))[:12], entry.get("score", 0.0), snippet)
"repo_id": "PromtEngineer/localGPT",
"file_path": "rag_system/utils/logging_utils.py",
"license": "MIT License",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PromtEngineer/localGPT:rag_system/utils/ollama_client.py | import requests
import json
from typing import List, Dict, Any
import base64
from io import BytesIO
from PIL import Image
import httpx, asyncio
class OllamaClient:
    """
    An enhanced client for Ollama that now handles image data for VLM models.

    Thin HTTP wrapper over the Ollama REST API: synchronous calls use
    ``requests``, the async variant uses ``httpx``, and ``stream_completion``
    yields partial response chunks as they arrive.
    """
    def __init__(self, host: str = "http://localhost:11434"):
        # Base host and derived REST root, e.g. http://localhost:11434/api.
        self.host = host
        self.api_url = f"{host}/api"
        # (Connection check remains the same)
    def _image_to_base64(self, image: Image.Image) -> str:
        """Converts a Pillow Image to a base64 string."""
        # PNG is used as a lossless intermediate encoding before base64.
        buffered = BytesIO()
        image.save(buffered, format="PNG")
        return base64.b64encode(buffered.getvalue()).decode('utf-8')
    def generate_embedding(self, model: str, text: str) -> List[float]:
        """Return the embedding vector for *text*, or [] on any HTTP error."""
        try:
            response = requests.post(
                f"{self.api_url}/embeddings",
                json={"model": model, "prompt": text}
            )
            response.raise_for_status()
            return response.json().get("embedding", [])
        except requests.exceptions.RequestException as e:
            # Best-effort: failures are reported on stdout, not raised.
            print(f"Error generating embedding: {e}")
            return []
    def generate_completion(
        self,
        model: str,
        prompt: str,
        *,
        format: str = "",
        images: List[Image.Image] | None = None,
        enable_thinking: bool | None = None,
    ) -> Dict[str, Any]:
        """
        Generates a completion, now with optional support for images.
        Args:
            model: The name of the generation model (e.g., 'llava', 'qwen-vl').
            prompt: The text prompt for the model.
            format: The format for the response, e.g., "json".
            images: A list of Pillow Image objects to send to the VLM.
            enable_thinking: Optional flag to disable chain-of-thought for Qwen models.
        Returns:
            The decoded JSON response object, or {} on any HTTP error.
        """
        try:
            payload = {
                "model": model,
                "prompt": prompt,
                "stream": False
            }
            if format:
                payload["format"] = format
            if images:
                payload["images"] = [self._image_to_base64(img) for img in images]
            # Optional: disable thinking mode for Qwen3 / DeepSeek models
            if enable_thinking is not None:
                payload["chat_template_kwargs"] = {"enable_thinking": enable_thinking}
            response = requests.post(
                f"{self.api_url}/generate",
                json=payload
            )
            response.raise_for_status()
            # A non-streaming response should be a single JSON object; the
            # last line is parsed defensively in case several are returned.
            response_lines = response.text.strip().split('\n')
            final_response = json.loads(response_lines[-1])
            return final_response
        except requests.exceptions.RequestException as e:
            print(f"Error generating completion: {e}")
            return {}
    # -------------------------------------------------------------
    # Async variant – uses httpx so the caller can await multiple
    # LLM calls concurrently (triage, verification, etc.).
    # -------------------------------------------------------------
    async def generate_completion_async(
        self,
        model: str,
        prompt: str,
        *,
        format: str = "",
        images: List[Image.Image] | None = None,
        enable_thinking: bool | None = None,
        timeout: int = 60,
    ) -> Dict[str, Any]:
        """Asynchronous version of generate_completion using httpx.

        Builds the same payload as the sync variant; returns {} on HTTP
        errors or cancellation.
        """
        payload = {"model": model, "prompt": prompt, "stream": False}
        if format:
            payload["format"] = format
        if images:
            payload["images"] = [self._image_to_base64(img) for img in images]
        if enable_thinking is not None:
            payload["chat_template_kwargs"] = {"enable_thinking": enable_thinking}
        try:
            async with httpx.AsyncClient(timeout=timeout) as client:
                resp = await client.post(f"{self.api_url}/generate", json=payload)
                resp.raise_for_status()
                # Same defensive last-line parse as the sync variant.
                return json.loads(resp.text.strip().split("\n")[-1])
        except (httpx.HTTPError, asyncio.CancelledError) as e:
            print(f"Async Ollama completion error: {e}")
            return {}
    # -------------------------------------------------------------
    # Streaming variant – yields token chunks in real time
    # -------------------------------------------------------------
    def stream_completion(
        self,
        model: str,
        prompt: str,
        *,
        images: List[Image.Image] | None = None,
        enable_thinking: bool | None = None,
    ):
        """Generator that yields partial *response* strings as they arrive.
        Example:
            for tok in client.stream_completion("qwen2", "Hello"):
                print(tok, end="", flush=True)
        """
        payload: Dict[str, Any] = {"model": model, "prompt": prompt, "stream": True}
        if images:
            payload["images"] = [self._image_to_base64(img) for img in images]
        if enable_thinking is not None:
            payload["chat_template_kwargs"] = {"enable_thinking": enable_thinking}
        with requests.post(f"{self.api_url}/generate", json=payload, stream=True) as resp:
            resp.raise_for_status()
            for raw_line in resp.iter_lines():
                if not raw_line:
                    # Keep-alive newline
                    continue
                try:
                    data = json.loads(raw_line.decode())
                except json.JSONDecodeError:
                    # Skip malformed lines rather than aborting the stream.
                    continue
                # The Ollama streaming API sends objects like {"response":"Hi","done":false}
                chunk = data.get("response", "")
                if chunk:
                    yield chunk
                if data.get("done"):
                    break
if __name__ == '__main__':
    # This test now requires a VLM model like 'llava' or 'qwen-vl' to be pulled.
    print("Ollama client updated for multimodal (VLM) support.")
    try:
        client = OllamaClient()
        # Create a dummy black image for testing
        dummy_image = Image.new('RGB', (100, 100), 'black')
        # Test VLM completion
        vlm_response = client.generate_completion(
            model="llava",  # Make sure you have run 'ollama pull llava'
            prompt="What color is this image?",
            images=[dummy_image]
        )
        if vlm_response and 'response' in vlm_response:
            print("\n--- VLM Test Response ---")
            print(vlm_response['response'])
        else:
            # generate_completion returns {} on HTTP errors, landing here.
            print("\nFailed to get VLM response. Is 'llava' model pulled and running?")
    except Exception as e:
        print(f"An error occurred: {e}")
"repo_id": "PromtEngineer/localGPT",
"file_path": "rag_system/utils/ollama_client.py",
"license": "MIT License",
"lines": 160,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PromtEngineer/localGPT:rag_system/utils/validate_model_config.py | #!/usr/bin/env python3
"""
Model Configuration Validation Script
=====================================
This script validates the consolidated model configuration system to ensure:
1. No configuration conflicts exist
2. All model names are consistent across components
3. Models are accessible and properly configured
4. The configuration validation system works correctly
Run this after making configuration changes to catch issues early.
"""
import sys
import os
# Add parent directories to path for imports
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from rag_system.main import (
PIPELINE_CONFIGS,
OLLAMA_CONFIG,
EXTERNAL_MODELS,
validate_model_config
)
def print_header(title: str):
    """Print a formatted header."""
    bar = '=' * 60
    print(f"\n{bar}")
    print(f"🔍 {title}")
    print(f"{bar}")
def print_section(title: str):
    """Print a formatted section header."""
    rule = '─' * 40
    print(f"\n{rule}")
    print(f"📋 {title}")
    print(f"{rule}")
def validate_configuration_consistency():
    """Validate that all configurations are consistent.

    Cross-checks the embedding, reranker and vision model names between
    PIPELINE_CONFIGS ("default"/"fast") and EXTERNAL_MODELS, printing each
    comparison as it goes.

    Returns:
        list[str]: one "❌ ..." message per detected mismatch (empty when
        everything agrees).
    """
    print_header("CONFIGURATION CONSISTENCY VALIDATION")
    errors = []
    # 1. Check embedding model consistency
    print_section("Embedding Model Consistency")
    default_embedding = PIPELINE_CONFIGS["default"]["embedding_model_name"]
    external_embedding = EXTERNAL_MODELS["embedding_model"]
    fast_embedding = PIPELINE_CONFIGS["fast"]["embedding_model_name"]
    print(f"Default Config: {default_embedding}")
    print(f"External Models: {external_embedding}")
    print(f"Fast Config: {fast_embedding}")
    # NOTE(review): because of the elif, a default/external mismatch masks a
    # simultaneous default/fast mismatch — only one message is emitted.
    if default_embedding != external_embedding:
        errors.append(f"❌ Embedding model mismatch: default={default_embedding}, external={external_embedding}")
    elif default_embedding != fast_embedding:
        errors.append(f"❌ Embedding model mismatch: default={default_embedding}, fast={fast_embedding}")
    else:
        print("✅ Embedding models are consistent")
    # 2. Check reranker model consistency
    print_section("Reranker Model Consistency")
    default_reranker = PIPELINE_CONFIGS["default"]["reranker"]["model_name"]
    external_reranker = EXTERNAL_MODELS["reranker_model"]
    print(f"Default Config: {default_reranker}")
    print(f"External Models: {external_reranker}")
    if default_reranker != external_reranker:
        errors.append(f"❌ Reranker model mismatch: default={default_reranker}, external={external_reranker}")
    else:
        print("✅ Reranker models are consistent")
    # 3. Check vision model consistency
    print_section("Vision Model Consistency")
    default_vision = PIPELINE_CONFIGS["default"]["vision_model_name"]
    external_vision = EXTERNAL_MODELS["vision_model"]
    print(f"Default Config: {default_vision}")
    print(f"External Models: {external_vision}")
    if default_vision != external_vision:
        errors.append(f"❌ Vision model mismatch: default={default_vision}, external={external_vision}")
    else:
        print("✅ Vision models are consistent")
    return errors
def print_model_usage_map():
    """Print a comprehensive map of which models are used where.

    Purely informational: reads OLLAMA_CONFIG and EXTERNAL_MODELS and prints
    a static usage table; returns nothing and modifies nothing.
    """
    print_header("MODEL USAGE MAP")
    print_section("🤖 Ollama Models (Local Inference)")
    for model_type, model_name in OLLAMA_CONFIG.items():
        # 'host' is connection info, not a model entry.
        if model_type != "host":
            print(f"  {model_type.replace('_', ' ').title()}: {model_name}")
    print_section("🔗 External Models (HuggingFace/Direct)")
    for model_type, model_name in EXTERNAL_MODELS.items():
        print(f"  {model_type.replace('_', ' ').title()}: {model_name}")
    print_section("📍 Model Usage by Component")
    # Static description of where each model is consumed in the pipeline.
    usage_map = {
        "🔤 Text Embedding": {
            "Model": EXTERNAL_MODELS["embedding_model"],
            "Used In": ["Retrieval Pipeline", "Semantic Cache", "Dense Retrieval", "Late Chunking"],
            "Component": "QwenEmbedder (representations.py)"
        },
        "🧠 Text Generation": {
            "Model": OLLAMA_CONFIG["generation_model"],
            "Used In": ["Agent Loop", "Answer Synthesis", "Query Decomposition", "Verification"],
            "Component": "OllamaClient"
        },
        "🚀 Enrichment/Routing": {
            "Model": OLLAMA_CONFIG["enrichment_model"],
            "Used In": ["Query Routing", "Document Overview Analysis"],
            "Component": "Agent Loop (_route_via_overviews)"
        },
        "🔀 Reranking": {
            "Model": EXTERNAL_MODELS["reranker_model"],
            "Used In": ["Hybrid Search", "Document Reranking", "AI Reranker"],
            "Component": "ColBERT (rerankers-lib) or QwenReranker"
        },
        "👁️ Vision": {
            "Model": EXTERNAL_MODELS["vision_model"],
            "Used In": ["Multimodal Processing", "Image Embeddings"],
            "Component": "Vision Pipeline (when enabled)"
        }
    }
    for model_name, details in usage_map.items():
        print(f"\n{model_name}")
        print(f"  Model: {details['Model']}")
        print(f"  Component: {details['Component']}")
        print(f"  Used In: {', '.join(details['Used In'])}")
def test_validation_function():
    """Run the built-in validate_model_config() check.

    Returns:
        bool: True only when validate_model_config() completed without
        raising AND returned a truthy result.

    Bug fix: previously a falsy return value printed "❌ ... returned False"
    but the function still returned True, so main() counted the run as
    passed. A falsy result now correctly reports failure.
    """
    print_header("VALIDATION FUNCTION TEST")
    try:
        result = validate_model_config()
    except Exception as e:
        print(f"❌ validate_model_config() failed with error: {e}")
        return False
    if result:
        print("✅ validate_model_config() passed successfully!")
        return True
    print("❌ validate_model_config() returned False")
    return False
def check_pipeline_configurations():
    """Check all pipeline configurations for completeness.

    Verifies the presence of the required keys per pipeline config and
    returns a list of "❌ ..." messages for every missing key.
    """
    print_header("PIPELINE CONFIGURATION COMPLETENESS")
    required_keys = {
        "default": ["storage", "retrieval", "embedding_model_name", "reranker"],
        "fast": ["storage", "retrieval", "embedding_model_name"]
    }
    problems = []
    for config_name, required in required_keys.items():
        print_section(f"{config_name.title()} Configuration")
        config = PIPELINE_CONFIGS.get(config_name, {})
        for key in required:
            if key not in config:
                error_msg = f"❌ Missing required key '{key}' in {config_name} config"
                problems.append(error_msg)
                print(f" {error_msg}")
            else:
                print(f" ✅ {key}: {type(config[key]).__name__}")
    return problems
def main():
    """Run all validation checks.

    Returns:
        int: process exit code — 0 when every check passed, 1 otherwise.
    """
    print("🚀 Starting Model Configuration Validation")
    print(f"Python Path: {sys.path[0]}")
    all_errors = []
    # Run all validation checks and accumulate their error messages.
    all_errors.extend(validate_configuration_consistency())
    all_errors.extend(check_pipeline_configurations())
    # Print model usage map (informational only; contributes no errors).
    print_model_usage_map()
    # Test validation function
    validation_passed = test_validation_function()
    # Final summary
    print_header("VALIDATION SUMMARY")
    if all_errors:
        print("❌ VALIDATION FAILED - Issues Found:")
        for error in all_errors:
            print(f" {error}")
        return 1
    elif not validation_passed:
        print("❌ VALIDATION FAILED - validate_model_config() function failed")
        return 1
    else:
        print("✅ ALL VALIDATIONS PASSED!")
        print("\n🎉 Your model configuration is consistent and properly structured!")
        print("\n📋 Summary:")
        print(f"  • Embedding Model: {EXTERNAL_MODELS['embedding_model']}")
        print(f"  • Generation Model: {OLLAMA_CONFIG['generation_model']}")
        print(f"  • Enrichment Model: {OLLAMA_CONFIG['enrichment_model']}")
        print(f"  • Reranker Model: {EXTERNAL_MODELS['reranker_model']}")
        print(f"  • Vision Model: {EXTERNAL_MODELS['vision_model']}")
        return 0
if __name__ == "__main__":
sys.exit(main()) | {
"repo_id": "PromtEngineer/localGPT",
"file_path": "rag_system/utils/validate_model_config.py",
"license": "MIT License",
"lines": 179,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PromtEngineer/localGPT:run_system.py | #!/usr/bin/env python3
"""
RAG System Unified Launcher
===========================
A comprehensive launcher that starts all RAG system components:
- Ollama server
- RAG API server (port 8001)
- Backend server (port 8000)
- Frontend server (port 3000)
Features:
- Single command startup
- Real-time log aggregation
- Process health monitoring
- Graceful shutdown
- Production-ready deployment support
Usage:
python run_system.py [--mode dev|prod] [--logs-only] [--no-frontend]
"""
import subprocess
import threading
import time
import signal
import sys
import os
import argparse
import json
import requests
from pathlib import Path
from datetime import datetime
from typing import Dict, List, Optional, TextIO
import logging
from dataclasses import dataclass
import psutil
@dataclass
class ServiceConfig:
    """Static description of one managed service process."""
    name: str  # human-readable service identifier (also used as log prefix)
    command: List[str]  # argv used to spawn the process
    port: int  # TCP port the service listens on
    cwd: Optional[str] = None  # working-directory override for the subprocess
    env: Optional[Dict[str, str]] = None  # extra environment variables
    health_check_path: str = "/health"  # presumably polled for liveness — confirm against monitor code
    startup_delay: int = 2  # seconds to wait after spawning before the next step
    required: bool = True  # non-required services (e.g. frontend) may be unavailable
class ColoredFormatter(logging.Formatter):
    """Custom formatter with colors for different log levels and services."""
    COLORS = {
        'DEBUG': '\033[36m',     # Cyan
        'INFO': '\033[32m',      # Green
        'WARNING': '\033[33m',   # Yellow
        'ERROR': '\033[31m',     # Red
        'CRITICAL': '\033[35m',  # Magenta
    }
    SERVICE_COLORS = {
        'ollama': '\033[94m',    # Blue
        'rag-api': '\033[95m',   # Magenta
        'backend': '\033[96m',   # Cyan
        'frontend': '\033[93m',  # Yellow
        'system': '\033[92m',    # Green
    }
    RESET = '\033[0m'

    def format(self, record):
        """Render *record* as one colored, HH:MM:SS-timestamped line."""
        # Records may carry a 'service' attribute; default to 'system'.
        service = getattr(record, 'service', 'system')
        color = self.SERVICE_COLORS.get(service, self.COLORS.get(record.levelname, ''))
        stamp = datetime.fromtimestamp(record.created).strftime('%H:%M:%S')
        tagged_service = f"{color}[{service.upper()}]{self.RESET}"
        tagged_level = f"{self.COLORS.get(record.levelname, '')}{record.levelname}{self.RESET}"
        return f"{stamp} {tagged_service} {tagged_level}: {record.getMessage()}"
class ServiceManager:
"""Manages multiple system services with logging and health monitoring."""
    def __init__(self, mode: str = "dev", logs_dir: str = "logs"):
        """Create the manager and wire up logging, configs and signal handlers.

        Args:
            mode: "dev" or "prod"; influences service commands/environment.
            logs_dir: directory for aggregated log files (created if missing).
        """
        self.mode = mode
        self.logs_dir = Path(logs_dir)
        self.logs_dir.mkdir(exist_ok=True)
        # Live child processes and their log-pumping threads, keyed by service name.
        self.processes: Dict[str, subprocess.Popen] = {}
        self.log_threads: Dict[str, threading.Thread] = {}
        self.running = False
        # Setup logging
        self.setup_logging()
        # Service configurations
        self.services = self._get_service_configs()
        # Register signal handlers for graceful shutdown
        signal.signal(signal.SIGINT, self._signal_handler)
        signal.signal(signal.SIGTERM, self._signal_handler)
def setup_logging(self):
"""Setup centralized logging with colors."""
# Create main logger
self.logger = logging.getLogger('system')
self.logger.setLevel(logging.INFO)
# Console handler with colors
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(ColoredFormatter())
self.logger.addHandler(console_handler)
# File handler for system logs
file_handler = logging.FileHandler(self.logs_dir / 'system.log')
file_handler.setFormatter(logging.Formatter(
'%(asctime)s [%(levelname)s] %(message)s'
))
self.logger.addHandler(file_handler)
def _get_service_configs(self) -> Dict[str, ServiceConfig]:
"""Define service configurations based on mode."""
base_configs = {
'ollama': ServiceConfig(
name='ollama',
command=['ollama', 'serve'],
port=11434,
startup_delay=5,
required=True
),
'rag-api': ServiceConfig(
name='rag-api',
command=[sys.executable, '-m', 'rag_system.api_server'],
port=8001,
startup_delay=3,
required=True
),
'backend': ServiceConfig(
name='backend',
command=[sys.executable, 'backend/server.py'],
port=8000,
startup_delay=2,
required=True
),
'frontend': ServiceConfig(
name='frontend',
command=['npm', 'run', 'dev' if self.mode == 'dev' else 'start'],
port=3000,
startup_delay=5,
required=False # Optional in case Node.js not available
)
}
# Production mode adjustments
if self.mode == 'prod':
# Use production build for frontend
base_configs['frontend'].command = ['npm', 'run', 'start']
# Add production environment variables
base_configs['rag-api'].env = {'NODE_ENV': 'production'}
base_configs['backend'].env = {'NODE_ENV': 'production'}
return base_configs
def _signal_handler(self, signum, frame):
"""Handle shutdown signals gracefully."""
self.logger.info(f"Received signal {signum}, shutting down...")
self.shutdown()
sys.exit(0)
def is_port_in_use(self, port: int) -> bool:
"""Check if a port is already in use."""
try:
for conn in psutil.net_connections():
if conn.laddr.port == port and conn.status == 'LISTEN':
return True
return False
except (psutil.AccessDenied, AttributeError):
# Fallback method
import socket
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
return s.connect_ex(('localhost', port)) == 0
def check_prerequisites(self) -> bool:
"""Check if all required tools are available."""
self.logger.info("🔍 Checking prerequisites...")
missing_tools = []
# Check Ollama
if not self._command_exists('ollama'):
missing_tools.append('ollama (https://ollama.ai)')
# Check Python
if not self._command_exists('python') and not self._command_exists('python3'):
missing_tools.append('python')
# Check Node.js (optional)
if not self._command_exists('npm'):
self.logger.warning("⚠️ npm not found - frontend will be disabled")
self.services['frontend'].required = False
if missing_tools:
self.logger.error(f"❌ Missing required tools: {', '.join(missing_tools)}")
return False
self.logger.info("✅ All prerequisites satisfied")
return True
def _command_exists(self, command: str) -> bool:
"""Check if a command exists in PATH."""
try:
subprocess.run([command, '--version'],
capture_output=True, check=True, timeout=5)
return True
except (subprocess.CalledProcessError, subprocess.TimeoutExpired, FileNotFoundError):
return False
def ensure_models(self):
"""Ensure required Ollama models are available."""
self.logger.info("📥 Checking required models...")
required_models = ['qwen3:8b', 'qwen3:0.6b']
try:
# Get list of installed models
result = subprocess.run(['ollama', 'list'],
capture_output=True, text=True, timeout=10)
installed_models = result.stdout
for model in required_models:
if model not in installed_models:
self.logger.info(f"📥 Pulling {model}...")
subprocess.run(['ollama', 'pull', model],
check=True, timeout=300) # 5 min timeout
self.logger.info(f"✅ {model} ready")
else:
self.logger.info(f"✅ {model} already available")
except subprocess.TimeoutExpired:
self.logger.warning("⚠️ Model check timed out - continuing anyway")
except subprocess.CalledProcessError as e:
self.logger.warning(f"⚠️ Could not check/pull models: {e}")
def start_service(self, service_name: str, config: ServiceConfig) -> bool:
"""Start a single service."""
if service_name in self.processes:
self.logger.warning(f"⚠️ {service_name} already running")
return True
# Check if port is in use
if self.is_port_in_use(config.port):
self.logger.warning(f"⚠️ Port {config.port} already in use, skipping {service_name}")
return not config.required
self.logger.info(f"🔄 Starting {service_name} on port {config.port}...")
try:
# Setup environment
env = os.environ.copy()
if config.env:
env.update(config.env)
# Start process
process = subprocess.Popen(
config.command,
cwd=config.cwd,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True,
bufsize=1,
universal_newlines=True
)
self.processes[service_name] = process
# Start log monitoring thread
log_thread = threading.Thread(
target=self._monitor_service_logs,
args=(service_name, process),
daemon=True
)
log_thread.start()
self.log_threads[service_name] = log_thread
# Wait for startup
time.sleep(config.startup_delay)
# Check if process is still running
if process.poll() is None:
self.logger.info(f"✅ {service_name} started successfully (PID: {process.pid})")
return True
else:
self.logger.error(f"❌ {service_name} failed to start")
return False
except Exception as e:
self.logger.error(f"❌ Failed to start {service_name}: {e}")
return False
def _monitor_service_logs(self, service_name: str, process: subprocess.Popen):
"""Monitor service logs and forward to main logger."""
service_logger = logging.getLogger(service_name)
service_logger.setLevel(logging.INFO)
# Add file handler for this service
file_handler = logging.FileHandler(self.logs_dir / f'{service_name}.log')
file_handler.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
service_logger.addHandler(file_handler)
try:
for line in iter(process.stdout.readline, ''):
if line.strip():
# Create log record with service context
record = logging.LogRecord(
name=service_name,
level=logging.INFO,
pathname='',
lineno=0,
msg=line.strip(),
args=(),
exc_info=None
)
record.service = service_name
# Log to both service file and main console
service_logger.handle(record)
self.logger.handle(record)
except Exception as e:
self.logger.error(f"Error monitoring {service_name} logs: {e}")
def health_check(self, service_name: str, config: ServiceConfig) -> bool:
"""Perform health check on a service."""
try:
url = f"http://localhost:{config.port}{config.health_check_path}"
response = requests.get(url, timeout=5)
return response.status_code == 200
except:
return False
def start_all(self, skip_frontend: bool = False) -> bool:
"""Start all services in order."""
self.logger.info("🚀 Starting RAG System Components...")
if not self.check_prerequisites():
return False
self.running = True
failed_services = []
# Start services in dependency order
service_order = ['ollama', 'rag-api', 'backend']
if not skip_frontend and 'frontend' in self.services:
service_order.append('frontend')
for service_name in service_order:
if service_name not in self.services:
continue
config = self.services[service_name]
# Special handling for Ollama
if service_name == 'ollama':
if not self._start_ollama():
if config.required:
failed_services.append(service_name)
continue
else:
self.logger.warning(f"⚠️ Skipping optional service: {service_name}")
continue
else:
if not self.start_service(service_name, config):
if config.required:
failed_services.append(service_name)
else:
self.logger.warning(f"⚠️ Skipping optional service: {service_name}")
if failed_services:
self.logger.error(f"❌ Failed to start required services: {', '.join(failed_services)}")
return False
# Print status summary
self._print_status_summary()
return True
def _start_ollama(self) -> bool:
"""Special handling for Ollama startup."""
# Check if Ollama is already running
if self.is_port_in_use(11434):
self.logger.info("✅ Ollama already running")
self.ensure_models()
return True
# Start Ollama
if self.start_service('ollama', self.services['ollama']):
self.ensure_models()
return True
return False
def _print_status_summary(self):
"""Print system status summary."""
self.logger.info("")
self.logger.info("🎉 RAG System Started!")
self.logger.info("📊 Services Status:")
for service_name, config in self.services.items():
if service_name in self.processes or self.is_port_in_use(config.port):
status = "✅ Running"
url = f"http://localhost:{config.port}"
self.logger.info(f" • {service_name.capitalize():<10}: {status:<10} {url}")
else:
self.logger.info(f" • {service_name.capitalize():<10}: ❌ Stopped")
self.logger.info("")
self.logger.info("🌐 Access your RAG system at: http://localhost:3000")
self.logger.info("")
self.logger.info("📋 Useful commands:")
self.logger.info(" • Stop system: Ctrl+C")
self.logger.info(" • Check logs: tail -f logs/*.log")
self.logger.info(" • Health check: python run_system.py --health")
def shutdown(self):
"""Gracefully shutdown all services."""
if not self.running:
return
self.logger.info("🛑 Shutting down RAG system...")
self.running = False
# Stop services in reverse order
for service_name in reversed(list(self.processes.keys())):
self._stop_service(service_name)
self.logger.info("✅ All services stopped")
def _stop_service(self, service_name: str):
"""Stop a single service."""
if service_name not in self.processes:
return
process = self.processes[service_name]
self.logger.info(f"🔄 Stopping {service_name}...")
try:
# Try graceful shutdown first
process.terminate()
# Wait up to 10 seconds for graceful shutdown
try:
process.wait(timeout=10)
except subprocess.TimeoutExpired:
# Force kill if graceful shutdown fails
process.kill()
process.wait()
self.logger.info(f"✅ {service_name} stopped")
except Exception as e:
self.logger.error(f"❌ Error stopping {service_name}: {e}")
finally:
del self.processes[service_name]
def monitor(self):
"""Monitor running services and restart if needed."""
self.logger.info("👁️ Monitoring services... (Press Ctrl+C to stop)")
try:
while self.running:
time.sleep(30) # Check every 30 seconds
for service_name, process in list(self.processes.items()):
if process.poll() is not None:
self.logger.warning(f"⚠️ {service_name} has stopped unexpectedly")
# Restart the service
config = self.services[service_name]
if config.required:
self.logger.info(f"🔄 Restarting {service_name}...")
del self.processes[service_name]
self.start_service(service_name, config)
except KeyboardInterrupt:
self.logger.info("Monitoring stopped by user")
def main():
    """Main entry point: parse CLI flags and drive the service manager."""
    parser = argparse.ArgumentParser(description='RAG System Unified Launcher')
    parser.add_argument('--mode', choices=['dev', 'prod'], default='dev',
                        help='Run mode (default: dev)')
    parser.add_argument('--logs-only', action='store_true',
                        help='Only show aggregated logs from running services')
    parser.add_argument('--no-frontend', action='store_true',
                        help='Skip frontend startup')
    parser.add_argument('--health', action='store_true',
                        help='Check health of running services')
    parser.add_argument('--stop', action='store_true',
                        help='Stop all running services')
    args = parser.parse_args()
    # Create service manager
    manager = ServiceManager(mode=args.mode)
    try:
        if args.health:
            # Health check mode
            manager._print_status_summary()
            return
        if args.stop:
            # Stop mode - kill any running processes
            # NOTE(review): this flag is a stub — nothing is actually stopped.
            manager.logger.info("🛑 Stopping all RAG system processes...")
            # Implementation for stopping would go here
            return
        if args.logs_only:
            # Logs only mode - just tail existing logs
            # NOTE(review): monitor() loops on `manager.running`, which is
            # still False here, so this mode returns immediately — confirm
            # the intended behavior.
            manager.logger.info("📋 Showing aggregated logs... (Press Ctrl+C to stop)")
            manager.monitor()
            return
        # Normal startup mode
        if manager.start_all(skip_frontend=args.no_frontend):
            manager.monitor()
        else:
            manager.logger.error("❌ System startup failed")
            sys.exit(1)
    except KeyboardInterrupt:
        manager.logger.info("Received interrupt signal")
    finally:
        # Always tear children down, even on error paths.
        manager.shutdown()
if __name__ == "__main__":
main() | {
"repo_id": "PromtEngineer/localGPT",
"file_path": "run_system.py",
"license": "MIT License",
"lines": 446,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PromtEngineer/localGPT:system_health_check.py | #!/usr/bin/env python3
"""
System Health Check for RAG System
Quick validation of configurations, models, and data access.
"""
import sys
import traceback
from pathlib import Path
def print_status(message, success=None):
    """Print a status line prefixed with an emoji.

    success=True -> check mark, success=False -> cross,
    success=None -> magnifying glass (informational).
    """
    if success is True:
        prefix = "✅"
    elif success is False:
        prefix = "❌"
    else:
        prefix = "🔍"
    print(f"{prefix} {message}")
def check_imports():
    """Verify that the core rag_system entry points can be imported.

    Returns True on success, False (after printing the error) otherwise.
    """
    print_status("Testing basic imports...")
    try:
        # Importing the main module pulls in the whole dependency chain,
        # so this single import is a broad smoke test.
        from rag_system.main import get_agent, EXTERNAL_MODELS, OLLAMA_CONFIG, PIPELINE_CONFIGS
        print_status("Basic imports successful", True)
        return True
    except Exception as exc:
        print_status(f"Import failed: {exc}", False)
        return False
def check_configurations():
    """Dump the active model/pipeline configuration and sanity-check it.

    Returns True unless the configuration could not even be loaded.
    """
    print_status("Checking configurations...")
    try:
        from rag_system.main import EXTERNAL_MODELS, OLLAMA_CONFIG, PIPELINE_CONFIGS
        for label, cfg in (
            ("External Models", EXTERNAL_MODELS),
            ("Ollama Config", OLLAMA_CONFIG),
            ("Pipeline Configs", PIPELINE_CONFIGS),
        ):
            print(f"📊 {label}: {cfg}")
        # Flag the most common misconfiguration: an embedding model whose
        # vector dimension does not match the previously indexed data.
        embedding_model = EXTERNAL_MODELS.get("embedding_model", "Unknown")
        if "bge-small" in embedding_model:
            print_status(f"Embedding model: {embedding_model} (384 dims)", True)
        elif "Qwen3-Embedding" in embedding_model:
            print_status(f"Embedding model: {embedding_model} (1024 dims) - Check data compatibility!", None)
        else:
            print_status(f"Embedding model: {embedding_model} - Verify dimensions!", None)
        print_status("Configuration check completed", True)
        return True
    except Exception as exc:
        print_status(f"Configuration check failed: {exc}", False)
        return False
def check_agent_initialization():
    """Instantiate the default agent.

    Returns the agent instance on success, or None (with a traceback
    printed) when construction fails.
    """
    print_status("Testing agent initialization...")
    try:
        from rag_system.main import get_agent
        agent = get_agent('default')
        print_status("Agent initialization successful", True)
        return agent
    except Exception as exc:
        print_status(f"Agent initialization failed: {exc}", False)
        # Full traceback: initialization failures are usually deep in the stack.
        traceback.print_exc()
        return None
def check_embedding_model(agent):
    """Embed a probe string and report the model name and vector size.

    Returns True when an embedding was produced, False otherwise.
    """
    print_status("Testing embedding model...")
    try:
        embedder = agent.retrieval_pipeline._get_text_embedder()
        probe = embedder.create_embeddings(['test'])
        model_name = getattr(embedder.model, 'name_or_path', 'Unknown')
        dimensions = probe.shape[1]
        print_status(f"Embedding model: {model_name}", True)
        print_status(f"Vector dimension: {dimensions}", True)
        # Warn about dimension compatibility with previously indexed data.
        if dimensions == 384:
            print_status("Using 384-dim embeddings (bge-small compatible)", True)
        elif dimensions == 1024:
            print_status("Using 1024-dim embeddings (Qwen3 compatible) - Ensure data compatibility!", None)
        return True
    except Exception as exc:
        print_status(f"Embedding model test failed: {exc}", False)
        return False
def check_database_access():
    """Connect to the local LanceDB store and list its tables.

    Returns True when the connection succeeds (even with zero tables).
    """
    print_status("Testing database access...")
    try:
        import lancedb
        db = lancedb.connect('./lancedb')
        tables = db.table_names()
        print_status(f"LanceDB connected - {len(tables)} tables available", True)
        if not tables:
            print_status("No tables found - may need to index documents first", None)
            return True
        print("📋 Available tables:")
        # Cap the listing at five tables to keep the report short.
        for table in tables[:5]:
            print(f" - {table}")
        if len(tables) > 5:
            print(f" ... and {len(tables) - 5} more")
        return True
    except Exception as exc:
        print_status(f"Database access failed: {exc}", False)
        return False
def check_sample_query(agent):
    """Run one end-to-end retrieval query against the first available table.

    Returns True when the query executed (even with an empty answer);
    False only on an exception.
    """
    print_status("Testing sample query...")
    try:
        import lancedb
        tables = lancedb.connect('./lancedb').table_names()
        if not tables:
            print_status("No tables available for query test", None)
            return True
        # Use first available table
        table_name = tables[0]
        print_status(f"Testing query on table: {table_name}")
        result = agent.run('what is this document about?', table_name=table_name)
        if result and 'answer' in result:
            print_status("Sample query successful", True)
            print(f"📝 Answer preview: {result['answer'][:100]}...")
            print(f"📊 Found {len(result.get('source_documents', []))} source documents")
        else:
            print_status("Query returned empty result", None)
        return True
    except Exception as exc:
        print_status(f"Sample query failed: {exc}", False)
        return False
def main():
    """Run the full health-check suite and return a process exit code."""
    print("🏥 RAG System Health Check")
    print("=" * 50)
    total_checks = 6
    passed = 0
    # Environment-level checks that do not need an initialized agent.
    for check in (check_imports, check_configurations, check_database_access):
        if check():
            passed += 1
    # Agent-dependent checks: only run when the agent came up at all.
    agent = check_agent_initialization()
    if agent:
        passed += 1
        if check_embedding_model(agent):
            passed += 1
        if check_sample_query(agent):
            passed += 1
    # Summary
    print("\n" + "=" * 50)
    print(f"🏥 Health Check Complete: {passed}/{total_checks} checks passed")
    if passed == total_checks:
        print_status("System is healthy! 🎉", True)
        return 0
    elif passed >= total_checks - 1:
        # One failing check is tolerated as "mostly healthy".
        print_status("System mostly healthy with minor issues", None)
        return 0
    else:
        print_status("System has significant issues that need attention", False)
        return 1
if __name__ == "__main__":
sys.exit(main()) | {
"repo_id": "PromtEngineer/localGPT",
"file_path": "system_health_check.py",
"license": "MIT License",
"lines": 157,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
RVC-Boss/GPT-SoVITS:GPT_SoVITS/module/ddp_utils.py | import torch
from torch.nn.parallel import DistributedDataParallel
from torch.nn.parallel.distributed import _find_tensors
from packaging import version
# from https://github.com/Lightning-AI/lightning-bolts/blob/5d61197cd2f491f69e238137a5edabe80ae14ad9/pl_bolts/models/self_supervised/simclr/simclr_module.py#L20
# from https://github.com/Lightning-AI/lightning-bolts/blob/5d61197cd2f491f69e238137a5edabe80ae14ad9/pl_bolts/models/self_supervised/simclr/simclr_module.py#L20
class SyncFunction(torch.autograd.Function):
    """all_gather across ranks that tolerates ragged per-rank batch sizes.

    Every rank's batch is cropped to the global minimum batch size before
    gathering so the all_gather shapes line up; the gathered result has
    shape (min_bs * world_size, *tensor.shape[1:]).
    """

    @staticmethod
    # @torch.no_grad()
    def forward(ctx, tensor):
        world_size = torch.distributed.get_world_size()
        # Collect batch sizes from all processes
        local_bs = torch.tensor([tensor.shape[0]], device=tensor.device)
        batch_sizes = [torch.zeros_like(local_bs) for _ in range(world_size)]
        torch.distributed.all_gather(batch_sizes, local_bs)
        # Convert to integer list and find the minimum
        batch_sizes_int = [bs.item() for bs in batch_sizes]
        min_bs = min(batch_sizes_int)
        # Crop the tensor to the minimum batch size if needed
        cropped_tensor = tensor[:min_bs] if tensor.shape[0] > min_bs else tensor
        # Prepare for gathering
        out_shape = (min_bs * world_size,) + tensor.shape[1:]
        gathered_tensor = torch.zeros(out_shape, dtype=tensor.dtype, device=tensor.device)
        # Build tensor list for all_gather
        tensor_list = list(torch.chunk(gathered_tensor, world_size))
        # Perform all_gather using the cropped tensors
        torch.distributed.all_gather(tensor_list, cropped_tensor)
        # Save for backward pass
        ctx.min_bs = min_bs
        ctx.world_size = world_size
        ctx.orig_shape = tensor.shape
        return gathered_tensor

    @staticmethod
    def backward(ctx, grad_output):
        # NOTE(review): backward is deliberately disabled — the assert below
        # makes everything after it unreachable. If it were re-enabled, the
        # dead code would fail anyway: it reads ctx.batch_size, but forward
        # stores ctx.min_bs, and the returned slice would not match the
        # original (uncropped) input shape when cropping occurred.
        assert False
        grad_input = grad_output.clone()
        torch.distributed.all_reduce(grad_input, op=torch.distributed.ReduceOp.SUM, async_op=False)
        idx_from = torch.distributed.get_rank() * ctx.batch_size
        idx_to = (torch.distributed.get_rank() + 1) * ctx.batch_size
        return grad_input[idx_from:idx_to]
class DDP(DistributedDataParallel):
    """
    Override the forward call in lightning so it goes to training and validation step respectively

    The body mirrors upstream DistributedDataParallel.forward, with the call
    to self.module(...) replaced by a dispatch to training_step / test_step /
    validation_step based on the wrapped module's flags.
    """

    def forward(self, *inputs, **kwargs):  # pragma: no cover
        # Legacy path for torch < 1.11, which exposed _sync_params directly.
        if version.parse(torch.__version__[:6]) < version.parse("1.11"):
            self._sync_params()
            inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
            assert len(self.device_ids) == 1
            if self.module.training:
                output = self.module.training_step(*inputs[0], **kwargs[0])
            elif self.module.testing:
                output = self.module.test_step(*inputs[0], **kwargs[0])
            else:
                output = self.module.validation_step(*inputs[0], **kwargs[0])
            if torch.is_grad_enabled():
                # We'll return the output object verbatim since it is a freeform
                # object. We need to find any tensors in this object, though,
                # because we need to figure out which parameters were used during
                # this forward pass, to ensure we short circuit reduction for any
                # unused parameters. Only if `find_unused_parameters` is set.
                if self.find_unused_parameters:
                    self.reducer.prepare_for_backward(list(_find_tensors(output)))
                else:
                    self.reducer.prepare_for_backward([])
        else:
            # torch >= 1.11: reproduce the upstream forward, importing the
            # private helpers it relies on lazily (they only exist here).
            from torch.nn.parallel.distributed import (
                Join,
                _DDPSink,
                _tree_flatten_with_rref,
                _tree_unflatten_with_rref,
            )

            with torch.autograd.profiler.record_function("DistributedDataParallel.forward"):
                if torch.is_grad_enabled() and self.require_backward_grad_sync:
                    self.logger.set_runtime_stats_and_log()
                    self.num_iterations += 1
                    self.reducer.prepare_for_forward()
                # Notify the join context that this process has not joined, if
                # needed
                work = Join.notify_join_context(self)
                if work:
                    self.reducer._set_forward_pass_work_handle(work, self._divide_by_initial_world_size)
                # Calling _rebuild_buckets before forward compuation,
                # It may allocate new buckets before deallocating old buckets
                # inside _rebuild_buckets. To save peak memory usage,
                # call _rebuild_buckets before the peak memory usage increases
                # during forward computation.
                # This should be called only once during whole training period.
                if torch.is_grad_enabled() and self.reducer._rebuild_buckets():
                    print("Reducer buckets have been rebuilt in this iteration.")
                    self._has_rebuilt_buckets = True
                # sync params according to location (before/after forward) user
                # specified as part of hook, if hook was specified.
                buffer_hook_registered = hasattr(self, "buffer_hook")
                if self._check_sync_bufs_pre_fwd():
                    self._sync_buffers()
                if self._join_config.enable:
                    # Notify joined ranks whether they should sync in backwards pass or not.
                    self._check_global_requires_backward_grad_sync(is_joined_rank=False)
                inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
                # Dispatch to the Lightning-style step method instead of
                # calling self.module(...) as upstream does.
                if self.module.training:
                    output = self.module.training_step(*inputs[0], **kwargs[0])
                elif self.module.testing:
                    output = self.module.test_step(*inputs[0], **kwargs[0])
                else:
                    output = self.module.validation_step(*inputs[0], **kwargs[0])
                # sync params according to location (before/after forward) user
                # specified as part of hook, if hook was specified.
                if self._check_sync_bufs_post_fwd():
                    self._sync_buffers()
                if torch.is_grad_enabled() and self.require_backward_grad_sync:
                    self.require_forward_param_sync = True
                    # We'll return the output object verbatim since it is a freeform
                    # object. We need to find any tensors in this object, though,
                    # because we need to figure out which parameters were used during
                    # this forward pass, to ensure we short circuit reduction for any
                    # unused parameters. Only if `find_unused_parameters` is set.
                    if self.find_unused_parameters and not self.static_graph:
                        # Do not need to populate this for static graph.
                        self.reducer.prepare_for_backward(list(_find_tensors(output)))
                    else:
                        self.reducer.prepare_for_backward([])
                else:
                    self.require_forward_param_sync = False
                # TODO: DDPSink is currently enabled for unused parameter detection and
                # static graph training for first iteration.
                if (self.find_unused_parameters and not self.static_graph) or (
                    self.static_graph and self.num_iterations == 1
                ):
                    state_dict = {
                        "static_graph": self.static_graph,
                        "num_iterations": self.num_iterations,
                    }
                    output_tensor_list, treespec, output_is_rref = _tree_flatten_with_rref(output)
                    output_placeholders = [None for _ in range(len(output_tensor_list))]
                    # Do not touch tensors that have no grad_fn, which can cause issues
                    # such as https://github.com/pytorch/pytorch/issues/60733
                    for i, output in enumerate(output_tensor_list):
                        if torch.is_tensor(output) and output.grad_fn is None:
                            output_placeholders[i] = output
                    # When find_unused_parameters=True, makes tensors which require grad
                    # run through the DDPSink backward pass. When not all outputs are
                    # used in loss, this makes those corresponding tensors receive
                    # undefined gradient which the reducer then handles to ensure
                    # param.grad field is not touched and we don't error out.
                    passthrough_tensor_list = _DDPSink.apply(
                        self.reducer,
                        state_dict,
                        *output_tensor_list,
                    )
                    for i in range(len(output_placeholders)):
                        if output_placeholders[i] is None:
                            output_placeholders[i] = passthrough_tensor_list[i]
                    # Reconstruct output data structure.
                    output = _tree_unflatten_with_rref(output_placeholders, treespec, output_is_rref)
        return output
| {
"repo_id": "RVC-Boss/GPT-SoVITS",
"file_path": "GPT_SoVITS/module/ddp_utils.py",
"license": "MIT License",
"lines": 155,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
RVC-Boss/GPT-SoVITS:GPT_SoVITS/module/distrib.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Torch distributed utilities."""
import typing as tp
import torch
def rank():
    """Return this process's distributed rank (0 when not distributed)."""
    if not torch.distributed.is_initialized():
        return 0
    return torch.distributed.get_rank()
def world_size():
    """Return the number of distributed workers (1 when not distributed)."""
    if not torch.distributed.is_initialized():
        return 1
    return torch.distributed.get_world_size()
def is_distributed():
    """True when running under torch.distributed with more than one worker."""
    return world_size() > 1
def all_reduce(tensor: torch.Tensor, op=torch.distributed.ReduceOp.SUM):
    """In-place all_reduce of `tensor` across workers; no-op when not distributed."""
    if is_distributed():
        return torch.distributed.all_reduce(tensor, op)
def _is_complex_or_float(tensor):
return torch.is_floating_point(tensor) or torch.is_complex(tensor)
def _check_number_of_params(params: tp.List[torch.Tensor]):
    """Verify that every worker holds the same number of params.

    A mismatched count would deadlock the subsequent collective ops, so
    fail fast with a clear error instead.
    """
    if not is_distributed() or not params:
        return
    count = torch.tensor([len(params)], device=params[0].device, dtype=torch.long)
    all_reduce(count)
    # After the SUM reduce, equal counts everywhere imply count == len * world_size.
    if count.item() != len(params) * world_size():
        # If not all the workers have the same number, for at least one of them,
        # this inequality will be verified.
        raise RuntimeError(
            f"Mismatch in number of params: ours is {len(params)}, at least one worker has a different one."
        )
def broadcast_tensors(tensors: tp.Iterable[torch.Tensor], src: int = 0):
    """Broadcast the tensors from the given parameters to all workers.
    This can be used to ensure that all workers have the same model to start with.
    """
    if not is_distributed():
        return
    # Only float/complex tensors are broadcast; integer tensors are skipped.
    eligible = [tensor for tensor in tensors if _is_complex_or_float(tensor)]
    _check_number_of_params(eligible)
    # Launch every broadcast asynchronously, then wait for all of them.
    pending = [
        torch.distributed.broadcast(tensor.data, src=src, async_op=True)
        for tensor in eligible
    ]
    for handle in pending:
        handle.wait()
def sync_buffer(buffers, average=True):
    """
    Sync grad for buffers. If average is False, broadcast instead of averaging.

    Floating-point buffers are either averaged across workers (SUM reduce
    followed by division by the world size) or broadcast from rank 0.
    Non-floating buffers are skipped entirely. No-op when not distributed.
    """
    if not is_distributed():
        return
    handles = []
    for buffer in buffers:
        # Only float buffers participate; integer buffers (e.g. counters)
        # are left untouched.
        if torch.is_floating_point(buffer.data):
            if average:
                handle = torch.distributed.all_reduce(buffer.data, op=torch.distributed.ReduceOp.SUM, async_op=True)
            else:
                handle = torch.distributed.broadcast(buffer.data, src=0, async_op=True)
            handles.append((buffer, handle))
    for buffer, handle in handles:
        handle.wait()
        if average:
            # BUG FIX: world_size is a function — the original divided by the
            # function object itself (`buffer.data /= world_size`), which
            # raises TypeError at runtime. Sibling sync_grad() calls it
            # correctly.
            buffer.data /= world_size()
def sync_grad(params):
    """
    Simpler alternative to DistributedDataParallel, that doesn't rely
    on any black magic. For simple models it can also be as fast.
    Just call this on your model parameters after the call to backward!
    """
    if not is_distributed():
        return
    # Kick off one async SUM reduce per parameter that actually has a grad.
    pending = [
        (p, torch.distributed.all_reduce(p.grad.data, op=torch.distributed.ReduceOp.SUM, async_op=True))
        for p in params
        if p.grad is not None
    ]
    divisor = world_size()
    for p, handle in pending:
        handle.wait()
        # Turn the sum into an average.
        p.grad.data /= divisor
def average_metrics(metrics: tp.Dict[str, float], count=1.0):
    """Average a dictionary of metrics across all workers, using the optional
    `count` as unormalized weight.
    """
    if not is_distributed():
        return metrics
    keys, values = zip(*metrics.items())
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # Pack the values plus a trailing weight slot into one tensor so a
    # single all_reduce handles both the sums and the total weight.
    packed = torch.tensor(list(values) + [1], device=device, dtype=torch.float32)
    packed *= count
    all_reduce(packed)
    # Renormalize by the accumulated weight in the last slot.
    averaged = (packed[:-1] / packed[-1]).cpu().tolist()
    return dict(zip(keys, averaged))
| {
"repo_id": "RVC-Boss/GPT-SoVITS",
"file_path": "GPT_SoVITS/module/distrib.py",
"license": "MIT License",
"lines": 100,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
RVC-Boss/GPT-SoVITS:GPT_SoVITS/prepare_datasets/2-get-sv.py | # -*- coding: utf-8 -*-
import sys
import os
inp_text = os.environ.get("inp_text")
inp_wav_dir = os.environ.get("inp_wav_dir")
exp_name = os.environ.get("exp_name")
i_part = os.environ.get("i_part")
all_parts = os.environ.get("all_parts")
if "_CUDA_VISIBLE_DEVICES" in os.environ:
os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["_CUDA_VISIBLE_DEVICES"]
opt_dir = os.environ.get("opt_dir")
sv_path = os.environ.get("sv_path")
import torch
is_half = eval(os.environ.get("is_half", "True")) and torch.cuda.is_available()
import traceback
import torchaudio
now_dir = os.getcwd()
sys.path.append(now_dir)
sys.path.append(f"{now_dir}/GPT_SoVITS/eres2net")
from tools.my_utils import clean_path
from time import time as ttime
import shutil
from ERes2NetV2 import ERes2NetV2
import kaldi as Kaldi
def my_save(fea, path):
    """Save `fea` to `path` via an ASCII-named temp file.

    Works around torch.save failing on paths containing Chinese characters:
    write to a plain-named temp file in the CWD first, then move it into
    place with shutil (which handles such paths fine).
    """
    dest_dir = os.path.dirname(path)
    base_name = os.path.basename(path)
    # Timestamp + shard index keeps the temp name unique across workers.
    tmp_path = "%s%s.pth" % (ttime(), i_part)
    torch.save(fea, tmp_path)
    shutil.move(tmp_path, "%s/%s" % (dest_dir, base_name))
sv_cn_dir = "%s/7-sv_cn" % (opt_dir)
wav32dir = "%s/5-wav32k" % (opt_dir)
os.makedirs(opt_dir, exist_ok=True)
os.makedirs(sv_cn_dir, exist_ok=True)
os.makedirs(wav32dir, exist_ok=True)
maxx = 0.95
alpha = 0.5
if torch.cuda.is_available():
device = "cuda:0"
# elif torch.backends.mps.is_available():
# device = "mps"
else:
device = "cpu"
class SV:
    """Speaker-verification embedding extractor (ERes2NetV2 backbone).

    Loads the pretrained checkpoint from the module-level `sv_path` and
    resamples 32 kHz input down to the 16 kHz the model expects.
    """

    def __init__(self, device, is_half):
        pretrained_state = torch.load(sv_path, map_location="cpu")
        embedding_model = ERes2NetV2(baseWidth=24, scale=4, expansion=4)
        embedding_model.load_state_dict(pretrained_state)
        embedding_model.eval()
        self.embedding_model = embedding_model
        # 32k -> 16k: the SV model consumes 16 kHz fbank features.
        self.res = torchaudio.transforms.Resample(32000, 16000).to(device)
        if is_half == False:
            self.embedding_model = self.embedding_model.to(device)
        else:
            self.embedding_model = self.embedding_model.half().to(device)
        self.is_half = is_half

    def compute_embedding3(self, wav):  # (1,x)#-1~1
        """Return the speaker embedding for a batch of 32 kHz waveforms in [-1, 1]."""
        with torch.no_grad():
            wav = self.res(wav)
            if self.is_half == True:
                wav = wav.half()
            # 80-bin fbank per channel, no dither for determinism.
            feat = torch.stack(
                [Kaldi.fbank(wav0.unsqueeze(0), num_mel_bins=80, sample_frequency=16000, dither=0) for wav0 in wav]
            )
            sv_emb = self.embedding_model.forward3(feat)
        return sv_emb
sv = SV(device, is_half)
def name2go(wav_name, wav_path):
    """Compute and cache the speaker-verification embedding for one clip.

    Skips clips whose embedding file already exists (resumable runs).
    """
    sv_cn_path = "%s/%s.pt" % (sv_cn_dir, wav_name)
    if os.path.exists(sv_cn_path):
        return
    # NOTE(review): the wav_path argument is immediately overwritten here, so
    # the path computed by the caller is ignored and audio is always read
    # from the 5-wav32k directory — confirm this is intentional.
    wav_path = "%s/%s" % (wav32dir, wav_name)
    wav32k, sr0 = torchaudio.load(wav_path)
    assert sr0 == 32000
    wav32k = wav32k.to(device)
    emb = sv.compute_embedding3(wav32k).cpu()  # torch.Size([1, 20480])
    my_save(emb, sv_cn_path)
# Read the annotation list; each worker processes an interleaved slice of it
# (rows i_part, i_part + all_parts, ...).
with open(inp_text, "r", encoding="utf8") as f:
    lines = f.read().strip("\n").split("\n")

for line in lines[int(i_part) :: int(all_parts)]:
    try:
        # Annotation format: wav_name|speaker|language|text
        wav_name, spk_name, language, text = line.split("|")
        wav_name = clean_path(wav_name)
        if inp_wav_dir != "" and inp_wav_dir != None:
            # Paths in the list are relative to the configured wav directory.
            wav_name = os.path.basename(wav_name)
            wav_path = "%s/%s" % (inp_wav_dir, wav_name)
        else:
            # Otherwise the list entry is itself the full path.
            wav_path = wav_name
            wav_name = os.path.basename(wav_name)
        name2go(wav_name, wav_path)
    except:
        # Best-effort batch job: log the bad row and keep going.
        print(line, traceback.format_exc())
| {
"repo_id": "RVC-Boss/GPT-SoVITS",
"file_path": "GPT_SoVITS/prepare_datasets/2-get-sv.py",
"license": "MIT License",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
RVC-Boss/GPT-SoVITS:GPT_SoVITS/eres2net/ERes2Net.py | # Copyright 3D-Speaker (https://github.com/alibaba-damo-academy/3D-Speaker). All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
Res2Net implementation is adapted from https://github.com/wenet-e2e/wespeaker.
ERes2Net incorporates both local and global feature fusion techniques to improve the performance.
The local feature fusion (LFF) fuses the features within one single residual block to extract the local signal.
The global feature fusion (GFF) takes acoustic features of different scales as input to aggregate global signal.
"""
import torch
import math
import torch.nn as nn
import torch.nn.functional as F
import pooling_layers as pooling_layers
from fusion import AFF
class ReLU(nn.Hardtanh):
    """ReLU clipped to [0, 20] (a "ReLU20"), implemented via Hardtanh."""

    def __init__(self, inplace=False):
        # Hardtanh(min_val=0, max_val=20) == min(max(x, 0), 20)
        super(ReLU, self).__init__(0, 20, inplace)

    def __repr__(self):
        suffix = "inplace" if self.inplace else ""
        return "{} ({})".format(self.__class__.__name__, suffix)
class BasicBlockERes2Net(nn.Module):
    """Res2Net-style basic block with local feature fusion by simple addition.

    The 1x1 bottleneck output is split into `scale` channel groups; each group
    after the first is summed with the previous group's output before its own
    3x3 conv, giving multi-scale receptive fields inside one block.
    """

    # Output channels = planes * expansion
    expansion = 2

    def __init__(self, in_planes, planes, stride=1, baseWidth=32, scale=2):
        super(BasicBlockERes2Net, self).__init__()
        # Width (channels) of each group inside the bottleneck.
        width = int(math.floor(planes * (baseWidth / 64.0)))
        self.conv1 = nn.Conv2d(in_planes, width * scale, kernel_size=1, stride=stride, bias=False)
        self.bn1 = nn.BatchNorm2d(width * scale)
        self.nums = scale
        convs = []
        bns = []
        # One 3x3 conv + BN per channel group.
        for i in range(self.nums):
            convs.append(nn.Conv2d(width, width, kernel_size=3, padding=1, bias=False))
            bns.append(nn.BatchNorm2d(width))
        self.convs = nn.ModuleList(convs)
        self.bns = nn.ModuleList(bns)
        self.relu = ReLU(inplace=True)
        self.conv3 = nn.Conv2d(width * scale, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        # Projection shortcut when spatial size or channel count changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )
        self.stride = stride
        self.width = width
        self.scale = scale

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        # Split the bottleneck channels into `scale` groups of `width` channels.
        spx = torch.split(out, self.width, 1)
        for i in range(self.nums):
            if i == 0:
                sp = spx[i]
            else:
                # Local feature fusion: add the previous group's processed output.
                sp = sp + spx[i]
            sp = self.convs[i](sp)
            sp = self.relu(self.bns[i](sp))
            if i == 0:
                out = sp
            else:
                out = torch.cat((out, sp), 1)
        out = self.conv3(out)
        out = self.bn3(out)
        residual = self.shortcut(x)
        out += residual
        out = self.relu(out)
        return out
class BasicBlockERes2Net_diff_AFF(nn.Module):
    """Res2Net basic block where adjacent channel groups are fused with AFF
    (attentional feature fusion) instead of plain addition.
    """

    # Output channels = planes * expansion
    expansion = 2

    def __init__(self, in_planes, planes, stride=1, baseWidth=32, scale=2):
        super(BasicBlockERes2Net_diff_AFF, self).__init__()
        # Width (channels) of each group inside the bottleneck.
        width = int(math.floor(planes * (baseWidth / 64.0)))
        self.conv1 = nn.Conv2d(in_planes, width * scale, kernel_size=1, stride=stride, bias=False)
        self.bn1 = nn.BatchNorm2d(width * scale)
        self.nums = scale
        convs = []
        fuse_models = []
        bns = []
        for i in range(self.nums):
            convs.append(nn.Conv2d(width, width, kernel_size=3, padding=1, bias=False))
            bns.append(nn.BatchNorm2d(width))
        # One AFF per pair of adjacent groups (nums - 1 fusions).
        for j in range(self.nums - 1):
            fuse_models.append(AFF(channels=width))
        self.convs = nn.ModuleList(convs)
        self.bns = nn.ModuleList(bns)
        self.fuse_models = nn.ModuleList(fuse_models)
        self.relu = ReLU(inplace=True)
        self.conv3 = nn.Conv2d(width * scale, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        # Projection shortcut when spatial size or channel count changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )
        self.stride = stride
        self.width = width
        self.scale = scale

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        # Split the bottleneck channels into `scale` groups of `width` channels.
        spx = torch.split(out, self.width, 1)
        for i in range(self.nums):
            if i == 0:
                sp = spx[i]
            else:
                # Attentional fusion of the previous group's output with this group.
                sp = self.fuse_models[i - 1](sp, spx[i])
            sp = self.convs[i](sp)
            sp = self.relu(self.bns[i](sp))
            if i == 0:
                out = sp
            else:
                out = torch.cat((out, sp), 1)
        out = self.conv3(out)
        out = self.bn3(out)
        residual = self.shortcut(x)
        out += residual
        out = self.relu(out)
        return out
class ERes2Net(nn.Module):
    """ERes2Net speaker-embedding backbone.

    Four Res2Net stages (the last two use AFF-fused blocks) with bottom-up
    global feature fusion: each stage's output is merged via AFF with a
    downsampled copy of the fused shallower features before pooling into a
    fixed-size embedding.
    """

    def __init__(
        self,
        block=BasicBlockERes2Net,
        block_fuse=BasicBlockERes2Net_diff_AFF,
        num_blocks=[3, 4, 6, 3],
        m_channels=32,
        feat_dim=80,
        embedding_size=192,
        pooling_func="TSTP",
        two_emb_layer=False,
    ):
        super(ERes2Net, self).__init__()
        self.in_planes = m_channels
        self.feat_dim = feat_dim
        self.embedding_size = embedding_size
        # Frequency axis is halved by the three stride-2 stages (factor 8).
        self.stats_dim = int(feat_dim / 8) * m_channels * 8
        self.two_emb_layer = two_emb_layer
        self.conv1 = nn.Conv2d(1, m_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(m_channels)
        self.layer1 = self._make_layer(block, m_channels, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, m_channels * 2, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block_fuse, m_channels * 4, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block_fuse, m_channels * 8, num_blocks[3], stride=2)
        # Downsampling module for each layer (matches the next stage's resolution).
        self.layer1_downsample = nn.Conv2d(
            m_channels * 2, m_channels * 4, kernel_size=3, stride=2, padding=1, bias=False
        )
        self.layer2_downsample = nn.Conv2d(
            m_channels * 4, m_channels * 8, kernel_size=3, padding=1, stride=2, bias=False
        )
        self.layer3_downsample = nn.Conv2d(
            m_channels * 8, m_channels * 16, kernel_size=3, padding=1, stride=2, bias=False
        )
        # Bottom-up fusion modules
        self.fuse_mode12 = AFF(channels=m_channels * 4)
        self.fuse_mode123 = AFF(channels=m_channels * 8)
        self.fuse_mode1234 = AFF(channels=m_channels * 16)
        # TAP/TSDP pool to mean-only (1x) stats; TSTP/ASTP concatenate mean+std (2x).
        self.n_stats = 1 if pooling_func == "TAP" or pooling_func == "TSDP" else 2
        self.pool = getattr(pooling_layers, pooling_func)(in_dim=self.stats_dim * block.expansion)
        self.seg_1 = nn.Linear(self.stats_dim * block.expansion * self.n_stats, embedding_size)
        if self.two_emb_layer:
            self.seg_bn_1 = nn.BatchNorm1d(embedding_size, affine=False)
            self.seg_2 = nn.Linear(embedding_size, embedding_size)
        else:
            self.seg_bn_1 = nn.Identity()
            self.seg_2 = nn.Identity()

    def _make_layer(self, block, planes, num_blocks, stride):
        # First block in a stage may downsample; the rest keep stride 1.
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        """x: (B, T, F) fbank features -> (B, embedding_size) embedding."""
        x = x.permute(0, 2, 1)  # (B,T,F) => (B,F,T)
        x = x.unsqueeze_(1)
        out = F.relu(self.bn1(self.conv1(x)))
        out1 = self.layer1(out)
        out2 = self.layer2(out1)
        out1_downsample = self.layer1_downsample(out1)
        fuse_out12 = self.fuse_mode12(out2, out1_downsample)
        out3 = self.layer3(out2)
        fuse_out12_downsample = self.layer2_downsample(fuse_out12)
        fuse_out123 = self.fuse_mode123(out3, fuse_out12_downsample)
        out4 = self.layer4(out3)
        fuse_out123_downsample = self.layer3_downsample(fuse_out123)
        fuse_out1234 = self.fuse_mode1234(out4, fuse_out123_downsample)
        stats = self.pool(fuse_out1234)
        embed_a = self.seg_1(stats)
        if self.two_emb_layer:
            out = F.relu(embed_a)
            out = self.seg_bn_1(out)
            embed_b = self.seg_2(out)
            return embed_b
        else:
            return embed_a

    def forward3(self, x):
        """Like forward, but skips pooling/projection: returns the fused stage-4
        feature map flattened over (channel, freq) and averaged over time,
        giving one (B, C*F) utterance-level vector.
        """
        x = x.permute(0, 2, 1)  # (B,T,F) => (B,F,T)
        x = x.unsqueeze_(1)
        out = F.relu(self.bn1(self.conv1(x)))
        out1 = self.layer1(out)
        out2 = self.layer2(out1)
        out1_downsample = self.layer1_downsample(out1)
        fuse_out12 = self.fuse_mode12(out2, out1_downsample)
        out3 = self.layer3(out2)
        fuse_out12_downsample = self.layer2_downsample(fuse_out12)
        fuse_out123 = self.fuse_mode123(out3, fuse_out12_downsample)
        out4 = self.layer4(out3)
        fuse_out123_downsample = self.layer3_downsample(fuse_out123)
        fuse_out1234 = self.fuse_mode1234(out4, fuse_out123_downsample).flatten(start_dim=1, end_dim=2).mean(-1)
        return fuse_out1234
if __name__ == "__main__":
    # Smoke test: batch of 10 utterances, 300 frames x 80 mel bins each.
    x = torch.zeros(10, 300, 80)
    model = ERes2Net(feat_dim=80, embedding_size=192, pooling_func="TSTP")
    model.eval()
    out = model(x)
    print(out.shape)  # torch.Size([10, 192])

    num_params = sum(param.numel() for param in model.parameters())
    print("{} M".format(num_params / 1e6))  # 6.61M
| {
"repo_id": "RVC-Boss/GPT-SoVITS",
"file_path": "GPT_SoVITS/eres2net/ERes2Net.py",
"license": "MIT License",
"lines": 224,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
RVC-Boss/GPT-SoVITS:GPT_SoVITS/eres2net/ERes2NetV2.py | # Copyright 3D-Speaker (https://github.com/alibaba-damo-academy/3D-Speaker). All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
To further improve the short-duration feature extraction capability of ERes2Net, we expand the channel dimension
within each stage. However, this modification also increases the number of model parameters and computational complexity.
To alleviate this problem, we propose an improved ERes2NetV2 by pruning redundant structures, ultimately reducing
both the model parameters and its computational cost.
"""
import torch
import math
import torch.nn as nn
import torch.nn.functional as F
import pooling_layers as pooling_layers
from fusion import AFF
class ReLU(nn.Hardtanh):
    """Clipped ReLU: identity on [0, 20], saturating outside that range."""

    def __init__(self, inplace=False):
        # min_val=0, max_val=20 -> a ReLU capped at 20.
        super(ReLU, self).__init__(0, 20, inplace)

    def __repr__(self):
        return self.__class__.__name__ + " (" + ("inplace" if self.inplace else "") + ")"
class BasicBlockERes2NetV2(nn.Module):
    """ERes2NetV2 basic block: Res2Net bottleneck with additive local fusion.

    Unlike the V1 block, the output channel multiplier `expansion` is a
    constructor argument rather than a class attribute.
    """

    def __init__(self, in_planes, planes, stride=1, baseWidth=26, scale=2, expansion=2):
        super(BasicBlockERes2NetV2, self).__init__()
        # Width (channels) of each group inside the bottleneck.
        width = int(math.floor(planes * (baseWidth / 64.0)))
        self.conv1 = nn.Conv2d(in_planes, width * scale, kernel_size=1, stride=stride, bias=False)
        self.bn1 = nn.BatchNorm2d(width * scale)
        self.nums = scale
        self.expansion = expansion
        convs = []
        bns = []
        # One 3x3 conv + BN per channel group.
        for i in range(self.nums):
            convs.append(nn.Conv2d(width, width, kernel_size=3, padding=1, bias=False))
            bns.append(nn.BatchNorm2d(width))
        self.convs = nn.ModuleList(convs)
        self.bns = nn.ModuleList(bns)
        self.relu = ReLU(inplace=True)
        self.conv3 = nn.Conv2d(width * scale, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        # Projection shortcut when spatial size or channel count changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )
        self.stride = stride
        self.width = width
        self.scale = scale

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        # Split the bottleneck channels into `scale` groups of `width` channels.
        spx = torch.split(out, self.width, 1)
        for i in range(self.nums):
            if i == 0:
                sp = spx[i]
            else:
                # Local feature fusion: add the previous group's processed output.
                sp = sp + spx[i]
            sp = self.convs[i](sp)
            sp = self.relu(self.bns[i](sp))
            if i == 0:
                out = sp
            else:
                out = torch.cat((out, sp), 1)
        out = self.conv3(out)
        out = self.bn3(out)
        residual = self.shortcut(x)
        out += residual
        out = self.relu(out)
        return out
class BasicBlockERes2NetV2AFF(nn.Module):
    """ERes2NetV2 basic block with AFF-based fusion of adjacent channel groups
    (attentional local feature fusion instead of plain addition).
    """

    def __init__(self, in_planes, planes, stride=1, baseWidth=26, scale=2, expansion=2):
        super(BasicBlockERes2NetV2AFF, self).__init__()
        # Width (channels) of each group inside the bottleneck.
        width = int(math.floor(planes * (baseWidth / 64.0)))
        self.conv1 = nn.Conv2d(in_planes, width * scale, kernel_size=1, stride=stride, bias=False)
        self.bn1 = nn.BatchNorm2d(width * scale)
        self.nums = scale
        self.expansion = expansion
        convs = []
        fuse_models = []
        bns = []
        for i in range(self.nums):
            convs.append(nn.Conv2d(width, width, kernel_size=3, padding=1, bias=False))
            bns.append(nn.BatchNorm2d(width))
        # One AFF per pair of adjacent groups (nums - 1 fusions); r=4 bottleneck.
        for j in range(self.nums - 1):
            fuse_models.append(AFF(channels=width, r=4))
        self.convs = nn.ModuleList(convs)
        self.bns = nn.ModuleList(bns)
        self.fuse_models = nn.ModuleList(fuse_models)
        self.relu = ReLU(inplace=True)
        self.conv3 = nn.Conv2d(width * scale, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        # Projection shortcut when spatial size or channel count changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )
        self.stride = stride
        self.width = width
        self.scale = scale

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        # Split the bottleneck channels into `scale` groups of `width` channels.
        spx = torch.split(out, self.width, 1)
        for i in range(self.nums):
            if i == 0:
                sp = spx[i]
            else:
                # Attentional fusion of the previous group's output with this group.
                sp = self.fuse_models[i - 1](sp, spx[i])
            sp = self.convs[i](sp)
            sp = self.relu(self.bns[i](sp))
            if i == 0:
                out = sp
            else:
                out = torch.cat((out, sp), 1)
        out = self.conv3(out)
        out = self.bn3(out)
        residual = self.shortcut(x)
        out += residual
        out = self.relu(out)
        return out
class ERes2NetV2(nn.Module):
    """ERes2NetV2 speaker-embedding backbone.

    Compared with ERes2Net, stage channels are expanded while redundant fusion
    structure is pruned: only stage 3 is downsampled and fused (one AFF) with
    stage 4 before statistics pooling.
    """

    def __init__(
        self,
        block=BasicBlockERes2NetV2,
        block_fuse=BasicBlockERes2NetV2AFF,
        num_blocks=[3, 4, 6, 3],
        m_channels=64,
        feat_dim=80,
        embedding_size=192,
        baseWidth=26,
        scale=2,
        expansion=2,
        pooling_func="TSTP",
        two_emb_layer=False,
    ):
        super(ERes2NetV2, self).__init__()
        self.in_planes = m_channels
        self.feat_dim = feat_dim
        self.embedding_size = embedding_size
        # Frequency axis is halved by the three stride-2 stages (factor 8).
        self.stats_dim = int(feat_dim / 8) * m_channels * 8
        self.two_emb_layer = two_emb_layer
        self.baseWidth = baseWidth
        self.scale = scale
        self.expansion = expansion
        self.conv1 = nn.Conv2d(1, m_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(m_channels)
        self.layer1 = self._make_layer(block, m_channels, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, m_channels * 2, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block_fuse, m_channels * 4, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block_fuse, m_channels * 8, num_blocks[3], stride=2)
        # Downsampling module: brings stage-3 features to stage-4 resolution/channels.
        self.layer3_ds = nn.Conv2d(
            m_channels * 4 * self.expansion,
            m_channels * 8 * self.expansion,
            kernel_size=3,
            padding=1,
            stride=2,
            bias=False,
        )
        # Bottom-up fusion module (single AFF between stages 3 and 4).
        self.fuse34 = AFF(channels=m_channels * 8 * self.expansion, r=4)
        # TAP/TSDP pool to mean-only (1x) stats; TSTP/ASTP concatenate mean+std (2x).
        self.n_stats = 1 if pooling_func == "TAP" or pooling_func == "TSDP" else 2
        self.pool = getattr(pooling_layers, pooling_func)(in_dim=self.stats_dim * self.expansion)
        self.seg_1 = nn.Linear(self.stats_dim * self.expansion * self.n_stats, embedding_size)
        if self.two_emb_layer:
            self.seg_bn_1 = nn.BatchNorm1d(embedding_size, affine=False)
            self.seg_2 = nn.Linear(embedding_size, embedding_size)
        else:
            self.seg_bn_1 = nn.Identity()
            self.seg_2 = nn.Identity()

    def _make_layer(self, block, planes, num_blocks, stride):
        # First block in a stage may downsample; the rest keep stride 1.
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(
                block(
                    self.in_planes, planes, stride, baseWidth=self.baseWidth, scale=self.scale, expansion=self.expansion
                )
            )
            self.in_planes = planes * self.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        """x: (B, T, F) fbank features -> (B, embedding_size) embedding."""
        x = x.permute(0, 2, 1)  # (B,T,F) => (B,F,T)
        x = x.unsqueeze_(1)
        out = F.relu(self.bn1(self.conv1(x)))
        out1 = self.layer1(out)
        out2 = self.layer2(out1)
        out3 = self.layer3(out2)
        out4 = self.layer4(out3)
        out3_ds = self.layer3_ds(out3)
        fuse_out34 = self.fuse34(out4, out3_ds)
        stats = self.pool(fuse_out34)
        embed_a = self.seg_1(stats)
        if self.two_emb_layer:
            out = F.relu(embed_a)
            out = self.seg_bn_1(out)
            embed_b = self.seg_2(out)
            return embed_b
        else:
            return embed_a

    def forward3(self, x):
        """Like forward, but skips pooling/projection: returns the fused feature
        map flattened over (channel, freq) and averaged over time, e.g.
        (B, 2048, 10, T) -> (B, 20480).
        """
        x = x.permute(0, 2, 1)  # (B,T,F) => (B,F,T)
        x = x.unsqueeze_(1)
        out = F.relu(self.bn1(self.conv1(x)))
        out1 = self.layer1(out)
        out2 = self.layer2(out1)
        out3 = self.layer3(out2)
        out4 = self.layer4(out3)
        out3_ds = self.layer3_ds(out3)
        fuse_out34 = self.fuse34(out4, out3_ds)
        # Observed shape here: e.g. torch.Size([16, 2048, 10, 72]).
        return fuse_out34.flatten(start_dim=1, end_dim=2).mean(-1)
if __name__ == "__main__":
    # Smoke test on a single 300-frame, 80-dim fbank utterance.
    x = torch.randn(1, 300, 80)
    model = ERes2NetV2(feat_dim=80, embedding_size=192, m_channels=64, baseWidth=26, scale=2, expansion=2)
    model.eval()
    y = model(x)
    print(y.size())

    # Parameter count needs no external package; compute it directly.
    num_params = sum(p.numel() for p in model.parameters())
    print("Params: {} M".format(num_params / 1e6))  # 17.86 M

    try:
        # MACs require the optional `thop` package. The original code called
        # `profile(...)` without ever importing it, which raised NameError.
        from thop import profile

        macs, _ = profile(model, inputs=(x,))
        print("MACs: {} G".format(macs / 1e9))  # 12.69 G
    except ImportError:
        print("Install `thop` to report MACs.")
| {
"repo_id": "RVC-Boss/GPT-SoVITS",
"file_path": "GPT_SoVITS/eres2net/ERes2NetV2.py",
"license": "MIT License",
"lines": 235,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
RVC-Boss/GPT-SoVITS:GPT_SoVITS/eres2net/ERes2Net_huge.py | # Copyright 3D-Speaker (https://github.com/alibaba-damo-academy/3D-Speaker). All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Res2Net implementation is adapted from https://github.com/wenet-e2e/wespeaker.
ERes2Net incorporates both local and global feature fusion techniques to improve the performance.
The local feature fusion (LFF) fuses the features within one single residual block to extract the local signal.
The global feature fusion (GFF) takes acoustic features of different scales as input to aggregate global signal.
ERes2Net-huge is an upgraded version of ERes2Net that uses a larger number of parameters to achieve better
recognition performance. Parameters expansion, baseWidth, and scale can be modified to obtain optimal performance.
"""
import torch
import math
import torch.nn as nn
import torch.nn.functional as F
import pooling_layers as pooling_layers
from fusion import AFF
class ReLU(nn.Hardtanh):
def __init__(self, inplace=False):
super(ReLU, self).__init__(0, 20, inplace)
def __repr__(self):
inplace_str = "inplace" if self.inplace else ""
return self.__class__.__name__ + " (" + inplace_str + ")"
class BasicBlockERes2Net(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1, baseWidth=24, scale=3):
super(BasicBlockERes2Net, self).__init__()
width = int(math.floor(planes * (baseWidth / 64.0)))
self.conv1 = nn.Conv2d(in_planes, width * scale, kernel_size=1, stride=stride, bias=False)
self.bn1 = nn.BatchNorm2d(width * scale)
self.nums = scale
convs = []
bns = []
for i in range(self.nums):
convs.append(nn.Conv2d(width, width, kernel_size=3, padding=1, bias=False))
bns.append(nn.BatchNorm2d(width))
self.convs = nn.ModuleList(convs)
self.bns = nn.ModuleList(bns)
self.relu = ReLU(inplace=True)
self.conv3 = nn.Conv2d(width * scale, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes),
)
self.stride = stride
self.width = width
self.scale = scale
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
spx = torch.split(out, self.width, 1)
for i in range(self.nums):
if i == 0:
sp = spx[i]
else:
sp = sp + spx[i]
sp = self.convs[i](sp)
sp = self.relu(self.bns[i](sp))
if i == 0:
out = sp
else:
out = torch.cat((out, sp), 1)
out = self.conv3(out)
out = self.bn3(out)
residual = self.shortcut(x)
out += residual
out = self.relu(out)
return out
class BasicBlockERes2Net_diff_AFF(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1, baseWidth=24, scale=3):
super(BasicBlockERes2Net_diff_AFF, self).__init__()
width = int(math.floor(planes * (baseWidth / 64.0)))
self.conv1 = nn.Conv2d(in_planes, width * scale, kernel_size=1, stride=stride, bias=False)
self.bn1 = nn.BatchNorm2d(width * scale)
self.nums = scale
convs = []
fuse_models = []
bns = []
for i in range(self.nums):
convs.append(nn.Conv2d(width, width, kernel_size=3, padding=1, bias=False))
bns.append(nn.BatchNorm2d(width))
for j in range(self.nums - 1):
fuse_models.append(AFF(channels=width))
self.convs = nn.ModuleList(convs)
self.bns = nn.ModuleList(bns)
self.fuse_models = nn.ModuleList(fuse_models)
self.relu = ReLU(inplace=True)
self.conv3 = nn.Conv2d(width * scale, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes),
)
self.stride = stride
self.width = width
self.scale = scale
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
spx = torch.split(out, self.width, 1)
for i in range(self.nums):
if i == 0:
sp = spx[i]
else:
sp = self.fuse_models[i - 1](sp, spx[i])
sp = self.convs[i](sp)
sp = self.relu(self.bns[i](sp))
if i == 0:
out = sp
else:
out = torch.cat((out, sp), 1)
out = self.conv3(out)
out = self.bn3(out)
residual = self.shortcut(x)
out += residual
out = self.relu(out)
return out
class ERes2Net(nn.Module):
def __init__(
self,
block=BasicBlockERes2Net,
block_fuse=BasicBlockERes2Net_diff_AFF,
num_blocks=[3, 4, 6, 3],
m_channels=64,
feat_dim=80,
embedding_size=192,
pooling_func="TSTP",
two_emb_layer=False,
):
super(ERes2Net, self).__init__()
self.in_planes = m_channels
self.feat_dim = feat_dim
self.embedding_size = embedding_size
self.stats_dim = int(feat_dim / 8) * m_channels * 8
self.two_emb_layer = two_emb_layer
self.conv1 = nn.Conv2d(1, m_channels, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(m_channels)
self.layer1 = self._make_layer(block, m_channels, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, m_channels * 2, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block_fuse, m_channels * 4, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block_fuse, m_channels * 8, num_blocks[3], stride=2)
self.layer1_downsample = nn.Conv2d(
m_channels * 4, m_channels * 8, kernel_size=3, padding=1, stride=2, bias=False
)
self.layer2_downsample = nn.Conv2d(
m_channels * 8, m_channels * 16, kernel_size=3, padding=1, stride=2, bias=False
)
self.layer3_downsample = nn.Conv2d(
m_channels * 16, m_channels * 32, kernel_size=3, padding=1, stride=2, bias=False
)
self.fuse_mode12 = AFF(channels=m_channels * 8)
self.fuse_mode123 = AFF(channels=m_channels * 16)
self.fuse_mode1234 = AFF(channels=m_channels * 32)
self.n_stats = 1 if pooling_func == "TAP" or pooling_func == "TSDP" else 2
self.pool = getattr(pooling_layers, pooling_func)(in_dim=self.stats_dim * block.expansion)
self.seg_1 = nn.Linear(self.stats_dim * block.expansion * self.n_stats, embedding_size)
if self.two_emb_layer:
self.seg_bn_1 = nn.BatchNorm1d(embedding_size, affine=False)
self.seg_2 = nn.Linear(embedding_size, embedding_size)
else:
self.seg_bn_1 = nn.Identity()
self.seg_2 = nn.Identity()
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
x = x.permute(0, 2, 1) # (B,T,F) => (B,F,T)
x = x.unsqueeze_(1)
out = F.relu(self.bn1(self.conv1(x)))
out1 = self.layer1(out)
out2 = self.layer2(out1)
out1_downsample = self.layer1_downsample(out1)
fuse_out12 = self.fuse_mode12(out2, out1_downsample)
out3 = self.layer3(out2)
fuse_out12_downsample = self.layer2_downsample(fuse_out12)
fuse_out123 = self.fuse_mode123(out3, fuse_out12_downsample)
out4 = self.layer4(out3)
fuse_out123_downsample = self.layer3_downsample(fuse_out123)
fuse_out1234 = self.fuse_mode1234(out4, fuse_out123_downsample)
stats = self.pool(fuse_out1234)
embed_a = self.seg_1(stats)
if self.two_emb_layer:
out = F.relu(embed_a)
out = self.seg_bn_1(out)
embed_b = self.seg_2(out)
return embed_b
else:
return embed_a
def forward2(self, x, if_mean):
x = x.permute(0, 2, 1) # (B,T,F) => (B,F,T)
x = x.unsqueeze_(1)
out = F.relu(self.bn1(self.conv1(x)))
out1 = self.layer1(out)
out2 = self.layer2(out1)
out1_downsample = self.layer1_downsample(out1)
fuse_out12 = self.fuse_mode12(out2, out1_downsample)
out3 = self.layer3(out2)
fuse_out12_downsample = self.layer2_downsample(fuse_out12)
fuse_out123 = self.fuse_mode123(out3, fuse_out12_downsample)
out4 = self.layer4(out3)
fuse_out123_downsample = self.layer3_downsample(fuse_out123)
fuse_out1234 = self.fuse_mode1234(out4, fuse_out123_downsample).flatten(start_dim=1, end_dim=2) # bs,20480,T
if if_mean == False:
mean = fuse_out1234[0].transpose(1, 0) # (T,20480),bs=T
else:
mean = fuse_out1234.mean(2) # bs,20480
mean_std = torch.cat([mean, torch.zeros_like(mean)], 1)
return self.seg_1(mean_std) # (T,192)
# stats = self.pool(fuse_out1234)
# if self.two_emb_layer:
# out = F.relu(embed_a)
# out = self.seg_bn_1(out)
# embed_b = self.seg_2(out)
# return embed_b
# else:
# return embed_a
def forward3(self, x):
x = x.permute(0, 2, 1) # (B,T,F) => (B,F,T)
x = x.unsqueeze_(1)
out = F.relu(self.bn1(self.conv1(x)))
out1 = self.layer1(out)
out2 = self.layer2(out1)
out1_downsample = self.layer1_downsample(out1)
fuse_out12 = self.fuse_mode12(out2, out1_downsample)
out3 = self.layer3(out2)
fuse_out12_downsample = self.layer2_downsample(fuse_out12)
fuse_out123 = self.fuse_mode123(out3, fuse_out12_downsample)
out4 = self.layer4(out3)
fuse_out123_downsample = self.layer3_downsample(fuse_out123)
fuse_out1234 = self.fuse_mode1234(out4, fuse_out123_downsample).flatten(start_dim=1, end_dim=2).mean(-1)
return fuse_out1234
# print(fuse_out1234.shape)
# print(fuse_out1234.flatten(start_dim=1,end_dim=2).shape)
# pdb.set_trace()
| {
"repo_id": "RVC-Boss/GPT-SoVITS",
"file_path": "GPT_SoVITS/eres2net/ERes2Net_huge.py",
"license": "MIT License",
"lines": 246,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
RVC-Boss/GPT-SoVITS:GPT_SoVITS/eres2net/fusion.py | # Copyright 3D-Speaker (https://github.com/alibaba-damo-academy/3D-Speaker). All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import torch
import torch.nn as nn
class AFF(nn.Module):
    """Attentional feature fusion of two feature maps of identical shape."""

    def __init__(self, channels=64, r=4):
        super(AFF, self).__init__()
        bottleneck = int(channels // r)
        # 1x1-conv bottleneck that scores each channel/position of the pair.
        self.local_att = nn.Sequential(
            nn.Conv2d(channels * 2, bottleneck, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(bottleneck),
            nn.SiLU(inplace=True),
            nn.Conv2d(bottleneck, channels, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(channels),
        )

    def forward(self, x, ds_y):
        """Blend x and ds_y with a learned per-position weight in (0, 2)."""
        scores = self.local_att(torch.cat((x, ds_y), dim=1))
        weight = 1.0 + torch.tanh(scores)
        # The two weights sum to 2, so this is a soft trade-off between inputs.
        return torch.mul(x, weight) + torch.mul(ds_y, 2.0 - weight)
| {
"repo_id": "RVC-Boss/GPT-SoVITS",
"file_path": "GPT_SoVITS/eres2net/fusion.py",
"license": "MIT License",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
RVC-Boss/GPT-SoVITS:GPT_SoVITS/eres2net/pooling_layers.py | # Copyright 3D-Speaker (https://github.com/alibaba-damo-academy/3D-Speaker). All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""This implementation is adapted from https://github.com/wenet-e2e/wespeaker."""
import torch
import torch.nn as nn
class TAP(nn.Module):
    """Temporal average pooling: only the first-order mean is used."""

    def __init__(self, **kwargs):
        super(TAP, self).__init__()

    def forward(self, x):
        # Mean over the last (temporal) dimension, then flatten so 4-D
        # resnet-style inputs (B, C, F, T) are also compatible.
        mean = x.mean(dim=-1)
        return mean.flatten(start_dim=1)
class TSDP(nn.Module):
    """Temporal standard-deviation pooling: only the second-order std is used."""

    def __init__(self, **kwargs):
        super(TSDP, self).__init__()

    def forward(self, x):
        # Std over the last (temporal) axis; the 1e-8 keeps sqrt away from zero.
        std = torch.sqrt(torch.var(x, dim=-1) + 1e-8)
        return std.flatten(start_dim=1)
class TSTP(nn.Module):
    """Temporal statistics pooling (x-vector style): concatenate the mean and
    standard deviation computed over the temporal axis.
    """

    def __init__(self, **kwargs):
        super(TSTP, self).__init__()

    def forward(self, x):
        # First- and second-order statistics over the last (temporal) axis.
        mean = x.mean(dim=-1).flatten(start_dim=1)
        std = torch.sqrt(torch.var(x, dim=-1) + 1e-8).flatten(start_dim=1)
        return torch.cat((mean, std), 1)
class ASTP(nn.Module):
    """Attentive statistics pooling (as in ECAPA-TDNN): channel- and
    context-dependent attention over time, then attention-weighted mean/std.
    """

    def __init__(self, in_dim, bottleneck_dim=128, global_context_att=False):
        super(ASTP, self).__init__()
        self.global_context_att = global_context_att
        # Conv1d with kernel_size 1 acts as a per-frame Linear, so inputs
        # never need transposing.
        self.linear1 = nn.Conv1d(
            in_dim * 3 if global_context_att else in_dim, bottleneck_dim, kernel_size=1
        )  # equals W and b in the paper
        self.linear2 = nn.Conv1d(bottleneck_dim, in_dim, kernel_size=1)  # equals V and k in the paper

    def forward(self, x):
        """x: (B, F, T) tdnn features or (B, C, F, T) resnet features;
        returns (B, 2*F) concatenated weighted mean and std.
        """
        if len(x.shape) == 4:
            # Collapse (C, F) into one feature axis for resnet-style inputs.
            x = x.reshape(x.shape[0], x.shape[1] * x.shape[2], x.shape[3])
        assert len(x.shape) == 3

        if self.global_context_att:
            context_mean = torch.mean(x, dim=-1, keepdim=True).expand_as(x)
            context_std = torch.sqrt(torch.var(x, dim=-1, keepdim=True) + 1e-10).expand_as(x)
            x_in = torch.cat((x, context_mean, context_std), dim=1)
        else:
            x_in = x

        # tanh rather than ReLU here: ReLU may be hard to converge.
        alpha = torch.softmax(self.linear2(torch.tanh(self.linear1(x_in))), dim=2)
        mean = torch.sum(alpha * x, dim=2)
        var = torch.sum(alpha * (x**2), dim=2) - mean**2
        std = torch.sqrt(var.clamp(min=1e-10))
        return torch.cat([mean, std], dim=1)
| {
"repo_id": "RVC-Boss/GPT-SoVITS",
"file_path": "GPT_SoVITS/eres2net/pooling_layers.py",
"license": "MIT License",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
RVC-Boss/GPT-SoVITS:GPT_SoVITS/eres2net/kaldi.py | import math
from typing import Tuple
import torch
import torchaudio
from torch import Tensor
__all__ = [
"get_mel_banks",
"inverse_mel_scale",
"inverse_mel_scale_scalar",
"mel_scale",
"mel_scale_scalar",
"spectrogram",
"fbank",
"mfcc",
"vtln_warp_freq",
"vtln_warp_mel_freq",
]
# numeric_limits<float>::epsilon() 1.1920928955078125e-07
EPSILON = torch.tensor(torch.finfo(torch.float).eps)
# 1 milliseconds = 0.001 seconds
MILLISECONDS_TO_SECONDS = 0.001

# window types
HAMMING = "hamming"
HANNING = "hanning"
POVEY = "povey"
RECTANGULAR = "rectangular"
BLACKMAN = "blackman"
# All window types accepted by the feature extractors below.
WINDOWS = [HAMMING, HANNING, POVEY, RECTANGULAR, BLACKMAN]


def _get_epsilon(device, dtype):
    # float32 machine epsilon as a tensor on the caller's device/dtype.
    return EPSILON.to(device=device, dtype=dtype)
def _next_power_of_2(x: int) -> int:
r"""Returns the smallest power of 2 that is greater than x"""
return 1 if x == 0 else 2 ** (x - 1).bit_length()
def _get_strided(waveform: Tensor, window_size: int, window_shift: int, snip_edges: bool) -> Tensor:
    r"""Given a waveform (1D tensor of size ``num_samples``), it returns a 2D tensor (m, ``window_size``)
    representing how the window is shifted along the waveform. Each row is a frame.

    Args:
        waveform (Tensor): Tensor of size ``num_samples``
        window_size (int): Frame length
        window_shift (int): Frame shift
        snip_edges (bool): If True, end effects will be handled by outputting only frames that completely fit
            in the file, and the number of frames depends on the frame_length. If False, the number of frames
            depends only on the frame_shift, and we reflect the data at the ends.

    Returns:
        Tensor: 2D tensor of size (m, ``window_size``) where each row is a frame
    """
    assert waveform.dim() == 1
    num_samples = waveform.size(0)
    # Row stride advances one frame (window_shift samples); column stride is one sample.
    strides = (window_shift * waveform.stride(0), waveform.stride(0))

    if snip_edges:
        if num_samples < window_size:
            # Not even one full frame fits: return an empty (0, 0) tensor.
            return torch.empty((0, 0), dtype=waveform.dtype, device=waveform.device)
        else:
            m = 1 + (num_samples - window_size) // window_shift
    else:
        # Reflect the signal at both ends so the frame count depends only on window_shift.
        reversed_waveform = torch.flip(waveform, [0])
        m = (num_samples + (window_shift // 2)) // window_shift
        pad = window_size // 2 - window_shift // 2
        pad_right = reversed_waveform
        if pad > 0:
            # torch.nn.functional.pad returns [2,1,0,1,2] for 'reflect'
            # but we want [2, 1, 0, 0, 1, 2]
            pad_left = reversed_waveform[-pad:]
            waveform = torch.cat((pad_left, waveform, pad_right), dim=0)
        else:
            # pad is negative so we want to trim the waveform at the front
            waveform = torch.cat((waveform[-pad:], pad_right), dim=0)

    sizes = (m, window_size)
    # as_strided creates overlapping frame views without copying the data.
    return waveform.as_strided(sizes, strides)
def _feature_window_function(
    window_type: str,
    window_size: int,
    blackman_coeff: float,
    device: torch.device,
    dtype: torch.dtype,
) -> Tensor:
    r"""Return a window tensor of length ``window_size`` for the given type.

    Args:
        window_type (str): One of the names listed in ``WINDOWS``.
        window_size (int): Number of samples in the window.
        blackman_coeff (float): Constant coefficient for the generalized
            Blackman window (only used when ``window_type == BLACKMAN``).
        device (torch.device): Device to create the window on.
        dtype (torch.dtype): Floating-point dtype of the window.
            (Previously mis-annotated as ``int``.)

    Raises:
        ValueError: If ``window_type`` is not a recognized window name.
    """
    if window_type == HANNING:
        return torch.hann_window(window_size, periodic=False, device=device, dtype=dtype)
    elif window_type == HAMMING:
        return torch.hamming_window(window_size, periodic=False, alpha=0.54, beta=0.46, device=device, dtype=dtype)
    elif window_type == POVEY:
        # like hanning but goes to zero at edges
        return torch.hann_window(window_size, periodic=False, device=device, dtype=dtype).pow(0.85)
    elif window_type == RECTANGULAR:
        return torch.ones(window_size, device=device, dtype=dtype)
    elif window_type == BLACKMAN:
        a = 2 * math.pi / (window_size - 1)
        window_function = torch.arange(window_size, device=device, dtype=dtype)
        # can't use torch.blackman_window as they use different coefficients
        return (
            blackman_coeff
            - 0.5 * torch.cos(a * window_function)
            + (0.5 - blackman_coeff) * torch.cos(2 * a * window_function)
        ).to(device=device, dtype=dtype)
    else:
        # ValueError (a subclass of Exception) instead of bare Exception, so
        # any existing `except Exception` handlers still work.
        raise ValueError("Invalid window type " + window_type)
def _get_log_energy(strided_input: Tensor, epsilon: Tensor, energy_floor: float) -> Tensor:
r"""Returns the log energy of size (m) for a strided_input (m,*)"""
device, dtype = strided_input.device, strided_input.dtype
log_energy = torch.max(strided_input.pow(2).sum(1), epsilon).log() # size (m)
if energy_floor == 0.0:
return log_energy
return torch.max(log_energy, torch.tensor(math.log(energy_floor), device=device, dtype=dtype))
def _get_waveform_and_window_properties(
    waveform: Tensor,
    channel: int,
    sample_frequency: float,
    frame_shift: float,
    frame_length: float,
    round_to_power_of_two: bool,
    preemphasis_coefficient: float,
) -> Tuple[Tensor, int, int, int]:
    r"""Gets the waveform and window properties

    Selects one channel of ``waveform`` (size (c, n)), converts frame shift and
    frame length from milliseconds to sample counts, and validates the result.

    Returns:
        (Tensor, int, int, int): 1D waveform of the chosen channel, window
        shift in samples, window size in samples, and the padded window size
        (the next power of two when ``round_to_power_of_two`` is True).
    """
    # channel == -1 means "expect mono": treat it as channel 0.
    channel = max(channel, 0)
    assert channel < waveform.size(0), "Invalid channel {} for size {}".format(channel, waveform.size(0))
    waveform = waveform[channel, :]  # size (n)
    # frame_shift / frame_length are milliseconds; convert to sample counts.
    window_shift = int(sample_frequency * frame_shift * MILLISECONDS_TO_SECONDS)
    window_size = int(sample_frequency * frame_length * MILLISECONDS_TO_SECONDS)
    padded_window_size = _next_power_of_2(window_size) if round_to_power_of_two else window_size
    assert 2 <= window_size <= len(waveform), "choose a window size {} that is [2, {}]".format(
        window_size, len(waveform)
    )
    assert 0 < window_shift, "`window_shift` must be greater than 0"
    assert padded_window_size % 2 == 0, (
        "the padded `window_size` must be divisible by two. use `round_to_power_of_two` or change `frame_length`"
    )
    assert 0.0 <= preemphasis_coefficient <= 1.0, "`preemphasis_coefficient` must be between [0,1]"
    assert sample_frequency > 0, "`sample_frequency` must be greater than zero"
    return waveform, window_shift, window_size, padded_window_size
def _get_window(
    waveform: Tensor,
    padded_window_size: int,
    window_size: int,
    window_shift: int,
    window_type: str,
    blackman_coeff: float,
    snip_edges: bool,
    raw_energy: bool,
    energy_floor: float,
    dither: float,
    remove_dc_offset: bool,
    preemphasis_coefficient: float,
) -> Tuple[Tensor, Tensor]:
    r"""Gets a window and its log energy

    Applies, in Kaldi's order: framing, dither, DC-offset removal, (optional
    raw energy), preemphasis, windowing, zero-padding, (optional windowed
    energy).

    Returns:
        (Tensor, Tensor): strided_input of size (m, ``padded_window_size``) and signal_log_energy of size (m)
    """
    device, dtype = waveform.device, waveform.dtype
    epsilon = _get_epsilon(device, dtype)
    # size (m, window_size)
    strided_input = _get_strided(waveform, window_size, window_shift, snip_edges)
    if dither != 0.0:
        # Add Gaussian noise scaled by `dither` to every sample.
        rand_gauss = torch.randn(strided_input.shape, device=device, dtype=dtype)
        strided_input = strided_input + rand_gauss * dither
    if remove_dc_offset:
        # Subtract each row/frame by its mean
        row_means = torch.mean(strided_input, dim=1).unsqueeze(1)  # size (m, 1)
        strided_input = strided_input - row_means
    if raw_energy:
        # Compute the log energy of each row/frame before applying preemphasis and
        # window function
        signal_log_energy = _get_log_energy(strided_input, epsilon, energy_floor)  # size (m)
    if preemphasis_coefficient != 0.0:
        # strided_input[i,j] -= preemphasis_coefficient * strided_input[i, max(0, j-1)] for all i,j
        offset_strided_input = torch.nn.functional.pad(strided_input.unsqueeze(0), (1, 0), mode="replicate").squeeze(
            0
        )  # size (m, window_size + 1)
        strided_input = strided_input - preemphasis_coefficient * offset_strided_input[:, :-1]
    # Apply window_function to each row/frame
    window_function = _feature_window_function(window_type, window_size, blackman_coeff, device, dtype).unsqueeze(
        0
    )  # size (1, window_size)
    strided_input = strided_input * window_function  # size (m, window_size)
    # Pad columns with zero until we reach size (m, padded_window_size)
    if padded_window_size != window_size:
        padding_right = padded_window_size - window_size
        strided_input = torch.nn.functional.pad(
            strided_input.unsqueeze(0), (0, padding_right), mode="constant", value=0
        ).squeeze(0)
    # Compute energy after window function (not the raw one)
    if not raw_energy:
        signal_log_energy = _get_log_energy(strided_input, epsilon, energy_floor)  # size (m)
    return strided_input, signal_log_energy
def _subtract_column_mean(tensor: Tensor, subtract_mean: bool) -> Tensor:
# subtracts the column mean of the tensor size (m, n) if subtract_mean=True
# it returns size (m, n)
if subtract_mean:
col_means = torch.mean(tensor, dim=0).unsqueeze(0)
tensor = tensor - col_means
return tensor
def spectrogram(
    waveform: Tensor,
    blackman_coeff: float = 0.42,
    channel: int = -1,
    dither: float = 0.0,
    energy_floor: float = 1.0,
    frame_length: float = 25.0,
    frame_shift: float = 10.0,
    min_duration: float = 0.0,
    preemphasis_coefficient: float = 0.97,
    raw_energy: bool = True,
    remove_dc_offset: bool = True,
    round_to_power_of_two: bool = True,
    sample_frequency: float = 16000.0,
    snip_edges: bool = True,
    subtract_mean: bool = False,
    window_type: str = POVEY,
) -> Tensor:
    r"""Create a spectrogram from a raw audio signal. This matches the input/output of Kaldi's
    compute-spectrogram-feats.
    Args:
        waveform (Tensor): Tensor of audio of size (c, n) where c is in the range [0,2)
        blackman_coeff (float, optional): Constant coefficient for generalized Blackman window. (Default: ``0.42``)
        channel (int, optional): Channel to extract (-1 -> expect mono, 0 -> left, 1 -> right) (Default: ``-1``)
        dither (float, optional): Dithering constant (0.0 means no dither). If you turn this off, you should set
            the energy_floor option, e.g. to 1.0 or 0.1 (Default: ``0.0``)
        energy_floor (float, optional): Floor on energy (absolute, not relative) in Spectrogram computation. Caution:
            this floor is applied to the zeroth component, representing the total signal energy. The floor on the
            individual spectrogram elements is fixed at std::numeric_limits<float>::epsilon(). (Default: ``1.0``)
        frame_length (float, optional): Frame length in milliseconds (Default: ``25.0``)
        frame_shift (float, optional): Frame shift in milliseconds (Default: ``10.0``)
        min_duration (float, optional): Minimum duration of segments to process (in seconds). (Default: ``0.0``)
        preemphasis_coefficient (float, optional): Coefficient for use in signal preemphasis (Default: ``0.97``)
        raw_energy (bool, optional): If True, compute energy before preemphasis and windowing (Default: ``True``)
        remove_dc_offset (bool, optional): Subtract mean from waveform on each frame (Default: ``True``)
        round_to_power_of_two (bool, optional): If True, round window size to power of two by zero-padding input
            to FFT. (Default: ``True``)
        sample_frequency (float, optional): Waveform data sample frequency (must match the waveform file, if
            specified there) (Default: ``16000.0``)
        snip_edges (bool, optional): If True, end effects will be handled by outputting only frames that completely fit
            in the file, and the number of frames depends on the frame_length. If False, the number of frames
            depends only on the frame_shift, and we reflect the data at the ends. (Default: ``True``)
        subtract_mean (bool, optional): Subtract mean of each feature file [CMS]; not recommended to do
            it this way. (Default: ``False``)
        window_type (str, optional): Type of window ('hamming'|'hanning'|'povey'|'rectangular'|'blackman')
            (Default: ``'povey'``)
    Returns:
        Tensor: A spectrogram identical to what Kaldi would output. The shape is
        (m, ``padded_window_size // 2 + 1``) where m is calculated in _get_strided
    """
    device, dtype = waveform.device, waveform.dtype
    epsilon = _get_epsilon(device, dtype)
    waveform, window_shift, window_size, padded_window_size = _get_waveform_and_window_properties(
        waveform, channel, sample_frequency, frame_shift, frame_length, round_to_power_of_two, preemphasis_coefficient
    )
    if len(waveform) < min_duration * sample_frequency:
        # signal is too short
        return torch.empty(0)
    strided_input, signal_log_energy = _get_window(
        waveform,
        padded_window_size,
        window_size,
        window_shift,
        window_type,
        blackman_coeff,
        snip_edges,
        raw_energy,
        energy_floor,
        dither,
        remove_dc_offset,
        preemphasis_coefficient,
    )
    # complex tensor of size (m, padded_window_size // 2 + 1)
    fft = torch.fft.rfft(strided_input)
    # Convert the FFT into a power spectrum
    power_spectrum = torch.max(fft.abs().pow(2.0), epsilon).log()  # size (m, padded_window_size // 2 + 1)
    # Kaldi replaces the DC bin with the per-frame log energy.
    power_spectrum[:, 0] = signal_log_energy
    power_spectrum = _subtract_column_mean(power_spectrum, subtract_mean)
    return power_spectrum
def inverse_mel_scale_scalar(mel_freq: float) -> float:
    """Inverse of ``mel_scale_scalar``: convert a mel-scale value back to Hz."""
    ratio = math.exp(mel_freq / 1127.0)
    return 700.0 * (ratio - 1.0)
def inverse_mel_scale(mel_freq: Tensor) -> Tensor:
    """Elementwise inverse of ``mel_scale``: convert mel-scale values back to Hz."""
    scaled = mel_freq / 1127.0
    return 700.0 * (scaled.exp() - 1.0)
def mel_scale_scalar(freq: float) -> float:
    """Convert a frequency in Hz to Kaldi's mel scale: 1127 * ln(1 + f / 700)."""
    ratio = 1.0 + freq / 700.0
    return 1127.0 * math.log(ratio)
def mel_scale(freq: Tensor) -> Tensor:
    """Elementwise Hz-to-mel conversion: 1127 * ln(1 + f / 700)."""
    ratio = 1.0 + freq / 700.0
    return 1127.0 * ratio.log()
def vtln_warp_freq(
    vtln_low_cutoff: float,
    vtln_high_cutoff: float,
    low_freq: float,
    high_freq: float,
    vtln_warp_factor: float,
    freq: Tensor,
) -> Tensor:
    r"""This computes a VTLN warping function that is not the same as HTK's one,
    but has similar inputs (this function has the advantage of never producing
    empty bins).
    This function computes a warp function F(freq), defined between low_freq
    and high_freq inclusive, with the following properties:
    F(low_freq) == low_freq
    F(high_freq) == high_freq
    The function is continuous and piecewise linear with two inflection
    points.
    The lower inflection point (measured in terms of the unwarped
    frequency) is at frequency l, determined as described below.
    The higher inflection point is at a frequency h, determined as
    described below.
    If l <= f <= h, then F(f) = f/vtln_warp_factor.
    If the higher inflection point (measured in terms of the unwarped
    frequency) is at h, then max(h, F(h)) == vtln_high_cutoff.
    Since (by the last point) F(h) == h/vtln_warp_factor, then
    max(h, h/vtln_warp_factor) == vtln_high_cutoff, so
    h = vtln_high_cutoff / max(1, 1/vtln_warp_factor).
    = vtln_high_cutoff * min(1, vtln_warp_factor).
    If the lower inflection point (measured in terms of the unwarped
    frequency) is at l, then min(l, F(l)) == vtln_low_cutoff
    This implies that l = vtln_low_cutoff / min(1, 1/vtln_warp_factor)
    = vtln_low_cutoff * max(1, vtln_warp_factor)
    Args:
        vtln_low_cutoff (float): Lower frequency cutoffs for VTLN
        vtln_high_cutoff (float): Upper frequency cutoffs for VTLN
        low_freq (float): Lower frequency cutoffs in mel computation
        high_freq (float): Upper frequency cutoffs in mel computation
        vtln_warp_factor (float): Vtln warp factor
        freq (Tensor): given frequency in Hz
    Returns:
        Tensor: Freq after vtln warp
    """
    assert vtln_low_cutoff > low_freq, "be sure to set the vtln_low option higher than low_freq"
    assert vtln_high_cutoff < high_freq, "be sure to set the vtln_high option lower than high_freq [or negative]"
    # Inflection points l and h, derived in the docstring above.
    l = vtln_low_cutoff * max(1.0, vtln_warp_factor)
    h = vtln_high_cutoff * min(1.0, vtln_warp_factor)
    scale = 1.0 / vtln_warp_factor
    Fl = scale * l  # F(l)
    Fh = scale * h  # F(h)
    assert l > low_freq and h < high_freq
    # slope of left part of the 3-piece linear function
    scale_left = (Fl - low_freq) / (l - low_freq)
    # [slope of center part is just "scale"]
    # slope of right part of the 3-piece linear function
    scale_right = (high_freq - Fh) / (high_freq - h)
    res = torch.empty_like(freq)
    outside_low_high_freq = torch.lt(freq, low_freq) | torch.gt(freq, high_freq)  # freq < low_freq || freq > high_freq
    before_l = torch.lt(freq, l)  # freq < l
    before_h = torch.lt(freq, h)  # freq < h
    after_h = torch.ge(freq, h)  # freq >= h
    # order of operations matter here (since there is overlapping frequency regions):
    # later assignments overwrite earlier ones where the masks overlap.
    res[after_h] = high_freq + scale_right * (freq[after_h] - high_freq)
    res[before_h] = scale * freq[before_h]
    res[before_l] = low_freq + scale_left * (freq[before_l] - low_freq)
    # Frequencies outside [low_freq, high_freq] pass through unwarped.
    res[outside_low_high_freq] = freq[outside_low_high_freq]
    return res
def vtln_warp_mel_freq(
    vtln_low_cutoff: float,
    vtln_high_cutoff: float,
    low_freq: float,
    high_freq: float,
    vtln_warp_factor: float,
    mel_freq: Tensor,
) -> Tensor:
    r"""Apply ``vtln_warp_freq`` in the mel domain: mel -> Hz -> warp -> mel.
    Args:
        vtln_low_cutoff (float): Lower frequency cutoffs for VTLN
        vtln_high_cutoff (float): Upper frequency cutoffs for VTLN
        low_freq (float): Lower frequency cutoffs in mel computation
        high_freq (float): Upper frequency cutoffs in mel computation
        vtln_warp_factor (float): Vtln warp factor
        mel_freq (Tensor): Given frequency in Mel
    Returns:
        Tensor: ``mel_freq`` after vtln warp
    """
    return mel_scale(
        vtln_warp_freq(
            vtln_low_cutoff, vtln_high_cutoff, low_freq, high_freq, vtln_warp_factor, inverse_mel_scale(mel_freq)
        )
    )
def get_mel_banks(
    num_bins: int,
    window_length_padded: int,
    sample_freq: float,
    low_freq: float,
    high_freq: float,
    vtln_low: float,
    vtln_high: float,
    vtln_warp_factor: float,
    device=None,
    dtype=None,
) -> Tuple[Tensor, Tensor]:
    """
    Build the triangular mel filterbank matrix (optionally VTLN-warped).
    Returns:
        (Tensor, Tensor): The tuple consists of ``bins`` (which is
        melbank of size (``num_bins``, ``num_fft_bins``)) and ``center_freqs`` (which is
        center frequencies of bins of size (``num_bins``)).
    """
    # NOTE(review): the condition requires > 3 (at least 4 bins) while the
    # message says "at least 3"; kept as-is to match the original behavior.
    assert num_bins > 3, "Must have at least 3 mel bins"
    assert window_length_padded % 2 == 0
    # NOTE: true division — num_fft_bins is a float here; torch.arange below
    # accepts it and produces a float range.
    num_fft_bins = window_length_padded / 2
    nyquist = 0.5 * sample_freq
    # high_freq <= 0 means "offset from Nyquist" (e.g. 0.0 -> Nyquist itself).
    if high_freq <= 0.0:
        high_freq += nyquist
    assert (0.0 <= low_freq < nyquist) and (0.0 < high_freq <= nyquist) and (low_freq < high_freq), (
        "Bad values in options: low-freq {} and high-freq {} vs. nyquist {}".format(low_freq, high_freq, nyquist)
    )
    # fft-bin width [think of it as Nyquist-freq / half-window-length]
    fft_bin_width = sample_freq / window_length_padded
    mel_low_freq = mel_scale_scalar(low_freq)
    mel_high_freq = mel_scale_scalar(high_freq)
    # divide by num_bins+1 in next line because of end-effects where the bins
    # spread out to the sides.
    mel_freq_delta = (mel_high_freq - mel_low_freq) / (num_bins + 1)
    # vtln_high < 0 means "offset from Nyquist", mirroring high_freq above.
    if vtln_high < 0.0:
        vtln_high += nyquist
    assert vtln_warp_factor == 1.0 or (
        (low_freq < vtln_low < high_freq) and (0.0 < vtln_high < high_freq) and (vtln_low < vtln_high)
    ), "Bad values in options: vtln-low {} and vtln-high {}, versus low-freq {} and high-freq {}".format(
        vtln_low, vtln_high, low_freq, high_freq
    )
    # Triangle edges in mel space: each bin spans [left_mel, right_mel] and
    # peaks at center_mel.
    bin = torch.arange(num_bins).unsqueeze(1)
    left_mel = mel_low_freq + bin * mel_freq_delta  # size(num_bins, 1)
    center_mel = mel_low_freq + (bin + 1.0) * mel_freq_delta  # size(num_bins, 1)
    right_mel = mel_low_freq + (bin + 2.0) * mel_freq_delta  # size(num_bins, 1)
    if vtln_warp_factor != 1.0:
        left_mel = vtln_warp_mel_freq(vtln_low, vtln_high, low_freq, high_freq, vtln_warp_factor, left_mel)
        center_mel = vtln_warp_mel_freq(vtln_low, vtln_high, low_freq, high_freq, vtln_warp_factor, center_mel)
        right_mel = vtln_warp_mel_freq(vtln_low, vtln_high, low_freq, high_freq, vtln_warp_factor, right_mel)
    # center_freqs = inverse_mel_scale(center_mel)  # size (num_bins)
    # size(1, num_fft_bins)
    mel = mel_scale(fft_bin_width * torch.arange(num_fft_bins)).unsqueeze(0)
    # size (num_bins, num_fft_bins)
    up_slope = (mel - left_mel) / (center_mel - left_mel)
    down_slope = (right_mel - mel) / (right_mel - center_mel)
    if vtln_warp_factor == 1.0:
        # left_mel < center_mel < right_mel so we can min the two slopes and clamp negative values
        bins = torch.max(torch.zeros(1), torch.min(up_slope, down_slope))
    else:
        # warping can move the order of left_mel, center_mel, right_mel anywhere
        bins = torch.zeros_like(up_slope)
        up_idx = torch.gt(mel, left_mel) & torch.le(mel, center_mel)  # left_mel < mel <= center_mel
        down_idx = torch.gt(mel, center_mel) & torch.lt(mel, right_mel)  # center_mel < mel < right_mel
        bins[up_idx] = up_slope[up_idx]
        bins[down_idx] = down_slope[down_idx]
    # NOTE(review): despite the docstring, only `bins` is returned; the
    # center_freqs computation above is commented out upstream.
    return bins.to(device=device, dtype=dtype)  # , center_freqs
# Module-level memo of mel filterbank matrices, keyed by the parameter string
# built in fbank(); avoids recomputing get_mel_banks on every call.
cache = {}
def fbank(
    waveform: Tensor,
    blackman_coeff: float = 0.42,
    channel: int = -1,
    dither: float = 0.0,
    energy_floor: float = 1.0,
    frame_length: float = 25.0,
    frame_shift: float = 10.0,
    high_freq: float = 0.0,
    htk_compat: bool = False,
    low_freq: float = 20.0,
    min_duration: float = 0.0,
    num_mel_bins: int = 23,
    preemphasis_coefficient: float = 0.97,
    raw_energy: bool = True,
    remove_dc_offset: bool = True,
    round_to_power_of_two: bool = True,
    sample_frequency: float = 16000.0,
    snip_edges: bool = True,
    subtract_mean: bool = False,
    use_energy: bool = False,
    use_log_fbank: bool = True,
    use_power: bool = True,
    vtln_high: float = -500.0,
    vtln_low: float = 100.0,
    vtln_warp: float = 1.0,
    window_type: str = POVEY,
) -> Tensor:
    r"""Create a fbank from a raw audio signal. This matches the input/output of Kaldi's
    compute-fbank-feats.
    Args:
        waveform (Tensor): Tensor of audio of size (c, n) where c is in the range [0,2)
        blackman_coeff (float, optional): Constant coefficient for generalized Blackman window. (Default: ``0.42``)
        channel (int, optional): Channel to extract (-1 -> expect mono, 0 -> left, 1 -> right) (Default: ``-1``)
        dither (float, optional): Dithering constant (0.0 means no dither). If you turn this off, you should set
            the energy_floor option, e.g. to 1.0 or 0.1 (Default: ``0.0``)
        energy_floor (float, optional): Floor on energy (absolute, not relative) in Spectrogram computation. Caution:
            this floor is applied to the zeroth component, representing the total signal energy. The floor on the
            individual spectrogram elements is fixed at std::numeric_limits<float>::epsilon(). (Default: ``1.0``)
        frame_length (float, optional): Frame length in milliseconds (Default: ``25.0``)
        frame_shift (float, optional): Frame shift in milliseconds (Default: ``10.0``)
        high_freq (float, optional): High cutoff frequency for mel bins (if <= 0, offset from Nyquist)
            (Default: ``0.0``)
        htk_compat (bool, optional): If true, put energy last. Warning: not sufficient to get HTK compatible features
            (need to change other parameters). (Default: ``False``)
        low_freq (float, optional): Low cutoff frequency for mel bins (Default: ``20.0``)
        min_duration (float, optional): Minimum duration of segments to process (in seconds). (Default: ``0.0``)
        num_mel_bins (int, optional): Number of triangular mel-frequency bins (Default: ``23``)
        preemphasis_coefficient (float, optional): Coefficient for use in signal preemphasis (Default: ``0.97``)
        raw_energy (bool, optional): If True, compute energy before preemphasis and windowing (Default: ``True``)
        remove_dc_offset (bool, optional): Subtract mean from waveform on each frame (Default: ``True``)
        round_to_power_of_two (bool, optional): If True, round window size to power of two by zero-padding input
            to FFT. (Default: ``True``)
        sample_frequency (float, optional): Waveform data sample frequency (must match the waveform file, if
            specified there) (Default: ``16000.0``)
        snip_edges (bool, optional): If True, end effects will be handled by outputting only frames that completely fit
            in the file, and the number of frames depends on the frame_length. If False, the number of frames
            depends only on the frame_shift, and we reflect the data at the ends. (Default: ``True``)
        subtract_mean (bool, optional): Subtract mean of each feature file [CMS]; not recommended to do
            it this way. (Default: ``False``)
        use_energy (bool, optional): Add an extra dimension with energy to the FBANK output. (Default: ``False``)
        use_log_fbank (bool, optional):If true, produce log-filterbank, else produce linear. (Default: ``True``)
        use_power (bool, optional): If true, use power, else use magnitude. (Default: ``True``)
        vtln_high (float, optional): High inflection point in piecewise linear VTLN warping function (if
            negative, offset from high-mel-freq (Default: ``-500.0``)
        vtln_low (float, optional): Low inflection point in piecewise linear VTLN warping function (Default: ``100.0``)
        vtln_warp (float, optional): Vtln warp factor (only applicable if vtln_map not specified) (Default: ``1.0``)
        window_type (str, optional): Type of window ('hamming'|'hanning'|'povey'|'rectangular'|'blackman')
            (Default: ``'povey'``)
    Returns:
        Tensor: A fbank identical to what Kaldi would output. The shape is (m, ``num_mel_bins + use_energy``)
        where m is calculated in _get_strided
    """
    device, dtype = waveform.device, waveform.dtype
    waveform, window_shift, window_size, padded_window_size = _get_waveform_and_window_properties(
        waveform, channel, sample_frequency, frame_shift, frame_length, round_to_power_of_two, preemphasis_coefficient
    )
    if len(waveform) < min_duration * sample_frequency:
        # signal is too short
        return torch.empty(0, device=device, dtype=dtype)
    # strided_input, size (m, padded_window_size) and signal_log_energy, size (m)
    strided_input, signal_log_energy = _get_window(
        waveform,
        padded_window_size,
        window_size,
        window_shift,
        window_type,
        blackman_coeff,
        snip_edges,
        raw_energy,
        energy_floor,
        dither,
        remove_dc_offset,
        preemphasis_coefficient,
    )
    # size (m, padded_window_size // 2 + 1)
    spectrum = torch.fft.rfft(strided_input).abs()
    if use_power:
        spectrum = spectrum.pow(2.0)
    # size (num_mel_bins, padded_window_size // 2)
    # The filterbank depends only on these parameters, so memoize it in the
    # module-level `cache`, keyed by every parameter plus device/dtype.
    cache_key = "%s-%s-%s-%s-%s-%s-%s-%s-%s-%s" % (
        num_mel_bins,
        padded_window_size,
        sample_frequency,
        low_freq,
        high_freq,
        vtln_low,
        vtln_high,
        vtln_warp,
        device,
        dtype,
    )
    if cache_key not in cache:
        mel_energies = get_mel_banks(
            num_mel_bins,
            padded_window_size,
            sample_frequency,
            low_freq,
            high_freq,
            vtln_low,
            vtln_high,
            vtln_warp,
            device,
            dtype,
        )
        cache[cache_key] = mel_energies
    else:
        mel_energies = cache[cache_key]
    # pad right column with zeros and add dimension, size (num_mel_bins, padded_window_size // 2 + 1)
    mel_energies = torch.nn.functional.pad(mel_energies, (0, 1), mode="constant", value=0)
    # sum with mel filterbanks over the power spectrum, size (m, num_mel_bins)
    mel_energies = torch.mm(spectrum, mel_energies.T)
    if use_log_fbank:
        # avoid log of zero (which should be prevented anyway by dithering)
        mel_energies = torch.max(mel_energies, _get_epsilon(device, dtype)).log()
    # if use_energy then add it as the last column for htk_compat == true else first column
    if use_energy:
        signal_log_energy = signal_log_energy.unsqueeze(1)  # size (m, 1)
        # returns size (m, num_mel_bins + 1)
        if htk_compat:
            mel_energies = torch.cat((mel_energies, signal_log_energy), dim=1)
        else:
            mel_energies = torch.cat((signal_log_energy, mel_energies), dim=1)
    mel_energies = _subtract_column_mean(mel_energies, subtract_mean)
    return mel_energies
def _get_dct_matrix(num_ceps: int, num_mel_bins: int) -> Tensor:
    r"""Build the Kaldi-style DCT matrix, size (num_mel_bins, num_ceps)."""
    # Start from a full orthonormal DCT matrix, size (num_mel_bins, num_mel_bins).
    dct_matrix = torchaudio.functional.create_dct(num_mel_bins, num_mel_bins, "ortho")
    # Kaldi expects the first cepstral coefficient to be a plain weighted sum
    # with factor sqrt(1/num_mel_bins). torchaudio right-multiplies, so that
    # weight lives in the first *column* here (Kaldi left-multiplies and puts
    # it in the first row of its matrix).
    dct_matrix[:, 0] = math.sqrt(1 / float(num_mel_bins))
    # Keep only the first num_ceps basis vectors.
    return dct_matrix[:, :num_ceps]
def _get_lifter_coeffs(num_ceps: int, cepstral_lifter: float) -> Tensor:
# returns size (num_ceps)
# Compute liftering coefficients (scaling on cepstral coeffs)
# coeffs are numbered slightly differently from HTK: the zeroth index is C0, which is not affected.
i = torch.arange(num_ceps)
return 1.0 + 0.5 * cepstral_lifter * torch.sin(math.pi * i / cepstral_lifter)
def mfcc(
    waveform: Tensor,
    blackman_coeff: float = 0.42,
    cepstral_lifter: float = 22.0,
    channel: int = -1,
    dither: float = 0.0,
    energy_floor: float = 1.0,
    frame_length: float = 25.0,
    frame_shift: float = 10.0,
    high_freq: float = 0.0,
    htk_compat: bool = False,
    low_freq: float = 20.0,
    num_ceps: int = 13,
    min_duration: float = 0.0,
    num_mel_bins: int = 23,
    preemphasis_coefficient: float = 0.97,
    raw_energy: bool = True,
    remove_dc_offset: bool = True,
    round_to_power_of_two: bool = True,
    sample_frequency: float = 16000.0,
    snip_edges: bool = True,
    subtract_mean: bool = False,
    use_energy: bool = False,
    vtln_high: float = -500.0,
    vtln_low: float = 100.0,
    vtln_warp: float = 1.0,
    window_type: str = POVEY,
) -> Tensor:
    r"""Create a mfcc from a raw audio signal. This matches the input/output of Kaldi's
    compute-mfcc-feats.
    Args:
        waveform (Tensor): Tensor of audio of size (c, n) where c is in the range [0,2)
        blackman_coeff (float, optional): Constant coefficient for generalized Blackman window. (Default: ``0.42``)
        cepstral_lifter (float, optional): Constant that controls scaling of MFCCs (Default: ``22.0``)
        channel (int, optional): Channel to extract (-1 -> expect mono, 0 -> left, 1 -> right) (Default: ``-1``)
        dither (float, optional): Dithering constant (0.0 means no dither). If you turn this off, you should set
            the energy_floor option, e.g. to 1.0 or 0.1 (Default: ``0.0``)
        energy_floor (float, optional): Floor on energy (absolute, not relative) in Spectrogram computation. Caution:
            this floor is applied to the zeroth component, representing the total signal energy. The floor on the
            individual spectrogram elements is fixed at std::numeric_limits<float>::epsilon(). (Default: ``1.0``)
        frame_length (float, optional): Frame length in milliseconds (Default: ``25.0``)
        frame_shift (float, optional): Frame shift in milliseconds (Default: ``10.0``)
        high_freq (float, optional): High cutoff frequency for mel bins (if <= 0, offset from Nyquist)
            (Default: ``0.0``)
        htk_compat (bool, optional): If true, put energy last. Warning: not sufficient to get HTK compatible
            features (need to change other parameters). (Default: ``False``)
        low_freq (float, optional): Low cutoff frequency for mel bins (Default: ``20.0``)
        num_ceps (int, optional): Number of cepstra in MFCC computation (including C0) (Default: ``13``)
        min_duration (float, optional): Minimum duration of segments to process (in seconds). (Default: ``0.0``)
        num_mel_bins (int, optional): Number of triangular mel-frequency bins (Default: ``23``)
        preemphasis_coefficient (float, optional): Coefficient for use in signal preemphasis (Default: ``0.97``)
        raw_energy (bool, optional): If True, compute energy before preemphasis and windowing (Default: ``True``)
        remove_dc_offset (bool, optional): Subtract mean from waveform on each frame (Default: ``True``)
        round_to_power_of_two (bool, optional): If True, round window size to power of two by zero-padding input
            to FFT. (Default: ``True``)
        sample_frequency (float, optional): Waveform data sample frequency (must match the waveform file, if
            specified there) (Default: ``16000.0``)
        snip_edges (bool, optional): If True, end effects will be handled by outputting only frames that completely fit
            in the file, and the number of frames depends on the frame_length. If False, the number of frames
            depends only on the frame_shift, and we reflect the data at the ends. (Default: ``True``)
        subtract_mean (bool, optional): Subtract mean of each feature file [CMS]; not recommended to do
            it this way. (Default: ``False``)
        use_energy (bool, optional): Add an extra dimension with energy to the FBANK output. (Default: ``False``)
        vtln_high (float, optional): High inflection point in piecewise linear VTLN warping function (if
            negative, offset from high-mel-freq (Default: ``-500.0``)
        vtln_low (float, optional): Low inflection point in piecewise linear VTLN warping function (Default: ``100.0``)
        vtln_warp (float, optional): Vtln warp factor (only applicable if vtln_map not specified) (Default: ``1.0``)
        window_type (str, optional): Type of window ('hamming'|'hanning'|'povey'|'rectangular'|'blackman')
            (Default: ``"povey"``)
    Returns:
        Tensor: A mfcc identical to what Kaldi would output. The shape is (m, ``num_ceps``)
        where m is calculated in _get_strided
    """
    assert num_ceps <= num_mel_bins, "num_ceps cannot be larger than num_mel_bins: %d vs %d" % (num_ceps, num_mel_bins)
    device, dtype = waveform.device, waveform.dtype
    # fbank is called with use_power=True, use_log_fbank=True and
    # subtract_mean=False, as Kaldi's MFCC pipeline requires; any requested
    # mean subtraction is applied at the end, on the cepstra.
    # size (m, num_mel_bins + use_energy)
    feature = fbank(
        waveform=waveform,
        blackman_coeff=blackman_coeff,
        channel=channel,
        dither=dither,
        energy_floor=energy_floor,
        frame_length=frame_length,
        frame_shift=frame_shift,
        high_freq=high_freq,
        htk_compat=htk_compat,
        low_freq=low_freq,
        min_duration=min_duration,
        num_mel_bins=num_mel_bins,
        preemphasis_coefficient=preemphasis_coefficient,
        raw_energy=raw_energy,
        remove_dc_offset=remove_dc_offset,
        round_to_power_of_two=round_to_power_of_two,
        sample_frequency=sample_frequency,
        snip_edges=snip_edges,
        subtract_mean=False,
        use_energy=use_energy,
        use_log_fbank=True,
        use_power=True,
        vtln_high=vtln_high,
        vtln_low=vtln_low,
        vtln_warp=vtln_warp,
        window_type=window_type,
    )
    if use_energy:
        # size (m)
        signal_log_energy = feature[:, num_mel_bins if htk_compat else 0]
        # offset is 0 if htk_compat==True else 1
        mel_offset = int(not htk_compat)
        feature = feature[:, mel_offset : (num_mel_bins + mel_offset)]
    # size (num_mel_bins, num_ceps)
    dct_matrix = _get_dct_matrix(num_ceps, num_mel_bins).to(dtype=dtype, device=device)
    # size (m, num_ceps)
    feature = feature.matmul(dct_matrix)
    if cepstral_lifter != 0.0:
        # size (1, num_ceps)
        lifter_coeffs = _get_lifter_coeffs(num_ceps, cepstral_lifter).unsqueeze(0)
        feature *= lifter_coeffs.to(device=device, dtype=dtype)
    # if use_energy then replace the last column for htk_compat == true else first column
    if use_energy:
        feature[:, 0] = signal_log_energy
    if htk_compat:
        energy = feature[:, 0].unsqueeze(1)  # size (m, 1)
        feature = feature[:, 1:]  # size (m, num_ceps - 1)
        if not use_energy:
            # scale on C0 (actually removing a scale we previously added that's
            # part of one common definition of the cosine transform.)
            energy *= math.sqrt(2)
        feature = torch.cat((feature, energy), dim=1)
    feature = _subtract_column_mean(feature, subtract_mean)
    return feature
| {
"repo_id": "RVC-Boss/GPT-SoVITS",
"file_path": "GPT_SoVITS/eres2net/kaldi.py",
"license": "MIT License",
"lines": 731,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
RVC-Boss/GPT-SoVITS:GPT_SoVITS/sv.py | import sys
import os
import torch
sys.path.append(f"{os.getcwd()}/GPT_SoVITS/eres2net")
sv_path = "GPT_SoVITS/pretrained_models/sv/pretrained_eres2netv2w24s4ep4.ckpt"
from ERes2NetV2 import ERes2NetV2
import kaldi as Kaldi
class SV:
    """Speaker-verification embedding extractor backed by ERes2NetV2."""

    def __init__(self, device, is_half):
        """Load the pretrained checkpoint and move the model to *device*.

        Args:
            device: torch device the model should run on.
            is_half: if True, run the model in float16.
        """
        # NOTE(review): weights_only=False unpickles arbitrary objects; only
        # acceptable because the checkpoint ships with the project.
        pretrained_state = torch.load(sv_path, map_location="cpu", weights_only=False)
        embedding_model = ERes2NetV2(baseWidth=24, scale=4, expansion=4)
        embedding_model.load_state_dict(pretrained_state)
        embedding_model.eval()
        self.embedding_model = embedding_model
        # Idiomatic truthiness instead of `== False` / `== True` comparisons.
        if not is_half:
            self.embedding_model = self.embedding_model.to(device)
        else:
            self.embedding_model = self.embedding_model.half().to(device)
        self.is_half = is_half

    def compute_embedding3(self, wav):
        """Return speaker embeddings for a batch of waveforms.

        Args:
            wav: tensor of shape (batch, num_samples); presumably 16 kHz audio,
                matching the fbank sample_frequency below — TODO confirm.
        """
        with torch.no_grad():
            if self.is_half:
                wav = wav.half()
            # 80-dim log-mel fbank per utterance, stacked to (batch, m, 80).
            feat = torch.stack(
                [Kaldi.fbank(wav0.unsqueeze(0), num_mel_bins=80, sample_frequency=16000, dither=0) for wav0 in wav]
            )
            sv_emb = self.embedding_model.forward3(feat)
        return sv_emb
| {
"repo_id": "RVC-Boss/GPT-SoVITS",
"file_path": "GPT_SoVITS/sv.py",
"license": "MIT License",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ScrapeGraphAI/Scrapegraph-ai:tests/fixtures/benchmarking.py | """
Performance benchmarking framework for ScrapeGraphAI.
This module provides utilities for:
- Measuring execution time
- Tracking token usage
- Monitoring API calls
- Generating performance reports
- Comparing performance across runs
"""
import json
import statistics
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional
import pytest
@dataclass
class BenchmarkResult:
"""Results from a single benchmark run."""
test_name: str
execution_time: float
memory_usage: Optional[float] = None
token_usage: Optional[int] = None
api_calls: int = 0
success: bool = True
error: Optional[str] = None
metadata: Dict[str, Any] = field(default_factory=dict)
@dataclass
class BenchmarkSummary:
"""Summary statistics for multiple benchmark runs."""
test_name: str
num_runs: int
mean_time: float
median_time: float
std_dev: float
min_time: float
max_time: float
success_rate: float
total_tokens: Optional[int] = None
total_api_calls: int = 0
class BenchmarkTracker:
"""Track and analyze benchmark results."""
def __init__(self, output_dir: Optional[Path] = None):
"""Initialize the benchmark tracker.
Args:
output_dir: Directory to save benchmark results
"""
self.output_dir = output_dir or Path("benchmark_results")
self.output_dir.mkdir(exist_ok=True)
self.results: List[BenchmarkResult] = []
def record(self, result: BenchmarkResult):
"""Record a benchmark result."""
self.results.append(result)
def get_summary(self, test_name: str) -> Optional[BenchmarkSummary]:
"""Get summary statistics for a specific test.
Args:
test_name: Name of the test
Returns:
BenchmarkSummary if results exist, None otherwise
"""
test_results = [r for r in self.results if r.test_name == test_name]
if not test_results:
return None
times = [r.execution_time for r in test_results]
successes = [r.success for r in test_results]
tokens = [r.token_usage for r in test_results if r.token_usage is not None]
api_calls = sum(r.api_calls for r in test_results)
return BenchmarkSummary(
test_name=test_name,
num_runs=len(test_results),
mean_time=statistics.mean(times),
median_time=statistics.median(times),
std_dev=statistics.stdev(times) if len(times) > 1 else 0.0,
min_time=min(times),
max_time=max(times),
success_rate=sum(successes) / len(successes),
total_tokens=sum(tokens) if tokens else None,
total_api_calls=api_calls,
)
def save_results(self, filename: str = "benchmark_results.json"):
"""Save all benchmark results to a JSON file.
Args:
filename: Name of the output file
"""
filepath = self.output_dir / filename
data = {
"results": [
{
"test_name": r.test_name,
"execution_time": r.execution_time,
"memory_usage": r.memory_usage,
"token_usage": r.token_usage,
"api_calls": r.api_calls,
"success": r.success,
"error": r.error,
"metadata": r.metadata,
}
for r in self.results
]
}
with open(filepath, "w") as f:
json.dump(data, f, indent=2)
def generate_report(self) -> str:
"""Generate a human-readable performance report.
Returns:
Formatted report string
"""
if not self.results:
return "No benchmark results available."
# Get unique test names
test_names = list({r.test_name for r in self.results})
report = ["=" * 80, "Performance Benchmark Report", "=" * 80, ""]
for test_name in sorted(test_names):
summary = self.get_summary(test_name)
if not summary:
continue
report.append(f"\n{test_name}")
report.append("-" * 80)
report.append(f" Runs: {summary.num_runs}")
report.append(f" Mean Time: {summary.mean_time:.4f}s")
report.append(f" Median Time: {summary.median_time:.4f}s")
report.append(f" Std Dev: {summary.std_dev:.4f}s")
report.append(f" Min Time: {summary.min_time:.4f}s")
report.append(f" Max Time: {summary.max_time:.4f}s")
report.append(f" Success Rate: {summary.success_rate * 100:.1f}%")
if summary.total_tokens:
report.append(f" Total Tokens: {summary.total_tokens}")
if summary.total_api_calls:
report.append(f" API Calls: {summary.total_api_calls}")
report.append("\n" + "=" * 80)
return "\n".join(report)
def benchmark(
func: Callable,
name: Optional[str] = None,
warmup_runs: int = 1,
test_runs: int = 3,
tracker: Optional[BenchmarkTracker] = None,
) -> BenchmarkSummary:
"""Benchmark a function with multiple runs.
Args:
func: Function to benchmark
name: Name for the benchmark (defaults to function name)
warmup_runs: Number of warmup runs to discard
test_runs: Number of actual test runs to measure
tracker: Optional BenchmarkTracker to record results
Returns:
BenchmarkSummary with statistics
"""
test_name = name or func.__name__
local_tracker = tracker or BenchmarkTracker()
# Warmup runs
for _ in range(warmup_runs):
try:
func()
except Exception:
pass
# Test runs
for run in range(test_runs):
start_time = time.perf_counter()
success = True
error = None
try:
result = func()
# Try to extract metadata if result is dict-like
metadata = {}
if isinstance(result, dict):
metadata = result.get("metadata", {})
except Exception as e:
success = False
error = str(e)
metadata = {}
end_time = time.perf_counter()
execution_time = end_time - start_time
benchmark_result = BenchmarkResult(
test_name=test_name,
execution_time=execution_time,
success=success,
error=error,
metadata=metadata,
)
local_tracker.record(benchmark_result)
return local_tracker.get_summary(test_name)
@pytest.fixture
def benchmark_tracker():
"""Pytest fixture for benchmark tracking."""
tracker = BenchmarkTracker()
yield tracker
# Save results after test completes
tracker.save_results()
def pytest_benchmark_compare(baseline_file: Path, current_file: Path) -> Dict[str, Any]:
"""Compare current benchmark results against a baseline.
Args:
baseline_file: Path to baseline results JSON
current_file: Path to current results JSON
Returns:
Dictionary with comparison results
"""
with open(baseline_file) as f:
baseline = json.load(f)
with open(current_file) as f:
current = json.load(f)
# Create lookup for baseline results
baseline_by_name = {r["test_name"]: r for r in baseline["results"]}
comparison = {"regressions": [], "improvements": [], "new_tests": []}
for current_result in current["results"]:
test_name = current_result["test_name"]
if test_name not in baseline_by_name:
comparison["new_tests"].append(test_name)
continue
baseline_result = baseline_by_name[test_name]
current_time = current_result["execution_time"]
baseline_time = baseline_result["execution_time"]
# Calculate percentage change
change_pct = ((current_time - baseline_time) / baseline_time) * 100
# Threshold for regression (e.g., 10% slower)
regression_threshold = 10.0
if change_pct > regression_threshold:
comparison["regressions"].append(
{
"test_name": test_name,
"baseline_time": baseline_time,
"current_time": current_time,
"change_pct": change_pct,
}
)
elif change_pct < -regression_threshold:
comparison["improvements"].append(
{
"test_name": test_name,
"baseline_time": baseline_time,
"current_time": current_time,
"change_pct": change_pct,
}
)
return comparison
| {
"repo_id": "ScrapeGraphAI/Scrapegraph-ai",
"file_path": "tests/fixtures/benchmarking.py",
"license": "MIT License",
"lines": 235,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ScrapeGraphAI/Scrapegraph-ai:tests/fixtures/helpers.py | """
Test utilities and helpers for ScrapeGraphAI tests.
This module provides:
- Assertion helpers
- Data validation utilities
- Mock response builders
- Test data generators
"""
import json
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
from unittest.mock import Mock
# ============================================================================
# Assertion Helpers
# ============================================================================
def assert_valid_scrape_result(result: Any, expected_keys: Optional[List[str]] = None):
"""Assert that a scraping result is valid.
Args:
result: The scraping result to validate
expected_keys: Optional list of keys that should be present
"""
assert result is not None, "Result should not be None"
assert isinstance(result, (dict, str)), f"Result should be dict or str, got {type(result)}"
if isinstance(result, dict) and expected_keys:
for key in expected_keys:
assert key in result, f"Expected key '{key}' not found in result"
def assert_execution_info_valid(exec_info: Dict[str, Any]):
"""Assert that execution info is valid and contains expected fields.
Args:
exec_info: Execution info dictionary
"""
assert exec_info is not None, "Execution info should not be None"
assert isinstance(exec_info, dict), "Execution info should be a dictionary"
def assert_response_time_acceptable(execution_time: float, max_time: float = 30.0):
"""Assert that response time is within acceptable limits.
Args:
execution_time: Actual execution time in seconds
max_time: Maximum acceptable time in seconds
"""
assert (
execution_time <= max_time
), f"Execution time {execution_time}s exceeded maximum {max_time}s"
def assert_no_errors_in_result(result: Union[Dict, str]):
"""Assert that the result doesn't contain common error indicators.
Args:
result: The result to check
"""
result_str = json.dumps(result) if isinstance(result, dict) else str(result)
error_indicators = [
"error",
"exception",
"failed",
"timeout",
"rate limit",
]
for indicator in error_indicators:
assert indicator.lower() not in result_str.lower(), (
f"Result contains error indicator: {indicator}"
)
# ============================================================================
# Mock Response Builders
# ============================================================================
def create_mock_llm_response(content: str, **kwargs) -> Mock:
"""Create a mock LLM response.
Args:
content: Response content
**kwargs: Additional response attributes
Returns:
Mock response object
"""
mock = Mock()
mock.content = content
mock.response_metadata = kwargs.get("metadata", {})
mock.__str__ = lambda: content
return mock
def create_mock_graph_result(
answer: Any = None,
exec_info: Optional[Dict] = None,
error: Optional[str] = None,
) -> tuple:
"""Create a mock graph execution result.
Args:
answer: The answer/result
exec_info: Execution info dictionary
error: Optional error message
Returns:
Tuple of (state, exec_info)
"""
state = {}
if answer is not None:
state["answer"] = answer
if error:
state["error"] = error
info = exec_info or {}
return (state, info)
# ============================================================================
# Data Generators
# ============================================================================
def generate_test_html(
title: str = "Test Page",
num_items: int = 3,
item_template: str = "Item {n}",
) -> str:
"""Generate test HTML with customizable content.
Args:
title: Page title
num_items: Number of list items to generate
item_template: Template for item text (use {n} for number)
Returns:
HTML string
"""
items = "\n".join(
[f"<li>{item_template.format(n=i+1)}</li>" for i in range(num_items)]
)
return f"""
<!DOCTYPE html>
<html>
<head><title>{title}</title></head>
<body>
<h1>{title}</h1>
<ul>{items}</ul>
</body>
</html>
"""
def generate_test_json(num_records: int = 3) -> Dict[str, Any]:
"""Generate test JSON data.
Args:
num_records: Number of records to generate
Returns:
Dictionary with test data
"""
return {
"items": [
{
"id": i + 1,
"name": f"Item {i + 1}",
"description": f"Description for item {i + 1}",
"value": (i + 1) * 10,
}
for i in range(num_records)
],
"total": num_records,
}
def generate_test_csv(num_rows: int = 3) -> str:
"""Generate test CSV data.
Args:
num_rows: Number of data rows to generate
Returns:
CSV string
"""
header = "id,name,value"
rows = [f"{i+1},Item {i+1},{(i+1)*10}" for i in range(num_rows)]
return header + "\n" + "\n".join(rows)
# ============================================================================
# Validation Utilities
# ============================================================================
def validate_schema_match(data: Dict, schema_class) -> bool:
"""Validate that data matches a Pydantic schema.
Args:
data: Data to validate
schema_class: Pydantic schema class
Returns:
True if valid, False otherwise
"""
try:
schema_class(**data)
return True
except Exception:
return False
def validate_extracted_fields(
result: Dict, required_fields: List[str], min_values: int = 1
) -> bool:
"""Validate that required fields were extracted with minimum values.
Args:
result: Extraction result
required_fields: List of required field names
min_values: Minimum number of values per field
Returns:
True if validation passes
"""
for field in required_fields:
if field not in result:
return False
value = result[field]
if isinstance(value, list) and len(value) < min_values:
return False
return True
# ============================================================================
# File Utilities
# ============================================================================
def load_test_fixture(fixture_name: str, fixture_dir: Optional[Path] = None) -> str:
"""Load a test fixture file.
Args:
fixture_name: Name of the fixture file
fixture_dir: Directory containing fixtures (defaults to tests/fixtures)
Returns:
File contents as string
"""
if fixture_dir is None:
fixture_dir = Path(__file__).parent
fixture_path = fixture_dir / fixture_name
return fixture_path.read_text()
def save_test_output(
content: str, filename: str, output_dir: Optional[Path] = None
):
"""Save test output to a file for debugging.
Args:
content: Content to save
filename: Output filename
output_dir: Output directory (defaults to tests/output)
"""
if output_dir is None:
output_dir = Path(__file__).parent.parent / "output"
output_dir.mkdir(exist_ok=True)
output_path = output_dir / filename
output_path.write_text(content)
# ============================================================================
# Comparison Utilities
# ============================================================================
def compare_results(result1: Dict, result2: Dict, ignore_keys: Optional[List[str]] = None) -> bool:
"""Compare two scraping results, optionally ignoring certain keys.
Args:
result1: First result
result2: Second result
ignore_keys: Keys to ignore in comparison
Returns:
True if results match
"""
ignore_keys = ignore_keys or []
# Create copies and remove ignored keys
r1 = {k: v for k, v in result1.items() if k not in ignore_keys}
r2 = {k: v for k, v in result2.items() if k not in ignore_keys}
return r1 == r2
def fuzzy_match_strings(str1: str, str2: str, threshold: float = 0.8) -> bool:
"""Check if two strings are similar enough.
Args:
str1: First string
str2: Second string
threshold: Similarity threshold (0-1)
Returns:
True if strings are similar enough
"""
# Simple implementation using character overlap
# For production, consider using libraries like difflib or fuzzywuzzy
set1 = set(str1.lower().split())
set2 = set(str2.lower().split())
if not set1 and not set2:
return True
if not set1 or not set2:
return False
overlap = len(set1.intersection(set2))
total = len(set1.union(set2))
similarity = overlap / total if total > 0 else 0
return similarity >= threshold
# ============================================================================
# Rate Limiting Utilities
# ============================================================================
class RateLimitHelper:
"""Helper for testing rate limiting behavior."""
def __init__(self, max_requests: int, time_window: float):
"""Initialize rate limit helper.
Args:
max_requests: Maximum number of requests allowed
time_window: Time window in seconds
"""
self.max_requests = max_requests
self.time_window = time_window
self.requests = []
def can_make_request(self) -> bool:
"""Check if a new request can be made.
Returns:
True if request is allowed
"""
import time
now = time.time()
# Remove old requests outside the time window
self.requests = [r for r in self.requests if now - r < self.time_window]
return len(self.requests) < self.max_requests
def record_request(self):
"""Record a new request."""
import time
self.requests.append(time.time())
# ============================================================================
# Retry Utilities
# ============================================================================
def retry_with_backoff(
func,
max_retries: int = 3,
initial_delay: float = 1.0,
backoff_factor: float = 2.0,
):
"""Retry a function with exponential backoff.
Args:
func: Function to retry
max_retries: Maximum number of retry attempts
initial_delay: Initial delay in seconds
backoff_factor: Multiplier for delay on each retry
Returns:
Function result
Raises:
Last exception if all retries fail
"""
import time
delay = initial_delay
last_exception = None
for attempt in range(max_retries + 1):
try:
return func()
except Exception as e:
last_exception = e
if attempt < max_retries:
time.sleep(delay)
delay *= backoff_factor
else:
raise last_exception
| {
"repo_id": "ScrapeGraphAI/Scrapegraph-ai",
"file_path": "tests/fixtures/helpers.py",
"license": "MIT License",
"lines": 315,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ScrapeGraphAI/Scrapegraph-ai:tests/fixtures/mock_server/server.py | """
Mock HTTP server for consistent testing without external dependencies.
This server provides:
- Static HTML pages with predictable content
- JSON/XML/CSV endpoints
- Rate limiting simulation
- Error condition simulation
- Dynamic content generation
"""
import json
import time
from http.server import BaseHTTPRequestHandler, HTTPServer
from threading import Thread
from typing import Dict, Optional
from urllib.parse import parse_qs, urlparse
class MockHTTPRequestHandler(BaseHTTPRequestHandler):
"""Request handler for the mock HTTP server."""
# Track request count for rate limiting simulation
request_count: Dict[str, int] = {}
def log_message(self, format, *args):
"""Suppress default logging."""
pass
def do_GET(self):
"""Handle GET requests."""
parsed_path = urlparse(self.path)
path = parsed_path.path
query_params = parse_qs(parsed_path.query)
# Route requests
if path == "/":
self._serve_home()
elif path == "/products":
self._serve_products()
elif path == "/projects":
self._serve_projects()
elif path == "/api/data.json":
self._serve_json_data()
elif path == "/api/data.xml":
self._serve_xml_data()
elif path == "/api/data.csv":
self._serve_csv_data()
elif path == "/slow":
self._serve_slow_response()
elif path == "/error/404":
self._serve_404()
elif path == "/error/500":
self._serve_500()
elif path == "/rate-limited":
self._serve_rate_limited()
elif path == "/dynamic":
self._serve_dynamic_content()
elif path == "/pagination":
self._serve_pagination(query_params)
else:
self._serve_404()
def _serve_home(self):
"""Serve home page."""
html = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Mock Test Website</title>
</head>
<body>
<h1>Welcome to Mock Test Website</h1>
<p>This is a test website for ScrapeGraphAI testing.</p>
<nav>
<a href="/products">Products</a>
<a href="/projects">Projects</a>
</nav>
</body>
</html>
"""
self._send_html_response(html)
def _serve_products(self):
"""Serve products page."""
html = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Products</title>
</head>
<body>
<h1>Our Products</h1>
<div class="products">
<article class="product" data-id="1">
<h2 class="product-name">Product Alpha</h2>
<p class="product-description">High-quality product for testing</p>
<span class="product-price">$99.99</span>
<span class="product-stock">In Stock</span>
</article>
<article class="product" data-id="2">
<h2 class="product-name">Product Beta</h2>
<p class="product-description">Another great product</p>
<span class="product-price">$149.99</span>
<span class="product-stock">Limited Stock</span>
</article>
<article class="product" data-id="3">
<h2 class="product-name">Product Gamma</h2>
<p class="product-description">Premium product option</p>
<span class="product-price">$199.99</span>
<span class="product-stock">Out of Stock</span>
</article>
</div>
</body>
</html>
"""
self._send_html_response(html)
def _serve_projects(self):
"""Serve projects page."""
html = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Projects</title>
</head>
<body>
<h1>Our Projects</h1>
<div class="projects">
<article class="project">
<h2>Project Alpha</h2>
<p class="description">A comprehensive web scraping solution</p>
<a href="https://github.com/example/alpha">GitHub</a>
</article>
<article class="project">
<h2>Project Beta</h2>
<p class="description">AI-powered data extraction tool</p>
<a href="https://github.com/example/beta">GitHub</a>
</article>
</div>
</body>
</html>
"""
self._send_html_response(html)
def _serve_json_data(self):
"""Serve JSON endpoint."""
data = {
"company": "Test Company",
"description": "A mock company for testing",
"employees": [
{"name": "Alice", "role": "Engineer", "department": "Engineering"},
{"name": "Bob", "role": "Designer", "department": "Design"},
{"name": "Charlie", "role": "Manager", "department": "Operations"},
],
"founded": "2020",
"location": "San Francisco",
}
self._send_json_response(data)
def _serve_xml_data(self):
"""Serve XML endpoint."""
xml = """<?xml version="1.0" encoding="UTF-8"?>
<company>
<name>Test Company</name>
<description>A mock company for testing</description>
<employees>
<employee>
<name>Alice</name>
<role>Engineer</role>
</employee>
<employee>
<name>Bob</name>
<role>Designer</role>
</employee>
</employees>
</company>
"""
self._send_xml_response(xml)
def _serve_csv_data(self):
"""Serve CSV endpoint."""
csv = """name,role,department
Alice,Engineer,Engineering
Bob,Designer,Design
Charlie,Manager,Operations"""
self._send_csv_response(csv)
def _serve_slow_response(self):
"""Simulate a slow response."""
time.sleep(2) # 2 second delay
self._send_html_response("<html><body><h1>Slow Response</h1></body></html>")
def _serve_404(self):
"""Serve 404 error."""
self.send_response(404)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(b"<html><body><h1>404 Not Found</h1></body></html>")
def _serve_500(self):
"""Serve 500 error."""
self.send_response(500)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(b"<html><body><h1>500 Internal Server Error</h1></body></html>")
def _serve_rate_limited(self):
"""Simulate rate limiting."""
client_ip = self.client_address[0]
self.request_count[client_ip] = self.request_count.get(client_ip, 0) + 1
if self.request_count[client_ip] > 5:
self.send_response(429)
self.send_header("Content-type", "text/html")
self.send_header("Retry-After", "60")
self.end_headers()
self.wfile.write(b"<html><body><h1>429 Too Many Requests</h1></body></html>")
else:
self._send_html_response("<html><body><h1>Rate Limited Endpoint</h1></body></html>")
def _serve_dynamic_content(self):
"""Serve dynamically generated content."""
timestamp = int(time.time())
html = f"""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Dynamic Content</title>
</head>
<body>
<h1>Dynamic Content</h1>
<p class="timestamp">Generated at: {timestamp}</p>
<p class="random-data">Random value: {timestamp % 1000}</p>
</body>
</html>
"""
self._send_html_response(html)
def _serve_pagination(self, query_params):
"""Serve paginated content."""
page = int(query_params.get("page", ["1"])[0])
per_page = 10
total_items = 50
items = []
start = (page - 1) * per_page
end = min(start + per_page, total_items)
for i in range(start, end):
items.append(f'<li class="item">Item {i + 1}</li>')
next_page = page + 1 if end < total_items else None
prev_page = page - 1 if page > 1 else None
html = f"""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Pagination - Page {page}</title>
</head>
<body>
<h1>Paginated Content - Page {page}</h1>
<ul class="items">
{''.join(items)}
</ul>
<nav class="pagination">
{f'<a href="/pagination?page={prev_page}">Previous</a>' if prev_page else ''}
<span>Page {page}</span>
{f'<a href="/pagination?page={next_page}">Next</a>' if next_page else ''}
</nav>
</body>
</html>
"""
self._send_html_response(html)
def _send_html_response(self, html: str, status: int = 200):
"""Send HTML response."""
self.send_response(status)
self.send_header("Content-type", "text/html; charset=utf-8")
self.end_headers()
self.wfile.write(html.encode("utf-8"))
def _send_json_response(self, data: dict, status: int = 200):
"""Send JSON response."""
self.send_response(status)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(json.dumps(data).encode("utf-8"))
def _send_xml_response(self, xml: str, status: int = 200):
"""Send XML response."""
self.send_response(status)
self.send_header("Content-type", "application/xml")
self.end_headers()
self.wfile.write(xml.encode("utf-8"))
def _send_csv_response(self, csv: str, status: int = 200):
"""Send CSV response."""
self.send_response(status)
self.send_header("Content-type", "text/csv")
self.end_headers()
self.wfile.write(csv.encode("utf-8"))
class MockHTTPServer:
"""Mock HTTP server for testing."""
def __init__(self, host: str = "localhost", port: int = 8888):
self.host = host
self.port = port
self.server: Optional[HTTPServer] = None
self.thread: Optional[Thread] = None
def start(self):
"""Start the mock server in a background thread."""
self.server = HTTPServer((self.host, self.port), MockHTTPRequestHandler)
self.thread = Thread(target=self.server.serve_forever, daemon=True)
self.thread.start()
time.sleep(0.1) # Give server time to start
def stop(self):
"""Stop the mock server."""
if self.server:
self.server.shutdown()
self.server.server_close()
if self.thread:
self.thread.join(timeout=1)
def get_url(self, path: str = "") -> str:
"""Get full URL for a given path."""
return f"http://{self.host}:{self.port}{path}"
def __enter__(self):
"""Context manager entry."""
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Context manager exit."""
self.stop()
| {
"repo_id": "ScrapeGraphAI/Scrapegraph-ai",
"file_path": "tests/fixtures/mock_server/server.py",
"license": "MIT License",
"lines": 309,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ScrapeGraphAI/Scrapegraph-ai:tests/integration/test_file_formats_integration.py | """
Integration tests for different file format scrapers.
Tests for:
- JSONScraperGraph
- XMLScraperGraph
- CSVScraperGraph
"""
import pytest
from scrapegraphai.graphs import (
CSVScraperGraph,
JSONScraperGraph,
XMLScraperGraph,
)
from tests.fixtures.helpers import assert_valid_scrape_result
@pytest.mark.integration
@pytest.mark.requires_api_key
class TestJSONScraperIntegration:
"""Integration tests for JSONScraperGraph."""
def test_scrape_json_file(self, openai_config, temp_json_file):
"""Test scraping a JSON file."""
scraper = JSONScraperGraph(
prompt="What is the company name and location?",
source=temp_json_file,
config=openai_config,
)
result = scraper.run()
assert_valid_scrape_result(result)
def test_scrape_json_url(self, openai_config, mock_server):
"""Test scraping JSON from a URL."""
url = mock_server.get_url("/api/data.json")
scraper = JSONScraperGraph(
prompt="List all employees and their roles",
source=url,
config=openai_config,
)
result = scraper.run()
assert_valid_scrape_result(result)
@pytest.mark.integration
@pytest.mark.requires_api_key
class TestXMLScraperIntegration:
"""Integration tests for XMLScraperGraph."""
def test_scrape_xml_file(self, openai_config, temp_xml_file):
"""Test scraping an XML file."""
scraper = XMLScraperGraph(
prompt="What employees are listed?",
source=temp_xml_file,
config=openai_config,
)
result = scraper.run()
assert_valid_scrape_result(result)
def test_scrape_xml_url(self, openai_config, mock_server):
"""Test scraping XML from a URL."""
url = mock_server.get_url("/api/data.xml")
scraper = XMLScraperGraph(
prompt="What is the company name?",
source=url,
config=openai_config,
)
result = scraper.run()
assert_valid_scrape_result(result)
@pytest.mark.integration
@pytest.mark.requires_api_key
class TestCSVScraperIntegration:
"""Integration tests for CSVScraperGraph."""
def test_scrape_csv_file(self, openai_config, temp_csv_file):
"""Test scraping a CSV file."""
scraper = CSVScraperGraph(
prompt="How many people work in Engineering?",
source=temp_csv_file,
config=openai_config,
)
result = scraper.run()
assert_valid_scrape_result(result)
def test_scrape_csv_url(self, openai_config, mock_server):
"""Test scraping CSV from a URL."""
url = mock_server.get_url("/api/data.csv")
scraper = CSVScraperGraph(
prompt="List all departments",
source=url,
config=openai_config,
)
result = scraper.run()
assert_valid_scrape_result(result)
@pytest.mark.integration
@pytest.mark.benchmark
class TestFileFormatPerformance:
"""Performance benchmarks for file format scrapers."""
@pytest.mark.requires_api_key
def test_json_scraping_performance(
self, openai_config, temp_json_file, benchmark_tracker
):
"""Benchmark JSON scraping performance."""
import time
start_time = time.perf_counter()
scraper = JSONScraperGraph(
prompt="Summarize the data",
source=temp_json_file,
config=openai_config,
)
result = scraper.run()
end_time = time.perf_counter()
execution_time = end_time - start_time
from tests.fixtures.benchmarking import BenchmarkResult
benchmark_result = BenchmarkResult(
test_name="json_scraper_performance",
execution_time=execution_time,
success=result is not None,
)
benchmark_tracker.record(benchmark_result)
assert_valid_scrape_result(result)
| {
"repo_id": "ScrapeGraphAI/Scrapegraph-ai",
"file_path": "tests/integration/test_file_formats_integration.py",
"license": "MIT License",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ScrapeGraphAI/Scrapegraph-ai:tests/integration/test_multi_graph_integration.py | """
Integration tests for multi-page scraping graphs.
Tests for:
- SmartScraperMultiGraph
- SearchGraph
- Other multi-page scrapers
"""
import pytest
from scrapegraphai.graphs import SmartScraperMultiGraph
from tests.fixtures.helpers import assert_valid_scrape_result
@pytest.mark.integration
@pytest.mark.requires_api_key
class TestMultiGraphIntegration:
"""Integration tests for multi-page scraping."""
def test_scrape_multiple_pages(self, openai_config, mock_server):
"""Test scraping multiple pages simultaneously."""
urls = [
mock_server.get_url("/projects"),
mock_server.get_url("/products"),
]
scraper = SmartScraperMultiGraph(
prompt="List all items from each page",
source=urls,
config=openai_config,
)
result = scraper.run()
assert_valid_scrape_result(result)
assert isinstance(result, (list, dict))
def test_concurrent_scraping_performance(
self, openai_config, mock_server, benchmark_tracker
):
"""Test performance of concurrent scraping."""
import time
urls = [
mock_server.get_url("/projects"),
mock_server.get_url("/products"),
mock_server.get_url("/"),
]
start_time = time.perf_counter()
scraper = SmartScraperMultiGraph(
prompt="Extract main content from each page",
source=urls,
config=openai_config,
)
result = scraper.run()
end_time = time.perf_counter()
execution_time = end_time - start_time
# Record benchmark
from tests.fixtures.benchmarking import BenchmarkResult
benchmark_result = BenchmarkResult(
test_name="multi_graph_concurrent",
execution_time=execution_time,
success=result is not None,
)
benchmark_tracker.record(benchmark_result)
assert_valid_scrape_result(result)
@pytest.mark.integration
@pytest.mark.slow
class TestSearchGraphIntegration:
"""Integration tests for SearchGraph."""
@pytest.mark.requires_api_key
@pytest.mark.skip(reason="Requires internet access and search API")
def test_search_and_scrape(self, openai_config):
"""Test searching and scraping results."""
from scrapegraphai.graphs import SearchGraph
scraper = SearchGraph(
prompt="What is ScrapeGraphAI?",
config=openai_config,
)
result = scraper.run()
assert_valid_scrape_result(result)
| {
"repo_id": "ScrapeGraphAI/Scrapegraph-ai",
"file_path": "tests/integration/test_multi_graph_integration.py",
"license": "MIT License",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ScrapeGraphAI/Scrapegraph-ai:tests/integration/test_smart_scraper_integration.py | """
Integration tests for SmartScraperGraph with multiple LLM providers.
These tests verify that SmartScraperGraph works correctly with:
- Different LLM providers (OpenAI, Ollama, etc.)
- Various content types
- Real and mock websites
"""
import pytest
from pydantic import BaseModel, Field
from scrapegraphai.graphs import SmartScraperGraph
from tests.fixtures.helpers import (
assert_execution_info_valid,
assert_valid_scrape_result,
)
class ProjectSchema(BaseModel):
    """Schema for project data."""

    # NOTE(review): the Field descriptions presumably guide the LLM's
    # structured extraction when this schema is passed to a graph — confirm.
    title: str = Field(description="Project title")
    description: str = Field(description="Project description")
class ProjectListSchema(BaseModel):
    """Schema for list of projects.

    Passed as the ``schema`` argument in ``test_scrape_with_schema`` to
    request output shaped as ``{"projects": [...]}``.
    """

    projects: list[ProjectSchema]
@pytest.mark.integration
@pytest.mark.requires_api_key
class TestSmartScraperIntegration:
    """Integration tests for SmartScraperGraph."""

    def test_scrape_with_openai(self, openai_config, mock_server):
        """Test scraping with OpenAI using mock server."""
        page_url = mock_server.get_url("/projects")
        graph = SmartScraperGraph(
            prompt="List all projects with their descriptions",
            source=page_url,
            config=openai_config,
        )
        outcome = graph.run()
        assert_valid_scrape_result(outcome)
        assert_execution_info_valid(graph.get_execution_info())

    def test_scrape_with_schema(self, openai_config, mock_server):
        """Test scraping with a Pydantic schema."""
        page_url = mock_server.get_url("/projects")
        graph = SmartScraperGraph(
            prompt="List all projects with their descriptions",
            source=page_url,
            config=openai_config,
            schema=ProjectListSchema,
        )
        outcome = graph.run()
        assert_valid_scrape_result(outcome)
        assert isinstance(outcome, dict)
        # When the schema key is present it must hold a list.
        if "projects" in outcome:
            assert isinstance(outcome["projects"], list)

    @pytest.mark.slow
    def test_scrape_products_page(self, openai_config, mock_server):
        """Test scraping a products page."""
        graph = SmartScraperGraph(
            prompt="Extract all product names and prices",
            source=mock_server.get_url("/products"),
            config=openai_config,
        )
        outcome = graph.run()
        assert_valid_scrape_result(outcome)
        assert isinstance(outcome, dict)

    def test_scrape_with_timeout(self, openai_config, mock_server):
        """Test scraping with a slow-loading page."""
        slow_url = mock_server.get_url("/slow")
        cfg = openai_config.copy()
        cfg["loader_kwargs"] = {"timeout": 5000}  # 5 second timeout
        graph = SmartScraperGraph(
            prompt="Extract the heading from the page",
            source=slow_url,
            config=cfg,
        )
        # The page is slow but still inside the configured timeout.
        outcome = graph.run()
        assert_valid_scrape_result(outcome)

    def test_error_handling_404(self, openai_config, mock_server):
        """Test handling of 404 errors."""
        graph = SmartScraperGraph(
            prompt="Extract content",
            source=mock_server.get_url("/error/404"),
            config=openai_config.copy(),
        )
        # A missing page should yield either a usable result or an
        # informative exception — never a silent failure.
        try:
            outcome = graph.run()
            assert outcome is not None
        except Exception as e:
            assert "404" in str(e) or "not found" in str(e).lower()
@pytest.mark.integration
class TestMultiProviderIntegration:
    """Test SmartScraperGraph with multiple LLM providers."""

    @pytest.mark.requires_api_key
    def test_consistent_results_across_providers(
        self, openai_config, mock_server
    ):
        """Test that different providers produce consistent results."""
        question = "How many projects are listed?"
        page_url = mock_server.get_url("/projects")

        # Test with OpenAI
        graph = SmartScraperGraph(
            prompt=question,
            source=page_url,
            config=openai_config,
        )
        outcome = graph.run()
        assert_valid_scrape_result(outcome)
        # Note: Add more provider tests when API keys are available
        # For now, we just verify OpenAI works
@pytest.mark.integration
@pytest.mark.slow
class TestRealWebsiteIntegration:
    """Integration tests with real websites (using test website)."""

    @pytest.mark.requires_api_key
    def test_scrape_test_website(self, openai_config, mock_website_url):
        """Test scraping the official test website."""
        graph = SmartScraperGraph(
            prompt="List all the main sections of the website",
            source=mock_website_url,
            config=openai_config,
        )
        outcome = graph.run()
        assert_valid_scrape_result(outcome)
        assert_execution_info_valid(graph.get_execution_info())
@pytest.mark.benchmark
class TestSmartScraperPerformance:
    """Performance benchmarks for SmartScraperGraph."""

    @pytest.mark.requires_api_key
    def test_scraping_performance(
        self, openai_config, mock_server, benchmark_tracker
    ):
        """Benchmark scraping performance."""
        import time

        # Resolve the URL before starting the clock so only the
        # graph construction and run are timed.
        target = mock_server.get_url("/projects")

        started = time.perf_counter()
        graph = SmartScraperGraph(
            prompt="List all projects",
            source=target,
            config=openai_config,
        )
        outcome = graph.run()
        elapsed = time.perf_counter() - started

        # Record benchmark result
        from tests.fixtures.benchmarking import BenchmarkResult

        benchmark_tracker.record(
            BenchmarkResult(
                test_name="smart_scraper_basic",
                execution_time=elapsed,
                success=outcome is not None,
            )
        )
        # Assert reasonable performance
        assert elapsed < 30.0, f"Execution took {elapsed}s, expected < 30s"
| {
"repo_id": "ScrapeGraphAI/Scrapegraph-ai",
"file_path": "tests/integration/test_smart_scraper_integration.py",
"license": "MIT License",
"lines": 159,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ScrapeGraphAI/Scrapegraph-ai:examples/smart_scraper_graph/nvidia/smart_scraper_nvidia.py | """
Basic example of scraping pipeline using SmartScraper with NVIDIA
"""
import json
import os

from dotenv import load_dotenv

from scrapegraphai.graphs import SmartScraperGraph
from scrapegraphai.utils import prettify_exec_info

# Pull NVIDIA_API_KEY (and friends) from a local .env file.
load_dotenv()

# ------------------------------------------------------------------
# Graph configuration: Llama 3 70B via the NVIDIA provider
# ------------------------------------------------------------------
graph_config = {
    "llm": {
        "api_key": os.getenv("NVIDIA_API_KEY"),
        "model": "nvidia/meta/llama3-70b-instruct",
        "model_provider": "nvidia",
    },
    "verbose": True,
    "headless": False,
}

# ------------------------------------------------------------------
# Build the scraping pipeline and execute it
# ------------------------------------------------------------------
scraper = SmartScraperGraph(
    prompt="Extract me the first article",
    source="https://www.wired.com",
    config=graph_config,
)

result = scraper.run()
print(json.dumps(result, indent=4))

# ------------------------------------------------------------------
# Print the graph execution info
# ------------------------------------------------------------------
exec_info = scraper.get_execution_info()
print(prettify_exec_info(exec_info))
| {
"repo_id": "ScrapeGraphAI/Scrapegraph-ai",
"file_path": "examples/smart_scraper_graph/nvidia/smart_scraper_nvidia.py",
"license": "MIT License",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ScrapeGraphAI/Scrapegraph-ai:tests/test_fetch_node_timeout.py | """
Unit tests for FetchNode timeout functionality.
These tests verify that:
1. The timeout configuration is properly read and stored
2. HTTP requests use the configured timeout
3. PDF parsing respects the timeout
4. Timeout is propagated to ChromiumLoader via loader_kwargs
"""
import sys
import time
import unittest
from unittest.mock import Mock, patch, MagicMock
from pathlib import Path
# Add the project root to path to import modules
sys.path.insert(0, str(Path(__file__).parent.parent))
class TestFetchNodeTimeout(unittest.TestCase):
    """Test suite for FetchNode timeout configuration and usage."""

    def setUp(self):
        """Set up test fixtures.

        Heavy third-party dependencies are replaced with MagicMock modules
        before importing FetchNode. Every module *we* inject is recorded in
        ``self.mock_modules`` so tearDown can evict exactly what was added.
        (Previously the dict was never populated, so the 'minify_html' and
        'pydantic' mocks leaked into sys.modules between tests.)
        """
        self.mock_modules = {}
        for module in ['langchain_core', 'langchain_core.documents',
                       'langchain_community', 'langchain_community.document_loaders',
                       'langchain_openai', 'minify_html', 'pydantic',
                       'langchain', 'langchain.prompts']:
            if module not in sys.modules:
                sys.modules[module] = MagicMock()
                # Record that this entry was created by the test, not by a
                # genuine import elsewhere in the process.
                self.mock_modules[module] = True

        # Minimal stand-in for langchain_core.documents.Document.
        class MockDocument:
            def __init__(self, page_content, metadata=None):
                self.page_content = page_content
                self.metadata = metadata or {}

        sys.modules['langchain_core.documents'].Document = MockDocument

        # PDF loader stub that simulates a short amount of parsing work.
        class MockPyPDFLoader:
            def __init__(self, source):
                self.source = source

            def load(self):
                time.sleep(0.1)  # Simulate some work
                return [MockDocument(page_content=f"PDF content from {self.source}")]

        sys.modules['langchain_community.document_loaders'].PyPDFLoader = MockPyPDFLoader

        # Now import FetchNode (under the mocked dependencies).
        from scrapegraphai.nodes.fetch_node import FetchNode
        self.FetchNode = FetchNode

    def tearDown(self):
        """Clean up after tests: evict injected mocks and cached langchain modules."""
        # NOTE(review): scrapegraphai.nodes.fetch_node itself stays cached in
        # sys.modules with the mocked dependencies baked in — confirm whether
        # it should be evicted here as well.
        for module in list(sys.modules.keys()):
            if module in self.mock_modules or module.startswith('langchain'):
                sys.modules.pop(module, None)

    def test_timeout_default_value(self):
        """Test that default timeout is set to 30 seconds."""
        node = self.FetchNode(
            input="url",
            output=["doc"],
            node_config={}
        )
        self.assertEqual(node.timeout, 30)

    def test_timeout_custom_value(self):
        """Test that custom timeout value is properly stored."""
        node = self.FetchNode(
            input="url",
            output=["doc"],
            node_config={"timeout": 10}
        )
        self.assertEqual(node.timeout, 10)

    def test_timeout_none_value(self):
        """Test that timeout can be disabled by setting to None."""
        node = self.FetchNode(
            input="url",
            output=["doc"],
            node_config={"timeout": None}
        )
        self.assertIsNone(node.timeout)

    def test_timeout_no_config(self):
        """Test that timeout defaults to 30 when no node_config provided."""
        node = self.FetchNode(
            input="url",
            output=["doc"],
            node_config=None
        )
        self.assertEqual(node.timeout, 30)

    @patch('scrapegraphai.nodes.fetch_node.requests')
    def test_requests_get_with_timeout(self, mock_requests):
        """Test that requests.get is called with timeout when use_soup=True."""
        mock_response = Mock()
        mock_response.status_code = 200
        mock_response.text = "<html><body>Test content</body></html>"
        mock_requests.get.return_value = mock_response

        node = self.FetchNode(
            input="url",
            output=["doc"],
            node_config={"use_soup": True, "timeout": 15}
        )
        # Execute with a URL
        state = {"url": "https://example.com"}
        node.execute(state)

        # Verify requests.get was called with timeout
        mock_requests.get.assert_called_once()
        call_args = mock_requests.get.call_args
        self.assertEqual(call_args[1].get('timeout'), 15)

    @patch('scrapegraphai.nodes.fetch_node.requests')
    def test_requests_get_without_timeout_when_none(self, mock_requests):
        """Test that requests.get is called without timeout argument when timeout=None."""
        mock_response = Mock()
        mock_response.status_code = 200
        mock_response.text = "<html><body>Test content</body></html>"
        mock_requests.get.return_value = mock_response

        node = self.FetchNode(
            input="url",
            output=["doc"],
            node_config={"use_soup": True, "timeout": None}
        )
        # Execute with a URL
        state = {"url": "https://example.com"}
        node.execute(state)

        # Verify requests.get was called without timeout
        mock_requests.get.assert_called_once()
        call_args = mock_requests.get.call_args
        self.assertNotIn('timeout', call_args[1])

    def test_pdf_parsing_with_timeout(self):
        """Test that PDF parsing completes within timeout."""
        node = self.FetchNode(
            input="pdf",
            output=["doc"],
            node_config={"timeout": 5}
        )
        # Execute with a PDF file
        state = {"pdf": "test.pdf"}
        result = node.execute(state)

        # Should complete successfully
        self.assertIn("doc", result)
        self.assertIsNotNone(result["doc"])

    def test_pdf_parsing_timeout_exceeded(self):
        """Test that PDF parsing raises TimeoutError when timeout is exceeded."""
        # Create a mock loader that takes longer than timeout
        class SlowPyPDFLoader:
            def __init__(self, source):
                self.source = source

            def load(self):
                time.sleep(2)  # Sleep longer than timeout
                return []

        with patch('scrapegraphai.nodes.fetch_node.PyPDFLoader', SlowPyPDFLoader):
            node = self.FetchNode(
                input="pdf",
                output=["doc"],
                node_config={"timeout": 0.5}  # Very short timeout
            )
            # Execute should raise TimeoutError
            state = {"pdf": "slow.pdf"}
            with self.assertRaises(TimeoutError) as context:
                node.execute(state)
            self.assertIn("PDF parsing exceeded timeout", str(context.exception))

    @patch('scrapegraphai.nodes.fetch_node.ChromiumLoader')
    def test_timeout_propagated_to_chromium_loader(self, mock_loader_class):
        """Test that timeout is propagated to ChromiumLoader via loader_kwargs."""
        mock_loader = Mock()
        mock_doc = Mock()
        mock_doc.page_content = "<html>Test</html>"
        mock_loader.load.return_value = [mock_doc]
        mock_loader_class.return_value = mock_loader

        node = self.FetchNode(
            input="url",
            output=["doc"],
            node_config={"timeout": 20, "headless": True}
        )
        # Execute with a URL (not using soup, so ChromiumLoader is used)
        state = {"url": "https://example.com"}
        node.execute(state)

        # Verify ChromiumLoader was instantiated with timeout in kwargs
        mock_loader_class.assert_called_once()
        call_kwargs = mock_loader_class.call_args[1]
        self.assertEqual(call_kwargs.get('timeout'), 20)

    @patch('scrapegraphai.nodes.fetch_node.ChromiumLoader')
    def test_timeout_not_overridden_in_loader_kwargs(self, mock_loader_class):
        """Test that existing timeout in loader_kwargs is not overridden."""
        mock_loader = Mock()
        mock_doc = Mock()
        mock_doc.page_content = "<html>Test</html>"
        mock_loader.load.return_value = [mock_doc]
        mock_loader_class.return_value = mock_loader

        node = self.FetchNode(
            input="url",
            output=["doc"],
            node_config={
                "timeout": 20,
                "loader_kwargs": {"timeout": 50}  # Explicit loader timeout
            }
        )
        # Execute with a URL
        state = {"url": "https://example.com"}
        node.execute(state)

        # Verify ChromiumLoader got the loader_kwargs timeout, not node timeout
        mock_loader_class.assert_called_once()
        call_kwargs = mock_loader_class.call_args[1]
        self.assertEqual(call_kwargs.get('timeout'), 50)
if __name__ == '__main__':
    # Allow running this test module directly, outside a pytest runner.
    unittest.main()
| {
"repo_id": "ScrapeGraphAI/Scrapegraph-ai",
"file_path": "tests/test_fetch_node_timeout.py",
"license": "MIT License",
"lines": 199,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ScrapeGraphAI/Scrapegraph-ai:examples/markdownify/markdownify_scrapegraphai.py | """
Example script demonstrating the markdownify functionality
"""
import os
from dotenv import load_dotenv
from scrapegraph_py import Client
from scrapegraph_py.logger import sgai_logger
def main():
    """Run the markdownify example against https://example.com."""
    # Load environment variables
    load_dotenv()

    # Set up logging
    sgai_logger.set_logging(level="INFO")

    api_key = os.getenv("SCRAPEGRAPH_API_KEY")
    if not api_key:
        raise ValueError("SCRAPEGRAPH_API_KEY environment variable not found")

    client = Client(api_key=api_key)

    # Example 1: Convert a website to Markdown
    print("Example 1: Converting website to Markdown")
    print("-" * 50)
    md_response = client.markdownify(
        website_url="https://example.com"
    )

    print("Markdown output:")
    print(md_response["result"])  # Access the result key from the dictionary
    print("\nMetadata:")
    print(md_response.get("metadata", {}))  # Use get() with default value
    print("\n" + "=" * 50 + "\n")
if __name__ == "__main__":
    # Script entry point.
    main()
| {
"repo_id": "ScrapeGraphAI/Scrapegraph-ai",
"file_path": "examples/markdownify/markdownify_scrapegraphai.py",
"license": "MIT License",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ScrapeGraphAI/Scrapegraph-ai:examples/search_graph/scrapegraphai/searchscraper_scrapegraphai.py | """
Example implementation of search-based scraping using Scrapegraph AI.
This example demonstrates how to use the searchscraper to extract information from the web.
"""
import os
from typing import Dict, Any
from dotenv import load_dotenv
from scrapegraph_py import Client
from scrapegraph_py.logger import sgai_logger
def format_response(response: Dict[str, Any]) -> None:
    """
    Format and print the search response in a readable way.

    Tolerates partial responses: missing keys fall back to sensible
    defaults instead of raising KeyError (error payloads from the API may
    omit 'request_id' or 'result').

    Args:
        response (Dict[str, Any]): The response from the search API
    """
    print("\n" + "="*50)
    print("SEARCH RESULTS")
    print("="*50)
    # Print request ID (may be absent on error responses)
    print(f"\nRequest ID: {response.get('request_id', 'N/A')}")
    # Print number of sources
    urls = response.get('reference_urls', [])
    print(f"\nSources Processed: {len(urls)}")
    # Print the extracted information
    print("\nExtracted Information:")
    print("-"*30)
    result = response.get('result')
    if isinstance(result, dict):
        for key, value in result.items():
            print(f"\n{key.upper()}:")
            if isinstance(value, list):
                for item in value:
                    print(f"  • {item}")
            else:
                print(f"  {value}")
    else:
        print(result)
    # Print source URLs
    if urls:
        print("\nSources:")
        print("-"*30)
        for i, url in enumerate(urls, 1):
            print(f"{i}. {url}")
    print("\n" + "="*50)
def main():
    """Entry point: run one search-scrape request and print the result."""
    # Load environment variables
    load_dotenv()

    api_key = os.getenv("SCRAPEGRAPH_API_KEY")
    if not api_key:
        raise ValueError("SCRAPEGRAPH_API_KEY not found in environment variables")

    # Configure logging
    sgai_logger.set_logging(level="INFO")

    client = Client(api_key=api_key)
    try:
        # Basic search scraper example
        print("\nSearching for information...")
        search_result = client.searchscraper(
            user_prompt="Extract webpage information"
        )
        format_response(search_result)
    except Exception as e:
        print(f"\nError occurred: {str(e)}")
    finally:
        # Always close the client
        client.close()
if __name__ == "__main__":
    # Script entry point.
    main()
| {
"repo_id": "ScrapeGraphAI/Scrapegraph-ai",
"file_path": "examples/search_graph/scrapegraphai/searchscraper_scrapegraphai.py",
"license": "MIT License",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ScrapeGraphAI/Scrapegraph-ai:examples/smart_scraper_graph/scrapegraphai/smartscraper_scrapegraphai.py | """
Example implementation using scrapegraph-py client directly.
"""
import os
from dotenv import load_dotenv
from scrapegraph_py import Client
from scrapegraph_py.logger import sgai_logger
def main():
    """Run a single SmartScraper request against scrapegraphai.com."""
    # Load environment variables from .env file
    load_dotenv()

    api_key = os.getenv("SCRAPEGRAPH_API_KEY")
    if not api_key:
        raise ValueError("SCRAPEGRAPH_API_KEY non trovato nelle variabili d'ambiente")

    # Set up logging
    sgai_logger.set_logging(level="INFO")

    client = Client(api_key=api_key)
    try:
        # SmartScraper request
        response = client.smartscraper(
            website_url="https://scrapegraphai.com",
            user_prompt="Extract the founders' informations"
        )
        # Print the response
        print(f"Request ID: {response['request_id']}")
        print(f"Result: {response['result']}")
        if response.get('reference_urls'):
            print(f"Reference URLs: {response['reference_urls']}")
    except Exception as e:
        print(f"Error occurred: {str(e)}")
    finally:
        # Always close the client
        client.close()
if __name__ == "__main__":
    # Script entry point.
    main()
| {
"repo_id": "ScrapeGraphAI/Scrapegraph-ai",
"file_path": "examples/smart_scraper_graph/scrapegraphai/smartscraper_scrapegraphai.py",
"license": "MIT License",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
ScrapeGraphAI/Scrapegraph-ai:scrapegraphai/graphs/markdownify_graph.py | """
markdownify_graph module
"""
from typing import Dict, List, Optional, Tuple
from ..nodes import (
FetchNode,
MarkdownifyNode,
)
from .base_graph import BaseGraph
class MarkdownifyGraph(BaseGraph):
    """
    Pipeline that turns a web page (or raw HTML) into Markdown.

    Two nodes are wired in sequence:
      1. FetchNode       - retrieves the HTML when a URL is supplied
      2. MarkdownifyNode - converts the HTML into clean Markdown

    Args:
        llm_model: The language model to use for processing
        embedder_model: The embedding model to use (optional)
        node_config: Additional configuration for the nodes (optional)

    Example:
        >>> graph = MarkdownifyGraph(
        ...     llm_model=your_llm_model,
        ...     embedder_model=your_embedder_model
        ... )
        >>> result, _ = graph.execute({"url": "https://example.com"})
        >>> print(result["markdown"])
    """

    def __init__(
        self,
        llm_model,
        embedder_model=None,
        node_config: Optional[Dict] = None,
    ):
        # Build the two pipeline stages; both share the same node_config.
        fetcher = FetchNode(
            input="url | html",
            output=["html_content"],
            node_config=node_config,
        )
        converter = MarkdownifyNode(
            input="html_content",
            output=["markdown"],
            node_config=node_config,
        )

        super().__init__(
            nodes=[fetcher, converter],
            edges=[(fetcher, converter)],
            entry_point=fetcher,
            graph_name="Markdownify",
        )

    def execute(self, initial_state: Dict) -> Tuple[Dict, List[Dict]]:
        """
        Execute the markdownify graph.

        Args:
            initial_state: A dictionary containing either:
                - "url": The URL to fetch and convert to markdown
                - "html": The HTML content to convert to markdown

        Returns:
            Tuple containing:
                - Dictionary with the markdown result in the "markdown" key
                - List of execution logs
        """
        return super().execute(initial_state)
| {
"repo_id": "ScrapeGraphAI/Scrapegraph-ai",
"file_path": "scrapegraphai/graphs/markdownify_graph.py",
"license": "MIT License",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ScrapeGraphAI/Scrapegraph-ai:scrapegraphai/nodes/markdownify_node.py | """
MarkdownifyNode Module
"""
from typing import List, Optional
from ..utils.convert_to_md import convert_to_md
from .base_node import BaseNode
class MarkdownifyNode(BaseNode):
    """
    Node that converts HTML content to Markdown.

    Reads HTML from the state under the first resolved input key, runs it
    through the convert_to_md utility, and writes the Markdown back to the
    state under the first output key.

    Attributes:
        verbose (bool): A flag indicating whether to show print statements
            during execution.

    Args:
        input (str): Boolean expression defining the input keys needed from the state.
        output (List[str]): List of output keys to be updated in the state.
        node_config (Optional[dict]): Additional configuration for the node.
        node_name (str): The unique identifier name for the node, defaulting to "Markdownify".
    """

    def __init__(
        self,
        input: str,
        output: List[str],
        node_config: Optional[dict] = None,
        node_name: str = "Markdownify",
    ):
        super().__init__(node_name, "node", input, output, 1, node_config)
        self.verbose = (
            node_config.get("verbose", False) if node_config is not None else False
        )

    def execute(self, state: dict) -> dict:
        """
        Convert the HTML content found in the state to Markdown.

        Args:
            state (dict): The current state of the graph; the resolved input
                key supplies the HTML content.

        Returns:
            dict: The updated state with the output key containing the Markdown.

        Raises:
            KeyError: If the input keys are not found in the state.
        """
        self.logger.info(f"--- Executing {self.node_name} Node ---")

        keys = self.get_input_keys(state)
        html = state[keys[0]]
        state.update({self.output[0]: convert_to_md(html)})
        return state
| {
"repo_id": "ScrapeGraphAI/Scrapegraph-ai",
"file_path": "scrapegraphai/nodes/markdownify_node.py",
"license": "MIT License",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ScrapeGraphAI/Scrapegraph-ai:scrapegraphai/models/xai.py | """
xAI Grok Module
"""
from langchain_openai import ChatOpenAI
class XAI(ChatOpenAI):
    """
    A wrapper for the ChatOpenAI class (xAI uses an OpenAI-compatible API) that
    provides default configuration and could be extended with additional methods
    if needed.

    Args:
        llm_config (dict): Configuration parameters for the language model.
    """

    def __init__(self, **llm_config):
        # Map the generic "api_key" name onto the parameter ChatOpenAI expects.
        if "api_key" in llm_config:
            llm_config["openai_api_key"] = llm_config.pop("api_key")
        # Default to xAI's endpoint, but honour a caller-supplied base URL
        # (previously an explicit openai_api_base was silently overwritten).
        llm_config.setdefault("openai_api_base", "https://api.x.ai/v1")
        super().__init__(**llm_config)
| {
"repo_id": "ScrapeGraphAI/Scrapegraph-ai",
"file_path": "scrapegraphai/models/xai.py",
"license": "MIT License",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
Shubhamsaboo/awesome-llm-apps:rag_tutorials/rag_failure_diagnostics_clinic/rag_failure_diagnostics_clinic.py | """
RAG Failure Diagnostics Clinic
Framework-agnostic example for awesome-llm-apps.
Diagnose LLM + RAG bugs into reusable failure patterns (P01–P12).
"""
import json
import os
import textwrap
from getpass import getpass
from openai import OpenAI
# Library of reusable LLM/RAG failure patterns. The ids (P01-P12) are the
# only labels the diagnosis prompt allows the model to cite; each entry
# pairs a short name with a one-line summary used to build the prompt.
PATTERNS = [
    {
        "id": "P01",
        "name": "Retrieval hallucination / grounding drift",
        "summary": "Answer confidently contradicts or ignores retrieved documents.",
    },
    {
        "id": "P02",
        "name": "Chunk boundary or segmentation bug",
        "summary": "Relevant facts are split, truncated, or mis-grouped across chunks.",
    },
    {
        "id": "P03",
        "name": "Embedding mismatch / semantic vs vector distance",
        "summary": "Vector similarity does not match true semantic relevance.",
    },
    {
        "id": "P04",
        "name": "Index skew or staleness",
        "summary": "Index returns old or missing data relative to the source of truth.",
    },
    {
        "id": "P05",
        "name": "Query rewriting or router misalignment",
        "summary": "Router or rewriter sends queries to the wrong tool or dataset.",
    },
    {
        "id": "P06",
        "name": "Long-chain reasoning drift",
        "summary": "Multi-step tasks gradually forget earlier constraints or goals.",
    },
    {
        "id": "P07",
        "name": "Tool-call misuse or ungrounded tools",
        "summary": "Tools are called with wrong arguments or without proper grounding.",
    },
    {
        "id": "P08",
        "name": "Session memory leak / missing context",
        "summary": "Conversation loses important facts between turns or sessions.",
    },
    {
        "id": "P09",
        "name": "Evaluation blind spots",
        "summary": "System passes tests but fails on real incidents or edge cases.",
    },
    {
        "id": "P10",
        "name": "Startup ordering / dependency not ready",
        "summary": "Services crash or return 5xx during the first minutes after deploy.",
    },
    {
        "id": "P11",
        "name": "Config or secrets drift across environments",
        "summary": "Works locally but breaks in staging or production because of settings.",
    },
    {
        "id": "P12",
        "name": "Multi-tenant or multi-agent interference",
        "summary": "Requests or agents overwrite each other’s state or resources.",
    },
]
# Three canned bug reports, one per menu option in choose_bug_description().
# The "(PNN style)" tag in each banner hints at the expected primary pattern.
EXAMPLE_1 = """=== Example 1 — retrieval hallucination (P01 style) ===
Context:
You have a simple RAG chatbot that answers questions from a product FAQ.
The FAQ only covers billing rules for your SaaS product and does NOT mention anything about cryptocurrency.
User prompt:
"Can I pay my subscription with Bitcoin?"
Retrieved context (from vector store):
- "We only accept major credit cards and PayPal."
- "All payments are processed in USD."
Model answer:
"Yes, you can pay with Bitcoin. We support several cryptocurrencies through a third-party payment gateway."
Logs:
No errors. Retrieval shows the FAQ chunks above, but the model still confidently invents Bitcoin support.
"""
EXAMPLE_2 = """=== Example 2 — startup ordering / dependency not ready (P10 style) ===
Context:
You have a RAG API with three services: api-gateway, rag-worker, and vector-db (for example Qdrant or FAISS).
In local docker compose everything works.
Deployment:
In production, services are deployed on Kubernetes.
Symptom:
Right after a fresh deploy, api-gateway returns 500 errors for the first few minutes.
Logs show connection timeouts from api-gateway to vector-db.
After a few minutes, the errors disappear and the system behaves normally.
You suspect a startup race between api-gateway and vector-db but are not sure how to fix it properly.
"""
EXAMPLE_3 = """=== Example 3 — config or secrets drift (P11 style) ===
Context:
You added a new environment variable for the RAG pipeline: SECRET_RAG_KEY.
This is required by middleware that signs outgoing requests to an internal search API.
Local:
On developer machines, SECRET_RAG_KEY is defined in .env and everything works.
Production:
You deployed a new version but forgot to add SECRET_RAG_KEY to the production environment.
The first requests after deploy fail with 500 errors and "missing secret" messages in the logs.
After hot-patching the secret into production, the errors stop.
However, similar "first deploy breaks because of missing config" incidents keep happening.
"""
def build_system_prompt() -> str:
    """Build the system prompt that explains the patterns and the task."""
    header = """
You are an assistant that triages failures in LLM + RAG pipelines.
You have a library of reusable failure patterns P01–P12.
For each bug description, you must:
1. Choose exactly ONE primary pattern id from P01–P12.
2. Optionally choose up to TWO secondary candidate pattern ids.
3. Explain your reasoning in clear bullet points.
4. Propose a MINIMAL structural fix:
- changes to retrieval, indexing, routing, evaluation, tooling, or infra
- avoid generic advice like "add more context" or "use a better model"
You are not allowed to invent new pattern ids.
Always select from the patterns listed below.
Return your answer as structured Markdown with the following sections:
- Primary pattern
- Secondary candidates (optional)
- Reasoning
- Minimal structural fix
"""
    # One "id: name — summary" line per pattern, in declaration order.
    patterns_block = "\n".join(
        f"{p['id']}: {p['name']} — {p['summary']}" for p in PATTERNS
    )
    return textwrap.dedent(header).strip() + "\n\nFailure patterns:\n" + patterns_block
def make_client_and_model():
    """Create an OpenAI-compatible client and read model settings."""
    # Fall back to an interactive prompt when the key is not in the env.
    key = os.getenv("OPENAI_API_KEY") or getpass(
        "Enter your OpenAI-compatible API key: "
    ).strip()
    base_url = os.getenv("OPENAI_BASE_URL", "").strip() or "https://api.openai.com/v1"
    model_name = os.getenv("OPENAI_MODEL", "").strip() or "gpt-4o"

    client = OpenAI(api_key=key, base_url=base_url)
    print(f"\nUsing base URL: {base_url}")
    print(f"Using model: {model_name}\n")
    return client, model_name
def choose_bug_description() -> str:
    """Let the user choose one of the examples or paste their own bug.

    Returns:
        The selected or pasted bug description, or "" when the user pasted
        nothing (callers treat the empty string as "skip this round").
    """
    print("Choose an example or paste your own bug description:\n")
    print("  [1] Example 1 — retrieval hallucination (P01 style)")
    print("  [2] Example 2 — startup ordering / dependency not ready (P10 style)")
    print("  [3] Example 3 — config or secrets drift (P11 style)")
    print("  [p] Paste my own RAG / LLM bug\n")
    choice = input("Your choice: ").strip().lower()
    print()

    # Dispatch table replaces three copy-pasted if-branches with
    # identical bodies; output is unchanged for each choice.
    examples = {"1": EXAMPLE_1, "2": EXAMPLE_2, "3": EXAMPLE_3}
    if choice in examples:
        bug = examples[choice]
        print(f"You selected Example {choice}. Full bug description:\n")
        print(bug)
        print()
        return bug

    # Any other input falls through to free-form paste mode.
    print("Paste your bug description. End with an empty line.")
    lines = []
    while True:
        try:
            line = input()
        except EOFError:
            break
        if not line.strip():
            break
        lines.append(line)
    user_bug = "\n".join(lines).strip()
    if not user_bug:
        print("No bug description detected, aborting this round.\n")
        return ""
    print("\nYou pasted the following bug description:\n")
    print(user_bug)
    print()
    return user_bug
def run_once(client: OpenAI, model_name: str, system_prompt: str) -> None:
    """Run one diagnosis round."""
    bug = choose_bug_description()
    if not bug:
        return

    print("Running diagnosis ...\n")
    user_content = (
        "Here is the bug description. "
        "Follow the pattern rules described above.\n\n"
        + bug
    )
    try:
        completion = client.chat.completions.create(
            model=model_name,
            temperature=0.2,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_content},
            ],
        )
    except Exception as exc:
        print(f"Error while calling the model: {exc}")
        return

    reply = completion.choices[0].message.content or ""
    print(reply)

    # Persist a small JSON report alongside the script for later review.
    report = {
        "bug_description": bug,
        "model": model_name,
        "assistant_markdown": reply,
    }
    try:
        with open("rag_failure_report.json", "w", encoding="utf-8") as f:
            json.dump(report, f, indent=2)
        print("\nSaved report to rag_failure_report.json\n")
    except OSError as exc:
        print(f"\nCould not write report file: {exc}\n")
def main():
    """Drive repeated diagnosis rounds until the user declines to continue."""
    system_prompt = build_system_prompt()
    client, model_name = make_client_and_model()
    keep_going = True
    while keep_going:
        run_once(client, model_name, system_prompt)
        answer = input("Debug another bug? (y/n): ").strip().lower()
        if answer == "y":
            print()
        else:
            print("Session finished. Goodbye.")
            keep_going = False


if __name__ == "__main__":
    main()
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "rag_tutorials/rag_failure_diagnostics_clinic/rag_failure_diagnostics_clinic.py",
"license": "Apache License 2.0",
"lines": 242,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/devpulse_ai/adapters/arxiv.py | """
ArXiv Adapter - Fetches recent AI/ML research papers.
This is a simplified, stateless adapter for the DevPulseAI reference implementation.
ArXiv API is public and requires no authentication.
"""
import httpx
import xml.etree.ElementTree as ET
from typing import List, Dict, Any
def fetch_arxiv_papers(limit: int = 5) -> List[Dict[str, Any]]:
    """
    Fetch recent AI/ML papers from ArXiv.

    Args:
        limit: Maximum number of papers to return.

    Returns:
        List of signal dictionaries with standardized schema. On any HTTP
        or XML error the error is printed and whatever was collected so far
        is returned (possibly an empty list).
    """
    base_url = "https://export.arxiv.org/api/query"
    params = {
        "search_query": "cat:cs.AI OR cat:cs.LG",
        "start": 0,
        "max_results": limit,
        "sortBy": "submittedDate",
        "sortOrder": "descending",
    }
    collected: List[Dict[str, Any]] = []
    try:
        response = httpx.get(base_url, params=params, timeout=15.0)
        response.raise_for_status()

        # The ArXiv API answers with an Atom XML feed.
        root = ET.fromstring(response.content)
        ns = {"atom": "http://www.w3.org/2005/Atom"}

        for entry in root.findall("atom:entry", ns):
            title_node = entry.find("atom:title", ns)
            summary_node = entry.find("atom:summary", ns)
            id_node = entry.find("atom:id", ns)
            published_node = entry.find("atom:published", ns)

            title = "Untitled" if title_node is None else title_node.text.strip()
            summary = "" if summary_node is None else summary_node.text.strip()
            arxiv_id = "" if id_node is None else id_node.text.strip()
            published = "" if published_node is None else published_node.text

            # Prefer the explicit PDF link when present; fall back to the entry id.
            pdf_link = arxiv_id
            link_node = entry.find("atom:link[@title='pdf']", ns)
            if link_node is not None:
                pdf_link = link_node.attrib.get("href", arxiv_id)

            # Truncate long abstracts to keep the signal payload compact.
            if len(summary) > 500:
                description = summary[:500] + "..."
            else:
                description = summary

            collected.append({
                "id": arxiv_id,
                "source": "arxiv",
                "title": title,
                "description": description,
                "url": arxiv_id,
                "metadata": {
                    "pdf": pdf_link,
                    "published": published,
                },
            })
    except httpx.HTTPError as e:
        print(f"[ArXiv Adapter] HTTP error: {e}")
    except ET.ParseError as e:
        print(f"[ArXiv Adapter] XML parse error: {e}")
    except Exception as e:
        print(f"[ArXiv Adapter] Error: {e}")
    return collected
if __name__ == "__main__":
# Quick test
results = fetch_arxiv_papers(limit=3)
for r in results:
print(f"- {r['title'][:60]}...")
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/devpulse_ai/adapters/arxiv.py",
"license": "Apache License 2.0",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/devpulse_ai/adapters/github.py | """
GitHub Adapter - Fetches trending repositories from GitHub.
This is a simplified, stateless adapter for the DevPulseAI reference implementation.
No authentication required for basic public API access.
"""
import httpx
from datetime import datetime, timedelta
from typing import List, Dict, Any
def fetch_github_trending(limit: int = 5) -> List[Dict[str, Any]]:
    """
    Fetch trending GitHub repositories created in the last 24 hours.

    Args:
        limit: Maximum number of repositories to return.

    Returns:
        List of signal dictionaries with standardized schema. On any HTTP
        or parsing error the error is printed and whatever was collected so
        far is returned (possibly an empty list).
    """
    # Local import keeps the module-level import block untouched.
    from datetime import timezone

    base_url = "https://api.github.com/search/repositories"
    # datetime.utcnow() is deprecated (Python 3.12+) and returns a naive
    # datetime; use an aware UTC datetime instead. The strftime output
    # (YYYY-MM-DD) is identical, so the search query is unchanged.
    date_query = (datetime.now(timezone.utc) - timedelta(days=1)).strftime("%Y-%m-%d")
    params = {
        "q": f"created:>{date_query} sort:stars",
        "per_page": limit
    }
    signals = []
    try:
        response = httpx.get(base_url, params=params, timeout=10.0)
        response.raise_for_status()
        data = response.json()
        for item in data.get("items", []):
            signal = {
                "id": str(item["id"]),
                "source": "github",
                "title": item["full_name"],
                # description may be present but null in the API response.
                "description": item.get("description") or "No description",
                "url": item["html_url"],
                "metadata": {
                    "stars": item["stargazers_count"],
                    "language": item.get("language"),
                    "topics": item.get("topics", [])
                }
            }
            signals.append(signal)
    except httpx.HTTPError as e:
        print(f"[GitHub Adapter] HTTP error: {e}")
    except Exception as e:
        print(f"[GitHub Adapter] Error: {e}")
    return signals
if __name__ == "__main__":
# Quick test
results = fetch_github_trending(limit=3)
for r in results:
print(f"- {r['title']}: {r['metadata']['stars']} stars")
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/devpulse_ai/adapters/github.py",
"license": "Apache License 2.0",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/devpulse_ai/adapters/hackernews.py | """
HackerNews Adapter - Fetches top AI/ML stories from HackerNews.
This is a simplified, stateless adapter for the DevPulseAI reference implementation.
Uses the Algolia HN API for better search capabilities.
"""
import httpx
from typing import List, Dict, Any
def fetch_hackernews_stories(limit: int = 5) -> List[Dict[str, Any]]:
    """
    Fetch recent AI/ML related stories from HackerNews.

    Args:
        limit: Maximum number of stories to return.

    Returns:
        List of signal dictionaries with standardized schema. On any HTTP
        or parsing error the error is printed and whatever was collected so
        far is returned (possibly an empty list).
    """
    endpoint = "https://hn.algolia.com/api/v1/search_by_date"
    query_params = {
        "query": "AI OR LLM OR Machine Learning OR GPT",
        "tags": "story",
        "hitsPerPage": limit,
        "numericFilters": "points>5",
    }
    collected: List[Dict[str, Any]] = []
    try:
        resp = httpx.get(endpoint, params=query_params, timeout=10.0)
        resp.raise_for_status()

        for hit in resp.json().get("hits", []):
            # Skip items with neither an external URL nor a story body
            # (Ask HN posts and the like).
            if not (hit.get("url") or hit.get("story_text")):
                continue

            object_id = str(hit.get("objectID", ""))
            discussion_url = f"https://news.ycombinator.com/item?id={object_id}"
            story_text = hit.get("story_text") or ""

            collected.append({
                "id": object_id,
                "source": "hackernews",
                "title": hit.get("title", "Untitled"),
                "description": story_text[:300],
                "url": hit.get("url") or discussion_url,
                "metadata": {
                    "points": hit.get("points", 0),
                    "comments": hit.get("num_comments", 0),
                    "author": hit.get("author", "unknown"),
                    "hn_url": discussion_url,
                },
            })
    except httpx.HTTPError as e:
        print(f"[HackerNews Adapter] HTTP error: {e}")
    except Exception as e:
        print(f"[HackerNews Adapter] Error: {e}")
    return collected
if __name__ == "__main__":
# Quick test
results = fetch_hackernews_stories(limit=3)
for r in results:
print(f"- {r['title']}: {r['metadata']['points']} points")
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/devpulse_ai/adapters/hackernews.py",
"license": "Apache License 2.0",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/devpulse_ai/adapters/huggingface.py | """
HuggingFace Adapter - Fetches trending models from HuggingFace Hub.
This is a simplified, stateless adapter for the DevPulseAI reference implementation.
Uses the public HuggingFace API (no authentication required for basic access).
"""
import httpx
from typing import List, Dict, Any
def fetch_huggingface_models(limit: int = 5) -> List[Dict[str, Any]]:
    """
    Fetch trending/popular models from HuggingFace Hub.

    Args:
        limit: Maximum number of models to return.

    Returns:
        List of signal dictionaries with standardized schema. On any HTTP
        or parsing error the error is printed and whatever was collected so
        far is returned (possibly an empty list).
    """
    api_url = "https://huggingface.co/api/models"
    query = {
        "sort": "likes",
        "direction": "-1",
        "limit": limit,
    }
    collected: List[Dict[str, Any]] = []
    try:
        resp = httpx.get(api_url, params=query, timeout=10.0)
        resp.raise_for_status()

        for item in resp.json():
            model_id = item.get("modelId", item.get("id", "unknown"))
            tags = item.get("tags", [])
            pipeline = item.get("pipeline_tag", "")

            # Assemble a compact human-readable description from the
            # model's metadata; optional fields are included only if set.
            parts: List[str] = []
            if pipeline:
                parts.append(f"Pipeline: {pipeline}")
            if tags:
                parts.append(f"Tags: {', '.join(tags[:5])}")
            parts.append(f"Downloads: {item.get('downloads', 0):,}")
            parts.append(f"Likes: {item.get('likes', 0):,}")

            collected.append({
                "id": model_id,
                "source": "huggingface",
                "title": f"HF Model: {model_id}",
                "description": " | ".join(parts),
                "url": f"https://huggingface.co/{model_id}",
                "metadata": {
                    "downloads": item.get("downloads", 0),
                    "likes": item.get("likes", 0),
                    "pipeline_tag": pipeline,
                    "tags": tags[:10],
                    "author": item.get("author", ""),
                },
            })
    except httpx.HTTPError as e:
        print(f"[HuggingFace Adapter] HTTP error: {e}")
    except Exception as e:
        print(f"[HuggingFace Adapter] Error: {e}")
    return collected
if __name__ == "__main__":
# Quick test
results = fetch_huggingface_models(limit=3)
for r in results:
print(f"- {r['title']}: {r['metadata']['likes']} likes")
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/devpulse_ai/adapters/huggingface.py",
"license": "Apache License 2.0",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/devpulse_ai/adapters/medium.py | """
Medium Adapter - Fetches tech blogs from Medium and other RSS feeds.
This is a simplified, stateless adapter for the DevPulseAI reference implementation.
Uses feedparser to fetch from RSS/Atom feeds.
"""
import feedparser
from typing import List, Dict, Any
# Tech blog feeds to monitor
# RSS/Atom endpoints polled by fetch_medium_blogs: two Medium tag feeds
# plus company engineering blogs.
FEEDS = [
    "https://medium.com/feed/tag/artificial-intelligence",
    "https://medium.com/feed/tag/machine-learning",
    "https://medium.com/feed/@netflixtechblog",
    "https://engineering.fb.com/feed/",
]
def fetch_medium_blogs(limit: int = 5) -> List[Dict[str, Any]]:
    """
    Fetch recent tech blogs from Medium and engineering blogs.

    Args:
        limit: Maximum number of entries per feed.

    Returns:
        List of signal dictionaries with standardized schema. A feed that
        fails to fetch/parse is reported and skipped, so a partial result
        is possible.
    """
    # Hoisted out of the per-entry loop: the original re-ran `import re`
    # and rebuilt the regex for every entry. Compile the HTML-tag stripper
    # once; behavior is identical.
    import re
    tag_pattern = re.compile(r'<[^>]+>')

    signals = []
    for feed_url in FEEDS:
        try:
            feed = feedparser.parse(feed_url)
            for entry in feed.entries[:limit]:
                # Get summary or description (feeds differ in which they set).
                summary = getattr(entry, "summary", "") or getattr(entry, "description", "")
                # Clean HTML tags from summary (simple approach) and cap length.
                if summary:
                    summary = tag_pattern.sub('', summary)[:500]
                signal = {
                    "id": entry.get("id", entry.link),
                    "source": "medium",
                    "title": entry.title,
                    "description": summary,
                    "url": entry.link,
                    "metadata": {
                        "published": getattr(entry, "published", ""),
                        "author": getattr(entry, "author", "Unknown"),
                        "feed": feed_url
                    }
                }
                signals.append(signal)
        except Exception as e:
            print(f"[Medium Adapter] Error fetching {feed_url}: {e}")
    return signals
if __name__ == "__main__":
# Quick test
results = fetch_medium_blogs(limit=2)
for r in results:
print(f"- {r['title'][:60]}...")
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/devpulse_ai/adapters/medium.py",
"license": "Apache License 2.0",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.