| sample_id (string, 21–196 chars) | text (string, 105–936k chars) | metadata (dict) | category (6 classes) |
|---|---|---|---|
OpenBMB/ChatDev:server/services/attachment_service.py | """Attachment helpers shared by HTTP routes and executors."""
import logging
import mimetypes
import os
import shutil
import tempfile
from pathlib import Path
from typing import Any, Dict, List, Optional
from fastapi import UploadFile
from entity.messages import MessageBlock, MessageBlockType
from utils.attachments import AttachmentStore, AttachmentRecord
class AttachmentService:
    """Handles attachment lifecycle per session.

    Files live under ``<root>/<session_dir>/code_workspace/attachments``.
    Deletion on session cleanup is opt-in via the
    ``MAC_AUTO_CLEAN_ATTACHMENTS`` environment variable.
    """

    def __init__(self, *, root: Path | str = Path("WareHouse")) -> None:
        self.logger = logging.getLogger(__name__)
        self.attachments_root = Path(root)
        self.attachments_root.mkdir(parents=True, exist_ok=True)
        # Opt-in flag: only delete attachment dirs on cleanup when set.
        env_flag = os.environ.get("MAC_AUTO_CLEAN_ATTACHMENTS", "0").strip().lower()
        self.clean_on_cleanup = env_flag in {"1", "true", "yes"}

    def prepare_session_workspace(self, session_id: str) -> Path:
        """Create (if needed) and return the session's attachment directory."""
        return self._session_attachments_path(session_id, create=True)

    def cleanup_session(self, session_id: str) -> None:
        """Remove the session's attachment directory when auto-clean is enabled."""
        attachment_dir = self._session_attachments_path(session_id, create=False)
        if not attachment_dir:
            return
        if self.clean_on_cleanup:
            shutil.rmtree(attachment_dir, ignore_errors=True)
            self.logger.info("Cleaned attachment directory for session %s", session_id)
        else:
            self.logger.info(
                "Attachment cleanup disabled; preserved files for session %s", session_id
            )

    def get_attachment_store(self, session_id: str) -> AttachmentStore:
        """Return an AttachmentStore rooted at the session workspace."""
        path = self.prepare_session_workspace(session_id)
        return AttachmentStore(path)

    async def save_upload_file(self, session_id: str, upload: UploadFile) -> AttachmentRecord:
        """Stream an uploaded file to disk and register it with the session store.

        The client-supplied filename is reduced to its basename first, so a
        crafted name such as ``../../evil`` cannot escape the staging
        directory (path-traversal hardening).
        """
        raw_name = upload.filename or "upload.bin"
        # Drop any directory components from the untrusted client filename.
        filename = Path(raw_name).name or "upload.bin"
        temp_dir = Path(tempfile.mkdtemp(prefix="mac_upload_"))
        temp_path = temp_dir / filename
        try:
            with temp_path.open("wb") as buffer:
                # Stream in 1 MiB chunks to keep memory bounded for large uploads.
                while True:
                    chunk = await upload.read(1024 * 1024)
                    if not chunk:
                        break
                    buffer.write(chunk)
            store = self.get_attachment_store(session_id)
            mime_type = upload.content_type or mimetypes.guess_type(filename)[0]
            record = store.register_file(
                temp_path,
                kind=MessageBlockType.from_mime_type(mime_type),
                display_name=filename,
                mime_type=mime_type,
                extra={
                    "source": "user_upload",
                    "origin": "web_upload",
                    "session_id": session_id,
                },
            )
            return record
        finally:
            # Best-effort removal of the whole staging directory.
            shutil.rmtree(temp_dir, ignore_errors=True)

    def build_attachment_blocks(
        self,
        session_id: str,
        attachment_ids: List[str],
        *,
        target_store: Optional[AttachmentStore] = None,
    ) -> List[MessageBlock]:
        """Resolve attachment ids into message blocks, optionally re-homing them.

        Unknown ids are skipped silently. When ``target_store`` points at a
        different directory than the session store, files are copied on ingest.
        """
        if not attachment_ids:
            return []
        source_store = self.get_attachment_store(session_id)
        source_root = source_store.root.resolve()
        target_root = target_store.root.resolve() if target_store else None
        blocks: List[MessageBlock] = []
        for attachment_id in attachment_ids:
            record = source_store.get(attachment_id)
            if not record:
                continue
            if target_store:
                # Only copy bytes when the stores actually live in different roots.
                copy_required = target_root != source_root
                record = target_store.ingest_record(record, copy_file=copy_required)
            blocks.append(record.as_message_block())
        return blocks

    def list_attachment_manifests(self, session_id: str) -> Dict[str, Any]:
        """Export the manifest of every attachment registered for the session."""
        store = self.get_attachment_store(session_id)
        return store.export_manifest()

    def _session_attachments_path(self, session_id: str, *, create: bool = True) -> Optional[Path]:
        """Compute the per-session attachments directory.

        With ``create=True`` the directory is created and always returned;
        with ``create=False`` returns None when it does not exist yet.
        """
        session_dir_name = session_id if session_id.startswith("session_") else f"session_{session_id}"
        path = self.attachments_root / session_dir_name / "code_workspace" / "attachments"
        if create:
            path.mkdir(parents=True, exist_ok=True)
            return path
        return path if path.exists() else None
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "server/services/attachment_service.py",
"license": "Apache License 2.0",
"lines": 102,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
OpenBMB/ChatDev:server/services/message_handler.py | import logging
from typing import Any, Dict
from utils.exceptions import ValidationError
from server.services.session_execution import SessionExecutionController
from server.services.session_store import WorkflowSessionStore
class MessageHandler:
    """Dispatches decoded WebSocket messages to per-type handlers."""

    def __init__(
        self,
        session_store: WorkflowSessionStore,
        session_controller: SessionExecutionController,
        workflow_run_service=None,
    ) -> None:
        self.session_store = session_store
        self.session_controller = session_controller
        self.workflow_run_service = workflow_run_service
        self.logger = logging.getLogger(__name__)

    async def _send_error(self, session_id: str, websocket_manager, text: str) -> None:
        """Push a standard error envelope back to the client."""
        await websocket_manager.send_message(
            session_id,
            {"type": "error", "data": {"message": text}},
        )

    async def handle_message(self, session_id: str, data: Dict[str, Any], websocket_manager):
        """Route one decoded message based on its ``type`` field."""
        message_type = data.get("type")
        if message_type == "human_input":
            await self._handle_human_input(session_id, data, websocket_manager)
        elif message_type == "ping":
            await self._handle_ping(session_id, websocket_manager)
        elif message_type == "get_status":
            await self._handle_get_status(session_id, websocket_manager)
        else:
            await self._send_error(
                session_id, websocket_manager, f"Unknown message type: {message_type}"
            )

    async def _handle_human_input(self, session_id: str, data: Dict[str, Any], websocket_manager):
        """Validate the payload and hand the input to the execution controller."""
        try:
            payload = data.get("data", {}) or {}
            user_input = payload.get("input", "")
            attachments = payload.get("attachments") or []
            # Reject payloads that carry neither text nor attachments.
            if not (user_input or attachments):
                await self._send_error(session_id, websocket_manager, "Empty input")
                return
            self.session_controller.provide_human_input(
                session_id,
                {"text": user_input, "attachments": attachments},
            )
            await websocket_manager.send_message(
                session_id,
                {"type": "input_received", "data": {"message": "Input received"}},
            )
        except ValidationError as exc:
            await self._send_error(session_id, websocket_manager, str(exc))
        except Exception as exc:
            self.logger.error("Error handling human input for session %s: %s", session_id, exc)
            await self._send_error(session_id, websocket_manager, str(exc))

    async def _handle_ping(self, session_id: str, websocket_manager):
        """Answer a client heartbeat."""
        await websocket_manager.handle_heartbeat(session_id)

    async def _handle_get_status(self, session_id: str, websocket_manager):
        """Report the stored session info, or a not-found placeholder."""
        session_info = self.session_store.get_session_info(session_id)
        await websocket_manager.send_message(
            session_id,
            {"type": "status", "data": session_info or {"message": "Session not found"}},
        )
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "server/services/message_handler.py",
"license": "Apache License 2.0",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
OpenBMB/ChatDev:server/services/prompt_channel.py | """PromptChannel implementation backed by WebSocket sessions."""
import asyncio
from typing import Any, Dict, List, Optional
from entity.messages import MessageBlock
from server.services.attachment_service import AttachmentService
from server.services.session_execution import SessionExecutionController
from utils.attachments import AttachmentStore
from utils.exceptions import TimeoutError
from utils.human_prompt import PromptChannel, PromptResult
from utils.structured_logger import get_server_logger
class WebPromptChannel(PromptChannel):
    """Prompt channel that mediates through the WebSocket session controller.

    Typically constructed on the event-loop thread but *used* (via
    ``request``) from a worker thread, so outbound notifications are
    scheduled back onto the captured loop when one exists.
    """

    def __init__(
        self,
        *,
        session_id: str,
        session_controller: SessionExecutionController,
        websocket_manager: Any,
        attachment_service: AttachmentService,
        attachment_store: AttachmentStore,
    ) -> None:
        self.session_id = session_id
        self.session_controller = session_controller
        self.websocket_manager = websocket_manager
        self.attachment_service = attachment_service
        self.attachment_store = attachment_store
        # Capture the running loop (if any) so _notify_human_prompt can later
        # schedule coroutines onto it from a non-loop thread.
        try:
            self._loop = asyncio.get_running_loop()
        except RuntimeError:
            # Constructed outside an event loop; fall back to sync sends.
            self._loop = None

    def request(
        self,
        *,
        node_id: str,
        task: str,
        inputs: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None,
    ) -> PromptResult:
        """Block until the user answers the prompt for *node_id*.

        Marks the session as waiting, notifies the client over WebSocket,
        then waits (blocking this thread) for the controller to deliver the
        human response. Timeouts and other errors propagate to the caller.
        """
        preview = inputs or ""
        payload = {
            "input": preview,
            "task_description": task,
            **(metadata or {}),
        }
        self.session_controller.set_waiting_for_input(
            self.session_id,
            node_id,
            payload,
        )
        self._notify_human_prompt(node_id, preview, task)
        try:
            human_response = self.session_controller.wait_for_human_input(self.session_id)
        except TimeoutError:
            raise
        except Exception as exc:  # pragma: no cover - propagated upstream
            logger = get_server_logger()
            logger.log_exception(exc, "Error waiting for human input", node_id=node_id, session_id=self.session_id)
            raise
        response_text, attachment_ids = self._extract_response(human_response)
        blocks = self._build_blocks(response_text, attachment_ids)
        metadata_out = {
            "attachment_count": len(attachment_ids),
            "input_size": len(preview),
        }
        return PromptResult(text=response_text, blocks=blocks, metadata=metadata_out)

    def _extract_response(self, payload: Any) -> tuple[str, List[str]]:
        """Normalize the controller's response into (text, attachment_ids)."""
        if isinstance(payload, dict):
            response_text = payload.get("text") or ""
            attachments = payload.get("attachments") or []
            return response_text, attachments
        if payload is None:
            return "", []
        # Non-dict, non-None payloads are coerced to plain text.
        return str(payload), []

    def _build_blocks(self, text: str, attachment_ids: List[str]) -> List[MessageBlock]:
        """Assemble message blocks from text plus resolved attachments.

        Always returns at least one (possibly empty) text block so callers
        never receive an empty list.
        """
        blocks: List[MessageBlock] = []
        if text:
            blocks.append(MessageBlock.text_block(text))
        if attachment_ids:
            blocks.extend(
                self.attachment_service.build_attachment_blocks(
                    self.session_id,
                    attachment_ids,
                    target_store=self.attachment_store,
                )
            )
        if not blocks:
            blocks.append(MessageBlock.text_block(""))
        return blocks

    def _notify_human_prompt(self, node_id: str, preview: str, task: str) -> None:
        """Tell the client that human input is required.

        Prefers scheduling the async send onto the captured loop; falls back
        to the manager's synchronous path when no loop is available or the
        scheduled send fails.
        """
        message = {
            "type": "human_input_required",
            "data": {
                "node_id": node_id,
                "input": preview,
                "task_description": task,
            },
        }
        if self._loop and self._loop.is_running():
            future = asyncio.run_coroutine_threadsafe(
                self.websocket_manager.send_message(self.session_id, message),
                self._loop,
            )
            try:
                # Block until the loop-side send completes so failures surface here.
                future.result()
            except Exception:
                # fallback to sync send to surface errors/logging
                self.websocket_manager.send_message_sync(self.session_id, message)
        else:
            self.websocket_manager.send_message_sync(self.session_id, message)
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "server/services/prompt_channel.py",
"license": "Apache License 2.0",
"lines": 109,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
OpenBMB/ChatDev:server/services/session_execution.py | """Human input coordination for workflow sessions."""
import concurrent.futures
import logging
import time
from concurrent.futures import Future
from typing import Any, Dict, Optional
from utils.exceptions import ValidationError, TimeoutError as CustomTimeoutError, WorkflowCancelledError
from utils.structured_logger import LogType, get_server_logger
from .session_store import SessionStatus, WorkflowSessionStore
class SessionExecutionController:
    """Handles blocking wait/provide cycles for human input.

    A worker thread blocks in ``wait_for_human_input`` on a
    ``concurrent.futures.Future`` while the WebSocket side resolves it via
    ``provide_human_input``.
    """

    def __init__(self, store: WorkflowSessionStore) -> None:
        self.store = store
        self.logger = logging.getLogger(__name__)

    def set_waiting_for_input(self, session_id: str, node_id: str, input_data: Dict[str, Any]) -> None:
        """Mark the session as blocked on human input at *node_id*.

        Raises ValidationError when the session does not exist.
        """
        session = self.store.get_session(session_id)
        if not session:
            raise ValidationError("Session not found", details={"session_id": session_id})
        session.waiting_for_input = True
        session.current_node_id = node_id
        session.pending_input_data = input_data
        session.status = SessionStatus.WAITING_FOR_INPUT
        # Fresh Future is the rendezvous point between the waiting worker
        # thread and provide_human_input.
        session.human_input_future = Future()
        session.human_input_value = None
        self.logger.info("Session %s waiting for input at node %s", session_id, node_id)

    def wait_for_human_input(self, session_id: str, timeout: float = 1800.0) -> Any:
        """Block until input arrives, the timeout elapses, or the run is cancelled.

        Polls the future in ~1s slices so a ``cancel_event`` set from another
        thread is noticed promptly. Raises ValidationError (missing/not
        waiting session), WorkflowCancelledError (cancelled), or
        CustomTimeoutError (overall timeout). Always clears the session's
        waiting state on exit.
        """
        session = self.store.get_session(session_id)
        if not session:
            logger = get_server_logger()
            logger.warning(
                "Session %s not found when waiting for human input", session_id, log_type=LogType.WORKFLOW
            )
            raise ValidationError("Session not found", details={"session_id": session_id})
        future: Optional[Future] = session.human_input_future
        if not session.waiting_for_input or future is None:
            logger = get_server_logger()
            logger.warning(
                "Session %s is not waiting for input", session_id, log_type=LogType.WORKFLOW
            )
            raise ValidationError(
                "Session is not waiting for input",
                details={"session_id": session_id, "waiting_for_input": session.waiting_for_input},
            )
        start_time = time.time()
        poll_interval = 1.0
        try:
            while True:
                # Check cancellation before each wait slice.
                if session.cancel_event.is_set():
                    raise WorkflowCancelledError("Workflow execution cancelled", workflow_id=session_id)
                elapsed = time.time() - start_time
                remaining = timeout - elapsed
                if remaining <= 0:
                    # Escalate to the outer handler to raise the custom error.
                    raise concurrent.futures.TimeoutError()
                wait_time = min(poll_interval, remaining)
                try:
                    result = future.result(timeout=wait_time)
                    logger = get_server_logger()
                    input_length = 0
                    if isinstance(result, dict):
                        input_length = len(result.get("text") or "")
                    elif result is not None:
                        input_length = len(str(result))
                    logger.info(
                        "Human input received",
                        log_type=LogType.WORKFLOW,
                        session_id=session_id,
                        input_length=input_length,
                    )
                    return result
                except concurrent.futures.TimeoutError:
                    # Slice expired without input; loop to re-check cancel/timeout.
                    continue
        except concurrent.futures.TimeoutError:
            # Overall deadline exceeded (raised above when remaining <= 0).
            self.logger.warning("Session %s human input timeout", session_id)
            logger = get_server_logger()
            logger.warning(
                "Human input timeout",
                log_type=LogType.WORKFLOW,
                session_id=session_id,
                timeout_duration=timeout,
            )
            raise CustomTimeoutError("Input timeout", operation="wait_for_human_input", timeout_duration=timeout)
        finally:
            # Reset waiting state regardless of how the wait ended.
            session.waiting_for_input = False
            session.current_node_id = None
            session.pending_input_data = None
            session.human_input_future = None

    def provide_human_input(self, session_id: str, user_input: Any) -> None:
        """Resolve the pending future with the user's input.

        Raises ValidationError when the session is missing or not waiting.
        """
        session = self.store.get_session(session_id)
        if not session:
            logger = get_server_logger()
            logger.warning("Session %s not found when providing human input", session_id)
            raise ValidationError(
                "Session not found", details={"session_id": session_id, "input_provided": user_input is not None}
            )
        future: Optional[Future] = session.human_input_future
        if not session.waiting_for_input or future is None:
            logger = get_server_logger()
            logger.warning("Session %s is not waiting for input when providing data", session_id)
            raise ValidationError(
                "Session is not waiting for input",
                details={"session_id": session_id, "waiting_for_input": session.waiting_for_input},
            )
        future.set_result(user_input)
        session.waiting_for_input = False
        # Log only the size of the input, not its content.
        length = 0
        if isinstance(user_input, dict):
            length = len(user_input.get("text") or "")
        elif user_input is not None:
            length = len(str(user_input))
        logger = get_server_logger()
        logger.info(
            "Human input provided",
            log_type=LogType.WORKFLOW,
            session_id=session_id,
            input_length=length,
        )

    def cleanup_session(self, session_id: str) -> None:
        """Cancel any pending input future/promise and clear waiting state."""
        session = self.store.get_session(session_id)
        if not session:
            return
        future: Optional[Future] = session.human_input_future
        if future and not future.done():
            future.cancel()
        promise = session.input_promise
        if promise and not promise.done():
            promise.cancel()
        session.waiting_for_input = False
        session.current_node_id = None
        session.pending_input_data = None
        session.human_input_future = None
        session.human_input_value = None
        self.logger.info("Session %s cleaned from execution controller", session_id)
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "server/services/session_execution.py",
"license": "Apache License 2.0",
"lines": 132,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
OpenBMB/ChatDev:server/services/session_store.py | """Session persistence primitives for workflow runs."""
import logging
import time
from dataclasses import dataclass, field
from enum import Enum
from threading import Event
from typing import Any, Dict, Optional
from server.services.artifact_events import ArtifactEventQueue
class SessionStatus(Enum):
    """Lifecycle states for a workflow session."""
    IDLE = "idle"                          # created, not yet started
    RUNNING = "running"                    # executor actively working
    WAITING_FOR_INPUT = "waiting_for_input"  # blocked on human input
    COMPLETED = "completed"                # finished successfully
    ERROR = "error"                        # finished with an error
    CANCELLED = "cancelled"                # stopped by explicit cancellation
@dataclass
class WorkflowSession:
    """Mutable record describing a workflow session."""
    session_id: str
    yaml_file: str          # workflow definition filename
    task_prompt: str
    task_attachments: list[str] = field(default_factory=list)  # attachment ids supplied with the task
    status: SessionStatus = SessionStatus.IDLE
    created_at: float = field(default_factory=lambda: time.time())
    updated_at: float = field(default_factory=lambda: time.time())
    # Execution metadata
    executor: Optional[Any] = None       # active GraphExecutor, if running
    graph: Optional[Any] = None
    current_node_id: Optional[str] = None
    # Human input tracking (resolved by SessionExecutionController)
    waiting_for_input: bool = False
    input_promise: Optional[Any] = None
    pending_input_data: Optional[Dict[str, Any]] = None
    human_input_future: Optional[Any] = None  # concurrent.futures.Future rendezvous
    human_input_value: Optional[str] = None
    # Results + errors
    results: Dict[str, Any] = field(default_factory=dict)
    error_message: Optional[str] = None
    # Artifact streaming
    artifact_queue: ArtifactEventQueue = field(default_factory=ArtifactEventQueue)
    # Cancellation tracking
    cancel_event: Event = field(default_factory=Event)  # set to request cancellation
    cancel_reason: Optional[str] = None
class WorkflowSessionStore:
    """In-memory registry that tracks workflow session metadata."""

    def __init__(self) -> None:
        self._sessions: Dict[str, WorkflowSession] = {}
        self.logger = logging.getLogger(__name__)

    def create_session(
        self,
        *,
        yaml_file: str,
        task_prompt: str,
        session_id: str,
        attachments: Optional[list[str]] = None,
    ) -> WorkflowSession:
        """Register and return a fresh session record."""
        record = WorkflowSession(
            session_id=session_id,
            yaml_file=yaml_file,
            task_prompt=task_prompt,
            task_attachments=list(attachments or []),
        )
        self._sessions[session_id] = record
        self.logger.info("Created session %s for workflow %s", session_id, yaml_file)
        return record

    def get_session(self, session_id: str) -> Optional[WorkflowSession]:
        """Look up a session record, or None when unknown."""
        return self._sessions.get(session_id)

    def has_session(self, session_id: str) -> bool:
        """True when the session id is registered."""
        return session_id in self._sessions

    def update_session_status(self, session_id: str, status: SessionStatus, **kwargs: Any) -> None:
        """Set the status, bump updated_at, and apply known extra fields.

        Unknown keyword names are silently ignored; a missing session is a no-op.
        """
        record = self._sessions.get(session_id)
        if record is None:
            return
        record.status = status
        record.updated_at = time.time()
        for attr, value in kwargs.items():
            if hasattr(record, attr):
                setattr(record, attr, value)
        self.logger.info("Updated session %s status to %s", session_id, status.value)

    def set_session_error(self, session_id: str, error_message: str) -> None:
        """Mark the session as failed with *error_message*."""
        self.update_session_status(session_id, SessionStatus.ERROR, error_message=error_message)

    def complete_session(self, session_id: str, results: Dict[str, Any]) -> None:
        """Mark the session as completed and store its results."""
        self.update_session_status(session_id, SessionStatus.COMPLETED, results=results)

    def pop_session(self, session_id: str) -> Optional[WorkflowSession]:
        """Remove and return the session record, or None when absent."""
        return self._sessions.pop(session_id, None)

    def get_session_info(self, session_id: str) -> Optional[Dict[str, Any]]:
        """Serialize the public subset of a session for status reporting."""
        record = self._sessions.get(session_id)
        if record is None:
            return None
        return {
            "session_id": record.session_id,
            "yaml_file": record.yaml_file,
            "status": record.status.value,
            "created_at": record.created_at,
            "updated_at": record.updated_at,
            "current_node_id": record.current_node_id,
            "waiting_for_input": record.waiting_for_input,
            "error_message": record.error_message,
        }

    def list_sessions(self) -> Dict[str, Dict[str, Any]]:
        """Map every known session id to its serialized info."""
        return {sid: self.get_session_info(sid) for sid in self._sessions}

    def get_artifact_queue(self, session_id: str) -> Optional[ArtifactEventQueue]:
        """Return the session's artifact queue, or None when unknown."""
        record = self._sessions.get(session_id)
        return record.artifact_queue if record else None
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "server/services/session_store.py",
"license": "Apache License 2.0",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
OpenBMB/ChatDev:server/services/vuegraphs_storage.py | """
SQLite-backed storage helpers for Vue graph editor payloads.
"""
import os
import sqlite3
from pathlib import Path
from typing import Optional
# Paths whose schema has already been created in this process.
_INITIALIZED_PATHS: set[Path] = set()


def _get_db_path() -> Path:
    """Resolve the SQLite database path, allowing overrides via env."""
    return Path(os.getenv("VUEGRAPHS_DB_PATH", "data/vuegraphs.db"))


def _ensure_db_initialized() -> Path:
    """Create the SQLite database and table if they do not already exist.

    Re-runs the DDL when the path has not been seen in this process or the
    file has been removed since it was last initialized.
    """
    db_path = _get_db_path()
    if db_path not in _INITIALIZED_PATHS or not db_path.exists():
        db_path.parent.mkdir(parents=True, exist_ok=True)
        connection = sqlite3.connect(db_path)
        try:
            with connection:  # transaction scope: commits on success
                connection.execute(
                    """
                    CREATE TABLE IF NOT EXISTS vuegraphs (
                        filename TEXT PRIMARY KEY,
                        content TEXT NOT NULL
                    )
                    """
                )
        finally:
            # sqlite3's context manager only manages the transaction; the
            # connection must be closed explicitly or it leaks.
            connection.close()
        _INITIALIZED_PATHS.add(db_path)
    return db_path


def save_vuegraph_content(filename: str, content: str) -> None:
    """Insert or update the stored content for the provided filename."""
    db_path = _ensure_db_initialized()
    connection = sqlite3.connect(db_path)
    try:
        with connection:  # commit on success, rollback on error
            connection.execute(
                """
                INSERT INTO vuegraphs (filename, content)
                VALUES (?, ?)
                ON CONFLICT(filename) DO UPDATE SET content=excluded.content
                """,
                (filename, content),
            )
    finally:
        connection.close()


def fetch_vuegraph_content(filename: str) -> Optional[str]:
    """Return the stored content for filename, or None when absent."""
    db_path = _ensure_db_initialized()
    connection = sqlite3.connect(db_path)
    try:
        cursor = connection.execute(
            "SELECT content FROM vuegraphs WHERE filename = ?",
            (filename,),
        )
        row = cursor.fetchone()
        return row[0] if row else None
    finally:
        connection.close()
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "server/services/vuegraphs_storage.py",
"license": "Apache License 2.0",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
OpenBMB/ChatDev:server/services/websocket_executor.py | """GraphExecutor variant that reports results over WebSocket."""
import asyncio
from typing import List
from utils.logger import WorkflowLogger
from workflow.graph import GraphExecutor
from workflow.graph_context import GraphContext
from server.services.attachment_service import AttachmentService
from server.services.artifact_dispatcher import ArtifactDispatcher
from server.services.prompt_channel import WebPromptChannel
from server.services.session_store import WorkflowSessionStore
from server.services.session_execution import SessionExecutionController
from workflow.hooks.workspace_artifact import WorkspaceArtifact, WorkspaceArtifactHook
class WebSocketGraphExecutor(GraphExecutor):
    """GraphExecutor subclass that emits events via WebSocket."""

    def __init__(
        self,
        graph: GraphContext,
        session_id: str,
        session_controller: SessionExecutionController,
        attachment_service: AttachmentService,
        websocket_manager,
        session_store: WorkflowSessionStore,
        cancel_event=None,
    ):
        # Assign collaborators BEFORE super().__init__ so that hook_factory and
        # _create_logger (invoked by the base class) can see them.
        self.session_id = session_id
        self.session_controller = session_controller
        self.attachment_service = attachment_service
        self.websocket_manager = websocket_manager
        self.session_store = session_store
        self.results = {}
        # NOTE(review): `self.results` is initialized here but get_results()
        # returns `self.outputs` (presumably set by the base class) — confirm
        # whether `results` is still needed.
        self.artifact_dispatcher = ArtifactDispatcher(session_id, session_store, websocket_manager)

        def hook_factory(runtime_context):
            # Built per-run: binds a prompt channel to the run's attachment store.
            prompt_channel = WebPromptChannel(
                session_id=session_id,
                session_controller=session_controller,
                websocket_manager=websocket_manager,
                attachment_service=attachment_service,
                attachment_store=runtime_context.attachment_store,
            )
            return WorkspaceArtifactHook(
                attachment_store=runtime_context.attachment_store,
                emit_callback=self._handle_workspace_artifacts,
                prompt_channel=prompt_channel,
            )

        super().__init__(
            graph,
            session_id=session_id,
            workspace_hook_factory=hook_factory,
            cancel_event=cancel_event,
        )

    def _create_logger(self) -> WorkflowLogger:
        """Return a logger that mirrors entries to the session's WebSocket."""
        # Local import avoids a circular dependency at module load time.
        from server.services.websocket_logger import WebSocketLogger
        return WebSocketLogger(self.websocket_manager, self.session_id, self.graph.name, self.graph.log_level)

    async def execute_graph_async(self, task_prompt):
        """Run the blocking graph execution in the default thread executor."""
        await asyncio.get_event_loop().run_in_executor(None, self._execute, task_prompt)

    def get_results(self):
        """Expose the executor's outputs collected during the run."""
        return self.outputs

    def _handle_workspace_artifacts(self, artifacts: List[WorkspaceArtifact]) -> None:
        """Forward newly produced workspace artifacts to the dispatcher."""
        self.artifact_dispatcher.emit_workspace_artifacts(artifacts)
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "server/services/websocket_executor.py",
"license": "Apache License 2.0",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
OpenBMB/ChatDev:server/services/websocket_logger.py | import asyncio
from typing import Any, Dict
from entity.enums import LogLevel, EventType
from utils.logger import WorkflowLogger, LogEntry
from utils.structured_logger import get_workflow_logger
class WebSocketLogger(WorkflowLogger):
    """Workflow logger that mirrors every accepted entry to a WebSocket client."""

    def __init__(self, websocket_manager, session_id: str, workflow_id: str = None, log_level: LogLevel = LogLevel.DEBUG):
        # Console output stays off; the WebSocket is the delivery channel here.
        super().__init__(workflow_id, log_level, log_to_console=False)
        self.websocket_manager = websocket_manager
        self.session_id = session_id

    def add_log(self, level: LogLevel, message: str = None, node_id: str = None,
                event_type: EventType = None, details: Dict[str, Any] = None,
                duration: float = None) -> LogEntry | None:
        """Record the entry via the base logger, then push it to the client."""
        entry = super().add_log(level, message, node_id, event_type, details, duration)
        if not entry:
            # Filtered out by the base logger — nothing to forward.
            return None
        payload = {"type": "log", "data": entry.to_dict()}
        # send_message_sync handles scheduling whether or not a loop is running.
        self.websocket_manager.send_message_sync(self.session_id, payload)
        return entry
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "server/services/websocket_logger.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
OpenBMB/ChatDev:server/services/websocket_manager.py | """WebSocket connection manager used by FastAPI app."""
import asyncio
import json
import logging
import time
import traceback
import uuid
from typing import Any, Dict, Optional
from fastapi import WebSocket
from server.services.message_handler import MessageHandler
from server.services.attachment_service import AttachmentService
from server.services.session_execution import SessionExecutionController
from server.services.session_store import WorkflowSessionStore, SessionStatus
from server.services.workflow_run_service import WorkflowRunService
def _json_default(value):
    """Fallback serializer for json.dumps: to_dict(), then __dict__, then str()."""
    converter = getattr(value, "to_dict", None)
    if callable(converter):
        try:
            return converter()
        except Exception:
            pass
    if hasattr(value, "__dict__"):
        try:
            return vars(value)
        except Exception:
            pass
    # Last resort: a human-readable string representation.
    return str(value)


def _encode_ws_message(message: Any) -> str:
    """Serialize an outbound payload; plain strings pass through untouched."""
    return message if isinstance(message, str) else json.dumps(message, default=_json_default)
class WebSocketManager:
    """Owns WebSocket connections and wires them to the session services.

    One instance serves the whole app; per-session locks serialize sends on
    each connection.
    """

    def __init__(
        self,
        *,
        session_store: WorkflowSessionStore | None = None,
        session_controller: SessionExecutionController | None = None,
        attachment_service: AttachmentService | None = None,
        workflow_run_service: WorkflowRunService | None = None,
    ):
        self.active_connections: Dict[str, WebSocket] = {}
        self.connection_timestamps: Dict[str, float] = {}
        self.send_locks: Dict[str, asyncio.Lock] = {}
        # Captured on first connect; used by send_message_sync from other threads.
        self.loop: asyncio.AbstractEventLoop | None = None
        # Collaborators default to fresh instances when not injected.
        self.session_store = session_store or WorkflowSessionStore()
        self.session_controller = session_controller or SessionExecutionController(self.session_store)
        self.attachment_service = attachment_service or AttachmentService()
        self.workflow_run_service = workflow_run_service or WorkflowRunService(
            self.session_store,
            self.session_controller,
            self.attachment_service,
        )
        self.message_handler = MessageHandler(
            self.session_store,
            self.session_controller,
            self.workflow_run_service,
        )

    async def connect(self, websocket: WebSocket, session_id: Optional[str] = None) -> str:
        """Accept a socket, register it, and send the connection ack.

        Generates a UUID session id when none is supplied; returns the id.
        """
        await websocket.accept()
        if self.loop is None:
            try:
                self.loop = asyncio.get_running_loop()
            except RuntimeError:
                self.loop = None
        if not session_id:
            session_id = str(uuid.uuid4())
        self.active_connections[session_id] = websocket
        self.connection_timestamps[session_id] = time.time()
        self.send_locks[session_id] = asyncio.Lock()
        logging.info("WebSocket connected: %s", session_id)
        await self.send_message(
            session_id,
            {
                "type": "connection",
                "data": {"session_id": session_id, "status": "connected"},
            },
        )
        return session_id

    def disconnect(self, session_id: str) -> None:
        """Tear down a connection: cancel active work, drop state, clean attachments."""
        session = self.session_store.get_session(session_id)
        # A live workflow loses its client — request cancellation first.
        if session and session.status in {SessionStatus.RUNNING, SessionStatus.WAITING_FOR_INPUT}:
            self.workflow_run_service.request_cancel(
                session_id,
                reason="WebSocket disconnected",
            )
        if session_id in self.active_connections:
            del self.active_connections[session_id]
        if session_id in self.connection_timestamps:
            del self.connection_timestamps[session_id]
        if session_id in self.send_locks:
            del self.send_locks[session_id]
        self.session_controller.cleanup_session(session_id)
        # Only drop the session record when no executor is attached to it.
        remaining_session = self.session_store.get_session(session_id)
        if remaining_session and remaining_session.executor is None:
            self.session_store.pop_session(session_id)
            self.attachment_service.cleanup_session(session_id)
        logging.info("WebSocket disconnected: %s", session_id)

    async def send_message(self, session_id: str, message: Dict[str, Any]) -> None:
        """Send one message to a connected session, serialized by its lock.

        Errors are logged, not raised; disconnected sessions are ignored.
        """
        if session_id in self.active_connections:
            websocket = self.active_connections[session_id]
            try:
                lock = self.send_locks.get(session_id)
                if lock is None:
                    await websocket.send_text(_encode_ws_message(message))
                else:
                    async with lock:
                        await websocket.send_text(_encode_ws_message(message))
            except Exception as exc:
                traceback.print_exc()
                logging.error("Failed to send message to %s: %s", session_id, exc)
                # Deliberately NOT disconnecting on a failed send.
                # self.disconnect(session_id)

    def send_message_sync(self, session_id: str, message: Dict[str, Any]) -> None:
        """Send from synchronous/threaded code by scheduling onto a loop.

        On the loop thread it fires a task; off-thread it schedules onto the
        captured loop; with no loop at all it runs a temporary one.
        NOTE(review): the create_task result is not retained — per asyncio
        docs the task may be garbage-collected before completing; confirm.
        """
        try:
            loop = asyncio.get_running_loop()
            if loop.is_running():
                asyncio.create_task(self.send_message(session_id, message))
            else:
                asyncio.run(self.send_message(session_id, message))
        except RuntimeError:
            # No running loop on this thread.
            if self.loop and self.loop.is_running():
                asyncio.run_coroutine_threadsafe(
                    self.send_message(session_id, message),
                    self.loop,
                )
            else:
                asyncio.run(self.send_message(session_id, message))

    async def broadcast(self, message: Dict[str, Any]) -> None:
        """Send the message to every currently connected session."""
        for session_id in list(self.active_connections.keys()):
            await self.send_message(session_id, message)

    async def handle_heartbeat(self, session_id: str) -> None:
        """Answer a ping with a timestamped pong."""
        if session_id in self.active_connections:
            await self.send_message(
                session_id,
                {"type": "pong", "data": {"timestamp": time.time()}},
            )
        else:
            logging.warning("Heartbeat request from disconnected session: %s", session_id)

    async def handle_message(self, session_id: str, message: str) -> None:
        """Decode an inbound frame as JSON and dispatch it to the handler."""
        try:
            data = json.loads(message)
            await self.message_handler.handle_message(session_id, data, self)
        except json.JSONDecodeError:
            await self.send_message(
                session_id,
                {"type": "error", "data": {"message": "Invalid JSON format"}},
            )
        except Exception as exc:
            logging.error("Error handling message from %s: %s", session_id, exc)
            await self.send_message(
                session_id,
                {"type": "error", "data": {"message": str(exc)}},
            )
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "server/services/websocket_manager.py",
"license": "Apache License 2.0",
"lines": 152,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
OpenBMB/ChatDev:server/services/workflow_run_service.py | """Service responsible for executing workflows for WebSocket sessions."""
import logging
from pathlib import Path
from typing import List, Optional, Union
from check.check import load_config
from entity.graph_config import GraphConfig
from entity.messages import Message
from entity.enums import LogLevel
from utils.exceptions import ValidationError, WorkflowCancelledError
from utils.structured_logger import get_server_logger, LogType
from utils.task_input import TaskInputBuilder
from workflow.graph_context import GraphContext
from server.services.attachment_service import AttachmentService
from server.services.session_execution import SessionExecutionController
from server.services.session_store import SessionStatus, WorkflowSessionStore
from server.services.websocket_executor import WebSocketGraphExecutor
from server.services.workflow_storage import validate_workflow_filename
from server.settings import WARE_HOUSE_DIR, YAML_DIR
class WorkflowRunService:
    """Coordinates workflow execution for WebSocket sessions.

    Bridges the session store, the execution controller, and the attachment
    service: validates start requests, drives the graph executor, and pushes
    lifecycle events (started / completed / cancelled / error) to the client
    through the supplied websocket manager.
    """

    def __init__(
        self,
        session_store: WorkflowSessionStore,
        session_controller: SessionExecutionController,
        attachment_service: AttachmentService,
    ) -> None:
        self.session_store = session_store
        self.session_controller = session_controller
        self.attachment_service = attachment_service
        self.logger = logging.getLogger(__name__)

    def request_cancel(self, session_id: str, *, reason: Optional[str] = None) -> bool:
        """Flag a session for cancellation and mark it CANCELLED.

        Returns:
            True when the session exists (even if cancellation was already
            requested), False when the session id is unknown.
        """
        session = self.session_store.get_session(session_id)
        if not session:
            return False
        cancel_message = reason or "Cancellation requested"
        session.cancel_reason = cancel_message
        if not session.cancel_event.is_set():
            session.cancel_event.set()
            self.logger.info("Cancellation requested for session %s", session_id)
        if session.executor:
            try:
                session.executor.request_cancel(cancel_message)
            except Exception as exc:
                # Best effort: the executor may already be tearing down.
                self.logger.warning("Failed to propagate cancellation to executor for %s: %s", session_id, exc)
        self.session_store.update_session_status(session_id, SessionStatus.CANCELLED, error_message=cancel_message)
        return True

    async def start_workflow(
        self,
        session_id: str,
        yaml_file: str,
        task_prompt: str,
        websocket_manager,
        *,
        attachments: Optional[List[str]] = None,
        log_level: Optional[LogLevel] = None,
    ) -> None:
        """Validate inputs, create the session, and run the workflow.

        Emits "workflow_started" before execution begins; validation and
        startup failures are reported to the client as "error" events and
        recorded on the session. Execution outcomes are reported from
        _execute_workflow_async.
        """
        normalized_yaml_name = (yaml_file or "").strip()
        try:
            yaml_path = self._resolve_yaml_path(normalized_yaml_name)
            normalized_yaml_name = yaml_path.name
            attachments = attachments or []
            # A prompt may be omitted only when attachments carry the task.
            if (not task_prompt or not task_prompt.strip()) and not attachments:
                raise ValidationError(
                    "Task prompt cannot be empty",
                    details={"task_prompt_provided": bool(task_prompt)},
                )
            self.attachment_service.prepare_session_workspace(session_id)
            self.session_store.create_session(
                yaml_file=normalized_yaml_name,
                task_prompt=task_prompt,
                session_id=session_id,
                attachments=attachments,
            )
            self.session_store.update_session_status(session_id, SessionStatus.RUNNING)
            await websocket_manager.send_message(
                session_id,
                {
                    "type": "workflow_started",
                    "data": {"yaml_file": normalized_yaml_name, "task_prompt": task_prompt},
                },
            )
            await self._execute_workflow_async(
                session_id,
                yaml_path,
                task_prompt,
                websocket_manager,
                attachments,
                log_level,
            )
        except ValidationError as exc:
            self.logger.error(str(exc))
            logger = get_server_logger()
            logger.error(
                "Workflow validation error",
                log_type=LogType.WORKFLOW,
                session_id=session_id,
                yaml_file=normalized_yaml_name,
                validation_details=getattr(exc, "details", None),
            )
            self.session_store.set_session_error(session_id, str(exc))
            await websocket_manager.send_message(
                session_id,
                {"type": "error", "data": {"message": str(exc)}},
            )
        except Exception as exc:
            self.logger.error(f"Error starting workflow for session {session_id}: {exc}")
            logger = get_server_logger()
            logger.log_exception(
                exc,
                "Error starting workflow",
                session_id=session_id,
                yaml_file=normalized_yaml_name,
            )
            self.session_store.set_session_error(session_id, str(exc))
            await websocket_manager.send_message(
                session_id,
                {
                    "type": "error",
                    "data": {"message": f"Failed to start workflow: {exc}"},
                },
            )

    async def _execute_workflow_async(
        self,
        session_id: str,
        yaml_path: Path,
        task_prompt: str,
        websocket_manager,
        attachments: List[str],
        log_level: Optional[LogLevel],
    ) -> None:
        """Run the graph executor and push completion/cancellation/error events.

        Always cleans up the session's executor/graph references and, when the
        client has already disconnected, drops the session entirely.
        """
        session = self.session_store.get_session(session_id)
        cancel_event = session.cancel_event if session else None
        try:
            design = load_config(yaml_path)
            graph_config = GraphConfig.from_definition(
                design.graph,
                name=f"session_{session_id}",
                output_root=WARE_HOUSE_DIR,
                source_path=str(yaml_path),
                vars=design.vars,
            )
            if log_level:
                # Override the configured level on both the runtime config and
                # its underlying definition.
                graph_config.log_level = log_level
                graph_config.definition.log_level = log_level
            graph_context = GraphContext(config=graph_config)
            executor = WebSocketGraphExecutor(
                graph_context,
                session_id,
                self.session_controller,
                self.attachment_service,
                websocket_manager,
                self.session_store,
                cancel_event=cancel_event,
            )
            if session:
                session.graph = graph_context
                session.executor = executor
                # Cancellation may have been requested before the executor
                # existed; forward it now.
                if session.cancel_event.is_set():
                    executor.request_cancel(session.cancel_reason or "Cancellation requested")
            task_input = self._build_initial_task_input(
                session_id,
                graph_context,
                task_prompt,
                attachments,
                executor.attachment_store,
            )
            await executor.execute_graph_async(task_input)
            # If cancellation was requested during execution but not raised inside threads,
            # treat the run as cancelled to avoid conflicting status.
            if cancel_event and cancel_event.is_set():
                reason = session.cancel_reason if session else "Cancellation requested"
                raise WorkflowCancelledError(reason, workflow_id=graph_context.name)
            results = executor.get_results()
            self.session_store.complete_session(session_id, results)
            await websocket_manager.send_message(
                session_id,
                {
                    "type": "workflow_completed",
                    "data": {
                        "results": results,
                        "summary": graph_context.final_message(),
                        "token_usage": executor.token_tracker.get_token_usage(),
                    },
                },
            )
            logger = get_server_logger()
            logger.info(
                "Workflow execution completed successfully",
                log_type=LogType.WORKFLOW,
                session_id=session_id,
                yaml_path=str(yaml_path),
                result_count=len(results) if isinstance(results, dict) else 0,
            )
        except WorkflowCancelledError as exc:
            reason = str(exc)
            self.session_store.update_session_status(session_id, SessionStatus.CANCELLED, error_message=reason)
            await websocket_manager.send_message(
                session_id,
                {
                    "type": "workflow_cancelled",
                    "data": {"message": reason},
                },
            )
            logger = get_server_logger()
            logger.info(
                "Workflow execution cancelled",
                log_type=LogType.WORKFLOW,
                session_id=session_id,
                yaml_path=str(yaml_path),
                cancellation_reason=reason,
            )
        except ValidationError as exc:
            self.session_store.set_session_error(session_id, str(exc))
            await websocket_manager.send_message(
                session_id,
                {"type": "error", "data": {"message": str(exc)}},
            )
            logger = get_server_logger()
            logger.error(
                "Workflow validation error",
                log_type=LogType.WORKFLOW,
                session_id=session_id,
                yaml_path=str(yaml_path),
                validation_details=getattr(exc, "details", None),
            )
        except Exception as exc:
            self.session_store.set_session_error(session_id, str(exc))
            await websocket_manager.send_message(
                session_id,
                {"type": "error", "data": {"message": f"Workflow execution error: {exc}"}},
            )
            logger = get_server_logger()
            logger.log_exception(
                exc,
                f"Error executing workflow for session {session_id}",
                session_id=session_id,
                yaml_path=str(yaml_path),
            )
        finally:
            # Drop executor/graph references so they can be garbage collected.
            session_ref = self.session_store.get_session(session_id)
            if session_ref:
                session_ref.executor = None
                session_ref.graph = None
            self.session_controller.cleanup_session(session_id)
            # A disconnected client can never receive further events; remove
            # the orphaned session.
            if session_id not in websocket_manager.active_connections:
                self.session_store.pop_session(session_id)

    def _build_initial_task_input(
        self,
        session_id: str,
        graph_context: GraphContext,
        prompt: str,
        attachment_ids: List[str],
        store,
    ) -> Union[List[Message], str]:
        """Return the executor input: the bare prompt, or prompt + attachment blocks."""
        # NOTE(review): graph_context is currently unused here — presumably kept
        # for interface symmetry; confirm before removing.
        if not attachment_ids:
            return prompt
        blocks = self.attachment_service.build_attachment_blocks(
            session_id,
            attachment_ids,
            target_store=store,
        )
        return TaskInputBuilder(store).build_from_blocks(prompt, blocks)

    def _resolve_yaml_path(self, yaml_filename: str) -> Path:
        """Validate and resolve YAML paths inside the configured directory."""
        safe_name = validate_workflow_filename(yaml_filename, require_yaml_extension=True)
        yaml_path = YAML_DIR / safe_name
        if not yaml_path.exists():
            raise ValidationError("YAML file not found", details={"yaml_file": safe_name})
        return yaml_path
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "server/services/workflow_run_service.py",
"license": "Apache License 2.0",
"lines": 268,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
OpenBMB/ChatDev:server/services/workflow_storage.py | """Utilities for validating and persisting workflow YAML files."""
import re
from pathlib import Path
from typing import Any, Tuple
import yaml
from check.check import check_config
from utils.exceptions import (
ResourceConflictError,
ResourceNotFoundError,
SecurityError,
ValidationError,
WorkflowExecutionError,
)
from utils.structured_logger import get_server_logger, LogType
def _update_workflow_id(content: str, workflow_id: str) -> str:
# Pattern to match graph:\n id: <value>
pattern = re.compile(r"(graph:\s*\n\s*id:\s*).*$", re.MULTILINE)
match = pattern.search(content)
if match:
# Replace the value after "graph:\n id: "
return pattern.sub(rf"\1{workflow_id}", content, count=1)
# If no graph.id found, look for standalone id: at root level (legacy support)
root_id_pattern = re.compile(r"^(id:\s*).*$", re.MULTILINE)
root_match = root_id_pattern.search(content)
if root_match:
return root_id_pattern.sub(rf"\1{workflow_id}", content, count=1)
# If neither found, add graph.id after graph: section if it exists
graph_pattern = re.compile(r"(graph:\s*\n)")
graph_match = graph_pattern.search(content)
if graph_match:
return graph_pattern.sub(rf"\1 id: {workflow_id}\n", content, count=1)
# Fallback (is invalid)
lines = content.splitlines()
insert_index = 0
if lines and lines[0].strip() == "---":
insert_index = 1
lines.insert(insert_index, f"graph:\n id: {workflow_id}")
updated = "\n".join(lines)
if content.endswith("\n"):
updated += "\n"
return updated
def validate_workflow_filename(filename: str, *, require_yaml_extension: bool = True) -> str:
    """Sanitize a workflow filename, rejecting traversal and odd characters."""
    value = (filename or "").strip()
    if not value:
        raise ValidationError("Filename cannot be empty", field="filename")
    looks_like_traversal = ".." in value or value.startswith(("/", "\\"))
    if looks_like_traversal:
        # Record the attempt before refusing it.
        get_server_logger().log_security_event(
            "PATH_TRAVERSAL_ATTEMPT",
            f"Suspicious filename detected: {value}",
            details={"received_filename": value},
        )
        raise SecurityError("Invalid filename format", details={"filename": value})
    if re.match(r"^[a-zA-Z0-9._-]+$", value) is None:
        raise ValidationError(
            "Invalid filename: only letters, digits, dots, underscores, and hyphens are allowed",
            field="filename",
        )
    has_yaml_suffix = value.endswith((".yaml", ".yml"))
    if require_yaml_extension and not has_yaml_suffix:
        raise ValidationError("Filename must end with .yaml or .yml", field="filename")
    return Path(value).name
def validate_workflow_content(filename: str, content: str) -> Tuple[str, Any]:
    """Validate a workflow upload and return (safe_filename, parsed_yaml).

    Raises:
        ValidationError: bad filename, empty document, YAML syntax error, or
            schema violations reported by check_config.
        SecurityError: traversal-like filename (from validate_workflow_filename).
    """
    safe_filename = validate_workflow_filename(filename, require_yaml_extension=True)
    try:
        yaml_content = yaml.safe_load(content)
        if yaml_content is None:
            raise ValidationError("YAML content is empty", field="content")
        # check_config returns a truthy error report when validation failed.
        errors = check_config(yaml_content)
        if errors:
            raise ValidationError(f"YAML validation errors:\n{errors}", field="content")
    except yaml.YAMLError as exc:
        logger = get_server_logger()
        logger.warning("Invalid YAML content in upload", details={"error": str(exc)})
        raise ValidationError(f"Invalid YAML syntax: {exc}", field="content")
    return safe_filename, yaml_content
def persist_workflow(
    safe_filename: str,
    content: str,
    yaml_content: Any,
    *,
    action: str,
    directory: Path,
) -> None:
    """Write validated workflow text to *directory* and log the action.

    Args:
        safe_filename: Name already vetted by validate_workflow_filename.
        content: Raw YAML text, written verbatim as UTF-8.
        yaml_content: Parsed document — unused here, kept so callers can pass
            the validate_workflow_content result unchanged.
        action: Label recorded in the structured log (e.g. "create").
        directory: Existing target directory.

    Raises:
        WorkflowExecutionError: when the file cannot be written.
    """
    save_path = directory / safe_filename
    logger = get_server_logger()
    try:
        save_path.write_text(content, encoding="utf-8")
    except Exception as exc:
        logger.log_exception(exc, f"Failed to save workflow file {safe_filename}")
        # Chain the original error so the OS-level cause is not lost.
        raise WorkflowExecutionError(
            "Failed to save workflow file", details={"filename": safe_filename}
        ) from exc
    logger.info(
        "Workflow file persisted",
        log_type=LogType.WORKFLOW,
        filename=safe_filename,
        action=action,
    )
def rename_workflow(source_filename: str, target_filename: str, *, directory: Path) -> None:
    """Rename a workflow YAML file and sync its embedded workflow id.

    Raises:
        ValidationError: source and target names are identical (or invalid).
        ResourceNotFoundError: the source file does not exist.
        ResourceConflictError: the target name is already taken.
        WorkflowExecutionError: the filesystem rename or id rewrite failed.
    """
    source_safe = validate_workflow_filename(source_filename, require_yaml_extension=True)
    target_safe = validate_workflow_filename(target_filename, require_yaml_extension=True)
    if source_safe == target_safe:
        raise ValidationError("Source and target filenames must be different", field="new_filename")
    source_path = directory / source_safe
    target_path = directory / target_safe
    if not source_path.exists() or not source_path.is_file():
        raise ResourceNotFoundError(
            "Workflow file not found",
            resource_type="workflow",
            resource_id=source_safe,
        )
    if target_path.exists():
        raise ResourceConflictError(
            "Target workflow already exists",
            resource_type="workflow",
            resource_id=target_safe,
        )
    logger = get_server_logger()
    try:
        source_path.rename(target_path)
    except Exception as exc:
        logger.log_exception(exc, f"Failed to rename workflow file {source_safe} to {target_safe}")
        raise WorkflowExecutionError(
            "Failed to rename workflow file",
            details={"source": source_safe, "target": target_safe},
        )
    try:
        # Keep the embedded workflow id in sync with the new filename stem.
        new_workflow_id = Path(target_safe).stem
        content = target_path.read_text(encoding="utf-8")
        updated = _update_workflow_id(content, new_workflow_id)
        if updated != content:
            target_path.write_text(updated, encoding="utf-8")
    except Exception as exc:
        logger.log_exception(exc, f"Failed to update workflow id after rename to {target_safe}")
        raise WorkflowExecutionError(
            "Failed to update workflow id after rename",
            details={"target": target_safe},
        )
    logger.info(
        "Workflow file renamed",
        log_type=LogType.WORKFLOW,
        source=source_safe,
        target=target_safe,
        action="rename",
    )
def copy_workflow(source_filename: str, target_filename: str, *, directory: Path) -> None:
    """Duplicate a workflow file under a new, validated name."""
    source_safe = validate_workflow_filename(source_filename, require_yaml_extension=True)
    target_safe = validate_workflow_filename(target_filename, require_yaml_extension=True)
    if source_safe == target_safe:
        raise ValidationError("Source and target filenames must be different", field="new_filename")
    source_path = directory / source_safe
    target_path = directory / target_safe
    source_missing = not (source_path.exists() and source_path.is_file())
    if source_missing:
        raise ResourceNotFoundError(
            "Workflow file not found",
            resource_type="workflow",
            resource_id=source_safe,
        )
    if target_path.exists():
        raise ResourceConflictError(
            "Target workflow already exists",
            resource_type="workflow",
            resource_id=target_safe,
        )
    logger = get_server_logger()
    try:
        payload = source_path.read_text(encoding="utf-8")
        target_path.write_text(payload, encoding="utf-8")
    except Exception as exc:
        logger.log_exception(exc, f"Failed to copy workflow file {source_safe} to {target_safe}")
        raise WorkflowExecutionError(
            "Failed to copy workflow file",
            details={"source": source_safe, "target": target_safe},
        )
    logger.info(
        "Workflow file copied",
        log_type=LogType.WORKFLOW,
        source=source_safe,
        target=target_safe,
        action="copy",
    )
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "server/services/workflow_storage.py",
"license": "Apache License 2.0",
"lines": 181,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
OpenBMB/ChatDev:server/state.py | """Global state shared across server modules."""
from typing import Optional
from server.services.websocket_manager import WebSocketManager
from utils.exceptions import ValidationError
websocket_manager: Optional[WebSocketManager] = None
def init_state() -> None:
    """Ensure global singletons are ready for incoming requests."""
    # Instantiates the WebSocketManager singleton as a side effect.
    get_websocket_manager()
def get_websocket_manager() -> WebSocketManager:
    """Return the process-wide WebSocketManager, creating it on first use."""
    global websocket_manager
    if websocket_manager is None:
        # Lazy construction avoids building the manager at import time.
        websocket_manager = WebSocketManager()
    return websocket_manager
def ensure_known_session(session_id: str, *, require_connection: bool = False) -> WebSocketManager:
    """Return the WebSocket manager if the session is connected or known.

    Raises ValidationError for empty ids, unknown sessions, or (when
    require_connection is True) sessions that exist but are not connected.
    """
    manager = get_websocket_manager()
    if session_id:
        if session_id in manager.active_connections:
            return manager
        known = manager.session_store.has_session(session_id)
        if known and not require_connection:
            return manager
    raise ValidationError("Session not connected", details={"session_id": session_id})
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "server/state.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
OpenBMB/ChatDev:server_main.py | import argparse
import logging
from pathlib import Path
from runtime.bootstrap.schema import ensure_schema_registry_populated
from server.app import app
ensure_schema_registry_populated()
def main():
    """Parse CLI options, configure logging, and launch the uvicorn server."""
    import uvicorn

    parser = argparse.ArgumentParser(description="DevAll Workflow Server")
    parser.add_argument(
        "--host",
        type=str,
        default="0.0.0.0",
        help="Server host (default: 0.0.0.0)",
    )
    parser.add_argument(
        "--port",
        type=int,
        default=8000,
        help="Server port (default: 8000)",
    )
    parser.add_argument(
        "--log-level",
        choices=["debug", "info", "warning", "error", "critical"],
        default="info",
        help="Log level (default: info)",
    )
    parser.add_argument(
        "--reload",
        action="store_true",
        help="Enable auto-reload for development",
    )
    options = parser.parse_args()

    # Propagate the chosen level to the structured logger via environment.
    import os
    level_name = options.log_level.upper()
    os.environ['LOG_LEVEL'] = level_name

    # The file handler needs the log directory to exist up front.
    log_dir = Path("logs")
    log_dir.mkdir(exist_ok=True)
    logging.basicConfig(
        level=getattr(logging, level_name),
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
        handlers=[
            logging.FileHandler(log_dir / "server.log"),
            logging.StreamHandler(),
        ],
    )
    logger = logging.getLogger(__name__)
    logger.info(f"Starting DevAll Workflow Server on {options.host}:{options.port}")

    # Launch the server.
    uvicorn.run(
        "server.app:app",
        host=options.host,
        port=options.port,
        reload=options.reload,
        log_level=options.log_level,
        ws="wsproto",
    )


if __name__ == "__main__":
    main()
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "server_main.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
OpenBMB/ChatDev:tools/export_design_template.py | """CLI for exporting DesignConfig YAML templates from typed schemas."""
import argparse
from collections import OrderedDict
from pathlib import Path
from typing import Any, Dict, Mapping, Sequence, Tuple
import yaml
from runtime.bootstrap.schema import ensure_schema_registry_populated
from entity.configs import BaseConfig, DesignConfig
from entity.configs.base import ChildKey, ConfigFieldSpec
# Maps raw type-hint spellings to the canonical labels used in templates.
TYPE_ALIASES: Dict[str, str] = {
    "str": "string",
    "string": "string",
    "int": "int",
    "float": "float",
    "bool": "bool",
    "list": "list",
    "dict": "dict",
    "mapping": "mapping",
    "enum": "enum",
}

# Populate the schema registry at import time so DesignConfig is complete
# before any template is emitted.
ensure_schema_registry_populated()
class DesignTemplateEmitter:
    """Builds human-oriented YAML templates from config schemas.

    Walks a BaseConfig class tree and renders each field as either a nested
    mapping (for child configs / variants) or a "<type> | default"
    placeholder string.
    """

    def __init__(self, root_cls: type[BaseConfig] = DesignConfig):
        # Schema class the emitted template document starts from.
        self.root_cls = root_cls

    def build(self, *, version: str | None = None) -> OrderedDict[str, Any]:
        """Emit the full template document, optionally pinning a version key."""
        document = self._emit_config(self.root_cls, stack=[])
        if version:
            document["version"] = version
        return document

    # ------------------------------------------------------------------
    # Rendering helpers
    # ------------------------------------------------------------------
    def _emit_config(self, config_cls: type[BaseConfig], *, stack: list[type[BaseConfig]]) -> OrderedDict[str, Any]:
        """Render one config class; *stack* tracks ancestors to break cycles."""
        if config_cls in stack:
            # Recursive schema: emit a marker instead of recursing forever.
            return OrderedDict(
                {
                    self._format_recursive_placeholder(config_cls, stack):
                        "See earlier definition to avoid infinite recursion",
                }
            )
        stack.append(config_cls)
        payload: "OrderedDict[str, Any]" = OrderedDict()
        field_specs = config_cls.field_specs()
        try:
            for name, spec in field_specs.items():
                payload[name] = self._emit_field(config_cls, spec, stack=stack)
        finally:
            # Always unwind so siblings see the correct ancestor stack.
            stack.pop()
        return payload

    def _emit_field(self, parent_cls: type[BaseConfig], spec: ConfigFieldSpec, *, stack: list[type[BaseConfig]]) -> Any:
        """Render a single field: variants, nested config, container, or scalar."""
        routes = self._routes_for_field(parent_cls, spec.name)
        if routes:
            # Polymorphic field: show one entry per registered variant.
            variant_block: "OrderedDict[str, Any]" = OrderedDict()
            for label, child_cls in routes:
                variant_block[label] = self._wrap_with_container(
                    spec,
                    self._emit_config(child_cls, stack=stack),
                )
            return variant_block
        if spec.child is not None:
            return self._wrap_with_container(
                spec,
                self._emit_config(spec.child, stack=stack),
            )
        hint = (spec.type_hint or "value").lower()
        if self._looks_like_dict(hint):
            # Free-form mapping: show a single "<key>" example entry.
            value_placeholder = self._format_placeholder(type_hint="value", required=True, default=None, enum=None)
            return OrderedDict({"<key>": value_placeholder})
        if self._looks_like_list(hint):
            # List field: emit one example element using the inner type.
            inner_hint = self._extract_inner_type(spec.type_hint)
            entry_placeholder = self._format_placeholder(
                type_hint=inner_hint,
                required=spec.required,
                default=None,
                enum=spec.enum,
            )
            return [entry_placeholder]
        return self._format_placeholder_from_spec(spec)

    def _routes_for_field(self, parent_cls: type[BaseConfig], field_name: str) -> Sequence[Tuple[str, type[BaseConfig]]]:
        """Collect (label, child class) variants registered for *field_name*."""
        routes: list[Tuple[str, type[BaseConfig]]] = []
        for key, child in parent_cls.child_routes().items():
            if key.field != field_name:
                continue
            label = self._format_variant_label(key)
            routes.append((label, child))
        return routes

    def _wrap_with_container(self, spec: ConfigFieldSpec, payload: Any) -> Any:
        """Wrap *payload* in a list when the field's type hint is list-like."""
        hint = (spec.type_hint or "").lower()
        if self._looks_like_list(hint):
            return [payload]
        return payload

    # ------------------------------------------------------------------
    # Formatting utilities
    # ------------------------------------------------------------------
    def _format_placeholder_from_spec(self, spec: ConfigFieldSpec) -> str:
        """Placeholder built from a field spec's own attributes."""
        return self._format_placeholder(
            type_hint=spec.type_hint,
            required=spec.required,
            default=spec.default,
            enum=spec.enum,
        )

    def _format_placeholder(
        self,
        *,
        type_hint: str | None,
        required: bool,
        default: Any,
        enum: Sequence[Any] | None,
    ) -> str:
        """Render the "<type> | default" placeholder text for a scalar field."""
        type_label = self._normalize_type(type_hint)
        default_label = self._format_default(enum=enum, default=default, required=required)
        return f"<{type_label}> | {default_label}"

    def _format_default(
        self,
        *,
        enum: Sequence[Any] | None,
        default: Any,
        required: bool,
    ) -> str:
        """Describe the default: enum choices, required/None, or a literal."""
        if enum:
            return f"[{', '.join(map(str, enum))}]"
        if default is None:
            return "required" if required else "None"
        if isinstance(default, bool):
            # Checked before int: bool is a subclass of int in Python.
            return "true" if default else "false"
        if isinstance(default, (int, float)):
            return str(default)
        if isinstance(default, str):
            return f'"{default}"'
        if isinstance(default, Mapping):
            return "{}"
        if isinstance(default, Sequence) and not isinstance(default, (str, bytes)):
            return "[]"
        return str(default)

    def _normalize_type(self, type_hint: str | None) -> str:
        """Replace the leading base type with its canonical alias, if any."""
        if not type_hint:
            return "value"
        normalized = type_hint.strip()
        # Base type is everything before any "[...]" or "|" union part.
        base = normalized.split("[", 1)[0].split("|", 1)[0].strip().lower()
        alias = TYPE_ALIASES.get(base)
        if alias:
            normalized = normalized.replace(base, alias, 1)
        return normalized

    @staticmethod
    def _looks_like_list(type_hint: str) -> bool:
        # Matches "list", "list[...]" and the shorthand "...[]" notation.
        return type_hint.startswith("list") or type_hint.endswith("[]")

    @staticmethod
    def _looks_like_dict(type_hint: str) -> bool:
        return type_hint.startswith("dict") or "mapping" in type_hint

    @staticmethod
    def _extract_inner_type(type_hint: str | None) -> str:
        """Pull the element type out of "container[inner]"; "value" otherwise."""
        if not type_hint or "[" not in type_hint or "]" not in type_hint:
            return "value"
        start = type_hint.find("[") + 1
        end = type_hint.rfind("]")
        inner = type_hint[start:end].strip()
        return inner or "value"

    @staticmethod
    def _format_variant_label(key: ChildKey) -> str:
        """Label for a variant route, including its discriminator value."""
        if key.value is None:
            return f"<variant[{key.field}]>"
        return f"<variant[{key.field}]={key.value}>"

    @staticmethod
    def _format_recursive_placeholder(config_cls: type[BaseConfig], stack: list[type[BaseConfig]]) -> str:
        """Marker key showing the recursion path that was cut short."""
        cycle = " → ".join(cls.__name__ for cls in (*stack, config_cls))
        return f"<recursive[{config_cls.__name__}] path: {cycle}>"
def dump_yaml(data: Mapping[str, Any], path: Path) -> None:
    """Serialize *data* to YAML at *path*, preserving insertion order."""

    class _Dumper(yaml.SafeDumper):
        # Local subclass so the representer does not leak into SafeDumper.
        pass

    def _represent_ordered_dict(dumper: yaml.SafeDumper, value: OrderedDict) -> yaml.nodes.MappingNode:  # type: ignore
        # Emit OrderedDict as a plain mapping in its stored order.
        return dumper.represent_dict(value.items())

    _Dumper.add_representer(OrderedDict, _represent_ordered_dict)
    with path.open("w", encoding="utf-8") as handle:
        # sort_keys=False keeps the schema-defined field order.
        yaml.dump(data, handle, Dumper=_Dumper, sort_keys=False, allow_unicode=True)
def parse_args(argv: Sequence[str] | None = None) -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Export design_0.4.0 YAML templates from config schemas")
parser.add_argument("--output", type=Path, required=True, help="Primary output YAML path")
parser.add_argument("--version", type=str, default=None, help="Version string to pin in the template")
parser.add_argument(
"--mirror",
type=Path,
nargs="*",
default=(),
help="Optional additional paths that should receive the same generated document",
)
return parser.parse_args(argv)
def main(argv: Sequence[str] | None = None) -> int:
    """CLI entry point: generate the template and write it to all targets.

    Returns 0 on success; exceptions propagate to the caller.
    """
    args = parse_args(argv)
    emitter = DesignTemplateEmitter(DesignConfig)
    document = emitter.build(version=args.version)
    # Primary output first, then any mirror copies of the same document.
    targets = [args.output, *args.mirror]
    for target in targets:
        target.parent.mkdir(parents=True, exist_ok=True)
        dump_yaml(document, target)
    print("Exported design template to:")
    for target in targets:
        print(f"  - {target.resolve()}")
    return 0


if __name__ == "__main__":  # pragma: no cover
    # uv run -m tools.export_design_template --output yaml_template/design.yaml
    raise SystemExit(main())
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "tools/export_design_template.py",
"license": "Apache License 2.0",
"lines": 202,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
OpenBMB/ChatDev:utils/attachments.py | """Attachment storage and serialization helpers."""
import base64
import hashlib
import json
import mimetypes
import shutil
import uuid
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, Optional
from entity.messages import AttachmentRef, MessageBlock, MessageBlockType
DEFAULT_INLINE_LIMIT = 512 * 1024 # 512 KB
@dataclass
class AttachmentRecord:
    """Stores metadata about an attachment tracked inside a workflow run."""

    ref: AttachmentRef
    kind: MessageBlockType = MessageBlockType.FILE
    description: Optional[str] = None
    extra: Dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this record into a JSON-compatible mapping."""
        payload = {
            "ref": self.ref.to_dict(),
            "kind": self.kind.value,
            "description": self.description,
            "extra": self.extra,
        }
        return payload

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "AttachmentRecord":
        """Rebuild a record from its to_dict() representation."""
        raw_kind = data.get("kind", MessageBlockType.FILE.value)
        try:
            parsed_kind = MessageBlockType(raw_kind)
        except ValueError:
            # Unknown kinds degrade to the generic FILE type.
            parsed_kind = MessageBlockType.FILE
        return cls(
            ref=AttachmentRef.from_dict(data.get("ref") or {}),
            kind=parsed_kind,
            description=data.get("description"),
            extra=data.get("extra") or {},
        )

    def as_message_block(self) -> MessageBlock:
        """Convert to a MessageBlock referencing this attachment."""
        return MessageBlock(
            type=self.kind,
            attachment=self.ref.copy(),
            data=dict(self.extra),
        )
class AttachmentStore:
"""Filesystem-backed attachment manifest for a workflow execution."""
def __init__(self, root_dir: Path | str, inline_size_limit: int = DEFAULT_INLINE_LIMIT) -> None:
    self.root = Path(root_dir)
    self.inline_size_limit = inline_size_limit
    self.root.mkdir(parents=True, exist_ok=True)
    # Manifest file persists records across process restarts.
    self.manifest_path = self.root / "attachments_manifest.json"
    # attachment_id -> record for everything registered in this store.
    self._records: Dict[str, AttachmentRecord] = {}
    # Subset of ids that get written to the manifest on save.
    self._persistent_ids: set[str] = set()
    # sha256 -> attachment_id, used for deduplication lookups.
    self._hash_index: Dict[str, str] = {}
    self._load_manifest()
def register_file(
    self,
    file_path: Path | str,
    *,
    kind: MessageBlockType = MessageBlockType.FILE,
    display_name: Optional[str] = None,
    mime_type: Optional[str] = None,
    attachment_id: Optional[str] = None,
    copy_file: bool = True,
    description: Optional[str] = None,
    extra: Optional[Dict[str, Any]] = None,
    persist: bool = True,
    deduplicate: bool = False,
) -> AttachmentRecord:
    """Register a local file and return its attachment record.

    Args:
        file_path: Existing file on disk to register.
        kind: Message-block type recorded for the attachment.
        display_name: Name shown to users; defaults to the file name.
        mime_type: Explicit MIME type; guessed from the name when omitted.
        attachment_id: Stable id to use; random hex when omitted.
        copy_file: Copy the file into the store (True) or reference it
            in place (False).
        description: Optional free-text description.
        extra: Extra metadata stored alongside the record (copied).
        persist: Include the record in the saved manifest.
        deduplicate: Reuse an existing record with the same sha256.

    Raises:
        FileNotFoundError: when *file_path* does not exist.
    """
    source = Path(file_path)
    if not source.exists():
        raise FileNotFoundError(f"Attachment source not found: {source}")
    guessed_mime = mime_type or (mimetypes.guess_type(source.name)[0] or "application/octet-stream")
    attachment_id = attachment_id or uuid.uuid4().hex
    sha256_source = _sha256_file(source)
    if deduplicate:
        existing = self._find_duplicate_by_hash(
            sha256_source,
            copy_file=copy_file,
            source_path=source,
        )
        if existing:
            return existing
    if copy_file:
        # Each attachment gets its own subdirectory keyed by id.
        target_dir = self.root / attachment_id
        target_dir.mkdir(parents=True, exist_ok=True)
        target_path = target_dir / source.name
        shutil.copy2(source, target_path)
    else:
        target_path = source.resolve()
    size = target_path.stat().st_size
    sha256 = sha256_source or _sha256_file(target_path)
    data_uri = None
    # Inline data-URI embedding is currently disabled:
    # if size <= self.inline_size_limit:
    #     data_uri = encode_file_to_data_uri(target_path, guessed_mime)
    ref = AttachmentRef(
        attachment_id=attachment_id,
        mime_type=guessed_mime,
        name=display_name or source.name,
        size=size,
        sha256=sha256,
        local_path=str(target_path),
        data_uri=data_uri,
    )
    record = AttachmentRecord(
        ref=ref,
        kind=kind,
        description=description,
        # Copy so later caller-side mutations do not leak into the record.
        extra=dict(extra) if extra else {},
    )
    self._records[attachment_id] = record
    if sha256:
        self._hash_index[sha256] = attachment_id
    if persist:
        self._persistent_ids.add(attachment_id)
        self._save_manifest()
    else:
        self._persistent_ids.discard(attachment_id)
    return record
def register_bytes(
    self,
    data: bytes | bytearray,
    *,
    kind: MessageBlockType = MessageBlockType.FILE,
    mime_type: Optional[str] = None,
    display_name: Optional[str] = None,
    attachment_id: Optional[str] = None,
    description: Optional[str] = None,
    extra: Optional[Dict[str, Any]] = None,
    persist: bool = True,
) -> AttachmentRecord:
    """Register an in-memory payload as an attachment.

    Writes the bytes into this store's per-attachment directory, then
    delegates to register_file with copy_file=False so the freshly written
    file is referenced in place rather than copied again.

    Raises:
        TypeError: when *data* is not bytes or bytearray.
    """
    if not isinstance(data, (bytes, bytearray)):
        raise TypeError("register_bytes expects bytes or bytearray data")
    attachment_id = attachment_id or uuid.uuid4().hex
    # Pick a filename from display_name or derive one from the MIME type.
    filename = display_name or _default_filename_for_mime(mime_type)
    target_dir = self.root / attachment_id
    target_dir.mkdir(parents=True, exist_ok=True)
    target_path = target_dir / filename
    with target_path.open("wb") as handle:
        handle.write(bytes(data))
    return self.register_file(
        target_path,
        kind=kind,
        display_name=display_name or filename,
        mime_type=mime_type,
        attachment_id=attachment_id,
        copy_file=False,
        description=description,
        extra=extra,
        persist=persist,
    )
def register_remote_file(
    self,
    *,
    remote_file_id: str,
    name: str,
    mime_type: Optional[str] = None,
    size: Optional[int] = None,
    kind: MessageBlockType = MessageBlockType.FILE,
    attachment_id: Optional[str] = None,
    description: Optional[str] = None,
    extra: Optional[Dict[str, Any]] = None,
    persist: bool = True,
) -> AttachmentRecord:
    """Register an already-uploaded file (e.g., OpenAI file ID).

    Args:
        remote_file_id: Provider-side file identifier.
        name: Display name for the attachment.
        mime_type: Optional MIME type reported by the provider.
        size: Optional size in bytes.
        kind: Message-block type recorded for the attachment.
        attachment_id: Stable id to use; random hex when omitted.
        description: Optional free-text description.
        extra: Extra metadata stored alongside the record (copied).
        persist: Include the record in the saved manifest.
    """
    attachment_id = attachment_id or uuid.uuid4().hex
    ref = AttachmentRef(
        attachment_id=attachment_id,
        mime_type=mime_type,
        name=name,
        size=size,
        remote_file_id=remote_file_id,
    )
    # Copy the caller's extra mapping so later caller-side mutations do not
    # leak into the stored record (matches register_file's behavior).
    record = AttachmentRecord(
        ref=ref,
        kind=kind,
        description=description,
        extra=dict(extra) if extra else {},
    )
    self._records[attachment_id] = record
    if persist:
        self._persistent_ids.add(attachment_id)
        self._save_manifest()
    else:
        self._persistent_ids.discard(attachment_id)
    if ref.sha256:
        self._hash_index[ref.sha256] = attachment_id
    return record
def update_remote_file_id(self, attachment_id: str, remote_file_id: str) -> None:
    """Attach a provider file_id to an existing record (after upload).

    Raises:
        KeyError: when the attachment id is unknown.
    """
    record = self._records.get(attachment_id)
    if not record:
        raise KeyError(f"Attachment '{attachment_id}' not found")
    record.ref.remote_file_id = remote_file_id
    # Only records persisted to disk require a manifest re-write.
    persisted = attachment_id in self._persistent_ids
    if persisted:
        self._save_manifest()
def get(self, attachment_id: str) -> AttachmentRecord | None:
    """Look up a record by id; returns None for unknown ids."""
    record = self._records.get(attachment_id, None)
    return record
def to_message_block(self, attachment_id: str) -> MessageBlock:
    """Return a MessageBlock for the given attachment id.

    Raises:
        KeyError: when the attachment id is unknown.
    """
    record = self._records.get(attachment_id)
    if record:
        return record.as_message_block()
    raise KeyError(f"Attachment '{attachment_id}' not found")
def list_records(self) -> Dict[str, AttachmentRecord]:
    """Return a shallow copy of the id -> record mapping."""
    return {key: value for key, value in self._records.items()}
def export_manifest(self) -> Dict[str, Any]:
    """Serialize only the persistent records; transient ones are skipped."""
    manifest: Dict[str, Any] = {}
    for attachment_id, record in self._records.items():
        if attachment_id not in self._persistent_ids:
            continue
        manifest[attachment_id] = record.to_dict()
    return manifest
def _find_duplicate_by_hash(
    self,
    sha256: Optional[str],
    *,
    copy_file: bool,
    source_path: Optional[Path],
) -> Optional[AttachmentRecord]:
    """Return an existing record with the same content hash, if reusable.

    Returns None when there is no hash, no index entry, or the indexed
    record cannot safely stand in for this ingest.
    """
    if not sha256:
        return None
    existing_id = self._hash_index.get(sha256)
    if not existing_id:
        return None
    record = self._records.get(existing_id)
    if not record:
        # Stale index entry: the record was removed, so drop the mapping.
        self._hash_index.pop(sha256, None)
        return None
    if not copy_file and source_path is not None:
        # Without copying, reuse is only safe when the existing record
        # points at the exact same file on disk.
        existing_path = record.ref.local_path
        if not existing_path:
            return None
        try:
            if Path(existing_path).resolve() != source_path.resolve():
                return None
        except FileNotFoundError:
            return None
    return record
def ingest_record(
    self,
    record: AttachmentRecord,
    *,
    copy_file: bool = True,
    persist: bool = True,
) -> AttachmentRecord:
    """
    Import an existing attachment record (e.g., from a session upload) into this store.
    Optionally copies the underlying file into the store directory.
    """
    source_ref = record.ref
    # Reuse the source id when present; otherwise mint a new one.
    attachment_id = source_ref.attachment_id or uuid.uuid4().hex
    new_ref = source_ref.copy()
    new_ref.attachment_id = attachment_id
    local_path = source_ref.local_path
    if local_path and copy_file:
        source_path = Path(local_path)
        if source_path.exists():
            # Copy into a per-attachment subdirectory so filenames cannot collide.
            target_dir = self.root / attachment_id
            target_dir.mkdir(parents=True, exist_ok=True)
            target_path = target_dir / source_path.name
            shutil.copy2(source_path, target_path)
            new_ref.local_path = str(target_path)
    self._records[attachment_id] = AttachmentRecord(
        ref=new_ref,
        kind=record.kind,
        description=record.description,
        extra=dict(record.extra),
    )
    if persist:
        # Persistent records are mirrored to the on-disk manifest.
        self._persistent_ids.add(attachment_id)
        self._save_manifest()
    else:
        self._persistent_ids.discard(attachment_id)
    # Keep the content-hash index current for duplicate detection.
    if new_ref.sha256:
        self._hash_index[new_ref.sha256] = attachment_id
    return self._records[attachment_id]
def _load_manifest(self) -> None:
    """Rehydrate persistent records from the on-disk JSON manifest.

    A missing or corrupt manifest is ignored; individual malformed entries
    are skipped so one bad record cannot block loading the rest.
    """
    if not self.manifest_path.exists():
        return
    try:
        data = json.loads(self.manifest_path.read_text(encoding="utf-8"))
    except json.JSONDecodeError:
        return
    for attachment_id, record_data in data.items():
        try:
            record = AttachmentRecord.from_dict(record_data)
        except Exception:
            continue
        self._records[attachment_id] = record
        self._persistent_ids.add(attachment_id)
        # Rebuild the content-hash index used for duplicate detection.
        if record.ref.sha256:
            self._hash_index[record.ref.sha256] = attachment_id
def _save_manifest(self) -> None:
serialized = self.export_manifest()
self.manifest_path.write_text(json.dumps(serialized, ensure_ascii=False, indent=2), encoding="utf-8")
def _sha256_file(path: Path) -> str:
hasher = hashlib.sha256()
with path.open("rb") as handle:
for chunk in iter(lambda: handle.read(1024 * 1024), b""):
hasher.update(chunk)
return hasher.hexdigest()
def encode_file_to_data_uri(path: Path, mime_type: str) -> str:
    """Return *path*'s contents as a base64-encoded ``data:`` URI."""
    payload = base64.b64encode(path.read_bytes()).decode("utf-8")
    return "data:" + mime_type + ";base64," + payload
def _default_filename_for_mime(mime_type: Optional[str]) -> str:
if mime_type:
ext = mimetypes.guess_extension(mime_type)
if ext:
return f"attachment{ext}"
return "attachment.bin"
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "utils/attachments.py",
"license": "Apache License 2.0",
"lines": 310,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
OpenBMB/ChatDev:utils/env_loader.py | """Environment loading utilities for root-level vars interpolation."""
import os
from pathlib import Path
from typing import Dict
_DOTENV_LOADED = False
def load_dotenv_file(dotenv_path: Path | None = None) -> None:
"""Populate ``os.environ`` with key/value pairs from a .env file once per process."""
global _DOTENV_LOADED
if _DOTENV_LOADED:
return
path = dotenv_path or Path(".env")
if path.exists():
for line in path.read_text(encoding="utf-8").splitlines():
stripped = line.strip()
if not stripped or stripped.startswith("#"):
continue
if "=" not in stripped:
continue
key, value = stripped.split("=", 1)
key = key.strip()
value = value.strip().strip('"').strip("'")
os.environ.setdefault(key, value)
_DOTENV_LOADED = True
def build_env_var_map(extra_vars: Dict[str, str] | None = None) -> Dict[str, str]:
merged: Dict[str, str] = dict(os.environ)
merged.update(extra_vars or {})
return merged
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "utils/env_loader.py",
"license": "Apache License 2.0",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
OpenBMB/ChatDev:utils/error_handler.py | """Error handling utilities for the DevAll workflow system."""
from fastapi import Request
from fastapi.responses import JSONResponse
from fastapi.encoders import jsonable_encoder
import traceback
from utils.structured_logger import get_server_logger
from utils.exceptions import MACException, ValidationError, SecurityError, ConfigurationError, \
WorkflowExecutionError, ResourceNotFoundError, ResourceConflictError, TimeoutError, ExternalServiceError
# Error code mapping to HTTP status codes
# Maps each MACException.error_code to the HTTP status emitted by
# handle_mac_exception; unknown codes fall back to 500 at the lookup site.
ERROR_CODE_TO_STATUS = {
    "VALIDATION_ERROR": 400,
    "SECURITY_ERROR": 403,
    "CONFIGURATION_ERROR": 500,
    "WORKFLOW_EXECUTION_ERROR": 500,
    "RESOURCE_NOT_FOUND": 404,
    "RESOURCE_CONFLICT": 409,
    "TIMEOUT_ERROR": 408,
    "EXTERNAL_SERVICE_ERROR": 502,
    "GENERIC_ERROR": 500
}
async def handle_validation_error(request: Request, exc: ValidationError) -> JSONResponse:
    """Delegate validation failures to the shared MAC exception handler."""
    return await handle_mac_exception(request, exc)


async def handle_security_error(request: Request, exc: SecurityError) -> JSONResponse:
    """Delegate security violations to the shared MAC exception handler."""
    return await handle_mac_exception(request, exc)


async def handle_configuration_error(request: Request, exc: ConfigurationError) -> JSONResponse:
    """Delegate configuration problems to the shared MAC exception handler."""
    return await handle_mac_exception(request, exc)


async def handle_workflow_execution_error(request: Request, exc: WorkflowExecutionError) -> JSONResponse:
    """Delegate workflow execution failures to the shared MAC exception handler."""
    return await handle_mac_exception(request, exc)


async def handle_resource_not_found_error(request: Request, exc: ResourceNotFoundError) -> JSONResponse:
    """Delegate missing-resource errors to the shared MAC exception handler."""
    return await handle_mac_exception(request, exc)


async def handle_resource_conflict_error(request: Request, exc: ResourceConflictError) -> JSONResponse:
    """Delegate resource conflicts to the shared MAC exception handler."""
    return await handle_mac_exception(request, exc)


async def handle_timeout_error(request: Request, exc: TimeoutError) -> JSONResponse:
    """Delegate timeouts to the shared MAC exception handler."""
    return await handle_mac_exception(request, exc)


async def handle_external_service_error(request: Request, exc: ExternalServiceError) -> JSONResponse:
    """Delegate external-service failures to the shared MAC exception handler."""
    return await handle_mac_exception(request, exc)
async def handle_mac_exception(request: Request, exc: MACException) -> JSONResponse:
    """Handle DevAll exceptions and return standardized error response.

    Logs the exception with request context, maps ``exc.error_code`` to an
    HTTP status (defaulting to 500), and returns a JSON body containing the
    error code, message, details, and a timestamp.
    """
    # Local import: this module has no top-level datetime import.
    from datetime import datetime, timezone

    logger = get_server_logger()
    # Log the error with whatever correlation id the middleware attached.
    logger.log_exception(
        exc,
        f"DevAll exception occurred: {exc.error_code} - {exc.message}",
        correlation_id=getattr(request.state, 'correlation_id', None),
        url=str(request.url),
        method=request.method
    )
    # Determine the HTTP status code (unknown error codes become 500).
    status_code = ERROR_CODE_TO_STATUS.get(exc.error_code, 500)
    # Fix: the old code evaluated a deprecated datetime.utcnow() fallback via
    # __import__ on every call, even when _timestamp was present. Compute a
    # timezone-aware fallback lazily instead.
    timestamp = exc.__dict__.get('_timestamp')
    if timestamp is None:
        timestamp = datetime.now(timezone.utc).isoformat()
    # Prepare response data
    response_data = {
        "error": {
            "code": exc.error_code,
            "message": exc.message,
            "details": exc.details
        },
        "timestamp": timestamp
    }
    return JSONResponse(
        status_code=status_code,
        content=jsonable_encoder(response_data)
    )
async def handle_general_exception(request: Request, exc: Exception) -> JSONResponse:
    """Handle general exceptions and return a standardized 500 response."""
    import os

    logger = get_server_logger()
    # Record the failure together with request context.
    logger.log_exception(
        exc,
        f"General exception occurred: {type(exc).__name__} - {str(exc)}",
        correlation_id=getattr(request.state, 'correlation_id', None),
        url=str(request.url),
        method=request.method
    )
    # For security, never leak internals to the client by default.
    error_details = {
        "code": "INTERNAL_ERROR",
        "message": "An internal server error occurred",
        "details": {}
    }
    # Development deployments opt in to a debug payload.
    if os.getenv("ENVIRONMENT") == "development":
        error_details["details"]["debug_info"] = {
            "exception_type": type(exc).__name__,
            "exception_message": str(exc),
            "traceback": traceback.format_exc()
        }
    return JSONResponse(status_code=500, content=jsonable_encoder({"error": error_details}))
def add_exception_handlers(app):
    """Register all DevAll exception handlers on the FastAPI app.

    Specific exception types come first; MACException and bare Exception act
    as catch-alls. Returns the app for chaining.
    """
    handler_table = [
        (ValidationError, handle_validation_error),
        (SecurityError, handle_security_error),
        (ConfigurationError, handle_configuration_error),
        (WorkflowExecutionError, handle_workflow_execution_error),
        (ResourceNotFoundError, handle_resource_not_found_error),
        (ResourceConflictError, handle_resource_conflict_error),
        (TimeoutError, handle_timeout_error),
        (ExternalServiceError, handle_external_service_error),
        (MACException, handle_mac_exception),
        (Exception, handle_general_exception),
    ]
    for exc_type, handler in handler_table:
        app.add_exception_handler(exc_type, handler)
    return app
"repo_id": "OpenBMB/ChatDev",
"file_path": "utils/error_handler.py",
"license": "Apache License 2.0",
"lines": 112,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
OpenBMB/ChatDev:utils/exceptions.py | """Custom exceptions for the DevAll workflow system."""
from typing import Optional, Dict, Any
import json
class MACException(Exception):
    """Base exception for DevAll workflow system."""

    def __init__(self, message: str, error_code: str = None, details: Dict[str, Any] = None):
        super().__init__(message)
        self.message = message
        # Fall back to the generic code / empty details when not provided.
        self.details = details or {}
        self.error_code = error_code if error_code else "GENERIC_ERROR"

    def to_dict(self) -> Dict[str, Any]:
        """Convert exception to dictionary format for JSON response."""
        return {
            "error_code": self.error_code,
            "message": self.message,
            "details": self.details,
        }

    def to_json(self) -> str:
        """Convert exception to JSON string."""
        return json.dumps(self.to_dict())
class ValidationError(MACException):
    """Raised when validation fails."""

    def __init__(self, message: str, field: str = None, details: Dict[str, Any] = None):
        merged = details or {}
        if field:
            # Surface the offending field in the structured details payload.
            merged["field"] = field
        super().__init__(message, "VALIDATION_ERROR", merged)
class SecurityError(MACException):
    """Raised when a security violation occurs."""

    def __init__(self, message: str, details: Dict[str, Any] = None):
        super().__init__(message, "SECURITY_ERROR", details if details else {})
class ConfigurationError(MACException):
    """Raised when configuration is invalid or missing."""

    def __init__(self, message: str, config_key: str = None, details: Dict[str, Any] = None):
        merged = details or {}
        if config_key:
            merged["config_key"] = config_key
        super().__init__(message, "CONFIGURATION_ERROR", merged)
class WorkflowExecutionError(MACException):
    """Raised when workflow execution fails."""

    def __init__(self, message: str, workflow_id: str = None, node_id: str = None, details: Dict[str, Any] = None):
        merged = details or {}
        if workflow_id:
            merged["workflow_id"] = workflow_id
        if node_id:
            merged["node_id"] = node_id
        super().__init__(message, "WORKFLOW_EXECUTION_ERROR", merged)
class WorkflowCancelledError(MACException):
    """Raised when a workflow execution is cancelled mid-flight."""

    def __init__(self, message: str, workflow_id: str = None, details: Dict[str, Any] = None):
        merged = details or {}
        if workflow_id:
            merged["workflow_id"] = workflow_id
        super().__init__(message, "WORKFLOW_CANCELLED", merged)
class ResourceNotFoundError(MACException):
    """Raised when a requested resource is not found."""

    def __init__(self, message: str, resource_type: str = None, resource_id: str = None, details: Dict[str, Any] = None):
        merged = details or {}
        if resource_type:
            merged["resource_type"] = resource_type
        if resource_id:
            merged["resource_id"] = resource_id
        super().__init__(message, "RESOURCE_NOT_FOUND", merged)
class ResourceConflictError(MACException):
    """Raised when there's a conflict with an existing resource."""

    def __init__(self, message: str, resource_type: str = None, resource_id: str = None, details: Dict[str, Any] = None):
        merged = details or {}
        if resource_type:
            merged["resource_type"] = resource_type
        if resource_id:
            merged["resource_id"] = resource_id
        super().__init__(message, "RESOURCE_CONFLICT", merged)
class TimeoutError(MACException):
    """Raised when an operation times out."""
    # NOTE: intentionally shadows the builtin TimeoutError inside this package.

    def __init__(self, message: str, operation: str = None, timeout_duration: float = None, details: Dict[str, Any] = None):
        merged = details or {}
        if operation:
            merged["operation"] = operation
        if timeout_duration is not None:
            merged["timeout_duration"] = timeout_duration
        super().__init__(message, "TIMEOUT_ERROR", merged)
class ExternalServiceError(MACException):
    """Raised when an external service call fails."""

    def __init__(self, message: str, service_name: str = None, status_code: int = None, details: Dict[str, Any] = None):
        merged = details or {}
        if service_name:
            merged["service_name"] = service_name
        if status_code is not None:
            merged["status_code"] = status_code
        super().__init__(message, "EXTERNAL_SERVICE_ERROR", merged)
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "utils/exceptions.py",
"license": "Apache License 2.0",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
OpenBMB/ChatDev:utils/function_catalog.py | """Utility helpers for introspecting function-calling tools."""
import inspect
from collections import abc
from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from typing import Annotated, Any, Dict, List, Literal, Mapping, Sequence, Tuple, Union, get_args, get_origin
from utils.function_manager import FUNCTION_CALLING_DIR, get_function_manager
@dataclass(frozen=True)
class ParamMeta:
    """Declarative metadata for Annotated parameters."""
    # Human-readable description injected into the JSON schema.
    description: str | None = None
    # Closed set of allowed values; also used to infer the schema type.
    enum: Sequence[Any] | None = None
@dataclass(frozen=True)
class FunctionMetadata:
    """Normalized metadata for a Python callable."""
    # Registered tool name (key in the function manager).
    name: str
    # First docstring paragraph, normalized; None when undocumented.
    description: str | None
    # JSON-schema 'object' describing the callable's parameters.
    parameters_schema: Dict[str, Any]
    # Dotted module name reported by the callable itself.
    module: str
    # Absolute source file path ('' when unavailable).
    file_path: str
    # Path-derived module key relative to the functions directory.
    module_name: str
class FunctionCatalog:
    """Inspect and cache callable metadata for tool schemas.

    Metadata is loaded lazily on first access and cached until refresh().
    Load failures are captured in ``load_error`` rather than raised.
    """
    def __init__(self, functions_dir: str | Path = FUNCTION_CALLING_DIR) -> None:
        self._functions_dir = Path(functions_dir).resolve()
        # name -> FunctionMetadata cache.
        self._metadata: Dict[str, FunctionMetadata] = {}
        self._loaded = False
        self._load_error: Exception | None = None
        # module_name -> sorted list of function names in that file.
        self._module_index: Dict[str, List[str]] = {}
    def refresh(self) -> None:
        """Reload metadata from the function directory."""
        self._metadata.clear()
        self._module_index = {}
        self._load_error = None
        manager = get_function_manager(self._functions_dir)
        try:
            manager.load_functions()
        except Exception as exc:  # pragma: no cover - propagated via catalog usage
            # Remember the failure but mark as loaded so we don't retry forever.
            self._loaded = True
            self._load_error = exc
            return
        module_index: Dict[str, List[str]] = {}
        for name, fn in manager.list_functions().items():
            try:
                metadata = _build_function_metadata(name, fn, self._functions_dir)
                self._metadata[name] = metadata
                module_bucket = module_index.setdefault(metadata.module_name, [])
                module_bucket.append(name)
            except Exception as exc:  # pragma: no cover - guarded to avoid cascading failures
                print(f"[FunctionCatalog] Failed to load metadata for {name}: {exc}")
        for module_name, names in module_index.items():
            names.sort()
        self._module_index = module_index
        self._loaded = True
    def _ensure_loaded(self) -> None:
        # Lazy-load guard used by every public accessor.
        if not self._loaded:
            self.refresh()
    def get(self, name: str) -> FunctionMetadata | None:
        """Return metadata for one function, or None when unknown."""
        self._ensure_loaded()
        return self._metadata.get(name)
    def list_function_names(self) -> List[str]:
        """Return all known function names, sorted."""
        self._ensure_loaded()
        return sorted(self._metadata.keys())
    def list_metadata(self) -> Dict[str, FunctionMetadata]:
        """Return a shallow copy of the name -> metadata cache."""
        self._ensure_loaded()
        return self._metadata.copy()
    def iter_modules(self) -> List[Tuple[str, List[FunctionMetadata]]]:
        """Return functions grouped by Python file (module_name)."""
        self._ensure_loaded()
        modules: List[Tuple[str, List[FunctionMetadata]]] = []
        for module_name in sorted(self._module_index.keys()):
            names = self._module_index.get(module_name, [])
            entries: List[FunctionMetadata] = []
            for fn_name in names:
                meta = self._metadata.get(fn_name)
                if meta is not None:
                    entries.append(meta)
            modules.append((module_name, entries))
        return modules
    def functions_for_module(self, module_name: str) -> List[str]:
        """Return sorted function names for the given module."""
        self._ensure_loaded()
        return list(self._module_index.get(module_name, []))
    @property
    def load_error(self) -> Exception | None:
        # Exposed so callers can distinguish "empty" from "failed to load".
        self._ensure_loaded()
        return self._load_error
# Cache of catalogs keyed by their resolved functions directory.
_catalog_registry: Dict[Path, FunctionCatalog] = {}


def get_function_catalog(functions_dir: str | Path = FUNCTION_CALLING_DIR) -> FunctionCatalog:
    """Return the shared catalog for *functions_dir*, creating it on first use."""
    directory = Path(functions_dir).resolve()
    if directory not in _catalog_registry:
        _catalog_registry[directory] = FunctionCatalog(directory)
    return _catalog_registry[directory]
def _build_function_metadata(name: str, fn: Any, functions_dir: Path) -> FunctionMetadata:
    """Collect signature, docstring, and schema information for one callable."""
    signature = inspect.signature(fn)
    annotations = _resolve_annotations(fn)
    description = _extract_description(fn)
    schema = _build_parameters_schema(signature, annotations)
    module = getattr(fn, "__module__", "")
    # getsourcefile may return None for builtins / C extensions.
    file_path = inspect.getsourcefile(fn) or ""
    module_name = _derive_module_name(file_path, functions_dir)
    return FunctionMetadata(
        name=name,
        description=description,
        parameters_schema=schema,
        module=module,
        file_path=file_path,
        module_name=module_name,
    )
def _derive_module_name(file_path: str, functions_dir: Path) -> str:
if not file_path:
return "unknown"
try:
relative = Path(file_path).resolve().relative_to(functions_dir.resolve())
if relative.suffix:
relative = relative.with_suffix("")
parts = list(relative.parts)
if not parts:
return "unknown"
return "/".join(parts)
except Exception:
stem = Path(file_path).stem
return stem or "unknown"
def _extract_description(fn: Any) -> str | None:
doc = inspect.getdoc(fn)
if not doc:
return None
trimmed = doc.strip()
if not trimmed:
return None
first_paragraph = trimmed.split("\n\n", 1)[0]
normalized_lines = [line.strip() for line in first_paragraph.splitlines() if line.strip()]
normalized = " ".join(normalized_lines)
max_len = 600
if len(normalized) > max_len:
normalized = normalized[: max_len - 1].rstrip() + "…"
return normalized or None
def _resolve_annotations(fn: Any) -> Mapping[str, Any]:
    """Fetch *fn*'s annotations, preferring evaluated + extras-preserving forms.

    Tries inspect.get_annotations with progressively fewer keyword arguments
    for cross-version compatibility, falling back to the raw __annotations__
    mapping when introspection fails entirely.
    """
    fallback = getattr(fn, "__annotations__", {}) or {}
    get_annotations = getattr(inspect, "get_annotations", None)
    if get_annotations is None:
        # inspect.get_annotations does not exist before Python 3.10.
        return fallback
    try:
        return inspect.get_annotations(fn, eval_str=True, include_extras=True)
    except TypeError:
        try:
            return inspect.get_annotations(fn, eval_str=True)
        except TypeError:
            try:
                return inspect.get_annotations(fn)
            except Exception:
                return fallback
    except Exception:
        return fallback
def _build_parameters_schema(signature: inspect.Signature, annotations: Mapping[str, Any]) -> Dict[str, Any]:
    """Build a JSON-schema 'object' describing a callable's parameters.

    Skips private (_-prefixed) and *args/**kwargs parameters. A parameter
    is required when it has no default and its type is not Optional.
    """
    properties: Dict[str, Any] = {}
    required: List[str] = []
    for param in signature.parameters.values():
        if param.name.startswith("_"):
            continue
        if param.kind in (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD):
            continue
        annotation = annotations.get(param.name, inspect._empty)
        # Unwrap Annotated[...] metadata, then strip Optional[...] wrappers.
        annotation, meta = _unwrap_annotation(annotation)
        annotation, optional_from_type = _strip_optional(annotation)
        schema = _annotation_to_schema(annotation)
        schema = _apply_param_meta(schema, meta)
        if param.default is not inspect._empty:
            schema.setdefault("default", param.default)
        properties[param.name] = schema
        is_required = param.default is inspect._empty and not optional_from_type
        if is_required:
            required.append(param.name)
    payload: Dict[str, Any] = {
        "type": "object",
        "properties": properties,
    }
    if required:
        payload["required"] = required
    return payload
def _unwrap_annotation(annotation: Any) -> Tuple[Any, ParamMeta | None]:
origin = get_origin(annotation)
if origin is Annotated:
args = get_args(annotation)
if not args:
return annotation, None
base = args[0]
meta = next((arg for arg in args[1:] if isinstance(arg, ParamMeta)), None)
return base, meta
return annotation, None
def _strip_optional(annotation: Any) -> Tuple[Any, bool]:
origin = get_origin(annotation)
if origin is Union:
args = [arg for arg in get_args(annotation) if arg is not type(None)] # noqa: E721
if len(args) == 1 and len(args) != len(get_args(annotation)):
return args[0], True
return annotation, False
def _annotation_to_schema(annotation: Any) -> Dict[str, Any]:
    """Translate a Python annotation into a JSON-schema fragment (string fallback)."""
    if annotation is inspect._empty or annotation is Any:
        return {"type": "string"}
    origin = get_origin(annotation)
    if origin is None:
        return _primitive_schema(annotation)
    sequence_origins = (list, List, abc.Sequence, abc.MutableSequence)
    if any(origin is candidate for candidate in sequence_origins):
        args = get_args(annotation)
        inner = args[0] if args else Any
        return {"type": "array", "items": _annotation_to_schema(inner)}
    if origin in {dict, Dict, abc.Mapping, abc.MutableMapping}:
        return {"type": "object"}
    if origin is Union:
        members = [m for m in get_args(annotation) if m is not type(None)]  # noqa: E721
        schema = _try_literal_schema(members)
        return schema if schema else {"type": "string"}
    if origin is Literal:
        return _literal_schema(list(get_args(annotation)))
    return {"type": "string"}
def _primitive_schema(annotation: Any) -> Dict[str, Any]:
if isinstance(annotation, type) and issubclass(annotation, Enum):
values = [member.value for member in annotation]
schema = _literal_schema(values)
return schema if schema else {"type": "string"}
if annotation in {str}:
return {"type": "string"}
if annotation in {int}:
return {"type": "integer"}
if annotation in {float}:
return {"type": "number"}
if annotation in {bool}:
return {"type": "boolean"}
if annotation in {dict, abc.Mapping}:
return {"type": "object"}
if annotation in {list, abc.Sequence}:
return {"type": "array", "items": {"type": "string"}}
return {"type": "string"}
def _apply_param_meta(schema: Dict[str, Any], meta: ParamMeta | None) -> Dict[str, Any]:
if meta is None:
return schema
updated = dict(schema)
if meta.description:
updated["description"] = meta.description
if meta.enum:
updated["enum"] = list(meta.enum)
inferred = _infer_literal_type(meta.enum)
if inferred:
updated["type"] = inferred
return updated
def _literal_schema(values: Sequence[Any]) -> Dict[str, Any]:
if not values:
return {"type": "string"}
schema: Dict[str, Any] = {"enum": list(values)}
literal_type = _infer_literal_type(values)
if literal_type:
schema["type"] = literal_type
return schema
def _try_literal_schema(values: Sequence[Any]) -> Dict[str, Any] | None:
if not values:
return None
literal_type = _infer_literal_type(values)
if literal_type is None:
return None
return {"type": literal_type, "enum": list(values)}
def _infer_literal_type(values: Sequence[Any]) -> str | None:
if all(isinstance(value, bool) for value in values):
return "boolean"
if all(isinstance(value, int) and not isinstance(value, bool) for value in values):
return "integer"
if all(isinstance(value, float) for value in values):
return "number"
if all(isinstance(value, str) for value in values):
return "string"
return None
# Public API of this module.
__all__ = [
    "FunctionCatalog",
    "FunctionMetadata",
    "ParamMeta",
    "get_function_catalog",
]
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "utils/function_catalog.py",
"license": "Apache License 2.0",
"lines": 285,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
OpenBMB/ChatDev:utils/function_manager.py | """Unified function management."""
import importlib.util
import inspect
import os
from pathlib import Path
from typing import Any, Callable, Dict, Optional
_MODULE_PREFIX = "_dynamic_functions"
_FUNCTION_CALLING_ENV = "MAC_FUNCTIONS_DIR"
_EDGE_FUNCTION_ENV = "MAC_EDGE_FUNCTIONS_DIR"
_EDGE_PROCESSOR_FUNCTION_ENV = "MAC_EDGE_PROCESSOR_FUNCTIONS_DIR"
_REPO_ROOT = Path(__file__).resolve().parents[1]
_DEFAULT_FUNCTIONS_ROOT = Path("functions")
_DEFAULT_FUNCTION_CALLING_DIR = _DEFAULT_FUNCTIONS_ROOT / "function_calling"
_DEFAULT_EDGE_FUNCTION_DIR = _DEFAULT_FUNCTIONS_ROOT / "edge"
_DEFAULT_EDGE_PROCESSOR_DIR = _DEFAULT_FUNCTIONS_ROOT / "edge_processor"
def _resolve_dir(default: Path, env_var: str | None = None) -> Path:
"""Resolve a directory path with optional environment override."""
override = os.environ.get(env_var) if env_var else None
if override:
return Path(override).expanduser()
if default.is_absolute():
return default
return _REPO_ROOT / default
FUNCTION_CALLING_DIR = _resolve_dir(_DEFAULT_FUNCTION_CALLING_DIR, _FUNCTION_CALLING_ENV).resolve()
EDGE_FUNCTION_DIR = _resolve_dir(_DEFAULT_EDGE_FUNCTION_DIR, _EDGE_FUNCTION_ENV).resolve()
EDGE_PROCESSOR_FUNCTION_DIR = _resolve_dir(_DEFAULT_EDGE_PROCESSOR_DIR, _EDGE_PROCESSOR_FUNCTION_ENV).resolve()
class FunctionManager:
    """Unified function manager for loading and managing functions across the project.

    Recursively imports every non-private .py file under ``functions_dir``
    and registers the public functions defined in those files by name.
    """
    def __init__(self, functions_dir: str | Path = "functions") -> None:
        self.functions_dir = Path(functions_dir)
        # name -> callable registry, filled lazily by load_functions().
        self.functions: Dict[str, Callable] = {}
        self._loaded = False
    def load_functions(self) -> None:
        """Load all Python functions from functions directory."""
        if self._loaded:
            return
        if not self.functions_dir.exists():
            raise ValueError(f"Functions directory does not exist: {self.functions_dir}")
        for file in self.functions_dir.rglob("*.py"):
            # Skip private modules, package markers, and bytecode caches.
            if file.name.startswith("_") or file.name == "__init__.py":
                continue
            if "__pycache__" in file.parts:
                continue
            module_name = self._build_module_name(file)
            try:
                # Import module dynamically
                spec = importlib.util.spec_from_file_location(module_name, file)
                if spec is None or spec.loader is None:
                    continue
                module = importlib.util.module_from_spec(spec)
                spec.loader.exec_module(module)
                current_file = file.resolve()
                # Get all functions defined in the module
                for name, obj in inspect.getmembers(module, inspect.isfunction):
                    if name.startswith("_"):
                        continue
                    # Only register functions defined in the current module/file
                    if getattr(obj, "__module__", None) != module.__name__:
                        code = getattr(obj, "__code__", None)
                        source_path = Path(code.co_filename).resolve() if code else None
                        if source_path != current_file:
                            continue
                    self.functions[name] = obj
            except Exception as e:
                # Best-effort: one broken module must not block the rest.
                print(f"Error loading module {module_name}: {e}")
        self._loaded = True
    def _build_module_name(self, filepath: Path) -> str:
        """Create a unique module name for a function file."""
        relative = filepath.relative_to(self.functions_dir)
        parts = "_".join(relative.with_suffix("").parts) or "module"
        # Hash suffix keeps names from colliding across directories.
        unique_suffix = f"{abs(hash(filepath.as_posix())) & 0xFFFFFFFF:X}"
        return f"{_MODULE_PREFIX}.{parts}_{unique_suffix}"
    def get_function(self, name: str) -> Optional[Callable]:
        """Get a function by name."""
        if not self._loaded:
            self.load_functions()
        return self.functions.get(name)
    def has_function(self, name: str) -> bool:
        """Check if a function exists."""
        if not self._loaded:
            self.load_functions()
        return name in self.functions
    def call_function(self, name: str, *args, **kwargs) -> Any:
        """Call a function by name with given arguments."""
        func = self.get_function(name)
        if func is None:
            raise ValueError(f"Function {name} not found")
        return func(*args, **kwargs)
    def list_functions(self) -> Dict[str, Callable]:
        """List all available functions."""
        if not self._loaded:
            self.load_functions()
        return self.functions.copy()
    def reload_functions(self) -> None:
        """Reload all functions from the functions directory."""
        self.functions.clear()
        self._loaded = False
        self.load_functions()
# Global function manager registry keyed by directory
_function_managers: Dict[Path, FunctionManager] = {}


def get_function_manager(functions_dir: str | Path) -> FunctionManager:
    """Get or create the global function manager instance for a directory."""
    directory = Path(functions_dir).resolve()
    if directory not in _function_managers:
        _function_managers[directory] = FunctionManager(directory)
    return _function_managers[directory]
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "utils/function_manager.py",
"license": "Apache License 2.0",
"lines": 108,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
OpenBMB/ChatDev:utils/human_prompt.py | """Human-in-the-loop prompt service with pluggable channels."""
import threading
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List, Optional, Protocol
from entity.messages import MessageBlock, MessageBlockType, MessageContent
from utils.log_manager import LogManager
@dataclass
class PromptResult:
    """Typed result returned from prompt channels."""
    # Raw (possibly sanitized) textual reply from the human.
    text: str
    # Optional structured message blocks; None means "text only".
    blocks: Optional[List[MessageBlock]] = None
    # Channel-specific extras merged into the interaction log.
    metadata: Dict[str, Any] = field(default_factory=dict)
    def as_message_content(self) -> MessageContent:
        """Prefer structured blocks when present, otherwise the plain text."""
        return self.blocks if self.blocks is not None else self.text
class PromptChannel(Protocol):
    """Channel interface that performs the actual user interaction.

    Structural (duck-typed) protocol: any object with a matching ``request``
    method qualifies; implementations may block until the human responds.
    """
    def request(
        self,
        *,
        node_id: str,
        task: str,
        inputs: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None,
    ) -> PromptResult:
        """Collect user feedback and return the structured response."""
@dataclass
class CliPromptChannel:
    """Default channel that prompts the operator via CLI input()."""

    # Injection point for tests; defaults to the builtin input().
    input_func: Callable[[str], str] = input

    def request(
        self,
        *,
        node_id: str,
        task: str,
        inputs: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None,
    ) -> PromptResult:
        """Render the task banner, read one reply, and wrap it as a PromptResult."""
        lines = ["===== HUMAN INPUT REQUIRED ====="]
        if inputs:
            lines.append("=== Node inputs ===")
            lines.append(inputs)
        lines.append(f"=== Task for human ({node_id}) ===")
        lines.append(task)
        lines.append("=== Your response: ===")
        answer = self.input_func("\n".join(lines) + "\n")
        return PromptResult(
            text=answer,
            blocks=[MessageBlock.text_block(answer or "")],
        )
class HumanPromptService:
    """Coordinates human feedback collection across nodes and tools.

    Serializes prompts behind a lock so only one human interaction is in
    flight at a time, times the wait, sanitizes the reply, and records the
    interaction in the log manager.
    """
    def __init__(
        self,
        *,
        log_manager: LogManager,
        channel: PromptChannel,
        session_id: Optional[str] = None,
    ) -> None:
        self._log_manager = log_manager
        self._channel = channel
        self._session_id = session_id
        # One prompt at a time: CLI/UI channels cannot multiplex.
        self._lock = threading.Lock()
    def request(
        self,
        node_id: str,
        task_description: str,
        *,
        inputs: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None,
    ) -> PromptResult:
        """Request human input through the configured channel."""
        meta = dict(metadata or {})
        if self._session_id and "session_id" not in meta:
            meta["session_id"] = self._session_id
        with self._lock:
            # Only the blocking channel call is attributed to "human time".
            with self._log_manager.human_timer(node_id):
                raw_result = self._channel.request(
                    node_id=node_id,
                    task=task_description,
                    inputs=inputs,
                    metadata=meta,
                )
            prompt_result = self._normalize_result(raw_result)
            sanitized_text = self._sanitize_response(prompt_result.text)
            normalized_blocks = self._normalize_blocks(prompt_result.blocks, sanitized_text)
            # Caller-supplied metadata wins over channel-supplied metadata.
            combined_metadata = {**prompt_result.metadata, **meta}
            self._log_manager.record_human_interaction(
                node_id,
                inputs,
                sanitized_text,
                details={"task_description": task_description, **combined_metadata},
            )
            return PromptResult(
                text=sanitized_text,
                blocks=normalized_blocks,
                metadata=combined_metadata,
            )
    @staticmethod
    def _sanitize_response(response: Any) -> str:
        # Coerce to str and strip any bytes that are not valid UTF-8.
        text = response if isinstance(response, str) else str(response)
        return text.encode("utf-8", errors="ignore").decode("utf-8", errors="ignore")
    def _normalize_result(self, raw_result: PromptResult | str | Any) -> PromptResult:
        # Channels may return a bare string; wrap it into a PromptResult.
        if isinstance(raw_result, PromptResult):
            return raw_result
        text = self._sanitize_response(raw_result)
        return PromptResult(text=text, blocks=[MessageBlock.text_block(text)])
    def _normalize_blocks(
        self,
        blocks: Optional[List[MessageBlock]],
        fallback_text: str,
    ) -> List[MessageBlock]:
        # Guarantee at least one text block; sanitize copies, not originals.
        if not blocks:
            return [MessageBlock.text_block(fallback_text)]
        normalized: List[MessageBlock] = []
        for block in blocks:
            dup = block.copy()
            if dup.type is MessageBlockType.TEXT and dup.text is not None:
                dup.text = self._sanitize_response(dup.text)
            normalized.append(dup)
        return normalized
def resolve_prompt_channel(workspace_hook: Any) -> PromptChannel | None:
    """Helper to fetch a PromptChannel from a workspace hook if available."""
    if workspace_hook is None:
        return None
    # Prefer an explicit accessor method over a bare attribute.
    getter = getattr(workspace_hook, "get_prompt_channel", None)
    if callable(getter):
        resolved = getter()
        if resolved is not None:
            return resolved
    # Fall back to a plain attribute; legitimately may be None.
    return getattr(workspace_hook, "prompt_channel", None)
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "utils/human_prompt.py",
"license": "Apache License 2.0",
"lines": 134,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
OpenBMB/ChatDev:utils/io_utils.py | from typing import Any, Dict
import yaml
def read_yaml(path) -> Dict[str, Any]:
    """Load a YAML file and return its parsed contents.

    NOTE(review): ``yaml.FullLoader`` can construct some Python objects;
    prefer ``yaml.safe_load`` unless inputs are trusted — confirm callers
    only pass project-controlled files. The ``Dict`` return annotation
    assumes the document's top level is a mapping — TODO confirm.
    """
    with open(path, mode="r", encoding="utf-8") as f:
        return yaml.load(f, Loader=yaml.FullLoader) | {
"repo_id": "OpenBMB/ChatDev",
"file_path": "utils/io_utils.py",
"license": "Apache License 2.0",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
OpenBMB/ChatDev:utils/log_manager.py | """Log manager compatibility shim.
LogManager now wraps WorkflowLogger for backward compatibility.
All timing helpers live inside WorkflowLogger; prefer using it directly.
"""
import time
from contextlib import contextmanager
from typing import Any, Dict, List
from entity.enums import CallStage, LogLevel
from utils.logger import WorkflowLogger
class LogManager:
    """Backward-compatible wrapper that delegates to ``WorkflowLogger``."""

    def __init__(self, logger: WorkflowLogger = None):
        self.logger = logger

    def get_logger(self) -> WorkflowLogger:
        """Return the underlying ``WorkflowLogger`` instance."""
        return self.logger

    # ================================================================
    # Timer context managers delegated to WorkflowLogger
    # ================================================================
    @contextmanager
    def node_timer(self, node_id: str):
        """Context manager that times node execution."""
        with self.logger.node_timer(node_id):
            yield

    @contextmanager
    def model_timer(self, node_id: str):
        """Context manager that times model invocations."""
        with self.logger.model_timer(node_id):
            yield

    @contextmanager
    def agent_timer(self, node_id: str):
        """Context manager that times agent invocations."""
        with self.logger.agent_timer(node_id):
            yield

    @contextmanager
    def human_timer(self, node_id: str):
        """Context manager that times human interactions."""
        with self.logger.human_timer(node_id):
            yield

    @contextmanager
    def tool_timer(self, node_id: str, tool_name: str):
        """Context manager that times tool invocations."""
        with self.logger.tool_timer(node_id, tool_name):
            yield

    @contextmanager
    def thinking_timer(self, node_id: str, stage: str):
        """Context manager that times thinking workflows."""
        with self.logger.thinking_timer(node_id, stage):
            yield

    @contextmanager
    def memory_timer(self, node_id: str, operation_type: str, stage: str):
        """Context manager that times memory operations."""
        with self.logger.memory_timer(node_id, operation_type, stage):
            yield

    @contextmanager
    def operation_timer(self, operation_name: str):
        """Context manager that times custom operations."""
        start_time = time.time()
        try:
            yield
        finally:
            # BUG FIX: WorkflowLogger creates ``_timers`` lazily via
            # ``__init_timers__``; writing to ``self.logger._timers`` directly
            # raised AttributeError when no other timer had run first.
            self.logger.__init_timers__()
            self.logger._timers[operation_name] = time.time() - start_time

    # ================================================================
    # Logging methods delegated to WorkflowLogger
    # ================================================================
    def record_node_start(self, node_id: str, inputs: List[Dict[str, str]], node_type: str = None,
                          details: Dict[str, Any] = None) -> None:
        """Record the start of a node."""
        self.logger.enter_node(node_id, inputs, node_type, details)

    def record_node_end(self, node_id: str, output: str = None,
                        details: Dict[str, Any] = None) -> None:
        """Record the end of a node."""
        output_size = len(str(output)) if output is not None else 0
        # Duration comes from a prior node_timer() run; None if never timed.
        duration = self.logger.get_timer(node_id)
        self.logger.exit_node(node_id, output, duration, output_size, details)

    def record_edge_process(self, from_node: str, to_node: str,
                            details: Dict[str, Any] = None) -> None:
        """Record an edge processing event."""
        self.logger.record_edge_process(from_node, to_node, details)

    def record_human_interaction(self, node_id: str, input_data: Any = None, output: Any = None,
                                 details: Dict[str, Any] = None) -> None:
        """Record a human interaction."""
        input_size = len(str(input_data)) if input_data is not None else 0
        output_size = len(str(output)) if output is not None else 0
        # Timer key must match the one written by human_timer().
        duration = self.logger.get_timer(f"human_{node_id}")
        call_details = {
            "input_size": input_size,
            "output_size": output_size,
            **(details or {})
        }
        self.logger.record_human_interaction(
            node_id, input_data, output, duration, call_details
        )

    def record_model_call(self, node_id: str, model_name: str,
                          input_data: Any = None, output: Any = None,
                          details: Dict[str, Any] = None,
                          stage: CallStage = CallStage.AFTER) -> None:
        """Record a model invocation."""
        input_size = len(str(input_data)) if input_data is not None else 0
        output_size = len(str(output)) if output is not None else 0
        # Timer key must match the one written by model_timer().
        duration = self.logger.get_timer(f"model_{node_id}")
        call_details = {
            "input_size": input_size,
            "output_size": output_size,
            **(details or {})
        }
        self.logger.record_model_call(
            node_id, model_name, input_data, output, duration, call_details, stage
        )

    def record_tool_call(self, node_id: str, tool_name: str,
                         success: bool | None = True, tool_result: Any = None,
                         details: Dict[str, Any] = None,
                         stage: CallStage = CallStage.AFTER) -> None:
        """Record a tool invocation."""
        # Timer key must match the one written by tool_timer().
        duration = self.logger.get_timer(f"tool_{node_id}_{tool_name}")
        tool_details = {
            "result_size": len(str(tool_result)) if tool_result is not None else 0,
            **(details or {})
        }
        self.logger.record_tool_call(node_id, tool_name, tool_result, duration, success, tool_details, stage)

    def record_thinking_process(self, node_id: str, thinking_mode: str, thinking_result: str,
                                stage: str, details: Dict[str, Any] = None) -> None:
        """Record a thinking stage."""
        duration = self.logger.get_timer(f"thinking_{node_id}_{stage}")
        self.logger.record_thinking_process(node_id, thinking_mode, thinking_result, stage, duration, details)

    def record_memory_operation(self, node_id: str, operation_type: str,
                                stage: str, retrieved_memory: Any = None,
                                details: Dict[str, Any] = None) -> None:
        """Record a memory operation."""
        duration = self.logger.get_timer(f"memory_{node_id}_{operation_type}_{stage}")
        memory_details = {
            "result_size": len(str(retrieved_memory)) if retrieved_memory is not None else 0,
            **(details or {})
        }
        self.logger.record_memory_operation(node_id, retrieved_memory, operation_type, stage, duration, memory_details)

    def record_workflow_start(self, workflow_config: Dict[str, Any] = None) -> None:
        """Record the workflow start event."""
        self.logger.record_workflow_start(workflow_config)

    def record_workflow_end(self, success: bool = True,
                            details: Dict[str, Any] = None) -> None:
        """Record the workflow end event."""
        # Wall-clock seconds since the logger was constructed.
        workflow_duration = (time.time() - self.logger.start_time.timestamp())
        self.logger.record_workflow_end(success, workflow_duration, details)

    def debug(self, message: str, node_id: str = None,
              details: Dict[str, Any] = None) -> None:
        """Record debug information."""
        self.logger.debug(message, node_id, details=details)

    def info(self, message: str, node_id: str = None,
             details: Dict[str, Any] = None) -> None:
        """Record general information."""
        self.logger.info(message, node_id, details=details)

    def warning(self, message: str, node_id: str = None,
                details: Dict[str, Any] = None) -> None:
        """Record warning information."""
        self.logger.warning(message, node_id, details=details)

    def error(self, message: str, node_id: str = None,
              details: Dict[str, Any] = None) -> None:
        """Record error information."""
        self.logger.error(message, node_id, details=details)

    def critical(self, message: str, node_id: str = None,
                 details: Dict[str, Any] = None) -> None:
        """Record critical error information."""
        self.logger.critical(message, node_id, details=details)

    def get_execution_summary(self) -> Dict[str, Any]:
        """Return the execution summary."""
        return self.logger.get_execution_summary()

    def get_all_logs(self) -> list:
        """Return all logs."""
        return self.logger.get_logs()

    def logs_to_dict(self) -> Dict[str, Any]:
        """Convert the logs to dictionary form."""
        return self.logger.to_dict()

    def save_logs(self, filepath: str) -> None:
        """Persist logs to a file."""
        self.logger.save_to_file(filepath)
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "utils/log_manager.py",
"license": "Apache License 2.0",
"lines": 177,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
OpenBMB/ChatDev:utils/logger.py | import os
import time
from contextlib import contextmanager
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, Optional
import json
import copy
import traceback
from entity.enums import CallStage, EventType, LogLevel
from utils.structured_logger import StructuredLogger, LogType, get_workflow_logger
from utils.exceptions import MACException
def _json_safe(value: Any) -> Any:
"""Recursively convert objects into JSON-encodable primitives."""
if value is None or isinstance(value, (str, int, float, bool)):
return value
if isinstance(value, dict):
return {str(key): _json_safe(val) for key, val in value.items()}
if isinstance(value, (list, tuple, set)):
return [_json_safe(item) for item in value]
to_dict = getattr(value, "to_dict", None)
if callable(to_dict):
try:
return _json_safe(to_dict())
except Exception:
pass
if hasattr(value, "__dict__"):
try:
return _json_safe(vars(value))
except Exception:
pass
return str(value)
@dataclass
class LogEntry:
    """Single log entry that captures execution details."""
    timestamp: str  # ISO-8601 string from datetime.isoformat()
    level: LogLevel  # Severity; stored as the enum member itself
    node_id: Optional[str] = None  # Workflow node this entry belongs to, if any
    event_type: Optional[EventType] = None  # Structured event category, if any
    message: Optional[str] = None
    details: Dict[str, Any] = field(default_factory=dict)  # JSON-safe extras
    execution_path: List[str] = field(default_factory=list)  # Execution path for tracing
    duration: Optional[float] = None  # Duration in seconds
    def to_dict(self) -> Dict[str, Any]:
        # NOTE(review): ``level`` and ``event_type`` are returned as enum
        # members, not ``.value`` — downstream serialization must cope with
        # them (e.g. via json default=str) — confirm.
        return {
            "timestamp": self.timestamp,
            "level": self.level,
            "node_id": self.node_id,
            "event_type": self.event_type,
            "message": self.message,
            "details": self.details,
            "execution_path": self.execution_path,
            "duration": self.duration
        }
class WorkflowLogger:
    """Workflow logger that tracks the entire execution lifecycle."""

    def __init__(self, workflow_id: str = None, log_level: LogLevel = LogLevel.DEBUG, use_structured_logging: bool = True, log_to_console: bool = True):
        self.workflow_id = workflow_id or f"workflow_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
        self.logs: List[LogEntry] = []
        self.start_time = datetime.now()
        self.current_path: List[str] = []
        self.log_level: LogLevel = log_level
        self.log_to_console: bool = log_to_console
        self.use_structured_logging = use_structured_logging
        self.structured_logger: Optional[StructuredLogger] = None
        # FIX: create the timer store eagerly so external collaborators (e.g.
        # LogManager) can rely on ``_timers`` existing; ``__init_timers__``
        # remains as a guard for instances created without running __init__.
        self._timers: Dict[str, float] = {}
        if use_structured_logging:
            self.structured_logger = get_workflow_logger(self.workflow_id)

    def add_log(self, level: LogLevel, message: str = None, node_id: str = None,
                event_type: EventType = None, details: Dict[str, Any] = None,
                duration: float = None) -> LogEntry | None:
        """Add a log entry; returns None when filtered out by the log level."""
        if level < self.log_level:
            return None
        timestamp = datetime.now().isoformat()
        # Snapshot the path so later mutations don't rewrite old entries.
        execution_path = copy.deepcopy(self.current_path)
        safe_details = _json_safe(details or {})
        log_entry = LogEntry(
            timestamp=timestamp,
            level=level,
            node_id=node_id,
            event_type=event_type,
            message=message,
            details=safe_details,
            execution_path=execution_path,
            duration=duration
        )
        self.logs.append(log_entry)
        # Log to console if enabled
        if self.log_to_console:
            print(f"[{timestamp}] [{level.value}] "
                  f"{f'Node {node_id} - ' if node_id else ''}"
                  f"{f'Event {event_type} - ' if event_type else ''}"
                  f"{message} "
                  f"{f'Details: {details} ' if details else ''}"
                  f"{f'Duration: {duration}' if duration else ''}")
        # Log using structured logger if enabled
        if self.use_structured_logging and self.structured_logger:
            structured_details = {
                "workflow_id": self.workflow_id,
                "node_id": node_id,
                "event_type": event_type.value if event_type else None,
                "execution_path": execution_path,
                "duration": duration,
                **safe_details
            }
            if level == LogLevel.DEBUG:
                self.structured_logger.debug(message, **structured_details)
            elif level == LogLevel.INFO:
                self.structured_logger.info(message, **structured_details)
            elif level == LogLevel.WARNING:
                self.structured_logger.warning(message, **structured_details)
            elif level == LogLevel.ERROR:
                self.structured_logger.error(message, **structured_details)
            elif level == LogLevel.CRITICAL:
                self.structured_logger.critical(message, **structured_details)
        return log_entry

    def debug(self, message: str, node_id: str = None, event_type: EventType = None,
              details: Dict[str, Any] = None, duration: float | None = None) -> None:
        """Record a DEBUG-level entry."""
        self.add_log(LogLevel.DEBUG, message, node_id, event_type, details, duration)

    def info(self, message: str, node_id: str = None, event_type: EventType = None,
             details: Dict[str, Any] = None, duration: float | None = None) -> None:
        """Record an INFO-level entry."""
        self.add_log(LogLevel.INFO, message, node_id, event_type, details, duration)

    def warning(self, message: str, node_id: str = None, event_type: EventType = None,
                details: Dict[str, Any] = None, duration: float | None = None) -> None:
        """Record a WARNING-level entry."""
        self.add_log(LogLevel.WARNING, message, node_id, event_type, details, duration)

    def error(self, message: str, node_id: str = None, event_type: EventType = None,
              details: Dict[str, Any] = None, duration: float | None = None) -> None:
        """Record an ERROR-level entry."""
        self.add_log(LogLevel.ERROR, message, node_id, event_type, details, duration)

    def critical(self, message: str, node_id: str = None, event_type: EventType = None,
                 details: Dict[str, Any] = None, duration: float | None = None) -> None:
        """Record a CRITICAL-level entry."""
        # ``duration`` added (default None) for parity with the other helpers.
        self.add_log(LogLevel.CRITICAL, message, node_id, event_type, details, duration)

    def enter_node(self, node_id: str, inputs: List[Dict[str, str]], node_type: str = None,
                   details: Dict[str, Any] = None) -> None:
        """Record data when entering a node."""
        self.current_path.append(node_id)
        self.info(
            f"Entering node {node_id}",
            node_id=node_id,
            event_type=EventType.NODE_START,
            details={
                "inputs": inputs,
                "node_type": node_type,
                **(details or {})
            }
        )

    def exit_node(self, node_id: str, output: str, duration: float = None,
                  output_size: int = None, details: Dict[str, Any] = None) -> None:
        """Record data when exiting a node."""
        # Keep enter and exit logs separate so we can easily identify progress
        if self.current_path and self.current_path[-1] == node_id:
            self.current_path.pop()
        exit_details = {
            "output": output,
            "output_size": output_size,
            **(details or {})
        }
        self.info(
            f"Exiting node {node_id}",
            node_id=node_id,
            event_type=EventType.NODE_END,
            details=exit_details,
            duration=duration
        )

    def record_edge_process(self, from_node: str, to_node: str,
                            details: Dict[str, Any] = None) -> None:
        """Record an edge-processing event."""
        self.debug(
            f"Processing edge from {from_node} to {to_node}",
            node_id=from_node,
            event_type=EventType.EDGE_PROCESS,
            details={
                "to_node": to_node,
                **(details or {})
            }
        )

    def record_human_interaction(self, node_id: str, input_data: str = None, output: str = None,
                                 duration: float = None, details: Dict[str, Any] = None) -> None:
        """Record a human interaction."""
        call_details = {
            "input_data": input_data,
            "output": output,
            **(details or {})
        }
        self.info(
            f"Human interaction for node {node_id}",
            node_id=node_id,
            event_type=EventType.HUMAN_INTERACTION,
            details=call_details,
            duration=duration
        )

    def record_model_call(self, node_id: str, model_name: str,
                          input_data: str = None, output: str = None,
                          duration: float = None, details: Dict[str, Any] = None,
                          stage: CallStage | str | None = None) -> None:
        """Record a model invocation."""
        stage_value = stage.value if isinstance(stage, CallStage) else stage
        call_details = {
            "model_name": model_name,
            "input_data": input_data,
            "output": output,
            **(details or {})
        }
        if stage_value:
            call_details["stage"] = stage_value
        self.info(
            f"Model call for node {node_id}",
            node_id=node_id,
            event_type=EventType.MODEL_CALL,
            details=call_details,
            duration=duration
        )

    def record_tool_call(self, node_id: str, tool_name: str, tool_result: str,
                         duration: float = None, success: bool | None = True,
                         details: Dict[str, Any] = None,
                         stage: CallStage | str | None = None) -> None:
        """Record a tool invocation; failures are logged at ERROR level."""
        stage_value = stage.value if isinstance(stage, CallStage) else stage
        tool_details = {
            "tool_result": tool_result,
            "tool_name": tool_name,
            "success": success,
            **(details or {})
        }
        if stage_value:
            tool_details["stage"] = stage_value
        # success=None (unknown) is treated as non-failure.
        level = LogLevel.INFO if success is not False else LogLevel.ERROR
        self.add_log(
            level,
            f"Tool call {tool_name} for node {node_id}",
            node_id=node_id,
            event_type=EventType.TOOL_CALL,
            details=tool_details,
            duration=duration
        )

    def record_thinking_process(self, node_id: str, thinking_mode: str, thinking_result: str, stage: str,
                                duration: float = None, details: Dict[str, Any] = None) -> None:
        """Record a thinking-stage entry."""
        thinking_details = {
            "thinking_result": thinking_result,
            "thinking_mode": thinking_mode,
            "stage": stage,
            **(details or {})
        }
        self.info(
            f"Thinking process for node {node_id} ({thinking_mode} at {stage})",
            node_id=node_id,
            event_type=EventType.THINKING_PROCESS,
            details=thinking_details,
            duration=duration
        )

    def record_memory_operation(self, node_id: str, retrieved_memory: str, operation_type: str, stage: str,
                                duration: float = None, details: Dict[str, Any] = None) -> None:
        """Record a memory operation (retrieve/update)."""
        memory_details = {
            "retrieved_memory": retrieved_memory,
            "operation_type": operation_type,  # RETRIEVE or UPDATE
            "stage": stage,
            **(details or {})
        }
        self.info(
            f"Memory {operation_type} operation for node {node_id} at {stage}",
            node_id=node_id,
            event_type=EventType.MEMORY_OPERATION,
            details=memory_details,
            duration=duration
        )

    def record_workflow_start(self, workflow_config: Dict[str, Any] = None) -> None:
        """Record the workflow start event."""
        self.info(
            "Workflow execution started",
            event_type=EventType.WORKFLOW_START,
            details={
                "workflow_id": self.workflow_id,
                "node_count": workflow_config.get("node_count") if workflow_config else None,
                "edge_count": workflow_config.get("edge_count") if workflow_config else None,
            }
        )

    def record_workflow_end(self, success: bool = True,
                            duration: float = None, details: Dict[str, Any] = None) -> None:
        """Record the workflow end event."""
        end_details = {
            "success": success,
            "total_logs": len(self.logs),
            **(details or {})
        }
        level = LogLevel.INFO if success else LogLevel.ERROR
        self.add_log(
            level,
            "Workflow execution completed",
            event_type=EventType.WORKFLOW_END,
            details=end_details,
            duration=duration
        )

    def get_logs(self) -> List[Dict[str, Any]]:
        """Return all log entries as dictionaries."""
        return [log.to_dict() for log in self.logs]

    def get_logs_by_level(self, level: str) -> List[Dict[str, Any]]:
        """Return logs filtered by level.

        NOTE(review): entries store ``LogLevel`` members while *level* is a
        str; this comparison only matches if LogLevel is str-valued — confirm.
        """
        return [log.to_dict() for log in self.logs if log.level == level]

    def get_logs_by_node(self, node_id: str) -> List[Dict[str, Any]]:
        """Return logs filtered by node id."""
        return [log.to_dict() for log in self.logs if log.node_id == node_id]

    def get_execution_summary(self) -> Dict[str, Any]:
        """Return an execution summary (durations in ms for the total)."""
        total_duration = (datetime.now() - self.start_time).total_seconds() * 1000
        node_durations = {}
        for log in self.logs:
            if log.node_id and log.duration:
                if log.node_id not in node_durations:
                    node_durations[log.node_id] = 0
                node_durations[log.node_id] += log.duration
        # NOTE(review): these string comparisons assume LogLevel members
        # compare equal to their names/values as strings — confirm.
        error_count = len([log for log in self.logs if log.level in ["ERROR", "CRITICAL"]])
        warning_count = len([log for log in self.logs if log.level == "WARNING"])
        return {
            "workflow_id": self.workflow_id,
            "start_time": self.start_time.isoformat(),
            "total_duration": total_duration,
            "total_logs": len(self.logs),
            "error_count": error_count,
            "warning_count": warning_count,
            "node_durations": node_durations,
            "execution_path": self.current_path
        }

    def to_dict(self) -> Dict[str, Any]:
        """Return workflow id, start time, logs, and summary as one dict."""
        log_data = {
            "workflow_id": self.workflow_id,
            "start_time": self.start_time.isoformat(),
            "logs": self.get_logs(),
            "summary": self.get_execution_summary()
        }
        return log_data

    def to_json(self) -> str:
        """Serialize all logs to a JSON string."""
        return json.dumps(self.to_dict(), ensure_ascii=False, indent=2)

    def save_to_file(self, filepath: str) -> None:
        """Persist logs to a file on disk."""
        path = Path(filepath)
        path.parent.mkdir(parents=True, exist_ok=True)  # Create any missing parent directories
        path.write_text(self.to_json(), encoding='utf-8')

    # ================================================================
    # Timer Context Managers (integrated from LogManager)
    # ================================================================
    def __init_timers__(self):
        """Initialize timer storage if not exists (backward-compat guard)."""
        if not hasattr(self, '_timers'):
            self._timers: Dict[str, float] = {}

    @contextmanager
    def _timed(self, timer_key: str):
        """Record elapsed wall-clock seconds under *timer_key* on exit.

        Shared implementation for all public *_timer context managers; the
        key format of each public timer is relied upon by LogManager.
        """
        self.__init_timers__()
        start_time = time.time()
        try:
            yield
        finally:
            self._timers[timer_key] = time.time() - start_time

    @contextmanager
    def node_timer(self, node_id: str):
        """Context manager that times node execution."""
        with self._timed(node_id):
            yield

    @contextmanager
    def model_timer(self, node_id: str):
        """Context manager that times model invocations."""
        with self._timed(f"model_{node_id}"):
            yield

    @contextmanager
    def agent_timer(self, node_id: str):
        """Context manager that times agent invocations."""
        with self._timed(f"agent_{node_id}"):
            yield

    @contextmanager
    def human_timer(self, node_id: str):
        """Context manager that times human interactions."""
        with self._timed(f"human_{node_id}"):
            yield

    @contextmanager
    def tool_timer(self, node_id: str, tool_name: str):
        """Context manager that times tool invocations."""
        with self._timed(f"tool_{node_id}_{tool_name}"):
            yield

    @contextmanager
    def thinking_timer(self, node_id: str, stage: str):
        """Context manager that times thinking stages."""
        with self._timed(f"thinking_{node_id}_{stage}"):
            yield

    @contextmanager
    def memory_timer(self, node_id: str, operation_type: str, stage: str):
        """Context manager that times memory operations."""
        with self._timed(f"memory_{node_id}_{operation_type}_{stage}"):
            yield

    def get_timer(self, timer_key: str) -> Optional[float]:
        """Return the elapsed time recorded by the timer key."""
        self.__init_timers__()
        return self._timers.get(timer_key)
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "utils/logger.py",
"license": "Apache License 2.0",
"lines": 432,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
OpenBMB/ChatDev:utils/middleware.py | """Custom middleware for the DevAll workflow system."""
import uuid
from typing import Callable, Awaitable
from fastapi import Request, HTTPException, FastAPI
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
import time
import re
import os
from utils.structured_logger import get_server_logger, LogType
from utils.exceptions import SecurityError
async def correlation_id_middleware(request: Request, call_next: Callable):
    """Add correlation ID to requests for tracing."""
    # Reuse a caller-supplied ID when present so traces can span services.
    cid = request.headers.get("X-Correlation-ID") or str(uuid.uuid4())
    request.state.correlation_id = cid
    started = time.time()
    response = await call_next(request)
    elapsed = time.time() - started
    # Log the request/response pair under the same correlation ID.
    logger = get_server_logger()
    logger.log_request(
        request.method,
        str(request.url),
        correlation_id=cid,
        path=request.url.path,
        query_params=dict(request.query_params),
        client_host=request.client.host if request.client else None,
        user_agent=request.headers.get("user-agent"),
    )
    logger.log_response(
        response.status_code,
        elapsed,
        correlation_id=cid,
        content_length=response.headers.get("content-length"),
    )
    # Echo the ID back so clients can correlate their own logs.
    response.headers["X-Correlation-ID"] = cid
    return response
async def security_middleware(request: Request, call_next: Callable):
    """Security middleware to validate requests.

    Rejects non-JSON bodies on /api/ mutation endpoints (except multipart
    uploads) and blocks URL paths containing path-traversal sequences.
    Raises HTTPException(400) on violation.
    """
    # Validate content type for JSON endpoints
    if request.url.path.startswith("/api/") and request.method in ["POST", "PUT", "PATCH"]:
        content_type = request.headers.get("content-type", "").lower()
        # NOTE(review): the outer branch already restricts to POST/PUT/PATCH,
        # so the `request.method != "GET"` clause below is always true here.
        if not content_type.startswith("application/json") and request.method != "GET":
            # Skip validation for file uploads
            if not content_type.startswith("multipart/form-data"):
                raise HTTPException(
                    status_code=400,
                    detail="Content-Type must be application/json for API endpoints"
                )
    # Validate file paths to prevent path traversal
    # Check URL path for suspicious patterns
    path = request.url.path
    # Cheap substring pre-filter; the regex below is the authoritative check.
    if ".." in path or "./" in path:
        # Use a more thorough check
        if re.search(r"(\.{2}[/\\])|([/\\]\.{2})", path):
            logger = get_server_logger()
            logger.log_security_event(
                "PATH_TRAVERSAL_ATTEMPT",
                f"Suspicious path detected: {path}",
                correlation_id=getattr(request.state, 'correlation_id', str(uuid.uuid4()))
            )
            raise HTTPException(status_code=400, detail="Invalid path")
    response = await call_next(request)
    return response
async def rate_limit_middleware(request: Request, call_next: Callable):
    """Rate limiting middleware (basic implementation)."""
    # Placeholder pass-through: a production deployment should track request
    # counts in shared storage (e.g. Redis) before forwarding the request.
    return await call_next(request)
def add_cors_middleware(app: FastAPI) -> None:
    """Configure and attach CORS middleware."""
    # Dev defaults; override via CORS_ALLOW_ORIGINS (comma-separated)
    env_origins = os.getenv("CORS_ALLOW_ORIGINS")
    if env_origins:
        allowed = [origin.strip() for origin in env_origins.split(",") if origin.strip()]
        origin_regex = None
    else:
        allowed = [
            "http://localhost:5173",
            "http://127.0.0.1:5173",
        ]
        # Helpful in dev: allow localhost/127.0.0.1 on any port
        origin_regex = r"^https?://(localhost|127\.0\.0\.1)(:\d+)?$"
    app.add_middleware(
        CORSMiddleware,
        allow_origins=allowed,
        allow_origin_regex=origin_regex,
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
        expose_headers=["X-Correlation-ID"],
        max_age=600,
    )
def add_middleware(app: FastAPI):
    """Add all middleware to the FastAPI application."""
    # CORS must be attached first so preflight requests are handled.
    add_cors_middleware(app)
    # Registration order matters: correlation IDs before security checks.
    for handler in (correlation_id_middleware, security_middleware):
        app.middleware("http")(handler)
    # app.middleware("http")(rate_limit_middleware)  # Enable if needed
    return app
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "utils/middleware.py",
"license": "Apache License 2.0",
"lines": 106,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
OpenBMB/ChatDev:utils/registry.py | """Generic registry utilities for pluggable backend components."""
from dataclasses import dataclass, field
from importlib import import_module
from typing import Any, Callable, Dict, Iterable, Optional
class RegistryError(RuntimeError):
    """Raised when registering duplicated or invalid entries.

    Also raised by ``Registry.get`` when looking up an unknown name.
    """
@dataclass(slots=True)
class RegistryEntry:
    """Named registry item whose target is produced lazily by ``loader``."""
    name: str  # Registration key within the owning Registry's namespace
    loader: Callable[[], Any]  # Zero-arg factory; may import modules lazily
    metadata: Dict[str, Any] = field(default_factory=dict)
    def load(self) -> Any:
        """Invoke the loader and return the registered target."""
        return self.loader()
class Registry:
    """Lightweight registry with lazy module loading support."""

    def __init__(self, namespace: str) -> None:
        self.namespace = namespace
        self._entries: Dict[str, RegistryEntry] = {}

    def register(
        self,
        name: str,
        *,
        loader: Callable[[], Any] | None = None,
        target: Any | None = None,
        metadata: Optional[Dict[str, Any]] = None,
        module_path: str | None = None,
        attr_name: str | None = None,
    ) -> None:
        """Register *name* via an explicit loader, an eager target, or a lazy module path."""
        if name in self._entries:
            raise RegistryError(f"Duplicate registration for '{name}' in {self.namespace}")
        resolved_loader = loader if loader is not None else self._build_loader(target, module_path, attr_name)
        self._entries[name] = RegistryEntry(
            name=name,
            loader=resolved_loader,
            metadata=dict(metadata or {}),
        )

    @staticmethod
    def _build_loader(
        target: Any | None,
        module_path: str | None,
        attr_name: str | None,
    ) -> Callable[[], Any]:
        """Derive a zero-arg loader from an eager target or a module path/attr pair."""
        if target is None and module_path is None:
            raise RegistryError("Must provide loader, target, or module_path/attr_name")
        if target is not None:
            # Bind eagerly as a default argument to avoid late-binding issues.
            return lambda obj=target: obj
        if not attr_name:
            raise RegistryError("module_path requires attr_name")

        def _lazy_loader(mod_path: str = module_path, attr: str = attr_name) -> Any:
            return getattr(import_module(mod_path), attr)

        return _lazy_loader

    def get(self, name: str) -> RegistryEntry:
        """Return the entry for *name*; raise RegistryError when unknown."""
        try:
            return self._entries[name]
        except KeyError as exc:
            raise RegistryError(f"Unknown entry '{name}' in {self.namespace}") from exc

    def names(self) -> Iterable[str]:
        """Return the registered names."""
        return self._entries.keys()

    def items(self) -> Iterable[tuple[str, RegistryEntry]]:
        """Return (name, entry) pairs."""
        return self._entries.items()

    def metadata_for(self, name: str) -> Dict[str, Any]:
        """Return a copy of the metadata recorded for *name*."""
        return dict(self.get(name).metadata)
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "utils/registry.py",
"license": "Apache License 2.0",
"lines": 55,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
OpenBMB/ChatDev:utils/schema_exporter.py | """Schema exporter for dynamic configuration metadata."""
import hashlib
import json
from dataclasses import dataclass
from typing import Any, Dict, Iterable, List, Mapping, Sequence, Type
from entity.configs import BaseConfig
from entity.configs.graph import DesignConfig
SCHEMA_VERSION = "0.1.0"
class SchemaResolutionError(ValueError):
    """Raised when breadcrumbs fail to resolve to a config node.

    Covers malformed breadcrumb entries (missing 'node', non-integer 'index')
    as well as node mismatches and non-navigable fields during resolution.
    """
@dataclass(frozen=True)
class Breadcrumb:
"""Describes one hop in the config tree."""
node: str
field: str | None = None
value: Any | None = None
@classmethod
def from_mapping(cls, data: Mapping[str, Any]) -> "Breadcrumb":
node = str(data.get("node")) if data.get("node") else ""
if not node:
raise SchemaResolutionError("breadcrumb entry missing 'node'")
field = data.get("field")
if field is not None:
field = str(field)
index = data.get("index")
if index is not None and not isinstance(index, int):
raise SchemaResolutionError("breadcrumb 'index' must be integer when provided")
value = data.get("value")
return cls(node=node, field=field, value=value)
def to_json(self) -> Dict[str, Any]:
payload: Dict[str, Any] = {"node": self.node}
if self.field is not None:
payload["field"] = self.field
if self.value is not None:
payload["value"] = self.value
return payload
def _normalize_breadcrumbs(raw: Sequence[Mapping[str, Any]] | None) -> List[Breadcrumb]:
    """Convert raw breadcrumb mappings into Breadcrumb instances ([] for empty/None input)."""
    return [Breadcrumb.from_mapping(entry) for entry in (raw or ())]
def _resolve_config_class(
    breadcrumbs: Sequence[Breadcrumb],
    *,
    root_cls: Type[BaseConfig] = DesignConfig,
) -> Type[BaseConfig]:
    """Walk *breadcrumbs* from *root_cls* and return the config class they land on.

    Raises:
        SchemaResolutionError: when a hop names the wrong class or targets a
            field that cannot be descended into.
    """
    cursor: Type[BaseConfig] = root_cls
    for hop in breadcrumbs:
        # Every hop must name the class we are currently standing on.
        if hop.node != cursor.__name__:
            raise SchemaResolutionError(
                f"breadcrumb node '{hop.node}' does not match current config '{cursor.__name__}'"
            )
        if hop.field is None:
            # A node-only hop re-asserts the current class without descending.
            continue
        next_cls = cursor.resolve_child(hop.field, hop.value)
        if next_cls is None:
            # Dynamic routing failed; fall back to the static field spec.
            spec = cursor.field_specs().get(hop.field)
            if not spec or spec.child is None:
                raise SchemaResolutionError(
                    f"field '{hop.field}' on {cursor.__name__} is not navigable"
                )
            next_cls = spec.child
        cursor = next_cls
    return cursor
def _serialize_field(config_cls: Type[BaseConfig], name: str, spec_dict: Dict[str, Any]) -> Dict[str, Any]:
    """Serialize one field spec, attaching any child routes keyed on that field."""
    data = spec_dict[name].to_json()
    routes: List[Dict[str, Any]] = []
    for key, target in config_cls.child_routes().items():
        if key.field != name:
            continue
        routes.append({"childKey": key.to_json(), "childNode": target.__name__})
    if routes:
        data["childRoutes"] = routes
    return data
def _ordered_field_names(specs: Mapping[str, Any]) -> List[str]:
"""Return field names with required ones first while keeping relative order."""
items = list(specs.items())
required_names = [name for name, spec in items if getattr(spec, "required", False)]
optional_names = [name for name, spec in items if not getattr(spec, "required", False)]
return required_names + optional_names
def _hash_payload(payload: Dict[str, Any]) -> str:
serialized = json.dumps(payload, sort_keys=True, ensure_ascii=False, default=str)
return hashlib.sha1(serialized.encode("utf-8")).hexdigest()
def build_schema_response(
    breadcrumbs_raw: Sequence[Mapping[str, Any]] | None = None,
    *,
    root_cls: Type[BaseConfig] = DesignConfig,
) -> Dict[str, Any]:
    """Return a JSON-serializable schema response for the provided breadcrumbs."""
    crumbs = _normalize_breadcrumbs(breadcrumbs_raw)
    target = _resolve_config_class(crumbs, root_cls=root_cls)
    node_schema = target.collect_schema()
    specs = target.field_specs()
    crumbs_json = [crumb.to_json() for crumb in crumbs]
    response = {
        "schemaVersion": SCHEMA_VERSION,
        "node": node_schema.node,
        "fields": [_serialize_field(target, field_name, specs) for field_name in _ordered_field_names(specs)],
        "constraints": [constraint.to_json() for constraint in node_schema.constraints],
        "breadcrumbs": crumbs_json,
    }
    # Cache key depends only on the resolved node and the breadcrumb trail.
    response["cacheKey"] = _hash_payload({"node": node_schema.node, "breadcrumbs": crumbs_json})
    return response
__all__ = [
"Breadcrumb",
"SchemaResolutionError",
"build_schema_response",
]
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "utils/schema_exporter.py",
"license": "Apache License 2.0",
"lines": 112,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
OpenBMB/ChatDev:utils/strs.py | def titleize(value: str) -> str:
sanitized = value.replace("_", " ").replace("-", " ").strip()
if not sanitized:
return value
return " ".join(part.capitalize() for part in sanitized.split()) | {
"repo_id": "OpenBMB/ChatDev",
"file_path": "utils/strs.py",
"license": "Apache License 2.0",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
OpenBMB/ChatDev:utils/structured_logger.py | """Structured logging utilities for the DevAll workflow system."""
import json
import logging
import sys
import traceback
import datetime
from enum import Enum
from pathlib import Path
import os
from entity.enums import LogLevel
from utils.exceptions import MACException
class LogType(str, Enum):
    """Types of structured logs.

    Inherits ``str`` so member values serialize naturally into JSON records.
    """
    REQUEST = "request"          # inbound requests (log_request)
    RESPONSE = "response"        # outbound responses (log_response)
    ERROR = "error"              # errors and exceptions (default for error/critical)
    WORKFLOW = "workflow"        # workflow lifecycle events (default for info/debug/warning)
    SECURITY = "security"        # security-relevant events
    PERFORMANCE = "performance"  # timing/metric entries
class StructuredLogger:
    """A structured logger that outputs JSON format logs with consistent fields.

    Every record is serialized as a single JSON object carrying timestamp,
    log_type, level, logger name, message, correlation_id, and any extra
    keyword fields supplied by the caller.
    """

    def __init__(self, name: str, log_level: LogLevel = LogLevel.INFO, log_file: str | None = None):
        """Create a logger named *name*; writes to *log_file*, or stdout when None."""
        self.name = name
        self.log_level = log_level
        self.logger = logging.getLogger(name)
        self.logger.setLevel(self._get_logging_level(log_level))
        # Create formatter -- records are pre-serialized JSON, so emit them verbatim.
        formatter = logging.Formatter('%(message)s')
        # Create handler
        if log_file:
            # Ensure log directory exists
            log_path = Path(log_file)
            log_path.parent.mkdir(parents=True, exist_ok=True)
            handler = logging.FileHandler(log_file)
        else:
            handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(formatter)
        # NOTE(review): a handler is appended on every construction; two instances
        # sharing the same *name* would emit duplicate lines -- confirm intended.
        self.logger.addHandler(handler)
        # For correlation IDs (instance-wide default; per-call ids take precedence)
        self.correlation_id = None

    def _get_logging_level(self, log_level: LogLevel) -> int:
        """Convert LogLevel enum to logging module level (INFO for unknown values)."""
        level_map = {
            LogLevel.DEBUG: logging.DEBUG,
            LogLevel.INFO: logging.INFO,
            LogLevel.WARNING: logging.WARNING,
            LogLevel.ERROR: logging.ERROR,
            LogLevel.CRITICAL: logging.CRITICAL
        }
        return level_map.get(log_level, logging.INFO)

    def _should_log(self, level: LogLevel) -> bool:
        """Check if a log level should be logged based on configured level.

        NOTE(review): relies on LogLevel supporting ``>=`` ordered by
        severity -- confirm against entity.enums.LogLevel's definition.
        """
        return level >= self.log_level

    def _format_log(self, log_type: LogType, level: LogLevel, message: str,
                   correlation_id: str | None = None, **kwargs) -> str:
        """Format log entry as JSON string.

        Extra keyword arguments are merged into the top level of the record;
        non-JSON values (e.g. the datetime timestamp) are stringified by
        ``default=str``.
        """
        log_entry = {
            "timestamp": datetime.datetime.now(datetime.UTC),
            "log_type": log_type.value,
            "level": level.value,
            "logger": self.name,
            "message": message,
            "correlation_id": correlation_id or self.correlation_id,
            **kwargs
        }
        return json.dumps(log_entry, default=str)

    def _log(self, log_type: LogType, level: LogLevel, message: str,
            correlation_id: str | None = None, **kwargs):
        """Internal logging method: filter by level, format, then emit."""
        if self._should_log(level):
            formatted_log = self._format_log(log_type, level, message, correlation_id, **kwargs)
            log_level = self._get_logging_level(level)
            self.logger.log(log_level, formatted_log)

    def info(self, message: str, correlation_id: str | None = None, log_type: LogType = LogType.WORKFLOW, **kwargs):
        """Log information."""
        self._log(log_type, LogLevel.INFO, message, correlation_id, **kwargs)

    def debug(self, message: str, correlation_id: str | None = None, log_type: LogType = LogType.WORKFLOW, **kwargs):
        """Log debug information."""
        self._log(log_type, LogLevel.DEBUG, message, correlation_id, **kwargs)

    def warning(self, message: str, correlation_id: str | None = None, log_type: LogType = LogType.WORKFLOW, **kwargs):
        """Log warning."""
        self._log(log_type, LogLevel.WARNING, message, correlation_id, **kwargs)

    def error(self, message: str, correlation_id: str | None = None, log_type: LogType = LogType.ERROR, **kwargs):
        """Log error with details."""
        self._log(log_type, LogLevel.ERROR, message, correlation_id, **kwargs)

    def critical(self, message: str, correlation_id: str | None = None, log_type: LogType = LogType.ERROR, **kwargs):
        """Log critical error."""
        self._log(log_type, LogLevel.CRITICAL, message, correlation_id, **kwargs)

    def log_exception(self, exception: Exception, message: str | None = None,
                     correlation_id: str | None = None, **kwargs) -> None:
        """Log an exception with its traceback.

        *message* defaults to ``str(exception)``; MACException instances are
        enriched with their error code and details.
        """
        if message is None:
            message = str(exception)
        # Include exception info
        exception_info = {
            "exception_type": type(exception).__name__,
            "exception_message": str(exception),
            # NOTE(review): format_exc() reads the *currently handled* exception;
            # calling this outside an except block yields "NoneType: None".
            "traceback": traceback.format_exc()
        }
        if isinstance(exception, MACException):
            exception_info["error_code"] = exception.error_code
            exception_info["exception_details"] = exception.details
        self._log(LogType.ERROR, LogLevel.ERROR, message, correlation_id,
                  exception=exception_info, **kwargs)

    def log_request(self, method: str, url: str, correlation_id: str | None = None, **kwargs):
        """Log incoming request."""
        self._log(LogType.REQUEST, LogLevel.INFO, f"Incoming {method} request to {url}",
                  correlation_id, method=method, url=url, **kwargs)

    def log_response(self, status_code: int, response_time: float, correlation_id: str | None = None, **kwargs):
        """Log outgoing response (response_time is in seconds)."""
        self._log(LogType.RESPONSE, LogLevel.INFO,
                  f"Response with status {status_code} in {response_time:.3f}s",
                  correlation_id, status_code=status_code, response_time=response_time, **kwargs)

    def log_security_event(self, event_type: str, message: str, correlation_id: str | None = None, **kwargs):
        """Log security-related events (emitted at WARNING level)."""
        self._log(LogType.SECURITY, LogLevel.WARNING, message, correlation_id,
                  event_type=event_type, **kwargs)

    def log_performance(self, operation: str, duration: float, correlation_id: str | None = None, **kwargs):
        """Log performance metrics (duration is in seconds)."""
        self._log(LogType.PERFORMANCE, LogLevel.INFO,
                  f"Operation {operation} completed in {duration:.3f}s",
                  correlation_id, operation=operation, duration=duration, **kwargs)

    def log_workflow_event(self, workflow_id: str, event_type: str, message: str,
                          correlation_id: str | None = None, **kwargs):
        """Log workflow-specific events."""
        self._log(LogType.WORKFLOW, LogLevel.INFO, message, correlation_id,
                  workflow_id=workflow_id, event_type=event_type, **kwargs)

    def set_correlation_id(self, correlation_id: str):
        """Set the correlation ID for this logger instance."""
        self.correlation_id = correlation_id
# Global logger instances
_server_logger = None
_workflow_logger = None
def get_server_logger() -> StructuredLogger:
    """Return the process-wide server logger, creating it on first use.

    Configuration comes from the SERVER_LOG_FILE and LOG_LEVEL environment
    variables (defaults: logs/server.log, INFO).
    """
    global _server_logger
    if _server_logger is not None:
        return _server_logger
    configured_file = os.getenv('SERVER_LOG_FILE', 'logs/server.log')
    configured_level = LogLevel[os.getenv('LOG_LEVEL', 'INFO').upper()]
    _server_logger = StructuredLogger('server', configured_level, configured_file)
    return _server_logger
def get_workflow_logger(name: str = 'workflow') -> StructuredLogger:
"""Get a workflow logger instance."""
global _workflow_logger
if _workflow_logger is None:
log_file = os.getenv('WORKFLOW_LOG_FILE', f'logs/{name}.log')
log_level_str = os.getenv('LOG_LEVEL', 'INFO').upper()
log_level = LogLevel[log_level_str]
_workflow_logger = StructuredLogger(name, log_level, log_file)
return _workflow_logger | {
"repo_id": "OpenBMB/ChatDev",
"file_path": "utils/structured_logger.py",
"license": "Apache License 2.0",
"lines": 151,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
OpenBMB/ChatDev:utils/task_input.py | """Helpers for building initial task inputs with optional attachments."""
import mimetypes
from pathlib import Path
from typing import List, Sequence, Union
from entity.messages import Message, MessageBlock, MessageBlockType, MessageRole
from utils.attachments import AttachmentStore
class TaskInputBuilder:
    """Builds task input payloads that optionally include attachments."""

    def __init__(self, attachment_store: AttachmentStore):
        self.attachment_store = attachment_store

    def build_from_file_paths(
        self,
        prompt: str,
        attachment_paths: Sequence[str],
    ) -> Union[str, List[Message]]:
        """Register each path as an attachment and build the task messages.

        Returns the bare *prompt* string when no attachment paths are given.

        Raises:
            FileNotFoundError: when an attachment path does not exist.
        """
        if not attachment_paths:
            return prompt
        blocks: List[MessageBlock] = []
        for raw_path in attachment_paths:
            file_path = Path(raw_path).expanduser()
            if not file_path.exists():
                raise FileNotFoundError(f"Attachment not found: {file_path}")
            guessed_mime, _ = mimetypes.guess_type(str(file_path))
            # Register through the store so the file is tracked per session.
            record = self.attachment_store.register_file(
                file_path,
                kind=MessageBlockType.from_mime_type(guessed_mime),
                display_name=file_path.name,
                mime_type=guessed_mime,
                extra={
                    "source": "user_upload",
                    "origin": "cli_attachment",
                    "original_path": str(file_path),
                },
            )
            blocks.append(record.as_message_block())
        return self.build_from_blocks(prompt, blocks)

    @staticmethod
    def build_from_blocks(prompt: str, blocks: Sequence[MessageBlock]) -> List[Message]:
        """Assemble a single USER message from *prompt* plus attachment blocks."""
        assembled: List[MessageBlock] = []
        if prompt:
            assembled.append(MessageBlock.text_block(prompt))
        assembled.extend(blocks)
        if not assembled:
            # Guarantee at least one (empty) text block so the message is well-formed.
            assembled.append(MessageBlock.text_block(""))
        message = Message(
            role=MessageRole.USER,
            content=assembled,
            metadata={"source": "TASK"},
        )
        return [message]
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "utils/task_input.py",
"license": "Apache License 2.0",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
OpenBMB/ChatDev:utils/token_tracker.py | """Token usage tracking module for DevAll project."""
from dataclasses import dataclass, field
from datetime import datetime
from typing import Dict, List, Optional, Any
from collections import defaultdict
@dataclass
class TokenUsage:
    """Stores token usage metrics for individual API calls."""

    input_tokens: int = 0
    output_tokens: int = 0
    total_tokens: int = 0
    metadata: Dict[str, Any] = field(default_factory=dict)
    timestamp: datetime = field(default_factory=datetime.now)
    node_id: Optional[str] = None
    model_name: Optional[str] = None
    workflow_id: Optional[str] = None
    provider: Optional[str] = None  # which LLM provider served the call

    def to_dict(self):
        """Convert to a JSON-friendly dictionary (timestamp as ISO string)."""
        snapshot = {
            name: getattr(self, name)
            for name in ("input_tokens", "output_tokens", "total_tokens")
        }
        snapshot["metadata"] = dict(self.metadata)
        snapshot["timestamp"] = self.timestamp.isoformat()
        for name in ("node_id", "model_name", "workflow_id", "provider"):
            snapshot[name] = getattr(self, name)
        return snapshot
class TokenTracker:
    """Singleton class to track token usage across a workflow."""

    def __init__(self, workflow_id: str):
        self.workflow_id = workflow_id
        self.total_usage = TokenUsage()
        self.node_usages = defaultdict(TokenUsage)
        self.model_usages = defaultdict(TokenUsage)
        self.call_history = []
        self.node_call_counts = defaultdict(int)  # executions per node

    @staticmethod
    def _accumulate(target: TokenUsage, usage: TokenUsage) -> None:
        """Add *usage* token counts onto *target* in place."""
        target.input_tokens += usage.input_tokens
        target.output_tokens += usage.output_tokens
        target.total_tokens += usage.total_tokens

    def record_usage(self, node_id: str, model_name: str, usage: TokenUsage, provider: str = None):
        """Records token usage for a specific call, handling multiple node executions."""
        # Back-fill the provider on the usage record when it was not set by the caller.
        if provider and not usage.provider:
            usage.provider = provider
        self._accumulate(self.total_usage, usage)
        # Per-node and per-model aggregates share the same update logic.
        for bucket in (self.node_usages[node_id], self.model_usages[model_name]):
            self._accumulate(bucket, usage)
            if provider:
                bucket.provider = provider
        self.node_call_counts[node_id] += 1
        history_entry = {
            "node_id": node_id,
            "model_name": model_name,
            "input_tokens": usage.input_tokens,
            "output_tokens": usage.output_tokens,
            "total_tokens": usage.total_tokens,
            "metadata": dict(usage.metadata),
            "timestamp": usage.timestamp.isoformat(),
            "execution_number": self.node_call_counts[node_id],  # which execution this is
        }
        if provider:
            history_entry["provider"] = provider
        self.call_history.append(history_entry)

    def get_total_usage(self) -> TokenUsage:
        """Get total token usage for the workflow."""
        return self.total_usage

    def get_node_usage(self, node_id: str) -> TokenUsage:
        """Get token usage for a specific node (across all its executions)."""
        return self.node_usages[node_id]

    def get_model_usage(self, model_name: str) -> TokenUsage:
        """Get token usage for a specific model."""
        return self.model_usages[model_name]

    def get_node_execution_count(self, node_id: str) -> int:
        """Get how many times a node was executed."""
        return self.node_call_counts[node_id]

    def get_token_usage(self) -> Dict[str, Any]:
        """Return a JSON-ready summary of everything tracked so far."""
        def counts(usage: TokenUsage) -> Dict[str, int]:
            return {
                "input_tokens": usage.input_tokens,
                "output_tokens": usage.output_tokens,
                "total_tokens": usage.total_tokens,
            }
        return {
            "workflow_id": self.workflow_id,
            "total_usage": counts(self.total_usage),
            "node_usages": {node_id: counts(u) for node_id, u in self.node_usages.items()},
            "model_usages": {model: counts(u) for model, u in self.model_usages.items()},
            "node_execution_counts": dict(self.node_call_counts),
            "call_history": self.call_history,
        }

    def export_to_file(self, filepath: str):
        """Export token usage data to a JSON file, creating parent directories."""
        import json
        from pathlib import Path
        Path(filepath).parent.mkdir(parents=True, exist_ok=True)
        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(self.get_token_usage(), f, ensure_ascii=False, indent=2)
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "utils/token_tracker.py",
"license": "Apache License 2.0",
"lines": 129,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
OpenBMB/ChatDev:utils/vars_resolver.py | """Placeholder resolution for design configs."""
import re
from typing import Any, Dict, Mapping, MutableMapping, Sequence
from entity.configs.base import ConfigError, extend_path
_PLACEHOLDER_PATTERN = re.compile(r"\$\{([A-Za-z0-9_]+)\}")
_PLACEHOLDER_ONLY_PATTERN = re.compile(r"^\s*\$\{([A-Za-z0-9_]+)\}\s*$")
class PlaceholderResolver:
    """Resolve ``${VAR}`` placeholders within nested structures.

    Lookup order: already-resolved root vars, then raw root vars (resolved
    lazily with cycle detection), then the environment lookup.  Lists and
    mappings are rewritten in place.
    """

    def __init__(self, env_lookup: Mapping[str, Any], root_vars: Mapping[str, Any]):
        # Defensive copies so resolution never mutates the caller's mappings.
        self._env_lookup = dict(env_lookup)
        self._raw_root = dict(root_vars or {})
        # Cache of root vars that have been fully resolved.
        self._resolved_root: Dict[str, Any] = {}

    @property
    def resolved_root(self) -> Dict[str, Any]:
        # include untouched root vars so undeclared-but-needed entries remain available
        # NOTE: builds a fresh dict on every access.
        merged = dict(self._raw_root)
        merged.update(self._resolved_root)
        return merged

    def resolve(self, data: MutableMapping[str, Any], *, path: str = "root") -> MutableMapping[str, Any]:
        """Resolve all placeholders in *data* in place and return it.

        Raises:
            ConfigError: if *data* is not a mapping, a placeholder cannot be
                resolved, or a placeholder cycle is detected.
        """
        if not isinstance(data, MutableMapping):
            raise ConfigError("YAML root must be a mapping", path=path)
        self._resolve_value(data, path, stack=())
        return data

    def _resolve_value(self, value: Any, path: str, *, stack: Sequence[str]) -> Any:
        """Recursively resolve *value*; lists/mappings are updated in place."""
        if isinstance(value, str):
            return self._resolve_string(value, path, stack)
        if isinstance(value, list):
            for idx, item in enumerate(value):
                value[idx] = self._resolve_value(item, extend_path(path, f"[{idx}]"), stack=stack)
            return value
        if isinstance(value, MutableMapping):
            # Snapshot the keys so in-place reassignment is safe while iterating.
            for key in list(value.keys()):
                child_path = extend_path(path, str(key))
                value[key] = self._resolve_value(value[key], child_path, stack=stack)
            return value
        return value

    def _resolve_string(self, raw: str, path: str, stack: Sequence[str]) -> Any:
        """Resolve placeholders within a single string.

        A string that is exactly one placeholder keeps the looked-up value's
        original type; otherwise every placeholder is substituted as text.
        """
        only_match = _PLACEHOLDER_ONLY_PATTERN.fullmatch(raw)
        if only_match:
            var_name = only_match.group(1)
            return self._lookup(var_name, path, stack)

        def replacer(match: re.Match[str]) -> str:
            var_name = match.group(1)
            resolved = self._lookup(var_name, path, stack)
            return str(resolved)

        return _PLACEHOLDER_PATTERN.sub(replacer, raw)

    def _lookup(self, name: str, path: str, stack: Sequence[str]) -> Any:
        """Look up *name*, lazily resolving raw root vars with cycle detection."""
        if name in self._resolved_root:
            return self._resolved_root[name]
        # *stack* holds the chain of root vars currently being resolved;
        # revisiting one means the definitions reference each other cyclically.
        if name in stack:
            raise ConfigError(f"Detected placeholder cycle referencing '{name}'", path)
        if name in self._raw_root:
            resolved = self._resolve_value(self._raw_root[name], extend_path("vars", name), stack=stack + (name,))
            self._resolved_root[name] = resolved
            return resolved
        if name in self._env_lookup:
            return self._env_lookup[name]
        raise ConfigError(f"Unresolved placeholder '${{{name}}}'", path)
def resolve_design_placeholders(data: MutableMapping[str, Any], *, env_lookup: Mapping[str, Any], path: str = "root") -> Dict[str, Any]:
    """Resolve placeholders in-place and return the resolved root vars."""
    root_vars = data.get("vars") or {}
    resolver = PlaceholderResolver(env_lookup, root_vars)
    resolver.resolve(data, path=path)
    # Each ``resolved_root`` access builds a fresh merged dict, so the mapping
    # stored under "vars" and the returned mapping are distinct objects.
    data["vars"] = resolver.resolved_root
    return resolver.resolved_root
def resolve_mapping_with_vars(
    data: MutableMapping[str, Any],
    *,
    env_lookup: Mapping[str, Any],
    vars_map: Mapping[str, Any],
    path: str = "root",
) -> MutableMapping[str, Any]:
    """Resolve placeholders using an explicit vars map without mutating it."""
    # PlaceholderResolver copies vars_map internally, so the caller's map is safe.
    return PlaceholderResolver(env_lookup, vars_map).resolve(data, path=path)
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "utils/vars_resolver.py",
"license": "Apache License 2.0",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
OpenBMB/ChatDev:utils/workspace_scanner.py | """Utilities for scanning nested code_workspace directories."""
from dataclasses import dataclass
from pathlib import Path
from typing import Iterator, List, Optional
@dataclass
class WorkspaceEntry:
"""Metadata about a workspace file or directory."""
path: str # relative path from workspace root
type: str # "file" | "directory"
size: Optional[int]
modified_ts: Optional[float]
depth: int
def iter_workspace_entries(
root: Path | str,
*,
recursive: bool = True,
max_depth: int = 5,
include_hidden: bool = False,
) -> Iterator[WorkspaceEntry]:
"""Yield entries under the workspace root respecting depth/hidden filters."""
base = Path(root).resolve()
if not base.exists():
return
stack: List[tuple[Path, int]] = [(base, 0)]
while stack:
current, depth = stack.pop()
try:
children = sorted(current.iterdir(), key=lambda p: p.name.lower())
except FileNotFoundError:
continue
except PermissionError:
continue
for child in children:
try:
rel = child.relative_to(base)
except ValueError:
continue
if not include_hidden and _is_hidden(rel):
continue
entry_type = "directory" if child.is_dir() else "file"
size = None
modified = None
try:
stat = child.stat()
modified = stat.st_mtime
if child.is_file():
size = stat.st_size
except (FileNotFoundError, PermissionError, OSError):
pass
child_depth = depth + 1
yield WorkspaceEntry(
path=str(rel),
type=entry_type,
size=size,
modified_ts=modified,
depth=child_depth,
)
if recursive and child.is_dir() and child_depth < max_depth:
stack.append((child, child_depth))
def _is_hidden(relative_path: Path) -> bool:
return any(part.startswith(".") for part in relative_path.parts)
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "utils/workspace_scanner.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
OpenBMB/ChatDev:workflow/cycle_manager.py | """Cycle detection and management for workflow graphs."""
from typing import Dict, List, Set, Optional, Any
from dataclasses import dataclass, field
from entity.configs import Node
@dataclass
class CycleInfo:
    """Information about a detected cycle in the workflow graph."""

    cycle_id: str
    nodes: Set[str]  # node IDs belonging to the cycle
    entry_nodes: Set[str]  # nodes that can enter the cycle (kept for compatibility)
    exit_edges: List[Dict[str, Any]]  # edges that can exit the cycle
    iteration_count: int = 0
    max_iterations: Optional[int] = None  # explicit safety limit; None -> use default
    is_active: bool = False
    execution_state: Dict[str, Any] = field(default_factory=dict)
    # Fields for the refactored cycle execution
    initial_node: Optional[str] = None  # unique initial node when first entering the cycle
    configured_entry_node: Optional[str] = None  # user-configured entry node, if any
    max_iterations_default: int = 100  # fallback cap when max_iterations is None

    def add_node(self, node_id: str) -> None:
        """Add a node to the cycle."""
        self.nodes.add(node_id)

    def add_entry_node(self, node_id: str) -> None:
        """Add an entry node to the cycle."""
        self.entry_nodes.add(node_id)

    def add_exit_edge(self, edge_config: Dict[str, Any]) -> None:
        """Add an exit edge configuration."""
        self.exit_edges.append(edge_config)

    def increment_iteration(self) -> None:
        """Increment the iteration counter."""
        self.iteration_count += 1

    def get_max_iterations(self) -> int:
        """Return the effective maximum iterations (explicit limit or default)."""
        if self.max_iterations is None:
            return self.max_iterations_default
        return self.max_iterations

    def is_within_iteration_limit(self) -> bool:
        """Check whether the cycle may run another iteration."""
        return self.iteration_count < self.get_max_iterations()

    def reset_iteration_count(self) -> None:
        """Reset the iteration counter."""
        self.iteration_count = 0

    def is_node_in_cycle(self, node_id: str) -> bool:
        """Check if a node is part of this cycle."""
        return node_id in self.nodes

    def is_entry_node(self, node_id: str) -> bool:
        """Check if a node is an entry node for this cycle."""
        return node_id in self.entry_nodes
class CycleDetector:
    """Detects cycles in workflow graphs using Tarjan's algorithm."""

    def __init__(self):
        # Tarjan bookkeeping: discovery index per node, low-link values,
        # an explicit stack, and the set of nodes currently on that stack.
        self.index_counter = 0
        self.index: Dict[str, int] = {}
        self.low_link: Dict[str, int] = {}
        self.stack: List[str] = []
        self.on_stack: Set[str] = set()
        self.cycles: List[Set[str]] = []

    def detect_cycles(self, nodes: Dict[str, Node]) -> List[Set[str]]:
        """Detect all cycles in the graph using Tarjan's strongly connected components' algorithm.

        Returns the SCCs collected by ``_strong_connect``; that helper only
        records components with more than one node or with a self-loop, so
        trivial single-node components are already filtered out.
        """
        self.cycles.clear()
        self.index_counter = 0
        self.index.clear()
        self.low_link.clear()
        self.stack.clear()
        self.on_stack.clear()
        for node_id in nodes:
            if node_id not in self.index:
                self._strong_connect(node_id, nodes)
        # return [cycle for cycle in self.cycles if len(cycle) > 1 or self._has_self_loop(next(iter(cycle)), nodes)]
        return self.cycles

    def _has_self_loop(self, node_id: str, nodes: Dict[str, Node]) -> bool:
        """Check if a node has a self-loop (an outgoing edge back to itself)."""
        node = nodes.get(node_id)
        if not node:
            return False
        return any(edge_link.target.id == node_id for edge_link in node.iter_outgoing_edges())

    def _strong_connect(self, node_id: str, nodes: Dict[str, Node]) -> None:
        """Recursive part of Tarjan's algorithm.

        NOTE(review): recursion depth grows with the longest path in the
        graph; very deep graphs could hit Python's recursion limit.
        """
        self.index[node_id] = self.index_counter
        self.low_link[node_id] = self.index_counter
        self.index_counter += 1
        self.stack.append(node_id)
        self.on_stack.add(node_id)
        node = nodes.get(node_id)
        if not node:
            return
        # Consider successors of node
        for edge_link in node.iter_outgoing_edges():
            successor_id = edge_link.target.id
            if successor_id not in self.index:
                # Successor not yet visited: recurse, then propagate its low-link.
                self._strong_connect(successor_id, nodes)
                self.low_link[node_id] = min(self.low_link[node_id], self.low_link[successor_id])
            elif successor_id in self.on_stack:
                # Successor is in the current SCC candidate: take its index.
                self.low_link[node_id] = min(self.low_link[node_id], self.index[successor_id])
        # If node is a root node, pop the stack and generate an SCC
        if self.low_link[node_id] == self.index[node_id]:
            cycle = set()
            while True:
                w = self.stack.pop()
                self.on_stack.remove(w)
                cycle.add(w)
                if w == node_id:
                    break
            # Keep only genuine cycles: multi-node SCCs or a single node with a self-loop.
            if len(cycle) > 1 or self._has_self_loop(node_id, nodes):
                self.cycles.append(cycle)
class CycleManager:
    """Manages execution of cycles in the workflow graph."""

    def __init__(self):
        self.cycles: Dict[str, CycleInfo] = {}
        self.node_to_cycle: Dict[str, str] = {}  # Maps node ID to cycle ID
        self.active_cycles: Set[str] = set()

    def initialize_cycles(self, cycles: List[Set[str]], nodes: Dict[str, Node]) -> None:
        """Initialize cycle information from detected cycles.

        Rebuilds all bookkeeping from scratch; previously stored cycles are
        discarded.
        """
        self.cycles.clear()
        self.node_to_cycle.clear()
        for i, cycle_nodes in enumerate(cycles):
            # NOTE(review): embedding the set's repr in the id makes the id text
            # depend on set iteration order — confirm nothing relies on a stable
            # cycle_id string across runs.
            cycle_id = f"cycle_{i}_{cycle_nodes}"
            cycle_info = CycleInfo(
                cycle_id=cycle_id,
                nodes=set(cycle_nodes),
                entry_nodes=set(),
                exit_edges=[]
            )
            # Find entry nodes and exit edges
            self._analyze_cycle_structure(cycle_info, nodes)
            self.cycles[cycle_id] = cycle_info
            # Map nodes to their cycle
            for node_id in cycle_nodes:
                self.node_to_cycle[node_id] = cycle_id

    def _analyze_cycle_structure(self, cycle: CycleInfo, nodes: Dict[str, Node]) -> None:
        """Analyze cycle structure to find entry nodes and exit edges."""
        cycle_nodes = cycle.nodes

        # Find entry nodes (nodes with predecessors outside the cycle).
        # The closure reads ``node_id`` late-bound from the loop below, so it
        # always refers to the cycle node currently being examined.
        def judge_entry_predecessor(_predecessor: Node, _predecessor_id: str) -> bool:
            if _predecessor_id in cycle_nodes:
                return False
            for _edge_link in _predecessor.iter_outgoing_edges():
                if _edge_link.target.id == node_id:
                    # Only a triggering edge from outside marks an entry point;
                    # the first matching non-triggering edge disqualifies it.
                    if _edge_link.trigger:
                        return True
                    else:
                        return False
            return False

        for node_id in cycle_nodes:
            node = nodes.get(node_id)
            if not node:
                continue
            for predecessor in node.predecessors:
                if judge_entry_predecessor(predecessor, predecessor.id):
                    cycle.add_entry_node(node_id)
                    break
        # Find exit edges (edges from cycle nodes to nodes outside the cycle)
        for node_id in cycle_nodes:
            node = nodes.get(node_id)
            if not node:
                continue
            for edge_link in node.iter_outgoing_edges():
                if edge_link.target.id not in cycle_nodes and edge_link.trigger:
                    exit_edge = {
                        "from": node_id,
                        "to": edge_link.target.id,
                        "condition": edge_link.condition,
                        "trigger": edge_link.trigger,
                        "config": edge_link.config
                    }
                    cycle.add_exit_edge(exit_edge)

    def activate_cycle(self, cycle_id: str) -> None:
        """Activate a cycle for execution (no-op for unknown ids)."""
        if cycle_id in self.cycles:
            self.cycles[cycle_id].is_active = True
            self.active_cycles.add(cycle_id)

    def deactivate_cycle(self, cycle_id: str) -> None:
        """Deactivate a cycle and reset its iteration counter (no-op for unknown ids)."""
        if cycle_id in self.cycles:
            self.cycles[cycle_id].is_active = False
            self.cycles[cycle_id].iteration_count = 0
            self.active_cycles.discard(cycle_id)
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "workflow/cycle_manager.py",
"license": "Apache License 2.0",
"lines": 176,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
OpenBMB/ChatDev:workflow/executor/cycle_executor.py | """Cycle executor that runs workflow graphs containing loops."""
import copy
import threading
from typing import Dict, List, Callable, Any, Set, Optional
from entity.configs import Node, EdgeLink
from utils.log_manager import LogManager
from workflow.cycle_manager import CycleManager
from workflow.executor.parallel_executor import ParallelExecutor
from workflow.topology_builder import GraphTopologyBuilder
class CycleExecutor:
"""Execute workflow graphs that contain cycles.
Features:
- Scheduling is based on "super nodes"
- Parallel execution inside cycles
- Automatic detection of exit conditions
"""
def __init__(
    self,
    log_manager: LogManager,
    nodes: Dict[str, Node],
    cycle_execution_order: List[Dict[str, Any]],
    cycle_manager: CycleManager,
    execute_node_func: Callable[[Node], None],
):
    """Initialize the cycle executor.

    Args:
        log_manager: Logger instance
        nodes: Mapping of node ids to nodes
        cycle_execution_order: Super-node execution order with cycles
        cycle_manager: Cycle manager coordinating iterations
        execute_node_func: Callable that executes a single node
    """
    self.log_manager = log_manager
    self.nodes = nodes
    self.cycle_execution_order = cycle_execution_order
    self.cycle_manager = cycle_manager
    self.execute_node_func = execute_node_func
    # Shared parallel executor used for intra-layer fan-out.
    self.parallel_executor = ParallelExecutor(log_manager, nodes)
def execute(self) -> None:
    """Run the workflow that contains cycles.

    Super-node layers are processed sequentially; the items inside each
    layer are dispatched by ``_execute_super_layer``.
    """
    self.log_manager.debug("Executing graph with cycles using super-node scheduler")
    for layer_idx, layer_items in enumerate(self.cycle_execution_order):
        self.log_manager.debug(f"Executing super-node layer {layer_idx} with {len(layer_items)} items")
        self._execute_super_layer(layer_items)
def _execute_super_layer(self, layer_items: List[Dict[str, Any]]) -> None:
    """Execute a single super-node layer (currently always the parallel path)."""
    self._execute_super_layer_parallel(layer_items)
def _execute_super_layer_parallel(self, layer_items: List[Dict[str, Any]]) -> None:
    """Execute a super-node layer in parallel."""

    def item_desc_func(item: Dict[str, Any]) -> str:
        # Human-readable description the parallel executor uses for logging.
        if item["type"] == "cycle":
            return f"cycle {item['cycle_id']}"
        elif item["type"] == "node":
            # New format
            return f"node {item['node_id']}"
        else:
            # Old format: "layer" items carry a single-node list
            return f"node {item['nodes'][0]}"

    self.parallel_executor.execute_items_parallel(
        layer_items,
        self._execute_super_item,
        item_desc_func
    )
def _execute_super_item(self, item: Dict[str, Any]) -> None:
"""Execute a single super-node item (node or cycle)."""
if item["type"] == "layer":
# Old format: {"type": "layer", "nodes": [node_id]}
self._execute_single_node(item["nodes"][0])
elif item["type"] == "node":
# New format from GraphTopologyBuilder: {"type": "node", "node_id": "..."}
self._execute_single_node(item["node_id"])
elif item["type"] == "cycle":
self._execute_cycle(item)
def _execute_single_node(self, node_id: str) -> None:
"""Execute a non-cycle node."""
self.log_manager.debug(f"Executing non-cycle node: {node_id}")
node = self.nodes[node_id]
if node.is_triggered():
self.execute_node_func(node)
else:
self.log_manager.warning(f"Node {node_id} is not triggered, skipping execution")
def _execute_cycle(self, cycle_info: Dict[str, Any]) -> None:
"""Execute a cycle using the multi-iteration logic."""
cycle_id = cycle_info["cycle_id"]
nodes = cycle_info["nodes"]
self.log_manager.debug(f"Executing cycle {cycle_id} with nodes: {nodes}")
# Step 2: Validate cycle entry uniqueness
try:
initial_node_id = self._validate_cycle_entry(cycle_id, nodes)
except ValueError as e:
self.log_manager.error(str(e))
raise
if initial_node_id is None:
self.log_manager.debug(
f"Cycle {cycle_id} has no triggered entry node in this pass; skipping execution"
)
return
# Store initial node in cycle_manager
self.cycle_manager.cycles[cycle_id].initial_node = initial_node_id
self.log_manager.debug(f"Cycle {cycle_id} initial node: {initial_node_id}")
# Activate cycle
self.cycle_manager.activate_cycle(cycle_id)
# Step 4: Execute cycle with iterations
self._execute_cycle_with_iterations(
cycle_id,
nodes,
initial_node_id,
max_iterations=self.cycle_manager.cycles[cycle_id].get_max_iterations()
)
# Cleanup
self.cycle_manager.deactivate_cycle(cycle_id)
self.log_manager.debug(f"Cycle {cycle_id} completed")
# ==================== New Methods for Refactored Cycle Execution ====================
def _validate_cycle_entry(self, cycle_id: str, nodes: List[str]) -> str | None:
"""
Validate that exactly one node in the cycle is triggered by external edges.
Args:
cycle_id: The cycle ID
nodes: List of node IDs in the cycle
Returns:
The ID of the unique initial node
Raises:
ValueError: If no node or multiple nodes are triggered
"""
triggered_nodes: List[str] = []
for node_id in nodes:
node = self.nodes[node_id]
# Check if any external predecessor (node outside the cycle) triggers this node
for predecessor in node.predecessors:
if predecessor.id not in nodes: # External node
edge = predecessor.find_outgoing_edge(node_id)
if edge and edge.trigger and edge.triggered:
triggered_nodes.append(node_id)
break
cycle_info = self.cycle_manager.cycles.get(cycle_id)
configured_entry = cycle_info.configured_entry_node if cycle_info else None
if len(triggered_nodes) == 0:
if configured_entry:
return configured_entry
return None
elif len(triggered_nodes) > 1:
raise ValueError(
f"Cycle {cycle_id} has multiple triggered entry nodes: {triggered_nodes}. "
"Only one entry node must be triggered when entering a cycle."
)
entry_node = triggered_nodes[0]
if configured_entry and entry_node != configured_entry:
raise ValueError(
f"Cycle {cycle_id} entry mismatch: configured '{configured_entry}' "
f"but triggered '{entry_node}'",
)
return entry_node
def _execute_cycle_with_iterations(
self,
cycle_id: str,
cycle_nodes: List[str],
initial_node_id: str,
max_iterations: int,
) -> Set[str]:
"""
Execute a cycle with multiple iterations.
Args:
cycle_id: Cycle ID
cycle_nodes: List of all nodes in the cycle
initial_node_id: Initial node ID
max_iterations: Maximum number of iterations
Returns:
A tuple of two sets:
- exit_nodes: nodes triggered outside the *current* cycle scope
- external_nodes: subset of exit_nodes that are also outside the
provided parent_cycle_nodes scope
"""
iteration = 0
while iteration < max_iterations:
self.log_manager.debug(
f"Cycle {cycle_id} iteration {iteration + 1}/{max_iterations}"
)
# Step 1: Detect nested cycles in the scoped subgraph
inner_cycles = self._detect_cycles_in_scope(cycle_nodes, initial_node_id)
# Build topological layers (whether there are nested cycles or not)
execution_layers = self._build_topological_layers_in_scope(
cycle_nodes, initial_node_id, inner_cycles,
is_first_iteration=(iteration == 0)
)
# Execute the topological layers
external_nodes = self._execute_scope_layers(
execution_layers,
cycle_id,
cycle_nodes,
initial_node_id=initial_node_id,
is_first_iteration=(iteration == 0)
)
if external_nodes:
self.log_manager.debug(
f"Cycle {cycle_id} exited - external nodes triggered: {sorted(external_nodes)}"
)
return external_nodes
# Step 4: Check if initial node is retriggered
if not self._is_initial_node_retriggered(initial_node_id, cycle_nodes):
self.log_manager.debug(
f"Cycle {cycle_id} completed - initial node not retriggered"
)
break
iteration += 1
if iteration >= max_iterations:
self.log_manager.warning(
f"Cycle {cycle_id} reached max iterations ({max_iterations})"
)
return set()
def _detect_cycles_in_scope(
self,
scope_nodes: List[str],
initial_node_id: str
) -> List[Set[str]]:
"""
Detect nested cycles within the scoped subgraph.
Constructs a subgraph containing only:
1. Nodes in scope_nodes
2. Edges where both source and target are in scope_nodes
3. Initial node's incoming edges are REMOVED (to break the outer cycle)
Args:
scope_nodes: List of node IDs in the current scope
initial_node_id: Initial node ID (whose incoming edges are removed)
Returns:
List of detected nested cycles (excluding the current cycle itself)
"""
# Build scoped nodes with initial node's incoming edges removed
scoped_nodes = self._build_scoped_nodes(scope_nodes, clear_entry_node=initial_node_id)
# Use GraphTopologyBuilder to detect cycles
all_cycles = GraphTopologyBuilder.detect_cycles(scoped_nodes)
# Filter out single-node "cycles" (unless they have self-loops)
nested_cycles = [
cycle for cycle in all_cycles
if len(cycle) > 1
]
return nested_cycles
def _build_scoped_nodes(
self,
scope_nodes: List[str],
clear_entry_node: Optional[str] = None
) -> Dict[str, Node]:
"""
Build a scoped subgraph containing only nodes and edges within the scope.
Args:
scope_nodes: List of node IDs in the scope
clear_entry_node: If specified, this node's incoming edges will be removed
(used to break the outer cycle when detecting nested cycles)
Returns:
Dictionary of scoped nodes
"""
scoped_nodes = {}
scope_nodes_set = set(scope_nodes)
for node_id in scope_nodes:
original_node = self.nodes[node_id]
# Shallow copy the node
scoped_node = copy.copy(original_node)
# Filter outgoing edges: only keep edges where target is in scope AND trigger=true
# Special case: if target is clear_entry_node, remove this edge
scoped_edges = [
edge_link for edge_link in original_node.iter_outgoing_edges()
if edge_link.target.id in scope_nodes_set
and edge_link.trigger
and edge_link.target.id != clear_entry_node # Remove edges to entry node
]
scoped_node._outgoing_edges = scoped_edges
# Filter predecessors: only keep predecessors in scope AND with trigger=true edge
# Special case: if this node is clear_entry_node, clear all predecessors
if node_id == clear_entry_node:
scoped_node.predecessors = []
else:
scoped_predecessors = []
for pred in original_node.predecessors:
if pred.id in scope_nodes_set:
# Check if the edge from pred to node has trigger=true
edge = pred.find_outgoing_edge(node_id)
if edge and edge.trigger:
scoped_predecessors.append(pred)
scoped_node.predecessors = scoped_predecessors
# Filter successors: only keep successors in scope AND with trigger=true edge
# Special case: remove clear_entry_node from successors
scoped_successors = [
succ for succ in original_node.successors
if succ.id in scope_nodes_set
and succ.id != clear_entry_node # Remove entry node from successors
and any(
edge_link.target.id == succ.id and edge_link.trigger
for edge_link in original_node.iter_outgoing_edges()
)
]
scoped_node.successors = scoped_successors
scoped_nodes[node_id] = scoped_node
return scoped_nodes
def _build_topological_layers_in_scope(
self,
scope_nodes: List[str],
initial_node_id: str,
inner_cycles: List[Set[str]],
is_first_iteration: bool = False
) -> List[Dict[str, Any]]:
"""
Build topological execution order for the scoped subgraph.
Args:
scope_nodes: List of node IDs in the scope
initial_node_id: Initial node ID
inner_cycles: List of nested cycles detected in the scope
is_first_iteration: Whether this is the first iteration (affects initial node handling)
Returns:
List of execution layers, each containing execution items
"""
# Build scoped nodes WITHOUT clearing entry node
# We want to keep all edges intact for execution
scoped_nodes = self._build_scoped_nodes(scope_nodes, clear_entry_node=None)
# Handle entry points based on iteration:
# - First iteration: manually clear initial node's predecessors (for in_degree calculation only)
# - Subsequent iterations: clear predecessors for all triggered nodes
if is_first_iteration:
# Clear initial node's predecessors to make it an entry point
if initial_node_id in scoped_nodes:
scoped_nodes[initial_node_id].predecessors = []
else:
# Subsequent iterations: clear predecessors for all triggered nodes
for node_id in scope_nodes:
if self.nodes[node_id].is_triggered():
scoped_nodes[node_id].predecessors = []
# Extract scoped edges from scoped_nodes (not original nodes)
# This ensures consistency with the filtered graph structure
scoped_edges = []
# Collect nodes whose incoming edges should be excluded
# (to break cycles in topological sorting)
exclude_targets = set()
if is_first_iteration:
# First iteration: exclude edges to initial_node
exclude_targets.add(initial_node_id)
else:
# Subsequent iterations: exclude edges to all triggered nodes
for node_id in scope_nodes:
if self.nodes[node_id].is_triggered():
exclude_targets.add(node_id)
for node_id in scope_nodes:
# Use scoped_node to get filtered edges
scoped_node = scoped_nodes.get(node_id)
if scoped_node:
for edge_link in scoped_node.iter_outgoing_edges():
# Exclude edges pointing to nodes in exclude_targets
if edge_link.target.id in exclude_targets:
continue
scoped_edges.append({
"from": node_id,
"to": edge_link.target.id,
"trigger": edge_link.trigger,
"condition": edge_link.condition
})
# Use GraphTopologyBuilder to build execution order
if not inner_cycles:
# No nested cycles, use DAG layers
layers = GraphTopologyBuilder.build_dag_layers(scoped_nodes)
return layers
else:
# Has nested cycles, use super-node approach
super_graph = GraphTopologyBuilder.create_super_node_graph(
scoped_nodes, scoped_edges, inner_cycles
)
layers = GraphTopologyBuilder.topological_sort_super_nodes(
super_graph, inner_cycles
)
return layers
def _execute_scope_layers(
self,
execution_layers: List[List[Dict[str, Any]]],
parent_cycle_id: str,
parent_cycle_nodes: List[str],
initial_node_id: Optional[str] = None,
is_first_iteration: bool = False
) -> Set[str]:
"""
Execute scoped layers with parallelism, supporting nested cycles.
Args:
execution_layers: List of execution layers
parent_cycle_id: Parent cycle ID
parent_cycle_nodes: List of nodes in the parent cycle
initial_node_id: Initial node ID (for first iteration special handling)
is_first_iteration: Whether this is the first iteration
Returns:
external_nodes: subset of exit_nodes outside parent_cycle_nodes_set
"""
scope_node_set = set(parent_cycle_nodes)
external_nodes: Set[str] = set()
stop_event = threading.Event()
result_lock = threading.Lock()
def record_external(nodes: Set[str]) -> None:
nonlocal external_nodes
if not nodes:
return
with result_lock:
if nodes:
external_nodes.update(nodes)
stop_event.set()
def item_desc(item: Dict[str, Any]) -> str:
if item["type"] == "node":
return f"node {item['node_id']}"
if item["type"] == "cycle":
return f"cycle {item['cycle_id']}"
return "layer_item"
for layer in execution_layers:
if stop_event.is_set():
break
def executor_func(item: Dict[str, Any]) -> None:
if stop_event.is_set():
return
if item["type"] == "node":
_node_id = item["node_id"]
force_execute = is_first_iteration and (_node_id == initial_node_id)
targets = self._execute_single_cycle_node_in_scope(
_node_id,
scope_node_set,
force_execute=force_execute
)
if targets:
record_external(targets)
elif item["type"] == "cycle":
inner_cycle_nodes = item["nodes"]
inner_cycle_id = item["cycle_id"]
self.log_manager.debug(
f"Executing nested cycle {inner_cycle_id} within cycle {parent_cycle_id}"
)
try:
inner_initial_node = self._validate_cycle_entry(
inner_cycle_id, inner_cycle_nodes
)
except ValueError as e:
self.log_manager.error(str(e))
raise
if inner_initial_node is None:
self.log_manager.debug(
f"Nested cycle {inner_cycle_id} has no triggered entry; skipping"
)
return
inner_external_nodes = self._execute_cycle_with_iterations(
inner_cycle_id,
inner_cycle_nodes,
inner_initial_node,
max_iterations=100,
)
if inner_external_nodes:
filtered = {
node
for node in inner_external_nodes
if node not in scope_node_set
}
if filtered:
record_external(filtered)
self.parallel_executor.execute_items_parallel(
layer,
executor_func,
item_desc
)
if stop_event.is_set():
break
if external_nodes:
for node_id in scope_node_set:
self.nodes[node_id].reset_triggers()
return external_nodes
def _execute_single_cycle_node_in_scope(
self,
node_id: str,
scope_node_set: Set[str],
force_execute: bool = False
) -> Set[str]:
"""
Execute a single node within a cycle scope.
Args:
node_id: Node ID to execute
scope_node_set: Nodes that belong to the current scoped cycle
force_execute: If True, execute even if not triggered (for initial node in first iteration)
Returns:
Set of node IDs triggered outside the current scoped cycle
"""
node = self.nodes[node_id]
# Check if node is triggered (unless force_execute is True)
if not force_execute:
if not node.is_triggered():
return set()
# Reset edge triggers
for edge_link in node.iter_outgoing_edges():
edge_link.triggered = False
# Execute the node
self.execute_node_func(node)
# Check if any external node was triggered
external_targets: Set[str] = set()
for edge_link in node.iter_outgoing_edges():
if edge_link.target.id not in scope_node_set and edge_link.triggered:
self.log_manager.debug(
f"Node {node_id} triggered external node {edge_link.target.id}"
)
external_targets.add(edge_link.target.id)
return external_targets
def _is_initial_node_retriggered(
self,
initial_node_id: str,
cycle_nodes: List[str]
) -> bool:
"""
Check if the initial node is retriggered by any internal edge (from within the cycle).
Args:
initial_node_id: Initial node ID
cycle_nodes: List of nodes in the cycle
Returns:
True if the initial node is retriggered by an internal edge
"""
initial_node = self.nodes[initial_node_id]
for predecessor in initial_node.predecessors:
# Only check predecessors within the cycle
if predecessor.id in cycle_nodes:
edge = predecessor.find_outgoing_edge(initial_node_id)
if edge and edge.trigger and edge.triggered:
return True
return False
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "workflow/executor/cycle_executor.py",
"license": "Apache License 2.0",
"lines": 517,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
OpenBMB/ChatDev:workflow/executor/dag_executor.py | """Executor for DAG (Directed Acyclic Graph) workflows."""
from typing import Dict, List, Callable
from entity.configs import Node
from utils.log_manager import LogManager
from workflow.executor.parallel_executor import ParallelExecutor
class DAGExecutor:
    """Run an acyclic workflow graph.

    The graph is processed one topological layer at a time; nodes inside a
    layer may run concurrently, and serialization of special (e.g. Human)
    nodes is delegated to ``ParallelExecutor``.
    """
    def __init__(
        self,
        log_manager: LogManager,
        nodes: Dict[str, Node],
        layers: List[List[str]],
        execute_node_func: Callable[[Node], None]
    ):
        """Set up the executor.

        Args:
            log_manager: Logger instance
            nodes: Mapping of node ids to ``Node`` objects
            layers: Topological layers
            execute_node_func: Callable used to execute a single node
        """
        self.log_manager = log_manager
        self.nodes = nodes
        self.layers = layers
        self.execute_node_func = execute_node_func
        self.parallel_executor = ParallelExecutor(log_manager, nodes)
    def execute(self) -> None:
        """Execute the DAG workflow."""
        for layer_idx, layer_nodes in enumerate(self.layers):
            self.log_manager.debug(f"Executing Layer {layer_idx} with nodes: {layer_nodes}")
            self._run_layer(layer_nodes)
    def _run_layer(self, layer_nodes: List[str]) -> None:
        """Dispatch one topological layer, skipping untriggered nodes."""
        def run_when_triggered(node_id: str) -> None:
            # Guard clause: untriggered nodes are logged and skipped.
            candidate = self.nodes[node_id]
            if not candidate.is_triggered():
                self.log_manager.debug(f"Node {node_id} skipped - not triggered")
                return
            self.execute_node_func(candidate)
        self.parallel_executor.execute_nodes_parallel(layer_nodes, run_when_triggered)
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "workflow/executor/dag_executor.py",
"license": "Apache License 2.0",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
OpenBMB/ChatDev:workflow/executor/dynamic_edge_executor.py | """Dynamic edge executor for edge-level Map and Tree execution.
Handles dynamic node expansion based on edge-level dynamic configuration.
When a message passes through an edge with dynamic config, the target node
is virtually expanded into multiple instances based on split results.
"""
import concurrent.futures
from typing import Callable, Dict, List, Optional
from entity.configs import Node
from entity.configs.edge.dynamic_edge_config import DynamicEdgeConfig
from entity.messages import Message, MessageRole
from runtime.node.splitter import create_splitter_from_config, group_messages
from utils.log_manager import LogManager
class DynamicEdgeExecutor:
    """Execute edge-level dynamic expansion.
    When an edge has dynamic configuration, this executor:
    1. Splits the payload passing through the edge
    2. Executes the target node for each split unit
    3. Collects and returns results (flat for Map, reduced for Tree)
    """
    def __init__(
        self,
        log_manager: LogManager,
        node_executor_func: Callable[[Node, List[Message]], List[Message]],
    ):
        """Initialize the dynamic edge executor.
        Args:
            log_manager: Logger instance
            node_executor_func: Function to execute a node with inputs
        """
        self.log_manager = log_manager
        self.node_executor_func = node_executor_func
    def execute(
        self,
        target_node: Node,
        payload: Message,
        dynamic_config: DynamicEdgeConfig,
        static_inputs: Optional[List[Message]] = None,
    ) -> List[Message]:
        """Execute dynamic expansion for an edge.
        Args:
            target_node: The node to execute (will be used as template)
            payload: The message passing through the edge
            dynamic_config: Edge dynamic configuration
            static_inputs: Optional static inputs from non-dynamic edges
        Returns:
            List of output messages from all executions
        Raises:
            ValueError: If the dynamic config is neither Map nor Tree
        """
        split_config = dynamic_config.split
        # Create splitter based on config
        splitter = create_splitter_from_config(split_config)
        # Split the payload into execution units
        execution_units = splitter.split([payload])
        if not execution_units:
            self.log_manager.debug(
                f"Dynamic edge -> {target_node.id}: no execution units after split"
            )
            return []
        self.log_manager.info(
            f"Dynamic edge -> {target_node.id}: splitting into {len(execution_units)} parallel units"
        )
        if dynamic_config.is_map():
            return self._execute_map(
                target_node, execution_units, dynamic_config, static_inputs
            )
        elif dynamic_config.is_tree():
            return self._execute_tree(
                target_node, execution_units, dynamic_config, static_inputs
            )
        else:
            raise ValueError(f"Unknown dynamic type: {dynamic_config.type}")
    def execute_from_inputs(
        self,
        target_node: Node,
        inputs: List[Message],
        dynamic_config: DynamicEdgeConfig,
        static_inputs: Optional[List[Message]] = None,
    ) -> List[Message]:
        """Execute dynamic expansion using all collected inputs.
        This method is called from _execute_node when a node has incoming edges
        with dynamic configuration. All inputs are already collected and passed here.
        Args:
            target_node: The node to execute
            inputs: Dynamic edge inputs to be split
            dynamic_config: Edge dynamic configuration
            static_inputs: Non-dynamic edge inputs to be replicated to all units
        Returns:
            List of output messages from all executions
        Raises:
            ValueError: If the dynamic config is neither Map nor Tree
        """
        split_config = dynamic_config.split
        static_inputs = static_inputs or []
        # Create splitter based on config
        splitter = create_splitter_from_config(split_config)
        # Split only dynamic inputs into execution units
        execution_units = splitter.split(inputs)
        if not execution_units:
            self.log_manager.debug(
                f"Dynamic node {target_node.id}: no execution units after split"
            )
            # If no dynamic inputs but have static inputs, execute once with static inputs
            if static_inputs:
                return self.node_executor_func(target_node, static_inputs)
            return []
        self.log_manager.info(
            f"Dynamic node {target_node.id}: splitting {len(inputs)} dynamic inputs into "
            f"{len(execution_units)} parallel units ({dynamic_config.type} mode)"
            + (f", with {len(static_inputs)} static inputs replicated to each" if static_inputs else "")
        )
        if dynamic_config.is_map():
            return self._execute_map(
                target_node, execution_units, dynamic_config, static_inputs
            )
        elif dynamic_config.is_tree():
            return self._execute_tree(
                target_node, execution_units, dynamic_config, static_inputs
            )
        else:
            raise ValueError(f"Unknown dynamic type: {dynamic_config.type}")
    def _execute_map(
        self,
        target_node: Node,
        execution_units: List[List[Message]],
        dynamic_config: DynamicEdgeConfig,
        static_inputs: Optional[List[Message]] = None,
    ) -> List[Message]:
        """Execute in Map mode (fan-out only).
        Args:
            target_node: Target node template
            execution_units: Split message units
            dynamic_config: Dynamic configuration
            static_inputs: Static inputs to copy to all units
        Returns:
            Flat list of all output messages
        """
        map_config = dynamic_config.as_map_config()
        max_parallel = map_config.max_parallel
        all_outputs: List[Message] = []
        static_inputs = static_inputs or []
        if len(execution_units) == 1:
            # Single unit - execute directly (no thread pool overhead)
            unit_inputs = list(static_inputs) + execution_units[0]
            outputs = self._execute_unit(target_node, unit_inputs, 0)
            all_outputs.extend(outputs)
        else:
            # Multiple units - parallel execution
            effective_workers = min(len(execution_units), max_parallel)
            with concurrent.futures.ThreadPoolExecutor(max_workers=effective_workers) as executor:
                futures: Dict[concurrent.futures.Future, int] = {}
                for idx, unit in enumerate(execution_units):
                    unit_inputs = list(static_inputs) + unit
                    future = executor.submit(
                        self._execute_unit, target_node, unit_inputs, idx
                    )
                    futures[future] = idx
                results_by_idx: Dict[int, List[Message]] = {}
                # as_completed lets failures surface promptly; the first
                # exception propagates, and the with-block still waits for the
                # remaining futures before exiting.
                for future in concurrent.futures.as_completed(futures):
                    idx = futures[future]
                    try:
                        result = future.result()
                        results_by_idx[idx] = result
                        self.log_manager.debug(
                            f"Dynamic edge -> {target_node.id}#{idx}: "
                            f"completed with {len(result)} outputs"
                        )
                    except Exception as e:
                        self.log_manager.error(
                            f"Dynamic edge -> {target_node.id}#{idx}: "
                            f"failed with error: {e}"
                        )
                        raise
            # Combine results in original order, so output order is
            # deterministic regardless of completion order
            for idx in range(len(execution_units)):
                if idx in results_by_idx:
                    all_outputs.extend(results_by_idx[idx])
        self.log_manager.info(
            f"Dynamic edge -> {target_node.id}: "
            f"Map completed with {len(all_outputs)} total outputs"
        )
        return all_outputs
    def _execute_tree(
        self,
        target_node: Node,
        execution_units: List[List[Message]],
        dynamic_config: DynamicEdgeConfig,
        static_inputs: Optional[List[Message]] = None,
    ) -> List[Message]:
        """Execute in Tree mode (fan-out + reduce).
        Repeatedly groups the current messages and executes the target node on
        each group, shrinking the message list layer by layer until at most a
        single reduced result remains.
        Args:
            target_node: Target node template
            execution_units: Split message units
            dynamic_config: Dynamic configuration
            static_inputs: Static inputs (used in first layer only)
        Returns:
            Single-element list with the final reduced result
        Raises:
            ValueError: If the tree configuration is invalid
        """
        tree_config = dynamic_config.as_tree_config()
        if tree_config is None:
            raise ValueError(f"Invalid tree configuration for edge -> {target_node.id}")
        group_size = tree_config.group_size
        max_parallel = tree_config.max_parallel
        static_inputs = static_inputs or []
        # Flatten execution units to individual messages
        current_messages: List[Message] = []
        for unit in execution_units:
            current_messages.extend(unit)
        if not current_messages:
            return []
        self.log_manager.info(
            f"Dynamic edge -> {target_node.id}: "
            f"Tree starting with {len(current_messages)} inputs, group_size={group_size}"
        )
        layer = 0
        is_first_layer = True
        # Reduction loop
        while len(current_messages) > 1:
            layer += 1
            # Group messages
            groups = group_messages(current_messages, group_size)
            self.log_manager.debug(
                f"Dynamic edge -> {target_node.id} layer {layer}: "
                f"processing {len(groups)} groups"
            )
            layer_outputs: List[Message] = []
            if len(groups) == 1:
                # Single group - execute directly
                group_inputs = groups[0]
                if is_first_layer:
                    # Static inputs are only prepended on the first layer
                    group_inputs = list(static_inputs) + group_inputs
                outputs = self._execute_group(target_node, group_inputs, layer, 0)
                layer_outputs.extend(outputs)
            else:
                # Multiple groups - parallel execution
                effective_workers = min(len(groups), max_parallel)
                with concurrent.futures.ThreadPoolExecutor(max_workers=effective_workers) as executor:
                    futures: Dict[concurrent.futures.Future, int] = {}
                    for idx, group in enumerate(groups):
                        group_inputs = group
                        if is_first_layer:
                            group_inputs = list(static_inputs) + group_inputs
                        future = executor.submit(
                            self._execute_group, target_node, group_inputs, layer, idx
                        )
                        futures[future] = idx
                    results_by_idx: Dict[int, List[Message]] = {}
                    for future in concurrent.futures.as_completed(futures):
                        idx = futures[future]
                        try:
                            result = future.result()
                            results_by_idx[idx] = result
                        except Exception as e:
                            self.log_manager.error(
                                f"Dynamic edge -> {target_node.id}#{layer}-{idx}: "
                                f"failed with error: {e}"
                            )
                            raise
                # Reassemble in submission order for deterministic output
                for idx in range(len(groups)):
                    if idx in results_by_idx:
                        layer_outputs.extend(results_by_idx[idx])
            self.log_manager.debug(
                f"Dynamic edge -> {target_node.id} layer {layer}: "
                f"produced {len(layer_outputs)} outputs"
            )
            current_messages = layer_outputs
            is_first_layer = False
            # Safety check: hard cap guards against configurations where the
            # reduction does not shrink the message list
            if layer > 100:
                self.log_manager.error(
                    f"Dynamic edge -> {target_node.id}: exceeded maximum layers"
                )
                break
        self.log_manager.info(
            f"Dynamic edge -> {target_node.id}: "
            f"Tree completed after {layer} layers with {len(current_messages)} output(s)"
        )
        return current_messages
    def _execute_unit(
        self,
        node: Node,
        unit_inputs: List[Message],
        unit_index: int,
    ) -> List[Message]:
        """Execute a single map unit."""
        self.log_manager.debug(
            f"Dynamic edge -> {node.id}#{unit_index}: "
            f"executing with {len(unit_inputs)} inputs"
        )
        # Tag inputs with unit index
        # Clone messages first to avoid mutating shared inputs in parallel threads
        unit_inputs = [msg.clone() for msg in unit_inputs]
        for msg in unit_inputs:
            # Copy the metadata dict before writing so clones never share state
            metadata = dict(msg.metadata)
            metadata["dynamic_edge_unit_index"] = unit_index
            msg.metadata = metadata
        # Execute using node executor
        outputs = self.node_executor_func(node, unit_inputs)
        # Tag outputs with unit index
        for msg in outputs:
            metadata = dict(msg.metadata)
            metadata["dynamic_edge_unit_index"] = unit_index
            msg.metadata = metadata
        return outputs
    def _execute_group(
        self,
        node: Node,
        group_inputs: List[Message],
        layer: int,
        group_index: int,
    ) -> List[Message]:
        """Execute a single tree group."""
        instance_id = f"{node.id}#{layer}-{group_index}"
        self.log_manager.debug(
            f"Dynamic edge -> {instance_id}: executing with {len(group_inputs)} inputs"
        )
        # Tag inputs
        # Clone messages first to avoid mutating shared inputs in parallel threads
        group_inputs = [msg.clone() for msg in group_inputs]
        for msg in group_inputs:
            metadata = dict(msg.metadata)
            metadata["dynamic_edge_tree_layer"] = layer
            metadata["dynamic_edge_tree_group"] = group_index
            msg.metadata = metadata
        # Execute
        outputs = self.node_executor_func(node, group_inputs)
        # Tag outputs
        for msg in outputs:
            metadata = dict(msg.metadata)
            metadata["dynamic_edge_tree_layer"] = layer
            metadata["dynamic_edge_tree_group"] = group_index
            metadata["dynamic_edge_instance_id"] = instance_id
            msg.metadata = metadata
            msg.role = MessageRole.USER  # Mark as user-generated
        return outputs
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "workflow/executor/dynamic_edge_executor.py",
"license": "Apache License 2.0",
"lines": 331,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
OpenBMB/ChatDev:workflow/executor/parallel_executor.py | """Parallel execution helpers that eliminate duplicated code."""
import concurrent.futures
from typing import Any, Callable, List, Tuple
from utils.log_manager import LogManager
class ParallelExecutor:
    """Coordinate concurrent execution of workflow work items.

    Centralizes the fan-out logic shared by the executors and keeps items
    flagged as blocking (e.g. Human interaction) on a sequential path.
    """
    def __init__(self, log_manager: LogManager, nodes_dict: dict):
        """Create the executor.

        Args:
            log_manager: Logger instance
            nodes_dict: Mapping of ``node_id`` to ``Node``
        """
        self.log_manager = log_manager
        self.nodes_dict = nodes_dict
    def execute_items_parallel(
        self,
        items: List[Any],
        executor_func: Callable,
        item_desc_func: Callable[[Any], str],
        has_blocking_func: Callable[[Any], bool] | None = None,
    ) -> None:
        """Run *items*, parallelizing whatever is safe to parallelize.

        Args:
            items: Items to execute
            executor_func: Callable invoked once per item
            item_desc_func: Callable producing a readable item description
            has_blocking_func: Optional predicate marking items that must run
                sequentially
        """
        blocking, concurrent_safe = self._split_by_blocking(items, has_blocking_func)
        if concurrent_safe:
            self._run_concurrently(concurrent_safe, executor_func, item_desc_func)
        if blocking:
            self._run_one_by_one(blocking, executor_func, item_desc_func)
    def execute_nodes_parallel(
        self,
        node_ids: List[str],
        executor_func: Callable[[str], None]
    ) -> None:
        """Run a batch of nodes concurrently.

        Thin wrapper over ``execute_items_parallel`` for plain node ids;
        no node is treated as blocking here.

        Args:
            node_ids: List of node identifiers
            executor_func: Callable that executes a single node
        """
        self.execute_items_parallel(
            node_ids,
            executor_func,
            lambda node_id: f"node {node_id}",
            lambda node_id: False,
        )
    def _split_by_blocking(
        self,
        items: List[Any],
        has_blocking_func: Callable[[Any], bool] | None
    ) -> Tuple[List[Any], List[Any]]:
        """Return ``(blocking, parallelizable)`` partitions of *items*."""
        blocking: List[Any] = []
        parallelizable: List[Any] = []
        for item in items:
            bucket = blocking if has_blocking_func and has_blocking_func(item) else parallelizable
            bucket.append(item)
        return blocking, parallelizable
    def _run_concurrently(
        self,
        items: List[Any],
        executor_func: Callable,
        item_desc_func: Callable[[Any], str]
    ) -> None:
        """Fan a batch out to a thread pool and wait for every item.

        Args:
            items: Items to execute
            executor_func: Callable per item
            item_desc_func: Callable returning a readable description
        """
        self.log_manager.debug(f"Executing {len(items)} items in parallel")
        with concurrent.futures.ThreadPoolExecutor(max_workers=len(items)) as pool:
            pending = [(item, pool.submit(executor_func, item)) for item in items]
            # Collect every result; the first failure is logged and re-raised.
            for item, future in pending:
                try:
                    future.result()
                except Exception as e:
                    self.log_manager.error(f"{item_desc_func(item)} failed: {str(e)}")
                    raise
                self.log_manager.debug(f"{item_desc_func(item)} completed successfully")
    def _run_one_by_one(
        self,
        items: List[Any],
        executor_func: Callable,
        item_desc_func: Callable[[Any], str]
    ) -> None:
        """Execute each item sequentially, preserving the given order.

        Args:
            items: Items to execute
            executor_func: Callable per item
            item_desc_func: Callable returning a readable description
        """
        for item in items:
            self.log_manager.debug(f"Executing {item_desc_func(item)} (sequential)")
            try:
                executor_func(item)
            except Exception as e:
                self.log_manager.error(f"{item_desc_func(item)} failed: {str(e)}")
                raise
            self.log_manager.debug(f"{item_desc_func(item)} completed successfully")
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "workflow/executor/parallel_executor.py",
"license": "Apache License 2.0",
"lines": 116,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
OpenBMB/ChatDev:workflow/executor/resource_manager.py | """Resource coordination helpers for workflow node execution."""
import threading
from contextlib import contextmanager
from dataclasses import dataclass
from typing import Dict, Iterable, List, Tuple
from entity.configs import Node
from runtime.node.registry import get_node_registration
from utils.log_manager import LogManager
@dataclass(frozen=True, slots=True)
class ResourceRequest:
    """Represents a single resource requirement.

    Frozen so requests can be safely shared, sorted, and deduplicated.
    """
    # Identifier of the shared resource this request refers to.
    key: str
    # Maximum number of concurrent holders allowed for the resource.
    limit: int
@dataclass(slots=True)
class _ResourceSlot:
    """Internal pairing of a semaphore with the limit it was built for."""
    # Counting semaphore guarding the resource.
    semaphore: threading.Semaphore
    # Capacity the semaphore was created with; used to detect limit changes.
    limit: int
class ResourceManager:
    """Coordinates shared resource usage across nodes.

    Each resource is identified by a string key and guarded by a counting
    semaphore whose capacity equals the configured limit.
    """

    def __init__(self, log_manager: LogManager | None = None):
        self.log_manager = log_manager
        self._lock = threading.Lock()
        self._resources: Dict[str, _ResourceSlot] = {}

    @contextmanager
    def guard_node(self, node: Node):
        """Hold every resource the node declares for the duration of the block."""
        with self._acquire_resources(self._resolve_node_requests(node)):
            yield

    def _resolve_node_requests(self, node: Node) -> List[ResourceRequest]:
        """Translate a node's registered capabilities into resource requests."""
        caps = get_node_registration(node.node_type).capabilities
        if caps.resource_key and caps.resource_limit and caps.resource_limit > 0:
            return [ResourceRequest(key=caps.resource_key, limit=caps.resource_limit)]
        return []

    @contextmanager
    def _acquire_resources(self, requests: Iterable[ResourceRequest]):
        """Acquire semaphores in deterministic key order; release in reverse."""
        held: List[Tuple[str, threading.Semaphore]] = []
        try:
            # Sorting by key gives a global acquisition order, avoiding deadlock
            # when several nodes need overlapping resource sets.
            for request in sorted(requests, key=lambda item: item.key):
                sem = self._get_or_create_resource(request)
                self._log_debug(f"Acquiring resource {request.key}")
                sem.acquire()
                held.append((request.key, sem))
            yield
        finally:
            # Release only what was actually acquired, in reverse order.
            for key, sem in reversed(held):
                sem.release()
                self._log_debug(f"Released resource {key}")

    def _get_or_create_resource(self, request: ResourceRequest) -> threading.Semaphore:
        """Return the semaphore for *request*, rebuilding it if the limit changed."""
        with self._lock:
            slot = self._resources.get(request.key)
            if slot is None or slot.limit != request.limit:
                slot = _ResourceSlot(
                    semaphore=threading.Semaphore(request.limit),
                    limit=request.limit,
                )
                self._resources[request.key] = slot
            return slot.semaphore

    def _log_debug(self, message: str) -> None:
        """Emit a debug line when a log manager is attached."""
        if self.log_manager:
            self.log_manager.debug(message)
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "workflow/executor/resource_manager.py",
"license": "Apache License 2.0",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
OpenBMB/ChatDev:workflow/graph.py | """Graph orchestration adapted to ChatDev design_0.4.0 workflows."""
import threading
from typing import Any, Callable, Dict, List, Optional
from runtime.node.agent.memory import MemoryBase, MemoryFactory, MemoryManager
from runtime.node.agent.thinking import ThinkingManagerBase, ThinkingManagerFactory
from entity.configs import Node, EdgeLink, AgentConfig, ConfigError
from entity.configs.edge import EdgeConditionConfig
from entity.configs.node.memory import SimpleMemoryConfig
from entity.messages import Message, MessageRole
from runtime.node.executor.base import ExecutionContext
from runtime.node.executor.factory import NodeExecutorFactory
from utils.logger import WorkflowLogger
from utils.exceptions import ValidationError, WorkflowExecutionError, WorkflowCancelledError
from utils.structured_logger import get_server_logger
from utils.human_prompt import (
CliPromptChannel,
HumanPromptService,
resolve_prompt_channel,
)
from workflow.cycle_manager import CycleManager
from workflow.graph_context import GraphContext
from workflow.graph_manager import GraphManager
from workflow.executor.resource_manager import ResourceManager
from workflow.runtime import (
RuntimeBuilder,
ResultArchiver,
DagExecutionStrategy,
CycleExecutionStrategy,
MajorityVoteStrategy,
)
from workflow.runtime.runtime_context import RuntimeContext
from runtime.edge.conditions import (
ConditionFactoryContext,
build_edge_condition_manager,
)
from runtime.edge.processors import (
ProcessorFactoryContext as PayloadProcessorFactoryContext,
build_edge_processor as build_edge_payload_processor,
)
from workflow.executor.dynamic_edge_executor import DynamicEdgeExecutor
# ------------------------------------------------------------------
# Executor class (includes all Memory and Thinking logic)
# ------------------------------------------------------------------
class ExecutionError(RuntimeError):
    """Raised when the workflow graph cannot be executed (e.g. layers not built)."""
class GraphExecutor:
"""Executes ChatDev_new graph workflows with integrated memory and thinking management."""
    def __init__(
        self,
        graph: GraphContext,
        *,
        session_id: Optional[str] = None,
        workspace_hook_factory: Optional[Callable[[RuntimeContext], Any]] = None,
        cancel_event: Optional[threading.Event] = None,
    ) -> None:
        """Initialize executor with graph context instance.

        Args:
            graph: Runtime graph context to execute.
            session_id: Optional session identifier forwarded to the runtime.
            workspace_hook_factory: Optional factory that receives the built
                runtime and returns a workspace hook installed on it.
            cancel_event: Optional externally-owned event used for cooperative
                cancellation; a private one is created when omitted.
        """
        self.majority_result = None
        self.graph: GraphContext = graph
        self.outputs = {}
        self.logger = self._create_logger()
        # Shared event; request_cancel() sets it and _raise_if_cancelled() polls it.
        self._cancel_event = cancel_event or threading.Event()
        self._cancel_reason: Optional[str] = None
        # RuntimeBuilder assembles managers (tools, functions, logging, tokens,
        # workspaces) from the graph; the hook factory may wrap the result.
        runtime = RuntimeBuilder(graph).build(logger=self.logger, session_id=session_id)
        if workspace_hook_factory:
            runtime.workspace_hook = workspace_hook_factory(runtime)
        self.runtime_context = runtime
        self.tool_manager = runtime.tool_manager
        self.function_manager = runtime.function_manager
        self.edge_processor_function_manager = runtime.edge_processor_function_manager
        self.log_manager = runtime.log_manager
        self.resource_manager = ResourceManager(self.log_manager)
        # Memory and Thinking management (moved from Graph)
        self.thinking_managers: Dict[str, ThinkingManagerBase] = {}
        self.global_memories: Dict[str, MemoryBase] = {}
        self.agent_memory_managers: Dict[str, MemoryManager] = {}
        # Token tracking
        self.token_tracker = runtime.token_tracker
        # Workspace roots
        self.code_workspace = runtime.code_workspace
        self.attachment_store = runtime.attachment_store
        # Cycle management
        self.cycle_manager: Optional[CycleManager] = None
        # Node executors (new strategy pattern implementation)
        self.__execution_context: Optional[ExecutionContext] = None
        self.node_executors: Dict[str, Any] = {}
        self._human_prompt_service: Optional[HumanPromptService] = None
        # for majority voting mode
        self.initial_task_messages: List[Message] = []
def request_cancel(self, reason: Optional[str] = None) -> None:
"""Signal the executor to stop as soon as possible."""
if reason:
self._cancel_reason = reason
elif not self._cancel_reason:
self._cancel_reason = "Workflow execution cancelled"
self._cancel_event.set()
self.logger.info(f"Cancellation requested for workflow {self.graph.name}")
    def is_cancelled(self) -> bool:
        """Return True once cancellation has been requested via request_cancel()."""
        return self._cancel_event.is_set()
def _raise_if_cancelled(self) -> None:
if self.is_cancelled():
message = self._cancel_reason or "Workflow execution cancelled"
raise WorkflowCancelledError(message, workflow_id=self.graph.name)
def _create_logger(self) -> WorkflowLogger:
"""Create and return a logger instance."""
return WorkflowLogger(self.graph.name, self.graph.log_level)
@classmethod
def execute_graph(
cls,
graph: GraphContext,
task_prompt: Any,
*,
cancel_event: Optional[threading.Event] = None,
) -> "GraphExecutor":
"""Convenience method to execute a graph with a task prompt."""
executor = cls(graph, cancel_event=cancel_event)
executor._execute(task_prompt)
return executor
def _execute(self, task_prompt: Any):
self._raise_if_cancelled()
results = self.run(task_prompt)
self.graph.record(results)
    def _build_memories_and_thinking(self) -> None:
        """Initialize all memory and thinking managers before execution.

        Order matters: global memories must exist before agent memory
        managers reference them, and node executors capture both via the
        shared execution context.
        """
        self._build_global_memories()
        self._build_thinking_managers()
        self._build_agent_memories()
        self._build_node_executors()
def _build_global_memories(self) -> None:
"""Build global memories from config."""
memory_config = self.graph.config.get_memory_config()
if not memory_config:
return
for store in memory_config:
if store.name in self.global_memories:
error_msg = f"Duplicated memory name detected: {store.name}"
self.log_manager.error(error_msg)
raise ValidationError(error_msg, details={"memory_name": store.name})
simple_cfg = store.as_config(SimpleMemoryConfig)
if simple_cfg and (not simple_cfg.memory_path or simple_cfg.memory_path == "auto"):
path = self.graph.directory / f"memory_{store.name}.json"
simple_cfg.memory_path = str(path)
try:
memory_instance = MemoryFactory.create_memory(store)
self.global_memories[store.name] = memory_instance
memory_instance.load()
self.log_manager.info(
f"Global memory '{store.name}' built successfully",
details={"memory_name": store.name},
)
except Exception as e:
error_msg = f"Failed to create memory '{store.name}': {str(e)}"
self.log_manager.error(error_msg, details={"memory_name": store.name})
logger = get_server_logger()
logger.log_exception(e, error_msg, memory_name=store.name)
raise WorkflowExecutionError(error_msg, details={"memory_name": store.name})
def _build_thinking_managers(self) -> None:
"""Build thinking managers for nodes that require them."""
for node_id, node in self.graph.nodes.items():
agent_config = node.as_config(AgentConfig)
if agent_config and agent_config.thinking:
self.thinking_managers[node_id] = ThinkingManagerFactory.get_thinking_manager(
agent_config.thinking
)
def _build_agent_memories(self) -> None:
"""Build memory managers for agent nodes referencing global stores."""
for node_id, node in self.graph.nodes.items():
agent_config = node.as_config(AgentConfig)
if not (agent_config and agent_config.memories):
continue
try:
self.agent_memory_managers[node_id] = MemoryManager(agent_config.memories, self.global_memories)
self.log_manager.info(
f"Memory manager built for node {node_id}",
node_id=node_id,
details={"memory_refs": [mem.name for mem in agent_config.memories]},
)
except Exception as e:
error_msg = f"Failed to create memory manager for node {node_id}: {str(e)}"
self.log_manager.error(error_msg, node_id=node_id)
logger = get_server_logger()
logger.log_exception(e, error_msg, node_id=node_id)
raise WorkflowExecutionError(error_msg, node_id=node_id)
def _get_execution_context(self) -> ExecutionContext:
if self.__execution_context is None:
global_state = dict(self.runtime_context.global_state)
global_state.setdefault("attachment_store", self.attachment_store)
prompt_service = self._ensure_human_prompt_service()
global_state.setdefault("human_prompt", prompt_service)
self.__execution_context = ExecutionContext(
tool_manager=self.tool_manager,
function_manager=self.function_manager,
log_manager=self.log_manager,
memory_managers=self.agent_memory_managers,
thinking_managers=self.thinking_managers,
token_tracker=self.token_tracker,
global_state=global_state,
workspace_hook=self.runtime_context.workspace_hook,
human_prompt_service=prompt_service,
cancel_event=self._cancel_event,
)
return self.__execution_context
def _build_node_executors(self) -> None:
"""Build node executors using strategy pattern."""
# Create node executors
self.node_executors = NodeExecutorFactory.create_executors(
self._get_execution_context(),
self.graph.subgraphs
)
def _ensure_human_prompt_service(self) -> HumanPromptService:
if self._human_prompt_service:
return self._human_prompt_service
channel = resolve_prompt_channel(self.runtime_context.workspace_hook)
if channel is None:
channel = CliPromptChannel()
self._human_prompt_service = HumanPromptService(
log_manager=self.log_manager,
channel=channel,
session_id=self.runtime_context.session_id,
)
return self._human_prompt_service
def _save_memories(self) -> None:
"""Save all memories after execution."""
for memory in self.global_memories.values():
memory.save()
    def run(self, task_prompt: Any) -> Dict[str, Any]:
        """Execute the graph based on topological layers structure or cycle-aware execution.

        Args:
            task_prompt: Raw task input (str, Message, or list of either).

        Returns:
            Mapping of per-node outputs plus a ``graph_summary`` entry.

        Raises:
            ConfigError: Propagated when the graph definition is invalid.
            ExecutionError: When the graph has no layers after building.
            WorkflowCancelledError: When cancellation is requested mid-run.
        """
        self._raise_if_cancelled()
        graph_manager = GraphManager(self.graph)
        try:
            graph_manager.build_graph()
        except ConfigError as err:
            error_msg = f"Graph configuration error: {str(err)}"
            self.log_manager.logger.error(error_msg)
            raise err
        self._prepare_edge_conditions()
        if not self.graph.layers:
            raise ExecutionError("Graph not built. Call GraphManager.build_graph() first.")
        # Record workflow start
        self.log_manager.record_workflow_start(self.graph.metadata)
        # Initialize memory and thinking before execution
        self._build_memories_and_thinking()
        # Initialize cycle manager if graph has cycles
        if self.graph.has_cycles:
            self.cycle_manager = graph_manager.get_cycle_manager()
        # Keep pristine clones so start nodes and majority voting each get
        # their own copies of the task messages.
        self.initial_task_messages = [msg.clone() for msg in self._normalize_task_input(task_prompt)]
        start_node_ids = set(self.graph.start_nodes)
        # Reset all trigger states and initialize configured start nodes
        for node_id, node in self.graph.nodes.items():
            self._raise_if_cancelled()
            node.reset_triggers()
            if node_id in start_node_ids:
                node.start_triggered = True
                node.clear_input()
                for message in self.initial_task_messages:
                    node.append_input(message.clone())
        # Execute based on graph type (using strategy objects)
        if self.graph.is_majority_voting:
            strategy = MajorityVoteStrategy(
                log_manager=self.log_manager,
                nodes=self.graph.nodes,
                initial_messages=self.initial_task_messages,
                execute_node_func=self._execute_node,
                payload_to_text_func=self._payload_to_text,
            )
            self.majority_result = strategy.run()
        elif self.graph.has_cycles:
            strategy = CycleExecutionStrategy(
                log_manager=self.log_manager,
                nodes=self.graph.nodes,
                cycle_execution_order=self.graph.cycle_execution_order,
                cycle_manager=self.cycle_manager,
                execute_node_func=self._execute_node,
            )
            strategy.run()
        else:
            strategy = DagExecutionStrategy(
                log_manager=self.log_manager,
                nodes=self.graph.nodes,
                layers=self.graph.layers,
                execute_node_func=self._execute_node,
            )
            strategy.run()
        self._raise_if_cancelled()
        # Collect final outputs and save memories
        self._collect_all_outputs()
        # Get the final result according to the new logic
        final_result = self.get_final_output()
        self._save_memories()
        # Export runtime artifacts
        archiver = ResultArchiver(self.graph, self.log_manager, self.token_tracker)
        archiver.export(final_result)
        return self.outputs
    def _prepare_edge_conditions(self) -> None:
        """Compile registered edge condition types into callable evaluators.

        For every outgoing edge of every node this (a) normalizes the raw
        condition value into an ``EdgeConditionConfig``, (b) builds and caches
        a condition manager on the edge, and (c) builds an optional payload
        processor when the edge carries a ``process`` config.

        Raises:
            WorkflowExecutionError: When a condition or processor cannot be built.
        """
        context = ConditionFactoryContext(function_manager=self.function_manager, log_manager=self.log_manager)
        processor_context = PayloadProcessorFactoryContext(
            function_manager=self.edge_processor_function_manager,
            log_manager=self.log_manager,
        )
        for node in self.graph.nodes.values():
            for edge_link in node.iter_outgoing_edges():
                condition_config = edge_link.condition_config
                if not isinstance(condition_config, EdgeConditionConfig):
                    # Fall back to the raw config value; missing conditions
                    # default to the always-true condition.
                    raw_value = edge_link.config.get("condition", "true")
                    condition_config = EdgeConditionConfig.from_dict(raw_value, path=f"{node.path}.edges")
                    edge_link.condition_config = condition_config
                try:
                    manager = build_edge_condition_manager(condition_config, context, self._get_execution_context())
                except Exception as exc:  # pragma: no cover - defensive logging
                    error_msg = f"Failed to prepare condition '{condition_config.display_label()}': {exc}"
                    self.log_manager.error(error_msg)
                    logger = get_server_logger()
                    logger.log_exception(exc, error_msg, condition_type=condition_config.type)
                    raise WorkflowExecutionError(error_msg) from exc
                edge_link.condition_manager = manager
                # Prefer labels/metadata supplied by the manager, falling back
                # to the config's display label and an empty mapping.
                label = getattr(manager, "label", None) or condition_config.display_label()
                metadata = getattr(manager, "metadata", {}) or {}
                edge_link.condition = label
                edge_link.condition_metadata = metadata
                edge_link.condition_type = condition_config.type
                process_config = edge_link.process_config
                if process_config:
                    try:
                        processor = build_edge_payload_processor(process_config, processor_context)
                    except Exception as exc:  # pragma: no cover
                        error_msg = (
                            f"Failed to prepare processor '{process_config.display_label()}': {exc}"
                        )
                        self.log_manager.error(error_msg)
                        logger = get_server_logger()
                        logger.log_exception(exc, error_msg, processor_type=process_config.type)
                        raise WorkflowExecutionError(error_msg) from exc
                    edge_link.payload_processor = processor
                    edge_link.process_type = process_config.type
                    edge_link.process_metadata = getattr(processor, "metadata", {}) or {}
                    processor_label = getattr(processor, "label", None)
                    if processor_label:
                        edge_link.config["process_label"] = processor_label
                else:
                    # No processor configured: clear any stale processor state.
                    edge_link.payload_processor = None
                    edge_link.process_metadata = {}
                    edge_link.process_type = None
    def _process_edge_output(
        self,
        edge_link: EdgeLink,
        source_result: Message,
        from_node: Node
    ) -> None:
        """Perform edge instantiation behavior.

        Edges with dynamic configuration still pass messages normally to the target
        node's input queue. Dynamic execution happens when the target node executes.

        Args:
            edge_link: The outgoing edge to evaluate.
            source_result: The message produced by *from_node*.
            from_node: The node that produced *source_result*.

        Raises:
            WorkflowExecutionError: When the edge has no condition manager or
                the manager fails while processing.
        """
        # All edges (including dynamic ones) use standard processing to pass messages
        # Dynamic execution will happen in _execute_node when the target node runs
        # Standard edge processing (no dynamic config)
        manager = edge_link.condition_manager
        if manager is None:
            # _prepare_edge_conditions() should have attached a manager; a
            # missing one indicates the graph was not prepared.
            raise WorkflowExecutionError(
                f"Edge {from_node.id}->{edge_link.target.id} is missing a condition manager"
            )
        try:
            manager.process(
                edge_link,
                source_result,
                from_node,
                self.log_manager,
            )
        except Exception as exc:  # pragma: no cover - defensive logging
            error_msg = (
                f"Edge manager failed for {from_node.id} -> {edge_link.target.id}: {exc}"
            )
            self.log_manager.error(
                error_msg,
                details={
                    "condition_type": edge_link.condition_type,
                    "condition_metadata": edge_link.condition_metadata,
                },
            )
            logger = get_server_logger()
            logger.log_exception(
                exc,
                error_msg,
                condition_type=edge_link.condition_type,
                condition_metadata=edge_link.condition_metadata,
            )
            raise WorkflowExecutionError(error_msg) from exc
    def _get_dynamic_config_for_node(self, node: Node):
        """Get the dynamic configuration for a node from its incoming edges.

        If multiple incoming edges have dynamic config, they must be identical
        (same type and parameters). Otherwise raises an error.

        Returns the dynamic config if found, or None.

        Raises:
            WorkflowExecutionError: When incoming edges carry conflicting
                dynamic configurations.
        """
        from entity.configs.edge.dynamic_edge_config import DynamicEdgeConfig
        found_configs = []  # List of (source_node_id, dynamic_config)
        # Scan every incoming edge that targets this node for a dynamic config.
        for predecessor in node.predecessors:
            for edge_link in predecessor.iter_outgoing_edges():
                if edge_link.target is node and edge_link.dynamic_config is not None:
                    found_configs.append((predecessor.id, edge_link.dynamic_config))
        if not found_configs:
            return None
        if len(found_configs) == 1:
            return found_configs[0][1]
        # Multiple dynamic configs found - verify they are consistent
        first_source, first_config = found_configs[0]
        for source_id, config in found_configs[1:]:
            # Check type consistency
            if config.type != first_config.type:
                raise WorkflowExecutionError(
                    f"Node '{node.id}' has inconsistent dynamic configurations on incoming edges: "
                    f"edge from '{first_source}' has type '{first_config.type}', "
                    f"but edge from '{source_id}' has type '{config.type}'. "
                    f"All dynamic edges to the same node must use the same configuration."
                )
            # Check split config consistency
            if (config.split.type != first_config.split.type or
                    config.split.pattern != first_config.split.pattern or
                    config.split.json_path != first_config.split.json_path):
                raise WorkflowExecutionError(
                    f"Node '{node.id}' has inconsistent split configurations on incoming edges: "
                    f"edges from '{first_source}' and '{source_id}' have different split settings. "
                    f"All dynamic edges to the same node must use the same configuration."
                )
            # Check mode-specific config consistency
            if config.max_parallel != first_config.max_parallel:
                raise WorkflowExecutionError(
                    f"Node '{node.id}' has inconsistent max_parallel on incoming edges: "
                    f"edge from '{first_source}' has max_parallel={first_config.max_parallel}, "
                    f"but edge from '{source_id}' has max_parallel={config.max_parallel}."
                )
            # group_size only applies to tree mode.
            if config.type == "tree" and config.group_size != first_config.group_size:
                raise WorkflowExecutionError(
                    f"Node '{node.id}' has inconsistent group_size on incoming edges: "
                    f"edge from '{first_source}' has group_size={first_config.group_size}, "
                    f"but edge from '{source_id}' has group_size={config.group_size}."
                )
        return first_config
def _execute_with_dynamic_config(
self,
node: Node,
inputs: List[Message],
dynamic_config,
) -> List[Message]:
"""Execute a node with dynamic configuration from incoming edges.
Args:
node: Target node to execute
inputs: All input messages collected for this node
dynamic_config: Dynamic configuration from the incoming edge
Returns:
Output messages from dynamic execution
"""
# Separate inputs: dynamic edge inputs vs static (non-dynamic) edge inputs
# Dynamic edge inputs will be split, static inputs will be replicated to all units
dynamic_inputs: List[Message] = []
static_inputs: List[Message] = []
for msg in inputs:
if msg.metadata.get("_from_dynamic_edge"):
dynamic_inputs.append(msg)
else:
static_inputs.append(msg)
self.log_manager.info(
f"Executing node {node.id} with edge dynamic config ({dynamic_config.type} mode): "
f"{len(dynamic_inputs)} dynamic inputs, {len(static_inputs)} static inputs"
)
# Create node executor function
def node_executor_func(n: Node, inp: List[Message]) -> List[Message]:
return self._process_result(n, inp)
# Execute with dynamic edge executor
dynamic_executor = DynamicEdgeExecutor(self.log_manager, node_executor_func)
# Pass dynamic inputs for splitting, static inputs for replication
return dynamic_executor.execute_from_inputs(
node, dynamic_inputs, dynamic_config, static_inputs=static_inputs
)
    def _execute_node(self, node: Node) -> None:
        """Execute a single node.

        Runs the node under its resource guard: records start, executes all
        queued inputs in one call (dynamic-edge aware), appends outputs,
        handles context-trace restoration and input-window cleanup, records
        end, and finally forwards each output along every outgoing edge.
        """
        self._raise_if_cancelled()
        with self.resource_manager.guard_node(node):
            input_results = node.input
            # Clear incoming triggers so future iterations wait for fresh signals
            node.reset_triggers()
            serialized_inputs = [message.to_dict(include_data=False) for message in input_results]
            # Record node start
            self.log_manager.record_node_start(node.id, serialized_inputs, node.node_type, {
                "input_count": len(input_results),
                "predecessors": [p.id for p in node.predecessors],
                "successors": [s.id for s in node.successors]
            })
            self.log_manager.debug(f"Processing {len(input_results)} inputs together for node {node.id}")
            # Check if any incoming edge has dynamic configuration
            dynamic_config = self._get_dynamic_config_for_node(node)
            # Process all inputs together in a single executor call
            with self.log_manager.node_timer(node.id):
                if dynamic_config is not None:
                    raw_outputs = self._execute_with_dynamic_config(node, input_results, dynamic_config)
                else:
                    raw_outputs = self._process_result(node, input_results)
            # Process all output messages
            output_messages: List[Message] = []
            for raw_output in raw_outputs:
                # Normalize every raw output into a Message attributed to this node.
                msg = self._ensure_source_output(raw_output, node.id)
                node.append_output(msg)
                output_messages.append(msg)
            # Use first output for context trace handling (backward compat)
            unified_output = output_messages[0] if output_messages else None
            context_trace_payload = None
            context_restored = False
            if unified_output is not None and isinstance(unified_output.metadata, dict):
                context_trace_payload = unified_output.metadata.get("context_trace")
            # context_window == 0 disables context retention entirely.
            if node.context_window != 0 and context_trace_payload:
                context_restored = self._restore_context_trace(node, context_trace_payload)
            # context_window == -1 means "keep everything": skip cleanup.
            if node.context_window != -1:
                preserved_inputs = node.clear_input(preserve_kept=True, context_window=node.context_window)
                if preserved_inputs:
                    self.log_manager.debug(
                        f"Node {node.id} cleaned up its input context after execution (preserved {preserved_inputs} keep-marked inputs)"
                    )
                else:
                    self.log_manager.debug(
                        f"Node {node.id} cleaned up its input context after execution"
                    )
            if output_messages:
                self.log_manager.debug(
                    f"Node {node.id} processed {len(input_results)} inputs into {len(output_messages)} output(s)"
                )
            else:
                self.log_manager.debug(
                    f"Node {node.id} produced no output; downstream edges suppressed"
                )
            # Record node end
            output_text = ""
            if output_messages:
                if len(output_messages) == 1:
                    output_text = unified_output.text_content()
                else:
                    # Concatenate multiple outputs with visible separators.
                    for idx, msg in enumerate(output_messages):
                        output_text += f"===== OUTPUT {idx} =====\n\n" + msg.text_content() + "\n\n"
                output_role = unified_output.role.value
                output_source = unified_output.metadata.get("source")
            else:
                output_text = ""
                output_role = "none"
                output_source = None
            self.log_manager.record_node_end(node.id, output_text if node.log_output else "", {
                "output_size": len(output_text),
                "output_count": len(output_messages),
                "output_role": output_role,
                "output_source": output_source
            })
            # Pass results to successor nodes via edges
            # For each output message, process all edges
            for output_msg in output_messages:
                for edge_link in node.iter_outgoing_edges():
                    self._process_edge_output(edge_link, output_msg, node)
            # Self-feedback: when no context trace was restored, loop outputs
            # back into this node's input via a non-triggering pseudo edge.
            if output_messages and node.context_window != 0 and not context_restored:
                # Use first output for pseudo edge
                pseudo_condition = EdgeConditionConfig.from_dict("true", path=f"{node.path}.pseudo_edge")
                pseudo_link = EdgeLink(target=node, trigger=False)
                pseudo_link.condition_config = pseudo_condition
                pseudo_context = ConditionFactoryContext(
                    function_manager=self.function_manager,
                    log_manager=self.log_manager,
                )
                pseudo_link.condition_manager = build_edge_condition_manager(pseudo_condition, pseudo_context, self._get_execution_context())
                pseudo_link.condition = pseudo_condition.display_label()
                pseudo_link.condition_type = pseudo_condition.type
                for output_msg in output_messages:
                    self._process_edge_output(pseudo_link, output_msg, node)
def _process_result(self, node: Node, input_payload: List[Message]) -> List[Message]:
"""Process a single input result using strategy pattern executors.
This method delegates to specific node executors based on node type.
Returns a list of messages (maybe empty if node suppresses output).
"""
if not self.node_executors:
raise RuntimeError("Node executors not initialized. Call _build_memories_and_thinking() first.")
if node.type not in self.node_executors:
raise ValueError(f"Unsupported node type: {node.type}")
executor = self.node_executors[node.type]
hook = self.runtime_context.workspace_hook
workspace = self.runtime_context.code_workspace
if hook:
try:
hook.before_node(node, workspace)
except Exception:
self.log_manager.warning("workspace hook before_node failed for %s", node.id)
success = False
try:
result = executor.execute(node, input_payload)
success = True
return result
finally:
if hook:
try:
hook.after_node(node, workspace, success=success)
except Exception:
self.log_manager.warning("workspace hook after_node failed for %s", node.id)
def _collect_all_outputs(self) -> None:
"""Collect final outputs from all nodes, especially sink nodes."""
all_outputs = {}
# For majority voting, we might want to collect differently
if self.graph.is_majority_voting:
# In majority voting mode, collect all outputs and the final majority result
for node_id, node in self.graph.nodes.items():
if node.output:
node_output = {
"node_id": node_id,
"node_type": node.node_type,
"predecessors_num": len(node.predecessors),
"successors_num": len(node.successors),
"results": [self._serialize_output_payload(item) for item in node.output]
}
all_outputs[f"node_{node_id}"] = node_output
# Add the majority result
if hasattr(self, 'majority_result'):
all_outputs["majority_result"] = self.majority_result
else:
# Collect outputs from all nodes normally
for node_id, node in self.graph.nodes.items():
if node.output:
node_output = {
"node_id": node_id,
"node_type": node.node_type,
"predecessors_num": len(node.predecessors),
"successors_num": len(node.successors),
"results": [self._serialize_output_payload(item) for item in node.output]
}
all_outputs[f"node_{node_id}"] = node_output
# Add graph summary
all_outputs["graph_summary"] = {
"total_nodes": len(self.graph.nodes),
"total_edges": len(self.graph.edges),
"total_transmissions": len([k for k in self.outputs.keys() if "->" in k]),
"layers": len(self.graph.layers),
"execution_completed": True,
"is_majority_voting": self.graph.is_majority_voting
}
self.outputs.update(all_outputs)
def get_final_output(self) -> str:
final_message = self.get_final_output_message()
return final_message.text_content() if final_message else ""
def get_final_output_message(self) -> Message | None:
if self.graph.is_majority_voting:
if self.majority_result is None:
return None
if isinstance(self.majority_result, Message):
return self.majority_result.clone()
return self._create_message(MessageRole.ASSISTANT, str(self.majority_result), "MAJORITY_VOTE")
final_node = self._get_final_node()
if not final_node:
return None
if final_node.output:
value = final_node.output[-1]
if isinstance(value, Message):
return value.clone()
return self._create_message(MessageRole.ASSISTANT, str(value), final_node.id)
return None
def get_final_output_messages(self) -> List[Message]:
"""Return all messages from the final node."""
if self.graph.is_majority_voting:
msg = self.get_final_output_message()
return [msg] if msg else []
final_node = self._get_final_node()
if not final_node:
return []
results = []
for value in final_node.output:
if isinstance(value, Message):
results.append(value.clone())
else:
results.append(self._create_message(MessageRole.ASSISTANT, str(value), final_node.id))
return results
def _get_final_node(self) -> Node:
"""Return the explicitly configured end node, or sink node as fallback."""
end_node_ids = self.graph.config.definition.end_nodes
if end_node_ids:
for end_node_id in end_node_ids:
if end_node_id in self.graph.nodes:
node = self.graph.nodes[end_node_id]
# Check if node has output
if node.output:
return node
# Fallback to default behavior - return sink node
sink_node = [node for node in self.graph.nodes.values() if not node.successors]
return sink_node[0] if sink_node else None
    def _restore_context_trace(self, node: Node, trace_payload: Any) -> bool:
        """Re-append user/assistant messages from a serialized context trace.

        Args:
            node: Node whose input queue receives the restored messages.
            trace_payload: Expected to be a list of message dicts; anything
                else is ignored.

        Returns:
            True when at least one message was restored.
        """
        if not isinstance(trace_payload, list):
            return False
        restored = 0
        for entry in trace_payload:
            if not isinstance(entry, dict):
                continue
            try:
                message = Message.from_dict(entry)
                # Only user/assistant turns are part of conversational context.
                if message.role not in [MessageRole.USER, MessageRole.ASSISTANT]:
                    continue
            except Exception as exc:
                # Malformed entries are logged and skipped, never fatal.
                self.log_manager.warning(
                    f"Failed to deserialize context trace for node {node.id}: {exc}"
                )
                continue
            node.append_input(self._ensure_source(message, node.id))
            restored += 1
        if restored:
            self.log_manager.debug(
                f"Node {node.id} preserved {restored} messages from its tool execution trace"
            )
        return restored > 0
def _payload_to_text(self, payload: Any) -> str:
if isinstance(payload, Message):
return payload.text_content()
if payload is None:
return ""
return str(payload)
def _serialize_output_payload(self, payload: Any) -> Any:
if isinstance(payload, Message):
return {"type": "message", "payload": payload.to_dict(include_data=False)}
return {"type": "text", "payload": str(payload)}
def _normalize_task_input(self, raw_input: Any) -> List[Message]:
if isinstance(raw_input, list):
messages: List[Message] = []
for item in raw_input:
if isinstance(item, Message):
messages.append(self._ensure_source(item, "TASK"))
elif isinstance(item, str):
messages.append(self._create_message(MessageRole.USER, item, "TASK"))
return messages or [self._create_message(MessageRole.USER, "", "TASK")]
if isinstance(raw_input, Message):
return [self._ensure_source(raw_input, "TASK")]
return [self._create_message(MessageRole.USER, str(raw_input), "TASK")]
def _ensure_source(self, message: Message, default_source: str) -> Message:
cloned = message.clone()
metadata = dict(cloned.metadata)
metadata.setdefault("source", default_source)
cloned.metadata = metadata
return cloned
def _create_message(self, role: MessageRole, content: str, source: str) -> Message:
return Message(role=role, content=content, metadata={"source": source})
def _ensure_source_output(self, message: Any, node_id: str) -> Message:
if not isinstance(message, Message):
return self._create_message(MessageRole.ASSISTANT, str(message), node_id)
cloned = message.clone()
metadata = dict(message.metadata)
metadata.setdefault("source", node_id)
cloned.metadata = metadata
return cloned
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "workflow/graph.py",
"license": "Apache License 2.0",
"lines": 743,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
OpenBMB/ChatDev:workflow/graph_context.py | """Runtime context for workflow graphs.
This module stores execution-time state and business logic for graphs.
"""
from datetime import datetime
from typing import Any, Dict, List
import yaml
from entity.configs import Node
from entity.graph_config import GraphConfig
class GraphContext:
    """Runtime context for a workflow graph (state + business logic).

    Differences from ``GraphConfig``:
    - ``GraphConfig`` is immutable configuration data.
    - ``GraphContext`` is mutable runtime state with dynamic execution data.

    Attributes:
        config: Graph configuration
        nodes: Mapping of ``node_id`` to ``Node``
        edges: List of edges
        layers: Topological layer layout
        outputs: Node outputs captured during execution
        topology: Topological ordering list
        subgraphs: Mapping of ``node_id`` to nested ``GraphContext``
        has_cycles: Whether the graph contains cycles
        cycle_execution_order: Execution order for cycles
        directory: Output directory for artifacts
        depth: Graph depth
    """

    def __init__(self, config: GraphConfig) -> None:
        """Initialize the graph context.

        Creates the on-disk output directory as a side effect.

        Args:
            config: Graph configuration
        """
        self.config = config
        # Copy so runtime mutations never leak back into the shared config.
        self.vars: Dict[str, Any] = dict(config.vars)
        # Graph structure (populated later, e.g. by a graph manager/builder).
        self.nodes: Dict[str, Node] = {}
        self.edges: List[Dict[str, Any]] = []
        self.layers: List[List[str]] = []
        self.topology: List[str] = []
        self.depth: int = 0
        self.start_nodes: List[str] = []
        self.explicit_start_nodes: List[str] = []
        # Runtime state
        self.outputs: Dict[str, str] = {}
        self.subgraphs: Dict[str, "GraphContext"] = {}
        # Cycle support
        self.has_cycles: bool = False
        self.cycle_execution_order: List[Dict[str, Any]] = []
        # Output directory: reuse a fixed path for "fixed_output_dir" graphs
        # and session-named graphs; otherwise create a timestamped directory
        # so repeated runs do not overwrite each other.
        timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
        fixed_output_dir = bool(config.metadata.get("fixed_output_dir"))
        if fixed_output_dir or "session_" in config.name:
            self.directory = config.output_root / config.name
        else:
            self.directory = config.output_root / f"{config.name}_{timestamp}"
        self.directory.mkdir(parents=True, exist_ok=True)
        # Voting mode flag
        self.is_majority_voting: bool = config.is_majority_voting

    @property
    def name(self) -> str:
        """Return the project name."""
        return self.config.name

    @property
    def log_level(self):
        """Return the configured log level."""
        return self.config.log_level

    @property
    def metadata(self) -> Dict[str, Any]:
        """Return graph metadata."""
        return self.config.metadata

    @metadata.setter
    def metadata(self, value: Dict[str, Any]) -> None:
        """Set graph metadata (delegates to the underlying config)."""
        self.config.metadata = value

    def record(self, outputs: Dict[str, Any]) -> None:
        """Persist execution results to disk.

        Writes ``node_outputs.yaml`` (only when there are outputs) and an
        unconditional ``workflow_summary.yaml`` into the run directory.

        Args:
            outputs: Mapping of node outputs
        """
        self.outputs = outputs
        # self.directory.mkdir(parents=True, exist_ok=True)
        # Persist node outputs
        outputs_path = self.directory / "node_outputs.yaml"
        if self.outputs:
            with outputs_path.open("w", encoding="utf-8") as handle:
                yaml.dump(self.outputs, handle, allow_unicode=True, sort_keys=False)
        # Persist workflow summary
        summary = {
            "project": self.config.name,
            "organization": self.config.get_organization(),
            "design_path": self.config.get_source_path(),
            "metadata": self.config.metadata,
        }
        summary_path = self.directory / "workflow_summary.yaml"
        with summary_path.open("w", encoding="utf-8") as handle:
            yaml.dump(summary, handle, allow_unicode=True, sort_keys=False)

    def final_message(self) -> str:
        """Build the final completion string.

        Returns:
            Completion message text
        """
        if not self.outputs:
            return "Workflow finished with no outputs."
        # Terminal nodes = nodes with no successors.
        sink_nodes = [node_id for node_id, node in self.nodes.items() if not node.successors]
        return (
            f"Workflow finished with {len(self.outputs)} node outputs"
            f" ({len(sink_nodes)} terminal nodes)."
        )

    def get_sink_nodes(self) -> List[Node]:
        """Return all leaf nodes (nodes without successors)."""
        return [node for node in self.nodes.values() if not node.successors]

    def get_source_nodes(self) -> List[Node]:
        """Return all source nodes (nodes without predecessors)."""
        return [node for node in self.nodes.values() if not node.predecessors]

    def to_dict(self) -> Dict[str, Any]:
        """Convert the graph context to a dictionary (shallow copies of state)."""
        return {
            "config": self.config.to_dict(),
            "nodes": {node_id: node.to_dict() for node_id, node in self.nodes.items()},
            "edges": list(self.edges),
            "layers": list(self.layers),
            "topology": list(self.topology),
            "depth": self.depth,
            "has_cycles": self.has_cycles,
            "start_nodes": list(self.start_nodes),
            "explicit_start_nodes": list(self.explicit_start_nodes),
            "outputs": dict(self.outputs),
        }
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "workflow/graph_context.py",
"license": "Apache License 2.0",
"lines": 127,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
OpenBMB/ChatDev:workflow/graph_manager.py | """Graph management and construction utilities for workflow graphs."""
from typing import Dict, List, Set, Any
import copy
from entity.configs import ConfigError, SubgraphConfig
from entity.configs.edge.edge_condition import EdgeConditionConfig
from entity.configs.base import extend_path
from entity.configs.node.subgraph import SubgraphFileConfig, SubgraphInlineConfig
from workflow.cycle_manager import CycleManager
from workflow.subgraph_loader import load_subgraph_config
from workflow.topology_builder import GraphTopologyBuilder
from utils.env_loader import build_env_var_map
from utils.vars_resolver import resolve_mapping_with_vars
from workflow.graph_context import GraphContext
class GraphManager:
    """Manages graph construction, cycle detection, and execution order determination."""

    def __init__(self, graph: "GraphContext") -> None:
        """Initialize GraphManager with a GraphContext instance."""
        self.graph = graph
        self.cycle_manager = CycleManager()

    def build_graph_structure(self) -> None:
        """Build the complete graph structure including nodes, edges, and layers.

        Order matters: edges require nodes; start-node validation reads the
        cycle manager populated during edge/layer construction; topology is
        derived last from the computed layers.
        """
        self._instantiate_nodes()
        self._initiate_edges()
        self._determine_start_nodes()
        self._warn_on_untriggerable_nodes()
        self._build_topology_and_metadata()

    def _instantiate_nodes(self) -> None:
        """Instantiate all nodes from configuration.

        Each definition is deep-copied so runtime wiring (predecessors,
        successors, outgoing edges, vars) never mutates the shared config.
        Subgraph-typed nodes additionally get a nested GraphContext built.
        """
        self.graph.nodes.clear()
        for node_def in self.graph.config.get_node_definitions():
            node_id = node_def.id
            if node_id in self.graph.nodes:
                # First definition wins; later duplicates are skipped.
                print(f"Duplicated node id detected: {node_id}")
                continue
            node_instance = copy.deepcopy(node_def)
            node_instance.predecessors = []
            node_instance.successors = []
            node_instance._outgoing_edges = []
            node_instance.vars = dict(self.graph.vars)
            self.graph.nodes[node_id] = node_instance
            if node_instance.node_type == "subgraph":
                self._build_subgraph(node_id)

    def _build_subgraph(self, node_id: str) -> None:
        """Build a nested GraphContext for the subgraph node *node_id*.

        Supports two configuration styles: an inline graph dict
        (``type == "config"``) or an external YAML file (``type == "file"``).
        Parent vars are merged with subgraph vars (subgraph wins) before
        variable resolution.

        Raises:
            ConfigError: if the subgraph configuration is missing or its
                type is unsupported.
        """
        # Local imports avoid a circular dependency with graph_context.
        from entity.graph_config import GraphConfig
        from workflow.graph_context import GraphContext

        subgraph_config_data = self.graph.nodes[node_id].as_config(SubgraphConfig)
        if not subgraph_config_data:
            return
        parent_source = self.graph.config.get_source_path()
        subgraph_vars: Dict[str, Any] = {}
        if subgraph_config_data.type == "config":
            inline_cfg = subgraph_config_data.as_config(SubgraphInlineConfig)
            if not inline_cfg:
                raise ConfigError(
                    f"Inline subgraph configuration missing for node '{node_id}'",
                    subgraph_config_data.path,
                )
            # Deep-copy so in-place var resolution below can't leak into config.
            config_payload = copy.deepcopy(inline_cfg.graph)
            source_path = parent_source
        elif subgraph_config_data.type == "file":
            file_cfg = subgraph_config_data.as_config(SubgraphFileConfig)
            if not file_cfg:
                raise ConfigError(
                    f"File subgraph configuration missing for node '{node_id}'",
                    subgraph_config_data.path,
                )
            config_payload, subgraph_vars, source_path = load_subgraph_config(
                file_cfg.file_path,
                parent_source=parent_source,
            )
        else:
            raise ConfigError(
                f"Unsupported subgraph configuration on node '{node_id}'",
                subgraph_config_data.path,
            )
        # Subgraph-level vars override parent vars on key collision.
        combined_vars = dict(self.graph.config.vars)
        combined_vars.update(subgraph_vars)
        resolve_mapping_with_vars(
            config_payload,
            env_lookup=build_env_var_map(),
            vars_map=combined_vars,
            path=f"subgraph[{node_id}]",
        )
        # Inherit the parent's log level unless the subgraph sets its own.
        if config_payload.get("log_level", None) is None:
            config_payload["log_level"] = self.graph.log_level.value
        subgraph_config = GraphConfig.from_dict(
            config=config_payload,
            name=f"{self.graph.name}_{node_id}_subgraph",
            output_root=self.graph.config.output_root,
            source_path=source_path,
            vars=combined_vars,
        )
        subgraph = GraphContext(config=subgraph_config)
        # Recursively build the nested graph's structure.
        subgraph_manager = GraphManager(subgraph)
        subgraph_manager.build_graph_structure()
        self.graph.subgraphs[node_id] = subgraph

    def _initiate_edges(self) -> None:
        """Initialize edges and determine layers or cycle execution order.

        Wires predecessor/successor links on the node instances, records a
        serializable edge list on the context, then chooses between DAG
        layering and cycle-aware ordering based on cycle detection.
        """
        # For majority voting mode, there are no edges by design
        if self.graph.is_majority_voting:
            print("Majority voting mode detected - skipping edge initialization")
            self.graph.edges = []
            # For majority voting, all nodes are independent and can be executed in parallel
            # Create a single layer with all nodes
            all_node_ids = list(self.graph.nodes.keys())
            self.graph.layers = [all_node_ids]
            return
        self.graph.edges = []
        for edge_config in self.graph.config.get_edge_definitions():
            src = edge_config.source
            dst = edge_config.target
            if src not in self.graph.nodes or dst not in self.graph.nodes:
                # Skip dangling edges rather than failing the whole build.
                print(f"Edge references unknown node: {src}->{dst}")
                continue
            condition_config = edge_config.condition
            if condition_config is None:
                # Missing condition defaults to the always-true condition.
                condition_config = EdgeConditionConfig.from_dict("true", path=extend_path(edge_config.path, "condition"))
            condition_value = condition_config.to_external_value()
            process_config = edge_config.process
            process_value = process_config.to_external_value() if process_config else None
            dynamic_config = edge_config.dynamic
            # Payload attached to the src node's outgoing edge link.
            payload = {
                "trigger": edge_config.trigger,
                "condition": condition_value,
                "condition_config": condition_config,
                "condition_label": condition_config.display_label(),
                "condition_type": condition_config.type,
                "carry_data": edge_config.carry_data,
                "keep_message": edge_config.keep_message,
                "clear_context": edge_config.clear_context,
                "clear_kept_context": edge_config.clear_kept_context,
                "process_config": process_config,
                "process": process_value,
                "process_type": process_config.type if process_config else None,
                "dynamic_config": dynamic_config,
            }
            self.graph.nodes[src].add_successor(self.graph.nodes[dst], payload)
            self.graph.nodes[dst].add_predecessor(self.graph.nodes[src])
            # Parallel, serializable edge record kept on the context.
            self.graph.edges.append({
                "from": src,
                "to": dst,
                "trigger": edge_config.trigger,
                "condition": condition_value,
                "condition_type": condition_config.type,
                "carry_data": edge_config.carry_data,
                "keep_message": edge_config.keep_message,
                "clear_context": edge_config.clear_context,
                "clear_kept_context": edge_config.clear_kept_context,
                "process": process_value,
                "process_type": process_config.type if process_config else None,
                "dynamic": dynamic_config is not None,
            })
        # Check for cycles and build appropriate execution structure
        cycles = self._detect_cycles()
        self.graph.has_cycles = len(cycles) > 0
        if self.graph.has_cycles:
            print(f"Detected {len(cycles)} cycle(s) in the workflow graph.")
            self.graph.layers = self._build_cycle_execution_order(cycles)
        else:
            self.graph.layers = self._build_dag_layers()

    def _detect_cycles(self) -> List[Set[str]]:
        """Detect cycles in the graph using GraphTopologyBuilder."""
        return GraphTopologyBuilder.detect_cycles(self.graph.nodes)

    def _build_dag_layers(self) -> List[List[str]]:
        """Build layers for DAG (Directed Acyclic Graph) using GraphTopologyBuilder."""
        layers_with_items = GraphTopologyBuilder.build_dag_layers(self.graph.nodes)
        # Convert format to be compatible with existing code
        layers = [
            [item["node_id"] for item in layer]
            for layer in layers_with_items
        ]
        print(f"layers: {layers}")
        # A node count mismatch means some nodes never became layerable,
        # which indicates an undetected cycle.
        if len(set(node_id for layer in layers for node_id in layer)) != len(self.graph.nodes):
            print("Detected a cycle in the workflow graph; a DAG is required.")
        return layers

    def _build_cycle_execution_order(self, cycles: List[Set[str]]) -> List[List[str]]:
        """Build execution order for graphs with cycles using super-node abstraction and GraphTopologyBuilder."""
        # Initialize cycle manager
        self.cycle_manager.initialize_cycles(cycles, self.graph.nodes)
        # Use GraphTopologyBuilder to create super-node graph
        super_node_graph = GraphTopologyBuilder.create_super_node_graph(
            self.graph.nodes,
            self.graph.edges,
            cycles
        )
        # Use GraphTopologyBuilder for topological sorting
        execution_order = GraphTopologyBuilder.topological_sort_super_nodes(
            super_node_graph,
            cycles
        )
        # Enrich execution_order with entry_nodes and exit_edges from cycle_manager
        for layer in execution_order:
            for item in layer:
                if item["type"] == "cycle":
                    cycle_id = item["cycle_id"]
                    cycle_info = self.cycle_manager.cycles[cycle_id]
                    item["entry_nodes"] = list(cycle_info.entry_nodes)
                    item["exit_edges"] = cycle_info.exit_edges
        self.graph.cycle_execution_order = execution_order
        # Return a simplified layer structure for compatibility
        return [["__CYCLE_AWARE__"]]  # Special marker for cycle-aware execution

    def _build_topology_and_metadata(self) -> None:
        """Build topology and metadata for the graph.

        Flattens the computed layers into a linear topology and derives the
        graph depth (number of layers minus one).
        """
        self.graph.topology = [node_id for layer in self.graph.layers for node_id in layer]
        self.graph.depth = len(self.graph.layers) - 1 if self.graph.layers else 0
        self.graph.metadata = self._build_metadata()

    def _build_metadata(self) -> Dict[str, Any]:
        """Build metadata for the graph (node catalog plus structural info)."""
        graph_def = self.graph.config.definition
        catalog: Dict[str, Any] = {}
        for node_id, node in self.graph.nodes.items():
            catalog[node_id] = {
                "type": node.node_type,
                "description": node.description,
                "model_name": node.model_name,
                "role": node.role,
                "tools": node.tools,
                "memories": node.memories,
                "params": node.params,
            }
        return {
            "design_id": graph_def.id,
            "node_count": len(self.graph.nodes),
            "edge_count": len(self.graph.edges),
            "start": list(self.graph.start_nodes),
            "end": graph_def.end_nodes,
            "catalog": catalog,
            "topology": self.graph.topology,
            "layers": self.graph.layers,
        }

    def _determine_start_nodes(self) -> None:
        """Determine the effective set of start nodes (explicit only).

        Validates that each configured start node exists, that each cycle
        gets at most one configured entry node, and that at least one start
        node was configured.

        Raises:
            ConfigError: on unknown start nodes, conflicting cycle entry
                nodes, or a missing start-node configuration.
        """
        definition = self.graph.config.definition
        explicit_ordered = list(definition.start_nodes)
        explicit_set = set(explicit_ordered)
        # if explicit_ordered and not self.graph.has_cycles:
        #     raise ConfigError(
        #         "start nodes can only be specified for graphs that contain cycles",
        #         extend_path(definition.path, "start"),
        #     )
        if explicit_set:
            cycle_path = extend_path(definition.path, "start")
            for node_id in explicit_ordered:
                if node_id not in self.graph.nodes:
                    raise ConfigError(
                        f"start node '{node_id}' not defined in nodes",
                        cycle_path,
                    )
                cycle_id = self.cycle_manager.node_to_cycle.get(node_id)
                if cycle_id is None:
                    # Node is not part of a cycle; nothing more to validate.
                    continue
                cycle_info = self.cycle_manager.cycles.get(cycle_id)
                if cycle_info is None:
                    raise ConfigError(
                        f"cycle data missing for start node '{node_id}'",
                        cycle_path,
                    )
                if cycle_info.configured_entry_node and cycle_info.configured_entry_node != node_id:
                    raise ConfigError(
                        f"cycle '{cycle_id}' already has start node '{cycle_info.configured_entry_node}'",
                        cycle_path,
                    )
                cycle_info.configured_entry_node = node_id
        if not explicit_ordered:
            raise ConfigError(
                "Unable to determine a start node for this graph. Configure at least one Start Node via Configure Graph > Advanced Settings > Start Node > input node ID.",
                extend_path(definition.path, "start"),
            )
        self.graph.start_nodes = explicit_ordered
        self.graph.explicit_start_nodes = explicit_ordered

    def _warn_on_untriggerable_nodes(self) -> None:
        """Emit warnings for nodes that cannot be triggered by any predecessor.

        A node is untriggerable when it has predecessors, is not a start
        node, and none of its incoming edges has ``trigger`` set.
        """
        start_nodes = set(self.graph.start_nodes or [])
        for node_id, node in self.graph.nodes.items():
            if not node.predecessors:
                continue
            if node_id in start_nodes:
                continue
            has_triggerable_edge = False
            for predecessor in node.predecessors:
                for edge_link in predecessor.iter_outgoing_edges():
                    # Identity check: the edge must target this exact node.
                    if edge_link.target is node and edge_link.trigger:
                        has_triggerable_edge = True
                        break
                if has_triggerable_edge:
                    break
            if not has_triggerable_edge:
                print(
                    f"Warning: node '{node_id}' has no triggerable incoming edges and will never execute."
                )

    def get_cycle_manager(self) -> CycleManager:
        """Get the cycle manager instance."""
        return self.cycle_manager

    def build_graph(self) -> None:
        """Build graph structure only (no memory/thinking initialization)."""
        self.build_graph_structure()
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "workflow/graph_manager.py",
"license": "Apache License 2.0",
"lines": 297,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
OpenBMB/ChatDev:workflow/hooks/workspace_artifact.py | """Hook that scans a node workspace for newly created files."""
import hashlib
import logging
import mimetypes
import os
import time
from dataclasses import dataclass
from pathlib import Path
from typing import Callable, Dict, List, Optional, Sequence, Set, Tuple
from entity.configs import Node
from entity.messages import MessageBlockType
from utils.attachments import AttachmentRecord, AttachmentStore
from utils.human_prompt import PromptChannel
@dataclass
class WorkspaceArtifact:
    """Represents a file artifact detected by the workspace hook."""

    # Node that produced (or deleted) the file.
    node_id: str
    # Identifier assigned by the attachment store ("" when unavailable).
    attachment_id: str
    file_name: str
    # Path relative to the scanned workspace root.
    relative_path: str
    absolute_path: str
    mime_type: Optional[str]
    size: Optional[int]
    sha256: Optional[str]
    data_uri: Optional[str]
    # Unix timestamp when the change was recorded.
    created_at: float
    # One of "created", "updated", or "deleted".
    change_type: str
    # Extra metadata copied from the attachment record / hook.
    extra: Dict[str, object]
@dataclass
class _FileSignature:
    # Content hash and byte size of one file, captured during a snapshot.
    sha256: str
    size: int
@dataclass
class _TrackedEntry:
    # Last-emitted state for a workspace file; used to classify later
    # changes as updates or deletions.
    sha256: str
    attachment_id: str
    absolute_path: str
    mime_type: Optional[str]
    size: Optional[int]
    data_uri: Optional[str]
class WorkspaceArtifactHook:
    """Detects workspace file changes for selected node types.

    Before a node runs, the hook snapshots the workspace (relative path ->
    content hash). After a successful run it diffs a fresh snapshot against
    the stored one, registers created/updated files with the attachment
    store, detects deletions, and reports every change through
    ``emit_callback``.
    """

    def __init__(
        self,
        *,
        attachment_store: AttachmentStore,
        emit_callback: Callable[[List[WorkspaceArtifact]], None],
        node_types: Optional[Sequence[str]] = None,
        exclude_dirs: Optional[Sequence[str]] = None,
        max_files_scanned: int = 500,
        max_bytes_scanned: int = 500 * 1024 * 1024,
        prompt_channel: Optional[PromptChannel] = None,
    ) -> None:
        """Configure the hook.

        Args:
            attachment_store: Store used to register detected files.
            emit_callback: Invoked with the list of detected artifacts.
            node_types: Node types to watch (default: ``python`` and ``agent``).
            exclude_dirs: Top-level directory names skipped while scanning.
            max_files_scanned: Scan is truncated beyond this file count.
            max_bytes_scanned: Scan is truncated beyond this total byte size.
            prompt_channel: Optional channel for human prompts.
        """
        self.attachment_store = attachment_store
        self.emit_callback = emit_callback
        self.node_types: Set[str] = set(node_types or {"python", "agent"})
        self.exclude_dirs = set(exclude_dirs or {"attachments", "__pycache__"})
        self.max_files_scanned = max_files_scanned
        self.max_bytes_scanned = max_bytes_scanned
        self.logger = logging.getLogger(__name__)
        # Per-node "before" snapshots, keyed by node id.
        self._snapshots: Dict[str, Dict[str, _FileSignature]] = {}
        # Last emitted state per relative path; drives update/delete detection.
        self._last_emitted: Dict[str, _TrackedEntry] = {}
        self.prompt_channel = prompt_channel

    def can_handle(self, node: Node) -> bool:
        """Return True when this hook watches the node's type."""
        return node.node_type in self.node_types

    def get_prompt_channel(self) -> Optional[PromptChannel]:
        """Return the configured prompt channel, if any."""
        return self.prompt_channel

    def before_node(self, node: Node, workspace: Path) -> None:
        """Capture a pre-execution snapshot for a watched node."""
        if not self.can_handle(node):
            return
        snapshot, _ = self._snapshot(workspace)
        self._snapshots[node.id] = snapshot

    def after_node(
        self,
        node: Node,
        workspace: Path,
        *,
        success: bool,
    ) -> None:
        """Diff the workspace after execution and emit detected artifacts.

        On failure (or for unwatched nodes) the stored snapshot is discarded
        and nothing is emitted. Deletion detection is skipped when the scan
        was truncated, since missing entries may simply not have been seen.
        """
        if not success or not self.can_handle(node):
            self._snapshots.pop(node.id, None)
            return
        before = self._snapshots.pop(node.id, {})
        after, truncated = self._snapshot(workspace)
        if not after and not self._last_emitted:
            return
        # New files, plus files whose content hash changed.
        changed_paths = [
            Path(path_str)
            for path_str, signature in after.items()
            if path_str not in before or before[path_str].sha256 != signature.sha256
        ]
        artifacts: List[WorkspaceArtifact] = []
        for relative_path in changed_paths:
            signature = after[str(relative_path)]
            full_path = workspace / relative_path
            if not full_path.exists() or not full_path.is_file():
                continue
            try:
                tracked = self._last_emitted.get(str(relative_path))
                change_type = "created" if tracked is None else "updated"
                # Reuse the previous attachment id so updates keep identity.
                record = self._register_artifact(
                    full_path,
                    relative_path,
                    node,
                    attachment_id=tracked.attachment_id if tracked else None,
                )
            except Exception as exc:
                self.logger.warning(
                    "Failed to register artifact %s for node %s: %s",
                    relative_path,
                    node.id,
                    exc,
                )
                continue
            artifacts.append(
                self._to_artifact(
                    record,
                    node,
                    relative_path,
                    full_path,
                    change_type=change_type,
                )
            )
            self._last_emitted[str(relative_path)] = _TrackedEntry(
                sha256=signature.sha256,
                attachment_id=record.ref.attachment_id or "",
                absolute_path=str(full_path),
                mime_type=record.ref.mime_type,
                size=record.ref.size,
                data_uri=record.ref.data_uri,
            )
        if not truncated:
            # Previously emitted files that vanished from the snapshot.
            deleted_paths = [
                relative_path
                for relative_path in list(self._last_emitted.keys())
                if relative_path not in after
            ]
            for relative_path in deleted_paths:
                tracked = self._last_emitted.pop(relative_path, None)
                if not tracked:
                    continue
                artifacts.append(
                    WorkspaceArtifact(
                        node_id=node.id,
                        attachment_id=tracked.attachment_id,
                        file_name=Path(relative_path).name,
                        relative_path=relative_path,
                        absolute_path=tracked.absolute_path,
                        mime_type=tracked.mime_type,
                        size=tracked.size,
                        sha256=tracked.sha256,
                        data_uri=tracked.data_uri,
                        created_at=time.time(),
                        change_type="deleted",
                        extra={
                            "hook": "workspace_scan",
                            "relative_path": relative_path,
                        },
                    )
                )
        if artifacts:
            self.emit_callback(artifacts)

    def _snapshot(self, workspace: Path) -> Tuple[Dict[str, _FileSignature], bool]:
        """Walk the workspace and hash every file.

        Returns:
            Tuple of (path -> signature mapping, truncated flag). The flag
            is True when the scan stopped early due to the file-count or
            byte-size limits.
        """
        entries: Dict[str, _FileSignature] = {}
        total_bytes = 0
        file_count = 0
        for root, dirs, files in os.walk(workspace):
            rel_root = Path(root).relative_to(workspace)
            # Prune excluded directories in place so os.walk skips them.
            dirs[:] = [d for d in dirs if not self._is_excluded(rel_root / d)]
            for filename in files:
                rel_path = rel_root / filename
                if self._is_excluded(rel_path):
                    continue
                full_path = Path(root) / filename
                try:
                    stat = full_path.stat()
                    sha256 = self._hash_file(full_path)
                except OSError:
                    # File disappeared or is unreadable; skip it.
                    continue
                file_count += 1
                total_bytes += stat.st_size
                entries[str(rel_path)] = _FileSignature(sha256=sha256, size=stat.st_size)
                if file_count >= self.max_files_scanned or total_bytes >= self.max_bytes_scanned:
                    # Fix: the original format string had a dangling third
                    # "%s" ("for session %s") with no matching argument,
                    # which made the logging call raise a formatting error.
                    self.logger.warning(
                        "Workspace scan truncated (files=%s total_bytes=%s)",
                        file_count,
                        total_bytes,
                    )
                    return entries, True
        return entries, False

    def _is_excluded(self, rel_path: Path) -> bool:
        """Return True when the path's top-level component is excluded."""
        if not rel_path.parts:
            return False
        return rel_path.parts[0] in self.exclude_dirs

    def _register_artifact(
        self,
        full_path: Path,
        relative_path: Path,
        node: Node,
        *,
        attachment_id: Optional[str] = None,
    ) -> AttachmentRecord:
        """Register a detected file with the attachment store (no copy)."""
        mime_type = mimetypes.guess_type(relative_path.name)[0] or "application/octet-stream"
        return self.attachment_store.register_file(
            full_path,
            kind=MessageBlockType.from_mime_type(mime_type),
            mime_type=mime_type,
            display_name=full_path.name,
            copy_file=False,
            persist=True,
            deduplicate=False,
            attachment_id=attachment_id,
            extra={
                "node_id": node.id,
                "relative_path": str(relative_path),
                "hook": "workspace_scan",
            },
        )

    def _to_artifact(
        self,
        record: AttachmentRecord,
        node: Node,
        relative_path: Path,
        full_path: Path,
        *,
        change_type: str,
    ) -> WorkspaceArtifact:
        """Convert an attachment record into a WorkspaceArtifact."""
        ref = record.ref
        return WorkspaceArtifact(
            node_id=node.id,
            attachment_id=ref.attachment_id or "",
            file_name=ref.name or full_path.name,
            relative_path=str(relative_path),
            absolute_path=str(full_path),
            mime_type=ref.mime_type,
            size=ref.size,
            sha256=ref.sha256,
            data_uri=ref.data_uri,
            created_at=time.time(),
            change_type=change_type,
            extra=dict(record.extra),
        )

    def _hash_file(self, path: Path) -> str:
        """Return the SHA-256 hex digest of the file, read in 1 MiB chunks."""
        hasher = hashlib.sha256()
        with path.open("rb") as handle:
            for chunk in iter(lambda: handle.read(1024 * 1024), b""):
                hasher.update(chunk)
        return hasher.hexdigest()
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "workflow/hooks/workspace_artifact.py",
"license": "Apache License 2.0",
"lines": 248,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
OpenBMB/ChatDev:workflow/runtime/execution_strategy.py | """Execution strategies for different graph topologies."""
from collections import Counter
from typing import Callable, Dict, List, Sequence
from entity.configs import Node
from entity.messages import Message
from utils.log_manager import LogManager
from workflow.executor.dag_executor import DAGExecutor
from workflow.executor.cycle_executor import CycleExecutor
from workflow.executor.parallel_executor import ParallelExecutor
class DagExecutionStrategy:
    """Executes acyclic graphs using the DAGExecutor."""

    def __init__(
        self,
        log_manager: LogManager,
        nodes: Dict[str, Node],
        layers: List[List[str]],
        execute_node_func: Callable[[Node], None],
    ) -> None:
        """Store the dependencies needed to drive a layered DAG run.

        Args:
            log_manager: Workflow log manager.
            nodes: Mapping of node id to node instance.
            layers: Topological layers; each inner list is one stage.
            execute_node_func: Callback that executes a single node.
        """
        self.log_manager = log_manager
        self.nodes = nodes
        self.layers = layers
        self.execute_node_func = execute_node_func

    def run(self) -> None:
        """Build a DAGExecutor from the stored state and execute the graph."""
        dag_executor = DAGExecutor(
            log_manager=self.log_manager,
            nodes=self.nodes,
            layers=self.layers,
            execute_node_func=self.execute_node_func,
        )
        dag_executor.execute()
class CycleExecutionStrategy:
    """Executes graphs containing cycles via CycleExecutor."""

    def __init__(
        self,
        log_manager: LogManager,
        nodes: Dict[str, Node],
        cycle_execution_order: List[Dict[str, str]],
        cycle_manager,
        execute_node_func: Callable[[Node], None],
    ) -> None:
        """Store the dependencies needed for cycle-aware execution.

        Args:
            log_manager: Workflow log manager.
            nodes: Mapping of node id to node instance.
            cycle_execution_order: Layered order of nodes and cycle groups.
            cycle_manager: Manager holding per-cycle entry/exit information.
            execute_node_func: Callback that executes a single node.
        """
        self.log_manager = log_manager
        self.nodes = nodes
        self.cycle_execution_order = cycle_execution_order
        self.cycle_manager = cycle_manager
        self.execute_node_func = execute_node_func

    def run(self) -> None:
        """Build a CycleExecutor from the stored state and execute the graph."""
        cycle_executor = CycleExecutor(
            log_manager=self.log_manager,
            nodes=self.nodes,
            cycle_execution_order=self.cycle_execution_order,
            cycle_manager=self.cycle_manager,
            execute_node_func=self.execute_node_func,
        )
        cycle_executor.execute()
class MajorityVoteStrategy:
    """Executes graphs configured for majority voting (no edges)."""

    def __init__(
        self,
        log_manager: LogManager,
        nodes: Dict[str, Node],
        initial_messages: Sequence[Message],
        execute_node_func: Callable[[Node], None],
        payload_to_text_func: Callable[[object], str],
    ) -> None:
        """Store the collaborators needed for a parallel voting run."""
        self.log_manager = log_manager
        self.nodes = nodes
        self.initial_messages = initial_messages
        self.execute_node_func = execute_node_func
        self.payload_to_text = payload_to_text_func

    def run(self) -> str:
        """Fan the initial messages out to every node, run all nodes in
        parallel, and return the majority output text."""
        self.log_manager.info("Executing graph with majority voting approach")
        candidates = list(self.nodes.values())
        if not candidates:
            self.log_manager.error("No nodes to execute in majority voting mode")
            return ""
        # Every node receives its own clones of the initial messages.
        for candidate in candidates:
            candidate.clear_input()
            for message in self.initial_messages:
                candidate.append_input(message.clone())
        runner = ParallelExecutor(self.log_manager, self.nodes)
        runner.execute_nodes_parallel(
            [candidate.id for candidate in candidates],
            lambda node_id: self.execute_node_func(self.nodes[node_id]),
        )
        return self._collect_majority_result()

    def _collect_majority_result(self) -> str:
        """Tally each node's last output and return the most common one.

        Empty outputs only win when every node produced an empty output.
        """
        summaries = [
            {
                "node_id": node_id,
                "node_type": node.node_type,
                "output": self.payload_to_text(node.output[-1]) if node.output else "",
            }
            for node_id, node in self.nodes.items()
        ]
        texts = [entry["output"] for entry in summaries]
        tally = Counter(texts)
        meaningful = [text for text in texts if text.strip()]
        if meaningful:
            # Prefer counting only non-empty outputs when any exist.
            tally = Counter(meaningful)
        if not tally:
            self.log_manager.warning("No outputs available for majority voting")
            return ""
        winner, votes = tally.most_common(1)[0]
        self.log_manager.info(
            "Majority output determined",
            details={"result": winner, "votes": votes},
        )
        self.log_manager.info(
            "All node outputs",
            details={
                "outputs": [
                    (
                        entry["node_id"],
                        entry["output"][:50] + "..." if len(entry["output"]) > 50 else entry["output"],
                    )
                    for entry in summaries
                ]
            },
        )
        return winner
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "workflow/runtime/execution_strategy.py",
"license": "Apache License 2.0",
"lines": 125,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
OpenBMB/ChatDev:workflow/runtime/result_archiver.py | """Utilities for persisting execution artifacts."""
from utils.log_manager import LogManager
from utils.token_tracker import TokenTracker
from workflow.graph_context import GraphContext
class ResultArchiver:
    """Handles post-execution persistence (tokens, logs, metadata)."""

    def __init__(
        self,
        graph: GraphContext,
        log_manager: LogManager,
        token_tracker: TokenTracker,
    ) -> None:
        """Store the graph context and trackers to archive from.

        Args:
            graph: Executed graph whose directory receives the artifacts.
            log_manager: Log manager holding the execution log records.
            token_tracker: Tracker holding per-run token usage.
        """
        self.graph = graph
        self.log_manager = log_manager
        self.token_tracker = token_tracker

    def export(self, final_result: str) -> None:
        """Write token usage, the workflow-end record, and logs to disk.

        Args:
            final_result: Final result text recorded in the end event.
        """
        # Token usage snapshot, named after the graph.
        token_usage_path = self.graph.directory / f"token_usage_{self.graph.name}.json"
        self.token_tracker.export_to_file(str(token_usage_path))
        self.log_manager.record_workflow_end(
            success=True,
            details={
                "token_usage": self.token_tracker.get_token_usage(),
                "final_result": final_result,
            },
        )
        log_file_path = self.graph.directory / "execution_logs.json"
        self.log_manager.save_logs(str(log_file_path))
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "workflow/runtime/result_archiver.py",
"license": "Apache License 2.0",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
OpenBMB/ChatDev:workflow/runtime/runtime_builder.py | """Builder that assembles the runtime context for workflow execution."""
from dataclasses import dataclass
from typing import Any, Dict, Optional
from runtime.node.agent import ToolManager
from utils.attachments import AttachmentStore
from utils.function_manager import EDGE_FUNCTION_DIR, EDGE_PROCESSOR_FUNCTION_DIR, get_function_manager
from utils.log_manager import LogManager
from utils.logger import WorkflowLogger
from utils.token_tracker import TokenTracker
from workflow.graph_context import GraphContext
from .runtime_context import RuntimeContext
@dataclass
class RuntimeBuilder:
    """Constructs RuntimeContext instances for GraphExecutor."""

    # Graph whose directory / config seeds the runtime context.
    graph: GraphContext

    def build(self, logger: Optional[WorkflowLogger] = None, *, session_id: Optional[str] = None) -> RuntimeContext:
        """Assemble a fully-wired RuntimeContext for one execution.

        Creates the code workspace and attachments directories on disk as a
        side effect.

        Args:
            logger: Optional pre-built logger; a new WorkflowLogger is
                created from the graph's name/log level when omitted.
            session_id: Optional session identifier propagated into the
                context and its global state.

        Returns:
            The populated RuntimeContext.
        """
        tool_manager = ToolManager()
        function_manager = get_function_manager(EDGE_FUNCTION_DIR)
        processor_function_manager = get_function_manager(EDGE_PROCESSOR_FUNCTION_DIR)
        logger = logger or WorkflowLogger(self.graph.name, self.graph.log_level)
        log_manager = LogManager(logger)
        token_tracker = TokenTracker(workflow_id=self.graph.name)
        # Workspace layout: <graph dir>/code_workspace/attachments
        code_workspace = (self.graph.directory / "code_workspace").resolve()
        code_workspace.mkdir(parents=True, exist_ok=True)
        attachments_dir = code_workspace / "attachments"
        attachments_dir.mkdir(parents=True, exist_ok=True)
        attachment_store = AttachmentStore(attachments_dir)
        global_state: Dict[str, Any] = {
            "graph_directory": self.graph.directory,
            "vars": self.graph.config.vars,
            "python_workspace_root": code_workspace,
            "attachment_store": attachment_store,
        }
        context = RuntimeContext(
            tool_manager=tool_manager,
            function_manager=function_manager,
            edge_processor_function_manager=processor_function_manager,
            logger=logger,
            log_manager=log_manager,
            token_tracker=token_tracker,
            attachment_store=attachment_store,
            code_workspace=code_workspace,
            global_state=global_state,
        )
        context.session_id = session_id
        if session_id:
            # Don't clobber a session id a caller may have injected already.
            context.global_state.setdefault("session_id", session_id)
        return context
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "workflow/runtime/runtime_builder.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
OpenBMB/ChatDev:workflow/runtime/runtime_context.py | """Shared runtime context for workflow execution."""
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, Optional
from runtime.node.agent import ToolManager
from utils.function_manager import FunctionManager
from utils.logger import WorkflowLogger
from utils.log_manager import LogManager
from utils.token_tracker import TokenTracker
from utils.attachments import AttachmentStore
@dataclass
class RuntimeContext:
    """Container for runtime-wide dependencies required by GraphExecutor."""

    tool_manager: ToolManager
    # Edge-condition functions.
    function_manager: FunctionManager
    # Edge-processor functions.
    edge_processor_function_manager: FunctionManager
    logger: WorkflowLogger
    log_manager: LogManager
    token_tracker: TokenTracker
    attachment_store: AttachmentStore
    # Root directory where node code executes and writes files.
    code_workspace: Path
    # Shared mutable state visible across the whole run.
    global_state: Dict[str, Any] = field(default_factory=dict)
    cycle_manager: Optional[Any] = None  # Late-bound by GraphManager
    session_id: Optional[str] = None
    # Optional workspace-artifact hook attached by the server layer.
    workspace_hook: Optional[Any] = None
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "workflow/runtime/runtime_context.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
OpenBMB/ChatDev:workflow/subgraph_loader.py | """Utilities for loading reusable subgraph YAML definitions."""
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, List, Tuple
from entity.configs import ConfigError
from utils.io_utils import read_yaml
_REPO_ROOT = Path(__file__).resolve().parents[1]
_DEFAULT_SUBGRAPH_ROOT = (_REPO_ROOT / "yaml_instance").resolve()
_SUBGRAPH_CACHE: Dict[Path, Dict[str, Any]] = {}
def _resolve_candidate_paths(file_path: str, parent_source: str | None) -> List[Path]:
path = Path(file_path)
if path.is_absolute():
return [path]
candidates: List[Path] = []
default_candidate = (_DEFAULT_SUBGRAPH_ROOT / path).resolve()
candidates.append(default_candidate)
if parent_source:
parent = Path(parent_source)
parent_dir = parent.parent if parent.is_file() else parent
candidates.append((parent_dir / path).resolve())
# As a last resort, allow relative to repo root / current working dir
candidates.append((_REPO_ROOT / path).resolve())
return candidates
def _resolve_existing_path(candidates: List[Path]) -> Path:
checked: List[str] = []
for candidate in candidates:
checked.append(str(candidate))
if candidate.exists():
return candidate
raise ConfigError(
f"subgraph YAML not found; tried: {', '.join(checked)}",
path=checked[-1] if checked else None,
)
def _load_graph_dict(path: Path) -> Dict[str, Any]:
data = read_yaml(path)
if not isinstance(data, dict):
raise ConfigError("subgraph YAML root must be a mapping", path=str(path))
graph_block = data.get("graph")
if graph_block is None:
graph_block = data
if not isinstance(graph_block, dict):
raise ConfigError("subgraph graph section must be a mapping", path=f"{path}.graph")
vars_block = data.get("vars") if isinstance(data.get("vars"), dict) else {}
return {"graph": graph_block, "vars": vars_block}
def load_subgraph_config(file_path: str, *, parent_source: str | None = None) -> Tuple[Dict[str, Any], Dict[str, Any], str]:
"""Load a subgraph definition from disk.
Returns a tuple of (graph_dict, resolved_path).
"""
candidates = _resolve_candidate_paths(file_path, parent_source)
resolved_path = _resolve_existing_path(candidates).resolve()
if resolved_path not in _SUBGRAPH_CACHE:
_SUBGRAPH_CACHE[resolved_path] = _load_graph_dict(resolved_path)
payload = _SUBGRAPH_CACHE[resolved_path]
graph_dict = deepcopy(payload["graph"])
vars_dict = dict(payload["vars"])
return graph_dict, vars_dict, str(resolved_path)
__all__ = ["load_subgraph_config"]
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "workflow/subgraph_loader.py",
"license": "Apache License 2.0",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
OpenBMB/ChatDev:workflow/topology_builder.py | """Graph topology builder utility for cycle detection and topological sorting.
This module provides stateless utilities for building execution order of graphs,
supporting both global graphs and scoped subgraphs (e.g., within cycles).
"""
from typing import Dict, List, Set, Any
from entity.configs import Node
from workflow.cycle_manager import CycleDetector
class GraphTopologyBuilder:
"""
Graph topology structure builder.
Responsibilities:
1. Detect cycles (based on CycleDetector)
2. Build super-node graphs
3. Perform topological sorting
Features:
- Stateless (pure static methods)
- Can be used for both global graphs and local subgraphs
- Does not depend on specific GraphContext instances
"""
@staticmethod
def detect_cycles(nodes: Dict[str, Node]) -> List[Set[str]]:
"""
Detect cycles in the given node set.
Args:
nodes: Dictionary of nodes to analyze
Returns:
List of cycles, where each cycle is a set of node IDs
"""
detector = CycleDetector()
return detector.detect_cycles(nodes)
@staticmethod
def create_super_node_graph(
nodes: Dict[str, Node],
edges: List[Dict[str, Any]],
cycles: List[Set[str]]
) -> Dict[str, Set[str]]:
"""
Create a super-node graph where each cycle is treated as a single node.
Args:
nodes: Node dictionary
edges: Edge configuration list (only edges to consider)
cycles: List of detected cycles
Returns:
Super-node dependency graph: {super_node_id: set(predecessor_super_node_ids)}
"""
super_nodes = {}
node_to_super = {}
# Create super-nodes for cycles
for i, cycle_nodes in enumerate(cycles):
super_node_id = f"super_cycle_{i}"
super_nodes[super_node_id] = set()
for node_id in cycle_nodes:
node_to_super[node_id] = super_node_id
# Create super-nodes for non-cycle nodes (each non-cycle node is its own super-node)
for node_id in nodes.keys():
if node_id not in node_to_super:
super_node_id = f"node_{node_id}"
super_nodes[super_node_id] = set()
node_to_super[node_id] = super_node_id
# Build dependencies between super-nodes
for edge_config in edges:
from_node = edge_config["from"]
to_node = edge_config["to"]
# Skip edges not in the node set
if from_node not in nodes or to_node not in nodes:
continue
from_super = node_to_super[from_node]
to_super = node_to_super[to_node]
# Only add dependency if between different super-nodes
if from_super != to_super:
super_nodes[to_super].add(from_super)
return super_nodes
@staticmethod
def topological_sort_super_nodes(
super_node_graph: Dict[str, Set[str]],
cycles: List[Set[str]]
) -> List[List[Dict[str, Any]]]:
"""
Perform topological sort on super-node graph to determine execution order.
Args:
super_node_graph: Super-node dependency graph
cycles: List of cycles for mapping super-nodes to cycle info
Returns:
Execution layers, where each layer contains items that can be executed in parallel.
Format: [
[{"type": "node", "node_id": "A"}, {"type": "cycle", "cycle_id": "...", "nodes": [...]}],
[...]
]
"""
# Calculate in-degrees
in_degree = {
super_node: len(predecessors)
for super_node, predecessors in super_node_graph.items()
}
# Find super-nodes with no dependencies
ready = [node for node, degree in in_degree.items() if degree == 0]
execution_layers = []
# Create cycle lookup
cycle_lookup = {}
for i, cycle_nodes in enumerate(cycles):
cycle_id = f"cycle_{i}_{cycle_nodes}"
cycle_lookup[f"super_cycle_{i}"] = {
"cycle_id": cycle_id,
"nodes": cycle_nodes
}
while ready:
current_layer = ready[:]
ready.clear()
# Convert to execution items
layer_items = []
for super_node in current_layer:
if super_node.startswith("super_cycle_"):
# Cycle super-node
cycle_data = cycle_lookup[super_node]
layer_items.append({
"type": "cycle",
"cycle_id": cycle_data["cycle_id"],
"nodes": list(cycle_data["nodes"])
})
elif super_node.startswith("node_"):
# Regular node
node_id = super_node.replace("node_", "")
layer_items.append({
"type": "node",
"node_id": node_id
})
# Update dependencies
for dependent in super_node_graph:
if super_node in super_node_graph[dependent]:
super_node_graph[dependent].remove(super_node)
in_degree[dependent] -= 1
if in_degree[dependent] == 0:
ready.append(dependent)
if layer_items:
execution_layers.append(layer_items)
return execution_layers
@staticmethod
def build_execution_order(
nodes: Dict[str, Node],
edges: List[Dict[str, Any]]
) -> List[List[Dict[str, Any]]]:
"""
One-stop method to build execution order.
Combines cycle detection, super-node construction, and topological sorting.
Args:
nodes: Node dictionary
edges: Edge configuration list
Returns:
Execution layers
"""
cycles = GraphTopologyBuilder.detect_cycles(nodes)
if not cycles:
# No cycles, return DAG layers directly
return GraphTopologyBuilder.build_dag_layers(nodes)
super_graph = GraphTopologyBuilder.create_super_node_graph(
nodes, edges, cycles
)
return GraphTopologyBuilder.topological_sort_super_nodes(
super_graph, cycles
)
@staticmethod
def build_dag_layers(nodes: Dict[str, Node]) -> List[List[Dict[str, Any]]]:
"""
Build topological layers for DAG (Directed Acyclic Graph).
Args:
nodes: Node dictionary
Returns:
Layers in execution item format
"""
in_degree = {
node_id: len(node.predecessors)
for node_id, node in nodes.items()
}
frontier = [
node_id for node_id, deg in in_degree.items() if deg == 0
]
layers = []
while frontier:
# Convert to execution item format
layer_items = [
{"type": "node", "node_id": node_id}
for node_id in frontier
]
layers.append(layer_items)
next_frontier = []
for node_id in frontier:
for successor in nodes[node_id].successors:
in_degree[successor.id] -= 1
if in_degree[successor.id] == 0:
next_frontier.append(successor.id)
frontier = next_frontier
return layers
| {
"repo_id": "OpenBMB/ChatDev",
"file_path": "workflow/topology_builder.py",
"license": "Apache License 2.0",
"lines": 192,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
PaddlePaddle/PaddleOCR:deploy/paddleocr_vl_docker/hps/gateway/app.py | #!/usr/bin/env python
# Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import json
import logging
import os
import urllib.request
from contextlib import asynccontextmanager
from typing import Optional
import fastapi
from fastapi import Request
from fastapi.exceptions import RequestValidationError
from fastapi.responses import JSONResponse
from paddlex.inference.serving.infra.models import AIStudioNoResultResponse
from paddlex.inference.serving.infra.utils import generate_log_id
from paddlex_hps_client import triton_request_async
from tritonclient.grpc import aio as triton_grpc_aio
TRITON_URL = os.getenv("HPS_TRITON_URL", "paddleocr-vl-tritonserver:8001")
MAX_CONCURRENT_INFERENCE_REQUESTS = int(
os.getenv("HPS_MAX_CONCURRENT_INFERENCE_REQUESTS", "16")
)
MAX_CONCURRENT_NON_INFERENCE_REQUESTS = int(
os.getenv("HPS_MAX_CONCURRENT_NON_INFERENCE_REQUESTS", "64")
)
INFERENCE_TIMEOUT = int(os.getenv("HPS_INFERENCE_TIMEOUT", "600"))
LOG_LEVEL = os.getenv("HPS_LOG_LEVEL", "INFO")
HEALTH_CHECK_TIMEOUT = int(os.getenv("HPS_HEALTH_CHECK_TIMEOUT", "5"))
FILTER_HEALTH_ACCESS_LOG = os.getenv(
"HPS_FILTER_HEALTH_ACCESS_LOG", "true"
).lower() in (
"true",
"1",
"yes",
)
VLM_URL = os.getenv("HPS_VLM_URL", "http://paddleocr-vlm-server:8080")
TRITON_MODEL_LAYOUT_PARSING = "layout-parsing"
TRITON_MODEL_RESTRUCTURE_PAGES = "restructure-pages"
TRITON_MODELS = (TRITON_MODEL_LAYOUT_PARSING, TRITON_MODEL_RESTRUCTURE_PAGES)
logger = logging.getLogger(__name__)
def _configure_logger(logger: logging.Logger) -> None:
level = getattr(logging, LOG_LEVEL.upper(), logging.INFO)
logger.setLevel(level)
handler = logging.StreamHandler()
handler.setLevel(level)
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(funcName)s - %(levelname)s - %(message)s"
)
handler.setFormatter(formatter)
logger.addHandler(handler)
_configure_logger(logger)
def _create_aistudio_output_without_result(
error_code: int, error_msg: str, *, log_id: Optional[str] = None
) -> dict:
"""Create a standardized error response in AIStudio format."""
resp = AIStudioNoResultResponse(
logId=log_id if log_id is not None else generate_log_id(),
errorCode=error_code,
errorMsg=error_msg,
)
return resp.model_dump()
@asynccontextmanager
async def _lifespan(app: fastapi.FastAPI):
"""
Manage application lifecycle:
- Initialize Triton client and semaphores on startup
- Clean up resources on shutdown
"""
logger.info("Initializing gateway...")
logger.info("Triton URL: %s", TRITON_URL)
logger.info(
"Max concurrent inference requests: %d", MAX_CONCURRENT_INFERENCE_REQUESTS
)
logger.info(
"Max concurrent non-inference requests: %d",
MAX_CONCURRENT_NON_INFERENCE_REQUESTS,
)
logger.info("Inference timeout: %ds", INFERENCE_TIMEOUT)
# Initialize async Triton client
app.state.triton_client = triton_grpc_aio.InferenceServerClient(
url=TRITON_URL,
keepalive_options=triton_grpc_aio.KeepAliveOptions(
keepalive_timeout_ms=INFERENCE_TIMEOUT * 1000,
),
)
# Separate semaphores for inference and non-inference operations
app.state.inference_semaphore = asyncio.Semaphore(MAX_CONCURRENT_INFERENCE_REQUESTS)
app.state.non_inference_semaphore = asyncio.Semaphore(
MAX_CONCURRENT_NON_INFERENCE_REQUESTS
)
logger.info("Gateway initialized successfully")
yield
# Cleanup
logger.info("Shutting down gateway...")
await app.state.triton_client.close()
logger.info("Gateway shutdown complete")
app = fastapi.FastAPI(
title="PaddleOCR-VL HPS Gateway",
description="High Performance Server Gateway for PaddleOCR-VL",
version="1.0.0",
lifespan=_lifespan,
)
@app.get("/health", operation_id="checkHealth")
async def health():
"""Liveness check - returns healthy if the gateway process is running."""
return _create_aistudio_output_without_result(0, "Healthy")
async def _check_vlm_ready() -> bool:
"""Check if the VLM server is ready by querying its health endpoint."""
def _do_check():
req = urllib.request.Request(f"{VLM_URL}/health")
try:
with urllib.request.urlopen(req, timeout=HEALTH_CHECK_TIMEOUT) as resp:
return resp.status == 200
except Exception:
return False
return await asyncio.to_thread(_do_check)
@app.get("/health/ready", operation_id="checkReady")
async def ready(request: Request):
"""Readiness check - verifies Triton server, models, and VLM server."""
try:
client = request.app.state.triton_client
# Check Triton server readiness with timeout
is_server_ready = await asyncio.wait_for(
client.is_server_ready(),
timeout=HEALTH_CHECK_TIMEOUT,
)
if not is_server_ready:
return JSONResponse(
status_code=503,
content=_create_aistudio_output_without_result(
503, "Triton server not ready"
),
)
# Check if required models are ready
for model_name in TRITON_MODELS:
is_model_ready = await asyncio.wait_for(
client.is_model_ready(model_name),
timeout=HEALTH_CHECK_TIMEOUT,
)
if not is_model_ready:
return JSONResponse(
status_code=503,
content=_create_aistudio_output_without_result(
503, f"Model '{model_name}' not ready"
),
)
# Check VLM server readiness
vlm_ready = await _check_vlm_ready()
if not vlm_ready:
return JSONResponse(
status_code=503,
content=_create_aistudio_output_without_result(
503, "VLM server not ready"
),
)
return _create_aistudio_output_without_result(0, "Ready")
except asyncio.TimeoutError:
logger.error("Health check timed out after %ds", HEALTH_CHECK_TIMEOUT)
return JSONResponse(
status_code=503,
content=_create_aistudio_output_without_result(
503, "Health check timed out"
),
)
except Exception as e:
logger.error("Health check failed: %s", e)
return JSONResponse(
status_code=503,
content=_create_aistudio_output_without_result(
503, f"Service unavailable: {e}"
),
)
async def _process_triton_request(
request: Request,
body: dict,
model_name: str,
semaphore: asyncio.Semaphore,
) -> JSONResponse:
"""Process a request through Triton inference server."""
request_log_id = body.get("logId", generate_log_id())
logger.info(
"Processing %r request %s",
model_name,
request_log_id,
)
if "logId" in body:
logger.debug(
"Using external logId for %r request: %s",
model_name,
request_log_id,
)
body["logId"] = request_log_id
client = request.app.state.triton_client
try:
async with semaphore:
output = await triton_request_async(
client,
model_name,
body,
timeout=INFERENCE_TIMEOUT,
)
except asyncio.TimeoutError:
logger.warning(
"Timeout processing %r request %s",
model_name,
request_log_id,
)
return JSONResponse(
status_code=504,
content=_create_aistudio_output_without_result(
504, "Gateway timeout", log_id=request_log_id
),
)
except triton_grpc_aio.InferenceServerException as e:
if "Deadline Exceeded" in str(e):
logger.warning(
"Triton timeout for %r request %s",
model_name,
request_log_id,
)
return JSONResponse(
status_code=504,
content=_create_aistudio_output_without_result(
504, "Gateway timeout", log_id=request_log_id
),
)
logger.error(
"Triton error for %r request %s: %s",
model_name,
request_log_id,
e,
)
return JSONResponse(
status_code=500,
content=_create_aistudio_output_without_result(
500, "Internal server error", log_id=request_log_id
),
)
except Exception:
logger.exception(
"Unexpected error for %r request %s",
model_name,
request_log_id,
)
return JSONResponse(
status_code=500,
content=_create_aistudio_output_without_result(
500, "Internal server error", log_id=request_log_id
),
)
if output.get("errorCode", 0) != 0:
error_code = output.get("errorCode", 500)
error_msg = output.get("errorMsg", "Unknown error")
logger.warning(
"Triton returned error for %r request %s: %s",
model_name,
request_log_id,
error_msg,
)
return JSONResponse(
status_code=error_code,
content=_create_aistudio_output_without_result(
error_code, error_msg, log_id=request_log_id
),
)
logger.info(
"Completed %r request %s",
model_name,
request_log_id,
)
return JSONResponse(status_code=200, content=output)
@app.post(
"/layout-parsing",
operation_id="infer",
summary=f"Invoke {TRITON_MODEL_LAYOUT_PARSING} model",
response_class=JSONResponse,
)
async def _handle_infer(request: Request, body: dict):
"""Handle layout-parsing inference request."""
return await _process_triton_request(
request,
body,
TRITON_MODEL_LAYOUT_PARSING,
request.app.state.inference_semaphore,
)
@app.post(
"/restructure-pages",
operation_id="restructurePages",
summary=f"Invoke {TRITON_MODEL_RESTRUCTURE_PAGES} model",
response_class=JSONResponse,
)
async def _handle_restructure_pages(request: Request, body: dict):
"""Handle restructure-pages request (non-inference)."""
return await _process_triton_request(
request,
body,
TRITON_MODEL_RESTRUCTURE_PAGES,
request.app.state.non_inference_semaphore,
)
@app.exception_handler(json.JSONDecodeError)
async def _json_decode_exception_handler(request: Request, exc: json.JSONDecodeError):
"""Handle invalid JSON in request body."""
logger.warning("Invalid JSON for %s: %s", request.url.path, exc.msg)
return JSONResponse(
status_code=400,
content=_create_aistudio_output_without_result(400, f"Invalid JSON: {exc.msg}"),
)
@app.exception_handler(RequestValidationError)
async def _validation_exception_handler(request: Request, exc: RequestValidationError):
"""Handle request validation errors."""
error_details = exc.errors()
# Format error messages for readability
error_messages = []
for error in error_details:
loc = ".".join(str(x) for x in error.get("loc", []))
msg = error.get("msg", "Unknown error")
error_messages.append(f"{loc}: {msg}" if loc else msg)
error_msg = "; ".join(error_messages)
logger.warning("Validation error for %s: %s", request.url.path, error_msg)
return JSONResponse(
status_code=422,
content=_create_aistudio_output_without_result(422, error_msg),
)
@app.exception_handler(asyncio.TimeoutError)
async def _timeout_exception_handler(request: Request, exc: asyncio.TimeoutError):
"""Handle timeout errors."""
logger.warning("Request timed out: %s", request.url.path)
return JSONResponse(
status_code=504,
content=_create_aistudio_output_without_result(504, "Gateway timeout"),
)
@app.exception_handler(Exception)
async def _general_exception_handler(request: Request, exc: Exception):
"""Handle unexpected errors."""
logger.exception("Unhandled exception for %s", request.url.path)
return JSONResponse(
status_code=500,
content=_create_aistudio_output_without_result(500, "Internal server error"),
)
class _HealthEndpointFilter(logging.Filter):
"""Filter out health check endpoints from access logs."""
def filter(self, record: logging.LogRecord) -> bool:
message = record.getMessage()
return "/health" not in message
# Apply filter to reduce log noise from health checks
if FILTER_HEALTH_ACCESS_LOG:
logging.getLogger("uvicorn.access").addFilter(_HealthEndpointFilter())
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "deploy/paddleocr_vl_docker/hps/gateway/app.py",
"license": "Apache License 2.0",
"lines": 358,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:langchain-paddleocr/scripts/check_imports.py | import sys
import traceback
from importlib.machinery import SourceFileLoader
if __name__ == "__main__":
files = sys.argv[1:]
has_failure = False
for file in files:
try:
SourceFileLoader("x", file).load_module()
except Exception:
has_failure = True
print(file) # noqa: T201
traceback.print_exc()
print() # noqa: T201
sys.exit(1 if has_failure else 0)
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "langchain-paddleocr/scripts/check_imports.py",
"license": "Apache License 2.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
PaddlePaddle/PaddleOCR:langchain-paddleocr/tests/integration_tests/document_loaders/test_paddleocr_vl_loader.py | from __future__ import annotations
import os
from pathlib import Path
import pytest
from pydantic import SecretStr
from langchain_paddleocr import PaddleOCRVLLoader
def test_paddleocr_vl_loader_live_integration() -> None:
"""Live integration test against a real PaddleOCR-VL endpoint.
This test requires the following environment variables to be set:
- ``PADDLEOCR_VL_API_URL``: The PaddleOCR-VL HTTP endpoint.
- ``PADDLEOCR_ACCESS_TOKEN``: Access token for the endpoint.
"""
api_url = os.getenv("PADDLEOCR_VL_API_URL")
access_token = os.getenv("PADDLEOCR_ACCESS_TOKEN")
if not api_url or not access_token:
pytest.skip(
"PADDLEOCR_VL_API_URL and PADDLEOCR_ACCESS_TOKEN must be"
" set for integration tests."
)
tests_dir = Path(__file__).resolve().parents[2]
sample_paths = [
str(tests_dir / "data" / "sample_pdf.pdf"),
str(tests_dir / "data" / "sample_img.jpg"),
]
loader = PaddleOCRVLLoader(
file_path=sample_paths,
api_url=api_url,
access_token=SecretStr(access_token),
)
docs = list(loader.lazy_load())
assert len(docs) == 2
for doc, input_path in zip(docs, sample_paths):
assert isinstance(doc.page_content, str)
assert doc.metadata.get("source") == input_path
assert "paddleocr_vl_raw_response" in doc.metadata
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "langchain-paddleocr/tests/integration_tests/document_loaders/test_paddleocr_vl_loader.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PaddlePaddle/PaddleOCR:langchain-paddleocr/tests/unit_tests/document_loaders/test_paddleocr_vl_loader.py | from __future__ import annotations
from typing import Any
import pytest
from langchain_paddleocr import PaddleOCRVLLoader
def test_snake_to_camel_conversion_and_additional_params(
monkeypatch: pytest.MonkeyPatch,
) -> None:
"""Ensure that snake_case parameters and additional_params are converted to
camelCase."""
captured_service_params: dict[str, Any] = {}
# Patch the loader to expose internal _service_params for inspection.
original_init = PaddleOCRVLLoader.__init__
def _wrapped_init(self: PaddleOCRVLLoader, *args: Any, **kwargs: Any) -> None:
original_init(self, *args, **kwargs)
nonlocal captured_service_params
captured_service_params = getattr(self, "_service_params")
monkeypatch.setattr(PaddleOCRVLLoader, "__init__", _wrapped_init)
_ = PaddleOCRVLLoader(
file_path="dummy.pdf",
api_url="http://example.com",
use_doc_orientation_classify=True,
layout_unclip_ratio=(0.1, 0.9),
prompt_label="ocr",
additional_params={"customOption": 1, "anotherFlag": True},
)
# Keys from constructor
assert captured_service_params["useDocOrientationClassify"] is True
assert captured_service_params["layoutUnclipRatio"] == (0.1, 0.9)
assert captured_service_params["promptLabel"] == "ocr"
# Keys from additional_params
assert captured_service_params["customOption"] == 1
assert captured_service_params["anotherFlag"] is True
def test_file_type_normalization_and_inference(
tmp_path_factory: pytest.TempdirFactory,
) -> None:
"""Ensure file_type normalization and type inference from file extension
work as expected."""
pdf_file = tmp_path_factory.mktemp("pdf") / "sample.pdf"
pdf_file.parent.mkdir(parents=True, exist_ok=True)
pdf_file.write_bytes(b"%PDF-1.4")
image_file = tmp_path_factory.mktemp("img") / "sample.png"
image_file.parent.mkdir(parents=True, exist_ok=True)
image_file.write_bytes(b"\x89PNG\r\n\x1a\n")
loader_pdf_hint = PaddleOCRVLLoader(
file_path=str(pdf_file),
api_url="http://example.com",
file_type="pdf",
)
assert loader_pdf_hint.file_type == 0
loader_image_hint = PaddleOCRVLLoader(
file_path=str(image_file),
api_url="http://example.com",
file_type="image",
)
assert loader_image_hint.file_type == 1
def test_lazy_load_raises_for_unreadable_file() -> None:
"""Ensure lazy_load raises when a file cannot be read."""
loader = PaddleOCRVLLoader(
file_path="nonexistent-file.pdf",
api_url="http://example.com",
)
with pytest.raises(ValueError):
list(loader.lazy_load())
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "langchain-paddleocr/tests/unit_tests/document_loaders/test_paddleocr_vl_loader.py",
"license": "Apache License 2.0",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PaddlePaddle/PaddleOCR:tests/test_rec_postprocess.py | import os
import sys
import numpy as np
import pytest
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.abspath(os.path.join(current_dir, "..")))
from ppocr.postprocess.rec_postprocess import BaseRecLabelDecode
class TestBaseRecLabelDecode:
"""Tests for BaseRecLabelDecode.get_word_info() method."""
@pytest.fixture
def decoder(self):
"""Create a BaseRecLabelDecode instance for testing."""
return BaseRecLabelDecode()
def test_get_word_info_with_german_accented_chars(self, decoder):
"""Test that German words with accented characters are not split."""
text = "Grüßen"
selection = np.ones(len(text), dtype=bool)
word_list, _, state_list = decoder.get_word_info(text, selection)
assert len(word_list) == 1, "German word should not be split"
assert "".join(word_list[0]) == "Grüßen"
assert state_list[0] == "en&num"
def test_get_word_info_with_longer_german_word(self, decoder):
"""Test longer German words with umlauts remain intact."""
text = "ungewöhnlichen"
selection = np.ones(len(text), dtype=bool)
word_list, _, state_list = decoder.get_word_info(text, selection)
assert len(word_list) == 1, "German word should not be split"
assert "".join(word_list[0]) == "ungewöhnlichen"
assert state_list[0] == "en&num"
def test_get_word_info_with_french_accented_chars(self, decoder):
"""Test French words with accented characters."""
text = "café"
selection = np.ones(len(text), dtype=bool)
word_list, _, state_list = decoder.get_word_info(text, selection)
assert len(word_list) == 1, "French word should not be split"
assert "".join(word_list[0]) == "café"
def test_get_word_info_underscore_as_splitter(self, decoder):
"""Test that underscores are treated as word splitters."""
text = "hello_world"
selection = np.ones(len(text), dtype=bool)
word_list, _, state_list = decoder.get_word_info(text, selection)
assert len(word_list) == 2, "Underscore should split words"
assert "".join(word_list[0]) == "hello"
assert "".join(word_list[1]) == "world"
def test_get_word_info_with_mixed_content(self, decoder):
"""Test mixed content with spaces and accented characters."""
text = "Grüßen Sie"
selection = np.ones(len(text), dtype=bool)
word_list, _, state_list = decoder.get_word_info(text, selection)
assert len(word_list) == 2, "Should have two words separated by space"
assert "".join(word_list[0]) == "Grüßen"
assert "".join(word_list[1]) == "Sie"
def test_get_word_info_with_french_apostrophe(self, decoder):
"""Test French words with apostrophes like n'êtes."""
text = "n'êtes"
selection = np.ones(len(text), dtype=bool)
word_list, _, state_list = decoder.get_word_info(text, selection)
# Apostrophe should keep words connected in French context
assert len(word_list) == 1, "French apostrophe should connect words"
assert "".join(word_list[0]) == "n'êtes"
def test_get_word_info_with_ascii_only(self, decoder):
"""Test backward compatibility with ASCII-only text."""
text = "hello world"
selection = np.ones(len(text), dtype=bool)
word_list, _, state_list = decoder.get_word_info(text, selection)
assert len(word_list) == 2
assert "".join(word_list[0]) == "hello"
assert "".join(word_list[1]) == "world"
def test_get_word_info_with_numbers(self, decoder):
"""Test that numbers are properly handled."""
text = "VGG-16"
selection = np.ones(len(text), dtype=bool)
word_list, _, state_list = decoder.get_word_info(text, selection)
assert len(word_list) == 1, "Hyphenated word-number should stay together"
assert "".join(word_list[0]) == "VGG-16"
def test_get_word_info_with_floating_point(self, decoder):
"""Test floating point numbers stay together."""
text = "price 3.14"
selection = np.ones(len(text), dtype=bool)
word_list, _, state_list = decoder.get_word_info(text, selection)
assert len(word_list) == 2
assert "".join(word_list[0]) == "price"
assert "".join(word_list[1]) == "3.14"
def test_get_word_info_with_chinese(self, decoder):
"""Test Chinese characters are properly grouped."""
text = "你好啊"
selection = np.ones(len(text), dtype=bool)
word_list, _, state_list = decoder.get_word_info(text, selection)
assert len(word_list) == 1
assert "".join(word_list[0]) == "你好啊"
assert state_list[0] == "cn"
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "tests/test_rec_postprocess.py",
"license": "Apache License 2.0",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PaddlePaddle/PaddleOCR:paddleocr/_pipelines/paddleocr_vl.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .._utils.cli import (
add_simple_inference_args,
get_subcommand_args,
perform_simple_inference,
str2bool,
)
from .base import PaddleXPipelineWrapper, PipelineCLISubcommandExecutor
from .utils import create_config_from_structure
_AVAILABLE_PIPELINE_VERSIONS = ["v1", "v1.5"]
_DEFAULT_PIPELINE_VERSION = "v1.5"
_SUPPORTED_VL_BACKENDS = [
"native",
"vllm-server",
"sglang-server",
"fastdeploy-server",
"mlx-vlm-server",
"llama-cpp-server",
]
class PaddleOCRVL(PaddleXPipelineWrapper):
    """Wrapper for the PaddleX "PaddleOCR-VL" document-parsing pipeline.

    Validates the requested pipeline version and VL-recognition backend at
    construction time, records all model/config parameters, and forwards
    prediction calls to the underlying PaddleX pipeline.
    """
    def __init__(
        self,
        pipeline_version=_DEFAULT_PIPELINE_VERSION,
        layout_detection_model_name=None,
        layout_detection_model_dir=None,
        layout_threshold=None,
        layout_nms=None,
        layout_unclip_ratio=None,
        layout_merge_bboxes_mode=None,
        vl_rec_model_name=None,
        vl_rec_model_dir=None,
        vl_rec_backend=None,
        vl_rec_server_url=None,
        vl_rec_max_concurrency=None,
        vl_rec_api_model_name=None,
        vl_rec_api_key=None,
        doc_orientation_classify_model_name=None,
        doc_orientation_classify_model_dir=None,
        doc_unwarping_model_name=None,
        doc_unwarping_model_dir=None,
        use_doc_orientation_classify=None,
        use_doc_unwarping=None,
        use_layout_detection=None,
        use_chart_recognition=None,
        use_seal_recognition=None,
        use_ocr_for_image_block=None,
        format_block_content=None,
        merge_layout_blocks=None,
        markdown_ignore_labels=None,
        use_queues=None,
        **kwargs,
    ):
        """Validate version/backend choices and snapshot config parameters.

        All keyword arguments default to ``None``, meaning "use the pipeline
        default"; non-``None`` values become PaddleX config overrides (see
        ``_get_paddlex_config_overrides``). Extra ``**kwargs`` are passed to
        the ``PaddleXPipelineWrapper`` initializer.

        Raises:
            ValueError: If ``pipeline_version`` is not in
                ``_AVAILABLE_PIPELINE_VERSIONS``, or ``vl_rec_backend`` is
                given but not in ``_SUPPORTED_VL_BACKENDS``.
        """
        if pipeline_version not in _AVAILABLE_PIPELINE_VERSIONS:
            raise ValueError(
                f"Invalid pipeline version: {pipeline_version}. Supported versions are {_AVAILABLE_PIPELINE_VERSIONS}."
            )
        if vl_rec_backend is not None and vl_rec_backend not in _SUPPORTED_VL_BACKENDS:
            raise ValueError(
                f"Invalid backend for the VL recognition module: {vl_rec_backend}. Supported values are {_SUPPORTED_VL_BACKENDS}."
            )
        # Snapshot every named constructor argument. NOTE: locals() must be
        # read before any additional local variable is created here.
        params = locals().copy()
        params.pop("self")
        # pipeline_version selects the PaddleX pipeline name and is stored
        # separately; it is not a config override.
        params.pop("pipeline_version")
        params.pop("kwargs")
        self._params = params
        self.pipeline_version = pipeline_version
        super().__init__(**kwargs)
    @property
    def _paddlex_pipeline_name(self):
        # Maps the user-facing version string to the PaddleX pipeline name.
        if self.pipeline_version == "v1":
            return "PaddleOCR-VL"
        elif self.pipeline_version == "v1.5":
            return "PaddleOCR-VL-1.5"
        else:
            # Unreachable: __init__ already validated pipeline_version.
            raise AssertionError(f"Unknown pipeline version: {self.pipeline_version}")
    def predict_iter(
        self,
        input,
        *,
        use_doc_orientation_classify=None,
        use_doc_unwarping=None,
        use_layout_detection=None,
        use_chart_recognition=None,
        use_seal_recognition=None,
        use_ocr_for_image_block=None,
        layout_threshold=None,
        layout_nms=None,
        layout_unclip_ratio=None,
        layout_merge_bboxes_mode=None,
        layout_shape_mode="auto",
        use_queues=None,
        prompt_label=None,
        format_block_content=None,
        repetition_penalty=None,
        temperature=None,
        top_p=None,
        min_pixels=None,
        max_pixels=None,
        max_new_tokens=None,
        merge_layout_blocks=None,
        markdown_ignore_labels=None,
        vlm_extra_args=None,
        **kwargs,
    ):
        """Run prediction, forwarding all options to the PaddleX pipeline.

        Returns whatever ``paddlex_pipeline.predict`` returns (presumably an
        iterable of per-page results — see ``predict``, which materializes it
        with ``list``).
        """
        return self.paddlex_pipeline.predict(
            input,
            use_doc_orientation_classify=use_doc_orientation_classify,
            use_doc_unwarping=use_doc_unwarping,
            use_layout_detection=use_layout_detection,
            use_chart_recognition=use_chart_recognition,
            use_seal_recognition=use_seal_recognition,
            use_ocr_for_image_block=use_ocr_for_image_block,
            layout_threshold=layout_threshold,
            layout_nms=layout_nms,
            layout_unclip_ratio=layout_unclip_ratio,
            layout_merge_bboxes_mode=layout_merge_bboxes_mode,
            layout_shape_mode=layout_shape_mode,
            use_queues=use_queues,
            prompt_label=prompt_label,
            format_block_content=format_block_content,
            repetition_penalty=repetition_penalty,
            temperature=temperature,
            top_p=top_p,
            min_pixels=min_pixels,
            max_pixels=max_pixels,
            max_new_tokens=max_new_tokens,
            merge_layout_blocks=merge_layout_blocks,
            markdown_ignore_labels=markdown_ignore_labels,
            vlm_extra_args=vlm_extra_args,
            **kwargs,
        )
    def predict(
        self,
        input,
        *,
        use_doc_orientation_classify=None,
        use_doc_unwarping=None,
        use_layout_detection=None,
        use_chart_recognition=None,
        use_seal_recognition=None,
        use_ocr_for_image_block=None,
        layout_threshold=None,
        layout_nms=None,
        layout_unclip_ratio=None,
        layout_merge_bboxes_mode=None,
        layout_shape_mode="auto",
        use_queues=None,
        prompt_label=None,
        format_block_content=None,
        repetition_penalty=None,
        temperature=None,
        top_p=None,
        min_pixels=None,
        max_pixels=None,
        max_new_tokens=None,
        merge_layout_blocks=None,
        markdown_ignore_labels=None,
        vlm_extra_args=None,
        **kwargs,
    ):
        """Eager variant of ``predict_iter``: collect all results in a list."""
        return list(
            self.predict_iter(
                input,
                use_doc_orientation_classify=use_doc_orientation_classify,
                use_doc_unwarping=use_doc_unwarping,
                use_layout_detection=use_layout_detection,
                use_chart_recognition=use_chart_recognition,
                use_seal_recognition=use_seal_recognition,
                use_ocr_for_image_block=use_ocr_for_image_block,
                layout_threshold=layout_threshold,
                layout_nms=layout_nms,
                layout_unclip_ratio=layout_unclip_ratio,
                layout_merge_bboxes_mode=layout_merge_bboxes_mode,
                layout_shape_mode=layout_shape_mode,
                use_queues=use_queues,
                prompt_label=prompt_label,
                format_block_content=format_block_content,
                repetition_penalty=repetition_penalty,
                temperature=temperature,
                top_p=top_p,
                min_pixels=min_pixels,
                max_pixels=max_pixels,
                max_new_tokens=max_new_tokens,
                merge_layout_blocks=merge_layout_blocks,
                markdown_ignore_labels=markdown_ignore_labels,
                vlm_extra_args=vlm_extra_args,
                **kwargs,
            )
        )
    def concatenate_markdown_pages(self, markdown_list):
        """Delegate page concatenation to the underlying PaddleX pipeline."""
        return self.paddlex_pipeline.concatenate_markdown_pages(markdown_list)
    def restructure_pages(
        self, res_list, merge_tables=True, relevel_titles=True, concatenate_pages=False
    ):
        """Post-process page results via the PaddleX pipeline; returns a list."""
        return list(
            self.paddlex_pipeline.restructure_pages(
                res_list,
                merge_tables=merge_tables,
                relevel_titles=relevel_titles,
                concatenate_pages=concatenate_pages,
            )
        )
    @classmethod
    def get_cli_subcommand_executor(cls):
        """Return the CLI subcommand executor for this pipeline."""
        return PaddleOCRVLCLISubcommandExecutor()
    def _get_paddlex_config_overrides(self):
        """Map recorded constructor parameters onto PaddleX config paths."""
        STRUCTURE = {
            "SubPipelines.DocPreprocessor.use_doc_orientation_classify": self._params[
                "use_doc_orientation_classify"
            ],
            "SubPipelines.DocPreprocessor.use_doc_unwarping": self._params[
                "use_doc_unwarping"
            ],
            # The preprocessor is enabled if either of its stages is requested.
            "use_doc_preprocessor": self._params["use_doc_orientation_classify"]
            or self._params["use_doc_unwarping"],
            "use_layout_detection": self._params["use_layout_detection"],
            "use_chart_recognition": self._params["use_chart_recognition"],
            "format_block_content": self._params["format_block_content"],
            "merge_layout_blocks": self._params["merge_layout_blocks"],
            "markdown_ignore_labels": self._params["markdown_ignore_labels"],
            "use_queues": self._params["use_queues"],
            "SubModules.LayoutDetection.model_name": self._params[
                "layout_detection_model_name"
            ],
            "SubModules.LayoutDetection.model_dir": self._params[
                "layout_detection_model_dir"
            ],
            "SubModules.LayoutDetection.threshold": self._params["layout_threshold"],
            "SubModules.LayoutDetection.layout_nms": self._params["layout_nms"],
            "SubModules.LayoutDetection.layout_unclip_ratio": self._params[
                "layout_unclip_ratio"
            ],
            "SubModules.LayoutDetection.layout_merge_bboxes_mode": self._params[
                "layout_merge_bboxes_mode"
            ],
            "SubModules.VLRecognition.model_name": self._params["vl_rec_model_name"],
            "SubModules.VLRecognition.model_dir": self._params["vl_rec_model_dir"],
            "SubModules.VLRecognition.genai_config.backend": self._params[
                "vl_rec_backend"
            ],
            "SubModules.VLRecognition.genai_config.server_url": self._params[
                "vl_rec_server_url"
            ],
            "SubModules.VLRecognition.genai_config.max_concurrency": self._params[
                "vl_rec_max_concurrency"
            ],
            "SubModules.VLRecognition.genai_config.client_kwargs.model_name": self._params[
                "vl_rec_api_model_name"
            ],
            "SubModules.VLRecognition.genai_config.client_kwargs.api_key": self._params[
                "vl_rec_api_key"
            ],
            "SubPipelines.DocPreprocessor.SubModules.DocOrientationClassify.model_name": self._params[
                "doc_orientation_classify_model_name"
            ],
            "SubPipelines.DocPreprocessor.SubModules.DocOrientationClassify.model_dir": self._params[
                "doc_orientation_classify_model_dir"
            ],
            "SubPipelines.DocPreprocessor.SubModules.DocUnwarping.model_name": self._params[
                "doc_unwarping_model_name"
            ],
            "SubPipelines.DocPreprocessor.SubModules.DocUnwarping.model_dir": self._params[
                "doc_unwarping_model_dir"
            ],
            "use_seal_recognition": self._params["use_seal_recognition"],
            "use_ocr_for_image_block": self._params["use_ocr_for_image_block"],
        }
        return create_config_from_structure(STRUCTURE)
class PaddleOCRVLCLISubcommandExecutor(PipelineCLISubcommandExecutor):
    """CLI executor that registers the ``doc_parser`` subcommand."""

    @property
    def subparser_name(self):
        """Name under which this subcommand appears on the command line."""
        return "doc_parser"

    def _update_subparser(self, subparser):
        """Attach the shared inference options plus all pipeline flags."""
        add_simple_inference_args(subparser)
        # (flag, argparse keyword arguments), registered in this exact order.
        option_specs = [
            (
                "--pipeline_version",
                dict(
                    type=str,
                    default=_DEFAULT_PIPELINE_VERSION,
                    choices=_AVAILABLE_PIPELINE_VERSIONS,
                ),
            ),
            (
                "--layout_detection_model_name",
                dict(type=str, help="Name of the layout detection model."),
            ),
            (
                "--layout_detection_model_dir",
                dict(type=str, help="Path to the layout detection model directory."),
            ),
            (
                "--layout_threshold",
                dict(type=float, help="Score threshold for the layout detection model."),
            ),
            (
                "--layout_nms",
                dict(type=str2bool, help="Whether to use NMS in layout detection."),
            ),
            (
                "--layout_unclip_ratio",
                dict(type=float, help="Expansion coefficient for layout detection."),
            ),
            (
                "--layout_merge_bboxes_mode",
                dict(type=str, help="Overlapping box filtering method."),
            ),
            (
                "--vl_rec_model_name",
                dict(type=str, help="Name of the VL recognition model."),
            ),
            (
                "--vl_rec_model_dir",
                dict(type=str, help="Path to the VL recognition model directory."),
            ),
            (
                "--vl_rec_backend",
                dict(
                    type=str,
                    help="Backend used by the VL recognition module.",
                    choices=_SUPPORTED_VL_BACKENDS,
                ),
            ),
            (
                "--vl_rec_server_url",
                dict(type=str, help="Server URL used by the VL recognition module."),
            ),
            (
                "--vl_rec_max_concurrency",
                dict(type=int, help="Maximum concurrency for making VLM requests."),
            ),
            (
                "--vl_rec_api_model_name",
                dict(type=str, help="Model name for the VLM server."),
            ),
            (
                "--vl_rec_api_key",
                dict(type=str, help="API key for the VLM server."),
            ),
            (
                "--doc_orientation_classify_model_name",
                dict(
                    type=str,
                    help="Name of the document image orientation classification model.",
                ),
            ),
            (
                "--doc_orientation_classify_model_dir",
                dict(
                    type=str,
                    help="Path to the document image orientation classification model directory.",
                ),
            ),
            (
                "--doc_unwarping_model_name",
                dict(type=str, help="Name of the text image unwarping model."),
            ),
            (
                "--doc_unwarping_model_dir",
                dict(type=str, help="Path to the image unwarping model directory."),
            ),
            (
                "--use_doc_orientation_classify",
                dict(
                    type=str2bool,
                    help="Whether to use document image orientation classification.",
                ),
            ),
            (
                "--use_doc_unwarping",
                dict(type=str2bool, help="Whether to use text image unwarping."),
            ),
            (
                "--use_layout_detection",
                dict(type=str2bool, help="Whether to use layout detection."),
            ),
            (
                "--use_chart_recognition",
                dict(type=str2bool, help="Whether to use chart recognition."),
            ),
            (
                "--use_seal_recognition",
                dict(type=str2bool, help="Whether to use seal recognition."),
            ),
            (
                "--use_ocr_for_image_block",
                dict(type=str2bool, help="Whether to use OCR for image blocks."),
            ),
            (
                "--format_block_content",
                dict(type=str2bool, help="Whether to format block content to Markdown."),
            ),
            (
                "--merge_layout_blocks",
                dict(type=str2bool, help="Whether to merge layout blocks."),
            ),
            (
                "--markdown_ignore_labels",
                dict(
                    type=str,
                    nargs="+",
                    help="List of layout labels to ignore in Markdown output.",
                ),
            ),
            (
                "--use_queues",
                dict(
                    type=str2bool,
                    help="Whether to use queues for asynchronous processing.",
                ),
            ),
            (
                "--layout_shape_mode",
                dict(type=str, default="auto", help="Mode for layout shape."),
            ),
            (
                "--prompt_label",
                dict(type=str, help="Prompt label for the VLM."),
            ),
            (
                "--repetition_penalty",
                dict(type=float, help="Repetition penalty used in sampling for the VLM."),
            ),
            (
                "--temperature",
                dict(type=float, help="Temperature parameter used in sampling for the VLM."),
            ),
            (
                "--top_p",
                dict(type=float, help="Top-p parameter used in sampling for the VLM."),
            ),
            (
                "--min_pixels",
                dict(type=int, help="Minimum pixels for image preprocessing for the VLM."),
            ),
            (
                "--max_pixels",
                dict(type=int, help="Maximum pixels for image preprocessing for the VLM."),
            ),
            (
                "--max_new_tokens",
                dict(type=int, help="Maximum number of tokens generated by the VLM."),
            ),
        ]
        for flag, spec in option_specs:
            subparser.add_argument(flag, **spec)

    def execute_with_args(self, args):
        """Run inference, routing per-call options to ``predict``."""
        # These flags configure individual predict() calls rather than
        # pipeline construction.
        per_call_params = {
            "layout_shape_mode",
            "prompt_label",
            "repetition_penalty",
            "temperature",
            "top_p",
            "min_pixels",
            "max_pixels",
            "max_new_tokens",
        }
        perform_simple_inference(
            PaddleOCRVL,
            get_subcommand_args(args),
            predict_param_names=per_call_params,
        )
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_pipelines/paddleocr_vl.py",
"license": "Apache License 2.0",
"lines": 486,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_models/_doc_vlm.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from .._utils.cli import (
get_subcommand_args,
perform_simple_inference,
)
from .base import PaddleXPredictorWrapper, PredictorCLISubcommandExecutor
from paddlex.utils.pipeline_arguments import custom_type
class BaseDocVLM(PaddleXPredictorWrapper):
    """Common base class for document vision-language model wrappers."""

    def __init__(self, *args, **kwargs):
        # Must be set before the superclass initializer runs, since the
        # wrapper machinery may query the extra init args during construction.
        self._extra_init_args = dict()
        super().__init__(*args, **kwargs)

    def _get_extra_paddlex_predictor_init_args(self):
        """Return additional keyword arguments for the PaddleX predictor."""
        return self._extra_init_args
class BaseDocVLMSubcommandExecutor(PredictorCLISubcommandExecutor):
    """Shared CLI-subcommand behavior for document VLM wrappers."""

    # CLI input arrives as text; coerce/validate it into a dict.
    input_validator = staticmethod(custom_type(dict))

    @property
    @abc.abstractmethod
    def wrapper_cls(self):
        """Concrete wrapper class this executor drives (subclass-provided)."""
        raise NotImplementedError

    def execute_with_args(self, args):
        """Validate the input argument and run simple inference."""
        parsed = get_subcommand_args(args)
        parsed["input"] = self.input_validator(parsed["input"])
        perform_simple_inference(self.wrapper_cls, parsed)
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_models/_doc_vlm.py",
"license": "Apache License 2.0",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_models/chart_parsing.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .._utils.cli import add_simple_inference_args
from ._doc_vlm import (
BaseDocVLM,
BaseDocVLMSubcommandExecutor,
)
class ChartParsing(BaseDocVLM):
    """Wrapper for the PP-Chart2Table chart parsing model."""

    @property
    def default_model_name(self):
        """Model used when the caller does not name one explicitly."""
        return "PP-Chart2Table"

    @classmethod
    def get_cli_subcommand_executor(cls):
        """Build the CLI subcommand executor for chart parsing."""
        return ChartParsingSubcommandExecutor()
class ChartParsingSubcommandExecutor(BaseDocVLMSubcommandExecutor):
    """Registers and serves the ``chart_parsing`` CLI subcommand."""

    @property
    def subparser_name(self):
        """Subcommand name on the command line."""
        return "chart_parsing"

    @property
    def wrapper_cls(self):
        """Wrapper class handled by this executor."""
        return ChartParsing

    def _update_subparser(self, subparser):
        """Add the shared inference arguments with a chart-specific hint."""
        input_hint = (
            'Input dict, e.g. `{"image": "https://paddle-model-ecology.bj.bcebos.com'
            '/paddlex/imgs/demo_image/chart_parsing_02.png"}`.'
        )
        add_simple_inference_args(subparser, input_help=input_hint)
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_models/chart_parsing.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_pipelines/pp_doctranslation.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .._utils.cli import (
get_subcommand_args,
str2bool,
)
from .._utils.logging import logger
from .base import PaddleXPipelineWrapper, PipelineCLISubcommandExecutor
from .utils import create_config_from_structure
class PPDocTranslation(PaddleXPipelineWrapper):
    """Wrapper for the PaddleX "PP-DocTranslation" pipeline.

    Combines layout parsing (document preprocessing, OCR, table/seal/formula/
    chart recognition) with LLM-based translation of the resulting Markdown.
    Constructor arguments default to ``None`` ("use pipeline default") and are
    turned into PaddleX config overrides by ``_get_paddlex_config_overrides``.

    Bug fix: the seal-OCR ``limit_side_len`` override previously read
    ``text_det_limit_side_len``, silently ignoring the
    ``seal_det_limit_side_len`` constructor argument; it now maps correctly.
    """

    def __init__(
        self,
        layout_detection_model_name=None,
        layout_detection_model_dir=None,
        layout_threshold=None,
        layout_nms=None,
        layout_unclip_ratio=None,
        layout_merge_bboxes_mode=None,
        chart_recognition_model_name=None,
        chart_recognition_model_dir=None,
        chart_recognition_batch_size=None,
        region_detection_model_name=None,
        region_detection_model_dir=None,
        doc_orientation_classify_model_name=None,
        doc_orientation_classify_model_dir=None,
        doc_unwarping_model_name=None,
        doc_unwarping_model_dir=None,
        text_detection_model_name=None,
        text_detection_model_dir=None,
        text_det_limit_side_len=None,
        text_det_limit_type=None,
        text_det_thresh=None,
        text_det_box_thresh=None,
        text_det_unclip_ratio=None,
        textline_orientation_model_name=None,
        textline_orientation_model_dir=None,
        textline_orientation_batch_size=None,
        text_recognition_model_name=None,
        text_recognition_model_dir=None,
        text_recognition_batch_size=None,
        text_rec_score_thresh=None,
        table_classification_model_name=None,
        table_classification_model_dir=None,
        wired_table_structure_recognition_model_name=None,
        wired_table_structure_recognition_model_dir=None,
        wireless_table_structure_recognition_model_name=None,
        wireless_table_structure_recognition_model_dir=None,
        wired_table_cells_detection_model_name=None,
        wired_table_cells_detection_model_dir=None,
        wireless_table_cells_detection_model_name=None,
        wireless_table_cells_detection_model_dir=None,
        table_orientation_classify_model_name=None,
        table_orientation_classify_model_dir=None,
        seal_text_detection_model_name=None,
        seal_text_detection_model_dir=None,
        seal_det_limit_side_len=None,
        seal_det_limit_type=None,
        seal_det_thresh=None,
        seal_det_box_thresh=None,
        seal_det_unclip_ratio=None,
        seal_text_recognition_model_name=None,
        seal_text_recognition_model_dir=None,
        seal_text_recognition_batch_size=None,
        seal_rec_score_thresh=None,
        formula_recognition_model_name=None,
        formula_recognition_model_dir=None,
        formula_recognition_batch_size=None,
        use_doc_orientation_classify=None,
        use_doc_unwarping=None,
        use_textline_orientation=None,
        use_seal_recognition=None,
        use_table_recognition=None,
        use_formula_recognition=None,
        use_chart_recognition=None,
        use_region_detection=None,
        chat_bot_config=None,
        **kwargs,
    ):
        """Record all named parameters; ``**kwargs`` go to the superclass.

        NOTE: ``locals()`` is read before any other local variable is created
        in this method, so the snapshot contains exactly the named arguments.
        """
        params = locals().copy()
        params.pop("self")
        params.pop("kwargs")
        self._params = params
        super().__init__(**kwargs)

    @property
    def _paddlex_pipeline_name(self):
        """Name of the underlying PaddleX pipeline."""
        return "PP-DocTranslation"

    def visual_predict_iter(
        self,
        input,
        *,
        use_doc_orientation_classify=None,
        use_doc_unwarping=None,
        use_textline_orientation=None,
        use_seal_recognition=None,
        use_table_recognition=None,
        use_formula_recognition=None,
        use_chart_recognition=None,
        use_region_detection=None,
        layout_threshold=None,
        layout_nms=None,
        layout_unclip_ratio=None,
        layout_merge_bboxes_mode=None,
        text_det_limit_side_len=None,
        text_det_limit_type=None,
        text_det_thresh=None,
        text_det_box_thresh=None,
        text_det_unclip_ratio=None,
        text_rec_score_thresh=None,
        seal_det_limit_side_len=None,
        seal_det_limit_type=None,
        seal_det_thresh=None,
        seal_det_box_thresh=None,
        seal_det_unclip_ratio=None,
        seal_rec_score_thresh=None,
        use_wired_table_cells_trans_to_html=False,
        use_wireless_table_cells_trans_to_html=False,
        use_table_orientation_classify=True,
        use_ocr_results_with_table_cells=True,
        use_e2e_wired_table_rec_model=False,
        use_e2e_wireless_table_rec_model=True,
        **kwargs,
    ):
        """Run visual document analysis, forwarding options to PaddleX.

        Returns whatever ``paddlex_pipeline.visual_predict`` returns
        (presumably an iterable of per-page results — see ``visual_predict``,
        which materializes it with ``list``).
        """
        return self.paddlex_pipeline.visual_predict(
            input,
            use_doc_orientation_classify=use_doc_orientation_classify,
            use_doc_unwarping=use_doc_unwarping,
            use_textline_orientation=use_textline_orientation,
            use_seal_recognition=use_seal_recognition,
            use_table_recognition=use_table_recognition,
            use_formula_recognition=use_formula_recognition,
            use_chart_recognition=use_chart_recognition,
            use_region_detection=use_region_detection,
            layout_threshold=layout_threshold,
            layout_nms=layout_nms,
            layout_unclip_ratio=layout_unclip_ratio,
            layout_merge_bboxes_mode=layout_merge_bboxes_mode,
            text_det_limit_side_len=text_det_limit_side_len,
            text_det_limit_type=text_det_limit_type,
            text_det_thresh=text_det_thresh,
            text_det_box_thresh=text_det_box_thresh,
            text_det_unclip_ratio=text_det_unclip_ratio,
            text_rec_score_thresh=text_rec_score_thresh,
            seal_det_limit_side_len=seal_det_limit_side_len,
            seal_det_limit_type=seal_det_limit_type,
            seal_det_thresh=seal_det_thresh,
            seal_det_box_thresh=seal_det_box_thresh,
            seal_det_unclip_ratio=seal_det_unclip_ratio,
            seal_rec_score_thresh=seal_rec_score_thresh,
            use_wired_table_cells_trans_to_html=use_wired_table_cells_trans_to_html,
            use_wireless_table_cells_trans_to_html=use_wireless_table_cells_trans_to_html,
            use_table_orientation_classify=use_table_orientation_classify,
            use_ocr_results_with_table_cells=use_ocr_results_with_table_cells,
            use_e2e_wired_table_rec_model=use_e2e_wired_table_rec_model,
            use_e2e_wireless_table_rec_model=use_e2e_wireless_table_rec_model,
            **kwargs,
        )

    def visual_predict(
        self,
        input,
        *,
        use_doc_orientation_classify=None,
        use_doc_unwarping=None,
        use_textline_orientation=None,
        use_seal_recognition=None,
        use_table_recognition=None,
        use_formula_recognition=None,
        use_chart_recognition=None,
        use_region_detection=None,
        layout_threshold=None,
        layout_nms=None,
        layout_unclip_ratio=None,
        layout_merge_bboxes_mode=None,
        text_det_limit_side_len=None,
        text_det_limit_type=None,
        text_det_thresh=None,
        text_det_box_thresh=None,
        text_det_unclip_ratio=None,
        text_rec_score_thresh=None,
        seal_det_limit_side_len=None,
        seal_det_limit_type=None,
        seal_det_thresh=None,
        seal_det_box_thresh=None,
        seal_det_unclip_ratio=None,
        seal_rec_score_thresh=None,
        use_wired_table_cells_trans_to_html=False,
        use_wireless_table_cells_trans_to_html=False,
        use_table_orientation_classify=True,
        use_ocr_results_with_table_cells=True,
        use_e2e_wired_table_rec_model=False,
        use_e2e_wireless_table_rec_model=True,
        **kwargs,
    ):
        """Eager variant of ``visual_predict_iter``: collect results in a list."""
        return list(
            self.visual_predict_iter(
                input,
                use_doc_orientation_classify=use_doc_orientation_classify,
                use_doc_unwarping=use_doc_unwarping,
                use_textline_orientation=use_textline_orientation,
                use_seal_recognition=use_seal_recognition,
                use_table_recognition=use_table_recognition,
                use_formula_recognition=use_formula_recognition,
                use_chart_recognition=use_chart_recognition,
                use_region_detection=use_region_detection,
                layout_threshold=layout_threshold,
                layout_nms=layout_nms,
                layout_unclip_ratio=layout_unclip_ratio,
                layout_merge_bboxes_mode=layout_merge_bboxes_mode,
                text_det_limit_side_len=text_det_limit_side_len,
                text_det_limit_type=text_det_limit_type,
                text_det_thresh=text_det_thresh,
                text_det_box_thresh=text_det_box_thresh,
                text_det_unclip_ratio=text_det_unclip_ratio,
                text_rec_score_thresh=text_rec_score_thresh,
                seal_det_limit_side_len=seal_det_limit_side_len,
                seal_det_limit_type=seal_det_limit_type,
                seal_det_thresh=seal_det_thresh,
                seal_det_box_thresh=seal_det_box_thresh,
                seal_det_unclip_ratio=seal_det_unclip_ratio,
                seal_rec_score_thresh=seal_rec_score_thresh,
                use_wired_table_cells_trans_to_html=use_wired_table_cells_trans_to_html,
                use_wireless_table_cells_trans_to_html=use_wireless_table_cells_trans_to_html,
                use_table_orientation_classify=use_table_orientation_classify,
                use_ocr_results_with_table_cells=use_ocr_results_with_table_cells,
                use_e2e_wired_table_rec_model=use_e2e_wired_table_rec_model,
                use_e2e_wireless_table_rec_model=use_e2e_wireless_table_rec_model,
                **kwargs,
            )
        )

    def translate_iter(
        self,
        ori_md_info_list,
        *,
        target_language="zh",
        chunk_size=5000,
        task_description=None,
        output_format=None,
        rules_str=None,
        few_shot_demo_text_content=None,
        few_shot_demo_key_value_list=None,
        glossary=None,
        llm_request_interval=0.0,
        chat_bot_config=None,
        **kwargs,
    ):
        """Translate parsed Markdown via the PaddleX pipeline's LLM stage.

        Returns whatever ``paddlex_pipeline.translate`` returns (see
        ``translate``, which materializes it with ``list``).
        """
        return self.paddlex_pipeline.translate(
            ori_md_info_list,
            target_language=target_language,
            chunk_size=chunk_size,
            task_description=task_description,
            output_format=output_format,
            rules_str=rules_str,
            few_shot_demo_text_content=few_shot_demo_text_content,
            few_shot_demo_key_value_list=few_shot_demo_key_value_list,
            glossary=glossary,
            llm_request_interval=llm_request_interval,
            chat_bot_config=chat_bot_config,
            **kwargs,
        )

    def translate(
        self,
        ori_md_info_list,
        *,
        target_language="zh",
        chunk_size=5000,
        task_description=None,
        output_format=None,
        rules_str=None,
        few_shot_demo_text_content=None,
        few_shot_demo_key_value_list=None,
        glossary=None,
        llm_request_interval=0.0,
        chat_bot_config=None,
        **kwargs,
    ):
        """Eager variant of ``translate_iter``: collect results in a list."""
        return list(
            self.translate_iter(
                ori_md_info_list,
                target_language=target_language,
                chunk_size=chunk_size,
                task_description=task_description,
                output_format=output_format,
                rules_str=rules_str,
                few_shot_demo_text_content=few_shot_demo_text_content,
                few_shot_demo_key_value_list=few_shot_demo_key_value_list,
                glossary=glossary,
                llm_request_interval=llm_request_interval,
                chat_bot_config=chat_bot_config,
                **kwargs,
            )
        )

    def load_from_markdown(self, input):
        """Delegate Markdown loading to the underlying PaddleX pipeline."""
        return self.paddlex_pipeline.load_from_markdown(input)

    def concatenate_markdown_pages(self, markdown_list):
        """Delegate page concatenation to the underlying PaddleX pipeline."""
        return self.paddlex_pipeline.concatenate_markdown_pages(markdown_list)

    @classmethod
    def get_cli_subcommand_executor(cls):
        """Return the CLI subcommand executor for this pipeline."""
        return PPDocTranslationCLISubcommandExecutor()

    def _get_paddlex_config_overrides(self):
        """Map recorded constructor parameters onto PaddleX config paths."""
        # HACK: We should consider reducing duplication.
        P = self._params
        lp = "SubPipelines.LayoutParser"
        doc_pre = f"{lp}.SubPipelines.DocPreprocessor"
        ocr = f"{lp}.SubPipelines.GeneralOCR"
        table = f"{lp}.SubPipelines.TableRecognition"
        table_ocr = f"{table}.SubPipelines.GeneralOCR"
        seal_ocr = f"{lp}.SubPipelines.SealRecognition.SubPipelines.SealOCR"
        formula = f"{lp}.SubPipelines.FormulaRecognition"
        STRUCTURE = {
            f"{doc_pre}.use_doc_orientation_classify": P["use_doc_orientation_classify"],
            f"{doc_pre}.use_doc_unwarping": P["use_doc_unwarping"],
            # The preprocessor is enabled if either of its stages is requested.
            f"{lp}.use_doc_preprocessor": P["use_doc_orientation_classify"]
            or P["use_doc_unwarping"],
            f"{ocr}.use_textline_orientation": P["use_textline_orientation"],
            f"{lp}.use_seal_recognition": P["use_seal_recognition"],
            f"{lp}.use_table_recognition": P["use_table_recognition"],
            f"{lp}.use_formula_recognition": P["use_formula_recognition"],
            f"{lp}.use_chart_recognition": P["use_chart_recognition"],
            f"{lp}.use_region_detection": P["use_region_detection"],
            f"{lp}.SubModules.LayoutDetection.model_name": P["layout_detection_model_name"],
            f"{lp}.SubModules.LayoutDetection.model_dir": P["layout_detection_model_dir"],
            f"{lp}.SubModules.LayoutDetection.threshold": P["layout_threshold"],
            f"{lp}.SubModules.LayoutDetection.layout_nms": P["layout_nms"],
            f"{lp}.SubModules.LayoutDetection.layout_unclip_ratio": P["layout_unclip_ratio"],
            f"{lp}.SubModules.LayoutDetection.layout_merge_bboxes_mode": P["layout_merge_bboxes_mode"],
            f"{lp}.SubModules.ChartRecognition.model_name": P["chart_recognition_model_name"],
            f"{lp}.SubModules.ChartRecognition.model_dir": P["chart_recognition_model_dir"],
            f"{lp}.SubModules.ChartRecognition.batch_size": P["chart_recognition_batch_size"],
            f"{lp}.SubModules.RegionDetection.model_name": P["region_detection_model_name"],
            f"{lp}.SubModules.RegionDetection.model_dir": P["region_detection_model_dir"],
            f"{doc_pre}.SubModules.DocOrientationClassify.model_name": P["doc_orientation_classify_model_name"],
            f"{doc_pre}.SubModules.DocOrientationClassify.model_dir": P["doc_orientation_classify_model_dir"],
            f"{doc_pre}.SubModules.DocUnwarping.model_name": P["doc_unwarping_model_name"],
            f"{doc_pre}.SubModules.DocUnwarping.model_dir": P["doc_unwarping_model_dir"],
            f"{ocr}.SubModules.TextDetection.model_name": P["text_detection_model_name"],
            f"{ocr}.SubModules.TextDetection.model_dir": P["text_detection_model_dir"],
            f"{ocr}.SubModules.TextDetection.limit_side_len": P["text_det_limit_side_len"],
            f"{ocr}.SubModules.TextDetection.limit_type": P["text_det_limit_type"],
            f"{ocr}.SubModules.TextDetection.thresh": P["text_det_thresh"],
            f"{ocr}.SubModules.TextDetection.box_thresh": P["text_det_box_thresh"],
            f"{ocr}.SubModules.TextDetection.unclip_ratio": P["text_det_unclip_ratio"],
            f"{ocr}.SubModules.TextLineOrientation.model_name": P["textline_orientation_model_name"],
            f"{ocr}.SubModules.TextLineOrientation.model_dir": P["textline_orientation_model_dir"],
            f"{ocr}.SubModules.TextLineOrientation.batch_size": P["textline_orientation_batch_size"],
            f"{ocr}.SubModules.TextRecognition.model_name": P["text_recognition_model_name"],
            f"{ocr}.SubModules.TextRecognition.model_dir": P["text_recognition_model_dir"],
            f"{ocr}.SubModules.TextRecognition.batch_size": P["text_recognition_batch_size"],
            f"{ocr}.SubModules.TextRecognition.score_thresh": P["text_rec_score_thresh"],
            f"{table}.SubModules.TableClassification.model_name": P["table_classification_model_name"],
            f"{table}.SubModules.TableClassification.model_dir": P["table_classification_model_dir"],
            f"{table}.SubModules.WiredTableStructureRecognition.model_name": P["wired_table_structure_recognition_model_name"],
            f"{table}.SubModules.WiredTableStructureRecognition.model_dir": P["wired_table_structure_recognition_model_dir"],
            f"{table}.SubModules.WirelessTableStructureRecognition.model_name": P["wireless_table_structure_recognition_model_name"],
            f"{table}.SubModules.WirelessTableStructureRecognition.model_dir": P["wireless_table_structure_recognition_model_dir"],
            f"{table}.SubModules.WiredTableCellsDetection.model_name": P["wired_table_cells_detection_model_name"],
            f"{table}.SubModules.WiredTableCellsDetection.model_dir": P["wired_table_cells_detection_model_dir"],
            f"{table}.SubModules.WirelessTableCellsDetection.model_name": P["wireless_table_cells_detection_model_name"],
            f"{table}.SubModules.WirelessTableCellsDetection.model_dir": P["wireless_table_cells_detection_model_dir"],
            f"{table}.SubModules.TableOrientationClassify.model_name": P["table_orientation_classify_model_name"],
            f"{table}.SubModules.TableOrientationClassify.model_dir": P["table_orientation_classify_model_dir"],
            f"{table_ocr}.SubModules.TextDetection.model_name": P["text_detection_model_name"],
            f"{table_ocr}.SubModules.TextDetection.model_dir": P["text_detection_model_dir"],
            f"{table_ocr}.SubModules.TextDetection.limit_side_len": P["text_det_limit_side_len"],
            f"{table_ocr}.SubModules.TextDetection.limit_type": P["text_det_limit_type"],
            f"{table_ocr}.SubModules.TextDetection.thresh": P["text_det_thresh"],
            f"{table_ocr}.SubModules.TextDetection.box_thresh": P["text_det_box_thresh"],
            f"{table_ocr}.SubModules.TextDetection.unclip_ratio": P["text_det_unclip_ratio"],
            f"{table_ocr}.SubModules.TextLineOrientation.model_name": P["textline_orientation_model_name"],
            f"{table_ocr}.SubModules.TextLineOrientation.model_dir": P["textline_orientation_model_dir"],
            f"{table_ocr}.SubModules.TextLineOrientation.batch_size": P["textline_orientation_batch_size"],
            f"{table_ocr}.SubModules.TextRecognition.model_name": P["text_recognition_model_name"],
            f"{table_ocr}.SubModules.TextRecognition.model_dir": P["text_recognition_model_dir"],
            f"{table_ocr}.SubModules.TextRecognition.batch_size": P["text_recognition_batch_size"],
            f"{table_ocr}.SubModules.TextRecognition.score_thresh": P["text_rec_score_thresh"],
            f"{seal_ocr}.SubModules.TextDetection.model_name": P["seal_text_detection_model_name"],
            f"{seal_ocr}.SubModules.TextDetection.model_dir": P["seal_text_detection_model_dir"],
            # BUG FIX: this previously read "text_det_limit_side_len", so the
            # seal_det_limit_side_len constructor argument was silently ignored.
            f"{seal_ocr}.SubModules.TextDetection.limit_side_len": P["seal_det_limit_side_len"],
            f"{seal_ocr}.SubModules.TextDetection.limit_type": P["seal_det_limit_type"],
            f"{seal_ocr}.SubModules.TextDetection.thresh": P["seal_det_thresh"],
            f"{seal_ocr}.SubModules.TextDetection.box_thresh": P["seal_det_box_thresh"],
            f"{seal_ocr}.SubModules.TextDetection.unclip_ratio": P["seal_det_unclip_ratio"],
            f"{seal_ocr}.SubModules.TextRecognition.model_name": P["seal_text_recognition_model_name"],
            f"{seal_ocr}.SubModules.TextRecognition.model_dir": P["seal_text_recognition_model_dir"],
            f"{seal_ocr}.SubModules.TextRecognition.batch_size": P["seal_text_recognition_batch_size"],
            # NOTE(review): seal_rec_score_thresh is accepted by __init__ but has
            # no config override here; it is forwarded at visual_predict time.
            f"{formula}.SubModules.FormulaRecognition.model_name": P["formula_recognition_model_name"],
            f"{formula}.SubModules.FormulaRecognition.model_dir": P["formula_recognition_model_dir"],
            f"{formula}.SubModules.FormulaRecognition.batch_size": P["formula_recognition_batch_size"],
            "SubModules.LLM_Chat": P["chat_bot_config"],
        }
        return create_config_from_structure(STRUCTURE)
class PPDocTranslationCLISubcommandExecutor(PipelineCLISubcommandExecutor):
    """CLI subcommand executor for the PP-DocTranslation pipeline.

    Registers the ``pp_doctranslation`` subcommand, exposes every pipeline
    knob as a CLI flag, and runs the two-stage flow: visual layout parsing
    followed by LLM-based translation of the extracted markdown.
    """

    @property
    def subparser_name(self):
        """Name under which this subcommand is registered."""
        return "pp_doctranslation"

    def _update_subparser(self, subparser):
        """Attach all PP-DocTranslation CLI options to *subparser*."""
        subparser.add_argument(
            "-i",
            "--input",
            type=str,
            required=True,
            help="Input path or URL.",
        )
        subparser.add_argument(
            "--save_path",
            type=str,
            help="Path to the output directory.",
        )
        subparser.add_argument(
            "--target_language",
            type=str,
            default="zh",
            help="Target language.",
        )
        # Layout analysis models and post-processing knobs.
        subparser.add_argument(
            "--layout_detection_model_name",
            type=str,
            help="Name of the layout detection model.",
        )
        subparser.add_argument(
            "--layout_detection_model_dir",
            type=str,
            help="Path to the layout detection model directory.",
        )
        subparser.add_argument(
            "--layout_threshold",
            type=float,
            help="Score threshold for the layout detection model.",
        )
        subparser.add_argument(
            "--layout_nms",
            type=str2bool,
            help="Whether to use NMS in layout detection.",
        )
        subparser.add_argument(
            "--layout_unclip_ratio",
            type=float,
            help="Expansion coefficient for layout detection.",
        )
        subparser.add_argument(
            "--layout_merge_bboxes_mode",
            type=str,
            help="Overlapping box filtering method.",
        )
        subparser.add_argument(
            "--chart_recognition_model_name",
            type=str,
            help="Name of the chart recognition model.",
        )
        subparser.add_argument(
            "--chart_recognition_model_dir",
            type=str,
            help="Path to the chart recognition model directory.",
        )
        subparser.add_argument(
            "--chart_recognition_batch_size",
            type=int,
            help="Batch size for the chart recognition model.",
        )
        subparser.add_argument(
            "--region_detection_model_name",
            type=str,
            help="Name of the region detection model.",
        )
        subparser.add_argument(
            "--region_detection_model_dir",
            type=str,
            help="Path to the region detection model directory.",
        )
        # Document preprocessing models.
        subparser.add_argument(
            "--doc_orientation_classify_model_name",
            type=str,
            help="Name of the document image orientation classification model.",
        )
        subparser.add_argument(
            "--doc_orientation_classify_model_dir",
            type=str,
            help="Path to the document image orientation classification model directory.",
        )
        subparser.add_argument(
            "--doc_unwarping_model_name",
            type=str,
            help="Name of the text image unwarping model.",
        )
        subparser.add_argument(
            "--doc_unwarping_model_dir",
            type=str,
            help="Path to the image unwarping model directory.",
        )
        # General OCR: text detection and recognition.
        subparser.add_argument(
            "--text_detection_model_name",
            type=str,
            help="Name of the text detection model.",
        )
        subparser.add_argument(
            "--text_detection_model_dir",
            type=str,
            help="Path to the text detection model directory.",
        )
        subparser.add_argument(
            "--text_det_limit_side_len",
            type=int,
            help="This sets a limit on the side length of the input image for the text detection model.",
        )
        subparser.add_argument(
            "--text_det_limit_type",
            type=str,
            help="This determines how the side length limit is applied to the input image before feeding it into the text detection model.",
        )
        subparser.add_argument(
            "--text_det_thresh",
            type=float,
            help="Detection pixel threshold for the text detection model. Pixels with scores greater than this threshold in the output probability map are considered text pixels.",
        )
        subparser.add_argument(
            "--text_det_box_thresh",
            type=float,
            help="Detection box threshold for the text detection model. A detection result is considered a text region if the average score of all pixels within the border of the result is greater than this threshold.",
        )
        subparser.add_argument(
            "--text_det_unclip_ratio",
            type=float,
            help="Text detection expansion coefficient, which expands the text region using this method. The larger the value, the larger the expansion area.",
        )
        subparser.add_argument(
            "--textline_orientation_model_name",
            type=str,
            help="Name of the text line orientation classification model.",
        )
        subparser.add_argument(
            "--textline_orientation_model_dir",
            type=str,
            help="Path to the text line orientation classification directory.",
        )
        subparser.add_argument(
            "--textline_orientation_batch_size",
            type=int,
            help="Batch size for the text line orientation classification model.",
        )
        subparser.add_argument(
            "--text_recognition_model_name",
            type=str,
            help="Name of the text recognition model.",
        )
        subparser.add_argument(
            "--text_recognition_model_dir",
            type=str,
            help="Path to the text recognition model directory.",
        )
        subparser.add_argument(
            "--text_recognition_batch_size",
            type=int,
            help="Batch size for the text recognition model.",
        )
        subparser.add_argument(
            "--text_rec_score_thresh",
            type=float,
            help="Text recognition threshold used in general OCR. Text results with scores greater than this threshold are retained.",
        )
        # Table recognition models.
        subparser.add_argument(
            "--table_classification_model_name",
            type=str,
            help="Name of the table classification model.",
        )
        subparser.add_argument(
            "--table_classification_model_dir",
            type=str,
            help="Path to the table classification model directory.",
        )
        subparser.add_argument(
            "--wired_table_structure_recognition_model_name",
            type=str,
            help="Name of the wired table structure recognition model.",
        )
        subparser.add_argument(
            "--wired_table_structure_recognition_model_dir",
            type=str,
            help="Path to the wired table structure recognition model directory.",
        )
        subparser.add_argument(
            "--wireless_table_structure_recognition_model_name",
            type=str,
            help="Name of the wireless table structure recognition model.",
        )
        subparser.add_argument(
            "--wireless_table_structure_recognition_model_dir",
            type=str,
            # Fixed copy-paste: this is the *wireless* model directory.
            help="Path to the wireless table structure recognition model directory.",
        )
        subparser.add_argument(
            "--wired_table_cells_detection_model_name",
            type=str,
            help="Name of the wired table cells detection model.",
        )
        subparser.add_argument(
            "--wired_table_cells_detection_model_dir",
            type=str,
            help="Path to the wired table cells detection model directory.",
        )
        subparser.add_argument(
            "--wireless_table_cells_detection_model_name",
            type=str,
            help="Name of the wireless table cells detection model.",
        )
        subparser.add_argument(
            "--wireless_table_cells_detection_model_dir",
            type=str,
            help="Path to the wireless table cells detection model directory.",
        )
        # Seal OCR models.
        subparser.add_argument(
            "--seal_text_detection_model_name",
            type=str,
            help="Name of the seal text detection model.",
        )
        subparser.add_argument(
            "--seal_text_detection_model_dir",
            type=str,
            help="Path to the seal text detection model directory.",
        )
        subparser.add_argument(
            "--seal_det_limit_side_len",
            type=int,
            help="This sets a limit on the side length of the input image for the seal text detection model.",
        )
        subparser.add_argument(
            "--seal_det_limit_type",
            type=str,
            help="This determines how the side length limit is applied to the input image before feeding it into the seal text detection model.",
        )
        subparser.add_argument(
            "--seal_det_thresh",
            type=float,
            help="Detection pixel threshold for the seal text detection model. Pixels with scores greater than this threshold in the output probability map are considered text pixels.",
        )
        subparser.add_argument(
            "--seal_det_box_thresh",
            type=float,
            help="Detection box threshold for the seal text detection model. A detection result is considered a text region if the average score of all pixels within the border of the result is greater than this threshold.",
        )
        subparser.add_argument(
            "--seal_det_unclip_ratio",
            type=float,
            help="Seal text detection expansion coefficient, which expands the text region using this method. The larger the value, the larger the expansion area.",
        )
        subparser.add_argument(
            "--seal_text_recognition_model_name",
            type=str,
            help="Name of the seal text recognition model.",
        )
        subparser.add_argument(
            "--seal_text_recognition_model_dir",
            type=str,
            help="Path to the seal text recognition model directory.",
        )
        subparser.add_argument(
            "--seal_text_recognition_batch_size",
            type=int,
            help="Batch size for the seal text recognition model.",
        )
        subparser.add_argument(
            "--seal_rec_score_thresh",
            type=float,
            help="Seal text recognition threshold. Text results with scores greater than this threshold are retained.",
        )
        # Formula recognition models.
        subparser.add_argument(
            "--formula_recognition_model_name",
            type=str,
            help="Name of the formula recognition model.",
        )
        subparser.add_argument(
            "--formula_recognition_model_dir",
            type=str,
            help="Path to the formula recognition model directory.",
        )
        subparser.add_argument(
            "--formula_recognition_batch_size",
            type=int,
            help="Batch size for the formula recognition model.",
        )
        # Feature toggles.
        subparser.add_argument(
            "--use_doc_orientation_classify",
            type=str2bool,
            help="Whether to use document image orientation classification.",
        )
        subparser.add_argument(
            "--use_doc_unwarping",
            type=str2bool,
            help="Whether to use text image unwarping.",
        )
        subparser.add_argument(
            "--use_textline_orientation",
            type=str2bool,
            help="Whether to use text line orientation classification.",
        )
        subparser.add_argument(
            "--use_seal_recognition",
            type=str2bool,
            help="Whether to use seal recognition.",
        )
        subparser.add_argument(
            "--use_table_recognition",
            type=str2bool,
            help="Whether to use table recognition.",
        )
        subparser.add_argument(
            "--use_formula_recognition",
            type=str2bool,
            help="Whether to use formula recognition.",
        )
        subparser.add_argument(
            "--use_chart_recognition",
            type=str2bool,
            help="Whether to use chart recognition.",
        )
        subparser.add_argument(
            "--use_region_detection",
            type=str2bool,
            help="Whether to use region detection.",
        )
        # FIXME: Passing API key through CLI is not secure; consider using
        # environment variables.
        subparser.add_argument(
            "--qianfan_api_key",
            type=str,
            # Fixed copy-paste: this option is the API key for the LLM used
            # during translation, not an embedding-model configuration.
            help="Qianfan API key for the LLM used in translation.",
        )

    def execute_with_args(self, args):
        """Run the PP-DocTranslation pipeline with parsed CLI arguments.

        Stage 1 analyzes the input document(s) and collects markdown from
        layout parsing; stage 2 translates the markdown to the requested
        target language. Results are printed and, if ``--save_path`` is
        given, written to disk.
        """
        params = get_subcommand_args(args)
        # Renamed from `input` to avoid shadowing the builtin.
        input_path = params.pop("input")
        target_language = params.pop("target_language")
        save_path = params.pop("save_path")
        qianfan_api_key = params.pop("qianfan_api_key")
        if qianfan_api_key is not None:
            # Build the chat-bot configuration consumed by the translation
            # stage (Qianfan's OpenAI-compatible endpoint).
            params["chat_bot_config"] = {
                "module_name": "chat_bot",
                "model_name": "ernie-3.5-8k",
                "base_url": "https://qianfan.baidubce.com/v2",
                "api_type": "openai",
                "api_key": qianfan_api_key,
            }
        chatocr = PPDocTranslation(**params)

        logger.info("Start analyzing images")
        result_visual = chatocr.visual_predict_iter(input_path)
        ori_md_info_list = []
        for res in result_visual:
            ori_md_info_list.append(res["layout_parsing_result"].markdown)
            if save_path:
                res["layout_parsing_result"].save_all(save_path)

        logger.info("Start translation")
        result_translate = chatocr.translate_iter(
            ori_md_info_list,
            target_language=target_language,
        )
        for res in result_translate:
            res.print()
            if save_path:
                res.save_to_markdown(save_path)
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_pipelines/pp_doctranslation.py",
"license": "Apache License 2.0",
"lines": 912,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:tests/pipelines/test_pp_doctranslation.py | import pytest
from paddleocr import PPDocTranslation
from ..testing_utils import TEST_DATA_DIR
@pytest.fixture(scope="module")
def pp_doctranslation_pipeline():
    """Module-scoped PP-DocTranslation pipeline shared across all tests."""
    pipeline = PPDocTranslation()
    return pipeline
@pytest.mark.parametrize(
    "image_path",
    [
        TEST_DATA_DIR / "book.jpg",
    ],
)
def test_visual_predict(pp_doctranslation_pipeline, image_path):
    """Smoke-test visual_predict on a sample image: one dict result with a
    single 'layout_parsing_result' entry."""
    result = pp_doctranslation_pipeline.visual_predict(str(image_path))
    assert result is not None
    assert isinstance(result, list)
    assert len(result) == 1
    item = result[0]
    assert isinstance(item, dict)
    assert set(item.keys()) == {"layout_parsing_result"}
    assert isinstance(item["layout_parsing_result"], dict)
@pytest.mark.parametrize(
    "params",
    [
        {"use_doc_orientation_classify": False},
        {"use_doc_unwarping": False},
        {"use_table_recognition": False},
        {"use_formula_recognition": False},
        {"layout_threshold": 0.88},
        {"layout_threshold": [0.45, 0.4]},
        {"layout_threshold": {0: 0.45, 2: 0.48, 7: 0.4}},
        {"layout_nms": False},
        {"layout_unclip_ratio": 1.1},
        {"layout_unclip_ratio": [1.2, 1.5]},
        {"layout_unclip_ratio": {0: 1.2, 2: 1.5, 7: 1.8}},
        {"layout_merge_bboxes_mode": "large"},
        {"layout_merge_bboxes_mode": {0: "large", 2: "small", 7: "union"}},
        {"text_det_limit_side_len": 640, "text_det_limit_type": "min"},
        {"text_det_thresh": 0.5},
        {"text_det_box_thresh": 0.3},
        {"text_det_unclip_ratio": 3.0},
        {"text_rec_score_thresh": 0.5},
    ],
)
def test_visual_predict_params(
    monkeypatch,
    pp_doctranslation_pipeline,
    params,
):
    """Check that keyword arguments are forwarded to the underlying pipeline.

    The real predictor is monkeypatched with a stub that echoes the kwargs
    it receives, so no model inference happens.
    """

    def _dummy_visual_predict(input_data, **kwargs):
        # Echo the forwarded kwargs so the test can inspect them.
        yield {"layout_parsing_result": kwargs}

    monkeypatch.setattr(
        pp_doctranslation_pipeline.paddlex_pipeline,
        "visual_predict",
        _dummy_visual_predict,
    )
    # Bug fix: the original passed the `input` *builtin* as the input
    # argument; use an explicit placeholder (the stub ignores it anyway).
    result = pp_doctranslation_pipeline.visual_predict(
        "dummy.jpg",
        **params,
    )
    assert isinstance(result, list)
    assert len(result) == 1
    echoed = result[0]["layout_parsing_result"]
    for key, value in params.items():
        assert echoed[key] == value
# TODO: Test constructor and other methods
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "tests/pipelines/test_pp_doctranslation.py",
"license": "Apache License 2.0",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PaddlePaddle/PaddleOCR:mcp_server/paddleocr_mcp/__main__.py | #!/usr/bin/env python3
# Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import asyncio
import os
import sys
from fastmcp import FastMCP
from .pipelines import create_pipeline_handler
def _parse_args() -> argparse.Namespace:
    """Parse command line arguments.

    Most options also accept a default via a ``PADDLEOCR_MCP_*`` environment
    variable; an explicit CLI flag takes precedence. Cross-option
    constraints are checked separately by ``_validate_args``.
    """
    parser = argparse.ArgumentParser(
        description="PaddleOCR MCP server - Supports local library, AI Studio service, and self-hosted servers."
    )
    parser.add_argument(
        "--pipeline",
        choices=["OCR", "PP-StructureV3", "PaddleOCR-VL", "PaddleOCR-VL-1.5"],
        default=os.getenv("PADDLEOCR_MCP_PIPELINE", "OCR"),
        help="Pipeline name.",
    )
    parser.add_argument(
        "--ppocr_source",
        choices=["local", "aistudio", "qianfan", "self_hosted"],
        default=os.getenv("PADDLEOCR_MCP_PPOCR_SOURCE", "local"),
        help="Source of PaddleOCR functionality: local (local library), aistudio (AI Studio service), qianfan (Qianfan service), self_hosted (self-hosted server).",
    )
    # Transport selection: STDIO by default, HTTP when --http is given.
    parser.add_argument(
        "--http",
        action="store_true",
        help="Use HTTP transport instead of STDIO (suitable for remote deployment and multiple clients).",
    )
    parser.add_argument(
        "--host",
        default="127.0.0.1",
        help="Host address for HTTP mode (default: 127.0.0.1).",
    )
    parser.add_argument(
        "--port",
        type=int,
        default=8000,
        help="Port for HTTP mode (default: 8000).",
    )
    parser.add_argument(
        "--verbose", action="store_true", help="Enable verbose logging for debugging."
    )
    # Local mode configuration
    parser.add_argument(
        "--pipeline_config",
        default=os.getenv("PADDLEOCR_MCP_PIPELINE_CONFIG"),
        help="PaddleOCR pipeline configuration file path (for local mode).",
    )
    parser.add_argument(
        "--device",
        default=os.getenv("PADDLEOCR_MCP_DEVICE"),
        help="Device to run inference on.",
    )
    # Service mode configuration
    parser.add_argument(
        "--server_url",
        default=os.getenv("PADDLEOCR_MCP_SERVER_URL"),
        help="Base URL of the underlying server (required in service mode).",
    )
    parser.add_argument(
        "--aistudio_access_token",
        default=os.getenv("PADDLEOCR_MCP_AISTUDIO_ACCESS_TOKEN"),
        help="AI Studio access token (required for AI Studio).",
    )
    parser.add_argument(
        "--qianfan_api_key",
        default=os.getenv("PADDLEOCR_MCP_QIANFAN_API_KEY"),
        help="Qianfan API key (required for Qianfan).",
    )
    parser.add_argument(
        "--timeout",
        type=int,
        default=int(os.getenv("PADDLEOCR_MCP_TIMEOUT", "60")),
        help="HTTP read timeout in seconds for API requests to the underlying server.",
    )
    args = parser.parse_args()
    return args
def _validate_args(args: argparse.Namespace) -> None:
    """Validate command line arguments.

    Prints an error to stderr and exits with status 2 (argparse's usage-error
    convention) on any invalid combination.
    """
    # --host/--port only make sense with HTTP transport.
    if not args.http and (args.host != "127.0.0.1" or args.port != 8000):
        print(
            "Host and port arguments are only valid when using HTTP transport (see: `--http`).",
            file=sys.stderr,
        )
        sys.exit(2)
    # All service-backed sources need a server URL plus their credential.
    if args.ppocr_source in ["aistudio", "qianfan", "self_hosted"]:
        if not args.server_url:
            print("Error: The server base URL is required.", file=sys.stderr)
            print(
                "Please either set `--server_url` or set the environment variable "
                "`PADDLEOCR_MCP_SERVER_URL`.",
                file=sys.stderr,
            )
            sys.exit(2)
        if args.ppocr_source == "aistudio" and not args.aistudio_access_token:
            print("Error: The AI Studio access token is required.", file=sys.stderr)
            print(
                "Please either set `--aistudio_access_token` or set the environment variable "
                "`PADDLEOCR_MCP_AISTUDIO_ACCESS_TOKEN`.",
                file=sys.stderr,
            )
            sys.exit(2)
        elif args.ppocr_source == "qianfan":
            if not args.qianfan_api_key:
                print("Error: The Qianfan API key is required.", file=sys.stderr)
                print(
                    "Please either set `--qianfan_api_key` or set the environment variable "
                    "`PADDLEOCR_MCP_QIANFAN_API_KEY`.",
                    file=sys.stderr,
                )
                sys.exit(2)
            # NOTE(review): this restriction appears to apply to the Qianfan
            # source only (hence the nested placement) — confirm intent.
            if args.pipeline not in ("PP-StructureV3", "PaddleOCR-VL"):
                print(
                    f"{repr(args.pipeline)} is currently not supported when using the {repr(args.ppocr_source)} source.",
                    file=sys.stderr,
                )
                sys.exit(2)
async def async_main() -> None:
    """Asynchronous main entry point.

    Parses and validates CLI arguments, builds the pipeline handler,
    registers its tools on a FastMCP server, and serves over STDIO or HTTP
    until shutdown. Exits with status 1 on setup or serve failure.
    """
    args = _parse_args()
    _validate_args(args)
    try:
        handler = create_pipeline_handler(
            args.pipeline,
            args.ppocr_source,
            pipeline_config=args.pipeline_config,
            device=args.device,
            server_url=args.server_url,
            aistudio_access_token=args.aistudio_access_token,
            qianfan_api_key=args.qianfan_api_key,
            timeout=args.timeout,
        )
    except Exception as exc:
        print(f"Failed to create the pipeline handler: {exc}", file=sys.stderr)
        if args.verbose:
            import traceback

            traceback.print_exc(file=sys.stderr)
        sys.exit(1)
    try:
        await handler.start()
        mcp = FastMCP(
            name=f"PaddleOCR {args.pipeline} MCP server",
            mask_error_details=True,
        )
        handler.register_tools(mcp)
        log_level = "INFO" if args.verbose else "WARNING"
        run_kwargs = {"log_level": log_level}
        if args.http:
            run_kwargs.update(
                transport="streamable-http", host=args.host, port=args.port
            )
        await mcp.run_async(**run_kwargs)
    except Exception as exc:
        print(f"Failed to start the server: {exc}", file=sys.stderr)
        if args.verbose:
            import traceback

            traceback.print_exc(file=sys.stderr)
        sys.exit(1)
    finally:
        # Always release engine threads / service resources on the way out.
        await handler.stop()
def main():
    """Synchronous console-script entry point."""
    # asyncio.run owns the event loop lifecycle around async_main().
    asyncio.run(async_main())
# Support both `python -m paddleocr_mcp` and direct script execution.
if __name__ == "__main__":
    main()
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "mcp_server/paddleocr_mcp/__main__.py",
"license": "Apache License 2.0",
"lines": 183,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:mcp_server/paddleocr_mcp/pipelines.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO:
# 1. Reuse `httpx` client.
# 2. Use `contextvars` to manage MCP context objects.
# 3. Implement structured logging, log stack traces, and log operation timing.
# 4. Report progress for long-running operations.
import abc
import asyncio
import base64
import io
import json
import re
from pathlib import PurePath
from queue import Queue
from threading import Thread
from typing import Any, Callable, Dict, List, NoReturn, Optional, Type, Union
from urllib.parse import urlparse
import httpx
import numpy as np
import puremagic
from fastmcp import Context, FastMCP
from mcp.types import ImageContent, TextContent
from PIL import Image as PILImage
from typing_extensions import Literal, Self, assert_never
try:
from paddleocr import PaddleOCR, PaddleOCRVL, PPStructureV3
LOCAL_OCR_AVAILABLE = True
except ImportError:
LOCAL_OCR_AVAILABLE = False
# Output format selector shared by all pipeline tools:
# "simple" -> readable text, "detailed" -> JSON with coordinates/confidence.
OutputMode = Literal["simple", "detailed"]
def _is_file_path(s: str) -> bool:
    """Best-effort check that *s* can be parsed as a filesystem path.

    Note: ``PurePath`` accepts almost any string, so this is intentionally
    permissive — it is used as the last-resort classification after the
    Base64 and URL checks.
    """
    try:
        PurePath(s)
    except Exception:
        return False
    return True
def _is_base64(s: str) -> bool:
    """Return True if *s* looks like Base64 data, optionally as a data URL.

    Downstream code (e.g. ``_process_input_for_local``) explicitly handles a
    ``data:`` URL prefix *after* this check succeeds, but a raw data URL can
    never match the Base64 alphabet pattern (it contains ``:``/``;``/``,``).
    Strip the prefix first so those branches are actually reachable.
    """
    if s.startswith("data:"):
        # Keep only the payload of e.g. "data:image/png;base64,AAAA...".
        s = s.split(",", 1)[1] if "," in s else ""
    pattern = r"^[A-Za-z0-9+/]+={0,2}$"
    return bool(re.fullmatch(pattern, s))
def _is_url(s: str) -> bool:
    """Return True if *s* is a well-formed http(s) URL."""
    if not s.startswith(("http://", "https://")):
        return False
    parsed = urlparse(s)
    if not (parsed.scheme and parsed.netloc):
        return False
    return parsed.scheme in ("http", "https")
def _infer_file_type_from_bytes(data: bytes) -> Optional[str]:
    """Classify raw bytes as ``"image"``, ``"pdf"``, or ``None``.

    ``puremagic.from_string`` raises when it cannot identify the data at
    all; map that to ``None`` so callers get the documented Optional
    contract (they already handle ``None`` explicitly) instead of an
    unexpected exception.
    """
    try:
        mime = puremagic.from_string(data, mime=True)
    except Exception:
        return None
    if mime.startswith("image/"):
        return "image"
    elif mime == "application/pdf":
        return "pdf"
    return None
def get_str_with_max_len(obj: object, max_len: int) -> str:
    """Stringify *obj*, truncating to *max_len* characters with an ellipsis."""
    text = str(obj)
    return text if len(text) <= max_len else text[:max_len] + "..."
class _EngineWrapper:
    """Runs a blocking inference engine on a single dedicated worker thread.

    Calls submitted via :meth:`call` are serialized through a queue and
    executed one at a time on the background thread, so the asyncio event
    loop is never blocked by engine work. Results are delivered back to the
    loop through futures via ``call_soon_threadsafe``.
    """

    def __init__(self, engine: Any) -> None:
        self._engine = engine
        # Work items are (func, args, kwargs, future); None is the shutdown
        # sentinel consumed by `_worker`.
        self._queue: Queue = Queue()
        self._closed = False
        # Must be constructed from within a running event loop; futures are
        # created on and resolved back onto this loop.
        self._loop = asyncio.get_running_loop()
        # Non-daemon so queued work can finish before interpreter exit.
        self._thread = Thread(target=self._worker, daemon=False)
        self._thread.start()

    @property
    def engine(self) -> Any:
        """The wrapped engine instance (e.g. for accessing its methods)."""
        return self._engine

    async def call(self, func: Callable, *args: Any, **kwargs: Any) -> Any:
        """Run ``func(*args, **kwargs)`` on the worker thread and await its result."""
        if self._closed:
            raise RuntimeError("Engine wrapper has already been closed")
        fut = self._loop.create_future()
        self._queue.put((func, args, kwargs, fut))
        return await fut

    async def close(self) -> None:
        """Signal shutdown and wait (off-loop) for the worker thread to drain and exit."""
        if not self._closed:
            self._queue.put(None)
            # join() blocks, so run it in the default executor.
            await self._loop.run_in_executor(None, self._thread.join)
            self._closed = True

    def _worker(self) -> None:
        # Thread body: pop work items until the None sentinel arrives.
        while not self._closed:
            item = self._queue.get()
            if item is None:
                break
            func, args, kwargs, fut = item
            try:
                result = func(*args, **kwargs)
                # Resolve the future from the loop's thread, never this one.
                self._loop.call_soon_threadsafe(fut.set_result, result)
            except Exception as e:
                self._loop.call_soon_threadsafe(fut.set_exception, e)
            finally:
                self._queue.task_done()
class PipelineHandler(abc.ABC):
    """Abstract base class for pipeline handlers.

    Encapsulates either a local PaddleOCR engine or a remote service client,
    selected by ``ppocr_source``, behind a common lifecycle
    (initialized -> started -> stopped) and a set of abstract hooks that
    concrete pipelines implement.
    """

    def __init__(
        self,
        pipeline: str,
        ppocr_source: str,
        pipeline_config: Optional[str],
        device: Optional[str],
        server_url: Optional[str],
        aistudio_access_token: Optional[str],
        qianfan_api_key: Optional[str],
        timeout: Optional[int],
    ) -> None:
        """Initialize the pipeline handler.

        Args:
            pipeline: Pipeline name.
            ppocr_source: Source of PaddleOCR functionality.
            pipeline_config: Path to pipeline configuration.
            device: Device to run inference on.
            server_url: Base URL for service mode.
            aistudio_access_token: AI Studio access token.
            qianfan_api_key: Qianfan API key.
            timeout: Read timeout in seconds for HTTP requests.

        Raises:
            ValueError: If ``ppocr_source`` is not a known source.
            RuntimeError: In local mode, if PaddleOCR is unavailable or the
                engine cannot be created.
        """
        self._pipeline = pipeline
        # Collapse the source into one of two execution modes.
        if ppocr_source == "local":
            self._mode = "local"
        elif ppocr_source in ("aistudio", "qianfan", "self_hosted"):
            self._mode = "service"
        else:
            raise ValueError(f"Unknown PaddleOCR source {repr(ppocr_source)}")
        self._ppocr_source = ppocr_source
        self._pipeline_config = pipeline_config
        self._device = device
        self._server_url = server_url
        self._aistudio_access_token = aistudio_access_token
        self._qianfan_api_key = qianfan_api_key
        # Default read timeout of 60s when unset (or 0).
        self._timeout = timeout or 60
        if self._mode == "local":
            if not LOCAL_OCR_AVAILABLE:
                raise RuntimeError("PaddleOCR is not locally available")
            try:
                # Eagerly create the engine so configuration errors surface
                # at construction time rather than on the first request.
                self._engine = self._create_local_engine()
            except Exception as e:
                raise RuntimeError(
                    f"Failed to create PaddleOCR engine: {str(e)}"
                ) from e
        self._status: Literal["initialized", "started", "stopped"] = "initialized"

    async def start(self) -> None:
        """Start the handler; idempotent, but a stopped handler cannot restart."""
        if self._status == "initialized":
            if self._mode == "local":
                # Spawns the worker thread that serializes engine calls.
                self._engine_wrapper = _EngineWrapper(self._engine)
            self._status = "started"
        elif self._status == "started":
            pass
        elif self._status == "stopped":
            raise RuntimeError("Pipeline handler has already been stopped")
        else:
            assert_never(self._status)

    async def stop(self) -> None:
        """Stop the handler and release resources; idempotent once stopped."""
        if self._status == "initialized":
            raise RuntimeError("Pipeline handler has not been started")
        elif self._status == "started":
            if self._mode == "local":
                await self._engine_wrapper.close()
            self._status = "stopped"
        elif self._status == "stopped":
            pass
        else:
            assert_never(self._status)

    async def __aenter__(self) -> Self:
        """Async context manager entry: start the handler."""
        await self.start()
        return self

    async def __aexit__(
        self,
        exc_type: Any,
        exc_val: Any,
        exc_tb: Any,
    ) -> None:
        """Async context manager exit: always stop the handler."""
        await self.stop()

    @abc.abstractmethod
    def register_tools(self, mcp: FastMCP) -> None:
        """Register tools with the MCP server.

        Args:
            mcp: The `FastMCP` instance.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def _create_local_engine(self) -> Any:
        """Create the local OCR engine.

        Returns:
            The OCR engine instance.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def _get_service_endpoint(self) -> str:
        """Get the service endpoint.

        Returns:
            Service endpoint path.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def _transform_local_kwargs(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
        """Transform keyword arguments for local execution.

        Args:
            kwargs: Keyword arguments.

        Returns:
            Transformed keyword arguments.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def _transform_service_kwargs(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
        """Transform keyword arguments for service execution.

        Args:
            kwargs: Keyword arguments.

        Returns:
            Transformed keyword arguments.
        """
        raise NotImplementedError

    @abc.abstractmethod
    async def _parse_local_result(
        self, local_result: Dict, ctx: Context
    ) -> Dict[str, Any]:
        """Parse raw result from local engine into a unified format.

        Args:
            local_result: Raw result from local engine.
            ctx: MCP context.

        Returns:
            Parsed result in unified format.
        """
        raise NotImplementedError

    @abc.abstractmethod
    async def _parse_service_result(
        self, service_result: Dict[str, Any], ctx: Context
    ) -> Dict[str, Any]:
        """Parse raw result from the service into a unified format.

        Args:
            service_result: Raw result from the service.
            ctx: MCP context.

        Returns:
            Parsed result in unified format.
        """
        raise NotImplementedError

    @abc.abstractmethod
    async def _log_completion_stats(self, result: Dict[str, Any], ctx: Context) -> None:
        """Log statistics after processing completion.

        Args:
            result: Processing result.
            ctx: MCP context.
        """
        raise NotImplementedError

    @abc.abstractmethod
    async def _format_output(
        self,
        result: Dict[str, Any],
        detailed: bool,
        ctx: Context,
        **kwargs: Any,
    ) -> Union[str, List[Union[TextContent, ImageContent]]]:
        """Format output into simple or detailed format.

        Args:
            result: Processing result.
            detailed: Whether to use detailed format.
            ctx: MCP context.
            **kwargs: Additional arguments.

        Returns:
            Formatted output in requested format.
        """
        raise NotImplementedError

    async def _predict_with_local_engine(
        self, processed_input: Union[str, np.ndarray], ctx: Context, **kwargs: Any
    ) -> Dict:
        """Run the local engine's ``predict`` on the worker thread.

        Raises:
            RuntimeError: If called before :meth:`start` created the wrapper.
        """
        if not hasattr(self, "_engine_wrapper"):
            raise RuntimeError("Engine wrapper has not been initialized")
        return await self._engine_wrapper.call(
            self._engine_wrapper.engine.predict, processed_input, **kwargs
        )
class SimpleInferencePipelineHandler(PipelineHandler):
"""Base class for simple inference pipeline handlers."""
async def process(
self,
input_data: str,
output_mode: OutputMode,
ctx: Context,
file_type: Optional[str] = None,
infer_kwargs: Optional[Dict[str, Any]] = None,
format_kwargs: Optional[Dict[str, Any]] = None,
) -> Union[str, List[Union[TextContent, ImageContent]]]:
"""Process input data through the pipeline.
Args:
input_data: Input data (file path, URL, or Base64).
output_mode: Output mode ("simple" or "detailed").
ctx: MCP context.
file_type: File type for URLs ("image", "pdf", or None for auto-detection).
infer_kwargs: Additional arguments for performing pipeline inference.
format_kwargs: Additional arguments for formatting the output.
Returns:
Processed result in the requested output format.
"""
infer_kwargs = infer_kwargs or {}
format_kwargs = format_kwargs or {}
try:
await ctx.info(
f"Starting {self._pipeline} processing (source: {self._ppocr_source})"
)
if self._mode == "local":
processed_input = self._process_input_for_local(input_data, file_type)
infer_kwargs = self._transform_local_kwargs(infer_kwargs)
raw_result = await self._predict_with_local_engine(
processed_input, ctx, **infer_kwargs
)
result = await self._parse_local_result(raw_result, ctx)
else:
processed_input, inferred_file_type = self._process_input_for_service(
input_data, file_type
)
infer_kwargs = self._transform_service_kwargs(infer_kwargs)
raw_result = await self._call_service(
processed_input, inferred_file_type, ctx, **infer_kwargs
)
result = await self._parse_service_result(raw_result, ctx)
await self._log_completion_stats(result, ctx)
return await self._format_output(
result, output_mode == "detailed", ctx, **format_kwargs
)
except Exception as e:
await ctx.error(f"{self._pipeline} processing failed: {str(e)}")
self._handle_error(e, output_mode)
def _process_input_for_local(
self, input_data: str, file_type: Optional[str]
) -> Union[str, np.ndarray]:
# TODO: Use `file_type` to handle more cases.
if _is_base64(input_data):
if input_data.startswith("data:"):
base64_data = input_data.split(",", 1)[1]
else:
base64_data = input_data
try:
image_bytes = base64.b64decode(base64_data)
file_type = _infer_file_type_from_bytes(image_bytes)
if file_type != "image":
raise ValueError("Currently, only images can be passed via Base64.")
image_pil = PILImage.open(io.BytesIO(image_bytes))
image_arr = np.array(image_pil.convert("RGB"))
return np.ascontiguousarray(image_arr[..., ::-1])
except Exception as e:
raise ValueError(f"Failed to decode Base64 image: {str(e)}") from e
elif _is_file_path(input_data) or _is_url(input_data):
return input_data
else:
raise ValueError("Invalid input data format")
def _process_input_for_service(
self, input_data: str, file_type: Optional[str]
) -> tuple[str, Optional[str]]:
if _is_url(input_data):
norm_ft = None
if isinstance(file_type, str):
if file_type.lower() in ("None", "none", "null", "unknown", ""):
norm_ft = None
else:
norm_ft = file_type.lower()
return input_data, norm_ft
elif _is_base64(input_data):
try:
if input_data.startswith("data:"):
base64_data = input_data.split(",", 1)[1]
else:
base64_data = input_data
bytes_ = base64.b64decode(base64_data)
file_type_str = _infer_file_type_from_bytes(bytes_)
if file_type_str is None:
raise ValueError(
"Unsupported file type in Base64 data. "
"Only images (JPEG, PNG, etc.) and PDF documents are supported."
)
return input_data, file_type_str
except Exception as e:
raise ValueError(f"Failed to decode Base64 data: {str(e)}") from e
elif _is_file_path(input_data):
try:
with open(input_data, "rb") as f:
bytes_ = f.read()
input_data = base64.b64encode(bytes_).decode("ascii")
file_type_str = _infer_file_type_from_bytes(bytes_)
if file_type_str is None:
raise ValueError(
f"Unsupported file type for '{input_data}'. "
"Only images (JPEG, PNG, etc.) and PDF documents are supported."
)
return input_data, file_type_str
except Exception as e:
raise ValueError(f"Failed to read file: {str(e)}") from e
else:
raise ValueError("Invalid input data format")
async def _call_service(
self,
processed_input: str,
file_type: Optional[str],
ctx: Context,
**kwargs: Any,
) -> Dict[str, Any]:
if not self._server_url:
raise RuntimeError("Server URL not configured")
endpoint = self._get_service_endpoint()
if endpoint:
endpoint = "/" + endpoint
url = f"{self._server_url.rstrip('/')}{endpoint}"
payload = self._prepare_service_payload(processed_input, file_type, **kwargs)
headers = {"Content-Type": "application/json"}
if self._ppocr_source == "aistudio":
if not self._aistudio_access_token:
raise RuntimeError("Missing AI Studio access token")
headers["Authorization"] = f"token {self._aistudio_access_token}"
elif self._ppocr_source == "qianfan":
if not self._qianfan_api_key:
raise RuntimeError("Missing Qianfan API key")
headers["Authorization"] = f"Bearer {self._qianfan_api_key}"
try:
timeout = httpx.Timeout(
connect=30.0, read=self._timeout, write=30.0, pool=30.0
)
async with httpx.AsyncClient(timeout=timeout) as client:
response = await client.post(url, json=payload, headers=headers)
response.raise_for_status()
return response.json()
except httpx.HTTPError as e:
raise RuntimeError(f"HTTP request failed: {type(e).__name__}: {str(e)}")
except json.JSONDecodeError as e:
raise RuntimeError(f"Invalid service response: {str(e)}")
def _prepare_service_payload(
self, processed_input: str, file_type: Optional[str], **kwargs: Any
) -> Dict[str, Any]:
payload: Dict[str, Any] = {"file": processed_input, **kwargs}
if file_type == "image":
payload["fileType"] = 1
elif file_type == "pdf":
payload["fileType"] = 0
else:
payload["fileType"] = None
return payload
    def _handle_error(self, exc: Exception, output_mode: OutputMode) -> NoReturn:
        # Default error policy: re-raise unchanged. `output_mode` is unused
        # here; presumably an override hook for subclasses that want to render
        # errors as formatted output — confirm against the base class contract.
        raise exc
class OCRHandler(SimpleInferencePipelineHandler):
    """Handler exposing the plain OCR pipeline as an MCP tool.

    Produces flat text plus optional per-line confidence/bounding-box data,
    from either a local PaddleOCR engine or a remote service.
    """
    def register_tools(self, mcp: FastMCP) -> None:
        """Register the ``ocr`` tool on the given MCP server."""
        @mcp.tool("ocr")
        async def _ocr(
            input_data: str,
            output_mode: OutputMode = "simple",
            file_type: Optional[str] = None,
            *,
            ctx: Context,
        ) -> Union[str, List[Union[TextContent, ImageContent]]]:
            """Extracts text from images and PDFs. Accepts file path, URL, or Base64.
            Args:
                input_data: The file to process (file path, URL, or Base64 string).
                output_mode: The desired output format.
                - "simple": (Default) Clean, readable text suitable for most use cases.
                - "detailed": A JSON output including text, confidence, and precise bounding box coordinates. Only use this when coordinates are specifically required.
                file_type: File type. This parameter is REQUIRED when `input_data` is a URL and should be omitted for other types.
                - "image": For image files
                - "pdf": For PDF documents
                - None: For unknown file types
            """
            # Log a truncated preview of the input to avoid flooding the log
            # with Base64 blobs.
            await ctx.info(
                f"--- OCR tool received `input_data`: {get_str_with_max_len(input_data, 50)} ---"
            )
            return await self.process(input_data, output_mode, ctx, file_type)
    def _create_local_engine(self) -> Any:
        """Construct the local PaddleOCR pipeline engine."""
        return PaddleOCR(
            paddlex_config=self._pipeline_config,
            device=self._device,
        )
    def _get_service_endpoint(self) -> str:
        """Endpoint path used when calling the remote service."""
        return "ocr"
    def _transform_local_kwargs(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
        # Unwarping and orientation classification are disabled for plain OCR;
        # incoming kwargs are deliberately discarded.
        return {
            "use_doc_unwarping": False,
            "use_doc_orientation_classify": False,
        }
    def _transform_service_kwargs(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
        # Same switches as the local path, in the service's camelCase spelling.
        return {
            "useDocUnwarping": False,
            "useDocOrientationClassify": False,
        }
    async def _parse_local_result(self, local_result: Dict, ctx: Context) -> Dict:
        """Flatten local engine output into {text, confidence, text_lines}."""
        clean_texts, confidences, text_lines = [], [], []
        for result in local_result:
            texts = result["rec_texts"]
            scores = result["rec_scores"]
            boxes = result["rec_boxes"]
            for i, text in enumerate(texts):
                # Skip empty/whitespace-only recognition results.
                if text and text.strip():
                    # Defensive: the score list may be shorter than the text list.
                    conf = scores[i] if i < len(scores) else 0
                    clean_texts.append(text.strip())
                    confidences.append(conf)
                    instance = {
                        "text": text.strip(),
                        "confidence": round(conf, 3),
                        # `boxes[i]` is array-like; tolist() makes it JSON-serializable.
                        "bbox": boxes[i].tolist(),
                    }
                    text_lines.append(instance)
        return {
            "text": "\n".join(clean_texts),
            # Mean confidence over kept lines; 0 when nothing was detected.
            "confidence": sum(confidences) / len(confidences) if confidences else 0,
            "text_lines": text_lines,
        }
    async def _parse_service_result(self, service_result: Dict, ctx: Context) -> Dict:
        """Flatten service JSON output into the same shape as the local parser."""
        result_data = service_result.get("result", service_result)
        ocr_results = result_data.get("ocrResults")
        all_texts, all_confidences, text_lines = [], [], []
        # NOTE(review): if "ocrResults" is absent, `ocr_results` is None and the
        # loop raises TypeError — presumably the key is guaranteed by the
        # service contract; verify.
        for ocr_result in ocr_results:
            pruned = ocr_result["prunedResult"]
            texts = pruned["rec_texts"]
            scores = pruned["rec_scores"]
            boxes = pruned["rec_boxes"]
            for i, text in enumerate(texts):
                if text and text.strip():
                    conf = scores[i] if i < len(scores) else 0
                    all_texts.append(text.strip())
                    all_confidences.append(conf)
                    instance = {
                        "text": text.strip(),
                        "confidence": round(conf, 3),
                        # Service boxes are already plain lists (no tolist()).
                        "bbox": boxes[i],
                    }
                    text_lines.append(instance)
        return {
            "text": "\n".join(all_texts),
            "confidence": (
                sum(all_confidences) / len(all_confidences) if all_confidences else 0
            ),
            "text_lines": text_lines,
        }
    async def _log_completion_stats(self, result: Dict, ctx: Context) -> None:
        """Emit a one-line summary after a successful OCR run."""
        text_length = len(result["text"])
        text_line_count = len(result["text_lines"])
        await ctx.info(
            f"OCR completed: {text_length} characters, {text_line_count} text lines"
        )
    async def _format_output(
        self,
        result: Dict,
        detailed: bool,
        ctx: Context,
        **kwargs: Any,
    ) -> Union[str, List[Union[TextContent, ImageContent]]]:
        """Render the parsed result per the requested output mode.

        Simple mode returns plain text (with an appended confidence footer);
        detailed mode returns the full result as pretty-printed JSON.
        """
        if not result["text"].strip():
            return (
                "❌ No text detected"
                if not detailed
                else json.dumps({"error": "No text detected"}, ensure_ascii=False)
            )
        if detailed:
            return json.dumps(result, ensure_ascii=False, indent=2)
        else:
            confidence = result["confidence"]
            text_line_count = len(result["text_lines"])
            output = result["text"]
            if confidence > 0:
                output += f"\n\n📊 Confidence: {(confidence * 100):.1f}% | {text_line_count} text lines"
            return output
class _LayoutParsingHandler(SimpleInferencePipelineHandler):
    """Shared behavior for layout-parsing pipelines that produce markdown
    (used by the PP-StructureV3 and PaddleOCR-VL handlers below)."""
    def _get_service_endpoint(self) -> str:
        # Qianfan hosts layout parsing under a different endpoint name.
        return "layout-parsing" if self._ppocr_source != "qianfan" else "paddleocr"
    def _transform_local_kwargs(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
        # Unwarping/orientation preprocessing disabled; incoming kwargs are
        # deliberately discarded.
        return {
            "use_doc_unwarping": False,
            "use_doc_orientation_classify": False,
        }
    def _transform_service_kwargs(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
        # Service-side (camelCase) equivalents of the local switches.
        return {
            "useDocUnwarping": False,
            "useDocOrientationClassify": False,
        }
    async def _parse_local_result(self, local_result: Dict, ctx: Context) -> Dict:
        """Collect per-page markdown and embedded images from a local run."""
        markdown_parts = []
        all_images_mapping = {}
        detailed_results = []
        for result in local_result:
            markdown = result.markdown
            text = markdown["markdown_texts"]
            markdown_parts.append(text)
            images = markdown["markdown_images"]
            processed_images = {}
            for img_key, img_data in images.items():
                # Re-encode each image object (PIL-style `.save`) as JPEG Base64.
                with io.BytesIO() as buffer:
                    img_data.save(buffer, format="JPEG")
                    # NOTE(review): b64encode returns *bytes*, while the service
                    # path (_process_image_data) stores str — confirm downstream
                    # consumers (e.g. ImageContent) accept both.
                    processed_images[img_key] = base64.b64encode(buffer.getvalue())
            all_images_mapping.update(processed_images)
            detailed_results.append(result)
        return {
            # TODO: Page concatenation can be done better via `pipeline.concatenate_markdown_pages`
            "markdown": "\n".join(markdown_parts),
            "pages": len(local_result),
            "images_mapping": all_images_mapping,
            "detailed_results": detailed_results,
        }
    async def _parse_service_result(self, service_result: Dict, ctx: Context) -> Dict:
        """Collect per-page markdown and images from a service response."""
        result_data = service_result.get("result", service_result)
        layout_results = result_data.get("layoutParsingResults")
        if not layout_results:
            # Empty/missing results: return an empty-but-well-formed shape.
            return {
                "markdown": "",
                "pages": 0,
                "images_mapping": {},
                "detailed_results": [],
            }
        markdown_parts = []
        all_images_mapping = {}
        detailed_results = []
        for res in layout_results:
            markdown_parts.append(res["markdown"]["text"])
            images = res["markdown"]["images"]
            processed_images = {}
            for img_key, img_data in images.items():
                # Images may arrive as URLs or Base64; normalize to Base64.
                processed_images[img_key] = await self._process_image_data(
                    img_data, ctx
                )
            all_images_mapping.update(processed_images)
            detailed_results.append(res["prunedResult"])
        return {
            "markdown": "\n".join(markdown_parts),
            "pages": len(layout_results),
            "images_mapping": all_images_mapping,
            "detailed_results": detailed_results,
        }
    async def _process_image_data(self, img_data: str, ctx: Context) -> str:
        """Normalize one image reference to Base64, downloading URLs if needed.

        On failure (download error, unknown format) the input is returned
        unchanged after logging — best-effort by design.
        """
        if _is_url(img_data):
            try:
                timeout = httpx.Timeout(connect=30.0, read=30.0, write=30.0, pool=30.0)
                async with httpx.AsyncClient(timeout=timeout) as client:
                    response = await client.get(img_data)
                    response.raise_for_status()
                    img_bytes = response.content
                    return base64.b64encode(img_bytes).decode("ascii")
            except Exception as e:
                await ctx.error(
                    f"Failed to download image from URL {img_data}: {str(e)}"
                )
                return img_data
        elif _is_base64(img_data):
            # Already Base64 — pass through untouched.
            return img_data
        else:
            await ctx.error(
                f"Unknown image data format: {get_str_with_max_len(img_data, 50)}"
            )
            return img_data
    async def _log_completion_stats(self, result: Dict, ctx: Context) -> None:
        """Emit a one-line summary after a successful parsing run."""
        page_count = result["pages"]
        await ctx.info(f"Layout parsing completed: {page_count} pages")
    async def _format_output(
        self,
        result: Dict,
        detailed: bool,
        ctx: Context,
        **kwargs: Any,
    ) -> Union[str, List[Union[TextContent, ImageContent]]]:
        """Render markdown (optionally with inline images) plus detailed JSON.

        `kwargs["return_images"]` controls whether embedded images are split
        out as ImageContent items or left as raw markdown.
        """
        if not result["markdown"].strip():
            return (
                "❌ No document content detected"
                if not detailed
                else json.dumps({"error": "No content detected"}, ensure_ascii=False)
            )
        markdown_text = result["markdown"]
        images_mapping = result.get("images_mapping", {})
        if kwargs.get("return_images"):
            content_list = self._parse_markdown_with_images(
                markdown_text, images_mapping
            )
        else:
            content_list = [TextContent(type="text", text=markdown_text)]
        if detailed:
            # Append each page's detailed result as a pretty-printed JSON block.
            if "detailed_results" in result and result["detailed_results"]:
                for detailed_result in result["detailed_results"]:
                    content_list.append(
                        TextContent(
                            type="text",
                            text=json.dumps(
                                detailed_result,
                                ensure_ascii=False,
                                indent=2,
                                default=str,
                            ),
                        )
                    )
        return content_list
    def _parse_markdown_with_images(
        self, markdown_text: str, images_mapping: Dict[str, str]
    ) -> List[Union[TextContent, ImageContent]]:
        """Parse markdown text and return mixed list of text and images."""
        if not images_mapping:
            return [TextContent(type="text", text=markdown_text)]
        content_list = []
        # Matches HTML <img ... src="..."> tags embedded in the markdown.
        img_pattern = r'<img[^>]+src="([^"]+)"[^>]*>'
        last_pos = 0
        for match in re.finditer(img_pattern, markdown_text):
            text_before = markdown_text[last_pos : match.start()]
            if text_before.strip():
                content_list.append(TextContent(type="text", text=text_before))
            img_src = match.group(1)
            # Unknown img srcs are silently dropped (no matching image data).
            if img_src in images_mapping:
                content_list.append(
                    ImageContent(
                        type="image",
                        data=images_mapping[img_src],
                        mimeType="image/jpeg",
                    )
                )
            last_pos = match.end()
        remaining_text = markdown_text[last_pos:]
        if remaining_text.strip():
            content_list.append(TextContent(type="text", text=remaining_text))
        return content_list or [TextContent(type="text", text=markdown_text)]
class PPStructureV3Handler(_LayoutParsingHandler):
    """Layout-parsing handler for the PP-StructureV3 pipeline."""
    def register_tools(self, mcp: FastMCP) -> None:
        """Register the ``pp_structurev3`` tool on the given MCP server."""
        @mcp.tool("pp_structurev3")
        async def _pp_structurev3(
            input_data: str,
            output_mode: OutputMode = "simple",
            file_type: Optional[str] = None,
            return_images: bool = True,
            *,
            ctx: Context,
        ) -> Union[str, List[Union[TextContent, ImageContent]]]:
            """Extracts structured markdown from complex documents (images/PDFs), including tables, formulas, etc. Accepts file path, URL, or Base64.
            Args:
                input_data: The file to process (file path, URL, or Base64 string).
                output_mode: The desired output format.
                - "simple": (Default) Clean, readable markdown with embedded images. Best for most use cases.
                - "detailed": JSON data about document structure, plus markdown. Only use this when coordinates are specifically required.
                file_type: File type. This parameter is REQUIRED when `input_data` is a URL and should be omitted for other types.
                - "image": For image files
                - "pdf": For PDF documents
                - None: For unknown file types
                return_images: Whether to return the images extracted from the document.
            """
            return await self.process(
                input_data,
                output_mode,
                ctx,
                file_type,
                format_kwargs={"return_images": return_images},
            )
    def _create_local_engine(self) -> Any:
        """Construct the local PP-StructureV3 pipeline engine."""
        return PPStructureV3(
            paddlex_config=self._pipeline_config,
            device=self._device,
        )
    def _transform_service_kwargs(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
        """Extend the shared service kwargs with the Qianfan model identifier."""
        kwargs = super()._transform_service_kwargs(kwargs)
        if self._ppocr_source == "qianfan":
            # Qianfan multiplexes pipelines behind one endpoint; select by model.
            kwargs["model"] = "pp-structurev3"
        return kwargs
class PaddleOCRVLHandler(_LayoutParsingHandler):
    """Layout-parsing handler for the VLM-based PaddleOCR-VL pipelines.

    One handler serves both PaddleOCR-VL and PaddleOCR-VL-1.5; it dispatches
    on ``self._pipeline`` where the versions differ.
    """
    def register_tools(self, mcp: FastMCP) -> None:
        """Register the ``paddleocr_vl`` tool on the given MCP server."""
        @mcp.tool("paddleocr_vl")
        async def _paddleocr_vl(
            input_data: str,
            output_mode: OutputMode = "simple",
            file_type: Optional[str] = None,
            return_images: bool = True,
            *,
            ctx: Context,
        ) -> Union[str, List[Union[TextContent, ImageContent]]]:
            """Extracts structured markdown from complex documents (images/PDFs) using a VLM-based approach. The extracted elements include tables, formulas, etc. Accepts file path, URL, or Base64.
            Args:
                input_data: The file to process (file path, URL, or Base64 string).
                output_mode: The desired output format.
                - "simple": (Default) Clean, readable markdown with embedded images. Best for most use cases.
                - "detailed": JSON data about document structure, plus markdown. Only use this when coordinates are specifically required.
                file_type: File type. This parameter is REQUIRED when `input_data` is a URL and should be omitted for other types.
                - "image": For image files
                - "pdf": For PDF documents
                - None: For unknown file types
                return_images: Whether to return the images extracted from the document.
            """
            return await self.process(
                input_data,
                output_mode,
                ctx,
                file_type,
                format_kwargs={"return_images": return_images},
            )
    def _create_local_engine(self) -> Any:
        """Construct the local PaddleOCR-VL engine for the configured version."""
        if self._pipeline == "PaddleOCR-VL":
            pipeline_version = "v1"
        elif self._pipeline == "PaddleOCR-VL-1.5":
            pipeline_version = "v1.5"
        else:
            raise RuntimeError(f"Unknown pipeline {repr(self._pipeline)}")
        return PaddleOCRVL(
            pipeline_version=pipeline_version,
            paddlex_config=self._pipeline_config,
            device=self._device,
        )
    def _transform_service_kwargs(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
        """Extend the shared service kwargs with the Qianfan model identifier."""
        kwargs = super()._transform_service_kwargs(kwargs)
        if self._ppocr_source == "qianfan":
            # Only v1 has a Qianfan-hosted model; v1.5 is rejected explicitly.
            if self._pipeline == "PaddleOCR-VL":
                kwargs["model"] = "paddleocr-vl-0.9b"
            else:
                raise RuntimeError(
                    f"Unknown or unsupported pipeline {repr(self._pipeline)}"
                )
        return kwargs
# Registry mapping pipeline names to handler classes. Both PaddleOCR-VL
# versions share one handler, which dispatches on the pipeline name internally.
_PIPELINE_HANDLERS: Dict[str, Type[PipelineHandler]] = {
    "OCR": OCRHandler,
    "PP-StructureV3": PPStructureV3Handler,
    "PaddleOCR-VL": PaddleOCRVLHandler,
    "PaddleOCR-VL-1.5": PaddleOCRVLHandler,
}
def create_pipeline_handler(
    pipeline: str, /, *args: Any, **kwargs: Any
) -> PipelineHandler:
    """Instantiate the handler registered for *pipeline*.

    Args:
        pipeline: Registry key (e.g. "OCR", "PP-StructureV3").
        *args: Positional arguments forwarded to the handler constructor.
        **kwargs: Keyword arguments forwarded to the handler constructor.

    Raises:
        ValueError: If no handler is registered under *pipeline*.
    """
    handler_cls = _PIPELINE_HANDLERS.get(pipeline)
    if handler_cls is None:
        raise ValueError(f"Unknown pipeline {repr(pipeline)}")
    return handler_cls(pipeline, *args, **kwargs)
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "mcp_server/paddleocr_mcp/pipelines.py",
"license": "Apache License 2.0",
"lines": 823,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_models/_text_detection.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class TextDetectionMixin:
    """Mixin collecting text-detection specific constructor options.

    The options are stashed on the instance and exposed through
    ``_get_extra_paddlex_predictor_init_args`` so the underlying PaddleX
    predictor can be initialized with them; unrelated keyword arguments are
    forwarded to the next class in the MRO.
    """

    def __init__(
        self,
        *,
        limit_side_len=None,
        limit_type=None,
        thresh=None,
        box_thresh=None,
        unclip_ratio=None,
        input_shape=None,
        **kwargs,
    ):
        self._extra_init_args = dict(
            limit_side_len=limit_side_len,
            limit_type=limit_type,
            thresh=thresh,
            box_thresh=box_thresh,
            unclip_ratio=unclip_ratio,
            input_shape=input_shape,
        )
        super().__init__(**kwargs)

    def _get_extra_paddlex_predictor_init_args(self):
        """Return the detection-specific init arguments as a dict."""
        return self._extra_init_args
class TextDetectionSubcommandExecutorMixin:
    """Mixin that registers the text-detection CLI options on a subparser."""

    def _add_text_detection_args(self, subparser):
        """Add the text-detection options to *subparser* (in help order)."""
        option_specs = [
            (
                "--limit_side_len",
                dict(
                    type=int,
                    help="This sets a limit on the side length of the input image for the model.",
                ),
            ),
            (
                "--limit_type",
                dict(
                    type=str,
                    help="This determines how the side length limit is applied to the input image before feeding it into the model.",
                ),
            ),
            (
                "--thresh",
                dict(
                    type=float,
                    help="Detection pixel threshold for the model. Pixels with scores greater than this threshold in the output probability map are considered text pixels.",
                ),
            ),
            (
                "--box_thresh",
                dict(
                    type=float,
                    help="Detection box threshold for the model. A detection result is considered a text region if the average score of all pixels within the border of the result is greater than this threshold.",
                ),
            ),
            (
                "--unclip_ratio",
                dict(
                    type=float,
                    help="Expansion coefficient, which expands the text region using this method. The larger the value, the larger the expansion area.",
                ),
            ),
            (
                "--input_shape",
                dict(
                    nargs=3,
                    type=int,
                    metavar=("C", "H", "W"),
                    help="Input shape of the model.",
                ),
            ),
        ]
        for flag, spec in option_specs:
            subparser.add_argument(flag, **spec)
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_models/_text_detection.py",
"license": "Apache License 2.0",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/__main__.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from ._cli import main
def console_entry() -> int:
    """CLI entry point that exits cleanly when stdout is a broken pipe.

    Returns:
        0 on success. Exits with status 1 if a downstream pipe closes early
        (e.g. ``paddleocr ... | head``).
    """
    # See https://docs.python.org/3/library/signal.html#note-on-sigpipe
    try:
        # Flush output here to force SIGPIPE to be triggered while inside this
        # try block.
        main()
        sys.stdout.flush()
        sys.stderr.flush()
    except BrokenPipeError:
        # Python flushes standard streams on exit;
        # redirect remaining output to devnull to avoid another BrokenPipeError
        # at shutdown.
        devnull = os.open(os.devnull, os.O_WRONLY)
        os.dup2(devnull, sys.stdout.fileno())
        sys.exit(1)
    # Fix: the function is annotated `-> int` but previously fell through
    # returning None on success; return an explicit success code (still exit
    # status 0 when used as a console-script entry point).
    return 0
# Support direct execution of this module as a script.
if __name__ == "__main__":
    console_entry()
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/__main__.py",
"license": "Apache License 2.0",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_abstract.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
class CLISubcommandExecutor(metaclass=abc.ABCMeta):
    """Abstract interface for objects implementing one CLI subcommand."""
    @abc.abstractmethod
    def add_subparser(self, subparsers):
        """Register this subcommand's parser on *subparsers* and return it."""
        raise NotImplementedError
    @abc.abstractmethod
    def execute_with_args(self, args):
        """Run the subcommand with the parsed argparse namespace *args*."""
        raise NotImplementedError
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_abstract.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_cli.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import subprocess
import sys
import time
import warnings
from threading import Thread
import requests
from ._models import (
ChartParsing,
DocImgOrientationClassification,
DocVLM,
FormulaRecognition,
LayoutDetection,
SealTextDetection,
TableCellsDetection,
TableClassification,
TableStructureRecognition,
TextDetection,
TextImageUnwarping,
TextLineOrientationClassification,
TextRecognition,
)
from ._pipelines import (
DocPreprocessor,
DocUnderstanding,
FormulaRecognitionPipeline,
PaddleOCR,
PaddleOCRVL,
PPChatOCRv4Doc,
PPDocTranslation,
PPStructureV3,
SealRecognition,
TableRecognitionPipelineV2,
)
from ._version import version
from ._utils.deprecation import CLIDeprecationWarning
from ._utils.logging import logger
def _register_pipelines(subparsers):
    """Register one CLI subcommand per supported pipeline class."""
    pipeline_classes = (
        DocPreprocessor,
        DocUnderstanding,
        FormulaRecognitionPipeline,
        PaddleOCR,
        PaddleOCRVL,
        PPChatOCRv4Doc,
        PPDocTranslation,
        PPStructureV3,
        SealRecognition,
        TableRecognitionPipelineV2,
    )
    for pipeline_cls in pipeline_classes:
        executor = pipeline_cls.get_cli_subcommand_executor()
        sub = executor.add_subparser(subparsers)
        # Bind the executor so `main` can dispatch on the matched subcommand.
        sub.set_defaults(executor=executor.execute_with_args)
def _register_models(subparsers):
    """Register one CLI subcommand per supported single-model wrapper."""
    model_classes = (
        ChartParsing,
        DocImgOrientationClassification,
        DocVLM,
        FormulaRecognition,
        LayoutDetection,
        SealTextDetection,
        TableCellsDetection,
        TableClassification,
        TableStructureRecognition,
        TextDetection,
        TextImageUnwarping,
        TextLineOrientationClassification,
        TextRecognition,
    )
    for model_cls in model_classes:
        executor = model_cls.get_cli_subcommand_executor()
        sub = executor.add_subparser(subparsers)
        # Bind the executor so `main` can dispatch on the matched subcommand.
        sub.set_defaults(executor=executor.execute_with_args)
def _register_install_hpi_deps_command(subparsers):
def _install_hpi_deps(args):
hpip = f"hpi-{args.variant}"
try:
subprocess.check_call(["paddlex", "--install", hpip])
subprocess.check_call(["paddlex", "--install", "paddle2onnx"])
except subprocess.CalledProcessError:
sys.exit("Failed to install dependencies")
subparser = subparsers.add_parser("install_hpi_deps")
subparser.add_argument("variant", type=str, choices=["cpu", "gpu", "npu"])
subparser.set_defaults(executor=_install_hpi_deps)
def _register_install_genai_server_deps_command(subparsers):
def _install_genai_server_deps(args):
try:
subprocess.check_call(
["paddlex", "--install", f"genai-{args.variant}-server"]
)
except subprocess.CalledProcessError:
sys.exit("Failed to install dependencies")
subparser = subparsers.add_parser("install_genai_server_deps")
subparser.add_argument(
"variant", type=str, choices=["vllm", "sglang", "fastdeploy"]
)
subparser.set_defaults(executor=_install_genai_server_deps)
def _register_genai_server_command(subparsers):
    """Register the ``genai_server`` subcommand, if the PaddleX GenAI server
    plugin is importable; otherwise silently skip registration."""
    # TODO: Register the subparser whether the plugin is installed or not
    try:
        from paddlex.inference.genai.server import get_arg_parser, run_genai_server
    except RuntimeError:
        # Plugin unavailable — the subcommand is simply not registered.
        return
    def _show_prompt_when_server_is_running(host, port, backend):
        # Poll the health endpoint until the server answers, then log usage hints.
        if host == "0.0.0.0":
            host = "localhost"
        while True:
            try:
                resp = requests.get(f"http://{host}:{port}/health", timeout=1)
                if resp.status_code == 200:
                    break
            except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
                pass
            time.sleep(1)
        prompt = f"""The PaddleOCR GenAI server has been started. You can either:
1. Set the server URL in the module or pipeline configuration and call the PaddleOCR CLI or Python API. For example:
paddleocr doc_parser --input demo.png --vl_rec_backend {backend}-server --vl_rec_server_url http://{host}:{port}/v1
2. Make HTTP requests directly, or using the OpenAI client library."""
        logger.info(prompt)
    def _run_genai_server(args):
        # Daemon thread so the health-poller never blocks process exit.
        Thread(
            target=_show_prompt_when_server_is_running,
            args=(args.host, args.port, args.backend),
            daemon=True,
        ).start()
        try:
            run_genai_server(args)
        except subprocess.CalledProcessError:
            sys.exit("Failed to run the server")
    paddlex_parser = get_arg_parser()
    # Reuse the plugin's parser as a parent; resolve option conflicts in favor
    # of the subparser's own definitions.
    subparser = subparsers.add_parser(
        "genai_server", parents=[paddlex_parser], conflict_handler="resolve"
    )
    subparser.set_defaults(executor=_run_genai_server)
def _get_parser():
    """Build the top-level ``paddleocr`` argument parser with all subcommands."""
    parser = argparse.ArgumentParser(prog="paddleocr")
    parser.add_argument(
        "-v", "--version", action="version", version=f"%(prog)s {version}"
    )
    subparsers = parser.add_subparsers(dest="subcommand")
    # Registration order determines the order subcommands appear in --help.
    for register in (
        _register_pipelines,
        _register_models,
        _register_install_hpi_deps_command,
        _register_install_genai_server_deps_command,
        _register_genai_server_command,
    ):
        register(subparsers)
    return parser
def _execute(args):
    # Dispatch to the callable bound by the matched subcommand's set_defaults.
    args.executor(args)
def main():
    """CLI main: configure logging/warnings, parse arguments, dispatch."""
    logger.setLevel(logging.INFO)
    # Surface CLI deprecation warnings to end users ("default" prints each
    # warning once per location).
    warnings.filterwarnings("default", category=CLIDeprecationWarning)
    parser = _get_parser()
    args = parser.parse_args()
    if args.subcommand is None:
        # No subcommand given: print usage to stderr and exit with status 2
        # (the conventional CLI usage-error code).
        parser.print_usage(sys.stderr)
        sys.exit(2)
    _execute(args)
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_cli.py",
"license": "Apache License 2.0",
"lines": 170,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_common_args.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddlex.inference import PaddlePredictorOption
from paddlex.utils.device import get_default_device, parse_device
from ._constants import (
DEFAULT_CPU_THREADS,
DEFAULT_DEVICE,
DEFAULT_ENABLE_MKLDNN,
DEFAULT_MKLDNN_CACHE_CAPACITY,
DEFAULT_PRECISION,
DEFAULT_USE_TENSORRT,
SUPPORTED_PRECISION_LIST,
DEFAULT_USE_CINN,
)
from ._utils.cli import str2bool
def parse_common_args(kwargs, *, default_enable_hpi):
    """Validate and normalize the common inference arguments.

    Args:
        kwargs: User-supplied overrides for the common arguments.
        default_enable_hpi: Default value for ``enable_hpi`` when not supplied.

    Returns:
        A dict with every common argument filled in; ``use_tensorrt`` and
        ``precision`` are renamed to the internal ``use_pptrt`` and
        ``pptrt_precision`` keys.

    Raises:
        ValueError: If an unknown argument is passed or the precision is not
            one of ``SUPPORTED_PRECISION_LIST``.
    """
    default_vals = {
        "device": DEFAULT_DEVICE,
        "enable_hpi": default_enable_hpi,
        "use_tensorrt": DEFAULT_USE_TENSORRT,
        "precision": DEFAULT_PRECISION,
        "enable_mkldnn": DEFAULT_ENABLE_MKLDNN,
        "mkldnn_cache_capacity": DEFAULT_MKLDNN_CACHE_CAPACITY,
        "cpu_threads": DEFAULT_CPU_THREADS,
        "enable_cinn": DEFAULT_USE_CINN,
    }
    unknown_names = kwargs.keys() - default_vals.keys()
    if unknown_names:
        # Fix: previously raised for a single, arbitrarily-ordered element of
        # the set; report all unknown names at once, deterministically.
        raise ValueError(f"Unknown arguments: {sorted(unknown_names)}")
    kwargs = {**default_vals, **kwargs}
    if kwargs["precision"] not in SUPPORTED_PRECISION_LIST:
        raise ValueError(
            f"Invalid precision: {kwargs['precision']}. Supported values are: {SUPPORTED_PRECISION_LIST}."
        )
    # Rename to the keys used internally for the Paddle TensorRT subgraph engine.
    kwargs["use_pptrt"] = kwargs.pop("use_tensorrt")
    kwargs["pptrt_precision"] = kwargs.pop("precision")
    return kwargs
def prepare_common_init_args(model_name, common_args):
    """Translate parsed common args into predictor/pipeline init kwargs.

    Args:
        model_name: Unused in this function; presumably kept for interface
            compatibility with callers — confirm.
        common_args: The dict produced by ``parse_common_args``.

    Returns:
        Dict with ``device``, ``use_hpip`` and a configured ``pp_option``
        (a ``PaddlePredictorOption``).
    """
    device = common_args["device"]
    if device is None:
        # No explicit device: fall back to the framework default.
        device = get_default_device()
    device_type, _ = parse_device(device)
    init_kwargs = {}
    init_kwargs["device"] = device
    init_kwargs["use_hpip"] = common_args["enable_hpi"]
    pp_option = PaddlePredictorOption()
    if device_type == "gpu":
        # TensorRT run modes only apply on GPU.
        if common_args["use_pptrt"]:
            if common_args["pptrt_precision"] == "fp32":
                pp_option.run_mode = "trt_fp32"
            else:
                # parse_common_args has already validated the precision value.
                assert common_args["pptrt_precision"] == "fp16", common_args[
                    "pptrt_precision"
                ]
                pp_option.run_mode = "trt_fp16"
        else:
            pp_option.run_mode = "paddle"
    elif device_type == "cpu":
        enable_mkldnn = common_args["enable_mkldnn"]
        if enable_mkldnn:
            # NOTE(review): run_mode is left at its default here — presumably
            # the default run mode enables MKL-DNN; confirm in PaddleX docs.
            pp_option.mkldnn_cache_capacity = common_args["mkldnn_cache_capacity"]
        else:
            pp_option.run_mode = "paddle"
        pp_option.cpu_threads = common_args["cpu_threads"]
    else:
        # Other accelerators (e.g. NPU): plain Paddle inference.
        pp_option.run_mode = "paddle"
    pp_option.enable_cinn = common_args["enable_cinn"]
    init_kwargs["pp_option"] = pp_option
    return init_kwargs
def add_common_cli_opts(parser, *, default_enable_hpi, allow_multiple_devices):
    """Attach the shared inference options to an argparse parser.

    Args:
        parser: The (sub)parser to extend.
        default_enable_hpi: Default value for ``--enable_hpi``.
        allow_multiple_devices: Selects the ``--device`` help-text variant
            that documents multi-device parallel inference.
    """
    if allow_multiple_devices:
        device_help = "Device(s) to use for inference, e.g., `cpu`, `gpu`, `npu`, `gpu:0`, `gpu:0,1`. If multiple devices are specified, inference will be performed in parallel. Note that parallel inference is not always supported. By default, GPU 0 will be used if available; otherwise, the CPU will be used."
    else:
        device_help = "Device to use for inference, e.g., `cpu`, `gpu`, `npu`, `gpu:0`. By default, GPU 0 will be used if available; otherwise, the CPU will be used."
    # (flag, add_argument kwargs), in the order options appear in --help.
    option_specs = [
        (
            "--device",
            dict(type=str, default=DEFAULT_DEVICE, help=device_help),
        ),
        (
            "--enable_hpi",
            dict(
                type=str2bool,
                default=default_enable_hpi,
                help="Enable the high performance inference.",
            ),
        ),
        (
            "--use_tensorrt",
            dict(
                type=str2bool,
                default=DEFAULT_USE_TENSORRT,
                help="Whether to use the Paddle Inference TensorRT subgraph engine. If the model does not support TensorRT acceleration, even if this flag is set, acceleration will not be used.",
            ),
        ),
        (
            "--precision",
            dict(
                type=str,
                default=DEFAULT_PRECISION,
                choices=SUPPORTED_PRECISION_LIST,
                help="Precision for TensorRT when using the Paddle Inference TensorRT subgraph engine.",
            ),
        ),
        (
            "--enable_mkldnn",
            dict(
                type=str2bool,
                default=DEFAULT_ENABLE_MKLDNN,
                help="Enable MKL-DNN acceleration for inference. If MKL-DNN is unavailable or the model does not support it, acceleration will not be used even if this flag is set.",
            ),
        ),
        (
            "--mkldnn_cache_capacity",
            dict(
                type=int,
                default=DEFAULT_MKLDNN_CACHE_CAPACITY,
                help="MKL-DNN cache capacity.",
            ),
        ),
        (
            "--cpu_threads",
            dict(
                type=int,
                default=DEFAULT_CPU_THREADS,
                help="Number of threads to use for inference on CPUs.",
            ),
        ),
        (
            "--enable_cinn",
            dict(
                type=str2bool,
                default=DEFAULT_USE_CINN,
                help="Whether to use the CINN compiler.",
            ),
        ),
    ]
    for flag, spec in option_specs:
        parser.add_argument(flag, **spec)
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_common_args.py",
"license": "Apache License 2.0",
"lines": 134,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_constants.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Shared defaults for the common inference arguments (see `_common_args.py`).
DEFAULT_DEVICE = None  # None -> resolved to the framework default device at runtime
DEFAULT_USE_TENSORRT = False  # Paddle Inference TensorRT subgraph engine off by default
DEFAULT_PRECISION = "fp32"  # TensorRT precision
DEFAULT_ENABLE_MKLDNN = True  # MKL-DNN CPU acceleration on by default
DEFAULT_MKLDNN_CACHE_CAPACITY = 10
DEFAULT_CPU_THREADS = 10  # inference threads when running on CPU
SUPPORTED_PRECISION_LIST = ["fp32", "fp16"]  # accepted values for `precision`
DEFAULT_USE_CINN = False  # CINN compiler off by default
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_constants.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_env.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
# Environment opt-out: set PADDLEOCR_DISABLE_AUTO_LOGGING_CONFIG=1 to disable
# the package's automatic logging configuration (name suggests it is consulted
# by the logging setup — confirm at the usage site).
DISABLE_AUTO_LOGGING_CONFIG = (
    os.getenv("PADDLEOCR_DISABLE_AUTO_LOGGING_CONFIG", "0") == "1"
)
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_env.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_models/_image_classification.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from .._utils.cli import (
add_simple_inference_args,
get_subcommand_args,
perform_simple_inference,
)
from .base import PaddleXPredictorWrapper, PredictorCLISubcommandExecutor
class ImageClassification(PaddleXPredictorWrapper):
def __init__(
self,
*,
topk=None,
**kwargs,
):
self._extra_init_args = {
"topk": topk,
}
super().__init__(**kwargs)
def _get_extra_paddlex_predictor_init_args(self):
return self._extra_init_args
class ImageClassificationSubcommandExecutor(PredictorCLISubcommandExecutor):
def _update_subparser(self, subparser):
add_simple_inference_args(subparser)
subparser.add_argument(
"--topk",
type=int,
help="Top-k value for prediction results.",
)
@property
@abc.abstractmethod
def wrapper_cls(self):
raise NotImplementedError
def execute_with_args(self, args):
params = get_subcommand_args(args)
perform_simple_inference(self.wrapper_cls, params)
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_models/_image_classification.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_models/_object_detection.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from .._utils.cli import (
add_simple_inference_args,
get_subcommand_args,
perform_simple_inference,
str2bool,
)
from .base import PaddleXPredictorWrapper, PredictorCLISubcommandExecutor
class ObjectDetection(PaddleXPredictorWrapper):
def __init__(
self,
*,
img_size=None,
threshold=None,
layout_nms=None,
layout_unclip_ratio=None,
layout_merge_bboxes_mode=None,
**kwargs,
):
self._extra_init_args = {
"img_size": img_size,
"threshold": threshold,
"layout_nms": layout_nms,
"layout_unclip_ratio": layout_unclip_ratio,
"layout_merge_bboxes_mode": layout_merge_bboxes_mode,
}
super().__init__(**kwargs)
def _get_extra_paddlex_predictor_init_args(self):
return self._extra_init_args
class ObjectDetectionSubcommandExecutor(PredictorCLISubcommandExecutor):
def _update_subparser(self, subparser):
add_simple_inference_args(subparser)
subparser.add_argument(
"--img_size",
type=int,
help="Input image size (w, h).",
)
subparser.add_argument(
"--threshold",
type=float,
help="Threshold for filtering out low-confidence predictions.",
)
subparser.add_argument(
"--layout_nms",
type=str2bool,
help="Whether to use layout-aware NMS.",
)
subparser.add_argument(
"--layout_unclip_ratio",
type=float,
help="Ratio of unclipping the bounding box.",
)
subparser.add_argument(
"--layout_merge_bboxes_mode",
type=str,
help="Mode for merging bounding boxes.",
)
@property
@abc.abstractmethod
def wrapper_cls(self):
raise NotImplementedError
def execute_with_args(self, args):
params = get_subcommand_args(args)
perform_simple_inference(self.wrapper_cls, params)
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_models/_object_detection.py",
"license": "Apache License 2.0",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_models/base.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from paddlex import create_predictor
from paddlex.utils.deps import DependencyError
from .._abstract import CLISubcommandExecutor
from .._common_args import (
add_common_cli_opts,
parse_common_args,
prepare_common_init_args,
)
_DEFAULT_ENABLE_HPI = False
class PaddleXPredictorWrapper(metaclass=abc.ABCMeta):
def __init__(
self,
*,
model_name=None,
model_dir=None,
**common_args,
):
super().__init__()
self._model_name = (
model_name if model_name is not None else self.default_model_name
)
self._model_dir = model_dir
self._common_args = parse_common_args(
common_args, default_enable_hpi=_DEFAULT_ENABLE_HPI
)
self.paddlex_predictor = self._create_paddlex_predictor()
@property
@abc.abstractmethod
def default_model_name(self):
raise NotImplementedError
def predict_iter(self, *args, **kwargs):
return self.paddlex_predictor.predict(*args, **kwargs)
def predict(self, *args, **kwargs):
result = list(self.predict_iter(*args, **kwargs))
return result
def close(self):
self.paddlex_predictor.close()
@classmethod
@abc.abstractmethod
def get_cli_subcommand_executor(cls):
raise NotImplementedError
def _get_extra_paddlex_predictor_init_args(self):
return {}
def _create_paddlex_predictor(self):
kwargs = prepare_common_init_args(self._model_name, self._common_args)
kwargs = {**self._get_extra_paddlex_predictor_init_args(), **kwargs}
# Should we check model names?
try:
return create_predictor(
model_name=self._model_name, model_dir=self._model_dir, **kwargs
)
except DependencyError as e:
raise RuntimeError(
"A dependency error occurred during predictor creation. Please refer to the installation documentation to ensure all required dependencies are installed."
) from e
class PredictorCLISubcommandExecutor(CLISubcommandExecutor):
@property
@abc.abstractmethod
def subparser_name(self):
raise NotImplementedError
def add_subparser(self, subparsers):
subparser = subparsers.add_parser(name=self.subparser_name)
self._update_subparser(subparser)
subparser.add_argument("--model_name", type=str, help="Name of the model.")
subparser.add_argument(
"--model_dir", type=str, help="Directory where the model is stored."
)
add_common_cli_opts(
subparser,
default_enable_hpi=_DEFAULT_ENABLE_HPI,
allow_multiple_devices=False,
)
return subparser
@abc.abstractmethod
def _update_subparser(self, subparser):
raise NotImplementedError
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_models/base.py",
"license": "Apache License 2.0",
"lines": 90,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_models/doc_img_orientation_classification.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._image_classification import (
ImageClassification,
ImageClassificationSubcommandExecutor,
)
class DocImgOrientationClassification(ImageClassification):
@property
def default_model_name(self):
return "PP-LCNet_x1_0_doc_ori"
@classmethod
def get_cli_subcommand_executor(cls):
return DocImgOrientationClassificationSubcommandExecutor()
class DocImgOrientationClassificationSubcommandExecutor(
ImageClassificationSubcommandExecutor
):
@property
def subparser_name(self):
return "doc_img_orientation_classification"
@property
def wrapper_cls(self):
return DocImgOrientationClassification
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_models/doc_img_orientation_classification.py",
"license": "Apache License 2.0",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_models/doc_vlm.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .._utils.cli import add_simple_inference_args
from ._doc_vlm import (
BaseDocVLM,
BaseDocVLMSubcommandExecutor,
)
class DocVLM(BaseDocVLM):
@property
def default_model_name(self):
return "PP-DocBee2-3B"
@classmethod
def get_cli_subcommand_executor(cls):
return DocVLMSubcommandExecutor()
class DocVLMSubcommandExecutor(BaseDocVLMSubcommandExecutor):
@property
def subparser_name(self):
return "doc_vlm"
@property
def wrapper_cls(self):
return DocVLM
def _update_subparser(self, subparser):
add_simple_inference_args(
subparser,
input_help='Input dict, e.g. `{"image": "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/medal_table.png", "query": "Recognize this table"}`.',
)
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_models/doc_vlm.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_models/formula_recognition.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .._utils.cli import (
add_simple_inference_args,
get_subcommand_args,
perform_simple_inference,
)
from .base import PaddleXPredictorWrapper, PredictorCLISubcommandExecutor
class FormulaRecognition(PaddleXPredictorWrapper):
def __init__(
self,
*args,
**kwargs,
):
self._extra_init_args = {}
super().__init__(*args, **kwargs)
@property
def default_model_name(self):
return "PP-FormulaNet_plus-M"
@classmethod
def get_cli_subcommand_executor(cls):
return FormulaRecognitionSubcommandExecutor()
def _get_extra_paddlex_predictor_init_args(self):
return self._extra_init_args
class FormulaRecognitionSubcommandExecutor(PredictorCLISubcommandExecutor):
@property
def subparser_name(self):
return "formula_recognition"
def _update_subparser(self, subparser):
add_simple_inference_args(subparser)
def execute_with_args(self, args):
params = get_subcommand_args(args)
perform_simple_inference(FormulaRecognition, params)
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_models/formula_recognition.py",
"license": "Apache License 2.0",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_models/layout_detection.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._object_detection import (
ObjectDetection,
ObjectDetectionSubcommandExecutor,
)
class LayoutDetection(ObjectDetection):
@property
def default_model_name(self):
return "PP-DocLayout_plus-L"
@classmethod
def get_cli_subcommand_executor(cls):
return LayoutDetectionSubcommandExecutor()
class LayoutDetectionSubcommandExecutor(ObjectDetectionSubcommandExecutor):
@property
def subparser_name(self):
return "layout_detection"
@property
def wrapper_cls(self):
return LayoutDetection
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_models/layout_detection.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_models/seal_text_detection.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .._utils.cli import (
add_simple_inference_args,
get_subcommand_args,
perform_simple_inference,
)
from .base import PaddleXPredictorWrapper, PredictorCLISubcommandExecutor
from ._text_detection import TextDetectionMixin, TextDetectionSubcommandExecutorMixin
class SealTextDetection(TextDetectionMixin, PaddleXPredictorWrapper):
@property
def default_model_name(self):
return "PP-OCRv4_mobile_seal_det"
@classmethod
def get_cli_subcommand_executor(cls):
return SealTextDetectionSubcommandExecutor()
class SealTextDetectionSubcommandExecutor(
TextDetectionSubcommandExecutorMixin, PredictorCLISubcommandExecutor
):
@property
def subparser_name(self):
return "seal_text_detection"
def _update_subparser(self, subparser):
add_simple_inference_args(subparser)
self._add_text_detection_args(subparser)
def execute_with_args(self, args):
params = get_subcommand_args(args)
perform_simple_inference(SealTextDetection, params)
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_models/seal_text_detection.py",
"license": "Apache License 2.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_models/table_cells_detection.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._object_detection import (
ObjectDetection,
ObjectDetectionSubcommandExecutor,
)
class TableCellsDetection(ObjectDetection):
@property
def default_model_name(self):
return "RT-DETR-L_wired_table_cell_det"
@classmethod
def get_cli_subcommand_executor(cls):
return TableCellsDetectionSubcommandExecutor()
class TableCellsDetectionSubcommandExecutor(ObjectDetectionSubcommandExecutor):
@property
def subparser_name(self):
return "table_cells_detection"
@property
def wrapper_cls(self):
return TableCellsDetection
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_models/table_cells_detection.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_models/table_classification.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._image_classification import (
ImageClassification,
ImageClassificationSubcommandExecutor,
)
class TableClassification(ImageClassification):
@property
def default_model_name(self):
return "PP-LCNet_x1_0_table_cls"
@classmethod
def get_cli_subcommand_executor(cls):
return TableClassificationSubcommandExecutor()
class TableClassificationSubcommandExecutor(ImageClassificationSubcommandExecutor):
@property
def subparser_name(self):
return "table_classification"
@property
def wrapper_cls(self):
return TableClassification
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_models/table_classification.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_models/table_structure_recognition.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .._utils.cli import (
add_simple_inference_args,
get_subcommand_args,
perform_simple_inference,
)
from .base import PaddleXPredictorWrapper, PredictorCLISubcommandExecutor
class TableStructureRecognition(PaddleXPredictorWrapper):
def __init__(
self,
*args,
**kwargs,
):
self._extra_init_args = {}
super().__init__(*args, **kwargs)
@property
def default_model_name(self):
return "SLANet"
@classmethod
def get_cli_subcommand_executor(cls):
return TableStructureRecognitionSubcommandExecutor()
def _get_extra_paddlex_predictor_init_args(self):
return self._extra_init_args
class TableStructureRecognitionSubcommandExecutor(PredictorCLISubcommandExecutor):
@property
def subparser_name(self):
return "table_structure_recognition"
def _update_subparser(self, subparser):
add_simple_inference_args(subparser)
def execute_with_args(self, args):
params = get_subcommand_args(args)
perform_simple_inference(TableStructureRecognition, params)
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_models/table_structure_recognition.py",
"license": "Apache License 2.0",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_models/text_detection.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .._utils.cli import (
add_simple_inference_args,
get_subcommand_args,
perform_simple_inference,
)
from .base import PaddleXPredictorWrapper, PredictorCLISubcommandExecutor
from ._text_detection import TextDetectionMixin, TextDetectionSubcommandExecutorMixin
class TextDetection(TextDetectionMixin, PaddleXPredictorWrapper):
@property
def default_model_name(self):
return "PP-OCRv5_server_det"
@classmethod
def get_cli_subcommand_executor(cls):
return TextDetectionSubcommandExecutor()
class TextDetectionSubcommandExecutor(
TextDetectionSubcommandExecutorMixin, PredictorCLISubcommandExecutor
):
@property
def subparser_name(self):
return "text_detection"
def _update_subparser(self, subparser):
add_simple_inference_args(subparser)
self._add_text_detection_args(subparser)
def execute_with_args(self, args):
params = get_subcommand_args(args)
perform_simple_inference(TextDetection, params)
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_models/text_detection.py",
"license": "Apache License 2.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_models/text_image_unwarping.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .._utils.cli import (
add_simple_inference_args,
get_subcommand_args,
perform_simple_inference,
)
from .base import PaddleXPredictorWrapper, PredictorCLISubcommandExecutor
class TextImageUnwarping(PaddleXPredictorWrapper):
def __init__(
self,
*args,
**kwargs,
):
self._extra_init_args = {}
super().__init__(*args, **kwargs)
@property
def default_model_name(self):
return "UVDoc"
@classmethod
def get_cli_subcommand_executor(cls):
return TextImageUnwarpingSubcommandExecutor()
def _get_extra_paddlex_predictor_init_args(self):
return self._extra_init_args
class TextImageUnwarpingSubcommandExecutor(PredictorCLISubcommandExecutor):
@property
def subparser_name(self):
return "text_image_unwarping"
def _update_subparser(self, subparser):
add_simple_inference_args(subparser)
def execute_with_args(self, args):
params = get_subcommand_args(args)
perform_simple_inference(TextImageUnwarping, params)
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_models/text_image_unwarping.py",
"license": "Apache License 2.0",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_models/text_recognition.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .._utils.cli import (
add_simple_inference_args,
get_subcommand_args,
perform_simple_inference,
)
from .base import PaddleXPredictorWrapper, PredictorCLISubcommandExecutor
class TextRecognition(PaddleXPredictorWrapper):
def __init__(
self,
*,
input_shape=None,
**kwargs,
):
self._extra_init_args = {
"input_shape": input_shape,
}
super().__init__(**kwargs)
@property
def default_model_name(self):
return "PP-OCRv5_server_rec"
@classmethod
def get_cli_subcommand_executor(cls):
return TextRecognitionSubcommandExecutor()
def _get_extra_paddlex_predictor_init_args(self):
return self._extra_init_args
class TextRecognitionSubcommandExecutor(PredictorCLISubcommandExecutor):
@property
def subparser_name(self):
return "text_recognition"
def _update_subparser(self, subparser):
add_simple_inference_args(subparser)
subparser.add_argument(
"--input_shape",
nargs=3,
type=int,
metavar=("C", "H", "W"),
help="Input shape of the model.",
)
def execute_with_args(self, args):
params = get_subcommand_args(args)
perform_simple_inference(TextRecognition, params)
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_models/text_recognition.py",
"license": "Apache License 2.0",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_pipelines/base.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import yaml
from paddlex import create_pipeline
from paddlex.inference import load_pipeline_config
from paddlex.utils.config import AttrDict
from paddlex.utils.deps import DependencyError
from .._abstract import CLISubcommandExecutor
from .._common_args import (
add_common_cli_opts,
parse_common_args,
prepare_common_init_args,
)
_DEFAULT_ENABLE_HPI = None
def _merge_dicts(d1, d2):
res = d1.copy()
for k, v in d2.items():
if k in res and isinstance(res[k], dict) and isinstance(v, dict):
res[k] = _merge_dicts(res[k], v)
else:
res[k] = v
return res
def _to_builtin(obj):
if isinstance(obj, AttrDict):
return {k: _to_builtin(v) for k, v in obj.items()}
elif isinstance(obj, dict):
return {k: _to_builtin(v) for k, v in obj.items()}
elif isinstance(obj, list):
return [_to_builtin(item) for item in obj]
else:
return obj
class PaddleXPipelineWrapper(metaclass=abc.ABCMeta):
def __init__(
self,
*,
paddlex_config=None,
**common_args,
):
super().__init__()
self._paddlex_config = paddlex_config
self._common_args = parse_common_args(
common_args, default_enable_hpi=_DEFAULT_ENABLE_HPI
)
self._merged_paddlex_config = self._get_merged_paddlex_config()
self.paddlex_pipeline = self._create_paddlex_pipeline()
@property
@abc.abstractmethod
def _paddlex_pipeline_name(self):
raise NotImplementedError
def export_paddlex_config_to_yaml(self, yaml_path):
with open(yaml_path, "w", encoding="utf-8") as f:
config = _to_builtin(self._merged_paddlex_config)
yaml.safe_dump(config, f)
def close(self):
self.paddlex_pipeline.close()
@classmethod
@abc.abstractmethod
def get_cli_subcommand_executor(cls):
raise NotImplementedError
def _get_paddlex_config_overrides(self):
return {}
def _get_merged_paddlex_config(self):
if self._paddlex_config is None:
config = load_pipeline_config(self._paddlex_pipeline_name)
elif isinstance(self._paddlex_config, str):
config = load_pipeline_config(self._paddlex_config)
else:
config = self._paddlex_config
overrides = self._get_paddlex_config_overrides()
return _merge_dicts(config, overrides)
def _create_paddlex_pipeline(self):
kwargs = prepare_common_init_args(None, self._common_args)
try:
return create_pipeline(config=self._merged_paddlex_config, **kwargs)
except DependencyError as e:
raise RuntimeError(
"A dependency error occurred during pipeline creation. Please refer to the installation documentation to ensure all required dependencies are installed."
) from e
class PipelineCLISubcommandExecutor(CLISubcommandExecutor):
@property
@abc.abstractmethod
def subparser_name(self):
raise NotImplementedError
def add_subparser(self, subparsers):
subparser = subparsers.add_parser(name=self.subparser_name)
self._update_subparser(subparser)
add_common_cli_opts(
subparser,
default_enable_hpi=_DEFAULT_ENABLE_HPI,
allow_multiple_devices=True,
)
subparser.add_argument(
"--paddlex_config",
type=str,
help="Path to PaddleX pipeline configuration file.",
)
return subparser
@abc.abstractmethod
def _update_subparser(self, subparser):
raise NotImplementedError
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_pipelines/base.py",
"license": "Apache License 2.0",
"lines": 112,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_pipelines/doc_preprocessor.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .._utils.cli import (
add_simple_inference_args,
get_subcommand_args,
perform_simple_inference,
str2bool,
)
from .base import PaddleXPipelineWrapper, PipelineCLISubcommandExecutor
from .utils import create_config_from_structure
class DocPreprocessor(PaddleXPipelineWrapper):
    """Wrapper around the PaddleX document-preprocessing pipeline.

    Bundles document image orientation classification and document
    unwarping.  Constructor arguments left as ``None`` fall back to the
    underlying pipeline's own defaults.
    """

    def __init__(
        self,
        doc_orientation_classify_model_name=None,
        doc_orientation_classify_model_dir=None,
        doc_unwarping_model_name=None,
        doc_unwarping_model_dir=None,
        use_doc_orientation_classify=None,
        use_doc_unwarping=None,
        **kwargs,
    ):
        # Record the pipeline-specific settings, then hand the remaining
        # keyword arguments to the shared wrapper base class.
        self._params = dict(
            doc_orientation_classify_model_name=doc_orientation_classify_model_name,
            doc_orientation_classify_model_dir=doc_orientation_classify_model_dir,
            doc_unwarping_model_name=doc_unwarping_model_name,
            doc_unwarping_model_dir=doc_unwarping_model_dir,
            use_doc_orientation_classify=use_doc_orientation_classify,
            use_doc_unwarping=use_doc_unwarping,
        )
        super().__init__(**kwargs)

    @property
    def _paddlex_pipeline_name(self):
        # Name of the underlying PaddleX pipeline.
        return "doc_preprocessor"

    def predict_iter(
        self,
        input,
        *,
        use_doc_orientation_classify=None,
        use_doc_unwarping=None,
    ):
        """Run prediction lazily, yielding one result per input item."""
        return self.paddlex_pipeline.predict(
            input,
            use_doc_orientation_classify=use_doc_orientation_classify,
            use_doc_unwarping=use_doc_unwarping,
        )

    def predict(
        self,
        input,
        *,
        use_doc_orientation_classify=None,
        use_doc_unwarping=None,
    ):
        """Run prediction eagerly and return all results as a list."""
        results = self.predict_iter(
            input,
            use_doc_orientation_classify=use_doc_orientation_classify,
            use_doc_unwarping=use_doc_unwarping,
        )
        return list(results)

    @classmethod
    def get_cli_subcommand_executor(cls):
        """Return the CLI subcommand executor for this pipeline."""
        return DocPreprocessorCLISubcommandExecutor()

    def _get_paddlex_config_overrides(self):
        # Dotted keys address nested entries of the PaddleX pipeline
        # config; presumably ``None`` values mean "keep the pipeline
        # default" (see ``create_config_from_structure``) — TODO confirm.
        overrides = {
            "SubModules.DocOrientationClassify.model_name": self._params[
                "doc_orientation_classify_model_name"
            ],
            "SubModules.DocOrientationClassify.model_dir": self._params[
                "doc_orientation_classify_model_dir"
            ],
            "SubModules.DocUnwarping.model_name": self._params[
                "doc_unwarping_model_name"
            ],
            "SubModules.DocUnwarping.model_dir": self._params[
                "doc_unwarping_model_dir"
            ],
            "use_doc_orientation_classify": self._params[
                "use_doc_orientation_classify"
            ],
            "use_doc_unwarping": self._params["use_doc_unwarping"],
        }
        return create_config_from_structure(overrides)
class DocPreprocessorCLISubcommandExecutor(PipelineCLISubcommandExecutor):
    """CLI subcommand executor for the ``doc_preprocessor`` pipeline."""

    @property
    def subparser_name(self):
        return "doc_preprocessor"

    def _update_subparser(self, subparser):
        """Register the doc-preprocessor-specific command-line options."""
        add_simple_inference_args(subparser)
        # (flag, value type, help text) triples, registered in order.
        option_specs = (
            (
                "--doc_orientation_classify_model_name",
                str,
                "Name of the document image orientation classification model.",
            ),
            (
                "--doc_orientation_classify_model_dir",
                str,
                "Path to the document image orientation classification model directory.",
            ),
            (
                "--doc_unwarping_model_name",
                str,
                "Name of the document image unwarping model.",
            ),
            (
                "--doc_unwarping_model_dir",
                str,
                "Path to the document image unwarping model directory.",
            ),
            (
                "--use_doc_orientation_classify",
                str2bool,
                "Whether to use document image orientation classification.",
            ),
            (
                "--use_doc_unwarping",
                str2bool,
                "Whether to use text image unwarping.",
            ),
        )
        for flag, value_type, help_text in option_specs:
            subparser.add_argument(flag, type=value_type, help=help_text)

    def execute_with_args(self, args):
        """Run inference with the parsed command-line arguments."""
        perform_simple_inference(DocPreprocessor, get_subcommand_args(args))
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_pipelines/doc_preprocessor.py",
"license": "Apache License 2.0",
"lines": 132,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_pipelines/doc_understanding.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddlex.utils.pipeline_arguments import custom_type
from .._utils.cli import (
add_simple_inference_args,
get_subcommand_args,
perform_simple_inference,
)
from .base import PaddleXPipelineWrapper, PipelineCLISubcommandExecutor
from .utils import create_config_from_structure
class DocUnderstanding(PaddleXPipelineWrapper):
    """Wrapper around the PaddleX document-understanding pipeline.

    Constructor arguments left as ``None`` fall back to the underlying
    pipeline's own defaults.
    """

    def __init__(
        self,
        doc_understanding_model_name=None,
        doc_understanding_model_dir=None,
        doc_understanding_batch_size=None,
        **kwargs,
    ):
        # Record the pipeline-specific settings, then hand the remaining
        # keyword arguments to the shared wrapper base class.
        self._params = dict(
            doc_understanding_model_name=doc_understanding_model_name,
            doc_understanding_model_dir=doc_understanding_model_dir,
            doc_understanding_batch_size=doc_understanding_batch_size,
        )
        super().__init__(**kwargs)

    @property
    def _paddlex_pipeline_name(self):
        # Name of the underlying PaddleX pipeline.
        return "doc_understanding"

    def predict_iter(self, input, **kwargs):
        """Run prediction lazily, yielding one result per input item."""
        return self.paddlex_pipeline.predict(input, **kwargs)

    def predict(self, input, **kwargs):
        """Run prediction eagerly and return all results as a list."""
        results = self.predict_iter(input, **kwargs)
        return list(results)

    @classmethod
    def get_cli_subcommand_executor(cls):
        """Return the CLI subcommand executor for this pipeline."""
        return DocUnderstandingCLISubcommandExecutor()

    def _get_paddlex_config_overrides(self):
        # Dotted keys address nested entries of the PaddleX pipeline config.
        prefix = "SubModules.DocUnderstanding."
        overrides = {
            prefix + "model_name": self._params["doc_understanding_model_name"],
            prefix + "model_dir": self._params["doc_understanding_model_dir"],
            prefix + "batch_size": self._params["doc_understanding_batch_size"],
        }
        return create_config_from_structure(overrides)
class DocUnderstandingCLISubcommandExecutor(PipelineCLISubcommandExecutor):
    """CLI subcommand executor for the ``doc_understanding`` pipeline."""

    # Parses and validates the --input value, which must be a dict literal.
    input_validator = staticmethod(custom_type(dict))

    @property
    def subparser_name(self):
        return "doc_understanding"

    def _update_subparser(self, subparser):
        """Register the doc-understanding-specific command-line options."""
        add_simple_inference_args(
            subparser,
            input_help='Input dict, e.g. `{"image": "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/medal_table.png", "query": "Recognize this table"}`.',
        )
        # (flag, help text) pairs; all of these options take string values.
        option_specs = (
            (
                "--doc_understanding_model_name",
                "Name of the document understanding model.",
            ),
            (
                "--doc_understanding_model_dir",
                "Path to the document understanding model directory.",
            ),
            (
                "--doc_understanding_batch_size",
                "Batch size for the document understanding model.",
            ),
        )
        for flag, help_text in option_specs:
            subparser.add_argument(flag, type=str, help=help_text)

    def execute_with_args(self, args):
        """Validate the input dict, then run inference."""
        params = get_subcommand_args(args)
        # Unlike most pipelines, the input is a dict literal rather than a
        # plain path/URL string, so it needs explicit validation.
        params["input"] = self.input_validator(params["input"])
        perform_simple_inference(DocUnderstanding, params)
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_pipelines/doc_understanding.py",
"license": "Apache License 2.0",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_pipelines/formula_recognition.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .._utils.cli import (
add_simple_inference_args,
get_subcommand_args,
perform_simple_inference,
str2bool,
)
from .base import PaddleXPipelineWrapper, PipelineCLISubcommandExecutor
from .utils import create_config_from_structure
class FormulaRecognitionPipeline(PaddleXPipelineWrapper):
    """Wrapper around the PaddleX formula-recognition pipeline.

    Combines optional document preprocessing (orientation classification
    and unwarping), optional layout detection, and formula recognition.
    Constructor arguments left as ``None`` fall back to the underlying
    pipeline's own defaults.
    """
    def __init__(
        self,
        doc_orientation_classify_model_name=None,
        doc_orientation_classify_model_dir=None,
        doc_orientation_classify_batch_size=None,
        doc_unwarping_model_name=None,
        doc_unwarping_model_dir=None,
        doc_unwarping_batch_size=None,
        use_doc_orientation_classify=None,
        use_doc_unwarping=None,
        layout_detection_model_name=None,
        layout_detection_model_dir=None,
        layout_threshold=None,
        layout_nms=None,
        layout_unclip_ratio=None,
        layout_merge_bboxes_mode=None,
        layout_detection_batch_size=None,
        use_layout_detection=None,
        formula_recognition_model_name=None,
        formula_recognition_model_dir=None,
        formula_recognition_batch_size=None,
        **kwargs,
    ):
        # Snapshot every named parameter into ``self._params``.
        # ``locals()`` is read before any other local variable is bound,
        # so after popping ``self`` and ``kwargs`` it holds exactly the
        # keyword parameters declared above.
        params = locals().copy()
        params.pop("self")
        params.pop("kwargs")
        self._params = params
        super().__init__(**kwargs)
    @property
    def _paddlex_pipeline_name(self):
        # Name of the underlying PaddleX pipeline.
        return "formula_recognition"
    def predict_iter(
        self,
        input,
        *,
        use_layout_detection=None,
        use_doc_orientation_classify=None,
        use_doc_unwarping=None,
        layout_det_res=None,
        layout_threshold=None,
        layout_nms=None,
        layout_unclip_ratio=None,
        layout_merge_bboxes_mode=None,
        **kwargs,
    ):
        """Run prediction lazily, yielding one result per input item.

        Per-call keyword arguments override the constructor-time settings
        for this call only; ``None`` keeps the configured behavior.
        """
        return self.paddlex_pipeline.predict(
            input,
            use_layout_detection=use_layout_detection,
            use_doc_orientation_classify=use_doc_orientation_classify,
            use_doc_unwarping=use_doc_unwarping,
            layout_det_res=layout_det_res,
            layout_threshold=layout_threshold,
            layout_nms=layout_nms,
            layout_unclip_ratio=layout_unclip_ratio,
            layout_merge_bboxes_mode=layout_merge_bboxes_mode,
            **kwargs,
        )
    def predict(
        self,
        input,
        *,
        use_layout_detection=None,
        use_doc_orientation_classify=None,
        use_doc_unwarping=None,
        layout_det_res=None,
        layout_threshold=None,
        layout_nms=None,
        layout_unclip_ratio=None,
        layout_merge_bboxes_mode=None,
        **kwargs,
    ):
        """Run prediction eagerly and return all results as a list."""
        return list(
            self.predict_iter(
                input,
                use_layout_detection=use_layout_detection,
                use_doc_orientation_classify=use_doc_orientation_classify,
                use_doc_unwarping=use_doc_unwarping,
                layout_det_res=layout_det_res,
                layout_threshold=layout_threshold,
                layout_nms=layout_nms,
                layout_unclip_ratio=layout_unclip_ratio,
                layout_merge_bboxes_mode=layout_merge_bboxes_mode,
                **kwargs,
            )
        )
    @classmethod
    def get_cli_subcommand_executor(cls):
        """Return the CLI subcommand executor for this pipeline."""
        return FormulaRecognitionPipelineCLISubcommandExecutor()
    def _get_paddlex_config_overrides(self):
        """Map the stored parameters onto dotted PaddleX config paths."""
        # Note on ``use_doc_preprocessor`` below: ``None or None`` is
        # ``None``, so the preprocessor toggle stays unset unless at least
        # one of the two preprocessing flags was explicitly provided.
        STRUCTURE = {
            "use_layout_detection": self._params["use_layout_detection"],
            "SubModules.LayoutDetection.model_name": self._params[
                "layout_detection_model_name"
            ],
            "SubModules.LayoutDetection.model_dir": self._params[
                "layout_detection_model_dir"
            ],
            "SubModules.LayoutDetection.threshold": self._params["layout_threshold"],
            "SubModules.LayoutDetection.layout_nms": self._params["layout_nms"],
            "SubModules.LayoutDetection.layout_unclip_ratio": self._params[
                "layout_unclip_ratio"
            ],
            "SubModules.LayoutDetection.layout_merge_bboxes_mode": self._params[
                "layout_merge_bboxes_mode"
            ],
            "SubModules.LayoutDetection.batch_size": self._params[
                "layout_detection_batch_size"
            ],
            "SubModules.FormulaRecognition.model_name": self._params[
                "formula_recognition_model_name"
            ],
            "SubModules.FormulaRecognition.model_dir": self._params[
                "formula_recognition_model_dir"
            ],
            "SubModules.FormulaRecognition.batch_size": self._params[
                "formula_recognition_batch_size"
            ],
            "SubPipelines.DocPreprocessor.use_doc_orientation_classify": self._params[
                "use_doc_orientation_classify"
            ],
            "SubPipelines.DocPreprocessor.use_doc_unwarping": self._params[
                "use_doc_unwarping"
            ],
            "use_doc_preprocessor": self._params["use_doc_orientation_classify"]
            or self._params["use_doc_unwarping"],
            "SubPipelines.DocPreprocessor.SubModules.DocOrientationClassify.model_name": self._params[
                "doc_orientation_classify_model_name"
            ],
            "SubPipelines.DocPreprocessor.SubModules.DocOrientationClassify.model_dir": self._params[
                "doc_orientation_classify_model_dir"
            ],
            "SubPipelines.DocPreprocessor.SubModules.DocOrientationClassify.batch_size": self._params[
                "doc_orientation_classify_batch_size"
            ],
            "SubPipelines.DocPreprocessor.SubModules.DocUnwarping.model_name": self._params[
                "doc_unwarping_model_name"
            ],
            "SubPipelines.DocPreprocessor.SubModules.DocUnwarping.model_dir": self._params[
                "doc_unwarping_model_dir"
            ],
            "SubPipelines.DocPreprocessor.SubModules.DocUnwarping.batch_size": self._params[
                "doc_unwarping_batch_size"
            ],
        }
        return create_config_from_structure(STRUCTURE)
class FormulaRecognitionPipelineCLISubcommandExecutor(PipelineCLISubcommandExecutor):
    """CLI subcommand executor for the formula recognition pipeline."""

    @property
    def subparser_name(self):
        return "formula_recognition_pipeline"

    def _update_subparser(self, subparser):
        """Register the formula-recognition-specific command-line options."""
        add_simple_inference_args(subparser)
        # (flag, value type, help text) triples, registered in order.
        option_specs = (
            (
                "--doc_orientation_classify_model_name",
                str,
                "Name of the document image orientation classification model.",
            ),
            (
                "--doc_orientation_classify_model_dir",
                str,
                "Directory of the document image orientation classification model.",
            ),
            (
                "--doc_orientation_classify_batch_size",
                int,
                "Batch size for document image orientation classification.",
            ),
            (
                "--doc_unwarping_model_name",
                str,
                "Name of the document unwarping model.",
            ),
            (
                "--doc_unwarping_model_dir",
                str,
                "Directory of the document unwarping model.",
            ),
            (
                "--doc_unwarping_batch_size",
                int,
                "Batch size for document unwarping.",
            ),
            (
                "--use_doc_orientation_classify",
                str2bool,
                "Use document image orientation classification.",
            ),
            (
                "--use_doc_unwarping",
                str2bool,
                "Use document unwarping.",
            ),
            (
                "--layout_detection_model_name",
                str,
                "Name of the layout detection model.",
            ),
            (
                "--layout_detection_model_dir",
                str,
                "Directory of the layout detection model.",
            ),
            (
                "--layout_threshold",
                float,
                "Threshold for layout detection.",
            ),
            (
                "--layout_nms",
                str2bool,
                "Non-maximum suppression for layout detection.",
            ),
            (
                "--layout_unclip_ratio",
                float,
                "Unclip ratio for layout detection.",
            ),
            (
                "--layout_merge_bboxes_mode",
                str,
                "Mode for merging bounding boxes in layout detection.",
            ),
            (
                "--layout_detection_batch_size",
                int,
                "Batch size for layout detection.",
            ),
            (
                "--use_layout_detection",
                str2bool,
                "Use layout detection.",
            ),
            (
                "--formula_recognition_model_name",
                str,
                "Name of the formula recognition model.",
            ),
            (
                "--formula_recognition_model_dir",
                str,
                "Directory of the formula recognition model.",
            ),
            (
                "--formula_recognition_batch_size",
                int,
                "Batch size for formula recognition.",
            ),
        )
        for flag, value_type, help_text in option_specs:
            subparser.add_argument(flag, type=value_type, help=help_text)

    def execute_with_args(self, args):
        """Run inference with the parsed command-line arguments."""
        perform_simple_inference(FormulaRecognitionPipeline, get_subcommand_args(args))
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_pipelines/formula_recognition.py",
"license": "Apache License 2.0",
"lines": 271,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_pipelines/ocr.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Should we use a third-party CLI library to auto-generate command-line
# arguments from the pipeline class, to reduce boilerplate and improve
# maintainability?
import sys
import warnings
from .._utils.cli import (
add_simple_inference_args,
get_subcommand_args,
perform_simple_inference,
str2bool,
)
from .._utils.deprecation import (
DeprecatedOptionAction,
deprecated,
warn_deprecated_param,
)
from .._utils.logging import logger
from .base import PaddleXPipelineWrapper, PipelineCLISubcommandExecutor
from .utils import create_config_from_structure
# Maps deprecated PaddleOCR 2.x constructor/CLI parameter names to their
# 3.x replacements.  Legacy names are accepted with a deprecation warning
# and forwarded to the new names (see PaddleOCR.__init__ and the CLI
# executor below).
_DEPRECATED_PARAM_NAME_MAPPING = {
    "det_model_dir": "text_detection_model_dir",
    "det_limit_side_len": "text_det_limit_side_len",
    "det_limit_type": "text_det_limit_type",
    "det_db_thresh": "text_det_thresh",
    "det_db_box_thresh": "text_det_box_thresh",
    "det_db_unclip_ratio": "text_det_unclip_ratio",
    "rec_model_dir": "text_recognition_model_dir",
    "rec_batch_num": "text_recognition_batch_size",
    "use_angle_cls": "use_textline_orientation",
    "cls_model_dir": "textline_orientation_model_dir",
    "cls_batch_num": "textline_orientation_batch_size",
}
# Values accepted for the `ocr_version` parameter / `--ocr_version` flag.
_SUPPORTED_OCR_VERSIONS = ["PP-OCRv3", "PP-OCRv4", "PP-OCRv5"]
# Be compatible with PaddleOCR 2.x interfaces
class PaddleOCR(PaddleXPipelineWrapper):
    """PaddleOCR wrapper around the PaddleX "OCR" pipeline.

    Keeps backward compatibility with PaddleOCR 2.x keyword arguments:
    legacy names passed via ``**kwargs`` (see
    ``_DEPRECATED_PARAM_NAME_MAPPING``) are remapped to their new
    equivalents with a deprecation warning.  When no detection or
    recognition model is specified explicitly, model names can be derived
    from ``lang`` and/or ``ocr_version``.
    """
    def __init__(
        self,
        doc_orientation_classify_model_name=None,
        doc_orientation_classify_model_dir=None,
        doc_unwarping_model_name=None,
        doc_unwarping_model_dir=None,
        text_detection_model_name=None,
        text_detection_model_dir=None,
        textline_orientation_model_name=None,
        textline_orientation_model_dir=None,
        textline_orientation_batch_size=None,
        text_recognition_model_name=None,
        text_recognition_model_dir=None,
        text_recognition_batch_size=None,
        use_doc_orientation_classify=None,
        use_doc_unwarping=None,
        use_textline_orientation=None,
        text_det_limit_side_len=None,
        text_det_limit_type=None,
        text_det_thresh=None,
        text_det_box_thresh=None,
        text_det_unclip_ratio=None,
        text_det_input_shape=None,
        text_rec_score_thresh=None,
        return_word_box=None,
        text_rec_input_shape=None,
        lang=None,
        ocr_version=None,
        **kwargs,
    ):
        # Validate `ocr_version` early so a typo fails fast.
        if ocr_version is not None and ocr_version not in _SUPPORTED_OCR_VERSIONS:
            raise ValueError(
                f"Invalid OCR version: {ocr_version}. Supported values are {_SUPPORTED_OCR_VERSIONS}."
            )
        # Only derive model names from `lang`/`ocr_version` when the caller
        # specified no detection/recognition model at all; otherwise those
        # two hints are ignored (with a warning).
        if all(
            map(
                lambda p: p is None,
                (
                    text_detection_model_name,
                    text_detection_model_dir,
                    text_recognition_model_name,
                    text_recognition_model_dir,
                ),
            )
        ):
            if lang is not None or ocr_version is not None:
                det_model_name, rec_model_name = self._get_ocr_model_names(
                    lang, ocr_version
                )
                if det_model_name is None or rec_model_name is None:
                    raise ValueError(
                        f"No models are available for the language {repr(lang)} and OCR version {repr(ocr_version)}."
                    )
                text_detection_model_name = det_model_name
                text_recognition_model_name = rec_model_name
        else:
            if lang is not None or ocr_version is not None:
                warnings.warn(
                    "`lang` and `ocr_version` will be ignored when model names or model directories are not `None`.",
                    stacklevel=2,
                )
        # New-style (3.x) parameters; deprecated 2.x kwargs are merged into
        # this dict below.
        params = {
            "doc_orientation_classify_model_name": doc_orientation_classify_model_name,
            "doc_orientation_classify_model_dir": doc_orientation_classify_model_dir,
            "doc_unwarping_model_name": doc_unwarping_model_name,
            "doc_unwarping_model_dir": doc_unwarping_model_dir,
            "text_detection_model_name": text_detection_model_name,
            "text_detection_model_dir": text_detection_model_dir,
            "textline_orientation_model_name": textline_orientation_model_name,
            "textline_orientation_model_dir": textline_orientation_model_dir,
            "textline_orientation_batch_size": textline_orientation_batch_size,
            "text_recognition_model_name": text_recognition_model_name,
            "text_recognition_model_dir": text_recognition_model_dir,
            "text_recognition_batch_size": text_recognition_batch_size,
            "use_doc_orientation_classify": use_doc_orientation_classify,
            "use_doc_unwarping": use_doc_unwarping,
            "use_textline_orientation": use_textline_orientation,
            "text_det_limit_side_len": text_det_limit_side_len,
            "text_det_limit_type": text_det_limit_type,
            "text_det_thresh": text_det_thresh,
            "text_det_box_thresh": text_det_box_thresh,
            "text_det_unclip_ratio": text_det_unclip_ratio,
            "text_det_input_shape": text_det_input_shape,
            "text_rec_score_thresh": text_rec_score_thresh,
            "return_word_box": return_word_box,
            "text_rec_input_shape": text_rec_input_shape,
        }
        # Split ``kwargs`` into deprecated 2.x parameters (remapped into
        # ``params`` with a warning) and pass-through arguments for the
        # base wrapper class.  Specifying both the old and the new spelling
        # of the same parameter is an error.
        base_params = {}
        for name, val in kwargs.items():
            if name in _DEPRECATED_PARAM_NAME_MAPPING:
                new_name = _DEPRECATED_PARAM_NAME_MAPPING[name]
                warn_deprecated_param(name, new_name)
                assert (
                    new_name in params
                ), f"{repr(new_name)} is not a valid parameter name."
                if params[new_name] is not None:
                    raise ValueError(
                        f"`{name}` and `{new_name}` are mutually exclusive."
                    )
                params[new_name] = val
            else:
                base_params[name] = val
        self._params = params
        super().__init__(**base_params)
    @property
    def _paddlex_pipeline_name(self):
        # Name of the underlying PaddleX pipeline.
        return "OCR"
    def predict_iter(
        self,
        input,
        *,
        use_doc_orientation_classify=None,
        use_doc_unwarping=None,
        use_textline_orientation=None,
        text_det_limit_side_len=None,
        text_det_limit_type=None,
        text_det_thresh=None,
        text_det_box_thresh=None,
        text_det_unclip_ratio=None,
        text_rec_score_thresh=None,
        return_word_box=None,
    ):
        """Run OCR lazily, yielding one result per input item.

        Per-call keyword arguments override the constructor-time settings
        for this call only; ``None`` keeps the configured behavior.
        """
        return self.paddlex_pipeline.predict(
            input,
            use_doc_orientation_classify=use_doc_orientation_classify,
            use_doc_unwarping=use_doc_unwarping,
            use_textline_orientation=use_textline_orientation,
            text_det_limit_side_len=text_det_limit_side_len,
            text_det_limit_type=text_det_limit_type,
            text_det_thresh=text_det_thresh,
            text_det_box_thresh=text_det_box_thresh,
            text_det_unclip_ratio=text_det_unclip_ratio,
            text_rec_score_thresh=text_rec_score_thresh,
            return_word_box=return_word_box,
        )
    def predict(
        self,
        input,
        *,
        use_doc_orientation_classify=None,
        use_doc_unwarping=None,
        use_textline_orientation=None,
        text_det_limit_side_len=None,
        text_det_limit_type=None,
        text_det_thresh=None,
        text_det_box_thresh=None,
        text_det_unclip_ratio=None,
        text_rec_score_thresh=None,
        return_word_box=None,
    ):
        """Run OCR eagerly and return all results as a list."""
        return list(
            self.predict_iter(
                input,
                use_doc_orientation_classify=use_doc_orientation_classify,
                use_doc_unwarping=use_doc_unwarping,
                use_textline_orientation=use_textline_orientation,
                text_det_limit_side_len=text_det_limit_side_len,
                text_det_limit_type=text_det_limit_type,
                text_det_thresh=text_det_thresh,
                text_det_box_thresh=text_det_box_thresh,
                text_det_unclip_ratio=text_det_unclip_ratio,
                text_rec_score_thresh=text_rec_score_thresh,
                return_word_box=return_word_box,
            )
        )
    # PaddleOCR 2.x entry point, kept for backward compatibility only.
    @deprecated("Please use `predict` instead.")
    def ocr(self, img, **kwargs):
        return self.predict(img, **kwargs)
    @classmethod
    def get_cli_subcommand_executor(cls):
        """Return the CLI subcommand executor for this pipeline."""
        return PaddleOCRCLISubcommandExecutor()
    def _get_paddlex_config_overrides(self):
        """Map the stored parameters onto dotted PaddleX config paths."""
        # Note on ``use_doc_preprocessor`` below: ``None or None`` is
        # ``None``, so the preprocessor toggle stays unset unless at least
        # one of the two preprocessing flags was explicitly provided.
        STRUCTURE = {
            "SubPipelines.DocPreprocessor.SubModules.DocOrientationClassify.model_name": self._params[
                "doc_orientation_classify_model_name"
            ],
            "SubPipelines.DocPreprocessor.SubModules.DocOrientationClassify.model_dir": self._params[
                "doc_orientation_classify_model_dir"
            ],
            "SubPipelines.DocPreprocessor.SubModules.DocUnwarping.model_name": self._params[
                "doc_unwarping_model_name"
            ],
            "SubPipelines.DocPreprocessor.SubModules.DocUnwarping.model_dir": self._params[
                "doc_unwarping_model_dir"
            ],
            "SubModules.TextDetection.model_name": self._params[
                "text_detection_model_name"
            ],
            "SubModules.TextDetection.model_dir": self._params[
                "text_detection_model_dir"
            ],
            "SubModules.TextLineOrientation.model_name": self._params[
                "textline_orientation_model_name"
            ],
            "SubModules.TextLineOrientation.model_dir": self._params[
                "textline_orientation_model_dir"
            ],
            "SubModules.TextLineOrientation.batch_size": self._params[
                "textline_orientation_batch_size"
            ],
            "SubModules.TextRecognition.model_name": self._params[
                "text_recognition_model_name"
            ],
            "SubModules.TextRecognition.model_dir": self._params[
                "text_recognition_model_dir"
            ],
            "SubModules.TextRecognition.batch_size": self._params[
                "text_recognition_batch_size"
            ],
            "SubPipelines.DocPreprocessor.use_doc_orientation_classify": self._params[
                "use_doc_orientation_classify"
            ],
            "SubPipelines.DocPreprocessor.use_doc_unwarping": self._params[
                "use_doc_unwarping"
            ],
            "use_doc_preprocessor": self._params["use_doc_orientation_classify"]
            or self._params["use_doc_unwarping"],
            "use_textline_orientation": self._params["use_textline_orientation"],
            "SubModules.TextDetection.limit_side_len": self._params[
                "text_det_limit_side_len"
            ],
            "SubModules.TextDetection.limit_type": self._params["text_det_limit_type"],
            "SubModules.TextDetection.thresh": self._params["text_det_thresh"],
            "SubModules.TextDetection.box_thresh": self._params["text_det_box_thresh"],
            "SubModules.TextDetection.unclip_ratio": self._params[
                "text_det_unclip_ratio"
            ],
            "SubModules.TextDetection.input_shape": self._params[
                "text_det_input_shape"
            ],
            "SubModules.TextRecognition.score_thresh": self._params[
                "text_rec_score_thresh"
            ],
            "SubModules.TextRecognition.return_word_box": self._params[
                "return_word_box"
            ],
            "SubModules.TextRecognition.input_shape": self._params[
                "text_rec_input_shape"
            ],
        }
        return create_config_from_structure(STRUCTURE)
    def _get_ocr_model_names(self, lang, ppocr_version):
        """Return ``(det_model_name, rec_model_name)`` for *lang* and
        *ppocr_version*, or ``(None, None)`` when no model is available.

        ``lang`` defaults to ``"ch"``; when ``ppocr_version`` is ``None``
        it is inferred from the language.
        """
        # Language groups sharing a single recognition model.  The group
        # membership checks below run in declaration order.
        LATIN_LANGS = [
            "af",
            "az",
            "bs",
            "cs",
            "cy",
            "da",
            "de",
            "es",
            "et",
            "fr",
            "ga",
            "hr",
            "hu",
            "id",
            "is",
            "it",
            "ku",
            "la",
            "lt",
            "lv",
            "mi",
            "ms",
            "mt",
            "nl",
            "no",
            "oc",
            "pi",
            "pl",
            "pt",
            "ro",
            "rs_latin",
            "sk",
            "sl",
            "sq",
            "sv",
            "sw",
            "tl",
            "tr",
            "uz",
            "vi",
            "french",
            "german",
            "fi",
            "eu",
            "gl",
            "lb",
            "rm",
            "ca",
            "qu",
        ]
        # NOTE(review): "ku" appears both above and in ARABIC_LANGS; the
        # Latin check runs first, so "ku" resolves to the Latin model —
        # confirm this is intended.
        ARABIC_LANGS = ["ar", "fa", "ug", "ur", "ps", "ku", "sd", "bal"]
        ESLAV_LANGS = ["ru", "be", "uk"]
        CYRILLIC_LANGS = [
            "ru",
            "rs_cyrillic",
            "be",
            "bg",
            "uk",
            "mn",
            "abq",
            "ady",
            "kbd",
            "ava",
            "dar",
            "inh",
            "che",
            "lbe",
            "lez",
            "tab",
            "kk",
            "ky",
            "tg",
            "mk",
            "tt",
            "cv",
            "ba",
            "mhr",
            "mo",
            "udm",
            "kv",
            "os",
            "bua",
            "xal",
            "tyv",
            "sah",
            "kaa",
        ]
        DEVANAGARI_LANGS = [
            "hi",
            "mr",
            "ne",
            "bh",
            "mai",
            "ang",
            "bho",
            "mah",
            "sck",
            "new",
            "gom",
            "sa",
            "bgc",
        ]
        # Languages that have a dedicated (per-language) PP-OCRv3 model.
        SPECIFIC_LANGS = [
            "ch",
            "en",
            "korean",
            "japan",
            "chinese_cht",
            "te",
            "ka",
            "ta",
        ]
        if lang is None:
            lang = "ch"
        # Infer the PP-OCR version from the language when not given:
        # prefer PP-OCRv5 where supported, else fall back to PP-OCRv3.
        if ppocr_version is None:
            if (
                lang
                in [
                    "ch",
                    "chinese_cht",
                    "en",
                    "japan",
                    "korean",
                    "th",
                    "el",
                    "te",
                    "ta",
                ]
                + LATIN_LANGS
                + ESLAV_LANGS
                + ARABIC_LANGS
                + CYRILLIC_LANGS
                + DEVANAGARI_LANGS
            ):
                ppocr_version = "PP-OCRv5"
            elif lang in (SPECIFIC_LANGS):
                ppocr_version = "PP-OCRv3"
            else:
                # Unknown language specified
                return None, None
        if ppocr_version == "PP-OCRv5":
            rec_lang, rec_model_name = None, None
            if lang in ("ch", "chinese_cht", "japan"):
                rec_model_name = "PP-OCRv5_server_rec"
            elif lang == "en":
                rec_model_name = "en_PP-OCRv5_mobile_rec"
            elif lang in LATIN_LANGS:
                rec_lang = "latin"
            elif lang in ESLAV_LANGS:
                rec_lang = "eslav"
            elif lang in ARABIC_LANGS:
                rec_lang = "arabic"
            elif lang in CYRILLIC_LANGS:
                rec_lang = "cyrillic"
            elif lang in DEVANAGARI_LANGS:
                rec_lang = "devanagari"
            elif lang == "korean":
                rec_lang = "korean"
            elif lang == "th":
                rec_lang = "th"
            elif lang == "el":
                rec_lang = "el"
            elif lang == "te":
                rec_lang = "te"
            elif lang == "ta":
                rec_lang = "ta"
            # Group languages share a mobile recognition model named after
            # the group; a few languages use dedicated models (set above).
            if rec_lang is not None:
                rec_model_name = f"{rec_lang}_PP-OCRv5_mobile_rec"
            return "PP-OCRv5_server_det", rec_model_name
        elif ppocr_version == "PP-OCRv4":
            # PP-OCRv4 only ships Chinese and English models.
            if lang == "ch":
                return "PP-OCRv4_mobile_det", "PP-OCRv4_mobile_rec"
            elif lang == "en":
                return "PP-OCRv4_mobile_det", "en_PP-OCRv4_mobile_rec"
            else:
                return None, None
        else:
            # PP-OCRv3
            rec_lang = None
            if lang in LATIN_LANGS:
                rec_lang = "latin"
            elif lang in ARABIC_LANGS:
                rec_lang = "arabic"
            elif lang in CYRILLIC_LANGS:
                rec_lang = "cyrillic"
            elif lang in DEVANAGARI_LANGS:
                rec_lang = "devanagari"
            else:
                if lang in SPECIFIC_LANGS:
                    rec_lang = lang
            rec_model_name = None
            if rec_lang == "ch":
                rec_model_name = "PP-OCRv3_mobile_rec"
            elif rec_lang is not None:
                rec_model_name = f"{rec_lang}_PP-OCRv3_mobile_rec"
            return "PP-OCRv3_mobile_det", rec_model_name
class PaddleOCRCLISubcommandExecutor(PipelineCLISubcommandExecutor):
    """CLI subcommand executor for the `ocr` pipeline.

    Registers the PaddleOCR 3.x options plus the deprecated 2.x spellings,
    which emit a deprecation warning and are forwarded to the new names by
    ``PaddleOCR.__init__``.
    """
    @property
    def subparser_name(self):
        return "ocr"
    def _update_subparser(self, subparser):
        """Register OCR-specific command-line options on *subparser*."""
        add_simple_inference_args(subparser)
        subparser.add_argument(
            "--doc_orientation_classify_model_name",
            type=str,
            help="Name of the document image orientation classification model.",
        )
        subparser.add_argument(
            "--doc_orientation_classify_model_dir",
            type=str,
            help="Path to the document image orientation classification model directory.",
        )
        subparser.add_argument(
            "--doc_unwarping_model_name",
            type=str,
            help="Name of the text image unwarping model.",
        )
        subparser.add_argument(
            "--doc_unwarping_model_dir",
            type=str,
            help="Path to the image unwarping model directory.",
        )
        subparser.add_argument(
            "--text_detection_model_name",
            type=str,
            help="Name of the text detection model.",
        )
        subparser.add_argument(
            "--text_detection_model_dir",
            type=str,
            help="Path to the text detection model directory.",
        )
        subparser.add_argument(
            "--textline_orientation_model_name",
            type=str,
            help="Name of the text line orientation classification model.",
        )
        subparser.add_argument(
            "--textline_orientation_model_dir",
            type=str,
            help="Path to the text line orientation classification model directory.",
        )
        subparser.add_argument(
            "--textline_orientation_batch_size",
            type=int,
            help="Batch size for the text line orientation classification model.",
        )
        subparser.add_argument(
            "--text_recognition_model_name",
            type=str,
            help="Name of the text recognition model.",
        )
        subparser.add_argument(
            "--text_recognition_model_dir",
            type=str,
            help="Path to the text recognition model directory.",
        )
        subparser.add_argument(
            "--text_recognition_batch_size",
            type=int,
            help="Batch size for the text recognition model.",
        )
        subparser.add_argument(
            "--use_doc_orientation_classify",
            type=str2bool,
            help="Whether to use document image orientation classification.",
        )
        subparser.add_argument(
            "--use_doc_unwarping",
            type=str2bool,
            help="Whether to use text image unwarping.",
        )
        subparser.add_argument(
            "--use_textline_orientation",
            type=str2bool,
            help="Whether to use text line orientation classification.",
        )
        subparser.add_argument(
            "--text_det_limit_side_len",
            type=int,
            help="This sets a limit on the side length of the input image for the text detection model.",
        )
        subparser.add_argument(
            "--text_det_limit_type",
            type=str,
            # FIX: corrected the misspelling "deteciton" in the help text.
            help="This determines how the side length limit is applied to the input image before feeding it into the text detection model.",
        )
        subparser.add_argument(
            "--text_det_thresh",
            type=float,
            help="Detection pixel threshold for the text detection model. Pixels with scores greater than this threshold in the output probability map are considered text pixels.",
        )
        subparser.add_argument(
            "--text_det_box_thresh",
            type=float,
            help="Detection box threshold for the text detection model. A detection result is considered a text region if the average score of all pixels within the border of the result is greater than this threshold.",
        )
        subparser.add_argument(
            "--text_det_unclip_ratio",
            type=float,
            help="Text detection expansion coefficient, which expands the text region using this method. The larger the value, the larger the expansion area.",
        )
        subparser.add_argument(
            "--text_det_input_shape",
            nargs=3,
            type=int,
            metavar=("C", "H", "W"),
            help="Input shape of the text detection model.",
        )
        subparser.add_argument(
            "--text_rec_score_thresh",
            type=float,
            help="Text recognition threshold. Text results with scores greater than this threshold are retained.",
        )
        subparser.add_argument(
            "--return_word_box",
            type=str2bool,
            help="Whether to return the coordinates of the recognition result.",
        )
        subparser.add_argument(
            "--text_rec_input_shape",
            nargs=3,
            type=int,
            metavar=("C", "H", "W"),
            help="Input shape of the text recognition model.",
        )
        subparser.add_argument(
            "--lang", type=str, help="Language in the input image for OCR processing."
        )
        subparser.add_argument(
            "--ocr_version",
            type=str,
            choices=_SUPPORTED_OCR_VERSIONS,
            help="PP-OCR version to use.",
        )
        # Value types for the deprecated PaddleOCR 2.x options.
        deprecated_arg_types = {
            "det_model_dir": str,
            "det_limit_side_len": int,
            "det_limit_type": str,
            "det_db_thresh": float,
            "det_db_box_thresh": float,
            "det_db_unclip_ratio": float,
            "rec_model_dir": str,
            "rec_batch_num": int,
            "use_angle_cls": str2bool,
            "cls_model_dir": str,
            "cls_batch_num": int,
        }
        for name, new_name in _DEPRECATED_PARAM_NAME_MAPPING.items():
            assert name in deprecated_arg_types, name
            subparser.add_argument(
                "--" + name,
                action=DeprecatedOptionAction,
                # FIX: previously `type=str`, which forwarded e.g.
                # `--rec_batch_num` as a raw string even though the
                # replacement parameter expects an int; use the declared
                # type so forwarded values match the new parameters.
                type=deprecated_arg_types[name],
                help=f"[Deprecated] Please use `--{new_name}` instead.",
            )
    def execute_with_args(self, args):
        """Check old/new option exclusivity, then run inference.

        Exits with status 2 when both the deprecated and the replacement
        spelling of the same option were supplied.
        """
        params = get_subcommand_args(args)
        for name, new_name in _DEPRECATED_PARAM_NAME_MAPPING.items():
            assert name in params
            val = params[name]
            new_val = params[new_name]
            if val is not None and new_val is not None:
                logger.error(
                    "`--%s` and `--%s` are mutually exclusive.", name, new_name
                )
                sys.exit(2)
            if val is None:
                # Drop unset deprecated options so only meaningful names
                # reach the PaddleOCR constructor.
                params.pop(name)
        perform_simple_inference(PaddleOCR, params)
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_pipelines/ocr.py",
"license": "Apache License 2.0",
"lines": 661,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_pipelines/pp_chatocrv4_doc.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .._utils.cli import (
get_subcommand_args,
str2bool,
)
from .base import PaddleXPipelineWrapper, PipelineCLISubcommandExecutor
from .utils import create_config_from_structure
class PPChatOCRv4Doc(PaddleXPipelineWrapper):
    """Wrapper around the PaddleX "PP-ChatOCRv4-doc" pipeline.

    The pipeline combines layout parsing (OCR with optional seal and table
    recognition), vector retrieval, and LLM/MLLM chat to extract the values
    of user-specified keys from document images.

    Every constructor argument defaults to ``None``, meaning "use the
    underlying pipeline's default". The arguments are recorded verbatim and
    translated into PaddleX config overrides by
    ``_get_paddlex_config_overrides``.
    """

    def __init__(
        self,
        layout_detection_model_name=None,
        layout_detection_model_dir=None,
        doc_orientation_classify_model_name=None,
        doc_orientation_classify_model_dir=None,
        doc_unwarping_model_name=None,
        doc_unwarping_model_dir=None,
        text_detection_model_name=None,
        text_detection_model_dir=None,
        textline_orientation_model_name=None,
        textline_orientation_model_dir=None,
        textline_orientation_batch_size=None,
        text_recognition_model_name=None,
        text_recognition_model_dir=None,
        text_recognition_batch_size=None,
        table_structure_recognition_model_name=None,
        table_structure_recognition_model_dir=None,
        seal_text_detection_model_name=None,
        seal_text_detection_model_dir=None,
        seal_text_recognition_model_name=None,
        seal_text_recognition_model_dir=None,
        seal_text_recognition_batch_size=None,
        use_doc_orientation_classify=None,
        use_doc_unwarping=None,
        use_textline_orientation=None,
        use_seal_recognition=None,
        use_table_recognition=None,
        layout_threshold=None,
        layout_nms=None,
        layout_unclip_ratio=None,
        layout_merge_bboxes_mode=None,
        text_det_limit_side_len=None,
        text_det_limit_type=None,
        text_det_thresh=None,
        text_det_box_thresh=None,
        text_det_unclip_ratio=None,
        text_rec_score_thresh=None,
        seal_det_limit_side_len=None,
        seal_det_limit_type=None,
        seal_det_thresh=None,
        seal_det_box_thresh=None,
        seal_det_unclip_ratio=None,
        seal_rec_score_thresh=None,
        retriever_config=None,
        mllm_chat_bot_config=None,
        chat_bot_config=None,
        **kwargs,
    ):
        # Snapshot every explicitly named parameter (everything in the local
        # scope except `self` and `kwargs`) for later conversion into PaddleX
        # configuration overrides. Do not introduce temporaries before this
        # line: they would leak into `locals()`.
        params = locals().copy()
        params.pop("self")
        params.pop("kwargs")
        self._params = params
        super().__init__(**kwargs)

    @property
    def _paddlex_pipeline_name(self):
        """Name of the underlying PaddleX pipeline."""
        return "PP-ChatOCRv4-doc"

    def save_vector(self, vector_info, save_path, retriever_config=None):
        """Persist vector-retrieval info to `save_path` via the wrapped pipeline."""
        return self.paddlex_pipeline.save_vector(
            vector_info=vector_info,
            save_path=save_path,
            retriever_config=retriever_config,
        )

    def load_vector(self, data_path, retriever_config=None):
        """Load previously saved vector-retrieval info from `data_path`."""
        return self.paddlex_pipeline.load_vector(
            data_path=data_path, retriever_config=retriever_config
        )

    def load_visual_info_list(self, data_path):
        """Load a previously saved list of visual-prediction info."""
        return self.paddlex_pipeline.load_visual_info_list(data_path=data_path)

    def save_visual_info_list(self, visual_info, save_path):
        """Persist a list of visual-prediction info to `save_path`."""
        return self.paddlex_pipeline.save_visual_info_list(
            visual_info=visual_info, save_path=save_path
        )

    def visual_predict_iter(
        self,
        input,
        *,
        use_doc_orientation_classify=None,
        use_doc_unwarping=None,
        use_textline_orientation=None,
        use_seal_recognition=None,
        use_table_recognition=None,
        layout_threshold=None,
        layout_nms=None,
        layout_unclip_ratio=None,
        layout_merge_bboxes_mode=None,
        text_det_limit_side_len=None,
        text_det_limit_type=None,
        text_det_thresh=None,
        text_det_box_thresh=None,
        text_det_unclip_ratio=None,
        text_rec_score_thresh=None,
        seal_det_limit_side_len=None,
        seal_det_limit_type=None,
        seal_det_thresh=None,
        seal_det_box_thresh=None,
        seal_det_unclip_ratio=None,
        seal_rec_score_thresh=None,
        **kwargs,
    ):
        """Run visual prediction lazily, yielding one result per page/image.

        `None` arguments fall back to the values fixed at construction time
        (or the pipeline defaults). Extra keyword arguments are forwarded
        unchanged to the underlying PaddleX pipeline.
        """
        return self.paddlex_pipeline.visual_predict(
            input,
            use_doc_orientation_classify=use_doc_orientation_classify,
            use_doc_unwarping=use_doc_unwarping,
            use_textline_orientation=use_textline_orientation,
            use_seal_recognition=use_seal_recognition,
            use_table_recognition=use_table_recognition,
            layout_threshold=layout_threshold,
            layout_nms=layout_nms,
            layout_unclip_ratio=layout_unclip_ratio,
            layout_merge_bboxes_mode=layout_merge_bboxes_mode,
            text_det_limit_side_len=text_det_limit_side_len,
            text_det_limit_type=text_det_limit_type,
            text_det_thresh=text_det_thresh,
            text_det_box_thresh=text_det_box_thresh,
            text_det_unclip_ratio=text_det_unclip_ratio,
            text_rec_score_thresh=text_rec_score_thresh,
            seal_det_limit_side_len=seal_det_limit_side_len,
            seal_det_limit_type=seal_det_limit_type,
            seal_det_thresh=seal_det_thresh,
            seal_det_box_thresh=seal_det_box_thresh,
            seal_det_unclip_ratio=seal_det_unclip_ratio,
            seal_rec_score_thresh=seal_rec_score_thresh,
            **kwargs,
        )

    def visual_predict(
        self,
        input,
        *,
        use_doc_orientation_classify=None,
        use_doc_unwarping=None,
        use_textline_orientation=None,
        use_seal_recognition=None,
        use_table_recognition=None,
        layout_threshold=None,
        layout_nms=None,
        layout_unclip_ratio=None,
        layout_merge_bboxes_mode=None,
        text_det_limit_side_len=None,
        text_det_limit_type=None,
        text_det_thresh=None,
        text_det_box_thresh=None,
        text_det_unclip_ratio=None,
        text_rec_score_thresh=None,
        seal_det_limit_side_len=None,
        seal_det_limit_type=None,
        seal_det_thresh=None,
        seal_det_box_thresh=None,
        seal_det_unclip_ratio=None,
        seal_rec_score_thresh=None,
        **kwargs,
    ):
        """Eager variant of `visual_predict_iter`: return all results as a list."""
        return list(
            self.visual_predict_iter(
                input,
                use_doc_orientation_classify=use_doc_orientation_classify,
                use_doc_unwarping=use_doc_unwarping,
                use_textline_orientation=use_textline_orientation,
                use_seal_recognition=use_seal_recognition,
                use_table_recognition=use_table_recognition,
                layout_threshold=layout_threshold,
                layout_nms=layout_nms,
                layout_unclip_ratio=layout_unclip_ratio,
                layout_merge_bboxes_mode=layout_merge_bboxes_mode,
                text_det_limit_side_len=text_det_limit_side_len,
                text_det_limit_type=text_det_limit_type,
                text_det_thresh=text_det_thresh,
                text_det_box_thresh=text_det_box_thresh,
                text_det_unclip_ratio=text_det_unclip_ratio,
                text_rec_score_thresh=text_rec_score_thresh,
                seal_det_limit_side_len=seal_det_limit_side_len,
                seal_det_limit_type=seal_det_limit_type,
                seal_det_thresh=seal_det_thresh,
                seal_det_box_thresh=seal_det_box_thresh,
                seal_det_unclip_ratio=seal_det_unclip_ratio,
                seal_rec_score_thresh=seal_rec_score_thresh,
                **kwargs,
            )
        )

    def build_vector(
        self,
        visual_info,
        *,
        min_characters=3500,
        block_size=300,
        flag_save_bytes_vector=False,
        retriever_config=None,
    ):
        """Build the retrieval vector store from visual-prediction info."""
        return self.paddlex_pipeline.build_vector(
            visual_info,
            min_characters=min_characters,
            block_size=block_size,
            flag_save_bytes_vector=flag_save_bytes_vector,
            retriever_config=retriever_config,
        )

    def mllm_pred(self, input, key_list, *, mllm_chat_bot_config=None):
        """Query the multimodal LLM for the given keys on `input`."""
        return self.paddlex_pipeline.mllm_pred(
            input,
            key_list,
            mllm_chat_bot_config=mllm_chat_bot_config,
        )

    def chat(
        self,
        key_list,
        visual_info,
        *,
        use_vector_retrieval=True,
        vector_info=None,
        min_characters=3500,
        text_task_description=None,
        text_output_format=None,
        text_rules_str=None,
        text_few_shot_demo_text_content=None,
        text_few_shot_demo_key_value_list=None,
        table_task_description=None,
        table_output_format=None,
        table_rules_str=None,
        table_few_shot_demo_text_content=None,
        table_few_shot_demo_key_value_list=None,
        mllm_predict_info=None,
        mllm_integration_strategy="integration",
        chat_bot_config=None,
        retriever_config=None,
    ):
        """Extract the values of `key_list` via LLM chat over `visual_info`."""
        return self.paddlex_pipeline.chat(
            key_list,
            visual_info,
            use_vector_retrieval=use_vector_retrieval,
            vector_info=vector_info,
            min_characters=min_characters,
            text_task_description=text_task_description,
            text_output_format=text_output_format,
            text_rules_str=text_rules_str,
            text_few_shot_demo_text_content=text_few_shot_demo_text_content,
            text_few_shot_demo_key_value_list=text_few_shot_demo_key_value_list,
            table_task_description=table_task_description,
            table_output_format=table_output_format,
            table_rules_str=table_rules_str,
            table_few_shot_demo_text_content=table_few_shot_demo_text_content,
            table_few_shot_demo_key_value_list=table_few_shot_demo_key_value_list,
            mllm_predict_info=mllm_predict_info,
            mllm_integration_strategy=mllm_integration_strategy,
            chat_bot_config=chat_bot_config,
            retriever_config=retriever_config,
        )

    @classmethod
    def get_cli_subcommand_executor(cls):
        """Return the CLI subcommand executor for this pipeline."""
        return PPChatOCRv4DocCLISubcommandExecutor()

    def _get_paddlex_config_overrides(self):
        """Map the recorded constructor arguments onto dotted PaddleX config paths.

        Entries whose value is `None` are dropped by
        `create_config_from_structure`, leaving the pipeline defaults intact.
        """
        STRUCTURE = {
            "SubPipelines.LayoutParser.SubModules.LayoutDetection.model_name": self._params[
                "layout_detection_model_name"
            ],
            "SubPipelines.LayoutParser.SubModules.LayoutDetection.model_dir": self._params[
                "layout_detection_model_dir"
            ],
            "SubPipelines.LayoutParser.SubPipelines.DocPreprocessor.SubModules.DocOrientationClassify.model_name": self._params[
                "doc_orientation_classify_model_name"
            ],
            "SubPipelines.LayoutParser.SubPipelines.DocPreprocessor.SubModules.DocOrientationClassify.model_dir": self._params[
                "doc_orientation_classify_model_dir"
            ],
            "SubPipelines.LayoutParser.SubPipelines.DocPreprocessor.SubModules.DocUnwarping.model_name": self._params[
                "doc_unwarping_model_name"
            ],
            "SubPipelines.LayoutParser.SubPipelines.DocPreprocessor.SubModules.DocUnwarping.model_dir": self._params[
                "doc_unwarping_model_dir"
            ],
            "SubPipelines.LayoutParser.SubPipelines.GeneralOCR.SubModules.TextDetection.model_name": self._params[
                "text_detection_model_name"
            ],
            "SubPipelines.LayoutParser.SubPipelines.GeneralOCR.SubModules.TextDetection.model_dir": self._params[
                "text_detection_model_dir"
            ],
            "SubPipelines.LayoutParser.SubPipelines.GeneralOCR.SubModules.TextLineOrientation.model_name": self._params[
                "textline_orientation_model_name"
            ],
            "SubPipelines.LayoutParser.SubPipelines.GeneralOCR.SubModules.TextLineOrientation.model_dir": self._params[
                "textline_orientation_model_dir"
            ],
            "SubPipelines.LayoutParser.SubPipelines.GeneralOCR.SubModules.TextLineOrientation.batch_size": self._params[
                "textline_orientation_batch_size"
            ],
            "SubPipelines.LayoutParser.SubPipelines.GeneralOCR.SubModules.TextRecognition.model_name": self._params[
                "text_recognition_model_name"
            ],
            "SubPipelines.LayoutParser.SubPipelines.GeneralOCR.SubModules.TextRecognition.model_dir": self._params[
                "text_recognition_model_dir"
            ],
            "SubPipelines.LayoutParser.SubPipelines.GeneralOCR.SubModules.TextRecognition.batch_size": self._params[
                "text_recognition_batch_size"
            ],
            "SubPipelines.LayoutParser.SubPipelines.TableRecognition.SubModules.TableStructureRecognition.model_name": self._params[
                "table_structure_recognition_model_name"
            ],
            "SubPipelines.LayoutParser.SubPipelines.TableRecognition.SubModules.TableStructureRecognition.model_dir": self._params[
                "table_structure_recognition_model_dir"
            ],
            "SubPipelines.LayoutParser.SubPipelines.SealRecognition.SubPipelines.SealOCR.SubModules.TextDetection.model_name": self._params[
                "seal_text_detection_model_name"
            ],
            "SubPipelines.LayoutParser.SubPipelines.SealRecognition.SubPipelines.SealOCR.SubModules.TextDetection.model_dir": self._params[
                "seal_text_detection_model_dir"
            ],
            "SubPipelines.LayoutParser.SubPipelines.SealRecognition.SubPipelines.SealOCR.SubModules.TextRecognition.model_name": self._params[
                "seal_text_recognition_model_name"
            ],
            "SubPipelines.LayoutParser.SubPipelines.SealRecognition.SubPipelines.SealOCR.SubModules.TextRecognition.model_dir": self._params[
                "seal_text_recognition_model_dir"
            ],
            "SubPipelines.LayoutParser.SubPipelines.SealRecognition.SubPipelines.SealOCR.SubModules.TextRecognition.batch_size": self._params[
                "seal_text_recognition_batch_size"
            ],
            "SubPipelines.LayoutParser.SubPipelines.DocPreprocessor.use_doc_orientation_classify": self._params[
                "use_doc_orientation_classify"
            ],
            "SubPipelines.LayoutParser.SubPipelines.DocPreprocessor.use_doc_unwarping": self._params[
                "use_doc_unwarping"
            ],
            "SubPipelines.LayoutParser.SubPipelines.GeneralOCR.use_textline_orientation": self._params[
                "use_textline_orientation"
            ],
            # Preprocessing is needed if either orientation classification or
            # unwarping is requested; stays None (default) when both are None.
            "SubPipelines.LayoutParser.use_doc_preprocessor": self._params[
                "use_doc_orientation_classify"
            ]
            or self._params["use_doc_unwarping"],
            "SubPipelines.LayoutParser.use_seal_recognition": self._params[
                "use_seal_recognition"
            ],
            "SubPipelines.LayoutParser.use_table_recognition": self._params[
                "use_table_recognition"
            ],
            "SubPipelines.LayoutParser.SubModules.LayoutDetection.threshold": self._params[
                "layout_threshold"
            ],
            "SubPipelines.LayoutParser.SubModules.LayoutDetection.nms": self._params[
                "layout_nms"
            ],
            "SubPipelines.LayoutParser.SubModules.LayoutDetection.unclip_ratio": self._params[
                "layout_unclip_ratio"
            ],
            "SubPipelines.LayoutParser.SubModules.LayoutDetection.merge_bboxes_mode": self._params[
                "layout_merge_bboxes_mode"
            ],
            "SubPipelines.LayoutParser.SubPipelines.GeneralOCR.SubModules.TextDetection.limit_side_len": self._params[
                "text_det_limit_side_len"
            ],
            "SubPipelines.LayoutParser.SubPipelines.GeneralOCR.SubModules.TextDetection.limit_type": self._params[
                "text_det_limit_type"
            ],
            "SubPipelines.LayoutParser.SubPipelines.GeneralOCR.SubModules.TextDetection.thresh": self._params[
                "text_det_thresh"
            ],
            "SubPipelines.LayoutParser.SubPipelines.GeneralOCR.SubModules.TextDetection.box_thresh": self._params[
                "text_det_box_thresh"
            ],
            "SubPipelines.LayoutParser.SubPipelines.GeneralOCR.SubModules.TextDetection.unclip_ratio": self._params[
                "text_det_unclip_ratio"
            ],
            "SubPipelines.LayoutParser.SubPipelines.GeneralOCR.SubModules.TextRecognition.score_thresh": self._params[
                "text_rec_score_thresh"
            ],
            # FIX: this entry previously read "text_det_limit_side_len",
            # which silently applied the general-OCR side-length limit to
            # seal text detection and ignored `seal_det_limit_side_len`.
            "SubPipelines.LayoutParser.SubPipelines.SealRecognition.SubPipelines.SealOCR.SubModules.TextDetection.limit_side_len": self._params[
                "seal_det_limit_side_len"
            ],
            "SubPipelines.LayoutParser.SubPipelines.SealRecognition.SubPipelines.SealOCR.SubModules.TextDetection.limit_type": self._params[
                "seal_det_limit_type"
            ],
            "SubPipelines.LayoutParser.SubPipelines.SealRecognition.SubPipelines.SealOCR.SubModules.TextDetection.thresh": self._params[
                "seal_det_thresh"
            ],
            "SubPipelines.LayoutParser.SubPipelines.SealRecognition.SubPipelines.SealOCR.SubModules.TextDetection.box_thresh": self._params[
                "seal_det_box_thresh"
            ],
            "SubPipelines.LayoutParser.SubPipelines.SealRecognition.SubPipelines.SealOCR.SubModules.TextDetection.unclip_ratio": self._params[
                "seal_det_unclip_ratio"
            ],
            "SubPipelines.LayoutParser.SubPipelines.SealRecognition.SubPipelines.SealOCR.SubModules.TextRecognition.score_thresh": self._params[
                "seal_rec_score_thresh"
            ],
            "SubModules.LLM_Retriever": self._params["retriever_config"],
            "SubModules.MLLM_Chat": self._params["mllm_chat_bot_config"],
            "SubModules.LLM_Chat": self._params["chat_bot_config"],
        }
        return create_config_from_structure(STRUCTURE)
class PPChatOCRv4DocCLISubcommandExecutor(PipelineCLISubcommandExecutor):
    """CLI subcommand executor for the PP-ChatOCRv4-doc pipeline.

    Registers the `pp_chatocrv4_doc` subcommand, builds retriever/chat-bot
    configurations from CLI options, runs the full visual-predict +
    vector-build + (optional) MLLM + chat workflow, and prints the results.
    """

    @property
    def subparser_name(self):
        """Name under which this subcommand is registered."""
        return "pp_chatocrv4_doc"

    def _update_subparser(self, subparser):
        """Register all CLI options of the subcommand on `subparser`."""
        subparser.add_argument(
            "-i",
            "--input",
            type=str,
            required=True,
            help="Input path or URL.",
        )
        subparser.add_argument(
            "-k",
            "--keys",
            type=str,
            nargs="+",
            required=True,
            metavar="KEY",
            help="Keys used for information extraction.",
        )
        subparser.add_argument(
            "--save_path",
            type=str,
            help="Path to the output directory.",
        )
        subparser.add_argument(
            "--invoke_mllm",
            type=str2bool,
            default=False,
            help="Whether to invoke the multimodal large language model.",
        )
        subparser.add_argument(
            "--layout_detection_model_name",
            type=str,
            help="Name of the layout detection model.",
        )
        subparser.add_argument(
            "--layout_detection_model_dir",
            type=str,
            help="Path to the layout detection model directory.",
        )
        subparser.add_argument(
            "--doc_orientation_classify_model_name",
            type=str,
            help="Name of the document image orientation classification model.",
        )
        subparser.add_argument(
            "--doc_orientation_classify_model_dir",
            type=str,
            help="Path to the document image orientation classification model directory.",
        )
        subparser.add_argument(
            "--doc_unwarping_model_name",
            type=str,
            help="Name of the text image unwarping model.",
        )
        subparser.add_argument(
            "--doc_unwarping_model_dir",
            type=str,
            help="Path to the image unwarping model directory.",
        )
        subparser.add_argument(
            "--text_detection_model_name",
            type=str,
            help="Name of the text detection model.",
        )
        subparser.add_argument(
            "--text_detection_model_dir",
            type=str,
            help="Path to the text detection model directory.",
        )
        subparser.add_argument(
            "--textline_orientation_model_name",
            type=str,
            help="Name of the text line orientation classification model.",
        )
        subparser.add_argument(
            "--textline_orientation_model_dir",
            type=str,
            help="Path to the text line orientation classification model directory.",
        )
        subparser.add_argument(
            "--textline_orientation_batch_size",
            type=int,
            help="Batch size for the text line orientation classification model.",
        )
        subparser.add_argument(
            "--text_recognition_model_name",
            type=str,
            help="Name of the text recognition model.",
        )
        subparser.add_argument(
            "--text_recognition_model_dir",
            type=str,
            help="Path to the text recognition model directory.",
        )
        subparser.add_argument(
            "--text_recognition_batch_size",
            type=int,
            help="Batch size for the text recognition model.",
        )
        subparser.add_argument(
            "--table_structure_recognition_model_name",
            type=str,
            help="Name of the table structure recognition model.",
        )
        subparser.add_argument(
            "--table_structure_recognition_model_dir",
            type=str,
            help="Path to the table structure recognition model directory.",
        )
        subparser.add_argument(
            "--seal_text_detection_model_name",
            type=str,
            help="Name of the seal text detection model.",
        )
        subparser.add_argument(
            "--seal_text_detection_model_dir",
            type=str,
            help="Path to the seal text detection model directory.",
        )
        subparser.add_argument(
            "--seal_text_recognition_model_name",
            type=str,
            help="Name of the seal text recognition model.",
        )
        subparser.add_argument(
            "--seal_text_recognition_model_dir",
            type=str,
            help="Path to the seal text recognition model directory.",
        )
        subparser.add_argument(
            "--seal_text_recognition_batch_size",
            type=int,
            help="Batch size for the seal text recognition model.",
        )
        subparser.add_argument(
            "--use_doc_orientation_classify",
            type=str2bool,
            help="Whether to use document image orientation classification.",
        )
        subparser.add_argument(
            "--use_doc_unwarping",
            type=str2bool,
            help="Whether to use text image unwarping.",
        )
        subparser.add_argument(
            "--use_textline_orientation",
            type=str2bool,
            help="Whether to use text line orientation classification.",
        )
        subparser.add_argument(
            "--use_seal_recognition",
            type=str2bool,
            help="Whether to use seal recognition.",
        )
        subparser.add_argument(
            "--use_table_recognition",
            type=str2bool,
            help="Whether to use table recognition.",
        )
        # TODO: Support dict and list types
        subparser.add_argument(
            "--layout_threshold",
            type=float,
            help="Score threshold for the layout detection model.",
        )
        subparser.add_argument(
            "--layout_nms",
            type=str2bool,
            help="Whether to use NMS in layout detection.",
        )
        subparser.add_argument(
            "--layout_unclip_ratio",
            type=float,
            help="Expansion coefficient for layout detection.",
        )
        subparser.add_argument(
            "--layout_merge_bboxes_mode",
            type=str,
            help="Overlapping box filtering method.",
        )
        subparser.add_argument(
            "--text_det_limit_side_len",
            type=int,
            help="This sets a limit on the side length of the input image for the text detection model.",
        )
        subparser.add_argument(
            "--text_det_limit_type",
            type=str,
            help="This determines how the side length limit is applied to the input image before feeding it into the text detection model.",
        )
        subparser.add_argument(
            "--text_det_thresh",
            type=float,
            help="Detection pixel threshold for the text detection model. Pixels with scores greater than this threshold in the output probability map are considered text pixels.",
        )
        subparser.add_argument(
            "--text_det_box_thresh",
            type=float,
            help="Detection box threshold for the text detection model. A detection result is considered a text region if the average score of all pixels within the border of the result is greater than this threshold.",
        )
        subparser.add_argument(
            "--text_det_unclip_ratio",
            type=float,
            help="Text detection expansion coefficient, which expands the text region using this method. The larger the value, the larger the expansion area.",
        )
        subparser.add_argument(
            "--text_rec_score_thresh",
            type=float,
            help="Text recognition threshold used in general OCR. Text results with scores greater than this threshold are retained.",
        )
        subparser.add_argument(
            "--seal_det_limit_side_len",
            type=int,
            help="This sets a limit on the side length of the input image for the seal text detection model.",
        )
        subparser.add_argument(
            "--seal_det_limit_type",
            type=str,
            help="This determines how the side length limit is applied to the input image before feeding it into the seal text detection model.",
        )
        subparser.add_argument(
            "--seal_det_thresh",
            type=float,
            help="Detection pixel threshold for the seal text detection model. Pixels with scores greater than this threshold in the output probability map are considered text pixels.",
        )
        subparser.add_argument(
            "--seal_det_box_thresh",
            type=float,
            help="Detection box threshold for the seal text detection model. A detection result is considered a text region if the average score of all pixels within the border of the result is greater than this threshold.",
        )
        subparser.add_argument(
            "--seal_det_unclip_ratio",
            type=float,
            help="Seal text detection expansion coefficient, which expands the text region using this method. The larger the value, the larger the expansion area.",
        )
        subparser.add_argument(
            "--seal_rec_score_thresh",
            type=float,
            help="Seal text recognition threshold. Text results with scores greater than this threshold are retained.",
        )
        # FIXME: Passing API key through CLI is not secure; consider using
        # environment variables.
        subparser.add_argument(
            "--qianfan_api_key",
            type=str,
            help="Qianfan API key used to configure the LLM retriever and chat bot.",
        )
        subparser.add_argument(
            "--pp_docbee_base_url",
            type=str,
            help="Configuration for the multimodal large language model.",
        )

    def execute_with_args(self, args):
        """Run the full PP-ChatOCRv4-doc workflow from parsed CLI args."""
        params = get_subcommand_args(args)
        # Pull out the options that drive the workflow itself; the remainder
        # is forwarded to the PPChatOCRv4Doc constructor.
        input_data = params.pop("input")
        keys = params.pop("keys")
        save_path = params.pop("save_path")
        invoke_mllm = params.pop("invoke_mllm")
        qianfan_api_key = params.pop("qianfan_api_key")
        if qianfan_api_key is not None:
            # One key configures both the embedding retriever and the chat bot.
            params["retriever_config"] = {
                "module_name": "retriever",
                "model_name": "embedding-v1",
                "base_url": "https://qianfan.baidubce.com/v2",
                "api_type": "qianfan",
                "api_key": qianfan_api_key,
            }
            params["chat_bot_config"] = {
                "module_name": "chat_bot",
                "model_name": "ernie-3.5-8k",
                "base_url": "https://qianfan.baidubce.com/v2",
                "api_type": "openai",
                "api_key": qianfan_api_key,
            }
        pp_docbee_base_url = params.pop("pp_docbee_base_url")
        if pp_docbee_base_url is not None:
            params["mllm_chat_bot_config"] = {
                "module_name": "chat_bot",
                "model_name": "PP-DocBee",
                # PaddleX requires endpoints such as ".../chat/completions",
                # which, as the parameter name suggests, are not base URLs.
                "base_url": pp_docbee_base_url,
                "api_type": "openai",
                "api_key": "fake_key",
            }
        chatocr = PPChatOCRv4Doc(**params)
        # Stage 1: per-page visual prediction (layout/OCR/table/seal).
        result_visual = chatocr.visual_predict_iter(input_data)
        visual_info_list = []
        for res in result_visual:
            visual_info_list.append(res["visual_info"])
            if save_path:
                res["layout_parsing_result"].save_all(save_path)
        # Stage 2: build the retrieval vector store.
        vector_info = chatocr.build_vector(visual_info_list)
        # Stage 3 (optional): multimodal LLM prediction.
        if invoke_mllm:
            result_mllm = chatocr.mllm_pred(input_data, keys)
            mllm_predict_info = result_mllm["mllm_res"]
        else:
            mllm_predict_info = None
        # Stage 4: LLM chat to extract the requested keys.
        result_chat = chatocr.chat(
            keys,
            visual_info_list,
            vector_info=vector_info,
            mllm_predict_info=mllm_predict_info,
        )
        # Print the result to stdout
        for k, v in result_chat["chat_res"].items():
            print(f"{k} {v}")
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_pipelines/pp_chatocrv4_doc.py",
"license": "Apache License 2.0",
"lines": 715,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_pipelines/pp_structurev3.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from .._utils.cli import (
add_simple_inference_args,
get_subcommand_args,
perform_simple_inference,
str2bool,
)
from .base import PaddleXPipelineWrapper, PipelineCLISubcommandExecutor
from .utils import create_config_from_structure
from ._patch_layout_parsing import apply_patches as _apply_layout_parsing_patches
# Patch PaddleX's layout-parsing pipeline at import time so the patches are
# in effect before any PPStructureV3 instance is constructed.
_apply_layout_parsing_patches()
# OCR versions that the `ocr_version` argument may select; validated in
# `PPStructureV3.__init__`.
_SUPPORTED_OCR_VERSIONS = ["PP-OCRv3", "PP-OCRv4", "PP-OCRv5"]
class PPStructureV3(PaddleXPipelineWrapper):
def __init__(
    self,
    layout_detection_model_name=None,
    layout_detection_model_dir=None,
    layout_threshold=None,
    layout_nms=None,
    layout_unclip_ratio=None,
    layout_merge_bboxes_mode=None,
    chart_recognition_model_name=None,
    chart_recognition_model_dir=None,
    chart_recognition_batch_size=None,
    region_detection_model_name=None,
    region_detection_model_dir=None,
    doc_orientation_classify_model_name=None,
    doc_orientation_classify_model_dir=None,
    doc_unwarping_model_name=None,
    doc_unwarping_model_dir=None,
    text_detection_model_name=None,
    text_detection_model_dir=None,
    text_det_limit_side_len=None,
    text_det_limit_type=None,
    text_det_thresh=None,
    text_det_box_thresh=None,
    text_det_unclip_ratio=None,
    textline_orientation_model_name=None,
    textline_orientation_model_dir=None,
    textline_orientation_batch_size=None,
    text_recognition_model_name=None,
    text_recognition_model_dir=None,
    text_recognition_batch_size=None,
    text_rec_score_thresh=None,
    table_classification_model_name=None,
    table_classification_model_dir=None,
    wired_table_structure_recognition_model_name=None,
    wired_table_structure_recognition_model_dir=None,
    wireless_table_structure_recognition_model_name=None,
    wireless_table_structure_recognition_model_dir=None,
    wired_table_cells_detection_model_name=None,
    wired_table_cells_detection_model_dir=None,
    wireless_table_cells_detection_model_name=None,
    wireless_table_cells_detection_model_dir=None,
    table_orientation_classify_model_name=None,
    table_orientation_classify_model_dir=None,
    seal_text_detection_model_name=None,
    seal_text_detection_model_dir=None,
    seal_det_limit_side_len=None,
    seal_det_limit_type=None,
    seal_det_thresh=None,
    seal_det_box_thresh=None,
    seal_det_unclip_ratio=None,
    seal_text_recognition_model_name=None,
    seal_text_recognition_model_dir=None,
    seal_text_recognition_batch_size=None,
    seal_rec_score_thresh=None,
    formula_recognition_model_name=None,
    formula_recognition_model_dir=None,
    formula_recognition_batch_size=None,
    use_doc_orientation_classify=None,
    use_doc_unwarping=None,
    use_textline_orientation=None,
    use_seal_recognition=None,
    use_table_recognition=None,
    use_formula_recognition=None,
    use_chart_recognition=None,
    use_region_detection=None,
    format_block_content=None,
    markdown_ignore_labels=None,
    lang=None,
    ocr_version=None,
    **kwargs,
):
    """Initialize the PP-StructureV3 wrapper.

    All model/threshold arguments default to ``None``, meaning "use the
    underlying pipeline's default". `lang` and `ocr_version` select default
    OCR model names, but only when no text detection/recognition model name
    or directory was given explicitly; otherwise they are ignored with a
    warning. Raises ``ValueError`` for an unsupported `ocr_version` or when
    no models exist for the requested `lang`/`ocr_version` combination.
    """
    if ocr_version is not None and ocr_version not in _SUPPORTED_OCR_VERSIONS:
        raise ValueError(
            f"Invalid OCR version: {ocr_version}. Supported values are {_SUPPORTED_OCR_VERSIONS}."
        )
    # Only derive model names from `lang`/`ocr_version` when the user did not
    # pin any OCR model explicitly.
    if all(
        map(
            lambda p: p is None,
            (
                text_detection_model_name,
                text_detection_model_dir,
                text_recognition_model_name,
                text_recognition_model_dir,
            ),
        )
    ):
        if lang is not None or ocr_version is not None:
            det_model_name, rec_model_name = self._get_ocr_model_names(
                lang, ocr_version
            )
            if det_model_name is None or rec_model_name is None:
                raise ValueError(
                    f"No models are available for the language {repr(lang)} and OCR version {repr(ocr_version)}."
                )
            text_detection_model_name = det_model_name
            text_recognition_model_name = rec_model_name
    else:
        if lang is not None or ocr_version is not None:
            warnings.warn(
                "`lang` and `ocr_version` will be ignored when model names or model directories are not `None`.",
                stacklevel=2,
            )
    # Snapshot the local scope as the parameter record; note that any extra
    # locals created above (e.g. det_model_name) also land in this dict, but
    # `_get_paddlex_config_overrides` only reads known keys.
    params = locals().copy()
    # Re-store the possibly rewritten model names explicitly.
    params["text_detection_model_name"] = text_detection_model_name
    params["text_recognition_model_name"] = text_recognition_model_name
    params.pop("self")
    params.pop("kwargs")
    self._params = params
    super().__init__(**kwargs)
@property
def _paddlex_pipeline_name(self):
    """Identifier of the wrapped PaddleX pipeline."""
    pipeline_name = "PP-StructureV3"
    return pipeline_name
def predict_iter(
    self,
    input,
    *,
    use_doc_orientation_classify=None,
    use_doc_unwarping=None,
    use_textline_orientation=None,
    use_seal_recognition=None,
    use_table_recognition=None,
    use_formula_recognition=None,
    use_chart_recognition=None,
    use_region_detection=None,
    format_block_content=None,
    layout_threshold=None,
    layout_nms=None,
    layout_unclip_ratio=None,
    layout_merge_bboxes_mode=None,
    text_det_limit_side_len=None,
    text_det_limit_type=None,
    text_det_thresh=None,
    text_det_box_thresh=None,
    text_det_unclip_ratio=None,
    text_rec_score_thresh=None,
    seal_det_limit_side_len=None,
    seal_det_limit_type=None,
    seal_det_thresh=None,
    seal_det_box_thresh=None,
    seal_det_unclip_ratio=None,
    seal_rec_score_thresh=None,
    use_wired_table_cells_trans_to_html=False,
    use_wireless_table_cells_trans_to_html=False,
    use_table_orientation_classify=True,
    use_ocr_results_with_table_cells=True,
    use_e2e_wired_table_rec_model=False,
    use_e2e_wireless_table_rec_model=True,
    markdown_ignore_labels=None,
    **kwargs,
):
    """Run structure prediction lazily, forwarding all options to PaddleX.

    `None` arguments fall back to construction-time settings or pipeline
    defaults; extra keyword arguments pass through unchanged.
    """
    forwarded = dict(
        use_doc_orientation_classify=use_doc_orientation_classify,
        use_doc_unwarping=use_doc_unwarping,
        use_textline_orientation=use_textline_orientation,
        use_seal_recognition=use_seal_recognition,
        use_table_recognition=use_table_recognition,
        use_formula_recognition=use_formula_recognition,
        use_chart_recognition=use_chart_recognition,
        use_region_detection=use_region_detection,
        format_block_content=format_block_content,
        layout_threshold=layout_threshold,
        layout_nms=layout_nms,
        layout_unclip_ratio=layout_unclip_ratio,
        layout_merge_bboxes_mode=layout_merge_bboxes_mode,
        text_det_limit_side_len=text_det_limit_side_len,
        text_det_limit_type=text_det_limit_type,
        text_det_thresh=text_det_thresh,
        text_det_box_thresh=text_det_box_thresh,
        text_det_unclip_ratio=text_det_unclip_ratio,
        text_rec_score_thresh=text_rec_score_thresh,
        seal_det_limit_side_len=seal_det_limit_side_len,
        seal_det_limit_type=seal_det_limit_type,
        seal_det_thresh=seal_det_thresh,
        seal_det_box_thresh=seal_det_box_thresh,
        seal_det_unclip_ratio=seal_det_unclip_ratio,
        seal_rec_score_thresh=seal_rec_score_thresh,
        use_wired_table_cells_trans_to_html=use_wired_table_cells_trans_to_html,
        use_wireless_table_cells_trans_to_html=use_wireless_table_cells_trans_to_html,
        use_table_orientation_classify=use_table_orientation_classify,
        use_ocr_results_with_table_cells=use_ocr_results_with_table_cells,
        use_e2e_wired_table_rec_model=use_e2e_wired_table_rec_model,
        use_e2e_wireless_table_rec_model=use_e2e_wireless_table_rec_model,
        markdown_ignore_labels=markdown_ignore_labels,
    )
    # `kwargs` can never collide with the named options above: any matching
    # keyword would have been bound to its parameter instead.
    forwarded.update(kwargs)
    return self.paddlex_pipeline.predict(input, **forwarded)
def predict(
    self,
    input,
    *,
    use_doc_orientation_classify=None,
    use_doc_unwarping=None,
    use_textline_orientation=None,
    use_seal_recognition=None,
    use_table_recognition=None,
    use_formula_recognition=None,
    use_chart_recognition=None,
    use_region_detection=None,
    format_block_content=None,
    layout_threshold=None,
    layout_nms=None,
    layout_unclip_ratio=None,
    layout_merge_bboxes_mode=None,
    text_det_limit_side_len=None,
    text_det_limit_type=None,
    text_det_thresh=None,
    text_det_box_thresh=None,
    text_det_unclip_ratio=None,
    text_rec_score_thresh=None,
    seal_det_limit_side_len=None,
    seal_det_limit_type=None,
    seal_det_thresh=None,
    seal_det_box_thresh=None,
    seal_det_unclip_ratio=None,
    seal_rec_score_thresh=None,
    use_wired_table_cells_trans_to_html=False,
    use_wireless_table_cells_trans_to_html=False,
    use_table_orientation_classify=True,
    use_ocr_results_with_table_cells=True,
    use_e2e_wired_table_rec_model=False,
    use_e2e_wireless_table_rec_model=True,
    markdown_ignore_labels=None,
    **kwargs,
):
    """Eager variant of `predict_iter`: materialize all results into a list."""
    result_iterator = self.predict_iter(
        input,
        use_doc_orientation_classify=use_doc_orientation_classify,
        use_doc_unwarping=use_doc_unwarping,
        use_textline_orientation=use_textline_orientation,
        use_seal_recognition=use_seal_recognition,
        use_table_recognition=use_table_recognition,
        use_formula_recognition=use_formula_recognition,
        use_chart_recognition=use_chart_recognition,
        use_region_detection=use_region_detection,
        format_block_content=format_block_content,
        layout_threshold=layout_threshold,
        layout_nms=layout_nms,
        layout_unclip_ratio=layout_unclip_ratio,
        layout_merge_bboxes_mode=layout_merge_bboxes_mode,
        text_det_limit_side_len=text_det_limit_side_len,
        text_det_limit_type=text_det_limit_type,
        text_det_thresh=text_det_thresh,
        text_det_box_thresh=text_det_box_thresh,
        text_det_unclip_ratio=text_det_unclip_ratio,
        text_rec_score_thresh=text_rec_score_thresh,
        seal_det_limit_side_len=seal_det_limit_side_len,
        seal_det_limit_type=seal_det_limit_type,
        seal_det_thresh=seal_det_thresh,
        seal_det_box_thresh=seal_det_box_thresh,
        seal_det_unclip_ratio=seal_det_unclip_ratio,
        seal_rec_score_thresh=seal_rec_score_thresh,
        use_wired_table_cells_trans_to_html=use_wired_table_cells_trans_to_html,
        use_wireless_table_cells_trans_to_html=use_wireless_table_cells_trans_to_html,
        use_table_orientation_classify=use_table_orientation_classify,
        use_ocr_results_with_table_cells=use_ocr_results_with_table_cells,
        use_e2e_wired_table_rec_model=use_e2e_wired_table_rec_model,
        use_e2e_wireless_table_rec_model=use_e2e_wireless_table_rec_model,
        markdown_ignore_labels=markdown_ignore_labels,
        **kwargs,
    )
    return [res for res in result_iterator]
def concatenate_markdown_pages(self, markdown_list):
return self.paddlex_pipeline.concatenate_markdown_pages(markdown_list)
@classmethod
def get_cli_subcommand_executor(cls):
return PPStructureV3CLISubcommandExecutor()
def _get_paddlex_config_overrides(self):
STRUCTURE = {
"SubPipelines.DocPreprocessor.use_doc_orientation_classify": self._params[
"use_doc_orientation_classify"
],
"SubPipelines.DocPreprocessor.use_doc_unwarping": self._params[
"use_doc_unwarping"
],
"use_doc_preprocessor": self._params["use_doc_orientation_classify"]
or self._params["use_doc_unwarping"],
"SubPipelines.GeneralOCR.use_textline_orientation": self._params[
"use_textline_orientation"
],
"use_seal_recognition": self._params["use_seal_recognition"],
"use_table_recognition": self._params["use_table_recognition"],
"use_formula_recognition": self._params["use_formula_recognition"],
"use_chart_recognition": self._params["use_chart_recognition"],
"use_region_detection": self._params["use_region_detection"],
"format_block_content": self._params["format_block_content"],
"markdown_ignore_labels": self._params["markdown_ignore_labels"],
"SubModules.LayoutDetection.model_name": self._params[
"layout_detection_model_name"
],
"SubModules.LayoutDetection.model_dir": self._params[
"layout_detection_model_dir"
],
"SubModules.LayoutDetection.threshold": self._params["layout_threshold"],
"SubModules.LayoutDetection.layout_nms": self._params["layout_nms"],
"SubModules.LayoutDetection.layout_unclip_ratio": self._params[
"layout_unclip_ratio"
],
"SubModules.LayoutDetection.layout_merge_bboxes_mode": self._params[
"layout_merge_bboxes_mode"
],
"SubModules.ChartRecognition.model_name": self._params[
"chart_recognition_model_name"
],
"SubModules.ChartRecognition.model_dir": self._params[
"chart_recognition_model_dir"
],
"SubModules.ChartRecognition.batch_size": self._params[
"chart_recognition_batch_size"
],
"SubModules.RegionDetection.model_name": self._params[
"region_detection_model_name"
],
"SubModules.RegionDetection.model_dir": self._params[
"region_detection_model_dir"
],
"SubPipelines.DocPreprocessor.SubModules.DocOrientationClassify.model_name": self._params[
"doc_orientation_classify_model_name"
],
"SubPipelines.DocPreprocessor.SubModules.DocOrientationClassify.model_dir": self._params[
"doc_orientation_classify_model_dir"
],
"SubPipelines.DocPreprocessor.SubModules.DocUnwarping.model_name": self._params[
"doc_unwarping_model_name"
],
"SubPipelines.DocPreprocessor.SubModules.DocUnwarping.model_dir": self._params[
"doc_unwarping_model_dir"
],
"SubPipelines.GeneralOCR.SubModules.TextDetection.model_name": self._params[
"text_detection_model_name"
],
"SubPipelines.GeneralOCR.SubModules.TextDetection.model_dir": self._params[
"text_detection_model_dir"
],
"SubPipelines.GeneralOCR.SubModules.TextDetection.limit_side_len": self._params[
"text_det_limit_side_len"
],
"SubPipelines.GeneralOCR.SubModules.TextDetection.limit_type": self._params[
"text_det_limit_type"
],
"SubPipelines.GeneralOCR.SubModules.TextDetection.thresh": self._params[
"text_det_thresh"
],
"SubPipelines.GeneralOCR.SubModules.TextDetection.box_thresh": self._params[
"text_det_box_thresh"
],
"SubPipelines.GeneralOCR.SubModules.TextDetection.unclip_ratio": self._params[
"text_det_unclip_ratio"
],
"SubPipelines.GeneralOCR.SubModules.TextLineOrientation.model_name": self._params[
"textline_orientation_model_name"
],
"SubPipelines.GeneralOCR.SubModules.TextLineOrientation.model_dir": self._params[
"textline_orientation_model_dir"
],
"SubPipelines.GeneralOCR.SubModules.TextLineOrientation.batch_size": self._params[
"textline_orientation_batch_size"
],
"SubPipelines.GeneralOCR.SubModules.TextRecognition.model_name": self._params[
"text_recognition_model_name"
],
"SubPipelines.GeneralOCR.SubModules.TextRecognition.model_dir": self._params[
"text_recognition_model_dir"
],
"SubPipelines.GeneralOCR.SubModules.TextRecognition.batch_size": self._params[
"text_recognition_batch_size"
],
"SubPipelines.GeneralOCR.SubModules.TextRecognition.score_thresh": self._params[
"text_rec_score_thresh"
],
"SubPipelines.TableRecognition.SubModules.TableClassification.model_name": self._params[
"table_classification_model_name"
],
"SubPipelines.TableRecognition.SubModules.TableClassification.model_dir": self._params[
"table_classification_model_dir"
],
"SubPipelines.TableRecognition.SubModules.WiredTableStructureRecognition.model_name": self._params[
"wired_table_structure_recognition_model_name"
],
"SubPipelines.TableRecognition.SubModules.WiredTableStructureRecognition.model_dir": self._params[
"wired_table_structure_recognition_model_dir"
],
"SubPipelines.TableRecognition.SubModules.WirelessTableStructureRecognition.model_name": self._params[
"wireless_table_structure_recognition_model_name"
],
"SubPipelines.TableRecognition.SubModules.WirelessTableStructureRecognition.model_dir": self._params[
"wireless_table_structure_recognition_model_dir"
],
"SubPipelines.TableRecognition.SubModules.WiredTableCellsDetection.model_name": self._params[
"wired_table_cells_detection_model_name"
],
"SubPipelines.TableRecognition.SubModules.WiredTableCellsDetection.model_dir": self._params[
"wired_table_cells_detection_model_dir"
],
"SubPipelines.TableRecognition.SubModules.WirelessTableCellsDetection.model_name": self._params[
"wireless_table_cells_detection_model_name"
],
"SubPipelines.TableRecognition.SubModules.WirelessTableCellsDetection.model_dir": self._params[
"wireless_table_cells_detection_model_dir"
],
"SubPipelines.TableRecognition.SubModules.TableOrientationClassify.model_name": self._params[
"table_orientation_classify_model_name"
],
"SubPipelines.TableRecognition.SubModules.TableOrientationClassify.model_dir": self._params[
"table_orientation_classify_model_dir"
],
"SubPipelines.TableRecognition.SubPipelines.GeneralOCR.SubModules.TextDetection.model_name": self._params[
"text_detection_model_name"
],
"SubPipelines.TableRecognition.SubPipelines.GeneralOCR.SubModules.TextDetection.model_dir": self._params[
"text_detection_model_dir"
],
"SubPipelines.TableRecognition.SubPipelines.GeneralOCR.SubModules.TextDetection.limit_side_len": self._params[
"text_det_limit_side_len"
],
"SubPipelines.TableRecognition.SubPipelines.GeneralOCR.SubModules.TextDetection.limit_type": self._params[
"text_det_limit_type"
],
"SubPipelines.TableRecognition.SubPipelines.GeneralOCR.SubModules.TextDetection.thresh": self._params[
"text_det_thresh"
],
"SubPipelines.TableRecognition.SubPipelines.GeneralOCR.SubModules.TextDetection.box_thresh": self._params[
"text_det_box_thresh"
],
"SubPipelines.TableRecognition.SubPipelines.GeneralOCR.SubModules.TextDetection.unclip_ratio": self._params[
"text_det_unclip_ratio"
],
"SubPipelines.TableRecognition.SubPipelines.GeneralOCR.SubModules.TextLineOrientation.model_name": self._params[
"textline_orientation_model_name"
],
"SubPipelines.TableRecognition.SubPipelines.GeneralOCR.SubModules.TextLineOrientation.model_dir": self._params[
"textline_orientation_model_dir"
],
"SubPipelines.TableRecognition.SubPipelines.GeneralOCR.SubModules.TextLineOrientation.batch_size": self._params[
"textline_orientation_batch_size"
],
"SubPipelines.TableRecognition.SubPipelines.GeneralOCR.SubModules.TextRecognition.model_name": self._params[
"text_recognition_model_name"
],
"SubPipelines.TableRecognition.SubPipelines.GeneralOCR.SubModules.TextRecognition.model_dir": self._params[
"text_recognition_model_dir"
],
"SubPipelines.TableRecognition.SubPipelines.GeneralOCR.SubModules.TextRecognition.batch_size": self._params[
"text_recognition_batch_size"
],
"SubPipelines.TableRecognition.SubPipelines.GeneralOCR.SubModules.TextRecognition.score_thresh": self._params[
"text_rec_score_thresh"
],
"SubPipelines.SealRecognition.SubPipelines.SealOCR.SubModules.TextDetection.model_name": self._params[
"seal_text_detection_model_name"
],
"SubPipelines.SealRecognition.SubPipelines.SealOCR.SubModules.TextDetection.model_dir": self._params[
"seal_text_detection_model_dir"
],
"SubPipelines.SealRecognition.SubPipelines.SealOCR.SubModules.TextDetection.limit_side_len": self._params[
"text_det_limit_side_len"
],
"SubPipelines.SealRecognition.SubPipelines.SealOCR.SubModules.TextDetection.limit_type": self._params[
"seal_det_limit_type"
],
"SubPipelines.SealRecognition.SubPipelines.SealOCR.SubModules.TextDetection.thresh": self._params[
"seal_det_thresh"
],
"SubPipelines.SealRecognition.SubPipelines.SealOCR.SubModules.TextDetection.box_thresh": self._params[
"seal_det_box_thresh"
],
"SubPipelines.SealRecognition.SubPipelines.SealOCR.SubModules.TextDetection.unclip_ratio": self._params[
"seal_det_unclip_ratio"
],
"SubPipelines.SealRecognition.SubPipelines.SealOCR.SubModules.TextRecognition.model_name": self._params[
"seal_text_recognition_model_name"
],
"SubPipelines.SealRecognition.SubPipelines.SealOCR.SubModules.TextRecognition.model_dir": self._params[
"seal_text_recognition_model_dir"
],
"SubPipelines.SealRecognition.SubPipelines.SealOCR.SubModules.TextRecognition.batch_size": self._params[
"seal_text_recognition_batch_size"
],
"SubPipelines.FormulaRecognition.SubModules.FormulaRecognition.model_name": self._params[
"formula_recognition_model_name"
],
"SubPipelines.FormulaRecognition.SubModules.FormulaRecognition.model_dir": self._params[
"formula_recognition_model_dir"
],
"SubPipelines.FormulaRecognition.SubModules.FormulaRecognition.batch_size": self._params[
"formula_recognition_batch_size"
],
}
return create_config_from_structure(STRUCTURE)
def _get_ocr_model_names(self, lang, ppocr_version):
LATIN_LANGS = [
"af",
"az",
"bs",
"cs",
"cy",
"da",
"de",
"es",
"et",
"fr",
"ga",
"hr",
"hu",
"id",
"is",
"it",
"ku",
"la",
"lt",
"lv",
"mi",
"ms",
"mt",
"nl",
"no",
"oc",
"pi",
"pl",
"pt",
"ro",
"rs_latin",
"sk",
"sl",
"sq",
"sv",
"sw",
"tl",
"tr",
"uz",
"vi",
"french",
"german",
]
ARABIC_LANGS = ["ar", "fa", "ug", "ur"]
ESLAV_LANGS = ["ru", "be", "uk"]
CYRILLIC_LANGS = [
"ru",
"rs_cyrillic",
"be",
"bg",
"uk",
"mn",
"abq",
"ady",
"kbd",
"ava",
"dar",
"inh",
"che",
"lbe",
"lez",
"tab",
]
DEVANAGARI_LANGS = [
"hi",
"mr",
"ne",
"bh",
"mai",
"ang",
"bho",
"mah",
"sck",
"new",
"gom",
"sa",
"bgc",
]
SPECIFIC_LANGS = [
"ch",
"en",
"korean",
"japan",
"chinese_cht",
"te",
"ka",
"ta",
]
if lang is None:
lang = "ch"
if ppocr_version is None:
if (
lang
in ["ch", "chinese_cht", "en", "japan", "korean", "th", "el"]
+ LATIN_LANGS
+ ESLAV_LANGS
):
ppocr_version = "PP-OCRv5"
elif lang in (
LATIN_LANGS
+ ARABIC_LANGS
+ CYRILLIC_LANGS
+ DEVANAGARI_LANGS
+ SPECIFIC_LANGS
):
ppocr_version = "PP-OCRv3"
else:
# Unknown language specified
return None, None
if ppocr_version == "PP-OCRv5":
rec_lang, rec_model_name = None, None
if lang in ("ch", "chinese_cht", "en", "japan"):
rec_model_name = "PP-OCRv5_server_rec"
elif lang in LATIN_LANGS:
rec_lang = "latin"
elif lang in ESLAV_LANGS:
rec_lang = "eslav"
elif lang == "korean":
rec_lang = "korean"
elif lang == "th":
rec_lang = "th"
elif lang == "el":
rec_lang = "el"
if rec_lang is not None:
rec_model_name = f"{rec_lang}_PP-OCRv5_mobile_rec"
return "PP-OCRv5_server_det", rec_model_name
elif ppocr_version == "PP-OCRv4":
if lang == "ch":
return "PP-OCRv4_mobile_det", "PP-OCRv4_mobile_rec"
elif lang == "en":
return "PP-OCRv4_mobile_det", "en_PP-OCRv4_mobile_rec"
else:
return None, None
else:
# PP-OCRv3
rec_lang = None
if lang in LATIN_LANGS:
rec_lang = "latin"
elif lang in ARABIC_LANGS:
rec_lang = "arabic"
elif lang in CYRILLIC_LANGS:
rec_lang = "cyrillic"
elif lang in DEVANAGARI_LANGS:
rec_lang = "devanagari"
else:
if lang in SPECIFIC_LANGS:
rec_lang = lang
rec_model_name = None
if rec_lang == "ch":
rec_model_name = "PP-OCRv3_mobile_rec"
elif rec_lang is not None:
rec_model_name = f"{rec_lang}_PP-OCRv3_mobile_rec"
return "PP-OCRv3_mobile_det", rec_model_name
class PPStructureV3CLISubcommandExecutor(PipelineCLISubcommandExecutor):
    """CLI subcommand (`pp_structurev3`) for the PP-StructureV3 pipeline."""

    @property
    def subparser_name(self):
        return "pp_structurev3"

    def _update_subparser(self, subparser):
        """Register all PP-StructureV3 command-line options on *subparser*."""
        add_simple_inference_args(subparser)
        subparser.add_argument(
            "--layout_detection_model_name",
            type=str,
            help="Name of the layout detection model.",
        )
        subparser.add_argument(
            "--layout_detection_model_dir",
            type=str,
            help="Path to the layout detection model directory.",
        )
        subparser.add_argument(
            "--layout_threshold",
            type=float,
            help="Score threshold for the layout detection model.",
        )
        subparser.add_argument(
            "--layout_nms",
            type=str2bool,
            help="Whether to use NMS in layout detection.",
        )
        subparser.add_argument(
            "--layout_unclip_ratio",
            type=float,
            help="Expansion coefficient for layout detection.",
        )
        subparser.add_argument(
            "--layout_merge_bboxes_mode",
            type=str,
            help="Overlapping box filtering method.",
        )
        subparser.add_argument(
            "--chart_recognition_model_name",
            type=str,
            help="Name of the chart recognition model.",
        )
        subparser.add_argument(
            "--chart_recognition_model_dir",
            type=str,
            help="Path to the chart recognition model directory.",
        )
        subparser.add_argument(
            "--chart_recognition_batch_size",
            type=int,
            help="Batch size for the chart recognition model.",
        )
        subparser.add_argument(
            "--region_detection_model_name",
            type=str,
            help="Name of the region detection model.",
        )
        subparser.add_argument(
            "--region_detection_model_dir",
            type=str,
            help="Path to the region detection model directory.",
        )
        subparser.add_argument(
            "--doc_orientation_classify_model_name",
            type=str,
            help="Name of the document image orientation classification model.",
        )
        subparser.add_argument(
            "--doc_orientation_classify_model_dir",
            type=str,
            help="Path to the document image orientation classification model directory.",
        )
        subparser.add_argument(
            "--doc_unwarping_model_name",
            type=str,
            help="Name of the text image unwarping model.",
        )
        subparser.add_argument(
            "--doc_unwarping_model_dir",
            type=str,
            help="Path to the image unwarping model directory.",
        )
        subparser.add_argument(
            "--text_detection_model_name",
            type=str,
            help="Name of the text detection model.",
        )
        subparser.add_argument(
            "--text_detection_model_dir",
            type=str,
            help="Path to the text detection model directory.",
        )
        subparser.add_argument(
            "--text_det_limit_side_len",
            type=int,
            help="This sets a limit on the side length of the input image for the text detection model.",
        )
        subparser.add_argument(
            "--text_det_limit_type",
            type=str,
            # FIX: corrected "deteciton" typo in the help text.
            help="This determines how the side length limit is applied to the input image before feeding it into the text detection model.",
        )
        subparser.add_argument(
            "--text_det_thresh",
            type=float,
            help="Detection pixel threshold for the text detection model. Pixels with scores greater than this threshold in the output probability map are considered text pixels.",
        )
        subparser.add_argument(
            "--text_det_box_thresh",
            type=float,
            help="Detection box threshold for the text detection model. A detection result is considered a text region if the average score of all pixels within the border of the result is greater than this threshold.",
        )
        subparser.add_argument(
            "--text_det_unclip_ratio",
            type=float,
            help="Text detection expansion coefficient, which expands the text region using this method. The larger the value, the larger the expansion area.",
        )
        subparser.add_argument(
            "--textline_orientation_model_name",
            type=str,
            help="Name of the text line orientation classification model.",
        )
        subparser.add_argument(
            "--textline_orientation_model_dir",
            type=str,
            help="Path to the text line orientation classification directory.",
        )
        subparser.add_argument(
            "--textline_orientation_batch_size",
            type=int,
            help="Batch size for the text line orientation classification model.",
        )
        subparser.add_argument(
            "--text_recognition_model_name",
            type=str,
            help="Name of the text recognition model.",
        )
        subparser.add_argument(
            "--text_recognition_model_dir",
            type=str,
            help="Path to the text recognition model directory.",
        )
        subparser.add_argument(
            "--text_recognition_batch_size",
            type=int,
            help="Batch size for the text recognition model.",
        )
        subparser.add_argument(
            "--text_rec_score_thresh",
            type=float,
            help="Text recognition threshold used in general OCR. Text results with scores greater than this threshold are retained.",
        )
        subparser.add_argument(
            "--table_classification_model_name",
            type=str,
            help="Name of the table classification model.",
        )
        subparser.add_argument(
            "--table_classification_model_dir",
            type=str,
            help="Path to the table classification model directory.",
        )
        subparser.add_argument(
            "--wired_table_structure_recognition_model_name",
            type=str,
            help="Name of the wired table structure recognition model.",
        )
        subparser.add_argument(
            "--wired_table_structure_recognition_model_dir",
            type=str,
            help="Path to the wired table structure recognition model directory.",
        )
        subparser.add_argument(
            "--wireless_table_structure_recognition_model_name",
            type=str,
            help="Name of the wireless table structure recognition model.",
        )
        subparser.add_argument(
            "--wireless_table_structure_recognition_model_dir",
            type=str,
            # FIX: help previously said "wired" for this wireless option.
            help="Path to the wireless table structure recognition model directory.",
        )
        subparser.add_argument(
            "--wired_table_cells_detection_model_name",
            type=str,
            help="Name of the wired table cells detection model.",
        )
        subparser.add_argument(
            "--wired_table_cells_detection_model_dir",
            type=str,
            help="Path to the wired table cells detection model directory.",
        )
        subparser.add_argument(
            "--wireless_table_cells_detection_model_name",
            type=str,
            help="Name of the wireless table cells detection model.",
        )
        subparser.add_argument(
            "--wireless_table_cells_detection_model_dir",
            type=str,
            help="Path to the wireless table cells detection model directory.",
        )
        subparser.add_argument(
            "--seal_text_detection_model_name",
            type=str,
            help="Name of the seal text detection model.",
        )
        subparser.add_argument(
            "--seal_text_detection_model_dir",
            type=str,
            help="Path to the seal text detection model directory.",
        )
        subparser.add_argument(
            "--seal_det_limit_side_len",
            type=int,
            help="This sets a limit on the side length of the input image for the seal text detection model.",
        )
        subparser.add_argument(
            "--seal_det_limit_type",
            type=str,
            # FIX: corrected "deteciton" typo in the help text.
            help="This determines how the side length limit is applied to the input image before feeding it into the seal text detection model.",
        )
        subparser.add_argument(
            "--seal_det_thresh",
            type=float,
            help="Detection pixel threshold for the seal text detection model. Pixels with scores greater than this threshold in the output probability map are considered text pixels.",
        )
        subparser.add_argument(
            "--seal_det_box_thresh",
            type=float,
            help="Detection box threshold for the seal text detection model. A detection result is considered a text region if the average score of all pixels within the border of the result is greater than this threshold.",
        )
        subparser.add_argument(
            "--seal_det_unclip_ratio",
            type=float,
            help="Seal text detection expansion coefficient, which expands the text region using this method. The larger the value, the larger the expansion area.",
        )
        subparser.add_argument(
            "--seal_text_recognition_model_name",
            type=str,
            help="Name of the seal text recognition model.",
        )
        subparser.add_argument(
            "--seal_text_recognition_model_dir",
            type=str,
            help="Path to the seal text recognition model directory.",
        )
        subparser.add_argument(
            "--seal_text_recognition_batch_size",
            type=int,
            help="Batch size for the seal text recognition model.",
        )
        subparser.add_argument(
            "--seal_rec_score_thresh",
            type=float,
            help="Seal text recognition threshold. Text results with scores greater than this threshold are retained.",
        )
        subparser.add_argument(
            "--formula_recognition_model_name",
            type=str,
            help="Name of the formula recognition model.",
        )
        subparser.add_argument(
            "--formula_recognition_model_dir",
            type=str,
            help="Path to the formula recognition model directory.",
        )
        subparser.add_argument(
            "--formula_recognition_batch_size",
            type=int,
            help="Batch size for the formula recognition model.",
        )
        subparser.add_argument(
            "--use_doc_orientation_classify",
            type=str2bool,
            help="Whether to use document image orientation classification.",
        )
        subparser.add_argument(
            "--use_doc_unwarping",
            type=str2bool,
            help="Whether to use text image unwarping.",
        )
        subparser.add_argument(
            "--use_textline_orientation",
            type=str2bool,
            help="Whether to use text line orientation classification.",
        )
        subparser.add_argument(
            "--use_seal_recognition",
            type=str2bool,
            help="Whether to use seal recognition.",
        )
        subparser.add_argument(
            "--use_table_recognition",
            type=str2bool,
            help="Whether to use table recognition.",
        )
        subparser.add_argument(
            "--use_formula_recognition",
            type=str2bool,
            help="Whether to use formula recognition.",
        )
        subparser.add_argument(
            "--use_chart_recognition",
            type=str2bool,
            help="Whether to use chart recognition.",
        )
        subparser.add_argument(
            "--use_region_detection",
            type=str2bool,
            help="Whether to use region detection.",
        )
        subparser.add_argument(
            "--format_block_content",
            type=str2bool,
            help="Whether to format block content to Markdown.",
        )
        subparser.add_argument(
            "--markdown_ignore_labels",
            type=str,
            nargs="+",
            help="List of layout labels to ignore in Markdown output.",
        )

    def execute_with_args(self, args):
        """Run a PP-StructureV3 inference with the parsed CLI arguments."""
        params = get_subcommand_args(args)
        perform_simple_inference(
            PPStructureV3,
            params,
        )
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_pipelines/pp_structurev3.py",
"license": "Apache License 2.0",
"lines": 998,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_pipelines/seal_recognition.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .._utils.cli import (
add_simple_inference_args,
get_subcommand_args,
perform_simple_inference,
str2bool,
)
from .base import PaddleXPipelineWrapper, PipelineCLISubcommandExecutor
from .utils import create_config_from_structure
class SealRecognition(PaddleXPipelineWrapper):
    """Wrapper around the PaddleX seal (stamp) text recognition pipeline.

    Constructor arguments select/override models and thresholds; ``None``
    means "use the pipeline default". Per-call overrides are accepted by
    :meth:`predict` / :meth:`predict_iter`.
    """

    def __init__(
        self,
        doc_orientation_classify_model_name=None,
        doc_orientation_classify_model_dir=None,
        doc_unwarping_model_name=None,
        doc_unwarping_model_dir=None,
        layout_detection_model_name=None,
        layout_detection_model_dir=None,
        seal_text_detection_model_name=None,
        seal_text_detection_model_dir=None,
        text_recognition_model_name=None,
        text_recognition_model_dir=None,
        text_recognition_batch_size=None,
        use_doc_orientation_classify=None,
        use_doc_unwarping=None,
        use_layout_detection=None,
        layout_threshold=None,
        layout_nms=None,
        layout_unclip_ratio=None,
        layout_merge_bboxes_mode=None,
        seal_det_limit_side_len=None,
        seal_det_limit_type=None,
        seal_det_thresh=None,
        seal_det_box_thresh=None,
        seal_det_unclip_ratio=None,
        seal_rec_score_thresh=None,
        **kwargs,
    ):
        # Keep every configuration knob; consumed later when the PaddleX
        # config overrides are materialized.
        self._params = dict(
            doc_orientation_classify_model_name=doc_orientation_classify_model_name,
            doc_orientation_classify_model_dir=doc_orientation_classify_model_dir,
            doc_unwarping_model_name=doc_unwarping_model_name,
            doc_unwarping_model_dir=doc_unwarping_model_dir,
            layout_detection_model_name=layout_detection_model_name,
            layout_detection_model_dir=layout_detection_model_dir,
            seal_text_detection_model_name=seal_text_detection_model_name,
            seal_text_detection_model_dir=seal_text_detection_model_dir,
            text_recognition_model_name=text_recognition_model_name,
            text_recognition_model_dir=text_recognition_model_dir,
            text_recognition_batch_size=text_recognition_batch_size,
            use_doc_orientation_classify=use_doc_orientation_classify,
            use_doc_unwarping=use_doc_unwarping,
            use_layout_detection=use_layout_detection,
            layout_threshold=layout_threshold,
            layout_nms=layout_nms,
            layout_unclip_ratio=layout_unclip_ratio,
            layout_merge_bboxes_mode=layout_merge_bboxes_mode,
            seal_det_limit_side_len=seal_det_limit_side_len,
            seal_det_limit_type=seal_det_limit_type,
            seal_det_thresh=seal_det_thresh,
            seal_det_box_thresh=seal_det_box_thresh,
            seal_det_unclip_ratio=seal_det_unclip_ratio,
            seal_rec_score_thresh=seal_rec_score_thresh,
        )
        super().__init__(**kwargs)

    @property
    def _paddlex_pipeline_name(self):
        return "seal_recognition"

    def predict_iter(
        self,
        input,
        *,
        use_doc_orientation_classify=None,
        use_doc_unwarping=None,
        use_layout_detection=None,
        layout_det_res=None,
        layout_threshold=None,
        layout_nms=None,
        layout_unclip_ratio=None,
        layout_merge_bboxes_mode=None,
        seal_det_limit_side_len=None,
        seal_det_limit_type=None,
        seal_det_thresh=None,
        seal_det_box_thresh=None,
        seal_det_unclip_ratio=None,
        seal_rec_score_thresh=None,
        **kwargs,
    ):
        """Lazily yield seal recognition results for *input*."""
        overrides = dict(
            use_doc_orientation_classify=use_doc_orientation_classify,
            use_doc_unwarping=use_doc_unwarping,
            use_layout_detection=use_layout_detection,
            layout_det_res=layout_det_res,
            layout_threshold=layout_threshold,
            layout_nms=layout_nms,
            layout_unclip_ratio=layout_unclip_ratio,
            layout_merge_bboxes_mode=layout_merge_bboxes_mode,
            seal_det_limit_side_len=seal_det_limit_side_len,
            seal_det_limit_type=seal_det_limit_type,
            seal_det_thresh=seal_det_thresh,
            seal_det_box_thresh=seal_det_box_thresh,
            seal_det_unclip_ratio=seal_det_unclip_ratio,
            seal_rec_score_thresh=seal_rec_score_thresh,
        )
        # Duplicate keys in **kwargs raise TypeError, same as explicit kwargs.
        return self.paddlex_pipeline.predict(input, **overrides, **kwargs)

    def predict(
        self,
        input,
        *,
        use_doc_orientation_classify=None,
        use_doc_unwarping=None,
        use_layout_detection=None,
        layout_det_res=None,
        layout_threshold=None,
        layout_nms=None,
        layout_unclip_ratio=None,
        layout_merge_bboxes_mode=None,
        seal_det_limit_side_len=None,
        seal_det_limit_type=None,
        seal_det_thresh=None,
        seal_det_box_thresh=None,
        seal_det_unclip_ratio=None,
        seal_rec_score_thresh=None,
        **kwargs,
    ):
        """Run seal recognition eagerly and return the results as a list."""
        overrides = dict(
            use_doc_orientation_classify=use_doc_orientation_classify,
            use_doc_unwarping=use_doc_unwarping,
            use_layout_detection=use_layout_detection,
            layout_det_res=layout_det_res,
            layout_threshold=layout_threshold,
            layout_nms=layout_nms,
            layout_unclip_ratio=layout_unclip_ratio,
            layout_merge_bboxes_mode=layout_merge_bboxes_mode,
            seal_det_limit_side_len=seal_det_limit_side_len,
            seal_det_limit_type=seal_det_limit_type,
            seal_det_thresh=seal_det_thresh,
            seal_det_box_thresh=seal_det_box_thresh,
            seal_det_unclip_ratio=seal_det_unclip_ratio,
            seal_rec_score_thresh=seal_rec_score_thresh,
        )
        return list(self.predict_iter(input, **overrides, **kwargs))

    @classmethod
    def get_cli_subcommand_executor(cls):
        """Return the CLI subcommand executor for this pipeline."""
        return SealRecognitionCLISubcommandExecutor()

    def _get_paddlex_config_overrides(self):
        """Map constructor parameters onto PaddleX config override paths."""
        p = self._params
        # Config path -> name of the constructor parameter that feeds it.
        path_to_param = {
            "SubPipelines.DocPreprocessor.SubModules.DocOrientationClassify.model_name": "doc_orientation_classify_model_name",
            "SubPipelines.DocPreprocessor.SubModules.DocOrientationClassify.model_dir": "doc_orientation_classify_model_dir",
            "SubPipelines.DocPreprocessor.SubModules.DocUnwarping.model_name": "doc_unwarping_model_name",
            "SubPipelines.DocPreprocessor.SubModules.DocUnwarping.model_dir": "doc_unwarping_model_dir",
            "SubModules.LayoutDetection.model_name": "layout_detection_model_name",
            "SubModules.LayoutDetection.model_dir": "layout_detection_model_dir",
            "SubModules.LayoutDetection.threshold": "layout_threshold",
            "SubModules.LayoutDetection.layout_nms": "layout_nms",
            "SubModules.LayoutDetection.layout_unclip_ratio": "layout_unclip_ratio",
            "SubModules.LayoutDetection.layout_merge_bboxes_mode": "layout_merge_bboxes_mode",
            "SubPipelines.DocPreprocessor.use_doc_orientation_classify": "use_doc_orientation_classify",
            "SubPipelines.DocPreprocessor.use_doc_unwarping": "use_doc_unwarping",
            "SubPipelines.SealOCR.SubModules.TextDetection.model_name": "seal_text_detection_model_name",
            "SubPipelines.SealOCR.SubModules.TextDetection.model_dir": "seal_text_detection_model_dir",
            "SubPipelines.SealOCR.SubModules.TextDetection.limit_side_len": "seal_det_limit_side_len",
            "SubPipelines.SealOCR.SubModules.TextDetection.limit_type": "seal_det_limit_type",
            "SubPipelines.SealOCR.SubModules.TextDetection.thresh": "seal_det_thresh",
            "SubPipelines.SealOCR.SubModules.TextDetection.box_thresh": "seal_det_box_thresh",
            "SubPipelines.SealOCR.SubModules.TextDetection.unclip_ratio": "seal_det_unclip_ratio",
            "SubPipelines.SealOCR.SubModules.TextRecognition.model_name": "text_recognition_model_name",
            "SubPipelines.SealOCR.SubModules.TextRecognition.model_dir": "text_recognition_model_dir",
            "SubPipelines.SealOCR.SubModules.TextRecognition.batch_size": "text_recognition_batch_size",
            "SubPipelines.SealOCR.SubModules.TextRecognition.score_thresh": "seal_rec_score_thresh",
            "use_layout_detection": "use_layout_detection",
        }
        structure = {path: p[name] for path, name in path_to_param.items()}
        # Preprocessing is enabled iff at least one preprocessing step is on.
        structure["use_doc_preprocessor"] = (
            p["use_doc_orientation_classify"] or p["use_doc_unwarping"]
        )
        return create_config_from_structure(structure)
class SealRecognitionCLISubcommandExecutor(PipelineCLISubcommandExecutor):
    """CLI subcommand (`seal_recognition`) for the seal recognition pipeline."""

    @property
    def subparser_name(self):
        return "seal_recognition"

    def _update_subparser(self, subparser):
        """Register all seal-recognition command-line options on *subparser*."""
        add_simple_inference_args(subparser)
        subparser.add_argument(
            "--doc_orientation_classify_model_name",
            type=str,
            help="Name of the document image orientation classification model.",
        )
        subparser.add_argument(
            "--doc_orientation_classify_model_dir",
            type=str,
            help="Path to the document image orientation classification model directory.",
        )
        subparser.add_argument(
            "--doc_unwarping_model_name",
            type=str,
            help="Name of the document image unwarping model.",
        )
        subparser.add_argument(
            "--doc_unwarping_model_dir",
            type=str,
            help="Path to the document image unwarping model directory.",
        )
        subparser.add_argument(
            "--layout_detection_model_name",
            type=str,
            help="Name of the layout detection model.",
        )
        subparser.add_argument(
            "--layout_detection_model_dir",
            type=str,
            help="Path to the layout detection model directory.",
        )
        subparser.add_argument(
            "--seal_text_detection_model_name",
            type=str,
            help="Name of the seal text detection model.",
        )
        subparser.add_argument(
            "--seal_text_detection_model_dir",
            type=str,
            help="Path to the seal text detection model directory.",
        )
        subparser.add_argument(
            "--text_recognition_model_name",
            type=str,
            help="Name of the text recognition model.",
        )
        subparser.add_argument(
            "--text_recognition_model_dir",
            type=str,
            help="Path to the text recognition model directory.",
        )
        subparser.add_argument(
            "--text_recognition_batch_size",
            type=int,
            help="Batch size for the text recognition model.",
        )
        subparser.add_argument(
            "--use_doc_orientation_classify",
            type=str2bool,
            help="Whether to use document image orientation classification.",
        )
        subparser.add_argument(
            "--use_doc_unwarping",
            type=str2bool,
            help="Whether to use document image unwarping.",
        )
        subparser.add_argument(
            "--use_layout_detection",
            type=str2bool,
            help="Whether to use layout detection.",
        )
        subparser.add_argument(
            "--layout_threshold",
            type=float,
            help="Threshold for layout detection model.",
        )
        subparser.add_argument(
            "--layout_nms",
            type=str2bool,
            # FIX: the option is a boolean toggle (str2bool), not a threshold;
            # wording now matches the pp_structurev3 subcommand.
            help="Whether to use NMS in layout detection.",
        )
        subparser.add_argument(
            "--layout_unclip_ratio",
            type=float,
            help="Layout detection expansion coefficient.",
        )
        subparser.add_argument(
            "--layout_merge_bboxes_mode",
            type=str,
            help="Mode for merging bounding boxes in layout detection.",
        )
        subparser.add_argument(
            "--seal_det_limit_side_len",
            type=int,
            help="This sets a limit on the side length of the input image for the seal text detection model.",
        )
        subparser.add_argument(
            "--seal_det_limit_type",
            type=str,
            help="This determines how the side length limit is applied to the input image before feeding it into the seal text detection model.",
        )
        subparser.add_argument(
            "--seal_det_thresh",
            type=float,
            help="Detection pixel threshold for the seal text detection model. Pixels with scores greater than this threshold in the output probability map are considered text pixels.",
        )
        subparser.add_argument(
            "--seal_det_box_thresh",
            type=float,
            help="Detection box threshold for the seal text detection model. A detection result is considered a text region if the average score of all pixels within the border of the result is greater than this threshold.",
        )
        subparser.add_argument(
            "--seal_det_unclip_ratio",
            type=float,
            help="Seal text detection expansion coefficient, which expands the text region using this method. The larger the value, the larger the expansion area.",
        )
        subparser.add_argument(
            "--seal_rec_score_thresh",
            type=float,
            help="Text recognition threshold. Text results with scores greater than this threshold are retained.",
        )

    def execute_with_args(self, args):
        """Run a seal-recognition inference with the parsed CLI arguments."""
        params = get_subcommand_args(args)
        perform_simple_inference(SealRecognition, params)
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_pipelines/seal_recognition.py",
"license": "Apache License 2.0",
"lines": 362,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_pipelines/table_recognition_v2.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .._utils.cli import (
add_simple_inference_args,
get_subcommand_args,
perform_simple_inference,
str2bool,
)
from .base import PaddleXPipelineWrapper, PipelineCLISubcommandExecutor
from .utils import create_config_from_structure
class TableRecognitionPipelineV2(PaddleXPipelineWrapper):
def __init__(
self,
layout_detection_model_name=None,
layout_detection_model_dir=None,
table_classification_model_name=None,
table_classification_model_dir=None,
wired_table_structure_recognition_model_name=None,
wired_table_structure_recognition_model_dir=None,
wireless_table_structure_recognition_model_name=None,
wireless_table_structure_recognition_model_dir=None,
wired_table_cells_detection_model_name=None,
wired_table_cells_detection_model_dir=None,
wireless_table_cells_detection_model_name=None,
wireless_table_cells_detection_model_dir=None,
doc_orientation_classify_model_name=None,
doc_orientation_classify_model_dir=None,
doc_unwarping_model_name=None,
doc_unwarping_model_dir=None,
text_detection_model_name=None,
text_detection_model_dir=None,
text_det_limit_side_len=None,
text_det_limit_type=None,
text_det_thresh=None,
text_det_box_thresh=None,
text_det_unclip_ratio=None,
text_recognition_model_name=None,
text_recognition_model_dir=None,
text_recognition_batch_size=None,
text_rec_score_thresh=None,
use_doc_orientation_classify=None,
use_doc_unwarping=None,
use_layout_detection=None,
use_ocr_model=None,
**kwargs,
):
params = locals().copy()
params.pop("self")
params.pop("kwargs")
self._params = params
super().__init__(**kwargs)
@property
def _paddlex_pipeline_name(self):
return "table_recognition_v2"
def predict_iter(
self,
input,
*,
use_doc_orientation_classify=None,
use_doc_unwarping=None,
use_layout_detection=None,
use_ocr_model=None,
overall_ocr_res=None,
layout_det_res=None,
text_det_limit_side_len=None,
text_det_limit_type=None,
text_det_thresh=None,
text_det_box_thresh=None,
text_det_unclip_ratio=None,
text_rec_score_thresh=None,
use_e2e_wired_table_rec_model=False,
use_e2e_wireless_table_rec_model=False,
use_wired_table_cells_trans_to_html=False,
use_wireless_table_cells_trans_to_html=False,
use_table_orientation_classify=True,
use_ocr_results_with_table_cells=True,
**kwargs,
):
return self.paddlex_pipeline.predict(
input,
use_doc_orientation_classify=use_doc_orientation_classify,
use_doc_unwarping=use_doc_unwarping,
use_layout_detection=use_layout_detection,
use_ocr_model=use_ocr_model,
overall_ocr_res=overall_ocr_res,
layout_det_res=layout_det_res,
text_det_limit_side_len=text_det_limit_side_len,
text_det_limit_type=text_det_limit_type,
text_det_thresh=text_det_thresh,
text_det_box_thresh=text_det_box_thresh,
text_det_unclip_ratio=text_det_unclip_ratio,
text_rec_score_thresh=text_rec_score_thresh,
use_e2e_wired_table_rec_model=use_e2e_wired_table_rec_model,
use_e2e_wireless_table_rec_model=use_e2e_wireless_table_rec_model,
use_wired_table_cells_trans_to_html=use_wired_table_cells_trans_to_html,
use_wireless_table_cells_trans_to_html=use_wireless_table_cells_trans_to_html,
use_table_orientation_classify=use_table_orientation_classify,
use_ocr_results_with_table_cells=use_ocr_results_with_table_cells,
**kwargs,
)
def predict(
self,
input,
*,
use_doc_orientation_classify=None,
use_doc_unwarping=None,
use_layout_detection=None,
use_ocr_model=None,
overall_ocr_res=None,
layout_det_res=None,
text_det_limit_side_len=None,
text_det_limit_type=None,
text_det_thresh=None,
text_det_box_thresh=None,
text_det_unclip_ratio=None,
text_rec_score_thresh=None,
use_e2e_wired_table_rec_model=False,
use_e2e_wireless_table_rec_model=False,
use_wired_table_cells_trans_to_html=False,
use_wireless_table_cells_trans_to_html=False,
use_table_orientation_classify=True,
use_ocr_results_with_table_cells=True,
**kwargs,
):
return list(
self.predict_iter(
input,
use_doc_orientation_classify=use_doc_orientation_classify,
use_doc_unwarping=use_doc_unwarping,
use_layout_detection=use_layout_detection,
use_ocr_model=use_ocr_model,
overall_ocr_res=overall_ocr_res,
layout_det_res=layout_det_res,
text_det_limit_side_len=text_det_limit_side_len,
text_det_limit_type=text_det_limit_type,
text_det_thresh=text_det_thresh,
text_det_box_thresh=text_det_box_thresh,
text_det_unclip_ratio=text_det_unclip_ratio,
text_rec_score_thresh=text_rec_score_thresh,
use_e2e_wired_table_rec_model=use_e2e_wired_table_rec_model,
use_e2e_wireless_table_rec_model=use_e2e_wireless_table_rec_model,
use_wired_table_cells_trans_to_html=use_wired_table_cells_trans_to_html,
use_wireless_table_cells_trans_to_html=use_wireless_table_cells_trans_to_html,
use_table_orientation_classify=use_table_orientation_classify,
use_ocr_results_with_table_cells=use_ocr_results_with_table_cells,
**kwargs,
)
)
@classmethod
def get_cli_subcommand_executor(cls):
return TableRecognitionPipelineV2CLISubcommandExecutor()
def _get_paddlex_config_overrides(self):
STRUCTURE = {
"SubPipelines.DocPreprocessor.use_doc_orientation_classify": self._params[
"use_doc_orientation_classify"
],
"SubPipelines.DocPreprocessor.use_doc_unwarping": self._params[
"use_doc_unwarping"
],
"use_doc_preprocessor": self._params["use_doc_orientation_classify"]
or self._params["use_doc_unwarping"],
"use_layout_detection": self._params["use_layout_detection"],
"use_ocr_model": self._params["use_ocr_model"],
"SubModules.LayoutDetection.model_name": self._params[
"layout_detection_model_name"
],
"SubModules.LayoutDetection.model_dir": self._params[
"layout_detection_model_dir"
],
"SubModules.TableClassification.model_name": self._params[
"table_classification_model_name"
],
"SubModules.TableClassification.model_dir": self._params[
"table_classification_model_dir"
],
"SubModules.WiredTableStructureRecognition.model_name": self._params[
"wired_table_structure_recognition_model_name"
],
"SubModules.WiredTableStructureRecognition.model_dir": self._params[
"wired_table_structure_recognition_model_dir"
],
"SubModules.WirelessTableStructureRecognition.model_name": self._params[
"wireless_table_structure_recognition_model_name"
],
"SubModules.WirelessTableStructureRecognition.model_dir": self._params[
"wireless_table_structure_recognition_model_dir"
],
"SubModules.WiredTableCellsDetection.model_name": self._params[
"wired_table_cells_detection_model_name"
],
"SubModules.WiredTableCellsDetection.model_dir": self._params[
"wired_table_cells_detection_model_dir"
],
"SubModules.WirelessTableCellsDetection.model_name": self._params[
"wireless_table_cells_detection_model_name"
],
"SubModules.WirelessTableCellsDetection.model_dir": self._params[
"wireless_table_cells_detection_model_dir"
],
"SubPipelines.DocPreprocessor.SubModules.DocOrientationClassify.model_name": self._params[
"doc_orientation_classify_model_name"
],
"SubPipelines.DocPreprocessor.SubModules.DocOrientationClassify.model_dir": self._params[
"doc_orientation_classify_model_dir"
],
"SubPipelines.DocPreprocessor.SubModules.DocUnwarping.model_name": self._params[
"doc_unwarping_model_name"
],
"SubPipelines.DocPreprocessor.SubModules.DocUnwarping.model_dir": self._params[
"doc_unwarping_model_dir"
],
"SubPipelines.GeneralOCR.SubModules.TextDetection.model_name": self._params[
"text_detection_model_name"
],
"SubPipelines.GeneralOCR.SubModules.TextDetection.model_dir": self._params[
"text_detection_model_dir"
],
"SubPipelines.GeneralOCR.SubModules.TextDetection.limit_side_len": self._params[
"text_det_limit_side_len"
],
"SubPipelines.GeneralOCR.SubModules.TextDetection.limit_type": self._params[
"text_det_limit_type"
],
"SubPipelines.GeneralOCR.SubModules.TextDetection.thresh": self._params[
"text_det_thresh"
],
"SubPipelines.GeneralOCR.SubModules.TextDetection.box_thresh": self._params[
"text_det_box_thresh"
],
"SubPipelines.GeneralOCR.SubModules.TextDetection.unclip_ratio": self._params[
"text_det_unclip_ratio"
],
"SubPipelines.GeneralOCR.SubModules.TextRecognition.model_name": self._params[
"text_recognition_model_name"
],
"SubPipelines.GeneralOCR.SubModules.TextRecognition.model_dir": self._params[
"text_recognition_model_dir"
],
"SubPipelines.GeneralOCR.SubModules.TextRecognition.batch_size": self._params[
"text_recognition_batch_size"
],
"SubPipelines.GeneralOCR.SubModules.TextRecognition.score_thresh": self._params[
"text_rec_score_thresh"
],
}
return create_config_from_structure(STRUCTURE)
class TableRecognitionPipelineV2CLISubcommandExecutor(PipelineCLISubcommandExecutor):
@property
def subparser_name(self):
return "table_recognition_v2"
def _update_subparser(self, subparser):
add_simple_inference_args(subparser)
subparser.add_argument(
"--layout_detection_model_name",
type=str,
help="Name of the layout detection model.",
)
subparser.add_argument(
"--layout_detection_model_dir",
type=str,
help="Path to the layout detection model directory.",
)
subparser.add_argument(
"--table_classification_model_name",
type=str,
help="Name of the table classification model.",
)
subparser.add_argument(
"--table_classification_model_dir",
type=str,
help="Path to the table classification model directory.",
)
subparser.add_argument(
"--wired_table_structure_recognition_model_name",
type=str,
help="Name of the wired table structure recognition model.",
)
subparser.add_argument(
"--wired_table_structure_recognition_model_dir",
type=str,
help="Path to the wired table structure recognition model directory.",
)
subparser.add_argument(
"--wireless_table_structure_recognition_model_name",
type=str,
help="Name of the wireless table structure recognition model.",
)
subparser.add_argument(
"--wireless_table_structure_recognition_model_dir",
type=str,
help="Path to the wired table structure recognition model directory.",
)
subparser.add_argument(
"--wired_table_cells_detection_model_name",
type=str,
help="Name of the wired table cells detection model.",
)
subparser.add_argument(
"--wired_table_cells_detection_model_dir",
type=str,
help="Path to the wired table cells detection model directory.",
)
subparser.add_argument(
"--wireless_table_cells_detection_model_name",
type=str,
help="Name of the wireless table cells detection model.",
)
subparser.add_argument(
"--wireless_table_cells_detection_model_dir",
type=str,
help="Path to the wireless table cells detection model directory.",
)
subparser.add_argument(
"--doc_orientation_classify_model_name",
type=str,
help="Name of the document image orientation classification model.",
)
subparser.add_argument(
"--doc_orientation_classify_model_dir",
type=str,
help="Path to the document image orientation classification model directory.",
)
subparser.add_argument(
"--doc_unwarping_model_name",
type=str,
help="Name of the text image unwarping model.",
)
subparser.add_argument(
"--doc_unwarping_model_dir",
type=str,
help="Path to the image unwarping model directory.",
)
subparser.add_argument(
"--text_detection_model_name",
type=str,
help="Name of the text detection model.",
)
subparser.add_argument(
"--text_detection_model_dir",
type=str,
help="Path to the text detection model directory.",
)
subparser.add_argument(
"--text_det_limit_side_len",
type=int,
help="This sets a limit on the side length of the input image for the text detection model.",
)
subparser.add_argument(
"--text_det_limit_type",
type=str,
help="This determines how the side length limit is applied to the input image before feeding it into the text deteciton model.",
)
subparser.add_argument(
"--text_det_thresh",
type=float,
help="Detection pixel threshold for the text detection model. Pixels with scores greater than this threshold in the output probability map are considered text pixels.",
)
subparser.add_argument(
"--text_det_box_thresh",
type=float,
help="Detection box threshold for the text detection model. A detection result is considered a text region if the average score of all pixels within the border of the result is greater than this threshold.",
)
subparser.add_argument(
"--text_det_unclip_ratio",
type=float,
help="Text detection expansion coefficient, which expands the text region using this method. The larger the value, the larger the expansion area.",
)
subparser.add_argument(
"--text_recognition_model_name",
type=str,
help="Name of the text recognition model.",
)
subparser.add_argument(
"--text_recognition_model_dir",
type=str,
help="Path to the text recognition model directory.",
)
subparser.add_argument(
"--text_recognition_batch_size",
type=int,
help="Batch size for the text recognition model.",
)
subparser.add_argument(
"--text_rec_score_thresh",
type=float,
help="Text recognition threshold used in general OCR. Text results with scores greater than this threshold are retained.",
)
subparser.add_argument(
"--use_doc_orientation_classify",
type=str2bool,
help="Whether to use document image orientation classification.",
)
subparser.add_argument(
"--use_doc_unwarping",
type=str2bool,
help="Whether to use text image unwarping.",
)
subparser.add_argument(
"--use_layout_detection",
type=str2bool,
help="Whether to use layout detection.",
)
subparser.add_argument(
"--use_ocr_model",
type=str2bool,
help="Whether to use OCR models.",
)
def execute_with_args(self, args):
params = get_subcommand_args(args)
perform_simple_inference(TableRecognitionPipelineV2, params)
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_pipelines/table_recognition_v2.py",
"license": "Apache License 2.0",
"lines": 421,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_pipelines/utils.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def create_config_from_structure(structure, *, unset=None, config=None):
if config is None:
config = {}
for k, v in structure.items():
if v is unset:
continue
idx = k.find(".")
if idx == -1:
config[k] = v
else:
sk = k[:idx]
if sk not in config:
config[sk] = {}
create_config_from_structure({k[idx + 1 :]: v}, config=config[sk])
return config
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_pipelines/utils.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:paddleocr/_version.py | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.metadata
try:
version = importlib.metadata.version(__package__)
except importlib.metadata.PackageNotFoundError:
version = "0.0.0"
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "paddleocr/_version.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
PaddlePaddle/PaddleOCR:tests/pipelines/test_doc_preprocessor.py | import pytest
from paddleocr import DocPreprocessor
from ..testing_utils import (
TEST_DATA_DIR,
check_simple_inference_result,
check_wrapper_simple_inference_param_forwarding,
)
@pytest.fixture(scope="module")
def ocr_engine() -> DocPreprocessor:
return DocPreprocessor()
@pytest.mark.parametrize(
"image_path",
[
TEST_DATA_DIR / "book_rot180.jpg",
],
)
def test_predict(ocr_engine: DocPreprocessor, image_path: str) -> None:
"""
Test PaddleOCR's doc preprocessor functionality.
Args:
ocr_engine: An instance of `DocPreprocessor`.
image_path: Path to the image to be processed.
"""
result = ocr_engine.predict(str(image_path))
check_simple_inference_result(result)
res = result[0]
assert res["angle"] in {0, 90, 180, 270, -1}
assert res["rot_img"] is not None
assert res["output_img"] is not None
@pytest.mark.parametrize(
"params",
[
{"use_doc_orientation_classify": False},
{"use_doc_unwarping": False},
],
)
def test_predict_params(
monkeypatch,
ocr_engine: DocPreprocessor,
params: dict,
) -> None:
check_wrapper_simple_inference_param_forwarding(
monkeypatch,
ocr_engine,
"paddlex_pipeline",
"dummy_path",
params,
)
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "tests/pipelines/test_doc_preprocessor.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PaddlePaddle/PaddleOCR:tests/pipelines/test_formula_recognition.py | import pytest
from paddleocr import FormulaRecognitionPipeline
from ..testing_utils import (
TEST_DATA_DIR,
check_simple_inference_result,
check_wrapper_simple_inference_param_forwarding,
)
@pytest.fixture(scope="module")
def formula_recognition_engine() -> FormulaRecognitionPipeline:
return FormulaRecognitionPipeline()
# TODO: Should we separate unit tests and integration tests?
@pytest.mark.parametrize(
"image_path",
[
TEST_DATA_DIR / "doc_with_formula.png",
],
)
def test_predict(
formula_recognition_engine: FormulaRecognitionPipeline, image_path: str
) -> None:
"""
Test FormulaRecognitionPipeline's formula_recognition functionality.
Args:
formula_recognition_engine: An instance of `FormulaRecognitionPipeline`.
image_path: Path to the image to be processed.
"""
result = formula_recognition_engine.predict(str(image_path))
check_simple_inference_result(result)
res = result[0]
assert isinstance(res["formula_res_list"], list)
assert len(res["formula_res_list"]) > 0
# TODO: Also check passing `None`
@pytest.mark.parametrize(
"params",
[
{"use_doc_orientation_classify": False},
{"use_doc_unwarping": False},
{"use_layout_detection": False},
{"layout_threshold": 0.5},
{"layout_nms": True},
{"layout_unclip_ratio": 1.5},
{"layout_merge_bboxes_mode": "large"},
],
)
def test_predict_params(
monkeypatch,
formula_recognition_engine: FormulaRecognitionPipeline,
params: dict,
) -> None:
check_wrapper_simple_inference_param_forwarding(
monkeypatch,
formula_recognition_engine,
"paddlex_pipeline",
"dummy_path",
params,
)
# TODO: Test init params
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "tests/pipelines/test_formula_recognition.py",
"license": "Apache License 2.0",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PaddlePaddle/PaddleOCR:tests/pipelines/test_ocr.py | import pytest
from paddleocr import PaddleOCR
from ..testing_utils import (
TEST_DATA_DIR,
check_simple_inference_result,
check_wrapper_simple_inference_param_forwarding,
)
@pytest.fixture(scope="module")
def ocr_engine() -> PaddleOCR:
return PaddleOCR()
# TODO: Should we separate unit tests and integration tests?
@pytest.mark.parametrize(
"image_path",
[
TEST_DATA_DIR / "table.jpg",
],
)
def test_predict(ocr_engine: PaddleOCR, image_path: str) -> None:
"""
Test PaddleOCR's OCR functionality.
Args:
ocr_engine: An instance of `PaddleOCR`.
image_path: Path to the image to be processed.
"""
result = ocr_engine.predict(str(image_path))
check_simple_inference_result(result)
res = result[0]
assert len(res["dt_polys"]) > 0
assert isinstance(res["rec_texts"], list)
assert len(res["rec_texts"]) > 0
for text in res["rec_texts"]:
assert isinstance(text, str)
# TODO: Also check passing `None`
@pytest.mark.parametrize(
"params",
[
{"use_doc_orientation_classify": False},
{"use_doc_unwarping": False},
{"use_textline_orientation": False},
{"text_det_limit_side_len": 640, "text_det_limit_type": "min"},
{"text_det_thresh": 0.5},
{"text_det_box_thresh": 0.3},
{"text_det_unclip_ratio": 3.0},
{"text_rec_score_thresh": 0.5},
],
)
def test_predict_params(
monkeypatch,
ocr_engine: PaddleOCR,
params: dict,
) -> None:
check_wrapper_simple_inference_param_forwarding(
monkeypatch,
ocr_engine,
"paddlex_pipeline",
"dummy_path",
params,
)
# TODO: Test init params
def test_lang_and_ocr_version():
ocr_engine = PaddleOCR(lang="ch", ocr_version="PP-OCRv5")
assert ocr_engine._params["text_detection_model_name"] == "PP-OCRv5_server_det"
assert ocr_engine._params["text_recognition_model_name"] == "PP-OCRv5_server_rec"
ocr_engine = PaddleOCR(lang="chinese_cht", ocr_version="PP-OCRv5")
assert ocr_engine._params["text_detection_model_name"] == "PP-OCRv5_server_det"
assert ocr_engine._params["text_recognition_model_name"] == "PP-OCRv5_server_rec"
ocr_engine = PaddleOCR(lang="en", ocr_version="PP-OCRv5")
assert ocr_engine._params["text_detection_model_name"] == "PP-OCRv5_server_det"
assert ocr_engine._params["text_recognition_model_name"] == "en_PP-OCRv5_mobile_rec"
ocr_engine = PaddleOCR(lang="japan", ocr_version="PP-OCRv5")
assert ocr_engine._params["text_detection_model_name"] == "PP-OCRv5_server_det"
assert ocr_engine._params["text_recognition_model_name"] == "PP-OCRv5_server_rec"
ocr_engine = PaddleOCR(lang="ch", ocr_version="PP-OCRv4")
assert ocr_engine._params["text_detection_model_name"] == "PP-OCRv4_mobile_det"
assert ocr_engine._params["text_recognition_model_name"] == "PP-OCRv4_mobile_rec"
ocr_engine = PaddleOCR(lang="en", ocr_version="PP-OCRv4")
assert ocr_engine._params["text_detection_model_name"] == "PP-OCRv4_mobile_det"
assert ocr_engine._params["text_recognition_model_name"] == "en_PP-OCRv4_mobile_rec"
ocr_engine = PaddleOCR(lang="ch", ocr_version="PP-OCRv3")
assert ocr_engine._params["text_detection_model_name"] == "PP-OCRv3_mobile_det"
assert ocr_engine._params["text_recognition_model_name"] == "PP-OCRv3_mobile_rec"
ocr_engine = PaddleOCR(lang="en", ocr_version="PP-OCRv3")
assert ocr_engine._params["text_detection_model_name"] == "PP-OCRv3_mobile_det"
assert ocr_engine._params["text_recognition_model_name"] == "en_PP-OCRv3_mobile_rec"
ocr_engine = PaddleOCR(lang="fr", ocr_version="PP-OCRv3")
assert ocr_engine._params["text_detection_model_name"] == "PP-OCRv3_mobile_det"
assert (
ocr_engine._params["text_recognition_model_name"] == "latin_PP-OCRv3_mobile_rec"
)
ocr_engine = PaddleOCR(lang="ar", ocr_version="PP-OCRv3")
assert ocr_engine._params["text_detection_model_name"] == "PP-OCRv3_mobile_det"
assert (
ocr_engine._params["text_recognition_model_name"]
== "arabic_PP-OCRv3_mobile_rec"
)
ocr_engine = PaddleOCR(lang="ru", ocr_version="PP-OCRv3")
assert ocr_engine._params["text_detection_model_name"] == "PP-OCRv3_mobile_det"
assert (
ocr_engine._params["text_recognition_model_name"]
== "cyrillic_PP-OCRv3_mobile_rec"
)
ocr_engine = PaddleOCR(lang="hi", ocr_version="PP-OCRv3")
assert ocr_engine._params["text_detection_model_name"] == "PP-OCRv3_mobile_det"
assert (
ocr_engine._params["text_recognition_model_name"]
== "devanagari_PP-OCRv3_mobile_rec"
)
ocr_engine = PaddleOCR(lang="japan", ocr_version="PP-OCRv3")
assert ocr_engine._params["text_detection_model_name"] == "PP-OCRv3_mobile_det"
assert (
ocr_engine._params["text_recognition_model_name"] == "japan_PP-OCRv3_mobile_rec"
)
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "tests/pipelines/test_ocr.py",
"license": "Apache License 2.0",
"lines": 112,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PaddlePaddle/PaddleOCR:tests/pipelines/test_pp_chatocrv4_doc.py | import pytest
from paddleocr import PPChatOCRv4Doc
from ..testing_utils import TEST_DATA_DIR
@pytest.fixture(scope="module")
def pp_chatocrv4_doc_pipeline():
return PPChatOCRv4Doc()
@pytest.mark.parametrize(
"image_path",
[
TEST_DATA_DIR / "doc_with_formula.png",
],
)
def test_visual_predict(pp_chatocrv4_doc_pipeline, image_path):
result = pp_chatocrv4_doc_pipeline.visual_predict(str(image_path))
assert result is not None
assert isinstance(result, list)
assert len(result) == 1
res = result[0]
assert isinstance(res, dict)
assert res.keys() == {"visual_info", "layout_parsing_result"}
assert isinstance(res["visual_info"], dict)
assert isinstance(res["layout_parsing_result"], dict)
@pytest.mark.parametrize(
"params",
[
{"use_doc_orientation_classify": False},
{"use_doc_unwarping": False},
{"use_table_recognition": False},
{"layout_threshold": 0.88},
{"layout_threshold": [0.45, 0.4]},
{"layout_threshold": {0: 0.45, 2: 0.48, 7: 0.4}},
{"layout_nms": False},
{"layout_unclip_ratio": 1.1},
{"layout_unclip_ratio": [1.2, 1.5]},
{"layout_unclip_ratio": {0: 1.2, 2: 1.5, 7: 1.8}},
{"layout_merge_bboxes_mode": "large"},
{"layout_merge_bboxes_mode": {0: "large", 2: "small", 7: "union"}},
{"text_det_limit_side_len": 640, "text_det_limit_type": "min"},
{"text_det_thresh": 0.5},
{"text_det_box_thresh": 0.3},
{"text_det_unclip_ratio": 3.0},
{"text_rec_score_thresh": 0.5},
],
)
def test_visual_predict_params(
monkeypatch,
pp_chatocrv4_doc_pipeline,
params,
):
def _dummy_visual_predict(input, **params):
yield {"visual_info": {}, "layout_parsing_result": params}
monkeypatch.setattr(
pp_chatocrv4_doc_pipeline.paddlex_pipeline,
"visual_predict",
_dummy_visual_predict,
)
result = pp_chatocrv4_doc_pipeline.visual_predict(
input,
**params,
)
assert isinstance(result, list)
assert len(result) == 1
res = result[0]
res = res["layout_parsing_result"]
for k, v in params.items():
assert res[k] == v
# TODO: Test constructor and other methods
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "tests/pipelines/test_pp_chatocrv4_doc.py",
"license": "Apache License 2.0",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PaddlePaddle/PaddleOCR:tests/pipelines/test_pp_structurev3.py | import pytest
from paddleocr import PPStructureV3
from ..testing_utils import (
TEST_DATA_DIR,
check_simple_inference_result,
check_wrapper_simple_inference_param_forwarding,
)
@pytest.fixture(scope="module")
def pp_structurev3_pipeline():
return PPStructureV3()
@pytest.mark.resource_intensive
@pytest.mark.parametrize(
"image_path",
[
TEST_DATA_DIR / "doc_with_formula.png",
],
)
def test_predict(pp_structurev3_pipeline, image_path):
result = pp_structurev3_pipeline.predict(str(image_path))
check_simple_inference_result(result)
res = result[0]
overall_ocr_res = res["overall_ocr_res"]
assert len(overall_ocr_res["dt_polys"]) > 0
assert len(overall_ocr_res["rec_texts"]) > 0
assert len(overall_ocr_res["rec_polys"]) > 0
assert len(overall_ocr_res["rec_boxes"]) > 0
@pytest.mark.parametrize(
"params",
[
{"use_doc_orientation_classify": False},
{"use_doc_unwarping": False},
{"use_table_recognition": False},
{"use_formula_recognition": False},
{"layout_threshold": 0.88},
{"layout_threshold": [0.45, 0.4]},
{"layout_threshold": {0: 0.45, 2: 0.48, 7: 0.4}},
{"layout_nms": False},
{"layout_unclip_ratio": 1.1},
{"layout_unclip_ratio": [1.2, 1.5]},
{"layout_unclip_ratio": {0: 1.2, 2: 1.5, 7: 1.8}},
{"layout_merge_bboxes_mode": "large"},
{"layout_merge_bboxes_mode": {0: "large", 2: "small", 7: "union"}},
{"text_det_limit_side_len": 640, "text_det_limit_type": "min"},
{"text_det_thresh": 0.5},
{"text_det_box_thresh": 0.3},
{"text_det_unclip_ratio": 3.0},
{"text_rec_score_thresh": 0.5},
],
)
def test_predict_params(
monkeypatch,
pp_structurev3_pipeline,
params,
):
check_wrapper_simple_inference_param_forwarding(
monkeypatch,
pp_structurev3_pipeline,
"paddlex_pipeline",
"dummy_path",
params,
)
# TODO: Test constructor and other methods
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "tests/pipelines/test_pp_structurev3.py",
"license": "Apache License 2.0",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PaddlePaddle/PaddleOCR:tests/pipelines/test_seal_rec.py | import pytest
from paddleocr import SealRecognition
from ..testing_utils import (
TEST_DATA_DIR,
check_simple_inference_result,
check_wrapper_simple_inference_param_forwarding,
)
@pytest.fixture(scope="module")
def ocr_engine() -> SealRecognition:
return SealRecognition()
@pytest.mark.parametrize(
"image_path",
[
TEST_DATA_DIR / "seal.png",
],
)
def test_predict(ocr_engine: SealRecognition, image_path: str) -> None:
"""
Test PaddleOCR's seal recognition functionality.
Args:
ocr_engine: An instance of `SealRecognition`.
image_path: Path to the image to be processed.
"""
result = ocr_engine.predict(str(image_path))
check_simple_inference_result(result)
res = result[0]["seal_res_list"][0]
assert len(res["dt_polys"]) > 0
assert isinstance(res["rec_texts"], list)
assert len(res["rec_texts"]) > 0
for text in res["rec_texts"]:
assert isinstance(text, str)
@pytest.mark.parametrize(
"params",
[
{"use_doc_orientation_classify": False, "use_doc_unwarping": False},
{"use_layout_detection": False},
{"layout_det_res": None},
{"layout_threshold": 0.5},
{"layout_nms": False},
{"layout_unclip_ratio": 1.0},
{"layout_merge_bboxes_mode": "large"},
{"seal_det_limit_side_len": 736},
{"seal_det_limit_type": "min"},
{"seal_det_thresh": 0.5},
{"seal_det_box_thresh": 0.6},
{"seal_det_unclip_ratio": 0.5},
{"seal_rec_score_thresh": 0.05},
],
)
def test_predict_params(
monkeypatch,
ocr_engine: SealRecognition,
params: dict,
) -> None:
check_wrapper_simple_inference_param_forwarding(
monkeypatch,
ocr_engine,
"paddlex_pipeline",
"dummy_path",
params,
)
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "tests/pipelines/test_seal_rec.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
PaddlePaddle/PaddleOCR:tests/pipelines/test_table_recognition_v2.py | import pytest
from paddleocr import TableRecognitionPipelineV2
from ..testing_utils import (
TEST_DATA_DIR,
check_simple_inference_result,
check_wrapper_simple_inference_param_forwarding,
)
@pytest.fixture(scope="module")
def table_recognition_v2_pipeline():
return TableRecognitionPipelineV2()
@pytest.mark.parametrize(
"image_path",
[
TEST_DATA_DIR / "table.jpg",
],
)
def test_predict(table_recognition_v2_pipeline, image_path):
result = table_recognition_v2_pipeline.predict(
str(image_path), use_doc_orientation_classify=False, use_doc_unwarping=False
)
check_simple_inference_result(result)
res = result[0]
assert len(res["table_res_list"]) > 0
assert isinstance(res["table_res_list"][0], dict)
assert len(res["table_res_list"][0]["cell_box_list"]) > 0
assert isinstance(res["table_res_list"][0]["pred_html"], str)
assert isinstance(res["table_res_list"][0]["table_ocr_pred"], dict)
@pytest.mark.parametrize(
"params",
[
{"use_doc_orientation_classify": False},
{"use_doc_unwarping": False},
{"use_layout_detection": False},
{"use_ocr_model": False},
{"text_det_limit_side_len": 640, "text_det_limit_type": "min"},
{"text_det_thresh": 0.5},
{"text_det_box_thresh": 0.3},
{"text_det_unclip_ratio": 3.0},
{"text_rec_score_thresh": 0.5},
],
)
def test_predict_params(
monkeypatch,
table_recognition_v2_pipeline,
params,
):
check_wrapper_simple_inference_param_forwarding(
monkeypatch,
table_recognition_v2_pipeline,
"paddlex_pipeline",
"dummy_path",
params,
)
# TODO: Test constructor and other methods
| {
"repo_id": "PaddlePaddle/PaddleOCR",
"file_path": "tests/pipelines/test_table_recognition_v2.py",
"license": "Apache License 2.0",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.