sample_id
stringlengths
21
196
text
stringlengths
105
936k
metadata
dict
category
stringclasses
6 values
OpenBMB/ChatDev:tools/validate_all_yamls.py
""" Validate All YAML Workflow Configurations This tool performs strict validation on all YAML workflow configuration files in the yaml_instance/ directory. It ensures configuration integrity and prevents runtime errors by catching issues early in the development process. Purpose: - Validates YAML syntax and schema compliance for all workflow configurations - Prevents invalid configurations from causing runtime failures - Essential for CI/CD pipelines to ensure code quality - Provides detailed error reporting for debugging Usage: python tools/validate_all_yamls.py # or via Makefile: make validate-yamls """ import sys import subprocess from pathlib import Path def validate_all(): base_dir = Path("yaml_instance") if not base_dir.exists(): print(f"Directory {base_dir} not found.") sys.exit(1) # Recursive search for all .yaml files files = sorted(list(base_dir.rglob("*.yaml"))) if not files: print("No YAML files found.") return print( f"Found {len(files)} YAML files. Running FULL validation via check.check...\n" ) passed = 0 failed = 0 failed_files = [] for yaml_file in files: # Use relative path for cleaner output try: rel_path = yaml_file.relative_to(Path.cwd()) except ValueError: rel_path = yaml_file # NOW we run check.check, which we just patched to have a main() # This performs the stricter load_config() validation cmd = [sys.executable, "-m", "check.check", "--path", str(yaml_file)] try: result = subprocess.run(cmd, capture_output=True, text=True) if result.returncode == 0: print(f"{rel_path}") passed += 1 else: print(f"{rel_path}") # Indent error output if result.stdout: print(" stdout:", result.stdout.strip().replace("\n", "\n ")) # Validation errors usually print to stdout/stderr depending on impl # Our new main prints to stdout for success/failure message failed += 1 failed_files.append(str(rel_path)) except Exception as e: print(f"{rel_path} (Execution Failed)") print(f" Error: {e}") failed += 1 failed_files.append(str(rel_path)) print("\n" + "=" * 40) 
print(f"YAML Validation Summary") print("=" * 40) print(f"Total Files: {len(files)}") print(f"Passed: {passed}") print(f"Failed: {failed}") if failed > 0: print("\nFailed Files:") for f in failed_files: print(f"- {f}") # Overall validation status print("\n" + "=" * 40) print("Overall Validation Status") print("=" * 40) if failed > 0: print("YAML validation: FAILED") sys.exit(1) else: print("All validations passed successfully.") sys.exit(0) if __name__ == "__main__": validate_all()
{ "repo_id": "OpenBMB/ChatDev", "file_path": "tools/validate_all_yamls.py", "license": "Apache License 2.0", "lines": 84, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:server/routes/batch.py
import asyncio

from fastapi import APIRouter, File, Form, HTTPException, UploadFile

from entity.enums import LogLevel
from server.services.batch_parser import parse_batch_file
from server.services.batch_run_service import BatchRunService
from server.state import ensure_known_session
from utils.exceptions import ValidationError

router = APIRouter()

# FIX: asyncio.create_task() only gives the loop a weak reference to the task;
# without a strong reference the fire-and-forget batch run can be garbage
# collected mid-execution (see the asyncio create_task documentation).
_background_tasks: set[asyncio.Task] = set()


@router.post("/api/workflows/batch")
async def execute_batch(
    file: UploadFile = File(...),
    session_id: str = Form(...),
    yaml_file: str = Form(...),
    max_parallel: int = Form(5),
    log_level: str | None = Form(None),
):
    """Accept a batch file upload and launch the batch run in the background.

    Returns immediately with status "accepted"; progress events are pushed
    over the session's WebSocket by BatchRunService.run_batch.
    Raises HTTP 400 for unknown sessions, bad parallelism, unparseable
    batch files, or an invalid log level.
    """
    try:
        manager = ensure_known_session(session_id, require_connection=True)
    except ValidationError as exc:
        raise HTTPException(status_code=400, detail=str(exc)) from exc

    if max_parallel < 1:
        raise HTTPException(status_code=400, detail="max_parallel must be >= 1")

    try:
        content = await file.read()
        tasks, file_base = parse_batch_file(content, file.filename or "batch.csv")
    except ValidationError as exc:
        raise HTTPException(status_code=400, detail=str(exc)) from exc

    resolved_level = None
    if log_level:
        try:
            resolved_level = LogLevel(log_level)
        except ValueError as exc:
            raise HTTPException(status_code=400, detail="log_level must be either DEBUG or INFO") from exc

    service = BatchRunService()
    task = asyncio.create_task(
        service.run_batch(
            session_id,
            yaml_file,
            tasks,
            manager,
            max_parallel=max_parallel,
            file_base=file_base,
            log_level=resolved_level,
        )
    )
    # Keep a strong reference until the task finishes, then let it go.
    _background_tasks.add(task)
    task.add_done_callback(_background_tasks.discard)

    return {
        "status": "accepted",
        "session_id": session_id,
        # batch_id mirrors session_id by design (one batch per session run)
        "batch_id": session_id,
        "task_count": len(tasks),
    }
{ "repo_id": "OpenBMB/ChatDev", "file_path": "server/routes/batch.py", "license": "Apache License 2.0", "lines": 51, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:server/services/batch_parser.py
"""Parse batch task files (CSV/Excel) into runnable tasks.""" import json from dataclasses import dataclass from io import BytesIO from pathlib import Path from typing import Any, Dict, List, Optional, Tuple import pandas as pd from utils.exceptions import ValidationError @dataclass(frozen=True) class BatchTask: row_index: int task_id: Optional[str] task_prompt: str attachment_paths: List[str] vars_override: Dict[str, Any] def parse_batch_file(content: bytes, filename: str) -> Tuple[List[BatchTask], str]: """Parse a CSV/Excel batch file and return tasks plus file base name.""" suffix = Path(filename or "").suffix.lower() if suffix not in {".csv", ".xlsx", ".xls"}: raise ValidationError("Unsupported file type; must be .csv or .xlsx/.xls", field="file") if suffix == ".csv": df = _read_csv(content) else: df = _read_excel(content) file_base = Path(filename).stem or "batch" tasks = _parse_dataframe(df) if not tasks: raise ValidationError("Batch file contains no tasks", field="file") return tasks, file_base def _read_csv(content: bytes) -> pd.DataFrame: try: import chardet except Exception: chardet = None encoding = "utf-8" if chardet: detected = chardet.detect(content) encoding = detected.get("encoding") or encoding try: return pd.read_csv(BytesIO(content), encoding=encoding) except Exception as exc: raise ValidationError(f"Failed to read CSV: {exc}", field="file") def _read_excel(content: bytes) -> pd.DataFrame: try: return pd.read_excel(BytesIO(content)) except Exception as exc: raise ValidationError(f"Failed to read Excel file: {exc}", field="file") def _parse_dataframe(df: pd.DataFrame) -> List[BatchTask]: column_map = {str(col).strip().lower(): col for col in df.columns} id_col = column_map.get("id") task_col = column_map.get("task") attachments_col = column_map.get("attachments") vars_col = column_map.get("vars") tasks: List[BatchTask] = [] seen_ids: set[str] = set() for row_index, row in enumerate(df.to_dict(orient="records"), start=1): task_prompt = 
_get_cell_text(row, task_col) attachment_paths = _parse_json_list(row, attachments_col, row_index) vars_override = _parse_json_dict(row, vars_col, row_index) if not task_prompt and not attachment_paths: raise ValidationError( "Task and attachments cannot both be empty", details={"row_index": row_index}, ) task_id = _get_cell_text(row, id_col) if task_id: if task_id in seen_ids: raise ValidationError( "Duplicate ID in batch file", details={"row_index": row_index, "task_id": task_id}, ) seen_ids.add(task_id) tasks.append( BatchTask( row_index=row_index, task_id=task_id or None, task_prompt=task_prompt, attachment_paths=attachment_paths, vars_override=vars_override, ) ) return tasks def _get_cell_text(row: Dict[str, Any], column: Optional[str]) -> str: if not column: return "" value = row.get(column) if value is None: return "" if isinstance(value, float) and pd.isna(value): return "" if pd.isna(value): return "" return str(value).strip() def _parse_json_list( row: Dict[str, Any], column: Optional[str], row_index: int, ) -> List[str]: if not column: return [] raw_value = row.get(column) if raw_value is None or (isinstance(raw_value, float) and pd.isna(raw_value)): return [] if isinstance(raw_value, list): return _ensure_string_list(raw_value, row_index, "Attachments") if isinstance(raw_value, str): if not raw_value.strip(): return [] try: parsed = json.loads(raw_value) except json.JSONDecodeError as exc: raise ValidationError( f"Invalid JSON in Attachments: {exc}", details={"row_index": row_index}, ) return _ensure_string_list(parsed, row_index, "Attachments") raise ValidationError( "Attachments must be a JSON list", details={"row_index": row_index}, ) def _parse_json_dict( row: Dict[str, Any], column: Optional[str], row_index: int, ) -> Dict[str, Any]: if not column: return {} raw_value = row.get(column) if raw_value is None or (isinstance(raw_value, float) and pd.isna(raw_value)): return {} if isinstance(raw_value, dict): return raw_value if isinstance(raw_value, 
str): if not raw_value.strip(): return {} try: parsed = json.loads(raw_value) except json.JSONDecodeError as exc: raise ValidationError( f"Invalid JSON in Vars: {exc}", details={"row_index": row_index}, ) if not isinstance(parsed, dict): raise ValidationError( "Vars must be a JSON object", details={"row_index": row_index}, ) return parsed raise ValidationError( "Vars must be a JSON object", details={"row_index": row_index}, ) def _ensure_string_list(value: Any, row_index: int, field: str) -> List[str]: if not isinstance(value, list): raise ValidationError( f"{field} must be a JSON list", details={"row_index": row_index}, ) result: List[str] = [] for item in value: if item is None or (isinstance(item, float) and pd.isna(item)): continue result.append(str(item)) return result
{ "repo_id": "OpenBMB/ChatDev", "file_path": "server/services/batch_parser.py", "license": "Apache License 2.0", "lines": 164, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:server/services/batch_run_service.py
"""Batch workflow execution helpers.""" import asyncio import csv import json import logging import re import time import uuid from pathlib import Path from typing import Any, Dict, List, Optional from check.check import load_config from entity.enums import LogLevel from entity.graph_config import GraphConfig from utils.exceptions import ValidationError from utils.task_input import TaskInputBuilder from workflow.graph import GraphExecutor from workflow.graph_context import GraphContext from server.services.batch_parser import BatchTask from server.services.workflow_storage import validate_workflow_filename from server.settings import WARE_HOUSE_DIR, YAML_DIR class BatchRunService: """Runs batch workflows and reports progress over WebSocket.""" def __init__(self) -> None: self.logger = logging.getLogger(__name__) async def run_batch( self, session_id: str, yaml_file: str, tasks: List[BatchTask], websocket_manager, *, max_parallel: int = 5, file_base: str = "batch", log_level: Optional[LogLevel] = None, ) -> None: batch_id = session_id total = len(tasks) await websocket_manager.send_message( session_id, {"type": "batch_started", "data": {"batch_id": batch_id, "total": total}}, ) semaphore = asyncio.Semaphore(max_parallel) success_count = 0 failure_count = 0 result_rows: List[Dict[str, Any]] = [] result_lock = asyncio.Lock() async def run_task(task: BatchTask) -> None: nonlocal success_count, failure_count task_id = task.task_id or str(uuid.uuid4()) task_dir = self._sanitize_label(f"{file_base}-{task_id}") await websocket_manager.send_message( session_id, { "type": "batch_task_started", "data": { "row_index": task.row_index, "task_id": task_id, "task_dir": task_dir, }, }, ) try: result = await asyncio.to_thread( self._run_single_task, session_id, yaml_file, task, task_dir, log_level, ) success_count += 1 async with result_lock: result_rows.append( { "row_index": task.row_index, "task_id": task_id, "task_dir": task_dir, "status": "success", "duration_ms": 
result["duration_ms"], "token_usage": result["token_usage"], "graph_output": result["graph_output"], "results": result["results"], "error": "", } ) await websocket_manager.send_message( session_id, { "type": "batch_task_completed", "data": { "row_index": task.row_index, "task_id": task_id, "task_dir": task_dir, "results": result["results"], "token_usage": result["token_usage"], "duration_ms": result["duration_ms"], }, }, ) except Exception as exc: failure_count += 1 async with result_lock: result_rows.append( { "row_index": task.row_index, "task_id": task_id, "task_dir": task_dir, "status": "failed", "duration_ms": None, "token_usage": None, "graph_output": "", "results": None, "error": str(exc), } ) await websocket_manager.send_message( session_id, { "type": "batch_task_failed", "data": { "row_index": task.row_index, "task_id": task_id, "task_dir": task_dir, "error": str(exc), }, }, ) async def run_with_limit(task: BatchTask) -> None: async with semaphore: await run_task(task) await asyncio.gather(*(run_with_limit(task) for task in tasks)) self._write_batch_outputs(session_id, result_rows) await websocket_manager.send_message( session_id, { "type": "batch_completed", "data": { "batch_id": batch_id, "total": total, "succeeded": success_count, "failed": failure_count, }, }, ) def _write_batch_outputs(self, session_id: str, result_rows: List[Dict[str, Any]]) -> None: output_root = WARE_HOUSE_DIR / f"session_{session_id}" output_root.mkdir(parents=True, exist_ok=True) csv_path = output_root / "batch_results.csv" json_path = output_root / "batch_manifest.json" fieldnames = [ "row_index", "task_id", "task_dir", "status", "duration_ms", "token_usage", "results", "error", ] with csv_path.open("w", newline="", encoding="utf-8") as handle: writer = csv.DictWriter(handle, fieldnames=fieldnames, extrasaction="ignore") writer.writeheader() for row in result_rows: row_copy = dict(row) row_copy["token_usage"] = json.dumps(row_copy.get("token_usage")) row_copy["results"] = 
row_copy.get("graph_output", "") writer.writerow(row_copy) with json_path.open("w", encoding="utf-8") as handle: json.dump(result_rows, handle, ensure_ascii=True, indent=2) def _run_single_task( self, session_id: str, yaml_file: str, task: BatchTask, task_dir: str, log_level: Optional[LogLevel], ) -> Dict[str, Any]: yaml_path = self._resolve_yaml_path(yaml_file) design = load_config(yaml_path, vars_override=task.vars_override or None) if any(node.type == "human" for node in design.graph.nodes): raise ValidationError( "Batch execution does not support human nodes", details={"yaml_file": yaml_file}, ) output_root = WARE_HOUSE_DIR / f"session_{session_id}" graph_config = GraphConfig.from_definition( design.graph, name=task_dir, output_root=output_root, source_path=str(yaml_path), vars=design.vars, ) graph_config.metadata["fixed_output_dir"] = True if log_level: graph_config.log_level = log_level graph_config.definition.log_level = log_level graph_context = GraphContext(config=graph_config) start_time = time.perf_counter() executor = GraphExecutor(graph_context, session_id=session_id) task_input = self._build_task_input(executor.attachment_store, task) executor._execute(task_input) duration_ms = int((time.perf_counter() - start_time) * 1000) return { "results": executor.outputs, "token_usage": executor.token_tracker.get_token_usage(), "duration_ms": duration_ms, "graph_output": executor.get_final_output(), } @staticmethod def _build_task_input(attachment_store, task: BatchTask): if task.attachment_paths: builder = TaskInputBuilder(attachment_store) return builder.build_from_file_paths(task.task_prompt, task.attachment_paths) return task.task_prompt @staticmethod def _sanitize_label(value: str) -> str: cleaned = re.sub(r"[^a-zA-Z0-9._-]+", "_", value) return cleaned.strip("_") or "task" @staticmethod def _resolve_yaml_path(yaml_filename: str) -> Path: safe_name = validate_workflow_filename(yaml_filename, require_yaml_extension=True) yaml_path = YAML_DIR / safe_name if 
not yaml_path.exists(): raise ValidationError("YAML file not found", details={"yaml_file": safe_name}) return yaml_path
{ "repo_id": "OpenBMB/ChatDev", "file_path": "server/services/batch_run_service.py", "license": "Apache License 2.0", "lines": 225, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:check/check.py
"""Utilities for loading, validating design_0.4.0 workflows.""" from pathlib import Path from typing import Any, Dict, Optional from runtime.bootstrap.schema import ensure_schema_registry_populated from check.check_yaml import validate_design from check.check_workflow import check_workflow_structure from entity.config_loader import prepare_design_mapping from entity.configs import DesignConfig, ConfigError from schema_registry import iter_node_schemas from utils.io_utils import read_yaml ensure_schema_registry_populated() class DesignError(RuntimeError): """Raised when a workflow design cannot be loaded or validated.""" def _allowed_node_types() -> set[str]: names = set(iter_node_schemas().keys()) if not names: raise DesignError("No node types registered; cannot validate workflow") return names def _ensure_supported(graph: Dict[str, Any]) -> None: """Ensure the MVP constraints are satisfied for the provided graph.""" for node in graph.get("nodes", []) or []: nid = node.get("id") ntype = node.get("type") allowed = _allowed_node_types() if ntype not in allowed: raise DesignError( f"Unsupported node type '{ntype}' for node '{nid}'. Only {allowed} nodes are supported." ) if ntype == "agent": agent_cfg = node.get("config") or {} if not isinstance(agent_cfg, dict): raise DesignError(f"Agent node '{nid}' config must be an object") for legacy_key in ["memory"]: if legacy_key in agent_cfg: raise DesignError( f"'{legacy_key}' is deprecated. Use the new graph-level memory stores for node '{nid}'." 
) def load_config( config_path: Path, *, fn_module: Optional[str] = None, set_defaults: bool = True, vars_override: Optional[Dict[str, Any]] = None, ) -> DesignConfig: """Load, validate, and sanity-check a workflow file.""" try: raw_data = read_yaml(config_path) except FileNotFoundError as exc: raise DesignError(f"Design file not found: {config_path}") from exc if not isinstance(raw_data, dict): raise DesignError("YAML root must be a mapping") if vars_override: merged_vars = dict(raw_data.get("vars") or {}) merged_vars.update(vars_override) raw_data = dict(raw_data) raw_data["vars"] = merged_vars data = prepare_design_mapping(raw_data, source=str(config_path)) schema_errors = validate_design(data, set_defaults=set_defaults, fn_module_ref=fn_module) if schema_errors: formatted = "\n".join(f"- {err}" for err in schema_errors) raise DesignError(f"Design validation failed for '{config_path}':\n{formatted}") try: design = DesignConfig.from_dict(data, path="root") except ConfigError as exc: raise DesignError(f"Design parsing failed for '{config_path}': {exc}") from exc logic_errors = check_workflow_structure(data) if logic_errors: formatted = "\n".join(f"- {err}" for err in logic_errors) raise DesignError(f"Workflow logical issues detected for '{config_path}':\n{formatted}") else: print("Workflow OK.") graph = data.get("graph") or {} _ensure_supported(graph) return design def check_config(yaml_content: Any) -> str: if not isinstance(yaml_content, dict): return "YAML root must be a mapping" # Skip placeholder resolution during save - users may configure env vars at runtime # Use yaml_content directly instead of prepare_design_mapping() schema_errors = validate_design(yaml_content) if schema_errors: formatted = "\n".join(f"- {err}" for err in schema_errors) return formatted logic_errors = check_workflow_structure(yaml_content) if logic_errors: formatted = "\n".join(f"- {err}" for err in logic_errors) return formatted graph = yaml_content.get("graph") or {} try: 
_ensure_supported(graph) except Exception as e: return str(e) return ""
{ "repo_id": "OpenBMB/ChatDev", "file_path": "check/check.py", "license": "Apache License 2.0", "lines": 93, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:check/check_workflow.py
import argparse
from typing import Any, Dict, List, Optional, Tuple

import yaml

from check import check_yaml
from utils.io_utils import read_yaml


def _node_ids(graph: Dict[str, Any]) -> List[str]:
    """Collect the string `id` of every node in *graph* (non-string ids skipped)."""
    nodes = graph.get("nodes", []) or []
    ids: List[str] = []
    for n in nodes:
        nid = n.get("id")
        if isinstance(nid, str):
            ids.append(nid)
    return ids


def _edge_list(graph: Dict[str, Any]) -> List[Dict[str, Any]]:
    """Return only well-formed edges (mappings carrying both 'from' and 'to')."""
    edges = graph.get("edges", []) or []
    return [e for e in edges if isinstance(e, dict) and "from" in e and "to" in e]


def _analyze_graph(graph: Dict[str, Any], base_path: str, errors: List[str]) -> None:
    """Append structural problems for this (sub)graph to *errors*, recursing into subgraphs."""
    # Majority voting graphs are skipped for start/end structure checks
    is_mv = graph.get("is_majority_voting", False)
    if is_mv:
        return

    nodes = _node_ids(graph)
    node_set = set(nodes)

    # NOTE: only 'end' is validated here; the symmetric explicit-'start' and
    # unique-natural-source checks were deliberately disabled upstream.
    end = graph.get("end")

    # Normalize to list
    if end is not None:
        if isinstance(end, str):
            end_list = [end]
        elif isinstance(end, list):
            end_list = end
        else:
            errors.append(f"{base_path}.end must be a string or list of strings")
            return

        # Check each node ID in the end list
        for end_node_id in end_list:
            if not isinstance(end_node_id, str):
                errors.append(
                    f"{base_path}.end contains non-string element: {end_node_id}"
                )
            elif end_node_id not in node_set:
                errors.append(
                    f"{base_path}.end references unknown node id '{end_node_id}'"
                )

    # Compute in/out degrees within this graph scope
    indeg = {nid: 0 for nid in nodes}
    outdeg = {nid: 0 for nid in nodes}
    for e in _edge_list(graph):
        frm = e.get("from")
        to = e.get("to")
        if frm in outdeg:
            outdeg[frm] += 1
        if to in indeg:
            indeg[to] += 1

    sinks = [nid for nid in nodes if outdeg.get(nid, 0) == 0]
    # Rule: a (sub)graph needs exactly one natural sink (node with no
    # outgoing edges) unless an explicit 'end' is declared.
    if len(sinks) != 1:
        if end is None:
            errors.append(
                f"{base_path}: graph lacks a unique natural end; specify 'end' explicitly"
            )

    # Recurse into subgraphs
    for i, n in enumerate(graph.get("nodes", []) or []):
        if isinstance(n, dict) and n.get("type") == "subgraph":
            sub = n.get("config") or {}
            if not isinstance(sub, dict):
                errors.append(f"{base_path}.nodes[{i}].config must be object for subgraph nodes")
                continue
            sg_type = sub.get("type")
            if sg_type == "config":
                # Inline subgraph: analyze the embedded graph recursively.
                config_block = sub.get("config")
                if not isinstance(config_block, dict):
                    errors.append(
                        f"{base_path}.nodes[{i}].config.config must be object when type=config"
                    )
                    continue
                _analyze_graph(config_block, f"{base_path}.nodes[{i}].config.config", errors)
            elif sg_type == "file":
                # File-backed subgraph: only the path's presence/type is checked here.
                file_block = sub.get("config")
                if not (isinstance(file_block, dict) and isinstance(file_block.get("path"), str)):
                    errors.append(
                        f"{base_path}.nodes[{i}].config.config.path must be string when type=file"
                    )
            else:
                errors.append(
                    f"{base_path}.nodes[{i}].config.type must be 'config' or 'file'"
                )


def check_workflow_structure(data: Any) -> List[str]:
    """Validate the top-level graph structure; return a list of error strings."""
    errors: List[str] = []
    if not isinstance(data, dict) or "graph" not in data:
        return ["<root>.graph is required"]
    graph = data["graph"]
    if not isinstance(graph, dict):
        return ["<root>.graph must be object"]
    _analyze_graph(graph, "graph", errors)
    return errors


def main():
    """CLI entry: optional schema validation, then workflow structure checks."""
    parser = argparse.ArgumentParser(
        description="Check workflow structure: unique natural start/end or explicit start/end per (sub)graph")
    parser.add_argument("path", nargs="?", default="design_0.4.0.yaml", help="Path to YAML file")
    parser.add_argument("--no-schema", action="store_true", help="Skip schema validation (0.4.0)")
    parser.add_argument("--fn-module", dest="fn_module", default=None,
                        help="Module name or .py path where edge functions are defined (for schema validation)")
    args = parser.parse_args()

    data = read_yaml(args.path)
    if not args.no_schema:
        schema_errors = check_yaml.validate_design(data, set_defaults=True, fn_module_ref=args.fn_module)
        if schema_errors:
            print("Invalid schema:")
            for e in schema_errors:
                print(f"- {e}")
            raise SystemExit(1)

    logic_errors = check_workflow_structure(data)
    if logic_errors:
        print("Workflow issues:")
        for e in logic_errors:
            print(f"- {e}")
        raise SystemExit(2)
    else:
        print("Workflow OK.")


if __name__ == "__main__":
    main()
{ "repo_id": "OpenBMB/ChatDev", "file_path": "check/check_workflow.py", "license": "Apache License 2.0", "lines": 135, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:check/check_yaml.py
"""Lightweight schema validation leveraging typed config loaders.""" import argparse from pathlib import Path from typing import Any, List, Optional from entity.configs import ConfigError, DesignConfig from utils.io_utils import read_yaml def validate_design(data: Any, set_defaults: bool = True, fn_module_ref: Optional[str] = None) -> List[str]: """Validate raw YAML data using the typed config loader. Note: This function validates schema structure only, without resolving environment variable placeholders like ${VAR}. This allows workflows to be saved even when environment variables are not yet configured - they will be resolved at runtime. """ try: if not isinstance(data, dict): raise ConfigError("YAML root must be a mapping", path="root") # Use DesignConfig.from_dict directly to skip placeholder resolution # Users may configure environment variables at runtime DesignConfig.from_dict(data) return [] except ConfigError as exc: return [str(exc)] def main() -> None: parser = argparse.ArgumentParser(description="Validate workflow YAML structure against the typed config loader") parser.add_argument("path", help="Path to the workflow YAML file") args = parser.parse_args() data = read_yaml(args.path) errors = validate_design(data) if errors: print("Design validation failed:") for err in errors: print(f"- {err}") raise SystemExit(1) print("Design validation successful.") if __name__ == "__main__": main()
{ "repo_id": "OpenBMB/ChatDev", "file_path": "check/check_yaml.py", "license": "Apache License 2.0", "lines": 36, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:entity/config_loader.py
"""Helpers for loading validated configuration objects.""" from pathlib import Path from typing import Any, Mapping import yaml from entity.configs import DesignConfig, ConfigError from utils.env_loader import load_dotenv_file, build_env_var_map from utils.vars_resolver import resolve_design_placeholders def prepare_design_mapping(data: Mapping[str, Any], *, source: str | None = None) -> Mapping[str, Any]: load_dotenv_file() env_lookup = build_env_var_map() prepared = dict(data) resolve_design_placeholders(prepared, env_lookup=env_lookup, path=source or "root") return prepared def load_design_from_mapping(data: Mapping[str, Any], *, source: str | None = None) -> DesignConfig: """Parse a raw dictionary into a typed :class:`DesignConfig`.""" prepared = prepare_design_mapping(data, source=source) return DesignConfig.from_dict(prepared, path="root") def load_design_from_file(path: Path) -> DesignConfig: """Read a YAML file and parse it into a :class:`DesignConfig`.""" with path.open("r", encoding="utf-8") as handle: data = yaml.load(handle, Loader=yaml.FullLoader) if not isinstance(data, Mapping): raise ConfigError("YAML root must be a mapping", path=str(path)) return load_design_from_mapping(data, source=str(path))
{ "repo_id": "OpenBMB/ChatDev", "file_path": "entity/config_loader.py", "license": "Apache License 2.0", "lines": 24, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:entity/configs/base.py
"""Shared helpers and base classes for configuration dataclasses.""" from dataclasses import dataclass, field, replace from typing import Any, Dict, Iterable, List, Mapping, MutableMapping, Sequence, TypeVar, ClassVar, Optional TConfig = TypeVar("TConfig", bound="BaseConfig") class ConfigError(ValueError): """Raised when configuration parsing or validation fails.""" def __init__(self, message: str, path: str | None = None): self.path = path full_message = f"{path}: {message}" if path else message super().__init__(full_message) @dataclass(frozen=True) class RuntimeConstraint: """Represents a conditional requirement for configuration fields.""" when: Mapping[str, Any] require: Sequence[str] message: str def to_json(self) -> Dict[str, Any]: return { "when": dict(self.when), "require": list(self.require), "message": self.message, } @dataclass(frozen=True) class ChildKey: """Identifies a conditional navigation target for nested schemas.""" field: str value: Any | None = None # variant: str | None = None def matches(self, field: str, value: Any | None) -> bool: if self.field != field: return False # if self.variant is not None and self.variant != str(value): # return False if self.value is None: return True return self.value == value def to_json(self) -> Dict[str, Any]: payload: Dict[str, Any] = {"field": self.field} if self.value is not None: payload["value"] = self.value # if self.variant is not None: # payload["variant"] = self.variant return payload @dataclass(frozen=True) class EnumOption: """Rich metadata for enum values shown in UI.""" value: Any label: str | None = None description: str | None = None def to_json(self) -> Dict[str, Any]: payload: Dict[str, Any] = {"value": self.value} if self.label: payload["label"] = self.label if self.description: payload["description"] = self.description return payload @dataclass(frozen=True) class ConfigFieldSpec: """Describes a single configuration field for schema export.""" name: str type_hint: str required: bool = False 
display_name: str | None = None default: Any | None = None enum: Sequence[Any] | None = None enum_options: Sequence[EnumOption] | None = None description: str | None = None child: type["BaseConfig"] | None = None advance: bool = False # ui: Mapping[str, Any] | None = None def with_name(self, name: str) -> "ConfigFieldSpec": if self.name == name: return self return replace(self, name=name) def to_json(self) -> Dict[str, Any]: display = self.display_name or self.name data: Dict[str, Any] = { "name": self.name, "displayName": display, "type": self.type_hint, "required": self.required, "advance": self.advance, } if self.default is not None: data["default"] = self.default if self.enum is not None: data["enum"] = list(self.enum) if self.enum_options: data["enumOptions"] = [option.to_json() for option in self.enum_options] if self.description: data["description"] = self.description if self.child is not None: data["childNode"] = self.child.__name__ # if self.ui: # data["ui"] = dict(self.ui) return data @dataclass(frozen=True) class SchemaNode: """Serializable representation of a configuration node.""" node: str fields: Sequence[ConfigFieldSpec] constraints: Sequence[RuntimeConstraint] = field(default_factory=list) def to_json(self) -> Dict[str, Any]: return { "node": self.node, "fields": [spec.to_json() for spec in self.fields], "constraints": [constraint.to_json() for constraint in self.constraints], } @dataclass class BaseConfig: """Base dataclass providing validation and schema hooks.""" path: str # Class-level hooks populated by concrete configs. FIELD_SPECS: ClassVar[Dict[str, ConfigFieldSpec]] = {} CONSTRAINTS: ClassVar[Sequence[RuntimeConstraint]] = () CHILD_ROUTES: ClassVar[Dict[ChildKey, type["BaseConfig"]]] = {} def __post_init__(self) -> None: # pragma: no cover - thin wrapper self.validate() def validate(self) -> None: """Hook for subclasses to implement structural validation.""" # Default implementation intentionally empty. 
return None @classmethod def field_specs(cls) -> Dict[str, ConfigFieldSpec]: return {name: spec.with_name(name) for name, spec in getattr(cls, "FIELD_SPECS", {}).items()} @classmethod def constraints(cls) -> Sequence[RuntimeConstraint]: return tuple(getattr(cls, "CONSTRAINTS", ()) or ()) @classmethod def child_routes(cls) -> Dict[ChildKey, type["BaseConfig"]]: return dict(getattr(cls, "CHILD_ROUTES", {}) or {}) @classmethod def resolve_child(cls, field: str, value: Any | None = None) -> type["BaseConfig"] | None: for key, target in cls.child_routes().items(): if key.matches(field, value): return target return None def as_config(self, expected_type: type[TConfig], *, attr: str = "config") -> TConfig | None: """Return the nested config stored under *attr* if it matches the expected type.""" value = getattr(self, attr, None) if isinstance(value, expected_type): return value return None @classmethod def collect_schema(cls) -> SchemaNode: return SchemaNode(node=cls.__name__, fields=list(cls.field_specs().values()), constraints=list(cls.constraints())) @classmethod def example(cls) -> Dict[str, Any]: """Placeholder for future example export support.""" return {} T = TypeVar("T") def ensure_list(value: Any) -> List[Any]: if value is None: return [] if isinstance(value, list): return list(value) if isinstance(value, (tuple, set)): return list(value) return [value] def ensure_dict(value: Mapping[str, Any] | None) -> Dict[str, Any]: if value is None: return {} if isinstance(value, MutableMapping): return dict(value) if isinstance(value, Mapping): return dict(value) raise ConfigError("expected mapping", path=str(value)) def require_mapping(data: Any, path: str) -> Mapping[str, Any]: if not isinstance(data, Mapping): raise ConfigError("expected mapping", path) return data def require_str(data: Mapping[str, Any], key: str, path: str, *, allow_empty: bool = False) -> str: value = data.get(key) key_path = f"{path}.{key}" if path else key if not isinstance(value, str): raise 
ConfigError("expected string", key_path) if not allow_empty and not value.strip(): raise ConfigError("expected non-empty string", key_path) return value def optional_str(data: Mapping[str, Any], key: str, path: str) -> str | None: value = data.get(key) if value is None or value == "": return None key_path = f"{path}.{key}" if path else key if not isinstance(value, str): raise ConfigError("expected string", key_path) return value def require_bool(data: Mapping[str, Any], key: str, path: str) -> bool: value = data.get(key) key_path = f"{path}.{key}" if path else key if not isinstance(value, bool): raise ConfigError("expected boolean", key_path) return value def optional_bool(data: Mapping[str, Any], key: str, path: str, *, default: bool | None = None) -> bool | None: if key not in data: return default value = data[key] key_path = f"{path}.{key}" if path else key if not isinstance(value, bool): raise ConfigError("expected boolean", key_path) return value def optional_dict(data: Mapping[str, Any], key: str, path: str) -> Dict[str, Any] | None: if key not in data or data[key] is None: return None value = data[key] key_path = f"{path}.{key}" if path else key if not isinstance(value, Mapping): raise ConfigError("expected mapping", key_path) return dict(value) def extend_path(path: str, suffix: str) -> str: if not path: return suffix if suffix.startswith("["): return f"{path}{suffix}" return f"{path}.{suffix}"
{ "repo_id": "OpenBMB/ChatDev", "file_path": "entity/configs/base.py", "license": "Apache License 2.0", "lines": 215, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:entity/configs/dynamic_base.py
"""Shared dynamic configuration classes for both node and edge level execution. This module contains the base classes used by both node-level and edge-level dynamic execution configurations to avoid circular imports. """ from dataclasses import dataclass, fields, replace from typing import Any, ClassVar, Dict, Mapping, Optional, Type, TypeVar from entity.configs.base import ( BaseConfig, ChildKey, ConfigError, ConfigFieldSpec, extend_path, optional_bool, optional_str, require_mapping, require_str, ) from entity.enum_options import enum_options_from_values def _serialize_config(config: BaseConfig) -> Dict[str, Any]: """Serialize a config to dict, excluding the path field.""" payload: Dict[str, Any] = {} for field_obj in fields(config): if field_obj.name == "path": continue payload[field_obj.name] = getattr(config, field_obj.name) return payload class SplitTypeConfig(BaseConfig): """Base helper class for split type configs.""" def display_label(self) -> str: return self.__class__.__name__ def to_external_value(self) -> Any: return _serialize_config(self) @dataclass class MessageSplitConfig(SplitTypeConfig): """Configuration for message-based splitting. Each input message becomes one execution unit. No additional configuration needed. """ FIELD_SPECS: ClassVar[Dict[str, ConfigFieldSpec]] = {} @classmethod def from_dict(cls, data: Mapping[str, Any] | None, *, path: str) -> "MessageSplitConfig": # No config needed for message split return cls(path=path) def display_label(self) -> str: return "message" _NO_MATCH_DESCRIPTIONS = { "pass": "Leave the content unchanged when no match is found.", "empty": "Return empty content when no match is found.", } @dataclass class RegexSplitConfig(SplitTypeConfig): """Configuration for regex-based splitting. Split content by regex pattern matches. Each match becomes one execution unit. Attributes: pattern: Python regular expression used to split content. group: Capture group name or index. Defaults to the entire match (group 0). 
case_sensitive: Whether the regex should be case sensitive. multiline: Enable multiline mode (re.MULTILINE). dotall: Enable dotall mode (re.DOTALL). on_no_match: Behavior when no match is found. """ pattern: str = "" group: str | int | None = None case_sensitive: bool = True multiline: bool = False dotall: bool = False on_no_match: str = "pass" FIELD_SPECS = { "pattern": ConfigFieldSpec( name="pattern", display_name="Regex Pattern", type_hint="str", required=True, description="Python regular expression used to split content.", ), "group": ConfigFieldSpec( name="group", display_name="Capture Group", type_hint="str", required=False, description="Capture group name or index. Defaults to the entire match (group 0).", ), "case_sensitive": ConfigFieldSpec( name="case_sensitive", display_name="Case Sensitive", type_hint="bool", required=False, default=True, description="Whether the regex should be case sensitive.", ), "multiline": ConfigFieldSpec( name="multiline", display_name="Multiline Flag", type_hint="bool", required=False, default=False, description="Enable multiline mode (re.MULTILINE).", advance=True, ), "dotall": ConfigFieldSpec( name="dotall", display_name="Dotall Flag", type_hint="bool", required=False, default=False, description="Enable dotall mode (re.DOTALL).", advance=True, ), "on_no_match": ConfigFieldSpec( name="on_no_match", display_name="No Match Behavior", type_hint="enum", required=False, default="pass", enum=["pass", "empty"], description="Behavior when no match is found.", enum_options=enum_options_from_values( list(_NO_MATCH_DESCRIPTIONS.keys()), _NO_MATCH_DESCRIPTIONS, preserve_label_case=True, ), advance=True, ), } @classmethod def from_dict(cls, data: Mapping[str, Any], *, path: str) -> "RegexSplitConfig": mapping = require_mapping(data, path) pattern = require_str(mapping, "pattern", path, allow_empty=False) group_value = mapping.get("group") group_normalized: str | int | None = None if group_value is not None: if isinstance(group_value, int): 
group_normalized = group_value elif isinstance(group_value, str): if group_value.isdigit(): group_normalized = int(group_value) else: group_normalized = group_value else: raise ConfigError("group must be str or int", extend_path(path, "group")) case_sensitive = optional_bool(mapping, "case_sensitive", path, default=True) multiline = optional_bool(mapping, "multiline", path, default=False) dotall = optional_bool(mapping, "dotall", path, default=False) on_no_match = optional_str(mapping, "on_no_match", path) or "pass" if on_no_match not in {"pass", "empty"}: raise ConfigError("on_no_match must be 'pass' or 'empty'", extend_path(path, "on_no_match")) return cls( pattern=pattern, group=group_normalized, case_sensitive=True if case_sensitive is None else bool(case_sensitive), multiline=bool(multiline) if multiline is not None else False, dotall=bool(dotall) if dotall is not None else False, on_no_match=on_no_match, path=path, ) def display_label(self) -> str: return f"regex({self.pattern})" @dataclass class JsonPathSplitConfig(SplitTypeConfig): """Configuration for JSON path-based splitting. Split content by extracting array items from JSON using a path expression. Each array item becomes one execution unit. Attributes: json_path: Simple dot-notation path to array (e.g., 'items', 'data.results'). 
""" json_path: str = "" FIELD_SPECS = { "json_path": ConfigFieldSpec( name="json_path", display_name="JSON Path", type_hint="str", required=True, description="Simple dot-notation path to array (e.g., 'items', 'data.results').", ), } @classmethod def from_dict(cls, data: Mapping[str, Any], *, path: str) -> "JsonPathSplitConfig": mapping = require_mapping(data, path) json_path_value = require_str(mapping, "json_path", path, allow_empty=True) return cls(json_path=json_path_value, path=path) def display_label(self) -> str: return f"json_path({self.json_path})" # Registry for split types _SPLIT_TYPE_REGISTRY: Dict[str, Dict[str, Any]] = { "message": { "config_cls": MessageSplitConfig, "summary": "Each input message becomes one unit", }, "regex": { "config_cls": RegexSplitConfig, "summary": "Split by regex pattern matches", }, "json_path": { "config_cls": JsonPathSplitConfig, "summary": "Split by JSON array path", }, } def get_split_type_config(name: str) -> Type[SplitTypeConfig]: """Get the config class for a split type.""" entry = _SPLIT_TYPE_REGISTRY.get(name) if not entry: raise ConfigError(f"Unknown split type: {name}", None) return entry["config_cls"] def iter_split_type_registrations() -> Dict[str, Type[SplitTypeConfig]]: """Iterate over all registered split types.""" return {name: entry["config_cls"] for name, entry in _SPLIT_TYPE_REGISTRY.items()} def iter_split_type_metadata() -> Dict[str, Dict[str, Any]]: """Iterate over split type metadata.""" return {name: {"summary": entry.get("summary")} for name, entry in _SPLIT_TYPE_REGISTRY.items()} TSplitConfig = TypeVar("TSplitConfig", bound=SplitTypeConfig) @dataclass class SplitConfig(BaseConfig): """Configuration for how to split inputs into execution units. 
Attributes: type: Split strategy type (message, regex, json_path) config: Type-specific configuration """ type: str = "message" config: SplitTypeConfig | None = None FIELD_SPECS = { "type": ConfigFieldSpec( name="type", display_name="Split Type", type_hint="str", required=True, default="message", description="Strategy for splitting inputs into parallel execution units", ), "config": ConfigFieldSpec( name="config", display_name="Split Config", type_hint="object", required=False, description="Type-specific split configuration", ), } @classmethod def child_routes(cls) -> Dict[ChildKey, Type[BaseConfig]]: return { ChildKey(field="config", value=name): config_cls for name, config_cls in iter_split_type_registrations().items() } @classmethod def field_specs(cls) -> Dict[str, ConfigFieldSpec]: specs = super().field_specs() type_spec = specs.get("type") if type_spec: registrations = iter_split_type_registrations() metadata = iter_split_type_metadata() type_names = list(registrations.keys()) descriptions = {name: (metadata.get(name) or {}).get("summary") for name in type_names} specs["type"] = replace( type_spec, enum=type_names, enum_options=enum_options_from_values(type_names, descriptions), ) return specs @classmethod def from_dict(cls, data: Mapping[str, Any] | None, *, path: str) -> "SplitConfig": if data is None: # Default to message split return cls(type="message", config=MessageSplitConfig(path=extend_path(path, "config")), path=path) mapping = require_mapping(data, path) split_type = optional_str(mapping, "type", path) or "message" if split_type not in _SPLIT_TYPE_REGISTRY: raise ConfigError( f"split type must be one of {list(_SPLIT_TYPE_REGISTRY.keys())}, got '{split_type}'", extend_path(path, "type"), ) config_cls = get_split_type_config(split_type) config_data = mapping.get("config") config_path = extend_path(path, "config") # For message type, config is optional if split_type == "message": config = config_cls.from_dict(config_data, path=config_path) else: if 
config_data is None: raise ConfigError(f"{split_type} split requires 'config' field", path) config = config_cls.from_dict(config_data, path=config_path) return cls(type=split_type, config=config, path=path) def display_label(self) -> str: if self.config: return self.config.display_label() return self.type def to_external_value(self) -> Any: return { "type": self.type, "config": self.config.to_external_value() if self.config else {}, } def as_split_config(self, expected_type: Type[TSplitConfig]) -> TSplitConfig | None: """Return the nested config if it matches the expected type.""" if isinstance(self.config, expected_type): return self.config return None # Convenience properties for backward compatibility and easy access @property def pattern(self) -> Optional[str]: """Get regex pattern if this is a regex split.""" if isinstance(self.config, RegexSplitConfig): return self.config.pattern return None @property def json_path(self) -> Optional[str]: """Get json_path if this is a json_path split.""" if isinstance(self.config, JsonPathSplitConfig): return self.config.json_path return None @dataclass class MapDynamicConfig(BaseConfig): """Configuration for Map dynamic mode (fan-out only). Map mode is similar to passthrough - minimal config required. Attributes: max_parallel: Maximum concurrent executions """ max_parallel: int = 10 FIELD_SPECS = { "max_parallel": ConfigFieldSpec( name="max_parallel", display_name="Max Parallel", type_hint="int", required=False, default=10, description="Maximum number of parallel executions", ), } @classmethod def from_dict(cls, data: Mapping[str, Any] | None, *, path: str) -> "MapDynamicConfig": if data is None: return cls(path=path) mapping = require_mapping(data, path) max_parallel = int(mapping.get("max_parallel", 10)) return cls(max_parallel=max_parallel, path=path) @dataclass class TreeDynamicConfig(BaseConfig): """Configuration for Tree dynamic mode (fan-out and reduce). 
Attributes: group_size: Number of items per group in reduction max_parallel: Maximum concurrent executions per layer """ group_size: int = 3 max_parallel: int = 10 FIELD_SPECS = { "group_size": ConfigFieldSpec( name="group_size", display_name="Group Size", type_hint="int", required=False, default=3, description="Number of items per group during reduction", ), "max_parallel": ConfigFieldSpec( name="max_parallel", display_name="Max Parallel", type_hint="int", required=False, default=10, description="Maximum concurrent executions per layer", ), } @classmethod def from_dict(cls, data: Mapping[str, Any] | None, *, path: str) -> "TreeDynamicConfig": if data is None: return cls(path=path) mapping = require_mapping(data, path) group_size = int(mapping.get("group_size", 3)) if group_size < 2: raise ConfigError("group_size must be at least 2", extend_path(path, "group_size")) max_parallel = int(mapping.get("max_parallel", 10)) return cls(group_size=group_size, max_parallel=max_parallel, path=path)
{ "repo_id": "OpenBMB/ChatDev", "file_path": "entity/configs/dynamic_base.py", "license": "Apache License 2.0", "lines": 368, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:entity/configs/edge/dynamic_edge_config.py
"""Dynamic edge configuration for edge-level Map and Tree execution modes.""" from dataclasses import dataclass, field, replace from typing import Any, Dict, Mapping from entity.configs.base import ( BaseConfig, ConfigError, ConfigFieldSpec, ChildKey, extend_path, require_mapping, require_str, ) from entity.configs.dynamic_base import ( SplitConfig, MapDynamicConfig, TreeDynamicConfig, ) from entity.enum_options import enum_options_from_values from utils.registry import Registry, RegistryError # Local registry for edge-level dynamic types (reuses same type names) dynamic_edge_type_registry = Registry("dynamic_edge_type") def register_dynamic_edge_type( name: str, *, config_cls: type[BaseConfig], description: str | None = None, ) -> None: metadata = {"summary": description} if description else None dynamic_edge_type_registry.register(name, target=config_cls, metadata=metadata) def get_dynamic_edge_type_config(name: str) -> type[BaseConfig]: entry = dynamic_edge_type_registry.get(name) config_cls = entry.load() if not isinstance(config_cls, type) or not issubclass(config_cls, BaseConfig): raise RegistryError(f"Entry '{name}' is not a BaseConfig subclass") return config_cls def iter_dynamic_edge_type_registrations() -> Dict[str, type[BaseConfig]]: return {name: entry.load() for name, entry in dynamic_edge_type_registry.items()} def iter_dynamic_edge_type_metadata() -> Dict[str, Dict[str, Any]]: return {name: dict(entry.metadata or {}) for name, entry in dynamic_edge_type_registry.items()} @dataclass class DynamicEdgeConfig(BaseConfig): """Dynamic configuration for edge-level Map and Tree execution modes. When configured on an edge, the target node will be dynamically expanded based on the split results. The split logic is applied to messages passing through this edge. 
Attributes: type: Dynamic mode type (map or tree) split: How to split the payload passing through this edge config: Mode-specific configuration (MapDynamicConfig or TreeDynamicConfig) """ type: str split: SplitConfig = field(default_factory=lambda: SplitConfig()) config: BaseConfig | None = None FIELD_SPECS = { "type": ConfigFieldSpec( name="type", display_name="Dynamic Type", type_hint="str", required=True, description="Dynamic execution mode (map or tree)", ), "split": ConfigFieldSpec( name="split", display_name="Split Strategy", type_hint="SplitConfig", required=False, description="How to split the edge payload into parallel execution units", child=SplitConfig, ), "config": ConfigFieldSpec( name="config", display_name="Dynamic Config", type_hint="object", required=False, description="Mode-specific configuration", ), } @classmethod def child_routes(cls) -> Dict[ChildKey, type[BaseConfig]]: return { ChildKey(field="config", value=name): config_cls for name, config_cls in iter_dynamic_edge_type_registrations().items() } @classmethod def field_specs(cls) -> Dict[str, ConfigFieldSpec]: specs = super().field_specs() type_spec = specs.get("type") if type_spec: registrations = iter_dynamic_edge_type_registrations() metadata = iter_dynamic_edge_type_metadata() type_names = list(registrations.keys()) descriptions = {name: (metadata.get(name) or {}).get("summary") for name in type_names} specs["type"] = replace( type_spec, enum=type_names, enum_options=enum_options_from_values(type_names, descriptions), ) return specs @classmethod def from_dict(cls, data: Mapping[str, Any] | None, *, path: str) -> "DynamicEdgeConfig | None": if data is None: return None mapping = require_mapping(data, path) dynamic_type = require_str(mapping, "type", path) try: config_cls = get_dynamic_edge_type_config(dynamic_type) except RegistryError as exc: raise ConfigError( f"dynamic type must be one of {list(iter_dynamic_edge_type_registrations().keys())}", extend_path(path, "type"), ) from exc # 
Parse split at top level split_data = mapping.get("split") split = SplitConfig.from_dict(split_data, path=extend_path(path, "split")) # Parse mode-specific config config_data = mapping.get("config") config_path = extend_path(path, "config") config = config_cls.from_dict(config_data, path=config_path) return cls(type=dynamic_type, split=split, config=config, path=path) def is_map(self) -> bool: return self.type == "map" def is_tree(self) -> bool: return self.type == "tree" def as_map_config(self) -> MapDynamicConfig | None: return self.config if self.is_map() and isinstance(self.config, MapDynamicConfig) else None def as_tree_config(self) -> TreeDynamicConfig | None: return self.config if self.is_tree() and isinstance(self.config, TreeDynamicConfig) else None @property def max_parallel(self) -> int: """Get max_parallel from config.""" if hasattr(self.config, "max_parallel"): return getattr(self.config, "max_parallel") return 10 @property def group_size(self) -> int: """Get group_size (tree mode only, defaults to 3).""" if isinstance(self.config, TreeDynamicConfig): return self.config.group_size return 3 # Register dynamic edge types register_dynamic_edge_type( "map", config_cls=MapDynamicConfig, description="Fan-out only: split into parallel units and collect results", ) register_dynamic_edge_type( "tree", config_cls=TreeDynamicConfig, description="Fan-out and reduce: split into units, then iteratively reduce results", )
{ "repo_id": "OpenBMB/ChatDev", "file_path": "entity/configs/edge/dynamic_edge_config.py", "license": "Apache License 2.0", "lines": 150, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:entity/configs/edge/edge.py
"""Edge configuration dataclasses.""" from dataclasses import dataclass, field from typing import Any, Dict, Mapping from entity.configs.base import ( BaseConfig, ConfigFieldSpec, require_mapping, require_str, optional_bool, extend_path, ) from .edge_condition import EdgeConditionConfig from .edge_processor import EdgeProcessorConfig from .dynamic_edge_config import DynamicEdgeConfig @dataclass class EdgeConfig(BaseConfig): source: str target: str trigger: bool = True condition: EdgeConditionConfig | None = None carry_data: bool = True keep_message: bool = False clear_context: bool = False clear_kept_context: bool = False process: EdgeProcessorConfig | None = None dynamic: DynamicEdgeConfig | None = None @classmethod def from_dict(cls, data: Mapping[str, Any], *, path: str) -> "EdgeConfig": mapping = require_mapping(data, path) source = require_str(mapping, "from", path) target = require_str(mapping, "to", path) trigger_value = optional_bool(mapping, "trigger", path, default=True) carry_data_value = optional_bool(mapping, "carry_data", path, default=True) keep_message_value = optional_bool(mapping, "keep_message", path, default=False) clear_context_value = optional_bool(mapping, "clear_context", path, default=False) clear_kept_context_value = optional_bool(mapping, "clear_kept_context", path, default=False) condition_value = mapping.get("condition", "true") condition_cfg = EdgeConditionConfig.from_dict(condition_value, path=extend_path(path, "condition")) process_cfg = None if "process" in mapping and mapping["process"] is not None: process_cfg = EdgeProcessorConfig.from_dict(mapping["process"], path=extend_path(path, "process")) dynamic_cfg = None if "dynamic" in mapping and mapping["dynamic"] is not None: dynamic_cfg = DynamicEdgeConfig.from_dict(mapping["dynamic"], path=extend_path(path, "dynamic")) return cls( source=source, target=target, trigger=bool(trigger_value) if trigger_value is not None else True, condition=condition_cfg, 
carry_data=bool(carry_data_value) if carry_data_value is not None else True, keep_message=bool(keep_message_value) if keep_message_value is not None else False, clear_context=bool(clear_context_value) if clear_context_value is not None else False, clear_kept_context=bool(clear_kept_context_value) if clear_kept_context_value is not None else False, process=process_cfg, dynamic=dynamic_cfg, path=path, ) FIELD_SPECS = { "from": ConfigFieldSpec( name="from", display_name="Source Node ID", type_hint="str", required=True, description="Source node ID of the edge", ), "to": ConfigFieldSpec( name="to", display_name="Target Node ID", type_hint="str", required=True, description="Target node ID of the edge", ), "trigger": ConfigFieldSpec( name="trigger", type_hint="bool", required=False, default=True, display_name="Can Trigger Successor", description="Whether this edge can trigger successor nodes", advance=True, ), "condition": ConfigFieldSpec( name="condition", type_hint="EdgeConditionConfig", required=False, display_name="Edge Condition", description="Edge condition configuration(type + config)", advance=True, child=EdgeConditionConfig, ), "carry_data": ConfigFieldSpec( name="carry_data", type_hint="bool", required=False, default=True, display_name="Pass Data to Target", description="Whether to pass data to the target node", advance=True, ), "keep_message": ConfigFieldSpec( name="keep_message", type_hint="bool", required=False, default=False, display_name="Keep Message Input", description="Whether to always keep this message input in the target node without being cleared", advance=True, ), "clear_context": ConfigFieldSpec( name="clear_context", type_hint="bool", required=False, default=False, display_name="Clear Context", description="Clear all incoming context messages without keep=True before passing new payload", advance=True, ), "clear_kept_context": ConfigFieldSpec( name="clear_kept_context", type_hint="bool", required=False, default=False, display_name="Clear Kept 
Context", description="Clear messages marked with keep=True before passing new payload", advance=True, ), "process": ConfigFieldSpec( name="process", type_hint="EdgeProcessorConfig", required=False, display_name="Payload Processor", description="Optional payload processor applied after the condition is met (regex extraction, custom functions, etc.)", advance=True, child=EdgeProcessorConfig, ), "dynamic": ConfigFieldSpec( name="dynamic", type_hint="DynamicEdgeConfig", required=False, display_name="Dynamic Expansion", description="Dynamic expansion configuration for edge-level Map (fan-out) or Tree (fan-out + reduce) modes. When set, the target node is dynamically expanded based on split results.", advance=True, child=DynamicEdgeConfig, ), }
{ "repo_id": "OpenBMB/ChatDev", "file_path": "entity/configs/edge/edge.py", "license": "Apache License 2.0", "lines": 145, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:entity/configs/edge/edge_condition.py
"""Edge condition configuration models.""" from dataclasses import dataclass, field, fields, replace from typing import Any, Dict, Mapping, Type, TypeVar, cast from entity.enum_options import enum_options_from_values from schema_registry import ( SchemaLookupError, get_edge_condition_schema, iter_edge_condition_schemas, ) from entity.configs.base import ( BaseConfig, ChildKey, ConfigError, ConfigFieldSpec, ensure_list, optional_bool, require_mapping, require_str, extend_path, ) from utils.function_catalog import get_function_catalog from utils.function_manager import EDGE_FUNCTION_DIR def _serialize_config(config: BaseConfig) -> Dict[str, Any]: payload: Dict[str, Any] = {} for field_obj in fields(config): if field_obj.name == "path": continue payload[field_obj.name] = getattr(config, field_obj.name) return payload class EdgeConditionTypeConfig(BaseConfig): """Base helper for condition-specific configuration classes.""" def display_label(self) -> str: return self.__class__.__name__ def to_external_value(self) -> Any: return _serialize_config(self) @dataclass class FunctionEdgeConditionConfig(EdgeConditionTypeConfig): """Configuration for function-based conditions.""" name: str = "true" FIELD_SPECS = { "name": ConfigFieldSpec( name="name", display_name="Function Name", type_hint="str", required=True, default="true", description="Function Name or 'true' (indicating perpetual satisfaction)", ) } @classmethod def from_dict(cls, data: Mapping[str, Any] | None, *, path: str) -> "FunctionEdgeConditionConfig": if data is None: return cls(name="true", path=path) mapping = require_mapping(data, path) function_name = require_str(mapping, "name", path, allow_empty=False) return cls(name=function_name, path=path) @classmethod def field_specs(cls) -> Dict[str, ConfigFieldSpec]: specs = super().field_specs() name_spec = specs.get("name") if name_spec is None: return specs catalog = get_function_catalog(EDGE_FUNCTION_DIR) names = catalog.list_function_names() metadata = 
catalog.list_metadata() description = name_spec.description or "Conditional function name" if catalog.load_error: description = f"{description} (Loading failed: {catalog.load_error})" elif not names: description = f"{description} (No available conditional functions found)" if "true" not in names: names.insert(0, "true") descriptions = {"true": "Default condition (always met)"} for name in names: if name == "true": continue meta = metadata.get(name) descriptions[name] = (meta.description if meta else None) or "The conditional function is not described." specs["name"] = replace( name_spec, enum=names or None, enum_options=enum_options_from_values(names, descriptions, preserve_label_case=True), description=description, ) return specs def display_label(self) -> str: return self.name or "true" def to_external_value(self) -> Any: return self.name or "true" def _normalize_keyword_list(value: Any, path: str) -> list[str]: items = ensure_list(value) normalized: list[str] = [] for idx, item in enumerate(items): if not isinstance(item, str): raise ConfigError("entries must be strings", extend_path(path, f"[{idx}]")) normalized.append(item) return normalized @dataclass class KeywordEdgeConditionConfig(EdgeConditionTypeConfig): """Configuration for declarative keyword checks.""" any_keywords: list[str] = field(default_factory=list) none_keywords: list[str] = field(default_factory=list) regex_patterns: list[str] = field(default_factory=list) case_sensitive: bool = True default: bool = False FIELD_SPECS = { "any": ConfigFieldSpec( name="any", display_name="Contains keywords", type_hint="list[str]", required=False, description="Returns True if any keyword is matched.", ), "none": ConfigFieldSpec( name="none", display_name="Exclude keywords", type_hint="list[str]", required=False, description="If any of the excluded keywords are matched, return False (highest priority).", ), "regex": ConfigFieldSpec( name="regex", display_name="Regular expressions", type_hint="list[str]", 
required=False, description="Returns True if any regular expression is matched.", advance=True, ), "case_sensitive": ConfigFieldSpec( name="case_sensitive", display_name="case sensitive", type_hint="bool", required=False, default=True, description="Whether to distinguish between uppercase and lowercase letters (default is true).", ), # "default": ConfigFieldSpec( # name="default", # display_name="Default Result", # type_hint="bool", # required=False, # default=False, # description="Return value when no condition matches; defaults to False", # advance=True, # ), } @classmethod def from_dict(cls, data: Mapping[str, Any], *, path: str) -> "KeywordEdgeConditionConfig": mapping = require_mapping(data, path) any_keywords = _normalize_keyword_list(mapping.get("any", []), extend_path(path, "any")) none_keywords = _normalize_keyword_list(mapping.get("none", []), extend_path(path, "none")) regex_patterns = _normalize_keyword_list(mapping.get("regex", []), extend_path(path, "regex")) case_sensitive = optional_bool(mapping, "case_sensitive", path, default=True) default_value = optional_bool(mapping, "default", path, default=False) if not (any_keywords or none_keywords or regex_patterns): raise ConfigError("keyword condition requires any/none/regex", path) return cls( any_keywords=any_keywords, none_keywords=none_keywords, regex_patterns=regex_patterns, case_sensitive=True if case_sensitive is None else bool(case_sensitive), default=False if default_value is None else bool(default_value), path=path, ) def display_label(self) -> str: return f"keyword(any={len(self.any_keywords)}, none={len(self.none_keywords)}, regex={len(self.regex_patterns)})" def to_external_value(self) -> Any: payload: Dict[str, Any] = {} if self.any_keywords: payload["any"] = list(self.any_keywords) if self.none_keywords: payload["none"] = list(self.none_keywords) if self.regex_patterns: payload["regex"] = list(self.regex_patterns) payload["case_sensitive"] = self.case_sensitive payload["default"] = 
self.default return payload TConditionConfig = TypeVar("TConditionConfig", bound=EdgeConditionTypeConfig) @dataclass class EdgeConditionConfig(BaseConfig): """Wrapper config that stores condition type + concrete config.""" type: str config: EdgeConditionTypeConfig FIELD_SPECS = { "type": ConfigFieldSpec( name="type", display_name="Condition Type", type_hint="str", required=True, description="Select which condition implementation to run (function, keyword, etc.) so the engine can resolve the schema.", ), "config": ConfigFieldSpec( name="config", display_name="Condition Config", type_hint="object", required=True, description="Payload interpreted by the chosen function or any/none/regex lists for keyword mode.", ), } @classmethod def _normalize_value(cls, value: Any, path: str) -> Mapping[str, Any]: if value is None: return {"type": "function", "config": {"name": "true"}} if isinstance(value, bool): if value: return {"type": "function", "config": {"name": "true"}} return {"type": "function", "config": {"name": "always_false"}} if isinstance(value, str): return {"type": "function", "config": {"name": value}} return require_mapping(value, path) @classmethod def from_dict(cls, data: Any, *, path: str) -> "EdgeConditionConfig": mapping = cls._normalize_value(data, path) condition_type = require_str(mapping, "type", path) config_payload = mapping.get("config") config_path = extend_path(path, "config") try: schema = get_edge_condition_schema(condition_type) except SchemaLookupError as exc: raise ConfigError(f"unknown condition type '{condition_type}'", extend_path(path, "type")) from exc if config_payload is None: raise ConfigError("condition config is required", config_path) condition_config = schema.config_cls.from_dict(config_payload, path=config_path) return cls(type=condition_type, config=condition_config, path=path) @classmethod def child_routes(cls) -> Dict[ChildKey, Type[BaseConfig]]: return { ChildKey(field="config", value=name): schema.config_cls for name, schema 
in iter_edge_condition_schemas().items() } @classmethod def field_specs(cls) -> Dict[str, ConfigFieldSpec]: specs = super().field_specs() type_spec = specs.get("type") if type_spec: registrations = iter_edge_condition_schemas() names = list(registrations.keys()) descriptions = {name: schema.summary for name, schema in registrations.items()} specs["type"] = replace( type_spec, enum=names, enum_options=enum_options_from_values(names, descriptions, preserve_label_case=True), ) return specs def display_label(self) -> str: return self.config.display_label() def to_external_value(self) -> Any: if self.type == "function": return self.config.to_external_value() return { "type": self.type, "config": self.config.to_external_value(), } def as_config(self, expected_type: Type[TConditionConfig]) -> TConditionConfig | None: config = self.config if isinstance(config, expected_type): return cast(TConditionConfig, config) return None
{ "repo_id": "OpenBMB/ChatDev", "file_path": "entity/configs/edge/edge_condition.py", "license": "Apache License 2.0", "lines": 258, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:entity/configs/edge/edge_processor.py
"""Edge payload processor configuration dataclasses.""" from dataclasses import dataclass, field, fields, replace from typing import Any, Dict, Mapping, Type, TypeVar, cast from entity.enum_options import enum_options_from_values from utils.function_catalog import get_function_catalog from utils.function_manager import EDGE_PROCESSOR_FUNCTION_DIR from schema_registry import ( SchemaLookupError, get_edge_processor_schema, iter_edge_processor_schemas, ) from entity.configs.base import ( BaseConfig, ChildKey, ConfigError, ConfigFieldSpec, ensure_list, optional_bool, optional_str, require_mapping, require_str, extend_path, ) def _serialize_config(config: BaseConfig) -> Dict[str, Any]: payload: Dict[str, Any] = {} for field_obj in fields(config): if field_obj.name == "path": continue payload[field_obj.name] = getattr(config, field_obj.name) return payload class EdgeProcessorTypeConfig(BaseConfig): """Base helper class for payload processor configs.""" def display_label(self) -> str: return self.__class__.__name__ def to_external_value(self) -> Any: return _serialize_config(self) _NO_MATCH_DESCRIPTIONS = { "pass": "Leave the payload untouched when no match is found.", "default": "Apply default_value (or empty string) if nothing matches.", "drop": "Discard the payload entirely when the regex does not match.", } @dataclass class RegexEdgeProcessorConfig(EdgeProcessorTypeConfig): """Configuration for regex-based payload extraction.""" pattern: str = "" group: str | int | None = None case_sensitive: bool = True multiline: bool = False dotall: bool = False multiple: bool = False template: str | None = None on_no_match: str = "pass" default_value: str | None = None FIELD_SPECS = { "pattern": ConfigFieldSpec( name="pattern", display_name="Regex Pattern", type_hint="str", required=True, description="Python regular expression used to extract content.", ), "group": ConfigFieldSpec( name="group", display_name="Capture Group", type_hint="str", required=False, description="Capture 
group name or index. Defaults to the entire match.", ), "case_sensitive": ConfigFieldSpec( name="case_sensitive", display_name="Case Sensitive", type_hint="bool", required=False, default=True, description="Whether the regex should be case sensitive.", ), "multiline": ConfigFieldSpec( name="multiline", display_name="Multiline Flag", type_hint="bool", required=False, default=False, description="Enable multiline mode (re.MULTILINE).", advance=True, ), "dotall": ConfigFieldSpec( name="dotall", display_name="Dotall Flag", type_hint="bool", required=False, default=False, description="Enable dotall mode (re.DOTALL).", advance=True, ), "multiple": ConfigFieldSpec( name="multiple", display_name="Return Multiple Matches", type_hint="bool", required=False, default=False, description="Whether to collect all matches instead of only the first.", advance=True, ), "template": ConfigFieldSpec( name="template", display_name="Output Template", type_hint="str", required=False, description="Optional template applied to the extracted value. 
Use '{match}' placeholder.", advance=True, ), "on_no_match": ConfigFieldSpec( name="on_no_match", display_name="No Match Behavior", type_hint="enum", required=False, default="pass", enum=["pass", "default", "drop"], description="Behavior when no match is found.", enum_options=enum_options_from_values( list(_NO_MATCH_DESCRIPTIONS.keys()), _NO_MATCH_DESCRIPTIONS, preserve_label_case=True, ), advance=True, ), "default_value": ConfigFieldSpec( name="default_value", display_name="Default Value", type_hint="str", required=False, description="Fallback content when on_no_match=default.", advance=True, ), } @classmethod def from_dict(cls, data: Mapping[str, Any], *, path: str) -> "RegexEdgeProcessorConfig": mapping = require_mapping(data, path) pattern = require_str(mapping, "pattern", path, allow_empty=False) group_value = mapping.get("group") group_normalized: str | int | None = None if group_value is not None: if isinstance(group_value, int): group_normalized = group_value elif isinstance(group_value, str): if group_value.isdigit(): group_normalized = int(group_value) else: group_normalized = group_value else: raise ConfigError("group must be str or int", extend_path(path, "group")) multiple = optional_bool(mapping, "multiple", path, default=False) case_sensitive = optional_bool(mapping, "case_sensitive", path, default=True) multiline = optional_bool(mapping, "multiline", path, default=False) dotall = optional_bool(mapping, "dotall", path, default=False) on_no_match = optional_str(mapping, "on_no_match", path) or "pass" if on_no_match not in {"pass", "default", "drop"}: raise ConfigError("on_no_match must be pass, default or drop", extend_path(path, "on_no_match")) template = optional_str(mapping, "template", path) default_value = optional_str(mapping, "default_value", path) return cls( pattern=pattern, group=group_normalized, case_sensitive=True if case_sensitive is None else bool(case_sensitive), multiline=bool(multiline) if multiline is not None else False, 
dotall=bool(dotall) if dotall is not None else False, multiple=bool(multiple) if multiple is not None else False, template=template, on_no_match=on_no_match, default_value=default_value, path=path, ) def display_label(self) -> str: return f"regex({self.pattern})" @dataclass class FunctionEdgeProcessorConfig(EdgeProcessorTypeConfig): """Configuration for function-based payload processors.""" name: str = "" FIELD_SPECS = { "name": ConfigFieldSpec( name="name", display_name="Function Name", type_hint="str", required=True, description="Name of the Python function located in functions/edge_processor.", ) } @classmethod def field_specs(cls) -> Dict[str, ConfigFieldSpec]: specs = super().field_specs() name_spec = specs.get("name") if not name_spec: return specs catalog = get_function_catalog(EDGE_PROCESSOR_FUNCTION_DIR) names = catalog.list_function_names() metadata = catalog.list_metadata() description = name_spec.description or "Processor function name" if catalog.load_error: description = f"{description} (Loading failed: {catalog.load_error})" elif not names: description = f"{description} (No processor functions found in functions/edge_processor)" descriptions = {} for name in names: meta = metadata.get(name) descriptions[name] = (meta.description if meta else None) or "No description provided." 
specs["name"] = replace( name_spec, enum=names or None, enum_options=enum_options_from_values(names, descriptions, preserve_label_case=True) if names else None, description=description, ) return specs @classmethod def from_dict(cls, data: Mapping[str, Any], *, path: str) -> "FunctionEdgeProcessorConfig": mapping = require_mapping(data, path) name = require_str(mapping, "name", path, allow_empty=False) return cls(name=name, path=path) def display_label(self) -> str: return self.name or "function" def to_external_value(self) -> Any: return {"name": self.name} TProcessorConfig = TypeVar("TProcessorConfig", bound=EdgeProcessorTypeConfig) @dataclass class EdgeProcessorConfig(BaseConfig): """Wrapper config storing processor type and payload.""" type: str config: EdgeProcessorTypeConfig FIELD_SPECS = { "type": ConfigFieldSpec( name="type", display_name="Processor Type", type_hint="str", required=True, description="Select which processor implementation to use (regex_extract, function, etc.).", ), "config": ConfigFieldSpec( name="config", display_name="Processor Config", type_hint="object", required=True, description="Payload interpreted by the selected processor.", ), } @classmethod def from_dict(cls, data: Any, *, path: str) -> "EdgeProcessorConfig": if data is None: raise ConfigError("processor configuration cannot be null", path) mapping = require_mapping(data, path) processor_type = require_str(mapping, "type", path) config_payload = mapping.get("config") if config_payload is None: raise ConfigError("processor config is required", extend_path(path, "config")) try: schema = get_edge_processor_schema(processor_type) except SchemaLookupError as exc: raise ConfigError(f"unknown processor type '{processor_type}'", extend_path(path, "type")) from exc processor_config = schema.config_cls.from_dict(config_payload, path=extend_path(path, "config")) return cls(type=processor_type, config=processor_config, path=path) @classmethod def child_routes(cls) -> Dict[ChildKey, 
Type[BaseConfig]]: return { ChildKey(field="config", value=name): schema.config_cls for name, schema in iter_edge_processor_schemas().items() } @classmethod def field_specs(cls) -> Dict[str, ConfigFieldSpec]: specs = super().field_specs() type_spec = specs.get("type") if type_spec: registrations = iter_edge_processor_schemas() names = list(registrations.keys()) descriptions = {name: schema.summary for name, schema in registrations.items()} specs["type"] = replace( type_spec, enum=names, enum_options=enum_options_from_values(names, descriptions, preserve_label_case=True), ) return specs def display_label(self) -> str: return self.config.display_label() def to_external_value(self) -> Any: return { "type": self.type, "config": self.config.to_external_value(), } def as_config(self, expected_type: Type[TProcessorConfig]) -> TProcessorConfig | None: config = self.config if isinstance(config, expected_type): return cast(TProcessorConfig, config) return None
{ "repo_id": "OpenBMB/ChatDev", "file_path": "entity/configs/edge/edge_processor.py", "license": "Apache License 2.0", "lines": 290, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:entity/configs/graph.py
"""Graph-level configuration dataclasses.""" from dataclasses import dataclass, field from collections import Counter from typing import Any, Dict, List, Mapping from entity.enums import LogLevel from entity.enum_options import enum_options_for from .base import ( BaseConfig, ConfigError, ConfigFieldSpec, ensure_list, optional_bool, optional_dict, optional_str, require_mapping, extend_path, ) from .edge import EdgeConfig from entity.configs.node.memory import MemoryStoreConfig from entity.configs.node.agent import AgentConfig from entity.configs.node.node import Node @dataclass class GraphDefinition(BaseConfig): id: str | None description: str | None log_level: LogLevel is_majority_voting: bool nodes: List[Node] = field(default_factory=list) edges: List[EdgeConfig] = field(default_factory=list) memory: List[MemoryStoreConfig] | None = None organization: str | None = None initial_instruction: str | None = None start_nodes: List[str] = field(default_factory=list) end_nodes: List[str] | None = None FIELD_SPECS = { "id": ConfigFieldSpec( name="id", display_name="Graph ID", type_hint="str", required=True, description="Graph identifier for referencing. 
Can only contain alphanumeric characters, underscores or hyphens, no spaces", ), "description": ConfigFieldSpec( name="description", display_name="Graph Description", type_hint="text", required=False, description="Human-readable narrative shown in UI/templates that explains the workflow goal, scope, and manual touchpoints.", ), "log_level": ConfigFieldSpec( name="log_level", display_name="Log Level", type_hint="enum:LogLevel", required=False, default=LogLevel.DEBUG.value, enum=[lvl.value for lvl in LogLevel], description="Runtime log level", advance=True, enum_options=enum_options_for(LogLevel), ), "is_majority_voting": ConfigFieldSpec( name="is_majority_voting", display_name="Majority Voting Mode", type_hint="bool", required=False, default=False, description="Whether this is a majority voting graph", advance=True, ), "nodes": ConfigFieldSpec( name="nodes", display_name="Node List", type_hint="list[Node]", required=False, description="Node list, must contain at least one node", child=Node, ), "edges": ConfigFieldSpec( name="edges", display_name="Edge List", type_hint="list[EdgeConfig]", required=False, description="Directed edges between nodes", child=EdgeConfig, ), "memory": ConfigFieldSpec( name="memory", display_name="Memory Stores", type_hint="list[MemoryStoreConfig]", required=False, description="Optional list of memory stores that nodes can reference through their model.memories attachments.", child=MemoryStoreConfig, ), # "organization": ConfigFieldSpec( # name="organization", # display_name="Organization Name", # type_hint="str", # required=False, # description="Organization name", # ), "initial_instruction": ConfigFieldSpec( name="initial_instruction", display_name="Initial Instruction", type_hint="text", required=False, description="Graph level initial instruction (for user)", ), "start": ConfigFieldSpec( name="start", display_name="Start Node", type_hint="list[str]", required=False, description="Start node ID list (entry list executed at workflow start; 
not recommended to edit manually)", advance=True, ), "end": ConfigFieldSpec( name="end", display_name="End Node", type_hint="list[str]", required=False, description="End node ID list (used to collect final graph output, not part of execution logic). Commonly needed in subgraphs. This is an ordered list: earlier nodes are checked first; the first with output becomes the graph output, otherwise continue down the list.", advance=True, ), } # CONSTRAINTS = ( # RuntimeConstraint( # when={"memory": "*"}, # require=["memory"], # message="After defining memory, at least one store must be declared", # ), # ) @classmethod def from_dict(cls, data: Mapping[str, Any], *, path: str) -> "GraphDefinition": mapping = require_mapping(data, path) graph_id = optional_str(mapping, "id", path) description = optional_str(mapping, "description", path) if "vars" in mapping and mapping["vars"]: raise ConfigError("vars are only supported at DesignConfig root", extend_path(path, "vars")) log_level_raw = mapping.get("log_level", LogLevel.DEBUG.value) try: log_level = LogLevel(log_level_raw) except ValueError as exc: raise ConfigError( f"log_level must be one of {[lvl.value for lvl in LogLevel]}", extend_path(path, "log_level") ) from exc is_majority = optional_bool(mapping, "is_majority_voting", path, default=False) organization = optional_str(mapping, "organization", path) initial_instruction = optional_str(mapping, "initial_instruction", path) nodes_raw = ensure_list(mapping.get("nodes")) # if not nodes_raw: # raise ConfigError("graph must define at least one node", extend_path(path, "nodes")) nodes: List[Node] = [] for idx, node_dict in enumerate(nodes_raw): nodes.append(Node.from_dict(node_dict, path=extend_path(path, f"nodes[{idx}]"))) edges_raw = ensure_list(mapping.get("edges")) edges: List[EdgeConfig] = [] for idx, edge_dict in enumerate(edges_raw): edges.append(EdgeConfig.from_dict(edge_dict, path=extend_path(path, f"edges[{idx}]"))) memory_cfg: List[MemoryStoreConfig] | None = None 
if "memory" in mapping and mapping["memory"] is not None: raw_stores = ensure_list(mapping.get("memory")) stores: List[MemoryStoreConfig] = [] seen: set[str] = set() for idx, item in enumerate(raw_stores): store = MemoryStoreConfig.from_dict(item, path=extend_path(path, f"memory[{idx}]")) if store.name in seen: raise ConfigError( f"duplicated memory store name '{store.name}'", extend_path(path, f"memory[{idx}].name"), ) seen.add(store.name) stores.append(store) memory_cfg = stores start_nodes: List[str] = [] if "start" in mapping and mapping["start"] is not None: start_value = mapping["start"] if isinstance(start_value, str): start_nodes = [start_value] elif isinstance(start_value, list) and all(isinstance(item, str) for item in start_value): seen = set() start_nodes = [] for item in start_value: if item not in seen: seen.add(item) start_nodes.append(item) else: raise ConfigError("start must be a string or list of strings if provided", extend_path(path, "start")) end_nodes = None if "end" in mapping and mapping["end"] is not None: end_value = mapping["end"] if isinstance(end_value, str): end_nodes = [end_value] elif isinstance(end_value, list) and all(isinstance(item, str) for item in end_value): end_nodes = list(end_value) else: raise ConfigError("end must be a string or list of strings", extend_path(path, "end")) definition = cls( id=graph_id, description=description, log_level=log_level, is_majority_voting=bool(is_majority) if is_majority is not None else False, nodes=nodes, edges=edges, memory=memory_cfg, organization=organization, initial_instruction=initial_instruction, start_nodes=start_nodes, end_nodes=end_nodes, path=path, ) definition.validate() return definition def validate(self) -> None: node_ids = [node.id for node in self.nodes] counts = Counter(node_ids) duplicates = [nid for nid, count in counts.items() if count > 1] if duplicates: dup_list = ", ".join(sorted(duplicates)) raise ConfigError(f"duplicate node ids detected: {dup_list}", 
extend_path(self.path, "nodes")) node_set = set(node_ids) for start_node in self.start_nodes: if start_node not in node_set: raise ConfigError( f"start node '{start_node}' not defined in nodes", extend_path(self.path, "start"), ) for edge in self.edges: if edge.source not in node_set: raise ConfigError( f"edge references unknown source node '{edge.source}'", extend_path(self.path, f"edges->{edge.source}->{edge.target}"), ) if edge.target not in node_set: raise ConfigError( f"edge references unknown target node '{edge.target}'", extend_path(self.path, f"edges->{edge.source}->{edge.target}"), ) store_names = {store.name for store in self.memory} if self.memory else set() for node in self.nodes: model = node.as_config(AgentConfig) if model: for attachment in model.memories: if attachment.name not in store_names: raise ConfigError( f"memory reference '{attachment.name}' not defined in graph.memory", attachment.path or extend_path(node.path, "config.memories"), ) @dataclass class DesignConfig(BaseConfig): version: str vars: Dict[str, Any] graph: GraphDefinition FIELD_SPECS = { "version": ConfigFieldSpec( name="version", display_name="Configuration Version", type_hint="str", required=False, default="0.0.0", description="Configuration version number", advance=True, ), "vars": ConfigFieldSpec( name="vars", display_name="Global Variables", type_hint="dict[str, Any]", required=False, default={}, description="Global variables that can be referenced via ${VAR}", ), "graph": ConfigFieldSpec( name="graph", display_name="Graph Definition", type_hint="GraphDefinition", required=True, description="Core graph definition", child=GraphDefinition, ), } @classmethod def from_dict(cls, data: Mapping[str, Any], *, path: str = "root") -> "DesignConfig": mapping = require_mapping(data, path) version = optional_str(mapping, "version", path) or "0.0.0" vars_block = optional_dict(mapping, "vars", path) or {} if "graph" not in mapping or mapping["graph"] is None: raise ConfigError("graph 
section is required", extend_path(path, "graph")) graph = GraphDefinition.from_dict(mapping["graph"], path=extend_path(path, "graph")) return cls(version=version, vars=vars_block, graph=graph, path=path)
{ "repo_id": "OpenBMB/ChatDev", "file_path": "entity/configs/graph.py", "license": "Apache License 2.0", "lines": 288, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:entity/configs/node/agent.py
"""Agent-specific configuration dataclasses.""" from dataclasses import dataclass, field, replace from typing import Any, Dict, Iterable, List, Mapping, Sequence try: # pragma: no cover - Python < 3.11 lacks BaseExceptionGroup from builtins import BaseExceptionGroup as _BASE_EXCEPTION_GROUP_TYPE # type: ignore[attr-defined] except ImportError: # pragma: no cover _BASE_EXCEPTION_GROUP_TYPE = None # type: ignore[assignment] from entity.enums import AgentInputMode from schema_registry import iter_model_provider_schemas from utils.strs import titleize from entity.configs.base import ( BaseConfig, ConfigError, ConfigFieldSpec, EnumOption, optional_bool, optional_dict, optional_str, require_mapping, require_str, extend_path, ) from .memory import MemoryAttachmentConfig from .thinking import ThinkingConfig from entity.configs.node.tooling import ToolingConfig DEFAULT_RETRYABLE_STATUS_CODES = [408, 409, 425, 429, 500, 502, 503, 504] DEFAULT_RETRYABLE_EXCEPTION_TYPES = [ "RateLimitError", "APITimeoutError", "APIError", "APIConnectionError", "ServiceUnavailableError", "TimeoutError", "InternalServerError", "RemoteProtocolError", "TransportError", "ConnectError", "ConnectTimeout", "ReadError", "ReadTimeout", ] DEFAULT_RETRYABLE_MESSAGE_SUBSTRINGS = [ "rate limit", "temporarily unavailable", "timeout", "server disconnected", "connection reset", ] def _coerce_float(value: Any, *, field_path: str, minimum: float = 0.0) -> float: if isinstance(value, (int, float)): coerced = float(value) else: raise ConfigError("expected number", field_path) if coerced < minimum: raise ConfigError(f"value must be >= {minimum}", field_path) return coerced def _coerce_positive_int(value: Any, *, field_path: str, minimum: int = 1) -> int: if isinstance(value, bool): raise ConfigError("expected integer", field_path) if isinstance(value, int): coerced = value else: raise ConfigError("expected integer", field_path) if coerced < minimum: raise ConfigError(f"value must be >= {minimum}", field_path) 
return coerced def _coerce_str_list(value: Any, *, field_path: str) -> List[str]: if value is None: return [] if not isinstance(value, Sequence) or isinstance(value, (str, bytes)): raise ConfigError("expected list of strings", field_path) result: List[str] = [] for idx, item in enumerate(value): if not isinstance(item, str): raise ConfigError("expected list of strings", f"{field_path}[{idx}]") result.append(item.strip()) return result def _coerce_int_list(value: Any, *, field_path: str) -> List[int]: if value is None: return [] if not isinstance(value, Sequence) or isinstance(value, (str, bytes)): raise ConfigError("expected list of integers", field_path) ints: List[int] = [] for idx, item in enumerate(value): if isinstance(item, bool) or not isinstance(item, int): raise ConfigError("expected list of integers", f"{field_path}[{idx}]") ints.append(item) return ints @dataclass class AgentRetryConfig(BaseConfig): enabled: bool = True max_attempts: int = 5 min_wait_seconds: float = 1.0 max_wait_seconds: float = 6.0 retry_on_status_codes: List[int] = field(default_factory=lambda: list(DEFAULT_RETRYABLE_STATUS_CODES)) retry_on_exception_types: List[str] = field(default_factory=lambda: [name.lower() for name in DEFAULT_RETRYABLE_EXCEPTION_TYPES]) non_retry_exception_types: List[str] = field(default_factory=list) retry_on_error_substrings: List[str] = field(default_factory=lambda: list(DEFAULT_RETRYABLE_MESSAGE_SUBSTRINGS)) FIELD_SPECS = { "enabled": ConfigFieldSpec( name="enabled", display_name="Enable Retry", type_hint="bool", required=False, default=True, description="Toggle automatic retry for provider calls", ), "max_attempts": ConfigFieldSpec( name="max_attempts", display_name="Max Attempts", type_hint="int", required=False, default=5, description="Maximum number of total attempts (initial call + retries)", ), "min_wait_seconds": ConfigFieldSpec( name="min_wait_seconds", display_name="Min Wait Seconds", type_hint="float", required=False, default=1.0, 
description="Minimum backoff wait before retry", advance=True, ), "max_wait_seconds": ConfigFieldSpec( name="max_wait_seconds", display_name="Max Wait Seconds", type_hint="float", required=False, default=6.0, description="Maximum backoff wait before retry", advance=True, ), "retry_on_status_codes": ConfigFieldSpec( name="retry_on_status_codes", display_name="Retryable Status Codes", type_hint="list[int]", required=False, description="HTTP status codes that should trigger a retry", advance=True, ), "retry_on_exception_types": ConfigFieldSpec( name="retry_on_exception_types", display_name="Retryable Exception Types", type_hint="list[str]", required=False, description="Exception class names (case-insensitive) that should trigger retries", advance=True, ), "non_retry_exception_types": ConfigFieldSpec( name="non_retry_exception_types", display_name="Non-Retryable Exception Types", type_hint="list[str]", required=False, description="Exception class names (case-insensitive) that should never retry", advance=True, ), "retry_on_error_substrings": ConfigFieldSpec( name="retry_on_error_substrings", display_name="Retryable Message Substrings", type_hint="list[str]", required=False, description="Substring matches within exception messages that enable retry", advance=True, ), } @classmethod def from_dict(cls, data: Mapping[str, Any], *, path: str) -> "AgentRetryConfig": mapping = require_mapping(data, path) enabled = optional_bool(mapping, "enabled", path, default=True) if enabled is None: enabled = True max_attempts = _coerce_positive_int(mapping.get("max_attempts", 5), field_path=extend_path(path, "max_attempts")) min_wait = _coerce_float(mapping.get("min_wait_seconds", 1.0), field_path=extend_path(path, "min_wait_seconds"), minimum=0.0) max_wait = _coerce_float(mapping.get("max_wait_seconds", 6.0), field_path=extend_path(path, "max_wait_seconds"), minimum=0.0) if max_wait < min_wait: raise ConfigError("max_wait_seconds must be >= min_wait_seconds", extend_path(path, 
"max_wait_seconds")) status_codes = mapping.get("retry_on_status_codes") if status_codes is None: retry_status_codes = list(DEFAULT_RETRYABLE_STATUS_CODES) else: retry_status_codes = _coerce_int_list(status_codes, field_path=extend_path(path, "retry_on_status_codes")) retry_types_raw = mapping.get("retry_on_exception_types") if retry_types_raw is None: retry_types = [name.lower() for name in DEFAULT_RETRYABLE_EXCEPTION_TYPES] else: retry_types = [value.lower() for value in _coerce_str_list(retry_types_raw, field_path=extend_path(path, "retry_on_exception_types")) if value] non_retry_types = [value.lower() for value in _coerce_str_list(mapping.get("non_retry_exception_types"), field_path=extend_path(path, "non_retry_exception_types")) if value] retry_substrings_raw = mapping.get("retry_on_error_substrings") if retry_substrings_raw is None: retry_substrings = list(DEFAULT_RETRYABLE_MESSAGE_SUBSTRINGS) else: retry_substrings = [ value.lower() for value in _coerce_str_list( retry_substrings_raw, field_path=extend_path(path, "retry_on_error_substrings"), ) if value ] return cls( enabled=enabled, max_attempts=max_attempts, min_wait_seconds=min_wait, max_wait_seconds=max_wait, retry_on_status_codes=retry_status_codes, retry_on_exception_types=retry_types, non_retry_exception_types=non_retry_types, retry_on_error_substrings=retry_substrings, path=path, ) @property def is_active(self) -> bool: return self.enabled and self.max_attempts > 1 def should_retry(self, exc: BaseException) -> bool: if not self.is_active: return False chain: List[tuple[BaseException, set[str], int | None, str]] = [] for error in self._iter_exception_chain(exc): chain.append( ( error, self._exception_name_set(error), self._extract_status_code(error), str(error).lower(), ) ) if self.non_retry_exception_types: for _, names, _, _ in chain: if any(name in names for name in self.non_retry_exception_types): return False if self.retry_on_exception_types: for _, names, _, _ in chain: if any(name in names for 
name in self.retry_on_exception_types): return True if self.retry_on_status_codes: for _, _, status_code, _ in chain: if status_code is not None and status_code in self.retry_on_status_codes: return True if self.retry_on_error_substrings: for _, _, _, message in chain: if message and any(substr in message for substr in self.retry_on_error_substrings): return True return False def _exception_name_set(self, exc: BaseException) -> set[str]: names: set[str] = set() for cls in exc.__class__.mro(): names.add(cls.__name__.lower()) names.add(f"{cls.__module__}.{cls.__name__}".lower()) return names def _extract_status_code(self, exc: BaseException) -> int | None: for attr in ("status_code", "http_status", "status", "statusCode"): value = getattr(exc, attr, None) if isinstance(value, int): return value response = getattr(exc, "response", None) if response is not None: for attr in ("status_code", "status", "statusCode"): value = getattr(response, attr, None) if isinstance(value, int): return value return None def _iter_exception_chain(self, exc: BaseException) -> Iterable[BaseException]: seen: set[int] = set() stack: List[BaseException] = [exc] while stack: current = stack.pop() if id(current) in seen: continue seen.add(id(current)) yield current linked: List[BaseException] = [] cause = getattr(current, "__cause__", None) context = getattr(current, "__context__", None) if isinstance(cause, BaseException): linked.append(cause) if isinstance(context, BaseException): linked.append(context) if _BASE_EXCEPTION_GROUP_TYPE is not None and isinstance(current, _BASE_EXCEPTION_GROUP_TYPE): for exc_item in getattr(current, "exceptions", None) or (): if isinstance(exc_item, BaseException): linked.append(exc_item) stack.extend(linked) @dataclass class AgentConfig(BaseConfig): provider: str base_url: str name: str role: str | None = None api_key: str | None = None params: Dict[str, Any] = field(default_factory=dict) retry: AgentRetryConfig | None = None input_mode: AgentInputMode = 
AgentInputMode.MESSAGES tooling: List[ToolingConfig] = field(default_factory=list) thinking: ThinkingConfig | None = None memories: List[MemoryAttachmentConfig] = field(default_factory=list) # Runtime attributes (attached dynamically) token_tracker: Any | None = field(default=None, init=False, repr=False) node_id: str | None = field(default=None, init=False, repr=False) @classmethod def from_dict(cls, data: Mapping[str, Any], *, path: str) -> "AgentConfig": mapping = require_mapping(data, path) provider = require_str(mapping, "provider", path) base_url = optional_str(mapping, "base_url", path) name_value = mapping.get("name") if isinstance(name_value, str) and name_value.strip(): model_name = name_value.strip() else: raise ConfigError("model.name must be a non-empty string", extend_path(path, "name")) role = optional_str(mapping, "role", path) api_key = optional_str(mapping, "api_key", path) params = optional_dict(mapping, "params", path) or {} raw_input_mode = optional_str(mapping, "input_mode", path) input_mode = AgentInputMode.MESSAGES if raw_input_mode: try: input_mode = AgentInputMode(raw_input_mode.strip().lower()) except ValueError as exc: raise ConfigError( "model.input_mode must be 'prompt' or 'messages'", extend_path(path, "input_mode"), ) from exc tooling_cfg: List[ToolingConfig] = [] if "tooling" in mapping and mapping["tooling"] is not None: raw_tooling = mapping["tooling"] if not isinstance(raw_tooling, list): raise ConfigError("tooling must be a list", extend_path(path, "tooling")) for idx, item in enumerate(raw_tooling): tooling_cfg.append( ToolingConfig.from_dict(item, path=extend_path(path, f"tooling[{idx}]")) ) thinking_cfg = None if "thinking" in mapping and mapping["thinking"] is not None: thinking_cfg = ThinkingConfig.from_dict(mapping["thinking"], path=extend_path(path, "thinking")) memories_cfg: List[MemoryAttachmentConfig] = [] if "memories" in mapping and mapping["memories"] is not None: raw_memories = mapping["memories"] if not 
isinstance(raw_memories, list): raise ConfigError("memories must be a list", extend_path(path, "memories")) for idx, item in enumerate(raw_memories): memories_cfg.append( MemoryAttachmentConfig.from_dict(item, path=extend_path(path, f"memories[{idx}]")) ) retry_cfg = None if "retry" in mapping and mapping["retry"] is not None: retry_cfg = AgentRetryConfig.from_dict(mapping["retry"], path=extend_path(path, "retry")) return cls( provider=provider, base_url=base_url, name=model_name, role=role, api_key=api_key, params=params, tooling=tooling_cfg, thinking=thinking_cfg, memories=memories_cfg, retry=retry_cfg, input_mode=input_mode, path=path, ) FIELD_SPECS = { "name": ConfigFieldSpec( name="name", display_name="Model Name", type_hint="str", required=True, description="Specific model name e.g. gpt-4o", ), "role": ConfigFieldSpec( name="role", display_name="System Prompt", type_hint="text", required=False, description="Model system prompt", ), "provider": ConfigFieldSpec( name="provider", display_name="Model Provider", type_hint="str", required=True, description="Name of a registered provider (openai, gemini, etc.) 
that selects the underlying client adapter.", default="openai", ), "base_url": ConfigFieldSpec( name="base_url", display_name="Base URL", type_hint="str", required=False, description="Override the provider's default endpoint; leave empty to use the built-in base URL.", advance=True, default="${BASE_URL}", ), "api_key": ConfigFieldSpec( name="api_key", display_name="API Key", type_hint="str", required=False, description="Credential consumed by the provider client; reference an env var such as ${API_KEY} that matches the selected provider.", advance=True, default="${API_KEY}", ), "params": ConfigFieldSpec( name="params", display_name="Call Parameters", type_hint="dict[str, Any]", required=False, default={}, description="Call parameters (temperature, top_p, etc.)", advance=True, ), # "input_mode": ConfigFieldSpec( # currently, many features depend on messages mode, so hide this and force messages # name="input_mode", # display_name="Input Mode", # type_hint="enum:AgentInputMode", # required=False, # default=AgentInputMode.MESSAGES.value, # description="Model input mode: messages (default) or prompt", # enum=[item.value for item in AgentInputMode], # advance=True, # enum_options=enum_options_for(AgentInputMode), # ), "tooling": ConfigFieldSpec( name="tooling", display_name="Tool Configuration", type_hint="list[ToolingConfig]", required=False, description="Bound tool configuration list", child=ToolingConfig, advance=True, ), "thinking": ConfigFieldSpec( name="thinking", display_name="Thinking Configuration", type_hint="ThinkingConfig", required=False, description="Thinking process configuration", child=ThinkingConfig, advance=True, ), "memories": ConfigFieldSpec( name="memories", display_name="Memory Attachments", type_hint="list[MemoryAttachmentConfig]", required=False, description="Associated memory references", child=MemoryAttachmentConfig, advance=True, ), "retry": ConfigFieldSpec( name="retry", display_name="Retry Policy", type_hint="AgentRetryConfig", 
required=False, description="Automatic retry policy for this model", child=AgentRetryConfig, advance=True, ), } @classmethod def field_specs(cls) -> Dict[str, ConfigFieldSpec]: specs = super().field_specs() provider_spec = specs.get("provider") if provider_spec: enum_spec = cls._apply_provider_enum(provider_spec) specs["provider"] = enum_spec return specs @staticmethod def _apply_provider_enum(provider_spec: ConfigFieldSpec) -> ConfigFieldSpec: provider_names, metadata = AgentConfig._provider_registry_snapshot() if not provider_names: return provider_spec enum_options: List[EnumOption] = [] for name in provider_names: meta = metadata.get(name) or {} label = meta.get("label") or titleize(name) enum_options.append( EnumOption( value=name, label=label, description=meta.get("summary"), ) ) default_value = provider_spec.default if not default_value or default_value not in provider_names: default_value = AgentConfig._preferred_provider_default(provider_names) return replace( provider_spec, enum=provider_names, enum_options=enum_options, default=default_value, ) @staticmethod def _preferred_provider_default(provider_names: List[str]) -> str: if "openai" in provider_names: return "openai" return provider_names[0] @staticmethod def _provider_registry_snapshot() -> tuple[List[str], Dict[str, Dict[str, Any]]]: specs = iter_model_provider_schemas() names = list(specs.keys()) metadata: Dict[str, Dict[str, Any]] = {} for name, spec in specs.items(): metadata[name] = { "label": spec.label, "summary": spec.summary, **(spec.metadata or {}), } return names, metadata
{ "repo_id": "OpenBMB/ChatDev", "file_path": "entity/configs/node/agent.py", "license": "Apache License 2.0", "lines": 508, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:entity/configs/node/human.py
"""Human node configuration."""

from dataclasses import dataclass
from typing import Any, Mapping

from entity.configs.base import BaseConfig, ConfigFieldSpec, optional_str, require_mapping


@dataclass
class HumanConfig(BaseConfig):
    """Config block for a human node; carries only an optional task description."""

    description: str | None = None

    @classmethod
    def from_dict(cls, data: Mapping[str, Any] | None, *, path: str) -> "HumanConfig":
        """Build a HumanConfig from raw mapping data.

        A missing (``None``) block is valid and yields an empty description.
        """
        if data is not None:
            parsed = require_mapping(data, path)
            return cls(description=optional_str(parsed, "description", path), path=path)
        return cls(description=None, path=path)

    FIELD_SPECS = {
        "description": ConfigFieldSpec(
            name="description",
            display_name="Human Task Description",
            type_hint="text",
            required=False,
            description="Description of the task for human to complete",
        )
    }
{ "repo_id": "OpenBMB/ChatDev", "file_path": "entity/configs/node/human.py", "license": "Apache License 2.0", "lines": 23, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:entity/configs/node/literal.py
"""Configuration for literal nodes."""

from dataclasses import dataclass
from typing import Mapping, Any

from entity.configs.base import (
    BaseConfig,
    ConfigError,
    ConfigFieldSpec,
    EnumOption,
    optional_str,
    require_mapping,
    require_str,
)
from entity.messages import MessageRole


@dataclass
class LiteralNodeConfig(BaseConfig):
    """Config describing the literal payload emitted by the node."""

    content: str = ""
    role: MessageRole = MessageRole.USER

    @classmethod
    def from_dict(cls, data: Mapping[str, Any], *, path: str) -> "LiteralNodeConfig":
        """Parse the literal node block; content is mandatory and non-empty."""
        parsed = require_mapping(data, path)

        content = require_str(parsed, "content", path)
        if not content:
            raise ConfigError("content cannot be empty", f"{path}.content")

        # Role defaults to USER; only user/assistant are accepted when supplied.
        role = MessageRole.USER
        raw_role = optional_str(parsed, "role", path)
        if raw_role:
            candidate = raw_role.strip().lower()
            if candidate not in (MessageRole.USER.value, MessageRole.ASSISTANT.value):
                raise ConfigError("role must be 'user' or 'assistant'", f"{path}.role")
            role = MessageRole(candidate)

        return cls(content=content, role=role, path=path)

    def validate(self) -> None:
        """Re-assert the invariants checked at parse time."""
        if not self.content:
            raise ConfigError("content cannot be empty", f"{self.path}.content")
        if self.role not in (MessageRole.USER, MessageRole.ASSISTANT):
            raise ConfigError("role must be 'user' or 'assistant'", f"{self.path}.role")

    FIELD_SPECS = {
        "content": ConfigFieldSpec(
            name="content",
            display_name="Literal Content",
            type_hint="text",
            required=True,
            description="Plain text emitted whenever the node executes.",
        ),
        "role": ConfigFieldSpec(
            name="role",
            display_name="Message Role",
            type_hint="str",
            required=False,
            default=MessageRole.USER.value,
            enum=[MessageRole.USER.value, MessageRole.ASSISTANT.value],
            enum_options=[
                EnumOption(value=MessageRole.USER.value, label="user"),
                EnumOption(value=MessageRole.ASSISTANT.value, label="assistant"),
            ],
            description="Select whether the literal message should appear as a user or assistant entry.",
        ),
    }
{ "repo_id": "OpenBMB/ChatDev", "file_path": "entity/configs/node/literal.py", "license": "Apache License 2.0", "lines": 59, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:entity/configs/node/loop_counter.py
"""Configuration for loop counter guard nodes."""

from dataclasses import dataclass
from typing import Mapping, Any, Optional

from entity.configs.base import (
    BaseConfig,
    ConfigError,
    ConfigFieldSpec,
    require_mapping,
    extend_path,
    optional_str,
)


@dataclass
class LoopCounterConfig(BaseConfig):
    """Configuration schema for the loop counter node type."""

    max_iterations: int = 10
    reset_on_emit: bool = True
    message: Optional[str] = None

    @classmethod
    def from_dict(cls, data: Mapping[str, Any] | None, *, path: str) -> "LoopCounterConfig":
        """Parse the loop counter block; an absent block yields all defaults."""
        payload = require_mapping(data or {}, path)

        try:
            cap = int(payload.get("max_iterations", 10))
        except (TypeError, ValueError) as exc:  # pragma: no cover - defensive
            raise ConfigError(
                "max_iterations must be an integer",
                extend_path(path, "max_iterations"),
            ) from exc
        if cap < 1:
            raise ConfigError("max_iterations must be >= 1", extend_path(path, "max_iterations"))

        return cls(
            max_iterations=cap,
            reset_on_emit=bool(payload.get("reset_on_emit", True)),
            message=optional_str(payload, "message", path),
            path=path,
        )

    def validate(self) -> None:
        """Re-assert the iteration cap invariant."""
        if self.max_iterations < 1:
            raise ConfigError("max_iterations must be >= 1", extend_path(self.path, "max_iterations"))

    FIELD_SPECS = {
        "max_iterations": ConfigFieldSpec(
            name="max_iterations",
            display_name="Maximum Iterations",
            type_hint="int",
            required=True,
            default=10,
            description="How many times the loop can run before this node emits an output.",
        ),
        "reset_on_emit": ConfigFieldSpec(
            name="reset_on_emit",
            display_name="Reset After Emit",
            type_hint="bool",
            required=False,
            default=True,
            description="Whether to reset the internal counter after reaching the limit.",
            advance=True,
        ),
        "message": ConfigFieldSpec(
            name="message",
            display_name="Release Message",
            type_hint="text",
            required=False,
            description="Optional text sent downstream once the iteration cap is reached.",
            advance=True,
        ),
    }
{ "repo_id": "OpenBMB/ChatDev", "file_path": "entity/configs/node/loop_counter.py", "license": "Apache License 2.0", "lines": 68, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:entity/configs/node/memory.py
"""Memory-related configuration dataclasses."""

from dataclasses import dataclass, field, replace
from typing import Any, Dict, List, Mapping

from entity.enums import AgentExecFlowStage
from entity.enum_options import enum_options_for, enum_options_from_values
from schema_registry import (
    SchemaLookupError,
    get_memory_store_schema,
    iter_memory_store_schemas,
)
from entity.configs.base import (
    BaseConfig,
    ConfigError,
    ConfigFieldSpec,
    ChildKey,
    ensure_list,
    optional_dict,
    optional_str,
    require_mapping,
    require_str,
    extend_path,
)


@dataclass
class EmbeddingConfig(BaseConfig):
    """Embedding model settings shared by vector-backed memory stores."""

    provider: str
    model: str
    api_key: str | None = None
    base_url: str | None = None
    params: Dict[str, Any] = field(default_factory=dict)

    @classmethod
    def from_dict(cls, data: Mapping[str, Any], *, path: str) -> "EmbeddingConfig":
        """Parse an embedding block; provider and model are mandatory."""
        payload = require_mapping(data, path)
        return cls(
            provider=require_str(payload, "provider", path),
            model=require_str(payload, "model", path),
            api_key=optional_str(payload, "api_key", path),
            base_url=optional_str(payload, "base_url", path),
            params=optional_dict(payload, "params", path) or {},
            path=path,
        )

    FIELD_SPECS = {
        "provider": ConfigFieldSpec(
            name="provider",
            display_name="Embedding Provider",
            type_hint="str",
            required=True,
            default="openai",
            description="Embedding provider",
        ),
        "model": ConfigFieldSpec(
            name="model",
            display_name="Embedding Model",
            type_hint="str",
            required=True,
            default="text-embedding-3-small",
            description="Embedding model name",
        ),
        "api_key": ConfigFieldSpec(
            name="api_key",
            display_name="API Key",
            type_hint="str",
            required=False,
            description="API key",
            default="${API_KEY}",
            advance=True,
        ),
        "base_url": ConfigFieldSpec(
            name="base_url",
            display_name="Base URL",
            type_hint="str",
            required=False,
            description="Custom Base URL",
            default="${BASE_URL}",
            advance=True,
        ),
        "params": ConfigFieldSpec(
            name="params",
            display_name="Custom Parameters",
            type_hint="dict[str, Any]",
            required=False,
            default={},
            description="Embedding parameters (temperature, etc.)",
            advance=True,
        ),
    }


@dataclass
class FileSourceConfig(BaseConfig):
    """One file or directory to be ingested into a file memory."""

    source_path: str
    file_types: List[str] | None = None
    recursive: bool = True
    encoding: str = "utf-8"

    @classmethod
    def from_dict(cls, data: Mapping[str, Any], *, path: str) -> "FileSourceConfig":
        """Parse a file source block; the ``path`` key is mandatory."""
        payload = require_mapping(data, path)
        source = require_str(payload, "path", path)

        # Optional suffix filter; every entry must be a plain string.
        suffixes: List[str] | None = None
        raw_types = payload.get("file_types")
        if raw_types is not None:
            suffixes = []
            for i, entry in enumerate(ensure_list(raw_types)):
                if not isinstance(entry, str):
                    raise ConfigError("file_types entries must be strings", extend_path(path, f"file_types[{i}]"))
                suffixes.append(entry)

        recursive_flag = payload.get("recursive", True)
        if not isinstance(recursive_flag, bool):
            raise ConfigError("recursive must be boolean", extend_path(path, "recursive"))

        codec = optional_str(payload, "encoding", path) or "utf-8"
        return cls(source_path=source, file_types=suffixes, recursive=recursive_flag, encoding=codec, path=path)

    FIELD_SPECS = {
        "path": ConfigFieldSpec(
            name="path",
            display_name="File/Directory Path",
            type_hint="str",
            required=True,
            description="Path to file/directory to be indexed",
        ),
        "file_types": ConfigFieldSpec(
            name="file_types",
            display_name="File Type Filter",
            type_hint="list[str]",
            required=False,
            description="List of file type suffixes to limit (e.g. .md, .txt)",
        ),
        "recursive": ConfigFieldSpec(
            name="recursive",
            display_name="Recursive Subdirectories",
            type_hint="bool",
            required=False,
            default=True,
            description="Whether to include subdirectories recursively",
            advance=True,
        ),
        "encoding": ConfigFieldSpec(
            name="encoding",
            display_name="File Encoding",
            type_hint="str",
            required=False,
            default="utf-8",
            description="Encoding used to read files",
            advance=True,
        ),
    }


@dataclass
class SimpleMemoryConfig(BaseConfig):
    """Payload for the simple (flat file) memory store type."""

    memory_path: str | None = None
    embedding: EmbeddingConfig | None = None

    @classmethod
    def from_dict(cls, data: Mapping[str, Any], *, path: str) -> "SimpleMemoryConfig":
        """Parse the simple memory block; both fields are optional."""
        payload = require_mapping(data, path)
        embed_cfg = None
        if payload.get("embedding") is not None:
            embed_cfg = EmbeddingConfig.from_dict(payload["embedding"], path=extend_path(path, "embedding"))
        return cls(
            memory_path=optional_str(payload, "memory_path", path),
            embedding=embed_cfg,
            path=path,
        )

    FIELD_SPECS = {
        "memory_path": ConfigFieldSpec(
            name="memory_path",
            display_name="Memory File Path",
            type_hint="str",
            required=False,
            description="Simple memory file path",
            advance=True,
        ),
        "embedding": ConfigFieldSpec(
            name="embedding",
            display_name="Embedding Configuration",
            type_hint="EmbeddingConfig",
            required=False,
            description="Optional embedding configuration",
            child=EmbeddingConfig,
        ),
    }


@dataclass
class FileMemoryConfig(BaseConfig):
    """Payload for the file-backed memory store type."""

    index_path: str | None = None
    file_sources: List[FileSourceConfig] = field(default_factory=list)
    embedding: EmbeddingConfig | None = None

    @classmethod
    def from_dict(cls, data: Mapping[str, Any], *, path: str) -> "FileMemoryConfig":
        """Parse the file memory block; at least one file source is required."""
        payload = require_mapping(data, path)

        raw_sources = ensure_list(payload.get("file_sources"))
        if not raw_sources:
            raise ConfigError("file_sources must contain at least one entry", extend_path(path, "file_sources"))
        sources = [
            FileSourceConfig.from_dict(entry, path=extend_path(path, f"file_sources[{i}]"))
            for i, entry in enumerate(raw_sources)
        ]

        # Accept legacy "memory_path" as an alias when "index_path" is absent.
        index_path = optional_str(payload, "index_path", path)
        if index_path is None:
            index_path = optional_str(payload, "memory_path", path)

        embed_cfg = None
        if payload.get("embedding") is not None:
            embed_cfg = EmbeddingConfig.from_dict(payload["embedding"], path=extend_path(path, "embedding"))

        return cls(index_path=index_path, file_sources=sources, embedding=embed_cfg, path=path)

    FIELD_SPECS = {
        "index_path": ConfigFieldSpec(
            name="index_path",
            display_name="Index Path",
            type_hint="str",
            required=False,
            description="Vector index storage path",
            advance=True,
        ),
        "file_sources": ConfigFieldSpec(
            name="file_sources",
            display_name="File Source List",
            type_hint="list[FileSourceConfig]",
            required=True,
            description="List of file sources to ingest",
            child=FileSourceConfig,
        ),
        "embedding": ConfigFieldSpec(
            name="embedding",
            display_name="Embedding Configuration",
            type_hint="EmbeddingConfig",
            required=False,
            description="Embedding used for file memory",
            child=EmbeddingConfig,
        ),
    }


@dataclass
class BlackboardMemoryConfig(BaseConfig):
    """Payload for the blackboard memory store type."""

    memory_path: str | None = None
    max_items: int = 1000

    @classmethod
    def from_dict(cls, data: Mapping[str, Any], *, path: str) -> "BlackboardMemoryConfig":
        """Parse the blackboard block; max_items must be a positive integer."""
        payload = require_mapping(data, path)
        cap = payload.get("max_items", 1000)
        if not isinstance(cap, int) or cap <= 0:
            raise ConfigError("max_items must be a positive integer", extend_path(path, "max_items"))
        return cls(memory_path=optional_str(payload, "memory_path", path), max_items=cap, path=path)

    FIELD_SPECS = {
        "memory_path": ConfigFieldSpec(
            name="memory_path",
            display_name="Blackboard Path",
            type_hint="str",
            required=False,
            description="JSON path for blackboard memory writing. Pass 'auto' to auto-create in working directory, valid for this run only",
            default="auto",
            advance=True,
        ),
        "max_items": ConfigFieldSpec(
            name="max_items",
            display_name="Maximum Items",
            type_hint="int",
            required=False,
            default=1000,
            description="Maximum number of memory items to retain (trimmed by time)",
            advance=True,
        ),
    }


@dataclass
class MemoryStoreConfig(BaseConfig):
    """Named memory store whose payload schema is chosen by its registered type."""

    name: str
    type: str
    config: BaseConfig | None = None

    @classmethod
    def from_dict(cls, data: Mapping[str, Any], *, path: str) -> "MemoryStoreConfig":
        """Parse a store entry, dispatching the config block to the type's schema."""
        payload = require_mapping(data, path)
        store_name = require_str(payload, "name", path)
        store_type = require_str(payload, "type", path)

        try:
            schema = get_memory_store_schema(store_type)
        except SchemaLookupError as exc:
            raise ConfigError(f"unsupported memory store type '{store_type}'", extend_path(path, "type")) from exc

        if payload.get("config") is None:
            raise ConfigError("memory store requires config block", extend_path(path, "config"))
        store_cfg = schema.config_cls.from_dict(payload["config"], path=extend_path(path, "config"))

        return cls(name=store_name, type=store_type, config=store_cfg, path=path)

    def require_payload(self) -> BaseConfig:
        """Return the typed payload, raising if it was never attached."""
        if not self.config:
            raise ConfigError("memory store payload missing", extend_path(self.path, "config"))
        return self.config

    FIELD_SPECS = {
        "name": ConfigFieldSpec(
            name="name",
            display_name="Store Name",
            type_hint="str",
            required=True,
            description="Unique name of the memory store",
        ),
        "type": ConfigFieldSpec(
            name="type",
            display_name="Store Type",
            type_hint="str",
            required=True,
            description="Memory store type",
        ),
        "config": ConfigFieldSpec(
            name="config",
            display_name="Store Configuration",
            type_hint="object",
            required=True,
            description="Schema required by the selected store type (simple/file/blackboard/etc.), following that type's required keys.",
        ),
    }

    @classmethod
    def child_routes(cls) -> Dict[ChildKey, type[BaseConfig]]:
        """Map each registered store type to its payload config class."""
        return {
            ChildKey(field="config", value=store_name): schema.config_cls
            for store_name, schema in iter_memory_store_schemas().items()
        }

    @classmethod
    def field_specs(cls) -> Dict[str, ConfigFieldSpec]:
        """Augment the static specs with the live registry's type enum."""
        specs = super().field_specs()
        type_spec = specs.get("type")
        if type_spec:
            registrations = iter_memory_store_schemas()
            type_names = list(registrations.keys())
            summaries = {store_name: schema.summary for store_name, schema in registrations.items()}
            specs["type"] = replace(
                type_spec,
                enum=type_names,
                enum_options=enum_options_from_values(type_names, summaries, preserve_label_case=True),
            )
        return specs


@dataclass
class MemoryAttachmentConfig(BaseConfig):
    """Reference from an agent to a memory store plus retrieval settings."""

    name: str
    retrieve_stage: List[AgentExecFlowStage] | None = None
    top_k: int = 3
    similarity_threshold: float = -1.0
    read: bool = True
    write: bool = True

    @classmethod
    def from_dict(cls, data: Mapping[str, Any], *, path: str) -> "MemoryAttachmentConfig":
        """Parse an attachment entry, validating stages, top_k, and flags."""
        payload = require_mapping(data, path)
        store_name = require_str(payload, "name", path)

        # Optional list of execution stages at which retrieval fires.
        stages: List[AgentExecFlowStage] | None = None
        raw_stages = payload.get("retrieve_stage")
        if raw_stages is not None:
            stages = []
            for i, entry in enumerate(ensure_list(raw_stages)):
                try:
                    stages.append(AgentExecFlowStage(entry))
                except ValueError as exc:
                    raise ConfigError(
                        f"retrieve_stage entries must be one of {[stage.value for stage in AgentExecFlowStage]}",
                        extend_path(path, f"retrieve_stage[{i}]"),
                    ) from exc

        top_k = payload.get("top_k", 3)
        if not isinstance(top_k, int) or top_k <= 0:
            raise ConfigError("top_k must be a positive integer", extend_path(path, "top_k"))

        threshold = payload.get("similarity_threshold", -1.0)
        if not isinstance(threshold, (int, float)):
            raise ConfigError("similarity_threshold must be numeric", extend_path(path, "similarity_threshold"))

        read_flag = payload.get("read", True)
        if not isinstance(read_flag, bool):
            raise ConfigError("read must be boolean", extend_path(path, "read"))
        write_flag = payload.get("write", True)
        if not isinstance(write_flag, bool):
            raise ConfigError("write must be boolean", extend_path(path, "write"))

        return cls(
            name=store_name,
            retrieve_stage=stages,
            top_k=top_k,
            similarity_threshold=float(threshold),
            read=read_flag,
            write=write_flag,
            path=path,
        )

    FIELD_SPECS = {
        "name": ConfigFieldSpec(
            name="name",
            display_name="Memory Name",
            type_hint="str",
            required=True,
            description="Name of the referenced memory store",
        ),
        "retrieve_stage": ConfigFieldSpec(
            name="retrieve_stage",
            display_name="Retrieve Stage",
            type_hint="list[AgentExecFlowStage]",
            required=False,
            description="Execution stages when memory retrieval occurs. If not set, defaults to all stages. NOTE: this config is related to thinking, if the thinking module is not used, this config has only effect on `gen` stage.",
            enum=[stage.value for stage in AgentExecFlowStage],
            enum_options=enum_options_for(AgentExecFlowStage),
        ),
        "top_k": ConfigFieldSpec(
            name="top_k",
            display_name="Top K",
            type_hint="int",
            required=False,
            default=3,
            description="Number of items to retrieve",
            advance=True,
        ),
        "similarity_threshold": ConfigFieldSpec(
            name="similarity_threshold",
            display_name="Similarity Threshold",
            type_hint="float",
            required=False,
            default=-1.0,
            description="Similarity threshold (-1 means no similarity threshold filter)",
            advance=True,
        ),
        "read": ConfigFieldSpec(
            name="read",
            display_name="Allow Read",
            type_hint="bool",
            required=False,
            default=True,
            description="Whether to read this memory during execution",
            advance=True,
        ),
        "write": ConfigFieldSpec(
            name="write",
            display_name="Allow Write",
            type_hint="bool",
            required=False,
            default=True,
            description="Whether to write back to this memory after execution",
            advance=True,
        ),
    }
{ "repo_id": "OpenBMB/ChatDev", "file_path": "entity/configs/node/memory.py", "license": "Apache License 2.0", "lines": 417, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:entity/configs/node/node.py
"""Node configuration dataclasses.""" from dataclasses import dataclass, field, replace from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple from entity.messages import Message, MessageRole from schema_registry import ( SchemaLookupError, get_node_schema, iter_node_schemas, ) from entity.configs.base import ( BaseConfig, ConfigError, ConfigFieldSpec, EnumOption, ChildKey, ensure_list, optional_str, require_mapping, require_str, extend_path, ) from entity.configs.edge.edge_condition import EdgeConditionConfig from entity.configs.edge.edge_processor import EdgeProcessorConfig from entity.configs.edge.dynamic_edge_config import DynamicEdgeConfig from entity.configs.node.agent import AgentConfig from entity.configs.node.human import HumanConfig from entity.configs.node.tooling import FunctionToolConfig NodePayload = Message @dataclass class EdgeLink: target: "Node" config: Dict[str, Any] = field(default_factory=dict) trigger: bool = True condition: str = "true" condition_config: EdgeConditionConfig | None = None condition_type: str | None = None condition_metadata: Dict[str, Any] = field(default_factory=dict) triggered: bool = False carry_data: bool = True keep_message: bool = False clear_context: bool = False clear_kept_context: bool = False condition_manager: Any = None process_config: EdgeProcessorConfig | None = None process_type: str | None = None process_metadata: Dict[str, Any] = field(default_factory=dict) payload_processor: Any = None dynamic_config: DynamicEdgeConfig | None = None def __post_init__(self) -> None: self.config = dict(self.config or {}) @dataclass class Node(BaseConfig): id: str type: str description: str | None = None # keep_context: bool = False log_output: bool = True context_window: int = 0 vars: Dict[str, Any] = field(default_factory=dict) config: BaseConfig | None = None # dynamic configuration has been moved to edges (DynamicEdgeConfig) input: List[Message] = field(default_factory=list) output: List[NodePayload] = 
field(default_factory=list) # Runtime flag for explicit graph start nodes start_triggered: bool = False predecessors: List["Node"] = field(default_factory=list, repr=False) successors: List["Node"] = field(default_factory=list, repr=False) _outgoing_edges: List[EdgeLink] = field(default_factory=list, repr=False) FIELD_SPECS = { "id": ConfigFieldSpec( name="id", display_name="Node ID", type_hint="str", required=True, description="Unique node identifier", ), "type": ConfigFieldSpec( name="type", display_name="Node Type", type_hint="str", required=True, description="Select a node type registered in node.registry (agent, human, python_runner, etc.); it determines the config schema.", ), "description": ConfigFieldSpec( name="description", display_name="Node Description", type_hint="str", required=False, advance=True, description="Short summary shown in consoles/logs to explain this node's role or prompt context.", ), # "keep_context": ConfigFieldSpec( # name="keep_context", # display_name="Preserve Context", # type_hint="bool", # required=False, # default=False, # description="Nodes clear their context by default; set to True to keep context data after execution.", # ), "context_window": ConfigFieldSpec( name="context_window", display_name="Context Window Size", type_hint="int", required=False, default=0, description="Number of context messages accessible during node execution. 0 means clear all context except messages with keep_message=True, -1 means unlimited, other values represent the number of context messages to keep besides those with keep_message=True.", # advance=True, ), "log_output": ConfigFieldSpec( name="log_output", display_name="Log Output", type_hint="bool", required=False, default=True, advance=True, description="Whether to log this node's output content. 
Set to false to avoid logging outputs.", ), "config": ConfigFieldSpec( name="config", display_name="Node Configuration", type_hint="object", required=True, description="Configuration object required by the chosen node type (see Schema API for the supported fields).", ), # Dynamic execution configuration has been moved to edges (DynamicEdgeConfig) } @classmethod def child_routes(cls) -> Dict[ChildKey, type[BaseConfig]]: routes: Dict[ChildKey, type[BaseConfig]] = {} for name, schema in iter_node_schemas().items(): routes[ChildKey(field="config", value=name)] = schema.config_cls return routes @classmethod def field_specs(cls) -> Dict[str, ConfigFieldSpec]: specs = super().field_specs() type_spec = specs.get("type") if type_spec: registrations = iter_node_schemas() specs["type"] = replace( type_spec, enum=list(registrations.keys()), enum_options=[ EnumOption( value=name, label=name, description=schema.summary or "No description provided for this node type", ) for name, schema in registrations.items() ], ) return specs @classmethod def from_dict(cls, data: Mapping[str, Any], *, path: str) -> "Node": mapping = require_mapping(data, path) node_id = require_str(mapping, "id", path) node_type = require_str(mapping, "type", path) try: schema = get_node_schema(node_type) except SchemaLookupError as exc: raise ConfigError( f"unsupported node type '{node_type}'", extend_path(path, "type"), ) from exc description = optional_str(mapping, "description", path) # keep_context = bool(mapping.get("keep_context", False)) log_output = bool(mapping.get("log_output", True)) context_window = int(mapping.get("context_window", 0)) input_value = ensure_list(mapping.get("input")) output_value = ensure_list(mapping.get("output")) input_messages: List[Message] = [] for value in input_value: if isinstance(value, dict) and "role" in value: input_messages.append(Message.from_dict(value)) elif isinstance(value, Message): input_messages.append(value) else: 
input_messages.append(Message(role=MessageRole.USER, content=str(value))) if "config" not in mapping or mapping["config"] is None: raise ConfigError("node config block required", extend_path(path, "config")) config_obj = schema.config_cls.from_dict( mapping["config"], path=extend_path(path, "config") ) formatted_output: List[NodePayload] = [] for value in output_value: if isinstance(value, dict) and "role" in value: formatted_output.append(Message.from_dict(value)) elif isinstance(value, Message): formatted_output.append(value) else: formatted_output.append( Message(role=MessageRole.ASSISTANT, content=str(value)) ) # Dynamic configuration parsing removed - dynamic is now on edges node = cls( id=node_id, type=node_type, description=description, log_output=log_output, input=input_messages, output=formatted_output, # keep_context=keep_context, context_window=context_window, vars={}, config=config_obj, path=path, ) node.validate() return node def append_input(self, message: Message) -> None: self.input.append(message) def append_output(self, payload: NodePayload) -> None: self.output.append(payload) def clear_input(self, *, preserve_kept: bool = False, context_window: int = 0) -> int: """Clear queued inputs according to the node's context window semantics.""" if not preserve_kept: self.input = [] return len(self.input) if context_window < 0: return len(self.input) if context_window == 0: self.input = [message for message in self.input if getattr(message, "keep", False)] return len(self.input) # context_window > 0 => retain the newest messages up to the specified # capacity, but never drop messages flagged with keep=True. Those kept # messages still count toward the window, effectively consuming slots that # would otherwise be available for non-kept inputs. 
keep_count = sum(1 for message in self.input if getattr(message, "keep", False)) allowed_non_keep = max(0, context_window - keep_count) non_keep_total = sum(1 for message in self.input if not getattr(message, "keep", False)) non_keep_to_drop = max(0, non_keep_total - allowed_non_keep) trimmed_inputs: List[Message] = [] for message in self.input: if getattr(message, "keep", False): trimmed_inputs.append(message) continue if non_keep_to_drop > 0: non_keep_to_drop -= 1 continue trimmed_inputs.append(message) self.input = trimmed_inputs return len(self.input) def clear_inputs_by_flag(self, *, drop_non_keep: bool, drop_keep: bool) -> Tuple[int, int]: """Clear queued inputs according to keep markers.""" if not drop_non_keep and not drop_keep: return 0, 0 remaining: List[Message] = [] removed_non_keep = 0 removed_keep = 0 for message in self.input: is_keep = message.keep if is_keep and drop_keep: removed_keep += 1 continue if not is_keep and drop_non_keep: removed_non_keep += 1 continue remaining.append(message) if removed_non_keep or removed_keep: self.input = remaining return removed_non_keep, removed_keep def validate(self) -> None: if not self.config: raise ConfigError("node configuration missing", extend_path(self.path, "config")) if hasattr(self.config, "validate"): self.config.validate() @property def node_type(self) -> str: return self.type @property def model_name(self) -> Optional[str]: agent = self.as_config(AgentConfig) if not agent: return None return agent.name @property def role(self) -> Optional[str]: agent = self.as_config(AgentConfig) if agent: return agent.role human = self.as_config(HumanConfig) if human: return human.description return None @property def tools(self) -> List[Any]: agent = self.as_config(AgentConfig) if agent and agent.tooling: all_tools: List[Any] = [] for tool_config in agent.tooling: func_cfg = tool_config.as_config(FunctionToolConfig) if func_cfg: all_tools.extend(func_cfg.tools) return all_tools return [] @property def 
memories(self) -> List[Any]: agent = self.as_config(AgentConfig) if agent: return list(agent.memories) return [] @property def params(self) -> Dict[str, Any]: agent = self.as_config(AgentConfig) if agent: return dict(agent.params) return {} @property def base_url(self) -> Optional[str]: agent = self.as_config(AgentConfig) if agent: return agent.base_url return None def add_successor(self, node: "Node", edge_config: Optional[Dict[str, Any]] = None) -> None: if node not in self.successors: self.successors.append(node) payload = dict(edge_config or {}) existing = next((link for link in self._outgoing_edges if link.target is node), None) trigger = bool(payload.get("trigger", True)) if payload else True carry_data = bool(payload.get("carry_data", True)) if payload else True keep_message = bool(payload.get("keep_message", False)) if payload else False clear_context = bool(payload.get("clear_context", False)) if payload else False clear_kept_context = bool(payload.get("clear_kept_context", False)) if payload else False condition_config = payload.pop("condition_config", None) if not isinstance(condition_config, EdgeConditionConfig): raw_value = payload.get("condition", "true") condition_config = EdgeConditionConfig.from_dict( raw_value, path=extend_path(self.path, f"edge[{self.id}->{node.id}].condition"), ) condition_label = condition_config.display_label() condition_type = condition_config.type condition_serializable = condition_config.to_external_value() process_config = payload.pop("process_config", None) if process_config is None and payload.get("process") is not None: process_config = EdgeProcessorConfig.from_dict( payload.get("process"), path=extend_path(self.path, f"edge[{self.id}->{node.id}].process"), ) process_serializable = process_config.to_external_value() if isinstance(process_config, EdgeProcessorConfig) else None process_type = process_config.type if isinstance(process_config, EdgeProcessorConfig) else None process_label = process_config.display_label() if 
isinstance(process_config, EdgeProcessorConfig) else None # Handle dynamic_config dynamic_config = payload.pop("dynamic_config", None) if dynamic_config is None and payload.get("dynamic") is not None: dynamic_config = DynamicEdgeConfig.from_dict( payload.get("dynamic"), path=extend_path(self.path, f"edge[{self.id}->{node.id}].dynamic"), ) payload["condition"] = condition_serializable payload["condition_label"] = condition_label payload["condition_type"] = condition_type if process_serializable is not None: payload["process"] = process_serializable payload["process_label"] = process_label payload["process_type"] = process_type if existing: existing.config.update(payload) existing.trigger = trigger existing.condition = condition_label existing.condition_config = condition_config existing.condition_type = condition_type existing.carry_data = carry_data existing.keep_message = keep_message existing.clear_context = clear_context existing.clear_kept_context = clear_kept_context if isinstance(process_config, EdgeProcessorConfig): existing.process_config = process_config existing.process_type = process_type else: existing.process_config = None existing.process_type = None existing.dynamic_config = dynamic_config else: self._outgoing_edges.append( EdgeLink( target=node, config=payload, trigger=trigger, condition=condition_label, condition_config=condition_config, condition_type=condition_type, carry_data=carry_data, keep_message=keep_message, clear_context=clear_context, clear_kept_context=clear_kept_context, process_config=process_config if isinstance(process_config, EdgeProcessorConfig) else None, process_type=process_type, dynamic_config=dynamic_config, ) ) def add_predecessor(self, node: "Node") -> None: if node not in self.predecessors: self.predecessors.append(node) def iter_outgoing_edges(self) -> Iterable[EdgeLink]: return tuple(self._outgoing_edges) def find_outgoing_edge(self, node_id: str) -> EdgeLink | None: for link in self._outgoing_edges: if link.target.id == 
node_id: return link return None def is_triggered(self) -> bool: if self.start_triggered: return True for predecessor in self.predecessors: for edge_link in predecessor.iter_outgoing_edges(): if edge_link.target is self and edge_link.trigger and edge_link.triggered: return True return False def reset_triggers(self) -> None: self.start_triggered = False for predecessor in self.predecessors: for edge_link in predecessor.iter_outgoing_edges(): if edge_link.target is self: edge_link.triggered = False def merge_vars(self, parent_vars: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: merged = dict(parent_vars or {}) merged.update(self.vars) return merged
{ "repo_id": "OpenBMB/ChatDev", "file_path": "entity/configs/node/node.py", "license": "Apache License 2.0", "lines": 418, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:entity/configs/node/passthrough.py
"""Configuration for passthrough nodes.""" from dataclasses import dataclass from typing import Mapping, Any from entity.configs.base import BaseConfig, ConfigFieldSpec, optional_bool, require_mapping @dataclass class PassthroughConfig(BaseConfig): """Configuration for passthrough nodes.""" only_last_message: bool = True @classmethod def from_dict(cls, data: Mapping[str, Any] | None, *, path: str) -> "PassthroughConfig": if data is None: return cls(only_last_message=True, path=path) mapping = require_mapping(data, path) only_last_message = optional_bool(mapping, "only_last_message", path, default=True) return cls(only_last_message=only_last_message, path=path) FIELD_SPECS = { "only_last_message": ConfigFieldSpec( name="only_last_message", display_name="Only Last Message", type_hint="bool", required=False, default=True, description="If True, only pass the last received message. If False, pass all messages.", ), }
{ "repo_id": "OpenBMB/ChatDev", "file_path": "entity/configs/node/passthrough.py", "license": "Apache License 2.0", "lines": 25, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:entity/configs/node/python_runner.py
"""Configuration for Python code execution nodes.""" import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Mapping from entity.configs.base import ( BaseConfig, ConfigError, ConfigFieldSpec, ensure_list, optional_dict, optional_str, require_mapping, ) def _default_interpreter() -> str: return sys.executable or "python3" @dataclass class PythonRunnerConfig(BaseConfig): interpreter: str = field(default_factory=_default_interpreter) args: List[str] = field(default_factory=list) env: Dict[str, str] = field(default_factory=dict) timeout_seconds: int = 60 encoding: str = "utf-8" @classmethod def from_dict(cls, data: Mapping[str, Any], *, path: str) -> "PythonRunnerConfig": mapping = require_mapping(data, path) interpreter = optional_str(mapping, "interpreter", path) or _default_interpreter() args_raw = mapping.get("args") args = [str(item) for item in ensure_list(args_raw)] if args_raw is not None else [] env = optional_dict(mapping, "env", path) or {} timeout_value = mapping.get("timeout_seconds", 60) if not isinstance(timeout_value, int) or timeout_value <= 0: raise ConfigError("timeout_seconds must be a positive integer", f"{path}.timeout_seconds") encoding = optional_str(mapping, "encoding", path) or "utf-8" if not encoding: raise ConfigError("encoding cannot be empty", f"{path}.encoding") return cls( interpreter=interpreter, args=args, env={str(key): str(value) for key, value in env.items()}, timeout_seconds=timeout_value, encoding=encoding, path=path, ) FIELD_SPECS = { "interpreter": ConfigFieldSpec( name="interpreter", display_name="Python Path", type_hint="str", required=False, default=_default_interpreter(), description="Python executable file path, defaults to current process interpreter", advance=True, ), "args": ConfigFieldSpec( name="args", display_name="Startup Parameters", type_hint="list[str]", required=False, default=[], description="Parameter list appended after interpreter", advance=True, ), "env": ConfigFieldSpec( 
name="env", display_name="Additional Environment Variables", type_hint="dict[str, str]", required=False, default={}, description="Additional environment variables, will override process defaults", advance=True, ), "timeout_seconds": ConfigFieldSpec( name="timeout_seconds", display_name="Timeout (seconds)", type_hint="int", required=False, default=60, description="Script execution timeout (seconds)", ), "encoding": ConfigFieldSpec( name="encoding", display_name="Output Encoding", type_hint="str", required=False, default="utf-8", description="Encoding used to parse stdout/stderr", advance=True, ), }
{ "repo_id": "OpenBMB/ChatDev", "file_path": "entity/configs/node/python_runner.py", "license": "Apache License 2.0", "lines": 89, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:entity/configs/node/subgraph.py
"""Subgraph node configuration and registry helpers.""" from dataclasses import dataclass, replace from typing import Any, Dict, Mapping from entity.enums import LogLevel from entity.enum_options import enum_options_for, enum_options_from_values from entity.configs.base import ( BaseConfig, ConfigError, ConfigFieldSpec, ChildKey, require_mapping, require_str, extend_path, ) from entity.configs.edge.edge import EdgeConfig from entity.configs.node.memory import MemoryStoreConfig from utils.registry import Registry, RegistryError subgraph_source_registry = Registry("subgraph_source") def register_subgraph_source( name: str, *, config_cls: type[BaseConfig], description: str | None = None, ) -> None: """Register a subgraph source configuration class.""" metadata = {"summary": description} if description else None subgraph_source_registry.register(name, target=config_cls, metadata=metadata) def get_subgraph_source_config(name: str) -> type[BaseConfig]: entry = subgraph_source_registry.get(name) config_cls = entry.load() if not isinstance(config_cls, type) or not issubclass(config_cls, BaseConfig): raise RegistryError(f"Entry '{name}' is not a BaseConfig subclass") return config_cls def iter_subgraph_source_registrations() -> Dict[str, type[BaseConfig]]: return {name: entry.load() for name, entry in subgraph_source_registry.items()} def iter_subgraph_source_metadata() -> Dict[str, Dict[str, Any]]: return {name: dict(entry.metadata or {}) for name, entry in subgraph_source_registry.items()} @dataclass class SubgraphFileConfig(BaseConfig): file_path: str @classmethod def from_dict(cls, data: Mapping[str, Any], *, path: str) -> "SubgraphFileConfig": mapping = require_mapping(data, path) file_path = require_str(mapping, "path", path) return cls(file_path=file_path, path=path) FIELD_SPECS = { "path": ConfigFieldSpec( name="path", display_name="Subgraph File Path", type_hint="str", required=True, description="Subgraph file path (relative to yaml_instance/ or absolute path)", ), 
} @dataclass class SubgraphInlineConfig(BaseConfig): graph: Dict[str, Any] @classmethod def from_dict(cls, data: Mapping[str, Any], *, path: str) -> "SubgraphInlineConfig": mapping = require_mapping(data, path) return cls(graph=dict(mapping), path=path) def validate(self) -> None: if "nodes" not in self.graph: raise ConfigError("subgraph config must define nodes", extend_path(self.path, "nodes")) if "edges" not in self.graph: raise ConfigError("subgraph config must define edges", extend_path(self.path, "edges")) FIELD_SPECS = { "id": ConfigFieldSpec( name="id", display_name="Subgraph ID", type_hint="str", required=True, description="Subgraph identifier", ), "description": ConfigFieldSpec( name="description", display_name="Subgraph Description", type_hint="str", required=False, description="Describe the subgraph's responsibility, trigger conditions, and success criteria so reviewers know when to call it.", ), "log_level": ConfigFieldSpec( name="log_level", display_name="Log Level", type_hint="enum:LogLevel", required=False, default=LogLevel.INFO.value, enum=[lvl.value for lvl in LogLevel], description="Subgraph runtime log level", enum_options=enum_options_for(LogLevel), ), "is_majority_voting": ConfigFieldSpec( name="is_majority_voting", display_name="Majority Voting", type_hint="bool", required=False, default=False, description="Whether to perform majority voting on node results", ), "nodes": ConfigFieldSpec( name="nodes", display_name="Node List", type_hint="list[Node]", required=True, description="Subgraph node list, must contain at least one node", ), "edges": ConfigFieldSpec( name="edges", display_name="Edge List", type_hint="list[EdgeConfig]", required=True, description="Subgraph edge list", child=EdgeConfig, ), "memory": ConfigFieldSpec( name="memory", display_name="Memory Stores", type_hint="list[MemoryStoreConfig]", required=False, description="Provide a list of memory stores if this subgraph needs dedicated stores; leave empty to inherit parent graph 
stores.", child=MemoryStoreConfig, ), "vars": ConfigFieldSpec( name="vars", display_name="Variables", type_hint="dict[str, Any]", required=False, default={}, description="Variables passed to subgraph nodes", ), "organization": ConfigFieldSpec( name="organization", display_name="Organization", type_hint="str", required=False, description="Subgraph organization/team identifier", ), "initial_instruction": ConfigFieldSpec( name="initial_instruction", display_name="Initial Instruction", type_hint="str", required=False, description="Subgraph level initial instruction", ), "start": ConfigFieldSpec( name="start", display_name="Start Node", type_hint="str | list[str]", required=False, description="Start node ID list (entry list executed at subgraph start; not recommended to edit manually)", ), "end": ConfigFieldSpec( name="end", display_name="End Node", type_hint="str | list[str]", required=False, description="End node ID list (used to collect final subgraph output, not part of execution logic). 
This is an ordered list: earlier nodes are checked first; the first with output becomes the subgraph output, otherwise continue down the list.", ), } @classmethod def field_specs(cls) -> Dict[str, ConfigFieldSpec]: specs = super().field_specs() nodes_spec = specs.get("nodes") if nodes_spec: from entity.configs.node.node import Node specs["nodes"] = replace(nodes_spec, child=Node) return specs @dataclass class SubgraphConfig(BaseConfig): type: str config: BaseConfig | None = None @classmethod def from_dict(cls, data: Mapping[str, Any], *, path: str) -> "SubgraphConfig": mapping = require_mapping(data, path) source_type = require_str(mapping, "type", path) if "vars" in mapping and mapping["vars"]: raise ConfigError("vars is only allowed at root level (DesignConfig.vars)", extend_path(path, "vars")) if "config" not in mapping or mapping["config"] is None: raise ConfigError("subgraph configuration requires 'config' block", extend_path(path, "config")) try: config_cls = get_subgraph_source_config(source_type) except RegistryError as exc: raise ConfigError( f"subgraph.type must be one of {list(iter_subgraph_source_registrations().keys())}", extend_path(path, "type"), ) from exc config_obj = config_cls.from_dict(mapping["config"], path=extend_path(path, "config")) return cls(type=source_type, config=config_obj, path=path) def validate(self) -> None: if not self.config: raise ConfigError("subgraph config missing", extend_path(self.path, "config")) if hasattr(self.config, "validate"): self.config.validate() FIELD_SPECS = { "type": ConfigFieldSpec( name="type", display_name="Subgraph Source Type", type_hint="str", required=True, description="Registered subgraph source such as 'config' or 'file' (see subgraph_source_registry).", ), "config": ConfigFieldSpec( name="config", display_name="Subgraph Configuration", type_hint="object", required=True, description="Payload interpreted by the chosen type—for example inline graph schema for 'config' or file path payload for 'file'.", 
), } @classmethod def child_routes(cls) -> Dict[ChildKey, type[BaseConfig]]: return { ChildKey(field="config", value=name): config_cls for name, config_cls in iter_subgraph_source_registrations().items() } @classmethod def field_specs(cls) -> Dict[str, ConfigFieldSpec]: specs = super().field_specs() type_spec = specs.get("type") if type_spec: registrations = iter_subgraph_source_registrations() metadata = iter_subgraph_source_metadata() names = list(registrations.keys()) descriptions = { name: (metadata.get(name) or {}).get("summary") for name in names } specs["type"] = replace( type_spec, enum=names, enum_options=enum_options_from_values(names, descriptions, preserve_label_case=True), ) return specs
{ "repo_id": "OpenBMB/ChatDev", "file_path": "entity/configs/node/subgraph.py", "license": "Apache License 2.0", "lines": 232, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:entity/configs/node/thinking.py
"""Thinking configuration models.""" from dataclasses import dataclass, replace from typing import Any, Dict, Mapping from entity.enum_options import enum_options_from_values from schema_registry import ( SchemaLookupError, get_thinking_schema, iter_thinking_schemas, ) from entity.configs.base import BaseConfig, ConfigError, ConfigFieldSpec, ChildKey, extend_path, require_mapping, require_str @dataclass class ReflectionThinkingConfig(BaseConfig): reflection_prompt: str @classmethod def from_dict(cls, data: Mapping[str, Any], *, path: str) -> "ReflectionThinkingConfig": mapping = require_mapping(data, path) prompt = require_str(mapping, "reflection_prompt", path) return cls(reflection_prompt=prompt, path=path) FIELD_SPECS = { "reflection_prompt": ConfigFieldSpec( name="reflection_prompt", display_name="Reflection Prompt", type_hint="str", required=True, description="Prompt used for reflection in reflection mode", ) } @dataclass class ThinkingConfig(BaseConfig): type: str config: BaseConfig | None = None @classmethod def from_dict(cls, data: Mapping[str, Any], *, path: str) -> "ThinkingConfig": mapping = require_mapping(data, path) thinking_type = require_str(mapping, "type", path) try: schema = get_thinking_schema(thinking_type) except SchemaLookupError as exc: raise ConfigError(f"unsupported thinking type '{thinking_type}'", extend_path(path, "type")) from exc if "config" not in mapping or mapping["config"] is None: raise ConfigError("thinking config requires config block", extend_path(path, "config")) config_obj = schema.config_cls.from_dict(mapping["config"], path=extend_path(path, "config")) return cls(type=thinking_type, config=config_obj, path=path) FIELD_SPECS = { "type": ConfigFieldSpec( name="type", display_name="Thinking Mode", type_hint="str", required=True, description="Thinking mode type", ), "config": ConfigFieldSpec( name="config", display_name="Thinking Configuration", type_hint="object", required=True, description="Thinking mode configuration body", 
), } @classmethod def child_routes(cls) -> dict[ChildKey, type[BaseConfig]]: return { ChildKey(field="config", value=name): schema.config_cls for name, schema in iter_thinking_schemas().items() } @classmethod def field_specs(cls) -> Dict[str, ConfigFieldSpec]: specs = super().field_specs() type_spec = specs.get("type") if type_spec: registrations = iter_thinking_schemas() names = list(registrations.keys()) descriptions = {name: schema.summary for name, schema in registrations.items()} specs["type"] = replace( type_spec, enum=names, enum_options=enum_options_from_values(names, descriptions, preserve_label_case=True), ) return specs
{ "repo_id": "OpenBMB/ChatDev", "file_path": "entity/configs/node/thinking.py", "license": "Apache License 2.0", "lines": 79, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:entity/configs/node/tooling.py
"""Tooling configuration models.""" import hashlib from copy import deepcopy from dataclasses import dataclass, field, replace from typing import Any, Dict, List, Mapping, Tuple from entity.configs.base import ( BaseConfig, ConfigError, ConfigFieldSpec, EnumOption, ChildKey, ensure_list, optional_bool, optional_str, require_mapping, require_str, extend_path, ) from entity.enum_options import enum_options_from_values from utils.registry import Registry, RegistryError from utils.function_catalog import FunctionCatalog, get_function_catalog tooling_type_registry = Registry("tooling_type") MODULE_ALL_SUFFIX = ":All" def register_tooling_type( name: str, *, config_cls: type[BaseConfig], description: str | None = None, ) -> None: metadata = {"summary": description} if description else None tooling_type_registry.register(name, target=config_cls, metadata=metadata) def get_tooling_type_config(name: str) -> type[BaseConfig]: entry = tooling_type_registry.get(name) config_cls = entry.load() if not isinstance(config_cls, type) or not issubclass(config_cls, BaseConfig): raise RegistryError(f"Entry '{name}' is not a BaseConfig subclass") return config_cls def iter_tooling_type_registrations() -> Dict[str, type[BaseConfig]]: return {name: entry.load() for name, entry in tooling_type_registry.items()} def iter_tooling_type_metadata() -> Dict[str, Dict[str, Any]]: return {name: dict(entry.metadata or {}) for name, entry in tooling_type_registry.items()} @dataclass class FunctionToolEntryConfig(BaseConfig): """Schema helper used to describe per-function options.""" name: str | None = None description: str | None = None parameters: Dict[str, Any] | None = None auto_fill: bool = True FIELD_SPECS = { "name": ConfigFieldSpec( name="name", display_name="Function Name", type_hint="str", required=True, description="Function name from functions/function_calling directory", ), # "description": ConfigFieldSpec( # name="description", # display_name="Description", # type_hint="str", # 
required=False, # description="Override auto-parsed function description, optional", # advance=True, # ), # "parameters": ConfigFieldSpec( # name="parameters", # display_name="Parameter Schema", # type_hint="object", # required=False, # description="Override JSON Schema generated from function signature, optional", # advance=True, # ), # "auto_fill": ConfigFieldSpec( # name="auto_fill", # display_name="Auto Fill Description", # type_hint="bool", # required=False, # default=True, # description="Whether to auto-fill description/parameters based on Python function signature", # advance=True, # ), } @classmethod def field_specs(cls) -> Dict[str, ConfigFieldSpec]: specs = super().field_specs() catalog = get_function_catalog() modules = catalog.iter_modules() name_spec = specs.get("name") if name_spec is not None: description = name_spec.description or "Function name" enum_options: List[EnumOption] | None = None enum_values: List[str] | None = None if catalog.load_error: description = f"{description} (loading failed: {catalog.load_error})" elif not modules: description = f"{description} (no functions found in directory)" else: enum_options = [] enum_values = [] for module_name, metas in modules: all_label = f"{module_name}{MODULE_ALL_SUFFIX}" enum_values.append(all_label) preview = ", ".join(meta.name for meta in metas[:3]) suffix = "..." 
if len(metas) > 3 else "" module_hint = f"{module_name}.py" enum_options.append( EnumOption( value=all_label, label=all_label, description=( f"Load all {len(metas)} functions from {module_hint}" + (f" ({preview}{suffix})" if preview else "") ), ) ) for module_name, metas in modules: for meta in metas: label = f"{module_name}:{meta.name}" enum_values.append(meta.name) option_description = meta.description or "This function does not provide a docstring" enum_options.append( EnumOption( value=meta.name, label=label, description=option_description, ) ) specs["name"] = replace( name_spec, enum=enum_values, enum_options=enum_options, description=description, ) return specs @dataclass class FunctionToolConfig(BaseConfig): tools: List[Dict[str, Any]] auto_load: bool = True timeout: float | None = None # schema_version: str | None = None FIELD_SPECS = { "tools": ConfigFieldSpec( name="tools", display_name="Function Tool List", type_hint="list[FunctionToolEntryConfig]", required=True, description="Function tool list, at least one item", child=FunctionToolEntryConfig, ), # "auto_load": ConfigFieldSpec( # name="auto_load", # display_name="Auto Load Directory", # type_hint="bool", # required=False, # default=True, # description="Auto-load functions directory on startup", # advance=True # ), "timeout": ConfigFieldSpec( name="timeout", display_name="Execution Timeout", type_hint="float", required=False, description="Tool execution timeout (seconds)", advance=True ), # "schema_version": ConfigFieldSpec( # name="schema_version", # display_name="Schema Version", # type_hint="str", # required=False, # description="Tool schema version", # ), } @classmethod def from_dict(cls, data: Mapping[str, Any], *, path: str) -> "FunctionToolConfig": mapping = require_mapping(data, path) tools = ensure_list(mapping.get("tools")) if not tools: raise ConfigError("tools must be provided for function tooling", extend_path(path, "tools")) catalog = get_function_catalog() expanded_tools: 
List[Tuple[Dict[str, Any], str]] = [] for idx, tool in enumerate(tools): tool_path = extend_path(path, f"tools[{idx}]") if not isinstance(tool, Mapping): raise ConfigError("tool entry must be a mapping", tool_path) normalized = dict(tool) raw_name = normalized.get("name") if not isinstance(raw_name, str) or not raw_name.strip(): raise ConfigError("tool name is required", extend_path(tool_path, "name")) name = raw_name.strip() normalized["name"] = name module_name = cls._extract_module_from_all(name) if module_name: expanded_tools.extend( cls._expand_module_all_entry( module_name=module_name, catalog=catalog, path=tool_path, original=normalized, ) ) continue expanded_tools.append((normalized, tool_path)) tool_specs: List[Dict[str, Any]] = [] seen_functions: Dict[str, str] = {} for entry, entry_path in expanded_tools: normalized = dict(entry) name = normalized.get("name") if not isinstance(name, str) or not name.strip(): raise ConfigError("tool name is required", extend_path(entry_path, "name")) metadata = catalog.get(name) if metadata is None: raise ConfigError( f"function '{name}' not found under function directory", extend_path(entry_path, "name"), ) previous = seen_functions.get(name) if previous is not None: raise ConfigError( f"function '{name}' is declared multiple times (also in {previous})", extend_path(entry_path, "name"), ) seen_functions[name] = entry_path auto_fill = normalized.get("auto_fill", True) if not isinstance(auto_fill, bool): raise ConfigError("auto_fill must be boolean", extend_path(entry_path, "auto_fill")) merged = dict(normalized) if auto_fill: if not merged.get("description") and metadata.description: merged["description"] = metadata.description if not merged.get("parameters"): merged["parameters"] = deepcopy(metadata.parameters_schema) merged.pop("auto_fill", None) tool_specs.append(merged) auto_load = optional_bool(mapping, "auto_load", path, default=True) timeout_value = mapping.get("timeout") if timeout_value is not None and not 
isinstance(timeout_value, (int, float)): raise ConfigError("timeout must be numeric", extend_path(path, "timeout")) # schema_version = optional_str(mapping, "schema_version", path) return cls( tools=tool_specs, auto_load=bool(auto_load) if auto_load is not None else True, timeout=float(timeout_value) if isinstance(timeout_value, (int, float)) else None, # schema_version=schema_version, path=path, ) @staticmethod def _extract_module_from_all(value: str) -> str | None: if not value.endswith(MODULE_ALL_SUFFIX): return None module = value[: -len(MODULE_ALL_SUFFIX)].strip() return module or None @staticmethod def _expand_module_all_entry( *, module_name: str, catalog: FunctionCatalog, path: str, original: Mapping[str, Any], ) -> List[Tuple[Dict[str, Any], str]]: disallowed = [key for key in ("description", "parameters", "auto_fill") if key in original] if disallowed: fields = ", ".join(disallowed) raise ConfigError( f"{module_name}{MODULE_ALL_SUFFIX} does not support overriding {fields}", extend_path(path, "name"), ) functions = catalog.functions_for_module(module_name) if not functions: raise ConfigError( f"module '{module_name}' has no functions under function directory", extend_path(path, "name"), ) entries: List[Tuple[Dict[str, Any], str]] = [] for fn_name in functions: entries.append(({"name": fn_name}, path)) return entries @dataclass class McpRemoteConfig(BaseConfig): server: str headers: Dict[str, str] = field(default_factory=dict) timeout: float | None = None FIELD_SPECS = { "server": ConfigFieldSpec( name="server", display_name="MCP Server URL", type_hint="str", required=True, description="HTTP(S) endpoint of the MCP server", ), "headers": ConfigFieldSpec( name="headers", display_name="Custom Headers", type_hint="dict[str, str]", required=False, description="Additional request headers (e.g. 
Authorization)", advance=True, ), "timeout": ConfigFieldSpec( name="timeout", display_name="Client Timeout", type_hint="float", required=False, description="Per-request timeout in seconds", advance=True, ), } @classmethod def from_dict(cls, data: Mapping[str, Any], *, path: str) -> "McpRemoteConfig": mapping = require_mapping(data, path) server = require_str(mapping, "server", path) headers_raw = mapping.get("headers") headers: Dict[str, str] = {} if headers_raw is not None: if not isinstance(headers_raw, Mapping): raise ConfigError("headers must be a mapping", extend_path(path, "headers")) headers = {str(k): str(v) for k, v in headers_raw.items()} timeout_value = mapping.get("timeout") timeout: float | None if timeout_value is None: timeout = None elif isinstance(timeout_value, (int, float)): timeout = float(timeout_value) else: raise ConfigError("timeout must be numeric", extend_path(path, "timeout")) return cls(server=server, headers=headers, timeout=timeout, path=path) def cache_key(self) -> str: payload = ( self.server, tuple(sorted(self.headers.items())), self.timeout, ) return hashlib.sha1(repr(payload).encode("utf-8")).hexdigest() @dataclass class McpLocalConfig(BaseConfig): command: str args: List[str] = field(default_factory=list) cwd: str | None = None env: Dict[str, str] = field(default_factory=dict) inherit_env: bool = True startup_timeout: float = 10.0 wait_for_log: str | None = None FIELD_SPECS = { "command": ConfigFieldSpec( name="command", display_name="Launch Command", type_hint="str", required=True, description="Executable used to start the MCP stdio server (e.g. 
uvx)", ), "args": ConfigFieldSpec( name="args", display_name="Arguments", type_hint="list[str]", required=False, description="Command arguments, defaults to empty list", ), "cwd": ConfigFieldSpec( name="cwd", display_name="Working Directory", type_hint="str", required=False, description="Optional working directory for the launch command", advance=True, ), "env": ConfigFieldSpec( name="env", display_name="Environment Variables", type_hint="dict[str, str]", required=False, description="Additional environment variables for the process", advance=True, ), "inherit_env": ConfigFieldSpec( name="inherit_env", display_name="Inherit Parent Env", type_hint="bool", required=False, default=True, description="Whether to start from parent env before applying overrides", advance=True, ), "startup_timeout": ConfigFieldSpec( name="startup_timeout", display_name="Startup Timeout", type_hint="float", required=False, default=10.0, description="Seconds to wait for readiness logs", advance=True, ), "wait_for_log": ConfigFieldSpec( name="wait_for_log", display_name="Ready Log Pattern", type_hint="str", required=False, description="Regex that marks readiness when matched against stdout", advance=True, ), } @classmethod def from_dict(cls, data: Mapping[str, Any], *, path: str) -> "McpLocalConfig": mapping = require_mapping(data, path) command = require_str(mapping, "command", path) args_raw = ensure_list(mapping.get("args")) normalized_args: List[str] = [] for idx, arg in enumerate(args_raw): arg_path = extend_path(path, f"args[{idx}]") if not isinstance(arg, str): raise ConfigError("args entries must be strings", arg_path) normalized_args.append(arg) cwd = optional_str(mapping, "cwd", path) inherit_env = optional_bool(mapping, "inherit_env", path, default=True) if inherit_env is None: inherit_env = True env_mapping = mapping.get("env") if env_mapping is not None: if not isinstance(env_mapping, Mapping): raise ConfigError("env must be a mapping", extend_path(path, "env")) env = {str(k): 
str(v) for k, v in env_mapping.items()} else: env = {} timeout_value = mapping.get("startup_timeout", 10.0) if timeout_value is None: startup_timeout = 10.0 elif isinstance(timeout_value, (int, float)): startup_timeout = float(timeout_value) else: raise ConfigError("startup_timeout must be numeric", extend_path(path, "startup_timeout")) wait_for_log = optional_str(mapping, "wait_for_log", path) return cls( command=command, args=normalized_args, cwd=cwd, env=env, inherit_env=bool(inherit_env), startup_timeout=startup_timeout, wait_for_log=wait_for_log, path=path, ) def cache_key(self) -> str: payload = ( self.command, tuple(self.args), self.cwd or "", tuple(sorted(self.env.items())), self.inherit_env, self.startup_timeout, self.wait_for_log or "", ) return hashlib.sha1(repr(payload).encode("utf-8")).hexdigest() register_tooling_type( "function", config_cls=FunctionToolConfig, description="Use local Python functions", ) register_tooling_type( "mcp_remote", config_cls=McpRemoteConfig, description="Connect to an HTTP-based MCP server", ) register_tooling_type( "mcp_local", config_cls=McpLocalConfig, description="Launch and connect to a local stdio MCP server", ) @dataclass class ToolingConfig(BaseConfig): type: str config: BaseConfig | None = None prefix: str | None = None FIELD_SPECS = { "type": ConfigFieldSpec( name="type", display_name="Tool Type", type_hint="str", required=True, description="Select a tooling adapter registered via tooling_type_registry (function, mcp_remote, mcp_local, etc.).", ), "prefix": ConfigFieldSpec( name="prefix", display_name="Tool Prefix", type_hint="str", required=False, description="Optional prefix for all tools from this source to prevent name collisions (e.g. 
'mcp1').", advance=True, ), "config": ConfigFieldSpec( name="config", display_name="Tool Configuration", type_hint="object", required=True, description="Configuration block validated by the chosen tool type (Python function list, MCP server settings, local command MCP launch, etc.).", ), } @classmethod def child_routes(cls) -> Dict[ChildKey, type[BaseConfig]]: return { ChildKey(field="config", value=name): config_cls for name, config_cls in iter_tooling_type_registrations().items() } @classmethod def from_dict(cls, data: Mapping[str, Any], *, path: str) -> "ToolingConfig": mapping = require_mapping(data, path) tooling_type = require_str(mapping, "type", path) try: config_cls = get_tooling_type_config(tooling_type) except RegistryError as exc: raise ConfigError( f"tooling.type must be one of {list(iter_tooling_type_registrations().keys())}", extend_path(path, "type"), ) from exc config_payload = mapping.get("config") if config_payload is None: raise ConfigError("tooling requires config block", extend_path(path, "config")) config_obj = config_cls.from_dict(config_payload, path=extend_path(path, "config")) prefix = optional_str(mapping, "prefix", path) return cls(type=tooling_type, config=config_obj, prefix=prefix, path=path) @classmethod def field_specs(cls) -> Dict[str, ConfigFieldSpec]: specs = super().field_specs() type_spec = specs.get("type") if type_spec: registrations = iter_tooling_type_registrations() metadata = iter_tooling_type_metadata() type_names = list(registrations.keys()) default_value = type_names[0] if type_names else None descriptions = {name: (metadata.get(name) or {}).get("summary") for name in type_names} specs["type"] = replace( type_spec, enum=type_names, default=default_value, enum_options=enum_options_from_values(type_names, descriptions), ) return specs
{ "repo_id": "OpenBMB/ChatDev", "file_path": "entity/configs/node/tooling.py", "license": "Apache License 2.0", "lines": 536, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:entity/enum_options.py
"""Helper utilities for building EnumOption metadata.""" from enum import Enum from typing import Dict, List, Mapping, Sequence, Type, TypeVar from entity.configs.base import EnumOption from entity.enums import LogLevel, AgentExecFlowStage, AgentInputMode from utils.strs import titleize EnumT = TypeVar("EnumT", bound=Enum) _ENUM_DESCRIPTIONS: Dict[Type[Enum], Dict[Enum, str]] = { LogLevel: { LogLevel.DEBUG: "Verbose developer logging; useful when debugging graph behavior.", LogLevel.INFO: "High-level execution progress and key checkpoints.", LogLevel.WARNING: "Recoverable problems that require attention but do not stop the run.", LogLevel.ERROR: "Errors that abort the current node or edge execution, even the entire workflow.", LogLevel.CRITICAL: "Fatal issues that stop the workflow immediately.", }, AgentInputMode: { AgentInputMode.PROMPT: "Send a single string prompt assembled from previous messages.", AgentInputMode.MESSAGES: "Send structured role/content messages (Chat Completions style) which is recommended.", }, AgentExecFlowStage: { AgentExecFlowStage.PRE_GEN_THINKING_STAGE: "Pre-generation thinking / planning stage.", AgentExecFlowStage.GEN_STAGE: "Main generation stage; also covers tool calling.", AgentExecFlowStage.POST_GEN_THINKING_STAGE: "Reflection or verification after generation.", AgentExecFlowStage.FINISHED_STAGE: "Finalization stage for cleanup and summary.", }, } def enum_options_for(enum_cls: Type[EnumT]) -> List[EnumOption]: """Return EnumOption entries for a Python Enum class.""" descriptions = _ENUM_DESCRIPTIONS.get(enum_cls, {}) options: List[EnumOption] = [] for member in enum_cls: label = titleize(member.name) options.append(EnumOption(value=member.value, label=label, description=descriptions.get(member))) return options def enum_options_from_values( values: Sequence[str], descriptions: Mapping[str, str | None] | None = None, *, preserve_label_case: bool = False, ) -> List[EnumOption]: """Create EnumOption entries from literal string 
values.""" options: List[EnumOption] = [] desc_map = descriptions or {} for value in values: label = value if preserve_label_case else titleize(value) options.append(EnumOption(value=value, label=label, description=desc_map.get(value))) return options def describe_enums_map() -> Dict[str, Dict[str, str]]: """Return a serializable description map (mostly for tests/debugging).""" payload: Dict[str, Dict[str, str]] = {} for enum_cls, mapping in _ENUM_DESCRIPTIONS.items(): payload[enum_cls.__name__] = {member.value: text for member, text in mapping.items() if text} return payload __all__ = [ "enum_options_for", "enum_options_from_values", "describe_enums_map", ]
{ "repo_id": "OpenBMB/ChatDev", "file_path": "entity/enum_options.py", "license": "Apache License 2.0", "lines": 58, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:entity/enums.py
from enum import Enum class AgentExecFlowStage(str, Enum): """Execution stages used to orchestrate agent workflows.""" # INPUT_STAGE = "input" PRE_GEN_THINKING_STAGE = "pre_gen_thinking" GEN_STAGE = "gen" # Includes tool calling plus the final response when applicable POST_GEN_THINKING_STAGE = "post_gen_thinking" FINISHED_STAGE = "finished" class LogLevel(str, Enum): DEBUG = "DEBUG" INFO = "INFO" WARNING = "WARNING" ERROR = "ERROR" CRITICAL = "CRITICAL" __level_values = { "DEBUG": 10, "INFO": 20, "WARNING": 30, "ERROR": 40, "CRITICAL": 50, } @property def level(self) -> int: return self.__level_values[self.value] def __lt__(self, other): if isinstance(other, LogLevel): return self.level < other.level return NotImplemented def __le__(self, other): if isinstance(other, LogLevel): return self.level <= other.level return NotImplemented def __gt__(self, other): if isinstance(other, LogLevel): return self.level > other.level return NotImplemented def __ge__(self, other): if isinstance(other, LogLevel): return self.level >= other.level return NotImplemented def __eq__(self, other): if isinstance(other, LogLevel): return self.level == other.level return super().__eq__(other) def __hash__(self): return super().__hash__() class EventType(str, Enum): NODE_START = "NODE_START" NODE_END = "NODE_END" EDGE_PROCESS = "EDGE_PROCESS" MODEL_CALL = "MODEL_CALL" TOOL_CALL = "TOOL_CALL" AGENT_CALL = "AGENT_CALL" HUMAN_INTERACTION = "HUMAN_INTERACTION" THINKING_PROCESS = "THINKING_PROCESS" MEMORY_OPERATION = "MEMORY_OPERATION" WORKFLOW_START = "WORKFLOW_START" WORKFLOW_END = "WORKFLOW_END" TEST = "TEST" class CallStage(str, Enum): BEFORE = "before" AFTER = "after" class AgentInputMode(str, Enum): """Controls how node inputs are fed into agent providers.""" PROMPT = "prompt" MESSAGES = "messages"
{ "repo_id": "OpenBMB/ChatDev", "file_path": "entity/enums.py", "license": "Apache License 2.0", "lines": 66, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:entity/graph_config.py
"""GraphConfig wraps parsed graph definitions with runtime metadata.""" from dataclasses import dataclass, field from pathlib import Path from typing import Any, Dict, List, Optional from entity.enums import LogLevel from entity.configs import GraphDefinition, MemoryStoreConfig, Node, EdgeConfig @dataclass class GraphConfig: definition: GraphDefinition name: str output_root: Path log_level: LogLevel metadata: Dict[str, Any] = field(default_factory=dict) source_path: Optional[str] = None vars: Dict[str, Any] = field(default_factory=dict) @classmethod def from_dict( cls, config: Dict[str, Any], name: str, output_root: Path | str, *, source_path: str | None = None, vars: Dict[str, Any] | None = None, ) -> "GraphConfig": definition = GraphDefinition.from_dict(config, path="graph") return cls( definition=definition, name=name, output_root=Path(output_root) if output_root else Path("WareHouse"), log_level=definition.log_level, metadata={}, source_path=source_path, vars=dict(vars or {}), ) @classmethod def from_definition( cls, definition: GraphDefinition, name: str, output_root: Path | str, *, source_path: str | None = None, vars: Dict[str, Any] | None = None, ) -> "GraphConfig": return cls( definition=definition, name=name, output_root=Path(output_root) if output_root else Path("WareHouse"), log_level=definition.log_level, metadata={}, source_path=source_path, vars=dict(vars or {}), ) def get_node_definitions(self) -> List[Node]: return self.definition.nodes def get_edge_definitions(self) -> List[EdgeConfig]: return self.definition.edges def get_memory_config(self) -> List[MemoryStoreConfig] | None: return self.definition.memory def get_organization(self) -> str: return self.definition.organization or "DefaultOrg" def get_source_path(self) -> str: if self.source_path: return self.source_path return self.definition.id or "config.yaml" def get_initial_instruction(self) -> str: return self.definition.initial_instruction or "" @property def is_majority_voting(self) -> bool: 
return self.definition.is_majority_voting def to_dict(self) -> Dict[str, Any]: return { "name": self.name, "output_root": str(self.output_root), "log_level": self.log_level.value, "metadata": self.metadata, "graph": self.definition, "vars": self.vars, }
{ "repo_id": "OpenBMB/ChatDev", "file_path": "entity/graph_config.py", "license": "Apache License 2.0", "lines": 80, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:entity/messages.py
"""Core message abstractions used across providers and executors.""" import copy from dataclasses import dataclass, field import json from enum import Enum from typing import Any, Dict, List, Optional, Union class MessageRole(str, Enum): """Unified message roles for internal conversations.""" SYSTEM = "system" USER = "user" ASSISTANT = "assistant" TOOL = "tool" class MessageBlockType(str, Enum): """Supported block types for multimodal message content.""" TEXT = "text" IMAGE = "image" AUDIO = "audio" VIDEO = "video" FILE = "file" DATA = "data" @classmethod def from_mime_type(cls, mime_type: str) -> "MessageBlockType": """Guess block type from MIME type.""" if not mime_type: return MessageBlockType.FILE if mime_type.startswith("image/"): return MessageBlockType.IMAGE if mime_type.startswith("audio/"): return MessageBlockType.AUDIO if mime_type.startswith("video/"): return MessageBlockType.VIDEO return MessageBlockType.FILE @dataclass class AttachmentRef: """Metadata for a payload stored locally or uploaded to a provider.""" attachment_id: str mime_type: Optional[str] = None name: Optional[str] = None size: Optional[int] = None sha256: Optional[str] = None local_path: Optional[str] = None remote_file_id: Optional[str] = None data_uri: Optional[str] = None metadata: Dict[str, Any] = field(default_factory=dict) def to_dict(self, include_data: bool = True) -> Dict[str, Any]: payload: Dict[str, Any] = { "attachment_id": self.attachment_id, "mime_type": self.mime_type, "name": self.name, "size": self.size, "sha256": self.sha256, "local_path": self.local_path, "remote_file_id": self.remote_file_id, "metadata": dict(self.metadata), } if include_data and self.data_uri: payload["data_uri"] = self.data_uri elif self.data_uri and not include_data: payload["data_uri"] = "[omitted]" # Remove keys that are None to keep payload compact return {key: value for key, value in payload.items() if value is not None and value != {}} @classmethod def from_dict(cls, data: Dict[str, Any]) -> 
"AttachmentRef": return cls( attachment_id=data.get("attachment_id", ""), mime_type=data.get("mime_type"), name=data.get("name"), size=data.get("size"), sha256=data.get("sha256"), local_path=data.get("local_path"), remote_file_id=data.get("remote_file_id"), data_uri=data.get("data_uri"), metadata=data.get("metadata") or {}, ) def copy(self) -> "AttachmentRef": return AttachmentRef( attachment_id=self.attachment_id, mime_type=self.mime_type, name=self.name, size=self.size, sha256=self.sha256, local_path=self.local_path, remote_file_id=self.remote_file_id, data_uri=self.data_uri, metadata=dict(self.metadata), ) @dataclass class MessageBlock: """Single block of multimodal content.""" type: MessageBlockType text: Optional[str] = None attachment: Optional[AttachmentRef] = None data: Dict[str, Any] = field(default_factory=dict) def to_dict(self, include_data: bool = True) -> Dict[str, Any]: payload: Dict[str, Any] = { "type": self.type.value, } if self.text is not None: payload["text"] = self.text if self.attachment: payload["attachment"] = self.attachment.to_dict(include_data=include_data) if self.data: payload["data"] = self.data return payload @classmethod def from_dict(cls, data: Dict[str, Any]) -> "MessageBlock": raw_type = data.get("type") or MessageBlockType.TEXT.value try: block_type = MessageBlockType(raw_type) except ValueError: block_type = MessageBlockType.DATA attachment_data = data.get("attachment") attachment = None if isinstance(attachment_data, dict): attachment = AttachmentRef.from_dict(attachment_data) return cls( type=block_type, text=data.get("text"), attachment=attachment, data=data.get("data") or {}, ) @classmethod def text_block(cls, text: str) -> "MessageBlock": return cls(type=MessageBlockType.TEXT, text=text) def describe(self) -> str: """Human-friendly summary for logging.""" if self.type is MessageBlockType.TEXT and self.text: return self.text if self.attachment: name = self.attachment.name or self.attachment.attachment_id return 
f"[{self.type.value} attachment: {name}]" if self.text: return self.text if "text" in self.data: return str(self.data["text"]) return f"[{self.type.value} block]" def copy(self) -> "MessageBlock": return MessageBlock( type=self.type, text=self.text, attachment=self.attachment.copy() if self.attachment else None, data=dict(self.data), ) @dataclass class ToolCallPayload: """Unified representation of a tool call request.""" id: str function_name: str arguments: str type: str = "function" metadata: Dict[str, Any] = field(default_factory=dict) def to_openai_dict(self) -> Dict[str, Any]: """Convert to OpenAI-compatible schema.""" return { "id": self.id, "type": self.type, "function": { "name": self.function_name, "arguments": self.arguments, }, } @dataclass class FunctionCallOutputEvent: """Structured event recorded when a tool execution finishes.""" call_id: str function_name: Optional[str] = None output_blocks: List[MessageBlock] = field(default_factory=list) output_text: Optional[str] = None metadata: Dict[str, Any] = field(default_factory=dict) @property def type(self) -> str: return "function_call_output" def to_dict(self, include_data: bool = True) -> Dict[str, Any]: payload: Dict[str, Any] = { "type": self.type, "call_id": self.call_id, } if self.function_name: payload["function_name"] = self.function_name if self.output_blocks: payload["output_blocks"] = [ block.to_dict(include_data=include_data) for block in self.output_blocks ] if self.output_text is not None: payload["output"] = self.output_text if self.metadata: payload["metadata"] = self.metadata return payload def has_blocks(self) -> bool: return bool(self.output_blocks) def describe(self) -> str: if self.output_text: return self.output_text if self.output_blocks: descriptions = [block.describe() for block in self.output_blocks] return "\n".join(filter(None, descriptions)) return "" MessageContent = Union[str, List[MessageBlock], List[Dict[str, Any]]] @dataclass class Message: """Unified message structure 
shared by executors and providers.""" role: MessageRole content: MessageContent name: Optional[str] = None tool_call_id: Optional[str] = None metadata: Dict[str, Any] = field(default_factory=dict) tool_calls: List[ToolCallPayload] = field(default_factory=list) keep: bool = False preserve_role: bool = False def with_content(self, content: MessageContent) -> "Message": """Return a shallow copy with updated content.""" return Message( role=self.role, content=content, name=self.name, tool_call_id=self.tool_call_id, metadata=dict(self.metadata), tool_calls=list(self.tool_calls), keep=self.keep, preserve_role=self.preserve_role, ) def with_role(self, role: MessageRole) -> "Message": """Return a shallow copy with updated role.""" return Message( role=role, content=self.content, name=self.name, tool_call_id=self.tool_call_id, metadata=dict(self.metadata), tool_calls=list(self.tool_calls), keep=self.keep, preserve_role=self.preserve_role, ) def text_content(self) -> str: """Best-effort string representation of the content.""" if self.content is None: return "" if isinstance(self.content, str): return self.content # Some providers (e.g., multimodal) return list content; join textual parts. 
parts = [] for block in self.blocks(): description = block.describe() if description: parts.append(description) return "\n".join(parts) def blocks(self) -> List[MessageBlock]: """Return content as a list of MessageBlock items.""" if self.content is None: return [] if isinstance(self.content, str): return [MessageBlock.text_block(self.content)] blocks: List[MessageBlock] = [] for block in self.content: if isinstance(block, MessageBlock): blocks.append(block) elif isinstance(block, dict): try: blocks.append(MessageBlock.from_dict(block)) except Exception: # Fallback to text representation of unexpected dicts text_value = block.get("text") if isinstance(block, dict) else None blocks.append(MessageBlock(MessageBlockType.DATA, text=text_value, data=block if isinstance(block, dict) else {})) return blocks def clone(self) -> "Message": """Deep copy of the message, preserving content blocks.""" return Message( role=self.role, content=_copy_content(self.content), name=self.name, tool_call_id=self.tool_call_id, metadata=dict(self.metadata), tool_calls=list(self.tool_calls), keep=self.keep, preserve_role=self.preserve_role, ) def to_dict(self, include_data: bool = True) -> Dict[str, Any]: """Return a JSON-serializable representation.""" payload = { "role": self.role.value, } if isinstance(self.content, list): payload["content"] = [ block.to_dict(include_data=include_data) if isinstance(block, MessageBlock) else block for block in self.content ] else: payload["content"] = self.content if self.name: payload["name"] = self.name if self.tool_call_id: payload["tool_call_id"] = self.tool_call_id if self.metadata: payload["metadata"] = self.metadata if self.tool_calls: payload["tool_calls"] = [call.to_openai_dict() for call in self.tool_calls] if self.keep: payload["keep"] = self.keep if self.preserve_role: payload["preserve_role"] = self.preserve_role return payload @classmethod def from_dict(cls, data: Dict[str, Any]) -> "Message": role_value = data.get("role") if not role_value: 
raise ValueError("message dict missing role") role = MessageRole(role_value) content = data.get("content") if isinstance(content, list): converted: List[MessageBlock] = [] for block in content: if isinstance(block, MessageBlock): converted.append(block) elif isinstance(block, dict): try: converted.append(MessageBlock.from_dict(block)) except Exception: # Preserve raw dict for debugging; text_content will stringify best-effort converted.append( MessageBlock( type=MessageBlockType.DATA, text=str(block), data=block, ) ) content = converted tool_calls_data = data.get("tool_calls") or [] tool_calls: List[ToolCallPayload] = [] for item in tool_calls_data: if not isinstance(item, dict): continue fn = item.get("function", {}) or {} metadata = item.get("metadata") or {} tool_calls.append( ToolCallPayload( id=item.get("id", ""), function_name=fn.get("name", ""), arguments=fn.get("arguments", ""), type=item.get("type", "function"), metadata=metadata, ) ) return cls( role=role, content=content, name=data.get("name"), tool_call_id=data.get("tool_call_id"), metadata=data.get("metadata") or {}, tool_calls=tool_calls, keep=bool(data.get("keep", False)), preserve_role=bool(data.get("preserve_role", False)), ) def serialize_messages(messages: List[Message], *, include_data: bool = True) -> str: """Serialize message list into JSON string.""" return json.dumps([msg.to_dict(include_data=include_data) for msg in messages], ensure_ascii=False) def deserialize_messages(payload: str) -> List[Message]: """Deserialize JSON string back to messages.""" if not payload: return [] raw = json.loads(payload) if not isinstance(raw, list): raise ValueError("message payload must be a list") return [Message.from_dict(item) for item in raw if isinstance(item, dict)] def _copy_content(content: MessageContent) -> MessageContent: if content is None: return None if isinstance(content, str): return content copied: List[Any] = [] for block in content: if isinstance(block, MessageBlock): 
copied.append(block.copy()) else: copied.append(copy.deepcopy(block)) return copied
{ "repo_id": "OpenBMB/ChatDev", "file_path": "entity/messages.py", "license": "Apache License 2.0", "lines": 374, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:entity/tool_spec.py
"""Provider-agnostic tool specification dataclasses.""" from dataclasses import dataclass, field from typing import Any, Dict @dataclass class ToolSpec: """Generic representation of a callable tool.""" name: str description: str = "" parameters: Dict[str, Any] = field(default_factory=dict) metadata: Dict[str, Any] = field(default_factory=dict) def to_openai_dict(self) -> Dict[str, Any]: """Convert to OpenAI Responses API function schema.""" return { "type": "function", "name": self.name, "description": self.description, "parameters": self.parameters or {"type": "object", "properties": {}}, } def to_gemini_function(self) -> Dict[str, Any]: """Convert to Gemini FunctionDeclaration schema.""" return { "name": self.name, "description": self.description, "parameters": self.parameters or {"type": "object", "properties": {}}, }
{ "repo_id": "OpenBMB/ChatDev", "file_path": "entity/tool_spec.py", "license": "Apache License 2.0", "lines": 25, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:frontend/src/count_lines.py
import os def count_lines(root: str, extensions=None) -> int: """ Count total number of lines in files under `root` whose extension is in `extensions`. :param root: Root directory to walk (e.g., "."). :param extensions: Iterable of file extensions (including the dot), e.g. [".py", ".js"]. :return: Total number of lines across all matching files. """ if extensions is None: # Common source-code extensions used in this project extensions = {".py", ".js", ".jsx", ".ts", ".tsx", ".vue", ".css", ".scss", ".md"} total_lines = 0 for dirpath, dirnames, filenames in os.walk(root): # Skip typical build/output folders if present under src dirnames[:] = [d for d in dirnames if d not in {".git", "node_modules", "dist", "__pycache__"}] for filename in filenames: _, ext = os.path.splitext(filename) if ext.lower() not in extensions: continue file_path = os.path.join(dirpath, filename) try: with open(file_path, "r", encoding="utf-8", errors="ignore") as f: for _ in f: total_lines += 1 except (IOError, OSError): # If a file can't be read, skip it but continue counting others continue return total_lines def main(): root = "." total = count_lines(root) print(f"Total lines of code under '{os.path.abspath(root)}': {total}") if __name__ == "__main__": main()
{ "repo_id": "OpenBMB/ChatDev", "file_path": "frontend/src/count_lines.py", "license": "Apache License 2.0", "lines": 34, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:functions/edge/conditions.py
"""Edge condition helpers used by workflow YAML definitions.""" import re def contains_keyword(data: str) -> bool: """Check if data contains the keyword 'trigger'.""" return "trigger" in data.lower() def length_greater_than_5(data: str) -> bool: """Check if data length is greater than 5.""" return len(data) > 5 def always_false(data: str) -> bool: """Always return false for testing.""" return False def not_contains_keyword(data: str) -> bool: """Check if data contains the keyword 'trigger'.""" return "trigger" not in data.lower() def code_pass(data: str) -> bool: return "==CODE EXECUTION FAILED==" not in data def code_fail(data: str) -> bool: return "==CODE EXECUTION FAILED==" in data def _extract_verdict(data: str) -> str | None: """Parse `Verdict: CONTINUE|STOP` markers from agent outputs.""" match = re.search(r"verdict\s*:\s*(\w+)", data, re.IGNORECASE) if not match: return None return match.group(1).upper() def need_reflection_loop(data: str) -> bool: """Return True when the Reasoner asks for another Reflexion iteration.""" verdict = _extract_verdict(data) if verdict is None: return True # Default to continuing until STOP is explicit return verdict not in {"STOP", "DONE"} def should_stop_loop(data: str) -> bool: """Return True when the Reasoner signals the Reflexion loop can stop.""" verdict = _extract_verdict(data) if verdict is None: return False return verdict in {"STOP", "DONE"}
{ "repo_id": "OpenBMB/ChatDev", "file_path": "functions/edge/conditions.py", "license": "Apache License 2.0", "lines": 36, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:functions/edge_processor/transformers.py
from typing import Dict, Any, Tuple
import os
import re
import shutil
import signal
import subprocess
import time
from pathlib import Path

from functions.function_calling.file import FileToolContext


def uppercase_payload(data: str, _context: Dict[str, Any]) -> str:
    """Return an uppercase copy of the payload text."""
    return (data or "").upper()


def code_save_and_run(data: str, _context: Dict[str, Any]) -> str:
    """
    Clear the workspace, save code from the payload, run main.py, and
    return the saved code blocks followed by the execution result.

    The payload format is expected to be:

        FILENAME
        ```python
        CODE
        ```

    repeated for multiple files. Payloads without any fenced code block
    are returned unchanged.
    """
    # Parse FILENAME + fenced-code pairs before touching the workspace.
    pattern = re.compile(
        r"(?P<filename>[^\n]+)\n```(?:python)?\n(?P<code>.*?)\n```", re.DOTALL
    )
    matches = list(pattern.finditer(data))
    if not matches:
        return data

    ctx = FileToolContext(_context)
    workspace_root = ctx.workspace_root

    # 1. Clear workspace (user-provided attachments are preserved).
    if workspace_root.exists():
        for item in workspace_root.iterdir():
            if item.name == "attachments":
                continue
            if item.is_dir():
                shutil.rmtree(item)
            else:
                item.unlink()

    # 2. Save each block under its declared filename and re-assemble the
    #    payload in the same FILENAME + fenced-code format.
    saved_code_blocks = []
    for match in matches:
        filename = match.group("filename").strip()
        code = match.group("code")
        file_path = workspace_root / filename
        # Ensure parent dirs exist if filename contains path separators.
        file_path.parent.mkdir(parents=True, exist_ok=True)
        file_path.write_text(code, encoding="utf-8")
        # BUG FIX: echo the actual filename (previously a hard-coded
        # "(unknown)" placeholder, which lost the file names downstream).
        saved_code_blocks.append(f"{filename}\n```python\n{code}\n```")

    cleaned_code_str = "\n".join(saved_code_blocks)

    # 3. Execute main.py and append the outcome to the echoed code.
    execution_result = _execute_main(workspace_root)
    return cleaned_code_str + "\n\n" + execution_result


def _execute_main(workspace_root: Path) -> str:
    """Run `uv run main.py` in *workspace_root* (3s budget); summarise result."""
    success_info = "The software run successfully without errors."
    try:
        env = os.environ.copy()
        env["PYTHONUNBUFFERED"] = "1"
        if os.name == 'nt':
            # Windows: new process group so CTRL_BREAK can reach the child.
            command = f"cd {workspace_root} && dir && uv run main.py"
            process = subprocess.Popen(
                command,
                shell=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                creationflags=subprocess.CREATE_NEW_PROCESS_GROUP,
                env=env,
            )
        else:
            # POSIX: own session so the whole process group can be signalled.
            command = "uv run main.py"
            process = subprocess.Popen(
                command,
                shell=True,
                cwd=str(workspace_root),
                preexec_fn=os.setsid,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                env=env,
            )
        try:
            stdout, stderr = process.communicate(timeout=3)
        except subprocess.TimeoutExpired:
            # Long-running programs (e.g. GUIs) are expected: terminate the
            # group and keep whatever output was produced so far.
            if hasattr(os, "killpg"):
                os.killpg(os.getpgid(process.pid), signal.SIGTERM)
            else:
                os.kill(process.pid, signal.SIGTERM)
            if process.poll() is None and hasattr(signal, 'CTRL_BREAK_EVENT'):
                os.kill(process.pid, signal.CTRL_BREAK_EVENT)
            stdout, stderr = process.communicate()

        stdout_str = stdout.decode('utf-8', errors='ignore') if stdout else ""
        stderr_str = stderr.decode('utf-8', errors='ignore') if stderr else ""

        # Filter out uv's experimental-option warning so it is not
        # mistaken for a real error below.
        uv_warning_pattern = "warning: The `extra-build-dependencies` option is experimental and may change without warning."
        if uv_warning_pattern in stderr_str:
            stderr_str = "\n".join(
                line for line in stderr_str.splitlines()
                if uv_warning_pattern not in line
            )

        return_code = process.returncode
        # Treat as success if return code is 0 OR if there are no errors in
        # stderr (covers the timed-out/killed-but-didn't-crash case).
        if return_code == 0 or not stderr_str.strip():
            parts = []
            if stdout_str.strip():
                parts.append(stdout_str.strip())
            parts.append(success_info)
            return "\n\n".join(parts)
        if "traceback" in stderr_str.lower():
            # Strip the absolute workspace path out of tracebacks.
            return stderr_str.replace(str(workspace_root), "")
        return stderr_str
    except Exception as e:
        return f"Error: {e}"
{ "repo_id": "OpenBMB/ChatDev", "file_path": "functions/edge_processor/transformers.py", "license": "Apache License 2.0", "lines": 109, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:functions/function_calling/code_executor.py
def execute_code(code: str, time_out: int = 60) -> str: """ Execute code and return std outputs and std error. Args: code (str): Code to execute. time_out (int): time out, in second. Returns: str: std output and std error """ import os import sys import subprocess import uuid from pathlib import Path def __write_script_file(_code: str): _workspace = Path(os.getenv('TEMP_CODE_DIR', 'temp')) _workspace.mkdir(exist_ok=True) filename = f"{uuid.uuid4()}.py" code_path = _workspace / filename code_content = _code if _code.endswith("\n") else _code + "\n" code_path.write_text(code_content, encoding="utf-8") return code_path def __default_interpreter() -> str: return sys.executable or "python3" script_path = None stdout = "" stderr = "" try: script_path = __write_script_file(code) workspace = script_path.parent cmd = [__default_interpreter(), str(script_path)] try: completed = subprocess.run( cmd, cwd=str(workspace), capture_output=True, timeout=time_out, check=False ) stdout = completed.stdout.decode('utf-8', errors="replace") stderr = completed.stderr.decode('utf-8', errors="replace") except subprocess.TimeoutExpired as e: stdout = e.stdout.decode('utf-8', errors="replace") if e.stdout else "" stderr = e.stderr.decode('utf-8', errors="replace") if e.stderr else "" stderr += f"\nError: Execution timed out after {time_out} seconds." except Exception as e: stderr = f"Execution error: {str(e)}" except Exception as e: stderr = f"Setup error: {str(e)}" finally: if script_path and script_path.exists(): try: os.remove(script_path) except Exception: pass return stdout + stderr
{ "repo_id": "OpenBMB/ChatDev", "file_path": "functions/function_calling/code_executor.py", "license": "Apache License 2.0", "lines": 56, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:functions/function_calling/deep_research.py
"""Deep research tools for search results and report management.""" import json import re from pathlib import Path from typing import Annotated, Any, Dict, List, Optional, Tuple from filelock import FileLock from entity.messages import MessageBlock, MessageBlockType from functions.function_calling.file import FileToolContext from utils.function_catalog import ParamMeta # Constants for file paths (relative to workspace root) SEARCH_RESULTS_FILE = "deep_research/search_results.json" SEARCH_LOCK_FILE = "deep_research/search_results.lock" REPORT_FILE = "deep_research/report.md" REPORT_LOCK_FILE = "deep_research/report.lock" def _get_files(ctx: FileToolContext) -> Tuple[Path, Path]: search_file = ctx.resolve_under_workspace(SEARCH_RESULTS_FILE) report_file = ctx.resolve_under_workspace(REPORT_FILE) return search_file, report_file def _get_locks(ctx: FileToolContext) -> Tuple[Path, Path]: search_lock = ctx.resolve_under_workspace(SEARCH_LOCK_FILE) report_lock = ctx.resolve_under_workspace(REPORT_LOCK_FILE) return search_lock, report_lock def _load_search_results(file_path: Path) -> Dict[str, Any]: if not file_path.exists(): return {} try: return json.loads(file_path.read_text(encoding="utf-8")) except json.JSONDecodeError: return {} def _save_search_results(file_path: Path, data: Dict[str, Any]) -> None: file_path.parent.mkdir(parents=True, exist_ok=True) file_path.write_text(json.dumps(data, indent=2, ensure_ascii=False), encoding="utf-8") def _format_search_result(url: str, data: Dict[str, Any], concise: bool) -> str: keys = data.get("highlight_keys", []) highlight_str = f" [IMPORTANT MATCHES: {', '.join(keys)}]" if keys else "" if concise: return ( f"URL: {url}{highlight_str}\n" f"Title: {data.get('title', '')}\n" f"Abstract: {data.get('abs', '')}\n" f"{'-' * 40}" ) else: return ( f"URL: {url}{highlight_str}\n" f"Title: {data.get('title', '')}\n" f"Abstract: {data.get('abs', '')}\n" f"Detail: {data.get('detail', '')}\n" f"{'-' * 40}" ) def search_save_result( url: 
Annotated[str, ParamMeta(description="URL of the search result (used as key)")], title: Annotated[str, ParamMeta(description="Title of the search result")], abs: Annotated[str, ParamMeta(description="Abstract/Summary of the content")], detail: Annotated[str, ParamMeta(description="Detailed content")], _context: Dict[str, Any] | None = None, ) -> str: """ Save or update a search result. """ ctx = FileToolContext(_context) search_file, _ = _get_files(ctx) search_lock, _ = _get_locks(ctx) with FileLock(search_lock): data = _load_search_results(search_file) current = data.get(url, {}) # Preserve existing keys if updating highlight_keys = current.get("highlight_keys", []) data[url] = { "title": title, "abs": abs, "detail": detail, "highlight_keys": highlight_keys, } _save_search_results(search_file, data) return f"Saved result for {url}" def search_load_all( # concise: Annotated[bool, ParamMeta(description="If True, only show concise information")], _context: Dict[str, Any] | None = None, ) -> str: """ Load all saved search results. """ ctx = FileToolContext(_context) search_file, _ = _get_files(ctx) search_lock, _ = _get_locks(ctx) with FileLock(search_lock): data = _load_search_results(search_file) if not data: return "No search results found." results = [] for url, content in data.items(): results.append(_format_search_result(url, content, concise=True)) return "\n\n".join(results) def search_load_by_url( url: Annotated[str, ParamMeta(description="URL to retrieve")], _context: Dict[str, Any] | None = None, ) -> str: """ Load a specific search result by URL. 
""" ctx = FileToolContext(_context) search_file, _ = _get_files(ctx) search_lock, _ = _get_locks(ctx) with FileLock(search_lock): data = _load_search_results(search_file) if url not in data: return f"No result found for {url}" return _format_search_result(url, data[url], concise=False) def search_high_light_key( url: Annotated[str, ParamMeta(description="URL to highlight keys for")], keys: Annotated[List[str], ParamMeta(description="List of keys/terms to highlight")], _context: Dict[str, Any] | None = None, ) -> str: """ Save highlighted keys for a specific search result. """ ctx = FileToolContext(_context) search_file, _ = _get_files(ctx) search_lock, _ = _get_locks(ctx) with FileLock(search_lock): data = _load_search_results(search_file) if url not in data: return f"URL {url} not found in results. Please save it first." current_keys = set(data[url].get("highlight_keys", [])) current_keys.update(keys) data[url]["highlight_keys"] = list(current_keys) _save_search_results(search_file, data) return f"Updated highlights for {url}: {list(current_keys)}" # Report Helpers def _read_report_lines(file_path: Path) -> List[str]: if not file_path.exists(): return [] return file_path.read_text(encoding="utf-8").splitlines() def _save_report(file_path: Path, lines: List[str]) -> None: file_path.parent.mkdir(parents=True, exist_ok=True) # Ensure final newline content = "\n".join(lines) if content and not content.endswith("\n"): content += "\n" file_path.write_text(content, encoding="utf-8") def _parse_header(line: str) -> Tuple[int, str]: """Returns (level, title) if line is a header, else (0, "").""" match = re.match(r"^(#+)\s+(.+)$", line) if match: return len(match.group(1)), match.group(2).strip() return 0, "" def _find_chapter_range(lines: List[str], title_path: str) -> Tuple[int, int]: """ Find the start and end indices (inclusive, exclusive) of a chapter. 
title_path is like "Chapter 1/Section 2" """ titles = [t.strip() for t in title_path.split("/")] current_level_idx = 0 start_idx = -1 # We need to find the sequence of headers search_start = 0 for i, target_title in enumerate(titles): found = False for idx in range(search_start, len(lines)): level, text = _parse_header(lines[idx]) if level > 0 and text == target_title: # Found the current segment search_start = idx + 1 found = True if i == len(titles) - 1: start_idx = idx current_level_idx = level break if not found: return -1, -1 if start_idx == -1: return -1, -1 # Find end: next header of same or lower level (higher importance, smaller integer) end_idx = len(lines) for idx in range(start_idx + 1, len(lines)): level, _ = _parse_header(lines[idx]) if level > 0 and level <= current_level_idx: end_idx = idx break return start_idx, end_idx def report_read( _context: Dict[str, Any] | None = None, ) -> str: """ Read the current content of the report. """ ctx = FileToolContext(_context) _, report_file = _get_files(ctx) _, report_lock = _get_locks(ctx) with FileLock(report_lock): if not report_file.exists(): return "Report is empty." return report_file.read_text(encoding="utf-8") def report_read_chapter( title: Annotated[str, ParamMeta(description="Chapter title to read (supports multi-level index e.g. 'Intro/Background')")], _context: Dict[str, Any] | None = None, ) -> str: """ Read the content of a specific chapter. """ ctx = FileToolContext(_context) _, report_file = _get_files(ctx) _, report_lock = _get_locks(ctx) with FileLock(report_lock): lines = _read_report_lines(report_file) start, end = _find_chapter_range(lines, title) if start == -1: return f"Chapter '{title}' not found." # Return content (excluding header) # start is the header line, so start+1 return "\\n".join(lines[start+1:end]) def report_outline( _context: Dict[str, Any] | None = None, ) -> str: """ Get the outline of the report (headers). 
""" ctx = FileToolContext(_context) _, report_file = _get_files(ctx) _, report_lock = _get_locks(ctx) with FileLock(report_lock): lines = _read_report_lines(report_file) outline = [] for line in lines: level, title = _parse_header(line) if level > 0: outline.append(f"{'#' * level} {title}") if not outline: return "No headers found in report." return "\n".join(outline) def report_create_chapter( title: Annotated[str, ParamMeta(description="Chapter title (supports 'Parent/NewChild' to insert into existing). Use '|' to specify insertion point e.g. 'Prev|New' to insert after 'Prev', or '|New' to insert at start.")], level: Annotated[int, ParamMeta(description="Header level (1-6)")], content: Annotated[str, ParamMeta(description="Content of the chapter")], _context: Dict[str, Any] | None = None, ) -> str: """ Create a new chapter in the report. """ ctx = FileToolContext(_context) _, report_file = _get_files(ctx) _, report_lock = _get_locks(ctx) with FileLock(report_lock): lines = _read_report_lines(report_file) # Check for routing path parent_path = None display_title = title p_start, p_end = -1, len(lines) if "/" in title: # Handle recursive "Parent/Child" structure, where Child might contain "|" parent_path, new_title = title.rsplit("/", 1) p_start, p_end = _find_chapter_range(lines, parent_path) if p_start == -1: return f"Parent chapter '{parent_path}' not found. Cannot create '{new_title}' inside it." 
display_title = new_title # Check for "|" syntax in the leaf title insert_after_target = None # None means append, "" means start, "str" means after that chapter if "|" in display_title: target, real_title = display_title.split("|", 1) display_title = real_title insert_after_target = target # Determine insertion index insert_idx = -1 if insert_after_target is not None: if insert_after_target == "": # Insert at the beginning of the context if parent_path: # Inside parent: Insert after parent header (and its intro text), before first subchapter insert_idx = p_end # Default to appending if no subchapters found # Scan for first header inside parent for idx in range(p_start + 1, len(lines)): if idx >= p_end: break lvl, _ = _parse_header(lines[idx]) if lvl > 0: insert_idx = idx break else: # Top level: Insert at start of file insert_idx = 0 else: # Insert after the specified chapter # If we are inside a parent, the target must be relative to the parent? # The user requirement says "Prev|New". # If inside "Parent", "Prev" should be a sibling inside "Parent". search_target = insert_after_target if parent_path: # Construct full path for search if we are scoped search_target = f"{parent_path}/{insert_after_target}" a_start, a_end = _find_chapter_range(lines, search_target) if a_start == -1: return f"Target chapter '{search_target}' not found." insert_idx = a_end else: # Default: Append to parent context or file end insert_idx = p_end if parent_path else len(lines) header = f"{'#' * level} {display_title}" new_section = [header] + content.splitlines() + [""] # Insert lines[insert_idx:insert_idx] = new_section _save_report(report_file, lines) final_path = f"{parent_path}/{display_title}" if parent_path else display_title return f"Created chapter '{final_path}' at level {level}" def report_rewrite_chapter( title: Annotated[str, ParamMeta(description="Chapter title to rewrite (supports multi-level index e.g. 
'Intro/Background')")], content: Annotated[str, ParamMeta(description="New content")], _context: Dict[str, Any] | None = None, ) -> str: """ Rewrite the content of an existing chapter. """ ctx = FileToolContext(_context) _, report_file = _get_files(ctx) _, report_lock = _get_locks(ctx) with FileLock(report_lock): lines = _read_report_lines(report_file) start, end = _find_chapter_range(lines, title) if start == -1: return f"Chapter '{title}' not found." # Keep the header, replace the body # new body should not contain the header itself, just the content new_body = [lines[start]] + content.splitlines() + [""] # Replace slice lines[start:end] = new_body _save_report(report_file, lines) return f"Rewrote chapter '{title}'" def report_continue_chapter( title: Annotated[str, ParamMeta(description="Chapter title to append to (supports multi-level index e.g. 'Intro/Background')")], content: Annotated[str, ParamMeta(description="Content to append")], _context: Dict[str, Any] | None = None, ) -> str: """ Append content to an existing chapter. """ ctx = FileToolContext(_context) _, report_file = _get_files(ctx) _, report_lock = _get_locks(ctx) with FileLock(report_lock): lines = _read_report_lines(report_file) start, end = _find_chapter_range(lines, title) if start == -1: return f"Chapter '{title}' not found." # Append content before 'end' (which is the start of next section or end of file) new_lines = content.splitlines() + [""] lines[end:end] = new_lines _save_report(report_file, lines) return f"Appended content to chapter '{title}'" def report_reorder_chapters( new_order: Annotated[List[str], ParamMeta(description="List of chapter titles in the new desired order")], _context: Dict[str, Any] | None = None, ) -> str: """ Reorder chapters in the report. This swaps the positions of the specified chapters, preserving their content and valid text between them. All specified chapters must exist and must not overlap (e.g. cannot reorder a parent and its child). 
""" ctx = FileToolContext(_context) _, report_file = _get_files(ctx) _, report_lock = _get_locks(ctx) with FileLock(report_lock): lines = _read_report_lines(report_file) # 1. Find all ranges chapters = [] # (index in new_order, title, start, end) for i, title in enumerate(new_order): s, e = _find_chapter_range(lines, title) if s == -1: return f"Chapter '{title}' not found." chapters.append({ "target_order_idx": i, "title": title, "content": lines[s:e], "start": s, "end": e }) # 2. Sort by original position in file to identify slots chapters_sorted_by_pos = sorted(chapters, key=lambda x: x["start"]) # 3. Validation: Check for overlaps for i in range(len(chapters_sorted_by_pos) - 1): curr = chapters_sorted_by_pos[i] next_ch = chapters_sorted_by_pos[i+1] if curr["end"] > next_ch["start"]: return f"Chapters '{curr['title']}' and '{next_ch['title']}' overlap. Cannot reorder nested or overlapping chapters." # 4. Construct new line list result_lines = [] current_idx = 0 for k, original_slot_holder in enumerate(chapters_sorted_by_pos): # Append text before this slot result_lines.extend(lines[current_idx : original_slot_holder["start"]]) # Append the content of the chapter that belongs in this k-th slot # The slot sequence corresponds to the input list order desired_chapter = chapters[k] result_lines.extend(desired_chapter["content"]) current_idx = original_slot_holder["end"] # Append remaining file content result_lines.extend(lines[current_idx:]) _save_report(report_file, result_lines) return "Reordered chapters successfully." def report_del_chapter( title: Annotated[str, ParamMeta(description="Chapter title to delete (supports multi-level index e.g. 'Intro/Background')")], _context: Dict[str, Any] | None = None, ) -> str: """ Delete a chapter and its content. 
""" ctx = FileToolContext(_context) _, report_file = _get_files(ctx) _, report_lock = _get_locks(ctx) with FileLock(report_lock): lines = _read_report_lines(report_file) start, end = _find_chapter_range(lines, title) if start == -1: return f"Chapter '{title}' not found." del lines[start:end] _save_report(report_file, lines) return f"Deleted chapter '{title}'" def report_export_pdf( _context: Dict[str, Any] | None = None, ) -> List[MessageBlock]: """ Export the report to PDF. """ ctx = FileToolContext(_context) _, report_file = _get_files(ctx) _, report_lock = _get_locks(ctx) with FileLock(report_lock): if not report_file.exists(): raise FileNotFoundError("Report file does not exist.") text = report_file.read_text(encoding="utf-8") text = re.sub(r"([^\n])\n(#{1,6}\s)", r"\1\n\n\2", text) text = re.sub(r"(?m)^(?!\s*(?:[*+-]|\d+\.)\s)(.+)\n(\s*(?:[*+-]|\d+\.)\s)", r"\1\n\n\2", text) try: import markdown from xhtml2pdf import pisa except ImportError: raise ImportError( "Error: strict dependencies 'markdown' and 'xhtml2pdf' are missing." 
) pdf_file = report_file.with_suffix(".pdf") # Convert to HTML extensions = ["extra", "codehilite", "nl2br", "tables"] html_content = markdown.markdown(text, extensions=extensions) styled_html = f""" <html> <head> <style> @page {{ size: A4; margin: 2cm; }} body {{ font-family: sans-serif; line-height: 1.6; font-size: 10pt; word-wrap: break-word; word-break: break-all; }} h1, h2, h3 {{ color: #2c3e50; margin-top: 25px; /* Add spacing above the title */ margin-bottom: 15px; border-bottom: 1px solid #eee; /* Add an underline to the main title for clarity */ padding-bottom: 5px; }} /* --- Table style fixes --- */ table {{ width: 100%; border-collapse: collapse; margin-bottom: 20px; border: 1px solid #ddd; }} th, td {{ border: 1px solid #ddd; /* Explicitly add borders */ padding: 8px; text-align: left; vertical-align: top; }} th {{ background-color: #f2f2f2; font-weight: bold; color: #333; }} /* ------------------ */ code {{ background-color: #f4f4f4; padding: 2px 5px; border-radius: 3px; font-family: monospace; }} pre {{ background-color: #f4f4f4; padding: 10px; border-radius: 5px; overflow-x: auto; white-space: pre-wrap; }} ul, ol {{ margin-top: 8px; margin-bottom: 8px; padding-left: 20px; }} li {{ margin-bottom: 4px; }} blockquote {{ border-left: 4px solid #ccc; padding-left: 10px; color: #666; margin: 10px 0; }} </style> </head> <body> {html_content} </body> </html> """ # Convert to PDF try: with open(pdf_file, "wb") as f: pisa_status = pisa.CreatePDF(styled_html, dest=f) if pisa_status.err: raise RuntimeError("Failed to generate PDF: xhtml2pdf error") except Exception as e: raise RuntimeError(f"Failed to generate PDF: {e}") record = ctx.attachment_store.register_file( pdf_file, kind=MessageBlockType.FILE, display_name=pdf_file.name, mime_type="application/pdf", copy_file=False, persist=False, deduplicate=True, extra={ "source": "generated_report", "workspace_path": str(pdf_file), }, ) return [record.as_message_block()]
{ "repo_id": "OpenBMB/ChatDev", "file_path": "functions/function_calling/deep_research.py", "license": "Apache License 2.0", "lines": 517, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:functions/function_calling/file.py
"""File-related function tools for model-invoked file access.""" import fnmatch import locale import mimetypes import os import re import shutil from dataclasses import dataclass from pathlib import Path from typing import ( Annotated, Any, Dict, Iterable, List, Literal, Mapping, MutableSequence, Optional, Sequence, ) from entity.messages import MessageBlock, MessageBlockType from utils.attachments import AttachmentStore from utils.workspace_scanner import iter_workspace_entries from utils.function_catalog import ParamMeta class FileToolContext: """Helper to read runtime context injected via `_context` kwarg.""" def __init__(self, ctx: Dict[str, Any] | None): if ctx is None: raise ValueError("_context is required for file tools") self._ctx = ctx self.attachment_store = self._require_store(ctx.get("attachment_store")) self.workspace_root = self._require_workspace(ctx.get("python_workspace_root")) self.session_root = self._require_session_root(ctx.get("graph_directory"), self.workspace_root) @staticmethod def _require_store(store: Any) -> AttachmentStore: if not isinstance(store, AttachmentStore): raise ValueError("attachment_store missing from _context") return store @staticmethod def _require_workspace(root: Any) -> Path: if root is None: raise ValueError("python_workspace_root missing from _context") path = Path(root).resolve() path.mkdir(parents=True, exist_ok=True) return path @staticmethod def _require_session_root(root: Any, workspace_root: Path) -> Path: base = root or workspace_root.parent path = Path(base).resolve() path.mkdir(parents=True, exist_ok=True) return path def resolve_under_workspace(self, relative_path: str | Path) -> Path: rel = Path(relative_path) target = rel.resolve() if rel.is_absolute() else (self.workspace_root / rel).resolve() if self.workspace_root not in target.parents and target != self.workspace_root: raise ValueError("Path is outside workspace") return target def resolve_under_session(self, relative_path: str | Path) -> Path: raw = 
Path(relative_path) candidates = [] if raw.is_absolute(): candidates.append(raw.resolve()) else: candidates.append((self.session_root / raw).resolve()) candidates.append(raw.resolve()) for target in candidates: if self.session_root in target.parents or target == self.session_root: return target raise ValueError("Path is outside session directory") def to_session_relative(self, absolute_path: str | Path | None) -> Optional[str]: if not absolute_path: return None target = Path(absolute_path).resolve() if self.session_root in target.parents or target == self.session_root: return target.relative_to(self.session_root).as_posix() return str(target) def to_workspace_relative(self, absolute_path: str | Path | None) -> Optional[str]: if not absolute_path: return None target = Path(absolute_path).resolve() if self.workspace_root in target.parents or target == self.workspace_root: rel = target.relative_to(self.workspace_root) return rel.as_posix() or "." return None def _check_attachments_not_modified(path: str) -> None: if path.startswith("attachments"): raise ValueError("Modifications to the attachments directory are not allowed") def describe_available_files( *, recursive: bool = True, limit: int = 200, include_hidden: bool = False, # max_depth: int = 5, _context: Dict[str, Any] | None = None, ) -> Dict[str, Any]: """ List accessible files from the attachment store and the current code_workspace. 
""" max_depth = 8 ctx = FileToolContext(_context) entries: List[Dict[str, Any]] = [] total_limit = max(1, limit) # # Attachment store (user uploads or files registered via load_file) # for attachment_id, record in ctx.attachment_store.list_records().items(): # ref = record.ref # workspace_path = ctx.to_workspace_relative(ref.local_path) # session_path = ctx.to_session_relative(ref.local_path) # display_path = workspace_path or session_path or ref.local_path # entries.append( # { # "id": attachment_id, # "name": ref.name, # "source": record.extra.get("source") if record.extra else "attachment", # "mime": ref.mime_type, # "size": ref.size, # "type": "file", # "path": display_path, # } # ) # if len(entries) >= total_limit: # return {"files": entries} # Workspace files (includes attachments directory because it sits inside workspace) for entry in iter_workspace_entries( ctx.workspace_root, recursive=recursive, max_depth=max_depth, include_hidden=include_hidden, ): if len(entries) >= total_limit: break abs_path = (ctx.workspace_root / entry.path).resolve() workspace_path = Path(entry.path) # session_path = ctx.to_session_relative(abs_path) entries.append( { "id": entry.path, "name": Path(entry.path).name, "source": "workspace", "path": workspace_path, "absolute_path": abs_path, "type": entry.type, "size": entry.size, "depth": entry.depth, } ) return {"files": entries[:total_limit]} def list_directory( path: Annotated[str, ParamMeta(description="Workspace-relative directory path")]=".", *, recursive: Annotated[bool, ParamMeta(description="Traverse subdirectories")] = False, max_depth: Annotated[int, ParamMeta(description="Maximum depth when recursive=True")] = 3, include_hidden: Annotated[bool, ParamMeta(description="Include entries starting with '.'")] = False, limit: Annotated[int, ParamMeta(description="Maximum entries to return")] = 500, _context: Dict[str, Any] | None = None, ) -> Dict[str, Any]: """List contents of a workspace-relative directory.""" ctx = 
FileToolContext(_context) target = ctx.resolve_under_workspace(path) if not target.exists(): raise FileNotFoundError(f"Directory not found: {path}") if not target.is_dir(): raise NotADirectoryError(f"Path is not a directory: {path}") if limit <= 0: raise ValueError("limit must be positive") if recursive and max_depth < 1: raise ValueError("max_depth must be >= 1 when recursive") entries: List[Dict[str, Any]] = [] stack: List[tuple[Path, int]] = [(target, 0)] base_relative = ctx.to_workspace_relative(target) or "." while stack and len(entries) < limit: current, depth = stack.pop() try: children = sorted(current.iterdir(), key=lambda p: p.name.lower()) except (FileNotFoundError, PermissionError): continue for child in children: rel = child.relative_to(target) if not include_hidden and _path_is_hidden(rel): continue stat_size = None modified = None try: stat = child.stat() modified = stat.st_mtime if child.is_file(): stat_size = stat.st_size except (FileNotFoundError, PermissionError, OSError): pass entry = { "name": child.name, "relative_path": rel.as_posix(), "absolute_path": str(child), "type": "directory" if child.is_dir() else "file", "size": stat_size, "modified_ts": modified, "depth": depth, } entries.append(entry) if len(entries) >= limit: break if recursive and child.is_dir() and depth + 1 < max_depth: stack.append((child, depth + 1)) return { "directory": base_relative, "entries": entries[:limit], "truncated": len(entries) >= limit, "recursive": recursive, } def create_folder( path: Annotated[str, ParamMeta(description="Workspace-relative folder path")], *, parents: Annotated[bool, ParamMeta(description="Create missing parent directories")] = True, exist_ok: Annotated[bool, ParamMeta(description="Do not raise if folder already exists")] = True, _context: Dict[str, Any] | None = None, ) -> Dict[str, Any]: """Create a directory tree under the workspace.""" if not path: raise ValueError("path must be provided") _check_attachments_not_modified(path) ctx = 
FileToolContext(_context) target = ctx.resolve_under_workspace(path) if target.exists() and not target.is_dir(): raise ValueError("Target exists and is not a directory") previously_exists = target.exists() target.mkdir(parents=parents, exist_ok=exist_ok) return { "path": ctx.to_workspace_relative(target), "absolute_path": str(target), "created": not previously_exists, } def delete_path( path: Annotated[str, ParamMeta(description="Workspace-relative file or folder path")], *, recursive: Annotated[ bool, ParamMeta(description="Allow deleting non-empty directories recursively"), ] = False, missing_ok: Annotated[bool, ParamMeta(description="Suppress error if path is missing")] = False, _context: Dict[str, Any] | None = None, ) -> Dict[str, Any]: """Delete a workspace file or directory.""" if not path: raise ValueError("path must be provided") _check_attachments_not_modified(path) ctx = FileToolContext(_context) target = ctx.resolve_under_workspace(path) if not target.exists(): if missing_ok: return { "path": ctx.to_workspace_relative(target), "absolute_path": str(target), "deleted": False, "reason": "missing", } raise FileNotFoundError(f"Path not found: {path}") if target.is_dir(): if not recursive: raise IsADirectoryError("Set recursive=True to delete directories") shutil.rmtree(target) deleted_type = "directory" else: target.unlink() deleted_type = "file" return { "path": ctx.to_workspace_relative(target), "absolute_path": str(target), "deleted": True, "type": deleted_type, } def load_file( path_or_id: str, *, # mime_override: Optional[str] = None, _context: Dict[str, Any] | None = None, ) -> List[MessageBlock]: """ Load an attachment by ID or register a workspace file as a new attachment. 
""" ctx = FileToolContext(_context) # First, try existing attachment id record = ctx.attachment_store.get(path_or_id) if record: return [record.as_message_block()] # Otherwise treat as workspace path target = ctx.resolve_under_workspace(path_or_id) if not target.exists() or not target.is_file(): raise ValueError(f"Workspace file not found: {path_or_id}") # mime_type = mime_override or (mimetypes.guess_type(target.name)[0] or "application/octet-stream") mime_type = mimetypes.guess_type(target.name)[0] or "application/octet-stream" record = ctx.attachment_store.register_file( target, kind=MessageBlockType.from_mime_type(mime_type), display_name=target.name, mime_type=mime_type, copy_file=False, persist=False, deduplicate=True, extra={ "source": "workspace", "workspace_path": path_or_id, "storage": "reference", }, ) return [record.as_message_block()] def save_file( path: str, content: str, *, encoding: str = "utf-8", mode: Literal["overwrite", "append"] = "overwrite", _context: Dict[str, Any] | None = None, ) -> Dict[str, Any]: """ Persist data to a workspace file while optionally registering it as an attachment. Args: path: Relative path where the file will be written. content: Plain-text payload encoded with `encoding`. encoding: Text encoding used when `content` is provided. mode: Whether to replace the file (`overwrite`) or append to it (`append`). Returns: A dictionary describing the persisted file, including workspace path, absolute path, and byte size. Raises: ValueError: If arguments are missing/invalid or the path escapes the workspace. OSError: If the file cannot be written. 
""" if mode not in {"overwrite", "append"}: raise ValueError("mode must be either 'overwrite' or 'append'") ctx = FileToolContext(_context) target = ctx.resolve_under_workspace(path) if target.exists() and target.is_dir(): raise ValueError("Target path points to a directory") target.parent.mkdir(parents=True, exist_ok=True) data = content.encode(encoding) write_mode = "wb" if mode == "overwrite" else "ab" try: with target.open(write_mode) as handle: handle.write(data) except OSError as exc: raise OSError(f"Failed to write file '{target}': {exc}") from exc size = target.stat().st_size if target.exists() else None return { "path": ctx.to_workspace_relative(target), "absolute_path": str(target), "size": size, # "mode": mode, # "encoding": encoding if content is not None else None, } def read_text_file_snippet( path: str, *, offset: int = 0, limit: int = 4000, encoding: str = "utf-8", _context: Dict[str, Any] | None = None, ) -> Dict[str, Any]: """Read a snippet of a workspace text file without loading entire content.""" ctx = FileToolContext(_context) target: Path | None = None try: candidate = ctx.resolve_under_workspace(path) except ValueError: candidate = None if candidate and candidate.exists() and candidate.is_file(): target = candidate if target is None: target = ctx.resolve_under_session(path) if not target.exists() or not target.is_file(): raise ValueError(f"File not found in session attachments/workspace: {path}") data = target.read_text(encoding=encoding, errors="replace") snippet = data[offset : offset + limit] return { "snippet": snippet, "truncated": offset + limit < len(data), "length": len(data), "offset": offset, } def read_file_segment( path: Annotated[str, ParamMeta(description="Workspace-relative text file path")], *, start_line: Annotated[int, ParamMeta(description="1-based line to begin the snippet")]=1, line_count: Annotated[int, ParamMeta(description="Number of lines to include starting from start_line")]=40, inline_line_numbers: Annotated[ 
bool, ParamMeta(description="If true, prefix each snippet line with its line number inside the snippet"), ] = False, encoding: Annotated[str, ParamMeta(description="Explicit encoding or 'auto'")]="auto", include_line_offsets: Annotated[ bool, ParamMeta(description="Include 1-based line metadata for the returned snippet"), ] = False, _context: Dict[str, Any] | None = None, ) -> Dict[str, Any]: """Read a line range plus metadata from a workspace file.""" if start_line < 1: raise ValueError("start_line must be >= 1") if line_count < 1: raise ValueError("line_count must be >= 1") ctx = FileToolContext(_context) target = ctx.resolve_under_workspace(path) if not target.exists() or not target.is_file(): raise FileNotFoundError(f"File not found: {path}") text, used_encoding = _read_text_content(target, encoding) newline_style = _detect_newline(text) stat = target.stat() lines_with_breaks = text.splitlines(keepends=True) if not lines_with_breaks: lines_with_breaks = [""] total_lines = len(lines_with_breaks) start_idx = start_line - 1 if start_idx >= total_lines: raise ValueError("start_line is beyond the total number of lines in the file") lines_returned = min(line_count, total_lines - start_idx) end_idx = start_idx + lines_returned segment_lines = lines_with_breaks[start_idx:end_idx] snippet = "".join(segment_lines) raw_snippet = snippet line_starts: List[int] = [0] for line in lines_with_breaks: line_starts.append(line_starts[-1] + len(line)) start_char = line_starts[start_idx] response: Dict[str, Any] = { "path": ctx.to_workspace_relative(target), "encoding": used_encoding, "newline": newline_style, "start_line": start_line, "end_line": start_line + lines_returned - 1, "line_count": line_count, "lines_returned": lines_returned, "total_lines": total_lines, "snippet": raw_snippet, "truncated": end_idx < total_lines, "file_size": stat.st_size, "modified_ts": stat.st_mtime, "mode": "line_range", } if inline_line_numbers: snippet = _render_snippet_with_line_numbers( 
segment_lines, start_line, newline_style, raw_snippet.endswith(("\r\n", "\n", "\r")), ) response["snippet"] = snippet if include_line_offsets: response.update(_describe_segment_line_offsets(text, start_char, raw_snippet)) return response @dataclass(frozen=True) class TextEdit: """Normalized representation of a single line edit.""" start_line: int end_line: int replacement_lines: List[str] def apply_text_edits( path: Annotated[str, ParamMeta(description="Workspace-relative file to edit")], *, start_line: Annotated[int, ParamMeta(description="1-based line where the replacement should begin")], end_line: Annotated[ Optional[int], ParamMeta(description="Last line (>= start_line-1) to replace; defaults to start_line"), ] = None, replacement: Annotated[ Optional[str], ParamMeta(description="Text that should replace the selected line range"), ] = "", encoding: Annotated[str, ParamMeta(description="Text encoding or 'auto'")]="auto", newline: Annotated[ str, ParamMeta(description="Newline style: 'preserve', 'lf', 'crlf', or 'cr'"), ]="preserve", ensure_trailing_newline: Annotated[ Optional[bool], ParamMeta(description="Force presence/absence of trailing newline; default preserves original"), ] = None, _context: Dict[str, Any] | None = None, ) -> Dict[str, Any]: """Apply ordered line edits with newline and encoding preservation.""" ctx = FileToolContext(_context) target = ctx.resolve_under_workspace(path) if not target.exists() or not target.is_file(): raise FileNotFoundError(f"File not found: {path}") _check_attachments_not_modified(path) normalized = _normalize_edits(_build_single_edit(start_line, end_line, replacement)) original_text, used_encoding = _read_text_content(target, encoding) lines, had_trailing_newline = _split_lines(original_text) newline_style = _resolve_newline_choice(newline, _detect_newline(original_text)) _apply_edits_in_place(lines, normalized) if ensure_trailing_newline is None: final_trailing = had_trailing_newline else: final_trailing = 
ensure_trailing_newline rendered = newline_style.join(lines) if final_trailing: rendered += newline_style target.parent.mkdir(parents=True, exist_ok=True) target.write_text(rendered, encoding=used_encoding) stat = target.stat() return { "path": ctx.to_workspace_relative(target), "encoding": used_encoding, "newline": newline_style, "line_count": len(lines), "applied_edits": len(normalized), "trailing_newline": final_trailing, "file_size": stat.st_size, "modified_ts": stat.st_mtime, } def rename_path( src: Annotated[str, ParamMeta(description="Existing workspace-relative path")], dst: Annotated[str, ParamMeta(description="New workspace-relative path")], *, overwrite: Annotated[ bool, ParamMeta(description="Allow replacing an existing destination"), ] = False, _context: Dict[str, Any] | None = None, ) -> Dict[str, Any]: """Rename files or directories inside the workspace.""" ctx = FileToolContext(_context) source = ctx.resolve_under_workspace(src) destination = ctx.resolve_under_workspace(dst) _check_attachments_not_modified(src) _check_attachments_not_modified(dst) if not source.exists(): raise FileNotFoundError(f"Source does not exist: {src}") if source == destination: return { "path": ctx.to_workspace_relative(destination), "operation": "rename", "skipped": True, } _clear_destination(destination, overwrite) destination.parent.mkdir(parents=True, exist_ok=True) source.rename(destination) return { "path": ctx.to_workspace_relative(destination), "previous_path": ctx.to_workspace_relative(source), "operation": "rename", } def copy_path( src: Annotated[str, ParamMeta(description="Source workspace-relative path")], dst: Annotated[str, ParamMeta(description="Destination workspace-relative path")], *, overwrite: Annotated[ bool, ParamMeta(description="Allow replacing destination if it exists"), ] = False, _context: Dict[str, Any] | None = None, ) -> Dict[str, Any]: """Copy a file tree within the workspace.""" ctx = FileToolContext(_context) source = 
ctx.resolve_under_workspace(src) destination = ctx.resolve_under_workspace(dst) _check_attachments_not_modified(dst) if not source.exists(): raise FileNotFoundError(f"Source does not exist: {src}") if destination.exists(): if not overwrite: raise FileExistsError(f"Destination already exists: {dst}") _clear_destination(destination, overwrite=True) destination.parent.mkdir(parents=True, exist_ok=True) if source.is_dir(): shutil.copytree(source, destination) else: shutil.copy2(source, destination) return { "path": ctx.to_workspace_relative(destination), "source": ctx.to_workspace_relative(source), "operation": "copy", } def move_path( src: Annotated[str, ParamMeta(description="Source workspace-relative path")], dst: Annotated[str, ParamMeta(description="Destination workspace-relative path")], *, overwrite: Annotated[ bool, ParamMeta(description="Allow replacing destination path"), ] = False, _context: Dict[str, Any] | None = None, ) -> Dict[str, Any]: """Move files or directories, mirroring `mv` semantics across platforms.""" ctx = FileToolContext(_context) source = ctx.resolve_under_workspace(src) destination = ctx.resolve_under_workspace(dst) _check_attachments_not_modified(src) _check_attachments_not_modified(dst) if not source.exists(): raise FileNotFoundError(f"Source does not exist: {src}") if source == destination: return { "path": ctx.to_workspace_relative(destination), "operation": "move", "skipped": True, } _clear_destination(destination, overwrite) destination.parent.mkdir(parents=True, exist_ok=True) shutil.move(source, destination) return { "path": ctx.to_workspace_relative(destination), "source": ctx.to_workspace_relative(source), "operation": "move", } def search_in_files( pattern: Annotated[str, ParamMeta(description="Plain text or regex pattern")], *, globs: Annotated[ Optional[Sequence[str]], ParamMeta(description="Restrict search to these glob patterns"), ] = None, exclude_globs: Annotated[ Optional[Sequence[str]], ParamMeta(description="Glob 
patterns to exclude"), ] = None, use_regex: Annotated[bool, ParamMeta(description="Treat pattern as regex")]=True, case_sensitive: Annotated[bool, ParamMeta(description="Match case when True")]=False, max_results: Annotated[int, ParamMeta(description="Stop after this many matches")]=200, before_context: Annotated[int, ParamMeta(description="Lines to include before match")]=2, after_context: Annotated[int, ParamMeta(description="Lines to include after match")]=2, include_hidden: Annotated[bool, ParamMeta(description="Search hidden files/folders")]=False, _context: Dict[str, Any] | None = None, ) -> Dict[str, Any]: """Search workspace files and return structured matches.""" if max_results <= 0: raise ValueError("max_results must be positive") ctx = FileToolContext(_context) include_patterns = _normalize_globs(globs) or ["**/*"] exclude_patterns = _normalize_globs(exclude_globs) matches: List[Dict[str, Any]] = [] searched_files = 0 compiled_regex: Optional[re.Pattern[str]] = None literal = pattern if case_sensitive else pattern.lower() if use_regex: flags = re.MULTILINE if not case_sensitive: flags |= re.IGNORECASE compiled_regex = re.compile(pattern, flags) for candidate in _iter_candidate_files( ctx.workspace_root, include_patterns, exclude_patterns, include_hidden, ): searched_files += 1 lines = _read_file_lines_for_search(candidate) if not lines: continue for match in _iter_line_matches( lines, compiled_regex, literal, pattern, case_sensitive, use_regex, ): before = _slice_context(lines, match["line_number"], before_context, before=True) after = _slice_context(lines, match["line_number"], after_context, before=False) matches.append( { "file": ctx.to_workspace_relative(candidate), "line": match["line_number"], "column": match["column"], "line_text": match["line_text"], "before": before, "after": after, } ) if len(matches) >= max_results: return { "matches": matches, "limited": True, "engine": "python", "searched_files": searched_files, } return { "matches": 
matches, "limited": False, "engine": "python", "searched_files": searched_files, } def _read_text_content(path: Path, encoding: str) -> tuple[str, str]: if encoding != "auto": return path.read_text(encoding=encoding), encoding raw = path.read_bytes() for candidate in _candidate_encodings(): try: return raw.decode(candidate), candidate except UnicodeDecodeError: continue return raw.decode("utf-8", errors="replace"), "utf-8" def _candidate_encodings() -> List[str]: preferred = locale.getpreferredencoding(False) or "" ordered = [ "utf-8-sig", "utf-8", preferred, "utf-16", "utf-16-le", "utf-16-be", "latin-1", ] seen: set[str] = set() result: List[str] = [] for item in ordered: normalized = (item or "").lower() if not normalized or normalized in seen: continue seen.add(normalized) result.append(item) return result _LINE_BREAK_RE = re.compile(r"\r\n|\r|\n") def _detect_newline(text: str) -> str: if "\r\n" in text: return "\r\n" if "\r" in text and "\n" not in text: return "\r" return "\n" def _split_lines(text: str) -> tuple[List[str], bool]: if not text: return [], False has_trailing = text.endswith(("\r\n", "\n", "\r")) return text.splitlines(), has_trailing def _describe_segment_line_offsets(full_text: str, start_index: int, snippet: str) -> Dict[str, Any]: """Return 1-based line metadata (columns are 0-based) for a snippet extracted from full_text.""" before_segment = full_text[:start_index] start_line = 1 last_break_end = 0 for match in _LINE_BREAK_RE.finditer(before_segment): start_line += 1 last_break_end = match.end() start_column = start_index - last_break_end line_offsets: List[Dict[str, int]] = [ {"line": start_line, "offset": 0, "column": start_column}, ] line_number = start_line last_break_inside = 0 for match in _LINE_BREAK_RE.finditer(snippet): last_break_inside = match.end() line_number += 1 line_offsets.append({"line": line_number, "offset": match.end(), "column": 0}) if snippet: if last_break_inside: end_column = len(snippet) - last_break_inside else: 
end_column = start_column + len(snippet) else: end_column = start_column return { "start_line": start_line, "start_column": start_column, "end_line": line_number, "end_column": end_column, "line_offsets": line_offsets, } def _render_snippet_with_line_numbers( lines: Sequence[str], start_line: int, newline_style: str, preserve_trailing_newline: bool, ) -> str: numbered: List[str] = [] for idx, line in enumerate(lines): body = line.rstrip("\r\n") numbered.append(f"{start_line + idx}:{body}") rendered = newline_style.join(numbered) if preserve_trailing_newline and numbered: rendered += newline_style return rendered def _normalize_edits(edits: Sequence[Mapping[str, Any]]) -> List[TextEdit]: if not edits: raise ValueError("at least one edit instruction is required") normalized: List[TextEdit] = [] for item in edits: if not isinstance(item, Mapping): raise ValueError("each edit entry must be a mapping object") try: start_line = int(item["start_line"]) except (KeyError, TypeError, ValueError) as exc: raise ValueError("start_line is required for each edit") from exc end_line_raw = item.get("end_line", start_line) try: end_line = int(end_line_raw) except (TypeError, ValueError) as exc: raise ValueError("end_line must be an integer") from exc if start_line < 1: raise ValueError("start_line must be >= 1") if end_line < start_line - 1: raise ValueError("end_line must be >= start_line - 1") replacement = item.get("replacement", "") if not isinstance(replacement, str): raise ValueError("replacement must be a string") normalized.append( TextEdit( start_line=start_line, end_line=end_line, replacement_lines=replacement.splitlines(), ) ) normalized.sort(key=lambda edit: (edit.start_line, edit.end_line)) _validate_edit_ranges(normalized) return normalized def _build_single_edit( start_line: int, end_line: Optional[int], replacement: Optional[str], ) -> List[Mapping[str, Any]]: effective_end = end_line if end_line is not None else start_line payload = { "start_line": start_line, 
"end_line": effective_end, "replacement": replacement if replacement is not None else "", } return [payload] def _validate_edit_ranges(edits: Sequence[TextEdit]) -> None: previous_range_end = 0 for edit in edits: effective_end = max(edit.end_line, edit.start_line - 1) if edit.start_line <= previous_range_end and previous_range_end > 0: raise ValueError("edit ranges overlap; merge them before calling apply_text_edits") previous_range_end = max(previous_range_end, effective_end) def _apply_edits_in_place(lines: MutableSequence[str], edits: Sequence[TextEdit]) -> None: for edit in reversed(edits): current_line_count = len(lines) if edit.start_line > current_line_count + 1: raise ValueError("start_line is beyond the end of the file") start_idx = min(edit.start_line - 1, current_line_count) if start_idx > current_line_count: raise ValueError("start_line is beyond the end of the file") removal_count = max(edit.end_line - edit.start_line + 1, 0) if removal_count > 0: end_line = min(edit.end_line, len(lines)) removal_count = max(end_line - edit.start_line + 1, 0) end_idx = start_idx + removal_count lines[start_idx:end_idx] = edit.replacement_lines def _resolve_newline_choice(preference: str, detected: str) -> str: normalized = (preference or "").lower() if normalized == "lf": return "\n" if normalized == "crlf": return "\r\n" if normalized == "cr": return "\r" return detected or os.linesep def _clear_destination(destination: Path, overwrite: bool) -> None: if not destination.exists(): return if not overwrite: raise FileExistsError(f"Destination already exists: {destination}") if destination.is_dir(): shutil.rmtree(destination) else: destination.unlink() def _normalize_globs(patterns: Optional[Sequence[str]]) -> List[str]: if not patterns: return [] normalized: List[str] = [] for raw in patterns: if not raw: continue normalized.append(str(raw)) return normalized def _iter_candidate_files( root: Path, include_patterns: Sequence[str], exclude_patterns: Sequence[str], 
include_hidden: bool, ) -> Iterable[Path]: yielded: set[str] = set() for pattern in include_patterns: for candidate in root.glob(pattern): if not candidate.is_file(): continue rel = candidate.relative_to(root) rel_key = rel.as_posix() if rel_key in yielded: continue if not include_hidden and _path_is_hidden(rel): continue if _is_excluded(rel_key, exclude_patterns): continue yielded.add(rel_key) yield candidate def _path_is_hidden(path: Path) -> bool: return any(part.startswith(".") for part in path.parts) def _is_excluded(relative_posix: str, exclude_patterns: Sequence[str]) -> bool: for pattern in exclude_patterns: if fnmatch.fnmatch(relative_posix, pattern): return True return False def _read_file_lines_for_search(path: Path) -> List[str]: raw, _ = _read_text_content(path, encoding="auto") return raw.splitlines() def _iter_line_matches( lines: Sequence[str], compiled_regex: Optional[re.Pattern[str]], literal_lower: str, original_pattern: str, case_sensitive: bool, use_regex: bool, ) -> Iterable[Dict[str, Any]]: for idx, raw_line in enumerate(lines): line_number = idx + 1 line_text = raw_line if use_regex and compiled_regex is not None: for match in compiled_regex.finditer(line_text): yield { "line_number": line_number, "column": match.start() + 1, "line_text": line_text, } else: if not original_pattern: continue haystack = line_text if case_sensitive else line_text.lower() needle = original_pattern if case_sensitive else literal_lower start = haystack.find(needle) while start != -1: yield { "line_number": line_number, "column": start + 1, "line_text": line_text, } start = haystack.find(needle, start + max(len(needle), 1)) def _slice_context( lines: Sequence[str], center_line: int, span: int, *, before: bool, ) -> List[str]: if span <= 0: return [] if before: start_line = max(center_line - span, 1) end_line = center_line - 1 else: start_line = center_line + 1 end_line = min(center_line + span, len(lines)) if end_line < start_line: return [] start_idx = start_line 
- 1 end_idx = end_line return list(lines[start_idx:end_idx])
{ "repo_id": "OpenBMB/ChatDev", "file_path": "functions/function_calling/file.py", "license": "Apache License 2.0", "lines": 946, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:functions/function_calling/user.py
def call_user(instruction: str, _context: dict | None = None) -> str: """ If you think it's necessary to get input from the user, use this function to send the instruction to the user and get their response. Args: instruction: The instruction to send to the user. """ prompt = _context.get("human_prompt") if _context else None if prompt is None: return f"Human prompt unavailable, default response for instruction: {instruction}" result = prompt.request( node_id=_context.get("node_id", "model_function_calling"), task_description="Please response to the model instruction.", inputs=instruction, metadata={"source": "function_tool"}, ) return result.text
{ "repo_id": "OpenBMB/ChatDev", "file_path": "functions/function_calling/user.py", "license": "Apache License 2.0", "lines": 16, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:functions/function_calling/utils.py
import time
from typing import Union


def wait(seconds: Union[int, float, str]):
    """
    Wait for a specified number of seconds.

    Args:
        seconds: The number of seconds to wait. String inputs such as "3"
            or "0.5" are coerced to a number before sleeping.

    Raises:
        ValueError: If a string input cannot be parsed as a number.
    """
    if isinstance(seconds, str):
        # float() accepts both integer-looking ("3") and decimal ("0.5")
        # strings, so a single conversion replaces the old int/float branch
        # (whose ValueError fallback re-raised on bad input anyway).
        seconds = float(seconds)
    time.sleep(seconds)


def get_current_time():
    """
    Get the current time in the format: YYYY-MM-DD HH:MM:SS.

    Returns:
        str: The current time in the format: YYYY-MM-DD HH:MM:SS.
    """
    return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
{ "repo_id": "OpenBMB/ChatDev", "file_path": "functions/function_calling/utils.py", "license": "Apache License 2.0", "lines": 25, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:functions/function_calling/uv_related.py
"""Utility tool to manage Python environments via uv.""" import os import re import subprocess from pathlib import Path from typing import Any, Dict, List, Mapping, Sequence _SAFE_PACKAGE_RE = re.compile(r"^[A-Za-z0-9_.\-+=<>!\[\],@:/]+$") _DEFAULT_TIMEOUT = float(os.getenv("LIB_INSTALL_TIMEOUT", "120")) _OUTPUT_SNIPPET_LIMIT = 240 def _trim_output_preview(stdout: str, stderr: str) -> str | None: """Return a short preview from stdout or stderr for error messaging.""" preview_source = stdout.strip() or stderr.strip() if not preview_source: return None if len(preview_source) <= _OUTPUT_SNIPPET_LIMIT: return preview_source return f"{preview_source[:_OUTPUT_SNIPPET_LIMIT].rstrip()}... [truncated]" def _build_timeout_message(step: str | None, timeout_value: float, stdout: str, stderr: str) -> str: """Create a descriptive timeout error message with optional output preview.""" label = "uv command" if step: label = f"{label} ({step})" message = f"{label} timed out after {timeout_value} seconds" preview = _trim_output_preview(stdout, stderr) if preview: return f"{message}. 
Last output: {preview}" return message class WorkspaceCommandContext: """Resolve the workspace root from the injected runtime context.""" def __init__(self, ctx: Dict[str, Any] | None): if ctx is None: raise ValueError("_context is required for uv tools") self.workspace_root = self._require_workspace(ctx.get("python_workspace_root")) self._raw_ctx = ctx @staticmethod def _require_workspace(raw_path: Any) -> Path: if raw_path is None: raise ValueError("python_workspace_root missing from _context") path = Path(raw_path).expanduser().resolve() path.mkdir(parents=True, exist_ok=True) return path def resolve_under_workspace(self, relative_path: str | Path) -> Path: candidate = Path(relative_path) absolute = candidate if candidate.is_absolute() else self.workspace_root / candidate absolute = absolute.expanduser().resolve() if self.workspace_root not in absolute.parents and absolute != self.workspace_root: raise ValueError("script path is outside workspace root") return absolute def _validate_packages(packages: Sequence[str]) -> List[str]: normalized: List[str] = [] for pkg in packages: if not isinstance(pkg, str): raise ValueError("package entries must be strings") stripped = pkg.strip() if not stripped: raise ValueError("package names cannot be empty") if not _SAFE_PACKAGE_RE.match(stripped): raise ValueError(f"unsafe characters detected in package spec {pkg}") if stripped.startswith("-"): raise ValueError(f"flags are not allowed in packages list: {pkg}") normalized.append(stripped) if not normalized: raise ValueError("at least one package is required") return normalized def _coerce_timeout_seconds(timeout_seconds: Any) -> float | None: if timeout_seconds is None: return None if isinstance(timeout_seconds, bool): raise ValueError("timeout_seconds must be a number") if isinstance(timeout_seconds, (int, float)): value = float(timeout_seconds) elif isinstance(timeout_seconds, str): raw = timeout_seconds.strip() if not raw: raise ValueError("timeout_seconds cannot be 
empty") try: if re.fullmatch(r"[+-]?\d+", raw): value = float(int(raw)) else: value = float(raw) except ValueError as exc: raise ValueError("timeout_seconds must be a number") from exc else: raise ValueError("timeout_seconds must be a number") if value <= 0: raise ValueError("timeout_seconds must be positive") return value def _validate_flag_args(args: Sequence[str] | None) -> List[str]: normalized: List[str] = [] if not args: return normalized for arg in args: if not isinstance(arg, str): raise ValueError("extra args must be strings") stripped = arg.strip() if not stripped: raise ValueError("extra args cannot be empty") if not stripped.startswith("-"): raise ValueError(f"extra args must be flags, got {arg}") normalized.append(stripped) return normalized def _validate_args(args: Sequence[str] | None) -> List[str]: normalized: List[str] = [] if not args: return normalized for arg in args: if not isinstance(arg, str): raise ValueError("args entries must be strings") stripped = arg.strip() if not stripped: raise ValueError("args entries cannot be empty") normalized.append(stripped) return normalized def _validate_env(env: Mapping[str, str] | None) -> Dict[str, str]: if env is None: return {} result: Dict[str, str] = {} for key, value in env.items(): if not isinstance(key, str) or not key: raise ValueError("environment variable keys must be non-empty strings") if not isinstance(value, str): raise ValueError("environment variable values must be strings") result[key] = value return result def _run_uv_command( cmd: List[str], workspace_root: Path, *, step: str | None = None, env: Dict[str, str] | None = None, timeout: float | None = None, ) -> Dict[str, Any]: timeout_value = _DEFAULT_TIMEOUT if timeout is None else timeout env_vars = None if env is None else {**os.environ, **env} try: completed = subprocess.run( cmd, cwd=str(workspace_root), capture_output=True, text=True, timeout=timeout_value, check=False, env=env_vars, ) except FileNotFoundError as exc: raise 
RuntimeError("uv command not found in PATH") from exc except subprocess.TimeoutExpired as exc: stdout_text = exc.stdout if stdout_text is None: stdout_text = getattr(exc, "output", "") or "" stderr_text = exc.stderr or "" message = _build_timeout_message(step, timeout_value, stdout_text, stderr_text) return { "command": cmd, "stdout": stdout_text, "stderr": stderr_text, "returncode": None, "step": step, "timed_out": True, "timeout": timeout_value, "error": message, } return { "command": cmd, "stdout": completed.stdout or "", "stderr": completed.stderr or "", "returncode": completed.returncode, # "cwd": str(workspace_root), "step": step, } def install_python_packages( packages: Sequence[str], *, upgrade: bool = False, # extra_args: Sequence[str] | None = None, _context: Dict[str, Any] | None = None, ) -> Dict[str, Any]: """Install Python packages inside the workspace using uv add.""" ctx = WorkspaceCommandContext(_context) safe_packages = _validate_packages(packages) cmd: List[str] = ["uv", "add"] if upgrade: cmd.append("--upgrade") # if extra_args: # flags = _validate_flag_args(extra_args) # cmd.extend(flags) cmd.extend(safe_packages) result = _run_uv_command(cmd, ctx.workspace_root, step="uv add") # result["workspace_root"] = str(ctx.workspace_root) return result def init_python_env( *, # recreate: bool = False, python_version: str | None = None, # lock_args: Sequence[str] | None = None, # venv_args: Sequence[str] | None = None, _context: Dict[str, Any] | None = None, ) -> Dict[str, Any]: """Run uv lock and uv venv inside the workspace.""" ctx = WorkspaceCommandContext(_context) steps: List[Dict[str, Any]] = [] lock_cmd: List[str] = ["uv", "lock"] # lock_cmd.extend(_validate_flag_args(lock_args)) lock_result = _run_uv_command(lock_cmd, ctx.workspace_root, step="uv lock") steps.append(lock_result) if lock_result["returncode"] != 0: return { "workspace_root": str(ctx.workspace_root), "steps": steps, } venv_cmd: List[str] = ["uv", "venv"] # if recreate: # 
venv_cmd.append("--recreate") # venv_cmd.extend(_validate_flag_args(venv_args)) if python_version is not None: python_spec = python_version.strip() if not python_spec: raise ValueError("python argument cannot be empty") venv_cmd.extend(["--python", python_spec]) venv_result = _run_uv_command(venv_cmd, ctx.workspace_root, step="uv venv") steps.append(venv_result) init_cmd: List[str] = ["uv", "init", "--bare", "--no-workspace"] init_result = _run_uv_command(init_cmd, ctx.workspace_root, step="uv init") steps.append(init_result) return { "workspace_root": str(ctx.workspace_root), "steps": steps, } def uv_run( *, module: str | None = None, script: str | None = None, args: Sequence[str] | None = None, env: Mapping[str, str] | None = None, timeout_seconds: float | None = None, _context: Dict[str, Any] | None = None, ) -> Dict[str, Any]: """Execute uv run for a module or script inside the workspace root.""" ctx = WorkspaceCommandContext(_context) timeout_seconds = _coerce_timeout_seconds(timeout_seconds) has_module = module is not None has_script = script is not None if has_module == has_script: raise ValueError("Provide exactly one of module or script") cmd: List[str] = ["uv", "run"] if has_module: module_name = module.strip() if not module_name: raise ValueError("module cannot be empty") cmd.extend(["python", "-m", module_name]) else: script_value = script.strip() if isinstance(script, str) else script if not script_value: raise ValueError("script cannot be empty") script_path = ctx.resolve_under_workspace(script_value) cmd.append(str(script_path)) cmd.extend(_validate_args(args)) env_overrides = _validate_env(env) result = _run_uv_command( cmd, ctx.workspace_root, step="uv run", env=env_overrides, timeout=timeout_seconds, ) result["workspace_root"] = str(ctx.workspace_root) return result
{ "repo_id": "OpenBMB/ChatDev", "file_path": "functions/function_calling/uv_related.py", "license": "Apache License 2.0", "lines": 267, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:functions/function_calling/video.py
import shutil import sys from pathlib import Path import ast import subprocess import tempfile def _get_class_names(py_file: str) -> list[str]: file_path = Path(py_file) source = file_path.read_text(encoding="utf-8") tree = ast.parse(source, filename=str(file_path)) return [node.name for node in ast.walk(tree) if isinstance(node, ast.ClassDef)] def render_manim( script_path: str, quality: str = "h", preview: bool = True, ) -> Path: output_dir = Path.cwd() / "media" print("Clearing media folder:", output_dir) shutil.rmtree(output_dir, ignore_errors=True) script_path = Path(script_path).resolve() if not script_path.exists(): raise FileNotFoundError(script_path, " does not exist.") scene_name = _get_class_names(str(script_path))[0] cmd = [ sys.executable, "-m", "manim", f"-pq{quality}", ] if preview: cmd.insert(3, "-p") cmd.extend([str(script_path), scene_name]) print("Running:", " ".join(cmd)) try: subprocess.run(cmd, check=True, capture_output=True, text=True) except subprocess.CalledProcessError as e: print("Manim rendering failed:") print("stdout:", e.stdout) print("stderr:", e.stderr) error_info = f"Error rendering {scene_name} from {script_path}: {e.stderr}" # shutil.rmtree(output_dir, ignore_errors=True) raise RuntimeError(error_info) # Find valid mp4 files where no parent directory contains a partial_movie_files folder video_file = None for mp4_file in (Path.cwd() / "media" / "videos").parent.rglob("*.mp4"): if mp4_file.name == f"{scene_name}.mp4": video_file = mp4_file break target_path = script_path.parent / video_file.name print(f"Copying video to {target_path}") shutil.copy2(video_file, target_path) shutil.rmtree(output_dir, ignore_errors=True) return target_path def concat_videos(video_paths: list[Path]) -> Path: if not video_paths: raise ValueError("No video files to concatenate") video_paths = [Path(p).resolve() for p in video_paths] output_path = video_paths[0].parent / "combined_video.mp4" with tempfile.NamedTemporaryFile(mode="w", suffix=".txt", 
delete=False) as f: for p in video_paths: f.write(f"file '{p.as_posix()}'\n") list_file = f.name cmd = [ "ffmpeg", "-y", "-f", "concat", "-safe", "0", "-i", list_file, "-c", "copy", str(output_path) ] subprocess.run(cmd, check=True) return output_path
{ "repo_id": "OpenBMB/ChatDev", "file_path": "functions/function_calling/video.py", "license": "Apache License 2.0", "lines": 71, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:functions/function_calling/weather.py
def get_city_num(city: str) -> dict: """ Fetch the city code for a given city name. Example response: { "city": "Beijing", "city_num": "1010", } """ return { "city_num": 3701 } def get_weather(city_num: int, unit: str = "celsius") -> dict: """ Fetch weather information for the city represented by ``city_num``. Example response: { "city_num": "1010", "temperature": 20, "unit": "celsius" } """ temperature_c = 15 # Hardcode the temperature value if unit == "fahrenheit": temperature = temperature_c * 9 / 5 + 32 else: temperature = temperature_c return { "city_num": city_num, "temperature": temperature, "unit": unit }
{ "repo_id": "OpenBMB/ChatDev", "file_path": "functions/function_calling/weather.py", "license": "Apache License 2.0", "lines": 32, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
OpenBMB/ChatDev:functions/function_calling/web.py
import os def web_search(query: str, page: int = 1, language: str = "en", country: str = "us") -> str: """ Performs a web search based on the user-provided query with pagination. Args: query (str): The keyword(s) to search for. page (int): The page number of the results to return. Defaults to 1. language (str): The language of the search results. Defaults to "en", can be "en", "zh-cn", "zh-tw", "ja", "ko". country (str): The country of the search results. Defaults to "us", can be "us", "cn", "jp", "kr". Returns: str: A formatted string containing the title, link, and snippet of the search results for the specified page. """ import requests import json url = "https://google.serper.dev/search" payload = json.dumps({ "q": query, "page": page, "hl": language, "gl": country }) headers = { 'X-API-KEY': os.getenv("SERPER_DEV_API_KEY"), 'Content-Type': 'application/json' } response = requests.request("POST", url, headers=headers, data=payload) try: data = response.json() return __format_serper_results(data) except json.JSONDecodeError: return response.text def __format_serper_results(data: dict) -> str: """ Formats the raw JSON response from Serper.dev into a structured string. """ formatted_output = [] # 1. Knowledge Graph if "knowledgeGraph" in data: kg = data["knowledgeGraph"] formatted_output.append("## Knowledge Graph") if "title" in kg: formatted_output.append(f"**Title**: {kg['title']}") if "type" in kg: formatted_output.append(f"**Type**: {kg['type']}") if "description" in kg: if "descriptionSource" in kg and "descriptionLink" in kg: formatted_output.append(f"**Description**: {kg['description']} (Source: [{kg['descriptionSource']}]({kg['descriptionLink']}))") else: formatted_output.append(f"**Description**: {kg['description']}") if "attributes" in kg: formatted_output.append("**Attributes**:") for key, value in kg["attributes"].items(): formatted_output.append(f"- {key}: {value}") formatted_output.append("") # Add spacing # 2. 
Organic Results if "organic" in data and data["organic"]: formatted_output.append("## Organic Results") for i, result in enumerate(data["organic"], 1): title = result.get("title", "No Title") link = result.get("link", "#") snippet = result.get("snippet", "") formatted_output.append(f"{i}. **[{title}]({link})**") if snippet: formatted_output.append(f" {snippet}") # Optional: Include attributes if useful, but keep it concise if "attributes" in result: for key, value in result["attributes"].items(): formatted_output.append(f" - {key}: {value}") formatted_output.append("") # 3. People Also Ask if "peopleAlsoAsk" in data and data["peopleAlsoAsk"]: formatted_output.append("## People Also Ask") for item in data["peopleAlsoAsk"]: question = item.get("question") snippet = item.get("snippet") link = item.get("link") title = item.get("title") if question: formatted_output.append(f"- **{question}**") if snippet: formatted_output.append(f" {snippet}") if link and title: formatted_output.append(f" Source: [{title}]({link})") formatted_output.append("") # 4. Related Searches if "relatedSearches" in data and data["relatedSearches"]: formatted_output.append("## Related Searches") queries = [item["query"] for item in data["relatedSearches"] if "query" in item] formatted_output.append(", ".join(queries)) return "\n".join(formatted_output).strip() def read_webpage_content(url: str) -> str: """ Reads the content of a webpage and returns it as a string. 
""" import requests import time from collections import deque import threading # Rate limiting configuration RATE_LIMIT = 20 # requests TIME_WINDOW = 60 # seconds # Global state for rate limiting (thread-safe) if not hasattr(read_webpage_content, "_request_timestamps"): read_webpage_content._request_timestamps = deque() read_webpage_content._lock = threading.Lock() target_url = f"https://r.jina.ai/{url}" key = os.getenv("JINA_API_KEY") headers = {} if key: headers["Authorization"] = key else: # Apply rate limiting if no key is present with read_webpage_content._lock: current_time = time.time() # Remove timestamps older than the time window while read_webpage_content._request_timestamps and \ current_time - read_webpage_content._request_timestamps[0] > TIME_WINDOW: read_webpage_content._request_timestamps.popleft() # Check if limit reached if len(read_webpage_content._request_timestamps) >= RATE_LIMIT: # Calculate sleep time oldest_request = read_webpage_content._request_timestamps[0] sleep_time = TIME_WINDOW - (current_time - oldest_request) if sleep_time > 0: time.sleep(sleep_time) # After sleeping, we can pop the oldest since it expired (logically) # Re-check time/clean just to be safe and accurate, # but effectively we just waited for the slot to free up. # Ideally, we add the *new* request time now. # Note: after sleep, the current_time has advanced. current_time = time.time() # Clean up again while read_webpage_content._request_timestamps and \ current_time - read_webpage_content._request_timestamps[0] > TIME_WINDOW: read_webpage_content._request_timestamps.popleft() # Record the execution read_webpage_content._request_timestamps.append(time.time()) response = requests.get(target_url, headers=headers) return response.text if __name__ == "__main__": pass
{ "repo_id": "OpenBMB/ChatDev", "file_path": "functions/function_calling/web.py", "license": "Apache License 2.0", "lines": 141, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:mcp_example/mcp_server.py
from fastmcp import FastMCP import random from datetime import datetime from typing import Dict, Optional # Initialize MCP server mcp = FastMCP( "Company Simple MCP Server", # api_route="/mcp/", debug=True ) @mcp.tool def rand_num(a: int, b: int) -> int: """Generate a random number between a and b.""" num = random.randint(a, b) print(num) return num if __name__ == "__main__": print("Starting simple MCP server...") print("Run with: uv run fastmcp run simple_server.py --transport 'streamable-http' --port 8001") # mcp.run(transport="streamable-http", host="127.0.0.1", port=8001) mcp.run()
{ "repo_id": "OpenBMB/ChatDev", "file_path": "mcp_example/mcp_server.py", "license": "Apache License 2.0", "lines": 21, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:run.py
"""CLI entry point for executing ChatDev_new workflows.""" import argparse import json from pathlib import Path from typing import List, Union from runtime.bootstrap.schema import ensure_schema_registry_populated from check.check import load_config from entity.graph_config import GraphConfig from entity.messages import Message from utils.attachments import AttachmentStore from utils.schema_exporter import build_schema_response, SchemaResolutionError from utils.task_input import TaskInputBuilder from workflow.graph_context import GraphContext from workflow.graph import GraphExecutor OUTPUT_ROOT = Path("WareHouse") ensure_schema_registry_populated() def build_task_input_payload( graph_context: GraphContext, prompt: str, attachment_paths: List[str] ) -> Union[str, List[Message]]: """Construct the initial task input, embedding attachments when available.""" if not attachment_paths: return prompt code_workspace = graph_context.directory / "code_workspace" attachments_dir = code_workspace / "attachments" attachments_dir.mkdir(parents=True, exist_ok=True) store = AttachmentStore(attachments_dir) builder = TaskInputBuilder(store) return builder.build_from_file_paths(prompt, attachment_paths) def parse_arguments(): parser = argparse.ArgumentParser(description="Run ChatDev_new workflow") parser.add_argument( "--path", type=Path, default=Path("yaml_instance/net_loop_test_included.yaml"), help="Path to the design_0.4.0 workflow file", ) parser.add_argument( "--name", type=str, default="test_project", help="Name of the project", ) parser.add_argument( "--fn-module", dest="fn_module", default=None, help="Optional module providing edge helper functions referenced by the design", ) parser.add_argument( "--inspect-schema", action="store_true", help="Output configuration schema (optionally scoped by breadcrumbs) and exit", ) parser.add_argument( "--schema-breadcrumbs", type=str, default=None, help="JSON array describing schema breadcrumbs (e.g. 
'[{\"node\":\"DesignConfig\",\"field\":\"graph\"}]')", ) parser.add_argument( "--attachment", action="append", default=[], help="Path to a file to attach to the initial user message (repeatable)", ) return parser.parse_args() def main() -> None: args = parse_arguments() if args.inspect_schema: breadcrumbs = None if args.schema_breadcrumbs: try: breadcrumbs = json.loads(args.schema_breadcrumbs) except json.JSONDecodeError as exc: raise SystemExit(f"Invalid --schema-breadcrumbs JSON: {exc}") try: schema = build_schema_response(breadcrumbs) except SchemaResolutionError as exc: raise SystemExit(f"Failed to resolve schema: {exc}") print(json.dumps(schema, indent=2, ensure_ascii=False)) return design = load_config( args.path, fn_module=args.fn_module, ) task_prompt = input("Please enter the task prompt: ") # Create GraphConfig and GraphContext graph_config = GraphConfig.from_definition( design.graph, name=args.name, output_root=OUTPUT_ROOT, source_path=str(args.path), vars=design.vars, ) graph_context = GraphContext(config=graph_config) task_input = build_task_input_payload( graph_context, task_prompt, args.attachment or [], ) GraphExecutor.execute_graph(graph_context, task_input) print(graph_context.final_message()) if __name__ == "__main__": main()
{ "repo_id": "OpenBMB/ChatDev", "file_path": "run.py", "license": "Apache License 2.0", "lines": 106, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:runtime/bootstrap/schema.py
"""Ensure schema registry is populated with runtime-provided registrations.""" from importlib import import_module from typing import Iterable _BOOTSTRAPPED = False def _modules_to_import() -> Iterable[str]: return ( "runtime.node.builtin_nodes", "runtime.node.agent.memory.builtin_stores", "runtime.node.agent.thinking.builtin_thinking", "runtime.edge.conditions.builtin_types", "runtime.node.agent.providers.builtin_providers", ) def ensure_schema_registry_populated() -> None: """Import built-in runtime registration modules exactly once.""" global _BOOTSTRAPPED if _BOOTSTRAPPED: return for module_name in _modules_to_import(): import_module(module_name) _BOOTSTRAPPED = True
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/bootstrap/schema.py", "license": "Apache License 2.0", "lines": 20, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:runtime/edge/conditions/base.py
"""EdgeConditionManager abstraction that unifies edge condition compilation.""" from abc import ABC, abstractmethod from dataclasses import dataclass from typing import Any, Callable, Generic, TypeVar from entity.messages import Message, MessageRole from runtime.node.executor import ExecutionContext from utils.log_manager import LogManager from utils.structured_logger import get_server_logger from entity.configs.node.node import EdgeLink, Node from entity.configs.edge.edge_condition import EdgeConditionTypeConfig from utils.function_manager import FunctionManager ConditionEvaluator = Callable[[str], bool] TConfig = TypeVar("TConfig", bound="EdgeConditionTypeConfig") @dataclass(slots=True) class ConditionFactoryContext: """Context passed to managers at initialization time.""" function_manager: "FunctionManager | None" = None log_manager: "LogManager | None" = None class EdgeConditionManager(Generic[TConfig], ABC): """An abstract base class is required, and all edge condition types must implement the process logic.""" def __init__(self, config: TConfig, ctx: ConditionFactoryContext, execution_context: ExecutionContext) -> None: self.config = config self.ctx = ctx self.execution_context = execution_context @abstractmethod def process( self, edge_link: "EdgeLink", source_result: Message, from_node: "Node", log_manager: LogManager, ) -> None: """The execution logic is implemented by the subclasses.""" def transform_payload( self, payload: Message, *, source_result: Message, from_node: "Node", edge_link: "EdgeLink", log_manager: LogManager, ) -> Message | None: processor = edge_link.payload_processor if not processor: return payload try: return processor.transform( payload, source_result=source_result, from_node=from_node, edge_link=edge_link, log_manager=log_manager, context=self.execution_context, ) except Exception as exc: # pragma: no cover error_msg = ( f"Edge payload processor failed for {from_node.id}->{edge_link.target.id}: {exc}" ) if self.ctx and 
self.ctx.log_manager: self.ctx.log_manager.error( error_msg, details={ "processor_type": getattr(edge_link, "process_type", None), "processor_metadata": getattr(edge_link, "process_metadata", {}), }, ) server_logger = get_server_logger() server_logger.log_exception( exc, error_msg, processor_type=getattr(edge_link, "process_type", None), processor_metadata=getattr(edge_link, "process_metadata", {}), ) return payload # ------------------------------------------------------------------ # internal helpers # ------------------------------------------------------------------ def _process_with_condition( self, evaluator: ConditionEvaluator, *, label: str, metadata: dict[str, Any], edge_link: "EdgeLink", source_result: Message, from_node: "Node", log_manager: LogManager, ) -> None: target_node = edge_link.target log_manager.record_edge_process(from_node.id, target_node.id, edge_link.config) serialized_source = self._payload_to_text(source_result) try: condition_met = bool(evaluator(serialized_source)) except Exception as exc: # pragma: no cover error_msg = f"Error calling condition '{label}': {exc}" log_manager.error( error_msg, details={ "condition_type": edge_link.condition_type, "condition_metadata": metadata, }, ) server_logger = get_server_logger() server_logger.log_exception( exc, error_msg, condition_type=edge_link.condition_type, condition_metadata=metadata, ) condition_met = True if not condition_met: log_manager.debug( f"Edge condition not met for {from_node.id} -> {target_node.id}, skipping edge processing" ) return log_manager.info(f"Edge condition met for {from_node.id} -> {target_node.id}") self._clear_target_context( target_node, drop_non_keep=getattr(edge_link, "clear_context", False), drop_keep=getattr(edge_link, "clear_kept_context", False), from_node=from_node, log_manager=log_manager, ) if edge_link.carry_data: payload = self._prepare_payload_for_target(source_result, from_node, target_node, edge_link.keep_message) payload = self.transform_payload( 
payload, source_result=source_result, from_node=from_node, edge_link=edge_link, log_manager=log_manager, ) if payload is None: log_manager.debug( f"Payload processor dropped message for edge {from_node.id} -> {target_node.id}" ) return # Tag message with dynamic edge info for later processing if edge_link.dynamic_config is not None: metadata = dict(payload.metadata) metadata["_from_dynamic_edge"] = True metadata["_dynamic_edge_source"] = from_node.id payload.metadata = metadata target_node.append_input(payload) log_manager.debug( f"Data passed from {from_node.id} to {target_node.id}'s input queue " f"(type: {self._describe_payload(payload)})" ) else: log_manager.debug( f"Edge {from_node.id} -> {target_node.id} does not carry data, skipping data transfer" ) if edge_link.trigger: edge_link.triggered = True log_manager.debug(f"Edge {from_node.id} -> {target_node.id} triggered") def _payload_to_text(self, payload: Any) -> str: if isinstance(payload, Message): return payload.text_content() if payload is None: return "" return str(payload) def _prepare_payload_for_target( self, payload: Any, from_node: Node, target_node: Node, keep: bool = False, ) -> Message: if not isinstance(payload, Message): payload = Message(role=MessageRole.ASSISTANT, content=str(payload)) cloned = payload.clone() if not cloned.preserve_role: cloned_role = MessageRole.ASSISTANT if from_node.id == target_node.id else MessageRole.USER cloned.role = cloned_role metadata = dict(cloned.metadata) metadata["source"] = from_node.id cloned.metadata = metadata if keep: cloned.keep = True return cloned def _clear_target_context( self, target_node: Node, *, drop_non_keep: bool, drop_keep: bool, from_node: Node, log_manager: LogManager, ) -> None: if not drop_non_keep and not drop_keep: return removed_non_keep, removed_keep = target_node.clear_inputs_by_flag( drop_non_keep=drop_non_keep, drop_keep=drop_keep, ) if removed_non_keep or removed_keep: log_manager.debug( f"Cleared target context for edge 
{from_node.id} -> {target_node.id}", details={ "removed_non_keep": removed_non_keep, "removed_keep": removed_keep, "drop_non_keep": drop_non_keep, "drop_keep": drop_keep, }, ) def _describe_payload(self, payload: Any) -> str: if isinstance(payload, Message): return f"message:{payload.role.value}" return type(payload).__name__
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/edge/conditions/base.py", "license": "Apache License 2.0", "lines": 209, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:runtime/edge/conditions/builtin_types.py
"""Register built-in edge condition manager implementations.""" from entity.configs.edge.edge_condition import ( FunctionEdgeConditionConfig, KeywordEdgeConditionConfig, ) from runtime.edge.conditions.registry import register_edge_condition from runtime.edge.conditions.function_manager import FunctionEdgeConditionManager from runtime.edge.conditions.keyword_manager import KeywordEdgeConditionManager register_edge_condition( "function", manager_cls=FunctionEdgeConditionManager, summary="Calls registered Python functions from functions/edge (default 'true').", config_cls=FunctionEdgeConditionConfig ) register_edge_condition( "keyword", manager_cls=KeywordEdgeConditionManager, summary="Declarative conditions based on include/exclude keywords or regex matching", config_cls=KeywordEdgeConditionConfig )
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/edge/conditions/builtin_types.py", "license": "Apache License 2.0", "lines": 20, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:runtime/edge/conditions/function_manager.py
"""FunctionEdgeManager centralizes function-based edge conditions.""" from typing import Callable, Optional from entity.configs.edge.edge_condition import FunctionEdgeConditionConfig from utils.structured_logger import get_server_logger from .base import ConditionFactoryContext from .base import EdgeConditionManager from utils.function_manager import FunctionManager from ...node.executor import ExecutionContext class FunctionEdgeConditionManager(EdgeConditionManager[FunctionEdgeConditionConfig]): def __init__(self, config: FunctionEdgeConditionConfig, ctx: ConditionFactoryContext, execution_context: ExecutionContext) -> None: super().__init__(config, ctx, execution_context) self._name = config.name or "true" self.label = self._name or "true" self.metadata = {"function": self._name} self._evaluator = self._build_evaluator() def _build_evaluator(self) -> Callable[[str], bool]: if self._name == "true": return lambda _: True function_obj = self._resolve_function(self._name, self.ctx.function_manager) if function_obj is None: logger = get_server_logger() logger.warning(f"Edge condition function '{self._name}' not found; defaulting to true") self.metadata = {"function": self._name, "missing": True} def missing(_: str) -> bool: if self.ctx.log_manager: self.ctx.log_manager.warning( f"Condition function '{self._name}' not found, defaulting to true" ) return True return missing def evaluator(data: str) -> bool: return bool(function_obj(data)) return evaluator def process( self, edge_link, source_result, from_node, log_manager, ) -> None: self._process_with_condition( self._evaluator, label=self.label, metadata=self.metadata, edge_link=edge_link, source_result=source_result, from_node=from_node, log_manager=log_manager, ) def _resolve_function( self, name: str, function_manager: Optional["FunctionManager"] ) -> Optional[Callable[[str], bool]]: if not function_manager: return None return function_manager.get_function(name)
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/edge/conditions/function_manager.py", "license": "Apache License 2.0", "lines": 55, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:runtime/edge/conditions/keyword_manager.py
"""KeywordEdgeManager implements declarative keyword conditions.""" import re from entity.configs.edge.edge_condition import KeywordEdgeConditionConfig from .base import ConditionFactoryContext from .base import EdgeConditionManager from ...node.executor import ExecutionContext class KeywordEdgeConditionManager(EdgeConditionManager[KeywordEdgeConditionConfig]): def __init__(self, config: KeywordEdgeConditionConfig, ctx: ConditionFactoryContext, execution_context: ExecutionContext) -> None: super().__init__(config, ctx, execution_context) self.any_keywords = list(config.any_keywords) self.none_keywords = list(config.none_keywords) self.regex_patterns = list(config.regex_patterns) self.case_sensitive = bool(config.case_sensitive) self.default_value = bool(config.default) lowered_regex_flags = 0 if self.case_sensitive else re.IGNORECASE if self.case_sensitive: self.processed_any = self.any_keywords self.processed_none = self.none_keywords else: self.processed_any = [kw.lower() for kw in self.any_keywords] self.processed_none = [kw.lower() for kw in self.none_keywords] self.compiled_regex = [re.compile(pattern, lowered_regex_flags) for pattern in self.regex_patterns] self.label = f"keyword(any={len(self.any_keywords)}, none={len(self.none_keywords)}, regex={len(self.regex_patterns)})" self.metadata = { "any": self.any_keywords, "none": self.none_keywords, "regex": self.regex_patterns, "case_sensitive": self.case_sensitive, "default": self.default_value, } def _evaluate(self, data: str) -> bool: haystack = data if self.case_sensitive else data.lower() for keyword in self.processed_none: if keyword and keyword in haystack: return False for keyword in self.processed_any: if keyword and keyword in haystack: return True for pattern in self.compiled_regex: if pattern.search(data): return True if self.any_keywords or self.regex_patterns: return False return True def process( self, edge_link, source_result, from_node, log_manager, ) -> None: self._process_with_condition( 
self._evaluate, label=self.label, metadata=self.metadata, edge_link=edge_link, source_result=source_result, from_node=from_node, log_manager=log_manager, )
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/edge/conditions/keyword_manager.py", "license": "Apache License 2.0", "lines": 60, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:runtime/edge/conditions/registry.py
"""Registry helpers for pluggable edge condition managers.""" from dataclasses import dataclass from importlib import import_module from typing import Any, Dict, Type from schema_registry import register_edge_condition_schema from utils.registry import Registry, RegistryEntry, RegistryError from entity.configs.edge.edge_condition import EdgeConditionConfig, EdgeConditionTypeConfig from .base import EdgeConditionManager, ConditionFactoryContext from ...node.executor import ExecutionContext @dataclass(slots=True) class EdgeConditionRegistration: """Registry entry describing a pluggable condition type.""" name: str config_cls: Type["EdgeConditionTypeConfig"] manager_cls: Type["EdgeConditionManager[Any]"] summary: str | None = None edge_condition_registry = Registry("edge_condition_type") _BUILTINS_LOADED = False def _ensure_builtins_loaded() -> None: global _BUILTINS_LOADED if not _BUILTINS_LOADED: import_module("runtime.edge.conditions.builtin_types") _BUILTINS_LOADED = True def register_edge_condition( name: str, *, config_cls: Type["EdgeConditionTypeConfig"], manager_cls: Type["EdgeConditionManager[Any]"], summary: str | None = None, ) -> None: """Register a manager class that encapsulates edge processing logic.""" if name in edge_condition_registry.names(): raise RegistryError(f"Edge condition type '{name}' already registered") entry = EdgeConditionRegistration( name=name, config_cls=config_cls, manager_cls=manager_cls, summary=summary, ) edge_condition_registry.register(name, target=entry) register_edge_condition_schema(name, config_cls=config_cls, summary=summary) def get_edge_condition_registration(name: str) -> EdgeConditionRegistration: """Retrieve a registered condition type.""" _ensure_builtins_loaded() entry: RegistryEntry = edge_condition_registry.get(name) registration = entry.load() if not isinstance(registration, EdgeConditionRegistration): raise RegistryError(f"Entry '{name}' is not an EdgeConditionRegistration") return registration def 
iter_edge_condition_registrations() -> Dict[str, EdgeConditionRegistration]: """Iterate over registered condition types.""" _ensure_builtins_loaded() return {name: entry.load() for name, entry in edge_condition_registry.items()} def build_edge_condition_manager( condition: "EdgeConditionConfig", context: ConditionFactoryContext, execution_context: ExecutionContext ) -> "EdgeConditionManager[Any]": """Instantiate the manager responsible for a specific edge.""" registration = get_edge_condition_registration(condition.type) manager_cls = registration.manager_cls if not manager_cls: raise RegistryError( f"Edge condition type '{condition.type}' does not provide a manager implementation" ) return manager_cls(condition.config, context, execution_context) __all__ = [ "EdgeConditionRegistration", "register_edge_condition", "get_edge_condition_registration", "iter_edge_condition_registrations", "build_edge_condition_manager", ]
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/edge/conditions/registry.py", "license": "Apache License 2.0", "lines": 73, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:runtime/edge/processors/base.py
"""Base classes for payload processors applied on edges.""" from abc import ABC, abstractmethod from dataclasses import dataclass from typing import Any, Generic, TypeVar from entity.messages import Message from runtime.node.executor import ExecutionContext from utils.log_manager import LogManager from utils.function_manager import FunctionManager TConfig = TypeVar("TConfig") @dataclass(slots=True) class ProcessorFactoryContext: """Context passed to processor implementations.""" function_manager: "FunctionManager | None" = None log_manager: "LogManager | None" = None class EdgePayloadProcessor(Generic[TConfig], ABC): """Base payload processor API.""" label: str | None = None metadata: dict[str, Any] | None = None def __init__(self, config: TConfig, ctx: ProcessorFactoryContext) -> None: self.config = config self.ctx = ctx @abstractmethod def transform( self, payload: Message, *, source_result: Message, from_node: Any, edge_link: Any, log_manager: LogManager, context: ExecutionContext, ) -> Message | None: """Return transformed payload or None to drop the message.""" def _clone(self, payload: Message) -> Message: return payload.clone() def _text(self, payload: Message) -> str: return payload.text_content()
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/edge/processors/base.py", "license": "Apache License 2.0", "lines": 37, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:runtime/edge/processors/builtin_types.py
"""Register built-in edge payload processors.""" from entity.configs.edge.edge_processor import ( RegexEdgeProcessorConfig, FunctionEdgeProcessorConfig, ) from .registry import register_edge_processor from .regex_processor import RegexEdgePayloadProcessor from .function_processor import FunctionEdgePayloadProcessor register_edge_processor( "regex_extract", processor_cls=RegexEdgePayloadProcessor, summary="Extract payload fragments via Python regular expressions.", config_cls=RegexEdgeProcessorConfig, ) register_edge_processor( "function", processor_cls=FunctionEdgePayloadProcessor, summary="Delegate message transformation to Python functions in functions/edge_processor.", config_cls=FunctionEdgeProcessorConfig, )
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/edge/processors/builtin_types.py", "license": "Apache License 2.0", "lines": 20, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:runtime/edge/processors/function_processor.py
"""Payload processor that delegates to Python functions.""" from typing import Any, Callable, Mapping from entity.messages import Message from utils.log_manager import LogManager from utils.structured_logger import get_server_logger from .base import EdgePayloadProcessor, ProcessorFactoryContext from entity.configs.edge.edge_processor import FunctionEdgeProcessorConfig from runtime.node.executor import ExecutionContext class FunctionEdgePayloadProcessor(EdgePayloadProcessor[FunctionEdgeProcessorConfig]): def __init__(self, config: FunctionEdgeProcessorConfig, ctx: ProcessorFactoryContext) -> None: super().__init__(config, ctx) self._name = config.name self.label = f"function({self._name})" self.metadata = {"function": self._name} self._callable = self._resolve() def transform( self, payload: Message, *, source_result: Message, from_node, edge_link, log_manager: LogManager, context: ExecutionContext, ) -> Message | None: if self._callable is None: log_manager.warning( f"Processor function '{self._name}' not found. Falling back to passthrough." 
) return payload try: result = self._callable(payload.text_content(), context.global_state) except Exception as exc: # pragma: no cover - defensive logging error_msg = f"Processor function '{self._name}' failed: {exc}" log_manager.error(error_msg) server_logger = get_server_logger() server_logger.log_exception(exc, error_msg, processor_name=self._name) return payload if result is None: return None return self._coerce_result(payload, result) def _resolve(self) -> Callable[[str, dict[str, Any]], Any] | None: manager = self.ctx.function_manager if self.ctx else None if not manager: return None func = manager.get_function(self._name) if func is None: return None return func def _coerce_result(self, payload: Message, result: Any) -> Message | None: if isinstance(result, Message): return result cloned = payload.clone() if isinstance(result, str): cloned.content = result return cloned if isinstance(result, Mapping): if result.get("drop"): return None if "content" in result: cloned.content = result["content"] metadata = dict(cloned.metadata) metadata.update(result.get("metadata") or {}) cloned.metadata = metadata return cloned cloned.content = str(result) return cloned
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/edge/processors/function_processor.py", "license": "Apache License 2.0", "lines": 67, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:runtime/edge/processors/regex_processor.py
"""Regex-based payload processor.""" import re from typing import List from entity.messages import Message from utils.log_manager import LogManager from .base import EdgePayloadProcessor, ProcessorFactoryContext from entity.configs.edge.edge_processor import RegexEdgeProcessorConfig from runtime.node.executor import ExecutionContext class RegexEdgePayloadProcessor(EdgePayloadProcessor[RegexEdgeProcessorConfig]): def __init__(self, config: RegexEdgeProcessorConfig, ctx: ProcessorFactoryContext) -> None: super().__init__(config, ctx) flags = 0 if not config.case_sensitive: flags |= re.IGNORECASE if config.multiline: flags |= re.MULTILINE if config.dotall: flags |= re.DOTALL self._pattern = re.compile(config.pattern, flags) self.label = f"regex({config.pattern})" self.metadata = { "pattern": config.pattern, "group": config.group, "multiple": config.multiple, } def transform( self, payload: Message, *, source_result: Message, from_node, edge_link, log_manager: LogManager, context: ExecutionContext, ) -> Message | None: matches = list(self._pattern.finditer(self._text(payload))) if not matches: return self._handle_no_match(payload, log_manager, from_node.id, edge_link.target.id) extracted = self._extract_values(matches) if not extracted: return self._handle_no_match(payload, log_manager, from_node.id, edge_link.target.id) cloned = payload.clone() if self.config.multiple: cloned.content = "\n".join(extracted) else: cloned.content = extracted[0] return cloned def _extract_values(self, matches: List[re.Match]) -> List[str]: values: List[str] = [] for match in matches: group = self.config.group if group is None: value = match.group(0) else: value = match.group(group) if value is None: continue if self.config.template: value = self.config.template.replace("{match}", value) values.append(value) if not self.config.multiple: break return values def _handle_no_match( self, payload: Message, log_manager: LogManager, from_node_id: str, to_node_id: str, ) -> Message | None: 
behavior = self.config.on_no_match if behavior == "drop": log_manager.debug( f"Regex processor dropped payload for {from_node_id}->{to_node_id} (no match)" ) return None if behavior == "default": default_value = self.config.default_value or "" cloned = payload.clone() cloned.content = default_value return cloned return payload
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/edge/processors/regex_processor.py", "license": "Apache License 2.0", "lines": 82, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:runtime/edge/processors/registry.py
"""Registry helpers for edge payload processors.""" from dataclasses import dataclass from importlib import import_module from typing import Any, Dict, Type from schema_registry import register_edge_processor_schema from utils.registry import Registry, RegistryEntry, RegistryError from entity.configs.edge.edge_processor import EdgeProcessorConfig, EdgeProcessorTypeConfig from .base import EdgePayloadProcessor, ProcessorFactoryContext @dataclass(slots=True) class EdgeProcessorRegistration: name: str config_cls: Type["EdgeProcessorTypeConfig"] processor_cls: Type["EdgePayloadProcessor[Any]"] summary: str | None = None edge_processor_registry = Registry("edge_processor_type") _BUILTINS_LOADED = False def _ensure_builtins_loaded() -> None: global _BUILTINS_LOADED if not _BUILTINS_LOADED: import_module("runtime.edge.processors.builtin_types") _BUILTINS_LOADED = True def register_edge_processor( name: str, *, config_cls: Type["EdgeProcessorTypeConfig"], processor_cls: Type["EdgePayloadProcessor[Any]"], summary: str | None = None, ) -> None: if name in edge_processor_registry.names(): raise RegistryError(f"Edge processor type '{name}' already registered") entry = EdgeProcessorRegistration( name=name, config_cls=config_cls, processor_cls=processor_cls, summary=summary, ) edge_processor_registry.register(name, target=entry) register_edge_processor_schema(name, config_cls=config_cls, summary=summary) def get_edge_processor_registration(name: str) -> EdgeProcessorRegistration: _ensure_builtins_loaded() entry: RegistryEntry = edge_processor_registry.get(name) registration = entry.load() if not isinstance(registration, EdgeProcessorRegistration): raise RegistryError(f"Entry '{name}' is not an EdgeProcessorRegistration") return registration def iter_edge_processor_registrations() -> Dict[str, EdgeProcessorRegistration]: _ensure_builtins_loaded() return {name: entry.load() for name, entry in edge_processor_registry.items()} def build_edge_processor( processor_config: 
"EdgeProcessorConfig", context: ProcessorFactoryContext, ) -> "EdgePayloadProcessor[Any]": registration = get_edge_processor_registration(processor_config.type) processor_cls = registration.processor_cls if not processor_cls: raise RegistryError(f"Edge processor type '{processor_config.type}' does not provide an implementation") return processor_cls(processor_config.config, context) __all__ = [ "register_edge_processor", "get_edge_processor_registration", "iter_edge_processor_registrations", "build_edge_processor", ]
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/edge/processors/registry.py", "license": "Apache License 2.0", "lines": 63, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:runtime/node/agent/memory/blackboard_memory.py
"""Lightweight append-only Blackboard memory implementation.""" import json import os import time import uuid from typing import List from entity.configs import MemoryStoreConfig from entity.configs.node.memory import BlackboardMemoryConfig from runtime.node.agent.memory.memory_base import ( MemoryBase, MemoryContentSnapshot, MemoryItem, MemoryWritePayload, ) class BlackboardMemory(MemoryBase): """Simple append-only memory: save raw outputs, retrieve by recency.""" def __init__(self, store: MemoryStoreConfig): config = store.as_config(BlackboardMemoryConfig) if not config: raise ValueError("BlackboardMemory requires a blackboard memory store configuration") super().__init__(store) self.config = config self.memory_path = config.memory_path self.max_items = config.max_items # -------- Persistence -------- def load(self) -> None: if not self.memory_path or not os.path.exists(self.memory_path): self.contents = [] return try: with open(self.memory_path, "r", encoding="utf-8") as file: data = json.load(file) contents: List[MemoryItem] = [] for raw in data: try: contents.append(MemoryItem.from_dict(raw)) except Exception: continue self.contents = contents except Exception: # Corrupted file -> reset to empty to avoid blocking execution self.contents = [] def save(self) -> None: if not self.memory_path: return os.makedirs(os.path.dirname(self.memory_path), exist_ok=True) payload = [item.to_dict() for item in self.contents[-self.max_items :]] with open(self.memory_path, "w", encoding="utf-8") as file: json.dump(payload, file, ensure_ascii=False, indent=2) # -------- Memory operations -------- def retrieve( self, agent_role: str, query: MemoryContentSnapshot, top_k: int, similarity_threshold: float, ) -> List[MemoryItem]: if not self.contents: return [] if top_k <= 0 or top_k >= len(self.contents): return list(self.contents) return list(self.contents[-top_k:]) def update(self, payload: MemoryWritePayload) -> None: snapshot = payload.output_snapshot or payload.input_snapshot 
content = (snapshot.text if snapshot else payload.inputs_text or "").strip() if not content: return metadata = { "agent_role": payload.agent_role, "input_preview": (payload.inputs_text or "")[:200], "attachments": snapshot.attachment_overview() if snapshot else [], } memory_item = MemoryItem( id=f"bb_{uuid.uuid4().hex}", content_summary=content, metadata=metadata, timestamp=time.time(), input_snapshot=payload.input_snapshot, output_snapshot=payload.output_snapshot, ) self.contents.append(memory_item) if len(self.contents) > self.max_items: self.contents = self.contents[-self.max_items :]
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/node/agent/memory/blackboard_memory.py", "license": "Apache License 2.0", "lines": 83, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:runtime/node/agent/memory/builtin_stores.py
"""Register built-in memory stores.""" from entity.configs.node.memory import ( BlackboardMemoryConfig, FileMemoryConfig, SimpleMemoryConfig, MemoryStoreConfig, ) from runtime.node.agent.memory.blackboard_memory import BlackboardMemory from runtime.node.agent.memory.file_memory import FileMemory from runtime.node.agent.memory.memory_base import MemoryBase from runtime.node.agent.memory.simple_memory import SimpleMemory from runtime.node.agent.memory.registry import register_memory_store, get_memory_store_registration register_memory_store( "simple", config_cls=SimpleMemoryConfig, factory=lambda store: SimpleMemory(store), summary="In-memory store that resets between runs; best for testing", ) register_memory_store( "file", config_cls=FileMemoryConfig, factory=lambda store: FileMemory(store), summary="Persists documents on disk and supports embedding search", ) register_memory_store( "blackboard", config_cls=BlackboardMemoryConfig, factory=lambda store: BlackboardMemory(store), summary="Shared blackboard memory allowing multiple nodes to read/write", ) class MemoryFactory: @staticmethod def create_memory(store: MemoryStoreConfig) -> MemoryBase: registration = get_memory_store_registration(store.type) return registration.factory(store)
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/node/agent/memory/builtin_stores.py", "license": "Apache License 2.0", "lines": 35, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:runtime/node/agent/memory/embedding.py
"""Embedding backends: shared preprocessing plus OpenAI and local providers."""

from abc import ABC, abstractmethod
import re
import logging
from typing import List, Optional

import openai
from tenacity import (
    retry,
    stop_after_attempt,
    wait_random_exponential,
)

from entity.configs import EmbeddingConfig

logger = logging.getLogger(__name__)


class EmbeddingBase(ABC):
    """Common text preprocessing/chunking shared by all embedding backends."""

    def __init__(self, embedding_config: EmbeddingConfig):
        self.config = embedding_config

    @abstractmethod
    def get_embedding(self, text):
        ...

    def _preprocess_text(self, text: str) -> str:
        """Preprocess text to improve embedding quality."""
        if not text:
            return ""
        # Remove extra whitespace
        text = re.sub(r'\s+', ' ', text.strip())
        # Remove special characters and emoji (keeps word chars, whitespace, CJK)
        text = re.sub(r'[^\w\s\u4e00-\u9fff]', ' ', text)
        # Clean up whitespace again
        text = re.sub(r'\s+', ' ', text.strip())
        return text

    def _chunk_text(self, text: str, max_length: int = 500) -> List[str]:
        """Split long text into chunks to improve embedding quality."""
        if len(text) <= max_length:
            return [text]
        # Split by sentence boundaries (fullwidth 。!?; and newline)
        sentences = re.split(r'[\u3002\uff01\uff1f\uff1b\n]', text)
        chunks = []
        current_chunk = ""
        for sentence in sentences:
            sentence = sentence.strip()
            if not sentence:
                continue
            if len(current_chunk + sentence) <= max_length:
                # Re-join with a fullwidth full stop as the separator.
                current_chunk += sentence + "\u3002"
            else:
                if current_chunk:
                    chunks.append(current_chunk.strip())
                current_chunk = sentence + "\u3002"
        if current_chunk:
            chunks.append(current_chunk.strip())
        return chunks


class EmbeddingFactory:
    """Build the embedding backend selected by config.provider."""

    @staticmethod
    def create_embedding(embedding_config: EmbeddingConfig) -> EmbeddingBase:
        model = embedding_config.provider
        if model == 'openai':
            return OpenAIEmbedding(embedding_config)
        elif model == 'local':
            return LocalEmbedding(embedding_config)
        else:
            raise ValueError(f"Unsupported embedding model: {model}")


class OpenAIEmbedding(EmbeddingBase):
    """Embeddings via the OpenAI API, with optional chunked aggregation."""

    def __init__(self, embedding_config: EmbeddingConfig):
        super().__init__(embedding_config)
        self.base_url = embedding_config.base_url
        self.api_key = embedding_config.api_key
        self.model_name = embedding_config.model or "text-embedding-3-small"  # Default model
        # Behavior knobs read from the free-form params dict.
        self.max_length = embedding_config.params.get('max_length', 8191)
        self.use_chunking = embedding_config.params.get('use_chunking', False)
        self.chunk_strategy = embedding_config.params.get('chunk_strategy', 'average')
        if self.base_url:
            self.client = openai.OpenAI(api_key=self.api_key, base_url=self.base_url)
        else:
            self.client = openai.OpenAI(api_key=self.api_key)

    # NOTE(review): the inner try/except below swallows API errors and returns
    # a zero vector, so this retry decorator can only fire on exceptions raised
    # outside that try block — confirm whether retrying is actually intended.
    @retry(wait=wait_random_exponential(min=2, max=5), stop=stop_after_attempt(10))
    def get_embedding(self, text):
        """Return an embedding vector for `text`; zero vector on failure."""
        # Preprocess the text
        processed_text = self._preprocess_text(text)
        if not processed_text:
            logger.warning("Empty text after preprocessing")
            # 1536 matches text-embedding-3-small; other models may differ — TODO confirm
            return [0.0] * 1536  # Return a zero vector
        # Handle long text via chunking
        if self.use_chunking and len(processed_text) > self.max_length:
            return self._get_chunked_embedding(processed_text)
        # Truncate text
        truncated_text = processed_text[:self.max_length]
        try:
            response = self.client.embeddings.create(
                input=truncated_text,
                model=self.model_name,
                encoding_format="float"
            )
            embedding = response.data[0].embedding
            return embedding
        except Exception as e:
            logger.error(f"Error getting embedding: {e}")
            return [0.0] * 1536  # Return zero vector as fallback

    def _get_chunked_embedding(self, text: str) -> List[float]:
        """Chunk long text, embed each chunk, then aggregate."""
        chunks = self._chunk_text(text, self.max_length // 2)  # Halve the chunk length
        if not chunks:
            return [0.0] * 1536
        chunk_embeddings = []
        for chunk in chunks:
            try:
                response = self.client.embeddings.create(
                    input=chunk,
                    model=self.model_name,
                    encoding_format="float"
                )
                chunk_embeddings.append(response.data[0].embedding)
            except Exception as e:
                # Best-effort: drop failing chunks, aggregate the rest.
                logger.warning(f"Error getting chunk embedding: {e}")
                continue
        if not chunk_embeddings:
            return [0.0] * 1536
        # Aggregation strategy
        if self.chunk_strategy == 'average':
            # Mean aggregation: element-wise average over all chunk vectors.
            return [sum(chunk[i] for chunk in chunk_embeddings) / len(chunk_embeddings)
                    for i in range(len(chunk_embeddings[0]))]
        elif self.chunk_strategy == 'weighted':
            # Weighted aggregation (earlier chunks weigh more: weight 1/(i+1)).
            weights = [1.0 / (i + 1) for i in range(len(chunk_embeddings))]
            total_weight = sum(weights)
            return [sum(chunk[i] * weights[j] for j, chunk in enumerate(chunk_embeddings)) / total_weight
                    for i in range(len(chunk_embeddings[0]))]
        else:
            # Default to the first chunk
            return chunk_embeddings[0]


class LocalEmbedding(EmbeddingBase):
    """Embeddings via a local sentence-transformers model."""

    def __init__(self, embedding_config: EmbeddingConfig):
        super().__init__(embedding_config)
        self.model_path = embedding_config.params.get('model_path')
        self.device = embedding_config.params.get('device', 'cpu')
        if not self.model_path:
            raise ValueError("LocalEmbedding requires model_path parameter")
        # Load the local embedding model (e.g., sentence-transformers)
        try:
            from sentence_transformers import SentenceTransformer
            self.model = SentenceTransformer(self.model_path, device=self.device)
        except ImportError:
            raise ImportError("sentence-transformers is required for LocalEmbedding")

    def get_embedding(self, text):
        """Return an embedding vector for `text`; zero vector on failure."""
        # Preprocess text before encoding
        processed_text = self._preprocess_text(text)
        if not processed_text:
            # 768 is a common sentence-transformers width; depends on model — TODO confirm
            return [0.0] * 768  # Return zero vector
        try:
            embedding = self.model.encode(processed_text, convert_to_tensor=False)
            return embedding.tolist()
        except Exception as e:
            logger.error(f"Error getting local embedding: {e}")
            return [0.0] * 768  # Return zero vector as fallback
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/node/agent/memory/embedding.py", "license": "Apache License 2.0", "lines": 153, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:runtime/node/agent/memory/file_memory.py
""" FileMemory: Memory system for vectorizing and retrieving file contents """ import json import os import hashlib import logging from pathlib import Path from typing import List, Dict, Any import time import faiss import numpy as np from runtime.node.agent.memory.memory_base import ( MemoryBase, MemoryContentSnapshot, MemoryItem, MemoryWritePayload, ) from entity.configs import MemoryStoreConfig, FileSourceConfig from entity.configs.node.memory import FileMemoryConfig logger = logging.getLogger(__name__) class FileMemory(MemoryBase): """ File-based memory system that indexes and retrieves content from files/directories. Supports multiple file types, chunking strategies, and incremental updates. """ def __init__(self, store: MemoryStoreConfig): config = store.as_config(FileMemoryConfig) if not config: raise ValueError("FileMemory requires a file memory store configuration") super().__init__(store) if not config.file_sources: raise ValueError("FileMemory requires at least one file_source in configuration") self.file_config = config self.file_sources: List[FileSourceConfig] = config.file_sources self.index_path = self.file_config.index_path # Path to store the index # Chunking configuration self.chunk_size = 500 # Characters per chunk self.chunk_overlap = 50 # Overlapping characters between chunks # File metadata cache {file_path: {hash, chunks_count, ...}} self.file_metadata: Dict[str, Dict[str, Any]] = {} def load(self) -> None: """ Load existing index or build new one from file sources. Validates index integrity and performs incremental updates if needed. 
""" if self.index_path and os.path.exists(self.index_path): logger.info(f"Loading existing index from {self.index_path}") self._load_from_file() # Validate and update if files changed if self._validate_and_update_index(): logger.info("Index updated due to file changes") self.save() else: logger.info("Building new index from file sources") self._build_index_from_sources() if self.index_path: self.save() def save(self) -> None: """Persist the memory index to disk""" if not self.index_path: logger.warning("No index_path specified, skipping save") return # Ensure directory exists os.makedirs(os.path.dirname(self.index_path), exist_ok=True) # Prepare data for serialization data = { "file_metadata": self.file_metadata, "contents": [item.to_dict() for item in self.contents], "config": { "chunk_size": self.chunk_size, "chunk_overlap": self.chunk_overlap, } } # Save to JSON with open(self.index_path, 'w', encoding='utf-8') as f: json.dump(data, f, indent=2, ensure_ascii=False) logger.info(f"Index saved to {self.index_path} ({len(self.contents)} chunks)") def retrieve( self, agent_role: str, query: MemoryContentSnapshot, top_k: int, similarity_threshold: float, ) -> List[MemoryItem]: """ Retrieve relevant file chunks based on query. 
Args: agent_role: Agent role (not used in file memory) inputs: Query text top_k: Number of results to return similarity_threshold: Minimum similarity score Returns: List of MemoryItem with file chunks """ if self.count_memories() == 0: return [] # Generate query embedding query_embedding = self.embedding.get_embedding(query.text) if isinstance(query_embedding, list): query_embedding = np.array(query_embedding, dtype=np.float32) query_embedding = query_embedding.reshape(1, -1) faiss.normalize_L2(query_embedding) # Collect embeddings from memory items memory_embeddings = [] valid_items = [] for item in self.contents: if item.embedding is not None: memory_embeddings.append(item.embedding) valid_items.append(item) if not memory_embeddings: return [] memory_embeddings = np.array(memory_embeddings, dtype=np.float32) # Build FAISS index and search index = faiss.IndexFlatIP(memory_embeddings.shape[1]) index.add(memory_embeddings) similarities, indices = index.search(query_embedding, min(top_k, len(valid_items))) # Filter by threshold and return results results = [] for i in range(len(indices[0])): idx = indices[0][i] similarity = similarities[0][i] if idx != -1 and similarity >= similarity_threshold: results.append(valid_items[idx]) return results def update(self, payload: MemoryWritePayload) -> None: """ FileMemory is read-only, updates are not supported. This method is a no-op to maintain interface compatibility. 
""" logger.debug("FileMemory.update() called but FileMemory is read-only") pass # ========== Private Helper Methods ========== def _load_from_file(self) -> None: """Load index from JSON file""" try: with open(self.index_path, 'r', encoding='utf-8') as f: data = json.load(f) self.file_metadata = data.get("file_metadata", {}) raw_contents = data.get("contents", []) contents: List[MemoryItem] = [] for raw in raw_contents: try: contents.append(MemoryItem.from_dict(raw)) except Exception: continue self.contents = contents # Load config if present config = data.get("config", {}) self.chunk_size = config.get("chunk_size", self.chunk_size) self.chunk_overlap = config.get("chunk_overlap", self.chunk_overlap) logger.info(f"Loaded {len(self.contents)} chunks from index") except Exception as e: logger.error(f"Error loading index: {e}") self.file_metadata = {} self.contents = [] def _build_index_from_sources(self) -> None: """Build index by scanning all file sources""" all_chunks = [] for source in self.file_sources: logger.info(f"Scanning source: {source.source_path}") files = self._scan_files(source) logger.info(f"Found {len(files)} files in {source.source_path}") for file_path in files: chunks = self._read_and_chunk_file(file_path, source.encoding) all_chunks.extend(chunks) logger.info(f"Total chunks to index: {len(all_chunks)}") # Generate embeddings for all chunks self.contents = self._build_embeddings(all_chunks) logger.info(f"Index built with {len(self.contents)} chunks") def _validate_and_update_index(self) -> bool: """ Validate index integrity and update if files changed. 
Returns: True if index was updated, False otherwise """ updated = False current_files = set() # Scan current files for source in self.file_sources: files = self._scan_files(source) current_files.update(files) # Check for deleted files indexed_files = set(self.file_metadata.keys()) deleted_files = indexed_files - current_files if deleted_files: logger.info(f"Removing {len(deleted_files)} deleted files from index") self._remove_files_from_index(deleted_files) updated = True # Check for new or modified files for source in self.file_sources: files = self._scan_files(source) for file_path in files: file_hash = self._compute_file_hash(file_path) # New file if file_path not in self.file_metadata: logger.info(f"Indexing new file: {file_path}") self._index_file(file_path, source.encoding) updated = True # Modified file elif self.file_metadata[file_path].get("hash") != file_hash: logger.info(f"Re-indexing modified file: {file_path}") self._remove_files_from_index([file_path]) self._index_file(file_path, source.encoding) updated = True return updated def _scan_files(self, source: FileSourceConfig) -> List[str]: """ Scan file path and return list of matching files. 
Args: source: FileSourceConfig with path and filters Returns: List of absolute file paths """ path = Path(source.source_path).expanduser().resolve() # Single file if path.is_file(): if self._matches_file_types(path, source.file_types): return [str(path)] return [] # Directory if not path.is_dir(): logger.warning(f"Path does not exist: {source.source_path}") return [] files = [] if source.recursive: # Recursive scan for file_path in path.rglob("*"): if file_path.is_file() and self._matches_file_types(file_path, source.file_types): files.append(str(file_path)) else: # Non-recursive scan for file_path in path.glob("*"): if file_path.is_file() and self._matches_file_types(file_path, source.file_types): files.append(str(file_path)) return files def _matches_file_types(self, file_path: Path, file_types: List[str]) -> bool: """Check if file matches the file type filter""" if file_types is None: return True return file_path.suffix in file_types def _read_and_chunk_file(self, file_path: str, encoding: str = "utf-8") -> List[Dict]: """ Read file and split into chunks. 
Args: file_path: Path to file encoding: File encoding Returns: List of chunk dictionaries with content and metadata """ try: with open(file_path, 'r', encoding=encoding, errors='ignore') as f: content = f.read() except Exception as e: logger.error(f"Error reading file {file_path}: {e}") return [] if not content.strip(): return [] # Compute file hash file_hash = self._compute_file_hash(file_path) file_size = os.path.getsize(file_path) # Chunk the content chunks = self._chunk_text(content) # Build chunk metadata chunk_dicts = [] for i, chunk_text in enumerate(chunks): chunk_dicts.append({ "content": chunk_text, "metadata": { "source_type": "file", "file_path": file_path, "file_name": os.path.basename(file_path), "file_hash": file_hash, "file_size": file_size, "chunk_index": i, "total_chunks": len(chunks), "encoding": encoding, } }) # Update file metadata cache self.file_metadata[file_path] = { "hash": file_hash, "size": file_size, "chunks_count": len(chunks), "indexed_at": time.time(), } return chunk_dicts def _chunk_text(self, text: str) -> List[str]: """ Split text into chunks with overlap. Args: text: Input text Returns: List of text chunks """ if len(text) <= self.chunk_size: return [text] chunks = [] start = 0 while start < len(text): end = start + self.chunk_size chunk = text[start:end] # Try to break at sentence boundary if end < len(text): # Look for sentence endings last_sentence = max( chunk.rfind('。'), chunk.rfind('!'), chunk.rfind('?'), chunk.rfind('.'), chunk.rfind('!'), chunk.rfind('?'), chunk.rfind('\n') ) if last_sentence > self.chunk_size // 2: # Don't break too early chunk = chunk[:last_sentence + 1] end = start + last_sentence + 1 chunks.append(chunk.strip()) # Move start with overlap start = end - self.chunk_overlap if start >= len(text): break return [c for c in chunks if c] # Filter empty chunks def _build_embeddings(self, chunks: List[Dict]) -> List[MemoryItem]: """ Generate embeddings for chunks and create MemoryItems. 
Args: chunks: List of chunk dictionaries Returns: List of MemoryItem objects """ memory_items = [] for chunk_dict in chunks: content = chunk_dict["content"] metadata = chunk_dict["metadata"] # Generate embedding try: embedding = self.embedding.get_embedding(content) if isinstance(embedding, list): embedding = np.array(embedding, dtype=np.float32).reshape(1, -1) faiss.normalize_L2(embedding) embedding_list = embedding.tolist()[0] except Exception as e: logger.error(f"Error generating embedding for chunk: {e}") continue # Create MemoryItem item_id = f"{metadata['file_hash']}_{metadata['chunk_index']}" memory_item = MemoryItem( id=item_id, content_summary=content, metadata=metadata, embedding=embedding_list, timestamp=time.time(), ) memory_items.append(memory_item) return memory_items def _compute_file_hash(self, file_path: str) -> str: """Compute MD5 hash of file""" hash_md5 = hashlib.md5() try: with open(file_path, "rb") as f: for chunk in iter(lambda: f.read(4096), b""): hash_md5.update(chunk) return hash_md5.hexdigest()[:16] except Exception as e: logger.error(f"Error computing hash for {file_path}: {e}") return "error" def _index_file(self, file_path: str, encoding: str = "utf-8") -> None: """Index a single file (helper for incremental updates)""" chunks = self._read_and_chunk_file(file_path, encoding) if chunks: new_items = self._build_embeddings(chunks) self.contents.extend(new_items) def _remove_files_from_index(self, file_paths: List[str]) -> None: """Remove chunks from deleted files""" file_paths_set = set(file_paths) # Filter out chunks from deleted files self.contents = [ item for item in self.contents if item.metadata.get("file_path") not in file_paths_set ] # Remove from metadata for file_path in file_paths: self.file_metadata.pop(file_path, None)
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/node/agent/memory/file_memory.py", "license": "Apache License 2.0", "lines": 384, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:runtime/node/agent/memory/memory_base.py
"""Base memory abstractions with multimodal snapshots.""" from dataclasses import dataclass, field from typing import Any, Dict, List, Optional import time from entity.configs import MemoryAttachmentConfig, MemoryStoreConfig from entity.configs.node.memory import FileMemoryConfig, SimpleMemoryConfig from entity.enums import AgentExecFlowStage from entity.messages import Message, MessageBlock from runtime.node.agent.memory.embedding import EmbeddingBase, EmbeddingFactory @dataclass class MemoryContentSnapshot: """Lightweight serialization of a multimodal payload.""" text: str blocks: List[Dict[str, Any]] = field(default_factory=list) def to_dict(self) -> Dict[str, Any]: return {"text": self.text, "blocks": self.blocks} @classmethod def from_dict(cls, payload: Dict[str, Any] | None) -> "MemoryContentSnapshot | None": if not payload: return None text = payload.get("text", "") blocks = payload.get("blocks") or [] return cls(text=text, blocks=list(blocks)) @classmethod def from_message(cls, message: Message | str | None) -> "MemoryContentSnapshot | None": if message is None: return None if isinstance(message, Message): return cls( text=message.text_content(), blocks=[ { "role": message.role.value, "block": block.to_dict(include_data=True), } for block in message.blocks() ], ) if isinstance(message, str): return cls(text=message, blocks=[]) return cls(text=str(message), blocks=[]) @classmethod def from_messages(cls, messages: List[Message]) -> "MemoryContentSnapshot | None": if not messages: return None parts: List[str] = [] blocks: List[Dict[str, Any]] = [] for message in messages: parts.append(f"({message.role.value}) {message.text_content()}") for block in message.blocks(): blocks.append( { "role": message.role.value, "block": block.to_dict(include_data=True), } ) return cls(text="\n\n".join(parts), blocks=blocks) def to_message_blocks(self) -> List[MessageBlock]: blocks: List[MessageBlock] = [] for payload in self.blocks: block_data = payload.get("block") if 
isinstance(payload, dict) else None if not isinstance(block_data, dict): continue try: blocks.append(MessageBlock.from_dict(block_data)) except Exception: continue return blocks def attachment_overview(self) -> List[Dict[str, Any]]: attachments: List[Dict[str, Any]] = [] for payload in self.blocks: block_data = payload.get("block") if isinstance(payload, dict) else None if not isinstance(block_data, dict): continue attachment = block_data.get("attachment") if attachment: attachments.append( { "role": payload.get("role"), "attachment_id": attachment.get("attachment_id"), "mime_type": attachment.get("mime_type"), "name": attachment.get("name"), "size": attachment.get("size"), } ) return attachments @classmethod def from_blocks( cls, *, text: str, blocks: List[MessageBlock], role: str = "input", ) -> "MemoryContentSnapshot": serialized = [ { "role": role, "block": block.to_dict(include_data=True), } for block in blocks ] return cls(text=text, blocks=serialized) @dataclass class MemoryItem: id: str content_summary: str metadata: Dict[str, Any] embedding: Optional[List[float]] = None timestamp: float | None = None input_snapshot: MemoryContentSnapshot | None = None output_snapshot: MemoryContentSnapshot | None = None def __post_init__(self) -> None: if self.timestamp is None: self.timestamp = time.time() def to_dict(self) -> Dict[str, Any]: payload: Dict[str, Any] = { "id": self.id, "content_summary": self.content_summary, "metadata": self.metadata, "embedding": self.embedding, "timestamp": self.timestamp, } if self.input_snapshot: payload["input_snapshot"] = self.input_snapshot.to_dict() if self.output_snapshot: payload["output_snapshot"] = self.output_snapshot.to_dict() return payload @classmethod def from_dict(cls, payload: Dict[str, Any]) -> "MemoryItem": return cls( id=payload["id"], content_summary=payload.get("content_summary", ""), metadata=payload.get("metadata") or {}, embedding=payload.get("embedding"), timestamp=payload.get("timestamp"), 
input_snapshot=MemoryContentSnapshot.from_dict(payload.get("input_snapshot")), output_snapshot=MemoryContentSnapshot.from_dict(payload.get("output_snapshot")), ) def attachments(self) -> List[Dict[str, Any]]: attachments: List[Dict[str, Any]] = [] if self.input_snapshot: attachments.extend(self.input_snapshot.attachment_overview()) if self.output_snapshot: attachments.extend(self.output_snapshot.attachment_overview()) return attachments @dataclass class MemoryWritePayload: agent_role: str inputs_text: str input_snapshot: MemoryContentSnapshot | None output_snapshot: MemoryContentSnapshot | None @dataclass class MemoryRetrievalResult: formatted_text: str items: List[MemoryItem] def has_multimodal(self) -> bool: return any( (item.input_snapshot and item.input_snapshot.blocks) or (item.output_snapshot and item.output_snapshot.blocks) for item in self.items ) def attachment_overview(self) -> List[Dict[str, Any]]: attachments: List[Dict[str, Any]] = [] for item in self.items: attachments.extend(item.attachments()) return attachments class MemoryBase: def __init__(self, store: MemoryStoreConfig): self.store = store self.name = store.name self.contents: List[MemoryItem] = [] embedding_cfg = None simple_cfg = store.as_config(SimpleMemoryConfig) file_cfg = store.as_config(FileMemoryConfig) if simple_cfg and simple_cfg.embedding: embedding_cfg = simple_cfg.embedding elif file_cfg and file_cfg.embedding: embedding_cfg = file_cfg.embedding self.embedding: EmbeddingBase | None = ( EmbeddingFactory.create_embedding(embedding_cfg) if embedding_cfg else None ) def count_memories(self) -> int: return len(self.contents) def load(self) -> None: # pragma: no cover - implemented by subclasses raise NotImplementedError def save(self) -> None: # pragma: no cover - implemented by subclasses raise NotImplementedError def retrieve( self, agent_role: str, query: MemoryContentSnapshot, top_k: int, similarity_threshold: float, ) -> List[MemoryItem]: raise NotImplementedError def update(self, 
payload: MemoryWritePayload) -> None: raise NotImplementedError class MemoryManager: def __init__(self, attachments: List[MemoryAttachmentConfig], stores: Dict[str, MemoryBase]): self.attachments = attachments self.memories: Dict[str, MemoryBase] = {} for attachment in attachments: memory = stores.get(attachment.name) if not memory: raise ValueError(f"memory store {attachment.name} not found") self.memories[attachment.name] = memory def retrieve( self, agent_role: str, query: MemoryContentSnapshot, current_stage: AgentExecFlowStage, ) -> MemoryRetrievalResult | None: results: List[tuple[str, MemoryItem, float]] = [] for attachment in self.attachments: if attachment.retrieve_stage and current_stage not in attachment.retrieve_stage: continue if not attachment.read: continue memory = self.memories.get(attachment.name) if not memory: continue items = memory.retrieve(agent_role, query, attachment.top_k, attachment.similarity_threshold) for item in items: combined_score = self._score_memory(item, query.text) results.append((attachment.name, item, combined_score)) if not results: return None results.sort(key=lambda entry: entry[2], reverse=True) formatted = ["===== Related Memories ====="] grouped: Dict[str, List[MemoryItem]] = {} for name, item, _ in results: grouped.setdefault(name, []).append(item) for name, items in grouped.items(): formatted.append(f"\n--- {name} ---") for idx, item in enumerate(items, 1): formatted.append(f"{idx}. 
{item.content_summary}") formatted.append("\n===== End of Memory =====") ordered_items = [item for _, item, _ in results] return MemoryRetrievalResult(formatted_text="\n".join(formatted), items=ordered_items) def update(self, payload: MemoryWritePayload) -> None: for attachment in self.attachments: if not attachment.write: continue memory = self.memories.get(attachment.name) if not memory: continue memory.update(payload) memory.save() def _score_memory(self, memory_item: MemoryItem, query: str) -> float: current_time = time.time() age_hours = (current_time - (memory_item.timestamp or current_time)) / 3600 time_decay = max(0.1, 1.0 - age_hours / (24 * 30)) length = len(memory_item.content_summary) if length < 20: length_factor = 0.5 elif length > 200: length_factor = 0.8 else: length_factor = 1.0 query_words = set(query.lower().split()) content_words = set(memory_item.content_summary.lower().split()) relevance = len(query_words & content_words) / len(query_words) if query_words else 0.0 return 0.7 * time_decay * length_factor + 0.3 * relevance
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/node/agent/memory/memory_base.py", "license": "Apache License 2.0", "lines": 263, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:runtime/node/agent/memory/registry.py
"""Registry for memory store implementations.""" from dataclasses import dataclass from importlib import import_module from typing import Any, Callable, Dict, Type from schema_registry import register_memory_store_schema from utils.registry import Registry, RegistryEntry, RegistryError from entity.configs import MemoryStoreConfig from runtime.node.agent.memory.memory_base import MemoryBase memory_store_registry = Registry("memory_store") _BUILTINS_LOADED = False @dataclass(slots=True) class MemoryStoreRegistration: name: str config_cls: Type[Any] factory: Callable[["MemoryStoreConfig"], "MemoryBase"] summary: str | None = None def _ensure_builtins_loaded() -> None: global _BUILTINS_LOADED if not _BUILTINS_LOADED: import_module("runtime.node.agent.memory.builtin_stores") _BUILTINS_LOADED = True def register_memory_store( name: str, *, config_cls: Type[Any], factory: Callable[["MemoryStoreConfig"], "MemoryBase"], summary: str | None = None, ) -> None: if name in memory_store_registry.names(): raise RegistryError(f"Memory store '{name}' already registered") entry = MemoryStoreRegistration(name=name, config_cls=config_cls, factory=factory, summary=summary) memory_store_registry.register(name, target=entry) register_memory_store_schema(name, config_cls=config_cls, summary=summary) def get_memory_store_registration(name: str) -> MemoryStoreRegistration: _ensure_builtins_loaded() entry: RegistryEntry = memory_store_registry.get(name) registration = entry.load() if not isinstance(registration, MemoryStoreRegistration): raise RegistryError(f"Entry '{name}' is not a MemoryStoreRegistration") return registration def iter_memory_store_registrations() -> Dict[str, MemoryStoreRegistration]: _ensure_builtins_loaded() return {name: entry.load() for name, entry in memory_store_registry.items()} __all__ = [ "memory_store_registry", "MemoryStoreRegistration", "register_memory_store", "get_memory_store_registration", "iter_memory_store_registrations", ]
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/node/agent/memory/registry.py", "license": "Apache License 2.0", "lines": 50, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:runtime/node/agent/memory/simple_memory.py
import hashlib import json import os import re import time from typing import List from entity.configs import MemoryStoreConfig from entity.configs.node.memory import SimpleMemoryConfig from runtime.node.agent.memory.memory_base import ( MemoryBase, MemoryContentSnapshot, MemoryItem, MemoryWritePayload, ) import faiss import numpy as np class SimpleMemory(MemoryBase): def __init__(self, store: MemoryStoreConfig): config = store.as_config(SimpleMemoryConfig) if not config: raise ValueError("SimpleMemory requires a simple memory store configuration") super().__init__(store) self.config = config # Optimized prompt templates for clarity self.retrieve_prompt = "Query: {input}" self.update_prompt = "Input: {input}\nOutput: {output}" self.memory_path = self.config.memory_path # auto # Content extraction configuration self.max_content_length = 500 # Maximum content length self.min_content_length = 20 # Minimum content length def _extract_key_content(self, content: str) -> str: """Extract key content while stripping redundant text.""" # Remove redundant whitespace content = re.sub(r'\s+', ' ', content.strip()) # Skip heavy processing for short snippets if len(content) <= 100: return content # Remove common templated instructions content = re.sub(r'(?:Agent|Model) Role:.*?\n\n', '', content) content = re.sub("(?:You are|\u4f60\u662f\u4e00\u4f4d).*?(?:,|\uff0c)", '', content) content = re.sub("(?:User will input|\u7528\u6237\u4f1a\u8f93\u5165).*?(?:,|\uff0c)", '', content) content = re.sub("(?:You need to|\u4f60\u9700\u8981).*?(?:,|\uff0c)", '', content) # Extract key sentences while skipping very short ones sentences = re.split(r'[\u3002\uff01\uff1f\uff1b\n]', content) key_sentences = [s.strip() for s in sentences if len(s.strip()) >= self.min_content_length] # Fallback to original content when no sentence survives if not key_sentences: return content[:self.max_content_length] # Recombine and limit the number of sentences (max 3) extracted_content = 
'\u3002'.join(key_sentences[:3]) if len(extracted_content) > self.max_content_length: extracted_content = extracted_content[:self.max_content_length] + "..." return extracted_content.strip() def _generate_content_hash(self, content: str) -> str: """Generate a content hash used for deduplication.""" return hashlib.md5(content.encode('utf-8')).hexdigest()[:8] def load(self) -> None: if self.memory_path and os.path.exists(self.memory_path) and self.memory_path.endswith(".json"): try: with open(self.memory_path) as file: raw_data = json.load(file) contents = [] for raw in raw_data: try: contents.append(MemoryItem.from_dict(raw)) except Exception: continue self.contents = contents except Exception: self.contents = [] def save(self) -> None: if self.memory_path and self.memory_path.endswith(".json"): os.makedirs(os.path.dirname(self.memory_path), exist_ok=True) with open(self.memory_path, "w") as file: json.dump([item.to_dict() for item in self.contents], file, indent=2, ensure_ascii=False) def retrieve( self, agent_role: str, query: MemoryContentSnapshot, top_k: int, similarity_threshold: float, ) -> List[MemoryItem]: if self.count_memories() == 0 or not self.embedding: return [] # Build an optimized query for retrieval query_text = self.retrieve_prompt.format(input=query.text) query_text = self._extract_key_content(query_text) inputs_embedding = self.embedding.get_embedding(query_text) if isinstance(inputs_embedding, list): inputs_embedding = np.array(inputs_embedding, dtype=np.float32) inputs_embedding = inputs_embedding.reshape(1, -1) faiss.normalize_L2(inputs_embedding) memory_embeddings = [] valid_items = [] for item in self.contents: if item.embedding is not None: memory_embeddings.append(item.embedding) valid_items.append(item) if not memory_embeddings: return [] memory_embeddings = np.array(memory_embeddings, dtype=np.float32) # Use an efficient inner-product index index = faiss.IndexFlatIP(memory_embeddings.shape[1]) index.add(memory_embeddings) # Retrieve 
extra candidates for reranking retrieval_k = min(top_k * 3, len(valid_items)) similarities, indices = index.search(inputs_embedding, retrieval_k) # Filter and rerank the candidates candidates = [] for i in range(len(indices[0])): idx = indices[0][i] similarity = similarities[0][i] if idx != -1 and similarity >= similarity_threshold: item = valid_items[idx] # Calculate an auxiliary semantic similarity score semantic_score = self._calculate_semantic_similarity(query_text, item.content_summary) # Combine similarity metrics combined_score = 0.7 * similarity + 0.3 * semantic_score candidates.append((item, combined_score)) # Sort by the combined score and return the top_k items candidates.sort(key=lambda x: x[1], reverse=True) results = [item for item, score in candidates[:top_k]] return results def _calculate_semantic_similarity(self, query: str, content: str) -> float: """Compute a semantic similarity value.""" # Enhanced semantic similarity computation query_lower = query.lower() content_lower = content.lower() # 1. Token overlap (Jaccard similarity) query_words = set(query_lower.split()) content_words = set(content_lower.split()) if not query_words or not content_words: jaccard_sim = 0.0 else: intersection = query_words & content_words union = query_words | content_words jaccard_sim = len(intersection) / len(union) if union else 0.0 # 2. Longest common subsequence similarity lcs_sim = self._calculate_lcs_similarity(query_lower, content_lower) # 3. Keyword match score keyword_sim = self._calculate_keyword_similarity(query_lower, content_lower) # 4. 
Length penalty factor (avoid overly short/long matches) length_factor = self._calculate_length_factor(query_lower, content_lower) # Weighted final score final_score = (0.4 * jaccard_sim + 0.3 * lcs_sim + 0.2 * keyword_sim + 0.1 * length_factor) return min(final_score, 1.0) def _calculate_lcs_similarity(self, s1: str, s2: str) -> float: """Compute longest common subsequence similarity.""" m, n = len(s1), len(s2) dp = [[0] * (n + 1) for _ in range(m + 1)] for i in range(1, m + 1): for j in range(1, n + 1): if s1[i-1] == s2[j-1]: dp[i][j] = dp[i-1][j-1] + 1 else: dp[i][j] = max(dp[i-1][j], dp[i][j-1]) lcs_length = dp[m][n] return lcs_length / max(len(s1), len(s2)) if max(len(s1), len(s2)) > 0 else 0.0 def _calculate_keyword_similarity(self, query: str, content: str) -> float: """Compute keyword match similarity.""" # Extract potential keywords (length >= 2) query_keywords = set(word for word in query.split() if len(word) >= 2) content_keywords = set(word for word in content.split() if len(word) >= 2) if not query_keywords: return 0.0 matches = query_keywords & content_keywords return len(matches) / len(query_keywords) def _calculate_length_factor(self, query: str, content: str) -> float: """Penalize matches that deviate too much in length.""" query_len = len(query) content_len = len(content) if content_len == 0: return 0.0 # Ideal length ratio range ideal_ratio_min = 0.5 ideal_ratio_max = 2.0 ratio = content_len / query_len if ideal_ratio_min <= ratio <= ideal_ratio_max: return 1.0 elif ratio < ideal_ratio_min: return ratio / ideal_ratio_min else: return max(0.1, ideal_ratio_max / ratio) def update(self, payload: MemoryWritePayload) -> None: if not self.embedding: return snapshot = payload.output_snapshot if not snapshot or not snapshot.text.strip(): return raw_content = self.update_prompt.format( input=payload.inputs_text, output=snapshot.text, ) extracted_content = self._extract_key_content(raw_content) if len(extracted_content) < self.min_content_length: return 
content_hash = self._generate_content_hash(extracted_content) for existing_item in self.contents: existing_hash = self._generate_content_hash(existing_item.content_summary) if existing_hash == content_hash: return embedding_vector = self.embedding.get_embedding(extracted_content) if isinstance(embedding_vector, list): embedding_vector = np.array(embedding_vector, dtype=np.float32) if embedding_vector is None: return embedding_array = np.array(embedding_vector, dtype=np.float32).reshape(1, -1) faiss.normalize_L2(embedding_array) metadata = { "agent_role": payload.agent_role, "input_preview": (payload.inputs_text or "")[:200], "content_length": len(extracted_content), "attachments": snapshot.attachment_overview(), } memory_item = MemoryItem( id=f"{content_hash}_{int(time.time())}", content_summary=extracted_content, metadata=metadata, embedding=embedding_array.tolist()[0], input_snapshot=payload.input_snapshot, output_snapshot=snapshot, ) self.contents.append(memory_item) max_memories = 1000 if len(self.contents) > max_memories: self.contents = self.contents[-max_memories:]
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/node/agent/memory/simple_memory.py", "license": "Apache License 2.0", "lines": 229, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:runtime/node/agent/providers/base.py
"""Abstract base classes for agent providers.""" from abc import ABC, abstractmethod from typing import Any, Dict, List, Optional from entity.configs import AgentConfig from entity.messages import Message from schema_registry import register_model_provider_schema from entity.tool_spec import ToolSpec from runtime.node.agent.providers.response import ModelResponse from utils.token_tracker import TokenUsage from utils.registry import Registry class ModelProvider(ABC): """Abstract base class for all agent providers.""" def __init__(self, config: AgentConfig): """ Initialize the agent provider with configuration. Args: config: Agent configuration instance """ self.config = config self.base_url = config.base_url self.api_key = config.api_key self.model_name = config.name if isinstance(config.name, str) else str(config.name) self.provider = config.provider self.params = config.params or {} @abstractmethod def create_client(self): """ Create and return the appropriate client for this provider. Returns: Client instance for making API calls """ pass @abstractmethod def call_model( self, client, conversation: List[Message], timeline: List[Any], tool_specs: Optional[List[ToolSpec]] = None, **kwargs, ) -> ModelResponse: """ Call the model with the given messages and parameters. Args: client: Provider-specific client instance conversation: List of messages in the conversation tool_specs: Tool specifications available for this call **kwargs: Additional parameters for the model call Returns: ModelResponse containing content and potentially tool calls """ pass @abstractmethod def extract_token_usage(self, response: Any) -> TokenUsage: """ Extract token usage from the API response. 
Args: response: Raw API response from the model call Returns: TokenUsage instance with token counts """ pass _provider_registry = Registry("agent_provider") class ProviderRegistry: """Registry facade for agent providers.""" @classmethod def register( cls, name: str, provider_class: type, *, label: str | None = None, summary: str | None = None, ) -> None: metadata = { "label": label, "summary": summary, } # Drop None values so schema consumers don't need to filter. metadata = {key: value for key, value in metadata.items() if value is not None} _provider_registry.register(name, target=provider_class, metadata=metadata) register_model_provider_schema(name, label=label, summary=summary) @classmethod def get_provider(cls, name: str) -> type | None: try: entry = _provider_registry.get(name) except Exception: return None return entry.load() @classmethod def list_providers(cls) -> List[str]: return list(_provider_registry.names()) @classmethod def iter_metadata(cls) -> Dict[str, Dict[str, Any]]: return {name: dict(entry.metadata or {}) for name, entry in _provider_registry.items()}
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/node/agent/providers/base.py", "license": "Apache License 2.0", "lines": 95, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:runtime/node/agent/providers/builtin_providers.py
"""Register built-in agent providers.""" from runtime.node.agent.providers.base import ProviderRegistry from runtime.node.agent.providers.openai_provider import OpenAIProvider ProviderRegistry.register( "openai", OpenAIProvider, label="OpenAI", summary="OpenAI models via the official OpenAI SDK (responses API)", ) try: from runtime.node.agent.providers.gemini_provider import GeminiProvider except ImportError: GeminiProvider = None if GeminiProvider is not None: ProviderRegistry.register( "gemini", GeminiProvider, label="Google Gemini", summary="Google Gemini models via google-genai", ) else: print("Gemini provider not registered: google-genai library not found.")
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/node/agent/providers/builtin_providers.py", "license": "Apache License 2.0", "lines": 22, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:runtime/node/agent/providers/gemini_provider.py
"""Gemini provider implementation.""" import base64 import binascii import json import os import uuid from typing import Any, Dict, List, Optional, Sequence, Tuple from google import genai from google.genai import types as genai_types from google.genai.types import GenerateContentResponse from entity.messages import ( AttachmentRef, FunctionCallOutputEvent, Message, MessageBlock, MessageBlockType, MessageRole, ToolCallPayload, ) from entity.tool_spec import ToolSpec from runtime.node.agent import ModelProvider from runtime.node.agent import ModelResponse from utils.token_tracker import TokenUsage class GeminiProvider(ModelProvider): """Gemini provider implementation.""" CSV_INLINE_CHAR_LIMIT = 200_000 CSV_INLINE_SIZE_THRESHOLD_BYTES = 3 * 1024 * 1024 # 3 MB def create_client(self): """ Create and return the Gemini client. """ client_kwargs: Dict[str, Any] = {} if self.api_key: client_kwargs["api_key"] = self.api_key base_url = (self.base_url or "").strip() http_options = self._build_http_options(base_url) if http_options: client_kwargs["http_options"] = http_options return genai.Client(**client_kwargs) def call_model( self, client, conversation: List[Message], timeline: List[Any], tool_specs: Optional[List[ToolSpec]] = None, **kwargs, ) -> ModelResponse: """ Call the Gemini model using the unified conversation timeline. 
""" contents, system_instruction = self._build_contents(timeline) config = self._build_generation_config(system_instruction, tool_specs, kwargs) # print(contents) # print(config) response: GenerateContentResponse = client.models.generate_content( model=self.model_name, contents=contents, config=config, ) # print(response) self._track_token_usage(response) self._append_response_contents(timeline, response) message = self._deserialize_response(response) return ModelResponse(message=message, raw_response=response) def extract_token_usage(self, response: Any) -> TokenUsage: """Extract token usage from Gemini usage metadata.""" usage_metadata = getattr(response, "usage_metadata", None) if not usage_metadata: return TokenUsage() prompt_tokens = getattr(usage_metadata, "prompt_token_count", None) or 0 candidate_tokens = getattr(usage_metadata, "candidates_token_count", None) or 0 total_tokens = getattr(usage_metadata, "total_token_count", None) cached_tokens = getattr(usage_metadata, "cached_content_token_count", None) metadata = { "prompt_token_count": prompt_tokens, "candidates_token_count": candidate_tokens, } if total_tokens is not None: metadata["total_token_count"] = total_tokens if cached_tokens is not None: metadata["cached_content_token_count"] = cached_tokens return TokenUsage( input_tokens=prompt_tokens, output_tokens=candidate_tokens, total_tokens=total_tokens or (prompt_tokens + candidate_tokens), metadata=metadata, ) # --------------------------------------------------------------------- # Serialization helpers # --------------------------------------------------------------------- def _build_contents( self, timeline: List[Any], ) -> Tuple[List[genai_types.Content], Optional[str]]: contents: List[genai_types.Content] = [] system_prompts: List[str] = [] for item in timeline: if isinstance(item, Message): if item.role is MessageRole.SYSTEM: text = item.text_content().strip() if text: system_prompts.append(text) continue 
contents.append(self._message_to_content(item)) continue if isinstance(item, FunctionCallOutputEvent): contents.append(self._function_output_event_to_content(item)) continue if isinstance(item, genai_types.Content): contents.append(item) if not contents: contents.append( genai_types.Content( role="user", parts=[genai_types.Part(text="")], ) ) system_instruction = "\n\n".join(system_prompts) if system_prompts else None return contents, system_instruction def _append_response_contents(self, timeline: List[Any], response: Any) -> None: candidates = getattr(response, "candidates", None) if not candidates: return for candidate in candidates: content = getattr(candidate, "content", None) if content: timeline.append(content) def _message_to_content(self, message: Message) -> genai_types.Content: role = self._map_role(message.role) if message.role is MessageRole.TOOL: part = self._build_tool_response_part(message) return genai_types.Content(role="user", parts=[part]) parts: List[genai_types.Part] = [] for block in message.blocks(): parts.extend(self._block_to_parts(block)) if not parts: text = message.text_content() parts.append(genai_types.Part(text=text)) return genai_types.Content(role=role, parts=parts) def _function_output_event_to_content( self, event: FunctionCallOutputEvent, ) -> genai_types.Content: function_name = event.function_name or event.call_id or "tool" payload: Dict[str, Any] = {} function_result_parts: List[genai_types.FunctionResponsePart] = [] result_texts: List[str] = [] if event.output_blocks: for block in event.output_blocks: # Describe the block for the text result desc = self._describe_block(block) if desc: result_texts.append(desc) if self._block_has_attachment(block): # Check if we should inline this attachment as text if self._should_inline_attachment_as_text(block): text_content = self._read_attachment_text(block.attachment) if text_content: result_texts.append(f"\n[Attachment Content: {block.attachment.name}]\n{text_content}") continue # 
Otherwise treat as binary part general_parts = self._block_to_parts(block) function_result_parts.extend(self._general_parts_to_function_response_parts(general_parts)) else: if event.output_text: result_texts.append(event.output_text) payload["result"] = "\n".join(result_texts) function_part = genai_types.Part.from_function_response( name=function_name, response=payload or {"result": ""}, parts=function_result_parts or None ) parts: List[genai_types.Part] = [function_part] return genai_types.Content(role="user", parts=parts) def _should_inline_attachment_as_text(self, block: MessageBlock) -> bool: if not block.attachment: return False mime = (block.attachment.mime_type or "").lower() return ( mime.startswith("text/") or mime == "application/json" or mime.endswith("+json") or mime.endswith("+xml") ) def _read_attachment_text(self, attachment: AttachmentRef) -> Optional[str]: data_bytes = self._read_attachment_bytes(attachment) return self._bytes_to_text(data_bytes) def _general_parts_to_function_response_parts(self, parts: List[genai_types.Part]) -> List[genai_types.FunctionResponsePart]: function_response_parts: List[genai_types.FunctionResponsePart] = [] for part in parts: if part.inline_data: # Convert inline_data (bytes) to base64 data URI and use from_uri function_response_parts.append( genai_types.FunctionResponsePart.from_bytes(data=part.inline_data.data, mime_type=part.inline_data.mime_type or "application/octet-stream") ) if part.file_data: function_response_parts.append( genai_types.FunctionResponsePart.from_uri(file_uri=part.file_data.file_uri, mime_type=part.file_data.mime_type or "application/octet-stream") ) return function_response_parts def _build_tool_response_part(self, message: Message) -> genai_types.Part: tool_name = message.metadata.get("tool_name") if isinstance(message.metadata, dict) else None tool_name = tool_name or message.tool_call_id or "tool" payload, block_parts = self._serialize_tool_message_payload(message) return genai_types.Part( 
function_response=genai_types.FunctionResponse( name=tool_name, response=payload, parts=block_parts or None, ) ) def _block_has_attachment(self, block: Any) -> bool: return isinstance(block, MessageBlock) and block.attachment is not None def _serialize_tool_message_payload(self, message: Message) -> Tuple[Dict[str, Any], List[genai_types.FunctionResponsePart]]: content = message.content blocks: List[MessageBlock] = [] if isinstance(content, str): stripped = content.strip() if stripped: try: payload = json.loads(stripped) except json.JSONDecodeError: payload = {"result": stripped} else: payload = {"result": ""} return payload, [] if isinstance(content, list): blocks_payload = [] for block in content: if isinstance(block, MessageBlock): blocks_payload.append(block.to_dict()) blocks.append(block) elif isinstance(block, dict): blocks_payload.append(block) try: blocks.append(MessageBlock.from_dict(block)) except Exception: continue parts = self._blocks_to_function_parts(blocks) return {"blocks": blocks_payload, "result": message.text_content()}, parts parts = self._blocks_to_function_parts(blocks) return {"result": message.text_content()}, parts def _describe_block(self, block: Any) -> str: if isinstance(block, MessageBlock): return block.describe() if isinstance(block, dict): text = block.get("text") if text: return str(text) return str(block) def _block_to_parts(self, block: MessageBlock) -> List[genai_types.Part]: if block.type is MessageBlockType.TEXT: return [genai_types.Part(text=block.text or "")] if block.type is MessageBlockType.FILE: csv_text = self._maybe_inline_large_csv(block) if csv_text is not None: return [genai_types.Part(text=csv_text)] if block.type in ( MessageBlockType.IMAGE, MessageBlockType.AUDIO, MessageBlockType.VIDEO, MessageBlockType.FILE, ): media_part = self._attachment_block_to_part(block) return [media_part] if media_part else [] if block.type is MessageBlockType.DATA: data_payload = block.data or {} text = block.text or 
json.dumps(data_payload, ensure_ascii=False) return [genai_types.Part(text=text)] return [] def _maybe_inline_large_csv(self, block: MessageBlock) -> Optional[str]: """Convert large CSV attachments to inline text to avoid Gemini upload size limits.""" attachment = block.attachment if not attachment: return None mime = (attachment.mime_type or "").lower() name = (attachment.name or "").lower() if "text/csv" not in mime and not name.endswith(".csv"): return None if attachment.remote_file_id: return None threshold = getattr( self, "csv_inline_size_threshold_bytes", self.CSV_INLINE_SIZE_THRESHOLD_BYTES, ) size_bytes = attachment.size data_bytes: Optional[bytes] = None if size_bytes is None: data_bytes = self._read_attachment_bytes(attachment) if data_bytes is None: return None size_bytes = len(data_bytes) if size_bytes is None or size_bytes <= threshold: return None if data_bytes is None: data_bytes = self._read_attachment_bytes(attachment) if data_bytes is None: return None text = self._bytes_to_text(data_bytes) if text is None: return None char_limit = getattr(self, "csv_inline_char_limit", self.CSV_INLINE_CHAR_LIMIT) truncated = False if len(text) > char_limit: text = text[:char_limit] truncated = True display_name = attachment.name or attachment.attachment_id or "attachment.csv" suffix = f"\n\n[truncated after {char_limit} characters]" if truncated else "" return f"CSV file '{display_name}' (converted from >3MB upload):\n{text}{suffix}" def _bytes_to_text(self, data_bytes: Optional[bytes]) -> Optional[str]: if data_bytes is None: return None try: return data_bytes.decode("utf-8") except UnicodeDecodeError: return data_bytes.decode("utf-8", errors="replace") def _attachment_block_to_part(self, block: MessageBlock) -> Optional[genai_types.Part]: attachment = block.attachment if not attachment: return None metadata = attachment.metadata or {} gemini_file_uri = metadata.get("gemini_file_uri") or attachment.remote_file_id mime_type = attachment.mime_type or 
self._guess_mime_from_block(block) if gemini_file_uri: return genai_types.Part( file_data=genai_types.FileData( file_uri=gemini_file_uri, mime_type=mime_type, # display_name=attachment.name ) ) blob_data = self._read_attachment_bytes(attachment) if blob_data is None: return None return genai_types.Part( inline_data=genai_types.Blob( mime_type=mime_type or "application/octet-stream", data=blob_data, # display_name=attachment.name, ) ) def _blocks_to_function_parts( self, blocks: Optional[Sequence[Any]], ) -> List[genai_types.FunctionResponsePart]: if not blocks: return [] parts: List[genai_types.FunctionResponsePart] = [] for block in blocks: if not isinstance(block, MessageBlock): if isinstance(block, dict): try: block = MessageBlock.from_dict(block) except Exception: continue else: continue attachment = block.attachment if not attachment: continue mime_type = attachment.mime_type or self._guess_mime_from_block(block) file_uri = (attachment.metadata or {}).get("gemini_file_uri") or attachment.remote_file_id if file_uri: parts.append( genai_types.FunctionResponsePart( file_data=genai_types.FunctionResponseFileData( file_uri=file_uri, mime_type=mime_type, display_name=attachment.name, ) ) ) continue data_bytes = self._read_attachment_bytes(attachment) if not data_bytes: continue parts.append( genai_types.FunctionResponsePart( inline_data=genai_types.FunctionResponseBlob( mime_type=mime_type or "application/octet-stream", data=data_bytes, display_name=attachment.name, ) ) ) return parts def _coerce_message_blocks(self, payload: Any) -> List[MessageBlock]: if not isinstance(payload, Sequence) or isinstance(payload, (str, bytes, bytearray)): return [] blocks: List[MessageBlock] = [] for item in payload: if isinstance(item, MessageBlock): blocks.append(item) elif isinstance(item, dict): try: blocks.append(MessageBlock.from_dict(item)) except Exception: continue return blocks def _encode_thought_signature(self, value: Any) -> Optional[str]: if value is None: return None 
if isinstance(value, bytes): return base64.b64encode(value).decode("ascii") try: return str(value) except Exception: return None def _read_attachment_bytes(self, attachment: AttachmentRef) -> Optional[bytes]: if attachment.data_uri: decoded = self._decode_data_uri(attachment.data_uri) if decoded is not None: return decoded if attachment.local_path and os.path.exists(attachment.local_path): try: with open(attachment.local_path, "rb") as handle: return handle.read() except OSError: return None return None def _decode_data_uri(self, data_uri: str) -> Optional[bytes]: if not data_uri.startswith("data:"): return None header, _, data = data_uri.partition(",") if not _: return None if ";base64" in header: try: return base64.b64decode(data) except (ValueError, binascii.Error): return None return data.encode("utf-8") def _guess_mime_from_block(self, block: MessageBlock) -> str: if block.attachment and block.attachment.mime_type: return block.attachment.mime_type if block.type is MessageBlockType.IMAGE: return "image/png" if block.type is MessageBlockType.AUDIO: return "audio/mpeg" if block.type is MessageBlockType.VIDEO: return "video/mp4" return "application/octet-stream" def _map_role(self, role: MessageRole) -> str: if role is MessageRole.USER: return "user" if role is MessageRole.ASSISTANT: return "model" if role is MessageRole.TOOL: return "tool" return "user" # --------------------------------------------------------------------- # Config builders # --------------------------------------------------------------------- def _build_generation_config( self, system_instruction: Optional[str], tool_specs: Optional[List[ToolSpec]], call_params: Dict[str, Any], ) -> genai_types.GenerateContentConfig: params = dict(self.params or {}) params.update(call_params) config_kwargs: Dict[str, Any] = {} if system_instruction: config_kwargs["system_instruction"] = system_instruction for key in ( "temperature", "top_p", "top_k", "candidate_count", "max_output_tokens", 
"response_modalities", "stop_sequences", "seed", "presence_penalty", "frequency_penalty", ): if key in params: config_kwargs[key] = params.pop(key) safety_settings = params.pop("safety_settings", None) if safety_settings: config_kwargs["safety_settings"] = safety_settings image_config = params.pop("image_config", None) aspect_ratio = params.pop("aspect_ratio", None) if aspect_ratio: if image_config is None: image_config = {"aspect_ratio": aspect_ratio} elif isinstance(image_config, dict): image_config = dict(image_config) image_config.setdefault("aspect_ratio", aspect_ratio) elif isinstance(image_config, genai_types.ImageConfig): try: image_config.aspect_ratio = aspect_ratio except Exception: image_config = {"aspect_ratio": aspect_ratio} else: image_config = {"aspect_ratio": aspect_ratio} if image_config: config_kwargs["image_config"] = self._coerce_image_config(image_config) audio_config = params.pop("audio_config", None) if audio_config: config_kwargs["audio_config"] = audio_config video_config = params.pop("video_config", None) if video_config: config_kwargs["video_config"] = video_config tools = self._build_tools(tool_specs or []) if tools: config_kwargs["tools"] = tools tool_config_payload = params.pop("tool_config", None) function_calling_payload = params.pop("function_calling_config", None) if function_calling_payload: tool_config_payload = tool_config_payload or {} tool_config_payload["function_calling_config"] = function_calling_payload if tool_config_payload: config_kwargs["tool_config"] = self._coerce_tool_config(tool_config_payload) automatic_fn_calling = params.pop("automatic_function_calling", None) if automatic_fn_calling: config_kwargs["automatic_function_calling"] = self._coerce_automatic_function_calling( automatic_fn_calling ) return genai_types.GenerateContentConfig(**config_kwargs) def _build_http_options(self, base_url: str) -> Optional[genai_types.HttpOptions]: if not base_url: return None try: return 
genai_types.HttpOptions(base_url=base_url, timeout=4 * 60 * 1000) # 4 min except Exception: return None def _coerce_image_config(self, image_config: Any) -> Any: if isinstance(image_config, genai_types.ImageConfig): return image_config if isinstance(image_config, dict): try: return genai_types.ImageConfig(**image_config) except Exception: return image_config return image_config def _build_tools(self, tool_specs: List[ToolSpec]) -> List[genai_types.Tool]: if not tool_specs: return [] declarations = [] for spec in tool_specs: fn_payload = spec.to_gemini_function() parameters = fn_payload.get("parameters") or {"type": "object", "properties": {}} if 'title' in parameters: parameters.pop('title') # Replace 'title' with 'description' in properties for prop_name, prop_value in parameters.get('properties', {}).items(): if isinstance(prop_value, dict) and 'title' in prop_value: prop_value['description'] = prop_value.pop('title') declarations.append( genai_types.FunctionDeclaration( name=fn_payload.get("name", ""), description=fn_payload.get("description") or "", parameters=parameters, ) ) return [genai_types.Tool(function_declarations=declarations)] def _coerce_tool_config(self, payload: Any) -> genai_types.ToolConfig: if isinstance(payload, genai_types.ToolConfig): return payload kwargs: Dict[str, Any] = {} if isinstance(payload, dict): fn_payload = payload.get("function_calling_config") if fn_payload: kwargs["function_calling_config"] = self._coerce_function_calling_config(fn_payload) return genai_types.ToolConfig(**kwargs) def _coerce_function_calling_config(self, payload: Any) -> genai_types.FunctionCallingConfig: if isinstance(payload, genai_types.FunctionCallingConfig): return payload if isinstance(payload, str): return genai_types.FunctionCallingConfig(mode=payload) if isinstance(payload, dict): return genai_types.FunctionCallingConfig(**payload) raise ValueError("Invalid function calling configuration payload") def _coerce_automatic_function_calling(self, payload: 
Any) -> Any: config_cls = getattr(genai_types, "AutomaticFunctionCallingConfig", None) if config_cls is None: raise ValueError("Automatic function calling config not supported in current SDK version") if isinstance(payload, config_cls): return payload if isinstance(payload, dict): return config_cls(**payload) raise ValueError("Invalid automatic function calling config payload") # --------------------------------------------------------------------- # Response parsing # --------------------------------------------------------------------- def _deserialize_response(self, response: Any) -> Message: candidate = self._select_primary_candidate(response) if not candidate: return Message(role=MessageRole.ASSISTANT, content="") content = getattr(candidate, "content", None) if not content: return Message(role=MessageRole.ASSISTANT, content=response.text if hasattr(response, "text") else "") blocks, tool_calls = self._parse_candidate_parts(getattr(content, "parts", []) or []) if not blocks: fallback = getattr(response, "text", None) or "" blocks = [MessageBlock(MessageBlockType.TEXT, text=fallback)] if fallback else [] return Message( role=MessageRole.ASSISTANT, content=blocks or "", tool_calls=tool_calls, ) def _select_primary_candidate(self, response: Any) -> Any: candidates = getattr(response, "candidates", None) or [] if not candidates: return None return candidates[0] def _parse_candidate_parts( self, parts: Sequence[Any], ) -> Tuple[List[MessageBlock], List[ToolCallPayload]]: blocks: List[MessageBlock] = [] tool_calls: List[ToolCallPayload] = [] for part in parts: if hasattr(part, "text") and part.text is not None: blocks.append(MessageBlock(MessageBlockType.TEXT, text=part.text)) continue function_call = getattr(part, "function_call", None) if function_call: thought_signature = getattr(part, "thought_signature", None) tool_calls.append( self._build_tool_call_payload(function_call, thought_signature=thought_signature) ) continue inline_data = getattr(part, 
"inline_data", None) if inline_data: blocks.append(self._build_inline_block(inline_data)) continue file_data = getattr(part, "file_data", None) if file_data: blocks.append(self._build_file_block(file_data)) continue function_response = getattr(part, "function_response", None) if function_response: blocks.append( MessageBlock( type=MessageBlockType.DATA, text=json.dumps(function_response.response or {}, ensure_ascii=False), data={ "function_name": getattr(function_response, "name", ""), "response": function_response.response or {}, }, ) ) continue return blocks, tool_calls def _build_tool_call_payload(self, fn_call: Any, *, thought_signature: Any = None) -> ToolCallPayload: call_id = getattr(fn_call, "name", "") or uuid.uuid4().hex arguments = getattr(fn_call, "args", {}) or {} try: arg_str = json.dumps(arguments, ensure_ascii=False) except (TypeError, ValueError): arg_str = str(arguments) metadata: Dict[str, Any] = {} encoded_signature = self._encode_thought_signature(thought_signature) if encoded_signature: metadata["gemini_thought_signature_b64"] = encoded_signature return ToolCallPayload( id=call_id, function_name=getattr(fn_call, "name", "") or call_id, arguments=arg_str, type="function", metadata=metadata, ) def _build_inline_block(self, blob: Any) -> MessageBlock: mime_type = getattr(blob, "mime_type", "") or "application/octet-stream" data_bytes = getattr(blob, "data", None) or b"" data_uri = self._encode_data_uri(mime_type, data_bytes) block_type = self._block_type_from_mime(mime_type) return MessageBlock( type=block_type, attachment=AttachmentRef( attachment_id=uuid.uuid4().hex, mime_type=mime_type, data_uri=data_uri, metadata={"source": "gemini_inline"}, ), ) def _build_file_block(self, file_data: Any) -> MessageBlock: mime_type = getattr(file_data, "mime_type", None) file_uri = getattr(file_data, "file_uri", None) or getattr(file_data, "file", None) block_type = self._block_type_from_mime(mime_type or "") return MessageBlock( type=block_type, 
attachment=AttachmentRef( attachment_id=uuid.uuid4().hex, mime_type=mime_type, remote_file_id=file_uri, metadata={"gemini_file_uri": file_uri, "source": "gemini_file"}, ), ) def _block_type_from_mime(self, mime_type: str) -> MessageBlockType: if mime_type.startswith("image/"): return MessageBlockType.IMAGE if mime_type.startswith("audio/"): return MessageBlockType.AUDIO if mime_type.startswith("video/"): return MessageBlockType.VIDEO return MessageBlockType.FILE def _encode_data_uri(self, mime_type: str, data: bytes) -> str: encoded = base64.b64encode(data).decode("utf-8") return f"data:{mime_type};base64,{encoded}" # --------------------------------------------------------------------- # Token tracking # --------------------------------------------------------------------- def _track_token_usage(self, response: Any) -> None: token_tracker = getattr(self.config, "token_tracker", None) if not token_tracker: return usage = self.extract_token_usage(response) if usage.input_tokens == 0 and usage.output_tokens == 0 and not usage.metadata: return node_id = getattr(self.config, "node_id", "ALL") usage.node_id = node_id usage.model_name = self.model_name usage.workflow_id = token_tracker.workflow_id usage.provider = "gemini" token_tracker.record_usage(node_id, self.model_name, usage, provider="gemini")
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/node/agent/providers/gemini_provider.py", "license": "Apache License 2.0", "lines": 719, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:runtime/node/agent/providers/openai_provider.py
"""OpenAI provider implementation.""" import base64 import hashlib import binascii import os from typing import Any, Dict, List, Optional, Union from urllib.parse import unquote_to_bytes import openai from openai import OpenAI from entity.messages import ( AttachmentRef, FunctionCallOutputEvent, Message, MessageBlock, MessageBlockType, MessageRole, ToolCallPayload, ) from entity.tool_spec import ToolSpec from runtime.node.agent import ModelProvider from runtime.node.agent import ModelResponse from utils.token_tracker import TokenUsage class OpenAIProvider(ModelProvider): """OpenAI provider implementation.""" CSV_INLINE_CHAR_LIMIT = 200_000 # safeguard large attachments TEXT_INLINE_CHAR_LIMIT = 200_000 # safeguard large text/* attachments MAX_INLINE_FILE_BYTES = 50 * 1024 * 1024 # OpenAI function output limit (~50 MB) def create_client(self): """ Create and return the OpenAI client. Returns: OpenAI client instance with token tracking if available """ if self.base_url: return OpenAI( api_key=self.api_key, base_url=self.base_url, ) else: return OpenAI( api_key=self.api_key, ) def call_model( self, client: openai.Client, conversation: List[Message], timeline: List[Any], tool_specs: Optional[List[ToolSpec]] = None, **kwargs, ) -> ModelResponse: """ Call the OpenAI model with the given messages and parameters. """ # 1. Determine if we should use Chat Completions directly is_chat = self._is_chat_completions_mode(client) if is_chat: request_payload = self._build_chat_payload(conversation, tool_specs, kwargs) response = client.chat.completions.create(**request_payload) self._track_token_usage(response) self._append_chat_response_output(timeline, response) message = self._deserialize_chat_response(response) return ModelResponse(message=message, raw_response=response) # 2. 
Try Responses API with fallback request_payload = self._build_request_payload(timeline, tool_specs, kwargs) try: response = client.responses.create(**request_payload) self._track_token_usage(response) self._append_response_output(timeline, response) message = self._deserialize_response(response) return ModelResponse(message=message, raw_response=response) except Exception as e: new_request_payload = self._build_chat_payload(conversation, tool_specs, kwargs) response = client.chat.completions.create(**new_request_payload) self._track_token_usage(response) self._append_chat_response_output(timeline, response) message = self._deserialize_chat_response(response) return ModelResponse(message=message, raw_response=response) def _is_chat_completions_mode(self, client: Any) -> bool: """Determine if we should use standard chat completions instead of responses API.""" protocol = self.params.get("protocol") if protocol == "chat": return True if protocol == "responses": return False # Default to Responses API only if it exists on the client return not hasattr(client, "responses") def extract_token_usage(self, response: Any) -> TokenUsage: """ Extract token usage from the OpenAI API response. 
Args: response: OpenAI API response from the model call Returns: TokenUsage instance with token counts """ usage = getattr(response, "usage", None) if not usage: return TokenUsage() def _get(name: str) -> Any: if hasattr(usage, name): return getattr(usage, name) if isinstance(usage, dict): return usage.get(name) return None prompt_tokens = _get("prompt_tokens") completion_tokens = _get("completion_tokens") input_tokens = _get("input_tokens") output_tokens = _get("output_tokens") resolved_input = input_tokens if input_tokens is not None else prompt_tokens or 0 resolved_output = output_tokens if output_tokens is not None else completion_tokens or 0 total_tokens = _get("total_tokens") if total_tokens is None: total_tokens = (resolved_input or 0) + (resolved_output or 0) metadata = { "prompt_tokens": prompt_tokens or 0, "completion_tokens": completion_tokens or 0, "input_tokens": resolved_input or 0, "output_tokens": resolved_output or 0, "total_tokens": total_tokens or 0, } return TokenUsage( input_tokens=resolved_input or 0, output_tokens=resolved_output or 0, total_tokens=total_tokens or 0, metadata=metadata, ) def _track_token_usage(self, response: Any) -> None: """Record token usage if a tracker is attached to the config.""" token_tracker = getattr(self.config, "token_tracker", None) if not token_tracker: return usage = self.extract_token_usage(response) if usage.input_tokens == 0 and usage.output_tokens == 0 and not usage.metadata: return node_id = getattr(self.config, "node_id", "ALL") usage.node_id = node_id usage.model_name = self.model_name usage.workflow_id = token_tracker.workflow_id usage.provider = "openai" token_tracker.record_usage(node_id, self.model_name, usage, provider="openai") def _build_request_payload( self, timeline: List[Any], tool_specs: Optional[List[ToolSpec]], raw_params: Dict[str, Any], ) -> Dict[str, Any]: """Construct the Responses API payload from event timeline.""" params = dict(raw_params) max_tokens = params.pop("max_tokens", None) 
max_output_tokens = params.pop("max_output_tokens", None) if max_output_tokens is None and max_tokens is not None: max_output_tokens = max_tokens input_messages: List[Any] = [] for item in timeline: serialized = self._serialize_timeline_item(item) if serialized is not None: input_messages.append(serialized) if not input_messages: input_messages = [ { "role": "user", "content": [{"type": "input_text", "text": ""}], } ] payload: Dict[str, Any] = { "model": self.model_name, "input": input_messages, "temperature": params.pop("temperature", 0.7), "timeout": params.pop("timeout", 300), # 5 min } if max_output_tokens is not None: payload["max_output_tokens"] = max_output_tokens elif self.params.get("max_output_tokens"): payload["max_output_tokens"] = self.params["max_output_tokens"] user_tools = params.pop("tools", None) merged_tools: List[Any] = [] if isinstance(user_tools, list): merged_tools.extend(user_tools) elif user_tools is not None: raise ValueError("params.tools must be a list when provided") if tool_specs: merged_tools.extend(spec.to_openai_dict() for spec in tool_specs) if merged_tools: payload["tools"] = merged_tools tool_choice = params.pop("tool_choice", None) if tool_choice is not None: payload["tool_choice"] = tool_choice elif tool_specs: payload.setdefault("tool_choice", "auto") # Pass any remaining kwargs directly payload.update(params) return payload def _build_chat_payload( self, conversation: List[Message], tool_specs: Optional[List[ToolSpec]], raw_params: Dict[str, Any], ) -> Dict[str, Any]: """Construct standard Chat Completions API payload.""" params = dict(raw_params) max_output_tokens = params.pop("max_output_tokens", None) max_tokens = params.pop("max_tokens", None) if max_tokens is None and max_output_tokens is not None: max_tokens = max_output_tokens messages: List[Any] = [] for item in conversation: serialized = self._serialize_message_for_chat(item) if serialized is not None: messages.append(serialized) if not messages: messages = [{"role": 
"user", "content": ""}] payload: Dict[str, Any] = { "model": self.model_name, "messages": messages, "temperature": params.pop("temperature", 0.7), } if max_tokens is not None: payload["max_tokens"] = max_tokens elif self.params.get("max_tokens"): payload["max_tokens"] = self.params["max_tokens"] user_tools = params.pop("tools", None) merged_tools: List[Any] = [] if isinstance(user_tools, list): merged_tools.extend(user_tools) if tool_specs: for spec in tool_specs: merged_tools.append({ "type": "function", "function": { "name": spec.name, "description": spec.description, "parameters": spec.parameters or {"type": "object", "properties": {}}, } }) if merged_tools: payload["tools"] = merged_tools tool_choice = params.pop("tool_choice", None) if tool_choice is not None: payload["tool_choice"] = tool_choice elif tool_specs: payload.setdefault("tool_choice", "auto") payload.update(params) return payload def _serialize_timeline_item_for_chat(self, item: Any) -> Optional[Any]: if isinstance(item, Message): return self._serialize_message_for_chat(item) if isinstance(item, FunctionCallOutputEvent): return self._serialize_function_call_output_event_for_chat(item) if isinstance(item, dict): # basic conversion if it looks like a Responses output role = item.get("role") content = item.get("content") tool_calls = item.get("tool_calls") if role and (content or tool_calls): return { "role": role, "content": self._transform_blocks_for_chat(content) if isinstance(content, list) else content, "tool_calls": tool_calls } return None def _serialize_message_for_chat(self, message: Message) -> Dict[str, Any]: """Convert internal Message to standard Chat Completions schema.""" role_value = message.role.value blocks = message.blocks() if not blocks or message.role == MessageRole.TOOL: content = message.text_content() else: content = self._transform_blocks_for_chat(self._serialize_blocks(blocks, message.role)) payload: Dict[str, Any] = { "role": role_value, "content": content, } if 
message.name: payload["name"] = message.name if message.tool_call_id: payload["tool_call_id"] = message.tool_call_id if message.tool_calls: payload["tool_calls"] = [tc.to_openai_dict() for tc in message.tool_calls] return payload def _serialize_function_call_output_event_for_chat(self, event: FunctionCallOutputEvent) -> Dict[str, Any]: """Convert tool result to standard Chat Completions schema.""" text = event.output_text or "" if event.output_blocks: # simple concatenation for tool output in chat mode text = "\n".join(b.describe() for b in event.output_blocks) return { "role": "tool", "tool_call_id": event.call_id or "tool_call", "content": text, } def _transform_blocks_for_chat(self, blocks: List[Dict[str, Any]]) -> Union[str, List[Dict[str, Any]]]: """Convert Responses block types to Chat block types (e.g., input_text -> text).""" transformed: List[Dict[str, Any]] = [] for block in blocks: b_type = block.get("type", "") if b_type in ("input_text", "output_text"): transformed.append({"type": "text", "text": block.get("text", "")}) elif b_type in ("input_image", "output_image"): transformed.append({"type": "image_url", "image_url": {"url": block.get("image_url", "")}}) else: # Keep as is or drop if complex transformed.append(block) # If only one text block, return as string for better compatibility if len(transformed) == 1 and transformed[0]["type"] == "text": return transformed[0]["text"] return transformed def _deserialize_chat_response(self, response: Any) -> Message: """Convert Chat Completions output to internal Message.""" choices = self._get_attr(response, "choices") or [] if not choices: return Message(role=MessageRole.ASSISTANT, content="") choice = choices[0] msg = self._get_attr(choice, "message") tool_calls: List[ToolCallPayload] = [] tc_data = self._get_attr(msg, "tool_calls") if tc_data: for idx, tc in enumerate(tc_data): f_data = self._get_attr(tc, "function") or {} function_name = self._get_attr(f_data, "name") or "" arguments = 
self._get_attr(f_data, "arguments") or "" if not isinstance(arguments, str): arguments = str(arguments) call_id = self._get_attr(tc, "id") if not call_id: call_id = self._build_tool_call_id(function_name, arguments, fallback_prefix=f"tool_call_{idx}") tool_calls.append(ToolCallPayload( id=call_id, function_name=function_name, arguments=arguments, type="function" )) return Message( role=MessageRole.ASSISTANT, content=self._get_attr(msg, "content") or "", tool_calls=tool_calls ) def _append_chat_response_output(self, timeline: List[Any], response: Any) -> None: """Add chat response to timeline, preserving tool_calls (Chat API compatible).""" msg = response.choices[0].message assistant_msg = { "role": "assistant", "content": msg.content or "" } if getattr(msg, "tool_calls", None): assistant_msg["tool_calls"] = [] for idx, tc in enumerate(msg.tool_calls): function_name = tc.function.name arguments = tc.function.arguments or "" if not isinstance(arguments, str): arguments = str(arguments) call_id = tc.id or self._build_tool_call_id(function_name, arguments, fallback_prefix=f"tool_call_{idx}") assistant_msg["tool_calls"].append({ "id": call_id, "type": "function", "function": { "name": function_name, "arguments": arguments, }, }) timeline.append(assistant_msg) def _serialize_timeline_item(self, item: Any) -> Optional[Any]: if isinstance(item, Message): return self._serialize_message_for_responses(item) if isinstance(item, FunctionCallOutputEvent): return self._serialize_function_call_output_event(item) return item def _serialize_message_for_responses(self, message: Message) -> Dict[str, Any]: """Convert internal Message to Responses input schema.""" role_value = message.role.value content_blocks = self._serialize_content_blocks(message) payload: Dict[str, Any] = { "role": role_value, "content": content_blocks, } if message.name: payload["name"] = message.name if message.tool_call_id: payload["tool_call_id"] = message.tool_call_id return payload def 
_serialize_content_blocks(self, message: Message) -> List[Dict[str, Any]]: blocks = message.blocks() if not blocks: text = message.text_content() block_type = "output_text" if message.role is MessageRole.ASSISTANT else "input_text" return [{"type": block_type, "text": text}] return self._serialize_blocks(blocks, message.role) def _serialize_blocks(self, blocks: List[MessageBlock], role: MessageRole) -> List[Dict[str, Any]]: serialized: List[Dict[str, Any]] = [] for block in blocks: serialized.append(self._serialize_block(block, role)) return serialized def _serialize_block(self, block: MessageBlock, role: MessageRole) -> Dict[str, Any]: if block.type is MessageBlockType.TEXT: content_type = "output_text" if role is MessageRole.ASSISTANT else "input_text" return { "type": content_type, "text": block.text or "", } attachment = block.attachment if block.type is MessageBlockType.IMAGE: media_type = "output_image" if role is MessageRole.ASSISTANT else "input_image" return self._serialize_media_block(media_type, attachment) if block.type is MessageBlockType.AUDIO: media_type = "output_audio" if role is MessageRole.ASSISTANT else "input_audio" return self._serialize_media_block(media_type, attachment) if block.type is MessageBlockType.VIDEO: media_type = "output_video" if role is MessageRole.ASSISTANT else "input_video" return self._serialize_media_block(media_type, attachment) if block.type is MessageBlockType.FILE: inline_text = self._maybe_inline_text_file(block) if inline_text is not None: content_type = "output_text" if role is MessageRole.ASSISTANT else "input_text" return { "type": content_type, "text": inline_text, } return self._serialize_file_block(attachment, block) # Fallback: treat as text/data return { "type": "input_text", "text": block.describe(), } def _serialize_media_block( self, media_type: str, attachment: Optional[AttachmentRef], ) -> Dict[str, Any]: payload: Dict[str, Any] = {"type": media_type} if not attachment: return payload url_key = { 
"input_image": "image_url", "output_image": "image_url", "input_audio": "audio_url", "output_audio": "audio_url", "input_video": "video_url", "output_video": "video_url", }.get(media_type) if attachment.remote_file_id: payload["file_id"] = attachment.remote_file_id elif attachment.data_uri and url_key: payload[url_key] = attachment.data_uri elif attachment.local_path and url_key: payload[url_key] = self._make_data_uri_from_path(attachment.local_path, attachment.mime_type) return payload def _serialize_file_block( self, attachment: Optional[AttachmentRef], block: MessageBlock, ) -> Dict[str, Any]: payload: Dict[str, Any] = {"type": "input_file"} if attachment: if attachment.remote_file_id: payload["file_id"] = attachment.remote_file_id else: data_uri = attachment.data_uri if not data_uri and attachment.local_path: data_uri = self._make_data_uri_from_path(attachment.local_path, attachment.mime_type) if data_uri: payload["file_data"] = data_uri else: raise ValueError("Attachment missing file_id or data for input_file block") if attachment.name: payload["filename"] = attachment.name else: raise ValueError("File block requires an attachment reference") return payload def _maybe_inline_text_file(self, block: MessageBlock) -> Optional[str]: """Inline local text/* attachments to avoid unsupported file-type uploads.""" attachment = block.attachment if not attachment: return None mime = (attachment.mime_type or "").lower() name = (attachment.name or "").lower() is_json = mime in { "application/json", "application/jsonl", "application/x-ndjson", "application/ndjson", } or name.endswith((".json", ".jsonl", ".ndjson")) if not (mime.startswith("text/") or is_json): return None if attachment.remote_file_id: return None # nothing to inline if already remote-only text = self._read_attachment_text(attachment) if text is None: return None is_csv = "text/csv" in mime or name.endswith(".csv") limit_attr = "csv_inline_char_limit" if is_csv else "text_inline_char_limit" default_limit = 
self.CSV_INLINE_CHAR_LIMIT if is_csv else self.TEXT_INLINE_CHAR_LIMIT limit = getattr(self, limit_attr, default_limit) truncated = False if len(text) > limit: text = text[:limit] truncated = True display_name = attachment.name or attachment.attachment_id or ("attachment.csv" if is_csv else "attachment.txt") suffix = "\n\n[truncated after %d characters]" % limit if truncated else "" if is_csv: return f"CSV file '{display_name}':\n{text}{suffix}" mime_display = attachment.mime_type or "text/*" return f"Text file '{display_name}' ({mime_display}):\n```text\n{text}\n```{suffix}" def _maybe_inline_csv(self, block: MessageBlock) -> Optional[str]: """Backward compatible alias for older call sites/tests.""" return self._maybe_inline_text_file(block) def _read_attachment_text(self, attachment: AttachmentRef) -> Optional[str]: data_bytes: Optional[bytes] = None if attachment.data_uri: data_bytes = self._decode_data_uri(attachment.data_uri) elif attachment.local_path and os.path.exists(attachment.local_path): try: with open(attachment.local_path, "rb") as handle: data_bytes = handle.read() except OSError: return None if data_bytes is None: return None try: return data_bytes.decode("utf-8") except UnicodeDecodeError: return data_bytes.decode("utf-8", errors="replace") def _decode_data_uri(self, data_uri: str) -> Optional[bytes]: if not data_uri.startswith("data:"): return None header, _, data = data_uri.partition(",") if not _: return None if ";base64" in header: try: return base64.b64decode(data) except (ValueError, binascii.Error): return None return unquote_to_bytes(data) def _deserialize_response(self, response: Any) -> Message: """Convert Responses API output to internal Message.""" output_blocks = getattr(response, "output", []) or [] assistant_blocks: List[MessageBlock] = [] tool_calls: List[ToolCallPayload] = [] for item in output_blocks: item_type = self._get_attr(item, "type") if item_type == "message": role_value = self._get_attr(item, "role") or "assistant" if 
role_value != "assistant": continue content_items = self._get_attr(item, "content") or [] parsed_blocks, parsed_calls = self._parse_output_content(content_items) assistant_blocks.extend(parsed_blocks) tool_calls.extend(parsed_calls) elif item_type == "image_generation_call": assistant_blocks.append(self._parse_image_generation_call(item)) elif item_type in {"tool_call", "function_call"}: parsed_call = self._parse_tool_call(item) if parsed_call: tool_calls.append(parsed_call) if not assistant_blocks: fallback_text = self._extract_fallback_text(response) if fallback_text: assistant_blocks.append(MessageBlock(MessageBlockType.TEXT, text=fallback_text)) return Message( role=MessageRole.ASSISTANT, content=assistant_blocks or "", tool_calls=tool_calls, ) def _extract_fallback_text(self, response: Any) -> Optional[str]: """Return the concatenated output text without triggering Responses errors.""" output = getattr(response, "output", None) if not output: return None try: return getattr(response, "output_text", None) except TypeError: # OpenAI SDK raises TypeError when output is None; treat as missing text return None except AttributeError: return None def _parse_output_content( self, content_items: List[Any], ) -> tuple[List[MessageBlock], List[ToolCallPayload]]: blocks: List[MessageBlock] = [] tool_calls: List[ToolCallPayload] = [] for part in content_items: part_type = self._get_attr(part, "type") if part_type in {"output_text", "text"}: blocks.append(MessageBlock(MessageBlockType.TEXT, text=self._get_attr(part, "text") or "")) elif part_type in {"output_image", "image"}: blocks.append( MessageBlock( type=MessageBlockType.IMAGE, attachment=AttachmentRef( attachment_id=self._get_attr(part, "id") or "", data_uri=self._get_attr(part, "image_base64"), metadata=self._get_attr(part, "metadata") or {}, ), ) ) elif part_type in {"tool_call", "function_call"}: parsed = self._parse_tool_call(part) if parsed: tool_calls.append(parsed) else: blocks.append( MessageBlock( 
type=MessageBlockType.DATA, text=str(self._get_attr(part, "text") or ""), data=self._maybe_to_dict(part), ) ) return blocks, tool_calls def _parse_image_generation_call(self, payload: Any) -> MessageBlock: status = self._get_attr(payload, "status") or "" if status != "completed": raise RuntimeError(f"Image generation call not completed (status={status})") image_b64 = self._get_attr(payload, "result") if not image_b64: raise RuntimeError("Image generation call returned empty result") attachment_id = self._get_attr(payload, "id") or "" data_uri = f"data:image/png;base64,{image_b64}" return MessageBlock( type=MessageBlockType.IMAGE, attachment=AttachmentRef( attachment_id=attachment_id, data_uri=data_uri, metadata={"source": "image_generation_call"}, ), ) def _parse_tool_call(self, payload: Any) -> Optional[ToolCallPayload]: function_payload = self._get_attr(payload, "function") or {} function_name = self._get_attr(function_payload, "name") or self._get_attr(payload, "name") or "" arguments = self._get_attr(function_payload, "arguments") or self._get_attr(payload, "arguments") or "" if not function_name: return None if isinstance(arguments, (dict, list)): try: import json arguments_str = json.dumps(arguments, ensure_ascii=False) except Exception: arguments_str = str(arguments) else: arguments_str = str(arguments) call_id = self._get_attr(payload, "call_id") or self._get_attr(payload, "id") or "" if not call_id: call_id = self._build_tool_call_id(function_name, arguments_str) return ToolCallPayload( id=call_id, function_name=function_name, arguments=arguments_str, type="function", ) def _build_tool_call_id(self, function_name: str, arguments: str, *, fallback_prefix: str = "tool_call") -> str: base = function_name or fallback_prefix payload = f"{base}:{arguments or ''}".encode("utf-8") digest = hashlib.md5(payload).hexdigest()[:8] return f"{base}_{digest}" def _get_attr(self, payload: Any, key: str) -> Any: if hasattr(payload, key): return getattr(payload, key) if 
isinstance(payload, dict): return payload.get(key) return None def _maybe_to_dict(self, payload: Any) -> Dict[str, Any]: if hasattr(payload, "model_dump"): try: return payload.model_dump() except Exception: return {} if isinstance(payload, dict): return payload return {} def _make_data_uri_from_path(self, path: str, mime_type: Optional[str]) -> str: mime = mime_type or "application/octet-stream" file_size = os.path.getsize(path) if file_size > self.MAX_INLINE_FILE_BYTES: raise ValueError( f"Attachment '{path}' is {file_size} bytes; exceeds inline limit of {self.MAX_INLINE_FILE_BYTES} bytes" ) with open(path, "rb") as handle: encoded = base64.b64encode(handle.read()).decode("utf-8") return f"data:{mime};base64,{encoded}" def _serialize_function_call_output_event( self, event: FunctionCallOutputEvent, ) -> Dict[str, Any]: payload: Dict[str, Any] = { "type": event.type, "call_id": event.call_id or event.function_name or "tool_call", } if event.output_blocks: payload["output"] = self._serialize_blocks(event.output_blocks, MessageRole.TOOL) else: text = event.output_text or "" payload["output"] = [ { "type": "input_text", "text": text, } ] return payload def _append_response_output(self, timeline: List[Any], response: Any) -> None: output = getattr(response, "output", None) if not output: return timeline.extend(output)
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/node/agent/providers/openai_provider.py", "license": "Apache License 2.0", "lines": 701, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:runtime/node/agent/providers/response.py
"""Normalized provider response dataclasses.""" from dataclasses import dataclass from typing import Any from entity.messages import Message @dataclass class ModelResponse: """Represents a provider response with normalized message payload.""" message: Message raw_response: Any | None = None def has_tool_calls(self) -> bool: return bool(self.message.tool_calls) def to_dict(self) -> dict: """Return a simple dict representation for compatibility.""" payload = { "role": self.message.role.value, } if isinstance(self.message.content, list): payload["content"] = [ block.to_dict() if hasattr(block, "to_dict") else block for block in self.message.content # type: ignore[arg-type] ] else: payload["content"] = self.message.content if self.message.tool_calls: payload["tool_calls"] = [call.to_openai_dict() for call in self.message.tool_calls] if self.message.tool_call_id: payload["tool_call_id"] = self.message.tool_call_id if self.message.name: payload["name"] = self.message.name return payload def str_raw_response(self): return self.raw_response.__str__()
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/node/agent/providers/response.py", "license": "Apache License 2.0", "lines": 31, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:runtime/node/agent/thinking/builtin_thinking.py
"""Register built-in thinking modes.""" from entity.configs.node.thinking import ReflectionThinkingConfig, ThinkingConfig from runtime.node.agent.thinking.thinking_manager import ThinkingManagerBase from runtime.node.agent.thinking.self_reflection import SelfReflectionThinkingManager from runtime.node.agent.thinking.registry import ( register_thinking_mode, get_thinking_registration, ) register_thinking_mode( "reflection", config_cls=ReflectionThinkingConfig, manager_cls=SelfReflectionThinkingManager, summary="LLM reflects on its output and refine its output", ) class ThinkingManagerFactory: @staticmethod def get_thinking_manager(config: ThinkingConfig) -> ThinkingManagerBase: registration = get_thinking_registration(config.type) typed_config = config.as_config(registration.config_cls) if not typed_config: raise ValueError(f"Invalid thinking config for type '{config.type}'") return registration.manager_cls(typed_config)
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/node/agent/thinking/builtin_thinking.py", "license": "Apache License 2.0", "lines": 22, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:runtime/node/agent/thinking/registry.py
"""Registry for thinking managers.""" from dataclasses import dataclass from importlib import import_module from typing import Any, Dict, Type from schema_registry import register_thinking_schema from utils.registry import Registry, RegistryEntry, RegistryError from runtime.node.agent.thinking.thinking_manager import ThinkingManagerBase thinking_registry = Registry("thinking_mode") _BUILTINS_LOADED = False @dataclass(slots=True) class ThinkingRegistration: name: str config_cls: Type[Any] manager_cls: Type["ThinkingManagerBase"] summary: str | None = None def _ensure_builtins_loaded() -> None: global _BUILTINS_LOADED if not _BUILTINS_LOADED: import_module("runtime.node.agent.thinking.builtin_thinking") _BUILTINS_LOADED = True def register_thinking_mode( name: str, *, config_cls: Type[Any], manager_cls: Type["ThinkingManagerBase"], summary: str | None = None, ) -> None: if name in thinking_registry.names(): raise RegistryError(f"Thinking mode '{name}' already registered") entry = ThinkingRegistration(name=name, config_cls=config_cls, manager_cls=manager_cls, summary=summary) thinking_registry.register(name, target=entry) register_thinking_schema(name, config_cls=config_cls, summary=summary) def get_thinking_registration(name: str) -> ThinkingRegistration: _ensure_builtins_loaded() entry: RegistryEntry = thinking_registry.get(name) registration = entry.load() if not isinstance(registration, ThinkingRegistration): raise RegistryError(f"Entry '{name}' is not a ThinkingRegistration") return registration def iter_thinking_registrations() -> Dict[str, ThinkingRegistration]: _ensure_builtins_loaded() return {name: entry.load() for name, entry in thinking_registry.items()} __all__ = [ "thinking_registry", "ThinkingRegistration", "register_thinking_mode", "get_thinking_registration", "iter_thinking_registrations", ]
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/node/agent/thinking/registry.py", "license": "Apache License 2.0", "lines": 49, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:runtime/node/agent/thinking/self_reflection.py
from entity.configs import ReflectionThinkingConfig from entity.messages import Message, MessageRole from runtime.node.agent.thinking.thinking_manager import ( ThinkingManagerBase, AgentInvoker, ThinkingPayload, ) class SelfReflectionThinkingManager(ThinkingManagerBase): """ A simple implementation of thinking manager, named self-reflection. This part of the code is borrowed from ChatDev (https://github.com/OpenBMB/ChatDev) and adapted. """ def __init__(self, config: ReflectionThinkingConfig): super().__init__(config) self.before_gen_think_enabled = False self.after_gen_think_enabled = True self.base_prompt = """Here is a conversation between two roles: {conversations} {reflection_prompt}""" self.reflection_prompt = config.reflection_prompt or "Reflect on the given information and summarize key points in a few words." def _before_gen_think( self, agent_invoker: AgentInvoker, input_payload: ThinkingPayload, agent_role: str, memory: ThinkingPayload | None, ) -> tuple[str, bool]: ... def _after_gen_think( self, agent_invoker: AgentInvoker, input_payload: ThinkingPayload, agent_role: str, memory: ThinkingPayload | None, gen_payload: ThinkingPayload, ) -> tuple[str, bool]: conversations = [ f"SYSTEM: {agent_role}", f"USER: {input_payload.text}", f"ASSISTANT: {gen_payload.text}", ] if memory and memory.text: conversations = [memory.text] + conversations prompt = self.base_prompt.format(conversations="\n\n".join(conversations), reflection_prompt=self.reflection_prompt) reflection_message = agent_invoker( [Message(role=MessageRole.USER, content=prompt)] ) return reflection_message.text_content(), True
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/node/agent/thinking/self_reflection.py", "license": "Apache License 2.0", "lines": 47, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:runtime/node/agent/thinking/thinking_manager.py
from abc import abstractmethod, ABC from dataclasses import dataclass, field from typing import Any, Callable, Dict, List from entity.configs import ThinkingConfig from entity.messages import Message, MessageRole, MessageBlock AgentInvoker = Callable[[List[Message]], Message] @dataclass class ThinkingPayload: """Container used to pass multimodal context into thinking managers.""" text: str blocks: List[MessageBlock] = field(default_factory=list) metadata: Dict[str, Any] = field(default_factory=dict) raw: Any | None = None def describe(self) -> str: return self.text class ThinkingManagerBase(ABC): def __init__(self, config: ThinkingConfig): self.config = config self.before_gen_think_enabled = False self.after_gen_think_enabled = False # you can customize the prompt by override this attribute self.thinking_concat_prompt = "{origin}\n\nThinking Result: {thinking}" @abstractmethod def _before_gen_think( self, agent_invoker: AgentInvoker, input_payload: ThinkingPayload, agent_role: str, memory: ThinkingPayload | None, ) -> tuple[str, bool]: """ think based on input_data before calling model API for node to generate Returns: str: thinking result bool: whether to replace the original input_data with the modified one """ ... @abstractmethod def _after_gen_think( self, agent_invoker: AgentInvoker, input_payload: ThinkingPayload, agent_role: str, memory: ThinkingPayload | None, gen_payload: ThinkingPayload, ) -> tuple[str, bool]: """ think based on input_data and gen_data after calling model API for node to generate Returns: str: thinking result bool: whether to replace the original gen_data with the modified one """ ... 
def think( self, agent_invoker: AgentInvoker, input_payload: ThinkingPayload, agent_role: str, memory: ThinkingPayload | None, gen_payload: ThinkingPayload | None = None, ) -> str | Message: """ think based on input_data and gen_data if provided Returns: str: result for next step """ normalized_input = input_payload.text normalized_gen = gen_payload.text if gen_payload is not None else None if gen_payload is None and self.before_gen_think_enabled: think_result, replace_input = self._before_gen_think( agent_invoker, input_payload, agent_role, memory ) if replace_input: return think_result else: return self.thinking_concat_prompt.format(origin=normalized_input, thinking=think_result) elif gen_payload is not None and self.after_gen_think_enabled: think_result, replace_gen = self._after_gen_think( agent_invoker, input_payload, agent_role, memory, gen_payload ) if replace_gen: return think_result else: return self.thinking_concat_prompt.format(origin=normalized_gen or "", thinking=think_result) else: if gen_payload is not None: return gen_payload.raw if gen_payload.raw is not None else gen_payload.text return input_payload.raw if input_payload.raw is not None else input_payload.text
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/node/agent/thinking/thinking_manager.py", "license": "Apache License 2.0", "lines": 88, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:runtime/node/agent/tool/tool_manager.py
"""Tool management for function calling and MCP.""" import asyncio import base64 import binascii from dataclasses import dataclass import inspect import logging import mimetypes import os import threading from pathlib import Path from typing import Any, Dict, List, Mapping, Sequence from fastmcp import Client from fastmcp.client.client import CallToolResult as FastMcpCallToolResult from fastmcp.client.transports import StreamableHttpTransport, StdioTransport from mcp import types from entity.configs import ToolingConfig, ConfigError from entity.configs.node.tooling import FunctionToolConfig, McpLocalConfig, McpRemoteConfig from entity.messages import MessageBlock, MessageBlockType from entity.tool_spec import ToolSpec from utils.attachments import AttachmentStore from utils.function_manager import FUNCTION_CALLING_DIR, FunctionManager logger = logging.getLogger(__name__) DEFAULT_MCP_HTTP_TIMEOUT = 10.0 @dataclass class _FunctionManagerCacheEntry: manager: FunctionManager auto_loaded: bool = False class ToolManager: """Manage function tools for agent nodes.""" def __init__(self) -> None: self._functions_dir: Path = FUNCTION_CALLING_DIR self._function_managers: Dict[Path, _FunctionManagerCacheEntry] = {} self._mcp_tool_cache: Dict[str, List[Any]] = {} self._mcp_stdio_clients: Dict[str, "_StdioClientWrapper"] = {} def _get_function_manager(self) -> FunctionManager: entry = self._function_managers.get(self._functions_dir) if entry is None: entry = _FunctionManagerCacheEntry(manager=FunctionManager(self._functions_dir)) self._function_managers[self._functions_dir] = entry return entry.manager def _ensure_functions_loaded(self, auto_load: bool) -> None: if not auto_load: return entry = self._function_managers.setdefault( self._functions_dir, _FunctionManagerCacheEntry(manager=FunctionManager(self._functions_dir)) ) if not entry.auto_loaded: entry.manager.load_functions() entry.auto_loaded = True async def _fetch_mcp_tools_http( self, server_url: str, *, headers: 
Dict[str, str] | None = None, timeout: float | None = None, attempts: int = 3, ) -> List[Any]: delay = 0.5 last_error: Exception | None = None for attempt in range(1, attempts + 1): try: client = Client( transport=StreamableHttpTransport(server_url, headers=headers or None), timeout=timeout or DEFAULT_MCP_HTTP_TIMEOUT, ) async with client: return await client.list_tools() except Exception as exc: # pragma: no cover - passthrough to caller last_error = exc if attempt == attempts: raise await asyncio.sleep(delay) delay *= 2 if last_error: raise last_error return [] async def _fetch_mcp_tools_stdio(self, config: McpLocalConfig, launch_key: str) -> List[Any]: client = self._get_stdio_client(config, launch_key) return client.list_tools() def get_tool_specs(self, tool_configs: List[ToolingConfig] | None) -> List[ToolSpec]: """Return provider-agnostic tool specifications for the given config list.""" if not tool_configs: return [] specs: List[ToolSpec] = [] seen_tools: set[str] = set() for idx, tool_config in enumerate(tool_configs): current_specs: List[ToolSpec] = [] if tool_config.type == "function": config = tool_config.as_config(FunctionToolConfig) if not config: raise ValueError("Function tooling configuration missing") current_specs = self._build_function_specs(config) elif tool_config.type == "mcp_remote": config = tool_config.as_config(McpRemoteConfig) if not config: raise ValueError("MCP remote configuration missing") current_specs = self._build_mcp_remote_specs(config) elif tool_config.type == "mcp_local": config = tool_config.as_config(McpLocalConfig) if not config: raise ValueError("MCP local configuration missing") current_specs = self._build_mcp_local_specs(config) else: # Skip unknown types or raise error? Existing code raised error in execute but ignored in get_specs? # Better to ignore or log warning for robustness, but let's stick to safe behavior. 
pass prefix = tool_config.prefix for spec in current_specs: original_name = spec.name final_name = f"{prefix}_{original_name}" if prefix else original_name if final_name in seen_tools: raise ConfigError( f"Duplicate tool name '{final_name}' detected. " f"Please use a unique 'prefix' in your tooling configuration." ) seen_tools.add(final_name) # Update spec spec.name = final_name spec.metadata["_config_index"] = idx spec.metadata["original_name"] = original_name specs.append(spec) return specs async def execute_tool( self, tool_name: str, arguments: Dict[str, Any], tool_config: ToolingConfig, *, tool_context: Dict[str, Any] | None = None, ) -> Any: """Execute a tool using the provided configuration.""" if tool_config.type == "function": config = tool_config.as_config(FunctionToolConfig) if not config: raise ValueError("Function tooling configuration missing") return self._execute_function_tool(tool_name, arguments, config, tool_context) if tool_config.type == "mcp_remote": config = tool_config.as_config(McpRemoteConfig) if not config: raise ValueError("MCP remote configuration missing") return await self._execute_mcp_remote_tool(tool_name, arguments, config, tool_context) if tool_config.type == "mcp_local": config = tool_config.as_config(McpLocalConfig) if not config: raise ValueError("MCP local configuration missing") return await self._execute_mcp_local_tool(tool_name, arguments, config, tool_context) raise ValueError(f"Unsupported tool type: {tool_config.type}") def _build_function_specs(self, config: FunctionToolConfig) -> List[ToolSpec]: self._ensure_functions_loaded(config.auto_load) specs: List[ToolSpec] = [] for tool in config.tools: parameters = tool.get("parameters") if not isinstance(parameters, Mapping): parameters = {"type": "object", "properties": {}} specs.append( ToolSpec( name=tool.get("name", ""), description=tool.get("description") or "", parameters=parameters, metadata={"source": "function"}, ) ) return specs def _build_mcp_remote_specs(self, 
config: McpRemoteConfig) -> List[ToolSpec]: cache_key = f"remote:{config.cache_key()}" tools = self._mcp_tool_cache.get(cache_key) if tools is None: tools = asyncio.run( self._fetch_mcp_tools_http( config.server, headers=config.headers, timeout=config.timeout, ) ) self._mcp_tool_cache[cache_key] = tools specs: List[ToolSpec] = [] for tool in tools: specs.append( ToolSpec( name=tool.name, description=tool.description or "", parameters=tool.inputSchema or {"type": "object", "properties": {}}, metadata={"source": "mcp", "server": config.server, "mode": "remote"}, ) ) return specs def _build_mcp_local_specs(self, config: McpLocalConfig) -> List[ToolSpec]: launch_key = config.cache_key() if not launch_key: raise ValueError("MCP local configuration missing launch key") cache_key = f"stdio:{launch_key}" tools = self._mcp_tool_cache.get(cache_key) if tools is None: tools = asyncio.run(self._fetch_mcp_tools_stdio(config, launch_key)) self._mcp_tool_cache[cache_key] = tools specs: List[ToolSpec] = [] for tool in tools: specs.append( ToolSpec( name=tool.name, description=tool.description or "", parameters=tool.inputSchema or {"type": "object", "properties": {}}, metadata={"source": "mcp", "server": "stdio", "mode": "local"}, ) ) return specs def _execute_function_tool( self, tool_name: str, arguments: Dict[str, Any], config: FunctionToolConfig, tool_context: Dict[str, Any] | None = None, ) -> Any: mgr = self._get_function_manager() if config.auto_load: mgr.load_functions() func = mgr.get_function(tool_name) if func is None: raise ValueError(f"Tool {tool_name} not found in {self._functions_dir}") call_args = dict(arguments or {}) if ( tool_context is not None # and "_context" not in call_args and self._function_accepts_context(func) ): call_args["_context"] = tool_context return func(**call_args) def _function_accepts_context(self, func: Any) -> bool: try: signature = inspect.signature(func) except (ValueError, TypeError): return False for param in 
signature.parameters.values(): if param.kind is inspect.Parameter.VAR_KEYWORD: return True if param.name == "_context" and param.kind in ( inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY, ): return True return False async def _execute_mcp_remote_tool( self, tool_name: str, arguments: Dict[str, Any], config: McpRemoteConfig, tool_context: Dict[str, Any] | None = None, ) -> Any: client = Client( transport=StreamableHttpTransport(config.server, headers=config.headers or None), timeout=config.timeout or DEFAULT_MCP_HTTP_TIMEOUT, ) async with client: result = await client.call_tool(tool_name, arguments) return self._normalize_mcp_result(tool_name, result, tool_context) async def _execute_mcp_local_tool( self, tool_name: str, arguments: Dict[str, Any], config: McpLocalConfig, tool_context: Dict[str, Any] | None = None, ) -> Any: launch_key = config.cache_key() if not launch_key: raise ValueError("MCP local configuration missing launch key") stdio_client = self._get_stdio_client(config, launch_key) result = stdio_client.call_tool(tool_name, arguments) return self._normalize_mcp_result(tool_name, result, tool_context) def _normalize_mcp_result( self, tool_name: str, result: FastMcpCallToolResult, tool_context: Dict[str, Any] | None, ) -> Any: attachment_store = self._extract_attachment_store(tool_context) blocks = self._convert_mcp_content_to_blocks(tool_name, result.content, attachment_store) if blocks: return blocks if result.structured_content is not None: return result.structured_content if result.content: content = result.content[0] if isinstance(content, types.TextContent): return content.text return str(content) return None def _extract_attachment_store(self, tool_context: Dict[str, Any] | None) -> AttachmentStore | None: if not tool_context: return None candidate = tool_context.get("attachment_store") if isinstance(candidate, AttachmentStore): return candidate if candidate is not None: logger.warning( "attachment_store in tool_context is not 
AttachmentStore (got %s)", type(candidate).__name__, ) return None def _convert_mcp_content_to_blocks( self, tool_name: str, contents: Sequence[types.ContentBlock] | None, attachment_store: AttachmentStore | None, ) -> List[MessageBlock]: blocks: List[MessageBlock] = [] if not contents: return blocks for idx, content in enumerate(contents): converted = self._convert_single_mcp_block(tool_name, content, idx, attachment_store) if converted: blocks.extend(converted) return blocks def _convert_single_mcp_block( self, tool_name: str, content: types.ContentBlock, block_index: int, attachment_store: AttachmentStore | None, ) -> List[MessageBlock]: if isinstance(content, types.TextContent): return [MessageBlock.text_block(content.text)] if isinstance(content, types.ImageContent): return self._materialize_mcp_binary_block( tool_name, content.data, content.mimeType, MessageBlockType.IMAGE, block_index, attachment_store, ) if isinstance(content, types.AudioContent): return self._materialize_mcp_binary_block( tool_name, content.data, content.mimeType, MessageBlockType.AUDIO, block_index, attachment_store, ) if isinstance(content, types.EmbeddedResource): resource = content.resource if isinstance(resource, types.TextResourceContents): data_payload = { "uri": str(resource.uri), "mime_type": resource.mimeType, } return [ MessageBlock( type=MessageBlockType.TEXT, text=resource.text, data={k: v for k, v in data_payload.items() if v is not None}, ) ] if isinstance(resource, types.BlobResourceContents): extra = { "resource_uri": str(resource.uri), } return self._materialize_mcp_binary_block( tool_name, resource.blob, resource.mimeType, self._message_block_type_from_mime(resource.mimeType), block_index, attachment_store, extra=extra, ) if isinstance(content, types.ResourceLink): data_payload = { "uri": str(content.uri), "mime_type": content.mimeType, "description": content.description, } return [ MessageBlock( type=MessageBlockType.DATA, text=content.description or f"Resource link: 
{content.uri}", data={k: v for k, v in data_payload.items() if v is not None}, ) ] logger.warning("Unhandled MCP content block type: %s", type(content).__name__) return [] def _materialize_mcp_binary_block( self, tool_name: str, payload_b64: str, mime_type: str | None, block_type: MessageBlockType, block_index: int, attachment_store: AttachmentStore | None, *, extra: Dict[str, Any] | None = None, ) -> List[MessageBlock]: display_name = self._build_attachment_name(tool_name, block_type, block_index, mime_type) try: binary = base64.b64decode(payload_b64) except (binascii.Error, ValueError) as exc: logger.warning("Failed to decode MCP %s payload for %s: %s", block_type.value, tool_name, exc) return [ MessageBlock.text_block( f"[failed to decode {block_type.value} content from {tool_name}]" ) ] metadata = { "source": "mcp_tool", "tool_name": tool_name, "block_type": block_type.value, } if extra: metadata.update(extra) if attachment_store is None: placeholder = ( f"[binary content omitted: {display_name} ({mime_type or 'application/octet-stream'})]" ) return [ MessageBlock( type=MessageBlockType.TEXT, text=placeholder, data={**metadata, "reason": "attachment_store_missing", "mime_type": mime_type}, ) ] record = attachment_store.register_bytes( binary, kind=block_type, mime_type=mime_type, display_name=display_name, extra=metadata, ) return [record.as_message_block()] def _build_attachment_name( self, tool_name: str, block_type: MessageBlockType, block_index: int, mime_type: str | None, ) -> str: base = f"{tool_name}_{block_type.value}_{block_index + 1}".strip() or "attachment" safe_base = "".join(ch if ch.isalnum() or ch in {"-", "_"} else "_" for ch in base) ext = mimetypes.guess_extension(mime_type or "") or "" return f"{safe_base}{ext}" def _message_block_type_from_mime(self, mime_type: str | None) -> MessageBlockType: if not mime_type: return MessageBlockType.FILE if mime_type.startswith("image/"): return MessageBlockType.IMAGE if mime_type.startswith("audio/"): 
return MessageBlockType.AUDIO if mime_type.startswith("video/"): return MessageBlockType.VIDEO return MessageBlockType.FILE def _get_stdio_client(self, config: McpLocalConfig, launch_key: str) -> "_StdioClientWrapper": client = self._mcp_stdio_clients.get(launch_key) if client is None: client = _StdioClientWrapper(config) self._mcp_stdio_clients[launch_key] = client return client class _StdioClientWrapper: def __init__(self, config: McpLocalConfig) -> None: env = os.environ.copy() if config.inherit_env else {} env.update(config.env) env_payload = env or None transport = StdioTransport( command=config.command, args=list(config.args), env=env_payload, cwd=config.cwd, keep_alive=True, ) self._client = Client(transport=transport) self._loop = asyncio.new_event_loop() self._thread = threading.Thread(target=self._run_loop, daemon=True) self._thread.start() init_future = asyncio.run_coroutine_threadsafe(self._initialize(), self._loop) init_future.result() def _run_loop(self) -> None: asyncio.set_event_loop(self._loop) self._loop.run_forever() async def _initialize(self) -> None: self._lock = asyncio.Lock() await self._client.__aenter__() def list_tools(self) -> List[Any]: future = asyncio.run_coroutine_threadsafe(self._call("list_tools"), self._loop) return future.result() def call_tool(self, name: str, arguments: Dict[str, Any]) -> Any: future = asyncio.run_coroutine_threadsafe( self._call("call_tool", name, arguments), self._loop, ) return future.result() async def _call(self, method: str, *args: Any, **kwargs: Any) -> Any: async with self._lock: func = getattr(self._client, method) return await func(*args, **kwargs) def close(self) -> None: future = asyncio.run_coroutine_threadsafe(self._shutdown(), self._loop) future.result() self._loop.call_soon_threadsafe(self._loop.stop) self._thread.join() async def _shutdown(self) -> None: async with self._lock: await self._client.__aexit__(None, None, None)
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/node/agent/tool/tool_manager.py", "license": "Apache License 2.0", "lines": 502, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:runtime/node/builtin_nodes.py
"""Register built-in workflow node types.""" from entity.configs.node.agent import AgentConfig from entity.configs.node.human import HumanConfig from entity.configs.node.subgraph import ( SubgraphConfig, SubgraphFileConfig, SubgraphInlineConfig, register_subgraph_source, ) from entity.configs.node.passthrough import PassthroughConfig from entity.configs.node.literal import LiteralNodeConfig from entity.configs.node.python_runner import PythonRunnerConfig from entity.configs.node.loop_counter import LoopCounterConfig from entity.configs.node.loop_timer import LoopTimerConfig from runtime.node.executor.agent_executor import AgentNodeExecutor from runtime.node.executor.human_executor import HumanNodeExecutor from runtime.node.executor.passthrough_executor import PassthroughNodeExecutor from runtime.node.executor.literal_executor import LiteralNodeExecutor from runtime.node.executor.python_executor import PythonNodeExecutor from runtime.node.executor.subgraph_executor import SubgraphNodeExecutor from runtime.node.executor.loop_counter_executor import LoopCounterNodeExecutor from runtime.node.executor.loop_timer_executor import LoopTimerNodeExecutor from runtime.node.registry import NodeCapabilities, register_node_type register_node_type( "agent", config_cls=AgentConfig, executor_cls=AgentNodeExecutor, capabilities=NodeCapabilities( default_role_field="role", exposes_tools=True, ), summary="Agent execution node backed by configured LLM/tool providers with support for tooling, memory, and thinking extensions.", ) register_node_type( "human", config_cls=HumanConfig, executor_cls=HumanNodeExecutor, capabilities=NodeCapabilities( resource_key="node_type:human", resource_limit=1, ), summary="Pauses graph and waits for human operator response", ) register_node_type( "subgraph", config_cls=SubgraphConfig, executor_cls=SubgraphNodeExecutor, capabilities=NodeCapabilities(), executor_factory=lambda context, subgraphs=None: SubgraphNodeExecutor( context, subgraphs or {} ), 
summary="Embeds (through file path or inline config) and runs another named subgraph within the current workflow", ) register_node_type( "python", config_cls=PythonRunnerConfig, executor_cls=PythonNodeExecutor, capabilities=NodeCapabilities( resource_key="node_type:python", resource_limit=1, ), summary="Executes repository Python snippets", ) register_node_type( "passthrough", config_cls=PassthroughConfig, executor_cls=PassthroughNodeExecutor, capabilities=NodeCapabilities(), summary="Forwards prior node output downstream without modification", ) register_node_type( "literal", config_cls=LiteralNodeConfig, executor_cls=LiteralNodeExecutor, capabilities=NodeCapabilities(), summary="Emits the configured text message every time it is triggered", ) register_node_type( "loop_counter", config_cls=LoopCounterConfig, executor_cls=LoopCounterNodeExecutor, capabilities=NodeCapabilities(), summary="Blocks downstream edges until the configured iteration limit is reached, then emits a message to release the loop.", ) register_node_type( "loop_timer", config_cls=LoopTimerConfig, executor_cls=LoopTimerNodeExecutor, capabilities=NodeCapabilities(), summary="Blocks downstream edges until the configured time limit is reached, then emits a message to release the loop.", ) # Register subgraph source types (file-based and inline config) register_subgraph_source( "config", config_cls=SubgraphInlineConfig, description="Inline subgraph definition embedded directly in the YAML graph", ) register_subgraph_source( "file", config_cls=SubgraphFileConfig, description="Reference an external YAML file containing the subgraph", )
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/node/builtin_nodes.py", "license": "Apache License 2.0", "lines": 102, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:runtime/node/executor/agent_executor.py
"""Agent node executor. Responsible for running agent nodes, including - memory retrieval - thinking workflows - agent invocation - tool calling """ import asyncio import base64 import json import traceback from typing import Any, Callable, Dict, List, Optional, Sequence from entity.configs import Node from entity.configs.node.agent import AgentConfig, AgentRetryConfig from entity.enums import CallStage, AgentExecFlowStage, AgentInputMode from entity.messages import ( AttachmentRef, FunctionCallOutputEvent, Message, MessageBlock, MessageRole, ToolCallPayload, ) from entity.tool_spec import ToolSpec from runtime.node.executor.base import NodeExecutor from runtime.node.agent.memory.memory_base import ( MemoryContentSnapshot, MemoryRetrievalResult, MemoryWritePayload, ) from runtime.node.agent import ThinkingPayload from runtime.node.agent import ModelProvider, ProviderRegistry, ModelResponse from tenacity import Retrying, retry_if_exception, stop_after_attempt, wait_random_exponential class AgentNodeExecutor(NodeExecutor): """Executor that runs agent nodes.""" def execute(self, node: Node, inputs: List[Message]) -> List[Message]: """Execute an agent node. 
Args: node: Agent node definition inputs: Input messages collected from upstream nodes Returns: Agent output messages """ self._ensure_not_cancelled() if node.node_type != "agent": raise ValueError(f"Node {node.id} is not an agent node") agent_config = node.as_config(AgentConfig) if not agent_config: raise ValueError(f"Node {node.id} missing agent config") try: self._current_node_id = node.id provider_class = ProviderRegistry.get_provider(agent_config.provider) if not provider_class: raise ValueError(f"Provider '{agent_config.provider}' not found") agent_config.token_tracker = self.context.get_token_tracker() agent_config.node_id = node.id input_data = self._inputs_to_text(inputs) input_payload = self._build_thinking_payload_from_inputs(inputs, input_data) memory_query_snapshot = self._build_memory_query_snapshot(inputs, input_data) input_mode = agent_config.input_mode or AgentInputMode.PROMPT provider = provider_class(agent_config) client = provider.create_client() if input_mode is AgentInputMode.PROMPT: conversation = self._prepare_prompt_messages(node, input_data) else: conversation = self._prepare_message_conversation(node, inputs) call_options = self._prepare_call_options(node) tool_specs = self.tool_manager.get_tool_specs(agent_config.tooling) agent_invoker = self._build_agent_invoker( provider, client, call_options, tool_specs, node, ) if agent_config.thinking: self._apply_pre_generation_thinking( node, conversation, input_payload, memory_query_snapshot, AgentExecFlowStage.PRE_GEN_THINKING_STAGE, agent_invoker, input_mode, ) self._apply_memory_retrieval( node, conversation, memory_query_snapshot, AgentExecFlowStage.GEN_STAGE, input_mode, ) timeline = self._build_initial_timeline(conversation) response_obj = self._invoke_provider( provider, client, conversation, timeline, call_options, tool_specs, node, ) if response_obj.has_tool_calls(): response_message = self._handle_tool_calls( node, provider, client, conversation, timeline, call_options, response_obj, 
tool_specs, ) else: response_message = response_obj.message self._persist_message_attachments(response_message, node.id) final_message: Message | str = response_message if agent_config.thinking: gen_payload = self._build_thinking_payload_from_message(final_message, source="model_output") final_message = self._apply_post_generation_thinking( node, conversation, memory_query_snapshot, input_payload, gen_payload, AgentExecFlowStage.POST_GEN_THINKING_STAGE, agent_invoker, input_mode, ) self._update_memory(node, input_data, inputs, final_message) if isinstance(final_message, Message): return [self._clone_with_source(final_message, node.id)] return [self._build_message( role=MessageRole.ASSISTANT, content=final_message, source=node.id, )] except Exception as e: traceback.print_exc() error_msg = f"[Node: {node.id}] Error calling model: {str(e)}" self.log_manager.error(error_msg) return [self._build_message( role=MessageRole.ASSISTANT, content=f"Error calling model {node.model_name}: {str(e)}\n\nOriginal input: {input_data[:200]}...", source=node.id, )] finally: self._current_node_id = None def _prepare_prompt_messages(self, node: Node, input_data: str) -> List[Message]: """Prepare the prompt-style message sequence.""" messages: List[Message] = [] if node.role: messages.append(Message(role=MessageRole.SYSTEM, content=node.role)) try: if isinstance(input_data, str): clean_input = input_data.encode("utf-8", errors="ignore").decode("utf-8") else: clean_input = str(input_data) except Exception as encoding_error: self.log_manager.error(f"[Node: {node.id}] Encoding error: {encoding_error}") clean_input = str(input_data) messages.append(Message(role=MessageRole.USER, content=clean_input)) return messages def _prepare_message_conversation(self, node: Node, inputs: List[Message]) -> List[Message]: messages: List[Message] = [] if node.role: messages.append(Message(role=MessageRole.SYSTEM, content=node.role)) normalized_inputs = self._coerce_inputs_to_messages(inputs) if 
normalized_inputs: messages.extend(normalized_inputs) else: messages.append(Message(role=MessageRole.USER, content="")) return messages def _prepare_call_options(self, node: Node) -> Dict[str, Any]: """Prepare model call options (excluding conversation messages).""" call_options: Dict[str, Any] = {} model = node.as_config(AgentConfig) if not model: raise ValueError(f"Node {node.id} missing model config") if model.params: call_options.update(model.params) # call_options.setdefault("temperature", 0.7) # call_options.setdefault("max_tokens", 4096) return call_options def _build_agent_invoker( self, provider: ModelProvider, client: Any, base_call_options: Dict[str, Any], default_tool_specs: List[ToolSpec], node: Node, ) -> Callable[[List[Message]], Message]: """Create a callable that other components can use to invoke the model.""" def invoke( conversation: List[Message], *, tools: Optional[List[ToolSpec]] = None, **overrides: Any, ) -> Message: call_options = dict(base_call_options) call_options.update(overrides) timeline = self._build_initial_timeline(conversation) response = self._invoke_provider( provider, client, conversation, timeline, call_options, tools if tools is not None else default_tool_specs, node, ) return response.message return invoke def _invoke_provider( self, provider: ModelProvider, client: Any, conversation: List[Message], timeline: List[Any], call_options: Dict[str, Any], tool_specs: List[ToolSpec] | None, node: Node, ) -> ModelResponse: """Invoke provider with logging + token tracking.""" self._ensure_not_cancelled() if self.context.token_tracker: self.context.token_tracker.current_node_id = node.id agent_config = node.as_config(AgentConfig) retry_policy = self._resolve_retry_policy(node, agent_config) def _call_provider() -> ModelResponse: return provider.call_model( client, conversation=conversation, timeline=timeline, tool_specs=tool_specs or None, **call_options, ) last_input = ''.join(msg.text_content() for msg in conversation) if 
conversation else "" self._record_model_call(node, last_input, None, CallStage.BEFORE) response = self._execute_with_retry(node, retry_policy, _call_provider) self.log_manager.debug(response.str_raw_response()) self._record_model_call(node, last_input, response, CallStage.AFTER) return response def _record_model_call( self, node: Node, input_payload: str, response: ModelResponse | None, stage: CallStage = CallStage.AFTER, ) -> None: """Record model invocation to the log manager.""" response_text = response.message.text_content() if response else None call_details = {"has_tool_calls": response.has_tool_calls()} if response else {} self.log_manager.record_model_call( node.id, node.model_name, input_payload, response_text, call_details, stage, ) def _execute_with_retry( self, node: Node, retry_config: AgentRetryConfig | None, func: Callable[[], ModelResponse], ) -> ModelResponse: if not retry_config or not retry_config.is_active: return func() wait = wait_random_exponential( min=retry_config.min_wait_seconds, max=retry_config.max_wait_seconds, ) retry_condition = retry_if_exception(lambda exc: retry_config.should_retry(exc)) def _before_sleep(retry_state) -> None: exc = retry_state.outcome.exception() if exc is None: return attempt = retry_state.attempt_number details = { "attempt": attempt, "max_attempts": retry_config.max_attempts, "exception": exc.__class__.__name__, } self.log_manager.warning( f"[Node: {node.id}] Model call attempt {attempt} failed: {exc}", node_id=node.id, details=details, ) retrier = Retrying( stop=stop_after_attempt(retry_config.max_attempts), wait=wait, retry=retry_condition, before_sleep=_before_sleep, reraise=True, ) return retrier(func) def _resolve_retry_policy( self, node: Node, agent_config: AgentConfig | None, ) -> AgentRetryConfig | None: """Ensure every agent node has a retry policy even if config omits it.""" if not agent_config: return None if agent_config.retry is not None: return agent_config.retry base_path = 
getattr(agent_config, "path", None) or getattr(node, "path", None) or "<runtime>" retry_path = f"{base_path}.retry" if base_path else "retry" default_retry = AgentRetryConfig(path=retry_path) agent_config.retry = default_retry return default_retry def _apply_pre_generation_thinking( self, node: Node, conversation: List[Message], input_payload: ThinkingPayload, query_snapshot: MemoryContentSnapshot, stage: AgentExecFlowStage, agent_invoker: Callable[[List[Message]], Message], input_mode: AgentInputMode, ) -> None: """Apply pre-generation thinking.""" self._ensure_not_cancelled() thinking_manager = self.context.get_thinking_manager(node.id) if not thinking_manager or not conversation: return model = node.as_config(AgentConfig) with self.log_manager.thinking_timer(node.id, stage.value): retrieved_memory = self._retrieve_memory(node, query_snapshot, stage) memory_payload = self._memory_result_to_thinking_payload(retrieved_memory) thinking_result = thinking_manager.think( agent_invoker=agent_invoker, input_payload=input_payload, agent_role=node.role or "", memory=memory_payload, gen_payload=None, ) mode_value = model.thinking.type if model and model.thinking else "unknown" self.log_manager.record_thinking_process( node.id, mode_value, thinking_result if isinstance(thinking_result, str) else "[message]", stage.value, {"has_memory": bool(retrieved_memory and retrieved_memory.items)}, ) if input_mode is AgentInputMode.MESSAGES: if isinstance(thinking_result, Message): self._persist_message_attachments(thinking_result, node.id) conversation.append(self._clone_with_source(thinking_result, node.id)) else: self._append_user_message(conversation, thinking_result, node_id=node.id) else: content = thinking_result if isinstance(thinking_result, str) else thinking_result.text_content() conversation[-1] = conversation[-1].with_content(content) def _apply_memory_retrieval( self, node: Node, conversation: List[Message], query_snapshot: MemoryContentSnapshot, stage: AgentExecFlowStage, 
input_mode: AgentInputMode, ) -> None: """Apply memory retrieval side effects.""" self._ensure_not_cancelled() if not conversation: return retrieved_memory = self._retrieve_memory(node, query_snapshot, stage) if retrieved_memory and retrieved_memory.formatted_text: if input_mode is AgentInputMode.MESSAGES: self._insert_memory_message(conversation, retrieved_memory.formatted_text, node_id=node.id) else: last_message = conversation[-1] merged_content = f"{retrieved_memory.formatted_text}\n\n{last_message.text_content()}" conversation[-1] = last_message.with_content(merged_content) def _retrieve_memory( self, node: Node, query_snapshot: MemoryContentSnapshot, stage: AgentExecFlowStage ) -> MemoryRetrievalResult | None: """Retrieve memory for the node.""" memory_manager = self.context.get_memory_manager(node.id) if not memory_manager: return None with self.log_manager.memory_timer(node.id, "RETRIEVE", stage.value): retrieved_memory = memory_manager.retrieve( agent_role=node.role if node.role else "", query=query_snapshot, current_stage=stage, ) preview_text = retrieved_memory.formatted_text if retrieved_memory else "" details = { "stage": stage.value, "item_count": len(retrieved_memory.items) if retrieved_memory else 0, "attachment_count": len(retrieved_memory.attachment_overview()) if retrieved_memory else 0, } self.log_manager.record_memory_operation( node.id, "RETRIEVE", stage.value, preview_text, details, ) return retrieved_memory def _handle_tool_calls( self, node: Node, provider: ModelProvider, client: Any, conversation: List[Message], timeline: List[Any], call_options: Dict[str, Any], initial_response: ModelResponse, tool_specs: List[ToolSpec], ) -> Message: """Handle tool calls until completion or until the loop limit is reached.""" assistant_message = initial_response.message trace_messages: List[Message] = [] loop_limit = self._get_tool_loop_limit(node) iteration = 0 while True: self._ensure_not_cancelled() cloned_assistant = 
self._clone_with_source(assistant_message, node.id) conversation.append(cloned_assistant) trace_messages.append(cloned_assistant) if not assistant_message.tool_calls: return self._finalize_tool_trace(assistant_message, trace_messages, True, node.id) if iteration >= loop_limit: self.log_manager.warning( f"[Node: {node.id}] Tool call limit {loop_limit} reached, returning last assistant response" ) return self._finalize_tool_trace(assistant_message, trace_messages, False, node.id) iteration += 1 tool_call_messages, tool_events = self._execute_tool_batch(node, assistant_message.tool_calls, tool_specs) conversation.extend(tool_call_messages) timeline.extend(tool_events) trace_messages.extend(self._clone_with_source(msg, node.id) for msg in tool_call_messages) follow_up_response = self._invoke_provider( provider, client, conversation, timeline, call_options, tool_specs, node, ) assistant_message = follow_up_response.message def _execute_tool_batch( self, node: Node, tool_calls: List[ToolCallPayload], tool_specs: List[ToolSpec], ) -> tuple[List[Message], List[FunctionCallOutputEvent]]: """Execute a batch of tool calls and return conversation + timeline events.""" messages: List[Message] = [] events: List[FunctionCallOutputEvent] = [] model = node.as_config(AgentConfig) # Build map for fast lookup spec_map = {spec.name: spec for spec in tool_specs} configs = model.tooling if model else [] context_state = self.context.global_state previous_node_id = context_state.get("node_id") if context_state is not None else None if context_state is not None: context_state["node_id"] = node.id try: for tool_call in tool_calls: self._ensure_not_cancelled() tool_name = tool_call.function_name arguments = self._parse_tool_call_arguments(tool_call.arguments) # Resolve tool config spec = spec_map.get(tool_name) tool_config = None execution_name = tool_name if spec: idx = spec.metadata.get("_config_index") if idx is not None and 0 <= idx < len(configs): tool_config = configs[idx] # Use 
original name if prefixed execution_name = spec.metadata.get("original_name", tool_name) if not tool_config: # Fallback check: if we have 1 config, maybe it's that one? # But strict routing is safer. If spec not found, it's a hallucination or error. # We proceed and let tool_manager raise error or handle it. # But execute_tool requires tool_config. # Construct a helpful error message error_msg = f"Tool '{tool_name}' configuration not found." self.log_manager.record_tool_call( node.id, tool_name, False, None, {"error": error_msg, "arguments": arguments}, CallStage.AFTER, ) tool_message = Message( role=MessageRole.TOOL, content=f"Error: {error_msg}", tool_call_id=tool_call.id, metadata={"tool_name": tool_name, "source": node.id}, ) events.append( FunctionCallOutputEvent( call_id=tool_call.id or tool_call.function_name or "tool_call", function_name=tool_call.function_name, output_text=f"error: {error_msg}", ) ) messages.append(tool_message) continue try: self.log_manager.record_tool_call( node.id, tool_name, None, None, {"arguments": arguments}, CallStage.BEFORE, ) with self.log_manager.tool_timer(node.id, tool_name): result = asyncio.run( self.tool_manager.execute_tool( execution_name, arguments, tool_config, tool_context=self.context.global_state, ) ) tool_message = self._build_tool_message( result, tool_call, node_id=node.id, tool_name=tool_name, ) events.append( self._build_function_call_output_event( tool_call, result, ) ) self.log_manager.record_tool_call( node.id, tool_name, True, self._serialize_tool_result(result), {"arguments": arguments}, CallStage.AFTER, ) except Exception as exc: self.log_manager.record_tool_call( node.id, tool_name, False, None, {"error": str(exc), "arguments": arguments}, CallStage.AFTER, ) tool_message = Message( role=MessageRole.TOOL, content=f"Tool {tool_name} error: {exc}", tool_call_id=tool_call.id, metadata={"tool_name": tool_name, "source": node.id}, ) events.append( FunctionCallOutputEvent( call_id=tool_call.id or 
tool_call.function_name or "tool_call", function_name=tool_call.function_name, output_text=f"error: {exc}", ) ) messages.append(tool_message) finally: if context_state is not None: if previous_node_id is None: context_state.pop("node_id", None) else: context_state["node_id"] = previous_node_id return messages, events def _build_function_call_output_event( self, tool_call: ToolCallPayload, result: Any, ) -> FunctionCallOutputEvent: call_id = tool_call.id or tool_call.function_name or "tool_call" blocks = self._coerce_tool_result_to_blocks(result) if blocks: return FunctionCallOutputEvent( call_id=call_id, function_name=tool_call.function_name, output_blocks=blocks, ) return FunctionCallOutputEvent( call_id=call_id, function_name=tool_call.function_name, output_text=self._stringify_tool_result(result), ) def _stringify_tool_result(self, result: Any) -> str: if isinstance(result, Message): return result.text_content() if isinstance(result, list) and all(isinstance(item, MessageBlock) for item in result): parts = [block.describe() for block in result if block.describe()] return "\n".join(parts) if isinstance(result, (dict, list)): try: return json.dumps(result, ensure_ascii=False) except Exception: return str(result) return str(result) def _serialize_tool_result(self, result: Any) -> Any: """Convert tool outputs into JSON-serializable structures for logging.""" from utils.attachments import AttachmentRecord # local import to avoid cycles if result is None: return None if isinstance(result, Message): return result.to_dict(include_data=False) if isinstance(result, MessageBlock): return result.to_dict(include_data=False) if isinstance(result, AttachmentRecord): return result.to_dict() if isinstance(result, list): return [self._serialize_tool_result(item) for item in result] if isinstance(result, dict): return { str(key): self._serialize_tool_result(value) for key, value in result.items() } if hasattr(result, "to_dict"): try: return 
self._serialize_tool_result(result.to_dict()) except Exception: return str(result) return result if isinstance(result, (str, int, float, bool)) else str(result) def _build_tool_message( self, result: Any, tool_call: ToolCallPayload, *, node_id: str, tool_name: str, ) -> Message: base_metadata = {"tool_name": tool_name, "source": node_id} if isinstance(result, Message): msg = result.clone() msg = msg.with_role(MessageRole.TOOL) msg.tool_call_id = tool_call.id metadata = dict(base_metadata) metadata.update(msg.metadata) msg.metadata = metadata return msg from utils.attachments import AttachmentRecord # local import if isinstance(result, AttachmentRecord): content = [result.as_message_block()] elif isinstance(result, list) and all(isinstance(item, MessageBlock) for item in result): content = [block.copy() for block in result] else: content = result if isinstance(result, dict): content = json.dumps(self._serialize_tool_result(content), ensure_ascii=False, indent=2) elif not isinstance(result, str): content = str(result) return Message( role=MessageRole.TOOL, content=content, tool_call_id=tool_call.id, metadata=base_metadata, ) def _build_initial_timeline(self, conversation: List[Message]) -> List[Any]: return [msg.clone() for msg in conversation] def _finalize_tool_trace( self, message: Message, trace_messages: List[Message], complete: bool, node_id: str, ) -> Message: final_message = self._clone_with_source(message, node_id) if trace_messages: metadata = dict(final_message.metadata) metadata["context_trace"] = [item.to_dict() for item in trace_messages] metadata["context_trace_complete"] = complete final_message.metadata = metadata return final_message def _clone_with_source(self, message: Message, node_id: str) -> Message: cloned = message.clone() metadata = dict(cloned.metadata) metadata.setdefault("source", node_id) cloned.metadata = metadata return cloned def _coerce_tool_result_to_blocks(self, result: Any) -> List[MessageBlock]: """Convert supported tool outputs 
into MessageBlock sequences.""" if result is None: return [] if isinstance(result, Message): return [block.copy() for block in result.blocks()] if isinstance(result, MessageBlock): return [result.copy()] from utils.attachments import AttachmentRecord # local import to avoid cycles if isinstance(result, AttachmentRecord): return [result.as_message_block()] if isinstance(result, Sequence) and not isinstance(result, (str, bytes, bytearray)): blocks: List[MessageBlock] = [] for item in result: blocks.extend(self._coerce_tool_result_to_blocks(item)) return blocks return [] def _parse_tool_call_arguments(self, raw_arguments: Any) -> Dict[str, Any]: if isinstance(raw_arguments, dict): return raw_arguments if not raw_arguments: return {} if isinstance(raw_arguments, str): try: parsed = json.loads(raw_arguments) except json.JSONDecodeError: return {} return parsed if isinstance(parsed, dict) else {} return {} def _get_tool_loop_limit(self, node: Node) -> int: default_limit = 50 model = node.as_config(AgentConfig) if not model or not model.params: return default_limit custom_limit = model.params.get("tool_loop_limit") if isinstance(custom_limit, int) and custom_limit > 0: return custom_limit return default_limit def _persist_message_attachments(self, message: Message, node_id: str) -> None: """Register attachments produced by model outputs to the attachment store.""" store = self.context.global_state.get("attachment_store") if store is None: return for block in message.blocks(): attachment = block.attachment if not attachment: continue try: self._persist_single_attachment(store, block, node_id) except Exception as exc: raise RuntimeError(f"Failed to persist attachment '{attachment.name or attachment.attachment_id}': {exc}") from exc def _persist_single_attachment(self, store: Any, block: MessageBlock, node_id: str) -> None: attachment = block.attachment if attachment is None: return if attachment.remote_file_id and not attachment.data_uri and not attachment.local_path: 
record = store.register_remote_file( remote_file_id=attachment.remote_file_id, name=attachment.name or attachment.attachment_id or "remote_file", mime_type=attachment.mime_type, size=attachment.size, kind=block.type, attachment_id=attachment.attachment_id, ) block.attachment = record.ref return workspace_root = self.context.global_state.get("python_workspace_root") if workspace_root is None or not node_id: raise RuntimeError("Workspace or node context missing for attachment persistence") target_dir = workspace_root / "generated" / node_id target_dir.mkdir(parents=True, exist_ok=True) inferred_mime = attachment.mime_type or self._guess_mime_from_data_uri(attachment.data_uri) attachment.mime_type = inferred_mime data_bytes = self._decode_data_uri(attachment.data_uri) if attachment.data_uri else None target_path = None if data_bytes is None and attachment.local_path: target_path = target_dir / (attachment.name or self._make_generated_filename(attachment)) import shutil shutil.copy2(attachment.local_path, target_path) elif data_bytes is not None: target_path = target_dir / (attachment.name or self._make_generated_filename(attachment)) with open(target_path, "wb") as handle: handle.write(data_bytes) else: raise ValueError("Attachment missing data for persistence") record = store.register_file( target_path, kind=block.type, display_name=attachment.name or target_path.name, mime_type=attachment.mime_type, attachment_id=attachment.attachment_id, copy_file=False, persist=True, ) block.attachment = record.ref def _decode_data_uri(self, data_uri: Optional[str]) -> Optional[bytes]: if not data_uri: return None if not data_uri.startswith("data:"): return None header, _, payload = data_uri.partition(",") if not _: return None if ";base64" in header: return base64.b64decode(payload) return payload.encode("utf-8") def _make_generated_filename(self, attachment: AttachmentRef) -> str: """Generate a filename based on mime type or attachment id.""" name = attachment.name if name: 
return name mime = attachment.mime_type or "" ext = "" if "/" in mime: subtype = mime.split("/", 1)[1] if subtype: ext = f".{subtype.split('+')[0]}" if not ext: ext = ".bin" return f"{attachment.attachment_id or 'generated'}{ext}" def _guess_mime_from_data_uri(self, data_uri: Optional[str]) -> Optional[str]: if not data_uri or not data_uri.startswith("data:"): return None header = data_uri.split(",", 1)[0] if ":" in header: header = header.split(":", 1)[1] return header.split(";")[0] if ";" in header else header def _apply_post_generation_thinking( self, node: Node, conversation: List[Message], query_snapshot: MemoryContentSnapshot, input_payload: ThinkingPayload, gen_payload: ThinkingPayload, stage: AgentExecFlowStage, agent_invoker: Callable[[List[Message]], Message], input_mode: AgentInputMode, ) -> Message | str: """Apply post-generation thinking.""" self._ensure_not_cancelled() thinking_manager = self.context.get_thinking_manager(node.id) if not thinking_manager: return gen_payload.raw if gen_payload.raw is not None else gen_payload.text model = node.as_config(AgentConfig) with self.log_manager.thinking_timer(node.id, stage.value): retrieved_memory = self._retrieve_memory(node, query_snapshot, stage) memory_payload = self._memory_result_to_thinking_payload(retrieved_memory) result = thinking_manager.think( agent_invoker=agent_invoker, input_payload=input_payload, agent_role=node.role or "", memory=memory_payload, gen_payload=gen_payload, ) mode_value = model.thinking.type if model and model.thinking else "unknown" self.log_manager.record_thinking_process( node.id, mode_value, result if isinstance(result, str) else "[message]", stage.value, {"has_memory": bool(retrieved_memory and retrieved_memory.items)}, ) if input_mode is AgentInputMode.MESSAGES: if isinstance(result, Message): self._persist_message_attachments(result, node.id) self._reset_conversation_with_user_result(conversation, result, node_id=node.id) else: 
        self._reset_conversation_with_user_result(conversation, result, node_id=node.id)
        return result

    def _coerce_inputs_to_messages(self, inputs: List[Message]) -> List[Message]:
        # Defensive copy: keep only real Message instances and clone them so the
        # caller's input list is never mutated by downstream processing.
        return [message.clone() for message in inputs if isinstance(message, Message)]

    def _append_user_message(self, conversation: List[Message], content: str, *, node_id: str) -> None:
        # Tag the message with the producing node id so provenance survives routing.
        conversation.append(
            Message(role=MessageRole.USER, content=content, metadata={"source": node_id})
        )

    def _insert_memory_message(self, conversation: List[Message], content: str, *, node_id: str) -> None:
        # Insert retrieved memory immediately before the most recent USER turn,
        # or append at the end when the conversation has no USER message yet.
        last_user_idx = self._find_last_user_index(conversation)
        insert_idx = last_user_idx if last_user_idx is not None else len(conversation)
        conversation.insert(
            insert_idx,
            Message(role=MessageRole.USER, content=content, metadata={"source": node_id}),
        )

    def _find_last_user_index(self, conversation: List[Message]) -> Optional[int]:
        # Scan backwards; None means the conversation contains no USER message.
        for idx in range(len(conversation) - 1, -1, -1):
            if conversation[idx].role is MessageRole.USER:
                return idx
        return None

    def _reset_conversation_with_user_result(self, conversation: List[Message], content: Message | str, *, node_id: str) -> None:
        # Collapse the conversation down to its SYSTEM messages, then seed it
        # with the given result re-cast as a single USER turn.
        system_messages = [msg.clone() for msg in conversation if msg.role is MessageRole.SYSTEM]
        conversation.clear()
        conversation.extend(system_messages)
        if isinstance(content, Message):
            conversation.append(self._clone_with_source(content.with_role(MessageRole.USER), node_id))
        else:
            conversation.append(
                Message(role=MessageRole.USER, content=content, metadata={"source": node_id})
            )

    def _update_memory(self, node: Node, input_data: str, inputs: List[Message], result: Message | str) -> None:
        """Update the memory store with the latest conversation."""
        memory_manager = self.context.get_memory_manager(node.id)
        if not memory_manager:
            # Memory is optional per node; silently skip when not configured.
            return

        stage = AgentExecFlowStage.FINISHED_STAGE
        input_snapshot = MemoryContentSnapshot.from_messages(inputs)
        output_snapshot = MemoryContentSnapshot.from_message(result)
        payload = MemoryWritePayload(
            agent_role=node.role if node.role else "",
            inputs_text=input_data,
            input_snapshot=input_snapshot,
            output_snapshot=output_snapshot,
        )
        # Time the write so memory latency shows up in the structured logs.
        with self.log_manager.memory_timer(node.id, "UPDATE", stage.value):
            memory_manager.update(payload)

        # Record the memory update
        normalized_result = result.text_content() if isinstance(result, Message) else str(result)
        self.log_manager.record_memory_operation(
            node.id,
            "UPDATE",
            stage.value,
            normalized_result,
            {
                "stage": stage.value,
                "input_size": len(str(input_data)),
                "output_size": len(normalized_result),
                "attachment_count": len(output_snapshot.attachment_overview()) if output_snapshot else 0,
            }
        )

    def _build_thinking_payload_from_inputs(self, inputs: List[Message], input_text: str) -> ThinkingPayload:
        # Flatten the blocks of every input message into one payload for the
        # thinking manager; the pre-rendered text is reused as both text and raw.
        blocks: List[MessageBlock] = []
        for message in inputs:
            blocks.extend(message.blocks())
        return ThinkingPayload(
            text=input_text,
            blocks=blocks,
            metadata={"source": "inputs"},
            raw=input_text,
        )

    def _build_memory_query_snapshot(
        self,
        inputs: List[Message],
        input_text: str,
    ) -> MemoryContentSnapshot:
        # Use the flattened prompt text for retrieval, but keep the structured
        # blocks from the raw inputs so multimodal content is not lost.
        base_snapshot = MemoryContentSnapshot.from_messages(inputs)
        blocks = list(base_snapshot.blocks) if base_snapshot else []
        return MemoryContentSnapshot(text=input_text, blocks=blocks)

    def _build_thinking_payload_from_message(self, message: Message | str | None, *, source: str) -> ThinkingPayload:
        # Messages carry structured blocks; plain strings (or None) do not.
        if isinstance(message, Message):
            return ThinkingPayload(
                text=message.text_content(),
                blocks=message.blocks(),
                metadata={"source": source},
                raw=message,
            )
        text = "" if message is None else str(message)
        return ThinkingPayload(text=text, blocks=[], metadata={"source": source}, raw=text)

    def _memory_result_to_thinking_payload(
        self,
        result: MemoryRetrievalResult | None,
    ) -> ThinkingPayload | None:
        # Empty/absent retrieval results yield no payload at all.
        if not result:
            return None
        blocks: List[MessageBlock] = []
        for item in result.items:
            # Output snapshot blocks come first, then input snapshot blocks.
            if item.output_snapshot:
                blocks.extend(item.output_snapshot.to_message_blocks())
            if item.input_snapshot:
                blocks.extend(item.input_snapshot.to_message_blocks())
        metadata = {
            "source": "memory",
            "has_multimodal": result.has_multimodal(),
            "attachment_count": len(result.attachment_overview()),
        }
        return ThinkingPayload(text=result.formatted_text, blocks=blocks, metadata=metadata)
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/node/executor/agent_executor.py", "license": "Apache License 2.0", "lines": 961, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:runtime/node/executor/base.py
"""Abstract base classes for node executors. Defines the interfaces that every node executor must implement. """ from abc import ABC, abstractmethod from dataclasses import dataclass, field from typing import Any, Dict, Optional, List from entity.configs import Node from entity.messages import Message, MessageContent, MessageRole, serialize_messages from runtime.node.agent import MemoryManager from runtime.node.agent import ThinkingManagerBase from runtime.node.agent import ToolManager from utils.function_manager import FunctionManager from utils.human_prompt import HumanPromptService from utils.log_manager import LogManager from utils.token_tracker import TokenTracker from utils.exceptions import WorkflowCancelledError @dataclass class ExecutionContext: """Node execution context that bundles every service and state the executor needs. Attributes: tool_manager: Tool manager shared by executors function_manager: Function manager registry log_manager: Structured log manager memory_managers: Mapping of node_id to ``MemoryManager`` instances thinking_managers: Mapping of node_id to ``ThinkingManagerBase`` instances token_tracker: Token tracker used for accounting global_state: Shared global state dictionary """ tool_manager: ToolManager function_manager: FunctionManager log_manager: LogManager memory_managers: Dict[str, MemoryManager] = field(default_factory=dict) thinking_managers: Dict[str, ThinkingManagerBase] = field(default_factory=dict) token_tracker: Optional[TokenTracker] = None global_state: Dict[str, Any] = field(default_factory=dict) workspace_hook: Optional[Any] = None human_prompt_service: Optional[HumanPromptService] = None cancel_event: Optional[Any] = None def get_memory_manager(self, node_id: str) -> Optional[MemoryManager]: """Return the memory manager for a given node.""" return self.memory_managers.get(node_id) def get_thinking_manager(self, node_id: str) -> Optional[ThinkingManagerBase]: """Return the thinking manager for a given node.""" return 
self.thinking_managers.get(node_id) def get_token_tracker(self) -> Optional[TokenTracker]: """Return the configured token tracker.""" return self.token_tracker def get_human_prompt_service(self) -> Optional[HumanPromptService]: """Return the interactive human prompt service.""" return self.human_prompt_service class NodeExecutor(ABC): """Abstract base class for node executors. Every concrete executor must inherit from this class and implement ``execute``. """ def __init__(self, context: ExecutionContext): """Initialize the executor with the shared execution context. Args: context: Execution context """ self.context = context @abstractmethod def execute(self, node: Node, inputs: List[Message]) -> List[Message]: """Execute the node logic. Args: node: Node definition to execute inputs: Input queue for the node Returns: List of payload messages produced by the node. Empty list when the node intentionally suppresses downstream propagation. Standard nodes return a single-element list. Raises: Exception: Raised when execution fails """ pass @property def tool_manager(self) -> ToolManager: """Return the shared tool manager.""" return self.context.tool_manager @property def function_manager(self) -> FunctionManager: """Return the shared function manager.""" return self.context.function_manager @property def log_manager(self) -> LogManager: """Return the structured log manager.""" return self.context.log_manager def _inputs_to_text(self, inputs: List[Message]) -> str: if not inputs: return "" parts: list[str] = [] for message in inputs: source = message.metadata.get("source", "UNKNOWN") parts.append( f"=== INPUT FROM {source} ({message.role.value}) ===\n\n{message.text_content()}" ) return "\n\n".join(parts) def _inputs_to_message_json(self, inputs: List[Message]) -> str | None: if not inputs: return None return serialize_messages(inputs) def _build_message( self, role: MessageRole, content: MessageContent, *, source: str | None = None, metadata: Dict[str, Any] | None = 
None, preserve_role: bool = False, ) -> Message: meta = dict(metadata or {}) if source: meta.setdefault("source", source) return Message(role=role, content=content, metadata=meta, preserve_role=preserve_role) def _clone_messages(self, messages: List[Message]) -> List[Message]: return [message.clone() for message in messages] def _ensure_not_cancelled(self) -> None: event = getattr(self.context, "cancel_event", None) if event is not None and event.is_set(): raise WorkflowCancelledError("Workflow execution cancelled")
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/node/executor/base.py", "license": "Apache License 2.0", "lines": 119, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:runtime/node/executor/factory.py
"""Factory helpers for node executors. Create and manage executors for different node types. """ from typing import Dict from runtime.node.executor.base import NodeExecutor, ExecutionContext from runtime.node.registry import iter_node_registrations class NodeExecutorFactory: """Factory class that instantiates executors for every node type.""" @staticmethod def create_executors(context: ExecutionContext, subgraphs: dict = None) -> Dict[str, NodeExecutor]: """Create executors for every registered node type. Args: context: Shared execution context subgraphs: Mapping of subgraph nodes (used by Subgraph executors) Returns: Mapping from node type to executor instance """ subgraphs = subgraphs or {} executors: Dict[str, NodeExecutor] = {} for name, registration in iter_node_registrations().items(): executors[name] = registration.build_executor(context, subgraphs=subgraphs) return executors @staticmethod def create_executor( node_type: str, context: ExecutionContext, subgraphs: dict = None ) -> NodeExecutor: """Create an executor for the requested node type. Args: node_type: Registered node type name context: Shared execution context subgraphs: Mapping of subgraph nodes (used by Subgraph executors) Returns: Executor instance for the requested type Raises: ValueError: If the node type is not supported """ subgraphs = subgraphs or {} registrations = iter_node_registrations() if node_type not in registrations: raise ValueError(f"Unsupported node type: {node_type}") return registrations[node_type].build_executor(context, subgraphs=subgraphs)
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/node/executor/factory.py", "license": "Apache License 2.0", "lines": 43, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
OpenBMB/ChatDev:runtime/node/executor/human_executor.py
"""Executor for Human nodes. Runs the human-in-the-loop interaction nodes. """ from typing import List from entity.configs import Node from entity.configs.node.human import HumanConfig from entity.messages import Message, MessageRole from runtime.node.executor.base import NodeExecutor class HumanNodeExecutor(NodeExecutor): """Executor used for human interaction nodes.""" def execute(self, node: Node, inputs: List[Message]) -> List[Message]: """Execute a human node. Args: node: Human node definition inputs: Input messages Returns: Result supplied by the human reviewer """ self._ensure_not_cancelled() if node.node_type != "human": raise ValueError(f"Node {node.id} is not a human node") human_config = node.as_config(HumanConfig) if not human_config: raise ValueError(f"Node {node.id} has no human configuration") human_task_description = human_config.description # Use prompt-style preview so humans see the same flattened text format # instead of raw message JSON. input_data = self._inputs_to_text(inputs) prompt_service = self.context.get_human_prompt_service() if prompt_service is None: raise RuntimeError("HumanPromptService is not configured; cannot execute human node") prompt_result = prompt_service.request( node.id, human_task_description or "", inputs=input_data, metadata={"node_type": "human"}, ) return [self._build_message( MessageRole.USER, prompt_result.as_message_content(), source=node.id, )]
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/node/executor/human_executor.py", "license": "Apache License 2.0", "lines": 42, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:runtime/node/executor/literal_executor.py
"""Literal node executor.""" from typing import List from entity.configs import Node from entity.configs.node.literal import LiteralNodeConfig from entity.messages import Message from runtime.node.executor.base import NodeExecutor class LiteralNodeExecutor(NodeExecutor): """Emit the configured literal message whenever triggered.""" def execute(self, node: Node, inputs: List[Message]) -> List[Message]: if node.node_type != "literal": raise ValueError(f"Node {node.id} is not a literal node") config = node.as_config(LiteralNodeConfig) if config is None: raise ValueError(f"Node {node.id} missing literal configuration") self._ensure_not_cancelled() return [self._build_message( role=config.role, content=config.content, source=node.id, preserve_role=True, )]
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/node/executor/literal_executor.py", "license": "Apache License 2.0", "lines": 21, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:runtime/node/executor/loop_counter_executor.py
"""Loop counter guard node executor.""" from typing import List, Dict, Any from entity.configs import Node from entity.configs.node.loop_counter import LoopCounterConfig from entity.messages import Message, MessageRole from runtime.node.executor.base import NodeExecutor class LoopCounterNodeExecutor(NodeExecutor): """Track loop iterations and emit output only after hitting the limit.""" STATE_KEY = "loop_counter" def execute(self, node: Node, inputs: List[Message]) -> List[Message]: config = node.as_config(LoopCounterConfig) if config is None: raise ValueError(f"Node {node.id} missing loop_counter configuration") state = self._get_state() counter = state.setdefault(node.id, {"count": 0}) counter["count"] += 1 count = counter["count"] if count < config.max_iterations: self.log_manager.debug( f"LoopCounter {node.id}: iteration {count}/{config.max_iterations} (suppress downstream)" ) return [] if config.reset_on_emit: counter["count"] = 0 content = config.message or f"Loop limit reached ({config.max_iterations})" metadata = { "loop_counter": { "count": count, "max": config.max_iterations, "reset_on_emit": config.reset_on_emit, } } self.log_manager.debug( f"LoopCounter {node.id}: iteration {count}/{config.max_iterations} reached limit, releasing output" ) return [Message( role=MessageRole.ASSISTANT, content=content, metadata=metadata, )] def _get_state(self) -> Dict[str, Dict[str, Any]]: return self.context.global_state.setdefault(self.STATE_KEY, {})
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/node/executor/loop_counter_executor.py", "license": "Apache License 2.0", "lines": 42, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:runtime/node/executor/passthrough_executor.py
"""Passthrough node executor.""" from typing import List from entity.configs import Node from entity.configs.node.passthrough import PassthroughConfig from entity.messages import Message, MessageRole from runtime.node.executor.base import NodeExecutor class PassthroughNodeExecutor(NodeExecutor): """Forward input messages without modifications.""" def execute(self, node: Node, inputs: List[Message]) -> List[Message]: if node.node_type != "passthrough": raise ValueError(f"Node {node.id} is not a passthrough node") config = node.as_config(PassthroughConfig) if config is None: raise ValueError(f"Node {node.id} missing passthrough configuration") if not inputs: warning_msg = f"Passthrough node '{node.id}' triggered without inputs" self.log_manager.warning(warning_msg, node_id=node.id, details={"input_count": 0}) return [Message(content="", role=MessageRole.USER)] if config.only_last_message: if len(inputs) > 1: self.log_manager.debug( f"Passthrough node '{node.id}' received {len(inputs)} inputs; forwarding the latest entry", node_id=node.id, details={"input_count": len(inputs)}, ) return [inputs[-1].clone()] else: return [msg.clone() for msg in inputs]
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/node/executor/passthrough_executor.py", "license": "Apache License 2.0", "lines": 28, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:runtime/node/executor/python_executor.py
"""Executor for Python code runner nodes.""" import os import re import subprocess import textwrap from dataclasses import dataclass from pathlib import Path from typing import List from entity.configs import Node from entity.configs.node.python_runner import PythonRunnerConfig from entity.messages import Message, MessageRole from runtime.node.executor.base import NodeExecutor _CODE_BLOCK_RE = re.compile(r"```(?P<lang>[a-zA-Z0-9_+-]*)?\s*\n(?P<code>.*?)```", re.DOTALL) @dataclass class _ExecutionResult: success: bool stdout: str stderr: str exit_code: int | None error: str | None = None class PythonNodeExecutor(NodeExecutor): """Execute inline Python code passed to the node.""" WORKSPACE_KEY = "python_workspace_root" COUNTER_KEY = "python_node_run_counters" def execute(self, node: Node, inputs: List[Message]) -> List[Message]: if node.node_type != "python": raise ValueError(f"Node {node.id} is not a python node") workspace = self._ensure_workspace_root() last_message = inputs[-1] if inputs else None code_payload = self._extract_code(last_message) if not code_payload: return [self._build_failure_message( node, workspace, error_text="No executable code segment found", )] script_path = self._write_script_file(node, workspace, code_payload) config = node.as_config(PythonRunnerConfig) if not config: raise ValueError(f"Node {node.id} missing PythonRunnerConfig") result = self._run_process(config, script_path, workspace, node) metadata = { "workspace": str(workspace), "script_path": str(script_path), } if result.success: if result.stderr: self.log_manager.debug( f"Python node {node.id} stderr", node_id=node.id, details={"stderr": result.stderr} ) return [self._build_message( role=MessageRole.ASSISTANT, content=result.stdout, source=node.id, metadata=metadata, )] error_text = result.error or "Script execution failed" return [self._build_failure_message( node, workspace, error_text=error_text, exit_code=result.exit_code, stderr=result.stderr, script_path=script_path, )] def 
_ensure_workspace_root(self) -> Path: root = self.context.global_state.setdefault(self.WORKSPACE_KEY, None) if root is None: graph_dir = self.context.global_state.get("graph_directory") if not graph_dir: raise RuntimeError("graph_directory missing from execution context") root = (Path(graph_dir) / "code_workspace").resolve() root.mkdir(parents=True, exist_ok=True) self.context.global_state[self.WORKSPACE_KEY] = str(root) else: root = Path(root).resolve() root.mkdir(parents=True, exist_ok=True) return root def _extract_code(self, message: Message | None) -> str: if not message: return "" raw = message.text_content() if not raw or not raw.strip(): return "" match = _CODE_BLOCK_RE.search(raw) code = match.group("code") if match else raw return textwrap.dedent(code).strip() def _write_script_file(self, node: Node, workspace: Path, code: str) -> Path: counters = self.context.global_state.setdefault(self.COUNTER_KEY, {}) safe_node_id = re.sub(r"[^0-9A-Za-z_\-]", "_", node.id) run_count = counters.get(node.id, 0) + 1 counters[node.id] = run_count suffix = f"_run-{run_count}" if run_count > 1 else "" filename = f"{safe_node_id}{suffix}.py" path = (workspace / filename).resolve() path.write_text(code + ("\n" if not code.endswith("\n") else ""), encoding="utf-8") return path def _run_process( self, config: PythonRunnerConfig, script_path: Path, workspace: Path, node: Node, ) -> _ExecutionResult: cmd = [config.interpreter] if config.args: cmd.extend(config.args) cmd.append(str(script_path)) env = os.environ.copy() env.update(config.env or {}) env.update( { "MAC_CODE_WORKSPACE": str(workspace), "MAC_CODE_SCRIPT": str(script_path), "MAC_NODE_ID": node.id, } ) try: completed = subprocess.run( cmd, cwd=str(workspace), capture_output=True, check=False, timeout=config.timeout_seconds, ) except subprocess.TimeoutExpired as exc: return _ExecutionResult( success=False, stdout="", stderr=exc.stdout.decode(config.encoding, errors="replace") if exc.stdout else "", exit_code=None, 
error=f"Script did not finish within {config.timeout_seconds}s", ) except FileNotFoundError: return _ExecutionResult( success=False, stdout="", stderr="", exit_code=None, error=f"Interpreter {config.interpreter} not found", ) stdout = completed.stdout.decode(config.encoding, errors="replace") stderr = completed.stderr.decode(config.encoding, errors="replace") return _ExecutionResult( success=completed.returncode == 0, stdout=stdout, stderr=stderr, exit_code=completed.returncode, ) def _build_failure_message( self, node: Node, workspace: Path, *, error_text: str, exit_code: int | None = None, stderr: str | None = None, script_path: Path | None = None, ) -> Message: metadata = { "workspace": str(workspace), } if script_path: metadata["script_path"] = str(script_path) if exit_code is not None: metadata["exit_code"] = exit_code if stderr: metadata["stderr"] = stderr content_lines = ["==CODE EXECUTION FAILED==", error_text] if exit_code is not None: content_lines.append(f"exit_code={exit_code}") if stderr: content_lines.append(f"stderr:\n{stderr}") return self._build_message( role=MessageRole.ASSISTANT, content="\n".join(content_lines), source=node.id, metadata=metadata, ) # workspace hook handled via ExecutionContext.workspace_hook
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/node/executor/python_executor.py", "license": "Apache License 2.0", "lines": 180, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:runtime/node/executor/subgraph_executor.py
"""Executor for subgraph nodes. Runs nested graph nodes inside the parent workflow. """ from typing import List import copy from entity.configs import Node from entity.configs.node.subgraph import SubgraphConfig from runtime.node.executor.base import NodeExecutor from entity.messages import Message, MessageRole class SubgraphNodeExecutor(NodeExecutor): """Subgraph node executor. Note: this executor needs access to ``GraphContext.subgraphs``. """ def __init__(self, context, subgraphs: dict): """Initialize the executor. Args: context: Execution context subgraphs: Mapping from node_id to ``GraphContext`` """ super().__init__(context) self.subgraphs = subgraphs def execute(self, node: Node, inputs: List[Message]) -> List[Message]: """Execute a subgraph node. Args: node: Subgraph node definition inputs: Input messages list Returns: Result produced by the subgraph """ if node.node_type != "subgraph": raise ValueError(f"Node {node.id} is not a subgraph node") subgraph_config = node.as_config(SubgraphConfig) if not subgraph_config: raise ValueError(f"Node {node.id} has no subgraph configuration") task_payload: List[Message] = self._clone_messages(inputs) if not task_payload: task_payload = [self._build_message(MessageRole.USER, "", source="SUBGRAPH")] input_data = self._inputs_to_text(task_payload) self.log_manager.debug( f"Subgraph processing for node {node.id}", node_id=node.id, details={ "input_size": len(str(input_data)), "input_result": input_data } ) # Retrieve the subgraph context if node.id not in self.subgraphs: raise ValueError(f"Subgraph for node {node.id} not found") subgraph = self.subgraphs[node.id] # Deep copy the subgraph to ensure isolation during parallel execution # process. Nodes in the subgraph (e.g. Start) hold state (inputs/outputs) # that must not be shared across threads. 
subgraph = copy.deepcopy(subgraph) # Execute the subgraph (requires importing ``GraphExecutor``) from workflow.graph import GraphExecutor executor = GraphExecutor.execute_graph(subgraph, task_prompt=task_payload) result_messages = executor.get_final_output_messages() final_results = [] if not result_messages: # Fallback for no output fallback = self._build_message( MessageRole.ASSISTANT, "", source=node.id, ) final_results.append(fallback) else: for msg in result_messages: result_message = msg.clone() meta = dict(result_message.metadata) meta.setdefault("source", node.id) result_message.metadata = meta final_results.append(result_message) self.log_manager.debug( f"Subgraph processing completed for node {node.id}", node_id=node.id, details=executor.log_manager.logs_to_dict() ) return final_results
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/node/executor/subgraph_executor.py", "license": "Apache License 2.0", "lines": 80, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:runtime/node/registry.py
"""Registry helpers for pluggable workflow node types.""" from dataclasses import dataclass, field from typing import Any, Callable, Dict, Type from schema_registry import register_node_schema from utils.registry import Registry, RegistryEntry, RegistryError node_registry = Registry("node_type") _BUILTINS_LOADED = False def _ensure_builtins_loaded() -> None: global _BUILTINS_LOADED if not _BUILTINS_LOADED: from importlib import import_module import_module("runtime.node.builtin_nodes") _BUILTINS_LOADED = True @dataclass(slots=True) class NodeCapabilities: default_role_field: str | None = None exposes_tools: bool = False resource_key: str | None = None resource_limit: int | None = None @dataclass(slots=True) class NodeRegistration: name: str config_cls: Type[Any] executor_cls: Type[Any] capabilities: NodeCapabilities = field(default_factory=NodeCapabilities) executor_factory: Callable[..., Any] | None = None summary: str | None = None def build_executor(self, context: Any, *, subgraphs: Dict[str, Any] | None = None) -> Any: if self.executor_factory: return self.executor_factory(context, subgraphs=subgraphs) return self.executor_cls(context) def register_node_type( name: str, *, config_cls: Type[Any], executor_cls: Type[Any], capabilities: NodeCapabilities | None = None, executor_factory: Callable[..., Any] | None = None, summary: str | None = None, ) -> None: if name in node_registry.names(): raise RegistryError(f"Node type '{name}' already registered") entry = NodeRegistration( name=name, config_cls=config_cls, executor_cls=executor_cls, capabilities=capabilities or NodeCapabilities(), executor_factory=executor_factory, summary=summary, ) node_registry.register(name, target=entry) register_node_schema(name, config_cls=config_cls, summary=summary) def get_node_registration(name: str) -> NodeRegistration: _ensure_builtins_loaded() entry: RegistryEntry = node_registry.get(name) registration = entry.load() if not isinstance(registration, NodeRegistration): raise 
RegistryError(f"Registry entry '{name}' is not a NodeRegistration") return registration def iter_node_registrations() -> Dict[str, NodeRegistration]: _ensure_builtins_loaded() return {name: entry.load() for name, entry in node_registry.items()}
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/node/registry.py", "license": "Apache License 2.0", "lines": 62, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:runtime/node/splitter.py
"""Split strategies for dynamic node execution. Provides different methods to split input messages into execution units. """ import json import re from abc import ABC, abstractmethod from typing import List, Any, Optional from entity.configs.dynamic_base import SplitConfig, RegexSplitConfig, JsonPathSplitConfig from entity.messages import Message, MessageRole class Splitter(ABC): """Abstract base class for input splitters.""" @abstractmethod def split(self, inputs: List[Message]) -> List[List[Message]]: """Split inputs into execution units. Args: inputs: Input messages to split Returns: List of message groups, each group is one execution unit """ pass class MessageSplitter(Splitter): """Split by message - each message becomes one execution unit.""" def split(self, inputs: List[Message]) -> List[List[Message]]: """Each input message becomes a separate unit.""" return [[msg] for msg in inputs] class RegexSplitter(Splitter): """Split by regex pattern matches.""" def __init__( self, pattern: str, *, group: str | int | None = None, case_sensitive: bool = True, multiline: bool = False, dotall: bool = False, on_no_match: str = "pass", ): """Initialize with regex pattern and options. Args: pattern: Regex pattern to match group: Capture group name or index. Defaults to entire match (0). case_sensitive: Whether the regex should be case sensitive. multiline: Enable multiline mode (re.MULTILINE). dotall: Enable dotall mode (re.DOTALL). on_no_match: Behavior when no match is found ('pass' or 'empty'). 
""" flags = 0 if not case_sensitive: flags |= re.IGNORECASE if multiline: flags |= re.MULTILINE if dotall: flags |= re.DOTALL self.pattern = re.compile(pattern, flags) self.group = group self.on_no_match = on_no_match def split(self, inputs: List[Message]) -> List[List[Message]]: """Split by finding all regex matches across all inputs.""" units: List[List[Message]] = [] for msg in inputs: text = msg.text_content() # Find all matches matches = list(self.pattern.finditer(text)) if not matches: # Handle no match case if self.on_no_match == "pass": units.append([msg]) elif self.on_no_match == "empty": # Return empty content unit_msg = Message( role=msg.role, content="", metadata={**msg.metadata, "split_source": "regex", "split_no_match": True}, ) units.append([unit_msg]) continue for match in matches: # Extract the appropriate group if self.group is not None: try: match_text = match.group(self.group) except (IndexError, re.error): match_text = match.group(0) else: match_text = match.group(0) if match_text is None: match_text = "" unit_msg = Message( role=msg.role, content=match_text, metadata={**msg.metadata, "split_source": "regex"}, ) units.append([unit_msg]) return units if units else [[msg] for msg in inputs] class JsonPathSplitter(Splitter): """Split by JSON array path extraction.""" def __init__(self, json_path: str): """Initialize with JSON path. 
Args: json_path: Simple dot-notation path to array (e.g., 'items', 'data.results') """ self.json_path = json_path def _extract_array(self, data: Any) -> List[Any]: """Extract array from data using simple dot notation path.""" if not self.json_path: if isinstance(data, list): return data return [data] parts = self.json_path.split(".") current = data for part in parts: if isinstance(current, dict): current = current.get(part) elif isinstance(current, list) and part.isdigit(): idx = int(part) current = current[idx] if idx < len(current) else None else: return [] if current is None: return [] if isinstance(current, list): return current return [current] def split(self, inputs: List[Message]) -> List[List[Message]]: """Split by extracting array items from JSON content.""" units: List[List[Message]] = [] for msg in inputs: text = msg.text_content() # Try to parse as JSON try: data = json.loads(text) items = self._extract_array(data) for item in items: if isinstance(item, (dict, list)): content = json.dumps(item, ensure_ascii=False) else: content = str(item) unit_msg = Message( role=msg.role, content=content, metadata={**msg.metadata, "split_source": "json_path"}, ) units.append([unit_msg]) except json.JSONDecodeError: # If not valid JSON, treat as single unit units.append([msg]) return units if units else [[msg] for msg in inputs] def create_splitter( split_type: str, pattern: Optional[str] = None, json_path: Optional[str] = None, *, group: str | int | None = None, case_sensitive: bool = True, multiline: bool = False, dotall: bool = False, on_no_match: str = "pass", ) -> Splitter: """Factory function to create appropriate splitter. 
Args: split_type: One of 'message', 'regex', 'json_path' pattern: Regex pattern (required for 'regex' type) json_path: JSON path (required for 'json_path' type) group: Capture group for regex (optional) case_sensitive: Case sensitivity for regex (default True) multiline: Multiline mode for regex (default False) dotall: Dotall mode for regex (default False) on_no_match: Behavior when no regex match ('pass' or 'empty') Returns: Configured Splitter instance Raises: ValueError: If required arguments are missing """ if split_type == "message": return MessageSplitter() elif split_type == "regex": if not pattern: raise ValueError("regex splitter requires 'pattern' argument") return RegexSplitter( pattern, group=group, case_sensitive=case_sensitive, multiline=multiline, dotall=dotall, on_no_match=on_no_match, ) elif split_type == "json_path": if not json_path: raise ValueError("json_path splitter requires 'json_path' argument") return JsonPathSplitter(json_path) else: raise ValueError(f"Unknown split type: {split_type}") def create_splitter_from_config(split_config: "SplitConfig") -> Splitter: """Create a splitter from a SplitConfig object. 
Args: split_config: The split configuration Returns: Configured Splitter instance """ if split_config.type == "message": return MessageSplitter() elif split_config.type == "regex": regex_config = split_config.as_split_config(RegexSplitConfig) if not regex_config: raise ValueError("Invalid regex split configuration") return RegexSplitter( regex_config.pattern, group=regex_config.group, case_sensitive=regex_config.case_sensitive, multiline=regex_config.multiline, dotall=regex_config.dotall, on_no_match=regex_config.on_no_match, ) elif split_config.type == "json_path": json_config = split_config.as_split_config(JsonPathSplitConfig) if not json_config: raise ValueError("Invalid json_path split configuration") return JsonPathSplitter(json_config.json_path) else: raise ValueError(f"Unknown split type: {split_config.type}") def group_messages(messages: List[Message], group_size: int) -> List[List[Message]]: """Group messages into batches for tree reduction. Args: messages: Messages to group group_size: Target size per group Returns: List of message groups. Last group may have fewer items. """ if not messages: return [] groups: List[List[Message]] = [] for i in range(0, len(messages), group_size): groups.append(messages[i:i + group_size]) return groups
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/node/splitter.py", "license": "Apache License 2.0", "lines": 234, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:runtime/sdk.py
"""SDK helpers for executing workflows from Python code."""

from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, Optional, Sequence, Union

from check.check import load_config
from entity.enums import LogLevel
from entity.graph_config import GraphConfig
from entity.messages import Message
from runtime.bootstrap.schema import ensure_schema_registry_populated
from utils.attachments import AttachmentStore
from utils.exceptions import ValidationError
from server.settings import YAML_DIR
from utils.task_input import TaskInputBuilder
from workflow.graph import GraphExecutor
from workflow.graph_context import GraphContext

# Root directory under which each run's output folder is created.
OUTPUT_ROOT = Path("WareHouse")


@dataclass
class WorkflowMetaInfo:
    """Metadata describing a completed workflow run."""

    session_name: str  # normalized session name the run executed under
    yaml_file: str  # resolved path of the workflow YAML that was run
    log_id: Optional[str]  # workflow id from the logger, if logging was active
    outputs: Optional[Dict[str, Any]]  # executor-collected named outputs
    token_usage: Optional[Dict[str, Any]]  # token accounting, if tracked
    output_dir: Path  # directory where the run wrote its artifacts


@dataclass
class WorkflowRunResult:
    """End-node message plus run metadata returned by :func:`run_workflow`."""

    final_message: Optional[Message]
    meta_info: WorkflowMetaInfo


def _normalize_session_name(yaml_path: Path, session_name: Optional[str]) -> str:
    """Return a non-empty session name, generating a timestamped default."""
    if session_name and session_name.strip():
        return session_name.strip()
    timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
    return f"sdk_{yaml_path.stem}_{timestamp}"


def _build_task_input(
    graph_context: GraphContext,
    prompt: str,
    attachments: Sequence[Union[str, Path]],
) -> Union[str, list[Message]]:
    """Return the raw prompt, or a message list embedding the attachments."""
    if not attachments:
        return prompt
    # Attachments are copied into the run's workspace so the workflow can read them.
    attachments_dir = graph_context.directory / "code_workspace" / "attachments"
    attachments_dir.mkdir(parents=True, exist_ok=True)
    store = AttachmentStore(attachments_dir)
    builder = TaskInputBuilder(store)
    normalized_paths = [str(Path(path).expanduser()) for path in attachments]
    return builder.build_from_file_paths(prompt, normalized_paths)


def _resolve_yaml_path(yaml_file: Union[str, Path]) -> Path:
    """Resolve ``yaml_file`` as absolute, CWD-relative, or YAML_DIR-relative."""
    candidate = Path(yaml_file).expanduser()
    if candidate.is_absolute():
        return candidate
    if candidate.exists():
        # A path valid from the current working directory wins.
        return candidate
    # Fall back to the configured YAML directory, anchored at the repo root.
    repo_root = Path(__file__).resolve().parents[1]
    yaml_root = YAML_DIR if YAML_DIR.is_absolute() else (repo_root / YAML_DIR)
    return (yaml_root / candidate).expanduser()


def run_workflow(
    yaml_file: Union[str, Path],
    *,
    task_prompt: str,
    attachments: Optional[Sequence[Union[str, Path]]] = None,
    session_name: Optional[str] = None,
    fn_module: Optional[str] = None,
    variables: Optional[Dict[str, Any]] = None,
    log_level: Optional[Union[LogLevel, str]] = None,
) -> WorkflowRunResult:
    """Run a workflow YAML and return the end-node message plus metadata.

    Raises:
        FileNotFoundError: if ``yaml_file`` cannot be resolved to an existing file.
        ValidationError: if both the task prompt and attachments are empty.
    """
    ensure_schema_registry_populated()
    yaml_path = _resolve_yaml_path(yaml_file)
    if not yaml_path.exists():
        raise FileNotFoundError(f"YAML file not found: {yaml_path}")
    attachments = attachments or []
    # A run needs some input: either a non-blank prompt or at least one attachment.
    if (not task_prompt or not task_prompt.strip()) and not attachments:
        raise ValidationError(
            "Task prompt cannot be empty",
            details={"task_prompt_provided": bool(task_prompt)},
        )
    design = load_config(yaml_path, fn_module=fn_module, vars_override=variables)
    normalized_session = _normalize_session_name(yaml_path, session_name)
    graph_config = GraphConfig.from_definition(
        design.graph,
        name=normalized_session,
        output_root=OUTPUT_ROOT,
        source_path=str(yaml_path),
        vars=design.vars,
    )
    if log_level:
        # Accept either a LogLevel enum member or its string name.
        resolved_level = LogLevel(log_level) if isinstance(log_level, str) else log_level
        graph_config.log_level = resolved_level
        graph_config.definition.log_level = resolved_level
    graph_context = GraphContext(config=graph_config)
    task_input = _build_task_input(graph_context, task_prompt, attachments)
    executor = GraphExecutor.execute_graph(graph_context, task_input)
    final_message = executor.get_final_output_message()
    logger = executor.log_manager.get_logger() if executor.log_manager else None
    log_id = logger.workflow_id if logger else None
    token_usage = executor.token_tracker.get_token_usage() if executor.token_tracker else None
    meta_info = WorkflowMetaInfo(
        session_name=normalized_session,
        yaml_file=str(yaml_path),
        log_id=log_id,
        outputs=executor.outputs,
        token_usage=token_usage,
        output_dir=graph_context.directory,
    )
    return WorkflowRunResult(final_message=final_message, meta_info=meta_info)
{ "repo_id": "OpenBMB/ChatDev", "file_path": "runtime/sdk.py", "license": "Apache License 2.0", "lines": 106, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:schema_registry/registry.py
"""Schema registries for entity-layer configuration classes.""" from dataclasses import dataclass, field from typing import Any, Dict, Mapping, MutableMapping, Type from entity.configs.base import BaseConfig class SchemaLookupError(RuntimeError): """Raised when a requested schema spec is not registered.""" class SchemaRegistrationError(RuntimeError): """Raised when schema registration is inconsistent.""" @dataclass class NodeSchemaSpec: name: str config_cls: Type["BaseConfig"] summary: str | None = None metadata: Dict[str, Any] = field(default_factory=dict) @dataclass class EdgeConditionSchemaSpec: name: str config_cls: Type["BaseConfig"] summary: str | None = None metadata: Dict[str, Any] = field(default_factory=dict) @dataclass class EdgeProcessorSchemaSpec: name: str config_cls: Type["BaseConfig"] summary: str | None = None metadata: Dict[str, Any] = field(default_factory=dict) @dataclass class MemoryStoreSchemaSpec: name: str config_cls: Type["BaseConfig"] summary: str | None = None metadata: Dict[str, Any] = field(default_factory=dict) @dataclass class ThinkingSchemaSpec: name: str config_cls: Type["BaseConfig"] summary: str | None = None metadata: Dict[str, Any] = field(default_factory=dict) @dataclass class ModelProviderSchemaSpec: name: str label: str | None = None summary: str | None = None metadata: Dict[str, Any] = field(default_factory=dict) _node_schemas: Dict[str, NodeSchemaSpec] = {} _edge_condition_schemas: Dict[str, EdgeConditionSchemaSpec] = {} _edge_processor_schemas: Dict[str, EdgeProcessorSchemaSpec] = {} _edge_processor_builtins_loaded = False _memory_store_schemas: Dict[str, MemoryStoreSchemaSpec] = {} _thinking_schemas: Dict[str, ThinkingSchemaSpec] = {} _model_provider_schemas: Dict[str, ModelProviderSchemaSpec] = {} def _update_metadata(target: MutableMapping[str, Any], new_items: Mapping[str, Any] | None) -> None: if not new_items: return target.update({key: value for key, value in new_items.items() if value is not None}) def 
register_node_schema( name: str, *, config_cls: Type["BaseConfig"], summary: str | None = None, metadata: Mapping[str, Any] | None = None, ) -> NodeSchemaSpec: spec = _node_schemas.get(name) if spec: if spec.config_cls is not config_cls: raise SchemaRegistrationError( f"Node schema '{name}' already registered with a different config class" ) if summary: spec.summary = summary _update_metadata(spec.metadata, metadata) return spec spec = NodeSchemaSpec(name=name, config_cls=config_cls, summary=summary) _update_metadata(spec.metadata, metadata) _node_schemas[name] = spec return spec def iter_node_schemas() -> Dict[str, NodeSchemaSpec]: return dict(_node_schemas) def get_node_schema(name: str) -> NodeSchemaSpec: try: return _node_schemas[name] except KeyError as exc: # pragma: no cover - defensive guard raise SchemaLookupError(f"Node schema '{name}' is not registered") from exc def register_edge_condition_schema( name: str, *, config_cls: Type["BaseConfig"], summary: str | None = None, metadata: Mapping[str, Any] | None = None, ) -> EdgeConditionSchemaSpec: spec = _edge_condition_schemas.get(name) if spec: if spec.config_cls is not config_cls: raise SchemaRegistrationError( f"Edge condition schema '{name}' already registered with a different config class" ) if summary: spec.summary = summary _update_metadata(spec.metadata, metadata) return spec spec = EdgeConditionSchemaSpec(name=name, config_cls=config_cls, summary=summary) _update_metadata(spec.metadata, metadata) _edge_condition_schemas[name] = spec return spec def iter_edge_condition_schemas() -> Dict[str, EdgeConditionSchemaSpec]: return dict(_edge_condition_schemas) def get_edge_condition_schema(name: str) -> EdgeConditionSchemaSpec: try: return _edge_condition_schemas[name] except KeyError as exc: # pragma: no cover raise SchemaLookupError(f"Edge condition schema '{name}' is not registered") from exc def register_edge_processor_schema( name: str, *, config_cls: Type["BaseConfig"], summary: str | None = None, 
metadata: Mapping[str, Any] | None = None, ) -> EdgeProcessorSchemaSpec: spec = _edge_processor_schemas.get(name) if spec: if spec.config_cls is not config_cls: raise SchemaRegistrationError( f"Edge processor schema '{name}' already registered with a different config class" ) if summary: spec.summary = summary _update_metadata(spec.metadata, metadata) return spec spec = EdgeProcessorSchemaSpec(name=name, config_cls=config_cls, summary=summary) _update_metadata(spec.metadata, metadata) _edge_processor_schemas[name] = spec return spec def iter_edge_processor_schemas() -> Dict[str, EdgeProcessorSchemaSpec]: _ensure_edge_processor_builtins_loaded() return dict(_edge_processor_schemas) def get_edge_processor_schema(name: str) -> EdgeProcessorSchemaSpec: _ensure_edge_processor_builtins_loaded() try: return _edge_processor_schemas[name] except KeyError as exc: # pragma: no cover raise SchemaLookupError(f"Edge processor schema '{name}' is not registered") from exc def register_memory_store_schema( name: str, *, config_cls: Type["BaseConfig"], summary: str | None = None, metadata: Mapping[str, Any] | None = None, ) -> MemoryStoreSchemaSpec: spec = _memory_store_schemas.get(name) if spec: if spec.config_cls is not config_cls: raise SchemaRegistrationError( f"Memory store schema '{name}' already registered with a different config class" ) if summary: spec.summary = summary _update_metadata(spec.metadata, metadata) return spec spec = MemoryStoreSchemaSpec(name=name, config_cls=config_cls, summary=summary) _update_metadata(spec.metadata, metadata) _memory_store_schemas[name] = spec return spec def iter_memory_store_schemas() -> Dict[str, MemoryStoreSchemaSpec]: return dict(_memory_store_schemas) def get_memory_store_schema(name: str) -> MemoryStoreSchemaSpec: try: return _memory_store_schemas[name] except KeyError as exc: # pragma: no cover raise SchemaLookupError(f"Memory store schema '{name}' is not registered") from exc def register_thinking_schema( name: str, *, config_cls: 
Type["BaseConfig"], summary: str | None = None, metadata: Mapping[str, Any] | None = None, ) -> ThinkingSchemaSpec: spec = _thinking_schemas.get(name) if spec: if spec.config_cls is not config_cls: raise SchemaRegistrationError( f"Thinking schema '{name}' already registered with a different config class" ) if summary: spec.summary = summary _update_metadata(spec.metadata, metadata) return spec spec = ThinkingSchemaSpec(name=name, config_cls=config_cls, summary=summary) _update_metadata(spec.metadata, metadata) _thinking_schemas[name] = spec return spec def iter_thinking_schemas() -> Dict[str, ThinkingSchemaSpec]: return dict(_thinking_schemas) def get_thinking_schema(name: str) -> ThinkingSchemaSpec: try: return _thinking_schemas[name] except KeyError as exc: # pragma: no cover raise SchemaLookupError(f"Thinking schema '{name}' is not registered") from exc def register_model_provider_schema( name: str, *, label: str | None = None, summary: str | None = None, metadata: Mapping[str, Any] | None = None, ) -> ModelProviderSchemaSpec: spec = _model_provider_schemas.get(name) if spec: if label: spec.label = label if summary: spec.summary = summary _update_metadata(spec.metadata, metadata) return spec spec = ModelProviderSchemaSpec(name=name, label=label, summary=summary) _update_metadata(spec.metadata, metadata) _model_provider_schemas[name] = spec return spec def iter_model_provider_schemas() -> Dict[str, ModelProviderSchemaSpec]: return dict(_model_provider_schemas) def get_model_provider_schema(name: str) -> ModelProviderSchemaSpec: try: return _model_provider_schemas[name] except KeyError as exc: # pragma: no cover raise SchemaLookupError(f"Model provider schema '{name}' is not registered") from exc def _ensure_edge_processor_builtins_loaded() -> None: global _edge_processor_builtins_loaded if _edge_processor_builtins_loaded: return try: import runtime.edge.processors.builtin_types # noqa: F401 except Exception: pass _edge_processor_builtins_loaded = True
{ "repo_id": "OpenBMB/ChatDev", "file_path": "schema_registry/registry.py", "license": "Apache License 2.0", "lines": 232, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:server/bootstrap.py
"""Application bootstrap helpers for the FastAPI server."""

from fastapi import FastAPI

from server import state
from server.config_schema_router import router as config_schema_router
from server.routes import ALL_ROUTERS
from utils.error_handler import add_exception_handlers
from utils.middleware import add_middleware


def init_app(app: FastAPI) -> None:
    """Wire exception handlers, middleware, global state, and every router into ``app``."""
    add_exception_handlers(app)
    add_middleware(app)
    state.init_state()
    # The config-schema router is mounted last, after all standard routes.
    for api_router in (*ALL_ROUTERS, config_schema_router):
        app.include_router(api_router)
{ "repo_id": "OpenBMB/ChatDev", "file_path": "server/bootstrap.py", "license": "Apache License 2.0", "lines": 15, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:server/config_schema_router.py
"""FastAPI router for dynamic configuration schema endpoints."""

from typing import Any, Dict, List, Mapping

import yaml
from fastapi import APIRouter, HTTPException
from pydantic import BaseModel, Field

from entity.config_loader import load_design_from_mapping
from entity.configs import ConfigError
from utils.schema_exporter import build_schema_response, SchemaResolutionError

router = APIRouter(prefix="/api/config", tags=["config-schema"])


class SchemaRequest(BaseModel):
    breadcrumbs: List[Mapping[str, Any]] | None = Field(
        default=None,
        description="Breadcrumb path starting from DesignConfig, e.g. [{\"node\":\"DesignConfig\",\"field\":\"graph\"}]",
    )


class SchemaValidateRequest(SchemaRequest):
    document: str = Field(..., description="Full YAML/JSON content")


def _resolve_schema(breadcrumbs: List[Mapping[str, Any]] | None) -> Dict[str, Any] | None:
    """Best-effort schema lookup: empty or unresolvable breadcrumbs yield None."""
    if not breadcrumbs:
        return None
    try:
        return build_schema_response(breadcrumbs)
    except SchemaResolutionError:
        return None


@router.post("/schema")
def get_schema(request: SchemaRequest) -> Dict[str, Any]:
    """Resolve and return the schema at the requested breadcrumb path (422 on failure)."""
    try:
        return build_schema_response(request.breadcrumbs)
    except SchemaResolutionError as exc:
        raise HTTPException(status_code=422, detail={"message": str(exc)}) from exc


@router.post("/schema/validate")
def validate_document(request: SchemaValidateRequest) -> Dict[str, Any]:
    """Parse the submitted document and validate it as a workflow design."""
    try:
        document_root = yaml.safe_load(request.document)
    except yaml.YAMLError as exc:
        raise HTTPException(status_code=400, detail={"message": "invalid_yaml", "error": str(exc)}) from exc

    if not isinstance(document_root, Mapping):
        raise HTTPException(status_code=422, detail={"message": "document_root_not_mapping"})

    try:
        load_design_from_mapping(document_root)
    except ConfigError as exc:
        # Report the failure together with the (optional) schema for the editor.
        return {
            "valid": False,
            "error": str(exc),
            "path": exc.path,
            "schema": _resolve_schema(request.breadcrumbs),
        }

    return {"valid": True, "schema": _resolve_schema(request.breadcrumbs)}


__all__ = ["router"]
{ "repo_id": "OpenBMB/ChatDev", "file_path": "server/config_schema_router.py", "license": "Apache License 2.0", "lines": 51, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:server/models.py
"""Pydantic models shared across server routes."""

from typing import List, Literal, Optional

from pydantic import BaseModel, constr


class WorkflowRequest(BaseModel):
    """Request body for POST /api/workflow/execute."""

    yaml_file: str  # workflow definition to run
    task_prompt: str  # user task driving the workflow
    session_id: Optional[str] = None  # existing websocket session to attach to
    attachments: Optional[List[str]] = None  # attachment ids uploaded beforehand
    log_level: Literal["INFO", "DEBUG"] = "INFO"  # validated to these two values


class WorkflowUploadContentRequest(BaseModel):
    """Create a new workflow file from inline content."""

    filename: str
    content: str


class WorkflowUpdateContentRequest(BaseModel):
    """Overwrite an existing workflow file's content."""

    content: str


class WorkflowRenameRequest(BaseModel):
    """Rename an existing workflow file."""

    new_filename: str


class WorkflowCopyRequest(BaseModel):
    """Copy an existing workflow file to a new name."""

    new_filename: str


class VueGraphContentPayload(BaseModel):
    """Inline Vue graph content keyed by a bounded, stripped filename."""

    filename: constr(strip_whitespace=True, min_length=1, max_length=255)
    content: str
{ "repo_id": "OpenBMB/ChatDev", "file_path": "server/models.py", "license": "Apache License 2.0", "lines": 21, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:server/routes/artifacts.py
import asyncio
from pathlib import Path
from typing import List, Optional

from fastapi import APIRouter, HTTPException, Query
from fastapi.responses import StreamingResponse

from server.state import get_websocket_manager
from utils.attachments import encode_file_to_data_uri

router = APIRouter()

# Largest artifact that will be inlined as a data URI in the metadata response.
MAX_FILE_SIZE = 20 * 1024 * 1024  # 20 MB


def _split_csv(value: Optional[str]) -> Optional[List[str]]:
    """Split a comma-separated query value; None/blank input yields None."""
    if not value:
        return None
    parts = [part.strip() for part in value.split(",")]
    filtered = [part for part in parts if part]
    return filtered or None


def _get_session_and_queue(session_id: str):
    """Resolve the session's manager and artifact queue, or raise 404."""
    manager = get_websocket_manager()
    session = manager.session_store.get_session(session_id)
    if not session:
        raise HTTPException(status_code=404, detail="Session not found")
    queue = session.artifact_queue
    if queue is None:
        raise HTTPException(status_code=404, detail="Artifact stream not available")
    return manager, queue


@router.get("/api/sessions/{session_id}/artifact-events")
async def poll_artifact_events(
    session_id: str,
    wait_seconds: float = Query(25.0, ge=0.0, le=60.0),
    after: Optional[int] = Query(None, ge=0),
    include_mime: Optional[str] = Query(None),
    include_ext: Optional[str] = Query(None),
    max_size: Optional[int] = Query(None, gt=0),
    limit: int = Query(25, ge=1, le=100),
):
    """Long-poll for new artifact events on a session's queue.

    ``after`` is a cursor from a previous response; MIME/extension filters are
    comma-separated lists.
    """
    manager, queue = _get_session_and_queue(session_id)
    include_mime_list = _split_csv(include_mime)
    include_ext_list = _split_csv(include_ext)
    # wait_for_events blocks, so run it off the event loop in a worker thread.
    events, next_cursor, timed_out = await asyncio.to_thread(
        queue.wait_for_events,
        after=after,
        include_mime=include_mime_list,
        include_ext=include_ext_list,
        max_size=max_size,
        limit=limit,
        timeout=wait_seconds,
    )
    payload = {
        "events": [event.to_dict() for event in events],
        "next_cursor": next_cursor,
        "timed_out": timed_out,
        # More events remain if the queue advanced past the returned cursor.
        "has_more": queue.last_sequence > (next_cursor or 0),
    }
    return payload


@router.get("/api/sessions/{session_id}/artifacts/{artifact_id}")
async def get_artifact(
    session_id: str,
    artifact_id: str,
    mode: str = Query("meta", pattern="^(meta|stream)$"),
    download: bool = Query(False),
):
    """Return artifact metadata (``meta``) or stream its bytes (``stream``)."""
    # The lookup doubles as session validation; the queue itself is unused here.
    manager, _ = _get_session_and_queue(session_id)
    store = manager.attachment_service.get_attachment_store(session_id)
    record = store.get(artifact_id)
    if not record:
        raise HTTPException(status_code=404, detail="Artifact not found")
    ref = record.ref
    if mode == "stream":
        local_path = ref.local_path
        if not local_path:
            raise HTTPException(status_code=404, detail="Artifact content unavailable")
        path = Path(local_path)
        if not path.exists():
            raise HTTPException(status_code=404, detail="Artifact file missing")
        media_type = ref.mime_type or "application/octet-stream"
        # ``download`` forces a save dialog instead of inline display.
        disposition = "attachment" if download else "inline"
        headers = {"Content-Disposition": f'{disposition}; filename="{ref.name}"'}
        return StreamingResponse(path.open("rb"), media_type=media_type, headers=headers)
    data_uri = ref.data_uri
    # Inline small on-disk artifacts as a data URI when none was precomputed.
    if not data_uri and ref.local_path and (ref.size or 0) <= MAX_FILE_SIZE:
        local_path = Path(ref.local_path)
        if local_path.exists():
            data_uri = encode_file_to_data_uri(local_path, ref.mime_type or "application/octet-stream")
    return {
        "artifact_id": artifact_id,
        "name": ref.name,
        "mime_type": ref.mime_type,
        "size": ref.size,
        "sha256": ref.sha256,
        "data_uri": data_uri,
        "local_path": ref.local_path,
        "extra": record.extra,
    }
{ "repo_id": "OpenBMB/ChatDev", "file_path": "server/routes/artifacts.py", "license": "Apache License 2.0", "lines": 92, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:server/routes/execute.py
import asyncio

from fastapi import APIRouter, HTTPException

from entity.enums import LogLevel
from server.models import WorkflowRequest
from server.state import ensure_known_session
from utils.exceptions import ValidationError, WorkflowExecutionError
from utils.structured_logger import get_server_logger, LogType

router = APIRouter()


@router.post("/api/workflow/execute")
async def execute_workflow(request: WorkflowRequest):
    """Start a workflow run in the background and acknowledge immediately."""
    try:
        manager = ensure_known_session(request.session_id, require_connection=True)
        # log_level = LogLevel(request.log_level) if request.log_level else None
        # NOTE(review): with the conversion above commented out, log_level is
        # always None and the ValueError handler below looks unreachable —
        # confirm whether per-request log levels should be re-enabled.
        log_level = None
    except ValueError:
        raise HTTPException(status_code=400, detail="log_level must be either DEBUG or INFO")
    try:
        # Fire-and-forget: the run executes on the event loop after we return,
        # so only synchronous scheduling errors are caught below.
        asyncio.create_task(
            manager.workflow_run_service.start_workflow(
                request.session_id,
                request.yaml_file,
                request.task_prompt,
                manager,
                attachments=request.attachments,
                log_level=log_level,
            )
        )
        logger = get_server_logger()
        logger.info(
            "Workflow execution started",
            log_type=LogType.WORKFLOW,
            session_id=request.session_id,
            yaml_file=request.yaml_file,
            task_prompt_length=len(request.task_prompt or ""),
        )
        return {
            "status": "started",
            "session_id": request.session_id,
            "message": "Workflow execution started",
        }
    except ValidationError as exc:
        raise HTTPException(status_code=400, detail=str(exc))
    except Exception as exc:
        logger = get_server_logger()
        logger.log_exception(exc, "Failed to start workflow execution")
        raise WorkflowExecutionError(f"Failed to start workflow: {exc}")
{ "repo_id": "OpenBMB/ChatDev", "file_path": "server/routes/execute.py", "license": "Apache License 2.0", "lines": 46, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:server/routes/health.py
from fastapi import APIRouter

from utils.structured_logger import get_server_logger, LogType

router = APIRouter()


@router.get("/health")
async def health_check():
    """Overall service health probe; each hit is logged for request accounting."""
    get_server_logger().info("Health check requested", log_type=LogType.REQUEST)
    return {"status": "healthy"}


@router.get("/health/live")
async def liveness_check():
    """Liveness probe: the process is up."""
    return {"status": "alive"}


@router.get("/health/ready")
async def readiness_check():
    """Readiness probe: the service can take traffic."""
    return {"status": "ready"}
{ "repo_id": "OpenBMB/ChatDev", "file_path": "server/routes/health.py", "license": "Apache License 2.0", "lines": 14, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:server/routes/sessions.py
import atexit
import re
import shutil
import tempfile
from pathlib import Path

from fastapi import APIRouter, HTTPException
from fastapi.responses import FileResponse

from server.settings import WARE_HOUSE_DIR
from utils.exceptions import ResourceNotFoundError, ValidationError
from utils.structured_logger import get_server_logger, LogType

router = APIRouter()


@router.get("/api/sessions/{session_id}/download")
async def download_session(session_id: str):
    """Package a session's warehouse directory as a zip and stream it back.

    Returns 400 for malformed ids, 404 when the session directory does not
    exist, and 500 when archiving fails.
    """
    try:
        # Strict allow-list on the id prevents path traversal into the warehouse.
        if not re.match(r"^[a-zA-Z0-9_-]+$", session_id):
            logger = get_server_logger()
            logger.log_security_event(
                "INVALID_SESSION_ID_FORMAT",
                f"Invalid session_id format: {session_id}",
                details={"received_session_id": session_id},
            )
            raise ValidationError(
                "Invalid session_id: only letters, digits, underscores, and hyphens are allowed",
                field="session_id",
            )
        dir_name = f"session_{session_id}"
        session_path = WARE_HOUSE_DIR / dir_name
        if not session_path.exists() or not session_path.is_dir():
            raise ResourceNotFoundError(
                "Session directory not found",
                resource_type="session",
                resource_id=session_id,
            )
        # Reserve a unique .zip path; the file is closed before make_archive writes it.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".zip") as tmp_file:
            zip_path = Path(tmp_file.name)
        # make_archive appends ".zip" itself, so pass the suffix-less base name.
        archive_base = zip_path.with_suffix("")
        try:
            shutil.make_archive(str(archive_base), "zip", root_dir=WARE_HOUSE_DIR, base_dir=dir_name)
        except Exception as exc:
            if zip_path.exists():
                zip_path.unlink()
            logger = get_server_logger()
            logger.log_exception(exc, f"Failed to create zip archive for session: {session_id}")
            raise HTTPException(status_code=500, detail="Failed to create zip archive")
        logger = get_server_logger()
        logger.info(
            "Session download prepared",
            log_type=LogType.WORKFLOW,
            session_id=session_id,
            archive_path=str(zip_path),
        )

        def cleanup_zip():
            # Remove the temp archive when the process exits.
            if zip_path.exists():
                zip_path.unlink()

        # NOTE(review): one atexit callback is registered per download and they
        # accumulate for the process lifetime — consider FastAPI BackgroundTasks
        # for per-response cleanup instead.
        atexit.register(cleanup_zip)
        return FileResponse(
            path=zip_path,
            filename=f"{dir_name}.zip",
            media_type="application/zip",
            headers={"Content-Disposition": f"attachment; filename={dir_name}.zip"},
        )
    except ValidationError as exc:
        raise HTTPException(status_code=400, detail=str(exc))
    except ResourceNotFoundError:
        raise HTTPException(status_code=404, detail="Session directory not found")
    except HTTPException:
        # Re-raise HTTP errors produced above untouched.
        raise
    except Exception as exc:
        logger = get_server_logger()
        logger.log_exception(exc, f"Unexpected error during session download: {session_id}")
        raise HTTPException(status_code=500, detail="Failed to download session")
{ "repo_id": "OpenBMB/ChatDev", "file_path": "server/routes/sessions.py", "license": "Apache License 2.0", "lines": 71, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:server/routes/uploads.py
from fastapi import APIRouter, File, HTTPException, UploadFile

from server.state import ensure_known_session
from utils.exceptions import ValidationError
from utils.structured_logger import get_server_logger, LogType

router = APIRouter()


@router.post("/api/uploads/{session_id}")
async def upload_attachment(session_id: str, file: UploadFile = File(...)):
    """Store an uploaded file for ``session_id`` and describe the stored attachment."""
    try:
        session_manager = ensure_known_session(session_id, require_connection=False)
    except ValidationError as exc:
        raise HTTPException(status_code=400, detail=str(exc))

    try:
        record = await session_manager.attachment_service.save_upload_file(session_id, file)
    except ValidationError:
        raise HTTPException(status_code=400, detail="Session not connected")
    except Exception as exc:
        get_server_logger().error(
            "Failed to save attachment",
            log_type=LogType.REQUEST,
            session_id=session_id,
            error=str(exc),
        )
        raise HTTPException(status_code=500, detail="Failed to store attachment")

    stored = record.ref
    return {key: getattr(stored, key) for key in ("attachment_id", "name", "mime_type", "size")}


@router.get("/api/uploads/{session_id}")
async def list_attachments(session_id: str):
    """List the attachment manifests recorded for ``session_id``."""
    try:
        session_manager = ensure_known_session(session_id, require_connection=False)
    except ValidationError as exc:
        raise HTTPException(status_code=400, detail=str(exc))
    return {"attachments": session_manager.attachment_service.list_attachment_manifests(session_id)}
{ "repo_id": "OpenBMB/ChatDev", "file_path": "server/routes/uploads.py", "license": "Apache License 2.0", "lines": 39, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:server/routes/vuegraphs.py
from fastapi import APIRouter, HTTPException

from server.models import VueGraphContentPayload
from server.services.vuegraphs_storage import fetch_vuegraph_content, save_vuegraph_content
from utils.structured_logger import get_server_logger, LogType

router = APIRouter()


@router.post("/api/vuegraphs/upload/content")
async def upload_vuegraph_content(payload: VueGraphContentPayload):
    """Persist Vue graph content under the payload's filename."""
    logger = get_server_logger()
    try:
        save_vuegraph_content(payload.filename, payload.content)
    except Exception as exc:
        logger.error(
            "Failed to persist Vue graph content",
            log_type=LogType.ERROR,
            filename=payload.filename,
            error=str(exc),
        )
        raise HTTPException(status_code=500, detail="Unable to save graph content")
    logger.info(
        "Vue graph content saved",
        log_type=LogType.REQUEST,
        filename=payload.filename,
    )
    return {"filename": payload.filename, "status": "saved"}


# Fix: the route template previously held a literal placeholder instead of the
# ``{filename}`` path parameter, so FastAPI could never bind ``filename`` from
# the request URL.
@router.get("/api/vuegraphs/{filename}")
async def get_vuegraph_content(filename: str):
    """Fetch stored Vue graph content by filename (404 when absent)."""
    logger = get_server_logger()
    try:
        content = fetch_vuegraph_content(filename)
    except Exception as exc:
        logger.error(
            "Failed to load Vue graph content",
            log_type=LogType.ERROR,
            filename=filename,
            error=str(exc),
        )
        raise HTTPException(status_code=500, detail="Unable to read graph content")
    if content is None:
        raise HTTPException(status_code=404, detail="Graph content not found")
    logger.info(
        "Vue graph content fetched",
        log_type=LogType.REQUEST,
        filename=filename,
    )
    return {"filename": filename, "content": content}
{ "repo_id": "OpenBMB/ChatDev", "file_path": "server/routes/vuegraphs.py", "license": "Apache License 2.0", "lines": 45, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:server/routes/websocket.py
"""WebSocket endpoint routing."""

from fastapi import APIRouter, WebSocket, WebSocketDisconnect

from server.state import get_websocket_manager

router = APIRouter()


@router.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
    """Accept a client connection and pump its messages until it disconnects."""
    manager = get_websocket_manager()
    session_id = await manager.connect(websocket)
    try:
        # Relay every incoming frame to the manager for this session.
        while True:
            await manager.handle_message(session_id, await websocket.receive_text())
    except WebSocketDisconnect:
        manager.disconnect(session_id)
{ "repo_id": "OpenBMB/ChatDev", "file_path": "server/routes/websocket.py", "license": "Apache License 2.0", "lines": 14, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:server/routes/workflows.py
from fastapi import APIRouter, HTTPException

from server.models import (
    WorkflowCopyRequest,
    WorkflowRenameRequest,
    WorkflowUpdateContentRequest,
    WorkflowUploadContentRequest,
)
from server.services.workflow_storage import (
    copy_workflow,
    persist_workflow,
    rename_workflow,
    validate_workflow_content,
    validate_workflow_filename,
)
from server.settings import YAML_DIR
from utils.exceptions import (
    ResourceConflictError,
    ResourceNotFoundError,
    SecurityError,
    ValidationError,
    WorkflowExecutionError,
)
from utils.structured_logger import get_server_logger, LogType

router = APIRouter()

# BUG FIX (applied throughout this module): parameterized route paths and several
# f-strings contained the literal text "(unknown)" where the "{filename}"
# placeholder belongs. The helper below calls success_message.format(filename=...),
# and the route handlers declare a `filename: str` path parameter, so "{filename}"
# is the only value that makes these templates and paths work.


def _persist_workflow_from_content(
    filename: str,
    content: str,
    *,
    allow_overwrite: bool,
    action: str,
    success_message: str,
):
    """Validate workflow YAML content and persist it under YAML_DIR.

    Args:
        filename: Requested workflow file name (sanitized by validation).
        content: Raw YAML text to store.
        allow_overwrite: False for create (409 if the file already exists),
            True for update (404 if the file is missing).
        action: Short verb used in log/error messages ("upload" / "update").
        success_message: Template containing "{filename}" for the response.

    Returns:
        A status dict with the sanitized filename and a success message.

    Raises:
        HTTPException: 409/404 on overwrite conflicts.
        ValidationError: Propagated from content validation.
        WorkflowExecutionError: Wraps any unexpected failure.
    """
    try:
        safe_filename, yaml_content = validate_workflow_content(filename.strip(), content)
        save_path = YAML_DIR / safe_filename
        if save_path.exists() and not allow_overwrite:
            raise HTTPException(status_code=409, detail="Workflow already exists; use the update API to overwrite")
        if not save_path.exists() and allow_overwrite:
            raise HTTPException(status_code=404, detail="Workflow file not found")
        persist_workflow(safe_filename, content, yaml_content, action=action, directory=YAML_DIR)
        return {
            "status": "success",
            "filename": safe_filename,
            "message": success_message.format(filename=safe_filename),
        }
    except ValidationError:
        raise
    except HTTPException:
        raise
    except Exception as exc:
        logger = get_server_logger()
        logger.log_exception(exc, f"Unexpected error during workflow {action}")
        raise WorkflowExecutionError(f"Failed to {action} workflow: {exc}")


@router.get("/api/workflows")
async def list_workflows():
    """List every *.yaml workflow file under YAML_DIR."""
    if not YAML_DIR.exists():
        return {"workflows": []}
    return {"workflows": [file.name for file in YAML_DIR.glob("*.yaml")]}


@router.post("/api/workflows/upload/content")
async def upload_workflow_content(request: WorkflowUploadContentRequest):
    """Create a new workflow from raw YAML content (409 if it already exists)."""
    return _persist_workflow_from_content(
        request.filename,
        request.content,
        allow_overwrite=False,
        action="upload",
        success_message="Workflow {filename} created successfully from content",
    )


@router.put("/api/workflows/{filename}")
async def update_workflow_content(filename: str, request: WorkflowUpdateContentRequest):
    """Overwrite an existing workflow's content (404 if it does not exist)."""
    return _persist_workflow_from_content(
        filename,
        request.content,
        allow_overwrite=True,
        action="update",
        success_message="Workflow {filename} updated successfully",
    )


@router.delete("/api/workflows/{filename}")
async def delete_workflow(filename: str):
    """Delete a workflow file after validating its name."""
    try:
        safe_filename = validate_workflow_filename(filename, require_yaml_extension=True)
        file_path = YAML_DIR / safe_filename
        if not file_path.exists() or not file_path.is_file():
            raise ResourceNotFoundError(
                "Workflow file not found",
                resource_type="workflow",
                resource_id=safe_filename,
            )
        try:
            file_path.unlink()
        except Exception as exc:
            logger = get_server_logger()
            logger.log_exception(exc, f"Failed to delete workflow file: {safe_filename}")
            # NOTE(review): this WorkflowExecutionError falls through to the
            # outer `except Exception` below and gets re-wrapped — confirm
            # whether double wrapping is intended.
            raise WorkflowExecutionError("Failed to delete workflow file", details={"filename": safe_filename})
        logger = get_server_logger()
        logger.info(
            "Workflow file deleted",
            log_type=LogType.WORKFLOW,
            filename=safe_filename,
        )
        return {
            "status": "deleted",
            "filename": safe_filename,
            "message": f"Workflow '{safe_filename}' deleted successfully",
        }
    except ValidationError:
        raise
    except SecurityError:
        raise
    except ResourceNotFoundError:
        raise
    except Exception as exc:
        logger = get_server_logger()
        logger.log_exception(exc, f"Unexpected error deleting workflow: {filename}")
        raise WorkflowExecutionError(f"Failed to delete workflow: {exc}")


@router.post("/api/workflows/{filename}/rename")
async def rename_workflow_file(filename: str, request: WorkflowRenameRequest):
    """Rename a workflow file; validation/conflict errors propagate typed."""
    try:
        rename_workflow(filename, request.new_filename, directory=YAML_DIR)
        return {
            "status": "success",
            "source": validate_workflow_filename(filename, require_yaml_extension=True),
            "target": validate_workflow_filename(request.new_filename, require_yaml_extension=True),
            "message": f"Workflow renamed to '{request.new_filename}' successfully",
        }
    except ValidationError:
        raise
    except SecurityError:
        raise
    except ResourceConflictError:
        raise
    except ResourceNotFoundError:
        raise
    except Exception as exc:
        logger = get_server_logger()
        logger.log_exception(exc, f"Unexpected error renaming workflow: {filename}")
        raise WorkflowExecutionError(f"Failed to rename workflow: {exc}")


@router.post("/api/workflows/{filename}/copy")
async def copy_workflow_file(filename: str, request: WorkflowCopyRequest):
    """Copy a workflow file to a new name; typed errors propagate."""
    try:
        copy_workflow(filename, request.new_filename, directory=YAML_DIR)
        return {
            "status": "success",
            "source": validate_workflow_filename(filename, require_yaml_extension=True),
            "target": validate_workflow_filename(request.new_filename, require_yaml_extension=True),
            "message": f"Workflow copied to '{request.new_filename}' successfully",
        }
    except ValidationError:
        raise
    except SecurityError:
        raise
    except ResourceConflictError:
        raise
    except ResourceNotFoundError:
        raise
    except Exception as exc:
        logger = get_server_logger()
        logger.log_exception(exc, f"Unexpected error copying workflow: {filename}")
        raise WorkflowExecutionError(f"Failed to copy workflow: {exc}")


@router.get("/api/workflows/{filename}")
async def get_workflow_raw_content(filename: str):
    """Return the raw text content of a workflow file."""
    try:
        safe_filename = validate_workflow_filename(filename, require_yaml_extension=True)
        file_path = YAML_DIR / safe_filename
        if not file_path.exists() or not file_path.is_file():
            raise ResourceNotFoundError(
                "Workflow file not found",
                resource_type="workflow",
                resource_id=safe_filename,
            )
        with open(file_path, "r", encoding="utf-8") as handle:
            raw_content = handle.read()
        logger = get_server_logger()
        logger.info("Workflow file content retrieved", log_type=LogType.WORKFLOW, filename=safe_filename)
        return {"content": raw_content}
    except ValidationError:
        raise
    except SecurityError:
        raise
    except ResourceNotFoundError:
        raise
    except Exception as exc:
        logger = get_server_logger()
        logger.log_exception(exc, f"Unexpected error retrieving workflow: {filename}")
        raise WorkflowExecutionError(f"Failed to retrieve workflow: {exc}")
{ "repo_id": "OpenBMB/ChatDev", "file_path": "server/routes/workflows.py", "license": "Apache License 2.0", "lines": 184, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
OpenBMB/ChatDev:server/services/artifact_dispatcher.py
"""Utilities to distribute artifact events to internal consumers.""" import logging from typing import Sequence from server.services.artifact_events import ArtifactEvent from server.services.session_store import WorkflowSessionStore from workflow.hooks.workspace_artifact import WorkspaceArtifact class ArtifactDispatcher: """Persists artifact events and optionally mirrors them to WebSocket clients.""" def __init__( self, session_id: str, session_store: WorkflowSessionStore, websocket_manager=None, ) -> None: self.session_id = session_id self.session_store = session_store self.websocket_manager = websocket_manager self.logger = logging.getLogger(__name__) def emit_workspace_artifacts(self, artifacts: Sequence[WorkspaceArtifact]) -> None: if not artifacts: return events = [self._workspace_to_event(artifact) for artifact in artifacts] self.emit(events) def emit(self, events: Sequence[ArtifactEvent]) -> None: if not events: return queue = self.session_store.get_artifact_queue(self.session_id) if not queue: self.logger.debug("Artifact queue missing for session %s", self.session_id) return queue.append_many(events) if self.websocket_manager: payload = { "type": "artifact_created", "data": { "session_id": self.session_id, "events": [event.to_dict() for event in events], }, } try: self.websocket_manager.send_message_sync(self.session_id, payload) except Exception as exc: self.logger.warning("Failed to broadcast artifact events: %s", exc) def _workspace_to_event(self, artifact: WorkspaceArtifact) -> ArtifactEvent: return ArtifactEvent( node_id=artifact.node_id, attachment_id=artifact.attachment_id, file_name=artifact.file_name, relative_path=artifact.relative_path, workspace_path=artifact.absolute_path, mime_type=artifact.mime_type, size=artifact.size, sha256=artifact.sha256, data_uri=artifact.data_uri, created_at=artifact.created_at, change_type=artifact.change_type, extra=artifact.extra, )
{ "repo_id": "OpenBMB/ChatDev", "file_path": "server/services/artifact_dispatcher.py", "license": "Apache License 2.0", "lines": 58, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
OpenBMB/ChatDev:server/services/artifact_events.py
"""Artifact event queue utilities used to expose workflow-produced files.""" import threading import time import uuid from collections import deque from dataclasses import dataclass, field from pathlib import Path from typing import Any, Deque, Dict, Iterable, List, Optional, Sequence @dataclass class ArtifactEvent: """Represents a single file artifact surfaced to the frontend.""" node_id: str attachment_id: str file_name: str relative_path: str workspace_path: str mime_type: Optional[str] size: Optional[int] sha256: Optional[str] data_uri: Optional[str] created_at: float = field(default_factory=lambda: time.time()) event_id: str = field(default_factory=lambda: uuid.uuid4().hex) sequence: int = 0 change_type: str = "created" extra: Dict[str, Any] = field(default_factory=dict) def to_dict(self) -> Dict[str, Any]: return { "event_id": self.event_id, "sequence": self.sequence, "node_id": self.node_id, "attachment_id": self.attachment_id, "file_name": self.file_name, "relative_path": self.relative_path, "workspace_path": self.workspace_path, "mime_type": self.mime_type, "size": self.size, "sha256": self.sha256, "data_uri": self.data_uri, "created_at": self.created_at, "change_type": self.change_type, "extra": self.extra, } def matches_filter( self, *, include_mime: Optional[Sequence[str]] = None, include_ext: Optional[Sequence[str]] = None, max_size: Optional[int] = None, ) -> bool: if max_size is not None and self.size is not None and self.size > max_size: return False if include_mime: mime = (self.mime_type or "").lower() if mime and any(mime.startswith(prefix.lower()) for prefix in include_mime): pass elif mime in (m.lower() for m in include_mime): pass else: return False if include_ext: suffix = Path(self.file_name).suffix.lower() if suffix.startswith("."): suffix = suffix[1:] include_ext_normalized = {ext.lower().lstrip(".") for ext in include_ext} if suffix not in include_ext_normalized: return False return True class ArtifactEventQueue: """Thread-safe bounded 
queue that supports blocking waits.""" def __init__(self, *, max_events: int = 2000) -> None: self._events: Deque[ArtifactEvent] = deque() self._condition = threading.Condition() self._max_events = max_events self._last_sequence = 0 self._min_sequence = 1 def append_many(self, events: Iterable[ArtifactEvent]) -> None: materialized = [event for event in events if event is not None] if not materialized: return with self._condition: for event in materialized: self._last_sequence += 1 event.sequence = self._last_sequence self._events.append(event) while len(self._events) > self._max_events: self._events.popleft() self._min_sequence = max(self._min_sequence, self._last_sequence - len(self._events) + 1) self._condition.notify_all() def snapshot( self, *, after: Optional[int] = None, include_mime: Optional[Sequence[str]] = None, include_ext: Optional[Sequence[str]] = None, max_size: Optional[int] = None, limit: int = 50, ) -> tuple[List[ArtifactEvent], int]: limit = max(1, min(limit, 200)) start_seq = after if after is not None else 0 start_seq = max(start_seq, self._min_sequence - 1) events: List[ArtifactEvent] = [] next_cursor = start_seq for event in self._events: if event.sequence <= start_seq: continue next_cursor = event.sequence if event.matches_filter( include_mime=include_mime, include_ext=include_ext, max_size=max_size, ): events.append(event) if len(events) >= limit: break if next_cursor < start_seq: next_cursor = start_seq return events, next_cursor def wait_for_events( self, *, after: Optional[int], include_mime: Optional[Sequence[str]], include_ext: Optional[Sequence[str]], max_size: Optional[int], limit: int, timeout: float, ) -> tuple[List[ArtifactEvent], int, bool]: """Block until matching events appear or timeout expires. 
Returns (events, next_cursor, timeout_reached) """ deadline = time.time() + max(0.0, timeout) with self._condition: events, next_cursor = self.snapshot( after=after, include_mime=include_mime, include_ext=include_ext, max_size=max_size, limit=limit, ) while not events and time.time() < deadline: remaining = deadline - time.time() if remaining <= 0: break self._condition.wait(timeout=remaining) events, next_cursor = self.snapshot( after=after, include_mime=include_mime, include_ext=include_ext, max_size=max_size, limit=limit, ) timed_out = not events return events, next_cursor or (after or 0), timed_out @property def last_sequence(self) -> int: return self._last_sequence
{ "repo_id": "OpenBMB/ChatDev", "file_path": "server/services/artifact_events.py", "license": "Apache License 2.0", "lines": 156, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex