| | from __future__ import annotations
|
| |
|
| | import os
|
| | import time
|
| | import logging
|
| | from enum import Enum
|
| | from typing import Any, Dict, Optional, List
|
| |
|
| | import httpx
|
| | from fastmcp import FastMCP
|
| | from pydantic import BaseModel, Field
|
| | from starlette.requests import Request
|
| | from starlette.responses import JSONResponse
|
| |
|
| | from .downstream_autodiscovery import DISCOVERED_TOOLS, discover_downstream_tools
|
| | from .downstream import call_downstream_tool
|
| | from .risk_model import compute_risk, risk_result_to_dict, initialize_plugins, get_plugin_cache_status
|
| | from .policy import decide_from_risk, decision_to_dict
|
| | from .sanitizer import sanitize_arguments, sanitize_output
|
| | from .audit import write_audit
|
| | from .rate_limiter import RateLimiter
|
| | from .config import RATE_LIMIT_MAX_CALLS, RATE_LIMIT_WINDOW_SECONDS
|
| |
|
| |
|
# Module-level logger, per the standard getLogger(__name__) convention.
logger = logging.getLogger(__name__)

# Process-wide rate limiter shared by the MCP tool path and the HTTP route;
# limits come from .config.
_rate_limiter = RateLimiter(max_calls=RATE_LIMIT_MAX_CALLS, window_seconds=RATE_LIMIT_WINDOW_SECONDS)
|
| |
|
| |
|
class ErrorCategory(str, Enum):
    """Categorization of error types for structured error handling.

    Inherits from ``str`` so member values serialize directly to JSON in
    response payloads and audit records.
    """
    VALIDATION = "validation"            # malformed / schema-invalid input
    NETWORK = "network"                  # connection-level failures
    TIMEOUT = "timeout"                  # downstream call timed out
    POLICY_DENIED = "policy_denied"      # blocked by the security policy
    DOWNSTREAM_ERROR = "downstream_error"  # downstream tool reported failure
    RATE_LIMITED = "rate_limited"        # caller exceeded the rate limit
    UNKNOWN = "unknown"                  # fallback when no category matches
|
| |
|
| |
|
| | def _normalize_downstream_result(raw: Any, success: bool, error: str | None = None) -> Dict[str, Any]:
|
| | """Normalize downstream response to standard envelope."""
|
| | return {
|
| | "success": success,
|
| | "data": raw if success else None,
|
| | "error": error
|
| | }
|
| |
|
| |
|
def _categorize_error(exc: Exception) -> ErrorCategory:
    """Map an exception to an :class:`ErrorCategory` via keyword matching.

    The exception message is lower-cased and scanned against keyword groups
    in priority order; the first group with a hit wins. Falls back to
    ``UNKNOWN`` when nothing matches.
    """
    text = str(exc).lower()
    keyword_groups = (
        (("timeout", "timed out"), ErrorCategory.TIMEOUT),
        (("connection", "unreachable", "refused"), ErrorCategory.NETWORK),
        (("validation", "invalid", "schema"), ErrorCategory.VALIDATION),
        (("downstream", "call failed"), ErrorCategory.DOWNSTREAM_ERROR),
    )
    for needles, category in keyword_groups:
        if any(needle in text for needle in needles):
            return category
    return ErrorCategory.UNKNOWN
|
| |
|
| |
|
| |
|
# Argument keys treated as security-critical: if sanitization alters any of
# these (e.g. a path-traversal rewrite), the whole call is blocked outright
# instead of proceeding with the sanitized value.
CRITICAL_PATH_KEYS = {"path", "filepath", "target"}
|
| |
|
| |
|
def _critical_arg_changed(before: Any, after: Any) -> bool:
    """Recursively detect whether a critical path-like argument changed during sanitization.

    Only keys in CRITICAL_PATH_KEYS are compared for inequality, so benign
    PII redactions elsewhere in the argument tree never trigger a block.
    """
    def differs(old: Any, new: Any) -> bool:
        if isinstance(old, dict) and isinstance(new, dict):
            # A critical key present on both sides with different values is a change.
            if any(k in old and k in new and old[k] != new[k] for k in CRITICAL_PATH_KEYS):
                return True
            # Recurse into the union of keys; missing keys compare as None.
            return any(differs(old.get(k), new.get(k)) for k in set(old) | set(new))
        if isinstance(old, list) and isinstance(new, list):
            # Element-wise up to the shorter list; surplus items are ignored.
            return any(differs(o, n) for o, n in zip(old, new))
        # Scalars / mixed types: not a dict-level critical change.
        return False

    try:
        return differs(before, after)
    except Exception:
        # Fail open on unexpected structures; callers treat False as "unchanged".
        return False
|
| |
|
| |
|
def _sanitized_critical(before: Dict[str, Any], after: Dict[str, Any]) -> bool:
    """Detect whether any critical key was sanitized (redaction token) or changed.

    An explicit ``[REDACTED_...`` token on the post-sanitization value is
    conclusive; otherwise direct value inequality is checked, and finally a
    deep structural comparison via :func:`_critical_arg_changed`.
    """
    try:
        for key in CRITICAL_PATH_KEYS:
            if key not in before and key not in after:
                continue
            old_val = before.get(key) if isinstance(before, dict) else None
            new_val = after.get(key) if isinstance(after, dict) else None
            # Sanitizer redaction marker on a critical key => definitely touched.
            if isinstance(new_val, str) and new_val.startswith("[REDACTED_"):
                return True
            # Fall back to plain inequality when both sides carry a value.
            if old_val is not None and new_val is not None and old_val != new_val:
                return True
        # Nothing flagged at the top level; check nested structures too.
        return _critical_arg_changed(before, after)
    except Exception:
        # NOTE(review): failing open means an unexpected error here skips the
        # critical-argument block — confirm this is the intended trade-off.
        return False
|
| |
|
| |
|
# The FastMCP server instance; every tool (@mcp.tool) and HTTP endpoint
# (@mcp.custom_route) below registers against it.
mcp = FastMCP(
    name="SecurityGateway",
    instructions=(
        "A unified, secure MCP tool gateway. Exposes all tools through the "
        "'secure_call' API. Performs risk scoring, sanitization, and auditing. "
        "Use list_available_tools() to discover capabilities; always call "
        "secure_call() instead of downstream tools directly."
    ),
)
|
| |
|
| |
|
class SecureCallInput(BaseModel):
    """Request payload for secure_call: identifies the caller, the downstream
    target, and the arguments to forward after risk scoring/sanitization."""
    user_id: str = Field(description="Logical user id (e.g., 'admin', 'judge-1').")
    server: str = Field(description="Downstream server key, e.g. 'filesystem'.")
    tool: str = Field(description="Tool name on the downstream server.")
    arguments: Dict[str, Any] = Field(
        default_factory=dict,
        description="Arguments to pass to the downstream tool.",
    )
    llm_context: Optional[str] = Field(
        default=None,
        description="Optional prompt or reasoning context for risk analysis.",
    )
|
| |
|
| |
|
class SecureCallOutput(BaseModel):
    """Response envelope for secure_call: policy verdict, risk details, timing,
    and the (normalized, possibly redacted) downstream result.

    NOTE(review): the ``policy_decision`` description says "allow | redacted |
    blocked", but the implementations also emit "allowed", "allow", "timeout"
    and "error" — reconcile the contract with both call paths.
    """
    allowed: bool
    redacted: bool
    reason: str
    risk_score: float
    risk_factors: List[str] = Field(default_factory=list, description="Explicit risk reasons")
    error_category: Optional[ErrorCategory] = Field(default=None, description="Categorized error type")
    execution_time_ms: float = Field(default=0.0, description="Execution time in milliseconds")
    policy: Dict[str, Any]
    risk: Dict[str, Any]
    downstream_result: Dict[str, Any] = Field(description="Normalized downstream response")
    original_arguments: Dict[str, Any] = Field(default_factory=dict, description="Raw unsanitized arguments provided")
    sanitized_arguments: Dict[str, Any] = Field(default_factory=dict, description="Arguments after sanitization")
    policy_decision: str = Field(description="High-level policy decision: allow | redacted | blocked")
|
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
async def _secure_call_impl(data: SecureCallInput) -> SecureCallOutput:
    """
    Secure proxy for ANY downstream MCP tool.

    The LLM must always use this tool instead of calling downstream servers
    directly.

    Pipeline: rate-limit check -> server/tool validation -> risk scoring ->
    policy decision -> argument sanitization -> (possibly blocked) downstream
    call -> output redaction -> audit write -> structured response.

    Raises:
        Exception: when the server or tool name is unknown (propagates to the
            MCP framework rather than returning a structured error).
    """
    start_time = time.perf_counter()

    # Rate limiting happens before any downstream work so throttled callers
    # cost nothing beyond this check.
    allowed, rate_info = _rate_limiter.check_rate_limit(data.user_id)
    if not allowed:
        elapsed = (time.perf_counter() - start_time) * 1000
        error_msg = f"Rate limit exceeded: {rate_info['current_count']}/{rate_info['limit']} calls in {rate_info['window_seconds']}s. Try again in {rate_info['reset_in_seconds']}s."
        # No risk/policy evaluation happened, so those sections carry zeros.
        return SecureCallOutput(
            allowed=False,
            redacted=False,
            reason=error_msg,
            risk_score=0.0,
            risk_factors=["Rate limit exceeded"],
            error_category=ErrorCategory.RATE_LIMITED,
            execution_time_ms=elapsed,
            policy={"allow": False, "reason": error_msg, "rate_limit": rate_info},
            risk={"score": 0.0, "reasons": [], "flags": {}},
            downstream_result=_normalize_downstream_result(None, False, error_msg),
            original_arguments=data.arguments,
            sanitized_arguments={},
            policy_decision="blocked",
        )

    server_key = data.server
    tool_name = data.tool
    raw_args = data.arguments

    # Validate the target. A configured-but-undiscovered server is allowed
    # through with a warning; a wholly unknown server or tool raises.
    if server_key not in DISCOVERED_TOOLS:
        from .config import DOWNSTREAM_SERVERS
        if server_key not in DOWNSTREAM_SERVERS:
            raise Exception(
                f"Unknown server '{server_key}'. "
                "Call list_available_tools() to see valid options."
            )
        logger.warning(f"⚠️ Server '{server_key}' is configured but tools not discovered yet. Proceeding with request.")
    elif tool_name not in DISCOVERED_TOOLS[server_key]:
        raise Exception(
            f"Unknown tool '{tool_name}' for server '{server_key}'. "
            "Call list_available_tools() to see valid tools."
        )

    # Risk is computed on the RAW arguments so the scorer sees the original
    # (unsanitized) payload; the policy decision derives from that risk.
    risk = compute_risk(
        user_id=data.user_id,
        server_key=server_key,
        tool=tool_name,
        arguments=raw_args,
        llm_context=data.llm_context,
    )
    policy = decide_from_risk(risk)

    sanitized_args = sanitize_arguments(raw_args)

    # If sanitization had to touch a critical key (path-like argument), block
    # outright rather than forwarding the altered value.
    if _sanitized_critical(raw_args, sanitized_args):
        # NOTE(review): start_time is always bound at this point, so the
        # locals() guard is dead code kept for byte-compatibility.
        elapsed = (time.perf_counter() - start_time) * 1000 if 'start_time' in locals() else 0.0
        reason = "Blocked: sanitized critical argument (e.g., path traversal or sensitive path)."
        outcome = {"success": False, "error": reason, "category": ErrorCategory.POLICY_DENIED.value}
        # Blocked calls are audited the same as executed ones.
        write_audit(
            user_id=data.user_id,
            server_key=server_key,
            tool=tool_name,
            raw_arguments=raw_args,
            sanitized_arguments=sanitized_args,
            policy=decision_to_dict(policy),
            risk=risk_result_to_dict(risk),
            outcome=outcome,
            execution_time_ms=elapsed,
        )
        return SecureCallOutput(
            allowed=False,
            redacted=True,
            reason=reason,
            risk_score=policy.risk_score,
            risk_factors=risk.reasons,
            error_category=ErrorCategory.POLICY_DENIED,
            execution_time_ms=elapsed,
            policy=decision_to_dict(policy),
            risk=risk_result_to_dict(risk),
            downstream_result=_normalize_downstream_result(None, False, reason),
            original_arguments=raw_args,
            sanitized_arguments=sanitized_args,
            policy_decision="blocked",
        )

    # Policy veto: audit and return a structured block, no downstream call.
    if not policy.allow:
        elapsed = (time.perf_counter() - start_time) * 1000
        outcome = {"success": False, "error": "Blocked by security policy.", "category": "policy_denied"}
        write_audit(
            user_id=data.user_id,
            server_key=server_key,
            tool=tool_name,
            raw_arguments=raw_args,
            sanitized_arguments=sanitized_args,
            policy=decision_to_dict(policy),
            risk=risk_result_to_dict(risk),
            outcome=outcome,
            execution_time_ms=elapsed,
        )

        return SecureCallOutput(
            allowed=False,
            redacted=True,
            reason=policy.reason,
            risk_score=policy.risk_score,
            risk_factors=risk.reasons,
            error_category=ErrorCategory.POLICY_DENIED,
            execution_time_ms=elapsed,
            policy=decision_to_dict(policy),
            risk=risk_result_to_dict(risk),
            downstream_result=_normalize_downstream_result(None, False, "Blocked by policy"),
            original_arguments=raw_args,
            sanitized_arguments=sanitized_args,
            policy_decision="blocked",
        )

    # Execute downstream with SANITIZED arguments; failures are captured and
    # categorized rather than propagated, so the caller always gets a
    # structured SecureCallOutput past this point.
    error_category = None
    try:
        downstream_raw = await call_downstream_tool(
            server_key=server_key,
            tool=tool_name,
            arguments=sanitized_args,
        )
        # Optionally redact the downstream output per policy.
        downstream_result = (
            sanitize_output(downstream_raw)
            if policy.redact_output
            else downstream_raw
        )
        normalized = _normalize_downstream_result(downstream_result, True)
        outcome = {"success": True, "redacted": policy.redact_output}
    except Exception as exc:
        error_category = _categorize_error(exc)
        normalized = _normalize_downstream_result(None, False, str(exc))
        outcome = {"success": False, "error": str(exc), "category": error_category.value}

    elapsed = (time.perf_counter() - start_time) * 1000

    # Audit both success and failure outcomes of the downstream call.
    write_audit(
        user_id=data.user_id,
        server_key=server_key,
        tool=tool_name,
        raw_arguments=raw_args,
        sanitized_arguments=sanitized_args,
        policy=decision_to_dict(policy),
        risk=risk_result_to_dict(risk),
        outcome=outcome,
        execution_time_ms=elapsed,
    )

    # Derive the high-level decision string. NOTE(review): this path emits
    # "allowed" while the HTTP route emits "allow" for the same case —
    # consumers of policy_decision should be checked against both.
    if error_category:
        if error_category == ErrorCategory.TIMEOUT:
            policy_decision = "timeout"
        else:
            policy_decision = "error"
    elif policy.allow:
        policy_decision = "redacted" if policy.redact_output else "allowed"
    else:
        policy_decision = "blocked"

    return SecureCallOutput(
        allowed=policy.allow,
        redacted=policy.redact_output,
        reason=policy.reason,
        risk_score=policy.risk_score,
        risk_factors=risk.reasons,
        error_category=error_category,
        execution_time_ms=elapsed,
        policy=decision_to_dict(policy),
        risk=risk_result_to_dict(risk),
        downstream_result=normalized,
        original_arguments=raw_args,
        sanitized_arguments=sanitized_args,
        policy_decision=policy_decision,
    )
|
| |
|
| |
|
@mcp.tool()
async def secure_call(data: SecureCallInput) -> SecureCallOutput:
    """
    MCP tool wrapper for secure downstream tool invocation.

    This is the registered MCP tool that LLMs use to safely call downstream servers.
    It delegates to _secure_call_impl for the actual implementation (rate
    limiting, risk scoring, sanitization, policy enforcement, auditing).
    """
    return await _secure_call_impl(data)
|
| |
|
| |
|
| |
|
| |
|
| |
|
| | def _flatten_discovered_tools(tools_dict: Dict[str, Any]) -> Dict[str, Any]:
|
| | """
|
| | Flatten nested discovered tools into single-level namespace with server__tool_name format.
|
| |
|
| | Converts:
|
| | {"server_key": {"tool_name": {...}}}
|
| | To:
|
| | {"server_key__tool_name": {...}}
|
| |
|
| | This makes tool names parsable by all LLMs (Claude, OpenAI, Gemini) consistently.
|
| | """
|
| | flattened = {}
|
| | for server_key, server_tools in tools_dict.items():
|
| | if not isinstance(server_tools, dict):
|
| | continue
|
| | for tool_name, tool_meta in server_tools.items():
|
| | if tool_name.startswith("_"):
|
| |
|
| | continue
|
| | flattened_name = f"{server_key}__{tool_name}"
|
| | flattened[flattened_name] = tool_meta
|
| | return flattened
|
| |
|
| |
|
@mcp.tool()
async def list_available_tools(refresh: bool = False) -> Dict[str, Any]:
    """
    List discovered downstream servers and their tools with flattened names.

    - If ``refresh`` is true, forces a re-discovery.
    - If nothing has been discovered yet, performs discovery on-demand.

    Returns tools in flattened format: server__tool_name (e.g., "ultimate_scraper__searchEventListings")
    This format is parsable by Claude, OpenAI, Gemini, and other LLMs consistently.
    """
    # Lazy discovery: only hit the downstream servers when forced or empty.
    if refresh or not DISCOVERED_TOOLS:
        await discover_downstream_tools()
    return _flatten_discovered_tools(DISCOVERED_TOOLS)
|
| |
|
| |
|
@mcp.tool()
async def refresh_discovery() -> Dict[str, Any]:
    """Force downstream tool re-discovery and return the updated registry with flattened names."""
    # Unconditional re-discovery, unlike list_available_tools(refresh=False).
    await discover_downstream_tools()
    return _flatten_discovered_tools(DISCOVERED_TOOLS)
|
| |
|
| |
|
async def _on_startup() -> None:
    """
    Called once when the MCP server starts.
    Initializes the plugin system and performs downstream tool autodiscovery.

    NOTE(review): uses print() rather than the module logger — confirm
    whether startup output is intentionally console-only.
    """
    # Load risk-model plugins before any call can be scored.
    print("[Server] Initializing security plugins...")
    initialize_plugins()
    cache_status = get_plugin_cache_status()
    print(f"[Server] Plugin cache: {cache_status.get('loaded_plugins', 0)} plugins loaded")

    # Warm the downstream tool registry so the first secure_call is fast.
    print("[Server] Discovering downstream tools...")
    await discover_downstream_tools()
|
| |
|
| |
|
| |
|
@mcp.custom_route("/tools/list", methods=["GET"])
async def _http_list_tools(request: Request) -> JSONResponse:
    """List all available tools from discovered downstream servers.

    Triggers discovery on-demand if the registry is empty.
    NOTE(review): the claim that native functions (web_search,
    code_interpreter) are included is not evidenced in this module — it
    depends on what list_available_tools/discovery return; confirm.
    """
    tools = await list_available_tools()
    return JSONResponse({"tools": tools})
|
| |
|
| |
|
@mcp.custom_route("/tools/refresh", methods=["POST"])
async def _http_refresh_tools(request: Request) -> JSONResponse:
    """Force downstream autodiscovery and return the updated registry.

    NOTE(review): returns the NESTED registry, unlike /tools/list which
    returns flattened names — confirm the asymmetry is intentional.
    """
    await discover_downstream_tools()
    return JSONResponse(DISCOVERED_TOOLS)
|
| |
|
| |
|
| |
|
| |
|
@mcp.custom_route("/config/servers", methods=["GET"])
async def _http_get_config(request: Request) -> JSONResponse:
    """
    Expose servers configuration for distributed clients.

    Used by Real LLM and other clients to fetch tool parameter definitions.
    Returns the entire servers.yaml configuration as JSON. Responds 404 when
    no servers.yaml can be located (or it parses to an empty document) and
    500 on any unexpected error.
    """
    try:
        from pathlib import Path
        import yaml

        # Candidate locations, in priority order: package-relative first,
        # then CWD-relative fallbacks.
        candidates = (
            Path(__file__).parent / "config" / "servers.yaml",
            Path("config/servers.yaml"),
            Path("security_gateway/config/servers.yaml"),
        )

        located = next((p for p in candidates if p.exists()), None)

        config_data = None
        if located is not None:
            with open(located, 'r') as f:
                config_data = yaml.safe_load(f)

        # An empty/None document is treated the same as a missing file.
        if not config_data:
            return JSONResponse(
                {"error": "servers.yaml not found", "servers": {}},
                status_code=404
            )

        return JSONResponse(config_data)

    except Exception as e:
        return JSONResponse(
            {"error": str(e), "servers": {}},
            status_code=500
        )
|
| |
|
| |
|
@mcp.custom_route("/tools/secure_call", methods=["POST"])
async def _http_secure_call(request: Request) -> JSONResponse:
    """HTTP counterpart of secure_call: validate, score, sanitize, execute,
    audit, and return the SecureCallOutput as JSON with rate-limit headers.

    Accepts either a bare SecureCallInput JSON object or one wrapped in a
    top-level ``data`` key.

    NOTE(review): this duplicates _secure_call_impl's pipeline rather than
    delegating to it; the two have drifted (see inline notes) — consider
    unifying them.
    """
    # Ensure the registry is populated before validating server/tool names.
    if not DISCOVERED_TOOLS:
        await discover_downstream_tools()

    try:
        body = await request.json()
    except Exception:
        return JSONResponse({"error": "invalid JSON"}, status_code=400)

    # Extract user_id early (supporting both wrapped and bare payloads) so
    # the rate limit applies before any validation work.
    if isinstance(body, dict) and "data" in body and isinstance(body["data"], dict):
        user_id = body["data"].get("user_id", "anonymous")
    elif isinstance(body, dict):
        user_id = body.get("user_id", "anonymous")
    else:
        user_id = "anonymous"

    allowed, rate_info = _rate_limiter.check_rate_limit(user_id)
    if not allowed:
        # Standard rate-limit response headers for well-behaved clients.
        headers = {
            "X-RateLimit-Limit": str(rate_info["limit"]),
            "X-RateLimit-Remaining": "0",
            "X-RateLimit-Reset": str(int(time.time() + rate_info["reset_in_seconds"])),
            "Retry-After": str(int(rate_info["reset_in_seconds"])),
        }
        error_msg = f"Rate limit exceeded: {rate_info['current_count']}/{rate_info['limit']} calls in {rate_info['window_seconds']}s. Try again in {rate_info['reset_in_seconds']}s."
        return JSONResponse(
            {"error": error_msg, "rate_limit": rate_info},
            status_code=429,
            headers=headers
        )

    # Normalize the payload shape to {"data": {...}}.
    if isinstance(body, dict) and "data" in body and isinstance(body["data"], dict):
        call_args = {"data": body["data"]}
    elif isinstance(body, dict):
        call_args = {"data": body}
    else:
        return JSONResponse({"error": "expected JSON object"}, status_code=400)

    try:
        # Unwrap to the raw input dict for pydantic validation.
        if "data" in call_args and isinstance(call_args["data"], dict):
            data_dict: Dict[str, Any] = call_args["data"]
        else:
            data_dict = call_args

        data_obj = SecureCallInput(**data_dict)

        # NOTE(review): unlike _secure_call_impl, there is no fallback for a
        # configured-but-undiscovered server here — the HTTP path rejects it.
        if data_obj.server not in DISCOVERED_TOOLS:
            raise Exception(
                f"Unknown server '{data_obj.server}'. "
                "Call list_available_tools() to see valid options."
            )

        if data_obj.tool not in DISCOVERED_TOOLS[data_obj.server]:
            raise Exception(
                f"Unknown tool '{data_obj.tool}' for server '{data_obj.server}'. "
                "Call list_available_tools() to see valid tools."
            )

        # Risk is computed on the RAW arguments; policy derives from risk.
        risk = compute_risk(
            user_id=data_obj.user_id,
            server_key=data_obj.server,
            tool=data_obj.tool,
            arguments=data_obj.arguments,
            llm_context=data_obj.llm_context,
        )
        policy = decide_from_risk(risk)

        sanitized_args = sanitize_arguments(data_obj.arguments)

        # Sanitization touched a critical (path-like) key: block outright.
        if _sanitized_critical(data_obj.arguments, sanitized_args):
            # NOTE(review): elapsed is hard-coded to 0.0 on this path; the
            # timer (start_time) is only started further down.
            elapsed = 0.0
            reason = "Blocked: sanitized critical argument (e.g., path traversal or sensitive path)."
            outcome = {"success": False, "error": reason, "category": ErrorCategory.POLICY_DENIED.value}
            write_audit(
                user_id=data_obj.user_id,
                server_key=data_obj.server,
                tool=data_obj.tool,
                raw_arguments=data_obj.arguments,
                sanitized_arguments=sanitized_args,
                policy=decision_to_dict(policy),
                risk=risk_result_to_dict(risk),
                outcome=outcome,
                execution_time_ms=elapsed,
            )

            result_obj = SecureCallOutput(
                allowed=False,
                redacted=True,
                reason=reason,
                risk_score=policy.risk_score,
                risk_factors=risk.reasons,
                error_category=ErrorCategory.POLICY_DENIED,
                execution_time_ms=elapsed,
                policy=decision_to_dict(policy),
                risk=risk_result_to_dict(risk),
                downstream_result=_normalize_downstream_result(None, False, reason),
                original_arguments=data_obj.arguments,
                sanitized_arguments=sanitized_args,
                policy_decision="blocked",
            )

            # Pydantic v2 model_dump with a v1 .dict() fallback.
            try:
                payload = result_obj.model_dump()
            except Exception:
                payload = getattr(result_obj, "dict", lambda: None)() or {}
            return JSONResponse(payload)

        # Timer starts here, so validation/risk work above is not counted.
        start_time = time.perf_counter()

        if not policy.allow:
            # Policy veto: audit and build a structured block response.
            elapsed = (time.perf_counter() - start_time) * 1000
            outcome = {"success": False, "error": "Blocked by security policy.", "category": "policy_denied"}
            write_audit(
                user_id=data_obj.user_id,
                server_key=data_obj.server,
                tool=data_obj.tool,
                raw_arguments=data_obj.arguments,
                sanitized_arguments=sanitized_args,
                policy=decision_to_dict(policy),
                risk=risk_result_to_dict(risk),
                outcome=outcome,
                execution_time_ms=elapsed,
            )

            result_obj = SecureCallOutput(
                allowed=False,
                redacted=True,
                reason=policy.reason,
                risk_score=policy.risk_score,
                risk_factors=risk.reasons,
                error_category=ErrorCategory.POLICY_DENIED,
                execution_time_ms=elapsed,
                policy=decision_to_dict(policy),
                risk=risk_result_to_dict(risk),
                downstream_result=_normalize_downstream_result(None, False, "Blocked by policy"),
                original_arguments=data_obj.arguments,
                sanitized_arguments=sanitized_args,
                policy_decision="blocked",
            )
        else:
            # Execute downstream with SANITIZED arguments; capture failures.
            error_category = None
            try:
                downstream_raw = await call_downstream_tool(
                    server_key=data_obj.server,
                    tool=data_obj.tool,
                    arguments=sanitized_args,
                )
                downstream_result = (
                    sanitize_output(downstream_raw)
                    if policy.redact_output
                    else downstream_raw
                )
                normalized = _normalize_downstream_result(downstream_result, True)
                outcome = {"success": True, "redacted": policy.redact_output}
            except Exception as exc:
                error_category = _categorize_error(exc)
                normalized = _normalize_downstream_result(None, False, str(exc))
                outcome = {"success": False, "error": str(exc), "category": error_category.value}

            elapsed = (time.perf_counter() - start_time) * 1000

            write_audit(
                user_id=data_obj.user_id,
                server_key=data_obj.server,
                tool=data_obj.tool,
                raw_arguments=data_obj.arguments,
                sanitized_arguments=sanitized_args,
                policy=decision_to_dict(policy),
                risk=risk_result_to_dict(risk),
                outcome=outcome,
                execution_time_ms=elapsed,
            )

            # NOTE(review): emits "allow" where _secure_call_impl emits
            # "allowed", and never emits "timeout"/"error" — inconsistent.
            result_obj = SecureCallOutput(
                allowed=policy.allow,
                redacted=policy.redact_output,
                reason=policy.reason,
                risk_score=policy.risk_score,
                risk_factors=risk.reasons,
                error_category=error_category,
                execution_time_ms=elapsed,
                policy=decision_to_dict(policy),
                risk=risk_result_to_dict(risk),
                downstream_result=normalized,
                original_arguments=data_obj.arguments,
                sanitized_arguments=sanitized_args,
                policy_decision="redacted" if policy.allow and policy.redact_output else ("allow" if policy.allow else "blocked"),
            )
    except Exception as exc:
        import traceback

        # NOTE(review): returning the traceback to the client leaks internal
        # details — confirm this is acceptable outside development.
        tb = traceback.format_exc()
        print(f"ERROR in _http_secure_call: {exc}")
        print(f"TRACEBACK:\n{tb}")
        return JSONResponse({"error": str(exc), "traceback": tb}, status_code=500)

    # Pydantic v2 model_dump with a v1 .dict() fallback.
    try:
        payload = result_obj.model_dump()
    except Exception:
        payload = getattr(result_obj, "dict", lambda: None)() or {}

    # NOTE(review): this second check_rate_limit call is made only to read
    # counters for the headers, but it likely consumes an extra rate-limit
    # token per request — verify RateLimiter semantics or add a read-only API.
    _, rate_info = _rate_limiter.check_rate_limit(user_id)
    headers = {
        "X-RateLimit-Limit": str(rate_info["limit"]),
        "X-RateLimit-Remaining": str(rate_info["remaining"]),
        "X-RateLimit-Reset": str(int(time.time() + rate_info["reset_in_seconds"])),
    }

    return JSONResponse(payload, headers=headers)
|
| |
|
| |
|
@mcp.custom_route("/audit/latest", methods=["GET"])
async def _http_audit_latest(request: Request) -> JSONResponse:
    """
    Retrieve the latest audit logs for the auditeye-dashboard.

    Returns the most recent (up to 100) audit entries from the audit.log.jsonl
    file, newest first by the entries' "timestamp" field. A missing log file
    or read failure yields an empty list with an "error" field and HTTP 200,
    so the dashboard degrades gracefully; only unexpected errors return 500.
    """
    try:
        from pathlib import Path
        import json
        from .config import AUDIT_LOG_PATH

        # Candidate locations, in priority order: configured path first,
        # then package- and CWD-relative fallbacks.
        audit_paths = [
            Path(AUDIT_LOG_PATH),
            Path(__file__).parent.parent / "audit.log.jsonl",
            Path("audit.log.jsonl"),
            Path("./audit.log.jsonl"),
        ]

        audit_entries = []
        audit_file = None

        for path in audit_paths:
            if path.exists():
                audit_file = path
                break

        if not audit_file:
            # Soft failure: dashboard still renders with an empty list.
            return JSONResponse({"logs": [], "error": f"No audit log found at: {AUDIT_LOG_PATH}"}, status_code=200)

        try:
            # JSONL format: one JSON object per line; malformed lines are skipped.
            with open(audit_file, 'r') as f:
                for line in f:
                    if line.strip():
                        try:
                            entry = json.loads(line)
                            audit_entries.append(entry)
                        except json.JSONDecodeError:
                            continue
        except Exception as e:
            return JSONResponse(
                {"logs": [], "error": f"Failed to read audit file: {str(e)}"},
                status_code=200
            )

        # Newest first; entries without a timestamp sort last.
        latest = sorted(audit_entries, key=lambda x: x.get("timestamp", ""), reverse=True)[:100]

        return JSONResponse({"logs": latest})

    except Exception as e:
        import traceback
        tb = traceback.format_exc()
        # NOTE(review): the traceback is returned to the client — confirm
        # this is acceptable outside development.
        return JSONResponse(
            {"logs": [], "error": str(e), "traceback": tb},
            status_code=500
        )
|
| |
|
| |
|
| |
|
| |
|
if __name__ == "__main__":
    import asyncio

    # Warm the downstream tool registry before serving requests.
    asyncio.run(discover_downstream_tools())

    # NOTE(review): binding to 0.0.0.0 exposes the gateway on all network
    # interfaces — confirm this is intended outside local development.
    mcp.run(transport="http", host="0.0.0.0", port=8000)
|
| |
|