nothingworry's picture
feat(web-search): use Google Custom Search for live web results
29116ed
raw
history blame
4.54 kB
from __future__ import annotations
import logging
import os
from typing import Any, Dict, Optional
from dotenv import load_dotenv
load_dotenv()
# Module logger: attach a stream handler exactly once so repeated imports
# (or re-execution under some runners) don't duplicate log lines.
logger = logging.getLogger("integrachat.mcp")
if not logger.handlers:
    handler = logging.StreamHandler()
    handler.setFormatter(
        logging.Formatter(
            "[%(asctime)s] %(levelname)s %(name)s - %(message)s",
            datefmt="%Y-%m-%d %H:%M:%S",
        )
    )
    logger.addHandler(handler)
logger.setLevel(os.getenv("LOG_LEVEL", "INFO").upper())

# Analytics storage is optional (e.g. absent during tests); fall back to a
# None sentinel when the backend package cannot be imported.
try:
    from backend.api.storage.analytics_store import AnalyticsStore
except Exception:  # pragma: no cover - analytics storage is optional during tests
    AnalyticsStore = None  # type: ignore

# Lazily-created singleton store, a sticky failure flag, and an env kill switch.
_analytics_store: Optional["AnalyticsStore"] = None
_analytics_failed = False
_analytics_disabled = os.getenv("ANALYTICS_DISABLED", "").lower() in {"1", "true", "yes"}
def _get_analytics_store() -> Optional["AnalyticsStore"]:
    """
    Return the process-wide analytics store, building it on first use.

    Lazy construction means missing Supabase credentials or an absent
    package cannot prevent the MCP server from starting; once creation
    fails, analytics stays disabled for the remainder of the process.
    """
    global _analytics_store, _analytics_failed

    # Kill switch or a previously recorded failure: analytics is off.
    if _analytics_disabled or _analytics_failed:
        return None

    if _analytics_store is None:
        if AnalyticsStore is None:
            # The import itself failed at module load; remember that.
            _analytics_failed = True
        else:
            try:
                _analytics_store = AnalyticsStore()
            except RuntimeError as exc:
                logger.warning("Analytics disabled: %s", exc)
                _analytics_failed = True
                _analytics_store = None
            except Exception as exc:  # pragma: no cover - unexpected failures
                logger.debug("Unexpected analytics init failure: %s", exc)
                _analytics_failed = True
                _analytics_store = None
    return _analytics_store
def log_tool_usage(
    tool_name: str,
    tenant_id: Optional[str],
    *,
    success: bool,
    latency_ms: Optional[int] = None,
    metadata: Optional[Dict[str, Any]] = None,
    error_message: Optional[str] = None,
    user_id: Optional[str] = None,
):
    """
    Record a single tool invocation.

    Always emits one structured line to the process log; when the analytics
    store is available and a tenant is known, the invocation is persisted
    there too. Analytics failures are debug-logged and swallowed so that
    tool execution can never crash on telemetry.
    """
    payload: Dict[str, Any] = dict(
        tool=tool_name,
        tenant_id=tenant_id,
        success=success,
        latency_ms=latency_ms,
        user_id=user_id,
        metadata=metadata or {},
    )
    if error_message:
        payload["error"] = error_message

    if success:
        logger.info("tool_completed %s", payload)
    else:
        logger.warning("tool_failed %s", payload)

    store = _get_analytics_store()
    # Persisting requires both a working store and a tenant to attribute to.
    if not store or not tenant_id:
        return
    try:
        store.log_tool_usage(
            tenant_id=tenant_id,
            tool_name=tool_name,
            latency_ms=latency_ms,
            success=success,
            error_message=error_message,
            metadata=metadata,
            user_id=user_id,
        )
    except Exception as exc:  # pragma: no cover - analytics failures shouldn't crash tools
        logger.debug("analytics logging failed: %s", exc)
def log_rag_search_metrics(
    tenant_id: str,
    query: str,
    hits_count: int,
    avg_score: Optional[float],
    top_score: Optional[float],
    latency_ms: Optional[int] = None,
):
    """
    Best-effort persistence of RAG search quality metrics.

    A no-op when analytics is unavailable; storage errors are debug-logged
    and never propagated to the caller.
    """
    store = _get_analytics_store()
    if not store:
        return
    try:
        store.log_rag_search(
            tenant_id=tenant_id,
            query=query,
            hits_count=hits_count,
            avg_score=avg_score,
            top_score=top_score,
            latency_ms=latency_ms,
        )
    except Exception as exc:  # pragma: no cover
        logger.debug("rag analytics logging failed: %s", exc)
def log_redflag_violation(
    tenant_id: str,
    rule_id: str,
    rule_pattern: str,
    severity: str,
    matched_text: str,
    *,
    confidence: Optional[float] = None,
    message_preview: Optional[str] = None,
    user_id: Optional[str] = None,
):
    """
    Best-effort write of a red-flag rule hit to the analytics store.

    A no-op when analytics is unavailable; storage errors are debug-logged
    and never propagated to the caller.
    """
    store = _get_analytics_store()
    if not store:
        return
    try:
        store.log_redflag_violation(
            tenant_id=tenant_id,
            rule_id=rule_id,
            rule_pattern=rule_pattern,
            severity=severity,
            matched_text=matched_text,
            confidence=confidence,
            message_preview=message_preview,
            user_id=user_id,
        )
    except Exception as exc:  # pragma: no cover
        logger.debug("redflag logging failed: %s", exc)