Spaces:
Sleeping
Sleeping
Add application file
Browse files — This view is limited to 50 files because it contains too many changes. See raw diff.
- .dockerignore +153 -0
- .gitignore +132 -0
- Dockerfile +60 -0
- app/__init__.py +1 -0
- app/agents/__init__.py +19 -0
- app/agents/escalation_agent.py +143 -0
- app/agents/extraction_agent.py +181 -0
- app/agents/intent_agent.py +136 -0
- app/agents/invoice_agent.py +75 -0
- app/agents/ledger_agent.py +72 -0
- app/agents/monitor_agent.py +124 -0
- app/agents/payment_agent.py +58 -0
- app/agents/prediction_agent.py +154 -0
- app/agents/recovery_agent.py +192 -0
- app/agents/skill_router_agent.py +53 -0
- app/agents/urgency_agent.py +109 -0
- app/agents/validation_agent.py +53 -0
- app/agents/verification_agent.py +128 -0
- app/api/__init__.py +1 -0
- app/api/notification_routes.py +387 -0
- app/config.py +100 -0
- app/core/__init__.py +21 -0
- app/core/autonomy_planner.py +161 -0
- app/core/base_agent.py +122 -0
- app/core/config_bridge.py +6 -0
- app/core/context.py +169 -0
- app/core/event_bus.py +125 -0
- app/core/llm_router.py +147 -0
- app/core/llm_service.py +459 -0
- app/core/orchestrator.py +566 -0
- app/core/planner.py +197 -0
- app/core/priority.py +106 -0
- app/core/registry.py +136 -0
- app/main.py +198 -0
- app/memory/__init__.py +1 -0
- app/memory/agent_memory.py +145 -0
- app/services/__init__.py +1 -0
- app/services/excel_sync.py +210 -0
- app/services/gemini_client.py +184 -0
- app/services/google_sheets_service.py +140 -0
- app/services/inventory_service.py +146 -0
- app/services/invoice_service.py +98 -0
- app/services/notification_generator.py +109 -0
- app/services/router.py +88 -0
- app/services/skill_generator.py +207 -0
- app/skills/__init__.py +1 -0
- app/skills/credit_skill.py +69 -0
- app/skills/order_skill.py +130 -0
- app/skills/payment_skill.py +68 -0
- app/skills/preparation_skill.py +117 -0
.dockerignore
ADDED
|
@@ -0,0 +1,153 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# =============================================================================
|
| 2 |
+
# .dockerignore - Exclude unnecessary files from Docker context
|
| 3 |
+
# =============================================================================
|
| 4 |
+
|
| 5 |
+
# Python & Virtual Environments
|
| 6 |
+
__pycache__/
|
| 7 |
+
*.py[cod]
|
| 8 |
+
*$py.class
|
| 9 |
+
*.so
|
| 10 |
+
.Python
|
| 11 |
+
env/
|
| 12 |
+
ENV/
|
| 13 |
+
venv/
|
| 14 |
+
Denv/
|
| 15 |
+
.venv
|
| 16 |
+
build/
|
| 17 |
+
develop-eggs/
|
| 18 |
+
dist/
|
| 19 |
+
downloads/
|
| 20 |
+
eggs/
|
| 21 |
+
.eggs/
|
| 22 |
+
lib/
|
| 23 |
+
lib64/
|
| 24 |
+
parts/
|
| 25 |
+
sdist/
|
| 26 |
+
var/
|
| 27 |
+
wheels/
|
| 28 |
+
*.egg-info/
|
| 29 |
+
.installed.cfg
|
| 30 |
+
*.egg
|
| 31 |
+
pip-log.txt
|
| 32 |
+
pip-delete-this-directory.txt
|
| 33 |
+
|
| 34 |
+
# Testing & Coverage
|
| 35 |
+
.pytest_cache/
|
| 36 |
+
.tox/
|
| 37 |
+
.coverage
|
| 38 |
+
.coverage.*
|
| 39 |
+
.cache
|
| 40 |
+
nosetests.xml
|
| 41 |
+
coverage.xml
|
| 42 |
+
*.cover
|
| 43 |
+
.hypothesis/
|
| 44 |
+
|
| 45 |
+
# IDE & Editor Files
|
| 46 |
+
.vscode/
|
| 47 |
+
.idea/
|
| 48 |
+
*.swp
|
| 49 |
+
*.swo
|
| 50 |
+
*~
|
| 51 |
+
.DS_Store
|
| 52 |
+
*.sublime-project
|
| 53 |
+
*.sublime-workspace
|
| 54 |
+
.project
|
| 55 |
+
.pydevproject
|
| 56 |
+
.settings
|
| 57 |
+
*.iml
|
| 58 |
+
.vscode-container-dockerfile
|
| 59 |
+
|
| 60 |
+
# Git
|
| 61 |
+
.git/
|
| 62 |
+
.gitignore
|
| 63 |
+
.gitattributes
|
| 64 |
+
.github/
|
| 65 |
+
|
| 66 |
+
# Node (Frontend build artifacts)
|
| 67 |
+
node_modules/
|
| 68 |
+
NPM_DEBUG.log
|
| 69 |
+
npm-debug.log
|
| 70 |
+
yarn-error.log
|
| 71 |
+
.npm
|
| 72 |
+
dist/
|
| 73 |
+
build/
|
| 74 |
+
|
| 75 |
+
# TypeScript
|
| 76 |
+
*.tsbuildinfo
|
| 77 |
+
tsconfig.tsbuild.json
|
| 78 |
+
ts_errors*.txt
|
| 79 |
+
|
| 80 |
+
# Temporary & Log Files
|
| 81 |
+
*.log
|
| 82 |
+
*.pot
|
| 83 |
+
*.mo
|
| 84 |
+
.env
|
| 85 |
+
.env.local
|
| 86 |
+
.env.*
|
| 87 |
+
temp/
|
| 88 |
+
tmp/
|
| 89 |
+
*.tmp
|
| 90 |
+
*.swp
|
| 91 |
+
logs/
|
| 92 |
+
|
| 93 |
+
# Database & Data Files (unless needed at runtime)
|
| 94 |
+
*.db
|
| 95 |
+
*.sqlite
|
| 96 |
+
*.sqlite3
|
| 97 |
+
notiflow_data.xlsx
|
| 98 |
+
agent_memory.json
|
| 99 |
+
|
| 100 |
+
# Documentation
|
| 101 |
+
CLAUDE.md
|
| 102 |
+
TODO.md
|
| 103 |
+
# Include README.md only if needed (.dockerignore has no trailing comments —
# a trailing "# ..." becomes part of the pattern and it never matches)
README.md
|
| 104 |
+
docs/
|
| 105 |
+
mkdocs.yml
|
| 106 |
+
|
| 107 |
+
# CI/CD
|
| 108 |
+
.github/
|
| 109 |
+
.gitlab-ci.yml
|
| 110 |
+
.travis.yml
|
| 111 |
+
Jenkinsfile
|
| 112 |
+
azure-pipelines.yml
|
| 113 |
+
|
| 114 |
+
# Docker Files (don't need these in image)
|
| 115 |
+
Dockerfile
|
| 116 |
+
.dockerignore
|
| 117 |
+
docker-compose.yml
|
| 118 |
+
docker-compose.*.yml
|
| 119 |
+
|
| 120 |
+
# Package Manager Locks (handled by requirements.txt)
|
| 121 |
+
package-lock.json
|
| 122 |
+
yarn.lock
|
| 123 |
+
poetry.lock
|
| 124 |
+
Pipfile.lock
|
| 125 |
+
|
| 126 |
+
# OS Files
|
| 127 |
+
Thumbs.db
|
| 128 |
+
.DS_Store
|
| 129 |
+
.AppleDouble
|
| 130 |
+
.LSOverride
|
| 131 |
+
|
| 132 |
+
# Development Files
|
| 133 |
+
test_*.py
|
| 134 |
+
demo/
|
| 135 |
+
examples/
|
| 136 |
+
scripts/
|
| 137 |
+
*.ipynb
|
| 138 |
+
.ipynb_checkpoints/
|
| 139 |
+
|
| 140 |
+
# Credentials (IMPORTANT - never include in image)
|
| 141 |
+
credentials/
|
| 142 |
+
*.json
|
| 143 |
+
*.key
|
| 144 |
+
*.pem
|
| 145 |
+
*.pfx
|
| 146 |
+
.credentials
|
| 147 |
+
|
| 148 |
+
# Large Files
|
| 149 |
+
*.zip
|
| 150 |
+
*.tar.gz
|
| 151 |
+
*.tar
|
| 152 |
+
*.rar
|
| 153 |
+
*.7z
|
.gitignore
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# =============================================================================
|
| 2 |
+
# NotiFlow v2 — .gitignore
|
| 3 |
+
# =============================================================================
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
# ---------------------------------------------------------------------------
|
| 7 |
+
# Secrets & Environment
|
| 8 |
+
# ---------------------------------------------------------------------------
|
| 9 |
+
.env
|
| 10 |
+
.env.*
|
| 11 |
+
!.env.example # keep the example template
|
| 12 |
+
|
| 13 |
+
# ---------------------------------------------------------------------------
|
| 14 |
+
# Python virtual environments
|
| 15 |
+
# ---------------------------------------------------------------------------
|
| 16 |
+
.venv/
|
| 17 |
+
venv/
|
| 18 |
+
env/
|
| 19 |
+
ENV/
|
| 20 |
+
.Python
|
| 21 |
+
|
| 22 |
+
# ---------------------------------------------------------------------------
|
| 23 |
+
# Python compiled / cache
|
| 24 |
+
# ---------------------------------------------------------------------------
|
| 25 |
+
__pycache__/
|
| 26 |
+
*.py[cod]
|
| 27 |
+
*$py.class
|
| 28 |
+
*.so
|
| 29 |
+
*.pyd
|
| 30 |
+
|
| 31 |
+
# ---------------------------------------------------------------------------
|
| 32 |
+
# Build / packaging
|
| 33 |
+
# ---------------------------------------------------------------------------
|
| 34 |
+
build/
|
| 35 |
+
dist/
|
| 36 |
+
develop-eggs/
|
| 37 |
+
downloads/
|
| 38 |
+
eggs/
|
| 39 |
+
.eggs/
|
| 40 |
+
lib/
|
| 41 |
+
lib64/
|
| 42 |
+
parts/
|
| 43 |
+
sdist/
|
| 44 |
+
var/
|
| 45 |
+
wheels/
|
| 46 |
+
*.egg-info/
|
| 47 |
+
.installed.cfg
|
| 48 |
+
*.egg
|
| 49 |
+
MANIFEST
|
| 50 |
+
|
| 51 |
+
# ---------------------------------------------------------------------------
|
| 52 |
+
# Data files — generated at runtime, not source code
|
| 53 |
+
# ---------------------------------------------------------------------------
|
| 54 |
+
data/notiflow_data.xlsx
|
| 55 |
+
data/*.xlsx
|
| 56 |
+
data/*.xls
|
| 57 |
+
data/*.csv
|
| 58 |
+
data/agent_memory.json
|
| 59 |
+
*.db
|
| 60 |
+
*.sqlite3
|
| 61 |
+
|
| 62 |
+
# ---------------------------------------------------------------------------
|
| 63 |
+
# Logs & temporary output
|
| 64 |
+
# ---------------------------------------------------------------------------
|
| 65 |
+
*.log
|
| 66 |
+
*.log.*
|
| 67 |
+
logs/
|
| 68 |
+
test_out.txt
|
| 69 |
+
test_output.txt
|
| 70 |
+
*.tmp
|
| 71 |
+
*.bak
|
| 72 |
+
*.cache
|
| 73 |
+
|
| 74 |
+
# ---------------------------------------------------------------------------
|
| 75 |
+
# Testing & coverage
|
| 76 |
+
# ---------------------------------------------------------------------------
|
| 77 |
+
.coverage
|
| 78 |
+
.pytest_cache/
|
| 79 |
+
htmlcov/
|
| 80 |
+
.tox/
|
| 81 |
+
.hypothesis/
|
| 82 |
+
*.coveragerc
|
| 83 |
+
|
| 84 |
+
# ---------------------------------------------------------------------------
|
| 85 |
+
# IDE & Editor
|
| 86 |
+
# ---------------------------------------------------------------------------
|
| 87 |
+
.vscode/
|
| 88 |
+
.idea/
|
| 89 |
+
*.iml
|
| 90 |
+
*.swp
|
| 91 |
+
*.swo
|
| 92 |
+
*~
|
| 93 |
+
.gradle/
|
| 94 |
+
out/
|
| 95 |
+
|
| 96 |
+
# ---------------------------------------------------------------------------
|
| 97 |
+
# OS generated
|
| 98 |
+
# ---------------------------------------------------------------------------
|
| 99 |
+
.DS_Store
|
| 100 |
+
.DS_Store?
|
| 101 |
+
._*
|
| 102 |
+
.Spotlight-V100
|
| 103 |
+
.Trashes
|
| 104 |
+
ehthumbs.db
|
| 105 |
+
Thumbs.db
|
| 106 |
+
desktop.ini
|
| 107 |
+
|
| 108 |
+
# ---------------------------------------------------------------------------
|
| 109 |
+
# Jupyter
|
| 110 |
+
# ---------------------------------------------------------------------------
|
| 111 |
+
.ipynb_checkpoints
|
| 112 |
+
*.ipynb
|
| 113 |
+
|
| 114 |
+
# ---------------------------------------------------------------------------
|
| 115 |
+
# Node (if frontend is ever added)
|
| 116 |
+
# ---------------------------------------------------------------------------
|
| 117 |
+
node_modules/
|
| 118 |
+
.eslintcache
|
| 119 |
+
.stylelintcache
|
| 120 |
+
.node_repl_history
|
| 121 |
+
npm-debug.log*
|
| 122 |
+
yarn-debug.log*
|
| 123 |
+
yarn-error.log*
|
| 124 |
+
.pnp/
|
| 125 |
+
.pnp.js
|
| 126 |
+
|
| 127 |
+
# ---------------------------------------------------------------------------
|
| 128 |
+
# Miscellaneous stray artefacts (PowerShell / curl leftovers)
|
| 129 |
+
# ---------------------------------------------------------------------------
|
| 130 |
+
-Body
|
| 131 |
+
-ContentType
|
| 132 |
+
-Uri
|
Dockerfile
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# =============================================================================
# VyaparFlow - Hugging Face Space Dockerfile
# =============================================================================
# Multi-stage build for optimized image size
# Runs FastAPI backend on port 7860 (Hugging Face requirement)

# Stage 1: Builder
# =============================================================================
# "AS" in upper case — lower-case "as" triggers a BuildKit casing warning.
FROM python:3.13-slim AS builder

WORKDIR /build

# Install system dependencies for build
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    git \
    && rm -rf /var/lib/apt/lists/*

# Copy requirements and install Python dependencies into /root/.local
# (--user) so the runtime stage can copy just the installed packages.
COPY requirements.txt .
RUN pip install --user --no-cache-dir -r requirements.txt

# Stage 2: Runtime
# =============================================================================
FROM python:3.13-slim

# Set environment variables
ENV PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1 \
    PATH=/root/.local/bin:$PATH \
    PORT=7860 \
    NOTIFLOW_DEMO_MODE=true

WORKDIR /app

# Install runtime dependencies only (curl is needed by HEALTHCHECK below)
RUN apt-get update && apt-get install -y --no-install-recommends \
    curl \
    && rm -rf /var/lib/apt/lists/*

# Copy Python dependencies from builder
COPY --from=builder /root/.local /root/.local

# Copy entire project
COPY . .

# Create necessary directories with proper permissions
RUN mkdir -p /app/data /app/credentials && \
    chmod -R 755 /app

# Expose Hugging Face default port
EXPOSE 7860

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD curl -f http://localhost:7860/health || exit 1

# Default command: Run FastAPI with uvicorn
# Listens on 0.0.0.0:7860 (required for Hugging Face Spaces)
# FIX: the app module lives at app/main.py (see repo tree), not backend/main.py —
# "backend.main:app" would fail with ModuleNotFoundError at container start.
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "7860"]
|
app/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
"""NotiFlow Autonomous — app package."""
|
app/agents/__init__.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""app/agents — NotiFlow agent registry."""
|
| 2 |
+
|
| 3 |
+
from app.agents.intent_agent import IntentAgent
|
| 4 |
+
from app.agents.extraction_agent import ExtractionAgent
|
| 5 |
+
from app.agents.validation_agent import ValidationAgent
|
| 6 |
+
from app.agents.invoice_agent import InvoiceAgent
|
| 7 |
+
from app.agents.payment_agent import PaymentAgent
|
| 8 |
+
from app.agents.skill_router_agent import SkillRouterAgent
|
| 9 |
+
from app.agents.ledger_agent import LedgerAgent
|
| 10 |
+
|
| 11 |
+
__all__ = [
|
| 12 |
+
"IntentAgent",
|
| 13 |
+
"ExtractionAgent",
|
| 14 |
+
"ValidationAgent",
|
| 15 |
+
"InvoiceAgent",
|
| 16 |
+
"PaymentAgent",
|
| 17 |
+
"SkillRouterAgent",
|
| 18 |
+
"LedgerAgent",
|
| 19 |
+
]
|
app/agents/escalation_agent.py
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
app/agents/escalation_agent.py
|
| 3 |
+
-------------------------------
|
| 4 |
+
EscalationAgent — Autonomy Layer, Step 5.
|
| 5 |
+
|
| 6 |
+
Triggers when priority is "high"/"critical" OR risk is "high".
|
| 7 |
+
Logs structured alerts and simulates an external notification.
|
| 8 |
+
|
| 9 |
+
In production, replace _notify() with a real webhook/SMS/email call.
|
| 10 |
+
|
| 11 |
+
Output written to context["alerts"]:
|
| 12 |
+
[
|
| 13 |
+
{
|
| 14 |
+
"level": "warning" | "critical",
|
| 15 |
+
"trigger": str, # what caused this alert
|
| 16 |
+
"message": str, # human-readable description
|
| 17 |
+
"timestamp": str, # ISO-8601
|
| 18 |
+
"notified": bool, # True if notification was "sent"
|
| 19 |
+
},
|
| 20 |
+
...
|
| 21 |
+
]
|
| 22 |
+
"""
|
| 23 |
+
|
| 24 |
+
from __future__ import annotations
|
| 25 |
+
|
| 26 |
+
import logging
|
| 27 |
+
from datetime import datetime, timezone
|
| 28 |
+
from typing import Any
|
| 29 |
+
|
| 30 |
+
from app.core.base_agent import BaseAgent
|
| 31 |
+
|
| 32 |
+
logger = logging.getLogger(__name__)
|
| 33 |
+
|
| 34 |
+
_ESCALATION_PRIORITIES = {"high", "critical"}
|
| 35 |
+
_ESCALATION_RISKS = {"high"}
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def _now_iso() -> str:
|
| 39 |
+
return datetime.now(timezone.utc).isoformat()
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class EscalationAgent(BaseAgent):
|
| 43 |
+
"""Detect high-priority or high-risk situations and raise alerts."""
|
| 44 |
+
|
| 45 |
+
name = "EscalationAgent"
|
| 46 |
+
input_keys = ["priority", "risk", "errors", "intent", "data"]
|
| 47 |
+
output_keys = ["alerts"]
|
| 48 |
+
action = "Raise alerts and simulate notifications for critical situations"
|
| 49 |
+
|
| 50 |
+
def execute(self, context: dict[str, Any]) -> dict[str, Any]:
|
| 51 |
+
priority = (context.get("priority") or "low").lower()
|
| 52 |
+
risk = context.get("risk", {})
|
| 53 |
+
errors = context.get("errors", [])
|
| 54 |
+
intent = (context.get("intent") or "other").lower()
|
| 55 |
+
data = context.get("data", {})
|
| 56 |
+
|
| 57 |
+
alerts: list[dict[str, Any]] = []
|
| 58 |
+
|
| 59 |
+
# ── Trigger 1: high priority score (was "critical" in old model) ─────
|
| 60 |
+
score = context.get("priority_score", 0)
|
| 61 |
+
if score >= 80:
|
| 62 |
+
alert = self._build_alert(
|
| 63 |
+
level = "critical",
|
| 64 |
+
trigger = f"priority_score={score}",
|
| 65 |
+
message = (
|
| 66 |
+
f"CRITICAL priority score {score}/100. "
|
| 67 |
+
f"Intent: {intent}. "
|
| 68 |
+
f"Customer: {data.get('customer', 'unknown')}."
|
| 69 |
+
),
|
| 70 |
+
)
|
| 71 |
+
alerts.append(alert)
|
| 72 |
+
self._notify(alert)
|
| 73 |
+
|
| 74 |
+
# ── Trigger 2: high priority label ───────────────────────────────────
|
| 75 |
+
elif priority == "high":
|
| 76 |
+
alert = self._build_alert(
|
| 77 |
+
level = "warning",
|
| 78 |
+
trigger = f"priority=high score={score}",
|
| 79 |
+
message = (
|
| 80 |
+
f"High priority transaction flagged. "
|
| 81 |
+
f"Intent: {intent}. "
|
| 82 |
+
f"Score: {score}/100. "
|
| 83 |
+
f"Reason: {context.get('metadata', {}).get('urgency_reason', 'n/a')}."
|
| 84 |
+
),
|
| 85 |
+
)
|
| 86 |
+
alerts.append(alert)
|
| 87 |
+
self._notify(alert)
|
| 88 |
+
|
| 89 |
+
# ── Trigger 3: high risk ─────────────────────────────────────────────
|
| 90 |
+
if risk.get("level") in _ESCALATION_RISKS:
|
| 91 |
+
risk_reasons = "; ".join(risk.get("reasons", []))
|
| 92 |
+
alert = self._build_alert(
|
| 93 |
+
level = "warning",
|
| 94 |
+
trigger = "risk=high",
|
| 95 |
+
message = f"High risk transaction. Reasons: {risk_reasons}",
|
| 96 |
+
)
|
| 97 |
+
alerts.append(alert)
|
| 98 |
+
self._notify(alert)
|
| 99 |
+
|
| 100 |
+
# ── Trigger 4: critical errors in pipeline ───────────────────────────
|
| 101 |
+
critical_errors = [e for e in errors if "[Monitor]" in e]
|
| 102 |
+
if len(critical_errors) >= 2:
|
| 103 |
+
alert = self._build_alert(
|
| 104 |
+
level = "warning",
|
| 105 |
+
trigger = "monitor_issues",
|
| 106 |
+
message = (
|
| 107 |
+
f"{len(critical_errors)} monitor issue(s) detected: "
|
| 108 |
+
f"{critical_errors[0]}"
|
| 109 |
+
),
|
| 110 |
+
)
|
| 111 |
+
alerts.append(alert)
|
| 112 |
+
|
| 113 |
+
context["alerts"] = alerts
|
| 114 |
+
if alerts:
|
| 115 |
+
logger.warning(
|
| 116 |
+
"[EscalationAgent] %d alert(s) raised (priority=%s risk=%s)",
|
| 117 |
+
len(alerts), priority, risk.get("level", "unknown")
|
| 118 |
+
)
|
| 119 |
+
else:
|
| 120 |
+
logger.info("[EscalationAgent] no escalation needed")
|
| 121 |
+
return context
|
| 122 |
+
|
| 123 |
+
@staticmethod
|
| 124 |
+
def _build_alert(level: str, trigger: str, message: str) -> dict[str, Any]:
|
| 125 |
+
return {
|
| 126 |
+
"level": level,
|
| 127 |
+
"trigger": trigger,
|
| 128 |
+
"message": message,
|
| 129 |
+
"timestamp": _now_iso(),
|
| 130 |
+
"notified": False, # updated by _notify()
|
| 131 |
+
}
|
| 132 |
+
|
| 133 |
+
@staticmethod
|
| 134 |
+
def _notify(alert: dict[str, Any]) -> None:
|
| 135 |
+
"""
|
| 136 |
+
Simulate sending an external notification.
|
| 137 |
+
Replace this method with a real webhook/SMS/email integration.
|
| 138 |
+
"""
|
| 139 |
+
alert["notified"] = True
|
| 140 |
+
logger.warning(
|
| 141 |
+
"[EscalationAgent] 🚨 ALERT [%s] %s",
|
| 142 |
+
alert["level"].upper(), alert["message"]
|
| 143 |
+
)
|
app/agents/extraction_agent.py
ADDED
|
@@ -0,0 +1,181 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
app/agents/extraction_agent.py
|
| 3 |
+
------------------------------
|
| 4 |
+
ExtractionAgent — Stage 2 of the NotiFlow pipeline.
|
| 5 |
+
|
| 6 |
+
Phase 5: Multi-intent support.
|
| 7 |
+
- Reads context["intents"] (list); falls back to context["intent"]
|
| 8 |
+
- Single LLM call extracts data for ALL intents at once
|
| 9 |
+
- Writes context["multi_data"] = {intent: {fields}} (all intents)
|
| 10 |
+
- Writes context["data"] = multi_data[primary] (backward compat)
|
| 11 |
+
|
| 12 |
+
Single-intent path is identical to Phase 1-4 behaviour.
|
| 13 |
+
"""
|
| 14 |
+
|
| 15 |
+
from __future__ import annotations
|
| 16 |
+
|
| 17 |
+
import json
|
| 18 |
+
import logging
|
| 19 |
+
import re
|
| 20 |
+
from pathlib import Path
|
| 21 |
+
from typing import Any
|
| 22 |
+
|
| 23 |
+
from app.core.base_agent import BaseAgent
|
| 24 |
+
from app.core.context import update_context
|
| 25 |
+
from app.core.llm_service import get_llm
|
| 26 |
+
|
| 27 |
+
logger = logging.getLogger(__name__)
|
| 28 |
+
|
| 29 |
+
_PROMPT_PATH = Path(__file__).parent.parent.parent / "prompts" / "extraction_prompt.txt"
|
| 30 |
+
|
| 31 |
+
INTENT_SCHEMA: dict[str, list[str]] = {
|
| 32 |
+
"order": ["customer", "item", "quantity"],
|
| 33 |
+
"payment": ["customer", "amount", "payment_type"],
|
| 34 |
+
"credit": ["customer", "item", "quantity", "amount"],
|
| 35 |
+
"return": ["customer", "item", "reason"],
|
| 36 |
+
"preparation": ["item", "quantity"],
|
| 37 |
+
"other": ["note"],
|
| 38 |
+
}
|
| 39 |
+
VALID_INTENTS = set(INTENT_SCHEMA.keys())
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class ExtractionAgent(BaseAgent):
|
| 43 |
+
"""Extract structured business fields for ALL detected intents."""
|
| 44 |
+
|
| 45 |
+
name = "ExtractionAgent"
|
| 46 |
+
input_keys = ["message", "intent", "intents"]
|
| 47 |
+
output_keys = ["data", "multi_data", "state"]
|
| 48 |
+
action = "Extract structured entities for all detected intents"
|
| 49 |
+
|
| 50 |
+
def execute(self, context: dict[str, Any]) -> dict[str, Any]:
|
| 51 |
+
message = context.get("message", "").strip()
|
| 52 |
+
|
| 53 |
+
# Resolve intents — Phase 5 uses context["intents"], fallback to ["intent"]
|
| 54 |
+
intents = context.get("intents") or []
|
| 55 |
+
if not intents:
|
| 56 |
+
primary = (context.get("intent") or "other").lower().strip()
|
| 57 |
+
intents = [primary]
|
| 58 |
+
|
| 59 |
+
# Validate each intent
|
| 60 |
+
intents = [i if i in VALID_INTENTS else "other" for i in intents]
|
| 61 |
+
primary = intents[0]
|
| 62 |
+
|
| 63 |
+
if not message:
|
| 64 |
+
multi_data = {i: self._null_data(i) for i in intents}
|
| 65 |
+
update_context(context,
|
| 66 |
+
data=multi_data[primary],
|
| 67 |
+
multi_data=multi_data,
|
| 68 |
+
state="extracted")
|
| 69 |
+
return context
|
| 70 |
+
|
| 71 |
+
# ── Single LLM call for all intents ──────────────────────────────
|
| 72 |
+
prompt = self._load_prompt(message, intents)
|
| 73 |
+
try:
|
| 74 |
+
raw = get_llm().generate(
|
| 75 |
+
prompt,
|
| 76 |
+
max_tokens=400,
|
| 77 |
+
agent_name=self.name,
|
| 78 |
+
task_type="extraction",
|
| 79 |
+
context=context,
|
| 80 |
+
)
|
| 81 |
+
except Exception as exc:
|
| 82 |
+
logger.error("[ExtractionAgent] all LLM backends failed - using null extraction: %s", exc)
|
| 83 |
+
raw = "{}"
|
| 84 |
+
multi_data = self._parse(raw, intents)
|
| 85 |
+
|
| 86 |
+
update_context(context,
|
| 87 |
+
data=multi_data[primary], # backward compat
|
| 88 |
+
multi_data=multi_data,
|
| 89 |
+
state="extracted")
|
| 90 |
+
logger.info("[ExtractionAgent] intents=%s extracted=%s", intents, multi_data)
|
| 91 |
+
return context
|
| 92 |
+
|
| 93 |
+
# ── Helpers ──────────────────────────────────────────────────────────────
|
| 94 |
+
|
| 95 |
+
@staticmethod
|
| 96 |
+
def _load_prompt(message: str, intents: list[str]) -> str:
|
| 97 |
+
template = _PROMPT_PATH.read_text(encoding="utf-8")
|
| 98 |
+
intents_str = ", ".join(intents)
|
| 99 |
+
prompt = template.replace("{message}", message.strip())
|
| 100 |
+
prompt = prompt.replace("{intents}", intents_str)
|
| 101 |
+
# Legacy placeholder — extraction_prompt.txt used to have {intent}
|
| 102 |
+
prompt = prompt.replace("{intent}", intents_str)
|
| 103 |
+
return prompt
|
| 104 |
+
|
| 105 |
+
@staticmethod
|
| 106 |
+
def _parse(raw: str, intents: list[str]) -> dict[str, dict]:
|
| 107 |
+
"""
|
| 108 |
+
Parse LLM output into a per-intent data dict.
|
| 109 |
+
|
| 110 |
+
Handles two response shapes:
|
| 111 |
+
Single: {"customer": "Rahul", "amount": 500, ...}
|
| 112 |
+
Multi: {"payment": {"customer": ...}, "order": {"item": ...}}
|
| 113 |
+
|
| 114 |
+
Returns: {intent: {fields}} for every intent in intents.
|
| 115 |
+
"""
|
| 116 |
+
cleaned = re.sub(r"```(?:json)?|```", "", raw).strip()
|
| 117 |
+
parsed: dict = {}
|
| 118 |
+
try:
|
| 119 |
+
parsed = json.loads(cleaned)
|
| 120 |
+
except json.JSONDecodeError:
|
| 121 |
+
match = re.search(r"\{.*\}", cleaned, re.DOTALL)
|
| 122 |
+
if match:
|
| 123 |
+
try:
|
| 124 |
+
parsed = json.loads(match.group(0))
|
| 125 |
+
except json.JSONDecodeError:
|
| 126 |
+
logger.warning("[ExtractionAgent] could not parse JSON; using nulls")
|
| 127 |
+
|
| 128 |
+
multi_data: dict[str, dict] = {}
|
| 129 |
+
|
| 130 |
+
if len(intents) == 1:
|
| 131 |
+
# Single-intent response — flat dict
|
| 132 |
+
intent = intents[0]
|
| 133 |
+
multi_data[intent] = ExtractionAgent._extract_single(parsed, intent)
|
| 134 |
+
else:
|
| 135 |
+
# Multi-intent response — keyed by intent name
|
| 136 |
+
for intent in intents:
|
| 137 |
+
if intent in parsed and isinstance(parsed[intent], dict):
|
| 138 |
+
multi_data[intent] = ExtractionAgent._extract_single(
|
| 139 |
+
parsed[intent], intent
|
| 140 |
+
)
|
| 141 |
+
else:
|
| 142 |
+
# Fallback: maybe LLM returned flat dict for first intent
|
| 143 |
+
if not multi_data and intent == intents[0]:
|
| 144 |
+
multi_data[intent] = ExtractionAgent._extract_single(parsed, intent)
|
| 145 |
+
else:
|
| 146 |
+
multi_data[intent] = ExtractionAgent._null_data(intent)
|
| 147 |
+
|
| 148 |
+
# Guarantee every requested intent has an entry
|
| 149 |
+
for intent in intents:
|
| 150 |
+
if intent not in multi_data:
|
| 151 |
+
multi_data[intent] = ExtractionAgent._null_data(intent)
|
| 152 |
+
|
| 153 |
+
return multi_data
|
| 154 |
+
|
| 155 |
+
@staticmethod
|
| 156 |
+
def _extract_single(src: dict, intent: str) -> dict:
|
| 157 |
+
"""Build a schema-validated dict for one intent from a source dict."""
|
| 158 |
+
schema = INTENT_SCHEMA.get(intent, INTENT_SCHEMA["other"])
|
| 159 |
+
result = {field: src.get(field) for field in schema}
|
| 160 |
+
|
| 161 |
+
# Normalise customer to Title Case
|
| 162 |
+
if "customer" in result and isinstance(result["customer"], str):
|
| 163 |
+
result["customer"] = result["customer"].strip().title()
|
| 164 |
+
if result["customer"].lower() in ("null", "none", ""):
|
| 165 |
+
result["customer"] = None
|
| 166 |
+
|
| 167 |
+
# Coerce numeric fields
|
| 168 |
+
for num_field in ("amount", "quantity"):
|
| 169 |
+
if num_field in result and result[num_field] is not None:
|
| 170 |
+
try:
|
| 171 |
+
val = float(result[num_field])
|
| 172 |
+
result[num_field] = int(val) if val == int(val) else val
|
| 173 |
+
except (ValueError, TypeError):
|
| 174 |
+
result[num_field] = None
|
| 175 |
+
|
| 176 |
+
return result
|
| 177 |
+
|
| 178 |
+
@staticmethod
|
| 179 |
+
def _null_data(intent: str) -> dict:
|
| 180 |
+
schema = INTENT_SCHEMA.get(intent, INTENT_SCHEMA["other"])
|
| 181 |
+
return {field: None for field in schema}
|
app/agents/intent_agent.py
ADDED
|
@@ -0,0 +1,136 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
app/agents/intent_agent.py
|
| 3 |
+
--------------------------
|
| 4 |
+
IntentAgent — Stage 1 of the NotiFlow pipeline.
|
| 5 |
+
|
| 6 |
+
Phase 5: Multi-intent support.
|
| 7 |
+
- Prompt now returns {"intents": ["payment", "order"]}
|
| 8 |
+
- context["intents"] = full detected list
|
| 9 |
+
- context["intent"] = intents[0] ← backward compat
|
| 10 |
+
|
| 11 |
+
Backward compat: if LLM returns old {"intent": "x"} format, wraps in list.
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
from __future__ import annotations
|
| 15 |
+
|
| 16 |
+
import json
|
| 17 |
+
import logging
|
| 18 |
+
import re
|
| 19 |
+
from pathlib import Path
|
| 20 |
+
from typing import Any
|
| 21 |
+
|
| 22 |
+
from app.core.base_agent import BaseAgent
|
| 23 |
+
from app.core.context import update_context
|
| 24 |
+
from app.core.llm_service import get_llm
|
| 25 |
+
|
| 26 |
+
logger = logging.getLogger(__name__)
|
| 27 |
+
|
| 28 |
+
# Prompt lives in the repo-level prompts/ folder
|
| 29 |
+
_PROMPT_PATH = Path(__file__).parent.parent.parent / "prompts" / "intent_prompt.txt"
|
| 30 |
+
|
| 31 |
+
VALID_INTENTS = {"order", "payment", "credit", "return", "preparation", "other"}
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class IntentAgent(BaseAgent):
|
| 35 |
+
"""Classify ALL business intents from a Hinglish message."""
|
| 36 |
+
|
| 37 |
+
name = "IntentAgent"
|
| 38 |
+
input_keys = ["message"]
|
| 39 |
+
output_keys = ["intent", "intents", "state"]
|
| 40 |
+
action = "Classify all business intents from Hinglish message"
|
| 41 |
+
|
| 42 |
+
def execute(self, context: dict[str, Any]) -> dict[str, Any]:
|
| 43 |
+
"""
|
| 44 |
+
Detect intents and write them into context.
|
| 45 |
+
|
| 46 |
+
Writes:
|
| 47 |
+
context["intents"] — ordered list of all detected intents
|
| 48 |
+
context["intent"] — primary intent (intents[0]) for backward compat
|
| 49 |
+
Transitions state to "intent_detected" on success.
|
| 50 |
+
"""
|
| 51 |
+
message = context.get("message", "").strip()
|
| 52 |
+
if not message:
|
| 53 |
+
logger.warning("[IntentAgent] empty message — defaulting to 'other'")
|
| 54 |
+
update_context(context, intent="other", intents=["other"], state="intent_detected")
|
| 55 |
+
return context
|
| 56 |
+
|
| 57 |
+
prompt = self._load_prompt(message)
|
| 58 |
+
try:
|
| 59 |
+
raw = get_llm().generate(
|
| 60 |
+
prompt,
|
| 61 |
+
max_tokens=96,
|
| 62 |
+
agent_name=self.name,
|
| 63 |
+
task_type="classification",
|
| 64 |
+
context=context,
|
| 65 |
+
)
|
| 66 |
+
except Exception as exc:
|
| 67 |
+
logger.error("[IntentAgent] all LLM backends failed - defaulting to 'other': %s", exc)
|
| 68 |
+
raw = '{"intent": "other"}'
|
| 69 |
+
intents = self._parse(raw)
|
| 70 |
+
primary = intents[0]
|
| 71 |
+
|
| 72 |
+
update_context(context, intent=primary, intents=intents, state="intent_detected")
|
| 73 |
+
logger.info("[IntentAgent] intents=%s (primary=%s)", intents, primary)
|
| 74 |
+
return context
|
| 75 |
+
|
| 76 |
+
# ── Helpers ──────────────────────────────────────────────────────────────
|
| 77 |
+
|
| 78 |
+
@staticmethod
|
| 79 |
+
def _load_prompt(message: str) -> str:
|
| 80 |
+
template = _PROMPT_PATH.read_text(encoding="utf-8")
|
| 81 |
+
return template.replace("{message}", message)
|
| 82 |
+
|
| 83 |
+
@staticmethod
|
| 84 |
+
def _parse(raw: str) -> list[str]:
|
| 85 |
+
"""
|
| 86 |
+
Parse LLM output into a list of valid intents.
|
| 87 |
+
|
| 88 |
+
Handles:
|
| 89 |
+
- New format: {"intents": ["payment", "order"]}
|
| 90 |
+
- Old format: {"intent": "payment"} ← backward compat
|
| 91 |
+
- Bare string: payment
|
| 92 |
+
Returns at least ["other"] on any parse failure.
|
| 93 |
+
"""
|
| 94 |
+
cleaned = re.sub(r"```(?:json)?|```", "", raw).strip()
|
| 95 |
+
intents: list[str] = []
|
| 96 |
+
|
| 97 |
+
try:
|
| 98 |
+
result = json.loads(cleaned)
|
| 99 |
+
if isinstance(result, dict):
|
| 100 |
+
if "intents" in result:
|
| 101 |
+
raw_list = result["intents"]
|
| 102 |
+
if isinstance(raw_list, list):
|
| 103 |
+
intents = [str(x).lower().strip() for x in raw_list]
|
| 104 |
+
else:
|
| 105 |
+
intents = [str(raw_list).lower().strip()]
|
| 106 |
+
elif "intent" in result:
|
| 107 |
+
# Old single-intent format — wrap in list
|
| 108 |
+
intents = [str(result["intent"]).lower().strip()]
|
| 109 |
+
except json.JSONDecodeError:
|
| 110 |
+
# Try regex fallback for intents array (handles truncated/malformed JSON)
|
| 111 |
+
arr_match = re.search(r'"intents"\s*:\s*\[([^\]]*)', cleaned)
|
| 112 |
+
if arr_match:
|
| 113 |
+
raw_items = arr_match.group(1)
|
| 114 |
+
intents = [
|
| 115 |
+
m.lower().strip()
|
| 116 |
+
for m in re.findall(r'"(\w+)"', raw_items)
|
| 117 |
+
]
|
| 118 |
+
else:
|
| 119 |
+
# Try old single-intent regex
|
| 120 |
+
match = re.search(r'"intent"\s*:\s*"(\w+)"', cleaned)
|
| 121 |
+
if match:
|
| 122 |
+
intents = [match.group(1).lower()]
|
| 123 |
+
|
| 124 |
+
# Validate — keep only known intents, deduplicate, preserve order
|
| 125 |
+
seen: set[str] = set()
|
| 126 |
+
valid: list[str] = []
|
| 127 |
+
for intent in intents:
|
| 128 |
+
if intent in VALID_INTENTS and intent not in seen:
|
| 129 |
+
valid.append(intent)
|
| 130 |
+
seen.add(intent)
|
| 131 |
+
|
| 132 |
+
if not valid:
|
| 133 |
+
logger.warning("[IntentAgent] could not parse valid intents from: %r", raw[:80])
|
| 134 |
+
valid = ["other"]
|
| 135 |
+
|
| 136 |
+
return valid
|
app/agents/invoice_agent.py
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import logging
|
| 4 |
+
from typing import Any
|
| 5 |
+
|
| 6 |
+
from app.core.base_agent import BaseAgent
|
| 7 |
+
from app.core.context import update_context
|
| 8 |
+
from app.core.event_bus import emit_event, push_live_log, store_invoice
|
| 9 |
+
from app.services.invoice_service import InvoiceBuilder
|
| 10 |
+
|
| 11 |
+
logger = logging.getLogger(__name__)
|
| 12 |
+
|
| 13 |
+
PRICE_MAP = {
|
| 14 |
+
"kurti": 50.0,
|
| 15 |
+
"sugar": 50.0,
|
| 16 |
+
"atta": 40.0,
|
| 17 |
+
}
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class InvoiceAgent(BaseAgent):
|
| 21 |
+
"""Generate a business invoice from validated order data."""
|
| 22 |
+
|
| 23 |
+
name = "InvoiceAgent"
|
| 24 |
+
input_keys = ["intent", "data", "source"]
|
| 25 |
+
output_keys = ["invoice", "event", "state"]
|
| 26 |
+
action = "Generate invoice from validated business data"
|
| 27 |
+
|
| 28 |
+
def execute(self, context: dict[str, Any]) -> dict[str, Any]:
|
| 29 |
+
intent = (context.get("intent") or "other").lower()
|
| 30 |
+
data = context.get("data", {}) or {}
|
| 31 |
+
|
| 32 |
+
if intent != "order":
|
| 33 |
+
update_context(
|
| 34 |
+
context,
|
| 35 |
+
event={"event": f"{intent}_received", "data": data},
|
| 36 |
+
state="invoice_skipped",
|
| 37 |
+
)
|
| 38 |
+
return context
|
| 39 |
+
|
| 40 |
+
builder = InvoiceBuilder(catalog_prices=PRICE_MAP)
|
| 41 |
+
invoice = builder.build(
|
| 42 |
+
customer=data.get("customer") or "Walk-in customer",
|
| 43 |
+
item=data.get("item"),
|
| 44 |
+
quantity=data.get("quantity"),
|
| 45 |
+
order_id=context.get("event", {}).get("order_id"),
|
| 46 |
+
)
|
| 47 |
+
invoice = store_invoice(invoice)
|
| 48 |
+
|
| 49 |
+
update_context(
|
| 50 |
+
context,
|
| 51 |
+
invoice=invoice,
|
| 52 |
+
event={"event": "invoice_generated", "invoice": invoice},
|
| 53 |
+
state="invoice_generated",
|
| 54 |
+
)
|
| 55 |
+
|
| 56 |
+
invoice_log = push_live_log(
|
| 57 |
+
context,
|
| 58 |
+
{
|
| 59 |
+
"agent": self.name,
|
| 60 |
+
"status": "success",
|
| 61 |
+
"action": f"Invoice generated: {invoice['invoice_id']}",
|
| 62 |
+
"detail": f"[{self.name}] Invoice generated: {invoice['invoice_id']}",
|
| 63 |
+
},
|
| 64 |
+
)
|
| 65 |
+
emit_event(
|
| 66 |
+
context,
|
| 67 |
+
"invoice_generated",
|
| 68 |
+
invoice,
|
| 69 |
+
agent=self.name,
|
| 70 |
+
step="invoice",
|
| 71 |
+
message=f"Invoice generated: {invoice['invoice_id']}",
|
| 72 |
+
log_entry=invoice_log,
|
| 73 |
+
)
|
| 74 |
+
logger.info("[InvoiceAgent] invoice=%s", invoice["invoice_id"])
|
| 75 |
+
return context
|
app/agents/ledger_agent.py
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
app/agents/ledger_agent.py
|
| 3 |
+
--------------------------
|
| 4 |
+
LedgerAgent — Stage 5 of the NotiFlow pipeline.
|
| 5 |
+
|
| 6 |
+
Wraps services/google_sheets_service.py into the BaseAgent interface.
|
| 7 |
+
Appends the processed transaction to the live Google Sheets ledger.
|
| 8 |
+
|
| 9 |
+
Non-fatal: if Sheets is unavailable the pipeline continues normally and
|
| 10 |
+
context["metadata"]["sheet_updated"] is set to False.
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
from __future__ import annotations
|
| 14 |
+
|
| 15 |
+
import logging
|
| 16 |
+
from typing import Any
|
| 17 |
+
|
| 18 |
+
from app.core.base_agent import BaseAgent
|
| 19 |
+
from app.core.context import add_error
|
| 20 |
+
|
| 21 |
+
logger = logging.getLogger(__name__)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class LedgerAgent(BaseAgent):
|
| 25 |
+
"""Sync the processed transaction to the Google Sheets ledger."""
|
| 26 |
+
|
| 27 |
+
name = "LedgerAgent"
|
| 28 |
+
input_keys = ["intent", "data", "invoice", "payment", "metadata"]
|
| 29 |
+
output_keys = ["metadata", "state"]
|
| 30 |
+
action = "Append transaction row to Google Sheets ledger"
|
| 31 |
+
|
| 32 |
+
def execute(self, context: dict[str, Any]) -> dict[str, Any]:
|
| 33 |
+
"""
|
| 34 |
+
Append a row to Google Sheets.
|
| 35 |
+
|
| 36 |
+
Non-fatal design: ALL exceptions from the Sheets API are caught here
|
| 37 |
+
inside execute() and recorded via add_error(). The method never
|
| 38 |
+
raises, so BaseAgent.run() always logs a "success" history entry —
|
| 39 |
+
reflecting that the agent completed its contract (best-effort sync),
|
| 40 |
+
not that Sheets itself succeeded.
|
| 41 |
+
|
| 42 |
+
Reads: context["intent"], context["data"], context["metadata"]["source"]
|
| 43 |
+
Writes: context["metadata"]["sheet_updated"]
|
| 44 |
+
"""
|
| 45 |
+
intent = context.get("intent", "other")
|
| 46 |
+
data = context.get("data", {})
|
| 47 |
+
invoice = context.get("invoice") or {}
|
| 48 |
+
payment = context.get("payment") or {}
|
| 49 |
+
source = context.get("metadata", {}).get("source", "system")
|
| 50 |
+
|
| 51 |
+
ledger_data = {
|
| 52 |
+
"customer": invoice.get("customer") or data.get("customer"),
|
| 53 |
+
"item": invoice.get("item") or data.get("item"),
|
| 54 |
+
"quantity": invoice.get("quantity") or data.get("quantity"),
|
| 55 |
+
"amount": payment.get("amount") or invoice.get("total") or data.get("amount"),
|
| 56 |
+
"invoice_id": invoice.get("invoice_id"),
|
| 57 |
+
"status": payment.get("status") or invoice.get("status"),
|
| 58 |
+
}
|
| 59 |
+
|
| 60 |
+
sheet_updated = False
|
| 61 |
+
try:
|
| 62 |
+
from app.services.google_sheets_service import append_transaction
|
| 63 |
+
sheet_updated = append_transaction(intent=intent, data=ledger_data, source=source)
|
| 64 |
+
except Exception as exc:
|
| 65 |
+
# Soft failure — record the error but do NOT raise.
|
| 66 |
+
# This keeps LedgerAgent non-fatal without overriding run().
|
| 67 |
+
logger.warning("[LedgerAgent] Sheets sync failed (%s) — continuing", exc)
|
| 68 |
+
add_error(context, f"LedgerAgent: {exc}")
|
| 69 |
+
|
| 70 |
+
context.setdefault("metadata", {})["sheet_updated"] = sheet_updated
|
| 71 |
+
logger.info("[LedgerAgent] sheet_updated=%s", sheet_updated)
|
| 72 |
+
return context
|
app/agents/monitor_agent.py
ADDED
|
@@ -0,0 +1,124 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
app/agents/monitor_agent.py
|
| 3 |
+
----------------------------
|
| 4 |
+
MonitorAgent — Autonomy Layer, Step 2.
|
| 5 |
+
|
| 6 |
+
Scans the context after main pipeline execution and detects:
|
| 7 |
+
- missing required fields for the intent
|
| 8 |
+
- agents that errored in history
|
| 9 |
+
- inconsistencies (e.g. payment event but no amount in data)
|
| 10 |
+
- pipeline state anomalies
|
| 11 |
+
|
| 12 |
+
Appends findings to context["errors"] (non-fatal) and writes a
|
| 13 |
+
structured summary to context["monitor"].
|
| 14 |
+
|
| 15 |
+
Output written to context["monitor"]:
|
| 16 |
+
{
|
| 17 |
+
"issues": list[str], # all detected problems
|
| 18 |
+
"warnings": list[str], # non-critical observations
|
| 19 |
+
"healthy": bool, # True if no hard issues found
|
| 20 |
+
}
|
| 21 |
+
"""
|
| 22 |
+
|
| 23 |
+
from __future__ import annotations
|
| 24 |
+
|
| 25 |
+
import logging
|
| 26 |
+
from typing import Any
|
| 27 |
+
|
| 28 |
+
from app.core.base_agent import BaseAgent
|
| 29 |
+
from app.core.context import add_error
|
| 30 |
+
|
| 31 |
+
logger = logging.getLogger(__name__)
|
| 32 |
+
|
| 33 |
+
# Fields that must not be null for each intent to be considered healthy
|
| 34 |
+
_CRITICAL_FIELDS: dict[str, list[str]] = {
|
| 35 |
+
"order": ["item"],
|
| 36 |
+
"payment": ["amount"],
|
| 37 |
+
"credit": ["customer"],
|
| 38 |
+
"return": ["reason"],
|
| 39 |
+
"preparation": ["item"],
|
| 40 |
+
}
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
class MonitorAgent(BaseAgent):
|
| 44 |
+
"""Detect failures, missing data, and pipeline inconsistencies."""
|
| 45 |
+
|
| 46 |
+
name = "MonitorAgent"
|
| 47 |
+
input_keys = ["intent", "data", "event", "history", "errors", "state"]
|
| 48 |
+
output_keys = ["monitor"]
|
| 49 |
+
action = "Scan pipeline results for failures and inconsistencies"
|
| 50 |
+
|
| 51 |
+
def execute(self, context: dict[str, Any]) -> dict[str, Any]:
|
| 52 |
+
issues: list[str] = []
|
| 53 |
+
warnings: list[str] = []
|
| 54 |
+
|
| 55 |
+
intent = (context.get("intent") or "other").lower()
|
| 56 |
+
data = context.get("data", {})
|
| 57 |
+
event = context.get("event", {})
|
| 58 |
+
history = context.get("history", [])
|
| 59 |
+
state = context.get("state", "unknown")
|
| 60 |
+
|
| 61 |
+
# ── Check 1: pipeline state ──────────────────────────────────────────
|
| 62 |
+
if state == "failed":
|
| 63 |
+
issues.append(f"Pipeline ended in failed state")
|
| 64 |
+
elif state == "partial":
|
| 65 |
+
warnings.append("Pipeline ended in partial state — some steps may have been skipped")
|
| 66 |
+
|
| 67 |
+
# ── Check 2: agent errors in history ────────────────────────────────
|
| 68 |
+
errored_agents = [
|
| 69 |
+
h["agent"] for h in history if h.get("status") == "error"
|
| 70 |
+
]
|
| 71 |
+
for agent_name in errored_agents:
|
| 72 |
+
issues.append(f"Agent '{agent_name}' reported an error during execution")
|
| 73 |
+
|
| 74 |
+
# ── Check 3: missing critical fields ────────────────────────────────
|
| 75 |
+
critical = _CRITICAL_FIELDS.get(intent, [])
|
| 76 |
+
for field in critical:
|
| 77 |
+
if data.get(field) is None:
|
| 78 |
+
issues.append(
|
| 79 |
+
f"Critical field '{field}' is null for intent '{intent}'"
|
| 80 |
+
)
|
| 81 |
+
|
| 82 |
+
# ── Check 4: empty event after routing ──────────────────────────────
|
| 83 |
+
routed = any(
|
| 84 |
+
h["agent"] == "SkillRouterAgent" and h.get("status") == "success"
|
| 85 |
+
for h in history
|
| 86 |
+
)
|
| 87 |
+
if routed and not event:
|
| 88 |
+
issues.append("SkillRouterAgent completed but event dict is empty")
|
| 89 |
+
|
| 90 |
+
# ── Check 5: intent/event consistency ───────────────────────────────
|
| 91 |
+
event_name = event.get("event", "")
|
| 92 |
+
if intent == "payment" and event_name and "payment" not in event_name:
|
| 93 |
+
warnings.append(
|
| 94 |
+
f"Intent is 'payment' but event name is '{event_name}'"
|
| 95 |
+
)
|
| 96 |
+
if intent == "order" and event_name and "order" not in event_name:
|
| 97 |
+
warnings.append(
|
| 98 |
+
f"Intent is 'order' but event name is '{event_name}'"
|
| 99 |
+
)
|
| 100 |
+
|
| 101 |
+
# ── Check 6: duplicate errors already in context ────────────────────
|
| 102 |
+
existing_errors = context.get("errors", [])
|
| 103 |
+
if len(existing_errors) > 3:
|
| 104 |
+
warnings.append(
|
| 105 |
+
f"{len(existing_errors)} errors already accumulated in context"
|
| 106 |
+
)
|
| 107 |
+
|
| 108 |
+
# ── Write new issues as context errors ───────────────────────────────
|
| 109 |
+
for issue in issues:
|
| 110 |
+
add_error(context, f"[Monitor] {issue}")
|
| 111 |
+
|
| 112 |
+
healthy = len(issues) == 0
|
| 113 |
+
monitor = {
|
| 114 |
+
"issues": issues,
|
| 115 |
+
"warnings": warnings,
|
| 116 |
+
"healthy": healthy,
|
| 117 |
+
}
|
| 118 |
+
|
| 119 |
+
context["monitor"] = monitor
|
| 120 |
+
logger.info(
|
| 121 |
+
"[MonitorAgent] healthy=%s issues=%d warnings=%d",
|
| 122 |
+
healthy, len(issues), len(warnings)
|
| 123 |
+
)
|
| 124 |
+
return context
|
app/agents/payment_agent.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import logging
|
| 4 |
+
from typing import Any
|
| 5 |
+
|
| 6 |
+
from app.core.base_agent import BaseAgent
|
| 7 |
+
from app.core.context import update_context
|
| 8 |
+
from app.core.event_bus import emit_event, push_live_log
|
| 9 |
+
|
| 10 |
+
logger = logging.getLogger(__name__)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class PaymentAgent(BaseAgent):
|
| 14 |
+
"""Prepare pending payment state for generated invoices."""
|
| 15 |
+
|
| 16 |
+
name = "PaymentAgent"
|
| 17 |
+
input_keys = ["intent", "invoice"]
|
| 18 |
+
output_keys = ["payment", "state"]
|
| 19 |
+
action = "Prepare payment request from invoice state"
|
| 20 |
+
|
| 21 |
+
def execute(self, context: dict[str, Any]) -> dict[str, Any]:
|
| 22 |
+
intent = (context.get("intent") or "other").lower()
|
| 23 |
+
invoice = context.get("invoice")
|
| 24 |
+
|
| 25 |
+
if intent != "order" or not invoice:
|
| 26 |
+
update_context(context, state="payment_skipped")
|
| 27 |
+
return context
|
| 28 |
+
|
| 29 |
+
payment = {
|
| 30 |
+
"invoice_id": invoice.get("invoice_id"),
|
| 31 |
+
"amount": invoice.get("total") or invoice.get("total_amount") or 0,
|
| 32 |
+
"status": "pending",
|
| 33 |
+
}
|
| 34 |
+
update_context(context, payment=payment, state="payment_requested")
|
| 35 |
+
|
| 36 |
+
payment_log = push_live_log(
|
| 37 |
+
context,
|
| 38 |
+
{
|
| 39 |
+
"agent": self.name,
|
| 40 |
+
"status": "success",
|
| 41 |
+
"action": f"Payment requested for {invoice['invoice_id']}",
|
| 42 |
+
"detail": f"[{self.name}] Payment requested: {invoice['invoice_id']}",
|
| 43 |
+
},
|
| 44 |
+
)
|
| 45 |
+
emit_event(
|
| 46 |
+
context,
|
| 47 |
+
"payment_requested",
|
| 48 |
+
{
|
| 49 |
+
**invoice,
|
| 50 |
+
"payment": payment,
|
| 51 |
+
},
|
| 52 |
+
agent=self.name,
|
| 53 |
+
step="payment",
|
| 54 |
+
message=f"Payment requested for {invoice['invoice_id']}",
|
| 55 |
+
log_entry=payment_log,
|
| 56 |
+
)
|
| 57 |
+
logger.info("[PaymentAgent] payment requested for %s", invoice["invoice_id"])
|
| 58 |
+
return context
|
app/agents/prediction_agent.py
ADDED
|
@@ -0,0 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
app/agents/prediction_agent.py
|
| 3 |
+
-------------------------------
|
| 4 |
+
PredictionAgent — Autonomy Layer, Step 3.
|
| 5 |
+
|
| 6 |
+
Pure rule-based risk scoring. No ML, no external calls.
|
| 7 |
+
|
| 8 |
+
Rules evaluated (each contributes a score 0–1):
|
| 9 |
+
1. High amount (payment/credit > 10000) → risk +0.4
|
| 10 |
+
2. Very high amount (> 50000) → risk +0.3 additional
|
| 11 |
+
3. Null customer on payment/credit → risk +0.3
|
| 12 |
+
4. Repeated errors in context → risk +0.2 per error (max 0.4)
|
| 13 |
+
5. Verification failed → risk +0.3; partial pass → +0.15
|
| 14 |
+
6. Pipeline ended in partial/failed state → risk +0.2
|
| 15 |
+
7. Return with no reason → risk +0.2
|
| 16 |
+
8. Credit with no amount → risk +0.2
|
| 17 |
+
|
| 18 |
+
Final risk level:
|
| 19 |
+
score < 0.3 → "low"
|
| 20 |
+
score < 0.6 → "medium"
|
| 21 |
+
score >= 0.6 → "high"
|
| 22 |
+
|
| 23 |
+
Output written to context["risk"]:
|
| 24 |
+
{
|
| 25 |
+
"level": "low" | "medium" | "high",
|
| 26 |
+
"score": float, # 0.0–1.0
|
| 27 |
+
"reasons": list[str], # which rules fired
|
| 28 |
+
"action": str, # recommended action
|
| 29 |
+
}
|
| 30 |
+
"""
|
| 31 |
+
|
| 32 |
+
from __future__ import annotations
|
| 33 |
+
|
| 34 |
+
import logging
|
| 35 |
+
from typing import Any
|
| 36 |
+
|
| 37 |
+
from app.core.base_agent import BaseAgent
|
| 38 |
+
|
| 39 |
+
logger = logging.getLogger(__name__)
|
| 40 |
+
|
| 41 |
+
_HIGH_AMOUNT = 10_000
|
| 42 |
+
_VERY_HIGH_AMOUNT = 50_000
|
| 43 |
+
|
| 44 |
+
_ACTIONS = {
|
| 45 |
+
"low": "Continue normal processing",
|
| 46 |
+
"medium": "Flag for manual review",
|
| 47 |
+
"high": "Escalate immediately and pause processing",
|
| 48 |
+
}
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
class PredictionAgent(BaseAgent):
    """Rule-based risk scorer — no ML, fully deterministic."""

    name = "PredictionAgent"
    input_keys = ["intent", "data", "verification", "errors", "state"]
    output_keys = ["risk"]
    action = "Score transaction risk using rule-based evaluation"

    def execute(self, context: dict[str, Any]) -> dict[str, Any]:
        """Evaluate the eight risk rules and write context["risk"].

        Score bands: <0.3 → "low", <0.6 → "medium", else "high".
        Also contributes to the shared priority score (35 pts for high
        risk, 15 pts for medium) consumed later by UrgencyAgent.
        """
        intent = (context.get("intent") or "other").lower()
        data = context.get("data", {})
        verification = context.get("verification", {})
        errors = context.get("errors", [])
        state = context.get("state", "")

        total = 0.0
        reasons: list[str] = []

        # ── Rules 1 & 2: high-value transaction ──────────────────────────────
        amount = data.get("amount")
        if amount is not None and intent in ("payment", "credit"):
            try:
                rupees = float(amount)
            except (ValueError, TypeError):
                rupees = None  # unparseable amount — skip the amount rules
            if rupees is not None:
                if rupees > _VERY_HIGH_AMOUNT:
                    total += 0.7
                    reasons.append(
                        f"Very high amount ₹{rupees:,.0f} (>{_VERY_HIGH_AMOUNT:,})"
                    )
                elif rupees > _HIGH_AMOUNT:
                    total += 0.4
                    reasons.append(
                        f"High amount ₹{rupees:,.0f} (>{_HIGH_AMOUNT:,})"
                    )

        # ── Rule 3: null customer on financial intent ────────────────────────
        if intent in ("payment", "credit") and not data.get("customer"):
            total += 0.3
            reasons.append("Customer is unknown on a financial transaction")

        # ── Rule 4: repeated errors (0.2 each, capped at 0.4) ────────────────
        if errors:
            total += min(0.4, len(errors) * 0.2)
            reasons.append(f"{len(errors)} error(s) accumulated in pipeline")

        # ── Rule 5: verification failed or only partially passed ─────────────
        v_status = verification.get("status", "")
        if v_status == "fail":
            total += 0.3
            reasons.append("Verification failed for this transaction")
        elif v_status == "partial":
            total += 0.15
            reasons.append("Verification only partially passed")

        # ── Rule 6: abnormal pipeline state ──────────────────────────────────
        if state in ("failed", "partial"):
            total += 0.2
            reasons.append(f"Pipeline ended in '{state}' state")

        # ── Rule 7: return with no reason ────────────────────────────────────
        if intent == "return" and not data.get("reason"):
            total += 0.2
            reasons.append("Return request has no stated reason")

        # ── Rule 8: credit with no amount ────────────────────────────────────
        if intent == "credit" and data.get("amount") is None:
            total += 0.2
            reasons.append("Credit extended with no amount specified")

        # ── Clamp to [0, 1] and map the score onto a label ───────────────────
        score = min(round(total, 2), 1.0)
        level = "low" if score < 0.3 else ("medium" if score < 0.6 else "high")

        context["risk"] = {
            "level": level,
            "score": score,
            "reasons": reasons,
            "action": _ACTIONS[level],
        }
        logger.info(
            "[PredictionAgent] risk=%s score=%.2f reasons=%d",
            level, score, len(reasons)
        )

        # Contribute to the shared priority score: high → 35 pts,
        # medium → 15 pts. UrgencyAgent merges these with keyword/amount
        # signals to derive the final priority label.
        from app.core.priority import contribute_priority_score
        if level == "high":
            contribute_priority_score(context, 35, f"Risk level is high (score={score})")
        elif level == "medium":
            contribute_priority_score(context, 15, f"Risk level is medium (score={score})")

        return context
|
app/agents/recovery_agent.py
ADDED
|
@@ -0,0 +1,192 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
app/agents/recovery_agent.py
|
| 3 |
+
-----------------------------
|
| 4 |
+
RecoveryAgent — Autonomy Layer, Step 6 (most important).
|
| 5 |
+
|
| 6 |
+
Implements self-healing logic based on the failure state of the pipeline.
|
| 7 |
+
|
| 8 |
+
Decision tree:
|
| 9 |
+
if verification.status == "ok" → no recovery needed
|
| 10 |
+
elif retry_count == 0 → retry (re-run failed agents from history)
|
| 11 |
+
elif retry_count == 1 → fallback (use safe defaults)
|
| 12 |
+
elif retry_count >= 2 → critical escalation, halt retries
|
| 13 |
+
|
| 14 |
+
The agent does NOT re-run the full pipeline (no circular execution).
|
| 15 |
+
Instead it performs a targeted fix:
|
| 16 |
+
RETRY — re-runs only the agents that errored, one more time
|
| 17 |
+
FALLBACK — fills missing critical fields with safe placeholder values
|
| 18 |
+
ESCALATE — marks context as needing human intervention
|
| 19 |
+
|
| 20 |
+
Output written to context["recovery"]:
|
| 21 |
+
{
|
| 22 |
+
"action": "none" | "retry" | "fallback" | "escalate",
|
| 23 |
+
"retry_count": int,
|
| 24 |
+
"details": str,
|
| 25 |
+
"success": bool,
|
| 26 |
+
}
|
| 27 |
+
|
| 28 |
+
context["metadata"]["retry_count"] is incremented on each recovery attempt.
|
| 29 |
+
"""
|
| 30 |
+
|
| 31 |
+
from __future__ import annotations
|
| 32 |
+
|
| 33 |
+
import logging
|
| 34 |
+
from typing import Any
|
| 35 |
+
|
| 36 |
+
from app.core.base_agent import BaseAgent
|
| 37 |
+
from app.core.context import add_error
|
| 38 |
+
|
| 39 |
+
logger = logging.getLogger(__name__)
|
| 40 |
+
|
| 41 |
+
_MAX_RETRIES = 1 # after this, switch to fallback
|
| 42 |
+
|
| 43 |
+
# Safe placeholder values used during fallback
|
| 44 |
+
_FALLBACK_DEFAULTS: dict[str, Any] = {
|
| 45 |
+
"customer": "Unknown",
|
| 46 |
+
"item": "unspecified",
|
| 47 |
+
"quantity": 0,
|
| 48 |
+
"amount": 0,
|
| 49 |
+
"payment_type": None,
|
| 50 |
+
"reason": "not provided",
|
| 51 |
+
"note": "recovered by fallback",
|
| 52 |
+
}
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
class RecoveryAgent(BaseAgent):
    """Self-healing agent: retry failed agents, apply fallback, or escalate."""

    name = "RecoveryAgent"
    input_keys = ["verification", "errors", "history", "data", "metadata"]
    output_keys = ["recovery", "metadata"]
    action = "Attempt recovery from pipeline failures"

    def execute(self, context: dict[str, Any]) -> dict[str, Any]:
        """Choose and apply a recovery strategy based on metadata["retry_count"].

        Strategy:
            verification ok          → no-op
            retry_count == 0         → re-run errored agents
            retry_count <= _MAX_RETRIES → fill null fields with safe defaults
            otherwise                → escalate (human intervention required)

        Writes context["recovery"] = {action, retry_count, details, success}
        and increments metadata["retry_count"] on every recovery attempt.
        """
        verification = context.get("verification", {})
        v_status = verification.get("status", "ok")
        metadata = context.setdefault("metadata", {})
        retry_count = metadata.get("retry_count", 0)

        # ── No recovery needed ───────────────────────────────────────────────
        if v_status == "ok":
            context["recovery"] = {
                "action": "none",
                "retry_count": retry_count,
                "details": "Verification passed — no recovery needed",
                "success": True,
            }
            logger.info("[RecoveryAgent] no recovery needed")
            return context

        # ── Decide recovery strategy ─────────────────────────────────────────
        if retry_count == 0:
            action = "retry"
            success = self._do_retry(context)
            details = "Retried failed agents"
        elif retry_count <= _MAX_RETRIES:
            action = "fallback"
            success = self._do_fallback(context)
            details = "Applied safe fallback defaults to missing fields"
        else:
            action = "escalate"
            success = False
            details = (
                f"Recovery exhausted after {retry_count} attempt(s). "
                "Human intervention required."
            )
            add_error(context, f"[Recovery] {details}")
            logger.error("[RecoveryAgent] escalating — recovery exhausted")

        # Increment retry counter so the next pass picks the next strategy.
        metadata["retry_count"] = retry_count + 1

        context["recovery"] = {
            "action": action,
            "retry_count": retry_count + 1,
            "details": details,
            "success": success,
        }

        logger.info(
            "[RecoveryAgent] action=%s retry_count=%d success=%s",
            action, retry_count + 1, success
        )
        return context

    # ── Recovery strategies ──────────────────────────────────────────────────

    def _do_retry(self, context: dict[str, Any]) -> bool:
        """
        Re-run agents that errored in the history.
        Uses the registry to look them up — no direct imports.
        Returns True if at least one agent recovered successfully
        (or trivially True when no errored agents are found).
        """
        from app.core.registry import get_agent

        history = context.get("history", [])
        errored_agents = [
            h["agent"] for h in history if h.get("status") == "error"
        ]

        if not errored_agents:
            logger.info("[RecoveryAgent] retry: no errored agents found in history")
            return True

        recovered = 0
        for agent_name in errored_agents:
            # Find registry key by matching agent name to class name
            key = self._name_to_key(agent_name)
            if not key:
                logger.warning(
                    "[RecoveryAgent] retry: cannot find registry key for '%s'",
                    agent_name
                )
                continue
            try:
                agent = get_agent(key)
                context = agent.run(context)
                logger.info("[RecoveryAgent] retry: '%s' re-ran successfully", agent_name)
                recovered += 1
            except Exception as exc:
                # A second failure is tolerated — we only need one success.
                logger.warning(
                    "[RecoveryAgent] retry: '%s' failed again: %s", agent_name, exc
                )

        return recovered > 0

    def _do_fallback(self, context: dict[str, Any]) -> bool:
        """
        Fill null critical data fields with safe placeholder values.
        Returns True always — fallback is best-effort.
        """
        data = context.get("data", {})
        filled = 0

        for field, default in _FALLBACK_DEFAULTS.items():
            # Only patch keys that exist but are null; absent keys are left
            # alone so downstream code can tell "never extracted" apart from
            # "extracted as null".
            if field in data and data[field] is None:
                data[field] = default
                filled += 1
                logger.info(
                    "[RecoveryAgent] fallback: set data['%s'] = %r", field, default
                )

        context["data"] = data
        logger.info("[RecoveryAgent] fallback: filled %d null field(s)", filled)
        return True

    @staticmethod
    def _name_to_key(agent_class_name: str) -> str | None:
        """Map agent class name → registry key (None when unknown)."""
        mapping = {
            "IntentAgent": "intent",
            "ExtractionAgent": "extraction",
            "ValidationAgent": "validation",
            "SkillRouterAgent": "router",
            "LedgerAgent": "ledger",
            "VerificationAgent": "verification",
            "MonitorAgent": "monitor",
            "PredictionAgent": "prediction",
            "UrgencyAgent": "urgency",
            "EscalationAgent": "escalation",
        }
        return mapping.get(agent_class_name)
|
app/agents/skill_router_agent.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
app/agents/skill_router_agent.py
|
| 3 |
+
--------------------------------
|
| 4 |
+
SkillRouterAgent — Stage 4 of the NotiFlow pipeline.
|
| 5 |
+
|
| 6 |
+
Wraps agent/router.py into the BaseAgent interface.
|
| 7 |
+
Dispatches to the correct business skill based on context["intent"]
|
| 8 |
+
and writes the skill result to context["event"].
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from __future__ import annotations
|
| 12 |
+
|
| 13 |
+
import logging
|
| 14 |
+
from typing import Any
|
| 15 |
+
|
| 16 |
+
from app.core.base_agent import BaseAgent
|
| 17 |
+
from app.core.context import update_context
|
| 18 |
+
|
| 19 |
+
logger = logging.getLogger(__name__)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class SkillRouterAgent(BaseAgent):
    """Route the validated context to the appropriate business skill."""

    name = "SkillRouterAgent"
    input_keys = ["intent", "data"]
    output_keys = ["event", "invoice", "events", "state"]
    action = "Dispatch to business skill and execute"

    def execute(self, context: dict[str, Any]) -> dict[str, Any]:
        """
        Dispatch to the correct skill and write result to context["event"].

        Reads context["intent"] and context["data"].
        Writes context["event"] and transitions state to "routed".
        """
        intent = context.get("intent", "other")
        data = context.get("data", {})

        # Lazy import of the skill dispatcher. (The previous try/except
        # ImportError re-ran the identical import in both branches and was
        # therefore a no-op; a single import is equivalent.)
        from app.services.router import route_to_skill

        event = route_to_skill(intent, data, context=context)
        update_context(
            context,
            event=event,
            invoice=event.get("invoice", context.get("invoice")),
            state="routed",
        )
        logger.info("[SkillRouterAgent] event=%s", event.get("event"))
        return context
|
app/agents/urgency_agent.py
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
app/agents/urgency_agent.py
|
| 3 |
+
----------------------------
|
| 4 |
+
UrgencyAgent — Autonomy Layer, Step 4.
|
| 5 |
+
|
| 6 |
+
Detects urgency signals and contributes to context["priority_score"].
|
| 7 |
+
After accumulating all signals it calls derive_priority_label() which
|
| 8 |
+
derives the final context["priority"] string from the score.
|
| 9 |
+
|
| 10 |
+
Score contributions:
|
| 11 |
+
Crisis keyword OR amount > 100k → +80 points
|
| 12 |
+
Urgency keyword → +50 points
|
| 13 |
+
Amount > 50k → +45 points
|
| 14 |
+
Risk level == "high" → +35 points
|
| 15 |
+
Intent == "other" → –10 (score stays 0 if nothing else)
|
| 16 |
+
|
| 17 |
+
Final label derived by derive_priority_label():
|
| 18 |
+
score > 70 → "high"
|
| 19 |
+
score > 40 → "medium"
|
| 20 |
+
score ≤ 40 → "low"
|
| 21 |
+
"""
|
| 22 |
+
|
| 23 |
+
from __future__ import annotations
|
| 24 |
+
|
| 25 |
+
import logging
|
| 26 |
+
from typing import Any
|
| 27 |
+
|
| 28 |
+
from app.core.base_agent import BaseAgent
|
| 29 |
+
from app.core.priority import contribute_priority_score, derive_priority_label
|
| 30 |
+
|
| 31 |
+
logger = logging.getLogger(__name__)
|
| 32 |
+
|
| 33 |
+
# Keyword sets scanned against the lowercased incoming message.
_CRISIS_KEYWORDS = {"emergency", "crisis", "block", "fraud", "chori", "problem"}
# NOTE(review): "kal tak" is a two-word phrase — it can only match via a
# substring search over the full message, never via a token-set intersection.
_URGENT_KEYWORDS = {
    "jaldi", "urgent", "asap", "abhi", "turant", "important",
    "zaruri", "help", "please", "kal tak", "immediately", "now",
}
# Amount thresholds (₹) for critical / high priority contributions.
_CRITICAL_AMOUNT = 100_000
_HIGH_AMOUNT = 50_000
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class UrgencyAgent(BaseAgent):
    """Detect urgency signals, accumulate priority score, derive final label."""

    name = "UrgencyAgent"
    input_keys = ["message", "data", "intent", "risk", "priority_score"]
    output_keys = ["priority_score", "priority"]
    action = "Accumulate urgency signals into priority score and derive label"

    def execute(self, context: dict[str, Any]) -> dict[str, Any]:
        """Score urgency signals and derive context["priority"].

        Contributions: crisis keyword → +80, amount > critical → +80,
        urgency keyword → +50, amount > high → +45, risk "high" → +35.
        The final label is derived from the accumulated score by
        derive_priority_label().
        """
        message = (context.get("message") or "").lower()
        data = context.get("data", {})
        risk = context.get("risk", {})
        words = set(message.split())
        amount = self._safe_amount(data.get("amount"))
        risk_level = risk.get("level", "low")

        # ── Contribute scores for each signal ────────────────────────────────
        crisis_hits = self._match_keywords(message, words, _CRISIS_KEYWORDS)
        if crisis_hits:
            contribute_priority_score(
                context, 80,
                f"Crisis keyword(s) detected: {', '.join(crisis_hits)}"
            )

        if amount is not None and amount > _CRITICAL_AMOUNT:
            contribute_priority_score(
                context, 80,
                f"Amount ₹{amount:,.0f} exceeds critical threshold ({_CRITICAL_AMOUNT:,})"
            )

        urgent_hits = self._match_keywords(message, words, _URGENT_KEYWORDS)
        if urgent_hits:
            contribute_priority_score(
                context, 50,
                f"Urgency keyword(s): {', '.join(urgent_hits)}"
            )

        if amount is not None and _HIGH_AMOUNT < amount <= _CRITICAL_AMOUNT:
            contribute_priority_score(
                context, 45,
                f"Amount ₹{amount:,.0f} exceeds high threshold ({_HIGH_AMOUNT:,})"
            )

        if risk_level == "high":
            contribute_priority_score(context, 35, "Risk level is high")

        # Store urgency reason for escalation agent to read
        score = context.get("priority_score", 0)
        context.setdefault("metadata", {})["urgency_reason"] = (
            f"priority_score={score}"
        )

        # ── Derive final label from accumulated score ─────────────────────────
        label = derive_priority_label(context)
        logger.info(
            "[UrgencyAgent] score=%d → priority=%s", score, label
        )
        return context

    @staticmethod
    def _match_keywords(
        message: str, words: set[str], keywords: set[str]
    ) -> list[str]:
        """Return matched keywords, sorted for deterministic log messages.

        Single-word keywords are matched against the token set; multi-word
        phrases (e.g. "kal tak") are matched as substrings of the whole
        message — a word-set intersection alone can never hit a phrase.
        """
        return sorted(
            kw for kw in keywords
            if (kw in message if " " in kw else kw in words)
        )

    @staticmethod
    def _safe_amount(value: Any) -> float | None:
        """Coerce *value* to float; return None when absent or unparseable."""
        if value is None:
            return None
        try:
            return float(value)
        except (ValueError, TypeError):
            return None
|
app/agents/validation_agent.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
app/agents/validation_agent.py
|
| 3 |
+
------------------------------
|
| 4 |
+
ValidationAgent — Stage 3 of the NotiFlow pipeline.
|
| 5 |
+
|
| 6 |
+
Wraps validators/data_validator.py into the BaseAgent interface.
|
| 7 |
+
Normalises numbers, text, and payment aliases in context["data"].
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
from __future__ import annotations
|
| 11 |
+
|
| 12 |
+
import logging
|
| 13 |
+
from typing import Any
|
| 14 |
+
|
| 15 |
+
from app.core.base_agent import BaseAgent
|
| 16 |
+
from app.core.context import update_context
|
| 17 |
+
|
| 18 |
+
logger = logging.getLogger(__name__)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class ValidationAgent(BaseAgent):
    """Normalise and validate extracted business data."""

    name = "ValidationAgent"
    input_keys = ["intent", "data"]
    output_keys = ["data", "state"]
    action = "Normalise numbers, text, and payment aliases"

    def execute(self, context: dict[str, Any]) -> dict[str, Any]:
        """
        Validate context["data"] in-place and transition state to "validated".

        Reads context["intent"] and context["data"].
        Writes cleaned data back to context["data"]. On any validator error
        the raw data is kept unchanged (best-effort, never raises).
        """
        intent = context.get("intent", "other")
        raw = context.get("data", {})

        try:
            # Lazy import so a missing validator module degrades gracefully.
            # (The previous inner try/except ImportError re-ran the identical
            # import and could never recover anything — removed.)
            from app.validators.data_validator import validate_data

            validated = validate_data(intent, raw)
        except Exception as exc:
            logger.warning("[ValidationAgent] validation error (%s) — using raw data", exc)
            validated = raw

        update_context(context, data=validated, state="validated")
        logger.info("[ValidationAgent] validated data for intent '%s'", intent)
        return context
|
app/agents/verification_agent.py
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
app/agents/verification_agent.py
|
| 3 |
+
---------------------------------
|
| 4 |
+
VerificationAgent — Autonomy Layer, Step 1.
|
| 5 |
+
|
| 6 |
+
Validates that the skill execution produced the expected output.
|
| 7 |
+
Reads context["intent"] and context["event"], writes context["verification"].
|
| 8 |
+
|
| 9 |
+
Output shape written to context["verification"]:
|
| 10 |
+
{
|
| 11 |
+
"status": "ok" | "fail" | "partial",
|
| 12 |
+
"confidence": 0.0 – 1.0,
|
| 13 |
+
"checks": list[str], # human-readable check results
|
| 14 |
+
"reason": str, # summary
|
| 15 |
+
}
|
| 16 |
+
|
| 17 |
+
Never raises — failures are recorded as verification["status"] = "fail".
|
| 18 |
+
"""
|
| 19 |
+
|
| 20 |
+
from __future__ import annotations
|
| 21 |
+
|
| 22 |
+
import logging
|
| 23 |
+
from typing import Any
|
| 24 |
+
|
| 25 |
+
from app.core.base_agent import BaseAgent
|
| 26 |
+
|
| 27 |
+
logger = logging.getLogger(__name__)
|
| 28 |
+
|
| 29 |
+
# Expected event name per intent — used by check 2 in VerificationAgent.
_EXPECTED_EVENTS: dict[str, str] = {
    "order": "order_received",
    "payment": "payment_recorded",
    "credit": "credit_recorded",
    "return": "return_requested",
    "preparation": "preparation_queued",
}

# Required fields inside event sub-dict per intent — used by check 3.
# Shape: intent → {event sub-dict key → list of fields that must be non-null}.
_REQUIRED_FIELDS: dict[str, dict[str, list[str]]] = {
    "order": {"order": ["order_id", "item", "status"]},
    "payment": {"payment": ["customer", "amount", "status"]},
    "credit": {"credit": ["customer", "status"]},
    "return": {"return": ["return_id", "status"]},
    "preparation": {"preparation": ["prep_id", "status"]},
}
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
class VerificationAgent(BaseAgent):
    """Validate that the pipeline produced a complete, expected result."""

    name = "VerificationAgent"
    input_keys = ["intent", "event", "data"]
    output_keys = ["verification"]
    action = "Verify skill execution produced expected output"

    def execute(self, context: dict[str, Any]) -> dict[str, Any]:
        """Run the verification checks and write context["verification"].

        Each check yields an (ok, text) pair; confidence is the passed/total
        ratio, and the status is "ok" (>= 0.85), "partial" (>= 0.5) or "fail".
        """
        intent = (context.get("intent") or "other").lower()
        event = context.get("event", {})

        # Accumulate (ok, human-readable text) pairs for every check.
        results: list[tuple[bool, str]] = []

        # Check 1: event exists.
        results.append((
            bool(event),
            "✓ event dict is non-empty" if event else "✗ event dict is empty",
        ))

        # Check 2: correct event name (only when this intent has an expectation).
        expected_name = _EXPECTED_EVENTS.get(intent)
        if expected_name:
            actual_name = event.get("event", "")
            if actual_name == expected_name:
                results.append(
                    (True, f"✓ event name '{actual_name}' matches expected")
                )
            else:
                results.append((
                    False,
                    f"✗ event name '{actual_name}' != expected '{expected_name}'",
                ))

        # Check 3: required sub-dict fields present and non-null.
        for sub_key, required in _REQUIRED_FIELDS.get(intent, {}).items():
            sub = event.get(sub_key, {})
            for name in required:
                if sub.get(name) is not None:
                    results.append((True, f"✓ {sub_key}.{name} present"))
                else:
                    results.append((False, f"✗ {sub_key}.{name} missing or null"))

        # Check 4: at least one extracted data field is non-null.
        non_null = sum(
            1 for v in context.get("data", {}).values() if v is not None
        )
        results.append((
            non_null > 0,
            f"✓ data has {non_null} non-null field(s)" if non_null > 0
            else "✗ all data fields are null",
        ))

        # Fold the pairs into the final verdict.
        total = len(results)
        passed = sum(1 for ok, _ in results if ok)
        checks = [text for _, text in results]
        confidence = round(passed / total, 2) if total > 0 else 0.0

        if confidence >= 0.85:
            status = "ok"
        elif confidence >= 0.5:
            status = "partial"
        else:
            status = "fail"

        verification = {
            "status": status,
            "confidence": confidence,
            "checks": checks,
            "reason": f"{passed}/{total} checks passed",
        }

        context["verification"] = verification
        logger.info(
            "[VerificationAgent] status=%s confidence=%.2f (%s)",
            status, confidence, verification["reason"]
        )
        return context
|
app/api/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
"""app/api — FastAPI routers."""
|
app/api/notification_routes.py
ADDED
|
@@ -0,0 +1,387 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
api/notification_routes.py
|
| 3 |
+
--------------------------
|
| 4 |
+
FastAPI router for Notiflow notification endpoints.
|
| 5 |
+
|
| 6 |
+
Phase 5: API now calls process_message() directly (the new multi-agent
|
| 7 |
+
orchestrator) instead of run_notiflow(). Returns the full result including
|
| 8 |
+
intents, multi_data, priority, risk, and alerts.
|
| 9 |
+
|
| 10 |
+
Backward compatibility: all original fields (message, intent, data, event,
|
| 11 |
+
source, sheet_updated, model) are still present in the response.
|
| 12 |
+
|
| 13 |
+
Endpoints
|
| 14 |
+
---------
|
| 15 |
+
POST /api/notification
|
| 16 |
+
Receives a notification, runs the full agent pipeline,
|
| 17 |
+
returns the structured orchestrator result.
|
| 18 |
+
|
| 19 |
+
GET /api/notifications/generate
|
| 20 |
+
Calls Gemini to generate a batch of demo notifications.
|
| 21 |
+
Query param: n (default 5)
|
| 22 |
+
|
| 23 |
+
WebSocket /ws/notifications
|
| 24 |
+
Streams live notifications to connected clients.
|
| 25 |
+
Accepts both frontend-pushed and Gemini-generated events.
|
| 26 |
+
Broadcasts to all connected clients.
|
| 27 |
+
"""
|
| 28 |
+
|
| 29 |
+
from __future__ import annotations
|
| 30 |
+
|
| 31 |
+
import asyncio
|
| 32 |
+
import json
|
| 33 |
+
import logging
|
| 34 |
+
from typing import Any
|
| 35 |
+
|
| 36 |
+
from fastapi import APIRouter, WebSocket, WebSocketDisconnect, Query, HTTPException, Request
|
| 37 |
+
from fastapi.responses import StreamingResponse
|
| 38 |
+
from app.core.event_bus import (
|
| 39 |
+
confirm_invoice_payment,
|
| 40 |
+
emit_global_event,
|
| 41 |
+
get_events_since,
|
| 42 |
+
get_latest_event_sequence,
|
| 43 |
+
get_logs,
|
| 44 |
+
push_live_log,
|
| 45 |
+
)
|
| 46 |
+
|
| 47 |
+
logger = logging.getLogger(__name__)
|
| 48 |
+
|
| 49 |
+
router = APIRouter()
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
# ---------------------------------------------------------------------------
|
| 53 |
+
# Request model
|
| 54 |
+
# ---------------------------------------------------------------------------
|
| 55 |
+
|
| 56 |
+
from pydantic import BaseModel
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
class NotificationRequest(BaseModel):
    """Incoming notification payload for POST /api/notification."""
    source: str = "system" # e.g. "whatsapp", "amazon", "payment", "return"
    message: str # Raw Hinglish business message (must be non-empty after strip)
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
class PaymentConfirmRequest(BaseModel):
    """Request body for POST /api/payment/confirm."""
    invoice_id: str  # identifier of the invoice to mark as paid
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
# ---------------------------------------------------------------------------
|
| 70 |
+
# WebSocket connection manager
|
| 71 |
+
# ---------------------------------------------------------------------------
|
| 72 |
+
|
| 73 |
+
class _ConnectionManager:
    """Simple in-memory broadcast manager for WebSocket clients."""

    def __init__(self):
        # Currently-open client sockets, in connection order.
        self._active: list[WebSocket] = []

    async def connect(self, ws: WebSocket) -> None:
        """Accept the handshake and start tracking the client."""
        await ws.accept()
        self._active.append(ws)
        logger.info("WS client connected. Total: %d", len(self._active))

    def disconnect(self, ws: WebSocket) -> None:
        """Stop tracking the client (identity comparison, safe to call twice)."""
        self._active = [c for c in self._active if c is not ws]
        logger.info("WS client disconnected. Total: %d", len(self._active))

    async def broadcast(self, payload: dict) -> None:
        """Send JSON payload to all connected WebSocket clients."""
        stale: list[WebSocket] = []
        for client in self._active:
            try:
                await client.send_json(payload)
            except Exception:
                # Send failed — the socket is dead; drop it after the loop.
                stale.append(client)
        for client in stale:
            self.disconnect(client)
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
# Module-level singleton shared by the HTTP and WebSocket routes below.
_manager = _ConnectionManager()
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
# ---------------------------------------------------------------------------
|
| 104 |
+
# Internal: run the pipeline and build the API response dict
|
| 105 |
+
# ---------------------------------------------------------------------------
|
| 106 |
+
|
| 107 |
+
def _run_pipeline(message: str, source: str) -> dict[str, Any]:
    """
    Run the full multi-agent pipeline via process_message() and return
    a flat API response dict that includes both legacy and new fields.

    Args:
        message: cleaned (stripped) Hinglish business message.
        source:  originating channel, e.g. "whatsapp", "amazon".

    Returns:
        Flat response dict; "model" is "live" when the event name has a
        recognised terminal suffix, otherwise "demo".
    """
    from app.core.orchestrator import process_message

    result = process_message(message.strip(), source=source)

    # Strip the raw context from the response (keep it lightweight for API)
    ctx = result.pop("context", {})

    # Determine model tag. Guard with `or {}`: if result["event"] exists but
    # is None, the original `.get("event", {})` would return None and the
    # chained .get() would raise AttributeError.
    event_obj = result.get("event") or {}
    event_str = str(event_obj.get("event", ""))
    is_live = any(event_str.endswith(sfx)
                  for sfx in ("_recorded", "_received", "_requested", "_queued"))
    model_tag = "live" if is_live else "demo"

    return {
        # ── Backward-compatible core fields ──────────────────────────────
        "message": result["message"],
        "intent": result.get("intent", "other"),
        "data": result.get("data", {}),
        "event": result.get("event", {}),
        "invoice": result.get("invoice"),
        "events": result.get("events", []),
        "live_logs": result.get("live_logs", []),
        "history": result.get("history", ctx.get("history", [])),
        "customer": result.get("customer", {}),
        "order": result.get("order", {}),
        "payment": result.get("payment", {}),
        "decision": result.get("decision", {}),
        "source": source,
        "sheet_updated": result.get("sheet_updated", False),
        "model": model_tag,
        # ── Phase 5: multi-intent fields ─────────────────────────────────
        "intents": result.get("intents", [result.get("intent", "other")]),
        "multi_data": result.get("multi_data", {}),
        # ── Autonomy fields ───────────────────────────────────────────────
        "priority": result.get("priority", "low"),
        "priority_score": result.get("priority_score", 0),
        "risk": result.get("risk", {}),
        "alerts": result.get("alerts", []),
        "verification": result.get("verification", {}),
        "recovery": result.get("recovery", {}),
        "monitor": result.get("monitor", {}),
    }
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
async def _broadcast_pipeline_response(response: dict[str, Any]) -> None:
    """Push the full pipeline result, then each domain event, to every WS client."""
    await _manager.broadcast({"type": "pipeline_result", "data": response})
    domain_events = response.get("events", [])
    for evt in domain_events:
        await _manager.broadcast({"type": "domain_event", "event": evt})
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
# ---------------------------------------------------------------------------
|
| 163 |
+
# POST /api/notification
|
| 164 |
+
# ---------------------------------------------------------------------------
|
| 165 |
+
|
| 166 |
+
@router.post("/api/notification")
async def process_notification(body: NotificationRequest):
    """
    Receive a business notification and run the full Notiflow pipeline.

    Uses process_message() from app.core.orchestrator — the new multi-agent
    system with dynamic planner, autonomy planner, and multi-intent support.

    After processing, the result is broadcast to all connected WebSocket
    clients so the live stream panel updates in real time.

    Args:
        body: {"source": "whatsapp", "message": "bhaiya 3 kurti bhej dena"}

    Returns:
        Full orchestrator result including intents, multi_data, priority, risk.

    Raises:
        HTTPException 422 when the message is empty, 500 on pipeline failure.
    """
    if not body.message or not body.message.strip():
        raise HTTPException(status_code=422, detail="Message cannot be empty.")

    logger.info(
        "POST /api/notification | source=%s | msg=%r",
        body.source, body.message
    )
    # (Removed leftover debug print of the raw message — the logger call
    # above already records it.)

    try:
        response = _run_pipeline(body.message.strip(), body.source)
    except Exception as exc:
        # logger.exception keeps the traceback; chain the cause for debugging.
        logger.exception("Pipeline error: %s", exc)
        raise HTTPException(status_code=500, detail=f"Pipeline error: {exc}") from exc

    # Broadcast full result to WebSocket clients
    await _broadcast_pipeline_response(response)

    return response
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
@router.get("/api/stream/logs")
async def stream_logs(limit: int = Query(default=200, ge=1, le=500)):
    """Return up to *limit* recent live-log entries from the event bus."""
    recent = get_logs(limit)
    return {"logs": recent}
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
@router.get("/api/stream/events")
async def stream_events(request: Request):
    # Server-Sent Events endpoint: replays a small backlog of domain events,
    # then polls the global event buffer and pushes new events as they arrive.
    async def event_generator():
        # Start ~20 sequence numbers behind the newest event so a client that
        # (re)connects receives recent history instead of an empty stream.
        last_sequence = max(0, get_latest_event_sequence() - 20)
        while True:
            # Stop polling as soon as the client goes away.
            if await request.is_disconnected():
                break

            events = get_events_since(last_sequence)
            for event in events:
                # Track the highest sequence seen so each event is sent once.
                last_sequence = max(last_sequence, int(event.get("sequence", 0)))
                # SSE wire format: "data: <json>\n\n" per message.
                yield f"data: {json.dumps(event)}\n\n"

            await asyncio.sleep(0.25)  # poll interval between buffer checks

    return StreamingResponse(
        event_generator(),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "X-Accel-Buffering": "no",  # disable proxy (nginx) buffering for SSE
        },
    )
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
@router.post("/api/payment/confirm")
async def confirm_payment(body: PaymentConfirmRequest):
    """
    Mark an invoice as paid, append a payment row to the ledger, and broadcast
    a "payment_completed" domain event to all connected WebSocket clients.

    Raises:
        HTTPException: 422 if invoice_id is blank, 404 if no such invoice.
    """
    invoice_id = body.invoice_id.strip()
    if not invoice_id:
        raise HTTPException(status_code=422, detail="invoice_id is required")

    # Imported lazily so the Excel-writer dependency is only paid on use.
    from app.utils.excel_writer import append_row

    invoice = confirm_invoice_payment(invoice_id)
    if invoice is None:
        raise HTTPException(status_code=404, detail=f"Invoice not found: {invoice_id}")

    # Ledger row mirroring the invoice. "total" is preferred with a fallback
    # to "total_amount" — both key spellings appear upstream.
    payment_entry = {
        "entry_id": f"PAY-{invoice_id}",
        "timestamp": invoice.get("timestamp"),
        "type": "payment",
        "customer": invoice.get("customer"),
        "item": invoice.get("item"),
        "quantity": invoice.get("quantity"),
        "amount": invoice.get("total") or invoice.get("total_amount"),
        "payment_type": "manual",
        "status": "received",
    }
    append_row("Ledger", payment_entry)

    # Push the live log first so the emitted event can reference the entry.
    payment_log = push_live_log(None, {
        "agent": "PaymentAPI",
        "status": "success",
        "action": f"Payment confirmed for {invoice_id}",
        "detail": f"[PaymentAPI] Payment confirmed: {invoice_id}",
    })
    event = emit_global_event(
        "payment_completed",
        invoice,
        agent="PaymentAPI",
        step="payment",
        message=f"Payment confirmed for {invoice_id}",
        log_entry=payment_log,
    )

    response = {
        "invoice": invoice,
        "payment": {
            "invoice_id": invoice.get("invoice_id"),
            "amount": invoice.get("total") or invoice.get("total_amount"),
            "status": "paid",
        },
        "event": event,
    }
    # Fan the event out to live dashboards before replying to the caller.
    await _manager.broadcast({"type": "domain_event", "event": event})
    return response
|
| 286 |
+
|
| 287 |
+
|
| 288 |
+
# ---------------------------------------------------------------------------
|
| 289 |
+
# GET /api/notifications/generate
|
| 290 |
+
# ---------------------------------------------------------------------------
|
| 291 |
+
|
| 292 |
+
@router.get("/api/notifications/generate")
async def generate_demo_notifications(n: int = Query(default=5, ge=1, le=20)):
    """
    Generate n demo notifications using Gemini (or static fallback).

    Query params:
        n: number of notifications to generate (1-20, default 5)

    Returns:
        {"notifications": [{"source": str, "message": str}, ...]}
    """
    # Lazy import keeps the generator (and any of its deps) off the
    # module-import path.
    from app.services.notification_generator import get_notifications

    batch = get_notifications(n)
    return {"notifications": batch}
|
| 306 |
+
|
| 307 |
+
|
| 308 |
+
# ---------------------------------------------------------------------------
|
| 309 |
+
# WebSocket /ws/notifications
|
| 310 |
+
# ---------------------------------------------------------------------------
|
| 311 |
+
|
| 312 |
+
@router.websocket("/ws/notifications")
async def websocket_notification_stream(websocket: WebSocket):
    """
    WebSocket endpoint for real-time notification streaming.

    Clients connect and receive:
    - Notifications pushed by the frontend simulation
    - Notifications generated by Gemini automation
    - Results of processed notifications (broadcast from POST endpoint)

    The client can also SEND a notification over the WebSocket:
        {"source": "whatsapp", "message": "bhaiya 3 kurti bhej dena"}

    The server will process it through the pipeline and broadcast the
    result to all connected clients.

    Protocol:
        Client → Server: {"source": str, "message": str}
        Server → Client: Full pipeline result JSON (same shape as POST response)
    """
    await _manager.connect(websocket)
    try:
        while True:
            raw = await websocket.receive_text()
            try:
                payload = json.loads(raw)
                source = payload.get("source", "websocket")
                message = payload.get("message", "").strip()

                if not message:
                    # Validation errors go only to the sender, not broadcast.
                    await websocket.send_json({"error": "Empty message"})
                    continue

                response = _run_pipeline(message, source)
                # Successful results are fanned out to ALL connected clients.
                await _broadcast_pipeline_response(response)

            except json.JSONDecodeError:
                await websocket.send_json({"error": "Invalid JSON payload"})
            except Exception as exc:
                # Keep the socket alive on pipeline failure: report the error
                # to the sender instead of letting the connection die.
                logger.error("WS pipeline error: %s", exc)
                await websocket.send_json({"error": str(exc)})

    except WebSocketDisconnect:
        # Normal teardown path — unregister from the broadcast manager.
        _manager.disconnect(websocket)
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
# ---------------------------------------------------------------------------
|
| 359 |
+
# GET /api/stream/start
|
| 360 |
+
# ---------------------------------------------------------------------------
|
| 361 |
+
|
| 362 |
+
@router.get("/api/stream/start")
async def start_gemini_stream(
    n: int = Query(default=5, ge=1, le=20),
    delay: float = Query(default=2.0, ge=0.5, le=30.0),
):
    """
    Trigger Gemini to generate n notifications and stream them to all
    connected WebSocket clients with a delay between each.

    Query params:
        n: number of notifications (default 5)
        delay: seconds between each broadcast (default 2.0)

    This runs in the background — the HTTP response returns immediately.

    Returns:
        {"status": "streaming started", "n": n, "delay_seconds": delay}
    """
    async def _stream():
        # Lazy import: the generator is only needed once streaming starts.
        from app.services.notification_generator import stream_notifications
        async for notification in stream_notifications(n=n, delay_seconds=delay):
            await _manager.broadcast({
                "type": "incoming_notification",
                "source": notification["source"],
                "message": notification["message"],
            })

    # BUG FIX: asyncio.create_task() returns a task the event loop holds only
    # weakly — without a strong reference it can be garbage-collected
    # mid-flight and the stream silently stops. Keep references on a function
    # attribute and drop each one when its task finishes.
    tasks = getattr(start_gemini_stream, "_background_tasks", None)
    if tasks is None:
        tasks = set()
        start_gemini_stream._background_tasks = tasks
    task = asyncio.create_task(_stream())
    tasks.add(task)
    task.add_done_callback(tasks.discard)

    return {"status": "streaming started", "n": n, "delay_seconds": delay}
|
app/config.py
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
app/config.py
|
| 3 |
+
-------------
|
| 4 |
+
Central configuration for NotiFlow Autonomous.
|
| 5 |
+
|
| 6 |
+
All file paths, feature flags, and model settings live here.
|
| 7 |
+
Every other module imports from this file — no hardcoded paths elsewhere.
|
| 8 |
+
|
| 9 |
+
CHANGED from original:
|
| 10 |
+
- Removed: BEDROCK_REGION, BEDROCK_MODEL_ID (AWS Bedrock fully removed)
|
| 11 |
+
- Added: NVIDIA_NIM_API_KEY, NVIDIA_NIM_BASE_URL, NVIDIA_NIM_MODEL
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
import os
|
| 15 |
+
from pathlib import Path
|
| 16 |
+
|
| 17 |
+
# ---------------------------------------------------------------------------
|
| 18 |
+
# Project root
|
| 19 |
+
# ---------------------------------------------------------------------------
|
| 20 |
+
|
| 21 |
+
ROOT = Path(__file__).parent.parent # notiflow/
|
| 22 |
+
|
| 23 |
+
# ---------------------------------------------------------------------------
|
| 24 |
+
# Data paths
|
| 25 |
+
# ---------------------------------------------------------------------------
|
| 26 |
+
|
| 27 |
+
DATA_DIR = ROOT / "data"
|
| 28 |
+
DATA_FILE = DATA_DIR / "notiflow_data.xlsx"
|
| 29 |
+
MEMORY_FILE = DATA_DIR / "agent_memory.json"
|
| 30 |
+
REGISTRY_FILE = ROOT / "skills" / "skill_registry.json"
|
| 31 |
+
|
| 32 |
+
# ---------------------------------------------------------------------------
|
| 33 |
+
# Feature flags
|
| 34 |
+
# ---------------------------------------------------------------------------
|
| 35 |
+
|
| 36 |
+
NOTIFLOW_DEMO_MODE = os.getenv("NOTIFLOW_DEMO_MODE", "true").lower() == "true"
|
| 37 |
+
DEMO_MODE = NOTIFLOW_DEMO_MODE # legacy alias
|
| 38 |
+
|
| 39 |
+
# ---------------------------------------------------------------------------
|
| 40 |
+
# NVIDIA NIM settings (replaces AWS Bedrock)
|
| 41 |
+
# ---------------------------------------------------------------------------
|
| 42 |
+
|
| 43 |
+
NVIDIA_NIM_API_KEY : str | None = (
|
| 44 |
+
os.getenv("NVIDIA_NIM_API_KEY") or os.getenv("NVIDIA_API_KEY")
|
| 45 |
+
)
|
| 46 |
+
NVIDIA_NIM_BASE_URL: str = (
|
| 47 |
+
os.getenv("NVIDIA_NIM_BASE_URL")
|
| 48 |
+
or os.getenv("NIM_BASE_URL")
|
| 49 |
+
or "https://integrate.api.nvidia.com/v1"
|
| 50 |
+
)
|
| 51 |
+
# Legacy single-model override (Phase 1-3 compat)
|
| 52 |
+
NVIDIA_NIM_MODEL : str = os.getenv(
|
| 53 |
+
"NVIDIA_NIM_MODEL", "deepseek-ai/deepseek-v3.2"
|
| 54 |
+
)
|
| 55 |
+
# Phase 4: per-role model routing
|
| 56 |
+
NIM_PRIMARY_MODEL : str = os.getenv(
|
| 57 |
+
"NIM_PRIMARY_MODEL", "deepseek-ai/deepseek-v3.2"
|
| 58 |
+
)
|
| 59 |
+
NIM_FALLBACK_MODEL : str = os.getenv(
|
| 60 |
+
"NIM_FALLBACK_MODEL", "deepseek-ai/deepseek-v3.1"
|
| 61 |
+
)
|
| 62 |
+
|
| 63 |
+
# ---------------------------------------------------------------------------
|
| 64 |
+
# OpenRouter settings (Phase 4 fallback)
|
| 65 |
+
# ---------------------------------------------------------------------------
|
| 66 |
+
|
| 67 |
+
OPENROUTER_API_KEY : str | None = os.getenv("OPENROUTER_API_KEY")
|
| 68 |
+
OPENROUTER_MODEL : str = os.getenv(
|
| 69 |
+
"OPENROUTER_MODEL", "deepseek/deepseek-chat"
|
| 70 |
+
)
|
| 71 |
+
OPENROUTER_REFERER : str = os.getenv("OPENROUTER_REFERER", "")
|
| 72 |
+
OPENROUTER_TITLE : str = os.getenv("OPENROUTER_TITLE", "NotiFlow")
|
| 73 |
+
|
| 74 |
+
# ---------------------------------------------------------------------------
|
| 75 |
+
# Gemini settings (fallback)
|
| 76 |
+
# ---------------------------------------------------------------------------
|
| 77 |
+
|
| 78 |
+
GEMINI_API_KEY: str | None = os.getenv("GEMINI_API_KEY")
|
| 79 |
+
|
| 80 |
+
# ---------------------------------------------------------------------------
|
| 81 |
+
# Excel sync path
|
| 82 |
+
# ---------------------------------------------------------------------------
|
| 83 |
+
|
| 84 |
+
_env_excel = os.getenv("EXCEL_FILE_PATH")
|
| 85 |
+
EXCEL_SYNC_FILE = Path(_env_excel) if _env_excel else DATA_FILE
|
| 86 |
+
|
| 87 |
+
# ---------------------------------------------------------------------------
|
| 88 |
+
# Google Sheets settings
|
| 89 |
+
# ---------------------------------------------------------------------------
|
| 90 |
+
|
| 91 |
+
GOOGLE_SHEETS_CREDENTIALS: str = os.getenv(
|
| 92 |
+
"GOOGLE_SHEETS_CREDENTIALS", "credentials/sheets.json"
|
| 93 |
+
)
|
| 94 |
+
GOOGLE_SHEET_ID: str = os.getenv("GOOGLE_SHEET_ID", "")
|
| 95 |
+
|
| 96 |
+
# ---------------------------------------------------------------------------
|
| 97 |
+
# Ensure data directory exists at import time
|
| 98 |
+
# ---------------------------------------------------------------------------
|
| 99 |
+
|
| 100 |
+
DATA_DIR.mkdir(parents=True, exist_ok=True)
|
app/core/__init__.py
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""app/core — NotiFlow core primitives."""
|
| 2 |
+
|
| 3 |
+
from app.core.llm_service import LLMService, get_llm
|
| 4 |
+
from app.core.llm_router import route_llm, ModelEntry
|
| 5 |
+
from app.core.context import create_context, update_context, log_step, add_error
|
| 6 |
+
from app.core.base_agent import BaseAgent
|
| 7 |
+
from app.core.planner import build_plan, PlanRule
|
| 8 |
+
from app.core.autonomy_planner import build_autonomy_plan, AutonomyRule
|
| 9 |
+
from app.core.priority import contribute_priority_score, derive_priority_label, reset_priority_score
|
| 10 |
+
from app.core.registry import get_agent, register, list_agents, AGENT_REGISTRY
|
| 11 |
+
|
| 12 |
+
__all__ = [
|
| 13 |
+
"LLMService", "get_llm",
|
| 14 |
+
"route_llm", "ModelEntry",
|
| 15 |
+
"create_context", "update_context", "log_step", "add_error",
|
| 16 |
+
"BaseAgent",
|
| 17 |
+
"build_plan", "PlanRule",
|
| 18 |
+
"build_autonomy_plan", "AutonomyRule",
|
| 19 |
+
"contribute_priority_score", "derive_priority_label", "reset_priority_score",
|
| 20 |
+
"get_agent", "register", "list_agents", "AGENT_REGISTRY",
|
| 21 |
+
]
|
app/core/autonomy_planner.py
ADDED
|
@@ -0,0 +1,161 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
app/core/autonomy_planner.py
|
| 3 |
+
-----------------------------
|
| 4 |
+
Dynamic Autonomy Planner for NotiFlow Autonomous.
|
| 5 |
+
|
| 6 |
+
Mirrors the design of app/core/planner.py but governs the autonomy layer.
|
| 7 |
+
Each rule has a condition that can skip agents based on context state,
|
| 8 |
+
avoiding unnecessary work (e.g. skip escalation if priority is low,
|
| 9 |
+
skip recovery if verification already passed).
|
| 10 |
+
|
| 11 |
+
Extending
|
| 12 |
+
---------
|
| 13 |
+
To add a new autonomy step:
|
| 14 |
+
1. Create app/agents/my_autonomy_agent.py
|
| 15 |
+
2. Register it in app/core/registry.py
|
| 16 |
+
3. Append one AutonomyRule here — zero other changes needed
|
| 17 |
+
|
| 18 |
+
Public API
|
| 19 |
+
----------
|
| 20 |
+
build_autonomy_plan(context) -> list[dict]
|
| 21 |
+
Evaluates rules, writes context["autonomy_plan"], returns the plan.
|
| 22 |
+
"""
|
| 23 |
+
|
| 24 |
+
from __future__ import annotations
|
| 25 |
+
|
| 26 |
+
from dataclasses import dataclass
|
| 27 |
+
from typing import Any, Callable
|
| 28 |
+
|
| 29 |
+
# ---------------------------------------------------------------------------
|
| 30 |
+
# Rule definition (same pattern as PlanRule in planner.py)
|
| 31 |
+
# ---------------------------------------------------------------------------
|
| 32 |
+
|
| 33 |
+
@dataclass
class AutonomyRule:
    """
    A single conditional step in the autonomy execution plan.

    Attributes:
        agent: Registry key of the agent to run.
        condition: callable(ctx) → bool — True means include this agent.
        description: Human-readable reason (written to autonomy_plan entries).
    """
    # Registry key looked up via app.core.registry when the plan executes.
    agent: str
    # Predicate over the live context dict; evaluated by build_autonomy_plan.
    condition: Callable[[dict[str, Any]], bool]
    # Free-text rationale copied verbatim into each plan entry.
    description: str = ""
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
# ---------------------------------------------------------------------------
|
| 49 |
+
# Condition functions
|
| 50 |
+
# ---------------------------------------------------------------------------
|
| 51 |
+
|
| 52 |
+
def _always(ctx: dict[str, Any]) -> bool:
|
| 53 |
+
return True
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def _skip_if_verified(ctx: dict[str, Any]) -> bool:
|
| 57 |
+
"""Skip monitor if verification already passed cleanly — nothing to flag."""
|
| 58 |
+
v = ctx.get("verification", {})
|
| 59 |
+
return v.get("status") != "ok"
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def _skip_if_no_data(ctx: dict[str, Any]) -> bool:
|
| 63 |
+
"""Skip prediction if there is no data to score."""
|
| 64 |
+
return bool(ctx.get("data"))
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def _skip_if_low_score(ctx: dict[str, Any]) -> bool:
|
| 68 |
+
"""Skip urgency if priority score is already 0 and intent is 'other'."""
|
| 69 |
+
intent = (ctx.get("intent") or "other").lower()
|
| 70 |
+
if intent == "other" and ctx.get("priority_score", 0) == 0:
|
| 71 |
+
return False # nothing will change — skip
|
| 72 |
+
return True
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def _escalation_needed(ctx: dict[str, Any]) -> bool:
|
| 76 |
+
"""
|
| 77 |
+
Only run escalation if priority is high/critical OR risk is high.
|
| 78 |
+
Avoids noisy alert logs on routine transactions.
|
| 79 |
+
"""
|
| 80 |
+
priority = (ctx.get("priority") or "normal").lower()
|
| 81 |
+
risk = ctx.get("risk", {}).get("level", "low")
|
| 82 |
+
return priority in ("high", "critical") or risk == "high"
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def _recovery_needed(ctx: dict[str, Any]) -> bool:
|
| 86 |
+
"""
|
| 87 |
+
Run recovery only if something actually went wrong.
|
| 88 |
+
Skipped if verification passed and no errors exist.
|
| 89 |
+
"""
|
| 90 |
+
v_status = ctx.get("verification", {}).get("status", "ok")
|
| 91 |
+
errors = ctx.get("errors", [])
|
| 92 |
+
return v_status != "ok" or len(errors) > 0
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
# ---------------------------------------------------------------------------
|
| 96 |
+
# Rule set (order matters — this IS the autonomy pipeline definition)
|
| 97 |
+
# ---------------------------------------------------------------------------
|
| 98 |
+
|
| 99 |
+
# List order IS execution order — build_autonomy_plan preserves it.
_AUTONOMY_RULES: list[AutonomyRule] = [
    # Always verify — the remaining rules read its result from the context.
    AutonomyRule(
        agent       = "verification",
        condition   = _always,
        description = "Validate skill execution produced expected output",
    ),
    AutonomyRule(
        agent       = "monitor",
        condition   = _skip_if_verified,
        description = "Scan for missing fields, errors, and inconsistencies",
    ),
    AutonomyRule(
        agent       = "prediction",
        condition   = _skip_if_no_data,
        description = "Rule-based risk scoring on extracted data",
    ),
    AutonomyRule(
        agent       = "urgency",
        condition   = _skip_if_low_score,
        description = "Detect urgency signals and derive final priority label",
    ),
    AutonomyRule(
        agent       = "escalation",
        condition   = _escalation_needed,
        description = "Raise alerts for high-priority or high-risk situations",
    ),
    AutonomyRule(
        agent       = "recovery",
        condition   = _recovery_needed,
        description = "Attempt self-healing for pipeline failures",
    ),
]
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
# ---------------------------------------------------------------------------
|
| 134 |
+
# Public API
|
| 135 |
+
# ---------------------------------------------------------------------------
|
| 136 |
+
|
| 137 |
+
def build_autonomy_plan(context: dict[str, Any]) -> list[dict[str, Any]]:
    """
    Evaluate autonomy rules against the current context.

    Writes the plan to context["autonomy_plan"] and returns it.

    Args:
        context: The live request context dict (after main pipeline ran).

    Returns:
        Ordered list of autonomy steps, each a dict:
            {
                "agent": str,        # registry key
                "description": str,  # human-readable reason
            }
    """
    # Rule order in _AUTONOMY_RULES defines execution order; the comprehension
    # preserves it while dropping rules whose condition rejects this context.
    plan = [
        {"agent": rule.agent, "description": rule.description}
        for rule in _AUTONOMY_RULES
        if rule.condition(context)
    ]
    context["autonomy_plan"] = plan
    return plan
|
app/core/base_agent.py
ADDED
|
@@ -0,0 +1,122 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
app/core/base_agent.py
|
| 3 |
+
----------------------
|
| 4 |
+
BaseAgent — the standard interface for all NotiFlow agents.
|
| 5 |
+
|
| 6 |
+
Every agent in the system inherits from BaseAgent and implements `execute()`.
|
| 7 |
+
The public `run()` method provides:
|
| 8 |
+
- unified error handling
|
| 9 |
+
- structured logging into context["history"]
|
| 10 |
+
- state transition on success / failure
|
| 11 |
+
|
| 12 |
+
Usage
|
| 13 |
+
-----
|
| 14 |
+
class MyAgent(BaseAgent):
|
| 15 |
+
name = "MyAgent"
|
| 16 |
+
|
| 17 |
+
def execute(self, context: dict) -> dict:
|
| 18 |
+
# do work, mutate context, return it
|
| 19 |
+
context["data"]["my_field"] = "value"
|
| 20 |
+
return context
|
| 21 |
+
|
| 22 |
+
result_ctx = MyAgent().run(context)
|
| 23 |
+
"""
|
| 24 |
+
|
| 25 |
+
from __future__ import annotations
|
| 26 |
+
|
| 27 |
+
import logging
|
| 28 |
+
from typing import Any
|
| 29 |
+
|
| 30 |
+
from app.core.context import log_step, add_error, update_context
|
| 31 |
+
|
| 32 |
+
logger = logging.getLogger(__name__)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class BaseAgent:
    """
    Abstract base for all NotiFlow agents.

    Subclasses must:
    - Set a class-level ``name`` attribute
    - Implement ``execute(context) -> dict``

    Optional class-level audit declarations (used in history logs):
        input_keys  : list[str] — context keys this agent reads
        output_keys : list[str] — context keys this agent writes
        action      : str — one-line description of what the agent does

    The ``run()`` wrapper handles logging and error isolation automatically.
    Agents should raise exceptions from ``execute()`` on unrecoverable errors;
    for soft/non-fatal issues they should call ``add_error(ctx, msg)`` and
    continue.
    """

    #: Human-readable agent identifier used in logs and history.
    name: str = "BaseAgent"

    #: Audit metadata — override in subclasses for richer logs
    # NOTE(review): these are mutable class-level defaults, shared across all
    # subclasses that do not override them; run() only reads them via list(),
    # so this is safe as long as nobody mutates them in place.
    input_keys: list[str] = []
    output_keys: list[str] = []
    action: str = ""

    def run(self, context: dict[str, Any]) -> dict[str, Any]:
        """
        Execute the agent with full error handling and audit logging.

        This is the ONLY method callers should invoke externally.

        Args:
            context: The live request context dict.

        Returns:
            The (mutated) context dict.

        Raises:
            Exception: Re-raises any exception from execute() so the
                orchestrator can decide whether to abort.
        """
        logger.info("[%s] starting", self.name)
        try:
            context = self.execute(context)
            # Success path: record an audit entry with declared key metadata.
            log_step(
                context,
                self.name,
                "success",
                action = self.action or f"{self.name} completed",
                input_keys = list(self.input_keys),
                output_keys = list(self.output_keys),
            )
            logger.info("[%s] completed successfully", self.name)
        except Exception as exc:
            # Failure path order matters: accumulate the error, write the
            # audit entry, mark the pipeline failed, THEN re-raise.
            error_msg = f"{self.name} failed: {exc}"
            logger.error(error_msg, exc_info=True)
            add_error(context, error_msg)
            log_step(
                context,
                self.name,
                "error",
                str(exc),
                action = self.action or f"{self.name} failed",
                input_keys = list(self.input_keys),
                output_keys = [],  # nothing trustworthy was written
            )
            update_context(context, state="failed")
            raise
        return context

    def execute(self, context: dict[str, Any]) -> dict[str, Any]:
        """
        Agent-specific logic. Subclasses MUST override this method.

        Args:
            context: The live request context dict.

        Returns:
            The mutated context dict.

        Raises:
            NotImplementedError: If subclass does not implement this.
        """
        raise NotImplementedError(
            f"{self.__class__.__name__} must implement execute(context)."
        )
|
app/core/config_bridge.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
app/config.py is the authoritative config.
|
| 3 |
+
This file at the project root ensures `from app.config import X` works
|
| 4 |
+
when the project root is on sys.path (which backend/main.py guarantees).
|
| 5 |
+
"""
|
| 6 |
+
# Intentionally empty — app/config.py is the single authoritative settings module; this stub exists only to keep the documented import path working.
|
app/core/context.py
ADDED
|
@@ -0,0 +1,169 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
app/core/context.py
|
| 3 |
+
-------------------
|
| 4 |
+
Unified request context for NotiFlow Autonomous.
|
| 5 |
+
|
| 6 |
+
The context object is the single source of truth for every request.
|
| 7 |
+
It is created at the API boundary, threaded through every agent, and
|
| 8 |
+
returned to the caller as part of the final response.
|
| 9 |
+
|
| 10 |
+
Structure
|
| 11 |
+
---------
|
| 12 |
+
{
|
| 13 |
+
"message": str, # original raw message
|
| 14 |
+
"intent": str | None, # detected intent (filled by IntentAgent)
|
| 15 |
+
"data": dict, # extracted + validated fields
|
| 16 |
+
"event": dict, # skill execution result
|
| 17 |
+
"state": str, # pipeline lifecycle state
|
| 18 |
+
"history": list[dict], # audit-level agent execution log ← UPGRADED
|
| 19 |
+
"errors": list[str], # non-fatal errors accumulated during run
|
| 20 |
+
"priority": str, # "low" | "medium" | "high" (derived by UrgencyAgent)
|
| 21 |
+
"priority_score": int, # additive score 0-100 (contributed by multiple agents)
|
| 22 |
+
"priority_score_reasons": list[dict], # audit trail of score contributions
|
| 23 |
+
"plan": list[dict], # ordered main plan steps (set by Planner)
|
| 24 |
+
"autonomy_plan": list[dict], # ordered autonomy steps (set by AutonomyPlanner)
|
| 25 |
+
"metadata": dict, # source, sheet_updated, model, etc.
|
| 26 |
+
}
|
| 27 |
+
|
| 28 |
+
History entry shape (audit-ready):
|
| 29 |
+
{
|
| 30 |
+
"agent": str, # agent name
|
| 31 |
+
"action": str, # what the agent did (human-readable)
|
| 32 |
+
"input_keys": list[str], # context keys read by this agent
|
| 33 |
+
"output_keys": list[str], # context keys written by this agent
|
| 34 |
+
"status": str, # "success" | "error" | "skipped"
|
| 35 |
+
"detail": str, # error message or extra note
|
| 36 |
+
"timestamp": str, # ISO-8601 UTC
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
Pipeline states:
|
| 40 |
+
initialized → intent_detected → extracted → validated → routed → completed
|
| 41 |
+
Any stage can transition to: failed
|
| 42 |
+
|
| 43 |
+
Public API (unchanged from Phase 1 — fully backward compatible)
|
| 44 |
+
----------
|
| 45 |
+
create_context(message, source) -> dict
|
| 46 |
+
update_context(ctx, **kwargs) -> dict
|
| 47 |
+
log_step(ctx, agent, status, detail, *, action, input_keys, output_keys) -> None
|
| 48 |
+
add_error(ctx, error) -> None
|
| 49 |
+
"""
|
| 50 |
+
|
| 51 |
+
from __future__ import annotations
|
| 52 |
+
|
| 53 |
+
from datetime import datetime, timezone
|
| 54 |
+
from typing import Any
|
| 55 |
+
|
| 56 |
+
from app.core.event_bus import push_live_log
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
# ---------------------------------------------------------------------------
|
| 60 |
+
# Factory
|
| 61 |
+
# ---------------------------------------------------------------------------
|
| 62 |
+
|
| 63 |
+
def create_context(message: str, source: str = "system") -> dict[str, Any]:
    """Build a brand-new pipeline context for one incoming message.

    Args:
        message: Raw business message (Hinglish or English); surrounding
            whitespace is stripped.
        source: Notification channel (e.g. "whatsapp", "gpay").

    Returns:
        A fully initialised context dict ready for the agent pipeline.
    """
    created_at = datetime.now(timezone.utc).isoformat()
    ctx: dict[str, Any] = {}
    ctx["message"] = message.strip()
    ctx["source"] = source
    # ── Intent (Phase 5: multi-intent) ────────────────────────────────────
    ctx["intents"] = []          # all detected intents, ordered (IntentAgent)
    ctx["intent"] = None         # primary intent — kept for backward compat
    # ── Extraction ────────────────────────────────────────────────────────
    ctx["multi_data"] = {}       # per-intent extracted fields
    ctx["data"] = {}             # primary-intent extraction (backward compat)
    ctx["event"] = {}
    ctx["invoice"] = None
    ctx["payment"] = None
    ctx["events"] = []
    ctx["live_logs"] = []
    # ── Pipeline bookkeeping ──────────────────────────────────────────────
    ctx["state"] = "initialized"
    ctx["history"] = []
    ctx["errors"] = []
    # ── Priority (UrgencyAgent + contributors) ────────────────────────────
    ctx["priority"] = "normal"          # final derived label
    ctx["priority_score"] = 0           # additive 0-100 score
    ctx["priority_score_reasons"] = []  # audit trail of contributions
    # ── Plans ─────────────────────────────────────────────────────────────
    ctx["plan"] = []            # filled by Planner (list[dict] in Phase 2)
    ctx["autonomy_plan"] = []   # filled by AutonomyPlanner
    ctx["metadata"] = {
        "source": source,
        "sheet_updated": False,
        "model": None,
        "retry_count": 0,  # replan loop counter
        "created_at": created_at,
    }
    return ctx
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
# ---------------------------------------------------------------------------
|
| 108 |
+
# Mutators
|
| 109 |
+
# ---------------------------------------------------------------------------
|
| 110 |
+
|
| 111 |
+
def update_context(ctx: dict[str, Any], **kwargs: Any) -> dict[str, Any]:
    """
    Apply keyword updates to the context (in-place, returns ctx).

    Supports double-underscore for nested dicts:
        update_context(ctx, metadata__source="whatsapp")
        update_context(ctx, intent="order", state="intent_detected")

    Fix over the original: a nested update whose parent dict did not yet
    exist was silently dropped. The parent dict is now created on demand;
    if the parent key exists but is not a dict, the value is stored under
    the raw "parent__child" key so the update is never lost.
    """
    for key, value in kwargs.items():
        if "__" in key:
            parent, child = key.split("__", 1)
            # Create the parent dict on demand instead of dropping the update.
            target = ctx.setdefault(parent, {})
            if isinstance(target, dict):
                target[child] = value
            else:
                # Parent exists but is not a dict — keep the data reachable.
                ctx[key] = value
        else:
            ctx[key] = value
    return ctx
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
def log_step(
    ctx: dict[str, Any],
    agent: str,
    status: str,
    detail: str = "",
    *,
    action: str = "",
    input_keys: list[str] | None = None,
    output_keys: list[str] | None = None,
) -> None:
    """Record one agent execution in ``ctx["history"]`` and the live-log feed.

    Backward compatible: the original 4-argument positional form still works;
    the keyword-only audit fields are optional enrichments.

    Args:
        ctx: The active context dict.
        agent: Agent name (e.g. "IntentAgent").
        status: "success" | "error" | "skipped".
        detail: Error message or human-readable note.
        action: What the agent did; defaults to "<agent> executed".
        input_keys: Context keys the agent READ.
        output_keys: Context keys the agent WROTE.
    """
    stamped_at = datetime.now(timezone.utc).isoformat()
    record: dict[str, Any] = {
        "agent": agent,
        "action": action if action else f"{agent} executed",
        "input_keys": input_keys if input_keys else [],
        "output_keys": output_keys if output_keys else [],
        "status": status,
        "detail": detail,
        "timestamp": stamped_at,
    }
    ctx["history"].append(record)
    # Mirror the entry onto the global live-log feed for the dashboard.
    push_live_log(ctx, record)
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
def add_error(ctx: dict[str, Any], error: str) -> None:
    """Append a non-fatal error message to ``ctx["errors"]``."""
    errors: list[str] = ctx["errors"]
    errors.append(error)
|
app/core/event_bus.py
ADDED
|
@@ -0,0 +1,125 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from collections import deque
|
| 4 |
+
from copy import deepcopy
|
| 5 |
+
from datetime import datetime, timezone
|
| 6 |
+
from typing import Any
|
| 7 |
+
|
| 8 |
+
# Process-local, in-memory stores backing the event bus. Nothing here is
# persisted across restarts.
# NOTE(review): access is unsynchronized — assumes a single-threaded (or
# single-event-loop) server; confirm before adding worker threads.
_LOG_BUFFER: deque[dict[str, Any]] = deque(maxlen=500)    # last 500 live-log entries
_EVENT_BUFFER: deque[dict[str, Any]] = deque(maxlen=200)  # last 200 emitted events
_INVOICE_STORE: dict[str, dict[str, Any]] = {}            # invoice_id -> normalized invoice
_EVENT_SEQ = 0  # monotonically increasing sequence number for emitted events
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def _now_iso() -> str:
|
| 15 |
+
return datetime.now(timezone.utc).isoformat()
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def _make_id(prefix: str) -> str:
|
| 19 |
+
stamp = datetime.now(timezone.utc).strftime("%Y%m%d%H%M%S%f")
|
| 20 |
+
return f"{prefix}-{stamp}"
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def push_live_log(ctx: dict[str, Any] | None, entry: dict[str, Any]) -> dict[str, Any]:
    """Normalise *entry* into a live-log record and buffer it.

    The record is appended to ``ctx["live_logs"]`` (created if missing) when a
    context is given, and always appended to the global log buffer.
    Missing fields get defaults: generated id/timestamp, agent "System",
    status "info", empty detail/action.

    Returns:
        The normalised log record.
    """
    record = {
        "id": entry.get("id") or _make_id("log"),
        "agent": entry.get("agent", "System"),
        "status": entry.get("status", "info"),
        "detail": entry.get("detail", ""),
        "action": entry.get("action", ""),
        "timestamp": entry.get("timestamp") or _now_iso(),
    }
    if ctx is not None:
        per_request_logs = ctx.setdefault("live_logs", [])
        per_request_logs.append(record)
    _LOG_BUFFER.append(record)
    return record
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def get_logs(limit: int | None = None) -> list[dict[str, Any]]:
    """Snapshot of the global live-log buffer; last *limit* entries if given."""
    snapshot = list(_LOG_BUFFER)
    return snapshot if limit is None else snapshot[-limit:]
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def emit_event(
    ctx: dict[str, Any] | None,
    event_type: str,
    payload: dict[str, Any],
    *,
    agent: str | None = None,
    step: str | None = None,
    message: str | None = None,
    log_entry: dict[str, Any] | None = None,
    status: str | None = None,
) -> dict[str, Any]:
    """Emit a structured event onto the context and the global event buffer.

    Fix over the original: callers in this codebase (LLMService.generate)
    pass ``status="error"``, which the original signature rejected with a
    TypeError. ``status`` is now an accepted optional keyword and is stored
    on the event when provided.

    Args:
        ctx: Active context, or None for a global-only event.
        event_type: Event type string (e.g. "error_occurred").
        payload: Event data; deep-copied so later caller mutations do not
            leak into the stored event.
        agent: Emitting agent name.
        step: Pipeline step; defaults to event_type.
        message: Human-readable message.
        log_entry: Optional associated live-log record (deep-copied).
        status: Optional status label (e.g. "error").

    Returns:
        The event dict that was buffered.
    """
    global _EVENT_SEQ
    _EVENT_SEQ += 1
    event_id = _make_id("evt")
    event = {
        # id / sequence duplicated under two key names for consumer compat.
        "id": event_id,
        "event_id": event_id,
        "sequence": _EVENT_SEQ,
        "sequence_number": _EVENT_SEQ,
        "type": event_type,
        "timestamp": _now_iso(),
        "agent": agent,
        "step": step or event_type,
        "message": message or "",
        "data": deepcopy(payload),
        "payload": deepcopy(payload),
        "log": deepcopy(log_entry) if log_entry else None,
    }
    if status is not None:
        event["status"] = status
    if ctx is not None:
        ctx.setdefault("events", []).append(event)
    _EVENT_BUFFER.append(event)

    # Invoice-shaped payloads (id + item list) are persisted as a side effect.
    invoice_id = payload.get("invoice_id") or payload.get("id")
    if invoice_id and isinstance(payload.get("items"), list):
        store_invoice(payload)

    return event
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def emit_global_event(event_type: str, payload: dict[str, Any], **kwargs: Any) -> dict[str, Any]:
    """Emit an event with no per-request context (global buffer only)."""
    event = emit_event(None, event_type, payload, **kwargs)
    return event
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def get_events(limit: int | None = None) -> list[dict[str, Any]]:
    """Snapshot of the global event buffer; last *limit* events if given."""
    snapshot = list(_EVENT_BUFFER)
    if limit is None:
        return snapshot
    return snapshot[-limit:]
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def get_events_since(sequence: int) -> list[dict[str, Any]]:
    """Buffered events whose sequence number is strictly greater than *sequence*."""
    newer: list[dict[str, Any]] = []
    for event in _EVENT_BUFFER:
        if int(event.get("sequence", 0)) > sequence:
            newer.append(event)
    return newer
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def get_latest_event_sequence() -> int:
    """Sequence number of the newest buffered event, or 0 when the buffer is empty."""
    try:
        newest = _EVENT_BUFFER[-1]
    except IndexError:
        return 0
    return int(newest.get("sequence", 0))
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def store_invoice(invoice: dict[str, Any]) -> dict[str, Any]:
    """Normalise and persist an invoice in the in-memory store.

    The id is mirrored under both "invoice_id" and "id". A deep copy is
    stored so later caller mutations do not leak into the store. An invoice
    with no usable id is returned (copied) but not stored.
    """
    record = deepcopy(invoice)
    key = record.get("invoice_id") or record.get("id")
    if not key:
        return record  # no id to index by — hand back the copy unstored
    record["invoice_id"] = key
    record["id"] = key
    _INVOICE_STORE[key] = record
    return record
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def get_invoice(invoice_id: str) -> dict[str, Any] | None:
    """Deep-copied invoice for *invoice_id*, or None when unknown/empty."""
    stored = _INVOICE_STORE.get(invoice_id)
    if not stored:
        return None
    return deepcopy(stored)
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def confirm_invoice_payment(invoice_id: str) -> dict[str, Any] | None:
    """Mark the stored invoice as paid.

    Returns:
        The re-stored (normalised) invoice copy, or None when the id is
        unknown.
    """
    record = _INVOICE_STORE.get(invoice_id)
    if not record:
        return None
    record["status"] = "paid"
    return store_invoice(record)
|
app/core/llm_router.py
ADDED
|
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
app/core/llm_router.py
|
| 3 |
+
-----------------------
|
| 4 |
+
LLM Router for NotiFlow Autonomous — Phase 4.
|
| 5 |
+
|
| 6 |
+
Maps (agent_name, task_type) → ordered list of models to try.
|
| 7 |
+
Pure data + logic, zero I/O. LLMService consumes this.
|
| 8 |
+
|
| 9 |
+
Model entry shape:
|
| 10 |
+
{
|
| 11 |
+
"provider": "nim" | "openrouter",
|
| 12 |
+
"model": str, # exact model ID
|
| 13 |
+
"max_tokens": int | None, # None = caller decides
|
| 14 |
+
}
|
| 15 |
+
|
| 16 |
+
Routing strategy
|
| 17 |
+
----------------
|
| 18 |
+
primary fallback-1 fallback-2
|
| 19 |
+
intent → deepseek-v3.2 (nim) openrouter-fallback deepseek-v3.1 (nim)
|
| 20 |
+
extraction → deepseek-v3.2 (nim) openrouter-fallback deepseek-v3.1 (nim)
|
| 21 |
+
planning → deepseek-v3.2 (nim) openrouter-fallback deepseek-v3.1 (nim)
|
| 22 |
+
reasoning → deepseek-v3.2 (nim) openrouter-fallback deepseek-v3.1 (nim)
|
| 23 |
+
default → deepseek-v3.2 (nim) openrouter-fallback deepseek-v3.1 (nim)
|
| 24 |
+
|
| 25 |
+
Extending
|
| 26 |
+
---------
|
| 27 |
+
Add / change routing by editing _ROUTES below. No other file needs to change.
|
| 28 |
+
|
| 29 |
+
Public API
|
| 30 |
+
----------
|
| 31 |
+
route_llm(agent_name, task_type) -> dict
|
| 32 |
+
Returns {"primary": ModelEntry, "fallbacks": [ModelEntry, ...]}
|
| 33 |
+
|
| 34 |
+
ModelEntry = {"provider": str, "model": str, "max_tokens": int | None}
|
| 35 |
+
"""
|
| 36 |
+
|
| 37 |
+
from __future__ import annotations
|
| 38 |
+
|
| 39 |
+
import os
|
| 40 |
+
from typing import TypedDict
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
class ModelEntry(TypedDict):
    """One candidate model in a routing plan (consumed by route_llm / LLMService)."""

    provider: str  # backend name: "nim" | "openrouter"
    model: str  # exact provider-side model identifier
    max_tokens: int | None  # per-model token cap; None = use the caller's default
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
# ---------------------------------------------------------------------------
|
| 50 |
+
# Model catalogue
|
| 51 |
+
# ---------------------------------------------------------------------------
|
| 52 |
+
|
| 53 |
+
# Primary NVIDIA NIM model; overridable via NIM_PRIMARY_MODEL.
_NIM_PRIMARY : ModelEntry = {
    "provider": "nim",
    "model": os.getenv("NIM_PRIMARY_MODEL", "deepseek-ai/deepseek-v3.2"),
    "max_tokens": None,
}
# Second NIM model — the last-resort fallback in every route.
_NIM_FALLBACK : ModelEntry = {
    "provider": "nim",
    "model": os.getenv("NIM_FALLBACK_MODEL", "deepseek-ai/deepseek-v3.1"),
    "max_tokens": None,
}
# OpenRouter model — first fallback (different provider, so it survives a
# NIM-wide outage).
_OPENROUTER : ModelEntry = {
    "provider": "openrouter",
    "model": os.getenv(
        "OPENROUTER_MODEL",
        "deepseek/deepseek-chat",  # sensible default
    ),
    "max_tokens": None,
}
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
# ---------------------------------------------------------------------------
|
| 74 |
+
# Routing table
|
| 75 |
+
# (agent_name, task_type) → [primary, fallback1, fallback2, ...]
|
| 76 |
+
# Keys are lowercased at lookup time.
|
| 77 |
+
# ---------------------------------------------------------------------------
|
| 78 |
+
|
| 79 |
+
# Phase 4 routing table: (agent_name, task_type) → ordered model candidates.
# Every current route is identical (NIM primary → OpenRouter → NIM fallback);
# the per-agent entries exist so individual agents can diverge later without
# touching call sites. Keys are lowercased at lookup time.
_ROUTES: dict[tuple[str, str], list[ModelEntry]] = {

    # Fast classification — intent detection
    ("intentagent", "classification"): [_NIM_PRIMARY, _OPENROUTER, _NIM_FALLBACK],
    ("intentagent", ""): [_NIM_PRIMARY, _OPENROUTER, _NIM_FALLBACK],

    # Structured extraction — needs reliable JSON output
    ("extractionagent", "extraction"): [_NIM_PRIMARY, _OPENROUTER, _NIM_FALLBACK],
    ("extractionagent", ""): [_NIM_PRIMARY, _OPENROUTER, _NIM_FALLBACK],

    # Planning requires deeper reasoning
    ("planner", "planning"): [_NIM_PRIMARY, _OPENROUTER, _NIM_FALLBACK],
    ("planner", ""): [_NIM_PRIMARY, _OPENROUTER, _NIM_FALLBACK],

    # Reasoning-heavy agents — prediction, recovery
    ("predictionagent", "reasoning"): [_NIM_PRIMARY, _OPENROUTER, _NIM_FALLBACK],
    ("predictionagent", ""): [_NIM_PRIMARY, _OPENROUTER, _NIM_FALLBACK],
    ("recoveryagent", "reasoning"): [_NIM_PRIMARY, _OPENROUTER, _NIM_FALLBACK],
    ("recoveryagent", ""): [_NIM_PRIMARY, _OPENROUTER, _NIM_FALLBACK],
}

# Default route when no specific (agent, task) entry is found.
_DEFAULT_ROUTE: list[ModelEntry] = [_NIM_PRIMARY, _OPENROUTER, _NIM_FALLBACK]
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
# ---------------------------------------------------------------------------
|
| 105 |
+
# Public API
|
| 106 |
+
# ---------------------------------------------------------------------------
|
| 107 |
+
|
| 108 |
+
def route_llm(agent_name: str = "", task_type: str = "") -> dict:
    """
    Resolve the ordered model plan for an agent/task pair.

    Lookup order: exact ``(agent, task)`` key, then the ``(agent, "")``
    wildcard, then ``_DEFAULT_ROUTE``. Both inputs are case-insensitive.

    Args:
        agent_name: Class name of the calling agent (e.g. "IntentAgent").
        task_type: Nature of the task (e.g. "classification", "extraction",
            "reasoning", "planning").

    Returns:
        {"primary": ModelEntry, "fallbacks": [ModelEntry, ...]}
        where fallbacks preserve the route's order after the primary.
    """
    agent_key = agent_name.lower()
    # Route lists are always non-empty, so `or`-chaining is equivalent to
    # explicit None checks here.
    candidates = (
        _ROUTES.get((agent_key, task_type.lower()))
        or _ROUTES.get((agent_key, ""))
        or _DEFAULT_ROUTE
    )
    primary, *fallbacks = candidates
    return {"primary": primary, "fallbacks": fallbacks}
|
app/core/llm_service.py
ADDED
|
@@ -0,0 +1,459 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
app/core/llm_service.py
|
| 3 |
+
-----------------------
|
| 4 |
+
Unified LLM Service for NotiFlow Autonomous — Phase 4.
|
| 5 |
+
|
| 6 |
+
ALL LLM calls in the system go through this single service.
|
| 7 |
+
No other module is allowed to call an LLM API directly.
|
| 8 |
+
|
| 9 |
+
Phase 4 additions
|
| 10 |
+
-----------------
|
| 11 |
+
* generate() accepts optional agent_name + task_type kwargs
|
| 12 |
+
* Internally calls llm_router.route_llm() to get ordered model list
|
| 13 |
+
* Iterates primary → fallback-1 → fallback-2 until one succeeds
|
| 14 |
+
* Writes model_used / fallback_used into context if passed
|
| 15 |
+
* Supports NVIDIA NIM and OpenRouter (both OpenAI-compatible)
|
| 16 |
+
|
| 17 |
+
Backward compatibility
|
| 18 |
+
----------------------
|
| 19 |
+
generate(prompt) — still works, uses default routing
|
| 20 |
+
generate(prompt, max_tokens=256) — still works
|
| 21 |
+
generate(prompt, agent_name="intent", task_type="classification") — Phase 4
|
| 22 |
+
|
| 23 |
+
Configuration (.env)
|
| 24 |
+
--------------------
|
| 25 |
+
NVIDIA_NIM_API_KEY — NIM primary key
|
| 26 |
+
NVIDIA_NIM_BASE_URL — default: https://integrate.api.nvidia.com/v1
|
| 27 |
+
NIM_PRIMARY_MODEL — default: deepseek-ai/deepseek-v3
|
| 28 |
+
NIM_FALLBACK_MODEL — default: deepseek-ai/deepseek-r1
|
| 29 |
+
OPENROUTER_API_KEY — OpenRouter fallback key
|
| 30 |
+
OPENROUTER_MODEL — default: deepseek/deepseek-chat
|
| 31 |
+
|
| 32 |
+
Public API
|
| 33 |
+
----------
|
| 34 |
+
LLMService().generate(prompt, max_tokens, agent_name, task_type, context) -> str
|
| 35 |
+
get_llm() -> LLMService
|
| 36 |
+
"""
|
| 37 |
+
|
| 38 |
+
from __future__ import annotations
|
| 39 |
+
|
| 40 |
+
import logging
|
| 41 |
+
import os
|
| 42 |
+
from typing import Any, Optional
|
| 43 |
+
|
| 44 |
+
logger = logging.getLogger(__name__)
|
| 45 |
+
|
| 46 |
+
# ---------------------------------------------------------------------------
|
| 47 |
+
# Configuration
|
| 48 |
+
# ---------------------------------------------------------------------------
|
| 49 |
+
|
| 50 |
+
_NIM_API_KEY = os.getenv("NVIDIA_NIM_API_KEY") or os.getenv("NVIDIA_API_KEY")
|
| 51 |
+
_NIM_BASE_URL = (
|
| 52 |
+
os.getenv("NVIDIA_NIM_BASE_URL")
|
| 53 |
+
or os.getenv("NIM_BASE_URL")
|
| 54 |
+
or "https://integrate.api.nvidia.com/v1"
|
| 55 |
+
)
|
| 56 |
+
_OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")
|
| 57 |
+
_OPENROUTER_BASE = "https://openrouter.ai/api/v1"
|
| 58 |
+
_LEGACY_NIM_MODEL = os.getenv("NVIDIA_NIM_MODEL", "deepseek-ai/deepseek-v3.2")
|
| 59 |
+
_REQUEST_TIMEOUT_S = float(os.getenv("LLM_TIMEOUT_SECONDS", "60"))
|
| 60 |
+
_RETRY_COUNT = int(os.getenv("LLM_RETRY_COUNT", "2"))
|
| 61 |
+
_SIMULATE_NIM_FAILURE = os.getenv("SIMULATE_NIM_FAILURE", "false").lower() == "true"
|
| 62 |
+
_SIMULATED_NIM_FAILURE_USED = False
|
| 63 |
+
_SIMULATE_NIM_FAIL = os.getenv("SIMULATE_NIM_FAILURE", "false").lower() == "true"
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
# ---------------------------------------------------------------------------
|
| 67 |
+
# LLMService
|
| 68 |
+
# ---------------------------------------------------------------------------
|
| 69 |
+
|
| 70 |
+
class LLMService:
|
| 71 |
+
"""
|
| 72 |
+
Single entry point for all LLM inference in NotiFlow.
|
| 73 |
+
|
| 74 |
+
Phase 4: routes per agent/task, iterates fallbacks automatically,
|
| 75 |
+
optionally writes audit info to context.
|
| 76 |
+
"""
|
| 77 |
+
|
| 78 |
+
def call_llm(
|
| 79 |
+
self,
|
| 80 |
+
prompt: str,
|
| 81 |
+
agent_name: str,
|
| 82 |
+
*,
|
| 83 |
+
max_tokens: int = 256,
|
| 84 |
+
task_type: str = "",
|
| 85 |
+
context: Optional[dict[str, Any]] = None,
|
| 86 |
+
stream: bool = False,
|
| 87 |
+
) -> str:
|
| 88 |
+
"""Convenience wrapper for agent-aware LLM calls."""
|
| 89 |
+
return self.generate(
|
| 90 |
+
prompt,
|
| 91 |
+
max_tokens=max_tokens,
|
| 92 |
+
agent_name=agent_name,
|
| 93 |
+
task_type=task_type,
|
| 94 |
+
context=context,
|
| 95 |
+
stream=stream,
|
| 96 |
+
)
|
| 97 |
+
|
| 98 |
+
def generate(
|
| 99 |
+
self,
|
| 100 |
+
prompt: str,
|
| 101 |
+
max_tokens: int = 256,
|
| 102 |
+
*,
|
| 103 |
+
agent_name: str = "",
|
| 104 |
+
task_type: str = "",
|
| 105 |
+
context: Optional[dict[str, Any]] = None,
|
| 106 |
+
stream: bool = False,
|
| 107 |
+
) -> str:
|
| 108 |
+
"""
|
| 109 |
+
Send a prompt to the best available model for this agent/task.
|
| 110 |
+
|
| 111 |
+
Args:
|
| 112 |
+
prompt: Fully rendered prompt string.
|
| 113 |
+
max_tokens: Maximum tokens to generate (default 256).
|
| 114 |
+
agent_name: Calling agent class name — used by router.
|
| 115 |
+
task_type: Task category — used by router.
|
| 116 |
+
context: Optional live context dict. If provided, writes
|
| 117 |
+
context["model_used"] and context["fallback_used"].
|
| 118 |
+
|
| 119 |
+
Returns:
|
| 120 |
+
Raw text response (JSON or plain text — callers parse it).
|
| 121 |
+
|
| 122 |
+
Raises:
|
| 123 |
+
RuntimeError: If ALL models in the route fail.
|
| 124 |
+
"""
|
| 125 |
+
from app.core.llm_router import route_llm
|
| 126 |
+
from app.core.event_bus import emit_event
|
| 127 |
+
|
| 128 |
+
route = route_llm(agent_name, task_type)
|
| 129 |
+
primary = route["primary"]
|
| 130 |
+
fallbacks = route["fallbacks"]
|
| 131 |
+
all_models = [primary] + fallbacks
|
| 132 |
+
self._stream_requested = stream
|
| 133 |
+
|
| 134 |
+
if context is not None:
|
| 135 |
+
context.setdefault("metadata", {}).update({
|
| 136 |
+
"llm_timeout_seconds": _REQUEST_TIMEOUT_S,
|
| 137 |
+
"llm_retry_count": _RETRY_COUNT,
|
| 138 |
+
"llm_stream": stream,
|
| 139 |
+
})
|
| 140 |
+
|
| 141 |
+
last_exc: Optional[Exception] = None
|
| 142 |
+
tried: list[str] = []
|
| 143 |
+
|
| 144 |
+
for idx, model_entry in enumerate(all_models):
|
| 145 |
+
provider = model_entry["provider"]
|
| 146 |
+
model_name = model_entry["model"]
|
| 147 |
+
tokens = model_entry.get("max_tokens") or max_tokens
|
| 148 |
+
|
| 149 |
+
# ── Error Simulation: NIM failure ─────────────────────────────
|
| 150 |
+
if _SIMULATE_NIM_FAIL and provider == "nim" and "v3.2" in model_name:
|
| 151 |
+
logger.warning(f"SIMULATION: Intentional NIM timeout for {model_name}")
|
| 152 |
+
exc = RuntimeError(f"NIM gateway timeout (simulated) for {model_name}")
|
| 153 |
+
tried.append(model_name)
|
| 154 |
+
last_exc = exc
|
| 155 |
+
next_model = all_models[idx + 1] if idx + 1 < len(all_models) else None
|
| 156 |
+
if context is not None:
|
| 157 |
+
emit_event(
|
| 158 |
+
context,
|
| 159 |
+
"error_occurred",
|
| 160 |
+
{
|
| 161 |
+
"step": agent_name or "llm",
|
| 162 |
+
"message": str(exc),
|
| 163 |
+
"provider": provider,
|
| 164 |
+
"model": model_name,
|
| 165 |
+
},
|
| 166 |
+
agent=agent_name or "LLMService",
|
| 167 |
+
step="llm",
|
| 168 |
+
message=str(exc),
|
| 169 |
+
status="error",
|
| 170 |
+
)
|
| 171 |
+
if next_model is not None:
|
| 172 |
+
emit_event(
|
| 173 |
+
context,
|
| 174 |
+
"recovery_triggered",
|
| 175 |
+
{
|
| 176 |
+
"step": agent_name or "llm",
|
| 177 |
+
"failed_provider": provider,
|
| 178 |
+
"failed_model": model_name,
|
| 179 |
+
"fallback_provider": next_model.get("provider"),
|
| 180 |
+
"fallback_model": next_model.get("model"),
|
| 181 |
+
},
|
| 182 |
+
agent=agent_name or "LLMService",
|
| 183 |
+
step="llm",
|
| 184 |
+
message=f"Fallback triggered → {next_model.get('provider')}/{next_model.get('model')}",
|
| 185 |
+
)
|
| 186 |
+
self._log_fallback(agent_name, provider, model_name, exc, next_model)
|
| 187 |
+
continue
|
| 188 |
+
|
| 189 |
+
try:
|
| 190 |
+
response = self._call_model(provider, model_name, prompt, tokens)
|
| 191 |
+
|
| 192 |
+
# ── Write audit info to context ───────────────────────────
|
| 193 |
+
if context is not None:
|
| 194 |
+
context["model_used"] = model_name
|
| 195 |
+
context["fallback_used"] = idx > 0
|
| 196 |
+
context.setdefault("metadata", {}).update({
|
| 197 |
+
"model_used": model_name,
|
| 198 |
+
"model_provider": provider,
|
| 199 |
+
"fallback_used": idx > 0,
|
| 200 |
+
"models_tried": tried + [model_name],
|
| 201 |
+
})
|
| 202 |
+
if idx > 0:
|
| 203 |
+
emit_event(
|
| 204 |
+
context,
|
| 205 |
+
"recovery_success",
|
| 206 |
+
{
|
| 207 |
+
"step": agent_name or "llm",
|
| 208 |
+
"provider": provider,
|
| 209 |
+
"model": model_name,
|
| 210 |
+
"models_tried": tried + [model_name],
|
| 211 |
+
},
|
| 212 |
+
agent=agent_name or "LLMService",
|
| 213 |
+
step="llm",
|
| 214 |
+
message=f"Recovered with {provider}/{model_name}",
|
| 215 |
+
)
|
| 216 |
+
|
| 217 |
+
level = "primary" if idx == 0 else f"fallback #{idx}"
|
| 218 |
+
logger.info(
|
| 219 |
+
"LLMService → %s/%s (%s, agent=%s)",
|
| 220 |
+
provider, model_name, level, agent_name or "unknown"
|
| 221 |
+
)
|
| 222 |
+
return response
|
| 223 |
+
|
| 224 |
+
except Exception as exc:
|
| 225 |
+
tried.append(model_name)
|
| 226 |
+
last_exc = exc
|
| 227 |
+
next_model = all_models[idx + 1] if idx + 1 < len(all_models) else None
|
| 228 |
+
if context is not None:
|
| 229 |
+
emit_event(
|
| 230 |
+
context,
|
| 231 |
+
"error_occurred",
|
| 232 |
+
{
|
| 233 |
+
"step": agent_name or "llm",
|
| 234 |
+
"message": str(exc),
|
| 235 |
+
"provider": provider,
|
| 236 |
+
"model": model_name,
|
| 237 |
+
},
|
| 238 |
+
agent=agent_name or "LLMService",
|
| 239 |
+
step="llm",
|
| 240 |
+
message=str(exc),
|
| 241 |
+
)
|
| 242 |
+
if next_model is not None:
|
| 243 |
+
emit_event(
|
| 244 |
+
context,
|
| 245 |
+
"recovery_triggered",
|
| 246 |
+
{
|
| 247 |
+
"step": agent_name or "llm",
|
| 248 |
+
"failed_provider": provider,
|
| 249 |
+
"failed_model": model_name,
|
| 250 |
+
"fallback_provider": next_model.get("provider"),
|
| 251 |
+
"fallback_model": next_model.get("model"),
|
| 252 |
+
},
|
| 253 |
+
agent=agent_name or "LLMService",
|
| 254 |
+
step="llm",
|
| 255 |
+
message=f"Fallback to {next_model.get('provider')}/{next_model.get('model')}",
|
| 256 |
+
)
|
| 257 |
+
self._log_fallback(agent_name, provider, model_name, exc, next_model)
|
| 258 |
+
continue
|
| 259 |
+
|
| 260 |
+
if not agent_name and not task_type:
|
| 261 |
+
try:
|
| 262 |
+
logger.warning("[LLMService] route exhausted - using legacy Gemini shim")
|
| 263 |
+
response = self._call_gemini(prompt)
|
| 264 |
+
if context is not None:
|
| 265 |
+
context["model_used"] = "gemini-legacy"
|
| 266 |
+
context["fallback_used"] = True
|
| 267 |
+
context.setdefault("metadata", {}).update({
|
| 268 |
+
"model_used": "gemini-legacy",
|
| 269 |
+
"model_provider": "gemini",
|
| 270 |
+
"fallback_used": True,
|
| 271 |
+
"models_tried": tried + ["gemini-legacy"],
|
| 272 |
+
})
|
| 273 |
+
return response
|
| 274 |
+
except Exception as gemini_exc:
|
| 275 |
+
last_exc = gemini_exc
|
| 276 |
+
tried.append("gemini-legacy")
|
| 277 |
+
|
| 278 |
+
logger.error("LLMService: all %d model(s) failed. Last: %s", len(tried), last_exc)
|
| 279 |
+
raise RuntimeError(
|
| 280 |
+
f"All LLM backends failed for agent='{agent_name}' task='{task_type}'. "
|
| 281 |
+
f"Tried: {tried}. Last error: {last_exc}"
|
| 282 |
+
) from last_exc
|
| 283 |
+
|
| 284 |
+
# ── Model dispatch ────────────────────────────────────────────────────────
|
| 285 |
+
|
| 286 |
+
def _call_model(self, provider: str, model_name: str, prompt: str, max_tokens: int) -> str:
|
| 287 |
+
if provider == "nim":
|
| 288 |
+
return self._call_nim(model_name, prompt, max_tokens)
|
| 289 |
+
elif provider == "openrouter":
|
| 290 |
+
return self._call_openrouter(model_name, prompt, max_tokens)
|
| 291 |
+
else:
|
| 292 |
+
raise RuntimeError(f"Unknown LLM provider: '{provider}'")
|
| 293 |
+
|
| 294 |
+
# ── NVIDIA NIM ────────────────────────────────────────────────────────────
|
| 295 |
+
|
| 296 |
+
    def _call_nim(self, model_name: str, prompt: str, max_tokens: int) -> str:
        """Run one completion through NVIDIA NIM (OpenAI-compatible API).

        Returns:
            The stripped response text.

        Raises:
            TimeoutError: Once per process when SIMULATE_NIM_FAILURE is on
                (error-simulation hook).
            RuntimeError: If NVIDIA_NIM_API_KEY is missing or the openai
                package is not installed.
        """
        # One-shot simulated failure: fires on the first NIM call only, then
        # flips the module-level latch so subsequent calls proceed normally.
        global _SIMULATED_NIM_FAILURE_USED
        if _SIMULATE_NIM_FAILURE and not _SIMULATED_NIM_FAILURE_USED:
            _SIMULATED_NIM_FAILURE_USED = True
            raise TimeoutError("NIM timeout")
        if not _NIM_API_KEY:
            raise RuntimeError("NVIDIA_NIM_API_KEY is not set.")
        try:
            from openai import OpenAI
        except ImportError:
            raise RuntimeError("openai not installed. Run: pip install openai")

        # Fresh client per call; SDK retries are disabled because retrying is
        # handled by _request_with_retry.
        client = OpenAI(
            base_url=_NIM_BASE_URL,
            api_key=_NIM_API_KEY,
            timeout=_REQUEST_TIMEOUT_S,
            max_retries=0,
        )
        text = self._request_with_retry(
            provider_label=f"NIM[{model_name}]",
            request_fn=lambda: self._create_chat_completion(client, model_name, prompt, max_tokens),
        )
        logger.debug("NIM[%s] raw: %r", model_name, text[:200])
        return text.strip()
|
| 320 |
+
|
| 321 |
+
# ── OpenRouter ────────────────────────────────────────────────────────────
|
| 322 |
+
|
| 323 |
+
def _call_openrouter(self, model_name: str, prompt: str, max_tokens: int) -> str:
|
| 324 |
+
if not _OPENROUTER_API_KEY:
|
| 325 |
+
raise RuntimeError("OPENROUTER_API_KEY is not set.")
|
| 326 |
+
try:
|
| 327 |
+
from openai import OpenAI
|
| 328 |
+
except ImportError:
|
| 329 |
+
raise RuntimeError("openai not installed. Run: pip install openai")
|
| 330 |
+
|
| 331 |
+
extra_headers = {"X-Title": os.getenv("OPENROUTER_TITLE", "NotiFlow")}
|
| 332 |
+
referer = os.getenv("OPENROUTER_REFERER")
|
| 333 |
+
if referer:
|
| 334 |
+
extra_headers["HTTP-Referer"] = referer
|
| 335 |
+
|
| 336 |
+
client = OpenAI(
|
| 337 |
+
base_url=_OPENROUTER_BASE,
|
| 338 |
+
api_key=_OPENROUTER_API_KEY,
|
| 339 |
+
default_headers=extra_headers,
|
| 340 |
+
timeout=_REQUEST_TIMEOUT_S,
|
| 341 |
+
max_retries=0,
|
| 342 |
+
)
|
| 343 |
+
text = self._request_with_retry(
|
| 344 |
+
provider_label=f"OpenRouter[{model_name}]",
|
| 345 |
+
request_fn=lambda: self._create_chat_completion(client, model_name, prompt, max_tokens),
|
| 346 |
+
)
|
| 347 |
+
logger.debug("OpenRouter[%s] raw: %r", model_name, text[:200])
|
| 348 |
+
return text.strip()
|
| 349 |
+
|
| 350 |
+
# ── Legacy shims — test_pipeline.py patches these directly ───────────────
|
| 351 |
+
# DO NOT rename or remove — existing tests mock them by name.
|
| 352 |
+
|
| 353 |
+
def _call_gemini(self, prompt: str) -> str:
|
| 354 |
+
"""Gemini shim — kept for test_pipeline.py backward compat."""
|
| 355 |
+
try:
|
| 356 |
+
from app.services.gemini_client import generate
|
| 357 |
+
except ImportError:
|
| 358 |
+
from app.services.gemini_client import generate # type: ignore
|
| 359 |
+
return generate(prompt)
|
| 360 |
+
|
| 361 |
+
def _create_chat_completion(self, client: Any, model_name: str, prompt: str, max_tokens: int) -> str:
|
| 362 |
+
completion = client.chat.completions.create(
|
| 363 |
+
model=model_name,
|
| 364 |
+
messages=[{"role": "user", "content": prompt}],
|
| 365 |
+
temperature=0.0,
|
| 366 |
+
max_tokens=max_tokens,
|
| 367 |
+
stream=getattr(self, "_stream_requested", False),
|
| 368 |
+
)
|
| 369 |
+
if getattr(self, "_stream_requested", False):
|
| 370 |
+
chunks: list[str] = []
|
| 371 |
+
for event in completion:
|
| 372 |
+
if not getattr(event, "choices", None):
|
| 373 |
+
continue
|
| 374 |
+
delta = getattr(event.choices[0].delta, "content", None)
|
| 375 |
+
if delta:
|
| 376 |
+
chunks.append(delta)
|
| 377 |
+
return "".join(chunks)
|
| 378 |
+
return completion.choices[0].message.content or ""
|
| 379 |
+
|
| 380 |
+
def _request_with_retry(self, provider_label: str, request_fn: Any) -> str:
|
| 381 |
+
last_exc: Optional[Exception] = None
|
| 382 |
+
for attempt in range(_RETRY_COUNT + 1):
|
| 383 |
+
try:
|
| 384 |
+
return request_fn()
|
| 385 |
+
except Exception as exc:
|
| 386 |
+
last_exc = exc
|
| 387 |
+
if attempt >= _RETRY_COUNT:
|
| 388 |
+
break
|
| 389 |
+
logger.warning(
|
| 390 |
+
"%s request failed (%s) - retry %d/%d",
|
| 391 |
+
provider_label,
|
| 392 |
+
exc,
|
| 393 |
+
attempt + 1,
|
| 394 |
+
_RETRY_COUNT,
|
| 395 |
+
)
|
| 396 |
+
raise last_exc if last_exc is not None else RuntimeError(f"{provider_label} failed")
|
| 397 |
+
|
| 398 |
+
def _log_fallback(
|
| 399 |
+
self,
|
| 400 |
+
agent_name: str,
|
| 401 |
+
provider: str,
|
| 402 |
+
model_name: str,
|
| 403 |
+
exc: Exception,
|
| 404 |
+
next_model: Optional[dict[str, Any]],
|
| 405 |
+
) -> None:
|
| 406 |
+
agent_label = agent_name or "LLMService"
|
| 407 |
+
failure_kind = "timeout" if self._is_timeout_error(exc) else "error"
|
| 408 |
+
if next_model is None:
|
| 409 |
+
logger.error(
|
| 410 |
+
"[%s] %s %s on %s - no fallback remaining",
|
| 411 |
+
agent_label,
|
| 412 |
+
self._provider_label(provider),
|
| 413 |
+
failure_kind,
|
| 414 |
+
model_name,
|
| 415 |
+
)
|
| 416 |
+
return
|
| 417 |
+
logger.warning(
|
| 418 |
+
"[%s] %s %s - fallback %s",
|
| 419 |
+
agent_label,
|
| 420 |
+
self._provider_label(provider),
|
| 421 |
+
failure_kind,
|
| 422 |
+
self._fallback_target_label(next_model),
|
| 423 |
+
)
|
| 424 |
+
|
| 425 |
+
@staticmethod
|
| 426 |
+
def _provider_label(provider: str) -> str:
|
| 427 |
+
return "NIM" if provider == "nim" else "OpenRouter"
|
| 428 |
+
|
| 429 |
+
def _fallback_target_label(self, model_entry: dict[str, Any]) -> str:
|
| 430 |
+
provider = model_entry.get("provider", "")
|
| 431 |
+
model_name = str(model_entry.get("model", ""))
|
| 432 |
+
if provider == "openrouter":
|
| 433 |
+
return "OpenRouter"
|
| 434 |
+
if provider == "nim" and "v3.1" in model_name:
|
| 435 |
+
return "v3.1"
|
| 436 |
+
if provider == "nim" and "v3.2" in model_name:
|
| 437 |
+
return "v3.2"
|
| 438 |
+
return model_name or self._provider_label(provider)
|
| 439 |
+
|
| 440 |
+
@staticmethod
|
| 441 |
+
def _is_timeout_error(exc: Exception) -> bool:
|
| 442 |
+
name = exc.__class__.__name__.lower()
|
| 443 |
+
text = str(exc).lower()
|
| 444 |
+
return "timeout" in name or "timeout" in text or "timed out" in text
|
| 445 |
+
|
| 446 |
+
|
| 447 |
+
# ---------------------------------------------------------------------------
|
| 448 |
+
# Module-level singleton
|
| 449 |
+
# ---------------------------------------------------------------------------
|
| 450 |
+
|
| 451 |
+
# Lazily created module-level singleton.
_instance: Optional[LLMService] = None


def get_llm() -> LLMService:
    """Return the process-wide LLMService singleton, constructing it on first use."""
    global _instance
    if _instance is not None:
        return _instance
    _instance = LLMService()
    return _instance
|
app/core/orchestrator.py
ADDED
|
@@ -0,0 +1,566 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
app/core/orchestrator.py
|
| 3 |
+
------------------------
|
| 4 |
+
Dynamic Agent Orchestrator for NotiFlow Autonomous (Phase 2).
|
| 5 |
+
|
| 6 |
+
Replaces the static pipeline list with a planner-driven execution loop.
|
| 7 |
+
|
| 8 |
+
Flow
|
| 9 |
+
----
|
| 10 |
+
create_context()
|
| 11 |
+
↓
|
| 12 |
+
build_plan(ctx) ← Planner evaluates rules, writes ctx["plan"]
|
| 13 |
+
↓
|
| 14 |
+
for step in ctx["plan"]:
|
| 15 |
+
agent = AGENT_REGISTRY[step["agent"]]
|
| 16 |
+
agent.run(ctx) ← executes, writes audit entry to ctx["history"]
|
| 17 |
+
if failed and step["critical"]: abort
|
| 18 |
+
↓
|
| 19 |
+
_build_result(ctx) ← flatten ctx → public API response shape
|
| 20 |
+
|
| 21 |
+
Key properties
|
| 22 |
+
--------------
|
| 23 |
+
- No hardcoded execution order anywhere in this file
|
| 24 |
+
- LedgerAgent is NOT special-cased — it is just another plan step
|
| 25 |
+
- Adding a new agent = one registry entry + one planner rule, zero changes here
|
| 26 |
+
- Non-critical step failures are recorded and skipped, not aborted
|
| 27 |
+
|
| 28 |
+
Return shape (backward compatible):
|
| 29 |
+
{
|
| 30 |
+
"message": str,
|
| 31 |
+
"intent": str,
|
| 32 |
+
"data": dict,
|
| 33 |
+
"event": dict,
|
| 34 |
+
"sheet_updated": bool,
|
| 35 |
+
"context": dict, ← full context (debug/audit)
|
| 36 |
+
}
|
| 37 |
+
"""
|
| 38 |
+
|
| 39 |
+
from __future__ import annotations
|
| 40 |
+
|
| 41 |
+
import logging
|
| 42 |
+
from typing import Any
|
| 43 |
+
|
| 44 |
+
from app.core.context import create_context, update_context, log_step, add_error
|
| 45 |
+
from app.core.event_bus import emit_event, push_live_log
|
| 46 |
+
from app.core.planner import build_plan
|
| 47 |
+
from app.core.autonomy_planner import build_autonomy_plan
|
| 48 |
+
from app.core.priority import reset_priority_score
|
| 49 |
+
from app.core.registry import get_agent
|
| 50 |
+
|
| 51 |
+
logger = logging.getLogger(__name__)

_MAX_REPLANS = 2  # hard cap on feedback-loop iterations

# Maps a planner step key → (success event type, display agent name).
# Used by _emit_step_success for steps that finish without raising; any
# key not listed here falls back to ("execution_done", <step key>).
_STEP_EVENT_MAP: dict[str, tuple[str, str]] = {
    "intent": ("intent_detected", "IntentAgent"),
    "extraction": ("extraction_done", "ExtractionAgent"),
    "validation": ("validation_done", "ValidationAgent"),
    "invoice_agent": ("invoice_generated", "InvoiceAgent"),
    "payment_agent": ("payment_requested", "PaymentAgent"),
    "ledger": ("execution_done", "LedgerAgent"),
    "recovery": ("recovery_triggered", "RecoveryAgent"),
}
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def process_message(message: str, source: str = "system") -> dict[str, Any]:
    """
    Run a raw business message through the full NotiFlow agent pipeline.

    Phase 2: main pipeline driven by Planner (dynamic).
    Phase 3: autonomy layer driven by AutonomyPlanner (dynamic).
    Fix: feedback loop — replan up to _MAX_REPLANS times if needed.

    Args:
        message: Raw Hinglish or English business message.
        source: Notification source (e.g. "whatsapp", "gpay").

    Returns:
        Flat result dict + full context.

    Raises:
        ValueError: Empty message.
    """
    if not message or not message.strip():
        raise ValueError("Message cannot be empty.")

    ctx = create_context(message.strip(), source=source)
    logger.info("Orchestrator ← %r (source=%s)", message, source)
    _emit_pipeline_event(
        ctx,
        "message_received",
        {
            "message": ctx["message"],
            "source": source,
            "state": ctx.get("state"),
        },
        agent="Orchestrator",
        step="message",
        message=f"Received message from {source}",
        log_text=f"[Orchestrator] Message received from {source}: {ctx['message']}",
    )

    # ── Main plan + autonomy + feedback loop ──────────────────────────────
    # One cycle = main plan + autonomy plan; the loop repeats only when
    # _should_replan fires and the replan cap has not been reached.
    while True:
        retry_count = ctx["metadata"].get("retry_count", 0)

        # ── 1. Build and run main plan ────────────────────────────────────
        plan = build_plan(ctx)
        logger.info(
            "[cycle=%d] Main plan: [%s]",
            retry_count,
            ", ".join(s["agent"] for s in plan),
        )
        ctx = _run_plan(ctx, plan)

        # ── 2. Build and run autonomy plan ────────────────────────────────
        autonomy_plan = build_autonomy_plan(ctx)
        logger.info(
            "[cycle=%d] Autonomy plan: [%s]",
            retry_count,
            ", ".join(s["agent"] for s in autonomy_plan),
        )
        ctx = _run_autonomy(ctx, autonomy_plan)

        # ── 3. Check if replan is needed ──────────────────────────────────
        # Cap is checked AFTER a full cycle, so at most _MAX_REPLANS + 1
        # cycles run in total (retry_count values 0.._MAX_REPLANS).
        if retry_count >= _MAX_REPLANS:
            logger.info("Replan cap reached (%d) — stopping loop.", _MAX_REPLANS)
            break

        if not _should_replan(ctx):
            logger.info("[cycle=%d] No replan needed.", retry_count)
            break

        # ── 4. Prepare for replan ─────────────────────────────────────────
        logger.warning(
            "[cycle=%d] Replan triggered — retry_count → %d",
            retry_count, retry_count + 1,
        )
        _emit_pipeline_event(
            ctx,
            "recovery_triggered",
            {
                "retry_count": retry_count + 1,
                "reason": "replan_required",
                "errors": ctx.get("errors", []),
            },
            agent="RecoveryAgent",
            step="recovery",
            message="Recovery triggered after pipeline replan request",
            log_text=f"[RecoveryAgent] Recovery triggered after cycle {retry_count}",
        )
        ctx["metadata"]["retry_count"] = retry_count + 1

        # Reset priority score so contributors don't double-count
        reset_priority_score(ctx)

        # Clear autonomy outputs so fresh evaluation happens
        for key in ("verification", "monitor", "risk", "alerts", "recovery"):
            ctx.pop(key, None)

        # Clear accumulated errors from previous cycle (keep original errors)
        # NOTE(review): only "[Monitor]" errors are dropped here, while
        # _should_replan ignores "[Autonomy]" ones — any other persisting
        # error keeps triggering replans until the cap; confirm intended.
        ctx["errors"] = [e for e in ctx["errors"] if not e.startswith("[Monitor]")]

    # ── Mark final state ──────────────────────────────────────────────────
    # NOTE(review): a "partial" state is overwritten to "completed" here —
    # confirm that partial runs should be reported as completed.
    if ctx.get("state") not in ("failed",):
        update_context(ctx, state="completed")

    logger.info(
        "Orchestrator done → state=%s intents=%s errors=%d risk=%s priority=%s score=%d",
        ctx["state"],
        ctx.get("intents", [ctx.get("intent")]),
        len(ctx["errors"]),
        ctx.get("risk", {}).get("level", "n/a"),
        ctx.get("priority", "n/a"),
        ctx.get("priority_score", 0),
    )

    return _build_result(ctx)
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
def _run_plan(ctx: dict[str, Any], plan: list[dict]) -> dict[str, Any]:
    """Execute the planner-generated main pipeline steps.

    Each step dict carries an ``agent`` registry key and an optional
    ``critical`` flag (default True). A critical failure aborts the loop;
    a non-critical one is recorded and execution continues.
    """
    for step in plan:
        agent_key = step["agent"]
        is_critical = step.get("critical", True)
        _emit_pipeline_step_event(ctx, agent_key, "started")

        # Resolve the agent; a missing registry entry is treated like a
        # step failure (abort if critical, otherwise skip and continue).
        try:
            agent = get_agent(agent_key)
        except KeyError as exc:
            msg = f"Orchestrator: {exc}"
            logger.error(msg)
            add_error(ctx, msg)
            log_step(ctx, agent_key, "skipped",
                     detail=f"Agent not found in registry: {agent_key}")
            _emit_pipeline_event(
                ctx,
                "error_occurred",
                {
                    "step": agent_key,
                    "message": msg,
                    "critical": is_critical,
                },
                agent="Orchestrator",
                step=agent_key,
                message=msg,
                log_text=f"[Orchestrator] {msg}",
                status="error",
            )
            if is_critical:
                update_context(ctx, state="failed")
                break
            continue

        # Run the agent; it returns the (possibly replaced) context.
        try:
            ctx = agent.run(ctx)
            _emit_pipeline_step_event(ctx, agent_key, "completed")
            _emit_step_success(ctx, agent_key)
        except Exception as exc:
            # NOTE(review): unlike the missing-agent branch above, this
            # branch does not call add_error — confirm whether raised
            # exceptions should also land in ctx["errors"].
            logger.error("Orchestrator: %s raised %s", agent_key, exc)
            _emit_pipeline_step_event(ctx, agent_key, "failed", str(exc))
            _emit_pipeline_event(
                ctx,
                "error_occurred",
                {
                    "step": agent_key,
                    "message": str(exc),
                    "critical": is_critical,
                },
                agent=getattr(agent, "name", agent_key),
                step=agent_key,
                message=str(exc),
                log_text=f"[{getattr(agent, 'name', agent_key)}] {exc}",
                status="error",
            )
            if is_critical:
                logger.error("Critical agent '%s' failed — aborting pipeline.", agent_key)
                break
            else:
                logger.warning("Non-critical agent '%s' failed — continuing.", agent_key)
                # A non-critical failure demotes a hard "failed" state to
                # "partial" so the run is not reported as a total failure.
                if ctx.get("state") == "failed":
                    update_context(ctx, state="partial")
                continue

        # An agent may set state="failed" without raising; honor it only
        # for critical steps.
        if ctx.get("state") == "failed" and is_critical:
            logger.error("Pipeline in failed state after critical agent '%s'.", agent_key)
            break

    return ctx
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
def _run_autonomy(ctx: dict[str, Any], plan: list[dict]) -> dict[str, Any]:
    """
    Run the dynamically planned autonomy sequence.

    All autonomy agents are non-critical — failures are recorded but
    never abort the sequence or corrupt the main pipeline result.
    """
    for step in plan:
        agent_key = step["agent"]
        _emit_pipeline_step_event(ctx, agent_key, "started")
        # Unknown autonomy agents are skipped silently (warn only).
        try:
            agent = get_agent(agent_key)
        except KeyError:
            logger.warning("[Autonomy] agent '%s' not in registry — skipping", agent_key)
            continue
        try:
            ctx = agent.run(ctx)
            _emit_pipeline_step_event(ctx, agent_key, "completed")
            # The recovery step additionally emits its own outcome event.
            if agent_key == "recovery":
                recovery = ctx.get("recovery", {}) or {}
                recovery_event = "recovery_success" if recovery.get("success") else "recovery_triggered"
                _emit_pipeline_event(
                    ctx,
                    recovery_event,
                    recovery or {"action": "none"},
                    agent=getattr(agent, "name", agent_key),
                    step=agent_key,
                    message=recovery.get("details") or "Recovery step completed",
                    log_text=f"[{getattr(agent, 'name', agent_key)}] {recovery.get('details', 'Recovery step completed')}",
                )
        except Exception as exc:
            logger.error("[Autonomy] '%s' raised %s — continuing", agent_key, exc)
            add_error(ctx, f"[Autonomy] {agent_key} failed: {exc}")
            _emit_pipeline_step_event(ctx, agent_key, "failed", str(exc))
            _emit_pipeline_event(
                ctx,
                "error_occurred",
                {
                    "step": agent_key,
                    "message": str(exc),
                    "critical": False,
                },
                agent=getattr(agent, "name", agent_key),
                step=agent_key,
                message=str(exc),
                log_text=f"[{getattr(agent, 'name', agent_key)}] {exc}",
                status="error",
            )
            # Autonomy failures are non-critical: demote a hard "failed"
            # state to "partial" so the main result is not corrupted.
            if ctx.get("state") == "failed":
                update_context(ctx, state="partial")
    return ctx
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
def _should_replan(ctx: dict[str, Any]) -> bool:
|
| 306 |
+
"""
|
| 307 |
+
Return True if the feedback loop should trigger a replan.
|
| 308 |
+
|
| 309 |
+
Conditions (any one is sufficient):
|
| 310 |
+
1. verification.status == "fail"
|
| 311 |
+
2. risk.level == "high"
|
| 312 |
+
3. errors list is non-empty (excluding autonomy-internal noise)
|
| 313 |
+
"""
|
| 314 |
+
v_status = ctx.get("verification", {}).get("status", "ok")
|
| 315 |
+
if v_status == "fail":
|
| 316 |
+
logger.info("[Feedback] replan trigger: verification=fail")
|
| 317 |
+
return True
|
| 318 |
+
|
| 319 |
+
risk_level = ctx.get("risk", {}).get("level", "low")
|
| 320 |
+
if risk_level == "high":
|
| 321 |
+
logger.info("[Feedback] replan trigger: risk=high")
|
| 322 |
+
return True
|
| 323 |
+
|
| 324 |
+
# Only count errors that are not pure autonomy-internal noise
|
| 325 |
+
meaningful_errors = [
|
| 326 |
+
e for e in ctx.get("errors", [])
|
| 327 |
+
if not e.startswith("[Autonomy]")
|
| 328 |
+
]
|
| 329 |
+
if meaningful_errors:
|
| 330 |
+
logger.info(
|
| 331 |
+
"[Feedback] replan trigger: %d meaningful error(s)", len(meaningful_errors)
|
| 332 |
+
)
|
| 333 |
+
return True
|
| 334 |
+
|
| 335 |
+
return False
|
| 336 |
+
|
| 337 |
+
|
| 338 |
+
def _build_result(ctx: dict[str, Any]) -> dict[str, Any]:
    """Flatten context into the public API response shape.

    Pure read of ``ctx`` (no mutation). Top-level keys are the
    backward-compatible public API fields; the full context is attached
    under ``"context"`` for debugging/audit.
    """
    event = ctx.get("event", {}) or {}
    data = ctx.get("data", {}) or {}
    # Nested order info is only trusted when it is actually a dict.
    event_order = event.get("order", {}) if isinstance(event.get("order"), dict) else {}
    risk = ctx.get("risk", {}) or {}
    source = ctx.get("source", "system")

    payment_state = ctx.get("payment") or {}

    # Amount resolution order: payment state → event → extracted data → 0.
    amount = payment_state.get("amount", event.get("amount"))
    if amount is None:
        amount = data.get("amount", 0)

    # Coerce to float defensively; non-numeric amounts collapse to 0.
    try:
        numeric_amount = float(amount or 0)
    except (TypeError, ValueError):
        numeric_amount = 0

    return {
        # ── Core (backward compatible) ────────────────────────────────────
        "message": ctx["message"],
        "intent": ctx.get("intent") or "other",
        "intents": ctx.get("intents") or [ctx.get("intent") or "other"],
        "data": ctx.get("data", {}),
        "multi_data": ctx.get("multi_data", {}),
        "event": ctx.get("event", {}),
        "invoice": ctx.get("invoice"),
        "events": ctx.get("events", []),
        "live_logs": ctx.get("live_logs", []),
        "history": ctx.get("history", []),
        "sheet_updated": ctx.get("metadata", {}).get("sheet_updated", False),
        "customer": {"name": event.get("customer") or data.get("customer") or "Walk-in customer"},
        # Each field falls back through event → nested order → extracted data.
        "order": {
            "item": event.get("item") or event_order.get("item") or data.get("item"),
            "quantity": event.get("quantity") or event_order.get("quantity") or data.get("quantity"),
            "status": event.get("status") or "received",
            "source": source,
        },
        # Payment fields fall back through payment state → invoice → event/data.
        "payment": {
            "invoice_id": payment_state.get("invoice_id") or (ctx.get("invoice") or {}).get("invoice_id") or event.get("invoice_id") or data.get("invoice_id"),
            "amount": payment_state.get("amount") or (ctx.get("invoice") or {}).get("total") or numeric_amount,
            "status": payment_state.get("status") or (ctx.get("invoice") or {}).get("status") or ("paid" if numeric_amount > 0 else "pending"),
        },
        "decision": {
            "intent": ctx.get("intent") or "other",
            "priority": ctx.get("priority", "low"),
            "priority_score": ctx.get("priority_score", 0),
            "risk": risk.get("level"),
        },
        # ── Autonomy fields ───────────────────────────────────────────────
        "verification": ctx.get("verification", {}),
        "risk": ctx.get("risk", {}),
        "priority": ctx.get("priority", "low"),
        "priority_score": ctx.get("priority_score", 0),
        "alerts": ctx.get("alerts", []),
        "recovery": ctx.get("recovery", {}),
        "monitor": ctx.get("monitor", {}),
        # ── Debug ─────────────────────────────────────────────────────────
        "context": ctx,
    }
|
| 399 |
+
|
| 400 |
+
|
| 401 |
+
def _emit_step_success(ctx: dict[str, Any], agent_key: str) -> None:
    """Emit the typed success event for a completed main-pipeline step.

    Invoice and payment steps are intentionally excluded from this generic
    emitter; unmapped steps fall back to a generic "execution_done" event.
    """
    if agent_key in ("invoice_agent", "payment_agent"):
        return
    event_type, agent_name = _STEP_EVENT_MAP.get(agent_key, ("execution_done", agent_key))
    summary, log_line = _build_step_messages(ctx, agent_key, agent_name)
    _emit_pipeline_event(
        ctx,
        event_type,
        _build_step_payload(ctx, agent_key),
        agent=agent_name,
        step=agent_key,
        message=summary,
        log_text=log_line,
    )
|
| 416 |
+
|
| 417 |
+
def _build_step_payload(ctx: dict[str, Any], agent_key: str) -> dict[str, Any]:
|
| 418 |
+
if agent_key == "intent":
|
| 419 |
+
return {
|
| 420 |
+
"intent": ctx.get("intent") or "other",
|
| 421 |
+
"intents": ctx.get("intents", []),
|
| 422 |
+
"model_used": ctx.get("model_used"),
|
| 423 |
+
}
|
| 424 |
+
if agent_key == "extraction":
|
| 425 |
+
return {
|
| 426 |
+
"intent": ctx.get("intent") or "other",
|
| 427 |
+
"data": ctx.get("data", {}),
|
| 428 |
+
"multi_data": ctx.get("multi_data", {}),
|
| 429 |
+
}
|
| 430 |
+
if agent_key == "validation":
|
| 431 |
+
return {
|
| 432 |
+
"intent": ctx.get("intent") or "other",
|
| 433 |
+
"data": ctx.get("data", {}),
|
| 434 |
+
}
|
| 435 |
+
if agent_key == "invoice_agent":
|
| 436 |
+
return {
|
| 437 |
+
"intent": ctx.get("intent") or "other",
|
| 438 |
+
"invoice": ctx.get("invoice"),
|
| 439 |
+
}
|
| 440 |
+
if agent_key == "payment_agent":
|
| 441 |
+
return {
|
| 442 |
+
"intent": ctx.get("intent") or "other",
|
| 443 |
+
"payment": ctx.get("payment"),
|
| 444 |
+
"invoice": ctx.get("invoice"),
|
| 445 |
+
}
|
| 446 |
+
if agent_key == "ledger":
|
| 447 |
+
return {
|
| 448 |
+
"sheet_updated": ctx.get("metadata", {}).get("sheet_updated", False),
|
| 449 |
+
"event": ctx.get("event", {}),
|
| 450 |
+
}
|
| 451 |
+
if agent_key == "recovery":
|
| 452 |
+
return ctx.get("recovery", {})
|
| 453 |
+
return {
|
| 454 |
+
"state": ctx.get("state"),
|
| 455 |
+
"event": ctx.get("event", {}),
|
| 456 |
+
}
|
| 457 |
+
|
| 458 |
+
|
| 459 |
+
def _build_step_messages(ctx: dict[str, Any], agent_key: str, agent_name: str) -> tuple[str, str]:
|
| 460 |
+
if agent_key == "intent":
|
| 461 |
+
intent = ctx.get("intent") or "other"
|
| 462 |
+
return (
|
| 463 |
+
f"Intent detected: {intent}",
|
| 464 |
+
f"[{agent_name}] Intent detected: {intent}",
|
| 465 |
+
)
|
| 466 |
+
if agent_key == "extraction":
|
| 467 |
+
data = ctx.get("data", {}) or {}
|
| 468 |
+
detail_parts = [f"{key}={value}" for key, value in data.items() if value not in (None, "", [], {})]
|
| 469 |
+
detail = ", ".join(detail_parts) or "no structured fields extracted"
|
| 470 |
+
return (
|
| 471 |
+
f"Extraction completed: {detail}",
|
| 472 |
+
f"[{agent_name}] Extracted: {detail}",
|
| 473 |
+
)
|
| 474 |
+
if agent_key == "validation":
|
| 475 |
+
data = ctx.get("data", {}) or {}
|
| 476 |
+
detail_parts = [f"{key}={value}" for key, value in data.items() if value not in (None, "", [], {})]
|
| 477 |
+
detail = ", ".join(detail_parts) or "validation passed with empty payload"
|
| 478 |
+
return (
|
| 479 |
+
f"Validation completed: {detail}",
|
| 480 |
+
f"[{agent_name}] Validated: {detail}",
|
| 481 |
+
)
|
| 482 |
+
if agent_key == "invoice_agent":
|
| 483 |
+
invoice_id = (ctx.get("invoice") or {}).get("invoice_id") or "unknown"
|
| 484 |
+
return (
|
| 485 |
+
f"Invoice generated: {invoice_id}",
|
| 486 |
+
f"[{agent_name}] Invoice generated: {invoice_id}",
|
| 487 |
+
)
|
| 488 |
+
if agent_key == "payment_agent":
|
| 489 |
+
invoice_id = (ctx.get("payment") or {}).get("invoice_id") or (ctx.get("invoice") or {}).get("invoice_id") or "unknown"
|
| 490 |
+
amount = (ctx.get("payment") or {}).get("amount", 0)
|
| 491 |
+
return (
|
| 492 |
+
f"Payment requested: {invoice_id}",
|
| 493 |
+
f"[{agent_name}] Payment requested: {invoice_id} amount={amount}",
|
| 494 |
+
)
|
| 495 |
+
if agent_key == "ledger":
|
| 496 |
+
updated = ctx.get("metadata", {}).get("sheet_updated", False)
|
| 497 |
+
return (
|
| 498 |
+
f"Execution completed: sheet_updated={updated}",
|
| 499 |
+
f"[{agent_name}] Ledger update status: {updated}",
|
| 500 |
+
)
|
| 501 |
+
recovery = (ctx.get("recovery", {}) or {}).get("details") or "Recovery step completed"
|
| 502 |
+
return (recovery, f"[{agent_name}] {recovery}")
|
| 503 |
+
|
| 504 |
+
|
| 505 |
+
def _emit_pipeline_event(
    ctx: dict[str, Any],
    event_type: str,
    payload: dict[str, Any],
    *,
    agent: str,
    step: str,
    message: str,
    log_text: str,
    status: str = "success",
) -> None:
    """Push a live-log entry, then emit both a "log" event and the typed event.

    Both emitted events share the same live-log entry so consumers can
    correlate them.
    """
    entry = push_live_log(
        ctx,
        {
            "agent": agent,
            "status": status,
            "action": message,
            "detail": log_text,
        },
    )
    emit_event(
        ctx,
        "log",
        {"step": step, "message": log_text},
        agent=agent,
        step=step,
        message=log_text,
        log_entry=entry,
    )
    emit_event(
        ctx,
        event_type,
        payload,
        agent=agent,
        step=step,
        message=message,
        log_entry=entry,
    )
|
| 546 |
+
|
| 547 |
+
|
| 548 |
+
def _emit_pipeline_step_event(
    ctx: dict[str, Any],
    step: str,
    status: str,
    detail: str = "",
) -> None:
    """Emit a generic "pipeline_step" lifecycle event (started/completed/failed)."""
    payload = {"step": step, "status": status, "detail": detail}
    emit_event(
        ctx,
        "pipeline_step",
        payload,
        agent="Orchestrator",
        step=step,
        message=f"{step} {status}",
    )
|
app/core/planner.py
ADDED
|
@@ -0,0 +1,197 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
app/core/planner.py
|
| 3 |
+
-------------------
|
| 4 |
+
Decision Engine (Planner) for NotiFlow Autonomous.
|
| 5 |
+
|
| 6 |
+
Converts a context snapshot into an ordered execution plan.
|
| 7 |
+
|
| 8 |
+
Design
|
| 9 |
+
------
|
| 10 |
+
The planner uses an ordered list of PlanRule objects. Each rule has:
|
| 11 |
+
- agent: registry key of the agent to run
|
| 12 |
+
- condition: callable(context) → bool
|
| 13 |
+
True = include this agent in the plan
|
| 14 |
+
False = skip it
|
| 15 |
+
- critical: if True, a failure from this agent aborts the pipeline
|
| 16 |
+
if False, failure is recorded but execution continues
|
| 17 |
+
|
| 18 |
+
Plan output shape (written to context["plan"]):
|
| 19 |
+
[
|
| 20 |
+
{"agent": "intent", "critical": True},
|
| 21 |
+
{"agent": "extraction", "critical": True},
|
| 22 |
+
{"agent": "validation", "critical": True},
|
| 23 |
+
{"agent": "invoice_agent", "critical": True},
|
| 24 |
+
{"agent": "payment_agent", "critical": True},
|
| 25 |
+
{"agent": "ledger", "critical": False},
|
| 26 |
+
]
|
| 27 |
+
|
| 28 |
+
Skipping logic (avoids redundant work on re-entry):
|
| 29 |
+
- "intent" skipped if context["intent"] is already set
|
| 30 |
+
- "extraction" skipped if context["data"] is already non-empty
|
| 31 |
+
- "validation" skipped if context["state"] == "validated"
|
| 32 |
+
- "invoice_agent" / "payment_agent" included only for order intents lacking an invoice/payment
|
| 33 |
+
- "ledger" always included (idempotent sync)
|
| 34 |
+
|
| 35 |
+
Extending
|
| 36 |
+
---------
|
| 37 |
+
To add a new step, append one PlanRule to _RULES. No other file changes.
|
| 38 |
+
|
| 39 |
+
Public API
|
| 40 |
+
----------
|
| 41 |
+
build_plan(context) -> list[dict]
|
| 42 |
+
Returns the ordered plan list and writes it to context["plan"].
|
| 43 |
+
"""
|
| 44 |
+
|
| 45 |
+
from __future__ import annotations
|
| 46 |
+
|
| 47 |
+
from dataclasses import dataclass, field
|
| 48 |
+
from typing import Any, Callable
|
| 49 |
+
|
| 50 |
+
# ---------------------------------------------------------------------------
|
| 51 |
+
# Rule definition
|
| 52 |
+
# ---------------------------------------------------------------------------
|
| 53 |
+
|
| 54 |
+
@dataclass
class PlanRule:
    """
    A single conditional step in the execution plan.

    The planner walks an ordered list of these rules; each rule whose
    ``condition`` returns True contributes one step to the plan.

    Attributes:
        agent: Key in AGENT_REGISTRY to execute.
        condition: callable(ctx) → bool — True means "include this agent".
        critical: If True, failure aborts the rest of the pipeline.
                  If False, failure is logged and execution continues.
        description: Human-readable reason for this rule (for audit logs).
    """
    # Registry key resolved by the orchestrator (never an agent instance).
    agent: str
    condition: Callable[[dict[str, Any]], bool]
    critical: bool = True
    description: str = ""
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
# ---------------------------------------------------------------------------
|
| 73 |
+
# Rule set (order matters — this IS the pipeline definition)
|
| 74 |
+
# ---------------------------------------------------------------------------
|
| 75 |
+
|
| 76 |
+
def _intent_needed(ctx: dict[str, Any]) -> bool:
|
| 77 |
+
"""Run intent detection unless intent is already resolved."""
|
| 78 |
+
return not ctx.get("intent")
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def _extraction_needed(ctx: dict[str, Any]) -> bool:
|
| 82 |
+
"""Run extraction unless structured data already exists for all detected intents."""
|
| 83 |
+
if not ctx.get("data") and not ctx.get("multi_data"):
|
| 84 |
+
return True
|
| 85 |
+
# On replan: re-extract if intents changed or multi_data is missing entries
|
| 86 |
+
intents = ctx.get("intents") or []
|
| 87 |
+
multi_data = ctx.get("multi_data", {})
|
| 88 |
+
if intents and any(i not in multi_data for i in intents):
|
| 89 |
+
return True
|
| 90 |
+
return False
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def _validation_needed(ctx: dict[str, Any]) -> bool:
|
| 94 |
+
"""Run validation unless already validated."""
|
| 95 |
+
return ctx.get("state") != "validated"
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def _invoice_needed(ctx: dict[str, Any]) -> bool:
|
| 99 |
+
"""Generate an invoice for order workflows when not already present."""
|
| 100 |
+
return (ctx.get("intent") or "").lower() == "order" and not ctx.get("invoice")
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def _payment_needed(ctx: dict[str, Any]) -> bool:
|
| 104 |
+
"""Create pending payment state after invoice generation."""
|
| 105 |
+
return (ctx.get("intent") or "").lower() == "order" and not ctx.get("payment")
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def _ledger_needed(ctx: dict[str, Any]) -> bool:
|
| 109 |
+
"""
|
| 110 |
+
Run ledger sync unless the pipeline failed before routing.
|
| 111 |
+
We still attempt it on partial failures so any partial data is recorded.
|
| 112 |
+
"""
|
| 113 |
+
return True # always attempt — LedgerAgent is non-fatal anyway
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
# Ordered rule set — this list IS the pipeline definition. Append here to
# add a step; no other file changes are required.
_RULES: list[PlanRule] = [
    PlanRule(
        agent="intent",
        condition=_intent_needed,
        critical=True,
        description="Classify business intent from message",
    ),
    PlanRule(
        agent="extraction",
        condition=_extraction_needed,
        critical=True,
        description="Extract structured fields from message",
    ),
    PlanRule(
        agent="validation",
        condition=_validation_needed,
        critical=True,
        description="Normalise and validate extracted data",
    ),
    PlanRule(
        agent="invoice_agent",
        condition=_invoice_needed,
        critical=True,
        description="Generate structured invoice from validated data",
    ),
    PlanRule(
        agent="payment_agent",
        condition=_payment_needed,
        critical=True,
        description="Prepare pending payment workflow",
    ),
    PlanRule(
        agent="ledger",
        condition=_ledger_needed,
        critical=False,  # Sheets failure must never crash the pipeline
        description="Sync transaction to Google Sheets ledger",
    ),
]
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
# ---------------------------------------------------------------------------
|
| 157 |
+
# Public API
|
| 158 |
+
# ---------------------------------------------------------------------------
|
| 159 |
+
|
| 160 |
+
def build_plan(context: dict[str, Any]) -> list[dict[str, Any]]:
    """
    Evaluate every PlanRule against the context and produce the plan.

    The plan is written to ``context["plan"]`` and also returned.

    Args:
        context: The live request context dict.

    Returns:
        Ordered list of steps, each::

            {
                "agent": str,        # registry key
                "critical": bool,    # abort pipeline on failure?
                "description": str,  # human-readable reason
            }

    Example:
        >>> ctx = create_context("rahul ne 15000 bheja")
        >>> build_plan(ctx)
        [
            {"agent": "intent", "critical": True, "description": "..."},
            {"agent": "extraction", "critical": True, "description": "..."},
            {"agent": "validation", "critical": True, "description": "..."},
            {"agent": "ledger", "critical": False, "description": "..."},
        ]
    """
    steps = [
        {
            "agent": rule.agent,
            "critical": rule.critical,
            "description": rule.description,
        }
        for rule in _RULES
        if rule.condition(context)
    ]
    context["plan"] = steps
    return steps
|
app/core/priority.py
ADDED
|
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
app/core/priority.py
|
| 3 |
+
---------------------
|
| 4 |
+
Shared priority scoring utilities for NotiFlow Autonomous.
|
| 5 |
+
|
| 6 |
+
Replaces the single-agent string assignment pattern with an
|
| 7 |
+
additive scoring model. Any agent can contribute points.
|
| 8 |
+
UrgencyAgent is the sole agent that derives the final label.
|
| 9 |
+
|
| 10 |
+
Score scale: 0 – 100 (int, clamped)
|
| 11 |
+
> 70 → "high"
|
| 12 |
+
> 40 → "medium"
|
| 13 |
+
≤ 40 → "low"
|
| 14 |
+
|
| 15 |
+
Public API
|
| 16 |
+
----------
|
| 17 |
+
contribute_priority_score(ctx, points, reason) -> None
|
| 18 |
+
Add points to context["priority_score"] and log the reason.
|
| 19 |
+
|
| 20 |
+
derive_priority_label(ctx) -> str
|
| 21 |
+
Read context["priority_score"] and return the label string.
|
| 22 |
+
Also writes context["priority"] = label and returns it.
|
| 23 |
+
|
| 24 |
+
reset_priority_score(ctx) -> None
|
| 25 |
+
Zero the score (used at replan boundaries).
|
| 26 |
+
"""
|
| 27 |
+
|
| 28 |
+
from __future__ import annotations
|
| 29 |
+
|
| 30 |
+
import logging
|
| 31 |
+
from typing import Any
|
| 32 |
+
|
| 33 |
+
# Module-level logger; configuration is left to the host application.
logger = logging.getLogger(__name__)

# Context keys holding the running score and its per-contribution audit trail.
_SCORE_KEY = "priority_score"
_REASONS_KEY = "priority_score_reasons"

# Thresholds
# Label boundaries on the 0–100 scale (strict ">" comparisons in
# derive_priority_label: >70 → high, >40 → medium, otherwise low).
_HIGH_THRESHOLD = 70
_MEDIUM_THRESHOLD = 40
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def contribute_priority_score(
    ctx: dict[str, Any],
    points: int,
    reason: str,
) -> None:
    """
    Add a positive point contribution to the running priority score.

    The total is capped at 100. Each accepted contribution is appended to
    ``context["priority_score_reasons"]`` for auditing. Zero or negative
    contributions are ignored.

    Args:
        ctx: The live request context dict.
        points: Integer contribution (positive only).
        reason: Human-readable explanation of why this score was added.
    """
    if points <= 0:
        return  # non-positive contributions are a deliberate no-op
    total = min(100, ctx.get(_SCORE_KEY, 0) + points)
    ctx[_SCORE_KEY] = total
    audit_trail = ctx.setdefault(_REASONS_KEY, [])
    audit_trail.append({"points": points, "reason": reason, "total_after": total})
    logger.debug(
        "[Priority] +%d (%s) → total=%d", points, reason, total
    )
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def derive_priority_label(ctx: dict[str, Any]) -> str:
    """
    Map the accumulated score to its final label.

    Writes ``context["priority"] = label`` and returns the label.

    Args:
        ctx: The live request context dict.

    Returns:
        "high" | "medium" | "low"
    """
    score = ctx.get(_SCORE_KEY, 0)
    label = "low"
    # Walk the boundaries from highest to lowest; first match wins.
    for floor, candidate in ((_HIGH_THRESHOLD, "high"), (_MEDIUM_THRESHOLD, "medium")):
        if score > floor:
            label = candidate
            break
    ctx["priority"] = label
    logger.info(
        "[Priority] score=%d → label=%s", score, label
    )
    return label
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def reset_priority_score(ctx: dict[str, Any]) -> None:
    """
    Zero the running score and clear its audit trail.

    Called at replan boundaries so a fresh plan starts from a clean score.

    Args:
        ctx: The live request context dict.
    """
    ctx.update({_SCORE_KEY: 0, _REASONS_KEY: []})
|
app/core/registry.py
ADDED
|
@@ -0,0 +1,136 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
app/core/registry.py
|
| 3 |
+
--------------------
|
| 4 |
+
Agent Registry for NotiFlow Autonomous.
|
| 5 |
+
|
| 6 |
+
Single source of truth for all registered agents.
|
| 7 |
+
The orchestrator resolves agent names from this registry — it never
|
| 8 |
+
imports agent classes directly.
|
| 9 |
+
|
| 10 |
+
Extending the system
|
| 11 |
+
--------------------
|
| 12 |
+
To add a new agent:
|
| 13 |
+
1. Create app/agents/my_agent.py (subclass BaseAgent)
|
| 14 |
+
2. Add one line here: "my_agent": MyAgent()
|
| 15 |
+
|
| 16 |
+
That's it. The planner and orchestrator pick it up automatically as
|
| 17 |
+
long as the planner emits the agent's key in a plan step.
|
| 18 |
+
|
| 19 |
+
Public API
|
| 20 |
+
----------
|
| 21 |
+
AGENT_REGISTRY : dict[str, BaseAgent]
|
| 22 |
+
The live registry dict. Import and use directly.
|
| 23 |
+
|
| 24 |
+
get_agent(name) -> BaseAgent
|
| 25 |
+
Safe accessor — raises KeyError with a helpful message if missing.
|
| 26 |
+
|
| 27 |
+
register(name, agent) -> None
|
| 28 |
+
Runtime registration (useful for plugins / tests).
|
| 29 |
+
"""
|
| 30 |
+
|
| 31 |
+
from __future__ import annotations
|
| 32 |
+
|
| 33 |
+
from typing import TYPE_CHECKING
|
| 34 |
+
|
| 35 |
+
if TYPE_CHECKING:
|
| 36 |
+
from app.core.base_agent import BaseAgent
|
| 37 |
+
|
| 38 |
+
# ---------------------------------------------------------------------------
|
| 39 |
+
# Lazy imports — prevents circular import chains at module load time
|
| 40 |
+
# ---------------------------------------------------------------------------
|
| 41 |
+
|
| 42 |
+
def _build_registry() -> dict[str, "BaseAgent"]:
    """Instantiate every agent and return the key → instance mapping.

    Agent modules are imported at call time (not module load) to break the
    circular-import chain between app.core and app.agents.
    """
    from app.agents.intent_agent import IntentAgent
    from app.agents.extraction_agent import ExtractionAgent
    from app.agents.validation_agent import ValidationAgent
    from app.agents.invoice_agent import InvoiceAgent
    from app.agents.payment_agent import PaymentAgent
    from app.agents.skill_router_agent import SkillRouterAgent
    from app.agents.ledger_agent import LedgerAgent
    # ── Phase 3: Autonomy Layer ──────────────────────────────────────────────
    from app.agents.verification_agent import VerificationAgent
    from app.agents.monitor_agent import MonitorAgent
    from app.agents.prediction_agent import PredictionAgent
    from app.agents.urgency_agent import UrgencyAgent
    from app.agents.escalation_agent import EscalationAgent
    from app.agents.recovery_agent import RecoveryAgent

    return {
        # Core pipeline agents
        "intent": IntentAgent(),
        "extraction": ExtractionAgent(),
        "validation": ValidationAgent(),
        "invoice_agent": InvoiceAgent(),
        "payment_agent": PaymentAgent(),
        "router": SkillRouterAgent(),
        "ledger": LedgerAgent(),
        # Autonomy layer agents
        "verification": VerificationAgent(),
        "monitor": MonitorAgent(),
        "prediction": PredictionAgent(),
        "urgency": UrgencyAgent(),
        "escalation": EscalationAgent(),
        "recovery": RecoveryAgent(),
    }
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
# Module-level registry — built once on first access via get_agent()
# Direct dict access also works: AGENT_REGISTRY["intent"]
AGENT_REGISTRY: dict[str, "BaseAgent"] = {}

# Guard flag so _build_registry() runs at most once per process.
_initialised = False
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def _ensure_init() -> None:
    """Populate AGENT_REGISTRY on first use; subsequent calls are no-ops."""
    global _initialised
    if _initialised:
        return
    AGENT_REGISTRY.update(_build_registry())
    _initialised = True
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
# ---------------------------------------------------------------------------
|
| 92 |
+
# Public API
|
| 93 |
+
# ---------------------------------------------------------------------------
|
| 94 |
+
|
| 95 |
+
def get_agent(name: str) -> "BaseAgent":
    """
    Retrieve an agent by registry key.

    Args:
        name: Agent key (e.g. "intent", "extraction", "ledger").

    Returns:
        The registered BaseAgent instance.

    Raises:
        KeyError: If no agent is registered under that name; the message
            lists all valid keys to aid debugging.
    """
    _ensure_init()
    if name in AGENT_REGISTRY:
        return AGENT_REGISTRY[name]
    valid = ", ".join(sorted(AGENT_REGISTRY.keys()))
    raise KeyError(
        f"No agent registered as '{name}'. "
        f"Valid keys: {valid}"
    )
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
def register(name: str, agent: "BaseAgent") -> None:
    """
    Insert or replace an agent under ``name`` at runtime.

    Useful for plugins, testing, and dynamic skill agents.

    Args:
        name: Registry key (e.g. "my_custom_agent").
        agent: Instantiated BaseAgent subclass.
    """
    _ensure_init()
    AGENT_REGISTRY[name] = agent
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
def list_agents() -> list[str]:
    """Return every registered agent key, sorted alphabetically."""
    _ensure_init()
    return sorted(AGENT_REGISTRY)
|
app/main.py
ADDED
|
@@ -0,0 +1,198 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
app/main.py
|
| 3 |
+
-----------
|
| 4 |
+
Primary entry point for NotiFlow Autonomous.
|
| 5 |
+
|
| 6 |
+
Public API (UNCHANGED — backward compatible):
|
| 7 |
+
run_notiflow(message, demo_mode, source) -> dict
|
| 8 |
+
|
| 9 |
+
Changes from original:
|
| 10 |
+
- Live mode now calls app.core.orchestrator.process_message()
|
| 11 |
+
instead of agent.orchestrator.process_message()
|
| 12 |
+
- Demo mode is unchanged (same static responses + keyword fallback)
|
| 13 |
+
- Bedrock references removed entirely
|
| 14 |
+
- Context is created here in live mode and flows through pipeline
|
| 15 |
+
|
| 16 |
+
CLI usage (unchanged):
|
| 17 |
+
python app/main.py "rahul ne 15000 bheja"
|
| 18 |
+
"""
|
| 19 |
+
|
| 20 |
+
from __future__ import annotations
|
| 21 |
+
|
| 22 |
+
import json
|
| 23 |
+
import logging
|
| 24 |
+
import sys
|
| 25 |
+
from typing import Any
|
| 26 |
+
|
| 27 |
+
from app.config import DEMO_MODE
|
| 28 |
+
|
| 29 |
+
logger = logging.getLogger(__name__)
|
| 30 |
+
|
| 31 |
+
# ---------------------------------------------------------------------------
|
| 32 |
+
# Demo pipeline (no cloud credentials needed)
|
| 33 |
+
# ---------------------------------------------------------------------------
|
| 34 |
+
|
| 35 |
+
# Canned demo responses keyed by the exact (lower-cased, stripped) message.
# Messages with no exact match fall back to _fallback_intent() keyword rules.
_DEMO_RESPONSES: dict[str, dict] = {
    # Payment received, no payment channel specified
    "rahul ne 15000 bheja": {
        "intent": "payment",
        "data": {"customer": "Rahul", "amount": 15000, "payment_type": None},
        "event": {"event": "payment_recorded",
                  "payment": {"customer": "Rahul", "amount": 15000,
                              "payment_type": None, "status": "received"}},
    },
    # Order without a named customer
    "bhaiya 3 kurti bhej dena": {
        "intent": "order",
        "data": {"customer": None, "item": "kurti", "quantity": 3},
        "event": {"event": "order_received",
                  "order": {"customer": None, "item": "kurti",
                            "quantity": 3, "status": "pending"},
                  "invoice": {"invoice_id": "INV-DEMO-0001", "total_amount": 0.0}},
    },
    # Order with named customer and quantity
    "priya ke liye 2 kilo aata bhej dena": {
        "intent": "order",
        "data": {"customer": "Priya", "item": "aata", "quantity": 2},
        "event": {"event": "order_received",
                  "order": {"customer": "Priya", "item": "aata",
                            "quantity": 2, "status": "pending"},
                  "invoice": {"invoice_id": "INV-DEMO-0002", "total_amount": 0.0}},
    },
    # Return/exchange request
    "size chota hai exchange karna hai": {
        "intent": "return",
        "data": {"customer": None, "item": None, "reason": "size issue"},
        "event": {"event": "return_requested",
                  "return": {"customer": None, "item": None,
                             "reason": "size issue", "status": "pending_review"}},
    },
    # Credit (udhar) with no details
    "udhar me de dijiye": {
        "intent": "credit",
        "data": {"customer": None, "item": None, "quantity": None, "amount": None},
        "event": {"event": "credit_recorded",
                  "credit": {"customer": None, "amount": None, "status": "open"}},
    },
    # Credit with customer and amount
    "suresh ko 500 ka maal udhar dena": {
        "intent": "credit",
        "data": {"customer": "Suresh", "item": "goods", "quantity": None, "amount": 500},
        "event": {"event": "credit_recorded",
                  "credit": {"customer": "Suresh", "amount": 500, "status": "open"}},
    },
    # Preparation / pack-ahead request
    "3 kurti ka set ready rakhna": {
        "intent": "preparation",
        "data": {"item": "kurti", "quantity": 3},
        "event": {"event": "preparation_queued",
                  "preparation": {"item": "kurti", "quantity": 3, "status": "queued"}},
    },
    # Payment received via UPI
    "amit bhai ka 8000 gpay se aaya": {
        "intent": "payment",
        "data": {"customer": "Amit", "amount": 8000, "payment_type": "upi"},
        "event": {"event": "payment_recorded",
                  "payment": {"customer": "Amit", "amount": 8000,
                              "payment_type": "upi", "status": "received"}},
    },
}
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def _fallback_intent(message: str) -> str:
|
| 95 |
+
m = message.lower()
|
| 96 |
+
if any(w in m for w in ["bheja", "aaya", "cash", "gpay", "upi", "paytm", "online"]):
|
| 97 |
+
return "payment"
|
| 98 |
+
if any(w in m for w in ["exchange", "wapas", "return", "vapas", "size"]):
|
| 99 |
+
return "return"
|
| 100 |
+
if any(w in m for w in ["udhar", "credit", "baad"]):
|
| 101 |
+
return "credit"
|
| 102 |
+
if any(w in m for w in ["ready", "pack", "rakhna", "taiyar"]):
|
| 103 |
+
return "preparation"
|
| 104 |
+
if any(w in m for w in ["bhej", "dena", "chahiye", "kilo", "piece"]):
|
| 105 |
+
return "order"
|
| 106 |
+
return "other"
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def _run_demo(message: str, source: str = "system") -> dict[str, Any]:
    """Resolve the message against canned demo responses and sync the ledger.

    Falls back to keyword classification when no exact demo entry matches,
    then appends the transaction to the Google Sheets ledger.
    """
    lookup_key = message.strip().lower()
    response = _DEMO_RESPONSES.get(lookup_key)
    if response is None:
        # No exact canned answer — synthesize one from keyword rules.
        intent = _fallback_intent(message)
        response = {
            "intent": intent,
            "data": {"note": f"Demo: classified as '{intent}'"},
            "event": {"event": f"{intent}_recorded",
                      "note": "Demo fallback — no exact match"},
        }

    # Imported lazily so demo mode stays importable without sheet deps.
    from app.services.google_sheets_service import append_transaction
    sheet_updated = append_transaction(
        intent=response["intent"],
        data=response["data"],
        source=source,
    )

    return {
        "message": message,
        "intent": response["intent"],
        "data": response["data"],
        "event": response["event"],
        "sheet_updated": sheet_updated,
    }
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
# ---------------------------------------------------------------------------
|
| 138 |
+
# Public API
|
| 139 |
+
# ---------------------------------------------------------------------------
|
| 140 |
+
|
| 141 |
+
def run_notiflow(
    message: str,
    demo_mode: bool | None = None,
    source: str = "system",
) -> dict[str, Any]:
    """
    Run a business message through the full NotiFlow pipeline.

    .. deprecated::
        The API layer (notification_routes.py) now calls
        ``app.core.orchestrator.process_message()`` directly.
        ``run_notiflow`` is kept for demo mode, CLI usage
        (``python app/main.py "..."``), and backward-compat tests.
        New code should use ``process_message()`` directly.

    Args:
        message: Raw Hinglish or English business message.
        demo_mode: Override DEMO_MODE from config. None = use config value.
        source: Notification source (e.g. "whatsapp", "gpay").

    Raises:
        ValueError: If the message is empty or whitespace-only.
    """
    if not message or not message.strip():
        raise ValueError("Message cannot be empty.")

    cleaned = message.strip()
    use_demo = DEMO_MODE if demo_mode is None else demo_mode

    if use_demo:
        logger.info("run_notiflow [demo] ← %r", message)
        return _run_demo(cleaned, source=source)

    logger.info("run_notiflow [live] ← %r", message)
    # Live mode delegates to the context-driven orchestrator (lazy import
    # keeps demo/CLI usage free of orchestrator dependencies).
    from app.core.orchestrator import process_message
    return process_message(cleaned, source=source)
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
# ---------------------------------------------------------------------------
|
| 180 |
+
# CLI entry point
|
| 181 |
+
# ---------------------------------------------------------------------------
|
| 182 |
+
|
| 183 |
+
if __name__ == "__main__":
    # CLI entry point: python app/main.py "<business message>"
    logging.basicConfig(level=logging.WARNING)

    if len(sys.argv) < 2:
        print('Usage: python app/main.py "<business message>"')
        sys.exit(1)

    # Join all CLI args so unquoted multi-word messages still work.
    input_message = " ".join(sys.argv[1:])
    try:
        result = run_notiflow(input_message)
        # Strip context from CLI output to keep it readable
        result.pop("context", None)
        print(json.dumps(result, indent=2, ensure_ascii=False))
    except Exception as exc:
        # Surface any pipeline failure as structured JSON with exit code 1.
        print(json.dumps({"error": str(exc)}, indent=2))
        sys.exit(1)
|
app/memory/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
"""app/memory — agent context memory."""
|
app/memory/agent_memory.py
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
agent_memory.py
|
| 3 |
+
---------------
|
| 4 |
+
Agent memory layer for Notiflow.
|
| 5 |
+
|
| 6 |
+
Stores recent business context so skills and future agents can reference
|
| 7 |
+
what was last discussed (customer names, items, etc.).
|
| 8 |
+
|
| 9 |
+
Storage: JSON file at the path defined in app/config.py (MEMORY_FILE).
|
| 10 |
+
Structure:
|
| 11 |
+
{
|
| 12 |
+
"recent_customers": ["Rahul", "Priya"], # newest last
|
| 13 |
+
"recent_items": ["kurti", "aata"]
|
| 14 |
+
}
|
| 15 |
+
|
| 16 |
+
Public API
|
| 17 |
+
----------
|
| 18 |
+
load_memory() -> dict
|
| 19 |
+
update_memory(customer=None, item=None) -> None
|
| 20 |
+
|
| 21 |
+
Design notes:
|
| 22 |
+
- Maximum 10 entries per list (oldest pruned automatically).
|
| 23 |
+
- Read-modify-write is done in one function call to minimise race window.
|
| 24 |
+
- None values are silently ignored (no-op).
|
| 25 |
+
"""
|
| 26 |
+
|
| 27 |
+
from __future__ import annotations
|
| 28 |
+
|
| 29 |
+
import json
|
| 30 |
+
import logging
|
| 31 |
+
from pathlib import Path
|
| 32 |
+
from typing import Optional
|
| 33 |
+
|
| 34 |
+
from app.config import MEMORY_FILE
|
| 35 |
+
|
| 36 |
+
# Module-level logger; configuration is left to the host application.
logger = logging.getLogger(__name__)

# Maximum entries kept per memory list; oldest entries are pruned.
_MAX_ENTRIES = 10

# Template for a fresh memory structure (copied, never mutated directly).
_EMPTY_MEMORY: dict = {
    "recent_customers": [],
    "recent_items": [],
}
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
# ---------------------------------------------------------------------------
|
| 47 |
+
# Internal helpers
|
| 48 |
+
# ---------------------------------------------------------------------------
|
| 49 |
+
|
| 50 |
+
def _read_file() -> dict:
    """
    Read memory from disk; return a fresh empty structure if the file is
    missing, unreadable, or does not contain a JSON object.
    """
    path = Path(MEMORY_FILE)
    if not path.exists():
        # Copy the template lists so callers never mutate _EMPTY_MEMORY.
        return {k: list(v) for k, v in _EMPTY_MEMORY.items()}
    try:
        with path.open("r", encoding="utf-8") as f:
            data = json.load(f)
        # Fix: valid JSON that is not an object (e.g. a bare list) would
        # previously crash on .setdefault with an uncaught AttributeError.
        if not isinstance(data, dict):
            logger.warning(
                "Memory file did not contain a JSON object — using empty memory."
            )
            return {k: list(v) for k, v in _EMPTY_MEMORY.items()}
        # Ensure both keys are present even if file is partial
        data.setdefault("recent_customers", [])
        data.setdefault("recent_items", [])
        return data
    except (json.JSONDecodeError, OSError) as exc:
        logger.warning("Could not read memory file (%s) — using empty memory.", exc)
        return {k: list(v) for k, v in _EMPTY_MEMORY.items()}
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def _write_file(memory: dict) -> None:
    """
    Persist the memory dict atomically: write to a sibling ``.tmp`` file
    first, then rename over the target so readers never see a half-write.
    """
    target = Path(MEMORY_FILE)
    tmp_path = target.with_suffix(".tmp")
    try:
        # Parent directory may not exist on first run.
        target.parent.mkdir(parents=True, exist_ok=True)
        with tmp_path.open("w", encoding="utf-8") as handle:
            json.dump(memory, handle, indent=2, ensure_ascii=False)
        tmp_path.replace(target)
    except OSError as exc:
        logger.error("Could not write memory file: %s", exc)
        # Best effort: don't leave a stale temp file behind on failure.
        if tmp_path.exists():
            tmp_path.unlink(missing_ok=True)
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def _append_unique(lst: list, value: str, max_size: int = _MAX_ENTRIES) -> list:
    """
    Return a new list with *value* appended, deduplicated, and trimmed to
    the newest *max_size* entries. The most recent item is always at the end.

    Fix: the previous version mutated the caller's list in place (via
    ``remove``/``append``) *and* returned a new slice — a surprising side
    effect for anyone holding a reference to the input. This version never
    mutates its argument; callers assign the return value, as before.

    Args:
        lst: Existing list of entries (unmodified).
        value: Entry to record as most recent.
        max_size: Maximum number of entries to keep (default _MAX_ENTRIES).
    """
    # Drop any earlier occurrence so the value moves to the "most recent" slot.
    result = [entry for entry in lst if entry != value]
    result.append(value)
    return result[-max_size:]  # keep newest max_size entries
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
# ---------------------------------------------------------------------------
|
| 94 |
+
# Public API
|
| 95 |
+
# ---------------------------------------------------------------------------
|
| 96 |
+
|
| 97 |
+
def load_memory() -> dict:
    """
    Load the current agent memory from disk.

    Returns:
        A dict of the form::

            {
                "recent_customers": [str, ...],
                "recent_items": [str, ...]
            }
    """
    state = _read_file()
    logger.debug("Memory loaded: %s", state)
    return state
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
def update_memory(
    customer: Optional[str] = None,
    item: Optional[str] = None,
) -> None:
    """
    Update agent memory with a new customer name and/or item.

    None values are silently ignored. Fix: values that are empty or
    whitespace-only after stripping are now also ignored, so a stray " "
    can no longer pollute the memory lists with empty strings.
    Duplicates are deduplicated and moved to the end (most recent position).

    Args:
        customer: Customer name to remember (e.g. "Rahul").
        item: Item name to remember (e.g. "kurti").

    Example:
        >>> update_memory(customer="Rahul", item="kurti")
    """
    if customer is None and item is None:
        return

    memory = _read_file()

    # Normalise once; empty-after-strip values are treated as absent.
    cleaned_customer = str(customer).strip() if customer else ""
    cleaned_item = str(item).strip() if item else ""

    if cleaned_customer:
        memory["recent_customers"] = _append_unique(
            memory["recent_customers"], cleaned_customer
        )

    if cleaned_item:
        memory["recent_items"] = _append_unique(
            memory["recent_items"], cleaned_item
        )

    _write_file(memory)
    logger.info("Memory updated: customer=%s item=%s", customer, item)
|
app/services/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
"""app/services — external integrations and stateless business services."""
|
app/services/excel_sync.py
ADDED
|
@@ -0,0 +1,210 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
services/excel_sync.py
|
| 3 |
+
----------------------
|
| 4 |
+
Semantic Excel sync wrapper for Notiflow.
|
| 5 |
+
|
| 6 |
+
Wraps utils/excel_writer.append_row() with business-named functions so
|
| 7 |
+
skills and the FastAPI backend can call append_order(), append_payment()
|
| 8 |
+
etc. without knowing the sheet schema details.
|
| 9 |
+
|
| 10 |
+
Excel writes always go to EXCEL_SYNC_FILE from app/config.py.
|
| 11 |
+
If EXCEL_FILE_PATH env var is set, that path is used; otherwise falls
|
| 12 |
+
back to the default DATA_FILE path (data/notiflow_data.xlsx).
|
| 13 |
+
|
| 14 |
+
No Excel writing logic is duplicated here — this is a thin adapter only.
|
| 15 |
+
|
| 16 |
+
Public API
|
| 17 |
+
----------
|
| 18 |
+
append_order(event_dict) — writes to Orders sheet
|
| 19 |
+
append_payment(event_dict) — writes to Ledger sheet (type="payment")
|
| 20 |
+
append_return(event_dict) — writes to Returns sheet
|
| 21 |
+
append_credit(event_dict) — writes to Ledger sheet (type="credit")
|
| 22 |
+
append_inventory(record) — writes to Inventory sheet
|
| 23 |
+
append_invoice(record) — writes to Invoices sheet
|
| 24 |
+
sync_from_event(result) — auto-routes based on intent (convenience)
|
| 25 |
+
"""
|
| 26 |
+
|
| 27 |
+
from __future__ import annotations
|
| 28 |
+
|
| 29 |
+
import logging
|
| 30 |
+
from pathlib import Path
|
| 31 |
+
|
| 32 |
+
from app.config import EXCEL_SYNC_FILE
|
| 33 |
+
from app.utils.excel_writer import append_row, EXCEL_FILE as _DEFAULT_FILE
|
| 34 |
+
|
| 35 |
+
logger = logging.getLogger(__name__)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def _active_file() -> Path:
    """
    Return the Excel file path all sync writes should target.

    EXCEL_SYNC_FILE (driven by the EXCEL_FILE_PATH env var) takes priority
    over the packaged default.
    """
    active = Path(EXCEL_SYNC_FILE)
    return active
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
# ---------------------------------------------------------------------------
|
| 47 |
+
# Sheet-specific append functions
|
| 48 |
+
# ---------------------------------------------------------------------------
|
| 49 |
+
|
| 50 |
+
def append_order(event_dict: dict) -> None:
    """
    Append one order record to the Orders sheet.

    Args:
        event_dict: The "order" sub-dict from an order skill event.
                    Expected keys: order_id, timestamp, customer, item,
                    quantity, status
    """
    # Pull only the known Orders columns; anything else in the event is
    # intentionally ignored so the sheet schema stays stable.
    row = {
        field: event_dict.get(field)
        for field in ("order_id", "timestamp", "customer", "item", "quantity")
    }
    row["status"] = event_dict.get("status", "pending")
    append_row("Orders", row)
    logger.info("Excel sync → Orders: %s", row.get("order_id"))
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def append_payment(event_dict: dict) -> None:
    """
    Append one payment record to the Ledger sheet.

    Args:
        event_dict: The "payment" sub-dict from a payment skill event.
                    Expected keys: entry_id, timestamp, customer,
                    amount, payment_type, status
    """
    row: dict = {}
    row["entry_id"] = event_dict.get("entry_id")
    row["timestamp"] = event_dict.get("timestamp")
    row["type"] = "payment"  # Ledger rows are discriminated by this column
    row["customer"] = event_dict.get("customer")
    row["item"] = None       # not applicable for payments
    row["quantity"] = None   # not applicable for payments
    row["amount"] = event_dict.get("amount")
    row["payment_type"] = event_dict.get("payment_type")
    row["status"] = event_dict.get("status", "received")
    append_row("Ledger", row)
    logger.info("Excel sync → Ledger (payment): customer=%s", row.get("customer"))
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def append_return(event_dict: dict) -> None:
    """
    Append one return record to the Returns sheet.

    Args:
        event_dict: The "return" sub-dict from a return skill event.
                    Expected keys: return_id, timestamp, customer,
                    item, reason, status
    """
    # Copy only the Returns columns; extra event keys are dropped on purpose.
    row = {
        field: event_dict.get(field)
        for field in ("return_id", "timestamp", "customer", "item", "reason")
    }
    row["status"] = event_dict.get("status", "pending_review")
    append_row("Returns", row)
    logger.info("Excel sync → Returns: %s", row.get("return_id"))
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def append_credit(event_dict: dict) -> None:
    """
    Append one credit (udhar) record to the Ledger sheet.

    Args:
        event_dict: The "credit" sub-dict from a credit skill event.
    """
    row: dict = {}
    row["entry_id"] = event_dict.get("entry_id")
    row["timestamp"] = event_dict.get("timestamp")
    row["type"] = "credit"  # Ledger rows are discriminated by this column
    row["customer"] = event_dict.get("customer")
    row["item"] = event_dict.get("item")
    row["quantity"] = event_dict.get("quantity")
    row["amount"] = event_dict.get("amount")
    row["payment_type"] = None  # credits carry no payment channel
    row["status"] = event_dict.get("status", "open")
    append_row("Ledger", row)
    logger.info("Excel sync → Ledger (credit): customer=%s", row.get("customer"))
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
def append_inventory(record: dict) -> None:
    """
    Append one stock-movement record to the Inventory sheet.

    Args:
        record: Dict with keys: timestamp, item, change, direction,
                reference_id, note
    """
    direction = record.get("direction")
    item_name = record.get("item")
    delta = record.get("change")
    append_row("Inventory", record)
    logger.info(
        "Excel sync → Inventory: %s %s (%s)",
        direction, item_name, delta,
    )
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
def append_invoice(record: dict) -> None:
    """
    Append one invoice record to the Invoices sheet.

    Args:
        record: Invoice dict from invoice_service.generate_invoice().
    """
    append_row("Invoices", record)
    invoice_id = record.get("invoice_id")
    logger.info("Excel sync → Invoices: %s", invoice_id)
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
# ---------------------------------------------------------------------------
|
| 165 |
+
# Convenience router — auto-dispatches based on orchestrator result intent
|
| 166 |
+
# ---------------------------------------------------------------------------
|
| 167 |
+
|
| 168 |
+
def sync_from_event(result: dict) -> None:
    """
    Automatically sync an orchestrator result to the correct Excel sheet(s).

    Called by the FastAPI notification handler after skill execution so
    the ledger is always up to date without skills needing to know about
    the sync layer.

    Note: Skills already write to Excel internally (Stages 5-6).
    This function provides a secondary sync point for the FastAPI path
    when skills are called via run_notiflow() in demo mode (where skills
    don't execute and nothing is written). In live mode, this is a no-op
    safety net — duplicate rows are avoided by checking event type.

    Fixes: removed the unused ``event_name`` local; the four near-identical
    intent branches are replaced with a single lookup table (rendered log
    output is unchanged).

    Args:
        result: Full orchestrator result dict
                {message, intent, data, event}
    """
    intent = result.get("intent", "other")
    event = result.get("event", {})

    # intent -> (event sub-dict key, ID field whose presence proves the
    # skill already persisted the record in live mode).
    persisted_markers = {
        "order": ("order", "order_id"),
        "payment": ("payment", "entry_id"),
        "return": ("return", "return_id"),
        "credit": ("credit", "entry_id"),
    }

    marker = persisted_markers.get(intent)
    if marker:
        sub_key, id_field = marker
        sub = event.get(sub_key)
        if sub and sub.get(id_field):
            # Already written by skill — skip to avoid duplicate rows.
            logger.debug(
                "sync_from_event: %s already persisted by skill, skipping.", intent
            )
            return

    logger.debug(
        "sync_from_event: demo mode result or missing IDs — "
        "no additional Excel write needed."
    )
|
app/services/gemini_client.py
ADDED
|
@@ -0,0 +1,184 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
models/gemini_client.py
|
| 3 |
+
-----------------------
|
| 4 |
+
Gemini API client for Notiflow.
|
| 5 |
+
|
| 6 |
+
Serves two distinct roles:
|
| 7 |
+
|
| 8 |
+
1. FALLBACK REASONER — used by ModelRouter when Nova is unavailable.
|
| 9 |
+
generate(prompt) returns a plain-text response that must match
|
| 10 |
+
Nova's JSON output schema exactly (enforced by the prompt).
|
| 11 |
+
|
| 12 |
+
2. NOTIFICATION GENERATOR — used by notification_generator.py to
|
| 13 |
+
produce realistic Hinglish business notifications for demo mode.
|
| 14 |
+
generate_notifications(n) returns a list of notification dicts.
|
| 15 |
+
|
| 16 |
+
Configuration
|
| 17 |
+
-------------
|
| 18 |
+
GEMINI_API_KEY — from .env / environment variable
|
| 19 |
+
|
| 20 |
+
Dependencies
|
| 21 |
+
------------
|
| 22 |
+
pip install google-generativeai
|
| 23 |
+
"""
|
| 24 |
+
|
| 25 |
+
from __future__ import annotations
|
| 26 |
+
|
| 27 |
+
import json
|
| 28 |
+
import logging
|
| 29 |
+
import re
|
| 30 |
+
from typing import Any
|
| 31 |
+
|
| 32 |
+
from app.config import GEMINI_API_KEY
|
| 33 |
+
|
| 34 |
+
logger = logging.getLogger(__name__)
|
| 35 |
+
|
| 36 |
+
# Gemini model to use — flash is fast and sufficient for this use case
|
| 37 |
+
_GEMINI_MODEL = "gemini-2.5-flash"
|
| 38 |
+
|
| 39 |
+
_client = None # lazy singleton
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def _get_client():
    """
    Return the process-wide Gemini GenerativeModel, creating it on first use.

    Raises:
        RuntimeError: if the API key is missing, the SDK is not installed,
                      or initialisation fails for any other reason.
    """
    global _client
    if _client is None:
        if not GEMINI_API_KEY:
            raise RuntimeError(
                "GEMINI_API_KEY is not set. "
                "Add it to your .env file: GEMINI_API_KEY=your_key_here"
            )
        try:
            import google.generativeai as genai
        except ImportError:
            raise RuntimeError(
                "google-generativeai is not installed. "
                "Run: pip install google-generativeai"
            )
        try:
            genai.configure(api_key=GEMINI_API_KEY)
            _client = genai.GenerativeModel(_GEMINI_MODEL)
            logger.info("Gemini client initialised (model=%s)", _GEMINI_MODEL)
        except Exception as exc:
            raise RuntimeError(f"Failed to initialise Gemini client: {exc}") from exc
    return _client
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def _strip_fences(text: str) -> str:
|
| 70 |
+
"""Strip markdown code fences from model output."""
|
| 71 |
+
return re.sub(r"```(?:json)?|```", "", text).strip()
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
# ---------------------------------------------------------------------------
|
| 75 |
+
# Public API — Role 1: Fallback reasoner for ModelRouter
|
| 76 |
+
# ---------------------------------------------------------------------------
|
| 77 |
+
|
| 78 |
+
def generate(prompt: str) -> str:
    """
    Send a prompt to Gemini and return the fence-stripped text response.

    Used by ModelRouter as a Nova fallback. The prompt itself instructs the
    model to emit JSON — parsing is left entirely to the caller.

    Args:
        prompt: Fully rendered prompt (same prompt sent to Nova).

    Returns:
        Raw text response from Gemini (may contain JSON).

    Raises:
        RuntimeError: If the API call fails or client cannot be initialised.
    """
    model = _get_client()
    try:
        reply = model.generate_content(prompt)
        text = reply.text or ""
        logger.debug("Gemini raw response: %r", text[:200])
        return _strip_fences(text)
    except Exception as exc:
        logger.error("Gemini generate() failed: %s", exc)
        raise RuntimeError(f"Gemini API error: {exc}") from exc
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
# ---------------------------------------------------------------------------
|
| 107 |
+
# Public API — Role 2: Notification generator
|
| 108 |
+
# ---------------------------------------------------------------------------
|
| 109 |
+
|
| 110 |
+
_NOTIFICATION_PROMPT = """
|
| 111 |
+
You are simulating incoming business notifications for a small business in India.
|
| 112 |
+
|
| 113 |
+
Generate {n} realistic business notifications in Hinglish (Hindi + English mix).
|
| 114 |
+
|
| 115 |
+
Each notification must come from one of these sources:
|
| 116 |
+
- whatsapp (informal text from customers or suppliers)
|
| 117 |
+
- payment (UPI payment confirmation message)
|
| 118 |
+
- amazon (marketplace order or return notification)
|
| 119 |
+
- return (customer return or exchange request)
|
| 120 |
+
|
| 121 |
+
Each notification must represent ONE of these business events:
|
| 122 |
+
- An order for a product (item name + quantity)
|
| 123 |
+
- A payment received (person name + amount)
|
| 124 |
+
- A credit/udhar request
|
| 125 |
+
- A return or exchange request
|
| 126 |
+
- A preparation/packing request
|
| 127 |
+
|
| 128 |
+
Rules:
|
| 129 |
+
- Use natural Hinglish phrasing. Not too formal.
|
| 130 |
+
- Vary sources and event types.
|
| 131 |
+
- Include real-sounding names (Rahul, Priya, Suresh, Amit, etc.)
|
| 132 |
+
- Include real product names (kurti, aata, daal, saree, etc.)
|
| 133 |
+
- Do NOT include any explanation or preamble.
|
| 134 |
+
- Return ONLY a valid JSON array, nothing else.
|
| 135 |
+
|
| 136 |
+
Output format (JSON array only, no markdown):
|
| 137 |
+
[
|
| 138 |
+
{{"source": "whatsapp", "message": "bhaiya 3 kurti bhej dena"}},
|
| 139 |
+
{{"source": "payment", "message": "Rahul ne 15000 bheja UPI se"}}
|
| 140 |
+
]
|
| 141 |
+
"""
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
def generate_notifications(n: int = 5) -> list[dict[str, str]]:
    """
    Generate n realistic Hinglish business notifications via Gemini.

    Args:
        n: Number of notifications to generate (default 5).

    Returns:
        List of dicts with keys "source" and "message".
        Returns an empty list if generation fails (non-fatal).

    Example:
        >>> generate_notifications(3)
        [
            {"source": "whatsapp", "message": "bhaiya 3 kurti bhej dena"},
            {"source": "payment", "message": "Rahul ne 15000 bheja"},
            {"source": "return", "message": "size chota hai exchange karna hai"},
        ]
    """
    try:
        raw = generate(_NOTIFICATION_PROMPT.format(n=n))
        parsed = json.loads(raw)
    except json.JSONDecodeError as exc:
        logger.warning("Could not parse Gemini notification output as JSON: %s", exc)
        return []
    except Exception as exc:
        # Any other failure (API error, missing key, ...) is non-fatal.
        logger.warning("generate_notifications failed: %s", exc)
        return []

    if not isinstance(parsed, list):
        logger.warning("Gemini notification response was not a list: %s", type(parsed))
        return []

    # Keep only well-formed entries carrying both required keys.
    valid = [
        entry for entry in parsed
        if isinstance(entry, dict) and "source" in entry and "message" in entry
    ]
    logger.info("Generated %d notifications via Gemini", len(valid))
    return valid
|
app/services/google_sheets_service.py
ADDED
|
@@ -0,0 +1,140 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
services/google_sheets_service.py
|
| 3 |
+
---------------------------------
|
| 4 |
+
Google Sheets ledger integration for Notiflow.
|
| 5 |
+
|
| 6 |
+
Appends a row to the configured Google Sheet every time a notification
|
| 7 |
+
is processed. Uses a service-account JSON file for authentication.
|
| 8 |
+
|
| 9 |
+
Environment variables (loaded via python-dotenv):
|
| 10 |
+
GOOGLE_SHEETS_CREDENTIALS — path to the service-account JSON file
|
| 11 |
+
GOOGLE_SHEET_ID — the Sheet key (from the URL)
|
| 12 |
+
|
| 13 |
+
Row structure:
|
| 14 |
+
[intent, item, quantity, customer, amount, source, timestamp]
|
| 15 |
+
|
| 16 |
+
If the Sheets API is unreachable or misconfigured, the service logs
|
| 17 |
+
the error and returns False — it NEVER crashes the pipeline.
|
| 18 |
+
"""
|
| 19 |
+
|
| 20 |
+
from __future__ import annotations
|
| 21 |
+
|
| 22 |
+
import logging
|
| 23 |
+
import os
|
| 24 |
+
from datetime import datetime, timezone
|
| 25 |
+
from pathlib import Path
|
| 26 |
+
from typing import Any
|
| 27 |
+
|
| 28 |
+
logger = logging.getLogger(__name__)
|
| 29 |
+
|
| 30 |
+
# ---------------------------------------------------------------------------
|
| 31 |
+
# Lazy-loaded module-level client
|
| 32 |
+
# ---------------------------------------------------------------------------
|
| 33 |
+
|
| 34 |
+
_sheet = None # gspread.Worksheet (first sheet)
|
| 35 |
+
_initialised = False # True after first attempt
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def _get_sheet():
    """
    Lazy-initialise the gspread worksheet.

    Returns the worksheet on success, or None if credentials / sheet ID
    are missing or invalid. The result (worksheet or None) is cached in
    module globals, so only the first call pays the connection cost.
    """
    global _sheet, _initialised

    # Fast path: a previous call already attempted initialisation
    # (successfully or not) — return whatever it produced.
    if _initialised:
        return _sheet

    # Mark attempted up-front so a failing init is NOT retried on every call.
    _initialised = True

    try:
        import gspread
        from google.oauth2.service_account import Credentials
    except ImportError as exc:
        logger.warning("Google Sheets libraries not installed (%s). "
                       "Install gspread and google-auth.", exc)
        return None

    # Configuration comes from the environment; both have soft fallbacks.
    creds_path = os.getenv("GOOGLE_SHEETS_CREDENTIALS", "credentials/sheets.json")
    sheet_id = os.getenv("GOOGLE_SHEET_ID", "")

    if not sheet_id:
        logger.warning("GOOGLE_SHEET_ID not set — Sheets sync disabled.")
        return None

    # Resolve relative path from project root
    # (two levels up from this file: app/services/ -> app/ -> root-relative).
    creds_file = Path(creds_path)
    if not creds_file.is_absolute():
        creds_file = Path(__file__).parent.parent / creds_file

    if not creds_file.exists():
        logger.warning("Google credentials file not found at %s", creds_file)
        return None

    try:
        # Drive scope is required alongside Sheets for open_by_key access.
        scopes = [
            "https://www.googleapis.com/auth/spreadsheets",
            "https://www.googleapis.com/auth/drive",
        ]
        credentials = Credentials.from_service_account_file(
            str(creds_file), scopes=scopes,
        )
        client = gspread.authorize(credentials)
        spreadsheet = client.open_by_key(sheet_id)
        _sheet = spreadsheet.sheet1  # first worksheet
        logger.info("Google Sheets connected: %s", spreadsheet.title)
        return _sheet
    except Exception as exc:
        # Broad catch is deliberate: any auth/network/permission failure
        # disables sync for this process instead of crashing the pipeline.
        logger.error("Failed to connect to Google Sheets: %s", exc)
        return None
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
# ---------------------------------------------------------------------------
|
| 95 |
+
# Public API
|
| 96 |
+
# ---------------------------------------------------------------------------
|
| 97 |
+
|
| 98 |
+
def append_transaction(
    intent: str,
    data: dict[str, Any],
    source: str = "system",
) -> bool:
    """
    Append a ledger row to the configured Google Sheet.

    Args:
        intent: Detected intent (e.g. "order", "payment").
        data: Validated entity dict from the extraction agent.
        source: Notification source (e.g. "whatsapp", "gpay").

    Returns:
        True if the row was written successfully, False otherwise.
    """
    worksheet = _get_sheet()
    if worksheet is None:
        logger.warning("Google Sheets update skipped — not connected.")
        return False

    stamp = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M")

    # Column order: [intent, item, quantity, customer, amount, source, timestamp]
    values = [intent]
    values.extend(str(data.get(field, "")) for field in
                  ("item", "quantity", "customer", "amount"))
    values.append(source)
    values.append(stamp)

    # Defensive padding — always exactly 7 columns to prevent column drift
    values = (values + [""] * 7)[:7]

    try:
        worksheet.append_row(values, value_input_option="USER_ENTERED", table_range="A1")
        logger.info("Google Sheets ledger updated successfully")
        return True
    except Exception as exc:
        logger.error("Google Sheets update failed: %s", exc)
        return False
|
app/services/inventory_service.py
ADDED
|
@@ -0,0 +1,146 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
inventory_service.py
|
| 3 |
+
--------------------
|
| 4 |
+
Stage 6: Inventory Service for Notiflow
|
| 5 |
+
|
| 6 |
+
Tracks stock movements as a delta log in the Inventory Excel sheet.
|
| 7 |
+
Each event appends one row recording what changed, by how much,
|
| 8 |
+
and in which direction (in / out).
|
| 9 |
+
|
| 10 |
+
Design: delta-log (not current-stock snapshot)
|
| 11 |
+
- Every inventory change is a new row
|
| 12 |
+
- Current stock for an item = sum of all deltas for that item
|
| 13 |
+
- This keeps the history intact and avoids row-update complexity
|
| 14 |
+
|
| 15 |
+
Directions:
|
| 16 |
+
"out" — stock leaves (order fulfilled)
|
| 17 |
+
"in" — stock arrives (return accepted, restock)
|
| 18 |
+
"""
|
| 19 |
+
|
| 20 |
+
import logging
|
| 21 |
+
from datetime import datetime, timezone
|
| 22 |
+
|
| 23 |
+
from app.utils.excel_writer import append_row, read_sheet
|
| 24 |
+
|
| 25 |
+
logger = logging.getLogger(__name__)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
# ---------------------------------------------------------------------------
|
| 29 |
+
# Internal helpers
|
| 30 |
+
# ---------------------------------------------------------------------------
|
| 31 |
+
|
| 32 |
+
def _now_iso() -> str:
|
| 33 |
+
return datetime.now(timezone.utc).isoformat()
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
# ---------------------------------------------------------------------------
|
| 37 |
+
# Public API
|
| 38 |
+
# ---------------------------------------------------------------------------
|
| 39 |
+
|
| 40 |
+
def deduct_stock(item: str, quantity: int | float, reference_id: str, note: str = "") -> dict:
    """
    Append an outbound ("out") stock movement to the Inventory delta log.

    Args:
        item: Inventory item name.
        quantity: Units leaving stock; must be a positive number.
        reference_id: ID of the triggering record (e.g. order_id, invoice_id).
        note: Optional human-readable annotation.

    Returns:
        The persisted movement dict, or {} when quantity is missing or
        non-positive (the call is logged and ignored).
    """
    if quantity is None or quantity <= 0:
        logger.warning("deduct_stock called with invalid quantity: %s", quantity)
        return {}

    movement = {
        "timestamp": _now_iso(),
        "item": item,
        "change": quantity,
        "direction": "out",
        "reference_id": reference_id,
        "note": note or "stock deducted",
    }
    append_row("Inventory", movement)
    logger.info("Stock deducted: %s × %s (ref: %s)", quantity, item, reference_id)
    return movement
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def add_stock(item: str, quantity: int | float, reference_id: str, note: str = "") -> dict:
    """
    Append an inbound ("in") stock movement to the Inventory delta log.

    Args:
        item: Inventory item name.
        quantity: Units entering stock; must be a positive number.
        reference_id: ID of the triggering record (e.g. return_id).
        note: Optional human-readable annotation.

    Returns:
        The persisted movement dict, or {} when quantity is missing or
        non-positive (the call is logged and ignored).
    """
    if quantity is None or quantity <= 0:
        logger.warning("add_stock called with invalid quantity: %s", quantity)
        return {}

    movement = {
        "timestamp": _now_iso(),
        "item": item,
        "change": quantity,
        "direction": "in",
        "reference_id": reference_id,
        "note": note or "stock added",
    }
    append_row("Inventory", movement)
    logger.info("Stock added: %s × %s (ref: %s)", quantity, item, reference_id)
    return movement
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def get_stock_level(item: str) -> int | float:
    """
    Calculate the current stock level for an item by summing all deltas.

    Args:
        item: Name of the inventory item (case-insensitive match).

    Returns:
        Net stock level (int or float), floored at 0 for display.
        Returns 0 if the sheet is empty or the item has no movement rows.
    """
    df = read_sheet("Inventory")

    if df.empty or "item" not in df.columns:
        return 0

    # astype(str) guards against NaN / non-string cells breaking .str.lower()
    item_rows = df[df["item"].astype(str).str.lower() == item.lower()]

    if item_rows.empty:
        return 0

    total = 0
    for _, row in item_rows.iterrows():
        change = row.get("change", 0)
        if not isinstance(change, (int, float)):
            # Coerce strings like "3" read back from the sheet; skip junk.
            try:
                change = float(change)
            except (TypeError, ValueError):
                change = 0
        if change != change:  # NaN from a blank pandas cell — no movement
            change = 0
        if row.get("direction", "out") == "in":
            total += change
        else:
            total -= change

    return max(total, 0)  # Stock can't go below 0 in the display
|
app/services/invoice_service.py
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
invoice_service.py
|
| 3 |
+
------------------
|
| 4 |
+
Invoice generation service for Notiflow.
|
| 5 |
+
|
| 6 |
+
Responsibility: build a structured invoice object only.
|
| 7 |
+
Excel persistence is handled separately by the skill layer.
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
from __future__ import annotations
|
| 11 |
+
|
| 12 |
+
import logging
|
| 13 |
+
from datetime import datetime, timezone
|
| 14 |
+
from typing import Optional
|
| 15 |
+
|
| 16 |
+
logger = logging.getLogger(__name__)
|
| 17 |
+
|
| 18 |
+
_CATALOG_PRICES: dict[str, float] = {
|
| 19 |
+
"sugar": 50.0,
|
| 20 |
+
"atta": 50.0,
|
| 21 |
+
"rice": 60.0,
|
| 22 |
+
"kurti": 50.0,
|
| 23 |
+
}
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def _now_iso() -> str:
|
| 27 |
+
return datetime.now(timezone.utc).isoformat()
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def _make_invoice_id() -> str:
|
| 31 |
+
return f"INV-{datetime.now(timezone.utc).strftime('%Y%m%d%H%M%S')}"
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class InvoiceBuilder:
    """Create normalized invoice payloads from item, quantity, and optional price."""

    def __init__(self, catalog_prices: dict[str, float] | None = None):
        # Fall back to the module-level catalog when no override is supplied.
        self.catalog_prices = catalog_prices or _CATALOG_PRICES

    def build(
        self,
        *,
        customer: Optional[str],
        item: Optional[str],
        quantity: Optional[int | float],
        price: Optional[float] = None,
        order_id: Optional[str] = None,
    ) -> dict:
        """
        Build a normalized invoice dict.

        The payload carries both flat fields (item / quantity / unit_price)
        and an "items" list, plus duplicate id / total aliases so different
        readers can consume the same record.
        """
        new_id = _make_invoice_id()
        clean_item = (item or "").strip() or None
        effective_qty = quantity or 0
        unit_price = self._resolve_price(clean_item, price)
        amount = round(float(effective_qty) * unit_price, 2)

        payload = {
            "id": new_id,
            "invoice_id": new_id,
            "timestamp": _now_iso(),
            "order_id": order_id,
            "customer": customer,
            "items": [{"name": clean_item, "qty": effective_qty, "price": unit_price}],
            "item": clean_item,
            "quantity": quantity,
            "unit_price": unit_price,
            "total": amount,
            "total_amount": amount,
            "status": "pending",
        }

        logger.info(
            "Invoice generated: %s | customer=%s item=%s qty=%s total=%.2f",
            new_id, customer, clean_item, quantity, amount,
        )
        return payload

    def _resolve_price(self, item: str | None, override_price: Optional[float]) -> float:
        """Explicit override wins; then catalog lookup; 50.0 is the final default."""
        if override_price is not None:
            return float(override_price)
        if not item:
            return 50.0
        return float(self.catalog_prices.get(item.lower(), 50.0))
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def generate_invoice(
    customer: Optional[str],
    item: Optional[str],
    quantity: Optional[int | float],
    unit_price: float = 50.0,
    order_id: Optional[str] = None,
) -> dict:
    """
    Module-level convenience wrapper around InvoiceBuilder.build().

    NOTE(review): unit_price defaults to 50.0 and is always forwarded as an
    explicit price override, so the builder's catalog prices are never
    consulted through this wrapper — confirm that is intended.
    """
    return InvoiceBuilder().build(
        customer=customer,
        item=item,
        quantity=quantity,
        price=unit_price,
        order_id=order_id,
    )
|
app/services/notification_generator.py
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
services/notification_generator.py
|
| 3 |
+
-----------------------------------
|
| 4 |
+
Gemini-powered business notification generator for Notiflow.
|
| 5 |
+
|
| 6 |
+
Purpose: generate realistic Hinglish business notifications for demo
|
| 7 |
+
automation. This is entirely optional — the frontend simulation continues
|
| 8 |
+
to work independently.
|
| 9 |
+
|
| 10 |
+
Two modes:
|
| 11 |
+
1. Live Gemini generation — calls Gemini API (requires GEMINI_API_KEY)
|
| 12 |
+
2. Static fallback pool — returns from a hardcoded set when Gemini
|
| 13 |
+
is unavailable (safe for offline demos)
|
| 14 |
+
|
| 15 |
+
Public API
|
| 16 |
+
----------
|
| 17 |
+
get_notifications(n: int = 5) -> list[dict]
|
| 18 |
+
Returns a list of notification dicts:
|
| 19 |
+
[{"source": "whatsapp", "message": "..."}, ...]
|
| 20 |
+
|
| 21 |
+
stream_notifications(n, delay_seconds) -> AsyncGenerator
|
| 22 |
+
Async generator yielding one notification at a time with a delay.
|
| 23 |
+
Used by the WebSocket endpoint.
|
| 24 |
+
"""
|
| 25 |
+
|
| 26 |
+
from __future__ import annotations
|
| 27 |
+
|
| 28 |
+
import asyncio
|
| 29 |
+
import logging
|
| 30 |
+
import random
|
| 31 |
+
from typing import AsyncGenerator
|
| 32 |
+
|
| 33 |
+
logger = logging.getLogger(__name__)
|
| 34 |
+
|
| 35 |
+
# ---------------------------------------------------------------------------
|
| 36 |
+
# Static fallback pool (used when Gemini is unavailable)
|
| 37 |
+
# ---------------------------------------------------------------------------
|
| 38 |
+
|
| 39 |
+
_FALLBACK_NOTIFICATIONS: list[dict] = [
|
| 40 |
+
{"source": "whatsapp", "message": "bhaiya 3 kurti bhej dena"},
|
| 41 |
+
{"source": "payment", "message": "rahul ne 15000 bheja UPI se"},
|
| 42 |
+
{"source": "whatsapp", "message": "size chota hai exchange karna hai"},
|
| 43 |
+
{"source": "whatsapp", "message": "udhar me de dijiye"},
|
| 44 |
+
{"source": "amazon", "message": "priya ke liye 2 kilo aata bhej dena"},
|
| 45 |
+
{"source": "payment", "message": "amit bhai ka 8000 gpay se aaya"},
|
| 46 |
+
{"source": "whatsapp", "message": "3 kurti ka set ready rakhna"},
|
| 47 |
+
{"source": "return", "message": "maal kharab tha wapas bhej diya"},
|
| 48 |
+
{"source": "whatsapp", "message": "suresh ko 500 ka maal udhar dena"},
|
| 49 |
+
{"source": "amazon", "message": "order cancel karna hai, size bada hai"},
|
| 50 |
+
{"source": "payment", "message": "50 piece pack karke rakhna kal tak"},
|
| 51 |
+
{"source": "whatsapp", "message": "geeta ke liye 5 metre kapda bhej dena"},
|
| 52 |
+
]
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def _get_fallback(n: int) -> list[dict]:
    """Return n notifications drawn at random from an inflated copy of the static pool."""
    repeats = n // len(_FALLBACK_NOTIFICATIONS) + 1
    pool = _FALLBACK_NOTIFICATIONS * repeats
    return random.sample(pool, min(n, len(pool)))
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
# ---------------------------------------------------------------------------
|
| 62 |
+
# Public API
|
| 63 |
+
# ---------------------------------------------------------------------------
|
| 64 |
+
|
| 65 |
+
def get_notifications(n: int = 5) -> list[dict]:
    """
    Return n business notifications as {"source": str, "message": str} dicts.

    Attempts live Gemini generation first and degrades silently to the
    static fallback pool on any failure (missing key, network error,
    import error, or an empty reply).

    Args:
        n: Number of notifications to generate/return.
    """
    try:
        from app.services.gemini_client import generate_notifications
        generated = generate_notifications(n)
    except Exception as exc:
        logger.info("Gemini unavailable (%s) — using fallback pool.", exc)
    else:
        if generated:
            return generated
        logger.info("Gemini returned empty list — using fallback pool.")

    return _get_fallback(n)
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
async def stream_notifications(
    n: int = 5,
    delay_seconds: float = 2.0,
) -> AsyncGenerator[dict, None]:
    """
    Yield one notification at a time with a pause after each.

    A fresh batch is fetched up front via get_notifications(); the delay is
    applied after every yield, including the last one.

    Args:
        n: Number of notifications per batch.
        delay_seconds: Pause between yielded notifications.

    Yields:
        {"source": str, "message": str}
    """
    batch = get_notifications(n)
    for entry in batch:
        yield entry
        await asyncio.sleep(delay_seconds)
|
app/services/router.py
ADDED
|
@@ -0,0 +1,88 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
router.py
|
| 3 |
+
---------
|
| 4 |
+
Stage 5: Skill Router for Notiflow
|
| 5 |
+
|
| 6 |
+
The Skill Router is the decision layer between the Extraction Agent
|
| 7 |
+
and the Business Skills. It receives a structured event (intent + data)
|
| 8 |
+
and dispatches to the correct skill.
|
| 9 |
+
|
| 10 |
+
Routing table:
|
| 11 |
+
|
| 12 |
+
intent → skill function
|
| 13 |
+
─────────────────────────────────
|
| 14 |
+
order → process_order()
|
| 15 |
+
payment → process_payment()
|
| 16 |
+
credit → process_credit()
|
| 17 |
+
return → process_return()
|
| 18 |
+
preparation → process_preparation()
|
| 19 |
+
other → (no skill; passthrough)
|
| 20 |
+
|
| 21 |
+
If an intent has no registered skill the router returns a lightweight
|
| 22 |
+
passthrough event so the pipeline never raises on unknown intents.
|
| 23 |
+
"""
|
| 24 |
+
|
| 25 |
+
import logging
|
| 26 |
+
from typing import Any
|
| 27 |
+
|
| 28 |
+
from app.skills.order_skill import process_order
|
| 29 |
+
from app.skills.payment_skill import process_payment
|
| 30 |
+
from app.skills.credit_skill import process_credit
|
| 31 |
+
from app.skills.return_skill import process_return
|
| 32 |
+
from app.skills.preparation_skill import process_preparation
|
| 33 |
+
|
| 34 |
+
logger = logging.getLogger(__name__)
|
| 35 |
+
|
| 36 |
+
# ---------------------------------------------------------------------------
|
| 37 |
+
# Routing table (intent → skill callable)
|
| 38 |
+
# ---------------------------------------------------------------------------
|
| 39 |
+
|
| 40 |
+
_SKILL_MAP: dict[str, Any] = {
|
| 41 |
+
"order": process_order,
|
| 42 |
+
"payment": process_payment,
|
| 43 |
+
"credit": process_credit,
|
| 44 |
+
"return": process_return,
|
| 45 |
+
"preparation": process_preparation,
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
# ---------------------------------------------------------------------------
|
| 50 |
+
# Public API
|
| 51 |
+
# ---------------------------------------------------------------------------
|
| 52 |
+
|
| 53 |
+
def route_to_skill(intent: str, data: dict, context: dict | None = None) -> dict:
    """
    Route a structured business event to the appropriate skill.

    Args:
        intent: The detected intent string (e.g. "payment", "order").
        data: The extracted field dict returned by the Extraction Agent
            (without the "intent" key — that lives at the top level).
        context: Optional pipeline context, forwarded only to skills whose
            signature accepts a ``context`` keyword argument.

    Returns:
        A skill event dict (structure varies per skill but always contains
        an "event" key). For unrecognised intents a passthrough is returned:
        {"event": "unhandled", "intent": intent, "data": data}
    """
    import inspect

    skill_fn = _SKILL_MAP.get(intent)

    if skill_fn is None:
        logger.info("No skill registered for intent '%s' — returning passthrough.", intent)
        return {"event": "unhandled", "intent": intent, "data": data}

    logger.info("Routing intent '%s' to skill: %s", intent, skill_fn.__name__)

    # Decide up front whether the skill accepts `context` by inspecting its
    # signature. The previous `except TypeError: pass` + retry could execute
    # a skill TWICE (re-applying its side effects, e.g. sheet writes) when
    # the TypeError came from inside the skill rather than from the call.
    if context is not None:
        try:
            params = inspect.signature(skill_fn).parameters
        except (TypeError, ValueError):
            params = {}
        accepts_context = "context" in params or any(
            p.kind is inspect.Parameter.VAR_KEYWORD for p in params.values()
        )
        if accepts_context:
            return skill_fn(data, context=context)

    return skill_fn(data)
|
app/services/skill_generator.py
ADDED
|
@@ -0,0 +1,207 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
skill_generator.py
|
| 3 |
+
------------------
|
| 4 |
+
Dynamic Skill Generator for Notiflow.
|
| 5 |
+
|
| 6 |
+
Creates new business skill Python files on demand and registers them in
|
| 7 |
+
skills/skill_registry.json.
|
| 8 |
+
|
| 9 |
+
Public API
|
| 10 |
+
----------
|
| 11 |
+
generate_skill(skill_name: str, description: str) -> dict
|
| 12 |
+
list_skills() -> dict
|
| 13 |
+
|
| 14 |
+
Safety rules:
|
| 15 |
+
- Raises SkillAlreadyExistsError if a skill with the same name exists.
|
| 16 |
+
- Skill names are normalised to snake_case.
|
| 17 |
+
- Generated files follow the standard skill template.
|
| 18 |
+
"""
|
| 19 |
+
|
| 20 |
+
from __future__ import annotations
|
| 21 |
+
|
| 22 |
+
import json
|
| 23 |
+
import logging
|
| 24 |
+
import re
|
| 25 |
+
from pathlib import Path
|
| 26 |
+
from typing import Optional
|
| 27 |
+
|
| 28 |
+
from app.config import ROOT, REGISTRY_FILE
|
| 29 |
+
|
| 30 |
+
logger = logging.getLogger(__name__)
|
| 31 |
+
|
| 32 |
+
SKILLS_DIR = ROOT / "skills"
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
# ---------------------------------------------------------------------------
|
| 36 |
+
# Exceptions
|
| 37 |
+
# ---------------------------------------------------------------------------
|
| 38 |
+
|
| 39 |
+
class SkillAlreadyExistsError(Exception):
|
| 40 |
+
"""Raised when a skill with the given name already exists."""
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
# ---------------------------------------------------------------------------
|
| 44 |
+
# Skill file template
|
| 45 |
+
# ---------------------------------------------------------------------------
|
| 46 |
+
|
| 47 |
+
_SKILL_TEMPLATE = '''\
|
| 48 |
+
"""
|
| 49 |
+
{skill_name}.py
|
| 50 |
+
{underline}
|
| 51 |
+
Auto-generated business skill for Notiflow.
|
| 52 |
+
|
| 53 |
+
Description: {description}
|
| 54 |
+
|
| 55 |
+
Modify this file to implement the skill logic.
|
| 56 |
+
"""
|
| 57 |
+
|
| 58 |
+
from __future__ import annotations
|
| 59 |
+
|
| 60 |
+
import logging
|
| 61 |
+
from datetime import datetime, timezone
|
| 62 |
+
|
| 63 |
+
logger = logging.getLogger(__name__)
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def {func_name}(data: dict) -> dict:
|
| 67 |
+
"""
|
| 68 |
+
Execute the {display_name} skill.
|
| 69 |
+
|
| 70 |
+
Args:
|
| 71 |
+
data: Validated extraction dict from the orchestrator.
|
| 72 |
+
|
| 73 |
+
Returns:
|
| 74 |
+
Structured skill event dict.
|
| 75 |
+
"""
|
| 76 |
+
logger.info("{display_name} skill executing: %s", data)
|
| 77 |
+
|
| 78 |
+
return {{
|
| 79 |
+
"event": "{event_name}",
|
| 80 |
+
"data": data,
|
| 81 |
+
"timestamp": datetime.now(timezone.utc).isoformat(),
|
| 82 |
+
}}
|
| 83 |
+
'''
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
# ---------------------------------------------------------------------------
|
| 87 |
+
# Helpers
|
| 88 |
+
# ---------------------------------------------------------------------------
|
| 89 |
+
|
| 90 |
+
def _to_snake_case(name: str) -> str:
|
| 91 |
+
"""Normalise skill name to snake_case (alphanumeric + underscores only)."""
|
| 92 |
+
name = name.strip().lower()
|
| 93 |
+
name = re.sub(r"[^a-z0-9]+", "_", name)
|
| 94 |
+
name = re.sub(r"_+", "_", name).strip("_")
|
| 95 |
+
return name
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def _load_registry() -> dict:
    """Read the skill registry JSON; return {} when the file is missing or unreadable."""
    path = Path(REGISTRY_FILE)
    if not path.exists():
        return {}
    try:
        return json.loads(path.read_text(encoding="utf-8"))
    except (json.JSONDecodeError, OSError) as exc:
        logger.warning("Could not read registry: %s", exc)
        return {}
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
def _save_registry(registry: dict) -> None:
    """Persist the registry as pretty-printed UTF-8 JSON, creating parent dirs as needed."""
    target = Path(REGISTRY_FILE)
    target.parent.mkdir(parents=True, exist_ok=True)
    payload = json.dumps(registry, indent=2, ensure_ascii=False)
    target.write_text(payload, encoding="utf-8")
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
# ---------------------------------------------------------------------------
|
| 118 |
+
# Public API
|
| 119 |
+
# ---------------------------------------------------------------------------
|
| 120 |
+
|
| 121 |
+
def generate_skill(skill_name: str, description: str) -> dict:
    """
    Generate a new skill file from the template and register it.

    Args:
        skill_name: Human-readable name (e.g. "discount_skill" or
            "Discount Skill"); normalised to snake_case automatically.
        description: One-line description stored in the registry.

    Returns:
        The registry entry dict for the new skill:
        {"description": str, "intent": None,
         "file": "skills/<name>.py", "builtin": False}

    Raises:
        SkillAlreadyExistsError: If the name is already registered or the
            target .py file already exists on disk.
        ValueError: If skill_name normalises to an empty string.

    Example:
        >>> generate_skill("discount_skill", "Apply discount to an order")
        {"description": "Apply discount...", "file": "skills/discount_skill.py", ...}
    """
    norm_name = _to_snake_case(skill_name)
    if not norm_name:
        raise ValueError(f"Invalid skill name: {skill_name!r}")

    skill_file = SKILLS_DIR / f"{norm_name}.py"
    registry = _load_registry()

    # Refuse to clobber an existing registration or file.
    if norm_name in registry:
        raise SkillAlreadyExistsError(
            f"Skill '{norm_name}' already exists in the registry. "
            "Choose a different name or delete the existing entry first."
        )
    if skill_file.exists():
        raise SkillAlreadyExistsError(
            f"Skill file '{skill_file}' already exists on disk. "
            "Choose a different name or delete the existing file first."
        )

    # Render the template into Python source.
    source = _SKILL_TEMPLATE.format(
        skill_name=norm_name,
        underline="-" * (len(norm_name) + 3),  # width of "<name>.py"
        description=description,
        func_name=norm_name,
        display_name=norm_name.replace("_", " ").title(),
        event_name=f"{norm_name}_executed",
    )

    SKILLS_DIR.mkdir(parents=True, exist_ok=True)
    skill_file.write_text(source, encoding="utf-8")
    logger.info("Skill file created: %s", skill_file)

    # Record the new skill in the registry.
    entry = {
        "description": description,
        "intent": None,  # caller can update after creation
        "file": f"skills/{norm_name}.py",
        "builtin": False,
    }
    registry[norm_name] = entry
    _save_registry(registry)
    logger.info("Skill '%s' registered.", norm_name)

    return entry
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
def list_skills() -> dict:
    """
    Return the full skill registry.

    Reads skills/skill_registry.json on every call (no caching), so the
    result reflects skills generated since startup.

    Returns:
        Dict mapping skill_name → registry entry; {} when the registry
        file is missing or unreadable.
    """
    return _load_registry()
|
app/skills/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
"""app/skills — business skill implementations."""
|
app/skills/credit_skill.py
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
credit_skill.py
|
| 3 |
+
---------------
|
| 4 |
+
Business Skill: Credit / Udhar (Stage 6 — with persistence)
|
| 5 |
+
|
| 6 |
+
Handles the "credit" intent.
|
| 7 |
+
Appends a credit entry to the Ledger sheet.
|
| 8 |
+
|
| 9 |
+
Expected input fields:
|
| 10 |
+
customer (str | None)
|
| 11 |
+
item (str | None)
|
| 12 |
+
quantity (int | None)
|
| 13 |
+
amount (int | None)
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
import logging
|
| 17 |
+
from datetime import datetime, timezone
|
| 18 |
+
|
| 19 |
+
from app.utils.excel_writer import append_row, read_sheet
|
| 20 |
+
|
| 21 |
+
logger = logging.getLogger(__name__)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def _now_iso() -> str:
|
| 25 |
+
return datetime.now(timezone.utc).isoformat()
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def _generate_entry_id(prefix: str) -> str:
    """Build a ledger entry ID like "CRD-20240115-0007".

    The sequence number is the current Ledger row count + 1.
    NOTE(review): the counter is global rather than per-day, and the sheet
    is re-read on every call — concurrent writers or deleted rows could
    yield duplicate IDs; confirm this is acceptable.
    """
    today = datetime.now(timezone.utc).strftime("%Y%m%d")
    df = read_sheet("Ledger")
    seq = len(df) + 1
    return f"{prefix}-{today}-{seq:04d}"
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def process_credit(data: dict) -> dict:
    """
    Record a credit (udhar) event by appending an entry to the Ledger sheet.

    Args:
        data: Extracted fields dict. Expected keys: customer, item,
            quantity, amount (any may be None).

    Returns:
        {"event": "credit_recorded", "credit": <ledger entry dict>}
    """
    logger.info("CreditSkill processing: %s", data)

    ledger_entry = {
        "entry_id": _generate_entry_id("CRD"),
        "timestamp": _now_iso(),
        "type": "credit",
        "customer": data.get("customer"),
        "item": data.get("item"),
        "quantity": data.get("quantity"),
        "amount": data.get("amount"),
        "payment_type": None,  # no payment method on a credit entry
        "status": "open",
    }
    append_row("Ledger", ledger_entry)

    return {"event": "credit_recorded", "credit": ledger_entry}
|
app/skills/order_skill.py
ADDED
|
@@ -0,0 +1,130 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
order_skill.py
|
| 3 |
+
--------------
|
| 4 |
+
Business Skill: Order (Stage 6 + UPGRADE 1 — memory update)
|
| 5 |
+
|
| 6 |
+
On each order event this skill:
|
| 7 |
+
1. Appends an order record to the Orders sheet
|
| 8 |
+
2. Deducts stock from Inventory (delta log)
|
| 9 |
+
3. Generates an invoice object and saves it to Invoices sheet
|
| 10 |
+
4. Updates agent memory with customer + item ← NEW (Upgrade 1)
|
| 11 |
+
|
| 12 |
+
Expected input fields (all may be None if not captured):
|
| 13 |
+
customer (str | None)
|
| 14 |
+
item (str | None)
|
| 15 |
+
quantity (int | None)
|
| 16 |
+
"""
|
| 17 |
+
|
| 18 |
+
from __future__ import annotations
|
| 19 |
+
|
| 20 |
+
import logging
|
| 21 |
+
from datetime import datetime, timezone
|
| 22 |
+
|
| 23 |
+
from app.core.event_bus import emit_event, push_live_log, store_invoice
|
| 24 |
+
from app.utils.excel_writer import append_row, read_sheet
|
| 25 |
+
from app.services.invoice_service import generate_invoice
|
| 26 |
+
from app.services.inventory_service import deduct_stock
|
| 27 |
+
from app.memory.agent_memory import update_memory
|
| 28 |
+
|
| 29 |
+
logger = logging.getLogger(__name__)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def _now_iso() -> str:
|
| 33 |
+
return datetime.now(timezone.utc).isoformat()
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def _generate_order_id() -> str:
    """Build an order ID like "ORD-20240115-0001".

    The sequence number is the current Orders row count + 1.
    NOTE(review): the counter is global (not per-day) and non-atomic — two
    concurrent orders, or deleted rows, could produce duplicate IDs;
    confirm this is acceptable for the workload.
    """
    today = datetime.now(timezone.utc).strftime("%Y%m%d")
    df = read_sheet("Orders")
    seq = len(df) + 1
    return f"ORD-{today}-{seq:04d}"
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def process_order(data: dict, context: dict | None = None) -> dict:
    """
    Process an order event: persist order, update inventory, generate invoice,
    and update agent memory.

    Args:
        data: Validated extraction dict. Keys: customer, item, quantity
            (any of which may be None if not captured upstream).
        context: Optional orchestrator context. When provided, the invoice is
            attached to it and live-log / event-bus notifications are emitted;
            when None, persistence still happens but no events are published.

    Returns:
        {
            "event": "order_received",
            "order": { order record },
            "invoice": { invoice record }
        }
    """
    logger.info("OrderSkill ← %s", data)

    customer = data.get("customer")
    item = data.get("item")
    quantity = data.get("quantity")
    order_id = _generate_order_id()

    # 1 ── Persist order ──────────────────────────────────────────────────────
    order = {
        "order_id": order_id,
        "timestamp": _now_iso(),
        "customer": customer,
        "item": item,
        "quantity": quantity,
        "status": "pending",
    }
    append_row("Orders", order)

    # 2 ── Inventory deduction ────────────────────────────────────────────────
    # Skipped when item or quantity is missing/zero: nothing concrete to deduct.
    if item and quantity:
        deduct_stock(item, quantity, reference_id=order_id, note="order fulfilled")

    # 3 ── Invoice generation ─────────────────────────────────────────────────
    invoice = generate_invoice(
        customer=customer,
        item=item,
        quantity=quantity,
        order_id=order_id,
        unit_price=50.0,  # TODO(review): hardcoded default price — confirm pricing source
    )
    invoice = store_invoice(invoice)
    append_row("Invoices", invoice)
    if context is not None:
        context["invoice"] = invoice
        invoice_log = push_live_log(context, {
            "agent": "ExecutionAgent",
            "status": "success",
            "action": f"Invoice created: {invoice['invoice_id']}",
            "detail": f"[ExecutionAgent] Invoice created: {invoice['invoice_id']}",
        })
        emit_event(
            context,
            "invoice_generated",
            invoice,
            agent="ExecutionAgent",
            step="execution",
            message=f"Invoice created: {invoice['invoice_id']}",
            log_entry=invoice_log,
        )
        payment_log = push_live_log(context, {
            "agent": "ExecutionAgent",
            "status": "success",
            "action": f"Payment requested for {invoice['invoice_id']}",
            "detail": f"[ExecutionAgent] Payment requested: {invoice['invoice_id']}",
        })
        emit_event(
            context,
            "payment_requested",
            invoice,
            agent="ExecutionAgent",
            step="payment",
            message=f"Payment requested for {invoice['invoice_id']}",
            log_entry=payment_log,
        )

    # 4 ── Memory update (Upgrade 1): remember customer + item for later turns.
    update_memory(customer=customer, item=item)

    return {
        "event": "order_received",
        "order": order,
        "invoice": invoice,
    }
|
app/skills/payment_skill.py
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
payment_skill.py
|
| 3 |
+
----------------
|
| 4 |
+
Business Skill: Payment (Stage 6 — with persistence)
|
| 5 |
+
|
| 6 |
+
Handles the "payment" intent.
|
| 7 |
+
Appends a payment entry to the Ledger sheet.
|
| 8 |
+
|
| 9 |
+
Expected input fields:
|
| 10 |
+
customer (str | None) — name of the person who sent money
|
| 11 |
+
amount (int | None) — monetary amount
|
| 12 |
+
payment_type (str | None) — "cash", "upi", "online", "cheque", or None
|
| 13 |
+
"""
|
| 14 |
+
|
| 15 |
+
import logging
|
| 16 |
+
from datetime import datetime, timezone
|
| 17 |
+
|
| 18 |
+
from app.utils.excel_writer import append_row, read_sheet
|
| 19 |
+
|
| 20 |
+
logger = logging.getLogger(__name__)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def _now_iso() -> str:
|
| 24 |
+
return datetime.now(timezone.utc).isoformat()
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def _generate_entry_id(prefix: str) -> str:
    """Build a sequential ledger entry ID: <prefix>-YYYYMMDD-XXXX.

    The sequence is the current Ledger row count plus one.
    """
    stamp = datetime.now(timezone.utc).strftime("%Y%m%d")
    ledger = read_sheet("Ledger")
    sequence = len(ledger) + 1
    return f"{prefix}-{stamp}-{sequence:04d}"
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def process_payment(data: dict) -> dict:
    """
    Record a payment event in the Ledger sheet.

    Args:
        data: Extracted fields dict. Expected keys: customer, amount,
            payment_type (each may be None if not captured).

    Returns:
        {
            "event": "payment_recorded",
            "payment": { ledger entry }
        }
    """
    logger.info("PaymentSkill processing: %s", data)

    ledger_entry = {
        "entry_id": _generate_entry_id("PAY"),
        "timestamp": _now_iso(),
        "type": "payment",
        "customer": data.get("customer"),
        "item": None,       # payments carry no item/quantity
        "quantity": None,
        "amount": data.get("amount"),
        "payment_type": data.get("payment_type"),
        "status": "received",
    }

    append_row("Ledger", ledger_entry)

    return {
        "event": "payment_recorded",
        "payment": ledger_entry,
    }
|
app/skills/preparation_skill.py
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
preparation_skill.py
|
| 3 |
+
--------------------
|
| 4 |
+
Business Skill: Preparation / Inventory Pack (Stage 6 — with persistence)
|
| 5 |
+
|
| 6 |
+
Handles the "preparation" intent.
|
| 7 |
+
Appends a preparation task to the Inventory sheet as a "reserved" movement.
|
| 8 |
+
|
| 9 |
+
This records that stock is being set aside / packed, without fully
|
| 10 |
+
deducting it (deduction happens when the linked order ships).
|
| 11 |
+
If no linked order exists (standalone prep task), the record still logs
|
| 12 |
+
the intention for the shop owner's reference.
|
| 13 |
+
|
| 14 |
+
Expected input fields:
|
| 15 |
+
item (str | None) — item to prepare or pack
|
| 16 |
+
quantity (int | None) — number of units to prepare
|
| 17 |
+
"""
|
| 18 |
+
|
| 19 |
+
import logging
|
| 20 |
+
from datetime import datetime, timezone
|
| 21 |
+
|
| 22 |
+
from app.utils.excel_writer import append_row, read_sheet
|
| 23 |
+
|
| 24 |
+
logger = logging.getLogger(__name__)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# ---------------------------------------------------------------------------
|
| 28 |
+
# Internal helpers
|
| 29 |
+
# ---------------------------------------------------------------------------
|
| 30 |
+
|
| 31 |
+
def _now_iso() -> str:
|
| 32 |
+
return datetime.now(timezone.utc).isoformat()
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def _generate_prep_id() -> str:
    """Generate a sequential preparation ID: PREP-YYYYMMDD-XXXX."""
    date_tag = datetime.now(timezone.utc).strftime("%Y%m%d")
    # Sequence from the count of existing "reserved" movements in Inventory;
    # fall back to counting all rows when the direction column is absent/empty.
    inventory = read_sheet("Inventory")
    if not inventory.empty and "direction" in inventory.columns:
        reserved = inventory[inventory["direction"] == "reserved"]
    else:
        reserved = inventory
    return f"PREP-{date_tag}-{len(reserved) + 1:04d}"
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
# ---------------------------------------------------------------------------
|
| 46 |
+
# Public API
|
| 47 |
+
# ---------------------------------------------------------------------------
|
| 48 |
+
|
| 49 |
+
def process_preparation(data: dict) -> dict:
    """
    Process a preparation / packing task and log it to the Inventory sheet.

    The movement is recorded with direction="reserved": it appears in the
    inventory log but does not decrement available stock — the actual
    deduction happens when the linked order ships.

    Args:
        data: Extracted fields dict from the Extraction Agent.
            Expected keys: item, quantity (either may be None).

    Returns:
        {
            "event": "preparation_queued",
            "preparation": {
                "prep_id": str,
                "timestamp": ISO-8601 str,
                "item": str | None,
                "quantity": int | None,
                "status": "queued"
            }
        }
    """
    logger.info("PreparationSkill processing: %s", data)

    item = data.get("item")
    quantity = data.get("quantity")
    prep_id = _generate_prep_id()

    # Only write an inventory movement when we actually know the item.
    if item:
        movement = {
            "timestamp": _now_iso(),
            "item": item,
            "change": quantity or 0,
            "direction": "reserved",
            "reference_id": prep_id,
            "note": "preparation task queued",
        }
        append_row("Inventory", movement)

    task = {
        "prep_id": prep_id,
        "timestamp": _now_iso(),
        "item": item,
        "quantity": quantity,
        "status": "queued",
    }

    logger.info("Preparation task logged: %s", prep_id)

    return {
        "event": "preparation_queued",
        "preparation": task,
    }
|