Spaces:
Sleeping
Sleeping
'code'
Browse files- .gitattributes +0 -35
- .gitignore +11 -0
- Dockerfile +18 -0
- agent_architect.py +639 -0
- confg.py +23 -0
- extractor_agent_runner.py +397 -0
- firebase/serviceAccountKey.json +13 -0
- main.py +378 -0
- requirements.txt +9 -0
- schemas.py +13 -0
- test/test_connection.py +70 -0
- test/test_validation.py +18 -0
- validation_agent.py +190 -0
- web_search_agent.py +20 -0
- web_search_tool.py +10 -0
.gitattributes
DELETED
|
@@ -1,35 +0,0 @@
|
|
| 1 |
-
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
-
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
-
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
-
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
-
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
-
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
-
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
-
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
-
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
-
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
-
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
-
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
-
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
-
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
-
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
-
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
-
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
-
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
-
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
-
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
-
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
-
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
-
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
-
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
-
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
-
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
-
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
-
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
-
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
-
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
-
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
-
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
-
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
-
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
-
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
.gitignore
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Python-generated files
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[oc]
|
| 4 |
+
build/
|
| 5 |
+
dist/
|
| 6 |
+
wheels/
|
| 7 |
+
*.egg-info
|
| 8 |
+
|
| 9 |
+
# Virtual environments
|
| 10 |
+
.venv
|
| 11 |
+
.env
|
Dockerfile
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Base image
FROM python:3.11-slim

# Set work directory
WORKDIR /app

# Install dependencies first so Docker's layer cache skips the pip install
# when only application code changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy project files
COPY . .

# Expose the port Hugging Face expects
EXPOSE 7860

# Command to run FastAPI with uvicorn.
# Fixed: this repository ships main.py (not app.py), so the ASGI import
# path must be "main:app" — "app:app" would fail at container start with
# "Could not import module 'app'". (Assumes main.py defines `app` — the
# conventional FastAPI entry point; confirm against main.py.)
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
agent_architect.py
ADDED
|
@@ -0,0 +1,639 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# agent_architect.py
|
| 2 |
+
"""
|
| 3 |
+
Smart Business-Specific Agent Builder
|
| 4 |
+
Automatically detects business domain and creates relevant tools
|
| 5 |
+
"""
|
| 6 |
+
import asyncio
|
| 7 |
+
import json
|
| 8 |
+
from typing import Dict, Any, List, Optional, Tuple
|
| 9 |
+
from datetime import datetime
|
| 10 |
+
from pydantic import BaseModel, Field
|
| 11 |
+
from firebase_admin import db
|
| 12 |
+
from agents import Agent, AsyncOpenAI as AgentsAsyncOpenAI, OpenAIChatCompletionsModel, function_tool
|
| 13 |
+
from web_search_tool import web_search
|
| 14 |
+
|
| 15 |
+
# ------------------------
|
| 16 |
+
# Models
|
| 17 |
+
# ------------------------
|
| 18 |
+
class ToolDefinition(BaseModel):
    """Serializable description of a single agent tool.

    Holds the metadata needed to persist and redeploy a tool: its name,
    a human-readable description, the implementation source as text
    (``function_code``), and a JSON-Schema-style ``parameters`` spec.
    """

    name: str              # tool identifier, matches the decorated function name
    description: str       # human-readable summary shown to the model
    function_code: str     # source text of the tool (stored for deployment/export)
    parameters: dict = Field(default_factory=dict)  # JSON-Schema parameter spec

    class Config:
        # Reject unknown fields so a malformed tool spec fails loudly
        # instead of being silently accepted.
        extra = "forbid"
| 25 |
+
|
| 26 |
+
class AgentConfiguration(BaseModel):
    """Complete, persistable configuration for one generated business agent.

    This is the declarative half of a built agent: everything needed to
    reconstruct it (instructions, model name, tool definitions) without
    the live runtime objects.
    """

    agent_id: str                    # unique identifier for the built agent
    name: str                        # display name
    instructions: str                # system prompt / behavioral instructions
    model: str                       # LLM model identifier to run the agent on
    tools: List[ToolDefinition]      # serializable tool specs (see ToolDefinition)
    tone: str                        # desired conversational tone
    business_context: Dict[str, Any] # extracted business metadata used to build the agent
    deployment_ready: bool = True    # flag consumed downstream; defaults to ready

    class Config:
        # Strict schema: unknown fields are rejected rather than ignored.
        extra = "forbid"
| 37 |
+
|
| 38 |
+
class AgentBuildResult(BaseModel):
    """Outcome envelope returned by the agent build pipeline.

    On success ``agent_config`` / ``agent_instance`` are populated; on
    failure ``error`` carries the message and the optional fields stay
    ``None``. ``status`` is a free-form string — presumably something like
    "success"/"error"; confirm against the builder that produces this.
    """

    status: str                                          # build outcome label
    agent_config: Optional[AgentConfiguration] = None    # declarative config (success only)
    agent_instance: Optional[Any] = None                 # live Agent object; Any because it is not a pydantic type
    agent_test_response: Optional[str] = None            # output of a smoke-test query, if run
    deployment_code: Optional[str] = None                # generated standalone deployment source
    error: Optional[str] = None                          # failure message (failure only)
    metadata: Dict[str, Any] = Field(default_factory=dict)  # misc build diagnostics
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
# ------------------------
|
| 49 |
+
# Smart Business Domain Detector
|
| 50 |
+
# ------------------------
|
| 51 |
+
class BusinessDomainDetector:
    """Detects business domain from extraction data.

    Performs simple keyword matching: every domain's keyword list is
    checked against the concatenated business text, and the domain with
    the most keyword hits wins.
    """

    # Domain -> keywords. Order matters for tie-breaking: the earlier
    # domain wins when two domains score equally.
    DOMAIN_KEYWORDS = {
        "pharmacy": ["pharmacy", "pharma", "drug", "medicine", "prescription", "medication", "patient", "medical", "healthcare"],
        "ecommerce": ["ecommerce", "shop", "retail", "store", "product", "cart", "checkout", "order", "delivery"],
        "restaurant": ["restaurant", "food", "menu", "order", "reservation", "dining", "kitchen", "waiter", "chef"],
        "education": ["education", "school", "student", "teacher", "course", "class", "learn", "exam", "tutor"],
        "real_estate": ["real estate", "property", "house", "apartment", "rent", "buy", "mortgage", "realtor"],
        "agriculture": ["agriculture", "farm", "crop", "harvest", "soil", "fertilizer", "irrigation", "farming"],
        "finance": ["finance", "bank", "loan", "investment", "account", "transaction", "payment", "credit"],
        "logistics": ["logistics", "delivery", "shipping", "warehouse", "transport", "tracking", "freight"],
        "hotel": ["hotel", "booking", "room", "reservation", "guest", "check-in", "hospitality"],
        "fitness": ["fitness", "gym", "workout", "exercise", "trainer", "health", "nutrition", "sports"],
        "saas": ["saas", "software", "subscription", "cloud", "platform", "app", "application", "users", "billing"],
        "school": ["school", "student", "teacher", "classroom", "curriculum", "lesson", "grade", "homework", "attendance"],
        "climate": ["climate", "carbon", "emissions", "sustainability", "environmental", "greenhouse", "renewable", "footprint", "advisor"],
        "agritech": ["agritech", "precision", "farming", "drone", "sensor", "iot", "yield", "monitoring", "smart"],
        "weather": ["weather", "forecast", "temperature", "precipitation", "humidity", "wind", "meteorology", "prediction"]
    }

    @classmethod
    def detect(cls, business_name: str, industry: str, features: List[str], query: str) -> str:
        """Return the best-matching domain key, or "generic" if nothing matches.

        All four inputs are folded into one lowercase haystack; each
        keyword scores one point when it appears as a substring.
        """
        haystack = " ".join([business_name, industry, query, *features]).lower()

        # Explicit best-so-far scan; `>` (not `>=`) preserves the
        # first-domain-wins tie-break of the keyword table's order.
        best_domain = "generic"
        best_score = 0
        for domain, keywords in cls.DOMAIN_KEYWORDS.items():
            hits = sum(keyword in haystack for keyword in keywords)
            if hits > best_score:
                best_domain = domain
                best_score = hits
        return best_domain
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
# ------------------------
|
| 89 |
+
# Dynamic Tool Factory
|
| 90 |
+
# ------------------------
|
| 91 |
+
class DynamicToolFactory:
|
| 92 |
+
"""Creates business-specific tools based on domain"""
|
| 93 |
+
|
| 94 |
+
    @staticmethod
    def create_pharmacy_tools() -> Tuple[List[callable], List[ToolDefinition]]:
        """Pharmacy management tools"""
        # Placeholder init — both are reassigned with the final lists below.
        tools, defs = [], []

        # NOTE(review): all three tools below return canned stub data;
        # wire to a real pharmacy backend before production use.
        @function_tool
        async def manage_prescription(action: str, prescription_id: str = None, patient_id: str = None, medication: str = None) -> dict:
            """Manage prescriptions - check, refill, or create"""
            # Generates a timestamp-based RX id when none is supplied.
            return {"prescription_id": prescription_id or f"RX-{datetime.now().strftime('%Y%m%d%H%M')}",
                    "action": action, "status": "Processed", "refills": 3}

        @function_tool
        async def check_drug_inventory(medication_name: str) -> dict:
            """Check medication stock and expiry"""
            return {"medication": medication_name, "in_stock": True, "quantity": 250, "expiry": "2026-06-15"}

        @function_tool
        async def get_patient_info(patient_id: str) -> dict:
            """Retrieve patient records and allergies"""
            return {"patient_id": patient_id, "allergies": ["Penicillin"], "medications": ["Metformin"]}

        # Callable tools for the runtime agent; web_search is shared across domains.
        tools = [manage_prescription, check_drug_inventory, get_patient_info, web_search]
        # Parallel serializable specs (names must match the callables above).
        defs = [
            ToolDefinition(name="manage_prescription", description="Manage prescriptions",
                           function_code="@function_tool\nasync def manage_prescription(...): ...",
                           parameters={"type": "object", "properties": {"action": {"type": "string"}, "prescription_id": {"type": "string"}}}),
            ToolDefinition(name="check_drug_inventory", description="Check medication stock",
                           function_code="@function_tool\nasync def check_drug_inventory(...): ...",
                           parameters={"type": "object", "properties": {"medication_name": {"type": "string"}}, "required": ["medication_name"]}),
            ToolDefinition(name="get_patient_info", description="Get patient info",
                           function_code="@function_tool\nasync def get_patient_info(...): ...",
                           parameters={"type": "object", "properties": {"patient_id": {"type": "string"}}, "required": ["patient_id"]}),
            ToolDefinition(name="web_search", description="Perform a web search for current information",
                           function_code="@function_tool\ndef web_search(query: str): ...",
                           parameters={"type": "object", "properties": {"query": {"type": "string"}}, "required": ["query"]})
        ]
        return tools, defs
|
| 131 |
+
|
| 132 |
+
    @staticmethod
    def create_ecommerce_tools() -> Tuple[List[callable], List[ToolDefinition]]:
        """E-commerce tools"""
        # Placeholder init — both are reassigned with the final lists below.
        tools, defs = [], []

        # NOTE(review): stub implementations returning canned data.
        @function_tool
        async def search_products(query: str, category: str = None) -> dict:
            """Search product catalog"""
            return {"query": query, "results": [{"id": "P001", "name": query, "price": 49.99, "stock": 50}]}

        @function_tool
        async def track_order(order_id: str) -> dict:
            """Track order status and delivery"""
            return {"order_id": order_id, "status": "In Transit", "eta": "2025-11-20", "location": "Distribution Center"}

        @function_tool
        async def manage_cart(action: str, product_id: str = None, quantity: int = 1) -> dict:
            """Add, remove, or view cart items"""
            return {"action": action, "product_id": product_id, "cart_total": 149.99, "items": 3}

        # Runtime callables plus the shared web_search tool.
        tools = [search_products, track_order, manage_cart, web_search]
        # Parallel serializable specs for persistence/deployment.
        defs = [
            ToolDefinition(name="search_products", description="Search products", function_code="...",
                           parameters={"type": "object", "properties": {"query": {"type": "string"}}, "required": ["query"]}),
            ToolDefinition(name="track_order", description="Track order", function_code="...",
                           parameters={"type": "object", "properties": {"order_id": {"type": "string"}}, "required": ["order_id"]}),
            ToolDefinition(name="manage_cart", description="Manage shopping cart", function_code="...",
                           parameters={"type": "object", "properties": {"action": {"type": "string", "enum": ["add", "remove", "view"]}}}),
            ToolDefinition(name="web_search", description="Perform a web search for current information",
                           function_code="@function_tool\ndef web_search(query: str): ...",
                           parameters={"type": "object", "properties": {"query": {"type": "string"}}, "required": ["query"]})
        ]
        return tools, defs
|
| 165 |
+
|
| 166 |
+
    @staticmethod
    def create_restaurant_tools() -> Tuple[List[callable], List[ToolDefinition]]:
        """Restaurant management tools"""
        # Placeholder init — both are reassigned with the final lists below.
        tools, defs = [], []

        # NOTE(review): stub implementations returning canned data.
        @function_tool
        async def manage_reservation(action: str, customer_name: str = None, date: str = None, party_size: int = 2) -> dict:
            """Create, modify, or cancel restaurant reservations"""
            # Reservation id is derived from the current timestamp.
            return {"reservation_id": f"RES-{datetime.now().strftime('%Y%m%d%H%M')}",
                    "customer": customer_name, "date": date, "party_size": party_size, "status": "Confirmed"}

        @function_tool
        async def view_menu(category: str = None) -> dict:
            """View restaurant menu by category"""
            return {"category": category or "All", "items": [
                {"name": "Burger", "price": 12.99, "available": True},
                {"name": "Pizza", "price": 15.99, "available": True}
            ]}

        @function_tool
        async def track_order_status(order_id: str) -> dict:
            """Check kitchen order status and estimated time"""
            return {"order_id": order_id, "status": "Preparing", "estimated_time": "15 minutes", "items_ready": 2, "items_pending": 1}

        # Runtime callables plus the shared web_search tool.
        tools = [manage_reservation, view_menu, track_order_status, web_search]
        # Parallel serializable specs for persistence/deployment.
        defs = [
            ToolDefinition(name="manage_reservation", description="Manage reservations", function_code="...",
                           parameters={"type": "object", "properties": {"action": {"type": "string", "enum": ["create", "modify", "cancel"]}, "customer_name": {"type": "string"}}}),
            ToolDefinition(name="view_menu", description="View menu", function_code="...",
                           parameters={"type": "object", "properties": {"category": {"type": "string"}}}),
            ToolDefinition(name="track_order_status", description="Track order", function_code="...",
                           parameters={"type": "object", "properties": {"order_id": {"type": "string"}}, "required": ["order_id"]}),
            ToolDefinition(name="web_search", description="Perform a web search for current information",
                           function_code="@function_tool\ndef web_search(query: str): ...",
                           parameters={"type": "object", "properties": {"query": {"type": "string"}}, "required": ["query"]})
        ]
        return tools, defs
|
| 203 |
+
|
| 204 |
+
    @staticmethod
    def create_education_tools() -> Tuple[List[callable], List[ToolDefinition]]:
        """Education platform tools"""
        # Placeholder init — both are reassigned with the final lists below.
        tools, defs = [], []

        # NOTE(review): stub implementations returning canned data.
        @function_tool
        async def search_courses(subject: str, level: str = "beginner") -> dict:
            """Search available courses"""
            return {"subject": subject, "level": level, "courses": [{"id": "C001", "title": f"{subject} Basics", "duration": "8 weeks", "price": 49.99}]}

        @function_tool
        async def enroll_student(student_id: str, course_id: str) -> dict:
            """Enroll student in course"""
            # Enrollment id uses a raw timestamp, so it is unique per call.
            return {"enrollment_id": f"ENR-{datetime.now().timestamp()}", "student_id": student_id, "course_id": course_id, "status": "Enrolled", "start_date": "2025-12-01"}

        @function_tool
        async def check_progress(student_id: str, course_id: str) -> dict:
            """Check student progress in course"""
            return {"student_id": student_id, "course_id": course_id, "progress": "65%", "completed_lessons": 13, "total_lessons": 20, "grade": "B+"}

        # Runtime callables plus the shared web_search tool.
        tools = [search_courses, enroll_student, check_progress, web_search]
        # Parallel serializable specs for persistence/deployment.
        defs = [
            ToolDefinition(name="search_courses", description="Search courses", function_code="...",
                           parameters={"type": "object", "properties": {"subject": {"type": "string"}, "level": {"type": "string"}}, "required": ["subject"]}),
            ToolDefinition(name="enroll_student", description="Enroll in course", function_code="...",
                           parameters={"type": "object", "properties": {"student_id": {"type": "string"}, "course_id": {"type": "string"}}, "required": ["student_id", "course_id"]}),
            ToolDefinition(name="check_progress", description="Check progress", function_code="...",
                           parameters={"type": "object", "properties": {"student_id": {"type": "string"}, "course_id": {"type": "string"}}, "required": ["student_id", "course_id"]}),
            ToolDefinition(name="web_search", description="Perform a web search for current information",
                           function_code="@function_tool\ndef web_search(query: str): ...",
                           parameters={"type": "object", "properties": {"query": {"type": "string"}}, "required": ["query"]})
        ]
        return tools, defs
|
| 237 |
+
|
| 238 |
+
    @staticmethod
    def create_saas_tools() -> Tuple[List[callable], List[ToolDefinition]]:
        """SaaS platform tools"""
        # Placeholder init — both are reassigned with the final lists below.
        tools, defs = [], []

        # NOTE(review): stub implementations returning canned data.
        @function_tool
        async def manage_subscription(action: str, user_id: str, plan: str = None) -> dict:
            """Manage user subscriptions"""
            # Defaults to the "pro" plan when no plan is supplied.
            return {"user_id": user_id, "action": action, "plan": plan or "pro", "status": "active", "next_billing": "2025-12-01"}

        @function_tool
        async def track_usage_metrics(user_id: str) -> dict:
            """Track user engagement metrics"""
            return {"user_id": user_id, "daily_active": 42, "monthly_active": 1250, "feature_usage": {"dashboard": 95, "reports": 76, "api": 34}}

        @function_tool
        async def handle_billing_inquiry(user_id: str, inquiry_type: str) -> dict:
            """Handle billing and payment inquiries"""
            return {"user_id": user_id, "inquiry_type": inquiry_type, "balance": 29.99, "due_date": "2025-12-01", "payment_methods": ["card", "paypal"]}

        # Runtime callables plus the shared web_search tool.
        tools = [manage_subscription, track_usage_metrics, handle_billing_inquiry, web_search]
        # Parallel serializable specs for persistence/deployment.
        defs = [
            ToolDefinition(name="manage_subscription", description="Manage user subscriptions", function_code="...",
                           parameters={"type": "object", "properties": {"action": {"type": "string", "enum": ["upgrade", "downgrade", "cancel"]}, "user_id": {"type": "string"}, "plan": {"type": "string"}}, "required": ["action", "user_id"]}),
            ToolDefinition(name="track_usage_metrics", description="Track user engagement metrics", function_code="...",
                           parameters={"type": "object", "properties": {"user_id": {"type": "string"}}, "required": ["user_id"]}),
            ToolDefinition(name="handle_billing_inquiry", description="Handle billing inquiries", function_code="...",
                           parameters={"type": "object", "properties": {"user_id": {"type": "string"}, "inquiry_type": {"type": "string", "enum": ["balance", "payment", "refund"]}}, "required": ["user_id", "inquiry_type"]}),
            ToolDefinition(name="web_search", description="Perform a web search for current information",
                           function_code="@function_tool\ndef web_search(query: str): ...",
                           parameters={"type": "object", "properties": {"query": {"type": "string"}}, "required": ["query"]})
        ]
        return tools, defs
|
| 271 |
+
|
| 272 |
+
    @staticmethod
    def create_school_tools() -> Tuple[List[callable], List[ToolDefinition]]:
        """School management tools"""
        # Placeholder init — both are reassigned with the final lists below.
        tools, defs = [], []

        # NOTE(review): stub implementations returning canned data.
        @function_tool
        async def manage_student_record(action: str, student_id: str, data: dict = None) -> dict:
            """Manage student records"""
            # Echoes which fields would be updated; empty when no data passed.
            return {"student_id": student_id, "action": action, "status": "updated", "updated_fields": list(data.keys()) if data else []}

        @function_tool
        async def schedule_class(teacher_id: str, subject: str, date: str, duration: int) -> dict:
            """Schedule classes and manage timetable"""
            return {"teacher_id": teacher_id, "subject": subject, "date": date, "duration": duration, "room": "A101", "status": "scheduled"}

        @function_tool
        async def track_attendance(class_id: str, date: str) -> dict:
            """Track student attendance"""
            return {"class_id": class_id, "date": date, "present": 25, "absent": 3, "late": 2}

        # Runtime callables plus the shared web_search tool.
        tools = [manage_student_record, schedule_class, track_attendance, web_search]
        # Parallel serializable specs for persistence/deployment.
        defs = [
            ToolDefinition(name="manage_student_record", description="Manage student records", function_code="...",
                           parameters={"type": "object", "properties": {"action": {"type": "string", "enum": ["create", "update", "delete"]}, "student_id": {"type": "string"}, "data": {"type": "object"}}, "required": ["action", "student_id"]}),
            ToolDefinition(name="schedule_class", description="Schedule classes", function_code="...",
                           parameters={"type": "object", "properties": {"teacher_id": {"type": "string"}, "subject": {"type": "string"}, "date": {"type": "string", "format": "date"}, "duration": {"type": "integer"}}, "required": ["teacher_id", "subject", "date", "duration"]}),
            ToolDefinition(name="track_attendance", description="Track student attendance", function_code="...",
                           parameters={"type": "object", "properties": {"class_id": {"type": "string"}, "date": {"type": "string", "format": "date"}}, "required": ["class_id", "date"]}),
            ToolDefinition(name="web_search", description="Perform a web search for current information",
                           function_code="@function_tool\ndef web_search(query: str): ...",
                           parameters={"type": "object", "properties": {"query": {"type": "string"}}, "required": ["query"]})
        ]
        return tools, defs
|
| 305 |
+
|
| 306 |
+
    @staticmethod
    def create_climate_advisor_tools() -> Tuple[List[callable], List[ToolDefinition]]:
        """Climate advisory tools"""
        # Placeholder init — both are reassigned with the final lists below.
        tools, defs = [], []

        # NOTE(review): stub implementations with hard-coded factors/canned data.
        @function_tool
        async def calculate_carbon_footprint(industry: str, energy_usage: float, transport_km: float) -> dict:
            """Calculate carbon footprint"""
            # Simple linear model: 0.5 kg/unit energy + 0.2 kg/km transport —
            # placeholder coefficients, not real emission factors.
            return {"industry": industry, "carbon_kg": energy_usage * 0.5 + transport_km * 0.2, "rating": "B", "offset_suggestion": "Plant 5 trees"}

        @function_tool
        async def recommend_sustainability_measures(company_size: str, industry: str) -> dict:
            """Recommend sustainability measures"""
            return {"company_size": company_size, "industry": industry, "recommendations": ["Switch to LED lighting", "Implement recycling program", "Use renewable energy"], "roi_months": 18}

        @function_tool
        async def track_env_progress(goal_id: str) -> dict:
            """Track environmental progress"""
            return {"goal_id": goal_id, "current_value": 45, "target_value": 100, "progress_percent": 45, "milestones": ["Baseline established", "20% reduction achieved"]}

        # Runtime callables plus the shared web_search tool.
        tools = [calculate_carbon_footprint, recommend_sustainability_measures, track_env_progress, web_search]
        # Parallel serializable specs for persistence/deployment.
        defs = [
            ToolDefinition(name="calculate_carbon_footprint", description="Calculate carbon footprint", function_code="...",
                           parameters={"type": "object", "properties": {"industry": {"type": "string"}, "energy_usage": {"type": "number"}, "transport_km": {"type": "number"}}, "required": ["industry", "energy_usage", "transport_km"]}),
            ToolDefinition(name="recommend_sustainability_measures", description="Recommend sustainability measures", function_code="...",
                           parameters={"type": "object", "properties": {"company_size": {"type": "string"}, "industry": {"type": "string"}}, "required": ["company_size", "industry"]}),
            ToolDefinition(name="track_env_progress", description="Track environmental progress", function_code="...",
                           parameters={"type": "object", "properties": {"goal_id": {"type": "string"}}, "required": ["goal_id"]}),
            ToolDefinition(name="web_search", description="Perform a web search for current information",
                           function_code="@function_tool\ndef web_search(query: str): ...",
                           parameters={"type": "object", "properties": {"query": {"type": "string"}}, "required": ["query"]})
        ]
        return tools, defs
|
| 339 |
+
|
| 340 |
+
    @staticmethod
    def create_agritech_tools() -> Tuple[List[callable], List[ToolDefinition]]:
        """Agritech tools"""
        # Placeholder init — both are reassigned with the final lists below.
        tools, defs = [], []

        # NOTE(review): stub implementations with toy formulas/canned data.
        @function_tool
        async def analyze_crop_health(field_id: str, sensor_data: dict) -> dict:
            """Analyze crop health from sensor data"""
            # sensor_data is currently ignored by the stub.
            return {"field_id": field_id, "health_index": 85, "issues": ["minor pest infestation"], "recommendations": ["Apply organic pesticide"]}

        @function_tool
        async def predict_yield(crop_type: str, field_area: float, weather_forecast: dict) -> dict:
            """Predict crop yield"""
            # Toy model: yield scales linearly with area (x2.5); forecast unused.
            return {"crop_type": crop_type, "predicted_yield": field_area * 2.5, "confidence": 0.85, "optimal_harvest_date": "2026-06-15"}

        @function_tool
        async def irrigation_schedule(field_id: str, soil_moisture: float, forecast_rain: float) -> dict:
            """Optimize irrigation schedule"""
            # Irrigate only when soil is dry (<30) and little rain is expected (<10).
            return {"field_id": field_id, "irrigate": soil_moisture < 30 and forecast_rain < 10, "amount_mm": 25, "timing": "early morning"}

        # Runtime callables plus the shared web_search tool.
        tools = [analyze_crop_health, predict_yield, irrigation_schedule, web_search]
        # Parallel serializable specs for persistence/deployment.
        defs = [
            ToolDefinition(name="analyze_crop_health", description="Analyze crop health", function_code="...",
                           parameters={"type": "object", "properties": {"field_id": {"type": "string"}, "sensor_data": {"type": "object"}}, "required": ["field_id", "sensor_data"]}),
            ToolDefinition(name="predict_yield", description="Predict crop yield", function_code="...",
                           parameters={"type": "object", "properties": {"crop_type": {"type": "string"}, "field_area": {"type": "number"}, "weather_forecast": {"type": "object"}}, "required": ["crop_type", "field_area", "weather_forecast"]}),
            ToolDefinition(name="irrigation_schedule", description="Optimize irrigation schedule", function_code="...",
                           parameters={"type": "object", "properties": {"field_id": {"type": "string"}, "soil_moisture": {"type": "number"}, "forecast_rain": {"type": "number"}}, "required": ["field_id", "soil_moisture", "forecast_rain"]}),
            ToolDefinition(name="web_search", description="Perform a web search for current information",
                           function_code="@function_tool\ndef web_search(query: str): ...",
                           parameters={"type": "object", "properties": {"query": {"type": "string"}}, "required": ["query"]})
        ]
        return tools, defs
|
| 373 |
+
|
| 374 |
+
@staticmethod
def create_weather_tools() -> Tuple[List[callable], List[ToolDefinition]]:
    """Weather forecasting tools: forecast, alerts, historical comparison, web search.

    Returns:
        Tuple of (callable tool functions, matching ToolDefinition schemas).

    Note: the tool bodies are stubs returning canned data; wire them to a real
    weather API before production use.
    """

    @function_tool
    async def get_forecast(location: str, days: int = 7) -> dict:
        """Get weather forecast"""
        # Stub: fixed sample forecast entry regardless of inputs.
        return {"location": location, "days": days, "forecast": [{"date": "2025-12-12", "high": 22, "low": 15, "condition": "partly cloudy"}]}

    @function_tool
    async def severe_weather_alert(location: str) -> dict:
        """Check for severe weather alerts"""
        # Stub: always reports no active alerts.
        return {"location": location, "alerts": [], "severity": "none", "preparedness_tips": ["Normal precautions"]}

    @function_tool
    async def historical_weather_comparison(location: str, date: str) -> dict:
        """Compare current weather to historical data"""
        # Stub: fixed comparison figures.
        return {"location": location, "date": date, "current_temp": 20, "historical_avg": 18, "difference": 2, "percentile": 65}

    # Fix: dropped the dead `tools, defs = [], []` initialization that was
    # immediately overwritten below.
    tools = [get_forecast, severe_weather_alert, historical_weather_comparison, web_search]
    defs = [
        ToolDefinition(name="get_forecast", description="Get weather forecast", function_code="...",
                       parameters={"type": "object", "properties": {"location": {"type": "string"}, "days": {"type": "integer", "default": 7}}, "required": ["location"]}),
        ToolDefinition(name="severe_weather_alert", description="Check for severe weather alerts", function_code="...",
                       parameters={"type": "object", "properties": {"location": {"type": "string"}}, "required": ["location"]}),
        ToolDefinition(name="historical_weather_comparison", description="Compare weather to historical data", function_code="...",
                       parameters={"type": "object", "properties": {"location": {"type": "string"}, "date": {"type": "string", "format": "date"}}, "required": ["location", "date"]}),
        ToolDefinition(name="web_search", description="Perform a web search for current information",
                       function_code="@function_tool\ndef web_search(query: str): ...",
                       parameters={"type": "object", "properties": {"query": {"type": "string"}}, "required": ["query"]})
    ]
    return tools, defs
|
| 407 |
+
|
| 408 |
+
@staticmethod
def create_generic_business_tools(business_name: str) -> Tuple[List[callable], List[ToolDefinition]]:
    """Generic business tools: analytics, notifications, and web search.

    Fallback tool set used when no specific domain is detected.

    Args:
        business_name: Used only to personalize the analytics tool description.

    Returns:
        Tuple of (callable tool functions, matching ToolDefinition schemas).
    """

    @function_tool
    async def generate_analytics(metric: str, time_range: str) -> dict:
        """Generate business analytics"""
        # Stub: canned figures; replace with a real analytics backend.
        return {"metric": metric, "time_range": time_range, "value": 12500, "trend": "+15%", "insights": f"{metric} growing"}

    @function_tool
    async def send_notification(recipient: str, message: str, channel: str = "email") -> dict:
        """Send notifications"""
        # Stub: echoes the request back as "Sent" without contacting any service.
        return {"recipient": recipient, "message": message, "channel": channel, "status": "Sent"}

    # Fix: dropped the dead `tools, defs = [], []` initialization that was
    # immediately overwritten below.
    tools = [generate_analytics, send_notification, web_search]
    defs = [
        ToolDefinition(name="generate_analytics", description=f"Generate {business_name} analytics", function_code="...",
                       parameters={"type": "object", "properties": {"metric": {"type": "string"}, "time_range": {"type": "string"}}, "required": ["metric", "time_range"]}),
        # Fix: expose the optional `channel` argument in the schema — it exists
        # in the function signature but was missing from the published
        # parameters, so the model could never choose a delivery channel.
        ToolDefinition(name="send_notification", description="Send notifications", function_code="...",
                       parameters={"type": "object", "properties": {"recipient": {"type": "string"}, "message": {"type": "string"}, "channel": {"type": "string", "default": "email"}}, "required": ["recipient", "message"]}),
        ToolDefinition(name="web_search", description="Perform a web search for current information",
                       function_code="@function_tool\ndef web_search(query: str): ...",
                       parameters={"type": "object", "properties": {"query": {"type": "string"}}, "required": ["query"]})
    ]
    return tools, defs
|
| 434 |
+
|
| 435 |
+
@classmethod
def create_tools_for_domain(cls, domain: str, business_name: str) -> Tuple[List[callable], List[ToolDefinition]]:
    """Factory method to create tools based on domain.

    Looks the domain up in a registry of per-domain factories; any unknown
    domain falls back to the generic business tool set.
    """
    def build_generic():
        # Only the generic factory needs the business name.
        return cls.create_generic_business_tools(business_name)

    registry = {
        "pharmacy": cls.create_pharmacy_tools,
        "ecommerce": cls.create_ecommerce_tools,
        "restaurant": cls.create_restaurant_tools,
        "education": cls.create_education_tools,
        "saas": cls.create_saas_tools,
        "school": cls.create_school_tools,
        "climate": cls.create_climate_advisor_tools,
        "agritech": cls.create_agritech_tools,
        "weather": cls.create_weather_tools,
        "generic": build_generic,
    }

    return registry.get(domain, build_generic)()
|
| 453 |
+
|
| 454 |
+
|
| 455 |
+
# ------------------------
|
| 456 |
+
# Live Agent Architect
|
| 457 |
+
# ------------------------
|
| 458 |
+
class LiveAgentArchitect:
    """Smart agent builder with domain detection.

    Pipeline for one session: detect the business domain, assemble
    domain-specific tools, generate system instructions, wire up the model
    client, and return an AgentBuildResult. Progress is streamed to Firebase
    under ``sessions/<session_id>/logs``.
    """

    def __init__(self, session_id: str):
        # Session id keys the Firebase log stream for this build.
        self.session_id = session_id
        self.detector = BusinessDomainDetector()
        self.tool_factory = DynamicToolFactory()

    async def _emit_log(self, message: str, type: str = "log") -> None:
        """Best-effort push of a log entry to Firebase; swallows all errors."""
        try:
            ref = db.reference(f'sessions/{self.session_id}/logs')
            ref.push({'message': message, 'type': type, 'timestamp': datetime.now().isoformat()})
        except Exception as e:
            # Logging must never abort the build pipeline.
            print(f"Log error: {e}")

    def _generate_comprehensive_instructions(self, extraction: Dict[str, Any], domain: str) -> str:
        """Generate domain-specific comprehensive instructions.

        Args:
            extraction: Dict with 'business' and 'agent' sub-dicts (shapes match
                BusinessExtraction / AgentSpecification dumps — every read below
                supplies a default, so partial payloads are tolerated).
            domain: Detected domain key (e.g. "weather", "generic").

        Returns:
            The full system-prompt text for the agent.
        """
        business = extraction.get('business', {})
        agent_spec = extraction.get('agent', {})

        business_name = business.get('business_name', 'Business')
        industry = business.get('industry', 'Industry')
        capabilities = agent_spec.get('capabilities', [])
        agent_name = agent_spec.get('agent_name', 'AI Assistant')
        core_problem = business.get('core_problem', '')
        solution = business.get('solution', '')

        # Header + identity section of the system prompt.
        instructions = f"""You are the MASTER {industry} expert for {business_name} with comprehensive knowledge across ALL operational domains.

**Your Identity:**
Name: {agent_name}
Domain: {domain.title()}
Specialization: {industry}

**Your Expertise Covers:**
"""
        # One bullet per declared capability.
        for cap in capabilities:
            instructions += f"- {cap}\n"

        # Fixed policy sections (response structure, tool usage, tone).
        instructions += f"""
**The Problem You Solve:**
{core_problem}

**Your Solution Approach:**
{solution}

**Response Structure (MANDATORY):**
Always format your answers in this order:
1. 🎯 **Direct Answer** – Clear, one-sentence response
2. 📚 **Detailed Explanation** – Simple explanation with reasoning
3. 📏 **Practical Steps** – Specific actions, numbers, timings
4. 🌍 **Context & Tips** – Best practices, compliance, regional advice
5. 🔒 **Prevention & Next Steps** – Warnings, monitoring, escalation

**Response Rules:**
1. **Always Use Tools First**: Fetch real-time data before answering
2. **Be Specific**: Provide exact numbers, dates, quantities
3. **Simple Language**: Explain complex topics simply
4. **Actionable**: Every response must have clear next steps
5. **Safety**: Include warnings and when to seek help

**Tool Usage:**
- Use tools proactively for accurate data
- Combine multiple tools for comprehensive answers
- Validate responses before presenting
- Handle tool failures gracefully

**Communication:**
- Warm and supportive
- Acknowledge concerns
- Provide immediate + long-term solutions
- Use examples and analogies

**Always be the expert every user needs!**
"""
        return instructions

    async def build_agent(self, extraction: Dict[str, Any], model: str, api_key: str) -> AgentBuildResult:
        """Build domain-specific agent.

        Args:
            extraction: Combined extraction dict ('business' + 'agent' sections).
            model: Requested model name; only used to pick the provider family
                (gemini / grok / default OpenAI) — see note below.
            api_key: API key for the chosen provider.

        Returns:
            AgentBuildResult with status "success" (config, live instance,
            deployment snippet) or "error" (error message in ``error``).
        """
        try:
            await self._emit_log("🏗️ Building smart business agent...", "system")

            business = extraction.get('business', {})
            agent_spec = extraction.get('agent', {})

            business_name = business.get('business_name', 'Business')
            industry = business.get('industry', '')
            features = business.get('key_features', [])
            # NOTE(review): the extraction schema has no 'query' field, so this
            # is typically '' — confirm whether callers inject it.
            query = business.get('query', '')

            # Detect domain
            domain = self.detector.detect(business_name, industry, features, query)
            await self._emit_log(f"📊 Detected domain: {domain.upper()}", "success")

            # Create domain-specific tools
            tools, tool_defs = self.tool_factory.create_tools_for_domain(domain, business_name)
            await self._emit_log(f"✅ Created {len(tools)} {domain}-specific tools", "success")

            # Generate instructions
            instructions = self._generate_comprehensive_instructions(extraction, domain)
            await self._emit_log("✅ Generated instructions", "success")

            # Setup model client.
            # NOTE(review): only the provider family is honored — the concrete
            # model name is hard-coded per branch, overriding `model`.
            if "gemini" in model.lower():
                client = AgentsAsyncOpenAI(api_key=api_key,
                                           base_url="https://generativelanguage.googleapis.com/v1beta/openai/")
                model_name = "gemini-2.0-flash-exp"
            elif "grok" in model.lower():
                client = AgentsAsyncOpenAI(api_key=api_key, base_url="https://api.x.ai/v1")
                model_name = "grok-beta"
            else:
                client = AgentsAsyncOpenAI(api_key=api_key)
                model_name = "gpt-4o"

            MODEL = OpenAIChatCompletionsModel(model=model_name, openai_client=client)

            # Create agent
            agent_name = agent_spec.get('agent_name', f"{business_name} AI")
            agent_instance = Agent(name=agent_name, instructions=instructions, model=MODEL, tools=tools)

            await self._emit_log("✅ Agent created!", "success")

            # Unique id: domain + snake-cased business name + unix timestamp.
            agent_id = f"agent_{domain}_{business_name.lower().replace(' ', '_')}_{int(datetime.now().timestamp())}"

            agent_config = AgentConfiguration(
                agent_id=agent_id,
                name=agent_name,
                instructions=instructions,
                model=model,
                tools=tool_defs,
                tone="professional",
                business_context={
                    "business_name": business_name,
                    "industry": industry,
                    "domain": domain,
                    "capabilities": agent_spec.get('capabilities', [])
                },
                deployment_ready=True
            )

            # Skeleton script handed back to the user for self-hosting; the
            # tool bodies and full instructions are intentionally elided.
            deployment_code = f'''# {agent_name} - {domain.title()} Agent
from agents import Agent, AsyncOpenAI, OpenAIChatCompletionsModel, function_tool

client = AsyncOpenAI(api_key="YOUR_KEY")
MODEL = OpenAIChatCompletionsModel(model="{model_name}", openai_client=client)

# ... tools here ...

{agent_name.replace(' ', '_')} = Agent(
    name="{agent_name}",
    instructions="""...""",
    model=MODEL,
    tools=[...]
)
'''

            return AgentBuildResult(
                status="success",
                agent_config=agent_config,
                agent_instance=agent_instance,
                agent_test_response=f"{agent_name} ready with {len(tools)} {domain} tools!",
                deployment_code=deployment_code,
                metadata={"session_id": self.session_id, "domain": domain, "tools_count": len(tools)}
            )

        except Exception as e:
            # Surface the failure to the session log and return a typed error
            # result instead of raising to the caller.
            await self._emit_log(f"❌ Failed: {str(e)}", "error")
            return AgentBuildResult(status="error", error=str(e), metadata={})
|
| 626 |
+
|
| 627 |
+
|
| 628 |
+
class AgentDeployer:
    """Persists built agent configurations to the Firebase realtime database."""

    @staticmethod
    async def save_to_firebase(session_id: str, agent_config: AgentConfiguration):
        """Store the config under the session node and the global agents index.

        Returns True on success, False on any Firebase error (best-effort).
        """
        try:
            # Per-session copy of the configuration.
            session_ref = db.reference(f'sessions/{session_id}/agent')
            session_ref.set(agent_config.model_dump())

            # Global index entry, annotated with provenance metadata.
            record = {**agent_config.model_dump(), 'session_id': session_id, 'created_at': datetime.now().isoformat()}
            global_ref = db.reference(f'agents/{agent_config.agent_id}')
            global_ref.set(record)
            return True
        except Exception as e:
            # Persistence is best-effort; report and signal failure.
            print(f"Error: {e}")
            return False
|
confg.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Shared configuration: loads API keys from the environment and builds the
# module-level Gemini (OpenAI-compatible) client, default chat model, and
# Tavily search client used by the rest of the app.
import os
from dotenv import load_dotenv
from agents import AsyncOpenAI,OpenAIChatCompletionsModel,set_tracing_disabled
from tavily import TavilyClient

# Disable agents-SDK tracing/telemetry for this deployment.
set_tracing_disabled(True)
load_dotenv()
# NOTE(review): if these env vars are unset the values are None and the
# clients below are constructed with a None key, failing only at first call —
# confirm both are set in the deployment environment.
gemini_api_key = os.getenv("GEMINI_API_KEY")
tavily_api_key = os.getenv("TAVILY_SECRET_KEY")


# Web-search client (presumably backs the web_search tool — verify).
tavily_client = TavilyClient(api_key=tavily_api_key)

# Gemini accessed through Google's OpenAI-compatible endpoint.
client_provider = AsyncOpenAI(
    api_key=gemini_api_key,
    base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
)

# Default chat model shared by agents built elsewhere in the app.
model = OpenAIChatCompletionsModel(
    model="gemini-2.5-flash",
    openai_client=client_provider
)
|
| 23 |
+
|
extractor_agent_runner.py
ADDED
|
@@ -0,0 +1,397 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# extractor_agent_runner.py
|
| 2 |
+
import asyncio
|
| 3 |
+
import json
|
| 4 |
+
from typing import Dict, Any, Optional
|
| 5 |
+
from datetime import datetime
|
| 6 |
+
from openai import AsyncOpenAI
|
| 7 |
+
from pydantic import BaseModel, Field
|
| 8 |
+
from firebase_admin import db
|
| 9 |
+
|
| 10 |
+
# ------------------------
|
| 11 |
+
# Pydantic Models for Structured Outputs
|
| 12 |
+
# ------------------------
|
| 13 |
+
class BusinessExtraction(BaseModel):
    """Extracted business information.

    Structured-output schema: each Field description is surfaced to the LLM
    through the generated JSON schema, so keep descriptions precise.
    """
    business_name: str = Field(description="Name of the business idea")
    industry: str = Field(description="Industry or sector")
    target_audience: str = Field(description="Target customer base")
    core_problem: str = Field(description="Main problem being solved")
    solution: str = Field(description="Proposed solution")
    key_features: list[str] = Field(description="List of key features or capabilities")
    tech_stack: list[str] = Field(description="Recommended technology stack")
    estimated_complexity: str = Field(description="Low, Medium, or High")

    class Config:
        # Reject unexpected keys — strict schemas (additionalProperties: false)
        # are required for OpenAI structured outputs.
        extra = "forbid"
|
| 26 |
+
|
| 27 |
+
class AgentSpecification(BaseModel):
    """AI Agent specifications.

    Structured-output schema consumed by the orchestrator's agent-design step.
    """
    agent_name: str = Field(description="Name of the AI agent")
    agent_purpose: str = Field(description="Main purpose of the agent")
    capabilities: list[str] = Field(description="List of agent capabilities")
    integrations: list[str] = Field(description="Required integrations or APIs")
    data_requirements: list[str] = Field(description="Data needed for the agent")
    deployment_model: str = Field(description="e.g., Cloud, On-premise, Hybrid")

    class Config:
        # Reject unexpected keys — required for strict structured outputs.
        extra = "forbid"
|
| 38 |
+
|
| 39 |
+
class Phase(BaseModel):
    """Single implementation phase (one entry of ImplementationPlan.phases)."""
    phase: str = Field(description="Phase name")
    duration: str = Field(description="Time duration")
    tasks: list[str] = Field(description="List of tasks in this phase")

    class Config:
        # Reject unexpected keys — required for strict structured outputs.
        extra = "forbid"
|
| 47 |
+
|
| 48 |
+
class ImplementationPlan(BaseModel):
    """Implementation roadmap produced by the planning step."""
    phases: list[Phase] = Field(description="Implementation phases with timeline")
    estimated_timeline: str = Field(description="Overall timeline (e.g., 3-6 months)")
    team_requirements: list[str] = Field(description="Required team members and roles")
    estimated_cost: str = Field(description="Estimated cost range")
    risks: list[str] = Field(description="Potential risks and challenges")
    success_metrics: list[str] = Field(description="KPIs to measure success")

    class Config:
        # Reject unexpected keys — required for strict structured outputs.
        extra = "forbid"
|
| 59 |
+
|
| 60 |
+
class CompleteExtraction(BaseModel):
    """Complete extraction result: aggregate of all pipeline stages."""
    business: BusinessExtraction
    agent: AgentSpecification
    implementation: ImplementationPlan
    summary: str = Field(description="Executive summary of the entire plan")

    class Config:
        # Reject unexpected keys — required for strict structured outputs.
        extra = "forbid"
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
# ------------------------
|
| 72 |
+
# Agent Orchestrator
|
| 73 |
+
# ------------------------
|
| 74 |
+
class AgentOrchestrator:
|
| 75 |
+
"""
|
| 76 |
+
Orchestrates multiple AI agents for business idea extraction.
|
| 77 |
+
Supports OpenAI GPT, Google Gemini, and xAI Grok models.
|
| 78 |
+
"""
|
| 79 |
+
|
| 80 |
+
def __init__(self, session_id: str):
|
| 81 |
+
self.session_id = session_id
|
| 82 |
+
self.client: Optional[AsyncOpenAI] = None
|
| 83 |
+
self.model: str = ""
|
| 84 |
+
|
| 85 |
+
async def _emit_log(self, message: str, type: str = "log"):
    """Emit log to Firebase (best-effort; never raises)."""
    entry = {
        'message': message,
        'type': type,
        'timestamp': datetime.now().isoformat(),
    }
    try:
        db.reference(f'sessions/{self.session_id}/logs').push(entry)
    except Exception as e:
        # Logging must never take down the extraction pipeline.
        print(f"Error emitting log: {e}")
|
| 96 |
+
|
| 97 |
+
def _setup_client(self, model: str, api_key: str):
|
| 98 |
+
"""Setup appropriate client based on model type"""
|
| 99 |
+
model_lower = model.lower()
|
| 100 |
+
|
| 101 |
+
if "gpt" in model_lower or "o1" in model_lower:
|
| 102 |
+
# OpenAI models
|
| 103 |
+
self.client = AsyncOpenAI(api_key=api_key)
|
| 104 |
+
self.model = model
|
| 105 |
+
|
| 106 |
+
elif "grok" in model_lower:
|
| 107 |
+
# xAI Grok models (OpenAI-compatible)
|
| 108 |
+
self.client = AsyncOpenAI(
|
| 109 |
+
api_key=api_key,
|
| 110 |
+
base_url="https://api.x.ai/v1"
|
| 111 |
+
)
|
| 112 |
+
self.model = model
|
| 113 |
+
|
| 114 |
+
elif "gemini" in model_lower:
|
| 115 |
+
# Google Gemini - use direct API
|
| 116 |
+
self.client = None # Will handle separately
|
| 117 |
+
self.model = model
|
| 118 |
+
|
| 119 |
+
else:
|
| 120 |
+
raise ValueError(f"Unsupported model: {model}")
|
| 121 |
+
|
| 122 |
+
async def _call_openai_structured(self, system_prompt: str, user_prompt: str, response_format: type[BaseModel]) -> BaseModel:
    """Call OpenAI with structured output.

    Args:
        system_prompt: System message for the model.
        user_prompt: User message for the model.
        response_format: Pydantic model class the response must conform to.

    Returns:
        The parsed Pydantic instance from the model's response.

    Raises:
        Exception: Wrapping any API failure, with the original exception
            chained as the cause.
    """
    try:
        # beta.chat.completions.parse validates the response against the
        # Pydantic schema server-side (strict structured outputs).
        response = await self.client.beta.chat.completions.parse(
            model=self.model,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt}
            ],
            response_format=response_format,
            temperature=0.7
        )
        return response.choices[0].message.parsed
    except Exception as e:
        # Fix: chain the original exception (`from e`) so the root-cause
        # traceback is preserved instead of being discarded.
        raise Exception(f"OpenAI API call failed: {str(e)}") from e
|
| 137 |
+
|
| 138 |
+
async def _call_gemini_structured(self, system_prompt: str, user_prompt: str, api_key: str, response_format: type[BaseModel]) -> BaseModel:
    """Call Gemini and parse JSON response into Pydantic model.

    Gemini has no native structured-output support in this code path, so the
    JSON schema is embedded in the prompt and the reply is parsed manually.

    Raises:
        Exception: Wrapping any API/parsing failure, with the original
            exception chained as the cause.
    """
    try:
        # Lazy import: google.generativeai is only needed on the Gemini path.
        import google.generativeai as genai
        genai.configure(api_key=api_key)

        model = genai.GenerativeModel(self.model)

        # Embed the target JSON schema in the prompt so the model emits
        # conforming output.
        schema_example = response_format.model_json_schema()

        full_prompt = f"""{system_prompt}

User Query: {user_prompt}

Respond ONLY with valid JSON matching this schema:
{json.dumps(schema_example, indent=2)}

Important: No markdown, no backticks, just pure JSON."""

        response = await model.generate_content_async(full_prompt)

        # Fix: strip optional markdown code fences with removeprefix /
        # removesuffix (3.9+) instead of fragile manual index slicing.
        text = response.text.strip()
        text = text.removeprefix("```json").removeprefix("```").removesuffix("```").strip()

        # Parse and validate with Pydantic.
        data = json.loads(text)
        return response_format(**data)

    except Exception as e:
        # Fix: chain the original exception so the root cause is preserved.
        raise Exception(f"Gemini API call failed: {str(e)}") from e
|
| 176 |
+
|
| 177 |
+
async def _extract_business_info(self, query: str, api_key: str) -> BusinessExtraction:
    """Extract business information from query.

    Step 1 of the pipeline: turns the raw user query into a validated
    BusinessExtraction via the selected model.
    """
    await self._emit_log("🔍 Analyzing business idea...", "system")

    system_prompt = """You are a business analyst expert. Extract detailed business information from the user's query.
Be thorough and specific. If information is not explicitly stated, make reasonable inferences based on the context."""

    user_prompt = f"""Analyze this business idea and extract key information:

Query: {query}

Provide detailed extraction including:
- Business name (create a suitable name if not provided)
- Industry classification
- Target audience
- Core problem being solved
- Proposed solution
- Key features (at least 3-5)
- Recommended tech stack
- Estimated complexity (Low/Medium/High)"""

    # Route by model family: Gemini needs manual JSON parsing, the
    # OpenAI-compatible clients support structured outputs natively.
    if "gemini" in self.model.lower():
        # Gemini: Parse JSON manually
        return await self._call_gemini_structured(system_prompt, user_prompt, api_key, BusinessExtraction)
    else:
        # OpenAI/Grok: Use structured outputs
        return await self._call_openai_structured(system_prompt, user_prompt, BusinessExtraction)
|
| 204 |
+
|
| 205 |
+
async def _extract_agent_specs(self, query: str, business: BusinessExtraction, api_key: str) -> AgentSpecification:
    """Extract AI agent specifications.

    Step 2 of the pipeline: designs an AgentSpecification from the business
    extraction produced in step 1.
    """
    # NOTE(review): `query` is accepted but unused — the prompt is built
    # solely from the business extraction; confirm whether that is intended.
    await self._emit_log("🤖 Designing AI agent specifications...", "system")

    system_prompt = """You are an AI agent architect. Design detailed specifications for an AI agent based on the business requirements.
Be specific about capabilities, integrations, and technical requirements."""

    user_prompt = f"""Design an AI agent for this business:

Business: {business.business_name}
Industry: {business.industry}
Problem: {business.core_problem}
Solution: {business.solution}

Create detailed agent specifications including:
- Agent name
- Primary purpose
- Specific capabilities (at least 5)
- Required integrations (APIs, databases, services)
- Data requirements
- Deployment model (Cloud/On-premise/Hybrid)"""

    # Route by model family (same split as _extract_business_info).
    if "gemini" in self.model.lower():
        return await self._call_gemini_structured(system_prompt, user_prompt, api_key, AgentSpecification)
    else:
        return await self._call_openai_structured(system_prompt, user_prompt, AgentSpecification)
|
| 231 |
+
|
| 232 |
+
async def _create_implementation_plan(self, query: str, business: BusinessExtraction, agent: AgentSpecification, api_key: str) -> ImplementationPlan:
    """Create implementation roadmap.

    Step 3 of the pipeline: builds an ImplementationPlan from the business
    extraction and agent specification.
    """
    # NOTE(review): `query` is accepted but unused here — confirm intent.
    await self._emit_log("📋 Creating implementation roadmap...", "system")

    system_prompt = """You are a project manager and implementation strategist. Create a detailed implementation plan.
Include realistic timelines, resource requirements, and risk assessments."""

    user_prompt = f"""Create an implementation plan for:

Business: {business.business_name}
Complexity: {business.estimated_complexity}
Agent: {agent.agent_name}
Capabilities: {', '.join(agent.capabilities)}

Provide:
- Implementation phases (at least 3-4 phases with specific tasks)
- Overall timeline estimate
- Team requirements (specific roles)
- Cost estimate range
- Potential risks (at least 3-5)
- Success metrics (KPIs)"""

    # Route by model family (same split as _extract_business_info).
    if "gemini" in self.model.lower():
        return await self._call_gemini_structured(system_prompt, user_prompt, api_key, ImplementationPlan)
    else:
        return await self._call_openai_structured(system_prompt, user_prompt, ImplementationPlan)
|
| 258 |
+
|
| 259 |
+
async def _create_summary(self, business: BusinessExtraction, agent: AgentSpecification, implementation: ImplementationPlan, api_key: str) -> str:
    """Create executive summary.

    Step 4 of the pipeline: free-form text (no schema), so this uses the
    plain completion APIs rather than the structured-output helpers.
    """
    await self._emit_log("📝 Generating executive summary...", "system")

    system_prompt = "You are an executive summary writer. Create a concise, compelling summary."

    user_prompt = f"""Create an executive summary (2-3 paragraphs) for:

Business: {business.business_name}
Industry: {business.industry}
Solution: {business.solution}
Agent: {agent.agent_name}
Timeline: {implementation.estimated_timeline}
Cost: {implementation.estimated_cost}

Make it compelling and actionable."""

    if "gemini" in self.model.lower():
        # Gemini path: direct google.generativeai call (lazy import).
        import google.generativeai as genai
        genai.configure(api_key=api_key)
        model = genai.GenerativeModel(self.model)
        response = await model.generate_content_async(f"{system_prompt}\n\n{user_prompt}")
        return response.text.strip()
    else:
        # OpenAI/Grok path: plain chat completion, capped at 500 tokens.
        response = await self.client.chat.completions.create(
            model=self.model,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt}
            ],
            temperature=0.7,
            max_tokens=500
        )
        return response.choices[0].message.content.strip()
|
| 293 |
+
|
| 294 |
+
async def run(self, query: str, model: str, api_key: str) -> Dict[str, Any]:
    """
    Main orchestration method - runs all extraction agents in sequence.

    Pipeline: business analysis → agent design → implementation planning →
    executive summary, emitting a progress log before/after each stage.

    Args:
        query: User's business idea query
        model: Model to use (gpt-4o, gemini-2.0-flash-exp, grok-beta, etc.)
        api_key: API key for the model

    Returns:
        Dict with extraction results or error information; callers should
        check the "status" key ("success" or "error").
    """
    try:
        await self._emit_log(f"🚀 Starting extraction with {model}...", "system")

        # Setup client (provider-specific; configured once for all stages)
        self._setup_client(model, api_key)

        # Step 1: Extract business information
        await self._emit_log("Step 1/4: Business Analysis", "system")
        business = await self._extract_business_info(query, api_key)
        await self._emit_log(f"✅ Identified: {business.business_name}", "success")

        # Step 2: Design agent specifications (consumes step 1 output)
        await self._emit_log("Step 2/4: Agent Design", "system")
        agent = await self._extract_agent_specs(query, business, api_key)
        await self._emit_log(f"✅ Agent designed: {agent.agent_name}", "success")

        # Step 3: Create implementation plan (consumes steps 1-2 output)
        await self._emit_log("Step 3/4: Implementation Planning", "system")
        implementation = await self._create_implementation_plan(query, business, agent, api_key)
        await self._emit_log(f"✅ Timeline: {implementation.estimated_timeline}", "success")

        # Step 4: Generate summary over all prior artifacts
        await self._emit_log("Step 4/4: Summary Generation", "system")
        summary = await self._create_summary(business, agent, implementation, api_key)
        await self._emit_log("✅ Extraction complete!", "success")

        # Compile results into the aggregate pydantic model
        complete_extraction = CompleteExtraction(
            business=business,
            agent=agent,
            implementation=implementation,
            summary=summary
        )

        return {
            "status": "success",
            "stage": "complete",
            "extraction": complete_extraction.model_dump(),
            "metadata": {
                "model_used": model,
                "timestamp": datetime.now().isoformat(),
                "session_id": self.session_id
            }
        }

    except Exception as e:
        # Any stage failure is reported uniformly rather than raised,
        # so the HTTP layer can forward a structured error payload.
        error_msg = str(e)
        await self._emit_log(f"❌ Error: {error_msg}", "error")

        return {
            "status": "error",
            "stage": "extraction",
            "message": error_msg,
            "metadata": {
                "model_used": model,
                "timestamp": datetime.now().isoformat(),
                "session_id": self.session_id
            }
        }
|
| 365 |
+
|
| 366 |
+
|
| 367 |
+
# ------------------------
# Testing
# ------------------------
if __name__ == "__main__":
    import os
    from dotenv import load_dotenv
    load_dotenv()

    async def test_orchestrator():
        """Manual smoke test: run the full orchestration on a sample query."""
        # Mock session ID (no Firebase session is created for this run)
        test_session = "test-session-123"

        # Test query
        query = "Create an AI agent for pharmacy inventory management that tracks medication expiry dates and auto-orders stock"

        # Get API key from env
        api_key = os.getenv("OPENAI_API_KEY", "")
        if not api_key:
            print("❌ OPENAI_API_KEY not found in .env")
            return

        # Run orchestrator
        orchestrator = AgentOrchestrator(session_id=test_session)
        result = await orchestrator.run(query, "gpt-4o", api_key)

        print("\n" + "="*50)
        print("EXTRACTION RESULT")
        print("="*50)
        print(json.dumps(result, indent=2))

    asyncio.run(test_orchestrator())
|
firebase/serviceAccountKey.json
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"type": "service_account",
|
| 3 |
+
"project_id": "agentic-ai-startup",
|
| 4 |
+
"private_key_id": "33d3281d2fd931599c8dec6415aaf7936e5ba62f",
|
| 5 |
+
"private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDImwVM6WzkWIbL\n2qGvi/y0nO/wTnxhHeET/Kv9z1p0zIG7XIsg+jaFFnfN3rPIt6y+soT4jNAzMi1Z\nvL+dtWDvurIwWMnV574vbJg1Owaf5F0FcoY/Qy50eNcGx8DwQHOO/ZkfSw3+TKNn\nzwC6r2nKP7JePaRRSHApPGOGsZt+BWld6iI7i6mH2BrWWqETgewMZskr75yH8HUD\nrCd3sg+w7yP+rlFDRypZLpmpKIknNgFVejzIsgOY2+E1IvQ6q2pDXZh9U1Y36vVY\nTanDwt08lRuxiCGewJ0Emb8k58exawc9Hv+QnggdFq/3BrJyIo92wQzI0ON9WOKQ\nIyiF6tNhAgMBAAECgf8u2lttO0JTktmTgzIq7hHM8q/s+xJ8cZ4Vgk4pWY9RovIp\n8H4OQGJmHc6dfAZBkE9TEB7pGPpc+rsyL64RXOuRvOkhTEYtJOh76xZ3AvI+e00T\n7fZXkDlUEA/TZZQ1M4MfQoZ2PfFmvj+WtKJW9+FIJi24FRhBDnTOghbnaVQHz+b9\ndIcW2vJ8xO82D83vbkSBOKvoMWxdVFE5+V8GQCCtmHZ+kFDEbvX6yVECv9ZyOYiI\nxveVN51TdiTtcI7iRSvqSGSuP4y+UDIuYwWOtjOZV1Hkx6U79eso2lsylGLTXnPc\n1y7+5OuOotPBFLOOuXSMTjqQ67/og6HSo9hZzBkCgYEA8nLh9rZpGKGO0TTrSLMx\nxnn89icB1LTZaP+0FYTPgLrLuxe97vXJQFZCzkeYWCz4Vy0L4T8go8YWk5JmjvPo\nVMhEct6LcC1osZcBPPeCWtE9EwBSJ9GrBRnzO8Sic/HsipFIiyUjVfhY3VGRaLL1\nkzvRg54BEDSYmNA1k02h26kCgYEA09Fq7RGtj0EON1bhoYgovo5bgueJtnOi2GXp\nBgCiwTfnOLTftw0YUjX0Vq+ZC+zzXFaOGzMl2PK8J2YUDNVSooNSVzFaRKCXkypg\nbcADdX4/8Nor/xe8RpaIeYa47WWPff+tlQ3YLfsc3lrs02mBfaZ2d7LSwUixITzj\nw//WTPkCgYEAq499tF3ZupNAA2xF/fjHhSH3TZvGoOSkX7dpkDydtQL5fVfMkBae\npYck2OEyvVp9FPsMSASqrRlUCnLzXb7crXnVbc9qriODzP8E4kZmmKstY4+7ku+7\n6+00ABwhtJgBgQBTbBsIiSImh93bXlIckmYm2NLq/7OoM6JJ9wVdPpkCgYEAlSiV\nSVf3hjdjkWim/p7pQjbxDh+KBGctt4EcNNPhyHpbYr1MBJU+GTZRV3HULmevf8ib\nzV+ZOUrFYGEroVXfGP2s0CW60TTdSA7BVMewJ969nBemf73xISwD8sACHXo1L58K\nVzomj0qTq6kPuFhFjXvZVLUiYGWWBjfglDbma9ECgYEA75ABFZnNJPElBSEaCDvJ\nauuaB2HtC/3OAYdDUMj9Iw2MPB6aBnD6icpYkQqk/zIVRr7HkWGCS+2xSUst0/em\ncF/WmSlb6tbBWqGJHf0PL24q7O2bf8YtrhS0RW2z1zrgs9lVrYhkogFDwR9wBbdi\nlxXBV3u80vROmdsdYXyw4lk=\n-----END PRIVATE KEY-----\n",
|
| 6 |
+
"client_email": "firebase-adminsdk-fbsvc@agentic-ai-startup.iam.gserviceaccount.com",
|
| 7 |
+
"client_id": "105928991960207433542",
|
| 8 |
+
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
|
| 9 |
+
"token_uri": "https://oauth2.googleapis.com/token",
|
| 10 |
+
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
|
| 11 |
+
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/firebase-adminsdk-fbsvc%40agentic-ai-startup.iam.gserviceaccount.com",
|
| 12 |
+
"universe_domain": "googleapis.com"
|
| 13 |
+
}
|
main.py
ADDED
|
@@ -0,0 +1,378 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import uuid
|
| 3 |
+
import time
|
| 4 |
+
import json
|
| 5 |
+
import asyncio
|
| 6 |
+
from fastapi import FastAPI, HTTPException
|
| 7 |
+
from fastapi.middleware.cors import CORSMiddleware
|
| 8 |
+
from pydantic import BaseModel, Field
|
| 9 |
+
from typing import Optional
|
| 10 |
+
import socketio
|
| 11 |
+
from firebase_admin import db, initialize_app, credentials, _apps
|
| 12 |
+
from dotenv import load_dotenv
|
| 13 |
+
from cryptography.fernet import Fernet
|
| 14 |
+
from validation_agent import run_validation_agent
|
| 15 |
+
from extractor_agent_runner import AgentOrchestrator
|
| 16 |
+
from agent_architect import LiveAgentArchitect, AgentDeployer
|
| 17 |
+
from agents import Runner, SQLiteSession, OpenAIChatCompletionsModel, Agent
|
| 18 |
+
from openai import AsyncOpenAI as AgentsAsyncOpenAI
|
| 19 |
+
|
| 20 |
+
load_dotenv()
|
| 21 |
+
|
| 22 |
+
# --- Encryption setup ---
# API keys are encrypted at rest (Firebase) with a symmetric Fernet key
# supplied via the ENCRYPTION_KEY environment variable; startup fails fast
# if it is missing so keys can never be stored in plaintext by accident.
ENCRYPTION_KEY = os.getenv("ENCRYPTION_KEY")
if not ENCRYPTION_KEY:
    raise ValueError("ENCRYPTION_KEY not set in .env")
fernet = Fernet(ENCRYPTION_KEY.encode())
|
| 27 |
+
|
| 28 |
+
def encrypt_api_key(api_key: str) -> str:
    """Encrypt a raw API key into a Fernet token (str) for storage."""
    return fernet.encrypt(api_key.encode()).decode()
|
| 30 |
+
|
| 31 |
+
def decrypt_api_key(encrypted_key: str) -> str:
    """Decrypt a stored Fernet token back into the raw API key."""
    return fernet.decrypt(encrypted_key.encode()).decode()
|
| 33 |
+
|
| 34 |
+
# --- Firebase initialization ---
# NOTE(review): the service-account JSON is committed to the repo — it should
# be injected via a secret instead, and the leaked key rotated.
cred = credentials.Certificate("firebase/serviceAccountKey.json")
# Guard against double initialization (e.g. under reload): only init once.
if not _apps:
    initialize_app(cred, {'databaseURL': os.getenv("FIREBASE_DB_URL")})
|
| 38 |
+
|
| 39 |
+
# --- FastAPI + Socket.IO setup ---
app = FastAPI(title="AgentForge API", version="1.0.0")
# CORS is wide open ("*") — acceptable for a demo Space; tighten for production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Socket.IO shares the ASGI stack with FastAPI; the large buffer size
# (100 MB) allows big log/result payloads over the websocket.
sio = socketio.AsyncServer(async_mode='asgi', cors_allowed_origins="*", max_http_buffer_size=10**8)
socket_app = socketio.ASGIApp(sio, app)
|
| 51 |
+
|
| 52 |
+
# --- Request/Response Models ---
|
| 53 |
+
class ValidateRequest(BaseModel):
    """Request body for POST /validate: the model/key pair to check."""
    model: str = Field(..., description="Model name")
    api_key: str = Field(..., description="API key to validate")
|
| 56 |
+
|
| 57 |
+
class ValidateResponse(BaseModel):
    """Response for POST /validate."""
    is_valid: bool  # True when the key completed a probe request
    error: Optional[str] = None  # human-readable failure reason, if any
    model: str  # echoes back the model that was validated
|
| 61 |
+
|
| 62 |
+
class RunSessionRequest(BaseModel):
    """Request body for POST /session/{sid}/run."""
    query: str = Field(..., description="User's business idea")
    model: str = Field(default="gpt-4o", description="Model to use")
    # BUGFIX: Field(default=os.getenv(...)) froze the value at import time and
    # produced None (invalid for str) when OPENAI_API_KEY was unset. Read the
    # env var at request time via default_factory and fall back to "".
    api_key: str = Field(
        default_factory=lambda: os.getenv("OPENAI_API_KEY") or "",
        description="API key",
    )
|
| 66 |
+
|
| 67 |
+
class SessionResponse(BaseModel):
    """Response for POST /start."""
    session_id: str  # UUID4 identifying the new session in Firebase
    status: str = "created"
|
| 70 |
+
|
| 71 |
+
class RunResponse(BaseModel):
    """Response for POST /session/{sid}/run."""
    status: str  # "success" or "error"
    result: dict  # full extraction/build output, or error details
|
| 74 |
+
|
| 75 |
+
class ChatWithAgentRequest(BaseModel):
    """Request body for POST /agent/chat."""
    agent_id: str = Field(..., description="Agent ID from build result")
    message: str = Field(..., description="User message to the agent")
    session_id: str = Field(..., description="Session ID")
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
# --- Helper functions ---
|
| 82 |
+
async def emit_log(session_id: str, message: str, type: str = "log"):
    """Emit log to Firebase and Socket.IO.

    Pushes an entry under sessions/{session_id}/logs in the Realtime DB and
    broadcasts the same payload on the 'log_update' event to the session's
    Socket.IO room, so connected clients see progress live.

    Note: the `type` parameter shadows the builtin; name kept for
    backward compatibility with existing callers.
    """
    try:
        ref = db.reference(f'sessions/{session_id}/logs')
        ref.push({
            'message': message,
            'type': type,
            'timestamp': time.time()
        })
        await sio.emit('log_update', {
            'message': message,
            'type': type,
            'timestamp': time.time()
        }, room=session_id)
    except Exception as e:
        # Logging must never break the request flow; report locally and move on.
        print(f"Error emitting log: {e}")
|
| 98 |
+
|
| 99 |
+
async def save_session_result(session_id: str, result: dict):
    """Save session result to Firebase under sessions/{session_id}/result.

    Best-effort: failures are printed, not raised, so a DB hiccup cannot
    fail an otherwise successful pipeline run.
    """
    try:
        ref = db.reference(f'sessions/{session_id}/result')
        ref.set(result)
    except Exception as e:
        print(f"Error saving session result: {e}")
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
# --- API Endpoints ---
|
| 109 |
+
@app.get("/", tags=["Health"])
|
| 110 |
+
async def root():
|
| 111 |
+
return {
|
| 112 |
+
"status": "AgentForge API Online",
|
| 113 |
+
"version": "1.0.0",
|
| 114 |
+
"features": ["validation", "extraction", "live_agent_builder"],
|
| 115 |
+
"socket_io": "initialized"
|
| 116 |
+
}
|
| 117 |
+
|
| 118 |
+
@app.post("/validate", response_model=ValidateResponse, tags=["Validation"])
|
| 119 |
+
async def validate_key(request: ValidateRequest) -> ValidateResponse:
|
| 120 |
+
try:
|
| 121 |
+
result = await run_validation_agent(request.model, request.api_key)
|
| 122 |
+
return ValidateResponse(
|
| 123 |
+
is_valid=result.is_valid,
|
| 124 |
+
error=result.error,
|
| 125 |
+
model=request.model
|
| 126 |
+
)
|
| 127 |
+
except Exception as e:
|
| 128 |
+
return ValidateResponse(
|
| 129 |
+
is_valid=False,
|
| 130 |
+
error=f"Validation error: {str(e)}",
|
| 131 |
+
model=request.model
|
| 132 |
+
)
|
| 133 |
+
|
| 134 |
+
@app.post("/start", response_model=SessionResponse, tags=["Sessions"])
|
| 135 |
+
async def start_session() -> SessionResponse:
|
| 136 |
+
session_id = str(uuid.uuid4())
|
| 137 |
+
try:
|
| 138 |
+
db.reference(f'sessions/{session_id}').set({
|
| 139 |
+
'status': 'awaiting_query',
|
| 140 |
+
'created_at': time.time(),
|
| 141 |
+
'logs': [],
|
| 142 |
+
'api_keys': {}
|
| 143 |
+
})
|
| 144 |
+
await emit_log(session_id, "Session started. Ready to receive query.", "system")
|
| 145 |
+
return SessionResponse(session_id=session_id, status="created")
|
| 146 |
+
except Exception as e:
|
| 147 |
+
raise HTTPException(status_code=500, detail=f"Failed to create session: {str(e)}")
|
| 148 |
+
|
| 149 |
+
@app.post("/session/{sid}/run", response_model=RunResponse, tags=["Sessions"])
|
| 150 |
+
async def run_agents(sid: str, request_data: RunSessionRequest) -> RunResponse:
|
| 151 |
+
"""
|
| 152 |
+
Complete pipeline: Validation → Extraction → Live Agent Build
|
| 153 |
+
"""
|
| 154 |
+
query = request_data.query
|
| 155 |
+
model = request_data.model.lower()
|
| 156 |
+
api_key = request_data.api_key
|
| 157 |
+
|
| 158 |
+
if not query or not model or not api_key:
|
| 159 |
+
await emit_log(sid, "Error: Missing required fields", "error")
|
| 160 |
+
raise HTTPException(status_code=400, detail="query, model, and api_key are required")
|
| 161 |
+
|
| 162 |
+
try:
|
| 163 |
+
# Step 1: Log start
|
| 164 |
+
await emit_log(sid, f"💬 User Query: {query}", "user")
|
| 165 |
+
await emit_log(sid, f"🤖 Model: {model}", "system")
|
| 166 |
+
|
| 167 |
+
# Step 2: Encrypt & store key
|
| 168 |
+
encrypted_key = encrypt_api_key(api_key)
|
| 169 |
+
db.reference(f'sessions/{sid}/api_keys/{model}').set(encrypted_key)
|
| 170 |
+
await emit_log(sid, f"🔐 API key encrypted", "success")
|
| 171 |
+
api_key_decrypted = decrypt_api_key(encrypted_key)
|
| 172 |
+
|
| 173 |
+
# Step 3: Validate API key
|
| 174 |
+
await emit_log(sid, "🔍 Validating API key...", "system")
|
| 175 |
+
validation = await run_validation_agent(model, api_key_decrypted)
|
| 176 |
+
|
| 177 |
+
if not validation.is_valid:
|
| 178 |
+
await emit_log(sid, f"❌ Validation failed: {validation.error}", "error")
|
| 179 |
+
return RunResponse(
|
| 180 |
+
status="error",
|
| 181 |
+
result={"stage": "validation", "message": validation.error}
|
| 182 |
+
)
|
| 183 |
+
|
| 184 |
+
await emit_log(sid, "✅ API key validated", "success")
|
| 185 |
+
|
| 186 |
+
# Step 4: Run extraction
|
| 187 |
+
await emit_log(sid, "🚀 Starting extraction...", "system")
|
| 188 |
+
orchestrator = AgentOrchestrator(session_id=sid)
|
| 189 |
+
extraction_result = await orchestrator.run(query, model, api_key_decrypted)
|
| 190 |
+
|
| 191 |
+
if extraction_result["status"] != "success":
|
| 192 |
+
await emit_log(sid, f"❌ Extraction failed", "error")
|
| 193 |
+
db.reference(f'sessions/{sid}').update({'status': 'error'})
|
| 194 |
+
return RunResponse(status="error", result=extraction_result)
|
| 195 |
+
|
| 196 |
+
await emit_log(sid, "✅ Extraction complete!", "success")
|
| 197 |
+
|
| 198 |
+
# Step 5: BUILD LIVE AGENT (This is the missing part!)
|
| 199 |
+
await emit_log(sid, "🏗️ Building live OpenAI agent...", "system")
|
| 200 |
+
await emit_log(sid, "⚙️ Mapping features to tools...", "system")
|
| 201 |
+
|
| 202 |
+
architect = LiveAgentArchitect(session_id=sid)
|
| 203 |
+
extraction_data = extraction_result.get('extraction', {})
|
| 204 |
+
|
| 205 |
+
# Build the agent
|
| 206 |
+
agent_result = await architect.build_agent(extraction_data, model, api_key_decrypted)
|
| 207 |
+
|
| 208 |
+
# Add agent build results to response
|
| 209 |
+
if agent_result.status == "success":
|
| 210 |
+
# Save to Firebase
|
| 211 |
+
await AgentDeployer.save_to_firebase(sid, agent_result.agent_config)
|
| 212 |
+
|
| 213 |
+
# Deployment code is already in agent_result
|
| 214 |
+
deployment_code = agent_result.deployment_code
|
| 215 |
+
|
| 216 |
+
# Add to result
|
| 217 |
+
extraction_result["agent_build"] = {
|
| 218 |
+
"status": "success",
|
| 219 |
+
"agent_id": agent_result.agent_config.agent_id,
|
| 220 |
+
"agent_name": agent_result.agent_config.name,
|
| 221 |
+
"model": agent_result.agent_config.model,
|
| 222 |
+
"tools": [
|
| 223 |
+
{
|
| 224 |
+
"name": tool.name,
|
| 225 |
+
"description": tool.description,
|
| 226 |
+
"parameters": tool.parameters
|
| 227 |
+
} for tool in agent_result.agent_config.tools
|
| 228 |
+
],
|
| 229 |
+
"tools_count": len(agent_result.agent_config.tools),
|
| 230 |
+
"tone": agent_result.agent_config.tone,
|
| 231 |
+
"instructions": agent_result.agent_config.instructions,
|
| 232 |
+
"test_response": agent_result.agent_test_response,
|
| 233 |
+
"deployment_code": deployment_code,
|
| 234 |
+
"business_context": agent_result.agent_config.business_context
|
| 235 |
+
}
|
| 236 |
+
|
| 237 |
+
await emit_log(sid, f"✅ Agent '{agent_result.agent_config.name}' built!", "success")
|
| 238 |
+
await emit_log(sid, f"🆔 Agent ID: {agent_result.agent_config.agent_id}", "info")
|
| 239 |
+
await emit_log(sid, f"🔧 Tools: {len(agent_result.agent_config.tools)}", "info")
|
| 240 |
+
await emit_log(sid, f"🎭 Tone: {agent_result.agent_config.tone}", "info")
|
| 241 |
+
await emit_log(sid, f"\n💬 Test: {agent_result.agent_test_response[:100]}...", "info")
|
| 242 |
+
await emit_log(sid, "\n🎉 LIVE AGENT READY FOR DEPLOYMENT!", "success")
|
| 243 |
+
else:
|
| 244 |
+
await emit_log(sid, f"⚠️ Agent build failed: {agent_result.error}", "warning")
|
| 245 |
+
extraction_result["agent_build"] = {
|
| 246 |
+
"status": "error",
|
| 247 |
+
"error": agent_result.error
|
| 248 |
+
}
|
| 249 |
+
|
| 250 |
+
# Save final result
|
| 251 |
+
await save_session_result(sid, extraction_result)
|
| 252 |
+
|
| 253 |
+
# Summary logs
|
| 254 |
+
business = extraction_data.get('business', {})
|
| 255 |
+
await emit_log(sid, f"\n📊 Business: {business.get('business_name')}", "result")
|
| 256 |
+
await emit_log(sid, f"🏭 Industry: {business.get('industry')}", "result")
|
| 257 |
+
|
| 258 |
+
if extraction_result.get("agent_build", {}).get("status") == "success":
|
| 259 |
+
agent_build = extraction_result["agent_build"]
|
| 260 |
+
await emit_log(sid, f"\n🤖 Live Agent: {agent_build['agent_name']}", "success")
|
| 261 |
+
await emit_log(sid, f"🔧 Tools Available: {agent_build['tools_count']}", "success")
|
| 262 |
+
await emit_log(sid, "\n✅ Complete! Agent is live and ready.", "success")
|
| 263 |
+
|
| 264 |
+
db.reference(f'sessions/{sid}').update({'status': 'completed'})
|
| 265 |
+
return RunResponse(status="success", result=extraction_result)
|
| 266 |
+
|
| 267 |
+
except Exception as e:
|
| 268 |
+
error_msg = str(e)
|
| 269 |
+
await emit_log(sid, f"💥 Error: {error_msg}", "error")
|
| 270 |
+
db.reference(f'sessions/{sid}').update({'status': 'error'})
|
| 271 |
+
raise HTTPException(status_code=500, detail=error_msg)
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
@app.post("/agent/chat", tags=["Agent Execution"])
|
| 275 |
+
async def chat_with_agent(request: ChatWithAgentRequest):
|
| 276 |
+
try:
|
| 277 |
+
agent_id = request.agent_id
|
| 278 |
+
user_message = request.message
|
| 279 |
+
session_id = request.session_id
|
| 280 |
+
|
| 281 |
+
# Get agent configuration from Firebase
|
| 282 |
+
agent_ref = db.reference(f'agents/{agent_id}')
|
| 283 |
+
agent_data = agent_ref.get()
|
| 284 |
+
|
| 285 |
+
if not agent_data:
|
| 286 |
+
raise HTTPException(status_code=404, detail="Agent not found")
|
| 287 |
+
|
| 288 |
+
# Get encrypted API key from session
|
| 289 |
+
session_ref = db.reference(f'sessions/{session_id}/api_keys')
|
| 290 |
+
api_keys = session_ref.get()
|
| 291 |
+
|
| 292 |
+
model = agent_data.get('model', 'gpt-4o')
|
| 293 |
+
model_key = model.split('-')[0] if '-' in model else model
|
| 294 |
+
|
| 295 |
+
encrypted_key = None
|
| 296 |
+
for key_name, key_value in (api_keys or {}).items():
|
| 297 |
+
if model_key in key_name:
|
| 298 |
+
encrypted_key = key_value
|
| 299 |
+
break
|
| 300 |
+
|
| 301 |
+
if not encrypted_key:
|
| 302 |
+
raise HTTPException(status_code=400, detail="API key not found for this model")
|
| 303 |
+
|
| 304 |
+
api_key = decrypt_api_key(encrypted_key)
|
| 305 |
+
|
| 306 |
+
# Setup OpenAI client
|
| 307 |
+
if "gemini" in model.lower():
|
| 308 |
+
client = AgentsAsyncOpenAI(api_key=api_key, base_url="https://generativelanguage.googleapis.com/v1beta/openai/")
|
| 309 |
+
model_name = "gemini-2.0-flash-exp"
|
| 310 |
+
elif "grok" in model.lower():
|
| 311 |
+
client = AgentsAsyncOpenAI(api_key=api_key, base_url="https://api.x.ai/v1")
|
| 312 |
+
model_name = "grok-beta"
|
| 313 |
+
else:
|
| 314 |
+
client = AgentsAsyncOpenAI(api_key=api_key)
|
| 315 |
+
model_name = "gpt-4o"
|
| 316 |
+
|
| 317 |
+
MODEL = OpenAIChatCompletionsModel(model=model_name, openai_client=client)
|
| 318 |
+
|
| 319 |
+
# === FIXED PART: Recreate tools safely using domain ===
|
| 320 |
+
from agent_architect import DynamicToolFactory
|
| 321 |
+
|
| 322 |
+
domain = agent_data['business_context']['domain']
|
| 323 |
+
business_name = agent_data['business_context'].get('business_name', 'Agent')
|
| 324 |
+
|
| 325 |
+
tools, _ = DynamicToolFactory.create_tools_for_domain(domain, business_name)
|
| 326 |
+
|
| 327 |
+
agent = Agent(
|
| 328 |
+
name=agent_data['name'],
|
| 329 |
+
instructions=agent_data['instructions'],
|
| 330 |
+
model=MODEL,
|
| 331 |
+
tools=tools
|
| 332 |
+
)
|
| 333 |
+
# ======================================================
|
| 334 |
+
|
| 335 |
+
runner = Runner()
|
| 336 |
+
temp_session = SQLiteSession(f":memory:")
|
| 337 |
+
response = await runner.run(agent, user_message, session=temp_session)
|
| 338 |
+
final_output = str(response.final_output) if hasattr(response, 'final_output') else str(response)
|
| 339 |
+
|
| 340 |
+
await emit_log(session_id, f"User: {user_message}", "user")
|
| 341 |
+
await emit_log(session_id, f"{agent_data['name']}: {final_output[:200]}...", "agent")
|
| 342 |
+
|
| 343 |
+
return {
|
| 344 |
+
"status": "success",
|
| 345 |
+
"agent_id": agent_id,
|
| 346 |
+
"agent_name": agent_data['name'],
|
| 347 |
+
"user_message": user_message,
|
| 348 |
+
"agent_response": final_output,
|
| 349 |
+
"tools_used": [tool['name'] for tool in agent_data.get('tools', [])],
|
| 350 |
+
"timestamp": time.time()
|
| 351 |
+
}
|
| 352 |
+
|
| 353 |
+
except Exception as e:
|
| 354 |
+
error_msg = str(e)
|
| 355 |
+
await emit_log(session_id, f"Agent error: {error_msg}", "error")
|
| 356 |
+
raise HTTPException(status_code=500, detail=error_msg)
|
| 357 |
+
|
| 358 |
+
# --- Socket.IO Events ---
|
| 359 |
+
@sio.event
async def connect(sid, environ):
    """Socket.IO connection handler — logs the new client."""
    print(f"✅ Client connected: {sid}")
|
| 362 |
+
|
| 363 |
+
@sio.event
async def disconnect(sid):
    """Socket.IO disconnect handler — logs the departing client."""
    print(f"❌ Client disconnected: {sid}")
|
| 366 |
+
|
| 367 |
+
@sio.event
async def join(sid, data):
    """Join a client to its session's room so it receives log_update events."""
    session_id = data.get('session_id')
    if session_id:
        await sio.enter_room(sid, session_id)
        print(f"👥 Client {sid} joined room: {session_id}")
        # Acknowledge only to the joining client, not the whole room.
        await sio.emit('joined', {'session_id': session_id}, room=sid)
|
| 374 |
+
|
| 375 |
+
|
| 376 |
+
# Mount Socket.IO - IMPORTANT: Do this LAST
|
| 377 |
+
app.mount("/", socket_app)
|
| 378 |
+
|
requirements.txt
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
fastapi
uvicorn
python-multipart
openai-agents
openai
PyMuPDF
reportlab
python-dotenv
requests
pydantic
# The following are imported by main.py / test code but were missing:
firebase-admin
python-socketio
cryptography
httpx
google-generativeai
|
schemas.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pydantic import BaseModel, Field
|
| 2 |
+
from typing import List
|
| 3 |
+
|
| 4 |
+
class ValidationResult(BaseModel):
    """Outcome of an API-key validation attempt for a given model."""
    is_valid: bool = Field(default=False, description="If API key is valid for model")
    error: str = Field(default="", description="Validation error if any")
|
| 7 |
+
|
| 8 |
+
class ExtractionResult(BaseModel):
    """Lightweight extraction of agent requirements from a business query."""
    business_type: str = Field(default="", description="Detected business type (1-3 words)")
    core_actions: List[str] = Field(default_factory=list, description="List of 2-4 key actions")
    required_tools: List[str] = Field(default_factory=list, description="Snake_case tool names")
    tone: str = Field(default="friendly", description="friendly | professional | casual")
    error: str = Field(default="", description="Extraction error if any")
|
test/test_connection.py
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
import json
|
| 3 |
+
import httpx
|
| 4 |
+
import socketio
|
| 5 |
+
import time
|
| 6 |
+
from firebase_admin import credentials, db, initialize_app
|
| 7 |
+
import os
|
| 8 |
+
from dotenv import load_dotenv
|
| 9 |
+
|
| 10 |
+
load_dotenv()
database_url = os.getenv("FIREBASE_DB_URL")

# Initialize the Firebase Admin SDK against the same Realtime DB the server
# uses, so the test can verify what the API wrote.
cred = credentials.Certificate("firebase/serviceAccountKey.json")
initialize_app(cred, {
    'databaseURL': database_url
})

# Local dev server address (see uvicorn run config).
BASE_URL = "http://127.0.0.1:8000"
|
| 20 |
+
|
| 21 |
+
async def test_full_flow():
    """End-to-end smoke test: create session, stream logs, run pipeline, check Firebase."""
    print("TESTING DAY 1 CONNECTION...\n")

    # 1. Create a session via the REST API
    print("1. Creating session...")
    resp = httpx.post(f"{BASE_URL}/start")
    data = resp.json()
    session_id = data["session_id"]
    print(f"   Session ID: {session_id}")

    # 2. Attach a Socket.IO client and join the session room for live logs
    print("\n2. Connecting to Socket.IO...")
    sio = socketio.AsyncClient()

    @sio.event
    async def connect():
        print("   Connected!")
        await sio.emit('join', {'session_id': session_id})

    @sio.on('log_update')
    async def on_log(data):
        print(f"   [{data['type']}] {data['message']}")

    await sio.connect(BASE_URL)
    print("   Joined room")

    # 3. Send Query.
    # BUGFIX: the server exposes POST /session/{sid}/run — there is no /query
    # endpoint — and the body requires query, model and api_key. Also give
    # httpx a generous timeout: the pipeline runs far longer than the 5s default.
    print("\n3. Sending query...")
    httpx.post(
        f"{BASE_URL}/session/{session_id}/run",
        json={
            "query": "I want an agent for my online pharmacy that checks prescription validity and sends SMS confirmations",
            "model": "gpt-4o",
            "api_key": os.getenv("OPENAI_API_KEY")
        },
        timeout=120.0
    )

    print("\n4. Receiving live logs...\n")
    await asyncio.sleep(8)

    # 5. Verify server-side state landed in Firebase
    print("\n5. Verifying Firebase Realtime DB...")
    ref = db.reference(f'sessions/{session_id}')
    fb_data = ref.get()
    print(f"   Status: {fb_data.get('status')}")
    print(f"   Logs count: {len(fb_data.get('logs', []))}")
    print(f"   User query: {fb_data.get('user_query')}")

    await sio.disconnect()
    print("\nDAY 1 CONNECTION: PASSED!")
    print(f"   Firebase URL: https://console.firebase.google.com/project/_/database/realtime/data/sessions/{session_id}")

if __name__ == "__main__":
    asyncio.run(test_full_flow())
|
test/test_validation.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
from validation_agent import run_validation_agent
|
| 3 |
+
|
| 4 |
+
async def main():
    """Exercise run_validation_agent across the supported providers."""
    print("Testing GPT...")
    result_gpt = await run_validation_agent("gpt", "sk-test-valid")
    print(result_gpt)

    print("\nTesting Gemini...")
    result_gemini = await run_validation_agent("gemini", "fake-key")
    print(result_gemini)

    print("\nTesting Grok...")
    result_grok = await run_validation_agent("grok", "xai-valid-key")
    print(result_grok)

# BUGFIX: the original called asyncio.run(main()) unconditionally at module
# level, firing network calls on import; guard the entry point.
if __name__ == "__main__":
    asyncio.run(main())
|
validation_agent.py
ADDED
|
@@ -0,0 +1,190 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# run_validation_agent.py
|
| 2 |
+
import asyncio
|
| 3 |
+
from openai import AsyncOpenAI
|
| 4 |
+
from schemas import ValidationResult
|
| 5 |
+
|
| 6 |
+
# ------------------------
|
| 7 |
+
# GPT validation via OpenAI SDK (Direct)
|
| 8 |
+
# ------------------------
|
| 9 |
+
async def validate_gpt_key(api_key: str, model: str = "gpt-4o") -> ValidationResult:
    """
    Validate an OpenAI (GPT) API key by issuing a minimal chat completion.

    Args:
        api_key: Candidate OpenAI API key.
        model: Model to probe with; defaults to "gpt-4o".

    Returns:
        ValidationResult with is_valid=True on success, otherwise
        is_valid=False plus a human-readable error message.
    """
    try:
        # Use the client as an async context manager so the underlying
        # HTTP connection pool is always released (the original created
        # the client and never closed it).
        async with AsyncOpenAI(api_key=api_key) as client:
            # Cheapest round trip that still exercises authentication.
            response = await client.chat.completions.create(
                model=model,
                messages=[
                    {"role": "system", "content": "You are an API key validator."},
                    {"role": "user", "content": "ping"}
                ],
                max_tokens=10,
                temperature=0
            )

        # A non-empty assistant message means the key (and model) work.
        if response.choices and response.choices[0].message.content:
            return ValidationResult(is_valid=True)

        return ValidationResult(
            is_valid=False,
            error="No valid response from OpenAI API"
        )

    except Exception as e:
        error_msg = str(e)
        # Map common SDK error markers to friendlier messages.
        if "invalid_api_key" in error_msg.lower():
            error_msg = "Invalid API key"
        elif "rate_limit" in error_msg.lower():
            error_msg = "Rate limit exceeded"
        elif "insufficient_quota" in error_msg.lower():
            error_msg = "Insufficient quota/credits"

        return ValidationResult(
            is_valid=False,
            error=f"GPT validation failed: {error_msg}"
        )
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
# ------------------------
|
| 54 |
+
# Gemini (Google Generative AI) validation
|
| 55 |
+
# ------------------------
|
| 56 |
+
async def validate_gemini_key(api_key: str, model: str = "gemini-2.0-flash-exp") -> ValidationResult:
    """
    Check a Gemini API key by requesting a trivial completion.

    Args:
        api_key: Candidate Google Generative AI key.
        model: Gemini model name to probe.

    Returns:
        ValidationResult describing whether the key worked.
    """
    try:
        import google.generativeai as genai

        # Register the key with the SDK before building the model handle.
        genai.configure(api_key=api_key)

        # A one-word generation request is enough to exercise auth.
        handle = genai.GenerativeModel(model)
        reply = await handle.generate_content_async("ping")

        if reply and reply.text:
            return ValidationResult(is_valid=True)

        return ValidationResult(
            is_valid=False,
            error="No valid response from Gemini API"
        )

    except Exception as e:
        error_msg = str(e)
        # Normalize the SDK's invalid-key signals to a short message.
        if "API_KEY_INVALID" in error_msg or "invalid" in error_msg.lower():
            error_msg = "Invalid API key"

        return ValidationResult(
            is_valid=False,
            error=f"Gemini validation failed: {error_msg}"
        )
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
# ------------------------
|
| 90 |
+
# Grok (xAI) validation
|
| 91 |
+
# ------------------------
|
| 92 |
+
async def validate_grok_key(api_key: str, model: str = "grok-beta") -> ValidationResult:
    """
    Validate a Grok (xAI) API key via xAI's OpenAI-compatible endpoint.

    Args:
        api_key: Candidate xAI API key.
        model: Grok model to probe with; defaults to "grok-beta".

    Returns:
        ValidationResult with is_valid=True on success, otherwise
        is_valid=False plus a human-readable error message.
    """
    try:
        # xAI speaks the OpenAI wire protocol; only the base URL differs.
        # The async context manager guarantees the HTTP pool is closed
        # (the original leaked the client).
        async with AsyncOpenAI(
            api_key=api_key,
            base_url="https://api.x.ai/v1"
        ) as client:
            response = await client.chat.completions.create(
                model=model,
                messages=[
                    {"role": "system", "content": "You are an API key validator."},
                    {"role": "user", "content": "ping"}
                ],
                max_tokens=10,
                temperature=0
            )

        # A non-empty assistant message means the key is usable.
        if response.choices and response.choices[0].message.content:
            return ValidationResult(is_valid=True)

        return ValidationResult(
            is_valid=False,
            error="No valid response from Grok API"
        )

    except Exception as e:
        error_msg = str(e)
        if "invalid_api_key" in error_msg.lower():
            error_msg = "Invalid API key"

        return ValidationResult(
            is_valid=False,
            error=f"Grok validation failed: {error_msg}"
        )
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
# ------------------------
|
| 133 |
+
# Main runner
|
| 134 |
+
# ------------------------
|
| 135 |
+
async def run_validation_agent(model: str, api_key: str) -> ValidationResult:
    """
    Dispatch an API-key check to the validator matching the model name.

    Args:
        model: Model identifier (e.g. "gpt-4o", "gemini-2.0-flash-exp",
            "grok-beta"); matched case-insensitively by substring.
        api_key: Key to validate.

    Returns:
        ValidationResult from the matching provider validator, or an
        is_valid=False result for unrecognized models / unexpected errors.
    """
    name = model.lower()

    try:
        # Guard-clause routing: first substring match wins.
        if "gpt" in name or "o1" in name:
            return await validate_gpt_key(api_key, model)
        if "gemini" in name:
            return await validate_gemini_key(api_key, model)
        if "grok" in name:
            return await validate_grok_key(api_key, model)

        return ValidationResult(
            is_valid=False,
            error=f"Unsupported model: {model}"
        )
    except Exception as e:
        # Defensive net: the validators already catch their own errors,
        # so this mostly guards against bugs in the routing itself.
        return ValidationResult(
            is_valid=False,
            error=f"Validation error: {str(e)}"
        )
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
# ------------------------
|
| 162 |
+
# Optional test runner
|
| 163 |
+
# ------------------------
|
| 164 |
+
if __name__ == "__main__":
    import os
    from dotenv import load_dotenv
    load_dotenv()

    async def main():
        """Probe each provider using keys pulled from the environment."""
        # (model to probe, environment variable that holds its key)
        cases = [
            ("gpt-4o", "OPENAI_API_KEY"),
            ("gemini-2.0-flash-exp", "GEMINI_API_KEY"),
            ("grok-beta", "GROK_API_KEY")
        ]

        for model_name, env_var in cases:
            candidate = os.getenv(env_var, "")
            if not candidate:
                print(f"⚠️ {model_name.upper()}: No API key found in env ({env_var})\n")
                continue

            print(f"Testing {model_name.upper()} key...")
            outcome = await run_validation_agent(model_name, candidate)

            status = "✅ Valid\n" if outcome.is_valid else f"❌ Invalid | {outcome.error}\n"
            print(status)

    asyncio.run(main())
|
web_search_agent.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from agents import Agent
|
| 2 |
+
from confg import model
|
| 3 |
+
from web_search_tool import web_search
|
| 4 |
+
|
| 5 |
+
# Agent that answers time-sensitive queries by delegating to the
# `web_search` tool and summarizing the top results.
web_search_agent = Agent(
    name = "Web Search Assistant",
    instructions = (
        # Trailing spaces added between the adjacent literals: the original
        # concatenated them with no separators, producing run-together
        # sentences in the prompt ("...assistant.For ANY query...
        # information,you MUST call...").
        "You are a web search assistant. "
        "For ANY query about current events, news, movies, weather, finance, or recent information, "
        "you MUST call the `web_search` tool. "
        "Do not rely only on your internal knowledge. "
        "After searching, summarize the top results clearly and concisely in this format:\n\n"
        "1. [Title] - short snippet (URL)\n"
        "2. [Title] - short snippet (URL)\n"
        "3. [Title] - short snippet (URL)\n\n"
        "If no relevant results are found, politely say so."
    ),
    model = model,
    tools = [web_search]
)
|
web_search_tool.py
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from agents import function_tool
|
| 2 |
+
from confg import tavily_client
|
| 3 |
+
|
| 4 |
+
@function_tool
def web_search(query: str):
    """
    Run a Tavily web search for *query* and return the raw result
    payload wrapped in a short prefix string.
    """
    results = tavily_client.search(query)
    return f"Response {results}"
|