Spaces:
Sleeping
Sleeping
Upload 22 files
Browse files- README.md +187 -13
- app.py +465 -0
- chat_handler.py +425 -0
- config.py +113 -0
- decision_agent.py +399 -0
- requirements.txt +35 -0
- tools/__init__.py +18 -0
- tools/__pycache__/__init__.cpython-311.pyc +0 -0
- tools/__pycache__/comfyui_builder.cpython-311.pyc +0 -0
- tools/__pycache__/comfyui_templates.cpython-311.pyc +0 -0
- tools/__pycache__/docker_helper.cpython-311.pyc +0 -0
- tools/__pycache__/github_search.cpython-311.pyc +0 -0
- tools/__pycache__/n8n_builder.cpython-311.pyc +0 -0
- tools/__pycache__/web_search.cpython-311.pyc +0 -0
- tools/__pycache__/workflow_templates.cpython-311.pyc +0 -0
- tools/comfyui_builder.py +546 -0
- tools/comfyui_templates.py +537 -0
- tools/docker_helper.py +449 -0
- tools/github_search.py +241 -0
- tools/n8n_builder.py +620 -0
- tools/web_search.py +169 -0
- tools/workflow_templates.py +585 -0
README.md
CHANGED
|
@@ -1,13 +1,187 @@
|
|
| 1 |
-
-
|
| 2 |
-
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
-
|
| 12 |
-
|
| 13 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# AI Workflow Agent - Milestone 1 (M1 Only)
|
| 2 |
+
|
| 3 |
+
## What This Package Contains
|
| 4 |
+
|
| 5 |
+
This is a **standalone M1 implementation** - Core AI Decision Making with CrewAI and GitHub Search.
|
| 6 |
+
|
| 7 |
+
### Features:
|
| 8 |
+
- AI Decision Engine (picks n8n vs ComfyUI)
|
| 9 |
+
- Multi-turn Chat with Session Management
|
| 10 |
+
- Web Search (DuckDuckGo - no API key)
|
| 11 |
+
- GitHub Project Search
|
| 12 |
+
- 16 Workflow Templates (8 n8n + 8 ComfyUI)
|
| 13 |
+
- Docker Container Management
|
| 14 |
+
|
| 15 |
+
---
|
| 16 |
+
|
| 17 |
+
## Prerequisites
|
| 18 |
+
|
| 19 |
+
### ✅ Required:
|
| 20 |
+
- **Python 3.10+** with virtual environment (already included)
|
| 21 |
+
- **Nothing else!** M1 is fully standalone
|
| 22 |
+
|
| 23 |
+
### ⚠️ Optional (for enhanced features):
|
| 24 |
+
- **Ollama** (for AI-powered LLM responses)
|
| 25 |
+
- Without it: Uses smart keyword matching (works perfectly)
|
| 26 |
+
- With it: Uses Qwen2.5 LLM for deeper analysis
|
| 27 |
+
- Start: `docker-compose up -d ollama` (from parent directory)
|
| 28 |
+
- Pull model: `docker exec -it ollama ollama pull qwen2.5:3b`
|
| 29 |
+
|
| 30 |
+
- **Docker daemon** (for Docker management endpoints only)
|
| 31 |
+
- Only needed if you want to test `/docker/*` endpoints
|
| 32 |
+
- Not required for core M1 features
|
| 33 |
+
|
| 34 |
+
### 🚫 NOT Required:
|
| 35 |
+
- ❌ n8n (M1 only generates workflows, doesn't deploy them)
|
| 36 |
+
- ❌ ComfyUI (M1 only generates workflows, doesn't execute them)
|
| 37 |
+
- ❌ Database (M1 uses in-memory storage)
|
| 38 |
+
- ❌ API keys (DuckDuckGo and GitHub work without auth)
|
| 39 |
+
|
| 40 |
+
---
|
| 41 |
+
|
| 42 |
+
## Quick Start
|
| 43 |
+
|
| 44 |
+
### 1. Use Existing Virtual Environment
|
| 45 |
+
|
| 46 |
+
```powershell
|
| 47 |
+
# The existing venv from parent directory will be used automatically
|
| 48 |
+
# No need to create a new one!
|
| 49 |
+
```
|
| 50 |
+
|
| 51 |
+
### 2. Start M1 API Server
|
| 52 |
+
|
| 53 |
+
```powershell
|
| 54 |
+
cd M1_only
|
| 55 |
+
.\start_m1.ps1
|
| 56 |
+
```
|
| 57 |
+
|
| 58 |
+
Server runs on: **http://localhost:8001** (different from full app on 8000)
|
| 59 |
+
|
| 60 |
+
### 3. Access UI Dashboard 🎨
|
| 61 |
+
|
| 62 |
+
The UI dashboard will automatically open in your browser!
|
| 63 |
+
|
| 64 |
+
**Manual access:**
|
| 65 |
+
- **UI Dashboard (English):** Open `ui.html` in your browser
|
| 66 |
+
- **UI Dashboard (Spanish):** Open `ui_es.html` in your browser
|
| 67 |
+
- **API Docs:** http://localhost:8001/docs (Swagger documentation)
|
| 68 |
+
|
| 69 |
+
The UI Dashboard provides:
|
| 70 |
+
- ✨ Beautiful interface for testing all M1 features
|
| 71 |
+
- 🌐 Available in **English** and **Spanish**
|
| 72 |
+
- 🎯 Pre-filled example queries for quick demos
|
| 73 |
+
- 📊 Real-time response display with JSON formatting
|
| 74 |
+
- 🚀 Perfect for client demonstrations
|
| 75 |
+
|
| 76 |
+
---
|
| 77 |
+
|
| 78 |
+
## Run Tests
|
| 79 |
+
|
| 80 |
+
```powershell
|
| 81 |
+
# Run all M1 tests
|
| 82 |
+
python test_m1_integration.py
|
| 83 |
+
|
| 84 |
+
# Run unit tests
|
| 85 |
+
python test_m1.py
|
| 86 |
+
|
| 87 |
+
# Run Phase 0 tests
|
| 88 |
+
python test_agent.py
|
| 89 |
+
```
|
| 90 |
+
|
| 91 |
+
---
|
| 92 |
+
|
| 93 |
+
## M1 Endpoints
|
| 94 |
+
|
| 95 |
+
| Method | Endpoint | Description |
|
| 96 |
+
|--------|----------|-------------|
|
| 97 |
+
| GET | /health | Health check |
|
| 98 |
+
| POST | /analyze | AI decision making |
|
| 99 |
+
| POST | /build | Generate workflow |
|
| 100 |
+
| POST | /chat | Multi-turn chat |
|
| 101 |
+
| POST | /github/search | Search GitHub |
|
| 102 |
+
| GET | /search/web | Web search |
|
| 103 |
+
| GET | /search/projects | GitHub projects |
|
| 104 |
+
| GET | /search/alternatives | Find alternatives |
|
| 105 |
+
| GET | /docker/containers | List containers |
|
| 106 |
+
| GET | /docker/logs/{id} | Container logs |
|
| 107 |
+
| DELETE | /docker/stop/{id} | Stop container |
|
| 108 |
+
|
| 109 |
+
---
|
| 110 |
+
|
| 111 |
+
## File Structure
|
| 112 |
+
|
| 113 |
+
```
|
| 114 |
+
M1_only/
|
| 115 |
+
├── main_m1.py # M1 API server (port 8001)
|
| 116 |
+
├── config.py # Configuration
|
| 117 |
+
├── decision_agent.py # AI decision logic
|
| 118 |
+
├── chat_handler.py # Chat sessions
|
| 119 |
+
├── crew_agents.py # CrewAI agents
|
| 120 |
+
├── ui.html # Web UI Dashboard (English)
|
| 121 |
+
├── ui_es.html # Web UI Dashboard (Spanish)
|
| 122 |
+
├── tools/
|
| 123 |
+
│ ├── web_search.py # DuckDuckGo search
|
| 124 |
+
│ ├── github_search.py # GitHub API
|
| 125 |
+
│ ├── n8n_builder.py # n8n workflows
|
| 126 |
+
│ ├── comfyui_builder.py # ComfyUI workflows
|
| 127 |
+
│ ├── workflow_templates.py # 8 n8n templates
|
| 128 |
+
│ ├── comfyui_templates.py # 8 image templates
|
| 129 |
+
│ └── docker_helper.py # Docker management
|
| 130 |
+
├── test_agent.py # Phase 0 tests (6)
|
| 131 |
+
├── test_m1.py # M1 unit tests (7)
|
| 132 |
+
├── test_m1_integration.py # M1 integration tests (8)
|
| 133 |
+
├── requirements.txt # M1 dependencies only
|
| 134 |
+
├── start_m1.ps1 # Startup script (uses existing venv)
|
| 135 |
+
└── README.md # This file
|
| 136 |
+
```
|
| 137 |
+
|
| 138 |
+
---
|
| 139 |
+
|
| 140 |
+
## Testing Examples
|
| 141 |
+
|
| 142 |
+
### Test AI Decision
|
| 143 |
+
|
| 144 |
+
```bash
|
| 145 |
+
curl -X POST http://localhost:8001/analyze \
|
| 146 |
+
-H "Content-Type: application/json" \
|
| 147 |
+
-d '{"query":"send automated emails daily"}'
|
| 148 |
+
```
|
| 149 |
+
|
| 150 |
+
### Test Chat
|
| 151 |
+
|
| 152 |
+
```bash
|
| 153 |
+
curl -X POST http://localhost:8001/chat \
|
| 154 |
+
-H "Content-Type: application/json" \
|
| 155 |
+
-d '{"query":"create a workflow for processing customer data"}'
|
| 156 |
+
```
|
| 157 |
+
|
| 158 |
+
### Test Web Search
|
| 159 |
+
|
| 160 |
+
```bash
|
| 161 |
+
curl "http://localhost:8001/search/web?query=python+automation&max_results=3"
|
| 162 |
+
```
|
| 163 |
+
|
| 164 |
+
---
|
| 165 |
+
|
| 166 |
+
## Test Results
|
| 167 |
+
|
| 168 |
+
- **Phase 0 Tests**: 6/6 PASS
|
| 169 |
+
- **M1 Unit Tests**: 7/7 PASS
|
| 170 |
+
- **M1 Integration Tests**: 8/8 PASS
|
| 171 |
+
- **Total**: 21/21 PASS
|
| 172 |
+
|
| 173 |
+
---
|
| 174 |
+
|
| 175 |
+
## Key Points
|
| 176 |
+
|
| 177 |
+
- Runs on port **8001** (not 8000) to avoid conflicts
|
| 178 |
+
- Uses existing venv from parent directory
|
| 179 |
+
- Fully isolated from M2 and M3 code
|
| 180 |
+
- All M1 features included and tested
|
| 181 |
+
- Ready for standalone delivery
|
| 182 |
+
|
| 183 |
+
---
|
| 184 |
+
|
| 185 |
+
## Support
|
| 186 |
+
|
| 187 |
+
See M1_DELIVERY.md for complete documentation.
|
app.py
ADDED
|
@@ -0,0 +1,465 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
Gradio Interface for M1 - Hugging Face Spaces Deployment
Replicates the functionality of ui_es.html in a Gradio interface
"""

import gradio as gr
import asyncio
import json
from decision_agent import DecisionAgent
from tools.n8n_builder import N8NWorkflowBuilder
from tools.comfyui_builder import ComfyUIWorkflowBuilder
from tools.github_search import GitHubSearchTool
from tools.web_search import WebSearchTool
from chat_handler import SessionManager

# Initialize components
# Module-level singletons shared by every Gradio callback below.
# NOTE(review): these are created at import time and shared across all
# concurrent users of the Space — confirm each tool is safe to share.
decision_agent = DecisionAgent()
n8n_builder = N8NWorkflowBuilder()
comfyui_builder = ComfyUIWorkflowBuilder()
github_tool = GitHubSearchTool()
web_tool = WebSearchTool()
session_manager = SessionManager()
|
| 23 |
+
|
| 24 |
+
# Helper to run async functions
def run_async(coro):
    """Run *coro* to completion on a fresh event loop and return its result.

    Gradio callbacks are synchronous, but the agent/builder/search APIs are
    async; this helper bridges the two.  ``asyncio.run`` creates a new event
    loop, runs the coroutine, cancels any leftover tasks, and tears the loop
    down cleanly.  (The previous hand-rolled ``new_event_loop`` /
    ``set_event_loop`` / ``close`` sequence left the *closed* loop installed
    as the thread's current loop, breaking any later ``get_event_loop`` call.)

    Must not be called from inside an already-running event loop —
    ``asyncio.run`` raises ``RuntimeError`` in that case, just as the
    original implementation did.
    """
    return asyncio.run(coro)
|
| 33 |
+
|
| 34 |
+
# ========================================
# TAB 1: AI Analysis
# ========================================
def analyze_query(query, use_llm):
    """Run the decision agent on *query* and render the result as Markdown.

    Returns a Spanish-language Markdown report (project type, confidence,
    explanation, suggested tools, next steps) or an error string.
    """
    if not query:
        return "❌ Por favor ingrese una consulta"

    try:
        analysis = run_async(decision_agent.analyze(query, context={'use_llm': use_llm}))

        # Pre-render the two list sections so the template below stays flat.
        tools_md = "\n".join('- ' + name for name in analysis.get('suggested_tools', []))
        steps_md = "\n".join(
            f"{idx}. {step}"
            for idx, step in enumerate(analysis.get('next_steps', []), start=1)
        )

        return (
            "\n## 🧠 Resultado del Análisis IA\n"
            "\n"
            f"**Tipo de Proyecto:** {analysis['project_type'].upper()}\n"
            f"**Confianza:** {analysis['confidence']*100:.0f}%\n"
            f"**Método:** {analysis.get('classification_method', 'N/A')}\n"
            "\n"
            "### 📝 Explicación:\n"
            f"{analysis['explanation']}\n"
            "\n"
            "### 🔧 Herramientas Sugeridas:\n"
            f"{tools_md}\n"
            "\n"
            "### 📋 Próximos Pasos:\n"
            f"{steps_md}\n"
        )
    except Exception as e:
        return f"❌ Error: {str(e)}"
|
| 65 |
+
|
| 66 |
+
# ========================================
# TAB 2: Build Workflow
# ========================================
def build_workflow(description, tool, use_llm):
    """Generate an n8n or ComfyUI workflow from a free-text description.

    Returns a pair ``(summary_markdown, workflow_json)``; on validation or
    runtime failure the JSON element is the empty string.
    """
    if not description:
        return "❌ Por favor ingrese una descripción", ""

    try:
        # Pick the builder matching the requested tool; anything that is
        # not "n8n" falls through to the ComfyUI builder.
        builder = n8n_builder if tool == "n8n" else comfyui_builder
        workflow = run_async(builder.generate_workflow(description, context={'use_llm': use_llm}))

        meta = workflow.get('meta', {})
        used_llm = meta.get('generated_with_llm', False)
        method_label = "🧠 Generado con IA" if used_llm else "⚡ Generado con plantillas"

        summary = (
            "\n## ✅ ¡Flujo de Trabajo Generado!\n"
            "\n"
            f"**Tipo:** {tool.upper()}\n"
            f"**Método:** {method_label}\n"
            f"**Nodos:** {len(workflow.get('nodes', []))}\n"
            f"**Nombre:** {workflow.get('name', 'N/A')}\n"
        )

        # When the LLM produced an analysis, append its highlights.
        if used_llm and 'llm_analysis' in meta:
            llm_info = meta['llm_analysis']
            summary += (
                "\n### 📊 Análisis IA:\n"
                f"- **Explicación:** {llm_info.get('explanation', 'N/A')[:200]}...\n"
                f"- **Confianza:** {llm_info.get('confidence', 0)*100:.0f}%\n"
            )

        return summary, json.dumps(workflow, indent=2, ensure_ascii=False)

    except Exception as e:
        return f"❌ Error: {str(e)}", ""
|
| 107 |
+
|
| 108 |
+
# ========================================
# TAB 3: GitHub Search
# ========================================
def search_github(keywords, max_results):
    """Search GitHub and render matching repositories as a Markdown list."""
    if not keywords:
        return "❌ Por favor ingrese palabras clave"

    try:
        repos = run_async(github_tool.search(keywords, max_results=int(max_results)))

        if not repos:
            return "📭 No se encontraron repositorios"

        # Assemble the report as chunks and join once at the end.
        parts = [f"## 🔍 Resultados de GitHub ({len(repos)} encontrados)\n\n"]
        for rank, repo in enumerate(repos, start=1):
            docker_line = '- 🐳 **Docker:** Sí' if repo.get('has_docker') else ''
            parts.append(
                f"\n### {rank}. {repo.get('full_name', 'N/A')}\n"
                f"- ⭐ **Estrellas:** {repo.get('stars', 0):,}\n"
                f"- 📝 **Descripción:** {repo.get('description', 'Sin descripción')[:150]}...\n"
                f"- 🔗 **URL:** {repo.get('url', 'N/A')}\n"
                f"- 💻 **Lenguaje:** {repo.get('language', 'N/A')}\n"
                f"{docker_line}\n"
                "\n---\n"
            )
        return "".join(parts)
    except Exception as e:
        return f"❌ Error: {str(e)}"
|
| 138 |
+
|
| 139 |
+
# ========================================
# TAB 4: Web Search
# ========================================
def search_web(query, max_results):
    """Run a DuckDuckGo search and render the hits as Markdown."""
    if not query:
        return "❌ Por favor ingrese una consulta"

    try:
        hits = run_async(web_tool.search(query, max_results=int(max_results)))

        if not hits:
            return "📭 No se encontraron resultados"

        # Build the report incrementally and join once for efficiency.
        rendered = [f"## 🌐 Resultados de Búsqueda ({len(hits)} encontrados)\n\n"]
        for rank, hit in enumerate(hits, start=1):
            rendered.append(
                f"\n### {rank}. {hit.get('title', 'Sin título')}\n"
                f"{hit.get('snippet', 'Sin descripción')}\n"
                f"\n🔗 {hit.get('url', 'N/A')}\n"
                "\n---\n"
            )
        return "".join(rendered)
    except Exception as e:
        return f"❌ Error: {str(e)}"
|
| 167 |
+
|
| 168 |
+
# ========================================
# TAB 5: Chat
# ========================================
def chat_response(message, history, use_llm):
    """Answer one chat turn and append ``(user, assistant)`` to *history*.

    Returns ``(history, "")`` — the empty string clears the input textbox.
    NOTE(review): the session is cached on the function object itself, so
    every visitor of the deployed Space shares a single conversation
    session — confirm this is acceptable for multi-user use.
    """
    if not message:
        return history, ""

    try:
        # Lazily create a single session the first time the chat is used.
        session = getattr(chat_response, 'session', None)
        if session is None:
            session = session_manager.create_session()
            chat_response.session = session

        session.add_message("user", message)

        # Reuse the decision agent to produce the reply text.
        analysis = run_async(decision_agent.analyze(message, context={'use_llm': use_llm}))
        tools_csv = ', '.join(analysis.get('suggested_tools', []))
        reply = f"{analysis['explanation']}\n\n**Herramientas sugeridas:** {tools_csv}"

        session.add_message("assistant", reply)
        history.append((message, reply))
        return history, ""

    except Exception as e:
        history.append((message, f"❌ Error: {str(e)}"))
        return history, ""
|
| 199 |
+
|
| 200 |
+
# ========================================
# Create Gradio Interface
# ========================================
# Declarative UI wiring: each tab pairs input widgets with one of the
# callback functions defined above.  (Reconstructed from a diff rendering —
# whitespace inside the Markdown string literals is presumed, not verified.)

# Custom CSS for better styling
css = """
.gradio-container {
    font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
}
"""

with gr.Blocks(css=css, title="M1 - Agente de Flujo de Trabajo IA", theme=gr.themes.Soft()) as demo:

    gr.Markdown("""
# 🤖 Agente de Flujo de Trabajo IA - Milestone 1
### Toma de Decisiones IA Básica

Sistema de análisis y generación de flujos de trabajo potenciado por IA.
""")

    # Global AI Mode Toggle
    # Shared by every tab: passed as an extra input to each callback so the
    # user chooses once between fast keyword matching and LLM analysis.
    with gr.Row():
        use_llm_global = gr.Checkbox(
            label="🧠 Modo IA Inteligente (Gemini)",
            value=False,
            info="Activar para usar análisis con Gemini AI. Desactivar para modo rápido con palabras clave."
        )

    # Create tabs
    with gr.Tabs():

        # ========================================
        # HOME TAB
        # ========================================
        with gr.TabItem("🏠 Inicio"):
            gr.Markdown("""
## Bienvenido al Panel M1

Este sistema proporciona:

### ✨ Características Principales:
- 🧠 **Análisis IA**: Analiza tus necesidades y recomienda la mejor herramienta
- ⚙️ **Generación de Flujos**: Crea flujos de trabajo de n8n y ComfyUI
- 🔍 **Búsqueda Inteligente**: Busca en GitHub y la web
- 💬 **Chat IA**: Asistente conversacional para automatización

### 📊 Estadísticas:
- **21 endpoints API** activos
- **16 plantillas** de flujos de trabajo (8 n8n + 8 ComfyUI)
- **2 modos**: Rápido (palabras clave) y Inteligente (Gemini AI)

### 🚀 Comenzar:
1. Activa el **Modo IA** arriba para análisis inteligente
2. Navega por las pestañas para explorar características
3. Prueba las consultas de ejemplo en cada pestaña
""")

        # ========================================
        # AI ANALYSIS TAB
        # ========================================
        with gr.TabItem("🧠 Análisis IA"):
            gr.Markdown("### Permite que la IA analice tus necesidades de automatización")

            with gr.Row():
                with gr.Column():
                    analysis_query = gr.Textbox(
                        label="Tu Consulta",
                        placeholder="Describe lo que deseas automatizar...",
                        lines=3
                    )

                    gr.Examples(
                        examples=[
                            "Necesito enviar correos electrónicos automatizados a 1000 clientes cada mañana",
                            "Quiero generar arte IA a partir de descripciones de texto y publicar en Instagram",
                            "Procesar datos de clientes de Shopify y sincronizar con CRM",
                            "Crear un chatbot que responda preguntas de mi documentación"
                        ],
                        inputs=analysis_query,
                        label="💡 Consultas de Ejemplo"
                    )

                    analyze_btn = gr.Button("Analizar con IA ✨", variant="primary")

                with gr.Column():
                    analysis_output = gr.Markdown(label="Resultado")

            analyze_btn.click(
                fn=analyze_query,
                inputs=[analysis_query, use_llm_global],
                outputs=analysis_output
            )

        # ========================================
        # BUILD WORKFLOW TAB
        # ========================================
        with gr.TabItem("⚙️ Construir Flujo"):
            gr.Markdown("### Genera flujos de trabajo completos a partir de descripciones")

            with gr.Row():
                with gr.Column():
                    build_description = gr.Textbox(
                        label="Descripción del Flujo de Trabajo",
                        placeholder="Describe el flujo de trabajo que deseas crear...",
                        lines=4
                    )

                    build_tool = gr.Radio(
                        choices=["n8n", "comfyui"],
                        value="n8n",
                        label="Herramienta",
                        info="n8n para automatización, ComfyUI para generación de imágenes IA"
                    )

                    gr.Examples(
                        examples=[
                            "Crear un flujo que reciba datos de clientes desde webhook y envíe correo de confirmación",
                            "Construir un flujo que monitoree Twitter para menciones y envíe notificaciones de Slack",
                            "Generar fotos profesionales a partir de fotos subidas"
                        ],
                        inputs=build_description,
                        label="💡 Flujos de Ejemplo"
                    )

                    build_btn = gr.Button("Generar Flujo de Trabajo 🚀", variant="primary")

                with gr.Column():
                    # build_workflow returns (markdown_summary, workflow_json)
                    build_output = gr.Markdown(label="Resultado")
                    build_json = gr.Code(label="JSON del Flujo de Trabajo", language="json")

            build_btn.click(
                fn=build_workflow,
                inputs=[build_description, build_tool, use_llm_global],
                outputs=[build_output, build_json]
            )

        # ========================================
        # GITHUB SEARCH TAB
        # ========================================
        with gr.TabItem("🔍 Búsqueda GitHub"):
            gr.Markdown("### Encuentra proyectos de código abierto relevantes")

            with gr.Row():
                with gr.Column():
                    github_keywords = gr.Textbox(
                        label="Palabras Clave de Búsqueda",
                        placeholder="ej., automatización de flujos de trabajo"
                    )

                    github_max = gr.Slider(
                        minimum=1,
                        maximum=10,
                        value=5,
                        step=1,
                        label="Resultados Máximos"
                    )

                    gr.Examples(
                        examples=[
                            "automatización de flujos de trabajo",
                            "implementación de aprendizaje automático",
                            "generación de imágenes IA"
                        ],
                        inputs=github_keywords,
                        label="💡 Ejemplos de Búsqueda"
                    )

                    github_btn = gr.Button("Buscar en GitHub 🔍", variant="primary")

                with gr.Column():
                    github_output = gr.Markdown(label="Resultados")

            github_btn.click(
                fn=search_github,
                inputs=[github_keywords, github_max],
                outputs=github_output
            )

        # ========================================
        # WEB SEARCH TAB
        # ========================================
        with gr.TabItem("🌐 Búsqueda Web"):
            gr.Markdown("### Busca en la web usando DuckDuckGo")

            with gr.Row():
                with gr.Column():
                    web_query = gr.Textbox(
                        label="Consulta de Búsqueda",
                        placeholder="ej., ejemplos de automatización n8n"
                    )

                    web_max = gr.Slider(
                        minimum=1,
                        maximum=10,
                        value=5,
                        step=1,
                        label="Resultados Máximos"
                    )

                    web_btn = gr.Button("Buscar en Web 🔍", variant="primary")

                with gr.Column():
                    web_output = gr.Markdown(label="Resultados")

            web_btn.click(
                fn=search_web,
                inputs=[web_query, web_max],
                outputs=web_output
            )

        # ========================================
        # CHAT TAB
        # ========================================
        with gr.TabItem("💬 Chat"):
            gr.Markdown("### Conversa con la IA sobre tus necesidades de automatización")

            chatbot = gr.Chatbot(label="Conversación", height=400)

            with gr.Row():
                chat_input = gr.Textbox(
                    label="Tu Mensaje",
                    placeholder="Pregunta al asistente IA cualquier cosa...",
                    scale=4
                )
                chat_btn = gr.Button("Enviar 💬", scale=1, variant="primary")

            gr.Examples(
                examples=[
                    "Quiero crear un bot que responda preguntas de clientes desde mi sitio web",
                    "Ayúdame a automatizar mis publicaciones en redes sociales",
                    "Necesito procesar archivos CSV y generar reportes"
                ],
                inputs=chat_input,
                label="💡 Iniciadores de Conversación"
            )

            # Both the Send button and pressing Enter submit the message.
            chat_btn.click(
                fn=chat_response,
                inputs=[chat_input, chatbot, use_llm_global],
                outputs=[chatbot, chat_input]
            )

            chat_input.submit(
                fn=chat_response,
                inputs=[chat_input, chatbot, use_llm_global],
                outputs=[chatbot, chat_input]
            )

    # Footer
    gr.Markdown("""
---
### 📝 Notas:
- **Modo Rápido (palabras clave)**: <100ms, usa plantillas predefinidas
- **Modo Inteligente (Gemini)**: 2-3s, análisis profundo con IA
- **Requisitos**: Clave API de Gemini para modo inteligente (gratis en [Google AI Studio](https://aistudio.google.com/app/apikeys))

🚀 **Milestone 1** - Sistema de toma de decisiones IA básico completado
""")

# Launch the app
if __name__ == "__main__":
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,  # HF Spaces default port
        share=False
    )
|
chat_handler.py
ADDED
|
@@ -0,0 +1,425 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# AI Workflow Agent - Chat API
|
| 2 |
+
"""
|
| 3 |
+
Conversational interface for the AI Workflow Agent.
|
| 4 |
+
Supports multi-turn conversations with session management.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import uuid
|
| 8 |
+
import json
|
| 9 |
+
import logging
|
| 10 |
+
from datetime import datetime
|
| 11 |
+
from typing import Dict, Any, Optional, List
|
| 12 |
+
from dataclasses import dataclass, field, asdict
|
| 13 |
+
from enum import Enum
|
| 14 |
+
|
| 15 |
+
logger = logging.getLogger(__name__)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class MessageRole(Enum):
|
| 19 |
+
"""Message roles in conversation."""
|
| 20 |
+
USER = "user"
|
| 21 |
+
ASSISTANT = "assistant"
|
| 22 |
+
SYSTEM = "system"
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class ConversationState(Enum):
|
| 26 |
+
"""Current state of conversation."""
|
| 27 |
+
INITIAL = "initial"
|
| 28 |
+
ANALYZING = "analyzing"
|
| 29 |
+
CLARIFYING = "clarifying"
|
| 30 |
+
PLANNING = "planning"
|
| 31 |
+
BUILDING = "building"
|
| 32 |
+
COMPLETE = "complete"
|
| 33 |
+
ERROR = "error"
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
@dataclass
|
| 37 |
+
class Message:
|
| 38 |
+
"""Single message in conversation."""
|
| 39 |
+
role: str
|
| 40 |
+
content: str
|
| 41 |
+
timestamp: str = field(default_factory=lambda: datetime.now().isoformat())
|
| 42 |
+
metadata: Dict[str, Any] = field(default_factory=dict)
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
@dataclass
|
| 46 |
+
class Session:
|
| 47 |
+
"""Conversation session."""
|
| 48 |
+
session_id: str
|
| 49 |
+
created_at: str
|
| 50 |
+
state: str = ConversationState.INITIAL.value
|
| 51 |
+
messages: List[Dict[str, Any]] = field(default_factory=list)
|
| 52 |
+
context: Dict[str, Any] = field(default_factory=dict)
|
| 53 |
+
project_type: Optional[str] = None
|
| 54 |
+
workflow: Optional[Dict[str, Any]] = None
|
| 55 |
+
pending_questions: List[str] = field(default_factory=list)
|
| 56 |
+
|
| 57 |
+
def add_message(self, role: str, content: str, metadata: Dict = None):
|
| 58 |
+
"""Add a message to the conversation."""
|
| 59 |
+
self.messages.append({
|
| 60 |
+
"role": role,
|
| 61 |
+
"content": content,
|
| 62 |
+
"timestamp": datetime.now().isoformat(),
|
| 63 |
+
"metadata": metadata or {}
|
| 64 |
+
})
|
| 65 |
+
|
| 66 |
+
def get_history_text(self, limit: int = 10) -> str:
|
| 67 |
+
"""Get conversation history as text for LLM context."""
|
| 68 |
+
recent = self.messages[-limit:]
|
| 69 |
+
lines = []
|
| 70 |
+
for msg in recent:
|
| 71 |
+
role = msg["role"].upper()
|
| 72 |
+
content = msg["content"]
|
| 73 |
+
lines.append(f"{role}: {content}")
|
| 74 |
+
return "\n".join(lines)
|
| 75 |
+
|
| 76 |
+
def to_dict(self) -> Dict[str, Any]:
|
| 77 |
+
"""Convert to dictionary."""
|
| 78 |
+
return asdict(self)
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
class SessionManager:
|
| 82 |
+
"""Manages conversation sessions."""
|
| 83 |
+
|
| 84 |
+
def __init__(self, max_sessions: int = 100):
|
| 85 |
+
self.sessions: Dict[str, Session] = {}
|
| 86 |
+
self.max_sessions = max_sessions
|
| 87 |
+
|
| 88 |
+
def create_session(self) -> Session:
|
| 89 |
+
"""Create a new conversation session."""
|
| 90 |
+
# Cleanup old sessions if limit reached
|
| 91 |
+
if len(self.sessions) >= self.max_sessions:
|
| 92 |
+
self._cleanup_old_sessions()
|
| 93 |
+
|
| 94 |
+
session_id = str(uuid.uuid4())[:8]
|
| 95 |
+
session = Session(
|
| 96 |
+
session_id=session_id,
|
| 97 |
+
created_at=datetime.now().isoformat()
|
| 98 |
+
)
|
| 99 |
+
|
| 100 |
+
# Add system message
|
| 101 |
+
session.add_message(
|
| 102 |
+
MessageRole.SYSTEM.value,
|
| 103 |
+
"AI Workflow Agent initialized. Ready to help build n8n, ComfyUI, "
|
| 104 |
+
"or hybrid workflows. Describe what you want to create."
|
| 105 |
+
)
|
| 106 |
+
|
| 107 |
+
self.sessions[session_id] = session
|
| 108 |
+
logger.info(f"Created session: {session_id}")
|
| 109 |
+
return session
|
| 110 |
+
|
| 111 |
+
def get_session(self, session_id: str) -> Optional[Session]:
|
| 112 |
+
"""Get existing session by ID."""
|
| 113 |
+
return self.sessions.get(session_id)
|
| 114 |
+
|
| 115 |
+
def get_or_create(self, session_id: Optional[str] = None) -> Session:
|
| 116 |
+
"""Get existing session or create new one."""
|
| 117 |
+
if session_id and session_id in self.sessions:
|
| 118 |
+
return self.sessions[session_id]
|
| 119 |
+
return self.create_session()
|
| 120 |
+
|
| 121 |
+
def update_state(self, session_id: str, state: ConversationState):
|
| 122 |
+
"""Update session state."""
|
| 123 |
+
if session_id in self.sessions:
|
| 124 |
+
self.sessions[session_id].state = state.value
|
| 125 |
+
|
| 126 |
+
def delete_session(self, session_id: str) -> bool:
|
| 127 |
+
"""Delete a session."""
|
| 128 |
+
if session_id in self.sessions:
|
| 129 |
+
del self.sessions[session_id]
|
| 130 |
+
logger.info(f"Deleted session: {session_id}")
|
| 131 |
+
return True
|
| 132 |
+
return False
|
| 133 |
+
|
| 134 |
+
def list_sessions(self) -> List[Dict[str, Any]]:
|
| 135 |
+
"""List all active sessions."""
|
| 136 |
+
return [
|
| 137 |
+
{
|
| 138 |
+
"session_id": s.session_id,
|
| 139 |
+
"created_at": s.created_at,
|
| 140 |
+
"state": s.state,
|
| 141 |
+
"message_count": len(s.messages),
|
| 142 |
+
"project_type": s.project_type
|
| 143 |
+
}
|
| 144 |
+
for s in self.sessions.values()
|
| 145 |
+
]
|
| 146 |
+
|
| 147 |
+
def _cleanup_old_sessions(self):
|
| 148 |
+
"""Remove oldest sessions to make room."""
|
| 149 |
+
if not self.sessions:
|
| 150 |
+
return
|
| 151 |
+
|
| 152 |
+
# Sort by creation time and remove oldest 20%
|
| 153 |
+
sorted_sessions = sorted(
|
| 154 |
+
self.sessions.items(),
|
| 155 |
+
key=lambda x: x[1].created_at
|
| 156 |
+
)
|
| 157 |
+
|
| 158 |
+
to_remove = len(sorted_sessions) // 5
|
| 159 |
+
for session_id, _ in sorted_sessions[:to_remove]:
|
| 160 |
+
del self.sessions[session_id]
|
| 161 |
+
|
| 162 |
+
logger.info(f"Cleaned up {to_remove} old sessions")
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
class ChatHandler:
|
| 166 |
+
"""Handles chat interactions with the agent system."""
|
| 167 |
+
|
| 168 |
+
def __init__(self):
|
| 169 |
+
self.session_manager = SessionManager()
|
| 170 |
+
self._agent_system = None # Lazy load
|
| 171 |
+
|
| 172 |
+
@property
|
| 173 |
+
def agent_system(self):
|
| 174 |
+
"""Lazy load agent system to avoid circular imports."""
|
| 175 |
+
if self._agent_system is None:
|
| 176 |
+
from crew_agents import crew_agent_system
|
| 177 |
+
self._agent_system = crew_agent_system
|
| 178 |
+
return self._agent_system
|
| 179 |
+
|
| 180 |
+
async def chat(
|
| 181 |
+
self,
|
| 182 |
+
message: str,
|
| 183 |
+
session_id: Optional[str] = None
|
| 184 |
+
) -> Dict[str, Any]:
|
| 185 |
+
"""
|
| 186 |
+
Process a chat message and return response.
|
| 187 |
+
|
| 188 |
+
Args:
|
| 189 |
+
message: User message
|
| 190 |
+
session_id: Optional existing session ID
|
| 191 |
+
|
| 192 |
+
Returns:
|
| 193 |
+
Dict with response, session_id, state, and optionally questions/workflow
|
| 194 |
+
"""
|
| 195 |
+
# Get or create session
|
| 196 |
+
session = self.session_manager.get_or_create(session_id)
|
| 197 |
+
|
| 198 |
+
# Add user message
|
| 199 |
+
session.add_message(MessageRole.USER.value, message)
|
| 200 |
+
|
| 201 |
+
try:
|
| 202 |
+
# Handle based on current state
|
| 203 |
+
if session.state == ConversationState.CLARIFYING.value:
|
| 204 |
+
# User is answering clarifying questions
|
| 205 |
+
return await self._handle_clarification(session, message)
|
| 206 |
+
else:
|
| 207 |
+
# New request or continuation
|
| 208 |
+
return await self._handle_request(session, message)
|
| 209 |
+
|
| 210 |
+
except Exception as e:
|
| 211 |
+
logger.error(f"Chat error: {e}")
|
| 212 |
+
session.state = ConversationState.ERROR.value
|
| 213 |
+
session.add_message(
|
| 214 |
+
MessageRole.ASSISTANT.value,
|
| 215 |
+
f"Sorry, I encountered an error: {str(e)}. Please try again."
|
| 216 |
+
)
|
| 217 |
+
return {
|
| 218 |
+
"success": False,
|
| 219 |
+
"session_id": session.session_id,
|
| 220 |
+
"response": f"Error: {str(e)}",
|
| 221 |
+
"state": session.state
|
| 222 |
+
}
|
| 223 |
+
|
| 224 |
+
async def _handle_request(self, session: Session, message: str) -> Dict[str, Any]:
|
| 225 |
+
"""Handle a new or continuing request."""
|
| 226 |
+
session.state = ConversationState.ANALYZING.value
|
| 227 |
+
|
| 228 |
+
# Analyze the request
|
| 229 |
+
analysis = await self.agent_system.analyze_request(
|
| 230 |
+
query=message,
|
| 231 |
+
session_id=session.session_id,
|
| 232 |
+
context={"history": session.get_history_text()}
|
| 233 |
+
)
|
| 234 |
+
|
| 235 |
+
if not analysis.get("success"):
|
| 236 |
+
error_msg = analysis.get("error", "Analysis failed")
|
| 237 |
+
session.add_message(MessageRole.ASSISTANT.value, f"Error: {error_msg}")
|
| 238 |
+
return {
|
| 239 |
+
"success": False,
|
| 240 |
+
"session_id": session.session_id,
|
| 241 |
+
"response": error_msg,
|
| 242 |
+
"state": session.state
|
| 243 |
+
}
|
| 244 |
+
|
| 245 |
+
# Check if clarification needed
|
| 246 |
+
if analysis.get("needs_clarification") and analysis.get("confidence", 0) < 0.7:
|
| 247 |
+
session.state = ConversationState.CLARIFYING.value
|
| 248 |
+
questions = analysis.get("questions", [])
|
| 249 |
+
session.pending_questions = questions
|
| 250 |
+
|
| 251 |
+
# Build response with questions
|
| 252 |
+
response_parts = [analysis.get("analysis", "I need some clarification:")]
|
| 253 |
+
for i, q in enumerate(questions, 1):
|
| 254 |
+
response_parts.append(f"\n{i}. {q}")
|
| 255 |
+
|
| 256 |
+
response = "\n".join(response_parts)
|
| 257 |
+
session.add_message(MessageRole.ASSISTANT.value, response)
|
| 258 |
+
|
| 259 |
+
return {
|
| 260 |
+
"success": True,
|
| 261 |
+
"session_id": session.session_id,
|
| 262 |
+
"response": response,
|
| 263 |
+
"state": session.state,
|
| 264 |
+
"needs_clarification": True,
|
| 265 |
+
"questions": questions,
|
| 266 |
+
"project_type": analysis.get("project_type")
|
| 267 |
+
}
|
| 268 |
+
|
| 269 |
+
# Proceed to build
|
| 270 |
+
return await self._build_workflow(session, analysis)
|
| 271 |
+
|
| 272 |
+
async def _handle_clarification(self, session: Session, answer: str) -> Dict[str, Any]:
|
| 273 |
+
"""Handle user's answer to clarifying questions."""
|
| 274 |
+
# Store the clarification
|
| 275 |
+
if session.pending_questions:
|
| 276 |
+
question = session.pending_questions[0]
|
| 277 |
+
self.agent_system.add_clarification(
|
| 278 |
+
session.session_id,
|
| 279 |
+
question,
|
| 280 |
+
answer
|
| 281 |
+
)
|
| 282 |
+
session.pending_questions = session.pending_questions[1:]
|
| 283 |
+
|
| 284 |
+
# If more questions pending, ask next one
|
| 285 |
+
if session.pending_questions:
|
| 286 |
+
next_question = session.pending_questions[0]
|
| 287 |
+
response = f"Thanks! Next question: {next_question}"
|
| 288 |
+
session.add_message(MessageRole.ASSISTANT.value, response)
|
| 289 |
+
|
| 290 |
+
return {
|
| 291 |
+
"success": True,
|
| 292 |
+
"session_id": session.session_id,
|
| 293 |
+
"response": response,
|
| 294 |
+
"state": session.state,
|
| 295 |
+
"needs_clarification": True,
|
| 296 |
+
"questions": session.pending_questions
|
| 297 |
+
}
|
| 298 |
+
|
| 299 |
+
# All questions answered, proceed to build
|
| 300 |
+
session.add_message(
|
| 301 |
+
MessageRole.ASSISTANT.value,
|
| 302 |
+
"Great, I have all the information I need. Building your workflow..."
|
| 303 |
+
)
|
| 304 |
+
|
| 305 |
+
# Re-analyze with new information
|
| 306 |
+
conv_context = self.agent_system.get_session(session.session_id)
|
| 307 |
+
if conv_context:
|
| 308 |
+
analysis = {
|
| 309 |
+
"project_type": conv_context.project_type,
|
| 310 |
+
"confidence": 0.9,
|
| 311 |
+
"requirements": conv_context.requirements
|
| 312 |
+
}
|
| 313 |
+
return await self._build_workflow(session, analysis)
|
| 314 |
+
|
| 315 |
+
return await self._handle_request(session, session.messages[-2]["content"])
|
| 316 |
+
|
| 317 |
+
async def _build_workflow(self, session: Session, analysis: Dict[str, Any]) -> Dict[str, Any]:
|
| 318 |
+
"""Build the workflow based on analysis."""
|
| 319 |
+
session.state = ConversationState.PLANNING.value
|
| 320 |
+
session.project_type = analysis.get("project_type")
|
| 321 |
+
|
| 322 |
+
# Use the simple builders for reliability (CrewAI for complex cases)
|
| 323 |
+
from tools.n8n_builder import N8NWorkflowBuilder
|
| 324 |
+
from tools.comfyui_builder import ComfyUIWorkflowBuilder
|
| 325 |
+
from tools.github_search import GitHubSearchTool
|
| 326 |
+
|
| 327 |
+
project_type = analysis.get("project_type", "unknown")
|
| 328 |
+
original_query = session.messages[1]["content"] if len(session.messages) > 1 else ""
|
| 329 |
+
|
| 330 |
+
session.state = ConversationState.BUILDING.value
|
| 331 |
+
|
| 332 |
+
try:
|
| 333 |
+
if project_type == "n8n":
|
| 334 |
+
builder = N8NWorkflowBuilder()
|
| 335 |
+
workflow = await builder.generate_workflow(original_query)
|
| 336 |
+
response = "I've generated an n8n workflow for you. Here's the configuration:"
|
| 337 |
+
|
| 338 |
+
elif project_type == "comfyui":
|
| 339 |
+
builder = ComfyUIWorkflowBuilder()
|
| 340 |
+
workflow = await builder.generate_workflow(original_query)
|
| 341 |
+
response = "I've generated a ComfyUI workflow. Here's the configuration:"
|
| 342 |
+
|
| 343 |
+
elif project_type == "hybrid":
|
| 344 |
+
n8n_builder = N8NWorkflowBuilder()
|
| 345 |
+
comfyui_builder = ComfyUIWorkflowBuilder()
|
| 346 |
+
|
| 347 |
+
n8n_wf = await n8n_builder.generate_workflow(original_query)
|
| 348 |
+
comfyui_wf = await comfyui_builder.generate_workflow(original_query)
|
| 349 |
+
|
| 350 |
+
workflow = {
|
| 351 |
+
"type": "hybrid",
|
| 352 |
+
"n8n_workflow": n8n_wf,
|
| 353 |
+
"comfyui_workflow": comfyui_wf,
|
| 354 |
+
"integration_note": "n8n can call ComfyUI via HTTP Request node to /prompt endpoint"
|
| 355 |
+
}
|
| 356 |
+
response = "I've generated a hybrid workflow combining n8n automation with ComfyUI for AI generation."
|
| 357 |
+
|
| 358 |
+
elif project_type == "external_repo":
|
| 359 |
+
github = GitHubSearchTool()
|
| 360 |
+
repos = await github.search(original_query, max_results=3)
|
| 361 |
+
recommendation = await github.generate_recommendation(repos)
|
| 362 |
+
|
| 363 |
+
workflow = {
|
| 364 |
+
"type": "external_repo",
|
| 365 |
+
"repositories": repos,
|
| 366 |
+
"recommendation": recommendation
|
| 367 |
+
}
|
| 368 |
+
response = f"I found some relevant repositories:\n\n{recommendation}"
|
| 369 |
+
|
| 370 |
+
else:
|
| 371 |
+
workflow = None
|
| 372 |
+
response = "I couldn't determine the project type. Could you provide more details?"
|
| 373 |
+
|
| 374 |
+
session.workflow = workflow
|
| 375 |
+
session.state = ConversationState.COMPLETE.value
|
| 376 |
+
|
| 377 |
+
if workflow and project_type not in ["external_repo"]:
|
| 378 |
+
response += f"\n\n```json\n{json.dumps(workflow, indent=2)[:2000]}\n```"
|
| 379 |
+
|
| 380 |
+
session.add_message(MessageRole.ASSISTANT.value, response[:500] + "..." if len(response) > 500 else response)
|
| 381 |
+
|
| 382 |
+
return {
|
| 383 |
+
"success": True,
|
| 384 |
+
"session_id": session.session_id,
|
| 385 |
+
"response": response,
|
| 386 |
+
"state": session.state,
|
| 387 |
+
"project_type": project_type,
|
| 388 |
+
"workflow": workflow
|
| 389 |
+
}
|
| 390 |
+
|
| 391 |
+
except Exception as e:
|
| 392 |
+
logger.error(f"Build error: {e}")
|
| 393 |
+
session.state = ConversationState.ERROR.value
|
| 394 |
+
response = f"Error building workflow: {str(e)}"
|
| 395 |
+
session.add_message(MessageRole.ASSISTANT.value, response)
|
| 396 |
+
|
| 397 |
+
return {
|
| 398 |
+
"success": False,
|
| 399 |
+
"session_id": session.session_id,
|
| 400 |
+
"response": response,
|
| 401 |
+
"state": session.state,
|
| 402 |
+
"error": str(e)
|
| 403 |
+
}
|
| 404 |
+
|
| 405 |
+
def get_session_info(self, session_id: str) -> Optional[Dict[str, Any]]:
|
| 406 |
+
"""Get session information."""
|
| 407 |
+
session = self.session_manager.get_session(session_id)
|
| 408 |
+
if session:
|
| 409 |
+
return session.to_dict()
|
| 410 |
+
return None
|
| 411 |
+
|
| 412 |
+
def list_sessions(self) -> List[Dict[str, Any]]:
|
| 413 |
+
"""List all sessions."""
|
| 414 |
+
return self.session_manager.list_sessions()
|
| 415 |
+
|
| 416 |
+
def clear_session(self, session_id: str) -> bool:
|
| 417 |
+
"""Clear a session."""
|
| 418 |
+
# Also clear from agent system
|
| 419 |
+
if hasattr(self, '_agent_system') and self._agent_system:
|
| 420 |
+
self._agent_system.clear_session(session_id)
|
| 421 |
+
return self.session_manager.delete_session(session_id)
|
| 422 |
+
|
| 423 |
+
|
| 424 |
+
# Singleton instance
|
| 425 |
+
chat_handler = ChatHandler()
|
config.py
ADDED
|
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# AI Workflow Agent - Configuration
|
| 2 |
+
import os
|
| 3 |
+
from pathlib import Path
|
| 4 |
+
from pydantic_settings import BaseSettings
|
| 5 |
+
from typing import Optional
|
| 6 |
+
|
| 7 |
+
# Load environment variables from .env file
|
| 8 |
+
from dotenv import load_dotenv
|
| 9 |
+
|
| 10 |
+
# Load .env file from the M1_only directory
|
| 11 |
+
env_path = Path(__file__).parent / '.env'
|
| 12 |
+
if env_path.exists():
|
| 13 |
+
load_dotenv(env_path)
|
| 14 |
+
else:
|
| 15 |
+
# Try to load from parent directory
|
| 16 |
+
env_path = Path(__file__).parent.parent / '.env'
|
| 17 |
+
if env_path.exists():
|
| 18 |
+
load_dotenv(env_path)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class Settings(BaseSettings):
|
| 22 |
+
"""Application settings loaded from environment variables."""
|
| 23 |
+
|
| 24 |
+
# LLM Configuration
|
| 25 |
+
LLM_PROVIDER: str = os.getenv("LLM_PROVIDER", "gemini") # gemini or ollama
|
| 26 |
+
GEMINI_API_KEY: Optional[str] = os.getenv("GEMINI_API_KEY", None)
|
| 27 |
+
USE_LLM: bool = os.getenv("USE_LLM", "true").lower() == "true" # Enabled by default if key exists
|
| 28 |
+
|
| 29 |
+
# Ollama LLM Configuration (fallback)
|
| 30 |
+
OLLAMA_HOST: str = os.getenv("OLLAMA_HOST", "http://localhost:11434")
|
| 31 |
+
OLLAMA_MODEL: str = os.getenv("OLLAMA_MODEL", "qwen2.5:3b")
|
| 32 |
+
|
| 33 |
+
# n8n Configuration
|
| 34 |
+
N8N_HOST: str = os.getenv("N8N_HOST", "http://localhost:5678")
|
| 35 |
+
N8N_API_KEY: Optional[str] = os.getenv("N8N_API_KEY", None)
|
| 36 |
+
|
| 37 |
+
# ComfyUI Configuration
|
| 38 |
+
COMFYUI_HOST: str = os.getenv("COMFYUI_HOST", "http://localhost:8188")
|
| 39 |
+
|
| 40 |
+
# PostgreSQL Configuration
|
| 41 |
+
POSTGRES_HOST: str = os.getenv("POSTGRES_HOST", "localhost")
|
| 42 |
+
POSTGRES_PORT: int = int(os.getenv("POSTGRES_PORT", "5432"))
|
| 43 |
+
POSTGRES_USER: str = os.getenv("POSTGRES_USER", "agent")
|
| 44 |
+
POSTGRES_PASSWORD: str = os.getenv("POSTGRES_PASSWORD", "agent_secret_2026")
|
| 45 |
+
POSTGRES_DB: str = os.getenv("POSTGRES_DB", "workflow_agent")
|
| 46 |
+
|
| 47 |
+
# GitHub Configuration
|
| 48 |
+
GITHUB_TOKEN: Optional[str] = os.getenv("GITHUB_TOKEN", None)
|
| 49 |
+
|
| 50 |
+
# Colab Configuration (Milestone 2)
|
| 51 |
+
NGROK_AUTH_TOKEN: Optional[str] = os.getenv("NGROK_AUTH_TOKEN", None)
|
| 52 |
+
|
| 53 |
+
# Project Directories
|
| 54 |
+
PROJECTS_DIR: str = os.getenv("PROJECTS_DIR", "/app/projects")
|
| 55 |
+
WORKFLOWS_DIR: str = os.getenv("WORKFLOWS_DIR", "/app/workflows")
|
| 56 |
+
|
| 57 |
+
@property
|
| 58 |
+
def database_url(self) -> str:
|
| 59 |
+
return f"postgresql://{self.POSTGRES_USER}:{self.POSTGRES_PASSWORD}@{self.POSTGRES_HOST}:{self.POSTGRES_PORT}/{self.POSTGRES_DB}"
|
| 60 |
+
|
| 61 |
+
class Config:
|
| 62 |
+
env_file = ".env"
|
| 63 |
+
case_sensitive = True
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
# Global settings instance
|
| 67 |
+
settings = Settings()
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
# Project Type Definitions
|
| 71 |
+
class ProjectType:
|
| 72 |
+
N8N = "n8n"
|
| 73 |
+
COMFYUI = "comfyui"
|
| 74 |
+
HYBRID = "hybrid"
|
| 75 |
+
EXTERNAL_REPO = "external_repo"
|
| 76 |
+
UNKNOWN = "unknown"
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
# Keywords for project classification
|
| 80 |
+
CLASSIFICATION_KEYWORDS = {
|
| 81 |
+
ProjectType.N8N: [
|
| 82 |
+
"automation", "workflow", "integrate", "api", "webhook", "schedule",
|
| 83 |
+
"email", "slack", "telegram", "notification", "trigger", "connect",
|
| 84 |
+
"sync", "transfer", "backup", "monitor", "alert", "scrape", "fetch",
|
| 85 |
+
"social media", "post", "publish", "send", "share", "broadcast",
|
| 86 |
+
"twitter", "facebook", "instagram", "linkedin", "discord", "whatsapp",
|
| 87 |
+
"bot", "chatbot", "automate", "automated", "batch", "bulk", "mass",
|
| 88 |
+
"campaign", "marketing", "crm", "customer", "lead", "contact",
|
| 89 |
+
"database", "spreadsheet", "csv", "excel", "google sheets",
|
| 90 |
+
"zapier", "ifttt", "data processing", "etl", "pipeline",
|
| 91 |
+
"report", "reporting", "export", "transform", "convert", "parse",
|
| 92 |
+
"import", "data", "file", "process data", "data export"
|
| 93 |
+
],
|
| 94 |
+
ProjectType.COMFYUI: [
|
| 95 |
+
"image", "generate", "ai art", "stable diffusion", "flux", "sdxl",
|
| 96 |
+
"inpaint", "upscale", "controlnet", "lora", "checkpoint", "model",
|
| 97 |
+
"txt2img", "img2img", "video", "animation", "diffusion", "generative",
|
| 98 |
+
"photo", "picture", "visual", "art", "create image", "make image",
|
| 99 |
+
"design", "logo", "banner", "thumbnail", "avatar", "illustration"
|
| 100 |
+
],
|
| 101 |
+
ProjectType.HYBRID: [
|
| 102 |
+
"generate image and", "create image and", "ai image and send",
|
| 103 |
+
"photo generation and", "ai art and", "design and automation",
|
| 104 |
+
"generate images and automate", "image generation workflow",
|
| 105 |
+
"stable diffusion and", "image to", "generate and email with images",
|
| 106 |
+
"automated image", "generative ai and automation"
|
| 107 |
+
],
|
| 108 |
+
ProjectType.EXTERNAL_REPO: [
|
| 109 |
+
"github", "repository", "repo", "clone", "download project",
|
| 110 |
+
"install", "setup project", "deploy", "docker project",
|
| 111 |
+
"existing project", "open source", "template"
|
| 112 |
+
]
|
| 113 |
+
}
|
decision_agent.py
ADDED
|
@@ -0,0 +1,399 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# AI Workflow Agent - Decision Agent (Core Brain)
|
| 2 |
+
"""
|
| 3 |
+
Decision Agent using LLM (Gemini 2.5 Flash or Ollama)
|
| 4 |
+
Analyzes user queries and decides:
|
| 5 |
+
- n8n automation
|
| 6 |
+
- ComfyUI generative workflow
|
| 7 |
+
- Hybrid (n8n + ComfyUI)
|
| 8 |
+
- External repo project
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import httpx
|
| 12 |
+
import json
|
| 13 |
+
import logging
|
| 14 |
+
from typing import Dict, Any, Optional, List
|
| 15 |
+
|
| 16 |
+
from config import settings, ProjectType, CLASSIFICATION_KEYWORDS
|
| 17 |
+
|
| 18 |
+
logger = logging.getLogger(__name__)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class DecisionAgent:
|
| 22 |
+
"""
|
| 23 |
+
Core decision-making agent that analyzes user queries
|
| 24 |
+
and determines the appropriate workflow type.
|
| 25 |
+
"""
|
| 26 |
+
|
| 27 |
+
def __init__(self):
|
| 28 |
+
self.llm_provider = settings.LLM_PROVIDER
|
| 29 |
+
self.gemini_api_key = settings.GEMINI_API_KEY
|
| 30 |
+
self.ollama_host = settings.OLLAMA_HOST
|
| 31 |
+
self.ollama_model = settings.OLLAMA_MODEL
|
| 32 |
+
self.client = httpx.AsyncClient(timeout=120.0)
|
| 33 |
+
|
| 34 |
+
async def check_ollama_health(self) -> str:
|
| 35 |
+
"""Check if Ollama is running and responsive."""
|
| 36 |
+
try:
|
| 37 |
+
response = await self.client.get(f"{self.ollama_host}/api/tags")
|
| 38 |
+
if response.status_code == 200:
|
| 39 |
+
return "healthy"
|
| 40 |
+
return "unhealthy"
|
| 41 |
+
except Exception as e:
|
| 42 |
+
logger.debug(f"Ollama health check failed: {e}")
|
| 43 |
+
return "unreachable"
|
| 44 |
+
|
| 45 |
+
async def ensure_model_available(self) -> bool:
|
| 46 |
+
"""Ensure the required model is available in Ollama."""
|
| 47 |
+
try:
|
| 48 |
+
# Check if model exists
|
| 49 |
+
response = await self.client.get(f"{self.ollama_host}/api/tags")
|
| 50 |
+
if response.status_code == 200:
|
| 51 |
+
models = response.json().get("models", [])
|
| 52 |
+
model_names = [m.get("name", "") for m in models]
|
| 53 |
+
|
| 54 |
+
if self.model not in model_names and f"{self.model}:latest" not in model_names:
|
| 55 |
+
logger.info(f"Pulling model {self.model}...")
|
| 56 |
+
# Pull the model
|
| 57 |
+
pull_response = await self.client.post(
|
| 58 |
+
f"{self.ollama_host}/api/pull",
|
| 59 |
+
json={"name": self.model},
|
| 60 |
+
timeout=600.0 # 10 minutes for large models
|
| 61 |
+
)
|
| 62 |
+
return pull_response.status_code == 200
|
| 63 |
+
return True
|
| 64 |
+
return False
|
| 65 |
+
except Exception as e:
|
| 66 |
+
logger.debug(f"Model check failed: {e}")
|
| 67 |
+
return False
|
| 68 |
+
|
| 69 |
+
async def analyze(self, query: str, context: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
|
| 70 |
+
"""
|
| 71 |
+
Analyze user query and determine project type.
|
| 72 |
+
|
| 73 |
+
STRATEGY: Respect user's LLM toggle from context
|
| 74 |
+
- If context says use_llm=True → Use Gemini (if available)
|
| 75 |
+
- If context says use_llm=False → Use keywords only
|
| 76 |
+
- If no context → Use Gemini (if available)
|
| 77 |
+
|
| 78 |
+
Returns:
|
| 79 |
+
Dict with project_type, confidence, explanation, suggested_tools, next_steps
|
| 80 |
+
"""
|
| 81 |
+
|
| 82 |
+
# Check if LLM is explicitly disabled in context
|
| 83 |
+
use_llm = True # Default to LLM
|
| 84 |
+
if context and isinstance(context, dict):
|
| 85 |
+
use_llm = context.get('use_llm', True) # Respect the toggle
|
| 86 |
+
|
| 87 |
+
# PRIMARY: Try LLM if enabled and available
|
| 88 |
+
if use_llm and self.gemini_api_key:
|
| 89 |
+
llm_result = await self._llm_analyze(query, context)
|
| 90 |
+
|
| 91 |
+
# If Gemini succeeded, use it
|
| 92 |
+
if llm_result["project_type"] != ProjectType.UNKNOWN:
|
| 93 |
+
final_result = llm_result
|
| 94 |
+
final_result["classification_method"] = "Gemini 2.5 Flash (LLM)"
|
| 95 |
+
logger.info(f"Decision (Gemini): {final_result['project_type']} (confidence: {final_result['confidence']:.2f})")
|
| 96 |
+
final_result["suggested_tools"] = self._get_suggested_tools(final_result["project_type"])
|
| 97 |
+
final_result["next_steps"] = self._get_next_steps(final_result["project_type"], query)
|
| 98 |
+
return final_result
|
| 99 |
+
|
| 100 |
+
# FALLBACK: Use keywords when LLM disabled or unavailable
|
| 101 |
+
keyword_result = self._keyword_classify(query)
|
| 102 |
+
keyword_result["classification_method"] = "Keyword Matching (Fallback)"
|
| 103 |
+
logger.info(f"Decision (Keywords): {keyword_result['project_type']} (confidence: {keyword_result['confidence']:.2f})")
|
| 104 |
+
keyword_result["suggested_tools"] = self._get_suggested_tools(keyword_result["project_type"])
|
| 105 |
+
keyword_result["next_steps"] = self._get_next_steps(keyword_result["project_type"], query)
|
| 106 |
+
return keyword_result
|
| 107 |
+
|
| 108 |
+
def _keyword_classify(self, query: str) -> Dict[str, Any]:
|
| 109 |
+
"""Fast keyword-based classification (FALLBACK ONLY).
|
| 110 |
+
|
| 111 |
+
Logic:
|
| 112 |
+
- If both COMFYUI + N8N keywords present → HYBRID
|
| 113 |
+
- Otherwise pick highest scoring type
|
| 114 |
+
"""
|
| 115 |
+
query_lower = query.lower()
|
| 116 |
+
scores = {}
|
| 117 |
+
|
| 118 |
+
for project_type, keywords in CLASSIFICATION_KEYWORDS.items():
|
| 119 |
+
score = sum(1 for kw in keywords if kw in query_lower)
|
| 120 |
+
scores[project_type] = score
|
| 121 |
+
|
| 122 |
+
# Check for HYBRID: both image generation AND automation keywords present
|
| 123 |
+
comfyui_score = scores.get(ProjectType.COMFYUI, 0)
|
| 124 |
+
n8n_score = scores.get(ProjectType.N8N, 0)
|
| 125 |
+
|
| 126 |
+
# If both types have keywords, it's HYBRID
|
| 127 |
+
if comfyui_score >= 1 and n8n_score >= 1:
|
| 128 |
+
confidence = min((comfyui_score + n8n_score) / 3, 1.0)
|
| 129 |
+
return {
|
| 130 |
+
"project_type": ProjectType.HYBRID,
|
| 131 |
+
"confidence": confidence,
|
| 132 |
+
"explanation": "Detected hybrid automation + AI task."
|
| 133 |
+
}
|
| 134 |
+
|
| 135 |
+
# Otherwise use highest score
|
| 136 |
+
if not scores or max(scores.values()) == 0:
|
| 137 |
+
return {
|
| 138 |
+
"project_type": ProjectType.N8N,
|
| 139 |
+
"confidence": 0.3,
|
| 140 |
+
"explanation": "Default to n8n automation. Tell me more about what you want to automate."
|
| 141 |
+
}
|
| 142 |
+
|
| 143 |
+
best_type = max(scores, key=scores.get)
|
| 144 |
+
max_score = scores[best_type]
|
| 145 |
+
confidence = min(max_score / 3, 1.0)
|
| 146 |
+
|
| 147 |
+
explanations = {
|
| 148 |
+
ProjectType.N8N: "Detected automation task.",
|
| 149 |
+
ProjectType.COMFYUI: "Detected image generation task.",
|
| 150 |
+
ProjectType.HYBRID: "Detected hybrid automation + AI task.",
|
| 151 |
+
ProjectType.EXTERNAL_REPO: "Detected GitHub repository request."
|
| 152 |
+
}
|
| 153 |
+
|
| 154 |
+
return {
|
| 155 |
+
"project_type": best_type,
|
| 156 |
+
"confidence": confidence,
|
| 157 |
+
"explanation": explanations.get(best_type, f"Detected {best_type}.")
|
| 158 |
+
}
|
| 159 |
+
|
| 160 |
+
async def _llm_analyze(self, query: str, context: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    """Route the classification request to the configured LLM backend.

    Gemini is used only when the provider is set to "gemini" AND an API
    key is present; every other configuration falls back to Ollama.
    """
    gemini_ready = self.llm_provider == "gemini" and self.gemini_api_key
    if gemini_ready:
        return await self._gemini_analyze(query, context)
    return await self._ollama_analyze(query, context)
|
| 167 |
+
|
| 168 |
+
async def _gemini_analyze(self, query: str, context: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    """Analyze using Google Gemini 2.5 Flash API.

    Use PRINCIPLE-BASED reasoning, not example-based.
    This way the LLM handles edge cases intelligently without constant prompt tweaking.

    Args:
        query: User's natural language request to classify.
        context: Accepted for interface symmetry with the other analyzers;
            not read by this implementation.

    Returns:
        Dict with "project_type", "confidence" (float), and "explanation".
        Every failure path (HTTP error, unparseable response, exception)
        degrades to ProjectType.UNKNOWN with confidence 0.0.
    """

    system_prompt = """You are a project classification assistant. Classify user requests into project types based on PRINCIPLES, not examples.

PRINCIPLES:
- **n8n**: Automation, workflows, data processing. Handles logic/connections/scheduling. (CSV reports are DATA, not images)
- **comfyui**: Visual generation. Creates images/photos/artwork using AI. (Image generation ONLY)
- **hybrid**: Both automation AND image generation in one workflow. (Must involve creating images AND automating something)
- **external_repo**: User wants to download/use existing code from GitHub

KEY DISTINCTION:
- "Report" = data document (n8n)
- "Image/photo" = visual content (comfyui)
- Both = hybrid

Respond ONLY with valid JSON:
{"project_type": "n8n" | "comfyui" | "hybrid" | "external_repo" | "unknown", "confidence": 0.0-1.0, "explanation": "..."}"""

    user_prompt = f'Classify: "{query}"'

    try:
        # System and user prompts are concatenated into a single user turn;
        # the key is passed as a query parameter per the REST API.
        response = await self.client.post(
            "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent",
            headers={"Content-Type": "application/json"},
            json={
                "contents": [{
                    "parts": [{
                        "text": f"{system_prompt}\n\n{user_prompt}"
                    }]
                }]
            },
            params={"key": self.gemini_api_key}
        )

        if response.status_code == 200:
            result = response.json()
            try:
                # Extract text from Gemini response
                # (first candidate, first part — raises KeyError/IndexError
                # on an unexpected shape, handled below).
                text_content = result["candidates"][0]["content"]["parts"][0]["text"]

                # Try to extract JSON from the response
                # Sometimes Gemini wraps JSON in markdown code blocks
                if "```json" in text_content:
                    json_str = text_content.split("```json")[1].split("```")[0].strip()
                elif "```" in text_content:
                    json_str = text_content.split("```")[1].split("```")[0].strip()
                else:
                    json_str = text_content

                parsed = json.loads(json_str)
                return {
                    "project_type": parsed.get("project_type", ProjectType.UNKNOWN),
                    "confidence": float(parsed.get("confidence", 0.5)),
                    "explanation": parsed.get("explanation", "Gemini analysis")
                }
            except (json.JSONDecodeError, KeyError, IndexError) as e:
                logger.warning(f"Failed to parse Gemini response: {e}")
                return {
                    "project_type": ProjectType.UNKNOWN,
                    "confidence": 0.0,
                    "explanation": "Gemini response parsing failed"
                }
        else:
            logger.debug(f"Gemini request failed: {response.status_code}")
            return {
                "project_type": ProjectType.UNKNOWN,
                "confidence": 0.0,
                "explanation": "Gemini request failed"
            }

    except Exception as e:
        # Network/transport errors; broad catch keeps classification best-effort.
        logger.debug(f"Gemini analysis error: {e}")
        return {
            "project_type": ProjectType.UNKNOWN,
            "confidence": 0.0,
            "explanation": f"Gemini error: {str(e)}"
        }
|
| 250 |
+
|
| 251 |
+
async def _ollama_analyze(self, query: str, context: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    """Classify the query with a local Ollama model.

    Sends a principle-based classification prompt and expects a single JSON
    object back (Ollama's "format": "json" mode). Every failure path —
    transport error, non-200 status, unparseable JSON — degrades to an
    "unknown" classification with confidence 0.0.
    """

    system_prompt = """You are a project classification assistant. Classify user requests into project types based on PRINCIPLES, not examples.

PRINCIPLES:
- **n8n**: Automation, workflows, data processing. Handles logic/connections/scheduling. (CSV reports are DATA, not images)
- **comfyui**: Visual generation. Creates images/photos/artwork using AI. (Image generation ONLY)
- **hybrid**: Both automation AND image generation in one workflow. (Must involve creating images AND automating something)
- **external_repo**: User wants to download/use existing code from GitHub

KEY DISTINCTION:
- "Report" = data document (n8n)
- "Image/photo" = visual content (comfyui)
- Both = hybrid

Respond ONLY with valid JSON:
{"project_type": "n8n" | "comfyui" | "hybrid" | "external_repo" | "unknown", "confidence": 0.0-1.0, "explanation": "..."}"""

    user_prompt = f'Classify: "{query}"'

    def _fallback(reason: str) -> Dict[str, Any]:
        # Uniform shape for every failure path.
        return {
            "project_type": ProjectType.UNKNOWN,
            "confidence": 0.0,
            "explanation": reason
        }

    try:
        resp = await self.client.post(
            f"{self.ollama_host}/api/generate",
            json={
                "model": self.ollama_model,
                "prompt": f"{system_prompt}\n\n{user_prompt}",
                "stream": False,
                "format": "json"
            }
        )

        if resp.status_code != 200:
            logger.debug(f"Ollama request failed: {resp.status_code}")
            return _fallback("Ollama request failed")

        raw_reply = resp.json().get("response", "{}")

        try:
            parsed = json.loads(raw_reply)
        except json.JSONDecodeError:
            logger.warning(f"Failed to parse Ollama response: {raw_reply}")
            return _fallback("Ollama response parsing failed")

        return {
            "project_type": parsed.get("project_type", ProjectType.UNKNOWN),
            "confidence": float(parsed.get("confidence", 0.5)),
            "explanation": parsed.get("explanation", "Ollama analysis")
        }

    except Exception as e:
        # Transport or unexpected errors — keep classification best-effort.
        logger.debug(f"Ollama analysis error: {e}")
        return _fallback(f"Ollama error: {str(e)}")
|
| 320 |
+
|
| 321 |
+
def _get_suggested_tools(self, project_type: str) -> List[str]:
    """Return the tool suggestions matching a classified project type.

    Unrecognized types fall back to a single "unknown_tool" placeholder.
    """
    n8n_tools = [
        "n8n_builder - Generate workflow JSON",
        "n8n_deploy - Deploy to n8n instance",
        "webhook_trigger - Setup webhook triggers"
    ]
    comfyui_tools = [
        "comfyui_builder - Generate workflow graph",
        "comfyui_execute - Run workflow",
        "model_download - Download required models"
    ]
    hybrid_tools = [
        "n8n_builder - Generate automation workflow",
        "comfyui_builder - Generate AI workflow",
        "api_connector - Connect n8n to ComfyUI"
    ]
    repo_tools = [
        "github_search - Find relevant repositories",
        "docker_helper - Clone and build",
        "error_analyzer - Fix build issues"
    ]
    suggestions = {
        ProjectType.N8N: n8n_tools,
        ProjectType.COMFYUI: comfyui_tools,
        ProjectType.HYBRID: hybrid_tools,
        ProjectType.EXTERNAL_REPO: repo_tools
    }
    return suggestions.get(project_type, ["unknown_tool"])
|
| 346 |
+
|
| 347 |
+
def _get_next_steps(self, project_type: str, query: str) -> List[str]:
    """Return the recommended step-by-step plan for a project type.

    The query argument is accepted for interface stability but is not used
    to tailor the plan. Unknown types ask the user for more detail.
    """
    n8n_plan = [
        "1. Generate workflow JSON template",
        "2. Customize nodes and connections",
        "3. Deploy to n8n instance",
        "4. Test with sample data"
    ]
    comfyui_plan = [
        "1. Generate ComfyUI workflow graph",
        "2. Check required models are installed",
        "3. Execute workflow",
        "4. Review generated output"
    ]
    hybrid_plan = [
        "1. Create ComfyUI workflow for AI task",
        "2. Create n8n workflow for automation",
        "3. Connect n8n → ComfyUI via HTTP",
        "4. Test end-to-end pipeline"
    ]
    repo_plan = [
        "1. Search GitHub for relevant projects",
        "2. Select best matching repository",
        "3. Clone and configure with Docker",
        "4. Validate and fix any errors"
    ]
    plans = {
        ProjectType.N8N: n8n_plan,
        ProjectType.COMFYUI: comfyui_plan,
        ProjectType.HYBRID: hybrid_plan,
        ProjectType.EXTERNAL_REPO: repo_plan
    }
    return plans.get(project_type, ["Please provide more details"])
|
| 376 |
+
|
| 377 |
+
|
| 378 |
+
# ============================================
|
| 379 |
+
# CrewAI Integration (Advanced Mode)
|
| 380 |
+
# ============================================
|
| 381 |
+
|
| 382 |
+
class CrewAIDecisionAgent:
    """Facade for a future CrewAI-backed decision agent.

    Presently wraps the lightweight DecisionAgent; the CrewAI multi-agent
    pipeline is planned for a later milestone and will slot in behind the
    same interface.
    """

    def __init__(self):
        # Phase 0 delegates everything to the simple agent; CrewAI wiring
        # is deferred to Milestone 1.
        self.simple_agent = DecisionAgent()

    async def analyze_complex(self, query: str, context: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Analyze a complex query.

        Placeholder for CrewAI multi-step reasoning — currently forwards
        the call to the simple agent unchanged.
        """
        return await self.simple_agent.analyze(query, context)
|
requirements.txt
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Milestone 1: Core AI Decision Agent
|
| 2 |
+
# Works for: Local FastAPI server + Hugging Face Gradio deployment
|
| 3 |
+
# Python 3.9+
|
| 4 |
+
|
| 5 |
+
# Web Frameworks
|
| 6 |
+
fastapi==0.115.0
|
| 7 |
+
uvicorn==0.32.0
|
| 8 |
+
gradio>=4.0.0
|
| 9 |
+
|
| 10 |
+
# Data Validation
|
| 11 |
+
pydantic==2.9.2
|
| 12 |
+
pydantic-settings==2.6.1
|
| 13 |
+
|
| 14 |
+
# AI & LLM
|
| 15 |
+
crewai==0.80.0
|
| 16 |
+
litellm==1.81.13
|
| 17 |
+
google-generativeai>=0.3.0
|
| 18 |
+
|
| 19 |
+
# Configuration & Environment
|
| 20 |
+
python-dotenv==1.0.0
|
| 21 |
+
|
| 22 |
+
# HTTP & Web
|
| 23 |
+
httpx==0.28.1
|
| 24 |
+
requests==2.32.3
|
| 25 |
+
beautifulsoup4>=4.12.0
|
| 26 |
+
|
| 27 |
+
# Search & Tools
|
| 28 |
+
duckduckgo-search==6.3.5
|
| 29 |
+
|
| 30 |
+
# Docker (optional, for local development)
|
| 31 |
+
docker==7.1.0
|
| 32 |
+
|
| 33 |
+
# Testing
|
| 34 |
+
pytest==8.3.3
|
| 35 |
+
pytest-asyncio==0.24.0
|
tools/__init__.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Tools package initialization
|
| 2 |
+
from .github_search import GitHubSearchTool
|
| 3 |
+
from .n8n_builder import N8NWorkflowBuilder
|
| 4 |
+
from .comfyui_builder import ComfyUIWorkflowBuilder
|
| 5 |
+
from .docker_helper import DockerHelper
|
| 6 |
+
from .web_search import WebSearchTool
|
| 7 |
+
from .workflow_templates import get_workflow_templates
|
| 8 |
+
from .comfyui_templates import get_comfyui_templates
|
| 9 |
+
|
| 10 |
+
__all__ = [
|
| 11 |
+
"GitHubSearchTool",
|
| 12 |
+
"N8NWorkflowBuilder",
|
| 13 |
+
"ComfyUIWorkflowBuilder",
|
| 14 |
+
"DockerHelper",
|
| 15 |
+
"WebSearchTool",
|
| 16 |
+
"get_workflow_templates",
|
| 17 |
+
"get_comfyui_templates"
|
| 18 |
+
]
|
tools/__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (741 Bytes). View file
|
|
|
tools/__pycache__/comfyui_builder.cpython-311.pyc
ADDED
|
Binary file (20.3 kB). View file
|
|
|
tools/__pycache__/comfyui_templates.cpython-311.pyc
ADDED
|
Binary file (13.8 kB). View file
|
|
|
tools/__pycache__/docker_helper.cpython-311.pyc
ADDED
|
Binary file (16.1 kB). View file
|
|
|
tools/__pycache__/github_search.cpython-311.pyc
ADDED
|
Binary file (12.7 kB). View file
|
|
|
tools/__pycache__/n8n_builder.cpython-311.pyc
ADDED
|
Binary file (20.1 kB). View file
|
|
|
tools/__pycache__/web_search.cpython-311.pyc
ADDED
|
Binary file (7.96 kB). View file
|
|
|
tools/__pycache__/workflow_templates.cpython-311.pyc
ADDED
|
Binary file (14.8 kB). View file
|
|
|
tools/comfyui_builder.py
ADDED
|
@@ -0,0 +1,546 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ComfyUI Workflow Builder Tool
|
| 2 |
+
"""
|
| 3 |
+
Generate and execute ComfyUI workflow JSON templates.
|
| 4 |
+
Supports common generative AI patterns.
|
| 5 |
+
LLM-enhanced generation when context.use_llm=True.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import httpx
|
| 9 |
+
import json
|
| 10 |
+
import logging
|
| 11 |
+
import uuid
|
| 12 |
+
from typing import Dict, Any, List, Optional
|
| 13 |
+
|
| 14 |
+
from config import settings
|
| 15 |
+
|
| 16 |
+
logger = logging.getLogger(__name__)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class ComfyUIWorkflowBuilder:
|
| 20 |
+
"""
|
| 21 |
+
ComfyUI workflow generator and executor.
|
| 22 |
+
Creates JSON workflow graphs and executes via ComfyUI API.
|
| 23 |
+
"""
|
| 24 |
+
|
| 25 |
+
def __init__(self):
    """Create the builder with its ComfyUI endpoint and HTTP client.

    The client is long-lived and shared by all requests from this
    instance; it is never explicitly closed here — presumably it lives
    for the process lifetime (TODO confirm against app shutdown).
    """
    # Base URL of the ComfyUI server, taken from app settings.
    self.comfyui_host = settings.COMFYUI_HOST
    self.client = httpx.AsyncClient(timeout=300.0)  # Long timeout for image generation
|
| 28 |
+
|
| 29 |
+
async def check_health(self) -> str:
    """Probe the ComfyUI server and report a status string.

    Returns:
        "healthy" on HTTP 200 from /system_stats, "unhealthy" for any
        other status code, "unreachable" if the request itself fails.
    """
    try:
        stats_response = await self.client.get(f"{self.comfyui_host}/system_stats")
        return "healthy" if stats_response.status_code == 200 else "unhealthy"
    except Exception as e:
        logger.debug(f"ComfyUI health check failed: {e}")
        return "unreachable"
|
| 39 |
+
|
| 40 |
+
async def generate_workflow(self, query: str, context: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    """Generate a ComfyUI workflow JSON for a natural-language request.

    Args:
        query: User's natural language request.
        context: Optional dict; when it carries a truthy "use_llm" flag the
            LLM-enhanced path is tried first, falling back to keyword-based
            templates if it returns nothing.

    Returns:
        ComfyUI workflow JSON structure.
    """
    wants_llm = isinstance(context, dict) and context.get('use_llm', False)

    if wants_llm:
        logger.info("Using LLM-enhanced workflow generation")
        enhanced = await self._generate_llm_workflow(query)
        if enhanced:
            return enhanced
        logger.warning("LLM generation failed, falling back to templates")

    # Keyword-driven template path: classify the request, pull parameters
    # out of the text, then dispatch to the matching template builder.
    kind = self._detect_workflow_type(query)
    params = self._extract_params(query)

    builders = {
        "txt2img": self._generate_txt2img_workflow,
        "img2img": self._generate_img2img_workflow,
        "upscale": self._generate_upscale_workflow,
        "inpaint": self._generate_inpaint_workflow,
        "controlnet": self._generate_controlnet_workflow,
        "generic": self._generate_generic_workflow
    }
    build = builders.get(kind, self._generate_generic_workflow)
    return build(params)
|
| 85 |
+
|
| 86 |
+
async def _generate_llm_workflow(self, query: str) -> Optional[Dict[str, Any]]:
    """
    Use LLM to generate a more intelligent workflow based on query analysis.

    Runs the DecisionAgent over the query, builds the same keyword-selected
    template as the non-LLM path, then attaches the agent's analysis under
    meta["llm_analysis"].

    Args:
        query: User's natural language request

    Returns:
        Enhanced ComfyUI workflow or None if LLM fails
    """
    try:
        # Import here to avoid circular dependency
        # (decision_agent imports from this package's tools).
        from decision_agent import DecisionAgent

        agent = DecisionAgent()
        analysis = await agent.analyze(query, context={'use_llm': True})

        # Use analysis explanation to create more detailed workflow
        # NOTE: the workflow type itself is still keyword-detected; the
        # LLM result only enriches the metadata.
        workflow_type = self._detect_workflow_type(query)
        explanation = analysis.get('explanation', '')
        params = self._extract_params(query)

        # Generate base template
        templates = {
            "txt2img": self._generate_txt2img_workflow,
            "img2img": self._generate_img2img_workflow,
            "upscale": self._generate_upscale_workflow,
            "inpaint": self._generate_inpaint_workflow,
            "controlnet": self._generate_controlnet_workflow,
            "generic": self._generate_generic_workflow
        }

        generator = templates.get(workflow_type, self._generate_generic_workflow)
        workflow = generator(params)

        # Enhance with LLM analysis
        workflow['meta']['llm_analysis'] = {
            'explanation': explanation,
            'confidence': analysis.get('confidence', 0.0),
            'suggested_tools': analysis.get('suggested_tools', []),
            'next_steps': analysis.get('next_steps', [])
        }
        workflow['meta']['generated_with_llm'] = True

        return workflow

    except Exception as e:
        # Any failure (agent error, import error, template error) signals
        # the caller to fall back to the plain template path.
        logger.error(f"LLM workflow generation failed: {e}")
        return None
|
| 135 |
+
|
| 136 |
+
def _detect_workflow_type(self, query: str) -> str:
|
| 137 |
+
"""Detect the type of ComfyUI workflow needed."""
|
| 138 |
+
query_lower = query.lower()
|
| 139 |
+
|
| 140 |
+
if any(w in query_lower for w in ["upscale", "enhance", "higher resolution", "4x", "2x"]):
|
| 141 |
+
return "upscale"
|
| 142 |
+
elif any(w in query_lower for w in ["inpaint", "edit", "remove", "fill", "mask"]):
|
| 143 |
+
return "inpaint"
|
| 144 |
+
elif any(w in query_lower for w in ["controlnet", "pose", "depth", "canny", "edge"]):
|
| 145 |
+
return "controlnet"
|
| 146 |
+
elif any(w in query_lower for w in ["img2img", "transform", "style transfer", "from image"]):
|
| 147 |
+
return "img2img"
|
| 148 |
+
else:
|
| 149 |
+
return "txt2img"
|
| 150 |
+
|
| 151 |
+
def _extract_params(self, query: str) -> Dict[str, Any]:
|
| 152 |
+
"""Extract generation parameters from query."""
|
| 153 |
+
# Default parameters
|
| 154 |
+
params = {
|
| 155 |
+
"prompt": query,
|
| 156 |
+
"negative_prompt": "bad quality, blurry, deformed",
|
| 157 |
+
"width": 512,
|
| 158 |
+
"height": 512,
|
| 159 |
+
"steps": 20,
|
| 160 |
+
"cfg": 7.0,
|
| 161 |
+
"seed": -1, # Random
|
| 162 |
+
"checkpoint": "v1-5-pruned-emaonly.safetensors"
|
| 163 |
+
}
|
| 164 |
+
|
| 165 |
+
query_lower = query.lower()
|
| 166 |
+
|
| 167 |
+
# Detect resolution
|
| 168 |
+
if "portrait" in query_lower or "vertical" in query_lower:
|
| 169 |
+
params["width"] = 512
|
| 170 |
+
params["height"] = 768
|
| 171 |
+
elif "landscape" in query_lower or "horizontal" in query_lower:
|
| 172 |
+
params["width"] = 768
|
| 173 |
+
params["height"] = 512
|
| 174 |
+
elif "square" in query_lower:
|
| 175 |
+
params["width"] = 512
|
| 176 |
+
params["height"] = 512
|
| 177 |
+
elif "hd" in query_lower or "1024" in query_lower:
|
| 178 |
+
params["width"] = 1024
|
| 179 |
+
params["height"] = 1024
|
| 180 |
+
|
| 181 |
+
# Detect model
|
| 182 |
+
if "sdxl" in query_lower:
|
| 183 |
+
params["checkpoint"] = "sd_xl_base_1.0.safetensors"
|
| 184 |
+
params["width"] = 1024
|
| 185 |
+
params["height"] = 1024
|
| 186 |
+
elif "flux" in query_lower:
|
| 187 |
+
params["checkpoint"] = "flux1-dev.safetensors"
|
| 188 |
+
|
| 189 |
+
# Detect quality settings
|
| 190 |
+
if "high quality" in query_lower or "detailed" in query_lower:
|
| 191 |
+
params["steps"] = 30
|
| 192 |
+
params["cfg"] = 8.0
|
| 193 |
+
elif "fast" in query_lower or "quick" in query_lower:
|
| 194 |
+
params["steps"] = 15
|
| 195 |
+
params["cfg"] = 6.0
|
| 196 |
+
|
| 197 |
+
return params
|
| 198 |
+
|
| 199 |
+
def _generate_txt2img_workflow(self, params: Dict[str, Any]) -> Dict[str, Any]:
|
| 200 |
+
"""Generate text-to-image workflow."""
|
| 201 |
+
return {
|
| 202 |
+
"prompt": {
|
| 203 |
+
"3": {
|
| 204 |
+
"inputs": {
|
| 205 |
+
"seed": params.get("seed", -1),
|
| 206 |
+
"steps": params.get("steps", 20),
|
| 207 |
+
"cfg": params.get("cfg", 7.0),
|
| 208 |
+
"sampler_name": "euler",
|
| 209 |
+
"scheduler": "normal",
|
| 210 |
+
"denoise": 1.0,
|
| 211 |
+
"model": ["4", 0],
|
| 212 |
+
"positive": ["6", 0],
|
| 213 |
+
"negative": ["7", 0],
|
| 214 |
+
"latent_image": ["5", 0]
|
| 215 |
+
},
|
| 216 |
+
"class_type": "KSampler",
|
| 217 |
+
"_meta": {"title": "KSampler"}
|
| 218 |
+
},
|
| 219 |
+
"4": {
|
| 220 |
+
"inputs": {
|
| 221 |
+
"ckpt_name": params.get("checkpoint", "v1-5-pruned-emaonly.safetensors")
|
| 222 |
+
},
|
| 223 |
+
"class_type": "CheckpointLoaderSimple",
|
| 224 |
+
"_meta": {"title": "Load Checkpoint"}
|
| 225 |
+
},
|
| 226 |
+
"5": {
|
| 227 |
+
"inputs": {
|
| 228 |
+
"width": params.get("width", 512),
|
| 229 |
+
"height": params.get("height", 512),
|
| 230 |
+
"batch_size": 1
|
| 231 |
+
},
|
| 232 |
+
"class_type": "EmptyLatentImage",
|
| 233 |
+
"_meta": {"title": "Empty Latent Image"}
|
| 234 |
+
},
|
| 235 |
+
"6": {
|
| 236 |
+
"inputs": {
|
| 237 |
+
"text": params.get("prompt", "beautiful landscape"),
|
| 238 |
+
"clip": ["4", 1]
|
| 239 |
+
},
|
| 240 |
+
"class_type": "CLIPTextEncode",
|
| 241 |
+
"_meta": {"title": "CLIP Text Encode (Prompt)"}
|
| 242 |
+
},
|
| 243 |
+
"7": {
|
| 244 |
+
"inputs": {
|
| 245 |
+
"text": params.get("negative_prompt", "bad quality, blurry"),
|
| 246 |
+
"clip": ["4", 1]
|
| 247 |
+
},
|
| 248 |
+
"class_type": "CLIPTextEncode",
|
| 249 |
+
"_meta": {"title": "CLIP Text Encode (Negative)"}
|
| 250 |
+
},
|
| 251 |
+
"8": {
|
| 252 |
+
"inputs": {
|
| 253 |
+
"samples": ["3", 0],
|
| 254 |
+
"vae": ["4", 2]
|
| 255 |
+
},
|
| 256 |
+
"class_type": "VAEDecode",
|
| 257 |
+
"_meta": {"title": "VAE Decode"}
|
| 258 |
+
},
|
| 259 |
+
"9": {
|
| 260 |
+
"inputs": {
|
| 261 |
+
"filename_prefix": "ComfyUI",
|
| 262 |
+
"images": ["8", 0]
|
| 263 |
+
},
|
| 264 |
+
"class_type": "SaveImage",
|
| 265 |
+
"_meta": {"title": "Save Image"}
|
| 266 |
+
}
|
| 267 |
+
},
|
| 268 |
+
"meta": {
|
| 269 |
+
"generated_by": "AI Workflow Agent",
|
| 270 |
+
"type": "txt2img",
|
| 271 |
+
"params": params
|
| 272 |
+
}
|
| 273 |
+
}
|
| 274 |
+
|
| 275 |
+
def _generate_img2img_workflow(self, params: Dict[str, Any]) -> Dict[str, Any]:
    """Derive an image-to-image graph from the txt2img template.

    Replaces the empty latent (node 5) with a LoadImage node, adds a
    VAEEncode node (10), and points the sampler at the encoded image with
    a partial denoise so the source image survives.
    """
    workflow = self._generate_txt2img_workflow(params)
    graph = workflow["prompt"]

    # Node 5 becomes the input image loader instead of an empty latent.
    graph["5"] = {
        "inputs": {
            "image": "INPUT_IMAGE_PATH",
            "upload": "image"
        },
        "class_type": "LoadImage",
        "_meta": {"title": "Load Image"}
    }

    # Node 10 encodes the loaded pixels into latent space.
    graph["10"] = {
        "inputs": {
            "pixels": ["5", 0],
            "vae": ["4", 2]
        },
        "class_type": "VAEEncode",
        "_meta": {"title": "VAE Encode"}
    }

    # Sample from the encoded image with partial denoise.
    sampler_inputs = graph["3"]["inputs"]
    sampler_inputs["latent_image"] = ["10", 0]
    sampler_inputs["denoise"] = 0.75

    workflow["meta"]["type"] = "img2img"
    return workflow
|
| 306 |
+
|
| 307 |
+
def _generate_upscale_workflow(self, params: Dict[str, Any]) -> Dict[str, Any]:
|
| 308 |
+
"""Generate upscale workflow."""
|
| 309 |
+
return {
|
| 310 |
+
"prompt": {
|
| 311 |
+
"1": {
|
| 312 |
+
"inputs": {
|
| 313 |
+
"image": "INPUT_IMAGE_PATH",
|
| 314 |
+
"upload": "image"
|
| 315 |
+
},
|
| 316 |
+
"class_type": "LoadImage",
|
| 317 |
+
"_meta": {"title": "Load Image"}
|
| 318 |
+
},
|
| 319 |
+
"2": {
|
| 320 |
+
"inputs": {
|
| 321 |
+
"model_name": "RealESRGAN_x4plus.pth"
|
| 322 |
+
},
|
| 323 |
+
"class_type": "UpscaleModelLoader",
|
| 324 |
+
"_meta": {"title": "Load Upscale Model"}
|
| 325 |
+
},
|
| 326 |
+
"3": {
|
| 327 |
+
"inputs": {
|
| 328 |
+
"upscale_model": ["2", 0],
|
| 329 |
+
"image": ["1", 0]
|
| 330 |
+
},
|
| 331 |
+
"class_type": "ImageUpscaleWithModel",
|
| 332 |
+
"_meta": {"title": "Upscale Image"}
|
| 333 |
+
},
|
| 334 |
+
"4": {
|
| 335 |
+
"inputs": {
|
| 336 |
+
"filename_prefix": "Upscaled",
|
| 337 |
+
"images": ["3", 0]
|
| 338 |
+
},
|
| 339 |
+
"class_type": "SaveImage",
|
| 340 |
+
"_meta": {"title": "Save Image"}
|
| 341 |
+
}
|
| 342 |
+
},
|
| 343 |
+
"meta": {
|
| 344 |
+
"generated_by": "AI Workflow Agent",
|
| 345 |
+
"type": "upscale",
|
| 346 |
+
"params": params
|
| 347 |
+
}
|
| 348 |
+
}
|
| 349 |
+
|
| 350 |
+
def _generate_inpaint_workflow(self, params: Dict[str, Any]) -> Dict[str, Any]:
    """Derive an inpainting graph from the txt2img template.

    Adds a source-image loader (10) and a mask loader (11), then replaces
    the empty latent (node 5) with a VAEEncodeForInpaint node so sampling
    repaints only the masked region.
    """
    workflow = self._generate_txt2img_workflow(params)
    graph = workflow["prompt"]

    graph["10"] = {
        "inputs": {
            "image": "INPUT_IMAGE_PATH",
            "upload": "image"
        },
        "class_type": "LoadImage",
        "_meta": {"title": "Load Image"}
    }

    graph["11"] = {
        "inputs": {
            "image": "MASK_IMAGE_PATH",
            "upload": "image"
        },
        "class_type": "LoadImage",
        "_meta": {"title": "Load Mask"}
    }

    # Node 5 now encodes image + mask instead of producing an empty latent.
    graph["5"] = {
        "inputs": {
            "grow_mask_by": 6,
            "pixels": ["10", 0],
            "vae": ["4", 2],
            "mask": ["11", 0]
        },
        "class_type": "VAEEncodeForInpaint",
        "_meta": {"title": "VAE Encode (Inpaint)"}
    }

    workflow["meta"]["type"] = "inpaint"
    return workflow
|
| 388 |
+
|
| 389 |
+
def _generate_controlnet_workflow(self, params: Dict[str, Any]) -> Dict[str, Any]:
    """Derive a ControlNet-guided graph from the txt2img template.

    Adds a ControlNet loader (10), a control-image loader (11), and a
    ControlNetApply node (12), then reroutes the sampler's positive
    conditioning through the ControlNet.
    """
    workflow = self._generate_txt2img_workflow(params)
    graph = workflow["prompt"]

    graph["10"] = {
        "inputs": {
            "control_net_name": "control_v11p_sd15_canny.pth"
        },
        "class_type": "ControlNetLoader",
        "_meta": {"title": "Load ControlNet"}
    }

    graph["11"] = {
        "inputs": {
            "image": "CONTROL_IMAGE_PATH",
            "upload": "image"
        },
        "class_type": "LoadImage",
        "_meta": {"title": "Load Control Image"}
    }

    graph["12"] = {
        "inputs": {
            "strength": 1.0,
            "conditioning": ["6", 0],
            "control_net": ["10", 0],
            "image": ["11", 0]
        },
        "class_type": "ControlNetApply",
        "_meta": {"title": "Apply ControlNet"}
    }

    # Route the positive conditioning through the ControlNet node.
    graph["3"]["inputs"]["positive"] = ["12", 0]

    workflow["meta"]["type"] = "controlnet"
    return workflow
|
| 428 |
+
|
| 429 |
+
def _generate_generic_workflow(self, params: Dict[str, Any]) -> Dict[str, Any]:
    """Fallback workflow builder for unrecognized request types.

    No specialized handling exists, so a plain text-to-image graph is
    the safe default.
    """
    return self._generate_txt2img_workflow(params)
+
async def execute_workflow(self, workflow: Dict[str, Any]) -> Dict[str, Any]:
    """
    Execute workflow in ComfyUI.

    Args:
        workflow: ComfyUI workflow JSON

    Returns:
        Execution result with output paths
    """
    try:
        # Accept either a wrapped workflow ({"prompt": ...}) or a bare graph.
        prompt = workflow.get("prompt", workflow)

        # Each submission gets its own client id for history tracking.
        client_id = str(uuid.uuid4())

        payload = {"prompt": prompt, "client_id": client_id}
        response = await self.client.post(
            f"{self.comfyui_host}/prompt",
            json=payload
        )

        if response.status_code != 200:
            logger.error(f"ComfyUI queue failed: {response.status_code}")
            return {
                "success": False,
                "error": f"Queue failed: {response.status_code}"
            }

        prompt_id = response.json().get("prompt_id")
        logger.info(f"ComfyUI prompt queued: {prompt_id}")

        # Block (with polling) until the prompt shows up in the history.
        output = await self._wait_for_completion(prompt_id)

        return {
            "success": True,
            "prompt_id": prompt_id,
            "output": output
        }

    except Exception as e:
        logger.error(f"ComfyUI execute error: {e}")
        return {
            "success": False,
            "error": str(e)
        }
+
async def _wait_for_completion(
    self,
    prompt_id: str,
    timeout: int = 300,
    poll_interval: int = 2
) -> Dict[str, Any]:
    """Poll ComfyUI's history endpoint until the prompt completes.

    Args:
        prompt_id: Identifier returned when the prompt was queued.
        timeout: Maximum seconds to wait before giving up.
        poll_interval: Seconds to sleep between history polls.

    Returns:
        The history entry for the prompt, or a ``{"status": "timeout", ...}``
        marker if the deadline passed first.
    """
    import asyncio
    import time

    # Fix: use a monotonic clock for the deadline. The previous counter
    # only accumulated sleep intervals, so HTTP request latency was not
    # counted and real wall time could overshoot the timeout.
    start = time.monotonic()
    while (elapsed := time.monotonic() - start) < timeout:
        try:
            response = await self.client.get(
                f"{self.comfyui_host}/history/{prompt_id}"
            )

            if response.status_code == 200:
                history = response.json()
                if prompt_id in history:
                    return history[prompt_id]

            await asyncio.sleep(poll_interval)

        except Exception as e:
            # Transient poll failures are tolerated; keep retrying
            # until the deadline expires.
            logger.warning(f"Poll error: {e}")
            await asyncio.sleep(poll_interval)

    return {"status": "timeout", "elapsed": elapsed}
async def get_models(self) -> List[str]:
    """Get available models in ComfyUI."""
    try:
        url = f"{self.comfyui_host}/object_info/CheckpointLoaderSimple"
        response = await self.client.get(url)

        if response.status_code != 200:
            return []

        # The checkpoint list lives at:
        #   CheckpointLoaderSimple -> input -> required -> ckpt_name[0]
        data = response.json()
        node_info = data.get("CheckpointLoaderSimple", {})
        required_inputs = node_info.get("input", {}).get("required", {})
        return required_inputs.get("ckpt_name", [[]])[0]

    except Exception as e:
        logger.error(f"Get models error: {e}")
        return []
+
async def get_queue_status(self) -> Dict[str, Any]:
    """Get current ComfyUI queue status."""
    try:
        response = await self.client.get(f"{self.comfyui_host}/queue")
        # Anything but a 200 is treated as "no status available".
        return response.json() if response.status_code == 200 else {}
    except Exception as e:
        logger.error(f"Queue status error: {e}")
        return {}
tools/comfyui_templates.py
ADDED
|
@@ -0,0 +1,537 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Additional ComfyUI Workflow Templates
|
| 2 |
+
"""
|
| 3 |
+
Extended ComfyUI workflow templates for generative AI.
|
| 4 |
+
Milestone 1: More comprehensive image generation patterns.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from datetime import datetime
from typing import Any, Callable, Dict
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def get_comfyui_templates() -> Dict[str, Callable[[Dict[str, Any]], Dict[str, Any]]]:
    """Return all available ComfyUI templates.

    Fix: the annotation previously used the builtin ``callable`` as a type
    parameter (``Dict[str, callable]``), which is not a valid static type;
    ``typing.Callable`` with the builder signature is used instead.

    Returns:
        Mapping from template name to a builder function that takes a
        params dict and returns a ComfyUI workflow graph (node-id -> node).
    """
    return {
        "text_to_image": text_to_image_workflow,
        "image_to_image": image_to_image_workflow,
        "inpainting": inpainting_workflow,
        "upscale": upscale_workflow,
        "controlnet": controlnet_workflow,
        "batch_generation": batch_generation_workflow,
        "style_transfer": style_transfer_workflow,
        "lora_generation": lora_generation_workflow,
    }
+
|
| 25 |
+
def text_to_image_workflow(params: Dict[str, Any]) -> Dict[str, Any]:
    """Build a basic text-to-image ComfyUI graph.

    Recognized params (all optional): prompt, negative_prompt, model,
    width, height, steps, cfg, seed.  A non-positive seed falls back to
    a fixed default so the graph is always valid.
    """
    positive_text = params.get("prompt", "a beautiful landscape")
    negative_text = params.get("negative_prompt", "blurry, low quality, distorted")
    checkpoint = params.get("model", "sd_xl_base_1.0.safetensors")
    img_width = params.get("width", 1024)
    img_height = params.get("height", 1024)
    sample_steps = params.get("steps", 25)
    cfg_scale = params.get("cfg", 7.0)
    requested_seed = params.get("seed", -1)
    # Negative / zero seeds mean "unset"; substitute a deterministic default.
    effective_seed = requested_seed if requested_seed > 0 else 12345

    graph: Dict[str, Any] = {}
    graph["3"] = {
        "inputs": {
            "seed": effective_seed,
            "steps": sample_steps,
            "cfg": cfg_scale,
            "sampler_name": "dpmpp_2m",
            "scheduler": "karras",
            "denoise": 1.0,
            "model": ["4", 0],
            "positive": ["6", 0],
            "negative": ["7", 0],
            "latent_image": ["5", 0],
        },
        "class_type": "KSampler",
        "_meta": {"title": "KSampler"},
    }
    graph["4"] = {
        "inputs": {"ckpt_name": checkpoint},
        "class_type": "CheckpointLoaderSimple",
        "_meta": {"title": "Load Checkpoint"},
    }
    graph["5"] = {
        "inputs": {"width": img_width, "height": img_height, "batch_size": 1},
        "class_type": "EmptyLatentImage",
        "_meta": {"title": "Empty Latent"},
    }
    graph["6"] = {
        "inputs": {"text": positive_text, "clip": ["4", 1]},
        "class_type": "CLIPTextEncode",
        "_meta": {"title": "Positive Prompt"},
    }
    graph["7"] = {
        "inputs": {"text": negative_text, "clip": ["4", 1]},
        "class_type": "CLIPTextEncode",
        "_meta": {"title": "Negative Prompt"},
    }
    graph["8"] = {
        "inputs": {"samples": ["3", 0], "vae": ["4", 2]},
        "class_type": "VAEDecode",
        "_meta": {"title": "VAE Decode"},
    }
    graph["9"] = {
        "inputs": {"filename_prefix": "txt2img", "images": ["8", 0]},
        "class_type": "SaveImage",
        "_meta": {"title": "Save Image"},
    }
    return graph
+
def image_to_image_workflow(params: Dict[str, Any]) -> Dict[str, Any]:
    """Build an image-to-image transformation graph.

    The source image is VAE-encoded and re-sampled at the requested
    denoise strength (lower values stay closer to the original image).
    """
    positive_text = params.get("prompt", "enhance this image")
    negative_text = params.get("negative_prompt", "blurry, low quality")
    checkpoint = params.get("model", "sd_xl_base_1.0.safetensors")
    denoise_strength = params.get("denoise", 0.75)
    sample_steps = params.get("steps", 30)

    graph: Dict[str, Any] = {}
    # 1: source image to transform
    graph["1"] = {
        "inputs": {"image": "input.png", "upload": "image"},
        "class_type": "LoadImage",
        "_meta": {"title": "Load Input Image"},
    }
    # 2: checkpoint provides model / CLIP / VAE outputs
    graph["2"] = {
        "inputs": {"ckpt_name": checkpoint},
        "class_type": "CheckpointLoaderSimple",
        "_meta": {"title": "Load Checkpoint"},
    }
    # 3: encode pixels into latent space
    graph["3"] = {
        "inputs": {"pixels": ["1", 0], "vae": ["2", 2]},
        "class_type": "VAEEncode",
        "_meta": {"title": "VAE Encode"},
    }
    # 4/5: prompt conditioning
    graph["4"] = {
        "inputs": {"text": positive_text, "clip": ["2", 1]},
        "class_type": "CLIPTextEncode",
        "_meta": {"title": "Positive Prompt"},
    }
    graph["5"] = {
        "inputs": {"text": negative_text, "clip": ["2", 1]},
        "class_type": "CLIPTextEncode",
        "_meta": {"title": "Negative Prompt"},
    }
    # 6: partial re-sampling of the encoded latent
    graph["6"] = {
        "inputs": {
            "seed": 12345,
            "steps": sample_steps,
            "cfg": 7.0,
            "sampler_name": "dpmpp_2m",
            "scheduler": "karras",
            "denoise": denoise_strength,
            "model": ["2", 0],
            "positive": ["4", 0],
            "negative": ["5", 0],
            "latent_image": ["3", 0],
        },
        "class_type": "KSampler",
        "_meta": {"title": "KSampler"},
    }
    # 7/8: decode and save
    graph["7"] = {
        "inputs": {"samples": ["6", 0], "vae": ["2", 2]},
        "class_type": "VAEDecode",
        "_meta": {"title": "VAE Decode"},
    }
    graph["8"] = {
        "inputs": {"filename_prefix": "img2img", "images": ["7", 0]},
        "class_type": "SaveImage",
        "_meta": {"title": "Save Image"},
    }
    return graph
+
def inpainting_workflow(params: Dict[str, Any]) -> Dict[str, Any]:
    """Build an inpainting graph: masked regions of the input are regenerated.

    Uses VAEEncodeForInpaint so only the (slightly grown) masked area is
    re-sampled while the rest of the image is preserved.
    """
    positive_text = params.get("prompt", "fill in the masked area")
    negative_text = params.get("negative_prompt", "blurry, distorted")
    checkpoint = params.get("model", "sd_xl_base_1.0.safetensors")

    return {
        "1": {
            "inputs": {"image": "input.png", "upload": "image"},
            "class_type": "LoadImage",
            "_meta": {"title": "Load Image"},
        },
        "2": {
            "inputs": {"image": "mask.png", "upload": "image"},
            "class_type": "LoadImage",
            "_meta": {"title": "Load Mask"},
        },
        "3": {
            "inputs": {"ckpt_name": checkpoint},
            "class_type": "CheckpointLoaderSimple",
            "_meta": {"title": "Load Checkpoint"},
        },
        "4": {
            # grow_mask_by pads the mask so seams blend into the surroundings.
            "inputs": {
                "pixels": ["1", 0],
                "vae": ["3", 2],
                "mask": ["2", 0],
                "grow_mask_by": 6,
            },
            "class_type": "VAEEncodeForInpaint",
            "_meta": {"title": "VAE Encode (Inpaint)"},
        },
        "5": {
            "inputs": {"text": positive_text, "clip": ["3", 1]},
            "class_type": "CLIPTextEncode",
            "_meta": {"title": "Positive"},
        },
        "6": {
            "inputs": {"text": negative_text, "clip": ["3", 1]},
            "class_type": "CLIPTextEncode",
            "_meta": {"title": "Negative"},
        },
        "7": {
            "inputs": {
                "seed": 12345,
                "steps": 30,
                "cfg": 8.0,
                "sampler_name": "dpmpp_2m_sde",
                "scheduler": "karras",
                "denoise": 1.0,
                "model": ["3", 0],
                "positive": ["5", 0],
                "negative": ["6", 0],
                "latent_image": ["4", 0],
            },
            "class_type": "KSampler",
            "_meta": {"title": "KSampler"},
        },
        "8": {
            "inputs": {"samples": ["7", 0], "vae": ["3", 2]},
            "class_type": "VAEDecode",
            "_meta": {"title": "VAE Decode"},
        },
        "9": {
            "inputs": {"filename_prefix": "inpaint", "images": ["8", 0]},
            "class_type": "SaveImage",
            "_meta": {"title": "Save Image"},
        },
    }
+
def upscale_workflow(params: Dict[str, Any]) -> Dict[str, Any]:
    """Build a pure upscaling graph (no diffusion pass).

    The input image is enlarged by an ESRGAN-style model and saved directly.
    """
    model_name = params.get("upscale_model", "RealESRGAN_x4plus.pth")

    nodes: Dict[str, Any] = {}
    nodes["1"] = {
        "inputs": {"image": "input.png", "upload": "image"},
        "class_type": "LoadImage",
        "_meta": {"title": "Load Image"},
    }
    nodes["2"] = {
        "inputs": {"model_name": model_name},
        "class_type": "UpscaleModelLoader",
        "_meta": {"title": "Load Upscale Model"},
    }
    nodes["3"] = {
        "inputs": {"upscale_model": ["2", 0], "image": ["1", 0]},
        "class_type": "ImageUpscaleWithModel",
        "_meta": {"title": "Upscale Image"},
    }
    nodes["4"] = {
        "inputs": {"filename_prefix": "upscaled", "images": ["3", 0]},
        "class_type": "SaveImage",
        "_meta": {"title": "Save Image"},
    }
    return nodes
+
def controlnet_workflow(params: Dict[str, Any]) -> Dict[str, Any]:
    """Build a ControlNet-guided generation graph.

    The control image steers composition via ControlNetApply; the positive
    conditioning is routed through node "6" before it reaches the sampler.
    """
    positive_text = params.get("prompt", "a detailed illustration")
    negative_text = params.get("negative_prompt", "blurry, low quality")
    checkpoint = params.get("model", "sd_xl_base_1.0.safetensors")
    cn_model = params.get("controlnet", "controlnet-canny-sdxl-1.0.safetensors")
    cn_strength = params.get("strength", 1.0)

    graph: Dict[str, Any] = {}
    graph["1"] = {
        "inputs": {"image": "control_image.png", "upload": "image"},
        "class_type": "LoadImage",
        "_meta": {"title": "Load Control Image"},
    }
    graph["2"] = {
        "inputs": {"ckpt_name": checkpoint},
        "class_type": "CheckpointLoaderSimple",
        "_meta": {"title": "Load Checkpoint"},
    }
    graph["3"] = {
        "inputs": {"control_net_name": cn_model},
        "class_type": "ControlNetLoader",
        "_meta": {"title": "Load ControlNet"},
    }
    graph["4"] = {
        "inputs": {"text": positive_text, "clip": ["2", 1]},
        "class_type": "CLIPTextEncode",
        "_meta": {"title": "Positive"},
    }
    graph["5"] = {
        "inputs": {"text": negative_text, "clip": ["2", 1]},
        "class_type": "CLIPTextEncode",
        "_meta": {"title": "Negative"},
    }
    # Positive conditioning is passed through the ControlNet before sampling.
    graph["6"] = {
        "inputs": {
            "strength": cn_strength,
            "conditioning": ["4", 0],
            "control_net": ["3", 0],
            "image": ["1", 0],
        },
        "class_type": "ControlNetApply",
        "_meta": {"title": "Apply ControlNet"},
    }
    graph["7"] = {
        "inputs": {"width": 1024, "height": 1024, "batch_size": 1},
        "class_type": "EmptyLatentImage",
        "_meta": {"title": "Empty Latent"},
    }
    graph["8"] = {
        "inputs": {
            "seed": 12345,
            "steps": 30,
            "cfg": 7.5,
            "sampler_name": "dpmpp_2m",
            "scheduler": "karras",
            "denoise": 1.0,
            "model": ["2", 0],
            "positive": ["6", 0],
            "negative": ["5", 0],
            "latent_image": ["7", 0],
        },
        "class_type": "KSampler",
        "_meta": {"title": "KSampler"},
    }
    graph["9"] = {
        "inputs": {"samples": ["8", 0], "vae": ["2", 2]},
        "class_type": "VAEDecode",
        "_meta": {"title": "VAE Decode"},
    }
    graph["10"] = {
        "inputs": {"filename_prefix": "controlnet", "images": ["9", 0]},
        "class_type": "SaveImage",
        "_meta": {"title": "Save Image"},
    }
    return graph
+
def batch_generation_workflow(params: Dict[str, Any]) -> Dict[str, Any]:
    """Build a txt2img graph that renders several images in one pass.

    ``batch_size`` controls how many latents are sampled simultaneously.
    """
    positive_text = params.get("prompt", "artistic image")
    negative_text = params.get("negative_prompt", "blurry")
    checkpoint = params.get("model", "sd_xl_base_1.0.safetensors")
    image_count = params.get("batch_size", 4)

    return {
        "1": {
            "inputs": {"ckpt_name": checkpoint},
            "class_type": "CheckpointLoaderSimple",
            "_meta": {"title": "Load Checkpoint"},
        },
        "2": {
            # A batched latent produces image_count outputs in a single run.
            "inputs": {"width": 1024, "height": 1024, "batch_size": image_count},
            "class_type": "EmptyLatentImage",
            "_meta": {"title": "Empty Latent (Batch)"},
        },
        "3": {
            "inputs": {"text": positive_text, "clip": ["1", 1]},
            "class_type": "CLIPTextEncode",
            "_meta": {"title": "Positive"},
        },
        "4": {
            "inputs": {"text": negative_text, "clip": ["1", 1]},
            "class_type": "CLIPTextEncode",
            "_meta": {"title": "Negative"},
        },
        "5": {
            "inputs": {
                "seed": 12345,
                "steps": 25,
                "cfg": 7.0,
                "sampler_name": "dpmpp_2m",
                "scheduler": "karras",
                "denoise": 1.0,
                "model": ["1", 0],
                "positive": ["3", 0],
                "negative": ["4", 0],
                "latent_image": ["2", 0],
            },
            "class_type": "KSampler",
            "_meta": {"title": "KSampler"},
        },
        "6": {
            "inputs": {"samples": ["5", 0], "vae": ["1", 2]},
            "class_type": "VAEDecode",
            "_meta": {"title": "VAE Decode"},
        },
        "7": {
            "inputs": {"filename_prefix": "batch", "images": ["6", 0]},
            "class_type": "SaveImage",
            "_meta": {"title": "Save Images"},
        },
    }
+
def style_transfer_workflow(params: Dict[str, Any]) -> Dict[str, Any]:
    """Build a style-transfer graph driven by IPAdapter.

    The style image conditions the model through IPAdapter while the
    content image is VAE-encoded and partially re-sampled (denoise 0.7)
    so its structure survives.
    """
    positive_text = params.get("prompt", "in the style of the reference")
    negative_text = params.get("negative_prompt", "blurry, low quality")
    checkpoint = params.get("model", "sd_xl_base_1.0.safetensors")
    style_weight = params.get("style_weight", 0.8)

    return {
        "1": {
            "inputs": {"image": "content.png", "upload": "image"},
            "class_type": "LoadImage",
            "_meta": {"title": "Load Content Image"},
        },
        "2": {
            "inputs": {"image": "style.png", "upload": "image"},
            "class_type": "LoadImage",
            "_meta": {"title": "Load Style Image"},
        },
        "3": {
            "inputs": {"ckpt_name": checkpoint},
            "class_type": "CheckpointLoaderSimple",
            "_meta": {"title": "Load Checkpoint"},
        },
        "4": {
            "inputs": {"ipadapter_file": "ip-adapter_sdxl.safetensors"},
            "class_type": "IPAdapterModelLoader",
            "_meta": {"title": "Load IPAdapter"},
        },
        "5": {
            "inputs": {"clip_name": "clip_vision_g.safetensors"},
            "class_type": "CLIPVisionLoader",
            "_meta": {"title": "Load CLIP Vision"},
        },
        "6": {
            # The style image is injected here, weighted by style_weight.
            "inputs": {
                "weight": style_weight,
                "model": ["3", 0],
                "ipadapter": ["4", 0],
                "image": ["2", 0],
                "clip_vision": ["5", 0],
            },
            "class_type": "IPAdapterApply",
            "_meta": {"title": "Apply IPAdapter"},
        },
        "7": {
            "inputs": {"text": positive_text, "clip": ["3", 1]},
            "class_type": "CLIPTextEncode",
            "_meta": {"title": "Positive"},
        },
        "8": {
            "inputs": {"text": negative_text, "clip": ["3", 1]},
            "class_type": "CLIPTextEncode",
            "_meta": {"title": "Negative"},
        },
        "9": {
            "inputs": {"pixels": ["1", 0], "vae": ["3", 2]},
            "class_type": "VAEEncode",
            "_meta": {"title": "VAE Encode"},
        },
        "10": {
            "inputs": {
                "seed": 12345,
                "steps": 30,
                "cfg": 7.0,
                "sampler_name": "dpmpp_2m",
                "scheduler": "karras",
                "denoise": 0.7,
                "model": ["6", 0],
                "positive": ["7", 0],
                "negative": ["8", 0],
                "latent_image": ["9", 0],
            },
            "class_type": "KSampler",
            "_meta": {"title": "KSampler"},
        },
        "11": {
            "inputs": {"samples": ["10", 0], "vae": ["3", 2]},
            "class_type": "VAEDecode",
            "_meta": {"title": "VAE Decode"},
        },
        "12": {
            "inputs": {"filename_prefix": "style_transfer", "images": ["11", 0]},
            "class_type": "SaveImage",
            "_meta": {"title": "Save Image"},
        },
    }
+
def lora_generation_workflow(params: Dict[str, Any]) -> Dict[str, Any]:
    """Build a txt2img graph with a LoRA applied to model and CLIP.

    The same strength is used for the model and CLIP branches of the LoRA.
    """
    positive_text = params.get("prompt", "high quality artwork")
    negative_text = params.get("negative_prompt", "blurry, low quality")
    checkpoint = params.get("model", "sd_xl_base_1.0.safetensors")
    lora_file = params.get("lora", "detail_enhancer.safetensors")
    strength = params.get("lora_strength", 0.8)

    graph: Dict[str, Any] = {}
    graph["1"] = {
        "inputs": {"ckpt_name": checkpoint},
        "class_type": "CheckpointLoaderSimple",
        "_meta": {"title": "Load Checkpoint"},
    }
    # The LoRA wraps the checkpoint's model and CLIP outputs; downstream
    # nodes consume node "2" instead of node "1".
    graph["2"] = {
        "inputs": {
            "lora_name": lora_file,
            "strength_model": strength,
            "strength_clip": strength,
            "model": ["1", 0],
            "clip": ["1", 1],
        },
        "class_type": "LoraLoader",
        "_meta": {"title": "Load LoRA"},
    }
    graph["3"] = {
        "inputs": {"text": positive_text, "clip": ["2", 1]},
        "class_type": "CLIPTextEncode",
        "_meta": {"title": "Positive"},
    }
    graph["4"] = {
        "inputs": {"text": negative_text, "clip": ["2", 1]},
        "class_type": "CLIPTextEncode",
        "_meta": {"title": "Negative"},
    }
    graph["5"] = {
        "inputs": {"width": 1024, "height": 1024, "batch_size": 1},
        "class_type": "EmptyLatentImage",
        "_meta": {"title": "Empty Latent"},
    }
    graph["6"] = {
        "inputs": {
            "seed": 12345,
            "steps": 25,
            "cfg": 7.0,
            "sampler_name": "dpmpp_2m",
            "scheduler": "karras",
            "denoise": 1.0,
            "model": ["2", 0],
            "positive": ["3", 0],
            "negative": ["4", 0],
            "latent_image": ["5", 0],
        },
        "class_type": "KSampler",
        "_meta": {"title": "KSampler"},
    }
    graph["7"] = {
        "inputs": {"samples": ["6", 0], "vae": ["1", 2]},
        "class_type": "VAEDecode",
        "_meta": {"title": "VAE Decode"},
    }
    graph["8"] = {
        "inputs": {"filename_prefix": "lora_gen", "images": ["7", 0]},
        "class_type": "SaveImage",
        "_meta": {"title": "Save Image"},
    }
    return graph
tools/docker_helper.py
ADDED
|
@@ -0,0 +1,449 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Docker Helper Tool
|
| 2 |
+
"""
|
| 3 |
+
Clone repositories and manage Docker builds.
|
| 4 |
+
Includes error analysis and fix suggestions.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import httpx
|
| 8 |
+
import subprocess
|
| 9 |
+
import os
|
| 10 |
+
import shutil
|
| 11 |
+
import logging
|
| 12 |
+
from typing import Dict, Any, Optional, List
|
| 13 |
+
from pathlib import Path
|
| 14 |
+
|
| 15 |
+
from config import settings
|
| 16 |
+
|
| 17 |
+
logger = logging.getLogger(__name__)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class DockerHelper:
|
| 21 |
+
"""
|
| 22 |
+
Docker automation helper.
|
| 23 |
+
Clones repos, builds containers, analyzes errors, suggests fixes.
|
| 24 |
+
"""
|
| 25 |
+
|
| 26 |
+
def __init__(self):
    """Initialize paths, Ollama connection settings, and the HTTP client."""
    # Root directory where repositories are cloned for building.
    self.projects_dir = Path(settings.PROJECTS_DIR)
    # Ollama endpoint/model used for error analysis.
    self.ollama_host = settings.OLLAMA_HOST
    self.ollama_model = settings.OLLAMA_MODEL
    # Generous timeout: builds and LLM analysis calls can be slow.
    self.client = httpx.AsyncClient(timeout=120.0)

    # Ensure projects directory exists
    self.projects_dir.mkdir(parents=True, exist_ok=True)
+
async def clone_and_build(
    self,
    repo_url: str,
    branch: str = "main"
) -> Dict[str, Any]:
    """
    Clone repository and attempt Docker build.

    Args:
        repo_url: GitHub repository URL
        branch: Branch to clone (default: main)

    Returns:
        Dict with success status, logs, and fix suggestions
    """
    project_name = self._extract_project_name(repo_url)
    project_path = self.projects_dir / project_name

    try:
        # 1) Fetch the sources.
        clone_result = await self._clone_repo(repo_url, project_path, branch)
        if not clone_result["success"]:
            return clone_result

        # 2) Figure out what kind of project this is.
        structure = await self._detect_structure(project_path)

        # 3) Try building it.
        build_result = await self._docker_build(project_path, structure)
        build_logs = build_result.get("logs", "")

        if build_result["success"]:
            return {
                "success": True,
                "message": f"Project {project_name} built successfully",
                "container_id": build_result.get("container_id"),
                "logs": build_logs
            }

        # 4) Build failed — analyze the logs and propose a fix.
        fix = await self._analyze_and_suggest_fix(build_logs, project_path)
        return {
            "success": False,
            "message": f"Build failed for {project_name}",
            "logs": build_logs,
            "fix_suggestion": fix
        }

    except Exception as e:
        logger.error(f"Clone and build error: {e}")
        return {
            "success": False,
            "message": f"Error: {str(e)}",
            "logs": str(e)
        }
+
def _extract_project_name(self, repo_url: str) -> str:
    """Derive the project directory name from a repository URL."""
    # Normalize: strip trailing slashes, then a ".git" suffix if present.
    trimmed = repo_url.rstrip("/")
    if trimmed.endswith(".git"):
        trimmed = trimmed[: -len(".git")]
    # The last path segment is the repository name.
    return trimmed.rsplit("/", 1)[-1]
async def _clone_repo(
    self,
    repo_url: str,
    project_path: Path,
    branch: str
) -> Dict[str, Any]:
    """
    Shallow-clone (depth 1) a repository into project_path.

    Tries the requested branch first; if that fails (e.g. the branch does
    not exist — many repos use "master" instead of "main"), retries once
    on the repository's default branch.

    Args:
        repo_url: Repository URL to clone.
        project_path: Destination directory (recreated if it exists).
        branch: Branch name to attempt first.

    Returns:
        Dict with "success" flag, "message", and "logs" on clone failure.
    """
    try:
        # Start from a clean slate.
        if project_path.exists():
            shutil.rmtree(project_path)

        result = subprocess.run(
            ["git", "clone", "--depth", "1", "-b", branch, repo_url, str(project_path)],
            capture_output=True,
            text=True,
            timeout=120
        )

        if result.returncode == 0:
            logger.info(f"Cloned {repo_url} to {project_path}")
            return {"success": True, "message": "Repository cloned"}

        # BUGFIX: a failed clone can leave a partial destination directory
        # behind, which would make the fallback clone below fail with
        # "destination path already exists". Remove it before retrying.
        if project_path.exists():
            shutil.rmtree(project_path)

        # Retry without a branch specification (use the default branch).
        result = subprocess.run(
            ["git", "clone", "--depth", "1", repo_url, str(project_path)],
            capture_output=True,
            text=True,
            timeout=120
        )

        if result.returncode == 0:
            return {"success": True, "message": "Repository cloned (default branch)"}

        return {
            "success": False,
            "message": f"Clone failed: {result.stderr}",
            "logs": result.stderr
        }

    except subprocess.TimeoutExpired:
        return {"success": False, "message": "Clone timed out"}
    except Exception as e:
        return {"success": False, "message": f"Clone error: {str(e)}"}
async def _detect_structure(self, project_path: Path) -> Dict[str, Any]:
    """Inspect a checkout and record which build/config files it contains."""
    # (filename, boolean flag to set, optional key that stores the path)
    markers = [
        ("Dockerfile", "has_dockerfile", "dockerfile_path"),
        ("docker-compose.yml", "has_compose", "compose_path"),
        ("docker-compose.yaml", "has_compose", "compose_path"),
        ("compose.yml", "has_compose", "compose_path"),
        ("compose.yaml", "has_compose", "compose_path"),
        ("requirements.txt", "has_requirements", None),
        ("package.json", "has_package_json", None),
        ("Makefile", "has_makefile", None),
    ]

    structure: Dict[str, Any] = {
        "has_dockerfile": False,
        "has_compose": False,
        "has_requirements": False,
        "has_package_json": False,
        "has_makefile": False,
        "dockerfile_path": None,
        "compose_path": None,
        "language": "unknown",
    }

    for filename, flag, path_key in markers:
        candidate = project_path / filename
        if not candidate.exists():
            continue
        structure[flag] = True
        if path_key:
            # NOTE: if several compose variants exist, the last match wins
            # (same behavior as the original dict iteration order).
            structure[path_key] = str(candidate)

    # Crude language detection from dependency manifests.
    if structure["has_requirements"]:
        structure["language"] = "python"
    elif structure["has_package_json"]:
        structure["language"] = "javascript"

    return structure
async def _docker_build(
    self,
    project_path: Path,
    structure: Dict[str, Any]
) -> Dict[str, Any]:
    """Attempt to build Docker container.

    Strategy, in order of preference:
      1. docker compose (build, then `up -d`) when a compose file exists;
      2. plain `docker build` + `docker run -d` when a Dockerfile exists;
      3. otherwise auto-generate a Dockerfile and recurse once.

    Returns a dict with "success", "logs", "method", and (Dockerfile path
    only) "container_id".

    NOTE(review): subprocess.run blocks the event loop for up to the build
    timeout even though this is an async method — consider
    asyncio.create_subprocess_exec if concurrency matters here.
    """
    try:
        # Docker image/container names must be lowercase and may not
        # contain "_" or "." in the positions a repo name can.
        project_name = project_path.name.lower().replace("_", "-").replace(".", "-")

        # Prefer docker-compose if available
        if structure["has_compose"]:
            compose_path = structure["compose_path"]

            result = subprocess.run(
                ["docker", "compose", "-f", compose_path, "build"],
                capture_output=True,
                text=True,
                cwd=str(project_path),
                timeout=600  # 10 minute timeout
            )

            if result.returncode == 0:
                # Start containers
                start_result = subprocess.run(
                    ["docker", "compose", "-f", compose_path, "up", "-d"],
                    capture_output=True,
                    text=True,
                    cwd=str(project_path),
                    timeout=300
                )

                # Success depends on the `up` step; logs combine both steps.
                return {
                    "success": start_result.returncode == 0,
                    "logs": result.stdout + start_result.stdout,
                    "method": "docker-compose"
                }
            else:
                return {
                    "success": False,
                    "logs": result.stderr,
                    "method": "docker-compose"
                }

        # Fall back to Dockerfile
        elif structure["has_dockerfile"]:
            result = subprocess.run(
                ["docker", "build", "-t", project_name, "."],
                capture_output=True,
                text=True,
                cwd=str(project_path),
                timeout=600
            )

            if result.returncode == 0:
                # Run container
                run_result = subprocess.run(
                    ["docker", "run", "-d", "--name", f"{project_name}-container", project_name],
                    capture_output=True,
                    text=True,
                    timeout=60
                )

                # `docker run -d` prints the full container id; keep the
                # short 12-char form on success.
                return {
                    "success": run_result.returncode == 0,
                    "container_id": run_result.stdout.strip()[:12] if run_result.returncode == 0 else None,
                    "logs": result.stdout + run_result.stdout,
                    "method": "dockerfile"
                }
            else:
                return {
                    "success": False,
                    "logs": result.stderr,
                    "method": "dockerfile"
                }

        # No Docker configuration found - generate Dockerfile
        else:
            generated = await self._generate_dockerfile(project_path, structure)
            if generated:
                # Retry build with generated Dockerfile. The recursion is
                # bounded: has_dockerfile is set True first, so the second
                # call takes the Dockerfile branch and cannot recurse again.
                structure["has_dockerfile"] = True
                structure["dockerfile_path"] = str(project_path / "Dockerfile")
                return await self._docker_build(project_path, structure)
            else:
                return {
                    "success": False,
                    "logs": "No Dockerfile found and auto-generation failed",
                    "method": "none"
                }

    except subprocess.TimeoutExpired:
        return {"success": False, "logs": "Build timed out (>10 minutes)"}
    except Exception as e:
        return {"success": False, "logs": f"Build error: {str(e)}"}
async def _generate_dockerfile(
    self,
    project_path: Path,
    structure: Dict[str, Any]
) -> bool:
    """Write a best-effort Dockerfile for a project that has none.

    The template is chosen from the detected language ("python",
    "javascript"); anything else gets a bare Ubuntu fallback image.
    Returns True when the file was written, False on any error.
    """
    python_template = """# Auto-generated Dockerfile
FROM python:3.11-slim

WORKDIR /app

COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

COPY . .

CMD ["python", "main.py"]
"""
    # assumes a "main.py" entry point exists — TODO confirm per project
    node_template = """# Auto-generated Dockerfile
FROM node:20-alpine

WORKDIR /app

COPY package*.json ./
RUN npm install

COPY . .

EXPOSE 3000

CMD ["npm", "start"]
"""
    fallback_template = """# Auto-generated Dockerfile
FROM ubuntu:22.04

WORKDIR /app

COPY . .

CMD ["bash"]
"""

    try:
        content = {
            "python": python_template,
            "javascript": node_template,
        }.get(structure["language"], fallback_template)

        (project_path / "Dockerfile").write_text(content)

        logger.info(f"Generated Dockerfile for {project_path.name}")
        return True

    except Exception as e:
        logger.error(f"Dockerfile generation error: {e}")
        return False
async def _analyze_and_suggest_fix(
    self,
    error_logs: str,
    project_path: Path
) -> str:
    """
    Ask the local LLM to analyze a failed build and propose a single fix.

    NOTE: This only suggests ONE fix, no infinite loops.
    """
    try:
        # Truncate logs to keep the prompt within a reasonable size.
        prompt = f"""Analyze this Docker build error and suggest ONE specific fix.

Error logs:
```
{error_logs[:2000]}
```

Project: {project_path.name}

Provide a concise fix suggestion. If multiple issues, focus on the first/most critical one.
Format:
1. Problem: [what went wrong]
2. Fix: [specific action to take]
3. Command: [if applicable, the command to run]"""

        reply = await self.client.post(
            f"{self.ollama_host}/api/generate",
            json={
                "model": self.ollama_model,
                "prompt": prompt,
                "stream": False
            }
        )

        if reply.status_code != 200:
            return "Error analysis unavailable (LLM request failed)"
        return reply.json().get("response", "Unable to analyze error")

    except Exception as e:
        logger.error(f"Error analysis failed: {e}")
        return f"Error analysis failed: {str(e)}"
async def get_container_logs(self, container_id: str, lines: int = 100) -> str:
    """Return the last *lines* log lines of a container (stdout + stderr)."""
    try:
        proc = subprocess.run(
            ["docker", "logs", "--tail", str(lines), container_id],
            capture_output=True,
            text=True,
            timeout=30
        )
    except Exception as e:
        return f"Failed to get logs: {str(e)}"
    # Containers write to both streams; surface both to the caller.
    return proc.stdout + proc.stderr
async def list_containers(self, all_containers: bool = False) -> List[Dict[str, str]]:
    """List Docker containers; only running ones unless all_containers=True."""
    try:
        command = ["docker", "ps", "--format", "{{.ID}}\t{{.Names}}\t{{.Status}}\t{{.Image}}"]
        if all_containers:
            command.append("-a")

        proc = subprocess.run(command, capture_output=True, text=True, timeout=30)

        # Parse the tab-separated rows emitted by --format above.
        parsed: List[Dict[str, str]] = []
        for row in proc.stdout.strip().split("\n"):
            if not row:
                continue
            fields = row.split("\t")
            if len(fields) < 4:
                continue
            parsed.append({
                "id": fields[0],
                "name": fields[1],
                "status": fields[2],
                "image": fields[3]
            })
        return parsed

    except Exception as e:
        logger.error(f"List containers error: {e}")
        return []
async def stop_container(self, container_id: str) -> bool:
    """Stop a running container; True on success, False otherwise."""
    try:
        outcome = subprocess.run(
            ["docker", "stop", container_id],
            capture_output=True,
            text=True,
            timeout=60
        )
    except Exception:
        return False
    return outcome.returncode == 0
async def remove_container(self, container_id: str) -> bool:
    """Force-remove a container (docker rm -f); True on success."""
    try:
        outcome = subprocess.run(
            ["docker", "rm", "-f", container_id],
            capture_output=True,
            text=True,
            timeout=60
        )
    except Exception:
        return False
    return outcome.returncode == 0
tools/github_search.py
ADDED
|
@@ -0,0 +1,241 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# GitHub Search Tool
|
| 2 |
+
"""
|
| 3 |
+
Search GitHub repositories and provide recommendations.
|
| 4 |
+
Simple keyword search with README summarization.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import httpx
|
| 8 |
+
import logging
|
| 9 |
+
from typing import List, Dict, Any, Optional
|
| 10 |
+
|
| 11 |
+
from config import settings
|
| 12 |
+
|
| 13 |
+
logger = logging.getLogger(__name__)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class GitHubSearchTool:
    """
    GitHub repository search tool.

    Searches for relevant projects via the GitHub REST API (sorted by
    stars) and provides recommendations. Works unauthenticated, but uses
    settings.GITHUB_TOKEN for a higher rate limit when available.
    """

    def __init__(self):
        self.api_base = "https://api.github.com"
        self.token = settings.GITHUB_TOKEN
        self.client = httpx.AsyncClient(timeout=30.0)

        # Set headers
        self.headers = {
            "Accept": "application/vnd.github.v3+json",
            "User-Agent": "AI-Workflow-Agent"
        }
        if self.token:
            self.headers["Authorization"] = f"token {self.token}"

    async def search(
        self,
        keywords: str,
        max_results: int = 3,
        language: Optional[str] = None
    ) -> List[Dict[str, Any]]:
        """
        Search GitHub repositories by keywords.

        Args:
            keywords: Search terms
            max_results: Maximum number of results (default 3)
            language: Optional language filter

        Returns:
            List of repository info dictionaries (empty list on error)
        """
        try:
            # Build search query
            query = keywords
            if language:
                query += f" language:{language}"

            # Search repositories, best (most-starred) first
            response = await self.client.get(
                f"{self.api_base}/search/repositories",
                params={
                    "q": query,
                    "sort": "stars",
                    "order": "desc",
                    "per_page": max_results
                },
                headers=self.headers
            )

            if response.status_code != 200:
                logger.error(f"GitHub search failed: {response.status_code}")
                return []

            repos = response.json().get("items", [])

            # Extract relevant info for the top hits
            results = []
            for repo in repos[:max_results]:
                results.append(await self._extract_repo_info(repo))
            return results

        except Exception as e:
            logger.error(f"GitHub search error: {e}")
            return []

    async def _extract_repo_info(self, repo: Dict[str, Any]) -> Dict[str, Any]:
        """
        Reduce raw API repository data to the fields this app cares about.

        Makes two extra API calls per repo (README fetch + Docker check).
        """
        owner = repo.get("owner", {}).get("login", "")
        name = repo.get("name", "")

        readme_summary = await self._get_readme_summary(owner, name)

        return {
            "name": name,
            "full_name": repo.get("full_name", ""),
            "url": repo.get("html_url", ""),
            "clone_url": repo.get("clone_url", ""),
            # BUGFIX: GitHub returns "description": null / "language": null
            # when missing; dict.get(key, default) only falls back when the
            # key is *absent*, so it yielded None and the UI displayed the
            # literal string "None". Use `or` fallbacks instead.
            "description": repo.get("description") or "No description",
            "stars": repo.get("stargazers_count", 0),
            "forks": repo.get("forks_count", 0),
            "language": repo.get("language") or "Unknown",
            "topics": repo.get("topics", []),
            "updated_at": repo.get("updated_at", ""),
            "has_docker": await self._check_has_docker(owner, name),
            "readme_summary": readme_summary
        }

    async def _get_readme_summary(self, owner: str, repo: str) -> str:
        """Fetch the repository README and return its first ~500 chars."""
        try:
            response = await self.client.get(
                f"{self.api_base}/repos/{owner}/{repo}/readme",
                headers=self.headers
            )

            if response.status_code == 200:
                data = response.json()
                # README content is base64 encoded by the API
                import base64
                content = base64.b64decode(data.get("content", "")).decode("utf-8")

                # Simple summary: first 500 chars, newlines flattened
                summary = content[:500].replace("\n", " ").strip()
                if len(content) > 500:
                    summary += "..."
                return summary
            return "README not available"

        except Exception as e:
            logger.warning(f"README fetch error: {e}")
            return "README not available"

    async def _check_has_docker(self, owner: str, repo: str) -> bool:
        """Check if the repository has a Dockerfile or docker-compose file."""
        try:
            # Probe the three common file names; stop at the first hit.
            for path in ("Dockerfile", "docker-compose.yml", "docker-compose.yaml"):
                response = await self.client.get(
                    f"{self.api_base}/repos/{owner}/{repo}/contents/{path}",
                    headers=self.headers
                )
                if response.status_code == 200:
                    return True
            return False

        except Exception:
            return False

    async def generate_recommendation(self, repos: List[Dict[str, Any]]) -> str:
        """
        Generate a recommendation summary for found repositories.

        Args:
            repos: List of repository info dictionaries

        Returns:
            Recommendation text (markdown)
        """
        if not repos:
            return "No repositories found. Try different keywords."

        # Build recommendation listing
        lines = ["📦 **Repository Recommendations:**\n"]

        for i, repo in enumerate(repos, 1):
            stars = repo.get("stars", 0)
            docker_status = "✅ Docker" if repo.get("has_docker") else "❌ No Docker"

            lines.append(f"**{i}. {repo.get('name', 'Unknown')}** ⭐ {stars}")
            # `or` fallbacks: values may be present-but-None (see
            # _extract_repo_info) when repos come from other sources.
            lines.append(f"   {repo.get('description') or 'No description'}")
            lines.append(f"   Language: {repo.get('language') or 'Unknown'} | {docker_status}")
            lines.append(f"   URL: {repo.get('url', '')}")
            lines.append("")

        # Best pick: most stars, preferring repos with Docker support.
        # (The empty case already returned above.)
        best = max(repos, key=lambda r: r.get("stars", 0))
        if best.get("has_docker"):
            lines.append(f"💡 **Recommended:** {best.get('name')} (most stars + Docker support)")
        else:
            docker_repos = [r for r in repos if r.get("has_docker")]
            if docker_repos:
                best_docker = max(docker_repos, key=lambda r: r.get("stars", 0))
                lines.append(f"💡 **Recommended:** {best_docker.get('name')} (has Docker support)")
            else:
                lines.append(f"💡 **Recommended:** {best.get('name')} (most stars, but needs Docker setup)")

        return "\n".join(lines)

    async def get_repo_details(self, repo_url: str) -> Dict[str, Any]:
        """
        Get detailed information about a specific repository.

        Args:
            repo_url: GitHub repository URL or "owner/repo" format

        Returns:
            Repository details dictionary, or {"error": ...} on failure
        """
        try:
            # Parse owner/repo from a full URL or the short form
            if "github.com" in repo_url:
                parts = repo_url.rstrip("/").split("/")
                owner = parts[-2]
                repo = parts[-1]
            else:
                owner, repo = repo_url.split("/")

            response = await self.client.get(
                f"{self.api_base}/repos/{owner}/{repo}",
                headers=self.headers
            )

            if response.status_code == 200:
                return await self._extract_repo_info(response.json())
            return {"error": f"Repository not found: {response.status_code}"}

        except Exception as e:
            logger.error(f"Repo details error: {e}")
            return {"error": str(e)}
|
tools/n8n_builder.py
ADDED
|
@@ -0,0 +1,620 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# n8n Workflow Builder Tool
|
| 2 |
+
"""
|
| 3 |
+
Generate and deploy n8n workflow JSON templates.
|
| 4 |
+
Supports common automation patterns.
|
| 5 |
+
LLM-enhanced generation when context.use_llm=True.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import httpx
|
| 9 |
+
import json
|
| 10 |
+
import logging
|
| 11 |
+
from typing import Dict, Any, List, Optional
|
| 12 |
+
from datetime import datetime
|
| 13 |
+
|
| 14 |
+
from config import settings
|
| 15 |
+
|
| 16 |
+
logger = logging.getLogger(__name__)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class N8NWorkflowBuilder:
|
| 20 |
+
"""
|
| 21 |
+
n8n workflow generator and deployer.
|
| 22 |
+
Creates JSON workflow templates and deploys via n8n API.
|
| 23 |
+
"""
|
| 24 |
+
|
| 25 |
+
def __init__(self):
    """Read n8n connection settings and prepare the HTTP client."""
    self.n8n_host = settings.N8N_HOST
    self.api_key = settings.N8N_API_KEY
    self.client = httpx.AsyncClient(timeout=30.0)

    # Base headers for the n8n REST API; the API key header is only
    # attached when a key is configured.
    self.headers = {"Content-Type": "application/json"}
    if self.api_key:
        self.headers["X-N8N-API-KEY"] = self.api_key
async def check_health(self) -> str:
    """Probe the n8n /healthz endpoint.

    Returns "healthy" (HTTP 200), "unhealthy" (any other status), or
    "unreachable" (connection error / timeout).
    """
    try:
        resp = await self.client.get(f"{self.n8n_host}/healthz")
    except Exception as e:
        logger.debug(f"n8n health check failed: {e}")
        return "unreachable"
    return "healthy" if resp.status_code == 200 else "unhealthy"
async def generate_workflow(self, query: str, context: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    """
    Generate n8n workflow JSON based on user query.

    Args:
        query: User's natural language request
        context: Optional context; context["use_llm"] enables LLM-enhanced
            generation with template fallback.

    Returns:
        n8n workflow JSON structure
    """
    # LLM enhancement is opt-in via the context dict.
    wants_llm = bool(context) and isinstance(context, dict) and bool(context.get('use_llm', False))

    if wants_llm:
        logger.info("Using LLM-enhanced workflow generation")
        enhanced = await self._generate_llm_workflow(query)
        if enhanced:
            return enhanced
        # LLM path failed; continue with the keyword templates below.
        logger.warning("LLM generation failed, falling back to templates")

    # Template-based generation (keyword mode): classify the query, then
    # dispatch to the matching template builder.
    kind = self._detect_workflow_type(query)
    dispatch = {
        "webhook": self._generate_webhook_workflow,
        "schedule": self._generate_schedule_workflow,
        "api_integration": self._generate_api_workflow,
        "email": self._generate_email_workflow,
        "data_transform": self._generate_transform_workflow,
        "notification": self._generate_notification_workflow,
        "generic": self._generate_generic_workflow,
    }
    builder = dispatch.get(kind, self._generate_generic_workflow)
    return builder(query)
async def _generate_llm_workflow(self, query: str) -> Optional[Dict[str, Any]]:
    """
    Use the DecisionAgent LLM to enrich a template workflow for a query.

    Builds the same keyword-selected template as the non-LLM path, then
    attaches the agent's analysis under meta.llm_analysis.

    Args:
        query: User's natural language request

    Returns:
        Enhanced n8n workflow dict, or None when the LLM path fails.
    """
    try:
        # Import here to avoid circular dependency
        from decision_agent import DecisionAgent

        analysis = await DecisionAgent().analyze(query, context={'use_llm': True})

        # Start from the keyword-selected base template.
        kind = self._detect_workflow_type(query)
        dispatch = {
            "webhook": self._generate_webhook_workflow,
            "schedule": self._generate_schedule_workflow,
            "api_integration": self._generate_api_workflow,
            "email": self._generate_email_workflow,
            "data_transform": self._generate_transform_workflow,
            "notification": self._generate_notification_workflow,
            "generic": self._generate_generic_workflow,
        }
        workflow = dispatch.get(kind, self._generate_generic_workflow)(query)

        # Attach the LLM's reasoning so downstream consumers can show it.
        workflow['meta']['llm_analysis'] = {
            'explanation': analysis.get('explanation', ''),
            'confidence': analysis.get('confidence', 0.0),
            'suggested_tools': analysis.get('suggested_tools', []),
            'next_steps': analysis.get('next_steps', [])
        }
        workflow['meta']['generated_with_llm'] = True

        return workflow

    except Exception as e:
        logger.error(f"LLM workflow generation failed: {e}")
        return None
def _detect_workflow_type(self, query: str) -> str:
|
| 145 |
+
"""Detect the type of workflow needed from query."""
|
| 146 |
+
query_lower = query.lower()
|
| 147 |
+
|
| 148 |
+
if any(w in query_lower for w in ["webhook", "http", "api call", "endpoint"]):
|
| 149 |
+
return "webhook"
|
| 150 |
+
elif any(w in query_lower for w in ["schedule", "cron", "every day", "every hour", "periodic"]):
|
| 151 |
+
return "schedule"
|
| 152 |
+
elif any(w in query_lower for w in ["api", "rest", "fetch", "get data"]):
|
| 153 |
+
return "api_integration"
|
| 154 |
+
elif any(w in query_lower for w in ["email", "mail", "send message", "gmail"]):
|
| 155 |
+
return "email"
|
| 156 |
+
elif any(w in query_lower for w in ["transform", "convert", "process", "parse"]):
|
| 157 |
+
return "data_transform"
|
| 158 |
+
elif any(w in query_lower for w in ["notify", "slack", "telegram", "alert"]):
|
| 159 |
+
return "notification"
|
| 160 |
+
else:
|
| 161 |
+
return "generic"
|
| 162 |
+
|
| 163 |
+
def _generate_webhook_workflow(self, query: str) -> Dict[str, Any]:
|
| 164 |
+
"""Generate webhook-triggered workflow."""
|
| 165 |
+
return {
|
| 166 |
+
"name": f"Webhook Workflow - {datetime.now().strftime('%Y%m%d_%H%M%S')}",
|
| 167 |
+
"nodes": [
|
| 168 |
+
{
|
| 169 |
+
"parameters": {
|
| 170 |
+
"httpMethod": "POST",
|
| 171 |
+
"path": "webhook-trigger",
|
| 172 |
+
"responseMode": "onReceived",
|
| 173 |
+
"responseData": "allEntries"
|
| 174 |
+
},
|
| 175 |
+
"id": "webhook_1",
|
| 176 |
+
"name": "Webhook",
|
| 177 |
+
"type": "n8n-nodes-base.webhook",
|
| 178 |
+
"typeVersion": 1,
|
| 179 |
+
"position": [250, 300]
|
| 180 |
+
},
|
| 181 |
+
{
|
| 182 |
+
"parameters": {},
|
| 183 |
+
"id": "set_1",
|
| 184 |
+
"name": "Process Data",
|
| 185 |
+
"type": "n8n-nodes-base.set",
|
| 186 |
+
"typeVersion": 1,
|
| 187 |
+
"position": [450, 300]
|
| 188 |
+
},
|
| 189 |
+
{
|
| 190 |
+
"parameters": {
|
| 191 |
+
"functionCode": "// Process incoming data\nconst items = $input.all();\nreturn items;"
|
| 192 |
+
},
|
| 193 |
+
"id": "code_1",
|
| 194 |
+
"name": "Custom Logic",
|
| 195 |
+
"type": "n8n-nodes-base.code",
|
| 196 |
+
"typeVersion": 1,
|
| 197 |
+
"position": [650, 300]
|
| 198 |
+
}
|
| 199 |
+
],
|
| 200 |
+
"connections": {
|
| 201 |
+
"Webhook": {
|
| 202 |
+
"main": [[{"node": "Process Data", "type": "main", "index": 0}]]
|
| 203 |
+
},
|
| 204 |
+
"Process Data": {
|
| 205 |
+
"main": [[{"node": "Custom Logic", "type": "main", "index": 0}]]
|
| 206 |
+
}
|
| 207 |
+
},
|
| 208 |
+
"settings": {
|
| 209 |
+
"executionOrder": "v1"
|
| 210 |
+
},
|
| 211 |
+
"meta": {
|
| 212 |
+
"generated_by": "AI Workflow Agent",
|
| 213 |
+
"query": query,
|
| 214 |
+
"type": "webhook"
|
| 215 |
+
}
|
| 216 |
+
}
|
| 217 |
+
|
| 218 |
+
def _generate_schedule_workflow(self, query: str) -> Dict[str, Any]:
|
| 219 |
+
"""Generate schedule-triggered workflow."""
|
| 220 |
+
return {
|
| 221 |
+
"name": f"Scheduled Workflow - {datetime.now().strftime('%Y%m%d_%H%M%S')}",
|
| 222 |
+
"nodes": [
|
| 223 |
+
{
|
| 224 |
+
"parameters": {
|
| 225 |
+
"rule": {
|
| 226 |
+
"interval": [{"field": "hours", "hoursInterval": 1}]
|
| 227 |
+
}
|
| 228 |
+
},
|
| 229 |
+
"id": "schedule_1",
|
| 230 |
+
"name": "Schedule Trigger",
|
| 231 |
+
"type": "n8n-nodes-base.scheduleTrigger",
|
| 232 |
+
"typeVersion": 1,
|
| 233 |
+
"position": [250, 300]
|
| 234 |
+
},
|
| 235 |
+
{
|
| 236 |
+
"parameters": {
|
| 237 |
+
"functionCode": "// Scheduled task logic\nconst now = new Date();\nreturn [{ json: { timestamp: now.toISOString(), status: 'executed' } }];"
|
| 238 |
+
},
|
| 239 |
+
"id": "code_1",
|
| 240 |
+
"name": "Execute Task",
|
| 241 |
+
"type": "n8n-nodes-base.code",
|
| 242 |
+
"typeVersion": 1,
|
| 243 |
+
"position": [450, 300]
|
| 244 |
+
}
|
| 245 |
+
],
|
| 246 |
+
"connections": {
|
| 247 |
+
"Schedule Trigger": {
|
| 248 |
+
"main": [[{"node": "Execute Task", "type": "main", "index": 0}]]
|
| 249 |
+
}
|
| 250 |
+
},
|
| 251 |
+
"settings": {
|
| 252 |
+
"executionOrder": "v1"
|
| 253 |
+
},
|
| 254 |
+
"meta": {
|
| 255 |
+
"generated_by": "AI Workflow Agent",
|
| 256 |
+
"query": query,
|
| 257 |
+
"type": "schedule"
|
| 258 |
+
}
|
| 259 |
+
}
|
| 260 |
+
|
| 261 |
+
def _generate_api_workflow(self, query: str) -> Dict[str, Any]:
|
| 262 |
+
"""Generate API integration workflow."""
|
| 263 |
+
return {
|
| 264 |
+
"name": f"API Integration - {datetime.now().strftime('%Y%m%d_%H%M%S')}",
|
| 265 |
+
"nodes": [
|
| 266 |
+
{
|
| 267 |
+
"parameters": {
|
| 268 |
+
"httpMethod": "GET",
|
| 269 |
+
"path": "api-trigger",
|
| 270 |
+
"responseMode": "responseNode"
|
| 271 |
+
},
|
| 272 |
+
"id": "webhook_1",
|
| 273 |
+
"name": "API Trigger",
|
| 274 |
+
"type": "n8n-nodes-base.webhook",
|
| 275 |
+
"typeVersion": 1,
|
| 276 |
+
"position": [250, 300]
|
| 277 |
+
},
|
| 278 |
+
{
|
| 279 |
+
"parameters": {
|
| 280 |
+
"url": "https://api.example.com/data",
|
| 281 |
+
"method": "GET",
|
| 282 |
+
"options": {}
|
| 283 |
+
},
|
| 284 |
+
"id": "http_1",
|
| 285 |
+
"name": "HTTP Request",
|
| 286 |
+
"type": "n8n-nodes-base.httpRequest",
|
| 287 |
+
"typeVersion": 4,
|
| 288 |
+
"position": [450, 300]
|
| 289 |
+
},
|
| 290 |
+
{
|
| 291 |
+
"parameters": {
|
| 292 |
+
"respondWith": "json",
|
| 293 |
+
"responseBody": "={{ $json }}"
|
| 294 |
+
},
|
| 295 |
+
"id": "respond_1",
|
| 296 |
+
"name": "Respond",
|
| 297 |
+
"type": "n8n-nodes-base.respondToWebhook",
|
| 298 |
+
"typeVersion": 1,
|
| 299 |
+
"position": [650, 300]
|
| 300 |
+
}
|
| 301 |
+
],
|
| 302 |
+
"connections": {
|
| 303 |
+
"API Trigger": {
|
| 304 |
+
"main": [[{"node": "HTTP Request", "type": "main", "index": 0}]]
|
| 305 |
+
},
|
| 306 |
+
"HTTP Request": {
|
| 307 |
+
"main": [[{"node": "Respond", "type": "main", "index": 0}]]
|
| 308 |
+
}
|
| 309 |
+
},
|
| 310 |
+
"settings": {
|
| 311 |
+
"executionOrder": "v1"
|
| 312 |
+
},
|
| 313 |
+
"meta": {
|
| 314 |
+
"generated_by": "AI Workflow Agent",
|
| 315 |
+
"query": query,
|
| 316 |
+
"type": "api_integration"
|
| 317 |
+
}
|
| 318 |
+
}
|
| 319 |
+
|
| 320 |
+
def _generate_email_workflow(self, query: str) -> Dict[str, Any]:
|
| 321 |
+
"""Generate email workflow."""
|
| 322 |
+
return {
|
| 323 |
+
"name": f"Email Workflow - {datetime.now().strftime('%Y%m%d_%H%M%S')}",
|
| 324 |
+
"nodes": [
|
| 325 |
+
{
|
| 326 |
+
"parameters": {
|
| 327 |
+
"httpMethod": "POST",
|
| 328 |
+
"path": "send-email",
|
| 329 |
+
"responseMode": "onReceived"
|
| 330 |
+
},
|
| 331 |
+
"id": "webhook_1",
|
| 332 |
+
"name": "Trigger",
|
| 333 |
+
"type": "n8n-nodes-base.webhook",
|
| 334 |
+
"typeVersion": 1,
|
| 335 |
+
"position": [250, 300]
|
| 336 |
+
},
|
| 337 |
+
{
|
| 338 |
+
"parameters": {
|
| 339 |
+
"fromEmail": "={{ $json.from || 'noreply@example.com' }}",
|
| 340 |
+
"toEmail": "={{ $json.to }}",
|
| 341 |
+
"subject": "={{ $json.subject }}",
|
| 342 |
+
"emailType": "text",
|
| 343 |
+
"message": "={{ $json.body }}"
|
| 344 |
+
},
|
| 345 |
+
"id": "email_1",
|
| 346 |
+
"name": "Send Email",
|
| 347 |
+
"type": "n8n-nodes-base.emailSend",
|
| 348 |
+
"typeVersion": 2,
|
| 349 |
+
"position": [450, 300],
|
| 350 |
+
"credentials": {
|
| 351 |
+
"smtp": {
|
| 352 |
+
"id": "SMTP_CREDENTIAL_ID",
|
| 353 |
+
"name": "SMTP Account"
|
| 354 |
+
}
|
| 355 |
+
}
|
| 356 |
+
}
|
| 357 |
+
],
|
| 358 |
+
"connections": {
|
| 359 |
+
"Trigger": {
|
| 360 |
+
"main": [[{"node": "Send Email", "type": "main", "index": 0}]]
|
| 361 |
+
}
|
| 362 |
+
},
|
| 363 |
+
"settings": {
|
| 364 |
+
"executionOrder": "v1"
|
| 365 |
+
},
|
| 366 |
+
"meta": {
|
| 367 |
+
"generated_by": "AI Workflow Agent",
|
| 368 |
+
"query": query,
|
| 369 |
+
"type": "email",
|
| 370 |
+
"note": "Requires SMTP credentials configuration"
|
| 371 |
+
}
|
| 372 |
+
}
|
| 373 |
+
|
| 374 |
+
def _generate_transform_workflow(self, query: str) -> Dict[str, Any]:
|
| 375 |
+
"""Generate data transformation workflow."""
|
| 376 |
+
return {
|
| 377 |
+
"name": f"Data Transform - {datetime.now().strftime('%Y%m%d_%H%M%S')}",
|
| 378 |
+
"nodes": [
|
| 379 |
+
{
|
| 380 |
+
"parameters": {
|
| 381 |
+
"httpMethod": "POST",
|
| 382 |
+
"path": "transform",
|
| 383 |
+
"responseMode": "responseNode"
|
| 384 |
+
},
|
| 385 |
+
"id": "webhook_1",
|
| 386 |
+
"name": "Input",
|
| 387 |
+
"type": "n8n-nodes-base.webhook",
|
| 388 |
+
"typeVersion": 1,
|
| 389 |
+
"position": [250, 300]
|
| 390 |
+
},
|
| 391 |
+
{
|
| 392 |
+
"parameters": {
|
| 393 |
+
"functionCode": "// Transform data\nconst items = $input.all();\nconst transformed = items.map(item => {\n return {\n json: {\n ...item.json,\n processed: true,\n timestamp: new Date().toISOString()\n }\n };\n});\nreturn transformed;"
|
| 394 |
+
},
|
| 395 |
+
"id": "code_1",
|
| 396 |
+
"name": "Transform",
|
| 397 |
+
"type": "n8n-nodes-base.code",
|
| 398 |
+
"typeVersion": 1,
|
| 399 |
+
"position": [450, 300]
|
| 400 |
+
},
|
| 401 |
+
{
|
| 402 |
+
"parameters": {
|
| 403 |
+
"respondWith": "json",
|
| 404 |
+
"responseBody": "={{ $json }}"
|
| 405 |
+
},
|
| 406 |
+
"id": "respond_1",
|
| 407 |
+
"name": "Output",
|
| 408 |
+
"type": "n8n-nodes-base.respondToWebhook",
|
| 409 |
+
"typeVersion": 1,
|
| 410 |
+
"position": [650, 300]
|
| 411 |
+
}
|
| 412 |
+
],
|
| 413 |
+
"connections": {
|
| 414 |
+
"Input": {
|
| 415 |
+
"main": [[{"node": "Transform", "type": "main", "index": 0}]]
|
| 416 |
+
},
|
| 417 |
+
"Transform": {
|
| 418 |
+
"main": [[{"node": "Output", "type": "main", "index": 0}]]
|
| 419 |
+
}
|
| 420 |
+
},
|
| 421 |
+
"settings": {
|
| 422 |
+
"executionOrder": "v1"
|
| 423 |
+
},
|
| 424 |
+
"meta": {
|
| 425 |
+
"generated_by": "AI Workflow Agent",
|
| 426 |
+
"query": query,
|
| 427 |
+
"type": "data_transform"
|
| 428 |
+
}
|
| 429 |
+
}
|
| 430 |
+
|
| 431 |
+
def _generate_notification_workflow(self, query: str) -> Dict[str, Any]:
|
| 432 |
+
"""Generate notification workflow (Slack/Telegram)."""
|
| 433 |
+
return {
|
| 434 |
+
"name": f"Notification Workflow - {datetime.now().strftime('%Y%m%d_%H%M%S')}",
|
| 435 |
+
"nodes": [
|
| 436 |
+
{
|
| 437 |
+
"parameters": {
|
| 438 |
+
"httpMethod": "POST",
|
| 439 |
+
"path": "notify",
|
| 440 |
+
"responseMode": "onReceived"
|
| 441 |
+
},
|
| 442 |
+
"id": "webhook_1",
|
| 443 |
+
"name": "Trigger",
|
| 444 |
+
"type": "n8n-nodes-base.webhook",
|
| 445 |
+
"typeVersion": 1,
|
| 446 |
+
"position": [250, 300]
|
| 447 |
+
},
|
| 448 |
+
{
|
| 449 |
+
"parameters": {
|
| 450 |
+
"channel": "={{ $json.channel || '#general' }}",
|
| 451 |
+
"text": "={{ $json.message }}",
|
| 452 |
+
"otherOptions": {}
|
| 453 |
+
},
|
| 454 |
+
"id": "slack_1",
|
| 455 |
+
"name": "Slack",
|
| 456 |
+
"type": "n8n-nodes-base.slack",
|
| 457 |
+
"typeVersion": 2,
|
| 458 |
+
"position": [450, 250],
|
| 459 |
+
"credentials": {
|
| 460 |
+
"slackApi": {
|
| 461 |
+
"id": "SLACK_CREDENTIAL_ID",
|
| 462 |
+
"name": "Slack Account"
|
| 463 |
+
}
|
| 464 |
+
}
|
| 465 |
+
},
|
| 466 |
+
{
|
| 467 |
+
"parameters": {
|
| 468 |
+
"chatId": "={{ $json.chat_id }}",
|
| 469 |
+
"text": "={{ $json.message }}"
|
| 470 |
+
},
|
| 471 |
+
"id": "telegram_1",
|
| 472 |
+
"name": "Telegram",
|
| 473 |
+
"type": "n8n-nodes-base.telegram",
|
| 474 |
+
"typeVersion": 1,
|
| 475 |
+
"position": [450, 350],
|
| 476 |
+
"credentials": {
|
| 477 |
+
"telegramApi": {
|
| 478 |
+
"id": "TELEGRAM_CREDENTIAL_ID",
|
| 479 |
+
"name": "Telegram Bot"
|
| 480 |
+
}
|
| 481 |
+
}
|
| 482 |
+
}
|
| 483 |
+
],
|
| 484 |
+
"connections": {
|
| 485 |
+
"Trigger": {
|
| 486 |
+
"main": [
|
| 487 |
+
[
|
| 488 |
+
{"node": "Slack", "type": "main", "index": 0},
|
| 489 |
+
{"node": "Telegram", "type": "main", "index": 0}
|
| 490 |
+
]
|
| 491 |
+
]
|
| 492 |
+
}
|
| 493 |
+
},
|
| 494 |
+
"settings": {
|
| 495 |
+
"executionOrder": "v1"
|
| 496 |
+
},
|
| 497 |
+
"meta": {
|
| 498 |
+
"generated_by": "AI Workflow Agent",
|
| 499 |
+
"query": query,
|
| 500 |
+
"type": "notification",
|
| 501 |
+
"note": "Requires Slack/Telegram credentials configuration"
|
| 502 |
+
}
|
| 503 |
+
}
|
| 504 |
+
|
| 505 |
+
def _generate_generic_workflow(self, query: str) -> Dict[str, Any]:
|
| 506 |
+
"""Generate generic workflow template."""
|
| 507 |
+
return {
|
| 508 |
+
"name": f"Workflow - {datetime.now().strftime('%Y%m%d_%H%M%S')}",
|
| 509 |
+
"nodes": [
|
| 510 |
+
{
|
| 511 |
+
"parameters": {
|
| 512 |
+
"httpMethod": "POST",
|
| 513 |
+
"path": "workflow-trigger",
|
| 514 |
+
"responseMode": "responseNode"
|
| 515 |
+
},
|
| 516 |
+
"id": "webhook_1",
|
| 517 |
+
"name": "Start",
|
| 518 |
+
"type": "n8n-nodes-base.webhook",
|
| 519 |
+
"typeVersion": 1,
|
| 520 |
+
"position": [250, 300]
|
| 521 |
+
},
|
| 522 |
+
{
|
| 523 |
+
"parameters": {
|
| 524 |
+
"functionCode": f"// Generated for: {query}\n// Add your custom logic here\nconst input = $input.all();\nreturn input;"
|
| 525 |
+
},
|
| 526 |
+
"id": "code_1",
|
| 527 |
+
"name": "Process",
|
| 528 |
+
"type": "n8n-nodes-base.code",
|
| 529 |
+
"typeVersion": 1,
|
| 530 |
+
"position": [450, 300]
|
| 531 |
+
},
|
| 532 |
+
{
|
| 533 |
+
"parameters": {
|
| 534 |
+
"respondWith": "json",
|
| 535 |
+
"responseBody": "={{ $json }}"
|
| 536 |
+
},
|
| 537 |
+
"id": "respond_1",
|
| 538 |
+
"name": "End",
|
| 539 |
+
"type": "n8n-nodes-base.respondToWebhook",
|
| 540 |
+
"typeVersion": 1,
|
| 541 |
+
"position": [650, 300]
|
| 542 |
+
}
|
| 543 |
+
],
|
| 544 |
+
"connections": {
|
| 545 |
+
"Start": {
|
| 546 |
+
"main": [[{"node": "Process", "type": "main", "index": 0}]]
|
| 547 |
+
},
|
| 548 |
+
"Process": {
|
| 549 |
+
"main": [[{"node": "End", "type": "main", "index": 0}]]
|
| 550 |
+
}
|
| 551 |
+
},
|
| 552 |
+
"settings": {
|
| 553 |
+
"executionOrder": "v1"
|
| 554 |
+
},
|
| 555 |
+
"meta": {
|
| 556 |
+
"generated_by": "AI Workflow Agent",
|
| 557 |
+
"query": query,
|
| 558 |
+
"type": "generic"
|
| 559 |
+
}
|
| 560 |
+
}
|
| 561 |
+
|
| 562 |
+
async def deploy_workflow(self, workflow: Dict[str, Any]) -> str:
    """
    Deploy a workflow to the configured n8n instance via its REST API.

    Args:
        workflow: Complete n8n workflow JSON (nodes, connections, settings).

    Returns:
        The id n8n assigned to the created workflow ("unknown" if the
        response body carries no id).

    Raises:
        RuntimeError: If n8n rejects the workflow (non-2xx response).
        Exception: Transport-level errors from the HTTP client are logged
            and re-raised unchanged.
    """
    try:
        response = await self.client.post(
            f"{self.n8n_host}/api/v1/workflows",
            headers=self.headers,
            json=workflow
        )

        # n8n returns 200 or 201 depending on version.
        if response.status_code not in (200, 201):
            logger.error(f"Deploy failed: {response.status_code} - {response.text}")
            # RuntimeError instead of bare Exception: still caught by callers
            # handling Exception, but no longer swallows unrelated errors when
            # callers try to catch it specifically.
            raise RuntimeError(f"Deploy failed: {response.status_code}")

        workflow_id = response.json().get("id", "unknown")
        logger.info(f"Workflow deployed: {workflow_id}")
        return workflow_id

    except Exception as e:
        logger.error(f"Deploy error: {e}")
        raise
|
| 591 |
+
|
| 592 |
+
async def list_workflows(self) -> List[Dict[str, Any]]:
    """Fetch all workflows registered on the n8n instance; empty list on any failure."""
    try:
        resp = await self.client.get(
            f"{self.n8n_host}/api/v1/workflows",
            headers=self.headers
        )
        # n8n wraps the workflow list under a "data" key.
        return resp.json().get("data", []) if resp.status_code == 200 else []
    except Exception as e:
        logger.error(f"List workflows error: {e}")
        return []
|
| 607 |
+
|
| 608 |
+
async def activate_workflow(self, workflow_id: str, active: bool = True) -> bool:
    """Toggle a workflow's active flag via PATCH; True only on HTTP 200."""
    try:
        result = await self.client.patch(
            f"{self.n8n_host}/api/v1/workflows/{workflow_id}",
            headers=self.headers,
            json={"active": active},
        )
        return result.status_code == 200
    except Exception as e:
        logger.error(f"Activate workflow error: {e}")
        return False
|
tools/web_search.py
ADDED
|
@@ -0,0 +1,169 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Web Search Tool
|
| 2 |
+
"""
|
| 3 |
+
Search the web for project recommendations and documentation.
|
| 4 |
+
Uses DuckDuckGo (no API key required) for search.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import httpx
|
| 8 |
+
import logging
|
| 9 |
+
import re
|
| 10 |
+
from typing import Dict, Any, List, Optional
|
| 11 |
+
from urllib.parse import quote_plus
|
| 12 |
+
|
| 13 |
+
logger = logging.getLogger(__name__)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class WebSearchTool:
    """
    Web search tool for finding project recommendations.

    Uses DuckDuckGo's HTML endpoint, so no API key is required. Result
    pages are scraped with regexes rather than a full HTML parser, which
    keeps dependencies light at the cost of some brittleness.
    """

    def __init__(self):
        # Shared async HTTP client; 30s accommodates slow search responses.
        self.client = httpx.AsyncClient(timeout=30.0)
        # Browser-like UA: the HTML endpoint is intended for browsers.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
        }

    async def search(
        self,
        query: str,
        max_results: int = 5,
        site_filter: Optional[str] = None
    ) -> List[Dict[str, Any]]:
        """
        Search the web for relevant results.

        Args:
            query: Search query
            max_results: Maximum results to return
            site_filter: Optional site to filter (e.g., "github.com")

        Returns:
            List of search results with title, url, snippet.
            Empty list on any failure (never raises).
        """
        try:
            search_query = query
            if site_filter:
                search_query = f"site:{site_filter} {query}"

            # DuckDuckGo HTML search: plain HTML page, no API key needed.
            url = f"https://html.duckduckgo.com/html/?q={quote_plus(search_query)}"

            response = await self.client.get(url, headers=self.headers)

            if response.status_code == 200:
                return self._parse_ddg_results(response.text, max_results)

            logger.warning(f"Search failed: {response.status_code}")
            return []

        except Exception as e:
            logger.error(f"Web search error: {e}")
            return []

    def _parse_ddg_results(self, html: str, max_results: int) -> List[Dict[str, Any]]:
        """Parse DuckDuckGo result HTML into title/url/snippet dicts."""
        results = []

        # Regex parsing of DuckDuckGo's result markup. Brittle if DDG
        # changes its HTML, but avoids an HTML-parser dependency.
        link_pattern = r'<a rel="nofollow" class="result__a" href="([^"]+)"[^>]*>([^<]+)</a>'
        snippet_pattern = r'<a class="result__snippet"[^>]*>([^<]+(?:<[^>]+>[^<]*</[^>]+>)*[^<]*)</a>'

        links = re.findall(link_pattern, html)
        snippets = re.findall(snippet_pattern, html)

        for i, (url, title) in enumerate(links[:max_results]):
            # DuckDuckGo wraps targets in a redirect URL; unwrap `uddg=`.
            if "uddg=" in url:
                match = re.search(r'uddg=([^&]+)', url)
                if match:
                    from urllib.parse import unquote
                    url = unquote(match.group(1))

            result = {
                "title": self._clean_html(title),
                "url": url,
                # Snippets are matched independently of links; pair by
                # index when one exists at the same position.
                "snippet": self._clean_html(snippets[i]) if i < len(snippets) else ""
            }
            results.append(result)

        return results

    def _clean_html(self, text: str) -> str:
        """Remove HTML tags, decode common entities, and normalize whitespace."""
        # Remove HTML tags
        text = re.sub(r'<[^>]+>', '', text)
        # Decode HTML entities. FIX: '&amp;' must be decoded LAST — decoding
        # it first turned e.g. '&amp;lt;' into '<' (a double-unescape).
        text = text.replace('&lt;', '<').replace('&gt;', '>')
        text = text.replace('&quot;', '"').replace('&#x27;', "'")
        text = text.replace('&amp;', '&')
        # Collapse runs of whitespace into single spaces
        text = ' '.join(text.split())
        return text.strip()

    async def search_github_projects(
        self,
        query: str,
        max_results: int = 5
    ) -> List[Dict[str, Any]]:
        """Search specifically for GitHub projects."""
        return await self.search(query, max_results, site_filter="github.com")

    async def search_documentation(
        self,
        tool: str,
        topic: str,
        max_results: int = 3
    ) -> List[Dict[str, Any]]:
        """Search for tool documentation."""
        query = f"{tool} documentation {topic}"
        return await self.search(query, max_results)

    async def find_alternatives(
        self,
        tool: str,
        purpose: str,
        max_results: int = 5
    ) -> List[Dict[str, Any]]:
        """Find alternative tools for a specific purpose."""
        query = f"best {purpose} tools alternatives to {tool} open source"
        return await self.search(query, max_results)

    async def search_with_summary(
        self,
        query: str,
        max_results: int = 5
    ) -> Dict[str, Any]:
        """
        Search and generate a summary of findings.

        Returns:
            Dict with results and a markdown-formatted summary string.
        """
        results = await self.search(query, max_results)

        if not results:
            return {
                "results": [],
                "summary": "No results found for your query."
            }

        # Build a simple numbered markdown summary of the hits.
        summary_parts = [f"Found {len(results)} relevant results:"]
        for i, r in enumerate(results, 1):
            summary_parts.append(f"{i}. **{r['title']}**")
            if r['snippet']:
                summary_parts.append(f"   {r['snippet'][:100]}...")

        return {
            "results": results,
            "summary": "\n".join(summary_parts)
        }
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
# Module-level singleton: importing `web_search` shares a single
# WebSearchTool (and its httpx.AsyncClient connection pool) app-wide.
web_search = WebSearchTool()
|
tools/workflow_templates.py
ADDED
|
@@ -0,0 +1,585 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Additional n8n Workflow Templates
|
| 2 |
+
"""
|
| 3 |
+
Extended workflow templates for common automation patterns.
|
| 4 |
+
Milestone 1: More comprehensive template library.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from datetime import datetime
|
| 8 |
+
from typing import Dict, Any
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def get_workflow_templates() -> Dict[str, callable]:
    """Return the registry mapping template names to their generator functions."""
    registry_entries = (
        ("database_sync", database_sync_workflow),
        ("file_processor", file_processor_workflow),
        ("social_media", social_media_workflow),
        ("crm_integration", crm_integration_workflow),
        ("monitoring", monitoring_workflow),
        ("data_pipeline", data_pipeline_workflow),
        ("chatbot", chatbot_workflow),
        ("report_generator", report_generator_workflow),
    )
    return dict(registry_entries)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def database_sync_workflow(params: Dict[str, Any]) -> Dict[str, Any]:
    """
    Build an hourly Postgres-to-Postgres synchronization workflow.

    Args:
        params: Reserved for template customization; currently unused.

    Returns:
        n8n workflow dict: schedule trigger -> source query -> destination insert.
    """
    stamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    schedule_node = {
        "parameters": {
            "rule": {"interval": [{"field": "hours", "hoursInterval": 1}]}
        },
        "id": "schedule_1",
        "name": "Schedule",
        "type": "n8n-nodes-base.scheduleTrigger",
        "typeVersion": 1,
        "position": [250, 300]
    }
    source_node = {
        "parameters": {
            "operation": "executeQuery",
            # Pull only rows touched within the last sync interval.
            "query": "SELECT * FROM source_table WHERE updated_at > NOW() - INTERVAL '1 hour'"
        },
        "id": "postgres_source",
        "name": "Source DB",
        "type": "n8n-nodes-base.postgres",
        "typeVersion": 2,
        "position": [450, 300],
        "credentials": {"postgres": {"id": "SOURCE_DB_ID", "name": "Source Database"}}
    }
    destination_node = {
        "parameters": {
            "operation": "insert",
            "table": "destination_table",
            "columns": "={{ Object.keys($json).join(',') }}"
        },
        "id": "postgres_dest",
        "name": "Destination DB",
        "type": "n8n-nodes-base.postgres",
        "typeVersion": 2,
        "position": [650, 300],
        "credentials": {"postgres": {"id": "DEST_DB_ID", "name": "Destination Database"}}
    }
    return {
        "name": f"Database Sync - {stamp}",
        "nodes": [schedule_node, source_node, destination_node],
        "connections": {
            "Schedule": {"main": [[{"node": "Source DB", "type": "main", "index": 0}]]},
            "Source DB": {"main": [[{"node": "Destination DB", "type": "main", "index": 0}]]},
        },
        "settings": {"executionOrder": "v1"},
        "meta": {"type": "database_sync", "generated_by": "AI Workflow Agent"},
    }
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def file_processor_workflow(params: Dict[str, Any]) -> Dict[str, Any]:
    """
    Build a file-processing workflow: watch /data/input, transform, write to /data/output.

    Args:
        params: Reserved for template customization; currently unused.

    Returns:
        n8n workflow dict with file trigger, read, code, and write nodes.
    """
    stamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    watcher = {
        "parameters": {"path": "/data/input", "events": ["change"]},
        "id": "file_trigger",
        "name": "File Trigger",
        "type": "n8n-nodes-base.localFileTrigger",
        "typeVersion": 1,
        "position": [250, 300]
    }
    reader = {
        "parameters": {"filePath": "={{ $json.path }}"},
        "id": "read_file",
        "name": "Read File",
        "type": "n8n-nodes-base.readBinaryFiles",
        "typeVersion": 1,
        "position": [450, 300]
    }
    processor = {
        "parameters": {
            "functionCode": """// Process file content
const items = $input.all();
const processed = items.map(item => {
// Add processing logic here
return {
json: {
...item.json,
processed: true,
processedAt: new Date().toISOString()
}
};
});
return processed;"""
        },
        "id": "process",
        "name": "Process",
        "type": "n8n-nodes-base.code",
        "typeVersion": 1,
        "position": [650, 300]
    }
    writer = {
        "parameters": {
            "fileName": "={{ 'processed_' + $json.filename }}",
            "filePath": "/data/output/"
        },
        "id": "write_file",
        "name": "Save Output",
        "type": "n8n-nodes-base.writeBinaryFile",
        "typeVersion": 1,
        "position": [850, 300]
    }
    return {
        "name": f"File Processor - {stamp}",
        "nodes": [watcher, reader, processor, writer],
        "connections": {
            "File Trigger": {"main": [[{"node": "Read File", "type": "main", "index": 0}]]},
            "Read File": {"main": [[{"node": "Process", "type": "main", "index": 0}]]},
            "Process": {"main": [[{"node": "Save Output", "type": "main", "index": 0}]]},
        },
        "settings": {"executionOrder": "v1"},
        "meta": {"type": "file_processor", "generated_by": "AI Workflow Agent"},
    }
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
def social_media_workflow(params: Dict[str, Any]) -> Dict[str, Any]:
    """Generate a social-media cross-posting n8n workflow.

    A webhook receives a POST with ``message`` and ``image_url``; a Code
    node builds one item per platform (Twitter text is truncated to 280
    chars, LinkedIn gets the full message) and the items fan out in
    parallel to a Twitter node and a LinkedIn node.

    Args:
        params: Optional overrides:
            - ``webhook_path``: webhook URL path (default ``"post-social"``).

    Returns:
        JSON-serializable n8n workflow definition (nodes + connections).
    """
    webhook_path = params.get("webhook_path", "post-social")
    return {
        "name": f"Social Media - {datetime.now().strftime('%Y%m%d_%H%M%S')}",
        "nodes": [
            {
                "parameters": {
                    "httpMethod": "POST",
                    "path": webhook_path,
                    # Fire-and-forget: respond as soon as the webhook is hit.
                    "responseMode": "onReceived"
                },
                "id": "webhook",
                "name": "Trigger",
                "type": "n8n-nodes-base.webhook",
                "typeVersion": 1,
                "position": [250, 300]
            },
            {
                "parameters": {
                    "functionCode": """// Prepare content for different platforms
const content = $input.first().json;
return [
  {
    json: {
      platform: 'twitter',
      text: content.message.substring(0, 280),
      media: content.image_url
    }
  },
  {
    json: {
      platform: 'linkedin',
      text: content.message,
      media: content.image_url
    }
  }
];"""
                },
                "id": "prepare",
                "name": "Prepare Content",
                "type": "n8n-nodes-base.code",
                "typeVersion": 1,
                "position": [450, 300]
            },
            {
                "parameters": {
                    "text": "={{ $json.text }}",
                    "additionalFields": {}
                },
                "id": "twitter",
                "name": "Post to Twitter",
                "type": "n8n-nodes-base.twitter",
                "typeVersion": 2,
                "position": [650, 200],
                # Placeholder credential IDs — replaced when imported into n8n.
                "credentials": {"twitterOAuth2Api": {"id": "TWITTER_ID", "name": "Twitter"}}
            },
            {
                "parameters": {
                    "text": "={{ $json.text }}",
                    "shareMediaCategory": "NONE"
                },
                "id": "linkedin",
                "name": "Post to LinkedIn",
                "type": "n8n-nodes-base.linkedIn",
                "typeVersion": 1,
                "position": [650, 400],
                "credentials": {"linkedInOAuth2Api": {"id": "LINKEDIN_ID", "name": "LinkedIn"}}
            }
        ],
        "connections": {
            "Trigger": {"main": [[{"node": "Prepare Content", "type": "main", "index": 0}]]},
            # Two output branches -> both platforms post in parallel.
            "Prepare Content": {"main": [
                [{"node": "Post to Twitter", "type": "main", "index": 0}],
                [{"node": "Post to LinkedIn", "type": "main", "index": 0}]
            ]}
        },
        "settings": {"executionOrder": "v1"},
        "meta": {"type": "social_media", "generated_by": "AI Workflow Agent"}
    }
def crm_integration_workflow(params: Dict[str, Any]) -> Dict[str, Any]:
    """Generate a CRM lead-intake n8n workflow.

    A webhook receives a new lead (email, first/last name, phone), creates
    the contact in HubSpot, sends a welcome email over SMTP, then answers
    the webhook call with ``{success, contact_id}``.

    Args:
        params: Optional overrides:
            - ``webhook_path``: webhook URL path (default ``"new-lead"``).
            - ``from_email``: welcome-mail sender (default ``"sales@company.com"``).

    Returns:
        JSON-serializable n8n workflow definition (nodes + connections).
    """
    webhook_path = params.get("webhook_path", "new-lead")
    from_email = params.get("from_email", "sales@company.com")
    return {
        "name": f"CRM Integration - {datetime.now().strftime('%Y%m%d_%H%M%S')}",
        "nodes": [
            {
                "parameters": {
                    "httpMethod": "POST",
                    "path": webhook_path,
                    # Response is produced by the Respond-to-Webhook node below.
                    "responseMode": "responseNode"
                },
                "id": "webhook",
                "name": "New Lead",
                "type": "n8n-nodes-base.webhook",
                "typeVersion": 1,
                "position": [250, 300]
            },
            {
                "parameters": {
                    "resource": "contact",
                    "operation": "create",
                    "email": "={{ $json.email }}",
                    "additionalFields": {
                        "firstName": "={{ $json.first_name }}",
                        "lastName": "={{ $json.last_name }}",
                        "phone": "={{ $json.phone }}"
                    }
                },
                "id": "hubspot",
                "name": "Create in HubSpot",
                "type": "n8n-nodes-base.hubspot",
                "typeVersion": 2,
                "position": [450, 300],
                # Placeholder credential IDs — replaced when imported into n8n.
                "credentials": {"hubspotApi": {"id": "HUBSPOT_ID", "name": "HubSpot"}}
            },
            {
                "parameters": {
                    "fromEmail": from_email,
                    "toEmail": "={{ $json.email }}",
                    "subject": "Welcome to our platform!",
                    "emailType": "html",
                    "message": "<h1>Welcome!</h1><p>Thank you for your interest.</p>"
                },
                "id": "email",
                "name": "Send Welcome Email",
                "type": "n8n-nodes-base.emailSend",
                "typeVersion": 2,
                "position": [650, 300],
                "credentials": {"smtp": {"id": "SMTP_ID", "name": "SMTP"}}
            },
            {
                "parameters": {
                    "respondWith": "json",
                    "responseBody": "={{ JSON.stringify({success: true, contact_id: $json.id}) }}"
                },
                "id": "respond",
                "name": "Response",
                "type": "n8n-nodes-base.respondToWebhook",
                "typeVersion": 1,
                "position": [850, 300]
            }
        ],
        "connections": {
            "New Lead": {"main": [[{"node": "Create in HubSpot", "type": "main", "index": 0}]]},
            "Create in HubSpot": {"main": [[{"node": "Send Welcome Email", "type": "main", "index": 0}]]},
            "Send Welcome Email": {"main": [[{"node": "Response", "type": "main", "index": 0}]]}
        },
        "settings": {"executionOrder": "v1"},
        "meta": {"type": "crm_integration", "generated_by": "AI Workflow Agent"}
    }
def monitoring_workflow(params: Dict[str, Any]) -> Dict[str, Any]:
    """Generate a health-check monitoring/alerting n8n workflow.

    Every 5 minutes an HTTP GET probes the health endpoint; an IF node
    branches on the response: non-200 -> Slack alert, otherwise a NoOp.

    Args:
        params: Optional overrides:
            - ``endpoint``: fallback health-check URL used when the incoming
              item has no ``endpoint`` field
              (default ``"https://api.example.com/health"``).
            - ``channel``: Slack channel for alerts (default ``"#alerts"``).

    Returns:
        JSON-serializable n8n workflow definition (nodes + connections).
    """
    endpoint = params.get("endpoint", "https://api.example.com/health")
    channel = params.get("channel", "#alerts")
    return {
        "name": f"Monitoring - {datetime.now().strftime('%Y%m%d_%H%M%S')}",
        "nodes": [
            {
                "parameters": {
                    "rule": {"interval": [{"field": "minutes", "minutesInterval": 5}]}
                },
                "id": "schedule",
                "name": "Every 5 Minutes",
                "type": "n8n-nodes-base.scheduleTrigger",
                "typeVersion": 1,
                "position": [250, 300]
            },
            {
                "parameters": {
                    "url": f"={{{{ $json.endpoint || '{endpoint}' }}}}",
                    "method": "GET",
                    "options": {"timeout": 10000}
                },
                "id": "check",
                "name": "Health Check",
                "type": "n8n-nodes-base.httpRequest",
                "typeVersion": 4,
                "position": [450, 300]
            },
            {
                "parameters": {
                    "conditions": {
                        "boolean": [
                            # NOTE(review): assumes the HTTP node exposes the
                            # status code as $json.status — verify node options.
                            {"value1": "={{ $json.status }}", "value2": 200, "operation": "notEqual"}
                        ]
                    }
                },
                "id": "if_error",
                "name": "If Error",
                "type": "n8n-nodes-base.if",
                "typeVersion": 1,
                "position": [650, 300]
            },
            {
                "parameters": {
                    "channel": channel,
                    # Fix: leading "=" marks this as an n8n expression so the
                    # {{ ... }} placeholders are evaluated, not posted literally
                    # (matches every other expression string in this module).
                    "text": "=🚨 *Alert*: Service health check failed!\nStatus: {{ $json.status }}\nTime: {{ new Date().toISOString() }}"
                },
                "id": "slack",
                "name": "Alert Slack",
                "type": "n8n-nodes-base.slack",
                "typeVersion": 2,
                "position": [850, 200],
                # Placeholder credential ID — replaced when imported into n8n.
                "credentials": {"slackApi": {"id": "SLACK_ID", "name": "Slack"}}
            },
            {
                "parameters": {},
                "id": "noop",
                "name": "All Good",
                "type": "n8n-nodes-base.noOp",
                "typeVersion": 1,
                "position": [850, 400]
            }
        ],
        "connections": {
            "Every 5 Minutes": {"main": [[{"node": "Health Check", "type": "main", "index": 0}]]},
            "Health Check": {"main": [[{"node": "If Error", "type": "main", "index": 0}]]},
            "If Error": {
                # Output 0 = condition true (error), output 1 = false (healthy).
                "main": [
                    [{"node": "Alert Slack", "type": "main", "index": 0}],
                    [{"node": "All Good", "type": "main", "index": 0}]
                ]
            }
        },
        "settings": {"executionOrder": "v1"},
        "meta": {"type": "monitoring", "generated_by": "AI Workflow Agent"}
    }
def data_pipeline_workflow(params: Dict[str, Any]) -> Dict[str, Any]:
    """Generate a validate/transform/store data-pipeline n8n workflow.

    A webhook ingests records, a Code node drops items missing
    ``id``/``timestamp``/``value`` (raising if nothing survives), a second
    Code node enriches each record, and the result is inserted into
    Postgres.

    Args:
        params: Optional overrides:
            - ``webhook_path``: webhook URL path (default ``"ingest-data"``).
            - ``table``: destination table name (default ``"processed_data"``).

    Returns:
        JSON-serializable n8n workflow definition (nodes + connections).
    """
    webhook_path = params.get("webhook_path", "ingest-data")
    table = params.get("table", "processed_data")
    return {
        "name": f"Data Pipeline - {datetime.now().strftime('%Y%m%d_%H%M%S')}",
        "nodes": [
            {
                "parameters": {
                    "httpMethod": "POST",
                    "path": webhook_path,
                    "responseMode": "onReceived"
                },
                "id": "webhook",
                "name": "Data Input",
                "type": "n8n-nodes-base.webhook",
                "typeVersion": 1,
                "position": [250, 300]
            },
            {
                "parameters": {
                    "functionCode": """// Validate incoming data
const data = $input.all();
const validated = data.filter(item => {
  const json = item.json;
  return json.id && json.timestamp && json.value !== undefined;
});
if (validated.length === 0) {
  throw new Error('No valid data received');
}
return validated;"""
                },
                "id": "validate",
                "name": "Validate",
                "type": "n8n-nodes-base.code",
                "typeVersion": 1,
                "position": [450, 300]
            },
            {
                "parameters": {
                    "functionCode": """// Transform data
const items = $input.all();
return items.map(item => ({
  json: {
    ...item.json,
    processed_value: item.json.value * 1.1,
    source: 'pipeline',
    ingested_at: new Date().toISOString()
  }
}));"""
                },
                "id": "transform",
                "name": "Transform",
                "type": "n8n-nodes-base.code",
                "typeVersion": 1,
                "position": [650, 300]
            },
            {
                "parameters": {
                    "operation": "insert",
                    "table": table
                },
                "id": "store",
                "name": "Store",
                "type": "n8n-nodes-base.postgres",
                "typeVersion": 2,
                "position": [850, 300],
                # Placeholder credential ID — replaced when imported into n8n.
                "credentials": {"postgres": {"id": "DB_ID", "name": "Database"}}
            }
        ],
        "connections": {
            "Data Input": {"main": [[{"node": "Validate", "type": "main", "index": 0}]]},
            "Validate": {"main": [[{"node": "Transform", "type": "main", "index": 0}]]},
            "Transform": {"main": [[{"node": "Store", "type": "main", "index": 0}]]}
        },
        "settings": {"executionOrder": "v1"},
        "meta": {"type": "data_pipeline", "generated_by": "AI Workflow Agent"}
    }
def chatbot_workflow(params: Dict[str, Any]) -> Dict[str, Any]:
    """Generate an AI chatbot n8n workflow backed by a local Ollama server.

    A webhook receives ``{"message": ...}``, forwards the prompt to the
    Ollama ``/api/generate`` endpoint, and answers the webhook call with
    the model's response as JSON.

    Args:
        params: Optional overrides:
            - ``model``: Ollama model tag (default ``"qwen2.5:7b"``).
            - ``ollama_url``: generate-endpoint URL
              (default ``"http://ollama:11434/api/generate"``).

    Returns:
        JSON-serializable n8n workflow definition (nodes + connections).
    """
    model = params.get("model", "qwen2.5:7b")
    ollama_url = params.get("ollama_url", "http://ollama:11434/api/generate")
    # Bare model name (size tag stripped) is echoed back in the response body.
    model_label = model.split(":")[0]
    return {
        "name": f"AI Chatbot - {datetime.now().strftime('%Y%m%d_%H%M%S')}",
        "nodes": [
            {
                "parameters": {
                    "httpMethod": "POST",
                    "path": "chat",
                    "responseMode": "responseNode"
                },
                "id": "webhook",
                "name": "Chat Input",
                "type": "n8n-nodes-base.webhook",
                "typeVersion": 1,
                "position": [250, 300]
            },
            {
                "parameters": {
                    "url": ollama_url,
                    "method": "POST",
                    "sendBody": True,
                    "bodyParameters": {
                        "parameters": [
                            {"name": "model", "value": model},
                            {"name": "prompt", "value": "={{ $json.message }}"},
                            # Single JSON reply instead of a token stream.
                            {"name": "stream", "value": "false"}
                        ]
                    }
                },
                "id": "ollama",
                "name": "Ask LLM",
                "type": "n8n-nodes-base.httpRequest",
                "typeVersion": 4,
                "position": [450, 300]
            },
            {
                "parameters": {
                    "respondWith": "json",
                    "responseBody": (
                        "={{ JSON.stringify({response: $json.response, model: '"
                        + model_label
                        + "'}) }}"
                    )
                },
                "id": "respond",
                "name": "Response",
                "type": "n8n-nodes-base.respondToWebhook",
                "typeVersion": 1,
                "position": [650, 300]
            }
        ],
        "connections": {
            "Chat Input": {"main": [[{"node": "Ask LLM", "type": "main", "index": 0}]]},
            "Ask LLM": {"main": [[{"node": "Response", "type": "main", "index": 0}]]}
        },
        "settings": {"executionOrder": "v1"},
        "meta": {"type": "chatbot", "generated_by": "AI Workflow Agent"}
    }
def report_generator_workflow(params: Dict[str, Any]) -> Dict[str, Any]:
    """Generate a scheduled reporting n8n workflow.

    Daily at 9 AM a Postgres query aggregates the last 7 days of orders,
    a Code node renders an HTML table, and the report is emailed via SMTP.

    Args:
        params: Optional overrides:
            - ``from_email``: sender address (default ``"reports@company.com"``).
            - ``to_email``: recipient address (default ``"team@company.com"``).

    Returns:
        JSON-serializable n8n workflow definition (nodes + connections).
    """
    from_email = params.get("from_email", "reports@company.com")
    to_email = params.get("to_email", "team@company.com")
    return {
        "name": f"Report Generator - {datetime.now().strftime('%Y%m%d_%H%M%S')}",
        "nodes": [
            {
                "parameters": {
                    "rule": {"interval": [{"field": "days", "daysInterval": 1}]},
                    "triggerAtTime": "09:00"
                },
                "id": "schedule",
                "name": "Daily at 9 AM",
                "type": "n8n-nodes-base.scheduleTrigger",
                "typeVersion": 1,
                "position": [250, 300]
            },
            {
                "parameters": {
                    "operation": "executeQuery",
                    "query": """SELECT
  DATE(created_at) as date,
  COUNT(*) as total,
  SUM(amount) as revenue
FROM orders
WHERE created_at >= NOW() - INTERVAL '7 days'
GROUP BY DATE(created_at)
ORDER BY date"""
                },
                "id": "query",
                "name": "Get Data",
                "type": "n8n-nodes-base.postgres",
                "typeVersion": 2,
                "position": [450, 300],
                # Placeholder credential ID — replaced when imported into n8n.
                "credentials": {"postgres": {"id": "DB_ID", "name": "Database"}}
            },
            {
                "parameters": {
                    "functionCode": """// Generate HTML report
const data = $input.all();
let html = '<h1>Weekly Report</h1><table border="1">';
html += '<tr><th>Date</th><th>Orders</th><th>Revenue</th></tr>';
data.forEach(row => {
  html += '<tr>';
  html += '<td>' + row.json.date + '</td>';
  html += '<td>' + row.json.total + '</td>';
  html += '<td>$' + row.json.revenue.toFixed(2) + '</td>';
  html += '</tr>';
});
html += '</table>';
return [{json: {html, generated: new Date().toISOString()}}];"""
                },
                "id": "generate",
                "name": "Generate Report",
                "type": "n8n-nodes-base.code",
                "typeVersion": 1,
                "position": [650, 300]
            },
            {
                "parameters": {
                    "fromEmail": from_email,
                    "toEmail": to_email,
                    # Fix: leading "=" marks this as an n8n expression so the
                    # {{ ... }} date placeholder is evaluated, not sent literally
                    # (matches every other expression string in this module).
                    "subject": "=Weekly Report - {{ new Date().toLocaleDateString() }}",
                    "emailType": "html",
                    "message": "={{ $json.html }}"
                },
                "id": "email",
                "name": "Send Report",
                "type": "n8n-nodes-base.emailSend",
                "typeVersion": 2,
                "position": [850, 300],
                "credentials": {"smtp": {"id": "SMTP_ID", "name": "SMTP"}}
            }
        ],
        "connections": {
            "Daily at 9 AM": {"main": [[{"node": "Get Data", "type": "main", "index": 0}]]},
            "Get Data": {"main": [[{"node": "Generate Report", "type": "main", "index": 0}]]},
            "Generate Report": {"main": [[{"node": "Send Report", "type": "main", "index": 0}]]}
        },
        "settings": {"executionOrder": "v1"},
        "meta": {"type": "report_generator", "generated_by": "AI Workflow Agent"}
    }