Spaces:
Runtime error
Runtime error
Abhishek commited on
Commit ·
2a902a6
1
Parent(s): f52dce2
Adding the application.
Browse files- .dummyenv +12 -0
- .gitignore +199 -0
- README.md +26 -0
- app.py +142 -638
- core/__init__.py +0 -0
- core/agents/SkeletonGraphAgent.py +814 -0
- core/agents/__init__.py +0 -0
- core/config/mcp_config.json +22 -0
- core/config/mcp_config.py +30 -0
- core/config/metadata.py +19 -0
- core/config/prompt.py +31 -0
- core/llms/__init__.py +0 -0
- core/llms/base_llm.py +50 -0
- core/mcp_tools/__init__.py +0 -0
- core/mcp_tools/erp_server.py +544 -0
- core/mcp_tools/global_disruptions_server.py +191 -0
- core/mcp_tools/math_server.py +24 -0
- core/mcp_tools/weather_server.py +49 -0
- dummydb.md +273 -0
- requirements.txt +11 -1
.dummyenv
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# OpenAI API Key
|
| 2 |
+
OPENAI_API_KEY=""
|
| 3 |
+
|
| 4 |
+
# Weather API Key
|
| 5 |
+
WEATHER_API_KEY = ""
|
| 6 |
+
|
| 7 |
+
# PostgreSQL Database Configuration
|
| 8 |
+
POSTGRES_HOST = ""
|
| 9 |
+
POSTGRES_DB = ""
|
| 10 |
+
POSTGRES_USER = ""
|
| 11 |
+
POSTGRES_PASSWORD = ""
|
| 12 |
+
POSTGRES_PORT = "5432"
|
.gitignore
ADDED
|
@@ -0,0 +1,199 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Byte-compiled / optimized / DLL files
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[cod]
|
| 4 |
+
*$py.class
|
| 5 |
+
|
| 6 |
+
# C extensions
|
| 7 |
+
*.so
|
| 8 |
+
|
| 9 |
+
# Distribution / packaging
|
| 10 |
+
.Python
|
| 11 |
+
build/
|
| 12 |
+
develop-eggs/
|
| 13 |
+
dist/
|
| 14 |
+
downloads/
|
| 15 |
+
eggs/
|
| 16 |
+
.eggs/
|
| 17 |
+
lib/
|
| 18 |
+
lib64/
|
| 19 |
+
parts/
|
| 20 |
+
sdist/
|
| 21 |
+
var/
|
| 22 |
+
wheels/
|
| 23 |
+
share/python-wheels/
|
| 24 |
+
*.egg-info/
|
| 25 |
+
.installed.cfg
|
| 26 |
+
*.egg
|
| 27 |
+
MANIFEST
|
| 28 |
+
|
| 29 |
+
# PyInstaller
|
| 30 |
+
# Usually these files are written by a python script from a template
|
| 31 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
| 32 |
+
*.manifest
|
| 33 |
+
*.spec
|
| 34 |
+
|
| 35 |
+
# Installer logs
|
| 36 |
+
pip-log.txt
|
| 37 |
+
pip-delete-this-directory.txt
|
| 38 |
+
|
| 39 |
+
# Unit test / coverage reports
|
| 40 |
+
htmlcov/
|
| 41 |
+
.tox/
|
| 42 |
+
.nox/
|
| 43 |
+
.coverage
|
| 44 |
+
.coverage.*
|
| 45 |
+
.cache
|
| 46 |
+
nosetests.xml
|
| 47 |
+
coverage.xml
|
| 48 |
+
*.cover
|
| 49 |
+
*.py,cover
|
| 50 |
+
.hypothesis/
|
| 51 |
+
.pytest_cache/
|
| 52 |
+
cover/
|
| 53 |
+
|
| 54 |
+
# Translations
|
| 55 |
+
*.mo
|
| 56 |
+
*.pot
|
| 57 |
+
|
| 58 |
+
# Django stuff:
|
| 59 |
+
*.log
|
| 60 |
+
local_settings.py
|
| 61 |
+
db.sqlite3
|
| 62 |
+
db.sqlite3-journal
|
| 63 |
+
|
| 64 |
+
# Flask stuff:
|
| 65 |
+
instance/
|
| 66 |
+
.webassets-cache
|
| 67 |
+
|
| 68 |
+
# Scrapy stuff:
|
| 69 |
+
.scrapy
|
| 70 |
+
|
| 71 |
+
# Sphinx documentation
|
| 72 |
+
docs/_build/
|
| 73 |
+
|
| 74 |
+
# PyBuilder
|
| 75 |
+
.pybuilder/
|
| 76 |
+
target/
|
| 77 |
+
|
| 78 |
+
# Jupyter Notebook
|
| 79 |
+
.ipynb_checkpoints
|
| 80 |
+
|
| 81 |
+
# IPython
|
| 82 |
+
profile_default/
|
| 83 |
+
ipython_config.py
|
| 84 |
+
|
| 85 |
+
# pyenv
|
| 86 |
+
# For a library or package, you might want to ignore these files since the code is
|
| 87 |
+
# intended to run in multiple environments; otherwise, check them in:
|
| 88 |
+
# .python-version
|
| 89 |
+
|
| 90 |
+
# pipenv
|
| 91 |
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
| 92 |
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
| 93 |
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
| 94 |
+
# install all needed dependencies.
|
| 95 |
+
#Pipfile.lock
|
| 96 |
+
|
| 97 |
+
# UV
|
| 98 |
+
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
|
| 99 |
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
| 100 |
+
# commonly ignored for libraries.
|
| 101 |
+
#uv.lock
|
| 102 |
+
|
| 103 |
+
# poetry
|
| 104 |
+
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
| 105 |
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
| 106 |
+
# commonly ignored for libraries.
|
| 107 |
+
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
| 108 |
+
#poetry.lock
|
| 109 |
+
|
| 110 |
+
# pdm
|
| 111 |
+
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
| 112 |
+
#pdm.lock
|
| 113 |
+
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
| 114 |
+
# in version control.
|
| 115 |
+
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
|
| 116 |
+
.pdm.toml
|
| 117 |
+
.pdm-python
|
| 118 |
+
.pdm-build/
|
| 119 |
+
|
| 120 |
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
| 121 |
+
__pypackages__/
|
| 122 |
+
|
| 123 |
+
# Celery stuff
|
| 124 |
+
celerybeat-schedule
|
| 125 |
+
celerybeat.pid
|
| 126 |
+
|
| 127 |
+
# SageMath parsed files
|
| 128 |
+
*.sage.py
|
| 129 |
+
|
| 130 |
+
# Environments
|
| 131 |
+
.env
|
| 132 |
+
.venv
|
| 133 |
+
env/
|
| 134 |
+
venv/
|
| 135 |
+
ENV/
|
| 136 |
+
env.bak/
|
| 137 |
+
venv.bak/
|
| 138 |
+
|
| 139 |
+
# Spyder project settings
|
| 140 |
+
.spyderproject
|
| 141 |
+
.spyproject
|
| 142 |
+
|
| 143 |
+
# Rope project settings
|
| 144 |
+
.ropeproject
|
| 145 |
+
|
| 146 |
+
# mkdocs documentation
|
| 147 |
+
/site
|
| 148 |
+
|
| 149 |
+
# mypy
|
| 150 |
+
.mypy_cache/
|
| 151 |
+
.dmypy.json
|
| 152 |
+
dmypy.json
|
| 153 |
+
|
| 154 |
+
# Pyre type checker
|
| 155 |
+
.pyre/
|
| 156 |
+
|
| 157 |
+
# pytype static type analyzer
|
| 158 |
+
.pytype/
|
| 159 |
+
|
| 160 |
+
# Cython debug symbols
|
| 161 |
+
cython_debug/
|
| 162 |
+
|
| 163 |
+
# PyCharm
|
| 164 |
+
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
| 165 |
+
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
| 166 |
+
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
| 167 |
+
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
| 168 |
+
#.idea/
|
| 169 |
+
|
| 170 |
+
# Abstra
|
| 171 |
+
# Abstra is an AI-powered process automation framework.
|
| 172 |
+
# Ignore directories containing user credentials, local state, and settings.
|
| 173 |
+
# Learn more at https://abstra.io/docs
|
| 174 |
+
.abstra/
|
| 175 |
+
|
| 176 |
+
# Visual Studio Code
|
| 177 |
+
# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
|
| 178 |
+
# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
|
| 179 |
+
# and can be added to the global gitignore or merged into this file. However, if you prefer,
|
| 180 |
+
# you could uncomment the following to ignore the enitre vscode folder
|
| 181 |
+
# .vscode/
|
| 182 |
+
|
| 183 |
+
# Ruff stuff:
|
| 184 |
+
.ruff_cache/
|
| 185 |
+
|
| 186 |
+
# PyPI configuration file
|
| 187 |
+
.pypirc
|
| 188 |
+
|
| 189 |
+
# Cursor
|
| 190 |
+
# Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
|
| 191 |
+
# exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
|
| 192 |
+
# refer to https://docs.cursor.com/context/ignore-files
|
| 193 |
+
.cursorignore
|
| 194 |
+
.cursorindexingignore
|
| 195 |
+
.env
|
| 196 |
+
.vscode
|
| 197 |
+
|
| 198 |
+
data
|
| 199 |
+
notebooks
|
README.md
CHANGED
|
@@ -12,3 +12,29 @@ short_description: Helps you keep track of your shipments.
|
|
| 12 |
---
|
| 13 |
|
| 14 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 12 |
---
|
| 13 |
|
| 14 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
agent-demo-track
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
## Introduction
|
| 21 |
+
|
| 22 |
+
TrackMate AI is an intelligent shipment tracking system that leverages artificial intelligence to provide real-time updates and insights about your packages. With TrackMate AI, you can:
|
| 23 |
+
|
| 24 |
+
- Monitor all your shipments in one place
|
| 25 |
+
- Receive smart predictions about delivery times
|
| 26 |
+
- Analyze delivery patterns and performance
|
| 27 |
+
- Access historical tracking data for reference
|
| 28 |
+
|
| 29 |
+
## Usage
|
| 30 |
+
|
| 31 |
+
1. Update the .env with required keys. Look at .dummyenv
|
| 32 |
+
|
| 33 |
+
2. Create a dummy postgres db. Look at dummydb.md
|
| 34 |
+
|
| 35 |
+
3. Run the app.py and type away.
|
| 36 |
+
|
| 37 |
+
## How everything works
|
| 38 |
+
|
| 39 |
+
The gradio app works on the SkeletonGraphAgent as the backend.
|
| 40 |
+
The SkeletonGraphAgent dynamically creates a graph based on the user inputs.
|
app.py
CHANGED
|
@@ -1,658 +1,162 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
-
import
|
| 3 |
-
|
| 4 |
-
import
|
| 5 |
-
|
| 6 |
-
from typing import
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 12 |
try:
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
database=os.getenv("POSTGRES_DB", "erp_db"),
|
| 16 |
-
user=os.getenv("POSTGRES_USER", "postgres"),
|
| 17 |
-
password=os.getenv("POSTGRES_PASSWORD", ""),
|
| 18 |
-
port=os.getenv("POSTGRES_PORT", "5432")
|
| 19 |
-
)
|
| 20 |
-
return conn
|
| 21 |
except Exception as e:
|
| 22 |
-
|
| 23 |
|
| 24 |
-
def
|
| 25 |
-
|
| 26 |
-
Execute a custom SQL query on the ERP database.
|
| 27 |
|
| 28 |
-
|
| 29 |
-
query: SQL query to execute
|
| 30 |
-
params: Parameters for parameterized queries (as a comma-separated string)
|
| 31 |
-
|
| 32 |
-
Returns:
|
| 33 |
-
Query results or error message
|
| 34 |
-
"""
|
| 35 |
try:
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
param_list = None
|
| 41 |
-
if params and params.strip():
|
| 42 |
-
param_list = [p.strip() for p in params.split(',')]
|
| 43 |
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
# Check if it's a SELECT query
|
| 50 |
-
if query.strip().upper().startswith('SELECT'):
|
| 51 |
-
results = cursor.fetchall()
|
| 52 |
-
# Convert RealDictRow to regular dict for JSON serialization
|
| 53 |
-
results = [dict(row) for row in results]
|
| 54 |
-
if not results:
|
| 55 |
-
return str({
|
| 56 |
-
"success": True,
|
| 57 |
-
"message": "No results found",
|
| 58 |
-
"results": []
|
| 59 |
-
})
|
| 60 |
-
return str({
|
| 61 |
-
"success": True,
|
| 62 |
-
"message": f"Query returned {len(results)} results",
|
| 63 |
-
"count": len(results),
|
| 64 |
-
"results": results
|
| 65 |
-
})
|
| 66 |
-
else:
|
| 67 |
-
# For INSERT, UPDATE, DELETE queries
|
| 68 |
-
conn.commit()
|
| 69 |
-
return str({
|
| 70 |
-
"success": True,
|
| 71 |
-
"message": f"Query executed successfully",
|
| 72 |
-
"rows_affected": cursor.rowcount
|
| 73 |
-
})
|
| 74 |
-
|
| 75 |
-
except Exception as e:
|
| 76 |
-
return str({
|
| 77 |
-
"success": False,
|
| 78 |
-
"error": str(e)
|
| 79 |
-
})
|
| 80 |
-
finally:
|
| 81 |
-
if 'conn' in locals():
|
| 82 |
-
conn.close()
|
| 83 |
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
List of ERP table names
|
| 90 |
-
"""
|
| 91 |
-
query = """
|
| 92 |
-
SELECT table_name
|
| 93 |
-
FROM information_schema.tables
|
| 94 |
-
WHERE table_schema = 'public'
|
| 95 |
-
AND table_name LIKE 'erp_%'
|
| 96 |
-
ORDER BY table_name;
|
| 97 |
-
"""
|
| 98 |
-
|
| 99 |
-
try:
|
| 100 |
-
conn = get_db_connection()
|
| 101 |
-
cursor = conn.cursor(cursor_factory=RealDictCursor)
|
| 102 |
-
cursor.execute(query)
|
| 103 |
-
results = cursor.fetchall()
|
| 104 |
-
|
| 105 |
-
table_names = [row['table_name'] for row in results]
|
| 106 |
-
if not table_names:
|
| 107 |
-
return str({
|
| 108 |
-
"success": False,
|
| 109 |
-
"message": "No ERP tables found",
|
| 110 |
-
"tables": []
|
| 111 |
-
})
|
| 112 |
-
return str({
|
| 113 |
-
"success": True,
|
| 114 |
-
"message": f"Found {len(table_names)} tables",
|
| 115 |
-
"count": len(table_names),
|
| 116 |
-
"tables": table_names
|
| 117 |
-
})
|
| 118 |
-
|
| 119 |
-
except Exception as e:
|
| 120 |
-
return str({
|
| 121 |
-
"success": False,
|
| 122 |
-
"error": str(e)
|
| 123 |
-
})
|
| 124 |
-
finally:
|
| 125 |
-
if 'conn' in locals():
|
| 126 |
-
conn.close()
|
| 127 |
|
| 128 |
-
|
| 129 |
-
"""
|
| 130 |
-
Get the status of an order by order ID.
|
| 131 |
-
|
| 132 |
-
Args:
|
| 133 |
-
order_id: The ID of the order to check
|
| 134 |
-
|
| 135 |
-
Returns:
|
| 136 |
-
Order status information including shipping and destination countries
|
| 137 |
-
"""
|
| 138 |
-
query = """
|
| 139 |
-
SELECT o.*, c.name as customer_name, c.email as customer_email,
|
| 140 |
-
o.shipping_country, o.destination_country
|
| 141 |
-
FROM erp_orders o
|
| 142 |
-
JOIN erp_customers c ON o.customer_id = c.customer_id
|
| 143 |
-
WHERE o.order_id = %s;
|
| 144 |
-
"""
|
| 145 |
-
|
| 146 |
-
try:
|
| 147 |
-
conn = get_db_connection()
|
| 148 |
-
cursor = conn.cursor(cursor_factory=RealDictCursor)
|
| 149 |
-
cursor.execute(query, [order_id])
|
| 150 |
-
order = cursor.fetchone()
|
| 151 |
-
|
| 152 |
-
if not order:
|
| 153 |
-
return str({
|
| 154 |
-
"success": False,
|
| 155 |
-
"message": f"Order with ID {order_id} not found"
|
| 156 |
-
})
|
| 157 |
-
|
| 158 |
-
# Get order items
|
| 159 |
-
items_query = """
|
| 160 |
-
SELECT oi.order_item_id, oi.order_id, oi.product_id, oi.quantity, oi.unit_price, oi.subtotal,
|
| 161 |
-
p.product_name, p.sku
|
| 162 |
-
FROM erp_order_items oi
|
| 163 |
-
JOIN erp_products p ON oi.product_id = p.product_id
|
| 164 |
-
WHERE oi.order_id = %s;
|
| 165 |
-
"""
|
| 166 |
-
cursor.execute(items_query, [order_id])
|
| 167 |
-
items = cursor.fetchall()
|
| 168 |
-
|
| 169 |
-
# Get order history
|
| 170 |
-
history_query = """
|
| 171 |
-
SELECT * FROM erp_order_history
|
| 172 |
-
WHERE order_id = %s
|
| 173 |
-
ORDER BY timestamp DESC;
|
| 174 |
-
"""
|
| 175 |
-
cursor.execute(history_query, [order_id])
|
| 176 |
-
history = cursor.fetchall()
|
| 177 |
-
|
| 178 |
-
order_dict = dict(order)
|
| 179 |
-
items_list = [dict(item) for item in items]
|
| 180 |
-
history_list = [dict(entry) for entry in history]
|
| 181 |
-
|
| 182 |
-
# Return JSON formatted response
|
| 183 |
-
return str({
|
| 184 |
-
"success": True,
|
| 185 |
-
"order": {
|
| 186 |
-
"order_id": order_id,
|
| 187 |
-
"customer": {
|
| 188 |
-
"customer_id": order_dict['customer_id'],
|
| 189 |
-
"name": order_dict['customer_name'],
|
| 190 |
-
"email": order_dict['customer_email']
|
| 191 |
-
},
|
| 192 |
-
"status": order_dict['status'],
|
| 193 |
-
"shipping_address": order_dict.get('shipping_address', 'N/A'),
|
| 194 |
-
"shipping_country": order_dict.get('shipping_country', 'N/A'),
|
| 195 |
-
"destination_country": order_dict.get('destination_country', 'N/A'),
|
| 196 |
-
"items": items_list,
|
| 197 |
-
"history": history_list
|
| 198 |
-
}
|
| 199 |
-
})
|
| 200 |
-
|
| 201 |
-
except Exception as e:
|
| 202 |
-
return str({
|
| 203 |
-
"success": False,
|
| 204 |
-
"error": str(e)
|
| 205 |
-
})
|
| 206 |
-
finally:
|
| 207 |
-
if 'conn' in locals():
|
| 208 |
-
conn.close()
|
| 209 |
|
| 210 |
-
def place_new_order(
|
| 211 |
-
customer_id: int,
|
| 212 |
-
product_ids: str,
|
| 213 |
-
quantities: str,
|
| 214 |
-
shipping_address: str,
|
| 215 |
-
shipping_country: str,
|
| 216 |
-
destination_country: str,
|
| 217 |
-
previous_order_id: int = None
|
| 218 |
-
):
|
| 219 |
-
"""
|
| 220 |
-
Place a new order in the ERP system.
|
| 221 |
-
|
| 222 |
-
Args:
|
| 223 |
-
customer_id: ID of the customer placing the order
|
| 224 |
-
product_ids: Comma-separated list of product IDs
|
| 225 |
-
quantities: Comma-separated list of quantities corresponding to product IDs
|
| 226 |
-
shipping_address: Shipping address for the order
|
| 227 |
-
shipping_country: Country where the order will be shipped from
|
| 228 |
-
destination_country: Country where the order will be delivered to
|
| 229 |
-
previous_order_id: ID of a previous order this is replacing (optional)
|
| 230 |
-
|
| 231 |
-
Returns:
|
| 232 |
-
New order information
|
| 233 |
-
"""
|
| 234 |
try:
|
| 235 |
-
|
| 236 |
-
|
| 237 |
-
|
| 238 |
-
|
| 239 |
-
|
| 240 |
-
|
| 241 |
-
|
| 242 |
-
|
| 243 |
-
|
| 244 |
-
|
| 245 |
-
# Create items list
|
| 246 |
-
items = [
|
| 247 |
-
{"product_id": pid, "quantity": qty}
|
| 248 |
-
for pid, qty in zip(product_id_list, quantity_list)
|
| 249 |
-
]
|
| 250 |
-
|
| 251 |
-
conn = get_db_connection()
|
| 252 |
-
cursor = conn.cursor(cursor_factory=RealDictCursor)
|
| 253 |
-
|
| 254 |
-
# Calculate total amount based on the total items and their prices
|
| 255 |
-
total_amount = 0
|
| 256 |
-
for item in items:
|
| 257 |
-
product_query = "SELECT price FROM erp_products WHERE product_id = %s;"
|
| 258 |
-
cursor.execute(product_query, [item['product_id']])
|
| 259 |
-
product = cursor.fetchone()
|
| 260 |
-
if not product:
|
| 261 |
-
return str({
|
| 262 |
-
"success": False,
|
| 263 |
-
"error": f"Product with ID {item['product_id']} not found"
|
| 264 |
-
})
|
| 265 |
-
total_amount += product['price'] * item['quantity']
|
| 266 |
-
|
| 267 |
-
# Insert new order into the erp_orders table for the customer
|
| 268 |
-
order_query = """
|
| 269 |
-
INSERT INTO erp_orders (
|
| 270 |
-
customer_id, order_date, total_amount, status,
|
| 271 |
-
shipping_address, shipping_country, destination_country, previous_order_id,
|
| 272 |
-
estimated_delivery, payment_status
|
| 273 |
-
) VALUES (
|
| 274 |
-
%s, CURRENT_DATE, %s, 'Processing',
|
| 275 |
-
%s, %s, %s, %s,
|
| 276 |
-
CURRENT_DATE + INTERVAL '7 days', 'Pending'
|
| 277 |
-
) RETURNING order_id;
|
| 278 |
-
"""
|
| 279 |
-
cursor.execute(order_query, [
|
| 280 |
-
customer_id, total_amount, shipping_address, shipping_country, destination_country, previous_order_id
|
| 281 |
-
])
|
| 282 |
-
new_order_id = cursor.fetchone()['order_id']
|
| 283 |
-
|
| 284 |
-
# Insert order items into erp_order_items table for the new order
|
| 285 |
-
for item in items:
|
| 286 |
-
# Get product price
|
| 287 |
-
cursor.execute(product_query, [item['product_id']])
|
| 288 |
-
product = cursor.fetchone()
|
| 289 |
-
unit_price = product['price']
|
| 290 |
-
subtotal = unit_price * item['quantity']
|
| 291 |
-
|
| 292 |
-
item_query = """
|
| 293 |
-
INSERT INTO erp_order_items (
|
| 294 |
-
order_id, product_id, quantity, unit_price, subtotal
|
| 295 |
-
) VALUES (
|
| 296 |
-
%s, %s, %s, %s, %s
|
| 297 |
-
);
|
| 298 |
-
"""
|
| 299 |
-
cursor.execute(item_query, [
|
| 300 |
-
new_order_id, item['product_id'], item['quantity'],
|
| 301 |
-
unit_price, subtotal
|
| 302 |
-
])
|
| 303 |
-
|
| 304 |
-
# Update product stock
|
| 305 |
-
update_stock_query = """
|
| 306 |
-
UPDATE erp_products
|
| 307 |
-
SET stock_quantity = stock_quantity - %s
|
| 308 |
-
WHERE product_id = %s;
|
| 309 |
-
"""
|
| 310 |
-
cursor.execute(update_stock_query, [item['quantity'], item['product_id']])
|
| 311 |
-
|
| 312 |
-
# Create order history entry into the erp_order_history for the new order
|
| 313 |
-
history_query = """
|
| 314 |
-
INSERT INTO erp_order_history (
|
| 315 |
-
order_id, timestamp, status_change, notes, updated_by
|
| 316 |
-
) VALUES (
|
| 317 |
-
%s, CURRENT_TIMESTAMP, 'Order Created', 'New order placed', 'System'
|
| 318 |
-
);
|
| 319 |
-
"""
|
| 320 |
-
cursor.execute(history_query, [new_order_id])
|
| 321 |
-
|
| 322 |
-
# If this is a replacement order, add a note to the previous order
|
| 323 |
-
if previous_order_id:
|
| 324 |
-
prev_order_note_query = """
|
| 325 |
-
INSERT INTO erp_order_history (
|
| 326 |
-
order_id, timestamp, status_change, notes, updated_by
|
| 327 |
-
) VALUES (
|
| 328 |
-
%s, CURRENT_TIMESTAMP, 'Replaced', 'Order replaced by order #%s', 'System'
|
| 329 |
-
);
|
| 330 |
-
"""
|
| 331 |
-
cursor.execute(prev_order_note_query, [previous_order_id, new_order_id])
|
| 332 |
-
|
| 333 |
-
# Generate invoice for the new order
|
| 334 |
-
invoice_query = """
|
| 335 |
-
INSERT INTO erp_invoices (
|
| 336 |
-
order_id, invoice_date, amount, payment_terms, due_date, is_paid, invoice_number
|
| 337 |
-
) VALUES (
|
| 338 |
-
%s, CURRENT_DATE, %s, 'Net 30', CURRENT_DATE + INTERVAL '30 days', FALSE, 'INV-' || %s
|
| 339 |
-
) RETURNING invoice_id;
|
| 340 |
-
"""
|
| 341 |
-
cursor.execute(invoice_query, [new_order_id, total_amount, new_order_id])
|
| 342 |
-
invoice_id = cursor.fetchone()['invoice_id']
|
| 343 |
-
|
| 344 |
-
conn.commit()
|
| 345 |
-
|
| 346 |
-
# Get the complete new order details
|
| 347 |
-
cursor.execute("SELECT * FROM erp_orders WHERE order_id = %s;", [new_order_id])
|
| 348 |
-
order = cursor.fetchone()
|
| 349 |
-
|
| 350 |
-
order_dict = dict(order)
|
| 351 |
-
|
| 352 |
-
# Return JSON formatted response
|
| 353 |
-
return str({
|
| 354 |
-
"success": True,
|
| 355 |
-
"order": {
|
| 356 |
-
"order_id": new_order_id,
|
| 357 |
-
"invoice_id": invoice_id,
|
| 358 |
-
"total_amount": float(total_amount),
|
| 359 |
-
"status": order_dict['status'],
|
| 360 |
-
"estimated_delivery": str(order_dict['estimated_delivery']) if order_dict['estimated_delivery'] else None,
|
| 361 |
-
"customer_id": customer_id,
|
| 362 |
-
"shipping_address": shipping_address,
|
| 363 |
-
"shipping_country": shipping_country,
|
| 364 |
-
"destination_country": destination_country,
|
| 365 |
-
"previous_order_id": previous_order_id,
|
| 366 |
-
"items": [{"product_id": pid, "quantity": qty} for pid, qty in zip(product_id_list, quantity_list)]
|
| 367 |
-
}
|
| 368 |
-
})
|
| 369 |
-
|
| 370 |
except Exception as e:
|
| 371 |
-
|
| 372 |
-
|
| 373 |
-
return str({
|
| 374 |
-
"success": False,
|
| 375 |
-
"error": str(e)
|
| 376 |
-
})
|
| 377 |
-
finally:
|
| 378 |
-
if 'conn' in locals():
|
| 379 |
-
conn.close()
|
| 380 |
|
| 381 |
-
def
|
| 382 |
-
|
| 383 |
-
|
| 384 |
-
|
| 385 |
-
Args:
|
| 386 |
-
order_id: ID of the order to cancel
|
| 387 |
-
reason: Reason for cancellation
|
| 388 |
-
|
| 389 |
-
Returns:
|
| 390 |
-
Result of the cancellation operation
|
| 391 |
-
"""
|
| 392 |
-
try:
|
| 393 |
-
conn = get_db_connection()
|
| 394 |
-
cursor = conn.cursor(cursor_factory=RealDictCursor)
|
| 395 |
-
|
| 396 |
-
# Check if order exists and can be cancelled
|
| 397 |
-
check_query = """
|
| 398 |
-
SELECT status, customer_id FROM erp_orders WHERE order_id = %s;
|
| 399 |
-
"""
|
| 400 |
-
cursor.execute(check_query, [order_id])
|
| 401 |
-
order = cursor.fetchone()
|
| 402 |
-
|
| 403 |
-
if not order:
|
| 404 |
-
return str({
|
| 405 |
-
"success": False,
|
| 406 |
-
"error": f"Order with ID {order_id} not found"
|
| 407 |
-
})
|
| 408 |
-
|
| 409 |
-
if order['status'] in ['Delivered', 'Cancelled']:
|
| 410 |
-
return str({
|
| 411 |
-
"success": False,
|
| 412 |
-
"error": f"Cannot cancel order with status '{order['status']}'"
|
| 413 |
-
})
|
| 414 |
-
|
| 415 |
-
# Get order items to restore stock
|
| 416 |
-
items_query = """
|
| 417 |
-
SELECT product_id, quantity FROM erp_order_items WHERE order_id = %s;
|
| 418 |
-
"""
|
| 419 |
-
cursor.execute(items_query, [order_id])
|
| 420 |
-
items = cursor.fetchall()
|
| 421 |
-
|
| 422 |
-
# Update order status to Cancelled
|
| 423 |
-
update_query = """
|
| 424 |
-
UPDATE erp_orders SET status = 'Cancelled', payment_status = 'Cancelled'
|
| 425 |
-
WHERE order_id = %s;
|
| 426 |
-
"""
|
| 427 |
-
cursor.execute(update_query, [order_id])
|
| 428 |
-
|
| 429 |
-
# Add entry to order history
|
| 430 |
-
history_query = """
|
| 431 |
-
INSERT INTO erp_order_history (
|
| 432 |
-
order_id, timestamp, status_change, notes, updated_by
|
| 433 |
-
) VALUES (
|
| 434 |
-
%s, CURRENT_TIMESTAMP, 'Cancelled', %s, 'System'
|
| 435 |
-
);
|
| 436 |
-
"""
|
| 437 |
-
cursor.execute(history_query, [order_id, f"Order cancelled: {reason}"])
|
| 438 |
-
|
| 439 |
-
# Restore product stock quantities
|
| 440 |
-
for item in items:
|
| 441 |
-
restore_stock_query = """
|
| 442 |
-
UPDATE erp_products
|
| 443 |
-
SET stock_quantity = stock_quantity + %s
|
| 444 |
-
WHERE product_id = %s;
|
| 445 |
-
"""
|
| 446 |
-
cursor.execute(restore_stock_query, [item['quantity'], item['product_id']])
|
| 447 |
-
|
| 448 |
-
# Update invoice if exists
|
| 449 |
-
invoice_query = """
|
| 450 |
-
UPDATE erp_invoices
|
| 451 |
-
SET is_paid = FALSE
|
| 452 |
-
WHERE order_id = %s;
|
| 453 |
-
"""
|
| 454 |
-
cursor.execute(invoice_query, [order_id])
|
| 455 |
-
|
| 456 |
-
conn.commit()
|
| 457 |
-
|
| 458 |
-
return str({
|
| 459 |
-
"success": True,
|
| 460 |
-
"message": f"Order #{order_id} has been successfully cancelled",
|
| 461 |
-
"order_id": order_id,
|
| 462 |
-
"reason": reason,
|
| 463 |
-
"items_restored": len(items)
|
| 464 |
-
})
|
| 465 |
-
|
| 466 |
-
except Exception as e:
|
| 467 |
-
if 'conn' in locals():
|
| 468 |
-
conn.rollback()
|
| 469 |
-
return str({
|
| 470 |
-
"success": False,
|
| 471 |
-
"error": str(e)
|
| 472 |
-
})
|
| 473 |
-
finally:
|
| 474 |
-
if 'conn' in locals():
|
| 475 |
-
conn.close()
|
| 476 |
|
| 477 |
-
|
| 478 |
-
|
| 479 |
-
Get invoice details by invoice ID or order ID.
|
| 480 |
-
|
| 481 |
-
Args:
|
| 482 |
-
invoice_id: ID of the invoice (optional)
|
| 483 |
-
order_id: ID of the order (optional)
|
| 484 |
-
|
| 485 |
-
Returns:
|
| 486 |
-
Invoice details including customer and order information
|
| 487 |
-
"""
|
| 488 |
-
if not invoice_id and not order_id:
|
| 489 |
-
return str({
|
| 490 |
-
"success": False,
|
| 491 |
-
"error": "Either invoice_id or order_id must be provided"
|
| 492 |
-
})
|
| 493 |
-
|
| 494 |
-
try:
|
| 495 |
-
conn = get_db_connection()
|
| 496 |
-
cursor = conn.cursor(cursor_factory=RealDictCursor)
|
| 497 |
-
|
| 498 |
-
if invoice_id:
|
| 499 |
-
query = """
|
| 500 |
-
SELECT i.*, o.order_date, o.status as order_status,
|
| 501 |
-
c.name as customer_name, c.email as customer_email, c.address as customer_address
|
| 502 |
-
FROM erp_invoices i
|
| 503 |
-
JOIN erp_orders o ON i.order_id = o.order_id
|
| 504 |
-
JOIN erp_customers c ON o.customer_id = c.customer_id
|
| 505 |
-
WHERE i.invoice_id = %s;
|
| 506 |
-
"""
|
| 507 |
-
cursor.execute(query, [invoice_id])
|
| 508 |
-
else:
|
| 509 |
-
query = """
|
| 510 |
-
SELECT i.*, o.order_date, o.status as order_status,
|
| 511 |
-
c.name as customer_name, c.email as customer_email, c.address as customer_address
|
| 512 |
-
FROM erp_invoices i
|
| 513 |
-
JOIN erp_orders o ON i.order_id = o.order_id
|
| 514 |
-
JOIN erp_customers c ON o.customer_id = c.customer_id
|
| 515 |
-
WHERE i.order_id = %s;
|
| 516 |
-
"""
|
| 517 |
-
cursor.execute(query, [order_id])
|
| 518 |
-
|
| 519 |
-
invoice = cursor.fetchone()
|
| 520 |
-
|
| 521 |
-
if not invoice:
|
| 522 |
-
return str({
|
| 523 |
-
"success": False,
|
| 524 |
-
"error": f"Invoice not found for the provided {'invoice_id' if invoice_id else 'order_id'}"
|
| 525 |
-
})
|
| 526 |
-
|
| 527 |
-
# Get order items
|
| 528 |
-
items_query = """
|
| 529 |
-
SELECT oi.*, p.product_name, p.sku
|
| 530 |
-
FROM erp_order_items oi
|
| 531 |
-
JOIN erp_products p ON oi.product_id = p.product_id
|
| 532 |
-
WHERE oi.order_id = %s;
|
| 533 |
-
"""
|
| 534 |
-
cursor.execute(items_query, [invoice['order_id']])
|
| 535 |
-
items = cursor.fetchall()
|
| 536 |
-
|
| 537 |
-
invoice_dict = dict(invoice)
|
| 538 |
-
items_list = [dict(item) for item in items]
|
| 539 |
-
|
| 540 |
-
# Return JSON formatted response
|
| 541 |
-
return str({
|
| 542 |
-
"success": True,
|
| 543 |
-
"invoice": {
|
| 544 |
-
"invoice_id": invoice_dict['invoice_id'],
|
| 545 |
-
"invoice_number": invoice_dict.get('invoice_number', ''),
|
| 546 |
-
"order_id": invoice_dict['order_id'],
|
| 547 |
-
"order_date": str(invoice_dict['order_date']) if invoice_dict['order_date'] else None,
|
| 548 |
-
"order_status": invoice_dict['order_status'],
|
| 549 |
-
"amount": float(invoice_dict['amount']),
|
| 550 |
-
"due_date": str(invoice_dict['due_date']) if invoice_dict['due_date'] else None,
|
| 551 |
-
"payment_status": "Paid" if invoice_dict['is_paid'] else "Unpaid",
|
| 552 |
-
"customer": {
|
| 553 |
-
"name": invoice_dict['customer_name'],
|
| 554 |
-
"email": invoice_dict['customer_email'],
|
| 555 |
-
"address": invoice_dict['customer_address']
|
| 556 |
-
},
|
| 557 |
-
"items": items_list
|
| 558 |
-
}
|
| 559 |
-
})
|
| 560 |
-
|
| 561 |
-
except Exception as e:
|
| 562 |
-
return str({
|
| 563 |
-
"success": False,
|
| 564 |
-
"error": str(e)
|
| 565 |
-
})
|
| 566 |
-
finally:
|
| 567 |
-
if 'conn' in locals():
|
| 568 |
-
conn.close()
|
| 569 |
-
|
| 570 |
-
# Create Gradio interfaces for each function
|
| 571 |
-
execute_query_interface = gr.Interface(
|
| 572 |
-
fn=execute_query,
|
| 573 |
-
inputs=[
|
| 574 |
-
gr.Textbox(lines=5, label="SQL Query"),
|
| 575 |
-
gr.Textbox(label="Parameters (comma-separated)", placeholder="Optional")
|
| 576 |
-
],
|
| 577 |
-
outputs=gr.Textbox(lines=10),
|
| 578 |
-
title="Execute SQL Query",
|
| 579 |
-
description="Execute a custom SQL query on the ERP database"
|
| 580 |
-
)
|
| 581 |
-
|
| 582 |
-
list_tables_interface = gr.Interface(
|
| 583 |
-
fn=list_erp_tables,
|
| 584 |
-
inputs=[],
|
| 585 |
-
outputs=gr.Textbox(lines=10),
|
| 586 |
-
title="List ERP Tables",
|
| 587 |
-
description="List all ERP tables in the database"
|
| 588 |
-
)
|
| 589 |
|
| 590 |
-
|
| 591 |
-
|
| 592 |
-
|
| 593 |
-
|
| 594 |
-
|
| 595 |
-
|
| 596 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 597 |
|
| 598 |
-
|
| 599 |
-
|
| 600 |
-
|
| 601 |
-
|
| 602 |
-
gr.Textbox(label="Product IDs (comma-separated)", placeholder="1, 2, 3"),
|
| 603 |
-
gr.Textbox(label="Quantities (comma-separated)", placeholder="2, 1, 3"),
|
| 604 |
-
gr.Textbox(label="Shipping Address"),
|
| 605 |
-
gr.Textbox(label="Shipping Country"),
|
| 606 |
-
gr.Textbox(label="Destination Country"),
|
| 607 |
-
gr.Number(label="Previous Order ID (optional)", precision=0)
|
| 608 |
-
],
|
| 609 |
-
outputs=gr.Textbox(lines=10),
|
| 610 |
-
title="Place New Order",
|
| 611 |
-
description="Place a new order in the ERP system"
|
| 612 |
-
)
|
| 613 |
|
| 614 |
-
|
| 615 |
-
|
| 616 |
-
|
| 617 |
-
|
| 618 |
-
|
| 619 |
-
],
|
| 620 |
-
outputs=gr.Textbox(lines=5),
|
| 621 |
-
title="Cancel Order",
|
| 622 |
-
description="Cancel an existing order in the ERP system"
|
| 623 |
-
)
|
| 624 |
|
| 625 |
-
|
| 626 |
-
|
| 627 |
-
|
| 628 |
-
|
| 629 |
-
|
| 630 |
-
],
|
| 631 |
-
outputs=gr.Textbox(lines=15),
|
| 632 |
-
title="Get Invoice Details",
|
| 633 |
-
description="Get invoice details by invoice ID or order ID"
|
| 634 |
-
)
|
| 635 |
|
| 636 |
-
|
| 637 |
-
demo = gr.TabbedInterface(
|
| 638 |
-
[
|
| 639 |
-
execute_query_interface,
|
| 640 |
-
list_tables_interface,
|
| 641 |
-
order_status_interface,
|
| 642 |
-
place_order_interface,
|
| 643 |
-
cancel_order_interface,
|
| 644 |
-
invoice_details_interface
|
| 645 |
-
],
|
| 646 |
-
[
|
| 647 |
-
"Execute Query",
|
| 648 |
-
"List Tables",
|
| 649 |
-
"Order Status",
|
| 650 |
-
"Place Order",
|
| 651 |
-
"Cancel Order",
|
| 652 |
-
"Invoice Details"
|
| 653 |
-
],
|
| 654 |
-
title="ERP System Tools"
|
| 655 |
-
)
|
| 656 |
|
| 657 |
-
|
| 658 |
-
|
|
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
+
import asyncio
|
| 3 |
+
import json
|
| 4 |
+
import time
|
| 5 |
+
import io
|
| 6 |
+
from typing import List
|
| 7 |
+
from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
|
| 8 |
+
from core.agents.SkeletonGraphAgent import SkeletonGraphAgent
|
| 9 |
+
from core.config.prompt import get_default_system_prompt
|
| 10 |
+
from core.config.metadata import create_metadata
|
| 11 |
+
|
| 12 |
+
# ----- State -----
|
| 13 |
+
conversation_history = []
|
| 14 |
+
|
| 15 |
+
# ----- Agent Initialization -----
|
| 16 |
+
async def initialize_agent_async(metadata):
    """Build a SkeletonGraphAgent from *metadata*.

    Returns:
        tuple: ``(agent, None)`` on success, ``(None, error_message)`` on failure.
    """
    try:
        agent = await SkeletonGraphAgent.create(metadata)
    except Exception as exc:
        # Surface the failure reason to the caller instead of raising into Gradio.
        return None, str(exc)
    return agent, None
|
| 22 |
|
| 23 |
+
def initialize_agent_handler(model_name, temperature, max_tokens, system_prompt, api_key):
    """Gradio callback: assemble agent metadata and initialize the agent.

    Args:
        model_name: Selected model identifier from the dropdown.
        temperature: Sampling temperature (0-1).
        max_tokens: Maximum response tokens.
        system_prompt: System prompt text.
        api_key: User-supplied API key.

    Returns:
        tuple: (agent_or_None, status_message, agent_status_badge) wired to
        the ``agent_state``, ``status_display`` and ``agent_status`` outputs.
    """
    metadata = create_metadata(model_name, temperature, max_tokens, system_prompt, api_key)

    # Use the existing event loop if available, otherwise create a new one.
    # Bug fix: in Gradio worker threads get_event_loop() can hand back a loop
    # that was already closed, which makes run_until_complete() raise — treat
    # a closed loop the same as a missing one.
    try:
        loop = asyncio.get_event_loop()
        if loop.is_closed():
            raise RuntimeError("event loop is closed")
    except RuntimeError:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

    agent, error = loop.run_until_complete(initialize_agent_async(metadata))
    if agent:
        return agent, "✅ Agent initialized successfully!", "🟢 Agent Ready"
    else:
        return None, f"❌ Failed to initialize agent: {error}", "🔴 Agent Failed"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 38 |
|
| 39 |
+
# ----- Chat Handler -----
|
| 40 |
+
async def get_agent_response_async(agent, user_input):
    """Run one chat turn against the agent's workflow.

    Appends the user message to the module-level ``conversation_history``,
    invokes the workflow with a snapshot of the history, and records the
    agent's reply.

    Returns:
        tuple: ``(reply_text, None)`` on success, ``(None, error_message)``
        on failure (in which case the user message is rolled back).
    """
    global conversation_history

    conversation_history.append(HumanMessage(content=user_input))
    payload = {"messages": conversation_history.copy()}

    try:
        result = await agent.workflow.ainvoke(input=payload)
        if result and 'messages' in result:
            # Walk backwards to find the most recent AI reply.
            for message in reversed(result['messages']):
                if isinstance(message, AIMessage):
                    conversation_history.append(message)
                    return message.content, None
        # No AIMessage came back — record a placeholder so history stays paired.
        fallback = AIMessage(content="No response generated")
        conversation_history.append(fallback)
        return fallback.content, None
    except Exception as exc:
        # Roll back the unanswered user turn so a retry doesn't duplicate it.
        conversation_history.pop()
        return None, str(exc)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 60 |
|
| 61 |
+
def chat_with_agent(agent, user_input, chat_history):
    """Gradio callback: process one chat message.

    Args:
        agent: The initialized SkeletonGraphAgent (or None if not ready).
        user_input: Text from the message box.
        chat_history: Current chatbot history (list of [user, bot] pairs).

    Returns:
        tuple: (updated chat_history, "" to clear the message box).
    """
    if not agent:
        chat_history.append([user_input, "❌ Please initialize your agent first."])
        return chat_history, ""

    if not user_input.strip():
        return chat_history, ""

    # Use the existing event loop if available, otherwise create a new one.
    # Reusing the loop from initialization keeps any loop-bound resources
    # (e.g. MCP connections) valid; a closed loop counts as missing.
    try:
        loop = asyncio.get_event_loop()
        if loop.is_closed():
            raise RuntimeError("event loop is closed")
    except RuntimeError:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

    response, error = loop.run_until_complete(get_agent_response_async(agent, user_input))

    # Bug fix: test the error channel, not the truthiness of the reply — an
    # empty string is a valid model response, and the old `if response:`
    # rendered it as "❌ Error: None".
    if error is None:
        chat_history.append([user_input, response])
    else:
        chat_history.append([user_input, f"❌ Error: {error}"])

    return chat_history, ""
|
| 84 |
+
|
| 85 |
+
def reset_conversation():
    """Drop the accumulated message history and reset the UI status widgets.

    Returns:
        tuple: (empty chatbot history, status text, agent-status badge).
    """
    global conversation_history
    conversation_history = []
    return [], "🔄 Conversation reset!", "🟢 Agent Ready"
|
| 89 |
+
|
| 90 |
+
# ----- Gradio Interface -----
|
| 91 |
+
def create_interface():
    """Assemble and return the TrackMate AI Gradio Blocks application.

    Layout: a banner header, a narrow configuration column (model, sampling,
    API key, system prompt, init button, status readouts) and a wide chat
    column. Event handlers are wired at the end.
    """
    with gr.Blocks(title="TrackMate AI") as app:
        # Holds the initialized agent between callbacks.
        agent_state = gr.State(value=None)

        # Gradient banner above the two-column layout.
        gr.HTML("""
        <div style="text-align: center; padding: 1rem 0; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; border-radius: 10px; margin-bottom: 2rem;">
            <h1 style="margin: 0; font-size: 2.5rem;">🧠 TrackMate AI</h1>
            <p style="margin: 0; opacity: 0.9;">Your smart companion for tracking and managing orders.</p>
        </div>
        """)

        with gr.Row():
            # --- Left column: agent configuration ---
            with gr.Column(scale=1):
                model_name = gr.Dropdown(
                    choices=["gpt-4o", "gpt-4", "gpt-3.5-turbo", "claude-3-sonnet", "claude-3-haiku"],
                    value="gpt-4o",
                    label="Model",
                )
                temperature = gr.Slider(0.0, 1.0, value=0.7, step=0.1, label="Temperature")
                max_tokens = gr.Number(value=1000, minimum=100, maximum=4000, step=100, label="Max Tokens")
                api_key = gr.Textbox(
                    value="",
                    label="API Key",
                    placeholder="Enter your API key",
                    type="password",
                    interactive=True,
                )
                with gr.Accordion("System Prompt", open=False):
                    system_prompt = gr.Textbox(
                        value=get_default_system_prompt(),
                        lines=10,
                        label="System Prompt",
                        interactive=True,
                    )

                init_btn = gr.Button("🚀 Initialize Agent")
                status_display = gr.Textbox(label="Status", interactive=False)
                agent_status = gr.Textbox(label="Agent Status", interactive=False)

            # --- Right column: chat area ---
            with gr.Column(scale=3, min_width=700):
                chatbot = gr.Chatbot(label="TrackMate AI Chat", height=500)
                msg_input = gr.Textbox(label="Message")
                send_btn = gr.Button("Send")
                reset_btn = gr.Button("🔄 Reset Chat")

        # --- Event wiring ---
        init_btn.click(
            fn=initialize_agent_handler,
            inputs=[model_name, temperature, max_tokens, system_prompt, api_key],
            outputs=[agent_state, status_display, agent_status],
        )

        reset_btn.click(
            fn=reset_conversation,
            outputs=[chatbot, status_display, agent_status],
        )

        # Send button and Enter key share the same handler.
        send_btn.click(
            fn=chat_with_agent,
            inputs=[agent_state, msg_input, chatbot],
            outputs=[chatbot, msg_input],
        )
        msg_input.submit(
            fn=chat_with_agent,
            inputs=[agent_state, msg_input, chatbot],
            outputs=[chatbot, msg_input],
        )

    return app
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 159 |
|
| 160 |
+
if __name__ == "__main__":
    # Bind to all interfaces so the app is reachable inside containers/Spaces.
    demo = create_interface()
    demo.launch(server_name="0.0.0.0", server_port=7860)
|
core/__init__.py
ADDED
|
File without changes
|
core/agents/SkeletonGraphAgent.py
ADDED
|
@@ -0,0 +1,814 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Module Name: SkeletonGraphAgent
|
| 3 |
+
Description: This module contains the Langgraph's Agent class that provides a flexible agent framework
|
| 4 |
+
using langgraph where all configuration is dynamically loaded from user input. This includes
|
| 5 |
+
system prompts, rules, input variables, LLM configuration, output structure, and tools.
|
| 6 |
+
Author: Abhishek Singh
|
| 7 |
+
Last Modified: 2025-05-29
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
import logging, re, ast, json, os, aiohttp
|
| 11 |
+
from tqdm import tqdm
|
| 12 |
+
from typing import Any, Dict, List, Optional, Union, TypedDict, Annotated, Sequence
|
| 13 |
+
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, ToolMessage, BaseMessage
|
| 14 |
+
from pydantic import BaseModel, Field, create_model
|
| 15 |
+
from langgraph.graph.message import add_messages
|
| 16 |
+
from langgraph.graph import StateGraph, START, END, MessagesState
|
| 17 |
+
from langgraph.prebuilt import ToolNode, tools_condition
|
| 18 |
+
from langchain_community.callbacks import get_openai_callback
|
| 19 |
+
from core.llms.base_llm import get_llm
|
| 20 |
+
from langchain_mcp_adapters.client import MultiServerMCPClient
|
| 21 |
+
from urllib.parse import urlparse
|
| 22 |
+
import asyncio
|
| 23 |
+
|
| 24 |
+
# Configure logging
|
| 25 |
+
logger = logging.getLogger(__name__)
|
| 26 |
+
|
| 27 |
+
class AgentState(TypedDict):
    """
    Represents the state of the agent.

    The ``add_messages`` reducer on ``messages`` tells langgraph to append
    newly produced messages to the running transcript rather than replace it.
    """
    messages: Annotated[Sequence[BaseMessage], add_messages]  # List of messages exchanged with the agent (append-only via add_messages)
    input_variables: Dict[str, Any]  # Input variables provided to the agent
    final_response: Optional[Dict[str, Any]]  # Final structured response from the agent (set by the 'respond' node, if configured)
|
| 34 |
+
|
| 35 |
+
class SkeletonGraphAgent:
|
| 36 |
+
"""
|
| 37 |
+
A flexible agent built with langgraph that takes all configuration from user input including:
|
| 38 |
+
- System prompt
|
| 39 |
+
- Rules to be applied in the system prompt
|
| 40 |
+
- Input variables
|
| 41 |
+
- LLM configuration
|
| 42 |
+
- Output structure
|
| 43 |
+
- Tools to be used
|
| 44 |
+
|
| 45 |
+
This agent serves as a foundation for creating custom agents without modifying code.
|
| 46 |
+
It dynamically loads tools, creates structured output formats, and handles various input types.
|
| 47 |
+
|
| 48 |
+
Args:
|
| 49 |
+
metadata (Dict[str, Any]): Configuration dictionary containing:
|
| 50 |
+
- model (Dict): LLM model configuration with name and parameters
|
| 51 |
+
- temperature (float): Temperature for LLM (0-1)
|
| 52 |
+
- tokens (int): Max tokens for LLM response
|
| 53 |
+
- system_prompt (str): Base system prompt for the agent
|
| 54 |
+
- input_variables (List[Dict]): List of input variables with names and default values
|
| 55 |
+
- outputType (List[Dict]): Structure of the expected output
|
| 56 |
+
- rules (List[Dict]): Rules to be applied in the system prompt
|
| 57 |
+
- tools (List[str]): List of tool names to be loaded and used by the agent
|
| 58 |
+
"""
|
| 59 |
+
|
| 60 |
+
    def __init__(self, metadata: Dict[str, Any]):
        """
        Initializes the SkeletonGraphAgent with the provided metadata.

        Only the synchronous configuration happens here; the MCP client
        connection and state-graph construction are completed by the async
        ``create`` factory method.

        Args:
            metadata (Dict[str, Any]): Configuration dictionary for the agent.
        """
        # MCP client/tools are populated later by _configure_mcp_client (async).
        self.client = None
        self.mcp_tools = []

        # Extract and set the LLM configuration parameters such as model name, temperature, max tokens, system prompt, etc.
        self._configure_llm_parameters(metadata)

        # Parse the output structure for structured responses
        self._parse_structured_output(metadata)

        # Create a pydantic model of the output structure (needed before LLM setup below)
        self._create_pydantic_model()

        # Configure the Agents tools (must precede _configure_llm, which binds them)
        self._configure_agents_tools(metadata)

        # Configuring the llm(s), one llm for generation and one for responding in output format
        self._configure_llm()
|
| 85 |
+
|
| 86 |
+
def _configure_llm_parameters(self, metadata: Dict[str, Any]):
|
| 87 |
+
"""
|
| 88 |
+
Configures the LLM parameters from the provided metadata.
|
| 89 |
+
|
| 90 |
+
Args:
|
| 91 |
+
metadata (Dict[str, Any]): Configuration dictionary containing LLM parameters.
|
| 92 |
+
|
| 93 |
+
Returns:
|
| 94 |
+
str: The name of the configured LLM.
|
| 95 |
+
"""
|
| 96 |
+
# LLM Configuration
|
| 97 |
+
self.model_name = metadata.get("model", {}).get("input", "gpt-4o")
|
| 98 |
+
self.temperature = metadata.get("temperature", 0)
|
| 99 |
+
self.max_tokens = metadata.get("tokens", 1000)
|
| 100 |
+
self.system_prompt = metadata.get("system_prompt", "You are a helpful AI assistant.")
|
| 101 |
+
self.rules = self._parse_literal(metadata.get("rules", "[]"), [])
|
| 102 |
+
self.input_variables = metadata.get("input_variables", [{"name": "input", "input": ""}])
|
| 103 |
+
self.api_key = metadata.get("api_key", None)
|
| 104 |
+
|
| 105 |
+
# If rules are provided, append them to the system prompt
|
| 106 |
+
if self.rules:
|
| 107 |
+
for rule in self.rules:
|
| 108 |
+
self.system_prompt += f"\n{rule['rulename']}: {rule['ruleDescription']}"
|
| 109 |
+
|
| 110 |
+
def _parse_structured_output(self, metadata: Dict[str, Any]):
|
| 111 |
+
"""
|
| 112 |
+
Parse the outputType metadata into a dictionary of field definitions.
|
| 113 |
+
This defines the structure of the agent's output.
|
| 114 |
+
|
| 115 |
+
Args:
|
| 116 |
+
metadata (Dict[str, Any]): The metadata containing output structure
|
| 117 |
+
default (Any): The default value to return if parsing fails
|
| 118 |
+
|
| 119 |
+
Returns:
|
| 120 |
+
Dict[str, Any]: Dictionary of output fields with their types and descriptions
|
| 121 |
+
"""
|
| 122 |
+
try:
|
| 123 |
+
# Parse the outputType from metadata
|
| 124 |
+
self.output_type = self._parse_literal(metadata.get("outputType", "[]"), [])
|
| 125 |
+
|
| 126 |
+
# Initialize output_fields as an empty dictionary
|
| 127 |
+
self.output_fields = {}
|
| 128 |
+
|
| 129 |
+
# Populate output_fields with the parsed outputType
|
| 130 |
+
for field in self.output_type:
|
| 131 |
+
self.output_fields[field["outputKey"]] = {
|
| 132 |
+
"type": field["outputKeyType"],
|
| 133 |
+
"description": field["outputDescription"]
|
| 134 |
+
}
|
| 135 |
+
|
| 136 |
+
except (ValueError, TypeError) as e:
|
| 137 |
+
logger.warning(f"Failed to parse output structure: {str(e)}")
|
| 138 |
+
|
| 139 |
+
def _create_pydantic_model(self):
|
| 140 |
+
"""
|
| 141 |
+
Dynamically create a Pydantic class based on user-provided fields.
|
| 142 |
+
This model defines the structure of the agent's output.
|
| 143 |
+
"""
|
| 144 |
+
# Check if output_fields is empty
|
| 145 |
+
if not self.output_fields:
|
| 146 |
+
logger.warning("No output fields defined. Using default model with a single 'output' field.")
|
| 147 |
+
|
| 148 |
+
try:
|
| 149 |
+
self.pydantic_model = None
|
| 150 |
+
|
| 151 |
+
if self.output_fields:
|
| 152 |
+
field_definitions = {
|
| 153 |
+
field_name: (field_info['type'], Field(
|
| 154 |
+
description=field_info['description']))
|
| 155 |
+
for field_name, field_info in self.output_fields.items()
|
| 156 |
+
}
|
| 157 |
+
|
| 158 |
+
self.pydantic_model = create_model(
|
| 159 |
+
'OutputFormat',
|
| 160 |
+
__doc__="Dynamically generated Pydantic model for agent output.",
|
| 161 |
+
**field_definitions
|
| 162 |
+
)
|
| 163 |
+
|
| 164 |
+
logger.debug(f"Created Pydantic model with fields: {list(self.output_fields.keys())}")
|
| 165 |
+
|
| 166 |
+
except Exception as e:
|
| 167 |
+
logger.error(f"Failed to create Pydantic model: {str(e)}")
|
| 168 |
+
|
| 169 |
+
    def _configure_agents_tools(self, metadata: Dict[str, Any]):
        """
        Configures the agent's tools based on the provided metadata.

        Accepts three forms in ``metadata["tools"]``: callables (e.g. @tool
        decorated functions) used as-is, strings dynamically imported from
        ``core.tools.<name>``, and pre-built tool instances. Failures to load
        any single tool are logged and skipped, never fatal.

        Args:
            metadata (Dict[str, Any]): Configuration dictionary containing tools.
        """
        # Get tools from metadata (may arrive as a string literal; normalize to a list)
        tools_config = self._parse_literal(metadata.get("tools", "[]"), [])

        # Initialize tools list
        self.tools = []

        # Handle both tool names and tool instances
        for tool in tools_config:
            if not tool:
                # Skip falsy entries (None, empty strings) silently
                continue

            try:
                if callable(tool):
                    # If tool is a function/callable (e.g. @tool decorated function), use it directly
                    self.tools.append(tool)
                    logger.info(f"Successfully loaded tool function: {tool.__name__}")
                elif isinstance(tool, str):
                    # If tool is a string, try to import from core.tools.<tool>
                    module_path = f"core.tools.{tool}"
                    # fromlist makes __import__ return the leaf module, not the package root
                    module = __import__(module_path, fromlist=[tool])
                    tool_class = getattr(module, tool)
                    # Use the attribute directly if callable; otherwise instantiate it
                    if callable(tool_class):
                        tool_instance = tool_class
                    else:
                        tool_instance = tool_class()
                    self.tools.append(tool_instance)
                    logger.info(f"Successfully loaded tool: {tool}")
                else:
                    # If it's already a tool instance, register it as-is
                    self.tools.append(tool)
                    logger.info(f"Successfully loaded tool instance: {tool.__class__.__name__}")
            except Exception as e:
                # A broken tool must not prevent the agent from starting
                logger.error(f"Failed to load tool {tool}: {str(e)}")
|
| 210 |
+
|
| 211 |
+
def _load_mcp_config(self, config_path: str = "core/config/mcp_config.json") -> Dict[str, Any]:
|
| 212 |
+
"""
|
| 213 |
+
Load MCP configuration from a JSON file.
|
| 214 |
+
|
| 215 |
+
Args:
|
| 216 |
+
config_path (str): Path to the MCP configuration file
|
| 217 |
+
|
| 218 |
+
Returns:
|
| 219 |
+
Dict[str, Any]: MCP configuration dictionary
|
| 220 |
+
"""
|
| 221 |
+
try:
|
| 222 |
+
if os.path.exists(config_path):
|
| 223 |
+
with open(config_path, 'r') as f:
|
| 224 |
+
config = json.load(f)
|
| 225 |
+
logger.info(f"Loaded MCP configuration from {config_path}")
|
| 226 |
+
return config
|
| 227 |
+
else:
|
| 228 |
+
logger.warning(f"MCP config file not found at {config_path}. Using empty config.")
|
| 229 |
+
return {}
|
| 230 |
+
except Exception as e:
|
| 231 |
+
logger.error(f"Failed to load MCP config from {config_path}: {str(e)}")
|
| 232 |
+
return {}
|
| 233 |
+
|
| 234 |
+
    async def _check_mcp_server_health(self, url: str, timeout: int = 5) -> bool:
        """
        Check if an MCP server is up and running.

        Attempts an HTTP GET first; if that fails, falls back to a raw TCP
        connect so servers speaking a non-HTTP transport still register as
        alive. Any unexpected error is treated as "down".

        Args:
            url (str): The URL of the MCP server (must include host and port)
            timeout (int): Timeout in seconds for the health check

        Returns:
            bool: True if server is up, False otherwise
        """
        try:
            parsed_url = urlparse(url)
            host = parsed_url.hostname
            port = parsed_url.port

            # Both host and an explicit port are required to probe the server
            if not host or not port:
                logger.warning(f"Invalid URL format: {url}")
                return False

            # Try to connect to the server over HTTP first
            async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=timeout)) as session:
                try:
                    async with session.get(f"http://{host}:{port}") as response:
                        # Any HTTP response (even an error status) means the server is alive
                        logger.debug(f"MCP server at {url} is responding (status: {response.status})")
                        return True
                except aiohttp.ClientError:
                    # HTTP failed; try a plain TCP connection as a weaker liveness probe
                    try:
                        reader, writer = await asyncio.wait_for(
                            asyncio.open_connection(host, port),
                            timeout=timeout
                        )
                        writer.close()
                        await writer.wait_closed()
                        logger.debug(f"MCP server at {url} is reachable via TCP")
                        return True
                    except Exception:
                        logger.debug(f"MCP server at {url} is not reachable")
                        return False
        except Exception as e:
            # Parsing errors, session construction failures, etc. count as down
            logger.debug(f"Health check failed for {url}: {str(e)}")
            return False
|
| 278 |
+
|
| 279 |
+
    async def _configure_mcp_client(self, metadata: Dict[str, Any]):
        """
        Configures the MCP client for the agent based on the provided metadata.
        Handles both stdio and HTTP-based transports (sse, streamable_http).

        On any failure the agent degrades gracefully: ``self.client`` is reset
        to None and ``self.mcp_tools`` to an empty list, and the error is logged.
        """
        try:
            # Load MCP configuration from file
            mcp_config = self._load_mcp_config()

            if not mcp_config:
                logger.info("No MCP configuration found. Skipping MCP client setup.")
                return

            # Create the MCP client with available servers
            self.client = MultiServerMCPClient(mcp_config)

            # Start the client connection.
            # NOTE(review): calling __aenter__ directly and a synchronous
            # get_tools() matches the pre-0.1 langchain-mcp-adapters API; newer
            # releases removed the context-manager protocol and made
            # get_tools() async — confirm the pinned package version supports
            # this usage.
            await self.client.__aenter__()

            # Get the tools from the client and merge them into the agent's tool list
            self.mcp_tools = self.client.get_tools()
            self.tools.extend(self.mcp_tools)

            logger.info(f"MCP client configured successfully with {len(mcp_config)} servers and {len(self.mcp_tools)} tools.")
        except Exception as e:
            logger.error(f"Failed to configure MCP client: {str(e)}")
            self.client = None
            self.mcp_tools = []
|
| 307 |
+
|
| 308 |
+
    @classmethod
    async def create(cls, metadata: Dict[str, Any]):
        """
        Async factory method to create and configure a SkeletonGraphAgent instance.

        Performs the setup that ``__init__`` cannot do synchronously:
        connecting the MCP client, re-binding the enlarged tool set to the
        LLM, and building the langgraph state graph.

        Args:
            metadata (Dict[str, Any]): Configuration dictionary.

        Returns:
            SkeletonGraphAgent: Configured instance.
        """
        self = cls(metadata)  # Call __init__ with metadata (synchronous setup)

        await self._configure_mcp_client(metadata)  # Configure async parts (MCP servers/tools)
        if self.mcp_tools:
            # Re-bind so the LLM sees the MCP tools that _configure_mcp_client
            # appended to self.tools (the first bind in _configure_llm happened
            # before MCP tools existed).
            self.main_llm = self.main_llm.bind_tools(self.tools)  # Bind MCP tools to the main LLM

        # Building the state graph for the agent, we build the state graph after the MCP client is configured
        # because the graph wiring depends on whether any tools are available.
        self._build_state_graph()

        return self
|
| 329 |
+
|
| 330 |
+
def _configure_llm(self):
|
| 331 |
+
"""
|
| 332 |
+
Configures the LLM for the agent based on the provided metadata.
|
| 333 |
+
|
| 334 |
+
Args:
|
| 335 |
+
metadata (Dict[str, Any]): Configuration dictionary containing LLM parameters.
|
| 336 |
+
"""
|
| 337 |
+
try:
|
| 338 |
+
# Initialize the LLM with the specified model and parameters
|
| 339 |
+
self.main_llm = get_llm(
|
| 340 |
+
model_name=self.model_name,
|
| 341 |
+
provider="openai", # Default provider, can be changed if needed
|
| 342 |
+
api_key=self.api_key,
|
| 343 |
+
temperature=self.temperature,
|
| 344 |
+
max_tokens=self.max_tokens
|
| 345 |
+
)
|
| 346 |
+
|
| 347 |
+
# If tools are configured, bind them to the main LLM
|
| 348 |
+
if self.tools:
|
| 349 |
+
self.main_llm = self.main_llm.bind_tools(self.tools)
|
| 350 |
+
|
| 351 |
+
logger.info(f"LLM configured with model: {self.model_name}, temperature: {self.temperature}, max tokens: {self.max_tokens}")
|
| 352 |
+
|
| 353 |
+
# If a structured output is required, configure the LLM for structured output
|
| 354 |
+
if self.pydantic_model:
|
| 355 |
+
# If a second LLM is needed for structured output, configure it similarly
|
| 356 |
+
self.llm_for_response = get_llm(
|
| 357 |
+
model_name=self.model_name,
|
| 358 |
+
temperature=self.temperature,
|
| 359 |
+
max_tokens=self.max_tokens
|
| 360 |
+
)
|
| 361 |
+
|
| 362 |
+
self.llm_with_structured_output = self.llm_for_response.with_structured_output(self.pydantic_model)
|
| 363 |
+
|
| 364 |
+
except Exception as e:
|
| 365 |
+
logger.error(f"Failed to configure LLM: {str(e)}")
|
| 366 |
+
|
| 367 |
+
def _parse_literal(self, value: str, default: Any) -> Any:
|
| 368 |
+
"""
|
| 369 |
+
Parse a string value into a Python object.
|
| 370 |
+
Handles various string formats including lists, dictionaries, and type references.
|
| 371 |
+
|
| 372 |
+
Args:
|
| 373 |
+
value (str): The string value to parse
|
| 374 |
+
default (Any): The default value to return if parsing fails
|
| 375 |
+
|
| 376 |
+
Returns:
|
| 377 |
+
Any: The parsed Python object or the default value
|
| 378 |
+
"""
|
| 379 |
+
try:
|
| 380 |
+
# Handle type references in the string
|
| 381 |
+
cleaned_value = re.sub(r"<class '(\w+)'>", r"\1", str(value))
|
| 382 |
+
# Handle type references without quotes
|
| 383 |
+
cleaned_value = re.sub(r'"type":\s*(\w+)', lambda m: f'"type": "{m.group(1)}"', cleaned_value)
|
| 384 |
+
return ast.literal_eval(cleaned_value)
|
| 385 |
+
except (ValueError, SyntaxError) as e:
|
| 386 |
+
logger.debug(f"Failed to parse literal value: {value}. Error: {str(e)}")
|
| 387 |
+
# Handle comma-separated values
|
| 388 |
+
if isinstance(value, str):
|
| 389 |
+
if ',' in value:
|
| 390 |
+
return [item.strip() for item in value.split(',')]
|
| 391 |
+
elif ' ' in value:
|
| 392 |
+
return value.split()
|
| 393 |
+
return default
|
| 394 |
+
|
| 395 |
+
def _build_state_graph(self):
    """
    Build and compile the langgraph state graph for the agent.

    Nodes:
        - "agent_node": main LLM step (always present; graph entry point).
        - "respond": structured-output step (only when a pydantic model is set).
        - "tools": tool-execution step (only when ``self.tools`` is configured).

    Edges are wired according to which of pydantic_model / tools / mcp_tools
    are configured. Failures are logged rather than raised.
    """
    try:
        # Initialize the state graph
        self.graph = StateGraph(AgentState)

        # Define the main agent node that processes input and generates a response
        self.graph.add_node("agent_node", self._agent_node)

        # Set entry point of the graph
        self.graph.set_entry_point("agent_node")

        # If output is required in a structured format, add a respond node
        if self.pydantic_model:
            # Add a node for responding in structured format
            self.graph.add_node("respond", self._respond)

            # Connect the respond node to the END
            self.graph.add_edge("respond", END)

        # Add a node if tools are configured
        if self.tools:
            # Add a node for tools
            self.graph.add_node("tools", ToolNode(self.tools))

            # Route tool results back into the agent node
            self.graph.add_edge("tools", "agent_node")

        # Conditional routing out of the agent node.
        # NOTE(review): the conditions below fire when `self.tools or
        # self.mcp_tools`, but the "tools" node above is only registered when
        # `self.tools` is truthy — an agent configured with only mcp_tools
        # would route "continue" to a non-existent node. Confirm mcp_tools
        # are merged into self.tools elsewhere before relying on this.
        if self.pydantic_model and (self.tools or self.mcp_tools):
            self.graph.add_conditional_edges(
                "agent_node",
                self._should_continue,
                {
                    "continue": "tools",  # Continue processing
                    "respond": "respond",  # Respond in structured format
                    # "end": END # End the conversation
                }
            )
        elif self.pydantic_model and not self.tools:
            # If structured output is required, go to respond node
            self.graph.add_edge(
                "agent_node",
                "respond"
            )
        elif not self.pydantic_model and (self.tools or self.mcp_tools):
            self.graph.add_conditional_edges(
                "agent_node",
                self._should_continue,
                {
                    "continue": "tools",  # Continue processing
                    "end": END  # End the conversation
                }
            )
        else:
            # If no structured output or tools, end the conversation
            self.graph.add_edge("agent_node", END)

        # Compile the graph into an executable workflow
        self.workflow = self.graph.compile()

        logger.info("State graph built successfully with tools and initial system message.")

    except Exception as e:
        logger.error(f"Failed to build state graph: {str(e)}")
|
| 462 |
+
|
| 463 |
+
def _agent_node(self, state: "AgentState") -> "AgentState":
    """
    Main agent node: prepend the system prompt (once), attach the input
    variables as a human message, and invoke the primary LLM.

    Args:
        state (AgentState): Current graph state.

    Returns:
        AgentState: New state with the LLM response appended to ``messages``;
        ``input_variables`` and ``final_response`` are preserved.
    """
    # Fix: copy the list so we never mutate the message list held inside the
    # incoming state (the original appended to the shared list in place).
    messages = list(state.get('messages', []))

    # Add the system message only if it's the first message
    if not messages or not any(isinstance(msg, SystemMessage) for msg in messages):
        messages = [SystemMessage(content=self.system_prompt)] + messages

    # Surface the input variables to the model as a human message
    input_variables = state.get("input_variables", {})
    if input_variables:
        messages.append(HumanMessage(content=json.dumps(input_variables)))
    else:
        # NOTE(review): this placeholder is appended on every turn that lacks
        # input variables — confirm that repetition is intended.
        messages.append(HumanMessage(content="No input variables provided."))

    response = self.main_llm.invoke(messages)

    # Return complete state
    return {
        "messages": messages + [response],
        "input_variables": input_variables,  # Preserve input variables
        "final_response": state.get("final_response")  # Preserve any existing final response
    }
|
| 491 |
+
|
| 492 |
+
def _respond(self, state: "AgentState") -> "AgentState":
    """
    Produce the final answer in the required structured format.

    Args:
        state (AgentState): The current state of the agent.

    Returns:
        AgentState: State with ``final_response`` set and an AIMessage
        mirroring the structured result appended to the transcript.
    """
    history = state.get("messages", [])

    # Ask the structured-output LLM to reduce the conversation to the model
    structured = self.llm_with_structured_output.invoke(history)

    # Mirror the structured result into the chat transcript
    echo = AIMessage(content=str(structured))

    return {
        "final_response": structured,
        "messages": state.get("messages", []) + [echo],
        "input_variables": state.get("input_variables", {}),  # Preserve input variables
    }
|
| 516 |
+
|
| 517 |
+
def _should_continue(self, state: AgentState) -> str:
|
| 518 |
+
"""
|
| 519 |
+
Determines whether the agent should continue processing based on the state.
|
| 520 |
+
"""
|
| 521 |
+
if not state.get("messages"):
|
| 522 |
+
return "end"
|
| 523 |
+
|
| 524 |
+
last_message = state["messages"][-1]
|
| 525 |
+
|
| 526 |
+
# Check if the last message is a ToolMessage or AIMessage
|
| 527 |
+
if not last_message.tool_calls:
|
| 528 |
+
if self.pydantic_model:
|
| 529 |
+
return "respond"
|
| 530 |
+
return "end"
|
| 531 |
+
return "continue"
|
| 532 |
+
|
| 533 |
+
|
| 534 |
+
def _show_graph(self):
    """
    Render the compiled workflow graph inline (notebook debugging helper).

    Requires IPython; imported lazily so the agent works outside notebooks.
    """
    from IPython.display import Image, display
    png_bytes = self.workflow.get_graph().draw_mermaid_png()
    display(Image(png_bytes))
|
| 541 |
+
|
| 542 |
+
async def _execute(self, input: str, metadata: Dict[str, Any] = None) -> Dict[str, Any]:
    """
    Execute the agent with the provided inputs.

    Args:
        input (str): The primary input text (note: intentionally shadows the
            builtin to keep the public interface unchanged).
        metadata (Dict[str, Any], optional): Additional metadata including:
            - chat_history: Optional chat history for context
            - input_variables: Values for the input variables defined during initialization
            Fix: the original default was ``{None}`` — a mutable *set* literal
            that crashed on ``.get``; use None with a guard instead.

    Returns:
        Dict[str, Any]: Dictionary containing the results and execution metadata
    """
    metadata = metadata or {}

    # Get the chat history from metadata, if provided
    chat_history = metadata.get("chat_history", [])

    # Convert history to BaseMessage objects if they're not already
    processed_history = self._parse_history(chat_history)

    # Add the main input to the messages
    messages = processed_history + [HumanMessage(content=input)]

    # Parse input variables from metadata
    input_variables = self._parse_input_variables(metadata.get("input_variables", []))

    try:
        # Batch mode when a "data" collection is supplied, otherwise single-shot
        if "data" in input_variables:
            result = await self._process_batch_data(input_variables, messages)
        else:
            result = await self._process_single_input(input_variables, messages)

    except Exception as e:
        logger.error(f"Error processing input: {str(e)}")
        result = {
            "success": False,
            "message": f"Error processing input: {str(e)}",
            "raw_response": None,
        }

    return result
|
| 584 |
+
|
| 585 |
+
def _parse_history(self, chat_history: List):
    """
    Normalize a heterogeneous chat history into BaseMessage objects.

    Accepts BaseMessage instances (kept as-is), (role, content) tuples
    (mapped by role, case-insensitively), and anything else (stringified
    into a HumanMessage).

    Args:
        chat_history (List): The chat history to parse

    Returns:
        List[BaseMessage]: List of BaseMessage objects representing the chat history
    """
    role_to_message = {
        "user": HumanMessage,
        "human": HumanMessage,
        "assistant": AIMessage,
        "ai": AIMessage,
        "system": SystemMessage,
    }

    parsed_history = []
    for msg in chat_history:
        if isinstance(msg, BaseMessage):
            parsed_history.append(msg)
            continue
        if isinstance(msg, tuple) and len(msg) == 2:
            role, content = msg
            builder = role_to_message.get(role.lower())
            if builder is not None:
                parsed_history.append(builder(content=content))
            else:
                # Unknown role: fold the role label into the text
                parsed_history.append(HumanMessage(content=f"{role}: {content}"))
        else:
            # Default to human message
            parsed_history.append(HumanMessage(content=str(msg)))

    return parsed_history
|
| 614 |
+
|
| 615 |
+
def _parse_input_variables(self, input_variables: List[Dict[str, Any]]) -> Dict[str, Any]:
|
| 616 |
+
"""
|
| 617 |
+
Parses the input variables from the provided list into a dictionary.
|
| 618 |
+
|
| 619 |
+
Args:
|
| 620 |
+
input_variables (List[Dict[str, Any]]): List of input variable definitions
|
| 621 |
+
|
| 622 |
+
Returns:
|
| 623 |
+
Dict[str, Any]: Dictionary of input variables with their names and values
|
| 624 |
+
"""
|
| 625 |
+
parsed_variables = {}
|
| 626 |
+
for var in input_variables:
|
| 627 |
+
if isinstance(var, dict) and "name" in var:
|
| 628 |
+
name = var["name"]
|
| 629 |
+
value = var.get("input", "")
|
| 630 |
+
parsed_variables[name] = value
|
| 631 |
+
else:
|
| 632 |
+
logger.warning(f"Invalid input variable format: {var}")
|
| 633 |
+
return parsed_variables
|
| 634 |
+
|
| 635 |
+
def _process_structured_output(self, output: Dict[str, Any]) -> Union[Dict[str, Any], str]:
|
| 636 |
+
"""
|
| 637 |
+
Process the structured output from the agent.
|
| 638 |
+
|
| 639 |
+
Args:
|
| 640 |
+
output (Dict[str, Any]): The structured output from the agent
|
| 641 |
+
|
| 642 |
+
Returns:
|
| 643 |
+
Union[Dict[str, Any], str]: Processed structured output
|
| 644 |
+
"""
|
| 645 |
+
try:
|
| 646 |
+
# If a Pydantic model is defined, validate and return the structured output
|
| 647 |
+
if self.pydantic_model:
|
| 648 |
+
return {key: getattr(output['final_response'], key) for key in self.output_fields.keys()}
|
| 649 |
+
else:
|
| 650 |
+
# If no structured output is defined, return the raw output
|
| 651 |
+
return output
|
| 652 |
+
except Exception as e:
|
| 653 |
+
logger.error(f"Error processing structured output: {str(e)}")
|
| 654 |
+
return str(output)
|
| 655 |
+
|
| 656 |
+
|
| 657 |
+
async def _process_batch_data(self, execution_inputs: Dict[str, Any], messages) -> Dict[str, Any]:
    """
    Process a batch of data items.

    Runs the compiled workflow once per element of ``execution_inputs["data"]``,
    collecting one (optionally structured) result per element. Token usage for
    the whole batch is aggregated via the OpenAI callback.

    Args:
        execution_inputs (Dict[str, Any]): The execution inputs including data array
        messages: Starting conversation transcript reused for every item.

    Returns:
        Dict[str, Any]: Results of batch processing ("message" holds the
        JSON-serialised per-item results; "metainfo" holds token usage)
    """
    with get_openai_callback() as cb:
        response = []

        try:
            # Create a copy of inputs without the data field
            data_inputs = execution_inputs.copy()
            data = data_inputs.pop("data")

            # Parse data if it's a string
            if isinstance(data, str):
                data = self._parse_literal(data, [])

            total_docs = len(data)
            logger.info(f"Processing batch of {total_docs} documents")

            # Process each data item with a progress bar
            with tqdm(total=total_docs, desc="Processing documents") as pbar:
                for doc in data:
                    # Add the current data item to the inputs
                    data_inputs["data"] = doc

                    # Initialize the initial state with messages and input variables
                    initial_state = {
                        "messages": messages,
                        "input_variables": data_inputs,
                        "final_response": None
                    }

                    # Invoke the agent
                    result = await self.workflow.ainvoke(initial_state)

                    # Process the structured output if a Pydantic model is defined
                    if self.pydantic_model:
                        output_response = self._process_structured_output(result)
                    else:
                        # If no structured output is defined, use the raw result
                        output_response = result

                    response.append(output_response)
                    pbar.update(1)

            # Create the final result with metadata
            result = {
                "success": True,
                "message": json.dumps(response),
                "metainfo": self._get_callback_metadata(cb)
            }

        except Exception as e:
            logger.error(f"Error processing batch data: {str(e)}")
            result = {
                "success": False,
                "message": f"Error processing batch data: {str(e)}",
                "metainfo": self._get_callback_metadata(cb)
            }

        return result
|
| 724 |
+
|
| 725 |
+
async def _process_single_input(self, execution_inputs: Dict[str, Any], messages) -> Dict[str, Any]:
    """
    Run the workflow once for a single set of execution inputs.

    Args:
        execution_inputs (Dict[str, Any]): The execution inputs
        messages: Starting conversation transcript for the run.

    Returns:
        Dict[str, Any]: Result of processing, including token-usage metainfo.
    """
    with get_openai_callback() as cb:
        try:
            # Seed the graph state for this run
            initial_state = {
                "messages": messages,
                "input_variables": execution_inputs,
                "final_response": None,
            }

            # Drive the compiled workflow to completion
            raw_state = await self.workflow.ainvoke(initial_state)

            # Reduce to the structured fields when a pydantic model is configured
            dict_data = self._process_structured_output(raw_state)

            result = {
                "success": True,
                "message": str(dict_data),
                "response": raw_state,
                "metainfo": self._get_callback_metadata(cb),
            }

        except Exception as e:
            logger.error(f"Error processing input: {str(e)}")
            result = {
                "success": False,
                "message": f"Error processing input: {str(e)}",
                "metainfo": self._get_callback_metadata(cb),
            }

        return result
|
| 767 |
+
|
| 768 |
+
def _get_callback_metadata(self, cb) -> Dict[str, Any]:
|
| 769 |
+
"""
|
| 770 |
+
Get metadata from the OpenAI callback.
|
| 771 |
+
|
| 772 |
+
Args:
|
| 773 |
+
cb: The OpenAI callback object
|
| 774 |
+
|
| 775 |
+
Returns:
|
| 776 |
+
Dict[str, Any]: Metadata about the API call
|
| 777 |
+
"""
|
| 778 |
+
return {
|
| 779 |
+
"prompt_tokens": cb.prompt_tokens,
|
| 780 |
+
"completion_tokens": cb.completion_tokens,
|
| 781 |
+
"total_tokens": cb.total_tokens,
|
| 782 |
+
"total_cost": cb.total_cost
|
| 783 |
+
}
|
| 784 |
+
|
| 785 |
+
async def close(self):
    """
    Clean up resources, particularly the MCP client connection.

    A no-op when no client is attached; otherwise always clears the client
    and mcp_tools references, even when teardown fails.
    """
    if not self.client:
        return
    try:
        await self.client.__aexit__(None, None, None)
        logger.info("MCP client connection closed successfully.")
    except Exception as e:
        logger.error(f"Error closing MCP client: {str(e)}")
    finally:
        self.client = None
        self.mcp_tools = []
|
| 798 |
+
|
| 799 |
+
def __del__(self):
|
| 800 |
+
"""
|
| 801 |
+
Destructor to ensure cleanup when the object is garbage collected.
|
| 802 |
+
"""
|
| 803 |
+
if self.client:
|
| 804 |
+
import asyncio
|
| 805 |
+
try:
|
| 806 |
+
# Try to close the client if there's an active event loop
|
| 807 |
+
loop = asyncio.get_event_loop()
|
| 808 |
+
if loop.is_running():
|
| 809 |
+
loop.create_task(self.close())
|
| 810 |
+
else:
|
| 811 |
+
asyncio.run(self.close())
|
| 812 |
+
except Exception:
|
| 813 |
+
# If we can't close properly, at least log it
|
| 814 |
+
logger.warning("Could not properly close MCP client in destructor")
|
core/agents/__init__.py
ADDED
|
File without changes
|
core/config/mcp_config.json
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"math": {
|
| 3 |
+
"command": "python",
|
| 4 |
+
"args": ["core/mcp_tools/math_server.py"],
|
| 5 |
+
"transport": "stdio"
|
| 6 |
+
},
|
| 7 |
+
"weather": {
|
| 8 |
+
"command": "python",
|
| 9 |
+
"args": ["core/mcp_tools/weather_server.py"],
|
| 10 |
+
"transport": "stdio"
|
| 11 |
+
},
|
| 12 |
+
"global_disruptions": {
|
| 13 |
+
"command": "python",
|
| 14 |
+
"args": ["core/mcp_tools/global_disruptions_server.py"],
|
| 15 |
+
"transport": "stdio"
|
| 16 |
+
},
|
| 17 |
+
"erp": {
|
| 18 |
+
"command": "python",
|
| 19 |
+
"args": ["core/mcp_tools/erp_server.py"],
|
| 20 |
+
"transport": "stdio"
|
| 21 |
+
}
|
| 22 |
+
}
|
core/config/mcp_config.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Module for loading MCP configuration from JSON file.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import json
|
| 6 |
+
import os
|
| 7 |
+
from typing import Dict, Any
|
| 8 |
+
|
| 9 |
+
def load_mcp_config() -> Dict[str, Any]:
    """
    Load the MCP server configuration JSON that sits next to this module.

    Returns:
        Dict[str, Any]: Parsed configuration, or {} when the file is missing
        or unreadable (both cases are reported on stdout).
    """
    config_path = os.path.join(os.path.dirname(__file__), "mcp_config.json")
    try:
        if not os.path.exists(config_path):
            print(f"MCP config file not found at {config_path}. Using empty config.")
            return {}
        with open(config_path, 'r') as f:
            return json.load(f)
    except Exception as e:
        print(f"Failed to load MCP config from {config_path}: {str(e)}")
        return {}

# Load the configuration when the module is imported
mcp_config = load_mcp_config()
|
core/config/metadata.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
def create_metadata(model_name, temperature, max_tokens, system_prompt, api_key=None, provider="openai"):
    """
    Build the default agent-configuration metadata dictionary.

    Args:
        model_name: Model identifier, stored under ``metadata["model"]["input"]``.
        temperature: Sampling temperature for the LLM.
        max_tokens: Maximum completion tokens.
        system_prompt: System prompt text for the agent.
        api_key: Optional provider API key. Fix: the original signature used
            the *annotation* ``api_key: None`` instead of a default, which
            accidentally made the argument mandatory.
        provider: LLM provider name (default "openai").

    Returns:
        dict: Metadata with model settings, default input variables, empty
        tool/output lists, and baseline conversational rules.
    """
    return {
        "model": {"input": model_name},
        "temperature": temperature,
        "tokens": max_tokens,
        "system_prompt": system_prompt,
        "api_key": api_key,
        "provider": provider,
        "input_variables": [
            {"name": "input", "input": ""},
            {"name": "language", "input": "English"},
        ],
        "tools": [],
        "outputType": [],
        "rules": [
            {"rulename": "Be helpful", "ruleDescription": "Always try to be helpful and provide accurate information."},
            {"rulename": "Be conversational", "ruleDescription": "Maintain a friendly and engaging conversational tone."}
        ],
    }
|
core/config/prompt.py
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
def get_default_system_prompt():
    """Return the default system prompt for the order-management agent.

    The prompt instructs the model to follow a disruption-aware order-status
    workflow (status -> route -> disruption check -> optional replacement)
    and a table-discovery workflow (`list_erp_tables` then `execute_query`)
    for database questions.
    """
    return """You are a helpful AI assistant specializing in order management and shipping operations. Engage in natural conversation with the user while proactively managing their orders and shipping concerns.

## Core Workflow for Order Status Requests

When the user asks about order status, follow this comprehensive workflow:

1. **Identify Order ID**: Extract the order ID from the user's query
2. **Get Order Status**: Use the appropriate tool to retrieve detailed order status information
3. **Extract Shipping Details**: From the order details, identify the origin and destination countries
4. **Check for Disruptions**: Use the disruption checking tool with the origin and destination countries to identify any active shipping disruptions
5. **Assess Impact**: Determine if any found disruptions affect the current order's shipping route
6. If disruptions are found that could impact the order, ask the user if they want to replace the order with a new shipping route:
   - Cancel the current order using the order cancellation tool
   - Place a new order using the order placement tool, the new order should be placed with the same items but a different shipping route.
   - Verify the new route has no active disruptions before confirming the replacement order

When the user asks about data from the database, follow this workflow:

1. **Identify the tables**: Extract the relevant table names using the `list_erp_tables` tool.
2. **Query the Database**: Use the `execute_query` tool to retrieve data from the identified tables.

## Communication Guidelines

- Maintain a helpful, conversational tone
- Prioritize accuracy in all responses
- Be proactive in preventing shipping issues
- Explain the reasoning behind any order changes
- Confirm all actions taken with the customer
- Provide realistic timelines and expectations"""
|
core/llms/__init__.py
ADDED
|
File without changes
|
core/llms/base_llm.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import logging
|
| 3 |
+
from dotenv import load_dotenv
|
| 4 |
+
from langchain_openai import AzureChatOpenAI, ChatOpenAI
|
| 5 |
+
from langchain_google_genai import ChatGoogleGenerativeAI
|
| 6 |
+
|
| 7 |
+
# Configure logging
|
| 8 |
+
logger = logging.getLogger(__name__)
|
| 9 |
+
|
| 10 |
+
load_dotenv()
|
| 11 |
+
|
| 12 |
+
os.environ["OPENAI_API_VERSION"] = "2024-12-01-preview"
|
| 13 |
+
|
| 14 |
+
def get_llm(model_name, provider="openai", api_key=None, **kwargs):
    """
    Get a language model instance based on the specified provider and model name.

    Args:
        model_name (str): The name of the model to use.
        provider (str): The provider of the model ("openai" or "gemini").
            NOTE(review): AzureChatOpenAI is imported at module level and the
            original docstring mentioned "azure", but no azure branch exists —
            confirm whether Azure support should be added.
        api_key (str, optional): API key for the provider. If None, falls back
            to the provider's environment variable.
        **kwargs: Extra keyword arguments forwarded to the chat-model
            constructor (e.g. temperature, max_tokens).

    Returns:
        llm: An instance of the language model, or None for an unknown provider.
    """
    if provider == "openai":
        logging.info("Using OpenAI provider with model: %s", model_name)

        # Use provided API key or fall back to environment variable
        openai_api_key = api_key or os.getenv("OPENAI_API_KEY")

        # Fix: honor the requested model (it was hard-coded to "gpt-4o-mini",
        # contradicting the log line) and forward kwargs so callers' settings
        # such as temperature/max_tokens are no longer silently dropped.
        llm = ChatOpenAI(
            model_name=model_name,
            api_key=openai_api_key,
            **kwargs,
        )
    elif provider == "gemini":
        logging.info("Using Gemini provider with model: %s", model_name)

        # Use provided API key or fall back to environment variable
        google_api_key = api_key or os.getenv("GOOGLE_API_KEY")
        # NOTE(review): the gemini model is still pinned to 'gemini-2.0-flash'
        # regardless of model_name — confirm whether it should follow the
        # parameter as well.
        llm = ChatGoogleGenerativeAI(
            model='gemini-2.0-flash',
            api_key=google_api_key,
            **kwargs
        )
    else:
        logging.warning("Unknown LLM provider: %s", provider)
        return None

    return llm
|
core/mcp_tools/__init__.py
ADDED
|
File without changes
|
core/mcp_tools/erp_server.py
ADDED
|
@@ -0,0 +1,544 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# erp_server.py
|
| 2 |
+
from typing import Dict, Any, List, Optional
|
| 3 |
+
import psycopg2
|
| 4 |
+
from psycopg2.extras import RealDictCursor
|
| 5 |
+
from mcp.server.fastmcp import FastMCP
|
| 6 |
+
import os
|
| 7 |
+
from dotenv import load_dotenv
|
| 8 |
+
import json
|
| 9 |
+
from datetime import datetime
|
| 10 |
+
|
| 11 |
+
load_dotenv()
|
| 12 |
+
|
| 13 |
+
mcp = FastMCP("ERP Database")
|
| 14 |
+
|
| 15 |
+
def get_db_connection():
    """
    Open a new PostgreSQL connection configured via POSTGRES_* environment
    variables (sensible local defaults are used when unset).

    Returns:
        A psycopg2 connection object.

    Raises:
        Exception: wrapping any underlying connection failure.
    """
    try:
        settings = {
            "host": os.getenv("POSTGRES_HOST", "localhost"),
            "database": os.getenv("POSTGRES_DB", "erp_db"),
            "user": os.getenv("POSTGRES_USER", "postgres"),
            "password": os.getenv("POSTGRES_PASSWORD", ""),
            "port": os.getenv("POSTGRES_PORT", "5432"),
        }
        return psycopg2.connect(**settings)
    except Exception as e:
        raise Exception(f"Database connection failed: {str(e)}")
|
| 28 |
+
|
| 29 |
+
@mcp.tool()
async def execute_query(query: str, params: Optional[List] = None) -> Dict[str, Any]:
    """
    Execute a custom SQL query on the ERP database.

    SECURITY NOTE: this tool runs arbitrary caller-supplied SQL (typically
    from an LLM); containment relies on the database user's privileges.
    Always pass values through ``params`` rather than string formatting.

    Args:
        query (str): SQL query to execute
        params (List, optional): Parameters for parameterized queries

    Returns:
        dict: Query results or error message
        NOTE(review): despite the annotation, the success paths actually
        return a list of row dicts, the string "No results found", or a
        status string for non-SELECT statements; only the error path returns
        a dict. Callers must handle all these shapes.
    """
    try:
        conn = get_db_connection()
        cursor = conn.cursor(cursor_factory=RealDictCursor)

        if params:
            cursor.execute(query, params)
        else:
            cursor.execute(query)

        # Check if it's a SELECT query
        if query.strip().upper().startswith('SELECT'):
            results = cursor.fetchall()
            # Convert RealDictRow to regular dict for JSON serialization
            results = [dict(row) for row in results]
            if not results:
                return "No results found"
            return results
        else:
            # For INSERT, UPDATE, DELETE queries
            conn.commit()
            return f"Query executed successfully. {cursor.rowcount} rows affected."

    except Exception as e:
        return {
            "success": False,
            "error": str(e)
        }
    finally:
        # Close the connection only if it was opened (guards against a
        # failure inside get_db_connection itself).
        if 'conn' in locals():
            conn.close()
|
| 71 |
+
|
| 72 |
+
@mcp.tool()
async def list_erp_tables() -> Dict[str, Any]:
    """
    List all ERP tables in the database.

    Looks up public-schema tables whose names start with ``erp_``.

    Returns:
        dict: success flag, human-readable message, and the table names
        (plus a count on success).
    """
    query = """
    SELECT table_name
    FROM information_schema.tables
    WHERE table_schema = 'public'
    AND table_name LIKE 'erp_%'
    ORDER BY table_name;
    """

    conn = None
    try:
        conn = get_db_connection()
        cursor = conn.cursor(cursor_factory=RealDictCursor)
        cursor.execute(query)
        rows = cursor.fetchall()

        table_names = [row['table_name'] for row in rows]
        if not table_names:
            return {
                "success": False,
                "message": "No ERP tables found",
                "tables": []
            }
        return {
            "success": True,
            "message": f"Found {len(table_names)} tables",
            "count": len(table_names),
            "tables": table_names
        }

    except Exception as e:
        return {
            "success": False,
            "error": str(e)
        }
    finally:
        if conn is not None:
            conn.close()
|
| 116 |
+
|
| 117 |
+
@mcp.tool()
async def get_order_status(order_id: int) -> Dict[str, Any]:
    """
    Get the status of an order by order ID.

    Combines the order header (joined with its customer), the order's line
    items (joined with product name/SKU), and the full status-change history.

    Args:
        order_id (int): The ID of the order to check

    Returns:
        dict: Order status information including shipping and destination
              countries; {"success": False, ...} when the order does not
              exist or the database call fails.
    """
    # Header query: o.* already includes shipping_country/destination_country;
    # they are re-selected explicitly to make the contract of this tool visible.
    query = """
    SELECT o.*, c.name as customer_name, c.email as customer_email,
           o.shipping_country, o.destination_country
    FROM erp_orders o
    JOIN erp_customers c ON o.customer_id = c.customer_id
    WHERE o.order_id = %s;
    """

    try:
        conn = get_db_connection()
        cursor = conn.cursor(cursor_factory=RealDictCursor)
        cursor.execute(query, [order_id])
        order = cursor.fetchone()

        if not order:
            return {
                "success": False,
                "message": f"Order with ID {order_id} not found"
            }

        # Get order items
        items_query = """
        SELECT oi.order_item_id, oi.order_id, oi.product_id, oi.quantity, oi.unit_price, oi.subtotal,
               p.product_name, p.sku
        FROM erp_order_items oi
        JOIN erp_products p ON oi.product_id = p.product_id
        WHERE oi.order_id = %s;
        """
        cursor.execute(items_query, [order_id])
        items = cursor.fetchall()

        # Get order history, most recent status change first.
        history_query = """
        SELECT * FROM erp_order_history
        WHERE order_id = %s
        ORDER BY timestamp DESC;
        """
        cursor.execute(history_query, [order_id])
        history = cursor.fetchall()

        # Convert RealDictRow objects to plain dicts for JSON serialization.
        order_dict = dict(order)
        items_list = [dict(item) for item in items]
        history_list = [dict(entry) for entry in history]

        # NOTE(review): items/history rows may still contain Decimal values
        # (unit_price, subtotal) — confirm the MCP layer serializes them.
        return {
            "success": True,
            "order": {
                "order_id": order_id,
                "customer": {
                    "customer_id": order_dict['customer_id'],
                    "name": order_dict['customer_name'],
                    "email": order_dict['customer_email']
                },
                "status": order_dict['status'],
                # .get() with 'N/A' defaults guards against columns missing
                # from the row mapping.
                "shipping_address": order_dict.get('shipping_address', 'N/A'),
                "shipping_country": order_dict.get('shipping_country', 'N/A'),
                "destination_country": order_dict.get('destination_country', 'N/A'),
                "items": items_list,
                "history": history_list
            }
        }

    except Exception as e:
        return {
            "success": False,
            "error": str(e)
        }
    finally:
        # 'conn' may be unbound if the connection attempt itself failed.
        if 'conn' in locals():
            conn.close()
|
| 199 |
+
|
| 200 |
+
@mcp.tool()
async def place_new_order(
    customer_id: int,
    items: List[Dict[str, Any]],
    shipping_address: str,
    shipping_country: str,
    destination_country: str,
    previous_order_id: Optional[int] = None
) -> Dict[str, Any]:
    """
    Place a new order in the ERP system, after ensuring there are no global disruptions affecting the shipment.

    Creates the order header, its line items (decrementing product stock),
    an order-history entry, and an invoice, all in a single transaction
    committed at the end. On any failure the transaction is rolled back.

    NOTE: the disruption check itself is expected to be performed by the
    calling agent (via the global-disruptions tool) before invoking this tool.

    Args:
        customer_id (int): ID of the customer placing the order
        items (List[Dict]): List of items to order, each with product_id and quantity
        shipping_address (str): Shipping address for the order
        shipping_country (str): Country where the order will be shipped from
        destination_country (str): Country where the order will be delivered to
        previous_order_id (int, optional): ID of a previous order this is replacing

    Returns:
        dict: New order information on success; {"success": False, "error": str}
              when a product is unknown or the database operation fails.
    """
    try:
        conn = get_db_connection()
        cursor = conn.cursor(cursor_factory=RealDictCursor)

        # Look up each product's price exactly once and validate existence up
        # front, so the total and the per-line prices come from the same values
        # (the original code re-queried every product a second time per line).
        product_query = "SELECT price FROM erp_products WHERE product_id = %s;"
        unit_prices: Dict[int, Any] = {}
        for item in items:
            cursor.execute(product_query, [item['product_id']])
            product = cursor.fetchone()
            if not product:
                return {
                    "success": False,
                    "error": f"Product with ID {item['product_id']} not found"
                }
            unit_prices[item['product_id']] = product['price']

        # Decimal arithmetic from the DB driver; starts at 0 for empty carts.
        total_amount = sum(
            (unit_prices[item['product_id']] * item['quantity'] for item in items),
            0
        )

        # Insert new order into the erp_orders table for the customer.
        order_query = """
        INSERT INTO erp_orders (
            customer_id, order_date, total_amount, status,
            shipping_address, shipping_country, destination_country, previous_order_id,
            estimated_delivery, payment_status
        ) VALUES (
            %s, CURRENT_DATE, %s, 'Processing',
            %s, %s, %s, %s,
            CURRENT_DATE + INTERVAL '7 days', 'Pending'
        ) RETURNING order_id;
        """
        cursor.execute(order_query, [
            customer_id, total_amount, shipping_address, shipping_country, destination_country, previous_order_id
        ])
        new_order_id = cursor.fetchone()['order_id']

        # Insert line items and decrement stock for each ordered product.
        for item in items:
            unit_price = unit_prices[item['product_id']]
            subtotal = unit_price * item['quantity']

            item_query = """
            INSERT INTO erp_order_items (
                order_id, product_id, quantity, unit_price, subtotal
            ) VALUES (
                %s, %s, %s, %s, %s
            );
            """
            cursor.execute(item_query, [
                new_order_id, item['product_id'], item['quantity'],
                unit_price, subtotal
            ])

            # Stock may go negative here — there is deliberately no
            # availability check; the schema does not enforce one either.
            update_stock_query = """
            UPDATE erp_products
            SET stock_quantity = stock_quantity - %s
            WHERE product_id = %s;
            """
            cursor.execute(update_stock_query, [item['quantity'], item['product_id']])

        # Create order history entry in erp_order_history for the new order.
        history_query = """
        INSERT INTO erp_order_history (
            order_id, timestamp, status_change, notes, updated_by
        ) VALUES (
            %s, CURRENT_TIMESTAMP, 'Order Created', 'New order placed', 'System'
        );
        """
        cursor.execute(history_query, [new_order_id])

        # If this is a replacement order, add a note to the previous order.
        if previous_order_id:
            # BUGFIX: the note text is now passed as a bound parameter.
            # Previously '#%s' sat inside the quoted SQL literal; psycopg2
            # substitutes placeholders even inside string literals, so the
            # statement only worked by accident of integer adaptation.
            prev_order_note_query = """
            INSERT INTO erp_order_history (
                order_id, timestamp, status_change, notes, updated_by
            ) VALUES (
                %s, CURRENT_TIMESTAMP, 'Replaced', %s, 'System'
            );
            """
            cursor.execute(prev_order_note_query, [
                previous_order_id, f"Order replaced by order #{new_order_id}"
            ])

        # Generate invoice for the new order; invoice number derives from the
        # order id ('INV-<order_id>').
        invoice_query = """
        INSERT INTO erp_invoices (
            order_id, invoice_date, amount, payment_terms, due_date, is_paid, invoice_number
        ) VALUES (
            %s, CURRENT_DATE, %s, 'Net 30', CURRENT_DATE + INTERVAL '30 days', FALSE, 'INV-' || %s
        ) RETURNING invoice_id;
        """
        cursor.execute(invoice_query, [new_order_id, total_amount, new_order_id])
        invoice_id = cursor.fetchone()['invoice_id']

        # Single commit: header, items, stock, history and invoice are atomic.
        conn.commit()

        # Read back the committed order for server-computed columns.
        cursor.execute("SELECT * FROM erp_orders WHERE order_id = %s;", [new_order_id])
        order = cursor.fetchone()

        order_dict = dict(order)

        # Return JSON formatted response.
        return {
            "success": True,
            "order": {
                "order_id": new_order_id,
                "invoice_id": invoice_id,
                "total_amount": float(total_amount),
                "status": order_dict['status'],
                "estimated_delivery": order_dict['estimated_delivery'].strftime("%Y-%m-%d") if order_dict['estimated_delivery'] else None,
                "customer_id": customer_id,
                "shipping_address": shipping_address,
                "shipping_country": shipping_country,
                "destination_country": destination_country,
                "previous_order_id": previous_order_id
            }
        }

    except Exception as e:
        # Undo any partial writes before reporting the failure.
        if 'conn' in locals():
            conn.rollback()
        return {
            "success": False,
            "error": str(e)
        }
    finally:
        if 'conn' in locals():
            conn.close()
|
| 352 |
+
|
| 353 |
+
@mcp.tool()
async def cancel_order(order_id: int, reason: str) -> Dict[str, Any]:
    """
    Cancel an existing order in the ERP system.

    Refuses to cancel orders already Delivered or Cancelled. On success it
    marks the order and its payment as Cancelled, records a history entry,
    restores stock for every line item, and marks the invoice unpaid —
    all committed as one transaction.

    Args:
        order_id (int): ID of the order to cancel
        reason (str): Reason for cancellation

    Returns:
        dict: Result of the cancellation operation
    """
    try:
        conn = get_db_connection()
        cursor = conn.cursor(cursor_factory=RealDictCursor)

        # Check if order exists and can be cancelled.
        check_query = """
        SELECT status, customer_id FROM erp_orders WHERE order_id = %s;
        """
        cursor.execute(check_query, [order_id])
        order = cursor.fetchone()

        if not order:
            return {
                "success": False,
                "error": f"Order with ID {order_id} not found"
            }

        # Terminal states cannot be cancelled again.
        if order['status'] in ['Delivered', 'Cancelled']:
            return {
                "success": False,
                "error": f"Cannot cancel order with status '{order['status']}'"
            }

        # Fetch line items first so stock can be restored below.
        items_query = """
        SELECT product_id, quantity FROM erp_order_items WHERE order_id = %s;
        """
        cursor.execute(items_query, [order_id])
        items = cursor.fetchall()

        # Update order status to Cancelled (order and payment together).
        update_query = """
        UPDATE erp_orders SET status = 'Cancelled', payment_status = 'Cancelled'
        WHERE order_id = %s;
        """
        cursor.execute(update_query, [order_id])

        # Add an audit entry capturing the caller-supplied reason.
        history_query = """
        INSERT INTO erp_order_history (
            order_id, timestamp, status_change, notes, updated_by
        ) VALUES (
            %s, CURRENT_TIMESTAMP, 'Cancelled', %s, 'System'
        );
        """
        cursor.execute(history_query, [order_id, f"Order cancelled: {reason}"])

        # Restore product stock quantities for each cancelled line.
        for item in items:
            restore_stock_query = """
            UPDATE erp_products
            SET stock_quantity = stock_quantity + %s
            WHERE product_id = %s;
            """
            cursor.execute(restore_stock_query, [item['quantity'], item['product_id']])

        # Update invoice if exists.
        # NOTE(review): this only flips is_paid to FALSE — the invoice is not
        # voided or amount-adjusted; confirm that downstream billing expects this.
        invoice_query = """
        UPDATE erp_invoices
        SET is_paid = FALSE
        WHERE order_id = %s;
        """
        cursor.execute(invoice_query, [order_id])

        # All-or-nothing: status change, history, stock and invoice together.
        conn.commit()

        return {
            "success": True,
            "message": f"Order #{order_id} has been successfully cancelled",
            "reason": reason,
            "items_restored": len(items)
        }

    except Exception as e:
        # Roll back partial changes before reporting the failure.
        if 'conn' in locals():
            conn.rollback()
        return {
            "success": False,
            "error": str(e)
        }
    finally:
        if 'conn' in locals():
            conn.close()
|
| 448 |
+
|
| 449 |
+
@mcp.tool()
async def get_invoice_details(invoice_id: Optional[int] = None, order_id: Optional[int] = None) -> Dict[str, Any]:
    """
    Get invoice details by invoice ID or order ID.

    Exactly one lookup key is required; when both are supplied, invoice_id
    takes precedence. The response joins invoice, order, customer and the
    order's line items.

    Args:
        invoice_id (int, optional): ID of the invoice
        order_id (int, optional): ID of the order

    Returns:
        dict: Invoice details including customer and order information
    """
    # Validate input before touching the database.
    if not invoice_id and not order_id:
        return {
            "success": False,
            "error": "Either invoice_id or order_id must be provided"
        }

    try:
        conn = get_db_connection()
        cursor = conn.cursor(cursor_factory=RealDictCursor)

        # Same join either way; only the WHERE column differs.
        if invoice_id:
            query = """
            SELECT i.*, o.order_date, o.status as order_status,
                   c.name as customer_name, c.email as customer_email, c.address as customer_address
            FROM erp_invoices i
            JOIN erp_orders o ON i.order_id = o.order_id
            JOIN erp_customers c ON o.customer_id = c.customer_id
            WHERE i.invoice_id = %s;
            """
            cursor.execute(query, [invoice_id])
        else:
            query = """
            SELECT i.*, o.order_date, o.status as order_status,
                   c.name as customer_name, c.email as customer_email, c.address as customer_address
            FROM erp_invoices i
            JOIN erp_orders o ON i.order_id = o.order_id
            JOIN erp_customers c ON o.customer_id = c.customer_id
            WHERE i.order_id = %s;
            """
            cursor.execute(query, [order_id])

        # fetchone(): one invoice per order is assumed by this lookup.
        invoice = cursor.fetchone()

        if not invoice:
            return {
                "success": False,
                "error": f"Invoice not found for the provided {'invoice_id' if invoice_id else 'order_id'}"
            }

        # Get order items via the order_id resolved from the invoice row,
        # so this works for both lookup modes.
        items_query = """
        SELECT oi.*, p.product_name, p.sku
        FROM erp_order_items oi
        JOIN erp_products p ON oi.product_id = p.product_id
        WHERE oi.order_id = %s;
        """
        cursor.execute(items_query, [invoice['order_id']])
        items = cursor.fetchall()

        # Convert RealDictRow objects to plain dicts for JSON serialization.
        invoice_dict = dict(invoice)
        items_list = [dict(item) for item in items]

        # Dates are formatted as ISO strings; amounts coerced to float.
        return {
            "success": True,
            "invoice": {
                "invoice_id": invoice_dict['invoice_id'],
                "invoice_number": invoice_dict.get('invoice_number', ''),
                "order_id": invoice_dict['order_id'],
                "order_date": invoice_dict['order_date'].strftime("%Y-%m-%d") if invoice_dict['order_date'] else None,
                "order_status": invoice_dict['order_status'],
                "amount": float(invoice_dict['amount']),
                "due_date": invoice_dict['due_date'].strftime("%Y-%m-%d") if invoice_dict['due_date'] else None,
                "payment_status": "Paid" if invoice_dict['is_paid'] else "Unpaid",
                "customer": {
                    "name": invoice_dict['customer_name'],
                    "email": invoice_dict['customer_email'],
                    "address": invoice_dict['customer_address']
                },
                "items": items_list
            }
        }

    except Exception as e:
        return {
            "success": False,
            "error": str(e)
        }
    finally:
        # 'conn' may be unbound if the connection attempt itself failed.
        if 'conn' in locals():
            conn.close()
|
| 542 |
+
|
| 543 |
+
if __name__ == "__main__":
    # Serve the ERP tools over stdio so an MCP client can spawn this
    # module as a subprocess.
    mcp.run(transport="stdio")
|
core/mcp_tools/global_disruptions_server.py
ADDED
|
@@ -0,0 +1,191 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# global_disruptions_server.py
|
| 2 |
+
from typing import Dict, Any, List, Optional
|
| 3 |
+
import psycopg2
|
| 4 |
+
from psycopg2.extras import RealDictCursor
|
| 5 |
+
from mcp.server.fastmcp import FastMCP
|
| 6 |
+
import os
|
| 7 |
+
from dotenv import load_dotenv
|
| 8 |
+
from datetime import datetime, date
|
| 9 |
+
|
| 10 |
+
load_dotenv()
|
| 11 |
+
|
| 12 |
+
mcp = FastMCP("Global Disruptions Database")
|
| 13 |
+
|
| 14 |
+
def get_db_connection():
    """Open a PostgreSQL connection configured from environment variables.

    Each setting falls back to a local-development default when the
    corresponding variable is unset.

    Raises:
        Exception: wrapping the underlying driver error message.
    """
    settings = {
        "host": os.getenv("POSTGRES_HOST", "localhost"),
        "database": os.getenv("POSTGRES_DB", "erp_db"),
        "user": os.getenv("POSTGRES_USER", "postgres"),
        "password": os.getenv("POSTGRES_PASSWORD", ""),
        "port": os.getenv("POSTGRES_PORT", "5432"),
    }
    try:
        return psycopg2.connect(**settings)
    except Exception as e:
        raise Exception(f"Database connection failed: {str(e)}")
|
| 27 |
+
|
| 28 |
+
@mcp.tool()
async def get_active_disruptions(source_country: Optional[str] = None, destination_country: Optional[str] = None) -> Dict[str, Any]:
    """
    Get active global disruptions that might affect orders between countries.

    With both countries provided, any active disruption touching either
    country (as source or destination) is returned, ordered by severity
    then recency. The response also aggregates a max severity, a critical
    flag (severity >= 4), and a shipping recommendation.

    Args:
        source_country (str, optional): The country of origin for the shipment
        destination_country (str, optional): The destination country for the shipment

    Returns:
        dict: Information about current disruptions that might affect shipping
    """
    try:
        conn = get_db_connection()
        cursor = conn.cursor(cursor_factory=RealDictCursor)

        # Base query to get disruptions; only active rows are considered.
        query = """
        SELECT
            disruption_id,
            source_country,
            destination_country,
            disruption_type,
            severity,
            start_date,
            expected_end_date,
            actual_end_date,
            is_active,
            description,
            impact_hours,
            created_at,
            updated_at
        FROM
            live_global_disruptions
        WHERE
            is_active = TRUE
        """

        params = []

        # Add filters for specific countries if provided. With both countries
        # given, the filter deliberately matches any disruption involving
        # either country, not only the exact pair.
        if source_country and destination_country:
            query += """
            AND (
                (source_country = %s AND destination_country = %s) OR
                (source_country = %s) OR
                (destination_country = %s)
            )
            """
            params.extend([source_country, destination_country, source_country, destination_country])
        elif source_country:
            query += """
            AND (source_country = %s)
            """
            params.append(source_country)
        elif destination_country:
            query += """
            AND (destination_country = %s)
            """
            params.append(destination_country)

        # Order by severity and recency.
        query += """
        ORDER BY
            severity DESC,
            updated_at DESC;
        """

        # BUGFIX: removed leftover debug print(query, params) that leaked the
        # composed SQL and parameters to stdout of the MCP server process.
        cursor.execute(query, params)
        disruptions = cursor.fetchall()

        now = datetime.now()

        # Create message based on filter criteria.
        if source_country and destination_country:
            message = f"Disruptions affecting shipments between {source_country} and {destination_country}"
        elif source_country:
            message = f"Disruptions affecting shipments from {source_country}"
        elif destination_country:
            message = f"Disruptions affecting shipments to {destination_country}"
        else:
            message = "All active global disruptions"

        if not disruptions:
            return {
                "success": True,
                "message": f"No active disruptions found for {message.lower()}.",
                "source_country": source_country,
                "destination_country": destination_country,
                "disruptions": [],
                "count": 0,
                "has_critical": False,
                "max_severity": 0,
                "recommendation": "No disruptions found. Shipping should proceed normally."
            }

        # Format the response.
        formatted_disruptions = []
        for disruption in disruptions:
            d = dict(disruption)

            # Map the 1..5 numeric severity to a label.
            severity = d['severity']
            severity_text = "Critical" if severity == 5 else "High" if severity == 4 else "Medium" if severity == 3 else "Low" if severity == 2 else "Minimal"

            # Calculate days active (None when start_date is missing).
            days_active = (now.date() - d['start_date']).days if d['start_date'] else None

            # Format dates for JSON.
            formatted_disruption = {
                "disruption_id": d['disruption_id'],
                "source_country": d['source_country'],
                "destination_country": d['destination_country'],
                "disruption_type": d['disruption_type'],
                "severity": d['severity'],
                "severity_text": severity_text,
                "start_date": d['start_date'].strftime("%Y-%m-%d") if d['start_date'] else None,
                "expected_end_date": d['expected_end_date'].strftime("%Y-%m-%d") if d['expected_end_date'] else None,
                "actual_end_date": d['actual_end_date'].strftime("%Y-%m-%d") if d['actual_end_date'] else None,
                "days_active": days_active,
                "is_active": d['is_active'],
                "description": d['description'],
                "impact_hours": d['impact_hours'],
                "created_at": d['created_at'].strftime("%Y-%m-%d %H:%M:%S") if d['created_at'] else None,
                "updated_at": d['updated_at'].strftime("%Y-%m-%d %H:%M:%S") if d['updated_at'] else None
            }
            formatted_disruptions.append(formatted_disruption)

        # Add recommendations based on disruptions.
        max_severity = max(d['severity'] for d in disruptions)
        has_critical = any(d['severity'] >= 4 for d in disruptions)

        if max_severity >= 4:
            recommendation = "Consider alternative shipping routes or delaying shipments until disruptions are resolved."
        elif max_severity == 3:
            recommendation = "Expect delays and consider adding buffer time to delivery estimates."
        else:
            recommendation = "Monitor situation for changes. Minor delays possible."

        return {
            "success": True,
            "message": message,
            "source_country": source_country,
            "destination_country": destination_country,
            "disruptions": formatted_disruptions,
            "count": len(formatted_disruptions),
            "has_critical": has_critical,
            "max_severity": max_severity,
            "recommendation": recommendation
        }

    except Exception as e:
        return {
            "success": False,
            "error": str(e)
        }
    finally:
        # 'conn' may be unbound if the connection attempt itself failed.
        if 'conn' in locals():
            conn.close()
|
| 189 |
+
|
| 190 |
+
if __name__ == "__main__":
    # Serve the disruption-lookup tool over stdio so an MCP client can
    # spawn this module as a subprocess.
    mcp.run(transport="stdio")
|
core/mcp_tools/math_server.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# math_server.py
#
# Minimal MCP server exposing basic integer arithmetic as tools.
from mcp.server.fastmcp import FastMCP

mcp = FastMCP("Math")

# Port used when the server is started with an HTTP-based transport;
# irrelevant for the stdio transport below.
mcp.settings.port = 8000

@mcp.tool()
async def add(a: int, b: int) -> int:
    """Add two numbers"""
    return a + b

@mcp.tool()
async def multiply(a: int, b: int) -> int:
    """Multiply two numbers"""
    return a * b

@mcp.tool()
async def subtract(a: int, b: int) -> int:
    """Subtract two numbers"""
    return a - b

if __name__ == "__main__":
    # Run over stdio so an MCP client can spawn this module directly.
    mcp.run(transport="stdio")
|
core/mcp_tools/weather_server.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# weather_server.py
|
| 2 |
+
from typing import Dict, Any, Optional
|
| 3 |
+
import requests
|
| 4 |
+
from mcp.server.fastmcp import FastMCP
|
| 5 |
+
import os
|
| 6 |
+
from dotenv import load_dotenv
|
| 7 |
+
load_dotenv()
|
| 8 |
+
|
| 9 |
+
mcp = FastMCP("Weather")
|
| 10 |
+
|
| 11 |
+
mcp.settings.port = 8001
|
| 12 |
+
|
| 13 |
+
@mcp.tool()
async def get_weather(location: str, lang: str = 'en') -> Dict[str, Any]:
    """
    Get current weather for a given location using WeatherAPI.

    Args:
        location (str): Location query, e.g., city name or "latitude,longitude".
        lang (str): Language code for the response (default is 'en').

    Returns:
        dict: JSON response from WeatherAPI, or {"error": str} on failure.
    """
    api_key = os.getenv("WEATHER_API_KEY")
    if not api_key:
        return {"error": "WEATHER_API_KEY not found in environment variables"}

    # NOTE(review): the API key travels as a query parameter over plain HTTP;
    # consider switching to https://api.weatherapi.com if the plan supports it.
    base_url = "http://api.weatherapi.com/v1"
    endpoint = "/current.json"

    params = {
        'key': api_key,
        'q': location,
        'lang': lang
    }

    try:
        # BUGFIX: added a timeout — requests.get without one can block the
        # MCP server indefinitely if the weather API stalls.
        response = requests.get(base_url + endpoint, params=params, timeout=10)
        response.raise_for_status()  # Raise exception for HTTP errors
        data = response.json()
        return data
    except requests.RequestException as e:
        return {"error": f"Request failed: {str(e)}"}
    except Exception as e:
        return {"error": f"Unexpected error: {str(e)}"}
|
| 47 |
+
|
| 48 |
+
if __name__ == "__main__":
|
| 49 |
+
mcp.run(transport="stdio")
|
dummydb.md
ADDED
|
@@ -0,0 +1,273 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Database Schema:
|
| 2 |
+
|
| 3 |
+
CREATE TABLE IF NOT EXISTS public.erp_customers
|
| 4 |
+
|
| 5 |
+
(
|
| 6 |
+
|
| 7 |
+
customer_id integer NOT NULL DEFAULT nextval('erp_customers_customer_id_seq'::regclass),
|
| 8 |
+
|
| 9 |
+
name character varying(100) COLLATE pg_catalog."default" NOT NULL,
|
| 10 |
+
|
| 11 |
+
email character varying(100) COLLATE pg_catalog."default" NOT NULL,
|
| 12 |
+
|
| 13 |
+
phone character varying(20) COLLATE pg_catalog."default",
|
| 14 |
+
|
| 15 |
+
address text COLLATE pg_catalog."default",
|
| 16 |
+
|
| 17 |
+
created_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP,
|
| 18 |
+
|
| 19 |
+
CONSTRAINT erp_customers_pkey PRIMARY KEY (customer_id),
|
| 20 |
+
|
| 21 |
+
CONSTRAINT erp_customers_email_key UNIQUE (email)
|
| 22 |
+
|
| 23 |
+
)
|
| 24 |
+
|
| 25 |
+
TABLESPACE pg_default;
|
| 26 |
+
|
| 27 |
+
ALTER TABLE IF EXISTS public.erp_customers
|
| 28 |
+
|
| 29 |
+
OWNER to postgres;
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
CREATE TABLE IF NOT EXISTS public.erp_invoices
|
| 33 |
+
|
| 34 |
+
(
|
| 35 |
+
|
| 36 |
+
invoice_id integer NOT NULL DEFAULT nextval('erp_invoices_invoice_id_seq'::regclass),
|
| 37 |
+
|
| 38 |
+
order_id integer,
|
| 39 |
+
|
| 40 |
+
invoice_date date DEFAULT CURRENT_DATE,
|
| 41 |
+
|
| 42 |
+
amount numeric(12,2) NOT NULL,
|
| 43 |
+
|
| 44 |
+
payment_terms character varying(50) COLLATE pg_catalog."default",
|
| 45 |
+
|
| 46 |
+
due_date date,
|
| 47 |
+
|
| 48 |
+
is_paid boolean DEFAULT false,
|
| 49 |
+
|
| 50 |
+
invoice_number character varying(50) COLLATE pg_catalog."default",
|
| 51 |
+
|
| 52 |
+
CONSTRAINT erp_invoices_pkey PRIMARY KEY (invoice_id),
|
| 53 |
+
|
| 54 |
+
CONSTRAINT erp_invoices_invoice_number_key UNIQUE (invoice_number),
|
| 55 |
+
|
| 56 |
+
CONSTRAINT erp_invoices_order_id_fkey FOREIGN KEY (order_id)
|
| 57 |
+
|
| 58 |
+
REFERENCES public.erp_orders (order_id) MATCH SIMPLE
|
| 59 |
+
|
| 60 |
+
ON UPDATE NO ACTION
|
| 61 |
+
|
| 62 |
+
ON DELETE NO ACTION
|
| 63 |
+
|
| 64 |
+
)
|
| 65 |
+
|
| 66 |
+
TABLESPACE pg_default;
|
| 67 |
+
|
| 68 |
+
ALTER TABLE IF EXISTS public.erp_invoices
|
| 69 |
+
|
| 70 |
+
OWNER to postgres;
|
| 71 |
+
|
| 72 |
+
CREATE INDEX IF NOT EXISTS idx_invoices_order
|
| 73 |
+
|
| 74 |
+
ON public.erp_invoices USING btree
|
| 75 |
+
|
| 76 |
+
(order_id ASC NULLS LAST)
|
| 77 |
+
|
| 78 |
+
TABLESPACE pg_default;
|
| 79 |
+
|
| 80 |
+
CREATE TABLE IF NOT EXISTS public.erp_order_history
|
| 81 |
+
|
| 82 |
+
(
|
| 83 |
+
|
| 84 |
+
history_id integer NOT NULL DEFAULT nextval('erp_order_history_history_id_seq'::regclass),
|
| 85 |
+
|
| 86 |
+
order_id integer,
|
| 87 |
+
|
| 88 |
+
"timestamp" timestamp without time zone DEFAULT CURRENT_TIMESTAMP,
|
| 89 |
+
|
| 90 |
+
status_change character varying(50) COLLATE pg_catalog."default",
|
| 91 |
+
|
| 92 |
+
notes text COLLATE pg_catalog."default",
|
| 93 |
+
|
| 94 |
+
updated_by character varying(100) COLLATE pg_catalog."default",
|
| 95 |
+
|
| 96 |
+
CONSTRAINT erp_order_history_pkey PRIMARY KEY (history_id),
|
| 97 |
+
|
| 98 |
+
CONSTRAINT erp_order_history_order_id_fkey FOREIGN KEY (order_id)
|
| 99 |
+
|
| 100 |
+
REFERENCES public.erp_orders (order_id) MATCH SIMPLE
|
| 101 |
+
|
| 102 |
+
ON UPDATE NO ACTION
|
| 103 |
+
|
| 104 |
+
ON DELETE NO ACTION
|
| 105 |
+
|
| 106 |
+
)
|
| 107 |
+
|
| 108 |
+
TABLESPACE pg_default;
|
| 109 |
+
|
| 110 |
+
ALTER TABLE IF EXISTS public.erp_order_history
|
| 111 |
+
|
| 112 |
+
OWNER to postgres;
|
| 113 |
+
|
| 114 |
+
CREATE INDEX IF NOT EXISTS idx_order_history_order
|
| 115 |
+
|
| 116 |
+
ON public.erp_order_history USING btree
|
| 117 |
+
|
| 118 |
+
(order_id ASC NULLS LAST)
|
| 119 |
+
|
| 120 |
+
TABLESPACE pg_default;
|
| 121 |
+
|
| 122 |
+
CREATE TABLE IF NOT EXISTS public.erp_order_items
|
| 123 |
+
|
| 124 |
+
(
|
| 125 |
+
|
| 126 |
+
order_item_id integer NOT NULL DEFAULT nextval('erp_order_items_order_item_id_seq'::regclass),
|
| 127 |
+
|
| 128 |
+
order_id integer,
|
| 129 |
+
|
| 130 |
+
product_id integer,
|
| 131 |
+
|
| 132 |
+
quantity integer NOT NULL,
|
| 133 |
+
|
| 134 |
+
unit_price numeric(10,2) NOT NULL,
|
| 135 |
+
|
| 136 |
+
subtotal numeric(12,2) NOT NULL,
|
| 137 |
+
|
| 138 |
+
CONSTRAINT erp_order_items_pkey PRIMARY KEY (order_item_id),
|
| 139 |
+
|
| 140 |
+
CONSTRAINT erp_order_items_order_id_fkey FOREIGN KEY (order_id)
|
| 141 |
+
|
| 142 |
+
REFERENCES public.erp_orders (order_id) MATCH SIMPLE
|
| 143 |
+
|
| 144 |
+
ON UPDATE NO ACTION
|
| 145 |
+
|
| 146 |
+
ON DELETE NO ACTION,
|
| 147 |
+
|
| 148 |
+
CONSTRAINT erp_order_items_product_id_fkey FOREIGN KEY (product_id)
|
| 149 |
+
|
| 150 |
+
REFERENCES public.erp_products (product_id) MATCH SIMPLE
|
| 151 |
+
|
| 152 |
+
ON UPDATE NO ACTION
|
| 153 |
+
|
| 154 |
+
ON DELETE NO ACTION
|
| 155 |
+
|
| 156 |
+
)
|
| 157 |
+
|
| 158 |
+
TABLESPACE pg_default;
|
| 159 |
+
|
| 160 |
+
ALTER TABLE IF EXISTS public.erp_order_items
|
| 161 |
+
|
| 162 |
+
OWNER to postgres;
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
CREATE INDEX IF NOT EXISTS idx_order_items_order
|
| 166 |
+
|
| 167 |
+
ON public.erp_order_items USING btree
|
| 168 |
+
|
| 169 |
+
(order_id ASC NULLS LAST)
|
| 170 |
+
|
| 171 |
+
TABLESPACE pg_default;
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
CREATE INDEX IF NOT EXISTS idx_order_items_product
|
| 175 |
+
|
| 176 |
+
ON public.erp_order_items USING btree
|
| 177 |
+
|
| 178 |
+
(product_id ASC NULLS LAST)
|
| 179 |
+
|
| 180 |
+
TABLESPACE pg_default;
|
| 181 |
+
|
| 182 |
+
CREATE TABLE IF NOT EXISTS public.erp_orders
|
| 183 |
+
|
| 184 |
+
(
|
| 185 |
+
|
| 186 |
+
order_id integer NOT NULL DEFAULT nextval('erp_orders_order_id_seq'::regclass),
|
| 187 |
+
|
| 188 |
+
customer_id integer,
|
| 189 |
+
|
| 190 |
+
order_date date DEFAULT CURRENT_DATE,
|
| 191 |
+
|
| 192 |
+
total_amount numeric(12,2) NOT NULL,
|
| 193 |
+
|
| 194 |
+
status character varying(20) COLLATE pg_catalog."default" NOT NULL DEFAULT 'Processing'::character varying,
|
| 195 |
+
|
| 196 |
+
previous_order_id integer,
|
| 197 |
+
|
| 198 |
+
estimated_delivery date,
|
| 199 |
+
|
| 200 |
+
actual_delivery date,
|
| 201 |
+
|
| 202 |
+
payment_status character varying(20) COLLATE pg_catalog."default" DEFAULT 'Pending'::character varying,
|
| 203 |
+
|
| 204 |
+
shipping_address text COLLATE pg_catalog."default" NOT NULL,
|
| 205 |
+
|
| 206 |
+
shipping_country character varying(100) COLLATE pg_catalog."default",
|
| 207 |
+
|
| 208 |
+
destination_country character varying(100) COLLATE pg_catalog."default",
|
| 209 |
+
|
| 210 |
+
CONSTRAINT erp_orders_pkey PRIMARY KEY (order_id),
|
| 211 |
+
|
| 212 |
+
CONSTRAINT erp_orders_customer_id_fkey FOREIGN KEY (customer_id)
|
| 213 |
+
|
| 214 |
+
REFERENCES public.erp_customers (customer_id) MATCH SIMPLE
|
| 215 |
+
|
| 216 |
+
ON UPDATE NO ACTION
|
| 217 |
+
|
| 218 |
+
ON DELETE NO ACTION,
|
| 219 |
+
|
| 220 |
+
CONSTRAINT erp_orders_previous_order_id_fkey FOREIGN KEY (previous_order_id)
|
| 221 |
+
|
| 222 |
+
REFERENCES public.erp_orders (order_id) MATCH SIMPLE
|
| 223 |
+
|
| 224 |
+
ON UPDATE NO ACTION
|
| 225 |
+
|
| 226 |
+
ON DELETE NO ACTION
|
| 227 |
+
|
| 228 |
+
)
|
| 229 |
+
|
| 230 |
+
TABLESPACE pg_default;
|
| 231 |
+
|
| 232 |
+
ALTER TABLE IF EXISTS public.erp_orders
|
| 233 |
+
|
| 234 |
+
OWNER to postgres;
|
| 235 |
+
|
| 236 |
+
CREATE INDEX IF NOT EXISTS idx_orders_customer
|
| 237 |
+
|
| 238 |
+
ON public.erp_orders USING btree
|
| 239 |
+
|
| 240 |
+
(customer_id ASC NULLS LAST)
|
| 241 |
+
|
| 242 |
+
TABLESPACE pg_default;
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
CREATE TABLE IF NOT EXISTS public.erp_products
|
| 246 |
+
|
| 247 |
+
(
|
| 248 |
+
|
| 249 |
+
product_id integer NOT NULL DEFAULT nextval('erp_products_product_id_seq'::regclass),
|
| 250 |
+
|
| 251 |
+
product_name character varying(100) COLLATE pg_catalog."default" NOT NULL,
|
| 252 |
+
|
| 253 |
+
description text COLLATE pg_catalog."default",
|
| 254 |
+
|
| 255 |
+
category character varying(50) COLLATE pg_catalog."default",
|
| 256 |
+
|
| 257 |
+
price numeric(10,2) NOT NULL,
|
| 258 |
+
|
| 259 |
+
stock_quantity integer NOT NULL DEFAULT 0,
|
| 260 |
+
|
| 261 |
+
sku character varying(50) COLLATE pg_catalog."default",
|
| 262 |
+
|
| 263 |
+
CONSTRAINT erp_products_pkey PRIMARY KEY (product_id),
|
| 264 |
+
|
| 265 |
+
CONSTRAINT erp_products_sku_key UNIQUE (sku)
|
| 266 |
+
|
| 267 |
+
)
|
| 268 |
+
|
| 269 |
+
TABLESPACE pg_default;
|
| 270 |
+
|
| 271 |
+
ALTER TABLE IF EXISTS public.erp_products
|
| 272 |
+
|
| 273 |
+
OWNER to postgres;
|
requirements.txt
CHANGED
|
@@ -1 +1,11 @@
|
|
| 1 |
-
psycopg2-binary==2.9.10
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
psycopg2-binary==2.9.10
|
| 2 |
+
gradio>=4.0.0
|
| 3 |
+
langchain>=0.1.0
|
| 4 |
+
langchain-core>=0.1.0
|
| 5 |
+
langchain-openai>=0.0.5
|
| 6 |
+
langchain-google-genai>=0.0.5
|
| 7 |
+
langchain-community>=0.0.10
|
| 8 |
+
langgraph==0.4.7
|
| 9 |
+
langchain-mcp-adapters==0.0.5
|
| 10 |
+
python-dotenv>=1.0.0
|
| 11 |
+
mcp==1.9.0
|